==> cinder-8.0.0/etc/cinder/api-httpd.conf <==

Listen 8776

LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" cinder_combined

<VirtualHost *:8776>
    WSGIDaemonProcess osapi_volume processes=2 threads=1 user=cinder display-name=%{GROUP}
    WSGIProcessGroup osapi_volume
    WSGIScriptAlias / /var/www/cgi-bin/cinder/osapi_volume
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/apache2/cinder_error.log
    CustomLog /var/log/apache2/cinder.log cinder_combined
</VirtualHost>

==> cinder-8.0.0/etc/cinder/rootwrap.d/volume.filters <==

# cinder-rootwrap command filters for volume nodes
# This file should be owned by (and only-writeable by) the root user

[Filters]
# cinder/volume/iscsi.py: iscsi_helper '--op' ...
ietadm: CommandFilter, ietadm, root
tgtadm: CommandFilter, tgtadm, root
iscsictl: CommandFilter, iscsictl, root
tgt-admin: CommandFilter, tgt-admin, root
cinder-rtstool: CommandFilter, cinder-rtstool, root
scstadmin: CommandFilter, scstadmin, root

# LVM related show commands
pvs: EnvFilter, env, root, LC_ALL=C, pvs
vgs: EnvFilter, env, root, LC_ALL=C, vgs
lvs: EnvFilter, env, root, LC_ALL=C, lvs
lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay

# LVM conf var
pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs
vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs
lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs
lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay

# os-brick library commands
# os_brick.privileged.run_as_root oslo.privsep context
# This line ties the superuser privs with the config files, context name,
# and (implicitly) the actual python code invoked.
privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*

# The following and any cinder/brick/* entries should all be obsoleted
# by privsep, and may be removed once the os-brick version requirement
# is updated appropriately.
scsi_id: CommandFilter, /lib/udev/scsi_id, root
drbdadm: CommandFilter, drbdadm, root

# cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list
vgcreate: CommandFilter, vgcreate, root

# cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
# cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ...
lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate
lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate

# cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
dd: CommandFilter, dd, root

# cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ...
lvremove: CommandFilter, lvremove, root

# cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'...
lvrename: CommandFilter, lvrename, root

# cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ...
# cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ...
lvextend: EnvFilter, env, root, LC_ALL=C, lvextend
lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend

# cinder/brick/local_dev/lvm.py: 'lvchange -a y -K <lv>'
lvchange: CommandFilter, lvchange, root

# cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name
lvconvert: CommandFilter, lvconvert, root

# cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
# cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
iscsiadm: CommandFilter, iscsiadm, root

# cinder/volume/drivers/lvm.py: 'shred', '-n3'
# cinder/volume/drivers/lvm.py: 'shred', '-n0', '-z', '-s%dMiB'
shred: CommandFilter, shred, root

# cinder/volume/utils.py: utils.temporary_chown(path, 0)
chown: CommandFilter, chown, root

# cinder/volume/utils.py: copy_volume(..., ionice='...')
ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7]
ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3]

# cinder/volume/utils.py: setup_blkio_cgroup()
cgcreate: CommandFilter, cgcreate, root
cgset: CommandFilter, cgset, root
cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+

# cinder/volume/driver.py
dmsetup: CommandFilter, dmsetup, root
ln: CommandFilter, ln, root

# cinder/image/image_utils.py
qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img
qemu-img_convert: CommandFilter, qemu-img, root

udevadm: CommandFilter, udevadm, root

# cinder/volume/driver.py: utils.read_file_as_root()
cat: CommandFilter, cat, root

# cinder/volume/nfs.py
stat: CommandFilter, stat, root
mount: CommandFilter, mount, root
df: CommandFilter, df, root
du: CommandFilter, du, root
truncate: CommandFilter, truncate, root
chmod: CommandFilter, chmod, root
rm: CommandFilter, rm, root

# cinder/volume/drivers/netapp/nfs.py:
netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+

# cinder/volume/drivers/glusterfs.py
chgrp: CommandFilter, chgrp, root
umount: CommandFilter, umount, root
fallocate: CommandFilter, fallocate, root

# cinder/volumes/drivers/hds/hds.py:
hus-cmd: CommandFilter, hus-cmd, root
hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root

# cinder/volumes/drivers/hds/hnas_backend.py
ssc: CommandFilter, ssc, root

# cinder/brick/initiator/connector.py:
ls: CommandFilter, ls, root
tee: CommandFilter, tee, root
multipath: CommandFilter, multipath, root
multipathd: CommandFilter, multipathd, root
systool: CommandFilter, systool, root

# cinder/volume/drivers/block_device.py
blockdev: CommandFilter, blockdev, root

# cinder/volume/drivers/ibm/gpfs.py
# cinder/volume/drivers/tintri.py
mv: CommandFilter, mv, root

# cinder/volume/drivers/ibm/gpfs.py
cp: CommandFilter, cp, root
mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root
mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root
mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root
mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root
mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root
mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root
mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root
mkfs: CommandFilter, mkfs, root
mmcrfileset: CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root
mmlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmlinkfileset, root
mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root
mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root
mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root
mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root

# cinder/volume/drivers/ibm/gpfs.py
# cinder/volume/drivers/ibm/ibmnas.py
find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -inum, \d+

# cinder/brick/initiator/connector.py:
aoe-revalidate: CommandFilter, aoe-revalidate, root
aoe-discover: CommandFilter, aoe-discover, root
aoe-flush: CommandFilter, aoe-flush, root

# cinder/brick/initiator/linuxscsi.py:
sg_scan: CommandFilter, sg_scan, root

# cinder/backup/services/tsm.py
dsmc: CommandFilter, /usr/bin/dsmc, root

# cinder/volume/drivers/hitachi/hbsd_horcm.py
raidqry: CommandFilter, raidqry, root
raidcom: CommandFilter, raidcom, root
pairsplit: CommandFilter, pairsplit, root
paircreate: CommandFilter, paircreate, root
pairdisplay: CommandFilter, pairdisplay, root
pairevtwait: CommandFilter, pairevtwait, root
horcmstart.sh: CommandFilter, horcmstart.sh, root
horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root
horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr

# cinder/volume/drivers/hitachi/hbsd_snm2.py
auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman
auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref
auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef
aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1
auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn
auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap
autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap
aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol
auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd
auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel
auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize
auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser
autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef
autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt
autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini
auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi
audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool
aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal
aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon

# cinder/volume/drivers/hgst.py
vgc-cluster: CommandFilter, vgc-cluster, root

# cinder/volume/drivers/vzstorage.py
pstorage-mount: CommandFilter, pstorage-mount, root
pstorage: CommandFilter, pstorage, root

# initiator/connector.py:
drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid

==> cinder-8.0.0/etc/cinder/rootwrap.conf <==

# Configuration for cinder-rootwrap
# This file should be owned by (and only-writeable by) the root user

[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap

# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin

# Enable logging to syslog
# Default value is False
use_syslog=False

# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog

# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
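Each filter line above binds a name to a matcher class, an executable, and the user to run it as; rootwrap.conf tells cinder-rootwrap where those filter files live. As a rough illustration of the matching idea only — a hypothetical, heavily simplified stand-in, not oslo.rootwrap's actual implementation:

import os


class CommandFilter(object):
    """Accept a command if its basename matches the filtered executable."""

    def __init__(self, exec_path, run_as):
        self.exec_path = exec_path   # e.g. 'lvremove' or '/usr/bin/dsmc'
        self.run_as = run_as         # e.g. 'root'

    def match(self, userargs):
        if not userargs:
            return False
        wanted = os.path.basename(self.exec_path)
        return os.path.basename(userargs[0]) == wanted


# 'lvremove: CommandFilter, lvremove, root' roughly corresponds to:
f = CommandFilter('lvremove', 'root')
print(f.match(['lvremove', '-f', 'cinder-volumes/volume-1']))  # True
print(f.match(['rm', '-rf', '/']))                             # False

A rejected command is simply never escalated; only argument vectors that satisfy some filter are re-executed as the configured user.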
"volume_extension:quota_classes:validate_setup_for_nested_quota_use": "rule:admin_api", "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", "volume_extension:volume_admin_actions:force_detach": "rule:admin_api", "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", "volume_extension:backup_admin_actions:force_delete": "rule:admin_api", "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", "volume_extension:volume_host_attribute": "rule:admin_api", "volume_extension:volume_tenant_attribute": "rule:admin_or_owner", "volume_extension:volume_mig_status_attribute": "rule:admin_api", "volume_extension:hosts": "rule:admin_api", "volume_extension:services:index": "rule:admin_api", "volume_extension:services:update" : "rule:admin_api", "volume_extension:volume_manage": "rule:admin_api", "volume_extension:volume_unmanage": "rule:admin_api", "volume_extension:capabilities": "rule:admin_api", "volume:create_transfer": "rule:admin_or_owner", "volume:accept_transfer": "", "volume:delete_transfer": "rule:admin_or_owner", "volume:get_all_transfers": "rule:admin_or_owner", "volume_extension:replication:promote": "rule:admin_api", "volume_extension:replication:reenable": "rule:admin_api", "volume:enable_replication": "rule:admin_api", "volume:disable_replication": "rule:admin_api", "volume:failover_replication": "rule:admin_api", "volume:list_replication_targets": "rule:admin_api", "backup:create" : "", "backup:delete": "rule:admin_or_owner", "backup:get": "rule:admin_or_owner", "backup:get_all": "rule:admin_or_owner", "backup:restore": "rule:admin_or_owner", "backup:backup-import": "rule:admin_api", "backup:backup-export": "rule:admin_api", "snapshot_extension:snapshot_actions:update_snapshot_status": "", "snapshot_extension:snapshot_manage": "rule:admin_api", "snapshot_extension:snapshot_unmanage": "rule:admin_api", "consistencygroup:create" : "group:nobody", "consistencygroup:delete": "group:nobody", "consistencygroup:update": "group:nobody", "consistencygroup:get": "group:nobody", "consistencygroup:get_all": "group:nobody", "consistencygroup:create_cgsnapshot" : "group:nobody", "consistencygroup:delete_cgsnapshot": "group:nobody", "consistencygroup:get_cgsnapshot": "group:nobody", "consistencygroup:get_all_cgsnapshots": "group:nobody", "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api" } cinder-8.0.0/etc/cinder/api-paste.ini0000664000567000056710000000456012701406250020531 0ustar jenkinsjenkins00000000000000############# # OpenStack # ############# [composite:osapi_volume] use = call:cinder.api:root_app_factory /: apiversions /v1: openstack_volume_api_v1 /v2: openstack_volume_api_v2 /v3: openstack_volume_api_v3 [composite:openstack_volume_api_v1] use = call:cinder.api.middleware.auth:pipeline_factory noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv1 keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 [composite:openstack_volume_api_v2] use = call:cinder.api.middleware.auth:pipeline_factory noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv2 keystone = cors 
==> cinder-8.0.0/etc/cinder/api-paste.ini <==

#############
# OpenStack #
#############

[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
/v3: openstack_volume_api_v3

[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv1
keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1

[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv2
keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2

[composite:openstack_volume_api_v3]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv3
keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3

[filter:request_id]
paste.filter_factory = oslo_middleware.request_id:RequestId.factory

[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = cinder

[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory

[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory

[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory

[filter:sizelimit]
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory

[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory

[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory

[app:apiv3]
paste.app_factory = cinder.api.v3.router:APIRouter.factory

[pipeline:apiversions]
pipeline = cors faultwrap osvolumeversionapp

[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory

##########
# Shared #
##########

[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
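Each [filter:...] section above names a paste.filter_factory, and paste.deploy chains the factories so every filter wraps the next stage of the pipeline. A minimal sketch of that WSGI middleware pattern — the middleware name and behavior here are invented for illustration, not one of cinder's real filters:

def filter_factory(global_conf, **local_conf):
    """Entry point paste.deploy calls once when loading the pipeline."""
    def _factory(app):
        # 'app' is the next element in the pipeline (filter or final app).
        return LoggingMiddleware(app)
    return _factory


class LoggingMiddleware(object):
    """WSGI middleware that prints each request path before delegating."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        print('request: %s' % environ.get('PATH_INFO'))
        return self.app(environ, start_response)

In the pipelines above, a request entering /v2 therefore passes through cors, request_id, faultwrap, sizelimit, osprofiler, and the auth filters, in order, before reaching the apiv2 router.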
==> cinder-8.0.0/etc/cinder/logging_sample.conf <==

[loggers]
keys = root, cinder, taskflow, cinder_flow_utils

[handlers]
keys = stderr, stdout, watchedfile, syslog, tasks, null

[formatters]
keys = context, default

[logger_root]
level = WARNING
handlers = null

[logger_cinder]
level = INFO
handlers = stderr
qualname = cinder

# Both of these are used for tracking what cinder and taskflow are doing with
# regard to flows and tasks (and the activity therein).
[logger_cinder_flow_utils]
level = INFO
handlers = tasks,stderr
qualname = cinder.flow_utils

[logger_taskflow]
level = INFO
handlers = tasks
qualname = taskflow

[logger_amqplib]
level = WARNING
handlers = stderr
qualname = amqplib

[logger_sqlalchemy]
level = WARNING
handlers = stderr
qualname = sqlalchemy
# "level = INFO" logs SQL queries.
# "level = DEBUG" logs SQL queries and results.
# "level = WARNING" logs neither. (Recommended for production systems.)

[logger_boto]
level = WARNING
handlers = stderr
qualname = boto

[logger_suds]
level = INFO
handlers = stderr
qualname = suds

[logger_eventletwsgi]
level = WARNING
handlers = stderr
qualname = eventlet.wsgi.server

[handler_stderr]
class = StreamHandler
args = (sys.stderr,)
formatter = context

[handler_stdout]
class = StreamHandler
args = (sys.stdout,)
formatter = context

[handler_watchedfile]
class = handlers.WatchedFileHandler
args = ('cinder.log',)
formatter = context

[handler_tasks]
class = handlers.WatchedFileHandler
args = ('tasks.log',)
formatter = context

[handler_syslog]
class = handlers.SysLogHandler
args = ('/dev/log', handlers.SysLogHandler.LOG_USER)
formatter = context

[handler_null]
class = logging.NullHandler
formatter = default
args = ()

[formatter_context]
class = oslo_log.formatters.ContextFormatter

[formatter_default]
format = %(message)s
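The sample above is standard Python fileConfig format, so it can be exercised directly with the stdlib; the target path below is an assumption (deployers copy the sample to wherever cinder's logging config option points):

import logging
import logging.config

# Load the loggers/handlers/formatters defined in the sample file.
logging.config.fileConfig('/etc/cinder/logging.conf',
                          disable_existing_loggers=False)

# qualname = cinder above routes this through the stderr handler at INFO.
logging.getLogger('cinder').info('volume service starting')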
==> cinder-8.0.0/etc/cinder/README-cinder.conf.sample <==

The cinder.conf sample file is no longer generated and
maintained in Trunk. To generate your own version of
cinder.conf, use the following command:

tox -egenconfig

==> cinder-8.0.0/babel.cfg <==

[python: **.py]

==> cinder-8.0.0/.coveragerc <==

[run]
branch = True
source = cinder
omit = cinder/tests/*,cinder/openstack/common/*

[report]
ignore_errors = True

==> cinder-8.0.0/setup.cfg <==

[metadata]
name = cinder
summary = OpenStack Block Storage
description-file = README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7

[global]
setup-hooks = pbr.hooks.setup_hook

[files]
packages = cinder

[entry_points]
cinder.scheduler.filters =
    AvailabilityZoneFilter = cinder.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter
    CapabilitiesFilter = cinder.scheduler.filters.capabilities_filter:CapabilitiesFilter
    CapacityFilter = cinder.scheduler.filters.capacity_filter:CapacityFilter
    DifferentBackendFilter = cinder.scheduler.filters.affinity_filter:DifferentBackendFilter
    DriverFilter = cinder.scheduler.filters.driver_filter:DriverFilter
    JsonFilter = cinder.scheduler.filters.json_filter:JsonFilter
    RetryFilter = cinder.scheduler.filters.ignore_attempted_hosts_filter:IgnoreAttemptedHostsFilter
    SameBackendFilter = cinder.scheduler.filters.affinity_filter:SameBackendFilter
    InstanceLocalityFilter = cinder.scheduler.filters.instance_locality_filter:InstanceLocalityFilter
cinder.scheduler.weights =
    AllocatedCapacityWeigher = cinder.scheduler.weights.capacity:AllocatedCapacityWeigher
    CapacityWeigher = cinder.scheduler.weights.capacity:CapacityWeigher
    ChanceWeigher = cinder.scheduler.weights.chance:ChanceWeigher
    GoodnessWeigher = cinder.scheduler.weights.goodness:GoodnessWeigher
    VolumeNumberWeigher = cinder.scheduler.weights.volume_number:VolumeNumberWeigher
oslo.config.opts =
    cinder = cinder.opts:list_opts
    keystonemiddleware = keystonemiddleware.auth_token:list_opts
    oslo_concurrency = oslo_concurrency.opts:list_opts
    oslo.messaging = oslo_messaging.opts:list_opts
    oslo.db.concurrency = oslo.db.concurrency:list_opts
oslo.config.opts.defaults =
    cinder = cinder.common.config:set_middleware_defaults
console_scripts =
    cinder-all = cinder.cmd.all:main
    cinder-api = cinder.cmd.api:main
    cinder-backup = cinder.cmd.backup:main
    cinder-manage = cinder.cmd.manage:main
    cinder-rootwrap = oslo_rootwrap.cmd:main
    cinder-rtstool = cinder.cmd.rtstool:main
    cinder-scheduler = cinder.cmd.scheduler:main
    cinder-volume = cinder.cmd.volume:main
    cinder-volume-usage-audit = cinder.cmd.volume_usage_audit:main
wsgi_scripts =
    cinder-wsgi = cinder.wsgi.wsgi:initialize_application
oslo_messaging.notify.drivers =
    cinder.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver
    cinder.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver
    cinder.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver
    cinder.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver
    cinder.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver
oslo_middleware =
    cinder.api.middleware.sizelimit = oslo_middleware.sizelimit
    cinder.openstack.common.middleware.request_id = oslo_middleware.request_id
cinder.database.migration_backend =
    sqlalchemy = oslo_db.sqlalchemy.migration

[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source

[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

[compile_catalog]
directory = cinder/locale
domain = cinder

[update_catalog]
domain = cinder
output_dir = cinder/locale
input_file = cinder/locale/cinder.pot

[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = cinder/locale/cinder.pot
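The [entry_points] groups above are how cinder's scheduler discovers filters and weighers, and how the console scripts get their executables. A small sketch of the runtime lookup with setuptools' pkg_resources, assuming an installed cinder package (not code from this tree):

import pkg_resources

# Iterate the scheduler filter plugins declared under [entry_points].
for ep in pkg_resources.iter_entry_points('cinder.scheduler.filters'):
    cls = ep.load()  # imports the module and returns the named class
    print('%s -> %s.%s' % (ep.name, cls.__module__, cls.__name__))

The same mechanism backs console_scripts: pip generates a cinder-volume wrapper that simply imports cinder.cmd.volume and calls main().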
==> cinder-8.0.0/tools/lintstack.py <==

#!/usr/bin/env python

# Copyright (c) 2013, AT&T Labs, Yun Mao
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""pylint error checking."""

from __future__ import print_function

import json
import re
import sys

from pylint import lint
from pylint.reporters import text
from six.moves import cStringIO as StringIO

# Note(maoy): E1103 is error code related to partial type inference
ignore_codes = ["E1103"]
# Note(maoy): the error message is the pattern of E0202. It should be ignored
# for cinder.tests modules
# Note(fengqian): the second error message is the pattern of [E0611].
# It should be ignored because the six module is used to keep py3.X
# compatibility.
# Note(e0ne): the third error message is for SQLAlchemy update() calls
# in DB schema migrations.
# Note(xyang): the fourth and fifth error messages are for the code [E1101].
# They should be ignored because 'sha256' and 'sha224' are functions in
# 'hashlib'.
# Note(aarefiev): the sixth error message is for SQLAlchemy rename calls in
# DB migration(033_add_encryption_unique_key).
ignore_messages = ["An attribute affected in cinder.tests",
                   "No name 'urllib' in module '_MovedItems'",
                   "No value passed for parameter 'dml'",
                   "Module 'hashlib' has no 'sha256' member",
                   "Module 'hashlib' has no 'sha224' member",
                   "Instance of 'Table' has no 'rename' member"]

# Note(maoy): We ignore cinder.tests for now due to high false
# positive rate.
ignore_modules = ["cinder/tests/"]

# Note(thangp): E0213, E1101, and E1102 should be ignored for only
# cinder.object modules. E0213 and E1102 are error codes related to
# the first argument of a method, but should be ignored because the method
# is a remotable class method. E1101 is error code related to accessing a
# non-existent member of an object, but should be ignored because the object
# member is created dynamically.
objects_ignore_codes = ["E0213", "E1101", "E1102"]
# Note(thangp): The error messages are for codes [E1120, E1101] appearing in
# the cinder code base using objects. E1120 is an error code related to no
# value passed for a parameter in a function call, but should be ignored
# because it is reporting false positives. E1101 is error code related to
# accessing a non-existent member of an object, but should be ignored because
# the object member is created dynamically.
objects_ignore_messages = [
    "No value passed for parameter 'id' in function call",
    "Module 'cinder.objects' has no 'Backup' member",
    "Module 'cinder.objects' has no 'BackupImport' member",
    "Module 'cinder.objects' has no 'BackupList' member",
    "Module 'cinder.objects' has no 'CGSnapshot' member",
    "Module 'cinder.objects' has no 'CGSnapshotList' member",
    "Module 'cinder.objects' has no 'ConsistencyGroup' member",
    "Module 'cinder.objects' has no 'ConsistencyGroupList' member",
    "Module 'cinder.objects' has no 'Service' member",
    "Module 'cinder.objects' has no 'ServiceList' member",
    "Module 'cinder.objects' has no 'Snapshot' member",
    "Module 'cinder.objects' has no 'SnapshotList' member",
    "Module 'cinder.objects' has no 'Volume' member",
    "Module 'cinder.objects' has no 'VolumeList' member",
]
objects_ignore_modules = ["cinder/objects/"]

KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"


class LintOutput(object):

    _cached_filename = None
    _cached_content = None

    def __init__(self, filename, lineno, line_content, code, message,
                 lintoutput):
        self.filename = filename
        self.lineno = lineno
        self.line_content = line_content
        self.code = code
        self.message = message
        self.lintoutput = lintoutput

    @classmethod
    def from_line(cls, line):
(.*)", line) matched = m.groups() filename, lineno, code, message = (matched[0], int(matched[1]), matched[2], matched[-1]) if cls._cached_filename != filename: with open(filename) as f: cls._cached_content = list(f.readlines()) cls._cached_filename = filename line_content = cls._cached_content[lineno - 1].rstrip() return cls(filename, lineno, line_content, code, message, line.rstrip()) @classmethod def from_msg_to_dict(cls, msg): """From the output of pylint msg, to a dict, where each key is a unique error identifier, value is a list of LintOutput """ result = {} for line in msg.splitlines(): obj = cls.from_line(line) if obj.is_ignored(): continue key = obj.key() if key not in result: result[key] = [] result[key].append(obj) return result def is_ignored(self): if self.code in ignore_codes: return True if any(self.filename.startswith(name) for name in ignore_modules): return True if any(msg in self.message for msg in (ignore_messages + objects_ignore_messages)): return True if (self.code in objects_ignore_codes and any(self.filename.startswith(name) for name in objects_ignore_modules)): return True if (self.code in objects_ignore_codes and any(self.filename.startswith(name) for name in objects_ignore_modules)): return True return False def key(self): if self.code in ["E1101", "E1103"]: # These two types of errors are like Foo class has no member bar. # We discard the source code so that the error will be ignored # next time another Foo.bar is encountered. return self.message, "" return self.message, self.line_content.strip() def json(self): return json.dumps(self.__dict__) def review_str(self): return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" "%(code)s: %(message)s" % self.__dict__) class ErrorKeys(object): @classmethod def print_json(cls, errors, output=sys.stdout): print("# automatically generated by tools/lintstack.py", file=output) for i in sorted(errors.keys()): print(json.dumps(i), file=output) @classmethod def from_file(cls, filename): keys = set() for line in open(filename): if line and line[0] != "#": d = json.loads(line) keys.add(tuple(d)) return keys def run_pylint(): buff = StringIO() reporter = text.ParseableTextReporter(output=buff) args = ["--include-ids=y", "-E", "cinder"] lint.Run(args, reporter=reporter, exit=False) val = buff.getvalue() buff.close() return val def generate_error_keys(msg=None): print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE) if msg is None: msg = run_pylint() errors = LintOutput.from_msg_to_dict(msg) with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: ErrorKeys.print_json(errors, output=f) def validate(newmsg=None): print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE) known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) if newmsg is None: print("Running pylint. Be patient...") newmsg = run_pylint() errors = LintOutput.from_msg_to_dict(newmsg) print("Unique errors reported by pylint: was %d, now %d." % (len(known), len(errors))) passed = True for err_key, err_list in errors.items(): for err in err_list: if err_key not in known: print(err.lintoutput) print() passed = False if passed: print("Congrats! pylint check passed.") redundant = known - set(errors.keys()) if redundant: print("Extra credit: some known pylint exceptions disappeared.") for i in sorted(redundant): print(json.dumps(i)) print("Consider regenerating the exception file if you will.") else: print("Please fix the errors above. 
        print("Please fix the errors above. If you believe they are false "
              "positives, run 'tools/lintstack.py generate' to overwrite.")
        sys.exit(1)


def usage():
    print("""Usage: tools/lintstack.py [generate|validate]
    To generate pylint_exceptions file: tools/lintstack.py generate
    To validate the current commit: tools/lintstack.py
    """)


def main():
    option = "validate"
    if len(sys.argv) > 1:
        option = sys.argv[1]
    if option == "generate":
        generate_error_keys()
    elif option == "validate":
        validate()
    else:
        usage()


if __name__ == "__main__":
    main()
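The parsing in from_line() hinges on pylint's parseable output format; a quick standalone check of that regex against a made-up report line (the file path and message below are invented for illustration):

import re

# Hypothetical pylint parseable-output line, shaped like what
# ParseableTextReporter emits.
sample = ("cinder/volume/api.py:42: [E1101, VolumeAPI.create] "
          "Instance of 'X' has no 'y' member")
m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", sample)
print(m.groups())
# ('cinder/volume/api.py', '42', 'E1101', ', VolumeAPI.create',
#  "Instance of 'X' has no 'y' member")

matched[0], matched[1], matched[2], and matched[-1] in from_line() then pick out the filename, line number, error code, and message from exactly those groups.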
""" if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. """ if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() cinder-8.0.0/tools/install_venv.py0000664000567000056710000000454412701406250020346 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
==> cinder-8.0.0/tools/install_venv.py <==

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Installation script for Cinder's development virtualenv."""

from __future__ import print_function

import optparse
import os
import subprocess
import sys

import install_venv_common as install_venv


def print_help():
    help = """
    Cinder development environment setup is complete.

    Cinder development uses virtualenv to track and manage Python
    dependencies while in development and testing.

    To activate the Cinder virtualenv for the extent of your current shell
    session you can run:

    $ source .venv/bin/activate

    Or, if you prefer, you can run commands in the virtualenv on a case by
    case basis by running:

    $ tools/with_venv.sh <your command>

    Also, make test will automatically use the virtualenv.
    """
    print(help)


def main(argv):
    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    venv = os.path.join(root, '.venv')
    if os.environ.get('venv'):
        venv = os.environ['venv']

    pip_requires = os.path.join(root, 'requirements.txt')
    test_requires = os.path.join(root, 'test-requirements.txt')
    project = 'Cinder'
    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
    install = install_venv.InstallVenv(root, venv, pip_requires,
                                       test_requires, py_version, project)
    options = install.parse_args(argv)
    install.check_python_version()
    install.check_dependencies()
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()
    print_help()


if __name__ == '__main__':
    main(sys.argv)

==> cinder-8.0.0/tools/generate_driver_list.py <==

#! /usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generate list of cinder drivers"""

import importlib
import inspect
import pkgutil
import pprint

from cinder.volume import drivers
from cinder.volume import driver

package = drivers


def get_driver_list():
    dr_list = []
    for _loader, modname, _ispkg in pkgutil.walk_packages(
            path=package.__path__,
            prefix=package.__name__ + '.',
            onerror=lambda x: None):
        try:
            mod = importlib.import_module(modname)
            list_classes = inspect.getmembers(mod, inspect.isclass)
            dr_list += [
                modname + '.' + dr_name
                for dr_name, dr in list_classes
                if driver.BaseVD in inspect.getmro(dr)]
        except ImportError:
            print("%s module ignored!!" % modname)
    return dr_list


def main():
    dr_list = get_driver_list()
    print("Drivers list:")
    pprint.pprint(dr_list)


if __name__ == '__main__':
    main()

==> cinder-8.0.0/tools/config/check_uptodate.sh <==

#!/usr/bin/env bash

CHECKOPTS=0
if [ "$1" == "--checkopts" ]; then
    CHECKOPTS=1
fi

PROJECT_NAME=${PROJECT_NAME:-cinder}
CFGFILE_NAME=${PROJECT_NAME}.conf.sample

if [ $CHECKOPTS -eq 1 ]; then
    if [ ! -e cinder/opts.py ]; then
        echo -en "\n\n#################################################"
        echo -en "\nERROR: cinder/opts.py file is missing."
        echo -en "\n#################################################\n"
        exit 1
    else
        mv cinder/opts.py cinder/opts.py.orig
        tox -e genopts &> /dev/null
        if [ $? -ne 0 ]; then
            echo -en "\n\n#################################################"
            echo -en "\nERROR: Non-zero exit from generate_cinder_opts.py."
            echo -en "\n       See output above for details.\n"
            echo -en "#################################################\n"
            mv cinder/opts.py.orig cinder/opts.py
            exit 1
        else
            diff cinder/opts.py.orig cinder/opts.py
            if [ $? -ne 0 ]; then
                echo -en "\n\n########################################################"
                echo -en "\nERROR: Configuration options change detected."
                echo -en "\n       A new cinder/opts.py file must be generated."
                echo -en "\n       Run 'tox -e genopts' from the base directory"
                echo -en "\n       and add the result to your commit."
                echo -en "\n########################################################\n\n"
                rm cinder/opts.py
                mv cinder/opts.py.orig cinder/opts.py
                exit 1
            else
                rm cinder/opts.py.orig
            fi
        fi
    fi
else
    tox -e genconfig &> /dev/null
    if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
        CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
        rm -f $CFGFILE
    else
        echo -en "\n\n####################################################"
        echo -en "\n${0##*/}: Can't find config file."
        echo -en "\n####################################################\n\n"
        exit 1
    fi
fi

==> cinder-8.0.0/tools/config/generate_sample.sh <==

#!/usr/bin/env bash

# Generate sample configuration for your project.
#
# Aside from the command line flags, it also respects a config file which
# should be named oslo.config.generator.rc and be placed in the same directory.
#
# You can then export the following variables:
# CINDER_CONFIG_GENERATOR_EXTRA_MODULES: list of modules to interrogate for options.
# CINDER_CONFIG_GENERATOR_EXTRA_LIBRARIES: list of libraries to discover.
# CINDER_CONFIG_GENERATOR_EXCLUDED_FILES: list of files to remove from automatic listing.

BASEDIR=${BASEDIR:-`pwd`}

NOSAMPLE=0
if [ ! -z ${2} ] ; then
    if [ "${2}" == "--nosamplefile" ]; then
        NOSAMPLE=1
    fi
fi

print_error ()
{
    echo -en "\n\n##########################################################"
    echo -en "\nERROR: ${0} was not called from tox."
    echo -en "\n       Execute 'tox -e genconfig' for cinder.conf.sample"
    echo -en "\n       generation."
    echo -en "\n##########################################################\n\n"
}

if [ -z ${1} ] ; then
    print_error
    exit 1
fi

if [ ${1} != "from_tox" ] ; then
    print_error
    exit 1
fi

if ! [ -d $BASEDIR ] ; then
    echo "${0##*/}: missing project base directory" >&2 ; exit 1
elif [[ $BASEDIR != /* ]] ; then
    BASEDIR=$(cd "$BASEDIR" && pwd)
fi

PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)}
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d $TARGETDIR ] ; then
    echo "${0##*/}: invalid project package name" >&2 ; exit 1
fi

BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
find $TARGETDIR -type f -name "*.pyc" -delete

export TARGETDIR=$TARGETDIR
export BASEDIRESC=$BASEDIRESC

if [ -e $TARGETDIR/opts.py ] ; then
    mv $TARGETDIR/opts.py $TARGETDIR/opts.py.bak
fi

python cinder/config/generate_cinder_opts.py
if [ $? -ne 0 ] ; then
    echo -en "\n\n#################################################"
    echo -en "\nERROR: Non-zero exit from generate_cinder_opts.py."
    echo -en "\n       See output above for details.\n"
    echo -en "#################################################\n"
    if [ -e $TARGETDIR/opts.py.bak ] ; then
        mv $TARGETDIR/opts.py.bak $TARGETDIR/opts.py
    fi
    exit 1
fi

if [ $NOSAMPLE -eq 0 ] ; then
    oslo-config-generator --config-file=cinder/config/cinder-config-generator.conf
    if [ $? -ne 0 ] ; then
        echo -en "\n\n#################################################"
        echo -en "\nERROR: Non-zero exit from oslo-config-generator."
        echo -en "\n       See output above for details.\n"
        echo -en "#################################################\n"
        mv $TARGETDIR/opts.py.bak $TARGETDIR/opts.py
        exit 1
    fi

    diff $TARGETDIR/opts.py $TARGETDIR/opts.py.bak &> /dev/null
    if [ $? -ne 0 ] ; then
        mv $TARGETDIR/opts.py.bak $TARGETDIR/opts.py
    else
        rm -f $TARGETDIR/opts.py.bak
    fi

    if [ ! -s ./etc/cinder/cinder.conf.sample ] ; then
        echo -en "\n\n#########################################################"
        echo -en "\nERROR: etc/cinder/cinder.sample.conf not created properly."
        echo -en "\n       See above output for details.\n"
        echo -en "###########################################################\n"
        exit 1
    fi
else
    rm -f $TARGETDIR/opts.py.bak
fi

==> cinder-8.0.0/tools/bandit.yaml <==

# optional: after how many files to update progress
#show_progress_every: 100

# optional: plugins directory name
#plugins_dir: 'plugins'

# optional: plugins discovery name pattern
plugin_name_pattern: '*.py'

# optional: terminal escape sequences to display colors
#output_colors:
#    DEFAULT: '\033[0m'
#    HEADER: '\033[95m'
#    LOW: '\033[94m'
#    MEDIUM: '\033[93m'
#    HIGH: '\033[91m'

# optional: log format string
#log_format: "[%(module)s]\t%(levelname)s\t%(message)s"

# globs of files which should be analyzed
include:
    - '*.py'

# a list of strings, which if found in the path will cause files to be excluded
# for example /tests/ - to remove all files in tests directory
exclude_dirs:
    - '/tests/'

profiles:
    XSS:
        include:
            - jinja2_autoescape_false
            - use_of_mako_templates

    ShellInjection:
        include:
            - subprocess_popen_with_shell_equals_true
            - subprocess_without_shell_equals_true
            - any_other_function_with_shell_equals_true
            - start_process_with_a_shell
            - start_process_with_no_shell
        exclude:

    SqlInjection:
        include:
            - hardcoded_sql_expressions

blacklist_calls:
    bad_name_sets:
        - pickle:
            qualnames: [pickle.loads, pickle.load, pickle.Unpickler,
                        cPickle.loads, cPickle.load, cPickle.Unpickler]
            message: "Pickle library appears to be in use, possible security issue."
        - marshal:
            qualnames: [marshal.load, marshal.loads]
            message: "Deserialization with the marshal module is possibly dangerous."
        - md5:
            qualnames: [hashlib.md5]
            message: "Use of insecure MD5 hash function."
        - mktemp_q:
            qualnames: [tempfile.mktemp]
            message: "Use of insecure and deprecated function (mktemp)."
        - eval:
            qualnames: [eval]
            message: "Use of possibly insecure function - consider using safer ast.literal_eval."
        - mark_safe:
            names: [mark_safe]
            message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed."
        - httpsconnection:
            qualnames: [httplib.HTTPSConnection]
            message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033"
        - yaml_load:
            qualnames: [yaml.load]
            message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()."
        - urllib_urlopen:
            qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener,
                        urllib.FancyURLopener, urllib2.urlopen, urllib2.Request]
            message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected."
        - paramiko_injection:
            qualnames: [paramiko.exec_command, paramiko.invoke_shell]
            message: "Paramiko exec_command() and invoke_shell() usage may expose command injection vulnerabilities and should be reviewed."

shell_injection:
    # Start a process using the subprocess module, or one of its wrappers.
    subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call,
                 subprocess.check_output, utils.execute,
                 utils.execute_with_timeout]
    # Start a process with a function vulnerable to shell injection.
    shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4,
            popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3,
            popen2.Popen4, commands.getoutput, commands.getstatusoutput]
    # Start a process with a function that is not vulnerable to shell
    # injection.
    no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,
               os.execve, os.execvp, os.execvpe, os.spawnl, os.spawnle,
               os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp,
               os.spawnvpe, os.startfile]

blacklist_imports:
    bad_import_sets:
        - telnet:
            imports: [telnetlib]
            level: HIGH
            message: "Telnet is considered insecure. Use SSH or some other encrypted protocol."
        - info_libs:
            imports: [pickle, cPickle, subprocess, Crypto]
            level: LOW
            message: "Consider possible security implications associated with {module} module."

hardcoded_password:
    word_list: "wordlist/default-passwords"

ssl_with_bad_version:
    bad_protocol_versions:
        - 'PROTOCOL_SSLv2'
        - 'SSLv2_METHOD'
        - 'SSLv23_METHOD'
        - 'PROTOCOL_SSLv3'  # strict option
        - 'PROTOCOL_TLSv1'  # strict option
        - 'SSLv3_METHOD'    # strict option
        - 'TLSv1_METHOD'    # strict option

password_config_option_not_marked_secret:
    function_names:
        - oslo.config.cfg.StrOpt
        - oslo_config.cfg.StrOpt

execute_with_run_as_root_equals_true:
    function_names:
        - cinder.utils.execute

==> cinder-8.0.0/tools/with_venv.sh <==

#!/bin/bash

tools_path=${tools_path:-$(dirname $0)}
venv_path=${venv_path:-${tools_path}}
venv_dir=${venv_name:-/../.venv}
TOOLS=${tools_path}
VENV=${venv:-${venv_path}/${venv_dir}}
source ${VENV}/bin/activate && "$@"

==> cinder-8.0.0/tools/lintstack.sh <==

#!/usr/bin/env bash

# Copyright (c) 2012-2013, AT&T Labs, Yun Mao
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Use lintstack.py to compare pylint errors.
# We run pylint twice, once on HEAD, once on the code before the latest
# commit for review.
set -e
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
# Get the current branch name.
GITHEAD=`git rev-parse --abbrev-ref HEAD`
if [[ "$GITHEAD" == "HEAD" ]]; then
    # In detached head mode, get revision number instead
    GITHEAD=`git rev-parse HEAD`
    echo "Currently we are at commit $GITHEAD"
else
    echo "Currently we are at branch $GITHEAD"
fi

cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py

if git rev-parse HEAD^2 2>/dev/null; then
    # The HEAD is a Merge commit. Here, the patch to review is
    # HEAD^2, the master branch is at HEAD^1, and the patch was
    # written based on HEAD^2~1.
    PREV_COMMIT=`git rev-parse HEAD^2~1`
    git checkout HEAD~1
    # The git merge is necessary for reviews with a series of patches.
    # If not, this is a no-op so won't hurt either.
    git merge $PREV_COMMIT
else
    # The HEAD is not a merge commit. This won't happen on gerrit.
    # Most likely you are running against your own patch locally.
    # We assume the patch to examine is HEAD, and we compare it against
    # HEAD~1
    git checkout HEAD~1
fi

# First generate tools/pylint_exceptions from HEAD~1
$TOOLS_DIR/lintstack.head.py generate
# Then use that as a reference to compare against HEAD
git checkout $GITHEAD
$TOOLS_DIR/lintstack.head.py
echo "Check passed. FYI: the pylint exceptions are:"
cat $TOOLS_DIR/pylint_exceptions

==> cinder-8.0.0/tools/colorizer.py <==

#!/usr/bin/env python

# Copyright (c) 2013, Nebula, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
#    Permission is hereby granted, free of charge, to any person obtaining
#    a copy of this software and associated documentation files (the
#    "Software"), to deal in the Software without restriction, including
#    without limitation the rights to use, copy, modify, merge, publish,
#    distribute, sublicense, and/or sell copies of the Software, and to
#    permit persons to whom the Software is furnished to do so, subject to
#    the following conditions:
#
#    The above copyright notice and this permission notice shall be
#    included in all copies or substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
#    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Display a subunit stream through a colorized unittest test runner.""" import heapq import subunit import sys import unittest import testtools class _AnsiColorizer(object): """ANSI colorizer that wraps a stream object. colorizer is an object that loosely wraps around a stream, allowing callers to write text to the stream in a particular color. Colorizer classes must implement C{supported()} and C{write(text, color)}. """ _colors = dict(black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37) def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): """Check if platform is supported. A class method that returns True if the current platform supports coloring terminal output using this method. Returns False otherwise. """ if not stream.isatty(): return False # auto color only on TTYs try: import curses except ImportError: return False else: try: try: return curses.tigetnum("colors") > 2 except curses.error: curses.setupterm() return curses.tigetnum("colors") > 2 except Exception: # guess false in case of error return False supported = classmethod(supported) def write(self, text, color): """Write the given text to the stream in the given color. @param text: Text to be written to the stream. @param color: A string label for a color. e.g. 'red', 'white'. """ color = self._colors[color] self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) class _Win32Colorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): import win32console red, green, blue, bold = (win32console.FOREGROUND_RED, win32console.FOREGROUND_GREEN, win32console.FOREGROUND_BLUE, win32console.FOREGROUND_INTENSITY) self.stream = stream self.screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, 'green': green | bold, 'blue': blue | bold, 'yellow': red | green | bold, 'magenta': red | blue | bold, 'cyan': green | blue | bold, 'white': red | green | blue | bold } def supported(cls, stream=sys.stdout): try: import win32console screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) except ImportError: return False import pywintypes try: screenBuffer.SetConsoleTextAttribute( win32console.FOREGROUND_RED | win32console.FOREGROUND_GREEN | win32console.FOREGROUND_BLUE) except pywintypes.error: return False else: return True supported = classmethod(supported) def write(self, text, color): color = self._colors[color] self.screenBuffer.SetConsoleTextAttribute(color) self.stream.write(text) self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) class _NullColorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): return True supported = classmethod(supported) def write(self, text, color): self.stream.write(text) def get_elapsed_time_color(elapsed_time): if elapsed_time > 1.0: return 'red' elif elapsed_time > 0.25: return 'yellow' else: return 'green' class NovaTestResult(testtools.TestResult): def __init__(self, stream, descriptions, verbosity): super(NovaTestResult, self).__init__() self.stream = stream self.showAll = verbosity > 1 self.num_slow_tests = 10 self.slow_tests = [] # this is a fixed-sized heap self.colorizer = None # NOTE(vish): reset stdout for the terminal check stdout = sys.stdout sys.stdout = sys.__stdout__ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: if colorizer.supported(): self.colorizer = colorizer(self.stream) break sys.stdout = 
stdout self.start_time = None self.last_time = {} self.results = {} self.last_written = None def _writeElapsedTime(self, elapsed): color = get_elapsed_time_color(elapsed) self.colorizer.write(" %.2f" % elapsed, color) def _addResult(self, test, *args): try: name = test.id() except AttributeError: name = 'Unknown.unknown' test_class, test_name = name.rsplit('.', 1) elapsed = (self._now() - self.start_time).total_seconds() item = (elapsed, test_class, test_name) if len(self.slow_tests) >= self.num_slow_tests: heapq.heappushpop(self.slow_tests, item) else: heapq.heappush(self.slow_tests, item) self.results.setdefault(test_class, []) self.results[test_class].append((test_name, elapsed) + args) self.last_time[test_class] = self._now() self.writeTests() def _writeResult(self, test_name, elapsed, long_result, color, short_result, success): if self.showAll: self.stream.write(' %s' % str(test_name).ljust(66)) self.colorizer.write(long_result, color) if success: self._writeElapsedTime(elapsed) self.stream.writeln() else: self.colorizer.write(short_result, color) def addSuccess(self, test): super(NovaTestResult, self).addSuccess(test) self._addResult(test, 'OK', 'green', '.', True) def addFailure(self, test, err): if test.id() == 'process-returncode': return super(NovaTestResult, self).addFailure(test, err) self._addResult(test, 'FAIL', 'red', 'F', False) def addError(self, test, err): super(NovaTestResult, self).addError(test, err) self._addResult(test, 'ERROR', 'red', 'E', False) def addSkip(self, test, reason=None, details=None): super(NovaTestResult, self).addSkip(test, reason, details) self._addResult(test, 'SKIP', 'blue', 'S', True) def startTest(self, test): self.start_time = self._now() super(NovaTestResult, self).startTest(test) def writeTestCase(self, cls): if not self.results.get(cls): return if cls != self.last_written: self.colorizer.write(cls, 'white') self.stream.writeln() for result in self.results[cls]: self._writeResult(*result) del self.results[cls] self.stream.flush() self.last_written = cls def writeTests(self): time = self.last_time.get(self.last_written, self._now()) if not self.last_written or (self._now() - time).total_seconds() > 2.0: diff = 3.0 while diff > 2.0: classes = self.results.keys() oldest = min(classes, key=lambda x: self.last_time[x]) diff = (self._now() - self.last_time[oldest]).total_seconds() self.writeTestCase(oldest) else: self.writeTestCase(self.last_written) def done(self): self.stopTestRun() def stopTestRun(self): for cls in list(self.results): self.writeTestCase(cls) self.stream.writeln() self.writeSlowTests() def writeSlowTests(self): # Pare out 'fast' tests slow_tests = [item for item in self.slow_tests if get_elapsed_time_color(item[0]) != 'green'] if slow_tests: slow_total_time = sum(item[0] for item in slow_tests) slow = ("Slowest %i tests took %.2f secs:" % (len(slow_tests), slow_total_time)) self.colorizer.write(slow, 'yellow') self.stream.writeln() last_cls = None # sort by name for elapsed, cls, name in sorted(slow_tests, key=lambda x: x[1] + x[2]): if cls != last_cls: self.colorizer.write(cls, 'white') self.stream.writeln() last_cls = cls self.stream.write(' %s' % str(name).ljust(68)) self._writeElapsedTime(elapsed) self.stream.writeln() def printErrors(self): if self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavor, errors): for test, err in errors: self.colorizer.write("=" * 70, 'red') self.stream.writeln() self.colorizer.write(flavor, 'red')
self.stream.writeln(": %s" % test.id()) self.colorizer.write("-" * 70, 'red') self.stream.writeln() self.stream.writeln("%s" % err) test = subunit.ProtocolTestCase(sys.stdin, passthrough=None) if sys.version_info[0:2] <= (2, 6): runner = unittest.TextTestRunner(verbosity=2) else: runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult) if runner.run(test).wasSuccessful(): exit_code = 0 else: exit_code = 1 sys.exit(exit_code) cinder-8.0.0/tools/enable-pre-commit-hook.sh0000775000567000056710000000232012701406250022053 0ustar jenkinsjenkins00000000000000#!/bin/sh # Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. PRE_COMMIT_SCRIPT=.git/hooks/pre-commit make_hook() { echo "exec ./run_tests.sh -N -p" >> $PRE_COMMIT_SCRIPT chmod +x $PRE_COMMIT_SCRIPT if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then echo "pre-commit hook was created successfully" else echo "unable to create pre-commit hook" fi } # NOTE(jk0): Make sure we are in cinder's root directory before adding the hook. if [ ! -d ".git" ]; then echo "unable to find .git; moving up a directory" cd .. if [ -d ".git" ]; then make_hook else echo "still unable to find .git; hook not created" fi else make_hook fi cinder-8.0.0/tools/check_exec.py0000775000567000056710000000225512701406250017723 0ustar jenkinsjenkins00000000000000#!/usr/bin/python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Print a list and return with error if any executable files are found. # Compatible with both python 2 and 3. import os.path import stat import sys if len(sys.argv) < 2: print("Usage: %s <directory>" % sys.argv[0]) sys.exit(1) directory = sys.argv[1] executable = [] for root, mydir, myfile in os.walk(directory): for f in myfile: path = os.path.join(root, f) mode = os.lstat(path).st_mode if stat.S_IXUSR & mode: executable.append(path) if executable: print("Executable files found:") for f in executable: print(f) sys.exit(1) cinder-8.0.0/doc/0000775000567000056710000000000012701406543014673 5ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/find_autodoc_modules.sh0000775000567000056710000000072512701406250021417 0ustar jenkinsjenkins00000000000000#!/bin/bash CINDER_DIR='cinder/' # include trailing slash DOCS_DIR='source' modules='' for x in `find ${CINDER_DIR} -name '*.py' | grep -v cinder/tests`; do if [ `basename ${x} .py` == "__init__" ] ; then continue fi relative=cinder.`echo ${x} | sed -e 's$^'${CINDER_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'` modules="${modules} ${relative}" done for mod in ${modules} ; do if [ !
-f "${DOCS_DIR}/${mod}.rst" ]; then echo ${mod} fi done cinder-8.0.0/doc/source/0000775000567000056710000000000012701406543016173 5ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/source/scheduler-filters.rst0000664000567000056710000000022312701406250022341 0ustar jenkinsjenkins00000000000000============================== Cinder Scheduler Filters ============================== .. list-plugins:: cinder.scheduler.filters :detailed: cinder-8.0.0/doc/source/index.rst0000664000567000056710000000404112701406250020026 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2012 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Welcome to Cinder's developer documentation! ============================================ Cinder is an OpenStack project to provide "block storage as a service". * **Component based architecture**: Quickly add new behaviors * **Highly available**: Scale to very serious workloads * **Fault-Tolerant**: Isolated processes avoid cascading failures * **Recoverable**: Failures should be easy to diagnose, debug, and rectify * **Open Standards**: Be a reference implementation for a community-driven api This documentation is generated by the Sphinx toolkit and lives in the source tree. Additional draft and project documentation on Cinder and other components of OpenStack can be found on the `OpenStack wiki`_. Cloud administrators, refer to `docs.openstack.org`_. .. _`OpenStack wiki`: http://wiki.openstack.org .. _`docs.openstack.org`: http://docs.openstack.org Developer Docs ============== .. toctree:: :maxdepth: 1 devref/index database_architecture scheduler-filters scheduler-weights drivers oslo-middleware API Extensions ============== Go to http://api.openstack.org for information about Cinder API extensions. Outstanding Documentation Tasks =============================== .. todolist:: Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` cinder-8.0.0/doc/source/drivers.rst0000664000567000056710000000017312701406250020377 0ustar jenkinsjenkins00000000000000=================== Available Drivers =================== .. 
list-plugins:: oslo_messaging.notify.drivers :detailed: cinder-8.0.0/doc/source/_ga/0000775000567000056710000000000012701406543016721 5ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/source/_ga/layout.html0000664000567000056710000000105512701406250021120 0ustar jenkinsjenkins00000000000000{% extends "!layout.html" %} {% block footer %} {{ super() }} {% endblock %} cinder-8.0.0/doc/source/man/0000775000567000056710000000000012701406543016746 5ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/source/man/cinder-manage.rst0000664000567000056710000001003312701406250022162 0ustar jenkinsjenkins00000000000000============= cinder-manage ============= ------------------------------------------------------ Control and manage OpenStack block storage ------------------------------------------------------ :Author: openstack@lists.openstack.org :Date: 2015-11-03 :Copyright: OpenStack Foundation :Version: 7.0.0 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== cinder-manage <category> <action> [<args>] DESCRIPTION =========== cinder-manage provides control of cinder database migration, and provides an interface to get information about the current state of cinder. More information about OpenStack Cinder is available at http://cinder.openstack.org. OPTIONS ======= The standard pattern for executing a cinder-manage command is: ``cinder-manage <category> <action> [<args>]`` For example, to obtain a list of the cinder services currently running: ``cinder-manage service list`` Run without arguments to see a list of available command categories: ``cinder-manage`` Categories are shell, logs, migrate, db, volume, host, service, backup, version, and config. Detailed descriptions are below. You can also run with a category argument such as 'db' to see a list of all commands in that category: ``cinder-manage db`` These sections describe the available categories and arguments for cinder-manage. Cinder Db ~~~~~~~~~ ``cinder-manage db version`` Print the current database version. ``cinder-manage db sync`` Sync the database up to the most recent version. This is the standard way to create the db as well. ``cinder-manage db purge [<number of days>]`` Purge database entries that are marked as deleted and that are older than the specified number of days. Cinder Logs ~~~~~~~~~~~ ``cinder-manage logs errors`` Displays cinder errors from log files. ``cinder-manage logs syslog [<number>]`` Displays the most recent cinder entries from syslog. The optional number argument specifies the number of entries to display (default 10). Cinder Shell ~~~~~~~~~~~~ ``cinder-manage shell bpython`` Starts a new bpython shell. ``cinder-manage shell ipython`` Starts a new ipython shell. ``cinder-manage shell python`` Starts a new python shell. ``cinder-manage shell run`` Starts a new shell using python. ``cinder-manage shell script <path/scriptname>`` Runs the named script from the specified path with flags set. Cinder Volume ~~~~~~~~~~~~~ ``cinder-manage volume delete <volume_id>`` Delete a volume without first checking that the volume is available. ``cinder-manage volume update_host --currenthost <current host> --newhost <new host>`` Updates the host name of all volumes currently associated with a specified host. Cinder Host ~~~~~~~~~~~ ``cinder-manage host list [<zone>]`` Displays a list of all physical hosts and their zone. The optional zone argument allows the list to be filtered on the requested zone. Cinder Service ~~~~~~~~~~~~~~ ``cinder-manage service list`` Displays a list of all cinder services and their host, zone, status, state and when the information was last updated.
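As a concrete illustration (the binary and host names here are hypothetical), a common clean-up workflow is to run ``cinder-manage service list``, note a service whose state is down, and then drop its stale record with the ``service remove`` command described next, e.g. ``cinder-manage service remove cinder-volume oldnode.example.com``.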
``cinder-manage service remove <service> <host>`` Removes a specified cinder service from a specified host. Cinder Backup ~~~~~~~~~~~~~ ``cinder-manage backup list`` Displays a list of all backups (including ones in progress) and the host on which the backup operation is running. Cinder Version ~~~~~~~~~~~~~~ ``cinder-manage version list`` Displays the version of the cinder codebase that is running. Cinder Config ~~~~~~~~~~~~~ ``cinder-manage config list [<param>]`` Displays the current configuration parameters (options) for Cinder. The optional param argument may be used to display the configuration of one parameter. FILES ===== The cinder.conf file contains configuration information in the form of INI-style oslo.config options. The cinder-manage.log file logs output from cinder-manage. SEE ALSO ======== * `OpenStack Cinder <http://cinder.openstack.org>`__ BUGS ==== * Cinder is hosted on Launchpad so you can view current bugs at `Bugs : Cinder <https://bugs.launchpad.net/cinder>`__ cinder-8.0.0/doc/source/scheduler-weights.rst0000664000567000056710000000021312701406250022342 0ustar jenkinsjenkins00000000000000========================== Cinder Scheduler Weights ========================== .. list-plugins:: cinder.scheduler.weights :detailed: cinder-8.0.0/doc/source/conf.py0000664000567000056710000001724512701406250017476 0ustar jenkinsjenkins00000000000000# cinder documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set # to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import subprocess import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = ['sphinx.ext.autodoc', 'ext.cinder_todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'oslosphinx', 'stevedore.sphinxext' ] # autodoc generation is a bit aggressive and a nuisance # when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1" # in your terminal to disable if not os.getenv('SPHINX_DEBUG'): extensions += ['ext.cinder_autodoc'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. # Changing the path so that the Hudson build output contains GA code # and the source docs do not contain the code so local, offline sphinx builds # are "clean." templates_path = [] if os.getenv('HUDSON_PUBLISH_DOCS'): templates_path = ['_ga', '_templates'] else: templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'cinder' copyright = u'2010-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents.
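# A quick sanity check of the values used below (hypothetical interactive
# session; assumes cinder is importable and that version_info is the usual
# pbr VersionInfo object):
#
#     >>> from cinder.version import version_info
#     >>> version_info.release_string()   # full version, e.g. '8.0.0'
#     >>> version_info.version_string()   # short X.Y version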
# from cinder.version import version_info # The full version, including alpha/beta/rc tags. release = version_info.release_string() # The short X.Y version. version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [ 'api_ext/rst_extension_template', 'installer', ] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use # for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['cinder.'] # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/cinder-manage', 'cinder-manage', u'Cloud controller fabric', [u'OpenStack'], 1) ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
#html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'cinderdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Cinder.tex', u'Cinder Documentation', u'Anso Labs, LLC', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True cinder-8.0.0/doc/source/database_architecture.rst0000664000567000056710000000032312701406250023224 0ustar jenkinsjenkins00000000000000============================== Cinder Database Architecture ============================== Cinder Database Backends ~~~~~~~~~~~~~~~~~~~~~~~~ .. list-plugins:: cinder.database.migration_backend :detailed: cinder-8.0.0/doc/source/images/0000775000567000056710000000000012701406543017440 5ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/source/images/rpc/0000775000567000056710000000000012701406543020224 5ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/source/images/rpc/flow1.svg0000664000567000056710000010610212701406250021770 0ustar jenkinsjenkins00000000000000[flow1.svg: Visio-exported SVG diagram of an rpc.call flow; a Topic Publisher on the Invoker (e.g. api) publishes to the control_exchange (type: topic) with keys "topic" and "topic.host", Topic Consumers deliver to the Worker (e.g. compute), and the Worker's DirectPublisher replies through the msg_id (type: direct) exchange to a DirectConsumer on the Invoker, all within one RabbitMQ Node; SVG markup omitted]
cinder-8.0.0/doc/source/images/rpc/state.png0000664000567000056710000011321712701406250022052 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
[garbled tar header; Visio-exported SVG drawing text, most likely flow2.svg, showing an rpc.cast flow: a Topic Publisher on the Invoker (e.g. api) publishes to the control_exchange (type: topic) and Topic Consumers deliver to the Worker (e.g. compute) within one RabbitMQ Node; SVG markup omitted]
cinder-8.0.0/doc/source/images/rpc/rabt.svg0000664000567000056710000010200712701406250021670 0ustar jenkinsjenkins00000000000000[Visio-exported SVG overview of a RabbitMQ Node (single virtual host context): the control_exchange (type: topic) with Topic Publisher and Topic Consumers, and the msg_id (type: direct) exchange with Direct Publisher and Direct Consumer, connecting an Invoker (e.g. api) and a Worker (e.g. compute); SVG markup omitted]
cinder-8.0.0/doc/source/images/rpc/flow2.png0000664000567000056710000007367212701406250021773 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
cinder-8.0.0/doc/source/images/rpc/flow1.png0000664000567000056710000012002612701406250021760 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
$z%BBbASw&Y ^n'q~t7oƔG}tpM YHiz3{w#pzMvK=~H^ezopnuΛy @I[:r[ v8?YlWgж6ӆEF`0I8'_NuМqOC畴ļyjKbed6nW^[6Yy.|v"76: Ok'\IT V8$È!p0D1M1 BR^e.N~bM5P՝-[>k (SkQ8JfE%]].onFnjqi8mF$e!Tl׍:3#78mfИ^"%/WrQ ̳ͯ"pWiCҬ ^TfXJ ;D"K''"[}N viWN89I7o_ p7eyC`JGp uy]0rVItZ=$Q&lnޭS67om5 FmE3h\#p7ßo?zAo$y:\/oRm7'_e7Nk-SԨٶOW4Ӌ^@z"^*p!pϧrd@Ϧe +.!p%`G|٬2b.=8C`*k%HR?=Ȱ*# p&&[\i|hٸ}pQ7< KM3kn%ifeVs+*2O-*P!pW}*3׈@r7DElNmZC5MUdyM)mO_8`M FTD$7+y5:~mC8ۦSur\HMp~'lZFLK,BԡO-:ge04MZp5F+^\[k/_pڥ bѽ1ZRKrA \R=A]._wQI>! pᇌVTh fS[0^-Y#\gHe\-SCb(Ҥ5-4 p\zurȪi(pu͆)VG|Yo{fl 'pYH6Nde4MQ$ FIW4Q8/Z{[#gG+FCj_9mJ@Cĩt "j2 >P!Sl:*U0.JRu=:;@%ai12`͇:1+(`Gq$^>j~j6Tt 5"78>pc+0xٲ#OYx}+r!Ls8@8ٰeȹ A!p0 pTY~ e+njޤh/+Y4B)O}l#f+es-俜K9 ^b.꼙em-ՆB'gJ}/󾧢"NEWaQmO@8 [:n~/mTU'V4-}?w1f%JDI܂}rZU.H p(J[ɫP7Fl5-YjxߣNO %\MHa@8)._54ڧ%/JsM߹$R\U/ -m.e2=+RX=n0h ̜rk.ЯnMG4& N5Iӯ|'ܢ,8`XXz6Ħ_˙A%AOt|zW5uui~\L/@8Q \tL@lsZ[i޴Jq9,|@8 \3v) px0 8@@^BS$q$p;D`d<8IQSi3IENDB`cinder-8.0.0/doc/source/images/rpc/rabt.png0000664000567000056710000012764412701406250021673 0ustar jenkinsjenkins00000000000000PNG  IHDR6sRGBgAMA a cHRMz&u0`:pQ< pHYs&? IDATx^ U H dWV5"AЈ#,FAAq (O"@%&D@v1 (*:3sm{9TuuuuZ6 `m`g͡޳f. @M2:qU_7M/*}șa]Fv lvs?sne +|ml*@@@J+tJ+'9_z{/l6Y\ykV]uo## `66[OwG +jk|ȏ~wXh?N76 `؀o;=5*GC`~МGl@6p#n׽2;,WÄQ~{'ˆȂ( $6y WslhВpZ׼4PP[e}͑ BAl`l`:!yo5OPPS/?w?@qs<l/M\eئ(((0 ?<8=qN:6 `aZO9~4IIW@ ,W^`yl`(m '>\uH`7؆^+@E٨a;606e4*@`t5h T89`6P9xWl;(0 GZԏ e, v՛Y7)}q5> n7M*l_a`9+ RRmVU~UWn3aC$ 0?0O<֒ t z[uUc_ٳg)SDh~\/n޸fx׻nA|tzӒoP! ,kcVHלl>9s8CKb4G2~} 6%S`zRU?5|0\{/ǕW-T O=;s_pTiӦnɝ}nݾF/iMӪu],[w-CT|) n7{xv;(; pp쬊).BS%̹7y: .M?;0o׷.ziU^:`X:,.Km{)[Y?rV4akֵr<i5cm[Qv7OSCE3 B-*8>ә?{׻5zݑ3q.~* ;X-VLLJR7*?"8#*UV{xcοR7[ەG)#_ByL dԵ@c6Y-qڧj{PX;70V;_,ɏ`Î3r{{gUW]z2&igD=p;i;g'j l-;d.++jXEXP!8'UbEfW}6.xZ몬w=׭mJMmqX~ץunVNu< U`ql1G&aǹnnpկJ}knҤI 5qq\=vP, ؂CvXA--bdw. jnVjbVGPUL+cpmuYR  leŃ_[) z[1MSY\~awGeYf˲;Cޥ`bleNED=AM9a *3ܦE|cV.Jk( ufYjuQH~`Ǎ3+IbN쏱&YsGRyGŋg}Ҵ`{RW2>V3k;v ]X-v?99} :Tɚq'z6݌.1]3zlݡc|u`%eVdj>ZĐi'RY%*Q-x 5&Y1vӧOww{纚.b֫5q^_ ZO`leRE'w= `ET~bAm]Mgkd(`֜<`yVwvpr98MdfsOf 1xc~ǝݍCҵ`ble~F+x Oj݃f*N돉MB![bOei~ ;WQFP[C蘴zÉXs[Nϱ^|U vhKll1G'G{UVi⪫9O^xQw'O 7ڸ'n<(s ,[wt L)vTW/6ߴ#slde`#`;6=ݡYg/Q+~g'>1Lid͘\r:N5`X}F: !kT,)LƤ1On`9:"_W>G?rozVF-ɌP/Z& e2/`K`XJ|r`9:1;|ww;mܣ>~5*]nʔ)L>̠;*5`X= V*li`ӟ/<3Ug?ӀleR"{]w uk;[oT:j?QY3+W Uչjl ->}f=[c,UB[ovmJˬ}E`+{p*-{?^׻|;_lTz'̙3nj41٘Zgόb6ݏz [o}ckSN9e еh|:\u^p}l? 
`;(,6.Ċw}wsql;C/FA.sm(Ns%8}m7|IgqK~򪼢sᇻ78MEME9zklm` usW~3~]F]V謺ݎ}@~ۧϣJYӶ=1[]S7=[kܩ?6.|n„ [vw-Xj&foOq;6J`167oϯrvS&oxct$7xjScN9M`5eMmFrUm`V&4@4UF7c) Zv`}+;kX؇~)R;cƌVD+tѨ㏻~v4ԩ`;U}6YSoQ"lYXEDZXu/#\?낫nFIYvvz޼QB;& neq'pB*zǜy{VF%iƌeٝj'm5S^me tjrqymVPv 63MS v޼y$bk^K^0%7{r"OOg}nTַ56̹`;m6ӭ.D` -F`($S`L9*F(h8c[ ._#p#}=[^ v~OQooh7 `jlYc`yc`:1]*a w˝Rȫ"XŋSO=4AN~򓟸Y/??}7v,l3K75eVsF5,U`f_u 'cRuX,XHCusjewi lW} `;wҺns>&c`?k]};<[{'kؗ^z}ݭ|`Ts}3'Ы h`ZuI/m՛^$ t!:8C"K-ԍU>[n'\VDžj)l٭2>8`v6mydE`\~y[fg!I~+sϹ:˭ڭ߀_| .׿oG?Tlg /^[1X>`+C&&%.[R6 i5تmTl6S) vnAlW_=<7+դN'reN}O?>O 7]veNYu2pUwaD>H/[bek#li"ۧ)߱ۨ60luDž^p!leRRP] lش:3ßYs=+nYmwfƬCu;KYt, b-ۿ23hM9m $}| Wy Nm'l8V)z8[A_Vmm!@Tum:^AS`k ^VIIA( 6W  `/H7[ɎH*hS#Xnn9Gs.pmove;Fg!3g"[EaՕ(o¤YMyM6q?Zլ$pGg5 Kt ׫WNr] f ~0) KD[ ԮyX}/n;5 *?Z-k6 vNuk`\lTdAIli5y+J*jWYj~&2"Vss4_IօVKwK^\1=, 뮻m?׿R=eVr9kO^ nMpsM CPմhQ"El# `6ܞ5a楃PLJ @in=]B<+1|%>9 ЅgϷ@ēN-Cy{=C_\0f/M_B~z{+b6wۚku5VWa?-x<O=b+B[v]V.9"DW44Sd3**p?cUWܢq^Bg2W LJ @(T$ai$ɭkU*7H7Vsϔ-}Qm"mtubV"i5ցtM~ j=guV4tŭɔ` YMn+GWCUa.[> <~>2T^O(E҅;9&sǿS=et> >㏻6޸>*ɚQ=s[?Y;[oeb9?dkl}݌ګnl{`+*P {>QS'cq!/86:U"/@Uweଲ p'X+S[0Zx["Ν*sΘ5ӓg$*{T!>?w?.ܘ|žs9j+XMKZGmx衇ZhsAo7~뮻57 =<&a 'b<|P  , |P,,*"yȫpb16u-6Ej<Ϫ`{{;\UwъYf̤MXE^ ^H@v{p5BEa-i 4!Ԏ]R `m`fU 2:>E*iKt 6V6`:l ꏱXK~6]ƖVuה+XE/^ ~|Aett(+{r0Uz=UDu7q x7:Fp߶ŋ&s;^O` W `Umnx̚؟ *` :k]  jl۹Õ̜VL"\7ovfEO=XEc7JgΞ:ooTR?v!2HZW20[AU(PMqi4)d6{+ykWegc ږQY~TyOOQa;Ͻa5Qǻt 'L/'|m2ՄNZSVsm~C8+ jYO:ia*1 qv<9c7W RYwfCZ\NdPk@),<~],MiPo]`# ;p!{mtXdGbTr4a2ɢieذMlS9Wdh_ZbǯVKc`N,xlNkMN_j݀#r_K"}XT0qj߾{>EO~Ҋ>mSS_"gҤRբD`;a![i ; U@e<2f=vz%`;wB=]* 8P$bkzH2v~Z3x≎ֆ6^șr7j?i}\{ CC"CzD;mǍ`;wBk -ksHY=_%u '-s„Qx=)9vV[fVM_cWۍk}rK `y jmYszʶQ0KWXθá![?Q l)`{yU8i8QXomn…|u;(^c{[K쬝@ dw*7/5`r#Ofswedҙvv tUE/ [21)iNeQ Ԡ 5hUWAsg7 `%I$v5pW_}ܔd Uex6r<@k_kMXMn7܊j4쬱D`c leźII++PXF s/kN>:k t >͓I`8yV[}ݣ_ynqۮK Ә.7[`ͮ|u׵h$T}{νpPЫsϪltӮ)5`+,ʤ`Xz,XX-GկvR,UUo׶0%["Io],Y tMf)I8K/=] leN[T,$̺`;wԲVݭ(m9`7yw(DOLn=k_Kf͚4@W+}{oT`;^Aۿ׮W63y1leRRP Mg.M4+1X%>9 ;V,+ Or8'p,ĚTdM%pdg@7cn7Xil.x?=] le[T fN*lw[E;j7zwƐjǍ1ZvK-x*r$w)<_jml`;^۟׭BB\V,(uwՌlG7g;;nܹcV /LG`yo}[nɜ4+7X+Cm c۹mn`;v"-U4~DԤdj>7'$pZk3eK`;wvNW%vm-X{'ft\ph"Z6Y+V3+ ;)鎬oiYPl+(-> 188a t!.A,q\7v檫Nzi"i5XկKerAQ㫪=Xhتr9Ze7qd'$3tI-UwauZؑGcً5Q {cʘ&nUWmՕV_ߊ |$: 56'6rCQY>%$O4VV) vhL(VPigItؽ op}\uX'Fd78]tEe`L"~qhj:85D`ÆMS2W =cߏ؟#Hߒ8K*|yd_jd:xmS޵^yJl}vkOYkÎt;sT~vMʫ2/s7ol]tWˮ,vjUF: XoD`r۩ 9}3>}o+2Zb@v$.X͓w `e]뷕29} g$%u+-Z6NƻǶh~[;A26^1lu~xH>Ul`{%0mFAvsG'h)ik5Ubu5r4nUXM"U6";`]GcWMͯr!SLmRkB*1s`#d+ 2Iw @`#{(9 +2 믿; Af!V qvZoѕMlm%dX(Nթ (^u 8`#mmh|cd2ҵ;쮺*wwm7h F dW9 B;I6 f!nSluG: 6YBHߧJ+`TjQ`zR Ol1GȏqZf5 J@tƌ[.\ `uxd<컦MsZՎ1]'.s{+)|9W,[`*lYD'زr PStͽz8 hiIG}Úk;oC{x(ܦ=ߴ_-}0w9 V|P l1G?EW};`ۯTޱo 9s渝vi8#g4h[ [`XؽV#U"nK[g98mjyMԏ>y{ح/(UHybN??hp\k`lÐ XUY LqX1`|O `͚(Pll1GD ~H!?;T>3Nr;Zp'V~Mn}l FWl1Gq) ~H!?[ZAͿ1 - {`X} F: :IC,;s6>c`#1[Ы6l)6a)b`8ϝ*`|k st 4 ,[7tU!vybNiגetۀϭV]uh~\ٳg)SDh~\9s8(C~ևoa}79_6@ϰ>,=y;tz7+|~l>/n5,|-d_XIipkߋj8/)}dP5U鸓m`#//;/֛aIyMkO'Ҵc]r \V _&~|[IFj&OѣjoWn>(ȫ>6mJpAyrͿ,]#dU`bQ|97_Vo 4A6*z0[AeͺU]W)~]X6a lPOMj'iHu,蔵Ɂs`b O'GMpFtm5P68Zt<+82X*x`˚`b? 4P.u8ݾVq۷"íϴOIHU:VyE4G#G-[y/^1]vn{Y)+OWS;Vm QI+-lWYUv>vLzk;ZK?_Otva'kb?Eg.Yc[D`쟴dbКZV=#[Ѥg ς߯}%P:QCX(=0Pҏ`P "zog0npp]ʷvXUiPC4hdD*G49(lN_ڦ:^ Ͽ F*OUP2iH~meZyt W=>N@ nZIwQ9a8.w\Q[!y sK%Ex6V`? 
u4bWI zBPAy  6i~\( X3PkAV,2@+S[C- :mR@hD0صs#jsxja~0:c|K!эwr:`l^ygKϠg2np2`*3fu[[:ҶOoվ:m}HPY hVd~-|aTYHgvQ, nkiE#,4xjږuJ{=tGdW럧{ܐ l|Kڌ=#%ې%늬m>K[+6*|Y4eX%ް~àzL`# @AjXb(`+ FCS՛_!kcpp986F:VLxu`XĤY_ y/ד={e{>c~$4`{fÉV+ga(?ígz 0+$+Ut Ml^vSV6`BBdaN7-*:"k!6@ KkgҔ;`XϺI+yN3%|akz{ļ\ {v aOzf+IWlwš +$ 4UD`j61gq YD2|V6`}nNcl`Nb $ۛ_ [jpx]C 6ζ]u|XvyQN`XM"ÅbVeY6[k޷AoޱlWM GRIkRߺv:և:+;i?En>?ipipo]Ah8Em5+Xp[ڟ!l= cu'/3u]®u{ZP`X>Ll+Rƴb5P=*ǯ#X/+wl;H#YƖ j\3k̨a6ɢWGtL(ZSO`-ZW4sÞ#ip8 ~xU]۽W]gY$y؏5)0+GOCN'0vy"uONQyQVy`XnQ_=,gtZ.U /3VZ>Z^VmɵɝT oT,NwyJlr$o3VYgJɸWSJ{,k9iCГp6 l@S|_T蘴zGacTn/fs9< XPll3.аkw\ `H\X~{+*rQ(Pll~yP=6P ,&cH]/As tG6RW-(tXte`8j`#'N85 Al, w8 Z stXte`XܽQ(pk `x,[xE"w5-.ot: `H\6`;D`kQ7K˔!I=Ui~e΁ Yvʔ)={+r̜9s1'Og l`8zFeNeR tU"ِC\/&E8~D^ iuN0QRtZjWEUE=9ԑÎ,66H!?$ 嘥; } 2l`mT7XmJM}5}8H(/6F: ITWn>(ȫf;r9:n oYAc8Ӻ> .{Mw׎ #H;aY-뗫\EdlЛl,[f$v!օneq(FH &k M ®Ƃ:ZY֝V 4Vx[F}ǛZ9Y#eO[d,mЪUVZ0kqEA63`ill,[ زq\Ԭ'G6`& J+8 ZC8A^15߰I|NsڧrԾ"MC.ayc\^6 46U`ز:59м)~hl#~$3tvFj{Yc] os6\i݊<~1p'`@7mmsJX vU^ G(;YKdM ljuMV~?qX"B^x/**u;ܺF@/+B"3 4C6~ l)i w'B6cʬx[+.6n291`7u $[%gBDS' t? *M.OzjNdT5VU?n&vjkVTl4,VT5d`\Jgb@N ݛuou卣󩢽l@+ţë`IlI4P5NΕE! .ѱHԍ_`.ѓ6ɱ>i G٘p4) ngif(.~@r-6 B `+"Pnl h׭7ʁLX|E@TvΤOmiW_iݪcl@VዯN)L^%;nۋ.BH_ U-ǕvM `mWjZ>) ڂo+Seile[Tӓ2f):TeF!YN wQ>h͈mk3iy=2Һ[ `pDǪ fZL$Iu=ء<Ir'`y톶Ql'LG@`mo%HԒ"@ ]4ov@=M~WkVJ4x`# kgҶ-$:C[l^.>a]AZltӏfu !`MXn:.l]~>(*9+,i@)F^N^Aol{6`$U&R5ӘkTy!Ԫ,"! ӺEt!ni_u#lxwMOl5`L8Al:HvԆp^[)-kc`K4c+еq9Z+h7p__$bΌNӮv$NC~66_#rԧ[Tllus4u#a4j]ӖQ9f!ƂFVݟXkcy?߀7mc.vmȯ_ߦ4}flÐ ׈)֧55U)$ 4͙=dY熓9 e%`Fk>#`}W6/ǏZ7mU]H[(m[ɖ*J[c6OC~66_#rԧ[Tl,PH}\m \fat>`#`aw^9l)#lt>`#`{4Nh 49n*+o{lÐ ׈)֧55U)$<z؝I:mtMLܜ(Pl-2SI j8u:ԅa@ltȆ@ۀ 4A6*8Ms&i6 `u0 K(rkȫ ((R `M6a  l]/Z(0}$!F,dӜIڃMb@6*@*059CΌlBG*((R `M MB`kjPJXɦ9:mҫ,/^Z 8u:ԅa@l!BUkϙ,dӜIڃMb@6CQCp,b"uaofs'ݰ?/[11)iNeQ Ԡ[TU+`Fly Zf &:SϽ96 VY(jPAd@`DN{l`IX7}}ǀ{6`mV[͝u@lC ̳`+RKiA ` :uƀf}ln„ yf[w ,[SV&%ե[$W4v?~(n /tO?t/2te|llU`+*P`zRr<_ʔD7zz瘣}tך̶ s9gMrcjw/JleRRP HȝT @iL6RJ"q t4p]{ٳݯ~ctx0g`5"G} iMM)F  pݰmēNu"&Mr'p[x{g;J*GYٛm<\_g6akDح\Q_ *F}y[{:r8G|IsU}QwGeYf=zPlt>P +kv)D[lm`΅8EB-**vÍ6v_z% [DOlÐ ׈)֧55U)$6I^iWw?яo~FuQc֛-,d;Y6akD`Ӛ*R&o01c[c5ܧ?8/B#OS}LÎ`|l F: |Q[ƍIC7>ll; `կo}l9soƤ+M2śyc|l9[`#llFO6 :h\*i6RL4606ϟ﮽Z]vzw]c7M6g}C~IĝP';MWtwqi5uZ/*_>O;-d#C%t` , `M* @CmG! wl-[Z(vrk @;sOk\_zw'Ig1cymlQϡ LLX%ih4zӒ +6ު%`[ DJJ|@+p/bcfOfmƌ=s_pO=" l 硆C47ޘX s*4-(pK;=(CtqxEi2wb?4&{cNf|l*t|}?7pёUIJͳn"lX.lD6ޱ |( ~o[y駟v~_;nحl뮽aؑh,L^~}Q ގQW2qg>N;GmW_wm]|W{&Nt$3s}ZN1{'>~aO}gqƤܽ3>v>2>6خ P, F,M9ƓY};i k*_9FtrT4Sٳg 蠌8;F|E]Zw]7UjM>hv[m?5&m;(.3~| y|,$䕶l lwZu~ŋ]GpYS4.r&YuUT4 .z?>V"wt˻ɽk =SꫯvSNuz׻C=P:~GvHV;`+v,**`+2,[ǝVqlc /gK r!~ss믴;}֨b'p/g_}~u _Q`+s,&&%U6  Vfm `X;:8`تfʋ)7n\O>>^{mw/=M_ֵ`*~cq&'`تl]9, qU\Dl E}qf:o~MI `z{oK'?qYkz饗vvR;,[kXY1RSf몎a%`d(/ޫ[1n‹^L~2\Bg.>>3WkI[p+vש]{uB\{H= CJq2`L9]ؙO8'`)ꋻ7) `V\ѽA;+[nqN,>swzp I'?8u:)*pڴwq馛Xo*V*pH)NO,{vuebcјK AS槁ne_J V[̚,[5(Q^o5Oet4 MWt+~'I&KT}#i[`߸n̙nM6q]vJ">h ^mk{le .IJs'Jjzu!@j`y;nSUleRsX6|w/5],UwMWY=72 60)b>n-,/~-Zk/{x#8WM$@O.֋vr`Rl8XsVyE1.rL/=8J9,t}qrNY+뷢ߨ-;lS>{`Ѕ.nY"28?':֌PoTȪ.ĭY#_A-srX6|w/5]<n<}~޿+#Y(q va7=LJjVݙX-C2 8zKS 6) %t ,H5ձ0x˜]v؆i. 
3,'4-(`m:xѾ{9O ,hu!u`F (UW]8W?]q-{'ƺn FtNl.i`32+Oízr*rh CULe)جR@f7 <`MQ3p0o|l-(ƍ`^KM)`5)f Vz$عs3L;n緿=u}nu`0g#pHؕ7 2pjZoK`ZM0B2I]F'߬ 1z7< {%jXl9=,t}qrN?ywH2KևDcN_=䓭O?{gZR9'4͙3=i6s܍78fX\`_6aklͣ#Zq0 ɴYp8X xװkf&y`Eohl&q"t3l?^jN{v_ߚXi˱&waMdۓ1&-# #-gle^llЧ3l8!eD``t7P{BϢ~x?*+.kڊXX6&Z&6`ئ틻t`~a{>k-raӦ^5^֒'MrIO؞wyg4V-[\IIVI Z>Pz?nr0Yc`>zl2#,"D`0~H,$<5}#ہ7k lթm9З-sxX6|w/5]NVW\]vYO}j pq ~I#YEf6A`[_ذ1qDZMlaهz9 ha%~[U7a.'` +Dz8`^Ծ9^┺qΝ$ԇպ'Nt>hk 췓hwXDb'IVC}_ó҂SK[gtvfz+aoLlHX-B6Ӱe8iw.V 4Mn)) l7`2P85Tfeƻr$H%Q`9(>iw-VYŽAIÇzgZZU@)͟?5& [ܦHl~o̬>Ԇ&cubz\j~ڞIּN.~ «~bn:6`*2< `~~ǝ݉'n[4VՒnXmlfkيZ?(YVK~cJ VӒ`k֩UCMtw'nrZs{y_waG?] `oJS>4ϺϥkZ%C~Uy~O6Ρ`تfʋtXf7pH]{üB0U :]1%0{^2+{&\"m7&q,=+UVC^U/m[>Rʯn iG6ʰXԍ&]1yP:nܳ#X`GIZ7rt5X5q/Χr|WEфjtWEJ+qI6&FbRAoL7<[}o[}׊pg?s>,r8ց-]K9]R˛&qzܸ/<<4OM q6 v\w(Hs/~1;mD@6&5`w˾uXa7« VuGS Rۥ:&qzvSwuRHzSz `E{#0U:.oʸq9o4Oʃ?qn) `ӢGZFG>æk]آ%:ޫ {z_k[dzRkXuSJZzdv+ӂb #9C/U γJ}مXF"y!#1Cq_(wm֛˧aWdڮr|hqͯ'{cfEfkKu]:Nos¶uƻUVHlSmQi[ITEXLmwv%pjzTi=dօ1jIk *-<@n`>\XEmk3so6ƸdOݍ̸}c.ȂLIm3hQ(4pRmKoz_A)Oگm*˺Lox87Oћ9ZO6`~tisn'u;ݷѱ[f7!I X&~reG^4idccNf~馛ZyiL qN@Gs{nPV{`+3 `ȦE}ƈɓkC l@LFi@6?<FoVWo Exa4эrS(-r<[كTAl ygE`oI"I8}pK u$/#cmR3fMWܵ5b3]hIlyv2zD=1}?To}^.?2~'tNaO_6oGE`J,9]Sgz;nO{olZ" aY7rCݏpYC,r[=SMCSVcYSuq63l`/`/V֟+t;o-Y";3ۂʴفoF7yd⣒ΛI8{.3&`zK7ÝA|ߐȾ!K}!~ HYP^i=?TY~Pcɩ"~}ʬ_ 1VUFaƣrt!. v34B\:9)\ q6/F;;v…(jwOtw,gq[1 RAM6iɞ6Hf2`XPش^El[;3Ap+36;qT'>ֱlYmvolҥP0z7#3ó7 ~;Xzԃp҅8?K6Ζf'ƍ7؝}٭%o4V([C}sђ7'tR+L_WUw}wt`|Xc~#1p' {m+&`m VVNhe%w%.N/oT\9l ^ܽ2,:unWٵ^{kllt9,_j jD5~XM&U&tMNK=sk']> %N4u5K<l`rVXK/= o~[{e7%@֢GS[nT`^S~HVl}pK.QO,ꖨW|s7wHq6NIcx^d|Ig}{ĉ< bOO"h9\sM `uI, vhS+,\ y8l>qNsm7d>ڱEMd<,7pC?N&)BmKuX& ui9, x+igV^yd79s渻;*mF; b2:j ؗmر8ȔlwZuqNmny]ҫ_|VRIز[VkMF_5/6=f-royt!Nho4xtșǸWo}[_ >jyE_Їw1`$]|NcvJ+ ;۔΃q{f9 ==06]DU^Q,cqI6=Fci~N:f|2:Z'- `$@˻7߼5[+lE`e}ahE~HV,[e%K;:ph?v6pWu_(0&SOM½.?|%uh={_.-{Mwͪ9Cm3[)ܯ!jiy|_GUuնSկo"r&ʗ6VTgǨ qEf:>4r( ӜYZۧ6% ScjOUKlFtWtsUW~~5]yDQFmC~Jv$.d&V(GnyEbɌSsp4_?lhہeFhU":΢VZykt8!Z?1l-Fw gs[׿͚5=tzʶNCatUU2XC&)F^ cQİk ч\ߏpg]8eV ~ 5^U6i9 `CX`Z `k񱂬^z4"?aD\K.QF: |Ql}ZSSE B_H~wa`$Er8)F+!Zqi6-fk1g4]3>vpIpt>`#`9$N6Iuni3a*hL^EA5f.ayy2-Ӵ(.c`p^Y6^b' 0g`5"G} iMM)F s~et6yVPiqx3Ӵ樖Zڤ[Mڤr-8!ZdSN x`#llFO>"H!x'G **@󣗊k"Ӣayiδe(!XDmipE:~[M(6d?B/lz`#llFO>"H!ꝡ( ƔY6=Yup\o ȷ6akD`Ӛ*Rwn Hn7-ӺZg-Yx67v Ԛ9ydoәÓ<@#`qhDH&v.TF˱`ͱzmtȆ@۫Z6:J8 `@ltȆ@N@]o-yf9R8\l6a  l]/Z(0}$!Fl)zc6,`qP` աygF6O!@`Hr=li6ڤ21]^[l5)$4ՠ@ 8E=c3؀&tK9K+ڀfVמ;`*( KE8LNlh (\S(Џ l7tY9'=1"g>x4lEw6P-4tn ou߰m5StQzZ}V-irl[VVvֹ 6&OpSOZ(PlygcJ~kM}䈙NTJߏjm<_tOu18c i_nx=_<[ePV}nCqY'( a~+bM:ԁ((C]`A_l>0pɄ!|k#AS&v "'-qgSg l6``UvL+ׇTw+׏>}PC}7}LCrG@@:`scEqMr6 i|} (PH\dX[N!b[uۀ͟WoxQ}#0ni.lA[FGyնp=oEd:16˲AͻN?WϟY՞Prom<0iܔll#e7!0jD2bҩIQ)P#COt},šVX+W7}6?ڦK琶LhI;ElZj\f"A6le.Š$[=-O3`ºt~! `Nʀm)h <i TujeZ;غ%(`uܰll@{3ݚ.ƊZd6 kҘ6ʟPj@ d=rC~\jw{ .}PaXArVt5m x/%o ɹa@6 Uw_n̶̎`ֶ M` (U/ T'-H ,, bqY&C CtY|my@,aFb}F5! B-[~ V0ya+606tU_?mFufaP*V+|zMoZln*P`}6Enu!#~u, :v `6 T )`2U %g4_>-7}60‚߳X34lYiՅXMi݊n.[AǨm&oC}=mk5jYA'̆[c]~O4-xdG Hj{-F-c Hֈ&>doUB1og-].e`qb؆KfZ& 3jGJG:^ـ` ix)]3T[liUyCUg@RSX`닗aD7,ۢ*dQ``Aæd뭿{hImkjڼJkJײ68v枚! +*P2P &>p#41=6;H-E B˦iMݨKtWYe~#4 PPP`KU{CF\OlVҏTۭ g Vٴq~7Y*{Ë"Cueu,] ڡV鐥v.0M8a@@@>P`Vx,SĹ&//6`jpi[ x!*Ϗ1MꯒU>BiWhuvn~bMOU;i, :oCDZ;\x[inG6MD@@Xy-%gaۀ`,:u5aTpçAOI58w֧V( / 4`UO[emh9B܏]4?Sr-5<@@@>P`+_{9Ѕ.60P6`p.^| !ASDg9 D6@LJ0kX#G[>4PPP Q`OD?5 >XF- `|lu7v5iZ]g+oo(FobT<@SS^5Ŷև0{_/r606ۅXiMռl^43沠 f}ȓne0>9g@T@k( TfWGpE }Q08XǟuHvi,3I kc` f/mcHjXwf^Ӵ6뱵V2ӗN;í &r"{s)r*e?~g;/ 9^6`6^˰?q 8#uXeڌ!̽;IFƝZYUi+˖Q9~j[Vȟ9 7Mp~n̳c4 BZoV)U ]rSԎ)zmǥuoTKqӶ17/ߩ+U:!h"jKɜ}Lij1Im֡2~mWڕ!VYvNa{*29US-p'N? 
r+(lh.SdVIfiхcWoD72}>_# SeH!nAl(jYfc3 O\=NtY3w^=w+ 0<:Y-8HC0{ .aRӇGƬɐ"O+pou -h_OhYT+ڧn[(,˩`؀u.xRETc-zVw; rZ?% $m8RowX/P9w+Rc.pS7lzE4¥tK5R}p9VN.3yJx> 7[I1[5z5vb],Xf]3M>h_v3}ԆAGui^3\_Lߠڱu3ѥEvNV[TUF*ol׾ct(6 `ՋqϺ[I}P 4m(`?g*_umJ ;CȠiӼ|LشYOSQi|v-xFiŰLJݼ12Lu|,ݝZꪓO^8ԉS `6- Zmⳗ6u0teUӱ]sgi-YŏԆ+X7c7s줭R:6 X4+CLZЀޒ0(-bc-'rqPlE^ߺ~k~yٜшaSY=CùflioO+#wYuL6l$*҂tlo˴'v2DXq&`ONg"f~7F۲?Pzn[qi{t3.gzp̱lh ,\H<҆]aj³6tMHC?SVC woMM+cV:Z]3ea\GY^3lUŶ~ւd8v /4&aպ+s- m &Zflk3u:`6o[uҤgVC O&ZD:N?3p8C擫>}:u!qVIM6Z/BP A?7ʪ1MOVT9G|rmɋ"8QN6 `W<ۭj/N8=qӜaжav0d+)Y-Xծ/oijpgeZy~WN_ZS ֵڇh) @]G<vf!- ^"MhԺ b,}8VaSW^y{Xc?~șq, `6PO8//_p4yp{g跇˾KP>oj@kvk3 P~Lruc#i5?3\/GF̎umq *c4՝#\1vۑꍌ9)Iq 9 ows?snvl6`=sv:Ə_)<Mf};_qr-{9@@@@@X>)GEn_Tt2kФE?lB:h=BW/5츲8+K[t~e{ɋ(((((0 &YIQBXK?. ԥAky;gvv_yŜ{<l r< @KXA4m 1S'vEt v1GR"WTo)Xдv|1W<(((((Sذq^l0TENm't,>Eu!NXt&1݆4PPPPP Fvu~Eo"vݍCTF'-ke+Ϗ8eGcifg2&im^<4λiۂE۵ʻ1L@@@@@V `Ɍ|P ZwP6*Auv]~V7+ kQSՍZ2mjV9k;k_uUNkw]fZ~Ktژ`ٕ_Kyɩ X+ XZɴҢMб>M^P~g &X #ifAFuOx]Z4S~0rx6TyO(l 1~lY^]GficZ鬺 ,× iv⃯-¬sL%;@@@@@h)``֭VpQfЬm*fz> U )+j],=L7^K:㳺 !녅I>67k4]eQPPPP\?2h]j53P6oW+ˠ&EL>٘6mتtg>6"t6i^0OLd9\Hk ۖG@@@@T @;5 R$UaFqCP[+*X#UldNֵ8'eq-pM!=9]@@@@@_]OТ(((((}mσr]G6 _ce{U10N+W/j† @* pM8v%#x ۮV_nj. YPcF ڭYݙ^aLC;6jjf9_BMUF Ĵ!.Tڷ3ߞAu>mZ:ԝ(((((mH*smR%e߾ U *Q!ژU}Y3qaQW>JjYGtvܬˡsY9w V~pib6L.Ŵ!.o߀6\Z&,˺lG@@@@P@PNdvx3yT`Ӡ %BX lڸ߬(i AݱX@^0&FK6(iFѭ6X=ۆłoi܇UE#n 4Am18оp,mI!LBuv3e bISUWeHkC<]p[xiggڤ`^;cڐVFڶMɿo:<6Mա (((((PR0;ƕX Ni|Ӝ Ӗ`U+tlܱ̃S]> 9 PPPPU!OF% %`Mjp)pI2 ` " D?cam 1]v.YjB _b!ͮm>Q~Ft=qj?XV3 qv@@@@l_|Jn3kG- -QP^nYT"t?Vg8mW.;,2=m[x}ٙv/\{}YHp;_}^b#u#,_ٔl(C|V&(ە& duLZT-pF[ՙ,_EwϬxڢ㲎-]^Ӯm;߼6䝃owl[ Ŷ|((((( P mңe3}hNA4PPPPPPP4T~7>wFꖜ PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP KEmWIENDB`cinder-8.0.0/doc/source/images/rpc/arch.svg0000664000567000056710000004405012701406250021660 0ustar jenkinsjenkins00000000000000 Page-1 Box.8 Compute Compute Box.2 Volume Storage VolumeStorage Box Auth Manager Auth Manager Box.4 Cloud Controller CloudController Box.3 API Server API Server Box.6 Object Store ObjectStore Box.7 Node Controller NodeController Dynamic connector Dynamic connector.11 Dynamic connector.12 http http Circle Cinder-Manage Cinder-Manage Circle.15 Euca2ools Euca2ools Dynamic connector.16 Dynamic connector.17 Sheet.15 Project User Role Network VPN ProjectUserRoleNetworkVPN Sheet.16 VM instance Security group Volume Snapshot VM image IP address... VM instanceSecurity groupVolumeSnapshotVM imageIP addressSSH keyAvailability zone Box.20 Network Controller Network Controller Box.5 Storage Controller Storage Controller Dot & arrow Dot & arrow.14 Dynamic connector.13 Sheet.22 AMQP AMQP Sheet.23 AMQP AMQP Sheet.24 AMQP AMQP Sheet.25 REST REST Sheet.26 local method local method Sheet.27 local method local method Sheet.28 local method local method cinder-8.0.0/doc/source/devref/0000775000567000056710000000000012701406543017446 5ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/source/devref/jenkins.rst0000664000567000056710000000335712701406250021644 0ustar jenkinsjenkins00000000000000Continuous Integration with Jenkins =================================== Cinder uses a `Jenkins`_ server to automate development tasks. The Jenkins front-end is at http://jenkins.openstack.org. You must have an account on `Launchpad`_ to be able to access the OpenStack Jenkins site. Jenkins performs tasks such as: `gate-cinder-pep8`_ Run PEP8 checks on proposed code changes that have been reviewed. `gate-cinder-pylint`_ Run Pylint checks on proposed code changes that have been reviewed. 
`gate-cinder-python27`_ Run unit tests using python2.7 on proposed code changes that have been reviewed. `gate-cinder-python34`_ Run unit tests using python3.4 on proposed code changes that have been reviewed. `cinder-coverage`_ Calculate test coverage metrics. `cinder-docs`_ Build this documentation and push it to http://cinder.openstack.org. `cinder-merge-release-tags`_ Merge reviewed code into the git repository. `cinder-tarball`_ Do ``python setup.py sdist`` to create a tarball of the cinder code and upload it to http://cinder.openstack.org/tarballs .. _Jenkins: http://jenkins-ci.org .. _Launchpad: http://launchpad.net .. _gate-cinder-pep8: https://jenkins.openstack.org/job/gate-cinder-pep8 .. _gate-cinder-pylint: https://jenkins.openstack.org/job/gate-cinder-pylint .. _gate-cinder-python27: https://jenkins.openstack.org/job/gate-cinder-python27 .. _gate-cinder-python34: https://jenkins.openstack.org/job/gate-cinder-python34 .. _cinder-coverage: https://jenkins.openstack.org/job/cinder-coverage .. _cinder-docs: https://jenkins.openstack.org/job/cinder-docs .. _cinder-merge-release-tags: https://jenkins.openstack.org/job/cinder-merge-release-tags .. _cinder-tarball: https://jenkins.openstack.org/job/cinder-tarball cinder-8.0.0/doc/source/devref/index.rst0000664000567000056710000000336412701406257021317 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Developer Guide =============== In this section you will find information on Cinder's lower level programming APIs. Programming HowTos and Tutorials -------------------------------- .. toctree:: :maxdepth: 3 development.environment api_microversion_dev api_microversion_history unit_tests addmethod.openstackapi drivers gmr replication migration api.apache rolling.upgrades Background Concepts for Cinder ------------------------------ .. toctree:: :maxdepth: 3 architecture attach_detach_conventions threading i18n rpc Other Resources --------------- .. toctree:: :maxdepth: 3 launchpad gerrit jenkins releasenotes API Reference ------------- .. toctree:: :maxdepth: 3 ../api/autoindex Module Reference ---------------- .. toctree:: :maxdepth: 3 services database volume auth api scheduler fakes cinder Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` cinder-8.0.0/doc/source/devref/drivers.rst0000664000567000056710000000360412701406250021654 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. Drivers ======= Cinder exposes an API to users to interact with different storage backend solutions. The following are standards across all drivers for Cinder services to properly interact with a driver. Minimum Features ---------------- Minimum features are enforced to avoid having a grid of what features are supported by which drivers and which releases. Cinder Core requires that all drivers implement the following minimum features. Havana ------ * Volume Create/Delete * Volume Attach/Detach * Snapshot Create/Delete * Create Volume from Snapshot * Get Volume Stats * Copy Image to Volume * Copy Volume to Image * Clone Volume Icehouse -------- * All of the above plus * Extend Volume Volume Stats ------------ Volume stats are used by the different schedulers for the drivers to provide a report on their current state of the backend. The following should be provided by a driver. * driver_version * free_capacity_gb * reserved_percentage * storage_protocol * total_capacity_gb * vendor_name * volume_backend_name **NOTE:** If the driver is unable to provide a value for free_capacity_gb or total_capacity_gb, keywords can be provided instead. Please use 'unknown' if the array cannot report the value or 'infinite' if the array has no upper limit. cinder-8.0.0/doc/source/devref/threading.rst0000664000567000056710000000435612701406250022150 0ustar jenkinsjenkins00000000000000Threading model =============== All OpenStack services use *green thread* model of threading, implemented through using the Python `eventlet `_ and `greenlet `_ libraries. Green threads use a cooperative model of threading: thread context switches can only occur when specific eventlet or greenlet library calls are made (e.g., sleep, certain I/O calls). From the operating system's point of view, each OpenStack service runs in a single thread. The use of green threads reduces the likelihood of race conditions, but does not completely eliminate them. In some cases, you may need to use the ``@utils.synchronized(...)`` decorator to avoid races. In addition, since there is only one operating system thread, a call that blocks that main thread will block the entire process. Yielding the thread in long-running tasks ----------------------------------------- If a code path takes a long time to execute and does not contain any methods that trigger an eventlet context switch, the long-running thread will block any pending threads. This scenario can be avoided by adding calls to the eventlet sleep method in the long-running code path. The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: from eventlet import greenthread ... greenthread.sleep(0) MySQL access and eventlet ------------------------- Queries to the MySQL database will block the main thread of a service. This is because OpenStack services use an external C library for accessing the MySQL database. Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, the resulting database query blocks the thread. The Diablo release contained a thread-pooling implementation that did not block, but this implementation resulted in a `bug`_ and was removed. See this `mailing list thread`_ for a discussion of this issue, including a discussion of the `impact on performance`_. .. _bug: https://bugs.launchpad.net/cinder/+bug/838581 .. 
_mailing list thread: https://lists.launchpad.net/openstack/msg08118.html .. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html cinder-8.0.0/doc/source/devref/volume.rst0000664000567000056710000000353212701406250021505 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Storage Volumes, Disks ====================== .. todo:: rework after iSCSI merge (see 'Old Docs') (todd or vish) The :mod:`cinder.volume.manager` Module --------------------------------------- .. automodule:: cinder.volume.manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cinder.volume.driver` Module -------------------------------------- .. automodule:: cinder.volume.driver :noindex: :members: :undoc-members: :show-inheritance: :exclude-members: FakeAOEDriver Tests ----- The :mod:`volume_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.volume_unittest :noindex: :members: :undoc-members: :show-inheritance: Old Docs -------- Cinder uses iSCSI to export storage volumes from multiple storage nodes. These iSCSI exports are attached (using libvirt) directly to running instances. Cinder volumes are exported over the primary system VLAN (usually VLAN 1), and not over individual VLANs. The underlying volumes by default are LVM logical volumes, created on demand within a single large volume group. cinder-8.0.0/doc/source/devref/auth.rst0000664000567000056710000002014512701406250021136 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _auth: Authentication and Authorization ================================ The :mod:`cinder.quota` Module ------------------------------ .. automodule:: cinder.quota :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cinder.auth.signer` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.auth.signer :noindex: :members: :undoc-members: :show-inheritance: Auth Manager ------------ The :mod:`cinder.auth.manager` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.auth.manager :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`auth_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: cinder.tests.auth_unittest
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`access_unittest` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: cinder.tests.access_unittest
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`quota_unittest` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: cinder.tests.quota_unittest
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

Legacy Docs
-----------

Cinder provides RBAC (role-based access control) for the AWS-style APIs. We
define the roles below.

Roles-Based Access Control of AWS-style APIs using SAML Assertions

“Achieving FIPS 199 Moderate certification of a hybrid cloud environment
using CloudAudit and declarative C.I.A. classifications”

Introduction
------------

We will investigate one method for integrating an AWS-style API with US
eAuthentication-compatible federated authentication systems, to achieve
access controls and limits based on traditional operational roles.
Additionally, we will look at how combining this approach with an
implementation of the CloudAudit APIs will allow us to achieve a
certification under the FIPS 199 Moderate classification for a hybrid cloud
environment.

Relationship of US eAuth to RBAC
--------------------------------

Typical implementations of US eAuth authentication systems are structured as
follows::

  [ MS Active Directory or other federated LDAP user store ]
  --> backends to…
  [ SUN Identity Manager or other SAML Policy Controller ]
  --> maps URLs to groups…
  [ Apache Policy Agent in front of eAuth-secured Web Application ]

In more ideal implementations, the remainder of the application-specific
account information is stored either in extended schema on the LDAP server
itself, via the use of a translucent LDAP proxy, or in an independent
datastore keyed off of the UID provided via SAML assertion.

.. _auth_roles:

Roles
-----

AWS API calls are traditionally secured via Access and Secret Keys, which are
used to sign API calls, along with traditional timestamps to prevent replay
attacks. The APIs can be logically grouped into sets that align with five
typical roles:

* Base User
* System Administrator/Developer (currently have the same permissions)
* Network Administrator
* Project Manager
* Cloud Administrator/IT-Security (currently have the same permissions)

There is an additional, conceptual end-user that may or may not have API
access:

* (EXTERNAL) End-user / Third-party User

Basic operations are available to any user:

* Describe Instances
* Describe Images
* Describe Volumes
* Describe Keypairs
* Create Keypair
* Delete Keypair
* Create, Upload, Delete: Buckets and Keys (Object Store)

System Administrators/Developers/Project Managers:

* Create, Attach, Delete Volume (Block Store)
* Launch, Reboot, Terminate Instance
* Register/Unregister Machine Image (project-wide)
* Request / Review CloudAudit Scans

Project Managers:

* Add and remove other users (currently no API)
* Set roles (currently no API)

Network Administrators:

* Change Machine Image properties (public / private)
* Change Firewall Rules, define Security Groups
* Allocate, Associate, Disassociate Public IP addresses

Cloud Administrators/IT-Security:

* All permissions

Enhancements
------------

* SAML Token passing
* REST interfaces
* SOAP interfaces

We wrap the SAML token into the API calls, then store the UID (fetched via a
backchannel) in the instance metadata, providing end-to-end auditability of
ownership and responsibility without PII.
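To make the key-based signing described under Roles concrete, the following
is a minimal sketch of how an AWS-style call could be signed with an
access/secret key pair and a timestamp. It is illustrative only — it is not
the actual :mod:`cinder.auth.signer` implementation, and the ``sign_request``
helper and its parameters are hypothetical::

    import base64
    import hashlib
    import hmac
    from datetime import datetime

    from six.moves.urllib.parse import quote


    def sign_request(access_key, secret_key, method, host, path, params):
        """Sign an AWS-style (signature v2) request.

        The timestamp guards against replay attacks; the HMAC over the
        canonical request proves possession of the secret key without
        ever sending the key itself over the wire.
        """
        params = dict(params,
                      AWSAccessKeyId=access_key,
                      SignatureMethod='HmacSHA256',
                      SignatureVersion='2',
                      Timestamp=datetime.utcnow().strftime(
                          '%Y-%m-%dT%H:%M:%SZ'))
        # Canonicalize the query string: keys sorted, values URL-encoded.
        query = '&'.join('%s=%s' % (quote(k, safe=''), quote(str(v), safe=''))
                         for k, v in sorted(params.items()))
        to_sign = '\n'.join([method, host, path, query])
        digest = hmac.new(secret_key.encode('utf-8'),
                          to_sign.encode('utf-8'),
                          hashlib.sha256).digest()
        params['Signature'] = base64.b64encode(digest).decode('utf-8')
        return params

The server repeats the same computation with its copy of the secret key and
rejects the call if the signatures differ or the timestamp is stale.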
CloudAudit APIs
---------------

* Request formats
* Response formats
* Stateless asynchronous queries

CloudAudit queries may spawn long-running processes (similar to launching
instances, etc.). They need to return a ReservationId in the same fashion,
which can be returned in further queries for updates.

RBAC of CloudAudit API calls is critical, since detailed system information
is a system vulnerability.

Type declarations
-----------------

* Data declarations – Volumes and Objects
* System declarations – Instances

Existing API calls to launch instances specify a single, combined “type”
flag. We propose to extend this with three additional type declarations,
mapping to the “Confidentiality, Integrity, Availability” classifications of
FIPS 199. An example API call would look like::

  RunInstances type=m1.large number=1 secgroup=default key=mykey
  confidentiality=low integrity=low availability=low

These additional parameters would also apply to creation of block storage
volumes (along with the existing parameter of ‘size’), and creation of
object storage ‘buckets’. (C.I.A. classifications on a bucket would be
inherited by the keys within this bucket.)

Request Brokering
-----------------

* Cloud Interop
* IMF Registration / PubSub
* Digital C&A

Establishing declarative semantics for individual API calls will allow the
cloud environment to seamlessly proxy these API calls to external,
third-party vendors – when the requested CIA levels match.

See related work within the Infrastructure 2.0 working group for more
information on how the IMF Metadata specification could be utilized to
manage registration of these vendors and their C&A credentials.

Dirty Cloud - Hybrid Data Centers
---------------------------------

* CloudAudit bridge interfaces
* Anything in the ARP table

A hybrid cloud environment provides dedicated, potentially co-located
physical hardware with a network interconnect to the project or users’ cloud
virtual network. This interconnect is typically a bridged VPN connection. Any
machines that can be bridged into a hybrid environment in this fashion (at
Layer 2) must implement a minimum version of the CloudAudit spec, such that
they can be queried to provide a complete picture of the IT-sec runtime
environment. Network discovery protocols (ARP, CDP) can be applied in this
case, and existing protocols (SNMP location data, DNS LOC records) can be
overloaded to provide CloudAudit information.

The Details
-----------

* Preliminary Roles Definitions
* Categorization of available API calls
* SAML assertion vocabulary

System limits
-------------

The following limits need to be defined and enforced:

* Total number of instances allowed (user / project)
* Total number of instances, per instance type (user / project)
* Total number of volumes (user / project)
* Maximum size of volume
* Cumulative size of all volumes
* Total use of object storage (GB)
* Total number of Public IPs

Further Challenges
------------------

* Prioritization of users / jobs in shared computing environments
* Incident response planning
* Limit launch of instances to specific security groups based on AMI
* Store AMIs in LDAP for added property control
cinder-8.0.0/doc/source/devref/replication.rst0000664000567000056710000001454512701406250022515 0ustar jenkinsjenkins00000000000000Replication
============

How to implement replication features in a backend driver.
For backend devices that offer replication features, Cinder provides a common
mechanism for exposing that functionality on a per-volume basis, while still
trying to allow flexibility for the varying implementations and requirements
of all the different backend devices.

Most of the configuration is done via the cinder.conf file under the driver
section and through the use of volume types.

NOTE: This implementation is intended to solve a specific use case. It's
critical that you read the Use Cases section of the spec here:
https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/cheesecake.html

Config file examples
--------------------

The cinder.conf file is used to specify replication config info for a
specific driver. There is no concept of managed vs unmanaged; ALL replication
configurations are expected to work by using the same driver. In other words,
rather than trying to perform any magic by changing host entries in the DB
for a Volume etc., all replication targets are considered "unmanaged", BUT if
a failover is issued, it's the driver's responsibility to access replication
volumes on the replicated backend device.

This results in no changes for the end-user. For example, they can still
issue an attach call to a replicated volume that has been failed over, and
the driver will still receive the call, BUT the driver will need to figure
out whether it needs to redirect the call to a different backend than the
default or not.

Information regarding whether the backend is in a failed-over state should be
stored in the driver; in the case of a restart, the service entry in the DB
will have the replication status info and pass it in during init to allow the
driver to be set in the correct state.

In the case of a failover event where a volume was NOT of type replicated,
that volume will now be UNAVAILABLE and any calls to access that volume
should return a VolumeNotFound exception.

**replication_device**

Is a multi-dict opt that should be specified for each replication target
device the admin would like to configure.

*NOTE:* There is one standardized and REQUIRED key in the config entry; all
others are vendor-unique:

* backend_id:<vendor-defined-identifier-for-the-target>

An example driver config for a device with multiple replication targets is
shown below::

    .....
    [driver-biz]
    volume_driver=xxxx
    volume_backend_name=biz

    [driver-baz]
    volume_driver=xxxx
    volume_backend_name=baz

    [driver-foo]
    volume_driver=xxxx
    volume_backend_name=foo
    replication_device = backend_id:vendor-id-1,unique_key:val....
    replication_device = backend_id:vendor-id-2,unique_key:val....

In this example the result is self.configuration.get('replication_device')
with the list::

    [{backend_id: vendor-id-1, unique_key: val1},
     {backend_id: vendor-id-2, unique_key: val1}]

Volume Types / Extra Specs
---------------------------

In order for a user to specify they'd like a replicated volume, there needs
to be a corresponding Volume Type created by the Cloud Administrator.

There's a good deal of flexibility by using volume types. The scheduler can
send the create request to a backend that provides replication by simply
providing the replication=enabled key to the extra-specs of the volume type.
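Concretely, a Cloud Administrator could wire this up from the command line.
This is only a sketch: the type name ``replicated-vol`` and the volume name
are made up, and the exact extra-spec keys a driver honors are vendor
dependent, as discussed below::

    cinder type-create replicated-vol
    cinder type-key replicated-vol set replication=enabled
    cinder create --volume-type replicated-vol --name my-vol 10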
For example, if the type was set to simply create the volume on any (or if
you only had one) backend that supports replication, the extra-specs entry
would be::

    {replication: enabled}

Additionally you could provide additional details using scoped keys::

    {replication: enabled, volume_backend_name: foo,
     replication: replication_type: async}

It's up to the driver to parse the volume type info on create and set things
up as requested. While the scoping key can be anything, it's strongly
recommended that all backends utilize the same key (replication) for
consistency and to make things easier for the Cloud Administrator.

Additionally it's expected that if a backend is configured with 3 replication
targets, and a volume of type replication=enabled is issued against that
backend, then it will replicate to ALL THREE of the configured targets.

Capabilities reporting
----------------------

The following entries are expected to be added to the stats/capabilities
update for replication configured devices::

    stats["replication_enabled"] = True|False
    stats["replication_targets"] = [...]

NOTICE: we report configured replication targets via the volume stats_update.
This information is added to the get_capabilities admin call.

Required methods
-----------------

The number of API methods associated with replication is intentionally very
limited; they are Admin only methods.

They include::

    replication_failover(self, context, volumes)

Additionally we have freeze/thaw methods that will act on the scheduler but
may or may not require something from the driver::

    freeze_backend(self, context)
    thaw_backend(self, context)

**replication_failover**

Used to instruct the backend to fail over to the secondary/target device. If
no secondary is specified (via the backend_id argument), it's up to the
driver to choose which device to fail over to. In the case of only a single
replication target this argument should be ignored.

Note that ideally drivers will know how to update the volume reference
properly so that Cinder is now pointing to the secondary. Also, while it's
not required at this time, ideally the command would act as a toggle,
allowing to switch back and forth between primary and secondary and back to
primary.

Keep in mind the use case is that the backend has died a horrible death and
is no longer valid. Any volumes that were on the primary and NOT of
replication type should now be unavailable.

NOTE: We do not expect things like create requests to go to the driver and
magically create volumes on the replication target. The concept is that the
backend is lost, and we're just providing a DR mechanism to preserve user
data for volumes that were specified as such via type settings.

**freeze_backend**

Puts a backend host/service into a R/O state for the control plane. For
example, if a failover is issued, it is likely desirable that while data
access to existing volumes is maintained, it would not be wise to continue
doing things like creates, deletes, extends, etc.

**thaw_backend**

Clears the frozen control plane on a backend.
cinder-8.0.0/doc/source/devref/api.rst0000664000567000056710000000530012701406250020742 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the
      Administrator of the National Aeronautics and Space Administration.
      All Rights Reserved.

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. API Endpoint ============ Cinder has a system for managing multiple APIs on different subdomains. Currently there is support for the OpenStack API. Common Components ----------------- The :mod:`cinder.api` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.api :noindex: :members: :undoc-members: :show-inheritance: OpenStack API ------------- The :mod:`openstack` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.api.openstack :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`api_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.api_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`api_integration` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.api_integration :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cloud_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.cloud_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`api.fakes` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.api.fakes :noindex: :members: :undoc-members: :show-inheritance: The :mod:`api.test_wsgi` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.api.test_wsgi :noindex: :members: :undoc-members: :show-inheritance: The :mod:`test_api` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.api.openstack.test_api :noindex: :members: :undoc-members: :show-inheritance: The :mod:`test_auth` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.api.openstack.test_auth :noindex: :members: :undoc-members: :show-inheritance: The :mod:`test_faults` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.api.openstack.test_faults :noindex: :members: :undoc-members: :show-inheritance: cinder-8.0.0/doc/source/devref/services.rst0000664000567000056710000000457112701406250022025 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _service_manager_driver: Services, Managers and Drivers ============================== The responsibilities of Services, Managers, and Drivers, can be a bit confusing to people that are new to cinder. This document attempts to outline the division of responsibilities to make understanding the system a little bit easier. Currently, Managers and Drivers are specified by flags and loaded using utils.load_object(). This method allows for them to be implemented as singletons, classes, modules or objects. 
As long as the path specified by the flag leads to an object (or a callable
that returns an object) that responds to getattr, it should work as a manager
or driver.

The :mod:`cinder.service` Module
--------------------------------

.. automodule:: cinder.service
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.manager` Module
--------------------------------

.. automodule:: cinder.manager
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

Implementation-Specific Drivers
-------------------------------

A manager will generally load a driver for some of its tasks. The driver is
responsible for specific implementation details. Anything running shell
commands on a host, or dealing with other non-Python code, should probably be
happening in a driver.

Drivers should minimize touching the database, although it is currently
acceptable for implementation-specific data. This may be reconsidered at some
point.

It usually makes sense to define an Abstract Base Class for the specific
driver (i.e. VolumeDriver), to define the methods that a different driver
would need to implement, as sketched below.
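The following is a minimal, hypothetical sketch of that pattern. It is not
the real ``VolumeDriver`` interface (which lives in
:mod:`cinder.volume.driver` and is much larger); the class and method names
here are illustrative only::

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class ExampleVolumeDriverBase(object):
        """Hypothetical abstract base class for a volume driver."""

        @abc.abstractmethod
        def create_volume(self, volume):
            """Create a volume on the backend."""

        @abc.abstractmethod
        def delete_volume(self, volume):
            """Remove a volume from the backend."""


    class ExampleISCSIDriver(ExampleVolumeDriverBase):
        """A concrete driver must implement every abstract method."""

        def create_volume(self, volume):
            # Implementation-specific work (shell commands, vendor API
            # calls, etc.) belongs here, in the driver.
            pass

        def delete_volume(self, volume):
            pass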
cinder-8.0.0/doc/source/devref/unit_tests.rst0000664000567000056710000001523012701406257022404 0ustar jenkinsjenkins00000000000000Unit Tests
==========

Cinder contains a suite of unit tests, in the cinder/tests/unit directory.

Any proposed code change will be automatically rejected by the OpenStack
Jenkins server [#f1]_ if the change causes unit test failures.

Running the tests
-----------------

There are a number of ways to run unit tests currently, and there's a
combination of frameworks used depending on what commands you use. The
preferred method is to use tox, which calls ostestr via the tox.ini file.
To run all tests simply run::

    tox

This will create a virtual environment, load all the packages from
test-requirements.txt, and run all unit tests as well as run flake8 and
hacking checks against the code.

Note that you can inspect the tox.ini file to get more details on the
available options and what the test run does by default.

Running a subset of tests using tox
-----------------------------------

One common activity is to run a single test; you can do this with tox simply
by specifying to just run py27 or py34 tests against a single test::

    tox -epy27 -- -n cinder.tests.unit.test_volume.AvailabilityZoneTestCase.test_list_availability_zones_cached

Or all tests in the test_volume.py file::

    tox -epy27 -- -n cinder.tests.unit.test_volume

For more information on these options and how to run tests, please see the
`ostestr documentation <http://docs.openstack.org/developer/os-testr/>`_.

Run tests wrapper script
------------------------

In addition you can also use the wrapper script run_tests.sh by simply
executing::

    ./run_tests.sh

This script is a wrapper around the testr test runner and the flake8 checker.
Note that there has been talk of deprecating this wrapper and this method of
testing; it is still available for now, but it may be best to get used to
using tox or even ostestr directly. Documentation is left in place for those
that still use it.

Flags
-----

The ``run_tests.sh`` script supports several flags. You can view a list of
flags by doing::

    run_tests.sh -h

This will show the following help information::

    Usage: ./run_tests.sh [OPTION]...
    Run Cinder's test suite(s)

    -V, --virtual-env           Always use virtualenv.  Install automatically
                                if not present
    -N, --no-virtual-env        Don't use virtualenv.  Run tests in local
                                environment
    -s, --no-site-packages      Isolate the virtualenv from the global Python
                                environment
    -r, --recreate-db           Recreate the test database (deprecated, as
                                this is now the default).
    -n, --no-recreate-db        Don't recreate the test database.
    -x, --stop                  Stop running tests after the first error or
                                failure.
    -f, --force                 Force a clean re-build of the virtual
                                environment. Useful when dependencies have
                                been added.
    -p, --pep8                  Just run pep8
    -P, --no-pep8               Don't run pep8
    -c, --coverage              Generate coverage report
    -h, --help                  Print this usage message
    --hide-elapsed              Don't print the elapsed time for each test
                                along with slow test list

Because ``run_tests.sh`` is a wrapper around testr, it also accepts the same
flags as testr. See the `testr documentation`_ for details about these
additional flags.

.. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html

Running a subset of tests
-------------------------

Instead of running all tests, you can specify an individual directory, file,
class, or method that contains test code.

To run the tests in the ``cinder/tests/scheduler`` directory::

    ./run_tests.sh scheduler

To run the tests in the ``cinder/tests/test_libvirt.py`` file::

    ./run_tests.sh test_libvirt

To run the tests in the `HostStateTestCase` class in
``cinder/tests/test_libvirt.py``::

    ./run_tests.sh test_libvirt.HostStateTestCase

To run the `ToPrimitiveTestCase.test_dict` test method in
``cinder/tests/test_utils.py``::

    ./run_tests.sh test_utils.ToPrimitiveTestCase.test_dict

Suppressing logging output when tests fail
------------------------------------------

By default, when one or more unit tests fail, all of the data sent to the
logger during the failed tests will appear on standard output, which
typically consists of many lines of text. The logging output can make it
difficult to identify which specific tests have failed, unless your terminal
has a large scrollback buffer or you have redirected output to a file.

You can suppress the logging output by calling ``run_tests.sh`` with the
flag::

    --nologcapture

Virtualenv
----------

By default, the tests use the Python packages installed inside a virtualenv
[#f2]_. (This is equivalent to using the ``-V, --virtualenv`` flag.) If the
virtualenv does not exist, it will be created the first time the tests are
run.

If you wish to recreate the virtualenv, call ``run_tests.sh`` with the flag::

    -f, --force

Recreating the virtualenv is useful if the package dependencies have changed
since the virtualenv was last created. If the ``requirements.txt`` or
``tools/install_venv.py`` files have changed, it's a good idea to recreate
the virtualenv.

By default, the unit tests will see both the packages in the virtualenv and
the packages that have been installed in the Python global environment. In
some cases, the packages in the Python global environment may cause a
conflict with the packages in the virtualenv. If this occurs, you can isolate
the virtualenv from the global environment by using the flag::

    -s, --no-site-packages

If you do not wish to use a virtualenv at all, use the flag::

    -N, --no-virtual-env

Database
--------

Some of the unit tests make queries against an sqlite database [#f3]_. By
default, the test database (``tests.sqlite``) is deleted and recreated each
time ``run_tests.sh`` is invoked. (This is equivalent to using the
``-r, --recreate-db`` flag.)
To reduce testing time, if a database already exists it can be reused by
using the flag::

    -n, --no-recreate-db

Reusing an existing database may cause tests to fail if the schema has
changed. If any files in the ``cinder/db/sqlalchemy`` directory have changed,
it's a good idea to recreate the test database.

Gotchas
-------

**Running Tests from Shared Folders**

If you are running the unit tests from a shared folder, you may see tests
start to fail or stop completely as a result of Python lockfile issues
[#f4]_. You can get around this by manually setting or updating the
following line in ``cinder/tests/conf_fixture.py``::

    CONF.set_default('lock_path', '/tmp')

Note that you may use any location (not just ``/tmp``!) as long as it is not
a shared folder.

.. rubric:: Footnotes

.. [#f1] See :doc:`jenkins`.

.. [#f2] See :doc:`development.environment` for more details about the use
         of virtualenv.
cinder-8.0.0/doc/source/devref/cinder.rst0000664000567000056710000000746212701406250021448 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the
      Administrator of the National Aeronautics and Space Administration.
      All Rights Reserved.

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

Common and Misc Libraries
=========================

Libraries common throughout Cinder or just ones that haven't been categorized
very well yet.

The :mod:`cinder.adminclient` Module
------------------------------------

.. automodule:: cinder.adminclient
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.context` Module
--------------------------------

.. automodule:: cinder.context
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.exception` Module
----------------------------------

.. automodule:: cinder.exception
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.common.config` Module
--------------------------------------

.. automodule:: cinder.common.config
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.process` Module
--------------------------------

.. automodule:: cinder.process
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.rpc` Module
----------------------------

.. automodule:: cinder.rpc
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.server` Module
-------------------------------

.. automodule:: cinder.server
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.test` Module
-----------------------------

.. automodule:: cinder.test
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.utils` Module
------------------------------

.. automodule:: cinder.utils
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.validate` Module
---------------------------------

.. automodule:: cinder.validate
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.wsgi` Module
-----------------------------

.. automodule:: cinder.wsgi
.. automodule:: cinder.wsgi :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`declare_conf` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.declare_conf :noindex: :members: :undoc-members: :show-inheritance: The :mod:`conf_fixture` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.conf_fixture :noindex: :members: :undoc-members: :show-inheritance: The :mod:`process_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.process_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`rpc_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.rpc_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`runtime_conf` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.runtime_conf :noindex: :members: :undoc-members: :show-inheritance: The :mod:`validator_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.validator_unittest :noindex: :members: :undoc-members: :show-inheritance: cinder-8.0.0/doc/source/devref/releasenotes.rst0000664000567000056710000000356212701406250022672 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Intel Corporation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Release notes ============= The release notes for a patch should be included in the patch. If not, the release notes should be in a follow-on review. If the following applies to the patch, a release note is required: * Upgrades * The deployer needs to take an action when upgrading * A new config option is added that the deployer should consider changing from the default * A configuration option is deprecated or removed * Features * A new feature or driver is implemented * Feature is deprecated or removed * Current behavior is changed * Bugs * A security bug is fixed * A long-standing or important bug is fixed * APIs * The storage or backup driver interface changes * REST API changes Cinder uses `reno <http://docs.openstack.org/developer/reno/usage.html>`_ to generate release notes. Please read the docs for details. In summary, use .. code-block:: bash $ tox -e venv -- reno new <slug> Then edit the sample file that was created and push it with your change. To see the results: .. code-block:: bash $ git commit # Commit the change because reno scans git log. $ tox -e releasenotes Then look at the generated release notes files in releasenotes/build/html in your favorite browser. cinder-8.0.0/doc/source/devref/migration.rst0000664000567000056710000003060612701406250022171 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. Migration ========= Introduction to volume migration -------------------------------- Cinder provides volume migration support within the same deployment, which means the cinder volume service node (c-vol node) where the source volume is located is able to access the c-vol node where the destination volume is located, and both of them share the same Cinder API service, scheduler service, message queue service, etc. As a general rule migration is possible for volumes in 'available' or 'in-use' status, provided the driver has implemented volume migration. So far, we are confident that migration will succeed for 'available' volumes, whose drivers implement the migration routines. However, the migration of 'in-use' volumes is driver dependent: it may fail depending on the source or destination driver involved in the operation. For example, from RBD to LVM, the migration of an 'in-use' volume will succeed, but from LVM to RBD, it will fail. There are two major scenarios that volume migration supports in Cinder: Scenario 1: Migration between two back-ends with the same volume type, regardless of whether they are located on the same c-vol node or not. Scenario 2: Migration between two back-ends with different volume types, regardless of whether the back-ends are located on the same c-vol node or not. How to do volume migration via CLI ---------------------------------- Scenario 1 of volume migration is done via the following command from the CLI: cinder migrate [--force-host-copy [<True|False>]] [--lock-volume [<True|False>]] <volume> <host> Mandatory arguments: <volume> ID of volume to migrate. <host> Destination host. The format of host is host@backend#POOL, while 'host' is the host name of the volume node, 'backend' is the back-end name and 'POOL' is a logical concept to describe a set of storage resources residing in the back-end. If the back-end does not have specified pools, 'POOL' needs to be set with the same name as 'backend'. Optional arguments: --force-host-copy [<True|False>] Enables or disables generic host-based force-migration, which bypasses the driver optimization. Default=False. --lock-volume [<True|False>] Enables or disables the termination of volume migration caused by other commands. This option applies to an available volume. True means it locks the volume state and does not allow the migration to be aborted. The volume status will be in maintenance during the migration. False means it allows the volume migration to be aborted. The volume status is still in the original status. Default=False. Important note: Currently, error handling for failed migration operations is under development in Cinder. If we would like the volume migration to finish without any interruption, please set --lock-volume to True. If it is set to False, we cannot predict what will happen if other actions (attach, detach, extend, etc.) are issued on the volume during the migration. It all depends on which stage the volume migration has reached and when the request of another action comes. Scenario 2 of volume migration can be done via the following command from the CLI: cinder retype --migration-policy on-demand <volume> <volume-type> Mandatory arguments: <volume> Name or ID of volume for which to modify type. <volume-type> New volume type. Source volume type and destination volume type must be different and they must refer to different back-ends.
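As a concrete illustration, with made-up volume ID, host and volume type values (a real invocation would use your own), the two scenarios look like::

    # Scenario 1: move a volume to another back-end of the same volume type
    cinder migrate 26df0a30-0b38-4a7e-8bcd-3a06d4b27bd9 server2@lvmdriver-1#lvmdriver-1

    # Scenario 2: move a volume to a back-end with a different volume type
    cinder retype --migration-policy on-demand 26df0a30-0b38-4a7e-8bcd-3a06d4b27bd9 lvm-gold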
Configurations -------------- To set up an environment to try the volume migration, we need to configure at least two different back-ends on the same c-vol node, or two back-ends on two different c-vol nodes. Which command to use, 'cinder migrate' or 'cinder retype', depends on which type of volume we would like to test. **Scenario 1 for migration** To configure the environment for Scenario 1 migration, e.g. a volume is migrated from a back-end on Node 1 to a back-end on Node 2, cinder.conf needs to contain the following entries for the same back-end on both the source and the destination nodes: For Node 1: ... [<backend-name>] volume_driver=xxxx volume_backend_name=<backend-name> ... For Node 2: ... [<backend-name>] volume_driver=xxxx volume_backend_name=<backend-name> ... If a volume with a predefined volume type is going to migrate, the back-end drivers from Node 1 and Node 2 should have the same value for volume_backend_name, which means <backend-name> should be the same for Node 1 and Node 2. The volume type can be created with the extra specs {volume_backend_name: driver-biz}. If we are going to migrate a volume with no volume type, it is not necessary to set the same value for volume_backend_name on both Node 1 and Node 2. **Scenario 2 for migration** To configure the environment for Scenario 2 migration: For example, a volume is migrated from the driver-biz back-end on Node 1 to the driver-net back-end on Node 2, cinder.conf needs to contain the following entries: For Node 1: ... [driver-biz] volume_driver=xxxx volume_backend_name=driver-biz ... For Node 2: ... [driver-net] volume_driver=xxxx volume_backend_name=driver-net ... For example, if a volume is migrated from the driver-biz back-end on Node 1 to the driver-net back-end on the same node, cinder.conf needs to contain the following entries: ... [driver-biz] volume_driver=xxxx volume_backend_name=driver-biz ... ... [driver-net] volume_driver=xxxx volume_backend_name=driver-net ... Two volume types need to be created. One is with the extra specs: {volume_backend_name: driver-biz}. The other is with the extra specs: {volume_backend_name: driver-net}. What can be tracked during volume migration ------------------------------------------- The volume migration is an administrator-only action and it may take a relatively long time to finish. The property 'migration status' will indicate the stage of the migration process for the volume. The administrator can check the 'migration status' via the 'cinder list' or 'cinder show <volume-id>' command. The 'cinder list' command presents a list of all the volumes with some properties displayed, including the migration status, only to the administrator. However, the migration status is not included if 'cinder list' is issued by an ordinary user. The 'cinder show <volume-id>' command will present all the detailed information of a specific volume, including the migration status, only to the administrator. If the migration status of a volume shows 'starting', 'migrating' or 'completing', it means the volume is in the process of a migration. If the migration status is 'success', it means the migration has finished and the previous migration of this volume succeeded. If the migration status is 'error', it means the migration has finished and the previous migration of this volume failed. How to implement volume migration for a back-end driver ------------------------------------------------------- There are two kinds of implementations for the volume migration currently in Cinder.
The first is the generic host-assisted migration, which consists of two different transfer modes, block-based and file-based. This implementation is based on the volume attachment to the node of the cinder volume service, the c-vol node. Any back-end driver supporting iSCSI will be able to support the generic host-assisted migration. A back-end driver without iSCSI support needs to be tested to decide if it supports this kind of migration. The block-based transfer mode is done by the 'dd' command, applying to drivers like LVM, Storwize, etc., and the file-based transfer mode is done by file copy, typically applying to the RBD driver. The second is the driver-specific migration. Since some storage back-ends have their own special commands to copy the volume, Cinder also provides a way for drivers to implement migration in terms of their own internal commands. If the volume is migrated between two nodes configured with the same storage back-end, the migration will be optimized by calling the method migrate_volume in the driver, if the driver provides an implementation for it to migrate the volume within the same back-end, and will fall back to the generic host-assisted migration provided in the manager, if no such implementation is found or this implementation is not applicable for this migration. If your storage driver in Cinder provides iSCSI support, it should naturally work under the generic host-assisted migration, when --force-host-copy is set to True from the API request. Normally you do not need to change any code, unless you need to transfer the volume from your driver in a different way from the block-based transfer or the file-based transfer. If your driver uses a network connection to communicate the block data itself, you can use file I/O to participate in migration. Please take the RBD driver as a reference for this implementation. If you would like to implement a driver-specific volume migration for your driver, the API method associated with the driver-specific migration is the following admin-only method: migrate_volume(self, ctxt, volume, host) If your driver is taken as the destination back-end for a generic host-assisted migration and your driver needs to update the volume model after a successful migration, you need to implement the following method for your driver: update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status) Required methods ---------------- There is one mandatory method that needs to be implemented for the driver to implement the driver-specific volume migration. **migrate_volume** Used to migrate the volume directly if source and destination are managed by the same storage. There is one optional method that could be implemented for the driver to implement the generic host-assisted migration. **update_migrated_volume** Used to return the key-value pairs to update the volume model after a successful migration. The key-value pairs returned are supposed to be the final values your driver would like to be in the volume model, if a migration is completed. This method can be used in a generally wide range, but the most common use case covered in this method is to rename the back-end name to the original volume id in your driver to make sure that the back-end still keeps the same id or name as it is before the volume migration. For this use case, there are two important fields: _name_id and provider_location. The field _name_id is used to map the cinder volume id and the back-end id or name.
The default value is None, which means the cinder volume id is the same as the back-end id or name. If they are different, _name_id is used to save the back-end id or name. The field provider_location is used to save the export information, created by the volume attach. This field is optional, since some drivers support the export creation and some do not. It is the driver maintainer's responsibility to decide what this field needs to be. If the back-end id or name is renamed successfully, this method can return {'_name_id': None, 'provider_location': None}. It is up to your driver whether to implement this method and decide what use cases should be covered. cinder-8.0.0/doc/source/devref/gmr.rst0000664000567000056710000000603012701406250020757 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Cinder contains a mechanism whereby developers and system administrators can generate a report about the state of a running Cinder executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Cinder process with support (see below). The *GMR* will then be output to standard error for that particular process. For example, suppose that ``cinder-api`` has process id ``8675``, and was run with ``2>/var/log/cinder/cinder-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/cinder/cinder-api-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module (provided by the oslo.reports library), as well as the Cinder version module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from cinder import version Then, register any additional sections (optional): .. code-block:: python gmr.TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python gmr.TextGuruMeditation.setup_autorun(version)
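Putting these pieces together, a minimal, hypothetical sketch of a service entry point with GMR support wired in might look as follows; only the ``setup_autorun`` call is prescribed by the steps above, the surrounding service bootstrap is illustrative:

.. code-block:: python

    from oslo_reports import guru_meditation_report as gmr

    from cinder import service
    from cinder import version


    def main():
        # Install the USR2 handler first, so a report can be requested
        # at any point in the service's lifetime.
        gmr.TextGuruMeditation.setup_autorun(version)

        # Illustrative bootstrap; a real binary parses config and sets
        # up logging before reaching this point.
        server = service.Service.create(binary='cinder-volume')
        service.serve(server)
        service.wait()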
Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation about oslo.reports: `oslo.reports <http://docs.openstack.org/developer/oslo.reports/>`_ cinder-8.0.0/doc/source/devref/development.environment.rst0000664000567000056710000001135412701406250025064 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Setting Up a Development Environment ==================================== This page describes how to set up a working Python development environment that can be used in developing cinder on Ubuntu, Fedora or Mac OS X. These instructions assume you're already familiar with git. Refer to GettingTheCode_ for additional information. .. _GettingTheCode: http://wiki.openstack.org/GettingTheCode Following these instructions will allow you to run the cinder unit tests. Running cinder is currently only supported on Linux, although you can run the unit tests on Mac OS X. Virtual environments -------------------- Cinder development uses `virtualenv <https://pypi.python.org/pypi/virtualenv>`__ to track and manage Python dependencies while in development and testing. This allows you to install all of the Python package dependencies in a virtual environment or "virtualenv" (a special subdirectory of your cinder directory), instead of installing the packages at the system level. .. note:: Virtualenv is useful for running the unit tests, but is not typically used for full integration testing or production usage. Linux Systems ------------- .. note:: Feel free to add notes and change according to your experiences or operating system. Install the prerequisite packages. On Ubuntu (tested on 12.04-64 and 14.04-64):: sudo apt-get install python-dev libssl-dev python-pip git-core libmysqlclient-dev libpq-dev libffi-dev libxslt-dev On Fedora-based distributions e.g., Fedora/RHEL/CentOS/Scientific Linux (tested on CentOS 6.5):: sudo yum install python-virtualenv openssl-devel python-pip git gcc libffi-devel libxslt-devel mysql-devel postgresql-devel On openSUSE-based distributions (SLES 12, openSUSE 13.1, Factory or Tumbleweed):: sudo zypper install gcc git libmysqlclient-devel libopenssl-devel postgresql-devel python-devel python-pip Mac OS X Systems ---------------- Install virtualenv:: sudo easy_install virtualenv Check the version of OpenSSL you have installed:: openssl version If you have installed OpenSSL 1.0.0a, which can happen when installing a MacPorts package for OpenSSL, you will see an error when running ``cinder.tests.auth_unittest.AuthTestCase.test_209_can_generate_x509``. The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) or Mac OS X 10.7 (OpenSSL 0.9.8r) works fine with cinder.
Getting the code ---------------- Grab the code:: git clone https://github.com/openstack/cinder.git cd cinder Running unit tests ------------------ The unit tests will run by default inside a virtualenv in the ``.venv`` directory. Run the unit tests by doing:: ./run_tests.sh The first time you run them, you will be asked if you want to create a virtual environment (hit "y"):: No virtual environment found...create one? (Y/n) See :doc:`unit_tests` for more details. .. _virtualenv: Manually installing and using the virtualenv -------------------------------------------- You can manually install the virtual environment instead of having ``run_tests.sh`` do it for you:: python tools/install_venv.py This will install all of the Python packages listed in the ``requirements.txt`` file into your virtualenv. There will also be some additional packages (pip, setuptools) that are installed by the ``tools/install_venv.py`` file into the virtualenv. If all goes well, you should get a message something like this:: Cinder development environment setup is complete. To activate the Cinder virtualenv for the extent of your current shell session you can run:: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running:: $ tools/with_venv.sh Contributing Your Work ---------------------- Once your work is complete you may wish to contribute it to the project. Cinder uses the Gerrit code review system. For information on how to submit your branch to Gerrit, see GerritWorkflow_. .. _GerritWorkflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow cinder-8.0.0/doc/source/devref/genconfig.rst0000664000567000056710000000333212701406250022133 0ustar jenkinsjenkins00000000000000Generation of Sample Configuration Options ========================================== opts.py ------- This file is dynamically created through the following commands and is used in the generation of the cinder.conf.sample file by the oslo config generator. It is kept in tree because deployers cannot run tox -e genconfig due to dependency issues. To generate this file only, use the command 'tox -e genopts'. To generate the cinder.conf.sample file use the command 'tox -e genconfig'. tox -e genconfig ---------------- This command will generate a new cinder.conf.sample file by running the cinder/tools/config/generate_sample.sh script. tox -e genopts -------------- This command dynamically generates the opts.py file only in the event that new configuration options have been added. To do this it runs the generate_sample.sh with the --nosamplefile option. check_uptodate.sh ----------------- This script will check that the opts.py file exists and if it does, it will then create a temp opts.py file to verify that the current opts.py file is up to date with all new configuration options that may have been added. If it is not up to date it will suggest the generation of a new file using 'tox -e genopts'. generate_sample.sh ------------------ This script is responsible for calling the generate_cinder_opts.py file which dynamically generates the opts.py file by parsing through the entire cinder project. All instances of CONF.register_opt() and CONF.register_opts() are collected and the needed arguments are pulled out of those methods. A list of the options being registered is created to be written to the opts.py file. 
Later, the oslo config generator takes in the opts.py file, parses through those lists and creates the sample file.cinder-8.0.0/doc/source/devref/api_microversion_history.rst0000664000567000056710000000011012701406250025316 0ustar jenkinsjenkins00000000000000.. include:: ../../../cinder/api/openstack/rest_api_version_history.rst cinder-8.0.0/doc/source/devref/rpc.rst0000664000567000056710000003213112701406250020757 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2010 Citrix Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. AMQP and Cinder =============== AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any two Cinder components and allows them to communicate in a loosely coupled fashion. More precisely, Cinder components use Remote Procedure Calls (RPC hereinafter) to communicate to one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved: * Decoupling between client and servant (i.e. the client does not need to know where the servant's reference is). * Full asynchronism between client and servant (i.e. the client does not need the servant to run at the same time as the remote call). * Random balancing of remote calls (i.e. if more servants are up and running, one-way calls are transparently dispatched to the first available servant). Cinder uses direct, fanout, and topic-based exchanges. The architecture looks like the one depicted in the figure below: .. image:: /images/rpc/arch.png :width: 60% .. Cinder implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which takes care of marshaling and unmarshaling of messages into function calls. Each Cinder service (for example Compute, Volume, etc.) creates two queues at initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Cinder-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise it acts as publisher only. Cinder RPC Mappings ------------------- The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. Every Cinder component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute, Volume or Network).
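To make the Invoker and Worker roles concrete before walking through the individual elements, here is a minimal sketch of the invoker side written against oslo.messaging, the library that provides this RPC layer; the topic, server and method names are illustrative only, not Cinder's actual RPC API:

.. code-block:: python

    from oslo_config import cfg
    import oslo_messaging as messaging

    transport = messaging.get_transport(cfg.CONF)

    # Routing key 'volume.myhost': only the worker on that host consumes it.
    direct = messaging.RPCClient(
        transport, messaging.Target(topic='volume', server='myhost'))

    # Routing key 'volume': any worker of that personality may consume it.
    shared = messaging.RPCClient(
        transport, messaging.Target(topic='volume'))

    ctxt = {}  # normally a serialized cinder request context

    # rpc.call: blocks until the worker sends a response back.
    reply = direct.call(ctxt, 'do_something', arg=1)

    # rpc.cast: fire-and-forget one-way message.
    shared.cast(ctxt, 'do_something_else', arg=2)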
Invokers and Workers do not actually exist in the Cinder object model, but we are going to use them as an abstraction for the sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: i) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and replies to rpc.call operations. Figure 2 shows the following internal elements: * Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher always connects to the same topic-based exchange; its life-cycle is limited to the message delivery. * Direct Consumer: a Direct Consumer comes to life if (and only if) an rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations). * Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host'). * Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message. * Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by Qpid or RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in Cinder. * Direct Exchange: this is a routing table that is created during rpc.call operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each rpc.call invoked. * Queue Element: A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetches it. Queues can be shared or can be exclusive. Queues whose routing key is 'topic' are shared amongst Workers of the same personality. .. image:: /images/rpc/rabt.png :width: 60% .. RPC Calls --------- The diagram below shows the message flow during an rpc.call operation: 1. a Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task.
3. once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. 4. once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as 'msg_id') and passed to the Invoker. .. image:: /images/rpc/flow1.png :width: 60% .. RPC Casts --------- The diagram below shows the message flow during an rpc.cast operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: /images/rpc/flow2.png :width: 60% .. AMQP Broker Load ---------------- At any given time the load of a message broker node running either Qpid or RabbitMQ is a function of the following parameters: * Throughput of API calls: the number of API calls (more precisely rpc.call ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. * Number of Workers: there is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. The figure below shows the status of a RabbitMQ node after Cinder components' bootstrap in a test environment. Exchanges and queues being created by Cinder components are: * Exchanges 1. cinder (topic exchange) * Queues 1. compute.phantom (phantom is hostname) 2. compute 3. network.phantom (phantom is hostname) 4. network 5. volume.phantom (phantom is hostname) 6. volume 7. scheduler.phantom (phantom is hostname) 8. scheduler .. image:: /images/rpc/state.png :width: 60% .. RabbitMQ Gotchas ---------------- Cinder uses Kombu to connect to the RabbitMQ environment. Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can also be found in the Kombu documentation; it has been summarized and revised here for the sake of clarity): * Hostname: The hostname to the AMQP server. * Userid: A valid username used to authenticate to the server. * Password: The password used to authenticate to the server. * Virtual_host: The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". * Port: The port of the AMQP server. Default is 5672 (amqp). The following parameters are default: * Insist: insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. * Connect_timeout: the timeout in seconds before the client gives up connecting to the server. The default is no timeout. * SSL: use SSL to connect to the server. The default is False. More precisely, Consumers need the following parameters: * Connection: the above mentioned Connection object. * Queue: name of the queue. * Exchange: name of the exchange the queue binds to. * Routing_key: the interpretation of the routing key depends on the value of the exchange_type attribute.
* Direct exchange: if the routing key property of the message and the routing_key attribute of the queue are identical, then the message is forwarded to the queue. * Fanout exchange: messages are forwarded to the queues bound to the exchange, even if the binding does not have a key. * Topic exchange: if the routing key property of the message matches the routing key of the queue according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (".", like domain names), and two special characters are available; star ("*") and hash ("#"). The star matches any word, and the hash matches zero or more words. For example "*.stock.#" matches the routing keys "usd.stock" and "eur.stock.db" but not "stock.nasdaq". * Durable: this flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. * Auto_delete: if set, the exchange is deleted when all queues have finished using it. Default is False. * Exclusive: exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies auto_delete. Default is False. * Exchange_type: AMQP defines several default exchange types (routing algorithms) that cover most of the common messaging use cases. * Auto_ack: acknowledgement is handled automatically once messages are received. By default auto_ack is set to False, and the receiver is required to manually handle acknowledgment. * No_ack: it disables acknowledgement on the server-side. This is different from auto_ack in that acknowledgement is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. * Auto_declare: if this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. Publishers specify most of the parameters of Consumers (except that they do not specify a queue name), but they can also specify the following: * Delivery_mode: the default delivery mode used for messages. The value is an integer. The following delivery modes are supported by RabbitMQ: * 1 or "transient": the message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. * 2 or "persistent": the message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. The default value is 2 (persistent). During a send operation, Publishers can override the delivery mode of messages so that, for example, transient messages can be sent over a durable queue. cinder-8.0.0/doc/source/devref/gerrit.rst0000664000567000056710000000133712701406250021473 0ustar jenkinsjenkins00000000000000Code Reviews with Gerrit ======================== Cinder uses the `Gerrit`_ tool to review proposed code changes. The review site is http://review.openstack.org. Gerrit is a complete replacement for Github pull requests. `All Github pull requests to the Cinder repository will be ignored`. See `Gerrit Workflow Quick Reference`_ for information about how to get started using Gerrit.
See `Development Workflow`_ for more detailed documentation on how to work with Gerrit. .. _Gerrit: http://code.google.com/p/gerrit .. _Development Workflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow .. _Gerrit Workflow Quick Reference: http://docs.openstack.org/infra/manual/developers.html#development-workflow cinder-8.0.0/doc/source/devref/addmethod.openstackapi.rst0000664000567000056710000000441512701406250024610 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Adding a Method to the OpenStack API ==================================== The interface is a mostly RESTful API. REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. Routing ------- To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information. URLs are mapped to "action" methods on "controller" classes in ``cinder/api/openstack/__init__/ApiRouter.__init__`` . See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two: - mapper.connect() lets you map a single URL to a single action on a controller. - mapper.resource() connects many standard URLs to actions on a controller. Controllers and actions ----------------------- Controllers live in ``cinder/api/openstack``, and inherit from cinder.wsgi.Controller. See ``cinder/api/v2/volumes.py`` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. Serialization ------------- Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML based on the request's content-type. Faults ------ If you need to return a non-200, you should return faults.Fault(webob.exc.HTTPNotFound()) replacing the exception as appropriate. cinder-8.0.0/doc/source/devref/api.apache.rst0000664000567000056710000000326412701406250022171 0ustar jenkinsjenkins00000000000000.. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Running Cinder API under Apache =============================== Files ----- Copy the file etc/cinder/api-httpd.conf to the appropriate location for your Apache server, most likely: ``/etc/httpd/conf.d/cinder_wsgi.conf`` Update this file to match your system configuration (for example, some distributions put httpd logs in the apache2 directory and some in the httpd directory). Create the directory /var/www/cgi-bin/cinder/. You can either hard or soft link the file cinder/wsgi/wsgi.py to be osapi_volume under the /var/www/cgi-bin/cinder/ directory. For a distribution appropriate place, it should probably be copied to: ``/usr/share/openstack/cinder/httpd/cinder.py`` Cinder's primary configuration file (etc/cinder.conf) and the PasteDeploy configuration file (etc/cinder-paste.ini) must be readable by httpd in one of the default locations described in Configuring Cinder. Access Control -------------- If you are running with a Linux kernel security module enabled (for example SELinux or AppArmor), make sure that the configuration file has the appropriate context to access the linked file.cinder-8.0.0/doc/source/devref/scheduler.rst0000664000567000056710000000310212701406250022147 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Scheduler ========= The :mod:`cinder.scheduler.manager` Module ------------------------------------------ .. automodule:: cinder.scheduler.manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cinder.scheduler.driver` Module ----------------------------------------- .. automodule:: cinder.scheduler.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cinder.scheduler.filter_scheduler` Driver ---------------------------------------------------- .. automodule:: cinder.scheduler.filter_scheduler :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`scheduler_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.tests.scheduler_unittest :noindex: :members: :undoc-members: :show-inheritance: cinder-8.0.0/doc/source/devref/architecture.rst0000664000567000056710000000506012701406250022656 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Cinder System Architecture ========================== The Cinder Block Storage Service is intended to be run on one or more nodes. Cinder uses a sql-based central database that is shared by all Cinder services in the system. The amount and depth of the data fits into a sql database quite well. For small deployments this seems like an optimal solution. For larger deployments, and especially if security is a concern, cinder will be moving towards multiple data stores with some kind of aggregation system. Components ---------- Below you will find a brief explanation of the different components. ::

                                      /- ( LDAP )
                  [ Auth Manager ] ---
                         |            \- ( DB )
                         |
                         |      cinderclient
                         |     /            \
        [ Web Dashboard ]-    -[ api ] -- < AMQP > -- [ scheduler ] -- [ volume ] -- ( iSCSI )
                         |     \            /
                         |      novaclient
                         |          |
                         |          |
                         |          |
                      < REST >

* DB: sql database for data storage. Used by all components (LINKS NOT SHOWN) * Web Dashboard: potential external component that talks to the api * api: component that receives http requests, converts commands and communicates with other components via the queue or http * Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system. * scheduler: decides which host gets each volume * volume: manages dynamically attachable block devices. cinder-8.0.0/doc/source/devref/fakes.rst0000664000567000056710000000433212701406250021266 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Fake Drivers ============ .. todo:: document general info about fakes When the real thing isn't available and you have some development to do these fake implementations of various drivers let you get on with your day. The :mod:`cinder.virt.fake` Module ---------------------------------- .. automodule:: cinder.virt.fake :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cinder.auth.fakeldap` Module -------------------------------------- .. automodule:: cinder.auth.fakeldap :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cinder.testing.fake.rabbit` Module -------------------------------------------- .. automodule:: cinder.testing.fake.rabbit :noindex: :members: :undoc-members: :show-inheritance: The :class:`cinder.volume.driver.FakeAOEDriver` Class ----------------------------------------------------- .. autoclass:: cinder.volume.driver.FakeAOEDriver :noindex: :members: :undoc-members: :show-inheritance: The :class:`cinder.tests.service_unittest.FakeManager` Class ------------------------------------------------------------ .. autoclass:: cinder.tests.service_unittest.FakeManager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cinder.tests.api.openstack.fakes` Module --------------------------------------------------
.. automodule:: cinder.tests.api.openstack.fakes :noindex: :members: :undoc-members: :show-inheritance: cinder-8.0.0/doc/source/devref/api_microversion_dev.rst0000664000567000056710000002503212701406250024403 0ustar jenkinsjenkins00000000000000API Microversions ================= Background ---------- Cinder uses a framework we call 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API. So breaking changes can be added to the API without breaking users who don't specifically ask for it. This is done with an HTTP header ``OpenStack-API-Version`` which is a monotonically increasing semantic version number starting from ``3.0``. Each service that uses microversions will share this header, so the Volume service will need to specify ``volume``: ``OpenStack-API-Version: volume 3.0`` If a user makes a request without specifying a version, they will get the ``DEFAULT_API_VERSION`` as defined in ``cinder/api/openstack/api_version_request.py``. This value is currently ``3.0`` and is expected to remain so for quite a long time. The Nova project was the first to implement microversions. For full details please read Nova's `Kilo spec for microversions <http://specs.openstack.org/openstack/nova-specs/specs/kilo/implemented/api-microversions.html>`_ When do I need a new Microversion? ---------------------------------- A microversion is needed when the contract to the user is changed. The user contract covers many kinds of information such as: - the Request - the list of resource urls which exist on the server Example: adding a new shares/{ID}/foo which didn't exist in a previous version of the code - the list of query parameters that are valid on urls Example: adding a new parameter ``is_yellow`` servers/{ID}?is_yellow=True - the list of query parameter values for non free form fields Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D". - new headers accepted on a request - the Response - the list of attributes and data structures returned Example: adding a new attribute 'locked': True/False to the output of shares/{ID} - the allowed values of non free form fields Example: adding a new allowed ``status`` to shares/{ID} - the list of status codes allowed for a particular request Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409. - changing a status code on a particular response Example: changing the return code of an API from 501 to 400. - new headers returned on a response The following flow chart attempts to walk through the process of "do we need a microversion". .. graphviz:: digraph states { label="Do I need a microversion?"
silent_fail[shape="diamond", style="", label="Did we silently fail to do what is asked?"]; ret_500[shape="diamond", style="", label="Did we return a 500 before?"]; new_error[shape="diamond", style="", label="Are we changing what status code is returned?"]; new_attr[shape="diamond", style="", label="Did we add or remove an attribute to a payload?"]; new_param[shape="diamond", style="", label="Did we add or remove an accepted query string parameter or value?"]; new_resource[shape="diamond", style="", label="Did we add or remove a resource url?"]; no[shape="box", style=rounded, label="No microversion needed"]; yes[shape="box", style=rounded, label="Yes, you need a microversion"]; no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; silent_fail -> ret_500[label="no"]; silent_fail -> no2[label="yes"]; ret_500 -> no2[label="yes [1]"]; ret_500 -> new_error[label="no"]; new_error -> new_attr[label="no"]; new_error -> yes[label="yes"]; new_attr -> new_param[label="no"]; new_attr -> yes[label="yes"]; new_param -> new_resource[label="no"]; new_param -> yes[label="yes"]; new_resource -> no[label="no"]; new_resource -> yes[label="yes"]; {rank=same; yes new_attr} {rank=same; no2 ret_500} {rank=min; silent_fail} } **Footnotes** [1] - When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion. The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in Cinder. If they do not, they will need to write conditional code in their application to handle ambiguities. When in doubt, consider application authors. If it would work with no client side changes on both Cinder versions, you probably don't need a microversion. If, on the other hand, there is any ambiguity, a microversion is probably needed. In Code ------- In ``cinder/api/openstack/wsgi.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples: Adding a new API method ~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("3.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``OpenStack-API-Version`` of >= ``3.4``. If they had specified a lower version (or not specified it and received the default of ``3.0``) the server would respond with ``HTTP/404``. Removing an API method ~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("3.1", "3.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``OpenStack-API-Version`` of <= ``3.4``. If ``3.5`` or later is specified the server will respond with ``HTTP/404``. Changing a method's behaviour ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("3.1", "3.3") def my_api_method(self, req, id): .... method_1 ... @wsgi.Controller.api_version("3.4") # noqa def my_api_method(self, req, id): .... method_2 ... If a caller specified ``3.1``, ``3.2`` or ``3.3`` they would see the result from ``method_1``; with ``3.4`` or later they would see the result from ``method_2``.
It is vital that the two methods have the same name, so the second of them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. The two methods may be different in any kind of semantics (schema validation, return values, response codes, etc.). A method with only small changes between versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A method may have only small changes between microversions, in which case you can decorate a private method:: @api_version("3.1", "3.4") def _version_specific_func(self, req, arg1): pass @api_version(min_version="3.5") # noqa def _version_specific_func(self, req, arg1): pass def show(self, req, id): .... common stuff .... self._version_specific_func(req, "foo") .... common stuff .... When not using decorators ~~~~~~~~~~~~~~~~~~~~~~~~~ When you don't want to use the ``@api_version`` decorator on a method or you want to change behaviour within a method (for example, when doing so leads to simpler or substantially less code) you can directly test for the requested version with a method as long as you have access to the api request object (commonly called ``req``). Every API method has an api_version_request object attached to the req object and that can be used to modify behaviour based on its value:: def index(self, req): req_version = req.api_version_request if req_version.matches("3.1", "3.5"): ....stuff.... elif req_version.matches("3.6", "3.10"): ....other stuff.... elif req_version > api_version_request.APIVersionRequest("3.10"): ....more stuff..... The first argument to the matches method is the minimum acceptable version and the second is the maximum acceptable version. A specified version can be null:: null_version = APIVersionRequest() If the minimum version specified is null then there is no restriction on the minimum version, and likewise if the maximum version is null there is no restriction on the maximum version. Alternatively a one-sided comparison can be used as in the example above. Other necessary changes ----------------------- If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change: * Update ``REST_API_VERSION_HISTORY`` in ``cinder/api/openstack/api_version_request.py`` * Update ``_MAX_API_VERSION`` in ``cinder/api/openstack/api_version_request.py`` * Add a verbose description to ``cinder/api/openstack/rest_api_version_history.rst``. There should be enough information that it could be used by the docs team for release notes. * Update the expected versions in affected tests. Allocating a microversion ------------------------- If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number. Except under extremely unusual circumstances (which would have been mentioned in the blueprint for the change), the minor number of ``_MAX_API_VERSION`` will be incremented. This will also be the new microversion number for the API change. It is possible that multiple microversion patches would be proposed in parallel and the microversions would conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need to rebase their patch over time, calculating a new version number as above based on the updated value of ``_MAX_API_VERSION``. Testing Microversioned API Methods ---------------------------------- Unit tests for microversions should be put in cinder/tests/unit/api/v3/.
Allocating a microversion
-------------------------

If you are adding a patch which adds a new microversion, it is
necessary to allocate the next microversion number. Except under
extremely unusual circumstances (which would have been mentioned in the
blueprint for the change), the minor number of ``_MAX_API_VERSION``
will be incremented. This will also be the new microversion number for
the API change.

It is possible that multiple microversion patches will be proposed in
parallel and that their microversion numbers will conflict; this will
cause a merge conflict. We don't reserve a microversion for each patch
in advance, as we don't know the final merge order. Over time,
developers may need to rebase their patch and recalculate the version
number, as above, based on the updated value of ``_MAX_API_VERSION``.

Testing Microversioned API Methods
----------------------------------

Unit tests for microversions should be put in
``cinder/tests/unit/api/v3/``. Since all existing functionality is
tested in ``cinder/tests/unit/api/v2``, those unit tests are not
replicated in ``.../v3``; only new functionality needs to be placed in
the ``.../v3/`` directory.

Testing a microversioned API method is very similar to a normal
controller method test; you just need to add the
``OpenStack-API-Version`` header, for example::

    req = fakes.HTTPRequest.blank('/testable/url/endpoint')
    req.headers = {'OpenStack-API-Version': 'volume 3.2'}
    req.api_version_request = api_version.APIVersionRequest('3.2')

    controller = controller.TestableController()

    res = controller.index(req)
    ... assertions about the response ...

cinder-8.0.0/doc/source/devref/database.rst0000664000567000056710000000330012701406250021733 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the
      Administrator of the National Aeronautics and Space Administration.
      All Rights Reserved.

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

The Database Layer
==================

The :mod:`cinder.db.api` Module
-------------------------------

.. automodule:: cinder.db.api
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The Sqlalchemy Driver
---------------------

The :mod:`cinder.db.sqlalchemy.api` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: cinder.db.sqlalchemy.api
    :noindex:

The :mod:`cinder.db.sqlalchemy.models` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: cinder.db.sqlalchemy.models
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

The :mod:`cinder.db.sqlalchemy.session` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: cinder.db.sqlalchemy.session
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

Tests
-----

Tests are lacking for the db api layer and for the sqlalchemy driver.
Failures in the drivers would be detected in other test cases, though.

cinder-8.0.0/doc/source/devref/launchpad.rst0000664000567000056710000000316412701406250022136 0ustar jenkinsjenkins00000000000000Project hosting with Launchpad
==============================

`Launchpad`_ hosts the Cinder project. The Cinder project homepage on
Launchpad is http://launchpad.net/cinder.

Launchpad credentials
---------------------

Creating a login on Launchpad is important even if you don't use the
Launchpad site itself, since Launchpad credentials are used for logging
in on several OpenStack-related sites. These sites include:

* `Wiki`_
* Gerrit (see :doc:`gerrit`)
* Jenkins (see :doc:`jenkins`)

Mailing list
------------

The mailing list email is ``openstack@lists.openstack.org``. This is a
common mailing list across the OpenStack projects. To participate in
the mailing list:

#. Subscribe to the list at
   http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack

The mailing list archives are at
http://lists.openstack.org/pipermail/openstack/.

Bug tracking
------------

Report Cinder bugs at https://bugs.launchpad.net/cinder

Feature requests (Blueprints)
-----------------------------

Cinder uses Launchpad Blueprints to track feature requests.
Blueprints are at https://blueprints.launchpad.net/cinder.

Technical support (Answers)
---------------------------

Cinder uses Launchpad Answers to track Cinder technical support
questions. The Cinder Answers page is at
https://answers.launchpad.net/cinder.

Note that `Ask OpenStack`_ (which is not hosted on Launchpad) can also
be used for technical support requests.

.. _Launchpad: http://launchpad.net
.. _Wiki: http://wiki.openstack.org
.. _Cinder Team: https://launchpad.net/~cinder
.. _OpenStack Team: https://launchpad.net/~openstack
.. _Ask OpenStack: http://ask.openstack.org

cinder-8.0.0/doc/source/devref/i18n.rst0000664000567000056710000000240712701406250020755 0ustar jenkinsjenkins00000000000000Internationalization
====================

cinder uses `gettext `_ so that user-facing strings such as log
messages appear in the appropriate language in different locales.

To use gettext, make sure that the strings passed to the logger are
wrapped in a ``_()`` function call. For example::

    LOG.info(_("block_device_mapping %s") % block_device_mapping)

Do not use ``locals()`` for formatting messages because:

1. It is not as clear as using explicit dicts.
2. It could produce hidden errors during refactoring.
3. Changing the name of a variable causes a change in the message.
4. It creates a lot of otherwise unused variables.

If you do not follow the project conventions, your code may cause the
LocalizationTestCase.test_multiple_positional_format_placeholders test
to fail in cinder/tests/test_localization.py.

For translation to work properly, the top-level scripts for Cinder need
to first do the following before any Cinder modules are imported::

    from cinder import i18n
    i18n.enable_lazy()

Any files that use ``_()`` for translation must then have the following
lines::

    from cinder.i18n import _

If the above code is missing, it may result in an error that looks
like::

    NameError: name '_' is not defined

cinder-8.0.0/doc/source/devref/rolling.upgrades.rst0000664000567000056710000003777212701406250023462 0ustar jenkinsjenkins00000000000000..
      Copyright (c) 2016 Intel Corporation
      All Rights Reserved.

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

Upgrades
========

Starting from the Mitaka release, Cinder gained the ability to be
upgraded without introducing downtime of control plane services.
Operators can simply upgrade Cinder service instances one-by-one. To
achieve that, developers need to make sure that any introduced change
doesn't break older services running in the same Cinder deployment.

In general there is a requirement that release N keeps backward
compatibility with release N-1, so that in a deployment N's and N-1's
services can safely coexist. This means that when performing a live
upgrade you cannot skip any release (e.g. you cannot upgrade N to N+2
without upgrading it to N+1 first). Further in the document, N will
denote the current release, N-1 a previous one, N+1 the next one, etc.
Having in mind that we only support compatibility with N-1, most of the
compatibility code written in N needs to exist just for one release and
can be removed in the beginning of N+1. A good practice here is to mark
it with :code:`TODO` or :code:`FIXME` comments to make it easy to find
in the future.

Please note that a proper upgrade solution should support both
release-to-release upgrades and upgrades of deployments following
Cinder master more closely. We cannot just merge patches implementing
compatibility at the end of the release - we should keep things
compatible through the whole release.

To achieve compatibility, discipline is required from the developers.
There are several planes on which incompatibility may occur:

* **REST API changes** - these are prohibited by definition and this
  document will not describe the subject. For further information one
  may use `API Working Group guidelines `_ for reference.
* **Database schema migrations** - e.g. if N-1 was relying on some
  column in the DB being present, N's migrations cannot remove it.
  N+1's however can (assuming N has no notion of the column).
* **Database data migrations** - if a migration requires a large amount
  of data to be transferred between columns or tables, or converted, it
  will most likely lock the tables. This may cause services to be
  unresponsive, causing downtime.
* **RPC API changes** - adding or removing an RPC method parameter, or
  the method itself, may lead to incompatibilities.
* **RPC payload changes** - adding, renaming or removing a field from
  the dict passed over RPC may lead to incompatibilities.

The next sections of this document focus on the last four points and
provide means to tackle the required changes in these matters while
maintaining backward compatibility.

Database schema and data migrations
-----------------------------------

In general, incompatible database schema migrations can be traced to
ALTER and DROP SQL commands issued either against a column or a table.
This is why a unit test that blocks such migrations was introduced. We
should try to keep our DB modifications additive. Moreover, we should
aim not to introduce migrations that cause the database tables to lock
for a long period. A long lock on a whole table can block other queries
and may make real requests fail.

Adding a column
...............

This is the simplest case - we don't have any requirements when adding
a new column apart from the fact that it should be added as the last
one in the table. If that's covered, the DB engine will make sure the
migration won't be disruptive.

Dropping a column not referenced in SQLAlchemy code
...................................................

When we want to remove a column that wasn't present in any SQLAlchemy
model, or was in the model but the model was not referenced in any
SQLAlchemy API function (this basically means that N-1 wasn't depending
on the presence of that column in the DB), then the situation is
simple. We should be able to safely drop the column in the N release.

Removal of unnecessary column
.............................

When we want to remove a used column without migrating any data out of
it (for example because what's kept in the column is obsolete), then we
just need to remove it from the SQLAlchemy model and API in the N
release. In N+1, or as a post-upgrade migration in N, we can merge a
migration issuing a DROP for this column (we cannot do that earlier
because N-1 will depend on the presence of that column).
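For reference, the simple additive case described in `Adding a column`_
might look like the following sqlalchemy-migrate style sketch (the new
column name is hypothetical; ``services`` is just an example table)::

    from sqlalchemy import Column, MetaData, String, Table


    def upgrade(migrate_engine):
        # Purely additive: the new, nullable column is appended at the
        # end of the table, so N-1 services keep working unmodified and
        # no data migration (and thus no long table lock) is needed.
        meta = MetaData()
        meta.bind = migrate_engine
        services = Table('services', meta, autoload=True)
        example_field = Column('example_field', String(255), nullable=True)
        services.create_column(example_field)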
ALTER on a column
.................

A rule of thumb to judge which ALTER or DROP migrations should be
allowed is to look in the `MySQL documentation `_. If an operation has
"yes" in all 4 columns besides "Copies Table?", then it *probably* can
be allowed. If an operation doesn't allow concurrent DML, it means that
table row modifications or additions will be blocked during the
migration. This sometimes isn't a problem - for example it's not the
end of the world if a service won't be able to report its status one or
two times (and the :code:`services` table is normally small). Please
note that even if this does apply to the "rename a column" operation,
we cannot simply do such an ALTER, as N-1 will depend on the older
name.

If an operation on a column or table cannot be allowed, then it is
required to create a new column with the desired properties and start
moving the data (in a live manner). In the worst case the old column
can be removed in N+2. The whole procedure is described in more detail
below.

In the aforementioned case we need to take more complicated steps
stretching through 3 releases - always keeping backward compatibility.
In short, when we want to start to move data inside the DB, then in N
we should:

* Add a new column for the data.
* Write data to both places (N-1 needs to read it).
* Read data from the old place (N-1 writes there).
* Prepare an online data migration cinder-manage command to be run
  before upgrading to N+1 (because N+1 will read from the new place, so
  we need to make sure all the records have the new place populated).

In N+1 we should:

* Write data to both places (N reads from the old one).
* Read data from the new place (N saves there).

In N+2 we should:

* Remove the old place from SQLAlchemy.
* Read and write only to the new place.
* Remove the column as a post-upgrade migration (or as the first
  migration in N+3).

Please note that this is the most complicated case. If the data in the
column cannot actually change (for example :code:`host` in the
:code:`services` table), in N we can read from the new place and fall
back to the old place if the data is missing. This way we can skip one
release from the process.

Of course real-world examples may be different. E.g. sometimes it may
be required to write some more compatibility code in the
oslo.versionedobjects layer to compensate for different versions of
objects passed over RPC. This is explained more in the `RPC payload
changes (oslo.versionedobjects)`_ section.

More details about that can be found in the
`online-schema-upgrades spec `_.

RPC API changes
---------------

Service communication can obviously break if the RPC interface changes.
In particular this applies to changes of the RPC method definitions. To
avoid that we assume N's RPC API compatibility with the N-1 version
(both ways - the :code:`rpcapi` module should be able to downgrade the
message if needed, and the :code:`manager` module should be able to
tolerate receiving messages in an older version).

Below is an example RPC compatibility shim from Mitaka's
:code:`cinder.volume.manager`. This code allows us to tolerate older
versions of the messages::

    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True,
                      volume=None):
        """Creates the volume."""
        # FIXME(thangp): Remove this in v2.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the volume
            # by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

And here's a contrary shim in cinder.volume.rpcapi (the RPC client)
that downgrades the message to make sure it will be understood by older
instances of the service::

    def create_volume(self, ctxt, volume, host, request_spec,
                      filter_properties, allow_reschedule=True):
        request_spec_p = jsonutils.to_primitive(request_spec)
        msg_args = {'volume_id': volume.id, 'request_spec': request_spec_p,
                    'filter_properties': filter_properties,
                    'allow_reschedule': allow_reschedule}

        if self.client.can_send_version('1.32'):
            version = '1.32'
            msg_args['volume'] = volume
        else:
            version = '1.24'

        new_host = utils.extract_host(host)
        cctxt = self.client.prepare(server=new_host, version=version)
        cctxt.cast(ctxt, 'create_volume', **msg_args)

As can be seen, there's this magic :code:`self.client.can_send_version()`
method which detects if we're running in a version-heterogeneous
environment and need to downgrade the message. Detection is based on
dynamic RPC version pinning. In general all the services (managers)
report their supported RPC API version. The RPC API client gets all the
versions from the DB, chooses the lowest one and starts to downgrade
messages to it.

To limit the impact on the DB, the pinned version of a certain RPC API
is cached. After all the services in the deployment are updated, the
operator should restart all the services or send them a SIGHUP signal
to force a reload of the version pins.

As we need to support only the N RPC API in the N+1 release, we should
be able to drop all the compatibility shims in N+1. To be technically
correct when doing so, we should also bump the major RPC API version.
We do not need to do that in every release (it may happen that nothing
changes in the RPC API through the release, or that the technical debt
of the compatibility code costs less than the complicated procedure of
increasing the major version of the RPC APIs). The process of
increasing the major version is explained in detail in
`Nova's documentation `_. Please note that in the case of Cinder we're
accessing the DB from all of the services, so we should follow the more
complicated "Mixed version environments" process for every one of our
services.

In the case of removing a whole RPC method, we need to leave it in N's
manager and can remove it in N+1 (because N-1 will be talking with N).
When adding a new one, we need to make sure that when the RPC client is
pinned to a too-low version, any attempt to send the new message fails
(because the client will not know if the manager receiving the message
will understand it), or ensure the manager will get updated before the
clients by stating the recommended order of upgrades for that release.

RPC payload changes (oslo.versionedobjects)
-------------------------------------------

`oslo.versionedobjects `_ is a library that helps us to maintain
compatibility of the payload sent over RPC. As during the process of
upgrades it is possible that a newer version of the service will send
an object to an older one, it may happen that the newer object is
incompatible with the older service.

The version of an object should be bumped every time we make an
incompatible change inside it. The rule of thumb is that we should
always do that, but well-thought-out exceptions were also allowed in
the past (for example relaxing a NOT NULL constraint).

Imagine that we (finally!) decide that the :code:`request_spec` sent in
the :code:`create_volume` RPC cast is duplicating data and we want to
start to remove redundant occurrences.
When running in a version-mixed environment, older services will still
expect this redundant data. We need a way to somehow downgrade the
:code:`request_spec` before sending it over RPC. And this is where o.vo
comes in handy. o.vo provides us the infrastructure to keep the changes
in objects versioned and to be able to downgrade them to a particular
version.

Let's take a step back - similarly to the RPC API situation, we need a
way to tell if we need to send a backward-compatible version of the
message. In this case we need to know what version to downgrade the
object to. We're using a similar solution to the one used for the RPC
API for that. A problem here is that we need a single identifier (that
will be reported to the :code:`services` DB table) to denote the whole
set of versions of all the objects. To do that we've introduced the
concept of a :code:`CinderObjectVersionHistory` object, where we keep
sets of individual object versions aggregated into a single version
string. When making an incompatible change in a single object, you need
to bump its version (we have a unit test enforcing that) *and* add a
new version to :code:`cinder.objects.base.CinderObjectVersionsHistory`
(there's a unit test as well). Example code doing that is below::

    OBJ_VERSIONS.add('1.1', {'Service': '1.2', 'ServiceList': '1.1'})

This line adds a new 1.1 aggregated object version that is different
from 1.0 by two objects - :code:`Service` in 1.2 and
:code:`ServiceList` in 1.1. This means that the commit which added this
line bumped the versions of these two objects.

Now if we know that a service we're talking to is running the 1.1
aggregated version, we need to downgrade :code:`Service` and
:code:`ServiceList` to 1.2 and 1.1 respectively before sending. Please
note that of course other objects are included in the 1.1 aggregated
version, but you just need to specify what changed (all the other
versions of individual objects will be taken from the previous version
- 1.0 in this case).

Getting back to the :code:`request_spec` example: let's assume we want
to remove :code:`volume_properties` from there (most of the data in
there is already somewhere else inside the :code:`request_spec`
object). We've made a change in the object fields, we've bumped its
version (from 1.0 to 1.1), we've updated the hash in
:code:`cinder.tests.unit.test_objects` to synchronize it with the
current state of the object (making the unit test pass), and we've
added a new aggregated object history version in
:code:`cinder.objects.base`. What else is required? We need to provide
code that actually downgrades the RequestSpec object from 1.1 to 1.0,
to be used when sending the object to older services. This is done by
implementing the :code:`obj_make_compatible` method in the object::

    from oslo_utils import versionutils

    def obj_make_compatible(self, primitive, target_version):
        super(RequestSpec, self).obj_make_compatible(primitive,
                                                     target_version)
        target_version = versionutils.convert_version_to_tuple(
            target_version)
        if target_version < (1, 1) and 'volume_properties' not in primitive:
            volume_properties = {}
            # TODO: Aggregate all the required information from primitive.
            primitive['volume_properties'] = volume_properties

Please note that ``primitive`` is a dictionary representation of the
object and not an object itself. This is because o.vo objects are of
course sent over RPC as dicts.
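To see the downgrade path end to end, here is a small self-contained
toy example (not Cinder code; the object name and fields are made up)
that exercises :code:`obj_make_compatible` through o.vo's
:code:`obj_to_primitive`::

    from oslo_utils import versionutils
    from oslo_versionedobjects import base as ovo_base
    from oslo_versionedobjects import fields


    @ovo_base.VersionedObjectRegistry.register
    class ToySpec(ovo_base.VersionedObject):
        # Version 1.0: initial version
        # Version 1.1: dropped the redundant 'volume_properties' field
        VERSION = '1.1'
        fields = {'size': fields.IntegerField()}

        def obj_make_compatible(self, primitive, target_version):
            super(ToySpec, self).obj_make_compatible(primitive,
                                                     target_version)
            target_version = versionutils.convert_version_to_tuple(
                target_version)
            if target_version < (1, 1):
                # Re-add the field a 1.0 peer still expects.
                primitive['volume_properties'] = {}


    spec = ToySpec(size=1)
    old = spec.obj_to_primitive(target_version='1.0')
    # The wire-format dict now carries the re-added field:
    assert 'volume_properties' in old['versioned_object.data']

Serializing with an explicit :code:`target_version` is what the RPC
serializer does for us once the version pin indicates an older peer.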
With these pieces in place, Cinder will take care of sending
:code:`request_spec` with :code:`volume_properties` when running in a
mixed environment and without it when all services are upgraded, and it
will understand a :code:`request_spec` without the
:code:`volume_properties` element.

Note that the o.vo layer is able to recursively downgrade all of its
fields, so when :code:`request_spec` is used as a field in another
object, it will be correctly downgraded.

cinder-8.0.0/doc/source/devref/attach_detach_conventions.rst0000664000567000056710000001710312701406250025376 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

=============================
Volume Attach/Detach workflow
=============================

There are six API calls associated with attach/detach of volumes in
Cinder (3 calls for each operation). This can lead to some confusion
for developers trying to work on Cinder. The convention is actually
quite simple, although it may be difficult to decipher from the code.

Attach/Detach Operations are multi-part commands
================================================

There are three things that happen in the workflow for an attach or
detach call; the sketch after this list shows how the calls line up
end to end.

1. Update the status of the volume in the DB (i.e. attaching/detaching)

   - For Attach, this is the cinder.volume.api.reserve_volume method
   - For Detach, the analogous call is cinder.volume.api.begin_detaching

2. Handle the connection operations that need to be done on the Volume

   - For Attach, this is the cinder.volume.api.initialize_connection method
   - For Detach, the analogous call is cinder.volume.api.terminate_connection

3. Finalize the status of the volume and release the resource

   - For attach, this is the cinder.volume.api.attach method
   - For detach, the analogous call is cinder.volume.api.detach
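Putting the two workflows side by side, a condensed sketch of the call
order looks like this (it uses the signatures documented in the
sections below; the context, volume and connector objects are assumed
to come from the caller, and this is illustrative rather than
copy-paste code)::

    def attach_then_detach(volume_api, ctxt, volume, connector,
                           instance_uuid, mountpoint):
        # Attach: reserve, build connection info, finalize.
        volume_api.reserve_volume(ctxt, volume)    # 'available' -> 'attaching'
        conn_info = volume_api.initialize_connection(ctxt, volume, connector)
        # ... the caller (e.g. Nova) makes the iSCSI/FC connection
        # using conn_info before finalizing ...
        volume_api.attach(ctxt, volume, instance_uuid, None, mountpoint, 'rw')

        # Detach: the mirror image of the above.
        volume_api.begin_detaching(ctxt, volume)   # 'in-use' -> 'detaching'
        volume_api.terminate_connection(ctxt, volume, connector)
        volume_api.detach(ctxt, volume, None)      # attachment_id unused here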
Attach workflow
===============

reserve_volume(self, context, volume)
-------------------------------------

Probably the simplest call into Cinder. This method simply checks that
the specified volume is in an "available" state and can be attached.
Any other state results in an Error response notifying Nova that the
volume is NOT available. The only valid state for this call to succeed
is "available".

NOTE: multi-attach will add "in-use" to the above acceptable states.

If the volume is in fact available, we immediately issue an update to
the Cinder database and mark the status of the volume to "attaching",
thereby reserving the volume so that it won't be used by another API
call anywhere else.

initialize_connection(self, context, volume, connector)
--------------------------------------------------------

This is the only attach-related API call that should be doing any
significant work. This method is responsible for building and returning
all of the info needed by the caller (Nova) to actually attach the
specified volume to the remote node. This method returns vital
information to the caller that includes things like CHAP credentials,
iqn and lun information.

An example response is shown here::

    {'driver_volume_type': 'iscsi',
     'data': {'auth_password': 'YZ2Hceyh7VySh5HY',
              'target_discovered': False,
              'encrypted': False,
              'qos_specs': None,
              'target_iqn': 'iqn.2010-10.org.openstack:volume-8b1ec3fe-8c57-45ca-a1cf-a481bfc8fce2',
              'target_portal': '11.0.0.8:3260',
              'volume_id': '8b1ec3fe-8c57-45ca-a1cf-a481bfc8fce2',
              'target_lun': 1,
              'access_mode': 'rw',
              'auth_username': 'nE9PY8juynmmZ95F7Xb7',
              'auth_method': 'CHAP'}}

In the process of building this data structure, the Cinder Volume
Manager makes a number of calls to the backend driver, and builds a
volume_attachment entry in the database to store the connection
information passed in via the connector object.

driver.validate_connector
*************************

Simply verifies that the initiator data is included in the passed-in
connector (there are some drivers that utilize pieces of this connector
data, but in the case of the reference driver, it just verifies it's
there).

driver.create_export
********************

This sets up the target-specific, persistent data associated with a
volume. This method is responsible for building an actual iSCSI target
and providing the "location" and "auth" information which will be used
to form the response data in the parent request. We call this info the
model_update, and it's used to update vital target information
associated with the volume in the Cinder database.

driver.initialize_connection
****************************

Now that we've actually built a target and persisted the important bits
of information associated with it, we're ready to actually assign the
target to a volume and form the needed info to pass back out to our
caller. This is where we finally put everything together and form the
example data structure response shown earlier.

This method is sort of deceptive; it does a whole lot of formatting of
the data we've put together in the create_export call, but it doesn't
really offer any new info. It's completely dependent on the information
that was gathered in the create_export call and put into the database.
At this point, all we're doing is taking all the various entries from
the database and putting them together into the desired
format/structure. The key method call for updating and obtaining all of
this info was done by the create_export call. This formatted data is
then passed back up to the API and returned as the response back out to
Nova.

At this point, we return attach info to the caller that provides
everything needed to make the remote iSCSI connection.

attach(self, context, volume, instance_uuid, host_name, mount_point, mode)
---------------------------------------------------------------------------

This is the last call, and it *should* be pretty simple. The intent is
that it is simply used to finalize the attach process. In other words,
we simply update the status on the Volume in the database, and provide
a mechanism to notify the driver that the attachment has completed
successfully. There's some additional information that has been added
to this finalize call over time, like instance_uuid, host_name, etc.
Some of these are only provided during the actual attach call and may
be desired for some drivers for one reason or another.

Detach workflow
===============

begin_detaching(self, context, volume)
--------------------------------------

Analogous to the Attach workflow's ``reserve_volume`` method. Performs
a simple conditional update of the Volume status to ``detaching``.
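The ``connector`` that ``initialize_connection`` above and
``terminate_connection`` below receive is a plain dict describing the
attaching host. The exact keys vary by transport, but an illustrative
iSCSI one (values made up, keys as typically gathered by os-brick on
the initiator side) looks like::

    connector = {'initiator': 'iqn.1993-08.org.debian:01:abc123',
                 'host': 'compute-node-1',
                 'ip': '10.0.0.2',
                 'platform': 'x86_64',
                 'os_type': 'linux2',
                 'multipath': False}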
terminate_connection(self, context, volume, connector, force=False)
--------------------------------------------------------------------

Analogous to the Attach workflow's ``initialize_connection`` method.
Used to send calls down to drivers/target-drivers to do any sort of
cleanup they might require. For most drivers this is a no-op, as
connections and **iscsi session management is the responsibility of the
initiator**. HOWEVER, there are a number of special cases here,
particularly for target-drivers like LIO that use access-groups; in
those cases they remove the initiator from the access list during this
call, which effectively closes sessions from the target side.

detach(self, context, volume, attachment_id)
--------------------------------------------

The final update to the DB, and yet another opportunity to pass
something down to the volume-driver. Initially a simple call-back that
now has quite a bit of cruft built up in the volume-manager. For
drivers like LVM this again is a no-op and just updates the DB entry to
mark things as complete and set the volume to available again.

cinder-8.0.0/doc/source/oslo-middleware.rst0000664000567000056710000000017112701406250022006 0ustar jenkinsjenkins00000000000000==========================
Oslo Middleware
==========================

.. list-plugins:: oslo_middleware
   :detailed:

cinder-8.0.0/doc/Makefile0000664000567000056710000000637212701406250016336 0ustar jenkinsjenkins00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXSOURCE = source PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest .DEFAULT_GOAL = html help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* -rm -rf cinder.sqlite if [ -f .autogenerated ] ; then \ cat .autogenerated | xargs rm ; \ rm .autogenerated ; \ fi html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cinder.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cinder.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." cinder-8.0.0/doc/ext/0000775000567000056710000000000012701406543015473 5ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/ext/__init__.py0000664000567000056710000000000012701406250017565 0ustar jenkinsjenkins00000000000000cinder-8.0.0/doc/ext/cinder_todo.py0000664000567000056710000000640512701406250020336 0ustar jenkinsjenkins00000000000000# This is a hack of the builtin todo extension, to make the todo_list # more user friendly from sphinx.ext.todo import * import re def _(s): return s def process_todo_nodes(app, doctree, fromdocname): if not app.config['todo_include_todos']: for node in doctree.traverse(todo_node): node.parent.remove(node) # Replace all todolist nodes with a list of the collected todos. # Augment each todo with a backlink to the original location. env = app.builder.env if not hasattr(env, 'todo_all_todos'): env.todo_all_todos = [] # remove the item that was added in the constructor, since I'm tired of # reading through docutils for the proper way to construct an empty list lists = [] for i in range(5): lists.append(nodes.bullet_list("", nodes.Text('', ''))) lists[i].remove(lists[i][0]) lists[i]['classes'].append('todo_list') for node in doctree.traverse(todolist): if not app.config['todo_include_todos']: node.replace_self([]) continue for todo_info in env.todo_all_todos: para = nodes.paragraph() filename = env.doc2path(todo_info['docname'], base=None) # Create a reference newnode = nodes.reference('', '') line_info = todo_info['lineno'] link = _('%(filename)s, line %(line_info)d') % locals() innernode = nodes.emphasis(link, link) newnode['refdocname'] = todo_info['docname'] try: newnode['refuri'] = app.builder.get_relative_uri( fromdocname, todo_info['docname']) newnode['refuri'] += '#' + todo_info['target']['refid'] except NoUri: # ignore if no URI can be determined, e.g. 
for LaTeX output pass newnode.append(innernode) para += newnode para['classes'].append('todo_link') todo_entry = todo_info['todo'] env.resolve_references(todo_entry, todo_info['docname'], app.builder) item = nodes.list_item('', para) todo_entry[1]['classes'].append('details') comment = todo_entry[1] m = re.match(r"^P(\d)", comment.astext()) priority = 5 if m: priority = int(m.group(1)) if priority < 0: priority = 1 if priority > 5: priority = 5 item['classes'].append('todo_p' + str(priority)) todo_entry['classes'].append('todo_p' + str(priority)) item.append(comment) lists[priority - 1].insert(0, item) node.replace_self(lists) def setup(app): app.add_config_value('todo_include_todos', False, False) app.add_node(todolist) app.add_node(todo_node, html=(visit_todo_node, depart_todo_node), latex=(visit_todo_node, depart_todo_node), text=(visit_todo_node, depart_todo_node)) app.add_directive('todo', Todo) app.add_directive('todolist', TodoList) app.connect('doctree-read', process_todos) app.connect('doctree-resolved', process_todo_nodes) app.connect('env-purge-doc', purge_todos) cinder-8.0.0/doc/ext/cinder_autodoc.py0000664000567000056710000000042612701406250021024 0ustar jenkinsjenkins00000000000000from __future__ import print_function import gettext import os gettext.install('cinder') from cinder import utils def setup(app): print("**Autodocumenting from %s" % os.path.abspath(os.curdir)) rv = utils.execute('./doc/generate_autodoc_index.sh') print(rv[0]) cinder-8.0.0/doc/generate_autodoc_index.sh0000775000567000056710000000177512701406250021736 0ustar jenkinsjenkins00000000000000#!/bin/sh SOURCEDIR=doc/source/api if [ ! -d ${SOURCEDIR} ] ; then mkdir -p ${SOURCEDIR} fi for x in `./doc/find_autodoc_modules.sh`; do echo "Generating ${SOURCEDIR}/${x}.rst" echo "${SOURCEDIR}/${x}.rst" >> .autogenerated heading="The :mod:\`${x}\` Module" # Figure out how long the heading is # and make sure to emit that many '=' under # it to avoid heading format errors # in Sphinx. heading_len=$(echo "$heading" | wc -c) underline=$(head -c $heading_len < /dev/zero | tr '\0' '=') ( cat < ${SOURCEDIR}/${x}.rst done if [ ! -f ${SOURCEDIR}/autoindex.rst ] ; then cat > ${SOURCEDIR}/autoindex.rst <> ${SOURCEDIR}/autoindex.rst done echo ${SOURCEDIR}/autoindex.rst >> .autogenerated fi cinder-8.0.0/doc/README.rst0000664000567000056710000000217312701406250016360 0ustar jenkinsjenkins00000000000000================= Building the docs ================= Dependencies ============ Sphinx_ You'll need sphinx (the python one) and if you are using the virtualenv you'll need to install it in the virtualenv specifically so that it can load the cinder modules. :: pip install Sphinx Graphviz_ Some of the diagrams are generated using the ``dot`` language from Graphviz. :: sudo apt-get install graphviz .. _Sphinx: http://sphinx.pocoo.org .. _Graphviz: http://www.graphviz.org/ Use `make` ========== Just type make:: % make Look in the Makefile for more targets. Manually ======== 1. Generate the code.rst file so that Sphinx will pull in our docstrings:: % ./generate_autodoc_index.sh > source/code.rst 2. Run `sphinx_build`:: % sphinx-build -b html source build/html Use `tox` ========= The easiest way to build the docs and avoid dealing with all dependencies is to let tox prepare a virtualenv and run the build_sphinx target inside the virtualenv:: % cd .. % tox -e docs The docs have been built ======================== Check out the `build` directory to find them. Yay! 
cinder-8.0.0/doc/.gitignore0000664000567000056710000000004512701406250016655 0ustar jenkinsjenkins00000000000000_build/* source/api/* .autogenerated cinder-8.0.0/LICENSE0000664000567000056710000002363712701406250015141 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. cinder-8.0.0/run_tests.sh0000775000567000056710000001777312701406250016525 0ustar jenkinsjenkins00000000000000#!/bin/bash set -eu function usage { echo "Usage: $0 [OPTION]..." echo "Run Cinder's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." echo " -n, --no-recreate-db Don't recreate the test database." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." 
echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" echo " -8, --pep8-only-changed Just run PEP8 and HACKING compliance check on files changed since HEAD~1" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." echo " -h, --help Print this usage message" echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" echo " --virtual-env-path Location of the virtualenv directory" echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory" echo " Default: .venv" echo " --tools-path Location of the tools directory" echo " Default: \$(pwd)" echo " --concurrency How many processes to use when running the tests. A value of 0 autodetects concurrency from your CPU count" echo " Default: 1" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -8|--pep8-only-changed) just_pep8_changed=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; --concurrency) (( i++ )) concurrency=${!i} ;; -*) testropts="$testropts ${!i}";; *) testrargs="$testrargs ${!i}" esac (( i++ )) done } tool_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_name:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= testrargs= testropts= wrapper="" just_pep8=0 just_pep8_changed=0 no_pep8=0 coverage=0 debug=0 recreate_db=1 update=0 concurrency=1 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C process_options $@ # Make our paths available to other scripts we call export venv_path export venv_dir export venv_name export tools_dir export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then # Default to running all tests if specific test is not # provided. testrargs="discover ./cinder/tests" fi ${wrapper} python -m testtools.run $testropts $testrargs # Short circuit because all of the testr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? 
fi if [ $coverage -eq 1 ]; then TESTRTESTS="$TESTRTESTS --coverage" if [ -z "${PYTHONPATH:-}" ]; then export PYTHONPATH=./ else export PYTHONPATH=$PYTHONPATH:./ fi else TESTRTESTS="$TESTRTESTS" fi # Just run the test suites in current environment set +e testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit --concurrency $concurrency $testropts $testrargs'" if [ setup.cfg -nt cinder.egg-info/entry_points.txt ] then ${wrapper} python setup.py egg_info fi echo "Running \`${wrapper} $TESTRTESTS\`" bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit-trace" RESULT=$? set -e copy_subunit_log if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" # Don't compute coverage for common code, which is tested elsewhere ${wrapper} coverage combine ${wrapper} coverage html --include='cinder/*' -d covhtml -i fi return $RESULT } function copy_subunit_log { LOGNAME=`cat .testrepository/next-stream` LOGNAME=$(($LOGNAME - 1)) LOGNAME=".testrepository/${LOGNAME}" cp $LOGNAME subunit.log } function warn_on_flake8_without_venv { if [ $never_venv -eq 1 ]; then echo "**WARNING**:" echo "Running flake8 without virtual env may miss OpenStack HACKING detection" fi } function run_pep8 { echo "Running flake8 ..." warn_on_flake8_without_venv bash -c "${wrapper} flake8" ${wrapper} bash tools/config/check_uptodate.sh --checkonly ${wrapper} tools/check_exec.py cinder || exit 1 } TESTRTESTS="python setup.py testr" if [ $never_venv -eq 0 ] then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi if [ $just_pep8_changed -eq 1 ]; then # NOTE(gilliard) We want to use flake8 to check the # entirety of every file that has a change in it. # Unfortunately the --filenames argument to flake8 only accepts # file *names* and there are no files named (eg) "nova/compute/manager.py". The # --diff argument behaves surprisingly as well, because although you feed it a # diff, it actually checks the file on disk anyway. files=$(git diff --name-only HEAD~1 | tr '\n' ' ') echo "Running flake8 on ${files}" warn_on_flake8_without_venv bash -c "diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff" exit fi run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (testropts), which begin with a '-', and # arguments (testrargs). 
if [ -z "$testrargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi cinder-8.0.0/ChangeLog0000664000567000056710000063410312701406541015705 0ustar jenkinsjenkins00000000000000CHANGES ======= 8.0.0 ----- * Huawei: Fix getting admin_metadata * Imported Translations from Zanata * NexentaStor4 iSCSI: convert blocksize to str * Imported Translations from Zanata * Imported Translations from Zanata * XtremIO handle errors in terminate_connection: * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Report 2.0 as supported version of RPC APIs * Fix volume RPC API methods related to backups * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Dell SC: create_cgsnapshot returning wrong structure * Imported Translations from Zanata * Fix race condition when toggling SP * Huawei: Record and check LUN wwn * SMBFS: fix parsing volume type extra specs and metadata * VMware: Bump driver version * Update .gitreview for stable/mitaka 8.0.0.0rc1 ---------- * Reserve 5 migrations for Mitaka backports * Add update_host for backup in cinder-manager * Fix formatting in vol/snap delete API messages * Cleaned duplicate dictionary values * Fix compatibility mode of backup jobs scheduling * Add devref on rolling upgrades * 3PAR fix create_cloned_volume for larger size * Check volume_id consistent when creating backup * Huawei: Check when attach hypermetro volume * Huawei: Check the QoS status before we use * LeftHand: Add default SSH timeout and key values * Pass correct source_id to _handle_bootable_volume_glance_meta() * Use get_by_args instead of get_by_host_and_topic * Fix format in cinder/volume/drivers/netapp/dataontap/block_base.py * Revert "VNX: Set timeout for naviseccli" * NetApp E-Series: Volumes not added to consisgroup * Fix volume migration VolumeType exception * Fix retype failure when original has no volume type * Add backup RPC API v2.0 * IBM XIV/DS8K: Implement Replication v2.1 * Huawei: Implement v2.1 replication * Remove circular import to fix config generation * Wrong comment line in quotas.py * Imported Translations from Zanata * VNX: Set timeout for naviseccli * VNX: Allow set migrate rate when migrating volumes * Fix ScaleIO driver does not honor clone size * Imported Translations from Zanata * Huawei: Creating hypermetro failed in the remote pool * Delete deprecated configuration in NFS tests * Imported Translations from Zanata * Emit notifications for volume retype * Fixup stats key for replication in init_host_with_pc * EMC VMAX - SSl connection is not picking up values * Add the key 'replication' and set the correct 'replication_status' * register the config generator default hook with the right name * Fix for Pure drivers not checking full client version * Fixup for Pure drivers cheesecake replication setup * Remove empty directories and unused files from unit tests * Cleanup 3PAR/LeftHand failover_host exceptions * Use googleapiclient import instead of apiclient * Enable api.view.test_versions unit tests * Allow clone volume with different size * DRBD driver: resize volume if cloned image is larger * Fix 500 error if 'offset' is out of range * Add volume RPC API v2.0 * Imported Translations from Zanata * Fix up failover_host exceptions to preserve states * Add release note for delete volume with snaps * RBD: remove duplicate clone test * Disallow quota deletes if default under usage * EMC VMAX - 
* Move replication_status update to init_with_rpc
* Permit volume type operations for policy authorized users
* Host selection in backup service
* Add volume_type to volume object expected_attrs
* VNX: Update replication for v2.1
* Imported Translations from Zanata
* Pass new volume size when cloning (blockbridge)
* Storwize: Update replication to v2.1
* LeftHand: Create cloned volume didn't honor size
* Switch failover-host from rpc call to cast
* Dell SC: Active_backend_id wrong type
* Show qos_specs_id based on policy
* Remove remaining oslo-incubator code from Cinder
* Updated from global requirements
* Pass RBD order to clone call
* Remove "sqlite_clean_db" option
* Exclude test.py from sample conf
* Fix invalid UUID warnings for test_volume_*
* Huawei: Check before delete host
* rbd: Change capacity calculation from integer to float
* Fix failure with rbd on slow ceph clusters
* Remove those unnecessary statements "return True"
* Imported Translations from Zanata
* Report versions in cinder-manager service list
* Dell SC: create_cloned_volume didn't honor size
* Cleanup Mitaka release notes
* Dell SC: Incorrect values in REST API Login call
* Moved CORS middleware configuration into oslo-config-generator
* Correcting thin provisioning behavior
* NetApp: volume resize using clone fails with QoS
* Fixes creating volume issue for multiple management IPs
* Imported Translations from Zanata
* Add volumes table definition when migrating to 67
* Trim 5s+ from storwize unit tests
* Allow api_version_request.matches to accept a string
* microversion header for legacy endpoints removed
* Update quotas to handle domain acting as project
* Continue volume delete on encryption key delete errors
* Fix backup import
* Unset executable bit in release note
* DRBD: Policy-based waiting for completion
* Block subtractive operations in DB migrations
* Handle exceptions about snapshot in backup create
* Replace logging with oslo_log
* Imported Translations from Zanata
* Fixup release notes for v2 -> v2.1 replication impls
* support new HTTP microversion header
* Fix for glance_metadata during volume migration
* Readd iscsi_target table
* Imported Translations from Zanata
* Fix issue with Pure drivers delete_snapshot exception handling
* Add backend id to Pure Volume Driver trace logs
* Don't fail on clearing 3PAR object volume key
* Fix invalid uuid warnings in backup unit tests
* Update quota_utils with import for keystone_auth
* Fix invalid uuid warnings in test_volume.py
* Tintri image direct clone
* Use get_by_args instead of host_and_topic
* Remove a vol in error state from a CG
* Fix call to Barbican Secrets create()
* 3PAR use same LUN id for each export path
* Fix oslo.service config generation
* Update unittest for Storwize pool-aware-cinder-scheduler
* Huawei: Check the real size before extend volume
* Revert "Remove Cisco FC Zone Manager Driver"
* Make query to quota usage table order preserved
* Allow for Pure drivers to verify HTTPS requests
* Fix volume filtering for quoted display name

8.0.0.0b3
---------

* Use openstack.org URLs in README
* Add attach/detach doc to index
* Don't run test_volume.VolumeTestCase twice
* Fixes running error for storwize _run_ssh
* Dell SC: Support Replication V2.1
* Use OSprofiler options consolidated in lib itself
* Fix test_create_volume_flow test issue
* Fix test isolation issues related to versions
* Add missing requirements
* Convert huawei ISCSIDriver unit tests to RFC5737 addrs
* Changes in ScaleIO configurations options
* Storwize/SVC: Clone between different size volumes
* Huawei: Manage volume fails due to lower array version
* Fix exception during service update
* Huawei: Create snapshot have a log error
* EMC VMAX - Limit SG and MV to 64 characters
* Fix spelling mistake in docstring
* EMC VMAX - Recreating SG when it has been deleted
* VMAX-Replacing deprecated API EMCGetTargetEndpoints
* 3PAR: Update replication to v2.1
* LeftHand: Update replication to v2.1
* Update Pure replication to cheesecake
* Fixed logging for oslo versioned objects
* Move replication volume manager warnings to info
* Trim 50s from huawei ISCSIDriver unit tests
* Copy unit tests for StandardLogging fixture from Nova
* Add 'conf' param for TextGuruMeditation autorun setup
* Use is_int_like method from oslo_utils
* Fix sshpool.remove code
* Remove an useless and wrong call
* Copy StandardLogging fixture from Nova
* Add ability to failback for replication V2.1
* Storwize SVC multiple management IPs
* Updating Datera DataFabric Driver to v2 of Datera DataFabric API
* Capture warnings into logs
* Return BadRequest for invalid unicode names (continued)
* IBM Storwize with pool-aware-cinder-scheduler
* Fix HTTP sessions left open in Brocade zone driver
* Fix invalid uuid warnings in block device unit tests
* Fix invalid uuid warnings in scheduler unit tests
* Add necessary fields to volume creation
* Add scheduler RPC API v2.0
* Clean up replication v2.1 (Cheesecake) RPC API
* always use pip constraints
* Remove unused columns from Service ORM model
* CONF add suppress_requests_ssl_warnings
* Make nullable of fields in db model and object match
* Remove unused pngmath sphinx extension
* Delete volumes with snapshots
* NetApp: Fix SSH Client File Creation in Unit Test
* Trim 12s from disco unit tests
* Remove QoS settings from SolidFire attributes
* EMC VMAX - get iscsi ip from port in existing MV
* Misprint in policy.json
* Re-enable -1 child limits for nested quotas
* Updated from global requirements
* Huawei: delete_snapshot need not return any value
* Match the ip more accurately in Huawei driver
* Huawei: Consider bandwidth when selecting port
* Add description when rename LUN in Huawei driver
* Huawei: Code cleanup
* Modify the number of params of hypermetro in HuaweiDriver
* EMC VMAX - Changing PercentSynced to CopyState in isSynched
* Add basic workflow of attach/detach to devref
* Change frozen error messages from _LE to _
* Replication v2.1 (Cheesecake)
* Change Fail to Failed in error messages
* NetApp: Add Consistency Group support for E-Series
* Return BadRequest for invalid Unicode names
* Check for service existance in capabilities API
* Enable all unit tests on Python 3.4
* Adding general notes about rolling upgrades
* Move deprecation release notes to correct section
* Tests: Don't assert on LOG.warn
* Return all target_wwpns for FC storwize
* Fix error message in cinder type-update
* Fix NoneType error in service_get_all
* Add os-brick rootwrap filter for privsep
* LeftHand: Updating minimum client version
* NetApp: Implement CGs for ONTAP Drivers
* Fix HNAS iSCSI driver attachment
* NetApp: Support iSCSI CHAP Uni-directional Auth
* Avoid hardcoding value from oslo library
* cinder-api-microversions code
* Port netapp dataontap driver to Python 3
* Scalable backup service - Liberty compatibility
* Huawei: Log the IP when login fails
* Huawei: Fix create volume with prefetch value error
* Huawei: Don't fail when port group does not exist
* Force target_lun to be int type to make os-brick happy
* Volume manage/unmanage support to ZFSSA drivers
* Disable multi-attach for RBD
* Correct iscsi lun type
* Fix invalid uuid warnings in object unit tests
* Imported Translations from Zanata
* Fixed help message for AllocatedCapacityWeigher
* EMC VMAX - not cleaning up HW Resource WWPN initiators
* Huawei: Fixed url range
* Updated from global requirements
* Move anyjson to test-requirements
* Huawei: Judgement failure when creating hypermetro
* Remove useless get_replication_updates driver call
* [LVM] Restore target config during ensure_export
* 3PAR get host by WWN now handles mixed cases
* NetApp: E-Series remove snapshot limitations
* Fix race condition in RemoteFS create_snapshot_online
* Add SIGHUP handlers to reset RPC version pins
* Handle OverQuota exception during volume transfer
* Tintri image cache cleanup
* Huawei: Ensure the port is online
* Rtstool sets wrong exception message on save
* Test middleware test_faults to Python 3
* Split out NestedQuotas into a separate driver
* Revert changes to use ostestr
* doc: Fix wrong description about adding RESTful API
* Propoerly call oslo's Service stop and wait
* Remove unused kill method from Service
* Wait for periodic tasks to stop on exit
* Dell: Failed vol create could leave dead volumes
* Roll back reservations quota in RPC if necessary
* Scaling backup service
* EMC ScaleIO - fix bug in extend volume
* VMware: manage_existing for VMDK driver
* VMware: manage_existing_get_size for VMDK driver
* Delete unuseful code in Huawei driver
* Cleanup unused conf variables
* Port infortrend driver to Python 3
* Disable Rally backup-related scenarios
* Fix last Python 3 issues in zonemanager
* Port hgst driver to Python 3
* Port API v1 and v2 to Python 3
* Port API contribs to Python 3
* Port test_emc_vnx to Python 3
* Fix service-list filter
* Coho volume stats update
* Sheepdog: Fix malformed image url format
* Update quota when volume type renames
* Add restore_volume_id in backup
* Updated from global requirements
* Support for consistency groups in ScaleIO driver
* Use of metadata id instead of metadata name
* Fix 'asert' typo in unit test
* Fix dynamic import of CONF.volume_api_class
* Filter & goodness functions in NetApp drivers
* Manage/unmanage volume in ScaleIO driver
* HNAS driver: Fix SSH and cluster_admin_ip0 bug
* Remove duplicated code in volume manager and base driver
* Don't use Mock.called_once_with that does not exist
* EMC VMAX - necessary updates for CG changes
* RBD: use versioned objects
* Tests: Fix calls to non-existent assert methods
* Updated from global requirements
* Profiler: make it possible to run without loading osprofiler
* Profiler: don't call trace_cls if profiler is not enabled
* Tests: Set volume listen port to test_service_listen_port
* Remove Cisco FC Zone Manager Driver
* Add variable QoS to NetApp cDOT drivers
* Move integrated tests to 'functional' directory
* py3: Fix usage of JSON in API contrib
* Port utils.safe_minidom_parse_string() to Python 3
* Include allocated quota value in the quota reserve
* Log stack trace for middleware faults
* hacking: Fix false positive in C302 check
* Port backup drivers to Python 3
* Storwize: Implement v2 replication
* Volume create fails with FakeISCSIDriver
* Adds support for configuring zoning in a virtual fabric
* ScaleIO QoS Support
* Zfssaiscsi driver should not use 'default' initiator group
* Update db in CGSnapshot create
* 3PAR: Create consistency group from source CG
* Remove old client version checks from 3PAR driver
* Improve logging to debug invalid "extra_specs" entries
"extra_specs" entries * Huawei: Implement v2 replication (managed) * DRBD: Fix arguments for resize_volume DBus API call * Port objects unit tests to Python 3 * Updated from global requirements * NexentaStor5 iSCSI driver unit tests * LeftHand: Implement un/manage snapshot support * Updated from global requirements * EMC VMAX - Method not being called for V3 * Allow for eradicating Pure volumes on Cinder delete * HNAS driver: retry on Connection reset fix * Fix issue in hacking with underscore imports * DRBD: Compat for current DRBDmanage versions * Fix variable scope issue in try-except * Imported Translations from Zanata * Bootable filter for listening volumes from CLI * Clean uploading volume when c-vol restarts * mock time.sleep in Broadcom unit test * Don't create cgsnapshot if cg is empty * Added osprofiler headers to cors middleware * Imported Translations from Zanata * Fix the replication spelling in message * 3PAR fix driver to work with image cache * Updated from global requirements * Improve logging for volume detach * Remove useless unit tests mock in Huawei driver * Return updated volume type after updating * Remove 'external=True' in Huawei driver * Fix XtremIO multi cluster support * EMC VMAX - Fix for last volume in VMAX3 storage group * Filtering type extra-spec support to ZFSSA drivers * Zfssaiscsi driver should return target_lun as int * Pin RPC and object version to lowest running * Report RPC and objects versions * Add missing RPC calls versions to rpcapi modules * Huawei: Balanced FC port selection when zoning * VNX: Replication V2 support(managed) * Adds HTTPS southbound connector for Brocade FC Zone Driver * Replication V2 for Pure Storage * Support ZeroMQ messaging driver in cinder * Remove redundant definition of 'deleted' * Fix update_consistencygroup log info * Correct opt type for nexenta_chunksize/blocksize * Huawei: Add manage/unmanage snapshot support * Remove deprecated options from NFS driver * Fixing HNAS XML parser * Replace exit() by sys.exit() * IBM XIV/DS8K: Implements Replication V2 * Support cinder_img_volume_type in image metadata * Adds friendly zone name support * LeftHand: Implement v2 replication (unmanaged) * EMC VMAX - VMAX driver failing to remove zones * Remove access_mode 'rw' setting in drivers * Tests: Strengthen assertFalse assertions * Fix laggard cisco FC zone client unit tests * Fix xtremio slow unit tests * Fix sluggish rbd unit tests * Fix torpid coordinator unit tests * Rework Storwize/SVC protocol to fix add_vdisk_copy * NetApp ONTAP - Reapply API tracing * Run flake8 also on cinder/common * Replace assertEqual(*, None) with assertIsNone in tests * Wrap the method to calculate virtual free capacity * Add pagination support to consistency group 8.0.0.0b2 --------- * Fix NFS driver unit test docstring * Adding action to policy.json * fix NFS driver max_over_subscription_ratio typo * Add pip-missing-reqs tox env * Add missing requirements * Added 'bootable volume' filter for non-admin user * Move wsgi to oslo_service.wsgi * Set LVM driver default overprovisioning ratio to 1.0 * Tegile tests: Change volume size to int * VMware: Fix release notes * FlashSystem reports error in _find_host_exhaustive() * Huawei: Refactor driver for the second time * Add cinder backup driver for Google Cloud Storage * NexentaStor 5 iSCSI backend driver * NexentaStor 5 NFS backend driver * DRBD: Rename "dres" to avoid confusion with "res" * EMC VMAX - Incorrect SG selected on an VMAX3 attach * Activate sparse copy for Netapp * Add Fujitsu ETERNUS DX 
* Imported Translations from Zanata
* XtremIO: FC initialize connection failed
* Updated from global requirements
* Disallow transferring volume in consistency group
* Reduce use of eval()
* Remote unused iscsi_targets table
* ITRI DISCO cinder driver
* NetApp eseries: report max_over_subscription_ratio correctly
* Python 3: Replace reduce and xrange with six.moves
* Infrastructure to use the DRBD transport for NOVA
* NetApp ONTAP: Fix extending volume beyond lun geometry
* Refactor Windows drivers using os-win
* Base iSCSI initiator validation
* Fix ChunkedBackupDriver _create_container
* XtremIO: Set the location of a CA certificate
* Huawei: Add manage/unmanage volume support
* Remove DB calls from Pure Volume Driver CG methods
* Dell SC: Adding logging to httpclient
* LeftHand: Implement v2 replication (managed)
* Enable trim/discard in SolidFire's driver
* API Middleware fault: Log exception type
* Re-add Nexenta drivers
* Remove invalid NetApp QoS keys
* Fix thin provisioning flags in NetApp drivers
* Return BadRequest for invalid Unicode names
* Handling Invalid argument iflag=direct in dd
* Execute mount.nfs check as root
* Report discard support for Dell SC connections
* Add ConsistencyGroupStatus enum field
* 3PAR: Adding volume checks to manage snapshot API
* Added Keystone and RequestID headers to CORS middleware
* Allow host and instance_uuid at attach
* Trival: Remove 'MANIFEST.in'
* Imported Translations from Zanata
* Add finish_volume_migration to volume object
* Fix tox -e fast8
* Updated from global requirements
* Removes the Violin 6000 FC and iSCSI drivers
* Dell SC: Implements Replication V2
* Storwize: Add force flag on vdisk mapping call
* Storwize/SVC: Volume manage using source-name
* Enable consisgroups in SolidFire driver
* Replace deprecated library function os.popen() with subprocess
* Constant defined for sqlAlchemy VARCHAR & INTEGER
* Fix image volume creation error
* Cleanup in backup reset status
* NetApp FC drivers should not set 'rw' access mode
* Change minimum 3PAR API version for replication
* Optimize 3PAR array ID retrieval
* Add metadata aliases to Volume object
* Add pagination support to volume type
* Re-enabled hacking checks for H105
* Don’t log warnings for image cache when disabled
* Fix grammatical mistake in defining articles
* Scality SOFS: don't always read /proc/mounts twice
* Add BackupStatus enum field
* Updated from global requirements
* Fix issue with flake8 check and full paths
* Fix race conditions in migration 061
* Replace use of mox with mock in test_nfs
* Updated "deleted" column of volume_type_access
* Add Fujitsu ETERNUS DX Volume Driver (again)
* Add backref relationships to ConsistencyGroup obj
* Misspelling in message
* Fix some warnings about 'unused variable' for XIO
* Updates consistency group for ibm svc driver
* Add volume driver for Tegile IntelliFlash array
* Fix to allow RBD delete an unprotected snapshot
* Remove the deprecated ibmnas driver
* Wrong usage of "a"
* VMware: optimize in get_cluster_refs
* Rebrand HP XP driver to now be HPE
* Fix creating volume by snapshot for GPFS driver
* Rename Huawei drivers
* Check min config requirements for rbd driver
* Remove API races from delete methods
* Remove API races from attach and detach methods
* Quota API is now compatible with keystone API v2
* Add pagination support to Qos specs
* Remove name_id when creating volume with cache enabled
* Imported Translations from Zanata
* Updated from global requirements
* Enhance the stats reported from the Pure Volume Drivers
* Allow replicated volumes to be recoverable
* Imported Translations from Zanata
* Retyping volume got error under max vol limit
* Add config option to enable reporting discard
* Storwize: Split up __init__ into separate files
* Fix volume upload failure with glance_api_version=2
* EMC VMAX - Extend Volume for VMAX3
* XtremIO add support for create CG from CG src
* Fix bugs caused by porting to python3
* Imported Translations from Zanata
* Disable capabilities based on 3PAR licenses
* Add empty initialize_connection method to PureBaseVolumeDriver
* Fix python 3.x import issues with pure.py
* Override osapi_volume_listen_port for test_workers
* Updated from global requirements
* replace deprecated oslo_messaging _impl_messaging
* Remove downgrade migrations
* Fix delete_snapshot error case in Pure driver
* 3PAR: Implement un/manage snapshot support
* Error handling for invalid SLO/Workload combo
* 3PAR: Implement v2 replication (unmanaged)
* Add serial number to eseries ASUP payload
* XtremIO: fix generic glance cache with XtremIO
* VMware: Add support for VVOL datastores
* Tooz locks
* Small refactoring in test_admin_actions.py
* Imported Translations from Zanata
* Making NFS _find_share efficient
* Change the format of some inconsistent docstring
* Preserve request id in Cinder logs
* Add volume_extensions:quotas:delete to policy.json
* Pass volume_id in request_spec for manage_existing
* VMware: Fix volume copy across vCenter datacenters
* Imported Translations from Zanata
* Modify VO so that obj.get always defaults to None
* Replace use of mox with mock in test_quotas
* Replace use of mox with mock in test_rbd
* Skip check whether volume is local if it's None
* Imported Translations from Zanata
* Implement refresh() for cinder objects
* EMC VMAX - get_short_host_name not called in find_device_number
* Move retype quota checks to API
* Implement snapshots-related features for Block Device Driver
* Refactor cinder.utils.is_valid_boolstr
* Add synchronization in Block Device driver
* Recalculate allocated value of parent project
* Updated from global requirements
* Volume migration: add 'name_id' as valid skippable field
* Fix invalid cache image-volume creation
* Imported Translations from Zanata
* Remove eventlet WSGI functionality
* Deprecated tox -downloadcache option removed
* Fix for showing default quotas to non-admin user
* VNX: Fix failure in SnapCopy feature
* Use wild card for passing env variable
* Recognize extra fields in CinderObjectDictCompat
* Imported Translations from Zanata
* VNX: Fix issue in deleting cg/cgsnapshot
* Add validation for volume_type of volume object
* Update Pure REST API supported version numbers in Pure driver
* Updated from global requirements
* Retype functionality in Tintri driver
* Fix non-migration swap with error
* Replace use of mox with mock in test_solidfire
* Check context before returning cached value
* 3PAR: Implement v2 replication (managed)
* Use Cinder API v2 for Rally scenarios
* Check backup service before backup delete
* Python 3: fix a lot of tests
* EMC VMAX - Fix for randomly selecting a portgroup
* Volume driver for Coho Data storage solutions
* XtremIO: fix iscsi chap discovery bug
* XtremIO: fix missing multiattach flag
* Fix StrOpts with integer defaults
* Dynamically Pull Out Option Sections
* Address potential races in SolidFire VAG
* Update migrate_volume API to use versionedobjects
* Storwize: add config option to control flash copy rate
* Remove version per M-1 release instructions
* Use proper config option to connect to keystone
* Robustify writing iscsi target persistence file

8.0.0.0b1
---------

* Adding devref about genconfig
* LIO: Handle initiator IQNs as case insensitive
* Fix dictionary key error
* VMware: Replace mox with mock
* VMware: Unit test refactoring (image to vol - 2/2)
* VMware: Unit test refactoring (image to vol - 1/2)
* Imported Translations from Zanata
* Remove deprecated LVM ISCSI and ISER Drivers
* Delete unused codes in rbd.retype
* NetApp: E-Series fix JSONDecodeError on first add
* Add Mitaka-1 release notes
* NetApp: Refactor E-Series tests
* xio: fix regression in authentication
* Add some missing fields to Volume object
* Imported Translations from Zanata
* Add retype in lvm driver
* Updated violin driver check for volume objects
* Take into consideration races in XtremIOClient3
* Optimize "open" method with context manager
* Updated from global requirements
* Sheepdog: Optimization of error handling
* Fix the bug of can't get the desired image info
* Cleanup orphaned code in sqlalchemy API
* Cleanup orphaned code from ceph backup driver
* Force releasenotes warnings to be treated as errors
* Remove db access in VNX driver
* Fix quotas issue during volume transfer
* Declare multiattach is True in Dell Eqlx driver
* Nexenta Edge iSCSI backend driver
* RBD: Make snapshot_delete more robust
* Hacking Checks for assertTrue/IsNone()
* Remove netaddr useless requirement
* Improve metadata update operations
* Add atomic conditional updates to objects
* Revert "Add Scality SRB driver"
* VMware: Validate extra spec opt vmware:clone_type
* Update list_replication_targets
* Port zonemanager to Python 3
* Port key manager to Python 3
* Move oslo-incubator's scheduler module to cinder
* Remove stubs and add resource cleanup
* VMware: Skip unsupported datastore types
* Port IBM storewize_svc driver to Python 3
* Declare multiattach is True in RBD driver
* XtremIO fix attach readonly bug
* CG API should return volume type IDs
* EMC VMAX - Change naming convention for MV and SG for FAST
* Fix the bug of OSError when convert image
* Don't build two tox envs for pep8(-constraints)
* Add guidelines for release notes to devref
* Imported Translations from Zanata
* Imported Translations from Zanata
* Updated from global requirements
* Eager load columns in volume_get_active_by_window
* Backup snapshots
* LeftHand: Remove self.db assignment
* Deprecate *_multipath_enabled flag for IBM drivers
* Fix debug output for cinder-volume-usage-audit
* Add check_uptodate.sh --checkopts to "pep8"
* Move get_by_id to CinderObject
* Imported Translations from Zanata
* fast8: Skip git rm'd files
* Manage existing: fix volume object saving
* Fix swap_volume for case without migration
* Remove .mailmap file
* Remove db access from 3PAR and LH cg functions
* Additional VAG support for SolidFire
* Fix InstanceLocalityFilter scheduler filter
* Add a FakeGateDriver
* Fix metadata retrieval in GPFS driver
* Imported Translations from Zanata
* VNX: Fix metadata get overriden issue
* Added VAG support to SolidFire
* Refactor HP LeftHand driver to now be HPE
* Remove db access from XIV/DS8K CG functions
* Do not use api-paste.ini osprofiler options
* Remove duplicate keys from dictionary
* Bad exception clauses order
* Imported Translations from Zanata
* Check specific driver enabled in create_backup
* get_all_snapshots: Fix log message typo
* Updated from global requirements
* ScaleIO extend volume round up capacity
* Port HP 3PAR driver to Python 3
* Modify test_hpe3par to support random hash
* Imported Translations from Zanata
* Fix ScaleIO driver provisioning key
* Imported Translations from Zanata
* Remove the HP CLIQ proxy driver
* Retry on database deadlock on service_update method
* Downstream Fix for Genconfig
* Correct assertDictMatch argument order
* 3PAR Fix find_existing_vluns
* Port xio driver to Python 3
* Remove kombu as a dependency for Cinder
* Port EMC VMAX to Python 3
* Port EMC VNX CLI to Python 3
* Sheepdog: Fix a problem about multi backend
* Imported Translations from Zanata
* Update compression license check
* py3: Fix error handling in prophetstor driver
* Updated from global requirements
* Refactor HP 3PAR drivers to now be HPE
* add "unreleased" release notes page
* Add os-win to requirements.txt
* Update extend_volume API to use versionedobjects
* Update retype API to use versionedobjects
* Update get/delete_volume API to use versionedobjects
* Update create_volume API to use versionedobjects
* Test for object version hash changes
* Fix cinder objects unit test registration
* CG driver function should not access db
* Add test for snapshot filtering by project id
* Imported Translations from Zanata
* Use oslo_config new type PortOpt for port options
* Update CONTRIBUTING.md to CONTRIBUTING.rst
* CG creation should be scheduled on backend level
* Removal of deprecated NPIV option in Storwize
* Fix ZFSSA drivers' local cache bugs
* OpenStack typo
* Change nfs to NFS in the help strings of nfs.py
* Port zfssa driver to Python 3
* Port vzstorage to Python 3
* Port cinder.utils.monkey_patch() to Python 3
* XtremIO fix create CG from src flow
* Don't use default=None for config options
* Imported Translations from Zanata
* 3PAR drivers volume size conversion is incorrect
* Port vmware datastore to Python 3
* Use Service object instead of DB API directly
* Docstring fix in scheduler-stats
* Add LC_ALL=C to lvcreate, lvextend and pvresize
* Port cinder.hacking to Python 3
* Port test_tintri to Python 3
* Add reno for release notes management
* Imported Translations from Zanata
* Fix failure of unit test TestCinderAllCmd
* Execute mount.nfs check with absolute path
* Imported Translations from Zanata
* Update minimum tox version to 1.8
* Update cinder-manage man to match current options
* Replace warnings.warn in sqlalchemy-api
* Replace warnings.warn in glusterfs
* Using extra-specs in cloned vols for Nimble driver
* SheepdogDriver: Improve get_volume_stats operation
* Add retype logic in manage_existing for VNX
* Adds CORS support to Cinder
* Fix calling delete_zones method with a wrong argument
* Return volume_type extra specs based on policy
* Revert "Handle correct exception raised by python-novaclient"
* NetApp: Fix issue with updating E-Series password
* NetApp: Fix issue with E-Series volume expand
* Update register_opts hacking check to allow tuples
* Updated from global requirements
* optimize the copy_image_to_volume method of sheepdogdriver
* Fix UsedLimitsController's authorizer to soft
* Imported Translations from Zanata
* Handle correct exception raised by python-novaclient
* lvconvert missing from cinder volume.filters
* Support insecure NAS security options in Quobyte
* Brocade driver add_zone optimization
* Imported Translations from Zanata
* Add missing cgsnapshot field to Snapshot object
* Eager load snapshot_metadata in *snapshot_get_all
* Case sensitivity problem in cinder scheduler
* Add protocol to help of glance_api_servers option
* SMBFS: Fix retrieving total allocated size
* Make relationships in objects consistent
* Imported Translations from Zanata
* Remove the jointly loaded model in finish_volume_migration
* Update docs to generate Guru Meditation Report
* Add qos_specs_id to volume type show
* Fix NoneType Attribute error
* Support initialization state in Backup Manager
* Imported Translations from Zanata
* Add -constraints for CI jobs
* Fix typos about 'target_discovered'
* NetApp: Cleanup if E-Series volume create fails
* VMware: Unit test refactoring
* Cleanup for cinder tests with CGSnapshot
* Imported Translations from Zanata
* VMware: Enforce min vCenter version
* Add hypermetro support for Huawei driver
* Updated from global requirements
* devref doc: assorted fixes in "Unit Tests"
* LIO: Let delete_initiator succeed if iqn not found
* CGSnapshot Object
* Updates in consistency_group in xiv/ds8k driver
* Retype support for CloudByte iSCSI cinder driver
* Add retries for Cisco FCZM client CLI _cfg_save
* Updated from global requirements
* Remove unused gettextutils from oslo-incubator
* Wrong usage of "an" in the mesages:
* NetApp: E-Series fix deletion of missing volume
* Wrong usage of "an" in the mesages: an service
* VMware: Relocate volume only during no disk space
* Port Windows drivers to Python 3
* Use project id from volume when retyping volumes
* Fix typo in LIO terminate_connection error msg
* Update the devref for volume migration
* Implement update_migrated_volume for NFS driver
* Only use LOG.exception in exception handler
* Port API admin action tests to Python 3
* Port API types extra specs to Python 3
* Port API to Python 3
* Mark XML API as deprecated in Mitaka
* windows: don't use LOG.exception if not logging an exception
* Improve performance listing detail for volumes
* Move CloneableVD to common functions
* Fix updating only volume type is_public
* encryption_api_url requires a version
* Cleanup/move code in Storwize Driver
* Port WSGI tests to Python 3
* Fix method VolumeTypeList.get_all
* Use lvm_conf_file directory for LVM_SYSTEM_DIR value

7.0.0
-----

* Squashed commit of WebOb 1.5 and oslo.db fixes
* Change default Exception code to 500
* Dell SC: Disable REST verify warnings
* Update config format for replication_devices
* Fix log formatting for rbd driver
* Fix Status-Line in HTTP response
* Huawei driver handle volume exists error
* Updated from global requirements
* Tox fast8: use pep8 env dir
* Move ssh_utils tests to test_ssh_utils
* Volume extend error does not catch exception
* Fix test_misc for WebOb 1.5
* ScaleIO driver: update_migrated_volume
* Fix error string format for replication API calls
* Port IBM flashsystem to Python 3
* Port ceph driver to Python 3
* Provide better debug log when 'cinder manage' fails
* Remove references to Swift in chunked driver
* Add insecure option for swift backup
* ScaleIO: Fix protection_domain_id log message at init
* Port test_srb to Python 3
* Add fast format option for thick volume creation
* Imported Translations from Zanata
* Retype enhancement for EMC VNX cinder driver
* Updated from global requirements
* Verify volume is replication capable
* Add device identifier to replication device
* Port violin driver to Python 3
* Port EMC scaleio to Python 3
* Remove extra register_opts() calls in netapp eseries
* Add multi-initiator extra-spec for Nimble driver
* Fix SolidFire target composition
* Port targets test_iet_driver to Python 3
* Port image cache to Python 3
* py3: Run unit tests with ostestr on Python 3.4
* Add testresources and testscenarios used by oslo.db fixture
* Clone cg support in VNX driver
* Test_backup_swift: Don't leak notifications
* test_backup_nfs: Set volume id per test
* test_backup_swift: Set volume id per test
* Add backup_swift_auth_url to swift backup driver
* Dell Eqlx: Support over subscription in thin provisioning
* Hacking check for opt name registration
* Add ability to set prefix in SolidFire Volume name
* Updated from global requirements
* Fix broken format string in vol mgr log
* Detach volume on device validation failure
* Convert Retry-After header parameter value to string
* Fix capacity report error in Huawei driver
* emc vmax driver: use integer division for Python 3
* VMAX Target iSCSI IP Address
* Updated from global requirements
* Delete a temporary volume in DB with admin context
* Fix update quota of subprojects
* Port test_quobyte to Python 3
* Remove unused 'deprecated' option from auth_strategy opt

7.0.0.0rc2
----------

* Fix VMAX live migration problem
* Imported Translations from Zanata
* Port netapp SSC Cmode to Python 3
* Port test_netapp to Python 3
* VMAX Truncate Storage Group Name
* HNAS iSCSI manage does not work with spaces
* Port scheduler host manager to Python 3
* Fix various Python 3 issues
* Fix volume throttling to Python 3
* Ensure replication functions check driver status
* Fix enable/disable_replication raise InvalidVolume
* Tests: print fake_notifier queue upon mismatch
* Cleanup orphaned code from cinder root directory
* Image cache tests: use fake_notifier
* Implement extend_volume method to Block Device driver
* Small optimization in Block Device driver
* DRBD: new option "drbdmanage_devs_on_controller"
* Obtain target authentication from database same as LIO target
* Dell SC: cgsnapshot-delete doesn't actually delete
* LVM: Make sparse_copy_volume private, use for capabilities
* Dell SC: cgsnapshot-delete doesn't actually delete
* Fix typo in cinder-config-generator.conf
* Port test_volume to Python 3
* Fix unreachable code pylint issues
* Huawei driver add check before use a QoS
* Report *real* free capacity in Huawei driver
* Fix update Huawei driver issue
* Fix Python 3 issues in wsgi
* py3: Port pure driver test to Python 3
* GlusterFS: extend volume to the right path
* Use pbr wsgi_scripts to install Cinder WSGI entry point
* Report *real* free capacity in Huawei driver
* Imported Translations from Zanata
* Fix Bad indentation pylint issues
* Show image metadata
* XtremIO fix remapping bug
* Revert use of netapp_lib from NetApp Drivers
* Fix volume related operation in CloudByte driver
* Add placholder for migration backports in Liberty
* Add placholder for migration backports in Liberty
* Revert use of netapp_lib from NetApp Drivers
* Remove the destination volume check in delete_volume
* Huawei driver add check before use a QoS
* Fix VMAX live migration problem
* Cleanup of Translations
* Missing configuration opts from cinder.sample.conf
* Use function capsulation in Huawei driver

7.0.0.0rc1
----------

* Open Mitaka development
* Create volume in cg enhancement in VNX driver
* Remove duplicate keys from dictionary
* Fix URL format in Huawei driver
* Setup LVM_SYSTEM_DIR earlier in LVM.__init()
* Add "fast8" tox env
* Allow c-vol backends to start when some backends fail to load
* Fix use of wrong storage pools for NetApp Drivers
* VMware: Remove VMDK driver for ESX server
* Use of ast for integers doesn't changes type
* Make rpc_client method private for VolumeCommands
* Ignore Forbidden error on quotas-get for nested projects
* Change ignore-errors to ignore_errors
* NetApp volume/snapshot delete performance fix
* Replace soft_delete in volume_type_access_remove
* Fix way of getting LUN id in Huawei driver
* Fixing create CG from Cgsnapshot bug in VNX driver
* Fix delete quota of subprojects
* Dynamically create cinder.conf.sample
* Updated from global requirements
* Fix MITM vulnerability for Brocade FC SAN lookup
* Imported Translations from Zanata
* Fix cinder-all binary
* NetApp: Fix volume extend with E-Series
* Fix netapp_enable_multiattach default for E-Series
* Check for None on service's updated_at
* Fix issue of volume after host-assisted migration
* Attaching enhancement for EMC VNX driver
* Tests: Split VolumeTestCase into separate classes
* Local img-cache files ignored for image transfers
* Snapmirror targets should not be reported as pools
* Change check method for 'all_tenants'
* Create a page of drivers with stevedore.sphinxext
* Enable certificate verification during image copy
* Fix NetApp clone from glance failure
* Storwize: Fix format string
* Fix usage of novaclient
* Check for empty attributes on SF volume
* Fix volume lookups in SolidFire template caching
* Don't rely on provider_id for resource deletion
* Fix Pure get pgroup volume snapshot name
* Dothill fix options access
* HPMSA fix access to common options
* Lenovo driver fix access to common opts
* Fixed missing log variable types
* VMware: Fix invalid product name
* Retrieve volume in update_migrated_volume
* Swap the decorator order for PureFCDriver methods
* Add ScaleIO Cinder driver commands
* SolidFire provider_id for snapshots on init
* LeftHand Add update_migrated_volume to drivers
* Huawei: fix multi REST-URLs bug
* Improve coverage for snapshot_get_by_host
* LVM: add the exception handling to volume copy
* Fix NetApp loop in clone of NFS backed images
* Hacking log format arg check
* backup init_host cleanup exception handling
* Making opt names consistent
* Fix QoS keys not being available to scheduler
* Add ConsistencyGroup object entries to linstack.py
* Pass in snapshot refs for host on provider_update
* Filter hosts with pool in snapshot_get_by_host
* Fix typos in comments
* Filter scheduler: Fix KeyError on invalid create request
* Updated from global requirements
* Return a tuple from SolidFire update_provider_info
* Add unmanage default implementation to VolumeDriver
* Correctly report multiattach in Pure drivers
* Add manage_existing and unmanage to BaseVD
* Add migrate_volume to BaseVD
* Update update_migrated_volume in VNX driver
* 3PAR Disable generic image volume cache
* Add updated_at into response of listing detail
* Add os-brick's scsi_id command to Cinder rootwrap
* Fix order of arguments in assertEqual
* Updated from global requirements
* Error message in update_migrated_volume was incorrect
* Remove empty rules from policies for API access
* Fix HDS HNAS driver logging password as plain text
* Add mechanism to update snapshot provider_id
* VMware: Remove global patching of open
* VMware: Skip ESX hosts in maintenance mode
* 3PAR Add update_migrated_volume to drivers
* Updated from global requirements
* Switch SVC driver to use lsportfc to determine FC target WWPNS
* Use consolidated update for failover_replication
* VMware: Fix exception messages
* Adds allow_availability_zone_fallback option to Cinder
* NetApp E-Series over-subscription support
* ZFSSA driver to return project 'available' space

7.0.0.0b3
---------

* Get full volume model in Replication manager API’s
* Fix problem of efficient volume copy for migration
* Generic image-volume cache
* Implement thin provisioning support for E-Series
* Remove useless response checks in SolidFire driver
* Sheepdog: Improve snapshot and clone operation
* Fix the virtual port support in VNX driver
* DotHill driver fix create_cloned_volume parent id
* 3PAR Fix create_cloned_volume source volume id
* Cloudbyte fix create_cloned_volume parent id
* Scheduler-based over-subscription for NFS drivers
* ScaleIO driver should use os-brick connector
* Add instructions on how to deploy API under Apache
* Sync volume versionedobject to ORM
* Check before add lun to QoS in Huawei driver
* Fix backup metadata import missing fields
* Remove the unnecassary volume_api.get(context, volume_id)
* Port image_utils to Python 3
* Port volume transfer to Python 3
* Service object
* Allow specified backend capabilities to be retrieved
* Remove deprecated options
* Add cinder.conf.sample to gitignore
* Add delete_snapshot and update_snapshot rules
* Handle KeyManager exception when deleting a volume
* Fix a merge problem in VMAX driver
* Don't require OpenSSL for unit tests
* Add pagination to backups
* Enhance FC zone support for Huawei driver
* Add support for file I/O volume migration
* Add debug logging before attaching volume in driver
* Detect addition of executable files
* Remove executable bits on files
* NetApp DOT block driver over-subscription support
* Cleanup for SnapshotObject
* Add additional SSC extra specs to E-Series driver
* Minor optimization
* Adding delete-wait-loop for CloudByte Volumes
* get_replication_updates call to driver is wrong
* Earlier authority check for create volume API
* Fix url in API response to get original
* Efficient volume copy for generic volume migration
* Volume status management during migration
* Clean up line continuation in Storwize driver
* LeftHand: Adding Consistency Group Support
* 3PAR update driver to store stats
* Remove driver.set_execute()
* Skip intermittent VMDK tests
* Rework Scality SOFS driver to use RemoteFS class
* Adds framework for get_capabilities() feature
* Implement AutoSupport for NetApp E-Series driver
* Add retries to delete a volume in the RBD driver
* Add support for volume groups and netapp_raid_type
* Dell SC: init_volume stale volume info fix
* Validate filters in snapshot*, backup* in db.api
* Fix volume copy for 'virtual' volumes in DotHill
* Imported Translations from Transifex
* Use version convert methods from oslo.utils
* Implement manage/unmanage snapshot in Pure drivers
* Reduce runtime of E-Series iSCSI tests
* Cinder Nested Quota Driver
* Add manage/unmanage volume support for Nimble
* Python 3 incompatible expression fix
* Local cache feature of Oracle ZFSSA drivers
* Replace urllib.unquote with urllib.parse.unquote
* Remove unused dependency discover
* Update volume status AFTER terminate_connection is done
* Add unit test for backup get_all
* Incremental backup improvements for L
* Sheepdog: improve create and delete operation
* Implement function to manage/unmanage snapshots
* Sheepdog: Add class for dog command executor
* Dont eager load volume type specs on volume list
* Filter out extra-specs from type get for non-admin
* Prevent that all backup objects are deleted
* Add pagination to snapshots
* Parameter osapi_max_limit is always used by default
* Update NetApp Drivers to use netapp_lib
* Extend unit tests for backup get_all
* Fix nimble storage volume stats reporting
* TemporaryImages to inspect image before conversion
* Efficient image transfer for Glance cinder store
* adds user_id to check_is_admin
* Fix backup list all_tenants=0 filtering for admin
* Add Cinder API wsgi application
* Add consistency group tests to test_volume_rpcapi
* Cinder replication V2
* force_detach terminate_connection needs connector
* Assisted volume migration for Oracle ZFSSA drivers
* Add https options and minor code changes
* Fix bad except clauses order
* Add volume retype support for Huawei driver
* Fix URLs to admin-guide-cloud
* Nested Quota Driver: Get Project Hierarchy
* Check sio_storage_pools in check_for_setup_error
* Fix description for "Barbarism of editting a file"
* Dell SC: Added logging for the find_wwns functions
* Add missing space to logged error in create volume
* Cleaning up CONF.register_opts() in compute/__init__.py
* Update provider_id column on SolidFire init
* Add ability to update provider_id during init
* Fix _LI() to _LW() in LOG.warning message
* Remove the method delete_volume_admin_metadata
* Support efficient non-disruptive volume backup in VNX
* Validate value when user update quota
* Add SolidFire svip to config options
* Return multiple iSCSI portals in VNX Cinder driver
* Avoid returning volume metadata in DotHill driver
* Small cleanups in BaseVD/VolumeDriver
* Port 3PAR drivers to use ABCMeta driver model
* Updated from global requirements
* Switch to the oslo_utils.fileutils
* Parse out SolidFire account from api response
* Dell SC: Better exception handling in init_conn
* Port test_nfs to Python 3
* Corrects the order of AssertEquals params in Quobyte tests
* Adds the random option to cinder retry function
* Extra specs may not be in volume types
* VMware: Fix re-attach volume error for VC 5.1
* Remove duplicate keys from dictionary
* LeftHand driver is ignoring reserved_percentage
* Update devref unit tests doc
* Tests: Fix zfssa TestRestClientURL.test_request
* Test whether sample config generation works
* Revert "mark oslo.vmware as optional dependency"
* Register the volume_opts config options in remotefs.py
* Create CG needs extra specs
* Configure space reservation on NetApp Data ONTAP
* Dell SC: Fix error causing missed log message
* Rename free_virtual in capacity filter
* Make migration's volume source deletion async
* Add the ability to update type public status
* Adds manage/unmanage methods for HNAS drivers
* Update deprecated version of novaclient
* Add version columns to services table
* 3PAR: Adding Consistency Group Support
* Remove unused function volume_type_encryption_get
* Refactor to remove duplicate code
* Correct comment to be consistent with code
* Allow 0 length name
* Add volume migration support for Huawei driver
* Cleanup for cinder tests with ConsistencyGroups
* VMware: Change inventory folder hierarchy
* Adapt SnapshotController to view builder
* Add backup/restore methods to Sheepdog driver
* Use min and max on IntOpt option types
* Over subscription for HP 3PAR drivers
* Allow CG without snapshot to be deleted
* Tintri snapshot id
* Add volume type support to Datera
* Fix Pure create volume from cgsnapshot
* Implement Clone CG in Pure Volume Drivers
* Dell Eqlx: Use generic option ssh_timeout
* Make X-IO volume driver wait for delete volume to complete
* Reduced file size to prevent timeout
* Update SolidFire driver to pass newer flake8
* 3PAR: Adding performance metrics to volume status
* Don't use context.elevated to get volume
* Enable cinder-manage to remove services
* VMware: Bump driver version
* Fix backup init_host volume cleanup
* VMware: Deprecate vCenter version less than 5.1
* Updated from global requirements
* Small clean up in volume object
* Move import and export backup metadata to object
* On Volume list only retrieve needed data from DB
* Return volume name from backup_restore
* Switch Pure volume drivers to use Snapshot Objects
* Don't return Exception when volume is detached
* Use Requests HTTP library and URL safe names
* Remove RetypeVD class, fix NFS driver retype
* Fix Python 3 issues in Windows tests
* Add objects.register_all() to cinder-all command
* GPFS volume encryption-at-rest support
* VMware: Set virtual disk UUID to volume ID
* Add oslo.vmware into test-requirements
* Add multipath support to 3PAR iSCSI driver
* Prevent volume already in CG to be added to another
* LVM Thin Provisioning auto-detect
* Fix HNAS iSCSI 32 targets limitation error
* Remove unused fake objects in vmdk test module
* VMware: Add volume ID in vCenter's volume config
* Enhance PureISCSIDriver multipath support
* Add unit test cases for the capacity scheduler
* Fix argument order for assertEqual in tests
* Fix order of parms in assertEqual for scheduler ut
* VNX driver needs extra params for create cg from src
* Prevent creating encrypted volume with image
* EMC VMAX Create CG from CG Snapshot
* mark oslo.vmware as optional dependency
* ConsistencyGroup Object
* Validate string, integer limit for input parameter
* Validate name and description string
* Handle missing temp volume and snapshot during cleanup
* Updated from global requirements
* Validate 'is_public' when creating volume type
* Remove StorPool Driver
* Ignore InsecureReq warning in SolidFire Driver
* Attach snapshot - driver only
* Remove bad tests for the VMAX driver
* Update authorization actions for services API
* Fix missing parameters in driver CG interface
* Remove incorrect URLs from jenkins.rst
* Fix list comparison for empty list
* Snap copy feature for EMC VNX Cinder driver
* Tests: Fix os.path.exists mock (emc_vnxdirect)
* Add connector object to create_export
* Correct usage of assertEqual for boolean values
* Remove unit test migration logging
* Add support '--all-tenants' for cinder backup-list
* Corrected order of parameters in docstring
* Fix wrong exception usage in cinder exception classes
* Fix RestURL to storage backend in Huawei driver
* Sync scheduler module from oslo-incubator
* VNX driver needs to return snapshot objects
* Revert "Revert First version of Cinder driver for Quobyte"
* Enhance unit tests for zfssa drivers
* VMware: Remove unused constants
* Fix volume limit exceeded exception
* Refactor api.v2.volumes unit tests
* Dell SC: Add check of current value on retype
* Update snap-quota to unlimited in Nimble driver
* Add more Rally scenarios to run
* Updated from global requirements
* Fix PEP476 & format message of Oracle ZFSSA drivers
* Add SmartX support for Huawei driver
* Enhance deletion efficiency when backup init host
* Fix order of arguments in assertEqual
* Add multiple pools support to VMAX driver
* Fix status comparison for attached volume backup
* Updated from global requirements
* NetApp SSC job will be run periodically
* RBD: use user-configured value for chunk size
* Over subscription for HP LeftHand iSCSI driver
* Use prefix for SolidFire template account
* Fix multi-line docstrings to meet hacking rules
* sqlalchemy exception kills FixedIntervalLoopingCall thread
* VMware: Fix protocol in backend stats
* Fix error message in cinder/api/v2/volumes.py
* Fix concurrent attaches on HNAS iSCSI driver
* GlusterFS: Using 'fallocate' instead of 'dd'
* Fixing notify message of manage_existing flow
* Clone CG
* Fix get default quota values for subprojects
* Add deactivate step to extend_lv
* Fix exception on uploading a volume to image with glance v2 API
* Set VERSION on the Nimble driver

7.0.0.0b2
---------

* Log which service is down
* Move update_migrated_volume() to BaseVD
* GlusterFS backup driver
* Posix backup driver
* Add mock cases for IBM FlashSystem
* Add discard to connection properties
* Remove deprecated config options for Liberty
* RBD: use user-configured value for max_clone_depth
* Updated from global requirements
* Fix lvm manage existing volume
* Add entry create and cast tasks to manage workflow
* Fix cleanup_temp_volume_snapshots for missing vol
* Remove unused context parameter
* Adding NFS support to the GPFS Driver
* Remove deprecated SimpleScheduler
* Fix doc string definitions
* Port StorwizeSVCDriver to use ABCMeta driver model
* Add extra spec capability for Nimble Cinder Driver
* XtremIO support for iscsi discovery auth
* Add bandit for security static analysis testing
* typos(?) in create_snapshots_in_db
* Add multiple pools support for Huawei driver
* Port XIVDS8K Driver to use ABCMeta driver model
* Fix Python 3 issues in Hitachi HNAS tests
* Port remotefs driver to Python 3
* Port IBM driver to Python 3
* Clean up volume_types logging
* NetApp ESeries: fix delete of non-existent volume
* Refactoring of manager's create_volume flow
* Remove unused arguments from c-vol's create_volume
* Updated from global requirements
* Add I/T mapping check for IBM FlashSystem
* Remove simple scheduler which is deprecated since Juno
* LVM: Support efficient data copy for LVM driver
* Implement retype for Pure drivers
* Dell SC: Add support for driver retype
* EMC VMAX Modify CG
* XtremIO volume driver consistency group support
* Add Cinder internal tenant support
* VMware:Replace vCenter calls with oslo.vmware calls
* Rename filename from il8n.rst to i18n.rst
* Non-disruptive backup
* DRBD: Rename a constant to a better name
* Remove resource lock operation for HBSD
* Dell SC: Fix legacy bug, init_conn bug and REST API bug
* Dell SC: Fix Consistency Group issues
* Add drivers list generator
* Fix 033 add encryption unique key migration
* Add CHAP support for Huawei driver
* Move volume.api test to correct location
* Remove logging statements from migrations
* DRBD: Define a separate prefix for snapshots
* Prevent missing Purity hosts from raising errors
* Revert "Remove X-IO volume driver"
* Filter cgsnapshots data on the DB side
* Refactor Huawei Volume driver
* Add volume_attachment to volume usage notifications
* Graceful shutdown WSGI/RPC server
* Backups: allow name to be specified during restore
* Set default policy for "volume:get"
* Add iSCSI multipath support for Huawei driver
* Fix 3PAR driver handling of existing VLUNs
* Don’t log warnings in Pure initialize_connection
* scality: add export and name keys to os-initialize_connection info
* Add delete/update_volume_metadata policy rules
* Remove "volume:services" rule from policy.json
* Report capability of multiattach for FlashSystem
* Handle volume not found on zfssa volume delete
* Raise BadRequest for invalid replication status
* Add unit tests for cinder.api.v2.volumes
* Raise HTTP exception for backup not found
* Port NetApp NFS drivers to use ABC driver model
* Removing OpenvStorage for no CI
* Remove unused serialize_args method
* Remove obsolete API from documentation
* Tests: test_volume mock conversion
* Fix restore point if backup base is diff-format in ceph
* Add white list support for target ports in VNX driver
* Preserve mock side_effect’s in test_pure
* StorPool: clean up the last uses of str.format()
* Removing archaic references
* Remove useless logging from unit tests
* cinder list fails with 'name' sort key
* Storwize_svc_npiv_compatibility_mode default value change
* Remove unused parameter in PureFCDriver _connect
* Cleanup unused method fake_get_target
* Set driver version in Sheepdog driver
* Updated from global requirements
* Fix saving tz aware datetimes in Versioned Objects
* set/unset volume image metadata
* Fix not implemented wording in update_migrated_volume
* Add support for force-delete backups
* Improve 3PAR driver VLUN creation and deletion
* Remove hacking check N327
* Fix tests failing in gate
* Fix properties extracting from image with glance api v2
* Support SMI-S provider v8.0.3 in VMAX driver
* Add ability to override OpenStack privileged user auth url
* VMEM v6000: Fix export verify routines
* Port Tintri driver to ABC driver model
* Fix block_device driver to behave as documented
* NetApp E-Series: Add debug tracing
* Set encrypted key in connection_info during initialize
* Nested Quota: Set default values to subproject
* Dell SC: Add support for ManageableVD
* Fix NetApp cDOT driver use of Glance locations
* Fix missing pool name in consistency group
* NetApp ONTAP: Add debug tracing
* Add tracing facility for drivers
* Fix error message in Pure driver with correct text
* Notify the transfer volume action in cinder
* Storwize Driver zone removing
* Dell SC: Add support for consistency groups
* Remove duplicate volume.filters entry
* Port NetApp E-Series iSCSI driver to ABC model
* Fix getting out-of-date volume operation state issue for VNX
* Separate FlashSystem FC and iSCSI common code
* Update expected error message from lvs
* Fix HBSD horcm driver with oslo.concurrency 2.1.0
* Remove X-IO volume driver
* RemoteFS: Fix the offline snapshot delete operation
* Implement the update_migrated_volume for the drivers
* Avoid race condition at snapshot deletion stage
* Fix Python 3 issues in cmd
* Port image/glance.py to Python 3
* Switch to oslo.reports
* Validate maximum limit for quota
* Updated from global requirements
* Fix block eventlet threads on rbd calls
* RemoteFS: Reporting configured reserved_percentage in _update_volume_stats
* GlusterFS: support extending a volume that has snapshots
* Port dothill to Python 3
* Fix backup.rpcapi to pass object backup
* Fix typo in solidfire driver option
* Mock socket.gethostbyaddr in test_v7000_fcp
* Replace missed basestring by six for python3 compatability
* Return 404 if volume type encryption is not found
* Updated from global requirements
* smbfs: fix invalid check for smbfs_used_ratio correctness
* Remove lio_initiator_iqns config option
* Move HDS drivers to Hitachi folder
* Fix Python 3 issues in targets unit tests
* Port drbdmanagedrv driver to Python 3
* Port test_db_api to Python 3
* Port hitachi driver to Python 3
* Fix getting wwpn information in infortrend driver for DS4000
* Do not allow to modify access for public volume type
* Add dependency check in RBD delete_snapshot
* Port huawei driver to Python 3
* XtremIO driver fix array snapshot problem
* Fix cinder.conf.sample generation
* Handle attachment of second volume
* VMware: Create volume backing in specific clusters
* Use versionutils from oslo.log
* Correct overquota error message
* Updated from global requirements
* Fix timeout issue in EMC VNX driver unit test
* Remove oslo logging from backup unit tests
* Add notifications about snapshot.update.*
* Sync the latest fileutils module from oslo-incubator
* Port NetApp DATAONTAP blocks drivers to ABC model
* Fix 'no actual-pathname' NetApp API error
* Use right oslo.service entry points
* Use symbol for error code in VNX cinder driver
* Storwize driver report capability for multiattach
* Filter snapshots data on the DB side
* Change generic NotFound to specific exception
* Storwize: add the missing stops in the end of the messages
* Ensure 'WSGIService' derives from oslo_service base class
* Switch to oslo.service
* Fix library includes for config generator
* Revert First version of Cinder driver for Quobyte
* Fix cinder-manage volume delete cmd
* Fix Python 3 issues in the blockbridge driver
* Fix Python 3 issues in the swift backup driver
* Fix Python 3 issues in ceph and rbd drivers
* Fix Python 3 issues in backup
* Remove generate_glance_url
* Fix manage_existing function in infortrend driver
* Add unit tests for the capacity filter
* Modify template account creation in SolidFire drvr
* Tests: Fix assertRaisesRegexp deprecation warnings

7.0.0.0b1
---------

* Harden scheduler.rpcapi unit tests
* Fix backups.rpcapi to pass objects over RPC
* Fix weird change of volume status in re-scheduling
* Fix tox -e py34
* Add exception catch in report_state for DBError
* Updated from global requirements
* Dell SC: Enable use of Storage Profiles
* Use elevated context for backup destroy
* Fix Cinder Objects unit tests
* rbd: add volume_id to connection_info in initialize_connection
* Fix Datera driver export call
* Add iscsi_target_flags configuration option
* Adds the Violin Memory V7000 series FC driver
* Remove the hardcoded concurrency limit for ostestr
* Revert "Disable backup progress notifications for unit tests"
* Nested Quota : Create allocated column in cinder.quotas
* Handle incorrect '--config-dir' param
* Get updated volume status in begin_detaching
* Tests: Make fake_notifier per-instance
* Validate outermost request body element name consistently
* Add missing argument to delete_keys method
* Port LeftHand driver to use ABCMeta driver model
* Add Virtuozzo Storage Volume Driver
* Disable profiler for unit tests
* Use a hard-coded project_id in racy cinder.tests.unit.test_volume tests
* Validate bool value using strutils.bool_from_string method
* Incorrect exception caught in qos-specs create api
* VMware: Remove unused methods
* Scality SOFS: enhance how the remoteFS mount is detected
* Backup object
* Add missing Jinja2 to requirements.txt
* Storwize: remove the useless method check_copy_ok
* Update version for Liberty

7.0.0a0
-------

* ScaleIO: Fix broken format string
* Sync 'report' from oslo-incubator
* Ceph driver support retries on rados_connect_timeout
* Dell SC Removed _find_domain and associated tests
* LVM add multiattach flag capability
* Add volume drivers for Infortrend Storage
* XtremIO Volume driver requests, multipath
* Updated from global requirements
* Adds FC and ISCSI Cinder drivers for Lenovo Storage Arrays
* Adds FC and ISCSI Cinder drivers for HPMSA Storage Arrays
* Replace basestring with six.string_types
* Fix broken export commands on block_device driver
* Switch to oslo.policy 0.3.0
* Add config option to set max_volume_size_limit
* Fix LIO target helper when missing targetcli
* Move DRBD tests into tests/unit
* Volume driver for HP XP storage
* Replace xrange() with six.moves.range()
* Drop L suffix from long integers
* Pass proxy environment variables to tox
* Re-add DRBD driver
* Refactor API create_volume flow
* Introduce Guru Meditation Reports into Cinder
* Adds FC and ISCSI Cinder drivers for DotHill Storage Arrays
* Get rid of oslo-incubator copy of middleware
* SQL scripts should not manage transactions
* Targets tests: Clean up long lines
* Update 3PAR user config help strings
* Disallow backing files when uploading volumes to image
* Remove WritableLogger wrapper
* Get StringIO from six for Python 3 compatibility
* Fix Python 3 issues in utils
* Update SolidFire to use target driver model
* Wait until service thread is done on service stop
* Add cinder volume driver for Blockbridge EPS
* 3PAR enable multiattach capability reporting
* Replace dict.itervalues() with dict.values()
* Rewrite code merging two dictionaries
* Replace dict(obj.iteritems()) with dict(obj)
* Replace dict.iteritems() with dict.items()
* san driver: don't use relative Python import
* Implement Cinder Volume driver for HGST Solutions
* Volume manager should set filter_function and goodness_function
* Tintri driver to manage existing backend storage objects
* Replace it.next() with next(it) for py3 compat
* Use six to fix imports on Python 3
* NetApp E-Series: Add Fibre Channel Support
* NetApp E-Series: Refactor class structure for FC
* NetApp E-Series driver: Remove caching logic
* Use six.reraise() for Python 3 compatibility
* Updated from global requirements
* Add secondary account capability to SolidFire
* Replace urllib and urllib2 with six.moves.urllib
* Replace unicode with six.text_type
* Use correct rtslib namespace for newer versions
* Dispose DB connections between backend proc starts
* EMC ScaleIO Cinder Driver
* RemoteFS: Fix doc for locked_volume_id_operation
* Re-integrate Oracle iSCSI Cinder driver
* Dell SC: Expanded comments and update var names
* Re-add the StorPool distributed storage driver
* Add iSCSI protocol support for IBM FlashSystem
* Fixes 3PAR snapshot failure with optional params
* ConsistencyGroup: Return 400 instead of 500 for invalid body
* Port remote_fs driver to use new driver model
* Make VNX Cinder Driver aware of VNX Pool Full Threshold
* Add 'source-id' and 'source-name' support in VNX driver
* Revert "Adds drivers for DotHill Storage Arrays."
* Dell SC: Added support for alternate iscsi portals
* Dell: Added verify cert option for REST calls
* Handle ineffective backup compression
* Prophetstor driver needs to return snapshot objects
* Complete switch to snapshot objects
* DriverFilter: don't check volume_backend_name
* Add Pure Storage FibreChannel driver
* Fix exception parameter name
* Move Dothill tests out of root test directory
* Fix remaining memory issues with nfs backup unit tests
* Don't send heartbeats if Manager reports a problem
* Changes in rally-jobs/README.rst
* Removed explicit return from __init__ method
* Return provider_id in SolidFire model update
* Deprecate the HPLeftHandISCSIDriver CLIQ driver
* Allow provisioning to reach max oversubscription
* Port ProphetStor driver to use ABCMeta driver model
* Clean up unused exceptions
* Refactor scheduler's create_volume flow
* Adds FC and ISCSI Cinder drivers for DotHill Storage Arrays
* Bump SolidFire version number
* Dell SC: update_volume_stats could use uninitialized vars
* Disable backup progress notifications for unit tests
* Tintri Cinder Volume driver
* Fix assertRaisesRegexp deprecation warnings in UT
* Refactor PureISCSIDriver into base and iSCSI classes
* Add missing unit test for goodness weigher
* Non-admin user to query volume filter by az
* Fix cinder concurrency issues on rtstool
* Use SolidFire snapshots for Cinder snapshots
* Switch get_all_snapshots to use objects
* rbd driver in cinder does not manage glance images multi-location
* Notification with volume and snapshot metadata
* Remove pretty_tox and use ostestr
* Add volume ID to fake volumes in Gluster tests
* Fix capacity filter to allow oversubscription
* EMC VMAX Manage/Unmanage Volume
* Add chap support to CloudByte cinder driver
* Multiple pools support enhancement in VNX cinder driver
* Remove un-used import at test_volume_transfer.py
* NetApp FC driver shims missing manage/unmanage
* Updating cmd/manage.py get_arg_string() argument parser and adding unit test
* Fix expression-not-assigned pylint issues
* Add standard QoS spec support to cDOT drivers
* Avoid LUN ID collisions in NetApp iSCSI drivers
* VMware: insecure option should be exposed
* Create iSCSI lio portals with right IPs and port
* Create consistgroup from cgsnapshot support in VNX driver
* Stop using deprecated timeutils.isotime()
* Fix response when querying host detail by host name
* Fix wrong response with version details
* Display NOTIFICATIONS on assert failure
* Brocade driver not parsing zone data correctly
* Fix issues with extra specs in VMAX driver
* Don't use dict.iterkeys()
* Address 0x712d8e0e error in VNX Cinder Driver
* Leverage dict comprehension in PEP-0274
* Add missing '-o' CLI option to VNX Cinder Driver
* Validate name and description for volume type
* Leave sqlalchemy convert to boolean to the DB SQL type to use
* Switch from MySQL-python to PyMySQL
* Add ability for drivers to copy data preserving sparseness
* Remove HDS HUS iSCSI driver
* Updated from global requirements
* Use nfs_oversub_ratio when reporting pool capacity
* LVM: Pass volume size in MiB to copy_volume() during volume migration
* LVM: Support efficient data copy using "dd" for create_cloned_volume
* Fix a problem with FAST support in VMAX driver
* Remove use of deprecated LOG.warn
* Fix incorrect reraising of exceptions
* Switch to oslo_versionedobjects
* Cinder os-force_detach api returns 500
* Check volume_backend in retype
* Fix overwrite of params in SF image cache update
* Dell SC driver honoring folder name after volume creation
* Check type match on create from source/snap
* Add patch for consistency group update in ProphetStor driver
* Logging not using oslo.i18n guidelines (openstack)
* Remove unused context parameter
* Replace suds test dependency with suds-jurko
* Fix missing translations for log messages
* Remove Brick from cinder codebase
* Follow i18n guidelines in LIO target
* Windows SMBFS: Fix image resize errors during volume creation
* Windows iSCSI: Add CHAP authentication support
* NFS Backup: Correcting backup_sha_block_size_bytes help message
* Fix common misspellings
* GlusterFS: Renaming test case to test_mount_glusterfs
* Add new exception to retryables in SolidFire driver
* Convert mox to mock: tests/compute/test_service.py
* FlashSystem reports error while running tests with multi-thread
* Dell: Added support for update_migrated_volume
* Fix FakeISCSIDriver and FakeISERDriver
* Add volume status to error messages in backup create flow
* Bad link in API version details response
* Fix xxx=\n pep8 errors in volume_utils.py
* Log command failure details before raising ISCSITargetDetachFailed
* Eqlx: Fixes the retries on Network Connection Error
* Rename Datera test to test_datera for discovery
* Allow rexports for targets with Datera
* Add os-brick to cinder requirements.txt
* Fix the KeyError in CloudByte iSCSI cinder driver
* LIO: Enable iSER for IPv6
* LIO: Use rtslib property instead of private method
* Fix missing translations for log messages
* Cinder os-attach api returns 500
* cinder os-detach api returns 500
* HDS HNAS Driver fails when FS unhealthy
* Logging not using oslo.i18n guidelines (zonemgr)
* Fix broken add_iscsi_conn log message
* Fix unit tests spam output
* Preserve usage and reservations on quota deletion
* Fix 'driver is uninitialize' typo
* Removing sleep between when a command is sent and 'YES' is sent
* Windows iSCSI: remove ensure_export
* tests: replace mox by mox3, clean out mox usage
* Catch additional type conversion errors
* Tests: Remove sleep from NFS tests
* Port block_device driver to use new driver model
* VMware: Enable vCenter certificate verification
* Fix typo in log messages and comments
* Clean up failed clones in VMAX driver
* Correct directories check for N327 hacking check
* Fake out sleeps in unit tests
* Fix range check for NFS used ratio
* Move logging sample to use oslo_log
* Targets test refactoring
* Revert state if attachment already exists
* Add retry to lvm delete
* Admin extends tenant's volume but change admin's quota
* Drop use of 'oslo' namespace package
* Add Multi-connection support to XIV
* VNX Cinder driver Over Subscription Support
* Fix namespace issue in generate_sample.sh
* Add hacking check for str and unicode in exceptions
* Fix volume creation from image with allowed_direct_url_schemes
* Change default of option volume_driver to LVMVolumeDriver
* GlusterFS: Support over subscription in thin provisioning
* Remove unnecessary checks for encrypted types
* Add test case for volume_encryption_metadata_get
* Updated from global requirements
* Port rbd driver to use new driver model
* Don't truncate osapi_volume_link prefixes
* Fixed issue with mismatched config in VMAX driver

2015.1.0
--------

* Add external genconfig calls
* Create initiator id for VMAX iSCSI driver
* Remove deprecated methods in VNX driver
* Remove unused find_attribute_or_element()
* 3PAR don't log version numbers every stats update
* Sync oslo service module
* Add external genconfig calls
* Enable use of filter_function in PureISCSIDriver
* NetApp E-Series: Fix instance live-migration with attached volumes
* Add resource tag to logging in volume.manager.py
* VMware: Handle concurrent inventory folder create
* Leverage timeutils, drop strtime() usage
* GlusterFS: Using mount method in RemoteFsClient
* Remove redundant code from VNX Cinder Driver
* Remove force check from copy_volume_to_image
* Logging not using oslo.i18n guidelines (scheduler)
* service child process normal SIGTERM exit
* service child process normal SIGTERM exit
* Move unit tests into dedicated directory
* Dell SC driver calls out the wrong REST API version
* Move RBD calls to separate threads
* Windows SMBFS: fix volume extend
* Fix a wrong argument of create method
* Fix tiny typo: compatability => compatibility
* Reserve 5 migrations for Kilo backports
* RBD: Add missing Ceph customized cluster name support
* Standardize logging in volume.api.py
* Release Import of Translations from Transifex
* Fix fetch_to_volume_format if vhd is requested
* Windows: Improve vhdutils error messages
* SMBFS: Add minimum qemu-img version requirement
* VolMgr: reschedule only when filter_properties has retry
* Storwize driver should only report active wwpn port
* update .gitreview for stable/kilo
* Mask passwords with iscsiadm commands
* Add support for customized cluster name
* Updated from global requirements
* SMBFS: Lock on a per-volume basis
* Windows SMBFS: fix volume extend
* Complete the doc/README.rst instructions to build docs
* Verify all quotas before updating the database
* Add locking to PureISCSIDriver around creating Purity Host objects
* Include boot properties from glance v2 images
* Add CA cert option to backups swift driver
* Fix a wrong argument of create method
* Add locking to PureISCSIDriver around creating Purity Host objects
* Reworked Dell SC iSCSI target portal return
* Fix LUN misalignment issue with NetApp iSCSI drivers
* Remove the export creation during volume creation for migration
* Fix assertEqual in test_volume.py in correct order of params
* VNX Cinder Driver should report 0 free_capacity_gb in some scenarios
* Include boot properties from glance v2 images
* Logging not using oslo.i18n guidelines (brick)
* set default auth_strategy to keystone

2015.1.0rc1
-----------

* Open Liberty development
* Removed sleep before 'YES' is sent to confirm an operation
* Update openstack-common reference in openstack/common/README
* GlusterFS: Returning provider location of volume from snapshot
* Fixes snapshot creation failure in CloudByte driver
* Delete the temporary volume if migration fails
* Revert "Removing Windows drivers"
* Correct cinder hacking check numbering
* Add hacking check for print() statements
* Rbd update volume stats in wrong way
* Add missing copy_volume_to_image method to Sheepdog driver
* Partial Revert "Removing Huawei drivers"
* Create initiator id if not exist in VMAX driver
* Fixed encrypted property for 3PAR FC and iSCSI drivers
* Partial Revert "Removing ZFSSA driver"
* Mock wait_for_volume_removal in test_brick_connector
* Dell SC driver has insufficient iscsi logging
* VMware: Skip vSAN for preallocated image download
* Enable H238 hacking rule
* Use six.text_type instead of unicode
* Fix ISCSIDriver initialized connection volume type
* Fix multipath device discovery when UFN is enabled
* Fix missing clone_image API support for sheepdog driver
* More error handling on EMC VNX migration failure
* Set volume_attachment to [] for the temporary volume creation
* Add volume:update rule to policy.json
* Fix always false condition in glance wrapper
* Only use operational LIFs for iscsi target details
* Revert "Removing Netapp FC drivers for no reported CI"
* Get volume from db again before updating its status
* Catch more general exception in manager's create_volume
* Fix broken fetch_to_volume_format log message
* Tests: Fix v6000 test failure with random hash seed
* Check volume status in detach db api
* Fix wrong command for _rescan_multipath
* Storwize: Replication status still active when primary copy is offline
* VMware: Fix instance_uuid access in volume retype
* Logging not using oslo.i18n guidelines
* Remove LP bug ref in remove_iscsi_device
* Fix potential access to missing key
* Brick: Fix race in removing iSCSI device
* VMware: Improve invalid container error message
* Fix the format of the system name in VMAX driver
* Hitachi: Fix access to volume instance_uuid
* VMware: Fix ImageNotAuthorized during copy volume
* Fix: Boot from image with HNAS iSCSI
* SMBFS: Fix missing volume provider location
* Enhance VNX Cinder volume creation logic
* Properly use obj_extra_fields in objects
* Create unit tests for volume objects
* Fix incorrect invocation of _add_to_threadpool
* VMware: Fixed usage of volume instance_uuid
* Change volume and snapshot stuck creating to error
* Imported Translations from Transifex
* Fixed access to instance_uuid in retype
* Ensure initialize_connection in targets pass multipath parameter
* Eager load volume extra specs
* Be safe with getting attachment
* Added the missing attachment to detach_volume
* Make lio iSCSI changes persistent to avoid lost
* Sort list of cinder_object.changes
* Move to hacking 0.10
* Syncing versionutils from oslo-incubator
* Properly remove host object from ISE
* Dell Storage Center API change fails init_conn
* Windows iSCSI: fix volume clone
* Enable request-id in cinder API logs
* Use cached values for stats on query failures for vmem drivers
* The value of netapp_storage_protocol should default to none
* Change leftover oslo.* to oslo_*
* Updated from global requirements
* Fix: Synchronise Quobyte Share mounting
* Fix typo in cinder/cinder/volume/drivers/emc_vmax
* Update file doc string for pure.py
* update oslo policy to remove policy.d log spam
* Fix QoSSpecManageApiTest to work in parallel

2015.1.0b3
----------

* Remove chap secret DEBUG logging in PureISCSIDriver
* Removing Windows drivers for no reported CI
* Fix logging mistake in swift backup driver
* Removing Zadara driver for no reported CI
* Removing Huawei drivers for no reported CI
* Removing Netapp FC drivers for no reported CI
* Removing Fujitsu driver for no reported CI
* Removing DRBD driver for no reported CI
* Removing FusionIO driver for no reported CI
* Removing Nexenta driver for no reported CI
* Removing Symantec NFS driver for no reported CI
* Removing StorPool driver for no reported CI
* Removing ZFSSA driver for no reported CI
* Make the 3PAR drivers honor the pool in create
* Removing HP MSA driver for no reported CI
* Removing Coraid driver for no reported CI
* Add retry to create resource in Datera driver
* Logging not using oslo.i18n guidelines
* Tests: Fix az test failure when PYTHONHASHSEED != 0
* Change datetime.now() to timeutils.utcnow() from oslo_utils
* Fixes nits in check_no_contextlib_nested
* Fix logging to catch original exceptions and tracebacks
* Remove error messages from multipath command output before parsing
* Return updated volume object to the caller of _attach_volume()
* Fix SAN generic driver ssh whitespaced commands
* EMC: Fix use of "_" as variable name
* Reduce configured file size for nfs backup unit tests
* tests: remove useless variable
* Revert "Datera's export to avoid deprecated keys"
* Don't override extra specs with config in VMAX
* Check license before clone in VMAX driver
* Fixing mount when state_path is configured with a final '/'
* Verify all quotas before updating the database
* Update Violin REST client library name
* Remove the reference of volume['instance_uuid'] in VNX driver
* Increase LeftHand driver minimum client version
* Decrement remaining retries after failed REST call
* VMware: Fail immediately for images in a container
* Make unused iscsi_num_targets, iser_num_targets configs as deprecated
* Raise exception for invalid mock assert calls
* Mocked utils.execute for broken tests
* Huawei driver check before associating LUN to a LUN group
* Windows: Fixes wintypes import issue in vhdutils
* Fix typos in LVMVolumeDriver
* Add minimum qemu-img version check functions
* Implement IET target driver
* Fix unit tests for multiattach patch
* Fixed a concurrency issue in VMAX driver
* Fix LVM thin pool creation race
* Added provider_id to volume and snapshot object
* Fix ArgsAlreadyParsedError in emc_vnx_cli.py
* Fix typo in log message
* remotefs: Fix doc string for _create_snapshot
* Fix a typo in sf_template_account_name help
* Move to the oslo.middleware library
* Remove use of contextlib.nested
* Remove strutils from oslo-incubator
* Add waiting for the driver to SchedulerManager
* Fix retype return value in volume driver base class
* Fix retype arguments in volume driver base class
* Fix sqlalchemy reuse in multi-backend children
* Fix Cinder logs to show authentication error in RBD driver
* Update hacking check for oslo_log
* Add is_ready method to scheduler driver
* Fix for inconsistent cinder-services state change
* Fix HNAS iSCSI driver error on LUN creation
* Datera driver looks for lun-0 instead lun-1 now
* Use oslo.log instead of oslo-incubator
* Remove the useless next link for volumes, transfers and backups
* Unset auth token before trying to login to Datera
* NFS backup driver
* Sort snapshots in create CG from CG snapshot
* Add multiattach capabilities injection
* Tests: Harden fake_notifier asserts
* Error trying to delete snapshots on Hitachi driver
* Remove global mocking from test_pure.py
* Allow scheduler to receive volume stats when starting service
* VMware: Fix exception logging
* Adjust Cinder to support FCP on System z systems
* Refactor Swift backup driver and introduce chunking driver
* Namespace updates for _i18n and imageutils & fileutils
* Dell FC driver inheritance order causing failures
* Add volume multi attach support
* Add project_id to barbican keymgr wrapper
* Fixes VNX NotImplementedError of unmanage
* Replace assertEqual(True, *) -> assertTrue(*)
* Update Datera's export to avoid deprecated keys
* Improve error handling in refactored Tgt driver
* Adds pool aware scheduling for HNAS drivers
* PureISCSIDriver consistency group updates
* HP lefthand driver filter and evaluator function
* Fix the unicode encode error when create volume
* Add consistency group support for XIV/DS8K cinder driver proxy
* Don't fail target_delete if ACL's don't exist
* Change log level for no object attribute found
* Add Manage/Unmanage support to NetApp NFS drivers
* Use snapshot object in create_volume flow
* Fix "rtsllib" -> "rtslib" typos
* Fix some issues with pool name sent to SVC
* Fix allocated_capacity tracking when rescheduling
* HP 3par driver filter and evaluator function
* Add support to incremental backups in cinder
* Convert all eqlx tests from mox to mock
* Fixed the order of mock decorators in VMAX driver
* Adds SSH communication to HNAS drivers
* Add CHAP support to PureISCSIDriver
* Make objects behave more like our old dictionaries
* Two choices for iscsi_helper is missing
* Update Datera's Authentication method
* Simplify cxt test_create_export() unit test
* CG Modification Support in EMC VNX Cinder Driver
* Dell SC API change fails snapshot creation
* Adding manage/unmanage support for LeftHand driver
* More validation logic for VNX CG creation
* Change default value of gpfs_images_share_mode to None
* Add DB table for driver specific data
* Move oslo.messaging to the oslo_messaging namespace
* Create Consistency Group from CG Snapshot API
* Modify Consistency Group API
* Remove useless storage_availability_zone import
* Failover to alternative iSCSI portals on login failure
* Update volume type name for volume type API
* Add config option to override url for versions
* Snapshot and volume objects
* Cinder objects base
* Dell Storage Center Driver API bug
* Password config options should be marked secret
* Clear migration_status from a destination volume if migration fails
* RBD: Query volume features from ceph.conf
* i18n Compatibility in VMAX driver
* Correct a few changes in the VMAX driver
* Fix HNAS driver parsing errors
* RBD: remove non-layering support for antiquated versions
* Fixed errors in docstrings in the VMAX driver
* SMBFS: Fix retrieving the volume path and format
* More validation logic for VNX CG creation
* Add flash cache policy to 3PAR driver
* Update v1 deprecation warnings
* Fixes the import for Lefthand driver
* NetApp eseries implementation for manage/unmanage
* Replication status periodic task optimization
* XtremIO version 4.0 support
* Change max_over_subscription_ratio default value
* Use Unique SCST Group names in SCST cinder helper driver
* Add CHAP persistence to SCST target helper
* Fix for infinity capacity reporting in EQL driver
* Use iscsi_helper instead of target_helper on logs
* Import only modules: H302
* Revert "Remove retry_deactivation directive on lvremove"
* Over subscription for Pure Storage iSCSI driver
* Use oslo_config choices support
* Custom zone name prefix not being used by brcd driver
* cinder-manage man update
* GET volumes API sorting REST/volume/DB updates
* GET volumes API sorting enhancements common utilities
* FCZM fix reading of cinder config entries
* Sync scheduler.filters module from oslo-incubator
* Limit volume copy bandwidth per backend
* Generic filter support for volume queries
* Remove warnings for long vgs and lvs calls
* Use subunit-trace to enable output during unit test runs
* VMware: Relocate volume to compliant datastore
* VMware:Use datastore selection logic in new module
* VMware: Refactor initialize_connection unit tests
* Fix exceptions logging in TgtAdm
* Sync 'versionutils' module from oslo-incubator
* Sync 'threadgroup' from oslo-incubator
* Update 'systemd' module from oslo-incubator
* Sync 'service' module from oslo-incubator
* Sync 'loopingcall' module from oslo-incubator
* Sync the 'fileutils' module from oslo-incubator
* Sync 'eventlet_backdoor' module from oslo-incubator
* Remove unused 'test' module from oslo-incubator
* IBM GPFS Consistency Group Implementation
* Fixed 3PAR driver load balancing during migration
* NetApp E-series: Allow scheduling by disk
* Make Interval and Retries Configurable for VMAX
* Pass region name to Nova client
* Remove retry_deactivation directive on lvremove
* Manage/unmanage impl for NetApp ONTAP iscsi driver
* Fix argument order in assertEqual: tests/test_service.py
* Fix some message nits in the ZoneManager
* Implement refresh as kwargs in get_volume_stats
* Dell sc driver iscsi multipath enhancement
* Tests: Fix cxt target tests opening files
* LVM: Fix thin provisioning and mirrors stats reporting
* Fix exception error on HNAS drivers
* Fix comments style according to the Hacking Rules
* Passing privileged user to create nova assisted snapshots
* Fix return value inconsistency in VNX Driver
* Fixed typo
* Pool-aware scheduler support in EMC VNX Cinder driver
* Fix extraneous comma that breaks docbook generation
* Sync policy module from oslo-incubator
* Dell Storage Center: Add retries to API calls
* EQLX: Consolidate CHAP config options
* Add support for chiscsi iscsi helper
* Fix logging guideline violations in volume/api.py
* Remove useless requirement on wsgiref
* Snapshot of bootable volume goes in error state
* Sync periodic_task module from oslo-incubator
* Tests: Don't require binding to port 4444
* Tests: Remove TestWSGIService.test_reset_pool_size_to_default
* Tests: Remove randomness from NFS mount tests
* Change exception message in volume api
* Refactoring for export functions in Target object
* Add iSCSI SCST Target support to cinder
* EMC VMAX driver Kilo update
* Fix Scality SRB driver security concerns
* Fixes total_capacity_gb value in CloudByte driver
* EMC VNX Cinder Driver iSCSI multipath enhancement
* Add dedup provisioning to 3PAR drivers
* Provided requirements are meant to be immutable
* Remove optional parameter from lun mapping call
* quobyte: remove dependency to xattr
* Don't fail target_delete if target doesn't exist
* Remove custom lazy loading
* DRBD: Use correct function object after DBus disconnect
* Split volume driver into ABC classes
* Mock out the wait routine in the VMAX driver
* Limit ram and disk used by ceph backup tests
* Fix detach volume from host problem in VMAX driver
* fix typo in config.py
* Update hacking ignore list
* VMware: Delay string interpolation in log messages
* VMware: Integrate VMDK driver with oslo.vmware
* Enhance iSCSI multipath support
* Dell Storage Center Unit Test Updates for Kilo
* Updated from global requirements
* Update eqlx driver help text
* Add extra library oslo.concurrency to oslo.config.generator.rc

2015.1.0b2
----------

* Support over subscription in thin provisioning
* Change oslo.* to oslo_*
* Lefthand driver fails to attach a cloned volume
* Purge deleted rows
* Make PureISCSIDriver iSCSI port discovery more flexible
* EMC VNX Cinder Driver Update
* Make storwize debug log more readable
* Fixes the EQL driver CI tests AttributeError
* Add manage/unmanage methods for Hitachi Block Storage Driver
* RemoteFS: Use nas_ip and nas_share_path options
* Scality SOFS : Use ensure_tree from fileutils
* Tests: Don't sleep for looping calls (eqlx)
* Enable use of an /etc/cinder/lvm.conf file
* Roll back if VMAX masking view not created
* Tests: Don't sleep for looping calls
* Windows iSCSI driver: Fixes copy disk method exception handling
* VMware: Fix missing target resource pool
* Revert "Implement Huawei SDSHypervisor driver"
* Remove the solaris volume driver
* Fix SSHPoolTestCase to work in parallel
* Drop deprecated namespace for oslo.rootwrap
* Fixes attribute content checking
* Imported Translations from Transifex
* Support iSER driver within the ISCSITarget flow
* HP3Par: Set snapCPG when managing existing volumes
* Fixed misspelling in solidfire.py
* Adds unit tests for HNAS backend
* Failed to discovery when iscsi multipath and CHAP both enabled
* Add retry for tgtadm update when tgt exists
* Add completion logging for snapshots and volumes
* Fix configuration of rally jobs
* Create SolidFire Template account on init
* Updated from global requirements
* Add debug message for lvremove after udev settle
* IBM Storwize driver Consistency Group Implementation
* Use get_my_ipv4 from oslo.utils
* TgtAdm: Fix _recreate_backing_lun logging
* Revert "Create SolidFire Template account on init"
* HP 3PAR modules have bad log messages
* Remove useless and unused request_utils
* Create SolidFire Template account on init
* Fetch_to_volume_format calls copy_volume using wrong parameter
* Changed pvs separator from ':' to '|' to support names with ':'
* Raise correct exception when validate_connector failed
* Add provisioned_capacity
* Move 3 Fujitsu ETERNUS DX related file
* Add retry to lvm snapshot create
* Add a generic retry decorator to cinder/utils
* Use uuidutils from oslo.utils
* Remove unnecessary method: _ensure_iscsi_targets() in tgt.py
* Raise correct exception if deleting of LIO iSCSI target is failed
* Cleanup unused DB APIs, part I
* Remove argparse from requirements
* Update tests for Quobyte Cinder drv from mox->mock
* Fixes a small issue in find_autodoc_modules.sh
* Fix the eqlx driver to retry on ssh timeout
* Add retrying lib from global requirements
* Remove usage of taskflow 'utils.misc' module
* Move oslo.serialization to oslo_serialization namespace
* HP 3PAR modules do not follow coding guidelines
* Improve debug logging of Dell Storage Center driver
* Fix _usage_from_snapshot in volume.utils
* VMware:Fix error creating vCenter inventory folder
* New Cinder volume driver for openvstorage
* Fix cinder-manage shell ipython
* Shrink down customized logging listener
* Prevent deleting volumes in a consistency group
* Fix bug in rbd driver: the cloned volume size is wrong
* Fix HNAS driver confusing error message (iSCSI driver)
* Updated from global requirements
* Ensure lazy translation is disabled properly
* DRBD: remove a wrong comma, it creates a tuple
* Move oslo.utils to oslo_utils namespace
* Make test_create_delete_snapshot more robust
* Add policy_dirs conf fixture
* DRBD: Log an error if libraries can't be loaded
* Fix the iSER legacy usage in the new targets flow
* Move oslo.config to oslo_config namespace
* Add support for manage/unmanage volume commands to PureISCSIDriver
* Scality: Lock around SOFS mount to avoid a race
* Set 'driver_volume_type' to 'gpfs'
* Verify the instance's existence in the VMAX driver
* Updated from global requirements
* Switch the PureISCSIDriver over to using the purestorage pypi module
* Fix zfssa driver volume attach to work with latest zfssa software
* Updated from global requirements
* Move oslo.db to oslo_db namespace
* Fix eqlx endless loop when server closes the connection
* Increase unit test coverage in hacking test
* Fixed server name being retained after detach in LeftHand
* Fixes misspelled words in Cinder
* Imported Translations from Transifex
* Add mock for cinder-rtstool call in tests.targets.test_lio_driver
* Skip LIO target unit tests until mocked
* Fix LOG formatting in api initialize_connection
* TgtAdm: Don't change CHAP username/password on live migration
* Deal with PEP-0476 certificate chaining checking
* Add hacking check for oslo namespace usage
* Remove locks from LeftHand driver
* Fix bug in tgt conf for volume
* Use is_valid_ipv6 from oslo.utils
* Use lockutils.set_defaults to set lock_path in test
* Fix bug in sheepdog driver: the wrong volume size
* Add loopingcalls for Huawei storage system driver
* Implement clone_image caching on SolidFire
* Add migration tests for PostgreSQL
* Garbage Remains when Attached Volume is Migrated with NFS Driver
* Update README.rst to current state
* Remove unused variables from ensure_export()
* Fix incorrect usage of get_flow in volume.manager
* Fix iscsi_write_cache setting for iscsi targets
* Add debug messaging for tgt already exists
* Clean up QoSSpecManageApiTest setup
* Add more rally benchmarks related to Cinder
* Use cinder.utils.execute directly
* Deal with tgt already exists errors
* Fix drbd driver to load without 3'rd party libs
* i18n fixes for PureISCSIDriver
* cinder-rtstool: should use acl.node_wwn
* LVM: Add terminate_connection call for Target Objects
* Add an instance-locality filter
* Adds cinder iscsi driver for CloudByte storage
* Add driver filter and evaluator for scheduler
* Remove import of private _lazy module
* Fix argument order in assertEqual: tests/test_nfs.py
* Fix the continuation line indent to pass flake8
* Capitalize the first letter in log messages
* Fix argument order in assertEqual: tests/test_glusterfs.py
* Use assertRaisesRegexp() in test_srb.py
* The DRBD(manage) Cinder volume driver
* Make ProphetStor drivers compliant with logging standards
* Transition LVM Driver to use Target Objects
* Replace oslo-incubator with oslo_context
* Create proxy volume driver
* Fix handling of serialized data in filtering of volumes
* Convert mox to mock: tests/test_glusterfs.py
* Remove check on db_exc.DBError
* Add specific docs build option to tox
* Imported Translations from Transifex
* Add a privileged user for OpenStack services
* Add support to PureISCSIDriver for Consistency Groups
* Expand the description of extra_capabilities
* Fix broken StorPool driver
* Brick LVM: Remove self.lv_list
* Revert "Outputs the message about failing to bind to IPv6"
* Replace the NetApp driver proxy layer with a proper factory
* Quobyte Driver Exception Cleanup
* Handle the volume not found case in the VMAX driver
* Fix format errors in brick/iscsi LOG messages
* Add unit tests for NetApp do_setup methods
* Outputs the message about failing to bind to IPv6
* NetApp E-series: Do not log passwords in requests
* Set iet_conf to nonexistent file in unit test
* Fix issue with passing lists in filters
* Rename oslo.concurrency to oslo_concurrency
* Add a provider_id column to Volumes and Snapshots
* Mock leaked _execute() calls in driver tests
* Sync request_utils module from oslo-incubator
* Sync periodic_task module from oslo-incubator
* Persist volume uuid on VMAX array
* Fixed a problem in terminate_connection in VMAX driver
* Sync the latest middleware module from oslo-incubator
* LVM: Volume is deleted unexpectedly during volume migration
* RBD: use image_conversion_dir for image operations
* Sync the latest loopingcall module from oslo-incubator
* Sync install_venv_common from oslo-incubator
* Sync latest imageutils from oslo-incubator
* rtstool on Ubuntu installs in /usr/local/bin
* encryption_id needs to be non-nullable
* Mock calls to rpm and dpkg from NetApp unit tests
* Fix files in Cinder with execute bit set
* Add error handling to _connect function in PureISCSIDriver
* Fix typo that escaped review in connector.py
* Fix 3PAR host persona mapping to match WSAPI
* Punctuation and Copyright changes
* Make 3PAR drivers compliant with logging standards
* Fixing 3PAR connection name cache error
* Remove redundant args for clone_image method
* Add Oracle ZFSSA NFS Cinder Driver Support
* Fix HNAS driver initialization

2015.1.0b1
----------

* Make GPFS driver compliant with logging standards
* Updated from global requirements
* Fixed wait for job completion in VMAX driver
* Logging updates to properly use ',' instead of '%'
* Add support for Purity Protection Groups to PureISCSIDriver
* Catch ImageNotFound exception when deleting rbd volume
* Isolate Cinder Attach and Connect in Base Driver
* Uncouple scheduler stats from volume creation
* Fibrechannel and iSCSI for Violin Memory 6000 Series Arrays
* Add Scality SRB driver
* Update volume driver for Huawei storage system
* Implement Huawei SDSHypervisor driver
* Implement Huawei SDSHypervisor connector
* Added volume type description for volume type API
* Added UUID as primary key for Encryption model
* Fix 3PAR driver hang on SSH calls
* Delete default volume size 100M in drivers
* Send the notifications to the Ceilometer for backup service
* Add the StorPool block storage driver
* Update global requirements
* Remove commented out code from cinder/test.py
* Fix HNAS driver confusing error message
* Remove iscsi_helper calls from base iscsi driver
* Add unit test for commit 22abe9081
* Add Support for Dell Storage Center
* Ensure that lun_id is an int for NetApp Drivers
* Symantec NFS cinder driver
* DB migration tests
* Convert mox to mock: tests/compute/test_nova.py
* Correct default service_name for nova_catalog*_info config option
* FlashSystem Code Cleanup
* FibreChannel drivers for NetApp Data ONTAP storage controllers
* First version of Cinder driver for Quobyte USP
* Fix use of invalid variable in tgt exists check
* Remove an unused variable in volume/manager.py
* Brick: fix bug in tgt conf for volume
* Convert test_image_utils tests to mock
* Report better capacity info for a limitless 3par cpg
* VMware: Fix datastore selection with single host
* Add support for backup encryption metadata
* Improve use of temporary_file and temporary_dir
* RemoteFS: Move Nova snapshot code into RemoteFSSnapDriver
* Implementing the use of _L'x'/i18n markers
* Fixes intermittent NFS driver mount failure
* Updated from global requirements
* Use pbr entry_points to setup the cinder scripts
* ZFSSA iSCSI driver should support extra specs
* Remove the cinder.conf.sample file
* Fix for typo in Purity Host create/delete methods in PureISCSIDriver
* Fix a clone volume problem in VMAX driver
* Updated from global requirements
* Fix 3PAR driver attach error when host name missing
* NetApp fix vol migration unusability
* Updated from global requirements
* Allow HostState to handle empty capabilities
* Inherit RequestContext from oslo
* Imported Translations from Transifex
* Workflow documentation is now in infra-manual
* Remove the check_uptodate conf checks
* Improve unit tests for cinder/volume/utils.py
* Remove lio_initiator_iqns
* Bring cinder up-to-date with new oslo libraries
* VMware: Add missing storage profile requirement
* Use object.property instead of object.dump()['property']
* NetApp 7mode NFS driver doesn't honor netapp_vfiler option
* Revert "Fix Brocade FC SAN lookup MITM vulnerability"
* Add ability to zfssa driver to create multiple initiator groups
* Improve testing of cinder/utils.py
* Fix rpc initialization of cinder-manager volume
* Fix 3PAR drivers attempt to locate existing host
* Volume type access extension
* Remove driver compatibility in volume manager
* Don't use _execute directly in brick/iscsi
* Deal with tgt already exists errors
* Fix find_autodoc_modules.sh to support OSX
* Raise exception if invalid IP is specified
* Fix check_ssh_injection in cinder/utils
* Fix _get_disk_of_partition edgecase in utils
* Adding volume driver for X-IO ISE
* Remove Python 2.6 backwards compatibility code
* Imported Translations from Transifex
* Get the 'consumer' in a correct way for retyping with qos-specs
* PureISCSIDriver:Handle delete called on already deleted volume
* Add limited retype support for rbd
* Add iSCSI Target objects as independent objects
* Remove Python 2.6 classifier
* Implementing the use of _L'x'/i18n markers
* Match mock.patch decorator with appropriate param
* Correct misspelled words
* Brick LVM: LV not found logging and error handling
* etc: replace NullHandler by Python one
* Don't use module importutils from oslo-incubator
* Removing locks from 3PAR FC and iSCSI drivers
* Update rally job files
* Fix calls to assert_called_once in unit tests
* Refactoring to allow addition of NetApp FibreChannel drivers
* Add ability to create volume from image by image name
* Fix exception message formatting
* VMware: Set target ESX host for backing VM clone
* Create "image_conversion_dir" before creating temporary file
* Convert the DateTime into ISO8601 format for Ceilometer
* Imported Translations from Transifex
* Remove module timeutils
* NetApp NFS and iSCSI: move zapi client logic into modules
* Context cleanup
* ProphetStor with pool aware cinder scheduler
* Updated from global requirements
* Imported Translations from Transifex
* Fix messages in EMC VMAX driver with no translation
* Scality SOFS: implement volume backup and restore
* Fixup regressions in PureISCSIDriver log statements
* Implementing the use of _L'x'/i18n markers
* Remove module jsonutils
* Sync policy from oslo-incubator
* Don't use module excutils from oslo-incubator
* Sync latest versionutils from oslo-incubator
* GlusterFS: Lock on a per-volume basis
* Defining the variable "tmp" before try block
* PureISCSIDriver needs to disconnect hosts before deleting volumes
* context.elevated() should use copy.deepcopy()
* Added missing rules in policy.json
* Fix message translations for MSA common class
* Switch Cinder to use oslo.concurrency
* Use oslo.utils
* Remove code for deprecated extension path
* Imported Translations from Transifex
* Update prerequisite packages in development docs
* Change CHAP secret default length
* Implementing the use of _L'x'/i18n markers
* Switch to oslo.serialization
* Fix typo in SolidFire xDBVersionMismatch label
* Fix a problem in creating consistency group in ProphetStor driver
* Updated from global requirements
* Disable Cgsnapshot APIs by default
* Invalid GlusterFS share format error
* allow image_id for imageRef in create volume API v2
* Changing PureISCSIDriver to use % string formatting instead of .format
* Update cinder.conf.sample to fix max db conn retries
* CiscoFCSanLookupService passing command as string
* Documentation Bug fix committed
* Add i18n _LX tags for relevant log levels in EQLX driver
* Bump Req timeout to 30 seconds in SolidFire Driver
* Remove cinder/compute/aggregate_states.py
* Remove deprecation warnings relating to api-paste
* Mock isfile in test_ssh_missing_hosts_key_file
* Implementing the use of _L'x'/i18n markers
* Scality driver:use self.configuration instead of CONF
* Mock cinder.wsgi.Server in TestWSGIService
* Explicitly close requests obj in SolidFire Driver
* Remove Mock class monkey patching
* Add volume attribute support to volume created using clone, snapshot
* Stop stacktracing on QuotaErrors
* Stop stacktracing on InvalidInput exceptions
* Add automatic creation and deletion of Purity hosts for PureISCSIDriver
* Mox -> Mock for test_block_device.py
* Fix Brocade FC SAN lookup MITM vulnerability
* Implementing the use of _L'x'/i18n markers
* Imported Translations from Transifex
* Updated from global requirements
* Fix the LV NotFound situation for thin-type LVM
* Fix wrapper to work with barbicanclient 3.0.1
* Retry remove iscsi target
* Adding support for 'source-id' in 3PAR manage
* Remove test_barbican from keymgr tests
* Implementing the use of _L'x'/i18n markers
* Capture exception when delete a volume detached
* Add cinder support for IBM FlashSystem
* Use urllib.urlencode instead of dict_to_query_str
* Disable python-barbicanclient 3.0.0 version
* Activate pep8 check that _ is imported
* LIO: Fix UnboundLocalError in ensure_export
* Amend unused variables to assist pylint testing
* Brick LVM: Rename get_all_volumes, further optimize
* Fix wrong arg number for _fix_id_migration issue
* Cleanly override config in tests
* Add debug output indicating provider_location
* Use look up service for auto zoning
* Fix for debugging c-vol in PyCharm
* CiscoFCSanLookupService uses extra argument in init
* Fix SolidFire inaccurate model on migrated vols
* Eventlet green threads not released back to pool
* Add ability to update migration info on backend
* Reserve 5 migrations for backports
* Verify the full interface of the context object
* IBM Storwize: Improve error message
* Imported Translations from Transifex
* LioAdm: Delete initiator from targets on terminate_connection
* NFS Security Enhancements: allows secure NFS environment setup
* Brick LVM: Optimize get_volume
* TgtAdm: Don't change CHAP username/password on live migration
* Update volume-type's quota when extending volume
* Cinder api service doesn't handle SIGHUP properly
* Handle DBConnectionError instead of Exception
* Remove outdated _ as a builtin from pylintrc
* ProphetStor driver consistency group support
* Turn on Flake-8 Complexity Checking
* Log a warning when getting lvs and vgs takes longer than 60 seconds
* Add client_socket_timeout option
* IBM Storwize driver: Add local variable assignment to "ctxt"
* Updated from global requirements
* Multipath commands with error messages in stdout fail to parse
* NetApp fix to set non default server port in api
* Correct the message string

2014.2
------

* Fix LVM iSCSI driver tgtadm CHAP authentication
* Export cinder volumes only if the status is 'in-use'
* Fix LVM iSCSI driver tgtadm CHAP authentication
* Export cinder volumes only if the status is 'in-use'
* Revert "Relocate volume to compliant datastore"
* Remove vol_type_id cast to str
* Move SolidFire driver from httplib to requests
* check the configuration item glance_num_retries
* VMware: Fix initialization of datastore selector
* Imported Translations from Transifex
* Fix exception handling on test_delete_nonexistent_volume
* check the configuration eqlx_cli_max_retries
* Revert "Relocate volume to compliant datastore"
* Remove deprecated use of gettextutils import _
* Fix NetApp AutoSupport Shortcomings
* HP 3PAR: Don't ignore extra-specs snap_cpg when missing cpg
* 3PAR migrate without losing type settings
* 3PAR with pool-aware-cinder-scheduler
* Fix display name change during backup restore
* gitignore /.*
* Fixes docstring typos (Cinder)

2014.2.rc2
----------

* Remove useless sslutils from openstack.common
* Truncate fail_reason to column length
* Fix eqlx CLI output parsing on bad input
* Eqlx fix NoSuchOptError for volume_name_template on clone
* VMware: Bump driver version
* Updated translations
* NetApp fix eseries unit test mock clean
* Make sure device support Direct before setting
* Make sure device support Direct before setting
* Eseries warn if multipath is not set for img xfer
* GlusterFS: Remove unneeded conf import
* ZFSSA iSCSI vol create fails with vol type option
* Handle eqlx SSH connection close on abort
* ZFSSA iSCSI driver cannot add multiple initiators to a group
* Fix race condition in ISCSIConnector _disconnect_volume_multipath_iscsi
* Deprecate / obsolete NetApp volume extra specs
* IBM Storwize driver: Retype the volume with correct empty QoS
* Fixed Typo from modfied to modified
* Updated from global requirements
* Sync latest processutils from oslo-incubator
* Imported Translations from Transifex
* Updated from global requirements
* coraid: allow setting default repository
* Sync latest processutils from oslo-incubator
* Windows SMBFS: Handle volume_name in _qemu_img_info
* Refuse invalid qcow2 backing files
* Windows SMBFS: Handle volume_name in _qemu_img_info
* Refuse invalid qcow2 backing files
* Clarify InvalidInput exception when the size is missing
* Handle eqlx SSH connection close on abort
* Deprecate / obsolete NetApp volume extra specs
* Fix race condition in ISCSIConnector _disconnect_volume_multipath_iscsi
* ZFSSA iSCSI driver cannot add multiple initiators to a group
* ZFSSA iSCSI vol create fails with vol type option
* Open Kilo development

2014.2.rc1
----------

* Fix race condition in ISCSIConnector disconnect_volume
* Adds openSUSE support for developer documentation
* IBM Storwize driver: Retype the volume with correct empty QoS
* VMware:Unquote folder name for folder exists check
* VMware: cinder-volume create_session fail at retry
* Fixing format for log messages
* Update /etc/cinder/cinder.conf.sample for memcache
* VMware: Relocate volume to compliant datastore
* Fix parameter miss in test_snapshot_metadata test case
* Failed to re-detach volume when volume detached
* Imported Translations from Transifex
* IBM Storwize:Failed to retype from non-type to replication enable
* Fix unnecessary WSGI worker warning at API startup
* Remove XenAPI driver
* Add required spaces in log messages
* Fix ssh_host_key_file default in help and config.sample.conf
* Downgrade 'infinite' and 'unknown' capacity in weigher
* Remove unused py33 tox env
* Add unit test to cinder cgsnapshot api
* DB migration 25->24 failed when dropping column
* Allow scheduler pool information to be retrieved
* Increase the 3PAR hostname length
* Timeout triggers failures running tempest for ZFSSA driver
* NetApp fix for default host type in eseries
* HP 3PAR drivers should not claim to have 'infinite' space
* Add tests for consistency groups DB migration
* Verify requested size in volume.api create
* Typo "asscoiated" should be "associated"
* NetApp fix eseries unit test mock clean
* Updated from global requirements
* Set socket options in correct way
* HP 3PAR: Allow retype when the old snapshot CPG (3PAR pool) is None
* NetApp fix for controller preferred path
* VMware: Add storage profile related unit tests
* Check replication status failed for non-replication
* VMware: Implement retype for VMDK driver
* VMware: Improve datastore selection logic
* Sync latest strutils from oslo-incubator for mask_password fix
* Remove executable bits on various files
* Fix a problem with 'volume list' when 'all_tenants=0'
* IBMNAS: Remove call to set r/w permissions to all
* Updated from global requirements
* Getting iscsi_ip_address from cinder.conf
* Handle config file with newlines and whitespaces
* Volume types need to be specified when creating CG
* Stop using intersphinx
* Netapp drivers support for pool-aware scheduling
* coraid: fix snapshot deletion
* SQL scripts should not manage transactions
* Add reset-state function for backups
* Add test case for volume_types.py
* Block sqlalchemy-migrate 0.9.2
* Destroy Datera export target after detach
* EMC VNX Direct Driver Consistency Group support
* Update oslo.config and oslo.messaging requirements
* Fixes Windows Volume Driver upload volume fails
* Log an error on nfs mount failure
* Sync service.py and its dependencies to Cinder
* HP 3PAR configurable ssh-host-key-policy
* Fix confusing exception message in NetApp iscsi driver
* Delete consistency group failed
* Fixing leaking sessions in 3PAR on attach/detach
* Add Windows SMB Volume Driver
* Netapp: fix multiple copies of cinder-volume
* Add SMB Volume Driver
* Fix possible race condition for accept transfer
* Imported Translations from Transifex
* Mock glance client object in version unit tests
* Revert iSCSI Target objects as independent objects
* Use right body for test_create_missing_specs_name
* remove object in wsgi LOG.info
* Don't clear _mounted_shares list in remoteFS while updating
* Some tcp configuration parameters are ignored
* Add filter to volume transfer REST api
* Fix help for running specified unit tests
* Deprecate the V1 API
* Set default pool value to system in gpfs driver
* Fixes Cinder fails to upload volume to vhd image
* Unit test for restore with different hosts

2014.2.b3
---------

* During a restore send the restore request to the right host
* Add Datera driver for Cinder
* warn against sorting requirements
* VMware: Remove redundant extend disk API call
* VMware: Implement backup/restore for VMDK driver
* Update the HP 3PAR default persona
* Fixed Typo - from hypens to hyphens
* Fixed typo from 'the the' to 'the'
* Fix running unit tests with coverage
* Support Volume Backup Quota
* Volume Replication implementation for IBM Storwize/SVC
* Add Fujitsu ETERNUS DX support
* Pool-aware Scheduler Support
* Small typos
* Add QoS support to IBM Storwize driver
* Fix unnecessary snap of glance image, with non-raw images
* Driver for Fusion-io ioControl Hybrid array
* Make ssh-host-key-policy configurable
* Add Cisco FC Zoning plugin to the FC ZoneManager
* Typo
* Ignore pylint error 'hashlib' has no shaxxx member
* Update oslo policy and its dependencies
* Avoid using the disk cache on volume initialisation
* Introduce Hitachi storage volume driver
* XtremIO cinder iSCSI & FC volume drivers for Juno
* Consistency Groups
* Add retype method to xiv/ds8k driver interface
* Fixes terminate_connection live migration issue
* Fixing 3PAR excessive FC port usage
* Sync latest processutils from oslo-incubator
* Sync latest strutils from oslo-incubator
* Mock processutils.execute properly in test_ibmnas
* VMware: Disable suds caching
* Adds volume replication methods to xiv/ds8k driver interface
* Pass an empty context to the notifier
* Add Oracle ZFS Storage Appliance ISCSI Driver
* Add support in Cinder for volume replication - driver approach
* EMC VMAX Driver Juno Update
* Fix duplicate teardown to allow tox upgrade
* Revert test_rootwrap_filter to avoid python2.6 test failure
* Improve Cinder API internal cache interface
* Allow backup-to-swift to take swift URL from service catalogue
* Integrate OSprofiler and Cinder
* Fix variable name in api/v/snapshot.py
* Honor volume:get policy
* Extending IBMNAS driver to support NFS based GPFS storage system
* GlusterFS: Use image_utils for tempfile creation
* Modify error code compatible with Mac OS
* Cache snapshots in request for extension
* Remove redundant temporary_chown from IetAdm
* Failed to initialize connection
* Mock out image source file in image_utils tests
* Provide a quick way to run flake8
* Ignore No value passed for parameter 'dml' message
* Create RemoteFSSnapDriver class
* VMware: Handle exceptions raised by image update
* Adds barbican keymgr wrapper
* Imported Translations from Transifex
* Catch vol not found in SolidFire transfer
* Fix LOG string formatting in image_utils
* Change the force delete volume flag to True
* Update ref used for notifications
* HP 3PAR manage_existing with volume-type support
* Add iSCSI Target objects as independent objects
* Rewrite ionice command filter using ChainingRegExpFilter
* Use abstract class for the backup driver interface
* Put result in quotes
* Fix exception handling in PureISCSIDriver
* Catch DBDuplicateEntry instead of IntegrityError
* Enable import group hacking rule
* Actually encode the SolidFire json dump result
* Sync latest oslo-incubator log for debug fixes
* Enable F402 checks and fix violations
* Prevent tenant viewing volumes owned by another
* VMware: Check snapshot and rename backing
* Fix bad indentation in netapp and san.hp volume drivers
* Ignore HTTP_PROXY during test requests
* Issue one SQL statement per execute() call
* Add ProphetStor DPL Storage server volume driver for Cinder
* Add timer info for copy operations
* Make manage.py usable
* Enable H104, F841 hacking rule and fix violations
* Adds CA cert file path option for glance connections
* Enable Swift backup driver for auth 2.0
* Updated HACKING.rst so that it is accurate
* Update help strings
* Add hacking check for use of LOG.audit
* Imported Translations from Transifex
* Use oslo.i18n
* Add CHAP support for 3PAR ISCSI
* EMC: Fix minor issue in VNX driver and unit tests
* fix a small typo in development.environment.rst
* Do not translate debug messages
* Fixing LeftHand live migration error
* Improve regex for _ import hacking check
* General cleanup of unused objects
* RPC client lazy initialization
* Fix snapshot id for snapshot_destroy
* Use auth_token from keystonemiddleware
* Fixes wrong usage of mock.assert_not_called()
* Fix error log level in restore-backup routine
* Add retry_on_deadlock to db update methods
* Fix unit test test_import_record_with_verify
* Change the exception type for Storwize/SVC driver
* VMware: Update default task_poll_interval value
* Change logging level AUDIT to INFO
* Fix solidfire accept_transfer
* VMware: Volume from non-streamOptimized image
* Enable checks for E711, E712 and E713
* Add note that some checks are disabled on purpose
* VMware:Disk type conversion during clone backing
* VMware:Support for attaching disk to backing
* Change 3PAR delete message when volume is busy
* Move generate_password into volume utils
* Move SSHPool into ssh_utils.py
* Fixes migrate_volume_completion
* Change corrupted spelling mistakes
* EMC VNX Direct Driver Update for Juno
* Storwize/SVC can not get the right host
* Skip incompatible test on OSX
* Have task/flow logging go to a separate log file
* fix atom link in XML Version API
* Update ref used for notifications
* Fix glance metadata SQL query performance
* Add return of updated object on update from DB
* fixing the iSER transport protocol when using LVMISERDriver
* Add hacking check for vim headers
* Get updated model info on volume transfer
* Introduce iSCSI driver for Pure Storage FlashArray
* Further cleanup of reservations index
* Sync log from oslo-incubator for isEnabledFor fix
* Modify the index migration slightly for backport
* Remove cinder-clear-rabbit-queues
* Remove cinder-rpc-zmq-receiver
* Remove reattach function in cinder-manage
* Set python hash seed to 0 in tox.ini
* HP 3PAR retype implementation
* Add index for reservations on (deleted, expire)
* Remove Hyper-V dependency in the Windows Cinder Volume Driver
* Fix no handlers could be found issue
* Add storageHost content to volume messages
* Add hacking check for explicit import of _
* Make manage/unmanage admin only
* Avoid OSError in get_blkdev_major_minor with network filesystems
* VMware:Support for create disk and disk descriptor
* Implement import/export for SolidFire Driver

2014.2.b2
---------

* Implements new 'bootable' option for manage existing volume
* Add hacking test
* Fixes Cinder volume upload to image on windows
* Add explicit import of _ to hp_3par_fc and iscsi
* Adds storwize_svc_npiv_compatibility_mode flag to Storwize/SVC driver
* Switch to use oslo.db
* Add additional explicit imports of _ where needed
* Fix failure of source volume deletion in migrate_volume_completion
* Remove hard coded reference from gettextutils.py
* Enable lazy translation for Cinder
* Explicitly import _() in Cinder code
* Fix performance issues with brocade zone driver
* Don't leave snapshots on the floor
* Add some log info for NoValidHost
* Use immutable default values for args
* Update cinder generate_sample script
* XIV volume manage/unmanage support
* Add affinity/anti-affinity filters
* Bump oslo.rootwrap to 1.3.0.0a1 for Cinder
* Mock out time.sleep in storwize unit tests
* Fix the section name in CONTRIBUTING.rst
* Cinder-api service throws error on SIGHUP signal
* Clean up base Volume Driver
* Fixes EqualLogic volume live migration
* Correct misspelled word
* Remove definition of Python Source Code Encodings
* Fixed some typos in the cinder codebase
* Sync gettextutils.py from oslo-incubator
* Use PyCrypto to generate randomness passwords
* Remove $sqlite_db from default database connection
* Sync processutils and log from oslo
* Configure write cache option of tgtd iscsi driver
* Enhance docstring for iscsi_helper
* Updated from global requirements
* Ensure FC ZoneManager is called
* Remove cinder.context warning logging
* sync periodic_task fix from incubator
* Slow down Storwize driver initialization
* Updated from global requirements
* Imported Translations from Transifex
* Imported Translations from Transifex
* volume_image_metadata missing from volume list
* Correct lvm_mirrors help message
* Ceph rbd volume manage/unmanage support
* Enable E121,E122,E123,E126,E128 hacking rules
* Replace tearDown with addCleanup - Part 3
* Fix begin_detach logic
* Use (# of CPUs) osapi_volume_workers by default
* Restore osapi_volume_workers config option
* Fix host option isn't set when using multiple backend
* Add optional_args to fix Volume Import failure
* 3PAR Only remove FC Zone on last volume detach
* Ensure rbd connect exception is properly caught
* Add cinder-manage cmd to update host column
* Add cinder volume driver support for Nimble Storage
* Sync processutils from oslo with deps
* Synced jsonutils from oslo-incubator
* Enable hacking rule E111,E112,E113
* Bump minimum hacking version to 0.9.2
* Only warn about deprecation warnings once
* Fix dropped exception for create_export in vol manager
dropped exception for create_export in vol manager * Misuse of i18n log translation to output error message * Support Volume Num Weighter * Fix docstring for snapshots API * Don't use ModelBase.save() inside of transaction * Fix unsaved exception in backup/drivers * 3PAR volume manage/unmanage support * Add cgroups related commands to rootwrap filters * Use a task subclass instead of a functor + task wrapper * Fix BrcdFCSANlookupService to iterate multiple switches * GlusterFS: Handle deletion of snapshot with no backing file * Fixed data copy issue of volume/driver.py * Make rbd driver string encoding checks consistent * Remove check_volume_az_zone functor and associated passing * Minor cleanups in test_volume * Fix retyping attached volumes requiring migration * Update _resize_volume_file() to support appropriate permissions * test_storwize_vdisk_copy_ops fails if green thread context switch * VMware:Fix params for copy-image-to-volume * VMware: Optional create backing parameters * Fixes cinder volume from snapshot on Windows * Fixes cinder volume create on Windows Server 2012 R2 * Fixes cinder volume from image on Windows * Use oslo-incubator module units * Attach log listeners to other engines * Adding filter options to backup list * Remove global conf settings from iscsi helper * Add genconfig tox job for sample config file generation * Fix nfs_shares config file parsing of spaces * GlusterFS: Various unit test improvements * vmware: Force chunked transfer for upload-to-image * Sync the latest common db code from oslo * Fix order dependency of admin metadata * GlusterFS tests: Mock out compute, don't load novaclient * Updated from global requirements * debug level logs should not be translated * Implement extend volume in NFS driver * Fixes an issue with 'dd' bug from Illumos repo * Handle the case where az is disabled/removed * I/O rate limit for volume copy with qemu-img convert * I/O rate limit for volume copy with dd * glusterfs: Honor mount options when restarting cinder service * Add keyword argument missing at some exc.HTTPError subclass * Made provision for providing optional arguments * Removes unnecessary call to rbd.Image * Add task/flow listener support around the volume api flow * Retry lvremove with ignore_suspended_devices * Allow reset-state on attach and migration fields * Implements HDS-Cinder HNAS Drivers * vmware: Fixes VMDK volume incompatibility issue * Remove unused oslo module and adjust openstack-common.conf

2014.2.b1
---------

* Copy custom properties to image from volume * Add strip size support to rbd driver * Fix log capturing fixture * Fix Brocade FC driver's use of the fc_fabric_names * LIO: Don't add ACL for local initiator name * Delete image on upload-to-image failure * Ensure flushing of IO prior to removing FC device * Fixed the comment spelling error - voumes to volumes * Remove Quota Exception Stack Traces from Cinder Logs * Use os.urandom in volume transfer * Remove check_{attach,detach} from volumes API * Make begin_detaching fail if volume not "in-use" * hp_lefthand_rest_proxy no handler for logger during tests * 3PAR volume detach with host in a host set * Update cinder.conf * Sync periodic_task from oslo-incubator * Remove second get call to list/show volumes * Fix a message format error in migration cleanup * Add support for z/VM driver * Handle volumes no longer existing in resume delete * Fix ISER scan retry option * Only create volume with an active image * Updated from global requirements * Ensure metadata is saved before updating volume
status * Add XML deserializer for qos_manage delete_keys API * Use error instead of warning to log mount exc * Allow host config to be overriden in backend * Remove all mostly untranslated PO files * Updated from global requirements * Remove create_from* functor jump table * SSHPool in utils should allow customized host key missing policy * Check whether O_DIRECT is supported to iflag and oflag separately * Set volume usage audit period to not NoneType * BrcdFCSanLookupService should allow customize host key and policy * NetApp fix eseries concurrent vol map failure * NetApp fix attach fail for already mapped volume * Imported Translations from Transifex * Convert SolidFire Capacity response to GiB * eliminate the need for hplefthandclient in tests * Fix solaris_execute in SolarisISCSIDriver * Fix for solidfire driver to use reserved_percentage * Fix retyping volume that has volume type None * eliminate the need for hp3parclient in tests * Add missing methods to FakeISCSIDriver * Add mailmap entry * Fix wrong exception reference * Limit formatting routes when adding resources * Use oslo network utils function to set tcp_keepalive * Properly initialize rpc in cinder-volume-usage-audit * Add exception handling for copy_volume_to_image() * NetApp NFS: Do not reference dst_img_local before assignment * Remove explicit dependency on amqplib * Fixes an issue with 3PAR attach * Ensure that lun_id is an int * Implement validate_connector in FibreChannelDriver * Fix broken version responses * Fix double "the" in Cinder quota warning * CinderException args to strings when exceptions * Fixed 3PAR driver issue finding correct vlun * Storwize/SVC driver detach volume failed * Add disabled kwarg to service_get_all_by_topic * Add rally job * Improve consistency of help strings * Remove unused volume instance_uuid methods * Cinder list does not filter admin metadata * Specify lld in tgt config backends * Replace tearDown with addCleanup - Part 2 * Keep volume available if retype fails due to quota * Remove unused 3PAR driver method * Fix bad indentation in tests * Add set-bootable command * Fix handling multiple WWPNs on preferred FC node * Fallback to None on missing Glance image attrs * Remove old driver mappings from Havana * Adjust sample config for keystoneclient 0.8.0 release * Remove unused reservation methods from db.api * Re-raise exceptions in upload-to-image * Update Cinder dev doc * vmware: Fix problems with VIM API retry logic * Create volume fail when image id is "" * Use cached db object in volume_mig_status ext * Add exception catch if Storwize/SVC driver failed when retyping * Replace tearDown with addCleanup - Part 5 * Replace tearDown with addCleanup - Part 4 * Enable flake8 H303,H304 checking * Storwize/SVC driver crashes when check volume copy status * Switch over to FixedIntervalLoopingCall * Correct metadata ordering issue in tests * driver.create/remove_export() require elevated context * Inform about instance_uuid in volume usage notification * Check for silent failure of tgtadm remove * GlusterFS: Delete active snapshot file on volume delete * Fixes HP LeftHand driver with Paramiko 1.13.0 * Fixes cinder error state volume delete on Windows * Added unit test cases for _is_share_eligible in NFS driver * Log initialize_connection error before remove_export * Force detach should only be an admin api * Updated from global requirements * Change iogrp property when retyping for Storwize/SVC * Check that all po/pot files are valid * Allow deprecated volume update keys in v2 * 
_translate_from_glance() can cause an unnecessary HTTP request * Adds ionice command permutations to rootwrap filters * Append nas_opts to IBMNAS_NFSDriver configuration * Enable flake8 F841 checking * GET details REST API next link missing 'details' * GlusterFS: Delete volume-.info file when volume is deleted * Fix Jenkins translation jobs * Fixes HostTestCase failures due to slow test run * Imported Translations from Transifex * Updated from global requirements * Fixes cinder volume delete on Windows * Fixes cinder volume attach on Windows * Open Juno development

2014.1.rc1
----------

* Imported Translations from Transifex * Changes to correct name of missing NetApp license * NetApp cmode nfs: Fix QOS extra spec * NetApp cmode iscsi: Fix QOS extra spec * Fixes a problem in attach volume in EMC driver * Update config generator from OSLO * Pass the mirrorlog option as two arguments * Import request_id middleware bug fix from oslo * Netapp iscsi: allow snapshots with unspecified block range * Serialize the notification payload * Disable oslo.messaging debug logs * Updated from global requirements * Update tests to use CONF.set_override * Adds xiv_chap to xiv/ds8k driver configuration * vmware: Use SessionIsActive to find stale session * init_host should be called before RPC consumer is created * Add RequestContextSerializer for rpc notifications * Allow NetApp iSCSI driver to sub-clone large volumes * Can't force-create snapshot from a non-existent error volume * Remove rootwrap module * Simplify test force delete snapshot unit test * ceph backup driver: improve log messages * resolve KeyError for IBM Storwize/SVC driver * vmware: Remove deprecation warning from VC driver * Remove unused method from NetApp iscsi driver * vmware: Remove pbm_default_policy config option * VMware: Implement vmdk extend_volume * Fix create_export/remove_export in driver.py * Imported Translations from Transifex * vmware:Ignore inaccessible/inMaintenance datastore * Ensure name is utf-8 when deleting rbd vol or snap * Use six.moves.urllib.parse instead of urlparse * Use the error_out_volume from flow common instead * Revert "Re-enable lazy translation" * Sync latest Oslo code for imageutils * Don't send untextified exc to webob * Imported Translations from Transifex * Updated from global requirements * Use debug level logging during unit tests * Sync log.py from oslo-incubator * Fixed some FCZM unit tests hacking issues * Add missing config values for vmwware_vmdk test * cinder-rtstool imports a non-existent module * get volumes with limit and filters does not work * Fixes cinder-volume service startup on Windows * Fixed nova VM live migration issue with 3PAR * Adding domain to context * Switch over to oslosphinx * Add libffi-dev to list of packages to install in dev env * VMware: Take the volume size from the user input * Fix exception message of CoraidESMConfigureError * vmware: Mark VMware ESX vmdk driver as deprecated * Fixes ssh-injection error while using chap authentication * Generate config samples for oslo.messaging * Add conversion types in some strings * Port to oslo.messaging * Updated from global requirements * get volumes API does not handle limit=0 * EMC SMI-S delete snapshot unit test takes too long * 3PAR: Support extend volume based on snapshot * Fixed spelling error - accomodate to accommodate * GPFS unit tests: increased coverage, uses mock * Clean Up EMC VNX Direct Driver in Cinder * gpfs driver: fix logging problems * Convert cinder utils tests to use mock * Include next link when default limit
is reached * Re-enable lazy translation * Sync latest Oslo config code for i18n * Fix HP LeftHand Performance issue with AO * NetApp implementation for copy offload in clustered nfs driver

2014.1.b3
---------

* Remove str() from LOG.* and exceptions * Storwize volume manage/unmanage support * Volume manage/unmanage support * Add user defined extra capabilities * remove _check_container_exists from Swift backup driver * Add initiator_target_map for IBM Storwize/SVC * Fix HP LeftHand migration with snapshots * Updated from global requirements * Fix docstring ordering * Typo corrections for test files in cinder * vmware: PBM wsdl file configuration * vmware: default global pbm policy configuration * vmware: check datastore availability during create * vmware: Storage policy based volume placement * Add EMC VNX Direct Driver in Cinder * gpfs volume driver backup file access fixes * Check if snapshot is deleted cleanly * Restrict rootwrap find filter for IBM NAS and GPFS * Add initiator target map in EMC SMI-S FC driver * GlusterFS: Set permissions on qcow2 snapshot files * Make EMC SMI-S driver unit tests faster * change time.sleep to use loopingcall * Change RBD delete failure log level to warn * Updated from global requirements * Update Oslo wiki link in README * Add versioning output for the FC Zone Manager * Fix volume stats with multiple LeftHand clusters * Export and import backup service metadata * Don't clear host_state_map when scheduling * Add volume metadata backup support to swift driver * Add optional ionice to volume clearing process * Quota delete operation in cinder * Restrict rootwrap find filter for NetAppNFS driver * GlusterFS: Increase snapshot delete job timeout to two hours * Segment LUN clones in NetApp iSCSI * updating testing readme with more current information * Remove unused variable * Python 3: replace "im_self" by "__self__" * Update FibreChannel Zone Manager config * Change warning message in NetApp driver for vsadmin creds * 3PAR: Fix extend volume GiB to MiB * TSM backup driver changes to support file backup * Fix silly error in comment * 3PAR: Create volume from snapshot with larger size * Fix free_capacity reporting in SolidFire driver * Fix test_delete_should_not_..
to assert something * Replace assertEqual(None, *) with assertIsNone in tests * Replace tearDown with addCleanup * Use six.StringIO instead of StringIO.StringIO * Implement retype in IBM GPFS Driver and refactor * 3PAR: Delete missing snapshot stuck in error_del * Added 3par initiator target map for FCZM * Fix race in test_delete_backup * Driver for IBM SONAS and Storwize V7000 Unified * Fix webob.exc.HTTPForbidden parameter miss * Add snapshot related data to limits api * Storwize/SVC: Change volume copy task to async * Fix FC connection handling in the storwize driver * Sync log.py from oslo * Replace httplib.HTTPSConnection in unittests * Add support for FC zone lifecycle management * Give a way to save why a service has been disabled * Remove old driver mapping deprecation * 3PAR: Backend assisted volume migrate * Add HP MSA Fiber Channel driver * Ensure return for db api functions * HP LeftHand Backend assisted volume migrate * Add support for qos_specs feature to 3PAR drivers * Add x-openstack-request-id to cinder responses * Update 3PAR drivers to pass cert test * Remove unused function * Use len instead of for-loop to get the end index * Ensures NetApp iSCSI driver correctly compares int values for size * Sync request_id, request_utils for cinder * IBM XIV and DS8000 Driver reorganizing (IBM dir) * Sync oslo imageutils, strutils to cinder * GPFS: Implement volume backup and restore * Fix missing package dependency for requests * test_volume unittest fails if ran only this module * Fix invalid facilities documented in rootwrap.conf * Use six.moves cStringIO instead of cStringIO * NetApp api fix structure conversion methods * Add support for backing up volume metadata * Imported Translations from Transifex * Replace assertEqual(None, *) with assertIsNone in tests * Add encrypted flag to volumes * Implement retype in HP LeftHand driver * Cast the quota set values to integer before checking their validity * Remove rabbit_notifier (Sync notifier with oslo d6e1ba7) * Remove dependent module py3kcompat * Add EMC SMI-S FC Driver in Cinder * Fix wrong example of "nova_endpoint_template" * NetApp eseries iscsi driver implementation * Update gpfs driver volume creation process * Deactivate LV before removing * VMware: changing volumeops tests from mox to mock * Remove unused exception * Add searchDepth when getClusterInfo called * Check type argument on create from source and snap * Rename Openstack to OpenStack * Removes use of timeutils.set_time_override * Removed unused context in _extend_snapshot method * Remove unused methods * Storwize/SVC: Check wwpn not None * Changes to cast variable as integer as XML API fails * Ceph backup driver tidyup * Move create_, remove_ and ensure_export from drivers * New HP LeftHand array iSCSI driver * GlusterFS: Fix create/restore backup * Allow operators to customize max header size * Fixup persistence file not found on tgt remove * Remove tox locale overrides * Add method for unit tests to set logging level * Brick support for pNFS * Storwize/SVC: Fix races in host-related functions * Fix cinder-backup volume restore with ceph driver * Dont set error_extending status on API extend call * Fix misspellings in cinder * Fixes cinder failed to create/restore a backup with NFS driver * Brick fix BrickException message formatting * lvm: unhandled exception when migrating volume * Implement retype in SolidFire driver * Validate the quota in the API layer for volume extend * Rename self.tgtadm to self.target_helper * Fix LVM migrate_volume tests * Brick 
connector fix for GlusterFS * Updated from global requirements * vmware: intermittent failure in test_vmware_vmdk * RBD unit test improvements * Move clear_volume back to its own method * Don't use shred for volume_clear=zero * Nexenta iSCSI driver: fixed volume_migration * Move clear_volume method to volume.utils * Add update support to volume type encryption * LVM: log thin pool name and size upon creation * Remove create_export from volume create * vmdk: To add missing time unit in driver option * Update SolidFire Driver to use cinder's units * Update cinder.conf.sample for new keystone client * LVM: remove redundant casts to float * On create_volume flow get rid of host parameter * Imported Translations from Transifex * Allow spaces in host names in the storwize driver * Remove a catching exception during delete_volume * Remove SSH code from 3PAR drivers * Remove unused task from manager create_volume flow * Add support for special char in volume metadata * Brick LVM: Handle space info as numeric types * Set a sane default for state_path * Fixes incorrect key in dictionary * Stop volume_type_encryption creation when in use * Revert initialize_connection changes * Convert ceph backup unit tests from mox to mock * VolumeManager: initialize even if a volume can't be found * Add create_iscsi_target stub to TargetAdmin * 3PAR iSCSI volumes attach to single nsp * Extra_spec containing '/' can't be deleted * LVM: Robustify skipactivation detection * Make sure report_interval is less than service_down_time * Redundant check in os-migrate_volume_completion * Updated error messages for volume clone * Imported Translations from Transifex * Updated from global requirements * Fix up the way we do iqn variable in unit test * Catch new iscsi exception * Delete volume transfer in volume_destroy function * Create structure of flows' packages * Fix docstring and remove unused variable * GlusterFS: Fix deadlock in volume clone * Enable multi-process for API service * Sync RPC module from Oslo * Sync common modules from Oslo * Sync py3kcompat, sslutils, versionutils from Oslo * Sync gettextutils from Oslo * Storwize driver cleanup * Add support for retype in Storwize/SVC driver * Add notifier events to cinder volume rename, reset-state

2014.1.b2
---------

* Convert RBD unit tests to use mock instead of mox * Fixed inconsistency in iqn * Update HACKING.rst with regard to mock usage * Remove unused method 'is_key_value_present()' * Remove unused config options * Remove unused exceptions * Do not show quota of removed volume types in Default Quotas panel * Fix up calculating space info for mirrored volumes * Rename __init__.py to create_volume.py * Use oslo.rootwrap library instead of local copy * Fix UnboundLocalError in TgtAdm.update_iscsi_target * Update host on driver retype * Remove unused variable in restore_backup method * Ensure hostnames are converted to IP for comparison * Add Backup Service to 'cinder-all' script * Remove env from rootwrap filter * Allow user to specify audit period * Fix exception log msg in attach volume method * Fix import log_handler error with publish_errors set * Use a mirrored mirror log * Added missing accept_transfer to FC * Register EMC config options globally * Fix os.getlogin() problem with no tty * Updates for version list to show correct references * Fix cross-import bug in cinder.db.sqlalchemy.api * Pull Bug #1263122 fix for service module from Oslo * Pull latest scheduler change from Oslo * Use loopingcall from openstack-common * Use a *args pass-in instead of a
list one * Remove unused variable in os-extend api * GlusterFS: Synchronize additional operations * Move driver initialization check into the method * Update cinder.conf.sample for keystoneclient change * Transfer creation doesn't support 'name' via xml * Change default policy for volume_extension:volume_tenant_attribute * Print driver exception on retype * Drop Chance/SimpleScheduler Implementation * Fix sqlalchemy bug in transfer_get_all_by_project * Fix sheepdog copy_image_to_volume method * NFS/GlusterFS: Skip incorrectly formatted shares * Remove unused message from iogrp_data setup * Remove legacy config option 'connection_type' * Modify default prefix for solidfire account * Add time_type dictionary to test_migrations * 3PAR: Raise Ex when del snapshot with depend vol * Add bool_type dictionary to test_migrations * Hiding postgresql password in connection string * Fixed a problem in iSCSI multipath * Fix the invalid argument of webob.exc.HTTPBadRequest * Add ability to modify volume type * Fix downgrade in 002_quota_class.py for MySQL * Removed deprecated config option hp3par_domain * Fix Brick LVM test command parsing * Update V2 API to return detailed volume information on create * LVM: Fix "intialized" typo in warning msg * Imported Translations from Transifex * removed unused context in check_*** methods * add 'force' verification in _volume_upload_image * Raise max header size to accommodate large tokens * LVM: update iscsi target on volume attach * LVM: Activate Thin Pool LV upon initialization * GlusterFS: Use correct base argument when deleting attached snaps * Switch to Oslo's config generator * Removed copyright from empty files * Remove unused fake_flags * Replace Simple/Chance Scheduler with FilterScheduler * Reduce the redundant variable declarations * Imported Translations from Transifex * Remove vim header * Redundant size check in volume restore api * Add AllocatedCapacityWeigher * Imported Translations from Transifex * Adding helpful URL links to README.rst and HACKING.rst * Handle terminate_connection() exception in volume manager * Empty files shouldn't contain copyright nor license * Bugfix missing foreign key removal for mysql * Fix spelling errors * Imported Translations from Transifex * Add additional metadata as key-value pairs in 3PAR * Handle initialize_connection() exception in volume manager * Output Strings of bin/*.py should support i18n * Add qos_specs support to solidfire driver * Service launcher method docstring corrected * Fix QoS information in initialize_connection() result * Fix and enable gating on F401 * Only reverify backing lun when create backing lun * Set volume_dd_blocksize configurable on per-driver basis * Add exception logging if flow creation fails * Remove dynamic default on sf_account_prefix * make delete recovery messages debug level * Remove unused code from volume manager (reset_stats) * Pylint fixes for GlusterFS driver * Pylint fixes for Brick iSCSI/LVM * 3PAR FC: add ability to add WWNs to host * Imported Translations from Transifex * Adjust import order according to PEP8 imports rule * Do not clone non-raw images in rbd backend * Adds unit tests for drivers.rbd.RBDImageIOWrapper * [Netapp/Nexenta] Move registration of config options * Fix and enable gating on H402 * LVM: Activate source snap LV when cloning from volume * Remove test that was no longer used for long * make help text more meaningful for cinder docs * Switch create volume commands to Taskflow 0.1.1 * Use mock for scheduler tests * Remove LANG=C from rootwrap 
invocations * Add the driver name to get stats log output * Remove hashbang (#!) at beginning of python modules * Fix KeyError while generating a WSGI response * Updated from global requirements * Lazy log the fixed_key warnings * Add disabled_reason field to services table * Catch TypeError when new_size is None on extend * Sync matchmaker_ring.py from oslo-incubator * Add RBD test for volume not existing during delete * Sync rpc fix from oslo-incubator * Returns thin pool free space calculated from actual usage * Brick LVM: Set C locale when gathering PV info * LVM migration: Check if name is equal to dest_vg * Convert lvm_mirrors to int * LVM migrate: Use keywords for the brick instance * LVM: Create thin pools of adequate size * GlusterFS: Remove glusterfs_disk_util option * Catch ImageBusy exception when deleting rbd volume * Adds lock for create from vol/snap to avoid race conditions * Fix docstring for snapshot_metadata controller * Fixes case insensitive for resp body * VMDK:Using host mount info for datastore selection * Fixes case insensitive for resp body

2014.1.b1
---------

* All API controllers inherit from wsgi.Controller * delete.start/delete.end notification for hostless * Fix typo/misspelled words * Update hacking to hacking>=0.8.0,<0.9 * Add more logging to migrate_volume_completion * Use model_query() in db.*****_destroy * Change method name to test_get_volume_stats * Adjust RBD delete log level * Bump to sqlalchemy-migrate 0.8.2 * Add unit tests for volume reserve and unreserve * Don't stop volume service for failed re-export operations * GlusterFS: Complete snapshot_delete when info doesn't exist * Fix typo in cinder * Imported Translations from Transifex * Add attach/detach notifications * Removes duplicated assert from test_migrations.py * Use assertAlmostEqual instead of failUnlessAlmostEqual in unit tests * Fixing check order for empty body in get_body() * Updates .gitignore * Remove unused import and CinderNode sqlalchemy model * Fix suppressed exceptions for migration downgrade * Fix the wrong verification for 'readonly' * Parse out '@' in volume['host'] to do discovery * Add volume migration code to Nexenta iSCSI volume driver * Handle NotFound exception in snapshots API code * Add chance weigher to scheduler * Redundant body validation for volume_upload_image * Imported Translations from Transifex * Fix Storwize terminate_connection with no host * Fix _update_volume_stats typos * Remove the redundant judgment for 'restore' * Make volume_glance_metadata_create compat with DB2 * GlusterFS: Set correct permissions for volume file created via clone * GlusterFS: Ensure Cinder can write to shares * The param 'readonly' is incorrectly checked * Fix docstring for Snapshot model * Make sure report_interval is less than service_down_time * Ensure 'status' in update_snapshot_status * Update openstack/common/periodic_task * Initialize and terminate connection raise 500 err * Fix docstring for _migrate_volume_completion * Migrate volume should check param "host" in request * Continue to delete volumes that DNE in rbd backend * Pull latest service module from Oslo * Add greenthread.sleep() to parent wait() * Fix ./run_tests.sh -V --virtual-env-name * Pass the size when fetching image in xenapi driver * Remove unused code in test_admin_actions.py * Add support for extend volume in GPFS vol driver * Remove dead code from test_get_volume_stats() * Remove suffixes from LVM brick test vgs output * Subclass vendor specific exceptions * Don't do glance v2 calls when config is set to
v1 * LVM: Activate source LV before cloning from it * Add default quota class into DB during migration * To fix test_get_dss_rp in test_vmware_vmdk.py * Fix typo in cinder.volume.API * NetApp fix for vsadmin role failure for ssc * Create snapshot throws 500 Internal Error * Fixes inappropriate error message * NetApp fix free space as zero during 1st vol stats update * Add valid check and unit tests on quota class * GlusterFS: Synchronize operations that manipulate qcow2 data * Check only our VG name when testing if VG exists * Update quota-set throw 500 error * Using HttpNfcLease to transfer vmdk files * Adds extend volume to Dell EqualLogic Driver * Remove the use of common.uuidutils.gen_uuid * Imported Translations from Transifex * Do not allow bad keys while updating quota * Use cached volumes in REST API extensions * Enable object caching in cinder REST API requests * Nexenta iSCSI driver: extend volume stats of _update_volume_stats * Fail when image is bigger than the volume * Update URL for global HACKING document and remove duplicate section * Retrieve volume image metadata using single query * Add call to retrieve image metadata for volumes in bulk * Do not remove volume silently if GPFS is unmounted * Report zero capacity if GPFS is unmounted * Nexenta NFS driver refactoring * RequestContext initialization failed in cinder * Nexenta: Remove snapshot after volume-clone deletion * Don't use deprecated module commands * Remove dup of LVMISCSIDriver in LVMISERDriver * Remove duplication of ISCSIDriver in ISERDriver * Support volume_readonly_update using XML format * Fix typo in test_check_ssh_injection_on error test * Remove lvm-thin pool_size config option * Examine if GPFS is mounted before writing data * Imported Translations from Transifex * Remove unused db calls to fetch original metadata * replace u\2013 char with dash * Sync log from oslo * Add tests for LVM -cow clearing * clean up numeric expressions in test * Fixes typo in method name _notify_voloume_type_error * Allow spaces in quoted SSH command arguments * Use pipe between ceph backup diff export/import * Imported Translations from Transifex * Add missing space to num_iser_scan_tries text * Add cinder.db.migration.db_initial_version() * remove rundundant lockfile requirement * Imported Translations from Transifex * Revert "Brick connector fix for NFS drivers" * Update my mailmap * GlusterFS: set correct filename when cloning volume * Handle NotFound exceptions in API * Unit test fails in pbuilder environment * Updated from global requirements * Check if dir exists before calling listdir * Rename "rtstool" to "cinder-rtstool", add dep * Downgrade target create failure mesg to warning * Nexenta iSCSI driver: Refactor create_cloned_volume * VMware: Registering vmdk opts in global space * Brick connector revised fix for NFS drivers * Nexenta drivers ignore "does not exist" exception * Add openstack/common/crypto from OSLO * Fix volume transfer href issue * Remove duplication of brick.iscsi in brick.iser * Drop auth_token configs for api-paste.ini * NetApp unit test fail fix for http_proxy * Revert "remove cinder-rtstool because of rtslib dep" * Let GPFS driver to rename snapshot with root permission * Imported Translations from Transifex * NetApp fix for 7mode iscsi volume stats * Brick connector fix for NFS drivers * NetApp fix ssc volume filtering inconsistency * Updated from global requirements * NetApp fix mirrored stats * NetApp fix for compression and dedup stats * Fix generate conf script can't handle multistropt * 
Add auth_token settings to cinder.conf.sample * Add extend_volume for Huawei drivers * Update openstack/common/notifier * Imported Translations from Transifex * Apply six for metaclass * Provide gettext _ in missing locations * Nexenta NFS driver: caching for appliance volroot * Cinder extension to add used resources in absolute limits * Fix Huawei HVS driver AttributeError * Storwize: Fix iogrp availability check * Imported Translations from Transifex * Uses oslo.imageutils * Don't zero out thin provisioned LV's on delete * Fix lvm.extend_volume to pass Gig suffix * Nexenta NFS volume driver folder auto sharing * FK lookup failures during migration * Initialize shares variables for RemoteFsDriver(s) * Fix indentation errors in drivers * Imported Translations from Transifex * Fix Huawei drivers to support other host OSs * Fix all occurences of H404 Hacking warning * Imported Translations from Transifex * VMware: Fixed upload-to-image for available volume * Refactor Nexenta iSCSI driver * Remove unused 'initiator' imports * Fix tests to work in debug mode * Updated from global requirements * Remove whitespace from cfg options * Remove option count from sample configuration * improves lvm version parsing for customised builds * Fix typo in cinder.volume.drivers.nexenta.__init__ * Remove obsolete redhat-eventlet.patch * long flashcopy operation may block volume service * Support Huawei driver upgrade from grizzly to havana * Imported Translations from Transifex * VMware: Disallow snapshot of attached volume * Clean up comparison assertions * Utilizes assertIsNone and assertIsNotNone * Nexenta volume drivers: refactor NexentaJSONProxy * remove unused methods in driver.Scheduler * Imported Translations from Transifex * Nexenta iSCSI driver fix _lu_exists * Ignore H803 from Hacking * Drop conf_key_mgr warning message! 
* VMware: Re-create session for RetrievePropertiesEx * use cinder utils.get_root_helper * Provide user with more information on quota fail * Cleanup and more tests for db.api methods * Fix broken solidfire create-snapshot * Clean CONF out of brick iser * Open Icehouse development * Imported Translations from Transifex * Add key manager implementation with static key * Remove need for CONF access in brick iscsi * Quotas roll back failure of create volume task * Remove incorrect class in cinder.conf.sample * Fixes incorrect class path in logging_sample.conf * Storwize SVC driver hostname can't start with number * After committing quota we should avoid certain reverts * Remove CONF from brick remotefs * Pass through args and kwargs in brick connectors * Clean CONF out of brick initiator * Update Babel from Openstack Requirements * Disable lazy translation * Improve gpfs config flag help text readability * Check for backing lun on iscsi target create * usedevelop in tox * Fixes ceph backup import errors * Add XML response tests for qos specs manage ext * v2 api - return bootable attr value on volume list * Fixes backup with multiple volume backend * Don't retry if target creation succeeds * VMware ESX: Fixes vol clone & clone from snapshot * Create volume revert fails for non admin user * VMware: Using RetrievePropertiesEx & multi ESX scan * Fix XML serializer for QoS Specs extension * Fix Huawei HVS driver attaching volume error * Add debug logging for targets * Add support for querying the quotas usage * Validate force_host_copy API param for migration * Imported Translations from Transifex * Update OpenStack Style Commandments link * Set vol driver initialized before deleting volumes * Add error logs for Huawei driver * Clean CONF out of brick exception * Fix translation of CinderExceptions in REST API * Allow upgrade from Grizzly with ThinLVMVolumeDriver * Use module units for some drivers * Get host group id when Huawei driver initializing * Fix mandatory and optional args for create_volume * Pass correct args to vol_rpc create_volume calls * Fix processutils.execute errors on windows * Sync gettextutils from oslo * LVM volume_clear: error on unexpected inputs * Revert "Fix volume_rpcapi calls for chance/simple scheds" * Fix finish_volume_migration() on SQLAlchemy 0.8.x * VMware: Handles no datastores case * Fixes some typos in cinder * Update rootwrap with code from oslo * Specific /usr/local/bin/hus-cmd rootwrap filter * Allow v2 Volume API to create volume with type name * Imported Translations from Transifex * Fix volume_rpcapi calls for chance/simple scheds * Require assisted_volume_snapshots from novaclient * Fix over-indent in compute/nova * Add sg_scan filter to rootwrap * Add extend to reference LVM driver * Fix issues with failed lvremove * GlusterFS: Copy snap from correct source file * GlusterFS: Use image_utils for qemu-img calls * Remove default root_helper of sudo for remotefs * Add a retry to create_iscsi_target for LVM * Fix HP3PAR iSCSI path connection * Added mapper for update_all on snapshot_metadata * Add volume metadata to v2 * Enforce driver is initialized * Added mapper for snapshot_metadata * Fix type change in bootable setting of volume view * Add logging prior to raising exceptions * GPFS Driver missing clone depth limit for snapshots * remove VolumeNotFoundForInstance class * Sync gettextutils from oslo * Use built-in print() instead of print statement * Fixes vol restore discard final bytes unzeroed * Fixes call GlanceConnectionFailed in invalid ARG * Fixes
call VolumeNotFound in the invalid argument * Soft delete tmp migration volume * Fix __init__ methods of brick initiator connectors * Fix secure delete for thick LVM snapshots * assertEquals is deprecated, use assertEqual * Storwize/SVC: Optional CHAP authentication * Fix huawei driver test issues * fix wrong description of monkey_patch config * Allow display_name for v2 snapshot-update * Pass down root_helper in more cases * Set rootwrap_config path to rootwrap.conf * Do not use qemu-img --backing-chain or --output=json * VMware driver: Fix for invalid datastore selection * Fixes ceph volume restore to larger image than source * Imported Translations from Transifex * nms.folder.create_with_opts not supported on Nexenta 3.1.4.2 * Use $state_path/conversion for image_conversion_dir default * Improves the parsing of ssh returns * Fixes the use of exception.InvalidInput with the wrong arguments * Remove unused exceptions * Fix client connection leaks in HP3PAR drivers * Add default_availability_zone configuration option to cinder * Imported Translations from Transifex * Turn db model object into a primitive object to avoid error * Catch generic exceptions * Add delete support for volume type encryption * Adds Dell EqualLogic volume driver for Cinder * Fixing UnicodeEncodeError against volume creating function * Fix deleting qos specs key * Move novaclient to requirements.txt * fix missing unit in log message * Add check for qemu-img to image_utils fetch_to_raw * Changed header from LLC to Foundation based on trademark policies * Fixed erroneous force full copy in ceph backup driver * Call to_primitive on volumes.rpcapi.create_volume * Fix typo in cinder.tests.test_create_volume_flow * Fix Qos Specs association corner case * Fixes pep8 violation in nova * Fix bug in Nexenta NFS driver _do_create_volume * Restrict Volume type deletion with volumes assoc * Replace assertEquals with assertEqual - 2/2 * Check cinder-backup service before "backing-up" * Do not attempt vg.update on uninitialized vg * Replace assertEquals with assertEqual - 1/2 * Add support for LocalConnector type in brick * Remove unused/redundant methods in cinder/test.py * Fix error casting value to float in lvm.py * Fixes misuse of assertTrue in test scripts * Utilizes assertIsNotNone * Utilize assertIsInstance * Remove deprecated assert_() usage * Fix brick remotefs dependency on cinder * Remove quota fetch race condition * Synchronize extend_volume methods in 3PAR drivers * Added copy-on-write support for all RBD cloning

2013.2.b3
---------

* fix log string in conversion type * VMDK copy_image_to_volume and copy_volume_to_image * Validate VV Set exists in 3PAR drivers * This adds a README to brick * Fix tuple usage error * Fixes brick Nova pep8 violation for lvm.py * fix inconsistent i18n log message * QEMU-assisted-snapshots for GlusterFS volumes * Add view builder to QoS specs API extension * Add features to Zadara Storage Cinder driver * Use tempfile and cleanup in windows unit test * Adds Nexenta NFS driver * Set vg_thin_pool to pool name instead of pool_path * Fixes cinder-volume service startup on Windows * extract 'limits.'
to constant for ratelimiting logic * Send notifications when extending volume * Fix errors in volume usage audit script * New update_snapshot_status API * Add volume driver for Huawei HVS storage system * Increase test coverage for cinder.utils * Add Fibre Channel drivers for Huawei storage systems * Refactor huawei Dorado array iSCSI driver * Refactor Huawei iSCSI driver * Enable gating on F811 * Add support for Havana missing features in Windows driver * Add venv wrapper for check_uptodate.sh * Clone volume with right size with SolidFire * Fixes bug to allow for encrypted volume deletion * Sync rpc fix from oslo-incubator * Move comment back to right place * copy_image_to_volume for Nexenta volume driver * Fix pep8 violation in backup * Utilizes assertIn and assertNotIn * Implements APIs for VMDK driver * Remove _create_volume function from several tests * Don't need to init testr explicitly * Add missing LH SAN driver features for Havana * Multi storage backend support for Nexenta driver * Fix typo in bin/cinder-volume-usage-audit * Remove unused methods from cinder.utils * Increase test coverage for cinder.image.image_utils * Add kwargs to create_volume in tests/utils.py * Update the version for the FC and iSCSI driver * Pass MB size on copy_volume_data call copy_volume * Adding Read-Only volume attaching support to Cinder * Add NFS/GlusterFS support to brick library * Pass db into driver as constructor's parameter * Modified 3PAR drives to support 3parclient 2.0.0 * Move create_volume flow to a subfolder * Import order cleanup * Migrate manage script needs import of db session module * Migration for attached volumes * Add optimized volume migration to Storwize/SVC * Fix quota update validation for non-int types * Imported Translations from Transifex * Removes exception instance creation on execute() * Fix except in lvm.py * Add automated check of conf sample * Remove deprecated code from Nexenta Exception class * Sync up with global requirements * Extend volume for GlusterFS * Offline snapshots for GlusterFS volumes * Ensure that qpid connection is closed (from oslo) * Imported Translations from Transifex * Test WWNs with basestring * Imported Translations from Transifex * Remove print statement in db api test * Ignore stmf target must be offline exception * Sync execute() related exceptions with oslo * The DB migration shouldn't populate types table * Use a new rest client for every Coraid ESM command * Remove unused methods from LVM driver * Storwize/SVC: allow setting of I/O group * Implement QoS support for volumes * Move the frequently injection task to the base folder * Move root task class to base file * Backup driver for IBM Tivoli Storage manager (TSM) * Dont crash service if sf cluster isnt available * 3PAR driver add missing domain check on QOS cmd * Remove unused methods from cinder.utils * Refactor cinder/tests/test_volume.py * Unified Volume Driver for IBM XIV and IBM DS8K * Adds brick helpers to cinder utils * Fix python 3 pep8 errors for print * Fix incorrect msgstr's to avoid translation errors * GPFS use clone_image for creating volumes * 3PAR driver terminate connection host validation * Re-enable a lot of cinder scheduler tests * emit warning while running flake8 without virtual env * Set bootable flag for volume cloned from image * Remove unused methods from cinder.utils * Clean up the sqlalchemy migrate manage.py script * Allow to delete a volume in error_extending status * Update Brick to use executor * flake8 H202 error in test_image_utils.py * Removes 
ssh_execute in utils.py * Fix volume_glance_metadata deletion * Use system locale when Accept-Language header is not provided * Generic backup_volume and restore_backup functions * Relax policy so owner can access encryption info * Fix Fibre Channel attach for single WWN * Make the SolidFire driver api port configurable * Add accept_transfer to solidfire driver * Added need info to accept_transfer * Allow volume create from source unless in error status * Avoid serializing CinderExceptions before they are translated * Add root_helper param to get_connector_properties * Standardize on ID for log messages * Reduce hidden effects of sqlalchemy objects * Removed need for domain in 3PAR drivers * Allow Cinder to call Nova client * Use FakeLoopingCall instead of the real one * Fix some pylint error in Coraid Driver * Storwize/SVC: More error logging * Remove strcmp_const_time * Refactor LVM driver to use Brick VG utility * Added missing import * Fixes SSH injection threat in 3PAR driver * Implement missing Coraid Driver functionality for Havana * Increase test coverage brick/initiator/connector * Fix SSH injection threat in 3PAR driver * refactor/unify driver version strings * Refactor Nexenta driver * Update Nexenta ISCSI volume driver authors * Extract ISCSI tries option into connector module * Externalize error messages in the v2 API * Add more asserts to the limiter unit tests to test the RateLimit * Replace os.unlink with delete_if_exists * No need to declare the exception conf * Add support for encrypted volumes * Add tests for cinder/brick/initiator/connector * Tidy up the SSH call to avoid injection attacks for HP's driver * Raise exception when Glance metadata not found * Interprete scoped key as nested tags * Adding the -online option to the 3PAR clone * Fixes some unseen flake8 violations * Fixes volume clone from volume * Fixes docstring formats in connector.py * Fixes files with wrong bitmode * Add unit tests for cinder/api/contrib/quotas * remove Brick deps on cinder.exception * Remove Brick iser dependency on cinder * Fix handling ImageUnacceptable in create_volume * Use native methods for list manipulation * Fix signature of _create_volume() in ThinLVMVolumeDriver * Add H233 to ignores in tox.ini * Imported Translations from Transifex * Add support for volume cloning to Nexenta driver * Fix ratelimiting * GPFS support for various volume attributes * Upgrade Scality driver to match minimum features * Ignore purge_props for v2 Glance api and fix upload * Add support for API message localization * 3PAR drivers creating incorrect comment data * Imported Translations from Transifex * Use utils.safe_minidom_parse_string in extensions * Move resource usage sync functions to db backend * Imported Translations from Transifex * Refactoring of create_volume to use taskflow * Add minimum features in HDS driver (for Havana & Icehouse) * Ignore stmf target must be offline exception * Added glance_request_timeout config option * Set lock_path in tests * 3PAR volumes created from snaps failed to attach * Add test for brick.local_dev.lvm * Imported Translations from Transifex * Remove Brick's iscsi dependency on cinder * Remove locals() from iser * Move volume_clear and clear_size opts up to driver * Imported Translations from Transifex * Set the concurrent connections on the 3PAR array * Create key manager interface * Remove usage of obsolete oslo.exception * Fixes create rbd volume from image v1 glance api * Imported Translations from Transifex * Remove Storage Manager from cinder-manage * 
Remove cinder.exception from Brick * Add bin directory to flake8 when not in venv * Add support for volume extension to Nexenta Systems volume driver * GPFS Verify min release level for mmclone command * Sync gettextutils from oslo * Add eclipse project files to .gitignore * Remove unnecessary metadata from the 3PAR drivers * Adding support for iSER transport protocol * NetApp fix clone image compatibility issue with ssc * Set bootable flag for volume serializer * Fix chown fail for nfs file without necessary permission * Add new persona value in the 3PAR driver * Update driver version to 1.1 * Fix NetApp iscsi drivers for cinder backup * Fix pep8 and pylint violation in Nexenta volume driver * Test tools barfs on reusage of 'id' attribute * Ignore "volume does not exist error" * Call get_session() only when necessary * Fix volume_create()/snapshot_create() DB methods * Execute DB API methods in a single transaction * Improve DB API test coverage * Fix check for mount.nfs helper installation * Imported Translations from Transifex * Remove xen storage manager tables * Remove unused migration_* methods from db api * Factorize code between nfs.py and glusterfs.py * NetApp fix create vol different size than snapshot * LVM / Block Device Drivers: Fix duplicated flags * tox.ini: Change sitepackages to False * Tidy up the SSH call to avoid injection attacks in storwize_svc * NetApp check for 7 mode controller version * Storwize/SVC: Use reserved percentage from conf * Imported Translations from Transifex * Pop out 'offset' and 'limit' before use for filter * Imported Translations from Transifex * Fix running of migrations tests by Jenkins gate * Update to latest oslo rootwrap * Make unicode-to-utf8 conversion universal in ceph backup driver * Add more info to delete error message * Update references with new Mailing List location * Allow connect by FC-only or iSCSI-only systems * NetApp NFS efficient clone_image impl * Removed the dep on cinder.utils * Fix the multi-backend storge issue for ZMQ * NetApp storage service feature support * Imported Translations from Transifex * Create volume from snapshot must be in the same AZ as snapshot * Using volume name property instead of using template and id * Fix unit suffix and add no_suffix option * GPFS stub calls to truncate and dd in unit tests * Storwize/SVC: Use VolumeDriver's copy vol<->image * Implements extend volume feature in HP 3PAR driver * use encode('utf8') instead of str() * Imported Translations from Transifex * Migration for detached volumes with no snaps * Fix cinder error for deprecated Netapp drivers * get_snapshot should populate the snapshot metadata * Adding driver minimum features and volume stats to dev doc * Update RBD driver to be compliant with HACKING * GPFS convert glance image to raw only when needed * Fix oslo.config.cfg.NoSuchOptError when running individual tests * Fixes RBD driver docstring format issues * fix name 'update_volume_status' to 'update_volume_stats' * use 'exc_info=1' instead of import traceback * Fix further Hacking 0.6.x warnings * Add create & attach times to SolidFire attributes * Implement extend volume for Storwize/SVC * Cleanup README.rst * Fix volumes search by metadata * Add test for volume status check when extending * 3PAR Driver modifications to support QOS * Make Storwize/SVC tests work without simulator * Revert hardening of Storwize/SVC SSH commands * Clone_image method added image_id as parameter * Added incremental backup support to Ceph backup driver * Sync gettextutils from oslo * 
Imported Translations from Transifex * Fix duplicate config options * Move copy_volume function to volume/utils.py * Fixes default value of use_default_quota_class * Imported Translations from Transifex * Delete snapshot metadata when snapshot is deleted * Tidy up the SSH call to avoid injection attacks in storwize_svc * Fix extend_volume error handling * Fixes race condition in LVMVolumeDriver create_cloned_volume method * Checks the volume_clear flag and just return if it is none

2013.2.b2
---------

* Fixes Opt type of use_multipath_for_image_xfer * Fixes Opt types in cinder/backup/drivers/ceph.py * Fix indent in cinder/volume/configuration.py * Implement validate_connector for Storwize/SVC * Fix error when QuotaUsage.updated_at is NULL * Rename SolidFire driver for consistency * Add Brick Fibre Channel attach/detach support * Increase timeout period for clone volume * Be sure to check deleted types on quota update * CoraidDriver: Allow volumes in error state to be deleted * Adds multiple iSCSI port support to 3PAR * Implement extend volume functionality in Sheepdog * Mark methods used in class only with prefix "_" * Add the field user_id into the volume detailed information * Catch additional connect fail cases * Clean up Huawei tmp files from tests * Add flag argument to 'cinder-manage config list' * Imported Translations from Transifex * Add generic block device driver * Use base ISCSI driver to fulfill some driver requirements * Cleanup and make HACKING.rst DRYer * Clone_image should return dict of vol properties, clone status * Update requirements from openstack/requirements * Refactor SSHPool.get() to use Pool.get() * Enable zero the snapshot when delete snapshot in LVMVolumeDriver * Fixes ceph-backup failure if original volume deleted * Implement extend volume functionality in Rbd * Handle errors raised by extend_volume * Minor reorg for (array resource usage and backend options naming) * Check enabled backup service before rpc request * Fixed Ceph backup librbd segfault * Add support to import images into sheepdog volumes * Add tests for cinder/api/urlmap.py * remove improper assert usage * Enable setting blocksize on volumes * cinder.api: Replace 'locals()' with explicit values * Update upper bound of keystoneclient version * Fix missing volume_name_template flag * Change check-detach to reject more states * Implement extend volume functionality in SolidFire * Add unit tests for cinder/api/versions * Make String column creation compatible with SQLAlchemy 0.8 * Remove suds requirement * Add support for storing volumes on GPFS * Consist terminate_connection function signature * SolidFire API RequestID is useless * Add ability to specify SolidFire API version * Refactor reschedule in exception handling of volume manager * Don't pass 'session' arg to public DB API methods * Add interface class for backup drivers * Prevent wrongful privilege escalation of a context * Move brick initiator tests to brick subdirectory * Fix extent size issue when creating thin pool * Sync install_venv_common from oslo * Fix a few Sphinx warnings * Ignore files created by Sphinx build * Use oslo.sphinx and remove local copy of doc theme * Add unit tests for cinder/api/contrib/volume_actions * Scheduler should not select down volume managers * Add check for snapshot to Brick LVM * Fix typo 'Flase' -> 'False' * Rename cinder.flags to cinder.common.config * Add execute wrapper to brick LVM code * Imported Translations from Transifex * CoraidDriver: Create_volume_from_snapshot of a different size * Make
os-services API extension consistent * Imported Translations from Transifex * Removes 3PAR domain option from cinder config file * Skip brick_initiator test in virtual environments * Added Cinder volume backup to Ceph support * Handle ECONNREFUSED exception in SolidFire driver * Add os-availability-zone extension * Run flake8 also on cinder/*/openstack * Imported Translations from Transifex * Quotas by Volume Type * xenapi: implement xenserver image to volume * Save some more image attributes to volume_glance_metadata * Fix check_for_setup_error for sheepdog driver * Add Brick iSCSI attach/detach * Added volume backup and restore to Ceph RBD driver * Fix service alive information in os-services extension * Calculate count for customized dd blocksize * Content-length missing in put_object * Replace glance_metadata check with bootable column * Imported Translations from Transifex * Avoid winning the useless use of cat award * Fix up trivial H103 license check mismatches * Register used CONF entries in cinder.api.common.py * Fix and enable gating on H401 * Do not raise NEW exceptions * cinder.[brick,db,image] Replace 'locals()' * Update kombu requirement * Remove usage of locals() for formatting from cinder.tests.* * Adds create_from_volume test cases * Use list comprehensions when possible * NetApp:iSCSI drivers reserved percent need to change to 0 * Add support for swift user/key authentication * Refactor the backup method of SwiftBackupService * Imported Translations from Transifex * NetApp unified driver implementation * Add _create_volume to ThinLVMVolumeDriver * Add the project name into CinderKeystoneContext * Add build directory to flake8 ignore dirs * Add missing extend volume test (rpcapi) * fix error class path in logging sample * Modify check for volume-type-id to a get w/default * Don't perform retry_execute in certain cases * Adding host attaching support to Cinder * Update attach status when instance id invalid * Fix and enable gating on H403 * Use Python 3.x compatible except construct * cinder.backup: Replace 'locals()' with explicit values * cinder/.: replace 'locals()' with explicit values * Editable default quota support for cinder * Imported Translations from Transifex * Use common.processutils.execute * Remove usage of locals() for formatting from cinder.volume.* * cinder.schedule: Replace 'locals()' with explicit values * Imported Translations from Transifex * Remove the 'migrate' option from cinder-manage * Use Python 3.x compatible octal numbers * Use Python 3.x compatible except: construct * Update and add notifiers in create volume * Imported Translations from Transifex * Fix up the test framework * Raise an error if iSCSI is not supported * Remove usage of locals() for formatting from cinder.api.* * Implement capability to extend existing volume * Replace utils.to_bytes() with strutils.to_bytes() * Flatten Volume from Snapshot * Imported Translations from Transifex * Replace FLAGS with cfg.CONF in volume * Replace FLAGS with cfg.CONF in other modules, unless tests * Elevate volume/snap "is busy" log message for volume/snap_delete * Imported Translations from Transifex * Fixes 3PAR drivers terminate_connection issue * Added policy check for backup operations * Update to the latest stevedore * Fix various Sphinx warnings * Fix some unittest cases failed on osx * Fix the after subscription size checks * Re-set default sql_connection and sqlite_db * Remove explicit distribute depend * Add missing exception from volume/api.py * Allow disabling ssl compression for glance 
client * Add availability zone checking in the api service * Add missing attributes to xml deserializer for volume request * Integrate oslo's periodic tasks * Fix LVM logging error * Remove direct call to utils.execute * Add policy checking for transfer create/accept * Replace FLAGS with cfg.CONF in tests * Replace FLAGS with cfg.CONF in api * Start using Pyflakes * Add the iscsi device check and exception processing * Minor Logic bug in NFS Driver * Imported Translations from Transifex * Fix 'undefined symbol conn' error * NFS drivers don't honor vm size with volume from an image * Add missing tests for backup_* methods * Replace functions in utils with oslo.fileutils * Remove E12 errors from tox.ini Flake ignores * Unset all stubs before running other cleanups * Fix config registration in cinder volume drivers * Elevate acceptor's context on accept reserve update * Removing service_* options from authtoken * Add call to vol driver when accepting a transfer * Imported Translations from Transifex * Implement DB migration for volume transfer BP * Replace FLAGS with cfg.CONF in db * Add missing tests for iscsi_* methods * Log iSCSI target output on error * Re-write of the cinder-manage man page * Replace FLAGS with cfg.CONF in scheduler * python3: Introduce py33 to tox.ini * Fix AttributeError typo * Fix path for pylint Gate * Fixed method db.api.reservation_expire * Handle IPv6 specified glance servers gracefully * HDS Cinder Driver. Rev #1 * Imported Translations from Transifex * Add error reporting to generate_sample.sh on import failure * Updating HACKING to disallow the use of locals() * Prevent force delete if the volume is attached * InvalidUUID can not be raised * Fix incorrect authorization rule in quota contrib api * Rename requires files to standard names * rbd: simplify configuration and use librbd and librados * Update 3PAR driver session management * Fix typos * Add testrepository to git ignores * Fix incorrect copyright * Add missing tests for cinder.db.api.quota_ * Return 404 from delete of extra spec if not found * Fix incorrect status for volume clone from image * Imported Translations from Transifex * Support for NFS shares with spaces in path * Fixes 3PAR Host already exists error * Ensure that pbr>=0.5.10 is installed * Add missing tests for cinder.db.api * Remove execute permissions from test files * Migrate to Oslo DB code 2013.2.b1 --------- * Catch and report errors from copy image to volume * test_glance.py: Stub out _get_member_model as well * rbd: send ceph monitor addresses with connection info * Don't set signing_dir by default * Remove cinder_emc_config.xml.sample * Update cloned volumes QoS settings * Fix 'Inheritance-based rule deprecated' log warning * Added '%' before snapshot variable * Hack run_tests.sh to work with single tests again * Imported Translations from Transifex * Don't throw ValueError for invalid volume id * ModifyVolume attributes on Clone * Improve "service is down or disabled" warning message * Add "_" builtin method for config generation * Replace custom skip_ methods * Migrate base test class to testtools * Fix ownership transfer when cloning with SolidFire * Make NFS share selection more intelligent * Add common Oslo DB code to the source tree * Add the service_state into test_schedule_happy_day * Implement scheduler hints for API v2 * Update log.py and jsonutils.py from oslo-incubator * Added a test for bad limit param * Added test for nonnumerical limit param * Raise VolumeNotFound with correct volume_id * Removes a broken link from the
sidebar of Sphinx built pages * Imported Translations from Transifex * Support mount options for NFS/GlusterFS volumes * Hide v1/v2 version entities in API when disabled * Allow flake8 to run in venv * Imported Translations from Transifex * Imported Translations from Transifex * Convert to oslo strutils.bool_from_string * Update import of strutils from oslo * Add thin provisioning support checks * Update/Publish volume service updates on delete * RemoteFsDriver: copy_image_to_volume and copy_volume_to_image * Imported Translations from Transifex * solidfire: Make sure src_uuid is passed correctly * Implement cloned volume for the RBD driver * Add .coveragerc to show proper coverage statistics, as in other openstack projects * NetApp server tunneling fix * Move iscsi helpers to brick directory * Fix up hacking ignores a bit * Hide lock_prefix argument using synchronized_with_prefix() * Storwize/SVC: fix attach bug for live migration * Deprecating old dot path locations for Folsom configs * solidfire: Add ability to override account prefix * Fixes a get_volume_stats reporting issue * Increased unit test code coverage * Create an LVM utility to use for local storage * Add CINDER_LOCALEDIR env variable * Remove gettext.install() from cinder/__init__.py * Use flake8 and hacking * Use pbr instead of openstack.common.setup * Change the type of "free_capacity_gb" to be float * Set default values for NFS/GlusterFS share_config files * Add missing spaces to iscsi_iotype help * Adds notifiers to both volumeTypes and volumeTypeExtraSpecs * Fix missing spaces in Huawei Logging * Add pylint-based lintstack test to tox environment * Remove outdated cinder test doc * Implement copy_image_to_volume and copy_volume_to_image on nfs backends * Update import of oslo's processutils * Fix ability to add custom volume_backend_name * Add db client packages to dev env setup doc * Check that volume is at least minDisk size * Remove old_name from kwargs when using IET helper * Copy the RHEL6 eventlet workaround from Oslo * Remove setuptools-git as run time dependency * Fix LHN driver to allow backend name configuration * Deleting a backup removed the backup record from database * Remove _path_exists method * Encode username and password in config file * Clear volumes stuck in 'downloading' * Fixes 3PAR FC driver synchronization * Avoid using whitespace in test_safe_parse_xml * Add stats reporting to Nexenta Driver * Remove duplicate method definition * iscsi: Add ability to specify or autodetect block vs fileio * Rename duplicate test method * Update to latest copy of OSLO incubator * Cinder wasn't filtering the backups returned to backup list API * cinder volume service keeps retrying even on code exception * Add missing space to "volumes already consumed" message * Add capabilities reporting to ThinLVM driver * NetApp: Fix failing NetApp tests * Use VERSION var for volume_stats version (Gluster/NFS) * Add parsing to extra-specs key check * Use an SSH pool to manage SSH connections * Remove Flags usage from cinder.volume.driver * new cinder.conf.sample and fix extract_opts.py * fix default config option types * Fix incompatible Storwize/SVC commands * Fix backup manager formatting error * Add service list functionality to cinder-manage * Clean up attach/detach tests * Reformat openstack-common.conf * Sync with oslo-incubator copy of setup.py * Don't hard code AUTH_ into the swift backup url * Remove update_volume_status log message from NFS driver * Implement get_volume_stats for GlusterFS driver * Fixed a volume creation
re-schedule error * Allow deletion of backups where the service is None * Fix cinder-manage backup list to work with uuids * leave re-scheduled volume status to creating * Prevent create volume from snapshot with bad size * Add du to rootwrap filters * Change format of some judgments * Remove InvalidPortRange exception * Add availability_zone to the volume and snapshot notifications * Throw InvalidSnapshot for failed snap delete * remove deprecated assert_unicode sqlalchemy attribute * Fix IBM copyright strings * REST session validity not checked in get_volume_info * Enforce exclusive options snapshot-id, source-volid and image-id * Add snapshot events to the cinder notification * getLogger should be called after logging is configured * Mark sql_connection with secret flag * Sync lockutils from oslo-incubator stable/grizzly * Remove unused tools/rfc.sh * Add the volume and snapshot gigabytes together * Force deletes using tgt to work around bug 1159948 * Fixed shared gigabytes quota resource * CoraidDriver: support users that are not admin * Fix quota updating when admin deletes common user's volume * Last driver sync for Folsom and Grizzly * Fix bug with 3PAR host entry in wrong domain * Snapshot reservation sync calls wrong resource * Fetch volume_types by uuid and not by name in v2 * Use the local configuration in the nfs drivers * Fixed attach volume for EMC SMI-S iSCSI driver * Extend param2id() to work with uuids * Clean up started volume services in tests * CoraidDriver: do not call login from __init__ * CoraidDriver: typo in _login exception handler * Fixes Cinder REST API /volumes issue * Add missing processutils for impl_zmq in oslo rpc * Update Cinder's latest copy of OSLO grizzly stable * Remove the log spam generated by the NetApp driver unit tests * Speedup solidfire unit tests * Updates to OSAPI sizelimit middleware * Use OpenStack common project requires * Rename cinder-rtstool to rtstool * Make dd block size user configurable * remove cinder-rtstool because of rtslib dep * Add snapshots to the volume usage audit report * CoraidDriver: retrieve volume info (improvement) * Remove AGPL rtslib pkg from pip-requires * Fix Storwize/SVC LUN allocation with holes * Remove references to FLAGS from volume/manager.py * Allow snapshot_delete for NFS/GlusterFS drivers * Pull Oslo log fix to enable root logger initialization * Clean up exec_dirs prefix from rootwrap conf * Fix typo in persona valid values * Use self.configuration to support the multi-backend case 2013.1.rc1 ---------- * Bump version for Grizzly RC1 cut * Count Snapshots towards volume/gigabyte quotas * Fix 3PAR driver hiding existing host error * Switch all uses of 422 response code to 400 * Implement get_volume_stats in NFS driver * cinder-manage does not print any version information * Fix ISCSIDriver rescan * Compression/tier capabilities for Storwize/SVC * Fixes detach volumes in Windows cinder plugin * Fix _migrate_up in test_migrations * Switch to final 1.1.0 oslo.config release * Adds a flag to set glance api version to call * Storwize/SVC driver fix for multibackend scenario * Fix bad request response code on extra_specs create * Fix bugs for Huawei driver * Do not use prefix to look up host in Storwize/SVC * update error log arguments in filter scheduler * Update oslo rpc libraries * Remove/update unused log arguments in manager * Removing flags in RBD in favor of configuration * LIO iSCSI initiator ACL auto-config * Fix a few bugs for LeftHand Grizzly * Update tox.ini to support RHEL 6.x * Fix volume capacity
reporting * Pull newly merged Oslo update for 'is' operator * Use nose and openstack nose plugin * Exit run_tests with the result code of the test runner * Mark configuration option netapp_password secret * Add get_volume_stats in the sheepdog driver * Switch to oslo.config * Fix calling setUp() method of superclass from tearDown method * Fix 3PAR drivers to work in multi-backend mode * Fixed copy image to volume and clone volume * Fixes issues found in /os-hosts API * Fix Storwize/SVC storage_protocol reporting * sync oslo changes for setup / version * swift backup service checks version during restore * Add some useful log to filter scheduler * Elevate context for delete volume with no host * Improved fail_reason for cinder-backup swift connection errors * Convert from using FLAGS directly in SF driver * Improve logging for volume operations via manager * Only use iscsi_helper config option if using LVMISCSIDriver * Fix query filter in volume_get_active_by_window() * Changed to INFO level logging for main cinder-backup operations * NetApp: Clean up lock file left behind by unit tests * NetApp: Fix race condition in 7-mode iSCSI driver with DFM * update install_venv_common to handle bootstrapping * allow run_tests.sh to report why it failed * Remove compat cfg wrapper * XenAPINFS: Fix Volume always uploaded as vhd/ovf * Fixed cinder-backup start errors seen with devstack * Cinder devref doc cleanups * Fix various exception paths 2013.1.g3 --------- * Implement metadata options for snapshots * Skip timestamp check if 'capabilities' is none * Fix stale volume list for NetApp 7-mode ISCSI driver * Implement a basic backup-volume-to-swift service * Better error handling around volume delete * Moved cinder_emc_config.xml.sample to emc folder * Uses tempdir module to create/delete xml file * Add HUAWEI volume driver in Cinder * XenAPINFS: Create volume from image (generic) * Bump the oslo-config version to address issues * Ensure volume exists before deleting * Add LIO configuration for iSCSI initiators * rbd: implement get_volume_stats() * Handle maxclonepervolume/node limits in SF driver * Use oslo-config-2013.1b3 * Fix syntax error in cinder-volume-usage-audit * HP 3PAR Fibre Channel Driver and iSCSI Updates * Fibre Channel base class for Cinder drivers * Update cinder-manage to use FLAGS.log_dir * Add a safe_minidom_parse_string function * Add a volume driver in Cinder for Scality SOFS * Fix create volume from image * XenAPINFS: fix capacity reporting * Update Storwize/SVC driver for Grizzly * Set rootwrap_config in cinder.conf sample * Skip tests if cinder is not installed * Fix undef function call in test_migrations for py26 * Fix PEP8 violation (again) * Update cinder-volume to enable multi volume support * Install rtslib when installing cinder * Sync latest cfg and log from oslo-incubator * Handle 'infinite' and 'unknown' capacity in CapacityWeigher * Add get_cluster_stats to SolidFire driver * NetApp: Fix for snapshot not deleted in error state * NetApp bug fix for multibackend scenario * Adding support for Coraid AoE SANs Appliances * Add an update option to run_tests.sh * Update EMC SMI-S Driver * Add LIO iSCSI backend support using python-rtslib * Add GlusterFS volume driver * Create a RemoteFsDriver class * Fix ordering of function args * Add an ID to temporary volume snapshot object * Allow create_volume() to retry when exception happened * Fixes the provisioning on selected volumes for NetApp 7 mode * rbd: update volume<->image copying * Fix PEP8 violation * Update snapshot rest
api to be consistent with volumes * change display_description to description in volumes * v2 volume/snapshot create will correctly give a 202 response * add postgresql opportunistic testing * make test_databases instance variable * Move create_cloned_volume() to LVMVolumeDriver * Update to latest oslo-version code * Allow disabling of long-lived SSH connections * Don't require importing paramiko for error * Allow for specifying nfs mount options * rework migration 004 testing with real data * Allow tools/install_venv_common.py to be run from within the source directory * add data injection on migrations * sync database connect changes from nova * XenAPINFS: Copy volume to glance * XenAPINFS: Copy image from glance * Fix inability to delete volumes in error state for NetApp driver * Copy glance_image_metadata when cloning volumes * Add volume_glance_metadata to volume.api.get * Import Oslo's common rootwrap to Cinder * Mark password config options with secret * Fixes 'not in' operator usage * Skip tests if cinder is not installed * Fix provider_location column add for PSQL * Update 3PAR driver * Fix the generalized copy_image_to_volume operation * import tools/flakes from oslo * Add unit tests for ISCSIDriver._do_iscsi_discovery and ISCSIDriver._get_iscsi_properties * Fixes "is not" usage * Pull cfg module from Oslo and update cinder-manage accordingly * Set source volume to "in use" during clone * Update some Oslo Packages * Fix typo in cinder/db/api.py * Replace CRLF with unix-style "LF" * Allow volume back-end to report 'infinite' or 'unknown' as capacity * Wrap SolidFire size parameter in int * Use install_venv_common.py from oslo * Update osapi_volume_extension default * Generic iSCSI copy volume<->image * Implement LVM thin provisioning support * Check for installed cinder in filter tests * Fix hosts extension and enable its tests * Check for non-default volume name template * Get updated vol status in volume.api.reserve * Update EMC SMI-S iSCSI Driver * Clean up QTree when deleting volume on NetApp storage box * Fix NFS volume creation * Improve error message for missing NFS share config * ensure zeros are written out when clearing volumes * Fix error for extra specs update with empty body * Clean up IPV6 config checks in test_wsgi * Add capability to update volume metadata * Fix sheepdog volume creation * Add LUN# to provider_location in Nexenta driver * Check for configured IPV6 before running tests * New cinder.conf.sample format * Move iscsi flags back to driver.py * Snapshot support for XenAPINFS * support a configurable volume wiping method * Relax various version constraints * Support for SSL in wsgi.Server * Enhance wsgi to listen on ipv6 address * Factor out LVM code * Implement filter scheduler * Revert "Implement filter scheduler" * Update SolidFire Volume driver grizzly-2 --------- * Provide HP 3PAR array iSCSI driver * Fix CinderClient exception name in EMCISCSIDriver * Enable cinder exception format checking in tests * Update exceptions to pass correct kwargs * Add option to make exception format errors fatal * Implement filter scheduler * Use tempdir for lock_path in tests * Upgrade WebOb to 1.2.3 * Make WebOb version specification more flexible * Fix cmds clearing in TargetAdminTestCase * Add missing library * use deleted = False, not 0 for update * Implement ability to Clone volumes in Cinder * Add pyflakes * Adds synchronization to attach volume * Add EMC Volume Driver in Cinder * Added extra-spec key scoping to the 3PAR drivers * Adding marker, pagination, sort 
key and sort direction to v2 api * Fix typo in image_utils tempfile handling * Make the NetAppISCSIDriver._is_clone_done() method able to handle empty responses. Add unit tests to exercise this case * Make sure we don't double remove tmp on exception * Add service mgmt extension * Added the lockutils, fileutils, gettextutils * Fixes a Windows volume driver bug on disk export * Moving host admin extension with other extensions * Allow the lvm backed drivers to use mirrors * CHAP support for IBM Storwize/SVC driver * Remove instance quota re-sync code * Add image metadata API extension * Raise NotImplemented for drivers that don't support images * Add *.swp to gitignore * Support glance servers over https * Add commands used by NFS volume driver to rootwrap * Changing display_name to name in v2 api * Make summary and detail view consistent with other projects * creating separate v1 and v2 stubs and general fakes * Make copy_to_volume a bit more useful * Delete type call in api needs update to use ID * Convert volume_type id from int to uuid * Fixes the 3PAR drivers CPG validation * Rename Config osapi_compute_link_prefix to osapi_volume_base_URL * Fix exception when size is None * Ensure request_spec can be serialized * attaching volumes will set instance_uuid instantly * Revert changes to monkey_patch * Improve provider_location cleanup code for RBD * Fix import order to make it alphabetical * Fix None being passed into as_int() * Use auth_token middleware from keystoneclient * Provide i18n to those messages without _() * Revert "use O_DIRECT when copying from /dev/zero too" * Make pep8 checks a bit stricter * Unpin lxml requirements * use O_DIRECT when copying from /dev/zero too * Add CONTRIBUTING file * Add persistence to the volume created by iscsi IET grizzly-1 --------- * adding copy of v1 as v2 * Moving contrib to cinder.api * Moving api v1 implementation into v1 directory * Switching api to use base extension manager * moving all middleware code in cinder.api.middleware * Moving common api code into cinder.api * Cleaning up volume driver paths * Add volume bootable information to api response * Add XenAPINFSDriver * Add db table for Glance Metadata * Remove redundant db.volume_update() in volume manager create_volume() * Pin pep8 1.3.3 * Removes the xensm driver * Pass in correct volume_ref to create_from_snapshot * NetApp direct to filer drivers for iscsi and nfs * Add hosts extension to Cinder * Remove unused python-daemon dependency * Make tox.ini run pep8/hacking checks on bin * Various pep8/HACKING fixes for Cinder * Volume RPC API Versioning * Remove gen_uuid() * Remove obsolete use_local_volumes * Import order cleanup per HACKING * Remove unused volume API method - remove_from_compute() * Scheduler API clean up and refactor * Remove dm_setup(remove) call in volume_delete * Add ability to disable secure volume delete * Remove the zeroing out of the volume altogether * Add 'create_volume' to scheduler RPC API * Fix run_tests.sh ambiguous usage msg and behaviour for -x option * Add admin only action for force detach * Changes bit mode of zadara.py to 644 * Port openstack-common/uuidutils to Cinder * Fix 401 from auth_token middleware * Splitting out volume drivers in driver.py * Minor optimization in create_volume in HpSanISCSIDriver * Adding an SSH Connection Pool * Fixes 3par driver methods that were double locking * Return volume type name on volume create * pin sqlalchemy to the 0.7 series * Add VolumeTenantAttribute API extension * Log the body of an /action * Detect and
fix issues caused by vol ID migration * Split out drivers in san.py * Add VolumeHostAttribute API extension * Add default volume type flag * Fix typo so setting volume_tmp_dir works * Rollback for resources during volume creation failure * Allow the user to update a volume's metadata * Add the generation of the username and password for iSCSI target * Update HACKING.rst and related changes from Nova/PEP8 * Add trove classifiers for PyPI * Ensure device node exists before wiping during volume deletion * Update volume and snapshot status on delete * Drop unused quota_usage db methods * Drop duplicate sqlalchemy db api methods * Change output strings to i18ned * Adds support for Windows 2012 Storage Server blueprint windows2012driver https://blueprints.launchpad.net/cinder/+spec/windows2012driver * Update common * Fix incorrect class path for legacycinder formatter in logging_sample.conf * Error message references incorrect variable * Loosen anyjson dependency to avoid clash with ceilometer * Configuration Options clean up * Fix typo in policy documentation * Add snapshot force delete admin action * Mock out sleep in some retry tests * Use policy based rule to define context.is_admin * Sync openstack common and add policy * Fix typo in sample configuration file * Update distribute version in test requires * Revert explicit usage of tgt-adm --conf option * Fixes remove_export for IetAdm * Add missing entries in setup, fix up pip-requires * Fix NetAppCmodeISCSIDriver._get_lun_handle() method * Remove unused code: check_for_export * Return 400 if create volume snapshot force parameter is invalid * Fix cinder-volume-usage-audit * Sync with nova change I135ed85a * Remove cinder gating hack * Set the default availability zone back to nova * Add lun number (0) to model_update in HpSanDriver * Fixes to the SolarisISCSI Driver * Stop double logging to the console * Restore SIGPIPE default action for subprocesses * Replace builtin hash with MD5 to solve 32/64-bit issues * Correct IetAdm remove_iscsi_target * Add nova migrate_version check to cinder import * Bump version to 2013.1 * Clean up db.volume_create() * Fix volume deletion when device mapper is used * Update quota when deleting volume that failed to be scheduled * Sync a change to rpc from openstack-common * Add a resume delete on volume manager startup * Improve entity validation in volumes APIs * Add entity body validation helper * Should've added super().tearDown() in test_iscsi * Fixes bug 1050135 * Fix FLAGS.volumes_dir help message * Use tmpdir and avoid leaving test files behind * Sync log format changes from openstack-common * Update rpc from openstack-common * Add volume quota in volume/api.py and volume/manager.py * Fixes bug 1049446 * Revert "Don't zero out snapshot volume on snapshot_delete" * Add update to volume and snapshot controllers * Nail the pip requirement at 1.1 * Clean up .gitignore * Prevent from bug #1008866 is reverted * rename nova.pot => cinder.pot, nova.po => cinder.po * Don't zero out snapshot volume on snapshot_delete * Recent changes to SolidFire API changed iqn format * Remove unused utils.wrap_exception * Sync notifier changes from openstack-common * Clean up some code about compute in VolumeTestCase * Remove unused db api * Typo nova => cinder * Remove vpn_ping function in cinder/utils.py * Update SolidFire driver to reflect IQN changes * Rename test_nova_rootwrap.py to test_cinder_rootwrap.py * Fixes potential bugs found by pylint * Handle missing 'provider_location' in rm_export * Specify the conf file when
creating a volume * avoid the buffer cache when copying volumes * Fix Typo in LOG.error * Remove dependencies for netaddr * Filter volumes and snapshots by query string * Remove null_kernel option * Remove default_schedule_zone * Remove memcached_servers config option * Regenerate cinder.conf.sample * Sync improvements to config file generator tools * Sync misc changes from openstack-common * Sync zmq changes from openstack-common * Sync cfg changes from openstack-common * Fix xml metadata for volumes api in cinder * Fix bug where image size is incorrectly rejected * Several hacking compliance fixes * Remove Cheetah from pip-requires * Update dev docs * Quick pass at implementing the basics for cinder dev docs * Remove the N/A compute related stuff * Clean up the architecture a bit to only show cinder related * Remove various modules from TOC's that aren't applicable * Typo fix: nova => cinder * Move newly created NFS exceptions to standard location in exception.py Addresses bug 1037619 * Add admin actions extension * Removed unnecessary call to ensure_export * Add cinder- prefix to all binaries * Make size optional when creating a volume from a snap * Fix creation of iscsi targets * Spelling: Persistant=>Persistent * Implement volume quota support in Cinder * Remove unused return values and commented out code from NFS driver * Remove unused flags * Fix PEP8 issues * Fix incorrect tgt-admin call in create_iscsi_target * Add 'detaching' to volume status * Typo fix in cinder: existant => existent * Make glance image service check base exception classes * Fix PEP8 issues * Remove unused exceptions from cinder/exception.py * Add nosehtmloutput as a test dependency * Migrate volume related quota info in db migration * Use event.listen() instead of deprecated listeners kwarg * Add declare for xiv driver in fake_flags * Remove logging in volume tests * Call driver for attach/detach_volume * Fix spelling typos * Remove unused function folsom-3 -------- * blueprint zadara-volume-driver * Adding the volume notifications to cinder * add ability to clone images * Update SolidFire volume driver * Add proper support for deprecation messages * Remove utils.deprecated functions * Move volume size validation to api layer * Map internal exceptions in the nova style * Add driver for using files on a generic NFS server as virtual block devices Add NetApp-specific NFS virtual block driver * Implements bp migrate-nova-volumes-to-cinder * add get_location method for images * rbd: implement create_volume_from_snapshot * Replace deprecated client with python-glanceclient * Remove unused imports * Fix check_for_export() in non-exporting drivers * Adds new volume API extensions * Driver for IBM XIV storage * Fake requests in tests should be to v1 * Add C-mode driver for NetApp * storwize-svc: improved test coverage and fixes * Use setuptools-git * Add iscsiadm path for quantal * Create unique volumes_dir for testing * Remove redundant 'availability_zone' config options * Straight port of the NetApp driver updates from nova-volume to cinder * Use volume driver specific exceptions * Admin users should be restricted from seeing all snapshots by default * Use openstack.common.notifier * Admin users should be restricted from seeing all volumes by default * Deprecate root_helper in favor of rootwrap_config * Send 'create volume from snapshot' to the proper host * Add persistent volumes for tgtd * Scheduler-clean-up * Include AUTHORS file in MANIFEST.in * Add authors for IBM Storwize and SVC driver * Driver for IBM
Storwize and SVC storage * Remove unused instance_name_template flag * Allow XML payload for volume creation * Include volume_metadata with object on vol create * Trim volume type representation * Port nova-rootwrap changes to cinder-rootwrap * Don't do PEP8 test for openstack-common code * Cleanup unused code in service.py * Use openstack.common.setup * utils module is still being used by cinder-volume service * Remove unused fake memcache client * Remove unused check_snapshots_enabled * Use openstack.common.log for logging * Don't create volumes if an incorrect size was given * Use rpc from openstack-common * Add missing gettextutils from openstack-common * Use save_and_reraise_exception() from common * Use openstack.common.cfg.CONF * Remove cinder.log usage from cinder.rpc * Remove cinder.context dependency from cinder.rpc * Localize rpc options to rpc code * Add version to scheduler rpc API * Sync cfg and iniparser from openstack-common * Use cfg's new global CONF object * Make use of openstack.common.jsonutils * Sync with latest version of openstack.common.cfg * Convert Cinder to use openstack-common jsonutils * Add missing ack to impl_qpid * Move queue_get_for() from db to rpc * Add base support for rpc API versioning * Make kombu support optional for running unit tests * Stop using cinder.exception from cinder.rpc * Remove unused synchronization decorator * Remove 'cinder-manage config convert' * Use cfg's new behavior of reset() clearing overrides * Remove unused enabled_apis flag * Remove some unused helper scripts * Remove unused wrap_errors decorator * Remove unused get_{id,version}_from_href() * Remove unused metadata serialization * Remove unused raise_http_conflict_for_instance_invalid_state() * Remove unused OverLimitFault * Remove old flagfile support * Misused and not used config options * Pass 'cinder' project into ConfigOpts * Sync to newer openstack.common.cfg * Convert Cinder to use openstack-common timeutils * Do not duplicate nova docs in cinder * Remove unused db api methods * Create single initial Cinder DB migration file * Updated HpSanISCSIDriver to use initialize/terminate methods folsom-2 -------- * Pruned Authors file to active contributors (from nova-volumes) * Move nova-manage.rst to cinder-manage.rst * Add action extensions to support nova integration * Revert "Add action extensions to support nova integration." * Fix volume['id'] from integer to string * Add action extensions to support nova integration * Set pep8 version to 1.1 in test_requires * Fix topics so that they do not collide with nova * Fix up coverage and jenkins test running * Remove instance Foreign Key in volumes table, replace with instance_uuid * Align the tox.ini file * Removed cinder/api/openstack/compute and moved the relevant pieces under cinder/api/openstack/volume. Fixes bug 994177 * Initial fork out of Nova cinder-8.0.0/PKG-INFO0000664000567000056710000000303112701406543015220 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: cinder Version: 8.0.0 Summary: OpenStack Block Storage Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ====== CINDER ====== You have come across a storage service for an open cloud computing service. It has identified itself as `Cinder`. It was abstracted from the Nova project.
* Wiki: http://wiki.openstack.org/Cinder * Developer docs: http://docs.openstack.org/developer/cinder Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://git.openstack.org/openstack/cinder.git For developer information please see `HACKING.rst `_ You can raise bugs here: http://bugs.launchpad.net/cinder Python client ------------- https://git.openstack.org/cgit/openstack/python-cinderclient Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 cinder-8.0.0/rally-jobs/0000775000567000056710000000000012701406543016204 5ustar jenkinsjenkins00000000000000cinder-8.0.0/rally-jobs/plugins/0000775000567000056710000000000012701406543017665 5ustar jenkinsjenkins00000000000000cinder-8.0.0/rally-jobs/plugins/__init__.py0000664000567000056710000000000012701406250021757 0ustar jenkinsjenkins00000000000000cinder-8.0.0/rally-jobs/plugins/README.rst0000664000567000056710000000060612701406250021351 0ustar jenkinsjenkins00000000000000Rally plugins ============= All *.py modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need for any extra configuration, and there is no difference between writing them here and in the rally code base. Note that it is better to push all interesting and useful benchmarks to the Rally code base, as this simplifies administration for Operators. cinder-8.0.0/rally-jobs/cinder-fake.yaml0000664000567000056710000000041012701406250021230 0ustar jenkinsjenkins00000000000000--- CinderVolumes.create_and_list_volume: - args: size: 1 detailed: True runner: type: "constant" times: 200 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 cinder-8.0.0/rally-jobs/extra/0000775000567000056710000000000012701406543017327 5ustar jenkinsjenkins00000000000000cinder-8.0.0/rally-jobs/extra/README.rst0000664000567000056710000000025412701406250021012 0ustar jenkinsjenkins00000000000000Extra files =========== All files from this directory will be copied to the gates, so you are able to use absolute paths in rally tasks. Files will be in ~/.rally/extra/* cinder-8.0.0/rally-jobs/README.rst0000664000567000056710000000212712701406250017670 0ustar jenkinsjenkins00000000000000Rally job related files ======================= This directory contains rally tasks and plugins that are run by OpenStack CI. Structure --------- * cinder.yaml is a task that will be run in gates against OpenStack deployed by DevStack. * cinder-fake.yaml is a task that will be run in gates against OpenStack deployed by DevStack with the fake cinder driver. * plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks, generic cleanup resources, .... * extra - all files from this directory will be copied to the gates, so you are able to use absolute paths in rally tasks.
Files will be in ~/.rally/extra/* Useful links ------------ * More about Rally: https://rally.readthedocs.org/en/latest/ * Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html * How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html * About plugins: https://rally.readthedocs.org/en/latest/plugins.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins cinder-8.0.0/rally-jobs/cinder.yaml0000664000567000056710000002257012701406250020335 0ustar jenkinsjenkins00000000000000{% set image_name = "^cirros.*uec$" %} --- Authenticate.validate_cinder: - args: repetitions: 2 runner: type: "constant" times: 10 concurrency: 5 context: users: tenants: 3 users_per_tenant: 5 sla: failure_rate: max: 0 Quotas.cinder_update_and_delete: - args: max_quota: 1024 runner: type: "constant" times: 4 concurrency: 1 context: users: tenants: 3 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 Quotas.cinder_update: - args: max_quota: 1024 runner: type: "constant" times: 10 concurrency: 2 context: users: tenants: 3 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_and_delete_volume: - args: size: 1 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: size: 1 image: name: {{image_name}} runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: size: min: 1 max: 3 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_and_list_volume: - args: size: 1 detailed: True runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: size: min: 1 max: 3 detailed: True runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: size: 1 detailed: True image: name: {{image_name}} runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.list_volumes: - args: detailed: True runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 1 volumes: size: 1 volumes_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_volume: - args: size: 1 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: size: min: 1 max: 3 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: size: 1 image: name: {{image_name}} runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 
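# Every stanza in this task file follows the same shape: a scenario name
# mapping to a list of runs, where each run supplies the scenario `args`, a
# constant `runner` (times/concurrency), a `context` that provisions tenants
# and users (plus volumes or servers where a scenario needs them), an
# `api_versions` block pinning the cinder v2 endpoint, and an SLA tolerating
# zero failures. A minimal sketch of one run in that shape -- the size value
# below is illustrative and not part of the gate job:
#
#   CinderVolumes.create_and_delete_volume:
#     - args:
#         size: 2
#       runner:
#         type: "constant"
#         times: 2
#         concurrency: 2
#       context:
#         users:
#           tenants: 1
#           users_per_tenant: 1
#       api_versions:
#         cinder:
#           version: 2
#           service_name: cinderv2
#       sla:
#         failure_rate:
#           max: 0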
CinderVolumes.create_and_extend_volume: - args: size: 1 new_size: 2 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: size: min: 1 max: 2 new_size: min: 3 max: 4 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_and_attach_volume: - args: size: 1 image: name: {{image_name}} flavor: name: "m1.tiny" runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_snapshot_and_attach_volume: - args: volume_type: false size: min: 1 max: 1 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 servers: image: name: {{image_name}} flavor: name: "m1.tiny" servers_per_tenant: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: volume_type: true size: min: 1 max: 1 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 servers: image: name: {{image_name}} flavor: name: "m1.tiny" servers_per_tenant: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_from_volume_and_delete_volume: - args: size: 1 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 volumes: size: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 - args: size: min: 1 max: 2 runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 volumes: size: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_and_delete_snapshot: - args: force: false runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 volumes: size: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_and_list_snapshots: - args: force: False detailed: True runner: type: "constant" times: 2 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 volumes: size: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 CinderVolumes.create_and_upload_volume_to_image: - args: size: 1 runner: type: "constant" times: 1 concurrency: 1 context: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 2 service_name: cinderv2 sla: failure_rate: max: 0 cinder-8.0.0/tox.ini0000664000567000056710000000752112701406257015450 0ustar jenkinsjenkins00000000000000[tox] minversion = 1.8 skipsdist = True envlist = py27,pep8 [testenv] # Note the hash seed is set to 0 until cinder can be tested with a # random hash seed successfully. setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 usedevelop = True install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} # TODO(mriedem): Move oslo.versionedobjects[fixtures] to test-requirements.txt # after I937823ffeb95725f0b55e298ebee1857d6482883 lands. deps = -r{toxinidir}/test-requirements.txt oslo.versionedobjects[fixtures] # By default ostestr will set concurrency # to ncpu, to specify something else use # the concurrency= option. 
# call ie: 'tox -epy27 -- --concurrency=4' commands = ostestr {posargs} whitelist_externals = bash passenv = *_proxy *_PROXY [testenv:releasenotes] # NOTE(jaegerandi): This target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:functional] setenv = OS_TEST_PATH = ./cinder/tests/functional [testenv:pep8] commands = flake8 {posargs} . # Check that .po and .pot files are valid: bash -c "find cinder -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" {toxinidir}/tools/config/check_uptodate.sh --checkopts {toxinidir}/tools/config/check_uptodate.sh {toxinidir}/tools/check_exec.py {toxinidir}/cinder [testenv:fast8] # Use same environment directory as pep8 env to save space and install time envdir = {toxworkdir}/pep8 commands = {toxinidir}/tools/fast8.sh [testenv:pylint] deps = -r{toxinidir}/requirements.txt pylint==0.26.0 commands = bash tools/lintstack.sh [testenv:cover] # Also do not run test_coverage_ext tests while gathering coverage as those # tests conflict with coverage. # NOTE(jaegerandi): This target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} commands = python setup.py testr --coverage \ --testr-args='^(?!.*test.*coverage).*$' [testenv:genconfig] sitepackages = False envdir = {toxworkdir}/pep8 commands = {toxinidir}/tools/config/generate_sample.sh from_tox [testenv:genopts] sitepackages = False envdir = {toxworkdir}/pep8 commands = {toxinidir}/tools/config/generate_sample.sh from_tox --nosamplefile [testenv:venv] # NOTE(jaegerandi): This target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} commands = {posargs} [testenv:docs] commands = python setup.py build_sphinx [testenv:gendriverlist] sitepackages = False envdir = {toxworkdir}/venv commands = python {toxinidir}/tools/generate_driver_list.py [testenv:bandit] deps = -r{toxinidir}/test-requirements.txt commands = bandit -c tools/bandit.yaml -r cinder -n 5 -ll [flake8] # Following checks are ignored on purpose. # # E251 unexpected spaces around keyword / parameter equals # reason: no improvement in readability ignore = E251 exclude = .git,.venv,.tox,dist,tools,doc,*egg,build max-complexity=30 [hacking] local-check-factory = cinder.hacking.checks.factory import_exceptions = cinder.i18n [testenv:pip-missing-reqs] # do not install test-requirements as that will pollute the virtualenv for # determining missing packages # this also means that pip-missing-reqs must be installed separately, outside # of the requirements.txt files deps = pip_missing_reqs commands = pip-missing-reqs -d --ignore-file=cinder/tests/* cinder cinder-8.0.0/cinder/0000775000567000056710000000000012701406543015372 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/test.py0000664000567000056710000003407312701406250016725 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for our unit tests. Allows overriding of CONF for use of fakes, and some black magic for inline callbacks. """ import copy import logging import os import shutil import uuid import fixtures import mock from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_log.fixture import logging_error as log_fixture from oslo_log import log from oslo_messaging import conffixture as messaging_conffixture from oslo_utils import strutils from oslo_utils import timeutils from oslotest import moxstubout import testtools from cinder.common import config # noqa Need to register global_opts from cinder.db import migration from cinder.db.sqlalchemy import api as sqla_api from cinder import i18n from cinder.objects import base as objects_base from cinder import rpc from cinder import service from cinder.tests import fixtures as cinder_fixtures from cinder.tests.unit import conf_fixture from cinder.tests.unit import fake_notifier CONF = cfg.CONF LOG = log.getLogger(__name__) _DB_CACHE = None class TestingException(Exception): pass class Database(fixtures.Fixture): def __init__(self, db_api, db_migrate, sql_connection, sqlite_db, sqlite_clean_db): self.sql_connection = sql_connection self.sqlite_db = sqlite_db self.sqlite_clean_db = sqlite_clean_db # Suppress logging for test runs migrate_logger = logging.getLogger('migrate') migrate_logger.setLevel(logging.WARNING) self.engine = db_api.get_engine() self.engine.dispose() conn = self.engine.connect() db_migrate.db_sync() if sql_connection == "sqlite://": conn = self.engine.connect() self._DB = "".join(line for line in conn.connection.iterdump()) self.engine.dispose() else: cleandb = os.path.join(CONF.state_path, sqlite_clean_db) testdb = os.path.join(CONF.state_path, sqlite_db) shutil.copyfile(testdb, cleandb) def setUp(self): super(Database, self).setUp() if self.sql_connection == "sqlite://": conn = self.engine.connect() conn.connection.executescript(self._DB) self.addCleanup(self.engine.dispose) else: shutil.copyfile( os.path.join(CONF.state_path, self.sqlite_clean_db), os.path.join(CONF.state_path, self.sqlite_db)) def _patch_mock_to_raise_for_invalid_assert_calls(): def raise_for_invalid_assert_calls(wrapped): def wrapper(_self, name): valid_asserts = [ 'assert_called_with', 'assert_called_once_with', 'assert_has_calls', 'assert_any_call'] if name.startswith('assert') and name not in valid_asserts: raise AttributeError('%s is not a valid mock assert method' % name) return wrapped(_self, name) return wrapper mock.Mock.__getattr__ = raise_for_invalid_assert_calls( mock.Mock.__getattr__) # NOTE(gibi): needs to be called only once at import time # to patch the mock lib _patch_mock_to_raise_for_invalid_assert_calls() class TestCase(testtools.TestCase): """Test case base class for all unit tests.""" def _get_joined_notifier(self, *args, **kwargs): # We create a new fake notifier but we join the notifications with # the default notifier notifier = 
fake_notifier.get_fake_notifier(*args, **kwargs) notifier.notifications = self.notifier.notifications return notifier def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() # Create default notifier self.notifier = fake_notifier.get_fake_notifier() # Mock rpc get notifier with fake notifier method that joins all # notifications with the default notifier p = mock.patch('cinder.rpc.get_notifier', side_effect=self._get_joined_notifier) p.start() # Unit tests do not need to use lazy gettext i18n.enable_lazy(False) test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) try: test_timeout = int(test_timeout) except ValueError: # If timeout value is invalid do not set a timeout. test_timeout = 0 if test_timeout > 0: self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) environ_enabled = (lambda var_name: strutils.bool_from_string(os.environ.get(var_name))) if environ_enabled('OS_STDOUT_CAPTURE'): stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) if environ_enabled('OS_STDERR_CAPTURE'): stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(cinder_fixtures.StandardLogging()) rpc.add_extra_exmods("cinder.tests.unit") self.addCleanup(rpc.clear_extra_exmods) self.addCleanup(rpc.cleanup) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' self.messaging_conf.response_timeout = 15 self.useFixture(self.messaging_conf) rpc.init(CONF) # NOTE(geguileo): This is required because _determine_obj_version_cap # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have # weird interactions between tests if we don't clear them before each # test. rpc.LAST_OBJ_VERSIONS = {} rpc.LAST_RPC_VERSIONS = {} conf_fixture.set_defaults(CONF) CONF([], default_config_files=[]) # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. self.start = timeutils.utcnow() CONF.set_default('connection', 'sqlite://', 'database') CONF.set_default('sqlite_synchronous', False, 'database') global _DB_CACHE if not _DB_CACHE: _DB_CACHE = Database(sqla_api, migration, sql_connection=CONF.database.connection, sqlite_db=CONF.database.sqlite_db, sqlite_clean_db='clean.sqlite') self.useFixture(_DB_CACHE) # NOTE(danms): Make sure to reset us back to non-remote objects # for each test to avoid interactions. Also, backup the object # registry. 
objects_base.CinderObject.indirection_api = None self._base_test_obj_backup = copy.copy( objects_base.CinderObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators mox_fixture = self.useFixture(moxstubout.MoxStubout()) self.mox = mox_fixture.mox self.stubs = mox_fixture.stubs self.addCleanup(CONF.reset) self.addCleanup(self._common_cleanup) self.injected = [] self._services = [] fake_notifier.stub_notifier(self.stubs) self.override_config('fatal_exception_format_errors', True) # This will be cleaned up by the NestedTempfile fixture lock_path = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture( config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') lockutils.set_defaults(lock_path) self.override_config('policy_file', os.path.join( os.path.abspath( os.path.join( os.path.dirname(__file__), '..', ) ), 'cinder/tests/unit/policy.json'), group='oslo_policy') self._disable_osprofiler() # NOTE(geguileo): This is required because common get_by_id method in # cinder.db.sqlalchemy.api caches get methods and if we use a mocked # get method in one test it would carry on to the next test. So we # clear out the cache. sqla_api._GET_METHODS = {} def _restore_obj_registry(self): objects_base.CinderObjectRegistry._registry._obj_classes = \ self._base_test_obj_backup def _disable_osprofiler(self): """Disable osprofiler. osprofiler should not run for unit tests. """ side_effect = lambda value: value mock_decorator = mock.MagicMock(side_effect=side_effect) p = mock.patch("osprofiler.profiler.trace_cls", return_value=mock_decorator) p.start() def _common_cleanup(self): """Runs after each test method to tear down test environment.""" # Stop any timers for x in self.injected: try: x.stop() except AssertionError: pass # Kill any services for x in self._services: try: x.kill() except Exception: pass # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__.keys() if k[0] != '_']: del self.__dict__[key] def override_config(self, name, override, group=None): """Cleanly override CONF variables.""" CONF.set_override(name, override, group) self.addCleanup(CONF.clear_override, name, group) def flags(self, **kw): """Override CONF variables for a test.""" for k, v in kw.items(): self.override_config(k, v) def start_service(self, name, host=None, **kwargs): host = host and host or uuid.uuid4().hex kwargs.setdefault('host', host) kwargs.setdefault('binary', 'cinder-%s' % name) svc = service.Service.create(**kwargs) svc.start() self._services.append(svc) return svc def mock_object(self, obj, attr_name, new_attr=None, **kwargs): """Use python mock to mock an object attribute. Mocks the specified object's attribute with the given value. Automatically performs 'addCleanup' for the mock. """ if not new_attr: new_attr = mock.Mock() patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs) patcher.start() self.addCleanup(patcher.stop) return new_attr # Useful assertions def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. This is a 'deep' match in the sense that it handles nested dictionaries appropriately. NOTE: If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped.
""" def raise_assertion(msg): d1str = d1 d2str = d2 base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' 'd2: %(d2str)s' % {'msg': msg, 'd1str': d1str, 'd2str': d2str}) raise AssertionError(base_msg) d1keys = set(d1.keys()) d2keys = set(d2.keys()) if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys raise_assertion('Keys in d1 and not d2: %(d1only)s. ' 'Keys in d2 and not d1: %(d2only)s' % {'d1only': d1only, 'd2only': d2only}) for key in d1keys: d1value = d1[key] d2value = d2[key] try: error = abs(float(d1value) - float(d2value)) within_tolerance = error <= tolerance except (ValueError, TypeError): # If both values aren't convertible to float, just ignore # ValueError if arg is a str, TypeError if it's something else # (like None) within_tolerance = False if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): self.assertDictMatch(d1value, d2value) elif 'DONTCARE' in (d1value, d2value): continue elif approx_equal and within_tolerance: continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " "d2['%(key)s']=%(d2value)s" % { 'key': key, 'd1value': d1value, 'd2value': d2value, }) cinder-8.0.0/cinder/quota.py0000664000567000056710000014531512701406257017110 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Quotas for volumes.""" from collections import deque import datetime from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import importutils from oslo_utils import timeutils import six from cinder import context from cinder import db from cinder import exception from cinder.i18n import _, _LE from cinder import quota_utils LOG = logging.getLogger(__name__) quota_opts = [ cfg.IntOpt('quota_volumes', default=10, help='Number of volumes allowed per project'), cfg.IntOpt('quota_snapshots', default=10, help='Number of volume snapshots allowed per project'), cfg.IntOpt('quota_consistencygroups', default=10, help='Number of consistencygroups allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='Total amount of storage, in gigabytes, allowed ' 'for volumes and snapshots per project'), cfg.IntOpt('quota_backups', default=10, help='Number of volume backups allowed per project'), cfg.IntOpt('quota_backup_gigabytes', default=1000, help='Total amount of storage, in gigabytes, allowed ' 'for backups per project'), cfg.IntOpt('reservation_expire', default=86400, help='Number of seconds until a reservation expires'), cfg.IntOpt('until_refresh', default=0, help='Count of reservations until usage is refreshed'), cfg.IntOpt('max_age', default=0, help='Number of seconds between subsequent usage refreshes'), cfg.StrOpt('quota_driver', default="cinder.quota.DbQuotaDriver", help='Default driver to use for quota checks'), cfg.BoolOpt('use_default_quota_class', default=True, help='Enables or disables use of default quota class ' 'with default quota.'), cfg.IntOpt('per_volume_size_limit', default=-1, help='Max size allowed per volume, in gigabytes'), ] CONF = cfg.CONF CONF.register_opts(quota_opts) class DbQuotaDriver(object): """Driver to perform check to enforcement of quotas. Also allows to obtain quota information. The default driver utilizes the local database. """ def get_by_project(self, context, project_id, resource_name): """Get a specific quota by project.""" return db.quota_get(context, project_id, resource_name) def get_by_class(self, context, quota_class, resource_name): """Get a specific quota by quota class.""" return db.quota_class_get(context, quota_class, resource_name) def get_default(self, context, resource, project_id): """Get a specific default quota for a resource.""" default_quotas = db.quota_class_get_default(context) return default_quotas.get(resource.name, resource.default) def get_defaults(self, context, resources, project_id=None): """Given a list of resources, retrieve the default quotas. Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas, if it exists. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The id of the current project """ quotas = {} default_quotas = {} if CONF.use_default_quota_class: default_quotas = db.quota_class_get_default(context) for resource in resources.values(): if default_quotas: if resource.name not in default_quotas: versionutils.report_deprecated_feature(LOG, _( "Default quota for resource: %(res)s is set " "by the default quota flag: quota_%(res)s, " "it is now deprecated. 
Please use the " "default quota class for default " "quota.") % {'res': resource.name}) quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_class_quotas(self, context, resources, quota_class, defaults=True): """Given list of resources, retrieve the quotas for given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ quotas = {} default_quotas = {} class_quotas = db.quota_class_get_all_by_name(context, quota_class) if defaults: default_quotas = db.quota_class_get_default(context) for resource in resources.values(): if resource.name in class_quotas: quotas[resource.name] = class_quotas[resource.name] continue if defaults: quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True): """Retrieve quotas for a project. Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use, reserved and allocated counts will also be returned. """ quotas = {} project_quotas = db.quota_get_all_by_project(context, project_id) allocated_quotas = None if usages: project_usages = db.quota_usage_get_all_by_project(context, project_id) allocated_quotas = db.quota_allocated_get_all_by_project( context, project_id) allocated_quotas.pop('project_id') # Get the quotas for the appropriate class. If the project ID # matches the one in the context, we use the quota_class from # the context, otherwise, we use the provided quota_class (if # any) if project_id == context.project_id: quota_class = context.quota_class if quota_class: class_quotas = db.quota_class_get_all_by_name(context, quota_class) else: class_quotas = {} # TODO(mc_nair): change this to be lazy loaded default_quotas = self.get_defaults(context, resources, project_id) for resource in resources.values(): # Omit default/quota class values if not defaults and resource.name not in project_quotas: continue quotas[resource.name] = dict( limit=project_quotas.get( resource.name, class_quotas.get(resource.name, default_quotas[resource.name])), ) # Include usages if desired. This is optional because one # internal consumer of this interface wants to access the # usages directly from inside a transaction. if usages: usage = project_usages.get(resource.name, {}) quotas[resource.name].update( in_use=usage.get('in_use', 0), reserved=usage.get('reserved', 0), ) if allocated_quotas: quotas[resource.name].update( allocated=allocated_quotas.get(resource.name, 0), ) return quotas def _get_quotas(self, context, resources, keys, has_sync, project_id=None): """A helper method which retrieves the quotas for specific resources. 
The resources are identified by keys and apply to the current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param keys: A list of the desired quotas to retrieve. :param has_sync: If True, indicates that the resource must have a sync attribute; if False, indicates that the resource must NOT have a sync attribute. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # Filter resources if has_sync: sync_filt = lambda x: hasattr(x, 'sync') else: sync_filt = lambda x: not hasattr(x, 'sync') desired = set(keys) sub_resources = {k: v for k, v in resources.items() if k in desired and sync_filt(v)} # Make sure we accounted for all of them... if len(keys) != len(sub_resources): unknown = desired - set(sub_resources.keys()) raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) # Grab and return the quotas (without usages) quotas = self.get_project_quotas(context, sub_resources, project_id, context.quota_class, usages=False) return {k: v['limit'] for k, v in quotas.items()} def limit_check(self, context, resources, values, project_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # Get the applicable quotas quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if quotas[key] >= 0 and quotas[key] < val] if overs: raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages={}) def reserve(self, context, resources, deltas, expire=None, project_id=None): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources.
:param deltas: A dictionary of the proposed delta changes. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # Set up the reservation expiration if expire is None: expire = CONF.reservation_expire if isinstance(expire, six.integer_types): expire = datetime.timedelta(seconds=expire) if isinstance(expire, datetime.timedelta): expire = timeutils.utcnow() + expire if not isinstance(expire, datetime.datetime): raise exception.InvalidReservationExpiration(expire=expire) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # Get the applicable quotas. # NOTE(Vek): We're not worried about races at this point. # Yes, the admin may be in the process of reducing # quotas, but that's a pretty rare thing. quotas = self._get_quotas(context, resources, deltas.keys(), has_sync=True, project_id=project_id) return self._reserve(context, resources, quotas, deltas, expire, project_id) def _reserve(self, context, resources, quotas, deltas, expire, project_id): # NOTE(Vek): Most of the work here has to be done in the DB # API, because we have to do it in a transaction, # which means access to the session. Since the # session isn't available outside the DBAPI, we # have to do the work there. return db.quota_reserve(context, resources, quotas, deltas, expire, CONF.until_refresh, CONF.max_age, project_id=project_id) def commit(self, context, reservations, project_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id db.reservation_commit(context, reservations, project_id=project_id) def rollback(self, context, reservations, project_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id db.reservation_rollback(context, reservations, project_id=project_id) def destroy_by_project(self, context, project_id): """Destroy all limit quotas associated with a project. Leave usage and reservation quotas intact. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ db.quota_destroy_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. 
""" db.reservation_expire(context) class NestedDbQuotaDriver(DbQuotaDriver): def validate_nested_setup(self, ctxt, resources, project_tree, fix_allocated_quotas=False): """Ensures project_tree has quotas that make sense as nested quotas. Validates the following: * No parent project has child_projects who have more combined quota than the parent's quota limit * No child quota has a larger in-use value than it's current limit (could happen before because child default values weren't enforced) * All parent projects' "allocated" quotas match the sum of the limits of its children projects TODO(mc_nair): need a better way to "flip the switch" to use nested quotas to make this less race-ee """ self._allocated = {} project_queue = deque(project_tree.items()) borked_allocated_quotas = {} while project_queue: # Tuple of (current root node, subtree) cur_proj_id, project_subtree = project_queue.popleft() # If we're on a leaf node, no need to do validation on it, and in # order to avoid complication trying to get its children, skip it. if not project_subtree: continue cur_project_quotas = self.get_project_quotas( ctxt, resources, cur_proj_id) # Validate each resource when compared to it's child quotas for resource in cur_project_quotas.keys(): parent_quota = cur_project_quotas[resource] parent_limit = parent_quota['limit'] parent_usage = (parent_quota['in_use'] + parent_quota['reserved']) cur_parent_allocated = parent_quota.get('allocated', 0) calc_parent_allocated = self._get_cur_project_allocated( ctxt, resources[resource], {cur_proj_id: project_subtree}) if parent_limit > 0: parent_free_quota = parent_limit - parent_usage if parent_free_quota < calc_parent_allocated: msg = _("Sum of child usage '%(sum)s' is greater " "than free quota of '%(free)s' for project " "'%(proj)s' for resource '%(res)s'. Please " "lower the limit or usage for one or more of " "the following projects: '%(child_ids)s'") % { 'sum': calc_parent_allocated, 'free': parent_free_quota, 'proj': cur_proj_id, 'res': resource, 'child_ids': ', '.join(project_subtree.keys()) } raise exception.InvalidNestedQuotaSetup(reason=msg) # If "allocated" value wasn't right either err or fix DB if calc_parent_allocated != cur_parent_allocated: if fix_allocated_quotas: try: db.quota_allocated_update(ctxt, cur_proj_id, resource, calc_parent_allocated) except exception.ProjectQuotaNotFound: # If it was default quota create DB entry for it db.quota_create( ctxt, cur_proj_id, resource, parent_limit, allocated=calc_parent_allocated) else: if cur_proj_id not in borked_allocated_quotas: borked_allocated_quotas[cur_proj_id] = {} borked_allocated_quotas[cur_proj_id][resource] = { 'db_allocated_quota': cur_parent_allocated, 'expected_allocated_quota': calc_parent_allocated} project_queue.extend(project_subtree.items()) if borked_allocated_quotas: msg = _("Invalid allocated quotas defined for the following " "project quotas: %s") % borked_allocated_quotas raise exception.InvalidNestedQuotaSetup(message=msg) def _get_cur_project_allocated(self, ctxt, resource, project_tree): """Recursively calculates the allocated value of a project :param ctxt: context used to retrieve DB values :param resource: the resource to calculate allocated value for :param project_tree: the project tree used to calculate allocated e.g. 
{'A': {'B': {'D': None}, 'C': None}} A project's "allocated" value depends on: 1) the quota limits which have been "given" to its children, in the case where those limits are not unlimited (-1) 2) the current quota being used by a child plus whatever the child has given to its children, in the case of unlimited (-1) limits Scenario #2 requires recursively calculating allocated, and in order to efficiently calculate things we will save off any previously calculated allocated values. NOTE: this currently leaves a race condition when a project's allocated value has been calculated (with a -1 limit), but then a child project gets a volume created, thus changing the in-use value and messing up the child's allocated value. We should look into updating the allocated values as we're going along and switching to NestedQuotaDriver with the flip of a switch. """ # Grab the current node cur_project_id = list(project_tree)[0] project_subtree = project_tree[cur_project_id] res_name = resource.name if cur_project_id not in self._allocated: self._allocated[cur_project_id] = {} if res_name not in self._allocated[cur_project_id]: # Calculate the allocated value for this resource since we # haven't yet cur_project_allocated = 0 child_proj_ids = project_subtree.keys() if project_subtree else {} res_dict = {res_name: resource} child_project_quotas = {child_id: self.get_project_quotas( ctxt, res_dict, child_id) for child_id in child_proj_ids} for child_id, child_quota in child_project_quotas.items(): child_limit = child_quota[res_name]['limit'] # A non-unlimited quota is easy: anything explicitly given to a # child project gets added into the allocated value if child_limit != -1: if child_quota[res_name].get('in_use', 0) > child_limit: msg = _("Quota limit invalid for project '%(proj)s' " "for resource '%(res)s': limit of %(limit)d " "is less than in-use value of %(used)d") % { 'proj': child_id, 'res': res_name, 'limit': child_limit, 'used': child_quota[res_name]['in_use'] } raise exception.InvalidNestedQuotaSetup(reason=msg) cur_project_allocated += child_limit # For -1, take any quota being eaten up by the child, as well # as what the child itself has given up to its children else: child_in_use = child_quota[res_name].get('in_use', 0) # Recursively calculate child's allocated child_alloc = self._get_cur_project_allocated( ctxt, resource, {child_id: project_subtree[child_id]}) cur_project_allocated += child_in_use + child_alloc self._allocated[cur_project_id][res_name] = cur_project_allocated return self._allocated[cur_project_id][res_name] def get_default(self, context, resource, project_id): """Get a specific default quota for a resource.""" default = super(NestedDbQuotaDriver, self).get_default( context, resource, project_id) return 0 if quota_utils.get_parent_project_id( context, project_id) else default def get_defaults(self, context, resources, project_id=None): defaults = super(NestedDbQuotaDriver, self).get_defaults( context, resources, project_id) # All defaults are 0 for child projects if quota_utils.get_parent_project_id(context, project_id): for key in defaults.keys(): defaults[key] = 0 return defaults def _reserve(self, context, resources, quotas, deltas, expire, project_id): reserved = [] # So as not to change the exception behavior, flag every resource that # would be over quota instead of failing on the first OverQuota resources_failed_to_update = [] failed_usages = {} for res in deltas.keys(): try: reserved += db.quota_reserve( context, resources, quotas, {res: deltas[res]}, expire, CONF.until_refresh, CONF.max_age,
project_id) if quotas[res] == -1: reserved += quota_utils.update_alloc_to_next_hard_limit( context, resources, deltas, res, expire, project_id) except exception.OverQuota as e: resources_failed_to_update.append(res) failed_usages.update(e.kwargs['usages']) if resources_failed_to_update: db.reservation_rollback(context, reserved, project_id) # We change OverQuota to OverVolumeLimit in other places and expect # to find all of the OverQuota kwargs raise exception.OverQuota(overs=sorted(resources_failed_to_update), quotas=quotas, usages=failed_usages) return reserved class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag=None, parent_project_id=None): """Initializes a Resource. :param name: The name of the resource, i.e., "volumes". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. :param parent_project_id: The id of the current project's parent, if any. """ self.name = name self.flag = flag self.parent_project_id = parent_project_id def quota(self, driver, context, **kwargs): """Given a driver and context, obtain the quota for this resource. :param driver: A quota driver. :param context: The request context. :param project_id: The project to obtain the quota value for. If not provided, it is taken from the context. If it is given as None, no project-specific quota will be searched for. :param quota_class: The quota class corresponding to the project, or for which the quota is to be looked up. If not provided, it is taken from the context. If it is given as None, no quota class-specific quota will be searched for. Note that the quota class defaults to the value in the context, which may not correspond to the project if project_id is not the same as the one in the context. """ # Get the project ID project_id = kwargs.get('project_id', context.project_id) # Ditto for the quota class quota_class = kwargs.get('quota_class', context.quota_class) # Look up the quota for the project if project_id: try: return driver.get_by_project(context, project_id, self.name) except exception.ProjectQuotaNotFound: pass # Try for the quota class if quota_class: try: return driver.get_by_class(context, quota_class, self.name) except exception.QuotaClassNotFound: pass # OK, return the default return driver.get_default(context, self, parent_project_id=self.parent_project_id) @property def default(self): """Return the default value of the quota.""" if self.parent_project_id: return 0 return CONF[self.flag] if self.flag else -1 class ReservableResource(BaseResource): """Describe a reservable resource.""" def __init__(self, name, sync, flag=None): """Initializes a ReservableResource. Reservable resources are those resources which directly correspond to objects in the database, i.e., volumes, gigabytes, etc. A ReservableResource must be constructed with a usage synchronization function, which will be called to determine the current counts of one or more resources. The usage synchronization function will be passed three arguments: an admin context, the project ID, and an opaque session object, which should in turn be passed to the underlying database function. Synchronization functions should return a dictionary mapping resource names to the current in_use count for those resources; more than one resource and resource count may be returned. Note that synchronization functions may be associated with more than one ReservableResource. :param name: The name of the resource, i.e., "volumes". 
:param sync: A dbapi method's name which returns a dictionary to resynchronize the in_use count for one or more resources, as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(ReservableResource, self).__init__(name, flag=flag) if sync: self.sync = sync class AbsoluteResource(BaseResource): """Describe a non-reservable resource.""" pass class CountableResource(AbsoluteResource): """Describe a resource where counts aren't based only on the project ID.""" def __init__(self, name, count, flag=None): """Initializes a CountableResource. Countable resources are those resources which directly correspond to objects in the database, i.e., volumes, gigabytes, etc., but for which a count by project ID is inappropriate. A CountableResource must be constructed with a counting function, which will be called to determine the current counts of the resource. The counting function will be passed the context, along with the extra positional and keyword arguments that are passed to Quota.count(). It should return an integer specifying the count. Note that this counting is not performed in a transaction-safe manner. This resource class is a temporary measure to provide required functionality, until a better approach to solving this problem can be evolved. :param name: The name of the resource, i.e., "volumes". :param count: A callable which returns the count of the resource. The arguments passed are as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(CountableResource, self).__init__(name, flag=flag) self.count = count class VolumeTypeResource(ReservableResource): """ReservableResource for a specific volume type.""" def __init__(self, part_name, volume_type): """Initializes a VolumeTypeResource. :param part_name: The kind of resource, i.e., "volumes". :param volume_type: The volume type for this resource.
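For example (illustrative values): constructing
VolumeTypeResource('gigabytes', {'name': 'gold', 'id': '<uuid>'})
yields a resource named 'gigabytes_gold' whose usage is
resynchronized by the '_sync_gigabytes' dbapi method.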
""" self.volume_type_name = volume_type['name'] self.volume_type_id = volume_type['id'] name = "%s_%s" % (part_name, self.volume_type_name) super(VolumeTypeResource, self).__init__(name, "_sync_%s" % part_name) class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._resources = {} self._quota_driver_class = quota_driver_class self._driver_class = None @property def _driver(self): # Lazy load the driver so we give a chance for the config file to # be read before grabbing the config for which QuotaDriver to use if self._driver_class: return self._driver_class if not self._quota_driver_class: # Grab the current driver class from CONF self._quota_driver_class = CONF.quota_driver if isinstance(self._quota_driver_class, six.string_types): self._quota_driver_class = importutils.import_object( self._quota_driver_class) self._driver_class = self._quota_driver_class return self._driver_class def using_nested_quotas(self): """Returns true if nested quotas are being used""" return isinstance(self._driver, NestedDbQuotaDriver) def __contains__(self, resource): return resource in self.resources def register_resource(self, resource): """Register a resource.""" self._resources[resource.name] = resource def register_resources(self, resources): """Register a list of resources.""" for resource in resources: self.register_resource(resource) def get_by_project(self, context, project_id, resource_name): """Get a specific quota by project.""" return self._driver.get_by_project(context, project_id, resource_name) def get_by_project_or_default(self, context, project_id, resource_name): """Get specific quota by project or default quota if doesn't exists.""" try: val = self.get_by_project( context, project_id, resource_name).hard_limit except exception.ProjectQuotaNotFound: val = self.get_defaults(context, project_id)[resource_name] return val def get_by_class(self, context, quota_class, resource_name): """Get a specific quota by quota class.""" return self._driver.get_by_class(context, quota_class, resource_name) def get_default(self, context, resource, parent_project_id=None): """Get a specific default quota for a resource. :param parent_project_id: The id of the current project's parent, if any. """ return self._driver.get_default(context, resource, parent_project_id=parent_project_id) def get_defaults(self, context, project_id=None): """Retrieve the default quotas. :param context: The request context, for access checks. :param project_id: The id of the current project """ return self._driver.get_defaults(context, self.resources, project_id) def get_class_quotas(self, context, quota_class, defaults=True): """Retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ return self._driver.get_class_quotas(context, self.resources, quota_class, defaults=defaults) def get_project_quotas(self, context, project_id, quota_class=None, defaults=True, usages=True): """Retrieve the quotas for the given project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. 
:param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use, reserved and allocated counts will also be returned. """ return self._driver.get_project_quotas(context, self.resources, project_id, quota_class=quota_class, defaults=defaults, usages=usages) def count(self, context, resource, *args, **kwargs): """Count a resource. For countable resources, invokes the count() function and returns its result. Arguments following the context and resource are passed directly to the count function declared by the resource. :param context: The request context, for access checks. :param resource: The name of the resource, as a string. """ # Get the resource res = self.resources.get(resource) if not res or not hasattr(res, 'count'): raise exception.QuotaResourceUnknown(unknown=[resource]) return res.count(context, *args, **kwargs) def limit_check(self, context, project_id=None, **values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ return self._driver.limit_check(context, self.resources, values, project_id=project_id) def reserve(self, context, expire=None, project_id=None, **deltas): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. The deltas are given as keyword arguments, and current usage and other reservations are factored into the quota check. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. 
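A usage sketch (illustrative only; assumes the module-level QUOTAS
engine defined at the bottom of this file):

    reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
    try:
        ...  # create the volume
        QUOTAS.commit(context, reservations)
    except Exception:
        QUOTAS.rollback(context, reservations)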
""" reservations = self._driver.reserve(context, self.resources, deltas, expire=expire, project_id=project_id) LOG.debug("Created reservations %s", reservations) return reservations def commit(self, context, reservations, project_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.commit(context, reservations, project_id=project_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_LE("Failed to commit " "reservations %s"), reservations) def rollback(self, context, reservations, project_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.rollback(context, reservations, project_id=project_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_LE("Failed to roll back reservations " "%s"), reservations) def destroy_by_project(self, context, project_id): """Destroy all quota limits associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ self._driver.destroy_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ self._driver.expire(context) def add_volume_type_opts(self, context, opts, volume_type_id): """Add volume type resource options. Adds elements to the opts hash for volume type quotas. If a resource is being reserved ('gigabytes', etc) and the volume type is set up for its own quotas, these reservations are copied into keys for 'gigabytes_', etc. :param context: The request context, for access checks. :param opts: The reservations options hash. :param volume_type_id: The volume type id for this reservation. """ if not volume_type_id: return # NOTE(jdg): set inactive to True in volume_type_get, as we # may be operating on a volume that was created with a type # that has since been deleted. volume_type = db.volume_type_get(context, volume_type_id, True) for quota in ('volumes', 'gigabytes', 'snapshots'): if quota in opts: vtype_quota = "%s_%s" % (quota, volume_type['name']) opts[vtype_quota] = opts[quota] @property def resource_names(self): return sorted(self.resources.keys()) @property def resources(self): return self._resources class VolumeTypeQuotaEngine(QuotaEngine): """Represent the set of all quotas.""" @property def resources(self): """Fetches all possible quota resources.""" result = {} # Global quotas. 
argses = [('volumes', '_sync_volumes', 'quota_volumes'), ('per_volume_gigabytes', None, 'per_volume_size_limit'), ('snapshots', '_sync_snapshots', 'quota_snapshots'), ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), ('backups', '_sync_backups', 'quota_backups'), ('backup_gigabytes', '_sync_backup_gigabytes', 'quota_backup_gigabytes')] for args in argses: resource = ReservableResource(*args) result[resource.name] = resource # Volume type quotas. volume_types = db.volume_type_get_all(context.get_admin_context(), False) for volume_type in volume_types.values(): for part_name in ('volumes', 'gigabytes', 'snapshots'): resource = VolumeTypeResource(part_name, volume_type) result[resource.name] = resource return result def register_resource(self, resource): raise NotImplementedError(_("Cannot register resource")) def register_resources(self, resources): raise NotImplementedError(_("Cannot register resources")) def update_quota_resource(self, context, old_type_name, new_type_name): """Update resource in quota. This is to update resource in quotas, quota_classes, and quota_usages once the name of a volume type is changed. :param context: The request context, for access checks. :param old_type_name: old name of volume type. :param new_type_name: new name of volume type. """ for quota in ('volumes', 'gigabytes', 'snapshots'): old_res = "%s_%s" % (quota, old_type_name) new_res = "%s_%s" % (quota, new_type_name) db.quota_usage_update_resource(context, old_res, new_res) db.quota_class_update_resource(context, old_res, new_res) db.quota_update_resource(context, old_res, new_res) class CGQuotaEngine(QuotaEngine): """Represent the consistencygroup quotas.""" @property def resources(self): """Fetches all possible quota resources.""" result = {} # Global quotas. argses = [('consistencygroups', '_sync_consistencygroups', 'quota_consistencygroups'), ] for args in argses: resource = ReservableResource(*args) result[resource.name] = resource return result def register_resource(self, resource): raise NotImplementedError(_("Cannot register resource")) def register_resources(self, resources): raise NotImplementedError(_("Cannot register resources")) QUOTAS = VolumeTypeQuotaEngine() CGQUOTAS = CGQuotaEngine() cinder-8.0.0/cinder/backup/0000775000567000056710000000000012701406543016637 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/backup/chunkeddriver.py0000664000567000056710000007714512701406250022057 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # Copyright (C) 2015 Kevin Fox # Copyright (C) 2015 Tom Barron # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Generic base class to implement metadata, compression and chunked data operations """ import abc import hashlib import json import os import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units import six from cinder.backup import driver from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import objects from cinder.objects import fields from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) chunkedbackup_service_opts = [ cfg.StrOpt('backup_compression_algorithm', default='zlib', help='Compression algorithm (None to disable)'), ] CONF = cfg.CONF CONF.register_opts(chunkedbackup_service_opts) @six.add_metaclass(abc.ABCMeta) class ChunkedBackupDriver(driver.BackupDriver): """Abstract chunked backup driver. Implements common functionality for backup drivers that store volume data in multiple "chunks" in a backup repository when the size of the backed up cinder volume exceeds the size of a backup repository "chunk." Provides abstract methods to be implemented in concrete chunking drivers. """ DRIVER_VERSION = '1.0.0' DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'} def _get_compressor(self, algorithm): try: if algorithm.lower() in ('none', 'off', 'no'): return None elif algorithm.lower() in ('zlib', 'gzip'): import zlib as compressor return compressor elif algorithm.lower() in ('bz2', 'bzip2'): import bz2 as compressor return compressor except ImportError: pass err = _('unsupported compression algorithm: %s') % algorithm raise ValueError(err) def __init__(self, context, chunk_size_bytes, sha_block_size_bytes, backup_default_container, enable_progress_timer, db_driver=None): super(ChunkedBackupDriver, self).__init__(context, db_driver) self.chunk_size_bytes = chunk_size_bytes self.sha_block_size_bytes = sha_block_size_bytes self.backup_default_container = backup_default_container self.enable_progress_timer = enable_progress_timer self.backup_timer_interval = CONF.backup_timer_interval self.data_block_num = CONF.backup_object_number_per_notification self.az = CONF.storage_availability_zone self.backup_compression_algorithm = CONF.backup_compression_algorithm self.compressor = \ self._get_compressor(CONF.backup_compression_algorithm) self.support_force_delete = True # To create your own "chunked" backup driver, implement the following # abstract methods. @abc.abstractmethod def put_container(self, container): """Create the container if needed. No failure if it pre-exists.""" return @abc.abstractmethod def get_container_entries(self, container, prefix): """Get container entry names.""" return @abc.abstractmethod def get_object_writer(self, container, object_name, extra_metadata=None): """Returns a writer object which stores the chunk data in backup repository. The object returned should be a context handler that can be used in a "with" context. """ return @abc.abstractmethod def get_object_reader(self, container, object_name, extra_metadata=None): """Returns a reader object for the backed up chunk.""" return @abc.abstractmethod def delete_object(self, container, object_name): """Delete object from container.""" return @abc.abstractmethod def _generate_object_name_prefix(self, backup): return @abc.abstractmethod def update_container_name(self, backup, container): """Allow sub-classes to override container name. 
This method exists so that sub-classes can override the container name as it comes in to the driver in the backup object. Implementations should return None if no change to the container name is desired. """ return @abc.abstractmethod def get_extra_metadata(self, backup, volume): """Return extra metadata to use in prepare_backup. This method allows for collection of extra metadata in prepare_backup() which will be passed to get_object_reader() and get_object_writer(). Subclass extensions can use this extra information to optimize data transfers. Return a json serializable object. """ return def _create_container(self, context, backup): # Container's name will be decided by the driver (returned by method # update_container_name), if no change is required by the driver then # we'll use the one the backup object already has, but if it doesn't # have one backup_default_container will be used. new_container = self.update_container_name(backup, backup.container) if new_container: # If the driver is not really changing the name we don't want to # dirty the field in the object and save it to the DB with the same # value. if new_container != backup.container: backup.container = new_container elif backup.container is None: backup.container = self.backup_default_container LOG.debug('_create_container started, container: %(container)s,' 'backup: %(backup_id)s.', {'container': backup.container, 'backup_id': backup.id}) backup.save() self.put_container(backup.container) return backup.container def _generate_object_names(self, backup): prefix = backup['service_metadata'] object_names = self.get_container_entries(backup['container'], prefix) LOG.debug('generated object list: %s.', object_names) return object_names def _metadata_filename(self, backup): object_name = backup['service_metadata'] filename = '%s_metadata' % object_name return filename def _sha256_filename(self, backup): object_name = backup['service_metadata'] filename = '%s_sha256file' % object_name return filename def _write_metadata(self, backup, volume_id, container, object_list, volume_meta, extra_metadata=None): filename = self._metadata_filename(backup) LOG.debug('_write_metadata started, container name: %(container)s,' ' metadata filename: %(filename)s.', {'container': container, 'filename': filename}) metadata = {} metadata['version'] = self.DRIVER_VERSION metadata['backup_id'] = backup['id'] metadata['volume_id'] = volume_id metadata['backup_name'] = backup['display_name'] metadata['backup_description'] = backup['display_description'] metadata['created_at'] = str(backup['created_at']) metadata['objects'] = object_list metadata['parent_id'] = backup['parent_id'] metadata['volume_meta'] = volume_meta if extra_metadata: metadata['extra_metadata'] = extra_metadata metadata_json = json.dumps(metadata, sort_keys=True, indent=2) if six.PY3: metadata_json = metadata_json.encode('utf-8') with self.get_object_writer(container, filename) as writer: writer.write(metadata_json) LOG.debug('_write_metadata finished. 
Metadata: %s.', metadata_json) def _write_sha256file(self, backup, volume_id, container, sha256_list): filename = self._sha256_filename(backup) LOG.debug('_write_sha256file started, container name: %(container)s,' ' sha256file filename: %(filename)s.', {'container': container, 'filename': filename}) sha256file = {} sha256file['version'] = self.DRIVER_VERSION sha256file['backup_id'] = backup['id'] sha256file['volume_id'] = volume_id sha256file['backup_name'] = backup['display_name'] sha256file['backup_description'] = backup['display_description'] sha256file['created_at'] = six.text_type(backup['created_at']) sha256file['chunk_size'] = self.sha_block_size_bytes sha256file['sha256s'] = sha256_list sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2) if six.PY3: sha256file_json = sha256file_json.encode('utf-8') with self.get_object_writer(container, filename) as writer: writer.write(sha256file_json) LOG.debug('_write_sha256file finished.') def _read_metadata(self, backup): container = backup['container'] filename = self._metadata_filename(backup) LOG.debug('_read_metadata started, container name: %(container)s, ' 'metadata filename: %(filename)s.', {'container': container, 'filename': filename}) with self.get_object_reader(container, filename) as reader: metadata_json = reader.read() if six.PY3: metadata_json = metadata_json.decode('utf-8') metadata = json.loads(metadata_json) LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json) return metadata def _read_sha256file(self, backup): container = backup['container'] filename = self._sha256_filename(backup) LOG.debug('_read_sha256file started, container name: %(container)s, ' 'sha256 filename: %(filename)s.', {'container': container, 'filename': filename}) with self.get_object_reader(container, filename) as reader: sha256file_json = reader.read() if six.PY3: sha256file_json = sha256file_json.decode('utf-8') sha256file = json.loads(sha256file_json) LOG.debug('_read_sha256file finished (%s).', sha256file) return sha256file def _prepare_backup(self, backup): """Prepare the backup process and return the backup metadata.""" volume = self.db.volume_get(self.context, backup.volume_id) if volume['size'] <= 0: err = _('volume size %d is invalid.') % volume['size'] raise exception.InvalidVolume(reason=err) container = self._create_container(self.context, backup) object_prefix = self._generate_object_name_prefix(backup) backup.service_metadata = object_prefix backup.save() volume_size_bytes = volume['size'] * units.Gi availability_zone = self.az LOG.debug('starting backup of volume: %(volume_id)s,' ' volume size: %(volume_size_bytes)d, object names' ' prefix %(object_prefix)s, availability zone:' ' %(availability_zone)s', { 'volume_id': backup.volume_id, 'volume_size_bytes': volume_size_bytes, 'object_prefix': object_prefix, 'availability_zone': availability_zone, }) object_meta = {'id': 1, 'list': [], 'prefix': object_prefix, 'volume_meta': None} object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix} extra_metadata = self.get_extra_metadata(backup, volume) if extra_metadata is not None: object_meta['extra_metadata'] = extra_metadata return (object_meta, object_sha256, extra_metadata, container, volume_size_bytes) def _backup_chunk(self, backup, container, data, data_offset, object_meta, extra_metadata): """Backup data chunk based on the object metadata and offset.""" object_prefix = object_meta['prefix'] object_list = object_meta['list'] object_id = object_meta['id'] object_name = '%s-%05d' % (object_prefix, object_id) 
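# The %05d zero padding keeps chunk objects lexicographically
# ordered by object_id; e.g. a (hypothetical) prefix 'backup_abc'
# yields 'backup_abc-00001', 'backup_abc-00002', ... for up to
# 99999 chunks per backup.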
obj = {} obj[object_name] = {} obj[object_name]['offset'] = data_offset obj[object_name]['length'] = len(data) LOG.debug('Backing up chunk of data from volume.') algorithm, output_data = self._prepare_output_data(data) obj[object_name]['compression'] = algorithm LOG.debug('About to put_object') with self.get_object_writer( container, object_name, extra_metadata=extra_metadata ) as writer: writer.write(output_data) md5 = hashlib.md5(data).hexdigest() obj[object_name]['md5'] = md5 LOG.debug('backup MD5 for %(object_name)s: %(md5)s', {'object_name': object_name, 'md5': md5}) object_list.append(obj) object_id += 1 object_meta['list'] = object_list object_meta['id'] = object_id LOG.debug('Calling eventlet.sleep(0)') eventlet.sleep(0) def _prepare_output_data(self, data): if self.compressor is None: return 'none', data data_size_bytes = len(data) compressed_data = self.compressor.compress(data) comp_size_bytes = len(compressed_data) algorithm = CONF.backup_compression_algorithm.lower() if comp_size_bytes >= data_size_bytes: LOG.debug('Compression of this chunk was ineffective: ' 'original length: %(data_size_bytes)d, ' 'compressed length: %(compressed_size_bytes)d. ' 'Using original data for this chunk.', {'data_size_bytes': data_size_bytes, 'compressed_size_bytes': comp_size_bytes, }) return 'none', data LOG.debug('Compressed %(data_size_bytes)d bytes of data ' 'to %(comp_size_bytes)d bytes using %(algorithm)s.', {'data_size_bytes': data_size_bytes, 'comp_size_bytes': comp_size_bytes, 'algorithm': algorithm, }) return algorithm, compressed_data def _finalize_backup(self, backup, container, object_meta, object_sha256): """Write the backup's metadata to the backup repository.""" object_list = object_meta['list'] object_id = object_meta['id'] volume_meta = object_meta['volume_meta'] sha256_list = object_sha256['sha256s'] extra_metadata = object_meta.get('extra_metadata') self._write_sha256file(backup, backup.volume_id, container, sha256_list) self._write_metadata(backup, backup.volume_id, container, object_list, volume_meta, extra_metadata) backup.object_count = object_id backup.save() LOG.debug('backup %s finished.', backup['id']) def _backup_metadata(self, backup, object_meta): """Backup volume metadata. NOTE(dosaboy): the metadata we are backing up is obtained from a versioned api so we should not alter it in any way here. We must also be sure that the service that will perform the restore is compatible with version used. """ json_meta = self.get_metadata(backup['volume_id']) if not json_meta: LOG.debug("No volume metadata to backup.") return object_meta["volume_meta"] = json_meta def _send_progress_end(self, context, backup, object_meta): object_meta['backup_percent'] = 100 volume_utils.notify_about_backup_usage(context, backup, "createprogress", extra_usage_info= object_meta) def _send_progress_notification(self, context, backup, object_meta, total_block_sent_num, total_volume_size): backup_percent = total_block_sent_num * 100 / total_volume_size object_meta['backup_percent'] = backup_percent volume_utils.notify_about_backup_usage(context, backup, "createprogress", extra_usage_info= object_meta) def backup(self, backup, volume_file, backup_metadata=True): """Backup the given volume. If backup['parent_id'] is given, then an incremental backup is performed. 
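A sizing sketch (values illustrative only): with chunk_size_bytes
of 4 MiB and sha_block_size_bytes of 32 KiB, every chunk read from
the volume contributes 128 SHA-256 entries to the shafile; the
modulo check below rejects configurations where the chunk size is
not a whole multiple of the hash block size.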
""" if self.chunk_size_bytes % self.sha_block_size_bytes: err = _('Chunk size is not multiple of ' 'block size for creating hash.') raise exception.InvalidBackup(reason=err) # Read the shafile of the parent backup if backup['parent_id'] # is given. parent_backup_shafile = None parent_backup = None if backup.parent_id: parent_backup = objects.Backup.get_by_id(self.context, backup.parent_id) parent_backup_shafile = self._read_sha256file(parent_backup) parent_backup_shalist = parent_backup_shafile['sha256s'] if (parent_backup_shafile['chunk_size'] != self.sha_block_size_bytes): err = (_('Hash block size has changed since the last ' 'backup. New hash block size: %(new)s. Old hash ' 'block size: %(old)s. Do a full backup.') % {'old': parent_backup_shafile['chunk_size'], 'new': self.sha_block_size_bytes}) raise exception.InvalidBackup(reason=err) # If the volume size increased since the last backup, fail # the incremental backup and ask user to do a full backup. if backup.size > parent_backup.size: err = _('Volume size increased since the last ' 'backup. Do a full backup.') raise exception.InvalidBackup(reason=err) (object_meta, object_sha256, extra_metadata, container, volume_size_bytes) = self._prepare_backup(backup) counter = 0 total_block_sent_num = 0 # There are two mechanisms to send the progress notification. # 1. The notifications are periodically sent in a certain interval. # 2. The notifications are sent after a certain number of chunks. # Both of them are working simultaneously during the volume backup, # when "chunked" backup drivers are deployed. def _notify_progress(): self._send_progress_notification(self.context, backup, object_meta, total_block_sent_num, volume_size_bytes) timer = loopingcall.FixedIntervalLoopingCall( _notify_progress) if self.enable_progress_timer: timer.start(interval=self.backup_timer_interval) sha256_list = object_sha256['sha256s'] shaindex = 0 is_backup_canceled = False while True: # First of all, we check the status of this backup. If it # has been changed to delete or has been deleted, we cancel the # backup process to do forcing delete. backup = objects.Backup.get_by_id(self.context, backup.id) if backup.status in (fields.BackupStatus.DELETING, fields.BackupStatus.DELETED): is_backup_canceled = True # To avoid the chunk left when deletion complete, need to # clean up the object of chunk again. self.delete(backup) LOG.debug('Cancel the backup process of %s.', backup.id) break data_offset = volume_file.tell() data = volume_file.read(self.chunk_size_bytes) if data == b'': break # Calculate new shas with the datablock. shalist = [] off = 0 datalen = len(data) while off < datalen: chunk_start = off chunk_end = chunk_start + self.sha_block_size_bytes if chunk_end > datalen: chunk_end = datalen chunk = data[chunk_start:chunk_end] sha = hashlib.sha256(chunk).hexdigest() shalist.append(sha) off += self.sha_block_size_bytes sha256_list.extend(shalist) # If parent_backup is not None, that means an incremental # backup will be performed. if parent_backup: # Find the extent that needs to be backed up. extent_off = -1 for idx, sha in enumerate(shalist): if sha != parent_backup_shalist[shaindex]: if extent_off == -1: # Start of new extent. extent_off = idx * self.sha_block_size_bytes else: if extent_off != -1: # We've reached the end of extent. 
extent_end = idx * self.sha_block_size_bytes segment = data[extent_off:extent_end] self._backup_chunk(backup, container, segment, data_offset + extent_off, object_meta, extra_metadata) extent_off = -1 shaindex += 1 # The last extent extends to the end of data buffer. if extent_off != -1: extent_end = datalen segment = data[extent_off:extent_end] self._backup_chunk(backup, container, segment, data_offset + extent_off, object_meta, extra_metadata) extent_off = -1 else: # Do a full backup. self._backup_chunk(backup, container, data, data_offset, object_meta, extra_metadata) # Notifications total_block_sent_num += self.data_block_num counter += 1 if counter == self.data_block_num: # Send the notification to Ceilometer when the chunk # number reaches the data_block_num. The backup percentage # is put in the metadata as the extra information. self._send_progress_notification(self.context, backup, object_meta, total_block_sent_num, volume_size_bytes) # Reset the counter counter = 0 # Stop the timer. timer.stop() # If backup has been cancelled we have nothing more to do # but timer.stop(). if is_backup_canceled: return # All the data have been sent, the backup_percent reaches 100. self._send_progress_end(self.context, backup, object_meta) object_sha256['sha256s'] = sha256_list if backup_metadata: try: self._backup_metadata(backup, object_meta) # Whatever goes wrong, we want to log, cleanup, and re-raise. except Exception as err: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Backup volume metadata failed: %s."), err) self.delete(backup) self._finalize_backup(backup, container, object_meta, object_sha256) def _restore_v1(self, backup, volume_id, metadata, volume_file): """Restore a v1 volume backup.""" backup_id = backup['id'] LOG.debug('v1 volume backup restore of %s started.', backup_id) extra_metadata = metadata.get('extra_metadata') container = backup['container'] metadata_objects = metadata['objects'] metadata_object_names = [] for obj in metadata_objects: metadata_object_names.extend(obj.keys()) LOG.debug('metadata_object_names = %s.', metadata_object_names) prune_list = [self._metadata_filename(backup), self._sha256_filename(backup)] object_names = [object_name for object_name in self._generate_object_names(backup) if object_name not in prune_list] if sorted(object_names) != sorted(metadata_object_names): err = _('restore_backup aborted, actual object list ' 'does not match object list stored in metadata.') raise exception.InvalidBackup(reason=err) for metadata_object in metadata_objects: object_name, obj = list(metadata_object.items())[0] LOG.debug('restoring object. 
backup: %(backup_id)s, ' 'container: %(container)s, object name: ' '%(object_name)s, volume: %(volume_id)s.', { 'backup_id': backup_id, 'container': container, 'object_name': object_name, 'volume_id': volume_id, }) with self.get_object_reader( container, object_name, extra_metadata=extra_metadata) as reader: body = reader.read() compression_algorithm = metadata_object[object_name]['compression'] decompressor = self._get_compressor(compression_algorithm) volume_file.seek(obj['offset']) if decompressor is not None: LOG.debug('decompressing data using %s algorithm', compression_algorithm) decompressed = decompressor.decompress(body) volume_file.write(decompressed) else: volume_file.write(body) # force flush every write to avoid long blocking write on close volume_file.flush() # Be tolerant to IO implementations that do not support fileno() try: fileno = volume_file.fileno() except IOError: LOG.info(_LI("volume_file does not support " "fileno() so skipping " "fsync()")) else: os.fsync(fileno) # Restoring a backup to a volume can take some time. Yield so other # threads can run, allowing for among other things the service # status to be updated eventlet.sleep(0) LOG.debug('v1 volume backup restore of %s finished.', backup_id) def restore(self, backup, volume_id, volume_file): """Restore the given volume backup from backup repository.""" backup_id = backup['id'] container = backup['container'] object_prefix = backup['service_metadata'] LOG.debug('starting restore of backup %(object_prefix)s ' 'container: %(container)s, to volume %(volume_id)s, ' 'backup: %(backup_id)s.', { 'object_prefix': object_prefix, 'container': container, 'volume_id': volume_id, 'backup_id': backup_id, }) metadata = self._read_metadata(backup) metadata_version = metadata['version'] LOG.debug('Restoring backup version %s', metadata_version) try: restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get( metadata_version)) except TypeError: err = (_('No support to restore backup version %s') % metadata_version) raise exception.InvalidBackup(reason=err) # Build a list of backups based on parent_id. A full backup # will be the last one in the list. backup_list = [] backup_list.append(backup) current_backup = backup while current_backup.parent_id: prev_backup = objects.Backup.get_by_id(self.context, current_backup.parent_id) backup_list.append(prev_backup) current_backup = prev_backup # Do a full restore first, then layer the incremental backups # on top of it in order. 
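# For example, with a hypothetical chain full <- inc1 <- inc2, the
# parent_id walk above produces backup_list = [inc2, inc1, full], and
# the index loop below is equivalent to:
#
#     for b in reversed(backup_list):
#         restore_func(b, volume_id, self._read_metadata(b), volume_file)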
index = len(backup_list) - 1 while index >= 0: backup1 = backup_list[index] index = index - 1 metadata = self._read_metadata(backup1) restore_func(backup1, volume_id, metadata, volume_file) volume_meta = metadata.get('volume_meta', None) try: if volume_meta: self.put_metadata(volume_id, volume_meta) else: LOG.debug("No volume metadata in this backup.") except exception.BackupMetadataUnsupportedVersion: msg = _("Metadata restore failed due to incompatible version.") LOG.error(msg) raise exception.BackupOperationError(msg) LOG.debug('restore %(backup_id)s to %(volume_id)s finished.', {'backup_id': backup_id, 'volume_id': volume_id}) def delete(self, backup): """Delete the given backup.""" container = backup['container'] object_prefix = backup['service_metadata'] LOG.debug('delete started, backup: %(id)s, container: %(cont)s, ' 'prefix: %(pre)s.', {'id': backup['id'], 'cont': container, 'pre': object_prefix}) if container is not None and object_prefix is not None: object_names = [] try: object_names = self._generate_object_names(backup) except Exception: LOG.warning(_LW('Error while listing objects, continuing' ' with delete.')) for object_name in object_names: self.delete_object(container, object_name) LOG.debug('deleted object: %(object_name)s' ' in container: %(container)s.', { 'object_name': object_name, 'container': container }) # Deleting a backup's objects can take some time. # Yield so other threads can run eventlet.sleep(0) LOG.debug('delete %s finished.', backup['id']) cinder-8.0.0/cinder/backup/__init__.py0000664000567000056710000000166412701406250020752 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.backup import ' elsewhere. from oslo_utils import importutils from cinder.common import config CONF = config.CONF API = importutils.import_class(CONF.backup_api_class) cinder-8.0.0/cinder/backup/drivers/0000775000567000056710000000000012701406543020315 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/backup/drivers/tsm.py0000664000567000056710000005053412701406250021474 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Backup driver for IBM Tivoli Storage Manager (TSM). Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM) as the backend. 
The driver uses the TSM command line utility dsmc to run the backup and restore operations. This version supports backup of block devices, e.g., FC, iSCSI and local devices, as well as regular files. A prerequisite for using the IBM TSM backup service is configuring the Cinder host to use TSM. """ import json import os import stat from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from cinder.backup import driver from cinder import exception from cinder.i18n import _LE, _ from cinder import utils LOG = logging.getLogger(__name__) tsm_opts = [ cfg.StrOpt('backup_tsm_volume_prefix', default='backup', help='Volume prefix for the backup id when backing up to TSM'), cfg.StrOpt('backup_tsm_password', default='password', help='TSM password for the running username', secret=True), cfg.BoolOpt('backup_tsm_compression', default=True, help='Enable or disable compression for backups'), ] CONF = cfg.CONF CONF.register_opts(tsm_opts) VALID_BACKUP_MODES = ['image', 'file'] def _get_backup_metadata(backup, operation): """Return metadata persisted with backup object.""" try: svc_dict = json.loads(backup.service_metadata) backup_path = svc_dict.get('backup_path') backup_mode = svc_dict.get('backup_mode') except TypeError: # for backwards compatibility vol_prefix = CONF.backup_tsm_volume_prefix backup_id = backup['id'] backup_path = utils.make_dev_path('%s-%s' % (vol_prefix, backup_id)) backup_mode = 'image' if backup_mode not in VALID_BACKUP_MODES: volume_id = backup['volume_id'] backup_id = backup['id'] err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. ' 'Backup object has unexpected mode. Image or file ' 'backups supported, actual mode is %(vol_mode)s.') % {'op': operation, 'bck_id': backup_id, 'vol_id': volume_id, 'vol_mode': backup_mode}) LOG.error(err) raise exception.InvalidBackup(reason=err) return backup_path, backup_mode def _image_mode(backup_mode): """True if backup is image type.""" return backup_mode == 'image' def _make_link(volume_path, backup_path, vol_id): """Create a hard link for the volume block device. The IBM TSM client performs an image backup on a block device. The name of the block device is the backup prefix plus the backup id. :param volume_path: real device path name for volume :param backup_path: path name TSM will use as volume to backup :param vol_id: id of volume to backup (for reporting) :raises: InvalidBackup """ try: utils.execute('ln', volume_path, backup_path, run_as_root=True, check_exit_code=True) except processutils.ProcessExecutionError as exc: err = (_('backup: %(vol_id)s failed to create device hardlink ' 'from %(vpath)s to %(bpath)s.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'vol_id': vol_id, 'vpath': volume_path, 'bpath': backup_path, 'out': exc.stdout, 'err': exc.stderr}) LOG.error(err) raise exception.InvalidBackup(reason=err) def _create_unique_device_link(backup_id, volume_path, volume_id, bckup_mode): """Create a consistent hardlink for the volume block device. Create a consistent hardlink using the backup id so TSM will be able to backup and restore to the same block device.
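    For example (hypothetical id, assuming utils.make_dev_path maps
    device names under /dev), an 'image' mode backup with id 'abc123'
    and the default 'backup' prefix would link::

        /dev/backup-abc123

    while a 'file' mode backup of /mnt/vols/vol1 would link::

        /mnt/vols/backup-abc123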
:param backup_id: the backup id :param volume_path: real path of the backup/restore device :param volume_id: Volume id for backup or as restore target :param bckup_mode: TSM backup mode, either 'image' or 'file' :raises: InvalidBackup :returns: str -- hardlink path of the volume block device """ if _image_mode(bckup_mode): hardlink_path = utils.make_dev_path('%s-%s' % (CONF.backup_tsm_volume_prefix, backup_id)) else: dir, volname = os.path.split(volume_path) hardlink_path = ('%s/%s-%s' % (dir, CONF.backup_tsm_volume_prefix, backup_id)) _make_link(volume_path, hardlink_path, volume_id) return hardlink_path def _check_dsmc_output(output, check_attrs, exact_match=True): """Check dsmc command line utility output. Parse the output of the dsmc command and make sure that a given attribute is present, and that it has the proper value. TSM attributes have the format "text : value". :param output: TSM output to parse :param check_attrs: text to identify in the output :param exact_match: if True, the check will pass only if the parsed value is equal to the value specified in check_attrs. If False, the check will pass if the parsed value is greater than or equal to the value specified in check_attrs. This is needed because for file backups, the parent directories may also be included the first time a volume is backed up. :returns: bool -- indicates whether the required output attribute was found in the output """ parsed_attrs = {} for line in output.split('\n'): # parse TSM output: look for "msg : value" key, sep, val = line.partition(':') if sep is not None and key is not None and len(val.strip()) > 0: parsed_attrs[key] = val.strip() for ckey, cval in check_attrs.items(): if ckey not in parsed_attrs: return False elif exact_match and parsed_attrs[ckey] != cval: return False elif not exact_match and int(parsed_attrs[ckey]) < int(cval): return False return True def _get_volume_realpath(volume_file, volume_id): """Get the real path for the volume block device. If the volume is neither a block device nor a regular file, raise an InvalidBackup exception. :param volume_file: file object representing the volume :param volume_id: Volume id for backup or as restore target :raises: InvalidBackup :returns: str -- real path of volume device :returns: str -- backup mode to be used """ try: # Get real path volume_path = os.path.realpath(volume_file.name) # Verify that path is a block device volume_mode = os.stat(volume_path).st_mode if stat.S_ISBLK(volume_mode): backup_mode = 'image' elif stat.S_ISREG(volume_mode): backup_mode = 'file' else: err = (_('backup: %(vol_id)s failed. ' '%(path)s is an unexpected file type. Block or regular ' 'files supported, actual file mode is %(vol_mode)s.') % {'vol_id': volume_id, 'path': volume_path, 'vol_mode': volume_mode}) LOG.error(err) raise exception.InvalidBackup(reason=err) except AttributeError: err = (_('backup: %(vol_id)s failed. Cannot obtain real path ' 'to volume at %(path)s.') % {'vol_id': volume_id, 'path': volume_file}) LOG.error(err) raise exception.InvalidBackup(reason=err) except OSError: err = (_('backup: %(vol_id)s failed. ' '%(path)s is not a file.') % {'vol_id': volume_id, 'path': volume_path}) LOG.error(err) raise exception.InvalidBackup(reason=err) return volume_path, backup_mode def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id): """Remove the hardlink for the volume block device.
:param hardlink_path: hardlink to the volume block device :param volume_path: real path of the backup/restore device :param volume_id: Volume id for backup or as restore target """ try: utils.execute('rm', '-f', hardlink_path, run_as_root=True) except processutils.ProcessExecutionError as exc: LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink ' 'from %(vpath)s to %(bpath)s.\n' 'stdout: %(out)s\n stderr: %(err)s.'), {'vol_id': volume_id, 'vpath': volume_path, 'bpath': hardlink_path, 'out': exc.stdout, 'err': exc.stderr}) class TSMBackupDriver(driver.BackupDriver): """Provides backup, restore and delete of volumes backup for TSM.""" DRIVER_VERSION = '1.0.0' def __init__(self, context, db_driver=None): super(TSMBackupDriver, self).__init__(context, db_driver) self.tsm_password = CONF.backup_tsm_password self.volume_prefix = CONF.backup_tsm_volume_prefix def _do_backup(self, backup_path, vol_id, backup_mode): """Perform the actual backup operation. :param backup_path: volume path :param vol_id: volume id :param backup_mode: file mode of source volume; 'image' or 'file' :raises: InvalidBackup """ backup_attrs = {'Total number of objects backed up': '1'} compr_flag = 'yes' if CONF.backup_tsm_compression else 'no' backup_cmd = ['dsmc', 'backup'] if _image_mode(backup_mode): backup_cmd.append('image') backup_cmd.extend(['-quiet', '-compression=%s' % compr_flag, '-password=%s' % self.tsm_password, backup_path]) out, err = utils.execute(*backup_cmd, run_as_root=True, check_exit_code=False) success = _check_dsmc_output(out, backup_attrs, exact_match=False) if not success: err = (_('backup: %(vol_id)s failed to obtain backup ' 'success notification from server.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'vol_id': vol_id, 'out': out, 'err': err}) LOG.error(err) raise exception.InvalidBackup(reason=err) def _do_restore(self, backup_path, restore_path, vol_id, backup_mode): """Perform the actual restore operation. :param backup_path: the path the backup was created from, this identifies the backup to tsm :param restore_path: volume path to restore into :param vol_id: volume id :param backup_mode: mode used to create the backup ('image' or 'file') :raises: InvalidBackup """ restore_attrs = {'Total number of objects restored': '1'} restore_cmd = ['dsmc', 'restore'] if _image_mode(backup_mode): restore_cmd.append('image') restore_cmd.append('-noprompt') # suppress prompt else: restore_cmd.append('-replace=yes') # suppress prompt restore_cmd.extend(['-quiet', '-password=%s' % self.tsm_password, backup_path]) if restore_path != backup_path: restore_cmd.append(restore_path) out, err = utils.execute(*restore_cmd, run_as_root=True, check_exit_code=False) success = _check_dsmc_output(out, restore_attrs) if not success: err = (_('restore: %(vol_id)s failed.\n' 'stdout: %(out)s\n stderr: %(err)s.') % {'vol_id': vol_id, 'out': out, 'err': err}) LOG.error(err) raise exception.InvalidBackup(reason=err) def backup(self, backup, volume_file, backup_metadata=False): """Backup the given volume to TSM. TSM performs a backup of a volume. The volume_file is used to determine the path of the block device that TSM will back-up. 
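    For an 'image' mode source, the dsmc invocation assembled by
    _do_backup above is roughly equivalent to (hypothetical link path,
    password elided)::

        dsmc backup image -quiet -compression=yes -password=*** \
            /dev/backup-<backup_id>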
:param backup: backup information for volume :param volume_file: file object representing the volume :param backup_metadata: whether or not to backup volume metadata :raises InvalidBackup """ # TODO(dosaboy): this needs implementing (see backup.drivers.ceph for # an example) if backup_metadata: msg = _("Volume metadata backup requested but this driver does " "not yet support this feature.") raise exception.InvalidBackup(reason=msg) volume_path, backup_mode = _get_volume_realpath(volume_file, backup.volume_id) LOG.debug('Starting backup of volume: %(volume_id)s to TSM,' ' volume path: %(volume_path)s, mode: %(mode)s.', {'volume_id': backup.volume_id, 'volume_path': volume_path, 'mode': backup_mode}) backup_path = _create_unique_device_link(backup.id, volume_path, backup.volume_id, backup_mode) service_metadata = {'backup_mode': backup_mode, 'backup_path': backup_path} backup.service_metadata = json.dumps(service_metadata) backup.save() try: self._do_backup(backup_path, backup.volume_id, backup_mode) except processutils.ProcessExecutionError as exc: err = (_('backup: %(vol_id)s failed to run dsmc ' 'on %(bpath)s.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'vol_id': backup.volume_id, 'bpath': backup_path, 'out': exc.stdout, 'err': exc.stderr}) LOG.error(err) raise exception.InvalidBackup(reason=err) except exception.Error as exc: err = (_('backup: %(vol_id)s failed to run dsmc ' 'due to invalid arguments ' 'on %(bpath)s.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'vol_id': backup.volume_id, 'bpath': backup_path, 'out': exc.stdout, 'err': exc.stderr}) LOG.error(err) raise exception.InvalidBackup(reason=err) finally: _cleanup_device_hardlink(backup_path, volume_path, backup.volume_id) LOG.debug('Backup %s finished.', backup.id) def restore(self, backup, volume_id, volume_file): """Restore the given volume backup from TSM server. :param backup: backup information for volume :param volume_id: volume id :param volume_file: file object representing the volume :raises: InvalidBackup """ # backup_path is the path that was originally backed up. backup_path, backup_mode = _get_backup_metadata(backup, 'restore') LOG.debug('Starting restore of backup from TSM ' 'to volume %(volume_id)s, ' 'backup: %(backup_id)s, ' 'mode: %(mode)s.', {'volume_id': volume_id, 'backup_id': backup.id, 'mode': backup_mode}) # volume_path is the path to restore into. This may # be different than the original volume. volume_path, unused = _get_volume_realpath(volume_file, volume_id) restore_path = _create_unique_device_link(backup.id, volume_path, volume_id, backup_mode) try: self._do_restore(backup_path, restore_path, volume_id, backup_mode) except processutils.ProcessExecutionError as exc: err = (_('restore: %(vol_id)s failed to run dsmc ' 'on %(bpath)s.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'vol_id': volume_id, 'bpath': restore_path, 'out': exc.stdout, 'err': exc.stderr}) LOG.error(err) raise exception.InvalidBackup(reason=err) except exception.Error as exc: err = (_('restore: %(vol_id)s failed to run dsmc ' 'due to invalid arguments ' 'on %(bpath)s.\n' 'stdout: %(out)s\n stderr: %(err)s') % {'vol_id': volume_id, 'bpath': restore_path, 'out': exc.stdout, 'err': exc.stderr}) LOG.error(err) raise exception.InvalidBackup(reason=err) finally: _cleanup_device_hardlink(restore_path, volume_path, volume_id) LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.', {'backup_id': backup.id, 'volume_id': volume_id}) def delete(self, backup): """Delete the given backup from TSM server. 
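        The invocation built below is roughly equivalent to, for an
        image mode backup (hypothetical path, password elided)::

            dsmc delete backup -quiet -noprompt -objtype=image \
                -password=*** /dev/backup-<backup_id>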
:param backup: backup information for volume :raises: InvalidBackup """ delete_attrs = {'Total number of objects deleted': '1'} delete_path, backup_mode = _get_backup_metadata(backup, 'delete') LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.', {'backup': backup.id, 'mode': backup_mode}) try: out, err = utils.execute('dsmc', 'delete', 'backup', '-quiet', '-noprompt', '-objtype=%s' % backup_mode, '-password=%s' % self.tsm_password, delete_path, run_as_root=True, check_exit_code=False) except processutils.ProcessExecutionError as exc: err = (_('delete: %(vol_id)s failed to run dsmc with ' 'stdout: %(out)s\n stderr: %(err)s') % {'vol_id': backup.volume_id, 'out': exc.stdout, 'err': exc.stderr}) LOG.error(err) raise exception.InvalidBackup(reason=err) except exception.Error as exc: err = (_('delete: %(vol_id)s failed to run dsmc ' 'due to invalid arguments with ' 'stdout: %(out)s\n stderr: %(err)s') % {'vol_id': backup.volume_id, 'out': exc.stdout, 'err': exc.stderr}) LOG.error(err) raise exception.InvalidBackup(reason=err) success = _check_dsmc_output(out, delete_attrs) if not success: # log an error if TSM cannot delete the backup object, # but do not raise an exception so that the cinder backup # object can still be removed. LOG.error(_LE('delete: %(vol_id)s failed with ' 'stdout: %(out)s\n stderr: %(err)s'), {'vol_id': backup.volume_id, 'out': out, 'err': err}) LOG.debug('Delete %s finished.', backup['id']) def get_backup_driver(context): return TSMBackupDriver(context) cinder-8.0.0/cinder/backup/drivers/posix.py0000664000567000056710000001236312701406250022031 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Tom Barron # Copyright (C) 2015 Kevin Fox # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses a posix filesystem as the backend.""" import os import os.path import stat from oslo_config import cfg from oslo_log import log as logging from cinder.backup import chunkeddriver from cinder import exception LOG = logging.getLogger(__name__) SHA_SIZE = 32768 # Multiple of SHA_SIZE, close to a characteristic OS max file system size. BACKUP_FILE_SIZE = 61035 * 32768 posixbackup_service_opts = [ cfg.IntOpt('backup_file_size', default=BACKUP_FILE_SIZE, help='The maximum size in bytes of the files used to hold ' 'backups. If the volume being backed up exceeds this ' 'size, then it will be backed up into multiple files. ' 'backup_file_size must be a multiple of ' 'backup_sha_block_size_bytes.'), cfg.IntOpt('backup_sha_block_size_bytes', default=SHA_SIZE, help='The size in bytes at which changes are tracked ' 'for incremental backups. backup_file_size has ' 'to be a multiple of backup_sha_block_size_bytes.'), cfg.BoolOpt('backup_enable_progress_timer', default=True, help='Enable or disable the timer to send the periodic ' 'progress notifications to Ceilometer when backing ' 'up the volume to the backend storage.
The ' 'default value is True to enable the timer.'), cfg.StrOpt('backup_posix_path', default='$state_path/backup', help='Path specifying where to store backups.'), cfg.StrOpt('backup_container', help='Custom directory to use for backups.'), ] CONF = cfg.CONF CONF.register_opts(posixbackup_service_opts) class PosixBackupDriver(chunkeddriver.ChunkedBackupDriver): """Provides backup, restore and delete using a Posix file system.""" def __init__(self, context, db_driver=None, backup_path=None): chunk_size_bytes = CONF.backup_file_size sha_block_size_bytes = CONF.backup_sha_block_size_bytes backup_default_container = CONF.backup_container enable_progress_timer = CONF.backup_enable_progress_timer super(PosixBackupDriver, self).__init__(context, chunk_size_bytes, sha_block_size_bytes, backup_default_container, enable_progress_timer, db_driver) self.backup_path = backup_path if not backup_path: self.backup_path = CONF.backup_posix_path if not self.backup_path: raise exception.ConfigNotFound(path='backup_path') LOG.debug("Using backup repository: %s", self.backup_path) def update_container_name(self, backup, container): if container is not None: return container id = backup['id'] return os.path.join(id[0:2], id[2:4], id) def put_container(self, container): path = os.path.join(self.backup_path, container) if not os.path.exists(path): os.makedirs(path) permissions = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP) os.chmod(path, permissions) def get_container_entries(self, container, prefix): path = os.path.join(self.backup_path, container) return [i for i in os.listdir(path) if i.startswith(prefix)] def get_object_writer(self, container, object_name, extra_metadata=None): path = os.path.join(self.backup_path, container, object_name) f = open(path, 'wb') permissions = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP) os.chmod(path, permissions) return f def get_object_reader(self, container, object_name, extra_metadata=None): path = os.path.join(self.backup_path, container, object_name) return open(path, 'rb') def delete_object(self, container, object_name): # TODO(tbarron): clean up the container path if it is empty path = os.path.join(self.backup_path, container, object_name) os.remove(path) def _generate_object_name_prefix(self, backup): return 'backup' def get_extra_metadata(self, backup, volume): return None def get_backup_driver(context): return PosixBackupDriver(context) cinder-8.0.0/cinder/backup/drivers/__init__.py0000664000567000056710000000000012701406250022407 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/backup/drivers/ceph.py0000664000567000056710000014014412701406250021605 0ustar jenkinsjenkins00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Ceph Backup Service Implementation. This driver supports backing up volumes of any type to a Ceph object store. 
It is also capable of detecting whether the volume to be backed up is a Ceph RBD volume and, if so, attempts to perform incremental/differential backups. Support is also included for the following in the case of a source volume being a Ceph RBD volume: * backing up within the same Ceph pool (not recommended) * backing up between different Ceph pools * backing up between different Ceph clusters At the time of writing, differential backup support in Ceph/librbd was quite new so this driver accounts for this by first attempting differential backup and falling back to full backup/copy if the former fails. It is recommended that you upgrade to Ceph Dumpling (>= v0.67) or above to get the best results. If incremental backups are used, multiple backups of the same volume are stored as snapshots so that minimal space is consumed in the object store and restoring the volume takes a far reduced amount of time compared to a full copy. Note that Cinder supports restoring to a new volume or the original volume the backup was taken from. For the latter case, a full copy is enforced since this was deemed the safest action to take. It is therefore recommended to always restore to a new volume (default). """ import fcntl import os import re import subprocess import time import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from six.moves import range from cinder.backup import driver from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils import cinder.volume.drivers.rbd as rbd_driver try: import rados import rbd except ImportError: rados = None rbd = None LOG = logging.getLogger(__name__) service_opts = [ cfg.StrOpt('backup_ceph_conf', default='/etc/ceph/ceph.conf', help='Ceph configuration file to use.'), cfg.StrOpt('backup_ceph_user', default='cinder', help='The Ceph user to connect with. Default here is to use ' 'the same user as for Cinder volumes. If not using cephx ' 'this should be set to None.'), cfg.IntOpt('backup_ceph_chunk_size', default=(units.Mi * 128), help='The chunk size, in bytes, that a backup is broken into ' 'before transfer to the Ceph object store.'), cfg.StrOpt('backup_ceph_pool', default='backups', help='The Ceph pool where volume backups are stored.'), cfg.IntOpt('backup_ceph_stripe_unit', default=0, help='RBD stripe unit to use when creating a backup image.'), cfg.IntOpt('backup_ceph_stripe_count', default=0, help='RBD stripe count to use when creating a backup image.'), cfg.BoolOpt('restore_discard_excess_bytes', default=True, help='If True, always discard excess bytes when restoring ' 'volumes i.e. pad with zeroes.') ] CONF = cfg.CONF CONF.register_opts(service_opts) class VolumeMetadataBackup(object): def __init__(self, client, backup_id): self._client = client self._backup_id = backup_id @property def name(self): return utils.convert_str("backup.%s.meta" % self._backup_id) @property def exists(self): meta_obj = rados.Object(self._client.ioctx, self.name) return self._exists(meta_obj) def _exists(self, obj): try: obj.stat() except rados.ObjectNotFound: return False else: return True def set(self, json_meta): """Write JSON metadata to a new object. This should only be called once per backup. Raises VolumeMetadataBackupExists if the object already exists. 
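        A minimal usage sketch, assuming an open RADOSClient ``client``
        and a hypothetical backup id::

            meta_backup = VolumeMetadataBackup(client, 'abc123')
            meta_backup.set(json_meta)     # object name: backup.abc123.meta
            json_meta = meta_backup.get()  # None if the object is missing
            meta_backup.remove_if_exists()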
""" meta_obj = rados.Object(self._client.ioctx, self.name) if self._exists(meta_obj): msg = _("Metadata backup object '%s' already exists") % self.name raise exception.VolumeMetadataBackupExists(msg) meta_obj.write(json_meta) def get(self): """Get metadata backup object. Returns None if the object does not exist. """ meta_obj = rados.Object(self._client.ioctx, self.name) if not self._exists(meta_obj): LOG.debug("Metadata backup object %s does not exist", self.name) return None return meta_obj.read() def remove_if_exists(self): meta_obj = rados.Object(self._client.ioctx, self.name) try: meta_obj.remove() except rados.ObjectNotFound: LOG.debug("Metadata backup object '%s' not found - ignoring", self.name) class CephBackupDriver(driver.BackupDriver): """Backup Cinder volumes to Ceph Object Store. This class enables backing up Cinder volumes to a Ceph object store. Backups may be stored in their own pool or even cluster. Store location is defined by the Ceph conf file and service config options supplied. If the source volume is itself an RBD volume, the backup will be performed using incremental differential backups which *should* give a performance gain. """ def __init__(self, context, db_driver=None, execute=None): super(CephBackupDriver, self).__init__(context, db_driver) self.rbd = rbd self.rados = rados self.chunk_size = CONF.backup_ceph_chunk_size self._execute = execute or utils.execute if self._supports_stripingv2: self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit self.rbd_stripe_count = CONF.backup_ceph_stripe_count else: LOG.info(_LI("RBD striping not supported - ignoring configuration " "settings for rbd striping")) self.rbd_stripe_count = 0 self.rbd_stripe_unit = 0 self._ceph_backup_user = utils.convert_str(CONF.backup_ceph_user) self._ceph_backup_pool = utils.convert_str(CONF.backup_ceph_pool) self._ceph_backup_conf = utils.convert_str(CONF.backup_ceph_conf) def _validate_string_args(self, *args): """Ensure all args are non-None and non-empty.""" return all(args) def _ceph_args(self, user, conf=None, pool=None): """Create default ceph args for executing rbd commands. If no --conf is provided, rbd will look in the default locations e.g. /etc/ceph/ceph.conf """ # Make sure user arg is valid since rbd command may not fail if # invalid/no user provided, resulting in unexpected behaviour. 
if not self._validate_string_args(user): raise exception.BackupInvalidCephArgs(_("invalid user '%s'") % user) args = ['--id', user] if conf: args.extend(['--conf', conf]) if pool: args.extend(['--pool', pool]) return args @property def _supports_layering(self): """Determine if copy-on-write is supported by our version of librbd.""" return hasattr(self.rbd, 'RBD_FEATURE_LAYERING') @property def _supports_stripingv2(self): """Determine if striping is supported by our version of librbd.""" return hasattr(self.rbd, 'RBD_FEATURE_STRIPINGV2') def _get_rbd_support(self): """Determine RBD features supported by our version of librbd.""" old_format = True features = 0 if self._supports_layering: old_format = False features |= self.rbd.RBD_FEATURE_LAYERING if self._supports_stripingv2: old_format = False features |= self.rbd.RBD_FEATURE_STRIPINGV2 return (old_format, features) def _connect_to_rados(self, pool=None): """Establish connection to the backup Ceph cluster.""" client = self.rados.Rados(rados_id=self._ceph_backup_user, conffile=self._ceph_backup_conf) try: client.connect() pool_to_open = utils.convert_str(pool or self._ceph_backup_pool) ioctx = client.open_ioctx(pool_to_open) return client, ioctx except self.rados.Error: # shutdown cannot raise an exception client.shutdown() raise def _disconnect_from_rados(self, client, ioctx): """Terminate connection with the backup Ceph cluster.""" # closing an ioctx cannot raise an exception ioctx.close() client.shutdown() def _get_backup_base_name(self, volume_id, backup_id=None, diff_format=False): """Return name of base image used for backup. Incremental backups use a new base name so we support both old and new style formats. """ # Ensure no unicode if diff_format: return utils.convert_str("volume-%s.backup.base" % volume_id) else: if backup_id is None: msg = _("Backup id required") raise exception.InvalidParameterValue(msg) return utils.convert_str("volume-%s.backup.%s" % (volume_id, backup_id)) def _discard_bytes(self, volume, offset, length): """Trim length bytes from offset. If the volume is an rbd, do a discard(); otherwise assume it is a file and pad with zeroes. """ if length: LOG.debug("Discarding %(length)s bytes from offset %(offset)s", {'length': length, 'offset': offset}) if self._file_is_rbd(volume): volume.rbd_image.discard(offset, length) else: zeroes = '\0' * self.chunk_size chunks = int(length / self.chunk_size) for chunk in range(0, chunks): LOG.debug("Writing zeroes chunk %d", chunk) volume.write(zeroes) volume.flush() # yield to any other pending backups eventlet.sleep(0) rem = int(length % self.chunk_size) if rem: zeroes = '\0' * rem volume.write(zeroes) volume.flush() def _transfer_data(self, src, src_name, dest, dest_name, length): """Transfer data between files (Python IO objects).""" LOG.debug("Transferring data between '%(src)s' and '%(dest)s'", {'src': src_name, 'dest': dest_name}) chunks = int(length / self.chunk_size) LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred", {'chunks': chunks, 'bytes': self.chunk_size}) for chunk in range(0, chunks): before = time.time() data = src.read(self.chunk_size) # If we have reached the end of the source, discard any extraneous # bytes from the destination volume if trim is enabled and stop # writing.
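# Sketch of the chunk arithmetic driving this loop, with hypothetical
# sizes: a 300 MiB length and the default 128 MiB
# backup_ceph_chunk_size give
#
#     chunks, rem = divmod(300 * 1024 ** 2, 128 * 1024 ** 2)
#     # chunks == 2 full transfers; rem == 44 MiB is written afterwards
#
# The EOF check below handles a source that ends early.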
if data == b'': if CONF.restore_discard_excess_bytes: self._discard_bytes(dest, dest.tell(), length - dest.tell()) return dest.write(data) dest.flush() delta = (time.time() - before) rate = (self.chunk_size / delta) / 1024 LOG.debug("Transferred chunk %(chunk)s of %(chunks)s " "(%(rate)dK/s)", {'chunk': chunk + 1, 'chunks': chunks, 'rate': rate}) # yield to any other pending backups eventlet.sleep(0) rem = int(length % self.chunk_size) if rem: LOG.debug("Transferring remaining %s bytes", rem) data = src.read(rem) if data == b'': if CONF.restore_discard_excess_bytes: self._discard_bytes(dest, dest.tell(), rem) else: dest.write(data) dest.flush() # yield to any other pending backups eventlet.sleep(0) def _create_base_image(self, name, size, rados_client): """Create a base backup image. This will be the base image used for storing differential exports. """ LOG.debug("Creating base image '%s'", name) old_format, features = self._get_rbd_support() self.rbd.RBD().create(ioctx=rados_client.ioctx, name=name, size=size, old_format=old_format, features=features, stripe_unit=self.rbd_stripe_unit, stripe_count=self.rbd_stripe_count) def _delete_backup_snapshot(self, rados_client, base_name, backup_id): """Delete snapshot associated with this backup if one exists. A backup should have at most ONE associated snapshot. This is required before attempting to delete the base image. The snapshot on the original volume can be left as it will be purged when the volume is deleted. Returns tuple(deleted_snap_name, num_of_remaining_snaps). """ remaining_snaps = 0 base_rbd = self.rbd.Image(rados_client.ioctx, base_name) try: snap_name = self._get_backup_snap_name(base_rbd, base_name, backup_id) if snap_name: LOG.debug("Deleting backup snapshot='%s'", snap_name) base_rbd.remove_snap(snap_name) else: LOG.debug("No backup snapshot to delete") # Now check whether any snapshots remain on the base image backup_snaps = self.get_backup_snaps(base_rbd) if backup_snaps: remaining_snaps = len(backup_snaps) finally: base_rbd.close() return snap_name, remaining_snaps def _try_delete_base_image(self, backup_id, volume_id, base_name=None): """Try to delete backup RBD image. If the rbd image is a base image for incremental backups, it may have snapshots. Delete the snapshot associated with backup_id and if the image has no more snapshots, delete it. Otherwise return. If no base name is provided try normal (full) format then diff format image name. If a base name is provided but does not exist, ImageNotFound will be raised. If the image is busy, a number of retries will be performed if ImageBusy is received, after which the exception will be propagated to the caller. 
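        With the constants used below (retries=3, delay=5), a busy base
        image is attempted up to four times with five-second pauses
        before ImageBusy is finally re-raised.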
""" retries = 3 delay = 5 try_diff_format = False if base_name is None: try_diff_format = True base_name = self._get_backup_base_name(volume_id, backup_id) LOG.debug("Trying diff format basename='%(basename)s' for " "backup base image of volume %(volume)s.", {'basename': base_name, 'volume': volume_id}) with rbd_driver.RADOSClient(self) as client: rbd_exists, base_name = \ self._rbd_image_exists(base_name, volume_id, client, try_diff_format=try_diff_format) if not rbd_exists: raise self.rbd.ImageNotFound(_("image %s not found") % base_name) while retries >= 0: # First delete associated snapshot from base image (if exists) snap, rem = self._delete_backup_snapshot(client, base_name, backup_id) if rem: LOG.info( _LI("Backup base image of volume %(volume)s still " "has %(snapshots)s snapshots so skipping base " "image delete."), {'snapshots': rem, 'volume': volume_id}) return LOG.info(_LI("Deleting backup base image='%(basename)s' of " "volume %(volume)s."), {'basename': base_name, 'volume': volume_id}) # Delete base if no more snapshots try: self.rbd.RBD().remove(client.ioctx, base_name) except self.rbd.ImageBusy: # Allow a retry if the image is busy if retries > 0: LOG.info(_LI("Backup image of volume %(volume)s is " "busy, retrying %(retries)s more time(s) " "in %(delay)ss."), {'retries': retries, 'delay': delay, 'volume': volume_id}) eventlet.sleep(delay) else: LOG.error(_LE("Max retries reached deleting backup " "%(basename)s image of volume " "%(volume)s."), {'volume': volume_id, 'basename': base_name}) raise else: LOG.debug("Base backup image='%(basename)s' of volume " "%(volume)s deleted.", {'basename': base_name, 'volume': volume_id}) retries = 0 finally: retries -= 1 # Since we have deleted the base image we can delete the source # volume backup snapshot. src_name = utils.convert_str(volume_id) if src_name in self.rbd.RBD().list(client.ioctx): LOG.debug("Deleting source volume snapshot '%(snapshot)s' " "for backup %(basename)s.", {'snapshot': snap, 'basename': base_name}) src_rbd = self.rbd.Image(client.ioctx, src_name) try: src_rbd.remove_snap(snap) finally: src_rbd.close() def _piped_execute(self, cmd1, cmd2): """Pipe output of cmd1 into cmd2.""" LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1)) LOG.debug("cmd2='%s'", ' '.join(cmd2)) try: p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: LOG.error(_LE("Pipe1 failed - %s "), e) raise # NOTE(dosaboy): ensure that the pipe is blocking. This is to work # around the case where evenlet.green.subprocess is used which seems to # use a non-blocking pipe. flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK) fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags) try: p2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: LOG.error(_LE("Pipe2 failed - %s "), e) raise p1.stdout.close() stdout, stderr = p2.communicate() return p2.returncode, stderr def _rbd_diff_transfer(self, src_name, src_pool, dest_name, dest_pool, src_user, src_conf, dest_user, dest_conf, src_snap=None, from_snap=None): """Copy only extents changed between two points. If no snapshot is provided, the diff extents will be all those changed since the rbd volume/base was created, otherwise it will be those changed since the snapshot was created. 
""" LOG.debug("Performing differential transfer from '%(src)s' to " "'%(dest)s'", {'src': src_name, 'dest': dest_name}) # NOTE(dosaboy): Need to be tolerant of clusters/clients that do # not support these operations since at the time of writing they # were very new. src_ceph_args = self._ceph_args(src_user, src_conf, pool=src_pool) dest_ceph_args = self._ceph_args(dest_user, dest_conf, pool=dest_pool) cmd1 = ['rbd', 'export-diff'] + src_ceph_args if from_snap is not None: cmd1.extend(['--from-snap', from_snap]) if src_snap: path = utils.convert_str("%s/%s@%s" % (src_pool, src_name, src_snap)) else: path = utils.convert_str("%s/%s" % (src_pool, src_name)) cmd1.extend([path, '-']) cmd2 = ['rbd', 'import-diff'] + dest_ceph_args rbd_path = utils.convert_str("%s/%s" % (dest_pool, dest_name)) cmd2.extend(['-', rbd_path]) ret, stderr = self._piped_execute(cmd1, cmd2) if ret: msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)") % {'ret': ret, 'stderr': stderr}) LOG.info(msg) raise exception.BackupRBDOperationFailed(msg) def _rbd_image_exists(self, name, volume_id, client, try_diff_format=False): """Return tuple (exists, name).""" rbds = self.rbd.RBD().list(client.ioctx) if name not in rbds: LOG.debug("Image '%s' not found - trying diff format name", name) if try_diff_format: name = self._get_backup_base_name(volume_id, diff_format=True) if name not in rbds: LOG.debug("Diff format image '%s' not found", name) return False, name else: return False, name return True, name def _snap_exists(self, base_name, snap_name, client): """Return True if snapshot exists in base image.""" base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True) try: snaps = base_rbd.list_snaps() finally: base_rbd.close() if snaps is None: return False for snap in snaps: if snap['name'] == snap_name: return True return False def _backup_rbd(self, backup_id, volume_id, volume_file, volume_name, length): """Create an incremental backup from an RBD image.""" rbd_user = volume_file.rbd_user rbd_pool = volume_file.rbd_pool rbd_conf = volume_file.rbd_conf source_rbd_image = volume_file.rbd_image # Identify our --from-snap point (if one exists) from_snap = self._get_most_recent_snap(source_rbd_image) LOG.debug("Using --from-snap '%(snap)s' for incremental backup of " "volume %(volume)s.", {'snap': from_snap, 'volume': volume_id}) base_name = self._get_backup_base_name(volume_id, diff_format=True) image_created = False with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: # If from_snap does not exist at the destination (and the # destination exists), this implies a previous backup has failed. # In this case we will force a full backup. # # TODO(dosaboy): find a way to repair the broken backup # if base_name not in self.rbd.RBD().list(ioctx=client.ioctx): # If a from_snap is defined but the base does not exist, we # ignore it since it is stale and waiting to be cleaned up. 
if from_snap: LOG.debug("Source snapshot '%(snapshot)s' of volume " "%(volume)s is stale so deleting.", {'snapshot': from_snap, 'volume': volume_id}) source_rbd_image.remove_snap(from_snap) from_snap = None # Create new base image self._create_base_image(base_name, length, client) image_created = True else: # If a from_snap is defined but does not exist in the backup base # then we cannot proceed (see above) if not self._snap_exists(base_name, from_snap, client): errmsg = (_("Snapshot='%(snap)s' does not exist in base " "image='%(base)s' - aborting incremental " "backup") % {'snap': from_snap, 'base': base_name}) LOG.info(errmsg) # Raise this exception so that the caller can try another # approach raise exception.BackupRBDOperationFailed(errmsg) # Snapshot source volume so that we have a new point-in-time new_snap = self._get_new_snap_name(backup_id) LOG.debug("Creating backup snapshot='%s'", new_snap) source_rbd_image.create_snap(new_snap) # Attempt differential backup. If this fails, perhaps because librbd # or the Ceph cluster version does not support it, do a full backup # instead. # # TODO(dosaboy): find a way to determine if the operation is supported # rather than a brute force approach. try: before = time.time() self._rbd_diff_transfer(volume_name, rbd_pool, base_name, self._ceph_backup_pool, src_user=rbd_user, src_conf=rbd_conf, dest_user=self._ceph_backup_user, dest_conf=self._ceph_backup_conf, src_snap=new_snap, from_snap=from_snap) LOG.debug("Differential backup transfer completed in %.4fs", (time.time() - before)) # We don't need the previous snapshot (if there was one) anymore, so # delete it. if from_snap: source_rbd_image.remove_snap(from_snap) except exception.BackupRBDOperationFailed: with excutils.save_and_reraise_exception(): LOG.debug("Differential backup transfer failed") # Clean up if the image was created as part of this operation if image_created: self._try_delete_base_image(backup_id, volume_id, base_name=base_name) # Delete snapshot LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of " "source volume='%(volume)s'.", {'snapshot': new_snap, 'volume': volume_id}) source_rbd_image.remove_snap(new_snap) def _file_is_rbd(self, volume_file): """Returns True if the volume_file is actually an RBD image.""" return hasattr(volume_file, 'rbd_image') def _full_backup(self, backup_id, volume_id, src_volume, src_name, length): """Perform a full backup of src volume. First creates a base backup image in our backup location, then performs a chunked copy of all data from the source volume to a new backup rbd image.
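        The new image uses the full-copy naming scheme from
        _get_backup_base_name, e.g. for hypothetical ids::

            volume-<volume_id>.backup.<backup_id>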
""" backup_name = self._get_backup_base_name(volume_id, backup_id) with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: # First create base backup image old_format, features = self._get_rbd_support() LOG.debug("Creating backup base image='%(name)s' for volume " "%(volume)s.", {'name': backup_name, 'volume': volume_id}) self.rbd.RBD().create(ioctx=client.ioctx, name=backup_name, size=length, old_format=old_format, features=features, stripe_unit=self.rbd_stripe_unit, stripe_count=self.rbd_stripe_count) LOG.debug("Copying data from volume %s.", volume_id) dest_rbd = self.rbd.Image(client.ioctx, backup_name) try: rbd_meta = rbd_driver.RBDImageMetadata(dest_rbd, self._ceph_backup_pool, self._ceph_backup_user, self._ceph_backup_conf) rbd_fd = rbd_driver.RBDImageIOWrapper(rbd_meta) self._transfer_data(src_volume, src_name, rbd_fd, backup_name, length) finally: dest_rbd.close() @staticmethod def backup_snapshot_name_pattern(): """Returns the pattern used to match backup snapshots. It is essential that snapshots created for purposes other than backups do not have this name format. """ return r"^backup\.([a-z0-9\-]+?)\.snap\.(.+)$" @classmethod def get_backup_snaps(cls, rbd_image, sort=False): """Get all backup snapshots for the given rbd image. NOTE: this call is made public since these snapshots must be deleted before the base volume can be deleted. """ snaps = rbd_image.list_snaps() backup_snaps = [] for snap in snaps: search_key = cls.backup_snapshot_name_pattern() result = re.search(search_key, snap['name']) if result: backup_snaps.append({'name': result.group(0), 'backup_id': result.group(1), 'timestamp': result.group(2)}) if sort: # Sort into ascending order of timestamp backup_snaps.sort(key=lambda x: x['timestamp'], reverse=True) return backup_snaps def _get_new_snap_name(self, backup_id): return utils.convert_str("backup.%s.snap.%s" % (backup_id, time.time())) def _get_backup_snap_name(self, rbd_image, name, backup_id): """Return the name of the snapshot associated with backup_id. The rbd image provided must be the base image used for an incremental backup. A backup is only allowed ONE associated snapshot. If more are found, exception.BackupOperationError is raised. """ snaps = self.get_backup_snaps(rbd_image) LOG.debug("Looking for snapshot of backup base '%s'", name) if not snaps: LOG.debug("Backup base '%s' has no snapshots", name) return None snaps = [snap['name'] for snap in snaps if snap['backup_id'] == backup_id] if not snaps: LOG.debug("Backup '%s' has no snapshot", backup_id) return None if len(snaps) > 1: msg = (_("Backup should only have one snapshot but instead has %s") % len(snaps)) LOG.error(msg) raise exception.BackupOperationError(msg) LOG.debug("Found snapshot '%s'", snaps[0]) return snaps[0] def _get_most_recent_snap(self, rbd_image): """Get the most recent backup snapshot of the provided image. Returns name of most recent backup snapshot or None if there are no backup snapshots. """ backup_snaps = self.get_backup_snaps(rbd_image, sort=True) if not backup_snaps: return None return backup_snaps[0]['name'] def _get_volume_size_gb(self, volume): """Return the size in gigabytes of the given volume. Raises exception.InvalidParameterValue if volume size is 0. """ if int(volume['size']) == 0: errmsg = _("Need non-zero volume size") raise exception.InvalidParameterValue(errmsg) return int(volume['size']) * units.Gi def _backup_metadata(self, backup): """Backup volume metadata. 
NOTE(dosaboy): the metadata we are backing up is obtained from a versioned api so we should not alter it in any way here. We must also be sure that the service that will perform the restore is compatible with version used. """ json_meta = self.get_metadata(backup['volume_id']) if not json_meta: LOG.debug("No metadata to backup for volume %s.", backup['volume_id']) return LOG.debug("Backing up metadata for volume %s.", backup['volume_id']) try: with rbd_driver.RADOSClient(self) as client: vol_meta_backup = VolumeMetadataBackup(client, backup['id']) vol_meta_backup.set(json_meta) except exception.VolumeMetadataBackupExists as e: msg = (_("Failed to backup volume metadata - %s") % e) raise exception.BackupOperationError(msg) def backup(self, backup, volume_file, backup_metadata=True): """Backup volume and metadata (if available) to Ceph object store. If the source volume is an RBD we will attempt to do an incremental/differential backup, otherwise a full copy is performed. If this fails we will attempt to fall back to full copy. """ backup_id = backup['id'] volume = self.db.volume_get(self.context, backup['volume_id']) volume_id = volume['id'] volume_name = volume['name'] LOG.debug("Starting backup of volume='%s'.", volume_id) # Ensure we are at the beginning of the volume volume_file.seek(0) length = self._get_volume_size_gb(volume) do_full_backup = False if self._file_is_rbd(volume_file): # If volume an RBD, attempt incremental backup. try: self._backup_rbd(backup_id, volume_id, volume_file, volume_name, length) except exception.BackupRBDOperationFailed: LOG.debug("Forcing full backup of volume %s.", volume_id) do_full_backup = True else: do_full_backup = True if do_full_backup: self._full_backup(backup_id, volume_id, volume_file, volume_name, length) backup.container = self._ceph_backup_pool backup.save() if backup_metadata: try: self._backup_metadata(backup) except exception.BackupOperationError: with excutils.save_and_reraise_exception(): # Cleanup. self.delete(backup) LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.", {'backup_id': backup_id, 'volume_id': volume_id}) def _full_restore(self, backup_id, volume_id, dest_file, dest_name, length, src_snap=None): """Restore volume using full copy i.e. all extents. This will result in all extents being copied from source to destination. """ with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: # If a source snapshot is provided we assume the base is diff # format. if src_snap: diff_format = True else: diff_format = False backup_name = self._get_backup_base_name(volume_id, backup_id=backup_id, diff_format=diff_format) # Retrieve backup volume src_rbd = self.rbd.Image(client.ioctx, backup_name, snapshot=src_snap, read_only=True) try: rbd_meta = rbd_driver.RBDImageMetadata(src_rbd, self._ceph_backup_pool, self._ceph_backup_user, self._ceph_backup_conf) rbd_fd = rbd_driver.RBDImageIOWrapper(rbd_meta) self._transfer_data(rbd_fd, backup_name, dest_file, dest_name, length) finally: src_rbd.close() def _check_restore_vol_size(self, backup_base, restore_vol, restore_length, src_pool): """Ensure that the restore volume is the correct size. If the restore volume was bigger than the backup, the diff restore will shrink it to the size of the original backup so we need to post-process and resize it back to its expected size. 
""" with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: adjust_size = 0 base_image = self.rbd.Image(client.ioctx, utils.convert_str(backup_base), read_only=True) try: if restore_length != base_image.size(): adjust_size = restore_length finally: base_image.close() if adjust_size: with rbd_driver.RADOSClient(self, src_pool) as client: restore_vol_encode = utils.convert_str(restore_vol) dest_image = self.rbd.Image(client.ioctx, restore_vol_encode) try: LOG.debug("Adjusting restore vol size") dest_image.resize(adjust_size) finally: dest_image.close() def _diff_restore_rbd(self, base_name, restore_file, restore_name, restore_point, restore_length): """Attempt restore rbd volume from backup using diff transfer.""" rbd_user = restore_file.rbd_user rbd_pool = restore_file.rbd_pool rbd_conf = restore_file.rbd_conf LOG.debug("Attempting incremental restore from base='%(base)s' " "snap='%(snap)s'", {'base': base_name, 'snap': restore_point}) before = time.time() try: self._rbd_diff_transfer(base_name, self._ceph_backup_pool, restore_name, rbd_pool, src_user=self._ceph_backup_user, src_conf=self._ceph_backup_conf, dest_user=rbd_user, dest_conf=rbd_conf, src_snap=restore_point) except exception.BackupRBDOperationFailed: LOG.exception(_LE("Differential restore failed, trying full " "restore")) raise # If the volume we are restoring to is larger than the backup volume, # we will need to resize it after the diff import since import-diff # appears to shrink the target rbd volume to the size of the original # backup volume. self._check_restore_vol_size(base_name, restore_name, restore_length, rbd_pool) LOG.debug("Restore transfer completed in %.4fs", (time.time() - before)) def _get_restore_point(self, base_name, backup_id): """Get restore point snapshot name for incremental backup. If the backup was not incremental (determined by the fact that the base has no snapshots/restore points), None is returned. Otherwise, the restore point associated with backup_id is returned. """ with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True) try: restore_point = self._get_backup_snap_name(base_rbd, base_name, backup_id) finally: base_rbd.close() return restore_point def _rbd_has_extents(self, rbd_volume): """Check whether the given rbd volume has extents. Return True if has extents, otherwise False. """ extents = [] def iter_cb(offset, length, exists): if exists: extents.append(length) rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb) if extents: LOG.debug("RBD has %s extents", sum(extents)) return True return False def _diff_restore_allowed(self, base_name, backup, volume, volume_file, rados_client): """Determine if differential restore is possible and restore point. Determine whether a differential restore is possible/allowed, and find out the restore point if backup base is diff-format. In order for a differential restore to be performed we need: * destination volume must be RBD * destination volume must have zero extents * backup base image must exist * backup must have a restore point * target volume is different from source volume of backup Returns True if differential restore is allowed, False otherwise. Return the restore point if back base is diff-format. """ # NOTE(dosaboy): base_name here must be diff format. rbd_exists, base_name = self._rbd_image_exists(base_name, backup['volume_id'], rados_client) if not rbd_exists: return False, None # Get the restore point. 
If no restore point is found, we assume # that the backup was not performed using diff/incremental methods # so we enforce full copy. restore_point = self._get_restore_point(base_name, backup['id']) if restore_point: if self._file_is_rbd(volume_file): # If the volume we are restoring to is the volume the backup # was made from, force a full restore since a diff will not # work in this case. if volume['id'] == backup['volume_id']: LOG.debug("Destination volume is same as backup source " "volume %s - forcing full copy.", volume['id']) return False, restore_point # If the destination volume has extents we cannot allow a diff # restore. if self._rbd_has_extents(volume_file.rbd_image): # We return the restore point so that a full copy is done # from snapshot. LOG.debug("Destination has extents - forcing full copy") return False, restore_point return True, restore_point else: LOG.info(_LI("No restore point found for backup=" "'%(backup)s' of volume %(volume)s " "although base image is found - " "forcing full copy."), {'backup': backup['id'], 'volume': backup['volume_id']}) return False, restore_point def _restore_volume(self, backup, volume, volume_file): """Restore volume from backup using diff transfer if possible. Attempts a differential restore and reverts to full copy if diff fails. """ volume_name = volume['name'] backup_id = backup['id'] backup_volume_id = backup['volume_id'] length = int(volume['size']) * units.Gi base_name = self._get_backup_base_name(backup['volume_id'], diff_format=True) with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: diff_allowed, restore_point = \ self._diff_restore_allowed(base_name, backup, volume, volume_file, client) do_full_restore = True if diff_allowed: # Attempt diff try: self._diff_restore_rbd(base_name, volume_file, volume_name, restore_point, length) do_full_restore = False except exception.BackupRBDOperationFailed: LOG.debug("Forcing full restore to volume %s.", volume['id']) if do_full_restore: # Otherwise full copy self._full_restore(backup_id, backup_volume_id, volume_file, volume_name, length, src_snap=restore_point) def _restore_metadata(self, backup, volume_id): """Restore volume metadata from backup. If this backup has associated metadata, save it to the restore target otherwise do nothing. """ try: with rbd_driver.RADOSClient(self) as client: meta_bak = VolumeMetadataBackup(client, backup['id']) meta = meta_bak.get() if meta is not None: self.put_metadata(volume_id, meta) else: LOG.debug("Volume %s has no backed up metadata.", backup['volume_id']) except exception.BackupMetadataUnsupportedVersion: msg = _("Metadata restore failed due to incompatible version") LOG.error(msg) raise exception.BackupOperationError(msg) def restore(self, backup, volume_id, volume_file): """Restore volume from backup in Ceph object store. If volume metadata is available this will also be restored. 
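        A differential restore is attempted only when every check in
        _diff_restore_allowed passes (the destination is an RBD volume
        with zero extents, the diff-format base and a restore point
        exist, and the target differs from the backup's source volume);
        otherwise _restore_volume falls back to a full copy.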
""" target_volume = self.db.volume_get(self.context, volume_id) LOG.debug('Starting restore from Ceph backup=%(src)s to ' 'volume=%(dest)s', {'src': backup['id'], 'dest': target_volume['name']}) try: self._restore_volume(backup, target_volume, volume_file) # Be tolerant of IO implementations that do not support fileno() try: fileno = volume_file.fileno() except IOError: LOG.debug("Restore target I/O object does not support " "fileno() - skipping call to fsync().") else: os.fsync(fileno) self._restore_metadata(backup, volume_id) LOG.debug('Restore to volume %s finished successfully.', volume_id) except exception.BackupOperationError as e: LOG.error(_LE('Restore to volume %(volume)s finished with error - ' '%(error)s.'), {'error': e, 'volume': volume_id}) raise def delete(self, backup): """Delete the given backup from Ceph object store.""" LOG.debug('Delete started for backup=%s', backup['id']) delete_failed = False try: self._try_delete_base_image(backup['id'], backup['volume_id']) except self.rbd.ImageNotFound: LOG.warning( _LW("RBD image for backup %(backup)s of volume %(volume)s " "not found. Deleting backup metadata."), {'backup': backup['id'], 'volume': backup['volume_id']}) delete_failed = True with rbd_driver.RADOSClient(self) as client: VolumeMetadataBackup(client, backup['id']).remove_if_exists() if delete_failed: LOG.info(_LI("Delete of backup '%(backup)s' " "for volume '%(volume)s' " "finished with warning."), {'backup': backup['id'], 'volume': backup['volume_id']}) else: LOG.debug("Delete of backup '%(backup)s' for volume " "'%(volume)s' finished.", {'backup': backup['id'], 'volume': backup['volume_id']}) def get_backup_driver(context): return CephBackupDriver(context) cinder-8.0.0/cinder/backup/drivers/google.py0000664000567000056710000003314012701406250022137 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # Copyright (C) 2015 Kevin Fox # Copyright (C) 2015 Tom Barron # Copyright (C) 2016 Vedams Inc. # Copyright (C) 2016 Google Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service using Google Cloud Storage(GCS) Google Cloud Storage json apis are used for backup operations. Authentication and authorization are based on OAuth2.0. Server-centric flow is used for authentication. 
""" import base64 import hashlib import httplib2 from googleapiclient import discovery from googleapiclient import errors from googleapiclient import http from oauth2client import client from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils import six from cinder.backup import chunkeddriver from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) gcsbackup_service_opts = [ cfg.StrOpt('backup_gcs_bucket', help='The GCS bucket to use.'), cfg.IntOpt('backup_gcs_object_size', default=52428800, help='The size in bytes of GCS backup objects.'), cfg.IntOpt('backup_gcs_block_size', default=32768, help='The size in bytes that changes are tracked ' 'for incremental backups. backup_gcs_object_size ' 'has to be multiple of backup_gcs_block_size.'), cfg.IntOpt('backup_gcs_reader_chunk_size', default=2097152, help='GCS object will be downloaded in chunks of bytes.'), cfg.IntOpt('backup_gcs_writer_chunk_size', default=2097152, help='GCS object will be uploaded in chunks of bytes. ' 'Pass in a value of -1 if the file ' 'is to be uploaded as a single chunk.'), cfg.IntOpt('backup_gcs_num_retries', default=3, help='Number of times to retry.'), cfg.ListOpt('backup_gcs_retry_error_codes', default=['429'], help='List of GCS error codes.'), cfg.StrOpt('backup_gcs_bucket_location', default='US', help='Location of GCS bucket.'), cfg.StrOpt('backup_gcs_storage_class', default='NEARLINE', help='Storage class of GCS bucket.'), cfg.StrOpt('backup_gcs_credential_file', help='Absolute path of GCS service account credential file.'), cfg.StrOpt('backup_gcs_project_id', help='Owner project id for GCS bucket.'), cfg.StrOpt('backup_gcs_user_agent', default='gcscinder', help='Http user-agent string for gcs api.'), cfg.BoolOpt('backup_gcs_enable_progress_timer', default=True, help='Enable or Disable the timer to send the periodic ' 'progress notifications to Ceilometer when backing ' 'up the volume to the GCS backend storage. 
The ' 'default value is True to enable the timer.'), ] CONF = cfg.CONF CONF.register_opts(gcsbackup_service_opts) def gcs_logger(func): def func_wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) except errors.Error as err: raise exception.GCSApiFailure(reason=err) except client.Error as err: raise exception.GCSOAuth2Failure(reason=err) except Exception as err: raise exception.GCSConnectionFailure(reason=err) return func_wrapper class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver): """Provides backup, restore and delete of backup objects within GCS.""" def __init__(self, context, db_driver=None): self.check_gcs_options() backup_bucket = CONF.backup_gcs_bucket backup_credential = CONF.backup_gcs_credential_file self.gcs_project_id = CONF.backup_gcs_project_id chunk_size_bytes = CONF.backup_gcs_object_size sha_block_size_bytes = CONF.backup_gcs_block_size enable_progress_timer = CONF.backup_gcs_enable_progress_timer super(GoogleBackupDriver, self).__init__(context, chunk_size_bytes, sha_block_size_bytes, backup_bucket, enable_progress_timer, db_driver) credentials = client.GoogleCredentials.from_stream(backup_credential) self.reader_chunk_size = CONF.backup_gcs_reader_chunk_size self.writer_chunk_size = CONF.backup_gcs_writer_chunk_size self.bucket_location = CONF.backup_gcs_bucket_location self.storage_class = CONF.backup_gcs_storage_class self.num_retries = CONF.backup_gcs_num_retries http_user_agent = http.set_user_agent(httplib2.Http(), CONF.backup_gcs_user_agent) self.conn = discovery.build('storage', 'v1', http=http_user_agent, credentials=credentials) self.resumable = self.writer_chunk_size != -1 def check_gcs_options(self): required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file', 'backup_gcs_project_id') unset_options = [opt for opt in required_options if not getattr(CONF, opt, None)] if unset_options: msg = _('Unset gcs options: %s') % unset_options LOG.error(msg) raise exception.InvalidInput(reason=msg) @gcs_logger def put_container(self, bucket): """Create the bucket if not exists.""" buckets = self.conn.buckets().list( project=self.gcs_project_id, prefix=bucket, fields="items(name)").execute( num_retries=self.num_retries).get('items', []) if not any(b.get('name') == bucket for b in buckets): self.conn.buckets().insert( project=self.gcs_project_id, body={'name': bucket, 'location': self.bucket_location, 'storageClass': self.storage_class}).execute( num_retries=self.num_retries) @gcs_logger def get_container_entries(self, bucket, prefix): """Get bucket entry names.""" obj_list_dict = self.conn.objects().list( bucket=bucket, fields="items(name)", prefix=prefix).execute(num_retries=self.num_retries).get( 'items', []) return [obj_dict.get('name') for obj_dict in obj_list_dict] def get_object_writer(self, bucket, object_name, extra_metadata=None): """Return a writer object. Returns a writer object that stores a chunk of volume data in a GCS object store. """ return GoogleObjectWriter(bucket, object_name, self.conn, self.writer_chunk_size, self.num_retries, self.resumable) def get_object_reader(self, bucket, object_name, extra_metadata=None): """Return reader object. Returns a reader object that retrieves a chunk of backed-up volume data from a GCS object store. 
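# NOTE(editor): standalone restatement of put_container() above; 'conn' is
# a discovery client as built in __init__, the remaining argument values
# are placeholders.


def ensure_bucket(conn, project_id, bucket, location='US',
                  storage_class='NEARLINE', num_retries=3):
    """Create the bucket only when a prefix listing does not return it."""
    items = conn.buckets().list(
        project=project_id, prefix=bucket,
        fields='items(name)').execute(num_retries=num_retries).get(
        'items', [])
    if not any(b.get('name') == bucket for b in items):
        conn.buckets().insert(
            project=project_id,
            body={'name': bucket,
                  'location': location,
                  'storageClass': storage_class}).execute(
            num_retries=num_retries)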
""" return GoogleObjectReader(bucket, object_name, self.conn, self.reader_chunk_size, self.num_retries) @gcs_logger def delete_object(self, bucket, object_name): """Deletes a backup object from a GCS object store.""" self.conn.objects().delete( bucket=bucket, object=object_name).execute(num_retries=self.num_retries) def _generate_object_name_prefix(self, backup): """Generates a GCS backup object name prefix. prefix = volume_volid/timestamp/az_saz_backup_bakid volid is volume id. timestamp is time in UTC with format of YearMonthDateHourMinuteSecond. saz is storage_availability_zone. bakid is backup id for volid. """ az = 'az_%s' % self.az backup_name = '%s_backup_%s' % (az, backup.id) volume = 'volume_%s' % (backup.volume_id) timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") prefix = volume + '/' + timestamp + '/' + backup_name LOG.debug('generate_object_name_prefix: %s', prefix) return prefix def update_container_name(self, backup, bucket): """Use the bucket name as provided - don't update.""" return def get_extra_metadata(self, backup, volume): """GCS driver does not use any extra metadata.""" return class GoogleObjectWriter(object): def __init__(self, bucket, object_name, conn, writer_chunk_size, num_retries, resumable): self.bucket = bucket self.object_name = object_name self.conn = conn self.data = bytearray() self.chunk_size = writer_chunk_size self.num_retries = num_retries self.resumable = resumable def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def write(self, data): self.data += data @gcs_logger def close(self): media = http.MediaIoBaseUpload(six.BytesIO(self.data), 'application/octet-stream', chunksize=self.chunk_size, resumable=self.resumable) resp = self.conn.objects().insert( bucket=self.bucket, name=self.object_name, body={}, media_body=media).execute(num_retries=self.num_retries) etag = resp['md5Hash'] md5 = hashlib.md5(self.data).digest() if six.PY3: md5 = md5.encode('utf-8') etag = etag.encode('utf-8') md5 = base64.b64encode(md5) if etag != md5: err = _('MD5 of object: %(object_name)s before: ' '%(md5)s and after: %(etag)s is not same.') % { 'object_name': self.object_name, 'md5': md5, 'etag': etag, } raise exception.InvalidBackup(reason=err) else: LOG.debug('MD5 before: %(md5)s and after: %(etag)s ' 'writing object: %(object_name)s in GCS.', {'etag': etag, 'md5': md5, 'object_name': self.object_name, }) return md5 class GoogleObjectReader(object): def __init__(self, bucket, object_name, conn, reader_chunk_size, num_retries): self.bucket = bucket self.object_name = object_name self.conn = conn self.chunk_size = reader_chunk_size self.num_retries = num_retries def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass @gcs_logger def read(self): req = self.conn.objects().get_media( bucket=self.bucket, object=self.object_name) fh = six.BytesIO() downloader = GoogleMediaIoBaseDownload( fh, req, chunksize=self.chunk_size) done = False while not done: status, done = downloader.next_chunk(num_retries=self.num_retries) LOG.debug('GCS Object download Complete.') return fh.getvalue() class GoogleMediaIoBaseDownload(http.MediaIoBaseDownload): @http.util.positional(1) def next_chunk(self, num_retries=None): error_codes = CONF.backup_gcs_retry_error_codes headers = {'range': 'bytes=%d-%d' % (self._progress, self._progress + self._chunksize)} gcs_http = self._request.http for retry_num in range(num_retries + 1): if retry_num > 0: self._sleep(self._rand() * 2 ** retry_num) resp, content = 
gcs_http.request(self._uri, headers=headers) if resp.status < 500 and (six.text_type(resp.status) not in error_codes): break if resp.status in [200, 206]: if 'content-location' in resp and ( resp['content-location'] != self._uri): self._uri = resp['content-location'] self._progress += len(content) self._fd.write(content) if 'content-range' in resp: content_range = resp['content-range'] length = content_range.rsplit('/', 1)[1] self._total_size = int(length) elif 'content-length' in resp: self._total_size = int(resp['content-length']) if self._progress == self._total_size: self._done = True return (http.MediaDownloadProgress(self._progress, self._total_size), self._done) else: raise http.HttpError(resp, content, uri=self._uri) def get_backup_driver(context): return GoogleBackupDriver(context) cinder-8.0.0/cinder/backup/drivers/glusterfs.py0000664000567000056710000000654312701406250022710 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses GlusterFS as the backend.""" import os import stat from os_brick.remotefs import remotefs as remotefs_brick from oslo_concurrency import processutils as putils from oslo_config import cfg from cinder.backup.drivers import posix from cinder import exception from cinder import utils glusterfsbackup_service_opts = [ cfg.StrOpt('glusterfs_backup_mount_point', default='$state_path/backup_mount', help='Base dir containing mount point for gluster share.'), cfg.StrOpt('glusterfs_backup_share', help='GlusterFS share in ' ': format. 
' 'Eg: 1.2.3.4:backup_vol'), ] CONF = cfg.CONF CONF.register_opts(glusterfsbackup_service_opts) class GlusterfsBackupDriver(posix.PosixBackupDriver): """Provides backup, restore and delete using GlusterFS repository.""" def __init__(self, context, db_driver=None): self._check_configuration() self.backup_mount_point_base = CONF.glusterfs_backup_mount_point self.backup_share = CONF.glusterfs_backup_share self._execute = putils.execute self._root_helper = utils.get_root_helper() backup_path = self._init_backup_repo_path() super(GlusterfsBackupDriver, self).__init__(context, backup_path=backup_path) @staticmethod def _check_configuration(): """Raises error if any required configuration flag is missing.""" required_flags = ['glusterfs_backup_share'] for flag in required_flags: if not getattr(CONF, flag, None): raise exception.ConfigNotFound(path=flag) def _init_backup_repo_path(self): remotefsclient = remotefs_brick.RemoteFsClient( 'glusterfs', self._root_helper, glusterfs_mount_point_base=self.backup_mount_point_base) remotefsclient.mount(self.backup_share) # Ensure we can write to this share mount_path = remotefsclient.get_mount_point(self.backup_share) group_id = os.getegid() current_group_id = utils.get_file_gid(mount_path) current_mode = utils.get_file_mode(mount_path) if group_id != current_group_id: cmd = ['chgrp', group_id, mount_path] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if not (current_mode & stat.S_IWGRP): cmd = ['chmod', 'g+w', mount_path] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) return mount_path def get_backup_driver(context): return GlusterfsBackupDriver(context) cinder-8.0.0/cinder/backup/drivers/swift.py0000664000567000056710000003707212701406250022027 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # Copyright (C) 2015 Kevin Fox # Copyright (C) 2015 Tom Barron # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses Swift as the backend **Related Flags** :backup_swift_url: The URL of the Swift endpoint (default: None, use catalog). :backup_swift_auth_url: The URL of the Keystone endpoint for authentication (default: None, use catalog). :swift_catalog_info: Info to match when looking for swift in the service ' catalog. :keystone_catalog_info: Info to match when looking for keystone in the service catalog. :backup_swift_object_size: The size in bytes of the Swift objects used for volume backups (default: 52428800). :backup_swift_retry_attempts: The number of retries to make for Swift operations (default: 10). :backup_swift_retry_backoff: The backoff time in seconds between retrying failed Swift operations (default: 10). :backup_compression_algorithm: Compression algorithm to use for volume backups. 
Supported options are: None (to disable), zlib and bz2 (default: zlib) :backup_swift_ca_cert_file: The location of the CA certificate file to use for swift client requests (default: None) :backup_swift_auth_insecure: If true, bypass verification of server's certificate for SSL connections (default: False) """ import hashlib import socket from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils import six from swiftclient import client as swift from cinder.backup import chunkeddriver from cinder import exception from cinder.i18n import _ from cinder.i18n import _LE LOG = logging.getLogger(__name__) swiftbackup_service_opts = [ cfg.StrOpt('backup_swift_url', help='The URL of the Swift endpoint'), cfg.StrOpt('backup_swift_auth_url', help='The URL of the Keystone endpoint'), cfg.StrOpt('swift_catalog_info', default='object-store:swift:publicURL', help='Info to match when looking for swift in the service ' 'catalog. Format is: separated values of the form: ' ':: - ' 'Only used if backup_swift_url is unset'), cfg.StrOpt('keystone_catalog_info', default='identity:Identity Service:publicURL', help='Info to match when looking for keystone in the service ' 'catalog. Format is: separated values of the form: ' ':: - ' 'Only used if backup_swift_auth_url is unset'), cfg.StrOpt('backup_swift_auth', default='per_user', help='Swift authentication mechanism'), cfg.StrOpt('backup_swift_auth_version', default='1', help='Swift authentication version. Specify "1" for auth 1.0' ', or "2" for auth 2.0'), cfg.StrOpt('backup_swift_tenant', help='Swift tenant/account name. Required when connecting' ' to an auth 2.0 system'), cfg.StrOpt('backup_swift_user', help='Swift user name'), cfg.StrOpt('backup_swift_key', help='Swift key for authentication'), cfg.StrOpt('backup_swift_container', default='volumebackups', help='The default Swift container to use'), cfg.IntOpt('backup_swift_object_size', default=52428800, help='The size in bytes of Swift backup objects'), cfg.IntOpt('backup_swift_block_size', default=32768, help='The size in bytes that changes are tracked ' 'for incremental backups. backup_swift_object_size ' 'has to be multiple of backup_swift_block_size.'), cfg.IntOpt('backup_swift_retry_attempts', default=3, help='The number of retries to make for Swift operations'), cfg.IntOpt('backup_swift_retry_backoff', default=2, help='The backoff time in seconds between Swift retries'), cfg.BoolOpt('backup_swift_enable_progress_timer', default=True, help='Enable or Disable the timer to send the periodic ' 'progress notifications to Ceilometer when backing ' 'up the volume to the Swift backend storage. 
The ' 'default value is True to enable the timer.'), cfg.StrOpt('backup_swift_ca_cert_file', help='Location of the CA certificate file to use for swift ' 'client requests.'), cfg.BoolOpt('backup_swift_auth_insecure', default=False, help='Bypass verification of server certificate when ' 'making SSL connection to Swift.'), ] CONF = cfg.CONF CONF.register_opts(swiftbackup_service_opts) class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver): """Provides backup, restore and delete of backup objects within Swift.""" def __init__(self, context, db_driver=None): chunk_size_bytes = CONF.backup_swift_object_size sha_block_size_bytes = CONF.backup_swift_block_size backup_default_container = CONF.backup_swift_container enable_progress_timer = CONF.backup_swift_enable_progress_timer super(SwiftBackupDriver, self).__init__(context, chunk_size_bytes, sha_block_size_bytes, backup_default_container, enable_progress_timer, db_driver) if CONF.backup_swift_url is None: self.swift_url = None info = CONF.swift_catalog_info try: service_type, service_name, endpoint_type = info.split(':') except ValueError: raise exception.BackupDriverException(_( "Failed to parse the configuration option " "'swift_catalog_info', must be in the form " "::")) for entry in context.service_catalog: if entry.get('type') == service_type: # It is assumed that service_types are unique within # the service catalog, so once the correct one is found # it is safe to break out of the loop self.swift_url = entry.get( 'endpoints')[0].get(endpoint_type) break else: self.swift_url = '%s%s' % (CONF.backup_swift_url, context.project_id) if self.swift_url is None: raise exception.BackupDriverException(_( "Could not determine which Swift endpoint to use. This can " "either be set in the service catalog or with the " "cinder.conf config option 'backup_swift_url'.")) if CONF.backup_swift_auth_url is None: self.auth_url = None info = CONF.keystone_catalog_info try: service_type, service_name, endpoint_type = info.split(':') except ValueError: raise exception.BackupDriverException(_( "Failed to parse the configuration option " "'keystone_catalog_info', must be in the form " "::")) for entry in context.service_catalog: if entry.get('type') == service_type: # It is assumed that service_types are unique within # the service catalog, so once the correct one is found # it is safe to break out of the loop self.auth_url = entry.get( 'endpoints')[0].get(endpoint_type) break else: self.auth_url = '%s%s' % (CONF.backup_swift_auth_url, context.project_id) if self.auth_url is None: raise exception.BackupDriverException(_( "Could not determine which Keystone endpoint to use. 
This can " "either be set in the service catalog or with the " "cinder.conf config option 'backup_swift_auth_url'.")) LOG.debug("Using swift URL %s", self.swift_url) LOG.debug("Using auth URL %s", self.auth_url) self.swift_attempts = CONF.backup_swift_retry_attempts self.swift_backoff = CONF.backup_swift_retry_backoff LOG.debug('Connect to %s in "%s" mode', CONF.backup_swift_url, CONF.backup_swift_auth) self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure if CONF.backup_swift_auth == 'single_user': if CONF.backup_swift_user is None: LOG.error(_LE("single_user auth mode enabled, " "but %(param)s not set"), {'param': 'backup_swift_user'}) raise exception.ParameterNotFound(param='backup_swift_user') self.conn = swift.Connection( authurl=self.auth_url, auth_version=CONF.backup_swift_auth_version, tenant_name=CONF.backup_swift_tenant, user=CONF.backup_swift_user, key=CONF.backup_swift_key, retries=self.swift_attempts, starting_backoff=self.swift_backoff, insecure=self.backup_swift_auth_insecure, cacert=CONF.backup_swift_ca_cert_file) else: self.conn = swift.Connection(retries=self.swift_attempts, preauthurl=self.swift_url, preauthtoken=self.context.auth_token, starting_backoff=self.swift_backoff, insecure= ( self.backup_swift_auth_insecure), cacert=CONF.backup_swift_ca_cert_file) class SwiftObjectWriter(object): def __init__(self, container, object_name, conn): self.container = container self.object_name = object_name self.conn = conn self.data = bytearray() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def write(self, data): self.data += data def close(self): reader = six.BytesIO(self.data) try: etag = self.conn.put_object(self.container, self.object_name, reader, content_length=len(self.data)) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) LOG.debug('swift MD5 for %(object_name)s: %(etag)s', {'object_name': self.object_name, 'etag': etag, }) md5 = hashlib.md5(self.data).hexdigest() LOG.debug('backup MD5 for %(object_name)s: %(md5)s', {'object_name': self.object_name, 'md5': md5}) if etag != md5: err = _('error writing object to swift, MD5 of object in ' 'swift %(etag)s is not the same as MD5 of object sent ' 'to swift %(md5)s'), {'etag': etag, 'md5': md5} raise exception.InvalidBackup(reason=err) return md5 class SwiftObjectReader(object): def __init__(self, container, object_name, conn): self.container = container self.object_name = object_name self.conn = conn def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass def read(self): try: (_resp, body) = self.conn.get_object(self.container, self.object_name) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) return body def put_container(self, container): """Create the container if needed. No failure if it pre-exists.""" try: self.conn.put_container(container) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) return def get_container_entries(self, container, prefix): """Get container entry names""" try: swift_objects = self.conn.get_container(container, prefix=prefix, full_listing=True)[1] except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) swift_object_names = [swift_obj['name'] for swift_obj in swift_objects] return swift_object_names def get_object_writer(self, container, object_name, extra_metadata=None): """Return a writer object. Returns a writer object that stores a chunk of volume data in a Swift object store. 
""" return self.SwiftObjectWriter(container, object_name, self.conn) def get_object_reader(self, container, object_name, extra_metadata=None): """Return reader object. Returns a reader object that retrieves a chunk of backed-up volume data from a Swift object store. """ return self.SwiftObjectReader(container, object_name, self.conn) def delete_object(self, container, object_name): """Deletes a backup object from a Swift object store.""" try: self.conn.delete_object(container, object_name) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) def _generate_object_name_prefix(self, backup): """Generates a Swift backup object name prefix.""" az = 'az_%s' % self.az backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") prefix = volume + '/' + timestamp + '/' + backup_name LOG.debug('generate_object_name_prefix: %s', prefix) return prefix def update_container_name(self, backup, container): """Use the container name as provided - don't update.""" return container def get_extra_metadata(self, backup, volume): """Swift driver does not use any extra metadata.""" return None def get_backup_driver(context): return SwiftBackupDriver(context) cinder-8.0.0/cinder/backup/drivers/nfs.py0000664000567000056710000000575612701406250021465 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Tom Barron # Copyright (C) 2015 Kevin Fox # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses NFS storage as the backend.""" from os_brick.remotefs import remotefs as remotefs_brick from oslo_config import cfg from oslo_log import log as logging from cinder.backup.drivers import posix from cinder import exception from cinder.i18n import _ from cinder import utils LOG = logging.getLogger(__name__) nfsbackup_service_opts = [ cfg.StrOpt('backup_mount_point_base', default='$state_path/backup_mount', help='Base dir containing mount point for NFS share.'), cfg.StrOpt('backup_share', help='NFS share in hostname:path, ipv4addr:path, ' 'or "[ipv6addr]:path" format.'), cfg.StrOpt('backup_mount_options', help=('Mount options passed to the NFS client. 
See NFS ' 'man page for details.')), ] CONF = cfg.CONF CONF.register_opts(nfsbackup_service_opts) class NFSBackupDriver(posix.PosixBackupDriver): """Provides backup, restore and delete using NFS supplied repository.""" def __init__(self, context, db_driver=None): self._check_configuration() self.backup_mount_point_base = CONF.backup_mount_point_base self.backup_share = CONF.backup_share self.mount_options = CONF.backup_mount_options or {} backup_path = self._init_backup_repo_path() LOG.debug("Using NFS backup repository: %s", backup_path) super(NFSBackupDriver, self).__init__(context, backup_path=backup_path) @staticmethod def _check_configuration(): """Raises error if any required configuration flag is missing.""" required_flags = ['backup_share'] for flag in required_flags: if not getattr(CONF, flag, None): raise exception.ConfigNotFound(_( 'Required flag %s is not set') % flag) def _init_backup_repo_path(self): remotefsclient = remotefs_brick.RemoteFsClient( 'nfs', utils.get_root_helper(), nfs_mount_point_base=self.backup_mount_point_base, nfs_mount_options=self.mount_options) remotefsclient.mount(self.backup_share) return remotefsclient.get_mount_point(self.backup_share) def get_backup_driver(context): return NFSBackupDriver(context) cinder-8.0.0/cinder/backup/manager.py0000664000567000056710000011666112701406250020631 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Backup manager manages volume backups. Volume Backups are full copies of persistent volumes stored in a backup store e.g. an object store or any other backup store if and when support is added. They are usable without the original object being available. A volume backup can be restored to the original volume it was created from or any other available volume with a minimum size of the original volume. Volume backups can be created, restored, deleted and listed. **Related Flags** :backup_topic: What :mod:`rpc` topic to listen to (default: `cinder-backup`). :backup_manager: The module name of a class derived from :class:`manager.Manager` (default: :class:`cinder.backup.manager.Manager`). 
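# NOTE(editor): standalone sketch of NFSBackupDriver._init_backup_repo_path()
# above; the share address and mount base are placeholders.
from os_brick.remotefs import remotefs as remotefs_brick

from cinder import utils


def mount_backup_share(share='1.2.3.4:/backup_vol',
                       mount_base='/var/lib/cinder/backup_mount',
                       mount_options=None):
    client = remotefs_brick.RemoteFsClient(
        'nfs', utils.get_root_helper(),
        nfs_mount_point_base=mount_base,
        nfs_mount_options=mount_options)
    client.mount(share)
    return client.get_mount_point(share)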
""" from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import excutils from oslo_utils import importutils import six from cinder.backup import driver from cinder.backup import rpcapi as backup_rpcapi from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import manager from cinder import objects from cinder.objects import fields from cinder import quota from cinder import rpc from cinder import utils from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) backup_manager_opts = [ cfg.StrOpt('backup_driver', default='cinder.backup.drivers.swift', help='Driver to use for backups.',), cfg.BoolOpt('backup_service_inithost_offload', default=False, help='Offload pending backup delete during ' 'backup service startup.',), ] # This map doesn't need to be extended in the future since it's only # for old backup services mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift', 'cinder.backup.services.ceph': 'cinder.backup.drivers.ceph'} CONF = cfg.CONF CONF.register_opts(backup_manager_opts) CONF.import_opt('use_multipath_for_image_xfer', 'cinder.volume.driver') CONF.import_opt('num_volume_device_scan_tries', 'cinder.volume.driver') QUOTAS = quota.QUOTAS class BackupManager(manager.SchedulerDependentManager): """Manages backup of block storage devices.""" RPC_API_VERSION = '2.0' target = messaging.Target(version=RPC_API_VERSION) def __init__(self, service_name=None, *args, **kwargs): self.service = importutils.import_module(self.driver_name) self.az = CONF.storage_availability_zone self.volume_managers = {} self.backup_rpcapi = backup_rpcapi.BackupAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() super(BackupManager, self).__init__(service_name='backup', *args, **kwargs) self.additional_endpoints.append(_BackupV1Proxy(self)) @property def driver_name(self): """This function maps old backup services to backup drivers.""" return self._map_service_to_driver(CONF.backup_driver) def _map_service_to_driver(self, service): """Maps services to drivers.""" if service in mapper: return mapper[service] return service def _update_backup_error(self, backup, context, err): backup.status = fields.BackupStatus.ERROR backup.fail_reason = err backup.save() def init_host(self): """Run initialization needed for a standalone service.""" ctxt = context.get_admin_context() try: self._cleanup_incomplete_backup_operations(ctxt) except Exception: # Don't block startup of the backup service. 
LOG.exception(_LE("Problem cleaning incomplete backup " "operations.")) def reset(self): super(BackupManager, self).reset() self.backup_rpcapi = backup_rpcapi.BackupAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() def _cleanup_incomplete_backup_operations(self, ctxt): LOG.info(_LI("Cleaning up incomplete backup operations.")) # TODO(smulcahy) implement full resume of backup and restore # operations on restart (rather than simply resetting) backups = objects.BackupList.get_all_by_host(ctxt, self.host) for backup in backups: try: self._cleanup_one_backup(ctxt, backup) except Exception: LOG.exception(_LE("Problem cleaning up backup %(bkup)s."), {'bkup': backup['id']}) try: self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt, backup) except Exception: LOG.exception(_LE("Problem cleaning temp volumes and " "snapshots for backup %(bkup)s."), {'bkup': backup['id']}) def _cleanup_one_volume(self, ctxt, volume): if volume['status'] == 'backing-up': self._detach_all_attachments(ctxt, volume) LOG.info(_LI('Resetting volume %(vol_id)s to previous ' 'status %(status)s (was backing-up).'), {'vol_id': volume['id'], 'status': volume['previous_status']}) self.db.volume_update(ctxt, volume['id'], {'status': volume['previous_status']}) elif volume['status'] == 'restoring-backup': self._detach_all_attachments(ctxt, volume) LOG.info(_LI('setting volume %s to error_restoring ' '(was restoring-backup).'), volume['id']) self.db.volume_update(ctxt, volume['id'], {'status': 'error_restoring'}) def _cleanup_one_backup(self, ctxt, backup): if backup['status'] == fields.BackupStatus.CREATING: LOG.info(_LI('Resetting backup %s to error (was creating).'), backup['id']) volume = objects.Volume.get_by_id(ctxt, backup.volume_id) self._cleanup_one_volume(ctxt, volume) err = 'incomplete backup reset on manager restart' self._update_backup_error(backup, ctxt, err) elif backup['status'] == fields.BackupStatus.RESTORING: LOG.info(_LI('Resetting backup %s to ' 'available (was restoring).'), backup['id']) volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id) self._cleanup_one_volume(ctxt, volume) backup.status = fields.BackupStatus.AVAILABLE backup.save() elif backup['status'] == fields.BackupStatus.DELETING: LOG.info(_LI('Resuming delete on backup: %s.'), backup['id']) if CONF.backup_service_inithost_offload: # Offload all the pending backup delete operations to the # threadpool to prevent the main backup service thread # from being blocked. 
self._add_to_threadpool(self.delete_backup, ctxt, backup) else: # By default, delete backups sequentially self.delete_backup(ctxt, backup) def _detach_all_attachments(self, ctxt, volume): attachments = volume['volume_attachment'] or [] for attachment in attachments: if (attachment['attached_host'] == self.host and attachment['instance_uuid'] is None): try: rpcapi = self.volume_rpcapi rpcapi.detach_volume(ctxt, volume, attachment['id']) except Exception: LOG.exception(_LE("Detach attachment %(attach_id)s" " failed."), {'attach_id': attachment['id']}, resource=volume) def _delete_temp_volume(self, ctxt, backup): try: temp_volume = objects.Volume.get_by_id( ctxt, backup.temp_volume_id) self.volume_rpcapi.delete_volume(ctxt, temp_volume) except exception.VolumeNotFound: LOG.debug("Could not find temp volume %(vol)s to clean up " "for backup %(backup)s.", {'vol': backup.temp_volume_id, 'backup': backup.id}) backup.temp_volume_id = None backup.save() def _delete_temp_snapshot(self, ctxt, backup): try: temp_snapshot = objects.Snapshot.get_by_id( ctxt, backup.temp_snapshot_id) volume = objects.Volume.get_by_id( ctxt, backup.volume_id) # The temp snapshot should be deleted directly thru the # volume driver, not thru the volume manager. self.volume_rpcapi.delete_snapshot(ctxt, temp_snapshot, volume.host) except exception.SnapshotNotFound: LOG.debug("Could not find temp snapshot %(snap)s to clean " "up for backup %(backup)s.", {'snap': backup.temp_snapshot_id, 'backup': backup.id}) backup.temp_snapshot_id = None backup.save() def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup): # NOTE(xyang): If the service crashes or gets restarted during the # backup operation, there could be temporary volumes or snapshots # that are not deleted. Make sure any temporary volumes or snapshots # create by the backup job are deleted when service is started. if (backup.temp_volume_id and backup.status == fields.BackupStatus.ERROR): self._delete_temp_volume(ctxt, backup) if (backup.temp_snapshot_id and backup.status == fields.BackupStatus.ERROR): self._delete_temp_snapshot(ctxt, backup) def _cleanup_temp_volumes_snapshots_when_backup_created( self, ctxt, backup): # Delete temp volumes or snapshots when backup creation is completed. 
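# NOTE(editor): the two cleanup paths above follow one rule, sketched here
# with plain values instead of backup objects: after a crash only failed
# backups are scrubbed, while successful creation removes both temporaries
# unconditionally.


def temp_resources_to_clean(status, temp_volume_id, temp_snapshot_id,
                            creation_completed=False):
    if not creation_completed and status != 'error':
        return []
    targets = []
    if temp_volume_id:
        targets.append(('volume', temp_volume_id))
    if temp_snapshot_id:
        targets.append(('snapshot', temp_snapshot_id))
    return targets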
if backup.temp_volume_id: self._delete_temp_volume(ctxt, backup) if backup.temp_snapshot_id: self._delete_temp_snapshot(ctxt, backup) def create_backup(self, context, backup): """Create volume backups using configured backup service.""" volume_id = backup.volume_id volume = objects.Volume.get_by_id(context, volume_id) previous_status = volume.get('previous_status', None) LOG.info(_LI('Create backup started, backup: %(backup_id)s ' 'volume: %(volume_id)s.'), {'backup_id': backup.id, 'volume_id': volume_id}) self._notify_about_backup_usage(context, backup, "create.start") backup.host = self.host backup.service = self.driver_name backup.availability_zone = self.az backup.save() expected_status = 'backing-up' actual_status = volume['status'] if actual_status != expected_status: err = _('Create backup aborted, expected volume status ' '%(expected_status)s but got %(actual_status)s.') % { 'expected_status': expected_status, 'actual_status': actual_status, } self._update_backup_error(backup, context, err) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.CREATING actual_status = backup.status if actual_status != expected_status: err = _('Create backup aborted, expected backup status ' '%(expected_status)s but got %(actual_status)s.') % { 'expected_status': expected_status, 'actual_status': actual_status, } self._update_backup_error(backup, context, err) backup.save() raise exception.InvalidBackup(reason=err) try: self._run_backup(context, backup, volume) except Exception as err: with excutils.save_and_reraise_exception(): self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'error_backing-up'}) self._update_backup_error(backup, context, six.text_type(err)) # Restore the original status. self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'backing-up'}) backup.status = fields.BackupStatus.AVAILABLE backup.size = volume['size'] backup.save() # Handle the num_dependent_backups of parent backup when child backup # has created successfully. if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) parent_backup.num_dependent_backups += 1 parent_backup.save() LOG.info(_LI('Create backup finished. 
backup: %s.'), backup.id) self._notify_about_backup_usage(context, backup, "create.end") def _run_backup(self, context, backup, volume): backup_service = self.service.get_backup_driver(context) properties = utils.brick_get_connector_properties() backup_dic = self.volume_rpcapi.get_backup_device(context, backup, volume) try: backup_device = backup_dic.get('backup_device') is_snapshot = backup_dic.get('is_snapshot') attach_info = self._attach_device(context, backup_device, properties, is_snapshot) try: device_path = attach_info['device']['path'] if isinstance(device_path, six.string_types): if backup_dic.get('secure_enabled', False): with open(device_path) as device_file: backup_service.backup(backup, device_file) else: with utils.temporary_chown(device_path): with open(device_path) as device_file: backup_service.backup(backup, device_file) else: backup_service.backup(backup, device_path) finally: self._detach_device(context, attach_info, backup_device, properties, is_snapshot) finally: backup = objects.Backup.get_by_id(context, backup.id) self._cleanup_temp_volumes_snapshots_when_backup_created( context, backup) def restore_backup(self, context, backup, volume_id): """Restore volume backups from configured backup service.""" LOG.info(_LI('Restore backup started, backup: %(backup_id)s ' 'volume: %(volume_id)s.'), {'backup_id': backup.id, 'volume_id': volume_id}) volume = objects.Volume.get_by_id(context, volume_id) self._notify_about_backup_usage(context, backup, "restore.start") backup.host = self.host backup.save() expected_status = 'restoring-backup' actual_status = volume['status'] if actual_status != expected_status: err = (_('Restore backup aborted, expected volume status ' '%(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) backup.status = fields.BackupStatus.AVAILABLE backup.save() raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.RESTORING actual_status = backup['status'] if actual_status != expected_status: err = (_('Restore backup aborted: expected backup status ' '%(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) self._update_backup_error(backup, context, err) self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) if volume['size'] > backup['size']: LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is ' 'larger than backup: %(backup_id)s, ' 'size: %(backup_size)d, continuing with restore.'), {'vol_id': volume['id'], 'vol_size': volume['size'], 'backup_id': backup['id'], 'backup_size': backup['size']}) backup_service = self._map_service_to_driver(backup['service']) configured_service = self.driver_name if backup_service != configured_service: err = _('Restore backup aborted, the backup service currently' ' configured [%(configured_service)s] is not the' ' backup service that was used to create this' ' backup [%(backup_service)s].') % { 'configured_service': configured_service, 'backup_service': backup_service, } backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': 'error'}) raise exception.InvalidBackup(reason=err) try: self._run_restore(context, backup, volume) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_update(context, volume_id, {'status': 'error_restoring'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, 
{'status': 'available'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored' ' to volume %(volume_id)s.'), {'backup_id': backup.id, 'volume_id': volume_id}) self._notify_about_backup_usage(context, backup, "restore.end") def _run_restore(self, context, backup, volume): backup_service = self.service.get_backup_driver(context) properties = utils.brick_get_connector_properties() secure_enabled = ( self.volume_rpcapi.secure_file_operations_enabled(context, volume)) attach_info = self._attach_device(context, volume, properties) try: device_path = attach_info['device']['path'] if isinstance(device_path, six.string_types): if secure_enabled: with open(device_path, 'wb') as device_file: backup_service.restore(backup, volume.id, device_file) else: with utils.temporary_chown(device_path): with open(device_path, 'wb') as device_file: backup_service.restore(backup, volume.id, device_file) else: backup_service.restore(backup, volume.id, device_path) finally: self._detach_device(context, attach_info, volume, properties) def delete_backup(self, context, backup): """Delete volume backup from configured backup service.""" LOG.info(_LI('Delete backup started, backup: %s.'), backup.id) self._notify_about_backup_usage(context, backup, "delete.start") backup.host = self.host backup.save() expected_status = fields.BackupStatus.DELETING actual_status = backup.status if actual_status != expected_status: err = _('Delete_backup aborted, expected backup status ' '%(expected_status)s but got %(actual_status)s.') \ % {'expected_status': expected_status, 'actual_status': actual_status} self._update_backup_error(backup, context, err) raise exception.InvalidBackup(reason=err) backup_service = self._map_service_to_driver(backup['service']) if backup_service is not None: configured_service = self.driver_name if backup_service != configured_service: err = _('Delete backup aborted, the backup service currently' ' configured [%(configured_service)s] is not the' ' backup service that was used to create this' ' backup [%(backup_service)s].')\ % {'configured_service': configured_service, 'backup_service': backup_service} self._update_backup_error(backup, context, err) raise exception.InvalidBackup(reason=err) try: backup_service = self.service.get_backup_driver(context) backup_service.delete(backup) except Exception as err: with excutils.save_and_reraise_exception(): self._update_backup_error(backup, context, six.text_type(err)) # Get reservations try: reserve_opts = { 'backups': -1, 'backup_gigabytes': -backup.size, } reservations = QUOTAS.reserve(context, project_id=backup.project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE("Failed to update usages deleting backup")) backup.destroy() # If this backup is incremental backup, handle the # num_dependent_backups of parent backup if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) if parent_backup.has_dependent_backups: parent_backup.num_dependent_backups -= 1 parent_backup.save() # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=backup.project_id) LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id) self._notify_about_backup_usage(context, backup, "delete.end") def _notify_about_backup_usage(self, context, backup, event_suffix, extra_usage_info=None): volume_utils.notify_about_backup_usage( context, backup, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def 
export_record(self, context, backup): """Export all volume backup metadata details to allow clean import. Export backup metadata so it could be re-imported into the database without any prerequisite in the backup database. :param context: running context :param backup: backup object to export :returns: backup_record - a description of how to import the backup :returns: contains 'backup_url' - how to import the backup, and :returns: 'backup_service' describing the needed driver. :raises: InvalidBackup """ LOG.info(_LI('Export record started, backup: %s.'), backup.id) expected_status = fields.BackupStatus.AVAILABLE actual_status = backup.status if actual_status != expected_status: err = (_('Export backup aborted, expected backup status ' '%(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) raise exception.InvalidBackup(reason=err) backup_record = {} backup_record['backup_service'] = backup.service backup_service = self._map_service_to_driver(backup.service) configured_service = self.driver_name if backup_service != configured_service: err = (_('Export record aborted, the backup service currently' ' configured [%(configured_service)s] is not the' ' backup service that was used to create this' ' backup [%(backup_service)s].') % {'configured_service': configured_service, 'backup_service': backup_service}) raise exception.InvalidBackup(reason=err) # Call driver to create backup description string try: backup_service = self.service.get_backup_driver(context) driver_info = backup_service.export_record(backup) backup_url = backup.encode_record(driver_info=driver_info) backup_record['backup_url'] = backup_url except Exception as err: msg = six.text_type(err) raise exception.InvalidBackup(reason=msg) LOG.info(_LI('Export record finished, backup %s exported.'), backup.id) return backup_record def import_record(self, context, backup, backup_service, backup_url, backup_hosts): """Import all volume backup metadata details to the backup db. :param context: running context :param backup: The new backup object for the import :param backup_service: The needed backup driver for import :param backup_url: An identifier string to locate the backup :param backup_hosts: Potential hosts to execute the import :raises: InvalidBackup :raises: ServiceNotFound """ LOG.info(_LI('Import record started, backup_url: %s.'), backup_url) # Can we import this backup? if (backup_service != self.driver_name): # No, are there additional potential backup hosts in the list? if len(backup_hosts) > 0: # try the next host on the list, maybe he can import first_host = backup_hosts.pop() self.backup_rpcapi.import_record(context, first_host, backup, backup_service, backup_url, backup_hosts) else: # empty list - we are the last host on the list, fail err = _('Import record failed, cannot find backup ' 'service to perform the import. Request service ' '%(service)s') % {'service': backup_service} self._update_backup_error(backup, context, err) raise exception.ServiceNotFound(service_id=backup_service) else: # Yes... 
try: # Deserialize backup record information backup_options = backup.decode_record(backup_url) # Extract driver specific info and pass it to the driver driver_options = backup_options.pop('driver_info', {}) backup_service = self.service.get_backup_driver(context) backup_service.import_record(backup, driver_options) except Exception as err: msg = six.text_type(err) self._update_backup_error(backup, context, msg) raise exception.InvalidBackup(reason=msg) required_import_options = { 'display_name', 'display_description', 'container', 'size', 'service_metadata', 'service', 'object_count', 'id' } # Check for missing fields in imported data missing_opts = required_import_options - set(backup_options) if missing_opts: msg = (_('Driver successfully decoded imported backup data, ' 'but there are missing fields (%s).') % ', '.join(missing_opts)) self._update_backup_error(backup, context, msg) raise exception.InvalidBackup(reason=msg) # Confirm the ID from the record in the DB is the right one backup_id = backup_options['id'] if backup_id != backup.id: msg = (_('Trying to import backup metadata from id %(meta_id)s' ' into backup %(id)s.') % {'meta_id': backup_id, 'id': backup.id}) self._update_backup_error(backup, context, msg) raise exception.InvalidBackup(reason=msg) # Overwrite some fields backup_options['status'] = fields.BackupStatus.AVAILABLE backup_options['service'] = self.driver_name backup_options['availability_zone'] = self.az backup_options['host'] = self.host # Remove some values which are not actual fields and some that # were set by the API node for key in ('name', 'user_id', 'project_id'): backup_options.pop(key, None) # Update the database backup.update(backup_options) backup.save() # Verify backup try: if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) else: LOG.warning(_LW('Backup service %(service)s does not ' 'support verify. Backup id %(id)s is ' 'not verified. Skipping verify.'), {'service': self.driver_name, 'id': backup.id}) except exception.InvalidBackup as err: with excutils.save_and_reraise_exception(): self._update_backup_error(backup, context, six.text_type(err)) LOG.info(_LI('Import record id %s metadata from driver ' 'finished.'), backup.id) def reset_status(self, context, backup, status): """Reset volume backup status. 
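# NOTE(editor): the missing-field check from import_record() above as a
# standalone helper; plain set subtraction over the decoded record keys.
required_import_options = {
    'display_name', 'display_description', 'container', 'size',
    'service_metadata', 'service', 'object_count', 'id'}


def missing_import_fields(backup_options):
    return sorted(required_import_options - set(backup_options))


# missing_import_fields({'id': 'x', 'size': 1, 'service': 's'}) ->
#     ['container', 'display_description', 'display_name',
#      'object_count', 'service_metadata']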
:param context: running context :param backup: The backup object for reset status operation :param status: The status to be set :raises: InvalidBackup :raises: BackupVerifyUnsupportedDriver :raises: AttributeError """ LOG.info(_LI('Reset backup status started, backup_id: ' '%(backup_id)s, status: %(status)s.'), {'backup_id': backup.id, 'status': status}) backup_service = self._map_service_to_driver(backup.service) LOG.info(_LI('Backup service: %s.'), backup_service) if backup_service is not None: configured_service = self.driver_name if backup_service != configured_service: err = _('Reset backup status aborted, the backup service' ' currently configured [%(configured_service)s] ' 'is not the backup service that was used to create' ' this backup [%(backup_service)s].') % \ {'configured_service': configured_service, 'backup_service': backup_service} raise exception.InvalidBackup(reason=err) # Verify backup try: # check whether the backup is ok or not if (status == fields.BackupStatus.AVAILABLE and backup['status'] != fields.BackupStatus.RESTORING): # check whether we could verify the backup is ok or not if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) backup.status = status backup.save() # driver does not support verify function else: msg = (_('Backup service %(configured_service)s ' 'does not support verify. Backup id' ' %(id)s is not verified. ' 'Skipping verify.') % {'configured_service': self.driver_name, 'id': backup.id}) raise exception.BackupVerifyUnsupportedDriver( reason=msg) # reset status to error or from restoring to available else: if (status == fields.BackupStatus.ERROR or (status == fields.BackupStatus.AVAILABLE and backup.status == fields.BackupStatus.RESTORING)): backup.status = status backup.save() except exception.InvalidBackup: with excutils.save_and_reraise_exception(): LOG.error(_LE("Backup id %s is not invalid. " "Skipping reset."), backup.id) except exception.BackupVerifyUnsupportedDriver: with excutils.save_and_reraise_exception(): LOG.error(_LE('Backup service %(configured_service)s ' 'does not support verify. Backup id ' '%(id)s is not verified. ' 'Skipping verify.'), {'configured_service': self.driver_name, 'id': backup.id}) except AttributeError: msg = (_('Backup service %(service)s does not support ' 'verify. Backup id %(id)s is not verified. ' 'Skipping reset.') % {'service': self.driver_name, 'id': backup.id}) LOG.error(msg) raise exception.BackupVerifyUnsupportedDriver( reason=msg) # Needs to clean temporary volumes and snapshots. try: self._cleanup_temp_volumes_snapshots_for_one_backup( context, backup) except Exception: LOG.exception(_LE("Problem cleaning temp volumes and " "snapshots for backup %(bkup)s."), {'bkup': backup.id}) # send notification to ceilometer notifier_info = {'id': backup.id, 'update': {'status': status}} notifier = rpc.get_notifier('backupStatusUpdate') notifier.info(context, "backups.reset_status.end", notifier_info) def check_support_to_force_delete(self, context): """Check if the backup driver supports force delete operation. 
:param context: running context """ backup_service = self.service.get_backup_driver(context) return backup_service.support_force_delete def _attach_device(self, context, backup_device, properties, is_snapshot=False): """Attach backup device.""" if not is_snapshot: return self._attach_volume(context, backup_device, properties) else: msg = _("Can't attach snapshot.") raise NotImplementedError(msg) def _attach_volume(self, context, volume, properties): """Attach a volume.""" try: conn = self.volume_rpcapi.initialize_connection(context, volume, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection(context, volume, properties, force=True) except Exception: LOG.warning(_LW("Failed to terminate the connection " "of volume %(volume_id)s, but it is " "acceptable."), {'volume_id', volume.id}) def _connect_device(self, conn): """Establish connection to device.""" use_multipath = CONF.use_multipath_for_image_xfer device_scan_attempts = CONF.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn) vol_handle = connector.connect_volume(conn['data']) return {'conn': conn, 'device': vol_handle, 'connector': connector} def _detach_device(self, context, attach_info, volume, properties, is_snapshot=False, force=False): """Disconnect the volume from the host. """ connector = attach_info['connector'] connector.disconnect_volume(attach_info['conn']['data'], attach_info['device']) rpcapi = self.volume_rpcapi rpcapi.terminate_connection(context, volume, properties, force=force) rpcapi.remove_export(context, volume) # TODO(dulek): This goes away immediately in Newton and is just present in # Mitaka so that we can receive v1.x and v2.0 messages. class _BackupV1Proxy(object): target = messaging.Target(version='1.3') def __init__(self, manager): self.manager = manager def create_backup(self, context, backup): return self.manager.create_backup(context, backup) def restore_backup(self, context, backup, volume_id): return self.manager.restore_backup(context, backup, volume_id) def delete_backup(self, context, backup): return self.manager.delete_backup(context, backup) def export_record(self, context, backup): return self.manager.export_record(context, backup) def import_record(self, context, backup, backup_service, backup_url, backup_hosts): return self.manager.import_record(context, backup, backup_service, backup_url, backup_hosts) def reset_status(self, context, backup, status): return self.manager.reset_status(context, backup, status) def check_support_to_force_delete(self, context): return self.manager.check_support_to_force_delete(context) cinder-8.0.0/cinder/backup/driver.py0000664000567000056710000003715512701406250020512 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
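# NOTE(editor): condensed restatement of _connect_device() above; 'conn' is
# the connection info dict returned by initialize_connection().
from cinder import utils


def connect_device(conn, use_multipath=False, scan_tries=3):
    connector = utils.brick_get_connector(
        conn['driver_volume_type'],
        use_multipath=use_multipath,
        device_scan_attempts=scan_tries,
        conn=conn)
    vol_handle = connector.connect_volume(conn['data'])
    return {'conn': conn, 'device': vol_handle, 'connector': connector}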
"""Base class for all backup drivers.""" import abc from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import six from cinder.db import base from cinder import exception from cinder.i18n import _, _LI, _LW from cinder import keymgr service_opts = [ cfg.IntOpt('backup_metadata_version', default=2, help='Backup metadata version to be used when backing up ' 'volume metadata. If this number is bumped, make sure the ' 'service doing the restore supports the new version.'), cfg.IntOpt('backup_object_number_per_notification', default=10, help='The number of chunks or objects, for which one ' 'Ceilometer notification will be sent'), cfg.IntOpt('backup_timer_interval', default=120, help='Interval, in seconds, between two progress notifications ' 'reporting the backup status'), ] CONF = cfg.CONF CONF.register_opts(service_opts) LOG = logging.getLogger(__name__) class BackupMetadataAPI(base.Base): TYPE_TAG_VOL_BASE_META = 'volume-base-metadata' TYPE_TAG_VOL_META = 'volume-metadata' TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata' def __init__(self, context, db_driver=None): super(BackupMetadataAPI, self).__init__(db_driver) self.context = context @staticmethod def _is_serializable(value): """Returns True if value is serializable.""" try: jsonutils.dumps(value) except TypeError: LOG.info(_LI("Value with type=%s is not serializable"), type(value)) return False return True def _save_vol_base_meta(self, container, volume_id): """Save base volume metadata to container. This will fetch all fields from the db Volume object for volume_id and save them in the provided container dictionary. """ type_tag = self.TYPE_TAG_VOL_BASE_META LOG.debug("Getting metadata type '%s'", type_tag) meta = self.db.volume_get(self.context, volume_id) if meta: container[type_tag] = {} for key, value in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(value): LOG.info(_LI("Unable to serialize field '%s' - excluding " "from backup"), key) continue # Copy the encryption key uuid for backup if key is 'encryption_key_id' and value is not None: value = keymgr.API().copy_key(self.context, value) LOG.debug("Copying encryption key uuid for backup.") container[type_tag][key] = value LOG.debug("Completed fetching metadata type '%s'", type_tag) else: LOG.debug("No metadata type '%s' available", type_tag) def _save_vol_meta(self, container, volume_id): """Save volume metadata to container. This will fetch all fields from the db VolumeMetadata object for volume_id and save them in the provided container dictionary. """ type_tag = self.TYPE_TAG_VOL_META LOG.debug("Getting metadata type '%s'", type_tag) meta = self.db.volume_metadata_get(self.context, volume_id) if meta: container[type_tag] = {} for entry in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(meta[entry]): LOG.info(_LI("Unable to serialize field '%s' - excluding " "from backup"), entry) continue container[type_tag][entry] = meta[entry] LOG.debug("Completed fetching metadata type '%s'", type_tag) else: LOG.debug("No metadata type '%s' available", type_tag) def _save_vol_glance_meta(self, container, volume_id): """Save volume Glance metadata to container. This will fetch all fields from the db VolumeGlanceMetadata object for volume_id and save them in the provided container dictionary. 
""" type_tag = self.TYPE_TAG_VOL_GLANCE_META LOG.debug("Getting metadata type '%s'", type_tag) try: meta = self.db.volume_glance_metadata_get(self.context, volume_id) if meta: container[type_tag] = {} for entry in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(entry.value): LOG.info(_LI("Unable to serialize field '%s' - " "excluding from backup"), entry) continue container[type_tag][entry.key] = entry.value LOG.debug("Completed fetching metadata type '%s'", type_tag) except exception.GlanceMetadataNotFound: LOG.debug("No metadata type '%s' available", type_tag) @staticmethod def _filter(metadata, fields): """Returns set of metadata restricted to required fields. If fields is empty list, the full set is returned. """ if not fields: return metadata subset = {} for field in fields: if field in metadata: subset[field] = metadata[field] else: LOG.debug("Excluding field '%s'", field) return subset def _restore_vol_base_meta(self, metadata, volume_id, fields): """Restore values to Volume object for provided fields.""" LOG.debug("Restoring volume base metadata") # Ignore unencrypted backups. key = 'encryption_key_id' if key in fields and key in metadata and metadata[key] is not None: self._restore_vol_encryption_meta(volume_id, metadata['volume_type_id']) metadata = self._filter(metadata, fields) self.db.volume_update(self.context, volume_id, metadata) def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id): """Restores the volume_type_id for encryption if needed. Only allow restoration of an encrypted backup if the destination volume has the same volume type as the source volume. Otherwise encryption will not work. If volume types are already the same, no action is needed. """ dest_vol = self.db.volume_get(self.context, volume_id) if dest_vol['volume_type_id'] != src_volume_type_id: LOG.debug("Volume type id's do not match.") # If the volume types do not match, and the destination volume # does not have a volume type, force the destination volume # to have the encrypted volume type, provided it still exists. if dest_vol['volume_type_id'] is None: try: self.db.volume_type_get( self.context, src_volume_type_id) except exception.VolumeTypeNotFound: LOG.debug("Volume type of source volume has been " "deleted. Encrypted backup restore has " "failed.") msg = _("The source volume type '%s' is not " "available.") % (src_volume_type_id) raise exception.EncryptedBackupOperationFailed(msg) # Update dest volume with src volume's volume_type_id. LOG.debug("The volume type of the destination volume " "will become the volume type of the source " "volume.") self.db.volume_update(self.context, volume_id, {'volume_type_id': src_volume_type_id}) else: # Volume type id's do not match, and destination volume # has a volume type. Throw exception. LOG.warning(_LW("Destination volume type is different from " "source volume type for an encrypted volume. 
" "Encrypted backup restore has failed.")) msg = (_("The source volume type '%(src)s' is different " "than the destination volume type '%(dest)s'.") % {'src': src_volume_type_id, 'dest': dest_vol['volume_type_id']}) raise exception.EncryptedBackupOperationFailed(msg) def _restore_vol_meta(self, metadata, volume_id, fields): """Restore values to VolumeMetadata object for provided fields.""" LOG.debug("Restoring volume metadata") metadata = self._filter(metadata, fields) self.db.volume_metadata_update(self.context, volume_id, metadata, True) def _restore_vol_glance_meta(self, metadata, volume_id, fields): """Restore values to VolumeGlanceMetadata object for provided fields. First delete any existing metadata then save new values. """ LOG.debug("Restoring volume glance metadata") metadata = self._filter(metadata, fields) self.db.volume_glance_metadata_delete_by_volume(self.context, volume_id) for key, value in metadata.items(): self.db.volume_glance_metadata_create(self.context, volume_id, key, value) # Now mark the volume as bootable self.db.volume_update(self.context, volume_id, {'bootable': True}) def _v1_restore_factory(self): """All metadata is backed up but we selectively restore. Returns a dictionary of the form: {: (, )} Empty field list indicates that all backed up fields should be restored. """ return {self.TYPE_TAG_VOL_META: (self._restore_vol_meta, []), self.TYPE_TAG_VOL_GLANCE_META: (self._restore_vol_glance_meta, [])} def _v2_restore_factory(self): """All metadata is backed up but we selectively restore. Returns a dictionary of the form: {: (, )} Empty field list indicates that all backed up fields should be restored. """ return {self.TYPE_TAG_VOL_BASE_META: (self._restore_vol_base_meta, ['encryption_key_id']), self.TYPE_TAG_VOL_META: (self._restore_vol_meta, []), self.TYPE_TAG_VOL_GLANCE_META: (self._restore_vol_glance_meta, [])} def get(self, volume_id): """Get volume metadata. Returns a json-encoded dict containing all metadata and the restore version i.e. the version used to decide what actually gets restored from this container when doing a backup restore. """ container = {'version': CONF.backup_metadata_version} self._save_vol_base_meta(container, volume_id) self._save_vol_meta(container, volume_id) self._save_vol_glance_meta(container, volume_id) if container: return jsonutils.dumps(container) else: return None def put(self, volume_id, json_metadata): """Restore volume metadata to a volume. The json container should contain a version that is supported here. """ meta_container = jsonutils.loads(json_metadata) version = meta_container['version'] if version == 1: factory = self._v1_restore_factory() elif version == 2: factory = self._v2_restore_factory() else: msg = (_("Unsupported backup metadata version (%s)") % (version)) raise exception.BackupMetadataUnsupportedVersion(msg) for type in factory: func = factory[type][0] fields = factory[type][1] if type in meta_container: func(meta_container[type], volume_id, fields) else: LOG.debug("No metadata of type '%s' to restore", type) @six.add_metaclass(abc.ABCMeta) class BackupDriver(base.Base): def __init__(self, context, db_driver=None): super(BackupDriver, self).__init__(db_driver) self.context = context self.backup_meta_api = BackupMetadataAPI(context, db_driver) # This flag indicates if backup driver supports force # deletion. So it should be set to True if the driver that inherits # from BackupDriver supports the force deletion function. 
self.support_force_delete = False def get_metadata(self, volume_id): return self.backup_meta_api.get(volume_id) def put_metadata(self, volume_id, json_metadata): self.backup_meta_api.put(volume_id, json_metadata) @abc.abstractmethod def backup(self, backup, volume_file, backup_metadata=False): """Start a backup of a specified volume.""" return @abc.abstractmethod def restore(self, backup, volume_id, volume_file): """Restore a saved backup.""" return @abc.abstractmethod def delete(self, backup): """Delete a saved backup.""" return def export_record(self, backup): """Export driver specific backup record information. If backup backend needs additional driver specific information to import backup record back into the system it must overwrite this method and return it here as a dictionary so it can be serialized into a string. Default backup driver implementation has no extra information. :param backup: backup object to export :returns: driver_info - dictionary with extra information """ return {} def import_record(self, backup, driver_info): """Import driver specific backup record information. If backup backend needs additional driver specific information to import backup record back into the system it must overwrite this method since it will be called with the extra information that was provided by export_record when exporting the backup. Default backup driver implementation does nothing since it didn't export any specific data in export_record. :param backup: backup object to import :param driver_info: dictionary with driver specific backup record information :returns: nothing """ return @six.add_metaclass(abc.ABCMeta) class BackupDriverWithVerify(BackupDriver): @abc.abstractmethod def verify(self, backup): """Verify that the backup exists on the backend. Verify that the backup is OK, possibly following an import record operation. :param backup: backup id of the backup to verify :raises: InvalidBackup, NotImplementedError """ return cinder-8.0.0/cinder/backup/rpcapi.py0000664000567000056710000001105112701406250020460 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the volume backup RPC API. """ from oslo_config import cfg from oslo_log import log as logging from cinder import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class BackupAPI(rpc.RPCAPI): """Client side of the volume backup RPC API. API version history: 1.0 - Initial version. 1.1 - Changed methods to accept backup objects instead of IDs. 1.2 - A version that got in by mistake (without breaking anything). 1.3 - Dummy version bump to mark start of having cinder-backup service decoupled from cinder-volume. ... Mitaka supports messaging 1.3. Any changes to existing methods in 1.x after this point should be done so that they can handle version cap set to 1.3. 
2.0 - Remove 1.x compatibility """ RPC_API_VERSION = '1.3' TOPIC = CONF.backup_topic BINARY = 'cinder-backup' def _compat_ver(self, current, legacy): if self.client.can_send_version(current): return current else: return legacy def create_backup(self, ctxt, backup): LOG.debug("create_backup in rpcapi backup_id %s", backup.id) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=backup.host, version=version) cctxt.cast(ctxt, 'create_backup', backup=backup) def restore_backup(self, ctxt, volume_host, backup, volume_id): LOG.debug("restore_backup in rpcapi backup_id %s", backup.id) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=volume_host, version=version) cctxt.cast(ctxt, 'restore_backup', backup=backup, volume_id=volume_id) def delete_backup(self, ctxt, backup): LOG.debug("delete_backup rpcapi backup_id %s", backup.id) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=backup.host, version=version) cctxt.cast(ctxt, 'delete_backup', backup=backup) def export_record(self, ctxt, backup): LOG.debug("export_record in rpcapi backup_id %(id)s " "on host %(host)s.", {'id': backup.id, 'host': backup.host}) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=backup.host, version=version) return cctxt.call(ctxt, 'export_record', backup=backup) def import_record(self, ctxt, host, backup, backup_service, backup_url, backup_hosts): LOG.debug("import_record rpcapi backup id %(id)s " "on host %(host)s for backup_url %(url)s.", {'id': backup.id, 'host': host, 'url': backup_url}) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'import_record', backup=backup, backup_service=backup_service, backup_url=backup_url, backup_hosts=backup_hosts) def reset_status(self, ctxt, backup, status): LOG.debug("reset_status in rpcapi backup_id %(id)s " "on host %(host)s.", {'id': backup.id, 'host': backup.host}) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=backup.host, version=version) return cctxt.cast(ctxt, 'reset_status', backup=backup, status=status) def check_support_to_force_delete(self, ctxt, host): LOG.debug("Check if backup driver supports force delete " "on host %(host)s.", {'host': host}) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'check_support_to_force_delete') cinder-8.0.0/cinder/backup/api.py0000664000567000056710000006301712701406250017764 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to the volume backups service. 
""" from datetime import datetime from eventlet import greenthread from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import versionutils from pytz import timezone import random from cinder.backup import rpcapi as backup_rpcapi from cinder import context from cinder.db import base from cinder import exception from cinder.i18n import _, _LI, _LW from cinder import objects from cinder.objects import fields import cinder.policy from cinder import quota from cinder import utils import cinder.volume from cinder.volume import utils as volume_utils backup_api_opts = [ cfg.BoolOpt('backup_use_same_host', default=False, help='Backup services use same backend.') ] CONF = cfg.CONF CONF.register_opts(backup_api_opts) LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS def check_policy(context, action): target = { 'project_id': context.project_id, 'user_id': context.user_id, } _action = 'backup:%s' % action cinder.policy.enforce(context, _action, target) class API(base.Base): """API for interacting with the volume backup manager.""" def __init__(self, db_driver=None): self.backup_rpcapi = backup_rpcapi.BackupAPI() self.volume_api = cinder.volume.API() super(API, self).__init__(db_driver) def get(self, context, backup_id): check_policy(context, 'get') return objects.Backup.get_by_id(context, backup_id) def _check_support_to_force_delete(self, context, backup_host): result = self.backup_rpcapi.check_support_to_force_delete(context, backup_host) return result def delete(self, context, backup, force=False): """Make the RPC call to delete a volume backup. Call backup manager to execute backup delete or force delete operation. :param context: running context :param backup: the dict of backup that is got from DB. :param force: indicate force delete or not :raises: InvalidBackup :raises: BackupDriverException :raises: ServiceNotFound """ check_policy(context, 'delete') if not force and backup.status not in [fields.BackupStatus.AVAILABLE, fields.BackupStatus.ERROR]: msg = _('Backup status must be available or error') raise exception.InvalidBackup(reason=msg) if force and not self._check_support_to_force_delete(context, backup.host): msg = _('force delete') raise exception.NotSupportedOperation(operation=msg) # Don't allow backup to be deleted if there are incremental # backups dependent on it. 
deltas = self.get_all(context, search_opts={'parent_id': backup.id}) if deltas and len(deltas): msg = _('Incremental backups exist for this backup.') raise exception.InvalidBackup(reason=msg) backup.status = fields.BackupStatus.DELETING backup.host = self._get_available_backup_service_host( backup.host, backup.availability_zone) backup.save() self.backup_rpcapi.delete_backup(context, backup) def get_all(self, context, search_opts=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): check_policy(context, 'get_all') search_opts = search_opts or {} all_tenants = search_opts.pop('all_tenants', '0') if not utils.is_valid_boolstr(all_tenants): msg = _("all_tenants must be a boolean, got '%s'.") % all_tenants raise exception.InvalidParameterValue(err=msg) if context.is_admin and strutils.bool_from_string(all_tenants): backups = objects.BackupList.get_all(context, search_opts, marker, limit, offset, sort_keys, sort_dirs) else: backups = objects.BackupList.get_all_by_project( context, context.project_id, search_opts, marker, limit, offset, sort_keys, sort_dirs ) return backups def _is_scalable_only(self): """True if we're running in deployment where all c-bak are scalable. We need this method to decide if we can assume that all of our c-bak services are decoupled from c-vol. FIXME(dulek): This shouldn't be needed in Newton. """ cap = self.backup_rpcapi.client.version_cap if cap: cap = versionutils.convert_version_to_tuple(cap) return cap >= (1, 3) # Mitaka is marked by c-bak 1.3+. else: # NOTE(dulek): No version cap means we're running in an environment # without c-bak services. Letting it pass as Mitaka, request will # just fail anyway so it doesn't really matter. return True def _az_matched(self, service, availability_zone): return ((not availability_zone) or service.availability_zone == availability_zone) def _is_backup_service_enabled(self, availability_zone, host): """Check if there is a backup service available.""" topic = CONF.backup_topic ctxt = context.get_admin_context() services = objects.ServiceList.get_all_by_topic( ctxt, topic, disabled=False) for srv in services: if (self._az_matched(srv, availability_zone) and srv.host == host and utils.service_is_up(srv)): return True return False def _get_any_available_backup_service(self, availability_zone): """Get an available backup service host. Get an available backup service host in the specified availability zone. """ services = [srv for srv in self._list_backup_services()] random.shuffle(services) # Get the next running service with matching availability zone. idx = 0 while idx < len(services): srv = services[idx] if(self._az_matched(srv, availability_zone) and utils.service_is_up(srv)): return srv.host idx = idx + 1 return None def _get_available_backup_service_host(self, host, az, volume_host=None): """Return an appropriate backup service host.""" # FIXME(dulek): We need to keep compatibility with Liberty, where c-bak # were coupled with c-vol. If we're running in mixed Liberty-Mitaka # environment we will be scheduling backup jobs the old way. # # This snippet should go away in Newton. Note that volume_host # parameter will also be unnecessary then. 
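# A small sketch (not cinder code) of the version-cap comparison performed
# by _is_scalable_only above: converting the cap string to a tuple makes
# '1.10' compare correctly against (1, 3), which a plain string comparison
# would get wrong. cap_is_at_least is an illustrative helper name.
from oslo_utils import versionutils


def cap_is_at_least(version_cap, minimum=(1, 3)):
    if not version_cap:
        return True  # no cap pinned; assume the newer behaviour
    return versionutils.convert_version_to_tuple(version_cap) >= minimum


assert cap_is_at_least('1.10') and not cap_is_at_least('1.2')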
if not self._is_scalable_only(): if volume_host: volume_host = volume_utils.extract_host(volume_host, level='host') if volume_host and self._is_backup_service_enabled(az, volume_host): return volume_host elif host and self._is_backup_service_enabled(az, host): return host else: raise exception.ServiceNotFound(service_id='cinder-backup') backup_host = None if (not host or not CONF.backup_use_same_host): backup_host = self._get_any_available_backup_service(az) elif self._is_backup_service_enabled(az, host): backup_host = host if not backup_host: raise exception.ServiceNotFound(service_id='cinder-backup') return backup_host def _list_backup_services(self): """List all enabled backup services. :returns: list -- hosts for services that are enabled for backup. """ topic = CONF.backup_topic ctxt = context.get_admin_context() services = objects.ServiceList.get_all_by_topic( ctxt, topic, disabled=False) return services def _list_backup_hosts(self): services = self._list_backup_services() return [srv.host for srv in services if not srv.disabled and utils.service_is_up(srv)] def create(self, context, name, description, volume_id, container, incremental=False, availability_zone=None, force=False, snapshot_id=None): """Make the RPC call to create a volume backup.""" check_policy(context, 'create') volume = self.volume_api.get(context, volume_id) snapshot = None if snapshot_id: snapshot = self.volume_api.get_snapshot(context, snapshot_id) if volume_id != snapshot.volume_id: msg = (_('Volume %(vol1)s does not match with ' 'snapshot.volume_id %(vol2)s.') % {'vol1': volume_id, 'vol2': snapshot.volume_id}) raise exception.InvalidVolume(reason=msg) if volume['status'] not in ["available", "in-use"]: msg = (_('Volume to be backed up must be available ' 'or in-use, but the current status is "%s".') % volume['status']) raise exception.InvalidVolume(reason=msg) elif volume['status'] in ["in-use"] and not snapshot_id and not force: msg = _('Backing up an in-use volume must use ' 'the force flag.') raise exception.InvalidVolume(reason=msg) elif snapshot_id and snapshot['status'] not in ["available"]: msg = (_('Snapshot to be backed up must be available, ' 'but the current status is "%s".') % snapshot['status']) raise exception.InvalidSnapshot(reason=msg) previous_status = volume['status'] host = self._get_available_backup_service_host( None, volume.availability_zone, volume_utils.extract_host(volume.host, 'host')) # Reserve a quota before setting volume status and backup status try: reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']} reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(resource_name): return (usages[resource_name]['reserved'] + usages[resource_name]['in_use']) for over in overs: if 'gigabytes' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG backup (%(d_consumed)dG of " "%(d_quota)dG already consumed)") LOG.warning(msg, {'s_pid': context.project_id, 's_size': volume['size'], 'd_consumed': _consumed(over), 'd_quota': quotas[over]}) raise exception.VolumeBackupSizeExceedsAvailableQuota( requested=volume['size'], consumed=_consumed('backup_gigabytes'), quota=quotas['backup_gigabytes']) elif 'backups' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "backups (%(d_consumed)d backups " "already consumed)") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over)}) raise 
exception.BackupLimitExceeded( allowed=quotas[over]) # Find the latest backup and use it as the parent backup to do an # incremental backup. latest_backup = None if incremental: backups = objects.BackupList.get_all_by_volume(context.elevated(), volume_id) if backups.objects: # NOTE(xyang): The 'data_timestamp' field records the time # when the data on the volume was first saved. If it is # a backup from volume, 'data_timestamp' will be the same # as 'created_at' for a backup. If it is a backup from a # snapshot, 'data_timestamp' will be the same as # 'created_at' for a snapshot. # If not backing up from snapshot, the backup with the latest # 'data_timestamp' will be the parent; If backing up from # snapshot, the backup with the latest 'data_timestamp' will # be chosen only if 'data_timestamp' is earlier than the # 'created_at' timestamp of the snapshot; Otherwise, the # backup will not be chosen as the parent. # For example, a volume has a backup taken at 8:00, then # a snapshot taken at 8:10, and then a backup at 8:20. # When taking an incremental backup of the snapshot, the # parent should be the backup at 8:00, not 8:20, and the # 'data_timestamp' of this new backup will be 8:10. latest_backup = max( backups.objects, key=lambda x: x['data_timestamp'] if (not snapshot or (snapshot and x['data_timestamp'] < snapshot['created_at'])) else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC'))) else: msg = _('No backups available to do an incremental backup.') raise exception.InvalidBackup(reason=msg) parent_id = None if latest_backup: parent_id = latest_backup.id if latest_backup['status'] != fields.BackupStatus.AVAILABLE: msg = _('The parent backup must be available for ' 'incremental backup.') raise exception.InvalidBackup(reason=msg) data_timestamp = None if snapshot_id: snapshot = objects.Snapshot.get_by_id(context, snapshot_id) data_timestamp = snapshot.created_at self.db.volume_update(context, volume_id, {'status': 'backing-up', 'previous_status': previous_status}) backup = None try: kwargs = { 'user_id': context.user_id, 'project_id': context.project_id, 'display_name': name, 'display_description': description, 'volume_id': volume_id, 'status': fields.BackupStatus.CREATING, 'container': container, 'parent_id': parent_id, 'size': volume['size'], 'host': host, 'snapshot_id': snapshot_id, 'data_timestamp': data_timestamp, } backup = objects.Backup(context=context, **kwargs) backup.create() if not snapshot_id: backup.data_timestamp = backup.created_at backup.save() QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: if backup and 'id' in backup: backup.destroy() finally: QUOTAS.rollback(context, reservations) # TODO(DuncanT): In future, when we have a generic local attach, # this can go via the scheduler, which enables # better load balancing and isolation of services self.backup_rpcapi.create_backup(context, backup) return backup def restore(self, context, backup_id, volume_id=None, name=None): """Make the RPC call to restore a volume backup.""" check_policy(context, 'restore') backup = self.get(context, backup_id) if backup['status'] != fields.BackupStatus.AVAILABLE: msg = _('Backup status must be available') raise exception.InvalidBackup(reason=msg) size = backup['size'] if size is None: msg = _('Backup to be restored has invalid size') raise exception.InvalidBackup(reason=msg) # Create a volume if none specified. 
# If a volume is specified, check # it is large enough for the backup if volume_id is None: if name is None: name = 'restore_backup_%s' % backup_id description = 'auto-created_from_restore_from_backup' LOG.info(_LI("Creating volume of %(size)s GB for restore of " "backup %(backup_id)s."), {'size': size, 'backup_id': backup_id}, context=context) volume = self.volume_api.create(context, size, name, description) volume_id = volume['id'] while True: volume = self.volume_api.get(context, volume_id) if volume['status'] != 'creating': break greenthread.sleep(1) else: volume = self.volume_api.get(context, volume_id) if volume['status'] != "available": msg = _('Volume to be restored to must be available') raise exception.InvalidVolume(reason=msg) LOG.debug('Checking backup size %(bs)s against volume size %(vs)s', {'bs': size, 'vs': volume['size']}) if size > volume['size']: msg = (_('volume size %(volume_size)d is too small to restore ' 'backup of size %(size)d.') % {'volume_size': volume['size'], 'size': size}) raise exception.InvalidVolume(reason=msg) LOG.info(_LI("Overwriting volume %(volume_id)s with restore of " "backup %(backup_id)s"), {'volume_id': volume_id, 'backup_id': backup_id}, context=context) # Set the status here rather than setting it at the start and unrolling # for each error condition; the window should be very small backup.host = self._get_available_backup_service_host( backup.host, backup.availability_zone, volume_host=volume.host) backup.status = fields.BackupStatus.RESTORING backup.restore_volume_id = volume.id backup.save() self.db.volume_update(context, volume_id, {'status': 'restoring-backup'}) self.backup_rpcapi.restore_backup(context, backup.host, backup, volume_id) d = {'backup_id': backup_id, 'volume_id': volume_id, 'volume_name': volume['display_name'], } return d def reset_status(self, context, backup_id, status): """Make the RPC call to reset a volume backup's status. Call backup manager to execute backup status reset operation. :param context: running context :param backup_id: the backup whose status is to be reset :param status: the status to set :raises: InvalidBackup """ # get backup info backup = self.get(context, backup_id) backup.host = self._get_available_backup_service_host( backup.host, backup.availability_zone) backup.save() # send to manager to do reset operation self.backup_rpcapi.reset_status(ctxt=context, backup=backup, status=status) def export_record(self, context, backup_id): """Make the RPC call to export a volume backup. Call backup manager to execute backup export. :param context: running context :param backup_id: backup id to export :returns: dictionary -- a description of how to import the backup :returns: contains 'backup_url' and 'backup_service' :raises: InvalidBackup """ check_policy(context, 'backup-export') backup = self.get(context, backup_id) if backup['status'] != fields.BackupStatus.AVAILABLE: msg = (_('Backup status must be available and not %s.') % backup['status']) raise exception.InvalidBackup(reason=msg) LOG.debug("Calling RPCAPI with context: " "%(ctx)s, host: %(host)s, backup: %(id)s.", {'ctx': context, 'host': backup['host'], 'id': backup['id']}) backup.host = self._get_available_backup_service_host( backup.host, backup.availability_zone) backup.save() export_data = self.backup_rpcapi.export_record(context, backup) return export_data def _get_import_backup(self, context, backup_url): """Prepare database backup record for import. This method decodes provided backup_url and expects to find the id of the backup in there. 
Then checks the DB for the presence of this backup record and if it finds it and is not deleted it will raise an exception because the record cannot be created or used. If the record is in deleted status then we must be trying to recover this record, so we'll reuse it. If the record doesn't already exist we create it with provided id. :param context: running context :param backup_url: backup description to be used by the backup driver :return: BackupImport object :raises: InvalidBackup :raises: InvalidInput """ # Deserialize string backup record into a dictionary backup_record = objects.Backup.decode_record(backup_url) # ID is a required field since it's what links incremental backups if 'id' not in backup_record: msg = _('Provided backup record is missing an id') raise exception.InvalidInput(reason=msg) kwargs = { 'user_id': context.user_id, 'project_id': context.project_id, 'volume_id': '0000-0000-0000-0000', 'status': fields.BackupStatus.CREATING, } try: # Try to get the backup with that ID in all projects even among # deleted entries. backup = objects.BackupImport.get_by_id(context, backup_record['id'], read_deleted='yes', project_only=False) # If record exists and it's not deleted we cannot proceed with the # import if backup.status != fields.BackupStatus.DELETED: msg = _('Backup already exists in database.') raise exception.InvalidBackup(reason=msg) # Otherwise we'll "revive" delete backup record backup.update(kwargs) backup.save() except exception.BackupNotFound: # If record doesn't exist create it with the specific ID backup = objects.BackupImport(context=context, id=backup_record['id'], **kwargs) backup.create() return backup def import_record(self, context, backup_service, backup_url): """Make the RPC call to import a volume backup. :param context: running context :param backup_service: backup service name :param backup_url: backup description to be used by the backup driver :raises: InvalidBackup :raises: ServiceNotFound :raises: InvalidInput """ check_policy(context, 'backup-import') # NOTE(ronenkat): since we don't have a backup-scheduler # we need to find a host that support the backup service # that was used to create the backup. # We send it to the first backup service host, and the backup manager # on that host will forward it to other hosts on the hosts list if it # cannot support correct service itself. hosts = self._list_backup_hosts() if len(hosts) == 0: raise exception.ServiceNotFound(service_id=backup_service) # Get Backup object that will be used to import this backup record backup = self._get_import_backup(context, backup_url) first_host = hosts.pop() self.backup_rpcapi.import_record(context, first_host, backup, backup_service, backup_url, hosts) return backup cinder-8.0.0/cinder/wsgi/0000775000567000056710000000000012701406543016343 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/wsgi/__init__.py0000664000567000056710000000000012701406250020435 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/wsgi/wsgi.py0000664000567000056710000000240212701406250017657 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Cinder OS API WSGI application.""" import sys import warnings from cinder import objects warnings.simplefilter('once', DeprecationWarning) from oslo_config import cfg from oslo_log import log as logging from oslo_service import wsgi from cinder import i18n i18n.enable_lazy() # Need to register global_opts from cinder.common import config from cinder import rpc from cinder import version CONF = cfg.CONF def initialize_application(): objects.register_all() CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") config.set_middleware_defaults() rpc.init(CONF) return wsgi.Loader(CONF).load_app(name='osapi_volume') cinder-8.0.0/cinder/wsgi/common.py0000664000567000056710000001160612701406250020204 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for working with WSGI servers.""" from oslo_log import log as logging import webob.dec import webob.exc from cinder.i18n import _ LOG = logging.getLogger(__name__) class Request(webob.Request): pass class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = cinder.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import cinder.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(explanation='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. """ raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): """Base WSGI middleware. 
These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = cinder.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import cinder.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ def _factory(app): return cls(app, **local_config) return _factory def __init__(self, application): self.application = application def process_request(self, req): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) cinder-8.0.0/cinder/wsgi/eventlet_server.py0000664000567000056710000000412612701406250022127 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Methods for working with eventlet WSGI servers.""" from __future__ import print_function import socket from oslo_config import cfg from oslo_log import log as logging from oslo_service import wsgi from oslo_utils import netutils socket_opts = [ cfg.BoolOpt('tcp_keepalive', default=True, help="Sets the value of TCP_KEEPALIVE (True/False) for each " "server socket."), cfg.IntOpt('tcp_keepalive_interval', help="Sets the value of TCP_KEEPINTVL in seconds for each " "server socket. Not supported on OS X."), cfg.IntOpt('tcp_keepalive_count', help="Sets the value of TCP_KEEPCNT for each " "server socket. Not supported on OS X."), ] CONF = cfg.CONF CONF.register_opts(socket_opts) LOG = logging.getLogger(__name__) class Server(wsgi.Server): """Server class to manage a WSGI server, serving a WSGI application.""" def _set_socket_opts(self, _socket): _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # NOTE(praneshp): Call set_tcp_keepalive in oslo to set # tcp keepalive parameters. 
# Sockets can hang around forever # without keepalive. Interval and count are passed as keywords so they # cannot be swapped positionally. netutils.set_tcp_keepalive(_socket, self.conf.tcp_keepalive, self.conf.tcp_keepidle, tcp_keepalive_interval=self.conf.tcp_keepalive_interval, tcp_keepalive_count=self.conf.tcp_keepalive_count) return _socket cinder-8.0.0/cinder/tests/0000775000567000056710000000000012701406543016534 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/__init__.py0000664000567000056710000000000012701406250020626 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/0000775000567000056710000000000012701406543017513 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/test_quobyte.py0000664000567000056710000011723012701406250022613 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Quobyte Inc. # Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Quobyte driver module.""" import errno import os import six import traceback import mock from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_utils import imageutils from oslo_utils import units from cinder import context from cinder import exception from cinder.image import image_utils from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers import quobyte CONF = cfg.CONF class DumbVolume(object): def __init__(self): # Use a per-instance dict; a class-level one would be shared # across every DumbVolume in the test run. self.fields = {} def __setitem__(self, key, value): self.fields[key] = value def __getitem__(self, item): return self.fields[item] class FakeDb(object): msg = "Tests are broken: mock this out." 
def volume_get(self, *a, **kw): raise Exception(self.msg) def snapshot_get_all_for_volume(self, *a, **kw): """Mock this if you want results from it.""" return [] class QuobyteDriverTestCase(test.TestCase): """Test case for Quobyte driver.""" TEST_QUOBYTE_VOLUME = 'quobyte://quobyte-host/openstack-volumes' TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL = 'quobyte-host/openstack-volumes' TEST_SIZE_IN_GB = 1 TEST_MNT_POINT = '/mnt/quobyte' TEST_MNT_POINT_BASE = '/mnt' TEST_LOCAL_PATH = '/mnt/quobyte/volume-123' TEST_FILE_NAME = 'test.txt' TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' TEST_TMP_FILE = '/tmp/tempfile' VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca' SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede' def setUp(self): super(QuobyteDriverTestCase, self).setUp() self._configuration = mock.Mock(conf.Configuration) self._configuration.append_config_values(mock.ANY) self._configuration.quobyte_volume_url = \ self.TEST_QUOBYTE_VOLUME self._configuration.quobyte_client_cfg = None self._configuration.quobyte_sparsed_volumes = True self._configuration.quobyte_qcow2_volumes = False self._configuration.quobyte_mount_point_base = \ self.TEST_MNT_POINT_BASE self._configuration.nas_secure_file_operations = "auto" self._configuration.nas_secure_file_permissions = "auto" self._driver =\ quobyte.QuobyteDriver(configuration=self._configuration, db=FakeDb()) self._driver.shares = {} self._driver.set_nas_security_options(is_new_cinder_install=False) def assertRaisesAndMessageMatches( self, excClass, msg, callableObj, *args, **kwargs): """Ensure that the specified exception was raised. """ caught = False try: callableObj(*args, **kwargs) except Exception as exc: caught = True self.assertIsInstance(exc, excClass, 'Wrong exception caught: %s Stacktrace: %s' % (exc, traceback.format_exc())) self.assertIn(msg, six.text_type(exc)) if not caught: self.fail('Expected raised exception but nothing caught.') def test_local_path(self): """local_path common use case.""" drv = self._driver volume = DumbVolume() volume['provider_location'] = self.TEST_QUOBYTE_VOLUME volume['name'] = 'volume-123' self.assertEqual( '/mnt/1331538734b757ed52d0e18c0a7210cd/volume-123', drv.local_path(volume)) def test_mount_quobyte_should_mount_correctly(self): with mock.patch.object(self._driver, '_execute') as mock_execute, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '.read_proc_mount') as mock_open: # Content of /proc/mount (not mounted yet). mock_open.return_value = six.StringIO( "/dev/sda5 / ext4 rw,relatime,data=ordered 0 0") self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT) mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT) mount_call = mock.call( 'mount.quobyte', self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, run_as_root=False) getfattr_call = mock.call( 'getfattr', '-n', 'quobyte.info', self.TEST_MNT_POINT, run_as_root=False) mock_execute.assert_has_calls( [mkdir_call, mount_call, getfattr_call], any_order=False) def test_mount_quobyte_already_mounted_detected_seen_in_proc_mount(self): with mock.patch.object(self._driver, '_execute') as mock_execute, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '.read_proc_mount') as mock_open: # Content of /proc/mount (already mounted). 
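# A self-contained sketch (assuming the standard /proc/mounts layout) of the
# "already mounted?" check these tests exercise via read_proc_mount: scan the
# mount table for a line whose device contains the volume and whose mount
# point matches. is_mounted is an illustrative helper, not driver code.
import io


def is_mounted(proc_mounts, volume, mount_point):
    for line in proc_mounts:
        parts = line.split()
        if len(parts) >= 2 and volume in parts[0] and parts[1] == mount_point:
            return True
    return False


assert is_mounted(io.StringIO(u"quobyte@host/vol /mnt/quobyte fuse rw 0 0\n"),
                  'host/vol', '/mnt/quobyte')
assert not is_mounted(io.StringIO(u""), 'host/vol', '/mnt/quobyte')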
mock_open.return_value = six.StringIO( "quobyte@%s %s fuse rw,nosuid,nodev,noatime,user_id=1000" ",group_id=100,default_permissions,allow_other 0 0" % (self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT)) self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT) mock_execute.assert_called_once_with( 'getfattr', '-n', 'quobyte.info', self.TEST_MNT_POINT, run_as_root=False) def test_mount_quobyte_should_suppress_and_log_already_mounted_error(self): """test_mount_quobyte_should_suppress_and_log_already_mounted_error Based on /proc/mount, the file system is not mounted yet. However, mount.quobyte returns with an 'already mounted' error. This is a last-resort safe-guard in case /proc/mount parsing was not successful. Because _mount_quobyte gets called with ensure=True, the error will be suppressed and logged instead. """ with mock.patch.object(self._driver, '_execute') as mock_execute, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '.read_proc_mount') as mock_open, \ mock.patch('cinder.volume.drivers.quobyte.LOG') as mock_LOG: # Content of /proc/mount (empty). mock_open.return_value = six.StringIO() mock_execute.side_effect = [None, putils.ProcessExecutionError( stderr='is busy or already mounted')] self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, ensure=True) mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT) mount_call = mock.call( 'mount.quobyte', self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, run_as_root=False) mock_execute.assert_has_calls([mkdir_call, mount_call], any_order=False) mock_LOG.warning.assert_called_once_with('%s is already mounted', self.TEST_QUOBYTE_VOLUME) def test_mount_quobyte_should_reraise_already_mounted_error(self): """test_mount_quobyte_should_reraise_already_mounted_error Like test_mount_quobyte_should_suppress_and_log_already_mounted_error but with ensure=False. """ with mock.patch.object(self._driver, '_execute') as mock_execute, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '.read_proc_mount') as mock_open: mock_open.return_value = six.StringIO() mock_execute.side_effect = [ None, # mkdir putils.ProcessExecutionError( # mount stderr='is busy or already mounted')] self.assertRaises(putils.ProcessExecutionError, self._driver._mount_quobyte, self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, ensure=False) mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT) mount_call = mock.call( 'mount.quobyte', self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, run_as_root=False) mock_execute.assert_has_calls([mkdir_call, mount_call], any_order=False) def test_get_hash_str(self): """_get_hash_str should calculation correct value.""" drv = self._driver self.assertEqual('1331538734b757ed52d0e18c0a7210cd', drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) def test_get_available_capacity_with_df(self): """_get_available_capacity should calculate correct value.""" drv = self._driver df_total_size = 2620544 df_avail = 1490560 df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n' df_data = 'quobyte@%s %d 996864 %d 41%% %s' % \ (self.TEST_QUOBYTE_VOLUME, df_total_size, df_avail, self.TEST_MNT_POINT) df_output = df_head + df_data drv._get_mount_point_for_share = mock.Mock(return_value=self. TEST_MNT_POINT) drv._execute = mock.Mock(return_value=(df_output, None)) self.assertEqual((df_avail, df_total_size), drv._get_available_capacity(self.TEST_QUOBYTE_VOLUME)) (drv._get_mount_point_for_share. assert_called_once_with(self.TEST_QUOBYTE_VOLUME)) (drv._execute. 
assert_called_once_with('df', '--portability', '--block-size', '1', self.TEST_MNT_POINT, run_as_root=self._driver._execute_as_root)) def test_get_capacity_info(self): with mock.patch.object(self._driver, '_get_available_capacity') \ as mock_get_available_capacity: drv = self._driver df_size = 2620544 df_avail = 1490560 mock_get_available_capacity.return_value = (df_avail, df_size) size, available, used = drv._get_capacity_info(mock.ANY) mock_get_available_capacity.assert_called_once_with(mock.ANY) self.assertEqual(df_size, size) self.assertEqual(df_avail, available) self.assertEqual(size - available, used) def test_load_shares_config(self): """_load_shares_config takes the Volume URL and strips quobyte://.""" drv = self._driver drv._load_shares_config() self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv.shares) def test_load_shares_config_without_protocol(self): """Same as test_load_shares_config, but URL is without quobyte://.""" drv = self._driver drv.configuration.quobyte_volume_url = \ self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL drv._load_shares_config() self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv.shares) def test_ensure_share_mounted(self): """_ensure_share_mounted simple use case.""" with mock.patch.object(self._driver, '_get_mount_point_for_share') as \ mock_get_mount_point, \ mock.patch.object(self._driver, '_mount_quobyte') as \ mock_mount: drv = self._driver drv._ensure_share_mounted(self.TEST_QUOBYTE_VOLUME) mock_get_mount_point.assert_called_once_with( self.TEST_QUOBYTE_VOLUME) mock_mount.assert_called_once_with( self.TEST_QUOBYTE_VOLUME, mock_get_mount_point.return_value, ensure=True) def test_ensure_shares_mounted_should_save_mounting_successfully(self): """_ensure_shares_mounted should save share if mounted with success.""" with mock.patch.object(self._driver, '_ensure_share_mounted') \ as mock_ensure_share_mounted: drv = self._driver drv._ensure_shares_mounted() mock_ensure_share_mounted.assert_called_once_with( self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL) self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv._mounted_shares) def test_ensure_shares_mounted_should_not_save_mounting_with_error(self): """_ensure_shares_mounted should not save if mount raised an error.""" with mock.patch.object(self._driver, '_ensure_share_mounted') \ as mock_ensure_share_mounted: drv = self._driver mock_ensure_share_mounted.side_effect = Exception() drv._ensure_shares_mounted() mock_ensure_share_mounted.assert_called_once_with( self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL) self.assertEqual(1, len(drv.shares)) self.assertEqual(0, len(drv._mounted_shares)) @mock.patch.object(quobyte.QuobyteDriver, "set_nas_security_options") def test_do_setup(self, qb_snso_mock): """do_setup runs successfully.""" drv = self._driver drv.do_setup(mock.create_autospec(context.RequestContext)) qb_snso_mock.assert_called_once_with(is_new_cinder_install=mock.ANY) def test_check_for_setup_error_throws_quobyte_volume_url_not_set(self): """check_for_setup_error throws if 'quobyte_volume_url' is not set.""" drv = self._driver drv.configuration.quobyte_volume_url = None self.assertRaisesAndMessageMatches(exception.VolumeDriverException, 'no Quobyte volume configured', drv.check_for_setup_error) def test_check_for_setup_error_throws_client_not_installed(self): """check_for_setup_error throws if client is not installed.""" drv = self._driver drv._execute = mock.Mock(side_effect=OSError (errno.ENOENT, 'No such file or directory')) self.assertRaisesAndMessageMatches(exception.VolumeDriverException, 
'mount.quobyte is not installed', drv.check_for_setup_error) drv._execute.assert_called_once_with('mount.quobyte', check_exit_code=False, run_as_root=False) def test_check_for_setup_error_throws_client_not_executable(self): """check_for_setup_error throws if client cannot be executed.""" drv = self._driver drv._execute = mock.Mock(side_effect=OSError (errno.EPERM, 'Operation not permitted')) self.assertRaisesAndMessageMatches(OSError, 'Operation not permitted', drv.check_for_setup_error) drv._execute.assert_called_once_with('mount.quobyte', check_exit_code=False, run_as_root=False) def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self): """_find_share should throw error if there is no mounted share.""" drv = self._driver drv._mounted_shares = [] self.assertRaises(exception.NotFound, drv._find_share, self.TEST_SIZE_IN_GB) def test_find_share(self): """_find_share simple use case.""" drv = self._driver drv._mounted_shares = [self.TEST_QUOBYTE_VOLUME] self.assertEqual(self.TEST_QUOBYTE_VOLUME, drv._find_share(self.TEST_SIZE_IN_GB)) def test_find_share_does_not_throw_error_if_there_isnt_enough_space(self): """_find_share intentionally does not throw when no space is left.""" with mock.patch.object(self._driver, '_get_available_capacity') \ as mock_get_available_capacity: drv = self._driver df_size = 2620544 df_avail = 0 mock_get_available_capacity.return_value = (df_avail, df_size) drv._mounted_shares = [self.TEST_QUOBYTE_VOLUME] self.assertEqual(self.TEST_QUOBYTE_VOLUME, drv._find_share(self.TEST_SIZE_IN_GB)) # The current implementation does not call _get_available_capacity. # Future ones might do and therefore we mocked it. self.assertGreaterEqual(mock_get_available_capacity.call_count, 0) def _simple_volume(self, uuid=None): volume = DumbVolume() volume['provider_location'] = self.TEST_QUOBYTE_VOLUME if uuid is None: volume['id'] = self.VOLUME_UUID else: volume['id'] = uuid # volume['name'] mirrors format from db/sqlalchemy/models.py volume['name'] = 'volume-%s' % volume['id'] volume['size'] = 10 volume['status'] = 'available' return volume def test_create_sparsed_volume(self): drv = self._driver volume = self._simple_volume() drv._create_sparsed_file = mock.Mock() drv._set_rw_permissions_for_all = mock.Mock() drv._do_create_volume(volume) drv._create_sparsed_file.assert_called_once_with(mock.ANY, mock.ANY) drv._set_rw_permissions_for_all.assert_called_once_with(mock.ANY) def test_create_nonsparsed_volume(self): drv = self._driver volume = self._simple_volume() old_value = self._configuration.quobyte_sparsed_volumes self._configuration.quobyte_sparsed_volumes = False drv._create_regular_file = mock.Mock() drv._set_rw_permissions_for_all = mock.Mock() drv._do_create_volume(volume) drv._create_regular_file.assert_called_once_with(mock.ANY, mock.ANY) drv._set_rw_permissions_for_all.assert_called_once_with(mock.ANY) self._configuration.quobyte_sparsed_volumes = old_value def test_create_qcow2_volume(self): drv = self._driver volume = self._simple_volume() old_value = self._configuration.quobyte_qcow2_volumes self._configuration.quobyte_qcow2_volumes = True drv._execute = mock.Mock() hashed = drv._get_hash_str(volume['provider_location']) path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, hashed, self.VOLUME_UUID) drv._do_create_volume(volume) assert_calls = [mock.call('qemu-img', 'create', '-f', 'qcow2', '-o', 'preallocation=metadata', path, str(volume['size'] * units.Gi), run_as_root=self._driver._execute_as_root), mock.call('chmod', 'ugo+rw', path, 
run_as_root=self._driver._execute_as_root)] drv._execute.assert_has_calls(assert_calls) self._configuration.quobyte_qcow2_volumes = old_value def test_create_volume_should_ensure_quobyte_mounted(self): """create_volume ensures shares provided in config are mounted.""" drv = self._driver drv.LOG = mock.Mock() drv._find_share = mock.Mock() drv._do_create_volume = mock.Mock() drv._ensure_shares_mounted = mock.Mock() volume = DumbVolume() volume['size'] = self.TEST_SIZE_IN_GB drv.create_volume(volume) drv._find_share.assert_called_once_with(mock.ANY) drv._do_create_volume.assert_called_once_with(volume) drv._ensure_shares_mounted.assert_called_once_with() def test_create_volume_should_return_provider_location(self): """create_volume should return provider_location with found share.""" drv = self._driver drv.LOG = mock.Mock() drv._ensure_shares_mounted = mock.Mock() drv._do_create_volume = mock.Mock() drv._find_share = mock.Mock(return_value=self.TEST_QUOBYTE_VOLUME) volume = DumbVolume() volume['size'] = self.TEST_SIZE_IN_GB result = drv.create_volume(volume) self.assertEqual(self.TEST_QUOBYTE_VOLUME, result['provider_location']) drv._do_create_volume.assert_called_once_with(volume) drv._ensure_shares_mounted.assert_called_once_with() drv._find_share.assert_called_once_with(self.TEST_SIZE_IN_GB) def test_create_cloned_volume(self): drv = self._driver drv._create_snapshot = mock.Mock() drv._copy_volume_from_snapshot = mock.Mock() drv._delete_snapshot = mock.Mock() volume = self._simple_volume() src_vref = self._simple_volume() src_vref['id'] = '375e32b2-804a-49f2-b282-85d1d5a5b9e1' src_vref['name'] = 'volume-%s' % src_vref['id'] volume_ref = {'id': volume['id'], 'name': volume['name'], 'status': volume['status'], 'provider_location': volume['provider_location'], 'size': volume['size']} snap_ref = {'volume_name': src_vref['name'], 'name': 'clone-snap-%s' % src_vref['id'], 'size': src_vref['size'], 'volume_size': src_vref['size'], 'volume_id': src_vref['id'], 'id': 'tmp-snap-%s' % src_vref['id'], 'volume': src_vref} drv.create_cloned_volume(volume, src_vref) drv._create_snapshot.assert_called_once_with(snap_ref) drv._copy_volume_from_snapshot.assert_called_once_with(snap_ref, volume_ref, volume['size']) drv._delete_snapshot.assert_called_once_with(mock.ANY) @mock.patch('oslo_utils.fileutils.delete_if_exists') def test_delete_volume(self, mock_delete_if_exists): volume = self._simple_volume() volume_filename = 'volume-%s' % self.VOLUME_UUID volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename) info_file = volume_path + '.info' with mock.patch.object(self._driver, '_ensure_share_mounted') as \ mock_ensure_share_mounted, \ mock.patch.object(self._driver, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(self._driver, 'get_active_image_from_info') as \ mock_active_image_from_info, \ mock.patch.object(self._driver, '_execute') as \ mock_execute, \ mock.patch.object(self._driver, '_local_path_volume') as \ mock_local_path_volume, \ mock.patch.object(self._driver, '_local_path_volume_info') as \ mock_local_path_volume_info: mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_active_image_from_info.return_value = volume_filename mock_local_path_volume.return_value = volume_path mock_local_path_volume_info.return_value = info_file self._driver.delete_volume(volume) mock_ensure_share_mounted.assert_called_once_with( volume['provider_location']) mock_local_volume_dir.assert_called_once_with(volume) mock_active_image_from_info.assert_called_once_with(volume) 
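# NOTE: the next assertion pins down the exact removal command the driver
# is expected to issue for the active image file. Schematically (same
# symbols as this test, not a literal production path):
#
#     self._driver._execute('rm', '-f', volume_path,
#                           run_as_root=self._driver._execute_as_root)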
mock_execute.assert_called_once_with('rm', '-f', volume_path, run_as_root= self._driver._execute_as_root) mock_local_path_volume_info.assert_called_once_with(volume) mock_local_path_volume.assert_called_once_with(volume) mock_delete_if_exists.assert_any_call(volume_path) mock_delete_if_exists.assert_any_call(info_file) def test_delete_should_ensure_share_mounted(self): """delete_volume should ensure that corresponding share is mounted.""" drv = self._driver drv._execute = mock.Mock() volume = DumbVolume() volume['name'] = 'volume-123' volume['provider_location'] = self.TEST_QUOBYTE_VOLUME drv._ensure_share_mounted = mock.Mock() drv.delete_volume(volume) (drv._ensure_share_mounted. assert_called_once_with(self.TEST_QUOBYTE_VOLUME)) drv._execute.assert_called_once_with('rm', '-f', mock.ANY, run_as_root=False) def test_delete_should_not_delete_if_provider_location_not_provided(self): """delete_volume shouldn't delete if provider_location missed.""" drv = self._driver drv._ensure_share_mounted = mock.Mock() drv._execute = mock.Mock() volume = DumbVolume() volume['name'] = 'volume-123' volume['provider_location'] = None drv.delete_volume(volume) assert not drv._ensure_share_mounted.called assert not drv._execute.called def test_extend_volume(self): drv = self._driver volume = self._simple_volume() volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, drv._get_hash_str( self.TEST_QUOBYTE_VOLUME), self.VOLUME_UUID) qemu_img_info_output = """image: volume-%s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 473K """ % self.VOLUME_UUID img_info = imageutils.QemuImgInfo(qemu_img_info_output) drv.get_active_image_from_info = mock.Mock(return_value=volume['name']) image_utils.qemu_img_info = mock.Mock(return_value=img_info) image_utils.resize_image = mock.Mock() drv.extend_volume(volume, 3) drv.get_active_image_from_info.assert_called_once_with(volume) image_utils.qemu_img_info.assert_called_once_with(volume_path) image_utils.resize_image.assert_called_once_with(volume_path, 3) def test_copy_volume_from_snapshot(self): drv = self._driver # lots of test vars to be prepared at first dest_volume = self._simple_volume( 'c1073000-0000-0000-0000-0000000c1073') src_volume = self._simple_volume() vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) src_vol_path = os.path.join(vol_dir, src_volume['name']) dest_vol_path = os.path.join(vol_dir, dest_volume['name']) info_path = os.path.join(vol_dir, src_volume['name']) + '.info' snapshot = {'volume_name': src_volume['name'], 'name': 'clone-snap-%s' % src_volume['id'], 'size': src_volume['size'], 'volume_size': src_volume['size'], 'volume_id': src_volume['id'], 'id': 'tmp-snap-%s' % src_volume['id'], 'volume': src_volume} snap_file = dest_volume['name'] + '.' 
+ snapshot['id'] snap_path = os.path.join(vol_dir, snap_file) size = dest_volume['size'] qemu_img_output = """image: %s file format: raw virtual size: 1.0G (1073741824 bytes) disk size: 173K backing file: %s """ % (snap_file, src_volume['name']) img_info = imageutils.QemuImgInfo(qemu_img_output) # mocking and testing starts here image_utils.convert_image = mock.Mock() drv._read_info_file = mock.Mock(return_value= {'active': snap_file, snapshot['id']: snap_file}) image_utils.qemu_img_info = mock.Mock(return_value=img_info) drv._set_rw_permissions_for_all = mock.Mock() drv._copy_volume_from_snapshot(snapshot, dest_volume, size) drv._read_info_file.assert_called_once_with(info_path) image_utils.qemu_img_info.assert_called_once_with(snap_path) (image_utils.convert_image. assert_called_once_with(src_vol_path, dest_vol_path, 'raw', run_as_root=self._driver._execute_as_root)) drv._set_rw_permissions_for_all.assert_called_once_with(dest_vol_path) def test_create_volume_from_snapshot_status_not_available(self): """Expect an error when the snapshot's status is not 'available'.""" drv = self._driver src_volume = self._simple_volume() snap_ref = {'volume_name': src_volume['name'], 'name': 'clone-snap-%s' % src_volume['id'], 'size': src_volume['size'], 'volume_size': src_volume['size'], 'volume_id': src_volume['id'], 'id': 'tmp-snap-%s' % src_volume['id'], 'volume': src_volume, 'status': 'error'} new_volume = DumbVolume() new_volume['size'] = snap_ref['size'] self.assertRaises(exception.InvalidSnapshot, drv.create_volume_from_snapshot, new_volume, snap_ref) def test_create_volume_from_snapshot(self): drv = self._driver src_volume = self._simple_volume() snap_ref = {'volume_name': src_volume['name'], 'name': 'clone-snap-%s' % src_volume['id'], 'size': src_volume['size'], 'volume_size': src_volume['size'], 'volume_id': src_volume['id'], 'id': 'tmp-snap-%s' % src_volume['id'], 'volume': src_volume, 'status': 'available'} new_volume = DumbVolume() new_volume['size'] = snap_ref['size'] drv._ensure_shares_mounted = mock.Mock() drv._find_share = mock.Mock(return_value=self.TEST_QUOBYTE_VOLUME) drv._do_create_volume = mock.Mock() drv._copy_volume_from_snapshot = mock.Mock() drv.create_volume_from_snapshot(new_volume, snap_ref) drv._ensure_shares_mounted.assert_called_once_with() drv._find_share.assert_called_once_with(new_volume['size']) drv._do_create_volume.assert_called_once_with(new_volume) (drv._copy_volume_from_snapshot. 
assert_called_once_with(snap_ref, new_volume, new_volume['size'])) def test_initialize_connection(self): drv = self._driver volume = self._simple_volume() vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) vol_path = os.path.join(vol_dir, volume['name']) qemu_img_output = """image: %s file format: raw virtual size: 1.0G (1073741824 bytes) disk size: 173K """ % volume['name'] img_info = imageutils.QemuImgInfo(qemu_img_output) drv.get_active_image_from_info = mock.Mock(return_value=volume['name']) image_utils.qemu_img_info = mock.Mock(return_value=img_info) conn_info = drv.initialize_connection(volume, None) drv.get_active_image_from_info.assert_called_once_with(volume) image_utils.qemu_img_info.assert_called_once_with(vol_path) self.assertEqual('raw', conn_info['data']['format']) self.assertEqual('quobyte', conn_info['driver_volume_type']) self.assertEqual(volume['name'], conn_info['data']['name']) self.assertEqual(self.TEST_MNT_POINT_BASE, conn_info['mount_point_base']) def test_copy_volume_to_image_raw_image(self): drv = self._driver volume = self._simple_volume() volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name']) image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """image: %s file format: raw virtual size: 1.0G (1073741824 bytes) disk size: 173K """ % volume['name'] img_info = imageutils.QemuImgInfo(qemu_img_output) mock_qemu_img_info.return_value = img_info upload_path = volume_path drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_once_with(volume) mock_qemu_img_info.assert_called_once_with(volume_path) mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path) self.assertTrue(mock_create_temporary_file.called) def test_copy_volume_to_image_qcow2_image(self): """Upload a qcow2 image file which has to be converted to raw first.""" drv = self._driver volume = self._simple_volume() volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name']) image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'convert_image') as \ mock_convert_image, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """image: %s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 173K """ % volume['name'] img_info = 
imageutils.QemuImgInfo(qemu_img_output) mock_qemu_img_info.return_value = img_info upload_path = self.TEST_TMP_FILE drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_with(volume) mock_qemu_img_info.assert_called_once_with(volume_path) mock_convert_image.assert_called_once_with( volume_path, upload_path, 'raw') mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path) self.assertTrue(mock_create_temporary_file.called) def test_copy_volume_to_image_snapshot_exists(self): """Upload an active snapshot which has to be converted to raw first.""" drv = self._driver volume = self._simple_volume() volume_path = '%s/volume-%s' % (self.TEST_MNT_POINT, self.VOLUME_UUID) volume_filename = 'volume-%s' % self.VOLUME_UUID image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'convert_image') as \ mock_convert_image, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """image: volume-%s.%s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 173K backing file: %s """ % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename) img_info = imageutils.QemuImgInfo(qemu_img_output) mock_qemu_img_info.return_value = img_info upload_path = self.TEST_TMP_FILE drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_with(volume) mock_qemu_img_info.assert_called_once_with(volume_path) mock_convert_image.assert_called_once_with( volume_path, upload_path, 'raw') mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path) self.assertTrue(mock_create_temporary_file.called) def test_set_nas_security_options_default(self): drv = self._driver self.assertEqual("true", drv.configuration.nas_secure_file_operations) self.assertEqual("true", drv.configuration.nas_secure_file_permissions) self.assertFalse(drv._execute_as_root) def test_set_nas_security_options_insecure(self): drv = self._driver drv.configuration.nas_secure_file_operations = "false" drv.configuration.nas_secure_file_permissions = "false" drv.set_nas_security_options(is_new_cinder_install=True) self.assertEqual("false", drv.configuration.nas_secure_file_operations) self.assertEqual("false", drv.configuration.nas_secure_file_permissions) self.assertTrue(drv._execute_as_root) def test_set_nas_security_options_explicitly_secure(self): drv = self._driver drv.configuration.nas_secure_file_operations = "true" drv.configuration.nas_secure_file_permissions = "true" drv.set_nas_security_options(is_new_cinder_install=True) self.assertEqual("true", drv.configuration.nas_secure_file_operations) self.assertEqual("true", drv.configuration.nas_secure_file_permissions) self.assertFalse(drv._execute_as_root) cinder-8.0.0/cinder/tests/unit/test_misc.py0000664000567000056710000000432112701406250022052 0ustar 
jenkinsjenkins00000000000000 # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import os from cinder import exception from cinder.i18n import _ from cinder import test class ExceptionTestCase(test.TestCase): @staticmethod def _raise_exc(exc): raise exc() def test_exceptions_raise(self): # NOTE(dprince): disable format errors since we are not passing kwargs self.flags(fatal_exception_format_errors=False) for name in dir(exception): exc = getattr(exception, name) if isinstance(exc, type): self.assertRaises(exc, self._raise_exc, exc) class ProjectTestCase(test.TestCase): def test_no_migrations_have_downgrade(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../') py_glob = os.path.join(topdir, "cinder", "db", "sqlalchemy", "migrate_repo", "versions", "*.py") downgrades = [] for path in glob.iglob(py_glob): has_upgrade = False has_downgrade = False with open(path, "r") as f: for line in f: if 'def upgrade(' in line: has_upgrade = True if 'def downgrade(' in line: has_downgrade = True if has_upgrade and has_downgrade: fname = os.path.basename(path) downgrades.append(fname) helpful_msg = (_("The following migrations have a downgrade, " "which are not allowed: " "\n\t%s") % '\n\t'.join(sorted(downgrades))) self.assertFalse(downgrades, msg=helpful_msg) cinder-8.0.0/cinder/tests/unit/test_hitachi_hnas_iscsi.py0000664000567000056710000005072012701406257024746 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Self test for Hitachi Unified Storage (HUS-HNAS) platform. 
""" import os import tempfile import time import mock from oslo_concurrency import processutils as putils import six from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.hitachi import hnas_iscsi as iscsi from cinder.volume import volume_types HNASCONF = """ ssc True 172.17.44.15 supervisor supervisor default 172.17.39.132 fs2 silver 172.17.39.133 fs2 """ HNAS_WRONG_CONF1 = """ ssc 172.17.44.15 supervisor supervisor default 172.17.39.132:/cinder """ HNAS_WRONG_CONF2 = """ ssc 172.17.44.15 supervisor supervisor default silver """ # The following information is passed on to tests, when creating a volume _VOLUME = {'name': 'testvol', 'volume_id': '1234567890', 'size': 128, 'volume_type': 'silver', 'volume_type_id': '1', 'provider_location': '83-68-96-AA-DA-5D.volume-2dfe280e-470a-4182' '-afb8-1755025c35b8', 'id': 'abcdefg', 'host': 'host1@hnas-iscsi-backend#silver'} class SimulatedHnasBackend(object): """Simulation Back end. Talks to HNAS.""" # these attributes are shared across object instances start_lun = 0 init_index = 0 target_index = 0 hlun = 0 def __init__(self): self.type = 'HNAS' self.out = '' self.volumes = [] # iSCSI connections self.connections = [] def rename_existing_lu(self, cmd, ip0, user, pw, fslabel, vol_name, vol_ref_name): return 'Logical unit modified successfully.' def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun): out = "Name : volume-test \n\ Comment: \n\ Path : /.cinder/volume-test.iscsi \n\ Size : 20 GB \n\ File System : manage_iscsi_test \n\ File System Mounted : Yes \n\ Logical Unit Mounted: Yes" return out def deleteVolume(self, name): volume = self.getVolume(name) if volume: self.volumes.remove(volume) return True else: return False def deleteVolumebyProvider(self, provider): volume = self.getVolumebyProvider(provider) if volume: self.volumes.remove(volume) return True else: return False def getVolumes(self): return self.volumes def getVolume(self, name): if self.volumes: for volume in self.volumes: if str(volume['name']) == name: return volume return None def getVolumebyProvider(self, provider): if self.volumes: for volume in self.volumes: if str(volume['provider_location']) == provider: return volume return None def createVolume(self, name, provider, sizeMiB, comment): new_vol = {'additionalStates': [], 'adminSpace': {'freeMiB': 0, 'rawReservedMiB': 384, 'reservedMiB': 128, 'usedMiB': 128}, 'baseId': 115, 'copyType': 1, 'creationTime8601': '2012-10-22T16:37:57-07:00', 'creationTimeSec': 1350949077, 'failedStates': [], 'id': 115, 'provider_location': provider, 'name': name, 'comment': comment, 'provisioningType': 1, 'readOnly': False, 'sizeMiB': sizeMiB, 'state': 1, 'userSpace': {'freeMiB': 0, 'rawReservedMiB': 41984, 'reservedMiB': 31488, 'usedMiB': 31488}, 'usrSpcAllocLimitPct': 0, 'usrSpcAllocWarningPct': 0, 'uuid': '1e7daee4-49f4-4d07-9ab8-2b6a4319e243', 'wwn': '50002AC00073383D'} self.volumes.append(new_vol) def create_lu(self, cmd, ip0, user, pw, hdp, size, name): vol_id = name _out = ("LUN: %d HDP: fs2 size: %s MB, is successfully created" % (self.start_lun, size)) self.createVolume(name, vol_id, size, "create-lu") self.start_lun += 1 return _out def delete_lu(self, cmd, ip0, user, pw, hdp, lun): _out = "" id = "myID" self.deleteVolumebyProvider(id + '.' 
+ str(lun)) return _out def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name): _out = ("LUN: %s HDP: 9 size: %s MB, is successfully created" % (self.start_lun, size)) id = name self.createVolume(name, id + '.' + str(self.start_lun), size, "create-dup") self.start_lun += 1 return _out def add_iscsi_conn(self, cmd, ip0, user, pw, lun, hdp, port, iqn, initiator): ctl = "" conn = (self.hlun, lun, initiator, self.init_index, iqn, self.target_index, ctl, port) _out = ("H-LUN: %d mapped. LUN: %s, iSCSI Initiator: %s @ index: %d, \ and Target: %s @ index %d is successfully paired @ CTL: %s, \ Port: %s" % conn) self.init_index += 1 self.target_index += 1 self.hlun += 1 self.connections.append(conn) return _out def del_iscsi_conn(self, cmd, ip0, user, pw, port, iqn, initiator): self.connections.pop() _out = ("H-LUN: successfully deleted from target") return _out def extend_vol(self, cmd, ip0, user, pw, hdp, lu, size, name): _out = ("LUN: %s successfully extended to %s MB" % (lu, size)) id = name self.out = _out v = self.getVolumebyProvider(id + '.' + str(lu)) if v: v['sizeMiB'] = size return _out def get_luns(self): return len(self.alloc_lun) def get_conns(self): return len(self.connections) def get_out(self): return str(self.out) def get_version(self, cmd, ver, ip0, user, pw): self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \ "version: 11.2.3319.09 LU: 256" \ " RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01" return self.out def get_iscsi_info(self, cmd, ip0, user, pw): self.out = "CTL: 0 Port: 4 IP: 172.17.39.132 Port: 3260 Link: Up\n" \ "CTL: 1 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up" return self.out def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None): self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: " \ "70 Normal fs1\n" \ "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 Normal fs2" return self.out def get_targetiqn(self, cmd, ip0, user, pw, id, hdp, secret): self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget""" return self.out def set_targetsecret(self, cmd, ip0, user, pw, target, hdp, secret): self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget""" return self.out def get_targetsecret(self, cmd, ip0, user, pw, target, hdp): self.out = """wGkJhTpXaaYJ5Rv""" return self.out def get_evs(self, cmd, ip0, user, pw, fsid): return '1' def check_lu(self, cmd, ip0, user, pw, volume_name, hdp): return True, 1, {'alias': 'cinder-default', 'secret': 'mysecret', 'iqn': 'iqn.1993-08.org.debian:01:11f90746eb2'} def check_target(self, cmd, ip0, user, pw, hdp, target_alias): return False, None class HNASiSCSIDriverTest(test.TestCase): """Test HNAS iSCSI volume driver.""" def __init__(self, *args, **kwargs): super(HNASiSCSIDriverTest, self).__init__(*args, **kwargs) @mock.patch.object(iscsi, 'factory_bend') def setUp(self, _factory_bend): super(HNASiSCSIDriverTest, self).setUp() self.backend = SimulatedHnasBackend() _factory_bend.return_value = self.backend self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') self.addCleanup(self.config_file.close) self.config_file.write(HNASCONF) self.config_file.flush() self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.hds_hnas_iscsi_config_file = self.config_file.name self.configuration.hds_svc_iscsi_chap_enabled = True self.driver = iscsi.HDSISCSIDriver(configuration=self.configuration) self.driver.do_setup("") def _create_volume(self): loc = self.driver.create_volume(_VOLUME) vol = _VOLUME.copy() vol['provider_location'] = loc['provider_location'] return vol @mock.patch('six.moves.builtins.open') 
@mock.patch.object(os, 'access') def test_read_config(self, m_access, m_open): # Test exception when file is not found m_access.return_value = False m_open.return_value = six.StringIO(HNASCONF) self.assertRaises(exception.NotFound, iscsi._read_config, '') # Test exception when config file has parsing errors # due to missing tag m_access.return_value = True m_open.return_value = six.StringIO(HNAS_WRONG_CONF1) self.assertRaises(exception.ConfigNotFound, iscsi._read_config, '') # Test exception when config file has parsing errors # due to missing tag m_open.return_value = six.StringIO(HNAS_WRONG_CONF2) self.configuration.hds_hnas_iscsi_config_file = '' self.assertRaises(exception.ParameterNotFound, iscsi._read_config, '') def test_create_volume(self): loc = self.driver.create_volume(_VOLUME) self.assertNotEqual(loc, None) self.assertNotEqual(loc['provider_location'], None) # cleanup self.backend.deleteVolumebyProvider(loc['provider_location']) def test_get_volume_stats(self): stats = self.driver.get_volume_stats(True) self.assertEqual("HDS", stats["vendor_name"]) self.assertEqual("iSCSI", stats["storage_protocol"]) self.assertEqual(2, len(stats['pools'])) def test_delete_volume(self): vol = self._create_volume() self.driver.delete_volume(vol) # should not be deletable twice prov_loc = self.backend.getVolumebyProvider(vol['provider_location']) self.assertTrue(prov_loc is None) def test_extend_volume(self): vol = self._create_volume() new_size = _VOLUME['size'] * 2 self.driver.extend_volume(vol, new_size) # cleanup self.backend.deleteVolumebyProvider(vol['provider_location']) @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') def test_create_snapshot(self, m_id_to_vol): vol = self._create_volume() m_id_to_vol.return_value = vol svol = vol.copy() svol['volume_size'] = svol['size'] loc = self.driver.create_snapshot(svol) self.assertNotEqual(loc, None) svol['provider_location'] = loc['provider_location'] # cleanup self.backend.deleteVolumebyProvider(svol['provider_location']) self.backend.deleteVolumebyProvider(vol['provider_location']) @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') def test_create_clone(self, m_id_to_vol): src_vol = self._create_volume() m_id_to_vol.return_value = src_vol src_vol['volume_size'] = src_vol['size'] dst_vol = self._create_volume() dst_vol['volume_size'] = dst_vol['size'] loc = self.driver.create_cloned_volume(dst_vol, src_vol) self.assertNotEqual(loc, None) # cleanup self.backend.deleteVolumebyProvider(src_vol['provider_location']) self.backend.deleteVolumebyProvider(loc['provider_location']) @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol') def test_delete_snapshot(self, m_id_to_vol): svol = self._create_volume() lun = svol['provider_location'] m_id_to_vol.return_value = svol self.driver.delete_snapshot(svol) self.assertTrue(self.backend.getVolumebyProvider(lun) is None) def test_create_volume_from_snapshot(self): svol = self._create_volume() svol['volume_size'] = svol['size'] vol = self.driver.create_volume_from_snapshot(_VOLUME, svol) self.assertNotEqual(vol, None) # cleanup self.backend.deleteVolumebyProvider(svol['provider_location']) self.backend.deleteVolumebyProvider(vol['provider_location']) @mock.patch.object(time, 'sleep') @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location') def test_initialize_connection(self, m_update_vol_location, m_sleep): connector = {} connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2' connector['host'] = 'dut_1.lab.hds.com' vol = self._create_volume() conn = 
self.driver.initialize_connection(vol, connector) self.assertIn('3260', conn['data']['target_portal']) self.assertIsInstance(conn['data']['target_lun'], int) self.backend.add_iscsi_conn = mock.MagicMock() self.backend.add_iscsi_conn.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetAttachFailed, self.driver.initialize_connection, vol, connector) # cleanup self.backend.deleteVolumebyProvider(vol['provider_location']) @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location') def test_terminate_connection(self, m_update_vol_location): connector = {} connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2' connector['host'] = 'dut_1.lab.hds.com' vol = self._create_volume() vol['provider_location'] = "portal," +\ connector['initiator'] +\ ",18-48-A5-A1-80-13.0,ctl,port,hlun" conn = self.driver.initialize_connection(vol, connector) num_conn_before = self.backend.get_conns() self.driver.terminate_connection(vol, conn) num_conn_after = self.backend.get_conns() self.assertNotEqual(num_conn_before, num_conn_after) # cleanup self.backend.deleteVolumebyProvider(vol['provider_location']) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', return_value={'key': 'type', 'service_label': 'silver'}) def test_get_pool(self, m_ext_spec): label = self.driver.get_pool(_VOLUME) self.assertEqual('silver', label) @mock.patch.object(time, 'sleep') @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location') def test_get_service_target(self, m_update_vol_location, m_sleep): vol = _VOLUME.copy() self.backend.check_lu = mock.MagicMock() self.backend.check_target = mock.MagicMock() # Test the case where volume is not already mapped - CHAP enabled self.backend.check_lu.return_value = (False, 0, None) self.backend.check_target.return_value = (False, None) ret = self.driver._get_service_target(vol) iscsi_ip, iscsi_port, ctl, svc_port, hdp, alias, secret = ret self.assertEqual('evs1-tgt0', alias) # Test the case where volume is not already mapped - CHAP disabled self.driver.config['chap_enabled'] = 'False' ret = self.driver._get_service_target(vol) iscsi_ip, iscsi_port, ctl, svc_port, hdp, alias, secret = ret self.assertEqual('evs1-tgt0', alias) # Test the case where all targets are full fake_tgt = {'alias': 'fake', 'luns': range(0, 32)} self.backend.check_lu.return_value = (False, 0, None) self.backend.check_target.return_value = (True, fake_tgt) self.assertRaises(exception.NoMoreTargets, self.driver._get_service_target, vol) @mock.patch.object(iscsi.HDSISCSIDriver, '_get_service') def test_unmanage(self, get_service): get_service.return_value = 'fs2' self.driver.unmanage(_VOLUME) get_service.assert_called_once_with(_VOLUME) def test_manage_existing_get_size(self): vol = _VOLUME.copy() existing_vol_ref = {'source-name': 'manage_iscsi_test/volume-test'} out = self.driver.manage_existing_get_size(vol, existing_vol_ref) self.assertEqual(20, out) def test_manage_existing_get_size_error(self): vol = _VOLUME.copy() existing_vol_ref = {'source-name': 'invalid_FS/vol-not-found'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol, existing_vol_ref) def test_manage_existing_get_size_without_source_name(self): vol = _VOLUME.copy() existing_vol_ref = { 'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol, existing_vol_ref) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def 
test_manage_existing(self, m_get_extra_specs): vol = _VOLUME.copy() existing_vol_ref = {'source-name': 'fs2/volume-test'} version = {'provider_location': '18-48-A5-A1-80-13.testvol'} m_get_extra_specs.return_value = {'key': 'type', 'service_label': 'silver'} out = self.driver.manage_existing(vol, existing_vol_ref) m_get_extra_specs.assert_called_once_with('1') self.assertEqual(version, out) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_manage_existing_invalid_pool(self, m_get_extra_specs): vol = _VOLUME.copy() existing_vol_ref = {'source-name': 'fs2/volume-test'} m_get_extra_specs.return_value = {'key': 'type', 'service_label': 'gold'} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, vol, existing_vol_ref) m_get_extra_specs.assert_called_once_with('1') def test_manage_existing_invalid_volume_name(self): vol = _VOLUME.copy() existing_vol_ref = {'source-name': 'fs2/t/est_volume'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol, existing_vol_ref) def test_manage_existing_without_volume_name(self): vol = _VOLUME.copy() existing_vol_ref = {'source-name': 'fs2/'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol, existing_vol_ref) def test_manage_existing_with_FS_and_spaces(self): vol = _VOLUME.copy() existing_vol_ref = {'source-name': 'fs2/ '} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol, existing_vol_ref) cinder-8.0.0/cinder/tests/unit/backup/0000775000567000056710000000000012701406543020760 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/backup/fake_service_with_verify.py0000664000567000056710000000172512701406250026377 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.backup import driver from cinder.tests.unit.backup import fake_service class FakeBackupServiceWithVerify(driver.BackupDriverWithVerify, fake_service.FakeBackupService): def verify(self, backup): pass def get_backup_driver(context): return FakeBackupServiceWithVerify(context) cinder-8.0.0/cinder/tests/unit/backup/__init__.py0000664000567000056710000000000012701406250023052 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/backup/drivers/0000775000567000056710000000000012701406543022436 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/backup/drivers/__init__.py0000664000567000056710000000000012701406250024530 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/backup/drivers/test_backup_glusterfs.py0000664000567000056710000000764712701406250027423 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for GlusterFS backup driver. """ import os import mock from os_brick.remotefs import remotefs as remotefs_brick from cinder.backup.drivers import glusterfs from cinder import context from cinder import exception from cinder import test from cinder import utils FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' FAKE_HOST = 'fake_host' FAKE_VOL_NAME = 'backup_vol' FAKE_BACKUP_SHARE = '%s:%s' % (FAKE_HOST, FAKE_VOL_NAME) FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, 'e51e43e3c63fd5770e90e58e2eafc709') class BackupGlusterfsShareTestCase(test.TestCase): def setUp(self): super(BackupGlusterfsShareTestCase, self).setUp() self.ctxt = context.get_admin_context() def test_check_configuration(self): self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE) self.mock_object(glusterfs.GlusterfsBackupDriver, '_init_backup_repo_path', mock.Mock(return_value=FAKE_BACKUP_PATH)) with mock.patch.object(glusterfs.GlusterfsBackupDriver, '_check_configuration'): driver = glusterfs.GlusterfsBackupDriver(self.ctxt) driver._check_configuration() def test_check_configuration_no_backup_share(self): self.override_config('glusterfs_backup_share', None) self.mock_object(glusterfs.GlusterfsBackupDriver, '_init_backup_repo_path', mock.Mock(return_value=FAKE_BACKUP_PATH)) with mock.patch.object(glusterfs.GlusterfsBackupDriver, '_check_configuration'): driver = glusterfs.GlusterfsBackupDriver(self.ctxt) self.assertRaises(exception.ConfigNotFound, driver._check_configuration) def test_init_backup_repo_path(self): self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE) self.override_config('glusterfs_backup_mount_point', FAKE_BACKUP_MOUNT_POINT_BASE) mock_remotefsclient = mock.Mock() mock_remotefsclient.get_mount_point = mock.Mock( return_value=FAKE_BACKUP_PATH) self.mock_object(glusterfs.GlusterfsBackupDriver, '_check_configuration') self.mock_object(remotefs_brick, 'RemoteFsClient', mock.Mock(return_value=mock_remotefsclient)) self.mock_object(os, 'getegid', mock.Mock(return_value=333333)) self.mock_object(utils, 'get_file_gid', mock.Mock(return_value=333333)) self.mock_object(utils, 'get_file_mode', mock.Mock(return_value=00000)) self.mock_object(utils, 'get_root_helper') with mock.patch.object(glusterfs.GlusterfsBackupDriver, '_init_backup_repo_path'): driver = glusterfs.GlusterfsBackupDriver(self.ctxt) self.mock_object(driver, '_execute') path = driver._init_backup_repo_path() self.assertEqual(FAKE_BACKUP_PATH, path) utils.get_root_helper.called_once() mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE) mock_remotefsclient.get_mount_point.assert_called_once_with( FAKE_BACKUP_SHARE) cinder-8.0.0/cinder/tests/unit/backup/drivers/test_backup_posix.py0000664000567000056710000001472012701406250026535 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Posix backup driver. """ import os import mock from six.moves import builtins from cinder.backup.drivers import posix from cinder import context from cinder import test from cinder.tests.unit import fake_constants as fake FAKE_FILE_SIZE = 52428800 FAKE_SHA_BLOCK_SIZE_BYTES = 1024 FAKE_BACKUP_ENABLE_PROGRESS_TIMER = True FAKE_CONTAINER = 'fake/container' FAKE_BACKUP_ID = fake.backup_id FAKE_BACKUP_ID_PART1 = fake.backup_id[:2] FAKE_BACKUP_ID_PART2 = fake.backup_id[2:4] FAKE_BACKUP_ID_REST = fake.backup_id[4:] FAKE_BACKUP = {'id': FAKE_BACKUP_ID, 'container': None} UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1, FAKE_BACKUP_ID_PART2, FAKE_BACKUP_ID) FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' FAKE_EXPORT_PATH = 'fake/export/path' FAKE_BACKUP_POSIX_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, FAKE_EXPORT_PATH) FAKE_PREFIX = 'prefix-' FAKE_CONTAINER_ENTRIES = [FAKE_PREFIX + 'one', FAKE_PREFIX + 'two', 'three'] EXPECTED_CONTAINER_ENTRIES = [FAKE_PREFIX + 'one', FAKE_PREFIX + 'two'] FAKE_OBJECT_NAME = 'fake-object-name' FAKE_OBJECT_PATH = os.path.join(FAKE_BACKUP_POSIX_PATH, FAKE_CONTAINER, FAKE_OBJECT_NAME) class PosixBackupDriverTestCase(test.TestCase): def setUp(self): super(PosixBackupDriverTestCase, self).setUp() self.ctxt = context.get_admin_context() self.override_config('backup_file_size', FAKE_FILE_SIZE) self.override_config('backup_sha_block_size_bytes', FAKE_SHA_BLOCK_SIZE_BYTES) self.override_config('backup_enable_progress_timer', FAKE_BACKUP_ENABLE_PROGRESS_TIMER) self.override_config('backup_posix_path', FAKE_BACKUP_POSIX_PATH) self.mock_object(posix, 'LOG') self.driver = posix.PosixBackupDriver(self.ctxt) def test_init(self): drv = posix.PosixBackupDriver(self.ctxt) self.assertEqual(FAKE_BACKUP_POSIX_PATH, drv.backup_path) def test_update_container_name_container_passed(self): result = self.driver.update_container_name(FAKE_BACKUP, FAKE_CONTAINER) self.assertEqual(FAKE_CONTAINER, result) def test_update_container_na_container_passed(self): result = self.driver.update_container_name(FAKE_BACKUP, None) self.assertEqual(UPDATED_CONTAINER_NAME, result) def test_put_container(self): self.mock_object(os.path, 'exists', mock.Mock(return_value=False)) self.mock_object(os, 'makedirs') self.mock_object(os, 'chmod') path = os.path.join(self.driver.backup_path, FAKE_CONTAINER) self.driver.put_container(FAKE_CONTAINER) os.path.exists.assert_called_once_with(path) os.makedirs.assert_called_once_with(path) os.chmod.assert_called_once_with(path, 0o770) def test_put_container_already_exists(self): self.mock_object(os.path, 'exists', mock.Mock(return_value=True)) self.mock_object(os, 'makedirs') self.mock_object(os, 'chmod') path = os.path.join(self.driver.backup_path, FAKE_CONTAINER) self.driver.put_container(FAKE_CONTAINER) os.path.exists.assert_called_once_with(path) self.assertEqual(0, os.makedirs.call_count) self.assertEqual(0, os.chmod.call_count) def test_put_container_exception(self): self.mock_object(os.path, 'exists', mock.Mock(return_value=False)) self.mock_object(os, 'makedirs', mock.Mock( side_effect=OSError)) self.mock_object(os, 'chmod') path 
= os.path.join(self.driver.backup_path, FAKE_CONTAINER) self.assertRaises(OSError, self.driver.put_container, FAKE_CONTAINER) os.path.exists.assert_called_once_with(path) os.makedirs.assert_called_once_with(path) self.assertEqual(0, os.chmod.call_count) def test_get_container_entries(self): self.mock_object(os, 'listdir', mock.Mock( return_value=FAKE_CONTAINER_ENTRIES)) result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX) self.assertEqual(EXPECTED_CONTAINER_ENTRIES, result) def test_get_container_entries_no_list(self): self.mock_object(os, 'listdir', mock.Mock( return_value=[])) result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX) self.assertEqual([], result) def test_get_container_entries_no_match(self): self.mock_object(os, 'listdir', mock.Mock( return_value=FAKE_CONTAINER_ENTRIES)) result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX + 'garbage') self.assertEqual([], result) def test_get_object_writer(self): self.mock_object(builtins, 'open', mock.mock_open()) self.mock_object(os, 'chmod') self.driver.get_object_writer(FAKE_CONTAINER, FAKE_OBJECT_NAME) os.chmod.assert_called_once_with(FAKE_OBJECT_PATH, 0o660) builtins.open.assert_called_once_with(FAKE_OBJECT_PATH, 'wb') def test_get_object_reader(self): self.mock_object(builtins, 'open', mock.mock_open()) self.driver.get_object_reader(FAKE_CONTAINER, FAKE_OBJECT_NAME) builtins.open.assert_called_once_with(FAKE_OBJECT_PATH, 'rb') def test_delete_object(self): self.mock_object(os, 'remove') self.driver.delete_object(FAKE_CONTAINER, FAKE_OBJECT_NAME) def test_delete_nonexistent_object(self): self.mock_object(os, 'remove', mock.Mock( side_effect=OSError)) self.assertRaises(OSError, self.driver.delete_object, FAKE_CONTAINER, FAKE_OBJECT_NAME) cinder-8.0.0/cinder/tests/unit/backup/drivers/test_backup_nfs.py0000664000567000056710000006676012701406250026174 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Tom Barron # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Backup NFS driver. 
""" import bz2 import filecmp import hashlib import os import shutil import tempfile import zlib import mock from os_brick.remotefs import remotefs as remotefs_brick from oslo_config import cfg import six from cinder.backup.drivers import nfs from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import test from cinder.tests.unit import fake_constants as fake from cinder import utils CONF = cfg.CONF FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' FAKE_HOST = 'fake_host' FAKE_EXPORT_PATH = 'fake/export/path' FAKE_BACKUP_SHARE = '%s:/%s' % (FAKE_HOST, FAKE_EXPORT_PATH) FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, FAKE_EXPORT_PATH) FAKE_BACKUP_ID = fake.backup_id FAKE_BACKUP_ID_PART1 = fake.backup_id[:2] FAKE_BACKUP_ID_PART2 = fake.backup_id[2:4] FAKE_BACKUP_ID_REST = fake.backup_id[4:] UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1, FAKE_BACKUP_ID_PART2, FAKE_BACKUP_ID) class BackupNFSShareTestCase(test.TestCase): def setUp(self): super(BackupNFSShareTestCase, self).setUp() self.ctxt = context.get_admin_context() self.mock_object(nfs, 'LOG') def test_check_configuration_no_backup_share(self): self.override_config('backup_share', None) self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path', mock.Mock(return_value=FAKE_BACKUP_PATH)) with mock.patch.object(nfs.NFSBackupDriver, '_check_configuration'): driver = nfs.NFSBackupDriver(self.ctxt) self.assertRaises(exception.ConfigNotFound, driver._check_configuration) def test_init_backup_repo_path(self): self.override_config('backup_share', FAKE_BACKUP_SHARE) self.override_config('backup_mount_point_base', FAKE_BACKUP_MOUNT_POINT_BASE) mock_remotefsclient = mock.Mock() mock_remotefsclient.get_mount_point = mock.Mock( return_value=FAKE_BACKUP_PATH) self.mock_object(nfs.NFSBackupDriver, '_check_configuration') self.mock_object(remotefs_brick, 'RemoteFsClient', mock.Mock(return_value=mock_remotefsclient)) self.mock_object(utils, 'get_root_helper') with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'): driver = nfs.NFSBackupDriver(self.ctxt) path = driver._init_backup_repo_path() self.assertEqual(FAKE_BACKUP_PATH, path) utils.get_root_helper.called_once() mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE) mock_remotefsclient.get_mount_point.assert_called_once_with( FAKE_BACKUP_SHARE) def fake_md5(arg): class result(object): def hexdigest(self): return 'fake-md5-sum' ret = result() return ret class BackupNFSSwiftBasedTestCase(test.TestCase): """Test Cases for based on Swift tempest backup tests.""" _DEFAULT_VOLUME_ID = fake.volume_id def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): vol = {'id': volume_id, 'size': 1, 'status': 'available'} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, volume_id=_DEFAULT_VOLUME_ID, container='test-container', backup_id=fake.backup_id, parent_id=None): try: db.volume_get(self.ctxt, volume_id) except exception.NotFound: self._create_volume_db_entry(volume_id=volume_id) backup = {'id': backup_id, 'size': 1, 'container': container, 'volume_id': volume_id, 'parent_id': parent_id, 'user_id': fake.user_id, 'project_id': fake.project_id, } return db.backup_create(self.ctxt, backup)['id'] def setUp(self): super(BackupNFSSwiftBasedTestCase, self).setUp() self.ctxt = context.get_admin_context() self.stubs.Set(hashlib, 'md5', fake_md5) self.volume_file = tempfile.NamedTemporaryFile() self.temp_dir = tempfile.mkdtemp() 
self.addCleanup(self.volume_file.close) self.override_config('backup_share', FAKE_BACKUP_SHARE) self.override_config('backup_mount_point_base', '/tmp') self.override_config('backup_file_size', 52428800) mock_remotefsclient = mock.Mock() mock_remotefsclient.get_mount_point = mock.Mock( return_value=self.temp_dir) self.mock_object(remotefs_brick, 'RemoteFsClient', mock.Mock(return_value=mock_remotefsclient)) # Remove tempdir. self.addCleanup(shutil.rmtree, self.temp_dir) for _i in range(0, 32): self.volume_file.write(os.urandom(1024)) def test_backup_uncompressed(self): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) def test_backup_bz2(self): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='bz2') service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) def test_backup_zlib(self): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zlib') service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) def test_backup_default_container(self): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id, container=None, backup_id=FAKE_BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID) self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_send_progress_end') @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_send_progress_notification') def test_backup_default_container_notify(self, _send_progress, _send_progress_end): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id, container=None) # If the backup_object_number_per_notification is set to 1, # the _send_progress method will be called for sure. CONF.set_override("backup_object_number_per_notification", 1) CONF.set_override("backup_enable_progress_timer", False) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the backup_object_number_per_notification is increased to # another value, the _send_progress method will not be called. _send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 10) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) self.assertFalse(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the timer is enabled, the _send_progress will be called, # since the timer can trigger the progress notification. 
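# A rough sketch of the timer mechanism being exercised (the chunked
# backup drivers use an oslo_service looping call for this; the exact
# attribute and option names here are illustrative, not verbatim):
#
#     from oslo_service import loopingcall
#     timer = loopingcall.FixedIntervalLoopingCall(
#         self._send_progress_notification, self.context, backup)
#     timer.start(interval=CONF.backup_timer_interval)
#     ...  # transfer chunks; each tick emits a progress notification
#     timer.stop()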
_send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 10) CONF.set_override("backup_enable_progress_timer", True) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) def test_backup_custom_container(self): volume_id = fake.volume_id container_name = 'fake99' self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual(backup['container'], container_name) def test_backup_shafile(self): volume_id = fake.volume_id def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix # Raise a pseudo exception.BackupDriverException. self.stubs.Set(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual(backup['container'], container_name) # Verify sha contents content1 = service._read_sha256file(backup) self.assertEqual(32 * 1024 / content1['chunk_size'], len(content1['sha256s'])) def test_backup_cmp_shafiles(self): volume_id = fake.volume_id def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix # Raise a pseudo exception.BackupDriverException. 
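# (Despite the boilerplate note above, this stub does not raise; it just
# pins _generate_object_name_prefix to a deterministic value.) With the
# fake in place, the prefix expands to
# 'volume_<volume_id>_az_fake_backup_<backup_id>' (placeholder IDs), so
# every object and sha256 file this backup writes lands under one
# predictable prefix.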
self.stubs.Set(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup_id) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual(backup['container'], container_name) # Create incremental backup with no change to contents self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup2_id, parent_id=fake.backup_id) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) self.assertEqual(deltabackup['container'], container_name) # Compare shas from both files content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) self.assertEqual(len(content1['sha256s']), len(content2['sha256s'])) self.assertEqual(set(content1['sha256s']), set(content2['sha256s'])) def test_backup_delta_two_objects_change(self): volume_id = fake.volume_id def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix # Raise a pseudo exception.BackupDriverException. self.stubs.Set(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_file_size=(8 * 1024)) self.flags(backup_sha_block_size_bytes=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup_id) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual(backup['container'], container_name) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup2_id, parent_id=fake.backup_id) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) self.assertEqual(deltabackup['container'], container_name) content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) # Verify that two shas are changed at index 16 and 20 self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) def test_backup_delta_two_blocks_in_object_change(self): volume_id = fake.volume_id def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix # Raise a pseudo 
exception.BackupDriverException. self.stubs.Set(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_file_size=(8 * 1024)) self.flags(backup_sha_block_size_bytes=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup_id) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual(backup['container'], container_name) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup2_id, parent_id=fake.backup_id) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) self.assertEqual(deltabackup['container'], container_name) # Verify that two shas are changed at index 16 and 20 content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) def test_backup_backup_metadata_fail(self): """Test of when an exception occurs in backup(). In backup(), after an exception occurs in self._backup_metadata(), we want to check the process of an exception handler. """ volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(message=_('fake')) # Raise a pseudo exception.BackupDriverException. self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata', fake_backup_metadata) # We expect that an exception be notified directly. self.assertRaises(exception.BackupDriverException, service.backup, backup, self.volume_file) def test_backup_backup_metadata_fail2(self): """Test of when an exception occurs in an exception handler. In backup(), after an exception occurs in self._backup_metadata(), we want to check the process when the second exception occurs in self.delete(). """ volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(message=_('fake')) # Raise a pseudo exception.BackupDriverException. self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata', fake_backup_metadata) def fake_delete(self, backup): raise exception.BackupOperationError() # Raise a pseudo exception.BackupOperationError. self.stubs.Set(nfs.NFSBackupDriver, 'delete', fake_delete) # We expect that the second exception is notified. 
self.assertRaises(exception.BackupOperationError, service.backup, backup, self.volume_file) def test_restore_uncompressed(self): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') self.flags(backup_sha_block_size_bytes=32) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.restore(backup, volume_id, restored_file) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) def test_restore_bz2(self): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='bz2') self.flags(backup_file_size=(1024 * 3)) self.flags(backup_sha_block_size_bytes=1024) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.restore(backup, volume_id, restored_file) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) def test_restore_zlib(self): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zlib') self.flags(backup_file_size=(1024 * 3)) self.flags(backup_sha_block_size_bytes = 1024) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.restore(backup, volume_id, restored_file) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) def test_restore_delta(self): volume_id = fake.volume_id def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix # Raise a pseudo exception.BackupDriverException. 
self.stubs.Set(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_file_size =(1024 * 8)) self.flags(backup_sha_block_size_bytes=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup_id) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup2_id, parent_id=fake.backup_id) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) service.backup(deltabackup, self.volume_file, True) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) service.restore(backup, volume_id, restored_file) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) def test_delete(self): volume_id = fake.volume_id self._create_backup_db_entry(volume_id=volume_id) service = nfs.NFSBackupDriver(self.ctxt) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.delete(backup) def test_get_compressor(self): service = nfs.NFSBackupDriver(self.ctxt) compressor = service._get_compressor('None') self.assertIsNone(compressor) compressor = service._get_compressor('zlib') self.assertEqual(compressor, zlib) compressor = service._get_compressor('bz2') self.assertEqual(compressor, bz2) self.assertRaises(ValueError, service._get_compressor, 'fake') def create_buffer(self, size): # Set up buffer of zeroed bytes fake_data = bytearray(size) if six.PY2: # On Python 2, zlib.compressor() accepts buffer, but not bytearray fake_data = buffer(fake_data) return fake_data def test_prepare_output_data_effective_compression(self): service = nfs.NFSBackupDriver(self.ctxt) fake_data = self.create_buffer(128) result = service._prepare_output_data(fake_data) self.assertEqual('zlib', result[0]) self.assertTrue(len(result) < len(fake_data)) def test_prepare_output_data_no_compresssion(self): self.flags(backup_compression_algorithm='none') service = nfs.NFSBackupDriver(self.ctxt) fake_data = self.create_buffer(128) result = service._prepare_output_data(fake_data) self.assertEqual('none', result[0]) self.assertEqual(fake_data, result[1]) def test_prepare_output_data_ineffective_compression(self): service = nfs.NFSBackupDriver(self.ctxt) fake_data = self.create_buffer(128) # Pre-compress so that compression in the driver will be ineffective. already_compressed_data = service.compressor.compress(fake_data) result = service._prepare_output_data(already_compressed_data) self.assertEqual('none', result[0]) self.assertEqual(already_compressed_data, result[1]) cinder-8.0.0/cinder/tests/unit/backup/fake_swift_client.py0000664000567000056710000000766112701406250025017 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import socket import zlib import six from six.moves import http_client from swiftclient import client as swift class FakeSwiftClient(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Connection(self, *args, **kargs): return FakeSwiftConnection() class FakeSwiftConnection(object): """Logging calls instead of executing.""" def __init__(self, *args, **kwargs): pass def head_container(self, container): if container == 'missing_container': raise swift.ClientException('fake exception', http_status=http_client.NOT_FOUND) elif container == 'unauthorized_container': raise swift.ClientException('fake exception', http_status=http_client.UNAUTHORIZED) elif container == 'socket_error_on_head': raise socket.error(111, 'ECONNREFUSED') pass def put_container(self, container): pass def get_container(self, container, **kwargs): fake_header = None fake_body = [{'name': 'backup_001'}, {'name': 'backup_002'}, {'name': 'backup_003'}] return fake_header, fake_body def head_object(self, container, name): return {'etag': 'fake-md5-sum'} def get_object(self, container, name): if container == 'socket_error_on_get': raise socket.error(111, 'ECONNREFUSED') if 'metadata' in name: fake_object_header = None metadata = {} if container == 'unsupported_version': metadata['version'] = '9.9.9' else: metadata['version'] = '1.0.0' metadata['backup_id'] = 123 metadata['volume_id'] = 123 metadata['backup_name'] = 'fake backup' metadata['backup_description'] = 'fake backup description' metadata['created_at'] = '2013-02-19 11:20:54,805' metadata['objects'] = [{ 'backup_001': {'compression': 'zlib', 'length': 10, 'offset': 0}, 'backup_002': {'compression': 'zlib', 'length': 10, 'offset': 10}, 'backup_003': {'compression': 'zlib', 'length': 10, 'offset': 20} }] metadata_json = json.dumps(metadata, sort_keys=True, indent=2) if six.PY3: metadata_json = metadata_json.encode('utf-8') fake_object_body = metadata_json return (fake_object_header, fake_object_body) fake_header = None fake_object_body = os.urandom(1024 * 1024) return (fake_header, zlib.compress(fake_object_body)) def put_object(self, container, name, reader, content_length=None, etag=None, chunk_size=None, content_type=None, headers=None, query_string=None): if container == 'socket_error_on_put': raise socket.error(111, 'ECONNREFUSED') return 'fake-md5-sum' def delete_object(self, container, name): if container == 'socket_error_on_delete': raise socket.error(111, 'ECONNREFUSED') pass cinder-8.0.0/cinder/tests/unit/backup/fake_service.py0000664000567000056710000000246212701406250023757 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.backup import driver class FakeBackupService(driver.BackupDriver): def __init__(self, context, db_driver=None): super(FakeBackupService, self).__init__(context, db_driver) def backup(self, backup, volume_file): pass def restore(self, backup, volume_id, volume_file): pass def delete(self, backup): # if backup has magic name of 'fail_on_delete' # we raise an error - useful for some tests - # otherwise we return without error if backup['display_name'] == 'fail_on_delete': raise IOError('fake') def get_backup_driver(context): return FakeBackupService(context) cinder-8.0.0/cinder/tests/unit/backup/fake_google_client2.py0000664000567000056710000000722412701406250025214 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (C) 2016 Vedams Inc. # Copyright (C) 2016 Google Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
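# NOTE: The fakes below stand in for the googleapiclient objects used by
# the Google Cloud Storage backup driver tests. Unlike the canned-response
# fake in fake_google_client.py, these persist objects under
# tempfile.gettempdir(), so a backup/restore round trip moves real bytes.
# A hedged sketch of how a test might wire them in (the patch targets and
# fixture names are illustrative assumptions, not taken from this file):
#
#     @mock.patch('cinder.backup.drivers.google.discovery.build',
#                 FakeGoogleDiscovery.Build)
#     @mock.patch('cinder.backup.drivers.google.client.GoogleCredentials',
#                 FakeGoogleCredentials)
#     def test_round_trip(self):
#         service = google_dr.GoogleBackupDriver(self.ctxt)
#         service.backup(backup, self.volume_file)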
import os import tempfile class FakeGoogleObjectInsertExecute(object): def execute(self, *args, **kwargs): return {u'md5Hash': u'Z2NzY2luZGVybWQ1'} class FakeGoogleObjectListExecute(object): def __init__(self, *args, **kwargs): self.bucket_name = kwargs['bucket'] self.prefix = kwargs['prefix'] def execute(self, *args, **kwargs): bucket_dir = tempfile.gettempdir() + '/' + self.bucket_name fake_body = [] for f in os.listdir(bucket_dir): try: f.index(self.prefix) fake_body.append({'name': f}) except Exception: pass return {'items': fake_body} class FakeGoogleBucketListExecute(object): def execute(self, *args, **kwargs): return {u'items': [{u'name': u'gcscinderbucket'}, {u'name': u'gcsbucket'}]} class FakeGoogleBucketInsertExecute(object): def execute(self, *args, **kwargs): pass class FakeMediaObject(object): def __init__(self, *args, **kwargs): self.bucket_name = kwargs['bucket'] self.object_name = kwargs['object'] class FakeGoogleObject(object): def insert(self, *args, **kwargs): object_path = (tempfile.gettempdir() + '/' + kwargs['bucket'] + '/' + kwargs['name']) kwargs['media_body']._fd.getvalue() with open(object_path, 'wb') as object_file: kwargs['media_body']._fd.seek(0) object_file.write(kwargs['media_body']._fd.read()) return FakeGoogleObjectInsertExecute() def get_media(self, *args, **kwargs): return FakeMediaObject(*args, **kwargs) def list(self, *args, **kwargs): return FakeGoogleObjectListExecute(*args, **kwargs) class FakeGoogleBucket(object): def list(self, *args, **kwargs): return FakeGoogleBucketListExecute() def insert(self, *args, **kwargs): return FakeGoogleBucketInsertExecute() class FakeGoogleDiscovery(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Build(self, *args, **kargs): return FakeDiscoveryBuild() class FakeDiscoveryBuild(object): """Logging calls instead of executing.""" def __init__(self, *args, **kwargs): pass def objects(self): return FakeGoogleObject() def buckets(self): return FakeGoogleBucket() class FakeGoogleCredentials(object): def __init__(self, *args, **kwargs): pass @classmethod def from_stream(self, *args, **kwargs): pass class FakeGoogleMediaIoBaseDownload(object): def __init__(self, fh, req, chunksize=None): object_path = (tempfile.gettempdir() + '/' + req.bucket_name + '/' + req.object_name) with open(object_path, 'rb') as object_file: fh.write(object_file.read()) def next_chunk(self, **kwargs): return (100, True) cinder-8.0.0/cinder/tests/unit/backup/fake_swift_client2.py0000664000567000056710000000565012701406250025075 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (C) 2014 TrilioData, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
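# NOTE: FakeSwiftClient2 below differs from FakeSwiftClient in
# fake_swift_client.py: rather than returning canned data, it reads and
# writes real objects under tempfile.gettempdir(), so Swift backup tests
# can verify complete backup/restore round trips. A minimal sketch of how
# a test might swap it in (fixture names are illustrative assumptions):
#
#     self.stubs.Set(swift, 'Connection',
#                    fake_swift_client2.FakeSwiftClient2.Connection)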
import hashlib
import os
import socket
import tempfile

from six.moves import http_client
from swiftclient import client as swift


class FakeSwiftClient2(object):
    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def Connection(cls, *args, **kwargs):
        return FakeSwiftConnection2()


class FakeSwiftConnection2(object):
    def __init__(self, *args, **kwargs):
        self.tempdir = tempfile.mkdtemp()

    def head_container(self, container):
        if container == 'missing_container':
            raise swift.ClientException('fake exception',
                                        http_status=http_client.NOT_FOUND)
        elif container == 'unauthorized_container':
            raise swift.ClientException('fake exception',
                                        http_status=http_client.UNAUTHORIZED)
        elif container == 'socket_error_on_head':
            raise socket.error(111, 'ECONNREFUSED')

    def put_container(self, container):
        pass

    def get_container(self, container, **kwargs):
        fake_header = None
        container_dir = tempfile.gettempdir() + '/' + container
        fake_body = []
        for f in os.listdir(container_dir):
            try:
                f.index(kwargs['prefix'])
                fake_body.append({'name': f})
            except Exception:
                pass
        return fake_header, fake_body

    def head_object(self, container, name):
        return {'etag': 'fake-md5-sum'}

    def get_object(self, container, name):
        if container == 'socket_error_on_get':
            raise socket.error(111, 'ECONNREFUSED')
        object_path = tempfile.gettempdir() + '/' + container + '/' + name
        with open(object_path, 'rb') as object_file:
            return (None, object_file.read())

    def put_object(self, container, name, reader, content_length=None,
                   etag=None, chunk_size=None, content_type=None,
                   headers=None, query_string=None):
        object_path = tempfile.gettempdir() + '/' + container + '/' + name
        # Read the stream once; a second read() after the stream is
        # exhausted would return b'' and produce a bogus checksum.
        data = reader.read()
        with open(object_path, 'wb') as object_file:
            object_file.write(data)
        return hashlib.md5(data).hexdigest()

    def delete_object(self, container, name):
        pass
cinder-8.0.0/cinder/tests/unit/backup/test_rpcapi.py0000664000567000056710000002116612701406250023650 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
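# The tests below exercise cinder.backup.rpcapi.BackupAPI without a real
# message bus: _test_backup_api() replaces RPCClient.prepare() and the
# cast/call method with local fakes, captures the arguments, and then
# asserts that the target (server, fanout, version) and the serialized
# message body match what the public API method was asked to send.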
""" Unit Tests for cinder.backup.rpcapi """ import copy import mock from cinder.backup import rpcapi as backup_rpcapi from cinder import context from cinder import objects from cinder import test from cinder.tests.unit import fake_backup from cinder.tests.unit import fake_constants as fake class BackupRpcAPITestCase(test.TestCase): def setUp(self): super(BackupRpcAPITestCase, self).setUp() self.context = context.RequestContext(fake.user_id, fake.project_id) self.fake_backup_obj = fake_backup.fake_backup_obj(self.context) def _test_backup_api(self, method, rpc_method, server=None, fanout=False, **kwargs): rpcapi = backup_rpcapi.BackupAPI() expected_retval = 'foo' if rpc_method == 'call' else None target = { "server": server, "fanout": fanout, "version": kwargs.pop('version', rpcapi.RPC_API_VERSION) } expected_msg = copy.deepcopy(kwargs) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval with mock.patch.object(rpcapi.client, "prepare") as mock_prepared: mock_prepared.side_effect = _fake_prepare_method with mock.patch.object(rpcapi.client, rpc_method) as mock_method: mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(self.context, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [self.context, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) for kwarg, value in self.fake_kwargs.items(): if isinstance(value, objects.Backup): expected_back = expected_msg[kwarg].obj_to_primitive() backup = value.obj_to_primitive() self.assertEqual(expected_back, backup) else: self.assertEqual(expected_msg[kwarg], value) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_backup(self, can_send_version): self._test_backup_api('create_backup', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, version='2.0') can_send_version.return_value = False self._test_backup_api('create_backup', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, version='1.1') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_restore_backup(self, can_send_version): self._test_backup_api('restore_backup', rpc_method='cast', server='fake_volume_host', volume_host='fake_volume_host', backup=self.fake_backup_obj, volume_id='fake_volume_id', version='2.0') can_send_version.return_value = False self._test_backup_api('restore_backup', rpc_method='cast', server='fake_volume_host', volume_host='fake_volume_host', backup=self.fake_backup_obj, volume_id=fake.volume_id, version='1.1') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_delete_backup(self, can_send_version): self._test_backup_api('delete_backup', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, version='2.0') can_send_version.return_value = False self._test_backup_api('delete_backup', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, version='1.1') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_export_record(self, can_send_version): self._test_backup_api('export_record', rpc_method='call', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, version='2.0') 
can_send_version.return_value = False self._test_backup_api('export_record', rpc_method='call', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, version='1.1') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_import_record(self, can_send_version): self._test_backup_api('import_record', rpc_method='cast', server='fake_volume_host', host='fake_volume_host', backup=self.fake_backup_obj, backup_service='fake_service', backup_url='fake_url', backup_hosts=['fake_host1', 'fake_host2'], version='2.0') can_send_version.return_value = False self._test_backup_api('import_record', rpc_method='cast', server='fake_volume_host', host='fake_volume_host', backup=self.fake_backup_obj, backup_service='fake_service', backup_url='fake_url', backup_hosts=['fake_host1', 'fake_host2'], version='1.1') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_reset_status(self, can_send_version): self._test_backup_api('reset_status', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, status='error', version='2.0') can_send_version.return_value = False self._test_backup_api('reset_status', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, status='error', version='1.1') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_check_support_to_force_delete(self, can_send_version): self._test_backup_api('check_support_to_force_delete', rpc_method='call', server='fake_volume_host', host='fake_volume_host', version='2.0') can_send_version.return_value = False self._test_backup_api('check_support_to_force_delete', rpc_method='call', server='fake_volume_host', host='fake_volume_host', version='1.1') cinder-8.0.0/cinder/tests/unit/backup/fake_google_client.py0000664000567000056710000001100712701406250025124 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (C) 2016 Vedams Inc. # Copyright (C) 2016 Google Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
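# NOTE: This in-memory fake complements fake_google_client2.py: it returns
# canned listings instead of touching disk, and it simulates failure modes
# through magic bucket names ('gcs_api_failure', 'gcs_connection_failure',
# 'gcs_oauth2_failure'), mirroring the magic-container convention used by
# the fake Swift clients above.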
import json import os import zlib from googleapiclient import errors from oauth2client import client from oslo_utils import units import six class FakeGoogleObjectInsertExecute(object): def __init__(self, *args, **kwargs): self.container_name = kwargs['bucket'] def execute(self, *args, **kwargs): if self.container_name == 'gcs_api_failure': raise errors.Error return {u'md5Hash': u'Z2NzY2luZGVybWQ1'} class FakeGoogleObjectListExecute(object): def __init__(self, *args, **kwargs): self.container_name = kwargs['bucket'] def execute(self, *args, **kwargs): if self.container_name == 'gcs_connection_failure': raise Exception return {'items': [{'name': 'backup_001'}, {'name': 'backup_002'}, {'name': 'backup_003'}]} class FakeGoogleBucketListExecute(object): def __init__(self, *args, **kwargs): self.container_name = kwargs['prefix'] def execute(self, *args, **kwargs): if self.container_name == 'gcs_oauth2_failure': raise client.Error return {u'items': [{u'name': u'gcscinderbucket'}, {u'name': u'gcsbucket'}]} class FakeGoogleBucketInsertExecute(object): def execute(self, *args, **kwargs): pass class FakeMediaObject(object): def __init__(self, *args, **kwargs): self.bucket_name = kwargs['bucket'] self.object_name = kwargs['object'] class FakeGoogleObject(object): def insert(self, *args, **kwargs): return FakeGoogleObjectInsertExecute(*args, **kwargs) def get_media(self, *args, **kwargs): return FakeMediaObject(*args, **kwargs) def list(self, *args, **kwargs): return FakeGoogleObjectListExecute(*args, **kwargs) class FakeGoogleBucket(object): def list(self, *args, **kwargs): return FakeGoogleBucketListExecute(*args, **kwargs) def insert(self, *args, **kwargs): return FakeGoogleBucketInsertExecute() class FakeGoogleDiscovery(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Build(self, *args, **kargs): return FakeDiscoveryBuild() class FakeDiscoveryBuild(object): """Logging calls instead of executing.""" def __init__(self, *args, **kwargs): pass def objects(self): return FakeGoogleObject() def buckets(self): return FakeGoogleBucket() class FakeGoogleCredentials(object): def __init__(self, *args, **kwargs): pass @classmethod def from_stream(self, *args, **kwargs): pass class FakeGoogleMediaIoBaseDownload(object): def __init__(self, fh, req, chunksize=None): if 'metadata' in req.object_name: metadata = {} metadata['version'] = '1.0.0' metadata['backup_id'] = 123 metadata['volume_id'] = 123 metadata['backup_name'] = 'fake backup' metadata['backup_description'] = 'fake backup description' metadata['created_at'] = '2016-01-09 11:20:54,805' metadata['objects'] = [{ 'backup_001': {'compression': 'zlib', 'length': 10, 'offset': 0}, 'backup_002': {'compression': 'zlib', 'length': 10, 'offset': 10}, 'backup_003': {'compression': 'zlib', 'length': 10, 'offset': 20} }] metadata_json = json.dumps(metadata, sort_keys=True, indent=2) if six.PY3: metadata_json = metadata_json.encode('utf-8') fh.write(metadata_json) else: fh.write(zlib.compress(os.urandom(units.Mi))) def next_chunk(self, **kwargs): return (100, True) cinder-8.0.0/cinder/tests/unit/test_volume_rpcapi.py0000664000567000056710000011727112701406250023775 0ustar jenkinsjenkins00000000000000# Copyright 2012, Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for cinder.volume.rpcapi """ import copy import mock from oslo_config import cfg from oslo_serialization import jsonutils from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder import test from cinder.tests.unit import fake_backup from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import utils as tests_utils from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils CONF = cfg.CONF class VolumeRpcAPITestCase(test.TestCase): def setUp(self): super(VolumeRpcAPITestCase, self).setUp() self.context = context.get_admin_context() vol = {} vol['host'] = 'fake_host' vol['availability_zone'] = CONF.storage_availability_zone vol['status'] = "available" vol['attach_status'] = "detached" vol['metadata'] = {"test_key": "test_val"} vol['size'] = 1 volume = db.volume_create(self.context, vol) kwargs = { 'status': "creating", 'progress': '0%', 'display_name': 'fake_name', 'display_description': 'fake_description'} snapshot = tests_utils.create_snapshot(self.context, vol['id'], **kwargs) source_group = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', host='fakehost@fakedrv#fakepool') cgsnapshot = tests_utils.create_cgsnapshot( self.context, consistencygroup_id=source_group.id) group = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', host='fakehost@fakedrv#fakepool', cgsnapshot_id=cgsnapshot.id) group2 = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', host='fakehost@fakedrv#fakepool', source_cgid=source_group.id) group = objects.ConsistencyGroup.get_by_id(self.context, group.id) group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id) cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id) self.fake_volume = jsonutils.to_primitive(volume) self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol) self.fake_volume_metadata = volume["volume_metadata"] self.fake_snapshot = snapshot self.fake_reservations = ["RESERVATION"] self.fake_cg = group self.fake_cg2 = group2 self.fake_src_cg = jsonutils.to_primitive(source_group) self.fake_cgsnap = cgsnapshot self.fake_backup_obj = fake_backup.fake_backup_obj(self.context) def test_serialized_volume_has_id(self): self.assertIn('id', self.fake_volume) def _test_volume_api(self, method, rpc_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') if 'rpcapi_class' in kwargs: rpcapi_class = kwargs['rpcapi_class'] del kwargs['rpcapi_class'] else: rpcapi_class = volume_rpcapi.VolumeAPI rpcapi = rpcapi_class() expected_retval = 'foo' if method == 'call' else None target = { "version": kwargs.pop('version', rpcapi.RPC_API_VERSION) } if 'request_spec' in kwargs: spec = jsonutils.to_primitive(kwargs['request_spec']) kwargs['request_spec'] = spec expected_msg = 
copy.deepcopy(kwargs) if 'volume' in expected_msg: volume = expected_msg['volume'] # NOTE(thangp): copy.deepcopy() is making oslo_versionedobjects # think that 'metadata' was changed. if isinstance(volume, objects.Volume): volume.obj_reset_changes() del expected_msg['volume'] expected_msg['volume_id'] = volume['id'] expected_msg['volume'] = volume if 'snapshot' in expected_msg: snapshot = expected_msg['snapshot'] del expected_msg['snapshot'] expected_msg['snapshot_id'] = snapshot.id expected_msg['snapshot'] = snapshot if 'cgsnapshot' in expected_msg: cgsnapshot = expected_msg['cgsnapshot'] if cgsnapshot: cgsnapshot.consistencygroup kwargs['cgsnapshot'].consistencygroup if 'backup' in expected_msg: backup = expected_msg['backup'] del expected_msg['backup'] expected_msg['backup_id'] = backup.id expected_msg['backup'] = backup if 'host' in expected_msg: del expected_msg['host'] if 'dest_host' in expected_msg: dest_host = expected_msg['dest_host'] dest_host_dict = {'host': dest_host.host, 'capabilities': dest_host.capabilities} del expected_msg['dest_host'] expected_msg['host'] = dest_host_dict if 'new_volume' in expected_msg: volume = expected_msg['new_volume'] expected_msg['new_volume_id'] = volume['id'] if 'host' in kwargs: host = kwargs['host'] elif 'group' in kwargs: host = kwargs['group']['host'] elif 'volume' in kwargs: host = kwargs['volume']['host'] elif 'snapshot' in kwargs: host = 'fake_host' elif 'cgsnapshot' in kwargs: host = kwargs['cgsnapshot'].consistencygroup.host target['server'] = utils.extract_host(host) target['topic'] = '%s.%s' % (CONF.volume_topic, host) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(kwds[kwd], target[kwd]) return rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval self.stubs.Set(rpcapi.client, "prepare", _fake_prepare_method) self.stubs.Set(rpcapi.client, rpc_method, _fake_rpc_method) retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [ctxt, method] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) for kwarg, value in self.fake_kwargs.items(): if isinstance(value, objects.Snapshot): expected_snapshot = expected_msg[kwarg].obj_to_primitive() snapshot = value.obj_to_primitive() self.assertEqual(expected_snapshot, snapshot) elif isinstance(value, objects.ConsistencyGroup): expected_cg = expected_msg[kwarg].obj_to_primitive() cg = value.obj_to_primitive() self.assertEqual(expected_cg, cg) elif isinstance(value, objects.CGSnapshot): expected_cgsnapshot = expected_msg[kwarg].obj_to_primitive() cgsnapshot = value.obj_to_primitive() self.assertEqual(expected_cgsnapshot, cgsnapshot) elif isinstance(value, objects.Volume): expected_volume = expected_msg[kwarg].obj_to_primitive() volume = value.obj_to_primitive() self.assertEqual(expected_volume, volume) elif isinstance(value, objects.Backup): expected_backup = expected_msg[kwarg].obj_to_primitive() backup = value.obj_to_primitive() self.assertEqual(expected_backup, backup) else: self.assertEqual(expected_msg[kwarg], value) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_consistencygroup(self, mock_can_send_version): self._test_volume_api('create_consistencygroup', rpc_method='cast', group=self.fake_cg, host='fake_host1', version='2.0') mock_can_send_version.return_value = False 
self._test_volume_api('create_consistencygroup', rpc_method='cast', group=self.fake_cg, host='fake_host1', version='1.26') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_delete_consistencygroup(self, mock_can_send_version): self._test_volume_api('delete_consistencygroup', rpc_method='cast', group=self.fake_cg, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('delete_consistencygroup', rpc_method='cast', group=self.fake_cg, version='1.26') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_update_consistencygroup(self, mock_can_send_version): self._test_volume_api('update_consistencygroup', rpc_method='cast', group=self.fake_cg, add_volumes=['vol1'], remove_volumes=['vol2'], version='2.0') mock_can_send_version.return_value = False self._test_volume_api('update_consistencygroup', rpc_method='cast', group=self.fake_cg, add_volumes=['vol1'], remove_volumes=['vol2'], version='1.26') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_cgsnapshot(self, mock_can_send_version): self._test_volume_api('create_cgsnapshot', rpc_method='cast', cgsnapshot=self.fake_cgsnap, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('create_cgsnapshot', rpc_method='cast', cgsnapshot=self.fake_cgsnap, version='1.31') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_delete_cgsnapshot(self, mock_can_send_version): self._test_volume_api('delete_cgsnapshot', rpc_method='cast', cgsnapshot=self.fake_cgsnap, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('delete_cgsnapshot', rpc_method='cast', cgsnapshot=self.fake_cgsnap, version='1.31') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_volume(self, can_send_version): self._test_volume_api('create_volume', rpc_method='cast', volume=self.fake_volume_obj, host='fake_host1', request_spec='fake_request_spec', filter_properties='fake_properties', allow_reschedule=True, version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_create_volume_old(self, can_send_version): # Tests backwards compatibility with older clients self._test_volume_api('create_volume', rpc_method='cast', volume=self.fake_volume_obj, host='fake_host1', request_spec='fake_request_spec', filter_properties='fake_properties', allow_reschedule=True, version='1.24') can_send_version.assert_has_calls([mock.call('2.0'), mock.call('1.32')]) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_volume_serialization(self, can_send_version): request_spec = {"metadata": self.fake_volume_metadata} self._test_volume_api('create_volume', rpc_method='cast', volume=self.fake_volume_obj, host='fake_host1', request_spec=request_spec, filter_properties='fake_properties', allow_reschedule=True, version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_delete_volume(self, can_send_version): self._test_volume_api('delete_volume', rpc_method='cast', volume=self.fake_volume_obj, unmanage_only=False, cascade=False, version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_delete_volume_old(self, can_send_version): 
self._test_volume_api('delete_volume', rpc_method='cast', volume=self.fake_volume_obj, unmanage_only=False, version='1.15') can_send_version.assert_has_calls([mock.call('2.0'), mock.call('1.40'), mock.call('1.33')]) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_delete_volume_cascade(self, can_send_version): self._test_volume_api('delete_volume', rpc_method='cast', volume=self.fake_volume_obj, unmanage_only=False, cascade=True, version='2.0') can_send_version.assert_any_call('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_snapshot(self, mock_can_send_version): self._test_volume_api('create_snapshot', rpc_method='cast', volume=self.fake_volume, snapshot=self.fake_snapshot, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('create_snapshot', rpc_method='cast', volume=self.fake_volume, snapshot=self.fake_snapshot, version='1.20') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_delete_snapshot(self, mock_can_send_version): self._test_volume_api('delete_snapshot', rpc_method='cast', snapshot=self.fake_snapshot, host='fake_host', unmanage_only=False, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('delete_snapshot', rpc_method='cast', snapshot=self.fake_snapshot, host='fake_host', unmanage_only=False, version='1.20') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_delete_snapshot_with_unmanage_only(self, mock_can_send_version): self._test_volume_api('delete_snapshot', rpc_method='cast', snapshot=self.fake_snapshot, host='fake_host', unmanage_only=True, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('delete_snapshot', rpc_method='cast', snapshot=self.fake_snapshot, host='fake_host', unmanage_only=True, version='1.20') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_attach_volume_to_instance(self, mock_can_send_version): self._test_volume_api('attach_volume', rpc_method='call', volume=self.fake_volume, instance_uuid='fake_uuid', host_name=None, mountpoint='fake_mountpoint', mode='ro', version='2.0') mock_can_send_version.return_value = False self._test_volume_api('attach_volume', rpc_method='call', volume=self.fake_volume, instance_uuid='fake_uuid', host_name=None, mountpoint='fake_mountpoint', mode='ro', version='1.11') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_attach_volume_to_host(self, mock_can_send_version): self._test_volume_api('attach_volume', rpc_method='call', volume=self.fake_volume, instance_uuid=None, host_name='fake_host', mountpoint='fake_mountpoint', mode='rw', version='2.0') mock_can_send_version.return_value = False self._test_volume_api('attach_volume', rpc_method='call', volume=self.fake_volume, instance_uuid=None, host_name='fake_host', mountpoint='fake_mountpoint', mode='rw', version='1.11') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_detach_volume(self, mock_can_send_version): self._test_volume_api('detach_volume', rpc_method='call', volume=self.fake_volume, attachment_id='fake_uuid', version="2.0") mock_can_send_version.return_value = False self._test_volume_api('detach_volume', rpc_method='call', volume=self.fake_volume, attachment_id='fake_uuid', version="1.20") @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_copy_volume_to_image(self, mock_can_send_version): 
self._test_volume_api('copy_volume_to_image', rpc_method='cast', volume=self.fake_volume, image_meta={'id': 'fake_image_id', 'container_format': 'fake_type', 'disk_format': 'fake_type'}, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('copy_volume_to_image', rpc_method='cast', volume=self.fake_volume, image_meta={'id': 'fake_image_id', 'container_format': 'fake_type', 'disk_format': 'fake_type'}, version='1.3') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_initialize_connection(self, mock_can_send_version): self._test_volume_api('initialize_connection', rpc_method='call', volume=self.fake_volume, connector='fake_connector', version='2.0') mock_can_send_version.return_value = False self._test_volume_api('initialize_connection', rpc_method='call', volume=self.fake_volume, connector='fake_connector', version='1.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_terminate_connection(self, mock_can_send_version): self._test_volume_api('terminate_connection', rpc_method='call', volume=self.fake_volume, connector='fake_connector', force=False, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('terminate_connection', rpc_method='call', volume=self.fake_volume, connector='fake_connector', force=False, version='1.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_accept_transfer(self, mock_can_send_version): self._test_volume_api('accept_transfer', rpc_method='call', volume=self.fake_volume, new_user='e5565fd0-06c8-11e3-' '8ffd-0800200c9b77', new_project='e4465fd0-06c8-11e3' '-8ffd-0800200c9a66', version='2.0') mock_can_send_version.return_value = False self._test_volume_api('accept_transfer', rpc_method='call', volume=self.fake_volume, new_user='e5565fd0-06c8-11e3-' '8ffd-0800200c9b77', new_project='e4465fd0-06c8-11e3' '-8ffd-0800200c9a66', version='1.9') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_extend_volume(self, can_send_version): self._test_volume_api('extend_volume', rpc_method='cast', volume=self.fake_volume_obj, new_size=1, reservations=self.fake_reservations, version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_extend_volume_old(self, can_send_version): self._test_volume_api('extend_volume', rpc_method='cast', volume=self.fake_volume_obj, new_size=1, reservations=self.fake_reservations, version='1.14') can_send_version.assert_has_calls([mock.call('2.0'), mock.call('1.35')]) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_migrate_volume(self, can_send_version): class FakeHost(object): def __init__(self): self.host = 'host' self.capabilities = {} dest_host = FakeHost() self._test_volume_api('migrate_volume', rpc_method='cast', volume=self.fake_volume_obj, dest_host=dest_host, force_host_copy=True, version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_migrate_volume_old(self, can_send_version): class FakeHost(object): def __init__(self): self.host = 'host' self.capabilities = {} dest_host = FakeHost() self._test_volume_api('migrate_volume', rpc_method='cast', volume=self.fake_volume_obj, dest_host=dest_host, force_host_copy=True, version='1.8') can_send_version.assert_has_calls([mock.call('2.0'), mock.call('1.36')]) 
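    # The recurring shape of these tests: with can_send_version patched to
    # return True, the client must emit the 2.0 message; flipping it to
    # False must make the same call fall back to the pinned 1.x version
    # cap. A minimal sketch of the pattern (method name and fallback
    # version are illustrative):
    #
    #     @mock.patch('oslo_messaging.RPCClient.can_send_version',
    #                 return_value=True)
    #     def test_foo(self, can_send_version):
    #         self._test_volume_api('foo', rpc_method='cast', version='2.0')
    #         can_send_version.return_value = False
    #         self._test_volume_api('foo', rpc_method='cast', version='1.x')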
@mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_migrate_volume_completion(self, can_send_version): self._test_volume_api('migrate_volume_completion', rpc_method='call', volume=self.fake_volume_obj, new_volume=self.fake_volume_obj, error=False, version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_migrate_volume_completion_old(self, can_send_version): self._test_volume_api('migrate_volume_completion', rpc_method='call', volume=self.fake_volume_obj, new_volume=self.fake_volume_obj, error=False, version='1.10') can_send_version.assert_has_calls([mock.call('2.0'), mock.call('1.36')]) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) @mock.patch('cinder.quota.DbQuotaDriver.rollback') def test_retype(self, rollback, can_send_version): class FakeHost(object): def __init__(self): self.host = 'host' self.capabilities = {} dest_host = FakeHost() self._test_volume_api('retype', rpc_method='cast', volume=self.fake_volume_obj, new_type_id='fake', dest_host=dest_host, migration_policy='never', reservations=self.fake_reservations, old_reservations=self.fake_reservations, version='2.0') rollback.assert_not_called() can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', side_effect=[False, True]) @mock.patch('cinder.quota.DbQuotaDriver.rollback') def test_retype_137(self, rollback, can_send_version): class FakeHost(object): def __init__(self): self.host = 'host' self.capabilities = {} dest_host = FakeHost() self._test_volume_api('retype', rpc_method='cast', volume=self.fake_volume_obj, new_type_id='fake', dest_host=dest_host, migration_policy='never', reservations=self.fake_reservations, old_reservations=self.fake_reservations, version='1.37') rollback.assert_not_called() can_send_version.assert_any_call('2.0') can_send_version.assert_any_call('1.37') @mock.patch('cinder.quota.DbQuotaDriver.rollback') @mock.patch('oslo_messaging.RPCClient.can_send_version', side_effect=[False, False, True]) def test_retype_version_134(self, can_send_version, rollback): class FakeHost(object): def __init__(self): self.host = 'host' self.capabilities = {} dest_host = FakeHost() self._test_volume_api('retype', rpc_method='cast', volume=self.fake_volume_obj, new_type_id='fake', dest_host=dest_host, migration_policy='never', reservations=self.fake_reservations, old_reservations=self.fake_reservations, version='1.34') self.assertTrue(rollback.called) can_send_version.assert_any_call('2.0') can_send_version.assert_any_call('1.37') can_send_version.assert_any_call('1.34') @mock.patch('cinder.quota.DbQuotaDriver.rollback') @mock.patch('oslo_messaging.RPCClient.can_send_version', side_effect=[False, False, False]) def test_retype_version_112(self, can_send_version, rollback): class FakeHost(object): def __init__(self): self.host = 'host' self.capabilities = {} dest_host = FakeHost() self._test_volume_api('retype', rpc_method='cast', volume=self.fake_volume_obj, new_type_id='fake', dest_host=dest_host, migration_policy='never', reservations=self.fake_reservations, old_reservations=self.fake_reservations, version='1.12') self.assertTrue(rollback.called) can_send_version.assert_any_call('1.37') can_send_version.assert_any_call('1.34') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_manage_existing(self, mock_can_send_version): self._test_volume_api('manage_existing', rpc_method='cast', 
volume=self.fake_volume, ref={'lv_name': 'foo'}, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('manage_existing', rpc_method='cast', volume=self.fake_volume, ref={'lv_name': 'foo'}, version='1.15') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_manage_existing_snapshot(self, mock_can_send_version): volume_update = {'host': 'fake_host'} snpshot = { 'id': fake.snapshot_id, 'volume_id': fake.volume_id, 'status': "creating", 'progress': '0%', 'volume_size': 0, 'display_name': 'fake_name', 'display_description': 'fake_description', 'volume': fake_volume.fake_db_volume(**volume_update), 'expected_attrs': ['volume'], } my_fake_snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snpshot) self._test_volume_api('manage_existing_snapshot', rpc_method='cast', snapshot=my_fake_snapshot_obj, ref='foo', host='fake_host', version='2.0') mock_can_send_version.return_value = False self._test_volume_api('manage_existing_snapshot', rpc_method='cast', snapshot=my_fake_snapshot_obj, ref='foo', host='fake_host', version='1.28') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_promote_replica(self, mock_can_send_version): self._test_volume_api('promote_replica', rpc_method='cast', volume=self.fake_volume, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('promote_replica', rpc_method='cast', volume=self.fake_volume, version='1.17') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_reenable_replica(self, mock_can_send_version): self._test_volume_api('reenable_replication', rpc_method='cast', volume=self.fake_volume, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('reenable_replication', rpc_method='cast', volume=self.fake_volume, version='1.17') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_freeze_host(self, mock_can_send_version): self._test_volume_api('freeze_host', rpc_method='call', host='fake_host', version='2.0') mock_can_send_version.return_value = False self._test_volume_api('freeze_host', rpc_method='call', host='fake_host', version='1.39') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_thaw_host(self, mock_can_send_version): self._test_volume_api('thaw_host', rpc_method='call', host='fake_host', version='2.0') mock_can_send_version.return_value = False self._test_volume_api('thaw_host', rpc_method='call', host='fake_host', version='1.39') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_failover_host(self, mock_can_send_version): self._test_volume_api('failover_host', rpc_method='cast', host='fake_host', secondary_backend_id='fake_backend', version='2.0') mock_can_send_version.return_value = False self._test_volume_api('failover_host', rpc_method='cast', host='fake_host', secondary_backend_id='fake_backend', version='1.39') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_consistencygroup_from_src_cgsnapshot( self, mock_can_send_version): self._test_volume_api('create_consistencygroup_from_src', rpc_method='cast', group=self.fake_cg, cgsnapshot=self.fake_cgsnap, source_cg=None, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('create_consistencygroup_from_src', rpc_method='cast', group=self.fake_cg, cgsnapshot=self.fake_cgsnap, source_cg=None, version='1.31') 
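    # NOTE: The retype tests above pass side_effect=[False, ...] instead of
    # return_value so that successive can_send_version() checks walk down
    # the version ladder (2.0 -> 1.37 -> 1.34), letting a single mock model
    # a peer that only understands an older RPC revision.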
@mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_consistencygroup_from_src_cg(self, mock_can_send_version): self._test_volume_api('create_consistencygroup_from_src', rpc_method='cast', group=self.fake_cg2, cgsnapshot=None, source_cg=self.fake_src_cg, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('create_consistencygroup_from_src', rpc_method='cast', group=self.fake_cg2, cgsnapshot=None, source_cg=self.fake_src_cg, version='1.31') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_get_capabilities(self, mock_can_send_version): self._test_volume_api('get_capabilities', rpc_method='call', host='fake_host', discover=True, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('get_capabilities', rpc_method='call', host='fake_host', discover=True, version='1.29') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_remove_export(self, mock_can_send_version): self._test_volume_api('remove_export', rpc_method='cast', volume=self.fake_volume, version='2.0') mock_can_send_version.return_value = False self._test_volume_api('remove_export', rpc_method='cast', volume=self.fake_volume, version='1.30') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_get_backup_device(self, mock_can_send_version): self._test_volume_api('get_backup_device', rpc_method='call', backup=self.fake_backup_obj, volume=self.fake_volume_obj, version='2.0') mock_can_send_version.return_value = False self.assertRaises(exception.ServiceTooOld, self._test_volume_api, 'get_backup_device', rpc_method='call', backup=self.fake_backup_obj, volume=self.fake_volume_obj, version='1.38') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_secure_file_operations_enabled(self, mock_can_send_version): self._test_volume_api('secure_file_operations_enabled', rpc_method='call', volume=self.fake_volume_obj, version='2.0') mock_can_send_version.return_value = False self.assertRaises(exception.ServiceTooOld, self._test_volume_api, 'secure_file_operations_enabled', rpc_method='call', volume=self.fake_volume_obj, version='1.38') cinder-8.0.0/cinder/tests/unit/test_emc_vmax.py0000664000567000056710000116363612701406250022736 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
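# The VMAX tests below never contact a real ECOM/SMI-S provider: the dict
# subclasses (EMC_StorageVolume, SE_ConcreteJob, and friends) mimic pywbem
# CIM instances, FakeDB stands in for the cinder database API, and a fake
# ECOM connection (see FakeEcomConnection, used by FakeDB.volume_get)
# serves canned responses so the driver logic runs entirely in memory.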

import os
import shutil
import tempfile
import time
from xml.dom import minidom

import mock
from oslo_service import loopingcall
from oslo_utils import units
import six

from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.emc import emc_vmax_common
from cinder.volume.drivers.emc import emc_vmax_fast
from cinder.volume.drivers.emc import emc_vmax_fc
from cinder.volume.drivers.emc import emc_vmax_iscsi
from cinder.volume.drivers.emc import emc_vmax_masking
from cinder.volume.drivers.emc import emc_vmax_provision
from cinder.volume.drivers.emc import emc_vmax_provision_v3
from cinder.volume.drivers.emc import emc_vmax_utils
from cinder.volume import volume_types

CINDER_EMC_CONFIG_DIR = '/etc/cinder/'


class EMC_StorageVolume(dict):
    pass


class CIM_StorageExtent(dict):
    pass


class SE_InitiatorMaskingGroup(dict):
    pass


class SE_ConcreteJob(dict):
    pass


class SE_StorageHardwareID(dict):
    pass


class CIM_ReplicationServiceCapabilities(dict):
    pass


class SYMM_SrpStoragePool(dict):
    pass


class SYMM_LunMasking(dict):
    pass


class CIM_DeviceMaskingGroup(dict):
    pass


class EMC_LunMaskingSCSIProtocolController(dict):
    pass


class CIM_TargetMaskingGroup(dict):
    pass


class EMC_StorageHardwareID(dict):
    pass


class CIM_IPProtocolEndpoint(dict):
    pass


class SE_ReplicationSettingData(dict):
    def __init__(self, *args, **kwargs):
        self['DefaultInstance'] = self.createInstance()

    def createInstance(self):
        self.DesiredCopyMethodology = 0


class Fake_CIMProperty(object):

    def fake_getCIMProperty(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = True
        return cimproperty

    def fake_getBlockSizeCIMProperty(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = '512'
        return cimproperty

    def fake_getConsumableBlocksCIMProperty(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = '12345'
        return cimproperty

    def fake_getIsConcatenatedCIMProperty(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = True
        return cimproperty

    def fake_getIsCompositeCIMProperty(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = False
        return cimproperty

    def fake_getTotalManagedSpaceCIMProperty(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = '20000000000'
        return cimproperty

    def fake_getRemainingManagedSpaceCIMProperty(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = '10000000000'
        return cimproperty

    def fake_getElementNameCIMProperty(self, name):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = name
        return cimproperty

    def fake_getSupportedReplicationTypes(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.value = [2, 10]
        return cimproperty

    def fake_getipv4address(self):
        cimproperty = Fake_CIMProperty()
        cimproperty.key = 'IPv4Address'
        cimproperty.value = '10.10.10.10'
        return cimproperty


class Fake_CIM_TierPolicyServiceCapabilities(object):

    def fake_getpolicyinstance(self):
        classinstance = Fake_CIM_TierPolicyServiceCapabilities()
        classcimproperty = Fake_CIMProperty()
        cimproperty = classcimproperty.fake_getCIMProperty()
        cimproperties = {u'SupportsTieringPolicies': cimproperty}
        classinstance.properties = cimproperties
        return classinstance


class FakeCIMInstanceName(dict):

    def fake_getinstancename(self, classname, bindings):
        instancename = FakeCIMInstanceName()
        for key in bindings:
            instancename[key] = bindings[key]
        instancename.classname = classname
        instancename.namespace = 'root/emc'
        return instancename


class FakeDB(object):

    def volume_update(self, context, volume_id,
model_update): pass def volume_get(self, context, volume_id): conn = FakeEcomConnection() objectpath = {} objectpath['CreationClassName'] = 'Symm_StorageVolume' if volume_id == 'vol1': device_id = '1' objectpath['DeviceID'] = device_id else: objectpath['DeviceID'] = volume_id return conn.GetInstance(objectpath) def volume_get_all_by_group(self, context, group_id): volumes = [] volumes.append(EMCVMAXCommonData.test_source_volume) return volumes def consistencygroup_get(self, context, cg_group_id): return EMCVMAXCommonData.test_CG def snapshot_get_all_for_cgsnapshot(self, context, cgsnapshot_id): snapshots = [] snapshots.append(EMCVMAXCommonData.test_snapshot) return snapshots class EMCVMAXCommonData(object): wwpn1 = "123456789012345" wwpn2 = "123456789054321" connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian: 01: 222', 'wwpns': [wwpn1, wwpn2], 'wwnns': ["223456789012345", "223456789054321"], 'host': 'fakehost'} target_wwns = [wwn[::-1] for wwn in connector['wwpns']] fabric_name_prefix = "fakeFabric" end_point_map = {connector['wwpns'][0]: [target_wwns[0]], connector['wwpns'][1]: [target_wwns[1]]} device_map = {} for wwn in connector['wwpns']: fabric_name = ''.join([fabric_name_prefix, wwn[-2:]]) target_wwn = wwn[::-1] fabric_map = {'initiator_port_wwn_list': [wwn], 'target_port_wwn_list': [target_wwn] } device_map[fabric_name] = fabric_map default_storage_group = ( u'//10.10.10.10/root/emc: SE_DeviceMaskingGroup.InstanceID=' '"SYMMETRIX+000198700440+OS_default_GOLD1_SG"') storage_system = 'SYMMETRIX+000195900551' storage_system_v3 = 'SYMMETRIX-+-000197200056' port_group = 'OS-portgroup-PG' lunmaskctrl_id = ( 'SYMMETRIX+000195900551+OS-fakehost-gold-I-MV') lunmaskctrl_name = ( 'OS-fakehost-gold-I-MV') initiatorgroup_id = ( 'SYMMETRIX+000195900551+OS-fakehost-IG') initiatorgroup_name = 'OS-fakehost-IG' initiatorgroup_creationclass = 'SE_InitiatorMaskingGroup' iscsi_initiator = 'iqn.1993-08.org.debian' storageextent_creationclass = 'CIM_StorageExtent' initiator1 = 'iqn.1993-08.org.debian: 01: 1a2b3c4d5f6g' stconf_service_creationclass = 'Symm_StorageConfigurationService' ctrlconf_service_creationclass = 'Symm_ControllerConfigurationService' elementcomp_service_creationclass = 'Symm_ElementCompositionService' storreloc_service_creationclass = 'Symm_StorageRelocationService' replication_service_creationclass = 'EMC_ReplicationService' vol_creationclass = 'Symm_StorageVolume' pool_creationclass = 'Symm_VirtualProvisioningPool' lunmask_creationclass = 'Symm_LunMaskingSCSIProtocolController' lunmask_creationclass2 = 'Symm_LunMaskingView' hostedservice_creationclass = 'CIM_HostedService' policycapability_creationclass = 'CIM_TierPolicyServiceCapabilities' policyrule_creationclass = 'Symm_TierPolicyRule' assoctierpolicy_creationclass = 'CIM_StorageTier' storagepool_creationclass = 'Symm_VirtualProvisioningPool' srpstoragepool_creationclass = 'Symm_SRPStoragePool' storagegroup_creationclass = 'CIM_DeviceMaskingGroup' hardwareid_creationclass = 'EMC_StorageHardwareID' replicationgroup_creationclass = 'CIM_ReplicationGroup' storagepoolid = 'SYMMETRIX+000195900551+U+gold' storagegroupname = 'OS-fakehost-gold-I-SG' defaultstoragegroupname = 'OS_default_GOLD1_SG' storagevolume_creationclass = 'EMC_StorageVolume' policyrule = 'gold' poolname = 'gold' totalmanagedspace_bits = '1000000000000' subscribedcapacity_bits = '500000000000' totalmanagedspace_gbs = 931 subscribedcapacity_gbs = 466 fake_host = 'HostX@Backend#gold+1234567891011' fake_host_v3 = 'HostX@Backend#Bronze+SRP_1+1234567891011' 
fake_host_2_v3 = 'HostY@Backend#SRP_1+1234567891011' unit_creationclass = 'CIM_ProtocolControllerForUnit' storage_type = 'gold' keybindings = {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000195900551', 'DeviceID': u'1', 'SystemCreationClassName': u'Symm_StorageSystem'} keybindings2 = {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000195900551', 'DeviceID': u'99999', 'SystemCreationClassName': u'Symm_StorageSystem'} provider_location = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings} provider_location2 = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings2} provider_location_multi_pool = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings, 'version': '2.2.0'} block_size = 512 majorVersion = 1 minorVersion = 2 revNumber = 3 block_size = 512 metaHead_volume = {'DeviceID': 10, 'ConsumableBlocks': 1000} meta_volume1 = {'DeviceID': 11, 'ConsumableBlocks': 200} meta_volume2 = {'DeviceID': 12, 'ConsumableBlocks': 300} properties = {'ConsumableBlocks': '12345', 'BlockSize': '512'} test_volume = {'name': 'vol1', 'size': 1, 'volume_name': 'vol1', 'id': '1', 'device_id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': 'abc', 'provider_location': six.text_type(provider_location), 'status': 'available', 'host': fake_host, 'NumberOfBlocks': 100, 'BlockSize': block_size } test_volume_v2 = {'name': 'vol1', 'size': 1, 'volume_name': 'vol1', 'id': 'vol1', 'device_id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': 'abc', 'provider_location': six.text_type(provider_location), 'status': 'available', 'host': fake_host, 'NumberOfBlocks': 100, 'BlockSize': block_size } test_volume_v3 = {'name': 'vol1', 'size': 1, 'volume_name': 'vol1', 'id': 'vol1', 'device_id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': 'abc', 'provider_location': six.text_type(provider_location), 'status': 'available', 'host': fake_host_v3, 'NumberOfBlocks': 100, 'BlockSize': block_size } test_volume_CG = {'name': 'volInCG', 'consistencygroup_id': 'abc', 'size': 1, 'volume_name': 'volInCG', 'id': 'volInCG', 'device_id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'volInCG', 'display_description': 'test volume in Consistency group', 'volume_type_id': 'abc', 'provider_location': six.text_type(provider_location), 'status': 'available', 'host': fake_host } test_volume_CG_v3 = {'name': 'volInCG', 'consistencygroup_id': 'abc', 'size': 1, 'volume_name': 'volInCG', 'id': 'volInCG', 'device_id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'volInCG', 'display_description': 'test volume in Consistency group', 'volume_type_id': 'abc', 'provider_location': six.text_type(provider_location), 'status': 'available', 'host': fake_host_v3} test_failed_volume = {'name': 'failed_vol', 'size': 1, 'volume_name': 'failed_vol', 'id': '4', 'device_id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'failed_vol', 'display_description': 'test failed volume', 'volume_type_id': 'abc', 'host': fake_host} failed_delete_vol = {'name': 'failed_delete_vol', 'size': '-1', 'volume_name': 'failed_delete_vol', 'id': '99999', 'device_id': '99999', 'provider_auth': None, 'project_id': 'project', 'display_name': 'failed delete vol', 'display_description': 'failed delete 
volume', 'volume_type_id': 'abc', 'provider_location': six.text_type(provider_location2), 'host': fake_host} test_source_volume = {'size': 1, 'volume_type_id': 'sourceid', 'display_name': 'sourceVolume', 'name': 'sourceVolume', 'device_id': '1', 'volume_name': 'vmax-154326', 'provider_auth': None, 'project_id': 'project', 'id': '2', 'host': fake_host, 'provider_location': six.text_type(provider_location), 'display_description': 'snapshot source volume'} test_source_volume_v3 = {'size': 1, 'volume_type_id': 'sourceid', 'display_name': 'sourceVolume', 'name': 'sourceVolume', 'device_id': '1', 'volume_name': 'vmax-154326', 'provider_auth': None, 'project_id': 'project', 'id': '2', 'host': fake_host_v3, 'provider_location': six.text_type(provider_location), 'display_description': 'snapshot source volume'} test_CG = {'name': 'myCG1', 'id': '12345abcde', 'volume_type_id': 'abc', 'status': fields.ConsistencyGroupStatus.AVAILABLE } test_snapshot = {'name': 'myCG1', 'id': '12345abcde', 'status': 'available', 'host': fake_host } test_CG_snapshot = {'name': 'testSnap', 'id': '12345abcde', 'consistencygroup_id': '123456789', 'status': 'available', 'snapshots': [], 'consistencygroup': test_CG } location_info = {'location_info': '000195900551#silver#None', 'storage_protocol': 'ISCSI'} location_info_v3 = {'location_info': '1234567891011#SRP_1#Bronze#DSS', 'storage_protocol': 'FC'} test_host = {'capabilities': location_info, 'host': 'fake_host'} test_host_v3 = {'capabilities': location_info_v3, 'host': fake_host_2_v3} initiatorNames = ["123456789012345", "123456789054321"] storagegroups = [{'CreationClassName': storagegroup_creationclass, 'ElementName': storagegroupname}, {'CreationClassName': storagegroup_creationclass, 'ElementName': 'OS-SRP_1-Bronze-DSS-SG'}] test_ctxt = {} new_type = {} diff = {} extra_specs = {'storagetype:pool': u'SRP_1', 'volume_backend_name': 'V3_BE', 'storagetype:workload': u'DSS', 'storagetype:slo': u'Bronze', 'storagetype:array': u'1234567891011', 'isV3': True, 'portgroupname': u'OS-portgroup-PG'} remainingSLOCapacity = '123456789' SYNCHRONIZED = 4 UNSYNCHRONIZED = 3 class FakeLookupService(object): def get_device_mapping_from_network(self, initiator_wwns, target_wwns): return EMCVMAXCommonData.device_map class FakeEcomConnection(object): def __init__(self, *args, **kwargs): self.data = EMCVMAXCommonData() def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None, ElementType=None, Size=None, SyncType=None, SourceElement=None, TargetElement=None, Operation=None, Synchronization=None, TheElements=None, TheElement=None, LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None, ProtocolControllers=None, MaskingGroup=None, Members=None, HardwareId=None, ElementSource=None, EMCInPools=None, CompositeType=None, EMCNumberOfMembers=None, EMCBindElements=None, InElements=None, TargetPool=None, RequestedState=None, ReplicationGroup=None, ReplicationType=None, ReplicationSettingData=None, GroupName=None, Force=None, RemoveElements=None, RelationshipName=None, SourceGroup=None, TargetGroup=None, Goal=None, Type=None, EMCSRP=None, EMCSLO=None, EMCWorkload=None, EMCCollections=None, InitiatorMaskingGroup=None, DeviceMaskingGroup=None, TargetMaskingGroup=None, ProtocolController=None, StorageID=None, IDType=None, WaitForCopyState=None, Collections=None): rc = 0 myjob = SE_ConcreteJob() myjob.classname = 'SE_ConcreteJob' myjob['InstanceID'] = '9999' myjob['status'] = 'success' myjob['type'] = ElementName if Size == -1073741824 and ( MethodName == 
'CreateOrModifyCompositeElement'): rc = 0 myjob = SE_ConcreteJob() myjob.classname = 'SE_ConcreteJob' myjob['InstanceID'] = '99999' myjob['status'] = 'success' myjob['type'] = 'failed_delete_vol' if ElementName == 'failed_vol' and ( MethodName == 'CreateOrModifyElementFromStoragePool'): rc = 10 myjob['status'] = 'failure' elif TheElements and TheElements[0]['DeviceID'] == '99999' and ( MethodName == 'ReturnElementsToStoragePool'): rc = 10 myjob['status'] = 'failure' elif HardwareId: rc = 0 targetendpoints = {} endpoints = [] endpoint = {} endpoint['Name'] = (EMCVMAXCommonData.end_point_map[ EMCVMAXCommonData.connector['wwpns'][0]]) endpoints.append(endpoint) endpoint2 = {} endpoint2['Name'] = (EMCVMAXCommonData.end_point_map[ EMCVMAXCommonData.connector['wwpns'][1]]) endpoints.append(endpoint2) targetendpoints['TargetEndpoints'] = endpoints return rc, targetendpoints elif ReplicationType and ( MethodName == 'GetDefaultReplicationSettingData'): rc = 0 rsd = SE_ReplicationSettingData() rsd['DefaultInstance'] = SE_ReplicationSettingData() return rc, rsd if MethodName == 'CreateStorageHardwareID': ret = {} rc = 0 ret['HardwareID'] = self.data.iscsi_initiator return rc, ret if MethodName == 'GetSupportedSizeRange': ret = {} rc = 0 ret['EMCInformationSource'] = 3 ret['EMCRemainingSLOCapacity'] = self.data.remainingSLOCapacity return rc, ret elif MethodName == 'GetCompositeElements': ret = {} rc = 0 ret['OutElements'] = [self.data.metaHead_volume, self.data.meta_volume1, self.data.meta_volume2] return rc, ret job = {'Job': myjob} return rc, job def EnumerateInstanceNames(self, name): result = None if name == 'EMC_StorageConfigurationService': result = self._enum_stconfsvcs() elif name == 'EMC_ControllerConfigurationService': result = self._enum_ctrlconfsvcs() elif name == 'Symm_ElementCompositionService': result = self._enum_elemcompsvcs() elif name == 'Symm_StorageRelocationService': result = self._enum_storrelocsvcs() elif name == 'EMC_ReplicationService': result = self._enum_replicsvcs() elif name == 'EMC_VirtualProvisioningPool': result = self._enum_pools() elif name == 'EMC_StorageVolume': result = self._enum_storagevolumes() elif name == 'Symm_StorageVolume': result = self._enum_storagevolumes() elif name == 'CIM_StorageVolume': result = self._enum_storagevolumes() elif name == 'CIM_ProtocolControllerForUnit': result = self._enum_unitnames() elif name == 'EMC_LunMaskingSCSIProtocolController': result = self._enum_lunmaskctrls() elif name == 'EMC_StorageProcessorSystem': result = self._enum_processors() elif name == 'EMC_StorageHardwareIDManagementService': result = self._enum_hdwidmgmts() elif name == 'SE_StorageHardwareID': result = self._enum_storhdwids() elif name == 'EMC_StorageSystem': result = self._enum_storagesystems() elif name == 'Symm_TierPolicyRule': result = self._enum_policyrules() elif name == 'CIM_ReplicationServiceCapabilities': result = self._enum_repservcpbls() elif name == 'SE_StorageSynchronized_SV_SV': result = self._enum_storageSyncSvSv() elif name == 'Symm_SRPStoragePool': result = self._enum_srpstoragepool() else: result = self._default_enum() return result def EnumerateInstances(self, name): result = None if name == 'EMC_VirtualProvisioningPool': result = self._enum_pool_details() elif name == 'SE_StorageHardwareID': result = self._enum_storhdwids() elif name == 'SE_ManagementServerSoftwareIdentity': result = self._enum_sw_identity() else: result = self._default_enum() return result def GetInstance(self, objectpath, LocalOnly=False): try: name = 
objectpath['CreationClassName'] except KeyError: name = objectpath.classname result = None if name == 'Symm_StorageVolume': result = self._getinstance_storagevolume(objectpath) elif name == 'CIM_ProtocolControllerForUnit': result = self._getinstance_unit(objectpath) elif name == 'SE_ConcreteJob': result = self._getinstance_job(objectpath) elif name == 'SE_StorageSynchronized_SV_SV': result = self._getinstance_syncsvsv(objectpath) elif name == 'Symm_TierPolicyServiceCapabilities': result = self._getinstance_policycapabilities(objectpath) elif name == 'CIM_TierPolicyServiceCapabilities': result = self._getinstance_policycapabilities(objectpath) elif name == 'SE_InitiatorMaskingGroup': result = self._getinstance_initiatormaskinggroup(objectpath) elif name == 'CIM_InitiatorMaskingGroup': result = self._getinstance_initiatormaskinggroup(objectpath) elif name == 'SE_StorageHardwareID': result = self._getinstance_storagehardwareid(objectpath) elif name == 'CIM_ReplicationGroup': result = self._getinstance_replicationgroup(objectpath) elif name == 'Symm_SRPStoragePool': result = self._getinstance_srpstoragepool(objectpath) elif name == 'CIM_TargetMaskingGroup': result = self._getinstance_targetmaskinggroup(objectpath) elif name == 'CIM_DeviceMaskingGroup': result = self._getinstance_devicemaskinggroup(objectpath) elif name == 'EMC_StorageHardwareID': result = self._getinstance_storagehardwareid(objectpath) elif name == 'Symm_VirtualProvisioningPool': result = self._getinstance_pool(objectpath) elif name == 'Symm_ReplicationServiceCapabilities': result = self._getinstance_replicationServCapabilities(objectpath) else: result = self._default_getinstance(objectpath) return result def ModifyInstance(self, objectpath, PropertyList=None): pass def DeleteInstance(self, objectpath): pass def Associators(self, objectpath, ResultClass='EMC_StorageHardwareID'): result = None if '_StorageHardwareID' in ResultClass: result = self._assoc_hdwid() elif ResultClass == 'EMC_iSCSIProtocolEndpoint': result = self._assoc_endpoint() elif ResultClass == 'EMC_StorageVolume': result = self._assoc_storagevolume(objectpath) elif ResultClass == 'Symm_LunMaskingView': result = self._assoc_maskingview() elif ResultClass == 'CIM_DeviceMaskingGroup': result = self._assoc_storagegroup() elif ResultClass == 'CIM_StorageExtent': result = self._assoc_storageextent() elif ResultClass == 'EMC_LunMaskingSCSIProtocolController': result = self._assoc_lunmaskctrls() elif ResultClass == 'CIM_TargetMaskingGroup': result = self._assoc_portgroup() else: result = self._default_assoc(objectpath) return result def AssociatorNames(self, objectpath, ResultClass='default', AssocClass='default'): result = None if objectpath == 'point_to_storage_instance_names': result = ['FirstStorageTierInstanceNames'] if ResultClass != 'default': result = self.ResultClassHelper(ResultClass, objectpath) if result is None and AssocClass != 'default': result = self.AssocClassHelper(AssocClass, objectpath) if result is None: result = self._default_assocnames(objectpath) return result def AssocClassHelper(self, AssocClass, objectpath): if AssocClass == 'CIM_HostedService': result = self._assocnames_hostedservice() elif AssocClass == 'CIM_AssociatedTierPolicy': result = self._assocnames_assoctierpolicy() elif AssocClass == 'CIM_OrderedMemberOfCollection': result = self._enum_storagevolumes() elif AssocClass == 'CIM_BindsTo': result = self._assocnames_bindsto() elif AssocClass == 'CIM_MemberOfCollection': result = self._assocnames_memberofcollection() else: result = None 
return result def ResultClassHelper(self, ResultClass, objectpath): if ResultClass == 'EMC_LunMaskingSCSIProtocolController': result = self._assocnames_lunmaskctrl() elif ResultClass == 'CIM_TierPolicyServiceCapabilities': result = self._assocnames_policyCapabilities() elif ResultClass == 'Symm_TierPolicyRule': result = self._assocnames_policyrule() elif ResultClass == 'CIM_StoragePool': result = self._assocnames_storagepool() elif ResultClass == 'EMC_VirtualProvisioningPool': result = self._assocnames_storagepool() elif ResultClass == 'CIM_DeviceMaskingGroup': result = self._assocnames_storagegroup() elif ResultClass == 'EMC_StorageVolume': result = self._enum_storagevolumes() elif ResultClass == 'Symm_StorageVolume': result = self._enum_storagevolumes() elif ResultClass == 'SE_InitiatorMaskingGroup': result = self._enum_initiatorMaskingGroup() elif ResultClass == 'CIM_InitiatorMaskingGroup': result = self._enum_initiatorMaskingGroup() elif ResultClass == 'CIM_StorageExtent': result = self._enum_storage_extent() elif ResultClass == 'SE_StorageHardwareID': result = self._enum_storhdwids() elif ResultClass == 'CIM_ReplicationServiceCapabilities': result = self._enum_repservcpbls() elif ResultClass == 'CIM_ReplicationGroup': result = self._enum_repgroups() elif ResultClass == 'Symm_FCSCSIProtocolEndpoint': result = self._enum_fcscsiendpoint() elif ResultClass == 'EMC_FCSCSIProtocolEndpoint': result = self._enum_fcscsiendpoint() elif ResultClass == 'Symm_SRPStoragePool': result = self._enum_srpstoragepool() elif ResultClass == 'Symm_StoragePoolCapabilities': result = self._enum_storagepoolcapabilities() elif ResultClass == 'CIM_storageSetting': result = self._enum_storagesettings() elif ResultClass == 'CIM_TargetMaskingGroup': result = self._assocnames_portgroup() elif ResultClass == 'CIM_InitiatorMaskingGroup': result = self._enum_initMaskingGroup() elif ResultClass == 'Symm_LunMaskingView': result = self._enum_maskingView() elif ResultClass == 'EMC_Meta': result = self._enum_metavolume() elif ResultClass == 'EMC_FrontEndSCSIProtocolController': result = self._enum_maskingView() elif ResultClass == 'CIM_TierPolicyRule': result = self._assocnames_tierpolicy(objectpath) else: result = None return result def ReferenceNames(self, objectpath, ResultClass='CIM_ProtocolControllerForUnit'): result = None if ResultClass == 'CIM_ProtocolControllerForUnit': result = self._ref_unitnames2() else: result = self._default_ref(objectpath) return result def _ref_unitnames(self): unitnames = [] unitname = {} dependent = {} dependent['CreationClassName'] = self.data.vol_creationclass dependent['DeviceID'] = self.data.test_volume['id'] dependent['ElementName'] = self.data.test_volume['name'] dependent['SystemName'] = self.data.storage_system antecedent = {} antecedent['CreationClassName'] = self.data.lunmask_creationclass antecedent['DeviceID'] = self.data.lunmaskctrl_id antecedent['SystemName'] = self.data.storage_system unitname['Dependent'] = dependent unitname['Antecedent'] = antecedent unitname['CreationClassName'] = self.data.unit_creationclass unitnames.append(unitname) return unitnames def mv_entry(self, mvname): unitname = {} dependent = {} dependent['CreationClassName'] = self.data.vol_creationclass dependent['DeviceID'] = self.data.test_volume['id'] dependent['ElementName'] = self.data.test_volume['name'] dependent['SystemName'] = self.data.storage_system antecedent = SYMM_LunMasking() antecedent['CreationClassName'] = self.data.lunmask_creationclass2 antecedent['SystemName'] = 
self.data.storage_system antecedent['ElementName'] = mvname classcimproperty = Fake_CIMProperty() elementName = ( classcimproperty.fake_getElementNameCIMProperty(mvname)) properties = {u'ElementName': elementName} antecedent.properties = properties unitname['Dependent'] = dependent unitname['Antecedent'] = antecedent unitname['CreationClassName'] = self.data.unit_creationclass return unitname def _ref_unitnames2(self): unitnames = [] unitname = self.mv_entry('OS-myhost-MV') unitnames.append(unitname) # Second masking unitname2 = self.mv_entry('OS-fakehost-MV') unitnames.append(unitname2) # third masking amended = 'OS-rslong493156848e71b072a17c1c4625e45f75-MV' unitname3 = self.mv_entry(amended) unitnames.append(unitname3) return unitnames def _default_ref(self, objectpath): return objectpath def _assoc_hdwid(self): assocs = [] assoc = EMC_StorageHardwareID() assoc['StorageID'] = self.data.connector['initiator'] assoc['SystemName'] = self.data.storage_system assoc['CreationClassName'] = 'EMC_StorageHardwareID' assoc.path = assoc assocs.append(assoc) for wwpn in self.data.connector['wwpns']: assoc2 = EMC_StorageHardwareID() assoc2['StorageID'] = wwpn assoc2['SystemName'] = self.data.storage_system assoc2['CreationClassName'] = 'EMC_StorageHardwareID' assoc2.path = assoc2 assocs.append(assoc2) assocs.append(assoc) return assocs def _assoc_endpoint(self): assocs = [] assoc = {} assoc['Name'] = 'iqn.1992-04.com.emc: 50000973f006dd80' assoc['SystemName'] = self.data.storage_system assocs.append(assoc) return assocs def _assoc_storagegroup(self): assocs = [] assoc1 = CIM_DeviceMaskingGroup() assoc1['ElementName'] = self.data.storagegroupname assoc1['SystemName'] = self.data.storage_system assoc1['CreationClassName'] = 'CIM_DeviceMaskingGroup' assoc1.path = assoc1 assocs.append(assoc1) assoc2 = CIM_DeviceMaskingGroup() assoc2['ElementName'] = self.data.defaultstoragegroupname assoc2['SystemName'] = self.data.storage_system assoc2['CreationClassName'] = 'CIM_DeviceMaskingGroup' assoc2.path = assoc2 assocs.append(assoc2) return assocs def _assoc_portgroup(self): assocs = [] assoc = CIM_TargetMaskingGroup() assoc['ElementName'] = self.data.port_group assoc['SystemName'] = self.data.storage_system assoc['CreationClassName'] = 'CIM_TargetMaskingGroup' assoc.path = assoc assocs.append(assoc) return assocs def _assoc_lunmaskctrls(self): ctrls = [] ctrl = EMC_LunMaskingSCSIProtocolController() ctrl['CreationClassName'] = self.data.lunmask_creationclass ctrl['DeviceID'] = self.data.lunmaskctrl_id ctrl['SystemName'] = self.data.storage_system ctrl['ElementName'] = self.data.lunmaskctrl_name ctrl.path = ctrl ctrls.append(ctrl) return ctrls def _assoc_maskingview(self): assocs = [] assoc = SYMM_LunMasking() assoc['Name'] = 'myMaskingView' assoc['SystemName'] = self.data.storage_system assoc['CreationClassName'] = 'Symm_LunMaskingView' assoc['DeviceID'] = '1234' assoc['SystemCreationClassName'] = '1234' assoc['ElementName'] = 'OS-fakehost-gold-I-MV' assoc.classname = assoc['CreationClassName'] assoc.path = assoc assocs.append(assoc) return assocs # Added test for EMC_StorageVolume associators def _assoc_storagevolume(self, objectpath): assocs = [] if 'type' not in objectpath: vol = self.data.test_volume elif objectpath['type'] == 'failed_delete_vol': vol = self.data.failed_delete_vol elif objectpath['type'] == 'vol1': vol = self.data.test_volume elif objectpath['type'] == 'volInCG': vol = self.data.test_volume_CG elif objectpath['type'] == 'appendVolume': vol = self.data.test_volume elif objectpath['type'] == 
'failed_vol': vol = self.data.test_failed_volume else: vol = self.data.test_volume vol['DeviceID'] = vol['device_id'] assoc = self._getinstance_storagevolume(vol) assocs.append(assoc) return assocs def _assoc_storageextent(self): assocs = [] assoc = CIM_StorageExtent() assoc['Name'] = 'myStorageExtent' assoc['SystemName'] = self.data.storage_system assoc['CreationClassName'] = 'CIM_StorageExtent' assoc.classname = assoc['CreationClassName'] assoc.path = assoc classcimproperty = Fake_CIMProperty() isConcatenatedcimproperty = ( classcimproperty.fake_getIsCompositeCIMProperty()) properties = {u'IsConcatenated': isConcatenatedcimproperty} assoc.properties = properties assocs.append(assoc) return assocs def _default_assoc(self, objectpath): return objectpath def _assocnames_lunmaskctrl(self): return self._enum_lunmaskctrls() def _assocnames_hostedservice(self): return self._enum_hostedservice() def _assocnames_policyCapabilities(self): return self._enum_policycapabilities() def _assocnames_policyrule(self): return self._enum_policyrules() def _assocnames_assoctierpolicy(self): return self._enum_assoctierpolicy() def _assocnames_storagepool(self): return self._enum_storagepool() def _assocnames_storagegroup(self): return self._enum_storagegroup() def _assocnames_storagevolume(self): return self._enum_storagevolume() def _assocnames_portgroup(self): return self._enum_portgroup() def _assocnames_memberofcollection(self): return self._enum_hostedservice() def _assocnames_bindsto(self): return self._enum_ipprotocolendpoint() def _default_assocnames(self, objectpath): return objectpath def _getinstance_storagevolume(self, objectpath): foundinstance = None instance = EMC_StorageVolume() vols = self._enum_storagevolumes() for vol in vols: if vol['DeviceID'] == objectpath['DeviceID']: instance = vol break if not instance: foundinstance = None else: foundinstance = instance return foundinstance def _getinstance_lunmask(self): lunmask = {} lunmask['CreationClassName'] = self.data.lunmask_creationclass lunmask['DeviceID'] = self.data.lunmaskctrl_id lunmask['SystemName'] = self.data.storage_system return lunmask def _getinstance_initiatormaskinggroup(self, objectpath): initiatorgroup = SE_InitiatorMaskingGroup() initiatorgroup['CreationClassName'] = ( self.data.initiatorgroup_creationclass) initiatorgroup['DeviceID'] = self.data.initiatorgroup_id initiatorgroup['SystemName'] = self.data.storage_system initiatorgroup['ElementName'] = self.data.initiatorgroup_name initiatorgroup.path = initiatorgroup return initiatorgroup def _getinstance_storagehardwareid(self, objectpath): hardwareid = SE_StorageHardwareID() hardwareid['CreationClassName'] = self.data.hardwareid_creationclass hardwareid['SystemName'] = self.data.storage_system hardwareid['StorageID'] = self.data.connector['wwpns'][0] hardwareid.path = hardwareid return hardwareid def _getinstance_pool(self, objectpath): pool = {} pool['CreationClassName'] = 'Symm_VirtualProvisioningPool' pool['ElementName'] = self.data.poolname pool['SystemName'] = self.data.storage_system pool['TotalManagedSpace'] = self.data.totalmanagedspace_bits pool['EMCSubscribedCapacity'] = self.data.subscribedcapacity_bits return pool def _getinstance_replicationgroup(self, objectpath): replicationgroup = {} replicationgroup['CreationClassName'] = ( self.data.replicationgroup_creationclass) replicationgroup['ElementName'] = '1234bcde' return replicationgroup def _getinstance_srpstoragepool(self, objectpath): srpstoragepool = SYMM_SrpStoragePool() srpstoragepool['CreationClassName'] 
= ( self.data.srpstoragepool_creationclass) srpstoragepool['ElementName'] = 'SRP_1' classcimproperty = Fake_CIMProperty() totalManagedSpace = ( classcimproperty.fake_getTotalManagedSpaceCIMProperty()) remainingManagedSpace = ( classcimproperty.fake_getRemainingManagedSpaceCIMProperty()) properties = {u'TotalManagedSpace': totalManagedSpace, u'RemainingManagedSpace': remainingManagedSpace} srpstoragepool.properties = properties return srpstoragepool def _getinstance_targetmaskinggroup(self, objectpath): targetmaskinggroup = CIM_TargetMaskingGroup() targetmaskinggroup['CreationClassName'] = 'CIM_TargetMaskingGroup' targetmaskinggroup['ElementName'] = self.data.port_group targetmaskinggroup.path = targetmaskinggroup return targetmaskinggroup def _getinstance_devicemaskinggroup(self, objectpath): targetmaskinggroup = {} if 'CreationClassName' in objectpath: targetmaskinggroup['CreationClassName'] = ( objectpath['CreationClassName']) else: targetmaskinggroup['CreationClassName'] = ( 'CIM_DeviceMaskingGroup') if 'ElementName' in objectpath: targetmaskinggroup['ElementName'] = objectpath['ElementName'] else: targetmaskinggroup['ElementName'] = ( self.data.storagegroupname) return targetmaskinggroup def _getinstance_unit(self, objectpath): unit = {} dependent = {} dependent['CreationClassName'] = self.data.vol_creationclass dependent['DeviceID'] = self.data.test_volume['id'] dependent['ElementName'] = self.data.test_volume['name'] dependent['SystemName'] = self.data.storage_system antecedent = {} antecedent['CreationClassName'] = self.data.lunmask_creationclass antecedent['DeviceID'] = self.data.lunmaskctrl_id antecedent['SystemName'] = self.data.storage_system unit['Dependent'] = dependent unit['Antecedent'] = antecedent unit['CreationClassName'] = self.data.unit_creationclass unit['DeviceNumber'] = '1' return unit def _getinstance_job(self, jobpath): jobinstance = {} jobinstance['InstanceID'] = '9999' if jobpath['status'] == 'failure': jobinstance['JobState'] = 10 jobinstance['ErrorCode'] = 99 jobinstance['ErrorDescription'] = 'Failure' else: jobinstance['JobState'] = 7 jobinstance['ErrorCode'] = 0 jobinstance['ErrorDescription'] = '' return jobinstance def _getinstance_policycapabilities(self, policycapabilitypath): instance = Fake_CIM_TierPolicyServiceCapabilities() fakeinstance = instance.fake_getpolicyinstance() return fakeinstance def _getinstance_syncsvsv(self, objectpath): svInstance = {} svInstance['SyncedElement'] = 'SyncedElement' svInstance['SystemElement'] = 'SystemElement' svInstance['PercentSynced'] = 100 if 'PercentSynced' in objectpath and objectpath['PercentSynced'] < 100: svInstance['PercentSynced'] = 50 svInstance['CopyState'] = self.data.SYNCHRONIZED if 'CopyState' in objectpath and ( objectpath['CopyState'] != self.data.SYNCHRONIZED): svInstance['CopyState'] = self.data.UNSYNCHRONIZED return svInstance def _getinstance_replicationServCapabilities(self, objectpath): repServCpblInstance = SYMM_SrpStoragePool() classcimproperty = Fake_CIMProperty() repTypesCimproperty = ( classcimproperty.fake_getSupportedReplicationTypes()) properties = {u'SupportedReplicationTypes': repTypesCimproperty} repServCpblInstance.properties = properties return repServCpblInstance def _getinstance_ipprotocolendpoint(self, objectpath): return self._enum_ipprotocolendpoint()[0] def _getinstance_lunmaskingview(self, objectpath): return self._enum_maskingView()[0] def _default_getinstance(self, objectpath): return objectpath def _enum_stconfsvcs(self): conf_services = [] conf_service1 = {} 
conf_service1['SystemName'] = self.data.storage_system conf_service1['CreationClassName'] = ( self.data.stconf_service_creationclass) conf_services.append(conf_service1) conf_service2 = {} conf_service2['SystemName'] = self.data.storage_system_v3 conf_service2['CreationClassName'] = ( self.data.stconf_service_creationclass) conf_services.append(conf_service2) return conf_services def _enum_ctrlconfsvcs(self): conf_services = [] conf_service = {} conf_service['SystemName'] = self.data.storage_system conf_service['CreationClassName'] = ( self.data.ctrlconf_service_creationclass) conf_services.append(conf_service) conf_service1 = {} conf_service1['SystemName'] = self.data.storage_system_v3 conf_service1['CreationClassName'] = ( self.data.ctrlconf_service_creationclass) conf_services.append(conf_service1) return conf_services def _enum_elemcompsvcs(self): comp_services = [] comp_service = {} comp_service['SystemName'] = self.data.storage_system comp_service['CreationClassName'] = ( self.data.elementcomp_service_creationclass) comp_services.append(comp_service) return comp_services def _enum_storrelocsvcs(self): reloc_services = [] reloc_service = {} reloc_service['SystemName'] = self.data.storage_system reloc_service['CreationClassName'] = ( self.data.storreloc_service_creationclass) reloc_services.append(reloc_service) return reloc_services def _enum_replicsvcs(self): replic_services = [] replic_service = {} replic_service['SystemName'] = self.data.storage_system replic_service['CreationClassName'] = ( self.data.replication_service_creationclass) replic_services.append(replic_service) replic_service2 = {} replic_service2['SystemName'] = self.data.storage_system_v3 replic_service2['CreationClassName'] = ( self.data.replication_service_creationclass) replic_services.append(replic_service2) return replic_services def _enum_pools(self): pools = [] pool = {} pool['InstanceID'] = ( self.data.storage_system + '+U+' + self.data.storage_type) pool['CreationClassName'] = 'Symm_VirtualProvisioningPool' pool['ElementName'] = 'gold' pools.append(pool) return pools def _enum_pool_details(self): pools = [] pool = {} pool['InstanceID'] = ( self.data.storage_system + '+U+' + self.data.storage_type) pool['CreationClassName'] = 'Symm_VirtualProvisioningPool' pool['TotalManagedSpace'] = 12345678 pool['RemainingManagedSpace'] = 123456 pools.append(pool) return pools def _enum_storagevolumes(self): vols = [] vol = EMC_StorageVolume() vol['Name'] = self.data.test_volume['name'] vol['CreationClassName'] = 'Symm_StorageVolume' vol['ElementName'] = self.data.test_volume['id'] vol['DeviceID'] = self.data.test_volume['device_id'] vol['Id'] = self.data.test_volume['id'] vol['SystemName'] = self.data.storage_system vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] vol['BlockSize'] = self.data.test_volume['BlockSize'] # Added vol to vol.path vol['SystemCreationClassName'] = 'Symm_StorageSystem' vol.path = vol vol.path.classname = vol['CreationClassName'] classcimproperty = Fake_CIMProperty() blocksizecimproperty = classcimproperty.fake_getBlockSizeCIMProperty() consumableBlockscimproperty = ( classcimproperty.fake_getConsumableBlocksCIMProperty()) isCompositecimproperty = ( classcimproperty.fake_getIsCompositeCIMProperty()) properties = {u'ConsumableBlocks': blocksizecimproperty, u'BlockSize': consumableBlockscimproperty, u'IsComposite': isCompositecimproperty} vol.properties = properties name = {} name['classname'] = 'Symm_StorageVolume' keys = {} keys['CreationClassName'] = 'Symm_StorageVolume' 
keys['SystemName'] = self.data.storage_system keys['DeviceID'] = vol['DeviceID'] keys['SystemCreationClassName'] = 'Symm_StorageSystem' name['keybindings'] = keys vol['provider_location'] = str(name) vols.append(vol) failed_delete_vol = EMC_StorageVolume() failed_delete_vol['name'] = 'failed_delete_vol' failed_delete_vol['CreationClassName'] = 'Symm_StorageVolume' failed_delete_vol['ElementName'] = 'failed_delete_vol' failed_delete_vol['DeviceID'] = '99999' failed_delete_vol['SystemName'] = self.data.storage_system # Added vol to vol.path failed_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' failed_delete_vol.path = failed_delete_vol failed_delete_vol.path.classname = ( failed_delete_vol['CreationClassName']) vols.append(failed_delete_vol) failed_vol = EMC_StorageVolume() failed_vol['name'] = 'failed__vol' failed_vol['CreationClassName'] = 'Symm_StorageVolume' failed_vol['ElementName'] = 'failed_vol' failed_vol['DeviceID'] = '4' failed_vol['SystemName'] = self.data.storage_system # Added vol to vol.path failed_vol['SystemCreationClassName'] = 'Symm_StorageSystem' failed_vol.path = failed_vol failed_vol.path.classname = failed_vol['CreationClassName'] name_failed = {} name_failed['classname'] = 'Symm_StorageVolume' keys_failed = {} keys_failed['CreationClassName'] = 'Symm_StorageVolume' keys_failed['SystemName'] = self.data.storage_system keys_failed['DeviceID'] = failed_vol['DeviceID'] keys_failed['SystemCreationClassName'] = 'Symm_StorageSystem' name_failed['keybindings'] = keys_failed failed_vol['provider_location'] = str(name_failed) vols.append(failed_vol) volumeHead = EMC_StorageVolume() volumeHead.classname = 'Symm_StorageVolume' blockSize = self.data.block_size volumeHead['ConsumableBlocks'] = ( self.data.metaHead_volume['ConsumableBlocks']) volumeHead['BlockSize'] = blockSize volumeHead['DeviceID'] = self.data.metaHead_volume['DeviceID'] vols.append(volumeHead) metaMember1 = EMC_StorageVolume() metaMember1.classname = 'Symm_StorageVolume' metaMember1['ConsumableBlocks'] = ( self.data.meta_volume1['ConsumableBlocks']) metaMember1['BlockSize'] = blockSize metaMember1['DeviceID'] = self.data.meta_volume1['DeviceID'] vols.append(metaMember1) metaMember2 = EMC_StorageVolume() metaMember2.classname = 'Symm_StorageVolume' metaMember2['ConsumableBlocks'] = ( self.data.meta_volume2['ConsumableBlocks']) metaMember2['BlockSize'] = blockSize metaMember2['DeviceID'] = self.data.meta_volume2['DeviceID'] vols.append(metaMember2) return vols def _enum_initiatorMaskingGroup(self): initatorgroups = [] initatorgroup = {} initatorgroup['CreationClassName'] = ( self.data.initiatorgroup_creationclass) initatorgroup['DeviceID'] = self.data.initiatorgroup_id initatorgroup['SystemName'] = self.data.storage_system initatorgroup['ElementName'] = self.data.initiatorgroup_name initatorgroups.append(initatorgroup) return initatorgroups def _enum_storage_extent(self): storageExtents = [] storageExtent = CIM_StorageExtent() storageExtent['CreationClassName'] = ( self.data.storageextent_creationclass) classcimproperty = Fake_CIMProperty() isConcatenatedcimproperty = ( classcimproperty.fake_getIsConcatenatedCIMProperty()) properties = {u'IsConcatenated': isConcatenatedcimproperty} storageExtent.properties = properties storageExtents.append(storageExtent) return storageExtents def _enum_lunmaskctrls(self): ctrls = [] ctrl = {} ctrl['CreationClassName'] = self.data.lunmask_creationclass ctrl['DeviceID'] = self.data.lunmaskctrl_id ctrl['SystemName'] = self.data.storage_system ctrl['ElementName'] = 
self.data.lunmaskctrl_name ctrls.append(ctrl) return ctrls def _enum_hostedservice(self): hostedservices = [] hostedservice = {} hostedservice['CreationClassName'] = ( self.data.hostedservice_creationclass) hostedservice['SystemName'] = self.data.storage_system hostedservice['Name'] = self.data.storage_system hostedservices.append(hostedservice) return hostedservices def _enum_policycapabilities(self): policycapabilities = [] policycapability = {} policycapability['CreationClassName'] = ( self.data.policycapability_creationclass) policycapability['SystemName'] = self.data.storage_system propertiesList = [] CIMProperty = {'is_array': True} properties = {u'SupportedTierFeatures': CIMProperty} propertiesList.append(properties) policycapability['Properties'] = propertiesList policycapabilities.append(policycapability) return policycapabilities def _enum_policyrules(self): policyrules = [] policyrule = {} policyrule['CreationClassName'] = self.data.policyrule_creationclass policyrule['SystemName'] = self.data.storage_system policyrule['PolicyRuleName'] = self.data.policyrule policyrules.append(policyrule) return policyrules def _enum_assoctierpolicy(self): assoctierpolicies = [] assoctierpolicy = {} assoctierpolicy['CreationClassName'] = ( self.data.assoctierpolicy_creationclass) assoctierpolicies.append(assoctierpolicy) return assoctierpolicies def _enum_storagepool(self): storagepools = [] storagepool = {} storagepool['CreationClassName'] = self.data.storagepool_creationclass storagepool['InstanceID'] = self.data.storagepoolid storagepool['ElementName'] = 'gold' storagepools.append(storagepool) return storagepools def _enum_srpstoragepool(self): storagepools = [] storagepool = {} storagepool['CreationClassName'] = ( self.data.srpstoragepool_creationclass) storagepool['InstanceID'] = 'SYMMETRIX-+-000197200056-+-SRP_1' storagepool['ElementName'] = 'SRP_1' storagepools.append(storagepool) return storagepools def _enum_storagepoolcapabilities(self): storagepoolcaps = [] storagepoolcap = {} storagepoolcap['CreationClassName'] = 'Symm_StoragePoolCapabilities' storagepoolcap['InstanceID'] = 'SYMMETRIX-+-000197200056-+-SRP_1' storagepoolcaps.append(storagepoolcap) return storagepoolcaps def _enum_storagesettings(self): storagesettings = [] storagesetting = {} storagesetting['CreationClassName'] = 'CIM_StoragePoolSetting' storagesetting['InstanceID'] = ('SYMMETRIX-+-000197200056-+-SBronze:' 'DSS-+-F-+-0-+-SR-+-SRP_1') storagesettings.append(storagesetting) return storagesettings def _enum_targetMaskingGroup(self): targetMaskingGroups = [] targetMaskingGroup = {} targetMaskingGroup['CreationClassName'] = 'CIM_TargetMaskingGroup' targetMaskingGroup['ElementName'] = self.data.port_group targetMaskingGroups.append(targetMaskingGroup) return targetMaskingGroups def _enum_initMaskingGroup(self): initMaskingGroups = [] initMaskingGroup = {} initMaskingGroup['CreationClassName'] = 'CIM_InitiatorMaskingGroup' initMaskingGroup['ElementName'] = 'myInitGroup' initMaskingGroups.append(initMaskingGroup) return initMaskingGroups def _enum_storagegroup(self): storagegroups = [] storagegroup1 = {} storagegroup1['CreationClassName'] = ( self.data.storagegroup_creationclass) storagegroup1['ElementName'] = self.data.storagegroupname storagegroups.append(storagegroup1) storagegroup2 = {} storagegroup2['CreationClassName'] = ( self.data.storagegroup_creationclass) storagegroup2['ElementName'] = self.data.defaultstoragegroupname storagegroup2['SystemName'] = self.data.storage_system storagegroups.append(storagegroup2) 
storagegroup3 = {} storagegroup3['CreationClassName'] = ( self.data.storagegroup_creationclass) storagegroup3['ElementName'] = 'OS-fakehost-SRP_1-Bronze-DSS-SG' storagegroups.append(storagegroup3) storagegroup4 = {} storagegroup4['CreationClassName'] = ( self.data.storagegroup_creationclass) storagegroup4['ElementName'] = 'OS-SRP_1-Bronze-DSS-SG' storagegroups.append(storagegroup4) return storagegroups def _enum_storagevolume(self): storagevolumes = [] storagevolume = {} storagevolume['CreationClassName'] = ( self.data.storagevolume_creationclass) storagevolumes.append(storagevolume) return storagevolumes def _enum_hdwidmgmts(self): services = [] srv = {} srv['SystemName'] = self.data.storage_system services.append(srv) return services def _enum_storhdwids(self): storhdwids = [] hdwid = SE_StorageHardwareID() hdwid['CreationClassName'] = self.data.hardwareid_creationclass hdwid['StorageID'] = self.data.connector['wwpns'][0] hdwid['InstanceID'] = "W-+-" + self.data.connector['wwpns'][0] hdwid.path = hdwid storhdwids.append(hdwid) return storhdwids def _enum_storagesystems(self): storagesystems = [] storagesystem = {} storagesystem['SystemName'] = self.data.storage_system storagesystem['Name'] = self.data.storage_system storagesystems.append(storagesystem) return storagesystems def _enum_repservcpbls(self): repservcpbls = [] servcpbl = CIM_ReplicationServiceCapabilities() servcpbl['CreationClassName'] = 'Symm_ReplicationServiceCapabilities' servcpbl['InstanceID'] = self.data.storage_system repservcpbls.append(servcpbl) return repservcpbls def _enum_repgroups(self): repgroups = [] repgroup = {} repgroup['CreationClassName'] = ( self.data.replicationgroup_creationclass) repgroups.append(repgroup) return repgroups def _enum_fcscsiendpoint(self): wwns = [] wwn = {} wwn['Name'] = "5000090000000000" wwns.append(wwn) return wwns def _enum_maskingView(self): maskingViews = [] maskingView = SYMM_LunMasking() maskingView['CreationClassName'] = 'Symm_LunMaskingView' maskingView['ElementName'] = self.data.lunmaskctrl_name cimproperty = Fake_CIMProperty() cimproperty.value = self.data.lunmaskctrl_name properties = {u'ElementName': cimproperty} maskingView.properties = properties maskingViews.append(maskingView) return maskingViews def _enum_portgroup(self): portgroups = [] portgroup = {} portgroup['CreationClassName'] = ( 'CIM_TargetMaskingGroup') portgroup['ElementName'] = self.data.port_group portgroups.append(portgroup) return portgroups def _enum_metavolume(self): return [] def _enum_storageSyncSvSv(self): conn = FakeEcomConnection() sourceVolume = {} sourceVolume['CreationClassName'] = 'Symm_StorageVolume' sourceVolume['DeviceID'] = self.data.test_volume['device_id'] sourceInstanceName = conn.GetInstance(sourceVolume) svInstances = [] svInstance = {} svInstance['SyncedElement'] = 'SyncedElement' svInstance['SystemElement'] = sourceInstanceName svInstance['CreationClassName'] = 'SE_StorageSynchronized_SV_SV' svInstance['PercentSynced'] = 100 svInstance['CopyState'] = self.data.UNSYNCHRONIZED svInstances.append(svInstance) return svInstances def _enum_sw_identity(self): swIdentities = [] swIdentity = {} swIdentity['MajorVersion'] = self.data.majorVersion swIdentity['MinorVersion'] = self.data.minorVersion swIdentity['RevisionNumber'] = self.data.revNumber swIdentities.append(swIdentity) return swIdentities def _enum_ipprotocolendpoint(self): ipprotocolendpoints = [] ipprotocolendpoint = CIM_IPProtocolEndpoint() ipprotocolendpoint['CreationClassName'] = 'CIM_IPProtocolEndpoint' 
ipprotocolendpoint['SystemName'] = self.data.storage_system classcimproperty = Fake_CIMProperty() ipv4addresscimproperty = ( classcimproperty.fake_getipv4address()) properties = {u'IPv4Address': ipv4addresscimproperty} ipprotocolendpoint.properties = properties ipprotocolendpoint.path = ipprotocolendpoint ipprotocolendpoints.append(ipprotocolendpoint) return ipprotocolendpoints def _default_enum(self): names = [] name = {} name['Name'] = 'default' names.append(name) return names class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() self.tempdir = tempfile.mkdtemp() super(EMCVMAXISCSIDriverNoFastTestCase, self).setUp() self.config_file_path = None self.create_fake_config_file_no_fast() self.addCleanup(self._cleanup) configuration = conf.Configuration(None) configuration.append_config_values = mock.Mock(return_value=0) configuration.config_group = 'ISCSINoFAST' configuration.cinder_emc_config_file = self.config_file_path self.stubs.Set(configuration, 'safe_get', self.fake_safe_get({'driver_use_ssl': True, 'volume_backend_name': 'ISCSINoFAST'})) self.stubs.Set(emc_vmax_iscsi.EMCVMAXISCSIDriver, 'smis_do_iscsi_discovery', self.fake_do_iscsi_discovery) self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', self.fake_ecom_connection) instancename = FakeCIMInstanceName() self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', instancename.fake_getinstancename) self.stubs.Set(time, 'sleep', self.fake_sleep) self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', self.fake_is_v3) driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object) def fake_safe_get(self, values): def _safe_get(key): return values.get(key) return _safe_get def create_fake_config_file_no_fast(self): doc = minidom.Document() emc = doc.createElement("EMC") doc.appendChild(emc) doc = self.add_array_info(doc, emc) filename = 'cinder_emc_config_ISCSINoFAST.xml' self.config_file_path = self.tempdir + '/' + filename f = open(self.config_file_path, 'w') doc.writexml(f) f.close() def create_fake_config_file_no_fast_with_interval_retries(self): doc = minidom.Document() emc = doc.createElement("EMC") doc.appendChild(emc) doc = self.add_array_info(doc, emc) doc = self.add_interval_and_retries(doc, emc) filename = 'cinder_emc_config_ISCSINoFAST_int_ret.xml' config_file_path = self.tempdir + '/' + filename f = open(self.config_file_path, 'w') doc.writexml(f) f.close() return config_file_path def create_fake_config_file_no_fast_with_interval(self): doc = minidom.Document() emc = doc.createElement("EMC") doc.appendChild(emc) doc = self.add_array_info(doc, emc) doc = self.add_interval_only(doc, emc) filename = 'cinder_emc_config_ISCSINoFAST_int.xml' config_file_path = self.tempdir + '/' + filename f = open(self.config_file_path, 'w') doc.writexml(f) f.close() return config_file_path def create_fake_config_file_no_fast_with_retries(self): doc = minidom.Document() emc = doc.createElement("EMC") doc.appendChild(emc) doc = self.add_array_info(doc, emc) doc = self.add_retries_only(doc, emc) filename = 'cinder_emc_config_ISCSINoFAST_ret.xml' config_file_path = self.tempdir + '/' + filename f = open(self.config_file_path, 'w') doc.writexml(f) f.close() return config_file_path def add_array_info(self, doc, emc): array = doc.createElement("Array") arraytext = doc.createTextNode("1234567891011") emc.appendChild(array) array.appendChild(arraytext) ecomserverip = 
doc.createElement("EcomServerIp") ecomserveriptext = doc.createTextNode("1.1.1.1") emc.appendChild(ecomserverip) ecomserverip.appendChild(ecomserveriptext) ecomserverport = doc.createElement("EcomServerPort") ecomserverporttext = doc.createTextNode("10") emc.appendChild(ecomserverport) ecomserverport.appendChild(ecomserverporttext) ecomusername = doc.createElement("EcomUserName") ecomusernametext = doc.createTextNode("user") emc.appendChild(ecomusername) ecomusername.appendChild(ecomusernametext) ecompassword = doc.createElement("EcomPassword") ecompasswordtext = doc.createTextNode("pass") emc.appendChild(ecompassword) ecompassword.appendChild(ecompasswordtext) portgroup = doc.createElement("PortGroup") portgrouptext = doc.createTextNode(self.data.port_group) portgroup.appendChild(portgrouptext) portgroups = doc.createElement("PortGroups") portgroups.appendChild(portgroup) emc.appendChild(portgroups) pool = doc.createElement("Pool") pooltext = doc.createTextNode("gold") emc.appendChild(pool) pool.appendChild(pooltext) array = doc.createElement("Array") arraytext = doc.createTextNode("1234567891011") emc.appendChild(array) array.appendChild(arraytext) timeout = doc.createElement("Timeout") timeouttext = doc.createTextNode("0") emc.appendChild(timeout) timeout.appendChild(timeouttext) return doc def add_interval_and_retries(self, doc, emc): interval = doc.createElement("Interval") intervaltext = doc.createTextNode("5") emc.appendChild(interval) interval.appendChild(intervaltext) retries = doc.createElement("Retries") retriestext = doc.createTextNode("40") emc.appendChild(retries) retries.appendChild(retriestext) return doc def add_interval_only(self, doc, emc): interval = doc.createElement("Interval") intervaltext = doc.createTextNode("20") emc.appendChild(interval) interval.appendChild(intervaltext) return doc def add_retries_only(self, doc, emc): retries = doc.createElement("Retries") retriestext = doc.createTextNode("70") emc.appendChild(retries) retries.appendChild(retriestext) return doc # fix for https://bugs.launchpad.net/cinder/+bug/1364232 def create_fake_config_file_1364232(self): filename = 'cinder_emc_config_1364232.xml' config_file_1364232 = self.tempdir + '/' + filename text_file = open(config_file_1364232, "w") text_file.write("\n\n" "10.10.10.10\n" "5988\n" "user\t\n" "password\n" "OS-PORTGROUP1-PG" "OS-PORTGROUP2-PG" " \n" "OS-PORTGROUP3-PG" "OS-PORTGROUP4-PG" "\n000198700439" " \n\nFC_SLVR1\n" "\nSILVER1\n" "") text_file.close() return config_file_1364232 def fake_ecom_connection(self): conn = FakeEcomConnection() return conn def fake_do_iscsi_discovery(self, volume): output = [] item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80' output.append(item) return output def fake_sleep(self, seconds): return def fake_is_v3(self, conn, serialNumber): return False def populate_masking_dict_setup(self): extraSpecs = {'storagetype:pool': u'gold_pool', 'volume_backend_name': 'GOLD_POOL_BE', 'storagetype:array': u'1234567891011', 'isV3': False, 'portgroupname': u'OS-portgroup-PG', 'storagetype:fastpolicy': u'GOLD'} vol = {'SystemName': self.data.storage_system} self.driver.common._find_lun = mock.Mock( return_value=vol) self.driver.common.utils.find_controller_configuration_service = ( mock.Mock(return_value=None)) return extraSpecs def test_populate_masking_dict_fast(self): extraSpecs = self.populate_masking_dict_setup() # If fast is enabled it will uniquely determine the SG and MV # on the host along with the protocol(iSCSI) e.g. 

    def test_populate_masking_dict_fast(self):
        extraSpecs = self.populate_masking_dict_setup()
        # If fast is enabled it will uniquely determine the SG and MV
        # on the host along with the protocol(iSCSI) e.g. I
        maskingViewDict = self.driver.common._populate_masking_dict(
            self.data.test_volume, self.data.connector, extraSpecs)
        self.assertEqual(
            'OS-fakehost-GOLD-FP-I-SG', maskingViewDict['sgGroupName'])
        self.assertEqual(
            'OS-fakehost-GOLD-FP-I-MV', maskingViewDict['maskingViewName'])

    def test_populate_masking_dict_fast_more_than_14chars(self):
        # If the length of the FAST policy name is greater than 14 chars
        extraSpecs = self.populate_masking_dict_setup()
        extraSpecs['storagetype:fastpolicy'] = 'GOLD_MORE_THAN_FOURTEEN_CHARS'
        maskingViewDict = self.driver.common._populate_masking_dict(
            self.data.test_volume, self.data.connector, extraSpecs)
        self.assertEqual(
            'OS-fakehost-GOLD_MO__CHARS-FP-I-SG',
            maskingViewDict['sgGroupName'])
        self.assertEqual(
            'OS-fakehost-GOLD_MO__CHARS-FP-I-MV',
            maskingViewDict['maskingViewName'])

    def test_populate_masking_dict_no_fast(self):
        # If fast isn't enabled the pool will uniquely determine the SG
        # and MV on the host along with the protocol(iSCSI) e.g. I
        extraSpecs = self.populate_masking_dict_setup()
        extraSpecs['storagetype:fastpolicy'] = None
        maskingViewDict = self.driver.common._populate_masking_dict(
            self.data.test_volume, self.data.connector, extraSpecs)
        self.assertEqual(
            'OS-fakehost-gold_pool-I-SG', maskingViewDict['sgGroupName'])
        self.assertEqual(
            'OS-fakehost-gold_pool-I-MV', maskingViewDict['maskingViewName'])

    def test_populate_masking_dict_fast_both_exceeding(self):
        # If the length of the FAST policy name is greater than 14 chars and
        # the length of the short host is more than 38 characters
        extraSpecs = self.populate_masking_dict_setup()
        connector = {'host': 'SHORT_HOST_MORE_THEN THIRTY_EIGHT_CHARACTERS'}
        extraSpecs['storagetype:fastpolicy'] = (
            'GOLD_MORE_THAN_FOURTEEN_CHARACTERS')
        maskingViewDict = self.driver.common._populate_masking_dict(
            self.data.test_volume, connector, extraSpecs)
        self.assertLessEqual(len(maskingViewDict['sgGroupName']), 64)
        self.assertLessEqual(len(maskingViewDict['maskingViewName']), 64)

    def test_populate_masking_dict_no_fast_both_exceeding(self):
        # If the length of the pool name is greater than 16 chars and
        # the length of the short host is more than 38 characters
        extraSpecs = self.populate_masking_dict_setup()
        connector = {'host': 'SHORT_HOST_MORE_THEN THIRTY_EIGHT_CHARACTERS'}
        extraSpecs['storagetype:pool'] = (
            'GOLD_POOL_MORE_THAN_SIXTEEN_CHARACTERS')
        extraSpecs['storagetype:fastpolicy'] = None
        maskingViewDict = self.driver.common._populate_masking_dict(
            self.data.test_volume, connector, extraSpecs)
        self.assertLessEqual(len(maskingViewDict['sgGroupName']), 64)
        self.assertLessEqual(len(maskingViewDict['maskingViewName']), 64)

    def test_filter_list(self):
        portgroupnames = ['pg3', 'pg1', 'pg4', 'pg2']
        portgroupnames = (
            self.driver.common.utils._filter_list(portgroupnames))
        self.assertEqual(4, len(portgroupnames))
        self.assertEqual(['pg1', 'pg2', 'pg3', 'pg4'], sorted(portgroupnames))
        portgroupnames = ['pg1']
        portgroupnames = (
            self.driver.common.utils._filter_list(portgroupnames))
        self.assertEqual(1, len(portgroupnames))
        self.assertEqual(['pg1'], portgroupnames)
        portgroupnames = ['only_pg', '', '', '', '', '']
        portgroupnames = (
            self.driver.common.utils._filter_list(portgroupnames))
        self.assertEqual(1, len(portgroupnames))
        self.assertEqual(['only_pg'], portgroupnames)

    def test_get_random_pg_from_list(self):
        portGroupNames = ['pg1', 'pg2', 'pg3', 'pg4']
        portGroupName = (
            self.driver.common.utils._get_random_pg_from_list(portGroupNames))
        self.assertTrue('pg' in portGroupName)
        portGroupNames = ['pg1']
        portGroupName = (
            self.driver.common.utils._get_random_pg_from_list(portGroupNames))
        self.assertEqual('pg1', portGroupName)

    def test_get_random_portgroup(self):
        # 4 portgroups
        data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
                "<PortGroups>"
                "<PortGroup>OS-PG1</PortGroup>\n"
                "<PortGroup>OS-PG2</PortGroup>\n"
                "<PortGroup>OS-PG3</PortGroup>\n"
                "<PortGroup>OS-PG4</PortGroup>\n"
                "</PortGroups>"
                "</EMC>")
        dom = minidom.parseString(data)
        portgroup = self.driver.common.utils._get_random_portgroup(dom)
        self.assertTrue('OS-PG' in portgroup)
        # Duplicate portgroups
        data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
                "<PortGroups>"
                "<PortGroup>OS-PG1</PortGroup>\n"
                "<PortGroup>OS-PG1</PortGroup>\n"
                "<PortGroup>OS-PG1</PortGroup>\n"
                "<PortGroup>OS-PG2</PortGroup>\n"
                "</PortGroups>"
                "</EMC>")
        dom = minidom.parseString(data)
        portgroup = self.driver.common.utils._get_random_portgroup(dom)
        self.assertTrue('OS-PG' in portgroup)

    def test_get_random_portgroup_exception(self):
        # Missing PortGroup values
        data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
                "<PortGroups>"
                "<PortGroup></PortGroup>\n"
                "<PortGroup></PortGroup>\n"
                "</PortGroups>"
                "</EMC>")
        dom = minidom.parseString(data)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.common.utils._get_random_portgroup,
                          dom)
        # Missing portgroups
        data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
                "<PortGroups>"
                "</PortGroups>"
                "</EMC>")
        dom = minidom.parseString(data)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.common.utils._get_random_portgroup,
                          dom)

    def test_is_sync_complete(self):
        conn = self.fake_ecom_connection()
        syncname = SE_ConcreteJob()
        syncname.classname = 'SE_StorageSynchronized_SV_SV'
        syncname['CopyState'] = self.data.UNSYNCHRONIZED
        issynched = self.driver.common.utils._is_sync_complete(conn, syncname)
        self.assertFalse(issynched)

    def test_get_correct_port_group(self):
        self.driver.common.conn = self.fake_ecom_connection()
        maskingViewInstanceName = {'CreationClassName': 'Symm_LunMaskingView',
                                   'ElementName': 'OS-fakehost-gold-I-MV',
                                   'SystemName': 'SYMMETRIX+000195900551'}
        deviceinfodict = {'controller': maskingViewInstanceName}
        portgroupname = self.driver.common._get_correct_port_group(
            deviceinfodict, self.data.storage_system)
        self.assertEqual('OS-portgroup-PG', portgroupname)

    def test_generate_unique_trunc_pool(self):
        pool_under_16_chars = 'pool_under_16'
        pool1 = self.driver.utils.generate_unique_trunc_pool(
            pool_under_16_chars)
        self.assertEqual(pool_under_16_chars, pool1)
        pool_over_16_chars = (
            'pool_over_16_pool_over_16')
        # Should generate a truncated string from the first 8 chars and
        # the last 8 chars
        pool2 = self.driver.utils.generate_unique_trunc_pool(
            pool_over_16_chars)
        self.assertEqual('pool_ove_over_16', pool2)

    def test_generate_unique_trunc_host(self):
        host_under_38_chars = 'host_under_38_chars'
        host1 = self.driver.utils.generate_unique_trunc_host(
            host_under_38_chars)
        self.assertEqual(host_under_38_chars, host1)
        host_over_38_chars = (
            'host_over_38_chars_host_over_38_chars_host_over_38_chars')
        # Check that the same md5 value is retrieved from multiple calls
        host2 = self.driver.utils.generate_unique_trunc_host(
            host_over_38_chars)
        host3 = self.driver.utils.generate_unique_trunc_host(
            host_over_38_chars)
        self.assertEqual(host2, host3)
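
    # --- Illustrative sketch (not part of the original test suite) ---
    # test_generate_unique_trunc_pool above pins the truncation rule for
    # over-long pool names: the expected value 'pool_ove_over_16' is the
    # first 8 plus the last 8 characters of the input. A minimal sketch of
    # that rule, assuming exactly this first-8/last-8 composition (the real
    # helper is emc_vmax_utils.EMCVMAXUtils.generate_unique_trunc_pool):
    def _example_trunc_pool(self, pool_name):
        # Illustration only; returns the name unchanged when it already
        # fits in 16 characters.
        if len(pool_name) <= 16:
            return pool_name
        return pool_name[:8] + pool_name[-8:]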
            self.driver.common.utils._get_random_pg_from_list(portGroupNames))
        self.assertEqual('pg1', portGroupName)

    def test_get_random_portgroup(self):
        # 4 portgroups
        data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
                "<PortGroups>"
                "<PortGroup>OS-PG1</PortGroup>\n"
                "<PortGroup>OS-PG2</PortGroup>\n"
                "<PortGroup>OS-PG3</PortGroup>\n"
                "<PortGroup>OS-PG4</PortGroup>\n"
                "</PortGroups>"
                "</EMC>")
        dom = minidom.parseString(data)
        portgroup = self.driver.common.utils._get_random_portgroup(dom)
        self.assertTrue('OS-PG' in portgroup)

        # Duplicate portgroups
        data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
                "<PortGroups>"
                "<PortGroup>OS-PG1</PortGroup>\n"
                "<PortGroup>OS-PG1</PortGroup>\n"
                "<PortGroup>OS-PG1</PortGroup>\n"
                "<PortGroup>OS-PG2</PortGroup>\n"
                "</PortGroups>"
                "</EMC>")
        dom = minidom.parseString(data)
        portgroup = self.driver.common.utils._get_random_portgroup(dom)
        self.assertTrue('OS-PG' in portgroup)

    def test_get_random_portgroup_exception(self):
        # Missing PortGroup values
        data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
                "<PortGroups>"
                "<PortGroup></PortGroup>\n"
                "<PortGroup></PortGroup>\n"
                "</PortGroups>"
                "</EMC>")
        dom = minidom.parseString(data)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.common.utils._get_random_portgroup,
                          dom)

        # Missing portgroups
        data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
                "<PortGroups>"
                "</PortGroups>"
                "</EMC>")
        dom = minidom.parseString(data)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.common.utils._get_random_portgroup,
                          dom)

    def test_is_sync_complete(self):
        conn = self.fake_ecom_connection()
        syncname = SE_ConcreteJob()
        syncname.classname = 'SE_StorageSynchronized_SV_SV'
        syncname['CopyState'] = self.data.UNSYNCHRONIZED
        issynched = self.driver.common.utils._is_sync_complete(conn, syncname)
        self.assertFalse(issynched)

    def test_get_correct_port_group(self):
        self.driver.common.conn = self.fake_ecom_connection()
        maskingViewInstanceName = {'CreationClassName': 'Symm_LunMaskingView',
                                   'ElementName': 'OS-fakehost-gold-I-MV',
                                   'SystemName': 'SYMMETRIX+000195900551'}
        deviceinfodict = {'controller': maskingViewInstanceName}
        portgroupname = self.driver.common._get_correct_port_group(
            deviceinfodict, self.data.storage_system)
        self.assertEqual('OS-portgroup-PG', portgroupname)

    def test_generate_unique_trunc_pool(self):
        pool_under_16_chars = 'pool_under_16'
        pool1 = self.driver.utils.generate_unique_trunc_pool(
            pool_under_16_chars)
        self.assertEqual(pool_under_16_chars, pool1)

        pool_over_16_chars = (
            'pool_over_16_pool_over_16')
        # Should generate a truncated string: the first 8 chars and the
        # last 7 chars of the original, joined by an underscore
        pool2 = self.driver.utils.generate_unique_trunc_pool(
            pool_over_16_chars)
        self.assertEqual('pool_ove_over_16', pool2)

    def test_generate_unique_trunc_host(self):
        host_under_38_chars = 'host_under_38_chars'
        host1 = self.driver.utils.generate_unique_trunc_host(
            host_under_38_chars)
        self.assertEqual(host_under_38_chars, host1)

        host_over_38_chars = (
            'host_over_38_chars_host_over_38_chars_host_over_38_chars')
        # Check that the same md5 value is retrieved from multiple calls
        host2 = self.driver.utils.generate_unique_trunc_host(
            host_over_38_chars)
        host3 = self.driver.utils.generate_unique_trunc_host(
            host_over_38_chars)
        self.assertEqual(host2, host3)

    def test_find_ip_protocol_endpoints(self):
        conn = self.fake_ecom_connection()
        foundIpAddresses = self.driver.common._find_ip_protocol_endpoints(
            conn, self.data.storage_system, self.data.port_group)
        self.assertEqual('10.10.10.10', foundIpAddresses[0])

    def test_find_device_number(self):
        host = 'fakehost'
        data = (
            self.driver.common.find_device_number(self.data.test_volume_v2,
                                                  host))
        self.assertEqual('OS-fakehost-MV', data['maskingview'])

        host = 'bogushost'
        data = (
            self.driver.common.find_device_number(self.data.test_volume_v2,
                                                  host))
        self.assertFalse(data)

    def test_find_device_number_long_host(self):
        # Long host name
        host = 'myhost.mydomain.com'
        data = (
            self.driver.common.find_device_number(self.data.test_volume_v2,
                                                  host))
        self.assertEqual('OS-myhost-MV', data['maskingview'])

    def
test_find_device_number_short_name_over_38_chars(self): # short name over 38 chars host = 'myShortnameIsOverThirtyEightCharactersLong' host = self.driver.common.utils.generate_unique_trunc_host(host) amended = 'OS-' + host + '-MV' v2_host_over_38 = self.data.test_volume_v2.copy() # Pool aware scheduler enabled v2_host_over_38['host'] = host data = ( self.driver.common.find_device_number(v2_host_over_38, host)) self.assertEqual(amended, data['maskingview']) def test_unbind_and_get_volume_from_storage_pool(self): conn = self.fake_ecom_connection() common = self.driver.common common.utils.is_volume_bound_to_pool = mock.Mock( return_value='False') storageConfigService = ( common.utils.find_storage_configuration_service( conn, self.data.storage_system)) volumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeName = "unbind-vol" extraSpecs = {'volume_backend_name': 'GOLD_BE', 'isV3': False} volumeInstance = ( common._unbind_and_get_volume_from_storage_pool( conn, storageConfigService, volumeInstanceName, volumeName, extraSpecs)) self.assertEqual(self.data.storage_system, volumeInstance['SystemName']) self.assertEqual('1', volumeInstance['ElementName']) def test_create_hardware_ids(self): conn = self.fake_ecom_connection() connector = { 'ip': '10.0.0.2', 'initiator': self.data.iscsi_initiator, 'host': 'fakehost'} initiatorNames = ( self.driver.common.masking._find_initiator_names(conn, connector)) storageHardwareIDInstanceNames = ( self.driver.common.masking._create_hardware_ids( conn, initiatorNames, self.data.storage_system)) self.assertEqual(self.data.iscsi_initiator, storageHardwareIDInstanceNames[0]) def test_get_pool_instance_and_system_name(self): conn = self.fake_ecom_connection() # V2 - old '+' separator storagesystem = {} storagesystem['SystemName'] = self.data.storage_system storagesystem['Name'] = self.data.storage_system pools = conn.EnumerateInstanceNames("EMC_VirtualProvisioningPool") poolname = 'gold' poolinstancename, systemname = ( self.driver.common.utils._get_pool_instance_and_system_name( conn, pools, storagesystem, poolname)) self.assertEqual(self.data.storage_system, systemname) self.assertEqual(self.data.storagepoolid, poolinstancename['InstanceID']) # V3 - note: V2 can also have the '-+-' separator storagesystem = {} storagesystem['SystemName'] = self.data.storage_system_v3 storagesystem['Name'] = self.data.storage_system_v3 pools = conn.EnumerateInstanceNames('Symm_SRPStoragePool') poolname = 'SRP_1' poolinstancename, systemname = ( self.driver.common.utils._get_pool_instance_and_system_name( conn, pools, storagesystem, poolname)) self.assertEqual(self.data.storage_system_v3, systemname) self.assertEqual('SYMMETRIX-+-000197200056-+-SRP_1', poolinstancename['InstanceID']) # Invalid poolname poolname = 'bogus' poolinstancename, systemname = ( self.driver.common.utils._get_pool_instance_and_system_name( conn, pools, storagesystem, poolname)) self.assertIsNone(poolinstancename) self.assertEqual(self.data.storage_system_v3, systemname) def test_get_hardware_type(self): iqn_initiator = 'iqn.1992-04.com.emc: 50000973f006dd80' hardwaretypeid = ( self.driver.utils._get_hardware_type(iqn_initiator)) self.assertEqual(5, hardwaretypeid) wwpn_initiator = '123456789012345' hardwaretypeid = ( self.driver.utils._get_hardware_type(wwpn_initiator)) self.assertEqual(2, hardwaretypeid) bogus_initiator = 'bogus' hardwaretypeid = ( self.driver.utils._get_hardware_type(bogus_initiator)) self.assertEqual(0, hardwaretypeid) def 
test_check_if_rollback_action_for_masking_required(self): conn = self.fake_ecom_connection() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) extraSpecs = {'volume_backend_name': 'GOLD_BE', 'isV3': False, 'storagetype:fastpolicy': 'GOLD1'} vol = EMC_StorageVolume() vol['name'] = self.data.test_volume['name'] vol['CreationClassName'] = 'Symm_StorageVolume' vol['ElementName'] = self.data.test_volume['id'] vol['DeviceID'] = self.data.test_volume['device_id'] vol['Id'] = self.data.test_volume['id'] vol['SystemName'] = self.data.storage_system vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] vol['BlockSize'] = self.data.test_volume['BlockSize'] # Added vol to vol.path vol['SystemCreationClassName'] = 'Symm_StorageSystem' vol.path = vol vol.path.classname = vol['CreationClassName'] rollbackDict = {} rollbackDict['isV3'] = False rollbackDict['defaultStorageGroupInstanceName'] = ( self.data.default_storage_group) rollbackDict['sgName'] = self.data.storagegroupname rollbackDict['volumeName'] = 'vol1' rollbackDict['fastPolicyName'] = 'GOLD1' rollbackDict['volumeInstance'] = vol rollbackDict['controllerConfigService'] = controllerConfigService rollbackDict['extraSpecs'] = extraSpecs # Path 1 - The volume is in another storage group that isn't the # default storage group expectedmessage = (_("V2 rollback - Volume in another storage " "group besides default storage group.")) message = ( self.driver.common.masking. _check_if_rollback_action_for_masking_required( conn, rollbackDict)) self.assertEqual(expectedmessage, message) # Path 2 - The volume is not in any storage group rollbackDict['sgName'] = 'sq_not_exist' expectedmessage = (_("V2 rollback, volume is not in any storage " "group.")) message = ( self.driver.common.masking. 
_check_if_rollback_action_for_masking_required(
                conn, rollbackDict))
        self.assertEqual(expectedmessage, message)

    def test_migrate_cleanup(self):
        conn = self.fake_ecom_connection()
        extraSpecs = {'volume_backend_name': 'GOLD_BE',
                      'isV3': False,
                      'storagetype:fastpolicy': 'GOLD1'}
        vol = EMC_StorageVolume()
        vol['name'] = self.data.test_volume['name']
        vol['CreationClassName'] = 'Symm_StorageVolume'
        vol['ElementName'] = self.data.test_volume['id']
        vol['DeviceID'] = self.data.test_volume['device_id']
        vol['Id'] = self.data.test_volume['id']
        vol['SystemName'] = self.data.storage_system
        vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks']
        vol['BlockSize'] = self.data.test_volume['BlockSize']
        # Add vol to vol.path
        vol['SystemCreationClassName'] = 'Symm_StorageSystem'
        vol.path = vol
        vol.path.classname = vol['CreationClassName']

        # The volume already belongs to the default storage group
        return_to_default = self.driver.common._migrate_cleanup(
            conn, vol, self.data.storage_system, 'GOLD1',
            vol['name'], extraSpecs)
        self.assertFalse(return_to_default)

        # The volume does not belong to the default storage group
        return_to_default = self.driver.common._migrate_cleanup(
            conn, vol, self.data.storage_system, 'BRONZE1',
            vol['name'], extraSpecs)
        self.assertTrue(return_to_default)

    def test_wait_for_job_complete(self):
        myjob = SE_ConcreteJob()
        myjob.classname = 'SE_ConcreteJob'
        myjob['InstanceID'] = '9999'
        myjob['status'] = 'success'
        myjob['type'] = 'type'
        myjob['CreationClassName'] = 'SE_ConcreteJob'
        myjob['Job'] = myjob
        conn = self.fake_ecom_connection()

        self.driver.utils._is_job_finished = mock.Mock(
            return_value=True)
        rc = self.driver.utils._wait_for_job_complete(conn, myjob)
        self.assertIsNone(rc)
        self.driver.utils._is_job_finished.assert_called_once_with(
            conn, myjob)
        self.assertTrue(self.driver.utils._is_job_finished.return_value)
        self.driver.utils._is_job_finished.reset_mock()

        # Save the original state and restore it after this test
        loopingcall_orig = loopingcall.FixedIntervalLoopingCall
        loopingcall.FixedIntervalLoopingCall = mock.Mock()
        rc = self.driver.utils._wait_for_job_complete(conn, myjob)
        self.assertIsNone(rc)
        loopingcall.FixedIntervalLoopingCall.assert_called_once_with(
            mock.ANY)
        loopingcall.FixedIntervalLoopingCall.reset_mock()
        loopingcall.FixedIntervalLoopingCall = loopingcall_orig

    def test_wait_for_sync(self):
        mysync = 'fakesync'
        conn = self.fake_ecom_connection()

        self.driver.utils._is_sync_complete = mock.Mock(
            return_value=True)
        rc = self.driver.utils.wait_for_sync(conn, mysync)
        self.assertIsNotNone(rc)
        self.driver.utils._is_sync_complete.assert_called_once_with(
            conn, mysync)
        self.assertTrue(self.driver.utils._is_sync_complete.return_value)
        self.driver.utils._is_sync_complete.reset_mock()

        # Save the original state and restore it after this test
        loopingcall_orig = loopingcall.FixedIntervalLoopingCall
        loopingcall.FixedIntervalLoopingCall = mock.Mock()
        rc = self.driver.utils.wait_for_sync(conn, mysync)
        self.assertIsNotNone(rc)
        loopingcall.FixedIntervalLoopingCall.assert_called_once_with(
            mock.ANY)
        loopingcall.FixedIntervalLoopingCall.reset_mock()
        loopingcall.FixedIntervalLoopingCall = loopingcall_orig

    def test_wait_for_sync_extra_specs(self):
        mysync = 'fakesync'
        conn = self.fake_ecom_connection()
        file_name = (
            self.create_fake_config_file_no_fast_with_interval_retries())
        extraSpecs = {'volume_backend_name': 'ISCSINoFAST'}
        pool = 'gold+1234567891011'
        arrayInfo = self.driver.utils.parse_file_to_get_array_map(
            self.config_file_path)
        poolRec = self.driver.utils.extract_record(arrayInfo, pool)
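        # poolRec now carries the Interval/Retries values parsed from the
        # fake config file written above; _set_v2_extra_specs below is
        # expected to copy them into extraSpecs, conceptually something like
        # (the record key names here are assumptions for illustration only):
        #
        #     extraSpecs['retries'] = poolRec['Retries']    # 40 in this file
        #     extraSpecs['interval'] = poolRec['Interval']  # 5 in this file
        #
        # which is why the assertions below expect 40/5 rather than the
        # 60/10 defaults exercised in test_intervals_and_retries_default.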
extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, poolRec) self.driver.utils._is_sync_complete = mock.Mock( return_value=True) rc = self.driver.utils.wait_for_sync(conn, mysync, extraSpecs) self.assertIsNotNone(rc) self.driver.utils._is_sync_complete.assert_called_once_with( conn, mysync) self.assertTrue(self.driver.utils._is_sync_complete.return_value) self.assertEqual(40, self.driver.utils._get_max_job_retries(extraSpecs)) self.assertEqual(5, self.driver.utils._get_interval_in_secs(extraSpecs)) self.driver.utils._is_sync_complete.reset_mock() # Save the original state and restore it after this test loopingcall_orig = loopingcall.FixedIntervalLoopingCall loopingcall.FixedIntervalLoopingCall = mock.Mock() rc = self.driver.utils.wait_for_sync(conn, mysync) self.assertIsNotNone(rc) loopingcall.FixedIntervalLoopingCall.assert_called_once_with( mock.ANY) loopingcall.FixedIntervalLoopingCall.reset_mock() loopingcall.FixedIntervalLoopingCall = loopingcall_orig bExists = os.path.exists(file_name) if bExists: os.remove(file_name) # Bug 1395830: _find_lun throws exception when lun is not found. def test_find_lun(self): keybindings = {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000195900551', 'DeviceID': u'1', 'SystemCreationClassName': u'Symm_StorageSystem'} provider_location = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings} volume = EMC_StorageVolume() volume['name'] = 'vol1' volume['provider_location'] = six.text_type(provider_location) self.driver.common.conn = self.driver.common._get_ecom_connection() findlun = self.driver.common._find_lun(volume) getinstance = self.driver.common.conn._getinstance_storagevolume( keybindings) # Found lun. self.assertEqual(getinstance, findlun) keybindings2 = {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000195900551', 'DeviceID': u'9', 'SystemCreationClassName': u'Symm_StorageSystem'} provider_location2 = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings2} volume2 = EMC_StorageVolume() volume2['name'] = 'myVol' volume2['provider_location'] = six.text_type(provider_location2) verify_orig = self.driver.common.conn.GetInstance self.driver.common.conn.GetInstance = mock.Mock( return_value=None) findlun2 = self.driver.common._find_lun(volume2) # Not found. self.assertIsNone(findlun2) self.driver.utils.get_instance_name( provider_location2['classname'], keybindings2) self.driver.common.conn.GetInstance.assert_called_once_with( keybindings2) self.driver.common.conn.GetInstance.reset_mock() self.driver.common.conn.GetInstance = verify_orig keybindings3 = {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000195900551', 'DeviceID': u'9999', 'SystemCreationClassName': u'Symm_StorageSystem'} provider_location3 = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings3} instancename3 = self.driver.utils.get_instance_name( provider_location3['classname'], keybindings3) # Error other than not found. 
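        # process_exception_args is expected to swallow a CIM "not found"
        # error (the caller then gets None) and to re-raise anything else as
        # VolumeBackendAPIException; 9999 below is an arbitrary code that is
        # not "not found", so the call must raise.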
arg = 9999, "test_error" self.assertRaises(exception.VolumeBackendAPIException, self.driver.common.utils.process_exception_args, arg, instancename3) # Bug 1403160 - make sure the masking view is cleanly deleted def test_last_volume_delete_masking_view(self): extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} conn = self.fake_ecom_connection() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) maskingViewInstanceName = ( self.driver.common.masking._find_masking_view( conn, self.data.lunmaskctrl_name, self.data.storage_system)) maskingViewName = conn.GetInstance( maskingViewInstanceName)['ElementName'] # Deleting Masking View failed self.assertRaises( exception.VolumeBackendAPIException, self.driver.common.masking._last_volume_delete_masking_view, conn, controllerConfigService, maskingViewInstanceName, maskingViewName, extraSpecs) # Deleting Masking view successful self.driver.common.masking.utils.get_existing_instance = mock.Mock( return_value=None) self.driver.common.masking._last_volume_delete_masking_view( conn, controllerConfigService, maskingViewInstanceName, maskingViewName, extraSpecs) # Bug 1403160 - make sure the storage group is cleanly deleted def test_remove_last_vol_and_delete_sg(self): conn = self.fake_ecom_connection() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) storageGroupName = self.data.storagegroupname storageGroupInstanceName = ( self.driver.utils.find_storage_masking_group( conn, controllerConfigService, storageGroupName)) volumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeName = "1403160-Vol" extraSpecs = {'volume_backend_name': 'GOLD_BE', 'isV3': False} # Deleting Storage Group failed self.assertRaises( exception.VolumeBackendAPIException, self.driver.common.masking._remove_last_vol_and_delete_sg, conn, controllerConfigService, storageGroupInstanceName, storageGroupName, volumeInstanceName, volumeName, extraSpecs) # Deleting Storage group successful self.driver.common.masking.utils.get_existing_instance = mock.Mock( return_value=None) self.driver.common.masking._remove_last_vol_and_delete_sg( conn, controllerConfigService, storageGroupInstanceName, storageGroupName, volumeInstanceName, volumeName, extraSpecs) # Bug 1504192 - if the last volume is being unmapped and the masking view # goes away, cleanup the initiators and associated initiator group. def test_delete_initiators_from_initiator_group(self): conn = self.fake_ecom_connection() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) initiatorGroupName = self.data.initiatorgroup_name initiatorGroupInstanceName = ( self.driver.common.masking._get_initiator_group_from_masking_view( conn, self.data.lunmaskctrl_name, self.data.storage_system)) conn.InvokeMethod = mock.Mock(return_value=1) # Deletion of initiators failed. self.driver.common.masking._delete_initiators_from_initiator_group( conn, controllerConfigService, initiatorGroupInstanceName, initiatorGroupName) conn.InvokeMethod = mock.Mock(return_value=0) # Deletion of initiators successful. self.driver.common.masking._delete_initiators_from_initiator_group( conn, controllerConfigService, initiatorGroupInstanceName, initiatorGroupName) # Bug 1504192 - if the last volume is being unmapped and the masking view # goes away, cleanup the initiators and associated initiator group. 
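    # (The next two tests drive _last_volume_delete_initiator_group through
    # its failure and success paths: InvokeMethod returning 4096 is the
    # SMI-S "job started" return code, so the masking code must wait for the
    # asynchronous job to complete before the deletion can be judged.)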
def test_last_volume_delete_initiator_group_exception(self):
        extraSpecs = {'volume_backend_name': 'ISCSINoFAST'}
        conn = self.fake_ecom_connection()
        controllerConfigService = (
            self.driver.utils.find_controller_configuration_service(
                conn, self.data.storage_system))
        initiatorGroupInstanceName = (
            self.driver.common.masking._get_initiator_group_from_masking_view(
                conn, self.data.lunmaskctrl_name, self.data.storage_system))
        job = {
            'Job': {'InstanceID': '9999', 'status': 'success', 'type': None}}
        conn.InvokeMethod = mock.Mock(return_value=(4096, job))
        self.driver.common.masking.get_masking_views_by_initiator_group = (
            mock.Mock(return_value=[]))
        self.driver.common.masking._delete_initiators_from_initiator_group = (
            mock.Mock(return_value=True))
        self.driver.common.masking.utils.wait_for_job_complete = (
            mock.Mock(return_value=(2, 'failure')))
        # An exception occurs while deleting the initiator group.
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.common.masking._last_volume_delete_initiator_group,
            conn, controllerConfigService, initiatorGroupInstanceName,
            extraSpecs)

    # Bug 1504192 - if the last volume is being unmapped and the masking view
    # goes away, clean up the initiators and the associated initiator group.
    def test_last_volume_delete_initiator_group(self):
        extraSpecs = {'volume_backend_name': 'ISCSINoFAST'}
        conn = self.fake_ecom_connection()
        controllerConfigService = (
            self.driver.utils.find_controller_configuration_service(
                conn, self.data.storage_system))
        initiatorGroupName = self.data.initiatorgroup_name
        initiatorGroupInstanceName = (
            self.driver.common.masking._get_initiator_group_from_masking_view(
                conn, self.data.lunmaskctrl_name, self.data.storage_system))
        self.assertEqual(initiatorGroupName, conn.GetInstance(
            initiatorGroupInstanceName)['ElementName'])
        # A masking view is still associated with the initiator group, so
        # the initiator group will not be deleted.
        self.driver.common.masking._last_volume_delete_initiator_group(
            conn, controllerConfigService, initiatorGroupInstanceName,
            extraSpecs)
        self.driver.common.masking.get_masking_views_by_initiator_group = (
            mock.Mock(return_value=[]))
        self.driver.common.masking._delete_initiators_from_initiator_group = (
            mock.Mock(return_value=True))
        # No masking view or initiators are associated with the initiator
        # group, so the initiator group will be deleted.
        self.driver.common.masking._last_volume_delete_initiator_group(
            conn, controllerConfigService, initiatorGroupInstanceName,
            extraSpecs)
        job = {
            'Job': {'InstanceID': '9999', 'status': 'success', 'type': None}}
        conn.InvokeMethod = mock.Mock(return_value=(4096, job))
        self.driver.common.masking.utils.wait_for_job_complete = (
            mock.Mock(return_value=(0, 'success')))
        # Deletion of the initiator group succeeds after waiting for the
        # job to complete.
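        # (wait_for_job_complete is mocked to report rc 0, i.e. success, so
        # the call below is expected to return without raising.)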
self.driver.common.masking._last_volume_delete_initiator_group( conn, controllerConfigService, initiatorGroupInstanceName, extraSpecs) # Tests removal of last volume in a storage group V2 def test_remove_and_reset_members(self): extraSpecs = {'volume_backend_name': 'GOLD_BE', 'isV3': False} conn = self.fake_ecom_connection() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) volumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeInstance = conn.GetInstance(volumeInstanceName) volumeName = "Last-Vol" self.driver.common.masking.get_devices_from_storage_group = mock.Mock( return_value=['one_value']) self.driver.common.masking.utils.get_existing_instance = mock.Mock( return_value=None) self.driver.common.masking.remove_and_reset_members( conn, controllerConfigService, volumeInstance, volumeName, extraSpecs) # Bug 1393555 - masking view has been deleted by another process. def test_find_maskingview(self): conn = self.fake_ecom_connection() foundMaskingViewInstanceName = ( self.driver.common.masking._find_masking_view( conn, self.data.lunmaskctrl_name, self.data.storage_system)) # The masking view has been found. self.assertEqual( self.data.lunmaskctrl_name, conn.GetInstance(foundMaskingViewInstanceName)['ElementName']) self.driver.common.masking.utils.get_existing_instance = mock.Mock( return_value=None) foundMaskingViewInstanceName2 = ( self.driver.common.masking._find_masking_view( conn, self.data.lunmaskctrl_name, self.data.storage_system)) # The masking view has not been found. self.assertIsNone(foundMaskingViewInstanceName2) # Bug 1393555 - port group has been deleted by another process. def test_find_portgroup(self): conn = self.fake_ecom_connection() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) foundPortGroupInstanceName = ( self.driver.common.masking.find_port_group( conn, controllerConfigService, self.data.port_group)) # The port group has been found. self.assertEqual( self.data.port_group, conn.GetInstance(foundPortGroupInstanceName)['ElementName']) self.driver.common.masking.utils.get_existing_instance = mock.Mock( return_value=None) foundPortGroupInstanceName2 = ( self.driver.common.masking.find_port_group( conn, controllerConfigService, self.data.port_group)) # The port group has not been found as it has been deleted # externally or by another thread. self.assertIsNone(foundPortGroupInstanceName2) # Bug 1393555 - storage group has been deleted by another process. def test_get_storage_group_from_masking_view(self): conn = self.fake_ecom_connection() foundStorageGroupInstanceName = ( self.driver.common.masking._get_storage_group_from_masking_view( conn, self.data.lunmaskctrl_name, self.data.storage_system)) # The storage group has been found. self.assertEqual( self.data.storagegroupname, conn.GetInstance(foundStorageGroupInstanceName)['ElementName']) self.driver.common.masking.utils.get_existing_instance = mock.Mock( return_value=None) foundStorageGroupInstanceName2 = ( self.driver.common.masking._get_storage_group_from_masking_view( conn, self.data.lunmaskctrl_name, self.data.storage_system)) # The storage group has not been found as it has been deleted # externally or by another thread. self.assertIsNone(foundStorageGroupInstanceName2) # Bug 1393555 - initiator group has been deleted by another process. 
def test_get_initiator_group_from_masking_view(self):
        conn = self.fake_ecom_connection()
        foundInitiatorGroupInstanceName = (
            self.driver.common.masking._get_initiator_group_from_masking_view(
                conn, self.data.lunmaskctrl_name, self.data.storage_system))
        # The initiator group has been found.
        self.assertEqual(
            self.data.initiatorgroup_name,
            conn.GetInstance(foundInitiatorGroupInstanceName)['ElementName'])

        self.driver.common.masking.utils.get_existing_instance = mock.Mock(
            return_value=None)
        foundInitiatorGroupInstanceName2 = (
            self.driver.common.masking._get_initiator_group_from_masking_view(
                conn, self.data.lunmaskctrl_name, self.data.storage_system))
        # The initiator group has not been found as it has been deleted
        # externally or by another thread.
        self.assertIsNone(foundInitiatorGroupInstanceName2)

    # Bug 1393555 - port group has been deleted by another process.
    def test_get_port_group_from_masking_view(self):
        conn = self.fake_ecom_connection()
        foundPortGroupInstanceName = (
            self.driver.common.masking._get_port_group_from_masking_view(
                conn, self.data.lunmaskctrl_name, self.data.storage_system))
        # The port group has been found.
        self.assertEqual(
            self.data.port_group,
            conn.GetInstance(foundPortGroupInstanceName)['ElementName'])

        self.driver.common.masking.utils.get_existing_instance = mock.Mock(
            return_value=None)
        foundPortGroupInstanceName2 = (
            self.driver.common.masking._get_port_group_from_masking_view(
                conn, self.data.lunmaskctrl_name, self.data.storage_system))
        # The port group has not been found as it has been deleted
        # externally or by another thread.
        self.assertIsNone(foundPortGroupInstanceName2)

    # Bug 1393555 - initiator group has been deleted by another process.
    def test_find_initiator_group(self):
        conn = self.fake_ecom_connection()
        controllerConfigService = (
            self.driver.utils.find_controller_configuration_service(
                conn, self.data.storage_system))
        foundInitiatorGroupInstanceName = (
            self.driver.common.masking._find_initiator_masking_group(
                conn, controllerConfigService, self.data.initiatorNames))
        # The initiator group has been found.
        self.assertEqual(
            self.data.initiatorgroup_name,
            conn.GetInstance(foundInitiatorGroupInstanceName)['ElementName'])

        self.driver.common.masking.utils.get_existing_instance = mock.Mock(
            return_value=None)
        foundInitiatorGroupInstanceName2 = (
            self.driver.common.masking._find_initiator_masking_group(
                conn, controllerConfigService, self.data.initiatorNames))
        # The initiator group has not been found as it has been deleted
        # externally or by another thread.
        self.assertIsNone(foundInitiatorGroupInstanceName2)

    # Bug 1393555 - hardware id has been deleted by another process.
    def test_get_storage_hardware_id_instance_names(self):
        conn = self.fake_ecom_connection()
        foundHardwareIdInstanceNames = (
            self.driver.common.masking._get_storage_hardware_id_instance_names(
                conn, self.data.initiatorNames, self.data.storage_system))
        # The hardware id list has been found.
        self.assertEqual(
            '123456789012345',
            conn.GetInstance(
                foundHardwareIdInstanceNames[0])['StorageID'])

        self.driver.common.masking.utils.get_existing_instance = mock.Mock(
            return_value=None)
        foundHardwareIdInstanceNames2 = (
            self.driver.common.masking._get_storage_hardware_id_instance_names(
                conn, self.data.initiatorNames, self.data.storage_system))
        # The hardware id list has not been found as it has been removed
        # externally.
        self.assertEqual(0, len(foundHardwareIdInstanceNames2))

    # Bug 1393555 - controller has been deleted by another process.
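    # (Like the other Bug 1393555 tests above, the test below first performs
    # a successful lookup against the fake ECOM connection, then mocks
    # utils.get_existing_instance to return None to simulate the object
    # being deleted by another process, and expects the helper to quietly
    # return None rather than raise.)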
def test_find_lunmasking_scsi_protocol_controller(self): self.driver.common.conn = self.fake_ecom_connection() foundControllerInstanceName = ( self.driver.common._find_lunmasking_scsi_protocol_controller( self.data.storage_system, self.data.connector)) # The controller has been found. self.assertEqual( 'OS-fakehost-gold-I-MV', self.driver.common.conn.GetInstance( foundControllerInstanceName)['ElementName']) self.driver.common.utils.get_existing_instance = mock.Mock( return_value=None) foundControllerInstanceName2 = ( self.driver.common._find_lunmasking_scsi_protocol_controller( self.data.storage_system, self.data.connector)) # The controller has not been found as it has been removed # externally. self.assertIsNone(foundControllerInstanceName2) # Bug 1393555 - storage group has been deleted by another process. def test_get_policy_default_storage_group(self): conn = self.fake_ecom_connection() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) foundStorageMaskingGroupInstanceName = ( self.driver.common.fast.get_policy_default_storage_group( conn, controllerConfigService, 'OS_default')) # The storage group has been found. self.assertEqual( 'OS_default_GOLD1_SG', conn.GetInstance( foundStorageMaskingGroupInstanceName)['ElementName']) self.driver.common.fast.utils.get_existing_instance = mock.Mock( return_value=None) foundStorageMaskingGroupInstanceName2 = ( self.driver.common.fast.get_policy_default_storage_group( conn, controllerConfigService, 'OS_default')) # The storage group has not been found as it has been removed # externally. self.assertIsNone(foundStorageMaskingGroupInstanceName2) # Bug 1393555 - policy has been deleted by another process. def test_get_capacities_associated_to_policy(self): conn = self.fake_ecom_connection() total_capacity_gb, free_capacity_gb = ( self.driver.common.fast.get_capacities_associated_to_policy( conn, self.data.storage_system, self.data.policyrule)) # The capacities associated to the policy have been found. self.assertEqual(self.data.totalmanagedspace_gbs, total_capacity_gb) self.assertEqual(self.data.subscribedcapacity_gbs, free_capacity_gb) self.driver.common.fast.utils.get_existing_instance = mock.Mock( return_value=None) total_capacity_gb_2, free_capacity_gb_2 = ( self.driver.common.fast.get_capacities_associated_to_policy( conn, self.data.storage_system, self.data.policyrule)) # The capacities have not been found as the policy has been # removed externally. self.assertEqual(0, total_capacity_gb_2) self.assertEqual(0, free_capacity_gb_2) # Bug 1393555 - storage group has been deleted by another process. def test_find_storage_masking_group(self): conn = self.fake_ecom_connection() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) foundStorageMaskingGroupInstanceName = ( self.driver.common.utils.find_storage_masking_group( conn, controllerConfigService, self.data.storagegroupname)) # The storage group has been found. self.assertEqual( self.data.storagegroupname, conn.GetInstance( foundStorageMaskingGroupInstanceName)['ElementName']) self.driver.common.utils.get_existing_instance = mock.Mock( return_value=None) foundStorageMaskingGroupInstanceName2 = ( self.driver.common.utils.find_storage_masking_group( conn, controllerConfigService, self.data.storagegroupname)) # The storage group has not been found as it has been removed # externally. 
self.assertIsNone(foundStorageMaskingGroupInstanceName2) # Bug 1393555 - pool has been deleted by another process. def test_get_pool_by_name(self): conn = self.fake_ecom_connection() foundPoolInstanceName = self.driver.common.utils.get_pool_by_name( conn, self.data.poolname, self.data.storage_system) # The pool has been found. self.assertEqual( self.data.poolname, conn.GetInstance(foundPoolInstanceName)['ElementName']) self.driver.common.utils.get_existing_instance = mock.Mock( return_value=None) foundPoolInstanceName2 = self.driver.common.utils.get_pool_by_name( conn, self.data.poolname, self.data.storage_system) # The pool has not been found as it has been removed externally. self.assertIsNone(foundPoolInstanceName2) def test_get_volume_stats_1364232(self): file_name = self.create_fake_config_file_1364232() arrayInfo = self.driver.utils.parse_file_to_get_array_map(file_name) self.assertEqual( '000198700439', arrayInfo[0]['SerialNumber']) self.assertEqual( 'FC_SLVR1', arrayInfo[0]['PoolName']) self.assertEqual( 'SILVER1', arrayInfo[0]['FastPolicy']) self.assertTrue( 'OS-PORTGROUP' in arrayInfo[0]['PortGroup']) bExists = os.path.exists(file_name) if bExists: os.remove(file_name) def test_intervals_and_retries_override( self): file_name = ( self.create_fake_config_file_no_fast_with_interval_retries()) extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} pool = 'gold+1234567891011' arrayInfo = self.driver.utils.parse_file_to_get_array_map( self.config_file_path) poolRec = self.driver.utils.extract_record(arrayInfo, pool) extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, poolRec) self.assertEqual(40, self.driver.utils._get_max_job_retries(extraSpecs)) self.assertEqual(5, self.driver.utils._get_interval_in_secs(extraSpecs)) bExists = os.path.exists(file_name) if bExists: os.remove(file_name) def test_intervals_and_retries_default(self): extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} pool = 'gold+1234567891011' arrayInfo = self.driver.utils.parse_file_to_get_array_map( self.config_file_path) poolRec = self.driver.utils.extract_record(arrayInfo, pool) extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, poolRec) self.assertEqual(60, self.driver.utils._get_max_job_retries(extraSpecs)) self.assertEqual(10, self.driver.utils._get_interval_in_secs(extraSpecs)) def test_interval_only(self): extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} file_name = self.create_fake_config_file_no_fast_with_interval() pool = 'gold+1234567891011' arrayInfo = self.driver.utils.parse_file_to_get_array_map( self.config_file_path) poolRec = self.driver.utils.extract_record(arrayInfo, pool) extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, poolRec) self.assertEqual(60, self.driver.utils._get_max_job_retries(extraSpecs)) self.assertEqual(20, self.driver.utils._get_interval_in_secs(extraSpecs)) bExists = os.path.exists(file_name) if bExists: os.remove(file_name) def test_retries_only(self): extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} file_name = self.create_fake_config_file_no_fast_with_retries() pool = 'gold+1234567891011' arrayInfo = self.driver.utils.parse_file_to_get_array_map( self.config_file_path) poolRec = self.driver.utils.extract_record(arrayInfo, pool) extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, poolRec) self.assertEqual(70, self.driver.utils._get_max_job_retries(extraSpecs)) self.assertEqual(10, self.driver.utils._get_interval_in_secs(extraSpecs)) bExists = os.path.exists(file_name) if bExists: os.remove(file_name) @mock.patch.object( 
emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', return_value=False) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_pool_capacities', return_value=(1234, 1200)) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'is_tiering_policy_enabled', return_value=False) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_storageSystem', return_value=None) def test_get_volume_stats_no_fast(self, mock_storage_system, mock_is_fast_enabled, mock_capacity, mock_is_v3): self.driver.get_volume_stats(True) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_volume_no_fast_success( self, _mock_volume_type, mock_storage_system): self.driver.create_volume(self.data.test_volume_v2) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype: stripedmetacount': '4', 'volume_backend_name': 'ISCSINoFAST'}) def test_create_volume_no_fast_striped_success( self, _mock_volume_type, mock_storage_system): self.driver.create_volume(self.data.test_volume_v2) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_volume_in_CG_no_fast_success( self, _mock_volume_type, mock_storage_system): self.driver.create_volume(self.data.test_volume_CG) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_delete_volume_no_fast_success( self, _mock_volume_type, mock_storage_system): self.driver.delete_volume(self.data.test_volume) def test_create_volume_no_fast_failed(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.data.test_failed_volume) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_delete_volume_no_fast_notfound(self, _mock_volume_type): notfound_delete_vol = {} notfound_delete_vol['name'] = 'notfound_delete_vol' notfound_delete_vol['id'] = '10' notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume' notfound_delete_vol['SystemName'] = self.data.storage_system notfound_delete_vol['DeviceID'] = notfound_delete_vol['id'] notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' notfound_delete_vol['volume_type_id'] = 'abc' notfound_delete_vol['provider_location'] = None notfound_delete_vol['host'] = self.data.fake_host name = {} name['classname'] = 'Symm_StorageVolume' keys = {} keys['CreationClassName'] = notfound_delete_vol['CreationClassName'] keys['SystemName'] = notfound_delete_vol['SystemName'] keys['DeviceID'] = notfound_delete_vol['DeviceID'] keys['SystemCreationClassName'] = ( notfound_delete_vol['SystemCreationClassName']) name['keybindings'] = keys self.driver.delete_volume(notfound_delete_vol) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 
'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_delete_volume_failed( self, _mock_volume_type, mock_storage_system): self.driver.create_volume(self.data.failed_delete_vol) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.data.failed_delete_vol) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_is_same_host', return_value=True) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, 'find_device_number', return_value={'hostlunid': 1, 'storagesystem': EMCVMAXCommonData.storage_system}) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_already_mapped_no_fast_success( self, _mock_volume_type, mock_wrap_group, mock_wrap_device, mock_is_same_host): self.driver.common._get_correct_port_group = mock.Mock( return_value=self.data.port_group) self.driver.initialize_connection(self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_check_adding_volume_to_storage_group', return_value=None) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_storage_masking_group', return_value='value') @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_map_new_masking_view_no_fast_success( self, _mock_volume_type, mock_wrap_group, mock_storage_group, mock_add_volume): self.driver.common._wrap_find_device_number = mock.Mock( return_value={}) self.driver.initialize_connection(self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_check_adding_volume_to_storage_group', return_value=None) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_is_same_host', return_value=False) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_storage_masking_group', return_value='value') @mock.patch.object( emc_vmax_common.EMCVMAXCommon, 'find_device_number', return_value={'hostlunid': 1, 'storagesystem': EMCVMAXCommonData.storage_system}) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_map_live_migration_no_fast_success(self, _mock_volume_type, mock_wrap_group, mock_wrap_device, mock_storage_group, mock_same_host, mock_check): self.driver.initialize_connection(self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_get_initiator_group_from_masking_view', return_value='value') @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_find_initiator_masking_group', return_value='value') @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_find_masking_view', return_value='value') @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_map_existing_masking_view_no_fast_success( self, _mock_volume_type, mock_wrap_group, mock_storage_group, mock_initiator_group, mock_ig_from_mv): self.driver.initialize_connection(self.data.test_volume, self.data.connector) @mock.patch.object( 
emc_vmax_common.EMCVMAXCommon, 'find_device_number', return_value={'storagesystem': EMCVMAXCommonData.storage_system}) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) def test_map_no_fast_failed(self, mock_wrap_group, mock_wrap_device): self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, 'get_initiator_group_from_masking_view', return_value='myInitGroup') @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_find_initiator_masking_group', return_value='myInitGroup') @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_storage_masking_group', return_value=EMCVMAXCommonData.storagegroupname) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_detach_no_fast_success( self, mock_volume_type, mock_storage_group, mock_ig, mock_igc): self.driver.terminate_connection( self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_size', return_value='2147483648') @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_extend_volume_no_fast_success( self, _mock_volume_type, mock_volume_size): newSize = '2' self.driver.extend_volume(self.data.test_volume, newSize) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'check_if_volume_is_extendable', return_value='False') @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype: stripedmetacount': '4', 'volume_backend_name': 'ISCSINoFAST'}) def test_extend_volume_striped_no_fast_failed( self, _mock_volume_type, _mock_is_extendable): newSize = '2' self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.data.test_volume, newSize) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', return_value=[1234567, 7654321]) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_meta_head', return_value=[EMCVMAXCommonData.test_volume]) @mock.patch.object( FakeDB, 'volume_get', return_value=EMCVMAXCommonData.test_source_volume) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_snapshot_different_sizes_meta_no_fast_success( self, mock_volume_type, mock_volume, mock_meta, mock_size, mock_pool): self.data.test_volume['volume_name'] = "vmax-1234567" common = self.driver.common volumeDict = {'classname': u'Symm_StorageVolume', 'keybindings': EMCVMAXCommonData.keybindings} common.provision.create_volume_from_pool = ( mock.Mock(return_value=(volumeDict, 0))) common.provision.get_volume_dict_from_job = ( mock.Mock(return_value=volumeDict)) self.driver.create_snapshot(self.data.test_volume) def test_create_snapshot_no_fast_failed(self): self.data.test_volume['volume_name'] = "vmax-1234567" self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, self.data.test_volume) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', return_value=[1234567]) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_meta_head', return_value=[EMCVMAXCommonData.test_volume]) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 
'find_sync_sv_by_target', return_value=(None, None)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_volume_from_same_size_meta_snapshot( self, mock_volume_type, mock_sync_sv, mock_meta, mock_size): self.data.test_volume['volume_name'] = "vmax-1234567" self.driver.create_volume_from_snapshot( self.data.test_volume, self.data.test_volume) def test_create_volume_from_snapshot_no_fast_failed(self): self.data.test_volume['volume_name'] = "vmax-1234567" self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.data.test_volume, self.data.test_volume) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_meta_head', return_value=None) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_find_storage_sync_sv_sv', return_value=(None, None)) @mock.patch.object( FakeDB, 'volume_get', return_value=EMCVMAXCommonData.test_source_volume) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_clone_simple_volume_no_fast_success( self, mock_volume_type, mock_volume, mock_sync_sv, mock_simple_volume): self.data.test_volume['volume_name'] = "vmax-1234567" self.driver.create_cloned_volume(self.data.test_volume, EMCVMAXCommonData.test_source_volume) # Bug https://bugs.launchpad.net/cinder/+bug/1440154 @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_meta_head', return_value=[EMCVMAXCommonData.test_volume]) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', return_value=[1234567, 7654321]) @mock.patch.object( FakeDB, 'volume_get', return_value=EMCVMAXCommonData.test_source_volume) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) @mock.patch.object( emc_vmax_provision.EMCVMAXProvision, 'create_element_replica') @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_sync_sv_by_target', return_value=(None, None)) def test_create_clone_assert_clean_up_target_volume( self, mock_sync, mock_create_replica, mock_volume_type, mock_volume, mock_capacities, mock_pool, mock_meta_volume): self.data.test_volume['volume_name'] = "vmax-1234567" e = exception.VolumeBackendAPIException('CreateElementReplica Ex') common = self.driver.common common._delete_from_pool = mock.Mock(return_value=0) conn = self.fake_ecom_connection() storageConfigService = ( common.utils.find_storage_configuration_service( conn, self.data.storage_system)) mock_create_replica.side_effect = e self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, self.data.test_volume, EMCVMAXCommonData.test_source_volume) extraSpecs = common._initial_setup(self.data.test_volume) fastPolicy = extraSpecs['storagetype:fastpolicy'] targetInstance = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) common._delete_from_pool.assert_called_with(storageConfigService, targetInstance, targetInstance['Name'], targetInstance['DeviceID'], fastPolicy, extraSpecs) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_migrate_volume_no_fast_success(self, _mock_volume_type): self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume, self.data.test_host) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 
'parse_pool_instance_id', return_value=('silver', 'SYMMETRIX+000195900551')) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_retype_volume_no_fast_success( self, _mock_volume_type, mock_values): self.driver.retype( self.data.test_ctxt, self.data.test_volume, self.data.new_type, self.data.diff, self.data.test_host) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_CG_no_fast_success( self, _mock_volume_type, _mock_storage_system): self.driver.create_consistencygroup( self.data.test_ctxt, self.data.test_CG) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_members_of_replication_group', return_value=None) @mock.patch.object( FakeDB, 'volume_get_all_by_group', return_value=None) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_delete_CG_no_volumes_no_fast_success( self, _mock_volume_type, _mock_storage_system, _mock_db_volumes, _mock_members): self.driver.delete_consistencygroup( self.data.test_ctxt, self.data.test_CG, []) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_delete_CG_with_volumes_no_fast_success( self, _mock_volume_type, _mock_storage_system): self.driver.delete_consistencygroup( self.data.test_ctxt, self.data.test_CG, []) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_group_sync_rg_by_target', return_value="") @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_members_of_replication_group', return_value=()) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_find_consistency_group', return_value=(None, EMCVMAXCommonData.test_CG)) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_snapshot_for_CG_no_fast_success( self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members, _mock_rg): self.driver.create_cgsnapshot( self.data.test_ctxt, self.data.test_CG_snapshot, []) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_delete_snapshot_for_CG_no_fast_success( self, _mock_volume_type, _mock_storage): self.driver.delete_cgsnapshot( self.data.test_ctxt, self.data.test_CG_snapshot, []) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_update_CG_add_volume_no_fast_success( self, _mock_volume_type, _mock_storage_system): add_volumes = [] add_volumes.append(self.data.test_source_volume) remove_volumes = None 
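        # update_consistencygroup takes independent add/remove lists; passing
        # None for one of them (as with remove_volumes here) means no
        # membership change on that side.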
self.driver.update_consistencygroup( self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) # Multiple volumes add_volumes.append(self.data.test_source_volume) self.driver.update_consistencygroup( self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) # Can't find CG self.driver.common._find_consistency_group = mock.Mock( return_value=None) self.assertRaises(exception.ConsistencyGroupNotFound, self.driver.update_consistencygroup, self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_update_CG_remove_volume_no_fast_success( self, _mock_volume_type, _mock_storage_system): remove_volumes = [] remove_volumes.append(self.data.test_source_volume) add_volumes = None self.driver.update_consistencygroup( self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) # Multiple volumes remove_volumes.append(self.data.test_source_volume) self.driver.update_consistencygroup( self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) # Bug https://bugs.launchpad.net/cinder/+bug/1442376 @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', return_value=[1234567, 7654321]) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_meta_head', return_value=[EMCVMAXCommonData.test_volume]) @mock.patch.object( FakeDB, 'volume_get', return_value=EMCVMAXCommonData.test_source_volume) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_clone_with_different_meta_sizes( self, mock_volume_type, mock_volume, mock_meta, mock_size, mock_pool): self.data.test_volume['volume_name'] = "vmax-1234567" common = self.driver.common volumeDict = {'classname': u'Symm_StorageVolume', 'keybindings': EMCVMAXCommonData.keybindings} volume = {'size': 0} common.provision.create_volume_from_pool = ( mock.Mock(return_value=(volumeDict, volume['size']))) common.provision.get_volume_dict_from_job = ( mock.Mock(return_value=volumeDict)) common._create_composite_volume = ( mock.Mock(return_value=(0, volumeDict, EMCVMAXCommonData.storage_system))) self.driver.create_cloned_volume(self.data.test_volume, EMCVMAXCommonData.test_source_volume) extraSpecs = self.driver.common._initial_setup(self.data.test_volume) common._create_composite_volume.assert_called_with( volume, "TargetBaseVol", 1234567, extraSpecs, 1) def test_find_volume_by_device_id_on_array(self): conn = self.fake_ecom_connection() utils = self.driver.common.utils volumeInstanceName = utils.find_volume_by_device_id_on_array( conn, self.data.storage_system, self.data.test_volume['device_id']) expectVolume = {} expectVolume['CreationClassName'] = 'Symm_StorageVolume' expectVolume['DeviceID'] = self.data.test_volume['device_id'] expect = conn.GetInstance(expectVolume) self.assertEqual(expect, volumeInstanceName) def test_get_volume_element_name(self): volumeId = 'ea95aa39-080b-4f11-9856-a03acf9112ad' utils = self.driver.common.utils volumeElementName = utils.get_volume_element_name(volumeId) expectVolumeElementName = ( emc_vmax_utils.VOLUME_ELEMENT_NAME_PREFIX + volumeId) self.assertEqual(expectVolumeElementName, 
volumeElementName) def test_get_associated_replication_from_source_volume(self): conn = self.fake_ecom_connection() utils = self.driver.common.utils repInstanceName = ( utils.get_associated_replication_from_source_volume( conn, self.data.storage_system, self.data.test_volume['device_id'])) expectInstanceName = ( conn.EnumerateInstanceNames('SE_StorageSynchronized_SV_SV')[0]) self.assertEqual(expectInstanceName, repInstanceName) def test_get_array_and_device_id_success(self): deviceId = '0123' arrayId = u'array1234' external_ref = {u'source-name': deviceId} volume = {'volume_metadata': [{'key': 'array', 'value': arrayId}] } utils = self.driver.common.utils (arrId, devId) = utils.get_array_and_device_id(volume, external_ref) self.assertEqual(arrayId, arrId) self.assertEqual(deviceId, devId) def test_get_array_and_device_id_failed(self): deviceId = '0123' arrayId = u'array1234' external_ref = {u'no-source-name': deviceId} volume = {'volume_metadata': [{'key': 'array', 'value': arrayId}] } utils = self.driver.common.utils self.assertRaises(exception.VolumeBackendAPIException, utils.get_array_and_device_id, volume, external_ref) def test_rename_volume(self): conn = self.fake_ecom_connection() utils = self.driver.common.utils newName = 'new_name' volume = {} volume['CreationClassName'] = 'Symm_StorageVolume' volume['DeviceID'] = '1' volume['ElementName'] = 'original_name' pywbem = mock.Mock() pywbem.cim_obj = mock.Mock() pywbem.cim_obj.CIMInstance = mock.Mock() emc_vmax_utils.pywbem = pywbem volumeInstance = conn.GetInstance(volume) originalName = volumeInstance['ElementName'] volumeInstance = utils.rename_volume(conn, volumeInstance, newName) self.assertEqual(newName, volumeInstance['ElementName']) volumeInstance = utils.rename_volume( conn, volumeInstance, originalName) self.assertEqual(originalName, volumeInstance['ElementName']) def test_get_smi_version(self): conn = self.fake_ecom_connection() utils = self.driver.common.utils version = utils.get_smi_version(conn) expected = int(str(self.data.majorVersion) + str(self.data.minorVersion) + str(self.data.revNumber)) self.assertEqual(version, expected) def test_get_pool_name(self): conn = self.fake_ecom_connection() utils = self.driver.common.utils poolInstanceName = {} poolInstanceName['InstanceID'] = "SATA_GOLD1" poolInstanceName['CreationClassName'] = 'Symm_VirtualProvisioningPool' poolName = utils.get_pool_name(conn, poolInstanceName) self.assertEqual(poolName, self.data.poolname) def test_get_meta_members_capacity_in_byte(self): conn = self.fake_ecom_connection() utils = self.driver.common.utils memberVolumeInstanceNames = [] volumeHead = EMC_StorageVolume() volumeHead.classname = 'Symm_StorageVolume' blockSize = self.data.block_size volumeHead['ConsumableBlocks'] = ( self.data.metaHead_volume['ConsumableBlocks']) volumeHead['BlockSize'] = blockSize volumeHead['DeviceID'] = self.data.metaHead_volume['DeviceID'] memberVolumeInstanceNames.append(volumeHead) metaMember1 = EMC_StorageVolume() metaMember1.classname = 'Symm_StorageVolume' metaMember1['ConsumableBlocks'] = ( self.data.meta_volume1['ConsumableBlocks']) metaMember1['BlockSize'] = blockSize metaMember1['DeviceID'] = self.data.meta_volume1['DeviceID'] memberVolumeInstanceNames.append(metaMember1) metaMember2 = EMC_StorageVolume() metaMember2.classname = 'Symm_StorageVolume' metaMember2['ConsumableBlocks'] = ( self.data.meta_volume2['ConsumableBlocks']) metaMember2['BlockSize'] = blockSize metaMember2['DeviceID'] = self.data.meta_volume2['DeviceID'] 
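# The expected list computed below assumes the head of a composite (meta)
# volume reports ConsumableBlocks for the whole meta device, so the head's
# own contribution is its count minus every member's count; each entry is
# then converted to bytes by multiplying by BlockSize.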
memberVolumeInstanceNames.append(metaMember2) capacities = utils.get_meta_members_capacity_in_byte( conn, memberVolumeInstanceNames) headSize = ( volumeHead['ConsumableBlocks'] - metaMember1['ConsumableBlocks'] - metaMember2['ConsumableBlocks']) expected = [headSize * blockSize, metaMember1['ConsumableBlocks'] * blockSize, metaMember2['ConsumableBlocks'] * blockSize] self.assertEqual(capacities, expected) def test_get_composite_elements(self): conn = self.fake_ecom_connection() utils = self.driver.common.utils volumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeInstance = conn.GetInstance(volumeInstanceName) memberVolumeInstanceNames = utils.get_composite_elements( conn, volumeInstance) expected = [self.data.metaHead_volume, self.data.meta_volume1, self.data.meta_volume2] self.assertEqual(memberVolumeInstanceNames, expected) def test_get_volume_model_updates(self): utils = self.driver.common.utils status = 'status-string' volumes = utils.get_volume_model_updates( None, self.driver.db.volume_get_all_by_group("", 5), self.data.test_CG['id'], status) self.assertEqual(status, volumes[0]['status']) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_group_sync_rg_by_target', return_value="") @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_find_consistency_group', return_value=(None, EMCVMAXCommonData.test_CG)) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSINoFAST'}) def test_create_consistencygroup_from_src( self, _mock_volume_type, _mock_storage, _mock_cg, _mock_rg): volumes = [] volumes.append(self.data.test_source_volume) snapshots = [] self.data.test_snapshot['volume_size'] = "10" snapshots.append(self.data.test_snapshot) model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src( self.data.test_ctxt, self.data.test_CG, volumes, self.data.test_CG_snapshot, snapshots)) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) self.assertEqual([{'status': 'available', 'id': '2'}], volumes_model_update) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_update_pool_stats', return_value={1, 2, 3}) def test_ssl_support(self, pool_stats): self.driver.common.update_volume_stats() self.assertTrue(self.driver.common.ecomUseSSL) def _cleanup(self): if self.config_file_path: bExists = os.path.exists(self.config_file_path) if bExists: os.remove(self.config_file_path) shutil.rmtree(self.tempdir) class EMCVMAXISCSIDriverFastTestCase(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() self.tempdir = tempfile.mkdtemp() super(EMCVMAXISCSIDriverFastTestCase, self).setUp() self.config_file_path = None self.create_fake_config_file_fast() self.addCleanup(self._cleanup) configuration = mock.Mock() configuration.cinder_emc_config_file = self.config_file_path configuration.safe_get.return_value = 'ISCSIFAST' configuration.config_group = 'ISCSIFAST' self.stubs.Set(emc_vmax_iscsi.EMCVMAXISCSIDriver, 'smis_do_iscsi_discovery', self.fake_do_iscsi_discovery) self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', self.fake_ecom_connection) instancename = FakeCIMInstanceName() self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', instancename.fake_getinstancename) self.stubs.Set(time, 'sleep', self.fake_sleep) self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', self.fake_is_v3) driver = 
emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver
def create_fake_config_file_fast(self): doc = minidom.Document() emc = doc.createElement("EMC") doc.appendChild(emc) array = doc.createElement("Array") arraytext = doc.createTextNode("1234567891011") emc.appendChild(array) array.appendChild(arraytext) fastPolicy = doc.createElement("FastPolicy") fastPolicyText = doc.createTextNode("GOLD1") emc.appendChild(fastPolicy) fastPolicy.appendChild(fastPolicyText) ecomserverip = doc.createElement("EcomServerIp") ecomserveriptext = doc.createTextNode("1.1.1.1") emc.appendChild(ecomserverip) ecomserverip.appendChild(ecomserveriptext) ecomserverport = doc.createElement("EcomServerPort") ecomserverporttext = doc.createTextNode("10") emc.appendChild(ecomserverport) ecomserverport.appendChild(ecomserverporttext) ecomusername = doc.createElement("EcomUserName") ecomusernametext = doc.createTextNode("user") emc.appendChild(ecomusername) ecomusername.appendChild(ecomusernametext) ecompassword = doc.createElement("EcomPassword") ecompasswordtext = doc.createTextNode("pass") emc.appendChild(ecompassword) ecompassword.appendChild(ecompasswordtext) timeout = doc.createElement("Timeout") timeouttext = doc.createTextNode("0") emc.appendChild(timeout) timeout.appendChild(timeouttext) portgroup = doc.createElement("PortGroup") portgrouptext = doc.createTextNode(self.data.port_group) portgroup.appendChild(portgrouptext) pool = doc.createElement("Pool") pooltext = doc.createTextNode("gold") emc.appendChild(pool) pool.appendChild(pooltext) portgroups = doc.createElement("PortGroups") portgroups.appendChild(portgroup) emc.appendChild(portgroups) filename = 'cinder_emc_config_ISCSIFAST.xml' self.config_file_path = self.tempdir + '/' + filename f = open(self.config_file_path, 'w') doc.writexml(f) f.close()
def fake_ecom_connection(self): conn = FakeEcomConnection() return conn
def fake_do_iscsi_discovery(self, volume): output = [] item = '10.10.0.50:3260,1 iqn.1992-04.com.emc:50000973f006dd80' output.append(item) return output
def fake_sleep(self, seconds): return
def fake_is_v3(self, conn, serialNumber): return False
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_capacities_associated_to_policy', return_value=(1234, 1200)) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_tier_policy_by_name', return_value=None) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'is_tiering_policy_enabled', return_value=True) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_storageSystem', return_value=None) def test_get_volume_stats_fast(self, mock_storage_system, mock_is_fast_enabled, mock_get_policy, mock_capacity): self.driver.get_volume_stats(True)
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_create_volume_fast_success( self, _mock_volume_type, mock_storage_system, mock_pool_policy): self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:stripedmetacount': '4', 'volume_backend_name': 'ISCSIFAST'}) def test_create_volume_fast_striped_success( self, _mock_volume_type, mock_storage_system, mock_pool_policy): self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_create_volume_in_CG_fast_success( self, _mock_volume_type, mock_storage_system, mock_pool_policy): self.driver.create_volume(self.data.test_volume_CG)
@mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_delete_volume_fast_success( self, _mock_volume_type, mock_storage_group): self.driver.delete_volume(self.data.test_volume)
def test_create_volume_fast_failed(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.data.test_failed_volume)
@mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_delete_volume_fast_notfound( self, _mock_volume_type, mock_wrapper): notfound_delete_vol = {} notfound_delete_vol['name'] = 'notfound_delete_vol' notfound_delete_vol['id'] = '10' notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume' notfound_delete_vol['SystemName'] = self.data.storage_system notfound_delete_vol['DeviceID'] = notfound_delete_vol['id'] notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' notfound_delete_vol['host'] = self.data.fake_host name = {} name['classname'] = 'Symm_StorageVolume' keys = {} keys['CreationClassName'] = notfound_delete_vol['CreationClassName'] keys['SystemName'] = notfound_delete_vol['SystemName'] keys['DeviceID'] = notfound_delete_vol['DeviceID'] keys['SystemCreationClassName'] = ( notfound_delete_vol['SystemCreationClassName']) name['keybindings'] = keys notfound_delete_vol['volume_type_id'] = 'abc' notfound_delete_vol['provider_location'] = None self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume',
return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_already_mapped_fast_success( self, _mock_volume_type, mock_wrap_group, mock_wrap_device, mock_is_same_host): self.driver.common._get_correct_port_group = mock.Mock( return_value=self.data.port_group) self.driver.initialize_connection(self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, 'find_device_number', return_value={'storagesystem': EMCVMAXCommonData.storage_system}) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) def test_map_fast_failed(self, mock_wrap_group, mock_wrap_device): self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, 'get_initiator_group_from_masking_view', return_value='myInitGroup') @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_find_initiator_masking_group', return_value='myInitGroup') @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_storage_masking_group', return_value=EMCVMAXCommonData.storagegroupname) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_detach_fast_success( self, mock_volume_type, mock_storage_group, mock_ig, mock_igc): self.driver.terminate_connection( self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_size', return_value='2147483648') @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_extend_volume_fast_success( self, _mock_volume_type, mock_volume_size): newSize = '2' self.driver.extend_volume(self.data.test_volume, newSize) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'check_if_volume_is_extendable', return_value='False') @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_extend_volume_striped_fast_failed( self, _mock_volume_type, _mock_is_extendable): newSize = '2' self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.data.test_volume, newSize) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', return_value=[1234567, 7654321]) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_meta_head', return_value=[EMCVMAXCommonData.test_volume]) @mock.patch.object( FakeDB, 'volume_get', return_value=EMCVMAXCommonData.test_source_volume) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_create_snapshot_different_sizes_meta_fast_success( self, mock_volume_type, mock_volume, mock_meta, mock_size, mock_pool, mock_policy): self.data.test_volume['volume_name'] = "vmax-1234567" common = self.driver.common volumeDict = {'classname': u'Symm_StorageVolume', 'keybindings': EMCVMAXCommonData.keybindings} common.provision.create_volume_from_pool = ( mock.Mock(return_value=(volumeDict, 0))) common.provision.get_volume_dict_from_job = ( mock.Mock(return_value=volumeDict)) common.fast.is_volume_in_default_SG = ( 
mock.Mock(return_value=True)) self.driver.create_snapshot(self.data.test_volume) def test_create_snapshot_fast_failed(self): self.data.test_volume['volume_name'] = "vmax-1234567" self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, self.data.test_volume) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', return_value=[1234567]) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_meta_head', return_value=[EMCVMAXCommonData.test_volume]) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_sync_sv_by_target', return_value=(None, None)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_create_volume_from_same_size_meta_snapshot( self, mock_volume_type, mock_sync_sv, mock_meta, mock_size): self.data.test_volume['volume_name'] = "vmax-1234567" common = self.driver.common common.fast.is_volume_in_default_SG = mock.Mock(return_value=True) self.driver.create_volume_from_snapshot( self.data.test_volume, self.data.test_volume) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_sync_sv_by_target', return_value=(None, None)) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_replication_service', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST', 'FASTPOLICY': 'FC_GOLD1'}) def test_create_volume_from_snapshot_fast_failed( self, mock_volume_type, mock_rep_service, mock_sync_sv): self.data.test_volume['volume_name'] = "vmax-1234567" self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.data.test_volume, EMCVMAXCommonData.test_source_volume) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_meta_members_capacity_in_byte', return_value=[1234567, 7654321]) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_meta_head', return_value=[EMCVMAXCommonData.test_volume]) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( FakeDB, 'volume_get', return_value=EMCVMAXCommonData.test_source_volume) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_create_clone_fast_failed( self, mock_volume_type, mock_vol, mock_policy, mock_meta, mock_size, mock_pool): self.data.test_volume['volume_name'] = "vmax-1234567" self.driver.common._modify_and_get_composite_volume_instance = ( mock.Mock(return_value=(1, None))) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, self.data.test_volume, EMCVMAXCommonData.test_source_volume) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_migrate_volume_fast_success(self, _mock_volume_type): self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume, self.data.test_host) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'parse_pool_instance_id', return_value=('silver', 'SYMMETRIX+000195900551')) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_retype_volume_fast_success( self, _mock_volume_type, mock_values, mock_wrap): 
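# retype() receives the same (context, volume, new_type, diff, host) tuple
# the scheduler would pass; the patched parse_pool_instance_id pins the
# test to pool 'silver' on SYMMETRIX+000195900551 so no live ECOM
# connection is required.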
self.driver.retype( self.data.test_ctxt, self.data.test_volume, self.data.new_type, self.data.diff, self.data.test_host) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_create_CG_fast_success( self, _mock_volume_type, _mock_storage_system): self.driver.create_consistencygroup( self.data.test_ctxt, self.data.test_CG) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_members_of_replication_group', return_value=None) @mock.patch.object( FakeDB, 'volume_get_all_by_group', return_value=None) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_delete_CG_no_volumes_fast_success( self, _mock_volume_type, _mock_storage_system, _mock_db_volumes, _mock_members): self.driver.delete_consistencygroup( self.data.test_ctxt, self.data.test_CG, []) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_delete_CG_with_volumes_fast_success( self, _mock_volume_type, _mock_storage_system): self.driver.delete_consistencygroup( self.data.test_ctxt, self.data.test_CG, []) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_group_sync_rg_by_target', return_value="") @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_members_of_replication_group', return_value=()) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_find_consistency_group', return_value=(None, EMCVMAXCommonData.test_CG)) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_create_snapshot_for_CG_no_fast_success( self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members, _mock_rg): self.driver.create_cgsnapshot( self.data.test_ctxt, self.data.test_CG_snapshot, []) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_delete_snapshot_for_CG_no_fast_success( self, _mock_volume_type, _mock_storage): self.driver.delete_cgsnapshot( self.data.test_ctxt, self.data.test_CG_snapshot, []) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_update_CG_add_volume_fast_success( self, _mock_volume_type, _mock_storage_system): add_volumes = [] add_volumes.append(self.data.test_source_volume) remove_volumes = None self.driver.update_consistencygroup( self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) # Multiple volumes add_volumes.append(self.data.test_source_volume) self.driver.update_consistencygroup( self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) 
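# --- Illustrative sketch only (not part of the cinder test suite); the
# _Api class and every name below are hypothetical. It demonstrates the
# stacking rule every test class in this file relies on: @mock.patch.object
# decorators are applied bottom-up, so the innermost decorator's mock is
# passed to the test method first.
import mock  # already imported at module top; repeated so the sketch stands alone

class _Api(object):
    def ping(self):
        return 'real-ping'

    def pong(self):
        return 'real-pong'

@mock.patch.object(_Api, 'ping', return_value='fake-ping')  # outer, applied second
@mock.patch.object(_Api, 'pong', return_value='fake-pong')  # inner, applied first
def _check_patch_order(mock_pong, mock_ping):
    # The inner (bottom-most) decorator's mock arrives first.
    api = _Api()
    assert api.ping() == 'fake-ping'
    assert api.pong() == 'fake-pong'

_check_patch_order()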
@mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'ISCSIFAST'}) def test_update_CG_remove_volume_fast_success( self, _mock_volume_type, _mock_storage_system): remove_volumes = [] remove_volumes.append(self.data.test_source_volume) add_volumes = None self.driver.update_consistencygroup( self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) # Multiple volumes remove_volumes.append(self.data.test_source_volume) self.driver.update_consistencygroup( self.data.test_ctxt, self.data.test_CG, add_volumes, remove_volumes) def _cleanup(self): bExists = os.path.exists(self.config_file_path) if bExists: os.remove(self.config_file_path) shutil.rmtree(self.tempdir) class EMCVMAXFCDriverNoFastTestCase(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() self.tempdir = tempfile.mkdtemp() super(EMCVMAXFCDriverNoFastTestCase, self).setUp() self.config_file_path = None self.create_fake_config_file_no_fast() self.addCleanup(self._cleanup) configuration = mock.Mock() configuration.cinder_emc_config_file = self.config_file_path configuration.safe_get.return_value = 'FCNoFAST' configuration.config_group = 'FCNoFAST' self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', self.fake_ecom_connection) instancename = FakeCIMInstanceName() self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', instancename.fake_getinstancename) self.stubs.Set(time, 'sleep', self.fake_sleep) self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', self.fake_is_v3) driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration) driver.db = FakeDB() driver.common.conn = FakeEcomConnection() driver.zonemanager_lookup_service = FakeLookupService() self.driver = driver self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object) def create_fake_config_file_no_fast(self): doc = minidom.Document() emc = doc.createElement("EMC") doc.appendChild(emc) array = doc.createElement("Array") arraytext = doc.createTextNode("1234567891011") emc.appendChild(array) array.appendChild(arraytext) ecomserverip = doc.createElement("EcomServerIp") ecomserveriptext = doc.createTextNode("1.1.1.1") emc.appendChild(ecomserverip) ecomserverip.appendChild(ecomserveriptext) ecomserverport = doc.createElement("EcomServerPort") ecomserverporttext = doc.createTextNode("10") emc.appendChild(ecomserverport) ecomserverport.appendChild(ecomserverporttext) ecomusername = doc.createElement("EcomUserName") ecomusernametext = doc.createTextNode("user") emc.appendChild(ecomusername) ecomusername.appendChild(ecomusernametext) ecompassword = doc.createElement("EcomPassword") ecompasswordtext = doc.createTextNode("pass") emc.appendChild(ecompassword) ecompassword.appendChild(ecompasswordtext) portgroup = doc.createElement("PortGroup") portgrouptext = doc.createTextNode(self.data.port_group) portgroup.appendChild(portgrouptext) portgroups = doc.createElement("PortGroups") portgroups.appendChild(portgroup) emc.appendChild(portgroups) pool = doc.createElement("Pool") pooltext = doc.createTextNode("gold") emc.appendChild(pool) pool.appendChild(pooltext) timeout = doc.createElement("Timeout") timeouttext = doc.createTextNode("0") emc.appendChild(timeout) timeout.appendChild(timeouttext) filename = 'cinder_emc_config_FCNoFAST.xml' self.config_file_path = self.tempdir + '/' + filename f = open(self.config_file_path, 'w') doc.writexml(f) f.close() def 
fake_ecom_connection(self): conn = FakeEcomConnection() return conn
def fake_sleep(self, seconds): return
def fake_is_v3(self, conn, serialNumber): return False
@mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_pool_capacities', return_value=(1234, 1200)) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'is_tiering_policy_enabled', return_value=False) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_storageSystem', return_value=None) def test_get_volume_stats_no_fast(self, mock_storage_system, mock_is_fast_enabled, mock_capacity): self.driver.get_volume_stats(True)
@mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_create_volume_no_fast_success( self, _mock_volume_type, mock_storage_system): self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:stripedmetacount': '4', 'volume_backend_name': 'FCNoFAST'}) def test_create_volume_no_fast_striped_success( self, _mock_volume_type, mock_storage_system): self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_create_volume_in_CG_no_fast_success( self, _mock_volume_type, mock_storage_system): self.driver.create_volume(self.data.test_volume_CG)
@mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_delete_volume_no_fast_success( self, _mock_volume_type, mock_storage_system): self.driver.delete_volume(self.data.test_volume)
def test_create_volume_no_fast_failed(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.data.test_failed_volume)
@mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_delete_volume_no_fast_notfound(self, _mock_volume_type): notfound_delete_vol = {} notfound_delete_vol['name'] = 'notfound_delete_vol' notfound_delete_vol['id'] = '10' notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume' notfound_delete_vol['SystemName'] = self.data.storage_system notfound_delete_vol['DeviceID'] = notfound_delete_vol['id'] notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' notfound_delete_vol['host'] = self.data.fake_host name = {} name['classname'] = 'Symm_StorageVolume' keys = {} keys['CreationClassName'] = notfound_delete_vol['CreationClassName'] keys['SystemName'] = notfound_delete_vol['SystemName'] keys['DeviceID'] = notfound_delete_vol['DeviceID'] keys['SystemCreationClassName'] = ( notfound_delete_vol['SystemCreationClassName']) name['keybindings'] = keys notfound_delete_vol['volume_type_id'] = 'abc' notfound_delete_vol['provider_location'] = None self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None,
EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_delete_volume_failed( self, _mock_volume_type, mock_storage_system): self.driver.create_volume(self.data.failed_delete_vol) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.data.failed_delete_vol) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_is_same_host', return_value=True) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, 'get_masking_view_from_storage_group', return_value=EMCVMAXCommonData.lunmaskctrl_name) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST', 'FASTPOLICY': 'FC_GOLD1'}) def test_map_lookup_service_no_fast_success( self, _mock_volume_type, mock_maskingview, mock_is_same_host): self.data.test_volume['volume_name'] = "vmax-1234567" common = self.driver.common common.get_target_wwns_from_masking_view = mock.Mock( return_value=EMCVMAXCommonData.target_wwns) common._get_correct_port_group = mock.Mock( return_value=self.data.port_group) lookup_service = self.driver.zonemanager_lookup_service lookup_service.get_device_mapping_from_network = mock.Mock( return_value=EMCVMAXCommonData.device_map) data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) common.get_target_wwns_from_masking_view.assert_called_once_with( EMCVMAXCommonData.storage_system, self.data.test_volume, EMCVMAXCommonData.connector) lookup_service.get_device_mapping_from_network.assert_called_once_with( EMCVMAXCommonData.connector['wwpns'], EMCVMAXCommonData.target_wwns) # Test the lookup service code path. for init, target in data['data']['initiator_target_map'].items(): self.assertEqual(init, target[0][::-1]) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, 'find_device_number', return_value={'Name': "0001"}) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST', 'FASTPOLICY': 'FC_GOLD1'}) def test_map_no_fast_failed(self, _mock_volume_type, mock_wrap_device): self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.data.test_volume, self.data.connector) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, 'get_initiator_group_from_masking_view', return_value='myInitGroup') @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_find_initiator_masking_group', return_value='myInitGroup') @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, 'get_masking_view_by_volume', return_value=EMCVMAXCommonData.lunmaskctrl_name) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_detach_no_fast_last_volume_success( self, mock_volume_type, mock_mv, mock_ig, mock_igc): self.driver.terminate_connection(self.data.test_source_volume, self.data.connector) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'get_volume_size', return_value='2147483648') @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_extend_volume_no_fast_success(self, _mock_volume_type, _mock_volume_size): newSize = '2' self.driver.extend_volume(self.data.test_volume, newSize) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'check_if_volume_is_extendable', return_value='False') @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def 
test_extend_volume_striped_no_fast_failed( self, _mock_volume_type, _mock_is_extendable): newSize = '2' self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.data.test_volume, newSize) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_migrate_volume_no_fast_success(self, _mock_volume_type): self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume, self.data.test_host) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'parse_pool_instance_id', return_value=('silver', 'SYMMETRIX+000195900551')) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_retype_volume_no_fast_success( self, _mock_volume_type, mock_values): self.driver.retype( self.data.test_ctxt, self.data.test_volume, self.data.new_type, self.data.diff, self.data.test_host) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_create_CG_no_fast_success( self, _mock_volume_type, _mock_storage_system): self.driver.create_consistencygroup( self.data.test_ctxt, self.data.test_CG) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_members_of_replication_group', return_value=None) @mock.patch.object( FakeDB, 'volume_get_all_by_group', return_value=None) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_delete_CG_no_volumes_no_fast_success( self, _mock_volume_type, _mock_storage_system, _mock_db_volumes, _mock_members): self.driver.delete_consistencygroup( self.data.test_ctxt, self.data.test_CG, []) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_delete_CG_with_volumes_no_fast_success( self, _mock_volume_type, _mock_storage_system): self.driver.delete_consistencygroup( self.data.test_ctxt, self.data.test_CG, []) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_group_sync_rg_by_target', return_value="") @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_members_of_replication_group', return_value=()) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_find_consistency_group', return_value=(None, EMCVMAXCommonData.test_CG)) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_create_snapshot_for_CG_no_fast_success( self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members, _mock_rg): self.driver.create_cgsnapshot( self.data.test_ctxt, self.data.test_CG_snapshot, []) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCNoFAST'}) def test_delete_snapshot_for_CG_no_fast_success( self, _mock_volume_type, _mock_storage): 
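# delete_cgsnapshot() here only needs the patched pool/storage-system
# lookup and extra specs; everything else runs against the
# FakeEcomConnection wired up in setUp().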
self.driver.delete_cgsnapshot( self.data.test_ctxt, self.data.test_CG_snapshot, []) def test_manage_existing_get_size(self): volume = {} metadata = {'key': 'array', 'value': '12345'} volume['volume_metadata'] = [metadata] external_ref = {'source-name': '0123'} utils = self.driver.common.utils gbSize = 2 utils.get_volume_size = mock.Mock( return_value=gbSize * units.Gi) volumeInstanceName = {'CreationClassName': "Symm_StorageVolume", 'DeviceID': "0123", 'SystemName': "12345"} utils.find_volume_by_device_id_on_array = mock.Mock( return_value=volumeInstanceName) size = self.driver.manage_existing_get_size(volume, external_ref) self.assertEqual(gbSize, size) def test_manage_existing_no_fast_success(self): volume = {} metadata = {'key': 'array', 'value': '12345'} poolInstanceName = {} storageSystem = {} poolInstanceName['InstanceID'] = "SATA_GOLD1" storageSystem['InstanceID'] = "SYMMETRIX+00019870000" volume['volume_metadata'] = [metadata] volume['name'] = "test-volume" external_ref = {'source-name': '0123'} utils = self.driver.common.utils gbSize = 2 utils.get_volume_size = mock.Mock( return_value=gbSize * units.Gi) utils.get_associated_replication_from_source_volume = mock.Mock( return_value=None) utils.get_assoc_pool_from_volume = mock.Mock( return_value=(poolInstanceName)) vol = EMC_StorageVolume() vol['CreationClassName'] = 'Symm_StorageVolume' vol['ElementName'] = 'OS-' + volume['name'] vol['DeviceID'] = external_ref['source-name'] vol['SystemName'] = storageSystem['InstanceID'] vol['SystemCreationClassName'] = 'Symm_StorageSystem' vol.path = vol utils.rename_volume = mock.Mock( return_value=vol) common = self.driver.common common._initial_setup = mock.Mock( return_value={'volume_backend_name': 'FCNoFAST', 'storagetype:fastpolicy': None}) common._get_pool_and_storage_system = mock.Mock( return_value=(poolInstanceName, storageSystem)) volumeInstanceName = {'CreationClassName': "Symm_StorageVolume", 'DeviceID': "0123", 'SystemName': "12345"} utils.find_volume_by_device_id_on_array = mock.Mock( return_value=volumeInstanceName) masking = self.driver.common.masking masking.get_masking_view_from_storage_group = mock.Mock( return_value=None) self.driver.manage_existing(volume, external_ref) utils.rename_volume.assert_called_once_with( common.conn, volumeInstanceName, volume['name']) def test_unmanage_no_fast_success(self): keybindings = {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000195900000', 'DeviceID': u'1', 'SystemCreationClassName': u'Symm_StorageSystem'} provider_location = {'classname': 'Symm_StorageVolume', 'keybindings': keybindings} volume = {'name': 'vol1', 'size': 1, 'id': '1', 'device_id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': 'abc', 'provider_location': six.text_type(provider_location), 'status': 'available', 'host': self.data.fake_host, 'NumberOfBlocks': 100, 'BlockSize': self.data.block_size } common = self.driver.common common._initial_setup = mock.Mock( return_value={'volume_backend_name': 'FCNoFAST', 'storagetype:fastpolicy': None}) utils = self.driver.common.utils utils.rename_volume = mock.Mock(return_value=None) self.driver.unmanage(volume) utils.rename_volume.assert_called_once_with( common.conn, common._find_lun(volume), '1') def test_unmanage_no_fast_failed(self): keybindings = {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000195900000', 'DeviceID': u'999', 'SystemCreationClassName': u'Symm_StorageSystem'} provider_location = 
{'classname': 'Symm_StorageVolume', 'keybindings': keybindings} volume = {'name': 'NO_SUCH_VOLUME', 'size': 1, 'id': '999', 'device_id': '999', 'provider_auth': None, 'project_id': 'project', 'display_name': 'No such volume', 'display_description': 'volume not on the array', 'volume_type_id': 'abc', 'provider_location': six.text_type(provider_location), 'status': 'available', 'host': self.data.fake_host, 'NumberOfBlocks': 100, 'BlockSize': self.data.block_size } common = self.driver.common common._initial_setup = mock.Mock( return_value={'volume_backend_name': 'FCNoFAST', 'fastpolicy': None}) self.assertRaises(exception.VolumeBackendAPIException, self.driver.unmanage, volume) def _cleanup(self): bExists = os.path.exists(self.config_file_path) if bExists: os.remove(self.config_file_path) shutil.rmtree(self.tempdir) class EMCVMAXFCDriverFastTestCase(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() self.tempdir = tempfile.mkdtemp() super(EMCVMAXFCDriverFastTestCase, self).setUp() self.config_file_path = None self.create_fake_config_file_fast() self.addCleanup(self._cleanup) configuration = mock.Mock() configuration.cinder_emc_config_file = self.config_file_path configuration.safe_get.return_value = 'FCFAST' configuration.config_group = 'FCFAST' self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection', self.fake_ecom_connection) instancename = FakeCIMInstanceName() self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name', instancename.fake_getinstancename) self.stubs.Set(time, 'sleep', self.fake_sleep) self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3', self.fake_is_v3) driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration) driver.db = FakeDB() driver.common.conn = FakeEcomConnection() driver.zonemanager_lookup_service = None self.driver = driver self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object) self.driver.masking = emc_vmax_masking.EMCVMAXMasking('FC') def create_fake_config_file_fast(self): doc = minidom.Document() emc = doc.createElement("EMC") doc.appendChild(emc) fastPolicy = doc.createElement("FastPolicy") fastPolicyText = doc.createTextNode("GOLD1") emc.appendChild(fastPolicy) fastPolicy.appendChild(fastPolicyText) ecomserverip = doc.createElement("EcomServerIp") ecomserveriptext = doc.createTextNode("1.1.1.1") emc.appendChild(ecomserverip) ecomserverip.appendChild(ecomserveriptext) ecomserverport = doc.createElement("EcomServerPort") ecomserverporttext = doc.createTextNode("10") emc.appendChild(ecomserverport) ecomserverport.appendChild(ecomserverporttext) ecomusername = doc.createElement("EcomUserName") ecomusernametext = doc.createTextNode("user") emc.appendChild(ecomusername) ecomusername.appendChild(ecomusernametext) ecompassword = doc.createElement("EcomPassword") ecompasswordtext = doc.createTextNode("pass") emc.appendChild(ecompassword) ecompassword.appendChild(ecompasswordtext) portgroup = doc.createElement("PortGroup") portgrouptext = doc.createTextNode(self.data.port_group) portgroup.appendChild(portgrouptext) pool = doc.createElement("Pool") pooltext = doc.createTextNode("gold") emc.appendChild(pool) pool.appendChild(pooltext) array = doc.createElement("Array") arraytext = doc.createTextNode("1234567891011") emc.appendChild(array) array.appendChild(arraytext) portgroups = doc.createElement("PortGroups") portgroups.appendChild(portgroup) emc.appendChild(portgroups) timeout = doc.createElement("Timeout") timeouttext = doc.createTextNode("0") emc.appendChild(timeout) timeout.appendChild(timeouttext) filename = 
'cinder_emc_config_FCFAST.xml' self.config_file_path = self.tempdir + '/' + filename f = open(self.config_file_path, 'w') doc.writexml(f) f.close()
def fake_ecom_connection(self): conn = FakeEcomConnection() return conn
def fake_sleep(self, seconds): return
def fake_is_v3(self, conn, serialNumber): return False
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_capacities_associated_to_policy', return_value=(1234, 1200)) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_tier_policy_by_name', return_value=None) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'is_tiering_policy_enabled', return_value=True) @mock.patch.object( emc_vmax_utils.EMCVMAXUtils, 'find_storageSystem', return_value=None) def test_get_volume_stats_fast(self, mock_storage_system, mock_is_fast_enabled, mock_get_policy, mock_capacity): self.driver.get_volume_stats(True)
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCFAST'}) def test_create_volume_fast_success( self, _mock_volume_type, mock_storage_system, mock_pool_policy): self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:stripedmetacount': '4', 'volume_backend_name': 'FCFAST'}) def test_create_volume_fast_striped_success( self, _mock_volume_type, mock_storage_system, mock_pool_policy): self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCFAST'}) def test_create_volume_in_CG_fast_success( self, _mock_volume_type, mock_storage_system, mock_pool_policy): self.driver.create_volume(self.data.test_volume_CG)
@mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCFAST'}) def test_delete_volume_fast_success(self, _mock_volume_type, mock_storage_group): self.driver.delete_volume(self.data.test_volume)
def test_create_volume_fast_failed(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.data.test_failed_volume)
@mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCFAST'}) def test_delete_volume_fast_notfound(self, _mock_volume_type): """Test delete volume with volume not found.""" notfound_delete_vol = {} notfound_delete_vol['name'] = 'notfound_delete_vol' notfound_delete_vol['id'] = '10' notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume' notfound_delete_vol['SystemName'] = self.data.storage_system notfound_delete_vol['DeviceID'] = notfound_delete_vol['id'] notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' notfound_delete_vol['host'] =
self.data.fake_host name = {} name['classname'] = 'Symm_StorageVolume' keys = {} keys['CreationClassName'] = notfound_delete_vol['CreationClassName'] keys['SystemName'] = notfound_delete_vol['SystemName'] keys['DeviceID'] = notfound_delete_vol['DeviceID'] keys['SystemCreationClassName'] = ( notfound_delete_vol['SystemCreationClassName']) name['keybindings'] = keys notfound_delete_vol['volume_type_id'] = 'abc' notfound_delete_vol['provider_location'] = None self.driver.delete_volume(notfound_delete_vol) @mock.patch.object( emc_vmax_fast.EMCVMAXFast, 'get_pool_associated_to_policy', return_value=1) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, '_wrap_get_storage_group_from_volume', return_value=None) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCFAST'}) def test_delete_volume_fast_failed( self, _mock_volume_type, mock_wrapper, mock_storage_system, mock_pool_policy): self.driver.create_volume(self.data.failed_delete_vol) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.data.failed_delete_vol) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_is_same_host', return_value=True) @mock.patch.object( emc_vmax_masking.EMCVMAXMasking, 'get_masking_view_from_storage_group', return_value=EMCVMAXCommonData.lunmaskctrl_name) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'volume_backend_name': 'FCFAST', 'FASTPOLICY': 'FC_GOLD1'}) def test_map_fast_success(self, _mock_volume_type, mock_maskingview, mock_is_same_host): common = self.driver.common common.get_target_wwns = mock.Mock( return_value=EMCVMAXCommonData.target_wwns) self.driver.common._get_correct_port_group = mock.Mock( return_value=self.data.port_group) data = self.driver.initialize_connection( self.data.test_volume, self.data.connector) # Test the no lookup service, pre-zoned case. 
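# The fake fixture pairs each initiator with its reversed WWN string, so
# asserting that init[::-1] appears in the target list is enough to show
# the initiator_target_map was derived from the connector's wwpns.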
        common.get_target_wwns.assert_called_once_with(
            EMCVMAXCommonData.storage_system,
            EMCVMAXCommonData.connector)
        for init, target in data['data']['initiator_target_map'].items():
            self.assertIn(init[::-1], target)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        'find_device_number',
        return_value={'Name': "0001"})
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST',
                      'FASTPOLICY': 'FC_GOLD1'})
    def test_map_fast_failed(self, _mock_volume_type, mock_wrap_device):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          self.data.test_volume,
                          self.data.connector)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        'get_masking_views_by_port_group',
        return_value=[])
    @mock.patch.object(
        emc_vmax_masking.EMCVMAXMasking,
        'get_initiator_group_from_masking_view',
        return_value='myInitGroup')
    @mock.patch.object(
        emc_vmax_masking.EMCVMAXMasking,
        '_find_initiator_masking_group',
        return_value='myInitGroup')
    @mock.patch.object(
        emc_vmax_masking.EMCVMAXMasking,
        'get_masking_view_by_volume',
        return_value=EMCVMAXCommonData.lunmaskctrl_name)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST',
                      'FASTPOLICY': 'FC_GOLD1'})
    def test_detach_fast_success(self, mock_volume_type, mock_maskingview,
                                 mock_ig, mock_igc, mock_mv):
        common = self.driver.common
        common.get_target_wwns = mock.Mock(
            return_value=EMCVMAXCommonData.target_wwns)
        data = self.driver.terminate_connection(self.data.test_volume,
                                                self.data.connector)
        common.get_target_wwns.assert_called_once_with(
            EMCVMAXCommonData.storage_system, EMCVMAXCommonData.connector)
        numTargetWwns = len(EMCVMAXCommonData.target_wwns)
        self.assertEqual(numTargetWwns, len(data['data']))

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'get_volume_size',
        return_value='2147483648')
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_extend_volume_fast_success(self, _mock_volume_type,
                                        _mock_volume_size):
        newSize = '2'
        self.driver.extend_volume(self.data.test_volume, newSize)

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'check_if_volume_is_extendable',
        return_value='False')
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_extend_volume_striped_fast_failed(self, _mock_volume_type,
                                               _mock_is_extendable):
        newSize = '2'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          self.data.test_volume,
                          newSize)

    @mock.patch.object(
        emc_vmax_fast.EMCVMAXFast,
        'get_pool_associated_to_policy',
        return_value=1)
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'get_meta_members_capacity_in_byte',
        return_value=[1234567, 7654321])
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'get_volume_meta_head',
        return_value=[EMCVMAXCommonData.test_volume])
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_create_snapshot_different_sizes_meta_fast_success(
            self, mock_volume_type, mock_volume, mock_meta, mock_size,
            mock_pool, mock_policy):
        self.data.test_volume['volume_name'] = "vmax-1234567"
        common = self.driver.common
        volumeDict = {'classname': u'Symm_StorageVolume',
                      'keybindings': EMCVMAXCommonData.keybindings}
        common.provision.create_volume_from_pool = (
            mock.Mock(return_value=(volumeDict, 0)))
        common.provision.get_volume_dict_from_job = (
            mock.Mock(return_value=volumeDict))
        common.fast.is_volume_in_default_SG = (
            mock.Mock(return_value=True))
        self.driver.create_snapshot(self.data.test_volume)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_validate_pool',
        return_value='Bogus_Pool')
    def test_create_snapshot_fast_failed(self, mock_pool):
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot,
                          self.data.test_volume)

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'get_meta_members_capacity_in_byte',
        return_value=[1234567])
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'get_volume_meta_head',
        return_value=[EMCVMAXCommonData.test_volume])
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'find_sync_sv_by_target',
        return_value=(None, None))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_create_volume_from_same_size_meta_snapshot(
            self, mock_volume_type, mock_sync_sv, mock_meta, mock_size):
        self.data.test_volume['volume_name'] = "vmax-1234567"
        common = self.driver.common
        common.fast.is_volume_in_default_SG = mock.Mock(return_value=True)
        self.driver.create_volume_from_snapshot(
            self.data.test_volume, self.data.test_volume)

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'find_sync_sv_by_target',
        return_value=(None, None))
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'find_replication_service',
        return_value=None)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST',
                      'FASTPOLICY': 'FC_GOLD1'})
    def test_create_volume_from_snapshot_fast_failed(
            self, mock_volume_type, mock_rep_service, mock_sync_sv):
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          self.data.test_volume,
                          EMCVMAXCommonData.test_source_volume)

    def test_create_clone_simple_volume_fast_success(self):
        extraSpecs = {'storagetype:fastpolicy': 'FC_GOLD1',
                      'volume_backend_name': 'FCFAST',
                      'isV3': False}
        self.driver.common._initial_setup = (
            mock.Mock(return_value=extraSpecs))
        self.driver.common.extraSpecs = extraSpecs
        self.driver.utils.is_clone_licensed = (
            mock.Mock(return_value=True))
        FakeDB.volume_get = (
            mock.Mock(return_value=EMCVMAXCommonData.test_source_volume))
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.driver.common.fast.is_volume_in_default_SG = (
            mock.Mock(return_value=True))
        self.driver.utils.isArrayV3 = mock.Mock(return_value=False)
        self.driver.common._find_storage_sync_sv_sv = (
            mock.Mock(return_value=(None, None)))
        self.driver.create_cloned_volume(self.data.test_volume,
                                         EMCVMAXCommonData.test_source_volume)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'get_meta_members_capacity_in_byte',
        return_value=[1234567, 7654321])
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'get_volume_meta_head',
        return_value=[EMCVMAXCommonData.test_volume])
    @mock.patch.object(
        emc_vmax_fast.EMCVMAXFast,
        'get_pool_associated_to_policy',
        return_value=1)
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_create_clone_fast_failed(
            self, mock_volume_type, mock_vol, mock_policy, mock_meta,
            mock_size, mock_pool):
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.driver.common._modify_and_get_composite_volume_instance = (
            mock.Mock(return_value=(1, None)))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          self.data.test_volume,
                          EMCVMAXCommonData.test_source_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_migrate_volume_fast_success(self, _mock_volume_type):
        self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
                                   self.data.test_host)

    @mock.patch.object(
        emc_vmax_masking.EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'parse_pool_instance_id',
        return_value=('silver', 'SYMMETRIX+000195900551'))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_retype_volume_fast_success(
            self, _mock_volume_type, mock_values, mock_wrap):
        self.driver.retype(
            self.data.test_ctxt, self.data.test_volume, self.data.new_type,
            self.data.diff, self.data.test_host)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_create_CG_fast_success(
            self, _mock_volume_type, _mock_storage_system):
        self.driver.create_consistencygroup(
            self.data.test_ctxt, self.data.test_CG)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_members_of_replication_group',
        return_value=None)
    @mock.patch.object(
        FakeDB,
        'volume_get_all_by_group',
        return_value=None)
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_delete_CG_no_volumes_fast_success(
            self, _mock_volume_type, _mock_storage_system,
            _mock_db_volumes, _mock_members):
        self.driver.delete_consistencygroup(
            self.data.test_ctxt, self.data.test_CG, [])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_delete_CG_with_volumes_fast_success(
            self, _mock_volume_type, _mock_storage_system):
        self.driver.delete_consistencygroup(
            self.data.test_ctxt, self.data.test_CG, [])

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'find_group_sync_rg_by_target',
        return_value="")
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_members_of_replication_group',
        return_value=())
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_find_consistency_group',
        return_value=(None, EMCVMAXCommonData.test_CG))
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_create_snapshot_for_CG_no_fast_success(
            self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members,
            _mock_rg):
        self.driver.create_cgsnapshot(
            self.data.test_ctxt, self.data.test_CG_snapshot, [])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'FCFAST'})
    def test_delete_snapshot_for_CG_no_fast_success(
            self, _mock_volume_type, _mock_storage):
        self.driver.delete_cgsnapshot(
            self.data.test_ctxt, self.data.test_CG_snapshot, [])

    # Bug 1385450
    def test_create_clone_without_license(self):
        mockRepServCap = {}
        mockRepServCap['InstanceID'] = 'SYMMETRIX+1385450'
        self.driver.utils.find_replication_service_capabilities = (
            mock.Mock(return_value=mockRepServCap))
        self.driver.utils.is_clone_licensed = (
            mock.Mock(return_value=False))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          self.data.test_volume,
                          EMCVMAXCommonData.test_source_volume)

    def test_manage_existing_fast_failed(self):
        volume = {}
        metadata = {'key': 'array',
                    'value': '12345'}
        poolInstanceName = {}
        storageSystem = {}
        poolInstanceName['InstanceID'] = "SATA_GOLD1"
        storageSystem['InstanceID'] = "SYMMETRIX+00019870000"
        volume['volume_metadata'] = [metadata]
        volume['name'] = "test-volume"
        external_ref = {'source-name': '0123'}
        common = self.driver.common
        common._initial_setup = mock.Mock(
            return_value={'volume_backend_name': 'FCFAST',
                          'storagetype:fastpolicy': 'GOLD'})
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing,
                          volume, external_ref)

    def _cleanup(self):
        bExists = os.path.exists(self.config_file_path)
        if bExists:
            os.remove(self.config_file_path)
        shutil.rmtree(self.tempdir)


class EMCV3DriverTestCase(test.TestCase):

    def setUp(self):
        self.data = EMCVMAXCommonData()
        self.data.storage_system = 'SYMMETRIX-+-000197200056'
        self.tempdir = tempfile.mkdtemp()
        super(EMCV3DriverTestCase, self).setUp()
        self.config_file_path = None
        self.create_fake_config_file_v3()
        self.addCleanup(self._cleanup)
        self.set_configuration()

    def set_configuration(self):
        configuration = mock.Mock()
        configuration.cinder_emc_config_file = self.config_file_path
        configuration.safe_get.return_value = 'V3'
        configuration.config_group = 'V3'
        self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection',
                       self.fake_ecom_connection)
        instancename = FakeCIMInstanceName()
        self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name',
                       instancename.fake_getinstancename)
        self.stubs.Set(time, 'sleep', self.fake_sleep)
        self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3',
                       self.fake_is_v3)
        driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration)
        driver.db = FakeDB()
        self.driver = driver

    def create_fake_config_file_v3(self):
        doc = minidom.Document()
        emc = doc.createElement("EMC")
        doc.appendChild(emc)
        ecomserverip = doc.createElement("EcomServerIp")
        ecomserveriptext = doc.createTextNode("1.1.1.1")
        emc.appendChild(ecomserverip)
        ecomserverip.appendChild(ecomserveriptext)
        ecomserverport = doc.createElement("EcomServerPort")
        ecomserverporttext = doc.createTextNode("10")
        emc.appendChild(ecomserverport)
        ecomserverport.appendChild(ecomserverporttext)
        ecomusername = doc.createElement("EcomUserName")
        ecomusernametext = doc.createTextNode("user")
        emc.appendChild(ecomusername)
        ecomusername.appendChild(ecomusernametext)
        ecompassword = doc.createElement("EcomPassword")
        ecompasswordtext = doc.createTextNode("pass")
        emc.appendChild(ecompassword)
        ecompassword.appendChild(ecompasswordtext)
        portgroup = doc.createElement("PortGroup")
        portgrouptext = doc.createTextNode(self.data.port_group)
        portgroup.appendChild(portgrouptext)
        pool = doc.createElement("Pool")
        pooltext = doc.createTextNode("SRP_1")
        emc.appendChild(pool)
        pool.appendChild(pooltext)
        array = doc.createElement("Array")
        arraytext = doc.createTextNode("1234567891011")
        emc.appendChild(array)
        array.appendChild(arraytext)
        slo = doc.createElement("SLO")
        slotext = doc.createTextNode("Bronze")
        emc.appendChild(slo)
        slo.appendChild(slotext)
        workload = doc.createElement("Workload")
        workloadtext = doc.createTextNode("DSS")
        emc.appendChild(workload)
        workload.appendChild(workloadtext)
        portgroups = doc.createElement("PortGroups")
        portgroups.appendChild(portgroup)
        emc.appendChild(portgroups)
        timeout = doc.createElement("Timeout")
        timeouttext = doc.createTextNode("0")
        emc.appendChild(timeout)
        timeout.appendChild(timeouttext)
        filename = 'cinder_emc_config_V3.xml'
        self.config_file_path = self.tempdir + '/' + filename
        f = open(self.config_file_path, 'w')
        doc.writexml(f)
        f.close()

    def fake_ecom_connection(self):
        self.conn = FakeEcomConnection()
        return self.conn

    def fake_sleep(self, seconds):
        return

    def fake_is_v3(self, conn, serialNumber):
        return True

    def default_extraspec(self):
        return {'storagetype:pool': 'SRP_1',
                'volume_backend_name': 'V3_BE',
                'storagetype:workload': 'DSS',
                'storagetype:slo': 'Bronze',
                'storagetype:array': '1234567891011',
                'isV3': True,
                'portgroupname': 'OS-portgroup-PG'}

    def default_vol(self):
        vol = EMC_StorageVolume()
        vol['name'] = self.data.test_volume['name']
        vol['CreationClassName'] = 'Symm_StorageVolume'
        vol['ElementName'] = self.data.test_volume['id']
        vol['DeviceID'] = self.data.test_volume['device_id']
        vol['Id'] = self.data.test_volume['id']
        vol['SystemName'] = self.data.storage_system
        vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks']
        vol['BlockSize'] = self.data.test_volume['BlockSize']
        vol['SystemCreationClassName'] = 'Symm_StorageSystem'
        # Added vol to vol.path
        vol.path = vol
        vol.path.classname = vol['CreationClassName']
        return vol

    def default_storage_group(self):
        storagegroup = {}
        storagegroup['CreationClassName'] = (
            self.data.storagegroup_creationclass)
        storagegroup['ElementName'] = 'no_masking_view'
        return storagegroup

    def test_last_vol_in_SG_with_MV(self):
        conn = self.fake_ecom_connection()
        controllerConfigService = (
            self.driver.common.utils.find_controller_configuration_service(
                conn, self.data.storage_system))
        extraSpecs = self.default_extraspec()
        storageGroupName = self.data.storagegroupname
        storageGroupInstanceName = (
            self.driver.common.utils.find_storage_masking_group(
                conn, controllerConfigService, storageGroupName))
        vol = self.default_vol()
        self.driver.common.masking._delete_mv_ig_and_sg = mock.Mock()
        self.assertTrue(self.driver.common.masking._last_vol_in_SG(
            conn, controllerConfigService, storageGroupInstanceName,
            storageGroupName, vol, vol['name'], extraSpecs))

    def test_last_vol_in_SG_no_MV(self):
        conn = self.fake_ecom_connection()
        controllerConfigService = (
            self.driver.common.utils.find_controller_configuration_service(
                conn, self.data.storage_system))
        extraSpecs = self.default_extraspec()
        self.driver.common.masking.get_masking_view_from_storage_group = (
            mock.Mock(return_value=None))
        self.driver.common.masking.utils.get_existing_instance = (
            mock.Mock(return_value=None))
        storagegroup = self.default_storage_group()
        vol = self.default_vol()
        self.assertTrue(self.driver.common.masking._last_vol_in_SG(
            conn, controllerConfigService, storagegroup,
            storagegroup['ElementName'], vol, vol['name'], extraSpecs))

    def test_last_vol_in_SG_no_MV_fail(self):
        self.driver.common.masking.utils.get_existing_instance = (
            mock.Mock(return_value='value'))
        conn = self.fake_ecom_connection()
        controllerConfigService = (
            self.driver.common.utils.find_controller_configuration_service(
                conn, self.data.storage_system))
        extraSpecs = self.default_extraspec()
        vol = self.default_vol()
        storagegroup = self.default_storage_group()
        storagegroup['ElementName'] = 'no_masking_view'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.common.masking._last_vol_in_SG,
                          conn, controllerConfigService, storagegroup,
                          storagegroup['ElementName'], vol, vol['name'],
                          extraSpecs)

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'find_storageSystem',
        return_value={'Name': EMCVMAXCommonData.storage_system_v3})
    def test_get_volume_stats_v3(
            self, mock_storage_system):
        self.driver.get_volume_stats(True)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_create_volume_v3_success(
            self, _mock_volume_type, mock_storage_system):
        self.data.test_volume_v3['host'] = self.data.fake_host_v3
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_volume(self.data.test_volume_v3)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_create_volume_v3_no_slo_success(
            self, _mock_volume_type, mock_storage_system):
        v3_vol = self.data.test_volume_v3
        v3_vol['host'] = 'HostX@Backend#NONE+SRP_1+1234567891011'
        instid = 'SYMMETRIX-+-000197200056-+-NONE:DSS-+-F-+-0-+-SR-+-SRP_1'
        storagepoolsetting = (
            {'InstanceID': instid,
             'CreationClassName': 'CIM_StoragePoolSetting'})
        self.driver.common.provisionv3.get_storage_pool_setting = mock.Mock(
            return_value=storagepoolsetting)
        extraSpecs = {'storagetype:pool': 'SRP_1',
                      'volume_backend_name': 'V3_BE',
                      'storagetype:workload': 'DSS',
                      'storagetype:slo': 'NONE',
                      'storagetype:array': '1234567891011',
                      'isV3': True,
                      'portgroupname': 'OS-portgroup-PG'}
        self.driver.common._initial_setup = mock.Mock(
            return_value=extraSpecs)
        self.driver.create_volume(v3_vol)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_create_volume_v3_invalid_slo_failed(
            self, _mock_volume_type, mock_storage_system):
        extraSpecs = {'storagetype:pool': 'SRP_1',
                      'volume_backend_name': 'V3_BE',
                      'storagetype:workload': 'DSS',
                      'storagetype:slo': 'Bogus',
                      'storagetype:array': '1234567891011',
                      'isV3': True,
                      'portgroupname': 'OS-portgroup-PG'}
        self.driver.common._initial_setup = mock.Mock(
            return_value=extraSpecs)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          self.data.test_volume)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_create_volume_in_CG_v3_success(
            self, _mock_volume_type, mock_storage_system):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_volume(self.data.test_volume_CG_v3)
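
    # For orientation, a sketch (not authoritative) of the document that
    # create_fake_config_file_v3() above serializes. minidom's writexml()
    # with no indent/newl arguments emits it as a single unindented line;
    # wrapped here only for readability:
    #
    #   <EMC><EcomServerIp>1.1.1.1</EcomServerIp>
    #     <EcomServerPort>10</EcomServerPort>
    #     <EcomUserName>user</EcomUserName><EcomPassword>pass</EcomPassword>
    #     <Pool>SRP_1</Pool><Array>1234567891011</Array>
    #     <SLO>Bronze</SLO><Workload>DSS</Workload>
    #     <PortGroups><PortGroup>...</PortGroup></PortGroups>
    #     <Timeout>0</Timeout></EMC>
    #
    # The <PortGroup> text comes from self.data.port_group, so it is left
    # elided here.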

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_delete_volume_v3_success(self, _mock_volume_type):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.delete_volume(self.data.test_volume_v3)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume_v3)
    def test_create_snapshot_v3_success(
            self, mock_volume_db, mock_type, mock_pool):
        self.data.test_volume_v3['volume_name'] = "vmax-1234567"
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_snapshot(self.data.test_volume_v3)

    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume_v3)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_delete_snapshot_v3_success(self, mock_volume_type, mock_db):
        self.data.test_volume_v3['volume_name'] = "vmax-1234567"
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.delete_snapshot(self.data.test_volume_v3)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    def test_create_cloned_volume_v3_success(
            self, mock_volume_db, mock_type, mock_pool):
        self.data.test_volume_v3['volume_name'] = "vmax-1234567"
        cloneVol = {}
        cloneVol['name'] = 'vol1'
        cloneVol['id'] = '10'
        cloneVol['CreationClassName'] = 'Symm_StorageVolume'
        cloneVol['SystemName'] = self.data.storage_system
        cloneVol['DeviceID'] = cloneVol['id']
        cloneVol['SystemCreationClassName'] = 'Symm_StorageSystem'
        cloneVol['volume_type_id'] = 'abc'
        cloneVol['provider_location'] = None
        cloneVol['NumberOfBlocks'] = 100
        cloneVol['BlockSize'] = self.data.block_size
        cloneVol['host'] = self.data.fake_host_v3
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_cloned_volume(cloneVol, self.data.test_volume_v3)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_create_CG_v3_success(
            self, _mock_volume_type, _mock_storage_system):
        self.driver.create_consistencygroup(
            self.data.test_ctxt, self.data.test_volume_CG_v3)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_members_of_replication_group',
        return_value=None)
    @mock.patch.object(
        FakeDB,
        'volume_get_all_by_group',
        return_value=None)
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_delete_CG_no_volumes_v3_success(
            self, _mock_volume_type, _mock_storage_system,
            _mock_db_volumes, _mock_members):
        self.driver.delete_consistencygroup(
            self.data.test_ctxt, self.data.test_CG, [])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_delete_CG_with_volumes_v3_success(
            self, _mock_volume_type, _mock_storage_system):
        self.driver.delete_consistencygroup(
            self.data.test_ctxt, self.data.test_CG, [])

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_migrate_volume_v3_success(self, _mock_volume_type):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
                                   self.data.test_host)

    @mock.patch.object(
        emc_vmax_provision_v3.EMCVMAXProvisionV3,
        '_find_new_storage_group',
        return_value='Any')
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'wrap_get_storage_group_from_volume',
        return_value=None)
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        '_get_fast_settings_from_storage_group',
        return_value='Gold+DSS_REP')
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_retype_volume_v3_success(
            self, _mock_volume_type, mock_fast_settings,
            mock_storage_group, mock_found_SG):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.assertTrue(self.driver.retype(
            self.data.test_ctxt, self.data.test_volume_v3, self.data.new_type,
            self.data.diff, self.data.test_host_v3))

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        '_get_fast_settings_from_storage_group',
        return_value='Bronze+DSS')
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_retype_volume_same_host_failure(
            self, _mock_volume_type, mock_fast_settings):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.assertFalse(self.driver.retype(
            self.data.test_ctxt, self.data.test_volume_v3, self.data.new_type,
            self.data.diff, self.data.test_host_v3))

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'find_group_sync_rg_by_target',
        return_value=1)
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_members_of_replication_group',
        return_value=())
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_find_consistency_group',
        return_value=(None, EMCVMAXCommonData.test_CG))
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_create_cgsnapshot_v3_success(
            self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members,
            mock_rg):
        provisionv3 = self.driver.common.provisionv3
        provisionv3.create_group_replica = mock.Mock(return_value=(0, None))
        self.driver.create_cgsnapshot(
            self.data.test_ctxt, self.data.test_CG_snapshot, [])
        repServ = self.conn.EnumerateInstanceNames("EMC_ReplicationService")[0]
        provisionv3.create_group_replica.assert_called_once_with(
            self.conn, repServ,
            (None, EMCVMAXCommonData.test_CG),
            (None, EMCVMAXCommonData.test_CG), '12de',
            EMCVMAXCommonData.extra_specs)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_delete_cgsnapshot_v3_success(
            self, _mock_volume_type, _mock_storage):
        self.driver.delete_cgsnapshot(
            self.data.test_ctxt, self.data.test_CG_snapshot, [])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system_v3))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_update_CG_add_volume_v3_success(
            self, _mock_volume_type, _mock_storage_system):
        add_volumes = []
        add_volumes.append(self.data.test_source_volume)
        remove_volumes = None
        self.driver.update_consistencygroup(
            self.data.test_ctxt, self.data.test_CG,
            add_volumes, remove_volumes)
        # Multiple volumes
        add_volumes.append(self.data.test_source_volume)
        self.driver.update_consistencygroup(
            self.data.test_ctxt, self.data.test_CG,
            add_volumes, remove_volumes)
        # Can't find CG
        self.driver.common._find_consistency_group = mock.Mock(
            return_value=None)
        self.assertRaises(exception.ConsistencyGroupNotFound,
                          self.driver.update_consistencygroup,
                          self.data.test_ctxt, self.data.test_CG,
                          add_volumes, remove_volumes)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system_v3))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_update_CG_remove_volume_v3_success(
            self, _mock_volume_type, _mock_storage_system):
        remove_volumes = []
        remove_volumes.append(self.data.test_source_volume)
        add_volumes = None
        self.driver.update_consistencygroup(
            self.data.test_ctxt, self.data.test_CG,
            add_volumes, remove_volumes)
        # Multiple volumes
        remove_volumes.append(self.data.test_source_volume)
        self.driver.update_consistencygroup(
            self.data.test_ctxt, self.data.test_CG,
            add_volumes, remove_volumes)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_is_same_host',
        return_value=True)
    @mock.patch.object(
        emc_vmax_masking.EMCVMAXMasking,
        'get_masking_view_from_storage_group',
        return_value=EMCVMAXCommonData.lunmaskctrl_name)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_map_v3_success(
            self, _mock_volume_type, mock_maskingview, mock_is_same_host):
        common = self.driver.common
        common.get_target_wwns = mock.Mock(
            return_value=EMCVMAXCommonData.target_wwns)
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.common._get_correct_port_group = mock.Mock(
            return_value=self.data.port_group)
        data = self.driver.initialize_connection(
            self.data.test_volume_v3, self.data.connector)
        # Test the no lookup service, pre-zoned case.
        common.get_target_wwns.assert_called_once_with(
            EMCVMAXCommonData.storage_system, EMCVMAXCommonData.connector)
        for init, target in data['data']['initiator_target_map'].items():
            self.assertIn(init[::-1], target)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        'find_device_number',
        return_value={'Name': "0001"})
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_map_v3_failed(self, _mock_volume_type, mock_wrap_device):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          self.data.test_volume,
                          self.data.connector)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        'get_masking_views_by_port_group',
        return_value=[])
    @mock.patch.object(
        emc_vmax_masking.EMCVMAXMasking,
        'get_initiator_group_from_masking_view',
        return_value='myInitGroup')
    @mock.patch.object(
        emc_vmax_masking.EMCVMAXMasking,
        '_find_initiator_masking_group',
        return_value='myInitGroup')
    @mock.patch.object(
        emc_vmax_masking.EMCVMAXMasking,
        'get_masking_view_from_storage_group',
        return_value=EMCVMAXCommonData.lunmaskctrl_name)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    def test_detach_v3_success(self, mock_volume_type, mock_maskingview,
                               mock_ig, mock_igc, mock_mv):
        common = self.driver.common
        common.get_target_wwns = mock.Mock(
            return_value=EMCVMAXCommonData.target_wwns)
        common.masking.utils.find_storage_masking_group = mock.Mock(
            return_value=self.data.storagegroups[0])
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        data = self.driver.terminate_connection(self.data.test_volume_v3,
                                                self.data.connector)
        common.get_target_wwns.assert_called_once_with(
            EMCVMAXCommonData.storage_system, EMCVMAXCommonData.connector)
        numTargetWwns = len(EMCVMAXCommonData.target_wwns)
        self.assertEqual(numTargetWwns, len(data['data']))

    # Bug https://bugs.launchpad.net/cinder/+bug/1440154
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'V3_BE'})
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume_v3)
    @mock.patch.object(
        emc_vmax_provision_v3.EMCVMAXProvisionV3,
        'create_element_replica')
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'find_sync_sv_by_target',
        return_value=(None, None))
    def test_create_clone_v3_assert_clean_up_target_volume(
            self, mock_sync, mock_create_replica, mock_volume_db,
            mock_type, mock_pool):
        self.data.test_volume['volume_name'] = "vmax-1234567"
        e = exception.VolumeBackendAPIException('CreateElementReplica Ex')
        common = self.driver.common
        volumeDict = {'classname': u'Symm_StorageVolume',
                      'keybindings': EMCVMAXCommonData.keybindings}
        common._create_v3_volume = (
            mock.Mock(return_value=(0, volumeDict, self.data.storage_system)))
        conn = self.fake_ecom_connection()
        storageConfigService = {}
        storageConfigService['SystemName'] = EMCVMAXCommonData.storage_system
        storageConfigService['CreationClassName'] = (
            self.data.stconf_service_creationclass)
        common._delete_from_pool_v3 = mock.Mock(return_value=0)
        mock_create_replica.side_effect = e
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          self.data.test_volume_v3,
                          EMCVMAXCommonData.test_source_volume_v3)
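        # When create_element_replica raises, the driver is expected to roll
        # back by deleting the half-created target volume; the
        # _delete_from_pool_v3 call verified below is the clean-up path
        # exercised for bug 1440154 referenced above.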
        extraSpecs = common._initial_setup(self.data.test_volume_v3)
        targetInstance = (
            conn.EnumerateInstanceNames("EMC_StorageVolume")[0])
        deviceID = targetInstance['DeviceID']
        common._delete_from_pool_v3.assert_called_with(storageConfigService,
                                                       targetInstance,
                                                       targetInstance['Name'],
                                                       deviceID,
                                                       extraSpecs)

    def test_get_remaining_slo_capacity_wlp(self):
        conn = self.fake_ecom_connection()
        array_info = {'Workload': u'DSS', 'SLO': u'Bronze'}
        storagesystem = self.data.storage_system_v3
        srpPoolInstanceName = {}
        srpPoolInstanceName['InstanceID'] = (
            self.data.storage_system_v3 + '+U+' + 'SRP_1')
        srpPoolInstanceName['CreationClassName'] = (
            'Symm_VirtualProvisioningPool')
        srpPoolInstanceName['ElementName'] = 'SRP_1'
        remainingCapacityGb = (
            self.driver.common.provisionv3._get_remaining_slo_capacity_wlp(
                conn, srpPoolInstanceName, array_info, storagesystem))
        remainingSLOCapacityGb = self.driver.common.utils.convert_bits_to_gbs(
            self.data.remainingSLOCapacity)
        self.assertEqual(remainingSLOCapacityGb, remainingCapacityGb)

    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'get_volume_size',
        return_value='2147483648')
    def test_extend_volume(self, mock_volume_size):
        newSize = '2'
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.extend_volume(self.data.test_volume_v3, newSize)

    def test_extend_volume_smaller_size_exception(self):
        test_local_volume = {'name': 'vol1',
                             'size': 4,
                             'volume_name': 'vol1',
                             'id': 'vol1',
                             'device_id': '1',
                             'provider_auth': None,
                             'project_id': 'project',
                             'display_name': 'vol1',
                             'display_description': 'test volume',
                             'volume_type_id': 'abc',
                             'provider_location': six.text_type(
                                 self.data.provider_location),
                             'status': 'available',
                             'host': self.data.fake_host_v3,
                             'NumberOfBlocks': 100,
                             'BlockSize': self.data.block_size}
        newSize = '2'
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.extend_volume,
            test_local_volume, newSize)

    def test_extend_volume_exception(self):
        common = self.driver.common
        newsize = '2'
        common._initial_setup = mock.Mock(return_value=None)
        common._find_lun = mock.Mock(return_value=None)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            common.extend_volume,
            self.data.test_volume, newsize)

    def test_extend_volume_size_tally_exception(self):
        common = self.driver.common
        newsize = '2'
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.data.extra_specs)
        vol = {'SystemName': self.data.storage_system}
        common._find_lun = mock.Mock(return_value=vol)
        common._extend_v3_volume = mock.Mock(return_value=(0, vol))
        common.utils.find_volume_instance = mock.Mock(
            return_value='2147483648')
        common.utils.get_volume_size = mock.Mock(return_value='2147483646')
        self.assertRaises(
            exception.VolumeBackendAPIException,
            common.extend_volume,
            self.data.test_volume, newsize)

    def _cleanup(self):
        bExists = os.path.exists(self.config_file_path)
        if bExists:
            os.remove(self.config_file_path)
        shutil.rmtree(self.tempdir)


class EMCV2MultiPoolDriverTestCase(test.TestCase):

    def setUp(self):
        self.data = EMCVMAXCommonData()
        self.vol_v2 = self.data.test_volume_v2
        self.vol_v2['provider_location'] = (
            six.text_type(self.data.provider_location_multi_pool))
        self.tempdir = tempfile.mkdtemp()
        super(EMCV2MultiPoolDriverTestCase, self).setUp()
        self.config_file_path = None
        self.create_fake_config_file_multi_pool()
        self.addCleanup(self._cleanup)
        configuration = mock.Mock()
        configuration.safe_get.return_value = 'MULTI_POOL'
        configuration.cinder_emc_config_file = self.config_file_path
        configuration.config_group = 'MULTI_POOL'
        self.stubs.Set(emc_vmax_iscsi.EMCVMAXISCSIDriver,
                       'smis_do_iscsi_discovery',
                       self.fake_do_iscsi_discovery)
        self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection',
                       self.fake_ecom_connection)
        instancename = FakeCIMInstanceName()
        self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name',
                       instancename.fake_getinstancename)
        self.stubs.Set(time, 'sleep', self.fake_sleep)
        self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3',
                       self.fake_is_v3)
        driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(
            configuration=configuration)
        driver.db = FakeDB()
        self.driver = driver
        self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object)

    def create_fake_config_file_multi_pool(self):
        doc = minidom.Document()
        emc = doc.createElement("EMC")
        doc.appendChild(emc)
        eComServers = doc.createElement("EcomServers")
        emc.appendChild(eComServers)
        eComServer = doc.createElement("EcomServer")
        eComServers.appendChild(eComServer)
        ecomserverip = doc.createElement("EcomServerIp")
        eComServer.appendChild(ecomserverip)
        ecomserveriptext = doc.createTextNode("1.1.1.1")
        ecomserverip.appendChild(ecomserveriptext)
        ecomserverport = doc.createElement("EcomServerPort")
        eComServer.appendChild(ecomserverport)
        ecomserverporttext = doc.createTextNode("10")
        ecomserverport.appendChild(ecomserverporttext)
        ecomusername = doc.createElement("EcomUserName")
        eComServer.appendChild(ecomusername)
        ecomusernametext = doc.createTextNode("user")
        ecomusername.appendChild(ecomusernametext)
        ecompassword = doc.createElement("EcomPassword")
        eComServer.appendChild(ecompassword)
        ecompasswordtext = doc.createTextNode("pass")
        ecompassword.appendChild(ecompasswordtext)
        arrays = doc.createElement("Arrays")
        eComServer.appendChild(arrays)
        array = doc.createElement("Array")
        arrays.appendChild(array)
        serialNo = doc.createElement("SerialNumber")
        array.appendChild(serialNo)
        serialNoText = doc.createTextNode("1234567891011")
        serialNo.appendChild(serialNoText)
        portgroups = doc.createElement("PortGroups")
        array.appendChild(portgroups)
        portgroup = doc.createElement("PortGroup")
        portgroups.appendChild(portgroup)
        portgrouptext = doc.createTextNode(self.data.port_group)
        portgroup.appendChild(portgrouptext)
        pools = doc.createElement("Pools")
        array.appendChild(pools)
        pool = doc.createElement("Pool")
        pools.appendChild(pool)
        poolName = doc.createElement("PoolName")
        pool.appendChild(poolName)
        poolNameText = doc.createTextNode("gold")
        poolName.appendChild(poolNameText)
        pool2 = doc.createElement("Pool")
        pools.appendChild(pool2)
        pool2Name = doc.createElement("PoolName")
        pool2.appendChild(pool2Name)
        pool2NameText = doc.createTextNode("SATA_BRONZE1")
        pool2Name.appendChild(pool2NameText)
        pool2FastPolicy = doc.createElement("FastPolicy")
        pool2.appendChild(pool2FastPolicy)
        pool2FastPolicyText = doc.createTextNode("BRONZE1")
        pool2FastPolicy.appendChild(pool2FastPolicyText)
        filename = 'cinder_emc_config_V2_MULTI_POOL.xml'
        self.config_file_path = self.tempdir + '/' + filename
        f = open(self.config_file_path, 'w')
        doc.writexml(f)
        f.close()

    def fake_ecom_connection(self):
        self.conn = FakeEcomConnection()
        return self.conn

    def fake_do_iscsi_discovery(self, volume):
        output = []
        item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80'
        output.append(item)
        return output

    def fake_sleep(self, seconds):
        return

    def fake_is_v3(self, conn, serialNumber):
        return False

    def default_extraspec(self):
        return {'storagetype:pool': u'gold',
                'volume_backend_name': 'MULTI_POOL_BE',
                'storagetype:fastpolicy': None,
                'storagetype:compositetype': u'concatenated',
                'storagetype:membercount': 1,
                'storagetype:array': u'1234567891011',
                'isV3': False,
                'portgroupname': u'OS-portgroup-PG'}

    def test_validate_pool(self):
        v2_valid_pool = self.data.test_volume_v2.copy()
        # Pool aware scheduler enabled
        v2_valid_pool['host'] = self.data.fake_host
        pool = self.driver.common._validate_pool(v2_valid_pool)
        self.assertEqual('gold+1234567891011', pool)
        # Cannot get the pool from the host
        v2_valid_pool['host'] = 'HostX@Backend'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.common._validate_pool,
                          v2_valid_pool)
        # Legacy test. Provider Location does not have the version
        v2_valid_pool['host'] = self.data.fake_host
        v2_valid_pool['provider_location'] = self.data.provider_location
        pool = self.driver.common._validate_pool(v2_valid_pool)
        self.assertIsNone(pool)

    def test_array_info_multi_pool(self):
        arrayInfo = self.driver.utils.parse_file_to_get_array_map(
            self.config_file_path)
        self.assertEqual(2, len(arrayInfo))
        for arrayInfoRec in arrayInfo:
            self.assertEqual(
                '1234567891011', arrayInfoRec['SerialNumber'])
            self.assertIn(self.data.port_group, arrayInfoRec['PortGroup'])
            self.assertTrue(
                self.data.poolname in arrayInfoRec['PoolName'] or
                'SATA_BRONZE1' in arrayInfoRec['PoolName'])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_POOL_BE'})
    def test_create_volume_multi_pool_success(
            self, _mock_volume_type, mock_storage_system):
        self.vol_v2['provider_location'] = None
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_volume(self.vol_v2)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_POOL_BE'})
    def test_delete_volume_multi_pool_success(
            self, _mock_volume_type, mock_storage_system):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.delete_volume(self.vol_v2)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_POOL_BE'})
    def test_create_volume_in_CG_multi_pool_success(
            self, _mock_volume_type, mock_storage_system):
        self.data.test_volume_CG['provider_location'] = None
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_volume(self.data.test_volume_CG)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_POOL_BE'})
    def test_retype_volume_multi_pool_success(
            self, _mock_volume_type):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.retype(
            self.data.test_ctxt, self.vol_v2, self.data.new_type,
            self.data.diff, self.data.test_host)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_POOL_BE'})
    # There is only one unique array in the conf file
    def test_create_CG_multi_pool_success(
            self, _mock_volume_type, _mock_storage_system):
        self.driver.create_consistencygroup(
            self.data.test_ctxt, self.data.test_CG)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_members_of_replication_group',
        return_value=None)
    @mock.patch.object(
        FakeDB,
        'volume_get_all_by_group',
        return_value=None)
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_POOL_BE'})
    def test_delete_CG_no_volumes_multi_pool_success(
            self, _mock_volume_type, _mock_storage_system,
            _mock_db_volumes, _mock_members):
        self.driver.delete_consistencygroup(
            self.data.test_ctxt, self.data.test_CG, [])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_POOL_BE'})
    def test_delete_CG_with_volumes_multi_pool_success(
            self, _mock_volume_type, _mock_storage_system):
        self.driver.delete_consistencygroup(
            self.data.test_ctxt, self.data.test_CG, [])

    def _cleanup(self):
        bExists = os.path.exists(self.config_file_path)
        if bExists:
            os.remove(self.config_file_path)
        shutil.rmtree(self.tempdir)


class EMCV3MultiSloDriverTestCase(test.TestCase):

    def setUp(self):
        self.data = EMCVMAXCommonData()
        self.vol_v3 = self.data.test_volume_v3
        self.vol_v3['provider_location'] = (
            six.text_type(self.data.provider_location_multi_pool))
        self.tempdir = tempfile.mkdtemp()
        super(EMCV3MultiSloDriverTestCase, self).setUp()
        self.config_file_path = None
        self.create_fake_config_file_multi_slo_v3()
        self.addCleanup(self._cleanup)
        self.set_configuration()

    def set_configuration(self):
        configuration = mock.Mock()
        configuration.safe_get.return_value = 'MULTI_SLO_V3'
        configuration.cinder_emc_config_file = self.config_file_path
        configuration.config_group = 'MULTI_SLO_V3'
        self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection',
                       self.fake_ecom_connection)
        instancename = FakeCIMInstanceName()
        self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name',
                       instancename.fake_getinstancename)
        self.stubs.Set(time, 'sleep', self.fake_sleep)
        self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3',
                       self.fake_is_v3)
        driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration)
        driver.db = FakeDB()
        self.driver = driver
        self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object)

    def create_fake_config_file_multi_slo_v3(self):
        doc = minidom.Document()
        emc = doc.createElement("EMC")
        doc.appendChild(emc)
        eComServers = doc.createElement("EcomServers")
        emc.appendChild(eComServers)
        eComServer = doc.createElement("EcomServer")
        eComServers.appendChild(eComServer)
        ecomserverip = doc.createElement("EcomServerIp")
        eComServer.appendChild(ecomserverip)
        ecomserveriptext = doc.createTextNode("1.1.1.1")
        ecomserverip.appendChild(ecomserveriptext)
        ecomserverport = doc.createElement("EcomServerPort")
        eComServer.appendChild(ecomserverport)
        ecomserverporttext = doc.createTextNode("10")
        ecomserverport.appendChild(ecomserverporttext)
        ecomusername = doc.createElement("EcomUserName")
        eComServer.appendChild(ecomusername)
        ecomusernametext = doc.createTextNode("user")
        ecomusername.appendChild(ecomusernametext)
        ecompassword = doc.createElement("EcomPassword")
        eComServer.appendChild(ecompassword)
        ecompasswordtext = doc.createTextNode("pass")
        ecompassword.appendChild(ecompasswordtext)
        arrays = doc.createElement("Arrays")
        eComServer.appendChild(arrays)
        array = doc.createElement("Array")
        arrays.appendChild(array)
        serialNo = doc.createElement("SerialNumber")
        array.appendChild(serialNo)
        serialNoText = doc.createTextNode("1234567891011")
        serialNo.appendChild(serialNoText)
        portgroups = doc.createElement("PortGroups")
        array.appendChild(portgroups)
        portgroup = doc.createElement("PortGroup")
        portgroups.appendChild(portgroup)
        portgrouptext = doc.createTextNode(self.data.port_group)
        portgroup.appendChild(portgrouptext)
        vpools = doc.createElement("Pools")
        array.appendChild(vpools)
        vpool = doc.createElement("Pool")
        vpools.appendChild(vpool)
        poolName = doc.createElement("PoolName")
        vpool.appendChild(poolName)
        poolNameText = doc.createTextNode("SRP_1")
        poolName.appendChild(poolNameText)
        poolslo = doc.createElement("SLO")
        vpool.appendChild(poolslo)
        poolsloText = doc.createTextNode("Bronze")
        poolslo.appendChild(poolsloText)
        poolworkload = doc.createElement("Workload")
        vpool.appendChild(poolworkload)
        poolworkloadText = doc.createTextNode("DSS")
        poolworkload.appendChild(poolworkloadText)
        vpool2 = doc.createElement("Pool")
        vpools.appendChild(vpool2)
        pool2Name = doc.createElement("PoolName")
        vpool2.appendChild(pool2Name)
        pool2NameText = doc.createTextNode("SRP_1")
        pool2Name.appendChild(pool2NameText)
        pool2slo = doc.createElement("SLO")
        vpool2.appendChild(pool2slo)
        pool2sloText = doc.createTextNode("Silver")
        pool2slo.appendChild(pool2sloText)
        pool2workload = doc.createElement("Workload")
        vpool2.appendChild(pool2workload)
        pool2workloadText = doc.createTextNode("OLTP")
        pool2workload.appendChild(pool2workloadText)
        filename = 'cinder_emc_config_MULTI_SLO_V3.xml'
        self.config_file_path = self.tempdir + '/' + filename
        f = open(self.config_file_path, 'w')
        doc.writexml(f)
        f.close()

    def fake_ecom_connection(self):
        self.conn = FakeEcomConnection()
        return self.conn

    def fake_sleep(self, seconds):
        return

    def fake_is_v3(self, conn, serialNumber):
        return True

    def default_extraspec(self):
        return {'storagetype:pool': u'SRP_1',
                'volume_backend_name': 'MULTI_SLO_BE',
                'storagetype:workload': u'DSS',
                'storagetype:slo': u'Bronze',
                'storagetype:array': u'1234567891011',
                'isV3': True,
                'portgroupname': u'OS-portgroup-PG'}

    def test_validate_pool(self):
        v3_valid_pool = self.data.test_volume_v3.copy()
        # Pool aware scheduler enabled
        v3_valid_pool['host'] = self.data.fake_host_v3
        pool = self.driver.common._validate_pool(v3_valid_pool)
        self.assertEqual('Bronze+SRP_1+1234567891011', pool)
        # Cannot get the pool from the host
        v3_valid_pool['host'] = 'HostX@Backend'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.common._validate_pool,
                          v3_valid_pool)
        # Legacy test. Provider Location does not have the version
        v3_valid_pool['host'] = self.data.fake_host_v3
        v3_valid_pool['provider_location'] = self.data.provider_location
        pool = self.driver.common._validate_pool(v3_valid_pool)
        self.assertIsNone(pool)

    def test_array_info_multi_slo(self):
        arrayInfo = self.driver.utils.parse_file_to_get_array_map(
            self.config_file_path)
        self.assertEqual(2, len(arrayInfo))
        for arrayInfoRec in arrayInfo:
            self.assertEqual(
                '1234567891011', arrayInfoRec['SerialNumber'])
            self.assertIn(self.data.port_group, arrayInfoRec['PortGroup'])
            self.assertIn('SRP_1', arrayInfoRec['PoolName'])
            self.assertTrue(
                'Bronze' in arrayInfoRec['SLO'] or
                'Silver' in arrayInfoRec['SLO'])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_SLO_BE'})
    def test_create_volume_multi_slo_success(
            self, _mock_volume_type, mock_storage_system):
        self.vol_v3['host'] = self.data.fake_host_v3
        self.vol_v3['provider_location'] = None
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_volume(self.vol_v3)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_SLO_BE'})
    def test_delete_volume_multi_slo_success(
            self, _mock_volume_type, mock_storage_system):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.delete_volume(self.vol_v3)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_SLO_BE'})
    def test_create_volume_in_CG_multi_slo_success(
            self, _mock_volume_type, mock_storage_system):
        self.data.test_volume_CG_v3['provider_location'] = None
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_volume(self.data.test_volume_CG_v3)

    @mock.patch.object(
        emc_vmax_provision_v3.EMCVMAXProvisionV3,
        '_find_new_storage_group',
        return_value='Any')
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        'wrap_get_storage_group_from_volume',
        return_value=None)
    @mock.patch.object(
        emc_vmax_utils.EMCVMAXUtils,
        '_get_fast_settings_from_storage_group',
        return_value='Gold+DSS_REP')
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_SLO_BE'})
    def test_retype_volume_multi_slo_success(
            self, _mock_volume_type, mock_fast_settings,
            mock_storage_group, mock_found_SG):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.assertTrue(self.driver.retype(
            self.data.test_ctxt, self.data.test_volume_v3, self.data.new_type,
            self.data.diff, self.data.test_host_v3))

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_SLO_BE'})
    # There is only one unique array in the conf file
    def test_create_CG_multi_slo_success(
            self, _mock_volume_type, _mock_storage_system):
        self.driver.common._initial_setup = mock.Mock(
            return_value=self.default_extraspec())
        self.driver.create_consistencygroup(
            self.data.test_ctxt, self.data.test_CG)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_members_of_replication_group',
        return_value=None)
    @mock.patch.object(
        FakeDB,
        'volume_get_all_by_group',
        return_value=None)
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_SLO_BE'})
    def test_delete_CG_no_volumes_multi_slo_success(
            self, _mock_volume_type, _mock_storage_system,
            _mock_db_volumes, _mock_members):
        self.driver.delete_consistencygroup(
            self.data.test_ctxt, self.data.test_CG, [])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_SLO_BE'})
    def test_delete_CG_with_volumes_multi_slo_success(
            self, _mock_volume_type, _mock_storage_system):
        self.driver.delete_consistencygroup(
            self.data.test_ctxt, self.data.test_CG, [])

    def _cleanup(self):
        bExists = os.path.exists(self.config_file_path)
        if bExists:
            os.remove(self.config_file_path)
        shutil.rmtree(self.tempdir)


class EMCV2MultiPoolDriverMultipleEcomsTestCase(test.TestCase):

    def setUp(self):
        self.data = EMCVMAXCommonData()
        self.vol_v2 = self.data.test_volume_v2
        self.vol_v2['provider_location'] = (
            six.text_type(self.data.provider_location_multi_pool))
        self.tempdir = tempfile.mkdtemp()
        super(EMCV2MultiPoolDriverMultipleEcomsTestCase, self).setUp()
        self.config_file_path = None
        self.create_fake_config_file_multi_ecom()
        self.addCleanup(self._cleanup)
        configuration = mock.Mock()
        configuration.cinder_emc_config_file = self.config_file_path
        configuration.safe_get.return_value = 'MULTI_ECOM'
        configuration.config_group = 'MULTI_ECOM'
        self.stubs.Set(emc_vmax_common.EMCVMAXCommon, '_get_ecom_connection',
                       self.fake_ecom_connection)
        instancename = FakeCIMInstanceName()
        self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'get_instance_name',
                       instancename.fake_getinstancename)
        self.stubs.Set(time, 'sleep', self.fake_sleep)
        self.stubs.Set(emc_vmax_utils.EMCVMAXUtils, 'isArrayV3',
                       self.fake_is_v3)
        driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration)
        driver.db = FakeDB()
        driver.common.conn = FakeEcomConnection()
        driver.zonemanager_lookup_service = FakeLookupService()
        self.driver = driver
        self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object)

    def create_fake_config_file_multi_ecom(self):
        doc = minidom.Document()
        emc = doc.createElement("EMC")
        doc.appendChild(emc)
        eComServers = doc.createElement("EcomServers")
        emc.appendChild(eComServers)
        eComServer = doc.createElement("EcomServer")
        eComServers.appendChild(eComServer)
        ecomserverip = doc.createElement("EcomServerIp")
        eComServer.appendChild(ecomserverip)
        ecomserveriptext = doc.createTextNode("1.1.1.1")
        ecomserverip.appendChild(ecomserveriptext)
        ecomserverport = doc.createElement("EcomServerPort")
        eComServer.appendChild(ecomserverport)
        ecomserverporttext = doc.createTextNode("10")
        ecomserverport.appendChild(ecomserverporttext)
        ecomusername = doc.createElement("EcomUserName")
        eComServer.appendChild(ecomusername)
        ecomusernametext = doc.createTextNode("user")
        ecomusername.appendChild(ecomusernametext)
        ecompassword = doc.createElement("EcomPassword")
        eComServer.appendChild(ecompassword)
        ecompasswordtext = doc.createTextNode("pass")
        ecompassword.appendChild(ecompasswordtext)
        arrays = doc.createElement("Arrays")
        eComServer.appendChild(arrays)
        array = doc.createElement("Array")
        arrays.appendChild(array)
        serialNo = doc.createElement("SerialNumber")
        array.appendChild(serialNo)
        serialNoText = doc.createTextNode("1110987654321")
        serialNo.appendChild(serialNoText)
        portgroups = doc.createElement("PortGroups")
        array.appendChild(portgroups)
        portgroup = doc.createElement("PortGroup")
        portgroups.appendChild(portgroup)
        portgrouptext = doc.createTextNode(self.data.port_group)
        portgroup.appendChild(portgrouptext)
        pools = doc.createElement("Pools")
        array.appendChild(pools)
        pool = doc.createElement("Pool")
        pools.appendChild(pool)
        poolName = doc.createElement("PoolName")
        pool.appendChild(poolName)
        poolNameText = doc.createTextNode("gold")
        poolName.appendChild(poolNameText)
        pool2 = doc.createElement("Pool")
        pools.appendChild(pool2)
        pool2Name = doc.createElement("PoolName")
        pool2.appendChild(pool2Name)
        pool2NameText = doc.createTextNode("SATA_BRONZE1")
        pool2Name.appendChild(pool2NameText)
        pool2FastPolicy = doc.createElement("FastPolicy")
        pool2.appendChild(pool2FastPolicy)
        pool2FastPolicyText = doc.createTextNode("BRONZE1")
        pool2FastPolicy.appendChild(pool2FastPolicyText)
        eComServer = doc.createElement("EcomServer")
        eComServers.appendChild(eComServer)
        ecomserverip = doc.createElement("EcomServerIp")
        eComServer.appendChild(ecomserverip)
        ecomserveriptext = doc.createTextNode("1.1.1.1")
        ecomserverip.appendChild(ecomserveriptext)
        ecomserverport = doc.createElement("EcomServerPort")
        eComServer.appendChild(ecomserverport)
        ecomserverporttext = doc.createTextNode("10")
        ecomserverport.appendChild(ecomserverporttext)
        ecomusername = doc.createElement("EcomUserName")
        eComServer.appendChild(ecomusername)
        ecomusernametext = doc.createTextNode("user")
        ecomusername.appendChild(ecomusernametext)
        ecompassword = doc.createElement("EcomPassword")
        eComServer.appendChild(ecompassword)
        ecompasswordtext = doc.createTextNode("pass")
        ecompassword.appendChild(ecompasswordtext)
        arrays = doc.createElement("Arrays")
        eComServer.appendChild(arrays)
        array = doc.createElement("Array")
        arrays.appendChild(array)
        serialNo = doc.createElement("SerialNumber")
        array.appendChild(serialNo)
        serialNoText = doc.createTextNode("1234567891011")
        serialNo.appendChild(serialNoText)
        portgroups = doc.createElement("PortGroups")
        array.appendChild(portgroups)
        portgroup = doc.createElement("PortGroup")
        portgroups.appendChild(portgroup)
        portgrouptext = doc.createTextNode(self.data.port_group)
        portgroup.appendChild(portgrouptext)
        pools = doc.createElement("Pools")
        array.appendChild(pools)
        pool = doc.createElement("Pool")
        pools.appendChild(pool)
        poolName = doc.createElement("PoolName")
        pool.appendChild(poolName)
        poolNameText = doc.createTextNode("gold")
        poolName.appendChild(poolNameText)
        pool2 = doc.createElement("Pool")
        pools.appendChild(pool2)
        pool2Name = doc.createElement("PoolName")
        pool2.appendChild(pool2Name)
        pool2NameText = doc.createTextNode("SATA_BRONZE1")
        pool2Name.appendChild(pool2NameText)
        pool2FastPolicy = doc.createElement("FastPolicy")
        pool2.appendChild(pool2FastPolicy)
        pool2FastPolicyText = doc.createTextNode("BRONZE1")
        pool2FastPolicy.appendChild(pool2FastPolicyText)
        filename = 'cinder_emc_config_V2_MULTI_ECOM.xml'
        self.config_file_path = self.tempdir + '/' + filename
        f = open(self.config_file_path, 'w')
        doc.writexml(f)
        f.close()

    def fake_ecom_connection(self):
        self.conn = FakeEcomConnection()
        return self.conn

    def fake_sleep(self, seconds):
        return

    def fake_is_v3(self, conn, serialNumber):
        return False

    def test_array_info_multi_ecom_no_fast(self):
        pool = 'gold+1234567891011'
        arrayInfo = self.driver.utils.parse_file_to_get_array_map(
            self.config_file_path)
        self.assertEqual(4, len(arrayInfo))
        poolRec = self.driver.utils.extract_record(arrayInfo, pool)
        self.assertEqual('1234567891011', poolRec['SerialNumber'])
        self.assertEqual(self.data.port_group, poolRec['PortGroup'])
        self.assertEqual(self.data.poolname, poolRec['PoolName'])
        self.assertEqual('user', poolRec['EcomUserName'])
        self.assertEqual('pass', poolRec['EcomPassword'])
        self.assertIsNone(poolRec['FastPolicy'])
        self.assertFalse(poolRec['EcomUseSSL'])

    def test_array_info_multi_ecom_fast(self):
        pool = 'SATA_BRONZE1+1234567891011'
        arrayInfo = self.driver.utils.parse_file_to_get_array_map(
            self.config_file_path)
        self.assertEqual(4, len(arrayInfo))
        poolRec = self.driver.utils.extract_record(arrayInfo, pool)
        self.assertEqual('1234567891011', poolRec['SerialNumber'])
        self.assertEqual(self.data.port_group, poolRec['PortGroup'])
        self.assertEqual('SATA_BRONZE1', poolRec['PoolName'])
        self.assertEqual('user', poolRec['EcomUserName'])
        self.assertEqual('pass', poolRec['EcomPassword'])
        self.assertEqual('BRONZE1', poolRec['FastPolicy'])
        self.assertFalse(poolRec['EcomUseSSL'])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_ECOM_BE'})
    def test_create_volume_multi_ecom_success(
            self, _mock_volume_type, mock_storage_system):
        self.vol_v2['provider_location'] = None
        self.driver.create_volume(self.vol_v2)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_ECOM_BE'})
    # If there is more than one unique array in the conf file
    def test_create_CG_multi_array_failure(
            self, _mock_volume_type, _mock_storage_system):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_consistencygroup,
                          self.data.test_ctxt, self.data.test_CG)

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_members_of_replication_group',
        return_value=None)
    @mock.patch.object(
        FakeDB,
        'volume_get_all_by_group',
        return_value=None)
    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_ECOM_BE'})
    # There is more than one unique array in the conf file
    def test_delete_CG_no_volumes_multi_array_failure(
            self, _mock_volume_type, _mock_storage_system,
            _mock_db_volumes, _mock_members):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_consistencygroup,
                          self.data.test_ctxt, self.data.test_CG, [])

    @mock.patch.object(
        emc_vmax_common.EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'MULTI_ECOM_BE'})
    def test_create_volume_in_CG_multi_ecom_success(
            self, _mock_volume_type, mock_storage_system):
        self.data.test_volume_CG['provider_location'] = None
        self.driver.create_volume(self.data.test_volume_CG)

    def _cleanup(self):
        bExists = os.path.exists(self.config_file_path)
        if bExists:
            os.remove(self.config_file_path)
        shutil.rmtree(self.tempdir)
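
# A sketch of the per-pool record shape that parse_file_to_get_array_map()
# appears to return, inferred purely from the assertions in the two
# test_array_info_multi_ecom_* tests above (illustrative, not the utility's
# documented contract):
#
#     poolRec = {'SerialNumber': '1234567891011',
#                'PortGroup': <port group name>,
#                'PoolName': 'gold',
#                'EcomUserName': 'user',
#                'EcomPassword': 'pass',
#                'FastPolicy': None,      # or e.g. 'BRONZE1'
#                'EcomUseSSL': False}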
class EMCVMAXProvisionV3Test(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() super(EMCVMAXProvisionV3Test, self).setUp() configuration = mock.Mock() configuration.safe_get.return_value = 'ProvisionV3Tests' configuration.config_group = 'ProvisionV3Tests' emc_vmax_common.EMCVMAXCommon._gather_info = mock.Mock() driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver def test_get_storage_pool_setting(self): provisionv3 = self.driver.common.provisionv3 conn = FakeEcomConnection() slo = 'Bronze' workload = 'DSS' poolInstanceName = {} poolInstanceName['InstanceID'] = "SATA_GOLD1" poolInstanceName['CreationClassName'] = ( self.data.storagepool_creationclass) storagePoolCapability = provisionv3.get_storage_pool_capability( conn, poolInstanceName) storagepoolsetting = provisionv3.get_storage_pool_setting( conn, storagePoolCapability, slo, workload) self.assertTrue( 'Bronze:DSS' in storagepoolsetting['InstanceID']) def test_get_storage_pool_setting_exception(self): provisionv3 = self.driver.common.provisionv3 conn = FakeEcomConnection() slo = 'Bronze' workload = 'NONE' poolInstanceName = {} poolInstanceName['InstanceID'] = "SATA_GOLD1" poolInstanceName['CreationClassName'] = ( self.data.storagepool_creationclass) storagePoolCapability = provisionv3.get_storage_pool_capability( conn, poolInstanceName) self.assertRaises(exception.VolumeBackendAPIException, provisionv3.get_storage_pool_setting, conn, storagePoolCapability, slo, workload) def test_extend_volume_in_SG(self): provisionv3 = self.driver.common.provisionv3 conn = FakeEcomConnection() storageConfigService = { 'CreationClassName': 'Symm_ElementCompositionService', 'SystemName': 'SYMMETRIX+000195900551'} theVolumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) inVolumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeSize = 3 extraSpecs = {'volume_backend_name': 'GOLD_BE', 'isV3': True} job = { 'Job': {'InstanceID': '9999', 'status': 'success', 'type': None}} conn.InvokeMethod = mock.Mock(return_value=(4096, job)) provisionv3.utils.wait_for_job_complete = mock.Mock(return_value=( 0, 'Success')) volumeDict = {'classname': u'Symm_StorageVolume', 'keybindings': EMCVMAXCommonData.keybindings} provisionv3.get_volume_dict_from_job = ( mock.Mock(return_value=volumeDict)) result = provisionv3.extend_volume_in_SG(conn, storageConfigService, theVolumeInstanceName, inVolumeInstanceName, volumeSize, extraSpecs) self.assertEqual( ({'classname': u'Symm_StorageVolume', 'keybindings': { 'CreationClassName': u'Symm_StorageVolume', 'DeviceID': u'1', 'SystemCreationClassName': u'Symm_StorageSystem', 'SystemName': u'SYMMETRIX+000195900551'}}, 0), result) def test_extend_volume_in_SG_with_Exception(self): provisionv3 = self.driver.common.provisionv3 conn = FakeEcomConnection() storageConfigService = { 'CreationClassName': 'Symm_ElementCompositionService', 'SystemName': 'SYMMETRIX+000195900551'} theVolumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) inVolumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeSize = 3 extraSpecs = {'volume_backend_name': 'GOLD_BE', 'isV3': True} job = { 'Job': {'InstanceID': '9999', 'status': 'success', 'type': None}} conn.InvokeMethod = mock.Mock(return_value=(4096, job)) provisionv3.utils.wait_for_job_complete = mock.Mock(return_value=( 2, 'Failure')) self.assertRaises( exception.VolumeBackendAPIException, provisionv3.extend_volume_in_SG, conn, 
storageConfigService, theVolumeInstanceName, inVolumeInstanceName, volumeSize, extraSpecs) class EMCVMAXMaskingTest(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() super(EMCVMAXMaskingTest, self).setUp() configuration = mock.Mock() configuration.safe_get.return_value = 'MaskingTests' configuration.config_group = 'MaskingTests' emc_vmax_common.EMCVMAXCommon._get_ecom_connection = mock.Mock( return_value=self.fake_ecom_connection()) emc_vmax_common.EMCVMAXCommon._gather_info = mock.Mock( return_value=self.fake_gather_info()) instancename = FakeCIMInstanceName() emc_vmax_utils.EMCVMAXUtils.get_instance_name = ( instancename.fake_getinstancename) driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object) def fake_ecom_connection(self): conn = FakeEcomConnection() return conn def fake_gather_info(self): return def test_get_v3_default_storage_group_instance_name(self): masking = self.driver.common.masking conn = self.fake_ecom_connection() extraSpecs = self.data.extra_specs masking._get_and_remove_from_storage_group_v3 = mock.Mock() controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) maskingviewdict = self.driver.common._populate_masking_dict( self.data.test_volume, self.data.connector, extraSpecs) result = ( masking._get_v3_default_storagegroup_instancename( conn, maskingviewdict['volumeInstance'], maskingviewdict, controllerConfigService, maskingviewdict['volumeName'])) self.assertEqual('OS-SRP_1-Bronze-DSS-SG', result['ElementName']) def test_get_v3_default_storage_group_instance_name_warning(self): masking = self.driver.common.masking conn = self.fake_ecom_connection() extraSpecs = self.data.extra_specs masking.utils.get_storage_groups_from_volume = mock.Mock( return_value=[]) controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) maskingviewdict = self.driver.common._populate_masking_dict( self.data.test_volume, self.data.connector, extraSpecs) result = ( masking._get_v3_default_storagegroup_instancename( conn, maskingviewdict['volumeInstance'], maskingviewdict, controllerConfigService, maskingviewdict['volumeName'])) self.assertIsNone(result) def test_return_volume_to_default_storage_group_v3(self): masking = self.driver.common.masking conn = self.fake_ecom_connection() volumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeInstance = conn.GetInstance(volumeInstanceName) volumeName = "V3-Vol" extraSpecs = {'volume_backend_name': 'V3_BE', 'isV3': True, 'storagetype:pool': 'SRP_1', 'storagetype:workload': 'DSS', 'storagetype:slo': 'Bronze'} controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) masking.provisionv3.create_storage_group_v3 = mock.Mock( return_value={'Value'}) masking._is_volume_in_storage_group = mock.Mock( return_value=True) masking.return_volume_to_default_storage_group_v3 = mock.Mock() masking._return_back_to_default_sg( conn, controllerConfigService, volumeInstance, volumeName, extraSpecs) masking.return_volume_to_default_storage_group_v3.assert_called_with( conn, controllerConfigService, volumeInstance, volumeName, extraSpecs) def test_return_volume_to_default_storage_group_v3_exception(self): masking = self.driver.common.masking conn = self.fake_ecom_connection() volumeInstanceName = ( 
conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeInstance = conn.GetInstance(volumeInstanceName) volumeName = "V3-Vol" extraSpecs = {'volume_backend_name': 'V3_BE', 'isV3': True, 'storagetype:pool': 'SRP_1', 'storagetype:workload': 'DSS', 'storagetype:slo': 'Bronze'} controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) self.assertRaises( exception.VolumeBackendAPIException, masking.return_volume_to_default_storage_group_v3, conn, controllerConfigService, volumeInstance, volumeName, extraSpecs) def test_add_volume_to_sg_and_verify(self): masking = self.driver.common.masking conn = self.fake_ecom_connection() volumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeInstance = conn.GetInstance(volumeInstanceName) volumeName = "V3-Vol" storageGroupInstanceName = self.data.storagegroups[0] sgGroupName = self.data.storagegroupname extraSpecs = {'volume_backend_name': 'V3_BE', 'isV3': True, 'storagetype:pool': 'SRP_1', 'storagetype:workload': 'DSS', 'storagetype:slo': 'Bronze'} controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) msg = masking._add_volume_to_sg_and_verify( conn, controllerConfigService, storageGroupInstanceName, volumeInstance, volumeName, sgGroupName, extraSpecs) self.assertIsNone(msg) def test_remove_volume_from_sg(self): masking = self.driver.common.masking conn = self.fake_ecom_connection() volumeInstanceName = ( conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) volumeInstance = conn.GetInstance(volumeInstanceName) storageGroupInstanceName = self.data.storagegroups[1] extraSpecs = {'volume_backend_name': 'V3_BE', 'isV3': True, 'storagetype:pool': 'SRP_1', 'storagetype:workload': 'DSS', 'storagetype:slo': 'Bronze'} controllerConfigService = ( self.driver.utils.find_controller_configuration_service( conn, self.data.storage_system)) masking._remove_volume_from_sg = mock.Mock() masking._cleanup_deletion_v3( conn, controllerConfigService, volumeInstance, extraSpecs) masking._remove_volume_from_sg.assert_called_with( conn, controllerConfigService, storageGroupInstanceName, volumeInstance, extraSpecs) class EMCVMAXFCTest(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() super(EMCVMAXFCTest, self).setUp() configuration = mock.Mock() configuration.safe_get.return_value = 'FCTests' configuration.config_group = 'FCTests' emc_vmax_common.EMCVMAXCommon._gather_info = mock.Mock() driver = emc_vmax_fc.EMCVMAXFCDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver def test_terminate_connection(self): common = self.driver.common common.conn = FakeEcomConnection() common._unmap_lun = mock.Mock() common.get_masking_view_by_volume = mock.Mock( return_value='testMV') common.get_masking_views_by_port_group = mock.Mock( return_value=[]) common.get_target_wwns = mock.Mock( return_value=EMCVMAXCommonData.target_wwns) data = self.driver.terminate_connection(self.data.test_volume_v3, self.data.connector) common.get_target_wwns.assert_called_once_with( EMCVMAXCommonData.storage_system, EMCVMAXCommonData.connector) numTargetWwns = len(EMCVMAXCommonData.target_wwns) self.assertEqual(numTargetWwns, len(data['data'])) def test_get_common_masking_views_two_exist(self): common = self.driver.common common.conn = FakeEcomConnection() maskingviews = [{'CreationClassName': 'Symm_LunMaskingView', 'ElementName': 'MV1'}, {'CreationClassName': 'Symm_LunMaskingView', 'ElementName': 'MV2'}] portGroupInstanceName = ( 
self.driver.common.masking._get_port_group_from_masking_view( common.conn, self.data.lunmaskctrl_name, self.data.storage_system)) initiatorGroupInstanceName = ( self.driver.common.masking._get_initiator_group_from_masking_view( common.conn, self.data.lunmaskctrl_name, self.data.storage_system)) common.get_masking_views_by_port_group = mock.Mock( return_value=maskingviews) common.get_masking_views_by_initiator_group = mock.Mock( return_value=maskingviews) mvInstances = self.driver._get_common_masking_views( portGroupInstanceName, initiatorGroupInstanceName) self.assertTrue(len(mvInstances) == 2) def test_get_common_masking_views_one_overlap(self): common = self.driver.common common.conn = FakeEcomConnection() maskingviewsPG = [{'CreationClassName': 'Symm_LunMaskingView', 'ElementName': 'MV1'}, {'CreationClassName': 'Symm_LunMaskingView', 'ElementName': 'MV2'}] maskingviewsIG = [{'CreationClassName': 'Symm_LunMaskingView', 'ElementName': 'MV1'}] portGroupInstanceName = ( self.driver.common.masking._get_port_group_from_masking_view( common.conn, self.data.lunmaskctrl_name, self.data.storage_system)) initiatorGroupInstanceName = ( self.driver.common.masking._get_initiator_group_from_masking_view( common.conn, self.data.lunmaskctrl_name, self.data.storage_system)) common.get_masking_views_by_port_group = mock.Mock( return_value=maskingviewsPG) common.get_masking_views_by_initiator_group = mock.Mock( return_value=maskingviewsIG) mvInstances = self.driver._get_common_masking_views( portGroupInstanceName, initiatorGroupInstanceName) self.assertTrue(len(mvInstances) == 1) def test_get_common_masking_views_no_overlap(self): common = self.driver.common common.conn = FakeEcomConnection() maskingviewsPG = [{'CreationClassName': 'Symm_LunMaskingView', 'ElementName': 'MV2'}] maskingviewsIG = [{'CreationClassName': 'Symm_LunMaskingView', 'ElementName': 'MV1'}] portGroupInstanceName = ( self.driver.common.masking._get_port_group_from_masking_view( common.conn, self.data.lunmaskctrl_name, self.data.storage_system)) initiatorGroupInstanceName = ( self.driver.common.masking._get_initiator_group_from_masking_view( common.conn, self.data.lunmaskctrl_name, self.data.storage_system)) common.get_masking_views_by_port_group = mock.Mock( return_value=maskingviewsPG) common.get_masking_views_by_initiator_group = mock.Mock( return_value=maskingviewsIG) mvInstances = self.driver._get_common_masking_views( portGroupInstanceName, initiatorGroupInstanceName) self.assertTrue(len(mvInstances) == 0) class EMCVMAXUtilsTest(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() super(EMCVMAXUtilsTest, self).setUp() configuration = mock.Mock() configuration.safe_get.return_value = 'UtilsTests' configuration.config_group = 'UtilsTests' emc_vmax_common.EMCVMAXCommon._gather_info = mock.Mock() driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object) def test_get_target_endpoints(self): conn = FakeEcomConnection() hardwareid = 123456789012345 result = self.driver.utils.get_target_endpoints(conn, hardwareid) self.assertEqual( ([{'Name': '5000090000000000'}]), result) def test_get_protocol_controller(self): conn = FakeEcomConnection() hardwareid = 123456789012345 result = self.driver.utils.get_protocol_controller(conn, hardwareid) self.assertEqual( ({'CreationClassName': 'Symm_LunMaskingView', 'ElementName': 'OS-fakehost-gold-I-MV'}), result) def test_get_protocol_controller_exception(self): conn = 
FakeEcomConnection() conn.AssociatorNames = mock.Mock(return_value=[]) hardwareid = 123456789012345 self.assertRaises( exception.VolumeBackendAPIException, self.driver.utils.get_protocol_controller, conn, hardwareid) def test_set_target_element_supplier_in_rsd(self): conn = FakeEcomConnection() extraSpecs = self.data.extra_specs repServiceInstanceName = ( self.driver.utils.find_replication_service( conn, self.data.storage_system)) rsdInstance = self.driver.utils.set_target_element_supplier_in_rsd( conn, repServiceInstanceName, emc_vmax_common.SNAPVX_REPLICATION_TYPE, emc_vmax_common.CREATE_NEW_TARGET, extraSpecs) self.assertIsNotNone(rsdInstance) def test_set_copy_methodology_in_rsd(self): conn = FakeEcomConnection() extraSpecs = self.data.extra_specs repServiceInstanceName = ( self.driver.utils.find_replication_service( conn, self.data.storage_system)) rsdInstance = self.driver.utils.set_copy_methodology_in_rsd( conn, repServiceInstanceName, emc_vmax_provision.SYNC_CLONE_LOCAL, emc_vmax_provision.COPY_ON_WRITE, extraSpecs) self.assertIsNotNone(rsdInstance) class EMCVMAXCommonTest(test.TestCase): def setUp(self): self.data = EMCVMAXCommonData() super(EMCVMAXCommonTest, self).setUp() configuration = mock.Mock() configuration.safe_get.return_value = 'CommonTests' configuration.config_group = 'CommonTests' emc_vmax_common.EMCVMAXCommon._gather_info = mock.Mock() driver = emc_vmax_iscsi.EMCVMAXISCSIDriver(configuration=configuration) driver.db = FakeDB() self.driver = driver self.driver.utils = emc_vmax_utils.EMCVMAXUtils(object) @mock.patch.object( emc_vmax_common.EMCVMAXCommon, '_get_pool_and_storage_system', return_value=(None, EMCVMAXCommonData.storage_system)) def test_create_duplicate_volume(self, mock_pool): common = self.driver.common common.conn = FakeEcomConnection() volumeInstanceName = ( common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) sourceInstance = common.conn.GetInstance(volumeInstanceName) cloneName = "SS-V3-Vol" extraSpecs = {'volume_backend_name': 'V3_BE', 'isV3': True, 'storagetype:pool': 'SRP_1', 'storagetype:workload': 'DSS', 'storagetype:slo': 'Bronze'} targetInstance = common.conn.GetInstance(volumeInstanceName) common.utils.find_volume_instance = mock.Mock( return_value=targetInstance) duplicateVolumeInstance = self.driver.common._create_duplicate_volume( sourceInstance, cloneName, extraSpecs) self.assertIsNotNone(duplicateVolumeInstance) def test_cleanup_target(self): common = self.driver.common common.conn = FakeEcomConnection() volumeInstanceName = ( common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) extraSpecs = {'volume_backend_name': 'V3_BE', 'isV3': True, 'storagetype:pool': 'SRP_1', 'storagetype:workload': 'DSS', 'storagetype:slo': 'Bronze'} targetInstance = common.conn.GetInstance(volumeInstanceName) repServiceInstanceName = ( self.driver.utils.find_replication_service( common.conn, self.data.storage_system)) common.utils.find_sync_sv_by_target = mock.Mock( return_value=(None, None)) self.driver.common._cleanup_target( repServiceInstanceName, targetInstance, extraSpecs) cinder-8.0.0/cinder/tests/unit/fake_driver.py0000664000567000056710000003027112701406257022353 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from cinder import exception from cinder.objects import fields from cinder.tests.unit.brick import fake_lvm from cinder.volume import driver from cinder.volume.drivers import lvm from cinder.zonemanager import utils as fczm_utils class FakeISCSIDriver(lvm.LVMVolumeDriver): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): super(FakeISCSIDriver, self).__init__(execute=self.fake_execute, *args, **kwargs) self.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default', self.fake_execute) def check_for_setup_error(self): """No setup necessary in fake mode.""" pass def initialize_connection(self, volume, connector): volume_metadata = {} for metadata in volume['volume_admin_metadata']: volume_metadata[metadata['key']] = metadata['value'] access_mode = volume_metadata.get('attached_mode') if access_mode is None: access_mode = ('ro' if volume_metadata.get('readonly') == 'True' else 'rw') return {'driver_volume_type': 'iscsi', 'data': {'access_mode': access_mode}} def terminate_connection(self, volume, connector, **kwargs): pass @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" return (None, None) class FakeISERDriver(FakeISCSIDriver): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): super(FakeISERDriver, self).__init__(execute=self.fake_execute, *args, **kwargs) def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'iser', 'data': {} } @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" return (None, None) class FakeFibreChannelDriver(driver.FibreChannelDriver): @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, }} @fczm_utils.AddFCZone def no_zone_initialize_connection(self, volume, connector): """This shouldn't call the ZM.""" return { 'driver_volume_type': 'bogus', 'data': { 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, }} @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): return { 'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, }} @fczm_utils.RemoveFCZone def no_zone_terminate_connection(self, volume, connector, **kwargs): return { 'driver_volume_type': 'bogus', 'data': { 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, }} class LoggingVolumeDriver(driver.VolumeDriver): """Logs and records calls, for unit tests.""" def check_for_setup_error(self): pass def create_volume(self, volume): self.log_action('create_volume', volume) def delete_volume(self, volume): self.clear_volume(volume) self.log_action('delete_volume', volume) def clear_volume(self, volume): self.log_action('clear_volume', volume) def local_path(self, volume): raise NotImplementedError() def ensure_export(self, context, volume): self.log_action('ensure_export', volume) def create_export(self, context, volume): self.log_action('create_export', volume) def remove_export(self, context, 
volume): self.log_action('remove_export', volume) def initialize_connection(self, volume, connector): self.log_action('initialize_connection', volume) def terminate_connection(self, volume, connector): self.log_action('terminate_connection', volume) def create_export_snapshot(self, context, snapshot): self.log_action('create_export_snapshot', snapshot) def remove_export_snapshot(self, context, snapshot): self.log_action('remove_export_snapshot', snapshot) def initialize_connection_snapshot(self, snapshot, connector): self.log_action('initialize_connection_snapshot', snapshot) def terminate_connection_snapshot(self, snapshot, connector): self.log_action('terminate_connection_snapshot', snapshot) def create_cloned_volume(self, volume, src_vol): self.log_action('create_cloned_volume', volume) _LOGS = [] @staticmethod def clear_logs(): LoggingVolumeDriver._LOGS = [] @staticmethod def log_action(action, parameters): """Logs the command.""" log_dictionary = {} if parameters: log_dictionary = dict(parameters) log_dictionary['action'] = action LoggingVolumeDriver._LOGS.append(log_dictionary) @staticmethod def all_logs(): return LoggingVolumeDriver._LOGS @staticmethod def logs_like(action, **kwargs): matches = [] for entry in LoggingVolumeDriver._LOGS: if entry['action'] != action: continue match = True for k, v in kwargs.items(): if entry.get(k) != v: match = False break if match: matches.append(entry) return matches class FakeGateDriver(lvm.LVMVolumeDriver): """Class designation for FakeGateDriver. FakeGateDriver is for TESTING ONLY. There are a few driver features such as CG and replication that are not supported by the reference driver LVM currently. Adding those functions in this fake driver will help detect problems when changes are introduced in those functions. Implementation of this driver is NOT meant for production. They are implemented simply to make sure calls to the driver functions are passing in the correct parameters, and the results returned by the driver are handled properly by the manager. """ def __init__(self, *args, **kwargs): super(FakeGateDriver, self).__init__(*args, **kwargs) def _update_volume_stats(self): super(FakeGateDriver, self)._update_volume_stats() self._stats["pools"][0]["consistencygroup_support"] = True self._stats["pools"][0]["replication_enabled"] = True # NOTE(xyang): Consistency Group functions implemented below # are for testing purpose only. Data consistency cannot be # achieved by running these functions. 
    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        # A consistencygroup entry is already created in the db.
        # This driver just returns a status.
        now = timeutils.utcnow()
        model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE,
                        'updated_at': now}
        return model_update

    def create_consistencygroup_from_src(self, context, group, volumes,
                                         cgsnapshot=None, snapshots=None,
                                         source_cg=None, source_vols=None):
        """Creates a consistencygroup from a cgsnapshot or a source cg."""
        for vol in volumes:
            try:
                if snapshots:
                    for snapshot in snapshots:
                        if vol['snapshot_id'] == snapshot['id']:
                            self.create_volume_from_snapshot(vol, snapshot)
                            break
            except Exception:
                raise
            try:
                if source_vols:
                    for source_vol in source_vols:
                        if vol['source_volid'] == source_vol['id']:
                            self.create_cloned_volume(vol, source_vol)
                            break
            except Exception:
                raise
        return None, None

    def delete_consistencygroup(self, context, group, volumes):
        """Deletes a consistencygroup and the volumes in the group."""
        model_update = {'status': group.status}
        volume_model_updates = []
        for volume_ref in volumes:
            volume_model_update = {'id': volume_ref.id}
            try:
                self.remove_export(context, volume_ref)
                self.delete_volume(volume_ref)
                volume_model_update['status'] = 'deleted'
            except exception.VolumeIsBusy:
                volume_model_update['status'] = 'available'
            except Exception:
                volume_model_update['status'] = 'error'
                model_update['status'] = fields.ConsistencyGroupStatus.ERROR
            volume_model_updates.append(volume_model_update)
        return model_update, volume_model_updates

    def update_consistencygroup(self, context, group,
                                add_volumes=None, remove_volumes=None):
        """Updates a consistency group."""
        return None, None, None

    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Creates a cgsnapshot.

        Snapshots created here are NOT consistent. This is for
        testing purposes only.
        """
        model_update = {'status': 'available'}
        snapshot_model_updates = []
        for snapshot in snapshots:
            snapshot_model_update = {'id': snapshot.id}
            try:
                self.create_snapshot(snapshot)
                snapshot_model_update['status'] = 'available'
            except Exception:
                snapshot_model_update['status'] = 'error'
                model_update['status'] = 'error'
            snapshot_model_updates.append(snapshot_model_update)
        return model_update, snapshot_model_updates

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Deletes a cgsnapshot."""
        model_update = {'status': cgsnapshot.status}
        snapshot_model_updates = []
        for snapshot in snapshots:
            snapshot_model_update = {'id': snapshot.id}
            try:
                self.delete_snapshot(snapshot)
                snapshot_model_update['status'] = 'deleted'
            except exception.SnapshotIsBusy:
                snapshot_model_update['status'] = 'available'
            except Exception:
                snapshot_model_update['status'] = 'error'
                model_update['status'] = 'error'
            snapshot_model_updates.append(snapshot_model_update)
        return model_update, snapshot_model_updates

    # Replication functions here are not really doing replication.
    # They are added so that we can do a basic sanity check of the
    # replication APIs.
def replication_enable(self, context, volume): return def replication_disable(self, context, volume): return def replication_failover(self, context, volume, secondary): return {'model_update': {'status': volume['status']}, 'replication_driver_data': {'replication_driver_data': ''}} def list_replication_targets(self, context, volume): targets = [] remote_target = {'managed_backend_name': None, 'type': 'unmanaged', 'remote_device_id': 'fake_remote_device', 'san_ip': '123.456.78.90'} targets.append(remote_target) return {'volume_id': volume['id'], 'targets': targets} cinder-8.0.0/cinder/tests/unit/glance/0000775000567000056710000000000012701406543020744 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/glance/__init__.py0000664000567000056710000000127012701406250023050 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`glance` -- Stubs for Glance ================================= """ cinder-8.0.0/cinder/tests/unit/glance/stubs.py0000664000567000056710000001072612701406257022466 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glanceclient.exc NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', 'container_format', 'checksum', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'status', 'min_disk', 'min_ram', 'is_public'] class StubGlanceClient(object): def __init__(self, images=None): self._images = [] _images = images or [] map(lambda image: self.create(**image), _images) # NOTE(bcwaldon): HACK to get client.images.* to work self.images = lambda: None for fn in ('list', 'get', 'data', 'create', 'update', 'upload', 'delete'): setattr(self.images, fn, getattr(self, fn)) self.schemas = lambda: None setattr(self.schemas, 'get', getattr(self, 'schemas_get')) # TODO(bcwaldon): implement filters def list(self, filters=None, marker=None, limit=30): if marker is None: index = 0 else: for index, image in enumerate(self._images): if image.id == str(marker): index += 1 break else: raise glanceclient.exc.BadRequest('Marker not found') return self._images[index:index + limit] def get(self, image_id): for image in self._images: if image.id == str(image_id): return image raise glanceclient.exc.NotFound(image_id) def data(self, image_id): image = self.get(image_id) if getattr(image, 'size', 0): return ['*' * image.size] else: return [] def create(self, **metadata): metadata['created_at'] = NOW_GLANCE_FORMAT metadata['updated_at'] = NOW_GLANCE_FORMAT self._images.append(FakeImage(metadata)) try: image_id = str(metadata['id']) except KeyError: # auto-generate an id if one wasn't provided image_id = str(len(self._images)) self._images[-1].id = image_id return self._images[-1] def update(self, image_id, **metadata): for i, image in enumerate(self._images): if image.id == str(image_id): for k, v in metadata.items(): if k == 'data': setattr(self._images[i], 'size', len(v)) else: setattr(self._images[i], k, v) return self._images[i] raise glanceclient.exc.NotFound(image_id) def delete(self, image_id): for i, image in enumerate(self._images): if image.id == image_id: del self._images[i] return raise glanceclient.exc.NotFound(image_id) def upload(self, image_id, data): for i, image in enumerate(self._images): if image.id == image_id: setattr(self._images[i], 'size', len(data)) return raise glanceclient.exc.NotFound(image_id) def schemas_get(self, schema_name): if schema_name != 'image': raise glanceclient.exc.NotFound() return FakeSchema() class FakeImage(object): def __init__(self, metadata): raw = dict.fromkeys(IMAGE_ATTRIBUTES) raw.update(metadata) self.__dict__['raw'] = raw def __getattr__(self, key): try: return self.__dict__['raw'][key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): try: self.__dict__['raw'][key] = value except KeyError: raise AttributeError(key) def keys(self): return self.__dict__['raw'].keys() class FakeSchema(object): def is_base_property(self, key): if key in IMAGE_ATTRIBUTES: return True else: return False cinder-8.0.0/cinder/tests/unit/test_vmware_vmdk.py0000664000567000056710000034417212701406250023454 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Test suite for VMware vCenter VMDK driver. """ from distutils import version as ver import ddt import mock from oslo_utils import units from oslo_vmware import api from oslo_vmware import exceptions from oslo_vmware import image_transfer import six from cinder import exception as cinder_exceptions from cinder import test from cinder.volume import configuration from cinder.volume.drivers.vmware import datastore as hub from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions from cinder.volume.drivers.vmware import vmdk from cinder.volume.drivers.vmware import volumeops class FakeObject(object): def __init__(self): self._fields = {} def __setitem__(self, key, value): self._fields[key] = value def __getitem__(self, item): return self._fields[item] # TODO(vbala) Split test methods handling multiple cases into multiple methods, # each handling a specific case. @ddt.ddt class VMwareVcVmdkDriverTestCase(test.TestCase): """Unit tests for VMwareVcVmdkDriver.""" IP = 'localhost' PORT = 443 USERNAME = 'username' PASSWORD = 'password' VOLUME_FOLDER = 'cinder-volumes' API_RETRY_COUNT = 3 TASK_POLL_INTERVAL = 5.0 IMG_TX_TIMEOUT = 10 MAX_OBJECTS = 100 TMP_DIR = "/vmware-tmp" CA_FILE = "/etc/ssl/rui-ca-cert.pem" VMDK_DRIVER = vmdk.VMwareVcVmdkDriver CLUSTERS = ["cls-1", "cls-2"] DEFAULT_VC_VERSION = '5.5' VOL_ID = 'abcdefab-cdef-abcd-efab-cdefabcdefab', DISPLAY_NAME = 'foo', VOL_TYPE_ID = 'd61b8cb3-aa1b-4c9b-b79e-abcdbda8b58a' VOL_SIZE = 2 PROJECT_ID = 'd45beabe-f5de-47b7-b462-0d9ea02889bc' SNAPSHOT_ID = '2f59670a-0355-4790-834c-563b65bba740' SNAPSHOT_NAME = 'snap-foo' SNAPSHOT_DESCRIPTION = 'test snapshot' IMAGE_ID = 'eb87f4b0-d625-47f8-bb45-71c43b486d3a' IMAGE_NAME = 'image-1' def setUp(self): super(VMwareVcVmdkDriverTestCase, self).setUp() self._config = mock.Mock(spec=configuration.Configuration) self._config.vmware_host_ip = self.IP self._config.vmware_host_username = self.USERNAME self._config.vmware_host_password = self.PASSWORD self._config.vmware_wsdl_location = None self._config.vmware_volume_folder = self.VOLUME_FOLDER self._config.vmware_api_retry_count = self.API_RETRY_COUNT self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS self._config.vmware_tmp_dir = self.TMP_DIR self._config.vmware_ca_file = self.CA_FILE self._config.vmware_insecure = False self._config.vmware_cluster_name = self.CLUSTERS self._config.vmware_host_version = self.DEFAULT_VC_VERSION self._db = mock.Mock() self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config, db=self._db) api_retry_count = self._config.vmware_api_retry_count task_poll_interval = self._config.vmware_task_poll_interval, self._session = api.VMwareAPISession(self.IP, self.USERNAME, self.PASSWORD, api_retry_count, task_poll_interval, create_session=False) self._volumeops = volumeops.VMwareVolumeOps(self._session, self.MAX_OBJECTS) def test_get_volume_stats(self): stats = self._driver.get_volume_stats() self.assertEqual('VMware', stats['vendor_name']) self.assertEqual(self._driver.VERSION, stats['driver_version']) self.assertEqual('vmdk', stats['storage_protocol']) self.assertEqual(0, stats['reserved_percentage']) self.assertEqual('unknown', stats['total_capacity_gb']) self.assertEqual('unknown', stats['free_capacity_gb']) def _create_volume_dict(self, vol_id=VOL_ID, 
display_name=DISPLAY_NAME, volume_type_id=VOL_TYPE_ID, status='available', size=VOL_SIZE, attachment=None, project_id=PROJECT_ID): return {'id': vol_id, 'display_name': display_name, 'name': 'volume-%s' % vol_id, 'volume_type_id': volume_type_id, 'status': status, 'size': size, 'volume_attachment': attachment, 'project_id': project_id, } @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') def test_verify_volume_creation(self, select_ds_for_volume): volume = self._create_volume_dict() self._driver._verify_volume_creation(volume) select_ds_for_volume.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, '_verify_volume_creation') def test_create_volume(self, verify_volume_creation): volume = self._create_volume_dict() self._driver.create_volume(volume) verify_volume_creation.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_volume_without_backing(self, vops): vops.get_backing.return_value = None volume = self._create_volume_dict() self._driver.delete_volume(volume) vops.get_backing.assert_called_once_with(volume['name']) self.assertFalse(vops.delete_backing.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_volume(self, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing volume = self._create_volume_dict() self._driver.delete_volume(volume) vops.get_backing.assert_called_once_with(volume['name']) vops.delete_backing.assert_called_once_with(backing) @mock.patch('cinder.volume.drivers.vmware.vmdk.' '_get_volume_type_extra_spec') @mock.patch('cinder.volume.drivers.vmware.volumeops.' 'VirtualDiskType.validate') def test_get_extra_spec_disk_type(self, validate, get_volume_type_extra_spec): vmdk_type = mock.sentinel.vmdk_type get_volume_type_extra_spec.return_value = vmdk_type type_id = mock.sentinel.type_id self.assertEqual(vmdk_type, self._driver._get_extra_spec_disk_type(type_id)) get_volume_type_extra_spec.assert_called_once_with( type_id, 'vmdk_type', default_value=vmdk.THIN_VMDK_TYPE) validate.assert_called_once_with(vmdk_type) @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_disk_type') def test_get_disk_type(self, get_extra_spec_disk_type): vmdk_type = mock.sentinel.vmdk_type get_extra_spec_disk_type.return_value = vmdk_type volume = self._create_volume_dict() self.assertEqual(vmdk_type, self._driver._get_disk_type(volume)) get_extra_spec_disk_type.assert_called_once_with( volume['volume_type_id']) def _create_snapshot_dict(self, volume, snap_id=SNAPSHOT_ID, name=SNAPSHOT_NAME, description=SNAPSHOT_DESCRIPTION): return {'id': snap_id, 'volume': volume, 'volume_name': volume['name'], 'name': name, 'display_description': description, } @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_snapshot_without_backing(self, vops): vops.get_backing.return_value = None volume = self._create_volume_dict() snapshot = self._create_snapshot_dict(volume) self._driver.create_snapshot(snapshot) vops.get_backing.assert_called_once_with(snapshot['volume_name']) self.assertFalse(vops.create_snapshot.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_snapshot_with_backing(self, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing volume = self._create_volume_dict() snapshot = self._create_snapshot_dict(volume) self._driver.create_snapshot(snapshot) vops.get_backing.assert_called_once_with(snapshot['volume_name']) vops.create_snapshot.assert_called_once_with( backing, snapshot['name'], snapshot['display_description']) def 
test_create_snapshot_when_attached(self): volume = self._create_volume_dict(status='in-use') snapshot = self._create_snapshot_dict(volume) self.assertRaises(cinder_exceptions.InvalidVolume, self._driver.create_snapshot, snapshot) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_snapshot_without_backing(self, vops): vops.get_backing.return_value = None volume = self._create_volume_dict() snapshot = self._create_snapshot_dict(volume) self._driver.delete_snapshot(snapshot) vops.get_backing.assert_called_once_with(snapshot['volume_name']) self.assertFalse(vops.delete_snapshot.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_snapshot_with_backing(self, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing volume = self._create_volume_dict() snapshot = self._create_snapshot_dict(volume) self._driver.delete_snapshot(snapshot) vops.get_backing.assert_called_once_with(snapshot['volume_name']) vops.delete_snapshot.assert_called_once_with( backing, snapshot['name']) def test_delete_snapshot_when_attached(self): volume = self._create_volume_dict(status='in-use') snapshot = self._create_snapshot_dict(volume) self.assertRaises(cinder_exceptions.InvalidVolume, self._driver.delete_snapshot, snapshot) @ddt.data('vmdk', 'VMDK', None) def test_validate_disk_format(self, disk_format): self._driver._validate_disk_format(disk_format) def test_validate_disk_format_with_invalid_format(self): self.assertRaises(cinder_exceptions.ImageUnacceptable, self._driver._validate_disk_format, 'img') def _create_image_meta(self, _id=IMAGE_ID, name=IMAGE_NAME, disk_format='vmdk', size=1 * units.Gi, container_format='bare', vmware_disktype='streamOptimized', vmware_adaptertype='lsiLogic', is_public=True): return {'id': _id, 'name': name, 'disk_format': disk_format, 'size': size, 'container_format': container_format, 'properties': {'vmware_disktype': vmware_disktype, 'vmware_adaptertype': vmware_adaptertype, }, 'is_public': is_public, } @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_validate_disk_format') def test_copy_image_to_volume_with_ova_container(self, validate_disk_format): image_service = mock.Mock() image_meta = self._create_image_meta(container_format='ova') image_service.show.return_value = image_meta context = mock.sentinel.context volume = self._create_volume_dict() image_id = mock.sentinel.image_id self.assertRaises( cinder_exceptions.ImageUnacceptable, self._driver.copy_image_to_volume, context, volume, image_service, image_id) validate_disk_format.assert_called_once_with(image_meta['disk_format']) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_validate_disk_format') @mock.patch('cinder.volume.drivers.vmware.volumeops.' 'VirtualDiskAdapterType.validate') @mock.patch('cinder.volume.drivers.vmware.vmdk.ImageDiskType.' 
'validate') @mock.patch.object(VMDK_DRIVER, '_create_volume_from_non_stream_optimized_image') @mock.patch.object(VMDK_DRIVER, '_fetch_stream_optimized_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def _test_copy_image_to_volume(self, extend_backing, vops, fetch_stream_optimized_image, create_volume_from_non_stream_opt_image, validate_image_disk_type, validate_image_adapter_type, validate_disk_format, vmware_disk_type='streamOptimized', backing_disk_size=VOL_SIZE, call_extend_backing=False): image_service = mock.Mock() image_meta = self._create_image_meta(vmware_disktype=vmware_disk_type) image_service.show.return_value = image_meta backing = mock.sentinel.backing vops.get_backing.return_value = backing vops.get_disk_size.return_value = backing_disk_size * units.Gi context = mock.sentinel.context volume = self._create_volume_dict() image_id = mock.sentinel.image_id self._driver.copy_image_to_volume( context, volume, image_service, image_id) validate_disk_format.assert_called_once_with(image_meta['disk_format']) validate_image_disk_type.assert_called_once_with( image_meta['properties']['vmware_disktype']) validate_image_adapter_type.assert_called_once_with( image_meta['properties']['vmware_adaptertype']) if vmware_disk_type == 'streamOptimized': fetch_stream_optimized_image.assert_called_once_with( context, volume, image_service, image_id, image_meta['size'], image_meta['properties']['vmware_adaptertype']) else: create_volume_from_non_stream_opt_image.assert_called_once_with( context, volume, image_service, image_id, image_meta['size'], image_meta['properties']['vmware_adaptertype'], image_meta['properties']['vmware_disktype']) vops.get_disk_size.assert_called_once_with(backing) if call_extend_backing: extend_backing.assert_called_once_with(backing, volume['size']) else: self.assertFalse(extend_backing.called) @ddt.data('sparse', 'preallocated', 'streamOptimized') def test_copy_image_to_volume(self, vmware_disk_type): self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type) @ddt.data('sparse', 'preallocated', 'streamOptimized') def test_copy_image_to_volume_with_extend_backing(self, vmware_disk_type): self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type, backing_disk_size=1, call_extend_backing=True) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
                '_get_disk_type')
    @mock.patch.object(VMDK_DRIVER, '_check_disk_conversion')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    @mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
    @mock.patch.object(VMDK_DRIVER,
                       '_create_virtual_disk_from_preallocated_image')
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
    def _test_create_volume_from_non_stream_optimized_image(
            self, delete_tmp_backing, select_ds_for_volume,
            create_disk_from_preallocated_image,
            create_disk_from_sparse_image, vops, get_ds_name_folder_path,
            create_backing, generate_uuid, check_disk_conversion,
            get_disk_type, image_disk_type='sparse', disk_conversion=False):
        disk_type = mock.sentinel.disk_type
        get_disk_type.return_value = disk_type
        check_disk_conversion.return_value = disk_conversion

        volume = self._create_volume_dict()
        if disk_conversion:
            disk_name = "6b77b25a-9136-470e-899e-3c930e570d8e"
            generate_uuid.return_value = disk_name
        else:
            disk_name = volume['name']

        backing = mock.sentinel.backing
        create_backing.return_value = backing

        ds_name = mock.sentinel.ds_name
        folder_path = mock.sentinel.folder_path
        get_ds_name_folder_path.return_value = (ds_name, folder_path)

        host = mock.sentinel.host
        dc_ref = mock.sentinel.dc_ref
        vops.get_host.return_value = host
        vops.get_dc.return_value = dc_ref

        vmdk_path = mock.Mock()
        create_disk_from_sparse_image.return_value = vmdk_path
        create_disk_from_preallocated_image.return_value = vmdk_path

        if disk_conversion:
            rp = mock.sentinel.rp
            folder = mock.sentinel.folder
            datastore = mock.sentinel.datastore
            summary = mock.Mock(datastore=datastore)
            select_ds_for_volume.return_value = (host, rp, folder, summary)

            clone = mock.sentinel.clone
            vops.clone_backing.return_value = clone

        context = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = units.Gi
        adapter_type = mock.sentinel.adapter_type

        self._driver._create_volume_from_non_stream_optimized_image(
            context, volume, image_service, image_id, image_size_in_bytes,
            adapter_type, image_disk_type)

        check_disk_conversion.assert_called_once_with(
            image_disk_type, mock.sentinel.disk_type)
        if disk_conversion:
            create_backing.assert_called_once_with(
                volume,
                create_params={vmdk.CREATE_PARAM_DISK_LESS: True,
                               vmdk.CREATE_PARAM_BACKING_NAME: disk_name})
        else:
            create_backing.assert_called_once_with(
                volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True})

        if image_disk_type == 'sparse':
            create_disk_from_sparse_image.assert_called_once_with(
                context, image_service, image_id, image_size_in_bytes,
                dc_ref, ds_name, folder_path, disk_name)
        else:
            create_disk_from_preallocated_image.assert_called_once_with(
                context, image_service, image_id, image_size_in_bytes,
                dc_ref, ds_name, folder_path, disk_name, adapter_type)

        vops.attach_disk_to_backing.assert_called_once_with(
            backing, image_size_in_bytes / units.Ki, disk_type,
            adapter_type, vmdk_path.get_descriptor_ds_file_path())

        if disk_conversion:
            select_ds_for_volume.assert_called_once_with(volume)
            vops.clone_backing.assert_called_once_with(
                volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
                datastore, disk_type=disk_type, host=host,
                resource_pool=rp, folder=folder)
            delete_tmp_backing.assert_called_once_with(backing)
            vops.update_backing_disk_uuid.assert_called_once_with(
                clone, volume['id'])
        else:
            vops.update_backing_disk_uuid.assert_called_once_with(
                backing, volume['id'])
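# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the mock.sentinel pattern the VMDK
# tests above lean on.  This is an illustration, not cinder code: Service
# and its backend are hypothetical stand-ins.  sentinel attributes are
# unique singletons, so an opaque value can be pushed in through one mocked
# collaborator and asserted on the other side by identity, without building
# a realistic datastore, host, or backing object.
# ---------------------------------------------------------------------------
import mock
import unittest


class Service(object):
    def __init__(self, backend):
        self._backend = backend

    def clone(self, name):
        # Whatever the backend hands back is passed through unchanged.
        backing = self._backend.lookup(name)
        return self._backend.clone(backing)


class SentinelExample(unittest.TestCase):
    def test_clone_passes_backing_through(self):
        backend = mock.Mock()
        backend.lookup.return_value = mock.sentinel.backing
        backend.clone.return_value = mock.sentinel.clone
        service = Service(backend)

        ret = service.clone(mock.sentinel.name)

        # Identity survives the round trip: the same sentinel objects come
        # back out, so the wiring can be asserted exactly.
        self.assertIs(mock.sentinel.clone, ret)
        backend.lookup.assert_called_once_with(mock.sentinel.name)
        backend.clone.assert_called_once_with(mock.sentinel.backing)


if __name__ == '__main__':
    unittest.main()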
@ddt.data('sparse', 'preallocated') def test_create_volume_from_non_stream_optimized_image(self, image_disk_type): self._test_create_volume_from_non_stream_optimized_image( image_disk_type=image_disk_type) @ddt.data('sparse', 'preallocated') def test_create_volume_from_non_stream_opt_image_with_disk_conversion( self, image_disk_type): self._test_create_volume_from_non_stream_optimized_image( image_disk_type=image_disk_type, disk_conversion=True) @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_virtual_disk_from_preallocated_image( self, vops, copy_image, flat_extent_path, generate_uuid, get_temp_image_folder, copy_temp_virtual_disk): dc_ref = mock.Mock(value=mock.sentinel.dc_ref) ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) uuid = mock.sentinel.uuid generate_uuid.return_value = uuid path = mock.Mock() dest_path = mock.Mock() flat_extent_path.side_effect = [path, dest_path] context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dest_dc_ref = mock.sentinel.dest_dc_ref dest_ds_name = mock.sentinel.dest_ds_name dest_folder_path = mock.sentinel.dest_folder_path dest_disk_name = mock.sentinel.dest_disk_name adapter_type = mock.sentinel.adapter_type ret = self._driver._create_virtual_disk_from_preallocated_image( context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) exp_flat_extent_path_calls = [ mock.call(ds_name, folder_path, uuid), mock.call(dest_ds_name, dest_folder_path, dest_disk_name)] self.assertEqual(exp_flat_extent_path_calls, flat_extent_path.call_args_list) create_descriptor = vops.create_flat_extent_virtual_disk_descriptor create_descriptor.assert_called_once_with( dc_ref, path, image_size_in_bytes / units.Ki, adapter_type, vmdk.EAGER_ZEROED_THICK_VMDK_TYPE) copy_image.assert_called_once_with( context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, path.get_flat_extent_file_path()) copy_temp_virtual_disk.assert_called_once_with(dc_ref, path, dest_dc_ref, dest_path) self.assertEqual(dest_path, ret) @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_virtual_disk_from_preallocated_image_with_no_disk_copy( self, vops, copy_image, flat_extent_path, get_temp_image_folder, copy_temp_virtual_disk): dc_ref = mock.Mock(value=mock.sentinel.dc_ref) ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) path = mock.Mock() flat_extent_path.return_value = path context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dest_dc_ref = mock.Mock(value=mock.sentinel.dc_ref) dest_ds_name = ds_name dest_folder_path = mock.sentinel.dest_folder_path dest_disk_name = mock.sentinel.dest_disk_name adapter_type = 
mock.sentinel.adapter_type ret = self._driver._create_virtual_disk_from_preallocated_image( context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) flat_extent_path.assert_called_once_with( dest_ds_name, dest_folder_path, dest_disk_name) create_descriptor = vops.create_flat_extent_virtual_disk_descriptor create_descriptor.assert_called_once_with( dc_ref, path, image_size_in_bytes / units.Ki, adapter_type, vmdk.EAGER_ZEROED_THICK_VMDK_TYPE) copy_image.assert_called_once_with( context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, path.get_flat_extent_file_path()) self.assertFalse(copy_temp_virtual_disk.called) self.assertEqual(path, ret) @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_virtual_disk_from_preallocated_image_with_copy_error( self, vops, copy_image, flat_extent_path, generate_uuid, get_temp_image_folder, copy_temp_virtual_disk): dc_ref = mock.Mock(value=mock.sentinel.dc_ref) ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) uuid = mock.sentinel.uuid generate_uuid.return_value = uuid path = mock.Mock() dest_path = mock.Mock() flat_extent_path.side_effect = [path, dest_path] copy_image.side_effect = exceptions.VimException("error") context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dest_dc_ref = mock.sentinel.dest_dc_ref dest_ds_name = mock.sentinel.dest_ds_name dest_folder_path = mock.sentinel.dest_folder_path dest_disk_name = mock.sentinel.dest_disk_name adapter_type = mock.sentinel.adapter_type self.assertRaises( exceptions.VimException, self._driver._create_virtual_disk_from_preallocated_image, context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) vops.delete_file.assert_called_once_with( path.get_descriptor_ds_file_path(), dc_ref) self.assertFalse(copy_temp_virtual_disk.called) @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.' 
        'MonolithicSparseVirtualDiskPath')
    @mock.patch(
        'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    def test_create_virtual_disk_from_sparse_image(
            self, copy_image, copy_temp_virtual_disk, flat_extent_path,
            sparse_path, generate_uuid):
        uuid = mock.sentinel.uuid
        generate_uuid.return_value = uuid

        src_path = mock.Mock()
        sparse_path.return_value = src_path
        dest_path = mock.Mock()
        flat_extent_path.return_value = dest_path

        context = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = 2 * units.Gi
        dc_ref = mock.sentinel.dc_ref
        ds_name = mock.sentinel.ds_name
        folder_path = mock.sentinel.folder_path
        disk_name = mock.sentinel.disk_name

        ret = self._driver._create_virtual_disk_from_sparse_image(
            context, image_service, image_id, image_size_in_bytes, dc_ref,
            ds_name, folder_path, disk_name)

        sparse_path.assert_called_once_with(ds_name, folder_path, uuid)
        copy_image.assert_called_once_with(
            context, dc_ref, image_service, image_id, image_size_in_bytes,
            ds_name, src_path.get_descriptor_file_path())
        flat_extent_path.assert_called_once_with(
            ds_name, folder_path, disk_name)
        copy_temp_virtual_disk.assert_called_once_with(
            dc_ref, src_path, dc_ref, dest_path)
        self.assertEqual(dest_path, ret)

    @mock.patch.object(VMDK_DRIVER, '_select_datastore')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_get_temp_image_folder(self, vops, select_datastore):
        host = mock.sentinel.host
        resource_pool = mock.sentinel.rp
        summary = mock.Mock()
        ds_name = mock.sentinel.ds_name
        summary.name = ds_name
        select_datastore.return_value = (host, resource_pool, summary)

        dc = mock.sentinel.dc
        vops.get_dc.return_value = dc

        image_size = 2 * units.Gi
        ret = self._driver._get_temp_image_folder(image_size)

        self.assertEqual((dc, ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH),
                         ret)
        exp_req = {
            hub.DatastoreSelector.SIZE_BYTES: image_size,
            hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE:
                {hub.DatastoreType.VMFS, hub.DatastoreType.NFS}}
        select_datastore.assert_called_once_with(exp_req)
        vops.create_datastore_folder.assert_called_once_with(
            ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH, dc)

    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_disk_type')
    @mock.patch.object(VMDK_DRIVER, '_get_extra_config')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch.object(image_transfer, 'download_stream_optimized_image')
    def _test_copy_image_to_volume_stream_optimized(self, download_image,
                                                    session, vops,
                                                    get_extra_config,
                                                    get_disk_type,
                                                    get_profile_id,
                                                    select_ds_for_volume,
                                                    download_error=False):
        host = mock.sentinel.host
        rp = mock.sentinel.rp
        folder = mock.sentinel.folder
        # NOTE(mriedem): The summary.name gets logged so it has to be a string
        summary = mock.Mock(name=six.text_type(mock.sentinel.ds_name))
        select_ds_for_volume.return_value = (host, rp, folder, summary)

        profile_id = mock.sentinel.profile_id
        get_profile_id.return_value = profile_id

        disk_type = mock.sentinel.disk_type
        get_disk_type.return_value = disk_type

        extra_config = mock.sentinel.extra_config
        get_extra_config.return_value = extra_config

        vm_create_spec = mock.sentinel.vm_create_spec
        vops.get_create_spec.return_value = vm_create_spec

        import_spec = mock.Mock()
        session.vim.client.factory.create.return_value = import_spec

        backing = mock.sentinel.backing
        if download_error:
            download_image.side_effect = exceptions.VimException
            vops.get_backing.return_value = backing
        else:
            download_image.return_value = backing

        context = mock.sentinel.context
        volume = self._create_volume_dict(size=3)
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size = 2 * units.Gi
        adapter_type = mock.sentinel.adapter_type

        if download_error:
            self.assertRaises(
                exceptions.VimException,
                self._driver._fetch_stream_optimized_image,
                context, volume, image_service, image_id, image_size,
                adapter_type)
        else:
            self._driver._fetch_stream_optimized_image(
                context, volume, image_service, image_id, image_size,
                adapter_type)

        select_ds_for_volume.assert_called_once_with(volume)
        vops.get_create_spec.assert_called_once_with(
            volume['name'], 0, disk_type, summary.name, profileId=profile_id,
            adapter_type=adapter_type, extra_config=extra_config)
        self.assertEqual(vm_create_spec, import_spec.configSpec)
        download_image.assert_called_with(
            context,
            self._config.vmware_image_transfer_timeout_secs,
            image_service,
            image_id,
            session=session,
            host=self._config.vmware_host_ip,
            port=443,
            resource_pool=rp,
            vm_folder=folder,
            vm_import_spec=import_spec,
            image_size=image_size)
        if download_error:
            self.assertFalse(vops.update_backing_disk_uuid.called)
            vops.delete_backing.assert_called_once_with(backing)
        else:
            vops.update_backing_disk_uuid.assert_called_once_with(
                backing, volume['id'])

    def test_copy_image_to_volume_stream_optimized(self):
        self._test_copy_image_to_volume_stream_optimized()

    def test_copy_image_to_volume_stream_optimized_with_download_error(self):
        self._test_copy_image_to_volume_stream_optimized(download_error=True)

    def test_copy_volume_to_image_when_attached(self):
        volume = self._create_volume_dict(
            attachment=[mock.sentinel.attachment_1])
        self.assertRaises(
            cinder_exceptions.InvalidVolume,
            self._driver.copy_volume_to_image,
            mock.sentinel.context,
            volume,
            mock.sentinel.image_service,
            mock.sentinel.image_meta)

    @mock.patch.object(VMDK_DRIVER, '_validate_disk_format')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch('oslo_vmware.image_transfer.upload_image')
    @mock.patch.object(VMDK_DRIVER, 'session')
    def test_copy_volume_to_image(
            self, session, upload_image, vops, validate_disk_format):
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing

        vmdk_file_path = mock.sentinel.vmdk_file_path
        vops.get_vmdk_path.return_value = vmdk_file_path

        context = mock.sentinel.context
        volume = self._create_volume_dict()
        image_service = mock.sentinel.image_service
        image_meta = self._create_image_meta()
        self._driver.copy_volume_to_image(
            context, volume, image_service, image_meta)

        validate_disk_format.assert_called_once_with(
            image_meta['disk_format'])
        vops.get_backing.assert_called_once_with(volume['name'])
        vops.get_vmdk_path.assert_called_once_with(backing)
        upload_image.assert_called_once_with(
            context,
            self._config.vmware_image_transfer_timeout_secs,
            image_service,
            image_meta['id'],
            volume['project_id'],
            session=session,
            host=self._config.vmware_host_ip,
            port=443,
            vm=backing,
            vmdk_file_path=vmdk_file_path,
            vmdk_size=volume['size'] * units.Gi,
            image_name=image_meta['name'],
            image_version=1,
            is_public=image_meta['is_public'])

    @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
    @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
                    get_volume_group_folder, generate_uuid,
                    delete_temp_backing):
        self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
                          get_volume_group_folder, generate_uuid,
                          delete_temp_backing)

    def test_in_use(self):
        # Test with in-use volume.
        vol = {'size': 1, 'status': 'in-use', 'name': 'vol-1',
               'volume_type_id': 'def'}
        vol['volume_attachment'] = [mock.sentinel.volume_attachment]
        self.assertTrue(self._driver._in_use(vol))

        # Test with available volume.
        vol['status'] = 'available'
        vol['volume_attachment'] = None
        self.assertIsNone(self._driver._in_use(vol))
        vol['volume_attachment'] = []
        ret = self._driver._in_use(vol)  # _in_use returns [] here
        self.assertFalse(ret)
        self.assertEqual(0, len(ret))

    def _test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
                     get_volume_group_folder, generate_uuid,
                     delete_temp_backing):
        self._driver._storage_policy_enabled = True
        context = mock.sentinel.context
        diff = mock.sentinel.diff
        host = mock.sentinel.host
        new_type = {'id': 'abc'}

        # Test with in-use volume.
        vol = {'size': 1, 'status': 'retyping', 'name': 'vol-1',
               'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e',
               'volume_type_id': 'def',
               'project_id': '63c19a12292549818c09946a5e59ddaf'}
        vol['volume_attachment'] = [mock.sentinel.volume_attachment]
        self.assertFalse(self._driver.retype(context, vol, new_type, diff,
                                             host))

        # Test with no backing.
        vops.get_backing.return_value = None
        vol['volume_attachment'] = None
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))

        # Test with no disk type conversion, no profile change and
        # compliant datastore.
        ds_value = mock.sentinel.datastore_value
        datastore = mock.Mock(value=ds_value)
        vops.get_datastore.return_value = datastore

        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing

        get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   None,
                                                   None]
        ds_sel.is_datastore_compliant.return_value = True
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
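        # NOTE: each scenario queues four values on
        # get_volume_type_extra_specs.side_effect; judging from the call
        # sites in the driver, they appear to be consumed as the old/new
        # vmdk disk type followed by the old/new storage profile.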
        # Test with no disk type conversion, profile change and
        # compliant datastore.
        new_profile = mock.sentinel.new_profile
        get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        ds_sel.is_datastore_compliant.return_value = True
        profile_id = mock.sentinel.profile_id
        ds_sel.get_profile_id.return_value = profile_id
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        vops.change_backing_profile.assert_called_once_with(backing,
                                                            profile_id)

        # Test with disk type conversion, profile change and a backing with
        # snapshots. Also test the no candidate datastore case.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        vops.snapshot_exists.return_value = True
        ds_sel.select_datastore.return_value = ()

        self.assertFalse(self._driver.retype(context, vol, new_type, diff,
                                             host))
        exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
                   hub.DatastoreSelector.PROFILE_NAME: new_profile,
                   hub.DatastoreSelector.SIZE_BYTES: units.Gi}
        ds_sel.select_datastore.assert_called_once_with(exp_req)

        # Modify the previous case with a candidate datastore which is
        # different than the backing's current datastore.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        vops.snapshot_exists.return_value = True

        host = mock.sentinel.host
        rp = mock.sentinel.rp
        candidate_ds = mock.Mock(value=mock.sentinel.candidate_ds_value)
        summary = mock.Mock(datastore=candidate_ds)
        ds_sel.select_datastore.return_value = (host, rp, summary)

        folder = mock.sentinel.folder
        get_volume_group_folder.return_value = folder
        vops.change_backing_profile.reset_mock()

        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        vops.relocate_backing.assert_called_once_with(
            backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
        vops.move_backing_to_folder.assert_called_once_with(backing, folder)
        vops.change_backing_profile.assert_called_once_with(backing,
                                                            profile_id)

        # Modify the previous case with no profile change.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   'gold-1']
        ds_sel.select_datastore.reset_mock()
        vops.relocate_backing.reset_mock()
        vops.move_backing_to_folder.reset_mock()
        vops.change_backing_profile.reset_mock()

        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
                   hub.DatastoreSelector.PROFILE_NAME: 'gold-1',
                   hub.DatastoreSelector.SIZE_BYTES: units.Gi}
        ds_sel.select_datastore.assert_called_once_with(exp_req)
        vops.relocate_backing.assert_called_once_with(
            backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
        vops.move_backing_to_folder.assert_called_once_with(backing, folder)
        self.assertFalse(vops.change_backing_profile.called)
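        # NOTE: the scenarios in this test reuse the mocks configured above,
        # so call trackers that are asserted again (select_datastore,
        # relocate_backing, change_backing_profile, etc.) are cleared with
        # reset_mock() to keep each assert_called_once_with() scoped to the
        # latest retype() call.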
        # Test with disk type conversion, profile change, backing with
        # no snapshots and candidate datastore which is same as the backing
        # datastore.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        vops.snapshot_exists.return_value = False
        summary.datastore = datastore

        uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc'
        generate_uuid.return_value = uuid

        clone = mock.sentinel.clone
        vops.clone_backing.return_value = clone
        vops.change_backing_profile.reset_mock()

        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        vops.rename_backing.assert_called_once_with(backing, uuid)
        vops.clone_backing.assert_called_once_with(
            vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
            datastore, disk_type=vmdk.THIN_VMDK_TYPE, host=host,
            resource_pool=rp, folder=folder)
        vops.update_backing_disk_uuid.assert_called_once_with(clone,
                                                              vol['id'])
        delete_temp_backing.assert_called_once_with(backing)
        vops.change_backing_profile.assert_called_once_with(clone,
                                                            profile_id)

        # Modify the previous case with exception during clone.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        vops.clone_backing.side_effect = exceptions.VimException('error')
        vops.update_backing_disk_uuid.reset_mock()
        vops.rename_backing.reset_mock()
        vops.change_backing_profile.reset_mock()

        self.assertRaises(
            exceptions.VimException, self._driver.retype, context, vol,
            new_type, diff, host)
        self.assertFalse(vops.update_backing_disk_uuid.called)
        exp_rename_calls = [mock.call(backing, uuid),
                            mock.call(backing, vol['name'])]
        self.assertEqual(exp_rename_calls,
                         vops.rename_backing.call_args_list)
        self.assertFalse(vops.change_backing_profile.called)

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_extend_backing(self, vops):
        vmdk_path = mock.sentinel.vmdk_path
        vops.get_vmdk_path.return_value = vmdk_path
        dc = mock.sentinel.datacenter
        vops.get_dc.return_value = dc

        backing = mock.sentinel.backing
        new_size = 1
        self._driver._extend_backing(backing, new_size)

        vops.get_vmdk_path.assert_called_once_with(backing)
        vops.get_dc.assert_called_once_with(backing)
        vops.extend_virtual_disk.assert_called_once_with(new_size,
                                                         vmdk_path,
                                                         dc)

    @mock.patch.object(image_transfer, 'copy_stream_optimized_disk')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.open', create=True)
    @mock.patch.object(VMDK_DRIVER, '_temporary_file')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'session')
    def test_backup_volume(self, session, vops, create_backing,
                           generate_uuid, temporary_file, file_open,
                           copy_disk):
        self._test_backup_volume(session, vops, create_backing,
                                 generate_uuid, temporary_file, file_open,
                                 copy_disk)

    def _test_backup_volume(self, session, vops, create_backing,
                            generate_uuid, temporary_file, file_open,
                            copy_disk):
        volume = {'name': 'vol-1', 'id': 1, 'size': 1}
        self._db.volume_get.return_value = volume

        vops.get_backing.return_value = None
        backing = mock.sentinel.backing
        create_backing.return_value = backing

        uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
        generate_uuid.return_value = uuid
        tmp_file_path = mock.sentinel.tmp_file_path
        temporary_file_ret = mock.Mock()
        temporary_file.return_value = temporary_file_ret
        temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
        temporary_file_ret.__exit__ = mock.Mock(return_value=None)

        vmdk_path = mock.sentinel.vmdk_path
        vops.get_vmdk_path.return_value = vmdk_path

        tmp_file = mock.sentinel.tmp_file
        file_open_ret = mock.Mock()
        file_open.return_value = file_open_ret
        file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
        file_open_ret.__exit__ = mock.Mock(return_value=None)

        context = mock.sentinel.context
        backup = {'id': 2, 'volume_id': 1}
        backup_service = mock.Mock()
        self._driver.backup_volume(context, backup, backup_service)

        create_backing.assert_called_once_with(volume)
        temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
        self.assertEqual(mock.call(tmp_file_path, "wb"),
                         file_open.call_args_list[0])
        copy_disk.assert_called_once_with(
            context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
            host=self.IP, port=self.PORT, vm=backing,
            vmdk_file_path=vmdk_path, vmdk_size=volume['size'] * units.Gi)
        self.assertEqual(mock.call(tmp_file_path, "rb"),
                         file_open.call_args_list[1])
        backup_service.backup.assert_called_once_with(backup, tmp_file)

    @mock.patch.object(VMDK_DRIVER, 'extend_volume')
    @mock.patch.object(VMDK_DRIVER, '_restore_backing')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.open', create=True)
    @mock.patch.object(VMDK_DRIVER, '_temporary_file')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_restore_backup(self, vops, generate_uuid, temporary_file,
                            file_open, restore_backing, extend_volume):
        self._test_restore_backup(vops, generate_uuid, temporary_file,
                                  file_open, restore_backing, extend_volume)

    def _test_restore_backup(
            self, vops, generate_uuid, temporary_file, file_open,
            restore_backing, extend_volume):
        volume = {'name': 'vol-1', 'id': 1, 'size': 1}
        backup = {'id': 2, 'size': 1}
        context = mock.sentinel.context
        backup_service = mock.Mock()

        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        vops.snapshot_exists.return_value = True
        self.assertRaises(
            cinder_exceptions.InvalidVolume, self._driver.restore_backup,
            context, backup, volume, backup_service)

        uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
        generate_uuid.return_value = uuid
        tmp_file_path = mock.sentinel.tmp_file_path
        temporary_file_ret = mock.Mock()
        temporary_file.return_value = temporary_file_ret
        temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
        temporary_file_ret.__exit__ = mock.Mock(return_value=None)

        tmp_file = mock.sentinel.tmp_file
        file_open_ret = mock.Mock()
        file_open.return_value = file_open_ret
        file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
        file_open_ret.__exit__ = mock.Mock(return_value=None)

        vops.snapshot_exists.return_value = False
        self._driver.restore_backup(context, backup, volume, backup_service)

        temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
        file_open.assert_called_once_with(tmp_file_path, "wb")
        backup_service.restore.assert_called_once_with(
            backup, volume['id'], tmp_file)
        restore_backing.assert_called_once_with(
            context, volume, backing, tmp_file_path,
            backup['size'] * units.Gi)
        self.assertFalse(extend_volume.called)

        temporary_file.reset_mock()
        file_open.reset_mock()
        backup_service.reset_mock()
        restore_backing.reset_mock()
        volume = {'name': 'vol-1', 'id': 1, 'size': 2}
        self._driver.restore_backup(context, backup, volume, backup_service)

        temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
        file_open.assert_called_once_with(tmp_file_path, "wb")
        backup_service.restore.assert_called_once_with(
            backup, volume['id'], tmp_file)
        restore_backing.assert_called_once_with(
            context, volume, backing, tmp_file_path,
            backup['size'] * units.Gi)
        extend_volume.assert_called_once_with(volume, volume['size'])

    @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch(
        'cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver._get_disk_type')
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    @mock.patch.object(VMDK_DRIVER,
                       '_create_backing_from_stream_optimized_file')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    def test_restore_backing(
            self, generate_uuid, create_backing, select_ds, get_disk_type,
            vops, delete_temp_backing):
        self._test_restore_backing(
            generate_uuid, create_backing, select_ds, get_disk_type, vops,
            delete_temp_backing)

    def _test_restore_backing(
            self, generate_uuid, create_backing, select_ds, get_disk_type,
            vops, delete_temp_backing):
        src_uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
        generate_uuid.return_value = src_uuid

        src = mock.sentinel.src
        create_backing.return_value = src

        summary = mock.Mock()
        summary.datastore = mock.sentinel.datastore
        select_ds.return_value = (mock.sentinel.host, mock.sentinel.rp,
                                  mock.sentinel.folder, summary)

        disk_type = vmdk.THIN_VMDK_TYPE
        get_disk_type.return_value = disk_type

        dest = mock.sentinel.dest
        vops.clone_backing.return_value = dest

        context = mock.sentinel.context
        volume = {'name': 'vol-1',
                  'id': 'bd45dfe5-d411-435d-85ac-2605fe7d5d8f', 'size': 1}
        backing = None
        tmp_file_path = mock.sentinel.tmp_file_path
        backup_size = units.Gi
        self._driver._restore_backing(
            context, volume, backing, tmp_file_path, backup_size)

        create_backing.assert_called_once_with(
            context, src_uuid, volume, tmp_file_path, backup_size)
        vops.clone_backing.assert_called_once_with(
            volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
            summary.datastore, disk_type=disk_type, host=mock.sentinel.host,
            resource_pool=mock.sentinel.rp, folder=mock.sentinel.folder)
        vops.update_backing_disk_uuid.assert_called_once_with(dest,
                                                              volume['id'])
        delete_temp_backing.assert_called_once_with(src)

        create_backing.reset_mock()
        vops.clone_backing.reset_mock()
        vops.update_backing_disk_uuid.reset_mock()
        delete_temp_backing.reset_mock()

        dest_uuid = "de4b0708-f947-4abe-98f8-75e52ce03b7b"
        tmp_uuid = "82c2a4f0-9064-4d95-bd88-6567a36018fa"
        generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]

        backing = mock.sentinel.backing
        self._driver._restore_backing(
            context, volume, backing, tmp_file_path, backup_size)

        create_backing.assert_called_once_with(
            context, src_uuid, volume, tmp_file_path, backup_size)
        vops.clone_backing.assert_called_once_with(
            dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
            summary.datastore, disk_type=disk_type, host=mock.sentinel.host,
            resource_pool=mock.sentinel.rp, folder=mock.sentinel.folder)
        vops.update_backing_disk_uuid.assert_called_once_with(dest,
                                                              volume['id'])
        exp_rename_calls = [mock.call(backing, tmp_uuid),
                            mock.call(dest, volume['name'])]
        self.assertEqual(exp_rename_calls,
                         vops.rename_backing.call_args_list)
        exp_delete_temp_backing_calls = [mock.call(backing), mock.call(src)]
        self.assertEqual(exp_delete_temp_backing_calls,
                         delete_temp_backing.call_args_list)

        delete_temp_backing.reset_mock()
        vops.rename_backing.reset_mock()

        def vops_rename(backing, new_name):
            if backing == dest and new_name == volume['name']:
                raise exceptions.VimException("error")

        vops.rename_backing.side_effect = vops_rename
        generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
        self.assertRaises(
            exceptions.VimException, self._driver._restore_backing, context,
            volume, backing, tmp_file_path, backup_size)
        exp_rename_calls = [mock.call(backing, tmp_uuid),
                            mock.call(dest, volume['name']),
                            mock.call(backing, volume['name'])]
        self.assertEqual(exp_rename_calls,
                         vops.rename_backing.call_args_list)
        exp_delete_temp_backing_calls = [mock.call(dest), mock.call(src)]
        self.assertEqual(exp_delete_temp_backing_calls,
                         delete_temp_backing.call_args_list)

    @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
    @mock.patch.object(image_transfer, 'download_stream_optimized_data')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.open', create=True)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_get_disk_type')
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    def test_create_backing_from_stream_optimized_file(
            self, select_ds, session, get_storage_profile_id, get_disk_type,
            vops, file_open, download_data, delete_temp_backing):
        self._test_create_backing_from_stream_optimized_file(
            select_ds, session, get_storage_profile_id, get_disk_type, vops,
            file_open, download_data, delete_temp_backing)

    def _test_create_backing_from_stream_optimized_file(
            self, select_ds, session, get_storage_profile_id, get_disk_type,
            vops, file_open, download_data, delete_temp_backing):
        rp = mock.sentinel.rp
        folder = mock.sentinel.folder
        summary = mock.Mock()
        summary.name = mock.sentinel.name
        select_ds.return_value = (mock.ANY, rp, folder, summary)

        import_spec = mock.Mock()
        session.vim.client.factory.create.return_value = import_spec

        profile_id = 'profile-1'
        get_storage_profile_id.return_value = profile_id

        disk_type = vmdk.THIN_VMDK_TYPE
        get_disk_type.return_value = disk_type

        create_spec = mock.Mock()
        vops.get_create_spec.return_value = create_spec

        tmp_file = mock.sentinel.tmp_file
        file_open_ret = mock.Mock()
        file_open.return_value = file_open_ret
        file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
        file_open_ret.__exit__ = mock.Mock(return_value=None)

        vm_ref = mock.sentinel.vm_ref
        download_data.return_value = vm_ref

        context = mock.sentinel.context
        name = 'vm-1'
        volume = {'name': 'vol-1',
                  'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e', 'size': 1}
        tmp_file_path = mock.sentinel.tmp_file_path
        file_size_bytes = units.Gi
        ret = self._driver._create_backing_from_stream_optimized_file(
            context, name, volume, tmp_file_path, file_size_bytes)

        self.assertEqual(vm_ref, ret)
        extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id']}
        vops.get_create_spec.assert_called_once_with(
            name, 0, disk_type, summary.name, profileId=profile_id,
            extra_config=extra_config)
        file_open.assert_called_once_with(tmp_file_path, "rb")
        download_data.assert_called_once_with(
            context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
            host=self.IP, port=self.PORT, resource_pool=rp,
            vm_folder=folder, vm_import_spec=import_spec,
            image_size=file_size_bytes)

        download_data.side_effect = exceptions.VimException("error")
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        self.assertRaises(
            exceptions.VimException,
            self._driver._create_backing_from_stream_optimized_file,
            context, name, volume, tmp_file_path, file_size_bytes)
        delete_temp_backing.assert_called_once_with(backing)

    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'session', new_callable=mock.PropertyMock)
    def test_get_vc_version(self, session):
        # test config overrides fetching from vCenter server
        version = self._driver._get_vc_version()
        self.assertEqual(ver.LooseVersion(self.DEFAULT_VC_VERSION), version)
        # explicitly remove config entry
        self._driver.configuration.vmware_host_version = None
        session.return_value.vim.service_content.about.version = '6.0.1'
        version = self._driver._get_vc_version()
        self.assertEqual(ver.LooseVersion('6.0.1'), version)

    @ddt.data('5.1', '5.5')
    def test_validate_vcenter_version(self, version):
        # vCenter versions 5.1 and above should pass validation.
        self._driver._validate_vcenter_version(ver.LooseVersion(version))

    def test_validate_vcenter_version_with_less_than_min_supported_version(
            self):
        vc_version = ver.LooseVersion('5.0')
        # Validation should fail for vCenter version less than 5.1.
        self.assertRaises(exceptions.VMwareDriverException,
                          self._driver._validate_vcenter_version,
                          vc_version)

    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_validate_vcenter_version')
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_vc_version')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'session', new_callable=mock.PropertyMock)
    def test_do_setup_with_pbm_disabled(self, session, get_vc_version,
                                        vops_cls, validate_vc_version):
        session_obj = mock.Mock(name='session')
        session.return_value = session_obj

        vc_version = ver.LooseVersion('5.0')
        get_vc_version.return_value = vc_version

        cluster_refs = mock.Mock()
        cluster_refs.values.return_value = mock.sentinel.cluster_refs
        vops = mock.Mock()
        vops.get_cluster_refs.return_value = cluster_refs

        def vops_side_effect(session, max_objects):
            vops._session = session
            vops._max_objects = max_objects
            return vops

        vops_cls.side_effect = vops_side_effect

        self._driver.do_setup(mock.ANY)

        validate_vc_version.assert_called_once_with(vc_version)
        self.assertFalse(self._driver._storage_policy_enabled)
        get_vc_version.assert_called_once_with()
        self.assertEqual(session_obj, self._driver.volumeops._session)
        self.assertEqual(session_obj, self._driver.ds_sel._session)
        self.assertEqual(mock.sentinel.cluster_refs, self._driver._clusters)
        vops.get_cluster_refs.assert_called_once_with(self.CLUSTERS)

    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_validate_vcenter_version')
    @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_vc_version')
    def test_do_setup_with_invalid_pbm_wsdl(self, get_vc_version,
                                            get_pbm_wsdl_location,
                                            validate_vc_version):
        vc_version = ver.LooseVersion('5.5')
        get_vc_version.return_value = vc_version
        get_pbm_wsdl_location.return_value = None

        self.assertRaises(exceptions.VMwareDriverException,
                          self._driver.do_setup,
                          mock.ANY)

        validate_vc_version.assert_called_once_with(vc_version)
        self.assertFalse(self._driver._storage_policy_enabled)
        get_vc_version.assert_called_once_with()
        get_pbm_wsdl_location.assert_called_once_with(
            six.text_type(vc_version))

    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_validate_vcenter_version')
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps')
    @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_vc_version')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'session', new_callable=mock.PropertyMock)
    def test_do_setup(self, session, get_vc_version, get_pbm_wsdl_location,
                      vops_cls, validate_vc_version):
        session_obj = mock.Mock(name='session')
        session.return_value = session_obj

        vc_version = ver.LooseVersion('5.5')
        get_vc_version.return_value = vc_version
        get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'

        cluster_refs = mock.Mock()
        cluster_refs.values.return_value = mock.sentinel.cluster_refs
        vops = mock.Mock()
        vops.get_cluster_refs.return_value = cluster_refs

        def vops_side_effect(session, max_objects):
            vops._session = session
            vops._max_objects = max_objects
            return vops

        vops_cls.side_effect = vops_side_effect

        self._driver.do_setup(mock.ANY)

        validate_vc_version.assert_called_once_with(vc_version)
        self.assertTrue(self._driver._storage_policy_enabled)
        get_vc_version.assert_called_once_with()
        get_pbm_wsdl_location.assert_called_once_with(
            six.text_type(vc_version))
        self.assertEqual(session_obj, self._driver.volumeops._session)
        self.assertEqual(session_obj, self._driver.ds_sel._session)
        self.assertEqual(mock.sentinel.cluster_refs, self._driver._clusters)
        vops.get_cluster_refs.assert_called_once_with(self.CLUSTERS)

    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
    def test_select_ds_for_volume(self, get_volume_group_folder, vops,
                                  ds_sel, get_storage_profile):
        profile = mock.sentinel.profile
        get_storage_profile.return_value = profile

        host_ref = mock.sentinel.host_ref
        rp = mock.sentinel.rp
        summary = mock.sentinel.summary
        ds_sel.select_datastore.return_value = (host_ref, rp, summary)

        dc = mock.sentinel.dc
        vops.get_dc.return_value = dc

        folder = mock.sentinel.folder
        get_volume_group_folder.return_value = folder

        host = mock.sentinel.host
        project_id = '63c19a12292549818c09946a5e59ddaf'
        vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
               'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0',
               'project_id': project_id}
        ret = self._driver._select_ds_for_volume(vol, host)

        self.assertEqual((host_ref, rp, folder, summary), ret)
        exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
                   hub.DatastoreSelector.PROFILE_NAME: profile}
        ds_sel.select_datastore.assert_called_once_with(exp_req,
                                                        hosts=[host])
        vops.get_dc.assert_called_once_with(rp)
        get_volume_group_folder.assert_called_once_with(dc, project_id)

    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
    def test_select_ds_for_volume_with_no_host(
            self, get_volume_group_folder, vops, ds_sel,
            get_storage_profile):
        profile = mock.sentinel.profile
        get_storage_profile.return_value = profile

        host_ref = mock.sentinel.host_ref
        rp = mock.sentinel.rp
        summary = mock.sentinel.summary
        ds_sel.select_datastore.return_value = (host_ref, rp, summary)

        dc = mock.sentinel.dc
        vops.get_dc.return_value = dc

        folder = mock.sentinel.folder
        get_volume_group_folder.return_value = folder

        project_id = '63c19a12292549818c09946a5e59ddaf'
        vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
               'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0',
               'project_id': project_id}
        ret = self._driver._select_ds_for_volume(vol)

        self.assertEqual((host_ref, rp, folder, summary), ret)
        exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
                   hub.DatastoreSelector.PROFILE_NAME: profile}
        ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=None)
        vops.get_dc.assert_called_once_with(rp)
        get_volume_group_folder.assert_called_once_with(dc, project_id)

    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_select_ds_for_volume_with_no_best_candidate(
            self, ds_sel, get_storage_profile):
        profile = mock.sentinel.profile
        get_storage_profile.return_value = profile

        ds_sel.select_datastore.return_value = ()

        vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
               'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0'}
        self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
                          self._driver._select_ds_for_volume,
                          vol)

        exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
                   hub.DatastoreSelector.PROFILE_NAME: profile}
        ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=None)

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
    def test_initialize_connection_with_instance_and_backing(
            self, relocate_backing, vops):
        instance = mock.sentinel.instance
        connector = {'instance': instance}

        backing = mock.Mock(value=mock.sentinel.backing_value)
        vops.get_backing.return_value = backing

        host = mock.sentinel.host
        vops.get_host.return_value = host

        volume = {'name': 'vol-1', 'id': 1}
        conn_info = self._driver.initialize_connection(volume, connector)

        relocate_backing.assert_called_once_with(volume, backing, host)

        self.assertEqual('vmdk', conn_info['driver_volume_type'])
        self.assertEqual(backing.value, conn_info['data']['volume'])
        self.assertEqual(volume['id'], conn_info['data']['volume_id'])

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    def test_initialize_connection_with_instance_and_no_backing(
            self, create_backing, relocate_backing, vops):
        instance = mock.sentinel.instance
        connector = {'instance': instance}

        vops.get_backing.return_value = None

        host = mock.sentinel.host
        vops.get_host.return_value = host

        backing = mock.Mock(value=mock.sentinel.backing_value)
        create_backing.return_value = backing

        volume = {'name': 'vol-1', 'id': 1}
        conn_info = self._driver.initialize_connection(volume, connector)

        create_backing.assert_called_once_with(volume, host)
        self.assertFalse(relocate_backing.called)

        self.assertEqual('vmdk', conn_info['driver_volume_type'])
        self.assertEqual(backing.value, conn_info['data']['volume'])
        self.assertEqual(volume['id'], conn_info['data']['volume_id'])

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    def test_initialize_connection_with_no_instance_and_no_backing(
            self, create_backing, relocate_backing, vops):
        vops.get_backing.return_value = None

        host = mock.sentinel.host
        vops.get_host.return_value = host

        backing = mock.Mock(value=mock.sentinel.backing_value)
        create_backing.return_value = backing

        connector = {}
        volume = {'name': 'vol-1', 'id': 1}
        conn_info = self._driver.initialize_connection(volume, connector)

        create_backing.assert_called_once_with(volume)
        self.assertFalse(relocate_backing.called)

        self.assertEqual('vmdk', conn_info['driver_volume_type'])
        self.assertEqual(backing.value, conn_info['data']['volume'])
        self.assertEqual(volume['id'], conn_info['data']['volume_id'])

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_get_volume_group_folder(self, vops):
        folder = mock.sentinel.folder
        vops.create_vm_inventory_folder.return_value = folder

        datacenter = mock.sentinel.dc
        project_id = '63c19a12292549818c09946a5e59ddaf'
        self.assertEqual(folder,
                         self._driver._get_volume_group_folder(datacenter,
                                                               project_id))

        project_folder_name = 'Project (%s)' % project_id
        vops.create_vm_inventory_folder.assert_called_once_with(
            datacenter, ['OpenStack', project_folder_name,
                         self.VOLUME_FOLDER])

    @mock.patch('cinder.volume.drivers.vmware.vmdk.'
                '_get_volume_type_extra_spec')
    @ddt.data('full', 'linked')
    def test_get_clone_type(self, clone_type, get_volume_type_extra_spec):
        get_volume_type_extra_spec.return_value = clone_type

        volume = self._create_volume_dict()
        self.assertEqual(clone_type, self._driver._get_clone_type(volume))
        get_volume_type_extra_spec.assert_called_once_with(
            volume['volume_type_id'], 'clone_type',
            default_value=volumeops.FULL_CLONE_TYPE)

    @mock.patch('cinder.volume.drivers.vmware.vmdk.'
                '_get_volume_type_extra_spec')
    def test_get_clone_type_invalid(
            self, get_volume_type_extra_spec):
        get_volume_type_extra_spec.return_value = 'foo'

        volume = self._create_volume_dict()
        self.assertRaises(
            cinder_exceptions.Invalid, self._driver._get_clone_type, volume)
        get_volume_type_extra_spec.assert_called_once_with(
            volume['volume_type_id'], 'clone_type',
            default_value=volumeops.FULL_CLONE_TYPE)

    @mock.patch.object(VMDK_DRIVER, '_extend_backing')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_clone_backing_linked(self, volume_ops, extend_backing):
        """Test _clone_backing with clone type - linked."""
        clone = mock.sentinel.clone
        volume_ops.clone_backing.return_value = clone
        fake_size = 3
        fake_volume = {'volume_type_id': None, 'name': 'fake_name',
                       'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
                       'size': fake_size}
        fake_snapshot = {'volume_name': 'volume_name',
                         'name': 'snapshot_name',
                         'volume_size': 2}
        fake_type = volumeops.LINKED_CLONE_TYPE
        fake_backing = mock.sentinel.backing
        self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                    volumeops.LINKED_CLONE_TYPE,
                                    fake_snapshot['volume_size'])

        extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: fake_volume['id']}
        volume_ops.clone_backing.assert_called_with(
            fake_volume['name'],
            fake_backing,
            fake_snapshot,
            fake_type,
            None,
            host=None,
            resource_pool=None,
            extra_config=extra_config,
            folder=None)
        volume_ops.update_backing_disk_uuid.assert_called_once_with(
            clone, fake_volume['id'])

        # If the volume size is greater than the original snapshot size,
        # _extend_backing will be called.
        extend_backing.assert_called_with(clone, fake_volume['size'])
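        # NOTE: assert_called_with() (unlike assert_called_once_with())
        # checks only the most recent invocation, which is what lets this
        # test call _clone_backing() a second time below and re-assert.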
fake_size = 2 fake_volume['size'] = fake_size extend_backing.reset_mock() self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot, volumeops.LINKED_CLONE_TYPE, fake_snapshot['volume_size']) self.assertFalse(extend_backing.called) @mock.patch.object(VMDK_DRIVER, '_extend_backing') @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_clone_backing_full(self, volume_ops, _select_ds_for_volume, extend_backing): """Test _clone_backing with clone type - full.""" fake_host = mock.sentinel.host fake_folder = mock.sentinel.folder fake_datastore = mock.sentinel.datastore fake_resource_pool = mock.sentinel.resourcePool fake_summary = mock.Mock(spec=object) fake_summary.datastore = fake_datastore fake_size = 3 _select_ds_for_volume.return_value = (fake_host, fake_resource_pool, fake_folder, fake_summary) clone = mock.sentinel.clone volume_ops.clone_backing.return_value = clone fake_backing = mock.sentinel.backing fake_volume = {'volume_type_id': None, 'name': 'fake_name', 'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53', 'size': fake_size} fake_snapshot = {'volume_name': 'volume_name', 'name': 'snapshot_name', 'volume_size': 2} self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot, volumeops.FULL_CLONE_TYPE, fake_snapshot['volume_size']) _select_ds_for_volume.assert_called_with(fake_volume) extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: fake_volume['id']} volume_ops.clone_backing.assert_called_with( fake_volume['name'], fake_backing, fake_snapshot, volumeops.FULL_CLONE_TYPE, fake_datastore, host=fake_host, resource_pool=fake_resource_pool, extra_config=extra_config, folder=fake_folder) volume_ops.update_backing_disk_uuid.assert_called_once_with( clone, fake_volume['id']) # If the volume size is greater than the original snapshot size, # _extend_backing will be called. extend_backing.assert_called_with(clone, fake_volume['size']) # If the volume size is not greater than the original snapshot size, # _extend_backing will not be called. fake_size = 2 fake_volume['size'] = fake_size extend_backing.reset_mock() self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot, volumeops.FULL_CLONE_TYPE, fake_snapshot['volume_size']) self.assertFalse(extend_backing.called) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 'volumeops', new_callable=mock.PropertyMock) def test_create_volume_from_snapshot_without_backing(self, mock_vops): """Test create_volume_from_snapshot without a backing.""" mock_vops = mock_vops.return_value driver = self._driver volume = {'name': 'mock_vol'} snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'} driver._verify_volume_creation = mock.MagicMock() mock_vops.get_backing.return_value = None # invoke the create_volume_from_snapshot api driver.create_volume_from_snapshot(volume, snapshot) # verify calls driver._verify_volume_creation.assert_called_once_with(volume) mock_vops.get_backing.assert_called_once_with('mock_vol') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'volumeops', new_callable=mock.PropertyMock) def test_create_volume_from_snap_without_backing_snap(self, mock_vops): """Test create_volume_from_snapshot without a backing snapshot.""" mock_vops = mock_vops.return_value driver = self._driver volume = {'volume_type_id': None, 'name': 'mock_vol'} snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'} backing = mock.sentinel.backing driver._verify_volume_creation = mock.MagicMock() mock_vops.get_backing.return_value = backing mock_vops.get_snapshot.return_value = None # invoke the create_volume_from_snapshot api driver.create_volume_from_snapshot(volume, snapshot) # verify calls driver._verify_volume_creation.assert_called_once_with(volume) mock_vops.get_backing.assert_called_once_with('mock_vol') mock_vops.get_snapshot.assert_called_once_with(backing, 'mock_snap') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 'volumeops', new_callable=mock.PropertyMock) def test_create_volume_from_snapshot(self, mock_vops): """Test create_volume_from_snapshot.""" mock_vops = mock_vops.return_value driver = self._driver volume = {'volume_type_id': None, 'name': 'mock_vol'} snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap', 'volume_size': 2} backing = mock.sentinel.backing snap_moref = mock.sentinel.snap_moref driver._verify_volume_creation = mock.MagicMock() mock_vops.get_backing.return_value = backing mock_vops.get_snapshot.return_value = snap_moref driver._clone_backing = mock.MagicMock() # invoke the create_volume_from_snapshot api driver.create_volume_from_snapshot(volume, snapshot) # verify calls driver._verify_volume_creation.assert_called_once_with(volume) mock_vops.get_backing.assert_called_once_with('mock_vol') mock_vops.get_snapshot.assert_called_once_with(backing, 'mock_snap') default_clone_type = volumeops.FULL_CLONE_TYPE driver._clone_backing.assert_called_once_with(volume, backing, snap_moref, default_clone_type, snapshot['volume_size']) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 'volumeops', new_callable=mock.PropertyMock) def test_create_cloned_volume_without_backing(self, mock_vops): """Test create_cloned_volume without a backing.""" mock_vops = mock_vops.return_value driver = self._driver volume = {'name': 'mock_vol'} src_vref = {'name': 'src_snapshot_name'} driver._verify_volume_creation = mock.MagicMock() mock_vops.get_backing.return_value = None # invoke the create_volume_from_snapshot api driver.create_cloned_volume(volume, src_vref) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 'volumeops', new_callable=mock.PropertyMock) def test_create_cloned_volume_with_backing(self, mock_vops): """Test create_cloned_volume with clone type - full.""" mock_vops = mock_vops.return_value driver = self._driver volume = {'volume_type_id': None, 'name': 'mock_vol'} src_vref = {'name': 'src_snapshot_name', 'size': 1} backing = mock.sentinel.backing driver._verify_volume_creation = mock.MagicMock() mock_vops.get_backing.return_value = backing default_clone_type = volumeops.FULL_CLONE_TYPE driver._clone_backing = mock.MagicMock() # invoke the create_volume_from_snapshot api driver.create_cloned_volume(volume, src_vref) # verify calls driver._verify_volume_creation.assert_called_once_with(volume) mock_vops.get_backing.assert_called_once_with('src_snapshot_name') driver._clone_backing.assert_called_once_with(volume, backing, None, default_clone_type, src_vref['size']) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'volumeops', new_callable=mock.PropertyMock) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_get_clone_type') def test_create_linked_cloned_volume_with_backing(self, get_clone_type, mock_vops): """Test create_cloned_volume with clone type - linked.""" mock_vops = mock_vops.return_value driver = self._driver volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'} src_vref = {'name': 'src_snapshot_name', 'status': 'available', 'size': 1} backing = mock.sentinel.backing driver._verify_volume_creation = mock.MagicMock() mock_vops.get_backing.return_value = backing linked_clone = volumeops.LINKED_CLONE_TYPE get_clone_type.return_value = linked_clone driver._clone_backing = mock.MagicMock() mock_vops.create_snapshot = mock.MagicMock() mock_vops.create_snapshot.return_value = mock.sentinel.snapshot # invoke the create_volume_from_snapshot api driver.create_cloned_volume(volume, src_vref) # verify calls driver._verify_volume_creation.assert_called_once_with(volume) mock_vops.get_backing.assert_called_once_with('src_snapshot_name') get_clone_type.assert_called_once_with(volume) name = 'snapshot-%s' % volume['id'] mock_vops.create_snapshot.assert_called_once_with(backing, name, None) driver._clone_backing.assert_called_once_with(volume, backing, mock.sentinel.snapshot, linked_clone, src_vref['size']) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 'volumeops', new_callable=mock.PropertyMock) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_get_clone_type') def test_create_linked_cloned_volume_when_attached(self, get_clone_type, mock_vops): """Test create_cloned_volume linked clone when volume is attached.""" mock_vops = mock_vops.return_value driver = self._driver volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'} src_vref = {'name': 'src_snapshot_name', 'status': 'in-use'} backing = mock.sentinel.backing driver._verify_volume_creation = mock.MagicMock() mock_vops.get_backing.return_value = backing linked_clone = volumeops.LINKED_CLONE_TYPE get_clone_type.return_value = linked_clone # invoke the create_volume_from_snapshot api self.assertRaises(cinder_exceptions.InvalidVolume, driver.create_cloned_volume, volume, src_vref) # verify calls driver._verify_volume_creation.assert_called_once_with(volume) mock_vops.get_backing.assert_called_once_with('src_snapshot_name') get_clone_type.assert_called_once_with(volume) @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs') def test_get_storage_profile(self, get_volume_type_extra_specs): """Test vmdk _get_storage_profile.""" # volume with no type id returns None volume = FakeObject() volume['volume_type_id'] = None sp = self._driver._get_storage_profile(volume) self.assertIsNone(sp, "Without a volume_type_id no storage " "profile should be returned.") # profile associated with the volume type should be returned fake_id = 'fake_volume_id' volume['volume_type_id'] = fake_id get_volume_type_extra_specs.return_value = 'fake_profile' profile = self._driver._get_storage_profile(volume) self.assertEqual('fake_profile', profile) spec_key = 'vmware:storage_profile' get_volume_type_extra_specs.assert_called_once_with(fake_id, spec_key) # None should be returned when no storage profile is # associated with the volume type get_volume_type_extra_specs.return_value = False profile = self._driver._get_storage_profile(volume) self.assertIsNone(profile) def _test_copy_image(self, download_flat_image, session, vops, expected_cacerts=False): dc_name = 
mock.sentinel.dc_name vops.get_entity_name.return_value = dc_name context = mock.sentinel.context dc_ref = mock.sentinel.dc_ref image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 102400 ds_name = mock.sentinel.ds_name upload_file_path = mock.sentinel.upload_file_path self._driver._copy_image( context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, upload_file_path) vops.get_entity_name.assert_called_once_with(dc_ref) cookies = session.vim.client.options.transport.cookiejar download_flat_image.assert_called_once_with( context, self.IMG_TX_TIMEOUT, image_service, image_id, image_size=image_size_in_bytes, host=self.IP, port=self.PORT, data_center_name=dc_name, datastore_name=ds_name, cookies=cookies, file_path=upload_file_path, cacerts=expected_cacerts) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, 'session') @mock.patch('oslo_vmware.image_transfer.download_flat_image') def test_copy_image(self, download_flat_image, session, vops): # Default value of vmware_ca_file is not None; it should be passed # to download_flat_image as cacerts. self._test_copy_image(download_flat_image, session, vops, expected_cacerts=self._config.vmware_ca_file) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, 'session') @mock.patch('oslo_vmware.image_transfer.download_flat_image') def test_copy_image_insecure(self, download_flat_image, session, vops): # Set config options to allow insecure connections. self._config.vmware_ca_file = None self._config.vmware_insecure = True # Since vmware_ca_file is unset and vmware_insecure is True, # dowload_flat_image should be called with cacerts=False. self._test_copy_image(download_flat_image, session, vops) @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_backing_with_params(self, vops, select_ds_for_volume): host = mock.sentinel.host resource_pool = mock.sentinel.resource_pool folder = mock.sentinel.folder summary = mock.sentinel.summary select_ds_for_volume.return_value = (host, resource_pool, folder, summary) backing = mock.sentinel.backing vops.create_backing_disk_less.return_value = backing volume = {'name': 'vol-1', 'volume_type_id': None, 'size': 1, 'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e'} create_params = {vmdk.CREATE_PARAM_DISK_LESS: True} ret = self._driver._create_backing(volume, host, create_params) self.assertEqual(backing, ret) extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id']} vops.create_backing_disk_less.assert_called_once_with( 'vol-1', folder, resource_pool, host, summary.name, profileId=None, extra_config=extra_config) self.assertFalse(vops.update_backing_disk_uuid.called) vops.create_backing.return_value = backing create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'} ret = self._driver._create_backing(volume, host, create_params) self.assertEqual(backing, ret) vops.create_backing.assert_called_once_with('vol-1', units.Mi, vmdk.THIN_VMDK_TYPE, folder, resource_pool, host, summary.name, profileId=None, adapter_type='ide', extra_config=extra_config) vops.update_backing_disk_uuid.assert_called_once_with(backing, volume['id']) vops.create_backing.reset_mock() vops.update_backing_disk_uuid.reset_mock() backing_name = "temp-vol" create_params = {vmdk.CREATE_PARAM_BACKING_NAME: backing_name} ret = self._driver._create_backing(volume, host, create_params) self.assertEqual(backing, ret) vops.create_backing.assert_called_once_with(backing_name, units.Mi, 
vmdk.THIN_VMDK_TYPE, folder, resource_pool, host, summary.name, profileId=None, adapter_type='lsiLogic', extra_config=extra_config) vops.update_backing_disk_uuid.assert_called_once_with(backing, volume['id']) @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('oslo_utils.fileutils.delete_if_exists') @mock.patch('tempfile.mkstemp') @mock.patch('os.close') def test_temporary_file( self, close, mkstemp, delete_if_exists, ensure_tree): fd = mock.sentinel.fd tmp = mock.sentinel.tmp mkstemp.return_value = (fd, tmp) prefix = ".vmdk" suffix = "test" with self._driver._temporary_file(prefix=prefix, suffix=suffix) as tmp_file: self.assertEqual(tmp, tmp_file) ensure_tree.assert_called_once_with(self.TMP_DIR) mkstemp.assert_called_once_with(dir=self.TMP_DIR, prefix=prefix, suffix=suffix) close.assert_called_once_with(fd) delete_if_exists.assert_called_once_with(tmp) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_get_hosts(self, vops): host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 host_3 = mock.sentinel.host_3 vops.get_cluster_hosts.side_effect = [[host_1, host_2], [host_3]] # host_1 and host_3 are usable, host_2 is not usable vops.is_host_usable.side_effect = [True, False, True] cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 self.assertEqual([host_1, host_3], self._driver._get_hosts([cls_1, cls_2])) exp_calls = [mock.call(cls_1), mock.call(cls_2)] self.assertEqual(exp_calls, vops.get_cluster_hosts.call_args_list) exp_calls = [mock.call(host_1), mock.call(host_2), mock.call(host_3)] self.assertEqual(exp_calls, vops.is_host_usable.call_args_list) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore(self, ds_sel, get_hosts): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 self._driver._clusters = [cls_1, cls_2] host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 host_3 = mock.sentinel.host_3 get_hosts.return_value = [host_1, host_2, host_3] best_candidate = mock.sentinel.best_candidate ds_sel.select_datastore.return_value = best_candidate req = mock.sentinel.req self.assertEqual(best_candidate, self._driver._select_datastore(req)) get_hosts.assert_called_once_with(self._driver._clusters) ds_sel.select_datastore.assert_called_once_with( req, hosts=[host_1, host_2, host_3]) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore_with_no_best_candidate(self, ds_sel, get_hosts): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 self._driver._clusters = [cls_1, cls_2] host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 host_3 = mock.sentinel.host_3 get_hosts.return_value = [host_1, host_2, host_3] ds_sel.select_datastore.return_value = () req = mock.sentinel.req self.assertRaises(vmdk_exceptions.NoValidDatastoreException, self._driver._select_datastore, req) get_hosts.assert_called_once_with(self._driver._clusters) ds_sel.select_datastore.assert_called_once_with( req, hosts=[host_1, host_2, host_3]) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore_with_single_host(self, ds_sel, get_hosts): best_candidate = mock.sentinel.best_candidate ds_sel.select_datastore.return_value = best_candidate req = mock.sentinel.req host_1 = mock.sentinel.host_1 self.assertEqual(best_candidate, self._driver._select_datastore(req, host_1)) ds_sel.select_datastore.assert_called_once_with(req, hosts=[host_1]) self.assertFalse(get_hosts.called) @mock.patch.object(VMDK_DRIVER, 
'_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore_with_empty_clusters(self, ds_sel, get_hosts): self._driver._clusters = None best_candidate = mock.sentinel.best_candidate ds_sel.select_datastore.return_value = best_candidate req = mock.sentinel.req self.assertEqual(best_candidate, self._driver._select_datastore(req)) ds_sel.select_datastore.assert_called_once_with(req, hosts=None) self.assertFalse(get_hosts.called) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore_with_no_valid_host(self, ds_sel, get_hosts): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 self._driver._clusters = [cls_1, cls_2] get_hosts.return_value = [] req = mock.sentinel.req self.assertRaises(vmdk_exceptions.NoValidHostException, self._driver._select_datastore, req) get_hosts.assert_called_once_with(self._driver._clusters) self.assertFalse(ds_sel.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_relocate_backing_nop(self, ds_sel, vops): self._driver._storage_policy_enabled = True volume = {'name': 'vol-1', 'size': 1} datastore = mock.sentinel.datastore vops.get_datastore.return_value = datastore profile = mock.sentinel.profile vops.get_profile.return_value = profile vops.is_datastore_accessible.return_value = True ds_sel.is_datastore_compliant.return_value = True backing = mock.sentinel.backing host = mock.sentinel.host self._driver._relocate_backing(volume, backing, host) vops.is_datastore_accessible.assert_called_once_with(datastore, host) ds_sel.is_datastore_compliant.assert_called_once_with(datastore, profile) self.assertFalse(vops.relocate_backing.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_relocate_backing_with_no_datastore( self, ds_sel, vops): self._driver._storage_policy_enabled = True volume = {'name': 'vol-1', 'size': 1} profile = mock.sentinel.profile vops.get_profile.return_value = profile vops.is_datastore_accessible.return_value = True ds_sel.is_datastore_compliant.return_value = False ds_sel.select_datastore.return_value = [] backing = mock.sentinel.backing host = mock.sentinel.host self.assertRaises(vmdk_exceptions.NoValidDatastoreException, self._driver._relocate_backing, volume, backing, host) ds_sel.select_datastore.assert_called_once_with( {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, hub.DatastoreSelector.PROFILE_NAME: profile}, hosts=[host]) self.assertFalse(vops.relocate_backing.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_relocate_backing( self, ds_sel, get_volume_group_folder, vops): volume = {'name': 'vol-1', 'size': 1, 'project_id': '63c19a12292549818c09946a5e59ddaf'} vops.is_datastore_accessible.return_value = False ds_sel.is_datastore_compliant.return_value = True backing = mock.sentinel.backing host = mock.sentinel.host rp = mock.sentinel.rp datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) ds_sel.select_datastore.return_value = (host, rp, summary) folder = mock.sentinel.folder get_volume_group_folder.return_value = folder self._driver._relocate_backing(volume, backing, host) vops.relocate_backing.assert_called_once_with(backing, datastore, rp, host) vops.move_backing_to_folder.assert_called_once_with(backing, folder) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, 
'_get_volume_group_folder') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_relocate_backing_with_pbm_disabled( self, ds_sel, get_volume_group_folder, vops): self._driver._storage_policy_enabled = False volume = {'name': 'vol-1', 'size': 1, 'project_id': 'abc'} vops.is_datastore_accessible.return_value = False backing = mock.sentinel.backing host = mock.sentinel.host rp = mock.sentinel.rp datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) ds_sel.select_datastore.return_value = (host, rp, summary) folder = mock.sentinel.folder get_volume_group_folder.return_value = folder self._driver._relocate_backing(volume, backing, host) self.assertFalse(vops.get_profile.called) vops.relocate_backing.assert_called_once_with(backing, datastore, rp, host) vops.move_backing_to_folder.assert_called_once_with(backing, folder) ds_sel.select_datastore.assert_called_once_with( {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, hub.DatastoreSelector.PROFILE_NAME: None}, hosts=[host]) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_get_disk_device(self, vops): vm = mock.sentinel.vm vops.get_entity_by_inventory_path.return_value = vm dev = mock.sentinel.dev vops.get_disk_device.return_value = dev vm_inv_path = mock.sentinel.vm_inv_path vmdk_path = mock.sentinel.vmdk_path ret = self._driver._get_disk_device(vmdk_path, vm_inv_path) self.assertEqual((vm, dev), ret) vops.get_entity_by_inventory_path.assert_called_once_with(vm_inv_path) vops.get_disk_device.assert_called_once_with(vm, vmdk_path) def test_get_existing_with_empty_source_name(self): self.assertRaises(cinder_exceptions.InvalidInput, self._driver._get_existing, {}) def test_get_existing_with_invalid_source_name(self): self.assertRaises(cinder_exceptions.InvalidInput, self._driver._get_existing, {'source-name': 'foo'}) @mock.patch.object(VMDK_DRIVER, '_get_disk_device', return_value=None) def test_get_existing_with_invalid_existing_ref(self, get_disk_device): self.assertRaises(cinder_exceptions.ManageExistingInvalidReference, self._driver._get_existing, {'source-name': '[ds1] foo/foo.vmdk@/dc-1/vm/foo'}) get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk', '/dc-1/vm/foo') @mock.patch.object(VMDK_DRIVER, '_get_disk_device') def test_get_existing(self, get_disk_device): vm = mock.sentinel.vm disk_device = mock.sentinel.disk_device get_disk_device.return_value = (vm, disk_device) self.assertEqual( (vm, disk_device), self._driver._get_existing({'source-name': '[ds1] foo/foo.vmdk@/dc-1/vm/foo'})) get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk', '/dc-1/vm/foo') @mock.patch.object(VMDK_DRIVER, '_get_existing') @ddt.data((16384, 1), (1048576, 1), (1572864, 2)) def test_manage_existing_get_size(self, test_data, get_existing): (capacity_kb, exp_size) = test_data disk_device = mock.Mock(capacityInKB=capacity_kb) get_existing.return_value = (mock.sentinel.vm, disk_device) volume = mock.sentinel.volume existing_ref = mock.sentinel.existing_ref self.assertEqual(exp_size, self._driver.manage_existing_get_size(volume, existing_ref)) get_existing.assert_called_once_with(existing_ref) @mock.patch.object(VMDK_DRIVER, '_get_existing') @mock.patch.object(VMDK_DRIVER, '_create_backing') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'_get_disk_type') def test_manage_existing( self, get_disk_type, get_ds_name_folder_path, vops, create_backing, get_existing): vm = mock.sentinel.vm src_path = mock.sentinel.src_path disk_backing = mock.Mock(fileName=src_path) disk_device = mock.Mock(backing=disk_backing, capacityInKB=1048576) get_existing.return_value = (vm, disk_device) backing = mock.sentinel.backing create_backing.return_value = backing src_dc = mock.sentinel.src_dc dest_dc = mock.sentinel.dest_dc vops.get_dc.side_effect = [src_dc, dest_dc] volume = self._create_volume_dict() ds_name = "ds1" folder_path = "%s/" % volume['name'] get_ds_name_folder_path.return_value = (ds_name, folder_path) disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type existing_ref = mock.sentinel.existing_ref self._driver.manage_existing(volume, existing_ref) get_existing.assert_called_once_with(existing_ref) create_backing.assert_called_once_with( volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True}) vops.detach_disk_from_backing.assert_called_once_with(vm, disk_device) dest_path = "[%s] %s%s.vmdk" % (ds_name, folder_path, volume['name']) vops.move_vmdk_file.assert_called_once_with( src_dc, src_path, dest_path, dest_dc_ref=dest_dc) vops.attach_disk_to_backing.assert_called_once_with( backing, disk_device.capacityInKB, disk_type, 'lsiLogic', dest_path) vops.update_backing_disk_uuid.assert_called_once_with(backing, volume['id']) @mock.patch('oslo_vmware.api.VMwareAPISession') def test_session(self, apiSession): self._session = None self._driver.session() apiSession.assert_called_once_with( self._config.vmware_host_ip, self._config.vmware_host_username, self._config.vmware_host_password, self._config.vmware_api_retry_count, self._config.vmware_task_poll_interval, wsdl_loc=self._config.safe_get('vmware_wsdl_location'), pbm_wsdl_loc=None, cacert=self._config.vmware_ca_file, insecure=self._config.vmware_insecure) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def test_extend_volume_with_no_backing(self, extend_backing, vops): vops.get_backing.return_value = None volume = {'name': 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53', 'volume_type_id': None, 'size': 1, 'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53', 'display_name': 'foo'} self._driver.extend_volume(volume, 2) self.assertFalse(extend_backing.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def test_extend_volume(self, extend_backing, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing volume = {'name': 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53', 'volume_type_id': None, 'size': 1, 'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53', 'display_name': 'foo'} new_size = 2 self._driver.extend_volume(volume, new_size) extend_backing.assert_called_once_with(backing, new_size) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') def test_extend_volume_with_no_disk_space(self, select_ds_for_volume, extend_backing, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing extend_backing.side_effect = [exceptions.NoDiskSpaceException, None] host = mock.sentinel.host rp = mock.sentinel.rp folder = mock.sentinel.folder datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) select_ds_for_volume.return_value = (host, rp, folder, summary) volume = {'name': 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53', 'volume_type_id': 
None, 'size': 1, 'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53', 'display_name': 'foo'} new_size = 2 self._driver.extend_volume(volume, new_size) create_params = {vmdk.CREATE_PARAM_DISK_SIZE: new_size} select_ds_for_volume.assert_called_once_with( volume, create_params=create_params) vops.relocate_backing.assert_called_once_with(backing, datastore, rp, host) vops.move_backing_to_folder.assert_called_once_with(backing, folder) extend_backing_calls = [mock.call(backing, new_size), mock.call(backing, new_size)] self.assertEqual(extend_backing_calls, extend_backing.call_args_list)
@mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def test_extend_volume_with_extend_backing_error( self, extend_backing, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing extend_backing.side_effect = exceptions.VimException("Error") volume = {'name': 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53', 'volume_type_id': None, 'size': 1, 'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53', 'display_name': 'foo'} new_size = 2 self.assertRaises(exceptions.VimException, self._driver.extend_volume, volume, new_size) extend_backing.assert_called_once_with(backing, new_size)
class ImageDiskTypeTest(test.TestCase): """Unit tests for ImageDiskType.""" def test_is_valid(self): self.assertTrue(vmdk.ImageDiskType.is_valid("thin")) self.assertTrue(vmdk.ImageDiskType.is_valid("preallocated")) self.assertTrue(vmdk.ImageDiskType.is_valid("streamOptimized")) self.assertTrue(vmdk.ImageDiskType.is_valid("sparse")) self.assertFalse(vmdk.ImageDiskType.is_valid("thick")) def test_validate(self): vmdk.ImageDiskType.validate("thin") vmdk.ImageDiskType.validate("preallocated") vmdk.ImageDiskType.validate("streamOptimized") vmdk.ImageDiskType.validate("sparse") self.assertRaises(cinder_exceptions.ImageUnacceptable, vmdk.ImageDiskType.validate, "thick")
cinder-8.0.0/cinder/tests/unit/test_backup.py
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
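# Illustrative sketch (editor addition, not part of the original file): the
# fixtures in this module are built through versioned objects rather than raw
# DB rows; the minimal form of the helper pattern used below, with
# hypothetical field values, is:
#
#     backup = objects.Backup(context=ctxt, volume_id=vol_id, size=1,
#                             status=fields.BackupStatus.CREATING)
#     backup.create()    # persists the record and assigns backup.id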
"""Tests for Backup code.""" import copy import ddt import tempfile import uuid import mock from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import importutils from oslo_utils import timeutils from cinder.backup import api from cinder.backup import manager from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import test from cinder.tests.unit.backup import fake_service_with_verify as fake_service from cinder.tests.unit import utils CONF = cfg.CONF class FakeBackupException(Exception): pass class BaseBackupTest(test.TestCase): def setUp(self): super(BaseBackupTest, self).setUp() vol_tmpdir = tempfile.mkdtemp() self.flags(volumes_dir=vol_tmpdir) self.backup_mgr = importutils.import_object(CONF.backup_manager) self.backup_mgr.host = 'testhost' self.ctxt = context.get_admin_context() paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot', 'cinder.volume.rpcapi.VolumeAPI.delete_volume', 'cinder.volume.rpcapi.VolumeAPI.detach_volume', 'cinder.volume.rpcapi.VolumeAPI.' 'secure_file_operations_enabled'] self.volume_patches = {} self.volume_mocks = {} for path in paths: name = path.split('.')[-1] self.volume_patches[name] = mock.patch(path) self.volume_mocks[name] = self.volume_patches[name].start() self.addCleanup(self.volume_patches[name].stop) def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()), restore_volume_id=None, display_name='test_backup', display_description='this is a test backup', container='volumebackups', status=fields.BackupStatus.CREATING, size=1, object_count=0, project_id=str(uuid.uuid4()), service=None, temp_volume_id=None, temp_snapshot_id=None): """Create a backup entry in the DB. Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['restore_volume_id'] = restore_volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['host'] = 'testhost' kwargs['availability_zone'] = '1' kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['container'] = container kwargs['status'] = status kwargs['fail_reason'] = '' kwargs['service'] = service or CONF.backup_driver kwargs['snapshot'] = False kwargs['parent_id'] = None kwargs['size'] = size kwargs['object_count'] = object_count kwargs['temp_volume_id'] = temp_volume_id kwargs['temp_snapshot_id'] = temp_snapshot_id backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def _create_volume_db_entry(self, display_name='test_volume', display_description='this is a test volume', status='backing-up', previous_status='available', size=1): """Create a volume entry in the DB. Return the entry ID """ vol = {} vol['size'] = size vol['host'] = 'testhost' vol['user_id'] = str(uuid.uuid4()) vol['project_id'] = str(uuid.uuid4()) vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = 'detached' vol['availability_zone'] = '1' vol['previous_status'] = previous_status volume = objects.Volume(context=self.ctxt, **vol) volume.create() return volume.id def _create_snapshot_db_entry(self, display_name='test_snapshot', display_description='test snapshot', status='available', size=1, volume_id=str(uuid.uuid4()), provider_location=None): """Create a snapshot entry in the DB. Return the entry ID. 
""" kwargs = {} kwargs['size'] = size kwargs['host'] = 'testhost' kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = str(uuid.uuid4()) kwargs['status'] = status kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['volume_id'] = volume_id kwargs['cgsnapshot_id'] = None kwargs['volume_size'] = size kwargs['metadata'] = {} kwargs['provider_location'] = provider_location snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs) snapshot_obj.create() return snapshot_obj def _create_volume_attach(self, volume_id): values = {'volume_id': volume_id, 'attach_status': 'attached', } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], None, 'testhost', '/dev/vd0') def _create_exported_record_entry(self, vol_size=1, exported_id=None): """Create backup metadata export entry.""" vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) if exported_id is not None: backup.id = exported_id export = self.backup_mgr.export_record(self.ctxt, backup) return export def _create_export_record_db_entry(self, volume_id=str(uuid.uuid4()), status=fields.BackupStatus.CREATING, project_id=str(uuid.uuid4()), backup_id=None): """Create a backup entry in the DB. Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['status'] = status if backup_id: kwargs['id'] = backup_id backup = objects.BackupImport(context=self.ctxt, **kwargs) backup.create() return backup @ddt.ddt class BackupTestCase(BaseBackupTest): """Test Case for backups.""" @mock.patch('cinder.context.get_admin_context') def test_init_host(self, mock_get_admin_context): """Test stuck volumes and backups. 
Make sure stuck volumes and backups are reset to correct states when backup_manager.init_host() is called """ def get_admin_context(): return self.ctxt vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) vol2_id = self._create_volume_db_entry() self._create_volume_attach(vol2_id) db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'}) vol3_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol3_id, {'status': 'available'}) vol4_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'}) temp_vol_id = self._create_volume_db_entry() db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'}) vol5_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'}) temp_snap = self._create_snapshot_db_entry() temp_snap.status = 'available' temp_snap.save() backup1 = self._create_backup_db_entry( status=fields.BackupStatus.CREATING, volume_id=vol1_id) backup2 = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, restore_volume_id=vol2_id) backup3 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol3_id) self._create_backup_db_entry(status=fields.BackupStatus.CREATING, volume_id=vol4_id, temp_volume_id=temp_vol_id) self._create_backup_db_entry(status=fields.BackupStatus.CREATING, volume_id=vol5_id, temp_snapshot_id=temp_snap.id) mock_get_admin_context.side_effect = get_admin_context self.backup_mgr.init_host() vol1 = db.volume_get(self.ctxt, vol1_id) self.assertEqual('available', vol1['status']) vol2 = db.volume_get(self.ctxt, vol2_id) self.assertEqual('error_restoring', vol2['status']) vol3 = db.volume_get(self.ctxt, vol3_id) self.assertEqual('available', vol3['status']) vol4 = db.volume_get(self.ctxt, vol4_id) self.assertEqual('available', vol4['status']) vol5 = db.volume_get(self.ctxt, vol5_id) self.assertEqual('available', vol5['status']) backup1 = db.backup_get(self.ctxt, backup1.id) self.assertEqual(fields.BackupStatus.ERROR, backup1['status']) backup2 = db.backup_get(self.ctxt, backup2.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup2['status']) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup3.id) temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id) self.volume_mocks['delete_volume'].assert_called_once_with( self.ctxt, temp_vol) self.assertTrue(self.volume_mocks['detach_volume'].called) @mock.patch('cinder.objects.backup.BackupList.get_all_by_host') @mock.patch('cinder.manager.SchedulerDependentManager._add_to_threadpool') def test_init_host_with_service_inithost_offload(self, mock_add_threadpool, mock_get_all_by_host): self.override_config('backup_service_inithost_offload', True) vol1_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol1_id, {'status': 'available'}) backup1 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol1_id) vol2_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol2_id, {'status': 'available'}) backup2 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol2_id) mock_get_all_by_host.return_value = [backup1, backup2] self.backup_mgr.init_host() calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1), mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)] mock_add_threadpool.assert_has_calls(calls, any_order=True) self.assertEqual(2, mock_add_threadpool.call_count) 
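    # Illustrative sketch (editor addition, not in the original suite): the
    # stacked @mock.patch decorators used throughout this class hand mocks to
    # the test method bottom-up -- the decorator nearest `def` becomes the
    # first mock argument. The underscore prefix keeps the test runner from
    # collecting this helper; the patched targets are real cinder.db
    # functions, but the method itself is only a demonstration.
    @mock.patch('cinder.db.volume_get')    # outermost -> second mock arg
    @mock.patch('cinder.db.backup_get')    # innermost -> first mock arg
    def _sketch_mock_patch_ordering(self, mock_backup_get, mock_volume_get):
        self.assertIsInstance(mock_backup_get, mock.MagicMock)
        self.assertIsInstance(mock_volume_get, mock.MagicMock)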
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3', 'cinder-volume': '1.7'}) @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-backup': '1.5', 'cinder-volume': '1.4'}) def test_reset(self): backup_mgr = manager.BackupManager() backup_rpcapi = backup_mgr.backup_rpcapi volume_rpcapi = backup_mgr.volume_rpcapi self.assertEqual('1.3', backup_rpcapi.client.version_cap) self.assertEqual('1.5', backup_rpcapi.client.serializer._base.version_cap) self.assertEqual('1.7', volume_rpcapi.client.version_cap) self.assertEqual('1.4', volume_rpcapi.client.serializer._base.version_cap) backup_mgr.reset() backup_rpcapi = backup_mgr.backup_rpcapi volume_rpcapi = backup_mgr.volume_rpcapi self.assertIsNone(backup_rpcapi.client.version_cap) self.assertIsNone(backup_rpcapi.client.serializer._base.version_cap) self.assertIsNone(volume_rpcapi.client.version_cap) self.assertIsNone(volume_rpcapi.client.serializer._base.version_cap) def test_is_working(self): self.assertTrue(self.backup_mgr.is_working()) def test_cleanup_incomplete_backup_operations_with_exceptions(self): """Test cleanup resilience in the face of exceptions.""" fake_backup_list = [{'id': str(uuid.uuid4())}, {'id': str(uuid.uuid4())}, {'id': str(uuid.uuid4())}] mock_backup_get_by_host = self.mock_object( objects.BackupList, 'get_all_by_host') mock_backup_get_by_host.return_value = fake_backup_list mock_backup_cleanup = self.mock_object( self.backup_mgr, '_cleanup_one_backup') mock_backup_cleanup.side_effect = [Exception] mock_temp_cleanup = self.mock_object( self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup') mock_temp_cleanup.side_effect = [Exception] self.assertIsNone( self.backup_mgr._cleanup_incomplete_backup_operations( self.ctxt)) self.assertEqual(len(fake_backup_list), mock_backup_cleanup.call_count) self.assertEqual(len(fake_backup_list), mock_temp_cleanup.call_count) def test_cleanup_one_backing_up_volume(self): """Test cleanup_one_volume for volume status 'backing-up'.""" volume_id = self._create_volume_db_entry(status='backing-up', previous_status='available') volume = db.volume_get(self.ctxt, volume_id) self.backup_mgr._cleanup_one_volume(self.ctxt, volume) volume = db.volume_get(self.ctxt, volume_id) self.assertEqual('available', volume['status']) def test_cleanup_one_restoring_backup_volume(self): """Test cleanup_one_volume for volume status 'restoring-backup'.""" volume_id = self._create_volume_db_entry(status='restoring-backup') volume = db.volume_get(self.ctxt, volume_id) self.backup_mgr._cleanup_one_volume(self.ctxt, volume) volume = db.volume_get(self.ctxt, volume_id) self.assertEqual('error_restoring', volume['status']) def test_cleanup_one_creating_backup(self): """Test cleanup_one_backup for volume status 'creating'.""" vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', }) backup = self._create_backup_db_entry( status=fields.BackupStatus.CREATING, volume_id=vol1_id) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertEqual(fields.BackupStatus.ERROR, backup.status) volume = objects.Volume.get_by_id(self.ctxt, vol1_id) self.assertEqual('available', volume.status) def test_cleanup_one_restoring_backup(self): """Test cleanup_one_backup for volume status 'restoring'.""" vol1_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', }) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, restore_volume_id=vol1_id) 
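        # An interrupted restore leaves the backup stuck in RESTORING; the
        # cleanup below resets the backup itself to AVAILABLE (it is still
        # intact) while the half-restored destination volume is flagged
        # 'error_restoring'.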
self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) volume = objects.Volume.get_by_id(self.ctxt, vol1_id) self.assertEqual('error_restoring', volume.status) def test_cleanup_one_deleting_backup(self): """Test cleanup_one_backup for volume status 'deleting'.""" backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup.id) def test_detach_all_attachments_handles_exceptions(self): """Test detach_all_attachments with exceptions.""" mock_log = self.mock_object(manager, 'LOG') self.volume_mocks['detach_volume'].side_effect = [Exception] fake_attachments = [ { 'id': str(uuid.uuid4()), 'attached_host': 'testhost', 'instance_uuid': None, }, { 'id': str(uuid.uuid4()), 'attached_host': 'testhost', 'instance_uuid': None, } ] fake_volume = { 'id': str(uuid.uuid4()), 'volume_attachment': fake_attachments } self.backup_mgr._detach_all_attachments(self.ctxt, fake_volume) self.assertEqual(len(fake_attachments), mock_log.exception.call_count) @ddt.data(KeyError, exception.VolumeNotFound) def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found( self, err): """Ensure we handle missing volume for a backup.""" mock_volume_get = self.mock_object(db, 'volume_get') mock_volume_get.side_effect = [err] backup = self._create_backup_db_entry( status=fields.BackupStatus.CREATING) self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) def test_cleanup_temp_snapshot_for_one_backup_not_found(self): """Ensure we handle missing temp snapshot for a backup.""" vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) backup = self._create_backup_db_entry( status=fields.BackupStatus.ERROR, volume_id=vol1_id, temp_snapshot_id=str(uuid.uuid4())) self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) self.assertFalse(self.volume_mocks['delete_snapshot'].called) self.assertIsNone(backup.temp_snapshot_id) backup.destroy() db.volume_destroy(self.ctxt, vol1_id) def test_cleanup_temp_volume_for_one_backup_not_found(self): """Ensure we handle missing temp volume for a backup.""" vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol1_id, temp_volume_id=str(uuid.uuid4())) self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) self.assertFalse(self.volume_mocks['delete_volume'].called) self.assertIsNone(backup.temp_volume_id) backup.destroy() db.volume_destroy(self.ctxt, vol1_id) def test_create_backup_with_bad_volume_status(self): """Test creating a backup from a volume with a bad status.""" vol_id = self._create_volume_db_entry(status='restoring', size=1) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertRaises(exception.InvalidVolume, self.backup_mgr.create_backup, self.ctxt, backup) def test_create_backup_with_bad_backup_status(self): """Test creating a backup with a backup with a bad status.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, 
self.backup_mgr.create_backup, self.ctxt, backup)
def test_create_backup_with_error(self): """Test error handling when error occurs during backup creation.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry(volume_id=vol_id) mock_run_backup = self.mock_object(self.backup_mgr, '_run_backup') mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4())) self.assertRaises(FakeBackupException, self.backup_mgr.create_backup, self.ctxt, backup) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('available', vol['status']) self.assertEqual('error_backing-up', vol['previous_status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) self.assertTrue(mock_run_backup.called)
@mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') def test_create_backup_old_volume_service(self, mock_open, mock_temporary_chown, mock_get_conn): """Test error handling when the volume service is too old.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry(volume_id=vol_id) with mock.patch.object(self.backup_mgr.volume_rpcapi.client, 'version_cap', '1.37'): self.assertRaises(exception.ServiceTooOld, self.backup_mgr.create_backup, self.ctxt, backup) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('available', vol['status']) self.assertEqual('error_backing-up', vol['previous_status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
@mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') def test_create_backup(self, mock_open, mock_temporary_chown, mock_get_backup_device, mock_get_conn): """Test normal backup creation.""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) vol = objects.Volume.get_by_id(self.ctxt, vol_id) mock_get_backup_device.return_value = {'backup_device': vol, 'secure_enabled': False, 'is_snapshot': False, } attach_info = {'device': {'path': '/dev/null'}} mock_detach_device = self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties = {} mock_get_conn.return_value = properties mock_open.return_value = open('/dev/null', 'rb') self.backup_mgr.create_backup(self.ctxt, backup) mock_temporary_chown.assert_called_once_with('/dev/null') mock_attach_device.assert_called_once_with(self.ctxt, vol, properties, False) mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol) mock_get_conn.assert_called_once_with() mock_detach_device.assert_called_once_with(self.ctxt, attach_info, vol, properties, False) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('available', vol['status']) self.assertEqual('backing-up', vol['previous_status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size'])
@mock.patch('cinder.volume.utils.notify_about_backup_usage') def test_create_backup_with_notify(self, notify): """Test normal backup creation with notifications.""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup =
self._create_backup_db_entry(volume_id=vol_id) self.mock_object(self.backup_mgr, '_run_backup') self.backup_mgr.create_backup(self.ctxt, backup) self.assertEqual(2, notify.call_count) def test_restore_backup_with_bad_volume_status(self): """Test error handling. Test error handling when restoring a backup to a volume with a bad status. """ vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertRaises(exception.InvalidVolume, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) def test_restore_backup_with_bad_backup_status(self): """Test error handling. Test error handling when restoring a backup with a backup with a bad status. """ vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_restore_backup_with_driver_error(self): """Test error handling when an error occurs during backup restore.""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) mock_run_restore = self.mock_object( self.backup_mgr, '_run_restore') mock_run_restore.side_effect = FakeBackupException('fake') self.assertRaises(FakeBackupException, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error_restoring', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertTrue(mock_run_restore.called) @mock.patch('cinder.utils.brick_get_connector_properties') def test_restore_backup_with_old_volume_service(self, mock_get_conn): """Test error handling when an error occurs during backup restore.""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) # Unmock secure_file_operations_enabled self.volume_patches['secure_file_operations_enabled'].stop() with mock.patch.object(self.backup_mgr.volume_rpcapi.client, 'version_cap', '1.37'): self.assertRaises(exception.ServiceTooOld, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error_restoring', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.volume_patches['secure_file_operations_enabled'].start() def test_restore_backup_with_bad_service(self): """Test error handling. Test error handling when attempting a restore of a backup with a different service to that used to create the backup. 
""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') def test_restore_backup(self, mock_open, mock_temporary_chown, mock_get_conn): """Test normal backup restoration.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='restoring-backup', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) properties = {} mock_get_conn.return_value = properties mock_open.return_value = open('/dev/null', 'wb') mock_secure_enabled = ( self.volume_mocks['secure_file_operations_enabled']) mock_secure_enabled.return_value = False vol = objects.Volume.get_by_id(self.ctxt, vol_id) attach_info = {'device': {'path': '/dev/null'}} mock_detach_device = self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) mock_temporary_chown.assert_called_once_with('/dev/null') mock_get_conn.assert_called_once_with() mock_secure_enabled.assert_called_once_with(self.ctxt, vol) mock_attach_device.assert_called_once_with(self.ctxt, vol, properties) mock_detach_device.assert_called_once_with(self.ctxt, attach_info, vol, properties) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('available', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch('cinder.volume.utils.notify_about_backup_usage') def test_restore_backup_with_notify(self, notify): """Test normal backup restoration with notifications.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='restoring-backup', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) self.backup_mgr._run_restore = mock.Mock() self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) self.assertEqual(2, notify.call_count) def test_delete_backup_with_bad_backup_status(self): """Test error handling. Test error handling when deleting a backup with a backup with a bad status. 
""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_error(self): """Test error handling when an error occurs during backup deletion.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, display_name='fail_on_delete', volume_id=vol_id) self.assertRaises(IOError, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_bad_service(self): """Test error handling. Test error handling when attempting a delete of a backup with a different service to that used to create the backup. """ vol_id = self._create_volume_db_entry(size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_no_service(self): """Test error handling. Test error handling when attempting a delete of a backup with no service defined for that backup, relates to bug #1162908 """ vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id) backup.service = None backup.save() self.backup_mgr.delete_backup(self.ctxt, backup) def test_delete_backup(self): """Test normal backup deletion.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id) self.backup_mgr.delete_backup(self.ctxt, backup) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup.id) ctxt_read_deleted = context.get_admin_context('yes') backup = db.backup_get(ctxt_read_deleted, backup.id) self.assertTrue(backup.deleted) self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at) self.assertEqual(fields.BackupStatus.DELETED, backup.status) @mock.patch('cinder.volume.utils.notify_about_backup_usage') def test_delete_backup_with_notify(self, notify): """Test normal backup deletion with notifications.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id) self.backup_mgr.delete_backup(self.ctxt, backup) self.assertEqual(2, notify.call_count) def test_list_backup(self): project_id = str(uuid.uuid4()) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(0, len(backups)) self._create_backup_db_entry() b2 = self._create_backup_db_entry(project_id=project_id) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(1, len(backups)) self.assertEqual(b2.id, backups[0].id) def test_backup_get_all_by_project_with_deleted(self): """Test deleted backups. Test deleted backups don't show up in backup_get_all_by_project. Unless context.read_deleted is 'yes'. 
""" project_id = str(uuid.uuid4()) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(0, len(backups)) backup_keep = self._create_backup_db_entry(project_id=project_id) backup = self._create_backup_db_entry(project_id=project_id) db.backup_destroy(self.ctxt, backup.id) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(1, len(backups)) self.assertEqual(backup_keep.id, backups[0].id) ctxt_read_deleted = context.get_admin_context('yes') backups = db.backup_get_all_by_project(ctxt_read_deleted, project_id) self.assertEqual(2, len(backups)) def test_backup_get_all_by_host_with_deleted(self): """Test deleted backups. Test deleted backups don't show up in backup_get_all_by_project. Unless context.read_deleted is 'yes' """ backups = db.backup_get_all_by_host(self.ctxt, 'testhost') self.assertEqual(0, len(backups)) backup_keep = self._create_backup_db_entry() backup = self._create_backup_db_entry() db.backup_destroy(self.ctxt, backup.id) backups = db.backup_get_all_by_host(self.ctxt, 'testhost') self.assertEqual(1, len(backups)) self.assertEqual(backup_keep.id, backups[0].id) ctxt_read_deleted = context.get_admin_context('yes') backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost') self.assertEqual(2, len(backups)) def test_backup_manager_driver_name(self): """Test mapping between backup services and backup drivers.""" self.override_config('backup_driver', "cinder.backup.services.swift") backup_mgr = \ importutils.import_object(CONF.backup_manager) self.assertEqual('cinder.backup.drivers.swift', backup_mgr.driver_name) def test_export_record_with_bad_service(self): """Test error handling. Test error handling when attempting an export of a backup record with a different service to that used to create the backup. """ vol_id = self._create_volume_db_entry(size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.export_record, self.ctxt, backup) def test_export_record_with_bad_backup_status(self): """Test error handling. Test error handling when exporting a backup record with a backup with a bad status. """ vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.export_record, self.ctxt, backup) def test_export_record(self): """Test normal backup record export.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) export = self.backup_mgr.export_record(self.ctxt, backup) self.assertEqual(CONF.backup_driver, export['backup_service']) self.assertTrue('backup_url' in export) def test_import_record_with_verify_not_implemented(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver does not support verify. 
""" vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry(vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_wrong_id(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver does not support verify. """ vol_size = 1 export = self._create_exported_record_entry(vol_size=vol_size) imported_record = self._create_export_record_db_entry() backup_hosts = [] self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) def test_import_record_with_bad_service(self): """Test error handling. Test error handling when attempting an import of a backup record with a different service to that used to create the backup. """ export = self._create_exported_record_entry() export['backup_service'] = 'cinder.tests.unit.backup.bad_service' imported_record = self._create_export_record_db_entry() # Test the case where the additional hosts list is empty backup_hosts = [] self.assertRaises(exception.ServiceNotFound, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) # Test that the import backup keeps calling other hosts to find a # suitable host for the backup service backup_hosts = ['fake1', 'fake2'] backup_hosts_expect = list(backup_hosts) BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record' with mock.patch(BackupAPI_import) as _mock_backup_import: self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) next_host = backup_hosts_expect.pop() _mock_backup_import.assert_called_once_with( self.ctxt, next_host, imported_record, export['backup_service'], export['backup_url'], backup_hosts_expect) def test_import_record_with_invalid_backup(self): """Test error handling. Test error handling when attempting an import of a backup record where the backup driver returns an exception. 
""" export = self._create_exported_record_entry() backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) _mock_record_import_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'import_record')) imported_record = self._create_export_record_db_entry() backup_hosts = [] with mock.patch(_mock_record_import_class) as _mock_record_import: _mock_record_import.side_effect = FakeBackupException('fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_import.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_not_supported_driver_to_force_delete(self): """Test force delete check method for not supported drivers.""" self.override_config('backup_driver', 'cinder.backup.drivers.ceph') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertFalse(result) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_init_backup_repo_path', return_value=None) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_check_configuration', return_value=None) def test_check_support_to_force_delete(self, mock_check_configuration, mock_init_backup_repo_path): """Test force delete check method for supported drivers.""" self.override_config('backup_driver', 'cinder.backup.drivers.nfs') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertTrue(result) def test_backup_has_dependent_backups(self): """Test backup has dependent backups. Test the query of has_dependent_backups in backup object is correct. """ vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertFalse(backup.has_dependent_backups) class BackupTestCaseWithVerify(BaseBackupTest): """Test Case for backups.""" def setUp(self): self.override_config( "backup_driver", "cinder.tests.unit.backup.fake_service_with_verify") super(BackupTestCaseWithVerify, self).setUp() def test_import_record_with_verify(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver implements verify. """ vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry( vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class): self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_verify_invalid_backup(self): """Test error handling. Test error handling when attempting an import of a backup record where the backup driver returns an exception. 
""" vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry( vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class) as _mock_record_verify: _mock_record_verify.side_effect = \ exception.InvalidBackup(reason='fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_verify.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_from_nonrestoring_to_available( self, mock_clean_temp): vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id) with mock.patch.object(manager.BackupManager, '_map_service_to_driver') as \ mock_map_service_to_driver: mock_map_service_to_driver.return_value = \ fake_service.get_backup_driver(self.ctxt) self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) def test_backup_reset_status_to_available_invalid_backup(self): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=volume['id']) backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class) as \ _mock_record_verify: _mock_record_verify.side_effect = \ exception.BackupVerifyUnsupportedDriver(reason='fake') self.assertRaises(exception.BackupVerifyUnsupportedDriver, self.backup_mgr.reset_status, self.ctxt, backup, fields.BackupStatus.AVAILABLE) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_from_restoring_to_available( self, mock_clean_temp): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=volume['id']) self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_to_error(self, mock_clean_temp): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry( status=fields.BackupStatus.CREATING, volume_id=volume['id']) self.backup_mgr.reset_status(self.ctxt, 
backup, fields.BackupStatus.ERROR) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup['id']) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @ddt.ddt class BackupAPITestCase(BaseBackupTest): def setUp(self): super(BackupAPITestCase, self).setUp() self.api = api.API() def test_get_all_wrong_all_tenants_value(self): self.assertRaises(exception.InvalidParameterValue, self.api.get_all, self.ctxt, {'all_tenants': 'bad'}) @mock.patch.object(objects, 'BackupList') def test_get_all_no_all_tenants_value(self, mock_backuplist): result = self.api.get_all(self.ctxt, {'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(False, 'false', '0', 0, 'no') def test_get_all_false_value_all_tenants( self, false_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': false_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(True, 'true', '1', 1, 'yes') def test_get_all_true_value_all_tenants( self, true_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': true_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all_by_project.called) self.assertEqual(mock_backuplist.get_all.return_value, result) mock_backuplist.get_all.assert_called_once_with( self.ctxt, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist): ctxt = context.RequestContext(uuid.uuid4(), uuid.uuid4()) result = self.api.get_all(ctxt, {'all_tenants': '1', 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(db, 'backup_create', side_effect=db_exc.DBError()) def test_create_when_failed_to_create_backup_object( self, mock_create, mock_get_service): # Create volume in admin context volume_id = utils.create_volume(self.ctxt)['id'] # Will try to backup from a different context new_context = copy.copy(self.ctxt) new_context.user_id = uuid.uuid4() new_context.project_id = uuid.uuid4() # The opposite side of this test case is a "NotImplementedError: # Cannot load 'id' in the base class" being raised. # More detailed, in the try clause, if backup.create() failed # with DB exception, backup.id won't be assigned. However, # in the except clause, backup.destroy() is invoked to do cleanup, # which internally tries to access backup.id. 
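        # Schematically (a hypothetical simplification of api.API.create):
        #
        #     backup = objects.Backup(context=ctxt, **kwargs)
        #     try:
        #         backup.create()     # DBError here: backup.id never assigned
        #     except Exception:
        #         backup.destroy()    # must tolerate the missing backup.id
        #         raise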
self.assertRaises(db_exc.DBError, self.api.create, context=new_context, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(objects.Backup, '__init__', side_effect=exception.InvalidInput( reason='Failed to new')) def test_create_when_failed_to_new_backup_object(self, mock_new, mock_get_service): volume_id = utils.create_volume(self.ctxt)['id'] # The opposite side of this test case is that a "UnboundLocalError: # local variable 'backup' referenced before assignment" is raised. # More detailed, in the try clause, backup = objects.Backup(...) # raises exception, so 'backup' is not assigned. But in the except # clause, 'backup' is referenced to invoke cleanup methods. self.assertRaises(exception.InvalidInput, self.api.create, context=self.ctxt, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') def test_restore_volume(self, mock_rpcapi_restore, mock_get_backup_host): volume_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(size=1, status='available') mock_get_backup_host.return_value = 'testhost' self.api.restore(self.ctxt, backup.id, volume_id) backup = objects.Backup.get_by_id(self.ctxt, backup.id) self.assertEqual(volume_id, backup.restore_volume_id) cinder-8.0.0/cinder/tests/unit/test_ibm_xiv_ds8k.py0000664000567000056710000010110412701406250023502 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
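# Illustrative sketch (editor addition, not part of the original file): the
# tests below never talk to real hardware; the driver is pointed at
# XIVDS8KFakeProxyDriver purely through configuration, so only the delegation
# layer of xiv_ds8k.XIVDS8KDriver is exercised:
#
#     configuration.xiv_ds8k_proxy = (
#         'cinder.tests.unit.test_ibm_xiv_ds8k.XIVDS8KFakeProxyDriver')
#     driver = xiv_ds8k.XIVDS8KDriver(configuration=configuration)
#     driver.do_setup(None)    # the fake proxy validates the san_* flags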
# # Authors: # Erik Zaadi # Avishay Traeger import copy from mox3 import mox from cinder import context from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import xiv_ds8k from cinder.volume import volume_types FAKE = "fake" FAKE2 = "fake2" CANNOT_DELETE = "Can not delete" TOO_BIG_VOLUME_SIZE = 12000 POOL_SIZE = 100 CONSISTGROUP_ID = 1 VOLUME = {'size': 16, 'name': FAKE, 'id': 1, 'status': 'available'} VOLUME2 = {'size': 32, 'name': FAKE2, 'id': 2, 'status': 'available'} CG_VOLUME = {'size': 16, 'name': FAKE, 'id': 3, 'consistencygroup_id': CONSISTGROUP_ID, 'status': 'available'} MANAGED_FAKE = "managed_fake" MANAGED_VOLUME = {'size': 16, 'name': MANAGED_FAKE, 'id': 2} REPLICA_FAKE = "repicated_fake" REPLICATED_VOLUME = {'size': 64, 'name': REPLICA_FAKE, 'id': 2} REPLICATION_TARGETS = [{'target_device_id': 'fakedevice'}] SECONDARY = 'fakedevice' FAKE_FAILOVER_HOST = 'fakehost@fakebackend#fakepool' FAKE_PROVIDER_LOCATION = 'fake_provider_location' FAKE_DRIVER_DATA = 'fake_driver_data' CONTEXT = {} FAKESNAPSHOT = 'fakesnapshot' SNAPSHOT = {'name': 'fakesnapshot', 'id': 3} CONSISTGROUP = {'id': CONSISTGROUP_ID, } CG_SNAPSHOT_ID = 1 CG_SNAPSHOT = {'id': CG_SNAPSHOT_ID, 'consistencygroup_id': CONSISTGROUP_ID} CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", } class XIVDS8KFakeProxyDriver(object): """Fake IBM XIV and DS8K Proxy Driver.""" def __init__(self, xiv_ds8k_info, logger, expt, driver=None, active_backend_id=None): """Initialize Proxy.""" self.xiv_ds8k_info = xiv_ds8k_info self.logger = logger self.exception = expt self.xiv_ds8k_portal = \ self.xiv_ds8k_iqn = FAKE self.volumes = {} self.snapshots = {} self.driver = driver def setup(self, context): if self.xiv_ds8k_info['xiv_ds8k_user'] != self.driver\ .configuration.san_login: raise self.exception.NotAuthorized() if self.xiv_ds8k_info['xiv_ds8k_address'] != self.driver\ .configuration.san_ip: raise self.exception.HostNotFound(host='fake') def create_volume(self, volume): if volume['size'] > POOL_SIZE: raise self.exception.VolumeBackendAPIException(data='blah') self.volumes[volume['name']] = volume def volume_exists(self, volume): return self.volumes.get(volume['name'], None) is not None def delete_volume(self, volume): if self.volumes.get(volume['name'], None) is not None: del self.volumes[volume['name']] def manage_volume_get_size(self, volume, existing_ref): if self.volumes.get(existing_ref['source-name'], None) is None: raise self.exception.VolumeNotFound(volume_id=volume['id']) return self.volumes[existing_ref['source-name']]['size'] def manage_volume(self, volume, existing_ref): if self.volumes.get(existing_ref['source-name'], None) is None: raise self.exception.VolumeNotFound(volume_id=volume['id']) volume['size'] = MANAGED_VOLUME['size'] return {} def unmanage_volume(self, volume): pass def initialize_connection(self, volume, connector): if not self.volume_exists(volume): raise self.exception.VolumeNotFound(volume_id=volume['id']) lun_id = volume['id'] self.volumes[volume['name']]['attached'] = connector return {'driver_volume_type': 'iscsi', 'data': {'target_discovered': True, 'target_portal': self.xiv_ds8k_portal, 'target_iqn': self.xiv_ds8k_iqn, 'target_lun': lun_id, 'volume_id': volume['id'], 'multipath': True, 'provider_location': "%s,1 %s %s" % ( self.xiv_ds8k_portal, self.xiv_ds8k_iqn, lun_id), }, } def terminate_connection(self, volume, connector): if not 
self.volume_exists(volume): raise self.exception.VolumeNotFound(volume_id=volume['id']) if not self.is_volume_attached(volume, connector): raise self.exception.NotFound(_('Volume not found for ' 'instance %(instance_id)s.') % {'instance_id': 'fake'}) del self.volumes[volume['name']]['attached'] def is_volume_attached(self, volume, connector): if not self.volume_exists(volume): raise self.exception.VolumeNotFound(volume_id=volume['id']) return (self.volumes[volume['name']].get('attached', None) == connector) def get_replication_status(self, context, volume): if volume['replication_status'] == 'invalid_status_val': raise exception.CinderException() return {'replication_status': 'active'} def retype(self, ctxt, volume, new_type, diff, host): volume['easytier'] = new_type['extra_specs']['easytier'] return True, volume def create_consistencygroup(self, ctxt, group): volumes = [volume for k, volume in self.volumes.items() if volume['consistencygroup_id'] == group['id']] if volumes: raise exception.CinderException( message='The consistency group id of volume may be wrong.') return {'status': fields.ConsistencyGroupStatus.AVAILABLE} def delete_consistencygroup(self, ctxt, group, volumes): for volume in self.volumes.values(): if (group.get('id', None) == volume.get('consistencygroup_id', None)): if volume['name'] == CANNOT_DELETE: raise exception.VolumeBackendAPIException( message='Volume can not be deleted') else: volume['status'] = 'deleted' volumes.append(volume) # Delete snapshots in consistency group self.snapshots = {k: snap for k, snap in self.snapshots.items() if not(snap.get('consistencygroup_id', None) == group.get('id', None))} # Delete volume in consistency group self.volumes = {k: vol for k, vol in self.volumes.items() if not(vol.get('consistencygroup_id', None) == group.get('id', None))} return {'status': 'deleted'}, volumes def update_consistencygroup( self, context, group, add_volumes, remove_volumes): model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} return model_update, None, None def create_consistencygroup_from_src( self, context, group, volumes, cgsnapshot, snapshots, source_cg=None, source_vols=None): return None, None def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots): for volume in self.volumes.values(): if (cgsnapshot.get('consistencygroup_id', None) == volume.get('consistencygroup_id', None)): if volume['size'] > POOL_SIZE / 2: raise self.exception.VolumeBackendAPIException(data='blah') snapshot = copy.deepcopy(volume) snapshot['name'] = CANNOT_DELETE \ if snapshot['name'] == CANNOT_DELETE \ else snapshot['name'] + 'Snapshot' snapshot['status'] = 'available' snapshot['cgsnapshot_id'] = cgsnapshot.get('id', None) snapshot['consistencygroup_id'] = \ cgsnapshot.get('consistencygroup_id', None) self.snapshots[snapshot['name']] = snapshot snapshots.append(snapshot) return {'status': 'available'}, snapshots def delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots): updated_snapshots = [] for snapshot in snapshots: if snapshot['name'] == CANNOT_DELETE: raise exception.VolumeBackendAPIException( message='Snapshot can not be deleted') else: snapshot['status'] = 'deleted' updated_snapshots.append(snapshot) # Delete snapshots in consistency group self.snapshots = {k: snap for k, snap in self.snapshots.items() if not(snap.get('consistencygroup_id', None) == cgsnapshot.get('cgsnapshot_id', None))} return {'status': 'deleted'}, updated_snapshots def freeze_backend(self, context): return True def thaw_backend(self, context): return True def failover_host(self, 
context, volumes, secondary_id): target_id = 'BLA' volume_update_list = [] for volume in volumes: status = 'failed-over' if volume['replication_status'] == 'invalid_status_val': status = 'error' volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': status}}) return target_id, volume_update_list class XIVDS8KVolumeDriverTest(test.TestCase): """Test IBM XIV and DS8K volume driver.""" def setUp(self): """Initialize IBM XIV and DS8K Driver.""" super(XIVDS8KVolumeDriverTest, self).setUp() configuration = mox.MockObject(conf.Configuration) configuration.san_is_local = False configuration.xiv_ds8k_proxy = \ 'cinder.tests.unit.test_ibm_xiv_ds8k.XIVDS8KFakeProxyDriver' configuration.xiv_ds8k_connection_type = 'iscsi' configuration.xiv_chap = 'disabled' configuration.san_ip = FAKE configuration.management_ips = FAKE configuration.san_login = FAKE configuration.san_clustername = FAKE configuration.san_password = FAKE configuration.append_config_values(mox.IgnoreArg()) self.driver = xiv_ds8k.XIVDS8KDriver( configuration=configuration) def test_initialized_should_set_xiv_ds8k_info(self): """Test that the san flags are passed to the IBM proxy.""" self.assertEqual( self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'], self.driver.configuration.san_login) self.assertEqual( self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_pass'], self.driver.configuration.san_password) self.assertEqual( self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'], self.driver.configuration.san_ip) self.assertEqual( self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_vol_pool'], self.driver.configuration.san_clustername) def test_setup_should_fail_if_credentials_are_invalid(self): """Test that the xiv_ds8k_proxy validates credentials.""" self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'] = 'invalid' self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None) def test_setup_should_fail_if_connection_is_invalid(self): """Test that the xiv_ds8k_proxy validates connection.""" self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'] = \ 'invalid' self.assertRaises(exception.HostNotFound, self.driver.do_setup, None) def test_create_volume(self): """Test creating a volume.""" self.driver.do_setup(None) self.driver.create_volume(VOLUME) has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME) self.assertTrue(has_volume) self.driver.delete_volume(VOLUME) def test_volume_exists(self): """Test the volume_exists method with a volume that doesn't exist.""" self.driver.do_setup(None) self.assertFalse( self.driver.xiv_ds8k_proxy.volume_exists({'name': FAKE}) ) def test_delete_volume(self): """Verify that a volume is deleted.""" self.driver.do_setup(None) self.driver.create_volume(VOLUME) self.driver.delete_volume(VOLUME) has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME) self.assertFalse(has_volume) def test_delete_volume_should_fail_for_not_existing_volume(self): """Verify that deleting a non-existing volume is OK.""" self.driver.do_setup(None) self.driver.delete_volume(VOLUME) def test_create_volume_should_fail_if_no_pool_space_left(self): """Verify that the xiv_ds8k_proxy validates volume pool space.""" self.driver.do_setup(None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, {'name': FAKE, 'id': 1, 'size': TOO_BIG_VOLUME_SIZE}) def test_initialize_connection(self): """Test that initialize connection attaches volume to host.""" self.driver.do_setup(None) self.driver.create_volume(VOLUME)
self.driver.initialize_connection(VOLUME, CONNECTOR) self.assertTrue( self.driver.xiv_ds8k_proxy.is_volume_attached(VOLUME, CONNECTOR)) self.driver.terminate_connection(VOLUME, CONNECTOR) self.driver.delete_volume(VOLUME) def test_initialize_connection_should_fail_for_non_existing_volume(self): """Verify that initialize won't work for non-existing volume.""" self.driver.do_setup(None) self.assertRaises(exception.VolumeNotFound, self.driver.initialize_connection, VOLUME, CONNECTOR) def test_terminate_connection(self): """Test terminating a connection.""" self.driver.do_setup(None) self.driver.create_volume(VOLUME) self.driver.initialize_connection(VOLUME, CONNECTOR) self.driver.terminate_connection(VOLUME, CONNECTOR) self.assertFalse(self.driver.xiv_ds8k_proxy.is_volume_attached( VOLUME, CONNECTOR)) self.driver.delete_volume(VOLUME) def test_terminate_connection_should_fail_on_non_existing_volume(self): """Test that terminate won't work for non-existing volumes.""" self.driver.do_setup(None) self.assertRaises(exception.VolumeNotFound, self.driver.terminate_connection, VOLUME, CONNECTOR) def test_manage_existing_get_size(self): """Test that manage_existing_get_size returns the expected size. """ self.driver.do_setup(None) self.driver.create_volume(MANAGED_VOLUME) existing_ref = {'source-name': MANAGED_VOLUME['name']} return_size = self.driver.manage_existing_get_size( VOLUME, existing_ref) self.assertEqual(return_size, MANAGED_VOLUME['size']) # cover both cases, whether the driver renames the volume or not self.driver.delete_volume(VOLUME) self.driver.delete_volume(MANAGED_VOLUME) def test_manage_existing_get_size_should_fail_on_non_existing_volume(self): """Test that manage_existing_get_size fails on a non-existing volume. """ self.driver.do_setup(None) # on purpose - do NOT create managed volume existing_ref = {'source-name': MANAGED_VOLUME['name']} self.assertRaises(exception.VolumeNotFound, self.driver.manage_existing_get_size, VOLUME, existing_ref) def test_manage_existing(self): """Test that manage_existing returns successfully. """ self.driver.do_setup(None) self.driver.create_volume(MANAGED_VOLUME) existing_ref = {'source-name': MANAGED_VOLUME['name']} self.driver.manage_existing(VOLUME, existing_ref) self.assertEqual(VOLUME['size'], MANAGED_VOLUME['size']) # cover both cases, whether the driver renames the volume or not self.driver.delete_volume(VOLUME) self.driver.delete_volume(MANAGED_VOLUME) def test_manage_existing_should_fail_on_non_existing_volume(self): """Test that manage_existing fails on a non-existing volume. """ self.driver.do_setup(None) # on purpose - do NOT create managed volume existing_ref = {'source-name': MANAGED_VOLUME['name']} self.assertRaises(exception.VolumeNotFound, self.driver.manage_existing, VOLUME, existing_ref) def test_get_replication_status(self): """Test that get_replication_status returns successfully. """ self.driver.do_setup(None) # assume the replicated volume is inactive replicated_volume = copy.deepcopy(REPLICATED_VOLUME) replicated_volume['replication_status'] = 'inactive' model_update = self.driver.get_replication_status( CONTEXT, replicated_volume ) self.assertEqual( model_update['replication_status'], 'active' ) def test_get_replication_status_fail_on_exception(self): """Test that get_replication_status fails on an exception.""" self.driver.do_setup(None) replicated_volume = copy.deepcopy(REPLICATED_VOLUME) # on purpose - set invalid value to replication_status # expect an exception.
replicated_volume['replication_status'] = 'invalid_status_val' self.assertRaises( exception.CinderException, self.driver.get_replication_status, CONTEXT, replicated_volume ) def test_retype(self): """Test that retype returns successfully.""" self.driver.do_setup(None) # prepare parameters ctxt = context.get_admin_context() host = { 'host': 'foo', 'capabilities': { 'location_info': 'xiv_ds8k_fake_1', 'extent_size': '1024' } } key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, equal = volume_types.volume_types_diff( ctxt, old_type_ref['id'], new_type_ref['id'], ) volume = copy.deepcopy(VOLUME) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) self.driver.create_volume(volume) ret = self.driver.retype(ctxt, volume, new_type, diff, host) self.assertTrue(ret) self.assertTrue(volume['easytier']) def test_retype_fail_on_exception(self): """Test that retype fails on exception.""" self.driver.do_setup(None) # prepare parameters ctxt = context.get_admin_context() host = { 'host': 'foo', 'capabilities': { 'location_info': 'xiv_ds8k_fake_1', 'extent_size': '1024' } } key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new') diff, equal = volume_types.volume_types_diff( ctxt, old_type_ref['id'], new_type_ref['id'], ) volume = copy.deepcopy(VOLUME) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.assertRaises( KeyError, self.driver.retype, ctxt, volume, new_type, diff, host ) def test_create_consistencygroup(self): """Test that create_consistencygroup returns successfully.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group model_update = self.driver.create_consistencygroup(ctxt, CONSISTGROUP) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status'], "Consistency Group creation failed") def test_create_consistencygroup_fail_on_cg_not_empty(self): """Test create_consistencygroup with a non-empty consistency group.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create volumes # And add the volumes into the consistency group before creating cg self.driver.create_volume(CG_VOLUME) self.assertRaises(exception.CinderException, self.driver.create_consistencygroup, ctxt, CONSISTGROUP) def test_delete_consistencygroup(self): """Test that delete_consistencygroup returns successfully.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group self.driver.create_consistencygroup(ctxt, CONSISTGROUP) # Create volumes and add them to consistency group self.driver.create_volume(CG_VOLUME) # Delete consistency group model_update, volumes = \ self.driver.delete_consistencygroup( ctxt, CONSISTGROUP, [CG_VOLUME]) # Verify the result self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update['status'], 'Consistency Group deletion failed') for volume in volumes: self.assertEqual('deleted', volume['status'], 'Consistency Group deletion failed') def
test_delete_consistencygroup_fail_on_volume_not_delete(self): """Test delete_consistencygroup with volume delete failure.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group self.driver.create_consistencygroup(ctxt, CONSISTGROUP) # Set the volume not to be deleted volume = copy.deepcopy(CG_VOLUME) volume['name'] = CANNOT_DELETE # Create volumes and add them to consistency group self.driver.create_volume(volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_consistencygroup, ctxt, CONSISTGROUP, [volume]) def test_create_cgsnapshot(self): """Test that create_cgsnapshot returns successfully.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group self.driver.create_consistencygroup(ctxt, CONSISTGROUP) # Create volumes and add them to consistency group self.driver.create_volume(VOLUME) # Create consistency group snapshot model_update, snapshots = \ self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT, [VOLUME]) # Verify the result self.assertEqual('available', model_update['status'], 'Consistency Group Snapshot creation failed') for snap in snapshots: self.assertEqual('available', snap['status']) # Clean the environment self.driver.delete_cgsnapshot(ctxt, CG_SNAPSHOT, [VOLUME]) self.driver.delete_consistencygroup(ctxt, CONSISTGROUP, [VOLUME]) def test_create_cgsnapshot_fail_on_no_pool_space_left(self): """Test that create_cgsnapshot fails when no pool space is left.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group self.driver.create_consistencygroup(ctxt, CONSISTGROUP) # Set the volume size volume = copy.deepcopy(CG_VOLUME) volume['size'] = POOL_SIZE / 2 + 1 # Create volumes and add them to consistency group self.driver.create_volume(volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cgsnapshot, ctxt, CG_SNAPSHOT, [volume]) # Clean the environment self.driver.volumes = None self.driver.delete_consistencygroup(ctxt, CONSISTGROUP, [volume]) def test_delete_cgsnapshot(self): """Test that delete_cgsnapshot returns successfully.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group self.driver.create_consistencygroup(ctxt, CONSISTGROUP) # Create volumes and add them to consistency group self.driver.create_volume(CG_VOLUME) # Create consistency group snapshot self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT, [CG_VOLUME]) # Delete consistency group snapshot model_update, snapshots = \ self.driver.delete_cgsnapshot(ctxt, CG_SNAPSHOT, [CG_VOLUME]) # Verify the result self.assertEqual('deleted', model_update['status'], 'Consistency Group Snapshot deletion failed') for snap in snapshots: self.assertEqual('deleted', snap['status']) # Clean the environment self.driver.delete_consistencygroup(ctxt, CONSISTGROUP, [CG_VOLUME]) def test_delete_cgsnapshot_fail_on_snapshot_not_delete(self): """Test delete_cgsnapshot when the snapshot cannot be deleted.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group self.driver.create_consistencygroup(ctxt, CONSISTGROUP) # Set the snapshot not to be deleted volume = copy.deepcopy(CG_VOLUME) volume['name'] = CANNOT_DELETE # Create volumes and add them to consistency group self.driver.create_volume(volume) # Create consistency group snapshot self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT, [volume]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_cgsnapshot, ctxt, CG_SNAPSHOT, [volume]) def
test_update_consistencygroup_without_volumes(self): """Test update_consistencygroup when there are no volumes specified.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Update consistency group model_update, added, removed = self.driver.update_consistencygroup( ctxt, CONSISTGROUP, [], []) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status'], "Consistency Group update failed") self.assertIsNone(added, "added volumes list is not empty") self.assertIsNone(removed, "removed volumes list is not empty") def test_update_consistencygroup_with_volumes(self): """Test update_consistencygroup when there are volumes specified.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Update consistency group model_update, added, removed = self.driver.update_consistencygroup( ctxt, CONSISTGROUP, [VOLUME], [VOLUME2]) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status'], "Consistency Group update failed") self.assertIsNone(added, "added volumes list is not empty") self.assertIsNone(removed, "removed volumes list is not empty") def test_create_consistencygroup_from_src_without_volumes(self): """Test create_consistencygroup_from_src with no volumes specified.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group from source model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src( ctxt, CONSISTGROUP, [], CG_SNAPSHOT, [])) # model_update can be None or have 'available' in its status if model_update: self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status'], "Consistency Group create from source failed") # volumes_model_update can be None or have 'available' in its status if volumes_model_update: self.assertFalse(volumes_model_update, "volumes list is not empty") def test_create_consistencygroup_from_src_with_volumes(self): """Test create_consistencygroup_from_src with volumes specified.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create consistency group from source model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src( ctxt, CONSISTGROUP, [VOLUME], CG_SNAPSHOT, [SNAPSHOT])) # model_update can be None or have 'available' in its status if model_update: self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status'], "Consistency Group create from source failed") # volumes_model_update can be None or have 'available' in its status if volumes_model_update: self.assertEqual('available', volumes_model_update['status'], "volumes list status failed") def test_freeze_backend(self): """Test that freeze_backend returns successfully.""" self.driver.do_setup(None) # not much we can test here... self.assertTrue(self.driver.freeze_backend(CONTEXT)) def test_thaw_backend(self): """Test that thaw_backend returns successfully.""" self.driver.do_setup(None) # not much we can test here...
self.assertTrue(self.driver.thaw_backend(CONTEXT)) def test_failover_host(self): """Test that failover_host returns expected values""" self.driver.do_setup(None) replicated_volume = copy.deepcopy(REPLICATED_VOLUME) # assume the replication_status is active replicated_volume['replication_status'] = 'active' expected_target_id = 'BLA' expected_volume_update_list = [ {'volume_id': REPLICATED_VOLUME['id'], 'updates': {'replication_status': 'failed-over'}}] target_id, volume_update_list = self.driver.failover_host( CONTEXT, [replicated_volume], SECONDARY ) self.assertEqual(expected_target_id, target_id) self.assertEqual(expected_volume_update_list, volume_update_list) def test_failover_host_bad_state(self): """Test that failover_host returns with error""" self.driver.do_setup(None) replicated_volume = copy.deepcopy(REPLICATED_VOLUME) # assume the replication_status is active replicated_volume['replication_status'] = 'invalid_status_val' expected_target_id = 'BLA' expected_volume_update_list = [ {'volume_id': REPLICATED_VOLUME['id'], 'updates': {'replication_status': 'error'}}] target_id, volume_update_list = self.driver.failover_host( CONTEXT, [replicated_volume], SECONDARY ) self.assertEqual(expected_target_id, target_id) self.assertEqual(expected_volume_update_list, volume_update_list) cinder-8.0.0/cinder/tests/unit/test_netapp_ssc.py0000664000567000056710000007653412701406250023275 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
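# NOTE(editor): illustrative aside, not part of the original test module.
# The ssc tests below swap six.moves.http_client.HTTPConnection for a fake
# class so that NetApp API calls never leave the process. A minimal sketch
# of that stubbing pattern follows; _RecordingConnection is a hypothetical
# name for this sketch, not a cinder or NetApp API.
from six.moves import http_client


class _RecordingConnection(object):
    """Stand-in that records requests instead of opening real sockets."""

    def __init__(self, host, timeout=None):
        self.host = host
        self.requests = []

    def request(self, method, path, data=None, headers=None):
        # A fuller fake would also synthesize a canned HTTP response here,
        # as FakeDirectCmodeHTTPConnection does further down.
        self.requests.append((method, path, data, headers))

# A test then installs the fake the same way SscUtilsTestCase.setUp()
# installs its own:
#     self.stubs.Set(http_client, 'HTTPConnection', _RecordingConnection)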
"""Unit tests for the NetApp-specific ssc module.""" import copy import ddt from lxml import etree import mock from mox3 import mox import six from six.moves import BaseHTTPServer from six.moves import http_client from cinder import exception from cinder import test from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap import ssc_cmode class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """HTTP handler that doesn't spam the log.""" def log_message(self, format, *args): pass class FakeHttplibSocket(object): """A fake socket implementation for http_client.HTTPResponse.""" def __init__(self, value): self._rbuffer = six.StringIO(value) self._wbuffer = six.StringIO('') oldclose = self._wbuffer.close def newclose(): self.result = self._wbuffer.getvalue() oldclose() self._wbuffer.close = newclose def makefile(self, mode, _other): """Returns the socket's internal buffer""" if mode == 'r' or mode == 'rb': return self._rbuffer if mode == 'w' or mode == 'wb': return self._wbuffer RESPONSE_PREFIX_DIRECT_CMODE = """ """ RESPONSE_PREFIX_DIRECT = """ """ RESPONSE_SUFFIX_DIRECT = """""" class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler): """HTTP handler that fakes enough stuff to allow the driver to run.""" def do_GET(s): """Respond to a GET request.""" if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return s.send_response(200) s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() out = s.wfile out.write('' '') def do_POST(s): """Respond to a POST request.""" if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return request_xml = s.rfile.read(int(s.headers['Content-Length'])) root = etree.fromstring(request_xml) body = [x for x in root.iterchildren()] request = body[0] tag = request.tag localname = etree.QName(tag).localname or tag if 'volume-get-iter' == localname: body = """ iscsi Openstack aggr0 /iscsi rw 214748364 224748364 enabled file true false online false false true nfsvol Openstack aggr0 /nfs rw 14748364 24748364 enabled volume true false online false false true nfsvol2 Openstack aggr0 /nfs2 rw 14748364 24748364 enabled volume true false online true true true nfsvol3 Openstack aggr0 /nfs3 rw enabled volume true false online false false true 4""" elif 'aggr-options-list-info' == localname: body = """ ha_policy cfo raidtype raid_dp """ elif 'sis-get-iter' == localname: body = """ /vol/iscsi true enabled """ elif 'storage-disk-get-iter' == localname: body = """ SATA """ else: # Unknown API s.send_response(500) s.end_headers return s.send_response(200) s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE) s.wfile.write(RESPONSE_PREFIX_DIRECT) s.wfile.write(body) s.wfile.write(RESPONSE_SUFFIX_DIRECT) class FakeDirectCmodeHTTPConnection(object): """A fake http_client.HTTPConnection for netapp tests. Requests made via this connection actually get translated and routed into the fake direct handler above, we then turn the response into the http_client.HTTPResponse that the caller expects. 
""" def __init__(self, host, timeout=None): self.host = host def request(self, method, path, data=None, headers=None): if not headers: headers = {} req_str = '%s %s HTTP/1.1\r\n' % (method, path) for key, value in headers.items(): req_str += "%s: %s\r\n" % (key, value) if data: req_str += '\r\n%s' % data # NOTE(vish): normally the http transport normailizes from unicode sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) # NOTE(vish): stop the server from trying to look up address from # the fake socket FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1' self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None) self.sock = FakeHttplibSocket(sock.result) self.http_response = http_client.HTTPResponse(self.sock) def set_debuglevel(self, level): pass def getresponse(self): self.http_response.begin() return self.http_response def getresponsebody(self): return self.sock.result def createNetAppVolume(**kwargs): vol = ssc_cmode.NetAppVolume(kwargs['name'], kwargs['vs']) vol.state['vserver_root'] = kwargs.get('vs_root') vol.state['status'] = kwargs.get('status') vol.state['junction_active'] = kwargs.get('junc_active') vol.space['size_avl_bytes'] = kwargs.get('avl_byt') vol.space['size_total_bytes'] = kwargs.get('total_byt') vol.space['space-guarantee-enabled'] = kwargs.get('sg_enabled') vol.space['space-guarantee'] = kwargs.get('sg') vol.space['thin_provisioned'] = kwargs.get('thin') vol.mirror['mirrored'] = kwargs.get('mirrored') vol.qos['qos_policy_group'] = kwargs.get('qos') vol.aggr['name'] = kwargs.get('aggr_name') vol.aggr['junction'] = kwargs.get('junction') vol.sis['dedup'] = kwargs.get('dedup') vol.sis['compression'] = kwargs.get('compression') vol.aggr['raid_type'] = kwargs.get('raid') vol.aggr['ha_policy'] = kwargs.get('ha') vol.aggr['disk_type'] = kwargs.get('disk') return vol @ddt.ddt class SscUtilsTestCase(test.TestCase): """Test ssc utis.""" vol1 = createNetAppVolume(name='vola', vs='openstack', vs_root=False, status='online', junc_active=True, avl_byt='1000', total_byt='1500', sg_enabled=False, sg='file', thin=False, mirrored=False, qos=None, aggr_name='aggr1', junction='/vola', dedup=False, compression=False, raid='raiddp', ha='cfo', disk='SSD') vol2 = createNetAppVolume(name='volb', vs='openstack', vs_root=False, status='online', junc_active=True, avl_byt='2000', total_byt='2500', sg_enabled=True, sg='file', thin=True, mirrored=False, qos=None, aggr_name='aggr2', junction='/volb', dedup=True, compression=False, raid='raid4', ha='cfo', disk='SSD') vol3 = createNetAppVolume(name='volc', vs='openstack', vs_root=False, status='online', junc_active=True, avl_byt='3000', total_byt='3500', sg_enabled=True, sg='volume', thin=True, mirrored=False, qos=None, aggr_name='aggr1', junction='/volc', dedup=True, compression=True, raid='raiddp', ha='cfo', disk='SAS') vol4 = createNetAppVolume(name='vold', vs='openstack', vs_root=False, status='online', junc_active=True, avl_byt='4000', total_byt='4500', sg_enabled=False, sg='none', thin=False, mirrored=False, qos=None, aggr_name='aggr1', junction='/vold', dedup=False, compression=False, raid='raiddp', ha='cfo', disk='SSD') vol5 = createNetAppVolume(name='vole', vs='openstack', vs_root=False, status='online', junc_active=True, avl_byt='5000', total_byt='5500', sg_enabled=True, sg='none', thin=False, mirrored=True, qos=None, aggr_name='aggr2', junction='/vole', dedup=True, compression=False, raid='raid4', ha='cfo', disk='SAS') test_vols = {vol1, vol2, vol3, vol4, vol5} ssc_map = { 'mirrored': {vol1}, 
'dedup': {vol1, vol2, vol3}, 'compression': {vol3, vol4}, 'thin': {vol5, vol2}, 'all': test_vols } def setUp(self): super(SscUtilsTestCase, self).setUp() self.stubs.Set(http_client, 'HTTPConnection', FakeDirectCmodeHTTPConnection) @ddt.data({'na_server_exists': False, 'volume': None}, {'na_server_exists': True, 'volume': 'vol'}, {'na_server_exists': True, 'volume': None}) @ddt.unpack def test_query_cluster_vols_for_ssc(self, na_server_exists, volume): if na_server_exists: na_server = netapp_api.NaServer('127.0.0.1') fake_api_return = mock.Mock(return_value=[]) self.mock_object(ssc_cmode.netapp_api, 'invoke_api', new_attr=fake_api_return) ssc_cmode.query_cluster_vols_for_ssc(na_server, 'vserver', volume) else: na_server = None fake_api_error = mock.Mock(side_effect=exception.InvalidInput) self.mock_object(ssc_cmode.netapp_api, 'invoke_api', new_attr=fake_api_error) self.assertRaises(KeyError, ssc_cmode.query_cluster_vols_for_ssc, na_server, 'vserver', volume) def test_cl_vols_ssc_all(self): """Test cluster ssc for all vols.""" na_server = netapp_api.NaServer('127.0.0.1') vserver = 'openstack' test_vols = set([copy.deepcopy(self.vol1), copy.deepcopy(self.vol2), copy.deepcopy(self.vol3)]) sis = {'vola': {'dedup': False, 'compression': False}, 'volb': {'dedup': True, 'compression': False}} mirrored = {'vola': [{'dest_loc': 'openstack1:vol1', 'rel_type': 'data_protection', 'mirr_state': 'broken'}, {'dest_loc': 'openstack2:vol2', 'rel_type': 'data_protection', 'mirr_state': 'snapmirrored'}], 'volb': [{'dest_loc': 'openstack1:vol2', 'rel_type': 'data_protection', 'mirr_state': 'broken'}]} self.mox.StubOutWithMock(ssc_cmode, 'query_cluster_vols_for_ssc') self.mox.StubOutWithMock(ssc_cmode, 'get_sis_vol_dict') self.mox.StubOutWithMock(ssc_cmode, 'get_snapmirror_vol_dict') self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_options') self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_storage_disk') ssc_cmode.query_cluster_vols_for_ssc( na_server, vserver, None).AndReturn(test_vols) ssc_cmode.get_sis_vol_dict(na_server, vserver, None).AndReturn(sis) ssc_cmode.get_snapmirror_vol_dict(na_server, vserver, None).AndReturn( mirrored) raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'} ssc_cmode.query_aggr_options( na_server, mox.IgnoreArg()).AndReturn(raiddp) ssc_cmode.query_aggr_storage_disk( na_server, mox.IgnoreArg()).AndReturn('SSD') raid4 = {'ha_policy': 'cfo', 'raid_type': 'raid4'} ssc_cmode.query_aggr_options( na_server, mox.IgnoreArg()).AndReturn(raid4) ssc_cmode.query_aggr_storage_disk( na_server, mox.IgnoreArg()).AndReturn('SAS') self.mox.ReplayAll() res_vols = ssc_cmode.get_cluster_vols_with_ssc( na_server, vserver, volume=None) self.mox.VerifyAll() for vol in res_vols: if vol.id['name'] == 'volc': self.assertEqual(False, vol.sis['compression']) self.assertEqual(False, vol.sis['dedup']) else: pass def test_cl_vols_ssc_single(self): """Test cluster ssc for single vol.""" na_server = netapp_api.NaServer('127.0.0.1') vserver = 'openstack' test_vols = set([copy.deepcopy(self.vol1)]) sis = {'vola': {'dedup': False, 'compression': False}} mirrored = {'vola': [{'dest_loc': 'openstack1:vol1', 'rel_type': 'data_protection', 'mirr_state': 'broken'}, {'dest_loc': 'openstack2:vol2', 'rel_type': 'data_protection', 'mirr_state': 'snapmirrored'}]} self.mox.StubOutWithMock(ssc_cmode, 'query_cluster_vols_for_ssc') self.mox.StubOutWithMock(ssc_cmode, 'get_sis_vol_dict') self.mox.StubOutWithMock(ssc_cmode, 'get_snapmirror_vol_dict') self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_options') 
self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_storage_disk') ssc_cmode.query_cluster_vols_for_ssc( na_server, vserver, 'vola').AndReturn(test_vols) ssc_cmode.get_sis_vol_dict( na_server, vserver, 'vola').AndReturn(sis) ssc_cmode.get_snapmirror_vol_dict( na_server, vserver, 'vola').AndReturn(mirrored) raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'} ssc_cmode.query_aggr_options( na_server, 'aggr1').AndReturn(raiddp) ssc_cmode.query_aggr_storage_disk(na_server, 'aggr1').AndReturn('SSD') self.mox.ReplayAll() res_vols = ssc_cmode.get_cluster_vols_with_ssc( na_server, vserver, volume='vola') self.mox.VerifyAll() self.assertEqual(1, len(res_vols)) def test_get_cluster_ssc(self): """Test get cluster ssc map.""" na_server = netapp_api.NaServer('127.0.0.1') vserver = 'openstack' test_vols = set( [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5]) self.mox.StubOutWithMock(ssc_cmode, 'get_cluster_vols_with_ssc') ssc_cmode.get_cluster_vols_with_ssc( na_server, vserver).AndReturn(test_vols) self.mox.ReplayAll() res_map = ssc_cmode.get_cluster_ssc(na_server, vserver) self.mox.VerifyAll() self.assertEqual(1, len(res_map['mirrored'])) self.assertEqual(3, len(res_map['dedup'])) self.assertEqual(1, len(res_map['compression'])) self.assertEqual(2, len(res_map['thin'])) self.assertEqual(5, len(res_map['all'])) def test_vols_for_boolean_specs(self): """Test ssc for boolean specs.""" test_vols = set( [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5]) ssc_map = {'mirrored': set([self.vol1]), 'dedup': set([self.vol1, self.vol2, self.vol3]), 'compression': set([self.vol3, self.vol4]), 'thin': set([self.vol5, self.vol2]), 'all': test_vols} test_map = {'mirrored': ('netapp_mirrored', 'netapp_unmirrored'), 'dedup': ('netapp_dedup', 'netapp_nodedup'), 'compression': ('netapp_compression', 'netapp_nocompression'), 'thin': ('netapp_thin_provisioned', 'netapp_thick_provisioned')} for type in test_map.keys(): # type extra_specs = {test_map[type][0]: 'true'} res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs) self.assertEqual(len(ssc_map[type]), len(res)) # opposite type extra_specs = {test_map[type][1]: 'true'} res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs) self.assertEqual(len(ssc_map['all'] - ssc_map[type]), len(res)) # both types extra_specs =\ {test_map[type][0]: 'true', test_map[type][1]: 'true'} res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs) self.assertEqual(len(ssc_map['all']), len(res)) def test_vols_for_optional_specs(self): """Test ssc for optional specs.""" extra_specs =\ {'netapp_dedup': 'true', 'netapp:raid_type': 'raid4', 'netapp:disk_type': 'SSD'} res = ssc_cmode.get_volumes_for_specs(self.ssc_map, extra_specs) self.assertEqual(1, len(res)) def test_get_volumes_for_specs_none_specs(self): none_specs = None expected = self.ssc_map['all'] result = ssc_cmode.get_volumes_for_specs(self.ssc_map, none_specs) self.assertEqual(expected, result) def test_get_volumes_for_specs_empty_dict(self): empty_dict = {} expected = self.ssc_map['all'] result = ssc_cmode.get_volumes_for_specs( self.ssc_map, empty_dict) self.assertEqual(expected, result) def test_get_volumes_for_specs_not_a_dict(self): not_a_dict = False expected = self.ssc_map['all'] result = ssc_cmode.get_volumes_for_specs( self.ssc_map, not_a_dict) self.assertEqual(expected, result) def test_query_cl_vols_for_ssc(self): na_server = netapp_api.NaServer('127.0.0.1') body = etree.XML(""" iscsi Openstack aggr0 /iscsi rw 214748364 224748364 enabled file true false online false false true nfsvol Openstack aggr0 /nfs 
rw 14748364 24748364 enabled volume true false online false false true nfsvol2 Openstack aggr0 /nfs2 rw 14748364 24748364 enabled volume true false online true true true nfsvol3 Openstack aggr0 /nfs3 rw enabled volume true false online false false true 4""") self.mock_object(ssc_cmode.netapp_api, 'invoke_api', mock.Mock( return_value=[netapp_api.NaElement(body)])) vols = ssc_cmode.query_cluster_vols_for_ssc(na_server, 'Openstack') self.assertEqual(2, len(vols)) for vol in vols: if vol.id['name'] != 'iscsi' or vol.id['name'] != 'nfsvol': pass else: raise exception.InvalidVolume('Invalid volume returned.') def test_query_aggr_options(self): na_server = netapp_api.NaServer('127.0.0.1') body = etree.XML(""" ha_policy cfo raidtype raid_dp """) self.mock_object(ssc_cmode.netapp_api, 'invoke_api', mock.Mock( return_value=[netapp_api.NaElement(body)])) aggr_attribs = ssc_cmode.query_aggr_options(na_server, 'aggr0') if aggr_attribs: self.assertEqual('cfo', aggr_attribs['ha_policy']) self.assertEqual('raid_dp', aggr_attribs['raid_type']) else: raise exception.InvalidParameterValue("Incorrect aggr options") def test_query_aggr_storage_disk(self): na_server = netapp_api.NaServer('127.0.0.1') body = etree.XML(""" SATA """) self.mock_object(ssc_cmode.netapp_api, 'invoke_api', mock.Mock(return_value=[netapp_api.NaElement(body)])) eff_disk_type = ssc_cmode.query_aggr_storage_disk(na_server, 'aggr0') self.assertEqual('SATA', eff_disk_type) cinder-8.0.0/cinder/tests/unit/test_db_api.py0000664000567000056710000032522312701406250022344 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for cinder.db.api.""" import datetime import enum import mock from oslo_utils import uuidutils import six from cinder.api import common from cinder import context from cinder import db from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder import objects from cinder import quota from cinder import test from cinder.tests.unit import fake_constants THREE = 3 THREE_HUNDREDS = 300 ONE_HUNDREDS = 100 def _quota_reserve(context, project_id): """Create sample Quota, QuotaUsage and Reservation objects. There is no method db.quota_usage_create(), so we have to use db.quota_reserve() for creating QuotaUsage objects. Returns reservations uuids. 
""" def get_sync(resource, usage): def sync(elevated, project_id, session): return {resource: usage} return sync quotas = {} resources = {} deltas = {} for i, resource in enumerate(('volumes', 'gigabytes')): quota_obj = db.quota_create(context, project_id, resource, i + 1) quotas[resource] = quota_obj.hard_limit resources[resource] = quota.ReservableResource(resource, '_sync_%s' % resource) deltas[resource] = i + 1 return db.quota_reserve( context, resources, quotas, deltas, datetime.datetime.utcnow(), datetime.datetime.utcnow(), datetime.timedelta(days=1), project_id ) class ModelsObjectComparatorMixin(object): def _dict_from_object(self, obj, ignored_keys): if ignored_keys is None: ignored_keys = [] if isinstance(obj, dict): items = obj.items() else: items = obj.iteritems() return {k: v for k, v in items if k not in ignored_keys} def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): obj1 = self._dict_from_object(obj1, ignored_keys) obj2 = self._dict_from_object(obj2, ignored_keys) self.assertEqual( len(obj1), len(obj2), "Keys mismatch: %s" % six.text_type( set(obj1.keys()) ^ set(obj2.keys()))) for key, value in obj1.items(): self.assertEqual(value, obj2[key]) def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None): obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) sort_key = lambda d: [d[k] for k in sorted(d)] conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key) self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2)) def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): self.assertEqual(len(primitives1), len(primitives2)) for primitive in primitives1: self.assertIn(primitive, primitives2) for primitive in primitives2: self.assertIn(primitive, primitives1) class BaseTest(test.TestCase, ModelsObjectComparatorMixin): def setUp(self): super(BaseTest, self).setUp() self.ctxt = context.get_admin_context() class DBAPIServiceTestCase(BaseTest): """Unit tests for cinder.db.api.service_*.""" def _get_base_values(self): return { 'host': 'fake_host', 'binary': 'fake_binary', 'topic': 'fake_topic', 'report_count': 3, 'disabled': False } def _create_service(self, values): v = self._get_base_values() v.update(values) return db.service_create(self.ctxt, v) def test_service_create(self): service = self._create_service({}) self.assertFalse(service['id'] is None) for key, value in self._get_base_values().items(): self.assertEqual(value, service[key]) def test_service_destroy(self): service1 = self._create_service({}) service2 = self._create_service({'host': 'fake_host2'}) db.service_destroy(self.ctxt, service1['id']) self.assertRaises(exception.ServiceNotFound, db.service_get, self.ctxt, service1['id']) self._assertEqualObjects(db.service_get(self.ctxt, service2['id']), service2) def test_service_update(self): service = self._create_service({}) new_values = { 'host': 'fake_host1', 'binary': 'fake_binary1', 'topic': 'fake_topic1', 'report_count': 4, 'disabled': True } db.service_update(self.ctxt, service['id'], new_values) updated_service = db.service_get(self.ctxt, service['id']) for key, value in new_values.items(): self.assertEqual(value, updated_service[key]) def test_service_update_not_found_exception(self): self.assertRaises(exception.ServiceNotFound, db.service_update, self.ctxt, 100500, {}) def test_service_get(self): service1 = self._create_service({}) real_service1 = db.service_get(self.ctxt, service1['id']) self._assertEqualObjects(service1, real_service1) def test_service_get_not_found_exception(self): 
self.assertRaises(exception.ServiceNotFound, db.service_get, self.ctxt, 100500) def test_service_get_by_host_and_topic(self): service1 = self._create_service({'host': 'host1', 'topic': 'topic1'}) real_service1 = db.service_get_by_host_and_topic(self.ctxt, host='host1', topic='topic1') self._assertEqualObjects(service1, real_service1) def test_service_get_all(self): values = [ {'host': 'host1', 'binary': 'b1'}, {'host': 'host1@ceph', 'binary': 'b2'}, {'host': 'host2', 'binary': 'b2'}, {'disabled': True} ] services = [self._create_service(vals) for vals in values] disabled_services = [services[-1]] non_disabled_services = services[:-1] expected = services[:2] expected_bin = services[1:3] compares = [ (services, db.service_get_all(self.ctxt, {})), (services, db.service_get_all(self.ctxt)), (expected, db.service_get_all(self.ctxt, {'host': 'host1'})), (expected_bin, db.service_get_all(self.ctxt, {'binary': 'b2'})), (disabled_services, db.service_get_all(self.ctxt, {'disabled': True})), (non_disabled_services, db.service_get_all(self.ctxt, {'disabled': False})), ] for comp in compares: self._assertEqualListsOfObjects(*comp) def test_service_get_all_by_topic(self): values = [ {'host': 'host1', 'topic': 't1'}, {'host': 'host2', 'topic': 't1'}, {'host': 'host4', 'disabled': True, 'topic': 't1'}, {'host': 'host3', 'topic': 't2'} ] services = [self._create_service(vals) for vals in values] expected = services[:3] real = db.service_get_all_by_topic(self.ctxt, 't1') self._assertEqualListsOfObjects(expected, real) def test_service_get_all_by_binary(self): values = [ {'host': 'host1', 'binary': 'b1'}, {'host': 'host2', 'binary': 'b1'}, {'host': 'host4', 'disabled': True, 'binary': 'b1'}, {'host': 'host3', 'binary': 'b2'} ] services = [self._create_service(vals) for vals in values] expected = services[:3] real = db.service_get_all_by_binary(self.ctxt, 'b1') self._assertEqualListsOfObjects(expected, real) def test_service_get_by_args(self): values = [ {'host': 'host1', 'binary': 'a'}, {'host': 'host2', 'binary': 'b'} ] services = [self._create_service(vals) for vals in values] service1 = db.service_get_by_args(self.ctxt, 'host1', 'a') self._assertEqualObjects(services[0], service1) service2 = db.service_get_by_args(self.ctxt, 'host2', 'b') self._assertEqualObjects(services[1], service2) def test_service_get_by_args_not_found_exception(self): self.assertRaises(exception.ServiceNotFound, db.service_get_by_args, self.ctxt, 'non-exists-host', 'a') @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_service_get_by_args_with_case_insensitive(self, model_query): class case_insensitive_filter(object): def __init__(self, records): self.records = records def filter_by(self, **kwargs): ret = mock.Mock() ret.all = mock.Mock() results = [] for record in self.records: for key, value in kwargs.items(): if record[key].lower() != value.lower(): break else: results.append(record) ret.filter_by = case_insensitive_filter(results).filter_by ret.all.return_value = results return ret values = [ {'host': 'host', 'binary': 'a'}, {'host': 'HOST', 'binary': 'a'} ] services = [self._create_service(vals) for vals in values] query = mock.Mock() query.filter_by = case_insensitive_filter(services).filter_by model_query.return_value = query service1 = db.service_get_by_args(self.ctxt, 'host', 'a') self._assertEqualObjects(services[0], service1) service2 = db.service_get_by_args(self.ctxt, 'HOST', 'a') self._assertEqualObjects(services[1], service2) self.assertRaises(exception.ServiceNotFound, db.service_get_by_args, self.ctxt, 
'Host', 'a') class DBAPIVolumeTestCase(BaseTest): """Unit tests for cinder.db.api.volume_*.""" def test_volume_create(self): volume = db.volume_create(self.ctxt, {'host': 'host1'}) self.assertTrue(uuidutils.is_uuid_like(volume['id'])) self.assertEqual('host1', volume.host) def test_volume_attached_invalid_uuid(self): self.assertRaises(exception.InvalidUUID, db.volume_attached, self.ctxt, 42, 'invalid-uuid', None, '/tmp') def test_volume_attached_to_instance(self): volume = db.volume_create(self.ctxt, {'host': 'host1'}) instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' values = {'volume_id': volume['id'], 'instance_uuid': instance_uuid, 'attach_status': 'attaching', } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], instance_uuid, None, '/tmp') volume = db.volume_get(self.ctxt, volume['id']) attachment = db.volume_attachment_get(self.ctxt, attachment['id']) self.assertEqual('in-use', volume['status']) self.assertEqual('/tmp', attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) def test_volume_attached_to_host(self): volume = db.volume_create(self.ctxt, {'host': 'host1'}) host_name = 'fake_host' values = {'volume_id': volume['id'], 'attached_host': host_name, 'attach_status': 'attaching', } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], None, host_name, '/tmp') volume = db.volume_get(self.ctxt, volume['id']) attachment = db.volume_attachment_get(self.ctxt, attachment['id']) self.assertEqual('in-use', volume['status']) self.assertEqual('/tmp', attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) self.assertIsNone(attachment['instance_uuid']) self.assertEqual(attachment['attached_host'], host_name) def test_volume_data_get_for_host(self): for i in range(THREE): for j in range(THREE): db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': ONE_HUNDREDS}) for i in range(THREE): self.assertEqual((THREE, THREE_HUNDREDS), db.volume_data_get_for_host( self.ctxt, 'h%d' % i)) def test_volume_data_get_for_host_for_multi_backend(self): for i in range(THREE): for j in range(THREE): db.volume_create(self.ctxt, {'host': 'h%d@lvmdriver-1#lvmdriver-1' % i, 'size': ONE_HUNDREDS}) for i in range(THREE): self.assertEqual((THREE, THREE_HUNDREDS), db.volume_data_get_for_host( self.ctxt, 'h%d@lvmdriver-1' % i)) def test_volume_data_get_for_project(self): for i in range(THREE): for j in range(THREE): db.volume_create(self.ctxt, {'project_id': 'p%d' % i, 'size': ONE_HUNDREDS, 'host': 'h-%d-%d' % (i, j), }) for i in range(THREE): self.assertEqual((THREE, THREE_HUNDREDS), db.volume_data_get_for_project( self.ctxt, 'p%d' % i)) def test_volume_detached_from_instance(self): volume = db.volume_create(self.ctxt, {}) instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' values = {'volume_id': volume['id'], 'instance_uuid': instance_uuid, 'attach_status': 'attaching', } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], instance_uuid, None, '/tmp') db.volume_detached(self.ctxt, volume['id'], attachment['id']) volume = db.volume_get(self.ctxt, volume['id']) self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctxt, attachment['id']) self.assertEqual('available', volume['status']) def test_volume_detached_from_host(self): volume = db.volume_create(self.ctxt, {}) host_name = 
'fake_host' values = {'volume_id': volume['id'], 'attach_host': host_name, 'attach_status': 'attaching', } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], None, host_name, '/tmp') db.volume_detached(self.ctxt, volume['id'], attachment['id']) volume = db.volume_get(self.ctxt, volume['id']) self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctxt, attachment['id']) self.assertEqual('available', volume['status']) def test_volume_get(self): volume = db.volume_create(self.ctxt, {}) self._assertEqualObjects(volume, db.volume_get(self.ctxt, volume['id'])) def test_volume_destroy(self): volume = db.volume_create(self.ctxt, {}) db.volume_destroy(self.ctxt, volume['id']) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.ctxt, volume['id']) def test_volume_get_all(self): volumes = [db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': i}) for i in range(3)] self._assertEqualListsOfObjects(volumes, db.volume_get_all( self.ctxt, None, None, ['host'], None)) def test_volume_get_all_marker_passed(self): volumes = [ db.volume_create(self.ctxt, {'id': 1}), db.volume_create(self.ctxt, {'id': 2}), db.volume_create(self.ctxt, {'id': 3}), db.volume_create(self.ctxt, {'id': 4}), ] self._assertEqualListsOfObjects(volumes[2:], db.volume_get_all( self.ctxt, 2, 2, ['id'], ['asc'])) def test_volume_get_all_by_host(self): volumes = [] for i in range(3): volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i}) for j in range(3)]) for i in range(3): self._assertEqualListsOfObjects(volumes[i], db.volume_get_all_by_host( self.ctxt, 'h%d' % i)) def test_volume_get_all_by_host_with_pools(self): volumes = [] vol_on_host_wo_pool = [db.volume_create(self.ctxt, {'host': 'foo'}) for j in range(3)] vol_on_host_w_pool = [db.volume_create( self.ctxt, {'host': 'foo#pool0'})] volumes.append((vol_on_host_wo_pool + vol_on_host_w_pool)) # insert an additional record that doesn't belong to the same # host as 'foo' and verify that it is not included in the result db.volume_create(self.ctxt, {'host': 'foobar'}) self._assertEqualListsOfObjects(volumes[0], db.volume_get_all_by_host( self.ctxt, 'foo')) def test_volume_get_all_by_host_with_filters(self): v1 = db.volume_create(self.ctxt, {'host': 'h1', 'display_name': 'v1', 'status': 'available'}) v2 = db.volume_create(self.ctxt, {'host': 'h1', 'display_name': 'v2', 'status': 'available'}) v3 = db.volume_create(self.ctxt, {'host': 'h2', 'display_name': 'v1', 'status': 'available'}) self._assertEqualListsOfObjects( [v1], db.volume_get_all_by_host(self.ctxt, 'h1', filters={'display_name': 'v1'})) self._assertEqualListsOfObjects( [v1, v2], db.volume_get_all_by_host( self.ctxt, 'h1', filters={'display_name': ['v1', 'v2', 'foo']})) self._assertEqualListsOfObjects( [v1, v2], db.volume_get_all_by_host(self.ctxt, 'h1', filters={'status': 'available'})) self._assertEqualListsOfObjects( [v3], db.volume_get_all_by_host(self.ctxt, 'h2', filters={'display_name': 'v1'})) # No match vols = db.volume_get_all_by_host(self.ctxt, 'h1', filters={'status': 'foo'}) self.assertEqual([], vols) # Bogus filter, should return empty list vols = db.volume_get_all_by_host(self.ctxt, 'h1', filters={'foo': 'bar'}) self.assertEqual([], vols) def test_volume_get_all_by_group(self): volumes = [] for i in range(3): volumes.append([db.volume_create(self.ctxt, { 'consistencygroup_id': 'g%d' % i}) for j in range(3)]) for i in range(3): self._assertEqualListsOfObjects(volumes[i], db.volume_get_all_by_group( self.ctxt, 'g%d' % i)) def
test_volume_get_all_by_group_with_filters(self): v1 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g1', 'display_name': 'v1'}) v2 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g1', 'display_name': 'v2'}) v3 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g2', 'display_name': 'v1'}) self._assertEqualListsOfObjects( [v1], db.volume_get_all_by_group(self.ctxt, 'g1', filters={'display_name': 'v1'})) self._assertEqualListsOfObjects( [v1, v2], db.volume_get_all_by_group(self.ctxt, 'g1', filters={'display_name': ['v1', 'v2']})) self._assertEqualListsOfObjects( [v3], db.volume_get_all_by_group(self.ctxt, 'g2', filters={'display_name': 'v1'})) # No match vols = db.volume_get_all_by_group(self.ctxt, 'g1', filters={'display_name': 'foo'}) self.assertEqual([], vols) # Bogus filter, should return empty list vols = db.volume_get_all_by_group(self.ctxt, 'g1', filters={'foo': 'bar'}) self.assertEqual([], vols) def test_volume_get_all_by_project(self): volumes = [] for i in range(3): volumes.append([db.volume_create(self.ctxt, { 'project_id': 'p%d' % i}) for j in range(3)]) for i in range(3): self._assertEqualListsOfObjects(volumes[i], db.volume_get_all_by_project( self.ctxt, 'p%d' % i, None, None, ['host'], None)) def test_volume_get_by_name(self): db.volume_create(self.ctxt, {'display_name': 'vol1'}) db.volume_create(self.ctxt, {'display_name': 'vol2'}) db.volume_create(self.ctxt, {'display_name': 'vol3'}) # no name filter volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc']) self.assertEqual(3, len(volumes)) # filter on name volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'display_name': 'vol2'}) self.assertEqual(1, len(volumes)) self.assertEqual('vol2', volumes[0]['display_name']) # filter no match volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'display_name': 'vol4'}) self.assertEqual(0, len(volumes)) def test_volume_list_by_status(self): db.volume_create(self.ctxt, {'display_name': 'vol1', 'status': 'available'}) db.volume_create(self.ctxt, {'display_name': 'vol2', 'status': 'available'}) db.volume_create(self.ctxt, {'display_name': 'vol3', 'status': 'in-use'}) # no status filter volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc']) self.assertEqual(3, len(volumes)) # single match volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'status': 'in-use'}) self.assertEqual(1, len(volumes)) self.assertEqual('in-use', volumes[0]['status']) # multiple match volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'status': 'available'}) self.assertEqual(2, len(volumes)) for volume in volumes: self.assertEqual('available', volume['status']) # multiple filters volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'status': 'available', 'display_name': 'vol1'}) self.assertEqual(1, len(volumes)) self.assertEqual('vol1', volumes[0]['display_name']) self.assertEqual('available', volumes[0]['status']) # no match volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'status': 'in-use', 'display_name': 'vol1'}) self.assertEqual(0, len(volumes)) def _assertEqualsVolumeOrderResult(self, correct_order, limit=None, sort_keys=None, sort_dirs=None, filters=None, project_id=None, marker=None, match_keys=['id', 'display_name', 'volume_metadata', 'created_at']): """Verifies that volumes are returned in the correct order.""" if project_id: result = db.volume_get_all_by_project(self.ctxt, project_id, marker, 
limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters) else: result = db.volume_get_all(self.ctxt, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters) self.assertEqual(len(correct_order), len(result)) for vol1, vol2 in zip(result, correct_order): for key in match_keys: val1 = vol1.get(key) val2 = vol2.get(key) # metadata is a dict, compare the 'key' and 'value' of each if key == 'volume_metadata': self.assertEqual(len(val1), len(val2)) val1_dict = {x.key: x.value for x in val1} val2_dict = {x.key: x.value for x in val2} self.assertDictMatch(val1_dict, val2_dict) else: self.assertEqual(val1, val2) return result def test_volume_get_by_filter(self): """Verifies that all filtering is done at the DB layer.""" vols = [] vols.extend([db.volume_create(self.ctxt, {'project_id': 'g1', 'display_name': 'name_%d' % i, 'size': 1}) for i in range(2)]) vols.extend([db.volume_create(self.ctxt, {'project_id': 'g1', 'display_name': 'name_%d' % i, 'size': 2}) for i in range(2)]) vols.extend([db.volume_create(self.ctxt, {'project_id': 'g1', 'display_name': 'name_%d' % i}) for i in range(2)]) vols.extend([db.volume_create(self.ctxt, {'project_id': 'g2', 'display_name': 'name_%d' % i, 'size': 1}) for i in range(2)]) # By project, filter on size and name filters = {'size': '1'} correct_order = [vols[1], vols[0]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, project_id='g1') filters = {'size': '1', 'display_name': 'name_1'} correct_order = [vols[1]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, project_id='g1') # Remove project scope filters = {'size': '1'} correct_order = [vols[7], vols[6], vols[1], vols[0]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters) filters = {'size': '1', 'display_name': 'name_1'} correct_order = [vols[7], vols[1]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters) # Remove size constraint filters = {'display_name': 'name_1'} correct_order = [vols[5], vols[3], vols[1]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, project_id='g1') correct_order = [vols[7], vols[5], vols[3], vols[1]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters) # Verify bogus values return nothing filters = {'display_name': 'name_1', 'bogus_value': 'foo'} self._assertEqualsVolumeOrderResult([], filters=filters, project_id='g1') self._assertEqualsVolumeOrderResult([], project_id='bogus') self._assertEqualsVolumeOrderResult([], filters=filters) self._assertEqualsVolumeOrderResult([], filters={'metadata': 'not valid'}) self._assertEqualsVolumeOrderResult([], filters={'metadata': ['not', 'valid']}) # Verify that relationship property keys return nothing, these # exist on the Volumes model but are not columns filters = {'volume_type': 'bogus_type'} self._assertEqualsVolumeOrderResult([], filters=filters) def test_volume_get_all_filters_limit(self): vol1 = db.volume_create(self.ctxt, {'display_name': 'test1'}) vol2 = db.volume_create(self.ctxt, {'display_name': 'test2'}) vol3 = db.volume_create(self.ctxt, {'display_name': 'test2', 'metadata': {'key1': 'val1'}}) vol4 = db.volume_create(self.ctxt, {'display_name': 'test3', 'metadata': {'key1': 'val1', 'key2': 'val2'}}) vol5 = db.volume_create(self.ctxt, {'display_name': 'test3', 'metadata': {'key2': 'val2', 'key3': 'val3'}, 'host': 'host5'}) db.volume_admin_metadata_update(self.ctxt, vol5.id, {"readonly": "True"}, False) vols = [vol5, vol4, vol3, vol2, vol1] # Ensure we have 5 total instances 
    def test_volume_get_all_filters_limit(self):
        vol1 = db.volume_create(self.ctxt, {'display_name': 'test1'})
        vol2 = db.volume_create(self.ctxt, {'display_name': 'test2'})
        vol3 = db.volume_create(self.ctxt, {'display_name': 'test2',
                                            'metadata': {'key1': 'val1'}})
        vol4 = db.volume_create(self.ctxt, {'display_name': 'test3',
                                            'metadata': {'key1': 'val1',
                                                         'key2': 'val2'}})
        vol5 = db.volume_create(self.ctxt, {'display_name': 'test3',
                                            'metadata': {'key2': 'val2',
                                                         'key3': 'val3'},
                                            'host': 'host5'})
        db.volume_admin_metadata_update(self.ctxt, vol5.id,
                                        {"readonly": "True"}, False)

        vols = [vol5, vol4, vol3, vol2, vol1]

        # Ensure we have 5 total instances
        self._assertEqualsVolumeOrderResult(vols)

        # No filters, test limit
        self._assertEqualsVolumeOrderResult(vols[:1], limit=1)
        self._assertEqualsVolumeOrderResult(vols[:4], limit=4)

        # Just the test2 volumes
        filters = {'display_name': 'test2'}
        self._assertEqualsVolumeOrderResult([vol3, vol2], filters=filters)
        self._assertEqualsVolumeOrderResult([vol3], limit=1, filters=filters)
        self._assertEqualsVolumeOrderResult([vol3, vol2], limit=2,
                                            filters=filters)
        self._assertEqualsVolumeOrderResult([vol3, vol2], limit=100,
                                            filters=filters)

        # metadata filters
        filters = {'metadata': {'key1': 'val1'}}
        self._assertEqualsVolumeOrderResult([vol4, vol3], filters=filters)
        self._assertEqualsVolumeOrderResult([vol4], limit=1, filters=filters)
        self._assertEqualsVolumeOrderResult([vol4, vol3], limit=10,
                                            filters=filters)

        filters = {'metadata': {'readonly': 'True'}}
        self._assertEqualsVolumeOrderResult([vol5], filters=filters)

        filters = {'metadata': {'key1': 'val1', 'key2': 'val2'}}
        self._assertEqualsVolumeOrderResult([vol4], filters=filters)
        self._assertEqualsVolumeOrderResult([vol4], limit=1, filters=filters)

        # No match
        filters = {'metadata': {'key1': 'val1', 'key2': 'val2',
                                'key3': 'val3'}}
        self._assertEqualsVolumeOrderResult([], filters=filters)
        filters = {'metadata': {'key1': 'val1', 'key2': 'bogus'}}
        self._assertEqualsVolumeOrderResult([], filters=filters)
        filters = {'metadata': {'key1': 'val1', 'key2': 'val1'}}
        self._assertEqualsVolumeOrderResult([], filters=filters)

        # Combination
        filters = {'display_name': 'test2', 'metadata': {'key1': 'val1'}}
        self._assertEqualsVolumeOrderResult([vol3], filters=filters)
        self._assertEqualsVolumeOrderResult([vol3], limit=1, filters=filters)
        self._assertEqualsVolumeOrderResult([vol3], limit=100,
                                            filters=filters)
        filters = {'display_name': 'test3',
                   'metadata': {'key2': 'val2', 'key3': 'val3'},
                   'host': 'host5'}
        self._assertEqualsVolumeOrderResult([vol5], filters=filters)
        self._assertEqualsVolumeOrderResult([vol5], limit=1, filters=filters)

    def test_volume_get_no_migration_targets(self):
        """Verifies the unique 'no_migration_targets'=True filter.

        This filter returns volumes with either a NULL 'migration_status'
        or a non-NULL value that does not start with 'target:'.
        """
        vol1 = db.volume_create(self.ctxt, {'display_name': 'test1'})
        vol2 = db.volume_create(self.ctxt, {'display_name': 'test2',
                                            'migration_status': 'bogus'})
        vol3 = db.volume_create(self.ctxt, {'display_name': 'test3',
                                            'migration_status': 'btarget:'})
        vol4 = db.volume_create(self.ctxt, {'display_name': 'test4',
                                            'migration_status': 'target:'})

        # Ensure we have 4 total instances, default sort of created_at (desc)
        self._assertEqualsVolumeOrderResult([vol4, vol3, vol2, vol1])

        # Apply the unique filter
        filters = {'no_migration_targets': True}
        self._assertEqualsVolumeOrderResult([vol3, vol2, vol1],
                                            filters=filters)
        self._assertEqualsVolumeOrderResult([vol3, vol2], limit=2,
                                            filters=filters)

        filters = {'no_migration_targets': True, 'display_name': 'test4'}
        self._assertEqualsVolumeOrderResult([], filters=filters)

    def test_volume_get_all_by_filters_sort_keys(self):
        # Volumes that will match the query
        test_h1_avail = db.volume_create(self.ctxt, {'display_name': 'test',
                                                     'status': 'available',
                                                     'host': 'h1'})
        test_h1_error = db.volume_create(self.ctxt, {'display_name': 'test',
                                                     'status': 'error',
                                                     'host': 'h1'})
        test_h1_error2 = db.volume_create(self.ctxt, {'display_name': 'test',
                                                      'status': 'error',
                                                      'host': 'h1'})
        test_h2_avail = db.volume_create(self.ctxt, {'display_name': 'test',
                                                     'status': 'available',
                                                     'host': 'h2'})
        test_h2_error = db.volume_create(self.ctxt, {'display_name': 'test',
                                                     'status': 'error',
                                                     'host': 'h2'})
        test_h2_error2 = db.volume_create(self.ctxt, {'display_name': 'test',
                                                      'status': 'error',
                                                      'host': 'h2'})
        # Other volumes in the DB, will not match the name filter
        other_error = db.volume_create(self.ctxt, {'display_name': 'other',
                                                   'status': 'error',
                                                   'host': 'a'})
        other_active = db.volume_create(self.ctxt, {'display_name': 'other',
                                                    'status': 'available',
                                                    'host': 'a'})
        filters = {'display_name': 'test'}

        # Verify different sort key/direction combinations
        sort_keys = ['host', 'status', 'created_at']
        sort_dirs = ['asc', 'asc', 'asc']
        correct_order = [test_h1_avail, test_h1_error, test_h1_error2,
                         test_h2_avail, test_h2_error, test_h2_error2]
        self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
                                            sort_keys=sort_keys,
                                            sort_dirs=sort_dirs)

        sort_dirs = ['asc', 'desc', 'asc']
        correct_order = [test_h1_error, test_h1_error2, test_h1_avail,
                         test_h2_error, test_h2_error2, test_h2_avail]
        self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
                                            sort_keys=sort_keys,
                                            sort_dirs=sort_dirs)

        sort_dirs = ['desc', 'desc', 'asc']
        correct_order = [test_h2_error, test_h2_error2, test_h2_avail,
                         test_h1_error, test_h1_error2, test_h1_avail]
        self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
                                            sort_keys=sort_keys,
                                            sort_dirs=sort_dirs)

        # created_at is added by default if not supplied, descending order
        sort_keys = ['host', 'status']
        sort_dirs = ['desc', 'desc']
        correct_order = [test_h2_error2, test_h2_error, test_h2_avail,
                         test_h1_error2, test_h1_error, test_h1_avail]
        self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
                                            sort_keys=sort_keys,
                                            sort_dirs=sort_dirs)

        sort_dirs = ['asc', 'asc']
        correct_order = [test_h1_avail, test_h1_error, test_h1_error2,
                         test_h2_avail, test_h2_error, test_h2_error2]
        self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
                                            sort_keys=sort_keys,
                                            sort_dirs=sort_dirs)

        # Remove the name filter
        correct_order = [other_active, other_error,
                         test_h1_avail, test_h1_error, test_h1_error2,
                         test_h2_avail, test_h2_error, test_h2_error2]
        self._assertEqualsVolumeOrderResult(correct_order,
                                            sort_keys=sort_keys,
                                            sort_dirs=sort_dirs)
        # No sort data, default sort of created_at, id (desc)
        correct_order = [other_active, other_error,
                         test_h2_error2, test_h2_error, test_h2_avail,
                         test_h1_error2, test_h1_error, test_h1_avail]
        self._assertEqualsVolumeOrderResult(correct_order)

    def test_volume_get_all_by_filters_sort_keys_paginate(self):
        """Verifies sort order with pagination."""
        # Volumes that will match the query
        test1_avail = db.volume_create(self.ctxt, {'display_name': 'test',
                                                   'size': 1,
                                                   'status': 'available'})
        test1_error = db.volume_create(self.ctxt, {'display_name': 'test',
                                                   'size': 1,
                                                   'status': 'error'})
        test1_error2 = db.volume_create(self.ctxt, {'display_name': 'test',
                                                    'size': 1,
                                                    'status': 'error'})
        test2_avail = db.volume_create(self.ctxt, {'display_name': 'test',
                                                   'size': 2,
                                                   'status': 'available'})
        test2_error = db.volume_create(self.ctxt, {'display_name': 'test',
                                                   'size': 2,
                                                   'status': 'error'})
        test2_error2 = db.volume_create(self.ctxt, {'display_name': 'test',
                                                    'size': 2,
                                                    'status': 'error'})
        # Other volumes in the DB, will not match the name filter
        db.volume_create(self.ctxt, {'display_name': 'other'})
        db.volume_create(self.ctxt, {'display_name': 'other'})
        filters = {'display_name': 'test'}

        # Common sort information for every query
        sort_keys = ['size', 'status', 'created_at']
        sort_dirs = ['asc', 'desc', 'asc']

        # Overall correct volume order based on the sort keys
        correct_order = [test1_error, test1_error2, test1_avail,
                         test2_error, test2_error2, test2_avail]

        # Limits of 1, 2, and 3, verify that the volumes returned are in the
        # correct sorted order, update the marker to get the next correct page
        for limit in range(1, 4):
            marker = None
            # Include the maximum number of volumes (i.e., 6) to ensure that
            # the last query (with the marker pointing to the last volume)
            # returns 0 volumes
            for i in range(0, 7, limit):
                if i == len(correct_order):
                    correct = []
                else:
                    correct = correct_order[i:i + limit]
                vols = self._assertEqualsVolumeOrderResult(
                    correct, filters=filters, sort_keys=sort_keys,
                    sort_dirs=sort_dirs, limit=limit, marker=marker)
                if correct:
                    marker = vols[-1]['id']
                    self.assertEqual(correct[-1]['id'], marker)

    def test_volume_get_all_invalid_sort_key(self):
        for keys in (['foo'], ['display_name', 'foo']):
            self.assertRaises(exception.InvalidInput, db.volume_get_all,
                              self.ctxt, None, None, sort_keys=keys)

    def test_volume_update(self):
        volume = db.volume_create(self.ctxt, {'host': 'h1'})
        ref_a = db.volume_update(self.ctxt, volume['id'],
                                 {'host': 'h2',
                                  'metadata': {'m1': 'v1'}})
        volume = db.volume_get(self.ctxt, volume['id'])
        self.assertEqual('h2', volume['host'])
        expected = dict(ref_a)
        expected['volume_metadata'] = list(map(dict,
                                               expected['volume_metadata']))
        result = dict(volume)
        result['volume_metadata'] = list(map(dict,
                                             result['volume_metadata']))
        self.assertEqual(expected, result)

    def test_volume_update_nonexistent(self):
        self.assertRaises(exception.VolumeNotFound, db.volume_update,
                          self.ctxt, 42, {})

    def test_volume_metadata_get(self):
        metadata = {'a': 'b', 'c': 'd'}
        db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata})
        self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1))

    def test_volume_metadata_update(self):
        metadata1 = {'a': '1', 'c': '2'}
        metadata2 = {'a': '3', 'd': '5'}
        should_be = {'a': '3', 'c': '2', 'd': '5'}
        db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1})
        db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, False)
        self.assertEqual(should_be, db_meta)

    @mock.patch.object(db.sqlalchemy.api,
                       '_volume_glance_metadata_key_to_id',
                       return_value='1')
    def test_volume_glance_metadata_key_to_id_called(
            self, metadata_key_to_id_mock):
        image_metadata = {'abc': '123'}
        # create volume with metadata.
        db.volume_create(self.ctxt, {'id': 1, 'metadata': image_metadata})
        # delete metadata associated with the volume.
        db.volume_metadata_delete(self.ctxt, 1, 'abc',
                                  meta_type=common.METADATA_TYPES.image)
        # assert _volume_glance_metadata_key_to_id() was called exactly once
        metadata_key_to_id_mock.assert_called_once_with(self.ctxt, 1, 'abc')

    def test_case_sensitive_glance_metadata_delete(self):
        user_metadata = {'a': '1', 'c': '2'}
        image_metadata = {'abc': '123', 'ABC': '123'}
        # create volume with metadata.
        db.volume_create(self.ctxt, {'id': 1, 'metadata': user_metadata})
        # delete user metadata associated with the volume.
        db.volume_metadata_delete(self.ctxt, 1, 'c',
                                  meta_type=common.METADATA_TYPES.user)
        user_metadata.pop('c')
        self.assertEqual(user_metadata,
                         db.volume_metadata_get(self.ctxt, 1))
        # create image metadata associated with the volume.
        db.volume_metadata_update(self.ctxt, 1, image_metadata, False,
                                  meta_type=common.METADATA_TYPES.image)
        # delete image metadata associated with the volume.
        db.volume_metadata_delete(self.ctxt, 1, 'abc',
                                  meta_type=common.METADATA_TYPES.image)
        image_metadata.pop('abc')
        # parse the result to build the dict.
        rows = db.volume_glance_metadata_get(self.ctxt, 1)
        result = {}
        for row in rows:
            result[row['key']] = row['value']
        self.assertEqual(image_metadata, result)

    def test_volume_metadata_update_with_metatype(self):
        user_metadata1 = {'a': '1', 'c': '2'}
        user_metadata2 = {'a': '3', 'd': '5'}
        expected1 = {'a': '3', 'c': '2', 'd': '5'}
        image_metadata1 = {'e': '1', 'f': '2'}
        image_metadata2 = {'e': '3', 'g': '5'}
        expected2 = {'e': '3', 'f': '2', 'g': '5'}
        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
        db.volume_create(self.ctxt, {'id': 1, 'metadata': user_metadata1})
        # update user metadata associated with the volume.
        db_meta = db.volume_metadata_update(
            self.ctxt, 1, user_metadata2, False,
            meta_type=common.METADATA_TYPES.user)
        self.assertEqual(expected1, db_meta)
        # create image metadata associated with the volume.
        db_meta = db.volume_metadata_update(
            self.ctxt, 1, image_metadata1, False,
            meta_type=common.METADATA_TYPES.image)
        self.assertEqual(image_metadata1, db_meta)
        # update image metadata associated with the volume.
        db_meta = db.volume_metadata_update(
            self.ctxt, 1, image_metadata2, False,
            meta_type=common.METADATA_TYPES.image)
        self.assertEqual(expected2, db_meta)
        # update the volume with an invalid metadata type.
        self.assertRaises(exception.InvalidMetadataType,
                          db.volume_metadata_update, self.ctxt, 1,
                          image_metadata1, False,
                          FAKE_METADATA_TYPE.fake_type)

    def test_volume_metadata_update_delete(self):
        metadata1 = {'a': '1', 'c': '2'}
        metadata2 = {'a': '3', 'd': '4'}
        should_be = metadata2
        db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1})
        db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, True)
        self.assertEqual(should_be, db_meta)

    def test_volume_metadata_delete(self):
        metadata = {'a': 'b', 'c': 'd'}
        db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata})
        db.volume_metadata_delete(self.ctxt, 1, 'c')
        metadata.pop('c')
        self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1))
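
    # NOTE: volume metadata comes in two flavors selected by the meta_type
    # argument: common.METADATA_TYPES.user (regular volume metadata) and
    # common.METADATA_TYPES.image (glance image metadata attached to the
    # volume). A minimal sketch of deleting a single image-metadata key,
    # with hypothetical values:
    #
    #     db.volume_metadata_delete(ctxt, volume_id, 'some_key',
    #                               meta_type=common.METADATA_TYPES.image)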
    def test_volume_metadata_delete_with_metatype(self):
        user_metadata = {'a': '1', 'c': '2'}
        image_metadata = {'e': '1', 'f': '2'}
        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
        # test that user metadata is deleted when meta_type is specified.
        db.volume_create(self.ctxt, {'id': 1, 'metadata': user_metadata})
        db.volume_metadata_delete(self.ctxt, 1, 'c',
                                  meta_type=common.METADATA_TYPES.user)
        user_metadata.pop('c')
        self.assertEqual(user_metadata,
                         db.volume_metadata_get(self.ctxt, 1))
        # update the image metadata associated with the volume.
        db.volume_metadata_update(self.ctxt, 1, image_metadata, False,
                                  meta_type=common.METADATA_TYPES.image)
        # test that image metadata is deleted when meta_type is specified.
        db.volume_metadata_delete(self.ctxt, 1, 'e',
                                  meta_type=common.METADATA_TYPES.image)
        image_metadata.pop('e')
        # parse the result to build the dict.
        rows = db.volume_glance_metadata_get(self.ctxt, 1)
        result = {}
        for row in rows:
            result[row['key']] = row['value']
        self.assertEqual(image_metadata, result)
        # delete volume metadata with an invalid metadata type.
        self.assertRaises(exception.InvalidMetadataType,
                          db.volume_metadata_delete, self.ctxt, 1, 'f',
                          FAKE_METADATA_TYPE.fake_type)

    def test_volume_glance_metadata_create(self):
        volume = db.volume_create(self.ctxt, {'host': 'h1'})
        db.volume_glance_metadata_create(self.ctxt, volume['id'],
                                         'image_name',
                                         u'\xe4\xbd\xa0\xe5\xa5\xbd')
        glance_meta = db.volume_glance_metadata_get(self.ctxt, volume['id'])
        for meta_entry in glance_meta:
            if meta_entry.key == 'image_name':
                image_name = meta_entry.value
        self.assertEqual(u'\xe4\xbd\xa0\xe5\xa5\xbd', image_name)

    def test_volume_glance_metadata_list_get(self):
        """Test volume_glance_metadata_list_get in DB API."""
        db.volume_create(self.ctxt, {'id': 'fake1', 'status': 'available',
                                     'host': 'test', 'provider_location': '',
                                     'size': 1})
        db.volume_glance_metadata_create(self.ctxt, 'fake1', 'key1', 'value1')
        db.volume_glance_metadata_create(self.ctxt, 'fake1', 'key2', 'value2')

        db.volume_create(self.ctxt, {'id': 'fake2', 'status': 'available',
                                     'host': 'test', 'provider_location': '',
                                     'size': 1})
        db.volume_glance_metadata_create(self.ctxt, 'fake2', 'key3', 'value3')
        db.volume_glance_metadata_create(self.ctxt, 'fake2', 'key4', 'value4')

        expect_result = [{'volume_id': 'fake1', 'key': 'key1',
                          'value': 'value1'},
                         {'volume_id': 'fake1', 'key': 'key2',
                          'value': 'value2'},
                         {'volume_id': 'fake2', 'key': 'key3',
                          'value': 'value3'},
                         {'volume_id': 'fake2', 'key': 'key4',
                          'value': 'value4'}]
        self._assertEqualListsOfObjects(expect_result,
                                        db.volume_glance_metadata_list_get(
                                            self.ctxt, ['fake1', 'fake2']),
                                        ignored_keys=['id', 'snapshot_id',
                                                      'created_at', 'deleted',
                                                      'deleted_at',
                                                      'updated_at'])


class DBAPISnapshotTestCase(BaseTest):
    """Tests for cinder.db.api.snapshot_*."""

    def test_snapshot_data_get_for_project(self):
        actual = db.snapshot_data_get_for_project(self.ctxt, 'project1')
        self.assertEqual((0, 0), actual)

        db.volume_create(self.ctxt, {'id': 1, 'project_id': 'project1',
                                     'size': 42})
        db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1,
                                       'project_id': 'project1',
                                       'volume_size': 42})
        actual = db.snapshot_data_get_for_project(self.ctxt, 'project1')
        self.assertEqual((1, 42), actual)

    def test_snapshot_get_all_by_filter(self):
        db.volume_create(self.ctxt, {'id': 1})
        db.volume_create(self.ctxt, {'id': 2})
        snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1,
                                                   'display_name': 'one',
                                                   'status': 'available'})
        snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 1,
                                                   'display_name': 'two',
                                                   'status': 'creating'})
        snapshot3 = db.snapshot_create(self.ctxt, {'id': 3, 'volume_id': 2,
                                                   'display_name': 'three',
                                                   'status': 'available'})
        # no filter
        filters = {}
        snapshots = db.snapshot_get_all(self.ctxt, filters=filters)
        self.assertEqual(3, len(snapshots))
        # single match
        filters = {'display_name': 'two'}
        self._assertEqualListsOfObjects([snapshot2],
                                        db.snapshot_get_all(self.ctxt,
                                                            filters),
                                        ignored_keys=['metadata', 'volume'])
        filters = {'volume_id': 2}
        self._assertEqualListsOfObjects([snapshot3],
                                        db.snapshot_get_all(self.ctxt,
                                                            filters),
                                        ignored_keys=['metadata', 'volume'])
        # filter no match
        filters = {'volume_id': 5}
        self._assertEqualListsOfObjects([],
                                        db.snapshot_get_all(self.ctxt,
                                                            filters),
                                        ignored_keys=['metadata', 'volume'])
        filters = {'status': 'error'}
        self._assertEqualListsOfObjects([],
                                        db.snapshot_get_all(self.ctxt,
                                                            filters),
                                        ignored_keys=['metadata', 'volume'])
        # multiple match
        filters = {'volume_id': 1}
        self._assertEqualListsOfObjects([snapshot1, snapshot2],
                                        db.snapshot_get_all(self.ctxt,
                                                            filters),
                                        ignored_keys=['metadata', 'volume'])
        filters = {'status': 'available'}
        self._assertEqualListsOfObjects([snapshot1, snapshot3],
                                        db.snapshot_get_all(self.ctxt,
                                                            filters),
                                        ignored_keys=['metadata', 'volume'])
        filters = {'volume_id': 1, 'status': 'available'}
        self._assertEqualListsOfObjects([snapshot1],
                                        db.snapshot_get_all(self.ctxt,
                                                            filters),
                                        ignored_keys=['metadata', 'volume'])
        filters = {'fake_key': 'fake'}
        self._assertEqualListsOfObjects([],
                                        db.snapshot_get_all(self.ctxt,
                                                            filters),
                                        ignored_keys=['metadata', 'volume'])

    def test_snapshot_get_by_host(self):
        db.volume_create(self.ctxt, {'id': 1, 'host': 'host1'})
        db.volume_create(self.ctxt, {'id': 2, 'host': 'host2'})
        snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1})
        snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2,
                                                   'status': 'error'})

        self._assertEqualListsOfObjects([snapshot1],
                                        db.snapshot_get_by_host(self.ctxt,
                                                                'host1'),
                                        ignored_keys='volume')
        self._assertEqualListsOfObjects([snapshot2],
                                        db.snapshot_get_by_host(self.ctxt,
                                                                'host2'),
                                        ignored_keys='volume')
        self._assertEqualListsOfObjects(
            [], db.snapshot_get_by_host(self.ctxt, 'host2',
                                        {'status': 'available'}),
            ignored_keys='volume')
        self._assertEqualListsOfObjects(
            [snapshot2], db.snapshot_get_by_host(self.ctxt, 'host2',
                                                 {'status': 'error'}),
            ignored_keys='volume')
        self._assertEqualListsOfObjects(
            [], db.snapshot_get_by_host(self.ctxt, 'host2',
                                        {'fake_key': 'fake'}),
            ignored_keys='volume')
        # If host is None or an empty string, an empty list should be
        # returned.
        self.assertEqual([], db.snapshot_get_by_host(self.ctxt, None))
        self.assertEqual([], db.snapshot_get_by_host(self.ctxt, ''))
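
    # NOTE: Cinder host strings may carry a pool suffix ('host#pool'). As
    # the next test verifies, a bare host appears to match snapshots on any
    # of its pools, while a pool-qualified host only matches that exact
    # pool, e.g. (hypothetical values):
    #
    #     db.snapshot_get_by_host(ctxt, 'host1')        # host1#pool1, #pool2
    #     db.snapshot_get_by_host(ctxt, 'host1#pool1')  # that pool only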
    def test_snapshot_get_by_host_with_pools(self):
        db.volume_create(self.ctxt, {'id': 1, 'host': 'host1#pool1'})
        db.volume_create(self.ctxt, {'id': 2, 'host': 'host1#pool2'})
        snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1})
        snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2})

        self._assertEqualListsOfObjects([snapshot1, snapshot2],
                                        db.snapshot_get_by_host(self.ctxt,
                                                                'host1'),
                                        ignored_keys='volume')
        self._assertEqualListsOfObjects([snapshot1],
                                        db.snapshot_get_by_host(self.ctxt,
                                                                'host1#pool1'),
                                        ignored_keys='volume')
        self._assertEqualListsOfObjects([],
                                        db.snapshot_get_by_host(self.ctxt,
                                                                'host1#pool0'),
                                        ignored_keys='volume')

    def test_snapshot_get_all_by_project(self):
        db.volume_create(self.ctxt, {'id': 1})
        db.volume_create(self.ctxt, {'id': 2})
        snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1,
                                                   'project_id': 'project1'})
        snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2,
                                                   'status': 'error',
                                                   'project_id': 'project2'})

        self._assertEqualListsOfObjects([snapshot1],
                                        db.snapshot_get_all_by_project(
                                            self.ctxt, 'project1'),
                                        ignored_keys='volume')
        self._assertEqualListsOfObjects([snapshot2],
                                        db.snapshot_get_all_by_project(
                                            self.ctxt, 'project2'),
                                        ignored_keys='volume')
        self._assertEqualListsOfObjects(
            [], db.snapshot_get_all_by_project(self.ctxt, 'project2',
                                               {'status': 'available'}),
            ignored_keys='volume')
        self._assertEqualListsOfObjects(
            [snapshot2], db.snapshot_get_all_by_project(self.ctxt, 'project2',
                                                        {'status': 'error'}),
            ignored_keys='volume')
        self._assertEqualListsOfObjects(
            [], db.snapshot_get_all_by_project(self.ctxt, 'project2',
                                               {'fake_key': 'fake'}),
            ignored_keys='volume')

    def test_snapshot_metadata_get(self):
        metadata = {'a': 'b', 'c': 'd'}
        db.volume_create(self.ctxt, {'id': 1})
        db.snapshot_create(self.ctxt,
                           {'id': 1, 'volume_id': 1, 'metadata': metadata})
        self.assertEqual(metadata, db.snapshot_metadata_get(self.ctxt, 1))

    def test_snapshot_metadata_update(self):
        metadata1 = {'a': '1', 'c': '2'}
        metadata2 = {'a': '3', 'd': '5'}
        should_be = {'a': '3', 'c': '2', 'd': '5'}
        db.volume_create(self.ctxt, {'id': 1})
        db.snapshot_create(self.ctxt,
                           {'id': 1, 'volume_id': 1, 'metadata': metadata1})
        db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, False)
        self.assertEqual(should_be, db_meta)

    def test_snapshot_metadata_update_delete(self):
        metadata1 = {'a': '1', 'c': '2'}
        metadata2 = {'a': '3', 'd': '5'}
        should_be = metadata2
        db.volume_create(self.ctxt, {'id': 1})
        db.snapshot_create(self.ctxt,
                           {'id': 1, 'volume_id': 1, 'metadata': metadata1})
        db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, True)
        self.assertEqual(should_be, db_meta)

    def test_snapshot_metadata_delete(self):
        metadata = {'a': '1', 'c': '2'}
        should_be = {'a': '1'}
        db.volume_create(self.ctxt, {'id': 1})
        db.snapshot_create(self.ctxt,
                           {'id': 1, 'volume_id': 1, 'metadata': metadata})
        db.snapshot_metadata_delete(self.ctxt, 1, 'c')
        self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))


class DBAPICgsnapshotTestCase(BaseTest):
    """Tests for cinder.db.api.cgsnapshot_*."""

    def test_cgsnapshot_get_all_by_filter(self):
        cgsnapshot1 = db.cgsnapshot_create(self.ctxt,
                                           {'id': 1,
                                            'consistencygroup_id': 'g1'})
        cgsnapshot2 = db.cgsnapshot_create(self.ctxt,
                                           {'id': 2,
                                            'consistencygroup_id': 'g1'})
        cgsnapshot3 = db.cgsnapshot_create(self.ctxt,
                                           {'id': 3,
                                            'consistencygroup_id': 'g2'})
        tests = [
            ({'consistencygroup_id': 'g1'}, [cgsnapshot1, cgsnapshot2]),
            ({'id': 3}, [cgsnapshot3]),
            ({'fake_key': 'fake'}, [])
        ]

        # no filter
        filters = None
        cgsnapshots = db.cgsnapshot_get_all(self.ctxt, filters=filters)
        self.assertEqual(3, len(cgsnapshots))

        for filters, expected in tests:
            self._assertEqualListsOfObjects(expected,
                                            db.cgsnapshot_get_all(self.ctxt,
                                                                  filters))

    def test_cgsnapshot_get_all_by_group(self):
        cgsnapshot1 = db.cgsnapshot_create(self.ctxt,
                                           {'id': 1,
                                            'consistencygroup_id': 'g1'})
        cgsnapshot2 = db.cgsnapshot_create(self.ctxt,
                                           {'id': 2,
                                            'consistencygroup_id': 'g1'})
        db.cgsnapshot_create(self.ctxt,
                             {'id': 3, 'consistencygroup_id': 'g2'})
        tests = [
            ({'consistencygroup_id': 'g1'}, [cgsnapshot1, cgsnapshot2]),
            ({'id': 3}, []),
            ({'fake_key': 'fake'}, []),
            ({'consistencygroup_id': 'g2'}, []),
            (None, [cgsnapshot1, cgsnapshot2]),
        ]
        for filters, expected in tests:
            self._assertEqualListsOfObjects(expected,
                                            db.cgsnapshot_get_all_by_group(
                                                self.ctxt, 'g1', filters))

        db.cgsnapshot_destroy(self.ctxt, '1')
        db.cgsnapshot_destroy(self.ctxt, '2')
        db.cgsnapshot_destroy(self.ctxt, '3')

    def test_cgsnapshot_get_all_by_project(self):
        cgsnapshot1 = db.cgsnapshot_create(self.ctxt,
                                           {'id': 1,
                                            'consistencygroup_id': 'g1',
                                            'project_id': 1})
        cgsnapshot2 = db.cgsnapshot_create(self.ctxt,
                                           {'id': 2,
                                            'consistencygroup_id': 'g1',
                                            'project_id': 1})
        project_id = 1
        tests = [
            ({'id': 1}, [cgsnapshot1]),
            ({'consistencygroup_id': 'g1'}, [cgsnapshot1, cgsnapshot2]),
            ({'fake_key': 'fake'}, [])
        ]
        for filters, expected in tests:
            self._assertEqualListsOfObjects(expected,
                                            db.cgsnapshot_get_all_by_project(
                                                self.ctxt, project_id,
                                                filters))


class DBAPIVolumeTypeTestCase(BaseTest):
    """Tests for the db.api.volume_type_* methods."""

    def setUp(self):
        self.ctxt = context.get_admin_context()
        super(DBAPIVolumeTypeTestCase, self).setUp()

    def test_volume_type_create_exists(self):
        vt = db.volume_type_create(self.ctxt, {'name': 'n1'})
        self.assertRaises(exception.VolumeTypeExists,
                          db.volume_type_create, self.ctxt, {'name': 'n1'})
        self.assertRaises(exception.VolumeTypeExists,
                          db.volume_type_create, self.ctxt,
                          {'name': 'n2', 'id': vt['id']})

    def test_volume_type_access_remove(self):
        vt = db.volume_type_create(self.ctxt, {'name': 'n1'})
        db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project')
        vtas = db.volume_type_access_get_all(self.ctxt, vt['id'])
        self.assertEqual(1, len(vtas))
        db.volume_type_access_remove(self.ctxt, vt['id'], 'fake_project')
        vtas = db.volume_type_access_get_all(self.ctxt, vt['id'])
        self.assertEqual(0, len(vtas))

    def test_volume_type_access_remove_high_id(self):
        vt = db.volume_type_create(self.ctxt, {'name': 'n1'})
        vta = db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project')
        vtas = db.volume_type_access_get_all(self.ctxt, vt['id'])
        self.assertEqual(1, len(vtas))

        # NOTE(dulek): Bug 1496747 uncovered problems when deleting accesses
        # with id column higher than 128. This is regression test for that
        # case.
        session = sqlalchemy_api.get_session()
        vta.id = 150
        vta.save(session=session)
        session.close()

        db.volume_type_access_remove(self.ctxt, vt['id'], 'fake_project')
        vtas = db.volume_type_access_get_all(self.ctxt, vt['id'])
        self.assertEqual(0, len(vtas))
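
    # NOTE: the next test guards against lazy loading: the extra specs
    # reached through volume.volume_type must already be loaded by the time
    # the SQLAlchemy session that fetched the volume is closed, otherwise
    # touching them would be expected to raise a detached-instance style
    # error.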
    def test_get_volume_type_extra_specs(self):
        # Ensure that volume type extra specs can be accessed after
        # the DB session is closed.
        vt_extra_specs = {'mock_key': 'mock_value'}
        vt = db.volume_type_create(self.ctxt,
                                   {'name': 'n1',
                                    'extra_specs': vt_extra_specs})
        volume_ref = db.volume_create(self.ctxt, {'volume_type_id': vt.id})

        session = sqlalchemy_api.get_session()
        volume = sqlalchemy_api._volume_get(self.ctxt, volume_ref.id,
                                            session=session)
        session.close()

        actual_specs = {}
        for spec in volume.volume_type.extra_specs:
            actual_specs[spec.key] = spec.value
        self.assertEqual(vt_extra_specs, actual_specs)


class DBAPIEncryptionTestCase(BaseTest):
    """Tests for the db.api.volume_(type_)?encryption_* methods."""

    _ignored_keys = [
        'deleted',
        'deleted_at',
        'created_at',
        'updated_at',
        'encryption_id',
    ]

    def setUp(self):
        super(DBAPIEncryptionTestCase, self).setUp()
        self.created = \
            [db.volume_type_encryption_create(self.ctxt,
                                              values['volume_type_id'],
                                              values)
             for values in self._get_values()]

    def _get_values(self, one=False, updated=False):
        base_values = {
            'cipher': 'fake_cipher',
            'key_size': 256,
            'provider': 'fake_provider',
            'volume_type_id': 'fake_type',
            'control_location': 'front-end',
        }
        updated_values = {
            'cipher': 'fake_updated_cipher',
            'key_size': 512,
            'provider': 'fake_updated_provider',
            'volume_type_id': 'fake_type',
            'control_location': 'front-end',
        }

        if one:
            return base_values

        if updated:
            values = updated_values
        else:
            values = base_values

        def compose(val, step):
            if isinstance(val, str):
                step = str(step)
            return val + step

        return [{k: compose(v, i) for k, v in values.items()}
                for i in range(1, 4)]

    def test_volume_type_encryption_create(self):
        values = self._get_values()
        for i, encryption in enumerate(self.created):
            self._assertEqualObjects(values[i], encryption,
                                     self._ignored_keys)

    def test_volume_type_encryption_update(self):
        update_values = self._get_values(updated=True)
        self.updated = \
            [db.volume_type_encryption_update(self.ctxt,
                                              values['volume_type_id'],
                                              values)
             for values in update_values]
        for i, encryption in enumerate(self.updated):
            self._assertEqualObjects(update_values[i], encryption,
                                     self._ignored_keys)

    def test_volume_type_encryption_get(self):
        for encryption in self.created:
            encryption_get = \
                db.volume_type_encryption_get(self.ctxt,
                                              encryption['volume_type_id'])
            self._assertEqualObjects(encryption, encryption_get,
                                     self._ignored_keys)

    def test_volume_type_encryption_update_with_no_create(self):
        self.assertRaises(exception.VolumeTypeEncryptionNotFound,
                          db.volume_type_encryption_update,
                          self.ctxt, 'fake_no_create_type',
                          {'cipher': 'fake_updated_cipher'})

    def test_volume_type_encryption_delete(self):
        values = {
            'cipher': 'fake_cipher',
            'key_size': 256,
            'provider': 'fake_provider',
            'volume_type_id': 'fake_type',
            'control_location': 'front-end',
        }

        encryption = db.volume_type_encryption_create(self.ctxt, 'fake_type',
                                                      values)
        self._assertEqualObjects(values, encryption, self._ignored_keys)

        db.volume_type_encryption_delete(self.ctxt,
                                         encryption['volume_type_id'])
        encryption_get = \
            db.volume_type_encryption_get(self.ctxt,
                                          encryption['volume_type_id'])
        self.assertIsNone(encryption_get)

    def test_volume_type_encryption_delete_no_create(self):
        self.assertRaises(exception.VolumeTypeEncryptionNotFound,
                          db.volume_type_encryption_delete,
                          self.ctxt, 'fake_no_create_type')

    def test_volume_encryption_get(self):
        # normal volume -- metadata should be None
        volume = db.volume_create(self.ctxt, {})
        values = db.volume_encryption_metadata_get(self.ctxt, volume.id)
        self.assertEqual({'encryption_key_id': None}, values)

        # encrypted volume -- metadata should match volume type
        volume_type = self.created[0]
        volume = db.volume_create(
            self.ctxt, {'volume_type_id': volume_type['volume_type_id']})
        values = db.volume_encryption_metadata_get(self.ctxt, volume.id)
        expected = {
            'encryption_key_id': volume.encryption_key_id,
            'control_location': volume_type['control_location'],
            'cipher': volume_type['cipher'],
            'key_size': volume_type['key_size'],
            'provider': volume_type['provider'],
        }
        self.assertEqual(expected, values)


class DBAPIReservationTestCase(BaseTest):
    """Tests for db.api.reservation_* methods."""

    def setUp(self):
        super(DBAPIReservationTestCase, self).setUp()
        self.values = {
            'uuid': 'sample-uuid',
            'project_id': 'project1',
            'resource': 'resource',
            'delta': 42,
            'expire': (datetime.datetime.utcnow() +
                       datetime.timedelta(days=1)),
            'usage': {'id': 1}
        }

    def test_reservation_commit(self):
        reservations = _quota_reserve(self.ctxt, 'project1')
        expected = {'project_id': 'project1',
                    'volumes': {'reserved': 1, 'in_use': 0},
                    'gigabytes': {'reserved': 2, 'in_use': 0},
                    }
        self.assertEqual(expected,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           'project1'))
        db.reservation_commit(self.ctxt, reservations, 'project1')
        expected = {'project_id': 'project1',
                    'volumes': {'reserved': 0, 'in_use': 1},
                    'gigabytes': {'reserved': 0, 'in_use': 2},
                    }
        self.assertEqual(expected,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           'project1'))

    def test_reservation_rollback(self):
        reservations = _quota_reserve(self.ctxt, 'project1')
        expected = {'project_id': 'project1',
                    'volumes': {'reserved': 1, 'in_use': 0},
                    'gigabytes': {'reserved': 2, 'in_use': 0},
                    }
        self.assertEqual(expected,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           'project1'))
        db.reservation_rollback(self.ctxt, reservations, 'project1')
        expected = {'project_id': 'project1',
                    'volumes': {'reserved': 0, 'in_use': 0},
                    'gigabytes': {'reserved': 0, 'in_use': 0},
                    }
        self.assertEqual(expected,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           'project1'))
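
    # NOTE: reservation_expire is expected to delete reservations whose
    # 'expire' timestamp has passed and return their 'reserved' counts to
    # zero, while leaving any committed 'in_use' totals untouched.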
    def test_reservation_expire(self):
        self.values['expire'] = datetime.datetime.utcnow() + \
            datetime.timedelta(days=1)
        _quota_reserve(self.ctxt, 'project1')
        db.reservation_expire(self.ctxt)

        expected = {'project_id': 'project1',
                    'gigabytes': {'reserved': 0, 'in_use': 0},
                    'volumes': {'reserved': 0, 'in_use': 0}}
        self.assertEqual(expected,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           'project1'))


class DBAPIQuotaClassTestCase(BaseTest):
    """Tests for db.api.quota_class_* methods."""

    def setUp(self):
        super(DBAPIQuotaClassTestCase, self).setUp()
        self.sample_qc = db.quota_class_create(self.ctxt, 'test_qc',
                                               'test_resource', 42)

    def test_quota_class_get(self):
        qc = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource')
        self._assertEqualObjects(self.sample_qc, qc)

    def test_quota_class_destroy(self):
        db.quota_class_destroy(self.ctxt, 'test_qc', 'test_resource')
        self.assertRaises(exception.QuotaClassNotFound,
                          db.quota_class_get, self.ctxt,
                          'test_qc', 'test_resource')

    def test_quota_class_get_not_found(self):
        self.assertRaises(exception.QuotaClassNotFound,
                          db.quota_class_get, self.ctxt,
                          'nonexistent', 'nonexistent')

    def test_quota_class_get_all_by_name(self):
        db.quota_class_create(self.ctxt, 'test2', 'res1', 43)
        db.quota_class_create(self.ctxt, 'test2', 'res2', 44)
        self.assertEqual({'class_name': 'test_qc', 'test_resource': 42},
                         db.quota_class_get_all_by_name(self.ctxt,
                                                        'test_qc'))
        self.assertEqual({'class_name': 'test2', 'res1': 43, 'res2': 44},
                         db.quota_class_get_all_by_name(self.ctxt, 'test2'))

    def test_quota_class_update(self):
        db.quota_class_update(self.ctxt, 'test_qc', 'test_resource', 43)
        updated = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource')
        self.assertEqual(43, updated['hard_limit'])

    def test_quota_class_update_resource(self):
        old = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource')
        db.quota_class_update_resource(self.ctxt,
                                       'test_resource',
                                       'test_resource1')
        new = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource1')
        self.assertEqual(old.id, new.id)
        self.assertEqual('test_resource1', new.resource)

    def test_quota_class_destroy_all_by_name(self):
        db.quota_class_create(self.ctxt, 'test2', 'res1', 43)
        db.quota_class_create(self.ctxt, 'test2', 'res2', 44)
        db.quota_class_destroy_all_by_name(self.ctxt, 'test2')
        self.assertEqual({'class_name': 'test2'},
                         db.quota_class_get_all_by_name(self.ctxt, 'test2'))


class DBAPIQuotaTestCase(BaseTest):
    """Tests for db.api.quota_* methods."""

    def test_quota_create(self):
        quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
        self.assertEqual('resource', quota.resource)
        self.assertEqual(99, quota.hard_limit)
        self.assertEqual('project1', quota.project_id)

    def test_quota_get(self):
        quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
        quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
        self._assertEqualObjects(quota, quota_db)

    def test_quota_get_all_by_project(self):
        for i in range(3):
            for j in range(3):
                db.quota_create(self.ctxt, 'proj%d' % i, 'res%d' % j, j)
        for i in range(3):
            quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
            self.assertEqual({'project_id': 'proj%d' % i,
                              'res0': 0, 'res1': 1, 'res2': 2}, quotas_db)

    def test_quota_update(self):
        db.quota_create(self.ctxt, 'project1', 'resource1', 41)
        db.quota_update(self.ctxt, 'project1', 'resource1', 42)
        quota = db.quota_get(self.ctxt, 'project1', 'resource1')
        self.assertEqual(42, quota.hard_limit)
        self.assertEqual('resource1', quota.resource)
        self.assertEqual('project1', quota.project_id)

    def test_quota_update_resource(self):
        old = db.quota_create(self.ctxt, 'project1', 'resource1', 41)
        db.quota_update_resource(self.ctxt, 'resource1', 'resource2')
        new = db.quota_get(self.ctxt, 'project1', 'resource2')
        self.assertEqual(old.id, new.id)
        self.assertEqual('resource2', new.resource)

    def test_quota_update_nonexistent(self):
        self.assertRaises(exception.ProjectQuotaNotFound,
                          db.quota_update, self.ctxt,
                          'project1', 'resource1', 42)

    def test_quota_get_nonexistent(self):
        self.assertRaises(exception.ProjectQuotaNotFound,
                          db.quota_get, self.ctxt, 'project1', 'resource1')

    def test_quota_reserve(self):
        reservations = _quota_reserve(self.ctxt, 'project1')
        self.assertEqual(2, len(reservations))
        quota_usage = db.quota_usage_get_all_by_project(self.ctxt,
                                                        'project1')
        self.assertEqual({'project_id': 'project1',
                          'gigabytes': {'reserved': 2, 'in_use': 0},
                          'volumes': {'reserved': 1, 'in_use': 0}},
                         quota_usage)

    def test_quota_destroy(self):
        db.quota_create(self.ctxt, 'project1', 'resource1', 41)
        self.assertIsNone(db.quota_destroy(self.ctxt, 'project1',
                                           'resource1'))
        self.assertRaises(exception.ProjectQuotaNotFound, db.quota_get,
                          self.ctxt, 'project1', 'resource1')
    def test_quota_destroy_by_project(self):
        # Create limits, reservations and usage for project
        project = 'project1'
        _quota_reserve(self.ctxt, project)
        expected_usage = {'project_id': project,
                          'volumes': {'reserved': 1, 'in_use': 0},
                          'gigabytes': {'reserved': 2, 'in_use': 0}}
        expected = {'project_id': project, 'gigabytes': 2, 'volumes': 1}

        # Check that quotas are there
        self.assertEqual(expected,
                         db.quota_get_all_by_project(self.ctxt, project))
        self.assertEqual(expected_usage,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           project))

        # Destroy only the limits
        db.quota_destroy_by_project(self.ctxt, project)

        # Confirm that limits have been removed
        self.assertEqual({'project_id': project},
                         db.quota_get_all_by_project(self.ctxt, project))

        # But that usage and reservations are the same
        self.assertEqual(expected_usage,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           project))

    def test_quota_destroy_sqlalchemy_all_by_project(self):
        # Create limits, reservations and usage for project
        project = 'project1'
        _quota_reserve(self.ctxt, project)
        expected_usage = {'project_id': project,
                          'volumes': {'reserved': 1, 'in_use': 0},
                          'gigabytes': {'reserved': 2, 'in_use': 0}}
        expected = {'project_id': project, 'gigabytes': 2, 'volumes': 1}
        expected_result = {'project_id': project}

        # Check that quotas are there
        self.assertEqual(expected,
                         db.quota_get_all_by_project(self.ctxt, project))
        self.assertEqual(expected_usage,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           project))

        # Destroy all quotas using the SQLAlchemy implementation
        sqlalchemy_api.quota_destroy_all_by_project(self.ctxt, project,
                                                    only_quotas=False)

        # Check that all quotas have been deleted
        self.assertEqual(expected_result,
                         db.quota_get_all_by_project(self.ctxt, project))
        self.assertEqual(expected_result,
                         db.quota_usage_get_all_by_project(self.ctxt,
                                                           project))

    def test_quota_usage_get_nonexistent(self):
        self.assertRaises(exception.QuotaUsageNotFound,
                          db.quota_usage_get, self.ctxt,
                          'p1', 'nonexistent_resource')

    def test_quota_usage_get(self):
        _quota_reserve(self.ctxt, 'p1')
        quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'gigabytes')
        expected = {'resource': 'gigabytes', 'project_id': 'p1',
                    'in_use': 0, 'reserved': 2, 'total': 2}
        for key, value in expected.items():
            self.assertEqual(value, quota_usage[key], key)

    def test_quota_usage_get_all_by_project(self):
        _quota_reserve(self.ctxt, 'p1')
        expected = {'project_id': 'p1',
                    'volumes': {'in_use': 0, 'reserved': 1},
                    'gigabytes': {'in_use': 0, 'reserved': 2}}
        self.assertEqual(expected,
                         db.quota_usage_get_all_by_project(self.ctxt, 'p1'))


class DBAPIBackupTestCase(BaseTest):
    """Tests for db.api.backup_* methods."""

    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
                     'updated_at', 'data_timestamp']

    def setUp(self):
        super(DBAPIBackupTestCase, self).setUp()
        self.created = [db.backup_create(self.ctxt, values)
                        for values in self._get_values()]

    def _get_values(self, one=False):
        base_values = {
            'user_id': 'user',
            'project_id': 'project',
            'volume_id': 'volume',
            'host': 'host',
            'availability_zone': 'zone',
            'display_name': 'display',
            'display_description': 'description',
            'container': 'container',
            'status': 'status',
            'fail_reason': 'test',
            'service_metadata': 'metadata',
            'service': 'service',
            'parent_id': "parent_id",
            'size': 1000,
            'object_count': 100,
            'temp_volume_id': 'temp_volume_id',
            'temp_snapshot_id': 'temp_snapshot_id',
            'num_dependent_backups': 0,
            'snapshot_id': 'snapshot_id',
            'restore_volume_id': 'restore_volume_id'}
        if one:
            return base_values

        def compose(val, step):
            if isinstance(val, bool):
                return val
            if isinstance(val, str):
                step = str(step)
            return val + step

        return [{k: compose(v, i) for k, v in base_values.items()}
                for i in range(1, 4)]

    def test_backup_create(self):
        values = self._get_values()
        for i, backup in enumerate(self.created):
            self.assertTrue(backup['id'])
            self._assertEqualObjects(values[i], backup, self._ignored_keys)

    def test_backup_get(self):
        for backup in self.created:
            backup_get = db.backup_get(self.ctxt, backup['id'])
            self._assertEqualObjects(backup, backup_get)

    def test_backup_get_deleted(self):
        backup_dic = {'user_id': 'user',
                      'project_id': 'project',
                      'volume_id': fake_constants.volume_id,
                      'size': 1,
                      'object_count': 1}
        backup = objects.Backup(self.ctxt, **backup_dic)
        backup.create()
        backup.destroy()
        backup_get = db.backup_get(self.ctxt, backup.id, read_deleted='yes')
        self.assertEqual(backup.id, backup_get.id)

    def test_backup_get_all(self):
        all_backups = db.backup_get_all(self.ctxt)
        self._assertEqualListsOfObjects(self.created, all_backups)

    def test_backup_get_all_by_filter(self):
        filters = {'status': self.created[1]['status']}
        filtered_backups = db.backup_get_all(self.ctxt, filters=filters)
        self._assertEqualListsOfObjects([self.created[1]], filtered_backups)

        filters = {'display_name': self.created[1]['display_name']}
        filtered_backups = db.backup_get_all(self.ctxt, filters=filters)
        self._assertEqualListsOfObjects([self.created[1]], filtered_backups)

        filters = {'volume_id': self.created[1]['volume_id']}
        filtered_backups = db.backup_get_all(self.ctxt, filters=filters)
        self._assertEqualListsOfObjects([self.created[1]], filtered_backups)

        filters = {'fake_key': 'fake'}
        filtered_backups = db.backup_get_all(self.ctxt, filters=filters)
        self._assertEqualListsOfObjects([], filtered_backups)

    def test_backup_get_all_by_host(self):
        byhost = db.backup_get_all_by_host(self.ctxt,
                                           self.created[1]['host'])
        self._assertEqualObjects(self.created[1], byhost[0])

    def test_backup_get_all_by_project(self):
        byproj = db.backup_get_all_by_project(self.ctxt,
                                              self.created[1]['project_id'])
        self._assertEqualObjects(self.created[1], byproj[0])

        byproj = db.backup_get_all_by_project(self.ctxt,
                                              self.created[1]['project_id'],
                                              {'fake_key': 'fake'})
        self._assertEqualListsOfObjects([], byproj)

    def test_backup_get_all_by_volume(self):
        byvol = db.backup_get_all_by_volume(self.ctxt,
                                            self.created[1]['volume_id'])
        self._assertEqualObjects(self.created[1], byvol[0])

        byvol = db.backup_get_all_by_volume(self.ctxt,
                                            self.created[1]['volume_id'],
                                            {'fake_key': 'fake'})
        self._assertEqualListsOfObjects([], byvol)

    def test_backup_update_nonexistent(self):
        self.assertRaises(exception.BackupNotFound,
                          db.backup_update, self.ctxt, 'nonexistent', {})

    def test_backup_update(self):
        updated_values = self._get_values(one=True)
        update_id = self.created[1]['id']
        updated_backup = db.backup_update(self.ctxt, update_id,
                                          updated_values)
        self._assertEqualObjects(updated_values, updated_backup,
                                 self._ignored_keys)

    def test_backup_update_with_fail_reason_truncation(self):
        updated_values = self._get_values(one=True)
        fail_reason = '0' * 512
        updated_values['fail_reason'] = fail_reason
        update_id = self.created[1]['id']
        updated_backup = db.backup_update(self.ctxt, update_id,
                                          updated_values)
        updated_values['fail_reason'] = fail_reason[:255]
        self._assertEqualObjects(updated_values, updated_backup,
                                 self._ignored_keys)

    def test_backup_destroy(self):
        for backup in self.created:
            db.backup_destroy(self.ctxt, backup['id'])
        self.assertFalse(db.backup_get_all(self.ctxt))

    def test_backup_not_found(self):
        self.assertRaises(exception.BackupNotFound, db.backup_get,
                          self.ctxt, 'notinbase')


class DBAPIProcessSortParamTestCase(test.TestCase):

    def test_process_sort_params_defaults(self):
        """Verifies default sort parameters."""
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['asc', 'asc'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['asc', 'asc'], sort_dirs)
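
    # NOTE: a compact summary of the defaults verified above, with the
    # expected results shown as comments:
    #
    #     keys, dirs = sqlalchemy_api.process_sort_params(None, None)
    #     # keys == ['created_at', 'id'], dirs == ['asc', 'asc']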
    def test_process_sort_params_override_default_keys(self):
        """Verifies that the default keys can be overridden."""
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=['key1', 'key2', 'key3'])
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_override_default_dir(self):
        """Verifies that the default direction can be overridden."""
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_dir='dir1')
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['dir1', 'dir1'], sort_dirs)

    def test_process_sort_params_override_default_key_and_dir(self):
        """Verifies that the default key and dir can be overridden."""
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=['key1', 'key2', 'key3'],
            default_dir='dir1')
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=[], default_dir='dir1')
        self.assertEqual([], sort_keys)
        self.assertEqual([], sort_dirs)

    def test_process_sort_params_non_default(self):
        """Verifies that non-default keys are added correctly."""
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['key1', 'key2'], ['asc', 'desc'])
        self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
        # First sort_dir in list is used when adding the default keys
        self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_default(self):
        """Verifies that default keys are added correctly."""
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], ['asc', 'desc'])
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)

        # Include default key value, rely on default direction
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], [])
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_default_dir(self):
        """Verifies that the default dir is applied to all keys."""
        # Direction is set, ignore default dir
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], ['desc'], default_dir='dir')
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)

        # But should be used if no direction is set
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], [], default_dir='dir')
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)

    def test_process_sort_params_unequal_length(self):
        """Verifies that a sort direction list is applied correctly."""
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)

        # Default direction is the first direction in the list
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc', 'asc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)

    def test_process_sort_params_extra_dirs_lengths(self):
        """InvalidInput raised if more directions than keys are given."""
        self.assertRaises(exception.InvalidInput,
                          sqlalchemy_api.process_sort_params,
                          ['key1', 'key2'],
                          ['asc', 'desc', 'desc'])

    def test_process_sort_params_invalid_sort_dir(self):
        """InvalidInput raised if invalid directions are given."""
        for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]:
            self.assertRaises(exception.InvalidInput,
                              sqlalchemy_api.process_sort_params,
                              ['key'], dirs)


class DBAPIDriverInitiatorDataTestCase(BaseTest):
    initiator = 'iqn.1993-08.org.debian:01:222'
    namespace = 'test_ns'

    def test_driver_initiator_data_set_and_remove(self):
        data_key = 'key1'
        data_value = 'value1'
        update = {
            'set_values': {
                data_key: data_value
            }
        }

        db.driver_initiator_data_update(self.ctxt, self.initiator,
                                        self.namespace, update)
        data = db.driver_initiator_data_get(self.ctxt, self.initiator,
                                            self.namespace)

        self.assertIsNotNone(data)
        self.assertEqual(data_key, data[0]['key'])
        self.assertEqual(data_value, data[0]['value'])

        update = {'remove_values': [data_key]}

        db.driver_initiator_data_update(self.ctxt, self.initiator,
                                        self.namespace, update)
        data = db.driver_initiator_data_get(self.ctxt, self.initiator,
                                            self.namespace)

        self.assertIsNotNone(data)
        self.assertEqual([], data)

    def test_driver_initiator_data_no_changes(self):
        db.driver_initiator_data_update(self.ctxt, self.initiator,
                                        self.namespace, {})
        data = db.driver_initiator_data_get(self.ctxt, self.initiator,
                                            self.namespace)

        self.assertIsNotNone(data)
        self.assertEqual([], data)

    def test_driver_initiator_data_update_existing_values(self):
        data_key = 'key1'
        data_value = 'value1'
        update = {'set_values': {data_key: data_value}}
        db.driver_initiator_data_update(self.ctxt, self.initiator,
                                        self.namespace, update)
        data_value = 'value2'
        update = {'set_values': {data_key: data_value}}
        db.driver_initiator_data_update(self.ctxt, self.initiator,
                                        self.namespace, update)
        data = db.driver_initiator_data_get(self.ctxt, self.initiator,
                                            self.namespace)
        self.assertEqual(data_value, data[0]['value'])

    def test_driver_initiator_data_remove_not_existing(self):
        update = {'remove_values': ['key_that_doesnt_exist']}
        db.driver_initiator_data_update(self.ctxt, self.initiator,
                                        self.namespace, update)


class DBAPIImageVolumeCacheEntryTestCase(BaseTest):

    def _validate_entry(self, entry, host, image_id, image_updated_at,
                        volume_id, size):
        self.assertIsNotNone(entry)
        self.assertIsNotNone(entry['id'])
        self.assertEqual(host, entry['host'])
        self.assertEqual(image_id, entry['image_id'])
        self.assertEqual(image_updated_at, entry['image_updated_at'])
        self.assertEqual(volume_id, entry['volume_id'])
        self.assertEqual(size, entry['size'])
        self.assertIsNotNone(entry['last_used'])

    def test_create_delete_query_cache_entry(self):
        host = 'abc@123#poolz'
        image_id = 'c06764d7-54b0-4471-acce-62e79452a38b'
        image_updated_at = datetime.datetime.utcnow()
        volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1'
        size = 6

        entry = db.image_volume_cache_create(self.ctxt, host, image_id,
                                             image_updated_at, volume_id,
                                             size)
        self._validate_entry(entry, host, image_id, image_updated_at,
                             volume_id, size)

        entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
                                                               image_id,
                                                               host)
        self._validate_entry(entry, host, image_id, image_updated_at,
                             volume_id, size)

        entry = db.image_volume_cache_get_by_volume_id(self.ctxt, volume_id)
        self._validate_entry(entry, host, image_id, image_updated_at,
                             volume_id, size)

        db.image_volume_cache_delete(self.ctxt, entry['volume_id'])

        entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
                                                               image_id,
                                                               host)
        self.assertIsNone(entry)

    def test_cache_entry_get_multiple(self):
        host = 'abc@123#poolz'
        image_id = 'c06764d7-54b0-4471-acce-62e79452a38b'
        image_updated_at = datetime.datetime.utcnow()
        volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1'
        size = 6

        entries = []
        for i in range(0, 3):
            entries.append(db.image_volume_cache_create(self.ctxt, host,
                                                        image_id,
                                                        image_updated_at,
                                                        volume_id, size))
        # It is considered OK for the cache to have multiple of the same
        # entries. Expect only a single one from the query.
        entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
                                                               image_id,
                                                               host)
        self._validate_entry(entry, host, image_id, image_updated_at,
                             volume_id, size)

        # We expect to get the same one on subsequent queries due to the
        # last_used field being updated each time and ordering by it.
        entry_id = entry['id']
        entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
                                                               image_id,
                                                               host)
        self._validate_entry(entry, host, image_id, image_updated_at,
                             volume_id, size)
        self.assertEqual(entry_id, entry['id'])

        # Cleanup
        for entry in entries:
            db.image_volume_cache_delete(self.ctxt, entry['volume_id'])

    def test_cache_entry_get_none(self):
        host = 'abc@123#poolz'
        image_id = 'c06764d7-54b0-4471-acce-62e79452a38b'
        entry = db.image_volume_cache_get_and_update_last_used(self.ctxt,
                                                               image_id,
                                                               host)
        self.assertIsNone(entry)

    def test_cache_entry_get_by_volume_id_none(self):
        volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1'
        entry = db.image_volume_cache_get_by_volume_id(self.ctxt, volume_id)
        self.assertIsNone(entry)

    def test_cache_entry_get_all_for_host(self):
        host = 'abc@123#poolz'
        image_updated_at = datetime.datetime.utcnow()
        size = 6

        entries = []
        for i in range(0, 3):
            entries.append(db.image_volume_cache_create(self.ctxt, host,
                                                        'image-' + str(i),
                                                        image_updated_at,
                                                        'vol-' + str(i),
                                                        size))

        other_entry = db.image_volume_cache_create(self.ctxt,
                                                   'someOtherHost',
                                                   'image-12345',
                                                   image_updated_at,
                                                   'vol-1234',
                                                   size)

        found_entries = db.image_volume_cache_get_all_for_host(self.ctxt,
                                                               host)
        self.assertIsNotNone(found_entries)
        self.assertEqual(len(entries), len(found_entries))
        for found_entry in found_entries:
            for entry in entries:
                if found_entry['id'] == entry['id']:
                    self._validate_entry(found_entry,
                                         entry['host'],
                                         entry['image_id'],
                                         entry['image_updated_at'],
                                         entry['volume_id'],
                                         entry['size'])

        # Cleanup
        db.image_volume_cache_delete(self.ctxt, other_entry['volume_id'])
        for entry in entries:
            db.image_volume_cache_delete(self.ctxt, entry['volume_id'])

    def test_cache_entry_get_all_for_host_none(self):
        host = 'abc@123#poolz'
        entries = db.image_volume_cache_get_all_for_host(self.ctxt, host)
        self.assertEqual([], entries)
cinder-8.0.0/cinder/tests/unit/test_coho.py0000664000567000056710000003614112701406250022054 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Coho Data, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
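
# NOTE: the *_BIN constants below are canned ONC RPC reply headers decoded
# from hex with binascii.unhexlify(). Each one appears to encode a different
# RPC-level outcome (invalid header, no reply, MSG_DENIED, PROC_UNAVAIL,
# PROG_UNAVAIL, PROG_MISMATCH, GARBAGE_ARGS) so the error-handling tests can
# drive the client without a live Coho RPC server.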
# import binascii import errno import mock import os import six import socket import xdrlib from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers import coho from cinder.volume.drivers import nfs ADDR = 'coho-datastream-addr' PATH = '/test/path' RPC_PORT = 2049 LOCAL_PATH = '/opt/cinder/mnt/test/path' VOLUME = { 'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190', 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190', 'size': 128, 'volume_type': 'silver', 'volume_type_id': 'test', 'metadata': [{'key': 'type', 'service_label': 'silver'}], 'provider_location': None, 'id': 'bcc48c61-9691-4e5f-897c-793686093190', 'status': 'available', } CLONE_VOL = VOLUME.copy() CLONE_VOL['size'] = 256 SNAPSHOT = { 'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc', 'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc', 'size': 128, 'volume_type': None, 'provider_location': None, 'volume_size': 128, 'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190', 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191', } INVALID_SNAPSHOT = SNAPSHOT.copy() INVALID_SNAPSHOT['name'] = '' INVALID_HEADER_BIN = binascii.unhexlify('800000') NO_REPLY_BIN = binascii.unhexlify( 'aaaaa01000000010000000000000000000000003') MSG_DENIED_BIN = binascii.unhexlify( '00000a010000000110000000000000000000000000000003') PROC_UNAVAIL_BIN = binascii.unhexlify( '00000a010000000100000000000000000000000000000003') PROG_UNAVAIL_BIN = binascii.unhexlify( '000003c70000000100000000000000000000000000000001') PROG_MISMATCH_BIN = binascii.unhexlify( '00000f7700000001000000000000000000000000000000020000000100000001') GARBAGE_ARGS_BIN = binascii.unhexlify( '00000d6e0000000100000000000000000000000000000004') class CohoDriverTest(test.TestCase): """Test Coho Data's NFS volume driver.""" def __init__(self, *args, **kwargs): super(CohoDriverTest, self).__init__(*args, **kwargs) def setUp(self): super(CohoDriverTest, self).setUp() self.context = mock.Mock() self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.max_over_subscription_ratio = 20.0 self.configuration.reserved_percentage = 0 self.configuration.volume_backend_name = 'coho-1' self.configuration.coho_rpc_port = 2049 self.configuration.nfs_shares_config = '/etc/cinder/coho_shares' self.configuration.nfs_sparsed_volumes = True self.configuration.nfs_mount_point_base = '/opt/stack/cinder/mnt' self.configuration.nfs_mount_options = None self.configuration.nas_ip = None self.configuration.nas_share_path = None self.configuration.nas_mount_options = None def test_setup_failure_when_rpc_port_unconfigured(self): self.configuration.coho_rpc_port = None drv = coho.CohoDriver(configuration=self.configuration) self.mock_object(coho, 'LOG') self.mock_object(nfs.NfsDriver, 'do_setup') with self.assertRaisesRegex(exception.CohoException, ".*Coho rpc port is not configured.*"): drv.do_setup(self.context) self.assertTrue(coho.LOG.warning.called) self.assertTrue(nfs.NfsDriver.do_setup.called) def test_setup_failure_when_coho_rpc_port_is_invalid(self): self.configuration.coho_rpc_port = 99999 drv = coho.CohoDriver(configuration=self.configuration) self.mock_object(coho, 'LOG') self.mock_object(nfs.NfsDriver, 'do_setup') with self.assertRaisesRegex(exception.CohoException, "Invalid port number.*"): drv.do_setup(self.context) self.assertTrue(coho.LOG.warning.called) self.assertTrue(nfs.NfsDriver.do_setup.called) def test_create_snapshot(self): drv = coho.CohoDriver(configuration=self.configuration) mock_rpc_client = 
        mock_get_volume_location = self.mock_object(coho.CohoDriver,
                                                    '_get_volume_location')
        mock_get_volume_location.return_value = ADDR, PATH

        drv.create_snapshot(SNAPSHOT)

        mock_get_volume_location.assert_has_calls(
            [mock.call(SNAPSHOT['volume_id'])])
        mock_rpc_client.assert_has_calls(
            [mock.call(ADDR, self.configuration.coho_rpc_port),
             mock.call().create_snapshot(
                 os.path.join(PATH, SNAPSHOT['volume_name']),
                 SNAPSHOT['name'], 0)])

    def test_delete_snapshot(self):
        drv = coho.CohoDriver(configuration=self.configuration)
        mock_rpc_client = self.mock_object(coho, 'CohoRPCClient')
        mock_get_volume_location = self.mock_object(coho.CohoDriver,
                                                    '_get_volume_location')
        mock_get_volume_location.return_value = ADDR, PATH

        drv.delete_snapshot(SNAPSHOT)

        mock_get_volume_location.assert_has_calls(
            [mock.call(SNAPSHOT['volume_id'])])
        mock_rpc_client.assert_has_calls(
            [mock.call(ADDR, self.configuration.coho_rpc_port),
             mock.call().delete_snapshot(SNAPSHOT['name'])])

    def test_create_volume_from_snapshot(self):
        drv = coho.CohoDriver(configuration=self.configuration)
        mock_rpc_client = self.mock_object(coho, 'CohoRPCClient')
        mock_find_share = self.mock_object(drv, '_find_share')
        mock_find_share.return_value = ADDR + ':' + PATH

        drv.create_volume_from_snapshot(VOLUME, SNAPSHOT)

        mock_find_share.assert_has_calls(
            [mock.call(VOLUME['size'])])
        mock_rpc_client.assert_has_calls(
            [mock.call(ADDR, self.configuration.coho_rpc_port),
             mock.call().create_volume_from_snapshot(
                 SNAPSHOT['name'], os.path.join(PATH, VOLUME['name']))])

    def test_create_cloned_volume(self):
        drv = coho.CohoDriver(configuration=self.configuration)
        mock_find_share = self.mock_object(drv, '_find_share')
        mock_find_share.return_value = ADDR + ':' + PATH
        mock_execute = self.mock_object(drv, '_execute')
        mock_local_path = self.mock_object(drv, 'local_path')
        mock_local_path.return_value = LOCAL_PATH

        drv.create_cloned_volume(VOLUME, CLONE_VOL)

        mock_find_share.assert_has_calls(
            [mock.call(VOLUME['size'])])
        mock_local_path.assert_has_calls(
            [mock.call(VOLUME), mock.call(CLONE_VOL)])
        mock_execute.assert_has_calls(
            [mock.call('cp', LOCAL_PATH, LOCAL_PATH, run_as_root=True)])

    def test_extend_volume(self):
        drv = coho.CohoDriver(configuration=self.configuration)
        mock_execute = self.mock_object(drv, '_execute')
        mock_local_path = self.mock_object(drv, 'local_path')
        mock_local_path.return_value = LOCAL_PATH

        drv.extend_volume(VOLUME, 512)

        mock_local_path.assert_has_calls(
            [mock.call(VOLUME)])
        mock_execute.assert_has_calls(
            [mock.call('truncate', '-s', '512G',
                       LOCAL_PATH, run_as_root=True)])

    def test_snapshot_failure_when_source_does_not_exist(self):
        drv = coho.CohoDriver(configuration=self.configuration)
        self.mock_object(coho.Client, '_make_call')
        mock_init_socket = self.mock_object(coho.Client, 'init_socket')
        mock_unpack_uint = self.mock_object(xdrlib.Unpacker, 'unpack_uint')
        mock_unpack_uint.return_value = errno.ENOENT
        mock_get_volume_location = self.mock_object(coho.CohoDriver,
                                                    '_get_volume_location')
        mock_get_volume_location.return_value = ADDR, PATH

        with self.assertRaisesRegex(exception.CohoException,
                                    "No such file or directory.*"):
            drv.create_snapshot(SNAPSHOT)

        self.assertTrue(mock_init_socket.called)
        self.assertTrue(mock_unpack_uint.called)
        mock_get_volume_location.assert_has_calls(
            [mock.call(SNAPSHOT['volume_id'])])

    def test_snapshot_failure_with_invalid_input(self):
        drv = coho.CohoDriver(configuration=self.configuration)
        self.mock_object(coho.Client, '_make_call')
        mock_init_socket = self.mock_object(coho.Client, 'init_socket')
        mock_unpack_uint = self.mock_object(xdrlib.Unpacker, 'unpack_uint')
        mock_unpack_uint.return_value = errno.EINVAL
        mock_get_volume_location = self.mock_object(coho.CohoDriver,
                                                    '_get_volume_location')
        mock_get_volume_location.return_value = ADDR, PATH

        with self.assertRaisesRegex(exception.CohoException,
                                    "Invalid argument"):
            drv.delete_snapshot(INVALID_SNAPSHOT)

        self.assertTrue(mock_init_socket.called)
        self.assertTrue(mock_unpack_uint.called)
        mock_get_volume_location.assert_has_calls(
            [mock.call(INVALID_SNAPSHOT['volume_id'])])

    def test_snapshot_failure_when_remote_is_unreachable(self):
        drv = coho.CohoDriver(configuration=self.configuration)
        mock_get_volume_location = self.mock_object(coho.CohoDriver,
                                                    '_get_volume_location')
        mock_get_volume_location.return_value = 'unknown-address', PATH

        with self.assertRaisesRegex(exception.CohoException,
                                    "Failed to establish connection.*"):
            drv.create_snapshot(SNAPSHOT)

        mock_get_volume_location.assert_has_calls(
            [mock.call(SNAPSHOT['volume_id'])])

    def test_rpc_client_make_call_proper_order(self):
        """This test ensures that the RPC client logic is correct.

        When the RPC client's make_call function is called it creates
        a packet and sends it to the Coho cluster RPC server. This test
        ensures that the functions needed to complete the process are
        called in the proper order with valid arguments.
        """
        mock_packer = self.mock_object(xdrlib, 'Packer')
        mock_unpacker = self.mock_object(xdrlib, 'Unpacker')
        mock_unpacker.return_value.unpack_uint.return_value = 0
        mock_socket = self.mock_object(socket, 'socket')
        mock_init_call = self.mock_object(coho.Client, 'init_call')
        mock_init_call.return_value = (1, 2)
        mock_sendrecord = self.mock_object(coho.Client, '_sendrecord')
        mock_recvrecord = self.mock_object(coho.Client, '_recvrecord')
        mock_recvrecord.return_value = 'test_reply'
        mock_unpack_replyheader = self.mock_object(coho.Client,
                                                   'unpack_replyheader')
        mock_unpack_replyheader.return_value = (123, 1)

        rpc_client = coho.CohoRPCClient(ADDR, RPC_PORT)
        rpc_client.create_volume_from_snapshot('src', 'dest')

        self.assertTrue(mock_sendrecord.called)
        self.assertTrue(mock_unpack_replyheader.called)
        mock_packer.assert_has_calls([mock.call().reset()])
        mock_unpacker.assert_has_calls(
            [mock.call().reset('test_reply'),
             mock.call().unpack_uint()])
        mock_socket.assert_has_calls(
            [mock.call(socket.AF_INET, socket.SOCK_STREAM),
             mock.call().bind(('', 0)),
             mock.call().connect((ADDR, RPC_PORT))])
        mock_init_call.assert_has_calls(
            [mock.call(coho.COHO1_CREATE_VOLUME_FROM_SNAPSHOT,
                       [(six.b('src'), mock_packer().pack_string),
                        (six.b('dest'), mock_packer().pack_string)])])

    def test_rpc_client_error_in_reply_header(self):
        """Ensure exceptions in the reply header are raised by the client.

        Coho cluster's RPC server packs errors into the reply header.
        This test ensures that the RPC client parses the reply header
        correctly and raises exceptions on various errors that can be
        included in the reply header.
        """
""" mock_socket = self.mock_object(socket, 'socket') mock_recvrecord = self.mock_object(coho.Client, '_recvrecord') rpc_client = coho.CohoRPCClient(ADDR, RPC_PORT) mock_recvrecord.return_value = NO_REPLY_BIN with self.assertRaisesRegex(exception.CohoException, "no REPLY.*"): rpc_client.create_snapshot('src', 'dest', 0) mock_recvrecord.return_value = MSG_DENIED_BIN with self.assertRaisesRegex(exception.CohoException, ".*MSG_DENIED.*"): rpc_client.delete_snapshot('snapshot') mock_recvrecord.return_value = PROG_UNAVAIL_BIN with self.assertRaisesRegex(exception.CohoException, ".*PROG_UNAVAIL"): rpc_client.delete_snapshot('snapshot') mock_recvrecord.return_value = PROG_MISMATCH_BIN with self.assertRaisesRegex(exception.CohoException, ".*PROG_MISMATCH.*"): rpc_client.delete_snapshot('snapshot') mock_recvrecord.return_value = GARBAGE_ARGS_BIN with self.assertRaisesRegex(exception.CohoException, ".*GARBAGE_ARGS"): rpc_client.delete_snapshot('snapshot') mock_recvrecord.return_value = PROC_UNAVAIL_BIN with self.assertRaisesRegex(exception.CohoException, ".*PROC_UNAVAIL"): rpc_client.delete_snapshot('snapshot') self.assertTrue(mock_recvrecord.called) mock_socket.assert_has_calls( [mock.call(socket.AF_INET, socket.SOCK_STREAM), mock.call().bind(('', 0)), mock.call().connect((ADDR, RPC_PORT))]) def test_rpc_client_error_in_receive_fragment(self): """Ensure exception is raised when malformed packet is recieved.""" mock_sendrcd = self.mock_object(coho.Client, '_sendrecord') mock_socket = self.mock_object(socket, 'socket') mock_socket.return_value.recv.return_value = INVALID_HEADER_BIN rpc_client = coho.CohoRPCClient(ADDR, RPC_PORT) with self.assertRaisesRegex(exception.CohoException, "Invalid response header.*"): rpc_client.create_snapshot('src', 'dest', 0) self.assertTrue(mock_sendrcd.called) mock_socket.assert_has_calls( [mock.call(socket.AF_INET, socket.SOCK_STREAM), mock.call().bind(('', 0)), mock.call().connect((ADDR, RPC_PORT)), mock.call().recv(4)]) cinder-8.0.0/cinder/tests/unit/test_coordination.py0000664000567000056710000001075712701406250023621 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock

import tooz.coordination
import tooz.locking

from cinder import coordination
from cinder import test


class Locked(Exception):
    pass


class MockToozLock(tooz.locking.Lock):
    active_locks = set()

    def acquire(self, blocking=True):
        if self.name not in self.active_locks:
            self.active_locks.add(self.name)
            return True
        elif not blocking:
            return False
        else:
            raise Locked

    def release(self):
        self.active_locks.remove(self.name)


@mock.patch('time.sleep', lambda _: None)
@mock.patch('eventlet.spawn', lambda f: f())
@mock.patch('eventlet.tpool.execute', lambda f: f())
@mock.patch.object(coordination.Coordinator, 'heartbeat')
@mock.patch('tooz.coordination.get_coordinator')
@mock.patch('random.uniform', lambda _a, _b: 0)
class CoordinatorTestCase(test.TestCase):
    def test_coordinator_start(self, get_coordinator, heartbeat):
        crd = get_coordinator.return_value

        agent = coordination.Coordinator()
        agent.start()

        self.assertTrue(get_coordinator.called)
        self.assertTrue(heartbeat.called)
        self.assertTrue(crd.start.called)

    def test_coordinator_stop(self, get_coordinator, heartbeat):
        crd = get_coordinator.return_value

        agent = coordination.Coordinator()
        agent.start()

        self.assertIsNotNone(agent.coordinator)
        agent.stop()
        self.assertTrue(crd.stop.called)
        self.assertIsNone(agent.coordinator)

    def test_coordinator_lock(self, get_coordinator, heartbeat):
        crd = get_coordinator.return_value
        crd.get_lock.side_effect = lambda n: MockToozLock(n)

        agent1 = coordination.Coordinator()
        agent1.start()
        agent2 = coordination.Coordinator()
        agent2.start()

        self.assertNotIn('lock', MockToozLock.active_locks)
        with agent1.get_lock('lock'):
            self.assertIn('lock', MockToozLock.active_locks)
            self.assertRaises(Locked, agent1.get_lock('lock').acquire)
            self.assertRaises(Locked, agent2.get_lock('lock').acquire)
        self.assertNotIn('lock', MockToozLock.active_locks)

    def test_coordinator_offline(self, get_coordinator, heartbeat):
        crd = get_coordinator.return_value
        crd.start.side_effect = tooz.coordination.ToozConnectionError('err')

        agent = coordination.Coordinator()
        self.assertRaises(tooz.coordination.ToozError, agent.start)
        self.assertFalse(agent.started)
        self.assertFalse(heartbeat.called)

    def test_coordinator_reconnect(self, get_coordinator, heartbeat):
        start_online = iter([True] + [False] * 5 + [True])
        heartbeat_online = iter((False, True, True))

        def raiser(cond):
            if not cond:
                raise tooz.coordination.ToozConnectionError('err')

        crd = get_coordinator.return_value
        crd.start.side_effect = lambda *_: raiser(next(start_online))
        crd.heartbeat.side_effect = lambda *_: raiser(next(heartbeat_online))

        agent = coordination.Coordinator()
        agent.start()
        self.assertRaises(tooz.coordination.ToozConnectionError,
                          agent._heartbeat)
        self.assertEqual(1, get_coordinator.call_count)
        agent._reconnect()
        self.assertEqual(7, get_coordinator.call_count)
        agent._heartbeat()


@mock.patch.object(coordination.COORDINATOR, 'get_lock')
class CoordinationTestCase(test.TestCase):
    def test_lock(self, get_lock):
        with coordination.Lock('lock'):
            self.assertTrue(get_lock.called)

    def test_synchronized(self, get_lock):
        @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}')
        def func(foo, bar):
            pass

        foo = mock.Mock()
        foo.val = 7
        bar = mock.MagicMock()
        bar.__getitem__.return_value = 8

        func(foo, bar)
        get_lock.assert_called_with('lock-func-7-8')
cinder-8.0.0/cinder/tests/unit/fake_snapshot.py0000664000567000056710000000351612701406250022712 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_versionedobjects import fields

from cinder.objects import snapshot
from cinder.tests.unit import fake_constants as fake


def fake_db_snapshot(**updates):
    db_snapshot = {
        'id': fake.snapshot_id,
        'volume_id': fake.volume_id,
        'status': "creating",
        'progress': '0%',
        'volume_size': 1,
        'display_name': 'fake_name',
        'display_description': 'fake_description',
        'metadata': {},
        'snapshot_metadata': [],
    }

    for name, field in snapshot.Snapshot.fields.items():
        if name in db_snapshot:
            continue
        if field.nullable:
            db_snapshot[name] = None
        elif field.default != fields.UnspecifiedDefault:
            db_snapshot[name] = field.default
        else:
            raise Exception('fake_db_snapshot needs help with %s' % name)

    if updates:
        db_snapshot.update(updates)

    return db_snapshot


def fake_snapshot_obj(context, **updates):
    expected_attrs = updates.pop('expected_attrs', None)
    return snapshot.Snapshot._from_db_object(context,
                                             snapshot.Snapshot(),
                                             fake_db_snapshot(**updates),
                                             expected_attrs=expected_attrs)
cinder-8.0.0/cinder/tests/unit/test_volume_utils.py0000664000567000056710000011126412701406250023653 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
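
# NOTE: The notifier tests below share one pattern: cinder.volume.utils.rpc
# and CONF are patched so each notify_about_*_usage() call can be checked
# for the _usage_from_*() payload it builds and the notification it emits,
# without a running message bus. Roughly (sketch of the exercised calls):
#
#     notifier = rpc.get_notifier('volume', CONF.host)
#     notifier.info(context, 'volume.' + suffix, usage_info)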
"""Tests For miscellaneous util methods used with volume.""" import datetime import io import mock import six from oslo_concurrency import processutils from oslo_config import cfg from cinder import context from cinder import exception from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder import utils from cinder.volume import throttling from cinder.volume import utils as volume_utils CONF = cfg.CONF class NotifyUsageTestCase(test.TestCase): @mock.patch('cinder.volume.utils._usage_from_volume') @mock.patch('cinder.volume.utils.CONF') @mock.patch('cinder.volume.utils.rpc') def test_notify_about_volume_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_volume_usage(mock.sentinel.context, mock.sentinel.volume, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.context, mock.sentinel.volume) mock_rpc.get_notifier.assert_called_once_with('volume', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'volume.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.utils._usage_from_volume') @mock.patch('cinder.volume.utils.CONF') @mock.patch('cinder.volume.utils.rpc') def test_notify_about_volume_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_volume_usage( mock.sentinel.context, mock.sentinel.volume, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.context, mock.sentinel.volume, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('volume', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'volume.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.utils._usage_from_snapshot') @mock.patch('cinder.volume.utils.CONF') @mock.patch('cinder.volume.utils.rpc') def test_notify_about_snapshot_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_snapshot_usage( mock.sentinel.context, mock.sentinel.snapshot, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.snapshot) mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'snapshot.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.utils._usage_from_snapshot') @mock.patch('cinder.volume.utils.CONF') @mock.patch('cinder.volume.utils.rpc') def test_notify_about_snapshot_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_snapshot_usage( mock.sentinel.context, mock.sentinel.snapshot, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.snapshot, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'snapshot.test_suffix', mock_usage.return_value) @mock.patch('cinder.objects.Volume.get_by_id') def test_usage_from_snapshot(self, volume_get_by_id): raw_volume = { 'id': fake.volume_id, 'availability_zone': 'nova' } ctxt = context.get_admin_context() volume_obj = fake_volume.fake_volume_obj(ctxt, 
**raw_volume) volume_get_by_id.return_value = volume_obj raw_snapshot = { 'project_id': fake.project_id, 'user_id': fake.user_id, 'volume': volume_obj, 'volume_id': fake.volume_id, 'volume_size': 1, 'id': fake.snapshot_id, 'display_name': '11', 'created_at': '2014-12-11T10:10:00', 'status': 'pause', 'deleted': '', 'snapshot_metadata': [{'key': 'fake_snap_meta_key', 'value': 'fake_snap_meta_value'}], 'expected_attrs': ['metadata'], } snapshot_obj = fake_snapshot.fake_snapshot_obj(ctxt, **raw_snapshot) usage_info = volume_utils._usage_from_snapshot(snapshot_obj) expected_snapshot = { 'tenant_id': fake.project_id, 'user_id': fake.user_id, 'availability_zone': 'nova', 'volume_id': fake.volume_id, 'volume_size': 1, 'snapshot_id': fake.snapshot_id, 'display_name': '11', 'created_at': 'DONTCARE', 'status': 'pause', 'deleted': '', 'metadata': six.text_type({'fake_snap_meta_key': u'fake_snap_meta_value'}), } self.assertDictMatch(expected_snapshot, usage_info) @mock.patch('cinder.db.volume_glance_metadata_get') @mock.patch('cinder.db.volume_attachment_get_used_by_volume_id') def test_usage_from_volume(self, mock_attachment, mock_image_metadata): mock_image_metadata.return_value = {'image_id': 'fake_image_id'} mock_attachment.return_value = [{'instance_uuid': 'fake_instance_id'}] raw_volume = { 'project_id': '12b0330ec2584a', 'user_id': '158cba1b8c2bb6008e', 'host': 'fake_host', 'availability_zone': 'nova', 'volume_type_id': 'fake_volume_type_id', 'id': 'fake_volume_id', 'size': 1, 'display_name': 'test_volume', 'created_at': datetime.datetime(2015, 1, 1, 1, 1, 1), 'launched_at': datetime.datetime(2015, 1, 1, 1, 1, 1), 'snapshot_id': None, 'replication_status': None, 'replication_extended_status': None, 'replication_driver_data': None, 'status': 'available', 'volume_metadata': {'fake_metadata_key': 'fake_metadata_value'}, } usage_info = volume_utils._usage_from_volume( mock.sentinel.context, raw_volume) expected_volume = { 'tenant_id': '12b0330ec2584a', 'user_id': '158cba1b8c2bb6008e', 'host': 'fake_host', 'availability_zone': 'nova', 'volume_type': 'fake_volume_type_id', 'volume_id': 'fake_volume_id', 'size': 1, 'display_name': 'test_volume', 'created_at': '2015-01-01T01:01:01', 'launched_at': '2015-01-01T01:01:01', 'snapshot_id': None, 'replication_status': None, 'replication_extended_status': None, 'replication_driver_data': None, 'status': 'available', 'metadata': {'fake_metadata_key': 'fake_metadata_value'}, 'glance_metadata': {'image_id': 'fake_image_id'}, 'volume_attachment': [{'instance_uuid': 'fake_instance_id'}], } self.assertEqual(expected_volume, usage_info) @mock.patch('cinder.volume.utils._usage_from_consistencygroup') @mock.patch('cinder.volume.utils.CONF') @mock.patch('cinder.volume.utils.rpc') def test_notify_about_consistencygroup_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_consistencygroup_usage( mock.sentinel.context, mock.sentinel.consistencygroup, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.consistencygroup) mock_rpc.get_notifier.assert_called_once_with('consistencygroup', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'consistencygroup.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.utils._usage_from_consistencygroup') @mock.patch('cinder.volume.utils.CONF') @mock.patch('cinder.volume.utils.rpc') def test_notify_about_consistencygroup_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): 
mock_conf.host = 'host1' output = volume_utils.notify_about_consistencygroup_usage( mock.sentinel.context, mock.sentinel.consistencygroup, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.consistencygroup, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('consistencygroup', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'consistencygroup.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.utils._usage_from_cgsnapshot') @mock.patch('cinder.volume.utils.CONF') @mock.patch('cinder.volume.utils.rpc') def test_notify_about_cgsnapshot_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_cgsnapshot_usage( mock.sentinel.context, mock.sentinel.cgsnapshot, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot) mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'cgsnapshot.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.utils._usage_from_cgsnapshot') @mock.patch('cinder.volume.utils.CONF') @mock.patch('cinder.volume.utils.rpc') def test_notify_about_cgsnapshot_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_cgsnapshot_usage( mock.sentinel.context, mock.sentinel.cgsnapshot, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'cgsnapshot.test_suffix', mock_usage.return_value) def test_usage_from_backup(self): raw_backup = { 'project_id': '12b0330ec2584a', 'user_id': '158cba1b8c2bb6008e', 'availability_zone': 'nova', 'id': 'fake_id', 'host': 'fake_host', 'display_name': 'test_backup', 'created_at': '2014-12-11T10:10:00', 'status': 'available', 'volume_id': 'fake_volume_id', 'size': 1, 'service_metadata': None, 'service': 'cinder.backup.drivers.swift', 'fail_reason': None, 'parent_id': 'fake_parent_id', 'num_dependent_backups': 0, 'snapshot_id': None, } # Make it easier to find out differences between raw and expected. 
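        # NOTE: _usage_from_backup() is expected to pass most fields through
        # unchanged; only 'project_id' is renamed to 'tenant_id' and 'id' to
        # 'backup_id', so the expectation below starts from a copy of the
        # raw dict and applies just those two renames.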
expected_backup = raw_backup.copy() expected_backup['tenant_id'] = expected_backup.pop('project_id') expected_backup['backup_id'] = expected_backup.pop('id') usage_info = volume_utils._usage_from_backup(raw_backup) self.assertEqual(expected_backup, usage_info) class LVMVolumeDriverTestCase(test.TestCase): def test_convert_blocksize_option(self): # Test valid volume_dd_blocksize bs, count = volume_utils._calculate_count(1024, '10M') self.assertEqual('10M', bs) self.assertEqual(103, count) bs, count = volume_utils._calculate_count(1024, '1xBBB') self.assertEqual('1M', bs) self.assertEqual(1024, count) # Test 'volume_dd_blocksize' with fraction bs, count = volume_utils._calculate_count(1024, '1.3M') self.assertEqual('1M', bs) self.assertEqual(1024, count) # Test zero-size 'volume_dd_blocksize' bs, count = volume_utils._calculate_count(1024, '0M') self.assertEqual('1M', bs) self.assertEqual(1024, count) # Test negative 'volume_dd_blocksize' bs, count = volume_utils._calculate_count(1024, '-1M') self.assertEqual('1M', bs) self.assertEqual(1024, count) # Test non-digital 'volume_dd_blocksize' bs, count = volume_utils._calculate_count(1024, 'ABM') self.assertEqual('1M', bs) self.assertEqual(1024, count) class OdirectSupportTestCase(test.TestCase): @mock.patch('cinder.utils.execute') def test_check_for_odirect_support(self, mock_exec): output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def') self.assertTrue(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', 'of=/dev/def', 'oflag=direct', run_as_root=True) mock_exec.reset_mock() output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def', 'iflag=direct') self.assertTrue(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', 'of=/dev/def', 'iflag=direct', run_as_root=True) mock_exec.reset_mock() output = volume_utils.check_for_odirect_support('/dev/zero', '/dev/def', 'iflag=direct') self.assertFalse(output) mock_exec.reset_mock() output = volume_utils.check_for_odirect_support('/dev/zero', '/dev/def') self.assertTrue(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/zero', 'of=/dev/def', 'oflag=direct', run_as_root=True) @mock.patch('cinder.utils.execute', side_effect=processutils.ProcessExecutionError) def test_check_for_odirect_support_error(self, mock_exec): output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def') self.assertFalse(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', 'of=/dev/def', 'oflag=direct', run_as_root=True) mock_exec.reset_mock() output = volume_utils.check_for_odirect_support('/dev/zero', '/dev/def') self.assertFalse(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/zero', 'of=/dev/def', 'oflag=direct', run_as_root=True) class ClearVolumeTestCase(test.TestCase): @mock.patch('cinder.volume.utils.copy_volume', return_value=None) @mock.patch('cinder.volume.utils.CONF') def test_clear_volume_conf(self, mock_conf, mock_copy): mock_conf.volume_clear = 'zero' mock_conf.volume_clear_size = 0 mock_conf.volume_dd_blocksize = '1M' mock_conf.volume_clear_ionice = '-c3' output = volume_utils.clear_volume(1024, 'volume_path') self.assertIsNone(output) mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1024, '1M', sync=True, execute=utils.execute, ionice='-c3', throttle=None, sparse=False) @mock.patch('cinder.volume.utils.copy_volume', return_value=None) @mock.patch('cinder.volume.utils.CONF') def test_clear_volume_args(self, mock_conf, mock_copy): mock_conf.volume_clear = 'shred' 
mock_conf.volume_clear_size = 0 mock_conf.volume_dd_blocksize = '1M' mock_conf.volume_clear_ionice = '-c3' output = volume_utils.clear_volume(1024, 'volume_path', 'zero', 1, '-c0') self.assertIsNone(output) mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1, '1M', sync=True, execute=utils.execute, ionice='-c0', throttle=None, sparse=False) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.utils.CONF') def test_clear_volume_shred(self, mock_conf, mock_exec): mock_conf.volume_clear = 'shred' mock_conf.volume_clear_size = 1 mock_conf.volume_clear_ionice = None output = volume_utils.clear_volume(1024, 'volume_path') self.assertIsNone(output) mock_exec.assert_called_once_with( 'shred', '-n3', '-s1MiB', "volume_path", run_as_root=True) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.utils.CONF') def test_clear_volume_shred_not_clear_size(self, mock_conf, mock_exec): mock_conf.volume_clear = 'shred' mock_conf.volume_clear_size = None mock_conf.volume_clear_ionice = None output = volume_utils.clear_volume(1024, 'volume_path') self.assertIsNone(output) mock_exec.assert_called_once_with( 'shred', '-n3', "volume_path", run_as_root=True) @mock.patch('cinder.volume.utils.CONF') def test_clear_volume_invalid_opt(self, mock_conf): mock_conf.volume_clear = 'non_existent_volume_clearer' mock_conf.volume_clear_size = 0 mock_conf.volume_clear_ionice = None self.assertRaises(exception.InvalidConfigurationValue, volume_utils.clear_volume, 1024, "volume_path") class CopyVolumeTestCase(test.TestCase): @mock.patch('cinder.volume.utils._calculate_count', return_value=(1234, 5678)) @mock.patch('cinder.volume.utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.utils.CONF') def test_copy_volume_dd_iflag_and_oflag(self, mock_conf, mock_exec, mock_support, mock_count): fake_throttle = throttling.Throttle(['fake_throttle']) output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1, sync=True, execute=utils.execute, ionice=None, throttle=fake_throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('fake_throttle', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=5678', 'bs=1234', 'iflag=direct', 'oflag=direct', run_as_root=True) mock_exec.reset_mock() output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1, sync=False, execute=utils.execute, ionice=None, throttle=fake_throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('fake_throttle', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=5678', 'bs=1234', 'iflag=direct', 'oflag=direct', run_as_root=True) @mock.patch('cinder.volume.utils._calculate_count', return_value=(1234, 5678)) @mock.patch('cinder.volume.utils.check_for_odirect_support', return_value=False) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_no_iflag_or_oflag(self, mock_exec, mock_support, mock_count): fake_throttle = throttling.Throttle(['fake_throttle']) output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1, sync=True, execute=utils.execute, ionice=None, throttle=fake_throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('fake_throttle', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=5678', 'bs=1234', 'conv=fdatasync', run_as_root=True) mock_exec.reset_mock() output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1, sync=False, execute=utils.execute, ionice=None, throttle=fake_throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('fake_throttle', 'dd', 'if=/dev/zero', 'of=/dev/null', 
'count=5678', 'bs=1234', run_as_root=True) @mock.patch('cinder.volume.utils._calculate_count', return_value=(1234, 5678)) @mock.patch('cinder.volume.utils.check_for_odirect_support', return_value=False) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_no_throttle(self, mock_exec, mock_support, mock_count): output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1, sync=True, execute=utils.execute, ionice=None) self.assertIsNone(output) mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', 'count=5678', 'bs=1234', 'conv=fdatasync', run_as_root=True) @mock.patch('cinder.volume.utils._calculate_count', return_value=(1234, 5678)) @mock.patch('cinder.volume.utils.check_for_odirect_support', return_value=False) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_with_ionice(self, mock_exec, mock_support, mock_count): output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1, sync=True, execute=utils.execute, ionice='-c3') self.assertIsNone(output) mock_exec.assert_called_once_with('ionice', '-c3', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=5678', 'bs=1234', 'conv=fdatasync', run_as_root=True) @mock.patch('cinder.volume.utils._calculate_count', return_value=(1234, 5678)) @mock.patch('cinder.volume.utils.check_for_odirect_support', return_value=False) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_with_sparse(self, mock_exec, mock_support, mock_count): output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1, sync=True, execute=utils.execute, sparse=True) self.assertIsNone(output) mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', 'count=5678', 'bs=1234', 'conv=fdatasync,sparse', run_as_root=True) @mock.patch('cinder.volume.utils._calculate_count', return_value=(1234, 5678)) @mock.patch('cinder.volume.utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_with_sparse_iflag_and_oflag(self, mock_exec, mock_support, mock_count): output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1, sync=True, execute=utils.execute, sparse=True) self.assertIsNone(output) mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', 'count=5678', 'bs=1234', 'iflag=direct', 'oflag=direct', 'conv=sparse', run_as_root=True) @mock.patch('cinder.volume.utils._copy_volume_with_file') def test_copy_volume_handles(self, mock_copy): handle1 = io.RawIOBase() handle2 = io.RawIOBase() output = volume_utils.copy_volume(handle1, handle2, 1024, 1) self.assertIsNone(output) mock_copy.assert_called_once_with(handle1, handle2, 1024) @mock.patch('cinder.volume.utils._transfer_data') @mock.patch('cinder.volume.utils._open_volume_with_path') def test_copy_volume_handle_transfer(self, mock_open, mock_transfer): handle = io.RawIOBase() output = volume_utils.copy_volume('/foo/bar', handle, 1024, 1) self.assertIsNone(output) mock_transfer.assert_called_once_with(mock.ANY, mock.ANY, 1073741824, mock.ANY) class VolumeUtilsTestCase(test.TestCase): def test_null_safe_str(self): self.assertEqual('', volume_utils.null_safe_str(None)) self.assertEqual('', volume_utils.null_safe_str(False)) self.assertEqual('', volume_utils.null_safe_str(0)) self.assertEqual('', volume_utils.null_safe_str([])) self.assertEqual('', volume_utils.null_safe_str(())) self.assertEqual('', volume_utils.null_safe_str({})) self.assertEqual('', volume_utils.null_safe_str(set())) self.assertEqual('a', volume_utils.null_safe_str('a')) self.assertEqual('1', volume_utils.null_safe_str(1)) 
self.assertEqual('True', volume_utils.null_safe_str(True)) @mock.patch('cinder.utils.get_root_helper') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning') def test_supports_thin_provisioning(self, mock_supports_thin, mock_helper): self.assertEqual(mock_supports_thin.return_value, volume_utils.supports_thin_provisioning()) mock_helper.assert_called_once_with() @mock.patch('cinder.utils.get_root_helper') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') def test_get_all_physical_volumes(self, mock_get_vols, mock_helper): self.assertEqual(mock_get_vols.return_value, volume_utils.get_all_physical_volumes()) mock_helper.assert_called_once_with() @mock.patch('cinder.utils.get_root_helper') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_volume_groups') def test_get_all_volume_groups(self, mock_get_groups, mock_helper): self.assertEqual(mock_get_groups.return_value, volume_utils.get_all_volume_groups()) mock_helper.assert_called_once_with() def test_generate_password(self): password = volume_utils.generate_password() self.assertTrue(any(c for c in password if c in '23456789')) self.assertTrue(any(c for c in password if c in 'abcdefghijkmnopqrstuvwxyz')) self.assertTrue(any(c for c in password if c in 'ABCDEFGHJKLMNPQRSTUVWXYZ')) self.assertEqual(16, len(password)) self.assertEqual(10, len(volume_utils.generate_password(10))) @mock.patch('cinder.volume.utils.generate_password') def test_generate_username(self, mock_gen_pass): output = volume_utils.generate_username() self.assertEqual(mock_gen_pass.return_value, output) def test_extract_host(self): host = 'Host' # default level is 'backend' self.assertEqual(host, volume_utils.extract_host(host)) self.assertEqual(host, volume_utils.extract_host(host, 'host')) self.assertEqual(host, volume_utils.extract_host(host, 'backend')) # default_pool_name doesn't work for level other than 'pool' self.assertEqual(host, volume_utils.extract_host(host, 'host', True)) self.assertEqual(host, volume_utils.extract_host(host, 'host', False)) self.assertEqual(host, volume_utils.extract_host(host, 'backend', True)) self.assertEqual(host, volume_utils.extract_host(host, 'backend', False)) self.assertIsNone(volume_utils.extract_host(host, 'pool')) self.assertEqual('_pool0', volume_utils.extract_host(host, 'pool', True)) host = 'Host@Backend' self.assertEqual('Host@Backend', volume_utils.extract_host(host)) self.assertEqual('Host', volume_utils.extract_host(host, 'host')) self.assertEqual(host, volume_utils.extract_host(host, 'backend')) self.assertIsNone(volume_utils.extract_host(host, 'pool')) self.assertEqual('_pool0', volume_utils.extract_host(host, 'pool', True)) host = 'Host@Backend#Pool' pool = 'Pool' self.assertEqual('Host@Backend', volume_utils.extract_host(host)) self.assertEqual('Host', volume_utils.extract_host(host, 'host')) self.assertEqual('Host@Backend', volume_utils.extract_host(host, 'backend')) self.assertEqual(pool, volume_utils.extract_host(host, 'pool')) self.assertEqual(pool, volume_utils.extract_host(host, 'pool', True)) host = 'Host#Pool' self.assertEqual('Host', volume_utils.extract_host(host)) self.assertEqual('Host', volume_utils.extract_host(host, 'host')) self.assertEqual('Host', volume_utils.extract_host(host, 'backend')) self.assertEqual(pool, volume_utils.extract_host(host, 'pool')) self.assertEqual(pool, volume_utils.extract_host(host, 'pool', True)) def test_get_volume_rpc_host(self): host = 'Host@backend' # default level is 'backend' # check if host with backend is returned 
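        # NOTE: the assertion below expects get_volume_rpc_host() to agree
        # with extract_host()'s default 'backend' level for a
        # 'Host@backend'-style string.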
self.assertEqual(volume_utils.extract_host(host), volume_utils.get_volume_rpc_host(host)) def test_append_host(self): host = 'Host' pool = 'Pool' expected = 'Host#Pool' self.assertEqual(expected, volume_utils.append_host(host, pool)) pool = None expected = 'Host' self.assertEqual(expected, volume_utils.append_host(host, pool)) host = None pool = 'pool' expected = None self.assertEqual(expected, volume_utils.append_host(host, pool)) host = None pool = None expected = None self.assertEqual(expected, volume_utils.append_host(host, pool)) def test_compare_hosts(self): host_1 = 'fake_host@backend1' host_2 = 'fake_host@backend1#pool1' self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2)) host_2 = 'fake_host@backend1' self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2)) host_2 = 'fake_host2@backend1' self.assertFalse(volume_utils.hosts_are_equivalent(host_1, host_2)) def test_check_managed_volume_already_managed(self): mock_db = mock.Mock() result = volume_utils.check_already_managed_volume( mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1') self.assertTrue(result) @mock.patch('cinder.volume.utils.CONF') def test_check_already_managed_with_vol_id_vol_pattern(self, conf_mock): mock_db = mock.Mock() conf_mock.volume_name_template = 'volume-%s-volume' result = volume_utils.check_already_managed_volume( mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1-volume') self.assertTrue(result) @mock.patch('cinder.volume.utils.CONF') def test_check_already_managed_with_id_vol_pattern(self, conf_mock): mock_db = mock.Mock() conf_mock.volume_name_template = '%s-volume' result = volume_utils.check_already_managed_volume( mock_db, 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1-volume') self.assertTrue(result) def test_check_managed_volume_not_managed_cinder_like_name(self): mock_db = mock.Mock() mock_db.volume_get = mock.Mock( side_effect=exception.VolumeNotFound( 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')) result = volume_utils.check_already_managed_volume( mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1') self.assertFalse(result) def test_check_managed_volume_not_managed(self): mock_db = mock.Mock() result = volume_utils.check_already_managed_volume( mock_db, 'test-volume') self.assertFalse(result) def test_check_managed_volume_not_managed_id_like_uuid(self): mock_db = mock.Mock() result = volume_utils.check_already_managed_volume( mock_db, 'volume-d8cd1fe') self.assertFalse(result) def test_convert_config_string_to_dict(self): test_string = "{'key-1'='val-1' 'key-2'='val-2' 'key-3'='val-3'}" expected_dict = {'key-1': 'val-1', 'key-2': 'val-2', 'key-3': 'val-3'} self.assertEqual( expected_dict, volume_utils.convert_config_string_to_dict(test_string)) def test_process_reserve_over_quota(self): ctxt = context.get_admin_context() ctxt.project_id = 'fake' overs_one = ['gigabytes'] over_two = ['snapshots'] usages = {'gigabytes': {'reserved': 1, 'in_use': 9}, 'snapshots': {'reserved': 1, 'in_use': 9}} quotas = {'gigabytes': 10, 'snapshots': 10} size = 1 self.assertRaises(exception.VolumeSizeExceedsAvailableQuota, volume_utils.process_reserve_over_quota, ctxt, overs_one, usages, quotas, size) self.assertRaises(exception.SnapshotLimitExceeded, volume_utils.process_reserve_over_quota, ctxt, over_two, usages, quotas, size) cinder-8.0.0/cinder/tests/unit/test_rbd.py0000664000567000056710000015457612701406257021717 0ustar jenkinsjenkins00000000000000 # Copyright 2012 Josh Durgin # Copyright 2013 Canonical Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import math import os import tempfile import mock from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _ import cinder.image.glance from cinder.image import image_utils from cinder import objects from cinder import test from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test_volume from cinder.tests.unit import utils from cinder.volume import configuration as conf import cinder.volume.drivers.rbd as driver from cinder.volume.flows.manager import create_volume # This is used to collect raised exceptions so that tests may check what was # raised. # NOTE: this must be initialised in test setUp(). RAISED_EXCEPTIONS = [] class MockException(Exception): def __init__(self, *args, **kwargs): RAISED_EXCEPTIONS.append(self.__class__) class MockImageNotFoundException(MockException): """Used as mock for rbd.ImageNotFound.""" class MockImageBusyException(MockException): """Used as mock for rbd.ImageBusy.""" class MockImageExistsException(MockException): """Used as mock for rbd.ImageExists.""" def common_mocks(f): """Decorator to set mocks common to all tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. 
""" def _common_inner_inner1(inst, *args, **kwargs): @mock.patch('time.sleep') @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') @mock.patch('cinder.volume.drivers.rbd.RADOSClient') @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd, mock_client, mock_proxy, mock_sleep): inst.mock_rbd = mock_rbd inst.mock_rados = mock_rados inst.mock_client = mock_client inst.mock_proxy = mock_proxy inst.mock_sleep = mock_sleep inst.mock_rbd.RBD.Error = Exception inst.mock_rados.Error = Exception inst.mock_rbd.ImageBusy = MockImageBusyException inst.mock_rbd.ImageNotFound = MockImageNotFoundException inst.mock_rbd.ImageExists = MockImageExistsException inst.mock_rbd.InvalidArgument = MockImageNotFoundException inst.driver.rbd = inst.mock_rbd inst.driver.rados = inst.mock_rados return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 CEPH_MON_DUMP = """dumped monmap epoch 1 { "epoch": 1, "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", "modified": "2013-05-22 17:44:56.343618", "created": "2013-05-22 17:44:56.343618", "mons": [ { "rank": 0, "name": "a", "addr": "[::1]:6789\/0"}, { "rank": 1, "name": "b", "addr": "[::1]:6790\/0"}, { "rank": 2, "name": "c", "addr": "[::1]:6791\/0"}, { "rank": 3, "name": "d", "addr": "127.0.0.1:6792\/0"}, { "rank": 4, "name": "e", "addr": "example.com:6791\/0"}], "quorum": [ 0, 1, 2]} """ @ddt.ddt class RBDTestCase(test.TestCase): def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(RBDTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_tmp_dir = None self.cfg.image_conversion_dir = None self.cfg.rbd_cluster_name = 'nondefault' self.cfg.rbd_pool = 'rbd' self.cfg.rbd_ceph_conf = None self.cfg.rbd_secret_uuid = None self.cfg.rbd_user = None self.cfg.volume_dd_blocksize = '1M' self.cfg.rbd_store_chunk_size = 4 self.cfg.rados_connection_retries = 3 self.cfg.rados_connection_interval = 5 mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = driver.RBDDriver(execute=mock_exec, configuration=self.cfg) self.driver.set_initialized() self.context = context.get_admin_context() self.volume_a = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000a', 'id': '4c39c3c7-168f-4b32-b585-77f1b3bf0a38', 'size': 10}) self.volume_b = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000b', 'id': '0c7d1f44-5a06-403f-bb82-ae7ad0d693a6', 'size': 10}) self.snapshot = fake_snapshot.fake_snapshot_obj( self.context, name='snapshot-0000000a') @ddt.data({'cluster_name': None, 'pool_name': 'rbd'}, {'cluster_name': 'volumes', 'pool_name': None}) @ddt.unpack def test_min_config(self, cluster_name, pool_name): self.cfg.rbd_cluster_name = cluster_name self.cfg.rbd_pool = pool_name with mock.patch('cinder.volume.drivers.rbd.rados'): self.assertRaises(exception.InvalidConfigurationValue, self.driver.check_for_setup_error) @common_mocks def test_create_volume(self): client = self.mock_client.return_value client.__enter__.return_value = client self.driver.create_volume(self.volume_a) chunk_size = self.cfg.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) args = [client.ioctx, str(self.volume_a.name), self.volume_a.size * units.Gi, order] kwargs = {'old_format': False, 'features': client.features} self.mock_rbd.RBD.return_value.create.assert_called_once_with( *args, **kwargs) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, 
None) @common_mocks def test_manage_existing_get_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 2 * units.Gi existing_ref = {'source-name': self.volume_a.name} return_size = self.driver.manage_existing_get_size( self.volume_a, existing_ref) self.assertEqual(2, return_size) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing_get_invalid_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 'abcd' existing_ref = {'source-name': self.volume_a.name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, self.volume_a, existing_ref) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \ mock_rbd_image_rename: exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} mock_rbd_image_rename.return_value = 0 self.driver.manage_existing(self.volume_a, existing_ref) mock_rbd_image_rename.assert_called_with( client.ioctx, exist_volume, self.volume_a.name) @common_mocks def test_manage_existing_with_exist_rbd_image(self): client = self.mock_client.return_value client.__enter__.return_value = client self.mock_rbd.RBD.return_value.rename.side_effect = ( MockImageExistsException) exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} self.assertRaises(self.mock_rbd.ImageExists, self.driver.manage_existing, self.volume_a, existing_ref) # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageExists]) @common_mocks def test_delete_backup_snaps(self): self.driver.rbd.Image.remove_snap = mock.Mock() with mock.patch.object(self.driver, '_get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.return_value = [{'name': 'snap1'}] rbd_image = self.driver.rbd.Image() self.driver._delete_backup_snaps(rbd_image) mock_get_backup_snaps.assert_called_once_with(rbd_image) self.assertTrue( self.driver.rbd.Image.return_value.remove_snap.called) @common_mocks def test_delete_volume(self): client = self.mock_client.return_value self.driver.rbd.Image.return_value.list_snaps.return_value = [] with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: mock_get_clone_info.return_value = (None, None, None) self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_a.name, None) (self.driver.rbd.Image.return_value .list_snaps.assert_called_once_with()) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.driver.rbd.RBD.return_value.remove.call_count) @common_mocks def delete_volume_not_found(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound 
self.assertIsNone(self.driver.delete_volume(self.volume_a)) self.mock_rbd.Image.assert_called_once_with() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_delete_busy_volume(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageBusy) with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: with mock.patch.object(driver, 'RADOSClient') as \ mock_rados_client: self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_a.name, None) (self.mock_rbd.Image.return_value.list_snaps .assert_called_once_with()) mock_rados_client.assert_called_once_with(self.driver) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 3, self.mock_rbd.RBD.return_value.remove.call_count) self.assertEqual(3, len(RAISED_EXCEPTIONS)) # Make sure the exception was raised self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS) @common_mocks def test_delete_volume_not_found(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageNotFound) with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: with mock.patch.object(driver, 'RADOSClient') as \ mock_rados_client: self.assertIsNone(self.driver.delete_volume(self.volume_a)) mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_a.name, None) (self.mock_rbd.Image.return_value.list_snaps .assert_called_once_with()) mock_rados_client.assert_called_once_with(self.driver) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_create_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.create_snapshot(self.snapshot) args = [str(self.snapshot.name)] proxy.create_snap.assert_called_with(*args) proxy.protect_snap.assert_called_with(*args) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.delete_snapshot(self.snapshot) proxy.remove_snap.assert_called_with(self.snapshot.name) proxy.unprotect_snap.assert_called_with(self.snapshot.name) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_notfound_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy 
proxy.unprotect_snap.side_effect = ( self.mock_rbd.ImageNotFound) self.driver.delete_snapshot(self.snapshot) proxy.remove_snap.assert_called_with(self.snapshot.name) proxy.unprotect_snap.assert_called_with(self.snapshot.name) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_unprotected_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = self.mock_rbd.InvalidArgument self.driver.delete_snapshot(self.snapshot) self.assertTrue(proxy.unprotect_snap.called) self.assertTrue(proxy.remove_snap.called) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_busy_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = ( self.mock_rbd.ImageBusy) with mock.patch.object(self.driver, '_get_children_info') as \ mock_get_children_info: mock_get_children_info.return_value = [('pool', 'volume2')] with mock.patch.object(driver, 'LOG') as \ mock_log: self.assertRaises(exception.SnapshotIsBusy, self.driver.delete_snapshot, self.snapshot) mock_get_children_info.assert_called_once_with( proxy, self.snapshot.name) self.assertTrue(mock_log.info.called) self.assertTrue(proxy.unprotect_snap.called) self.assertFalse(proxy.remove_snap.called) @common_mocks def test_get_children_info(self): volume = self.mock_proxy volume.set_snap = mock.Mock() volume.list_children = mock.Mock() list_children = [('pool', 'volume2')] volume.list_children.return_value = list_children info = self.driver._get_children_info(volume, self.snapshot['name']) self.assertEqual(list_children, info) @common_mocks def test_get_clone_info(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, self.volume_a.name) self.assertEqual(parent_info, info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() @common_mocks def test_get_clone_info_w_snap(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = parent_info snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_a.name, snap=snapshot) self.assertEqual(parent_info, info) self.assertEqual(2, volume.set_snap.call_count) volume.parent_info.assert_called_once_with() @common_mocks def test_get_clone_info_w_exception(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() volume.parent_info.side_effect = self.mock_rbd.ImageNotFound snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_a.name, snap=snapshot) self.assertEqual((None, None, None), info) self.assertEqual(2, volume.set_snap.call_count) volume.parent_info.assert_called_once_with() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_get_clone_info_deleted_volume(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = 
parent_info info = self.driver._get_clone_info(volume, "%s.deleted" % (self.volume_a.name)) self.assertEqual(parent_info, info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() @common_mocks def test_create_cloned_volume_same_size(self): self.cfg.rbd_max_clone_depth = 2 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 self.driver.create_cloned_volume(self.volume_b, self.volume_a) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) self.mock_rbd.Image.return_value.close \ .assert_called_once_with() self.assertTrue(mock_get_clone_depth.called) self.assertEqual( 0, mock_resize.call_count) @common_mocks def test_create_cloned_volume_different_size(self): self.cfg.rbd_max_clone_depth = 2 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 self.volume_b.size = 20 self.driver.create_cloned_volume(self.volume_b, self.volume_a) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) self.mock_rbd.Image.return_value.close \ .assert_called_once_with() self.assertTrue(mock_get_clone_depth.called) self.assertEqual( 1, mock_resize.call_count) @common_mocks def test_create_cloned_volume_w_flatten(self): self.cfg.rbd_max_clone_depth = 1 with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = ( ('fake_pool', self.volume_b.name, '.'.join((self.volume_b.name, 'clone_snap')))) with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.driver.create_cloned_volume(self.volume_b, self.volume_a) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) (self.mock_rbd.Image.return_value.unprotect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.remove_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) # We expect the driver to close both volumes, so 2 is expected self.assertEqual( 2, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) @common_mocks def test_create_cloned_volume_w_clone_exception(self): self.cfg.rbd_max_clone_depth = 2 self.mock_rbd.RBD.return_value.clone.side_effect = ( self.mock_rbd.RBD.Error) with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.assertRaises(self.mock_rbd.RBD.Error, 
                              self.driver.create_cloned_volume,
                              self.volume_b, self.volume_a)

            (self.mock_rbd.Image.return_value.create_snap
                .assert_called_once_with('.'.join(
                    (self.volume_b.name, 'clone_snap'))))
            (self.mock_rbd.Image.return_value.protect_snap
                .assert_called_once_with('.'.join(
                    (self.volume_b.name, 'clone_snap'))))
            self.assertEqual(
                1, self.mock_rbd.RBD.return_value.clone.call_count)
            (self.mock_rbd.Image.return_value.unprotect_snap
                .assert_called_once_with('.'.join(
                    (self.volume_b.name, 'clone_snap'))))
            (self.mock_rbd.Image.return_value.remove_snap
                .assert_called_once_with('.'.join(
                    (self.volume_b.name, 'clone_snap'))))
            self.mock_rbd.Image.return_value.close.assert_called_once_with()

    @common_mocks
    def test_good_locations(self):
        locations = ['rbd://fsid/pool/image/snap',
                     'rbd://%2F/%2F/%2F/%2F', ]
        # Iterate explicitly: map() is lazy on Python 3 and would never
        # actually invoke the parser.
        for loc in locations:
            self.driver._parse_location(loc)

    @common_mocks
    def test_bad_locations(self):
        locations = ['rbd://image',
                     'http://path/to/somewhere/else',
                     'rbd://image/extra',
                     'rbd://image/',
                     'rbd://fsid/pool/image/',
                     'rbd://fsid/pool/image/snap/',
                     'rbd://///', ]
        for loc in locations:
            self.assertRaises(exception.ImageUnacceptable,
                              self.driver._parse_location,
                              loc)
            self.assertFalse(
                self.driver._is_cloneable(loc, {'disk_format': 'raw'}))

    @common_mocks
    def test_cloneable(self):
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            location = 'rbd://abc/pool/image/snap'
            info = {'disk_format': 'raw'}
            self.assertTrue(self.driver._is_cloneable(location, info))
            self.assertTrue(mock_get_fsid.called)

    @common_mocks
    def test_uncloneable_different_fsid(self):
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            location = 'rbd://def/pool/image/snap'
            self.assertFalse(
                self.driver._is_cloneable(location, {'disk_format': 'raw'}))
            self.assertTrue(mock_get_fsid.called)

    @common_mocks
    def test_uncloneable_unreadable(self):
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            location = 'rbd://abc/pool/image/snap'

            self.driver.rbd.Error = Exception
            self.mock_proxy.side_effect = Exception

            args = [location, {'disk_format': 'raw'}]
            self.assertFalse(self.driver._is_cloneable(*args))
            self.assertEqual(1, self.mock_proxy.call_count)
            self.assertTrue(mock_get_fsid.called)

    @common_mocks
    def test_uncloneable_bad_format(self):
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            location = 'rbd://abc/pool/image/snap'
            formats = ['qcow2', 'vmdk', 'vdi']
            for f in formats:
                self.assertFalse(
                    self.driver._is_cloneable(location, {'disk_format': f}))
            self.assertTrue(mock_get_fsid.called)

    def _copy_image(self):
        with mock.patch.object(tempfile, 'NamedTemporaryFile'):
            with mock.patch.object(os.path, 'exists') as mock_exists:
                mock_exists.return_value = True
                with mock.patch.object(image_utils, 'fetch_to_raw'):
                    with mock.patch.object(self.driver, 'delete_volume'):
                        with mock.patch.object(self.driver, '_resize'):
                            mock_image_service = mock.MagicMock()
                            args = [None, self.volume_a,
                                    mock_image_service, None]
                            self.driver.copy_image_to_volume(*args)

    @common_mocks
    def test_copy_image_no_volume_tmp(self):
        self.cfg.volume_tmp_dir = None
        self.cfg.image_conversion_dir = None
        self._copy_image()

    @common_mocks
    def test_copy_image_volume_tmp(self):
        self.cfg.volume_tmp_dir = None
        self.cfg.image_conversion_dir = '/var/run/cinder/tmp'
        self._copy_image()

    @common_mocks
    def test_update_volume_stats(self):
        client = self.mock_client.return_value
        client.__enter__.return_value = client
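        # test_update_volume_stats feeds the driver a canned ``ceph df``
        # reply (mon_command with '{"prefix":"df", "format":"json"}') and
        # expects capacity to come from the configured pool's entry rather
        # than the cluster totals.  A rough reconstruction of the
        # arithmetic, assuming the pool is 'rbd' as in the fixture below:
        #
        #     import json
        #     reply = json.loads(mon_json)  # the mocked mon_command stdout
        #     stats = [p for p in reply['pools']
        #              if p['name'] == 'rbd'][0]['stats']
        #     free_gb = round(stats['max_avail'] / 1024.0 ** 3, 2)    # 27.0
        #     total_gb = round((stats['max_avail'] + stats['bytes_used'])
        #                      / 1024.0 ** 3, 2)                      # 28.44
        #
        # which is why the expected dict below carries 28.44/27.0, and why
        # the error-path test falls back to 'unknown' when mon_command
        # returns a nonzero status code.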
client.cluster = mock.Mock() client.cluster.mon_command = mock.Mock() client.cluster.mon_command.return_value = ( 0, '{"stats":{"total_bytes":64385286144,' '"total_used_bytes":3289628672,"total_avail_bytes":61095657472},' '"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,' '"bytes_used":1546440971,"max_avail":28987613184,"objects":412}},' '{"name":"volumes","id":3,"stats":{"kb_used":0,"bytes_used":0,' '"max_avail":28987613184,"objects":0}}]}\n', '') self.driver.configuration.safe_get = mock.Mock() self.driver.configuration.safe_get.return_value = 'RBD' expected = dict( volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb=28.44, free_capacity_gb=27.0, reserved_percentage=0, multiattach=False) actual = self.driver.get_volume_stats(True) client.cluster.mon_command.assert_called_once_with( '{"prefix":"df", "format":"json"}', '') self.assertDictMatch(expected, actual) @common_mocks def test_update_volume_stats_error(self): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster = mock.Mock() client.cluster.mon_command = mock.Mock() client.cluster.mon_command.return_value = (22, '', '') self.driver.configuration.safe_get = mock.Mock() self.driver.configuration.safe_get.return_value = 'RBD' expected = dict(volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0, multiattach=False) actual = self.driver.get_volume_stats(True) client.cluster.mon_command.assert_called_once_with( '{"prefix":"df", "format":"json"}', '') self.assertDictMatch(expected, actual) @common_mocks def test_get_mon_addrs(self): with mock.patch.object(self.driver, '_execute') as mock_execute: mock_execute.return_value = (CEPH_MON_DUMP, '') hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver._get_mon_addrs()) @common_mocks def test_initialize_connection(self): hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] with mock.patch.object(self.driver, '_get_mon_addrs') as \ mock_get_mon_addrs: mock_get_mon_addrs.return_value = (hosts, ports) expected = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.cfg.rbd_pool, self.volume_a.name), 'hosts': hosts, 'ports': ports, 'auth_enabled': False, 'auth_username': None, 'secret_type': 'ceph', 'secret_uuid': None, 'volume_id': self.volume_a.id } } actual = self.driver.initialize_connection(self.volume_a, None) self.assertDictMatch(expected, actual) self.assertTrue(mock_get_mon_addrs.called) @ddt.data({'rbd_chunk_size': 1, 'order': 20}, {'rbd_chunk_size': 8, 'order': 23}, {'rbd_chunk_size': 32, 'order': 25}) @ddt.unpack @common_mocks def test_clone(self, rbd_chunk_size, order): self.cfg.rbd_store_chunk_size = rbd_chunk_size src_pool = u'images' src_image = u'image-name' src_snap = u'snapshot-name' client_stack = [] def mock__enter__(inst): def _inner(): client_stack.append(inst) return inst return _inner client = self.mock_client.return_value # capture both rados client used to perform the clone client.__enter__.side_effect = mock__enter__(client) self.driver._clone(self.volume_a, src_pool, src_image, src_snap) args = [client_stack[0].ioctx, str(src_image), str(src_snap), client_stack[1].ioctx, str(self.volume_a.name)] kwargs = {'features': client.features, 'order': 
order} self.mock_rbd.RBD.return_value.clone.assert_called_once_with( *args, **kwargs) self.assertEqual(2, client.__enter__.call_count) @common_mocks def test_extend_volume(self): fake_size = '20' size = int(fake_size) * units.Gi with mock.patch.object(self.driver, '_resize') as mock_resize: self.driver.extend_volume(self.volume_a, fake_size) mock_resize.assert_called_once_with(self.volume_a, size=size) @common_mocks def test_retype(self): context = {} diff = {'encryption': {}, 'extra_specs': {}} updates = {'name': 'testvolume', 'host': 'currenthost', 'id': 'fakeid'} fake_type = 'high-IOPS' volume = fake_volume.fake_volume_obj(context, **updates) # The hosts have been checked same before rbd.retype # is called. # RBD doesn't support multiple pools in a driver. host = {'host': 'currenthost'} self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) # The encryptions have been checked as same before rbd.retype # is called. diff['encryption'] = {} self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) # extra_specs changes are supported. diff['extra_specs'] = {'non-empty': 'non-empty'} self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) diff['extra_specs'] = {} self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) @common_mocks def test_update_migrated_volume(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as mock_rename: context = {} mock_rename.return_value = 0 model_update = self.driver.update_migrated_volume(context, self.volume_a, self.volume_b, 'available') mock_rename.assert_called_with(client.ioctx, 'volume-%s' % self.volume_b.id, 'volume-%s' % self.volume_a.id) self.assertEqual({'_name_id': None, 'provider_location': None}, model_update) def test_rbd_volume_proxy_init(self): mock_driver = mock.Mock(name='driver') mock_driver._connect_to_rados.return_value = (None, None) with driver.RBDVolumeProxy(mock_driver, self.volume_a.name): self.assertEqual(1, mock_driver._connect_to_rados.call_count) self.assertFalse(mock_driver._disconnect_from_rados.called) self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) mock_driver.reset_mock() snap = u'snapshot-name' with driver.RBDVolumeProxy(mock_driver, self.volume_a.name, snapshot=snap): self.assertEqual(1, mock_driver._connect_to_rados.call_count) self.assertFalse(mock_driver._disconnect_from_rados.called) self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) @common_mocks @mock.patch('time.sleep') def test_connect_to_rados(self, sleep_mock): # Default self.cfg.rados_connect_timeout = -1 self.mock_rados.Rados.return_value.open_ioctx.return_value = \ self.mock_rados.Rados.return_value.ioctx # default configured pool ret = self.driver._connect_to_rados() self.assertTrue(self.mock_rados.Rados.return_value.connect.called) # Expect no timeout if default is used self.mock_rados.Rados.return_value.connect.assert_called_once_with() self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1]) self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( self.cfg.rbd_pool) # different pool ret = self.driver._connect_to_rados('alt_pool') self.assertTrue(self.mock_rados.Rados.return_value.connect.called) self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1]) 
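        # test_connect_to_rados (concluded just below) pins three
        # behaviours: the timeout is only handed to Rados.connect() when
        # rados_connect_timeout is non-negative, the ioctx is opened on the
        # requested pool (defaulting to rbd_pool), and a failed open_ioctx
        # shuts the client down before VolumeBackendAPIException escapes.
        # Schematically -- a sketch with a hypothetical helper, with the
        # three shutdown calls asserted below presumably coming from a
        # retry wrapper around the real method:
        #
        #     def _connect_sketch(conf, pool=None):
        #         client = rados.Rados(rados_id=conf.rbd_user,
        #                              conffile=conf.rbd_ceph_conf)
        #         try:
        #             if conf.rados_connect_timeout >= 0:
        #                 client.connect(
        #                     timeout=conf.rados_connect_timeout)
        #             else:
        #                 client.connect()
        #             ioctx = client.open_ioctx(pool or conf.rbd_pool)
        #             return client, ioctx
        #         except rados.Error:
        #             client.shutdown()
        #             raise exception.VolumeBackendAPIException(
        #                 data='error connecting to ceph cluster')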
        self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
            'alt_pool')

        # With timeout
        self.cfg.rados_connect_timeout = 1
        self.mock_rados.Rados.return_value.connect.reset_mock()
        self.driver._connect_to_rados()
        self.mock_rados.Rados.return_value.connect.assert_called_once_with(
            timeout=1)

        # error
        self.mock_rados.Rados.return_value.open_ioctx.reset_mock()
        self.mock_rados.Rados.return_value.shutdown.reset_mock()
        self.mock_rados.Rados.return_value.open_ioctx.side_effect = (
            self.mock_rados.Error)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._connect_to_rados)
        self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
        self.assertEqual(
            3, self.mock_rados.Rados.return_value.shutdown.call_count)


class RBDImageIOWrapperTestCase(test.TestCase):
    def setUp(self):
        super(RBDImageIOWrapperTestCase, self).setUp()
        self.meta = mock.Mock()
        self.meta.user = 'mock_user'
        self.meta.conf = 'mock_conf'
        self.meta.pool = 'mock_pool'

        self.meta.image = mock.Mock()
        self.meta.image.read = mock.Mock()
        self.meta.image.write = mock.Mock()
        self.meta.image.size = mock.Mock()

        self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta)
        self.data_length = 1024
        self.full_data = b'abcd' * 256

    def test_init(self):
        self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta)
        self.assertEqual(0, self.mock_rbd_wrapper._offset)

    def test_inc_offset(self):
        self.mock_rbd_wrapper._inc_offset(10)
        self.mock_rbd_wrapper._inc_offset(10)
        self.assertEqual(20, self.mock_rbd_wrapper._offset)

    def test_rbd_image(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image)

    def test_rbd_user(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user)

    def test_rbd_pool(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool)

    def test_rbd_conf(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf)

    def test_read(self):
        def mock_read(offset, length):
            return self.full_data[offset:length]

        self.meta.image.read.side_effect = mock_read
        self.meta.image.size.return_value = self.data_length

        data = self.mock_rbd_wrapper.read()
        self.assertEqual(self.full_data, data)

        data = self.mock_rbd_wrapper.read()
        self.assertEqual(b'', data)

        self.mock_rbd_wrapper.seek(0)
        data = self.mock_rbd_wrapper.read()
        self.assertEqual(self.full_data, data)

        self.mock_rbd_wrapper.seek(0)
        data = self.mock_rbd_wrapper.read(10)
        self.assertEqual(self.full_data[:10], data)

    def test_write(self):
        self.mock_rbd_wrapper.write(self.full_data)
        self.assertEqual(1024, self.mock_rbd_wrapper._offset)

    def test_seekable(self):
        self.assertTrue(self.mock_rbd_wrapper.seekable)

    def test_seek(self):
        self.assertEqual(0, self.mock_rbd_wrapper._offset)
        self.mock_rbd_wrapper.seek(10)
        self.assertEqual(10, self.mock_rbd_wrapper._offset)
        self.mock_rbd_wrapper.seek(10)
        self.assertEqual(10, self.mock_rbd_wrapper._offset)
        self.mock_rbd_wrapper.seek(10, 1)
        self.assertEqual(20, self.mock_rbd_wrapper._offset)
        self.mock_rbd_wrapper.seek(0)
        self.mock_rbd_wrapper.write(self.full_data)
        self.meta.image.size.return_value = self.data_length
        self.mock_rbd_wrapper.seek(0)
        self.assertEqual(0, self.mock_rbd_wrapper._offset)

        self.mock_rbd_wrapper.seek(10, 2)
        self.assertEqual(self.data_length + 10, self.mock_rbd_wrapper._offset)
        self.mock_rbd_wrapper.seek(-10, 2)
        self.assertEqual(self.data_length - 10, self.mock_rbd_wrapper._offset)

        # test exceptions.
        self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3)
        self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1)
        # offset should not have been changed by any of the previous
        # operations.
self.assertEqual(self.data_length - 10, self.mock_rbd_wrapper._offset) def test_tell(self): self.assertEqual(0, self.mock_rbd_wrapper.tell()) self.mock_rbd_wrapper._inc_offset(10) self.assertEqual(10, self.mock_rbd_wrapper.tell()) def test_flush(self): with mock.patch.object(driver, 'LOG') as mock_logger: self.meta.image.flush = mock.Mock() self.mock_rbd_wrapper.flush() self.meta.image.flush.assert_called_once_with() self.meta.image.flush.reset_mock() # this should be caught and logged silently. self.meta.image.flush.side_effect = AttributeError self.mock_rbd_wrapper.flush() self.meta.image.flush.assert_called_once_with() msg = _("flush() not supported in this version of librbd") mock_logger.warning.assert_called_with(msg) def test_fileno(self): self.assertRaises(IOError, self.mock_rbd_wrapper.fileno) def test_close(self): self.mock_rbd_wrapper.close() class ManagedRBDTestCase(test_volume.DriverTestCase): driver_name = "cinder.volume.drivers.rbd.RBDDriver" def setUp(self): super(ManagedRBDTestCase, self).setUp() self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0, 'pools': {}} self.called = [] def _create_volume_from_image(self, expected_status, raw=False, clone_error=False): """Try to clone a volume from an image, and check status afterwards. NOTE: if clone_error is True we force the image type to raw otherwise clone_image is not called """ # See tests.image.fake for image types. if raw: image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6' else: image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' # creating volume testdata db_volume = {'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'availability_zone': 'fake_zone', 'attach_status': 'detached', 'host': 'dummy'} volume = objects.Volume(context=self.context, **db_volume) volume.create() try: if not clone_error: self.volume.create_volume(self.context, volume.id, request_spec={'image_id': image_id}, volume=volume) else: self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume.id, request_spec={'image_id': image_id}, volume=volume) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual(expected_status, volume.status) finally: # cleanup volume.destroy() @mock.patch.object(cinder.image.glance, 'get_default_image_service') def test_create_vol_from_image_status_available(self, mock_gdis): """Clone raw image then verify volume is in available state.""" def _mock_clone_image(context, volume, image_location, image_meta, image_service): return {'provider_location': None}, True with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=True) self.assertFalse(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertFalse(mock_create.called) self.assertTrue(mock_gdis.called) @mock.patch.object(cinder.image.glance, 'get_default_image_service') @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') def test_create_vol_from_non_raw_image_status_available(self, mock_fetch, mock_gdis): """Clone non-raw image then verify volume is in available state.""" def _mock_clone_image(context, volume, image_location, image_meta, image_service): return {'provider_location': None}, False mock_fetch.return_value = 
mock.MagicMock(spec=utils.get_file_spec()) with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=False) self.assertTrue(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertTrue(mock_create.called) self.assertTrue(mock_gdis.called) @mock.patch.object(cinder.image.glance, 'get_default_image_service') def test_create_vol_from_image_status_error(self, mock_gdis): """Fail to clone raw image then verify volume is in error state.""" with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = exception.CinderException with mock.patch.object(self.volume.driver, 'create_volume'): with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('error', raw=True, clone_error=True) self.assertFalse(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertFalse(self.volume.driver.create_volume.called) self.assertTrue(mock_gdis.called) def test_clone_failure(self): driver = self.volume.driver with mock.patch.object(driver, '_is_cloneable', lambda *args: False): image_loc = (mock.Mock(), None) actual = driver.clone_image(mock.Mock(), mock.Mock(), image_loc, {}, mock.Mock()) self.assertEqual(({}, False), actual) self.assertEqual(({}, False), driver.clone_image('', object(), None, {}, '')) def test_clone_success(self): expected = ({'provider_location': None}, True) driver = self.volume.driver with mock.patch.object(self.volume.driver, '_is_cloneable') as \ mock_is_cloneable: mock_is_cloneable.return_value = True with mock.patch.object(self.volume.driver, '_clone') as \ mock_clone: with mock.patch.object(self.volume.driver, '_resize') as \ mock_resize: image_loc = ('rbd://fee/fi/fo/fum', None) volume = {'name': 'vol1'} actual = driver.clone_image(mock.Mock(), volume, image_loc, {'disk_format': 'raw', 'id': 'id.foo'}, mock.Mock()) self.assertEqual(expected, actual) mock_clone.assert_called_once_with(volume, 'fi', 'fo', 'fum') mock_resize.assert_called_once_with(volume) def test_clone_multilocation_success(self): expected = ({'provider_location': None}, True) driver = self.volume.driver def cloneable_side_effect(url_location, image_meta): return url_location == 'rbd://fee/fi/fo/fum' with mock.patch.object(self.volume.driver, '_is_cloneable') \ as mock_is_cloneable, \ mock.patch.object(self.volume.driver, '_clone') as mock_clone, \ mock.patch.object(self.volume.driver, '_resize') \ as mock_resize: mock_is_cloneable.side_effect = cloneable_side_effect image_loc = ('rbd://bee/bi/bo/bum', [{'url': 'rbd://bee/bi/bo/bum'}, {'url': 'rbd://fee/fi/fo/fum'}]) volume = {'name': 'vol1'} image_meta = mock.sentinel.image_meta image_service = mock.sentinel.image_service actual = driver.clone_image(self.context, volume, image_loc, image_meta, image_service) self.assertEqual(expected, actual) self.assertEqual(2, mock_is_cloneable.call_count) mock_clone.assert_called_once_with(volume, 'fi', 'fo', 'fum') mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum', image_meta) mock_resize.assert_called_once_with(volume) def test_clone_multilocation_failure(self): expected = ({}, False) driver = self.volume.driver with mock.patch.object(driver, '_is_cloneable', return_value=False) \ 
                as mock_is_cloneable, \
                mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
                mock.patch.object(self.volume.driver, '_resize') \
                as mock_resize:
            image_loc = ('rbd://bee/bi/bo/bum',
                         [{'url': 'rbd://bee/bi/bo/bum'},
                          {'url': 'rbd://fee/fi/fo/fum'}])
            volume = {'name': 'vol1'}
            image_meta = mock.sentinel.image_meta
            image_service = mock.sentinel.image_service
            actual = driver.clone_image(self.context,
                                        volume,
                                        image_loc,
                                        image_meta,
                                        image_service)

            self.assertEqual(expected, actual)
            self.assertEqual(2, mock_is_cloneable.call_count)
            mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
                                              image_meta)
            mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
                                              image_meta)
            self.assertFalse(mock_clone.called)
            self.assertFalse(mock_resize.called)
cinder-8.0.0/cinder/tests/unit/fake_utils.py0000664000567000056710000000553312701406250022214 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2011 Citrix Systems, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""This module stubs out functions in cinder.utils."""

import re

from eventlet import greenthread
import six

from cinder import utils

_fake_execute_repliers = []
_fake_execute_log = []


def fake_execute_get_log():
    return _fake_execute_log


def fake_execute_clear_log():
    global _fake_execute_log
    _fake_execute_log = []


def fake_execute_set_repliers(repliers):
    """Allows the client to configure replies to commands."""
    global _fake_execute_repliers
    _fake_execute_repliers = repliers


def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
    """A reply handler for commands that haven't been added to the reply list.

    Returns empty strings for stdout and stderr.
    """
    return '', ''
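# A replier registered via fake_execute_set_repliers() is matched by
# regex against the joined command string and may be either a canned
# stdout string or a callable.  Hypothetical usage in a test (command
# names invented for illustration):
#
#     fake_execute_clear_log()
#     fake_execute_set_repliers([
#         (r'^vgs', 'fake-vg 10.00g\n'),                # static stdout
#         (r'^lvcreate', lambda cmd, **kw: ('', '')),   # callable reply
#     ])
#     out, err = fake_execute('vgs', '--noheadings')
#     assert out.startswith('fake-vg')
#     assert fake_execute_get_log() == ['vgs --noheadings']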
""" global _fake_execute_repliers process_input = kwargs.get('process_input', None) check_exit_code = kwargs.get('check_exit_code', 0) delay_on_retry = kwargs.get('delay_on_retry', True) attempts = kwargs.get('attempts', 1) run_as_root = kwargs.get('run_as_root', False) cmd_str = ' '.join(str(part) for part in cmd_parts) _fake_execute_log.append(cmd_str) reply_handler = fake_execute_default_reply_handler for fake_replier in _fake_execute_repliers: if re.match(fake_replier[0], cmd_str): reply_handler = fake_replier[1] break if isinstance(reply_handler, six.string_types): # If the reply handler is a string, return it as stdout reply = reply_handler, '' else: # Alternative is a function, so call it reply = reply_handler(cmd_parts, process_input=process_input, delay_on_retry=delay_on_retry, attempts=attempts, run_as_root=run_as_root, check_exit_code=check_exit_code) # Replicate the sleep call in the real function greenthread.sleep(0) return reply def stub_out_utils_execute(stubs): fake_execute_set_repliers([]) fake_execute_clear_log() stubs.Set(utils, 'execute', fake_execute) cinder-8.0.0/cinder/tests/unit/test_test_utils.py0000664000567000056710000000313212701406250023315 0ustar jenkinsjenkins00000000000000# # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import test from cinder.tests.unit import utils as test_utils class TestUtilsTestCase(test.TestCase): def test_get_test_admin_context(self): """get_test_admin_context's return value behaves like admin context.""" ctxt = test_utils.get_test_admin_context() self.assertIsNone(ctxt.project_id) self.assertIsNone(ctxt.user_id) self.assertIsNone(ctxt.domain) self.assertIsNone(ctxt.project_domain) self.assertIsNone(ctxt.user_domain) self.assertIsNone(ctxt.project_name) self.assertIsNone(ctxt.remote_address) self.assertIsNone(ctxt.auth_token) self.assertIsNone(ctxt.quota_class) self.assertIsNotNone(ctxt.request_id) self.assertIsNotNone(ctxt.timestamp) self.assertEqual(['admin'], ctxt.roles) self.assertEqual([], ctxt.service_catalog) self.assertEqual('no', ctxt.read_deleted) self.assertTrue(ctxt.read_deleted) self.assertTrue(ctxt.is_admin) cinder-8.0.0/cinder/tests/unit/test_quota.py0000664000567000056710000026724112701406250022264 0ustar jenkinsjenkins00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_utils import timeutils import six from cinder import backup from cinder import context from cinder import db from cinder.db.sqlalchemy import api as sqa_api from cinder.db.sqlalchemy import models as sqa_models from cinder import exception from cinder import objects from cinder.objects import fields from cinder import quota from cinder import quota_utils from cinder import test import cinder.tests.unit.image.fake from cinder import volume from keystonemiddleware import auth_token CONF = cfg.CONF class QuotaIntegrationTestCase(test.TestCase): def setUp(self): objects.register_all() super(QuotaIntegrationTestCase, self).setUp() self.volume_type_name = CONF.default_volume_type self.volume_type = db.volume_type_create( context.get_admin_context(), dict(name=self.volume_type_name)) self.addCleanup(db.volume_type_destroy, context.get_admin_context(), self.volume_type['id']) self.flags(quota_volumes=2, quota_snapshots=2, quota_gigabytes=20, quota_backups=2, quota_backup_gigabytes=20) self.user_id = 'admin' self.project_id = 'admin' self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) # Destroy the 'default' quota_class in the database to avoid # conflicts with the test cases here that are setting up their own # defaults. db.quota_class_destroy_all_by_name(self.context, 'default') self.addCleanup(cinder.tests.unit.image.fake.FakeImageService_reset) def _create_volume(self, size=1): """Create a test volume.""" vol = {} vol['user_id'] = self.user_id vol['project_id'] = self.project_id vol['size'] = size vol['status'] = 'available' vol['volume_type_id'] = self.volume_type['id'] vol['host'] = 'fake_host' vol['availability_zone'] = 'fake_zone' vol['attach_status'] = 'detached' volume = objects.Volume(context=self.context, **vol) volume.create() return volume def _create_snapshot(self, volume): snapshot = objects.Snapshot(self.context) snapshot.user_id = self.user_id or 'fake_user_id' snapshot.project_id = self.project_id or 'fake_project_id' snapshot.volume_id = volume['id'] snapshot.volume_size = volume['size'] snapshot.host = volume['host'] snapshot.status = 'available' snapshot.create() return snapshot def _create_backup(self, volume): backup = {} backup['user_id'] = self.user_id backup['project_id'] = self.project_id backup['volume_id'] = volume['id'] backup['volume_size'] = volume['size'] backup['status'] = fields.BackupStatus.AVAILABLE return db.backup_create(self.context, backup) def test_volume_size_limit_exceeds(self): resource = 'volumes_%s' % self.volume_type_name db.quota_class_create(self.context, 'default', resource, 1) flag_args = { 'quota_volumes': 10, 'quota_gigabytes': 1000, 'per_volume_size_limit': 5 } self.flags(**flag_args) self.assertRaises(exception.VolumeSizeExceedsLimit, volume.API().create, self.context, 10, '', '',) def test_too_many_volumes(self): volume_ids = [] for _i in range(CONF.quota_volumes): vol_ref = self._create_volume() volume_ids.append(vol_ref['id']) ex = self.assertRaises(exception.VolumeLimitExceeded, volume.API().create, self.context, 1, '', '', volume_type=self.volume_type) msg = ("Maximum number of volumes allowed (%d) exceeded for" " quota 'volumes'." 
% CONF.quota_volumes) self.assertEqual(msg, six.text_type(ex)) for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) def test_too_many_volumes_of_type(self): resource = 'volumes_%s' % self.volume_type_name db.quota_class_create(self.context, 'default', resource, 1) flag_args = { 'quota_volumes': 2000, 'quota_gigabytes': 2000 } self.flags(**flag_args) vol_ref = self._create_volume() ex = self.assertRaises(exception.VolumeLimitExceeded, volume.API().create, self.context, 1, '', '', volume_type=self.volume_type) msg = ("Maximum number of volumes allowed (1) exceeded for" " quota '%s'." % resource) self.assertEqual(msg, six.text_type(ex)) vol_ref.destroy() def test_too_many_snapshots_of_type(self): resource = 'snapshots_%s' % self.volume_type_name db.quota_class_create(self.context, 'default', resource, 1) flag_args = { 'quota_volumes': 2000, 'quota_gigabytes': 2000, } self.flags(**flag_args) vol_ref = self._create_volume() snap_ref = self._create_snapshot(vol_ref) self.assertRaises(exception.SnapshotLimitExceeded, volume.API().create_snapshot, self.context, vol_ref, '', '') snap_ref.destroy() vol_ref.destroy() def test_too_many_backups(self): resource = 'backups' db.quota_class_create(self.context, 'default', resource, 1) flag_args = { 'quota_backups': 2000, 'quota_backup_gigabytes': 2000 } self.flags(**flag_args) vol_ref = self._create_volume() backup_ref = self._create_backup(vol_ref) with mock.patch.object(backup.API, '_get_available_backup_service_host') as \ mock__get_available_backup_service: mock__get_available_backup_service.return_value = 'host' self.assertRaises(exception.BackupLimitExceeded, backup.API().create, self.context, 'name', 'description', vol_ref['id'], 'container', False, None) db.backup_destroy(self.context, backup_ref['id']) db.volume_destroy(self.context, vol_ref['id']) def test_too_many_gigabytes(self): volume_ids = [] vol_ref = self._create_volume(size=20) volume_ids.append(vol_ref['id']) raised_exc = self.assertRaises( exception.VolumeSizeExceedsAvailableQuota, volume.API().create, self.context, 1, '', '', volume_type=self.volume_type) expected = exception.VolumeSizeExceedsAvailableQuota( requested=1, quota=20, consumed=20) self.assertEqual(str(expected), str(raised_exc)) for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) def test_too_many_combined_gigabytes(self): vol_ref = self._create_volume(size=10) snap_ref = self._create_snapshot(vol_ref) self.assertRaises(exception.QuotaError, volume.API().create_snapshot, self.context, vol_ref, '', '') usages = db.quota_usage_get_all_by_project(self.context, self.project_id) self.assertEqual(20, usages['gigabytes']['in_use']) snap_ref.destroy() vol_ref.destroy() def test_too_many_combined_backup_gigabytes(self): vol_ref = self._create_volume(size=10000) backup_ref = self._create_backup(vol_ref) with mock.patch.object(backup.API, '_get_available_backup_service_host') as \ mock__get_available_backup_service: mock__get_available_backup_service.return_value = 'host' self.assertRaises( exception.VolumeBackupSizeExceedsAvailableQuota, backup.API().create, context=self.context, name='name', description='description', volume_id=vol_ref['id'], container='container', incremental=False) db.backup_destroy(self.context, backup_ref['id']) vol_ref.destroy() def test_no_snapshot_gb_quota_flag(self): self.flags(quota_volumes=2, quota_snapshots=2, quota_gigabytes=20, no_snapshot_gb_quota=True) vol_ref = self._create_volume(size=10) snap_ref = self._create_snapshot(vol_ref) snap_ref2 = 
volume.API().create_snapshot(self.context, vol_ref, '', '') # Make sure the snapshot volume_size isn't included in usage. vol_ref2 = volume.API().create(self.context, 10, '', '') usages = db.quota_usage_get_all_by_project(self.context, self.project_id) self.assertEqual(20, usages['gigabytes']['in_use']) self.assertEqual(0, usages['gigabytes']['reserved']) snap_ref.destroy() snap_ref2.destroy() vol_ref.destroy() vol_ref2.destroy() def test_backup_gb_quota_flag(self): self.flags(quota_volumes=2, quota_snapshots=2, quota_backups=2, quota_gigabytes=20 ) vol_ref = self._create_volume(size=10) backup_ref = self._create_backup(vol_ref) with mock.patch.object(backup.API, '_get_available_backup_service_host') as \ mock_mock__get_available_backup_service: mock_mock__get_available_backup_service.return_value = 'host' backup_ref2 = backup.API().create(self.context, 'name', 'description', vol_ref['id'], 'container', False, None) # Make sure the backup volume_size isn't included in usage. vol_ref2 = volume.API().create(self.context, 10, '', '') usages = db.quota_usage_get_all_by_project(self.context, self.project_id) self.assertEqual(20, usages['gigabytes']['in_use']) self.assertEqual(0, usages['gigabytes']['reserved']) db.backup_destroy(self.context, backup_ref['id']) db.backup_destroy(self.context, backup_ref2['id']) vol_ref.destroy() vol_ref2.destroy() def test_too_many_gigabytes_of_type(self): resource = 'gigabytes_%s' % self.volume_type_name db.quota_class_create(self.context, 'default', resource, 10) flag_args = { 'quota_volumes': 2000, 'quota_gigabytes': 2000, } self.flags(**flag_args) vol_ref = self._create_volume(size=10) raised_exc = self.assertRaises( exception.VolumeSizeExceedsAvailableQuota, volume.API().create, self.context, 1, '', '', volume_type=self.volume_type) expected = exception.VolumeSizeExceedsAvailableQuota( requested=1, quota=10, consumed=10, name=resource) self.assertEqual(str(expected), str(raised_exc)) vol_ref.destroy() class FakeContext(object): def __init__(self, project_id, quota_class): self.is_admin = False self.user_id = 'fake_user' self.project_id = project_id self.quota_class = quota_class def elevated(self): elevated = self.__class__(self.project_id, self.quota_class) elevated.is_admin = True return elevated class FakeDriver(object): def __init__(self, by_project=None, by_class=None, reservations=None): self.called = [] self.by_project = by_project or {} self.by_class = by_class or {} self.reservations = reservations or [] def get_by_project(self, context, project_id, resource): self.called.append(('get_by_project', context, project_id, resource)) try: return self.by_project[project_id][resource] except KeyError: raise exception.ProjectQuotaNotFound(project_id=project_id) def get_by_class(self, context, quota_class, resource): self.called.append(('get_by_class', context, quota_class, resource)) try: return self.by_class[quota_class][resource] except KeyError: raise exception.QuotaClassNotFound(class_name=quota_class) def get_default(self, context, resource, parent_project_id=None): self.called.append(('get_default', context, resource, parent_project_id)) return resource.default def get_defaults(self, context, resources, parent_project_id=None): self.called.append(('get_defaults', context, resources, parent_project_id)) return resources def get_class_quotas(self, context, resources, quota_class, defaults=True): self.called.append(('get_class_quotas', context, resources, quota_class, defaults)) return resources def get_project_quotas(self, context, resources, 
project_id, quota_class=None, defaults=True, usages=True): self.called.append(('get_project_quotas', context, resources, project_id, quota_class, defaults, usages)) return resources def limit_check(self, context, resources, values, project_id=None): self.called.append(('limit_check', context, resources, values, project_id)) def reserve(self, context, resources, deltas, expire=None, project_id=None): self.called.append(('reserve', context, resources, deltas, expire, project_id)) return self.reservations def commit(self, context, reservations, project_id=None): self.called.append(('commit', context, reservations, project_id)) def rollback(self, context, reservations, project_id=None): self.called.append(('rollback', context, reservations, project_id)) def destroy_by_project(self, context, project_id): self.called.append(('destroy_by_project', context, project_id)) def expire(self, context): self.called.append(('expire', context)) class BaseResourceTestCase(test.TestCase): def test_no_flag(self): resource = quota.BaseResource('test_resource') self.assertEqual('test_resource', resource.name) self.assertIsNone(resource.flag) self.assertEqual(-1, resource.default) def test_with_flag(self): # We know this flag exists, so use it... self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') self.assertEqual('test_resource', resource.name) self.assertEqual('quota_volumes', resource.flag) self.assertEqual(10, resource.default) def test_with_flag_no_quota(self): self.flags(quota_volumes=-1) resource = quota.BaseResource('test_resource', 'quota_volumes') self.assertEqual('test_resource', resource.name) self.assertEqual('quota_volumes', resource.flag) self.assertEqual(-1, resource.default) def test_quota_no_project_no_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver() context = FakeContext(None, None) quota_value = resource.quota(driver, context) self.assertEqual(10, quota_value) def test_quota_with_project_no_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver( by_project=dict( test_project=dict(test_resource=15), )) context = FakeContext('test_project', None) quota_value = resource.quota(driver, context) self.assertEqual(15, quota_value) def test_quota_no_project_with_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver( by_class=dict( test_class=dict(test_resource=20), )) context = FakeContext(None, 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(20, quota_value) def test_quota_with_project_with_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), ), by_class=dict(test_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(15, quota_value) def test_quota_override_project_with_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), override_project=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, project_id='override_project') self.assertEqual(20, quota_value) def 
test_quota_override_subproject_no_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes', parent_project_id='test_parent_project') driver = FakeDriver() context = FakeContext('test_project', None) quota_value = resource.quota(driver, context) self.assertEqual(0, quota_value) def test_quota_with_project_override_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_class=dict( test_class=dict(test_resource=15), override_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, quota_class='override_class') self.assertEqual(20, quota_value) class VolumeTypeResourceTestCase(test.TestCase): def test_name_and_flag(self): volume_type_name = 'foo' volume = {'name': volume_type_name, 'id': 'myid'} resource = quota.VolumeTypeResource('volumes', volume) self.assertEqual('volumes_%s' % volume_type_name, resource.name) self.assertIsNone(resource.flag) self.assertEqual(-1, resource.default) class QuotaEngineTestCase(test.TestCase): def test_init(self): quota_obj = quota.QuotaEngine() self.assertEqual({}, quota_obj.resources) self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver) def test_init_override_string(self): quota_obj = quota.QuotaEngine( quota_driver_class='cinder.tests.unit.test_quota.FakeDriver') self.assertEqual({}, quota_obj.resources) self.assertIsInstance(quota_obj._driver, FakeDriver) def test_init_override_obj(self): quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver) self.assertEqual({}, quota_obj.resources) self.assertEqual(FakeDriver, quota_obj._driver) def test_register_resource(self): quota_obj = quota.QuotaEngine() resource = quota.AbsoluteResource('test_resource') quota_obj.register_resource(resource) self.assertEqual(dict(test_resource=resource), quota_obj.resources) def test_register_resources(self): quota_obj = quota.QuotaEngine() resources = [ quota.AbsoluteResource('test_resource1'), quota.AbsoluteResource('test_resource2'), quota.AbsoluteResource('test_resource3'), ] quota_obj.register_resources(resources) self.assertEqual(dict(test_resource1=resources[0], test_resource2=resources[1], test_resource3=resources[2], ), quota_obj.resources) def test_get_by_project(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver( by_project=dict( test_project=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_project(context, 'test_project', 'test_resource') self.assertEqual([('get_by_project', context, 'test_project', 'test_resource'), ], driver.called) self.assertEqual(42, result) def test_get_by_class(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver( by_class=dict( test_class=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_class(context, 'test_class', 'test_resource') self.assertEqual([('get_by_class', context, 'test_class', 'test_resource'), ], driver.called) self.assertEqual(42, result) def _make_quota_obj(self, driver): quota_obj = quota.QuotaEngine(quota_driver_class=driver) resources = [ quota.AbsoluteResource('test_resource4'), quota.AbsoluteResource('test_resource3'), quota.AbsoluteResource('test_resource2'), quota.AbsoluteResource('test_resource1'), ] quota_obj.register_resources(resources) return quota_obj def test_get_defaults(self): context = FakeContext(None, None) parent_project_id = None driver 
= FakeDriver() quota_obj = self._make_quota_obj(driver) result = quota_obj.get_defaults(context) self.assertEqual([('get_defaults', context, quota_obj.resources, parent_project_id), ], driver.called) self.assertEqual(quota_obj.resources, result) def test_get_class_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_class_quotas(context, 'test_class') result2 = quota_obj.get_class_quotas(context, 'test_class', False) self.assertEqual([ ('get_class_quotas', context, quota_obj.resources, 'test_class', True), ('get_class_quotas', context, quota_obj.resources, 'test_class', False), ], driver.called) self.assertEqual(quota_obj.resources, result1) self.assertEqual(quota_obj.resources, result2) def test_get_project_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_project_quotas(context, 'test_project') result2 = quota_obj.get_project_quotas(context, 'test_project', quota_class='test_class', defaults=False, usages=False) self.assertEqual([ ('get_project_quotas', context, quota_obj.resources, 'test_project', None, True, True), ('get_project_quotas', context, quota_obj.resources, 'test_project', 'test_class', False, False), ], driver.called) self.assertEqual(quota_obj.resources, result1) self.assertEqual(quota_obj.resources, result2) def test_get_subproject_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_project_quotas(context, 'test_project') result2 = quota_obj.get_project_quotas(context, 'test_project', quota_class='test_class', defaults=False, usages=False) self.assertEqual([ ('get_project_quotas', context, quota_obj.resources, 'test_project', None, True, True), ('get_project_quotas', context, quota_obj.resources, 'test_project', 'test_class', False, False), ], driver.called) self.assertEqual(quota_obj.resources, result1) self.assertEqual(quota_obj.resources, result2) def test_count_no_resource(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) self.assertRaises(exception.QuotaResourceUnknown, quota_obj.count, context, 'test_resource5', True, foo='bar') def test_count_wrong_resource(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) self.assertRaises(exception.QuotaResourceUnknown, quota_obj.count, context, 'test_resource1', True, foo='bar') def test_count(self): def fake_count(context, *args, **kwargs): self.assertEqual((True,), args) self.assertEqual(dict(foo='bar'), kwargs) return 5 context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.register_resource(quota.CountableResource('test_resource5', fake_count)) result = quota_obj.count(context, 'test_resource5', True, foo='bar') self.assertEqual(5, result) def test_limit_check(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.limit_check(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) self.assertEqual([ ('limit_check', context, quota_obj.resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1,), None), ], driver.called) def test_reserve(self): context = FakeContext(None, None) driver = FakeDriver(reservations=['resv-01', 'resv-02', 'resv-03', 'resv-04', ]) quota_obj = self._make_quota_obj(driver) result1 = 
quota_obj.reserve(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) result2 = quota_obj.reserve(context, expire=3600, test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) result3 = quota_obj.reserve(context, project_id='fake_project', test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) self.assertEqual([ ('reserve', context, quota_obj.resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1, ), None, None), ('reserve', context, quota_obj.resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), 3600, None), ('reserve', context, quota_obj.resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), None, 'fake_project'), ], driver.called) self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result1) self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result2) self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result3) def test_commit(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual([('commit', context, ['resv-01', 'resv-02', 'resv-03'], None), ], driver.called) def test_rollback(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual([('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None), ], driver.called) def test_destroy_by_project(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.destroy_by_project(context, 'test_project') self.assertEqual([('destroy_by_project', context, 'test_project'), ], driver.called) def test_expire(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.expire(context) self.assertEqual([('expire', context), ], driver.called) def test_resource_names(self): quota_obj = self._make_quota_obj(None) self.assertEqual(['test_resource1', 'test_resource2', 'test_resource3', 'test_resource4'], quota_obj.resource_names) class VolumeTypeQuotaEngineTestCase(test.TestCase): def test_default_resources(self): def fake_vtga(context, inactive=False, filters=None): return {} self.stubs.Set(db, 'volume_type_get_all', fake_vtga) engine = quota.VolumeTypeQuotaEngine() self.assertEqual(['backup_gigabytes', 'backups', 'gigabytes', 'per_volume_gigabytes', 'snapshots', 'volumes'], engine.resource_names) def test_volume_type_resources(self): ctx = context.RequestContext('admin', 'admin', is_admin=True) vtype = db.volume_type_create(ctx, {'name': 'type1'}) vtype2 = db.volume_type_create(ctx, {'name': 'type_2'}) def fake_vtga(context, inactive=False, filters=None): return { 'type1': { 'id': vtype['id'], 'name': 'type1', 'extra_specs': {}, }, 'type_2': { 'id': vtype['id'], 'name': 'type_2', 'extra_specs': {}, }, } self.stubs.Set(db, 'volume_type_get_all', fake_vtga) engine = quota.VolumeTypeQuotaEngine() self.assertEqual(['backup_gigabytes', 'backups', 'gigabytes', 'gigabytes_type1', 'gigabytes_type_2', 'per_volume_gigabytes', 'snapshots', 'snapshots_type1', 'snapshots_type_2', 'volumes', 'volumes_type1', 'volumes_type_2', ], engine.resource_names) db.volume_type_destroy(ctx, vtype['id']) db.volume_type_destroy(ctx, vtype2['id']) def test_update_quota_resource(self): ctx = context.RequestContext('admin', 
'admin', is_admin=True) engine = quota.VolumeTypeQuotaEngine() engine.update_quota_resource(ctx, 'type1', 'type2') class DbQuotaDriverBaseTestCase(test.TestCase): def setUp(self): super(DbQuotaDriverBaseTestCase, self).setUp() self.flags(quota_volumes=10, quota_snapshots=10, quota_gigabytes=1000, quota_backups=10, quota_backup_gigabytes=1000, reservation_expire=86400, until_refresh=0, max_age=0, ) # These can be used for expected defaults for child/non-child self._default_quotas_non_child = dict( volumes=10, snapshots=10, gigabytes=1000, backups=10, backup_gigabytes=1000, per_volume_gigabytes=-1) self._default_quotas_child = dict( volumes=0, snapshots=0, gigabytes=0, backups=0, backup_gigabytes=0, per_volume_gigabytes=0) self.calls = [] patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def _stub_quota_class_get_default(self): # Stub out quota_class_get_default def fake_qcgd(context): self.calls.append('quota_class_get_default') return dict(volumes=10, snapshots=10, gigabytes=1000, backups=10, backup_gigabytes=1000 ) self.stubs.Set(db, 'quota_class_get_default', fake_qcgd) def _stub_volume_type_get_all(self): def fake_vtga(context, inactive=False, filters=None): return {} self.stubs.Set(db, 'volume_type_get_all', fake_vtga) def _stub_quota_class_get_all_by_name(self): # Stub out quota_class_get_all_by_name def fake_qcgabn(context, quota_class): self.calls.append('quota_class_get_all_by_name') self.assertEqual('test_class', quota_class) return dict(gigabytes=500, volumes=10, snapshots=10, backups=10, backup_gigabytes=500) self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn) def _stub_allocated_get_all_by_project(self, allocated_quota=False): def fake_qagabp(context, project_id): self.calls.append('quota_allocated_get_all_by_project') if allocated_quota: return dict(project_id=project_id, volumes=3) return dict(project_id=project_id) self.stubs.Set(db, 'quota_allocated_get_all_by_project', fake_qagabp) class DbQuotaDriverTestCase(DbQuotaDriverBaseTestCase): def setUp(self): super(DbQuotaDriverTestCase, self).setUp() self.driver = quota.DbQuotaDriver() def test_get_defaults(self): # Use our pre-defined resources self._stub_quota_class_get_default() self._stub_volume_type_get_all() result = self.driver.get_defaults(None, quota.QUOTAS.resources) self.assertEqual( dict( volumes=10, snapshots=10, gigabytes=1000, backups=10, backup_gigabytes=1000, per_volume_gigabytes=-1), result) def test_get_class_quotas(self): self._stub_quota_class_get_all_by_name() self._stub_volume_type_get_all() result = self.driver.get_class_quotas(None, quota.QUOTAS.resources, 'test_class') self.assertEqual(['quota_class_get_all_by_name'], self.calls) self.assertEqual(dict(volumes=10, gigabytes=500, snapshots=10, backups=10, backup_gigabytes=500, per_volume_gigabytes=-1), result) def test_get_class_quotas_no_defaults(self): self._stub_quota_class_get_all_by_name() result = self.driver.get_class_quotas(None, quota.QUOTAS.resources, 'test_class', False) self.assertEqual(['quota_class_get_all_by_name'], self.calls) self.assertEqual(dict(volumes=10, gigabytes=500, snapshots=10, backups=10, backup_gigabytes=500), result) def _stub_get_by_project(self): def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual('test_project', project_id) return dict(volumes=10, gigabytes=50, reserved=0, snapshots=10, backups=10, backup_gigabytes=50) def fake_qugabp(context, 
project_id): self.calls.append('quota_usage_get_all_by_project') self.assertEqual('test_project', project_id) return dict(volumes=dict(in_use=2, reserved=0), snapshots=dict(in_use=2, reserved=0), gigabytes=dict(in_use=10, reserved=0), backups=dict(in_use=2, reserved=0), backup_gigabytes=dict(in_use=10, reserved=0) ) self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp) self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp) self._stub_quota_class_get_all_by_name() self._stub_quota_class_get_default() def test_get_project_quotas(self): self._stub_get_by_project() self._stub_volume_type_get_all() self._stub_allocated_get_all_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, 'test_project') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_allocated_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', ], self.calls) self.assertEqual(dict(volumes=dict(limit=10, in_use=2, reserved=0, ), snapshots=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), backups=dict(limit=10, in_use=2, reserved=0, ), backup_gigabytes=dict(limit=50, in_use=10, reserved=0, ), per_volume_gigabytes=dict(in_use=0, limit=-1, reserved= 0) ), result) def test_get_root_project_with_subprojects_quotas(self): self._stub_get_by_project() self._stub_volume_type_get_all() self._stub_allocated_get_all_by_project(allocated_quota=True) result = self.driver.get_project_quotas( FakeContext('test_project', None), quota.QUOTAS.resources, 'test_project') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_allocated_get_all_by_project', 'quota_class_get_default', ], self.calls) self.assertEqual(dict(volumes=dict(limit=10, in_use=2, reserved=0, allocated=3, ), snapshots=dict(limit=10, in_use=2, reserved=0, allocated=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, allocated=0, ), backups=dict(limit=10, in_use=2, reserved=0, allocated=0, ), backup_gigabytes=dict(limit=50, in_use=10, reserved=0, allocated=0, ), per_volume_gigabytes=dict(in_use=0, limit=-1, reserved=0, allocated=0) ), result) def test_get_project_quotas_alt_context_no_class(self): self._stub_get_by_project() self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS.resources, 'test_project') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_default', ], self.calls) self.assertEqual(dict(volumes=dict(limit=10, in_use=2, reserved=0, ), snapshots=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), backups=dict(limit=10, in_use=2, reserved=0, ), backup_gigabytes=dict(limit=50, in_use=10, reserved=0, ), per_volume_gigabytes=dict(in_use=0, limit=-1, reserved=0) ), result) def test_get_project_quotas_alt_context_with_class(self): self._stub_get_by_project() self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS.resources, 'test_project', quota_class='test_class') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', ], self.calls) self.assertEqual(dict(volumes=dict(limit=10, in_use=2, reserved=0, ), snapshots=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), backups=dict(limit=10, in_use=2, reserved=0, ), 
backup_gigabytes=dict(limit=50, in_use=10, reserved=0, ), per_volume_gigabytes=dict(in_use=0, limit=-1, reserved= 0)), result) def test_get_project_quotas_no_defaults(self): self._stub_get_by_project() self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, 'test_project', defaults=False) self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', ], self.calls) self.assertEqual(dict(backups=dict(limit=10, in_use=2, reserved=0, ), backup_gigabytes=dict(limit=50, in_use=10, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), snapshots=dict(limit=10, in_use=2, reserved=0, ), volumes=dict(limit=10, in_use=2, reserved=0, ), ), result) def test_get_project_quotas_no_usages(self): self._stub_get_by_project() self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, 'test_project', usages=False) self.assertEqual(['quota_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_default', ], self.calls) self.assertEqual(dict(volumes=dict(limit=10, ), snapshots=dict(limit=10, ), backups=dict(limit=10, ), gigabytes=dict(limit=50, ), backup_gigabytes=dict(limit=50, ), per_volume_gigabytes=dict(limit=-1, )), result) def _stub_get_project_quotas(self): def fake_get_project_quotas(context, resources, project_id, quota_class=None, defaults=True, usages=True, parent_project_id=None): self.calls.append('get_project_quotas') return {k: dict(limit=v.default) for k, v in resources.items()} self.stubs.Set(self.driver, 'get_project_quotas', fake_get_project_quotas) def test_get_quotas_has_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS.resources, ['unknown'], True) self.assertEqual([], self.calls) def test_get_quotas_no_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS.resources, ['unknown'], False) self.assertEqual([], self.calls) def test_get_quotas_has_sync_no_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS.resources, ['metadata_items'], True) self.assertEqual([], self.calls) def test_get_quotas_no_sync_has_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS.resources, ['volumes'], False) self.assertEqual([], self.calls) def test_get_quotas_has_sync(self): self._stub_get_project_quotas() result = self.driver._get_quotas(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, ['volumes', 'gigabytes'], True) self.assertEqual(['get_project_quotas'], self.calls) self.assertEqual(dict(volumes=10, gigabytes=1000, ), result) def _stub_quota_reserve(self): def fake_quota_reserve(context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=None): self.calls.append(('quota_reserve', expire, until_refresh, max_age)) return ['resv-1', 'resv-2', 'resv-3'] self.stubs.Set(db, 'quota_reserve', fake_quota_reserve) def test_reserve_bad_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.assertRaises(exception.InvalidReservationExpiration, self.driver.reserve, FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, 
dict(volumes=2), expire='invalid') self.assertEqual([], self.calls) def test_reserve_default_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2)) expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_int_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=3600) expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_timedelta_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() expire_delta = datetime.timedelta(seconds=60) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=expire_delta) expire = timeutils.utcnow() + expire_delta self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_datetime_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_until_refresh(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.flags(until_refresh=500) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 500, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_max_age(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.flags(max_age=86400) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 86400), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def _stub_quota_destroy_by_project(self): def fake_quota_destroy_by_project(context, project_id): self.calls.append(('quota_destroy_by_project', project_id)) return None self.stubs.Set(sqa_api, 'quota_destroy_by_project', fake_quota_destroy_by_project) def test_destroy_quota_by_project(self): self._stub_quota_destroy_by_project() self.driver.destroy_by_project(FakeContext('test_project', 'test_class'), 'test_project') self.assertEqual([('quota_destroy_by_project', ('test_project')), ], self.calls) class NestedDbQuotaDriverBaseTestCase(DbQuotaDriverBaseTestCase): def setUp(self): super(NestedDbQuotaDriverBaseTestCase, self).setUp() self.context = context.RequestContext('user_id', 'project_id', is_admin=True, auth_token="fake_token") self.auth_url = 'http://localhost:5000' self._child_proj_id = 'child_id' 
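        # 'child_id' is reported by the fake keystone client below as
        # having a parent project; 'non_child_id' is treated as a root
        # (non-child) project.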
        self._non_child_proj_id = 'non_child_id'
        keystone_mock = mock.Mock()
        keystone_mock.version = 'v3'

        class FakeProject(object):
            def __init__(self, parent_id):
                self.parent_id = parent_id
                self.parents = {parent_id: None}
                self.domain_id = 'default'

        def fake_get_project(project_id, subtree_as_ids=False,
                             parents_as_ids=False):
            # Enable imitation of projects with and without parents
            if project_id == self._child_proj_id:
                return FakeProject('parent_id')
            else:
                return FakeProject(None)

        keystone_mock.projects.get.side_effect = fake_get_project

        def _keystone_mock(self):
            return keystone_mock

        keystone_patcher = mock.patch('cinder.quota_utils._keystone_client',
                                      _keystone_mock)
        keystone_patcher.start()
        self.addCleanup(keystone_patcher.stop)

        self.fixture = self.useFixture(config_fixture.Config(auth_token.CONF))
        self.fixture.config(auth_uri=self.auth_url,
                            group='keystone_authtoken')

        self.driver = quota.NestedDbQuotaDriver()

    def _stub_get_by_subproject(self):
        def fake_qgabp(context, project_id):
            self.calls.append('quota_get_all_by_project')
            return dict(volumes=10, gigabytes=50, reserved=0)

        def fake_qugabp(context, project_id):
            self.calls.append('quota_usage_get_all_by_project')
            return dict(volumes=dict(in_use=2, reserved=0),
                        gigabytes=dict(in_use=10, reserved=0))

        self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
        self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
        self._stub_quota_class_get_all_by_name()


class NestedDbQuotaDriverTestCase(NestedDbQuotaDriverBaseTestCase):
    def test_get_defaults(self):
        self._stub_volume_type_get_all()

        # Test for child project defaults
        result = self.driver.get_defaults(self.context,
                                          quota.QUOTAS.resources,
                                          self._child_proj_id)
        self.assertEqual(self._default_quotas_child, result)

        # Test for non-child project defaults
        result = self.driver.get_defaults(self.context,
                                          quota.QUOTAS.resources,
                                          self._non_child_proj_id)
        self.assertEqual(self._default_quotas_non_child, result)

    def test_subproject_enforce_defaults(self):
        # Non-child defaults should allow volume to get created
        self.driver.reserve(self.context, quota.QUOTAS.resources,
                            {'volumes': 1, 'gigabytes': 1},
                            project_id=self._non_child_proj_id)

        # Child defaults should not allow volume to be created
        self.assertRaises(exception.OverQuota,
                          self.driver.reserve, self.context,
                          quota.QUOTAS.resources,
                          {'volumes': 1, 'gigabytes': 1},
                          project_id=self._child_proj_id)

    def test_get_subproject_quotas(self):
        self._stub_get_by_subproject()
        self._stub_volume_type_get_all()
        self._stub_allocated_get_all_by_project(allocated_quota=True)
        result = self.driver.get_project_quotas(
            self.context, quota.QUOTAS.resources, self._child_proj_id)
        self.assertEqual(['quota_get_all_by_project',
                          'quota_usage_get_all_by_project',
                          'quota_allocated_get_all_by_project', ],
                         self.calls)
        self.assertEqual(dict(volumes=dict(limit=10,
                                           in_use=2,
                                           reserved=0,
                                           allocated=3, ),
                              snapshots=dict(limit=0,
                                             in_use=0,
                                             reserved=0,
                                             allocated=0, ),
                              gigabytes=dict(limit=50,
                                             in_use=10,
                                             reserved=0,
                                             allocated=0, ),
                              backups=dict(limit=0,
                                           in_use=0,
                                           reserved=0,
                                           allocated=0, ),
                              backup_gigabytes=dict(limit=0,
                                                    in_use=0,
                                                    reserved=0,
                                                    allocated=0, ),
                              per_volume_gigabytes=dict(in_use=0,
                                                        limit=0,
                                                        reserved=0,
                                                        allocated=0)
                              ), result)


class NestedQuotaValidation(NestedDbQuotaDriverBaseTestCase):
    def setUp(self):
        super(NestedQuotaValidation, self).setUp()
        """
        Quota hierarchy setup like so
        +-----------+
        |           |
        |     A     |
        |    / \    |
        |   B   C   |
        |  /        |
        | D         |
        +-----------+
        """
        self.project_tree = {'A': {'B': {'D': None}, 'C': None}}
        self.proj_vals = {
            'A': {'limit': 7, 'in_use': 1, 'alloc': 6},
            'B': {'limit': 3,
'in_use': 1, 'alloc': 2}, 'D': {'limit': 2, 'in_use': 0}, 'C': {'limit': 3, 'in_use': 3}, } # Just using one resource currently for simplicity of test self.resources = {'volumes': quota.ReservableResource( 'volumes', '_sync_volumes', 'quota_volumes')} to_patch = [('cinder.db.quota_allocated_get_all_by_project', self._fake_quota_allocated_get_all_by_project), ('cinder.db.quota_get_all_by_project', self._fake_quota_get_all_by_project), ('cinder.db.quota_usage_get_all_by_project', self._fake_quota_usage_get_all_by_project)] for patch_path, patch_obj in to_patch: patcher = mock.patch(patch_path, patch_obj) patcher.start() self.addCleanup(patcher.stop) def _fake_quota_get_all_by_project(self, context, project_id): return {'volumes': self.proj_vals[project_id]['limit']} def _fake_quota_usage_get_all_by_project(self, context, project_id): return {'volumes': self.proj_vals[project_id]} def _fake_quota_allocated_get_all_by_project(self, context, project_id): ret = {'project_id': project_id} proj_val = self.proj_vals[project_id] if 'alloc' in proj_val: ret['volumes'] = proj_val['alloc'] return ret def test_validate_nested_quotas(self): self.driver.validate_nested_setup(self.context, self.resources, self.project_tree) # Fail because 7 - 2 < 3 + 3 self.proj_vals['A']['in_use'] = 2 self.assertRaises(exception.InvalidNestedQuotaSetup, self.driver.validate_nested_setup, self.context, self.resources, self.project_tree) self.proj_vals['A']['in_use'] = 1 # Fail because 7 - 1 < 3 + 7 self.proj_vals['C']['limit'] = 7 self.assertRaises(exception.InvalidNestedQuotaSetup, self.driver.validate_nested_setup, self.context, self.resources, self.project_tree) self.proj_vals['C']['limit'] = 3 # Fail because 3 < 4 self.proj_vals['D']['limit'] = 4 self.assertRaises(exception.InvalidNestedQuotaSetup, self.driver.validate_nested_setup, self.context, self.resources, self.project_tree) self.proj_vals['D']['limit'] = 2 def test_validate_nested_quotas_usage_over_limit(self): self.proj_vals['D']['in_use'] = 5 self.assertRaises(exception.InvalidNestedQuotaSetup, self.driver.validate_nested_setup, self.context, self.resources, self.project_tree) def test_validate_nested_quota_bad_allocated_quotas(self): self.proj_vals['A']['alloc'] = 5 self.proj_vals['B']['alloc'] = 8 self.assertRaises(exception.InvalidNestedQuotaSetup, self.driver.validate_nested_setup, self.context, self.resources, self.project_tree) def test_validate_nested_quota_negative_child_limits(self): # Redefining the project limits with -1, doing it all in this test # for readability self.proj_vals = { 'A': {'limit': 8, 'in_use': 1}, 'B': {'limit': -1, 'in_use': 3}, 'D': {'limit': 4, 'in_use': 0}, 'C': {'limit': 2, 'in_use': 2}, } # A's child usage is 3 (from B) + 4 (from D) + 2 (from C) = 9 self.assertRaises(exception.InvalidNestedQuotaSetup, self.driver.validate_nested_setup, self.context, self.resources, self.project_tree) self.proj_vals['D']['limit'] = 2 self.driver.validate_nested_setup( self.context, self.resources, self.project_tree, fix_allocated_quotas=True) def test_get_cur_project_allocated(self): # Redefining the project limits with -1, doing it all in this test # for readability self.proj_vals = { # Allocated are here to simulate a bad existing value 'A': {'limit': 8, 'in_use': 1, 'alloc': 6}, 'B': {'limit': -1, 'in_use': 3, 'alloc': 2}, 'D': {'limit': 1, 'in_use': 0}, 'C': {'limit': 2, 'in_use': 2}, } self.driver._allocated = {} allocated_a = self.driver._get_cur_project_allocated( self.context, self.resources['volumes'], self.project_tree) # A's 
allocated will be: # 2 (from C's limit) + 3 (from B's in-use) + 1 (from D's limit) = 6 self.assertEqual(6, allocated_a) # B's allocated value should also be calculated and cached as part # of A's calculation self.assertEqual(1, self.driver._allocated['B']['volumes']) class FakeSession(object): def begin(self): return self def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): return False def query(self, *args, **kwargs): pass class FakeUsage(sqa_models.QuotaUsage): def save(self, *args, **kwargs): pass class QuotaReserveSqlAlchemyTestCase(test.TestCase): # cinder.db.sqlalchemy.api.quota_reserve is so complex it needs its # own test case, and since it's a quota manipulator, this is the # best place to put it... def setUp(self): super(QuotaReserveSqlAlchemyTestCase, self).setUp() self.sync_called = set() def make_sync(res_name): def fake_sync(context, project_id, volume_type_id=None, volume_type_name=None, session=None): self.sync_called.add(res_name) if res_name in self.usages: if self.usages[res_name].in_use < 0: return {res_name: 2} else: return {res_name: self.usages[res_name].in_use - 1} return {res_name: 0} return fake_sync self.resources = {} QUOTA_SYNC_FUNCTIONS = {} for res_name in ('volumes', 'gigabytes'): res = quota.ReservableResource(res_name, '_sync_%s' % res_name) QUOTA_SYNC_FUNCTIONS['_sync_%s' % res_name] = make_sync(res_name) self.resources[res_name] = res self.stubs.Set(sqa_api, 'QUOTA_SYNC_FUNCTIONS', QUOTA_SYNC_FUNCTIONS) self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.usages = {} self.usages_created = {} self.reservations_created = {} def fake_get_session(): return FakeSession() def fake_get_quota_usages(context, session, project_id): return self.usages.copy() def fake_quota_usage_create(context, project_id, resource, in_use, reserved, until_refresh, session=None, save=True): quota_usage_ref = self._make_quota_usage( project_id, resource, in_use, reserved, until_refresh, timeutils.utcnow(), timeutils.utcnow()) self.usages_created[resource] = quota_usage_ref return quota_usage_ref def fake_reservation_create(context, uuid, usage_id, project_id, resource, delta, expire, session=None, allocated_id=None): reservation_ref = self._make_reservation( uuid, usage_id, project_id, resource, delta, expire, timeutils.utcnow(), timeutils.utcnow(), allocated_id) self.reservations_created[resource] = reservation_ref return reservation_ref def fake_qagabp(context, project_id): self.assertEqual('test_project', project_id) return {'project_id': project_id} self.stubs.Set(sqa_api, 'quota_allocated_get_all_by_project', fake_qagabp) self.stubs.Set(sqa_api, 'get_session', fake_get_session) self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages) self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create) self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create) patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def _make_quota_usage(self, project_id, resource, in_use, reserved, until_refresh, created_at, updated_at): quota_usage_ref = FakeUsage() quota_usage_ref.id = len(self.usages) + len(self.usages_created) quota_usage_ref.project_id = project_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh quota_usage_ref.created_at = created_at quota_usage_ref.updated_at = updated_at 
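        # Fake usage rows start out live, so clear the soft-delete markers
        # before handing the row back.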
        quota_usage_ref.deleted_at = None
        quota_usage_ref.deleted = False
        return quota_usage_ref

    def init_usage(self, project_id, resource, in_use, reserved,
                   until_refresh=None, created_at=None, updated_at=None):
        if created_at is None:
            created_at = timeutils.utcnow()
        if updated_at is None:
            updated_at = timeutils.utcnow()

        quota_usage_ref = self._make_quota_usage(project_id, resource,
                                                 in_use, reserved,
                                                 until_refresh,
                                                 created_at, updated_at)
        self.usages[resource] = quota_usage_ref

    def compare_usage(self, usage_dict, expected):
        for usage in expected:
            resource = usage['resource']
            for key, value in usage.items():
                actual = getattr(usage_dict[resource], key)
                self.assertEqual(value, actual,
                                 "%s != %s on usage for resource %s" %
                                 (actual, value, resource))

    def _make_reservation(self, uuid, usage_id, project_id, resource,
                          delta, expire, created_at, updated_at, alloc_id):
        reservation_ref = sqa_models.Reservation()
        reservation_ref.id = len(self.reservations_created)
        reservation_ref.uuid = uuid
        reservation_ref.usage_id = usage_id
        reservation_ref.project_id = project_id
        reservation_ref.resource = resource
        reservation_ref.delta = delta
        reservation_ref.expire = expire
        reservation_ref.created_at = created_at
        reservation_ref.updated_at = updated_at
        reservation_ref.deleted_at = None
        reservation_ref.deleted = False
        reservation_ref.allocated_id = alloc_id
        return reservation_ref

    def compare_reservation(self, reservations, expected):
        reservations = set(reservations)
        for resv in expected:
            resource = resv['resource']
            resv_obj = self.reservations_created[resource]

            self.assertIn(resv_obj.uuid, reservations)
            reservations.discard(resv_obj.uuid)

            for key, value in resv.items():
                actual = getattr(resv_obj, key)
                self.assertEqual(value, actual,
                                 "%s != %s on reservation for resource %s" %
                                 (actual, value, resource))

        self.assertEqual(0, len(reservations))

    def _stub_allocated_get_all_by_project(self, allocated_quota=False):
        def fake_qagabp(context, project_id):
            self.assertEqual('test_project', project_id)
            if allocated_quota:
                return dict(project_id=project_id, volumes=3,
                            gigabytes=2 * 1024)
            return dict(project_id=project_id)

        self.stubs.Set(sqa_api, 'quota_allocated_get_all_by_project',
                       fake_qagabp)

    def test_quota_reserve_with_allocated(self):
        context = FakeContext('test_project', 'test_class')
        # Allocated quota for volumes will be reported as 3
        self._stub_allocated_get_all_by_project(allocated_quota=True)
        # The volumes quota limit is set to 10
        quotas = dict(volumes=10,
                      gigabytes=10 * 1024, )
        # Try to reserve 7 volumes
        deltas = dict(volumes=7,
                      gigabytes=2 * 1024, )
        result = sqa_api.quota_reserve(context, self.resources, quotas,
                                       deltas, self.expire, 5, 0)
        # The reservation works
        self.compare_reservation(
            result,
            [dict(resource='volumes',
                  usage_id=self.usages_created['volumes'],
                  project_id='test_project',
                  delta=7),
             dict(resource='gigabytes',
                  usage_id=self.usages_created['gigabytes'],
                  delta=2 * 1024), ])

        # But if we try to reserve 8 volumes (more than the free quota we
        # have), the reservation is rejected
        deltas = dict(volumes=8,
                      gigabytes=2 * 1024, )
        self.assertRaises(exception.OverQuota,
                          sqa_api.quota_reserve,
                          context, self.resources,
                          quotas, deltas,
                          self.expire, 0, 0)

    def test_quota_reserve_create_usages(self):
        context = FakeContext('test_project', 'test_class')
        quotas = dict(volumes=5,
                      gigabytes=10 * 1024, )
        deltas = dict(volumes=2,
                      gigabytes=2 * 1024, )
        self._stub_allocated_get_all_by_project()
        result = sqa_api.quota_reserve(context, self.resources, quotas,
                                       deltas, self.expire, 0, 0)
        self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
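        # No usage rows existed beforehand, so quota_reserve must have
        # created them; verify both the created usages and the returned
        # reservations.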
self.compare_usage(self.usages_created, [dict(resource='volumes', project_id='test_project', in_use=0, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=0, reserved=2 * 1024, until_refresh=None), ]) self.compare_reservation( result, [dict(resource='volumes', usage_id=self.usages_created['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages_created['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_negative_in_use(self): self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1) self.init_usage('test_project', 'gigabytes', -1, 0, until_refresh=1) context = FakeContext('test_project', 'test_class') quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) self._stub_allocated_get_all_by_project() result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 5, 0) self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=2, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=5), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_until_refresh(self): self.init_usage('test_project', 'volumes', 3, 0, until_refresh=1) self.init_usage('test_project', 'gigabytes', 3, 0, until_refresh=1) context = FakeContext('test_project', 'test_class') quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) self._stub_allocated_get_all_by_project() result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 5, 0) self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=2, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=5), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_max_age(self): max_age = 3600 record_created = (timeutils.utcnow() - datetime.timedelta(seconds=max_age)) self.init_usage('test_project', 'volumes', 3, 0, created_at=record_created, updated_at=record_created) self.init_usage('test_project', 'gigabytes', 3, 0, created_at=record_created, updated_at=record_created) context = FakeContext('test_project', 'test_class') quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) self._stub_allocated_get_all_by_project() result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, max_age) self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=2, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], 
project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_no_refresh(self): self.init_usage('test_project', 'volumes', 3, 0) self.init_usage('test_project', 'gigabytes', 3, 0) context = FakeContext('test_project', 'test_class') quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) self._stub_allocated_get_all_by_project() result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=3, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=3, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_unders(self): self.init_usage('test_project', 'volumes', 1, 0) self.init_usage('test_project', 'gigabytes', 1 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) self._stub_allocated_get_all_by_project() result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=1, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=1 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=-2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=-2 * 1024), ]) def test_quota_reserve_overs(self): self.init_usage('test_project', 'volumes', 4, 0) self.init_usage('test_project', 'gigabytes', 10 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) self._stub_allocated_get_all_by_project() self.assertRaises(exception.OverQuota, sqa_api.quota_reserve, context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=4, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=10 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.assertEqual({}, self.reservations_created) def test_quota_reserve_reduction(self): self.init_usage('test_project', 'volumes', 10, 0) self.init_usage('test_project', 'gigabytes', 20 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) self._stub_allocated_get_all_by_project() result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=10, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=20 * 1024, reserved=0, 
until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=-2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], project_id='test_project', delta=-2 * 1024), ]) class QuotaVolumeTypeReservationTestCase(test.TestCase): def setUp(self): super(QuotaVolumeTypeReservationTestCase, self).setUp() self.volume_type_name = CONF.default_volume_type self.volume_type = db.volume_type_create( context.get_admin_context(), dict(name=self.volume_type_name)) @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch.object(quota.QUOTAS, 'add_volume_type_opts') def test_volume_type_reservation(self, mock_add_volume_type_opts, mock_reserve): my_context = FakeContext('MyProject', None) volume = {'name': 'my_vol_name', 'id': 'my_vol_id', 'size': '1', 'project_id': 'vol_project_id', } reserve_opts = {'volumes': 1, 'gigabytes': volume['size']} quota_utils.get_volume_type_reservation(my_context, volume, self.volume_type['id']) mock_add_volume_type_opts.assert_called_once_with( my_context, reserve_opts, self.volume_type['id']) mock_reserve.assert_called_once_with(my_context, project_id='vol_project_id', gigabytes='1', volumes=1) @mock.patch.object(quota.QUOTAS, 'reserve') def test_volume_type_reservation_with_type_only(self, mock_reserve): my_context = FakeContext('MyProject', None) volume = {'name': 'my_vol_name', 'id': 'my_vol_id', 'size': '1', 'project_id': 'vol_project_id', } quota_utils.get_volume_type_reservation(my_context, volume, self.volume_type['id'], reserve_vol_type_only=True) vtype_volume_quota = "%s_%s" % ('volumes', self.volume_type['name']) vtype_size_quota = "%s_%s" % ('gigabytes', self.volume_type['name']) reserve_opts = {vtype_volume_quota: 1, vtype_size_quota: volume['size']} mock_reserve.assert_called_once_with(my_context, project_id='vol_project_id', **reserve_opts) cinder-8.0.0/cinder/tests/unit/test_rpc.py0000664000567000056710000000646712701406250021720 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
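# The tests below verify how the RPCAPI mixin pins its client's version
# caps: from the minimum RPC/object versions reported by Service objects,
# from the cached LAST_RPC_VERSIONS/LAST_OBJ_VERSIONS maps, or from the
# LIBERTY_RPC_VERSIONS fallback when services still report 'liberty'.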
import mock from cinder import rpc from cinder import test class FakeAPI(rpc.RPCAPI): RPC_API_VERSION = '1.5' TOPIC = 'cinder-scheduler-topic' BINARY = 'cinder-scheduler' class RPCAPITestCase(test.TestCase): """Tests RPCAPI mixin aggregating stuff related to RPC compatibility.""" def setUp(self): super(RPCAPITestCase, self).setUp() # Reset cached version pins rpc.LAST_RPC_VERSIONS = {} rpc.LAST_OBJ_VERSIONS = {} @mock.patch('cinder.objects.Service.get_minimum_rpc_version', return_value='1.2') @mock.patch('cinder.objects.Service.get_minimum_obj_version', return_value='1.7') @mock.patch('cinder.rpc.get_client') def test_init(self, get_client, get_min_obj, get_min_rpc): def fake_get_client(target, version_cap, serializer): self.assertEqual(FakeAPI.TOPIC, target.topic) self.assertEqual(FakeAPI.RPC_API_VERSION, target.version) self.assertEqual('1.2', version_cap) self.assertEqual('1.7', serializer.version_cap) get_client.side_effect = fake_get_client FakeAPI() @mock.patch('cinder.objects.Service.get_minimum_rpc_version', return_value='liberty') @mock.patch('cinder.objects.Service.get_minimum_obj_version', return_value='liberty') @mock.patch('cinder.rpc.get_client') def test_init_liberty_caps(self, get_client, get_min_obj, get_min_rpc): def fake_get_client(target, version_cap, serializer): self.assertEqual(FakeAPI.TOPIC, target.topic) self.assertEqual(FakeAPI.RPC_API_VERSION, target.version) self.assertEqual(rpc.LIBERTY_RPC_VERSIONS[FakeAPI.BINARY], version_cap) self.assertEqual('liberty', serializer.version_cap) get_client.side_effect = fake_get_client FakeAPI() @mock.patch('cinder.objects.Service.get_minimum_rpc_version') @mock.patch('cinder.objects.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.get_client') @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.4'}) @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': '1.3'}) def test_init_cached_caps(self, get_client, get_min_obj, get_min_rpc): def fake_get_client(target, version_cap, serializer): self.assertEqual(FakeAPI.TOPIC, target.topic) self.assertEqual(FakeAPI.RPC_API_VERSION, target.version) self.assertEqual('1.4', version_cap) self.assertEqual('1.3', serializer.version_cap) get_client.side_effect = fake_get_client FakeAPI() self.assertFalse(get_min_obj.called) self.assertFalse(get_min_rpc.called) cinder-8.0.0/cinder/tests/unit/targets/0000775000567000056710000000000012701406543021164 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/targets/test_scst_driver.py0000664000567000056710000002475012701406250025127 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
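# The SCSTAdm tests below replay canned scstadmin output (the fake_*
# strings assembled in setUp), so no real SCST installation is needed.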
import mock from cinder import context from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import scst from cinder.volume import utils as vutils class TestSCSTAdmDriver(tf.TargetDriverFixture): def setUp(self): super(TestSCSTAdmDriver, self).setUp() self.target = scst.SCSTAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) self.fake_iscsi_scan = \ ('Collecting current configuration: done.\n' 'Driver Target\n' '----------------------------------------------\n' 'iscsi iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba\n' 'All done.\n') self.fake_iscsi_attribute_scan = \ ('Collecting current configuration: done.\n' 'Attribute Value Writable KEY\n' '------------------------------------------\n' 'rel_tgt_id 1 Yes Yes\n' 'Dynamic attributes available\n' '----------------------------\n' 'IncomingUser\n' 'OutgoingUser\n' 'allowed_portal\n' 'LUN CREATE attributes available\n' '-------------------------------\n' 'read_only\n' 'All done.\n') self.fake_list_group = \ ('org.openstack:volume-vedams\n' 'Collecting current configuration: done.\n' 'Driver: iscsi\n' 'Target: iqn.2010-10.org.openstack:volume-vedams\n' 'Driver/target \'iscsi/iqn.2010-10.org.openstack:volume-vedams\'' 'has no associated LUNs.\n' 'Group: iqn.1993-08.org.debian:01:626bf14ebdc\n' 'Assigned LUNs:\n' 'LUN Device\n' '------------------\n' '1 1b67387810256\n' '2 2a0f1cc9cd595\n' 'Assigned Initiators:\n' 'Initiator\n' '-------------------------------------\n' 'iqn.1993-08.org.debian:01:626bf14ebdc\n' 'All done.\n') self.target.db = mock.MagicMock( volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'}) @mock.patch.object(utils, 'execute') @mock.patch.object(scst.SCSTAdm, '_target_attribute') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_get_target(self, mock_execute, mock_target_attribute, mock_scst_execute): mock_target_attribute.return_value = 1 mock_execute.return_value = (self.fake_iscsi_scan, None) expected = 1 self.assertEqual(expected, self.target._get_target( 'iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba')) @mock.patch.object(utils, 'execute') def test_target_attribute(self, mock_execute): mock_execute.return_value = (self.fake_iscsi_attribute_scan, None) self.assertEqual(str(1), self.target._target_attribute( 'iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba')) def test_single_lun_get_target_and_lun(self): ctxt = context.get_admin_context() self.assertEqual((0, 1), self.target._get_target_and_lun( ctxt, self.testvol)) @mock.patch.object(utils, 'execute') @mock.patch.object(scst.SCSTAdm, '_get_group') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_multi_lun_get_target_and_lun(self, mock_execute, mock_get_group, mock_scst_execute): mock_execute.return_value = (self.fake_list_group, None) mock_get_group.return_value = self.fake_list_group ctxt = context.get_admin_context() with mock.patch.object(self.target, 'target_name', return_value='iqn.2010-10.org.openstack:' 'volume-vedams'): self.assertEqual((0, 3), self.target._get_target_and_lun( ctxt, self.testvol)) @mock.patch.object(utils, 'execute') @mock.patch.object(scst.SCSTAdm, '_get_target') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_create_iscsi_target(self, mock_execute, mock_get_target, mock_scst_execute): mock_execute.return_value = (None, None) mock_get_target.return_value = 1 self.assertEqual(1, self.target.create_iscsi_target( 'iqn.2010-10.org.openstack:' 
'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba', 'vol1', 0, 1, self.fake_volumes_dir)) @mock.patch.object(utils, 'execute') @mock.patch.object(scst.SCSTAdm, '_get_target') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_create_export(self, mock_execute, mock_get_target, mock_scst_execute): mock_execute.return_value = (None, None) mock_scst_execute.return_value = (None, None) mock_get_target.return_value = 1 def _fake_get_target_and_lun(*args, **kwargs): return 0, 1 def _fake_iscsi_location(*args, **kwargs): return '10.9.8.7:3260,1 iqn.2010-10.org.openstack:' \ 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba 1' def _fake_get_target_chap_auth(*args, **kwargs): return ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ') ctxt = context.get_admin_context() expected_result = {'location': '10.9.8.7:3260,1 ' 'iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba 1', 'auth': 'CHAP ' 'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'} with mock.patch.object(self.target, '_get_target_and_lun', side_effect=_fake_get_target_and_lun),\ mock.patch.object(self.target, '_get_target_chap_auth', side_effect=_fake_get_target_chap_auth),\ mock.patch.object(self.target, 'initiator_iqn', return_value='iqn.1993-08.org.debian:' '01:626bf14ebdc'),\ mock.patch.object(self.target, '_iscsi_location', side_effect=_fake_iscsi_location),\ mock.patch.object(self.target, 'target_driver', return_value='iscsi'),\ mock.patch.object(vutils, 'generate_username', side_effect=lambda: 'QZJbisGmn9AL954FNF4D'),\ mock.patch.object(vutils, 'generate_password', side_effect=lambda: 'P68eE7u9eFqDGexd28DQ'): self.assertEqual(expected_result, self.target.create_export(ctxt, self.testvol, self.fake_volumes_dir)) @mock.patch('cinder.utils.execute') @mock.patch.object(scst.SCSTAdm, '_get_target') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_ensure_export(self, mock_execute, mock_get_target, mock_scst_execute): mock_execute.return_value = (None, None) mock_scst_execute.return_value = (None, None) mock_get_target.return_value = 1 ctxt = context.get_admin_context() def _fake_get_target_and_lun(*args, **kwargs): return 0, 1 def _fake_get_target_chap_auth(*args, **kwargs): return ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ') with mock.patch.object(self.target, 'create_iscsi_target'),\ mock.patch.object(self.target, '_get_target_chap_auth', side_effect=_fake_get_target_chap_auth),\ mock.patch.object(self.target, '_get_target_and_lun', side_effect=_fake_get_target_and_lun): self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) self.target.create_iscsi_target.assert_called_once_with( 'iqn.2010-10.org.openstack:testvol', 'ed2c2222-5fc0-11e4-aa15-123b93f75cba', 0, 1, self.fake_volumes_dir, _fake_get_target_chap_auth()) @mock.patch('cinder.utils.execute') @mock.patch.object(scst.SCSTAdm, '_get_target') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_ensure_export_chap(self, mock_execute, mock_get_target, mock_scst_execute): mock_execute.return_value = (None, None) mock_scst_execute.return_value = (None, None) mock_get_target.return_value = 1 ctxt = context.get_admin_context() def _fake_get_target_and_lun(*args, **kwargs): return 0, 1 def _fake_get_target_chap_auth(*args, **kwargs): return None with mock.patch.object(self.target, 'create_iscsi_target'),\ mock.patch.object(self.target, '_get_target_chap_auth', side_effect=_fake_get_target_chap_auth),\ mock.patch.object(self.target, '_get_target_and_lun', side_effect=_fake_get_target_and_lun): self.target.ensure_export(ctxt, self.testvol, 
self.fake_volumes_dir) self.target.create_iscsi_target.assert_called_once_with( 'iqn.2010-10.org.openstack:testvol', 'ed2c2222-5fc0-11e4-aa15-123b93f75cba', 0, 1, self.fake_volumes_dir, None) cinder-8.0.0/cinder/tests/unit/targets/test_iser_driver.py0000664000567000056710000000726012701406250025112 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import iser from cinder.volume.targets import lio from cinder.volume.targets import tgt class TestIserAdmDriver(tf.TargetDriverFixture): """Unit tests for the deprecated ISERTgtAdm flow""" def setUp(self): super(TestIserAdmDriver, self).setUp() self.target = iser.ISERTgtAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) @mock.patch.object(iser.ISERTgtAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi): connector = {'initiator': 'fake_init'} # Test the normal case mock_get_iscsi.return_value = {} expected_return = {'driver_volume_type': 'iser', 'data': {}} self.assertEqual(expected_return, self.target.initialize_connection(self.testvol, connector)) def test_iscsi_protocol(self): self.assertEqual('iser', self.target.iscsi_protocol) class TestIserTgtDriver(tf.TargetDriverFixture): """Unit tests for the iSER TGT flow""" def setUp(self): super(TestIserTgtDriver, self).setUp() self.configuration.iscsi_protocol = 'iser' self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) def test_iscsi_protocol(self): self.assertEqual('iser', self.target.iscsi_protocol) @mock.patch.object(tgt.TgtAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi): connector = {'initiator': 'fake_init'} mock_get_iscsi.return_value = {} expected_return = {'driver_volume_type': 'iser', 'data': {}} self.assertEqual(expected_return, self.target.initialize_connection(self.testvol, connector)) class TestIserLioAdmDriver(tf.TargetDriverFixture): """Unit tests for the iSER LIO flow""" def setUp(self): super(TestIserLioAdmDriver, self).setUp() self.configuration.iscsi_protocol = 'iser' with mock.patch.object(lio.LioAdm, '_verify_rtstool'): self.target = lio.LioAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) self.target.db = mock.MagicMock( volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'}) def test_iscsi_protocol(self): self.assertEqual('iser', self.target.iscsi_protocol) @mock.patch('cinder.utils.execute') @mock.patch.object(lio.LioAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi, mock_execute): connector = {'initiator': 'fake_init'} mock_get_iscsi.return_value = {} ret = self.target.initialize_connection(self.testvol, connector) driver_volume_type = ret['driver_volume_type'] self.assertEqual('iser', driver_volume_type) cinder-8.0.0/cinder/tests/unit/targets/test_tgt_driver.py0000664000567000056710000003730612701406250024752 0ustar 
jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import time import mock from oslo_concurrency import processutils as putils from cinder import context from cinder import exception from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import tgt from cinder.volume import utils as vutils class TestTgtAdmDriver(tf.TargetDriverFixture): def setUp(self): super(TestTgtAdmDriver, self).setUp() self.configuration.get = mock.Mock(side_effect=self.fake_get) self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) self.testvol_path = \ '/dev/stack-volumes-lvmdriver-1/%s' % self.VOLUME_NAME self.fake_iscsi_scan =\ ('Target 1: %(test_vol)s\n' ' System information:\n' ' Driver: iscsi\n' ' State: ready\n' ' I_T nexus information:\n' ' LUN information:\n' ' LUN: 0\n' ' Type: controller\n' ' SCSI ID: IET 00010000\n' ' SCSI SN: beaf10\n' ' Size: 0 MB, Block size: 1\n' ' Online: Yes\n' ' Removable media: No\n' ' Prevent removal: No\n' ' Readonly: No\n' ' SWP: No\n' ' Thin-provisioning: No\n' ' Backing store type: null\n' ' Backing store path: None\n' ' Backing store flags:\n' ' LUN: 1\n' ' Type: disk\n' ' SCSI ID: IET 00010001\n' ' SCSI SN: beaf11\n' ' Size: 1074 MB, Block size: 512\n' ' Online: Yes\n' ' Removable media: No\n' ' Prevent removal: No\n' ' Readonly: No\n' ' SWP: No\n' ' Thin-provisioning: No\n' ' Backing store type: rdwr\n' ' Backing store path: %(bspath)s\n' ' Backing store flags:\n' ' Account information:\n' ' mDVpzk8cZesdahJC9h73\n' ' ACL information:\n' ' ALL"\n' % {'test_vol': self.test_vol, 'bspath': self.testvol_path}) def fake_get(self, value, default): if value in ('iscsi_target_flags', 'iscsi_write_cache'): return getattr(self, value, default) def test_iscsi_protocol(self): self.assertEqual('iscsi', self.target.iscsi_protocol) def test_get_target(self): with mock.patch('cinder.utils.execute', return_value=(self.fake_iscsi_scan, None)): iqn = self.test_vol self.assertEqual('1', self.target._get_target(iqn)) def test_verify_backing_lun(self): iqn = self.test_vol with mock.patch('cinder.utils.execute', return_value=(self.fake_iscsi_scan, None)): self.assertTrue(self.target._verify_backing_lun(iqn, '1')) # Test the failure case bad_scan = self.fake_iscsi_scan.replace('LUN: 1', 'LUN: 3') with mock.patch('cinder.utils.execute', return_value=(bad_scan, None)): self.assertFalse(self.target._verify_backing_lun(iqn, '1')) @mock.patch.object(time, 'sleep') @mock.patch('cinder.utils.execute') def test_recreate_backing_lun(self, mock_execute, mock_sleep): mock_execute.return_value = ('out', 'err') self.target._recreate_backing_lun(self.test_vol, '1', self.testvol['name'], self.testvol_path) expected_command = ('tgtadm', '--lld', 'iscsi', '--op', 'new', '--mode', 'logicalunit', '--tid', '1', '--lun', '1', '-b', self.testvol_path) mock_execute.assert_called_once_with(*expected_command, run_as_root=True) # Test the failure case mock_execute.side_effect = putils.ProcessExecutionError 
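        # A failing tgtadm call should be swallowed; the helper returns None
        # instead of raising.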
self.assertIsNone( self.target._recreate_backing_lun(self.test_vol, '1', self.testvol['name'], self.testvol_path)) def test_get_iscsi_target(self): ctxt = context.get_admin_context() expected = 0 self.assertEqual(expected, self.target._get_iscsi_target(ctxt, self.testvol['id'])) def test_get_target_and_lun(self): lun = 1 iscsi_target = 0 ctxt = context.get_admin_context() expected = (iscsi_target, lun) self.assertEqual(expected, self.target._get_target_and_lun(ctxt, self.testvol)) def test_create_iscsi_target(self): with mock.patch('cinder.utils.execute', return_value=('', '')),\ mock.patch.object(self.target, '_get_target', side_effect=lambda x: 1),\ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True): self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir)) def test_create_iscsi_target_content(self): self.iscsi_target_flags = 'foo' self.iscsi_write_cache = 'bar' mock_open = mock.mock_open() with mock.patch('cinder.utils.execute', return_value=('', '')),\ mock.patch.object(self.target, '_get_target', side_effect=lambda x: 1),\ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True),\ mock.patch('cinder.volume.targets.tgt.open', mock_open, create=True): self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.testvol_path, chap_auth=('chap_foo', 'chap_bar'))) def test_create_iscsi_target_already_exists(self): def _fake_execute(*args, **kwargs): if 'update' in args: raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='target already exists', cmd='tgtad --lld iscsi --op show --mode target') else: return 'fake out', 'fake err' with mock.patch.object(self.target, '_get_target', side_effect=lambda x: 1),\ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True),\ mock.patch('cinder.utils.execute', _fake_execute): self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir)) @mock.patch('os.path.isfile', return_value=True) @mock.patch('os.path.exists', return_value=True) @mock.patch('cinder.utils.execute') @mock.patch('os.unlink', return_value=None) def test_delete_target_not_found(self, mock_unlink, mock_exec, mock_pathexists, mock_isfile): def _fake_execute(*args, **kwargs): raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='can\'t find the target', cmd='tgt-admin --force --delete') def _fake_execute_wrong_message(*args, **kwargs): raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='this is not the error you are looking for', cmd='tgt-admin --force --delete') mock_exec.side_effect = _fake_execute with mock.patch.object(self.target, '_get_target', return_value=False): self.assertIsNone(self.target.remove_iscsi_target( 1, 0, self.VOLUME_ID, self.VOLUME_NAME)) mock_exec.side_effect = _fake_execute_wrong_message self.assertRaises(exception.ISCSITargetRemoveFailed, self.target.remove_iscsi_target, 1, 0, self.VOLUME_ID, self.VOLUME_NAME) @mock.patch('os.path.isfile', return_value=True) @mock.patch('os.path.exists', return_value=True) @mock.patch('cinder.utils.execute') @mock.patch('os.unlink', return_value=None) def test_delete_target_acl_not_found(self, mock_unlink, mock_exec, mock_pathexists, mock_isfile): def _fake_execute(*args, **kwargs): raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='this access control rule does not exist', cmd='tgt-admin --force --delete') def _fake_execute_wrong_message(*args, **kwargs): raise 
putils.ProcessExecutionError( exit_code=1, stdout='', stderr='this is not the error you are looking for', cmd='tgt-admin --force --delete') mock_exec.side_effect = _fake_execute with mock.patch.object(self.target, '_get_target', return_value=False): self.assertIsNone(self.target.remove_iscsi_target( 1, 0, self.VOLUME_ID, self.VOLUME_NAME)) mock_exec.side_effect = _fake_execute_wrong_message self.assertRaises(exception.ISCSITargetRemoveFailed, self.target.remove_iscsi_target, 1, 0, self.VOLUME_ID, self.VOLUME_NAME) @mock.patch.object(tgt.TgtAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi): connector = {'initiator': 'fake_init'} # Test the normal case mock_get_iscsi.return_value = 'foo bar' expected_return = {'driver_volume_type': 'iscsi', 'data': 'foo bar'} self.assertEqual(expected_return, self.target.initialize_connection(self.testvol, connector)) @mock.patch('cinder.utils.execute') @mock.patch.object(tgt.TgtAdm, '_get_target') @mock.patch.object(os.path, 'exists') @mock.patch.object(os.path, 'isfile') @mock.patch.object(os, 'unlink') def test_remove_iscsi_target(self, mock_unlink, mock_isfile, mock_path_exists, mock_get_target, mock_execute): # Test the failure case: path does not exist mock_path_exists.return_value = None self.assertIsNone(self.target.remove_iscsi_target( 0, 1, self.testvol['id'], self.testvol['name'])) # Test the normal case mock_path_exists.return_value = True mock_isfile.return_value = True self.target.remove_iscsi_target(0, 1, self.testvol['id'], self.testvol['name']) calls = [mock.call('tgt-admin', '--force', '--delete', self.iscsi_target_prefix + self.testvol['name'], run_as_root=True), mock.call('tgt-admin', '--delete', self.iscsi_target_prefix + self.testvol['name'], run_as_root=True)] mock_execute.assert_has_calls(calls) def test_create_export(self): expected_result = {'location': '10.9.8.7:3260,1 ' + self.iscsi_target_prefix + self.testvol['name'] + ' 1', 'auth': 'CHAP QZJb P68e'} with mock.patch('cinder.utils.execute', return_value=('', '')),\ mock.patch.object(self.target, '_get_target', side_effect=lambda x: 1),\ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True),\ mock.patch.object(self.target, '_get_target_chap_auth', side_effect=lambda x, y: None) as m_chap,\ mock.patch.object(vutils, 'generate_username', side_effect=lambda: 'QZJb'),\ mock.patch.object(vutils, 'generate_password', side_effect=lambda: 'P68e'): ctxt = context.get_admin_context() self.assertEqual(expected_result, self.target.create_export(ctxt, self.testvol, self.fake_volumes_dir)) m_chap.side_effect = lambda x, y: ('otzL', '234Z') expected_result['auth'] = ('CHAP otzL 234Z') self.assertEqual(expected_result, self.target.create_export(ctxt, self.testvol, self.fake_volumes_dir)) @mock.patch.object(tgt.TgtAdm, '_get_target_chap_auth') @mock.patch.object(tgt.TgtAdm, 'create_iscsi_target') def test_ensure_export(self, _mock_create, mock_get_chap): ctxt = context.get_admin_context() mock_get_chap.return_value = ('foo', 'bar') self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) _mock_create.assert_called_once_with( self.iscsi_target_prefix + self.testvol['name'], 0, 1, self.fake_volumes_dir, ('foo', 'bar'), check_exit_code=False, old_name=None, portals_ips=[self.configuration.iscsi_ip_address], portals_port=self.configuration.iscsi_port) cinder-8.0.0/cinder/tests/unit/targets/__init__.py0000664000567000056710000000000012701406250023256 0ustar 
cinder-8.0.0/cinder/tests/unit/targets/__init__.py0000664000567000056710000000000012701406250023256 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/targets/test_base_iscsi_driver.py0000664000567000056710000001577012701406250026261 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_config import cfg

from cinder import context
from cinder import exception
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.targets import fake
from cinder.volume.targets import iscsi


class FakeIncompleteDriver(iscsi.ISCSITarget):
    def null_method():
        pass


class TestBaseISCSITargetDriver(tf.TargetDriverFixture):

    def setUp(self):
        super(TestBaseISCSITargetDriver, self).setUp()
        self.target = fake.FakeTarget(root_helper=utils.get_root_helper(),
                                      configuration=self.configuration)
        self.target.db = mock.MagicMock(
            volume_get=lambda x, y: {'provider_auth': 'CHAP otzL 234Z'})

    def test_abc_methods_not_present_fails(self):
        configuration = conf.Configuration(cfg.StrOpt('iscsi_target_prefix',
                                                      default='foo',
                                                      help='you wish'))
        self.assertRaises(TypeError,
                          FakeIncompleteDriver,
                          configuration=configuration)

    def test_get_iscsi_properties(self):
        self.assertEqual(self.expected_iscsi_properties,
                         self.target._get_iscsi_properties(self.testvol))

    def test_get_iscsi_properties_multiple_targets(self):
        testvol = self.testvol.copy()
        expected_iscsi_properties = self.expected_iscsi_properties.copy()
        iqn = expected_iscsi_properties['target_iqn']
        testvol.update(
            {'provider_location': '10.10.7.1:3260;10.10.8.1:3260 '
                                  'iqn.2010-10.org.openstack:'
                                  'volume-%s 0' % self.fake_volume_id})
        expected_iscsi_properties.update(
            {'target_portals': ['10.10.7.1:3260', '10.10.8.1:3260'],
             'target_iqns': [iqn, iqn],
             'target_luns': [0, 0]})
        self.assertEqual(expected_iscsi_properties,
                         self.target._get_iscsi_properties(testvol))

    def test_build_iscsi_auth_string(self):
        auth_string = 'chap chap-user chap-password'
        self.assertEqual(auth_string,
                         self.target._iscsi_authentication('chap',
                                                           'chap-user',
                                                           'chap-password'))

    def test_do_iscsi_discovery(self):
        with mock.patch.object(self.configuration,
                               'safe_get', return_value='127.0.0.1'),\
                mock.patch('cinder.utils.execute',
                           return_value=(self.target_string, '')):
            self.assertEqual(self.target_string,
                             self.target._do_iscsi_discovery(self.testvol))

    def test_remove_export(self):
        with mock.patch.object(self.target, '_get_target_and_lun') as \
                mock_get_target,\
                mock.patch.object(self.target, 'show_target'),\
                mock.patch.object(self.target, 'remove_iscsi_target') as \
                mock_remove_target:

            mock_get_target.return_value = (0, 1)
            iscsi_target, lun = mock_get_target.return_value
            ctxt = context.get_admin_context()
            self.target.remove_export(ctxt, self.testvol)
            mock_remove_target.assert_called_once_with(
                iscsi_target,
                lun,
                'ed2c2222-5fc0-11e4-aa15-123b93f75cba',
                'testvol')

    def test_remove_export_notfound(self):
        with mock.patch.object(self.target, '_get_target_and_lun') as \
                mock_get_target,\
                mock.patch.object(self.target, 'show_target'),\
                mock.patch.object(self.target, 'remove_iscsi_target'):

            mock_get_target.side_effect = exception.NotFound
            ctxt = context.get_admin_context()
            self.assertIsNone(self.target.remove_export(ctxt, self.testvol))

    def test_remove_export_show_error(self):
        with mock.patch.object(self.target, '_get_target_and_lun') as \
                mock_get_target,\
                mock.patch.object(self.target, 'show_target') as mshow,\
                mock.patch.object(self.target, 'remove_iscsi_target'):

            mock_get_target.return_value = (0, 1)
            iscsi_target, lun = mock_get_target.return_value
            mshow.side_effect = Exception
            ctxt = context.get_admin_context()
            self.assertIsNone(self.target.remove_export(ctxt, self.testvol))

    def test_initialize_connection(self):
        expected = {'driver_volume_type': 'iscsi',
                    'data': self.expected_iscsi_properties}
        self.assertEqual(expected,
                         self.target.initialize_connection(self.testvol, {}))

    def test_validate_connector(self):
        bad_connector = {'no_initiator': 'nada'}
        self.assertRaises(exception.InvalidConnectorException,
                          self.target.validate_connector,
                          bad_connector)

        connector = {'initiator': 'fake_init'}
        self.assertTrue(self.target.validate_connector(connector))

    def test_show_target_error(self):
        self.assertRaises(exception.InvalidParameterValue,
                          self.target.show_target,
                          0, None)

        with mock.patch.object(self.target, '_get_target') as mock_get_target:
            mock_get_target.side_effect = exception.NotFound()
            self.assertRaises(exception.NotFound,
                              self.target.show_target, 0,
                              self.expected_iscsi_properties['target_iqn'])

    def test_iscsi_location(self):
        location = self.target._iscsi_location('portal', 1, 'target', 2)
        self.assertEqual('portal:3260,1 target 2', location)

        location = self.target._iscsi_location('portal', 1, 'target', 2,
                                               ['portal2'])
        self.assertEqual('portal:3260;portal2:3260,1 target 2', location)

    def test_get_target_chap_auth(self):
        ctxt = context.get_admin_context()
        self.assertEqual(('otzL', '234Z'),
                         self.target._get_target_chap_auth(ctxt,
                                                           self.test_vol))
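

# A minimal sketch (helper name hypothetical) of the provider_location
# parsing that test_get_iscsi_properties_multiple_targets exercises above:
# a location of the form '<ip:port>[;<ip:port>...] <iqn> <lun>' expands
# into per-portal lists.
def _split_provider_location(location):
    portals, iqn, lun = location.split()
    portal_list = portals.split(';')
    return {'target_portals': portal_list,
            'target_iqns': [iqn] * len(portal_list),
            'target_luns': [int(lun)] * len(portal_list)}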
cinder-8.0.0/cinder/tests/unit/targets/test_cxt_driver.py0000664000567000056710000001725212701406250024750 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Chelsio Communications Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import mock

from cinder import context
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
from cinder.volume.targets import cxt


class TestCxtAdmDriver(tf.TargetDriverFixture):
    def setUp(self):
        super(TestCxtAdmDriver, self).setUp()
        self.cxt_subdir = cxt.CxtAdm.cxt_subdir
        self.target = cxt.CxtAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)
        self.VG = 'stack-volumes-lvmdriver-1'
        self.fake_iscsi_scan = \
            ('\n'
             'TARGET: iqn.2010-10.org.openstack:%(vol)s, id=1, login_ip=0\n'
             ' PortalGroup=1@10.9.8.7:3260,timeout=0\n'
             ' TargetDevice=/dev/%(vg)s/%(vol)s'
             ',BLK,PROD=CHISCSI '
             'Target,SN=0N0743000000000,ID=0D074300000000000000000,'
             'WWN=:W00743000000000\n' % {'vol': self.VOLUME_NAME,
                                         'vg': self.VG})

    def test_get_target(self):
        with mock.patch.object(self.target, '_get_volumes_dir',
                               return_value=self.fake_volumes_dir),\
                mock.patch('cinder.utils.execute',
                           return_value=(self.fake_iscsi_scan,
                                         None)) as m_exec:
            self.assertEqual(
                '1',
                self.target._get_target(
                    'iqn.2010-10.org.openstack:volume-%s' % self.VOLUME_ID
                )
            )
            self.assertTrue(m_exec.called)

    @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target',
                return_value=1)
    @mock.patch('cinder.utils.execute')
    def test_create_iscsi_target(self, mock_execute, mock_get_targ):
        mock_execute.return_value = ('', '')
        with mock.patch.object(self.target, '_get_volumes_dir') as mock_get:
            mock_get.return_value = self.fake_volumes_dir
            self.assertEqual(
                1,
                self.target.create_iscsi_target(
                    self.test_vol,
                    1,
                    0,
                    self.fake_volumes_dir,
                    portals_ips=[self.configuration.iscsi_ip_address]))
            self.assertTrue(mock_get.called)
            self.assertTrue(mock_execute.called)
            self.assertTrue(mock_get_targ.called)

    @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target',
                return_value=1)
    @mock.patch('cinder.utils.execute', return_value=('fake out', 'fake err'))
    def test_create_iscsi_target_port_ips(self, mock_execute, mock_get_targ):
        ips = ['10.0.0.15', '127.0.0.1']
        port = 3261
        mock_execute.return_value = ('', '')
        with mock.patch.object(self.target, '_get_volumes_dir') as mock_get:
            mock_get.return_value = self.fake_volumes_dir
            test_vol = 'iqn.2010-10.org.openstack:'\
                       'volume-83c2e877-feed-46be-8435-77884fe55b45'
            self.assertEqual(
                1,
                self.target.create_iscsi_target(
                    test_vol,
                    1,
                    0,
                    self.fake_volumes_dir,
                    portals_port=port,
                    portals_ips=ips))
            self.assertTrue(mock_get.called)
            self.assertTrue(mock_execute.called)
            self.assertTrue(mock_get_targ.called)

            file_path = os.path.join(self.fake_volumes_dir,
                                     test_vol.split(':')[1])

            expected_cfg = {
                'name': test_vol,
                'device': self.fake_volumes_dir,
                'ips': ','.join(map(lambda ip: '%s:%s' % (ip, port), ips)),
                'spaces': ' ' * 14,
                'spaces2': ' ' * 23}

            expected_file = ('\n%(spaces)starget:'
                             '\n%(spaces2)sTargetName=%(name)s'
                             '\n%(spaces2)sTargetDevice=%(device)s'
                             '\n%(spaces2)sPortalGroup=1@%(ips)s'
                             '\n%(spaces)s ') % expected_cfg

            with open(file_path, 'r') as cfg_file:
                result = cfg_file.read()
                self.assertEqual(expected_file, result)

    @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target',
                return_value=1)
    @mock.patch('cinder.utils.execute', return_value=('fake out', 'fake err'))
    def test_create_iscsi_target_already_exists(self, mock_execute,
                                                mock_get_targ):
        with mock.patch.object(self.target, '_get_volumes_dir') as mock_get:
            mock_get.return_value = self.fake_volumes_dir
            self.assertEqual(
                1,
                self.target.create_iscsi_target(
                    self.test_vol,
                    1,
                    0,
                    self.fake_volumes_dir,
                    portals_ips=[self.configuration.iscsi_ip_address]))
            self.assertTrue(mock_get.called)
            self.assertTrue(mock_get_targ.called)
            self.assertTrue(mock_execute.called)

    @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target',
                return_value=1)
    @mock.patch('cinder.utils.execute')
    @mock.patch.object(cxt.CxtAdm, '_get_target_chap_auth')
    def test_create_export(self, mock_chap, mock_execute, mock_get_targ):
        mock_execute.return_value = ('', '')
        mock_chap.return_value = ('QZJbisGmn9AL954FNF4D',
                                  'P68eE7u9eFqDGexd28DQ')
        with mock.patch.object(self.target, '_get_volumes_dir') as mock_get:
            mock_get.return_value = self.fake_volumes_dir

            expected_result = {'location': '10.9.8.7:3260,1 '
                               'iqn.2010-10.org.openstack:testvol 0',
                               'auth': 'CHAP '
                               'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'}

            ctxt = context.get_admin_context()
            self.assertEqual(expected_result,
                             self.target.create_export(ctxt,
                                                       self.testvol,
                                                       self.fake_volumes_dir))
            self.assertTrue(mock_get.called)
            self.assertTrue(mock_execute.called)

    @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target_chap_auth')
    def test_ensure_export(self, mock_get_chap):
        fake_creds = ('asdf', 'qwert')
        mock_get_chap.return_value = fake_creds
        ctxt = context.get_admin_context()
        with mock.patch.object(self.target, 'create_iscsi_target'):
            self.target.ensure_export(ctxt,
                                      self.testvol,
                                      self.fake_volumes_dir)
            self.target.create_iscsi_target.assert_called_once_with(
                'iqn.2010-10.org.openstack:testvol',
                1, 0,
                self.fake_volumes_dir,
                fake_creds,
                check_exit_code=False,
                old_name=None,
                portals_ips=[self.configuration.iscsi_ip_address],
                portals_port=self.configuration.iscsi_port)
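

# A minimal sketch of the chiscsi target stanza that
# test_create_iscsi_target_port_ips verifies on disk.  The field layout
# mirrors the expected_file template above; the builder itself is
# illustrative, not part of the driver.
def _build_cxt_target_stanza(name, device, ips, port):
    portal = ','.join('%s:%s' % (ip, port) for ip in ips)
    return ('\n%(pad)starget:'
            '\n%(pad2)sTargetName=%(name)s'
            '\n%(pad2)sTargetDevice=%(device)s'
            '\n%(pad2)sPortalGroup=1@%(portal)s'
            '\n%(pad)s ') % {'pad': ' ' * 14,
                             'pad2': ' ' * 23,
                             'name': name,
                             'device': device,
                             'portal': portal}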
cinder-8.0.0/cinder/tests/unit/targets/targets_fixture.py0000664000567000056710000001063412701406250024754 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import shutil
import tempfile

import mock
from oslo_utils import fileutils
from oslo_utils import timeutils

from cinder import test
from cinder.volume import configuration as conf


class TargetDriverFixture(test.TestCase):
    def setUp(self):
        super(TargetDriverFixture, self).setUp()
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.safe_get = mock.Mock(
            side_effect=self.fake_safe_get)
        self.configuration.iscsi_ip_address = '10.9.8.7'
        self.configuration.iscsi_port = 3260

        self.fake_volumes_dir = tempfile.mkdtemp()
        fileutils.ensure_tree(self.fake_volumes_dir)

        self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_project_id_2 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_volume_id = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'

        self.addCleanup(self._cleanup)

        self.testvol = \
            {'project_id': self.fake_project_id,
             'name': 'testvol',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': '10.10.7.1:3260 '
                                  'iqn.2010-10.org.openstack:'
                                  'volume-%s 0' % self.fake_volume_id,
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
        self.target_string = ('127.0.0.1:3260,1 ' +
                              self.iscsi_target_prefix +
                              'volume-%s' % self.testvol['id'])

        self.testvol_2 = \
            {'project_id': self.fake_project_id_2,
             'name': 'testvol2',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': (
                 '%(ip)s:%(port)d%(iqn)svolume-%(vol)s 2' %
                 {'ip': self.configuration.iscsi_ip_address,
                  'port': self.configuration.iscsi_port,
                  'iqn': self.iscsi_target_prefix,
                  'vol': self.fake_volume_id}),
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        self.expected_iscsi_properties = \
            {'auth_method': 'CHAP',
             'auth_password': '2FE0CQ8J196R',
             'auth_username': 'stack-1-a60e2611875f40199931f2c76370d66b',
             'encrypted': False,
             'logical_block_size': '512',
             'physical_block_size': '512',
             'target_discovered': False,
             'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                           self.fake_volume_id,
             'target_lun': 0,
             'target_portal': '10.10.7.1:3260',
             'volume_id': self.fake_volume_id}

        self.VOLUME_ID = '83c2e877-feed-46be-8435-77884fe55b45'
        self.VOLUME_NAME = 'volume-' + self.VOLUME_ID
        self.test_vol = (self.iscsi_target_prefix +
                         self.VOLUME_NAME)

    def _cleanup(self):
        if os.path.exists(self.fake_volumes_dir):
            shutil.rmtree(self.fake_volumes_dir)

    def fake_safe_get(self, value):
        if value == 'volumes_dir':
            return self.fake_volumes_dir
        elif value == 'iscsi_protocol':
            return self.configuration.iscsi_protocol
        elif value == 'iscsi_target_prefix':
            return self.iscsi_target_prefix
cinder-8.0.0/cinder/tests/unit/targets/test_lio_driver.py0000664000567000056710000003460012701406250024731 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_concurrency import processutils as putils from cinder import context from cinder import exception from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import lio class TestLioAdmDriver(tf.TargetDriverFixture): def setUp(self): super(TestLioAdmDriver, self).setUp() with mock.patch.object(lio.LioAdm, '_verify_rtstool'): self.target = lio.LioAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_get_target(self, mexecute, mpersist_cfg, mlock_exec): mexecute.return_value = (self.test_vol, None) self.assertEqual(self.test_vol, self.target._get_target(self.test_vol)) self.assertFalse(mpersist_cfg.called) expected_args = ('cinder-rtstool', 'get-targets') mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mexecute.assert_called_once_with(*expected_args, run_as_root=True) def test_get_iscsi_target(self): ctxt = context.get_admin_context() expected = 0 self.assertEqual(expected, self.target._get_iscsi_target(ctxt, self.testvol['id'])) def test_get_target_and_lun(self): lun = 0 iscsi_target = 0 ctxt = context.get_admin_context() expected = (iscsi_target, lun) self.assertEqual(expected, self.target._get_target_and_lun(ctxt, self.testvol)) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') @mock.patch.object(lio.LioAdm, '_get_target') def test_create_iscsi_target(self, mget_target, mexecute, mpersist_cfg, mlock_exec): mget_target.return_value = 1 # create_iscsi_target sends volume_name instead of volume_id on error self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir)) mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) mexecute.assert_called_once_with( 'cinder-rtstool', 'create', self.fake_volumes_dir, self.test_vol, '', '', self.target.iscsi_protocol == 'iser', run_as_root=True) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch.object(utils, 'execute') @mock.patch.object(lio.LioAdm, '_get_target', return_value=1) def test_create_iscsi_target_port_ip(self, mget_target, mexecute, mpersist_cfg, mlock_exec): ip = '10.0.0.15' port = 3261 self.assertEqual( 1, self.target.create_iscsi_target( name=self.test_vol, tid=1, lun=0, path=self.fake_volumes_dir, **{'portals_port': port, 'portals_ips': [ip]})) expected_args = ( 'cinder-rtstool', 'create', self.fake_volumes_dir, self.test_vol, '', '', self.target.iscsi_protocol == 'iser', '-p%s' % port, '-a' + ip) mlock_exec.assert_any_call(*expected_args, run_as_root=True) mexecute.assert_any_call(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch.object(utils, 'execute') @mock.patch.object(lio.LioAdm, '_get_target', return_value=1) def test_create_iscsi_target_port_ips(self, mget_target, mexecute, mpersist_cfg, mlock_exec): test_vol = 'iqn.2010-10.org.openstack:' + self.VOLUME_NAME ips = ['10.0.0.15', '127.0.0.1'] port = 3261 
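        # With several portal IPs, the single rtstool call checked below is
        # expected to fold them into one comma-separated '-a' argument,
        # e.g. (illustrative):
        #   cinder-rtstool create <dir> <iqn> '' '' False -p3261 \
        #       -a10.0.0.15,127.0.0.1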
self.assertEqual( 1, self.target.create_iscsi_target( name=test_vol, tid=1, lun=0, path=self.fake_volumes_dir, **{'portals_port': port, 'portals_ips': ips})) expected_args = ( 'cinder-rtstool', 'create', self.fake_volumes_dir, test_vol, '', '', self.target.iscsi_protocol == 'iser', '-p%s' % port, '-a' + ','.join(ips)) mlock_exec.assert_any_call(*expected_args, run_as_root=True) mexecute.assert_any_call(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute', side_effect=putils.ProcessExecutionError) @mock.patch.object(lio.LioAdm, '_get_target') def test_create_iscsi_target_already_exists(self, mget_target, mexecute, mpersist_cfg, mlock_exec): chap_auth = ('foo', 'bar') self.assertRaises(exception.ISCSITargetCreateFailed, self.target.create_iscsi_target, self.test_vol, 1, 0, self.fake_volumes_dir, chap_auth) self.assertFalse(mpersist_cfg.called) expected_args = ('cinder-rtstool', 'create', self.fake_volumes_dir, self.test_vol, chap_auth[0], chap_auth[1], False) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mexecute.assert_called_once_with(*expected_args, run_as_root=True) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_remove_iscsi_target(self, mexecute, mpersist_cfg, mlock_exec): # Test the normal case self.target.remove_iscsi_target(0, 0, self.testvol['id'], self.testvol['name']) expected_args = ('cinder-rtstool', 'delete', self.iscsi_target_prefix + self.testvol['name']) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mexecute.assert_called_once_with(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.fake_volume_id) # Test the failure case: putils.ProcessExecutionError mlock_exec.reset_mock() mpersist_cfg.reset_mock() mexecute.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetRemoveFailed, self.target.remove_iscsi_target, 0, 0, self.testvol['id'], self.testvol['name']) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) # Ensure there have been no calls to persist configuration self.assertFalse(mpersist_cfg.called) @mock.patch.object(lio.LioAdm, '_get_targets') @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch('cinder.utils.execute') def test_ensure_export(self, mock_exec, mock_execute, mock_get_targets): ctxt = context.get_admin_context() mock_get_targets.return_value = None self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) expected_args = ('cinder-rtstool', 'restore') mock_exec.assert_called_once_with(*expected_args, run_as_root=True) @mock.patch.object(lio.LioAdm, '_get_targets') @mock.patch.object(lio.LioAdm, '_restore_configuration') def test_ensure_export_target_exist(self, mock_restore, mock_get_targets): ctxt = context.get_admin_context() mock_get_targets.return_value = 'target' self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) self.assertFalse(mock_restore.called) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') @mock.patch.object(lio.LioAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi, mock_execute, mpersist_cfg, 
mlock_exec): target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id connector = {'initiator': 'fake_init'} # Test the normal case mock_get_iscsi.return_value = 'foo bar' expected_return = {'driver_volume_type': 'iscsi', 'data': 'foo bar'} self.assertEqual(expected_return, self.target.initialize_connection(self.testvol, connector)) expected_args = ('cinder-rtstool', 'add-initiator', target_id, self.expected_iscsi_properties['auth_username'], '2FE0CQ8J196R', connector['initiator']) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mock_execute.assert_called_once_with(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.fake_volume_id) # Test the failure case: putils.ProcessExecutionError mlock_exec.reset_mock() mpersist_cfg.reset_mock() mock_execute.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetAttachFailed, self.target.initialize_connection, self.testvol, connector) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) # Ensure there have been no calls to persist configuration self.assertFalse(mpersist_cfg.called) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_terminate_connection(self, mock_execute, mpersist_cfg, mlock_exec): target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id connector = {'initiator': 'fake_init'} self.target.terminate_connection(self.testvol, connector) expected_args = ('cinder-rtstool', 'delete-initiator', target_id, connector['initiator']) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mock_execute.assert_called_once_with(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.fake_volume_id) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_terminate_connection_fail(self, mock_execute, mpersist_cfg, mlock_exec): target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id mock_execute.side_effect = putils.ProcessExecutionError connector = {'initiator': 'fake_init'} self.assertRaises(exception.ISCSITargetDetachFailed, self.target.terminate_connection, self.testvol, connector) mlock_exec.assert_called_once_with('cinder-rtstool', 'delete-initiator', target_id, connector['initiator'], run_as_root=True) self.assertFalse(mpersist_cfg.called) def test_iscsi_protocol(self): self.assertEqual('iscsi', self.target.iscsi_protocol) @mock.patch.object(lio.LioAdm, '_get_target_and_lun', return_value=(1, 2)) @mock.patch.object(lio.LioAdm, 'create_iscsi_target', return_value=3) @mock.patch.object(lio.LioAdm, '_get_target_chap_auth', return_value=(mock.sentinel.user, mock.sentinel.pwd)) def test_create_export(self, mock_chap, mock_create, mock_get_target): ctxt = context.get_admin_context() result = self.target.create_export(ctxt, self.testvol_2, self.fake_volumes_dir) loc = (u'%(ip)s:%(port)d,3 %(prefix)s%(name)s 2' % {'ip': self.configuration.iscsi_ip_address, 'port': self.configuration.iscsi_port, 'prefix': self.iscsi_target_prefix, 'name': self.testvol_2['name']}) expected_result = { 'location': loc, 'auth': 'CHAP %s %s' % (mock.sentinel.user, mock.sentinel.pwd), } self.assertEqual(expected_result, result) mock_create.assert_called_once_with( self.iscsi_target_prefix + self.testvol_2['name'], 1, 2, self.fake_volumes_dir, (mock.sentinel.user, 
mock.sentinel.pwd), portals_ips=[self.configuration.iscsi_ip_address], portals_port=self.configuration.iscsi_port) cinder-8.0.0/cinder/tests/unit/targets/test_iet_driver.py0000664000567000056710000002220612701406250024726 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from oslo_concurrency import processutils as putils import six from cinder import context from cinder import exception from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import iet class TestIetAdmDriver(tf.TargetDriverFixture): def setUp(self): super(TestIetAdmDriver, self).setUp() self.target = iet.IetAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) def test_get_target(self): tmp_file = six.StringIO() tmp_file.write( 'tid:1 name:iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa ' sid:844427031282176 initiator:iqn.1994-05.com.redhat:5a6894679665\n' # noqa ' cid:0 ip:10.9.8.7 state:active hd:none dd:none') tmp_file.seek(0) with mock.patch('six.moves.builtins.open') as mock_open: mock_open.return_value = contextlib.closing(tmp_file) self.assertEqual('1', self.target._get_target( 'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45' # noqa )) # Test the failure case: Failed to handle the config file mock_open.side_effect = MemoryError() self.assertRaises(MemoryError, self.target._get_target, '') @mock.patch('cinder.volume.targets.iet.IetAdm._get_target', return_value=0) @mock.patch('cinder.utils.execute') @mock.patch('os.path.exists', return_value=True) @mock.patch('cinder.utils.temporary_chown') @mock.patch.object(iet, 'LOG') def test_create_iscsi_target(self, mock_log, mock_chown, mock_exists, mock_execute, mock_get_targ): mock_execute.return_value = ('', '') tmp_file = six.StringIO() with mock.patch('six.moves.builtins.open') as mock_open: mock_open.return_value = contextlib.closing(tmp_file) self.assertEqual( 0, self.target.create_iscsi_target( self.test_vol, 0, 0, self.fake_volumes_dir)) self.assertTrue(mock_execute.called) self.assertTrue(mock_open.called) self.assertTrue(mock_get_targ.called) # Test the failure case: Failed to chown the config file mock_open.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetCreateFailed, self.target.create_iscsi_target, self.test_vol, 0, 0, self.fake_volumes_dir) # Test the failure case: Failed to set new auth mock_execute.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetCreateFailed, self.target.create_iscsi_target, self.test_vol, 0, 0, self.fake_volumes_dir) @mock.patch('cinder.utils.execute') @mock.patch('os.path.exists', return_value=True) def test_update_config_file_failure(self, mock_exists, mock_execute): # Test the failure case: conf file does not exist mock_exists.return_value = False mock_execute.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetCreateFailed, self.target.update_config_file, 
self.test_vol, 0, self.fake_volumes_dir, "foo bar") @mock.patch('cinder.volume.targets.iet.IetAdm._get_target', return_value=1) @mock.patch('cinder.utils.execute') def test_create_iscsi_target_already_exists(self, mock_execute, mock_get_targ): mock_execute.return_value = ('fake out', 'fake err') self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir)) self.assertTrue(mock_get_targ.called) self.assertTrue(mock_execute.called) @mock.patch('cinder.volume.targets.iet.IetAdm._find_sid_cid_for_target', return_value=None) @mock.patch('os.path.exists', return_value=False) @mock.patch('cinder.utils.execute') def test_remove_iscsi_target(self, mock_execute, mock_exists, mock_find): # Test the normal case self.target.remove_iscsi_target(1, 0, self.testvol['id'], self.testvol['name']) mock_execute.assert_any_call('ietadm', '--op', 'delete', '--tid=1', run_as_root=True) # Test the failure case: putils.ProcessExecutionError mock_execute.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetRemoveFailed, self.target.remove_iscsi_target, 1, 0, self.testvol['id'], self.testvol['name']) def test_find_sid_cid_for_target(self): tmp_file = six.StringIO() tmp_file.write( 'tid:1 name:iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa ' sid:844427031282176 initiator:iqn.1994-05.com.redhat:5a6894679665\n' # noqa ' cid:0 ip:10.9.8.7 state:active hd:none dd:none') tmp_file.seek(0) with mock.patch('six.moves.builtins.open') as mock_open: mock_open.return_value = contextlib.closing(tmp_file) self.assertEqual(('844427031282176', '0'), self.target._find_sid_cid_for_target( '1', 'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45', # noqa 'volume-83c2e877-feed-46be-8435-77884fe55b45' # noqa )) @mock.patch('cinder.volume.targets.iet.IetAdm._get_target', return_value=1) @mock.patch('cinder.utils.execute') @mock.patch.object(iet.IetAdm, '_get_target_chap_auth') def test_create_export(self, mock_get_chap, mock_execute, mock_get_targ): mock_execute.return_value = ('', '') mock_get_chap.return_value = ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ') expected_result = {'location': '10.9.8.7:3260,1 ' 'iqn.2010-10.org.openstack:testvol 0', 'auth': 'CHAP ' 'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'} ctxt = context.get_admin_context() self.assertEqual(expected_result, self.target.create_export(ctxt, self.testvol, self.fake_volumes_dir)) self.assertTrue(mock_execute.called) @mock.patch('cinder.volume.targets.iet.IetAdm._get_target_chap_auth', return_value=None) @mock.patch('cinder.volume.targets.iet.IetAdm._get_target', return_value=1) def test_ensure_export(self, mock_get_targetm, mock_get_chap): ctxt = context.get_admin_context() with mock.patch.object(self.target, 'create_iscsi_target'): self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) self.target.create_iscsi_target.assert_called_once_with( 'iqn.2010-10.org.openstack:testvol', 1, 0, self.fake_volumes_dir, None, portals_ips=[self.configuration.iscsi_ip_address], portals_port=int(self.configuration.iscsi_port), check_exit_code=False, old_name=None) cinder-8.0.0/cinder/tests/unit/test_volume.py0000664000567000056710000126731012701406257022447 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Volume Code.""" import datetime import ddt import os import shutil import socket import sys import tempfile import time import uuid import enum import eventlet import mock from mox3 import mox import os_brick from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import units import six from stevedore import extension from taskflow.engines.action_engine import engine from cinder.api import common from cinder.brick.local_dev import lvm as brick_lvm from cinder import context from cinder import db from cinder import exception from cinder.image import image_utils from cinder import keymgr from cinder import objects from cinder.objects import fields import cinder.policy from cinder import quota from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit.brick import fake_lvm from cinder.tests.unit import conf_fixture from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_driver from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit.keymgr import fake as fake_keymgr from cinder.tests.unit import utils as tests_utils from cinder import utils import cinder.volume from cinder.volume import api as volume_api from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers import lvm from cinder.volume import manager as vol_manager from cinder.volume import rpcapi as volume_rpcapi import cinder.volume.targets.tgt from cinder.volume import utils as volutils from cinder.volume import volume_types QUOTAS = quota.QUOTAS CGQUOTAS = quota.CGQUOTAS CONF = cfg.CONF ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor' fake_opt = [ cfg.StrOpt('fake_opt1', default='fake', help='fake opts') ] def create_snapshot(volume_id, size=1, metadata=None, ctxt=None, **kwargs): """Create a snapshot object.""" metadata = metadata or {} snap = objects.Snapshot(ctxt or context.get_admin_context()) snap.volume_size = size snap.user_id = fake.user_id snap.project_id = fake.project_id snap.volume_id = volume_id snap.status = "creating" if metadata is not None: snap.metadata = metadata snap.update(kwargs) snap.create() return snap class FakeImageService(object): def __init__(self, db_driver=None, image_service=None): pass def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': 'raw', 'container_format': 'bare', 'status': 'active'} class BaseVolumeTestCase(test.TestCase): """Test Case for volumes.""" FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' def setUp(self): super(BaseVolumeTestCase, self).setUp() self.extension_manager = extension.ExtensionManager( "BaseVolumeTestCase") vol_tmpdir = tempfile.mkdtemp() self.flags(volumes_dir=vol_tmpdir, notification_driver=["test"]) 
self.addCleanup(self._cleanup) self.volume = importutils.import_object(CONF.volume_manager) self.configuration = mock.Mock(conf.Configuration) self.context = context.get_admin_context() self.context.user_id = fake.user_id # NOTE(mriedem): The id is hard-coded here for tracking race fail # assertions with the notification code, it's part of an # elastic-recheck query so don't remove it or change it. self.project_id = '7f265bd4-3a85-465e-a899-5dc4854a86d3' self.context.project_id = self.project_id self.volume_params = { 'status': 'creating', 'host': CONF.host, 'size': 1} self.stubs.Set(brick_lvm.LVM, 'get_all_volume_groups', self.fake_get_all_volume_groups) fake_image.stub_out_image_service(self.stubs) self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True) self.stubs.Set(os.path, 'exists', lambda x: True) self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0, 'pools': {}} # keep ordered record of what we execute self.called = [] self.volume_api = cinder.volume.api.API() def _cleanup(self): try: shutil.rmtree(CONF.volumes_dir) except OSError: pass def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True): return [{'name': 'cinder-volumes', 'size': '5.00', 'available': '2.50', 'lv_count': '2', 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}] @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask._clone_image_volume') def _create_volume_from_image(self, mock_clone_image_volume, mock_fetch_img, fakeout_copy_image_to_volume=False, fakeout_clone_image=False, clone_image_volume=False): """Test function of create_volume_from_image. Test cases call this function to create a volume from image, caller can choose whether to fake out copy_image_to_volume and clone_image, after calling this, test cases should check status of the volume. 
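
        A typical call looks like (illustrative):

            volume = self._create_volume_from_image(
                fakeout_copy_image_to_volume=True)
            self.assertEqual('available', volume.status)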
""" def fake_local_path(volume): return dst_path def fake_copy_image_to_volume(context, volume, image_service, image_id): pass def fake_fetch_to_raw(ctx, image_service, image_id, path, blocksize, size=None, throttle=None): pass def fake_clone_image(ctx, volume_ref, image_location, image_meta, image_service): return {'provider_location': None}, True dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) if fakeout_clone_image: self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image) self.stubs.Set(image_utils, 'fetch_to_raw', fake_fetch_to_raw) if fakeout_copy_image_to_volume: self.stubs.Set(self.volume.driver, 'copy_image_to_volume', fake_copy_image_to_volume) mock_clone_image_volume.return_value = ({}, clone_image_volume) mock_fetch_img.return_value = mock.MagicMock( spec=tests_utils.get_file_spec()) image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' volume = tests_utils.create_volume(self.context, **self.volume_params) # creating volume testdata try: request_spec = { 'volume_properties': self.volume_params, 'image_id': image_id, } self.volume.create_volume(self.context, volume.id, request_spec, volume=volume) finally: # cleanup os.unlink(dst_path) volume = objects.Volume.get_by_id(self.context, volume.id) return volume class AvailabilityZoneTestCase(BaseVolumeTestCase): def test_list_availability_zones_cached(self): volume_api = cinder.volume.api.API() with mock.patch.object(volume_api.db, 'service_get_all_by_topic') as get_all: get_all.return_value = [ { 'availability_zone': 'a', 'disabled': False, }, ] azs = volume_api.list_availability_zones(enable_cache=True) self.assertEqual([{"name": 'a', 'available': True}], list(azs)) self.assertIsNotNone(volume_api.availability_zones_last_fetched) self.assertTrue(get_all.called) volume_api.list_availability_zones(enable_cache=True) self.assertEqual(1, get_all.call_count) def test_list_availability_zones_no_cached(self): volume_api = cinder.volume.api.API() with mock.patch.object(volume_api.db, 'service_get_all_by_topic') as get_all: get_all.return_value = [ { 'availability_zone': 'a', 'disabled': False, }, ] azs = volume_api.list_availability_zones(enable_cache=False) self.assertEqual([{"name": 'a', 'available': True}], list(azs)) self.assertIsNone(volume_api.availability_zones_last_fetched) with mock.patch.object(volume_api.db, 'service_get_all_by_topic') as get_all: get_all.return_value = [ { 'availability_zone': 'a', 'disabled': True, }, ] azs = volume_api.list_availability_zones(enable_cache=False) self.assertEqual([{"name": 'a', 'available': False}], list(azs)) self.assertIsNone(volume_api.availability_zones_last_fetched) def test_list_availability_zones_refetched(self): timeutils.set_time_override() volume_api = cinder.volume.api.API() with mock.patch.object(volume_api.db, 'service_get_all_by_topic') as get_all: get_all.return_value = [ { 'availability_zone': 'a', 'disabled': False, }, ] azs = volume_api.list_availability_zones(enable_cache=True) self.assertEqual([{"name": 'a', 'available': True}], list(azs)) self.assertIsNotNone(volume_api.availability_zones_last_fetched) last_fetched = volume_api.availability_zones_last_fetched self.assertTrue(get_all.called) volume_api.list_availability_zones(enable_cache=True) self.assertEqual(1, get_all.call_count) # The default cache time is 3600, push past that... 
timeutils.advance_time_seconds(3800) get_all.return_value = [ { 'availability_zone': 'a', 'disabled': False, }, { 'availability_zone': 'b', 'disabled': False, }, ] azs = volume_api.list_availability_zones(enable_cache=True) azs = sorted([n['name'] for n in azs]) self.assertEqual(['a', 'b'], azs) self.assertEqual(2, get_all.call_count) self.assertGreater(volume_api.availability_zones_last_fetched, last_fetched) def test_list_availability_zones_enabled_service(self): services = [ {'availability_zone': 'ping', 'disabled': 0}, {'availability_zone': 'ping', 'disabled': 1}, {'availability_zone': 'pong', 'disabled': 0}, {'availability_zone': 'pung', 'disabled': 1}, ] def stub_service_get_all_by_topic(*args, **kwargs): return services self.stubs.Set(db, 'service_get_all_by_topic', stub_service_get_all_by_topic) def sort_func(obj): return obj['name'] volume_api = cinder.volume.api.API() azs = volume_api.list_availability_zones() azs = sorted(azs, key=sort_func) expected = sorted([ {'name': 'pung', 'available': False}, {'name': 'pong', 'available': True}, {'name': 'ping', 'available': True}, ], key=sort_func) self.assertEqual(expected, azs) class VolumeTestCase(BaseVolumeTestCase): def setUp(self): super(VolumeTestCase, self).setUp() self._clear_patch = mock.patch('cinder.volume.utils.clear_volume', autospec=True) self._clear_patch.start() self.expected_status = 'available' def tearDown(self): super(VolumeTestCase, self).tearDown() self._clear_patch.stop() def test_init_host_clears_downloads(self): """Test that init_host will unwedge a volume stuck in downloading.""" volume = tests_utils.create_volume(self.context, status='downloading', size=0, host=CONF.host) volume_id = volume['id'] self.volume.init_host() volume = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual("error", volume['status']) self.volume.delete_volume(self.context, volume_id) def test_init_host_clears_uploads_available_volume(self): """init_host will clean an available volume stuck in uploading.""" volume = tests_utils.create_volume(self.context, status='uploading', size=0, host=CONF.host) self.volume.init_host() volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual("available", volume.status) def test_init_host_clears_uploads_in_use_volume(self): """init_host will clean an in-use volume stuck in uploading.""" volume = tests_utils.create_volume(self.context, status='uploading', size=0, host=CONF.host) fake_uuid = fakes.get_fake_uuid() tests_utils.attach_volume(self.context, volume.id, fake_uuid, 'fake_host', '/dev/vda') self.volume.init_host() volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual("in-use", volume.status) def test_init_host_resumes_deletes(self): """init_host will resume deleting volume in deleting status.""" volume = tests_utils.create_volume(self.context, status='deleting', size=0, host=CONF.host) volume_id = volume['id'] self.volume.init_host() self.assertRaises(exception.VolumeNotFound, db.volume_get, context.get_admin_context(), volume_id) def test_init_host_count_allocated_capacity(self): vol0 = tests_utils.create_volume( self.context, size=100, host=CONF.host) vol1 = tests_utils.create_volume( self.context, size=128, host=volutils.append_host(CONF.host, 'pool0')) vol2 = tests_utils.create_volume( self.context, size=256, host=volutils.append_host(CONF.host, 'pool0')) vol3 = tests_utils.create_volume( self.context, size=512, host=volutils.append_host(CONF.host, 'pool1')) vol4 = tests_utils.create_volume( self.context, 
size=1024, host=volutils.append_host(CONF.host, 'pool2')) self.volume.init_host() stats = self.volume.stats self.assertEqual(2020, stats['allocated_capacity_gb']) self.assertEqual( 384, stats['pools']['pool0']['allocated_capacity_gb']) self.assertEqual( 512, stats['pools']['pool1']['allocated_capacity_gb']) self.assertEqual( 1024, stats['pools']['pool2']['allocated_capacity_gb']) # NOTE(jdg): On the create we have host='xyz', BUT # here we do a db.volume_get, and now the host has # been updated to xyz#pool-name. Note this is # done via the managers init, which calls the drivers # get_pool method, which in the legacy case is going # to be volume_backend_name or None vol0 = db.volume_get(context.get_admin_context(), vol0['id']) self.assertEqual(volutils.append_host(CONF.host, 'LVM'), vol0['host']) self.volume.delete_volume(self.context, vol0['id']) self.volume.delete_volume(self.context, vol1['id']) self.volume.delete_volume(self.context, vol2['id']) self.volume.delete_volume(self.context, vol3['id']) self.volume.delete_volume(self.context, vol4['id']) @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.3'}) @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': '1.5'}) def test_reset(self): vol_mgr = vol_manager.VolumeManager() scheduler_rpcapi = vol_mgr.scheduler_rpcapi self.assertEqual('1.3', scheduler_rpcapi.client.version_cap) self.assertEqual('1.5', scheduler_rpcapi.client.serializer._base.version_cap) vol_mgr.reset() scheduler_rpcapi = vol_mgr.scheduler_rpcapi self.assertIsNone(scheduler_rpcapi.client.version_cap) self.assertIsNone(scheduler_rpcapi.client.serializer._base.version_cap) @mock.patch.object(vol_manager.VolumeManager, 'update_service_capabilities') def test_report_filter_goodness_function(self, mock_update): manager = vol_manager.VolumeManager() manager.driver.set_initialized() myfilterfunction = "myFilterFunction" mygoodnessfunction = "myGoodnessFunction" expected = {'name': 'cinder-volumes', 'filter_function': myfilterfunction, 'goodness_function': mygoodnessfunction, } with mock.patch.object(manager.driver, 'get_volume_stats') as m_get_stats: with mock.patch.object(manager.driver, 'get_goodness_function') as m_get_goodness: with mock.patch.object(manager.driver, 'get_filter_function') as m_get_filter: m_get_stats.return_value = {'name': 'cinder-volumes'} m_get_filter.return_value = myfilterfunction m_get_goodness.return_value = mygoodnessfunction manager._report_driver_status(1) self.assertTrue(m_get_stats.called) mock_update.assert_called_once_with(expected) def test_is_working(self): # By default we have driver mocked to be initialized... self.assertTrue(self.volume.is_working()) # ...lets switch it and check again! self.volume.driver._initialized = False self.assertFalse(self.volume.is_working()) def test_create_volume_fails_with_creating_and_downloading_status(self): """Test init_host in case of volume. While the status of volume is 'creating' or 'downloading', volume process down. After process restarting this 'creating' status is changed to 'error'. """ for status in ['creating', 'downloading']: volume = tests_utils.create_volume(self.context, status=status, size=0, host=CONF.host) volume_id = volume['id'] self.volume.init_host() volume = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('error', volume['status']) self.volume.delete_volume(self.context, volume_id) def test_create_snapshot_fails_with_creating_status(self): """Test init_host in case of snapshot. 
While the status of snapshot is 'creating', volume process down. After process restarting this 'creating' status is changed to 'error'. """ volume = tests_utils.create_volume(self.context, **self.volume_params) snapshot = tests_utils.create_snapshot(self.context, volume['id'], status='creating') snap_id = snapshot['id'] self.volume.init_host() snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) self.assertEqual('error', snapshot_obj.status) self.volume.delete_snapshot(self.context, snapshot_obj) self.volume.delete_volume(self.context, volume['id']) @mock.patch.object(QUOTAS, 'reserve') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(QUOTAS, 'rollback') def test_create_driver_not_initialized(self, reserve, commit, rollback): self.volume.driver._initialized = False def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] def fake_commit_and_rollback(context, reservations, project_id=None): pass reserve.return_value = fake_reserve commit.return_value = fake_commit_and_rollback rollback.return_value = fake_commit_and_rollback volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] self.assertIsNone(volume['encryption_key_id']) self.assertEqual(0, len(self.notifier.notifications), self.notifier.notifications) self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, volume_id) volume = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual("error", volume.status) db.volume_destroy(context.get_admin_context(), volume_id) def test_create_driver_not_initialized_rescheduling(self): self.volume.driver._initialized = False volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, volume_id, {'volume_properties': self.volume_params}, {'retry': {'num_attempts': 1, 'host': []}}) # NOTE(dulek): Volume should be rescheduled as we passed request_spec # and filter_properties, assert that it wasn't counted in # allocated_capacity tracking. self.assertEqual({}, self.volume.stats['pools']) db.volume_destroy(context.get_admin_context(), volume_id) def test_create_non_cinder_exception_rescheduling(self): params = self.volume_params del params['host'] volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **params) volume_id = volume['id'] with mock.patch.object(self.volume.driver, 'create_volume', side_effect=processutils.ProcessExecutionError): self.assertRaises(processutils.ProcessExecutionError, self.volume.create_volume, self.context, volume_id, {'volume_properties': params}, {'retry': {'num_attempts': 1, 'host': []}}) # NOTE(dulek): Volume should be rescheduled as we passed request_spec # and filter_properties, assert that it wasn't counted in # allocated_capacity tracking. 
self.assertEqual({}, self.volume.stats['pools']) db.volume_destroy(context.get_admin_context(), volume_id) @mock.patch.object(QUOTAS, 'rollback') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(QUOTAS, 'reserve') def test_delete_driver_not_initialized(self, reserve, commit, rollback): self.volume.driver._initialized = False def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] def fake_commit_and_rollback(context, reservations, project_id=None): pass reserve.return_value = fake_reserve commit.return_value = fake_commit_and_rollback rollback.return_value = fake_commit_and_rollback volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) self.assertIsNone(volume['encryption_key_id']) self.assertEqual(0, len(self.notifier.notifications), self.notifier.notifications) self.assertRaises(exception.DriverNotInitialized, self.volume.delete_volume, self.context, volume.id) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual("error_deleting", volume.status) volume.destroy() @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock()) @mock.patch('cinder.quota.QUOTAS.commit', new=mock.Mock()) @mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION']) def test_create_delete_volume(self, _mock_reserve): """Test volume can be created and deleted.""" volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] self.assertIsNone(volume['encryption_key_id']) self.assertEqual(0, len(self.notifier.notifications), self.notifier.notifications) self.volume.create_volume(self.context, volume_id) self.assertEqual(2, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[0] self.assertEqual('volume.create.start', msg['event_type']) expected = { 'status': 'creating', 'host': socket.gethostname(), 'display_name': 'test_volume', 'availability_zone': 'nova', 'tenant_id': self.context.project_id, 'created_at': 'DONTCARE', 'volume_id': volume_id, 'volume_type': None, 'snapshot_id': None, 'user_id': fake.user_id, 'launched_at': 'DONTCARE', 'size': 1, 'replication_status': 'disabled', 'replication_extended_status': None, 'replication_driver_data': None, 'metadata': [], 'volume_attachment': [], } self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[1] self.assertEqual('volume.create.end', msg['event_type']) expected['status'] = 'available' self.assertDictMatch(expected, msg['payload']) self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), volume_id).id) self.volume.delete_volume(self.context, volume_id) vol = db.volume_get(context.get_admin_context(read_deleted='yes'), volume_id) self.assertEqual('deleted', vol['status']) self.assertEqual(4, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[2] self.assertEqual('volume.delete.start', msg['event_type']) self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[3] self.assertEqual('volume.delete.end', msg['event_type']) self.assertDictMatch(expected, msg['payload']) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume_id) def test_create_delete_volume_with_metadata(self): """Test volume can be created with metadata and deleted.""" test_meta = {'fake_key': 'fake_value'} volume = tests_utils.create_volume(self.context, metadata=test_meta, **self.volume_params) volume_id = 
volume['id'] self.volume.create_volume(self.context, volume_id) self.assertEqual(test_meta, volume.metadata) self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume_id) def test_create_volume_with_invalid_metadata(self): """Test volume create with too much metadata fails.""" volume_api = cinder.volume.api.API() test_meta = {'fake_key': 'fake_value' * 256} self.assertRaises(exception.InvalidVolumeMetadataSize, volume_api.create, self.context, 1, 'name', 'description', None, None, None, test_meta) def test_update_volume_metadata_with_metatype(self): """Test update volume metadata with different metadata type.""" test_meta1 = {'fake_key1': 'fake_value1'} test_meta2 = {'fake_key1': 'fake_value2'} FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) self.volume.create_volume(self.context, volume.id, volume=volume) volume_api = cinder.volume.api.API() # update user metadata associated with the volume. result_meta = volume_api.update_volume_metadata( self.context, volume, test_meta2, False, common.METADATA_TYPES.user) self.assertEqual(test_meta2, result_meta) # create image metadata associated with the volume. result_meta = volume_api.update_volume_metadata( self.context, volume, test_meta1, False, common.METADATA_TYPES.image) self.assertEqual(test_meta1, result_meta) # update image metadata associated with the volume. result_meta = volume_api.update_volume_metadata( self.context, volume, test_meta2, False, common.METADATA_TYPES.image) self.assertEqual(test_meta2, result_meta) # update volume metadata with invalid metadta type. self.assertRaises(exception.InvalidMetadataType, volume_api.update_volume_metadata, self.context, volume, test_meta1, False, FAKE_METADATA_TYPE.fake_type) def test_update_volume_metadata_maintenance(self): """Test update volume metadata with different metadata type.""" test_meta1 = {'fake_key1': 'fake_value1'} FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.update_volume_metadata, self.context, volume, test_meta1, False, FAKE_METADATA_TYPE.fake_type) @mock.patch('cinder.db.volume_update') def test_update_with_ovo(self, volume_update): """Test update volume using oslo_versionedobject.""" volume = tests_utils.create_volume(self.context, **self.volume_params) volume_api = cinder.volume.api.API() updates = {'display_name': 'foobbar'} volume_api.update(self.context, volume, updates) volume_update.assert_called_once_with(self.context, volume.id, updates) self.assertEqual('foobbar', volume.display_name) def test_delete_volume_metadata_with_metatype(self): """Test delete volume metadata with different metadata type.""" test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} test_meta2 = {'fake_key1': 'fake_value1'} FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) volume_api = cinder.volume.api.API() # delete user metadata associated with the volume. 
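        # The metadata type selects the backing store (per the lookups used
        # below): METADATA_TYPES.user edits the volume_metadata entries,
        # while METADATA_TYPES.image edits the volume_glance_metadata
        # entries.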

    def test_delete_volume_metadata_with_metatype(self):
        """Test delete volume metadata with different metadata type."""
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        test_meta2 = {'fake_key1': 'fake_value1'}
        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        volume_api = cinder.volume.api.API()

        # delete user metadata associated with the volume.
        volume_api.delete_volume_metadata(
            self.context,
            volume,
            'fake_key2',
            common.METADATA_TYPES.user)
        self.assertEqual(test_meta2,
                         db.volume_metadata_get(self.context, volume_id))

        # create image metadata associated with the volume.
        result_meta = volume_api.update_volume_metadata(
            self.context,
            volume,
            test_meta1,
            False,
            common.METADATA_TYPES.image)
        self.assertEqual(test_meta1, result_meta)

        # delete image metadata associated with the volume.
        volume_api.delete_volume_metadata(
            self.context,
            volume,
            'fake_key2',
            common.METADATA_TYPES.image)

        # parse the result to build the dict.
        rows = db.volume_glance_metadata_get(self.context, volume_id)
        result = {}
        for row in rows:
            result[row['key']] = row['value']
        self.assertEqual(test_meta2, result)

        # delete volume metadata with invalid metadata type.
        self.assertRaises(exception.InvalidMetadataType,
                          volume_api.delete_volume_metadata,
                          self.context,
                          volume,
                          'fake_key1',
                          FAKE_METADATA_TYPE.fake_type)

    def test_delete_volume_metadata_maintenance(self):
        """Test delete volume metadata in maintenance."""
        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume['status'] = 'maintenance'
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume,
                          volume_api.delete_volume_metadata,
                          self.context,
                          volume,
                          'fake_key1',
                          FAKE_METADATA_TYPE.fake_type)

    def test_volume_attach_in_maintenance(self):
        """Test attach the volume in maintenance."""
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume['status'] = 'maintenance'
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume,
                          volume_api.attach,
                          self.context,
                          volume, None, None, None, None)

    def test_volume_detach_in_maintenance(self):
        """Test detach the volume in maintenance."""
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume['status'] = 'maintenance'
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume,
                          volume_api.detach,
                          self.context,
                          volume, None)

    def test_initialize_connection_maintenance(self):
        """Test initialize connection in maintenance."""
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume['status'] = 'maintenance'
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume,
                          volume_api.initialize_connection,
                          self.context,
                          volume, None)

    def test_accept_transfer_maintenance(self):
        """Test accept transfer in maintenance."""
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume['status'] = 'maintenance'
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume,
                          volume_api.accept_transfer,
                          self.context,
                          volume, None, None)

    def test_copy_volume_to_image_maintenance(self):
        """Test copy volume to image in maintenance."""
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume['status'] = 'maintenance'
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume,
                          volume_api.copy_volume_to_image,
                          self.context,
                          volume,
                          test_meta1,
                          force=True)
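
    # All of the maintenance tests above rely on the same API-level guard:
    # while a volume's status is 'maintenance' the volume API refuses
    # metadata changes, attach/detach, connection setup, transfer accept
    # and image upload by raising InvalidVolume before reaching the driver.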

    @mock.patch.object(cinder.volume.api.API, 'list_availability_zones')
    def test_create_volume_uses_default_availability_zone(self, mock_list_az):
        """Test setting availability_zone correctly during volume create."""
        mock_list_az.return_value = ({'name': 'az1', 'available': True},
                                     {'name': 'az2', 'available': True},
                                     {'name': 'default-az',
                                      'available': True})

        volume_api = cinder.volume.api.API()

        # Test backwards compatibility, default_availability_zone not set
        self.override_config('storage_availability_zone', 'az2')
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description')
        self.assertEqual('az2', volume['availability_zone'])

        self.override_config('default_availability_zone', 'default-az')
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description')
        self.assertEqual('default-az', volume['availability_zone'])

    @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.MagicMock())
    @mock.patch('cinder.quota.QUOTAS.commit', new=mock.MagicMock())
    @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
    def test_create_volume_with_volume_type(self, _mock_reserve):
        """Test volume creation with default volume type."""
        volume_api = cinder.volume.api.API()

        # Create volume with default volume type while default
        # volume type doesn't exist, volume_type_id should be NULL
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description')
        self.assertIsNone(volume['volume_type_id'])
        self.assertIsNone(volume['encryption_key_id'])

        # Create default volume type
        vol_type = conf_fixture.def_vol_type
        db.volume_type_create(context.get_admin_context(),
                              {'name': vol_type, 'extra_specs': {}})
        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 vol_type)

        # Create volume with default volume type
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description')
        self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
        self.assertIsNone(volume['encryption_key_id'])

        # Create volume with specific volume type
        vol_type = 'test'
        db.volume_type_create(context.get_admin_context(),
                              {'name': vol_type, 'extra_specs': {}})
        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 vol_type)

        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description',
                                   volume_type=db_vol_type)
        self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])

    @mock.patch.object(keymgr, 'API', fake_keymgr.fake_api)
    def test_create_volume_with_encrypted_volume_type(self):
        ctxt = context.get_admin_context()

        db.volume_type_create(ctxt,
                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
                               'name': 'LUKS'})
        db.volume_type_encryption_create(
            ctxt,
            '61298380-0c12-11e3-bfd6-4b48424183be',
            {'control_location': 'front-end',
             'provider': ENCRYPTION_PROVIDER})

        volume_api = cinder.volume.api.API()

        db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')

        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description',
                                   volume_type=db_vol_type)
        self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
        self.assertIsNotNone(volume['encryption_key_id'])

    def test_create_volume_with_provider_id(self):
        volume_params_with_provider_id = dict(provider_id=fake.provider_id,
                                              **self.volume_params)

        volume = tests_utils.create_volume(self.context,
                                           **volume_params_with_provider_id)
        self.volume.create_volume(self.context, volume['id'])
        self.assertEqual(fake.provider_id, volume['provider_id'])
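
    # Encryption is driven entirely by the volume type: when the type has
    # an encryption spec, the API allocates a key through the configured
    # key manager (a fake in these tests) and stores its ID in
    # encryption_key_id; plain types leave that field None.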

    @mock.patch.object(keymgr, 'API', new=fake_keymgr.fake_api)
    def test_create_delete_volume_with_encrypted_volume_type(self):
        db_vol_type = db.volume_type_create(
            self.context, {'id': fake.volume_type_id, 'name': 'LUKS'})

        db.volume_type_encryption_create(
            self.context, fake.volume_type_id,
            {'control_location': 'front-end',
             'provider': ENCRYPTION_PROVIDER})

        volume = self.volume_api.create(self.context,
                                        1,
                                        'name',
                                        'description',
                                        volume_type=db_vol_type)

        self.assertIsNotNone(volume.get('encryption_key_id', None))
        self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
        self.assertIsNotNone(volume['encryption_key_id'])

        volume['host'] = 'fake_host'
        volume['status'] = 'available'
        db.volume_update(self.context, volume['id'], {'status': 'available'})
        self.volume_api.delete(self.context, volume)

        volume = db.volume_get(self.context, volume['id'])
        self.assertEqual('deleting', volume['status'])

        db.volume_destroy(self.context, volume['id'])
        self.assertRaises(exception.NotFound,
                          db.volume_get,
                          self.context,
                          volume['id'])

    def test_extra_capabilities(self):
        # Test valid extra_capabilities.
        fake_capabilities = {'key1': 1, 'key2': 2}

        with mock.patch.object(jsonutils, 'loads') as mock_loads:
            mock_loads.return_value = fake_capabilities
            manager = vol_manager.VolumeManager()
            manager.stats = {'pools': {}}
            manager.driver.set_initialized()
            manager.publish_service_capabilities(self.context)
            self.assertTrue(mock_loads.called)
            volume_stats = manager.last_capabilities
            self.assertEqual(fake_capabilities['key1'],
                             volume_stats['key1'])
            self.assertEqual(fake_capabilities['key2'],
                             volume_stats['key2'])

    def test_extra_capabilities_fail(self):
        with mock.patch.object(jsonutils, 'loads') as mock_loads:
            mock_loads.side_effect = exception.CinderException('test')
            self.assertRaises(exception.CinderException,
                              vol_manager.VolumeManager)

    def test_delete_busy_volume(self):
        """Test volume survives deletion if driver reports it as busy."""
        volume = tests_utils.create_volume(self.context,
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)

        self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
        self.volume.driver.delete_volume(
            mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy(
                                      volume_name='fake'))
        self.mox.ReplayAll()

        self.volume.delete_volume(self.context, volume_id)
        volume_ref = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(volume_id, volume_ref.id)
        self.assertEqual("available", volume_ref.status)

    def test_get_volume_different_tenant(self):
        """Test can't get volume of another tenant when viewable_admin_meta."""
        volume = tests_utils.create_volume(self.context,
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)

        another_context = context.RequestContext('another_user_id',
                                                 'another_project_id',
                                                 is_admin=False)
        self.assertNotEqual(another_context.project_id,
                            self.context.project_id)

        volume_api = cinder.volume.api.API()

        self.assertRaises(exception.VolumeNotFound, volume_api.get,
                          another_context, volume_id,
                          viewable_admin_meta=True)
        self.assertEqual(volume_id,
                         volume_api.get(self.context, volume_id)['id'])

        self.volume.delete_volume(self.context, volume_id)

    def test_get_all_limit_bad_value(self):
        """Test value of 'limit' is numeric and >= 0"""
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.get_all,
                          self.context,
                          limit="A")
        self.assertRaises(exception.InvalidInput,
                          volume_api.get_all,
                          self.context,
                          limit="-1")
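
    # get_all() only switches to the unscoped volume_get_all DB query when
    # the caller is an admin *and* passes all_tenants=1; non-admins and
    # all_tenants=0 stay on the project-scoped query, as the next test
    # verifies by stubbing both DB calls.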

    def test_get_all_tenants_volume_list(self):
        """Validate when the volume list for all tenants is returned"""
        volume_api = cinder.volume.api.API()

        with mock.patch.object(volume_api.db,
                               'volume_get_all_by_project') as by_project:
            with mock.patch.object(volume_api.db,
                                   'volume_get_all') as get_all:
                db_volume = {'volume_type_id': fake.volume_type_id,
                             'name': 'fake_name',
                             'host': 'fake_host',
                             'id': fake.volume_id}

                volume = fake_volume.fake_db_volume(**db_volume)
                by_project.return_value = [volume]
                get_all.return_value = [volume]

                volume_api.get_all(self.context,
                                   filters={'all_tenants': '0'})
                self.assertTrue(by_project.called)
                by_project.called = False

                self.context.is_admin = False
                volume_api.get_all(self.context,
                                   filters={'all_tenants': '1'})
                self.assertTrue(by_project.called)

                # check for volume list of all tenants
                self.context.is_admin = True
                volume_api.get_all(self.context,
                                   filters={'all_tenants': '1'})
                self.assertTrue(get_all.called)

    def test_delete_volume_in_error_extending(self):
        """Test volume can be deleted in error_extending status."""
        # create a volume
        volume = tests_utils.create_volume(self.context,
                                           **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])

        # delete 'error_extending' volume
        db.volume_update(self.context, volume['id'],
                         {'status': 'error_extending'})
        self.volume.delete_volume(self.context, volume['id'])
        self.assertRaises(exception.NotFound, db.volume_get,
                          self.context, volume['id'])

    @mock.patch.object(db.sqlalchemy.api, 'volume_get',
                       side_effect=exception.VolumeNotFound(
                           volume_id='12345678-1234-5678-1234-567812345678'))
    def test_delete_volume_not_found(self, mock_get_volume):
        """Test delete volume moves on if the volume does not exist."""
        volume_id = '12345678-1234-5678-1234-567812345678'
        self.volume.delete_volume(self.context, volume_id)
        self.assertTrue(mock_get_volume.called)

    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
                'create_volume_from_snapshot')
    def test_create_volume_from_snapshot(self, mock_create_from_snap):
        """Test volume can be created from a snapshot."""
        volume_src = tests_utils.create_volume(self.context,
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_src['id'])
        snapshot_id = create_snapshot(volume_src['id'],
                                      size=volume_src['size'])['id']
        snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
        self.volume.create_snapshot(self.context, volume_src['id'],
                                    snapshot_obj)
        volume_dst = tests_utils.create_volume(self.context,
                                               snapshot_id=snapshot_id,
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_dst['id'])
        self.assertEqual(volume_dst['id'],
                         db.volume_get(
                             context.get_admin_context(),
                             volume_dst['id']).id)
        self.assertEqual(snapshot_id,
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).snapshot_id)

        self.volume.delete_volume(self.context, volume_dst['id'])
        self.volume.delete_snapshot(self.context, snapshot_obj)
        self.volume.delete_volume(self.context, volume_src['id'])
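
    # The following tests pin down the type-compatibility rules for clones:
    # creating from a snapshot or source volume whose volume type differs
    # from the requested type is rejected with InvalidInput (or
    # InvalidVolumeType when the source has no type at all), unless the
    # types resolve to the same backend.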

    @mock.patch('cinder.volume.flows.api.create_volume.get_flow')
    def test_create_volume_from_snapshot_with_types(self, _get_flow):
        """Test volume create from snapshot with types including mismatch."""
        volume_api = cinder.volume.api.API()

        db.volume_type_create(
            context.get_admin_context(),
            {'name': 'foo',
             'extra_specs': {'volume_backend_name': 'dev_1'}})

        db.volume_type_create(
            context.get_admin_context(),
            {'name': 'biz',
             'extra_specs': {'volume_backend_name': 'dev_2'}})

        foo_type = db.volume_type_get_by_name(context.get_admin_context(),
                                              'foo')
        biz_type = db.volume_type_get_by_name(context.get_admin_context(),
                                              'biz')

        snapshot = {'id': fake.snapshot_id,
                    'status': 'available',
                    'volume_size': 10,
                    'volume_type_id': biz_type['id']}
        snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
                                                       **snapshot)
        # Make sure the case of specifying a type that
        # doesn't match the snapshots type fails
        self.assertRaises(exception.InvalidInput,
                          volume_api.create,
                          self.context,
                          size=1,
                          name='fake_name',
                          description='fake_desc',
                          volume_type=foo_type,
                          snapshot=snapshot_obj)

        # Make sure that trying to specify a type
        # when the snapshots type is None fails
        snapshot_obj.volume_type_id = None
        self.assertRaises(exception.InvalidVolumeType,
                          volume_api.create,
                          self.context,
                          size=1,
                          name='fake_name',
                          description='fake_desc',
                          volume_type=foo_type,
                          snapshot=snapshot_obj)

        with mock.patch.object(cinder.volume.volume_types,
                               'get_volume_type') as mock_get_type:
            mock_get_type.return_value = biz_type
            snapshot_obj.volume_type_id = foo_type['id']
            volume_api.create(self.context, size=1, name='fake_name',
                              description='fake_desc', volume_type=foo_type,
                              snapshot=snapshot_obj)

        db.volume_type_destroy(context.get_admin_context(),
                               foo_type['id'])
        db.volume_type_destroy(context.get_admin_context(),
                               biz_type['id'])

    @mock.patch('cinder.volume.flows.api.create_volume.get_flow')
    def test_create_volume_from_source_with_types(self, _get_flow):
        """Test volume create from source with types including mismatch."""
        volume_api = cinder.volume.api.API()

        db.volume_type_create(
            context.get_admin_context(),
            {'name': 'foo',
             'extra_specs': {'volume_backend_name': 'dev_1'}})

        db.volume_type_create(
            context.get_admin_context(),
            {'name': 'biz',
             'extra_specs': {'volume_backend_name': 'dev_2'}})

        foo_type = db.volume_type_get_by_name(context.get_admin_context(),
                                              'foo')
        biz_type = db.volume_type_get_by_name(context.get_admin_context(),
                                              'biz')

        source_vol = {'id': fake.volume_id,
                      'status': 'available',
                      'volume_size': 10,
                      'volume_type': biz_type,
                      'volume_type_id': biz_type['id']}

        self.assertRaises(exception.InvalidInput,
                          volume_api.create,
                          self.context,
                          size=1,
                          name='fake_name',
                          description='fake_desc',
                          volume_type=foo_type,
                          source_volume=source_vol)

        # Make sure that trying to specify a type
        # when the source type is None fails
        source_vol['volume_type_id'] = None
        source_vol['volume_type'] = None
        self.assertRaises(exception.InvalidVolumeType,
                          volume_api.create,
                          self.context,
                          size=1,
                          name='fake_name',
                          description='fake_desc',
                          volume_type=foo_type,
                          source_volume=source_vol)

        with mock.patch.object(cinder.volume.volume_types,
                               'get_volume_type') as mock_get_type:
            mock_get_type.return_value = biz_type
            source_vol['volume_type_id'] = biz_type['id']
            source_vol['volume_type'] = biz_type
            volume_api.create(self.context, size=1, name='fake_name',
                              description='fake_desc', volume_type=biz_type,
                              source_volume=source_vol)

        db.volume_type_destroy(context.get_admin_context(),
                               foo_type['id'])
        db.volume_type_destroy(context.get_admin_context(),
                               biz_type['id'])
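
    # A type mismatch between a new volume and its source/snapshot is
    # tolerated in two cases, covered next: both types name the same
    # volume_backend_name, or the deployment only has a single backend
    # (service_get_all_by_topic returns one host).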

    @mock.patch('cinder.volume.flows.api.create_volume.get_flow')
    def test_create_volume_from_source_with_same_backend(self, _get_flow):
        """Test volume create from source with type mismatch same backend."""
        volume_api = cinder.volume.api.API()

        foo_type = {
            'name': 'foo',
            'qos_specs_id': None,
            'deleted': False,
            'created_at': datetime.datetime(2015, 5, 8, 0, 40, 5, 408232),
            'updated_at': None,
            'extra_specs': {'volume_backend_name': 'dev_1'},
            'is_public': True,
            'deleted_at': None,
            'id': '29e43b50-2cd7-4d0c-8ddd-2119daab3a38',
            'description': None}

        biz_type = {
            'name': 'biz',
            'qos_specs_id': None,
            'deleted': False,
            'created_at': datetime.datetime(2015, 5, 8, 0, 20, 5, 408232),
            'updated_at': None,
            'extra_specs': {'volume_backend_name': 'dev_1'},
            'is_public': True,
            'deleted_at': None,
            'id': '34e54c31-3bc8-5c1d-9fff-2225bcce4b59',
            'description': None}

        source_vol = {'id': fake.volume_id,
                      'status': 'available',
                      'volume_size': 10,
                      'volume_type': biz_type,
                      'volume_type_id': biz_type['id']}

        with mock.patch.object(cinder.volume.volume_types,
                               'get_volume_type') as mock_get_type:
            mock_get_type.return_value = biz_type
            volume_api.create(self.context,
                              size=1,
                              name='fake_name',
                              description='fake_desc',
                              volume_type=foo_type,
                              source_volume=source_vol)

    @mock.patch('cinder.volume.flows.api.create_volume.get_flow')
    def test_create_from_source_and_snap_only_one_backend(self, _get_flow):
        """Test create from source and snap with type mismatch one backend."""
        volume_api = cinder.volume.api.API()

        foo_type = {
            'name': 'foo',
            'qos_specs_id': None,
            'deleted': False,
            'created_at': datetime.datetime(2015, 5, 8, 0, 40, 5, 408232),
            'updated_at': None,
            'extra_specs': {'some_key': 3},
            'is_public': True,
            'deleted_at': None,
            'id': '29e43b50-2cd7-4d0c-8ddd-2119daab3a38',
            'description': None}

        biz_type = {
            'name': 'biz',
            'qos_specs_id': None,
            'deleted': False,
            'created_at': datetime.datetime(2015, 5, 8, 0, 20, 5, 408232),
            'updated_at': None,
            'extra_specs': {'some_other_key': 4},
            'is_public': True,
            'deleted_at': None,
            'id': '34e54c31-3bc8-5c1d-9fff-2225bcce4b59',
            'description': None}

        source_vol = {'id': fake.volume_id,
                      'status': 'available',
                      'volume_size': 10,
                      'volume_type': biz_type,
                      'volume_type_id': biz_type['id']}

        snapshot = {'id': fake.snapshot_id,
                    'status': 'available',
                    'volume_size': 10,
                    'volume_type_id': biz_type['id']}
        snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
                                                       **snapshot)

        with mock.patch.object(db,
                               'service_get_all_by_topic') as mock_get_service, \
            mock.patch.object(volume_api,
                              'list_availability_zones') as mock_get_azs:
            mock_get_service.return_value = [{'host': 'foo'}]
            mock_get_azs.return_value = {}
            volume_api.create(self.context,
                              size=1,
                              name='fake_name',
                              description='fake_desc',
                              volume_type=foo_type,
                              source_volume=source_vol)

            volume_api.create(self.context,
                              size=1,
                              name='fake_name',
                              description='fake_desc',
                              volume_type=foo_type,
                              snapshot=snapshot_obj)

    def test_create_snapshot_driver_not_initialized(self):
        volume_src = tests_utils.create_volume(self.context,
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_src['id'])
        snapshot_id = create_snapshot(volume_src['id'],
                                      size=volume_src['size'])['id']
        snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)

        self.volume.driver._initialized = False

        self.assertRaises(exception.DriverNotInitialized,
                          self.volume.create_snapshot,
                          self.context, volume_src['id'],
                          snapshot_obj)

        # NOTE(flaper87): The snapshot status should be error.
        self.assertEqual("error", snapshot_obj.status)
self.assertEqual("error", snapshot_obj.status) # lets cleanup the mess self.volume.driver._initialized = True self.volume.delete_snapshot(self.context, snapshot_obj) self.volume.delete_volume(self.context, volume_src['id']) def _mock_synchronized(self, name, *s_args, **s_kwargs): def inner_sync1(f): def inner_sync2(*args, **kwargs): self.called.append('lock-%s' % (name)) ret = f(*args, **kwargs) self.called.append('unlock-%s' % (name)) return ret return inner_sync2 return inner_sync1 def _fake_execute(self, *cmd, **kwargs): pass @mock.patch.object(cinder.volume.drivers.lvm.LVMVolumeDriver, 'create_volume_from_snapshot') def test_create_volume_from_snapshot_check_locks( self, mock_lvm_create): # mock the synchroniser so we can record events self.stubs.Set(utils, 'synchronized', self._mock_synchronized) orig_flow = engine.ActionEngine.run def mock_flow_run(*args, **kwargs): # ensure the lock has been taken self.assertEqual(1, len(self.called)) # now proceed with the flow. ret = orig_flow(*args, **kwargs) return ret # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] # no lock self.volume.create_volume(self.context, src_vol_id) snap_id = create_snapshot(src_vol_id, size=src_vol['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) # no lock self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj) dst_vol = tests_utils.create_volume(self.context, snapshot_id=snap_id, **self.volume_params) dst_vol_id = dst_vol['id'] admin_ctxt = context.get_admin_context() # mock the flow runner so we can do some checks self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run) # locked self.volume.create_volume(self.context, volume_id=dst_vol_id, request_spec={'snapshot_id': snap_id}) self.assertEqual(2, len(self.called)) self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id) self.assertEqual(snap_id, db.volume_get(admin_ctxt, dst_vol_id).snapshot_id) # locked self.volume.delete_volume(self.context, dst_vol_id) self.assertEqual(4, len(self.called)) # locked self.volume.delete_snapshot(self.context, snapshot_obj) self.assertEqual(6, len(self.called)) # locked self.volume.delete_volume(self.context, src_vol_id) self.assertEqual(8, len(self.called)) self.assertEqual(['lock-%s' % ('%s-delete_snapshot' % (snap_id)), 'unlock-%s' % ('%s-delete_snapshot' % (snap_id)), 'lock-%s' % ('%s-delete_volume' % (dst_vol_id)), 'unlock-%s' % ('%s-delete_volume' % (dst_vol_id)), 'lock-%s' % ('%s-delete_snapshot' % (snap_id)), 'unlock-%s' % ('%s-delete_snapshot' % (snap_id)), 'lock-%s' % ('%s-delete_volume' % (src_vol_id)), 'unlock-%s' % ('%s-delete_volume' % (src_vol_id))], self.called) self.assertTrue(mock_lvm_create.called) def test_create_volume_from_volume_check_locks(self): # mock the synchroniser so we can record events self.stubs.Set(utils, 'synchronized', self._mock_synchronized) self.stubs.Set(utils, 'execute', self._fake_execute) orig_flow = engine.ActionEngine.run def mock_flow_run(*args, **kwargs): # ensure the lock has been taken self.assertEqual(1, len(self.called)) # now proceed with the flow. 

    def test_create_volume_from_volume_check_locks(self):
        # mock the synchroniser so we can record events
        self.stubs.Set(utils, 'synchronized', self._mock_synchronized)
        self.stubs.Set(utils, 'execute', self._fake_execute)

        orig_flow = engine.ActionEngine.run

        def mock_flow_run(*args, **kwargs):
            # ensure the lock has been taken
            self.assertEqual(1, len(self.called))
            # now proceed with the flow.
            ret = orig_flow(*args, **kwargs)
            return ret

        # create source volume
        src_vol = tests_utils.create_volume(self.context,
                                            **self.volume_params)
        src_vol_id = src_vol['id']

        # no lock
        self.volume.create_volume(self.context, src_vol_id)

        dst_vol = tests_utils.create_volume(self.context,
                                            source_volid=src_vol_id,
                                            **self.volume_params)
        dst_vol_id = dst_vol['id']
        admin_ctxt = context.get_admin_context()

        # mock the flow runner so we can do some checks
        self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run)

        # locked
        self.volume.create_volume(self.context, volume_id=dst_vol_id,
                                  request_spec={'source_volid': src_vol_id})
        self.assertEqual(2, len(self.called))
        self.assertEqual(dst_vol_id,
                         db.volume_get(admin_ctxt, dst_vol_id).id)
        self.assertEqual(src_vol_id,
                         db.volume_get(admin_ctxt, dst_vol_id).source_volid)

        # locked
        self.volume.delete_volume(self.context, dst_vol_id)
        self.assertEqual(4, len(self.called))

        # locked
        self.volume.delete_volume(self.context, src_vol_id)
        self.assertEqual(6, len(self.called))

        self.assertEqual(['lock-%s' % ('%s-delete_volume' % (src_vol_id)),
                          'unlock-%s' % ('%s-delete_volume' % (src_vol_id)),
                          'lock-%s' % ('%s-delete_volume' % (dst_vol_id)),
                          'unlock-%s' % ('%s-delete_volume' % (dst_vol_id)),
                          'lock-%s' % ('%s-delete_volume' % (src_vol_id)),
                          'unlock-%s' % ('%s-delete_volume' % (src_vol_id))],
                         self.called)

    def test_create_volume_from_volume_delete_lock_taken(self):
        # create source volume
        src_vol = tests_utils.create_volume(self.context,
                                            **self.volume_params)
        src_vol_id = src_vol['id']

        # no lock
        self.volume.create_volume(self.context, src_vol_id)

        dst_vol = tests_utils.create_volume(self.context,
                                            source_volid=src_vol_id,
                                            **self.volume_params)

        orig_elevated = self.context.elevated

        gthreads = []

        def mock_elevated(*args, **kwargs):
            # unset mock so it is only called once
            self.stubs.Set(self.context, 'elevated', orig_elevated)

            # we expect this to block and then fail
            t = eventlet.spawn(self.volume.create_volume,
                               self.context,
                               volume_id=dst_vol.id,
                               request_spec={'source_volid': src_vol_id})
            gthreads.append(t)

            return orig_elevated(*args, **kwargs)

        # mock something from early on in the delete operation and within the
        # lock so that when we do the create we expect it to block.
        self.stubs.Set(self.context, 'elevated', mock_elevated)

        # locked
        self.volume.delete_volume(self.context, src_vol_id)

        # we expect the volume create to fail with the following err since the
        # source volume was deleted while the create was locked. Note that the
        # volume is still in the db since it was created by the test prior to
        # calling manager.create_volume.
        with mock.patch('sys.stderr', new=six.StringIO()):
            self.assertRaises(exception.VolumeNotFound, gthreads[0].wait)
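
    # The *_delete_lock_taken tests race a create against a delete on
    # purpose: hooking context.elevated(), which is called early inside the
    # locked delete path, lets the test spawn the competing create from
    # within the critical section, so the create must block on the
    # per-volume lock and then fail once the source is gone.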

    def _raise_metadata_copy_failure(self, method, dst_vol_id, **kwargs):
        # MetadataCopyFailure exception will be raised if DB service is Down
        # while copying the volume glance metadata
        with mock.patch.object(db, method) as mock_db:
            mock_db.side_effect = exception.MetadataCopyFailure(
                reason="Because of DB service down.")
            self.assertRaises(exception.MetadataCopyFailure,
                              self.volume.create_volume,
                              self.context,
                              dst_vol_id,
                              **kwargs)

        # ensure that status of volume is 'error'
        vol = db.volume_get(self.context, dst_vol_id)
        self.assertEqual('error', vol['status'])

        # cleanup resource
        db.volume_destroy(self.context, dst_vol_id)

    @mock.patch('cinder.utils.execute')
    def test_create_volume_from_volume_with_glance_volume_metadata_none(
            self, mock_execute):
        # create source volume
        mock_execute.return_value = None
        src_vol = tests_utils.create_volume(self.context,
                                            **self.volume_params)
        src_vol_id = src_vol['id']

        self.volume.create_volume(self.context, src_vol_id)
        # set bootable flag of volume to True
        db.volume_update(self.context, src_vol['id'], {'bootable': True})

        # create volume from source volume
        dst_vol = tests_utils.create_volume(self.context,
                                            source_volid=src_vol_id,
                                            **self.volume_params)
        self.volume.create_volume(self.context, dst_vol['id'])

        self.assertRaises(exception.GlanceMetadataNotFound,
                          db.volume_glance_metadata_copy_from_volume_to_volume,
                          self.context,
                          src_vol_id,
                          dst_vol['id'])

        # ensure that status of volume is 'available'
        vol = db.volume_get(self.context, dst_vol['id'])
        self.assertEqual('available', vol['status'])

        # cleanup resource
        db.volume_destroy(self.context, src_vol_id)
        db.volume_destroy(self.context, dst_vol['id'])

    @mock.patch('cinder.utils.execute')
    def test_create_volume_from_volume_raise_metadata_copy_failure(
            self, mock_execute):
        # create source volume
        mock_execute.return_value = None
        src_vol = tests_utils.create_volume(self.context,
                                            **self.volume_params)
        src_vol_id = src_vol['id']

        self.volume.create_volume(self.context, src_vol_id)
        # set bootable flag of volume to True
        db.volume_update(self.context, src_vol['id'], {'bootable': True})

        # create volume from source volume
        dst_vol = tests_utils.create_volume(self.context,
                                            source_volid=src_vol_id,
                                            **self.volume_params)
        self._raise_metadata_copy_failure(
            'volume_glance_metadata_copy_from_volume_to_volume',
            dst_vol['id'])

        # cleanup resource
        db.volume_destroy(self.context, src_vol_id)

    @mock.patch('cinder.utils.execute')
    def test_create_volume_from_snapshot_raise_metadata_copy_failure(
            self, mock_execute):
        # create source volume
        mock_execute.return_value = None
        src_vol = tests_utils.create_volume(self.context,
                                            **self.volume_params)
        src_vol_id = src_vol['id']

        self.volume.create_volume(self.context, src_vol_id)
        # set bootable flag of volume to True
        db.volume_update(self.context, src_vol['id'], {'bootable': True})

        # create volume from snapshot
        snapshot_id = create_snapshot(src_vol['id'])['id']
        snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
        self.volume.create_snapshot(self.context, src_vol['id'],
                                    snapshot_obj)

        # ensure that status of snapshot is 'available'
        self.assertEqual('available', snapshot_obj.status)

        dst_vol = tests_utils.create_volume(self.context,
                                            snapshot_id=snapshot_id,
                                            **self.volume_params)
        self._raise_metadata_copy_failure(
            'volume_glance_metadata_copy_to_volume',
            dst_vol['id'])

        # cleanup resource
        snapshot_obj.destroy()
        db.volume_destroy(self.context, src_vol_id)

    @mock.patch(
        'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
    @mock.patch('cinder.utils.execute')
    def test_create_volume_from_srcreplica_raise_metadata_copy_failure(
            self, mock_execute, _create_replica_test):
        mock_execute.return_value = None
        _create_replica_test.return_value = None
        # create source volume
        src_vol = tests_utils.create_volume(self.context,
                                            **self.volume_params)
        src_vol_id = src_vol['id']

        self.volume.create_volume(self.context, src_vol_id)
        # set bootable flag of volume to True
        db.volume_update(self.context, src_vol['id'], {'bootable': True})

        # create volume from source volume
        dst_vol = tests_utils.create_volume(self.context,
                                            source_volid=src_vol_id,
                                            **self.volume_params)
        self._raise_metadata_copy_failure(
            'volume_glance_metadata_copy_from_volume_to_volume',
            dst_vol['id'])

        # cleanup resource
        db.volume_destroy(self.context, src_vol_id)

    @mock.patch('cinder.utils.execute')
    def test_create_volume_from_snapshot_with_glance_volume_metadata_none(
            self, mock_execute):
        # create source volume
        mock_execute.return_value = None
        src_vol = tests_utils.create_volume(self.context,
                                            **self.volume_params)
        src_vol_id = src_vol['id']

        self.volume.create_volume(self.context, src_vol_id)
        # set bootable flag of volume to True
        db.volume_update(self.context, src_vol['id'], {'bootable': True})
        volume = db.volume_get(self.context, src_vol_id)

        # create snapshot of volume
        snapshot_id = create_snapshot(volume['id'])['id']
        snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
        self.volume.create_snapshot(self.context, volume['id'],
                                    snapshot_obj)

        # ensure that status of snapshot is 'available'
        self.assertEqual('available', snapshot_obj.status)

        # create volume from snapshot
        dst_vol = tests_utils.create_volume(self.context,
                                            snapshot_id=snapshot_id,
                                            **self.volume_params)
        self.volume.create_volume(self.context, dst_vol.id, volume=dst_vol)

        self.assertRaises(exception.GlanceMetadataNotFound,
                          db.volume_glance_metadata_copy_to_volume,
                          self.context, dst_vol['id'], snapshot_id)

        # ensure that status of volume is 'available'
        vol = db.volume_get(self.context, dst_vol['id'])
        self.assertEqual('available', vol['status'])

        # cleanup resource
        snapshot_obj.destroy()
        db.volume_destroy(self.context, src_vol_id)
        db.volume_destroy(self.context, dst_vol['id'])

    @mock.patch(
        'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
    def test_create_volume_from_srcreplica_with_glance_volume_metadata_none(
            self, _create_replica_test):
        """Test volume can be created from a volume replica."""
        _create_replica_test.return_value = None

        volume_src = tests_utils.create_volume(self.context,
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_src['id'])
        db.volume_update(self.context, volume_src['id'], {'bootable': True})

        volume = db.volume_get(self.context, volume_src['id'])
        volume_dst = tests_utils.create_volume(
            self.context,
            **self.volume_params)
        self.volume.create_volume(self.context, volume_dst['id'],
                                  {'source_replicaid': volume['id']})

        self.assertRaises(exception.GlanceMetadataNotFound,
                          db.volume_glance_metadata_copy_from_volume_to_volume,
                          self.context,
                          volume_src['id'],
                          volume_dst['id'])

        self.assertEqual('available',
                         db.volume_get(self.context,
                                       volume_dst['id']).status)
        self.assertTrue(_create_replica_test.called)

        # cleanup resource
        db.volume_destroy(self.context, volume_dst['id'])
        db.volume_destroy(self.context, volume_src['id'])

    def test_create_volume_from_snapshot_delete_lock_taken(self):
        # create source volume
        src_vol = tests_utils.create_volume(self.context,
                                            **self.volume_params)
        src_vol_id = src_vol['id']

        # no lock
        self.volume.create_volume(self.context, src_vol_id)

        # create snapshot
        snap_id = create_snapshot(src_vol_id,
                                  size=src_vol['size'])['id']
        snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
        # no lock
        self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj)

        # create vol from snapshot...
        dst_vol = tests_utils.create_volume(self.context,
                                            snapshot_id=snap_id,
                                            source_volid=src_vol_id,
                                            **self.volume_params)
        dst_vol_id = dst_vol['id']

        orig_elevated = self.context.elevated

        gthreads = []

        def mock_elevated(*args, **kwargs):
            # unset mock so it is only called once
            self.stubs.Set(self.context, 'elevated', orig_elevated)

            # We expect this to block and then fail
            t = eventlet.spawn(self.volume.create_volume,
                               self.context,
                               volume_id=dst_vol_id,
                               request_spec={'snapshot_id': snap_id})
            gthreads.append(t)

            return orig_elevated(*args, **kwargs)

        # mock something from early on in the delete operation and within the
        # lock so that when we do the create we expect it to block.
        self.stubs.Set(self.context, 'elevated', mock_elevated)

        # locked
        self.volume.delete_snapshot(self.context, snapshot_obj)

        # we expect the volume create to fail with the following err since the
        # snapshot was deleted while the create was locked. Note that the
        # volume is still in the db since it was created by the test prior to
        # calling manager.create_volume.
        with mock.patch('sys.stderr', new=six.StringIO()):
            self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait)

        # locked
        self.volume.delete_volume(self.context, src_vol_id)
        # make sure it is gone
        self.assertRaises(exception.VolumeNotFound, db.volume_get,
                          self.context, src_vol_id)

    @mock.patch.object(keymgr, 'API', fake_keymgr.fake_api)
    def test_create_volume_from_snapshot_with_encryption(self):
        """Test volume can be created from a snapshot of an encrypted volume."""
        ctxt = context.get_admin_context()
        db.volume_type_create(ctxt,
                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
                               'name': 'LUKS'})
        db.volume_type_encryption_create(
            ctxt,
            '61298380-0c12-11e3-bfd6-4b48424183be',
            {'control_location': 'front-end',
             'provider': ENCRYPTION_PROVIDER})

        volume_api = cinder.volume.api.API()

        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 'LUKS')
        volume_src = volume_api.create(self.context,
                                       1,
                                       'name',
                                       'description',
                                       volume_type=db_vol_type)
        volume_src['host'] = 'fake_host'
        snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                        volume_src,
                                                        'name',
                                                        'description')
        snapshot_ref['status'] = 'available'  # status must be available
        volume_dst = volume_api.create(self.context,
                                       1,
                                       'name',
                                       'description',
                                       snapshot=snapshot_ref)
        self.assertEqual(volume_dst['id'],
                         db.volume_get(
                             context.get_admin_context(),
                             volume_dst['id']).id)
        self.assertEqual(snapshot_ref['id'],
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).snapshot_id)

        # ensure encryption keys match
        self.assertIsNotNone(volume_src['encryption_key_id'])
        self.assertIsNotNone(volume_dst['encryption_key_id'])

        key_manager = volume_api.key_manager  # must use *same* key manager
        volume_src_key = key_manager.get_key(self.context,
                                             volume_src['encryption_key_id'])
        volume_dst_key = key_manager.get_key(self.context,
                                             volume_dst['encryption_key_id'])
        self.assertEqual(volume_src_key, volume_dst_key)
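
    # Both encrypted-clone tests assert key continuity: a volume built from
    # an encrypted snapshot or source volume must reference an encryption
    # key that resolves, through the same key manager, to the same key as
    # the original.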

    def test_create_volume_from_encrypted_volume(self):
        """Test volume can be created from an encrypted volume."""
        self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api)

        volume_api = cinder.volume.api.API()

        ctxt = context.get_admin_context()
        db.volume_type_create(ctxt,
                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
                               'name': 'LUKS'})
        db.volume_type_encryption_create(
            ctxt,
            '61298380-0c12-11e3-bfd6-4b48424183be',
            {'control_location': 'front-end',
             'provider': ENCRYPTION_PROVIDER})

        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 'LUKS')
        volume_src = volume_api.create(self.context,
                                       1,
                                       'name',
                                       'description',
                                       volume_type=db_vol_type)
        volume_src['status'] = 'available'  # status must be available
        volume_dst = volume_api.create(self.context,
                                       1,
                                       'name',
                                       'description',
                                       source_volume=volume_src)
        self.assertEqual(volume_dst['id'],
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).id)
        self.assertEqual(volume_src['id'],
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).source_volid)

        # ensure encryption keys match
        self.assertIsNotNone(volume_src['encryption_key_id'])
        self.assertIsNotNone(volume_dst['encryption_key_id'])

        key_manager = volume_api.key_manager  # must use *same* key manager
        volume_src_key = key_manager.get_key(self.context,
                                             volume_src['encryption_key_id'])
        volume_dst_key = key_manager.get_key(self.context,
                                             volume_dst['encryption_key_id'])
        self.assertEqual(volume_src_key, volume_dst_key)

    def test_create_volume_from_snapshot_fail_bad_size(self):
        """Test volume can't be created from snapshot with bad volume size."""
        volume_api = cinder.volume.api.API()

        snapshot = {'id': fake.snapshot_id,
                    'status': 'available',
                    'volume_size': 10}
        snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
                                                       **snapshot)
        self.assertRaises(exception.InvalidInput,
                          volume_api.create,
                          self.context,
                          size=1,
                          name='fake_name',
                          description='fake_desc',
                          snapshot=snapshot_obj)

    def test_create_volume_from_snapshot_fail_wrong_az(self):
        """Test volume can't be created from snapshot in a different az."""
        volume_api = cinder.volume.api.API()

        def fake_list_availability_zones(enable_cache=False):
            return ({'name': 'nova', 'available': True},
                    {'name': 'az2', 'available': True})

        self.stubs.Set(volume_api,
                       'list_availability_zones',
                       fake_list_availability_zones)

        volume_src = tests_utils.create_volume(self.context,
                                               availability_zone='az2',
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_src['id'])
        snapshot = create_snapshot(volume_src['id'])

        self.volume.create_snapshot(self.context, volume_src['id'],
                                    snapshot)

        volume_dst = volume_api.create(self.context,
                                       size=1,
                                       name='fake_name',
                                       description='fake_desc',
                                       snapshot=snapshot)
        self.assertEqual('az2', volume_dst['availability_zone'])

        self.assertRaises(exception.InvalidInput,
                          volume_api.create,
                          self.context,
                          size=1,
                          name='fake_name',
                          description='fake_desc',
                          snapshot=snapshot,
                          availability_zone='nova')

    def test_create_volume_with_invalid_exclusive_options(self):
        """Test volume create with multiple exclusive options fails."""
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.create,
                          self.context,
                          1,
                          'name',
                          'description',
                          snapshot=fake.snapshot_id,
                          image_id=fake.image_id,
                          source_volume=fake.volume_id)

    @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
                       '_get_target_chap_auth')
    @mock.patch.object(db, 'volume_admin_metadata_get')
    @mock.patch.object(db.sqlalchemy.api, 'volume_get')
    @mock.patch.object(db, 'volume_update')
    def test_initialize_connection_fetchqos(self,
                                            _mock_volume_update,
                                            _mock_volume_get,
                                            _mock_volume_admin_metadata_get,
                                            mock_get_target):
        """Make sure initialize_connection returns correct information."""
        _fake_admin_meta = {'fake-key': 'fake-value'}
        _fake_volume = {'volume_type_id': fake.volume_type_id,
                        'name': 'fake_name',
                        'host': 'fake_host',
                        'id': fake.volume_id,
                        'volume_admin_metadata': _fake_admin_meta}

        _mock_volume_get.return_value = _fake_volume
        _mock_volume_update.return_value = _fake_volume
        _mock_volume_admin_metadata_get.return_value = _fake_admin_meta

        connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
        qos_values = {'consumer': 'front-end',
                      'specs': {
                          'key1': 'value1',
                          'key2': 'value2'}
                      }

        with mock.patch.object(cinder.volume.volume_types,
                               'get_volume_type_qos_specs') as type_qos, \
            mock.patch.object(cinder.tests.unit.fake_driver.FakeISCSIDriver,
                              'initialize_connection') as driver_init:
            type_qos.return_value = dict(qos_specs=qos_values)
            driver_init.return_value = {'data': {}}
            mock_get_target.return_value = None
            qos_specs_expected = {'key1': 'value1',
                                  'key2': 'value2'}
            # initialize_connection() passes qos_specs that is designated to
            # be consumed by front-end or both front-end and back-end
            conn_info = self.volume.initialize_connection(self.context,
                                                          fake.volume_id,
                                                          connector)
            self.assertDictMatch(qos_specs_expected,
                                 conn_info['data']['qos_specs'])

            qos_values.update({'consumer': 'both'})
            conn_info = self.volume.initialize_connection(self.context,
                                                          fake.volume_id,
                                                          connector)
            self.assertDictMatch(qos_specs_expected,
                                 conn_info['data']['qos_specs'])
            # initialize_connection() skips qos_specs that is designated to be
            # consumed by back-end only
            qos_values.update({'consumer': 'back-end'})
            type_qos.return_value = dict(qos_specs=qos_values)
            conn_info = self.volume.initialize_connection(self.context,
                                                          fake.volume_id,
                                                          connector)
            self.assertIsNone(conn_info['data']['qos_specs'])
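
    # initialize_connection() consults the volume type's qos_specs and only
    # forwards them in conn_info['data'] when the specs' 'consumer' is
    # 'front-end' or 'both'; specs consumed purely by the back-end are
    # withheld from the connector, as asserted above.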

    @mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export')
    @mock.patch.object(db.sqlalchemy.api, 'volume_get')
    @mock.patch.object(db, 'volume_update')
    def test_initialize_connection_export_failure(self,
                                                  _mock_volume_update,
                                                  _mock_volume_get,
                                                  _mock_create_export):
        """Test exception path for create_export failure."""
        _fake_admin_meta = {'fake-key': 'fake-value'}
        _fake_volume = {'volume_type_id': fake.volume_type_id,
                        'name': 'fake_name',
                        'host': 'fake_host',
                        'id': fake.volume_id,
                        'volume_admin_metadata': _fake_admin_meta}

        _mock_volume_get.return_value = _fake_volume
        _mock_volume_update.return_value = _fake_volume
        _mock_create_export.side_effect = exception.CinderException

        connector = {'ip': 'IP', 'initiator': 'INITIATOR'}

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.volume.initialize_connection,
                          self.context,
                          fake.volume_id,
                          connector)

    @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
                       '_get_target_chap_auth')
    @mock.patch.object(db, 'volume_admin_metadata_get')
    @mock.patch.object(db, 'volume_update')
    @mock.patch.object(db.sqlalchemy.api, 'volume_get')
    @mock.patch.object(fake_driver.FakeISCSIDriver, 'initialize_connection')
    @mock.patch.object(db, 'driver_initiator_data_get')
    @mock.patch.object(db, 'driver_initiator_data_update')
    def test_initialize_connection_initiator_data(self, mock_data_update,
                                                  mock_data_get,
                                                  mock_driver_init,
                                                  mock_volume_get,
                                                  mock_volume_update,
                                                  mock_metadata_get,
                                                  mock_get_target):
        fake_admin_meta = {'fake-key': 'fake-value'}
        fake_volume = {'volume_type_id': None,
                       'name': 'fake_name',
                       'host': 'fake_host',
                       'id': fake.volume_id,
                       'volume_admin_metadata': fake_admin_meta,
                       'encryption_key_id': ('d371e7bb-7392-4c27-'
                                             'ac0b-ebd9f5d16078')}

        mock_volume_get.return_value = fake_volume
        mock_volume_update.return_value = fake_volume
        mock_get_target.return_value = None
        connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
        mock_driver_init.return_value = {
            'driver_volume_type': 'iscsi',
            'data': {'access_mode': 'rw', 'encrypted': False}
        }
        mock_data_get.return_value = []
        conn_info = self.volume.initialize_connection(self.context, 'id',
                                                      connector)
        # Asserts that if the driver sets the encrypted flag then the
        # VolumeManager doesn't overwrite it regardless of what's in the
        # volume for the encryption_key_id field.
        self.assertFalse(conn_info['data']['encrypted'])
        mock_driver_init.assert_called_with(fake_volume, connector)

        data = [{'key': 'key1', 'value': 'value1'}]
        mock_data_get.return_value = data
        self.volume.initialize_connection(self.context, 'id', connector)
        mock_driver_init.assert_called_with(fake_volume, connector, data)

        update = {
            'set_values': {
                'foo': 'bar'
            },
            'remove_values': [
                'foo',
                'foo2'
            ]
        }
        mock_driver_init.return_value['initiator_update'] = update
        self.volume.initialize_connection(self.context, 'id', connector)
        mock_driver_init.assert_called_with(fake_volume, connector, data)
        mock_data_update.assert_called_with(self.context, 'INITIATOR',
                                            'FakeISCSIDriver', update)

        connector['initiator'] = None
        mock_data_update.reset_mock()
        mock_data_get.reset_mock()
        mock_driver_init.return_value['data'].pop('encrypted')
        conn_info = self.volume.initialize_connection(self.context, 'id',
                                                      connector)
        # Asserts that VolumeManager sets the encrypted flag if the driver
        # doesn't set it.
        self.assertTrue(conn_info['data']['encrypted'])
        mock_driver_init.assert_called_with(fake_volume, connector)
        self.assertFalse(mock_data_get.called)
        self.assertFalse(mock_data_update.called)

    def test_run_attach_detach_volume_for_instance(self):
        """Make sure volume can be attached and detached from instance."""
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'True'},
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual("in-use", vol['status'])
        self.assertEqual('attached', attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertEqual(instance_uuid, attachment['instance_uuid'])
        self.assertIsNone(attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictMatch(expected, ret)

        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])

        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])

        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)

    def test_detach_invalid_attachment_id(self):
        """Make sure if the attachment id isn't found we raise."""
        attachment_id = "notfoundid"
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'True'},
            multiattach=False,
            **self.volume_params)
        self.volume.detach_volume(self.context, volume['id'],
                                  attachment_id)
        volume = db.volume_get(self.context, volume['id'])
        self.assertEqual('available', volume['status'])

        instance_uuid = '12345678-1234-5678-1234-567812345678'
        attached_host = 'fake_host'
        mountpoint = '/dev/fake'
        tests_utils.attach_volume(self.context, volume['id'],
                                  instance_uuid, attached_host,
                                  mountpoint)
        self.volume.detach_volume(self.context, volume['id'],
                                  attachment_id)
        volume = db.volume_get(self.context, volume['id'])
        self.assertEqual('in-use', volume['status'])

    def test_detach_no_attachments(self):
        self.volume_params['status'] = 'detaching'
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'True'},
            multiattach=False,
            **self.volume_params)
        self.volume.detach_volume(self.context, volume['id'])
        volume = db.volume_get(self.context, volume['id'])
        self.assertEqual('available', volume['status'])

    def test_run_attach_detach_volume_for_instance_no_attachment_id(self):
        """Make sure volume can be attached and detached from instance."""
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        instance_uuid_2 = '12345678-4321-8765-4321-567812345678'
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'True'},
            multiattach=True,
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertEqual('attached', attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertEqual(instance_uuid, attachment['instance_uuid'])
        self.assertIsNone(attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictMatch(expected, ret)

        attachment2 = self.volume.attach_volume(self.context, volume_id,
                                                instance_uuid_2, None,
                                                mountpoint, 'ro')

        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])

        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)

        self.assertRaises(exception.InvalidVolume,
                          self.volume.detach_volume,
                          self.context, volume_id)

        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('in-use', vol['status'])

        self.volume.detach_volume(self.context, volume_id, attachment2['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])

        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('in-use', vol['status'])
        self.volume.detach_volume(self.context, volume_id)
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])

        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
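
    # Multiattach semantics in the attach/detach tests that follow: a
    # multiattach=True volume accepts several attachments and stays
    # 'in-use' until the last one is detached; detaching without an
    # attachment_id is only unambiguous while a single attachment exists.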

    def test_run_attach_detach_multiattach_volume_for_instances(self):
        """Make sure volume can be attached to multiple instances."""
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'True'},
            multiattach=True,
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertTrue(vol['multiattach'])
        self.assertEqual('attached', attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertEqual(instance_uuid, attachment['instance_uuid'])
        self.assertIsNone(attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictMatch(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])

        instance2_uuid = '12345678-1234-5678-1234-567812345000'
        mountpoint2 = "/dev/sdx"
        attachment2 = self.volume.attach_volume(self.context, volume_id,
                                                instance2_uuid, None,
                                                mountpoint2, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertTrue(vol['multiattach'])
        self.assertEqual('attached', attachment2['attach_status'])
        self.assertEqual(mountpoint2, attachment2['mountpoint'])
        self.assertEqual(instance2_uuid, attachment2['instance_uuid'])
        self.assertIsNone(attachment2['attached_host'])
        self.assertNotEqual(attachment, attachment2)

        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('in-use', vol['status'])

        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)

        self.volume.detach_volume(self.context, volume_id, attachment2['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])

        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)

    def test_run_attach_twice_multiattach_volume_for_instances(self):
        """Make sure volume can be attached twice to the same instance."""
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345699'
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'True'},
            multiattach=True,
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertTrue(vol['multiattach'])
        self.assertEqual('attached', attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertEqual(instance_uuid, attachment['instance_uuid'])
        self.assertIsNone(attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictMatch(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])

        mountpoint2 = "/dev/sdx"
        attachment2 = self.volume.attach_volume(self.context, volume_id,
                                                instance_uuid, None,
                                                mountpoint2, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertTrue(vol['multiattach'])
        self.assertIsNone(attachment2)

        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)

    def test_attach_detach_not_multiattach_volume_for_instances(self):
        """Make sure volume can't be attached to more than one instance."""
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'True'},
            multiattach=False,
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertFalse(vol['multiattach'])
        self.assertEqual('attached', attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertEqual(instance_uuid, attachment['instance_uuid'])
        self.assertIsNone(attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictMatch(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])

        instance2_uuid = '12345678-1234-5678-1234-567812345000'
        mountpoint2 = "/dev/sdx"
        self.assertRaises(exception.InvalidVolume,
                          self.volume.attach_volume,
                          self.context,
                          volume_id,
                          instance2_uuid,
                          None,
                          mountpoint2, 'ro')

        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])

        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)

    def test_run_attach_detach_volume_for_host(self):
        """Make sure volume can be attached and detached from host."""
        mountpoint = "/dev/sdf"
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'False'},
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        attachment = self.volume.attach_volume(self.context, volume_id, None,
                                               'fake_host', mountpoint, 'rw')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertEqual('attached', attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertIsNone(attachment['instance_uuid'])
        # sanitized, conforms to RFC-952 and RFC-1123 specs.
        self.assertEqual('fake-host', attachment['attached_host'])
self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume_id, connector) self.assertEqual('rw', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume_id) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual("available", vol['status']) self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_detach_multiattach_volume_for_hosts(self): """Make sure volume can be attached and detached from hosts.""" mountpoint = "/dev/sdf" volume = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'False'}, multiattach=True, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertTrue(vol['multiattach']) self.assertEqual('attached', attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume_id, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" attachment2 = self.volume.attach_volume(self.context, volume_id, None, 'fake_host2', mountpoint2, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertEqual('attached', attachment2['attach_status']) self.assertEqual(mountpoint2, attachment2['mountpoint']) self.assertIsNone(attachment2['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. 
self.assertEqual('fake-host2', attachment2['attached_host']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume_id) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual("in-use", vol['status']) self.volume.detach_volume(self.context, volume_id, attachment2['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual("available", vol['status']) self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_twice_multiattach_volume_for_hosts(self): """Make sure volume can be attached and detached from hosts.""" mountpoint = "/dev/sdf" volume = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'False'}, multiattach=True, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertTrue(vol['multiattach']) self.assertEqual('attached', attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume_id, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" attachment2 = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint2, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertIsNone(attachment2) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume_id) def test_run_attach_detach_not_multiattach_volume_for_hosts(self): """Make sure volume can't be attached to more than one host.""" mountpoint = "/dev/sdf" volume = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'False'}, multiattach=False, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertFalse(vol['multiattach']) self.assertEqual('attached', attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. 
self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume_id, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" self.assertRaises(exception.InvalidVolume, self.volume.attach_volume, self.context, volume_id, None, 'fake_host2', mountpoint2, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertEqual('attached', attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. self.assertEqual('fake-host', attachment['attached_host']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume_id) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_detach_volume_with_attach_mode(self): instance_uuid = '12345678-1234-5678-1234-567812345678' mountpoint = "/dev/sdf" volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] db.volume_update(self.context, volume_id, {'status': 'available', }) self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) attachment = vol['volume_attachment'][0] self.assertEqual('in-use', vol['status']) self.assertEqual('attached', vol['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume_id, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) attachment = vol['volume_attachment'] self.assertEqual('available', vol['status']) self.assertEqual('detached', vol['attach_status']) self.assertEqual([], attachment) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) attachment = vol['volume_attachment'][0] self.assertEqual('in-use', vol['status']) self.assertEqual('attached', vol['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) self.assertEqual('fake-host', 
attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume_id, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) attachment = vol['volume_attachment'] self.assertEqual('available', vol['status']) self.assertEqual('detached', vol['attach_status']) self.assertEqual([], attachment) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self): # Not allow using 'read-write' mode attach readonly volume instance_uuid = '12345678-1234-5678-1234-567812345678' mountpoint = "/dev/sdf" volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) self.assertRaises(exception.InvalidVolumeAttachMode, self.volume.attach_volume, self.context, volume_id, instance_uuid, None, mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('error_attaching', vol['status']) self.assertEqual('detached', vol['attach_status']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictMatch(expected, ret) db.volume_update(self.context, volume_id, {'status': 'available'}) self.assertRaises(exception.InvalidVolumeAttachMode, self.volume.attach_volume, self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('error_attaching', vol['status']) self.assertEqual('detached', vol['attach_status']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictMatch(expected, ret) def test_run_api_attach_detach_volume_with_wrong_attach_mode(self): # Not allow using 'read-write' mode attach readonly volume instance_uuid = '12345678-1234-5678-1234-567812345678' mountpoint = "/dev/sdf" volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolumeAttachMode, volume_api.attach, self.context, volume, instance_uuid, None, mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('detached', vol['attach_status']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) 
db.volume_update(self.context, volume_id, {'status': 'available'}) self.assertRaises(exception.InvalidVolumeAttachMode, volume_api.attach, self.context, volume, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('detached', vol['attach_status']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) def test_detach_volume_while_uploading_to_image_is_in_progress(self): # If instance is booted from volume with 'Terminate on Delete' flag # set, and when we delete instance then it tries to delete volume # even it is in 'uploading' state. # It is happening because detach call is setting volume status to # 'available'. mountpoint = "/dev/sdf" # Attach volume to the instance instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') # Change volume status to 'uploading' db.volume_update(self.context, volume_id, {'status': 'uploading'}) # Call detach api self.volume.detach_volume(self.context, volume_id) vol = db.volume_get(self.context, volume_id) # Check that volume status is 'uploading' self.assertEqual("uploading", vol['status']) self.assertEqual("detached", vol['attach_status']) def test_reserve_volume_success(self): volume = tests_utils.create_volume(self.context, status='available') cinder.volume.api.API().reserve_volume(self.context, volume) volume_db = db.volume_get(self.context, volume.id) self.assertEqual('attaching', volume_db.status) db.volume_destroy(self.context, volume.id) def test_reserve_volume_in_attaching(self): self._test_reserve_volume_bad_status('attaching') def test_reserve_volume_in_maintenance(self): self._test_reserve_volume_bad_status('maintenance') def _test_reserve_volume_bad_status(self, status): volume = tests_utils.create_volume(self.context, status=status) self.assertRaises(exception.InvalidVolume, cinder.volume.api.API().reserve_volume, self.context, volume) db.volume_destroy(self.context, volume.id) def test_unreserve_volume_success_in_use(self): UUID = six.text_type(uuid.uuid4()) volume = tests_utils.create_volume(self.context, status='attaching') tests_utils.attach_volume(self.context, volume.id, UUID, 'attached_host', 'mountpoint', mode='rw') cinder.volume.api.API().unreserve_volume(self.context, volume) db_volume = db.volume_get(self.context, volume.id) self.assertEqual('in-use', db_volume.status) def test_unreserve_volume_success_available(self): volume = tests_utils.create_volume(self.context, status='attaching') cinder.volume.api.API().unreserve_volume(self.context, volume) db_volume = db.volume_get(self.context, volume.id) self.assertEqual('available', db_volume.status) def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, # each of them having a different FLAG for storage_node # This will allow us to test cross-node interactions pass def test_create_delete_snapshot(self): """Test snapshot can be created and deleted.""" volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) self.assertEqual(0, len(self.notifier.notifications), self.notifier.notifications) self.volume.create_volume(self.context, 
volume['id']) msg = self.notifier.notifications[0] self.assertEqual('volume.create.start', msg['event_type']) self.assertEqual('creating', msg['payload']['status']) self.assertEqual('INFO', msg['priority']) msg = self.notifier.notifications[1] self.assertEqual('volume.create.end', msg['event_type']) self.assertEqual('available', msg['payload']['status']) self.assertEqual('INFO', msg['priority']) if len(self.notifier.notifications) > 2: # Cause an assert to print the unexpected item # and all of the notifications. self.assertFalse(self.notifier.notifications[2], self.notifier.notifications) self.assertEqual(2, len(self.notifier.notifications), self.notifier.notifications) snapshot = create_snapshot(volume['id'], size=volume['size']) snapshot_id = snapshot.id self.volume.create_snapshot(self.context, volume['id'], snapshot) self.assertEqual( snapshot_id, objects.Snapshot.get_by_id(self.context, snapshot_id).id) msg = self.notifier.notifications[2] self.assertEqual('snapshot.create.start', msg['event_type']) expected = { 'created_at': 'DONTCARE', 'deleted': '', 'display_name': None, 'snapshot_id': snapshot_id, 'status': 'creating', 'tenant_id': fake.project_id, 'user_id': fake.user_id, 'volume_id': volume['id'], 'volume_size': 1, 'availability_zone': 'nova', 'metadata': '', } self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[3] self.assertEqual('snapshot.create.end', msg['event_type']) expected['status'] = 'available' self.assertDictMatch(expected, msg['payload']) if len(self.notifier.notifications) > 4: # Cause an assert to print the unexpected item # and all of the notifications. self.assertFalse(self.notifier.notifications[4], self.notifier.notifications) self.assertEqual(4, len(self.notifier.notifications), self.notifier.notifications) self.volume.delete_snapshot(self.context, snapshot) msg = self.notifier.notifications[4] self.assertEqual('snapshot.delete.start', msg['event_type']) expected['status'] = 'available' self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[5] self.assertEqual('snapshot.delete.end', msg['event_type']) self.assertDictMatch(expected, msg['payload']) if len(self.notifier.notifications) > 6: # Cause an assert to print the unexpected item # and all of the notifications. 
self.assertFalse(self.notifier.notifications[6], self.notifier.notifications) self.assertEqual(6, len(self.notifier.notifications), self.notifier.notifications) snap = objects.Snapshot.get_by_id(context.get_admin_context( read_deleted='yes'), snapshot_id) self.assertEqual('deleted', snap.status) self.assertRaises(exception.NotFound, db.snapshot_get, self.context, snapshot_id) self.volume.delete_volume(self.context, volume['id']) def test_create_delete_snapshot_with_metadata(self): """Test snapshot can be created with metadata and deleted.""" test_meta = {'fake_key': 'fake_value'} volume = tests_utils.create_volume(self.context, **self.volume_params) snapshot = create_snapshot(volume['id'], size=volume['size'], metadata=test_meta) snapshot_id = snapshot.id result_dict = snapshot.metadata self.assertEqual(test_meta, result_dict) self.volume.delete_snapshot(self.context, snapshot) self.assertRaises(exception.NotFound, db.snapshot_get, self.context, snapshot_id) @mock.patch.object(db, 'snapshot_create', side_effect=exception.InvalidSnapshot( 'Create snapshot in db failed!')) def test_create_snapshot_failed_db_snapshot(self, mock_snapshot): """Test exception handling when create snapshot in db failed.""" test_volume = tests_utils.create_volume( self.context, **self.volume_params) self.volume.create_volume(self.context, test_volume['id']) test_volume['status'] = 'available' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidSnapshot, volume_api.create_snapshot, self.context, test_volume, 'fake_name', 'fake_description') def test_create_snapshot_failed_maintenance(self): """Test exception handling when create snapshot in maintenance.""" test_volume = tests_utils.create_volume( self.context, **self.volume_params) self.volume.create_volume(self.context, test_volume['id']) test_volume['status'] = 'maintenance' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, self.context, test_volume, 'fake_name', 'fake_description') @mock.patch.object(QUOTAS, 'commit', side_effect=exception.QuotaError( 'Snapshot quota commit failed!')) def test_create_snapshot_failed_quota_commit(self, mock_snapshot): """Test exception handling when snapshot quota commit failed.""" test_volume = tests_utils.create_volume( self.context, **self.volume_params) self.volume.create_volume(self.context, test_volume['id'], request_spec={}) test_volume['status'] = 'available' volume_api = cinder.volume.api.API() self.assertRaises(exception.QuotaError, volume_api.create_snapshot, self.context, test_volume, 'fake_name', 'fake_description') def test_cannot_delete_volume_in_use(self): """Test volume can't be deleted in in-use status.""" self._test_cannot_delete_volume('in-use') def test_cannot_delete_volume_maintenance(self): """Test volume can't be deleted in maintenance status.""" self._test_cannot_delete_volume('maintenance') def _test_cannot_delete_volume(self, status): """Test volume can't be deleted in invalid stats.""" # create a volume and assign to host volume = tests_utils.create_volume(self.context, CONF.host, status=status) # 'in-use' status raises InvalidVolume self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) # clean up self.volume.delete_volume(self.context, volume['id']) def test_force_delete_volume(self): """Test volume can be forced to delete.""" # create a volume and assign to host self.volume_params['status'] = 'error_deleting' volume = tests_utils.create_volume(self.context, **self.volume_params) # 
'error_deleting' volumes can't be deleted self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) # delete with force self.volume_api.delete(self.context, volume, force=True) # status is deleting volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('deleting', volume.status) # clean up self.volume.delete_volume(self.context, volume.id) def test_cannot_force_delete_attached_volume(self): """Test volume can't be force delete in attached state.""" volume = tests_utils.create_volume(self.context, CONF.host, status='in-use', attach_status = 'attached') self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume, force=True) db.volume_destroy(self.context, volume.id) def test_cannot_delete_volume_with_snapshots(self): """Test volume can't be deleted with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume['id']) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, volume['id'], snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.delete, self.context, volume) self.volume.delete_snapshot(self.context, snapshot) self.volume.delete_volume(self.context, volume['id']) def test_can_delete_errored_snapshot(self): """Test snapshot can be created and deleted.""" volume = tests_utils.create_volume(self.context, CONF.host) snapshot = create_snapshot(volume.id, size=volume['size'], ctxt=self.context, status='bad') self.assertRaises(exception.InvalidSnapshot, self.volume_api.delete_snapshot, self.context, snapshot) snapshot.status = 'error' snapshot.save() self.volume_api.delete_snapshot(self.context, snapshot) self.assertEqual('deleting', snapshot.status) self.volume.delete_volume(self.context, volume.id) def test_create_snapshot_force(self): """Test snapshot in use can be created forcibly.""" instance_uuid = '12345678-1234-5678-1234-567812345678' # create volume and attach to the instance volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume['id']) values = {'volume_id': volume['id'], 'instance_uuid': instance_uuid, 'attach_status': 'attaching', } attachment = db.volume_attach(self.context, values) db.volume_attached(self.context, attachment['id'], instance_uuid, None, '/dev/sda1') volume_api = cinder.volume.api.API() volume = volume_api.get(self.context, volume['id']) self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, self.context, volume, 'fake_name', 'fake_description') snapshot_ref = volume_api.create_snapshot_force(self.context, volume, 'fake_name', 'fake_description') snapshot_ref.destroy() db.volume_destroy(self.context, volume['id']) # create volume and attach to the host volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume['id']) values = {'volume_id': volume['id'], 'attached_host': 'fake_host', 'attach_status': 'attaching', } attachment = db.volume_attach(self.context, values) db.volume_attached(self.context, attachment['id'], None, 'fake_host', '/dev/sda1') volume_api = cinder.volume.api.API() volume = volume_api.get(self.context, volume['id']) self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, 
self.context, volume, 'fake_name', 'fake_description') snapshot_ref = volume_api.create_snapshot_force(self.context, volume, 'fake_name', 'fake_description') snapshot_ref.destroy() db.volume_destroy(self.context, volume['id']) def test_create_snapshot_from_bootable_volume(self): """Test create snapshot from bootable volume.""" # create bootable volume from image volume = self._create_volume_from_image() volume_id = volume['id'] self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) # get volume's volume_glance_metadata ctxt = context.get_admin_context() vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id) self.assertTrue(vol_glance_meta) # create snapshot from bootable volume snap = create_snapshot(volume_id) self.volume.create_snapshot(ctxt, volume_id, snap) # get snapshot's volume_glance_metadata snap_glance_meta = db.volume_snapshot_glance_metadata_get( ctxt, snap.id) self.assertTrue(snap_glance_meta) # ensure that volume's glance metadata is copied # to snapshot's glance metadata self.assertEqual(len(vol_glance_meta), len(snap_glance_meta)) vol_glance_dict = {x.key: x.value for x in vol_glance_meta} snap_glance_dict = {x.key: x.value for x in snap_glance_meta} self.assertDictMatch(vol_glance_dict, snap_glance_dict) # ensure that snapshot's status is changed to 'available' self.assertEqual('available', snap.status) # cleanup resource snap.destroy() db.volume_destroy(ctxt, volume_id) def test_create_snapshot_from_bootable_volume_fail(self): """Test create snapshot from bootable volume. But it fails to volume_glance_metadata_copy_to_snapshot. As a result, status of snapshot is changed to ERROR. """ # create bootable volume from image volume = self._create_volume_from_image() volume_id = volume['id'] self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) # get volume's volume_glance_metadata ctxt = context.get_admin_context() vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id) self.assertTrue(vol_glance_meta) snap = create_snapshot(volume_id) snap_stat = snap.status self.assertTrue(snap.id) self.assertTrue(snap_stat) # set to return DB exception with mock.patch.object(db, 'volume_glance_metadata_copy_to_snapshot')\ as mock_db: mock_db.side_effect = exception.MetadataCopyFailure( reason="Because of DB service down.") # create snapshot from bootable volume self.assertRaises(exception.MetadataCopyFailure, self.volume.create_snapshot, ctxt, volume_id, snap) # get snapshot's volume_glance_metadata self.assertRaises(exception.GlanceMetadataNotFound, db.volume_snapshot_glance_metadata_get, ctxt, snap.id) # ensure that status of snapshot is 'error' self.assertEqual('error', snap.status) # cleanup resource snap.destroy() db.volume_destroy(ctxt, volume_id) def test_create_snapshot_from_bootable_volume_with_volume_metadata_none( self): volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) # set bootable flag of volume to True db.volume_update(self.context, volume_id, {'bootable': True}) snapshot = create_snapshot(volume['id']) self.volume.create_snapshot(self.context, volume['id'], snapshot) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_snapshot_glance_metadata_get, self.context, snapshot.id) # ensure that status of snapshot is 'available' self.assertEqual('available', snapshot.status) # cleanup resource snapshot.destroy() db.volume_destroy(self.context, volume_id) def test_delete_busy_snapshot(self): """Test 
snapshot can be created and deleted.""" self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) snapshot = create_snapshot(volume_id, size=volume['size']) self.volume.create_snapshot(self.context, volume_id, snapshot) self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot') self.volume.driver.delete_snapshot( mox.IgnoreArg()).AndRaise( exception.SnapshotIsBusy(snapshot_name='fake')) self.mox.ReplayAll() snapshot_id = snapshot.id self.volume.delete_snapshot(self.context, snapshot) snapshot_ref = objects.Snapshot.get_by_id(self.context, snapshot_id) self.assertEqual(snapshot_id, snapshot_ref.id) self.assertEqual("available", snapshot_ref.status) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") def test_delete_no_dev_fails(self): """Test delete snapshot with no dev file fails.""" self.stubs.Set(os.path, 'exists', lambda x: False) self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) snapshot = create_snapshot(volume_id) snapshot_id = snapshot.id self.volume.create_snapshot(self.context, volume_id, snapshot) self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot') self.volume.driver.delete_snapshot( mox.IgnoreArg()).AndRaise( exception.SnapshotIsBusy(snapshot_name='fake')) self.mox.ReplayAll() self.volume.delete_snapshot(self.context, snapshot) snapshot_ref = objects.Snapshot.get_by_id(self.context, snapshot_id) self.assertEqual(snapshot_id, snapshot_ref.id) self.assertEqual("available", snapshot_ref.status) self.mox.UnsetStubs() self.assertRaises(exception.VolumeBackendAPIException, self.volume.delete_snapshot, self.context, snapshot) self.assertRaises(exception.VolumeBackendAPIException, self.volume.delete_volume, self.context, volume_id) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 
'create_cloned_volume') @mock.patch('cinder.quota.QUOTAS.rollback') @mock.patch('cinder.quota.QUOTAS.commit') @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) def test_clone_image_volume(self, mock_reserve, mock_commit, mock_rollback, mock_cloned_volume): vol = tests_utils.create_volume(self.context, **self.volume_params) # unnecessary attributes should be removed from image volume vol.consistencygroup = None result = self.volume._clone_image_volume(self.context, vol, {'id': fake.volume_id}) self.assertNotEqual(False, result) mock_reserve.assert_called_once_with(self.context, volumes=1, gigabytes=vol.size) mock_commit.assert_called_once_with(self.context, ["RESERVATION"], project_id=vol.project_id) @mock.patch('cinder.quota.QUOTAS.rollback') @mock.patch('cinder.quota.QUOTAS.commit') @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) def test_clone_image_volume_creation_failure(self, mock_reserve, mock_commit, mock_rollback): vol = tests_utils.create_volume(self.context, **self.volume_params) with mock.patch.object(objects, 'Volume', side_effect=ValueError): self.assertFalse(self.volume._clone_image_volume( self.context, vol, {'id': fake.volume_id})) mock_reserve.assert_called_once_with(self.context, volumes=1, gigabytes=vol.size) mock_rollback.assert_called_once_with(self.context, ["RESERVATION"]) def test_create_volume_from_image_cloned_status_available(self): """Test create volume from image via cloning. Verify that after cloning image to volume, it is in available state and is bootable. """ volume = self._create_volume_from_image() self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) self.volume.delete_volume(self.context, volume['id']) def test_create_volume_from_image_not_cloned_status_available(self): """Test create volume from image via full copy. Verify that after copying image to volume, it is in available state and is bootable. """ volume = self._create_volume_from_image(fakeout_clone_image=True) self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) self.volume.delete_volume(self.context, volume['id']) def test_create_volume_from_image_exception(self): """Test create volume from a non-existing image. Verify that create volume from a non-existing image, the volume status is 'error' and is not bootable. """ dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path) # creating volume testdata kwargs = {'display_description': 'Test Desc', 'size': 20, 'availability_zone': 'fake_availability_zone', 'status': 'creating', 'attach_status': 'detached', 'host': 'dummy'} volume = objects.Volume(context=self.context, **kwargs) volume.create() self.assertRaises(exception.ImageNotFound, self.volume.create_volume, self.context, volume.id, {'image_id': self.FAKE_UUID}) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual("error", volume['status']) self.assertFalse(volume['bootable']) # cleanup volume.destroy() os.unlink(dst_path) def test_create_volume_from_image_copy_exception_rescheduling(self): """Test create volume with ImageCopyFailure This exception should not trigger rescheduling and allocated_capacity should be incremented so we're having assert for that here. 
""" def fake_copy_image_to_volume(context, volume, image_service, image_id): raise exception.ImageCopyFailure() self.stubs.Set(self.volume.driver, 'copy_image_to_volume', fake_copy_image_to_volume) self.assertRaises(exception.ImageCopyFailure, self._create_volume_from_image) # NOTE(dulek): Rescheduling should not occur, so lets assert that # allocated_capacity is incremented. self.assertDictEqual(self.volume.stats['pools'], {'_pool0': {'allocated_capacity_gb': 1}}) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.utils.brick_get_connector') @mock.patch('cinder.volume.driver.BaseVD.secure_file_operations_enabled') @mock.patch('cinder.volume.driver.BaseVD._detach_volume') def test_create_volume_from_image_unavailable(self, mock_detach, mock_secure, *args): """Test create volume with ImageCopyFailure We'll raise an exception inside _connect_device after volume has already been attached to confirm that it detaches the volume. """ mock_secure.side_effect = NameError # We want to test BaseVD copy_image_to_volume and since FakeISCSIDriver # inherits from LVM it overwrites it, so we'll mock it to use the # BaseVD implementation. unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume bound_copy_method = unbound_copy_method.__get__(self.volume.driver) with mock.patch.object(self.volume.driver, 'copy_image_to_volume', side_effect=bound_copy_method): self.assertRaises(exception.ImageCopyFailure, self._create_volume_from_image, fakeout_copy_image_to_volume=False) # We must have called detach method. self.assertEqual(1, mock_detach.call_count) def test_create_volume_from_image_clone_image_volume(self): """Test create volume from image via image volume. Verify that after cloning image to volume, it is in available state and is bootable. """ volume = self._create_volume_from_image(clone_image_volume=True) self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) self.volume.delete_volume(self.context, volume['id']) def test_create_volume_from_exact_sized_image(self): """Test create volume from an image of the same size. Verify that an image which is exactly the same size as the volume, will work correctly. 
""" try: volume_id = None volume_api = cinder.volume.api.API( image_service=FakeImageService()) volume = volume_api.create(self.context, 2, 'name', 'description', image_id=1) volume_id = volume['id'] self.assertEqual('creating', volume['status']) finally: # cleanup db.volume_destroy(self.context, volume_id) def test_create_volume_from_oversized_image(self): """Verify that an image which is too big will fail correctly.""" class _ModifiedFakeImageService(FakeImageService): def show(self, context, image_id): return {'size': 2 * units.Gi + 1, 'disk_format': 'raw', 'container_format': 'bare', 'status': 'active'} volume_api = cinder.volume.api.API( image_service=_ModifiedFakeImageService()) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 2, 'name', 'description', image_id=1) def test_create_volume_with_mindisk_error(self): """Verify volumes smaller than image minDisk will cause an error.""" class _ModifiedFakeImageService(FakeImageService): def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': 'raw', 'container_format': 'bare', 'min_disk': 5, 'status': 'active'} volume_api = cinder.volume.api.API( image_service=_ModifiedFakeImageService()) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 2, 'name', 'description', image_id=1) def test_create_volume_with_deleted_imaged(self): """Verify create volume from image will cause an error.""" class _ModifiedFakeImageService(FakeImageService): def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': 'raw', 'container_format': 'bare', 'min_disk': 5, 'status': 'deleted'} volume_api = cinder.volume.api.API( image_service=_ModifiedFakeImageService()) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 2, 'name', 'description', image_id=1) @mock.patch.object(QUOTAS, "rollback") @mock.patch.object(QUOTAS, "commit") @mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"]) def _do_test_create_volume_with_size(self, size, *_unused_quota_mocks): volume_api = cinder.volume.api.API() volume = volume_api.create(self.context, size, 'name', 'description') self.assertEqual(int(size), volume['size']) def test_create_volume_int_size(self): """Test volume creation with int size.""" self._do_test_create_volume_with_size(2) def test_create_volume_string_size(self): """Test volume creation with string size.""" self._do_test_create_volume_with_size('2') @mock.patch.object(QUOTAS, "rollback") @mock.patch.object(QUOTAS, "commit") @mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"]) def test_create_volume_with_bad_size(self, *_unused_quota_mocks): volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '2Gb', 'name', 'description') def test_create_volume_with_float_fails(self): """Test volume creation with invalid float size.""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '1.5', 'name', 'description') def test_create_volume_with_zero_size_fails(self): """Test volume creation with string size.""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '0', 'name', 'description') def test_begin_detaching_fails_available(self): volume_api = cinder.volume.api.API() volume = tests_utils.create_volume(self.context, status='available') # Volume status is 'available'. 
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_update(self.context, volume.id, {'status': 'in-use', 'attach_status': 'detached'}) # Should raise an error since not attached self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_update(self.context, volume.id, {'attach_status': 'attached'}) # Ensure when attached no exception raised volume_api.begin_detaching(self.context, volume) volume_api.update(self.context, volume, {'status': 'maintenance'}) self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_destroy(self.context, volume.id) def test_begin_roll_detaching_volume(self): """Test begin_detaching and roll_detaching functions.""" instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.context, **self.volume_params) attachment = db.volume_attach(self.context, {'volume_id': volume['id'], 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment['id'], instance_uuid, 'fake-host', 'vdb') volume_api = cinder.volume.api.API() volume_api.begin_detaching(self.context, volume) volume = volume_api.get(self.context, volume['id']) self.assertEqual("detaching", volume['status']) volume_api.roll_detaching(self.context, volume) volume = volume_api.get(self.context, volume['id']) self.assertEqual("in-use", volume['status']) def test_volume_api_update(self): # create a raw vol volume = tests_utils.create_volume(self.context, **self.volume_params) # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update(self.context, volume, update_dict) # read changes from db vol = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('test update name', vol['display_name']) def test_volume_api_update_maintenance(self): # create a raw vol volume = tests_utils.create_volume(self.context, **self.volume_params) volume['status'] = 'maintenance' # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} self.assertRaises(exception.InvalidVolume, volume_api.update, self.context, volume, update_dict) def test_volume_api_update_snapshot(self): # create raw snapshot volume = tests_utils.create_volume(self.context, **self.volume_params) snapshot = create_snapshot(volume['id']) snapshot_id = snapshot.id self.assertIsNone(snapshot.display_name) # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update_snapshot(self.context, snapshot, update_dict) # read changes from db snap = objects.Snapshot.get_by_id(context.get_admin_context(), snapshot_id) self.assertEqual('test update name', snap.display_name) def test_volume_api_get_list_volumes_image_metadata(self): """Test get_list_volumes_image_metadata in volume API.""" ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': 'fake1', 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) db.volume_glance_metadata_create(ctxt, 'fake1', 'key1', 'value1') db.volume_glance_metadata_create(ctxt, 'fake1', 'key2', 'value2') db.volume_create(ctxt, {'id': 'fake2', 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) db.volume_glance_metadata_create(ctxt, 'fake2', 'key3', 'value3') db.volume_glance_metadata_create(ctxt, 'fake2', 'key4', 'value4') volume_api = cinder.volume.api.API() results = 
volume_api.get_list_volumes_image_metadata(ctxt, ['fake1', 'fake2']) expect_results = {'fake1': {'key1': 'value1', 'key2': 'value2'}, 'fake2': {'key3': 'value3', 'key4': 'value4'}} self.assertEqual(expect_results, results) @mock.patch.object(QUOTAS, 'reserve') def test_extend_volume(self, reserve): """Test volume can be extended at API level.""" # create a volume and assign to host volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume['id']) volume['status'] = 'in-use' volume_api = cinder.volume.api.API() # Extend fails when status != available self.assertRaises(exception.InvalidVolume, volume_api.extend, self.context, volume, 3) volume['status'] = 'available' # Extend fails when new_size < orig_size self.assertRaises(exception.InvalidInput, volume_api.extend, self.context, volume, 1) # Extend fails when new_size == orig_size self.assertRaises(exception.InvalidInput, volume_api.extend, self.context, volume, 2) # works when new_size > orig_size reserve.return_value = ["RESERVATION"] volume_api.extend(self.context, volume, 3) volume = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('extending', volume['status']) reserve.assert_called_once_with(self.context, gigabytes=1, project_id=volume['project_id']) # Test the quota exceeded volume['status'] = 'available' reserve.side_effect = exception.OverQuota(overs=['gigabytes'], quotas={'gigabytes': 20}, usages={'gigabytes': {'reserved': 5, 'in_use': 15}}) self.assertRaises(exception.VolumeSizeExceedsAvailableQuota, volume_api.extend, self.context, volume, 3) # clean up self.volume.delete_volume(self.context, volume['id']) def test_extend_volume_driver_not_initialized(self): """Test volume can be extended at API level.""" # create a volume and assign to host fake_reservations = ['RESERVATION'] volume = tests_utils.create_volume(self.context, size=2, status='available', host=CONF.host) self.volume.create_volume(self.context, volume['id']) self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.extend_volume, self.context, volume['id'], 3, fake_reservations) volume = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('error_extending', volume.status) # lets cleanup the mess. 
self.volume.driver._initialized = True self.volume.delete_volume(self.context, volume['id']) def test_extend_volume_manager(self): """Test volume can be extended at the manager level.""" def fake_extend(volume, new_size): volume['size'] = new_size fake_reservations = ['RESERVATION'] volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume['id']) # Test driver exception with mock.patch.object(self.volume.driver, 'extend_volume') as extend_volume: extend_volume.side_effect =\ exception.CinderException('fake exception') volume['status'] = 'extending' self.volume.extend_volume(self.context, volume['id'], '4', fake_reservations) volume = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual(2, volume['size']) self.assertEqual('error_extending', volume['status']) # Test driver success with mock.patch.object(self.volume.driver, 'extend_volume') as extend_volume: with mock.patch.object(QUOTAS, 'commit') as quotas_commit: extend_volume.return_value = fake_extend volume['status'] = 'extending' self.volume.extend_volume(self.context, volume['id'], '4', fake_reservations) volume = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual(4, volume['size']) self.assertEqual('available', volume['status']) quotas_commit.assert_called_with( self.context, ['RESERVATION'], project_id=volume['project_id']) # clean up self.volume.delete_volume(self.context, volume['id']) def test_extend_volume_with_volume_type(self): elevated = context.get_admin_context() project_id = self.context.project_id db.volume_type_create(elevated, {'name': 'type', 'extra_specs': {}}) vol_type = db.volume_type_get_by_name(elevated, 'type') volume_api = cinder.volume.api.API() volume = volume_api.create(self.context, 100, 'name', 'description', volume_type=vol_type) try: usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type') volumes_in_use = usage.in_use except exception.QuotaUsageNotFound: volumes_in_use = 0 self.assertEqual(100, volumes_in_use) volume['status'] = 'available' volume['host'] = 'fakehost' volume['volume_type_id'] = vol_type.get('id') volume_api.extend(self.context, volume, 200) try: usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type') volumes_reserved = usage.reserved except exception.QuotaUsageNotFound: volumes_reserved = 0 self.assertEqual(100, volumes_reserved) @mock.patch( 'cinder.volume.driver.VolumeDriver.create_replica_test_volume') def test_create_volume_from_sourcereplica(self, _create_replica_test): """Test volume can be created from a volume replica.""" _create_replica_test.return_value = None volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume_src['id']) volume_dst = tests_utils.create_volume( self.context, **self.volume_params) self.volume.create_volume(self.context, volume_dst['id'], {'source_replicaid': volume_src['id']}) self.assertEqual('available', db.volume_get(context.get_admin_context(), volume_dst['id']).status) self.assertTrue(_create_replica_test.called) self.volume.delete_volume(self.context, volume_dst['id']) self.volume.delete_volume(self.context, volume_src['id']) def test_create_volume_from_sourcevol(self): """Test volume can be created from a source volume.""" def fake_create_cloned_volume(volume, src_vref): pass self.stubs.Set(self.volume.driver, 'create_cloned_volume', fake_create_cloned_volume) volume_src = tests_utils.create_volume(self.context, **self.volume_params) 
self.volume.create_volume(self.context, volume_src['id']) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.volume.create_volume(self.context, volume_dst['id']) self.assertEqual('available', db.volume_get(context.get_admin_context(), volume_dst['id']).status) self.volume.delete_volume(self.context, volume_dst['id']) self.volume.delete_volume(self.context, volume_src['id']) @mock.patch('cinder.volume.api.API.list_availability_zones', return_value=({'name': 'nova', 'available': True}, {'name': 'az2', 'available': True})) def test_create_volume_from_sourcevol_fail_wrong_az(self, _mock_laz): """Test volume can't be cloned from an other volume in different az.""" volume_api = cinder.volume.api.API() volume_src = tests_utils.create_volume(self.context, availability_zone='az2', **self.volume_params) self.volume.create_volume(self.context, volume_src['id']) volume_src = db.volume_get(self.context, volume_src['id']) volume_dst = volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', source_volume=volume_src) self.assertEqual('az2', volume_dst['availability_zone']) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', source_volume=volume_src, availability_zone='nova') def test_create_volume_from_sourcevol_with_glance_metadata(self): """Test glance metadata can be correctly copied to new volume.""" def fake_create_cloned_volume(volume, src_vref): pass self.stubs.Set(self.volume.driver, 'create_cloned_volume', fake_create_cloned_volume) volume_src = self._create_volume_from_image() self.volume.create_volume(self.context, volume_src['id']) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.volume.create_volume(self.context, volume_dst['id']) self.assertEqual('available', db.volume_get(context.get_admin_context(), volume_dst['id']).status) src_glancemeta = db.volume_get(context.get_admin_context(), volume_src['id']).volume_glance_metadata dst_glancemeta = db.volume_get(context.get_admin_context(), volume_dst['id']).volume_glance_metadata for meta_src in src_glancemeta: for meta_dst in dst_glancemeta: if meta_dst.key == meta_src.key: self.assertEqual(meta_src.value, meta_dst.value) self.volume.delete_volume(self.context, volume_src['id']) self.volume.delete_volume(self.context, volume_dst['id']) def test_create_volume_from_sourcevol_failed_clone(self): """Test src vol status will be restore by error handling code.""" def fake_error_create_cloned_volume(volume, src_vref): db.volume_update(self.context, src_vref['id'], {'status': 'error'}) raise exception.CinderException('fake exception') self.stubs.Set(self.volume.driver, 'create_cloned_volume', fake_error_create_cloned_volume) volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume_src['id']) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume_dst['id']) self.assertEqual('creating', volume_src['status']) self.volume.delete_volume(self.context, volume_dst['id']) self.volume.delete_volume(self.context, volume_src['id']) def test_clean_temporary_volume(self): def fake_delete_volume(ctxt, volume): volume.destroy() fake_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host, migration_status='migrating') 
fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) # 1. Only clean the db self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume, clean_db_only=True) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, fake_new_volume.id) # 2. Delete the backend storage fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) with mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \ mock_delete_volume: mock_delete_volume.side_effect = fake_delete_volume self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume, clean_db_only=False) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, fake_new_volume.id) # Check when the migrated volume is not in migration fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) fake_volume.migration_status = 'non-migrating' fake_volume.save() self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume) volume = db.volume_get(context.get_admin_context(), fake_new_volume.id) self.assertIsNone(volume.migration_status) def test_check_volume_filters_true(self): """Test bootable as filter for true""" volume_api = cinder.volume.api.API() filters = {'bootable': 'TRUE'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against True self.assertTrue(filters['bootable']) def test_check_volume_filters_false(self): """Test bootable as filter for false""" volume_api = cinder.volume.api.API() filters = {'bootable': 'false'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against False self.assertEqual(False, filters['bootable']) def test_check_volume_filters_invalid(self): """Test bootable as filter""" volume_api = cinder.volume.api.API() filters = {'bootable': 'invalid'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against invalid value self.assertTrue(filters['bootable']) def test_update_volume_readonly_flag(self): """Test volume readonly flag can be updated at API level.""" # create a volume and assign to host volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) self.volume.create_volume(self.context, volume['id']) volume['status'] = 'in-use' def sort_func(obj): return obj['name'] volume_api = cinder.volume.api.API() # Update fails when status != available self.assertRaises(exception.InvalidVolume, volume_api.update_readonly_flag, self.context, volume, False) volume['status'] = 'available' # works when volume in 'available' status volume_api.update_readonly_flag(self.context, volume, False) volume = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('available', volume['status']) admin_metadata = volume['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) # clean up self.volume.delete_volume(self.context, volume['id']) def test_secure_file_operations_enabled(self): """Test secure file operations setting for base driver. General, non network file system based drivers do not have anything to do with "secure_file_operations". This test verifies that calling the method always returns False. 
""" ret_flag = self.volume.driver.secure_file_operations_enabled() self.assertFalse(ret_flag) @mock.patch('cinder.volume.flows.common.make_pretty_name', new=mock.MagicMock()) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_volume', return_value=None) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.execute', side_effect=exception.DriverNotInitialized()) def test_create_volume_raise_rescheduled_exception(self, mock_execute, mock_reschedule): # Create source volume test_vol = tests_utils.create_volume(self.context, **self.volume_params) test_vol_id = test_vol['id'] self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, test_vol_id, {'volume_properties': self.volume_params}, {'retry': {'num_attempts': 1, 'host': []}}) self.assertTrue(mock_reschedule.called) volume = db.volume_get(context.get_admin_context(), test_vol_id) self.assertEqual('creating', volume['status']) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.execute') def test_create_volume_raise_unrescheduled_exception(self, mock_execute): # create source volume test_vol = tests_utils.create_volume(self.context, **self.volume_params) test_vol_id = test_vol['id'] mock_execute.side_effect = exception.VolumeNotFound( volume_id=test_vol_id) self.assertRaises(exception.VolumeNotFound, self.volume.create_volume, self.context, test_vol_id, {'volume_properties': self.volume_params}, {'retry': {'num_attempts': 1, 'host': []}}) volume = db.volume_get(context.get_admin_context(), test_vol_id) self.assertEqual('error', volume['status']) def test__get_driver_initiator_data(self): manager = vol_manager.VolumeManager() data = manager._get_driver_initiator_data(None, {'key': 'val'}) self.assertIsNone(data) connector = {'initiator': {'key': 'val'}} self.assertRaises(exception.InvalidInput, manager._get_driver_initiator_data, None, connector) def test_cascade_delete_volume_with_snapshots(self): """Test volume deletion with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume['id']) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, volume['id'], snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() volume_api.delete(self.context, volume, cascade=True) def test_cascade_delete_volume_with_snapshots_error(self): """Test volume deletion with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume['id']) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, volume['id'], snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) snapshot.update({'status': 'in-use'}) snapshot.save() volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.delete, self.context, volume, cascade=True) @ddt.ddt class VolumeMigrationTestCase(BaseVolumeTestCase): def setUp(self): super(VolumeMigrationTestCase, self).setUp() self._clear_patch = mock.patch('cinder.volume.utils.clear_volume', autospec=True) self._clear_patch.start() self.expected_status = 'available' def tearDown(self): super(VolumeMigrationTestCase, 
self).tearDown() self._clear_patch.stop() def test_migrate_volume_driver(self): """Test volume migration done by driver.""" # stub out driver and rpc functions self.stubs.Set(self.volume.driver, 'migrate_volume', lambda x, y, z, new_type_id=None: ( True, {'user_id': fake.user_id})) volume = tests_utils.create_volume(self.context, size=0, host=CONF.host, migration_status='migrating') host_obj = {'host': 'newhost', 'capabilities': {}} self.volume.migrate_volume(self.context, volume.id, host_obj, False, volume=volume) # check volume properties volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) def _fake_create_volume(self, ctxt, volume, host, req_spec, filters, allow_reschedule=True): return db.volume_update(ctxt, volume['id'], {'status': self.expected_status}) def test_migrate_volume_error(self): with mock.patch.object(self.volume.driver, 'migrate_volume') as \ mock_migrate,\ mock.patch.object(self.volume.driver, 'create_export') as \ mock_create_export: # Exception case at self.driver.migrate_volume and create_export mock_migrate.side_effect = processutils.ProcessExecutionError mock_create_export.side_effect = processutils.ProcessExecutionError volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, volume.id, host_obj, False, volume=volume) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume.migration_status) self.assertEqual('available', volume.status) @mock.patch('cinder.compute.API') @mock.patch('cinder.volume.manager.VolumeManager.' 'migrate_volume_completion') @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_migrate_volume_generic(self, volume_get, migrate_volume_completion, nova_api): fake_db_new_volume = {'status': 'available', 'id': fake.volume_id} fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) new_volume_obj = fake_volume.fake_volume_obj(self.context, **fake_new_volume) host_obj = {'host': 'newhost', 'capabilities': {}} volume_get.return_value = fake_new_volume update_server_volume = nova_api.return_value.update_server_volume volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) with mock.patch.object(self.volume, '_copy_volume_data') as \ mock_copy_volume: self.volume._migrate_volume_generic(self.context, volume, host_obj, None) mock_copy_volume.assert_called_with(self.context, volume, new_volume_obj, remote='dest') migrate_volume_completion.assert_called_with( self.context, volume.id, new_volume_obj.id, error=False) self.assertFalse(update_server_volume.called) @mock.patch('cinder.compute.API') @mock.patch('cinder.volume.manager.VolumeManager.' 
'migrate_volume_completion') @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_migrate_volume_generic_attached_volume(self, volume_get, migrate_volume_completion, nova_api): attached_host = 'some-host' fake_volume_id = fake.volume_id fake_db_new_volume = {'status': 'available', 'id': fake_volume_id} fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) host_obj = {'host': 'newhost', 'capabilities': {}} fake_uuid = fakes.get_fake_uuid() update_server_volume = nova_api.return_value.update_server_volume volume_get.return_value = fake_new_volume volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) volume_attach = tests_utils.attach_volume( self.context, volume['id'], fake_uuid, attached_host, '/dev/vda') self.assertIsNotNone(volume_attach['volume_attachment'][0]['id']) self.assertEqual( fake_uuid, volume_attach['volume_attachment'][0]['instance_uuid']) self.assertEqual('in-use', volume_attach['status']) self.volume._migrate_volume_generic(self.context, volume, host_obj, None) self.assertFalse(migrate_volume_completion.called) update_server_volume.assert_called_with(self.context, fake_uuid, volume['id'], fake_volume_id) @mock.patch.object(volume_rpcapi.VolumeAPI, 'update_migrated_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') def test_migrate_volume_for_volume_generic(self, create_volume, rpc_delete_volume, update_migrated_volume): fake_volume = tests_utils.create_volume(self.context, size=1, previous_status='available', host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} with mock.patch.object(self.volume.driver, 'migrate_volume') as \ mock_migrate_volume,\ mock.patch.object(self.volume, '_copy_volume_data'),\ mock.patch.object(self.volume.driver, 'delete_volume') as \ delete_volume: create_volume.side_effect = self._fake_create_volume self.volume.migrate_volume(self.context, fake_volume.id, host_obj, True, volume=fake_volume) volume = objects.Volume.get_by_id(context.get_admin_context(), fake_volume.id) self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) self.assertFalse(mock_migrate_volume.called) self.assertFalse(delete_volume.called) self.assertTrue(rpc_delete_volume.called) self.assertTrue(update_migrated_volume.called) def test_migrate_volume_generic_copy_error(self): with mock.patch.object(self.volume.driver, 'migrate_volume'),\ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\ as mock_create_volume,\ mock.patch.object(self.volume, '_copy_volume_data') as \ mock_copy_volume,\ mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\ mock.patch.object(self.volume, 'migrate_volume_completion'),\ mock.patch.object(self.volume.driver, 'create_export'): # Exception case at migrate_volume_generic # source_volume['migration_status'] is 'migrating' mock_create_volume.side_effect = self._fake_create_volume mock_copy_volume.side_effect = processutils.ProcessExecutionError volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, volume.id, host_obj, True, volume=volume) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume.migration_status) self.assertEqual('available', volume.status) def test_migrate_volume_with_glance_metadata(self): volume = 
self._create_volume_from_image(clone_image_volume=True) glance_metadata = volume.glance_metadata # We imitate the behavior of rpcapi, by serializing and then # deserializing the volume object we created earlier. serializer = objects.base.CinderObjectSerializer() serialized_volume = serializer.serialize_entity(self.context, volume) volume = serializer.deserialize_entity(self.context, serialized_volume) host_obj = {'host': 'newhost', 'capabilities': {}} with mock.patch.object(self.volume.driver, 'migrate_volume') as mock_migrate_volume: mock_migrate_volume.side_effect = ( lambda x, y, z, new_type_id=None: ( True, {'user_id': fake.user_id})) self.volume.migrate_volume(self.context, volume.id, host_obj, False, volume=volume) self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) self.assertEqual(glance_metadata, volume.glance_metadata) @mock.patch('cinder.db.volume_update') def test_update_migrated_volume(self, volume_update): fake_host = 'fake_host' fake_new_host = 'fake_new_host' fake_update = {'_name_id': fake.volume2_name_id, 'provider_location': 'updated_location'} fake_elevated = context.RequestContext(fake.user_id, self.project_id, is_admin=True) volume = tests_utils.create_volume(self.context, size=1, status='available', host=fake_host) new_volume = tests_utils.create_volume( self.context, size=1, status='available', provider_location='fake_provider_location', _name_id=fake.volume_name_id, host=fake_new_host) new_volume._name_id = fake.volume_name_id new_volume.provider_location = 'fake_provider_location' fake_update_error = {'_name_id': new_volume._name_id, 'provider_location': new_volume.provider_location} expected_update = {'_name_id': volume._name_id, 'provider_location': volume.provider_location} with mock.patch.object(self.volume.driver, 'update_migrated_volume') as migrate_update,\ mock.patch.object(self.context, 'elevated') as elevated: migrate_update.return_value = fake_update elevated.return_value = fake_elevated self.volume.update_migrated_volume(self.context, volume, new_volume, 'available') volume_update.assert_has_calls(( mock.call(fake_elevated, new_volume.id, expected_update), mock.call(fake_elevated, volume.id, fake_update))) # Test the case for update_migrated_volume not implemented # for the driver. migrate_update.reset_mock() volume_update.reset_mock() # Reset the volume objects to their original value, since they # were changed in the last call. 
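# Per the assertions above, update_migrated_volume is expected to swap the
# _name_id and provider_location between the source and destination rows,
# so the original volume ID keeps pointing at the migrated data; the reset
# below restores the destination's fields before exercising the
# NotImplementedError fallback.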
new_volume._name_id = fake.volume_name_id new_volume.provider_location = 'fake_provider_location' migrate_update.side_effect = NotImplementedError self.volume.update_migrated_volume(self.context, volume, new_volume, 'available') volume_update.assert_has_calls(( mock.call(fake_elevated, new_volume.id, fake_update), mock.call(fake_elevated, volume.id, fake_update_error))) def test_migrate_volume_generic_create_volume_error(self): self.expected_status = 'error' with mock.patch.object(self.volume.driver, 'migrate_volume'), \ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as \ mock_create_volume, \ mock.patch.object(self.volume, '_clean_temporary_volume') as \ clean_temporary_volume: # Exception case at the creation of the new temporary volume mock_create_volume.side_effect = self._fake_create_volume volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(exception.VolumeMigrationFailed, self.volume.migrate_volume, self.context, volume.id, host_obj, True, volume=volume) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) self.assertTrue(clean_temporary_volume.called) self.expected_status = 'available' def test_migrate_volume_generic_timeout_error(self): CONF.set_override("migration_create_volume_timeout_secs", 2) with mock.patch.object(self.volume.driver, 'migrate_volume'), \ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as \ mock_create_volume, \ mock.patch.object(self.volume, '_clean_temporary_volume') as \ clean_temporary_volume, \ mock.patch.object(time, 'sleep'): # Exception case at the timeout of the volume creation self.expected_status = 'creating' mock_create_volume.side_effect = self._fake_create_volume volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(exception.VolumeMigrationFailed, self.volume.migrate_volume, self.context, volume.id, host_obj, True, volume=volume) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) self.assertTrue(clean_temporary_volume.called) self.expected_status = 'available' def test_migrate_volume_generic_create_export_error(self): with mock.patch.object(self.volume.driver, 'migrate_volume'),\ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\ as mock_create_volume,\ mock.patch.object(self.volume, '_copy_volume_data') as \ mock_copy_volume,\ mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\ mock.patch.object(self.volume, 'migrate_volume_completion'),\ mock.patch.object(self.volume.driver, 'create_export') as \ mock_create_export: # Exception case at create_export mock_create_volume.side_effect = self._fake_create_volume mock_copy_volume.side_effect = processutils.ProcessExecutionError mock_create_export.side_effect = processutils.ProcessExecutionError volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, volume.id, host_obj, True, volume=volume) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) def 
test_migrate_volume_generic_migrate_volume_completion_error(self): def fake_migrate_volume_completion(ctxt, volume_id, new_volume_id, error=False): db.volume_update(ctxt, volume['id'], {'migration_status': 'completing'}) raise processutils.ProcessExecutionError with mock.patch.object(self.volume.driver, 'migrate_volume'),\ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\ as mock_create_volume,\ mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\ mock.patch.object(self.volume, 'migrate_volume_completion')\ as mock_migrate_compl,\ mock.patch.object(self.volume.driver, 'create_export'), \ mock.patch.object(self.volume, '_attach_volume') \ as mock_attach, \ mock.patch.object(self.volume, '_detach_volume'), \ mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') \ as mock_get_connector_properties, \ mock.patch.object(volutils, 'copy_volume') as mock_copy, \ mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities') \ as mock_get_capabilities: # Exception case at delete_volume # source_volume['migration_status'] is 'completing' mock_create_volume.side_effect = self._fake_create_volume mock_migrate_compl.side_effect = fake_migrate_volume_completion mock_get_connector_properties.return_value = {} mock_attach.side_effect = [{'device': {'path': 'bar'}}, {'device': {'path': 'foo'}}] mock_get_capabilities.return_value = {'sparse_copy_volume': True} volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, volume.id, host_obj, True, volume=volume) volume = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) mock_copy.assert_called_once_with('foo', 'bar', 0, '1M', sparse=True) def fake_attach_volume(self, ctxt, volume, instance_uuid, host_name, mountpoint, mode): tests_utils.attach_volume(ctxt, volume.id, instance_uuid, host_name, '/dev/vda') def _test_migrate_volume_completion(self, status='available', instance_uuid=None, attached_host=None, retyping=False, previous_status='available'): initial_status = retyping and 'retyping' or status old_volume = tests_utils.create_volume(self.context, size=0, host=CONF.host, status=initial_status, migration_status='migrating', previous_status=previous_status) attachment_id = None if status == 'in-use': vol = tests_utils.attach_volume(self.context, old_volume.id, instance_uuid, attached_host, '/dev/vda') self.assertEqual('in-use', vol['status']) attachment_id = vol['volume_attachment'][0]['id'] target_status = 'target:%s' % old_volume.id new_host = CONF.host + 'new' new_volume = tests_utils.create_volume(self.context, size=0, host=new_host, migration_status=target_status) with mock.patch.object(self.volume, 'detach_volume') as \ mock_detach_volume,\ mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \ mock_delete_volume, \ mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume') as \ mock_attach_volume,\ mock.patch.object(volume_rpcapi.VolumeAPI, 'update_migrated_volume'),\ mock.patch.object(self.volume.driver, 'attach_volume'): mock_attach_volume.side_effect = self.fake_attach_volume self.volume.migrate_volume_completion(self.context, old_volume.id, new_volume.id) after_new_volume = objects.Volume.get_by_id(self.context, new_volume.id) after_old_volume = objects.Volume.get_by_id(self.context, old_volume.id) if status == 'in-use': 
mock_detach_volume.assert_called_with(self.context, old_volume.id, attachment_id) attachment = db.volume_attachment_get_by_instance_uuid( self.context, old_volume.id, instance_uuid) self.assertIsNotNone(attachment) self.assertEqual(attached_host, attachment['attached_host']) self.assertEqual(instance_uuid, attachment['instance_uuid']) else: self.assertFalse(mock_detach_volume.called) self.assertTrue(mock_delete_volume.called) self.assertEqual(old_volume.host, after_new_volume.host) self.assertEqual(new_volume.host, after_old_volume.host) def test_migrate_volume_completion_retype_available(self): self._test_migrate_volume_completion('available', retyping=True) def test_migrate_volume_completion_retype_in_use(self): self._test_migrate_volume_completion( 'in-use', '83c969d5-065e-4c9c-907d-5394bc2e98e2', 'some-host', retyping=True, previous_status='in-use') def test_migrate_volume_completion_migrate_available(self): self._test_migrate_volume_completion() def test_migrate_volume_completion_migrate_in_use(self): self._test_migrate_volume_completion( 'in-use', '83c969d5-065e-4c9c-907d-5394bc2e98e2', 'some-host', retyping=False, previous_status='in-use') @ddt.data(False, True) def test_api_migrate_volume_completion_from_swap_with_no_migration( self, swap_error): # This test validates that Cinder properly finishes the swap volume # status updates for the case that no migration has occurred instance_uuid = '83c969d5-065e-4c9c-907d-5394bc2e98e2' attached_host = 'attached-host' orig_attached_vol = tests_utils.create_volume(self.context, size=0) orig_attached_vol = tests_utils.attach_volume( self.context, orig_attached_vol['id'], instance_uuid, attached_host, '/dev/vda') new_volume = tests_utils.create_volume(self.context, size=0) @mock.patch.object(volume_rpcapi.VolumeAPI, 'detach_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume') def _run_migration_completion(rpc_attach_volume, rpc_detach_volume): attachment = orig_attached_vol['volume_attachment'][0] attachment_id = attachment['id'] rpc_attach_volume.side_effect = self.fake_attach_volume vol_id = volume_api.API().migrate_volume_completion( self.context, orig_attached_vol, new_volume, swap_error) if swap_error: # When swap failed, we don't want to finish attachment self.assertFalse(rpc_detach_volume.called) self.assertFalse(rpc_attach_volume.called) else: # When no error, we should be finishing the attachment rpc_detach_volume.assert_called_with(self.context, orig_attached_vol, attachment_id) rpc_attach_volume.assert_called_with( self.context, new_volume, attachment['instance_uuid'], attachment['attached_host'], attachment['mountpoint'], 'rw') self.assertEqual(new_volume['id'], vol_id) _run_migration_completion() def test_retype_setup_fail_volume_is_available(self): """Verify volume is still available if retype prepare failed.""" elevated = context.get_admin_context() project_id = self.context.project_id db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}}) old_vol_type = db.volume_type_get_by_name(elevated, 'old') db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}}) new_vol_type = db.volume_type_get_by_name(elevated, 'new') db.quota_create(elevated, project_id, 'volumes_new', 0) volume = tests_utils.create_volume(self.context, size=1, host=CONF.host, status='available', volume_type_id=old_vol_type['id']) api = cinder.volume.api.API() self.assertRaises(exception.VolumeLimitExceeded, api.retype, self.context, volume, new_vol_type['id']) volume = db.volume_get(elevated, volume.id) self.assertEqual(0, 
len(self.notifier.notifications), self.notifier.notifications) self.assertEqual('available', volume['status']) def _retype_volume_exec(self, driver, snap=False, policy='on-demand', migrate_exc=False, exc=None, diff_equal=False, replica=False, reserve_vol_type_only=False): elevated = context.get_admin_context() project_id = self.context.project_id db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}}) old_vol_type = db.volume_type_get_by_name(elevated, 'old') db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}}) vol_type = db.volume_type_get_by_name(elevated, 'new') db.quota_create(elevated, project_id, 'volumes_new', 10) if replica: rep_status = 'active' else: rep_status = 'disabled' volume = tests_utils.create_volume(self.context, size=1, host=CONF.host, status='retyping', volume_type_id=old_vol_type['id'], replication_status=rep_status) volume.previous_status = 'available' volume.save() if snap: create_snapshot(volume.id, size=volume.size) if driver or diff_equal: host_obj = {'host': CONF.host, 'capabilities': {}} else: host_obj = {'host': 'newhost', 'capabilities': {}} reserve_opts = {'volumes': 1, 'gigabytes': volume.size} QUOTAS.add_volume_type_opts(self.context, reserve_opts, vol_type['id']) if reserve_vol_type_only: reserve_opts.pop('volumes') reserve_opts.pop('gigabytes') try: usage = db.quota_usage_get(elevated, project_id, 'volumes') total_volumes_in_use = usage.in_use usage = db.quota_usage_get(elevated, project_id, 'gigabytes') total_gigabytes_in_use = usage.in_use except exception.QuotaUsageNotFound: total_volumes_in_use = 0 total_gigabytes_in_use = 0 reservations = QUOTAS.reserve(self.context, project_id=project_id, **reserve_opts) old_reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} QUOTAS.add_volume_type_opts(self.context, old_reserve_opts, old_vol_type['id']) old_reservations = QUOTAS.reserve(self.context, project_id=project_id, **old_reserve_opts) with mock.patch.object(self.volume.driver, 'retype') as _retype,\ mock.patch.object(volume_types, 'volume_types_diff') as _diff,\ mock.patch.object(self.volume, 'migrate_volume') as _mig,\ mock.patch.object(db.sqlalchemy.api, 'volume_get') as mock_get: mock_get.return_value = volume _retype.return_value = driver _diff.return_value = ({}, diff_equal) if migrate_exc: _mig.side_effect = KeyError else: _mig.return_value = True if not exc: self.volume.retype(self.context, volume.id, vol_type['id'], host_obj, migration_policy=policy, reservations=reservations, old_reservations=old_reservations, volume=volume) else: self.assertRaises(exc, self.volume.retype, self.context, volume.id, vol_type['id'], host_obj, migration_policy=policy, reservations=reservations, old_reservations=old_reservations, volume=volume) # get volume/quota properties volume = objects.Volume.get_by_id(elevated, volume.id) try: usage = db.quota_usage_get(elevated, project_id, 'volumes_new') volumes_in_use = usage.in_use except exception.QuotaUsageNotFound: volumes_in_use = 0 # Get new in_use after retype, it should not be changed. 
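        # The reservations built above mimic what the API layer hands to
        # the manager on retype: a positive reservation against the new
        # type's quotas and a negative one against the old type's, which
        # retype() then commits on success or rolls back on failure.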
        if reserve_vol_type_only:
            try:
                usage = db.quota_usage_get(elevated, project_id, 'volumes')
                new_total_volumes_in_use = usage.in_use
                usage = db.quota_usage_get(elevated, project_id,
                                           'gigabytes')
                new_total_gigabytes_in_use = usage.in_use
            except exception.QuotaUsageNotFound:
                new_total_volumes_in_use = 0
                new_total_gigabytes_in_use = 0
            self.assertEqual(total_volumes_in_use,
                             new_total_volumes_in_use)
            self.assertEqual(total_gigabytes_in_use,
                             new_total_gigabytes_in_use)

        # check properties
        if driver or diff_equal:
            self.assertEqual(vol_type['id'], volume.volume_type_id)
            self.assertEqual('available', volume.status)
            self.assertEqual(CONF.host, volume.host)
            self.assertEqual(1, volumes_in_use)
            self.assertEqual(1, len(self.notifier.notifications),
                             "Notifier count incorrect %s" %
                             (self.notifier.notifications))
            self.assertEqual(vol_type['id'],
                             self.notifier.notifications[0]
                             ['payload']['volume_type'])
            self.assertEqual('volume.retype',
                             self.notifier.notifications[0]
                             ['event_type'])
        elif not exc:
            self.assertEqual(old_vol_type['id'], volume.volume_type_id)
            self.assertEqual('retyping', volume.status)
            self.assertEqual(CONF.host, volume.host)
            self.assertEqual(1, volumes_in_use)
            self.assertEqual(1, len(self.notifier.notifications),
                             "Notifier count incorrect %s" %
                             (self.notifier.notifications))
            self.assertEqual(vol_type['id'],
                             self.notifier.notifications[0]
                             ['payload']['volume_type'])
            self.assertEqual('volume.retype',
                             self.notifier.notifications[0]
                             ['event_type'])
        else:
            self.assertEqual(old_vol_type['id'], volume.volume_type_id)
            self.assertEqual('available', volume.status)
            self.assertEqual(CONF.host, volume.host)
            self.assertEqual(0, volumes_in_use)
            self.assertEqual(0, len(self.notifier.notifications),
                             "Notifier count incorrect %s" %
                             (self.notifier.notifications))

    def test_retype_volume_driver_success(self):
        self._retype_volume_exec(True)

    def test_retype_volume_migration_bad_policy(self):
        # Test a volume retype that requires migration but is not allowed.
        self._retype_volume_exec(False, policy='never',
                                 exc=exception.VolumeMigrationFailed)

    def test_retype_volume_migration_with_replica(self):
        self._retype_volume_exec(False, replica=True,
                                 exc=exception.InvalidVolume)

    def test_retype_volume_migration_with_snaps(self):
        self._retype_volume_exec(False, snap=True,
                                 exc=exception.InvalidVolume)

    def test_retype_volume_migration_failed(self):
        self._retype_volume_exec(False, migrate_exc=True, exc=KeyError)

    def test_retype_volume_migration_success(self):
        self._retype_volume_exec(False, migrate_exc=False, exc=None)

    def test_retype_volume_migration_equal_types(self):
        self._retype_volume_exec(False, diff_equal=True)

    def test_retype_volume_with_type_only(self):
        self._retype_volume_exec(True, reserve_vol_type_only=True)

    def test_migrate_driver_not_initialized(self):
        volume = tests_utils.create_volume(self.context, size=0,
                                           host=CONF.host)
        host_obj = {'host': 'newhost', 'capabilities': {}}
        self.volume.driver._initialized = False
        self.assertRaises(exception.DriverNotInitialized,
                          self.volume.migrate_volume,
                          self.context, volume.id, host_obj, True,
                          volume=volume)
        volume = objects.Volume.get_by_id(context.get_admin_context(),
                                          volume.id)
        self.assertEqual('error', volume.migration_status)
        # Let's clean up the mess.
self.volume.driver._initialized = True self.volume.delete_volume(self.context, volume['id']) def test_delete_source_volume_in_migration(self): """Test deleting a source volume that is in migration.""" self._test_delete_volume_in_migration('migrating') def test_delete_destination_volume_in_migration(self): """Test deleting a destination volume that is in migration.""" self._test_delete_volume_in_migration('target:vol-id') def _test_delete_volume_in_migration(self, migration_status): """Test deleting a volume that is in migration.""" volume = tests_utils.create_volume(self.context, **self.volume_params) volume = db.volume_update(self.context, volume['id'], {'status': 'available', 'migration_status': migration_status}) self.volume.delete_volume(self.context, volume['id']) # The volume is successfully removed during the volume delete # and won't exist in the database any more. self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume['id']) class ConsistencyGroupTestCase(BaseVolumeTestCase): def test_delete_volume_in_consistency_group(self): """Test deleting a volume that's tied to a consistency group fails.""" consistencygroup_id = fake.consistency_group_id volume_api = cinder.volume.api.API() self.volume_params.update({'status': 'available', 'consistencygroup_id': consistencygroup_id}) volume = tests_utils.create_volume(self.context, **self.volume_params) self.assertRaises(exception.InvalidVolume, volume_api.delete, self.context, volume) @mock.patch.object(CGQUOTAS, "reserve", return_value=["RESERVATION"]) @mock.patch.object(CGQUOTAS, "commit") @mock.patch.object(CGQUOTAS, "rollback") @mock.patch.object(driver.VolumeDriver, "delete_consistencygroup", return_value=({'status': ( fields.ConsistencyGroupStatus.DELETED)}, [])) def test_create_delete_consistencygroup(self, fake_delete_cg, fake_rollback, fake_commit, fake_reserve): """Test consistencygroup can be created and deleted.""" def fake_driver_create_cg(context, group): """Make sure that the pool is part of the host.""" self.assertIn('host', group) host = group.host pool = volutils.extract_host(host, level='pool') self.assertEqual('fakepool', pool) return {'status': 'available'} self.stubs.Set(self.volume.driver, 'create_consistencygroup', fake_driver_create_cg) group = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', host='fakehost@fakedrv#fakepool') group = objects.ConsistencyGroup.get_by_id(self.context, group.id) self.assertEqual(0, len(self.notifier.notifications), self.notifier.notifications) self.volume.create_consistencygroup(self.context, group) self.assertEqual(2, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[0] self.assertEqual('consistencygroup.create.start', msg['event_type']) expected = { 'status': fields.ConsistencyGroupStatus.AVAILABLE, 'name': 'test_cg', 'availability_zone': 'nova', 'tenant_id': self.context.project_id, 'created_at': 'DONTCARE', 'user_id': fake.user_id, 'consistencygroup_id': group.id } self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[1] self.assertEqual('consistencygroup.create.end', msg['event_type']) self.assertDictMatch(expected, msg['payload']) self.assertEqual( group.id, objects.ConsistencyGroup.get_by_id(context.get_admin_context(), group.id).id) self.volume.delete_consistencygroup(self.context, group) cg = objects.ConsistencyGroup.get_by_id( context.get_admin_context(read_deleted='yes'), group.id) 
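# The group row is only soft-deleted, so it can still be read back with
# read_deleted='yes' to check its final status, while a normal context is
# expected to raise NotFound below.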
self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status) self.assertEqual(4, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[2] self.assertEqual('consistencygroup.delete.start', msg['event_type']) self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[3] self.assertEqual('consistencygroup.delete.end', msg['event_type']) expected['status'] = fields.ConsistencyGroupStatus.DELETED self.assertDictMatch(expected, msg['payload']) self.assertRaises(exception.NotFound, objects.ConsistencyGroup.get_by_id, self.context, group.id) @mock.patch.object(CGQUOTAS, "reserve", return_value=["RESERVATION"]) @mock.patch.object(CGQUOTAS, "commit") @mock.patch.object(CGQUOTAS, "rollback") @mock.patch.object(driver.VolumeDriver, "create_consistencygroup", return_value={'status': 'available'}) @mock.patch.object(driver.VolumeDriver, "update_consistencygroup") def test_update_consistencygroup(self, fake_update_cg, fake_create_cg, fake_rollback, fake_commit, fake_reserve): """Test consistencygroup can be updated.""" group = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2') self.volume.create_consistencygroup(self.context, group) volume = tests_utils.create_volume( self.context, consistencygroup_id=group.id, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) volume2 = tests_utils.create_volume( self.context, consistencygroup_id=None, **self.volume_params) volume_id2 = volume2['id'] self.volume.create_volume(self.context, volume_id2) fake_update_cg.return_value = ( {'status': fields.ConsistencyGroupStatus.AVAILABLE}, [{'id': volume_id2, 'status': 'available'}], [{'id': volume_id, 'status': 'available'}]) self.volume.update_consistencygroup(self.context, group, add_volumes=volume_id2, remove_volumes=volume_id) cg = objects.ConsistencyGroup.get_by_id(self.context, group.id) expected = { 'status': fields.ConsistencyGroupStatus.AVAILABLE, 'name': 'test_cg', 'availability_zone': 'nova', 'tenant_id': self.context.project_id, 'created_at': 'DONTCARE', 'user_id': fake.user_id, 'consistencygroup_id': group.id } self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg.status) self.assertEqual(10, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[6] self.assertEqual('consistencygroup.update.start', msg['event_type']) self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[8] self.assertEqual('consistencygroup.update.end', msg['event_type']) self.assertDictMatch(expected, msg['payload']) cgvolumes = db.volume_get_all_by_group(self.context, group.id) cgvol_ids = [cgvol['id'] for cgvol in cgvolumes] # Verify volume is removed. self.assertNotIn(volume_id, cgvol_ids) # Verify volume is added. 
self.assertIn(volume_id2, cgvol_ids) self.volume_params['status'] = 'wrong-status' volume3 = tests_utils.create_volume( self.context, consistencygroup_id=None, **self.volume_params) volume_id3 = volume3['id'] volume_get_orig = self.volume.db.volume_get self.volume.db.volume_get = mock.Mock( return_value={'status': 'wrong_status', 'id': volume_id3}) # Try to add a volume in wrong status self.assertRaises(exception.InvalidVolume, self.volume.update_consistencygroup, self.context, group, add_volumes=volume_id3, remove_volumes=None) self.volume.db.volume_get.reset_mock() self.volume.db.volume_get = volume_get_orig @mock.patch.object(driver.VolumeDriver, "create_consistencygroup", return_value={'status': 'available'}) @mock.patch.object(driver.VolumeDriver, "delete_consistencygroup", return_value=({'status': 'deleted'}, [])) @mock.patch.object(driver.VolumeDriver, "create_cgsnapshot", return_value={'status': 'available'}) @mock.patch.object(driver.VolumeDriver, "delete_cgsnapshot", return_value=({'status': 'deleted'}, [])) @mock.patch.object(driver.VolumeDriver, "create_consistencygroup_from_src", return_value=(None, None)) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'create_volume_from_snapshot') @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'create_cloned_volume') def test_create_consistencygroup_from_src(self, mock_create_cloned_vol, mock_create_vol_from_snap, mock_create_from_src, mock_delete_cgsnap, mock_create_cgsnap, mock_delete_cg, mock_create_cg): """Test consistencygroup can be created and deleted.""" group = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', status=fields.ConsistencyGroupStatus.AVAILABLE) volume = tests_utils.create_volume( self.context, consistencygroup_id=group.id, status='available', host=CONF.host, size=1) volume_id = volume['id'] cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_id) cgsnapshot = cgsnapshot_returns[0] snapshot_id = cgsnapshot_returns[1]['id'] # Create CG from source CG snapshot. 
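# Two creation paths are exercised below: first a new group built from a
# cgsnapshot of the source group, then (further down) one built directly
# from the source group via source_cgid.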
group2 = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', cgsnapshot_id=cgsnapshot.id) group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id) volume2 = tests_utils.create_volume( self.context, consistencygroup_id=group2.id, snapshot_id=snapshot_id, **self.volume_params) self.volume.create_volume(self.context, volume2.id, volume=volume2) self.volume.create_consistencygroup_from_src( self.context, group2, cgsnapshot=cgsnapshot) cg2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id) expected = { 'status': fields.ConsistencyGroupStatus.AVAILABLE, 'name': 'test_cg', 'availability_zone': 'nova', 'tenant_id': self.context.project_id, 'created_at': 'DONTCARE', 'user_id': fake.user_id, 'consistencygroup_id': group2.id, } self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg2.status) self.assertEqual(group2.id, cg2['id']) self.assertEqual(cgsnapshot.id, cg2['cgsnapshot_id']) self.assertIsNone(cg2['source_cgid']) msg = self.notifier.notifications[2] self.assertEqual('consistencygroup.create.start', msg['event_type']) self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[4] self.assertEqual('consistencygroup.create.end', msg['event_type']) self.assertDictMatch(expected, msg['payload']) if len(self.notifier.notifications) > 6: self.assertFalse(self.notifier.notifications[6], self.notifier.notifications) self.assertEqual(6, len(self.notifier.notifications), self.notifier.notifications) self.volume.delete_consistencygroup(self.context, group2) if len(self.notifier.notifications) > 10: self.assertFalse(self.notifier.notifications[10], self.notifier.notifications) self.assertEqual(10, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[6] self.assertEqual('consistencygroup.delete.start', msg['event_type']) expected['status'] = fields.ConsistencyGroupStatus.AVAILABLE self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[8] self.assertEqual('consistencygroup.delete.end', msg['event_type']) expected['status'] = fields.ConsistencyGroupStatus.DELETED self.assertDictMatch(expected, msg['payload']) cg2 = objects.ConsistencyGroup.get_by_id( context.get_admin_context(read_deleted='yes'), group2.id) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg2.status) self.assertRaises(exception.NotFound, objects.ConsistencyGroup.get_by_id, self.context, group2.id) # Create CG from source CG. 
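# Second path: this group references the source group through source_cgid
# rather than through a cgsnapshot.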
group3 = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2', source_cgid=group.id) volume3 = tests_utils.create_volume( self.context, consistencygroup_id=group3.id, source_volid=volume_id, **self.volume_params) self.volume.create_volume(self.context, volume3.id, volume=volume3) self.volume.create_consistencygroup_from_src( self.context, group3, source_cg=group) cg3 = objects.ConsistencyGroup.get_by_id(self.context, group3.id) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg3.status) self.assertEqual(group3.id, cg3.id) self.assertEqual(group.id, cg3.source_cgid) self.assertIsNone(cg3.cgsnapshot_id) self.volume.delete_cgsnapshot(self.context, cgsnapshot) self.volume.delete_consistencygroup(self.context, group) def test_sort_snapshots(self): vol1 = {'id': fake.volume_id, 'name': 'volume 1', 'snapshot_id': fake.snapshot_id, 'consistencygroup_id': fake.consistency_group_id} vol2 = {'id': fake.volume2_id, 'name': 'volume 2', 'snapshot_id': fake.snapshot2_id, 'consistencygroup_id': fake.consistency_group_id} vol3 = {'id': fake.volume3_id, 'name': 'volume 3', 'snapshot_id': fake.snapshot3_id, 'consistencygroup_id': fake.consistency_group_id} snp1 = {'id': fake.snapshot_id, 'name': 'snap 1', 'cgsnapshot_id': fake.consistency_group_id} snp2 = {'id': fake.snapshot2_id, 'name': 'snap 2', 'cgsnapshot_id': fake.consistency_group_id} snp3 = {'id': fake.snapshot3_id, 'name': 'snap 3', 'cgsnapshot_id': fake.consistency_group_id} snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1) snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2) snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3) volumes = [] snapshots = [] volumes.append(vol1) volumes.append(vol2) volumes.append(vol3) snapshots.append(snp2_obj) snapshots.append(snp3_obj) snapshots.append(snp1_obj) i = 0 for vol in volumes: snap = snapshots[i] i += 1 self.assertNotEqual(vol['snapshot_id'], snap.id) sorted_snaps = self.volume._sort_snapshots(volumes, snapshots) i = 0 for vol in volumes: snap = sorted_snaps[i] i += 1 self.assertEqual(vol['snapshot_id'], snap.id) snapshots[2]['id'] = fake.will_not_be_found_id self.assertRaises(exception.SnapshotNotFound, self.volume._sort_snapshots, volumes, snapshots) self.assertRaises(exception.InvalidInput, self.volume._sort_snapshots, volumes, []) def test_sort_source_vols(self): vol1 = {'id': '1', 'name': 'volume 1', 'source_volid': '1', 'consistencygroup_id': '2'} vol2 = {'id': '2', 'name': 'volume 2', 'source_volid': '2', 'consistencygroup_id': '2'} vol3 = {'id': '3', 'name': 'volume 3', 'source_volid': '3', 'consistencygroup_id': '2'} src_vol1 = {'id': '1', 'name': 'source vol 1', 'consistencygroup_id': '1'} src_vol2 = {'id': '2', 'name': 'source vol 2', 'consistencygroup_id': '1'} src_vol3 = {'id': '3', 'name': 'source vol 3', 'consistencygroup_id': '1'} volumes = [] src_vols = [] volumes.append(vol1) volumes.append(vol2) volumes.append(vol3) src_vols.append(src_vol2) src_vols.append(src_vol3) src_vols.append(src_vol1) i = 0 for vol in volumes: src_vol = src_vols[i] i += 1 self.assertNotEqual(vol['source_volid'], src_vol['id']) sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols) i = 0 for vol in volumes: src_vol = sorted_src_vols[i] i += 1 self.assertEqual(vol['source_volid'], src_vol['id']) src_vols[2]['id'] = '9999' self.assertRaises(exception.VolumeNotFound, self.volume._sort_source_vols, volumes, src_vols) self.assertRaises(exception.InvalidInput, 
self.volume._sort_source_vols, volumes, []) def _create_cgsnapshot(self, group_id, volume_id, size='0'): """Create a cgsnapshot object.""" cgsnap = objects.CGSnapshot(self.context) cgsnap.user_id = fake.user_id cgsnap.project_id = fake.project_id cgsnap.consistencygroup_id = group_id cgsnap.status = "creating" cgsnap.create() # Create a snapshot object snap = objects.Snapshot(context.get_admin_context()) snap.volume_size = size snap.user_id = fake.user_id snap.project_id = fake.project_id snap.volume_id = volume_id snap.status = "available" snap.cgsnapshot_id = cgsnap.id snap.create() return cgsnap, snap @mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup', autospec=True, return_value={'status': 'available'}) @mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup', autospec=True, return_value=({'status': 'deleted'}, [])) @mock.patch('cinder.volume.driver.VolumeDriver.create_cgsnapshot', autospec=True, return_value=({'status': 'available'}, [])) @mock.patch('cinder.volume.driver.VolumeDriver.delete_cgsnapshot', autospec=True, return_value=({'status': 'deleted'}, [])) def test_create_delete_cgsnapshot(self, mock_del_cgsnap, mock_create_cgsnap, mock_del_cg, _mock_create_cg): """Test cgsnapshot can be created and deleted.""" group = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2') volume = tests_utils.create_volume( self.context, consistencygroup_id=group.id, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) if len(self.notifier.notifications) > 2: self.assertFalse(self.notifier.notifications[2], self.notifier.notifications) self.assertEqual(2, len(self.notifier.notifications), self.notifier.notifications) cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_id) cgsnapshot = cgsnapshot_returns[0] self.volume.create_cgsnapshot(self.context, cgsnapshot) self.assertEqual(cgsnapshot.id, objects.CGSnapshot.get_by_id( context.get_admin_context(), cgsnapshot.id).id) if len(self.notifier.notifications) > 6: self.assertFalse(self.notifier.notifications[6], self.notifier.notifications) msg = self.notifier.notifications[2] self.assertEqual('cgsnapshot.create.start', msg['event_type']) expected = { 'created_at': 'DONTCARE', 'name': None, 'cgsnapshot_id': cgsnapshot.id, 'status': 'creating', 'tenant_id': fake.project_id, 'user_id': fake.user_id, 'consistencygroup_id': group.id } self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[3] self.assertEqual('snapshot.create.start', msg['event_type']) msg = self.notifier.notifications[4] expected['status'] = 'available' self.assertEqual('cgsnapshot.create.end', msg['event_type']) self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[5] self.assertEqual('snapshot.create.end', msg['event_type']) self.assertEqual(6, len(self.notifier.notifications), self.notifier.notifications) self.volume.delete_cgsnapshot(self.context, cgsnapshot) if len(self.notifier.notifications) > 10: self.assertFalse(self.notifier.notifications[10], self.notifier.notifications) msg = self.notifier.notifications[6] self.assertEqual('cgsnapshot.delete.start', msg['event_type']) expected['status'] = 'available' self.assertDictMatch(expected, msg['payload']) msg = self.notifier.notifications[8] self.assertEqual('cgsnapshot.delete.end', msg['event_type']) expected['status'] = 'deleted' self.assertDictMatch(expected, msg['payload']) self.assertEqual(10, 
len(self.notifier.notifications), self.notifier.notifications) cgsnap = objects.CGSnapshot.get_by_id( context.get_admin_context(read_deleted='yes'), cgsnapshot.id) self.assertEqual('deleted', cgsnap.status) self.assertRaises(exception.NotFound, objects.CGSnapshot.get_by_id, self.context, cgsnapshot.id) self.volume.delete_consistencygroup(self.context, group) self.assertTrue(mock_create_cgsnap.called) self.assertTrue(mock_del_cgsnap.called) self.assertTrue(mock_del_cg.called) @mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup', return_value={'status': 'available'}) @mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup', return_value=({'status': 'deleted'}, [])) def test_delete_consistencygroup_correct_host(self, mock_del_cg, _mock_create_cg): """Test consistencygroup can be deleted. Test consistencygroup can be deleted when volumes are on the correct volume node. """ group = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2') volume = tests_utils.create_volume( self.context, consistencygroup_id=group.id, host='host1@backend1#pool1', status='creating', size=1) self.volume.host = 'host1@backend1' self.volume.create_volume(self.context, volume.id, volume=volume) self.volume.delete_consistencygroup(self.context, group) cg = objects.ConsistencyGroup.get_by_id( context.get_admin_context(read_deleted='yes'), group.id) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status) self.assertRaises(exception.NotFound, objects.ConsistencyGroup.get_by_id, self.context, group.id) self.assertTrue(mock_del_cg.called) @mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup', return_value={'status': 'available'}) def test_delete_consistencygroup_wrong_host(self, *_mock_create_cg): """Test consistencygroup cannot be deleted. Test consistencygroup cannot be deleted when volumes in the group are not local to the volume node. """ group = tests_utils.create_consistencygroup( self.context, availability_zone=CONF.storage_availability_zone, volume_type='type1,type2') volume = tests_utils.create_volume( self.context, consistencygroup_id=group.id, host='host1@backend1#pool1', status='creating', size=1) self.volume.host = 'host1@backend2' self.volume.create_volume(self.context, volume.id, volume=volume) self.assertRaises(exception.InvalidVolume, self.volume.delete_consistencygroup, self.context, group) cg = objects.ConsistencyGroup.get_by_id(self.context, group.id) # Group is not deleted self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg.status) def test_create_volume_with_consistencygroup_invalid_type(self): """Test volume creation with ConsistencyGroup & invalid volume type.""" vol_type = db.volume_type_create( context.get_admin_context(), dict(name=conf_fixture.def_vol_type, extra_specs={}) ) db_vol_type = db.volume_type_get(context.get_admin_context(), vol_type.id) cg = { 'id': '1', 'name': 'cg1', 'volume_type_id': db_vol_type['id'], } fake_type = { 'id': '9999', 'name': 'fake', } vol_api = cinder.volume.api.API() # Volume type must be provided when creating a volume in a # consistency group. self.assertRaises(exception.InvalidInput, vol_api.create, self.context, 1, 'vol1', 'volume 1', consistencygroup=cg) # Volume type must be valid. 
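        # The fake_type dict above was never registered via
        # volume_type_create, so it must be rejected just like a missing
        # volume type.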
        self.assertRaises(exception.InvalidInput,
                          vol_api.create,
                          self.context, 1, 'vol1', 'volume 1',
                          volume_type=fake_type,
                          consistencygroup=cg)

    @mock.patch.object(fake_driver.FakeISCSIDriver, 'get_volume_stats')
    @mock.patch.object(driver.BaseVD, '_init_vendor_properties')
    def test_get_capabilities(self, mock_init_vendor,
                              mock_get_volume_stats):
        stats = {
            'volume_backend_name': 'lvm',
            'vendor_name': 'Open Source',
            'storage_protocol': 'iSCSI',
            'vendor_prefix': 'abcd'
        }
        expected = stats.copy()
        expected['properties'] = {
            'compression': {
                'title': 'Compression',
                'description': 'Enables compression.',
                'type': 'boolean'},
            'qos': {
                'title': 'QoS',
                'description': 'Enables QoS.',
                'type': 'boolean'},
            'replication': {
                'title': 'Replication',
                'description': 'Enables replication.',
                'type': 'boolean'},
            'thin_provisioning': {
                'title': 'Thin Provisioning',
                'description': 'Sets thin provisioning.',
                'type': 'boolean'},
        }

        # Test to get updated capabilities
        discover = True
        mock_get_volume_stats.return_value = stats
        mock_init_vendor.return_value = ({}, None)
        capabilities = self.volume.get_capabilities(self.context,
                                                    discover)
        self.assertEqual(expected, capabilities)
        mock_get_volume_stats.assert_called_once_with(True)

        # Test to get existing original capabilities
        mock_get_volume_stats.reset_mock()
        discover = False
        capabilities = self.volume.get_capabilities(self.context,
                                                    discover)
        self.assertEqual(expected, capabilities)
        self.assertFalse(mock_get_volume_stats.called)

        # Normal test case to get vendor unique capabilities
        def init_vendor_properties(self):
            properties = {}
            self._set_property(
                properties,
                "abcd:minIOPS",
                "Minimum IOPS QoS",
                "Sets minimum IOPS if QoS is enabled.",
                "integer",
                minimum=10,
                default=100)
            return properties, 'abcd'

        expected['properties'].update(
            {'abcd:minIOPS': {
                'title': 'Minimum IOPS QoS',
                'description': 'Sets minimum IOPS if QoS is enabled.',
                'type': 'integer',
                'minimum': 10,
                'default': 100}})

        mock_get_volume_stats.reset_mock()
        mock_init_vendor.reset_mock()
        discover = True
        mock_init_vendor.return_value = (
            init_vendor_properties(self.volume.driver))
        capabilities = self.volume.get_capabilities(self.context,
                                                    discover)
        self.assertEqual(expected, capabilities)
        self.assertTrue(mock_get_volume_stats.called)

    @mock.patch.object(fake_driver.FakeISCSIDriver, 'get_volume_stats')
    @mock.patch.object(driver.BaseVD, '_init_vendor_properties')
    @mock.patch.object(driver.BaseVD, '_init_standard_capabilities')
    def test_get_capabilities_prefix_error(self, mock_init_standard,
                                           mock_init_vendor,
                                           mock_get_volume_stats):

        # Error test case: property does not match the vendor prefix
        def init_vendor_properties(self):
            properties = {}
            self._set_property(
                properties,
                "aaa:minIOPS",
                "Minimum IOPS QoS",
                "Sets minimum IOPS if QoS is enabled.",
                "integer")
            self._set_property(
                properties,
                "abcd:compression_type",
                "Compression type",
                "Specifies compression type.",
                "string")
            return properties, 'abcd'

        expected = {
            'abcd:compression_type': {
                'title': 'Compression type',
                'description': 'Specifies compression type.',
                'type': 'string'}}

        discover = True
        mock_get_volume_stats.return_value = {}
        mock_init_standard.return_value = {}
        mock_init_vendor.return_value = (
            init_vendor_properties(self.volume.driver))
        capabilities = self.volume.get_capabilities(self.context,
                                                    discover)
        self.assertEqual(expected, capabilities['properties'])

    @mock.patch.object(fake_driver.FakeISCSIDriver, 'get_volume_stats')
    @mock.patch.object(driver.BaseVD, '_init_vendor_properties')
    @mock.patch.object(driver.BaseVD, '_init_standard_capabilities')
    def test_get_capabilities_fail_override(self, mock_init_standard,
                                            mock_init_vendor,
                                            mock_get_volume_stats):

        # Error test case: property cannot override any standard capability
        def init_vendor_properties(self):
            properties = {}
            self._set_property(
                properties,
                "qos",
                "Minimum IOPS QoS",
                "Sets minimum IOPS if QoS is enabled.",
                "integer")
            self._set_property(
                properties,
                "ab::cd:compression_type",
                "Compression type",
                "Specifies compression type.",
                "string")
            return properties, 'ab::cd'

        expected = {
            'ab__cd:compression_type': {
                'title': 'Compression type',
                'description': 'Specifies compression type.',
                'type': 'string'}}

        discover = True
        mock_get_volume_stats.return_value = {}
        mock_init_standard.return_value = {}
        mock_init_vendor.return_value = (
            init_vendor_properties(self.volume.driver))
        capabilities = self.volume.get_capabilities(self.context,
                                                    discover)
        self.assertEqual(expected, capabilities['properties'])

    def test_delete_encrypted_volume(self):
        self.volume_params['status'] = 'active'
        volume = tests_utils.create_volume(self.context,
                                           **self.volume_params)
        vol_api = cinder.volume.api.API()
        with mock.patch.object(
                vol_api.key_manager,
                'delete_key',
                side_effect=Exception):
            self.assertRaises(exception.InvalidVolume,
                              vol_api.delete,
                              self.context, volume)

    @mock.patch.object(driver.BaseVD, 'get_backup_device')
    @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled')
    def test_get_backup_device(self, mock_secure, mock_get_backup):
        vol = tests_utils.create_volume(self.context)
        backup = tests_utils.create_backup(self.context, vol['id'])
        mock_secure.return_value = False
        mock_get_backup.return_value = (vol, False)
        result = self.volume.get_backup_device(self.context, backup)

        mock_get_backup.assert_called_once_with(self.context, backup)
        mock_secure.assert_called_once_with()
        expected_result = {'backup_device': vol,
                           'secure_enabled': False,
                           'is_snapshot': False}
        self.assertEqual(expected_result, result)

    @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled')
    def test_secure_file_operations_enabled(self, mock_secure):
        mock_secure.return_value = True
        vol = tests_utils.create_volume(self.context)
        result = self.volume.secure_file_operations_enabled(self.context,
                                                            vol)
        mock_secure.assert_called_once_with()
        self.assertTrue(result)


class CopyVolumeToImageTestCase(BaseVolumeTestCase):
    def fake_local_path(self, volume):
        return self.dst_path

    def setUp(self):
        super(CopyVolumeToImageTestCase, self).setUp()
        self.dst_fd, self.dst_path = tempfile.mkstemp()
        self.addCleanup(os.unlink, self.dst_path)

        os.close(self.dst_fd)
        self.stubs.Set(self.volume.driver, 'local_path',
                       self.fake_local_path)
        self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
        self.image_meta = {
            'id': self.image_id,
            'container_format': 'bare',
            'disk_format': 'raw'
        }
        self.volume_id = 1
        self.addCleanup(db.volume_destroy, self.context, self.volume_id)

        self.volume_attrs = {
            'id': self.volume_id,
            'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'display_description': 'Test Desc',
            'size': 20,
            'status': 'uploading',
            'host': 'dummy'
        }

    def test_copy_volume_to_image_status_available(self):
        # creating volume testdata
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)

        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

    def test_copy_volume_to_image_instance_deleted(self):
        # If the instance is deleted while the volume is being uploaded to
        # an image, the volume should end up in 'available' status.
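        # The status flip back to 'available' is handled by the
        # 'volume_update_status_based_on_attachment' db api; it is wrapped
        # below so the test can clear instance_uuid first, simulating the
        # instance having been deleted mid-upload.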
        self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        # Creating volume testdata
        self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
                                             '45b1161abb02'
        db.volume_create(self.context, self.volume_attrs)

        # Storing the unmocked db api function reference here, because we
        # have to update the volume status (set instance_uuid to None)
        # before calling the real
        # 'volume_update_status_based_on_attachment' db api.
        unmocked_db_api = db.volume_update_status_based_on_attachment

        def mock_volume_update_after_upload(context, volume_id):
            # First update the volume and set 'instance_uuid' to None,
            # because after the instance is deleted, the instance_uuid of
            # the volume is set to None
            db.volume_update(context, volume_id, {'instance_uuid': None})
            # Calling the unmocked db api
            unmocked_db_api(context, volume_id)

        with mock.patch.object(
                db,
                'volume_update_status_based_on_attachment',
                side_effect=mock_volume_update_after_upload) as mock_update:
            # Start test
            self.volume.copy_volume_to_image(self.context,
                                             self.volume_id,
                                             self.image_meta)
            # Check that 'volume_update_status_based_on_attachment' is
            # called exactly once
            self.assertEqual(1, mock_update.call_count)

        # Check that the volume status has changed to available because
        # the instance is deleted
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

    def test_copy_volume_to_image_status_use(self):
        self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        # creating volume testdata
        db.volume_create(self.context, self.volume_attrs)

        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

    def test_copy_volume_to_image_exception(self):
        self.image_meta['id'] = self.FAKE_UUID
        # creating volume testdata
        self.volume_attrs['status'] = 'in-use'
        db.volume_create(self.context, self.volume_attrs)

        # start test
        self.assertRaises(exception.ImageNotFound,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

    def test_copy_volume_to_image_driver_not_initialized(self):
        # creating volume testdata
        db.volume_create(self.context, self.volume_attrs)

        # set initialized to False
        self.volume.driver._initialized = False

        # start test
        self.assertRaises(exception.DriverNotInitialized,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume.status)

    def test_copy_volume_to_image_driver_exception(self):
        self.image_meta['id'] = self.image_id

        image_service = fake_image.FakeImageService()
        # create new image in queued state
        queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9'
        queued_image_meta = image_service.show(self.context, self.image_id)
        queued_image_meta['id'] = queued_image_id
        queued_image_meta['status'] = 'queued'
        image_service.create(self.context, queued_image_meta)

        # create new image in saving state
        saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
        saving_image_meta = image_service.show(self.context, self.image_id)
        saving_image_meta['id'] = saving_image_id
        saving_image_meta['status'] = 'saving'
        image_service.create(self.context, saving_image_meta)

        # create volume
        self.volume_attrs['status'] = 'available'
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)

        with mock.patch.object(self.volume.driver,
                               'copy_volume_to_image') as
driver_copy_mock: driver_copy_mock.side_effect = exception.VolumeDriverException( "Error") # test with image not in queued state self.assertRaises(exception.VolumeDriverException, self.volume.copy_volume_to_image, self.context, self.volume_id, self.image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) # image shouldn't be deleted if it is not in queued state image_service.show(self.context, self.image_id) # test with image in queued state self.assertRaises(exception.VolumeDriverException, self.volume.copy_volume_to_image, self.context, self.volume_id, queued_image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) # queued image should be deleted self.assertRaises(exception.ImageNotFound, image_service.show, self.context, queued_image_id) # test with image in saving state self.assertRaises(exception.VolumeDriverException, self.volume.copy_volume_to_image, self.context, self.volume_id, saving_image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) # image in saving state should be deleted self.assertRaises(exception.ImageNotFound, image_service.show, self.context, saving_image_id) @test.testtools.skip('SKIP BUG #1173266') @mock.patch.object(QUOTAS, 'reserve') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(vol_manager.VolumeManager, 'create_volume') @mock.patch.object(fake_driver.FakeISCSIDriver, 'copy_volume_to_image') def _test_copy_volume_to_image_with_image_volume( self, mock_copy, mock_create, mock_quota_commit, mock_quota_reserve): self.flags(glance_api_version=2) self.volume.driver.configuration.image_upload_use_cinder_backend = True image_service = fake_image.FakeImageService() image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2' self.image_meta['id'] = image_id self.image_meta['status'] = 'queued' image_service.create(self.context, self.image_meta) # creating volume testdata self.volume_attrs['instance_uuid'] = None db.volume_create(self.context, self.volume_attrs) def fake_create(context, volume_id, **kwargs): db.volume_update(context, volume_id, {'status': 'available'}) mock_create.side_effect = fake_create # start test self.volume.copy_volume_to_image(self.context, self.volume_id, self.image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) # return create image image = image_service.show(self.context, image_id) image_service.delete(self.context, image_id) return image def test_copy_volume_to_image_with_image_volume(self): image = self._test_copy_volume_to_image_with_image_volume() self.assertTrue(image['locations'][0]['url'].startswith('cinder://')) def test_copy_volume_to_image_with_image_volume_qcow2(self): self.image_meta['disk_format'] = 'qcow2' image = self._test_copy_volume_to_image_with_image_volume() self.assertIsNone(image.get('locations')) @test.testtools.skip('SKIP BUG #1173266') @mock.patch.object(vol_manager.VolumeManager, 'delete_volume') @mock.patch.object(fake_image._FakeImageService, 'add_location', side_effect=exception.Invalid) def test_copy_volume_to_image_with_image_volume_failure( self, mock_add_location, mock_delete): image = self._test_copy_volume_to_image_with_image_volume() self.assertIsNone(image.get('locations')) self.assertTrue(mock_delete.called) class GetActiveByWindowTestCase(BaseVolumeTestCase): def setUp(self): super(GetActiveByWindowTestCase, self).setUp() self.ctx = context.get_admin_context(read_deleted="yes") 
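# NOTE: read_deleted='yes' lets these tests see the soft-deleted rows # created from the fixture data below.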
self.db_attrs = [ { 'id': fake.volume_id, 'host': 'devstack', 'project_id': fake.project_id, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), }, { 'id': fake.volume2_id, 'host': 'devstack', 'project_id': fake.project_id, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), }, { 'id': fake.volume3_id, 'host': 'devstack', 'project_id': fake.project_id, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), }, { 'id': fake.volume4_id, 'host': 'devstack', 'project_id': fake.project_id, 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), }, { 'id': fake.volume5_id, 'host': 'devstack', 'project_id': fake.project_id, 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), } ] def test_volume_get_active_by_window(self): # Find all volumes valid within a timeframe window. # Not in window - deleted before the window starts db.volume_create(self.ctx, self.db_attrs[0]) # In - deleted in window db.volume_create(self.ctx, self.db_attrs[1]) # In - deleted after window db.volume_create(self.ctx, self.db_attrs[2]) # In - created in window db.volume_create(self.context, self.db_attrs[3]) # Not in window - created after the window ends db.volume_create(self.context, self.db_attrs[4]) volumes = db.volume_get_active_by_window( self.context, datetime.datetime(1, 3, 1, 1, 1, 1), datetime.datetime(1, 4, 1, 1, 1, 1), project_id=fake.project_id) self.assertEqual(3, len(volumes)) self.assertEqual(fake.volume2_id, volumes[0].id) self.assertEqual(fake.volume3_id, volumes[1].id) self.assertEqual(fake.volume4_id, volumes[2].id) def test_snapshot_get_active_by_window(self): # Find all snapshots valid within a timeframe window. db.volume_create(self.context, {'id': fake.volume_id}) for i in range(5): self.db_attrs[i]['volume_id'] = fake.volume_id # Not in window - deleted before the window starts del self.db_attrs[0]['id'] snap1 = objects.Snapshot(self.ctx, **self.db_attrs[0]) snap1.create() # In - deleted in window del self.db_attrs[1]['id'] snap2 = objects.Snapshot(self.ctx, **self.db_attrs[1]) snap2.create() # In - deleted after window del self.db_attrs[2]['id'] snap3 = objects.Snapshot(self.ctx, **self.db_attrs[2]) snap3.create() # In - created in window del self.db_attrs[3]['id'] snap4 = objects.Snapshot(self.ctx, **self.db_attrs[3]) snap4.create() # Not in window - created after the window ends
del self.db_attrs[4]['id'] snap5 = objects.Snapshot(self.ctx, **self.db_attrs[4]) snap5.create() snapshots = objects.SnapshotList.get_active_by_window( self.context, datetime.datetime(1, 3, 1, 1, 1, 1), datetime.datetime(1, 4, 1, 1, 1, 1)).objects self.assertEqual(3, len(snapshots)) self.assertEqual(snap2.id, snapshots[0].id) self.assertEqual(fake.volume_id, snapshots[0].volume_id) self.assertEqual(snap3.id, snapshots[1].id) self.assertEqual(fake.volume_id, snapshots[1].volume_id) self.assertEqual(snap4.id, snapshots[2].id) self.assertEqual(fake.volume_id, snapshots[2].volume_id) class DriverTestCase(test.TestCase): """Base Test class for Drivers.""" driver_name = "cinder.volume.driver.FakeBaseDriver" def setUp(self): super(DriverTestCase, self).setUp() vol_tmpdir = tempfile.mkdtemp() self.flags(volume_driver=self.driver_name, volumes_dir=vol_tmpdir) self.volume = importutils.import_object(CONF.volume_manager) self.context = context.get_admin_context() self.output = "" self.configuration = conf.Configuration(None) self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True) def _fake_execute(_command, *_args, **_kwargs): """Fake _execute.""" return self.output, None exec_patcher = mock.patch.object(self.volume.driver, '_execute', _fake_execute) exec_patcher.start() self.addCleanup(exec_patcher.stop) self.volume.driver.set_initialized() self.addCleanup(self._cleanup) def _cleanup(self): try: shutil.rmtree(CONF.volumes_dir) except OSError: pass def _attach_volume(self): """Attach volumes to an instance.""" return [] def _detach_volume(self, volume_id_list): """Detach volumes from an instance.""" for volume_id in volume_id_list: db.volume_detached(self.context, volume_id) self.volume.delete_volume(self.context, volume_id) class GenericVolumeDriverTestCase(DriverTestCase): """Test case for VolumeDriver.""" driver_name = "cinder.tests.unit.fake_driver.LoggingVolumeDriver" @mock.patch.object(utils, 'temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') @mock.patch.object(db.sqlalchemy.api, 'volume_get') def test_backup_volume_available(self, mock_volume_get, mock_get_connector_properties, mock_file_open, mock_temporary_chown): vol = tests_utils.create_volume(self.context) self.context.user_id = fake.user_id self.context.project_id = fake.project_id backup = tests_utils.create_backup(self.context, vol['id']) backup_obj = objects.Backup.get_by_id(self.context, backup.id) properties = {} attach_info = {'device': {'path': '/dev/null'}} backup_service = mock.Mock() self.volume.driver._attach_volume = mock.MagicMock() self.volume.driver._detach_volume = mock.MagicMock() self.volume.driver.terminate_connection = mock.MagicMock() self.volume.driver.create_snapshot = mock.MagicMock() self.volume.driver.delete_snapshot = mock.MagicMock() mock_volume_get.return_value = vol mock_get_connector_properties.return_value = properties f = mock_file_open.return_value = open('/dev/null', 'rb') backup_service.backup(backup_obj, f, None) self.volume.driver._attach_volume.return_value = attach_info, vol self.volume.driver.backup_volume(self.context, backup_obj, backup_service) mock_volume_get.assert_called_with(self.context, vol['id']) @mock.patch.object(utils, 'temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') @mock.patch.object(db.sqlalchemy.api, 'volume_get') def test_backup_volume_inuse_temp_volume(self, mock_volume_get, mock_get_connector_properties, 
mock_file_open, mock_temporary_chown): vol = tests_utils.create_volume(self.context, status='backing-up', previous_status='in-use') temp_vol = tests_utils.create_volume(self.context) self.context.user_id = fake.user_id self.context.project_id = fake.project_id backup = tests_utils.create_backup(self.context, vol['id']) backup_obj = objects.Backup.get_by_id(self.context, backup.id) properties = {} attach_info = {'device': {'path': '/dev/null'}} backup_service = mock.Mock() self.volume.driver._attach_volume = mock.MagicMock() self.volume.driver._detach_volume = mock.MagicMock() self.volume.driver.terminate_connection = mock.MagicMock() self.volume.driver._create_temp_cloned_volume = mock.MagicMock() self.volume.driver._delete_temp_volume = mock.MagicMock() mock_volume_get.return_value = vol self.volume.driver._create_temp_cloned_volume.return_value = temp_vol mock_get_connector_properties.return_value = properties f = mock_file_open.return_value = open('/dev/null', 'rb') backup_service.backup(backup_obj, f, None) self.volume.driver._attach_volume.return_value = attach_info, vol self.volume.driver.backup_volume(self.context, backup_obj, backup_service) mock_volume_get.assert_called_with(self.context, vol['id']) self.volume.driver._create_temp_cloned_volume.assert_called_once_with( self.context, vol) self.volume.driver._delete_temp_volume.assert_called_once_with( self.context, temp_vol) @mock.patch.object(cinder.volume.driver.VolumeDriver, 'backup_use_temp_snapshot', return_value=True) @mock.patch.object(utils, 'temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os_brick.initiator.connector.LocalConnector, 'connect_volume') @mock.patch.object(os_brick.initiator.connector.LocalConnector, 'check_valid_device', return_value=True) @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties', return_value={}) @mock.patch.object(db.sqlalchemy.api, 'volume_get') def test_backup_volume_inuse_temp_snapshot(self, mock_volume_get, mock_get_connector_properties, mock_check_device, mock_connect_volume, mock_file_open, mock_temporary_chown, mock_temp_snapshot): vol = tests_utils.create_volume(self.context, status='backing-up', previous_status='in-use') self.context.user_id = fake.user_id self.context.project_id = fake.project_id backup = tests_utils.create_backup(self.context, vol['id']) backup_obj = objects.Backup.get_by_id(self.context, backup.id) attach_info = {'device': {'path': '/dev/null'}, 'driver_volume_type': 'LOCAL', 'data': {}} backup_service = mock.Mock() self.volume.driver.terminate_connection_snapshot = mock.MagicMock() self.volume.driver.initialize_connection_snapshot = mock.MagicMock() self.volume.driver.create_snapshot = mock.MagicMock() self.volume.driver.delete_snapshot = mock.MagicMock() self.volume.driver.create_export_snapshot = mock.MagicMock() self.volume.driver.remove_export_snapshot = mock.MagicMock() mock_volume_get.return_value = vol mock_connect_volume.return_value = {'type': 'local', 'path': '/dev/null'} f = mock_file_open.return_value = open('/dev/null', 'rb') backup_service.backup(backup_obj, f, None) self.volume.driver.initialize_connection_snapshot.return_value = ( attach_info) self.volume.driver.create_export_snapshot.return_value = ( {'provider_location': '/dev/null', 'provider_auth': 'xxxxxxxx'}) self.volume.driver.backup_volume(self.context, backup_obj, backup_service) mock_volume_get.assert_called_with(self.context, vol['id']) self.assertTrue(self.volume.driver.create_snapshot.called)
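# NOTE: the temp-snapshot path should also export the snapshot, connect # to it, and tear all of that down once the backup completes; the # assertions below verify that each step was invoked.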
self.assertTrue(self.volume.driver.create_export_snapshot.called) self.assertTrue( self.volume.driver.initialize_connection_snapshot.called) self.assertTrue( self.volume.driver.terminate_connection_snapshot.called) self.assertTrue(self.volume.driver.remove_export_snapshot.called) self.assertTrue(self.volume.driver.delete_snapshot.called) @mock.patch.object(utils, 'temporary_chown') @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') @mock.patch('six.moves.builtins.open') def test_restore_backup(self, mock_open, mock_get_connector_properties, mock_temporary_chown): dev_null = '/dev/null' vol = tests_utils.create_volume(self.context) backup = {'volume_id': vol['id'], 'id': 'backup-for-%s' % vol['id']} properties = {} attach_info = {'device': {'path': dev_null}} root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf' volume_file = mock.MagicMock() mock_open.return_value.__enter__.return_value = volume_file mock_get_connector_properties.return_value = properties self.volume.driver._attach_volume = mock.MagicMock() self.volume.driver._attach_volume.return_value = attach_info, vol self.volume.driver._detach_volume = mock.MagicMock() self.volume.driver.terminate_connection = mock.MagicMock() self.volume.driver.secure_file_operations_enabled = mock.MagicMock() self.volume.driver.secure_file_operations_enabled.side_effect = (False, True) backup_service = mock.MagicMock() for i in (1, 2): self.volume.driver.restore_backup(self.context, backup, vol, backup_service) mock_get_connector_properties.assert_called_with(root_helper, CONF.my_ip, False, False) self.volume.driver._attach_volume.assert_called_with( self.context, vol, properties) self.assertEqual(i, self.volume.driver._attach_volume.call_count) self.volume.driver._detach_volume.assert_called_with( self.context, attach_info, vol, properties) self.assertEqual(i, self.volume.driver._detach_volume.call_count) self.volume.driver.secure_file_operations_enabled.\ assert_called_with() self.assertEqual( i, self.volume.driver.secure_file_operations_enabled.call_count ) mock_temporary_chown.assert_called_once_with(dev_null) mock_open.assert_called_with(dev_null, 'wb') self.assertEqual(i, mock_open.call_count) backup_service.restore.assert_called_with(backup, vol['id'], volume_file) self.assertEqual(i, backup_service.restore.call_count) def test_get_backup_device_available(self): vol = tests_utils.create_volume(self.context) self.context.user_id = fake.user_id self.context.project_id = fake.project_id backup = tests_utils.create_backup(self.context, vol['id']) backup_obj = objects.Backup.get_by_id(self.context, backup.id) (backup_device, is_snapshot) = self.volume.driver.get_backup_device( self.context, backup_obj) volume = objects.Volume.get_by_id(self.context, vol.id) self.assertEqual(volume, backup_device) self.assertFalse(is_snapshot) backup_obj = objects.Backup.get_by_id(self.context, backup.id) self.assertIsNone(backup_obj.temp_volume_id) def test_get_backup_device_in_use(self): vol = tests_utils.create_volume(self.context, status='backing-up', previous_status='in-use') temp_vol = tests_utils.create_volume(self.context) self.context.user_id = fake.user_id self.context.project_id = fake.project_id backup = tests_utils.create_backup(self.context, vol['id']) backup_obj = objects.Backup.get_by_id(self.context, backup.id) with mock.patch.object( self.volume.driver, '_create_temp_cloned_volume') as mock_create_temp: mock_create_temp.return_value = temp_vol (backup_device, is_snapshot) = (
self.volume.driver.get_backup_device(self.context, backup_obj)) self.assertEqual(temp_vol, backup_device) self.assertFalse(is_snapshot) backup_obj = objects.Backup.get_by_id(self.context, backup.id) self.assertEqual(temp_vol.id, backup_obj.temp_volume_id) def test__create_temp_volume_from_snapshot(self): volume_dict = {'id': fake.snapshot_id, 'host': 'fakehost', 'availability_zone': 'fakezone', 'size': 1} vol = fake_volume.fake_volume_obj(self.context, **volume_dict) snapshot = fake_snapshot.fake_snapshot_obj(self.context) with mock.patch.object( self.volume.driver, 'create_volume_from_snapshot'): temp_vol = self.volume.driver._create_temp_volume_from_snapshot( self.context, vol, snapshot) self.assertEqual('detached', temp_vol['attach_status']) self.assertEqual('fakezone', temp_vol['availability_zone']) @mock.patch.object(utils, 'brick_get_connector_properties') @mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume') @mock.patch.object(cinder.volume.manager.VolumeManager, '_detach_volume') @mock.patch.object(volutils, 'copy_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities') def test_copy_volume_data_mgr(self, mock_get_capabilities, mock_copy, mock_detach, mock_attach, mock_get_connector): """Test function of _copy_volume_data.""" src_vol = tests_utils.create_volume(self.context, size=1, host=CONF.host) dest_vol = tests_utils.create_volume(self.context, size=1, host=CONF.host) mock_get_connector.return_value = {} self.volume.driver._throttle = mock.MagicMock() attach_expected = [ mock.call(self.context, dest_vol, {}, remote=False), mock.call(self.context, src_vol, {}, remote=False)] detach_expected = [ mock.call(self.context, {'device': {'path': 'bar'}}, dest_vol, {}, force=False, remote=False), mock.call(self.context, {'device': {'path': 'foo'}}, src_vol, {}, force=False, remote=False)] attach_volume_returns = [ {'device': {'path': 'bar'}}, {'device': {'path': 'foo'}} ] # Test case for sparse_copy_volume = False mock_attach.side_effect = attach_volume_returns mock_get_capabilities.return_value = {} self.volume._copy_volume_data(self.context, src_vol, dest_vol) self.assertEqual(attach_expected, mock_attach.mock_calls) mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=False) self.assertEqual(detach_expected, mock_detach.mock_calls) # Test case for sparse_copy_volume = True mock_attach.reset_mock() mock_detach.reset_mock() mock_attach.side_effect = attach_volume_returns mock_get_capabilities.return_value = {'sparse_copy_volume': True} self.volume._copy_volume_data(self.context, src_vol, dest_vol) self.assertEqual(attach_expected, mock_attach.mock_calls) mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=True) self.assertEqual(detach_expected, mock_detach.mock_calls) # cleanup resource db.volume_destroy(self.context, src_vol['id']) db.volume_destroy(self.context, dest_vol['id']) @ddt.ddt class LVMVolumeDriverTestCase(DriverTestCase): """Test case for VolumeDriver""" driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" FAKE_VOLUME = {'name': 'test1', 'id': 'test1'} @mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export') def test_delete_volume_invalid_parameter(self, _mock_create_export): self.configuration.volume_clear = 'zero' self.configuration.volume_clear_size = 0 lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) self.mox.StubOutWithMock(os.path, 'exists') os.path.exists(mox.IgnoreArg()).AndReturn(True) self.mox.ReplayAll() # Test volume without 'size' field and 'volume_size' field 
self.assertRaises(exception.InvalidParameterValue, lvm_driver._delete_volume, self.FAKE_VOLUME) @mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export') def test_delete_volume_bad_path(self, _mock_create_export): self.configuration.volume_clear = 'zero' self.configuration.volume_clear_size = 0 self.configuration.volume_type = 'default' volume = dict(self.FAKE_VOLUME, size=1) lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) self.mox.StubOutWithMock(os.path, 'exists') os.path.exists(mox.IgnoreArg()).AndReturn(False) self.mox.ReplayAll() self.assertRaises(exception.VolumeBackendAPIException, lvm_driver._delete_volume, volume) @mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export') def test_delete_volume_thinlvm_snap(self, _mock_create_export): self.configuration.volume_clear = 'zero' self.configuration.volume_clear_size = 0 self.configuration.lvm_type = 'thin' self.configuration.iscsi_helper = 'tgtadm' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, vg_obj=mox.MockAnything(), db=db) # Ensures that copy_volume is not called for ThinLVM self.mox.StubOutWithMock(volutils, 'copy_volume') self.mox.StubOutWithMock(volutils, 'clear_volume') self.mox.StubOutWithMock(lvm_driver, '_execute') self.mox.ReplayAll() uuid = '00000000-0000-0000-0000-c3aa7ee01536' fake_snapshot = {'name': 'volume-' + uuid, 'id': uuid, 'size': 123} lvm_driver._delete_volume(fake_snapshot, is_snapshot=True) def test_check_for_setup_error(self): def get_all_volume_groups(vg): return [{'name': 'cinder-volumes'}] self.stubs.Set(volutils, 'get_all_volume_groups', get_all_volume_groups) vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') configuration = conf.Configuration(fake_opt, 'fake_group') lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=vg_obj, db=db) lvm_driver.delete_snapshot = mock.Mock() self.stubs.Set(volutils, 'get_all_volume_groups', get_all_volume_groups) volume = tests_utils.create_volume(self.context, host=socket.gethostname()) volume_id = volume['id'] backup = {} backup['volume_id'] = volume_id backup['user_id'] = fake.user_id backup['project_id'] = fake.project_id backup['host'] = socket.gethostname() backup['availability_zone'] = '1' backup['display_name'] = 'test_check_for_setup_error' backup['display_description'] = 'test_check_for_setup_error' backup['container'] = 'fake' backup['status'] = fields.BackupStatus.CREATING backup['fail_reason'] = '' backup['service'] = 'fake' backup['parent_id'] = None backup['size'] = 5 * 1024 * 1024 backup['object_count'] = 22 db.backup_create(self.context, backup) lvm_driver.check_for_setup_error() @mock.patch.object(utils, 'temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') @mock.patch.object(db.sqlalchemy.api, 'volume_get') def test_backup_volume(self, mock_volume_get, mock_get_connector_properties, mock_file_open, mock_temporary_chown): vol = tests_utils.create_volume(self.context) self.context.user_id = fake.user_id self.context.project_id = fake.project_id backup = tests_utils.create_backup(self.context, vol['id']) backup_obj = objects.Backup.get_by_id(self.context, backup.id) properties = {} attach_info = {'device': {'path': '/dev/null'}} backup_service = mock.Mock() self.volume.driver._detach_volume = mock.MagicMock() self.volume.driver._attach_volume = mock.MagicMock() self.volume.driver.terminate_connection = mock.MagicMock() mock_volume_get.return_value = vol 
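# NOTE: the remaining mocks return canned values so backup_volume() can # run without touching a real LVM backend or block device.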
mock_get_connector_properties.return_value = properties f = mock_file_open.return_value = open('/dev/null', 'rb') backup_service.backup(backup_obj, f, None) self.volume.driver._attach_volume.return_value = attach_info self.volume.driver.backup_volume(self.context, backup_obj, backup_service) mock_volume_get.assert_called_with(self.context, vol['id']) def test_retype_volume(self): vol = tests_utils.create_volume(self.context) new_type = fake.volume_type_id diff = {} host = 'fake_host' retyped = self.volume.driver.retype(self.context, vol, new_type, diff, host) self.assertTrue(retyped) def test_update_migrated_volume(self): fake_volume_id = fake.volume_id fake_new_volume_id = fake.volume2_id fake_provider = 'fake_provider' original_volume_name = CONF.volume_name_template % fake_volume_id current_name = CONF.volume_name_template % fake_new_volume_id fake_volume = tests_utils.create_volume(self.context) fake_volume['id'] = fake_volume_id fake_new_volume = tests_utils.create_volume(self.context) fake_new_volume['id'] = fake_new_volume_id fake_new_volume['provider_location'] = fake_provider fake_vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') with mock.patch.object(self.volume.driver, 'vg') as vg: vg.return_value = fake_vg vg.rename_volume.return_value = None update = self.volume.driver.update_migrated_volume(self.context, fake_volume, fake_new_volume, 'available') vg.rename_volume.assert_called_once_with(current_name, original_volume_name) self.assertEqual({'_name_id': None, 'provider_location': None}, update) vg.rename_volume.reset_mock() vg.rename_volume.side_effect = processutils.ProcessExecutionError update = self.volume.driver.update_migrated_volume(self.context, fake_volume, fake_new_volume, 'available') vg.rename_volume.assert_called_once_with(current_name, original_volume_name) self.assertEqual({'_name_id': fake_new_volume_id, 'provider_location': fake_provider}, update) @mock.patch.object(utils, 'temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') @mock.patch.object(db.sqlalchemy.api, 'volume_get') def test_backup_volume_inuse(self, mock_volume_get, mock_get_connector_properties, mock_file_open, mock_temporary_chown): vol = tests_utils.create_volume(self.context, status='backing-up', previous_status='in-use') self.context.user_id = fake.user_id self.context.project_id = fake.project_id mock_volume_get.return_value = vol temp_snapshot = tests_utils.create_snapshot(self.context, vol['id']) backup = tests_utils.create_backup(self.context, vol['id']) backup_obj = objects.Backup.get_by_id(self.context, backup.id) properties = {} attach_info = {'device': {'path': '/dev/null'}} backup_service = mock.Mock() self.volume.driver._detach_volume = mock.MagicMock() self.volume.driver._attach_volume = mock.MagicMock() self.volume.driver.terminate_connection = mock.MagicMock() self.volume.driver._create_temp_snapshot = mock.MagicMock() self.volume.driver._delete_temp_snapshot = mock.MagicMock() mock_get_connector_properties.return_value = properties f = mock_file_open.return_value = open('/dev/null', 'rb') backup_service.backup(backup_obj, f, None) self.volume.driver._attach_volume.return_value = attach_info self.volume.driver._create_temp_snapshot.return_value = temp_snapshot self.volume.driver.backup_volume(self.context, backup_obj, backup_service) mock_volume_get.assert_called_with(self.context, vol['id']) self.volume.driver._create_temp_snapshot.assert_called_once_with( self.context, vol) 
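# NOTE: the temporary snapshot taken for the in-use backup must be # cleaned up once the backup is done.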
self.volume.driver._delete_temp_snapshot.assert_called_once_with( self.context, temp_snapshot) def test_create_volume_from_snapshot_none_sparse(self): with mock.patch.object(self.volume.driver, 'vg'), \ mock.patch.object(self.volume.driver, '_create_volume'), \ mock.patch.object(volutils, 'copy_volume') as mock_copy: # Test case for thick LVM src_volume = tests_utils.create_volume(self.context) snapshot_ref = tests_utils.create_snapshot(self.context, src_volume['id']) dst_volume = tests_utils.create_volume(self.context) self.volume.driver.create_volume_from_snapshot(dst_volume, snapshot_ref) volume_path = self.volume.driver.local_path(dst_volume) snapshot_path = self.volume.driver.local_path(snapshot_ref) volume_size = 1024 block_size = '1M' mock_copy.assert_called_with(snapshot_path, volume_path, volume_size, block_size, execute=self.volume.driver._execute, sparse=False) def test_create_volume_from_snapshot_sparse(self): self.configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) with mock.patch.object(lvm_driver, 'vg'), \ mock.patch.object(lvm_driver, '_create_volume'), \ mock.patch.object(volutils, 'copy_volume') as mock_copy: # Test case for thin LVM lvm_driver._sparse_copy_volume = True src_volume = tests_utils.create_volume(self.context) snapshot_ref = tests_utils.create_snapshot(self.context, src_volume['id']) dst_volume = tests_utils.create_volume(self.context) lvm_driver.create_volume_from_snapshot(dst_volume, snapshot_ref) volume_path = lvm_driver.local_path(dst_volume) snapshot_path = lvm_driver.local_path(snapshot_ref) volume_size = 1024 block_size = '1M' mock_copy.assert_called_with(snapshot_path, volume_path, volume_size, block_size, execute=lvm_driver._execute, sparse=True) @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=True) def test_lvm_type_auto_thin_pool_exists(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=vg_obj) lvm_driver.check_for_setup_error() self.assertEqual('thin', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch.object(cinder.brick.local_dev.lvm.LVM, 'get_volumes', return_value=[]) @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=True) def test_lvm_type_auto_no_lvs(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=vg_obj) lvm_driver.check_for_setup_error() self.assertEqual('thin', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') 
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=False) def test_lvm_type_auto_no_thin_support(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) lvm_driver.check_for_setup_error() self.assertEqual('default', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_volume') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=False) def test_lvm_type_auto_no_thin_pool(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) lvm_driver.check_for_setup_error() self.assertEqual('default', lvm_driver.configuration.lvm_type) @mock.patch.object(lvm.LVMVolumeDriver, 'extend_volume') def test_create_cloned_volume_by_thin_snapshot(self, mock_extend): self.configuration.lvm_type = 'thin' fake_vg = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default')) lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, vg_obj=fake_vg, db=db) fake_volume = tests_utils.create_volume(self.context, size=1) fake_new_volume = tests_utils.create_volume(self.context, size=2) lvm_driver.create_cloned_volume(fake_new_volume, fake_volume) fake_vg.create_lv_snapshot.assert_called_once_with( fake_new_volume['name'], fake_volume['name'], 'thin') mock_extend.assert_called_once_with(fake_new_volume, 2) fake_vg.activate_lv.assert_called_once_with( fake_new_volume['name'], is_snapshot=True, permanent=True) def test_lvm_migrate_volume_no_loc_info(self): host = {'capabilities': {}} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_bad_loc_info(self): capabilities = {'location_info': 'foo'} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_diff_driver(self): capabilities = {'location_info': 'FooDriver:foo:bar:default:0'} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_diff_host(self): capabilities = {'location_info': 'LVMVolumeDriver:foo:bar:default:0'} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_in_use(self): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'} moved, 
model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) @mock.patch.object(volutils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) def test_lvm_migrate_volume_same_volume_group(self, vgs): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.migrate_volume, self.context, vol, host) @mock.patch.object(lvm.LVMVolumeDriver, '_create_volume') @mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes') @mock.patch.object(brick_lvm.LVM, 'delete') @mock.patch.object(volutils, 'copy_volume', side_effect=processutils.ProcessExecutionError) @mock.patch.object(volutils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) def test_lvm_migrate_volume_volume_copy_error(self, vgs, copy_volume, mock_delete, mock_pvs, mock_create): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes-old', False, None, 'default') self.assertRaises(processutils.ProcessExecutionError, self.volume.driver.migrate_volume, self.context, vol, host) mock_delete.assert_called_once_with(vol) def test_lvm_volume_group_missing(self): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-3:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} def get_all_volume_groups(): return [{'name': 'cinder-volumes-2'}] self.stubs.Set(volutils, 'get_all_volume_groups', get_all_volume_groups) self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_proceed(self): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-2:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'} def fake_execute(*args, **kwargs): pass def get_all_volume_groups(): # NOTE(flaper87) Return just the destination # host to test the check of dest VG existence. 
return [{'name': 'cinder-volumes-2'}] def _fake_get_all_physical_volumes(obj, root_helper, vg_name): return [{}] with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes', return_value = [{}]), \ mock.patch.object(self.volume.driver, '_execute') \ as mock_execute, \ mock.patch.object(volutils, 'copy_volume') as mock_copy, \ mock.patch.object(volutils, 'get_all_volume_groups', side_effect = get_all_volume_groups), \ mock.patch.object(self.volume.driver, '_delete_volume'): self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') moved, model_update = \ self.volume.driver.migrate_volume(self.context, vol, host) self.assertTrue(moved) self.assertIsNone(model_update) mock_copy.assert_called_once_with( '/dev/mapper/cinder--volumes-testvol', '/dev/mapper/cinder--volumes--2-testvol', 2048, '1M', execute=mock_execute, sparse=False) def test_lvm_migrate_volume_proceed_with_thin(self): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-2:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'} def fake_execute(*args, **kwargs): pass def get_all_volume_groups(): # NOTE(flaper87) Return just the destination # host to test the check of dest VG existence. return [{'name': 'cinder-volumes-2'}] def _fake_get_all_physical_volumes(obj, root_helper, vg_name): return [{}] self.configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes', return_value = [{}]), \ mock.patch.object(lvm_driver, '_execute') \ as mock_execute, \ mock.patch.object(volutils, 'copy_volume') as mock_copy, \ mock.patch.object(volutils, 'get_all_volume_groups', side_effect = get_all_volume_groups), \ mock.patch.object(lvm_driver, '_delete_volume'): lvm_driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver._sparse_copy_volume = True moved, model_update = \ lvm_driver.migrate_volume(self.context, vol, host) self.assertTrue(moved) self.assertIsNone(model_update) mock_copy.assert_called_once_with( '/dev/mapper/cinder--volumes-testvol', '/dev/mapper/cinder--volumes--2-testvol', 2048, '1M', execute=mock_execute, sparse=True) @staticmethod def _get_manage_existing_lvs(name): """Helper method used by the manage_existing tests below.""" lvs = [{'name': 'fake_lv', 'size': '1.75'}, {'name': 'fake_lv_bad_size', 'size': 'Not a float'}] for lv in lvs: if lv['name'] == name: return lv def _setup_stubs_for_manage_existing(self): """Helper to set up common stubs for the manage_existing tests.""" self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') self.stubs.Set(self.volume.driver.vg, 'get_volume', self._get_manage_existing_lvs) @mock.patch.object(db.sqlalchemy.api, 'volume_get', side_effect=exception.VolumeNotFound( volume_id='d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')) def test_lvm_manage_existing_not_found(self, mock_vol_get): self._setup_stubs_for_manage_existing() vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' ref = {'source-name': 'fake_lv'} vol = {'name': vol_name, 'id': fake.volume_id, 'size': 0} with mock.patch.object(self.volume.driver.vg, 'rename_volume'): model_update = self.volume.driver.manage_existing(vol, ref) self.assertIsNone(model_update) @mock.patch.object(db.sqlalchemy.api, 'volume_get') def test_lvm_manage_existing_already_managed(self, mock_conf): self._setup_stubs_for_manage_existing() 
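# NOTE: an LV whose name already matches volume_name_template is treated # as managed, so manage_existing must refuse it.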
mock_conf.volume_name_template = 'volume-%s' vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' ref = {'source-name': vol_name} vol = {'name': 'test', 'id': 1, 'size': 0} with mock.patch.object(self.volume.driver.vg, 'rename_volume'): self.assertRaises(exception.ManageExistingAlreadyManaged, self.volume.driver.manage_existing, vol, ref) def test_lvm_manage_existing(self): """Good pass on managing an LVM volume. This test case ensures that, when a logical volume with the specified name exists, and the size is as expected, no error is returned from driver.manage_existing, and that the rename_volume function is called in the Brick LVM code with the correct arguments. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv'} vol = {'name': 'test', 'id': fake.volume_id, 'size': 0} def _rename_volume(old_name, new_name): self.assertEqual(ref['source-name'], old_name) self.assertEqual(vol['name'], new_name) self.stubs.Set(self.volume.driver.vg, 'rename_volume', _rename_volume) size = self.volume.driver.manage_existing_get_size(vol, ref) self.assertEqual(2, size) model_update = self.volume.driver.manage_existing(vol, ref) self.assertIsNone(model_update) def test_lvm_manage_existing_bad_size(self): """Make sure correct exception on bad size returned from LVM. This test case ensures that the correct exception is raised when the information returned for the existing LVs is not in the format that the manage_existing code expects. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv_bad_size'} vol = {'name': 'test', 'id': fake.volume_id, 'size': 2} self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.manage_existing_get_size, vol, ref) def test_lvm_manage_existing_bad_ref(self): """Error case where specified LV doesn't exist. This test case ensures that the correct exception is raised when the caller attempts to manage a volume that does not exist. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_nonexistent_lv'} vol = {'name': 'test', 'id': 1, 'size': 0, 'status': 'available'} self.assertRaises(exception.ManageExistingInvalidReference, self.volume.driver.manage_existing_get_size, vol, ref) def test_lvm_manage_existing_snapshot(self): """Good pass on managing an LVM snapshot. This test case ensures that, when a logical volume's snapshot with the specified name exists, and the size is as expected, no error is returned from driver.manage_existing_snapshot, and that the rename_volume function is called in the Brick LVM code with the correct arguments. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv'} snp = {'name': 'test', 'id': fake.snapshot_id, 'size': 0} def _rename_volume(old_name, new_name): self.assertEqual(ref['source-name'], old_name) self.assertEqual(snp['name'], new_name) with mock.patch.object(self.volume.driver.vg, 'rename_volume') as \ mock_rename_volume: mock_rename_volume.side_effect = _rename_volume size = self.volume.driver.manage_existing_snapshot_get_size(snp, ref) self.assertEqual(2, size) model_update = self.volume.driver.manage_existing_snapshot(snp, ref) self.assertIsNone(model_update) def test_lvm_manage_existing_snapshot_bad_ref(self): """Error case where specified LV snapshot doesn't exist. This test case ensures that the correct exception is raised when the caller attempts to manage a snapshot that does not exist.
""" self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_nonexistent_lv'} snp = { 'name': 'test', 'id': fake.snapshot_id, 'size': 0, 'status': 'available', } self.assertRaises(exception.ManageExistingInvalidReference, self.volume.driver.manage_existing_snapshot_get_size, snp, ref) def test_lvm_manage_existing_snapshot_bad_size(self): """Make sure correct exception on bad size returned from LVM. This test case ensures that the correct exception is raised when the information returned for the existing LVs is not in the format that the manage_existing_snapshot code expects. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv_bad_size'} snp = {'name': 'test', 'id': fake.snapshot_id, 'size': 2} self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.manage_existing_snapshot_get_size, snp, ref) def test_lvm_unmanage(self): volume = tests_utils.create_volume(self.context, status='available', size=1, host=CONF.host) ret = self.volume.driver.unmanage(volume) self.assertIsNone(ret) # Global setting, LVM setting, expected outcome @ddt.data((10.0, 2.0, 2.0)) @ddt.data((10.0, None, 10.0)) @ddt.unpack def test_lvm_max_over_subscription_ratio(self, global_value, lvm_value, expected_value): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.max_over_subscription_ratio = global_value configuration.lvm_max_over_subscription_ratio = lvm_value fake_vg = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default')) lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=fake_vg, db=db) self.assertEqual(expected_value, lvm_driver.configuration.max_over_subscription_ratio) class ISCSITestCase(DriverTestCase): """Test Case for ISCSIDriver""" driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" def setUp(self): super(ISCSITestCase, self).setUp() self.configuration = mox.MockObject(conf.Configuration) self.configuration.iscsi_target_prefix = 'iqn.2010-10.org.openstack:' self.configuration.iscsi_ip_address = '0.0.0.0' self.configuration.iscsi_port = 3260 def _attach_volume(self): """Attach volumes to an instance.""" volume_id_list = [] for index in range(3): vol = {} vol['size'] = 0 vol_ref = db.volume_create(self.context, vol) self.volume.create_volume(self.context, vol_ref['id']) vol_ref = db.volume_get(self.context, vol_ref['id']) # each volume has a different mountpoint mountpoint = "/dev/sd" + chr((ord('b') + index)) instance_uuid = '12345678-1234-5678-1234-567812345678' db.volume_attached(self.context, vol_ref['id'], instance_uuid, mountpoint) volume_id_list.append(vol_ref['id']) return volume_id_list def test_do_iscsi_discovery(self): self.configuration = conf.Configuration(None) iscsi_driver = \ cinder.volume.targets.tgt.TgtAdm( configuration=self.configuration) utils.execute = lambda *a, **kw: \ ("%s dummy" % CONF.iscsi_ip_address, '') volume = {"name": "dummy", "host": "0.0.0.0", "id": "12345678-1234-5678-1234-567812345678"} iscsi_driver._do_iscsi_discovery(volume) def test_get_iscsi_properties(self): volume = {"provider_location": '', "id": "0", "provider_auth": "a b c", "attached_mode": "rw"} iscsi_driver = \ cinder.volume.targets.tgt.TgtAdm(configuration=self.configuration) iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0" result = iscsi_driver._get_iscsi_properties(volume) self.assertEqual("0.0.0.0:0000", result["target_portal"]) self.assertEqual("iqn:iqn", result["target_iqn"]) self.assertEqual(0, result["target_lun"]) def test_get_iscsi_properties_multiple_portals(self): 
volume = {"provider_location": '1.1.1.1:3260;2.2.2.2:3261,1 iqn:iqn 0', "id": "0", "provider_auth": "a b c", "attached_mode": "rw"} iscsi_driver = \ cinder.volume.targets.tgt.TgtAdm(configuration=self.configuration) result = iscsi_driver._get_iscsi_properties(volume) self.assertEqual("1.1.1.1:3260", result["target_portal"]) self.assertEqual("iqn:iqn", result["target_iqn"]) self.assertEqual(0, result["target_lun"]) self.assertEqual(["1.1.1.1:3260", "2.2.2.2:3261"], result["target_portals"]) self.assertEqual(["iqn:iqn", "iqn:iqn"], result["target_iqns"]) self.assertEqual([0, 0], result["target_luns"]) @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lvm_version', return_value=(2, 2, 100)) def test_get_volume_stats(self, _mock_get_version): def _fake_get_all_physical_volumes(obj, root_helper, vg_name): return [{}] @staticmethod def _fake_get_all_volume_groups(root_helper, vg_name=None): return [{'name': 'cinder-volumes', 'size': '5.52', 'available': '0.52', 'lv_count': '2', 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}] def _fake_get_volumes(obj, lv_name=None): return [{'vg': 'fake_vg', 'name': 'fake_vol', 'size': '1000'}] self.stubs.Set(brick_lvm.LVM, 'get_all_volume_groups', _fake_get_all_volume_groups) self.stubs.Set(brick_lvm.LVM, 'get_all_physical_volumes', _fake_get_all_physical_volumes) self.stubs.Set(brick_lvm.LVM, 'get_volumes', _fake_get_volumes) self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo') self.volume.driver._update_volume_stats() stats = self.volume.driver._stats self.assertEqual( float('5.52'), stats['pools'][0]['total_capacity_gb']) self.assertEqual( float('0.52'), stats['pools'][0]['free_capacity_gb']) self.assertEqual( float('5.0'), stats['pools'][0]['provisioned_capacity_gb']) self.assertEqual( int('1'), stats['pools'][0]['total_volumes']) self.assertFalse(stats['sparse_copy_volume']) # Check value of sparse_copy_volume for thin enabled case. # This value is set in check_for_setup_error. self.configuration = conf.Configuration(None) self.configuration.lvm_type = 'thin' vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db, vg_obj=vg_obj) lvm_driver.check_for_setup_error() lvm_driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo') lvm_driver._update_volume_stats() stats = lvm_driver._stats self.assertTrue(stats['sparse_copy_volume']) def test_validate_connector(self): iscsi_driver =\ cinder.volume.targets.tgt.TgtAdm( configuration=self.configuration) # Validate a valid connector connector = {'ip': '10.0.0.2', 'host': 'fakehost', 'initiator': 'iqn.2012-07.org.fake:01'} iscsi_driver.validate_connector(connector) # Validate a connector without the initiator connector = {'ip': '10.0.0.2', 'host': 'fakehost'} self.assertRaises(exception.InvalidConnectorException, iscsi_driver.validate_connector, connector) class FibreChannelTestCase(DriverTestCase): """Test Case for FibreChannelDriver.""" driver_name = "cinder.volume.driver.FibreChannelDriver" def test_initialize_connection(self): self.assertRaises(NotImplementedError, self.volume.driver.initialize_connection, {}, {}) def test_validate_connector(self): """validate_connector() successful use case. validate_connector() does not throw an exception when wwpns and wwnns are both set and both are not empty. 
""" connector = {'wwpns': ["not empty"], 'wwnns': ["not empty"]} self.volume.driver.validate_connector(connector) def test_validate_connector_no_wwpns(self): """validate_connector() throws exception when it has no wwpns.""" connector = {'wwnns': ["not empty"]} self.assertRaises(exception.InvalidConnectorException, self.volume.driver.validate_connector, connector) def test_validate_connector_empty_wwpns(self): """validate_connector() throws exception when it has empty wwpns.""" connector = {'wwpns': [], 'wwnns': ["not empty"]} self.assertRaises(exception.InvalidConnectorException, self.volume.driver.validate_connector, connector) def test_validate_connector_no_wwnns(self): """validate_connector() throws exception when it has no wwnns.""" connector = {'wwpns': ["not empty"]} self.assertRaises(exception.InvalidConnectorException, self.volume.driver.validate_connector, connector) def test_validate_connector_empty_wwnns(self): """validate_connector() throws exception when it has empty wwnns.""" connector = {'wwnns': [], 'wwpns': ["not empty"]} self.assertRaises(exception.InvalidConnectorException, self.volume.driver.validate_connector, connector) class VolumePolicyTestCase(test.TestCase): def setUp(self): super(VolumePolicyTestCase, self).setUp() cinder.policy.init() self.context = context.get_admin_context() def test_check_policy(self): self.mox.StubOutWithMock(cinder.policy, 'enforce') target = { 'project_id': self.context.project_id, 'user_id': self.context.user_id, } cinder.policy.enforce(self.context, 'volume:attach', target) self.mox.ReplayAll() cinder.volume.api.check_policy(self.context, 'attach') def test_check_policy_with_target(self): self.mox.StubOutWithMock(cinder.policy, 'enforce') target = { 'project_id': self.context.project_id, 'user_id': self.context.user_id, 'id': 2, } cinder.policy.enforce(self.context, 'volume:attach', target) self.mox.ReplayAll() cinder.volume.api.check_policy(self.context, 'attach', {'id': 2}) class ImageVolumeCacheTestCase(BaseVolumeTestCase): def setUp(self): super(ImageVolumeCacheTestCase, self).setUp() self.volume.driver.set_initialized() @mock.patch('oslo_utils.importutils.import_object') def test_cache_configs(self, mock_import_object): opts = { 'image_volume_cache_enabled': True, 'image_volume_cache_max_size_gb': 100, 'image_volume_cache_max_count': 20 } def conf_get(option): if option in opts: return opts[option] else: return None mock_driver = mock.Mock() mock_driver.configuration.safe_get.side_effect = conf_get mock_driver.configuration.extra_capabilities = 'null' def import_obj(*args, **kwargs): return mock_driver mock_import_object.side_effect = import_obj manager = vol_manager.VolumeManager(volume_driver=mock_driver) self.assertIsNotNone(manager) self.assertIsNotNone(manager.image_volume_cache) self.assertEqual(100, manager.image_volume_cache.max_cache_size_gb) self.assertEqual(20, manager.image_volume_cache.max_cache_size_count) def test_delete_image_volume(self): volume_params = { 'status': 'creating', 'host': 'some_host', 'size': 1 } volume_api = cinder.volume.api.API() volume = tests_utils.create_volume(self.context, **volume_params) volume.status = 'available' volume.save() image_id = '70a599e0-31e7-49b7-b260-868f441e862b' db.image_volume_cache_create(self.context, volume['host'], image_id, datetime.datetime.utcnow(), volume['id'], volume['size']) volume_api.delete(self.context, volume) entry = db.image_volume_cache_get_by_volume_id(self.context, volume['id']) self.assertIsNone(entry) def 
test_delete_volume_with_keymanager_exception(self): volume_params = { 'host': 'some_host', 'size': 1 } volume_api = cinder.volume.api.API() volume = tests_utils.create_volume(self.context, **volume_params) with mock.patch.object( volume_api.key_manager, 'delete_key') as key_del_mock: key_del_mock.side_effect = Exception("Key not found") volume_api.delete(self.context, volume) @ddt.ddt class DiscardFlagTestCase(BaseVolumeTestCase): def setUp(self): super(DiscardFlagTestCase, self).setUp() self.volume.driver = mock.MagicMock() self.mock_db = mock.MagicMock() self.volume.db = self.mock_db @ddt.data(dict(config_discard_flag=True, driver_discard_flag=None, expected_flag=True), dict(config_discard_flag=False, driver_discard_flag=None, expected_flag=None), dict(config_discard_flag=True, driver_discard_flag=True, expected_flag=True), dict(config_discard_flag=False, driver_discard_flag=True, expected_flag=True), dict(config_discard_flag=False, driver_discard_flag=False, expected_flag=False), dict(config_discard_flag=None, driver_discard_flag=True, expected_flag=True), dict(config_discard_flag=None, driver_discard_flag=False, expected_flag=False)) @ddt.unpack def test_initialize_connection_discard_flag(self, config_discard_flag, driver_discard_flag, expected_flag): volume_properties = {'volume_type_id': None} def _get_item(key): return volume_properties[key] mock_volume = mock.MagicMock() mock_volume.__getitem__.side_effect = _get_item self.mock_db.volume_get.return_value = mock_volume self.mock_db.volume_update.return_value = mock_volume self.volume.driver.create_export.return_value = None connector = {'ip': 'IP', 'initiator': 'INITIATOR'} conn_info = { 'driver_volume_type': 'iscsi', 'data': {'access_mode': 'rw', 'encrypted': False} } if driver_discard_flag is not None: conn_info['data']['discard'] = driver_discard_flag self.volume.driver.initialize_connection.return_value = conn_info def _safe_get(key): if key == 'report_discard_supported': return config_discard_flag else: return None self.volume.driver.configuration.safe_get.side_effect = _safe_get conn_info = self.volume.initialize_connection(self.context, 'id', connector) self.assertEqual(expected_flag, conn_info['data'].get('discard')) cinder-8.0.0/cinder/tests/unit/test_emc_xtremio.py0000664000567000056710000010224312701406250023434 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2014 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
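# NOTE: the xms_data dict below acts as an in-memory stand-in for the XMS # inventory, and xms_request() fakes the REST API that the XtremIO driver # queries, so these tests run without a real array.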
import time import mock import six from cinder import exception from cinder import test from cinder.tests.unit import fake_consistencygroup as fake_cg from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.volume.drivers.emc import xtremio typ2id = {'volumes': 'vol-id', 'snapshots': 'vol-id', 'initiators': 'initiator-id', 'initiator-groups': 'ig-id', 'lun-maps': 'mapping-id', 'consistency-groups': 'cg-id', 'consistency-group-volumes': 'cg-vol-id', } xms_data = {'xms': {1: {'version': '4.0.0'}}, 'clusters': {1: {'name': 'brick1', 'sys-sw-version': "4.0.0-devel_ba23ee5381eeab73", 'ud-ssd-space': '8146708710', 'ud-ssd-space-in-use': '708710', 'vol-size': '29884416', 'chap-authentication-mode': 'disabled', 'chap-discovery-mode': 'disabled', "index": 1, }, }, 'target-groups': {'Default': {"index": 1, "name": "Default"}, }, 'iscsi-portals': {'10.205.68.5/16': {"port-address": "iqn.2008-05.com.xtremio:001e67939c34", "ip-port": 3260, "ip-addr": "10.205.68.5/16", "name": "10.205.68.5/16", "index": 1, }, }, 'targets': {'X1-SC2-fc1': {'index': 1, "name": "X1-SC2-fc1", "port-address": "21:00:00:24:ff:57:b2:36", 'port-state': 'up', }, 'X1-SC2-fc2': {'index': 2, "name": "X1-SC2-fc2", "port-address": "21:00:00:24:ff:57:b2:55", 'port-state': 'up', } }, 'volumes': {}, 'initiator-groups': {}, 'initiators': {}, 'lun-maps': {}, 'consistency-groups': {}, 'consistency-group-volumes': {}, } xms_filters = { 'eq': lambda x, y: x == y, 'ne': lambda x, y: x != y, 'gt': lambda x, y: x > y, 'ge': lambda x, y: x >= y, 'lt': lambda x, y: x < y, 'le': lambda x, y: x <= y, } def get_xms_obj_by_name(typ, name): for item in xms_data[typ].values(): if 'name' in item and item['name'] == name: return item raise exception.NotFound() def clean_xms_data(): xms_data['volumes'] = {} xms_data['initiator-groups'] = {} xms_data['initiators'] = {} xms_data['lun-maps'] = {} xms_data['consistency-group-volumes'] = {} xms_data['consistency-groups'] = {} def fix_data(data, object_type): d = {} for key, value in data.items(): if 'name' in key: key = 'name' d[key] = value if object_type == 'lun-maps': d['lun'] = 1 vol_idx = get_xms_obj_by_name('volumes', data['vol-id'])['index'] ig_idx = get_xms_obj_by_name('initiator-groups', data['ig-id'])['index'] d['name'] = '_'.join([six.text_type(vol_idx), six.text_type(ig_idx), '1']) d[typ2id[object_type]] = ["a91e8c81c2d14ae4865187ce4f866f8a", d.get('name'), len(xms_data.get(object_type, [])) + 1] d['index'] = len(xms_data[object_type]) + 1 return d def get_xms_obj_key(data): for key in data.keys(): if 'name' in key: return key def get_obj(typ, name, idx): if name: return {"content": get_xms_obj_by_name(typ, name)} elif idx: if idx not in xms_data.get(typ, {}): raise exception.NotFound() return {"content": xms_data[typ][idx]} def xms_request(object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v1'): if object_type == 'snapshots': object_type = 'volumes' try: res = xms_data[object_type] except KeyError: raise exception.VolumeDriverException if method == 'GET': if name or idx: return get_obj(object_type, name, idx) else: if data and data.get('full') == 1: filter_term = data.get('filter') if not filter_term: entities = list(res.values()) else: field, oper, value = filter_term.split(':', 2) comp = xms_filters[oper] entities = [o for o in res.values() if comp(o.get(field), value)] return {object_type: entities} else: return {object_type: [{"href": "/%s/%d" % (object_type, obj['index']), "name": obj.get('name')} for obj in res.values()]} elif 
method == 'POST': data = fix_data(data, object_type) name_key = get_xms_obj_key(data) try: if name_key and get_xms_obj_by_name(object_type, data[name_key]): raise (exception .VolumeBackendAPIException ('Volume by this name already exists')) except exception.NotFound: pass data['index'] = len(xms_data[object_type]) + 1 xms_data[object_type][data['index']] = data # find the name key if name_key: data['name'] = data[name_key] if object_type == 'lun-maps': data['ig-name'] = data['ig-id'] return {"links": [{"href": "/%s/%d" % (object_type, data[typ2id[object_type]][2])}]} elif method == 'DELETE': if object_type == 'consistency-group-volumes': data = [cgv for cgv in xms_data['consistency-group-volumes'].values() if cgv['vol-id'] == data['vol-id'] and cgv['cg-id'] == data['cg-id']][0] else: data = get_obj(object_type, name, idx)['content'] if data: del xms_data[object_type][data['index']] else: raise exception.NotFound() elif method == 'PUT': obj = get_obj(object_type, name, idx)['content'] data = fix_data(data, object_type) del data['index'] obj.update(data) def xms_bad_request(object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v1'): if method == 'GET': raise exception.NotFound() elif method == 'POST': raise exception.VolumeBackendAPIException('Failed to create ig') def xms_failed_rename_snapshot_request(object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v1'): if method == 'POST': xms_data['volumes'][27] = {} return { "links": [ { "href": "https://host/api/json/v2/types/snapshots/27", "rel": "self"}]} elif method == 'PUT': raise exception.VolumeBackendAPIException(data='Failed to delete') elif method == 'DELETE': del xms_data['volumes'][27] class D(dict): def update(self, *args, **kwargs): self.__dict__.update(*args, **kwargs) return dict.update(self, *args, **kwargs) class CommonData(object): connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'wwpns': ["123456789012345", "123456789054321"], 'wwnns': ["223456789012345", "223456789054321"], 'host': 'fakehost', } test_volume = {'name': 'vol1', 'size': 1, 'volume_name': 'vol1', 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0001', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', } test_snapshot = D() test_snapshot.update({'name': 'snapshot1', 'size': 1, 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0002', 'volume_name': 'vol-vol1', 'volume_id': '192eb39b-6c2f-420c-bae3-3cfd117f0001', 'project_id': 'project', 'consistencygroup_id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', }) test_snapshot.__dict__.update(test_snapshot) test_volume2 = {'name': 'vol2', 'size': 1, 'volume_name': 'vol2', 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0004', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol2', 'display_description': 'test volume 2', 'volume_type_id': None, 'consistencygroup_id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', } test_clone = {'name': 'clone1', 'size': 1, 'volume_name': 'vol3', 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0003', 'provider_auth': None, 'project_id': 'project', 'display_name': 'clone1', 'display_description': 'volume created from snapshot', 'volume_type_id': None, 'consistencygroup_id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', } unmanaged1 = {'id': 'unmanaged1', 'name': 'unmanaged1', 'size': 3, } context = {'user': 'admin', } group = {'id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', 'name': 'cg1', 'status': 'OK', } cgsnapshot = 
mock.Mock(id='192eb39b-6c2f-420c-bae3-3cfd117f9876', consistencygroup_id=group['id']) def cgsnap_getitem(self, val): return self.__dict__[val] cgsnapshot.__getitem__ = cgsnap_getitem @mock.patch('cinder.volume.drivers.emc.xtremio.XtremIOClient.req') class EMCXIODriverISCSITestCase(test.TestCase): def setUp(self): super(EMCXIODriverISCSITestCase, self).setUp() clean_xms_data() config = mock.Mock() config.san_login = '' config.san_password = '' config.san_ip = '' config.xtremio_cluster_name = 'brick1' config.xtremio_provisioning_factor = 20.0 config.max_over_subscription_ratio = 20.0 config.xtremio_volumes_per_glance_cache = 100 def safe_get(key): return getattr(config, key) config.safe_get = safe_get self.driver = xtremio.XtremIOISCSIDriver(configuration=config) self.driver.client = xtremio.XtremIOClient4(config, config .xtremio_cluster_name) self.data = CommonData() def test_check_for_setup_error(self, req): req.side_effect = xms_request clusters = xms_data['clusters'] del xms_data['clusters'] self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) xms_data['clusters'] = clusters self.driver.check_for_setup_error() def test_client4_uses_v2(self, req): def base_req(*args, **kwargs): self.assertIn('v2', args) req.side_effect = base_req self.driver.client.req('volumes') def test_create_extend_delete_volume(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.extend_volume(self.data.test_volume, 5) self.driver.delete_volume(self.data.test_volume) def test_create_delete_snapshot(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.create_snapshot(self.data.test_snapshot) self.assertEqual(self.data.test_snapshot['id'], xms_data['volumes'][2]['name']) self.driver.delete_snapshot(self.data.test_snapshot) self.driver.delete_volume(self.data.test_volume) def test_failed_rename_snapshot(self, req): req.side_effect = xms_failed_rename_snapshot_request self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, self.data.test_snapshot) self.assertEqual(0, len(xms_data['volumes'])) def test_volume_from_snapshot(self, req): req.side_effect = xms_request xms_data['volumes'] = {} self.driver.create_volume(self.data.test_volume) self.driver.create_snapshot(self.data.test_snapshot) self.driver.create_volume_from_snapshot(self.data.test_volume2, self.data.test_snapshot) self.driver.delete_volume(self.data.test_volume2) self.driver.delete_volume(self.data.test_snapshot) self.driver.delete_volume(self.data.test_volume) def test_clone_volume(self, req): req.side_effect = xms_request self.driver.db = mock.Mock() (self.driver.db. 
image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() self.driver.create_volume(self.data.test_volume) vol = xms_data['volumes'][1] vol['num-of-dest-snaps'] = 200 self.assertRaises(exception.CinderException, self.driver.create_cloned_volume, self.data.test_clone, self.data.test_volume) vol['num-of-dest-snaps'] = 50 self.driver.create_cloned_volume(self.data.test_clone, self.data.test_volume) self.driver.delete_volume(self.data.test_clone) self.driver.delete_volume(self.data.test_volume) mock.patch.object(self.driver.client, 'create_snapshot', mock.Mock(side_effect= exception.XtremIOSnapshotsLimitExceeded())) self.assertRaises(exception.CinderException, self.driver.create_cloned_volume, self.data.test_clone, self.data.test_volume) response = mock.MagicMock() response.status_code = 400 response.json.return_value = { "message": "too_many_snapshots_per_vol", "error_code": 400 } self.assertRaises(exception.XtremIOSnapshotsLimitExceeded, self.driver.client.handle_errors, response, '', '') response.json.return_value = { "message": "too_many_objs", "error_code": 400 } self.assertRaises(exception.XtremIOSnapshotsLimitExceeded, self.driver.client.handle_errors, response, '', '') def test_duplicate_volume(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.data.test_volume) self.driver.delete_volume(self.data.test_volume) def test_no_portals_configured(self, req): req.side_effect = xms_request portals = xms_data['iscsi-portals'].copy() xms_data['iscsi-portals'].clear() lunmap = {'lun': 4} self.assertRaises(exception.VolumeDriverException, self.driver._get_iscsi_properties, lunmap) xms_data['iscsi-portals'] = portals def test_initialize_terminate_connection(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.create_volume(self.data.test_volume2) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, map_data['data']['target_lun']) i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] i1['chap-authentication-initiator-password'] = 'chap_password1' i1['chap-discovery-initiator-password'] = 'chap_password2' map_data = self.driver.initialize_connection(self.data.test_volume2, self.data.connector) self.driver.terminate_connection(self.data.test_volume, self.data.connector) def test_terminate_connection_fail_on_bad_volume(self, req): req.side_effect = xms_request self.assertRaises(exception.NotFound, self.driver.terminate_connection, self.data.test_volume, self.data.connector) def test_get_ig_indexes_from_initiators_called_once(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] self.assertEqual(1, map_data['data']['target_lun']) with mock.patch.object(self.driver, '_get_ig_indexes_from_initiators') as get_idx: get_idx.return_value = [1] self.driver.terminate_connection(self.data.test_volume, self.data.connector) get_idx.assert_called_once_with(self.data.connector) def test_initialize_chap_connection(self, req): req.side_effect = xms_request clean_xms_data() self.driver.create_volume(self.data.test_volume) self.driver.create_volume(self.data.test_volume2) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) 
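# The assertions that follow first confirm that no access_mode leaks into
# the connection info, then flip the fake cluster's chap-authentication-mode
# and chap-discovery-mode to 'initiator' and verify that the per-initiator
# secrets stored on the array surface to the attaching host as
# auth_password and discovery_auth_password.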
self.assertIsNone(map_data['data'].get('access_mode')) c1 = xms_data['clusters'][1] c1['chap-authentication-mode'] = 'initiator' c1['chap-discovery-mode'] = 'initiator' i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] i1['chap-authentication-initiator-password'] = 'chap_password1' i1['chap-discovery-initiator-password'] = 'chap_password2' map_data = self.driver.initialize_connection(self.data.test_volume2, self.data.connector) self.assertEqual('chap_password1', map_data['data']['auth_password']) self.assertEqual('chap_password2', map_data['data']['discovery_auth_password']) self.driver.terminate_connection(self.data.test_volume2, self.data.connector) i1['chap-authentication-initiator-password'] = None i1['chap-discovery-initiator-password'] = None map_data = self.driver.initialize_connection(self.data.test_volume2, self.data.connector) data = {} self.driver._add_auth(data, True, True) self.assertIn('initiator-discovery-user-name', data, 'Missing discovery user in data') self.assertIn('initiator-discovery-password', data, 'Missing discovery password in data') def test_initialize_connection_bad_ig(self, req): req.side_effect = xms_bad_request self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.data.test_volume, self.data.connector) self.driver.delete_volume(self.data.test_volume) def test_get_stats(self, req): req.side_effect = xms_request stats = self.driver.get_volume_stats(True) self.assertEqual(self.driver.backend_name, stats['volume_backend_name']) def test_manage_unmanage(self, req): req.side_effect = xms_request xms_data['volumes'] = {1: {'name': 'unmanaged1', 'index': 1, 'vol-size': '3', }, } ref_vol = {"source-name": "unmanaged1"} invalid_ref = {"source-name": "invalid"} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self.data.test_volume, invalid_ref) self.driver.manage_existing_get_size(self.data.test_volume, ref_vol) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.data.test_volume, invalid_ref) self.driver.manage_existing(self.data.test_volume, ref_vol) self.assertRaises(exception.VolumeNotFound, self.driver.unmanage, self.data.test_volume2) self.driver.unmanage(self.data.test_volume) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_operations(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.assertEqual(1, len(xms_data['consistency-groups'])) self.driver.update_consistencygroup(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) self.assertEqual(2, len(xms_data['consistency-group-volumes'])) self.driver.update_consistencygroup(d.context, d.group, remove_volumes=[d.test_volume2]) self.assertEqual(1, len(xms_data['consistency-group-volumes'])) self.driver.db = mock.Mock() (self.driver.db. 
volume_get_all_by_group.return_value) = [mock.MagicMock()] self.driver.create_cgsnapshot(d.context, d.cgsnapshot, []) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) self.assertEqual(snapset_name, '192eb39b6c2f420cbae33cfd117f0345192eb39b6c2f420cbae' '33cfd117f9876') snapset1 = {'ancestor-vol-id': ['', d.test_volume['id'], 2], 'consistencygroup_id': d.group['id'], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} self.driver.delete_cgsnapshot(d.context, d.cgsnapshot, []) self.driver.delete_consistencygroup(d.context, d.group, []) xms_data['snapshot-sets'] = {} @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_from_src(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data self.assertRaises(exception.InvalidInput, self.driver.create_consistencygroup_from_src, d.context, d.group, [], None, None, None, None) snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] snapshot_obj.volume_id = d.test_volume['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.driver.create_volume(d.test_volume) self.driver.create_cgsnapshot(d.context, d.cgsnapshot, []) xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] ['vol-id']) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} cg_obj = fake_cg.fake_consistencyobject_obj(d.context) new_vol1 = fake_volume.fake_volume_obj(d.context) snapshot1 = (fake_snapshot .fake_snapshot_obj (d.context, volume_id=d.test_volume['id'])) self.driver.create_consistencygroup_from_src(d.context, cg_obj, [new_vol1], d.cgsnapshot, [snapshot1]) new_cg_obj = fake_cg.fake_consistencyobject_obj(d.context, id=5) snapset2_name = new_cg_obj.id new_vol1.id = '192eb39b-6c2f-420c-bae3-3cfd117f0001' new_vol2 = fake_volume.fake_volume_obj(d.context) snapset2 = {'vol-list': [xms_data['volumes'][2]['vol-id']], 'name': snapset2_name, 'index': 1} xms_data['snapshot-sets'].update({5: snapset2, snapset2_name: snapset2}) self.driver.create_consistencygroup_from_src(d.context, new_cg_obj, [new_vol2], None, None, cg_obj, [new_vol1]) @mock.patch('requests.request') class EMCXIODriverTestCase(test.TestCase): def setUp(self): super(EMCXIODriverTestCase, self).setUp() configuration = mock.Mock() configuration.san_login = '' configuration.san_password = '' configuration.san_ip = '' configuration.xtremio_cluster_name = '' configuration.driver_ssl_cert_verify = True configuration.driver_ssl_cert_path = '/test/path/root_ca.crt' def safe_get(key): return getattr(configuration, key) configuration.safe_get = safe_get self.driver = xtremio.XtremIOISCSIDriver(configuration=configuration) self.data = CommonData() @mock.patch.object(time, 'sleep', mock.Mock(return_value=0)) def test_retry_request(self, req): busy_response = mock.MagicMock() busy_response.status_code = 400 busy_response.json.return_value = { "message": "system_is_busy", "error_code": 400 } good_response = mock.MagicMock() good_response.status_code = 200 EMCXIODriverTestCase.req_count = 0 def busy_request(*args, **kwargs): if EMCXIODriverTestCase.req_count < 1: EMCXIODriverTestCase.req_count += 1 return busy_response return good_response req.side_effect = busy_request self.driver.create_volume(self.data.test_volume) def 
test_verify_cert(self, req): good_response = mock.MagicMock() good_response.status_code = 200 def request_verify_cert(*args, **kwargs): self.assertEqual(kwargs['verify'], '/test/path/root_ca.crt') return good_response req.side_effect = request_verify_cert self.driver.client.req('volumes') @mock.patch('cinder.volume.drivers.emc.xtremio.XtremIOClient.req') class EMCXIODriverFibreChannelTestCase(test.TestCase): def setUp(self): super(EMCXIODriverFibreChannelTestCase, self).setUp() clean_xms_data() self.config = mock.Mock(san_login='', san_password='', san_ip='', xtremio_cluster_name='', xtremio_provisioning_factor=20.0) self.driver = xtremio.XtremIOFibreChannelDriver( configuration=self.config) self.data = CommonData() def test_initialize_terminate_connection(self, req): req.side_effect = xms_request self.driver.client = xtremio.XtremIOClient4( self.config, self.config.xtremio_cluster_name) self.driver.create_volume(self.data.test_volume) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, map_data['data']['target_lun']) for i1 in xms_data['initiators'].values(): i1['ig-id'] = ['', i1['ig-id'], 1] self.driver.terminate_connection(self.data.test_volume, self.data.connector) self.driver.delete_volume(self.data.test_volume) def test_initialize_existing_ig_terminate_connection(self, req): req.side_effect = xms_request self.driver.client = xtremio.XtremIOClient4( self.config, self.config.xtremio_cluster_name) self.driver.create_volume(self.data.test_volume) pre_existing = 'pre_existing_host' self.driver._create_ig(pre_existing) wwpns = self.driver._get_initiator_names(self.data.connector) for wwpn in wwpns: data = {'initiator-name': wwpn, 'ig-id': pre_existing, 'port-address': wwpn} self.driver.client.req('initiators', 'POST', data) def get_fake_initiator(wwpn): return {'port-address': wwpn, 'ig-id': ['', pre_existing, 1]} with mock.patch.object(self.driver.client, 'get_initiator', side_effect=get_fake_initiator): map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, map_data['data']['target_lun']) self.assertEqual(1, len(xms_data['initiator-groups'])) self.driver.terminate_connection(self.data.test_volume, self.data.connector) self.driver.delete_volume(self.data.test_volume) def test_race_on_terminate_connection(self, req): """Test for race conditions on num_of_mapped_volumes. This test confirms that num_of_mapped_volumes won't break even if we receive a NotFound exception when retrieving info on a specific mapping, as that specific mapping could have been deleted between the request to get the list of existing mappings and the request to get the info on one of them.
""" req.side_effect = xms_request self.driver.client = xtremio.XtremIOClient3( self.config, self.config.xtremio_cluster_name) # We'll wrap num_of_mapped_volumes, we'll store here original method original_method = self.driver.client.num_of_mapped_volumes def fake_num_of_mapped_volumes(*args, **kwargs): # Add a nonexistent mapping mappings = [{'href': 'volumes/1'}, {'href': 'volumes/12'}] # Side effects will be: 1st call returns the list, then we return # data for existing mappings, and on the nonexistent one we added # we return NotFound side_effect = [{'lun-maps': mappings}, {'content': xms_data['lun-maps'][1]}, exception.NotFound] with mock.patch.object(self.driver.client, 'req', side_effect=side_effect): return original_method(*args, **kwargs) self.driver.create_volume(self.data.test_volume) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, map_data['data']['target_lun']) with mock.patch.object(self.driver.client, 'num_of_mapped_volumes', side_effect=fake_num_of_mapped_volumes): self.driver.terminate_connection(self.data.test_volume, self.data.connector) self.driver.delete_volume(self.data.test_volume) cinder-8.0.0/cinder/tests/unit/test_dellscapi.py0000664000567000056710000113533112701406250023066 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from requests import models import uuid from cinder import context from cinder import exception from cinder import test from cinder.volume.drivers.dell import dell_storagecenter_api # We patch these here as they are used by every test to keep # from trying to contact a Dell Storage Center. @ddt.ddt @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '__init__', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'open_connection') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'close_connection') class DellSCSanAPITestCase(test.TestCase): """DellSCSanAPITestCase Class to test the Storage Center API using Mock. 
""" SC = {u'IPv6ManagementIPPrefix': 128, u'connectionError': u'', u'instanceId': u'64702', u'scSerialNumber': 64702, u'dataProgressionRunning': False, u'hostOrIpAddress': u'192.168.0.80', u'userConnected': True, u'portsBalanced': True, u'managementIp': u'192.168.0.80', u'version': u'6.5.1.269', u'location': u'', u'objectType': u'StorageCenter', u'instanceName': u'Storage Center 64702', u'statusMessage': u'', u'status': u'Up', u'flashOptimizedConfigured': False, u'connected': True, u'operationMode': u'Normal', u'userName': u'Admin', u'nonFlashOptimizedConfigured': True, u'name': u'Storage Center 64702', u'scName': u'Storage Center 64702', u'notes': u'', u'serialNumber': 64702, u'raidRebalanceRunning': False, u'userPasswordExpired': False, u'contact': u'', u'IPv6ManagementIP': u'::'} VOLUME = {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} VOLUME_LIST = [{u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False}] # Volume list that contains multiple volumes VOLUME_LIST_MULTI_VOLS = [ {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': 
u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False}, {u'instanceId': u'64702.3495', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3495, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da9', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False}] VOLUME_CONFIG = \ {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'maximumSiblingCount': 100, u'writeCacheStatus': u'Up', u'objectType': u'ScVolumeConfiguration', u'currentSiblingConfiguredSize': u'2.147483648E9 Bytes', u'compressionPaused': False, u'enforceConsumptionLimit': False, u'volumeSpaceConsumptionLimit': u'2.147483648E9 Bytes', u'readCacheEnabled': True, u'writeCacheEnabled': True, u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', u'dateModified': u'04/03/2015 12:01:08 AM', u'modifyUser': u'Admin', u'replayExpirationPaused': False, u'currentSiblingCount': 1, u'replayCreationPaused': False, u'replayProfileList': [{u'instanceId': u'64702.2', u'instanceName': u'Daily', u'objectType': u'ScReplayProfile'}], u'dateCreated': u'04/04/2014 03:54:26 AM', u'volume': {u'instanceId': u'64702.3494', u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'objectType': u'ScVolume'}, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'coalesceIntoActive': False, u'createUser': u'Admin', u'importToLowestTier': False, u'readCacheStatus': u'Up', u'maximumSiblingConfiguredSpace': u'5.49755813888E14 Bytes', u'storageProfile': {u'instanceId': u'64702.1', u'instanceName': u'Recommended', u'objectType': u'ScStorageProfile'}, u'scName': u'Storage Center 64702', u'notes': u'', u'diskFolder': {u'instanceId': u'64702.3', u'instanceName': u'Assigned', u'objectType': u'ScDiskFolder'}, u'openVmsUniqueDiskId': 48, u'compressionEnabled': False} INACTIVE_VOLUME = \ {u'instanceId': u'64702.3494', 
u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': False, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} SCSERVER = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'devstacksrv/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'devstacksrv', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } # ScServer where deletedAllowed=False (not allowed to be deleted) SCSERVER_NO_DEL = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'devstacksrv/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'devstacksrv', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': False, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } SCSERVERS = [{u'scName': u'Storage Center 64702', u'volumeCount': 5, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 0, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'openstack4', u'instanceId': u'64702.1', u'serverFolderPath': u'', u'portType': [u'Iscsi'], u'type': u'Physical', u'statusMessage': u'', u'status': u'Up', u'scSerialNumber': 64702, 
u'serverFolder': {u'instanceId': u'64702.0', u'instanceName': u'Servers', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Up', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 0, u'name': u'openstack4', u'hbaPresent': True, u'hbaCount': 1, u'notes': u'', u'mapped': True, u'operatingSystem': {u'instanceId': u'64702.3', u'instanceName': u'Other Multipath', u'objectType': u'ScServerOperatingSystem'}}, {u'scName': u'Storage Center 64702', u'volumeCount': 1, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 0, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'openstack5', u'instanceId': u'64702.2', u'serverFolderPath': u'', u'portType': [u'Iscsi'], u'type': u'Physical', u'statusMessage': u'', u'status': u'Up', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.0', u'instanceName': u'Servers', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Up', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 0, u'name': u'openstack5', u'hbaPresent': True, u'hbaCount': 1, u'notes': u'', u'mapped': True, u'operatingSystem': {u'instanceId': u'64702.2', u'instanceName': u'Other Singlepath', u'objectType': u'ScServerOperatingSystem'}}] # ScServers list where status = Down SCSERVERS_DOWN = \ [{u'scName': u'Storage Center 64702', u'volumeCount': 5, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 0, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'openstack4', u'instanceId': u'64702.1', u'serverFolderPath': u'', u'portType': [u'Iscsi'], u'type': u'Physical', u'statusMessage': u'', u'status': u'Down', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.0', u'instanceName': u'Servers', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Up', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 0, u'name': u'openstack4', u'hbaPresent': True, u'hbaCount': 1, u'notes': u'', u'mapped': True, u'operatingSystem': {u'instanceId': u'64702.3', u'instanceName': u'Other Multipath', u'objectType': u'ScServerOperatingSystem'}}] MAP_PROFILE = {u'instanceId': u'64702.2941', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'lunUsed': [1], u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'connectivity': u'Up', u'readOnly': False, u'objectType': u'ScMappingProfile', u'hostCache': False, u'mappedVia': u'Server', u'mapCount': 3, u'instanceName': u'6025-47', u'lunRequested': u'N/A'} MAP_PROFILES = [MAP_PROFILE] MAPPINGS = [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 
1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] # Multiple mappings to test find_iscsi_properties with multiple portals MAPPINGS_MULTI_PORTAL = \ [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] MAPPINGS_READ_ONLY = \ [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': 
u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': True, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] FC_MAPPINGS = [{u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7639.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218607', u'instanceName': u'21000024FF30441C', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.27.73', u'instanceName': u'21000024FF30441C-5000D31000FCBE36', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736118.50', u'instanceName': u'5000D31000FCBE36', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7639', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7640.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218606', u'instanceName': u'21000024FF30441D', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.27.78', u'instanceName': u'21000024FF30441D-5000D31000FCBE36', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736118.50', u'instanceName': u'5000D31000FCBE36', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7640', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7638.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', 
u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218606', u'instanceName': u'21000024FF30441D', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.28.76', u'instanceName': u'21000024FF30441D-5000D31000FCBE3E', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736126.60', u'instanceName': u'5000D31000FCBE3E', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7638', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}] FC_MAPPINGS_LUN_MISMATCH = \ [{u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7639.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218607', u'instanceName': u'21000024FF30441C', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.27.73', u'instanceName': u'21000024FF30441C-5000D31000FCBE36', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736118.50', u'instanceName': u'5000D31000FCBE36', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7639', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7640.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218606', u'instanceName': u'21000024FF30441D', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.27.78', u'instanceName': u'21000024FF30441D-5000D31000FCBE36', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736118.50', u'instanceName': u'5000D31000FCBE36', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7640', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7638.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': 
u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 2, u'serverHba': {u'instanceId': u'64702.3282218606', u'instanceName': u'21000024FF30441D', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.28.76', u'instanceName': u'21000024FF30441D-5000D31000FCBE3E', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736126.60', u'instanceName': u'5000D31000FCBE3E', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7638', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}] RPLAY = {u'scSerialNumber': 64702, u'globalIndex': u'64702-46-250', u'description': u'Cinder Clone Replay', u'parent': {u'instanceId': u'64702.46.249', u'instanceName': u'64702-46-249', u'objectType': u'ScReplay'}, u'instanceId': u'64702.46.250', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'12/09/2014 03:52:08 PM', u'createVolume': {u'instanceId': u'64702.46', u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', u'objectType': u'ScVolume'}, u'expireTime': u'12/09/2014 04:52:08 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7910, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'12/09/2014 03:52:08 PM', u'size': u'0.0 Bytes' } RPLAYS = [{u'scSerialNumber': 64702, u'globalIndex': u'64702-6025-5', u'description': u'Manually Created', u'parent': {u'instanceId': u'64702.6025.4', u'instanceName': u'64702-6025-4', u'objectType': u'ScReplay'}, u'instanceId': u'64702.6025.5', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'02/02/2015 08:23:55 PM', u'createVolume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'expireTime': u'02/02/2015 09:23:55 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7889, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'02/02/2015 08:23:55 PM', u'size': u'0.0 Bytes'}, {u'scSerialNumber': 64702, u'globalIndex': u'64702-6025-4', u'description': u'Cinder Test Replay012345678910', u'parent': {u'instanceId': u'64702.6025.3', u'instanceName': u'64702-6025-3', u'objectType': u'ScReplay'}, u'instanceId': u'64702.6025.4', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'02/02/2015 08:23:47 PM', u'createVolume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'expireTime': u'02/02/2015 09:23:47 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7869, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'02/02/2015 08:23:47 PM', u'size': u'0.0 Bytes'}] TST_RPLAY = {u'scSerialNumber': 64702, u'globalIndex': u'64702-6025-4', u'description': u'Cinder Test Replay012345678910', u'parent': {u'instanceId': u'64702.6025.3', u'instanceName': u'64702-6025-3', u'objectType': u'ScReplay'}, u'instanceId': u'64702.6025.4', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'02/02/2015 08:23:47 PM', u'createVolume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'expireTime': u'02/02/2015 09:23:47 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7869, u'active': False, 
u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'02/02/2015 08:23:47 PM', u'size': u'0.0 Bytes'} FLDR = {u'status': u'Up', u'instanceName': u'opnstktst', u'name': u'opnstktst', u'parent': {u'instanceId': u'64702.0', u'instanceName': u'Volumes', u'objectType': u'ScVolumeFolder'}, u'instanceId': u'64702.43', u'scName': u'Storage Center 64702', u'notes': u'Folder for OpenStack Cinder Driver', u'scSerialNumber': 64702, u'parentIndex': 0, u'okToDelete': True, u'folderPath': u'', u'root': False, u'statusMessage': u'', u'objectType': u'ScVolumeFolder'} SVR_FLDR = {u'status': u'Up', u'instanceName': u'devstacksrv', u'name': u'devstacksrv', u'parent': {u'instanceId': u'64702.0', u'instanceName': u'Servers', u'objectType': u'ScServerFolder'}, u'instanceId': u'64702.4', u'scName': u'Storage Center 64702', u'notes': u'Folder for OpenStack Cinder Driver', u'scSerialNumber': 64702, u'parentIndex': 0, u'okToDelete': False, u'folderPath': u'', u'root': False, u'statusMessage': u'', u'objectType': u'ScServerFolder'} ISCSI_HBA = {u'portWwnList': [], u'iscsiIpAddress': u'0.0.0.0', u'pathCount': 1, u'name': u'iqn.1993-08.org.debian:01:52332b70525', u'connectivity': u'Down', u'instanceId': u'64702.3786433166', u'scName': u'Storage Center 64702', u'notes': u'', u'scSerialNumber': 64702, u'server': {u'instanceId': u'64702.38', u'instanceName': u'Server_iqn.1993-08.org.debian:01:52332b70525', u'objectType': u'ScPhysicalServer'}, u'remoteStorageCenter': False, u'iscsiName': u'', u'portType': u'Iscsi', u'instanceName': u'iqn.1993-08.org.debian:01:52332b70525', u'objectType': u'ScServerHba'} FC_HBAS = [{u'portWwnList': [], u'iscsiIpAddress': u'0.0.0.0', u'pathCount': 2, u'name': u'21000024FF30441C', u'connectivity': u'Up', u'instanceId': u'64702.3282218607', u'scName': u'Storage Center 64702', u'notes': u'', u'scSerialNumber': 64702, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'remoteStorageCenter': False, u'iscsiName': u'', u'portType': u'FibreChannel', u'instanceName': u'21000024FF30441C', u'objectType': u'ScServerHba'}, {u'portWwnList': [], u'iscsiIpAddress': u'0.0.0.0', u'pathCount': 3, u'name': u'21000024FF30441D', u'connectivity': u'Partial', u'instanceId': u'64702.3282218606', u'scName': u'Storage Center 64702', u'notes': u'', u'scSerialNumber': 64702, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'remoteStorageCenter': False, u'iscsiName': u'', u'portType': u'FibreChannel', u'instanceName': u'21000024FF30441D', u'objectType': u'ScServerHba'}] FC_HBA = {u'portWwnList': [], u'iscsiIpAddress': u'0.0.0.0', u'pathCount': 3, u'name': u'21000024FF30441D', u'connectivity': u'Partial', u'instanceId': u'64702.3282218606', u'scName': u'Storage Center 64702', u'notes': u'', u'scSerialNumber': 64702, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'remoteStorageCenter': False, u'iscsiName': u'', u'portType': u'FibreChannel', u'instanceName': u'21000024FF30441D', u'objectType': u'ScServerHba'} SVR_OS_S = [{u'allowsLunGaps': True, u'product': u'Red Hat Linux', u'supportsActiveMappingDeletion': True, u'version': u'6.x', u'requiresLunZero': False, u'scName': u'Storage Center 64702', u'virtualMachineGuest': True, u'virtualMachineHost': False, u'allowsCrossTransportMapping': False, u'objectType': u'ScServerOperatingSystem', u'instanceId': u'64702.38', 
u'lunCanVaryAcrossPaths': False, u'scSerialNumber': 64702, u'maximumVolumeSize': u'0.0 Bytes', u'multipath': True, u'instanceName': u'Red Hat Linux 6.x', u'supportsActiveMappingCreation': True, u'name': u'Red Hat Linux 6.x'}] ISCSI_FLT_DOMAINS = [{u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'192.168.0.21', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'}] # For testing find_iscsi_properties where multiple portals are found ISCSI_FLT_DOMAINS_MULTI_PORTALS = \ [{u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'192.168.0.21', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'}, {u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'192.168.0.25', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'}] ISCSI_FLT_DOMAIN = {u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'192.168.0.21', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, 
u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'} CTRLR_PORT = {u'status': u'Up', u'iscsiIpAddress': u'0.0.0.0', u'WWN': u'5000D31000FCBE06', u'name': u'5000D31000FCBE06', u'iscsiGateway': u'0.0.0.0', u'instanceId': u'64702.5764839588723736070.51', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'transportType': u'FibreChannel', u'virtual': False, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'iscsiName': u'', u'purpose': u'FrontEnd', u'iscsiSubnetMask': u'0.0.0.0', u'faultDomain': {u'instanceId': u'64702.4.3', u'instanceName': u'Domain 1', u'objectType': u'ScControllerPortFaultDomain'}, u'instanceName': u'5000D31000FCBE06', u'statusMessage': u'', u'objectType': u'ScControllerPort'} ISCSI_CTRLR_PORT = {u'preferredParent': {u'instanceId': u'64702.5764839588723736074.69', u'instanceName': u'5000D31000FCBE0A', u'objectType': u'ScControllerPort'}, u'status': u'Up', u'iscsiIpAddress': u'10.23.8.235', u'WWN': u'5000D31000FCBE43', u'name': u'5000D31000FCBE43', u'parent': {u'instanceId': u'64702.5764839588723736074.69', u'instanceName': u'5000D31000FCBE0A', u'objectType': u'ScControllerPort'}, u'iscsiGateway': u'0.0.0.0', u'instanceId': u'64702.5764839588723736131.91', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'transportType': u'Iscsi', u'virtual': True, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'purpose': u'FrontEnd', u'iscsiSubnetMask': u'0.0.0.0', u'faultDomain': {u'instanceId': u'64702.6.5', u'instanceName': u'iSCSI 10G 2', u'objectType': u'ScControllerPortFaultDomain'}, u'instanceName': u'5000D31000FCBE43', u'childStatus': u'Up', u'statusMessage': u'', u'objectType': u'ScControllerPort'} FC_CTRLR_PORT = {u'preferredParent': {u'instanceId': u'64702.5764839588723736093.57', u'instanceName': u'5000D31000FCBE1D', u'objectType': u'ScControllerPort'}, u'status': u'Up', u'iscsiIpAddress': u'0.0.0.0', u'WWN': u'5000D31000FCBE36', u'name': u'5000D31000FCBE36', u'parent': {u'instanceId': u'64702.5764839588723736093.57', u'instanceName': u'5000D31000FCBE1D', u'objectType': u'ScControllerPort'}, u'iscsiGateway': u'0.0.0.0', u'instanceId': u'64702.5764839588723736118.50', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'transportType': u'FibreChannel', u'virtual': True, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'iscsiName': u'', u'purpose': u'FrontEnd', u'iscsiSubnetMask': u'0.0.0.0', u'faultDomain': {u'instanceId': u'64702.1.0', u'instanceName': u'Domain 0', u'objectType': u'ScControllerPortFaultDomain'}, u'instanceName': u'5000D31000FCBE36', u'childStatus': u'Up', u'statusMessage': u'', u'objectType': u'ScControllerPort'} FC_CTRLR_PORT_WWN_ERROR = \ {u'preferredParent': {u'instanceId': u'64702.5764839588723736093.57', u'instanceName': u'5000D31000FCBE1D', u'objectType': u'ScControllerPort'}, u'status': u'Up', u'iscsiIpAddress': u'0.0.0.0', u'Wwn': u'5000D31000FCBE36', u'name': u'5000D31000FCBE36', 
u'parent': {u'instanceId': u'64702.5764839588723736093.57', u'instanceName': u'5000D31000FCBE1D', u'objectType': u'ScControllerPort'}, u'iscsiGateway': u'0.0.0.0', u'instanceId': u'64702.5764839588723736118.50', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'transportType': u'FibreChannel', u'virtual': True, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'iscsiName': u'', u'purpose': u'FrontEnd', u'iscsiSubnetMask': u'0.0.0.0', u'faultDomain': {u'instanceId': u'64702.1.0', u'instanceName': u'Domain 0', u'objectType': u'ScControllerPortFaultDomain'}, u'instanceName': u'5000D31000FCBE36', u'childStatus': u'Up', u'statusMessage': u'', u'objectType': u'ScControllerPort'} STRG_USAGE = {u'systemSpace': u'7.38197504E8 Bytes', u'freeSpace': u'1.297659461632E13 Bytes', u'oversubscribedSpace': u'0.0 Bytes', u'instanceId': u'64702', u'scName': u'Storage Center 64702', u'savingVsRaidTen': u'1.13737990144E11 Bytes', u'allocatedSpace': u'1.66791217152E12 Bytes', u'usedSpace': u'3.25716017152E11 Bytes', u'configuredSpace': u'9.155796533248E12 Bytes', u'alertThresholdSpace': u'1.197207956992E13 Bytes', u'availableSpace': u'1.3302310633472E13 Bytes', u'badSpace': u'0.0 Bytes', u'time': u'02/02/2015 02:23:39 PM', u'scSerialNumber': 64702, u'instanceName': u'Storage Center 64702', u'storageAlertThreshold': 10, u'objectType': u'StorageCenterStorageUsage'} RPLAY_PROFILE = {u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', u'type': u'Consistent', u'notes': u'Created by Dell Cinder Driver', u'volumeCount': 0, u'expireIncompleteReplaySets': True, u'replayCreationTimeout': 20, u'enforceReplayCreationTimeout': False, u'ruleCount': 0, u'userCreated': True, u'scSerialNumber': 64702, u'scName': u'Storage Center 64702', u'objectType': u'ScReplayProfile', u'instanceId': u'64702.11', u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3'} STORAGE_PROFILE_LIST = [ {u'allowedForFlashOptimized': False, u'allowedForNonFlashOptimized': True, u'index': 1, u'instanceId': u'64158.1', u'instanceName': u'Recommended', u'name': u'Recommended', u'notes': u'', u'objectType': u'ScStorageProfile', u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', u'raidTypeUsed': u'Mixed', u'scName': u'Storage Center 64158', u'scSerialNumber': 64158, u'tiersUsedDescription': u'Tier 1, Tier 2, Tier 3', u'useTier1Storage': True, u'useTier2Storage': True, u'useTier3Storage': True, u'userCreated': False, u'volumeCount': 125}, {u'allowedForFlashOptimized': False, u'allowedForNonFlashOptimized': True, u'index': 2, u'instanceId': u'64158.2', u'instanceName': u'High Priority', u'name': u'High Priority', u'notes': u'', u'objectType': u'ScStorageProfile', u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', u'raidTypeUsed': u'Mixed', u'scName': u'Storage Center 64158', u'scSerialNumber': 64158, u'tiersUsedDescription': u'Tier 1', u'useTier1Storage': True, u'useTier2Storage': False, u'useTier3Storage': False, u'userCreated': False, u'volumeCount': 0}, {u'allowedForFlashOptimized': False, u'allowedForNonFlashOptimized': True, u'index': 3, u'instanceId': u'64158.3', u'instanceName': u'Medium Priority', u'name': u'Medium Priority', u'notes': u'', u'objectType': u'ScStorageProfile', u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', u'raidTypeUsed': u'Mixed', u'scName': u'Storage Center 64158', u'scSerialNumber': 64158, u'tiersUsedDescription': u'Tier 2', u'useTier1Storage': False, u'useTier2Storage': True, u'useTier3Storage': False, 
u'userCreated': False, u'volumeCount': 0}, {u'allowedForFlashOptimized': True, u'allowedForNonFlashOptimized': True, u'index': 4, u'instanceId': u'64158.4', u'instanceName': u'Low Priority', u'name': u'Low Priority', u'notes': u'', u'objectType': u'ScStorageProfile', u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', u'raidTypeUsed': u'Mixed', u'scName': u'Storage Center 64158', u'scSerialNumber': 64158, u'tiersUsedDescription': u'Tier 3', u'useTier1Storage': False, u'useTier2Storage': False, u'useTier3Storage': True, u'userCreated': False, u'volumeCount': 0}] CGS = [{u'profile': {u'instanceId': u'65690.4', u'instanceName': u'0869559e-6881-454e-ba18-15c6726d33c1', u'objectType': u'ScReplayProfile'}, u'scSerialNumber': 65690, u'globalIndex': u'65690-4-2', u'description': u'GUID1-0869559e-6881-454e-ba18-15c6726d33c1', u'instanceId': u'65690.65690.4.2', u'scName': u'Storage Center 65690', u'expires': False, u'freezeTime': u'2015-09-28T14:00:59-05:00', u'expireTime': u'1969-12-31T18:00:00-06:00', u'expectedReplayCount': 2, u'writesHeldDuration': 19809, u'replayCount': 2, u'instanceName': u'Name1', u'objectType': u'ScReplayConsistencyGroup'}, {u'profile': {u'instanceId': u'65690.4', u'instanceName': u'0869559e-6881-454e-ba18-15c6726d33c1', u'objectType': u'ScReplayProfile'}, u'scSerialNumber': 65690, u'globalIndex': u'65690-4-3', u'description': u'GUID2-0869559e-6881-454e-ba18-15c6726d33c1', u'instanceId': u'65690.65690.4.3', u'scName': u'Storage Center 65690', u'expires': False, u'freezeTime': u'2015-09-28T14:00:59-05:00', u'expireTime': u'1969-12-31T18:00:00-06:00', u'expectedReplayCount': 2, u'writesHeldDuration': 19809, u'replayCount': 2, u'instanceName': u'Name2', u'objectType': u'ScReplayConsistencyGroup'} ] ISCSI_CONFIG = { u'initialReadyToTransfer': True, u'scSerialNumber': 64065, u'macAddress': u'00c0dd-1da173', u'instanceId': u'64065.5764839588723573038.6', u'vlanTagging': False, u'mapCount': 8, u'cardModel': u'Qle4062', u'portNumber': 3260, u'firstBurstSize': 256, u'deviceName': u'PCIDEV09', u'subnetMask': u'255.255.255.0', u'speed': u'1 Gbps', u'maximumVlanCount': 0, u'gatewayIpAddress': u'192.168.0.1', u'slot': 4, u'sfpData': u'', u'dataDigest': False, u'chapEnabled': False, u'firmwareVersion': u'03.00.01.77', u'preferredControllerIndex': 64066, u'defaultTimeToRetain': 20, u'objectType': u'ScControllerPortIscsiConfiguration', u'instanceName': u'5000d31000FCBE43', u'scName': u'sc64065', u'revision': u'0', u'controllerPortIndex': 5764839588723573038, u'maxBurstSize': 512, u'targetCount': 20, u'description': u'QLogic QLE4062 iSCSI Adapter Rev 0 Copper', u'vlanSupported': True, u'chapName': u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'windowSize': 128, u'vlanId': 0, u'defaultTimeToWait': 2, u'headerDigest': False, u'slotPort': 2, u'immediateDataWrite': False, u'storageCenterTargetCount': 20, u'vlanCount': 0, u'scsiCommandTimeout': 60, u'slotType': u'PCI4', u'ipAddress': u'192.168.0.21', u'vlanUserPriority': 0, u'bothCount': 0, u'initiatorCount': 33, u'keepAliveTimeout': 30, u'homeControllerIndex': 64066, u'chapSecret': u'', u'maximumTransmissionUnit': 1500} SCQOS = {u'linkSpeed': u'1 Gbps', u'numberDevices': 1, u'bandwidthLimited': False, u'name': u'Cinder QoS', u'instanceId': u'64702.2', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'instanceName': u'Cinder QoS', u'advancedSettings': {u'globalMaxSectorPerIo': 512, u'destinationMaxSectorCount': 65536, u'queuePassMaxSectorCount': 65536, u'destinationMaxIoCount': 18, u'globalMaxIoCount': 32, 
                                          u'queuePassMaxIoCount': 8},
             u'objectType': u'ScReplicationQosNode'}

    SCREPL = [{u'destinationVolume': {u'instanceId': u'65495.167',
                                      u'instanceName':
                                          u'Cinder repl of abcd95b2-1284-'
                                          u'4cf0-a397-970fa6c68092',
                                      u'objectType': u'ScVolume'},
               u'instanceId': u'64702.9',
               u'scSerialNumber': 64702,
               u'syncStatus': u'NotApplicable',
               u'objectType': u'ScReplication',
               u'sourceStorageCenter': {u'instanceId': u'64702',
                                        u'instanceName': u'Storage Center '
                                                         u'64702',
                                        u'objectType': u'StorageCenter'},
               u'secondaryTransportTypes': [],
               u'dedup': False,
               u'state': u'Up',
               u'replicateActiveReplay': False,
               u'qosNode': {u'instanceId': u'64702.2',
                            u'instanceName': u'Cinder QoS',
                            u'objectType': u'ScReplicationQosNode'},
               u'sourceVolume': {u'instanceId': u'64702.13108',
                                 u'instanceName': u'abcd95b2-1284-4cf0-'
                                                  u'a397-970fa6c68092',
                                 u'objectType': u'ScVolume'},
               u'type': u'Asynchronous',
               u'statusMessage': u'',
               u'status': u'Up',
               u'syncMode': u'None',
               u'stateMessage': u'',
               u'managedByLiveVolume': False,
               u'destinationScSerialNumber': 65495,
               u'pauseAllowed': True,
               u'instanceName': u"Replication of 'abcd95b2-1284-4cf0-"
                                u"a397-970fa6c68092'",
               u'simulation': False,
               u'transportTypes': [u'FibreChannel'],
               u'replicateStorageToLowestTier': True,
               u'scName': u'Storage Center 64702',
               u'destinationStorageCenter': {u'instanceId': u'65495',
                                             u'instanceName':
                                                 u'Storage Center 65495',
                                             u'objectType':
                                                 u'StorageCenter'}}]

    IQN = 'iqn.2002-03.com.compellent:5000D31000000001'
    WWN = u'21000024FF30441C'

    WWNS = [u'21000024FF30441C',
            u'21000024FF30441D']

    # Used to test finding no match in find_wwns
    WWNS_NO_MATCH = [u'21000024FF30451C',
                     u'21000024FF30451D']

    FLDR_PATH = 'StorageCenter/ScVolumeFolder/'

    # Create a Response object that indicates OK
    response_ok = models.Response()
    response_ok.status_code = 200
    response_ok.reason = u'ok'
    RESPONSE_200 = response_ok

    # Create a Response object that indicates created
    response_created = models.Response()
    response_created.status_code = 201
    response_created.reason = u'created'
    RESPONSE_201 = response_created

    # Create a Response object that can indicate a failure. Note that
    # a 204 can also be a success with no content returned, so know
    # your calls!
    response_nc = models.Response()
    response_nc.status_code = 204
    response_nc.reason = u'duplicate'
    RESPONSE_204 = response_nc

    # Create a Response object that is a pure error.
    response_bad = models.Response()
    response_bad.status_code = 400
    response_bad.reason = u'bad request'
    RESPONSE_400 = response_bad

    def setUp(self):
        super(DellSCSanAPITestCase, self).setUp()

        # Configuration is a mock. A mock is pretty much a blank
        # slate, and mocks created in setUp carry no useful defaults,
        # so we just set the few driver config values we need here.
        self.configuration = mock.Mock()

        self.configuration.san_is_local = False
        self.configuration.san_ip = "192.168.0.1"
        self.configuration.san_login = "admin"
        self.configuration.san_password = "mmm"
        self.configuration.dell_sc_ssn = 12345
        self.configuration.dell_sc_server_folder = 'opnstktst'
        self.configuration.dell_sc_volume_folder = 'opnstktst'
        # Note that we set this to True even though we do not test
        # this functionality. It is passed straight through to the
        # requests calls as the verify parameter, and since requests
        # is a third-party library that is deeply stubbed out here it
        # is not directly testable by this code. Note that if this
        # fails, the driver fails to even come up.
self.configuration.dell_sc_verify_cert = True self.configuration.dell_sc_api_port = 3033 self.configuration.iscsi_ip_address = '192.168.1.1' self.configuration.iscsi_port = 3260 self._context = context.get_admin_context() self.apiversion = '2.0' # Set up the StorageCenterApi self.scapi = dell_storagecenter_api.StorageCenterApi( self.configuration.san_ip, self.configuration.dell_sc_api_port, self.configuration.san_login, self.configuration.san_password, self.configuration.dell_sc_verify_cert, self.apiversion) # Set up the scapi configuration vars self.scapi.ssn = self.configuration.dell_sc_ssn self.scapi.sfname = self.configuration.dell_sc_server_folder self.scapi.vfname = self.configuration.dell_sc_volume_folder # Note that we set this to True (or not) on the replication tests. self.scapi.failed_over = False self.volid = str(uuid.uuid4()) self.volume_name = "volume" + self.volid def test_path_to_array(self, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._path_to_array(u'folder1/folder2/folder3') expected = [u'folder1', u'folder2', u'folder3'] self.assertEqual(expected, res, 'Unexpected folder path') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_result', return_value=SC) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_sc(self, mock_get, mock_get_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_sc() mock_get.assert_called_once_with('StorageCenter/StorageCenter') self.assertTrue(mock_get_result.called) self.assertEqual(u'64702', res, 'Unexpected SSN') @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_result', return_value=None) def test_find_sc_failure(self, mock_get_result, mock_get, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_sc) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_folder(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._create_folder( 'StorageCenter/ScVolumeFolder', '', self.configuration.dell_sc_volume_folder) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.FLDR, res, 'Unexpected Folder') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_folder_with_parent(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): # Test case where parent folder name is specified res = self.scapi._create_folder( 'StorageCenter/ScVolumeFolder', 'parentFolder', self.configuration.dell_sc_volume_folder) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.FLDR, res, 'Unexpected Folder') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_create_folder_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._create_folder( 'StorageCenter/ScVolumeFolder', '', self.configuration.dell_sc_volume_folder) self.assertIsNone(res, 'Test Create folder - None expected') 
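    # The RESPONSE_* constants used throughout these tests are hand-built
    # requests Response objects. A small factory expressing the same
    # pattern (a minimal sketch only; the tests keep using the
    # precomputed constants above) could look like this:
    @staticmethod
    def _build_response(status_code, reason):
        # Illustrative helper, not referenced elsewhere: build a bare
        # requests Response carrying only the two fields the REST
        # client inspects, status_code and reason.
        rsp = models.Response()
        rsp.status_code = status_code
        rsp.reason = reason
        return rsp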
@mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_folder', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_path_to_array', return_value=['Cinder_Test_Folder']) def test_create_folder_path(self, mock_path_to_array, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._create_folder_path( 'StorageCenter/ScVolumeFolder', self.configuration.dell_sc_volume_folder) mock_path_to_array.assert_called_once_with( self.configuration.dell_sc_volume_folder) self.assertTrue(mock_find_folder.called) self.assertEqual(self.FLDR, res, 'Unexpected ScFolder') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_create_folder', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_path_to_array', return_value=['Cinder_Test_Folder']) def test_create_folder_path_create_fldr(self, mock_path_to_array, mock_find_folder, mock_create_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where folder is not found and must be created res = self.scapi._create_folder_path( 'StorageCenter/ScVolumeFolder', self.configuration.dell_sc_volume_folder) mock_path_to_array.assert_called_once_with( self.configuration.dell_sc_volume_folder) self.assertTrue(mock_find_folder.called) self.assertTrue(mock_create_folder.called) self.assertEqual(self.FLDR, res, 'Unexpected ScFolder') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_create_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_path_to_array', return_value=['Cinder_Test_Folder']) def test_create_folder_path_failure(self, mock_path_to_array, mock_find_folder, mock_create_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where folder is not found, must be created # and creation fails res = self.scapi._create_folder_path( 'StorageCenter/ScVolumeFolder', self.configuration.dell_sc_volume_folder) mock_path_to_array.assert_called_once_with( self.configuration.dell_sc_volume_folder) self.assertTrue(mock_find_folder.called) self.assertTrue(mock_create_folder.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_result', return_value=u'devstackvol/fcvm/') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_folder(self, mock_post, mock_get_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_folder( 'StorageCenter/ScVolumeFolder', self.configuration.dell_sc_volume_folder) self.assertTrue(mock_post.called) self.assertTrue(mock_get_result.called) self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_result', return_value=u'devstackvol/fcvm/') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_folder_multi_fldr(self, mock_post, mock_get_result, mock_close_connection, mock_open_connection, mock_init): # Test case for folder path with multiple folders res = self.scapi._find_folder( 'StorageCenter/ScVolumeFolder', u'testParentFolder/opnstktst') self.assertTrue(mock_post.called) self.assertTrue(mock_get_result.called) self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder') 
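    # Stacked mock.patch.object decorators are applied bottom-up, so the
    # decorator closest to the function supplies the first mock argument
    # after self; the trailing mock_close_connection, mock_open_connection
    # and mock_init arguments on every test presumably come from patches
    # applied at the class level in the same bottom-up order. A minimal
    # sketch of the convention (hypothetical test, not part of this
    # suite):
    #
    #     @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
    #                        '_get_result')        # second mock argument
    #     @mock.patch.object(dell_storagecenter_api.HttpClient,
    #                        'post')               # first mock argument
    #     def test_example(self, mock_post, mock_get_result, ...):
    #         ...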
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_204)
    def test_find_folder_failure(self,
                                 mock_post,
                                 mock_close_connection,
                                 mock_open_connection,
                                 mock_init):
        res = self.scapi._find_folder(
            'StorageCenter/ScVolumeFolder',
            self.configuration.dell_sc_volume_folder)
        self.assertIsNone(res, 'Test find folder - None expected')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_folder',
                       return_value=None)
    def test_find_volume_folder_fail(self,
                                     mock_find_folder,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        # Test case where _find_volume_folder returns None
        res = self.scapi._find_volume_folder(
            False)
        mock_find_folder.assert_called_once_with(
            'StorageCenter/ScVolumeFolder/GetList',
            self.configuration.dell_sc_volume_folder)
        self.assertIsNone(res, 'Expected None')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_folder',
                       return_value=FLDR)
    def test_find_volume_folder(self,
                                mock_find_folder,
                                mock_close_connection,
                                mock_open_connection,
                                mock_init):
        res = self.scapi._find_volume_folder(
            False)
        mock_find_folder.assert_called_once_with(
            'StorageCenter/ScVolumeFolder/GetList',
            self.configuration.dell_sc_volume_folder)
        self.assertEqual(self.FLDR, res, 'Unexpected Folder')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=STORAGE_PROFILE_LIST)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_find_storage_profile_fail(self,
                                       mock_json,
                                       mock_find_folder,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        # Test case where _find_storage_profile returns None because the
        # requested profile name has no match
        res = self.scapi._find_storage_profile("Blah")
        self.assertIsNone(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=STORAGE_PROFILE_LIST)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_find_storage_profile_none(self,
                                       mock_json,
                                       mock_find_folder,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        # Test case where _find_storage_profile returns None
        res = self.scapi._find_storage_profile(None)
        self.assertIsNone(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=STORAGE_PROFILE_LIST)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    @ddt.data('HighPriority', 'highpriority', 'High Priority')
    def test_find_storage_profile(self,
                                  value,
                                  mock_json,
                                  mock_find_folder,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        res = self.scapi._find_storage_profile(value)
        self.assertIsNotNone(res, 'Expected matching storage profile!')
        self.assertEqual(self.STORAGE_PROFILE_LIST[1]['instanceId'],
                         res.get('instanceId'))

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_create_folder_path',
                       return_value=FLDR)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_folder',
                       return_value=None)
    def test_find_volume_folder_create_folder(self,
                                              mock_find_folder,
                                              mock_create_folder_path,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        # Test case where _find_volume_folder returns None and the folder
        # must be created
        res = self.scapi._find_volume_folder(
            True)
        mock_find_folder.assert_called_once_with(
            'StorageCenter/ScVolumeFolder/GetList',
            self.configuration.dell_sc_volume_folder)
        self.assertTrue(mock_create_folder_path.called)
        self.assertEqual(self.FLDR, res, 'Unexpected Folder')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=SCSERVERS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_init_volume(self, mock_post, mock_get_json, mock_map_volume, mock_unmap_volume, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): self.scapi._init_volume(self.VOLUME) self.assertTrue(mock_map_volume.called) self.assertTrue(mock_unmap_volume.called) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_init_volume_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where ScServer list fails self.scapi._init_volume(self.VOLUME) self.assertTrue(mock_post.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=SCSERVERS_DOWN) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_init_volume_servers_down(self, mock_post, mock_get_json, mock_map_volume, mock_unmap_volume, mock_close_connection, mock_open_connection, mock_init): # Test case where ScServer Status = Down self.scapi._init_volume(self.VOLUME) self.assertFalse(mock_map_volume.called) self.assertFalse(mock_unmap_volume.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_volume(self, mock_post, mock_find_volume_folder, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_volume( self.volume_name, 1) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) mock_find_volume_folder.assert_called_once_with(True) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_storage_profile', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=FLDR) def test_create_volume_storage_profile_missing(self, mock_find_volume_folder, mock_find_storage_profile, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_volume, self.volume_name, 1, 'Blah') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_storage_profile', return_value=STORAGE_PROFILE_LIST[0]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_volume_storage_profile(self, mock_post, mock_find_volume_folder, mock_find_storage_profile, mock_get_json, mock_close_connection, mock_open_connection, mock_init): self.scapi.create_volume( self.volume_name, 1, 'Recommended') actual = mock_post.call_args[0][1]['StorageProfile'] expected = 
self.STORAGE_PROFILE_LIST[0]['instanceId'] self.assertEqual(expected, actual) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_volume_retry_find(self, mock_post, mock_find_volume_folder, mock_get_json, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): # Test case where find_volume is used to do a retry of finding the # created volume res = self.scapi.create_volume( self.volume_name, 1) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertTrue(mock_find_volume.called) mock_find_volume_folder.assert_called_once_with(True) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_vol_folder_fail(self, mock_post, mock_find_volume_folder, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test calling create_volume where volume folder does not exist and # fails to be created res = self.scapi.create_volume( self.volume_name, 1) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) mock_find_volume_folder.assert_called_once_with(True) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_create_volume_failure(self, mock_post, mock_find_volume_folder, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_volume( self.volume_name, 1) mock_find_volume_folder.assert_called_once_with(True) self.assertIsNone(res, 'None expected') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=VOLUME_LIST) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test__get_volume_list_enforce_vol_fldr(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume in the configured volume folder res = self.scapi._get_volume_list(self.volume_name, None, True) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.VOLUME_LIST, res, 'Unexpected volume list') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=VOLUME_LIST) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test__get_volume_list_any_fldr(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume anywhere in the configured SC res = self.scapi._get_volume_list(self.volume_name, None, False) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.VOLUME_LIST, res, 'Unexpected volume list') def test_get_volume_list_no_name_no_id(self, mock_close_connection, 
mock_open_connection, mock_init): # Test case specified volume name is None and device id is None. res = self.scapi._get_volume_list(None, None, True) self.assertIsNone(res, 'None expected') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test__get_volume_list_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume in the configured volume folder res = self.scapi._get_volume_list(self.volume_name, None, True) self.assertTrue(mock_post.called) self.assertIsNone(res, 'None expected') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=VOLUME_LIST) def test_find_volume(self, mock_get_vol_list, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume by name res = self.scapi.find_volume(self.volume_name) self.assertTrue(mock_get_vol_list.called) self.assertEqual(self.VOLUME, res, 'Unexpected volume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=None) def test_find_volume_no_name(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Test calling find_volume with no name or instanceid res = self.scapi.find_volume(None) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list') def test_find_volume_not_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Test calling find_volume with result of no volume found mock_get_volume_list.side_effect = [[], []] res = self.scapi.find_volume(self.volume_name) self.assertIsNone(res, 'None expected') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_import_one', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list') def test_find_volume_complete_replication(self, mock_get_volume_list, mock_import_one, mock_close_connection, mock_open_connection, mock_init): self.scapi.failed_over = True mock_get_volume_list.side_effect = [[], [], self.VOLUME_LIST] res = self.scapi.find_volume(self.volume_name) self.assertEqual(self.VOLUME, res, 'Unexpected volume') self.scapi.failed_over = False @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_import_one', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list') def test_find_volume_complete_replication_fail(self, mock_get_volume_list, mock_import_one, mock_close_connection, mock_open_connection, mock_init): self.scapi.failed_over = True mock_get_volume_list.side_effect = [[], [], self.VOLUME_LIST] self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_volume, self.volume_name) self.scapi.failed_over = False @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list') def test_find_volume_complete_replication_multi(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Test case where multiple repl volumes are found. 
mock_get_volume_list.side_effect = [[], [], self.VOLUME_LIST_MULTI_VOLS] self.scapi.failed_over = True self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_volume, self.volume_name) self.scapi.failed_over = False @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=VOLUME_LIST_MULTI_VOLS) def test_find_volume_multi_vols_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Test case where multiple volumes are found self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_volume, self.volume_name) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=True) @mock.patch.object(dell_storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) def test_delete_volume(self, mock_find_volume, mock_delete, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.delete_volume(self.volume_name) self.assertTrue(mock_delete.called) mock_find_volume.assert_called_once_with(self.volume_name) self.assertTrue(mock_get_json.called) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_400) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) def test_delete_volume_failure(self, mock_find_volume, mock_delete, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.delete_volume, self.volume_name) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) def test_delete_volume_no_vol_found(self, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): # Test case where volume to be deleted does not exist res = self.scapi.delete_volume(self.volume_name) self.assertTrue(res, 'Expected True') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_folder', return_value=SVR_FLDR) def test_find_server_folder(self, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_server_folder(False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', self.configuration.dell_sc_server_folder) self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_create_folder_path', return_value=SVR_FLDR) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_folder', return_value=None) def test_find_server_folder_create_folder(self, mock_find_folder, mock_create_folder_path, mock_close_connection, mock_open_connection, mock_init): # Test case where specified server folder is not found and must be # created res = self.scapi._find_server_folder(True) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', self.configuration.dell_sc_server_folder) self.assertTrue(mock_create_folder_path.called) self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_folder', return_value=None) def test_find_server_folder_fail(self, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where _find_server_folder returns none res = self.scapi._find_server_folder( False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', 
            self.configuration.dell_sc_server_folder)
        self.assertIsNone(res, 'Expected None')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_add_hba(self,
                     mock_post,
                     mock_close_connection,
                     mock_open_connection,
                     mock_init):
        res = self.scapi._add_hba(self.SCSERVER,
                                  self.IQN,
                                  False)
        self.assertTrue(mock_post.called)
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_add_hba_fc(self,
                        mock_post,
                        mock_close_connection,
                        mock_open_connection,
                        mock_init):
        res = self.scapi._add_hba(self.SCSERVER,
                                  self.WWN,
                                  True)
        self.assertTrue(mock_post.called)
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_400)
    def test_add_hba_failure(self,
                             mock_post,
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        res = self.scapi._add_hba(self.SCSERVER,
                                  self.IQN,
                                  False)
        self.assertTrue(mock_post.called)
        self.assertFalse(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=SVR_OS_S)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_find_serveros(self,
                           mock_post,
                           mock_get_json,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        res = self.scapi._find_serveros('Red Hat Linux 6.x')
        self.assertTrue(mock_get_json.called)
        self.assertTrue(mock_post.called)
        self.assertEqual('64702.38', res, 'Wrong InstanceId')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=SVR_OS_S)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_find_serveros_not_found(self,
                                     mock_post,
                                     mock_get_json,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        # Test requesting a Server OS that will not be found
        res = self.scapi._find_serveros('Non existent OS')
        self.assertTrue(mock_get_json.called)
        self.assertTrue(mock_post.called)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_400)
    def test_find_serveros_failed(self,
                                  mock_post,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        res = self.scapi._find_serveros('Red Hat Linux 6.x')
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_add_hba',
                       return_value=FC_HBA)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    def test_create_server_multiple_hbas(self,
                                         mock_create_server,
                                         mock_add_hba,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        res = self.scapi.create_server_multiple_hbas(
            self.WWNS)
        self.assertTrue(mock_create_server.called)
        self.assertTrue(mock_add_hba.called)
        self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_add_hba',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_server_folder',
                       return_value=SVR_FLDR)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_serveros',
                       return_value='64702.38')
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_201)
    def test_create_server(self,
                           mock_post,
                           mock_find_serveros,
                           mock_find_server_folder,
                           mock_first_result,
                           mock_add_hba,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        res = self.scapi.create_server(
            self.IQN,
            False)
        self.assertTrue(mock_find_serveros.called)
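        # create_server resolves the server OS, posts the new ScServer
        # and then attaches the HBA; the remaining asserts check that
        # each mocked collaborator was exercised and that the expected
        # ScServer came back.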
self.assertTrue(mock_find_server_folder.called) self.assertTrue(mock_first_result.called) self.assertTrue(mock_add_hba.called) self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_hba', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_server_folder', return_value=SVR_FLDR) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_serveros', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server_os_not_found(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_server( self.IQN, False) self.assertTrue(mock_find_serveros.called) self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_hba', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_server_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_serveros', return_value='64702.38') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server_fldr_not_found(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_server( self.IQN, False) self.assertTrue(mock_find_server_folder.called) self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_hba', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_server_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_serveros', return_value='64702.38') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_create_server_failure(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_server( self.IQN, False) self.assertIsNone(res, 'None expected') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_hba', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_server_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_serveros', return_value='64702.38') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server_not_found(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): # Test create server where _first_result is None res = self.scapi.create_server( self.IQN, False) self.assertIsNone(res, 'None expected') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_delete_server', return_value=None) 
@mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_hba', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_server_folder', return_value=SVR_FLDR) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_serveros', return_value='64702.38') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server_addhba_fail(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_delete_server, mock_close_connection, mock_open_connection, mock_init): # Tests create server where add hba fails res = self.scapi.create_server( self.IQN, False) self.assertTrue(mock_delete_server.called) self.assertIsNone(res, 'None expected') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_serverhba', return_value=ISCSI_HBA) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_server(self, mock_post, mock_find_serverhba, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_server(self.IQN) self.assertTrue(mock_find_serverhba.called) self.assertTrue(mock_first_result.called) self.assertIsNotNone(res, 'Expected ScServer') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_serverhba', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_server_no_hba(self, mock_post, mock_find_serverhba, mock_close_connection, mock_open_connection, mock_init): # Test case where a ScServer HBA does not exist with the specified IQN # or WWN res = self.scapi.find_server(self.IQN) self.assertTrue(mock_find_serverhba.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_serverhba', return_value=ISCSI_HBA) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_find_server_failure(self, mock_post, mock_find_serverhba, mock_close_connection, mock_open_connection, mock_init): # Test case where a ScServer does not exist with the specified # ScServerHba res = self.scapi.find_server(self.IQN) self.assertTrue(mock_find_serverhba.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=ISCSI_HBA) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_serverhba(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_server(self.IQN) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertIsNotNone(res, 'Expected ScServerHba') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_find_serverhba_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where a ScServer does not exist with the specified # ScServerHba res = self.scapi.find_server(self.IQN) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=ISCSI_FLT_DOMAINS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_domains(self, mock_get, 
mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_domains(u'64702.5764839588723736074.69') self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual( self.ISCSI_FLT_DOMAINS, res, 'Unexpected ScIscsiFaultDomain') @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_204) def test_find_domains_error(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case where get of ScControllerPort FaultDomainList fails res = self.scapi._find_domains(u'64702.5764839588723736074.69') self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=FC_HBAS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_fc_initiators(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_fc_initiators(self.SCSERVER) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertIsNotNone(res, 'Expected WWN list') @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_find_fc_initiators_error(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case where get of ScServer HbaList fails res = self.scapi._find_fc_initiators(self.SCSERVER) self.assertListEqual([], res, 'Expected empty list') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_get_volume_count(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.get_volume_count(self.SCSERVER) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual(len(self.MAPPINGS), res, 'Mapping count mismatch') @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_get_volume_count_failure(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case of where get of ScServer MappingList fails res = self.scapi.get_volume_count(self.SCSERVER) self.assertTrue(mock_get.called) self.assertEqual(-1, res, 'Mapping count not -1') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[]) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_get_volume_count_no_volumes(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.get_volume_count(self.SCSERVER) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual(len([]), res, 'Mapping count mismatch') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_mappings(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_mappings(self.VOLUME) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.MAPPINGS, res, 'Mapping mismatch') @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_mappings_inactive_vol(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test getting volume mappings on inactive volume res = 
            self.scapi._find_mappings(self.INACTIVE_VOLUME)
        self.assertFalse(mock_get.called)
        self.assertEqual([], res, 'No mappings expected')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_400)
    def test_find_mappings_failure(self,
                                   mock_get,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        # Test case where the get of the ScVolume MappingList fails
        res = self.scapi._find_mappings(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertEqual([], res, 'Mapping count not empty')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=[])
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_200)
    def test_find_mappings_no_mappings(self,
                                       mock_get,
                                       mock_get_json,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        # Test case where ScVolume has no mappings
        res = self.scapi._find_mappings(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_get_json.called)
        self.assertEqual([], res, 'Mapping count mismatch')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=MAP_PROFILES)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_200)
    def test_find_mapping_profiles(self,
                                   mock_get,
                                   mock_get_json,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        # Test case where ScVolume has mapping profiles
        res = self.scapi._find_mapping_profiles(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_get_json.called)
        self.assertEqual(self.MAP_PROFILES, res)

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_400)
    def test_find_mapping_profiles_error(self,
                                         mock_get,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        # Test case where the get of the ScVolume mapping profile
        # list fails
        res = self.scapi._find_mapping_profiles(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertEqual([], res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result',
                       return_value=CTRLR_PORT)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_200)
    def test_find_controller_port(self,
                                  mock_get,
                                  mock_first_result,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        res = self.scapi._find_controller_port(
            u'64702.5764839588723736070.51')
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_first_result.called)
        self.assertEqual(self.CTRLR_PORT, res, 'ScControllerPort mismatch')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_204)
    def test_find_controller_port_failure(self,
                                          mock_get,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        # Test case where the get of the ScControllerPort fails
        res = self.scapi._find_controller_port(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_controller_port',
                       return_value=FC_CTRLR_PORT)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
                       return_value=FC_MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_fc_initiators',
                       return_value=WWNS)
    def test_find_wwns(self,
                       mock_find_fc_initiators,
                       mock_find_mappings,
                       mock_find_controller_port,
                       mock_close_connection,
                       mock_open_connection,
                       mock_init):
        lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
                                                self.SCSERVER)
        self.assertTrue(mock_find_fc_initiators.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_controller_port.called)
        # The _find_controller_port is mocked, so all mapping pairs will
        # have the
same WWN for the ScControllerPort itmapCompare = {u'21000024FF30441C': [u'5000D31000FCBE36'], u'21000024FF30441D': [u'5000D31000FCBE36', u'5000D31000FCBE36']} self.assertEqual(1, lun, 'Incorrect LUN') self.assertIsNotNone(wwns, 'WWNs is None') self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=[]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_fc_initiators', return_value=FC_HBAS) def test_find_wwns_no_mappings(self, mock_find_fc_initiators, mock_find_mappings, mock_close_connection, mock_open_connection, mock_init): # Test case where there are no ScMapping(s) lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_fc_initiators.called) self.assertTrue(mock_find_mappings.called) self.assertIsNone(lun, 'Incorrect LUN') self.assertEqual([], wwns, 'WWNs is not empty') self.assertEqual({}, itmap, 'WWN mapping not empty') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=FC_MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_fc_initiators', return_value=WWNS) def test_find_wwns_no_ctlr_port(self, mock_find_fc_initiators, mock_find_mappings, mock_find_controller_port, mock_close_connection, mock_open_connection, mock_init): # Test case where ScControllerPort is none lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_fc_initiators.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_controller_port.called) self.assertIsNone(lun, 'Incorrect LUN') self.assertEqual([], wwns, 'WWNs is not empty') self.assertEqual({}, itmap, 'WWN mapping not empty') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=FC_CTRLR_PORT_WWN_ERROR) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=FC_MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_fc_initiators', return_value=WWNS) def test_find_wwns_wwn_error(self, mock_find_fc_initiators, mock_find_mappings, mock_find_controller_port, mock_close_connection, mock_open_connection, mock_init): # Test case where ScControllerPort object has WWn instead of wwn for a # property lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_fc_initiators.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_controller_port.called) self.assertIsNone(lun, 'Incorrect LUN') self.assertEqual([], wwns, 'WWNs is not empty') self.assertEqual({}, itmap, 'WWN mapping not empty') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=FC_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=FC_MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_fc_initiators', return_value=WWNS_NO_MATCH) # Test case where HBA name is not found in list of initiators def test_find_wwns_hbaname_not_found(self, mock_find_fc_initiators, mock_find_mappings, mock_find_controller_port, mock_close_connection, mock_open_connection, mock_init): lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_fc_initiators.called) self.assertTrue(mock_find_mappings.called) 
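        # With WWNS_NO_MATCH none of the server's HBA names line up with
        # the initiators handed in, so find_wwns presumably falls all the
        # way through to the empty (None, [], {}) result asserted below.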
        self.assertTrue(mock_find_controller_port.called)
        self.assertIsNone(lun, 'Incorrect LUN')
        self.assertEqual([], wwns, 'WWNs is not empty')
        self.assertEqual({}, itmap, 'WWN mapping not empty')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_controller_port',
                       return_value=FC_CTRLR_PORT)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
                       return_value=FC_MAPPINGS_LUN_MISMATCH)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_fc_initiators',
                       return_value=WWNS)
    def test_find_wwns_lun_mismatch(self,
                                    mock_find_fc_initiators,
                                    mock_find_mappings,
                                    mock_find_controller_port,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        # Test case where the FC mappings contain a LUN mismatch.
        lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_fc_initiators.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_controller_port.called)
        # The _find_controller_port is Mocked, so all mapping pairs
        # will have the same WWN for the ScControllerPort.
        itmapCompare = {u'21000024FF30441C': [u'5000D31000FCBE36'],
                        u'21000024FF30441D': [u'5000D31000FCBE36',
                                              u'5000D31000FCBE36']}
        self.assertEqual(1, lun, 'Incorrect LUN')
        self.assertIsNotNone(wwns, 'WWNs is None')
        self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result',
                       return_value=VOLUME_CONFIG)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_200)
    def test_find_active_controller(self,
                                    mock_get,
                                    mock_first_result,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        res = self.scapi._find_active_controller(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_first_result.called)
        self.assertEqual('64702.64703', res, 'Unexpected Active Controller')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_400)
    def test_find_active_controller_failure(self,
                                            mock_get,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        # Test case where the get of the ScVolume configuration fails.
        res = self.scapi._find_active_controller(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertIsNone(res, 'Expected None')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_active_controller',
                       return_value='64702.5764839588723736131.91')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_controller_port',
                       return_value=ISCSI_CTRLR_PORT)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_domains',
                       return_value=ISCSI_FLT_DOMAINS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_is_virtualport_mode',
                       return_value=True)
    def test_find_iscsi_properties_mappings(self,
                                            mock_is_virtualport_mode,
                                            mock_find_mappings,
                                            mock_find_domains,
                                            mock_find_ctrl_port,
                                            mock_find_active_controller,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        res = self.scapi.find_iscsi_properties(self.VOLUME)
        self.assertTrue(mock_is_virtualport_mode.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_domains.called)
        self.assertTrue(mock_find_ctrl_port.called)
        self.assertTrue(mock_find_active_controller.called)
        expected = {'target_discovered': False,
                    'target_iqn':
                        u'iqn.2002-03.com.compellent:5000d31000fcbe43',
                    'target_iqns':
                        [u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
                    'target_lun': 1,
                    'target_luns': [1],
                    'target_portal': u'192.168.0.21:3260',
                    'target_portals':
[u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_by_address(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case to find iSCSI mappings by IP Address & port res = self.scapi.find_iscsi_properties( self.VOLUME, '192.168.0.21', 3260) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_by_address_not_found( self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_ctrl, mock_close_connection, mock_open_connection, mock_init): # Test case to find iSCSI mappings by IP Address & port are not found res = self.scapi.find_iscsi_properties( self.VOLUME, '192.168.1.21', 3260) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_ctrl.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=[]) def test_find_iscsi_properties_no_mapping(self, mock_find_mappings, mock_close_connection, mock_open_connection, mock_init): # Test case where there are no ScMapping(s) self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_iscsi_properties, self.VOLUME) self.assertTrue(mock_find_mappings.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.64702') 
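    # The next two tests each null out a single dependency (the fault
    # domain list here, the controller port just after); in both cases
    # find_iscsi_properties is expected to raise VolumeBackendAPIException
    # rather than return partial target info.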
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_controller_port',
                       return_value=ISCSI_CTRLR_PORT)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_domains',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_is_virtualport_mode',
                       return_value=True)
    def test_find_iscsi_properties_no_domain(self,
                                             mock_is_virtualport_mode,
                                             mock_find_mappings,
                                             mock_find_domains,
                                             mock_find_ctrl_port,
                                             mock_find_active_controller,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        # Test case where there are no ScFaultDomain(s).
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi.find_iscsi_properties,
                          self.VOLUME)
        self.assertTrue(mock_is_virtualport_mode.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_domains.called)
        self.assertTrue(mock_find_ctrl_port.called)
        self.assertTrue(mock_find_active_controller.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_active_controller',
                       return_value='64702.64702')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_controller_port',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_is_virtualport_mode',
                       return_value=True)
    def test_find_iscsi_properties_no_ctrl_port(self,
                                                mock_is_virtualport_mode,
                                                mock_find_mappings,
                                                mock_find_ctrl_port,
                                                mock_find_active_controller,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        # Test case where there is no ScControllerPort.
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi.find_iscsi_properties,
                          self.VOLUME)
        self.assertTrue(mock_is_virtualport_mode.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_ctrl_port.called)
        self.assertTrue(mock_find_active_controller.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_active_controller',
                       return_value='64702.64702')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_controller_port',
                       return_value=ISCSI_CTRLR_PORT)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_domains',
                       return_value=ISCSI_FLT_DOMAINS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mappings',
                       return_value=MAPPINGS_READ_ONLY)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_is_virtualport_mode',
                       return_value=True)
    def test_find_iscsi_properties_ro(self,
                                      mock_is_virtualport_mode,
                                      mock_find_mappings,
                                      mock_find_domains,
                                      mock_find_ctrl_port,
                                      mock_find_active_controller,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        # Test case where Read Only mappings are found.
        res = self.scapi.find_iscsi_properties(self.VOLUME)
        self.assertTrue(mock_is_virtualport_mode.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_domains.called)
        self.assertTrue(mock_find_ctrl_port.called)
        self.assertTrue(mock_find_active_controller.called)
        expected = {'target_discovered': False,
                    'target_iqn':
                        u'iqn.2002-03.com.compellent:5000d31000fcbe43',
                    'target_iqns':
                        [u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
                    'target_lun': 1,
                    'target_luns': [1],
                    'target_portal': u'192.168.0.21:3260',
                    'target_portals': [u'192.168.0.21:3260']}
        self.assertEqual(expected, res, 'Wrong Target Info')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_active_controller',
                       return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_multi_portals(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case where there are multiple portals res = self.scapi.find_iscsi_properties(self.VOLUME) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1, 1, 1, 1], 'target_portal': u'192.168.0.25:3260', 'target_portals': [u'192.168.0.21:3260', u'192.168.0.25:3260', u'192.168.0.21:3260', u'192.168.0.25:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.5764839588723736131.91') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port_iscsi_config', return_value=ISCSI_CONFIG) def test_find_iscsi_properties_mappings_legacy( self, mock_find_controller_port_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_iscsi_properties(self.VOLUME) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_controller_port_iscsi_config.called) self.assertTrue(mock_find_active_controller.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.5764839588723736131.91') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 
'_find_controller_port_iscsi_config', return_value=None) def test_find_iscsi_properties_mappings_legacy_no_iscsi_config( self, mock_find_controller_port_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_iscsi_properties, self.VOLUME) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_controller_port_iscsi_config.called) self.assertTrue(mock_find_active_controller.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port_iscsi_config', return_value=ISCSI_CONFIG) def test_find_iscsi_properties_by_address_legacy( self, mock_find_controller_port_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case to find iSCSI mappings by IP Address & port res = self.scapi.find_iscsi_properties( self.VOLUME, '192.168.0.21', 3260) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_find_controller_port_iscsi_config.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port_iscsi_config', return_value=ISCSI_CONFIG) def test_find_iscsi_properties_by_address_not_found_legacy( self, mock_find_controller_port_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_ctrl, mock_close_connection, mock_open_connection, mock_init): # Test case to find iSCSI mappings by IP Address & port are not found res = self.scapi.find_iscsi_properties( self.VOLUME, '192.168.1.21', 3260) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_ctrl.called) self.assertTrue(mock_find_controller_port_iscsi_config.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': 
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS_READ_ONLY) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port_iscsi_config', return_value=ISCSI_CONFIG) def test_find_iscsi_properties_ro_legacy(self, mock_find_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case where Read Only mappings are found res = self.scapi.find_iscsi_properties(self.VOLUME) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_find_iscsi_config.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_controller_port_iscsi_config', return_value=ISCSI_CONFIG) def test_find_iscsi_properties_multi_portals_legacy( self, mock_find_controller_port_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case where there are multiple portals res = self.scapi.find_iscsi_properties(self.VOLUME) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_controller_port_iscsi_config.called) # Since we're feeding the same info back multiple times the information # will be duped. 
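        # Concretely: one ISCSI_CONFIG fed back for both
        # MAPPINGS_MULTI_PORTAL entries means the single portal/IQN pair
        # shows up twice in target_portals/target_iqns below.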
        expected = {'target_discovered': False,
                    'target_iqn':
                        u'iqn.2002-03.com.compellent:5000d31000fcbe43',
                    'target_iqns':
                        [u'iqn.2002-03.com.compellent:5000d31000fcbe43',
                         u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
                    'target_lun': 1,
                    'target_luns': [1, 1],
                    'target_portal': u'192.168.0.21:3260',
                    'target_portals': [u'192.168.0.21:3260',
                                       u'192.168.0.21:3260']}
        self.assertEqual(expected, res, 'Wrong Target Info')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result',
                       return_value=MAP_PROFILE)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=[])
    def test_map_volume(self,
                        mock_find_mapping_profiles,
                        mock_post,
                        mock_first_result,
                        mock_close_connection,
                        mock_open_connection,
                        mock_init):
        res = self.scapi.map_volume(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_mapping_profiles.called)
        self.assertTrue(mock_post.called)
        self.assertTrue(mock_first_result.called)
        self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result',
                       return_value=MAP_PROFILE)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=MAP_PROFILES)
    def test_map_volume_existing_mapping(self,
                                         mock_find_mappings,
                                         mock_post,
                                         mock_first_result,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        res = self.scapi.map_volume(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_mappings.called)
        self.assertFalse(mock_post.called)
        self.assertFalse(mock_first_result.called)
        self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result',
                       return_value=MAP_PROFILE)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=[])
    def test_map_volume_existing_mapping_not_us(self,
                                                mock_find_mappings,
                                                mock_post,
                                                mock_first_result,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        server = {'instanceId': 64702.48}
        res = self.scapi.map_volume(self.VOLUME, server)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_post.called)
        self.assertTrue(mock_first_result.called)
        self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_id')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result')
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post')
    def test_map_volume_no_vol_id(self,
                                  mock_post,
                                  mock_first_result,
                                  mock_get_id,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        # Test case where the ScVolume instanceId is None.
        mock_get_id.side_effect = [None, '64702.47']
        res = self.scapi.map_volume(self.VOLUME, self.SCSERVER)
        self.assertFalse(mock_post.called)
        self.assertFalse(mock_first_result.called)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_id')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result')
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post')
    def test_map_volume_no_server_id(self,
                                     mock_post,
                                     mock_first_result,
                                     mock_get_id,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        # Test case where the ScServer instanceId is None.
        mock_get_id.side_effect = ['64702.3494', None]
        res = self.scapi.map_volume(self.VOLUME, self.SCSERVER)
        self.assertFalse(mock_post.called)
        self.assertFalse(mock_first_result.called)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_204)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=[])
    def test_map_volume_failure(self,
                                mock_find_mapping_profiles,
                                mock_post,
                                mock_close_connection,
                                mock_open_connection,
                                mock_init):
        # Test case where mapping the volume to the server fails.
        res = self.scapi.map_volume(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_mapping_profiles.called)
        self.assertTrue(mock_post.called)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'delete',
                       return_value=RESPONSE_200)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=MAP_PROFILES)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value={'result': True})
    def test_unmap_volume(self,
                          mock_get_json,
                          mock_find_mapping_profiles,
                          mock_delete,
                          mock_close_connection,
                          mock_open_connection,
                          mock_init):
        res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_mapping_profiles.called)
        self.assertTrue(mock_delete.called)
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=MAP_PROFILES)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'delete',
                       return_value=RESPONSE_204)
    def test_unmap_volume_failure(self,
                                  mock_delete,
                                  mock_find_mapping_profiles,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_mapping_profiles.called)
        self.assertTrue(mock_delete.called)
        self.assertFalse(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=[])
    def test_unmap_volume_no_map_profile(self,
                                         mock_find_mapping_profiles,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_mapping_profiles.called)
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'delete',
                       return_value=RESPONSE_204)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=MAP_PROFILES)
    def test_unmap_volume_del_fail(self,
                                   mock_find_mapping_profiles,
                                   mock_delete,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_mapping_profiles.called)
        self.assertTrue(mock_delete.called)
        self.assertFalse(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_id')
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'delete',
                       return_value=RESPONSE_200)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=MAP_PROFILES)
    def test_unmap_volume_no_vol_id(self,
                                    mock_find_mapping_profiles,
                                    mock_delete,
                                    mock_get_id,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        # Test case where the ScVolume instanceId is None.
        mock_get_id.side_effect = [None, '64702.47']
        res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER)
        self.assertFalse(mock_find_mapping_profiles.called)
        self.assertFalse(mock_delete.called)
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_id')
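    # As with map_volume above, a missing id short-circuits unmap_volume:
    # nothing is deleted, yet the call still reports success (True), since
    # there is no mapping left to remove.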
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'delete',
                       return_value=RESPONSE_200)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_mapping_profiles',
                       return_value=MAP_PROFILES)
    def test_unmap_volume_no_server_id(self,
                                       mock_find_mapping_profiles,
                                       mock_delete,
                                       mock_get_id,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        # Test case where the ScServer instanceId is None.
        mock_get_id.side_effect = ['64702.3494', None]
        res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER)
        self.assertFalse(mock_find_mapping_profiles.called)
        self.assertFalse(mock_delete.called)
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=[{'a': 1}, {'a': 2}])
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_200)
    def test_find_controller_port_iscsi_config(self,
                                               mock_get,
                                               mock_get_json,
                                               mock_close_connection,
                                               mock_open_connection,
                                               mock_init):
        # Not much to test here. Just make sure we call our stuff and
        # that we return the first item returned to us.
        res = self.scapi._find_controller_port_iscsi_config('guid')
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_get_json.called)
        self.assertEqual({'a': 1}, res)

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_400)
    def test_find_controller_port_iscsi_config_err(self,
                                                   mock_get,
                                                   mock_close_connection,
                                                   mock_open_connection,
                                                   mock_init):
        res = self.scapi._find_controller_port_iscsi_config('guid')
        self.assertTrue(mock_get.called)
        self.assertIsNone(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json',
                       return_value=STRG_USAGE)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_200)
    def test_get_storage_usage(self,
                               mock_get,
                               mock_get_json,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        res = self.scapi.get_storage_usage()
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_get_json.called)
        self.assertEqual(self.STRG_USAGE, res, 'Unexpected ScStorageUsage')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_204)
    def test_get_storage_usage_no_ssn(self,
                                      mock_get,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        # Test case where the SSN is None.
        self.scapi.ssn = None
        res = self.scapi.get_storage_usage()
        self.scapi.ssn = 12345
        self.assertFalse(mock_get.called)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get',
                       return_value=RESPONSE_204)
    def test_get_storage_usage_failure(self,
                                       mock_get,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        # Test case where the get of Storage Usage fails.
        res = self.scapi.get_storage_usage()
        self.assertTrue(mock_get.called)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result',
                       return_value=RPLAY)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_create_replay(self,
                           mock_post,
                           mock_first_result,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        res = self.scapi.create_replay(self.VOLUME, 'Test Replay', 60)
        self.assertTrue(mock_post.called)
        self.assertTrue(mock_first_result.called)
        self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_first_result',
                       return_value=RPLAY)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_init_volume')
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_200)
    def test_create_replay_inact_vol(self,
mock_post, mock_init_volume, mock_first_result, mock_close_connection, mock_open_connection, mock_init): # Test case where the specified volume is inactive res = self.scapi.create_replay(self.INACTIVE_VOLUME, 'Test Replay', 60) self.assertTrue(mock_post.called) mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME) self.assertTrue(mock_first_result.called) self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=RPLAY) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_replay_no_expire(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay(self.VOLUME, 'Test Replay', 0) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_replay_no_volume(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where no ScVolume is specified res = self.scapi.create_replay(None, 'Test Replay', 60) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_create_replay_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where create ScReplay fails res = self.scapi.create_replay(self.VOLUME, 'Test Replay', 60) self.assertTrue(mock_post.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=RPLAYS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_replay(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_replay(self.VOLUME, u'Cinder Test Replay012345678910') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.TST_RPLAY, res, 'Unexpected ScReplay') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[]) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_replay_no_replays(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test case where no replays are found res = self.scapi.find_replay(self.VOLUME, u'Cinder Test Replay012345678910') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_204) def test_find_replay_failure(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test case where None is returned for replays res = self.scapi.find_replay(self.VOLUME, u'Cinder Test Replay012345678910') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay', return_value=RPLAYS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_delete_replay(self, mock_post, mock_find_replay, mock_close_connection, mock_open_connection, mock_init): replayId = u'Cinder Test Replay012345678910' res = 
self.scapi.delete_replay(self.VOLUME, replayId) self.assertTrue(mock_post.called) mock_find_replay.assert_called_once_with(self.VOLUME, replayId) self.assertTrue(res, 'Expected True') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_delete_replay_no_replay(self, mock_post, mock_find_replay, mock_close_connection, mock_open_connection, mock_init): # Test case where specified ScReplay does not exist replayId = u'Cinder Test Replay012345678910' res = self.scapi.delete_replay(self.VOLUME, replayId) self.assertFalse(mock_post.called) mock_find_replay.assert_called_once_with(self.VOLUME, replayId) self.assertTrue(res, 'Expected True') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay', return_value=TST_RPLAY) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_delete_replay_failure(self, mock_post, mock_find_replay, mock_close_connection, mock_open_connection, mock_init): # Test case where delete ScReplay results in an error replayId = u'Cinder Test Replay012345678910' res = self.scapi.delete_replay(self.VOLUME, replayId) self.assertTrue(mock_post.called) mock_find_replay.assert_called_once_with(self.VOLUME, replayId) self.assertFalse(res, 'Expected False') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_view_volume(self, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): vol_name = u'Test_create_vol' res = self.scapi.create_view_volume( vol_name, self.TST_RPLAY, None) self.assertTrue(mock_post.called) mock_find_volume_folder.assert_called_once_with(True) self.assertTrue(mock_first_result.called) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_view_volume_create_fldr(self, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): # Test case where volume folder does not exist and must be created vol_name = u'Test_create_vol' res = self.scapi.create_view_volume( vol_name, self.TST_RPLAY, None) self.assertTrue(mock_post.called) mock_find_volume_folder.assert_called_once_with(True) self.assertTrue(mock_first_result.called) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_view_volume_no_vol_fldr(self, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): # Test case where volume folder does not exist and cannot be created vol_name = u'Test_create_vol' res = self.scapi.create_view_volume( vol_name, self.TST_RPLAY, None) self.assertTrue(mock_post.called) 
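        # Presumably the folder create failure is non-fatal here: the
        # lookup (with what appears to be the create flag, True) still
        # happens exactly once, and the view volume is returned regardless.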
        mock_find_volume_folder.assert_called_once_with(True)
        self.assertTrue(mock_first_result.called)
        self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_volume_folder',
                       return_value=FLDR)
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post',
                       return_value=RESPONSE_204)
    def test_create_view_volume_failure(self,
                                        mock_post,
                                        mock_find_volume_folder,
                                        mock_close_connection,
                                        mock_open_connection,
                                        mock_init):
        # Test case where the view volume create fails.
        vol_name = u'Test_create_vol'
        res = self.scapi.create_view_volume(
            vol_name, self.TST_RPLAY, None)
        self.assertTrue(mock_post.called)
        mock_find_volume_folder.assert_called_once_with(True)
        self.assertIsNone(res, 'Expected None')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay',
                       return_value=RPLAY)
    def test_create_cloned_volume(self,
                                  mock_create_replay,
                                  mock_create_view_volume,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        vol_name = u'Test_create_clone_vol'
        res = self.scapi.create_cloned_volume(
            vol_name, self.VOLUME, ['Daily'])
        mock_create_replay.assert_called_once_with(self.VOLUME,
                                                   'Cinder Clone Replay',
                                                   60)
        mock_create_view_volume.assert_called_once_with(
            vol_name, self.RPLAY, ['Daily'])
        self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay')
    def test_create_cloned_volume_failure(self,
                                          mock_create_replay,
                                          mock_create_view_volume,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        # Test case where creating the cloned volume fails because
        # create_replay fails.
        vol_name = u'Test_create_clone_vol'
        mock_create_replay.return_value = None
        res = self.scapi.create_cloned_volume(
            vol_name, self.VOLUME, ['Daily'])
        mock_create_replay.assert_called_once_with(self.VOLUME,
                                                   'Cinder Clone Replay',
                                                   60)
        self.assertFalse(mock_create_view_volume.called)
        self.assertIsNone(res, 'Expected None')
        # Again, but let create_view_volume fail.
mock_create_replay.return_value = self.RPLAY res = self.scapi.create_cloned_volume( vol_name, self.VOLUME, ['Daily']) mock_create_view_volume.assert_called_once_with( vol_name, self.RPLAY, ['Daily']) self.assertIsNone(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_expand_volume(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.expand_volume(self.VOLUME, 550) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_expand_volume_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.expand_volume(self.VOLUME, 550) self.assertTrue(mock_post.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_rename_volume(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.rename_volume(self.VOLUME, 'newname') self.assertTrue(mock_post.called) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_rename_volume_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.rename_volume(self.VOLUME, 'newname') self.assertTrue(mock_post.called) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) def test_delete_server(self, mock_delete, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._delete_server(self.SCSERVER) self.assertTrue(mock_delete.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) def test_delete_server_del_not_allowed(self, mock_delete, mock_close_connection, mock_open_connection, mock_init): # Test case where delete of ScServer not allowed res = self.scapi._delete_server(self.SCSERVER_NO_DEL) self.assertFalse(mock_delete.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value={'test': 'test'}) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_get_user_preferences(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Not really testing anything other than the ability to mock, but # including for completeness. 
res = self.scapi._get_user_preferences() self.assertEqual({'test': 'test'}, res) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_get_user_preferences_failure(self, mock_get, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._get_user_preferences() self.assertEqual({}, res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_user_preferences', return_value=None) def test_update_storage_profile_noprefs(self, mock_prefs, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.update_storage_profile(None, None) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_user_preferences', return_value={'allowStorageProfileSelection': False}) def test_update_storage_profile_not_allowed(self, mock_prefs, mock_close_connection, mock_open_connection, mock_init): LOG = self.mock_object(dell_storagecenter_api, "LOG") res = self.scapi.update_storage_profile(None, None) self.assertFalse(res) self.assertEqual(1, LOG.error.call_count) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_storage_profile', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_user_preferences', return_value={'allowStorageProfileSelection': True}) def test_update_storage_profile_prefs_not_found(self, mock_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): LOG = self.mock_object(dell_storagecenter_api, "LOG") res = self.scapi.update_storage_profile(None, 'Fake') self.assertFalse(res) self.assertEqual(1, LOG.error.call_count) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_user_preferences', return_value={'allowStorageProfileSelection': True, 'storageProfile': None}) def test_update_storage_profile_default_not_found(self, mock_prefs, mock_close_connection, mock_open_connection, mock_init): LOG = self.mock_object(dell_storagecenter_api, "LOG") res = self.scapi.update_storage_profile(None, None) self.assertFalse(res) self.assertEqual(1, LOG.error.call_count) @mock.patch.object( dell_storagecenter_api.StorageCenterApi, '_get_user_preferences', return_value={'allowStorageProfileSelection': True, 'storageProfile': {'name': 'Fake', 'instanceId': 'fakeId'}}) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_update_storage_profile(self, mock_post, mock_prefs, mock_close_connection, mock_open_connection, mock_init): LOG = self.mock_object(dell_storagecenter_api, "LOG") fake_scvolume = {'name': 'name', 'instanceId': 'id'} res = self.scapi.update_storage_profile(fake_scvolume, None) self.assertTrue(res) self.assertTrue('fakeId' in repr(mock_post.call_args_list[0])) self.assertEqual(1, LOG.info.call_count) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[RPLAY_PROFILE]) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_replay_profile(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_replay_profile('guid') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[RPLAY_PROFILE, RPLAY_PROFILE]) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_replay_profile_more_than_one(self, mock_post, mock_get_json, 
mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_replay_profile, 'guid') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[]) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_replay_profile_empty_list(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_replay_profile('guid') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertIsNone(res, 'Unexpected return') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_find_replay_profile_error(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_replay_profile('guid') self.assertTrue(mock_post.called) self.assertIsNone(res, 'Unexpected return') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=RPLAY_PROFILE) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_replay_profile(self, mock_post, mock_first_result, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay_profile('guid') self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=RPLAY_PROFILE) def test_create_replay_profile_exists(self, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay_profile('guid') self.assertTrue(mock_find_replay_profile.called) self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_create_replay_profile_fail(self, mock_post, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay_profile('guid') self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_post.called) self.assertIsNone(res, 'Unexpected return') @mock.patch.object(dell_storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id') def test_delete_replay_profile(self, mock_get_id, mock_delete, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} self.scapi.delete_replay_profile(profile) self.assertTrue(mock_get_id.called) self.assertTrue(mock_delete.called) @mock.patch.object(dell_storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_400) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id') def test_delete_replay_profile_fail(self, mock_get_id, mock_delete, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.delete_replay_profile, profile) self.assertTrue(mock_get_id.called) self.assertTrue(mock_delete.called) 
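    # An illustrative sketch (not driver code) of the mock pattern the
    # _update_volume_profiles tests below lean on: giving side_effect a
    # list makes consecutive calls return successive values, e.g.
    #
    #     mock_get_id.side_effect = ['profile-id', 'config-id', 'vol-id']
    #     mock_get_id(obj)  # first call  -> 'profile-id'
    #     mock_get_id(obj)  # second call -> 'config-id'
    #     mock_get_id(obj)  # third call  -> 'vol-id'
    #
    # That is how one mocked _get_id stands in for three different lookups.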
@mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_first_result', return_value=VOLUME_CONFIG) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id') def test_get_volume_configuration(self, mock_get_id, mock_get, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._get_volume_configuration({}) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get.called) self.assertEqual(self.VOLUME_CONFIG, res, 'Unexpected config') @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id') def test_get_volume_configuration_bad_response(self, mock_get_id, mock_get, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._get_volume_configuration({}) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get.called) self.assertIsNone(res, 'Unexpected result') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_configuration', return_value=VOLUME_CONFIG) @mock.patch.object(dell_storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id') def test_update_volume_profiles(self, mock_get_id, mock_put, mock_get_volume_configuration, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '1'} existingid = self.VOLUME_CONFIG[u'replayProfileList'][0][u'instanceId'] vcid = self.VOLUME_CONFIG[u'instanceId'] # First get_id is for our existing replay profile id and the second # is for the volume config and the last is for the volume id. And # then we do this again for the second call below. mock_get_id.side_effect = [existingid, vcid, scvolume['instanceId'], existingid, vcid, scvolume['instanceId']] newid = '64702.1' expected_payload = {'ReplayProfileList': [newid, existingid]} expected_url = 'StorageCenter/ScVolumeConfiguration/' + vcid res = self.scapi._update_volume_profiles(scvolume, newid, None) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get_volume_configuration.called) mock_put.assert_called_once_with(expected_url, expected_payload) self.assertTrue(res) # Now do a remove. (Restarting with the original config so this will # end up as an empty list.) expected_payload['ReplayProfileList'] = [] res = self.scapi._update_volume_profiles(scvolume, None, existingid) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get_volume_configuration.called) mock_put.assert_called_with(expected_url, expected_payload) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_configuration', return_value=VOLUME_CONFIG) @mock.patch.object(dell_storagecenter_api.HttpClient, 'put', return_value=RESPONSE_400) # We set this to 1 so we can check our payload @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id') def test_update_volume_profiles_bad_response(self, mock_get_id, mock_put, mock_get_volume_configuration, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '1'} existingid = self.VOLUME_CONFIG[u'replayProfileList'][0][u'instanceId'] vcid = self.VOLUME_CONFIG[u'instanceId'] # First get_id is for our existing replay profile id and the second # is for the volume config and the last is for the volume id. And # then we do this again for the second call below. 
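        # Spelled out, that ordering is: replay profile id, volume config
        # id, volume id for the add call, then the same three again for the
        # remove call, six values in all.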
mock_get_id.side_effect = [existingid, vcid, scvolume['instanceId'], existingid, vcid, scvolume['instanceId']] newid = '64702.1' expected_payload = {'ReplayProfileList': [newid, existingid]} expected_url = 'StorageCenter/ScVolumeConfiguration/' + vcid res = self.scapi._update_volume_profiles(scvolume, newid, None) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get_volume_configuration.called) mock_put.assert_called_once_with(expected_url, expected_payload) self.assertFalse(res) # Now do a remove. (Restarting with the original config so this will # end up as an empty list.) expected_payload['ReplayProfileList'] = [] res = self.scapi._update_volume_profiles(scvolume, None, existingid) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get_volume_configuration.called) mock_put.assert_called_with(expected_url, expected_payload) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_configuration', return_value=None) def test_update_volume_profiles_no_config(self, mock_get_volume_configuration, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '1'} res = self.scapi._update_volume_profiles(scvolume, '64702.2', None) self.assertTrue(mock_get_volume_configuration.called) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=999) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_update_volume_profiles', return_value=True) def test_add_cg_volumes(self, mock_update_volume_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): profileid = '100' add_volumes = [{'id': '1'}] res = self.scapi._add_cg_volumes(profileid, add_volumes) self.assertTrue(mock_find_volume.called) mock_update_volume_profiles.assert_called_once_with(999, addid=profileid, removeid=None) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=999) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_update_volume_profiles', return_value=False) def test_add_cg_volumes_fail(self, mock_update_volume_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): profileid = '100' add_volumes = [{'id': '1'}] res = self.scapi._add_cg_volumes(profileid, add_volumes) self.assertTrue(mock_find_volume.called) mock_update_volume_profiles.assert_called_once_with(999, addid=profileid, removeid=None) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=999) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_update_volume_profiles', return_value=True) def test_remove_cg_volumes(self, mock_update_volume_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): profileid = '100' remove_volumes = [{'id': '1'}] res = self.scapi._remove_cg_volumes(profileid, remove_volumes) self.assertTrue(mock_find_volume.called) mock_update_volume_profiles.assert_called_once_with(999, addid=None, removeid=profileid) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=999) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_update_volume_profiles', return_value=False) def test_remove_cg_volumes_false(self, mock_update_volume_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): profileid = '100' remove_volumes = [{'id': '1'}] res = self.scapi._remove_cg_volumes(profileid, remove_volumes) 
self.assertTrue(mock_find_volume.called) mock_update_volume_profiles.assert_called_once_with(999, addid=None, removeid=profileid) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_remove_cg_volumes', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_cg_volumes', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') def test_update_cg_volumes(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [{'id': '1'}] remove_volumes = [{'id': '2'}] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_add_cg_volumes.assert_called_once_with('100', add_volumes) mock_remove_cg_volumes.assert_called_once_with('100', remove_volumes) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_remove_cg_volumes', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_cg_volumes', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') def test_update_cg_volumes_no_remove(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [{'id': '1'}] remove_volumes = [] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_add_cg_volumes.assert_called_once_with('100', add_volumes) self.assertFalse(mock_remove_cg_volumes.called) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_remove_cg_volumes', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_cg_volumes', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') def test_update_cg_volumes_no_add(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [] remove_volumes = [{'id': '1'}] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_remove_cg_volumes.assert_called_once_with('100', remove_volumes) self.assertFalse(mock_add_cg_volumes.called) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_remove_cg_volumes') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_cg_volumes', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') def test_update_cg_volumes_add_fail(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [{'id': '1'}] remove_volumes = [{'id': '2'}] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_add_cg_volumes.assert_called_once_with('100', add_volumes) self.assertTrue(not mock_remove_cg_volumes.called) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_remove_cg_volumes', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_add_cg_volumes', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') def 
test_update_cg_volumes_remove_fail(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [{'id': '1'}] remove_volumes = [{'id': '2'}] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_add_cg_volumes.assert_called_once_with('100', add_volumes) mock_remove_cg_volumes.assert_called_once_with('100', remove_volumes) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[INACTIVE_VOLUME]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_init_volume') def test_init_cg_volumes_inactive(self, mock_init_volume, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): profileid = 100 self.scapi._init_cg_volumes(profileid) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[VOLUME]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_init_volume') def test_init_cg_volumes_active(self, mock_init_volume, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): profileid = 100 self.scapi._init_cg_volumes(profileid) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertFalse(mock_init_volume.called) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_init_cg_volumes') def test_snap_cg_replay(self, mock_init_cg_volumes, mock_get_id, mock_post, mock_close_connection, mock_open_connection, mock_init): replayid = 'guid' expire = 0 profile = {'instanceId': '100'} # See the 100 from get_id above? expected_url = 'StorageCenter/ScReplayProfile/100/CreateReplay' expected_payload = {'description': replayid, 'expireTime': expire} res = self.scapi.snap_cg_replay(profile, replayid, expire) mock_post.assert_called_once_with(expected_url, expected_payload) self.assertTrue(mock_get_id.called) self.assertTrue(mock_init_cg_volumes.called) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_init_cg_volumes') def test_snap_cg_replay_bad_return(self, mock_init_cg_volumes, mock_get_id, mock_post, mock_close_connection, mock_open_connection, mock_init): replayid = 'guid' expire = 0 profile = {'instanceId': '100'} # See the 100 from get_id above? 
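# With the post mocked to RESPONSE_400, snap_cg_replay is expected to
# issue the same CreateReplay request but report the failure by
# returning False (asserted below) instead of raising.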
expected_url = 'StorageCenter/ScReplayProfile/100/CreateReplay' expected_payload = {'description': replayid, 'expireTime': expire} res = self.scapi.snap_cg_replay(profile, replayid, expire) mock_post.assert_called_once_with(expected_url, expected_payload) self.assertTrue(mock_get_id.called) self.assertTrue(mock_init_cg_volumes.called) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=CGS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_sc_cg(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_sc_cg( {}, 'GUID1-0869559e-6881-454e-ba18-15c6726d33c1') self.assertEqual(self.CGS[0], res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=CGS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_sc_cg_not_found(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_sc_cg( {}, 'GUID3-0869559e-6881-454e-ba18-15c6726d33c1') self.assertIsNone(res) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_find_sc_cg_fail(self, mock_get, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_sc_cg( {}, 'GUID1-0869559e-6881-454e-ba18-15c6726d33c1') self.assertIsNone(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_sc_cg', return_value={'instanceId': 101}) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=RPLAYS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get') def test_find_cg_replays(self, mock_get, mock_get_json, mock_find_sc_cg, mock_close_connection, mock_open_connection, mock_init): profile = {'instanceId': '100'} replayid = 'Cinder Test Replay012345678910' res = self.scapi._find_cg_replays(profile, replayid) expected_url = 'StorageCenter/ScReplayConsistencyGroup/101/ReplayList' mock_get.assert_called_once_with(expected_url) self.assertTrue(mock_find_sc_cg.called) self.assertTrue(mock_get_json.called) # We should find RPLAYS. self.assertEqual(self.RPLAYS, res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_sc_cg', return_value=None) def test_find_cg_replays_no_cg(self, mock_find_sc_cg, mock_close_connection, mock_open_connection, mock_init): profile = {'instanceId': '100'} replayid = 'Cinder Test Replay012345678910' res = self.scapi._find_cg_replays(profile, replayid) self.assertTrue(mock_find_sc_cg.called) # We should return an empty list.
self.assertEqual([], res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_sc_cg', return_value={'instanceId': 101}) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=None) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get') def test_find_cg_replays_bad_json(self, mock_get, mock_get_json, mock_find_sc_cg, mock_close_connection, mock_open_connection, mock_init): profile = {'instanceId': '100'} replayid = 'Cinder Test Replay012345678910' res = self.scapi._find_cg_replays(profile, replayid) expected_url = 'StorageCenter/ScReplayConsistencyGroup/101/ReplayList' mock_get.assert_called_once_with(expected_url) self.assertTrue(mock_find_sc_cg.called) self.assertTrue(mock_get_json.called) self.assertIsNone(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_cg_replays', return_value=RPLAYS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_delete_cg_replay(self, mock_post, mock_find_cg_replays, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.delete_cg_replay({}, '') expected_url = ('StorageCenter/ScReplay/' + self.RPLAYS[0]['instanceId'] + '/Expire') mock_post.assert_any_call(expected_url, {}) expected_url = ('StorageCenter/ScReplay/' + self.RPLAYS[1]['instanceId'] + '/Expire') mock_post.assert_any_call(expected_url, {}) self.assertTrue(mock_find_cg_replays.called) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_cg_replays', return_value=RPLAYS) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_delete_cg_replay_error(self, mock_post, mock_find_cg_replays, mock_close_connection, mock_open_connection, mock_init): expected_url = ('StorageCenter/ScReplay/' + self.RPLAYS[0]['instanceId'] + '/Expire') res = self.scapi.delete_cg_replay({}, '') mock_post.assert_called_once_with(expected_url, {}) self.assertTrue(mock_find_cg_replays.called) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_cg_replays', return_value=[]) def test_delete_cg_replay_cant_find(self, mock_find_cg_replays, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.delete_cg_replay({}, '') self.assertTrue(mock_find_cg_replays.called) self.assertTrue(res) def test_size_to_gb(self, mock_close_connection, mock_open_connection, mock_init): gb, rem = self.scapi.size_to_gb('1.073741824E9 Byte') self.assertEqual(1, gb) self.assertEqual(0, rem) self.assertRaises(exception.VolumeBackendAPIException, self.scapi.size_to_gb, 'banana') gb, rem = self.scapi.size_to_gb('1.073741924E9 Byte') self.assertEqual(1, gb) self.assertEqual(100, rem) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_volume_folder') @mock.patch.object(dell_storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=VOLUME) def test_import_one(self, mock_get_json, mock_put, mock_find_volume_folder, mock_close_connection, mock_open_connection, mock_init): newname = 'guid' # First test is folder found. Second is not found.
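# When the folder lookup returns None, _import_one presumably falls
# back to a bare rename payload ({'Name': newname}) with no
# 'VolumeFolder' key; the second call below exercises that path, though
# its payload is not asserted.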
mock_find_volume_folder.side_effect = [{'instanceId': '1'}, None] expected_url = 'StorageCenter/ScVolume/100' expected_payload = {'Name': newname, 'VolumeFolder': '1'} self.scapi._import_one({'instanceId': '100'}, newname) mock_put.assert_called_once_with(expected_url, expected_payload) self.assertTrue(mock_find_volume_folder.called) expected_payload = {'Name': newname} self.scapi._import_one({'instanceId': '100'}, newname) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741824E9 Bytes'}]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'size_to_gb', return_value=(1, 0)) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=[]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_import_one', return_value=VOLUME) def test_manage_existing(self, mock_import_one, mock_find_mappings, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): newname = 'guid' existing = {'source-name': 'scvolname'} self.scapi.manage_existing(newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), None, False) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_size_to_gb.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[]) def test_manage_existing_vol_not_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Same as above only the volume is not found. newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.ManageExistingInvalidReference, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[{}, {}, {}]) def test_manage_existing_vol_multiple_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Same as above only multiple volumes are found. newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.ManageExistingInvalidReference, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741924E9 Bytes'}]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'size_to_gb', return_value=(1, 100)) def test_manage_existing_bad_size(self, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Same as above only the reported size has a remainder.
newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) self.assertTrue(mock_size_to_gb.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741824E9 Bytes'}]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'size_to_gb', return_value=(1, 0)) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=[{}, {}]) def test_manage_existing_already_mapped(self, mock_find_mappings, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_size_to_gb.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741824E9 Bytes'}]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'size_to_gb', return_value=(1, 0)) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_mappings', return_value=[]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_import_one', return_value=None) def test_manage_existing_import_fail(self, mock_import_one, mock_find_mappings, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # We fail the import itself (_import_one returns None) to make this easier.
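# Taken together, the manage_existing tests pin down a validation chain
# that presumably runs (inferred from the assertions, not the driver
# source): no match or multiple matches from _get_volume_list ->
# ManageExistingInvalidReference; a size with a remainder, existing
# mappings, or a failed import -> VolumeBackendAPIException.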
newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_size_to_gb.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741824E9 Bytes'}]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'size_to_gb', return_value=(1, 0)) def test_get_unmanaged_volume_size(self, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): existing = {'source-name': 'scvolname'} res = self.scapi.get_unmanaged_volume_size(existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) self.assertTrue(mock_size_to_gb.called) self.assertEqual(1, res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[]) def test_get_unmanaged_volume_size_not_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): existing = {'source-name': 'scvolname'} self.assertRaises(exception.ManageExistingInvalidReference, self.scapi.get_unmanaged_volume_size, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[{}, {}, {}]) def test_get_unmanaged_volume_size_many_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): existing = {'source-name': 'scvolname'} self.assertRaises(exception.ManageExistingInvalidReference, self.scapi.get_unmanaged_volume_size, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741924E9 Bytes'}]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'size_to_gb', return_value=(1, 100)) def test_get_unmanaged_volume_size_bad_size(self, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): existing = {'source-name': 'scvolname'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.get_unmanaged_volume_size, existing) self.assertTrue(mock_size_to_gb.called) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(dell_storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') def test_unmanage(self, mock_get_id, mock_put, mock_close_connection, mock_open_connection, mock_init): # Unmanage just renames the volume with an 'Unmanaged_' prefix.
scvolume = {'name': 'guid'} expected_url = 'StorageCenter/ScVolume/100' newname = 'Unmanaged_' + scvolume['name'] expected_payload = {'Name': newname} self.scapi.unmanage(scvolume) self.assertTrue(mock_get_id.called) mock_put.assert_called_once_with(expected_url, expected_payload) @mock.patch.object(dell_storagecenter_api.HttpClient, 'put', return_value=RESPONSE_400) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_id', return_value='100') def test_unmanage_fail(self, mock_get_id, mock_put, mock_close_connection, mock_open_connection, mock_init): # Same as above only the rename fails. scvolume = {'name': 'guid'} expected_url = 'StorageCenter/ScVolume/100' newname = 'Unmanaged_' + scvolume['name'] expected_payload = {'Name': newname} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.unmanage, scvolume) self.assertTrue(mock_get_id.called) mock_put.assert_called_once_with(expected_url, expected_payload) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[SCQOS]) # def _find_qos(self, qosnode): def test__find_qos(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi._find_qos('Cinder QoS') self.assertDictEqual(self.SCQOS, ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json') # def _find_qos(self, qosnode): def test__find_qos_not_found(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): # Set side effects for the posts: the first returns empty, the second returns the qosnode. mock_get_json.side_effect = [[], self.SCQOS] ret = self.scapi._find_qos('Cinder QoS') self.assertDictEqual(self.SCQOS, ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) # def _find_qos(self, qosnode): def test__find_qos_find_fail(self, mock_post, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi._find_qos, 'Cinder QoS') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[]) # def _find_qos(self, qosnode): def test__find_qos_create_fail(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_post.side_effect = [self.RESPONSE_200, self.RESPONSE_400] self.assertRaises(exception.VolumeBackendAPIException, self.scapi._find_qos, 'Cinder QoS') @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=SCREPL) def test_get_screplication(self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.get_screplication({'instanceId': '1'}, 65495) self.assertDictEqual(self.SCREPL[0], ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[]) def test_get_screplication_not_found(self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.get_screplication({'instanceId': '1'}, 65496) self.assertIsNone(ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def
test_get_screplication_error(self, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.get_screplication({'instanceId': '1'}, 65495) self.assertIsNone(ret) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_screplication', return_value=SCREPL[0]) @mock.patch.object(dell_storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) def test_delete_replication(self, mock_delete, mock_get_screplication, mock_close_connection, mock_open_connection, mock_init): destssn = 65495 expected = 'StorageCenter/ScReplication/%s' % ( self.SCREPL[0]['instanceId']) ret = self.scapi.delete_replication(self.VOLUME, destssn) mock_delete.assert_any_call(expected) self.assertTrue(ret) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_screplication', return_value=None) def test_delete_replication_not_found(self, mock_get_screplication, mock_close_connection, mock_open_connection, mock_init): destssn = 65495 ret = self.scapi.delete_replication(self.VOLUME, destssn) self.assertFalse(ret) ret = self.scapi.delete_replication(self.VOLUME, destssn) self.assertFalse(ret) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_screplication', return_value=SCREPL[0]) @mock.patch.object(dell_storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_400) def test_delete_replication_error(self, mock_delete, mock_get_screplication, mock_close_connection, mock_open_connection, mock_init): destssn = 65495 expected = 'StorageCenter/ScReplication/%s' % ( self.SCREPL[0]['instanceId']) ret = self.scapi.delete_replication(self.VOLUME, destssn) mock_delete.assert_any_call(expected) self.assertFalse(ret) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_qos', return_value=SCQOS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=SCREPL[0]) def test_create_replication(self, mock_get_json, mock_post, mock_find_sc, mock_find_qos, mock_close_connection, mock_open_connection, mock_init): # We don't test diskfolder. If one is found we include it. If not # then we leave it out. Checking for disk folder is tested elsewhere. 
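# The three create_replication calls below presumably map the
# (synchronous, replicate_active) arguments onto the payload like so
# (a hedged reading of the assertions, not of the driver source):
# (False, False) -> Asynchronous with ReplicateActiveReplay False,
# while (True, False) and (True, True) both -> Synchronous with
# ReplicateActiveReplay True, i.e. synchronous replication always
# replicates the active replay.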
ssn = 64702 destssn = 65495 qosnode = 'Cinder QoS' notes = 'Created by Dell Cinder Driver' repl_prefix = 'Cinder repl of ' mock_find_sc.side_effect = [destssn, ssn, destssn, ssn, destssn, ssn] payload = {'DestinationStorageCenter': destssn, 'QosNode': self.SCQOS['instanceId'], 'SourceVolume': self.VOLUME['instanceId'], 'StorageCenter': ssn, 'ReplicateActiveReplay': False, 'Type': 'Asynchronous', 'DestinationVolumeAttributes': {'CreateSourceVolumeFolderPath': True, 'Notes': notes, 'Name': repl_prefix + self.VOLUME['name']} } ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, False, None, False) mock_post.assert_any_call('StorageCenter/ScReplication', payload) self.assertDictEqual(self.SCREPL[0], ret) payload['Type'] = 'Synchronous' payload['ReplicateActiveReplay'] = True ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, True, None, False) mock_post.assert_any_call('StorageCenter/ScReplication', payload) self.assertDictEqual(self.SCREPL[0], ret) ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, True, None, True) mock_post.assert_any_call('StorageCenter/ScReplication', payload) self.assertDictEqual(self.SCREPL[0], ret) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_qos', return_value=SCQOS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc') @mock.patch.object(dell_storagecenter_api.HttpClient, 'post') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=SCREPL[0]) def test_create_replication_error(self, mock_get_json, mock_post, mock_find_sc, mock_find_qos, mock_close_connection, mock_open_connection, mock_init): ssn = 64702 destssn = 65495 qosnode = 'Cinder QoS' notes = 'Created by Dell Cinder Driver' repl_prefix = 'Cinder repl of ' mock_find_sc.side_effect = [destssn, ssn, destssn, ssn] mock_post.side_effect = [self.RESPONSE_400, self.RESPONSE_400, self.RESPONSE_400, self.RESPONSE_400] payload = {'DestinationStorageCenter': destssn, 'QosNode': self.SCQOS['instanceId'], 'SourceVolume': self.VOLUME['instanceId'], 'StorageCenter': ssn, 'ReplicateActiveReplay': False, 'Type': 'Asynchronous', 'DestinationVolumeAttributes': {'CreateSourceVolumeFolderPath': True, 'Notes': notes, 'Name': repl_prefix + self.VOLUME['name']} } ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, False, None, False) mock_post.assert_any_call('StorageCenter/ScReplication', payload) self.assertIsNone(ret) payload['Type'] = 'Synchronous' payload['ReplicateActiveReplay'] = True ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, True, None, True) mock_post.assert_any_call('StorageCenter/ScReplication', payload) self.assertIsNone(ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=SCREPL) def test_find_repl_volume(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.find_repl_volume('guid', 65495) self.assertDictEqual(self.SCREPL[0], ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[]) def test_find_repl_volume_empty_list(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.find_repl_volume('guid', 65495) self.assertIsNone(ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', 
return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=[{'instanceId': '1'}, {'instanceId': '2'}]) def test_find_repl_volume_multiple_results(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.find_repl_volume('guid', 65495) self.assertIsNone(ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_find_repl_volume_error(self, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.find_repl_volume('guid', 65495) self.assertIsNone(ret) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_screplication') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_repl_volume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'remove_mappings') def test_break_replication(self, mock_remove_mappings, mock_find_volume, mock_find_repl_volume, mock_get_screplication, mock_close_connection, mock_open_connection, mock_init): # Find_volume doesn't actually matter. We do not gate on this. # Switch it up just to prove that. mock_find_volume.side_effect = [self.VOLUME, # 1 self.VOLUME, # 2 None, # 3 None] # 4 # Much like find volume we do not gate on this. mock_get_screplication.side_effect = [self.SCREPL[0], # 1 None, # 2 None, # 3 None] # 4 # This one is always found. mock_find_repl_volume.side_effect = [self.VOLUME, # 1 self.VOLUME, # 2 self.VOLUME, # 3 self.VOLUME] # 4 mock_remove_mappings.side_effect = [True, # 1 True, True, # 2 False, True, # 3 True, False] # 4 # Good path. ret = self.scapi.break_replication('name', 65495) self.assertTrue(ret) # Source found, screpl not found. ret = self.scapi.break_replication('name', 65495) self.assertTrue(ret) # No source vol good path.
ret = self.scapi.break_replication('name', 65495) self.assertTrue(ret) # Fail remove mappings. ret = self.scapi.break_replication('name', 65495) self.assertFalse(ret) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_user_preferences') def test__find_user_replay_profiles(self, mock_get_user_preferences, mock_close_connection, mock_open_connection, mock_init): mock_get_user_preferences.return_value = {} ret = self.scapi._find_user_replay_profiles() self.assertEqual([], ret) mock_get_user_preferences.return_value = {'test': 'test', 'replayProfileList': []} ret = self.scapi._find_user_replay_profiles() self.assertEqual([], ret) mock_get_user_preferences.return_value = { 'test': 'test', 'replayProfileList': [{'instanceId': 'a'}, {'instanceId': 'b'}]} ret = self.scapi._find_user_replay_profiles() self.assertEqual(['a', 'b'], ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json') def test__find_daily_replay_profile(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [{'instanceId': 'a'}] ret = self.scapi._find_daily_replay_profile() self.assertEqual('a', ret) mock_get_json.return_value = [] ret = self.scapi._find_daily_replay_profile() self.assertIsNone(ret) mock_get_json.return_value = None ret = self.scapi._find_daily_replay_profile() self.assertIsNone(ret) mock_post.return_value = self.RESPONSE_400 ret = self.scapi._find_daily_replay_profile() self.assertIsNone(ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json') def test__find_replay_profiles(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): # Good run. rps = 'a,b' mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [{'name': 'a', 'instanceId': 'a'}, {'name': 'b', 'instanceId': 'b'}, {'name': 'c', 'instanceId': 'c'}] reta, retb = self.scapi._find_replay_profiles(rps) self.assertEqual(['a', 'b'], reta) self.assertEqual(['c'], retb) # Looking for profile that doesn't exist. rps = 'a,b,d' self.assertRaises(exception.VolumeBackendAPIException, self.scapi._find_replay_profiles, rps) # Looking for nothing. rps = '' reta, retb = self.scapi._find_replay_profiles(rps) self.assertEqual([], reta) self.assertEqual([], retb) # Still looking for nothing. rps = None reta, retb = self.scapi._find_replay_profiles(rps) self.assertEqual([], reta) self.assertEqual([], retb) # Bad call.
rps = 'a,b' mock_post.return_value = self.RESPONSE_400 self.assertRaises(exception.VolumeBackendAPIException, self.scapi._find_replay_profiles, rps) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_replay_profiles') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_user_replay_profiles') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_find_daily_replay_profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_update_volume_profiles') def test_update_replay_profiles(self, mock_update_volume_profiles, mock_find_daily_replay_profile, mock_find_user_replay_profiles, mock_find_replay_profiles, mock_close_connection, mock_open_connection, mock_init): scvol = {} mock_find_replay_profiles.return_value = (['a', 'b'], ['c']) mock_update_volume_profiles.side_effect = [ True, True, True, False, True, True, False, True, True, True, True, True, True, True, True, True, False] ret = self.scapi.update_replay_profiles(scvol, 'a,b') # Two adds and one remove. self.assertEqual(3, mock_update_volume_profiles.call_count) self.assertTrue(ret) # Now update fails. ret = self.scapi.update_replay_profiles(scvol, 'a,b') # 1 failed update plus 3 from before. self.assertEqual(4, mock_update_volume_profiles.call_count) self.assertFalse(ret) # Fail adding Ids. ret = self.scapi.update_replay_profiles(scvol, 'a,b') # 3 more plus 4 from before. self.assertEqual(7, mock_update_volume_profiles.call_count) self.assertFalse(ret) # User clearing profiles. mock_find_replay_profiles.return_value = ([], ['a', 'b', 'c']) mock_find_user_replay_profiles.return_value = ['d', 'u'] ret = self.scapi.update_replay_profiles(scvol, '') # 3 removes and 2 adds plus 7 from before. self.assertEqual(12, mock_update_volume_profiles.call_count) self.assertTrue(ret) # User clearing profiles and no defaults. (Probably not possible.) mock_find_user_replay_profiles.return_value = [] mock_find_daily_replay_profile.return_value = 'd' ret = self.scapi.update_replay_profiles(scvol, '') # 3 removes and 1 add plus 12 from before. self.assertEqual(16, mock_update_volume_profiles.call_count) self.assertTrue(ret) # _find_replay_profiles blows up so we do too.
mock_find_replay_profiles.side_effect = ( exception.VolumeBackendAPIException('aaa')) self.assertRaises(exception.VolumeBackendAPIException, self.scapi.update_replay_profiles, scvol, 'a,b') @mock.patch.object(dell_storagecenter_api.HttpClient, 'put') def test_manage_replay(self, mock_put, mock_close_connection, mock_open_connection, mock_init): screplay = {'description': 'notguid', 'instanceId': 1} payload = {'description': 'guid', 'expireTime': 0} mock_put.return_value = self.RESPONSE_200 ret = self.scapi.manage_replay(screplay, 'guid') self.assertTrue(ret) mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload) mock_put.return_value = self.RESPONSE_400 ret = self.scapi.manage_replay(screplay, 'guid') self.assertFalse(ret) @mock.patch.object(dell_storagecenter_api.HttpClient, 'put') def test_unmanage_replay(self, mock_put, mock_close_connection, mock_open_connection, mock_init): screplay = {'description': 'guid', 'instanceId': 1} payload = {'expireTime': 1440} mock_put.return_value = self.RESPONSE_200 ret = self.scapi.unmanage_replay(screplay) self.assertTrue(ret) mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload) mock_put.return_value = self.RESPONSE_400 ret = self.scapi.unmanage_replay(screplay) self.assertFalse(ret) class DellSCSanAPIConnectionTestCase(test.TestCase): """DellSCSanAPIConnectionTestCase Class to test the Storage Center API connection using Mock. """ # Create a Response object that indicates OK response_ok = models.Response() response_ok.status_code = 200 response_ok.reason = u'ok' RESPONSE_200 = response_ok # Create a Response object with no content response_nc = models.Response() response_nc.status_code = 204 response_nc.reason = u'duplicate' RESPONSE_204 = response_nc # Create a Response object that is a pure error. response_bad = models.Response() response_bad.status_code = 400 response_bad.reason = u'bad request' RESPONSE_400 = response_bad APIDICT = {u'instanceId': u'0', u'hostName': u'192.168.0.200', u'userId': 434226, u'connectionKey': u'', u'minApiVersion': u'0.1', u'webServicesPort': 3033, u'locale': u'en_US', u'objectType': u'ApiConnection', u'secureString': u'', u'applicationVersion': u'2.0.1', u'source': u'REST', u'commandLine': False, u'application': u'Cinder REST Driver', u'sessionKey': 1436460614863, u'provider': u'EnterpriseManager', u'instanceName': u'ApiConnection', u'connected': True, u'userName': u'Admin', u'useHttps': False, u'providerVersion': u'15.3.1.186', u'apiVersion': u'2.2', u'apiBuild': 199} def setUp(self): super(DellSCSanAPIConnectionTestCase, self).setUp() # Configuration is a mock. A mock is pretty much a blank # slate. I believe mocks done in setUp are not happy-time # mocks. So we just do a few things like driver config here. self.configuration = mock.Mock() self.configuration.san_is_local = False self.configuration.san_ip = "192.168.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "mmm" self.configuration.dell_sc_ssn = 12345 self.configuration.dell_sc_server_folder = 'openstack' self.configuration.dell_sc_volume_folder = 'openstack' # Note that we set this to True even though we do not # test this functionality. This is sent directly to # the requests calls as the verify parameter and, as # that is a third-party library that is deeply stubbed # out, it is not directly testable by this code. Note # that in the case that this fails the driver fails to # even come up.
self.configuration.dell_sc_verify_cert = True self.configuration.dell_sc_api_port = 3033 self.configuration.iscsi_ip_address = '192.168.1.1' self.configuration.iscsi_port = 3260 self._context = context.get_admin_context() self.apiversion = '2.0' # Set up the StorageCenterApi self.scapi = dell_storagecenter_api.StorageCenterApi( self.configuration.san_ip, self.configuration.dell_sc_api_port, self.configuration.san_login, self.configuration.san_password, self.configuration.dell_sc_verify_cert, self.apiversion) # Set up the scapi configuration vars self.scapi.ssn = self.configuration.dell_sc_ssn self.scapi.sfname = self.configuration.dell_sc_server_folder self.scapi.vfname = self.configuration.dell_sc_volume_folder @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=APIDICT) def test_open_connection(self, mock_get_json, mock_post): self.scapi.open_connection() self.assertTrue(mock_post.called) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_check_version_fail', return_value=RESPONSE_400) def test_open_connection_failure(self, mock_check_version_fail, mock_post): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.open_connection) self.assertTrue(mock_check_version_fail.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_check_version_fail', return_value=RESPONSE_200) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '_get_json', return_value=APIDICT) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_open_connection_sc(self, mock_post, mock_get_json, mock_check_version_fail): self.scapi.open_connection() self.assertTrue(mock_check_version_fail.called) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_close_connection(self, mock_post): self.scapi.close_connection() self.assertTrue(mock_post.called) @mock.patch.object(dell_storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_close_connection_failure(self, mock_post): self.scapi.close_connection() self.assertTrue(mock_post.called) cinder-8.0.0/cinder/tests/unit/test_backup_ceph.py0000664000567000056710000014157212701406250023375 0ustar jenkinsjenkins00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests for Ceph backup service.""" import hashlib import os import tempfile import uuid import mock from oslo_concurrency import processutils from oslo_serialization import jsonutils import six from six.moves import range from cinder.backup import driver from cinder.backup.drivers import ceph from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import test from cinder.volume.drivers import rbd as rbddriver # This is used to collect raised exceptions so that tests may check what was # raised. # NOTE: this must be initialised in test setUp(). RAISED_EXCEPTIONS = [] class MockException(Exception): def __init__(self, *args, **kwargs): RAISED_EXCEPTIONS.append(self.__class__) class MockImageNotFoundException(MockException): """Used as mock for rbd.ImageNotFound.""" class MockImageBusyException(MockException): """Used as mock for rbd.ImageBusy.""" class MockObjectNotFoundException(MockException): """Used as mock for rados.MockObjectNotFoundException.""" def common_mocks(f): """Decorator to set mocks common to all tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. """ def _common_inner_inner1(inst, *args, **kwargs): # NOTE(dosaboy): mock Popen to, by default, raise Exception in order to # ensure that any test ending up in a subprocess fails # if not properly mocked. @mock.patch('subprocess.Popen', spec=True) # NOTE(dosaboy): mock out eventlet.sleep() so that it does nothing. @mock.patch('eventlet.sleep', spec=True) @mock.patch('time.time', spec=True) # NOTE(dosaboy): set spec to empty object so that hasattr calls return # False by default. @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd, mock_time, mock_sleep, mock_popen): mock_time.side_effect = inst.time_inc mock_popen.side_effect = Exception inst.mock_rados = mock_rados inst.mock_rbd = mock_rbd inst.mock_rbd.ImageBusy = MockImageBusyException inst.mock_rbd.ImageNotFound = MockImageNotFoundException inst.service.rbd = inst.mock_rbd inst.service.rados = inst.mock_rados return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 class BackupCephTestCase(test.TestCase): """Test case for ceph backup driver.""" def _create_volume_db_entry(self, id, size): vol = {'id': id, 'size': size, 'status': 'available'} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, backupid, volid, size, userid=str(uuid.uuid4()), projectid=str(uuid.uuid4())): backup = {'id': backupid, 'size': size, 'volume_id': volid, 'user_id': userid, 'project_id': projectid} return db.backup_create(self.ctxt, backup)['id'] def time_inc(self): self.counter += 1 return self.counter def _get_wrapped_rbd_io(self, rbd_image): rbd_meta = rbddriver.RBDImageMetadata(rbd_image, 'pool_foo', 'user_foo', 'conf_foo') return rbddriver.RBDImageIOWrapper(rbd_meta) def _setup_mock_popen(self, mock_popen, retval=None, p1hook=None, p2hook=None): class MockPopen(object): hooks = [p2hook, p1hook] def __init__(mock_inst, cmd, *args, **kwargs): self.callstack.append('popen_init') mock_inst.stdout = mock.Mock() mock_inst.stdout.close = mock.Mock() mock_inst.stdout.close.side_effect = \ lambda *args: self.callstack.append('stdout_close') mock_inst.returncode = 0 hook = mock_inst.__class__.hooks.pop() if hook is not None: hook() def communicate(mock_inst): self.callstack.append('communicate') return retval 
mock_popen.side_effect = MockPopen def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(BackupCephTestCase, self).setUp() self.ctxt = context.get_admin_context() # Create volume. self.volume_size = 1 self.volume_id = str(uuid.uuid4()) self._create_volume_db_entry(self.volume_id, self.volume_size) self.volume = db.volume_get(self.ctxt, self.volume_id) # Create backup of volume. self.backup_id = str(uuid.uuid4()) self._create_backup_db_entry(self.backup_id, self.volume_id, self.volume_size) self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id) # Create alternate volume. self.alt_volume_id = str(uuid.uuid4()) self._create_volume_db_entry(self.alt_volume_id, self.volume_size) self.alt_volume = db.volume_get(self.ctxt, self.alt_volume_id) self.chunk_size = 1024 self.num_chunks = 128 self.data_length = self.num_chunks * self.chunk_size self.checksum = hashlib.sha256() # Create a file with some data in it. self.volume_file = tempfile.NamedTemporaryFile() self.addCleanup(self.volume_file.close) for _i in range(0, self.num_chunks): data = os.urandom(self.chunk_size) self.checksum.update(data) self.volume_file.write(data) self.volume_file.seek(0) # Always trigger an exception if a command is executed since it should # always be dealt with gracefully. At time of writing only rbd # export/import-diff is executed and if they fail we expect to find # alternative means of backing up. mock_exec = mock.Mock() mock_exec.side_effect = processutils.ProcessExecutionError self.service = ceph.CephBackupDriver(self.ctxt, execute=mock_exec) # Ensure that time.time() always returns more than the last time it was # called to avoid div by zero errors. self.counter = float(0) self.callstack = [] @common_mocks def test_get_rbd_support(self): del self.service.rbd.RBD_FEATURE_LAYERING del self.service.rbd.RBD_FEATURE_STRIPINGV2 self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_LAYERING')) self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_STRIPINGV2')) oldformat, features = self.service._get_rbd_support() self.assertTrue(oldformat) self.assertEqual(0, features) self.service.rbd.RBD_FEATURE_LAYERING = 1 oldformat, features = self.service._get_rbd_support() self.assertFalse(oldformat) self.assertEqual(1, features) self.service.rbd.RBD_FEATURE_STRIPINGV2 = 2 oldformat, features = self.service._get_rbd_support() self.assertFalse(oldformat) self.assertEqual(1 | 2, features) @common_mocks def test_get_most_recent_snap(self): last = 'backup.%s.snap.9824923.1212' % (uuid.uuid4()) image = self.mock_rbd.Image.return_value image.list_snaps.return_value = \ [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())}, {'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())}, {'name': last}, {'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}] snap = self.service._get_most_recent_snap(image) self.assertEqual(last, snap) @common_mocks def test_get_backup_snap_name(self): snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4()) def get_backup_snaps(inst, *args): return [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4()), 'backup_id': str(uuid.uuid4())}, {'name': snap_name, 'backup_id': self.backup_id}] with mock.patch.object(self.service, 'get_backup_snaps'): name = self.service._get_backup_snap_name(self.service.rbd.Image(), 'base_foo', self.backup_id) self.assertIsNone(name) with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.side_effect = get_backup_snaps name = self.service._get_backup_snap_name(self.service.rbd.Image(),
'base_foo', self.backup_id) self.assertEqual(snap_name, name) self.assertTrue(mock_get_backup_snaps.called) @common_mocks def test_get_backup_snaps(self): image = self.mock_rbd.Image.return_value image.list_snaps.return_value = [ {'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())}, {'name': 'backup.%s.wambam.6423868.2342' % (uuid.uuid4())}, {'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())}, {'name': 'bbbackup.%s.snap.1321319.3235' % (uuid.uuid4())}, {'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}] snaps = self.service.get_backup_snaps(image) self.assertEqual(3, len(snaps)) @common_mocks def test_transfer_data_from_rbd_to_file(self): def fake_read(offset, length): self.volume_file.seek(offset) return self.volume_file.read(length) self.mock_rbd.Image.return_value.read.side_effect = fake_read self.mock_rbd.Image.return_value.size.return_value = self.data_length with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) self.service._transfer_data(rbd_io, 'src_foo', test_file, 'dest_foo', self.data_length) checksum = hashlib.sha256() test_file.seek(0) for _c in range(0, self.num_chunks): checksum.update(test_file.read(self.chunk_size)) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_transfer_data_from_rbd_to_rbd(self): def fake_read(offset, length): self.volume_file.seek(offset) return self.volume_file.read(length) def mock_write_data(data, offset): checksum.update(data) test_file.write(data) rbd1 = mock.Mock() rbd1.read.side_effect = fake_read rbd1.size.return_value = os.fstat(self.volume_file.fileno()).st_size rbd2 = mock.Mock() rbd2.write.side_effect = mock_write_data with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) checksum = hashlib.sha256() src_rbd_io = self._get_wrapped_rbd_io(rbd1) dest_rbd_io = self._get_wrapped_rbd_io(rbd2) self.service._transfer_data(src_rbd_io, 'src_foo', dest_rbd_io, 'dest_foo', self.data_length) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_transfer_data_from_file_to_rbd(self): def mock_write_data(data, offset): checksum.update(data) test_file.write(data) self.mock_rbd.Image.return_value.write.side_effect = mock_write_data with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) checksum = hashlib.sha256() rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) self.service._transfer_data(self.volume_file, 'src_foo', rbd_io, 'dest_foo', self.data_length) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_transfer_data_from_file_to_file(self): with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) checksum = hashlib.sha256() self.service._transfer_data(self.volume_file, 'src_foo', test_file, 'dest_foo', self.data_length) checksum = hashlib.sha256() test_file.seek(0) for _c in range(0, self.num_chunks): checksum.update(test_file.read(self.chunk_size)) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_backup_volume_from_file(self): checksum = hashlib.sha256() def mock_write_data(data, offset): checksum.update(data) test_file.write(data) self.service.rbd.Image.return_value.write.side_effect = mock_write_data with mock.patch.object(self.service, '_backup_metadata'): with mock.patch.object(self.service, '_discard_bytes'): with tempfile.NamedTemporaryFile() as 
test_file: self.service.backup(self.backup, self.volume_file) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) self.assertTrue(self.service.rbd.Image.return_value.write.called) @common_mocks def test_get_backup_base_name(self): name = self.service._get_backup_base_name(self.volume_id, diff_format=True) self.assertEqual("volume-%s.backup.base" % (self.volume_id), name) self.assertRaises(exception.InvalidParameterValue, self.service._get_backup_base_name, self.volume_id) name = self.service._get_backup_base_name(self.volume_id, '1234') self.assertEqual("volume-%s.backup.%s" % (self.volume_id, '1234'), name) @common_mocks @mock.patch('fcntl.fcntl', spec=True) @mock.patch('subprocess.Popen', spec=True) def test_backup_volume_from_rbd(self, mock_popen, mock_fnctl): backup_name = self.service._get_backup_base_name(self.backup_id, diff_format=True) def mock_write_data(): self.volume_file.seek(0) data = self.volume_file.read(self.data_length) self.callstack.append('write') checksum.update(data) test_file.write(data) def mock_read_data(): self.callstack.append('read') return self.volume_file.read(self.data_length) self._setup_mock_popen(mock_popen, ['out', 'err'], p1hook=mock_read_data, p2hook=mock_write_data) self.mock_rbd.RBD.list = mock.Mock() self.mock_rbd.RBD.list.return_value = [backup_name] with mock.patch.object(self.service, '_backup_metadata'): with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: with mock.patch.object(self.service, '_full_backup') as \ mock_full_backup: with mock.patch.object(self.service, '_try_delete_base_image'): with tempfile.NamedTemporaryFile() as test_file: checksum = hashlib.sha256() image = self.service.rbd.Image() meta = rbddriver.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = rbddriver.RBDImageIOWrapper(meta) self.service.backup(self.backup, rbdio) self.assertEqual(['popen_init', 'read', 'popen_init', 'write', 'stdout_close', 'communicate'], self.callstack) self.assertFalse(mock_full_backup.called) self.assertTrue(mock_get_backup_snaps.called) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks @mock.patch('fcntl.fcntl', spec=True) @mock.patch('subprocess.Popen', spec=True) def test_backup_volume_from_rbd_fail(self, mock_popen, mock_fnctl): """Test of when an exception occurs in an exception handler. In _backup_rbd(), after an exception.BackupRBDOperationFailed occurs in self._rbd_diff_transfer(), we want to check the process when the second exception occurs in self._try_delete_base_image(). """ backup_name = self.service._get_backup_base_name(self.backup_id, diff_format=True) def mock_write_data(): self.volume_file.seek(0) data = self.volume_file.read(self.data_length) self.callstack.append('write') checksum.update(data) test_file.write(data) def mock_read_data(): self.callstack.append('read') return self.volume_file.read(self.data_length) self._setup_mock_popen(mock_popen, ['out', 'err'], p1hook=mock_read_data, p2hook=mock_write_data) self.mock_rbd.RBD.list = mock.Mock() self.mock_rbd.RBD.list.return_value = [backup_name] with mock.patch.object(self.service, 'get_backup_snaps'), \ mock.patch.object(self.service, '_rbd_diff_transfer') as \ mock_rbd_diff_transfer: def mock_rbd_diff_transfer_side_effect(src_name, src_pool, dest_name, dest_pool, src_user, src_conf, dest_user, dest_conf, src_snap, from_snap): raise exception.BackupRBDOperationFailed(_('mock')) # Raise a pseudo exception.BackupRBDOperationFailed. 
    @common_mocks
    @mock.patch('fcntl.fcntl', spec=True)
    @mock.patch('subprocess.Popen', spec=True)
    def test_backup_volume_from_rbd_fail(self, mock_popen, mock_fcntl):
        """Test of when an exception occurs in an exception handler.

        In _backup_rbd(), after an exception.BackupRBDOperationFailed
        occurs in self._rbd_diff_transfer(), we want to check the process
        when the second exception occurs in self._try_delete_base_image().
        """
        backup_name = self.service._get_backup_base_name(self.backup_id,
                                                         diff_format=True)

        def mock_write_data():
            self.volume_file.seek(0)
            data = self.volume_file.read(self.data_length)
            self.callstack.append('write')
            checksum.update(data)
            test_file.write(data)

        def mock_read_data():
            self.callstack.append('read')
            return self.volume_file.read(self.data_length)

        self._setup_mock_popen(mock_popen,
                               ['out', 'err'],
                               p1hook=mock_read_data,
                               p2hook=mock_write_data)

        self.mock_rbd.RBD.list = mock.Mock()
        self.mock_rbd.RBD.list.return_value = [backup_name]

        with mock.patch.object(self.service, 'get_backup_snaps'), \
                mock.patch.object(self.service, '_rbd_diff_transfer') as \
                mock_rbd_diff_transfer:
            def mock_rbd_diff_transfer_side_effect(src_name, src_pool,
                                                   dest_name, dest_pool,
                                                   src_user, src_conf,
                                                   dest_user, dest_conf,
                                                   src_snap, from_snap):
                raise exception.BackupRBDOperationFailed(_('mock'))

            # Raise a pseudo exception.BackupRBDOperationFailed.
            mock_rbd_diff_transfer.side_effect = \
                mock_rbd_diff_transfer_side_effect

            with mock.patch.object(self.service, '_full_backup'), \
                    mock.patch.object(self.service,
                                      '_try_delete_base_image') as \
                    mock_try_delete_base_image:
                def mock_try_delete_base_image_side_effect(backup_id,
                                                           volume_id,
                                                           base_name):
                    raise self.service.rbd.ImageNotFound(_('mock'))

                # Raise a pseudo exception rbd.ImageNotFound.
                mock_try_delete_base_image.side_effect = \
                    mock_try_delete_base_image_side_effect
                with mock.patch.object(self.service, '_backup_metadata'):
                    with tempfile.NamedTemporaryFile() as test_file:
                        checksum = hashlib.sha256()
                        image = self.service.rbd.Image()
                        meta = rbddriver.RBDImageMetadata(image,
                                                          'pool_foo',
                                                          'user_foo',
                                                          'conf_foo')
                        rbdio = rbddriver.RBDImageIOWrapper(meta)

                        # We expect that the second exception is
                        # notified.
                        self.assertRaises(
                            self.service.rbd.ImageNotFound,
                            self.service.backup,
                            self.backup, rbdio)

    @common_mocks
    @mock.patch('fcntl.fcntl', spec=True)
    @mock.patch('subprocess.Popen', spec=True)
    def test_backup_volume_from_rbd_fail2(self, mock_popen, mock_fcntl):
        """Test of when an exception occurs in an exception handler.

        In backup(), after an exception.BackupOperationError occurs in
        self._backup_metadata(), we want to check the process when the
        second exception occurs in self.delete().
        """
        backup_name = self.service._get_backup_base_name(self.backup_id,
                                                         diff_format=True)

        def mock_write_data():
            self.volume_file.seek(0)
            data = self.volume_file.read(self.data_length)
            self.callstack.append('write')
            checksum.update(data)
            test_file.write(data)

        def mock_read_data():
            self.callstack.append('read')
            return self.volume_file.read(self.data_length)

        self._setup_mock_popen(mock_popen,
                               ['out', 'err'],
                               p1hook=mock_read_data,
                               p2hook=mock_write_data)

        self.mock_rbd.RBD.list = mock.Mock()
        self.mock_rbd.RBD.list.return_value = [backup_name]

        with mock.patch.object(self.service, 'get_backup_snaps'), \
                mock.patch.object(self.service, '_rbd_diff_transfer'), \
                mock.patch.object(self.service, '_full_backup'), \
                mock.patch.object(self.service, '_backup_metadata') as \
                mock_backup_metadata:
            def mock_backup_metadata_side_effect(backup):
                raise exception.BackupOperationError(_('mock'))

            # Raise a pseudo exception.BackupOperationError.
            mock_backup_metadata.side_effect = mock_backup_metadata_side_effect

            with mock.patch.object(self.service, 'delete') as mock_delete:
                def mock_delete_side_effect(backup):
                    raise self.service.rbd.ImageBusy()

                # Raise a pseudo exception rbd.ImageBusy.
                mock_delete.side_effect = mock_delete_side_effect

                with tempfile.NamedTemporaryFile() as test_file:
                    checksum = hashlib.sha256()
                    image = self.service.rbd.Image()
                    meta = rbddriver.RBDImageMetadata(image,
                                                      'pool_foo',
                                                      'user_foo',
                                                      'conf_foo')
                    rbdio = rbddriver.RBDImageIOWrapper(meta)

                    # We expect that the second exception is
                    # notified.
                    self.assertRaises(
                        self.service.rbd.ImageBusy,
                        self.service.backup,
                        self.backup, rbdio)

    @common_mocks
    def test_backup_vol_length_0(self):
        volume_id = str(uuid.uuid4())
        self._create_volume_db_entry(volume_id, 0)
        backup_id = str(uuid.uuid4())
        self._create_backup_db_entry(backup_id, volume_id, 1)
        backup = objects.Backup.get_by_id(self.ctxt, backup_id)

        self.assertRaises(exception.InvalidParameterValue,
                          self.service.backup, backup, self.volume_file)

    @common_mocks
    def test_restore(self):
        backup_name = self.service._get_backup_base_name(self.backup_id,
                                                         diff_format=True)

        self.mock_rbd.RBD.return_value.list.return_value = [backup_name]

        def mock_read_data(offset, length):
            return self.volume_file.read(self.data_length)

        self.mock_rbd.Image.return_value.read.side_effect = mock_read_data

        self.mock_rbd.Image.return_value.size.return_value = \
            self.chunk_size * self.num_chunks

        with mock.patch.object(self.service, '_restore_metadata') as \
                mock_restore_metadata:
            with mock.patch.object(self.service, '_discard_bytes') as \
                    mock_discard_bytes:
                with tempfile.NamedTemporaryFile() as test_file:
                    self.volume_file.seek(0)

                    self.service.restore(self.backup, self.volume_id,
                                         test_file)

                    checksum = hashlib.sha256()
                    test_file.seek(0)
                    for _c in range(0, self.num_chunks):
                        checksum.update(test_file.read(self.chunk_size))

                    # Ensure the files are equal
                    self.assertEqual(checksum.digest(),
                                     self.checksum.digest())

                    self.assertTrue(mock_restore_metadata.called)
                    self.assertTrue(mock_discard_bytes.called)

        self.assertTrue(self.service.rbd.Image.return_value.read.called)
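    # _discard_bytes is exercised next; the expectations below encode two
    # paths: a true RBD destination gets image.discard(), while a non-RBD
    # destination gets zeroes written chunk_size at a time, with one extra
    # short write (and flush) when the length is not a chunk multiple.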
    @common_mocks
    def test_discard_bytes(self):
        # Lower the chunk size to a memory-manageable number
        self.service.chunk_size = 1024
        image = self.mock_rbd.Image.return_value
        wrapped_rbd = self._get_wrapped_rbd_io(image)

        self.service._discard_bytes(wrapped_rbd, 0, 0)
        self.assertEqual(0, image.discard.call_count)

        self.service._discard_bytes(wrapped_rbd, 0, 1234)
        self.assertEqual(1, image.discard.call_count)
        image.reset_mock()

        # Test discard with no remainder
        with mock.patch.object(self.service, '_file_is_rbd') as \
                mock_file_is_rbd:
            mock_file_is_rbd.return_value = False

            self.service._discard_bytes(wrapped_rbd, 0,
                                        self.service.chunk_size * 2)

            self.assertEqual(2, image.write.call_count)
            self.assertEqual(2, image.flush.call_count)
            self.assertFalse(image.discard.called)
            image.reset_mock()

        # Now test with a remainder.
        with mock.patch.object(self.service, '_file_is_rbd') as \
                mock_file_is_rbd:
            mock_file_is_rbd.return_value = False

            self.service._discard_bytes(wrapped_rbd, 0,
                                        (self.service.chunk_size * 2) + 1)

            self.assertEqual(3, image.write.call_count)
            self.assertEqual(3, image.flush.call_count)
            self.assertFalse(image.discard.called)

    @common_mocks
    def test_delete_backup_snapshot(self):
        snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())
        base_name = self.service._get_backup_base_name(self.volume_id,
                                                       diff_format=True)
        self.mock_rbd.RBD.remove_snap = mock.Mock()

        with mock.patch.object(self.service, '_get_backup_snap_name') as \
                mock_get_backup_snap_name:
            mock_get_backup_snap_name.return_value = snap_name
            with mock.patch.object(self.service, 'get_backup_snaps') as \
                    mock_get_backup_snaps:
                mock_get_backup_snaps.return_value = None
                rem = self.service._delete_backup_snapshot(self.mock_rados,
                                                           base_name,
                                                           self.backup_id)

                self.assertTrue(mock_get_backup_snap_name.called)
                self.assertTrue(mock_get_backup_snaps.called)
                self.assertEqual((snap_name, 0), rem)

    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_try_delete_base_image_diff_format(self, mock_meta_backup):
        backup_name = self.service._get_backup_base_name(self.volume_id,
                                                         diff_format=True)

        self.mock_rbd.RBD.return_value.list.return_value = [backup_name]

        with mock.patch.object(self.service, '_delete_backup_snapshot') as \
                mock_del_backup_snap:
            snap_name = self.service._get_new_snap_name(self.backup_id)
            mock_del_backup_snap.return_value = (snap_name, 0)

            self.service.delete(self.backup)
            self.assertTrue(mock_del_backup_snap.called)

        self.assertTrue(self.mock_rbd.RBD.return_value.list.called)
        self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)

    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_try_delete_base_image(self, mock_meta_backup):
        backup_name = self.service._get_backup_base_name(self.volume_id,
                                                         self.backup_id)

        self.mock_rbd.RBD.return_value.list.return_value = [backup_name]

        with mock.patch.object(self.service, 'get_backup_snaps'):
            self.service.delete(self.backup)
            self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)

    @common_mocks
    def test_try_delete_base_image_busy(self):
        """This should induce retries then raise rbd.ImageBusy."""
        backup_name = self.service._get_backup_base_name(self.volume_id,
                                                         self.backup_id)

        rbd = self.mock_rbd.RBD.return_value
        rbd.list.return_value = [backup_name]
        rbd.remove.side_effect = self.mock_rbd.ImageBusy

        with mock.patch.object(self.service, 'get_backup_snaps') as \
                mock_get_backup_snaps:
            self.assertRaises(self.mock_rbd.ImageBusy,
                              self.service._try_delete_base_image,
                              self.backup['id'], self.backup['volume_id'])
            self.assertTrue(mock_get_backup_snaps.called)

        self.assertTrue(rbd.list.called)
        self.assertTrue(rbd.remove.called)
        self.assertTrue(MockImageBusyException in RAISED_EXCEPTIONS)

    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_delete(self, mock_meta_backup):
        with mock.patch.object(self.service, '_try_delete_base_image'):
            self.service.delete(self.backup)
            self.assertEqual([], RAISED_EXCEPTIONS)
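    # RAISED_EXCEPTIONS is a module-level list; the mock rbd exception
    # classes used here (MockImageBusyException, MockImageNotFoundException,
    # ...) are assumed to record themselves in it when raised - their
    # definitions live earlier in this module, outside this excerpt. That
    # lets tests assert not just that a call failed but which fake
    # exception type actually fired.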
    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_delete_image_not_found(self, mock_meta_backup):
        with mock.patch.object(self.service, '_try_delete_base_image') as \
                mock_del_base:
            mock_del_base.side_effect = self.mock_rbd.ImageNotFound
            # ImageNotFound exception is caught so that the db entry can be
            # cleared
            self.service.delete(self.backup)
            self.assertEqual([MockImageNotFoundException], RAISED_EXCEPTIONS)

    @common_mocks
    def test_diff_restore_allowed_with_image_not_exists(self):
        """Test diff restore not allowed when backup not diff-format."""
        not_allowed = (False, None)
        backup_base = 'backup.base'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]

        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (False, backup_base)

            resp = self.service._diff_restore_allowed(*args_vols_different)

            self.assertEqual(not_allowed, resp)
            mock_rbd_image_exists.assert_called_once_with(
                backup_base, self.backup['volume_id'], self.mock_rados)

    @common_mocks
    def test_diff_restore_allowed_with_no_restore_point(self):
        """Test diff restore not allowed when no restore point found.

        Detail conditions:
          1. backup base is diff-format
          2. restore point does not exist
        """
        not_allowed = (False, None)
        backup_base = 'backup.base'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]

        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = None

                args = args_vols_different
                resp = self.service._diff_restore_allowed(*args)

                self.assertEqual(not_allowed, resp)
                self.assertTrue(mock_rbd_image_exists.called)
                mock_get_restore_point.assert_called_once_with(
                    backup_base, self.backup['id'])

    @common_mocks
    def test_diff_restore_allowed_with_not_rbd(self):
        """Test diff restore not allowed when destination volume is not rbd.

        Detail conditions:
          1. backup base is diff-format
          2. restore point exists
          3. destination volume is not an rbd.
        """
        backup_base = 'backup.base'
        restore_point = 'backup.snap.1'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]

        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = restore_point
                with mock.patch.object(self.service, '_file_is_rbd') as \
                        mock_file_is_rbd:
                    mock_file_is_rbd.return_value = False

                    args = args_vols_different
                    resp = self.service._diff_restore_allowed(*args)

                    self.assertEqual((False, restore_point), resp)
                    self.assertTrue(mock_rbd_image_exists.called)
                    self.assertTrue(mock_get_restore_point.called)
                    mock_file_is_rbd.assert_called_once_with(rbd_io)

    @common_mocks
    def test_diff_restore_allowed_with_same_volume(self):
        """Test diff restore not allowed when volumes are the same.

        Detail conditions:
          1. backup base is diff-format
          2. restore point exists
          3. destination volume is an rbd
          4. source and destination volumes are the same
        """
        backup_base = 'backup.base'
        restore_point = 'backup.snap.1'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_same = [backup_base, self.backup, self.volume, rbd_io,
                          self.mock_rados]

        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = restore_point
                with mock.patch.object(self.service, '_file_is_rbd') as \
                        mock_file_is_rbd:
                    mock_file_is_rbd.return_value = True

                    resp = self.service._diff_restore_allowed(*args_vols_same)

                    self.assertEqual((False, restore_point), resp)
                    self.assertTrue(mock_rbd_image_exists.called)
                    self.assertTrue(mock_get_restore_point.called)
                    self.assertTrue(mock_file_is_rbd.called)

    @common_mocks
    def test_diff_restore_allowed_with_has_extents(self):
        """Test diff restore not allowed when destination volume has data.

        Detail conditions:
          1. backup base is diff-format
          2. restore point exists
          3. destination volume is an rbd
          4. source and destination volumes are different
          5. destination volume has data on it - full copy is mandated
        """
        backup_base = 'backup.base'
        restore_point = 'backup.snap.1'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]

        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = restore_point
                with mock.patch.object(self.service, '_file_is_rbd') as \
                        mock_file_is_rbd:
                    mock_file_is_rbd.return_value = True
                    with mock.patch.object(self.service, '_rbd_has_extents') \
                            as mock_rbd_has_extents:
                        mock_rbd_has_extents.return_value = True

                        args = args_vols_different
                        resp = self.service._diff_restore_allowed(*args)

                        self.assertEqual((False, restore_point), resp)
                        self.assertTrue(mock_rbd_image_exists.called)
                        self.assertTrue(mock_get_restore_point.called)
                        self.assertTrue(mock_file_is_rbd.called)
                        mock_rbd_has_extents.assert_called_once_with(
                            rbd_io.rbd_image)
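    # Taken together, the _diff_restore_allowed tests pin the gate down as
    # a short-circuiting chain: the backup base must exist in diff format,
    # a restore point must exist, the destination must be an rbd volume, it
    # must differ from the source volume, and it must have no allocated
    # extents; only then is (True, restore_point) returned, as the final
    # test below demonstrates.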
    @common_mocks
    def test_diff_restore_allowed_with_no_extents(self):
        """Test diff restore allowed when no data in destination volume.

        Detail conditions:
          1. backup base is diff-format
          2. restore point exists
          3. destination volume is an rbd
          4. source and destination volumes are different
          5. destination volume has no data on it
        """
        backup_base = 'backup.base'
        restore_point = 'backup.snap.1'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]

        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = restore_point
                with mock.patch.object(self.service, '_file_is_rbd') as \
                        mock_file_is_rbd:
                    mock_file_is_rbd.return_value = True
                    with mock.patch.object(self.service, '_rbd_has_extents') \
                            as mock_rbd_has_extents:
                        mock_rbd_has_extents.return_value = False

                        args = args_vols_different
                        resp = self.service._diff_restore_allowed(*args)

                        self.assertEqual((True, restore_point), resp)
                        self.assertTrue(mock_rbd_image_exists.called)
                        self.assertTrue(mock_get_restore_point.called)
                        self.assertTrue(mock_file_is_rbd.called)
                        self.assertTrue(mock_rbd_has_extents.called)

    @common_mocks
    @mock.patch('fcntl.fcntl', spec=True)
    @mock.patch('subprocess.Popen', spec=True)
    def test_piped_execute(self, mock_popen, mock_fcntl):
        mock_fcntl.return_value = 0
        self._setup_mock_popen(mock_popen, ['out', 'err'])
        self.service._piped_execute(['foo'], ['bar'])
        self.assertEqual(['popen_init', 'popen_init', 'stdout_close',
                          'communicate'], self.callstack)

    @common_mocks
    def test_restore_metadata(self):
        version = 2

        def mock_read(*args):
            base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
            glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
            return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
                                    glance_tag: {'image_name':
                                                 'image.glance'},
                                    'version': version})

        self.mock_rados.Object.return_value.read.side_effect = mock_read

        self.service._restore_metadata(self.backup, self.volume_id)

        self.assertTrue(self.mock_rados.Object.return_value.stat.called)
        self.assertTrue(self.mock_rados.Object.return_value.read.called)

        version = 3
        try:
            self.service._restore_metadata(self.backup, self.volume_id)
        except exception.BackupOperationError as exc:
            msg = _("Metadata restore failed due to incompatible version")
            self.assertEqual(msg, six.text_type(exc))
        else:
            # Force a test failure
            self.assertFalse(True)

    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_backup_metadata_already_exists(self, mock_meta_backup):
        def mock_set(json_meta):
            msg = (_("Metadata backup object '%s' already exists") %
                   ("backup.%s.meta" % (self.backup_id)))
            raise exception.VolumeMetadataBackupExists(msg)

        mock_meta_backup.return_value.set = mock.Mock()
        mock_meta_backup.return_value.set.side_effect = mock_set

        with mock.patch.object(self.service, 'get_metadata') as \
                mock_get_metadata:
            mock_get_metadata.return_value = "some.json.metadata"
            try:
                self.service._backup_metadata(self.backup)
            except exception.BackupOperationError as e:
                msg = (_("Failed to backup volume metadata - Metadata backup "
                         "object 'backup.%s.meta' already exists") %
                       (self.backup_id))
                self.assertEqual(msg, six.text_type(e))
            else:
                # Make the test fail
                self.assertFalse(True)

        self.assertFalse(mock_meta_backup.set.called)
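    # The next two tests cover the cleanup contract around metadata: if
    # _backup_metadata() fails partway through a backup, the driver is
    # expected to call delete() to roll back the data already written, and
    # a version field it does not recognise must abort the restore rather
    # than import metadata it cannot interpret.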
""" with mock.patch.object(self.service, '_backup_metadata') as \ mock_backup_metadata: mock_backup_metadata.side_effect = exception.BackupOperationError with mock.patch.object(self.service, '_get_volume_size_gb'): with mock.patch.object(self.service, '_file_is_rbd', return_value=False): with mock.patch.object(self.service, '_full_backup'): with mock.patch.object(self.service, 'delete') as \ mock_delete: self.assertRaises(exception.BackupOperationError, self.service.backup, self.backup, mock.Mock(), backup_metadata=True) self.assertTrue(mock_delete.called) @common_mocks def test_restore_invalid_metadata_version(self): def mock_read(*args): base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META return jsonutils.dumps({base_tag: {'image_name': 'image.base'}, glance_tag: {'image_name': 'image.glance'}, 'version': 3}) self.mock_rados.Object.return_value.read.side_effect = mock_read with mock.patch.object(ceph.VolumeMetadataBackup, '_exists') as \ mock_exists: mock_exists.return_value = True self.assertRaises(exception.BackupOperationError, self.service._restore_metadata, self.backup, self.volume_id) self.assertTrue(mock_exists.called) self.assertTrue(self.mock_rados.Object.return_value.read.called) def common_meta_backup_mocks(f): """Decorator to set mocks common to all metadata backup tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. """ def _common_inner_inner1(inst, *args, **kwargs): @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd): inst.mock_rados = mock_rados inst.mock_rbd = mock_rbd inst.mock_rados.ObjectNotFound = MockObjectNotFoundException return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 class VolumeMetadataBackupTestCase(test.TestCase): def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(VolumeMetadataBackupTestCase, self).setUp() self.backup_id = str(uuid.uuid4()) self.mb = ceph.VolumeMetadataBackup(mock.Mock(), self.backup_id) @common_meta_backup_mocks def test_name(self): self.assertEqual('backup.%s.meta' % (self.backup_id), self.mb.name) @common_meta_backup_mocks def test_exists(self): # True self.assertTrue(self.mb.exists) self.assertTrue(self.mock_rados.Object.return_value.stat.called) self.mock_rados.Object.return_value.reset_mock() # False self.mock_rados.Object.return_value.stat.side_effect = ( self.mock_rados.ObjectNotFound) self.assertFalse(self.mb.exists) self.assertTrue(self.mock_rados.Object.return_value.stat.called) self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS) @common_meta_backup_mocks def test_set(self): obj_data = [] called = [] def mock_read(*args): called.append('read') self.assertTrue(len(obj_data) == 1) return obj_data[0] def _mock_write(data): obj_data.append(data) called.append('write') self.mb.get = mock.Mock() self.mb.get.side_effect = mock_read with mock.patch.object(ceph.VolumeMetadataBackup, 'set') as mock_write: mock_write.side_effect = _mock_write self.mb.set({'foo': 'bar'}) self.assertEqual({'foo': 'bar'}, self.mb.get()) self.assertTrue(self.mb.get.called) self.mb._exists = mock.Mock() self.mb._exists.return_value = True # use the unmocked set() method. self.assertRaises(exception.VolumeMetadataBackupExists, self.mb.set, {'doo': 'dah'}) # check the meta obj state has not changed. 
class VolumeMetadataBackupTestCase(test.TestCase):

    def setUp(self):
        global RAISED_EXCEPTIONS
        RAISED_EXCEPTIONS = []
        super(VolumeMetadataBackupTestCase, self).setUp()
        self.backup_id = str(uuid.uuid4())
        self.mb = ceph.VolumeMetadataBackup(mock.Mock(), self.backup_id)

    @common_meta_backup_mocks
    def test_name(self):
        self.assertEqual('backup.%s.meta' % (self.backup_id), self.mb.name)

    @common_meta_backup_mocks
    def test_exists(self):
        # True
        self.assertTrue(self.mb.exists)
        self.assertTrue(self.mock_rados.Object.return_value.stat.called)
        self.mock_rados.Object.return_value.reset_mock()

        # False
        self.mock_rados.Object.return_value.stat.side_effect = (
            self.mock_rados.ObjectNotFound)
        self.assertFalse(self.mb.exists)
        self.assertTrue(self.mock_rados.Object.return_value.stat.called)
        self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)

    @common_meta_backup_mocks
    def test_set(self):
        obj_data = []
        called = []

        def mock_read(*args):
            called.append('read')
            self.assertTrue(len(obj_data) == 1)
            return obj_data[0]

        def _mock_write(data):
            obj_data.append(data)
            called.append('write')

        self.mb.get = mock.Mock()
        self.mb.get.side_effect = mock_read
        with mock.patch.object(ceph.VolumeMetadataBackup, 'set') as \
                mock_write:
            mock_write.side_effect = _mock_write

            self.mb.set({'foo': 'bar'})
            self.assertEqual({'foo': 'bar'}, self.mb.get())
            self.assertTrue(self.mb.get.called)

        self.mb._exists = mock.Mock()
        self.mb._exists.return_value = True

        # use the unmocked set() method.
        self.assertRaises(exception.VolumeMetadataBackupExists, self.mb.set,
                          {'doo': 'dah'})

        # check the meta obj state has not changed.
        self.assertEqual({'foo': 'bar'}, self.mb.get())
        self.assertEqual(['write', 'read', 'read'], called)

    @common_meta_backup_mocks
    def test_get(self):
        self.mock_rados.Object.return_value.stat.side_effect = (
            self.mock_rados.ObjectNotFound)
        self.mock_rados.Object.return_value.read.return_value = 'meta'
        self.assertIsNone(self.mb.get())

        self.mock_rados.Object.return_value.stat.side_effect = None
        self.assertEqual('meta', self.mb.get())

    @common_meta_backup_mocks
    def remove_if_exists(self):
        with mock.patch.object(self.mock_rados.Object, 'remove') as \
                mock_remove:
            mock_remove.side_effect = self.mock_rados.ObjectNotFound
            self.mb.remove_if_exists()
            self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)

            self.mock_rados.Object.remove.side_effect = None
            self.mb.remove_if_exists()
            self.assertEqual([], RAISED_EXCEPTIONS)
cinder-8.0.0/cinder/tests/unit/test_hitachi_hnas_nfs.py0000664000567000056710000005253112701406257024424 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import os
import tempfile

import mock
import six

from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hnas_nfs as nfs
from cinder.volume.drivers import nfs as drivernfs
from cinder.volume.drivers import remotefs
from cinder.volume import volume_types

SHARESCONF = """172.17.39.132:/cinder
172.17.39.133:/cinder"""

HNASCONF = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password>supervisor</password>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
    <hdp>172.17.39.133:/cinder</hdp>
  </svc_1>
</config>
"""

HNAS_WRONG_CONF1 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password>supervisor</password>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
</config>
"""

HNAS_WRONG_CONF2 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password>supervisor</password>
  <svc_0>
    <volume_type>default</volume_type>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
  </svc_1>
</config>
"""

HNAS_WRONG_CONF3 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>supervisor</username>
  <password></password>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
    <hdp>172.17.39.133:/cinder</hdp>
  </svc_1>
</config>
"""

HNAS_WRONG_CONF4 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>super</username>
  <password>supervisor</password>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
  <svc_4>
    <volume_type>silver</volume_type>
    <hdp>172.17.39.133:/cinder</hdp>
  </svc_4>
</config>
"""

HNAS_FULL_CONF = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
  <hnas_cmd>ssc</hnas_cmd>
  <mgmt_ip0>172.17.44.15</mgmt_ip0>
  <username>super</username>
  <password>supervisor</password>
  <chap_enabled>True</chap_enabled>
  <ssh_port>2222</ssh_port>
  <ssh_enabled>True</ssh_enabled>
  <ssh_private_key>/etc/cinder/ssh_priv</ssh_private_key>
  <cluster_admin_ip0>10.0.0.1</cluster_admin_ip0>
  <svc_0>
    <volume_type>default</volume_type>
    <hdp>172.17.39.132:/cinder</hdp>
  </svc_0>
  <svc_1>
    <volume_type>silver</volume_type>
    <hdp>172.17.39.133:/cinder/silver</hdp>
  </svc_1>
  <svc_2>
    <volume_type>gold</volume_type>
    <hdp>172.17.39.133:/cinder/gold</hdp>
  </svc_2>
  <svc_3>
    <volume_type>platinum</volume_type>
    <hdp>172.17.39.133:/cinder/platinum</hdp>
  </svc_3>
</config>
"""

# The following information is passed on to tests when creating a volume
_SERVICE = ('Test_hdp', 'Test_path', 'Test_label')
_SHARE = '172.17.39.132:/cinder'
_SHARE2 = '172.17.39.133:/cinder'
_EXPORT = '/cinder'
_VOLUME = {'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
           'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190',
           'size': 128,
           'volume_type': 'silver',
           'volume_type_id': 'test',
           'metadata': [{'key': 'type',
                         'service_label': 'silver'}],
           'provider_location': None,
           'id': 'bcc48c61-9691-4e5f-897c-793686093190',
           'status': 'available',
           'host': 'host1@hnas-iscsi-backend#silver'}

_SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
               'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc',
               'size': 128,
               'volume_type': None,
               'provider_location': None,
               'volume_size': 128,
               'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
               'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191',
               'host': 'host1@hnas-iscsi-backend#silver'}
_VOLUME_NFS = {'name': 'volume-61da3-8d23-4bb9-3136-ca819d89e7fc',
               'id': '61da3-8d23-4bb9-3136-ca819d89e7fc',
               'size': 4,
               'metadata': [{'key': 'type',
                             'service_label': 'silver'}],
               'volume_type': 'silver',
               'volume_type_id': 'silver',
               'provider_location': '172.24.44.34:/silver/',
               'volume_size': 128,
               'host': 'host1@hnas-nfs#silver'}

GET_ID_VOL = {
    ("bcc48c61-9691-4e5f-897c-793686093190"): [_VOLUME],
    ("bcc48c61-9691-4e5f-897c-793686093191"): [_SNAPVOLUME]
}


def id_to_vol(arg):
    return GET_ID_VOL.get(arg)


class SimulatedHnasBackend(object):
    """Simulated back end that talks to HNAS."""

    # these attributes are shared across object instances
    start_lun = 0

    def __init__(self):
        self.type = 'HNAS'
        self.out = ''

    def file_clone(self, cmd, ip0, user, pw, fslabel, source_path,
                   target_path):
        return ""

    def get_version(self, ver, cmd, ip0, user, pw):
        self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \
                   "version: 11.2.3319.09 LU: 256 " \
                   "RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01"
        return self.out

    def get_hdp_info(self, ip0, user, pw):
        self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: 70 " \
                   "Normal fs1\n" \
                   "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 " \
                   "Normal fs2"
        return self.out

    def get_nfs_info(self, cmd, ip0, user, pw):
        self.out = "Export: /cinder Path: /volumes HDP: fs1 FSID: 1024 " \
                   "EVS: 1 IPS: 172.17.39.132\n" \
                   "Export: /cinder Path: /volumes HDP: fs2 FSID: 1025 " \
                   "EVS: 1 IPS: 172.17.39.133"
        return self.out


class HDSNFSDriverTest(test.TestCase):
    """Test HNAS NFS volume driver."""

    def __init__(self, *args, **kwargs):
        super(HDSNFSDriverTest, self).__init__(*args, **kwargs)

    @mock.patch.object(nfs, 'factory_bend')
    def setUp(self, m_factory_bend):
        super(HDSNFSDriverTest, self).setUp()

        self.backend = SimulatedHnasBackend()
        m_factory_bend.return_value = self.backend

        self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
        self.addCleanup(self.config_file.close)
        self.config_file.write(HNASCONF)
        self.config_file.flush()

        self.shares_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
        self.addCleanup(self.shares_file.close)
        self.shares_file.write(SHARESCONF)
        self.shares_file.flush()

        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.max_over_subscription_ratio = 20.0
        self.configuration.reserved_percentage = 0
        self.configuration.hds_hnas_nfs_config_file = self.config_file.name
        self.configuration.nfs_shares_config = self.shares_file.name
        self.configuration.nfs_mount_point_base = '/opt/stack/cinder/mnt'
        self.configuration.nfs_mount_options = None
        self.configuration.nas_ip = None
        self.configuration.nas_share_path = None
        self.configuration.nas_mount_options = None

        self.driver = nfs.HDSNFSDriver(configuration=self.configuration)
        self.driver.do_setup("")

    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os, 'access')
    def test_read_config(self, m_access, m_open):
        # Test exception when file is not found
        m_access.return_value = False
        m_open.return_value = six.StringIO(HNASCONF)
        self.assertRaises(exception.NotFound, nfs._read_config, '')

        # Test exception when config file has parsing errors
        # due to a missing tag
        m_access.return_value = True
        m_open.return_value = six.StringIO(HNAS_WRONG_CONF1)
        self.assertRaises(exception.ConfigNotFound, nfs._read_config, '')

        # Test exception when config file has parsing errors
        # due to a missing tag
        m_open.return_value = six.StringIO(HNAS_WRONG_CONF2)
        self.configuration.hds_hnas_iscsi_config_file = ''
        self.assertRaises(exception.ParameterNotFound, nfs._read_config, '')

        # Test exception when config file has parsing errors
        # due to a blank tag
        m_open.return_value = six.StringIO(HNAS_WRONG_CONF3)
        self.configuration.hds_hnas_iscsi_config_file = ''
        self.assertRaises(exception.ParameterNotFound, nfs._read_config, '')

        # Test when config file has parsing errors due to an invalid
        # svc_number
        m_open.return_value = six.StringIO(HNAS_WRONG_CONF4)
        self.configuration.hds_hnas_iscsi_config_file = ''
        config = nfs._read_config('')
        self.assertEqual(1, len(config['services']))

        # Test config with full options
        m_open.return_value = six.StringIO(HNAS_FULL_CONF)
        self.configuration.hds_hnas_iscsi_config_file = ''
        config = nfs._read_config('')
        self.assertEqual(4, len(config['services']))
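    # test_read_config, in summary: an unreadable file maps to NotFound;
    # malformed XML to ConfigNotFound; a required element that is missing
    # or empty to ParameterNotFound; and an out-of-range service number is
    # skipped rather than fatal, so only the valid services are returned.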
    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    def test_create_snapshot(self, m_get_volume_location, m_get_export_path,
                             m_get_provider_location, m_id_to_vol):
        svol = _SNAPVOLUME.copy()
        m_id_to_vol.return_value = svol

        m_get_provider_location.return_value = _SHARE
        m_get_volume_location.return_value = _SHARE
        m_get_export_path.return_value = _EXPORT

        loc = self.driver.create_snapshot(svol)
        out = "{'provider_location': \'" + _SHARE + "'}"
        self.assertEqual(out, str(loc))

    @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    def test_create_cloned_volume(self, m_get_volume_location,
                                  m_get_provider_location, m_id_to_vol,
                                  m_get_service):
        vol = _VOLUME.copy()
        svol = _SNAPVOLUME.copy()

        m_get_service.return_value = _SERVICE
        m_get_provider_location.return_value = _SHARE
        m_get_volume_location.return_value = _SHARE

        loc = self.driver.create_cloned_volume(vol, svol)
        out = "{'provider_location': \'" + _SHARE + "'}"
        self.assertEqual(out, str(loc))

    @mock.patch.object(nfs.HDSNFSDriver, '_ensure_shares_mounted')
    @mock.patch.object(nfs.HDSNFSDriver, '_do_create_volume')
    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    def test_create_volume(self, m_get_volume_location,
                           m_get_provider_location, m_id_to_vol,
                           m_do_create_volume, m_ensure_shares_mounted):
        vol = _VOLUME.copy()

        m_get_provider_location.return_value = _SHARE2
        m_get_volume_location.return_value = _SHARE2

        loc = self.driver.create_volume(vol)
        out = "{'provider_location': \'" + _SHARE2 + "'}"
        self.assertEqual(str(loc), out)

    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_volume_not_present')
    def test_delete_snapshot(self, m_volume_not_present,
                             m_get_provider_location, m_id_to_vol):
        svol = _SNAPVOLUME.copy()

        m_id_to_vol.return_value = svol
        m_get_provider_location.return_value = _SHARE
        m_volume_not_present.return_value = True

        self.driver.delete_snapshot(svol)
        self.assertIsNone(svol['provider_location'])

    @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path')
    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
    def test_create_volume_from_snapshot(self, m_get_volume_location,
                                         m_get_export_path,
                                         m_get_provider_location, m_id_to_vol,
                                         m_get_service):
        vol = _VOLUME.copy()
        svol = _SNAPVOLUME.copy()

        m_get_service.return_value = _SERVICE
        m_get_provider_location.return_value = _SHARE
        m_get_export_path.return_value = _EXPORT
        m_get_volume_location.return_value = _SHARE

        loc = self.driver.create_volume_from_snapshot(vol, svol)
        out = "{'provider_location': \'" + _SHARE + "'}"
        self.assertEqual(out, str(loc))

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
                       return_value={'key': 'type',
                                     'service_label': 'silver'})
    def test_get_pool(self, m_ext_spec):
        vol = _VOLUME.copy()

        self.assertEqual('silver', self.driver.get_pool(vol))

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname',
                       return_value='172.24.44.34')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing(self, m_ensure_shares, m_resolve, m_mount_point,
                             m_isfile, m_get_extra_specs):
        vol = _VOLUME_NFS.copy()

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'silver'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}

        with mock.patch.object(self.driver, '_execute'):
            out = self.driver.manage_existing(vol, existing_vol_ref)

            loc = {'provider_location': '172.17.39.133:/cinder'}
            self.assertEqual(loc, out)

        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname',
                       return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_move_fails(self, m_ensure_shares, m_resolve,
                                        m_mount_point, m_isfile,
                                        m_get_extra_specs):
        vol = _VOLUME_NFS.copy()

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'silver'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
        self.driver._execute = mock.Mock(side_effect=OSError)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing, vol, existing_vol_ref)

        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname',
                       return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_invalid_pool(self, m_ensure_shares, m_resolve,
                                          m_mount_point, m_isfile,
                                          m_get_extra_specs):
        vol = _VOLUME_NFS.copy()

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'gold'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
        self.driver._execute = mock.Mock(side_effect=OSError)

        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver.manage_existing, vol, existing_vol_ref)

        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()
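    # The manage_existing flow asserted above: ensure shares are mounted,
    # resolve the share's hostname, check the backing file exists under the
    # mount point, verify the volume type's service_label matches a
    # configured pool, then move the file into the cinder volume name via
    # self.driver._execute (mocked here); an OSError from that move
    # surfaces as VolumeBackendAPIException.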
    @mock.patch.object(utils, 'get_file_size', return_value=4000000000)
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname',
                       return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_get_size(self, m_ensure_shares, m_resolve,
                                      m_mount_point, m_isfile, m_file_size):
        vol = _VOLUME_NFS.copy()

        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}

        out = self.driver.manage_existing_get_size(vol, existing_vol_ref)
        self.assertEqual(vol['size'], out)
        m_file_size.assert_called_once_with('/mnt/gold/volume-test')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(utils, 'get_file_size', return_value='badfloat')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname',
                       return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_get_size_error(self, m_ensure_shares, m_resolve,
                                            m_mount_point, m_isfile,
                                            m_file_size):
        vol = _VOLUME_NFS.copy()

        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_get_size, vol,
                          existing_vol_ref)
        m_file_size.assert_called_once_with('/mnt/gold/volume-test')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    def test_manage_existing_get_size_without_source_name(self):
        vol = _VOLUME.copy()
        existing_vol_ref = {
            'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'}

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, vol,
                          existing_vol_ref)

    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    def test_unmanage(self, m_mount_point):
        with mock.patch.object(self.driver, '_execute'):
            vol = _VOLUME_NFS.copy()
            self.driver.unmanage(vol)
            m_mount_point.assert_called_once_with('172.24.44.34:/silver/')
cinder-8.0.0/cinder/tests/unit/fake_notifier.py0000664000567000056710000000601012701406250022662 0ustar jenkinsjenkins00000000000000
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import functools

import anyjson
import oslo_messaging as messaging

from cinder import rpc

FakeMessage = collections.namedtuple('Message',
                                     ['publisher_id', 'priority',
                                      'event_type', 'payload'])
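# FakeNotifier below swaps oslo.messaging's Notifier for an in-memory
# recorder: each emitted notification is serialized (the anyjson round-trip
# guards against unserializable payloads) and appended to
# self.notifications as a dict. Typical test-side usage, sketched under the
# assumption of a stubs fixture (the event_type shown is illustrative):
#
#   fake_notifier.stub_notifier(self.stubs)
#   notifier = fake_notifier.get_fake_notifier('volume', 'host1')
#   ... exercise the code under test ...
#   assert notifier.notifications[0]['event_type'] == 'volume.create.start'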
class FakeNotifier(object):

    def __init__(self, transport, publisher_id, serializer=None, driver=None,
                 topic=None, retry=None):
        self.transport = transport
        self.publisher_id = publisher_id
        for priority in ['debug', 'info', 'warn', 'error', 'critical']:
            setattr(self, priority,
                    functools.partial(self._notify, priority.upper()))
        self._serializer = serializer or messaging.serializer.NoOpSerializer()
        self._topic = topic
        self.retry = retry
        self.notifications = []

    def prepare(self, publisher_id=None):
        if publisher_id is None:
            publisher_id = self.publisher_id
        return self.__class__(self.transport, publisher_id, self._serializer)

    def get_notification_count(self):
        return len(self.notifications)

    def _notify(self, priority, ctxt, event_type, payload):
        payload = self._serializer.serialize_entity(ctxt, payload)
        # NOTE(sileht): simulate the kombu serializer; this permits raising
        # an exception if something has not been serialized correctly
        anyjson.serialize(payload)
        msg = dict(publisher_id=self.publisher_id,
                   priority=priority,
                   event_type=event_type,
                   payload=payload)
        self.notifications.append(msg)

    def reset(self):
        del self.notifications[:]


def stub_notifier(stubs):
    stubs.Set(messaging, 'Notifier', FakeNotifier)
    if rpc.NOTIFIER:
        serializer = getattr(rpc.NOTIFIER, '_serializer', None)
        stubs.Set(rpc, 'NOTIFIER',
                  FakeNotifier(rpc.NOTIFIER.transport,
                               rpc.NOTIFIER.publisher_id,
                               serializer=serializer))


def get_fake_notifier(service=None, host=None, publisher_id=None):
    if not publisher_id:
        publisher_id = "%s.%s" % (service, host)
    serializer = getattr(rpc.NOTIFIER, '_serializer', None)
    notifier = FakeNotifier(None, publisher_id=publisher_id,
                            serializer=serializer)
    return notifier.prepare(publisher_id=publisher_id)
cinder-8.0.0/cinder/tests/unit/test_infortrend_common.py0000664000567000056710000022706012701406250024650 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy

import mock

from cinder import exception
from cinder import test
from cinder.tests.unit import test_infortrend_cli
from cinder.tests.unit import utils
from cinder.volume import configuration
from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli

SUCCEED = (0, '')
FAKE_ERROR_RETURN = (-1, '')


class InfortrendTestCase(test.TestCase):

    def __init__(self, *args, **kwargs):
        super(InfortrendTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        super(InfortrendTestCase, self).setUp()
        self.cli_data = test_infortrend_cli.InfortrendCLITestData()

        self.configuration = configuration.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.safe_get = self._fake_safe_get

    def _fake_safe_get(self, key):
        return getattr(self.configuration, key)

    def _driver_setup(self, mock_commands, configuration=None):
        if configuration is None:
            configuration = self.configuration
        self.driver = self._get_driver(configuration)

        mock_commands_execute = self._mock_command_execute(mock_commands)
        mock_cli = mock.Mock(side_effect=mock_commands_execute)

        self.driver._execute_command = mock_cli

    def _get_driver(self, conf):
        raise NotImplementedError

    def _mock_command_execute(self, mock_commands):
        def fake_execute_command(cli_type, *args, **kwargs):
            if cli_type in mock_commands.keys():
                if isinstance(mock_commands[cli_type], list):
                    ret = mock_commands[cli_type][0]
                    del mock_commands[cli_type][0]
                    return ret
                elif isinstance(mock_commands[cli_type], tuple):
                    return mock_commands[cli_type]
                else:
                    return mock_commands[cli_type](*args, **kwargs)
            return FAKE_ERROR_RETURN
        return fake_execute_command

    def _mock_show_lv_for_migrate(self, *args, **kwargs):
        if 'tier' in args:
            return self.cli_data.get_test_show_lv_tier_for_migration()
        return self.cli_data.get_test_show_lv()

    def _mock_show_lv(self, *args, **kwargs):
        if 'tier' in args:
            return self.cli_data.get_test_show_lv_tier()
        return self.cli_data.get_test_show_lv()

    def _assert_cli_has_calls(self, expect_cli_cmd):
        self.driver._execute_command.assert_has_calls(expect_cli_cmd)


class InfortrendFCCommonTestCase(InfortrendTestCase):

    def __init__(self, *args, **kwargs):
        super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        super(InfortrendFCCommonTestCase, self).setUp()

        self.configuration.volume_backend_name = 'infortrend_backend_1'
        self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
        self.configuration.san_password = '111111'
        self.configuration.infortrend_provisioning = 'full'
        self.configuration.infortrend_tiering = '0'
        self.configuration.infortrend_pools_name = 'LV-1, LV-2'
        self.configuration.infortrend_slots_a_channels_id = '0,5'
        self.configuration.infortrend_slots_b_channels_id = '0,5'
        self.configuration.infortrend_cli_timeout = 30

    def _get_driver(self, conf):
        return common_cli.InfortrendCommon('FC', configuration=conf)

    def test_normal_channel(self):
        test_map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {},
        }
        test_target_dict = {
            'slot_a': {'0': '112', '5': '112'},
            'slot_b': {},
        }
        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
        }
        self._driver_setup(mock_commands)

        self.driver._init_map_info(True)

        self.assertDictMatch(test_map_dict, self.driver.map_dict)
        self.assertDictMatch(test_target_dict, self.driver.target_dict)
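    # map_dict/target_dict layout, as these tests exercise it: keyed first
    # by controller slot ('slot_a'/'slot_b'), then by channel id; map_dict
    # holds the LUN ids already in use on each channel and target_dict the
    # SCSI target id of each channel (e.g. channel '0' -> target '112' on a
    # G-model, with slot_b populated only on redundant R-models).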
    def test_normal_channel_with_r_model(self):
        test_map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {'0': [], '5': []},
        }
        test_target_dict = {
            'slot_a': {'0': '112', '5': '112'},
            'slot_b': {'0': '113', '5': '113'},
        }
        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
        }
        self._driver_setup(mock_commands)

        self.driver._init_map_info(True)

        self.assertDictMatch(test_map_dict, self.driver.map_dict)
        self.assertDictMatch(test_target_dict, self.driver.target_dict)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands)

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        self.assertDictMatch(self.cli_data.test_fc_properties, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_specific_channel(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        configuration = copy.copy(self.configuration)
        configuration.infortrend_slots_a_channels_id = '5'

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands, configuration)

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_with_specific_channel,
            properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_with_diff_target_id(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        test_initiator_wwpns = test_connector['wwpns']
        test_partition_id = self.cli_data.fake_partition_id[0]
        configuration = copy.copy(self.configuration)
        configuration.infortrend_slots_a_channels_id = '5'

        mock_commands = {
            'ShowChannel':
                self.cli_data.get_test_show_channel_with_diff_target_id(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands, configuration)

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        expect_cli_cmd = [
            mock.call('ShowChannel'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
            mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_with_specific_channel,
            properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_multipath_with_r_model(self):
        test_volume = self.cli_data.test_volume
        test_connector = copy.deepcopy(self.cli_data.test_connector_fc)

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn(),
        }
        self._driver_setup(mock_commands)

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_multipath_r_model, properties)

    def test_initialize_connection_with_get_wwn_fail(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': FAKE_ERROR_RETURN,
        }
        self._driver_setup(mock_commands)

        self.assertRaises(
            exception.InfortrendCliException,
            self.driver.initialize_connection,
            test_volume,
            test_connector)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_with_zoning(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        test_initiator_wwpns = test_connector['wwpns']
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
        test_lookup_map = self.cli_data.fake_lookup_map

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands)
        self.driver.fc_lookup_service = mock.Mock()
        get_device_mapping_from_network = (
            self.driver.fc_lookup_service.get_device_mapping_from_network
        )
        get_device_mapping_from_network.return_value = test_lookup_map

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        get_device_mapping_from_network.assert_has_calls(
            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])

        expect_cli_cmd = [
            mock.call('ShowChannel'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
            mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_zoning, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_with_zoning_r_model(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        test_initiator_wwpns = test_connector['wwpns']
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
        test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
        test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
        test_lookup_map = self.cli_data.fake_lookup_map_r_model

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn(),
        }
        self._driver_setup(mock_commands)
        self.driver.fc_lookup_service = mock.Mock()
        get_device_mapping_from_network = (
            self.driver.fc_lookup_service.get_device_mapping_from_network
        )
        get_device_mapping_from_network.return_value = test_lookup_map

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        get_device_mapping_from_network.assert_has_calls(
            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])

        expect_cli_cmd = [
            mock.call('ShowChannel'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_zoning_r_model, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_with_zoning_r_model_diff_target_id(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        test_initiator_wwpns = test_connector['wwpns']
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
        test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
        test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
        test_lookup_map = self.cli_data.fake_lookup_map_r_model

        mock_commands = {
            'ShowChannel':
                self.cli_data.get_test_show_channel_r_model_diff_target_id(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_diff_target_id(),
        }
        self._driver_setup(mock_commands)
        self.driver.fc_lookup_service = mock.Mock()
        get_device_mapping_from_network = (
            self.driver.fc_lookup_service.get_device_mapping_from_network
        )
        get_device_mapping_from_network.return_value = test_lookup_map

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        get_device_mapping_from_network.assert_has_calls(
            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])

        expect_cli_cmd = [
            mock.call('ShowChannel'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
            mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_zoning_r_model, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_terminate_connection(self):
        test_volume = self.cli_data.test_volume
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_connector = self.cli_data.test_connector_fc

        mock_commands = {
            'DeleteMap': SUCCEED,
            'ShowMap': self.cli_data.get_test_show_map(),
        }
        self._driver_setup(mock_commands)

        self.driver.terminate_connection(test_volume, test_connector)

        expect_cli_cmd = [
            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
            mock.call('ShowMap'),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_terminate_connection_with_zoning(self):
        test_volume = self.cli_data.test_volume
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_connector = self.cli_data.test_connector_fc
        test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
        test_lookup_map = self.cli_data.fake_lookup_map

        mock_commands = {
            'DeleteMap': SUCCEED,
            'ShowMap': self.cli_data.get_test_show_map(),
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands)
        self.driver.map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {},
        }
        self.driver.fc_lookup_service = mock.Mock()
        get_device_mapping_from_network = (
            self.driver.fc_lookup_service.get_device_mapping_from_network
        )
        get_device_mapping_from_network.return_value = test_lookup_map

        conn_info = self.driver.terminate_connection(
            test_volume, test_connector)

        get_device_mapping_from_network.assert_has_calls(
            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])

        expect_cli_cmd = [
            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_terminate_conn_info, conn_info)
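    # When LUN maps remain on the host channels after DeleteMap (the
    # '..._lun_map_exist' case below), terminate_connection returns None
    # instead of zoning removal info, so the FC zone is left in place for
    # the still-mapped LUNs.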
    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_terminate_connection_with_zoning_and_lun_map_exist(self):
        test_volume = self.cli_data.test_volume
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_connector = self.cli_data.test_connector_fc

        mock_commands = {
            'DeleteMap': SUCCEED,
            'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(),
        }
        self._driver_setup(mock_commands)
        self.driver.map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {},
        }
        self.driver.target_dict = {
            'slot_a': {'0': '112', '5': '112'},
            'slot_b': {},
        }
        self.driver.fc_lookup_service = mock.Mock()

        conn_info = self.driver.terminate_connection(
            test_volume, test_connector)

        expect_cli_cmd = [
            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
            mock.call('ShowMap'),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertIsNone(conn_info)


class InfortrendiSCSICommonTestCase(InfortrendTestCase):

    def __init__(self, *args, **kwargs):
        super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        super(InfortrendiSCSICommonTestCase, self).setUp()

        self.configuration.volume_backend_name = 'infortrend_backend_1'
        self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
        self.configuration.san_password = '111111'
        self.configuration.infortrend_provisioning = 'full'
        self.configuration.infortrend_tiering = '0'
        self.configuration.infortrend_pools_name = 'LV-1, LV-2'
        self.configuration.infortrend_slots_a_channels_id = '1,2,4'
        self.configuration.infortrend_slots_b_channels_id = '1,2,4'

    def _get_driver(self, conf):
        return common_cli.InfortrendCommon('iSCSI', configuration=conf)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_create_map_warning_return_code(self, log_warning):
        FAKE_RETURN_CODE = (20, '')
        mock_commands = {
            'CreateMap': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)

        self.driver._execute('CreateMap')
        self.assertEqual(1, log_warning.call_count)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_delete_map_warning_return_code(self, log_warning):
        FAKE_RETURN_CODE = (11, '')
        mock_commands = {
            'DeleteMap': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)

        self.driver._execute('DeleteMap')
        self.assertEqual(1, log_warning.call_count)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_create_iqn_warning_return_code(self, log_warning):
        FAKE_RETURN_CODE = (20, '')
        mock_commands = {
            'CreateIQN': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)

        self.driver._execute('CreateIQN')
        self.assertEqual(1, log_warning.call_count)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_delete_iqn_warning_return_code_has_map(self, log_warning):
        FAKE_RETURN_CODE = (20, '')
        mock_commands = {
            'DeleteIQN': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)

        self.driver._execute('DeleteIQN')
        self.assertEqual(1, log_warning.call_count)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_delete_iqn_warning_return_code_no_such_name(self, log_warning):
        FAKE_RETURN_CODE = (11, '')
        mock_commands = {
            'DeleteIQN': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)

        self.driver._execute('DeleteIQN')
        self.assertEqual(1, log_warning.call_count)

    def test_normal_channel(self):
        test_map_dict = {
            'slot_a': {'1': [], '2': [], '4': []},
            'slot_b': {},
        }
        test_target_dict = {
            'slot_a': {'1': '0', '2': '0', '4': '0'},
            'slot_b': {},
        }
        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
        }
        self._driver_setup(mock_commands)

        self.driver._init_map_info()

        self.assertDictMatch(test_map_dict, self.driver.map_dict)
        self.assertDictMatch(test_target_dict, self.driver.target_dict)
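    # The warning-return-code tests above pin down the convention that a
    # non-zero CLI return code of 20 or 11 (apparently 'already exists' /
    # 'no such object' style conditions) is only logged as a warning rather
    # than raised, while any other failure surfaces as
    # InfortrendCliException elsewhere in these tests.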
self._driver_setup(mock_commands) self.driver._init_map_info() self.assertDictMatch(test_map_dict, self.driver.map_dict) self.assertDictMatch(test_target_dict, self.driver.target_dict) def test_normal_channel_with_multipath(self): test_map_dict = { 'slot_a': {'1': [], '2': [], '4': []}, 'slot_b': {'1': [], '2': [], '4': []}, } test_target_dict = { 'slot_a': {'1': '0', '2': '0', '4': '0'}, 'slot_b': {'1': '1', '2': '1', '4': '1'}, } mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), } self._driver_setup(mock_commands) self.driver._init_map_info(multipath=True) self.assertDictMatch(test_map_dict, self.driver.map_dict) self.assertDictMatch(test_target_dict, self.driver.target_dict) def test_specific_channel(self): configuration = copy.copy(self.configuration) configuration.infortrend_slots_a_channels_id = '2, 4' test_map_dict = { 'slot_a': {'2': [], '4': []}, 'slot_b': {}, } test_target_dict = { 'slot_a': {'2': '0', '4': '0'}, 'slot_b': {}, } mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), } self._driver_setup(mock_commands, configuration) self.driver._init_map_info() self.assertDictMatch(test_map_dict, self.driver.map_dict) self.assertDictMatch(test_target_dict, self.driver.target_dict) def test_update_mcs_dict(self): configuration = copy.copy(self.configuration) configuration.use_multipath_for_image_xfer = True test_mcs_dict = { 'slot_a': {'1': ['1', '2'], '2': ['4']}, 'slot_b': {}, } mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), } self._driver_setup(mock_commands, configuration) self.driver._init_map_info() self.assertDictMatch(test_mcs_dict, self.driver.mcs_dict) def test_mapping_info_with_mcs(self): configuration = copy.copy(self.configuration) configuration.use_multipath_for_image_xfer = True fake_mcs_dict = { 'slot_a': {'0': ['1', '2'], '2': ['4']}, 'slot_b': {}, } lun_list = list(range(0, 127)) fake_map_dict = { 'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]}, 'slot_b': {}, } test_map_chl = { 'slot_a': ['1', '2'], } test_map_lun = ['2'] test_mcs_id = '0' self.driver = self._get_driver(configuration) self.driver.mcs_dict = fake_mcs_dict self.driver.map_dict = fake_map_dict map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs() self.assertDictMatch(test_map_chl, map_chl) self.assertEqual(test_map_lun, map_lun) self.assertEqual(test_mcs_id, mcs_id) def test_mapping_info_with_mcs_multi_group(self): configuration = copy.copy(self.configuration) configuration.use_multipath_for_image_xfer = True fake_mcs_dict = { 'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']}, 'slot_b': {}, } lun_list = list(range(0, 127)) fake_map_dict = { 'slot_a': { '1': lun_list[2:], '2': lun_list[:], '3': lun_list[:], '4': lun_list[1:], '5': lun_list[:], }, 'slot_b': {}, } test_map_chl = { 'slot_a': ['3', '4'], } test_map_lun = ['1'] test_mcs_id = '1' self.driver = self._get_driver(configuration) self.driver.mcs_dict = fake_mcs_dict self.driver.map_dict = fake_map_dict map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs() self.assertDictMatch(test_map_chl, map_chl) self.assertEqual(test_map_lun, map_lun) self.assertEqual(test_mcs_id, mcs_id) def test_specific_channel_with_multipath(self): configuration = copy.copy(self.configuration) configuration.infortrend_slots_a_channels_id = '1,2' test_map_dict = { 'slot_a': {'1': [], '2': []}, 'slot_b': {}, } test_target_dict = { 'slot_a': {'1': '0', '2': '0'}, 'slot_b': {}, } mock_commands = { 'ShowChannel': 
self.cli_data.get_test_show_channel(), } self._driver_setup(mock_commands, configuration) self.driver._init_map_info(multipath=True) self.assertDictMatch(test_map_dict, self.driver.map_dict) self.assertDictMatch(test_target_dict, self.driver.target_dict) def test_specific_channel_with_multipath_r_model(self): configuration = copy.copy(self.configuration) configuration.infortrend_slots_a_channels_id = '1,2' configuration.infortrend_slots_b_channels_id = '1' test_map_dict = { 'slot_a': {'1': [], '2': []}, 'slot_b': {'1': []}, } test_target_dict = { 'slot_a': {'1': '0', '2': '0'}, 'slot_b': {'1': '1'}, } mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), } self._driver_setup(mock_commands, configuration) self.driver._init_map_info(multipath=True) self.assertDictMatch(test_map_dict, self.driver.map_dict) self.assertDictMatch(test_target_dict, self.driver.target_dict) @mock.patch.object(common_cli.LOG, 'info') def test_create_volume(self, log_info): test_volume = self.cli_data.test_volume test_model_update = { 'provider_location': 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), self.cli_data.fake_partition_id[0]), } mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'ShowLV': self._mock_show_lv, } self._driver_setup(mock_commands) model_update = self.driver.create_volume(test_volume) self.assertDictMatch(test_model_update, model_update) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_volume_with_create_fail(self): test_volume = self.cli_data.test_volume mock_commands = { 'CreatePartition': FAKE_ERROR_RETURN, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'ShowLV': self._mock_show_lv, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.create_volume, test_volume) @mock.patch.object(common_cli.LOG, 'info') def test_delete_volume(self, log_info): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_snapshot_id = self.cli_data.fake_snapshot_id test_pair_id = self.cli_data.fake_pair_id mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail_for_map( test_partition_id), 'ShowReplica': self.cli_data.get_test_show_replica_detail(), 'DeleteReplica': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), 'DeleteSnapshot': SUCCEED, 'ShowMap': self.cli_data.get_test_show_map(), 'DeleteMap': SUCCEED, 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.delete_volume(test_volume) expect_cli_cmd = [ mock.call('ShowPartition', '-l'), mock.call('ShowReplica', '-l'), mock.call('DeleteReplica', test_pair_id[0], '-y'), mock.call('ShowSnapshot', 'part=%s' % test_partition_id), mock.call('DeleteSnapshot', test_snapshot_id[0], '-y'), mock.call('DeleteSnapshot', test_snapshot_id[1], '-y'), mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('DeleteMap', 'part', test_partition_id, '-y'), mock.call('DeletePartition', test_partition_id, '-y'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'warning', mock.Mock()) def test_delete_volume_with_sync_pair(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] mock_commands = { 'ShowPartition': 
self.cli_data.get_test_show_partition_detail_for_map( test_partition_id), 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_sync_pair(), } self._driver_setup(mock_commands) self.assertRaises( exception.VolumeDriverException, self.driver.delete_volume, test_volume) def test_delete_volume_with_delete_fail(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail_for_map( test_partition_id), 'ShowReplica': self.cli_data.get_test_show_replica_detail(), 'DeleteReplica': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), 'DeleteSnapshot': SUCCEED, 'ShowMap': self.cli_data.get_test_show_map(), 'DeleteMap': SUCCEED, 'DeletePartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.delete_volume, test_volume) @mock.patch.object(common_cli.LOG, 'warning') def test_delete_volume_with_partition_not_found(self, log_warning): test_volume = self.cli_data.test_volume mock_commands = { 'ShowPartition': self.cli_data.get_test_show_empty_list(), } self._driver_setup(mock_commands) self.driver.delete_volume(test_volume) self.assertEqual(1, log_warning.call_count) @mock.patch.object(common_cli.LOG, 'info') def test_delete_volume_without_provider(self, log_info): test_system_id = self.cli_data.fake_system_id[0] test_volume = copy.deepcopy(self.cli_data.test_volume) test_volume['provider_location'] = 'system_id^%s@partition_id^%s' % ( int(test_system_id, 16), 'None') test_partition_id = self.cli_data.fake_partition_id[0] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail_for_map( test_partition_id), 'ShowReplica': self.cli_data.get_test_show_replica_detail(), 'DeleteReplica': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), 'DeleteSnapshot': SUCCEED, 'ShowMap': self.cli_data.get_test_show_map(), 'DeleteMap': SUCCEED, 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.delete_volume(test_volume) self.assertEqual(1, log_info.call_count) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) @mock.patch.object(common_cli.LOG, 'info') def test_create_cloned_volume(self, log_info): fake_partition_id = self.cli_data.fake_partition_id[0] test_dst_volume = self.cli_data.test_dst_volume test_dst_volume_id = test_dst_volume['id'].replace('-', '') test_src_volume = self.cli_data.test_volume test_dst_part_id = self.cli_data.fake_partition_id[1] test_model_update = { 'provider_location': 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), self.cli_data.fake_partition_id[1]), } mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CreateReplica': SUCCEED, 'ShowLV': self._mock_show_lv, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( fake_partition_id, test_dst_part_id, test_dst_volume_id), 'DeleteReplica': SUCCEED, } self._driver_setup(mock_commands) model_update = self.driver.create_cloned_volume( test_dst_volume, test_src_volume) self.assertDictMatch(test_model_update, model_update) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_cloned_volume_with_create_replica_fail(self): test_dst_volume = self.cli_data.test_dst_volume test_src_volume = self.cli_data.test_volume mock_commands = {
'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CreateReplica': FAKE_ERROR_RETURN, 'ShowLV': self._mock_show_lv, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.create_cloned_volume, test_dst_volume, test_src_volume) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_export(self): test_volume = self.cli_data.test_volume test_model_update = { 'provider_location': test_volume['provider_location'], } self.driver = self._get_driver(self.configuration) model_update = self.driver.create_export(None, test_volume) self.assertDictMatch(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_get_volume_stats(self): test_volume_states = self.cli_data.test_volume_states mock_commands = { 'ShowLicense': self.cli_data.get_test_show_license(), 'ShowLV': self.cli_data.get_test_show_lv(), 'ShowPartition': self.cli_data.get_test_show_partition_detail(), } self._driver_setup(mock_commands) self.driver.VERSION = '99.99' volume_states = self.driver.get_volume_stats(True) self.assertDictMatch(test_volume_states, volume_states) def test_get_volume_stats_fail(self): mock_commands = { 'ShowLicense': self.cli_data.get_test_show_license(), 'ShowLV': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.get_volume_stats) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_snapshot(self): fake_partition_id = self.cli_data.fake_partition_id[0] fake_snapshot_id = self.cli_data.fake_snapshot_id[0] mock_commands = { 'CreateSnapshot': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot( partition_id=fake_partition_id, snapshot_id=fake_snapshot_id), 'ShowPartition': self.cli_data.get_test_show_partition(), } self._driver_setup(mock_commands) model_update = self.driver.create_snapshot(self.cli_data.test_snapshot) self.assertEqual(fake_snapshot_id, model_update['provider_location']) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_snapshot_without_partition_id(self): fake_partition_id = self.cli_data.fake_partition_id[0] fake_snapshot_id = self.cli_data.fake_snapshot_id[0] test_snapshot = self.cli_data.test_snapshot mock_commands = { 'CreateSnapshot': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot( partition_id=fake_partition_id, snapshot_id=fake_snapshot_id), 'ShowPartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.create_snapshot, test_snapshot) def test_create_snapshot_with_create_fail(self): fake_partition_id = self.cli_data.fake_partition_id[0] fake_snapshot_id = self.cli_data.fake_snapshot_id[0] test_snapshot = self.cli_data.test_snapshot mock_commands = { 'CreateSnapshot': FAKE_ERROR_RETURN, 'ShowSnapshot': self.cli_data.get_test_show_snapshot( partition_id=fake_partition_id, snapshot_id=fake_snapshot_id), 'ShowPartition': self.cli_data.get_test_show_partition(), } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.create_snapshot, test_snapshot) def test_create_snapshot_with_show_fail(self): test_snapshot = self.cli_data.test_snapshot mock_commands = { 'CreateSnapshot': SUCCEED, 'ShowSnapshot': FAKE_ERROR_RETURN, 'ShowPartition': self.cli_data.get_test_show_partition(), } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, 
self.driver.create_snapshot, test_snapshot) @mock.patch.object(common_cli.LOG, 'info') def test_delete_snapshot(self, log_info): test_snapshot = self.cli_data.test_snapshot mock_commands = { 'ShowReplica': self.cli_data.get_test_show_replica_detail(), 'DeleteSnapshot': SUCCEED, } self._driver_setup(mock_commands) self.driver.delete_snapshot(test_snapshot) self.assertEqual(1, log_info.call_count) def test_delete_snapshot_without_provider_location(self): test_snapshot = self.cli_data.test_snapshot self.driver = self._get_driver(self.configuration) self.driver._get_raid_snapshot_id = mock.Mock(return_value=None) self.assertRaises( exception.VolumeBackendAPIException, self.driver.delete_snapshot, test_snapshot) def test_delete_snapshot_with_fail(self): test_snapshot = self.cli_data.test_snapshot mock_commands = { 'ShowReplica': self.cli_data.get_test_show_replica_detail(), 'DeleteSnapshot': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.delete_snapshot, test_snapshot) @mock.patch.object(common_cli.LOG, 'warning', mock.Mock()) def test_delete_snapshot_with_sync_pair(self): test_snapshot = self.cli_data.test_snapshot mock_commands = { 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_si_sync_pair(), 'DeleteSnapshot': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.VolumeDriverException, self.driver.delete_snapshot, test_snapshot) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) @mock.patch.object(common_cli.LOG, 'info') def test_create_volume_from_snapshot(self, log_info): test_snapshot = self.cli_data.test_snapshot test_snapshot_id = self.cli_data.fake_snapshot_id[0] test_dst_volume = self.cli_data.test_dst_volume test_dst_volume_id = test_dst_volume['id'].replace('-', '') test_dst_part_id = self.cli_data.fake_partition_id[1] test_model_update = { 'provider_location': 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), self.cli_data.fake_partition_id[1]), } mock_commands = { 'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail_filled_block(), 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CreateReplica': SUCCEED, 'ShowLV': self._mock_show_lv, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( test_snapshot_id, test_dst_part_id, test_dst_volume_id), 'DeleteReplica': SUCCEED, } self._driver_setup(mock_commands) model_update = self.driver.create_volume_from_snapshot( test_dst_volume, test_snapshot) self.assertDictMatch(test_model_update, model_update) self.assertEqual(1, log_info.call_count) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) @mock.patch.object(common_cli.LOG, 'info') def test_create_volume_from_snapshot_without_filled_block(self, log_info): test_snapshot = self.cli_data.test_snapshot test_snapshot_id = self.cli_data.fake_snapshot_id[0] test_dst_volume = self.cli_data.test_dst_volume test_dst_volume_id = test_dst_volume['id'].replace('-', '') test_dst_part_id = self.cli_data.fake_partition_id[1] test_src_part_id = self.cli_data.fake_partition_id[0] test_model_update = { 'provider_location': 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), self.cli_data.fake_partition_id[1]), } mock_commands = { 'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail(), 'CreatePartition': SUCCEED, 
'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CreateReplica': SUCCEED, 'ShowLV': self._mock_show_lv, 'ShowReplica': [ self.cli_data.get_test_show_replica_detail_for_migrate( test_src_part_id, test_dst_part_id, test_dst_volume_id), self.cli_data.get_test_show_replica_detail_for_migrate( test_snapshot_id, test_dst_part_id, test_dst_volume_id), ], 'DeleteReplica': SUCCEED, } self._driver_setup(mock_commands) model_update = self.driver.create_volume_from_snapshot( test_dst_volume, test_snapshot) self.assertDictMatch(test_model_update, model_update) self.assertEqual(1, log_info.call_count) def test_create_volume_from_snapshot_without_provider_location( self): test_snapshot = self.cli_data.test_snapshot test_dst_volume = self.cli_data.test_dst_volume self.driver = self._get_driver(self.configuration) self.driver._get_raid_snapshot_id = mock.Mock(return_value=None) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, test_dst_volume, test_snapshot) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) test_iscsi_properties = self.cli_data.test_iscsi_properties test_target_portal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_portal), } self._driver_setup(mock_commands) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictMatch(test_iscsi_properties, properties) expect_cli_cmd = [ mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0', 'iqn=%s' % test_connector['initiator']), ] self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_iqn_not_exist(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1]) test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) test_iscsi_properties = self.cli_data.test_iscsi_properties test_target_portal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False test_connector['initiator'] = test_initiator mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateIQN': SUCCEED, 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_portal), } self._driver_setup(mock_commands) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictMatch(test_iscsi_properties, properties) expect_cli_cmd = [ mock.call('CreateIQN', test_initiator, test_initiator[-16:]), mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0', 'iqn=%s' % test_connector['initiator']), ]
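        # Note the alias argument in the expected CreateIQN call above: the
        # driver appears to derive the IQN alias from the last 16 characters
        # of the initiator IQN, the same suffix that terminate_connection
        # later removes with DeleteIQN.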
self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_empty_map(self): test_volume = self.cli_data.test_volume test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map test_target_portal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_empty_list(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_portal), } self._driver_setup(mock_commands) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictMatch( self.cli_data.test_iscsi_properties_empty_map, properties) def test_initialize_connection_with_create_map_fail(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_iscsi mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': FAKE_ERROR_RETURN, 'ShowNet': SUCCEED, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.initialize_connection, test_volume, test_connector) def test_initialize_connection_with_get_ip_fail(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_iscsi mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': SUCCEED, 'ShowNet': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.initialize_connection, test_volume, test_connector) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_mcs(self): configuration = copy.copy(self.configuration) configuration.use_multipath_for_image_xfer = True test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs test_target_portal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_portal), } self._driver_setup(mock_commands, configuration) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictMatch(test_iscsi_properties, properties) expect_cli_cmd = [ mock.call('CreateMap', 'part', test_partition_id, '1', '0', '2', 'iqn=%s' % test_connector['initiator']), ] self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_extend_volume(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_new_size = 10
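        # Rough arithmetic behind the expected calls in these two extend
        # tests, assuming the fake volume is 1 GB (the
        # manage_existing_get_size tests further down assert a size of 1):
        # whole-GB growth is sent as size=9GB (new size minus current size),
        # while the fractional case below converts to MB,
        # round((5.5 - 1) * 1024) = 4608, sent as size=4608MB.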
test_expand_size = test_new_size - test_volume['size'] mock_commands = { 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.extend_volume(test_volume, test_new_size) expect_cli_cmd = [ mock.call('SetPartition', 'expand', test_partition_id, 'size=%sGB' % test_expand_size), ] self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_extend_volume_mb(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_new_size = 5.5 test_expand_size = round((test_new_size - test_volume['size']) * 1024) mock_commands = { 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.extend_volume(test_volume, test_new_size) expect_cli_cmd = [ mock.call('SetPartition', 'expand', test_partition_id, 'size=%sMB' % test_expand_size), ] self._assert_cli_has_calls(expect_cli_cmd) def test_extend_volume_fail(self): test_volume = self.cli_data.test_volume test_new_size = 10 mock_commands = { 'SetPartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.extend_volume, test_volume, test_new_size) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_terminate_connection(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_connector = self.cli_data.test_connector_iscsi mock_commands = { 'DeleteMap': SUCCEED, 'DeleteIQN': SUCCEED, 'ShowMap': self.cli_data.get_test_show_map(), } self._driver_setup(mock_commands) self.driver.terminate_connection(test_volume, test_connector) expect_cli_cmd = [ mock.call('DeleteMap', 'part', test_partition_id, '-y'), mock.call('DeleteIQN', test_connector['initiator'][-16:]), mock.call('ShowMap'), ] self._assert_cli_has_calls(expect_cli_cmd) def test_terminate_connection_fail(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_iscsi mock_commands = { 'DeleteMap': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.terminate_connection, test_volume, test_connector) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_migrate_volume(self): test_host = copy.deepcopy(self.cli_data.test_migrate_host) fake_pool = copy.deepcopy(self.cli_data.fake_pool) test_volume = self.cli_data.test_volume test_volume_id = test_volume['id'].replace('-', '') test_src_part_id = self.cli_data.fake_partition_id[0] test_dst_part_id = self.cli_data.fake_partition_id[2] test_pair_id = self.cli_data.fake_pair_id[0] test_model_update = { 'provider_location': 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), test_dst_part_id), } mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition( test_volume_id, fake_pool['pool_id']), 'CreateReplica': SUCCEED, 'ShowLV': self._mock_show_lv_for_migrate, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( test_src_part_id, test_dst_part_id, test_volume_id), 'DeleteReplica': SUCCEED, 'DeleteMap': SUCCEED, 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) rc, model_update = self.driver.migrate_volume(test_volume, test_host) expect_cli_cmd = [ mock.call('CreatePartition', fake_pool['pool_id'], test_volume['id'].replace('-', ''), 'size=%s' % (test_volume['size'] * 1024), ''), mock.call('ShowPartition'), mock.call('CreateReplica', 'Cinder-Migrate', 'part', 
test_src_part_id, 'part', test_dst_part_id, 'type=mirror'), mock.call('ShowReplica', '-l'), mock.call('DeleteReplica', test_pair_id, '-y'), mock.call('DeleteMap', 'part', test_src_part_id, '-y'), mock.call('DeletePartition', test_src_part_id, '-y'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertTrue(rc) self.assertDictMatch(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'warning') def test_migrate_volume_with_invalid_storage(self, log_warning): fake_host = self.cli_data.fake_host test_volume = self.cli_data.test_volume mock_commands = { 'ShowLV': self._mock_show_lv_for_migrate, } self._driver_setup(mock_commands) rc, model_update = self.driver.migrate_volume(test_volume, fake_host) self.assertFalse(rc) self.assertTrue(model_update is None) self.assertEqual(1, log_warning.call_count) def test_migrate_volume_with_get_part_id_fail(self): test_host = copy.deepcopy(self.cli_data.test_migrate_host) test_volume = self.cli_data.test_volume mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'DeleteMap': SUCCEED, 'CreateReplica': SUCCEED, 'CreateMap': SUCCEED, 'ShowLV': self._mock_show_lv_for_migrate, } self._driver_setup(mock_commands) self.assertRaises( exception.VolumeDriverException, self.driver.migrate_volume, test_volume, test_host) def test_migrate_volume_with_create_replica_fail(self): test_host = copy.deepcopy(self.cli_data.test_migrate_host) fake_pool = copy.deepcopy(self.cli_data.fake_pool) test_volume = self.cli_data.test_volume mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition( test_volume['id'].replace('-', ''), fake_pool['pool_id']), 'DeleteMap': SUCCEED, 'CreateReplica': FAKE_ERROR_RETURN, 'CreateMap': SUCCEED, 'ShowLV': self._mock_show_lv_for_migrate, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.migrate_volume, test_volume, test_host) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_migrate_volume_timeout(self): test_host = copy.deepcopy(self.cli_data.test_migrate_host) fake_pool = copy.deepcopy(self.cli_data.fake_pool) test_volume = self.cli_data.test_volume test_volume_id = test_volume['id'].replace('-', '') test_src_part_id = self.cli_data.fake_partition_id[0] test_dst_part_id = self.cli_data.fake_partition_id[2] configuration = copy.copy(self.configuration) configuration.infortrend_cli_timeout = 0 mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition( test_volume_id, fake_pool['pool_id']), 'CreateReplica': SUCCEED, 'ShowLV': self._mock_show_lv_for_migrate, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( test_src_part_id, test_dst_part_id, test_volume_id, 'Copy'), } self._driver_setup(mock_commands, configuration) self.assertRaises( exception.VolumeDriverException, self.driver.migrate_volume, test_volume, test_host) def test_manage_existing_get_size(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume test_pool = self.cli_data.fake_lv_id[0] test_partition_id = self.cli_data.fake_partition_id[2] test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), 'ShowMap': SUCCEED, } self._driver_setup(mock_commands) size = self.driver.manage_existing_get_size( test_volume, 
test_ref_volume) expect_cli_cmd = [ mock.call('ShowMap', 'part=%s' % test_partition_id), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, size) def test_manage_existing_get_size_with_import(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_import test_pool = self.cli_data.fake_lv_id[0] test_partition_id = self.cli_data.fake_partition_id[2] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( test_ref_volume['source-name'], test_pool), 'ShowMap': SUCCEED, } self._driver_setup(mock_commands) size = self.driver.manage_existing_get_size( test_volume, test_ref_volume) expect_cli_cmd = [ mock.call('ShowMap', 'part=%s' % test_partition_id), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, size) def test_manage_existing_get_size_in_use(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume test_pool = self.cli_data.fake_lv_id[0] test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), 'ShowMap': self.cli_data.get_test_show_map(), } self._driver_setup(mock_commands) self.assertRaises( exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, test_volume, test_ref_volume) def test_manage_existing_get_size_no_source_id(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_dst_volume self.driver = self._get_driver(self.configuration) self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, test_volume, test_ref_volume) def test_manage_existing_get_size_show_part_fail(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume mock_commands = { 'ShowPartition': FAKE_ERROR_RETURN, 'ShowMap': SUCCEED, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.manage_existing_get_size, test_volume, test_ref_volume) def test_manage_existing_get_size_show_map_fail(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume test_pool = self.cli_data.fake_lv_id[0] test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), 'ShowMap': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.manage_existing_get_size, test_volume, test_ref_volume) @mock.patch.object(common_cli.LOG, 'info') def test_manage_existing(self, log_info): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume test_pool = self.cli_data.fake_lv_id[0] test_partition_id = self.cli_data.fake_partition_id[2] test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') test_model_update = { 'provider_location': 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), test_partition_id), } mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), 'SetPartition': SUCCEED, 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) model_update = self.driver.manage_existing( test_volume, test_ref_volume) expect_cli_cmd = [ mock.call('SetPartition', test_partition_id, 
'name=%s' % test_volume['id'].replace('-', '')), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) self.assertDictMatch(test_model_update, model_update) def test_manage_existing_rename_fail(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume test_pool = self.cli_data.fake_lv_id[0] test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), 'SetPartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( exception.InfortrendCliException, self.driver.manage_existing, test_volume, test_ref_volume) def test_manage_existing_with_part_not_found(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail(), 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, test_ref_volume) @mock.patch.object(common_cli.LOG, 'info') def test_manage_existing_with_import(self, log_info): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_import test_pool = self.cli_data.fake_lv_id[0] test_partition_id = self.cli_data.fake_partition_id[2] test_model_update = { 'provider_location': 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), test_partition_id), } mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( test_ref_volume['source-name'], test_pool), 'SetPartition': SUCCEED, 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) model_update = self.driver.manage_existing( test_volume, test_ref_volume) expect_cli_cmd = [ mock.call('SetPartition', test_partition_id, 'name=%s' % test_volume['id'].replace('-', '')), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) self.assertDictMatch(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'info') def test_unmanage(self, log_info): test_volume = self.cli_data.test_volume test_volume_id = test_volume['id'].replace('-', '') test_partition_id = self.cli_data.fake_partition_id[0] mock_commands = { 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.unmanage(test_volume) expect_cli_cmd = [ mock.call( 'SetPartition', test_partition_id, 'name=cinder-unmanaged-%s' % test_volume_id[:-17]), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'info') def test_retype_without_change(self, log_info): test_volume = self.cli_data.test_volume test_new_type = self.cli_data.test_new_type test_diff = {'extra_specs': {}} test_host = self.cli_data.test_migrate_host_2 self.driver = self._get_driver(self.configuration) rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertTrue(rc) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'warning') def test_retype_with_change_provision(self, log_warning): test_volume = self.cli_data.test_volume test_new_type = self.cli_data.test_new_type test_diff = self.cli_data.test_diff test_host = self.cli_data.test_migrate_host_2 self.driver = self._get_driver(self.configuration) rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) 
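        # A provisioning change presumably cannot be applied in place, so
        # retype is expected to return False and log a warning; a False
        # return is what allows the volume manager to fall back to a full
        # migration when the retype policy permits it.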
self.assertFalse(rc) self.assertEqual(1, log_warning.call_count) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_retype_with_migrate(self): fake_pool = copy.deepcopy(self.cli_data.fake_pool) test_host = copy.deepcopy(self.cli_data.test_migrate_host) test_volume = self.cli_data.test_volume test_volume_id = test_volume['id'].replace('-', '') test_new_type = self.cli_data.test_new_type test_diff = self.cli_data.test_diff test_src_part_id = self.cli_data.fake_partition_id[0] test_dst_part_id = self.cli_data.fake_partition_id[2] test_pair_id = self.cli_data.fake_pair_id[0] test_model_update = { 'provider_location': 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), test_dst_part_id), } mock_commands = { 'ShowSnapshot': SUCCEED, 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition( test_volume_id, fake_pool['pool_id']), 'CreateReplica': SUCCEED, 'ShowLV': self._mock_show_lv_for_migrate, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( test_src_part_id, test_dst_part_id, test_volume_id), 'DeleteReplica': SUCCEED, 'DeleteMap': SUCCEED, 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) rc, model_update = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) min_size = int(test_volume['size'] * 1024 * 0.2) create_params = {'init': 'disable', 'min': '%sMB' % min_size} create_params = ' '.join('%s=%s' % (key, value) for key, value in create_params.items()) expect_cli_cmd = [ mock.call('ShowSnapshot', 'part=%s' % test_src_part_id), mock.call( 'CreatePartition', fake_pool['pool_id'], test_volume['id'].replace('-', ''), 'size=%s' % (test_volume['size'] * 1024), create_params, ), mock.call('ShowPartition'), mock.call( 'CreateReplica', 'Cinder-Migrate', 'part', test_src_part_id, 'part', test_dst_part_id, 'type=mirror' ), mock.call('ShowReplica', '-l'), mock.call('DeleteReplica', test_pair_id, '-y'), mock.call('DeleteMap', 'part', test_src_part_id, '-y'), mock.call('DeletePartition', test_src_part_id, '-y'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertTrue(rc) self.assertDictMatch(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_update_migrated_volume(self): src_volume = self.cli_data.test_volume dst_volume = copy.deepcopy(self.cli_data.test_dst_volume) test_dst_part_id = self.cli_data.fake_partition_id[1] dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), test_dst_part_id) test_model_update = { '_name_id': None, 'provider_location': dst_volume['provider_location'], } mock_commands = { 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) model_update = self.driver.update_migrated_volume( None, src_volume, dst_volume, 'available') expect_cli_cmd = [ mock.call('SetPartition', test_dst_part_id, 'name=%s' % src_volume['id'].replace('-', '')), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertDictMatch(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) def test_update_migrated_volume_rename_fail(self): src_volume = self.cli_data.test_volume dst_volume = self.cli_data.test_dst_volume dst_volume['_name_id'] = 'fake_name_id' test_dst_part_id = self.cli_data.fake_partition_id[1] dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), test_dst_part_id) mock_commands = { 'SetPartition': FAKE_ERROR_RETURN } 
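        # With the rename failing, the driver cannot hand the original name
        # back, so update_migrated_volume is expected to keep the destination
        # volume's _name_id ('fake_name_id') rather than clearing it, as the
        # assertion below verifies.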
self._driver_setup(mock_commands) model_update = self.driver.update_migrated_volume( None, src_volume, dst_volume, 'available') self.assertEqual({'_name_id': 'fake_name_id'}, model_update) cinder-8.0.0/cinder/tests/unit/volume/0000775000567000056710000000000012701406543021022 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/__init__.py0000664000567000056710000000000012701406250023114 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/0000775000567000056710000000000012701406543022500 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/0000775000567000056710000000000012701406543023601 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/test_delete_volume.py0000664000567000056710000000341612701406250030042 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Industrial Technology Research Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test case for the delete volume function.""" import mock from cinder import exception from cinder.tests.unit.volume.drivers import disco class DeleteVolumeTestCase(disco.TestDISCODriver): """Test cases to delete DISCO volumes.""" def setUp(self): """Initialise variables and mock functions.""" super(DeleteVolumeTestCase, self).setUp() # Mock volumeDelete function from suds client. mock.patch.object(self.requester, 'volumeDelete', self.perform_disco_request).start() self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] def perform_disco_request(self, *cmd, **kwargs): """Mock function to delete a volume.""" return self.response def test_delete_volume(self): """Delete a volume.""" self.driver.delete_volume(self.volume) def test_delete_volume_fail(self): """Make the API returns an error while deleting.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_delete_volume) cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py0000664000567000056710000001441312701406250031366 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Industrial Technology Research Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
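# A condensed, illustrative sketch (the hypothetical names dest/src stand in
# for fake volume objects) of the mocking pattern the DISCO test cases below
# all share: patch a method of the fake suds service with a local stub, point
# the canned SOAP payload at the scenario under test, then drive the public
# driver API:
#
#     mock.patch.object(self.requester, 'volumeClone',
#                       self.clone_request).start()
#     self.response = self.FAKE_SOAP_RESPONSE['standard']['fail']
#     self.assertRaises(exception.VolumeBackendAPIException,
#                       self.driver.create_cloned_volume, dest, src)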
"""Test cases for create cloned volume.""" import copy import mock import six import time from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit import utils as utils from cinder.tests.unit.volume.drivers import disco class CreateCloneVolumeTestCase(disco.TestDISCODriver): """Test cases for DISCO connector.""" def setUp(self): """Initialise variables and mock functions.""" super(CreateCloneVolumeTestCase, self).setUp() self.dest_volume = fake_volume.fake_volume_obj(self.ctx) # Create mock functions for all the suds call done by the driver.""" mock.patch.object(self.requester, 'volumeClone', self.clone_request).start() mock.patch.object(self.requester, 'cloneDetail', self.clone_detail_request).start() mock.patch.object(self.requester, 'volumeDetailByName', self.volume_detail_request).start() self.volume_detail_response = { 'status': 0, 'volumeInfoResult': {'volumeId': 1234567} } clone_success = ( copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success'])) clone_pending = ( copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success'])) clone_fail = ( copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success'])) clone_response_fail = ( copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success'])) clone_success['result'] = ( six.text_type(self.DETAIL_OPTIONS['success'])) clone_pending['result'] = ( six.text_type(self.DETAIL_OPTIONS['pending'])) clone_fail['result'] = ( six.text_type(self.DETAIL_OPTIONS['failure'])) clone_response_fail['status'] = 1 self.FAKE_SOAP_RESPONSE['clone_detail'] = { 'success': clone_success, 'fail': clone_fail, 'pending': clone_pending, 'request_fail': clone_response_fail } self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response['result'] = '1234' self.response_detail = ( self.FAKE_SOAP_RESPONSE['clone_detail']['success']) self.test_pending = False self.test_pending_count = 0 def clone_request(self, *cmd, **kwargs): """Mock function for the createVolumeFromSnapshot function.""" return self.response def clone_detail_request(self, *cmd, **kwargs): """Mock function for the restoreDetail function.""" if self.test_pending: if self.test_pending_count == 0: self.test_pending_count += 1 return self.FAKE_SOAP_RESPONSE['clone_detail']['pending'] else: return self.FAKE_SOAP_RESPONSE['clone_detail']['success'] else: return self.response_detail def volume_detail_request(self, *cmd, **kwargs): """Mock function for the volumeDetail function.""" return self.volume_detail_response def test_create_cloned_volume(self): """Normal case.""" expected = 1234567 actual = self.driver.create_cloned_volume(self.dest_volume, self.volume) self.assertEqual(expected, actual['provider_location']) def test_create_clone_volume_fail(self): """Clone volume request to DISCO fails.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume) def test_create_cloned_volume_fail_not_immediate(self): """Get clone detail returns that the clone fails.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['clone_detail']['fail']) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume) def test_create_cloned_volume_fail_not_immediate_response_fail(self): """Get clone detail request to DISCO fails.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['clone_detail']['request_fail']) 
self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume) def test_create_cloned_volume_fail_not_immediate_request_fail(self): """Get clone detail returns the task is pending then complete.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.test_pending = True self.test_create_cloned_volume() @mock.patch.object(time, 'time') def test_create_cloned_volume_timeout(self, mock_time): """Clone request timeout.""" timeout = 3 mock_time.side_effect = utils.generate_timeout_series(timeout) self.driver.configuration.clone_check_timeout = timeout self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['clone_detail']['pending']) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume) def test_create_cloned_volume_volume_detail_fail(self): """Get volume detail request to DISCO fails.""" self.volume_detail_response['status'] = 1 self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume) cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/test_extend_volume.py0000664000567000056710000000345212701406250030067 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Industrial Technology Research Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test cases for the extend volume feature.""" import mock from cinder import exception from cinder.tests.unit.volume.drivers import disco class VolumeExtendTestCase(disco.TestDISCODriver): """Test cases for DISCO connector.""" def setUp(self): """Initialise variables and mock functions.""" super(VolumeExtendTestCase, self).setUp() # Mock function to extend a volume. mock.patch.object(self.requester, 'volumeExtend', self.perform_disco_request).start() self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.new_size = 5 def perform_disco_request(self, *cmd, **kwargs): """Mock volumExtend function from suds client.""" return self.response def test_extend_volume(self): """Extend a volume, normal case.""" self.driver.extend_volume(self.volume, self.new_size) def test_extend_volume_fail(self): """Request to DISCO failed.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_extend_volume) cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/__init__.py0000664000567000056710000001146512701406250025714 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Industrial Technology Research Institute. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Parent class for the DISCO driver unit test.""" import mock from suds import client from os_brick.initiator import connector from cinder import context from cinder import test from cinder.tests.unit import fake_volume from cinder.volume import configuration as conf import cinder.volume.drivers.disco.disco as driver class TestDISCODriver(test.TestCase): """Generic class for the DISCO test case.""" DETAIL_OPTIONS = { 'success': 1, 'pending': 2, 'failure': 3 } ERROR_STATUS = 1 def setUp(self): """Initialise variables common to all the test cases.""" super(TestDISCODriver, self).setUp() mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.disco_client = '127.0.0.1' self.cfg.disco_client_port = '9898' self.cfg.disco_wsdl_path = 'somewhere' self.cfg.volume_name_prefix = 'openstack-' self.cfg.num_volume_device_scan_tries = 1 self.cfg.snapshot_check_timeout = 3600 self.cfg.restore_check_timeout = 3600 self.cfg.clone_check_timeout = 3600 self.cfg.snapshot_reserve_days = -1 self.cfg.retry_interval = 1 self.FAKE_SOAP_RESPONSE = { 'standard': { 'success': {'status': 0, 'result': 'a normal message'}, 'fail': {'status': 1, 'result': 'an error message'}} } mock.patch.object(client, 'Client', self.create_client).start() mock.patch.object(connector.InitiatorConnector, 'factory', self.get_mock_connector).start() mock.patch.object(driver.DiscoDriver, '_get_connector_identifier', self.get_mock_attribute).start() self.driver = driver.DiscoDriver(execute=mock_exec, configuration=self.cfg) self.driver.do_setup(None) self.ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj(self.ctx) self.volume['volume_id'] = '1234567' self.requester = self.driver.client.service def create_client(self, *cmd, **kwargs): """Mock the suds client.""" return FakeClient() def get_mock_connector(self, *cmd, **kwargs): """Mock the os_brick connector.""" return None def get_mock_attribute(self, *cmd, **kwargs): """Mock the connector identifier.""" return 'DISCO' class FakeClient(object): """Fake class to mock suds.Client.""" def __init__(self, *args, **kwargs): """Create a fake service attribute.""" self.service = FakeMethod() class FakeMethod(object): """Fake class listing some of the methods of the suds client.""" def __init__(self, *args, **kwargs): """Fake class to mock the suds client.""" def volumeCreate(self, *args, **kwargs): """Mock function to create a volume.""" def volumeDelete(self, *args, **kwargs): """Mock function to delete a volume.""" def snapshotCreate(self, *args, **kwargs): """Mock function to create a snapshot.""" def snapshotDetail(self, *args, **kwargs): """Mock function to get the snapshot detail.""" def snapshotDelete(self, *args, **kwargs): """Mock function to delete snapshot.""" def restoreFromSnapshot(self, *args, **kwargs): """Mock function to create a volume from a snapshot.""" def restoreDetail(self, *args, **kwargs): """Mock function to detail the restore operation.""" def volumeDetailByName(self, *args, **kwargs): """Mock function to get the volume detail from its name.""" def volumeClone(self, *args, **kwargs): """Mock function to clone a volume.""" def cloneDetail(self, *args, **kwargs): """Mock function to get the clone detail.""" def volumeExtend(self, *args, **kwargs): """Mock function to extend a volume.""" def systemInformationList(self, *args, **kwargs): """Mock function to
get the backend properties.""" cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/test_create_volume_from_snapshot.py0000664000567000056710000001534412701406250033010 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Industrial Technology Research Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test case for create volume from snapshot.""" import copy import mock import time from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import utils as utils from cinder.tests.unit.volume.drivers import disco class CreateVolumeFromSnapshotTestCase(disco.TestDISCODriver): """Test cases for the create volume from snapshot of DISCO connector.""" def setUp(self): """Initialise variables and mock functions.""" super(CreateVolumeFromSnapshotTestCase, self).setUp() self.snapshot = fake_snapshot.fake_snapshot_obj( self.ctx, **{'volume': self.volume}) # Mock restoreFromSnapshot, restoreDetail # and volume detail since they are in the function path mock.patch.object(self.requester, 'restoreFromSnapshot', self.restore_request).start() mock.patch.object(self.requester, 'restoreDetail', self.restore_detail_request).start() mock.patch.object(self.requester, 'volumeDetailByName', self.volume_detail_request).start() restore_detail_response = { 'status': 0, 'restoreInfoResult': {'restoreId': 1234, 'startTime': '', 'statusPercent': '', 'volumeName': 'aVolumeName', 'snapshotId': 1234, 'status': 0} } self.volume_detail_response = { 'status': 0, 'volumeInfoResult': {'volumeId': 1234567} } rest_success = copy.deepcopy(restore_detail_response) rest_pending = copy.deepcopy(restore_detail_response) rest_fail = copy.deepcopy(restore_detail_response) rest_response_fail = copy.deepcopy(restore_detail_response) rest_success['restoreInfoResult']['status'] = ( self.DETAIL_OPTIONS['success']) rest_pending['restoreInfoResult']['status'] = ( self.DETAIL_OPTIONS['pending']) rest_fail['restoreInfoResult']['status'] = ( self.DETAIL_OPTIONS['failure']) rest_response_fail['status'] = 1 self.FAKE_SOAP_RESPONSE['restore_detail'] = { 'success': rest_success, 'fail': rest_fail, 'pending': rest_pending, 'request_fail': rest_response_fail } self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response['result'] = '1234' self.response_detail = ( self.FAKE_SOAP_RESPONSE['restore_detail']['success']) self.test_pending = False self.test_pending_count = 0 def restore_request(self, *cmd, **kwargs): """Mock function for the createVolumeFromSnapshot function.""" return self.response def restore_detail_request(self, *cmd, **kwargs): """Mock function for the restoreDetail function.""" if self.test_pending: if self.test_pending_count == 0: self.test_pending_count += 1 return self.FAKE_SOAP_RESPONSE['restore_detail']['pending'] else: return self.FAKE_SOAP_RESPONSE['restore_detail']['success'] else: return self.response_detail def volume_detail_request(self, *cmd, **kwargs): """Mock function for the volumeDetail function.""" return self.volume_detail_response def 
test_create_volume_from_snapshot(self): """Normal case.""" expected = 1234567 actual = self.driver.create_volume_from_snapshot(self.volume, self.snapshot) self.assertEqual(expected, actual['provider_location']) def test_create_volume_from_snapshot_fail(self): """Create volume from snapshot request fails.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume_from_snapshot) def test_create_volume_from_snapshot_fail_not_immediate(self): """Restore detail reports that the restore operation failed.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['restore_detail']['fail']) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume_from_snapshot) def test_create_volume_from_snapshot_fail_detail_response_fail(self): """Request to get the restore details returns a failure.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['restore_detail']['request_fail']) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume_from_snapshot) def test_create_volume_from_snapshot_detail_pending(self): """Request to get the restore detail returns pending then success.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.test_pending = True self.test_create_volume_from_snapshot() @mock.patch.object(time, 'time') def test_create_volume_from_snapshot_timeout(self, mock_time): """Create volume from snapshot task timeout.""" timeout = 3 mock_time.side_effect = utils.generate_timeout_series(timeout) self.driver.configuration.restore_check_timeout = timeout self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['restore_detail']['pending']) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume_from_snapshot) def test_create_volume_from_snapshot_volume_detail_fail(self): """Cannot get the newly created volume information.""" self.volume_detail_response['status'] = 1 self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume_from_snapshot) cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/test_create_snapshot.py0000664000567000056710000001357712701406250030374 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Industrial Technology Research Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
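# The timeout tests in these DISCO test modules patch time.time with an
# ever-increasing series of readings so the driver's polling loop sees its
# deadline expire without real sleeps. Below is a minimal sketch of that
# pattern, assuming only the standard library plus mock; fake_timeout_series
# and fake_poll are illustrative stand-ins, not the helpers the tests use
# (those come from cinder.tests.unit.utils.generate_timeout_series).
import itertools
import time

import mock


def fake_timeout_series(timeout, start=1):
    # The first reading marks the start time; every later reading jumps far
    # enough ahead that any comparison against `timeout` has already expired.
    return itertools.count(start, timeout + 2)


def fake_poll(deadline):
    begin = time.time()
    while time.time() - begin <= deadline:
        pass  # a real driver would re-query the backend here
    raise RuntimeError('operation timed out')


with mock.patch.object(time, 'time', side_effect=fake_timeout_series(3)):
    try:
        fake_poll(3)
    except RuntimeError:
        pass  # the mocked clock forced the timeout branch on the second call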
"""Test case for the function create snapshot.""" import copy import mock import time from cinder import db from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import utils as utils from cinder.tests.unit.volume.drivers import disco class CreateSnapshotTestCase(disco.TestDISCODriver): """Test cases for DISCO connector.""" def get_fake_volume(self, ctx, id): """Return fake volume from db calls.""" return self.volume def setUp(self): """Initialise variables and mock functions.""" super(CreateSnapshotTestCase, self).setUp() self.snapshot = fake_snapshot.fake_snapshot_obj( self.ctx, **{'volume': self.volume}) # Mock db call in the cinder driver self.mock_object(db.sqlalchemy.api, 'volume_get', self.get_fake_volume) mock.patch.object(self.requester, 'snapshotCreate', self.snapshot_request).start() mock.patch.object(self.requester, 'snapshotDetail', self.snapshot_detail_request).start() snapshot_detail_response = { 'status': 0, 'snapshotInfoResult': {'snapshotId': 1234, 'description': 'a description', 'createTime': '', 'expireTime': '', 'isDeleted': False, 'status': 0} } snap_success = copy.deepcopy(snapshot_detail_response) snap_pending = copy.deepcopy(snapshot_detail_response) snap_fail = copy.deepcopy(snapshot_detail_response) snap_response_fail = copy.deepcopy(snapshot_detail_response) snap_success['snapshotInfoResult']['status'] = ( self.DETAIL_OPTIONS['success']) snap_pending['snapshotInfoResult']['status'] = ( self.DETAIL_OPTIONS['pending']) snap_fail['snapshotInfoResult']['status'] = ( self.DETAIL_OPTIONS['failure']) snap_response_fail['status'] = 1 self.FAKE_SOAP_RESPONSE['snapshot_detail'] = { 'success': snap_success, 'fail': snap_fail, 'pending': snap_pending, 'request_fail': snap_response_fail} self.response = ( self.FAKE_SOAP_RESPONSE['standard']['success']) self.response['result'] = 1234 self.response_detail = ( self.FAKE_SOAP_RESPONSE['snapshot_detail']['success']) self.test_pending = False self.test_pending_count = 0 def snapshot_request(self, *cmd, **kwargs): """Mock function for the createSnapshot call.""" return self.response def snapshot_detail_request(self, *cmd, **kwargs): """Mock function for the snapshotDetail call.""" if self.test_pending: if self.test_pending_count == 0: self.test_pending_count += 1 return self.FAKE_SOAP_RESPONSE['snapshot_detail']['pending'] else: return self.FAKE_SOAP_RESPONSE['snapshot_detail']['success'] else: return self.response_detail def test_create_snapshot(self): """Normal test case.""" expected = 1234 actual = self.driver.create_snapshot(self.volume) self.assertEqual(expected, actual['provider_location']) def test_create_snapshot_fail(self): """Request to DISCO failed.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_snapshot) def test_create_snapshot_fail_not_immediate(self): """Request to DISCO failed when monitoring the snapshot details.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['snapshot_detail']['fail']) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_snapshot) def test_create_snapshot_fail_not_immediate_response_fail(self): """Request to get the snapshot details returns a failure.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['snapshot_detail']['request_fail']) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_snapshot) def 
test_create_snapshot_detail_pending(self): """Request to get the snapshot detail returns pending then success.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.test_pending = True self.test_create_snapshot() @mock.patch.object(time, 'time') def test_create_snapshot_timeout(self, mock_time): """Snapshot request timeout.""" timeout = 3 mock_time.side_effect = utils.generate_timeout_series(timeout) self.driver.configuration.snapshot_check_timeout = timeout self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = ( self.FAKE_SOAP_RESPONSE['snapshot_detail']['pending']) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_snapshot) cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/test_delete_snapshot.py0000664000567000056710000000367312701406250030377 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Industrial Technology Research Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test case for the delete snapshot function.""" import mock from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit.volume.drivers import disco class DeleteSnapshotTestCase(disco.TestDISCODriver): """Test cases to delete DISCO snapshots.""" def setUp(self): """Initialise variables and mock functions.""" super(DeleteSnapshotTestCase, self).setUp() # Mock snapshotDelete function from suds client. mock.patch.object(self.requester, 'snapshotDelete', self.perform_disco_request).start() self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.snapshot = fake_snapshot.fake_snapshot_obj( self.ctx, **{'volume': self.volume}) def perform_disco_request(self, *cmd, **kwargs): """Mock function to delete a snapshot.""" return self.response def test_delete_snapshot(self): """Delete a snapshot.""" self.driver.delete_snapshot(self.snapshot) def test_delete_snapshot_fail(self): """Make the API return an error while deleting.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_delete_snapshot) cinder-8.0.0/cinder/tests/unit/volume/drivers/disco/test_create_volume.py0000664000567000056710000000357312701406250030047 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Industrial Technology Research Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
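# Each DISCO test below swaps a single suds service method for a stub that
# returns a canned payload, then points the payload at the success or
# failure variant before driving the driver. A minimal sketch of that
# stubbing pattern, assuming only mock; _FakeService and the payloads are
# illustrative, not part of the driver:
import mock


class _FakeService(object):
    def volumeCreate(self, *args, **kwargs):
        """Stand-in for the real SOAP call."""


_service = _FakeService()
_success = {'status': 0, 'result': '1234567'}
_failure = {'status': 1, 'result': 'an error message'}
for _canned in (_success, _failure):
    with mock.patch.object(_service, 'volumeCreate', return_value=_canned):
        # Driver-side code only ever inspects 'status' and 'result'.
        assert _service.volumeCreate('a-volume-name', 8) == _canned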
"""Test case for the create volume function.""" import mock from cinder import exception from cinder.tests.unit.volume.drivers import disco class CreateVolumeTestCase(disco.TestDISCODriver): """Test cases for DISCO connector.""" def setUp(self): """Prepare variables and mock functions.""" super(CreateVolumeTestCase, self).setUp() # Mock the suds cliebt. mock.patch.object(self.requester, 'volumeCreate', self.perform_disco_request).start() self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] def perform_disco_request(self, *cmd, **kwargs): """Mock function for the suds client.""" return self.response def test_create_volume(self): """Normal case.""" expected = '1234567' self.response['result'] = expected ret = self.driver.create_volume(self.volume) actual = ret['provider_location'] self.assertEqual(expected, actual) def test_create_volume_fail(self): """Request to DISCO failed.""" self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume) cinder-8.0.0/cinder/tests/unit/volume/drivers/test_fujitsu.py0000664000567000056710000007704712701406250025614 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 FUJITSU LIMITED # Copyright (c) 2012 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import six import tempfile from oslo_utils import units from cinder import exception from cinder import test from cinder.volume import configuration as conf with mock.patch.dict('sys.modules', pywbem=mock.Mock()): from cinder.volume.drivers.fujitsu import eternus_dx_common as dx_common from cinder.volume.drivers.fujitsu import eternus_dx_fc as dx_fc from cinder.volume.drivers.fujitsu import eternus_dx_iscsi as dx_iscsi CONFIG_FILE_NAME = 'cinder_fujitsu_eternus_dx.xml' STORAGE_SYSTEM = '172.16.0.2' CONF = """ 172.16.0.2 5988 testuser testpass 10.0.0.3 abcd1234_TPP abcd1234_OSVD """ TEST_VOLUME = { 'id': '3d6eeb5d-109b-4435-b891-d01415178490', 'name': 'volume1', 'display_name': 'volume1', 'provider_location': None, 'volume_metadata': [], 'size': 1, } TEST_SNAP = { 'id': 'f47a8da3-d9e2-46aa-831f-0ef04158d5a1', 'volume_name': 'volume-3d6eeb5d-109b-4435-b891-d01415178490', 'name': 'snap1', 'display_name': 'test_snapshot', 'volume': TEST_VOLUME, 'volume_id': '3d6eeb5d-109b-4435-b891-d01415178490', } TEST_CLONE = { 'name': 'clone1', 'size': 1, 'volume_name': 'vol1', 'id': '391fb914-8a55-4384-a747-588641db3b15', 'project_id': 'project', 'display_name': 'clone1', 'display_description': 'volume created from snapshot', 'volume_metadata': [], } ISCSI_INITIATOR = 'iqn.1993-08.org.debian:01:8261afe17e4c' ISCSI_TARGET_IP = '10.0.0.3' ISCSI_TARGET_IQN = 'iqn.2000-09.com.fujitsu:storage-system.eternus-dxl:0' FC_TARGET_WWN = ['500000E0DA000001', '500000E0DA000002'] TEST_WWPN = ['0123456789111111', '0123456789222222'] TEST_CONNECTOR = {'initiator': ISCSI_INITIATOR, 'wwpns': TEST_WWPN} STOR_CONF_SVC = 'FUJITSU_StorageConfigurationService' CTRL_CONF_SVC = 'FUJITSU_ControllerConfigurationService' REPL_SVC = 'FUJITSU_ReplicationService' STOR_VOL = 'FUJITSU_StorageVolume' SCSI_PROT_CTR = 'FUJITSU_AffinityGroupController' STOR_HWID = 'FUJITSU_StorageHardwareID' STOR_HWID_MNG_SVC = 'FUJITSU_StorageHardwareIDManagementService' STOR_POOL = 'FUJITSU_RAIDStoragePool' STOR_POOLS = ['FUJITSU_ThinProvisioningPool', 'FUJITSU_RAIDStoragePool'] AUTH_PRIV = 'FUJITSU_AuthorizedPrivilege' STOR_SYNC = 'FUJITSU_StorageSynchronized' PROT_CTRL_UNIT = 'CIM_ProtocolControllerForUnit' STORAGE_TYPE = 'abcd1234_TPP' LUNMASKCTRL_IDS = ['AFG0010_CM00CA00P00', 'AFG0011_CM01CA00P00'] MAP_STAT = '0' VOL_STAT = '0' FAKE_CAPACITY = 1170368102400 FAKE_LUN_ID1 = '600000E00D2A0000002A011500140000' FAKE_LUN_NO1 = '0x0014' FAKE_LUN_ID2 = '600000E00D2A0000002A0115001E0000' FAKE_LUN_NO2 = '0x001E' FAKE_SYSTEM_NAME = 'ET603SA4621302115' FAKE_STATS = { 'vendor_name': 'FUJITSU', 'total_capacity_gb': FAKE_CAPACITY / units.Gi, 'free_capacity_gb': FAKE_CAPACITY / units.Gi, } FAKE_KEYBIND1 = { 'CreationClassName': 'FUJITSU_StorageVolume', 'SystemName': STORAGE_SYSTEM, 'DeviceID': FAKE_LUN_ID1, 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', } FAKE_LOCATION1 = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': FAKE_KEYBIND1, } FAKE_LUN_META1 = { 'FJ_Pool_Type': 'Thinporvisioning_POOL', 'FJ_Volume_No': FAKE_LUN_NO1, 'FJ_Volume_Name': u'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==', 'FJ_Pool_Name': STORAGE_TYPE, 'FJ_Backend': FAKE_SYSTEM_NAME, } FAKE_MODEL_INFO1 = { 'provider_location': six.text_type(FAKE_LOCATION1), 'metadata': FAKE_LUN_META1, } FAKE_KEYBIND2 = { 'CreationClassName': 'FUJITSU_StorageVolume', 'SystemName': STORAGE_SYSTEM, 'DeviceID': FAKE_LUN_ID2, 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', } FAKE_LOCATION2 = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': FAKE_KEYBIND2, } FAKE_SNAP_INFO = 
{'provider_location': six.text_type(FAKE_LOCATION2)} FAKE_LUN_META2 = { 'FJ_Pool_Type': 'Thinporvisioning_POOL', 'FJ_Volume_No': FAKE_LUN_NO1, 'FJ_Volume_Name': u'FJosv_UkCZqMFZW3SU_JzxjHiKfg==', 'FJ_Pool_Name': STORAGE_TYPE, 'FJ_Backend': FAKE_SYSTEM_NAME, } FAKE_MODEL_INFO2 = { 'provider_location': six.text_type(FAKE_LOCATION1), 'metadata': FAKE_LUN_META2, } class FJ_StorageVolume(dict): pass class FJ_StoragePool(dict): pass class FJ_AffinityGroupController(dict): pass class FakeCIMInstanceName(dict): def fake_create_eternus_instance_name(self, classname, bindings): instancename = FakeCIMInstanceName() for key in bindings: instancename[key] = bindings[key] instancename.classname = classname instancename.namespace = 'root/eternus' return instancename class FakeEternusConnection(object): def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None, ElementType=None, TheElement=None, LUNames=None, Size=None, Type=None, Mode=None, Locality=None, InitiatorPortIDs=None, TargetPortIDs=None, DeviceAccesses=None, SyncType=None, SourceElement=None, TargetElement=None, Operation=None, CopyType=None, Synchronization=None, ProtocolControllers=None, TargetPool=None): global MAP_STAT, VOL_STAT if MethodName == 'CreateOrModifyElementFromStoragePool': VOL_STAT = '1' rc = 0 vol = self._enum_volumes() job = {'TheElement': vol[0].path} elif MethodName == 'ReturnToStoragePool': VOL_STAT = '0' rc = 0 job = {} elif MethodName == 'GetReplicationRelationships': rc = 0 job = {'Synchronizations': []} elif MethodName == 'ExposePaths': MAP_STAT = '1' rc = 0 job = {} elif MethodName == 'HidePaths': MAP_STAT = '0' rc = 0 job = {} elif MethodName == 'CreateElementReplica': rc = 0 snap = self._enum_snapshots() job = {'TargetElement': snap[0].path} elif MethodName == 'CreateReplica': rc = 0 snap = self._enum_snapshots() job = {'TargetElement': snap[0].path} elif MethodName == 'ModifyReplicaSynchronization': rc = 0 job = {} else: raise exception.VolumeBackendAPIException(data="invoke method") return (rc, job) def EnumerateInstanceNames(self, name): result = [] if name == 'FUJITSU_StorageVolume': result = self._enum_volumes() elif name == 'FUJITSU_StorageConfigurationService': result = self._enum_confservice() elif name == 'FUJITSU_ReplicationService': result = self._enum_repservice() elif name == 'FUJITSU_ControllerConfigurationService': result = self._enum_ctrlservice() elif name == 'FUJITSU_AffinityGroupController': result = self._enum_afntyservice() elif name == 'FUJITSU_StorageHardwareIDManagementService': result = self._enum_sthwidmngsvc() elif name == 'CIM_ProtocolControllerForUnit': result = self._ref_unitnames() elif name == 'CIM_StoragePool': result = self._enum_pools() elif name == 'FUJITSU_SCSIProtocolEndpoint': result = self._enum_scsiport_endpoint() elif name == 'FUJITSU_IPProtocolEndpoint': result = self._enum_ipproto_endpoint() return result def EnumerateInstances(self, name): result = None if name == 'FUJITSU_StorageProduct': result = self._enum_sysnames() elif name == 'FUJITSU_RAIDStoragePool': result = self._enum_pool_details('RAID') elif name == 'FUJITSU_ThinProvisioningPool': result = self._enum_pool_details('TPP') elif name == 'FUJITSU_SCSIProtocolEndpoint': result = self._enum_scsiport_endpoint() elif name == 'FUJITSU_iSCSIProtocolEndpoint': result = self._enum_iscsiprot_endpoint() elif name == 'FUJITSU_StorageHardwareID': result = self._enum_sthwid() elif name == 'CIM_SCSIProtocolEndpoint': result = self._enum_scsiport_endpoint() elif name == 'FUJITSU_StorageHardwareID': result = None 
else: result = None return result def GetInstance(self, objectpath, LocalOnly=False): try: name = objectpath['CreationClassName'] except KeyError: name = objectpath.classname result = None if name == 'FUJITSU_StorageVolume': result = self._getinstance_storagevolume(objectpath) elif name == 'FUJITSU_IPProtocolEndpoint': result = self._getinstance_ipprotocolendpoint(objectpath) elif name == 'CIM_ProtocolControllerForUnit': result = self._getinstance_unit(objectpath) elif name == 'FUJITSU_AffinityGroupController': result = self._getinstance_unit(objectpath) return result def Associators(self, objectpath, AssocClass=None, ResultClass='FUJITSU_StorageHardwareID'): result = None if ResultClass == 'FUJITSU_StorageHardwareID': result = self._assoc_hdwid() elif ResultClass == 'FUJITSU_iSCSIProtocolEndpoint': result = self._assoc_endpoint(objectpath) elif ResultClass == 'FUJITSU_StorageVolume': result = self._assoc_storagevolume(objectpath) elif ResultClass == 'FUJITSU_AuthorizedPrivilege': result = self._assoc_authpriv() else: result = self._default_assoc(objectpath) return result def AssociatorNames(self, objectpath, AssocClass=None, ResultClass=SCSI_PROT_CTR): result = None if ResultClass == SCSI_PROT_CTR: result = self._assocnames_lunmaskctrl() elif ResultClass == 'FUJITSU_TCPProtocolEndpoint': result = self._assocnames_tcp_endpoint() elif ResultClass == 'FUJITSU_AffinityGroupController': result = self._assocnames_afngroup() else: result = self._default_assocnames(objectpath) return result def ReferenceNames(self, objectpath, ResultClass='CIM_ProtocolControllerForUnit'): result = [] if ResultClass == 'CIM_ProtocolControllerForUnit': if MAP_STAT == '1': result = self._ref_unitnames() else: result = [] elif ResultClass == 'FUJITSU_StorageSynchronized': result = self._ref_storage_sync() else: result = self._default_ref(objectpath) return result def _ref_unitnames(self): unitnames = [] unitname = FJ_AffinityGroupController() dependent = {} dependent['CreationClassName'] = STOR_VOL dependent['DeviceID'] = FAKE_LUN_ID1 dependent['SystemName'] = STORAGE_SYSTEM antecedent = {} antecedent['CreationClassName'] = SCSI_PROT_CTR antecedent['DeviceID'] = LUNMASKCTRL_IDS[0] antecedent['SystemName'] = STORAGE_SYSTEM unitname['Dependent'] = dependent unitname['Antecedent'] = antecedent unitname['CreationClassName'] = PROT_CTRL_UNIT unitname.path = unitname unitnames.append(unitname) unitname2 = FJ_AffinityGroupController() dependent2 = {} dependent2['CreationClassName'] = STOR_VOL dependent2['DeviceID'] = FAKE_LUN_ID1 dependent2['SystemName'] = STORAGE_SYSTEM antecedent2 = {} antecedent2['CreationClassName'] = SCSI_PROT_CTR antecedent2['DeviceID'] = LUNMASKCTRL_IDS[1] antecedent2['SystemName'] = STORAGE_SYSTEM unitname2['Dependent'] = dependent2 unitname2['Antecedent'] = antecedent2 unitname2['CreationClassName'] = PROT_CTRL_UNIT unitname2.path = unitname2 unitnames.append(unitname2) return unitnames def _ref_storage_sync(self): syncnames = [] return syncnames def _default_ref(self, objectpath): return objectpath def _default_assoc(self, objectpath): return objectpath def _assocnames_lunmaskctrl(self): return self._enum_lunmaskctrls() def _assocnames_tcp_endpoint(self): return self._enum_tcp_endpoint() def _assocnames_afngroup(self): return self._enum_afntyservice() def _default_assocnames(self, objectpath): return objectpath def _assoc_authpriv(self): authprivs = [] iscsi = {} iscsi['InstanceID'] = ISCSI_INITIATOR authprivs.append(iscsi) fc = {} fc['InstanceID'] = TEST_WWPN[0] authprivs.append(fc) fc1 = {} 
fc1['InstanceID'] = TEST_WWPN[1] authprivs.append(fc1) return authprivs def _assoc_endpoint(self, objectpath): targetlist = [] tgtport1 = {} tgtport1['CreationClassName'] = 'FUJITSU_IPProtocolEndpoint' tgtport1['Name'] = ('iqn.2000-09.com.fujitsu:storage-system.' 'eternus-dxl:0123456789,t,0x0009') targetlist.append(tgtport1) return targetlist def _getinstance_unit(self, objectpath): unit = FJ_AffinityGroupController() unit.path = None if MAP_STAT == '0': return unit dependent = {} dependent['CreationClassName'] = STOR_VOL dependent['DeviceID'] = FAKE_LUN_ID1 dependent['ElementName'] = TEST_VOLUME['name'] dependent['SystemName'] = STORAGE_SYSTEM antecedent = {} antecedent['CreationClassName'] = SCSI_PROT_CTR antecedent['DeviceID'] = LUNMASKCTRL_IDS[0] antecedent['SystemName'] = STORAGE_SYSTEM unit['Dependent'] = dependent unit['Antecedent'] = antecedent unit['CreationClassName'] = PROT_CTRL_UNIT unit['DeviceNumber'] = '0' unit.path = unit return unit def _enum_sysnames(self): sysnamelist = [] sysname = {} sysname['IdentifyingNumber'] = FAKE_SYSTEM_NAME sysnamelist.append(sysname) return sysnamelist def _enum_confservice(self): services = [] service = {} service['Name'] = 'FUJITSU:ETERNUS SMI-S Agent' service['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' service['SystemName'] = STORAGE_SYSTEM service['CreationClassName'] = 'FUJITSU_StorageConfigurationService' services.append(service) return services def _enum_ctrlservice(self): services = [] service = {} service['SystemName'] = STORAGE_SYSTEM service['CreationClassName'] = 'FUJITSU_ControllerConfigurationService' services.append(service) return services def _enum_afntyservice(self): services = [] service = {} service['SystemName'] = STORAGE_SYSTEM service['CreationClassName'] = 'FUJITSU_AffinityGroupController' services.append(service) return services def _enum_repservice(self): services = [] service = {} service['Name'] = 'FUJITSU:ETERNUS SMI-S Agent' service['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' service['SystemName'] = STORAGE_SYSTEM service['CreationClassName'] = 'FUJITSU_ReplicationService' services.append(service) return services def _enum_pools(self): pools = [] pool = {} pool['InstanceID'] = 'FUJITSU:RSP0004' pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pools.append(pool) pool2 = {} pool2['InstanceID'] = 'FUJITSU:TPP0004' pool2['CreationClassName'] = 'FUJITSU_ThinProvisioningPool' pools.append(pool2) return pools def _enum_pool_details(self, pooltype): pools = [] pool = FJ_StoragePool() if pooltype == 'RAID': pool['InstanceID'] = 'FUJITSU:RSP0004' pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pool['ElementName'] = 'abcd1234_OSVD' pool['TotalManagedSpace'] = 1170368102400 pool['RemainingManagedSpace'] = 1170368102400 pool.path = pool pool.path.classname = 'FUJITSU_RAIDStoragePool' else: pool = FJ_StoragePool() pool['InstanceID'] = 'FUJITSU:TPP0004' pool['CreationClassName'] = 'FUJITSU_ThinProvisioningPool' pool['ElementName'] = 'abcd1234_TPP' pool['TotalManagedSpace'] = 1170368102400 pool['RemainingManagedSpace'] = 1170368102400 pool.path = pool pool.path.classname = 'FUJITSU_ThinProvisioningPool' pools.append(pool) return pools def _enum_volumes(self): volumes = [] if VOL_STAT == '0': return volumes volume = FJ_StorageVolume() volume['name'] = TEST_VOLUME['name'] volume['CreationClassName'] = 'FUJITSU_StorageVolume' volume['Name'] = FAKE_LUN_ID1 volume['DeviceID'] = FAKE_LUN_ID1 volume['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' volume['SystemName'] = 
STORAGE_SYSTEM volume['ElementName'] = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' volume['volume_type_id'] = None volume.path = volume volume.path.classname = volume['CreationClassName'] name = {} name['classname'] = 'FUJITSU_StorageVolume' keys = {} keys['CreationClassName'] = 'FUJITSU_StorageVolume' keys['SystemName'] = STORAGE_SYSTEM keys['DeviceID'] = volume['DeviceID'] keys['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' name['keybindings'] = keys volume['provider_location'] = str(name) volumes.append(volume) snap_vol = FJ_StorageVolume() snap_vol['name'] = TEST_SNAP['name'] snap_vol['CreationClassName'] = 'FUJITSU_StorageVolume' snap_vol['Name'] = FAKE_LUN_ID2 snap_vol['DeviceID'] = FAKE_LUN_ID2 snap_vol['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' snap_vol['SystemName'] = STORAGE_SYSTEM snap_vol['ElementName'] = 'FJosv_OgEZj1mSvKRvIKOExKktlg==' snap_vol.path = snap_vol snap_vol.path.classname = snap_vol['CreationClassName'] name2 = {} name2['classname'] = 'FUJITSU_StorageVolume' keys2 = {} keys2['CreationClassName'] = 'FUJITSU_StorageVolume' keys2['SystemName'] = STORAGE_SYSTEM keys2['DeviceID'] = snap_vol['DeviceID'] keys2['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' name2['keybindings'] = keys2 snap_vol['provider_location'] = str(name2) volumes.append(snap_vol) clone_vol = FJ_StorageVolume() clone_vol['name'] = TEST_CLONE['name'] clone_vol['CreationClassName'] = 'FUJITSU_StorageVolume' clone_vol['ElementName'] = TEST_CLONE['name'] clone_vol['DeviceID'] = FAKE_LUN_ID2 clone_vol['SystemName'] = STORAGE_SYSTEM clone_vol['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' clone_vol.path = clone_vol clone_vol.path.classname = clone_vol['CreationClassName'] volumes.append(clone_vol) return volumes def _enum_snapshots(self): snapshots = [] snap = FJ_StorageVolume() snap['CreationClassName'] = 'FUJITSU_StorageVolume' snap['SystemName'] = STORAGE_SYSTEM snap['DeviceID'] = FAKE_LUN_ID2 snap['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' snap.path = snap snap.path.classname = snap['CreationClassName'] snapshots.append(snap) return snapshots def _enum_lunmaskctrls(self): ctrls = [] ctrl = {} ctrl2 = {} if MAP_STAT == '1': ctrl['CreationClassName'] = SCSI_PROT_CTR ctrl['SystemName'] = STORAGE_SYSTEM ctrl['DeviceID'] = LUNMASKCTRL_IDS[0] ctrls.append(ctrl) ctrl2['CreationClassName'] = SCSI_PROT_CTR ctrl2['SystemName'] = STORAGE_SYSTEM ctrl2['DeviceID'] = LUNMASKCTRL_IDS[1] ctrls.append(ctrl2) return ctrls def _enum_scsiport_endpoint(self): targetlist = [] tgtport1 = {} tgtport1['Name'] = '1234567890000021' tgtport1['CreationClassName'] = 'FUJITSU_SCSIProtocolEndpoint' tgtport1['ConnectionType'] = 2 tgtport1['RAMode'] = 0 targetlist.append(tgtport1) return targetlist def _enum_ipproto_endpoint(self): targetlist = [] tgtport1 = {} tgtport1['CreationClassName'] = 'FUJITSU_IPProtocolEndpoint' tgtport1['NAME'] = 'IP_CM01CA00P00_00' targetlist.append(tgtport1) return targetlist def _enum_tcp_endpoint(self): targetlist = [] tgtport1 = {} tgtport1['CreationClassName'] = 'FUJITSU_TCPProtocolEndpoint' tgtport1['NAME'] = 'TCP_CM01CA00P00_00' targetlist.append(tgtport1) return targetlist def _enum_iscsiprot_endpoint(self): targetlist = [] tgtport1 = {} tgtport1['Name'] = ('iqn.2000-09.com.fujitsu:storage-system.' 
'eternus-dxl:0123456789,t,0x0009') tgtport1['ConnectionType'] = 7 tgtport1['RAMode'] = 0 targetlist.append(tgtport1) return targetlist def _getinstance_storagevolume(self, objpath): foundinstance = None instance = FJ_StorageVolume() volumes = self._enum_volumes() for volume in volumes: if volume['DeviceID'] == objpath['DeviceID']: instance = volume break if not instance: foundinstance = None else: foundinstance = instance return foundinstance def _getinstance_ipprotocolendpoint(self, objpath): instance = {} instance['IPv4Address'] = '10.0.0.3' return instance class FJFCDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(FJFCDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(FJFCDriverTestCase, self).setUp() # Make a fake xml-configuration file. self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') self.addCleanup(self.config_file.close) self.config_file.write(CONF) self.config_file.flush() # Make a fake configuration object using mock. self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.cinder_eternus_config_file = self.config_file.name self.stubs.Set(dx_common.FJDXCommon, '_get_eternus_connection', self.fake_eternus_connection) instancename = FakeCIMInstanceName() self.stubs.Set(dx_common.FJDXCommon, '_create_eternus_instance_name', instancename.fake_create_eternus_instance_name) # Set the FC driver to self.driver. driver = dx_fc.FJDXFCDriver(configuration=self.configuration) self.driver = driver def fake_eternus_connection(self): conn = FakeEternusConnection() return conn def test_get_volume_stats(self): ret = self.driver.get_volume_stats(True) stats = {'vendor_name': ret['vendor_name'], 'total_capacity_gb': ret['total_capacity_gb'], 'free_capacity_gb': ret['free_capacity_gb']} self.assertEqual(FAKE_STATS, stats) def test_create_and_delete_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) self.driver.delete_volume(TEST_VOLUME) @mock.patch.object(dx_common.FJDXCommon, '_get_mapdata') def test_map_unmap(self, mock_mapdata): fake_data = {'target_wwn': FC_TARGET_WWN, 'target_lun': 0} mock_mapdata.return_value = fake_data fake_mapdata = dict(fake_data) fake_mapdata['initiator_target_map'] = { initiator: FC_TARGET_WWN for initiator in TEST_WWPN } fake_mapdata['volume_id'] = TEST_VOLUME['id'] fake_mapdata['target_discovered'] = True fake_info = {'driver_volume_type': 'fibre_channel', 'data': fake_mapdata} model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) info = self.driver.initialize_connection(TEST_VOLUME, TEST_CONNECTOR) self.assertEqual(fake_info, info) self.driver.terminate_connection(TEST_VOLUME, TEST_CONNECTOR) self.driver.delete_volume(TEST_VOLUME) def test_create_and_delete_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.assertEqual(FAKE_SNAP_INFO, snap_info) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_VOLUME) def test_create_volume_from_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.assertEqual(FAKE_SNAP_INFO, snap_info) model_info = self.driver.create_volume_from_snapshot(TEST_CLONE, TEST_SNAP) self.assertEqual(FAKE_MODEL_INFO2, model_info) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_CLONE)
self.driver.delete_volume(TEST_VOLUME) def test_create_cloned_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) model_info = self.driver.create_cloned_volume(TEST_CLONE, TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO2, model_info) self.driver.delete_volume(TEST_CLONE) self.driver.delete_volume(TEST_VOLUME) def test_extend_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) self.driver.extend_volume(TEST_VOLUME, 10) class FJISCSIDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(FJISCSIDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(FJISCSIDriverTestCase, self).setUp() # Make a fake xml-configuration file. self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') self.addCleanup(self.config_file.close) self.config_file.write(CONF) self.config_file.flush() # Make a fake configuration object using mock. self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.cinder_eternus_config_file = self.config_file.name self.stubs.Set(dx_common.FJDXCommon, '_get_eternus_connection', self.fake_eternus_connection) instancename = FakeCIMInstanceName() self.stubs.Set(dx_common.FJDXCommon, '_create_eternus_instance_name', instancename.fake_create_eternus_instance_name) self.stubs.Set(dx_common.FJDXCommon, '_get_mapdata_iscsi', self.fake_get_mapdata) # Set the iSCSI driver to self.driver. driver = dx_iscsi.FJDXISCSIDriver(configuration=self.configuration) self.driver = driver def fake_eternus_connection(self): conn = FakeEternusConnection() return conn def fake_get_mapdata(self, vol_instance, connector, target_portlist): multipath = connector.get('multipath', False) if multipath: return {'target_portals': [ISCSI_TARGET_IP], 'target_iqns': [ISCSI_TARGET_IQN], 'target_luns': [0]} else: return {'target_portal': ISCSI_TARGET_IP, 'target_iqn': ISCSI_TARGET_IQN, 'target_lun': 0} def test_get_volume_stats(self): ret = self.driver.get_volume_stats(True) stats = {'vendor_name': ret['vendor_name'], 'total_capacity_gb': ret['total_capacity_gb'], 'free_capacity_gb': ret['free_capacity_gb']} self.assertEqual(FAKE_STATS, stats) def test_create_and_delete_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) self.driver.delete_volume(TEST_VOLUME) def test_map_unmap(self): fake_mapdata = self.fake_get_mapdata(None, {}, None) fake_mapdata['volume_id'] = TEST_VOLUME['id'] fake_mapdata['target_discovered'] = True fake_info = {'driver_volume_type': 'iscsi', 'data': fake_mapdata} model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) info = self.driver.initialize_connection(TEST_VOLUME, TEST_CONNECTOR) self.assertEqual(fake_info, info) self.driver.terminate_connection(TEST_VOLUME, TEST_CONNECTOR) self.driver.delete_volume(TEST_VOLUME) def test_create_and_delete_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.assertEqual(FAKE_SNAP_INFO, snap_info) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_VOLUME) def test_create_volume_from_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.assertEqual(FAKE_SNAP_INFO, snap_info) model_info =
self.driver.create_volume_from_snapshot(TEST_CLONE, TEST_SNAP) self.assertEqual(FAKE_MODEL_INFO2, model_info) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_CLONE) self.driver.delete_volume(TEST_VOLUME) def test_create_cloned_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) model_info = self.driver.create_cloned_volume(TEST_CLONE, TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO2, model_info) self.driver.delete_volume(TEST_CLONE) self.driver.delete_volume(TEST_VOLUME) def test_extend_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.assertEqual(FAKE_MODEL_INFO1, model_info) self.driver.extend_volume(TEST_VOLUME, 10) cinder-8.0.0/cinder/tests/unit/volume/drivers/__init__.py0000664000567000056710000000000012701406250024572 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/0000775000567000056710000000000012701406543023244 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/0000775000567000056710000000000012701406543024663 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_misc.py0000664000567000056710000001723512701406250027232 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from six.moves import urllib from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestMisc(scaleio.TestScaleIODriver): DOMAIN_NAME = 'PD1' POOL_NAME = 'SP1' STORAGE_POOLS = ['{}:{}'.format(DOMAIN_NAME, POOL_NAME)] def setUp(self): """Set up the test case environment. Defines the mock HTTPS responses for the REST API calls. 
""" super(TestMisc, self).setUp() self.domain_name_enc = urllib.parse.quote(self.DOMAIN_NAME) self.pool_name_enc = urllib.parse.quote(self.POOL_NAME) self.ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( self.ctx, **{'name': 'vol1', 'provider_id': '0123456789abcdef'} ) self.new_volume = fake_volume.fake_volume_obj( self.ctx, **{'name': 'vol2', 'provider_id': 'fedcba9876543210'} ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Domain/instances/getByName::' + self.domain_name_enc: '"{}"'.format(self.DOMAIN_NAME).encode( 'ascii', 'ignore' ), 'types/Pool/instances/getByName::{},{}'.format( self.DOMAIN_NAME, self.POOL_NAME ): '"{}"'.format(self.POOL_NAME).encode('ascii', 'ignore'), 'types/StoragePool/instances/action/querySelectedStatistics': { '"{}"'.format(self.POOL_NAME): { 'capacityInUseInKb': 502, 'capacityLimitInKb': 1024, }, }, 'instances/Volume::{}/action/setVolumeName'.format( self.volume['provider_id']): self.new_volume['provider_id'], 'instances/Volume::{}/action/setVolumeName'.format( self.new_volume['provider_id']): self.volume['provider_id'], }, self.RESPONSE_MODE.BadStatus: { 'types/Domain/instances/getByName::' + self.domain_name_enc: self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'types/Domain/instances/getByName::' + self.domain_name_enc: None, 'instances/Volume::{}/action/setVolumeName'.format( self.volume['provider_id']): mocks.MockHTTPSResponse( { 'message': 'Invalid volume.', 'httpStatusCode': 400, 'errorCode': 0 }, 400), }, } def test_valid_configuration(self): self.driver.check_for_setup_error() def test_both_storage_pool(self): """Both storage name and ID provided.""" self.driver.storage_pool_id = "test_pool_id" self.driver.storage_pool_name = "test_pool_name" self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) def test_no_storage_pool(self): """No storage name or ID provided.""" self.driver.storage_pool_name = None self.driver.storage_pool_id = None self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) def test_both_domain(self): self.driver.protection_domain_name = "test_domain_name" self.driver.protection_domain_id = "test_domain_id" self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) def test_no_storage_pools(self): """No storage pools.""" self.driver.storage_pools = None self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) def test_volume_size_round_true(self): self.driver._check_volume_size(1) def test_volume_size_round_false(self): self.driver.configuration.set_override('sio_round_volume_capacity', override=False) self.assertRaises(exception.VolumeBackendAPIException, self.driver._check_volume_size, 1) def test_get_volume_stats_bad_status(self): self.driver.storage_pools = self.STORAGE_POOLS self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_volume_stats, True) def test_get_volume_stats_invalid_domain(self): self.driver.storage_pools = self.STORAGE_POOLS self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_volume_stats, True) def test_get_volume_stats(self): self.driver.storage_pools = self.STORAGE_POOLS self.driver.get_volume_stats(True) @mock.patch( 'cinder.volume.drivers.emc.scaleio.ScaleIODriver._rename_volume', return_value=None) def test_update_migrated_volume(self, mock_rename): test_vol = self.driver.update_migrated_volume( 
self.ctx, self.volume, self.new_volume, 'available') mock_rename.assert_called_with(self.new_volume, self.volume['id']) self.assertEqual({'_name_id': None, 'provider_location': None}, test_vol) @mock.patch( 'cinder.volume.drivers.emc.scaleio.ScaleIODriver._rename_volume', return_value=None) def test_update_unavailable_migrated_volume(self, mock_rename): test_vol = self.driver.update_migrated_volume( self.ctx, self.volume, self.new_volume, 'unavailable') self.assertFalse(mock_rename.called) self.assertEqual({'_name_id': fake.volume_id, 'provider_location': None}, test_vol) @mock.patch( 'cinder.volume.drivers.emc.scaleio.ScaleIODriver._rename_volume', side_effect=exception.VolumeBackendAPIException(data='Error!')) def test_fail_update_migrated_volume(self, mock_rename): self.assertRaises( exception.VolumeBackendAPIException, self.driver.update_migrated_volume, self.ctx, self.volume, self.new_volume, 'available' ) mock_rename.assert_called_with(self.volume, "ff" + self.volume['id']) def test_rename_volume(self): rc = self.driver._rename_volume( self.volume, self.new_volume['id']) self.assertIsNone(rc) def test_fail_rename_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises( exception.VolumeBackendAPIException, self.driver._rename_volume, self.volume, self.new_volume['id'] ) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_volume.py0000664000567000056710000000624012701406250031122 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import urllib from cinder import context from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestDeleteVolume(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.delete_volume()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. 
""" super(TestDeleteVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': 'pid_1'}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.volume.id, 'instances/Volume::{}/action/removeMappedSdc'.format( self.volume.provider_id): self.volume.provider_id, 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): self.volume.provider_id, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), }, } def test_bad_login_and_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_delete_volume(self): """Setting the unmap volume before delete flag for tests """ self.driver.configuration.set_override( 'sio_unmap_volume_before_deletion', override=True) self.driver.delete_volume(self.volume) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_cloned_volume.py0000664000567000056710000000777712701406250032467 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from six.moves import urllib from cinder import context from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestCreateClonedVolume(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.create_cloned_volume()``""" def setUp(self): """Setup a test case environment. Creates fake volume objects and sets up the required API responses. 
""" super(TestCreateClonedVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.src_volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': 'pid001'}) self.src_volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote( self.driver._id_to_base64(self.src_volume.id) ) ) self.new_volume_extras = { 'volumeIdList': ['cloned'], 'snapshotGroupId': 'cloned_snapshot' } self.new_volume = fake_volume.fake_volume_obj( ctx, **self.new_volume_extras ) self.new_volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote( self.driver._id_to_base64(self.new_volume.id) ) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.src_volume_name_2x_enc: self.src_volume.id, 'instances/System/action/snapshotVolumes': '{}'.format( json.dumps(self.new_volume_extras)), }, self.RESPONSE_MODE.BadStatus: { 'instances/System/action/snapshotVolumes': self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.src_volume['provider_id']: self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'types/Volume/instances/getByName::' + self.src_volume_name_2x_enc: None, 'instances/System/action/snapshotVolumes': mocks.MockHTTPSResponse( { 'errorCode': 400, 'message': 'Invalid Volume Snapshot Test' }, 400 ), }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, self.new_volume, self.src_volume) def test_invalid_source_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, self.new_volume, self.src_volume) def test_create_cloned_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.create_cloned_volume(self.new_volume, self.src_volume) def test_create_cloned_volume_larger_size(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.new_volume.size = 2 self.driver.create_cloned_volume(self.new_volume, self.src_volume) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_manage_existing.py0000664000567000056710000001251412701406250031434 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import context from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks from cinder.volume import volume_types from mock import patch from six.moves import urllib class TestManageExisting(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.manage_existing()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. 
""" super(TestManageExisting, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': 'pid_1'}) self.volume_attached = fake_volume.fake_volume_obj( ctx, **{'provider_id': 'pid_2'}) self.volume_no_provider_id = fake_volume.fake_volume_obj(ctx) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'instances/Volume::' + self.volume['provider_id']: mocks.MockHTTPSResponse({ 'id': 'pid_1', 'sizeInKb': 8388608, 'mappedSdcInfo': None }, 200) }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::' + self.volume['provider_id']: mocks.MockHTTPSResponse({ 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401), 'instances/Volume::' + self.volume_attached['provider_id']: mocks.MockHTTPSResponse({ 'id': 'pid_2', 'sizeInKb': 8388608, 'mappedSdcInfo': 'Mapped' }, 200) } } def test_no_source_id(self): existing_ref = {'source-name': 'scaleioVolName'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, existing_ref) def test_no_type_id(self): self.volume['volume_type_id'] = None existing_ref = {'source-id': 'pid_1'} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, self.volume, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_volume_not_found(self, _mock_volume_type): self.volume['volume_type_id'] = 'ScaleIO' existing_ref = {'source-id': 'pid_1'} self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_volume_attached(self, _mock_volume_type): self.volume_attached['volume_type_id'] = 'ScaleIO' existing_ref = {'source-id': 'pid_2'} self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume_attached, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_manage_get_size_calc(self, _mock_volume_type): self.volume['volume_type_id'] = 'ScaleIO' existing_ref = {'source-id': 'pid_1'} self.set_https_response_mode(self.RESPONSE_MODE.Valid) result = self.driver.manage_existing_get_size(self.volume, existing_ref) self.assertEqual(8, result) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_manage_existing_valid(self, _mock_volume_type): self.volume['volume_type_id'] = 'ScaleIO' existing_ref = {'source-id': 'pid_1'} result = self.driver.manage_existing(self.volume, existing_ref) self.assertEqual('pid_1', result['provider_id']) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_extend_volume.py0000664000567000056710000001061012701406250031143 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import urllib from cinder import context from cinder import exception from cinder.tests.unit.fake_volume import fake_volume_obj from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestExtendVolume(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.extend_volume()``""" """ New sizes for the volume. Since ScaleIO has a granularity of 8 GB, multiples of 8 always work. The 7 size should be either rounded up to 8 or raise an exception based on the round_volume_capacity config setting. """ NEW_SIZE = 16 BAD_SIZE = 7 def setUp(self): """Setup a test case environment. Creates fake volume object and sets up the required API responses. """ super(TestExtendVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume_obj(ctx, **{'id': 'fake_volume', 'provider_id': 'pid_1'}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: '"{}"'.format(self.volume.id), 'instances/Volume::{}/action/setVolumeSize'.format( self.volume.provider_id ): mocks.MockHTTPSResponse({}, 200), }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, 'instances/Volume::{}/action/setVolumeSize'.format( self.volume.provider_id): self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: None, 'instances/Volume::{}/action/setVolumeSize'.format( self.volume.provider_id): mocks.MockHTTPSResponse( { 'errorCode': self.VOLUME_NOT_FOUND_ERROR, 'message': 'BadStatus Volume Test', }, 400 ), }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, self.NEW_SIZE) def test_invalid_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, self.NEW_SIZE) def test_extend_volume_bad_size_no_round(self): self.driver.configuration.set_override('sio_round_volume_capacity', override=False) self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.extend_volume(self.volume, self.BAD_SIZE) def test_extend_volume_bad_size_round(self): self.driver.configuration.set_override('sio_round_volume_capacity', override=True) self.driver.extend_volume(self.volume, self.BAD_SIZE) def test_extend_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.extend_volume(self.volume, self.NEW_SIZE) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/__init__.py0000664000567000056710000001312012701406250026764 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import requests import six from cinder import test from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class CustomResponseMode(object): """A context manager to define a custom set of per-request response modes. Example: with CustomResponseMode(self, **{ 'some/api/path': RESPONSE_MODE.Valid, 'another/api/path': RESPONSE_MODE.BadStatus, 'last/api/path': MockResponse('some data', status_code=403), }): self.assertRaises(SomeException, self.driver.api_call, data) """ def __init__(self, test_instance, **kwargs): self.test_instance = test_instance self.custom_responses = kwargs self.current_responses = None def __enter__(self): self.current_responses = self.test_instance.HTTPS_MOCK_RESPONSES https_responses = copy.deepcopy( self.test_instance.HTTPS_MOCK_RESPONSES ) current_mode = self.test_instance.current_https_response_mode for call, new_mode in self.custom_responses.items(): if isinstance(new_mode, mocks.MockHTTPSResponse): https_responses[current_mode][call] = new_mode else: https_responses[current_mode][call] = \ self.test_instance.get_https_response(call, new_mode) self.test_instance.HTTPS_MOCK_RESPONSES = https_responses def __exit__(self, exc_type, exc_val, exc_tb): self.test_instance.HTTPS_MOCK_RESPONSES = self.current_responses class TestScaleIODriver(test.TestCase): """Base ``TestCase`` subclass for the ``ScaleIODriver``""" RESPONSE_MODE = type(str('ResponseMode'), (object, ), dict( Valid='0', Invalid='1', BadStatus='2', )) __RESPONSE_MODE_NAMES = { '0': 'Valid', '1': 'Invalid', '2': 'BadStatus', } BAD_STATUS_RESPONSE = mocks.MockHTTPSResponse( { 'errorCode': 500, 'message': 'BadStatus Response Test', }, 500 ) VOLUME_NOT_FOUND_ERROR = 78 HTTPS_MOCK_RESPONSES = {} __COMMON_HTTPS_MOCK_RESPONSES = { RESPONSE_MODE.Valid: { 'login': 'login_token', }, RESPONSE_MODE.BadStatus: { 'login': mocks.MockHTTPSResponse( { 'errorCode': 403, 'message': 'Bad Login Response Test', }, 403 ), }, } __https_response_mode = RESPONSE_MODE.Valid log = None STORAGE_POOL_ID = six.text_type('1') STORAGE_POOL_NAME = 'SP1' PROT_DOMAIN_ID = six.text_type('1') PROT_DOMAIN_NAME = 'PD1' def setUp(self): """Setup a test case environment. Creates a ``ScaleIODriver`` instance Mocks the ``requests.get/post`` methods to return ``MockHTTPSResponse``'s instead. """ super(TestScaleIODriver, self).setUp() self.driver = mocks.ScaleIODriver() self.mock_object(requests, 'get', self.do_request) self.mock_object(requests, 'post', self.do_request) def do_request(self, url, *args, **kwargs): """Do a fake GET/POST API request. Splits `url` on '/api/' to get the what API call is, then returns the value of `self.HTTPS_MOCK_RESPONSES[][]` converting to a `MockHTTPSResponse` if necessary. :raises test.TestingException: If the current mode/api_call does not exist. :returns MockHTTPSResponse: """ return self.get_https_response(url.split('/api/')[1]) def set_https_response_mode(self, mode=RESPONSE_MODE.Valid): """Set the HTTPS response mode. 
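The mode applies to every mocked GET/POST issued after this call, until it is changed again: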
RESPONSE_MODE.Valid: Respond with valid data RESPONSE_MODE.Invalid: Respond with invalid data RESPONSE_MODE.BadStatus: Response with not-OK status code. """ self.__https_response_mode = mode def get_https_response(self, api_path, mode=None): if mode is None: mode = self.__https_response_mode try: response = self.HTTPS_MOCK_RESPONSES[mode][api_path] except KeyError: try: response = self.__COMMON_HTTPS_MOCK_RESPONSES[mode][api_path] except KeyError: raise test.TestingException( 'Mock API Endpoint not implemented: [{}]{}'.format( self.__RESPONSE_MODE_NAMES[mode], api_path ) ) if not isinstance(response, mocks.MockHTTPSResponse): return mocks.MockHTTPSResponse(response, 200) return response @property def current_https_response_mode(self): return self.__https_response_mode def https_response_mode_name(self, mode): return self.__RESPONSE_MODE_NAMES[mode] def custom_response_mode(self, **kwargs): return CustomResponseMode(self, **kwargs) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_consistencygroups.py0000664000567000056710000002330212701406250032070 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import mock from cinder import context from cinder.tests.unit import fake_consistencygroup from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestConsistencyGroups(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver consistency groups support``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. 
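The Valid mode wires up the removeVolume/removeMappedSdc and snapshotVolumes endpoints, while BadStatus answers the same endpoints with 401/500 errors so the delete and snapshot failure paths can assert their error handling.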
""" super(TestConsistencyGroups, self).setUp() self.ctx = context.RequestContext('fake', 'fake', auth_token=True) self.consistency_group = ( fake_consistencygroup.fake_consistencyobject_obj(self.ctx, **{'id': 'cgid'})) fake_volume1 = fake_volume.fake_volume_obj( self.ctx, **{'id': 'volid1', 'provider_id': 'pid_1'}) fake_volume2 = fake_volume.fake_volume_obj( self.ctx, **{'id': 'volid2', 'provider_id': 'pid_2'}) fake_volume3 = fake_volume.fake_volume_obj( self.ctx, **{'id': 'volid3', 'provider_id': 'pid_3'}) fake_volume4 = fake_volume.fake_volume_obj( self.ctx, **{'id': 'volid4', 'provider_id': 'pid_4'}) self.volumes = [fake_volume1, fake_volume2] self.volumes2 = [fake_volume3, fake_volume4] fake_snapshot1 = fake_snapshot.fake_snapshot_obj( self.ctx, **{'id': 'snapid1', 'volume_id': 'volid1', 'volume': fake_volume1}) fake_snapshot2 = fake_snapshot.fake_snapshot_obj( self.ctx, **{'id': 'snapid2', 'volume_id': 'volid2', 'volume': fake_volume2}) self.snapshots = [fake_snapshot1, fake_snapshot2] self.snapshot_reply = json.dumps({ 'volumeIdList': ['sid1', 'sid2'], 'snapshotGroupId': 'sgid1'}) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'instances/Volume::{}/action/removeVolume'.format( fake_volume1['provider_id'] ): fake_volume1['provider_id'], 'instances/Volume::{}/action/removeVolume'.format( fake_volume2['provider_id'] ): fake_volume2['provider_id'], 'instances/Volume::{}/action/removeMappedSdc'.format( fake_volume1['provider_id'] ): fake_volume1['provider_id'], 'instances/Volume::{}/action/removeMappedSdc'.format( fake_volume2['provider_id'] ): fake_volume2['provider_id'], 'instances/System/action/snapshotVolumes': self.snapshot_reply, }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::{}/action/removeVolume'.format( fake_volume1['provider_id'] ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format( fake_volume2['provider_id'] ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), 'instances/System/action/snapshotVolumes': self.BAD_STATUS_RESPONSE }, } def _fake_cgsnapshot(self): cgsnap = {'id': 'cgsid', 'name': 'testsnap', 'consistencygroup_id': 'cgid', 'status': 'available'} return cgsnap def test_create_consistencygroup(self): result = self.driver.create_consistencygroup(self.ctx, self.consistency_group) self.assertEqual('available', result['status']) def test_delete_consistencygroup_valid(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.configuration.set_override( 'sio_unmap_volume_before_deletion', override=True) result_model_update, result_volumes_update = ( self.driver.delete_consistencygroup(self.ctx, self.consistency_group, self.volumes)) self.assertTrue(all(volume['status'] == 'deleted' for volume in result_volumes_update)) self.assertEqual('deleted', result_model_update['status']) def test_delete_consistency_group_fail(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) result_model_update, result_volumes_update = ( self.driver.delete_consistencygroup(self.ctx, self.consistency_group, self.volumes)) self.assertTrue(any(volume['status'] == 'error_deleting' for volume in result_volumes_update)) self.assertTrue(result_model_update['status'] in ['error_deleting', 'error']) def test_create_consistencygroup_from_cg(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) result_model_update, result_volumes_model_update = ( self.driver.create_consistencygroup_from_src( self.ctx, self.consistency_group, 
self.volumes2, source_cg=self.consistency_group, source_vols=self.volumes)) self.assertEqual('available', result_model_update['status']) get_pid = lambda snapshot: snapshot['provider_id'] volume_provider_list = list(map(get_pid, result_volumes_model_update)) self.assertListEqual(volume_provider_list, ['sid1', 'sid2']) def test_create_consistencygroup_from_cgs(self): self.snapshots[0]['provider_id'] = 'pid_1' self.snapshots[1]['provider_id'] = 'pid_2' self.set_https_response_mode(self.RESPONSE_MODE.Valid) result_model_update, result_volumes_model_update = ( self.driver.create_consistencygroup_from_src( self.ctx, self.consistency_group, self.volumes2, cgsnapshot=self._fake_cgsnapshot(), snapshots=self.snapshots)) self.assertEqual('available', result_model_update['status']) get_pid = lambda snapshot: snapshot['provider_id'] volume_provider_list = list(map(get_pid, result_volumes_model_update)) self.assertListEqual(['sid1', 'sid2'], volume_provider_list) @mock.patch('cinder.objects.snapshot') @mock.patch('cinder.objects.snapshot') def test_create_cgsnapshots(self, snapshot1, snapshot2): type(snapshot1).volume = mock.PropertyMock( return_value=self.volumes[0]) type(snapshot2).volume = mock.PropertyMock( return_value=self.volumes[1]) snapshots = [snapshot1, snapshot2] self.set_https_response_mode(self.RESPONSE_MODE.Valid) result_model_update, result_snapshot_model_update = ( self.driver.create_cgsnapshot( self.ctx, self._fake_cgsnapshot(), snapshots )) self.assertEqual('available', result_model_update['status']) self.assertTrue(all(snapshot['status'] == 'available' for snapshot in result_snapshot_model_update)) get_pid = lambda snapshot: snapshot['provider_id'] snapshot_provider_list = list(map(get_pid, result_snapshot_model_update)) self.assertListEqual(['sid1', 'sid2'], snapshot_provider_list) @mock.patch('cinder.objects.snapshot') @mock.patch('cinder.objects.snapshot') def test_delete_cgsnapshots(self, snapshot1, snapshot2): type(snapshot1).volume = mock.PropertyMock( return_value=self.volumes[0]) type(snapshot2).volume = mock.PropertyMock( return_value=self.volumes[1]) type(snapshot1).provider_id = mock.PropertyMock( return_value='pid_1') type(snapshot2).provider_id = mock.PropertyMock( return_value='pid_2') snapshots = [snapshot1, snapshot2] self.set_https_response_mode(self.RESPONSE_MODE.Valid) result_model_update, result_snapshot_model_update = ( self.driver.delete_cgsnapshot( self.ctx, self._fake_cgsnapshot(), snapshots )) self.assertEqual('deleted', result_model_update['status']) self.assertTrue(all(snapshot['status'] == 'deleted' for snapshot in result_snapshot_model_update)) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_initialize_connection.py0000664000567000056710000000570112701406250032652 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
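# NOTE: the tests in this module pin down the precedence between
# volume-type QoS specs and the legacy ``sio:`` extra specs: when both
# are present, the QoS values win (test_qos_and_extraspecs expects the
# QoS limits 1000/3000, not the extra-spec limits 2000/4000). A minimal
# sketch of that merge, assuming plain-dict inputs shaped like the
# fixtures below; illustrative only, not the driver's implementation:
def _sketch_merge_limits(qos, extraspecs):
    """Editor's sketch of the limit precedence these tests assert."""
    # QoS values take priority; fall back to sio: extra specs, else None.
    iops = qos.get('maxIOPS', extraspecs.get('sio:iops_limit'))
    bandwidth = qos.get('maxBWS', extraspecs.get('sio:bandwidth_limit'))
    return {'iopsLimit': iops, 'bandwidthLimit': bandwidth}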
import mock from cinder import context from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio class TestInitializeConnection(scaleio.TestScaleIODriver): def setUp(self): """Setup a test case environment.""" super(TestInitializeConnection, self).setUp() self.connector = {} self.ctx = ( context.RequestContext('fake', 'fake', True, auth_token=True)) self.volume = fake_volume.fake_volume_obj(self.ctx) def test_only_qos(self): qos = {'maxIOPS': 1000, 'maxBWS': 3000} extraspecs = {} connection_properties = ( self._initialize_connection(qos, extraspecs)['data']) self.assertEqual(1000, connection_properties['iopsLimit']) self.assertEqual(3000, connection_properties['bandwidthLimit']) def test_no_qos(self): qos = {} extraspecs = {} connection_properties = ( self._initialize_connection(qos, extraspecs)['data']) self.assertIsNone(connection_properties['iopsLimit']) self.assertIsNone(connection_properties['bandwidthLimit']) def test_only_extraspecs(self): qos = {} extraspecs = {'sio:iops_limit': 2000, 'sio:bandwidth_limit': 4000} connection_properties = ( self._initialize_connection(qos, extraspecs)['data']) self.assertEqual(2000, connection_properties['iopsLimit']) self.assertEqual(4000, connection_properties['bandwidthLimit']) def test_qos_and_extraspecs(self): qos = {'maxIOPS': 1000, 'maxBWS': 3000} extraspecs = {'sio:iops_limit': 2000, 'sio:bandwidth_limit': 4000} connection_properties = ( self._initialize_connection(qos, extraspecs)['data']) self.assertEqual(1000, connection_properties['iopsLimit']) self.assertEqual(3000, connection_properties['bandwidthLimit']) def _initialize_connection(self, qos, extraspecs): self.driver._get_volumetype_qos = mock.MagicMock() self.driver._get_volumetype_qos.return_value = qos self.driver._get_volumetype_extraspecs = mock.MagicMock() self.driver._get_volumetype_extraspecs.return_value = extraspecs return self.driver.initialize_connection(self.volume, self.connector) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py0000664000567000056710000000735212701406250034072 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from six.moves import urllib from cinder import context from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestCreateVolumeFromSnapShot(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.create_volume_from_snapshot()``""" def setUp(self): """Setup a test case environment. Creates fake volume and snapshot objects and sets up the required API responses. 
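The Valid mode resolves the snapshot name and accepts the snapshotVolumes action; BadStatus fails both calls outright, while Invalid returns a VOLUME_NOT_FOUND_ERROR payload so the driver must raise on a missing snapshot.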
""" super(TestCreateVolumeFromSnapShot, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.snapshot = fake_snapshot.fake_snapshot_obj(ctx) self.snapshot_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.snapshot.id)) ) self.volume = fake_volume.fake_volume_obj(ctx) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.snapshot_reply = json.dumps( { 'volumeIdList': [self.volume.id], 'snapshotGroupId': 'snap_group' } ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.snapshot.id, 'instances/System/action/snapshotVolumes': self.snapshot_reply, }, self.RESPONSE_MODE.BadStatus: { 'instances/System/action/snapshotVolumes': self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'instances/System/action/snapshotVolumes': mocks.MockHTTPSResponse( { 'errorCode': self.VOLUME_NOT_FOUND_ERROR, 'message': 'BadStatus Volume Test', }, 400 ), 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: None, }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.volume, self.snapshot ) def test_invalid_snapshot(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.volume, self.snapshot ) def test_create_volume_from_snapshot(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.create_volume_from_snapshot(self.volume, self.snapshot) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_snapshot.py0000664000567000056710000001030612701406250031451 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from six.moves import urllib from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestCreateSnapShot(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.create_snapshot()``""" def return_fake_volume(self, ctx, id): return self.fake_volume def setUp(self): """Setup a test case environment. Creates fake volume and snapshot objects and sets up the required API responses. 
""" super(TestCreateSnapShot, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.fake_volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': 'fake_pid'}) self.snapshot = fake_snapshot.fake_snapshot_obj( ctx, **{'volume': self.fake_volume}) self.mock_object(db.sqlalchemy.api, 'volume_get', self.return_fake_volume) snap_vol_id = self.snapshot.volume_id self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(snap_vol_id)) ) self.snapshot_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.snapshot.id)) ) self.snapshot_reply = json.dumps( { 'volumeIdList': ['cloned'], 'snapshotGroupId': 'cloned_snapshot' } ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: '"{}"'.format( self.snapshot.volume_id ), 'instances/System/action/snapshotVolumes': self.snapshot_reply, 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.snapshot.id, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, 'instances/System/action/snapshotVolumes': self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: None, 'instances/System/action/snapshotVolumes': mocks.MockHTTPSResponse( { 'errorCode': 400, 'message': 'Invalid Volume Snapshot Test' }, 400 ), }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot ) def test_invalid_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot ) def test_create_snapshot(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.create_snapshot(self.snapshot) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/mocks.py0000664000567000056710000000574212701406250026354 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import requests import six from cinder.volume import configuration as conf from cinder.volume.drivers.emc import scaleio from oslo_config import cfg class ScaleIODriver(scaleio.ScaleIODriver): """Mock ScaleIO Driver class. 
Provides some fake configuration options """ def __init__(self, *args, **kwargs): configuration = conf.Configuration( [ cfg.StrOpt('fake'), ], None ) # Override the defaults to fake values configuration.set_override('san_ip', override='127.0.0.1') configuration.set_override('sio_rest_server_port', override='8888') configuration.set_override('san_login', override='test') configuration.set_override('san_password', override='pass') configuration.set_override('sio_storage_pool_id', override='test_pool') configuration.set_override('sio_protection_domain_id', override='test_domain') configuration.set_override('sio_storage_pools', override='test_domain:test_pool') super(ScaleIODriver, self).__init__(configuration=configuration, *args, **kwargs) def local_path(self, volume): pass def reenable_replication(self, context, volume): pass def promote_replica(self, context, volume): pass def create_replica_test_volume(self, volume, src_vref): pass def unmanage(self, volume): pass class MockHTTPSResponse(requests.Response): """Mock HTTP Response Defines the https replies from the mocked calls to do_request() """ def __init__(self, content, status_code=200): super(MockHTTPSResponse, self).__init__() if isinstance(content, six.text_type): content = content.encode('utf-8') self._content = content self.status_code = status_code def json(self, **kwargs): if isinstance(self._content, (bytes, six.text_type)): return super(MockHTTPSResponse, self).json(**kwargs) return self._content @property def text(self): if not isinstance(self._content, (bytes, six.text_type)): return json.dumps(self._content) return super(MockHTTPSResponse, self).text cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_snapshot.py0000664000567000056710000000747212701406250031462 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import urllib from cinder import context from cinder import exception from cinder.tests.unit.fake_snapshot import fake_snapshot_obj from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestDeleteSnapShot(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.delete_snapshot()``""" def setUp(self): """Setup a test case environment. Creates fake volume and snapshot objects and sets up the required API responses. 
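The Valid mode lets the name lookup, removeMappedSdc, and removeVolume calls all succeed; Invalid answers them with VOLUME_NOT_FOUND_ERROR payloads so the missing-snapshot path can be exercised.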
""" super(TestDeleteSnapShot, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.snapshot = fake_snapshot_obj( ctx, **{'provider_id': 'snap_1'}) self.snapshot_name_2x_enc = urllib.parse.quote( urllib.parse.quote( self.driver._id_to_base64(self.snapshot.id) ) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.snapshot.id, 'instances/Volume::{}/action/removeMappedSdc'.format( self.snapshot.provider_id ): self.snapshot.id, 'instances/Volume::{}/action/removeVolume'.format( self.snapshot.provider_id ): self.snapshot.id, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, 'instances/Volume::{}/action/removeVolume'.format( self.snapshot.provider_id ): self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': self.VOLUME_NOT_FOUND_ERROR, 'message': 'Test Delete Invalid Snapshot', }, 400 ), 'instances/Volume::{}/action/removeVolume'.format( self.snapshot.provider_id): mocks.MockHTTPSResponse( { 'errorCode': self.VOLUME_NOT_FOUND_ERROR, 'message': 'Test Delete Invalid Snapshot', }, 400, ) }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, self.snapshot) def test_delete_invalid_snapshot(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.delete_snapshot(self.snapshot) def test_delete_snapshot(self): """Setting the unmap volume before delete flag for tests """ self.driver.configuration.set_override( 'sio_unmap_volume_before_deletion', override=True) self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.delete_snapshot(self.snapshot) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume.py0000664000567000056710000001146412701406250031127 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import context from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio class TestCreateVolume(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.create_volume()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. 
""" super(TestCreateVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj(ctx) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume.name: '"{}"'.format(self.volume.id), 'types/Volume/instances': {'id': self.volume.id}, 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: '"{}"'.format(self.PROT_DOMAIN_ID), 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): '"{}"'.format(self.STORAGE_POOL_ID), }, self.RESPONSE_MODE.Invalid: { 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: None, 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): None, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances': self.BAD_STATUS_RESPONSE, 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: self.BAD_STATUS_RESPONSE, 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): self.BAD_STATUS_RESPONSE, }, } def test_no_domain(self): """No protection domain name or ID provided.""" self.driver.protection_domain_name = None self.driver.protection_domain_id = None self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume) def test_no_domain_id(self): """Only protection domain name provided.""" self.driver.protection_domain_id = None self.driver.protection_domain_name = self.PROT_DOMAIN_NAME self.driver.storage_pool_name = None self.driver.storage_pool_id = self.STORAGE_POOL_ID self.test_create_volume() def test_no_domain_id_invalid_response(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.test_no_domain_id) def test_no_domain_id_badstatus_response(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.test_no_domain_id) def test_no_storage_id(self): """Only protection domain name provided.""" self.driver.storage_pool_id = None self.driver.storage_pool_name = self.STORAGE_POOL_NAME self.driver.protection_domain_id = self.PROT_DOMAIN_ID self.driver.protection_domain_name = None self.test_create_volume() def test_no_storage_id_invalid_response(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.test_no_storage_id) def test_no_storage_id_badstatus_response(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.test_no_storage_id) def test_create_volume(self): """Valid create volume parameters""" self.driver.create_volume(self.volume) def test_create_volume_badstatus_response(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume) cinder-8.0.0/cinder/tests/unit/volume/drivers/emc/__init__.py0000664000567000056710000000000012701406250025336 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/test_hgst.py0000664000567000056710000011541512701406250025060 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 HGST Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_concurrency import processutils from cinder import context from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.hgst import HGSTDriver from cinder.volume import volume_types class HGSTTestCase(test.TestCase): # Need to mock these since we use them on driver creation @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def setUp(self, mock_ghn, mock_grnam, mock_pwnam): """Set up UUT and all the flags required for later fake_executes.""" super(HGSTTestCase, self).setUp() self.stubs.Set(processutils, 'execute', self._fake_execute) self._fail_vgc_cluster = False self._fail_ip = False self._fail_network_list = False self._fail_domain_list = False self._empty_domain_list = False self._fail_host_storage = False self._fail_space_list = False self._fail_space_delete = False self._fail_set_apphosts = False self._fail_extend = False self._request_cancel = False self._return_blocked = 0 self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.safe_get = self._fake_safe_get self._reset_configuration() self.driver = HGSTDriver(configuration=self.configuration, execute=self._fake_execute) def _fake_safe_get(self, value): """Don't throw exception on missing parameters, return None.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val def _reset_configuration(self): """Set safe and sane values for config params.""" self.configuration.num_volume_device_scan_tries = 1 self.configuration.volume_dd_blocksize = '1M' self.configuration.volume_backend_name = 'hgst-1' self.configuration.hgst_storage_servers = 'stor1:gbd0,stor2:gbd0' self.configuration.hgst_net = 'net1' self.configuration.hgst_redundancy = '0' self.configuration.hgst_space_user = 'kane' self.configuration.hgst_space_group = 'xanadu' self.configuration.hgst_space_mode = '0777' def _parse_space_create(self, *cmd): """Eats a vgc-cluster space-create command line to a dict.""" self.created = {'storageserver': ''} cmd = list(*cmd) while cmd: param = cmd.pop(0) if param == "-n": self.created['name'] = cmd.pop(0) elif param == "-N": self.created['net'] = cmd.pop(0) elif param == "-s": self.created['size'] = cmd.pop(0) elif param == "--redundancy": self.created['redundancy'] = cmd.pop(0) elif param == "--user": self.created['user'] = cmd.pop(0) elif param == "--group": self.created['group'] = cmd.pop(0) elif param == "--mode": self.created['mode'] = cmd.pop(0) elif param == "-S": self.created['storageserver'] += cmd.pop(0) + "," else: pass def _parse_space_extend(self, *cmd): """Eats a vgc-cluster space-extend commandline to a dict.""" self.extended = {'storageserver': ''} cmd = list(*cmd) while cmd: param = cmd.pop(0) if param == "-n": self.extended['name'] = cmd.pop(0) elif param == "-s": self.extended['size'] = cmd.pop(0) elif param == "-S": self.extended['storageserver'] += cmd.pop(0) + "," else: pass if self._fail_extend: raise
processutils.ProcessExecutionError(exit_code=1) else: return '', '' def _parse_space_delete(self, *cmd): """Eats a vgc-cluster space-delete commandline to a dict.""" self.deleted = {} cmd = list(*cmd) while cmd: param = cmd.pop(0) if param == "-n": self.deleted['name'] = cmd.pop(0) else: pass if self._fail_space_delete: raise processutils.ProcessExecutionError(exit_code=1) else: return '', '' def _parse_space_list(self, *cmd): """Eats a vgc-cluster space-list commandline to a dict.""" json = False nameOnly = False cmd = list(*cmd) while cmd: param = cmd.pop(0) if param == "--json": json = True elif param == "--name-only": nameOnly = True elif param == "-n": pass # Don't use the name here... else: pass if self._fail_space_list: raise processutils.ProcessExecutionError(exit_code=1) elif nameOnly: return "space1\nspace2\nvolume1\n", '' elif json: return HGST_SPACE_JSON, '' else: return '', '' def _parse_network_list(self, *cmd): """Eat a network-list command and return error or results.""" if self._fail_network_list: raise processutils.ProcessExecutionError(exit_code=1) else: return NETWORK_LIST, '' def _parse_domain_list(self, *cmd): """Eat a domain-list command and return error, empty, or results.""" if self._fail_domain_list: raise processutils.ProcessExecutionError(exit_code=1) elif self._empty_domain_list: return '', '' else: return "thisserver\nthatserver\nanotherserver\n", '' def _fake_execute(self, *cmd, **kwargs): """Sudo hook to catch commands to allow running on all hosts.""" cmdlist = list(cmd) exe = cmdlist.pop(0) if exe == 'vgc-cluster': exe = cmdlist.pop(0) if exe == "request-cancel": self._request_cancel = True if self._return_blocked > 0: return 'Request cancelled', '' else: raise processutils.ProcessExecutionError(exit_code=1) elif self._fail_vgc_cluster: raise processutils.ProcessExecutionError(exit_code=1) elif exe == "--version": return "HGST Solutions V2.5.0.0.x.x.x.x.x", '' elif exe == "space-list": return self._parse_space_list(cmdlist) elif exe == "space-create": self._parse_space_create(cmdlist) if self._return_blocked > 0: self._return_blocked = self._return_blocked - 1 out = "VGC_CREATE_000002\nBLOCKED\n" raise processutils.ProcessExecutionError(stdout=out, exit_code=1) return '', '' elif exe == "space-delete": return self._parse_space_delete(cmdlist) elif exe == "space-extend": return self._parse_space_extend(cmdlist) elif exe == "host-storage": if self._fail_host_storage: raise processutils.ProcessExecutionError(exit_code=1) return HGST_HOST_STORAGE, '' elif exe == "domain-list": return self._parse_domain_list() elif exe == "network-list": return self._parse_network_list() elif exe == "space-set-apphosts": if self._fail_set_apphosts: raise processutils.ProcessExecutionError(exit_code=1) return '', '' else: raise NotImplementedError elif exe == 'ip': if self._fail_ip: raise processutils.ProcessExecutionError(exit_code=1) else: return IP_OUTPUT, '' elif exe == 'dd': self.dd_count = -1 for p in cmdlist: if 'count=' in p: self.dd_count = int(p[6:]) return DD_OUTPUT, '' else: return '', '' @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_vgc_cluster_not_present(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when vgc-cluster returns an error.""" # Should pass self._fail_vgc_cluster = False self.driver.check_for_setup_error() # Should throw exception self._fail_vgc_cluster = True self.assertRaises(exception.VolumeDriverException, 
self.driver.check_for_setup_error) @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_parameter_redundancy_invalid(self, mock_ghn, mock_grnam, mock_pwnam): """Test when hgst_redundancy config parameter not 0 or 1.""" # Should pass self.driver.check_for_setup_error() # Should throw exceptions self.configuration.hgst_redundancy = '' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) self.configuration.hgst_redundancy = 'Fred' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_parameter_user_invalid(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when hgst_space_user doesn't map to UNIX user.""" # Should pass self.driver.check_for_setup_error() # Should throw exceptions mock_pwnam.side_effect = KeyError() self.configuration.hgst_space_user = '' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) self.configuration.hgst_space_user = 'Fred!`' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_parameter_group_invalid(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when hgst_space_group doesn't map to UNIX group.""" # Should pass self.driver.check_for_setup_error() # Should throw exceptions mock_grnam.side_effect = KeyError() self.configuration.hgst_space_group = '' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) self.configuration.hgst_space_group = 'Fred!`' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when mode for created spaces isn't proper format.""" # Should pass self.driver.check_for_setup_error() # Should throw exceptions self.configuration.hgst_space_mode = '' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) self.configuration.hgst_space_mode = 'Fred' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_parameter_net_invalid(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when hgst_net not in the domain.""" # Should pass self.driver.check_for_setup_error() # Should throw exceptions self._fail_network_list = True self.configuration.hgst_net = 'Fred' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) self._fail_network_list = False @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when IP ADDR command fails.""" # Should pass self.driver.check_for_setup_error() # Throw exception, need to clear internal cached host in 
driver self._fail_ip = True self.driver._vgc_host = None self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_domain_list_fails(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when domain-list fails for the domain.""" # Should pass self.driver.check_for_setup_error() # Throw exception, need to clear internal cached host in driver self._fail_domain_list = True self.driver._vgc_host = None self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_not_in_domain(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when Cinder host not domain member.""" # Should pass self.driver.check_for_setup_error() # Throw exception, need to clear internal cached host in driver self._empty_domain_list = True self.driver._vgc_host = None self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) @mock.patch('pwd.getpwnam', return_value=1) @mock.patch('grp.getgrnam', return_value=1) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_parameter_storageservers_invalid(self, mock_ghn, mock_grnam, mock_pwnam): """Test exception when the storage servers are invalid/missing.""" # Should pass self.driver.check_for_setup_error() # Storage_hosts missing self.configuration.hgst_storage_servers = '' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) # missing a : between host and devnode self.configuration.hgst_storage_servers = 'stor1,stor2' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) # missing a : between host and devnode self.configuration.hgst_storage_servers = 'stor1:gbd0,stor2' self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) # Host not in cluster self.configuration.hgst_storage_servers = 'stor1:gbd0' self._fail_host_storage = True self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) def test_update_volume_stats(self): """Get cluster space available, should pass.""" actual = self.driver.get_volume_stats(True) self.assertEqual('HGST', actual['vendor_name']) self.assertEqual('hgst', actual['storage_protocol']) self.assertEqual(90, actual['total_capacity_gb']) self.assertEqual(87, actual['free_capacity_gb']) self.assertEqual(0, actual['reserved_percentage']) def test_update_volume_stats_redundancy(self): """Get cluster space available, half-sized - 1 for mirrors.""" self.configuration.hgst_redundancy = '1' actual = self.driver.get_volume_stats(True) self.assertEqual('HGST', actual['vendor_name']) self.assertEqual('hgst', actual['storage_protocol']) self.assertEqual(44, actual['total_capacity_gb']) self.assertEqual(43, actual['free_capacity_gb']) self.assertEqual(0, actual['reserved_percentage']) def test_update_volume_stats_cached(self): """Get cached cluster space, should not call executable.""" self._fail_host_storage = True actual = self.driver.get_volume_stats(False) self.assertEqual('HGST', actual['vendor_name']) self.assertEqual('hgst', actual['storage_protocol']) self.assertEqual(90, actual['total_capacity_gb']) self.assertEqual(87, actual['free_capacity_gb']) self.assertEqual(0, actual['reserved_percentage']) def 
test_update_volume_stats_error(self): """Test that when host-storage gives an error, return unknown.""" self._fail_host_storage = True actual = self.driver.get_volume_stats(True) self.assertEqual('HGST', actual['vendor_name']) self.assertEqual('hgst', actual['storage_protocol']) self.assertEqual('unknown', actual['total_capacity_gb']) self.assertEqual('unknown', actual['free_capacity_gb']) self.assertEqual(0, actual['reserved_percentage']) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_create_volume(self, mock_ghn): """Test volume creation, ensure appropriate size expansion/name.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10} ret = self.driver.create_volume(volume) expected = {'redundancy': '0', 'group': 'xanadu', 'name': 'volume10', 'mode': '0777', 'user': 'kane', 'net': 'net1', 'storageserver': 'stor1:gbd0,stor2:gbd0,', 'size': '12'} self.assertDictMatch(expected, self.created) # Check the returned provider, note that the provider_id is hashed expected_pid = {'provider_id': 'volume10'} self.assertDictMatch(expected_pid, ret) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_create_volume_name_creation_fail(self, mock_ghn): """Test volume creation exception when can't make a hashed name.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10} self._fail_space_list = True self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, volume) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_create_snapshot(self, mock_ghn): """Test creating a snapshot, ensure full data of original copied.""" # Now snapshot the volume and check commands snapshot = {'volume_name': 'volume10', 'volume_id': 'xxx', 'display_name': 'snap10', 'name': '123abc', 'volume_size': 10, 'id': '123abc', 'volume': {'provider_id': 'space10'}} ret = self.driver.create_snapshot(snapshot) # We must copy entire underlying storage, ~12GB, not just 10GB self.assertEqual(11444, self.dd_count) # Check space-create command expected = {'redundancy': '0', 'group': 'xanadu', 'name': snapshot['display_name'], 'mode': '0777', 'user': 'kane', 'net': 'net1', 'storageserver': 'stor1:gbd0,stor2:gbd0,', 'size': '12'} self.assertDictMatch(expected, self.created) # Check the returned provider expected_pid = {'provider_id': 'snap10'} self.assertDictMatch(expected_pid, ret) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_create_cloned_volume(self, mock_ghn): """Test creating a clone, ensure full size is copied from original.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) orig = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'space_orig'} clone = {'id': '2', 'name': 'clone1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10} pid = self.driver.create_cloned_volume(clone, orig) # We must copy entire underlying storage, ~12GB, not just 10GB self.assertEqual(11444, self.dd_count) # Check space-create command expected = {'redundancy': '0', 'group': 'xanadu', 'name': 'clone1', 'mode': '0777', 'user': 'kane', 'net': 'net1', 'storageserver':
'stor1:gbd0,stor2:gbd0,', 'size': '12'} self.assertDictMatch(expected, self.created) # Check the returned provider expected_pid = {'provider_id': 'clone1'} self.assertDictMatch(expected_pid, pid) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_add_cinder_apphosts_fails(self, mock_ghn): """Test exception when set-apphost can't connect volume to host.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) orig = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'space_orig'} clone = {'id': '2', 'name': 'clone1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10} self._fail_set_apphosts = True self.assertRaises(exception.VolumeDriverException, self.driver.create_cloned_volume, clone, orig) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_create_volume_from_snapshot(self, mock_ghn): """Test creating volume from snapshot, ensure full space copy.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) snap = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'space_orig'} volume = {'id': '2', 'name': 'volume2', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10} pid = self.driver.create_volume_from_snapshot(volume, snap) # We must copy entire underlying storage, ~12GB, not just 10GB self.assertEqual(11444, self.dd_count) # Check space-create command expected = {'redundancy': '0', 'group': 'xanadu', 'name': 'volume2', 'mode': '0777', 'user': 'kane', 'net': 'net1', 'storageserver': 'stor1:gbd0,stor2:gbd0,', 'size': '12'} self.assertDictMatch(expected, self.created) # Check the returned provider expected_pid = {'provider_id': 'volume2'} self.assertDictMatch(expected_pid, pid) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_create_volume_blocked(self, mock_ghn): """Test volume creation where only initial space-create is blocked. This should actually pass because we are blocked but return an error in request-cancel, meaning that it got unblocked before we could kill the space request. """ ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10} self._return_blocked = 1 # Block & fail cancel => create succeeded ret = self.driver.create_volume(volume) expected = {'redundancy': '0', 'group': 'xanadu', 'name': 'volume10', 'mode': '0777', 'user': 'kane', 'net': 'net1', 'storageserver': 'stor1:gbd0,stor2:gbd0,', 'size': '12'} self.assertDictMatch(expected, self.created) # Check the returned provider expected_pid = {'provider_id': 'volume10'} self.assertDictMatch(expected_pid, ret) self.assertTrue(self._request_cancel) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_create_volume_blocked_and_fail(self, mock_ghn): """Test volume creation where space-create blocked permanently. This should fail because the initial create was blocked and the request-cancel succeeded, meaning the create operation never completed.
""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10} self._return_blocked = 2 # Block & pass cancel => create failed. :( self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, volume) self.assertTrue(self._request_cancel) def test_delete_volume(self): """Test deleting existing volume, ensure proper name used.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'volume10'} self.driver.delete_volume(volume) expected = {'name': 'volume10'} self.assertDictMatch(expected, self.deleted) def test_delete_volume_failure_modes(self): """Test cases where space-delete fails, but OS delete is still OK.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'volume10'} self._fail_space_delete = True # This should not throw an exception, space-delete failure not problem self.driver.delete_volume(volume) self._fail_space_delete = False volume['provider_id'] = None # This should also not throw an exception self.driver.delete_volume(volume) def test_delete_snapshot(self): """Test deleting a snapshot, ensure proper name is removed.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) snapshot = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'snap10'} self.driver.delete_snapshot(snapshot) expected = {'name': 'snap10'} self.assertDictMatch(expected, self.deleted) def test_extend_volume(self): """Test extending a volume, check the size in GB vs. GiB.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'volume10'} self.extended = {'name': '', 'size': '0', 'storageserver': ''} self.driver.extend_volume(volume, 12) expected = {'name': 'volume10', 'size': '2', 'storageserver': 'stor1:gbd0,stor2:gbd0,'} self.assertDictMatch(expected, self.extended) def test_extend_volume_noextend(self): """Test extending a volume where Space does not need to be enlarged. Because Spaces are generated somewhat larger than the requested size from OpenStack due to the base10(HGST)/base2(OS) mismatch, they can sometimes be larger than requested from OS. In that case a volume_extend may actually be a noop since the volume is already large enough to satisfy OS's request. 
""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'volume10'} self.extended = {'name': '', 'size': '0', 'storageserver': ''} self.driver.extend_volume(volume, 10) expected = {'name': '', 'size': '0', 'storageserver': ''} self.assertDictMatch(expected, self.extended) def test_space_list_fails(self): """Test exception is thrown when we can't call space-list.""" ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'volume10'} self.extended = {'name': '', 'size': '0', 'storageserver': ''} self._fail_space_list = True self.assertRaises(exception.VolumeDriverException, self.driver.extend_volume, volume, 12) def test_cli_error_not_blocked(self): """Test the _blocked handler's handlinf of a non-blocked error. The _handle_blocked handler is called on any process errors in the code. If the error was not caused by a blocked command condition (syntax error, out of space, etc.) then it should just throw the exception and not try and retry the command. """ ctxt = context.get_admin_context() extra_specs = {} type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) volume = {'id': '1', 'name': 'volume1', 'display_name': '', 'volume_type_id': type_ref['id'], 'size': 10, 'provider_id': 'volume10'} self.extended = {'name': '', 'size': '0', 'storageserver': ''} self._fail_extend = True self.assertRaises(exception.VolumeDriverException, self.driver.extend_volume, volume, 12) self.assertFalse(self._request_cancel) @mock.patch('socket.gethostbyname', return_value='123.123.123.123') def test_initialize_connection(self, moch_ghn): """Test that the connection_info for Nova makes sense.""" volume = {'name': '123', 'provider_id': 'spacey'} conn = self.driver.initialize_connection(volume, None) expected = {'name': 'spacey', 'noremovehost': 'thisserver'} self.assertDictMatch(expected, conn['data']) # Below are some command outputs we emulate IP_OUTPUT = """ 3: em2: mtu 1500 qdisc mq state link/ether 00:25:90:d9:18:09 brd ff:ff:ff:ff:ff:ff inet 192.168.0.23/24 brd 192.168.0.255 scope global em2 valid_lft forever preferred_lft forever inet6 fe80::225:90ff:fed9:1809/64 scope link valid_lft forever preferred_lft forever 1: lo: mtu 65536 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 123.123.123.123/8 scope host lo valid_lft forever preferred_lft forever inet 169.254.169.254/32 scope link lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: em1: mtu 1500 qdisc mq master link/ether 00:25:90:d9:18:08 brd ff:ff:ff:ff:ff:ff inet6 fe80::225:90ff:fed9:1808/64 scope link valid_lft forever preferred_lft forever """ HGST_HOST_STORAGE = """ { "hostStatus": [ { "node": "tm33.virident.info", "up": true, "isManager": true, "cardStatus": [ { "cardName": "/dev/sda3", "cardSerialNumber": "002f09b4037a9d521c007ee4esda3", "cardStatus": "Good", "cardStateDetails": "Normal", "cardActionRequired": "", "cardTemperatureC": 0, "deviceType": "Generic", "cardTemperatureState": "Safe", "partitionStatus": [ { "partName": "/dev/gbd0", "partitionState": "READY", "usableCapacityBytes": 98213822464, "totalReadBytes": 0, "totalWriteBytes": 0, "remainingLifePCT": 100, "flashReservesLeftPCT": 100, 
"fmc": true, "vspaceCapacityAvailable": 94947041280, "vspaceReducedCapacityAvailable": 87194279936, "_partitionID": "002f09b4037a9d521c007ee4esda3:0", "_usedSpaceBytes": 3266781184, "_enabledSpaceBytes": 3266781184, "_disabledSpaceBytes": 0 } ] } ], "driverStatus": { "vgcdriveDriverLoaded": true, "vhaDriverLoaded": true, "vcacheDriverLoaded": true, "vlvmDriverLoaded": true, "ipDataProviderLoaded": true, "ibDataProviderLoaded": false, "driverUptimeSecs": 4800, "rVersion": "20368.d55ec22.master" }, "totalCapacityBytes": 98213822464, "totalUsedBytes": 3266781184, "totalEnabledBytes": 3266781184, "totalDisabledBytes": 0 }, { "node": "tm32.virident.info", "up": true, "isManager": false, "cardStatus": [], "driverStatus": { "vgcdriveDriverLoaded": true, "vhaDriverLoaded": true, "vcacheDriverLoaded": true, "vlvmDriverLoaded": true, "ipDataProviderLoaded": true, "ibDataProviderLoaded": false, "driverUptimeSecs": 0, "rVersion": "20368.d55ec22.master" }, "totalCapacityBytes": 0, "totalUsedBytes": 0, "totalEnabledBytes": 0, "totalDisabledBytes": 0 } ], "totalCapacityBytes": 98213822464, "totalUsedBytes": 3266781184, "totalEnabledBytes": 3266781184, "totalDisabledBytes": 0 } """ HGST_SPACE_JSON = """ { "resources": [ { "resourceType": "vLVM-L", "resourceID": "vLVM-L:698cdb43-54da-863e-1699-294a080ce4db", "state": "OFFLINE", "instanceStates": {}, "redundancy": 0, "sizeBytes": 12000000000, "name": "volume10", "nodes": [], "networks": [ "net1" ], "components": [ { "resourceType": "vLVM-S", "resourceID": "vLVM-S:698cdb43-54da-863e-eb10-6275f47b8ed2", "redundancy": 0, "order": 0, "sizeBytes": 12000000000, "numStripes": 1, "stripeSizeBytes": null, "name": "volume10s00", "state": "OFFLINE", "instanceStates": {}, "components": [ { "name": "volume10h00", "resourceType": "vHA", "resourceID": "vHA:3e86da54-40db-8c69-0300-0000ac10476e", "redundancy": 0, "sizeBytes": 12000000000, "state": "GOOD", "components": [ { "name": "volume10h00", "vspaceType": "vHA", "vspaceRole": "primary", "storageObjectID": "vHA:3e86da54-40db-8c69--18130019e486", "state": "Disconnected (DCS)", "node": "tm33.virident.info", "partName": "/dev/gbd0" } ], "crState": "GOOD" }, { "name": "volume10v00", "resourceType": "vShare", "resourceID": "vShare:3f86da54-41db-8c69-0300-ecf4bbcc14cc", "redundancy": 0, "order": 0, "sizeBytes": 12000000000, "state": "GOOD", "components": [ { "name": "volume10v00", "vspaceType": "vShare", "vspaceRole": "target", "storageObjectID": "vShare:3f86da54-41db-8c64bbcc14cc:T", "state": "Started", "node": "tm33.virident.info", "partName": "/dev/gbd0_volume10h00" } ] } ] } ], "_size": "12GB", "_state": "OFFLINE", "_ugm": "", "_nets": "net1", "_hosts": "tm33.virident.info(12GB,NC)", "_ahosts": "", "_shosts": "tm33.virident.info(12GB)", "_name": "volume10", "_node": "", "_type": "vLVM-L", "_detail": "vLVM-L:698cdb43-54da-863e-1699-294a080ce4db", "_device": "" } ] } """ NETWORK_LIST = """ Network Name Type Flags Description ------------ ---- ---------- ------------------------ net1 IPv4 autoConfig 192.168.0.0/24 1Gb/s net2 IPv4 autoConfig 192.168.10.0/24 10Gb/s """ DD_OUTPUT = """ 1+0 records in 1+0 records out 1024 bytes (1.0 kB) copied, 0.000427529 s, 2.4 MB/s """ cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/0000775000567000056710000000000012701406543023767 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/test_common.py0000664000567000056710000001116412701406250026666 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Clinton Knight. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from cinder import exception from cinder import test import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes import cinder.volume.drivers.netapp.common as na_common import cinder.volume.drivers.netapp.dataontap.fc_cmode as fc_cmode import cinder.volume.drivers.netapp.utils as na_utils class NetAppDriverFactoryTestCase(test.TestCase): def setUp(self): super(NetAppDriverFactoryTestCase, self).setUp() self.mock_object(na_common, 'LOG') def test_new(self): self.mock_object(na_utils.OpenStackInfo, 'info', mock.Mock(return_value='fake_info')) mock_create_driver = self.mock_object(na_common.NetAppDriver, 'create_driver') config = na_fakes.create_configuration() config.netapp_storage_family = 'fake_family' config.netapp_storage_protocol = 'fake_protocol' kwargs = {'configuration': config} na_common.NetAppDriver(**kwargs) kwargs['app_version'] = 'fake_info' mock_create_driver.assert_called_with('fake_family', 'fake_protocol', *(), **kwargs) def test_new_missing_config(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, 'create_driver') self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **{}) def test_new_missing_family(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, 'create_driver') config = na_fakes.create_configuration() config.netapp_storage_protocol = 'fake_protocol' config.netapp_storage_family = None kwargs = {'configuration': config} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **kwargs) def test_new_missing_protocol(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, 'create_driver') config = na_fakes.create_configuration() config.netapp_storage_family = 'fake_family' kwargs = {'configuration': config} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **kwargs) def test_create_driver(self): def get_full_class_name(obj): return obj.__module__ + '.' 
+ obj.__class__.__name__ kwargs = {'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info'} registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY for family in six.iterkeys(registry): for protocol, full_class_name in registry[family].items(): driver = na_common.NetAppDriver.create_driver( family, protocol, **kwargs) self.assertEqual(full_class_name, get_full_class_name(driver)) def test_create_driver_case_insensitive(self): kwargs = {'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info'} driver = na_common.NetAppDriver.create_driver('ONTAP_CLUSTER', 'FC', **kwargs) self.assertIsInstance(driver, fc_cmode.NetAppCmodeFibreChannelDriver) def test_create_driver_invalid_family(self): kwargs = {'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info'} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver.create_driver, 'kardashian', 'iscsi', **kwargs) def test_create_driver_invalid_protocol(self): kwargs = {'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info'} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver.create_driver, 'ontap_7mode', 'carrier_pigeon', **kwargs) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/__init__.py0000664000567000056710000000000012701406250026061 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/test_utils.py0000664000567000056710000010530412701406250026536 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Michael Price. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
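# NOTE(editor): NetAppDriverFactoryTestCase above exercises a
# (storage family, protocol) registry lookup with case-insensitive keys.
# The self-contained sketch below only illustrates that pattern; the names
# _FAKE_REGISTRY, _FakeFCDriver and _create_driver are hypothetical and are
# not cinder's actual NETAPP_UNIFIED_DRIVER_REGISTRY / create_driver code.


class _FakeFCDriver(object):
    """Stand-in driver class used only by this sketch."""


_FAKE_REGISTRY = {'ontap_cluster': {'fc': _FakeFCDriver}}


def _create_driver(family, protocol, **kwargs):
    # Normalize case, mirroring test_create_driver_case_insensitive above.
    family_map = _FAKE_REGISTRY.get(family.lower())
    if family_map is None or protocol.lower() not in family_map:
        # The real factory raises exception.InvalidInput here; ValueError
        # keeps this sketch free of cinder imports.
        raise ValueError('no driver for %s/%s' % (family, protocol))
    return family_map[protocol.lower()](**kwargs)


assert isinstance(_create_driver('ONTAP_CLUSTER', 'FC'), _FakeFCDriver)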
""" Mock unit tests for the NetApp driver utility module """ import copy import ddt import platform import mock from oslo_concurrency import processutils as putils from cinder import context from cinder import exception from cinder import test import cinder.tests.unit.volume.drivers.netapp.fakes as fake from cinder import version from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import qos_specs from cinder.volume import volume_types class NetAppDriverUtilsTestCase(test.TestCase): @mock.patch.object(na_utils, 'LOG', mock.Mock()) def test_validate_instantiation_proxy(self): kwargs = {'netapp_mode': 'proxy'} na_utils.validate_instantiation(**kwargs) self.assertEqual(0, na_utils.LOG.warning.call_count) @mock.patch.object(na_utils, 'LOG', mock.Mock()) def test_validate_instantiation_no_proxy(self): kwargs = {'netapp_mode': 'asdf'} na_utils.validate_instantiation(**kwargs) self.assertEqual(1, na_utils.LOG.warning.call_count) def test_check_flags(self): class TestClass(object): pass required_flags = ['flag1', 'flag2'] configuration = TestClass() setattr(configuration, 'flag1', 'value1') setattr(configuration, 'flag3', 'value3') self.assertRaises(exception.InvalidInput, na_utils.check_flags, required_flags, configuration) setattr(configuration, 'flag2', 'value2') self.assertIsNone(na_utils.check_flags(required_flags, configuration)) def test_to_bool(self): self.assertTrue(na_utils.to_bool(True)) self.assertTrue(na_utils.to_bool('true')) self.assertTrue(na_utils.to_bool('yes')) self.assertTrue(na_utils.to_bool('y')) self.assertTrue(na_utils.to_bool(1)) self.assertTrue(na_utils.to_bool('1')) self.assertFalse(na_utils.to_bool(False)) self.assertFalse(na_utils.to_bool('false')) self.assertFalse(na_utils.to_bool('asdf')) self.assertFalse(na_utils.to_bool('no')) self.assertFalse(na_utils.to_bool('n')) self.assertFalse(na_utils.to_bool(0)) self.assertFalse(na_utils.to_bool('0')) self.assertFalse(na_utils.to_bool(2)) self.assertFalse(na_utils.to_bool('2')) def test_set_safe_attr(self): fake_object = mock.Mock() fake_object.fake_attr = None # test initial checks self.assertFalse(na_utils.set_safe_attr(None, fake_object, None)) self.assertFalse(na_utils.set_safe_attr(fake_object, None, None)) self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr', None)) # test value isn't changed if it shouldn't be and retval is False fake_object.fake_attr = 'fake_value' self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr', 'fake_value')) self.assertEqual('fake_value', fake_object.fake_attr) # test value is changed if it should be and retval is True self.assertTrue(na_utils.set_safe_attr(fake_object, 'fake_attr', 'new_fake_value')) self.assertEqual('new_fake_value', fake_object.fake_attr) def test_round_down(self): self.assertAlmostEqual(na_utils.round_down(5.567, '0.00'), 5.56) self.assertAlmostEqual(na_utils.round_down(5.567, '0.0'), 5.5) self.assertAlmostEqual(na_utils.round_down(5.567, '0'), 5) self.assertAlmostEqual(na_utils.round_down(0, '0.00'), 0) self.assertAlmostEqual(na_utils.round_down(-5.567, '0.00'), -5.56) self.assertAlmostEqual(na_utils.round_down(-5.567, '0.0'), -5.5) self.assertAlmostEqual(na_utils.round_down(-5.567, '0'), -5) def test_iscsi_connection_properties(self): actual_properties = na_utils.get_iscsi_connection_properties( fake.ISCSI_FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT) actual_properties_mapped = actual_properties['data'] self.assertDictEqual(actual_properties_mapped, 
fake.FC_ISCSI_TARGET_INFO_DICT) def test_iscsi_connection_lun_id_type_str(self): FAKE_LUN_ID = '1' actual_properties = na_utils.get_iscsi_connection_properties( FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT) actual_properties_mapped = actual_properties['data'] self.assertIs(type(actual_properties_mapped['target_lun']), int) def test_iscsi_connection_lun_id_type_dict(self): FAKE_LUN_ID = {'id': 'fake_id'} self.assertRaises(TypeError, na_utils.get_iscsi_connection_properties, FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT) def test_get_volume_extra_specs(self): fake_extra_specs = {'fake_key': 'fake_value'} fake_volume_type = {'extra_specs': fake_extra_specs} fake_volume = {'volume_type_id': 'fake_volume_type_id'} self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type', mock.Mock( return_value=fake_volume_type)) self.mock_object(na_utils, 'log_extra_spec_warnings') result = na_utils.get_volume_extra_specs(fake_volume) self.assertEqual(fake_extra_specs, result) def test_get_volume_extra_specs_no_type_id(self): fake_volume = {} self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type') self.mock_object(na_utils, 'log_extra_spec_warnings') result = na_utils.get_volume_extra_specs(fake_volume) self.assertEqual({}, result) def test_get_volume_extra_specs_no_volume_type(self): fake_volume = {'volume_type_id': 'fake_volume_type_id'} self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type', mock.Mock( return_value=None)) self.mock_object(na_utils, 'log_extra_spec_warnings') result = na_utils.get_volume_extra_specs(fake_volume) self.assertEqual({}, result) def test_log_extra_spec_warnings_obsolete_specs(self): mock_log = self.mock_object(na_utils.LOG, 'warning') na_utils.log_extra_spec_warnings({'netapp:raid_type': 'raid4'}) self.assertEqual(1, mock_log.call_count) def test_log_extra_spec_warnings_deprecated_specs(self): mock_log = self.mock_object(na_utils.LOG, 'warning') na_utils.log_extra_spec_warnings({'netapp_thick_provisioned': 'true'}) self.assertEqual(1, mock_log.call_count) def test_validate_qos_spec_none(self): qos_spec = None # Just return without raising an exception. na_utils.validate_qos_spec(qos_spec) def test_validate_qos_spec_keys_weirdly_cased(self): qos_spec = {'mAxIopS': 33000} # Just return without raising an exception. 
na_utils.validate_qos_spec(qos_spec) def test_validate_qos_spec_bad_key(self): qos_spec = {'maxFlops': 33000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_combination(self): qos_spec = {'maxIOPS': 33000, 'maxBPS': 10000000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_map_qos_spec_none(self): qos_spec = None result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertIsNone(result) def test_map_qos_spec_maxiops(self): qos_spec = {'maxIOPs': 33000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '33000iops', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_maxiopspergib(self): qos_spec = {'maxIOPSperGiB': 1000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '42000iops', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_maxbps(self): qos_spec = {'maxBPS': 1000000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '1000000B/s', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_maxbpspergib(self): qos_spec = {'maxBPSperGiB': 100000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '4200000B/s', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_no_key_present(self): qos_spec = {} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': None, } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_dict_to_lower(self): original = {'UPperKey': 'Value'} expected = {'upperkey': 'Value'} result = na_utils.map_dict_to_lower(original) self.assertEqual(expected, result) def test_get_qos_policy_group_name(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name(fake.VOLUME) self.assertEqual(expected, result) def test_get_qos_policy_group_name_no_id(self): volume = copy.deepcopy(fake.VOLUME) del(volume['id']) result = na_utils.get_qos_policy_group_name(volume) self.assertIsNone(result) def test_get_qos_policy_group_name_from_info(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name_from_info( fake.QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_info_no_info(self): result = na_utils.get_qos_policy_group_name_from_info(None) self.assertIsNone(result) def test_get_qos_policy_group_name_from_legacy_info(self): expected = fake.QOS_POLICY_GROUP_NAME result = na_utils.get_qos_policy_group_name_from_info( fake.LEGACY_QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_spec_info(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name_from_info( 
fake.QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_none_qos_info(self): expected = None result = na_utils.get_qos_policy_group_name_from_info( fake.QOS_POLICY_GROUP_INFO_NONE) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_exception_path(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.side_effect = exception.VolumeTypeNotFound expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_volume_type_none(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = None expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_no_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = None mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value = None self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_legacy_qos_policy_group_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = fake.LEGACY_QOS mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value = None self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(fake.LEGACY_QOS_POLICY_GROUP_INFO, result) def test_get_valid_spec_qos_policy_group_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = None mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value =\ fake.QOS_POLICY_GROUP_SPEC self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result) def test_get_valid_backend_qos_spec_from_volume_type_no_spec(self): mock_get_spec = self.mock_object( na_utils, 'get_backend_qos_spec_from_volume_type') mock_get_spec.return_value = None mock_validate = self.mock_object(na_utils, 'validate_qos_spec') result = na_utils.get_valid_backend_qos_spec_from_volume_type( fake.VOLUME, fake.VOLUME_TYPE) self.assertIsNone(result) self.assertEqual(0, mock_validate.call_count) def test_get_valid_backend_qos_spec_from_volume_type(self): mock_get_spec = 
self.mock_object( na_utils, 'get_backend_qos_spec_from_volume_type') mock_get_spec.return_value = fake.QOS_SPEC mock_validate = self.mock_object(na_utils, 'validate_qos_spec') result = na_utils.get_valid_backend_qos_spec_from_volume_type( fake.VOLUME, fake.VOLUME_TYPE) self.assertEqual(fake.QOS_POLICY_GROUP_SPEC, result) self.assertEqual(1, mock_validate.call_count) def test_get_backend_qos_spec_from_volume_type_no_qos_specs_id(self): volume_type = copy.deepcopy(fake.VOLUME_TYPE) del(volume_type['qos_specs_id']) mock_get_context = self.mock_object(context, 'get_admin_context') result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertIsNone(result) self.assertEqual(0, mock_get_context.call_count) def test_get_backend_qos_spec_from_volume_type_no_qos_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = None result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertIsNone(result) def test_get_backend_qos_spec_from_volume_type_with_frontend_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_FRONTEND_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertIsNone(result) def test_get_backend_qos_spec_from_volume_type_with_backend_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_BACKEND_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(fake.QOS_SPEC, result) def test_get_backend_qos_spec_from_volume_type_with_both_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_BOTH_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(fake.QOS_SPEC, result) def test_check_for_invalid_qos_spec_combination(self): self.assertRaises(exception.Invalid, na_utils.check_for_invalid_qos_spec_combination, fake.INVALID_QOS_POLICY_GROUP_INFO, fake.VOLUME_TYPE) def test_get_legacy_qos_policy(self): extra_specs = fake.LEGACY_EXTRA_SPECS expected = {'policy_name': fake.QOS_POLICY_GROUP_NAME} result = na_utils.get_legacy_qos_policy(extra_specs) self.assertEqual(expected, result) def test_get_legacy_qos_policy_no_policy_name(self): extra_specs = fake.EXTRA_SPECS result = na_utils.get_legacy_qos_policy(extra_specs) self.assertIsNone(result) class OpenStackInfoTestCase(test.TestCase): UNKNOWN_VERSION = 'unknown version' UNKNOWN_RELEASE = 'unknown release' UNKNOWN_VENDOR = 'unknown vendor' UNKNOWN_PLATFORM = 'unknown platform' VERSION_STRING_RET_VAL = 'fake_version_1' RELEASE_STRING_RET_VAL = 'fake_release_1' PLATFORM_RET_VAL = 'fake_platform_1' VERSION_INFO_VERSION = 'fake_version_2' VERSION_INFO_RELEASE = 'fake_release_2' RPM_INFO_VERSION = 'fake_version_3' RPM_INFO_RELEASE = 'fake_release_3' RPM_INFO_VENDOR = 'fake vendor 3' PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '') NO_PKG_FOUND = ('', 'whatever') PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '') DEB_RLS = 'upstream_version-debian_revision' DEB_VENDOR = 'debian_revision' def setUp(self): super(OpenStackInfoTestCase, 
self).setUp() def test_openstack_info_init(self): info = na_utils.OpenStackInfo() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(return_value=VERSION_STRING_RET_VAL)) def test_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.VERSION_STRING_RET_VAL, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(side_effect=Exception)) def test_xcption_in_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(return_value=RELEASE_STRING_RET_VAL)) def test_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(side_effect=Exception)) def test_xcption_in_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(return_value=PLATFORM_RET_VAL)) def test_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.PLATFORM_RET_VAL, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(side_effect=Exception)) def test_xcption_in_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=VERSION_INFO_RELEASE)) def test_update_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.VERSION_INFO_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', 
mock.Mock(return_value='')) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=None)) def test_no_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(side_effect=Exception)) def test_xcption_in_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_RPM_RET_VAL)) def test_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.RPM_INFO_VERSION, info._version) self.assertEqual(self.RPM_INFO_RELEASE, info._release) self.assertEqual(self.RPM_INFO_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_rpm_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_xcption_in_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_DPKG_RET_VAL)) def test_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.DEB_RLS, info._release) self.assertEqual(self.DEB_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_dpkg_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_xcption_in_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, 
info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=True)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertFalse(mock_updt_from_dpkg.called) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=False)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_not_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertTrue(mock_updt_from_dpkg.called) @ddt.ddt class FeaturesTestCase(test.TestCase): def setUp(self): super(FeaturesTestCase, self).setUp() self.features = na_utils.Features() def test_init(self): self.assertSetEqual(set(), self.features.defined_features) def test_add_feature_default(self): self.features.add_feature('FEATURE_1') self.assertTrue(self.features.FEATURE_1.supported) self.assertIn('FEATURE_1', self.features.defined_features) @ddt.data(True, False) def test_add_feature(self, value): self.features.add_feature('FEATURE_2', value) self.assertEqual(value, bool(self.features.FEATURE_2)) self.assertEqual(value, self.features.FEATURE_2.supported) self.assertIsNone(self.features.FEATURE_2.minimum_version) self.assertIn('FEATURE_2', self.features.defined_features) @ddt.data((True, '1'), (False, 2), (False, None), (True, None)) @ddt.unpack def test_add_feature_min_version(self, enabled, min_version): self.features.add_feature('FEATURE_2', enabled, min_version=min_version) self.assertEqual(enabled, bool(self.features.FEATURE_2)) self.assertEqual(enabled, self.features.FEATURE_2.supported) self.assertEqual(min_version, self.features.FEATURE_2.minimum_version) self.assertIn('FEATURE_2', self.features.defined_features) @ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,)) def test_add_feature_type_error(self, value): self.assertRaises(TypeError, self.features.add_feature, 'FEATURE_3', value) self.assertNotIn('FEATURE_3', self.features.defined_features) def test_get_attr_missing(self): self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4') @ddt.ddt class BitSetTestCase(test.TestCase): def setUp(self): super(BitSetTestCase, self).setUp() def test_default(self): self.assertEqual(na_utils.BitSet(0), na_utils.BitSet()) def test_set(self): bitset = na_utils.BitSet(0) bitset.set(16) self.assertEqual(na_utils.BitSet(1 << 16), bitset) def test_unset(self): bitset = na_utils.BitSet(1 << 16) bitset.unset(16) self.assertEqual(na_utils.BitSet(0), bitset) def test_is_set(self): bitset 
= na_utils.BitSet(1 << 16) self.assertTrue(bitset.is_set(16)) def test_not_equal(self): set1 = na_utils.BitSet(1 << 15) set2 = na_utils.BitSet(1 << 16) self.assertNotEqual(set1, set2) def test_repr(self): raw_val = 1 << 16 actual = repr(na_utils.BitSet(raw_val)) expected = str(raw_val) self.assertEqual(actual, expected) def test_str(self): raw_val = 1 << 16 actual = str(na_utils.BitSet(raw_val)) expected = bin(raw_val) self.assertEqual(actual, expected) def test_int(self): val = 1 << 16 actual = int(int(na_utils.BitSet(val))) self.assertEqual(val, actual) def test_and(self): actual = na_utils.BitSet(1 << 16 | 1 << 15) actual &= 1 << 16 self.assertEqual(na_utils.BitSet(1 << 16), actual) def test_or(self): actual = na_utils.BitSet() actual |= 1 << 16 self.assertEqual(na_utils.BitSet(1 << 16), actual) def test_invert(self): actual = na_utils.BitSet(1 << 16) actual = ~actual self.assertEqual(~(1 << 16), actual) def test_xor(self): actual = na_utils.BitSet(1 << 16) actual ^= 1 << 16 self.assertEqual(na_utils.BitSet(), actual) def test_lshift(self): actual = na_utils.BitSet(1) actual <<= 16 self.assertEqual(na_utils.BitSet(1 << 16), actual) def test_rshift(self): actual = na_utils.BitSet(1 << 16) actual >>= 16 self.assertEqual(na_utils.BitSet(1), actual) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/0000775000567000056710000000000012701406543025426 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/test_driver.py0000664000567000056710000006304312701406250030333 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade. All rights reserved. # Copyright (c) 2015 Michael Price. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
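# NOTE(editor): BitSetTestCase in the preceding test_utils.py asserts
# set/unset/is_set behavior plus int-based equality, repr and str. A minimal
# standalone class consistent with that subset of the assertions looks
# roughly like the sketch below; _SketchBitSet is hypothetical, not cinder's
# actual na_utils.BitSet implementation.


class _SketchBitSet(object):
    def __init__(self, value=0):
        self._value = value

    def set(self, bit):
        # bitset.set(16) must equal BitSet(1 << 16), per test_set.
        self._value |= 1 << bit

    def unset(self, bit):
        # Clears a single bit, per test_unset.
        self._value &= ~(1 << bit)

    def is_set(self, bit):
        return bool(self._value & (1 << bit))

    def __eq__(self, other):
        # test_invert compares against a plain int, so accept both.
        other_value = other._value if isinstance(other, _SketchBitSet) else other
        return self._value == other_value

    def __repr__(self):
        # test_repr expects the decimal string of the raw value.
        return str(self._value)

    def __str__(self):
        # test_str expects the binary representation.
        return bin(self._value)

    def __int__(self):
        return self._value


_bits = _SketchBitSet()
_bits.set(16)
assert _bits == _SketchBitSet(1 << 16) and _bits.is_set(16)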
import abc import copy import ddt import mock import socket from cinder import exception from cinder.volume import configuration as conf from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \ fakes from cinder.volume.drivers.netapp import common from cinder.volume.drivers.netapp.eseries import client from cinder.volume.drivers.netapp.eseries import library from cinder.volume.drivers.netapp.eseries import utils from cinder.volume.drivers.netapp import options import cinder.volume.drivers.netapp.utils as na_utils @ddt.ddt class NetAppESeriesDriverTestCase(object): """Test case for NetApp e-series iscsi driver.""" volume = {'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1, 'volume_name': 'lun1', 'host': 'hostname@backend#DDP', 'os_type': 'linux', 'provider_location': 'lun1', 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'provider_auth': 'provider a b', 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} snapshot = {'id': '17928122-553b-4da9-9737-e5c3dcd97f75', 'volume_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 2, 'volume_name': 'lun1', 'volume_size': 2, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} volume_sec = {'id': 'b6c01641-8955-4917-a5e3-077147478575', 'size': 2, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'lun1', 'name_id': 'b6c01641-8955-4917-a5e3-077147478575', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} volume_clone = {'id': 'b4b24b27-c716-4647-b66d-8b93ead770a5', 'size': 3, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'cl_sm', 'name_id': 'b4b24b27-c716-4647-b66d-8b93ead770a5', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} volume_clone_large = {'id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553', 'size': 6, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'cl_lg', 'name_id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} fake_eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['id']) fake_size_gb = volume['size'] fake_eseries_pool_label = 'DDP' fake_ref = {'source-name': 'CFDGJSLS'} fake_ret_vol = {'id': 'vol_id', 'label': 'label', 'worldWideName': 'wwn', 'capacity': '2147583648'} PROTOCOL = 'iscsi' def setUp(self): super(NetAppESeriesDriverTestCase, self).setUp() self._custom_setup() def _custom_setup(self): self.mock_object(na_utils, 'OpenStackInfo') configuration = self._set_config(self.create_configuration()) self.driver = common.NetAppDriver(configuration=configuration) self.library = self.driver.library self.mock_object(self.library, '_check_mode_get_or_register_storage_system') self.mock_object(self.library, '_version_check') self.mock_object(self.driver.library, '_check_storage_system') self.driver.do_setup(context='context') self.driver.library._client._endpoint = fakes.FAKE_ENDPOINT_HTTP self.driver.library._client.features = mock.Mock() self.driver.library._client.features.REST_1_4_RELEASE = True def _set_config(self, configuration): configuration.netapp_storage_family = 'eseries' configuration.netapp_storage_protocol = self.PROTOCOL configuration.netapp_transport_type = 'http' configuration.netapp_server_hostname = '127.0.0.1' configuration.netapp_server_port = None configuration.netapp_webservice_path = 
'/devmgr/vn' configuration.netapp_controller_ips = '127.0.0.2,127.0.0.3' configuration.netapp_sa_password = 'pass1234' configuration.netapp_login = 'rw' configuration.netapp_password = 'rw' configuration.netapp_storage_pools = 'DDP' configuration.netapp_enable_multiattach = False return configuration @staticmethod def create_configuration(): configuration = conf.Configuration(None) configuration.append_config_values(options.netapp_basicauth_opts) configuration.append_config_values(options.netapp_eseries_opts) configuration.append_config_values(options.netapp_san_opts) return configuration @abc.abstractmethod @mock.patch.object(na_utils, 'validate_instantiation') def test_instantiation(self, mock_validate_instantiation): pass def test_embedded_mode(self): self.mock_object(client.RestClient, '_init_features') configuration = self._set_config(self.create_configuration()) configuration.netapp_controller_ips = '127.0.0.1,127.0.0.3' driver = common.NetAppDriver(configuration=configuration) self.mock_object(driver.library, '_version_check') self.mock_object(client.RestClient, 'list_storage_systems', mock.Mock( return_value=[fakes.STORAGE_SYSTEM])) driver.do_setup(context='context') self.assertEqual('1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b', driver.library._client.get_system_id()) def test_check_system_pwd_not_sync(self): def list_system(): if getattr(self, 'test_count', None): self.test_count = 1 return {'status': 'passwordoutofsync'} return {'status': 'needsAttention'} self.library._client.list_storage_system = mock.Mock(wraps=list_system) result = self.library._check_storage_system() self.assertTrue(result) def test_create_destroy(self): self.mock_object(client.RestClient, 'delete_volume', mock.Mock(return_value='None')) self.mock_object(self.driver.library, 'create_volume', mock.Mock(return_value=self.volume)) self.mock_object(self.library._client, 'list_volume', mock.Mock( return_value=fakes.VOLUME)) self.driver.create_volume(self.volume) self.driver.delete_volume(self.volume) def test_vol_stats(self): self.driver.get_volume_stats(refresh=False) def test_get_pool(self): self.mock_object(self.library, '_get_volume', mock.Mock(return_value={ 'volumeGroupRef': 'fake_ref'})) self.mock_object(self.library._client, "get_storage_pool", mock.Mock(return_value={'volumeGroupRef': 'fake_ref', 'label': 'ddp1'})) pool = self.driver.get_pool({'name_id': 'fake-uuid'}) self.assertEqual('ddp1', pool) def test_get_pool_no_pools(self): self.mock_object(self.library, '_get_volume', mock.Mock(return_value={ 'volumeGroupRef': 'fake_ref'})) self.mock_object(self.library._client, "get_storage_pool", mock.Mock(return_value=None)) pool = self.driver.get_pool({'name_id': 'fake-uuid'}) self.assertIsNone(pool) @mock.patch.object(library.NetAppESeriesLibrary, '_create_volume', mock.Mock()) def test_create_volume(self): self.driver.create_volume(self.volume) self.library._create_volume.assert_called_with( 'DDP', self.fake_eseries_volume_label, self.volume['size'], {}) def test_create_volume_no_pool_provided_by_scheduler(self): volume = copy.deepcopy(self.volume) volume['host'] = "host@backend" # missing pool self.assertRaises(exception.InvalidHost, self.driver.create_volume, volume) @mock.patch.object(client.RestClient, 'list_storage_pools') def test_helper_create_volume_fail(self, fake_list_pools): fake_pool = {} fake_pool['label'] = self.fake_eseries_pool_label fake_pool['volumeGroupRef'] = 'foo' fake_pool['raidLevel'] = 'raidDiskPool' fake_pools = [fake_pool] fake_list_pools.return_value = fake_pools 
wrong_eseries_pool_label = 'hostname@backend' self.assertRaises(exception.NetAppDriverException, self.library._create_volume, wrong_eseries_pool_label, self.fake_eseries_volume_label, self.fake_size_gb) @mock.patch.object(library.LOG, 'info') @mock.patch.object(client.RestClient, 'list_storage_pools') @mock.patch.object(client.RestClient, 'create_volume', mock.MagicMock(return_value='CorrectVolume')) def test_helper_create_volume(self, storage_pools, log_info): fake_pool = {} fake_pool['label'] = self.fake_eseries_pool_label fake_pool['volumeGroupRef'] = 'foo' fake_pool['raidLevel'] = 'raidDiskPool' fake_pools = [fake_pool] storage_pools.return_value = fake_pools storage_vol = self.library._create_volume( self.fake_eseries_pool_label, self.fake_eseries_volume_label, self.fake_size_gb) log_info.assert_called_once_with("Created volume with label %s.", self.fake_eseries_volume_label) self.assertEqual('CorrectVolume', storage_vol) @mock.patch.object(client.RestClient, 'list_storage_pools') @mock.patch.object(client.RestClient, 'create_volume', mock.MagicMock( side_effect=exception.NetAppDriverException)) @mock.patch.object(library.LOG, 'info', mock.Mock()) def test_create_volume_check_exception(self, fake_list_pools): fake_pool = {} fake_pool['label'] = self.fake_eseries_pool_label fake_pool['volumeGroupRef'] = 'foo' fake_pool['raidLevel'] = 'raidDiskPool' fake_pools = [fake_pool] fake_list_pools.return_value = fake_pools self.assertRaises(exception.NetAppDriverException, self.library._create_volume, self.fake_eseries_pool_label, self.fake_eseries_volume_label, self.fake_size_gb) def test_portal_for_vol_controller(self): volume = {'id': 'vol_id', 'currentManager': 'ctrl1'} vol_nomatch = {'id': 'vol_id', 'currentManager': 'ctrl3'} portals = [{'controller': 'ctrl2', 'iqn': 'iqn2'}, {'controller': 'ctrl1', 'iqn': 'iqn1'}] portal = self.library._get_iscsi_portal_for_vol(volume, portals) self.assertEqual({'controller': 'ctrl1', 'iqn': 'iqn1'}, portal) portal = self.library._get_iscsi_portal_for_vol(vol_nomatch, portals) self.assertEqual({'controller': 'ctrl2', 'iqn': 'iqn2'}, portal) def test_portal_for_vol_any_false(self): vol_nomatch = {'id': 'vol_id', 'currentManager': 'ctrl3'} portals = [{'controller': 'ctrl2', 'iqn': 'iqn2'}, {'controller': 'ctrl1', 'iqn': 'iqn1'}] self.assertRaises(exception.NetAppDriverException, self.library._get_iscsi_portal_for_vol, vol_nomatch, portals, False) def test_setup_error_unsupported_host_type(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_host_type = 'garbage' driver = common.NetAppDriver(configuration=configuration) self.assertRaises(exception.NetAppDriverException, driver.library.check_for_setup_error) def test_check_host_type_default(self): configuration = self._set_config(self.create_configuration()) driver = common.NetAppDriver(configuration=configuration) driver.library._check_host_type() self.assertEqual('LnxALUA', driver.library.host_type) def test_do_setup_all_default(self): configuration = self._set_config(self.create_configuration()) driver = common.NetAppDriver(configuration=configuration) driver.library._check_mode_get_or_register_storage_system = mock.Mock() mock_invoke = self.mock_object(client, 'RestClient') driver.do_setup(context='context') mock_invoke.assert_called_with(**fakes.FAKE_CLIENT_PARAMS) def test_do_setup_http_default_port(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_transport_type = 'http' driver = 
common.NetAppDriver(configuration=configuration) driver.library._check_mode_get_or_register_storage_system = mock.Mock() mock_invoke = self.mock_object(client, 'RestClient') driver.do_setup(context='context') mock_invoke.assert_called_with(**fakes.FAKE_CLIENT_PARAMS) def test_do_setup_https_default_port(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_transport_type = 'https' driver = common.NetAppDriver(configuration=configuration) driver.library._check_mode_get_or_register_storage_system = mock.Mock() mock_invoke = self.mock_object(client, 'RestClient') driver.do_setup(context='context') FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=8443, scheme='https') mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS) def test_do_setup_http_non_default_port(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_server_port = 81 driver = common.NetAppDriver(configuration=configuration) driver.library._check_mode_get_or_register_storage_system = mock.Mock() mock_invoke = self.mock_object(client, 'RestClient') driver.do_setup(context='context') FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=81) mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS) def test_do_setup_https_non_default_port(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_transport_type = 'https' configuration.netapp_server_port = 446 driver = common.NetAppDriver(configuration=configuration) driver.library._check_mode_get_or_register_storage_system = mock.Mock() mock_invoke = self.mock_object(client, 'RestClient') driver.do_setup(context='context') FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=446, scheme='https') mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS) def test_setup_good_controller_ip(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_controller_ips = '127.0.0.1' driver = common.NetAppDriver(configuration=configuration) driver.library._check_mode_get_or_register_storage_system def test_setup_good_controller_ips(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_controller_ips = '127.0.0.2,127.0.0.1' driver = common.NetAppDriver(configuration=configuration) driver.library._check_mode_get_or_register_storage_system def test_setup_missing_controller_ip(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_controller_ips = None driver = common.NetAppDriver(configuration=configuration) self.assertRaises(exception.InvalidInput, driver.do_setup, context='context') def test_setup_error_invalid_controller_ip(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_controller_ips = '987.65.43.21' driver = common.NetAppDriver(configuration=configuration) self.mock_object(na_utils, 'resolve_hostname', mock.Mock(side_effect=socket.gaierror)) self.assertRaises( exception.NoValidHost, driver.library._check_mode_get_or_register_storage_system) def test_setup_error_invalid_first_controller_ip(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_controller_ips = '987.65.43.21,127.0.0.1' driver = common.NetAppDriver(configuration=configuration) self.mock_object(na_utils, 'resolve_hostname', mock.Mock(side_effect=socket.gaierror)) self.assertRaises( exception.NoValidHost, driver.library._check_mode_get_or_register_storage_system) def test_setup_error_invalid_second_controller_ip(self): configuration = 
self._set_config(self.create_configuration()) configuration.netapp_controller_ips = '127.0.0.1,987.65.43.21' driver = common.NetAppDriver(configuration=configuration) self.mock_object(na_utils, 'resolve_hostname', mock.Mock(side_effect=socket.gaierror)) self.assertRaises( exception.NoValidHost, driver.library._check_mode_get_or_register_storage_system) def test_setup_error_invalid_both_controller_ips(self): configuration = self._set_config(self.create_configuration()) configuration.netapp_controller_ips = '564.124.1231.1,987.65.43.21' driver = common.NetAppDriver(configuration=configuration) self.mock_object(na_utils, 'resolve_hostname', mock.Mock(side_effect=socket.gaierror)) self.assertRaises( exception.NoValidHost, driver.library._check_mode_get_or_register_storage_system) def test_manage_existing_get_size(self): self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=self.fake_ret_vol) size = self.driver.manage_existing_get_size(self.volume, self.fake_ref) self.assertEqual(3, size) self.library._get_existing_vol_with_manage_ref.assert_called_once_with( self.fake_ref) def test_get_exist_vol_source_name_missing(self): self.library._client.list_volume = mock.Mock( side_effect=exception.InvalidInput) self.assertRaises(exception.ManageExistingInvalidReference, self.library._get_existing_vol_with_manage_ref, {'id': '1234'}) @ddt.data('source-id', 'source-name') def test_get_exist_vol_source_not_found(self, attr_name): def _get_volume(v_id): d = {'id': '1', 'name': 'volume1', 'worldWideName': '0'} if v_id in d: return d[v_id] else: raise exception.VolumeNotFound(message=v_id) self.library._client.list_volume = mock.Mock(wraps=_get_volume) self.assertRaises(exception.ManageExistingInvalidReference, self.library._get_existing_vol_with_manage_ref, {attr_name: 'name2'}) self.library._client.list_volume.assert_called_once_with( 'name2') def test_get_exist_vol_with_manage_ref(self): fake_ret_vol = {'id': 'right'} self.library._client.list_volume = mock.Mock(return_value=fake_ret_vol) actual_vol = self.library._get_existing_vol_with_manage_ref( {'source-name': 'name2'}) self.library._client.list_volume.assert_called_once_with('name2') self.assertEqual(fake_ret_vol, actual_vol) @mock.patch.object(utils, 'convert_uuid_to_es_fmt') def test_manage_existing_same_label(self, mock_convert_es_fmt): self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=self.fake_ret_vol) mock_convert_es_fmt.return_value = 'label' self.driver.manage_existing(self.volume, self.fake_ref) self.library._get_existing_vol_with_manage_ref.assert_called_once_with( self.fake_ref) mock_convert_es_fmt.assert_called_once_with( '114774fb-e15a-4fae-8ee2-c9723e3645ef') @mock.patch.object(utils, 'convert_uuid_to_es_fmt') def test_manage_existing_new(self, mock_convert_es_fmt): self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=self.fake_ret_vol) mock_convert_es_fmt.return_value = 'vol_label' self.library._client.update_volume = mock.Mock( return_value={'id': 'update', 'worldWideName': 'wwn'}) self.driver.manage_existing(self.volume, self.fake_ref) self.library._get_existing_vol_with_manage_ref.assert_called_once_with( self.fake_ref) mock_convert_es_fmt.assert_called_once_with( '114774fb-e15a-4fae-8ee2-c9723e3645ef') self.library._client.update_volume.assert_called_once_with( 'vol_id', 'vol_label') @mock.patch.object(library.LOG, 'info') def test_unmanage(self, log_info): self.library._get_volume = mock.Mock(return_value=self.fake_ret_vol) self.driver.unmanage(self.volume) 
self.library._get_volume.assert_called_once_with( '114774fb-e15a-4fae-8ee2-c9723e3645ef') self.assertEqual(1, log_info.call_count) @mock.patch.object(library.NetAppESeriesLibrary, 'ensure_export', mock.Mock()) def test_ensure_export(self): self.driver.ensure_export('context', self.fake_ret_vol) self.assertTrue(self.library.ensure_export.called) @mock.patch.object(library.NetAppESeriesLibrary, 'extend_volume', mock.Mock()) def test_extend_volume(self): capacity = 10 self.driver.extend_volume(self.fake_ret_vol, capacity) self.library.extend_volume.assert_called_with(self.fake_ret_vol, capacity) @mock.patch.object(library.NetAppESeriesLibrary, 'create_cgsnapshot', mock.Mock()) def test_create_cgsnapshot(self): cgsnapshot = copy.deepcopy(fakes.FAKE_CINDER_CG_SNAPSHOT) snapshots = copy.deepcopy([fakes.SNAPSHOT_IMAGE]) self.driver.create_cgsnapshot('ctx', cgsnapshot, snapshots) self.library.create_cgsnapshot.assert_called_with(cgsnapshot, snapshots) @mock.patch.object(library.NetAppESeriesLibrary, 'delete_cgsnapshot', mock.Mock()) def test_delete_cgsnapshot(self): cgsnapshot = copy.deepcopy(fakes.FAKE_CINDER_CG_SNAPSHOT) snapshots = copy.deepcopy([fakes.SNAPSHOT_IMAGE]) self.driver.delete_cgsnapshot('ctx', cgsnapshot, snapshots) self.library.delete_cgsnapshot.assert_called_with(cgsnapshot, snapshots) @mock.patch.object(library.NetAppESeriesLibrary, 'create_consistencygroup', mock.Mock()) def test_create_consistencygroup(self): cg = copy.deepcopy(fakes.FAKE_CINDER_CG) self.driver.create_consistencygroup('ctx', cg) self.library.create_consistencygroup.assert_called_with(cg) @mock.patch.object(library.NetAppESeriesLibrary, 'delete_consistencygroup', mock.Mock()) def test_delete_consistencygroup(self): cg = copy.deepcopy(fakes.FAKE_CINDER_CG) volumes = copy.deepcopy([fakes.VOLUME]) self.driver.delete_consistencygroup('ctx', cg, volumes) self.library.delete_consistencygroup.assert_called_with(cg, volumes) @mock.patch.object(library.NetAppESeriesLibrary, 'update_consistencygroup', mock.Mock()) def test_update_consistencygroup(self): group = copy.deepcopy(fakes.FAKE_CINDER_CG) self.driver.update_consistencygroup('ctx', group, {}, {}) self.library.update_consistencygroup.assert_called_with(group, {}, {}) @mock.patch.object(library.NetAppESeriesLibrary, 'create_consistencygroup_from_src', mock.Mock()) def test_create_consistencygroup_from_src(self): cg = copy.deepcopy(fakes.FAKE_CINDER_CG) volumes = copy.deepcopy([fakes.VOLUME]) source_vols = copy.deepcopy([fakes.VOLUME]) cgsnapshot = copy.deepcopy(fakes.FAKE_CINDER_CG_SNAPSHOT) source_cg = copy.deepcopy(fakes.FAKE_CINDER_CG_SNAPSHOT) snapshots = copy.deepcopy([fakes.SNAPSHOT_IMAGE]) self.driver.create_consistencygroup_from_src( 'ctx', cg, volumes, cgsnapshot, snapshots, source_cg, source_vols) self.library.create_consistencygroup_from_src.assert_called_with( cg, volumes, cgsnapshot, snapshots, source_cg, source_vols) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py0000664000567000056710000032764112701406250030513 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Andrew Kerr # Copyright (c) 2015 Alex Meade # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Yogesh Kshirsagar # Copyright (c) 2015 Michael Price # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import ddt import time import uuid import mock from oslo_utils import units import six from six.moves import range from six.moves import reduce from cinder import context from cinder import exception from cinder import test from cinder.tests.unit import fake_snapshot from cinder.tests.unit import utils as cinder_utils from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \ eseries_fake from cinder.volume.drivers.netapp.eseries import client as es_client from cinder.volume.drivers.netapp.eseries import exception as eseries_exc from cinder.volume.drivers.netapp.eseries import host_mapper from cinder.volume.drivers.netapp.eseries import library from cinder.volume.drivers.netapp.eseries import utils from cinder.volume.drivers.netapp import utils as na_utils from cinder.zonemanager import utils as fczm_utils def get_fake_volume(): """Return a fake Cinder Volume that can be used as a parameter""" return { 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1, 'volume_name': 'lun1', 'host': 'hostname@backend#DDP', 'os_type': 'linux', 'provider_location': 'lun1', 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'provider_auth': 'provider a b', 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None, 'migration_status': None, 'attach_status': "detached" } @ddt.ddt class NetAppEseriesLibraryTestCase(test.TestCase): def setUp(self): super(NetAppEseriesLibraryTestCase, self).setUp() kwargs = {'configuration': eseries_fake.create_configuration_eseries()} self.library = library.NetAppESeriesLibrary('FAKE', **kwargs) # We don't want the looping calls to run self.mock_object(self.library, '_start_periodic_tasks', new_attr=mock.Mock()) # Deprecated Option self.library.configuration.netapp_storage_pools = None self.library._client = eseries_fake.FakeEseriesClient() self.mock_object(self.library, '_start_periodic_tasks', new_attr=mock.Mock()) self.mock_object(library.cinder_utils, 'synchronized', mock.Mock(return_value=lambda f: f)) with mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new = cinder_utils.ZeroIntervalLoopingCall): self.library.check_for_setup_error() self.ctxt = context.get_admin_context() def test_do_setup(self): self.mock_object(self.library, '_check_mode_get_or_register_storage_system') self.mock_object(es_client, 'RestClient', eseries_fake.FakeEseriesClient) mock_check_flags = self.mock_object(na_utils, 'check_flags') self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) @ddt.data(('optimal', True), ('offline', False), ('needsAttn', True), ('neverContacted', False), ('newKey', True), (None, True)) @ddt.unpack @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_check_storage_system_status(self, status, status_valid): system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM) system['status'] = status status = status.lower() if status is not None else '' actual_status, actual_valid = ( self.library._check_storage_system_status(system)) self.assertEqual(status, actual_status) self.assertEqual(status_valid, actual_valid) 
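# The parameterized tests in this class rely on the ddt library: @ddt.data
# supplies one value (or tuple) per generated test case and @ddt.unpack
# spreads each tuple across the test method's arguments. A minimal,
# self-contained sketch of the pattern (hypothetical example, not part of
# this test suite):
#
#     import ddt
#     from cinder import test
#
#     @ddt.ddt
#     class ExampleTestCase(test.TestCase):
#         @ddt.data(('optimal', True), ('offline', False))
#         @ddt.unpack
#         def test_status_valid(self, status, expected_valid):
#             self.assertEqual(expected_valid, status == 'optimal')
#
# Each tuple is reported as an independent test case, so one failing input
# does not mask the others.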
@ddt.data(('valid', True), ('invalid', False), ('unknown', False), ('newKey', True), (None, True)) @ddt.unpack @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_check_password_status(self, status, status_valid): system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM) system['passwordStatus'] = status status = status.lower() if status is not None else '' actual_status, actual_valid = ( self.library._check_password_status(system)) self.assertEqual(status, actual_status) self.assertEqual(status_valid, actual_valid) def test_check_storage_system_bad_system(self): exc_str = "bad_system" controller_ips = self.library.configuration.netapp_controller_ips self.library._client.list_storage_system = mock.Mock( side_effect=exception.NetAppDriverException(message=exc_str)) info_log = self.mock_object(library.LOG, 'info', mock.Mock()) self.assertRaisesRegexp(exception.NetAppDriverException, exc_str, self.library._check_storage_system) info_log.assert_called_once_with(mock.ANY, controller_ips) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_check_storage_system(self): system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM) self.mock_object(self.library._client, 'list_storage_system', new_attr=mock.Mock(return_value=system)) update_password = self.mock_object(self.library._client, 'update_stored_system_password') info_log = self.mock_object(library.LOG, 'info', mock.Mock()) self.library._check_storage_system() self.assertTrue(update_password.called) self.assertTrue(info_log.called) @ddt.data({'status': 'optimal', 'passwordStatus': 'invalid'}, {'status': 'offline', 'passwordStatus': 'valid'}) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_check_storage_system_bad_status(self, system): self.mock_object(self.library._client, 'list_storage_system', new_attr=mock.Mock(return_value=system)) self.mock_object(self.library._client, 'update_stored_system_password') self.mock_object(time, 'time', new_attr = mock.Mock( side_effect=range(0, 60, 5))) self.assertRaisesRegexp(exception.NetAppDriverException, 'bad.*?status', self.library._check_storage_system) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_check_storage_system_update_password(self): self.library.configuration.netapp_sa_password = 'password' def get_system_iter(): key = 'passwordStatus' system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM) system[key] = 'invalid' yield system yield system system[key] = 'valid' yield system self.mock_object(self.library._client, 'list_storage_system', new_attr=mock.Mock(side_effect=get_system_iter())) update_password = self.mock_object(self.library._client, 'update_stored_system_password', new_attr=mock.Mock()) info_log = self.mock_object(library.LOG, 'info', mock.Mock()) self.library._check_storage_system() update_password.assert_called_once_with( self.library.configuration.netapp_sa_password) self.assertTrue(info_log.called) def test_get_storage_pools_empty_result(self): """Verify an exception is raised if no pools are returned.""" self.library.configuration.netapp_pool_name_search_pattern = '$' def test_get_storage_pools_invalid_conf(self): """Verify an exception is raised if the regex pattern is invalid.""" self.library.configuration.netapp_pool_name_search_pattern = '(.*' self.assertRaises(exception.InvalidConfigurationValue, self.library._get_storage_pools) 
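# The pool filtering exercised by the next few tests treats the
# netapp_pool_name_search_pattern option as a comma-separated list of
# regular expressions matched against pool labels. A standalone sketch of
# that behavior (hypothetical helper for illustration only; the driver's
# actual implementation lives in library.py):
#
#     import re
#
#     def filter_pools(pools, pattern):
#         # Split on commas, tolerate surrounding whitespace, and keep any
#         # pool whose label matches at least one pattern.
#         regexes = [p.strip() for p in pattern.split(',')]
#         return [pool for pool in pools
#                 if any(re.match(r, pool['label']) for r in regexes)]
#
#     filter_pools([{'label': '1'}, {'label': 'a'}, {'label': 'b'}],
#                  '[\d]+,a')
#     # -> keeps the pools labeled '1' and 'a', mirroring the ddt cases in
#     #    test_get_storage_pools below.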
def test_get_storage_pools_default(self): """Verify that all pools are returned if the search option is empty.""" filtered_pools = self.library._get_storage_pools() self.assertEqual(eseries_fake.STORAGE_POOLS, filtered_pools) @ddt.data(('[\d]+,a', ['1', '2', 'a', 'b'], ['1', '2', 'a']), ('1 , 3', ['1', '2', '3'], ['1', '3']), ('$,3', ['1', '2', '3'], ['3']), ('[a-zA-Z]+', ['1', 'a', 'B'], ['a', 'B']), ('', ['1', '2'], ['1', '2']) ) @ddt.unpack def test_get_storage_pools(self, pool_filter, pool_labels, expected_pool_labels): """Verify that pool filtering via the search_pattern works correctly :param pool_filter: A regular expression to be used for filtering via pool labels :param pool_labels: A list of pool labels :param expected_pool_labels: The labels from 'pool_labels' that should be matched by 'pool_filter' """ self.library.configuration.netapp_pool_name_search_pattern = ( pool_filter) pools = [{'label': label} for label in pool_labels] self.library._client.list_storage_pools = mock.Mock( return_value=pools) filtered_pools = self.library._get_storage_pools() filtered_pool_labels = [pool['label'] for pool in filtered_pools] self.assertEqual(expected_pool_labels, filtered_pool_labels) def test_get_volume(self): fake_volume = copy.deepcopy(get_fake_volume()) volume = copy.deepcopy(eseries_fake.VOLUME) self.library._client.list_volume = mock.Mock(return_value=volume) result = self.library._get_volume(fake_volume['id']) self.assertEqual(1, self.library._client.list_volume.call_count) self.assertDictMatch(volume, result) def test_get_volume_bad_input(self): volume = copy.deepcopy(eseries_fake.VOLUME) self.library._client.list_volume = mock.Mock(return_value=volume) self.assertRaises(exception.InvalidInput, self.library._get_volume, None) def test_get_volume_bad_uuid(self): volume = copy.deepcopy(eseries_fake.VOLUME) self.library._client.list_volume = mock.Mock(return_value=volume) self.assertRaises(ValueError, self.library._get_volume, '1') def test_update_ssc_info_no_ssc(self): drives = [{'currentVolumeGroupRef': 'test_vg1', 'driveMediaType': 'ssd'}] pools = [{'volumeGroupRef': 'test_vg1', 'label': 'test_vg1', 'raidLevel': 'raid6', 'securityType': 'enabled'}] self.library._client = mock.Mock() self.library._client.features.SSC_API_V2 = na_utils.FeatureState( False, minimum_version="1.53.9000.1") self.library._client.SSC_VALID_VERSIONS = [(1, 53, 9000, 1), (1, 53, 9010, 15)] self.library.configuration.netapp_pool_name_search_pattern = "test_vg1" self.library._client.list_storage_pools = mock.Mock(return_value=pools) self.library._client.list_drives = mock.Mock(return_value=drives) self.library._update_ssc_info() self.assertEqual( {'test_vg1': {'netapp_disk_encryption': 'true', 'netapp_disk_type': 'SSD', 'netapp_raid_type': 'raid6'}}, self.library._ssc_stats) @ddt.data(True, False) def test_update_ssc_info(self, data_assurance_supported): self.library._client = mock.Mock() self.library._client.features.SSC_API_V2 = na_utils.FeatureState( True, minimum_version="1.53.9000.1") self.library._client.list_ssc_storage_pools = mock.Mock( return_value=eseries_fake.SSC_POOLS) self.library._get_storage_pools = mock.Mock( return_value=eseries_fake.STORAGE_POOLS) # Data Assurance is not supported on some storage backends self.library._is_data_assurance_supported = mock.Mock( return_value=data_assurance_supported) self.library._update_ssc_info() for pool in eseries_fake.SSC_POOLS: poolId = pool['poolId'] raid_lvl = self.library.SSC_RAID_TYPE_MAPPING.get( pool['raidLevel'], 'unknown') if 
pool['pool']["driveMediaType"] == 'ssd': disk_type = 'SSD' else: disk_type = pool['pool']['drivePhysicalType'] disk_type = ( self.library.SSC_DISK_TYPE_MAPPING.get( disk_type, 'unknown')) da_enabled = pool['dataAssuranceCapable'] and ( data_assurance_supported) thin_provisioned = pool['thinProvisioningCapable'] expected = { 'consistencygroup_support': True, 'netapp_disk_encryption': six.text_type(pool['encrypted']).lower(), 'netapp_eseries_flash_read_cache': six.text_type(pool['flashCacheCapable']).lower(), 'netapp_thin_provisioned': six.text_type(thin_provisioned).lower(), 'netapp_eseries_data_assurance': six.text_type(da_enabled).lower(), 'netapp_eseries_disk_spindle_speed': pool['spindleSpeed'], 'netapp_raid_type': raid_lvl, 'netapp_disk_type': disk_type } actual = self.library._ssc_stats[poolId] self.assertDictMatch(expected, actual) @ddt.data(('FC', True), ('iSCSI', False)) @ddt.unpack def test_is_data_assurance_supported(self, backend_storage_protocol, enabled): self.mock_object(self.library, 'driver_protocol', backend_storage_protocol) actual = self.library._is_data_assurance_supported() self.assertEqual(enabled, actual) @ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage') def test_update_ssc_disk_types(self, disk_type): drives = [{'currentVolumeGroupRef': 'test_vg1', 'interfaceType': {'driveType': disk_type}}] pools = [{'volumeGroupRef': 'test_vg1'}] self.library._client.list_drives = mock.Mock(return_value=drives) self.library._client.get_storage_pool = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_disk_types(pools) expected = self.library.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown') self.assertEqual({'test_vg1': {'netapp_disk_type': expected}}, ssc_stats) @ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage') def test_update_ssc_disk_types_ssd(self, disk_type): drives = [{'currentVolumeGroupRef': 'test_vg1', 'driveMediaType': 'ssd', 'driveType': disk_type}] pools = [{'volumeGroupRef': 'test_vg1'}] self.library._client.list_drives = mock.Mock(return_value=drives) self.library._client.get_storage_pool = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_disk_types(pools) self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}}, ssc_stats) @ddt.data('enabled', 'none', 'capable', 'unknown', '__UNDEFINED', 'garbage') def test_update_ssc_disk_encryption(self, securityType): pools = [{'volumeGroupRef': 'test_vg1', 'securityType': securityType}] self.library._client.list_storage_pools = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_disk_encryption(pools) # Convert the boolean value to a lower-case string value expected = 'true' if securityType == "enabled" else 'false' self.assertEqual({'test_vg1': {'netapp_disk_encryption': expected}}, ssc_stats) def test_update_ssc_disk_encryption_multiple(self): pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'none'}, {'volumeGroupRef': 'test_vg2', 'securityType': 'enabled'}] self.library._client.list_storage_pools = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_disk_encryption(pools) self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}, 'test_vg2': {'netapp_disk_encryption': 'true'}}, ssc_stats) @ddt.data(True, False) def test_get_volume_stats(self, refresh): fake_stats = {'key': 'val'} def populate_stats(): self.library._stats = fake_stats self.library._update_volume_stats = mock.Mock( side_effect=populate_stats) self.library._update_ssc_info = mock.Mock() self.library._ssc_stats = {self.library.THIN_UQ_SPEC: True} actual = 
self.library.get_volume_stats(refresh=refresh) if refresh: self.library._update_volume_stats.assert_called_once_with() self.assertEqual(fake_stats, actual) else: self.assertEqual(0, self.library._update_volume_stats.call_count) self.assertEqual(0, self.library._update_ssc_info.call_count) def test_get_volume_stats_no_ssc(self): """Validate that SSC data is collected if not yet populated""" fake_stats = {'key': 'val'} def populate_stats(): self.library._stats = fake_stats self.library._update_volume_stats = mock.Mock( side_effect=populate_stats) self.library._update_ssc_info = mock.Mock() self.library._ssc_stats = None actual = self.library.get_volume_stats(refresh=True) self.library._update_volume_stats.assert_called_once_with() self.library._update_ssc_info.assert_called_once_with() self.assertEqual(fake_stats, actual) def test_update_volume_stats_provisioning(self): """Validate pool capacity calculations""" fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[ "volumeGroupRef"]: {self.library.THIN_UQ_SPEC: True}}) self.library.configuration = mock.Mock() reserved_pct = 5 over_subscription_ratio = 1.0 self.library.configuration.max_over_subscription_ratio = ( over_subscription_ratio) self.library.configuration.reserved_percentage = reserved_pct total_gb = int(fake_pool['totalRaidedSpace']) / units.Gi used_gb = int(fake_pool['usedSpace']) / units.Gi free_gb = total_gb - used_gb self.library._update_volume_stats() self.assertEqual(1, len(self.library._stats['pools'])) pool_stats = self.library._stats['pools'][0] self.assertEqual(fake_pool['label'], pool_stats.get('pool_name')) self.assertEqual(reserved_pct, pool_stats['reserved_percentage']) self.assertEqual(over_subscription_ratio, pool_stats['max_over_subscription_ratio']) self.assertEqual(total_gb, pool_stats.get('total_capacity_gb')) self.assertEqual(used_gb, pool_stats.get('provisioned_capacity_gb')) self.assertEqual(free_gb, pool_stats.get('free_capacity_gb')) @ddt.data(False, True) def test_update_volume_stats_thin_provisioning(self, thin_provisioning): """Validate that thin provisioning support is correctly reported""" fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[ "volumeGroupRef"]: {self.library.THIN_UQ_SPEC: thin_provisioning}}) self.library._update_volume_stats() self.assertEqual(1, len(self.library._stats['pools'])) pool_stats = self.library._stats['pools'][0] self.assertEqual(thin_provisioning, pool_stats.get( 'thin_provisioning_support')) # Should always be True self.assertTrue(pool_stats.get('thick_provisioning_support')) def test_update_volume_stats_ssc(self): """Ensure that the SSC data is correctly reported in the pool stats""" ssc = {self.library.THIN_UQ_SPEC: True, 'key': 'val'} fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[ "volumeGroupRef"]: ssc}) self.library._update_volume_stats() self.assertEqual(1, len(self.library._stats['pools'])) pool_stats = self.library._stats['pools'][0] for key in ssc: self.assertIn(key, pool_stats) self.assertEqual(ssc[key], pool_stats[key]) def test_update_volume_stats_no_ssc(self): """Ensure that pool stats are correctly reported without SSC""" fake_pool =
copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) self.library._update_volume_stats() self.assertEqual(1, len(self.library._stats['pools'])) pool_stats = self.library._stats['pools'][0] self.assertFalse(pool_stats.get('thin_provisioning_support')) # Should always be True self.assertTrue(pool_stats.get('thick_provisioning_support')) def test_terminate_connection_iscsi_no_hosts(self): connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[])) self.assertRaises(exception.NotFound, self.library.terminate_connection_iscsi, get_fake_volume(), connector) def test_terminate_connection_iscsi_volume_not_mapped(self): connector = {'initiator': eseries_fake.INITIATOR_NAME} volume = copy.deepcopy(eseries_fake.VOLUME) volume['listOfMappings'] = [] self.library._get_volume = mock.Mock(return_value=volume) self.assertRaises(eseries_exc.VolumeNotMapped, self.library.terminate_connection_iscsi, get_fake_volume(), connector) def test_terminate_connection_iscsi_volume_mapped(self): connector = {'initiator': eseries_fake.INITIATOR_NAME} fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ eseries_fake.VOLUME_MAPPING ] self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.mock_object(host_mapper, 'unmap_volume_from_host') self.library.terminate_connection_iscsi(get_fake_volume(), connector) self.assertTrue(host_mapper.unmap_volume_from_host.called) def test_terminate_connection_iscsi_not_mapped_initiator_does_not_exist( self): connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[eseries_fake.HOST_2])) self.assertRaises(exception.NotFound, self.library.terminate_connection_iscsi, get_fake_volume(), connector) def test_initialize_connection_iscsi_volume_not_mapped(self): connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(self.library._client, 'get_volume_mappings_for_volume', mock.Mock(return_value=[])) self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( return_value=eseries_fake.VOLUME_MAPPING)) fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ eseries_fake.VOLUME_MAPPING ] self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.library.initialize_connection_iscsi(get_fake_volume(), connector) self.assertTrue( self.library._client.get_volume_mappings_for_volume.called) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_iscsi_volume_not_mapped_host_does_not_exist( self): connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(self.library._client, 'get_volume_mappings_for_volume', mock.Mock(return_value=[])) self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[])) self.mock_object(self.library._client, 'create_host_with_ports', mock.Mock(return_value=eseries_fake.HOST)) self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( return_value=eseries_fake.VOLUME_MAPPING)) fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ eseries_fake.VOLUME_MAPPING ] self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.library.initialize_connection_iscsi(get_fake_volume(), connector) self.assertTrue( 
self.library._client.get_volume_mappings_for_volume.called) self.assertTrue(self.library._client.list_hosts.called) self.assertTrue(self.library._client.create_host_with_ports.called) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_iscsi_volume_already_mapped_to_target_host( self): """Should be a no-op""" connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( return_value=eseries_fake.VOLUME_MAPPING)) fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.library.initialize_connection_iscsi(get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_iscsi_volume_mapped_to_another_host(self): """Should raise error saying multiattach not enabled""" connector = {'initiator': eseries_fake.INITIATOR_NAME} fake_mapping_to_other_host = copy.deepcopy( eseries_fake.VOLUME_MAPPING) fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[ 'hostRef'] self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( side_effect=exception.NetAppDriverException)) self.assertRaises(exception.NetAppDriverException, self.library.initialize_connection_iscsi, get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) @ddt.data(eseries_fake.WWPN, fczm_utils.get_formatted_wwn(eseries_fake.WWPN)) def test_get_host_with_matching_port_wwpn(self, port_id): port_ids = [port_id] host = copy.deepcopy(eseries_fake.HOST) host.update( { 'hostSidePorts': [{'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN}] } ) host_2 = copy.deepcopy(eseries_fake.HOST_2) host_2.update( { 'hostSidePorts': [{'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN_2}] } ) host_list = [host, host_2] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=host_list)) actual_host = self.library._get_host_with_matching_port( port_ids) self.assertEqual(host, actual_host) def test_get_host_with_matching_port_iqn(self): port_ids = [eseries_fake.INITIATOR_NAME] host = copy.deepcopy(eseries_fake.HOST) host.update( { 'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi', 'address': eseries_fake.INITIATOR_NAME}] } ) host_2 = copy.deepcopy(eseries_fake.HOST_2) host_2.update( { 'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi', 'address': eseries_fake.INITIATOR_NAME_2}] } ) host_list = [host, host_2] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=host_list)) actual_host = self.library._get_host_with_matching_port( port_ids) self.assertEqual(host, actual_host) def test_terminate_connection_fc_no_hosts(self): connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[])) self.assertRaises(exception.NotFound, self.library.terminate_connection_fc, get_fake_volume(), connector) def test_terminate_connection_fc_volume_not_mapped(self): connector = {'wwpns': [eseries_fake.WWPN]} fake_host = copy.deepcopy(eseries_fake.HOST) fake_host['hostSidePorts'] = [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] volume = copy.deepcopy(eseries_fake.VOLUME) volume['listOfMappings'] = [] self.mock_object(self.library, '_get_volume', mock.Mock(return_value=volume)) self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.assertRaises(eseries_exc.VolumeNotMapped, 
self.library.terminate_connection_fc, get_fake_volume(), connector) def test_terminate_connection_fc_volume_mapped(self): connector = {'wwpns': [eseries_fake.WWPN]} fake_host = copy.deepcopy(eseries_fake.HOST) fake_host['hostSidePorts'] = [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ copy.deepcopy(eseries_fake.VOLUME_MAPPING) ] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.mock_object(host_mapper, 'unmap_volume_from_host') self.library.terminate_connection_fc(get_fake_volume(), connector) self.assertTrue(host_mapper.unmap_volume_from_host.called) def test_terminate_connection_fc_volume_mapped_no_cleanup_zone(self): connector = {'wwpns': [eseries_fake.WWPN]} fake_host = copy.deepcopy(eseries_fake.HOST) fake_host['hostSidePorts'] = [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] expected_target_info = { 'driver_volume_type': 'fibre_channel', 'data': {}, } fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ copy.deepcopy(eseries_fake.VOLUME_MAPPING) ] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.mock_object(host_mapper, 'unmap_volume_from_host') self.mock_object(self.library._client, 'get_volume_mappings_for_host', mock.Mock(return_value=[copy.deepcopy (eseries_fake. VOLUME_MAPPING)])) target_info = self.library.terminate_connection_fc(get_fake_volume(), connector) self.assertDictEqual(expected_target_info, target_info) self.assertTrue(host_mapper.unmap_volume_from_host.called) def test_terminate_connection_fc_volume_mapped_cleanup_zone(self): connector = {'wwpns': [eseries_fake.WWPN]} fake_host = copy.deepcopy(eseries_fake.HOST) fake_host['hostSidePorts'] = [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] expected_target_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_wwn': [eseries_fake.WWPN_2], 'initiator_target_map': { eseries_fake.WWPN: [eseries_fake.WWPN_2] }, }, } fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ copy.deepcopy(eseries_fake.VOLUME_MAPPING) ] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.mock_object(host_mapper, 'unmap_volume_from_host') self.mock_object(self.library._client, 'get_volume_mappings_for_host', mock.Mock(return_value=[])) target_info = self.library.terminate_connection_fc(get_fake_volume(), connector) self.assertDictEqual(expected_target_info, target_info) self.assertTrue(host_mapper.unmap_volume_from_host.called) def test_terminate_connection_fc_not_mapped_host_with_wwpn_does_not_exist( self): connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[eseries_fake.HOST_2])) self.assertRaises(exception.NotFound, self.library.terminate_connection_fc, get_fake_volume(), connector) def test_initialize_connection_fc_volume_not_mapped(self): connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(self.library._client, 'get_volume_mappings_for_volume', mock.Mock(return_value=[])) 
self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( return_value=eseries_fake.VOLUME_MAPPING)) expected_target_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 0, 'target_wwn': [eseries_fake.WWPN_2], 'initiator_target_map': { eseries_fake.WWPN: [eseries_fake.WWPN_2] }, }, } target_info = self.library.initialize_connection_fc(get_fake_volume(), connector) self.assertTrue( self.library._client.get_volume_mappings_for_volume.called) self.assertTrue(host_mapper.map_volume_to_single_host.called) self.assertDictEqual(expected_target_info, target_info) def test_initialize_connection_fc_volume_not_mapped_host_does_not_exist( self): connector = {'wwpns': [eseries_fake.WWPN]} self.library.driver_protocol = 'FC' self.mock_object(self.library._client, 'get_volume_mappings_for_volume', mock.Mock(return_value=[])) self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[])) self.mock_object(self.library._client, 'create_host_with_ports', mock.Mock(return_value=eseries_fake.HOST)) self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( return_value=eseries_fake.VOLUME_MAPPING)) self.library.initialize_connection_fc(get_fake_volume(), connector) self.library._client.create_host_with_ports.assert_called_once_with( mock.ANY, mock.ANY, [fczm_utils.get_formatted_wwn(eseries_fake.WWPN)], port_type='fc', group_id=None ) def test_initialize_connection_fc_volume_already_mapped_to_target_host( self): """Should be a no-op""" connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( return_value=eseries_fake.VOLUME_MAPPING)) self.library.initialize_connection_fc(get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_fc_volume_mapped_to_another_host(self): """Should raise error saying multiattach not enabled""" connector = {'wwpns': [eseries_fake.WWPN]} fake_mapping_to_other_host = copy.deepcopy( eseries_fake.VOLUME_MAPPING) fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[ 'hostRef'] self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( side_effect=exception.NetAppDriverException)) self.assertRaises(exception.NetAppDriverException, self.library.initialize_connection_fc, get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_fc_no_target_wwpns(self): """Should raise an error when no target WWPNs are available""" connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock( return_value=eseries_fake.VOLUME_MAPPING)) self.mock_object(self.library._client, 'list_target_wwpns', mock.Mock(return_value=[])) self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_fc, get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_build_initiator_target_map_fc_with_lookup_service( self): connector = {'wwpns': [eseries_fake.WWPN, eseries_fake.WWPN_2]} self.library.lookup_service = mock.Mock() self.library.lookup_service.get_device_mapping_from_network = ( mock.Mock(return_value=eseries_fake.FC_FABRIC_MAP)) (target_wwpns, initiator_target_map, num_paths) = ( self.library._build_initiator_target_map_fc(connector)) self.assertSetEqual(set(eseries_fake.FC_TARGET_WWPNS), set(target_wwpns)) self.assertDictEqual(eseries_fake.FC_I_T_MAP, initiator_target_map) self.assertEqual(4, num_paths) @ddt.data(('raid0', 'raid0'), ('raid1', 'raid1'),
('raid3', 'raid5'), ('raid5', 'raid5'), ('raid6', 'raid6'), ('raidDiskPool', 'DDP')) @ddt.unpack def test_update_ssc_raid_type(self, raid_lvl, raid_lvl_mapping): pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}] self.library._client.list_storage_pools = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_raid_type(pools) self.assertEqual({'test_vg1': {'netapp_raid_type': raid_lvl_mapping}}, ssc_stats) @ddt.data('raidAll', '__UNDEFINED', 'unknown', 'raidUnsupported', 'garbage') def test_update_ssc_raid_type_invalid(self, raid_lvl): pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}] self.library._client.list_storage_pools = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_raid_type(pools) self.assertEqual({'test_vg1': {'netapp_raid_type': 'unknown'}}, ssc_stats) def test_create_asup(self): self.library._client = mock.Mock() self.library._client.features.AUTOSUPPORT = na_utils.FeatureState() self.library._client.api_operating_mode = ( eseries_fake.FAKE_ASUP_DATA['operating-mode']) self.library._app_version = eseries_fake.FAKE_APP_VERSION self.mock_object( self.library._client, 'get_asup_info', mock.Mock(return_value=eseries_fake.GET_ASUP_RETURN)) self.mock_object( self.library._client, 'set_counter', mock.Mock(return_value={'value': 1})) mock_invoke = self.mock_object( self.library._client, 'add_autosupport_data') self.library._create_asup(eseries_fake.FAKE_CINDER_HOST) mock_invoke.assert_called_with(eseries_fake.FAKE_KEY, eseries_fake.FAKE_ASUP_DATA) def test_create_asup_not_supported(self): self.library._client = mock.Mock() self.library._client.features.AUTOSUPPORT = na_utils.FeatureState( supported=False) mock_invoke = self.mock_object( self.library._client, 'add_autosupport_data') self.library._create_asup(eseries_fake.FAKE_CINDER_HOST) mock_invoke.assert_not_called() @mock.patch.object(library, 'LOG', mock.Mock()) def test_create_volume_fail_clean(self): """Test volume creation failure w/o a partial volume being created. Test the failed creation of a volume where a partial volume with the name has not been created, thus no cleanup is required. """ self.library._get_volume = mock.Mock( side_effect=exception.VolumeNotFound(message='')) self.library._client.create_volume = mock.Mock( side_effect=exception.NetAppDriverException) self.library._client.delete_volume = mock.Mock() fake_volume = copy.deepcopy(get_fake_volume()) self.assertRaises(exception.NetAppDriverException, self.library.create_volume, fake_volume) self.assertTrue(self.library._get_volume.called) self.assertFalse(self.library._client.delete_volume.called) self.assertEqual(1, library.LOG.error.call_count) @mock.patch.object(library, 'LOG', mock.Mock()) def test_create_volume_fail_dirty(self): """Test volume creation failure where a partial volume has been created. Test scenario where the creation of a volume fails and a partial volume is created with the name/id that was supplied to the original creation call. In this situation the partial volume should be detected and removed.
""" fake_volume = copy.deepcopy(get_fake_volume()) self.library._get_volume = mock.Mock(return_value=fake_volume) self.library._client.list_volume = mock.Mock(return_value=fake_volume) self.library._client.create_volume = mock.Mock( side_effect=exception.NetAppDriverException) self.library._client.delete_volume = mock.Mock() self.assertRaises(exception.NetAppDriverException, self.library.create_volume, fake_volume) self.assertTrue(self.library._get_volume.called) self.assertTrue(self.library._client.delete_volume.called) self.library._client.delete_volume.assert_called_once_with( fake_volume["id"]) self.assertEqual(1, library.LOG.error.call_count) @mock.patch.object(library, 'LOG', mock.Mock()) def test_create_volume_fail_dirty_fail_delete(self): """Volume creation fails and deletion of the partial volume also fails. Test scenario where the creation of a volume fails and a partial volume is created with the name/id that was supplied to the original creation call. The partial volume is detected, but the cleanup deletion of that fragment volume then fails as well. """ fake_volume = copy.deepcopy(get_fake_volume()) self.library._get_volume = mock.Mock(return_value=fake_volume) self.library._client.list_volume = mock.Mock(return_value=fake_volume) self.library._client.create_volume = mock.Mock( side_effect=exception.NetAppDriverException) self.library._client.delete_volume = mock.Mock( side_effect=exception.NetAppDriverException) self.assertRaises(exception.NetAppDriverException, self.library.create_volume, fake_volume) self.assertTrue(self.library._get_volume.called) self.assertTrue(self.library._client.delete_volume.called) self.library._client.delete_volume.assert_called_once_with( fake_volume["id"]) self.assertEqual(2, library.LOG.error.call_count) def test_create_consistencygroup(self): fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) expected = {'status': 'available'} create_cg = self.mock_object(self.library, '_create_consistency_group', mock.Mock(return_value=expected)) actual = self.library.create_consistencygroup(fake_cg) create_cg.assert_called_once_with(fake_cg) self.assertEqual(expected, actual) def test_create_consistency_group(self): fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) expected = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) create_cg = self.mock_object(self.library._client, 'create_consistency_group', mock.Mock(return_value=expected)) result = self.library._create_consistency_group(fake_cg) name = utils.convert_uuid_to_es_fmt(fake_cg['id']) create_cg.assert_called_once_with(name) self.assertEqual(expected, result) def test_delete_consistencygroup(self): cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) volumes = [get_fake_volume()] * 3 model_update = {'status': 'deleted'} volume_update = [{'status': 'deleted', 'id': vol['id']} for vol in volumes] delete_cg = self.mock_object(self.library._client, 'delete_consistency_group') updt_index = self.mock_object( self.library, '_merge_soft_delete_changes') delete_vol = self.mock_object(self.library, 'delete_volume') self.mock_object(self.library, '_get_consistencygroup', mock.Mock(return_value=cg)) result = self.library.delete_consistencygroup(fake_cg, volumes) self.assertEqual(len(volumes), delete_vol.call_count) delete_cg.assert_called_once_with(cg['id']) self.assertEqual((model_update, volume_update), result) updt_index.assert_called_once_with(None, [cg['id']]) def test_delete_consistencygroup_index_update_failure(self): cg =
copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) volumes = [get_fake_volume()] * 3 model_update = {'status': 'deleted'} volume_update = [{'status': 'deleted', 'id': vol['id']} for vol in volumes] delete_cg = self.mock_object(self.library._client, 'delete_consistency_group') delete_vol = self.mock_object(self.library, 'delete_volume') self.mock_object(self.library, '_get_consistencygroup', mock.Mock(return_value=cg)) result = self.library.delete_consistencygroup(fake_cg, volumes) self.assertEqual(len(volumes), delete_vol.call_count) delete_cg.assert_called_once_with(cg['id']) self.assertEqual((model_update, volume_update), result) def test_delete_consistencygroup_not_found(self): fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) delete_cg = self.mock_object(self.library._client, 'delete_consistency_group') updt_index = self.mock_object( self.library, '_merge_soft_delete_changes') delete_vol = self.mock_object(self.library, 'delete_volume') exc = exception.ConsistencyGroupNotFound(consistencygroup_id='') self.mock_object(self.library, '_get_consistencygroup', mock.Mock(side_effect=exc)) self.library.delete_consistencygroup(fake_cg, []) delete_cg.assert_not_called() delete_vol.assert_not_called() updt_index.assert_not_called() def test_get_consistencygroup(self): fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) name = utils.convert_uuid_to_es_fmt(fake_cg['id']) cg['name'] = name list_cgs = self.mock_object(self.library._client, 'list_consistency_groups', mock.Mock(return_value=[cg])) result = self.library._get_consistencygroup(fake_cg) self.assertEqual(cg, result) list_cgs.assert_called_once_with() def test_get_consistencygroup_not_found(self): cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) list_cgs = self.mock_object(self.library._client, 'list_consistency_groups', mock.Mock(return_value=[cg])) self.assertRaises(exception.ConsistencyGroupNotFound, self.library._get_consistencygroup, copy.deepcopy(eseries_fake.FAKE_CINDER_CG)) list_cgs.assert_called_once_with() def test_update_consistencygroup(self): cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) vol = copy.deepcopy(eseries_fake.VOLUME) volumes = [get_fake_volume()] * 3 self.mock_object( self.library, '_get_volume', mock.Mock(return_value=vol)) self.mock_object(self.library, '_get_consistencygroup', mock.Mock(return_value=cg)) self.library.update_consistencygroup(fake_cg, volumes, volumes) def test_create_consistencygroup_from_src(self): cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) volumes = [cinder_utils.create_volume(self.ctxt) for i in range(3)] src_volumes = [cinder_utils.create_volume(self.ctxt) for v in volumes] update_cg = self.mock_object( self.library, '_update_consistency_group_members') create_cg = self.mock_object( self.library, '_create_consistency_group', mock.Mock(return_value=cg)) self.mock_object( self.library, '_create_volume_from_snapshot') self.mock_object( self.library, '_get_snapshot', mock.Mock(return_value=snap)) self.library.create_consistencygroup_from_src( fake_cg, volumes, None, None, None, src_volumes) create_cg.assert_called_once_with(fake_cg) update_cg.assert_called_once_with(cg, volumes, []) def test_create_consistencygroup_from_src_cgsnapshot(self): cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) fake_cg 
= copy.deepcopy(eseries_fake.FAKE_CINDER_CG) fake_vol = cinder_utils.create_volume(self.ctxt) cgsnap = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT) volumes = [fake_vol] snapshots = [cinder_utils.create_snapshot(self.ctxt, v['id']) for v in volumes] update_cg = self.mock_object( self.library, '_update_consistency_group_members') create_cg = self.mock_object( self.library, '_create_consistency_group', mock.Mock(return_value=cg)) clone_vol = self.mock_object( self.library, '_create_volume_from_snapshot') self.library.create_consistencygroup_from_src( fake_cg, volumes, cgsnap, snapshots, None, None) create_cg.assert_called_once_with(fake_cg) update_cg.assert_called_once_with(cg, volumes, []) self.assertEqual(clone_vol.call_count, len(volumes)) @ddt.data({'consistencyGroupId': utils.NULL_REF}, {'consistencyGroupId': None}, {'consistencyGroupId': '1'}, {}) def test_is_cgsnapshot(self, snapshot_image): if snapshot_image.get('consistencyGroupId'): result = not (utils.NULL_REF == snapshot_image[ 'consistencyGroupId']) else: result = False actual = self.library._is_cgsnapshot(snapshot_image) self.assertEqual(result, actual) def test_add_volume_to_consistencygroup(self): fake_volume = cinder_utils.create_volume(self.ctxt) fake_volume['consistencygroup'] = ( cinder_utils.create_consistencygroup(self.ctxt)) fake_volume['consistencygroup_id'] = fake_volume[ 'consistencygroup']['id'] cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) self.mock_object(self.library, '_get_consistencygroup', mock.Mock(return_value=cg)) update_members = self.mock_object(self.library, '_update_consistency_group_members') self.library._add_volume_to_consistencygroup(fake_volume) update_members.assert_called_once_with(cg, [fake_volume], []) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_copy_volume_high_priority_readonly(self): src_vol = copy.deepcopy(eseries_fake.VOLUME) dst_vol = copy.deepcopy(eseries_fake.VOLUME) vc = copy.deepcopy(eseries_fake.VOLUME_COPY_JOB) self.mock_object(self.library._client, 'create_volume_copy_job', mock.Mock(return_value=vc)) self.mock_object(self.library._client, 'list_vol_copy_job', mock.Mock(return_value=vc)) delete_copy = self.mock_object(self.library._client, 'delete_vol_copy_job') result = self.library._copy_volume_high_priority_readonly( src_vol, dst_vol) self.assertIsNone(result) delete_copy.assert_called_once_with(vc['volcopyRef']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_copy_volume_high_priority_readonly_job_create_failure(self): src_vol = copy.deepcopy(eseries_fake.VOLUME) dst_vol = copy.deepcopy(eseries_fake.VOLUME) self.mock_object( self.library._client, 'create_volume_copy_job', mock.Mock( side_effect=exception.NetAppDriverException)) self.assertRaises( exception.NetAppDriverException, self.library._copy_volume_high_priority_readonly, src_vol, dst_vol) @ddt.ddt class NetAppEseriesLibraryMultiAttachTestCase(test.TestCase): """Test driver when netapp_enable_multiattach is enabled. Test driver behavior when the netapp_enable_multiattach configuration option is True. 
""" def setUp(self): super(NetAppEseriesLibraryMultiAttachTestCase, self).setUp() config = eseries_fake.create_configuration_eseries() config.netapp_enable_multiattach = True kwargs = {'configuration': config} self.library = library.NetAppESeriesLibrary("FAKE", **kwargs) self.library._client = eseries_fake.FakeEseriesClient() self.mock_object(library.cinder_utils, 'synchronized', mock.Mock(return_value=lambda f: f)) self.mock_object(self.library, '_start_periodic_tasks', new_attr=mock.Mock()) self.ctxt = context.get_admin_context() with mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new = cinder_utils.ZeroIntervalLoopingCall): self.library.check_for_setup_error() def test_do_setup_host_group_already_exists(self): mock_check_flags = self.mock_object(na_utils, 'check_flags') self.mock_object(self.library, '_check_mode_get_or_register_storage_system') fake_rest_client = eseries_fake.FakeEseriesClient() self.mock_object(self.library, '_create_rest_client', mock.Mock(return_value=fake_rest_client)) mock_create = self.mock_object(fake_rest_client, 'create_host_group') self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertFalse(mock_create.call_count) def test_do_setup_host_group_does_not_exist(self): mock_check_flags = self.mock_object(na_utils, 'check_flags') fake_rest_client = eseries_fake.FakeEseriesClient() self.mock_object(self.library, '_create_rest_client', mock.Mock(return_value=fake_rest_client)) mock_get_host_group = self.mock_object( fake_rest_client, "get_host_group_by_name", mock.Mock(side_effect=exception.NotFound)) self.mock_object(self.library, '_check_mode_get_or_register_storage_system') self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertTrue(mock_get_host_group.call_count) def test_create_volume(self): self.library._client.create_volume = mock.Mock( return_value=eseries_fake.VOLUME) update_members = self.mock_object(self.library, '_update_consistency_group_members') self.library.create_volume(get_fake_volume()) self.assertTrue(self.library._client.create_volume.call_count) update_members.assert_not_called() @ddt.data(('netapp_eseries_flash_read_cache', 'flash_cache', 'true'), ('netapp_eseries_flash_read_cache', 'flash_cache', 'false'), ('netapp_eseries_flash_read_cache', 'flash_cache', None), ('netapp_thin_provisioned', 'thin_provision', 'true'), ('netapp_thin_provisioned', 'thin_provision', 'false'), ('netapp_thin_provisioned', 'thin_provision', None), ('netapp_eseries_data_assurance', 'data_assurance', 'true'), ('netapp_eseries_data_assurance', 'data_assurance', 'false'), ('netapp_eseries_data_assurance', 'data_assurance', None), ('netapp:write_cache', 'write_cache', 'true'), ('netapp:write_cache', 'write_cache', 'false'), ('netapp:write_cache', 'write_cache', None), ('netapp:read_cache', 'read_cache', 'true'), ('netapp:read_cache', 'read_cache', 'false'), ('netapp:read_cache', 'read_cache', None), ('netapp_eseries_flash_read_cache', 'flash_cache', 'True'), ('netapp_eseries_flash_read_cache', 'flash_cache', '1'), ('netapp_eseries_data_assurance', 'data_assurance', '')) @ddt.unpack def test_create_volume_with_extra_spec(self, spec, key, value): fake_volume = get_fake_volume() extra_specs = {spec: value} volume = copy.deepcopy(eseries_fake.VOLUME) self.library._client.create_volume = mock.Mock( return_value=volume) # Make this utility method return our extra spec mocked_spec_method = self.mock_object(na_utils, 'get_volume_extra_specs') mocked_spec_method.return_value = extra_specs 
self.library.create_volume(fake_volume) self.assertEqual(1, self.library._client.create_volume.call_count) # Ensure create_volume is called with the correct argument args, kwargs = self.library._client.create_volume.call_args self.assertIn(key, kwargs) if value is not None: expected = na_utils.to_bool(value) else: expected = value self.assertEqual(expected, kwargs[key]) def test_create_volume_too_many_volumes(self): self.library._client.list_volumes = mock.Mock( return_value=[eseries_fake.VOLUME for __ in range(utils.MAX_LUNS_PER_HOST_GROUP + 1)]) self.library._client.create_volume = mock.Mock( return_value=eseries_fake.VOLUME) self.assertRaises(exception.NetAppDriverException, self.library.create_volume, get_fake_volume()) self.assertFalse(self.library._client.create_volume.call_count) @ddt.data(0, 1, 2) def test_create_snapshot(self, group_count): """Successful Snapshot creation test""" fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) self.library._get_volume = mock.Mock(return_value=fake_eseries_volume) fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) fake_cinder_snapshot = copy.deepcopy( eseries_fake.FAKE_CINDER_SNAPSHOT) fake_snapshot_group_list = eseries_fake.list_snapshot_groups( group_count) fake_snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) fake_snapshot_image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) self.library._client.create_snapshot_group = mock.Mock( return_value=fake_snapshot_group) self.library._client.list_snapshot_groups = mock.Mock( return_value=fake_snapshot_group_list) self.library._client.create_snapshot_image = mock.Mock( return_value=fake_snapshot_image) self.library.create_snapshot(fake_cinder_snapshot) @ddt.data(0, 1, 3) def test_create_cloned_volume(self, snapshot_group_count): """Test creating a cloned volume with different existing snapshot group counts.
""" fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) self.library._get_volume = mock.Mock(return_value=fake_eseries_volume) fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) fake_snapshot_group_list = eseries_fake.list_snapshot_groups( snapshot_group_count) self.library._client.list_snapshot_groups = mock.Mock( return_value=fake_snapshot_group_list) fake_snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) self.library._client.create_snapshot_group = mock.Mock( return_value=fake_snapshot_group) fake_snapshot_image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) self.library._client.create_snapshot_image = mock.Mock( return_value=fake_snapshot_image) self.library._get_snapshot_group_for_snapshot = mock.Mock( return_value=copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)) fake_created_volume = copy.deepcopy(eseries_fake.VOLUMES[1]) self.library.create_volume_from_snapshot = mock.Mock( return_value = fake_created_volume) fake_cinder_volume = copy.deepcopy(eseries_fake.FAKE_CINDER_VOLUME) extend_vol = {'id': uuid.uuid4(), 'size': 10} self.mock_object(self.library, '_create_volume_from_snapshot') self.library.create_cloned_volume(extend_vol, fake_cinder_volume) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new = cinder_utils.ZeroIntervalLoopingCall) def test_create_volume_from_snapshot(self): fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) self.mock_object(self.library, "_schedule_and_create_volume", mock.Mock(return_value=fake_eseries_volume)) self.mock_object(self.library, "_get_snapshot", mock.Mock(return_value=copy.deepcopy( eseries_fake.SNAPSHOT_IMAGE))) self.library.create_volume_from_snapshot( get_fake_volume(), fake_snap) self.assertEqual( 1, self.library._schedule_and_create_volume.call_count) def test_create_volume_from_snapshot_create_fails(self): fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) self.mock_object(self.library, "_schedule_and_create_volume", mock.Mock(return_value=fake_dest_eseries_volume)) self.mock_object(self.library._client, "delete_volume") self.mock_object(self.library._client, "delete_snapshot_volume") self.mock_object(self.library, "_get_snapshot", mock.Mock(return_value=copy.deepcopy( eseries_fake.SNAPSHOT_IMAGE))) self.mock_object(self.library._client, "create_snapshot_volume", mock.Mock( side_effect=exception.NetAppDriverException)) self.assertRaises(exception.NetAppDriverException, self.library.create_volume_from_snapshot, get_fake_volume(), fake_snapshot.fake_snapshot_obj(None)) self.assertEqual( 1, self.library._schedule_and_create_volume.call_count) # Ensure the volume we were going to copy to is cleaned up self.library._client.delete_volume.assert_called_once_with( fake_dest_eseries_volume['volumeRef']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new = cinder_utils.ZeroIntervalLoopingCall) def test_create_volume_from_snapshot_copy_job_fails(self): fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) self.mock_object(self.library, "_schedule_and_create_volume", mock.Mock(return_value=fake_dest_eseries_volume)) self.mock_object(self.library, "_create_snapshot_volume", mock.Mock(return_value=fake_dest_eseries_volume)) self.mock_object(self.library._client, "delete_volume") self.mock_object(self.library, "_get_snapshot", mock.Mock(return_value=copy.deepcopy( eseries_fake.SNAPSHOT_IMAGE))) fake_failed_volume_copy_job = copy.deepcopy( 
eseries_fake.VOLUME_COPY_JOB) fake_failed_volume_copy_job['status'] = 'failed' self.mock_object(self.library._client, "create_volume_copy_job", mock.Mock(return_value=fake_failed_volume_copy_job)) self.mock_object(self.library._client, "list_vol_copy_job", mock.Mock(return_value=fake_failed_volume_copy_job)) self.assertRaises(exception.NetAppDriverException, self.library.create_volume_from_snapshot, get_fake_volume(), fake_snapshot.fake_snapshot_obj(None)) self.assertEqual( 1, self.library._schedule_and_create_volume.call_count) # Ensure the volume we were going to copy to is cleaned up self.library._client.delete_volume.assert_called_once_with( fake_dest_eseries_volume['volumeRef']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new = cinder_utils.ZeroIntervalLoopingCall) def test_create_volume_from_snapshot_fail_to_delete_snapshot_volume(self): fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_dest_eseries_volume['volumeRef'] = 'fake_volume_ref' self.mock_object(self.library, "_schedule_and_create_volume", mock.Mock(return_value=fake_dest_eseries_volume)) self.mock_object(self.library, "_get_snapshot", mock.Mock(return_value=copy.deepcopy( eseries_fake.SNAPSHOT_IMAGE))) self.mock_object(self.library, '_create_snapshot_volume', mock.Mock(return_value=copy.deepcopy( eseries_fake.SNAPSHOT_VOLUME))) self.mock_object(self.library, "_create_snapshot_volume", mock.Mock(return_value=copy.deepcopy( eseries_fake.VOLUME))) self.mock_object(self.library._client, "delete_snapshot_volume", mock.Mock(side_effect=exception.NetAppDriverException) ) self.mock_object(self.library._client, "delete_volume") self.library.create_volume_from_snapshot( get_fake_volume(), fake_snapshot.fake_snapshot_obj(None)) self.assertEqual( 1, self.library._schedule_and_create_volume.call_count) self.assertEqual( 1, self.library._client.delete_snapshot_volume.call_count) # Ensure the volume we created is not cleaned up self.assertEqual(0, self.library._client.delete_volume.call_count) def test_create_snapshot_volume_cgsnap(self): image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) self.mock_object(self.library, '_get_snapshot_group', mock.Mock( return_value=grp)) expected = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) self.mock_object(self.library, '_is_cgsnapshot', mock.Mock( return_value=True)) create_view = self.mock_object( self.library._client, 'create_cg_snapshot_view', mock.Mock(return_value=expected)) result = self.library._create_snapshot_volume(image) self.assertEqual(expected, result) create_view.assert_called_once_with(image['consistencyGroupId'], mock.ANY, image['id']) def test_create_snapshot_volume(self): image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) self.mock_object(self.library, '_get_snapshot_group', mock.Mock( return_value=grp)) expected = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) self.mock_object(self.library, '_is_cgsnapshot', mock.Mock( return_value=False)) create_view = self.mock_object( self.library._client, 'create_snapshot_volume', mock.Mock(return_value=expected)) result = self.library._create_snapshot_volume(image) self.assertEqual(expected, result) create_view.assert_called_once_with( image['pitRef'], mock.ANY, image['baseVol']) def test_create_snapshot_group(self): label = 'label' vol = copy.deepcopy(eseries_fake.VOLUME) snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) snapshot_group['baseVolume'] = vol['id'] get_call = self.mock_object( 
self.library, '_get_storage_pools', mock.Mock(return_value=None)) create_call = self.mock_object( self.library._client, 'create_snapshot_group', mock.Mock(return_value=snapshot_group)) actual = self.library._create_snapshot_group(label, vol) get_call.assert_not_called() create_call.assert_called_once_with(label, vol['id'], repo_percent=20) self.assertEqual(snapshot_group, actual) def test_create_snapshot_group_legacy_ddp(self): self.library._client.features.REST_1_3_RELEASE = False vol = copy.deepcopy(eseries_fake.VOLUME) pools = copy.deepcopy(eseries_fake.STORAGE_POOLS) pool = pools[-1] snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) snapshot_group['baseVolume'] = vol['id'] vol['volumeGroupRef'] = pool['id'] pool['raidLevel'] = 'raidDiskPool' get_call = self.mock_object( self.library, '_get_storage_pools', mock.Mock(return_value=pools)) create_call = self.mock_object( self.library._client, 'create_snapshot_group', mock.Mock(return_value=snapshot_group)) actual = self.library._create_snapshot_group('label', vol) create_call.assert_called_with('label', vol['id'], vol['volumeGroupRef'], repo_percent=mock.ANY) get_call.assert_called_once_with() self.assertEqual(snapshot_group, actual) def test_create_snapshot_group_legacy_vg(self): self.library._client.features.REST_1_3_RELEASE = False vol = copy.deepcopy(eseries_fake.VOLUME) vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi pools = copy.deepcopy(eseries_fake.STORAGE_POOLS) pool = pools[0] pool['raidLevel'] = 'raid6' snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) snapshot_group['baseVolume'] = vol['id'] vol['volumeGroupRef'] = pool['id'] get_call = self.mock_object( self.library, '_get_sorted_available_storage_pools', mock.Mock(return_value=pools)) self.mock_object(self.library._client, 'create_snapshot_group', mock.Mock(return_value=snapshot_group)) actual = self.library._create_snapshot_group('label', vol) get_call.assert_called_once_with(vol_size_gb) self.assertEqual(snapshot_group, actual) def test_get_snapshot(self): fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) get_snap = self.mock_object( self.library._client, 'list_snapshot_image', mock.Mock( return_value=snap)) result = self.library._get_snapshot(fake_snap) self.assertEqual(snap, result) get_snap.assert_called_once_with(fake_snap['provider_id']) def test_get_snapshot_fail(self): fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) get_snap = self.mock_object( self.library._client, 'list_snapshot_image', mock.Mock( side_effect=exception.NotFound)) self.assertRaises(exception.NotFound, self.library._get_snapshot, fake_snap) get_snap.assert_called_once_with(fake_snap['provider_id']) def test_get_snapshot_group_for_snapshot(self): fake_id = 'id' snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) get_snap = self.mock_object( self.library, '_get_snapshot', mock.Mock(return_value=snap)) get_grp = self.mock_object(self.library._client, 'list_snapshot_group', mock.Mock(return_value=grp)) result = self.library._get_snapshot_group_for_snapshot(fake_id) self.assertEqual(grp, result) get_grp.assert_called_once_with(snap['pitGroupRef']) get_snap.assert_called_once_with(fake_id) def test_get_snapshot_group_for_snapshot_fail(self): fake_id = 'id' snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) get_snap = self.mock_object( self.library, '_get_snapshot', mock.Mock(return_value=snap)) get_grp = self.mock_object(self.library._client, 
'list_snapshot_group', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(exception.NotFound, self.library._get_snapshot_group_for_snapshot, fake_id) get_grp.assert_called_once_with(snap['pitGroupRef']) get_snap.assert_called_once_with(fake_id) def test_get_snapshot_groups_for_volume(self): vol = copy.deepcopy(eseries_fake.VOLUME) snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) snapshot_group['baseVolume'] = vol['id'] # Generate some snapshot groups that will not match snapshot_groups = [copy.deepcopy(snapshot_group) for i in range( self.library.MAX_SNAPSHOT_GROUP_COUNT)] for i, group in enumerate(snapshot_groups): group['baseVolume'] = str(i) snapshot_groups.append(snapshot_group) get_call = self.mock_object( self.library._client, 'list_snapshot_groups', mock.Mock( return_value=snapshot_groups)) groups = self.library._get_snapshot_groups_for_volume(vol) get_call.assert_called_once_with() self.assertEqual([snapshot_group], groups) def test_get_available_snapshot_group(self): vol = copy.deepcopy(eseries_fake.VOLUME) snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) snapshot_group['baseVolume'] = vol['id'] snapshot_group['snapshotCount'] = 0 # Generate some snapshot groups that will not match reserved_group = copy.deepcopy(snapshot_group) reserved_group['label'] += self.library.SNAPSHOT_VOL_COPY_SUFFIX full_group = copy.deepcopy(snapshot_group) full_group['snapshotCount'] = self.library.MAX_SNAPSHOT_COUNT cgroup = copy.deepcopy(snapshot_group) cgroup['consistencyGroup'] = True snapshot_groups = [snapshot_group, reserved_group, full_group, cgroup] get_call = self.mock_object( self.library, '_get_snapshot_groups_for_volume', mock.Mock( return_value=snapshot_groups)) group = self.library._get_available_snapshot_group(vol) get_call.assert_called_once_with(vol) self.assertEqual(snapshot_group, group) def test_get_snapshot_groups_for_volume_not_found(self): vol = copy.deepcopy(eseries_fake.VOLUME) snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) snapshot_group['baseVolume'] = vol['id'] snapshot_group['snapshotCount'] = self.library.MAX_SNAPSHOT_COUNT # Generate some snapshot groups that will not match get_call = self.mock_object( self.library, '_get_snapshot_groups_for_volume', mock.Mock( return_value=[snapshot_group])) group = self.library._get_available_snapshot_group(vol) get_call.assert_called_once_with(vol) self.assertIsNone(group) def test_create_snapshot_available_snap_group(self): expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) expected = {'provider_id': expected_snap['id']} vol = copy.deepcopy(eseries_fake.VOLUME) snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) fake_label = 'fakeName' self.mock_object(self.library, '_get_volume', mock.Mock( return_value=vol)) create_call = self.mock_object( self.library._client, 'create_snapshot_image', mock.Mock( return_value=expected_snap)) self.mock_object(self.library, '_get_available_snapshot_group', mock.Mock(return_value=snapshot_group)) self.mock_object(utils, 'convert_uuid_to_es_fmt', mock.Mock(return_value=fake_label)) fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) model_update = self.library.create_snapshot(fake_snapshot) self.assertEqual(expected, model_update) create_call.assert_called_once_with(snapshot_group['id']) @ddt.data(False, True) def test_create_snapshot_failure(self, cleanup_failure): """Validate the behavior for a failure during snapshot creation""" vol = copy.deepcopy(eseries_fake.VOLUME) snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) 
snap_vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) fake_label = 'fakeName' create_fail_exc = exception.NetAppDriverException('fail_create') cleanup_fail_exc = exception.NetAppDriverException('volume_deletion') if cleanup_failure: exc_msg = cleanup_fail_exc.msg delete_snap_grp = self.mock_object( self.library, '_delete_snapshot_group', mock.Mock(side_effect=cleanup_fail_exc)) else: exc_msg = create_fail_exc.msg delete_snap_grp = self.mock_object( self.library, '_delete_snapshot_group') self.mock_object(self.library, '_get_volume', mock.Mock( return_value=vol)) self.mock_object(self.library._client, 'create_snapshot_image', mock.Mock( side_effect=create_fail_exc)) self.mock_object(self.library._client, 'create_snapshot_volume', mock.Mock(return_value=snap_vol)) self.mock_object(self.library, '_get_available_snapshot_group', mock.Mock(return_value=snapshot_group)) self.mock_object(utils, 'convert_uuid_to_es_fmt', mock.Mock(return_value=fake_label)) fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) self.assertRaisesRegexp(exception.NetAppDriverException, exc_msg, self.library.create_snapshot, fake_snapshot) self.assertTrue(delete_snap_grp.called) def test_create_snapshot_no_snap_group(self): self.library._client.features = mock.Mock() expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) vol = copy.deepcopy(eseries_fake.VOLUME) snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) fake_label = 'fakeName' self.mock_object(self.library, '_get_volume', mock.Mock( return_value=vol)) create_call = self.mock_object( self.library._client, 'create_snapshot_image', mock.Mock( return_value=expected_snap)) self.mock_object(self.library, '_get_snapshot_groups_for_volume', mock.Mock(return_value=[snapshot_group])) self.mock_object(self.library, '_get_available_snapshot_group', mock.Mock(return_value=None)) self.mock_object(utils, 'convert_uuid_to_es_fmt', mock.Mock(return_value=fake_label)) fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) snapshot = self.library.create_snapshot(fake_snapshot) expected = {'provider_id': expected_snap['id']} self.assertEqual(expected, snapshot) create_call.assert_called_once_with(snapshot_group['id']) def test_create_snapshot_no_snapshot_groups_remaining(self): """Test the failure condition where all snap groups are allocated""" self.library._client.features = mock.Mock() expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) vol = copy.deepcopy(eseries_fake.VOLUME) snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) snap_vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) grp_count = (self.library.MAX_SNAPSHOT_GROUP_COUNT - self.library.RESERVED_SNAPSHOT_GROUP_COUNT) fake_label = 'fakeName' self.mock_object(self.library, '_get_volume', mock.Mock( return_value=vol)) self.mock_object(self.library._client, 'create_snapshot_image', mock.Mock(return_value=expected_snap)) self.mock_object(self.library._client, 'create_snapshot_volume', mock.Mock(return_value=snap_vol)) self.mock_object(self.library, '_get_available_snapshot_group', mock.Mock(return_value=None)) self.mock_object(self.library, '_get_snapshot_groups_for_volume', mock.Mock(return_value=[snapshot_group] * grp_count)) self.mock_object(utils, 'convert_uuid_to_es_fmt', mock.Mock(return_value=fake_label)) fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) # Error message should contain the maximum number of supported # snapshots self.assertRaisesRegexp(exception.SnapshotLimitExceeded, str(self.library.MAX_SNAPSHOT_COUNT * grp_count), 
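
    # Editor's note (illustrative, not part of the original suite): the next
    # test derives the per-volume snapshot ceiling from the driver constants
    # referenced throughout this class. Assuming those constants, the
    # arithmetic it asserts on is:
    #
    #     usable_groups = (MAX_SNAPSHOT_GROUP_COUNT -
    #                      RESERVED_SNAPSHOT_GROUP_COUNT)
    #     max_snapshots_per_volume = MAX_SNAPSHOT_COUNT * usable_groups
    #
    # and the SnapshotLimitExceeded message is expected to contain that total.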

    def test_create_snapshot_no_snapshot_groups_remaining(self):
        """Test the failure condition where all snap groups are allocated"""
        self.library._client.features = mock.Mock()
        expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        vol = copy.deepcopy(eseries_fake.VOLUME)
        snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
        snap_vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
        grp_count = (self.library.MAX_SNAPSHOT_GROUP_COUNT -
                     self.library.RESERVED_SNAPSHOT_GROUP_COUNT)
        fake_label = 'fakeName'
        self.mock_object(self.library, '_get_volume', mock.Mock(
            return_value=vol))
        self.mock_object(self.library._client, 'create_snapshot_image',
                         mock.Mock(return_value=expected_snap))
        self.mock_object(self.library._client, 'create_snapshot_volume',
                         mock.Mock(return_value=snap_vol))
        self.mock_object(self.library, '_get_available_snapshot_group',
                         mock.Mock(return_value=None))
        self.mock_object(self.library, '_get_snapshot_groups_for_volume',
                         mock.Mock(return_value=[snapshot_group] * grp_count))
        self.mock_object(utils, 'convert_uuid_to_es_fmt',
                         mock.Mock(return_value=fake_label))
        fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)

        # Error message should contain the maximum number of supported
        # snapshots
        self.assertRaisesRegexp(exception.SnapshotLimitExceeded,
                                str(self.library.MAX_SNAPSHOT_COUNT *
                                    grp_count),
                                self.library.create_snapshot,
                                fake_snapshot)

    def test_delete_snapshot(self):
        fake_vol = cinder_utils.create_volume(self.ctxt)
        fake_snap = cinder_utils.create_snapshot(self.ctxt, fake_vol['id'])
        snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        vol = copy.deepcopy(eseries_fake.VOLUME)
        self.mock_object(self.library, '_get_volume', mock.Mock(
            return_value=vol))
        self.mock_object(self.library, '_get_snapshot', mock.Mock(
            return_value=snap))
        del_snap = self.mock_object(self.library, '_delete_es_snapshot',
                                    mock.Mock())

        self.library.delete_snapshot(fake_snap)

        del_snap.assert_called_once_with(snap)

    def test_delete_es_snapshot(self):
        vol = copy.deepcopy(eseries_fake.VOLUME)
        snap_count = 30
        # Ensure that it's the oldest PIT
        snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
        fake_volume_refs = ['1', '2', snap['baseVol']]
        fake_snapshot_group_refs = ['3', '4', snapshot_group['id']]
        snapshots = [copy.deepcopy(snap) for i in range(snap_count)]
        bitset = na_utils.BitSet(0)
        for i, snapshot in enumerate(snapshots):
            volume_ref = fake_volume_refs[i % len(fake_volume_refs)]
            group_ref = fake_snapshot_group_refs[
                i % len(fake_snapshot_group_refs)]
            snapshot['pitGroupRef'] = group_ref
            snapshot['baseVol'] = volume_ref
            snapshot['pitSequenceNumber'] = str(i)
            snapshot['id'] = i
            bitset.set(i)
        snapshots.append(snap)

        filtered_snaps = list(filter(lambda x: x['pitGroupRef'] == snap[
            'pitGroupRef'], snapshots))

        self.mock_object(self.library, '_get_volume', mock.Mock(
            return_value=vol))
        self.mock_object(self.library, '_get_snapshot', mock.Mock(
            return_value=snap))
        self.mock_object(self.library, '_get_soft_delete_map', mock.Mock(
            return_value={snap['pitGroupRef']: repr(bitset)}))
        self.mock_object(self.library._client, 'list_snapshot_images',
                         mock.Mock(return_value=snapshots))
        delete_image = self.mock_object(
            self.library, '_cleanup_snapshot_images',
            mock.Mock(return_value=({snap['pitGroupRef']: repr(bitset)},
                                    None)))

        self.library._delete_es_snapshot(snap)

        delete_image.assert_called_once_with(filtered_snaps, bitset)

    def test_delete_snapshot_oldest(self):
        vol = copy.deepcopy(eseries_fake.VOLUME)
        snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        snapshots = [snap]
        self.mock_object(self.library, '_get_volume', mock.Mock(
            return_value=vol))
        self.mock_object(self.library, '_get_snapshot', mock.Mock(
            return_value=snap))
        self.mock_object(self.library, '_get_soft_delete_map', mock.Mock(
            return_value={}))
        self.mock_object(self.library._client, 'list_snapshot_images',
                         mock.Mock(return_value=snapshots))
        delete_image = self.mock_object(
            self.library, '_cleanup_snapshot_images',
            mock.Mock(return_value=(None, [snap['pitGroupRef']])))

        self.library._delete_es_snapshot(snap)

        delete_image.assert_called_once_with(snapshots, na_utils.BitSet(1))

    def test_get_soft_delete_map(self):
        fake_val = 'fake'
        self.mock_object(self.library._client, 'list_backend_store',
                         mock.Mock(return_value=fake_val))

        actual = self.library._get_soft_delete_map()

        self.assertEqual(fake_val, actual)

    def test_cleanup_snapshot_images_delete_all(self):
        image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        images = [image] * 32
        bitset = na_utils.BitSet()
        for i, image in enumerate(images):
            image['pitSequenceNumber'] = i
            bitset.set(i)
        delete_grp = self.mock_object(self.library._client,
                                      'delete_snapshot_group')

        updt, keys = self.library._cleanup_snapshot_images(
            images, bitset)

        delete_grp.assert_called_once_with(image['pitGroupRef'])
        self.assertIsNone(updt)
        self.assertEqual([image['pitGroupRef']], keys)
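
    # Editor's sketch (illustrative, not part of the original suite): the
    # cleanup tests below lean on the BitSet semantics used above, where one
    # bit marks one snapshot image as soft deleted and purging the N oldest
    # images re-bases the set with a right shift:
    #
    #     bitset = na_utils.BitSet()
    #     for i in range(4):
    #         bitset.set(i)    # four images marked for deletion
    #     bitset >>= 2         # the two oldest were purged; two marks remain,
    #                          # now at positions 0 and 1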

    def test_cleanup_snapshot_images_delete_all_fail(self):
        image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        bitset = na_utils.BitSet(2 ** 32 - 1)
        delete_grp = self.mock_object(
            self.library._client, 'delete_snapshot_group',
            mock.Mock(side_effect=exception.NetAppDriverException))

        updt, keys = self.library._cleanup_snapshot_images(
            [image], bitset)

        delete_grp.assert_called_once_with(image['pitGroupRef'])
        self.assertIsNone(updt)
        self.assertEqual([image['pitGroupRef']], keys)

    def test_cleanup_snapshot_images(self):
        image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        images = [image] * 32
        del_count = 16
        bitset = na_utils.BitSet()
        for i, image in enumerate(images):
            image['pitSequenceNumber'] = i
            if i < del_count:
                bitset.set(i)
        exp_bitset = copy.deepcopy(bitset)
        exp_bitset >>= 16
        delete_img = self.mock_object(
            self.library, '_delete_snapshot_image')

        updt, keys = self.library._cleanup_snapshot_images(
            images, bitset)

        self.assertEqual(del_count, delete_img.call_count)
        self.assertIsNone(keys)
        self.assertEqual({image['pitGroupRef']: exp_bitset}, updt)

    def test_delete_snapshot_image(self):
        snap_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
        snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        self.mock_object(self.library._client, 'list_snapshot_group',
                         mock.Mock(return_value=snap_group))

        self.library._delete_snapshot_image(snap)

    def test_delete_snapshot_image_fail_cleanup(self):
        snap_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
        snap_group['snapshotCount'] = 0
        snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        self.mock_object(self.library._client, 'list_snapshot_group',
                         mock.Mock(return_value=snap_group))

        self.library._delete_snapshot_image(snap)

    def test_delete_snapshot_not_found(self):
        fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
        get_snap = self.mock_object(self.library, '_get_snapshot',
                                    mock.Mock(side_effect=exception.NotFound))

        with mock.patch.object(library, 'LOG', mock.Mock()):
            self.library.delete_snapshot(fake_snapshot)

        get_snap.assert_called_once_with(fake_snapshot)
        self.assertTrue(library.LOG.warning.called)

    @ddt.data(['key1', 'key2'], [], None)
    def test_merge_soft_delete_changes_keys(self, keys_to_del):
        count = len(keys_to_del) if keys_to_del is not None else 0
        save_store = self.mock_object(
            self.library._client, 'save_backend_store')
        index = {'key1': 'val'}
        get_store = self.mock_object(self.library, '_get_soft_delete_map',
                                     mock.Mock(return_value=index))

        self.library._merge_soft_delete_changes(None, keys_to_del)

        if count:
            expected = copy.deepcopy(index)
            for key in keys_to_del:
                expected.pop(key, None)
            get_store.assert_called_once_with()
            save_store.assert_called_once_with(
                self.library.SNAPSHOT_PERSISTENT_STORE_KEY, expected)
        else:
            get_store.assert_not_called()
            save_store.assert_not_called()
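
    # Editor's note (illustrative, not part of the original suite): as the
    # tests above encode it, the soft-delete state is a plain dict persisted
    # in the array's backend store under SNAPSHOT_PERSISTENT_STORE_KEY,
    # mapping a snapshot-group ref to a repr()'d BitSet, e.g.
    # {'fake_pit_group_ref': repr(na_utils.BitSet(0b101))}; merging changes
    # is a dict update plus key removals followed by one save_backend_store()
    # call.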

    def test_create_cgsnapshot(self):
        fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT)
        fake_vol = cinder_utils.create_volume(self.ctxt)
        fake_snapshots = [cinder_utils.create_snapshot(self.ctxt,
                                                       fake_vol['id'])]
        vol = copy.deepcopy(eseries_fake.VOLUME)
        image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        image['baseVol'] = vol['id']
        cg_snaps = [image]
        cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP)
        for snap in cg_snaps:
            snap['baseVol'] = vol['id']
        get_cg = self.mock_object(
            self.library, '_get_consistencygroup_by_name',
            mock.Mock(return_value=cg))
        get_vol = self.mock_object(
            self.library, '_get_volume', mock.Mock(return_value=vol))
        mk_snap = self.mock_object(
            self.library._client, 'create_consistency_group_snapshot',
            mock.Mock(return_value=cg_snaps))

        model_update, snap_updt = self.library.create_cgsnapshot(
            fake_cgsnapshot, fake_snapshots)

        self.assertIsNone(model_update)
        for snap in cg_snaps:
            self.assertIn({'id': fake_snapshots[0]['id'],
                           'provider_id': snap['id'],
                           'status': 'available'}, snap_updt)
        self.assertEqual(len(cg_snaps), len(snap_updt))
        get_cg.assert_called_once_with(utils.convert_uuid_to_es_fmt(
            fake_cgsnapshot['consistencygroup_id']))
        self.assertEqual(get_vol.call_count, len(fake_snapshots))
        mk_snap.assert_called_once_with(cg['id'])

    def test_create_cgsnapshot_cg_fail(self):
        fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT)
        fake_snapshots = [copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)]
        self.mock_object(
            self.library, '_get_consistencygroup_by_name',
            mock.Mock(side_effect=exception.NetAppDriverException))

        self.assertRaises(
            exception.NetAppDriverException,
            self.library.create_cgsnapshot, fake_cgsnapshot, fake_snapshots)

    def test_delete_cgsnapshot(self):
        """Test the deletion of a cgsnapshot when a soft delete is required"""
        fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT)
        fake_vol = cinder_utils.create_volume(self.ctxt)
        fake_snapshots = [cinder_utils.create_snapshot(
            self.ctxt, fake_vol['id'])]
        cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP)
        cg_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        # Ensure that the snapshot to be deleted is not the oldest
        cg_snap['pitSequenceNumber'] = str(max(cg['uniqueSequenceNumber']))
        cg_snaps = [cg_snap]
        for snap in fake_snapshots:
            snap['provider_id'] = cg_snap['id']
        vol = copy.deepcopy(eseries_fake.VOLUME)
        for snap in cg_snaps:
            snap['baseVol'] = vol['id']
        get_cg = self.mock_object(
            self.library, '_get_consistencygroup_by_name',
            mock.Mock(return_value=cg))
        self.mock_object(
            self.library._client, 'delete_consistency_group_snapshot')
        self.mock_object(
            self.library._client, 'get_consistency_group_snapshots',
            mock.Mock(return_value=cg_snaps))
        soft_del = self.mock_object(
            self.library, '_soft_delete_cgsnapshot',
            mock.Mock(return_value=(None, None)))

        # Mock the locking mechanism
        model_update, snap_updt = self.library.delete_cgsnapshot(
            fake_cgsnapshot, fake_snapshots)

        self.assertIsNone(model_update)
        self.assertIsNone(snap_updt)
        get_cg.assert_called_once_with(utils.convert_uuid_to_es_fmt(
            fake_cgsnapshot['consistencygroup_id']))
        soft_del.assert_called_once_with(
            cg, cg_snap['pitSequenceNumber'])

    @ddt.data(True, False)
    def test_soft_delete_cgsnapshot(self, bitset_exists):
        """Test the soft deletion of a cgsnapshot"""
        cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP)
        cg_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        seq_num = 10
        cg_snap['pitSequenceNumber'] = seq_num
        cg_snaps = [cg_snap]
        self.mock_object(
            self.library._client, 'delete_consistency_group_snapshot')
        self.mock_object(
            self.library._client, 'get_consistency_group_snapshots',
            mock.Mock(return_value=cg_snaps))
        bitset = na_utils.BitSet(1)
        index = {cg['id']: repr(bitset)} if bitset_exists else {}
        bitset >>= len(cg_snaps)
        updt = {cg['id']: repr(bitset)}
        self.mock_object(self.library, '_get_soft_delete_map', mock.Mock(
            return_value=index))
        save_map = self.mock_object(
            self.library, '_merge_soft_delete_changes')

        model_update, snap_updt = self.library._soft_delete_cgsnapshot(
            cg, seq_num)

        self.assertIsNone(model_update)
        self.assertIsNone(snap_updt)
        save_map.assert_called_once_with(updt, None)

    def test_delete_cgsnapshot_single(self):
        """Test the backend deletion of the oldest cgsnapshot"""
        fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT)
        fake_vol = cinder_utils.create_volume(self.ctxt)
        fake_snapshots = [cinder_utils.create_snapshot(self.ctxt,
                                                       fake_vol['id'])]
        cg_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        cg_snaps = [cg_snap]
        for snap in fake_snapshots:
            snap['provider_id'] = cg_snap['id']
        cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP)
        cg['uniqueSequenceNumber'] = [cg_snap['pitSequenceNumber']]
        vol = copy.deepcopy(eseries_fake.VOLUME)
        for snap in cg_snaps:
            snap['baseVol'] = vol['id']
        get_cg = self.mock_object(
            self.library, '_get_consistencygroup_by_name',
            mock.Mock(return_value=cg))
        del_snap = self.mock_object(
            self.library._client, 'delete_consistency_group_snapshot',
            mock.Mock(return_value=cg_snaps))

        model_update, snap_updt = self.library.delete_cgsnapshot(
            fake_cgsnapshot, fake_snapshots)

        self.assertIsNone(model_update)
        self.assertIsNone(snap_updt)
        get_cg.assert_called_once_with(utils.convert_uuid_to_es_fmt(
            fake_cgsnapshot['consistencygroup_id']))
        del_snap.assert_called_once_with(cg['id'], cg_snap[
            'pitSequenceNumber'])

    def test_delete_cgsnapshot_snap_not_found(self):
        fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT)
        fake_vol = cinder_utils.create_volume(self.ctxt)
        fake_snapshots = [cinder_utils.create_snapshot(
            self.ctxt, fake_vol['id'])]
        cg_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
        cg_snaps = [cg_snap]
        cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP)
        self.mock_object(self.library, '_get_consistencygroup_by_name',
                         mock.Mock(return_value=cg))
        self.mock_object(
            self.library._client, 'delete_consistency_group_snapshot',
            mock.Mock(return_value=cg_snaps))

        self.assertRaises(
            exception.CgSnapshotNotFound,
            self.library.delete_cgsnapshot, fake_cgsnapshot, fake_snapshots)

    @ddt.data(0, 1, 10, 32)
    def test_cleanup_cg_snapshots(self, count):
        # Set the soft delete bit for 'count' snapshot images
        bitset = na_utils.BitSet()
        for i in range(count):
            bitset.set(i)
        cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP)
        # Define 32 snapshots for the CG
        cg['uniqueSequenceNumber'] = list(range(32))
        cg_id = cg['id']
        del_snap = self.mock_object(
            self.library._client, 'delete_consistency_group_snapshot')
        expected_bitset = copy.deepcopy(bitset) >> count
        expected_updt = {cg_id: repr(expected_bitset)}

        updt = self.library._cleanup_cg_snapshots(
            cg_id, cg['uniqueSequenceNumber'], bitset)

        self.assertEqual(count, del_snap.call_count)
        self.assertEqual(expected_updt, updt)

    @ddt.data(False, True)
    def test_get_pool_operation_progress(self, expect_complete):
        """Validate the operation progress is interpreted correctly"""
        pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
        if expect_complete:
            pool_progress = []
        else:
            pool_progress = copy.deepcopy(
                eseries_fake.FAKE_POOL_ACTION_PROGRESS)
        expected_actions = set(action['currentAction'] for action in
                               pool_progress)
        expected_eta = reduce(lambda x, y: x + y['estimatedTimeToCompletion'],
                              pool_progress, 0)
        self.library._client.get_pool_operation_progress = mock.Mock(
            return_value=pool_progress)

        complete, actions, eta = self.library._get_pool_operation_progress(
            pool['id'])

        self.assertEqual(expect_complete, complete)
        self.assertEqual(expected_actions, actions)
        self.assertEqual(expected_eta, eta)
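
    # Editor's note (illustrative, not part of the original suite): the
    # expected ETA computed above is simply the sum of every action's
    # 'estimatedTimeToCompletion'; the reduce() expression is equivalent to
    #
    #     expected_eta = sum(a['estimatedTimeToCompletion']
    #                        for a in pool_progress)
    #
    # (reduce is presumably imported near the top of this module; on
    # Python 3 it lives in functools rather than the builtins.)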

    @ddt.data(False, True)
    def test_get_pool_operation_progress_with_action(self, expect_complete):
        """Validate the operation progress is interpreted correctly"""
        expected_action = 'fakeAction'
        pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
        if expect_complete:
            pool_progress = copy.deepcopy(
                eseries_fake.FAKE_POOL_ACTION_PROGRESS)
            for progress in pool_progress:
                progress['currentAction'] = 'none'
        else:
            pool_progress = copy.deepcopy(
                eseries_fake.FAKE_POOL_ACTION_PROGRESS)
            pool_progress[0]['currentAction'] = expected_action
        expected_actions = set(action['currentAction'] for action in
                               pool_progress)
        expected_eta = reduce(lambda x, y: x + y['estimatedTimeToCompletion'],
                              pool_progress, 0)
        self.library._client.get_pool_operation_progress = mock.Mock(
            return_value=pool_progress)

        complete, actions, eta = self.library._get_pool_operation_progress(
            pool['id'], expected_action)

        self.assertEqual(expect_complete, complete)
        self.assertEqual(expected_actions, actions)
        self.assertEqual(expected_eta, eta)

    @mock.patch('eventlet.greenthread.sleep')
    def test_extend_volume(self, _mock_sleep):
        """Test volume extend with a thick-provisioned volume"""
        def get_copy_progress():
            for eta in range(5, -1, -1):
                action_status = 'none' if eta == 0 else 'remappingDve'
                complete = action_status == 'none'
                yield complete, action_status, eta

        fake_volume = copy.deepcopy(get_fake_volume())
        volume = copy.deepcopy(eseries_fake.VOLUME)
        new_capacity = 10
        volume['objectType'] = 'volume'
        self.library._client.expand_volume = mock.Mock()
        self.library._get_pool_operation_progress = mock.Mock(
            side_effect=get_copy_progress())
        self.library._get_volume = mock.Mock(return_value=volume)

        self.library.extend_volume(fake_volume, new_capacity)

        # Ensure that the extend method waits until the expansion is completed
        self.assertEqual(
            6, self.library._get_pool_operation_progress.call_count)
        self.library._client.expand_volume.assert_called_with(volume['id'],
                                                              new_capacity,
                                                              False)

    def test_extend_volume_thin(self):
        """Test volume extend with a thin-provisioned volume"""
        fake_volume = copy.deepcopy(get_fake_volume())
        volume = copy.deepcopy(eseries_fake.VOLUME)
        new_capacity = 10
        volume['objectType'] = 'thinVolume'
        self.library._client.expand_volume = mock.Mock(return_value=volume)
        self.library._get_volume_operation_progress = mock.Mock()
        self.library._get_volume = mock.Mock(return_value=volume)

        self.library.extend_volume(fake_volume, new_capacity)

        self.assertFalse(self.library._get_volume_operation_progress.called)
        self.library._client.expand_volume.assert_called_with(volume['id'],
                                                              new_capacity,
                                                              True)
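
    # Editor's note (illustrative, not part of the original suite): taken
    # together, the two extend tests pin down the contract assumed for
    # expand_volume(object_id, new_capacity, thin): a thick volume
    # (objectType == 'volume') passes False and is polled until the
    # background expansion completes, while a thin volume passes True and
    # returns immediately with no progress polling.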

    def test_delete_non_existing_volume(self):
        volume2 = get_fake_volume()
        # Change to a nonexistent id.
        volume2['name_id'] = '88888888-4444-4444-4444-cccccccccccc'
        self.assertIsNone(self.library.delete_volume(volume2))

    def test_map_volume_to_host_volume_not_mapped(self):
        """Map the volume directly to destination host."""
        self.mock_object(self.library._client,
                         'get_volume_mappings_for_volume',
                         mock.Mock(return_value=[]))
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))

        self.library.map_volume_to_host(get_fake_volume(),
                                        eseries_fake.VOLUME,
                                        eseries_fake.INITIATOR_NAME_2)

        self.assertTrue(
            self.library._client.get_volume_mappings_for_volume.called)
        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    def test_map_volume_to_host_volume_not_mapped_host_does_not_exist(self):
        """Should create the host map directly to the host."""
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[]))
        self.mock_object(self.library._client, 'create_host_with_ports',
                         mock.Mock(
                             return_value=eseries_fake.HOST_2))
        self.mock_object(self.library._client,
                         'get_volume_mappings_for_volume',
                         mock.Mock(return_value=[]))
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))

        self.library.map_volume_to_host(get_fake_volume(),
                                        eseries_fake.VOLUME,
                                        eseries_fake.INITIATOR_NAME_2)

        self.assertTrue(self.library._client.create_host_with_ports.called)
        self.assertTrue(
            self.library._client.get_volume_mappings_for_volume.called)
        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    def test_map_volume_to_host_volume_already_mapped(self):
        """Should be a no-op."""
        self.mock_object(host_mapper, 'map_volume_to_multiple_hosts',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))

        self.library.map_volume_to_host(get_fake_volume(),
                                        eseries_fake.VOLUME,
                                        eseries_fake.INITIATOR_NAME)

        self.assertTrue(host_mapper.map_volume_to_multiple_hosts.called)
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/__init__.py0000664000567000056710000000000012701406250027520 0ustar jenkinsjenkins00000000000000
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/test_fc_driver.py0000664000567000056710000000253212701406250030777 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade
# Copyright (c) 2015 Yogesh Kshirsagar
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from cinder import test
from cinder.tests.unit.volume.drivers.netapp.eseries import test_driver
import cinder.volume.drivers.netapp.eseries.fc_driver as fc
from cinder.volume.drivers.netapp import utils as na_utils


class NetAppESeriesFibreChannelDriverTestCase(test_driver
                                              .NetAppESeriesDriverTestCase,
                                              test.TestCase):

    PROTOCOL = 'fc'

    @mock.patch.object(na_utils, 'validate_instantiation')
    def test_instantiation(self, mock_validate_instantiation):
        fc.NetAppEseriesFibreChannelDriver(configuration=mock.Mock())

        self.assertTrue(mock_validate_instantiation.called)
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/test_utils.py0000664000567000056710000000245012701406250030173 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Clinton Knight. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp E-series driver utility module
"""

import six

from cinder import test
from cinder.volume.drivers.netapp.eseries import utils


class NetAppEseriesDriverUtilsTestCase(test.TestCase):

    def test_convert_uuid_to_es_fmt(self):
        value = 'e67e931a-b2ed-4890-938b-3acc6a517fac'
        result = utils.convert_uuid_to_es_fmt(value)
        self.assertEqual('4Z7JGGVS5VEJBE4LHLGGUUL7VQ', result)

    def test_convert_es_fmt_to_uuid(self):
        value = '4Z7JGGVS5VEJBE4LHLGGUUL7VQ'
        result = six.text_type(utils.convert_es_fmt_to_uuid(value))
        self.assertEqual('e67e931a-b2ed-4890-938b-3acc6a517fac', result)
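
# Editor's sketch (illustrative, not part of the original module): the
# expected strings above are consistent with the E-Series label format being
# the RFC 4648 base32 encoding of the UUID's 16 raw bytes with the '='
# padding stripped:
#
#     import base64
#     import uuid
#
#     u = uuid.UUID('e67e931a-b2ed-4890-938b-3acc6a517fac')
#     label = base64.b32encode(u.bytes).decode('ascii').rstrip('=')
#     assert label == '4Z7JGGVS5VEJBE4LHLGGUUL7VQ'
#
# The authoritative logic lives in
# cinder.volume.drivers.netapp.eseries.utils.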
"""Mock unit tests for the NetApp E-series iscsi driver.""" import copy import mock import six from cinder import exception from cinder import test from cinder.tests.unit.volume.drivers.netapp.eseries \ import fakes as eseries_fakes from cinder.volume.drivers.netapp.eseries import host_mapper from cinder.volume.drivers.netapp.eseries import utils def get_fake_volume(): return { 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1, 'volume_name': 'lun1', 'host': 'hostname@backend#DDP', 'os_type': 'linux', 'provider_location': 'lun1', 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'provider_auth': 'provider a b', 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None, 'migration_status': None, 'attach_status': "detached", "status": "available" } FAKE_MAPPINGS = [{u'lun': 1}] FAKE_USED_UP_MAPPINGS = [{u'lun': n} for n in range(256)] FAKE_USED_UP_LUN_ID_DICT = {n: 1 for n in range(256)} FAKE_UNUSED_LUN_ID = set([]) FAKE_USED_LUN_ID_DICT = ({0: 1, 1: 1}) FAKE_USED_LUN_IDS = [1, 2] FAKE_SINGLE_USED_LUN_ID = 1 FAKE_USED_UP_LUN_IDS = range(256) class NetAppEseriesHostMapperTestCase(test.TestCase): def setUp(self): super(NetAppEseriesHostMapperTestCase, self).setUp() self.client = eseries_fakes.FakeEseriesClient() def test_unmap_volume_from_host_volume_mapped_to_host(self): fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_eseries_volume['listOfMappings'] = [ eseries_fakes.VOLUME_MAPPING ] self.mock_object(self.client, 'list_volumes', mock.Mock(return_value=[fake_eseries_volume])) self.mock_object(self.client, 'delete_volume_mapping') host_mapper.unmap_volume_from_host(self.client, get_fake_volume(), eseries_fakes.HOST, eseries_fakes.VOLUME_MAPPING) self.assertTrue(self.client.delete_volume_mapping.called) def test_unmap_volume_from_host_volume_mapped_to_different_host(self): fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) # Mapped to host 1 fake_eseries_volume['listOfMappings'] = [ eseries_fakes.VOLUME_MAPPING ] self.mock_object(self.client, 'list_volumes', mock.Mock(return_value=[fake_eseries_volume])) self.mock_object(self.client, 'delete_volume_mapping') self.mock_object(self.client, 'get_host_group', mock.Mock( side_effect=exception.NotFound)) err = self.assertRaises(exception.NetAppDriverException, host_mapper.unmap_volume_from_host, self.client, get_fake_volume(), eseries_fakes.HOST_2, eseries_fakes.VOLUME_MAPPING) self.assertIn("not currently mapped to host", six.text_type(err)) def test_unmap_volume_from_host_volume_mapped_to_host_group_but_not_host( self): """Test volume mapped to host not in specified host group. Ensure an error is raised if the specified host is not in the host group the volume is mapped to. 
""" fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) fake_volume_mapping['mapRef'] = eseries_fakes.MULTIATTACH_HOST_GROUP[ 'clusterRef'] fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] self.mock_object(self.client, 'list_volumes', mock.Mock(return_value=[fake_eseries_volume])) fake_host = copy.deepcopy(eseries_fakes.HOST) fake_host['clusterRef'] = utils.NULL_REF self.mock_object(self.client, 'list_hosts', mock.Mock(return_value=[fake_host])) err = self.assertRaises(exception.NetAppDriverException, host_mapper.unmap_volume_from_host, self.client, get_fake_volume(), fake_host, fake_volume_mapping) self.assertIn("not currently mapped to host", six.text_type(err)) def test_unmap_volume_from_host_volume_mapped_to_multiattach_host_group( self): fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) fake_volume_mapping['mapRef'] = eseries_fakes.MULTIATTACH_HOST_GROUP[ 'clusterRef'] fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] self.mock_object(self.client, 'delete_volume_mapping') self.mock_object(self.client, 'list_volumes', mock.Mock(return_value=[fake_eseries_volume])) fake_volume = get_fake_volume() fake_volume['status'] = 'detaching' host_mapper.unmap_volume_from_host(self.client, fake_volume, eseries_fakes.HOST, fake_volume_mapping) self.assertTrue(self.client.delete_volume_mapping.called) def test_unmap_volume_from_host_volume_mapped_to_multiattach_host_group_and_migrating( # noqa self): fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) fake_volume_mapping['mapRef'] = eseries_fakes.MULTIATTACH_HOST_GROUP[ 'clusterRef'] fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] self.mock_object(self.client, 'delete_volume_mapping') self.mock_object(self.client, 'list_volumes', mock.Mock(return_value=[fake_eseries_volume])) fake_volume = get_fake_volume() fake_volume['status'] = 'in-use' host_mapper.unmap_volume_from_host(self.client, fake_volume, eseries_fakes.HOST, fake_volume_mapping) self.assertFalse(self.client.delete_volume_mapping.called) def test_unmap_volume_from_host_volume_mapped_to_outside_host_group(self): """Test volume mapped to host group without host. Ensure we raise error when we find a volume is mapped to an unknown host group that does not have the host. """ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) fake_ref = "8500000060080E500023C7340036035F515B78FD" fake_volume_mapping['mapRef'] = fake_ref fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] self.mock_object(self.client, 'list_volumes', mock.Mock(return_value=[fake_eseries_volume])) fake_host = copy.deepcopy(eseries_fakes.HOST) fake_host['clusterRef'] = utils.NULL_REF self.mock_object(self.client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.mock_object(self.client, 'get_host_group', mock.Mock(return_value= eseries_fakes.FOREIGN_HOST_GROUP)) err = self.assertRaises(exception.NetAppDriverException, host_mapper.unmap_volume_from_host, self.client, get_fake_volume(), eseries_fakes.HOST, fake_volume_mapping) self.assertIn("unsupported host group", six.text_type(err)) def test_unmap_volume_from_host_volume_mapped_to_outside_host_group_w_host( self): """Test volume mapped to host in unknown host group. 
Ensure we raise error when we find a volume is mapped to an unknown host group that has the host. """ fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) fake_ref = "8500000060080E500023C7340036035F515B78FD" fake_volume_mapping['mapRef'] = fake_ref fake_eseries_volume['clusterRef'] = fake_ref fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] self.mock_object(self.client, 'list_volumes', mock.Mock(return_value=[fake_eseries_volume])) fake_host = copy.deepcopy(eseries_fakes.HOST) fake_host['clusterRef'] = utils.NULL_REF self.mock_object(self.client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.mock_object(self.client, 'get_host_group', mock.Mock(return_value= eseries_fakes.FOREIGN_HOST_GROUP)) err = self.assertRaises(exception.NetAppDriverException, host_mapper.unmap_volume_from_host, self.client, get_fake_volume(), eseries_fakes.HOST, fake_volume_mapping) self.assertIn("unsupported host group", six.text_type(err)) def test_map_volume_to_single_host_volume_not_mapped(self): self.mock_object(self.client, 'create_volume_mapping', mock.Mock( return_value=eseries_fakes.VOLUME_MAPPING)) host_mapper.map_volume_to_single_host(self.client, get_fake_volume(), eseries_fakes.VOLUME, eseries_fakes.HOST, None, False) self.assertTrue(self.client.create_volume_mapping.called) def test_map_volume_to_single_host_volume_already_mapped_to_target_host( self): """Should be a no-op""" self.mock_object(self.client, 'create_volume_mapping', mock.Mock()) host_mapper.map_volume_to_single_host(self.client, get_fake_volume(), eseries_fakes.VOLUME, eseries_fakes.HOST, eseries_fakes.VOLUME_MAPPING, False) self.assertFalse(self.client.create_volume_mapping.called) def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group( self): """Test map volume to a single host. Should move mapping to target host if volume is not migrating or attached(in-use). If volume is not in use then it should not require a mapping making it ok to sever the mapping to the host group. 
""" fake_mapping_to_other_host = copy.deepcopy( eseries_fakes.VOLUME_MAPPING) fake_mapping_to_other_host['mapRef'] = \ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] self.mock_object(self.client, 'move_volume_mapping_via_symbol', mock.Mock(return_value={'lun': 5})) host_mapper.map_volume_to_single_host(self.client, get_fake_volume(), eseries_fakes.VOLUME, eseries_fakes.HOST, fake_mapping_to_other_host, False) self.assertTrue(self.client.move_volume_mapping_via_symbol.called) def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group_and_migrating( # noqa self): """Should raise error saying multiattach not enabled""" fake_mapping_to_other_host = copy.deepcopy( eseries_fakes.VOLUME_MAPPING) fake_mapping_to_other_host['mapRef'] = \ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] fake_volume = get_fake_volume() fake_volume['attach_status'] = "attached" err = self.assertRaises(exception.NetAppDriverException, host_mapper.map_volume_to_single_host, self.client, fake_volume, eseries_fakes.VOLUME, eseries_fakes.HOST, fake_mapping_to_other_host, False) self.assertIn('multiattach is disabled', six.text_type(err)) def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group_and_attached( # noqa self): """Should raise error saying multiattach not enabled""" fake_mapping_to_other_host = copy.deepcopy( eseries_fakes.VOLUME_MAPPING) fake_mapping_to_other_host['mapRef'] = \ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] fake_volume = get_fake_volume() fake_volume['attach_status'] = "attached" err = self.assertRaises(exception.NetAppDriverException, host_mapper.map_volume_to_single_host, self.client, fake_volume, eseries_fakes.VOLUME, eseries_fakes.HOST, fake_mapping_to_other_host, False) self.assertIn('multiattach is disabled', six.text_type(err)) def test_map_volume_to_single_host_volume_mapped_to_another_host(self): """Should raise error saying multiattach not enabled""" fake_mapping_to_other_host = copy.deepcopy( eseries_fakes.VOLUME_MAPPING) fake_mapping_to_other_host['mapRef'] = eseries_fakes.HOST_2[ 'hostRef'] err = self.assertRaises(exception.NetAppDriverException, host_mapper.map_volume_to_single_host, self.client, get_fake_volume(), eseries_fakes.VOLUME, eseries_fakes.HOST, fake_mapping_to_other_host, False) self.assertIn('multiattach is disabled', six.text_type(err)) def test_map_volume_to_multiple_hosts_volume_already_mapped_to_target_host( self): """Should be a no-op.""" self.mock_object(self.client, 'create_volume_mapping', mock.Mock()) host_mapper.map_volume_to_multiple_hosts(self.client, get_fake_volume(), eseries_fakes.VOLUME, eseries_fakes.HOST, eseries_fakes.VOLUME_MAPPING) self.assertFalse(self.client.create_volume_mapping.called) def test_map_volume_to_multiple_hosts_volume_mapped_to_multiattach_host_group( # noqa self): """Should ensure target host is in the multiattach host group.""" fake_host = copy.deepcopy(eseries_fakes.HOST_2) fake_host['clusterRef'] = utils.NULL_REF fake_mapping_to_host_group = copy.deepcopy( eseries_fakes.VOLUME_MAPPING) fake_mapping_to_host_group['mapRef'] = \ eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] self.mock_object(self.client, 'set_host_group_for_host') self.mock_object(self.client, 'get_host_group', mock.Mock( return_value=eseries_fakes.MULTIATTACH_HOST_GROUP) ) host_mapper.map_volume_to_multiple_hosts(self.client, get_fake_volume(), eseries_fakes.VOLUME, fake_host, fake_mapping_to_host_group) self.assertEqual( 1, self.client.set_host_group_for_host.call_count) def 

    def test_map_volume_to_multiple_hosts_volume_mapped_to_multiattach_host_group_with_lun_collision(  # noqa
            self):
        """Should ensure target host is in the multiattach host group."""
        fake_host = copy.deepcopy(eseries_fakes.HOST_2)
        fake_host['clusterRef'] = utils.NULL_REF
        fake_mapping_to_host_group = copy.deepcopy(
            eseries_fakes.VOLUME_MAPPING)
        fake_mapping_to_host_group['mapRef'] = \
            eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef']
        self.mock_object(self.client, 'set_host_group_for_host',
                         mock.Mock(
                             side_effect=exception.NetAppDriverException))

        self.assertRaises(exception.NetAppDriverException,
                          host_mapper.map_volume_to_multiple_hosts,
                          self.client,
                          get_fake_volume(),
                          eseries_fakes.VOLUME,
                          fake_host,
                          fake_mapping_to_host_group)

    def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host(self):
        """Test that mapping moves to another host group.

        Should ensure both existing host and destination host are in
        multiattach host group and move the mapping to the host group.
        """
        existing_host = copy.deepcopy(eseries_fakes.HOST)
        existing_host['clusterRef'] = utils.NULL_REF
        target_host = copy.deepcopy(eseries_fakes.HOST_2)
        target_host['clusterRef'] = utils.NULL_REF
        self.mock_object(self.client, 'get_host',
                         mock.Mock(return_value=existing_host))
        self.mock_object(self.client, 'set_host_group_for_host')
        self.mock_object(self.client, 'get_host_group',
                         mock.Mock(side_effect=exception.NotFound))
        mock_move_mapping = mock.Mock(
            return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP)
        self.mock_object(self.client,
                         'move_volume_mapping_via_symbol',
                         mock_move_mapping)

        host_mapper.map_volume_to_multiple_hosts(self.client,
                                                 get_fake_volume(),
                                                 eseries_fakes.VOLUME,
                                                 target_host,
                                                 eseries_fakes.VOLUME_MAPPING)

        self.assertEqual(
            2, self.client.set_host_group_for_host.call_count)
        self.assertTrue(self.client.move_volume_mapping_via_symbol
                        .called)

    def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_source_host(  # noqa
            self):
        """Test moving source host to multiattach host group.

        Should fail attempting to move source host to multiattach host
        group and raise an error.
        """
        existing_host = copy.deepcopy(eseries_fakes.HOST)
        existing_host['clusterRef'] = utils.NULL_REF
        target_host = copy.deepcopy(eseries_fakes.HOST_2)
        target_host['clusterRef'] = utils.NULL_REF
        self.mock_object(self.client, 'get_host',
                         mock.Mock(return_value=existing_host))
        self.mock_object(self.client, 'set_host_group_for_host',
                         mock.Mock(side_effect=[
                             None,
                             exception.NetAppDriverException
                         ]))
        self.mock_object(self.client, 'get_host_group',
                         mock.Mock(side_effect=exception.NotFound))
        mock_move_mapping = mock.Mock(
            return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP)
        self.mock_object(self.client,
                         'move_volume_mapping_via_symbol',
                         mock_move_mapping)

        self.assertRaises(exception.NetAppDriverException,
                          host_mapper.map_volume_to_multiple_hosts,
                          self.client,
                          get_fake_volume(),
                          eseries_fakes.VOLUME,
                          target_host,
                          eseries_fakes.VOLUME_MAPPING)

    def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_dest_host(  # noqa
            self):
        """Test moving destination host to multiattach host group.

        Should fail attempting to move destination host to multiattach
        host group and raise an error.
        """
        existing_host = copy.deepcopy(eseries_fakes.HOST)
        existing_host['clusterRef'] = utils.NULL_REF
        target_host = copy.deepcopy(eseries_fakes.HOST_2)
        target_host['clusterRef'] = utils.NULL_REF
        self.mock_object(self.client, 'get_host',
                         mock.Mock(return_value=existing_host))
        self.mock_object(self.client, 'set_host_group_for_host',
                         mock.Mock(side_effect=[
                             exception.NetAppDriverException,
                             None
                         ]))
        self.mock_object(self.client, 'get_host_group',
                         mock.Mock(side_effect=exception.NotFound))
        mock_move_mapping = mock.Mock(
            return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP)
        self.mock_object(self.client,
                         'move_volume_mapping_via_symbol',
                         mock_move_mapping)

        self.assertRaises(exception.NetAppDriverException,
                          host_mapper.map_volume_to_multiple_hosts,
                          self.client,
                          get_fake_volume(),
                          eseries_fakes.VOLUME,
                          target_host,
                          eseries_fakes.VOLUME_MAPPING)

    def test_map_volume_to_multiple_hosts_volume_mapped_to_foreign_host_group(
            self):
        """Test a target when the host is in a foreign host group.

        Should raise an error stating the volume is mapped to an
        unsupported host group.
        """
        fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
        fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
        fake_ref = "8500000060080E500023C7340036035F515B78FD"
        fake_volume_mapping['mapRef'] = fake_ref
        self.mock_object(self.client, 'list_volumes',
                         mock.Mock(return_value=[fake_eseries_volume]))
        fake_host = copy.deepcopy(eseries_fakes.HOST)
        fake_host['clusterRef'] = utils.NULL_REF
        self.mock_object(self.client, 'get_host_group',
                         mock.Mock(
                             return_value=eseries_fakes.FOREIGN_HOST_GROUP))

        err = self.assertRaises(exception.NetAppDriverException,
                                host_mapper.map_volume_to_multiple_hosts,
                                self.client,
                                get_fake_volume(),
                                eseries_fakes.VOLUME,
                                fake_host,
                                fake_volume_mapping)
        self.assertIn("unsupported host group", six.text_type(err))

    def test_map_volume_to_multiple_hosts_volume_mapped_to_host_in_foreign_host_group(  # noqa
            self):
        """Test a target when the host is in a foreign host group.

        Should raise an error stating the volume is mapped to a host that
        is in an unsupported host group.
        """
        fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
        fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
        fake_host = copy.deepcopy(eseries_fakes.HOST_2)
        fake_host['clusterRef'] = eseries_fakes.FOREIGN_HOST_GROUP[
            'clusterRef']
        fake_volume_mapping['mapRef'] = fake_host['hostRef']
        fake_eseries_volume['listOfMappings'] = [fake_volume_mapping]
        self.mock_object(self.client, 'list_volumes',
                         mock.Mock(return_value=[fake_eseries_volume]))
        self.mock_object(self.client, 'get_host',
                         mock.Mock(return_value=fake_host))
        self.mock_object(self.client, 'get_host_group',
                         mock.Mock(side_effect=[
                             eseries_fakes.FOREIGN_HOST_GROUP]))

        err = self.assertRaises(exception.NetAppDriverException,
                                host_mapper.map_volume_to_multiple_hosts,
                                self.client,
                                get_fake_volume(),
                                eseries_fakes.VOLUME,
                                eseries_fakes.HOST,
                                fake_volume_mapping)

        self.assertIn("unsupported host group", six.text_type(err))

    def test_map_volume_to_multiple_hosts_volume_target_host_in_foreign_host_group(  # noqa
            self):
        """Test a target when the host is in a foreign host group.

        Should raise an error stating the target host is in an
        unsupported host group.
        """
        fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
        fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING)
        fake_host = copy.deepcopy(eseries_fakes.HOST_2)
        fake_host['clusterRef'] = eseries_fakes.FOREIGN_HOST_GROUP[
            'clusterRef']
        self.mock_object(self.client, 'list_volumes',
                         mock.Mock(return_value=[fake_eseries_volume]))
        self.mock_object(self.client, 'get_host',
                         mock.Mock(return_value=eseries_fakes.HOST))
        self.mock_object(self.client, 'get_host_group',
                         mock.Mock(side_effect=[
                             eseries_fakes.FOREIGN_HOST_GROUP]))

        err = self.assertRaises(exception.NetAppDriverException,
                                host_mapper.map_volume_to_multiple_hosts,
                                self.client,
                                get_fake_volume(),
                                eseries_fakes.VOLUME,
                                fake_host,
                                fake_volume_mapping)

        self.assertIn("unsupported host group", six.text_type(err))

    def test_get_unused_lun_ids(self):
        unused_lun_ids = host_mapper._get_unused_lun_ids(FAKE_MAPPINGS)
        self.assertEqual(set(range(2, 256)), unused_lun_ids)

    def test_get_unused_lun_id_counter(self):
        used_lun_id_count = host_mapper._get_used_lun_id_counter(
            FAKE_MAPPINGS)
        self.assertEqual(FAKE_USED_LUN_ID_DICT, used_lun_id_count)

    def test_get_unused_lun_ids_used_up_luns(self):
        unused_lun_ids = host_mapper._get_unused_lun_ids(
            FAKE_USED_UP_MAPPINGS)
        self.assertEqual(FAKE_UNUSED_LUN_ID, unused_lun_ids)

    def test_get_lun_id_counter_used_up_luns(self):
        used_lun_ids = host_mapper._get_used_lun_id_counter(
            FAKE_USED_UP_MAPPINGS)
        self.assertEqual(FAKE_USED_UP_LUN_ID_DICT, used_lun_ids)

    def test_host_not_full(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST)
        self.assertFalse(host_mapper._is_host_full(self.client, fake_host))

    def test_host_full(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST)
        self.mock_object(self.client, 'get_volume_mappings_for_host',
                         mock.Mock(return_value=FAKE_USED_UP_MAPPINGS))
        self.assertTrue(host_mapper._is_host_full(self.client, fake_host))

    def test_get_free_lun(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST)
        with mock.patch('random.sample') as mock_random:
            mock_random.return_value = [3]
            lun = host_mapper._get_free_lun(self.client, fake_host, False, [])
        self.assertEqual(3, lun)

    def test_get_free_lun_host_full(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST)
        self.mock_object(host_mapper, '_is_host_full',
                         mock.Mock(return_value=True))
        self.assertRaises(
            exception.NetAppDriverException,
            host_mapper._get_free_lun,
            self.client, fake_host, False, FAKE_USED_UP_MAPPINGS)

    def test_get_free_lun_no_unused_luns(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST)
        lun = host_mapper._get_free_lun(self.client, fake_host, False,
                                        FAKE_USED_UP_MAPPINGS)
        self.assertEqual(255, lun)

    def test_get_free_lun_no_unused_luns_host_not_full(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST)
        self.mock_object(host_mapper, '_is_host_full',
                         mock.Mock(return_value=False))
        lun = host_mapper._get_free_lun(self.client, fake_host, False,
                                        FAKE_USED_UP_MAPPINGS)
        self.assertEqual(255, lun)

    def test_get_free_lun_no_lun_available(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST_3)
        self.mock_object(self.client, 'get_volume_mappings_for_host',
                         mock.Mock(return_value=FAKE_USED_UP_MAPPINGS))

        self.assertRaises(exception.NetAppDriverException,
                          host_mapper._get_free_lun,
                          self.client, fake_host, False,
                          FAKE_USED_UP_MAPPINGS)

    def test_get_free_lun_multiattach_enabled_no_unused_ids(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST_3)
        self.mock_object(self.client, 'get_volume_mappings',
                         mock.Mock(return_value=FAKE_USED_UP_MAPPINGS))

        self.assertRaises(exception.NetAppDriverException,
                          host_mapper._get_free_lun,
                          self.client, fake_host, True,
                          FAKE_USED_UP_MAPPINGS)

    def test_get_lun_by_mapping(self):
        used_luns = host_mapper._get_used_lun_ids_for_mappings(FAKE_MAPPINGS)
        self.assertEqual(set([0, 1]), used_luns)

    def test_get_lun_by_mapping_no_mapping(self):
        used_luns = host_mapper._get_used_lun_ids_for_mappings([])
        self.assertEqual(set([0]), used_luns)

    def test_lun_id_available_on_host(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST)
        self.assertTrue(host_mapper._is_lun_id_available_on_host(
            self.client, fake_host, FAKE_UNUSED_LUN_ID))

    def test_no_lun_id_available_on_host(self):
        fake_host = copy.deepcopy(eseries_fakes.HOST_3)
        self.mock_object(self.client, 'get_volume_mappings_for_host',
                         mock.Mock(return_value=FAKE_USED_UP_MAPPINGS))

        self.assertFalse(host_mapper._is_lun_id_available_on_host(
            self.client, fake_host, FAKE_SINGLE_USED_LUN_ID))
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/test_iscsi_driver.py0000664000567000056710000000241412701406250031520 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade. All rights reserved.
# Copyright (c) 2015 Michael Price. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ddt
import mock

from cinder import test
from cinder.tests.unit.volume.drivers.netapp.eseries import test_driver
from cinder.volume.drivers.netapp.eseries import iscsi_driver as iscsi
import cinder.volume.drivers.netapp.utils as na_utils


@ddt.ddt
class NetAppESeriesIscsiDriverTestCase(test_driver
                                       .NetAppESeriesDriverTestCase,
                                       test.TestCase):

    @mock.patch.object(na_utils, 'validate_instantiation')
    def test_instantiation(self, mock_validate_instantiation):
        iscsi.NetAppEseriesISCSIDriver(configuration=mock.Mock())
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py0000664000567000056710000014014012701406250030310 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade
# Copyright (c) 2015 Yogesh Kshirsagar
# Copyright (c) 2015 Michael Price
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

import ddt
import json
import mock
from simplejson import scanner

from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \
    eseries_fake
from cinder.volume.drivers.netapp.eseries import exception as es_exception
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp import utils as na_utils


@ddt.ddt
class NetAppEseriesClientDriverTestCase(test.TestCase):
    """Test case for NetApp e-series client."""

    def setUp(self):
        super(NetAppEseriesClientDriverTestCase, self).setUp()
        self.mock_log = mock.Mock()
        self.mock_object(client, 'LOG', self.mock_log)
        self.fake_password = 'mysecret'

        self.my_client = client.RestClient('http', 'host', '80', '/test',
                                           'user', self.fake_password,
                                           system_id='fake_sys_id')
        self.my_client._endpoint = eseries_fake.FAKE_ENDPOINT_HTTP

        fake_response = mock.Mock()
        fake_response.status_code = 200
        self.my_client.invoke_service = mock.Mock(return_value=fake_response)
        self.my_client.api_version = '01.52.9000.1'

    @ddt.data(200, 201, 203, 204)
    def test_eval_response_success(self, status_code):
        fake_resp = mock.Mock()
        fake_resp.status_code = status_code

        self.assertIsNone(self.my_client._eval_response(fake_resp))

    @ddt.data(300, 400, 404, 500)
    def test_eval_response_failure(self, status_code):
        fake_resp = mock.Mock()
        fake_resp.status_code = status_code
        expected_msg = "Response error code - %s." % status_code

        with self.assertRaisesRegex(es_exception.WebServiceException,
                                    expected_msg) as exc:
            self.my_client._eval_response(fake_resp)

            self.assertEqual(status_code, exc.status_code)

    @ddt.data(('30', 'storage array password.*?incorrect'),
              ('authFailPassword', 'storage array password.*?incorrect'),
              ('unknown', None))
    @ddt.unpack
    def test_eval_response_422(self, ret_code, exc_regex):
        status_code = 422
        fake_resp = mock.Mock()
        fake_resp.text = "fakeError"
        fake_resp.json = mock.Mock(return_value={'retcode': ret_code})
        fake_resp.status_code = status_code
        exc_regex = exc_regex if exc_regex is not None else fake_resp.text

        with self.assertRaisesRegexp(es_exception.WebServiceException,
                                     exc_regex) as exc:
            self.my_client._eval_response(fake_resp)

            self.assertEqual(status_code, exc.status_code)

    def test_eval_response_424(self):
        status_code = 424
        fake_resp = mock.Mock()
        fake_resp.status_code = status_code
        fake_resp.text = "Fake Error Message"

        with self.assertRaisesRegex(es_exception.WebServiceException,
                                    "The storage-system is offline") as exc:
            self.my_client._eval_response(fake_resp)

            self.assertEqual(status_code, exc.status_code)

    def test_register_storage_system_does_not_log_password(self):
        self.my_client._eval_response = mock.Mock()
        self.my_client.register_storage_system([],
                                               password=self.fake_password)
        for call in self.mock_log.debug.mock_calls:
            __, args, __ = call
            self.assertNotIn(self.fake_password, args[0])

    def test_update_stored_system_password_does_not_log_password(self):
        self.my_client._eval_response = mock.Mock()
        self.my_client.update_stored_system_password(
            password=self.fake_password)
        for call in self.mock_log.debug.mock_calls:
            __, args, __ = call
            self.assertNotIn(self.fake_password, args[0])

    def test_list_target_wwpns(self):
        fake_hardware_inventory = copy.deepcopy(
            eseries_fake.HARDWARE_INVENTORY)
        mock_hardware_inventory = mock.Mock(
            return_value=fake_hardware_inventory)
        self.mock_object(self.my_client, 'list_hardware_inventory',
                         mock_hardware_inventory)
        expected_wwpns = [eseries_fake.WWPN, eseries_fake.WWPN_2]

        actual_wwpns = self.my_client.list_target_wwpns()

        self.assertEqual(expected_wwpns, actual_wwpns)

    def test_list_target_wwpns_single_wwpn(self):
        fake_hardware_inventory = copy.deepcopy(
            eseries_fake.HARDWARE_INVENTORY)
        fake_hardware_inventory['fibrePorts'] = [
            fake_hardware_inventory['fibrePorts'][0]
        ]
        mock_hardware_inventory = mock.Mock(
            return_value=fake_hardware_inventory)
        self.mock_object(self.my_client, 'list_hardware_inventory',
                         mock_hardware_inventory)
        expected_wwpns = [eseries_fake.WWPN]

        actual_wwpns = self.my_client.list_target_wwpns()

        self.assertEqual(expected_wwpns, actual_wwpns)

    def test_list_target_wwpns_no_wwpn(self):
        fake_hardware_inventory = copy.deepcopy(
            eseries_fake.HARDWARE_INVENTORY)
        fake_hardware_inventory['fibrePorts'] = []
        mock_hardware_inventory = mock.Mock(
            return_value=fake_hardware_inventory)
        self.mock_object(self.my_client, 'list_hardware_inventory',
                         mock_hardware_inventory)
        expected_wwpns = []

        actual_wwpns = self.my_client.list_target_wwpns()

        self.assertEqual(expected_wwpns, actual_wwpns)

    def test_get_host_group_by_name(self):
        groups = copy.deepcopy(eseries_fake.HOST_GROUPS)
        group = groups[0]
        self.mock_object(self.my_client, 'list_host_groups',
                         new_attr=mock.Mock(return_value=groups))

        result = self.my_client.get_host_group_by_name(group['label'])

        self.assertEqual(group, result)

    def test_move_volume_mapping_via_symbol(self):
        invoke = self.mock_object(self.my_client, '_invoke',
                                  mock.Mock(return_value='ok'))
        host_ref = 'host'
        cluster_ref = 'cluster'
        lun_id = 10
        expected_data = {'lunMappingRef': host_ref, 'lun': lun_id,
                         'mapRef': cluster_ref}

        result = self.my_client.move_volume_mapping_via_symbol(host_ref,
                                                               cluster_ref,
                                                               lun_id)

        invoke.assert_called_once_with('POST', '/storage-systems/{system-id}/'
                                               'symbol/moveLUNMapping',
                                       expected_data)
        self.assertEqual({'lun': lun_id}, result)

    def test_move_volume_mapping_via_symbol_fail(self):
        self.mock_object(self.my_client, '_invoke',
                         mock.Mock(return_value='failure'))

        self.assertRaises(
            exception.NetAppDriverException,
            self.my_client.move_volume_mapping_via_symbol, '1', '2', 10)

    def test_create_host_from_ports_fc(self):
        label = 'fake_host'
        host_type = 'linux'
        port_type = 'fc'
        port_ids = [eseries_fake.WWPN, eseries_fake.WWPN_2]
        expected_ports = [
            {'type': port_type, 'port': eseries_fake.WWPN, 'label': mock.ANY},
            {'type': port_type, 'port': eseries_fake.WWPN_2,
             'label': mock.ANY}]
        mock_create_host = self.mock_object(self.my_client, 'create_host')

        self.my_client.create_host_with_ports(label, host_type, port_ids,
                                              port_type)

        mock_create_host.assert_called_once_with(label, host_type,
                                                 expected_ports, None)

    def test_host_from_ports_with_no_ports_provided_fc(self):
        label = 'fake_host'
        host_type = 'linux'
        port_type = 'fc'
        port_ids = []
        expected_ports = []
        mock_create_host = self.mock_object(self.my_client, 'create_host')

        self.my_client.create_host_with_ports(label, host_type, port_ids,
                                              port_type)

        mock_create_host.assert_called_once_with(label, host_type,
                                                 expected_ports, None)

    def test_create_host_from_ports_iscsi(self):
        label = 'fake_host'
        host_type = 'linux'
        port_type = 'iscsi'
        port_ids = [eseries_fake.INITIATOR_NAME,
                    eseries_fake.INITIATOR_NAME_2]
        expected_ports = [
            {'type': port_type, 'port': eseries_fake.INITIATOR_NAME,
             'label': mock.ANY},
            {'type': port_type, 'port': eseries_fake.INITIATOR_NAME_2,
             'label': mock.ANY}]
        mock_create_host = self.mock_object(self.my_client, 'create_host')

        self.my_client.create_host_with_ports(label, host_type, port_ids,
                                              port_type)

        mock_create_host.assert_called_once_with(label, host_type,
                                                 expected_ports, None)

    def test_get_volume_mappings_for_volume(self):
volume_mapping_1 = copy.deepcopy(eseries_fake.VOLUME_MAPPING) volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING) volume_mapping_2['volumeRef'] = '2' self.mock_object(self.my_client, 'get_volume_mappings', mock.Mock(return_value=[volume_mapping_1, volume_mapping_2])) mappings = self.my_client.get_volume_mappings_for_volume( eseries_fake.VOLUME) self.assertEqual([volume_mapping_1], mappings) def test_get_volume_mappings_for_host(self): volume_mapping_1 = copy.deepcopy( eseries_fake.VOLUME_MAPPING) volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING) volume_mapping_2['volumeRef'] = '2' volume_mapping_2['mapRef'] = 'hostRef' self.mock_object(self.my_client, 'get_volume_mappings', mock.Mock(return_value=[volume_mapping_1, volume_mapping_2])) mappings = self.my_client.get_volume_mappings_for_host( 'hostRef') self.assertEqual([volume_mapping_2], mappings) def test_get_volume_mappings_for_hostgroup(self): volume_mapping_1 = copy.deepcopy( eseries_fake.VOLUME_MAPPING) volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING) volume_mapping_2['volumeRef'] = '2' volume_mapping_2['mapRef'] = 'hostGroupRef' self.mock_object(self.my_client, 'get_volume_mappings', mock.Mock(return_value=[volume_mapping_1, volume_mapping_2])) mappings = self.my_client.get_volume_mappings_for_host_group( 'hostGroupRef') self.assertEqual([volume_mapping_2], mappings) def test_to_pretty_dict_string(self): dict = { 'foo': 'bar', 'fu': { 'nested': 'boo' } } expected_dict_string = ("""{ "foo": "bar", "fu": { "nested": "boo" } }""") dict_string = self.my_client._to_pretty_dict_string(dict) self.assertEqual(expected_dict_string, dict_string) def test_log_http_request(self): mock_log = self.mock_object(client, 'LOG') verb = "POST" url = "/v2/test/me" headers = {"Content-Type": "application/json"} headers_string = """{ "Content-Type": "application/json" }""" body = {} body_string = "{}" self.my_client._log_http_request(verb, url, headers, body) args = mock_log.debug.call_args log_message, log_params = args[0] final_msg = log_message % log_params self.assertIn(verb, final_msg) self.assertIn(url, final_msg) self.assertIn(headers_string, final_msg) self.assertIn(body_string, final_msg) def test_log_http_request_no_body(self): mock_log = self.mock_object(client, 'LOG') verb = "POST" url = "/v2/test/me" headers = {"Content-Type": "application/json"} headers_string = """{ "Content-Type": "application/json" }""" body = None body_string = "" self.my_client._log_http_request(verb, url, headers, body) args = mock_log.debug.call_args log_message, log_params = args[0] final_msg = log_message % log_params self.assertIn(verb, final_msg) self.assertIn(url, final_msg) self.assertIn(headers_string, final_msg) self.assertIn(body_string, final_msg) def test_log_http_response(self): mock_log = self.mock_object(client, 'LOG') status = "200" headers = {"Content-Type": "application/json"} headers_string = """{ "Content-Type": "application/json" }""" body = {} body_string = "{}" self.my_client._log_http_response(status, headers, body) args = mock_log.debug.call_args log_message, log_params = args[0] final_msg = log_message % log_params self.assertIn(status, final_msg) self.assertIn(headers_string, final_msg) self.assertIn(body_string, final_msg) def test_log_http_response_no_body(self): mock_log = self.mock_object(client, 'LOG') status = "200" headers = {"Content-Type": "application/json"} headers_string = """{ "Content-Type": "application/json" }""" body = None body_string = "" self.my_client._log_http_response(status, headers, 
body) args = mock_log.debug.call_args log_message, log_params = args[0] final_msg = log_message % log_params self.assertIn(status, final_msg) self.assertIn(headers_string, final_msg) self.assertIn(body_string, final_msg) def test_add_autosupport_data(self): self.mock_object( client.RestClient, 'get_eseries_api_info', mock.Mock(return_value=( eseries_fake.FAKE_ASUP_DATA['operating-mode'], eseries_fake.FAKE_ABOUT_RESPONSE['version']))) self.mock_object( self.my_client, 'get_asup_info', mock.Mock(return_value=eseries_fake.GET_ASUP_RETURN)) self.mock_object( self.my_client, 'set_counter', mock.Mock(return_value={'value': 1})) mock_invoke = self.mock_object( self.my_client, '_invoke', mock.Mock(return_value=eseries_fake.FAKE_ASUP_DATA)) client.RestClient.add_autosupport_data( self.my_client, eseries_fake.FAKE_KEY, eseries_fake.FAKE_ASUP_DATA ) mock_invoke.assert_called_with(*eseries_fake.FAKE_POST_INVOKE_DATA) @ddt.data((eseries_fake.FAKE_SERIAL_NUMBERS, eseries_fake.HARDWARE_INVENTORY), (eseries_fake.FAKE_DEFAULT_SERIAL_NUMBER, {}), (eseries_fake.FAKE_SERIAL_NUMBER, eseries_fake.HARDWARE_INVENTORY_SINGLE_CONTROLLER)) @ddt.unpack def test_get_asup_info_serial_numbers(self, expected_serial_numbers, controllers): self.mock_object( client.RestClient, 'list_hardware_inventory', mock.Mock(return_value=controllers)) self.mock_object( client.RestClient, 'list_storage_system', mock.Mock(return_value={})) sn = client.RestClient.get_asup_info(self.my_client)['serial_numbers'] self.assertEqual(expected_serial_numbers, sn) def test_get_asup_info_model_name(self): self.mock_object( client.RestClient, 'list_hardware_inventory', mock.Mock(return_value=eseries_fake.HARDWARE_INVENTORY)) self.mock_object( client.RestClient, 'list_storage_system', mock.Mock(return_value=eseries_fake.STORAGE_SYSTEM)) model_name = client.RestClient.get_asup_info(self.my_client)['model'] self.assertEqual(eseries_fake.HARDWARE_INVENTORY['controllers'][0] ['modelName'], model_name) def test_get_asup_info_model_name_empty_controllers_list(self): self.mock_object( client.RestClient, 'list_hardware_inventory', mock.Mock(return_value={})) self.mock_object( client.RestClient, 'list_storage_system', mock.Mock(return_value={})) model_name = client.RestClient.get_asup_info(self.my_client)['model'] self.assertEqual(eseries_fake.FAKE_DEFAULT_MODEL, model_name) def test_get_eseries_api_info(self): fake_invoke_service = mock.Mock() fake_invoke_service.json = mock.Mock( return_value=eseries_fake.FAKE_ABOUT_RESPONSE) self.mock_object( client.RestClient, '_get_resource_url', mock.Mock(return_value=eseries_fake.FAKE_RESOURCE_URL)) self.mock_object( self.my_client, 'invoke_service', mock.Mock(return_value=fake_invoke_service)) eseries_info = client.RestClient.get_eseries_api_info( self.my_client, verify=False) self.assertEqual((eseries_fake.FAKE_ASUP_DATA['operating-mode'], eseries_fake.FAKE_ABOUT_RESPONSE['version']), eseries_info) def test_list_ssc_storage_pools(self): self.my_client.features = mock.Mock() self.my_client._invoke = mock.Mock( return_value=eseries_fake.SSC_POOLS) pools = client.RestClient.list_ssc_storage_pools(self.my_client) self.assertEqual(eseries_fake.SSC_POOLS, pools) def test_get_ssc_storage_pool(self): fake_pool = eseries_fake.SSC_POOLS[0] self.my_client.features = mock.Mock() self.my_client._invoke = mock.Mock( return_value=fake_pool) pool = client.RestClient.get_ssc_storage_pool(self.my_client, fake_pool['poolId']) self.assertEqual(fake_pool, pool) @ddt.data(('volumes', True), ('volumes', False), ('volume', True), ('volume', 
False)) @ddt.unpack def test_get_volume_api_path(self, path_key, ssc_available): self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=ssc_available) expected_key = 'ssc_' + path_key if ssc_available else path_key expected = self.my_client.RESOURCE_PATHS.get(expected_key) actual = self.my_client._get_volume_api_path(path_key) self.assertEqual(expected, actual) @ddt.data(True, False) def test_get_volume_api_path_invalid(self, ssc_available): key = 'invalidKey' self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=ssc_available) self.assertRaises(KeyError, self.my_client._get_volume_api_path, key) def test_list_volumes(self): url = client.RestClient.RESOURCE_PATHS['ssc_volumes'] self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=True) self.my_client._invoke = mock.Mock( return_value=eseries_fake.VOLUMES) volumes = client.RestClient.list_volumes(self.my_client) self.assertEqual(eseries_fake.VOLUMES, volumes) self.my_client._invoke.assert_called_once_with('GET', url) @ddt.data(client.RestClient.ID, client.RestClient.WWN, client.RestClient.NAME) def test_list_volume_v1(self, uid_field_name): url = client.RestClient.RESOURCE_PATHS['volumes'] self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=False) fake_volume = copy.deepcopy(eseries_fake.VOLUME) self.my_client._invoke = mock.Mock( return_value=eseries_fake.VOLUMES) volume = client.RestClient.list_volume(self.my_client, fake_volume[uid_field_name]) self.my_client._invoke.assert_called_once_with('GET', url) self.assertEqual(fake_volume, volume) def test_list_volume_v1_not_found(self): url = client.RestClient.RESOURCE_PATHS['volumes'] self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=False) self.my_client._invoke = mock.Mock( return_value=eseries_fake.VOLUMES) self.assertRaises(exception.VolumeNotFound, client.RestClient.list_volume, self.my_client, 'fakeId') self.my_client._invoke.assert_called_once_with('GET', url) def test_list_volume_v2(self): url = client.RestClient.RESOURCE_PATHS['ssc_volume'] self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=True) fake_volume = copy.deepcopy(eseries_fake.VOLUME) self.my_client._invoke = mock.Mock(return_value=fake_volume) volume = client.RestClient.list_volume(self.my_client, fake_volume['id']) self.my_client._invoke.assert_called_once_with('GET', url, **{'object-id': mock.ANY}) self.assertEqual(fake_volume, volume) def test_list_volume_v2_not_found(self): status_code = 404 url = client.RestClient.RESOURCE_PATHS['ssc_volume'] self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=True) msg = "Response error code - %s." % status_code self.my_client._invoke = mock.Mock( side_effect=es_exception.WebServiceException(message=msg, status_code= status_code)) self.assertRaises(exception.VolumeNotFound, client.RestClient.list_volume, self.my_client, 'fakeId') self.my_client._invoke.assert_called_once_with('GET', url, **{'object-id': mock.ANY}) def test_list_volume_v2_failure(self): status_code = 422 url = client.RestClient.RESOURCE_PATHS['ssc_volume'] self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=True) msg = "Response error code - %s." 
% status_code self.my_client._invoke = mock.Mock( side_effect=es_exception.WebServiceException(message=msg, status_code= status_code)) self.assertRaises(es_exception.WebServiceException, client.RestClient.list_volume, self.my_client, 'fakeId') self.my_client._invoke.assert_called_once_with('GET', url, **{'object-id': mock.ANY}) def test_create_volume_V1(self): self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=False) create_volume = self.my_client._invoke = mock.Mock( return_value=eseries_fake.VOLUME) volume = client.RestClient.create_volume(self.my_client, 'fakePool', '1', 1) args, kwargs = create_volume.call_args verb, url, body = args # Ensure the correct API was used self.assertEqual('/storage-systems/{system-id}/volumes', url) self.assertEqual(eseries_fake.VOLUME, volume) def test_create_volume_V2(self): self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=True) create_volume = self.my_client._invoke = mock.Mock( return_value=eseries_fake.VOLUME) volume = client.RestClient.create_volume(self.my_client, 'fakePool', '1', 1) args, kwargs = create_volume.call_args verb, url, body = args # Ensure the correct API was used self.assertIn('/storage-systems/{system-id}/ssc/volumes', url, 'The legacy API was used!') self.assertEqual(eseries_fake.VOLUME, volume) def test_create_volume_unsupported_specs(self): self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=False) self.my_client.api_version = '01.52.9000.1' self.assertRaises(exception.NetAppDriverException, client.RestClient.create_volume, self.my_client, '1', 'label', 1, read_cache=True) @ddt.data(True, False) def test_update_volume(self, ssc_api_enabled): label = 'updatedName' fake_volume = copy.deepcopy(eseries_fake.VOLUME) expected_volume = copy.deepcopy(fake_volume) expected_volume['name'] = label self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=ssc_api_enabled) self.my_client._invoke = mock.Mock(return_value=expected_volume) updated_volume = self.my_client.update_volume(fake_volume['id'], label) if ssc_api_enabled: url = self.my_client.RESOURCE_PATHS.get('ssc_volume') else: url = self.my_client.RESOURCE_PATHS.get('volume') self.my_client._invoke.assert_called_once_with('POST', url, {'name': label}, **{'object-id': fake_volume['id']} ) self.assertDictMatch(expected_volume, updated_volume) def test_get_pool_operation_progress(self): fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) fake_response = copy.deepcopy(eseries_fake.FAKE_POOL_ACTION_PROGRESS) self.my_client._invoke = mock.Mock(return_value=fake_response) response = self.my_client.get_pool_operation_progress(fake_pool['id']) url = self.my_client.RESOURCE_PATHS.get('pool_operation_progress') self.my_client._invoke.assert_called_once_with('GET', url, **{'object-id': fake_pool['id']}) self.assertEqual(fake_response, response) def test_extend_volume(self): new_capacity = 10 fake_volume = copy.deepcopy(eseries_fake.VOLUME) self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=True) self.my_client._invoke = mock.Mock(return_value=fake_volume) expanded_volume = self.my_client.expand_volume(fake_volume['id'], new_capacity, False) url = self.my_client.RESOURCE_PATHS.get('volume_expand') body = {'expansionSize': new_capacity, 'sizeUnit': 'gb'} self.my_client._invoke.assert_called_once_with('POST', url, body, 
**{'object-id': fake_volume['id']}) self.assertEqual(fake_volume, expanded_volume) def test_extend_volume_thin(self): new_capacity = 10 fake_volume = copy.deepcopy(eseries_fake.VOLUME) self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=True) self.my_client._invoke = mock.Mock(return_value=fake_volume) expanded_volume = self.my_client.expand_volume(fake_volume['id'], new_capacity, True) url = self.my_client.RESOURCE_PATHS.get('thin_volume_expand') body = {'newVirtualSize': new_capacity, 'sizeUnit': 'gb', 'newRepositorySize': new_capacity} self.my_client._invoke.assert_called_once_with('POST', url, body, **{'object-id': fake_volume['id']}) self.assertEqual(fake_volume, expanded_volume) @ddt.data(True, False) def test_delete_volume(self, ssc_api_enabled): fake_volume = copy.deepcopy(eseries_fake.VOLUME) self.my_client.features = mock.Mock() self.my_client.features.SSC_API_V2 = na_utils.FeatureState( supported=ssc_api_enabled) self.my_client._invoke = mock.Mock() self.my_client.delete_volume(fake_volume['id']) if ssc_api_enabled: url = self.my_client.RESOURCE_PATHS.get('ssc_volume') else: url = self.my_client.RESOURCE_PATHS.get('volume') self.my_client._invoke.assert_called_once_with('DELETE', url, **{'object-id': fake_volume['id']}) def test_list_snapshot_group(self): grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=grp)) fake_ref = 'fake' result = self.my_client.list_snapshot_group(fake_ref) self.assertEqual(grp, result) invoke.assert_called_once_with( 'GET', self.my_client.RESOURCE_PATHS['snapshot_group'], **{'object-id': fake_ref}) def test_list_snapshot_groups(self): grps = [copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)] invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=grps)) result = self.my_client.list_snapshot_groups() self.assertEqual(grps, result) invoke.assert_called_once_with( 'GET', self.my_client.RESOURCE_PATHS['snapshot_groups']) def test_delete_snapshot_group(self): invoke = self.mock_object(self.my_client, '_invoke') fake_ref = 'fake' self.my_client.delete_snapshot_group(fake_ref) invoke.assert_called_once_with( 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_group'], **{'object-id': fake_ref}) @ddt.data((None, None, None, None, None), ('1', 50, 75, 32, 'purgepit')) @ddt.unpack def test_create_snapshot_group(self, pool_id, repo, warn, limit, policy): vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=vol)) snap_grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) result = self.my_client.create_snapshot_group( snap_grp['label'], snap_grp['id'], pool_id, repo, warn, limit, policy) self.assertEqual(vol, result) invoke.assert_called_once_with( 'POST', self.my_client.RESOURCE_PATHS['snapshot_groups'], {'baseMappableObjectId': snap_grp['id'], 'name': snap_grp['label'], 'storagePoolId': pool_id, 'repositoryPercentage': repo, 'warningThreshold': warn, 'autoDeleteLimit': limit, 'fullPolicy': policy}) def test_list_snapshot_volumes(self): vols = [copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)] invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=vols)) result = self.my_client.list_snapshot_volumes() self.assertEqual(vols, result) invoke.assert_called_once_with( 'GET', self.my_client.RESOURCE_PATHS['snapshot_volumes']) def test_delete_snapshot_volume(self): invoke = self.mock_object(self.my_client, '_invoke') fake_ref = 'fake' 
self.my_client.delete_snapshot_volume(fake_ref) invoke.assert_called_once_with( 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_volume'], **{'object-id': fake_ref}) @ddt.data((None, None, None, None), ('1', 50, 75, 'readWrite')) @ddt.unpack def test_create_snapshot_volume(self, pool_id, repo, warn, mode): vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=vol)) result = self.my_client.create_snapshot_volume( vol['basePIT'], vol['label'], vol['id'], pool_id, repo, warn, mode) self.assertEqual(vol, result) invoke.assert_called_once_with( 'POST', self.my_client.RESOURCE_PATHS['snapshot_volumes'], mock.ANY) def test_update_snapshot_volume(self): snap_id = '1' label = 'name' pct = 99 vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=vol)) result = self.my_client.update_snapshot_volume(snap_id, label, pct) self.assertEqual(vol, result) invoke.assert_called_once_with( 'POST', self.my_client.RESOURCE_PATHS['snapshot_volume'], {'name': label, 'fullThreshold': pct}, **{'object-id': snap_id}) def test_create_snapshot_image(self): img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=img)) grp_id = '1' result = self.my_client.create_snapshot_image(grp_id) self.assertEqual(img, result) invoke.assert_called_once_with( 'POST', self.my_client.RESOURCE_PATHS['snapshot_images'], {'groupId': grp_id}) def test_list_snapshot_image(self): img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=img)) fake_ref = 'fake' result = self.my_client.list_snapshot_image(fake_ref) self.assertEqual(img, result) invoke.assert_called_once_with( 'GET', self.my_client.RESOURCE_PATHS['snapshot_image'], **{'object-id': fake_ref}) def test_list_snapshot_images(self): imgs = [copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)] invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=imgs)) result = self.my_client.list_snapshot_images() self.assertEqual(imgs, result) invoke.assert_called_once_with( 'GET', self.my_client.RESOURCE_PATHS['snapshot_images']) def test_delete_snapshot_image(self): invoke = self.mock_object(self.my_client, '_invoke') fake_ref = 'fake' self.my_client.delete_snapshot_image(fake_ref) invoke.assert_called_once_with( 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_image'], **{'object-id': fake_ref}) def test_create_consistency_group(self): invoke = self.mock_object(self.my_client, '_invoke') name = 'fake' self.my_client.create_consistency_group(name) invoke.assert_called_once_with( 'POST', self.my_client.RESOURCE_PATHS['cgroups'], mock.ANY) def test_list_consistency_group(self): invoke = self.mock_object(self.my_client, '_invoke') ref = 'fake' self.my_client.get_consistency_group(ref) invoke.assert_called_once_with( 'GET', self.my_client.RESOURCE_PATHS['cgroup'], **{'object-id': ref}) def test_list_consistency_groups(self): invoke = self.mock_object(self.my_client, '_invoke') self.my_client.list_consistency_groups() invoke.assert_called_once_with( 'GET', self.my_client.RESOURCE_PATHS['cgroups']) def test_delete_consistency_group(self): invoke = self.mock_object(self.my_client, '_invoke') ref = 'fake' self.my_client.delete_consistency_group(ref) invoke.assert_called_once_with( 'DELETE', self.my_client.RESOURCE_PATHS['cgroup'], **{'object-id': ref}) def test_add_consistency_group_member(self): invoke = 
self.mock_object(self.my_client, '_invoke') vol_id = eseries_fake.VOLUME['id'] cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] self.my_client.add_consistency_group_member(vol_id, cg_id) invoke.assert_called_once_with( 'POST', self.my_client.RESOURCE_PATHS['cgroup_members'], mock.ANY, **{'object-id': cg_id}) def test_remove_consistency_group_member(self): invoke = self.mock_object(self.my_client, '_invoke') vol_id = eseries_fake.VOLUME['id'] cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] self.my_client.remove_consistency_group_member(vol_id, cg_id) invoke.assert_called_once_with( 'DELETE', self.my_client.RESOURCE_PATHS['cgroup_member'], **{'object-id': cg_id, 'vol-id': vol_id}) def test_create_consistency_group_snapshot(self): invoke = self.mock_object(self.my_client, '_invoke') path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshots') cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] self.my_client.create_consistency_group_snapshot(cg_id) invoke.assert_called_once_with('POST', path, **{'object-id': cg_id}) @ddt.data(0, 32) def test_delete_consistency_group_snapshot(self, seq_num): invoke = self.mock_object(self.my_client, '_invoke') path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshot') cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] self.my_client.delete_consistency_group_snapshot(cg_id, seq_num) invoke.assert_called_once_with( 'DELETE', path, **{'object-id': cg_id, 'seq-num': seq_num}) def test_get_consistency_group_snapshots(self): invoke = self.mock_object(self.my_client, '_invoke') path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshots') cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] self.my_client.get_consistency_group_snapshots(cg_id) invoke.assert_called_once_with( 'GET', path, **{'object-id': cg_id}) def test_create_cg_snapshot_view(self): cg_snap_view = copy.deepcopy( eseries_fake.FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME) view = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=cg_snap_view)) list_views = self.mock_object( self.my_client, 'list_cg_snapshot_views', mock.Mock(return_value=[view])) name = view['name'] snap_id = view['basePIT'] path = self.my_client.RESOURCE_PATHS.get('cgroup_cgsnap_views') cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] self.my_client.create_cg_snapshot_view(cg_id, name, snap_id) invoke.assert_called_once_with( 'POST', path, mock.ANY, **{'object-id': cg_id}) list_views.assert_called_once_with(cg_id, cg_snap_view['cgViewRef']) def test_create_cg_snapshot_view_not_found(self): cg_snap_view = copy.deepcopy( eseries_fake.FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME) view = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) invoke = self.mock_object(self.my_client, '_invoke', mock.Mock( return_value=cg_snap_view)) list_views = self.mock_object( self.my_client, 'list_cg_snapshot_views', mock.Mock(return_value=[view])) del_view = self.mock_object(self.my_client, 'delete_cg_snapshot_view') name = view['name'] # Ensure we don't get a match on the retrieved views snap_id = None path = self.my_client.RESOURCE_PATHS.get('cgroup_cgsnap_views') cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] self.assertRaises( exception.NetAppDriverException, self.my_client.create_cg_snapshot_view, cg_id, name, snap_id) invoke.assert_called_once_with( 'POST', path, mock.ANY, **{'object-id': cg_id}) list_views.assert_called_once_with(cg_id, cg_snap_view['cgViewRef']) del_view.assert_called_once_with(cg_id, cg_snap_view['id']) def test_list_cg_snapshot_views(self): invoke = 
self.mock_object(self.my_client, '_invoke') path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshot_views') cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] view_id = 'id' self.my_client.list_cg_snapshot_views(cg_id, view_id) invoke.assert_called_once_with( 'GET', path, **{'object-id': cg_id, 'view-id': view_id}) def test_delete_cg_snapshot_view(self): invoke = self.mock_object(self.my_client, '_invoke') path = self.my_client.RESOURCE_PATHS.get('cgroup_snap_view') cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] view_id = 'id' self.my_client.delete_cg_snapshot_view(cg_id, view_id) invoke.assert_called_once_with( 'DELETE', path, **{'object-id': cg_id, 'view-id': view_id}) @ddt.data('00.00.00.00', '01.52.9000.2', '01.52.9001.2', '01.51.9000.3', '01.51.9001.3', '01.51.9010.5', '0.53.9000.3', '0.53.9001.4') def test_api_version_not_support_asup(self, api_version): self.mock_object(client.RestClient, 'get_eseries_api_info', mock.Mock(return_value=('proxy', api_version))) client.RestClient._init_features(self.my_client) self.assertFalse(self.my_client.features.AUTOSUPPORT.supported) @ddt.data('01.52.9000.3', '01.52.9000.4', '01.52.8999.2', '01.52.8999.3', '01.53.8999.3', '01.53.9000.2', '02.51.9000.3', '02.52.8999.3', '02.51.8999.2') def test_api_version_supports_asup(self, api_version): self.mock_object(client.RestClient, 'get_eseries_api_info', mock.Mock(return_value=('proxy', api_version))) client.RestClient._init_features(self.my_client) self.assertTrue(self.my_client.features.AUTOSUPPORT.supported) @ddt.data('00.00.00.00', '01.52.9000.1', '01.52.9001.2', '00.53.9001.3', '01.53.9090.1', '1.53.9010.14', '0.53.9011.15') def test_api_version_not_support_ssc_api(self, api_version): self.mock_object(client.RestClient, 'get_eseries_api_info', mock.Mock(return_value=('proxy', api_version))) client.RestClient._init_features(self.my_client) self.assertFalse(self.my_client.features.SSC_API_V2.supported) @ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1', '01.53.9010.20', '01.53.9010.17', '01.54.9000.1', '02.51.9000.3', '02.52.8999.3', '02.51.8999.2') def test_api_version_supports_ssc_api(self, api_version): self.mock_object(client.RestClient, 'get_eseries_api_info', mock.Mock(return_value=('proxy', api_version))) client.RestClient._init_features(self.my_client) self.assertTrue(self.my_client.features.SSC_API_V2.supported) @ddt.data('00.00.00.00', '01.52.9000.5', '01.52.9001.2', '00.53.9001.3', '01.52.9090.1', '1.52.9010.7', '0.53.9011.7') def test_api_version_not_support_1_3(self, api_version): self.mock_object(client.RestClient, 'get_eseries_api_info', mock.Mock(return_value=('proxy', api_version))) client.RestClient._init_features(self.my_client) self.assertFalse(self.my_client.features.REST_1_3_RELEASE.supported) @ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1', '01.54.9010.20', '01.54.9000.1', '02.51.9000.3', '02.52.8999.3', '02.51.8999.2') def test_api_version_1_3(self, api_version): self.mock_object(client.RestClient, 'get_eseries_api_info', mock.Mock(return_value=('proxy', api_version))) client.RestClient._init_features(self.my_client) self.assertTrue(self.my_client.features.REST_1_3_RELEASE.supported) def test_invoke_bad_content_type(self): """Tests the invoke behavior with a non-JSON response""" fake_response = mock.Mock() fake_response.json = mock.Mock(side_effect=scanner.JSONDecodeError( '', '{}', 1)) fake_response.status_code = 424 fake_response.text = "Fake Response" self.mock_object(self.my_client, 'invoke_service', mock.Mock(return_value=fake_response)) 
self.assertRaises(es_exception.WebServiceException, self.my_client._invoke, 'GET', eseries_fake.FAKE_ENDPOINT_HTTP) def test_list_backend_store(self): path = self.my_client.RESOURCE_PATHS.get('persistent-store') fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE) invoke = self.mock_object( self.my_client, '_invoke', mock.Mock( return_value=fake_store)) expected = json.loads(fake_store.get('value')) result = self.my_client.list_backend_store('key') self.assertEqual(expected, result) invoke.assert_called_once_with('GET', path, key='key') def test_save_backend_store(self): path = self.my_client.RESOURCE_PATHS.get('persistent-stores') fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE) key = 'key' invoke = self.mock_object( self.my_client, '_invoke', mock.Mock()) self.my_client.save_backend_store(key, fake_store) invoke.assert_called_once_with('POST', path, mock.ANY) @ddt.ddt class TestWebserviceClientTestCase(test.TestCase): def setUp(self): """sets up the mock tests""" super(TestWebserviceClientTestCase, self).setUp() self.mock_log = mock.Mock() self.mock_object(client, 'LOG', self.mock_log) self.webclient = client.WebserviceClient('http', 'host', '80', '/test', 'user', '****') @ddt.data({'params': {'host': None, 'scheme': 'https', 'port': '80'}}, {'params': {'host': 'host', 'scheme': None, 'port': '80'}}, {'params': {'host': 'host', 'scheme': 'http', 'port': None}}) @ddt.unpack def test__validate_params_value_error(self, params): """Tests various scenarios for ValueError in validate method""" self.assertRaises(exception.InvalidInput, self.webclient._validate_params, **params) def test_invoke_service_no_endpoint_error(self): """Tests Exception and Log error if no endpoint is provided""" self.webclient._endpoint = None log_error = 'Unexpected error while invoking web service' self.assertRaises(exception.NetAppDriverException, self.webclient.invoke_service) self.assertTrue(self.mock_log.exception.find(log_error)) def test_invoke_service(self): """Tests if invoke_service evaluates the right response""" self.webclient._endpoint = eseries_fake.FAKE_ENDPOINT_HTTP self.mock_object(self.webclient.conn, 'request', mock.Mock(return_value=eseries_fake.FAKE_INVOC_MSG)) result = self.webclient.invoke_service() self.assertIsNotNone(result) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/eseries/fakes.py0000664000567000056710000012662112701406250027074 0ustar jenkinsjenkins00000000000000# Copyright (c) - 2015, Alex Meade # Copyright (c) - 2015, Yogesh Kshirsagar # Copyright (c) - 2015, Michael Price # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
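# NOTE(editor): The fixtures below are shared, module-level dicts; tests
# copy.deepcopy() them before mutating anything so state cannot leak from
# one test into another. A minimal sketch of that idiom, with hypothetical
# names, shown for illustration only:
#
#     import copy
#
#     BASE_VOLUME = {'id': '1', 'label': 'fake'}
#
#     def volume_fixture(**overrides):
#         vol = copy.deepcopy(BASE_VOLUME)  # isolate the shared template
#         vol.update(overrides)
#         return vol
#
#     v = volume_fixture(label='renamed')
#     assert BASE_VOLUME['label'] == 'fake'  # template is untouched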
import copy import json import mock from cinder.volume import configuration as conf from cinder.volume.drivers.netapp.eseries import utils import cinder.volume.drivers.netapp.options as na_opts import cinder.volume.drivers.netapp.utils as na_utils FAKE_CINDER_VOLUME = { 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1, 'volume_name': 'lun1', 'host': 'hostname@backend#DDP', 'os_type': 'linux', 'provider_location': 'lun1', 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'provider_auth': 'provider a b', 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None, 'migration_status': None, 'attach_status': "detached" } FAKE_CINDER_SNAPSHOT = { 'id': '78f95b9d-3f02-4781-a512-1a1c921d48a1', 'volume': FAKE_CINDER_VOLUME } FAKE_CINDER_CG = { 'id': '78f95b9d-3f02-4781-a512-1a1c951d48a2', } FAKE_CINDER_CG_SNAPSHOT = { 'id': '78f95b9d-4d13-4781-a512-1a1c951d6a6', 'consistencygroup_id': FAKE_CINDER_CG['id'], } MULTIATTACH_HOST_GROUP = { 'clusterRef': '8500000060080E500023C7340036035F515B78FC', 'label': utils.MULTI_ATTACH_HOST_GROUP_NAME, } FOREIGN_HOST_GROUP = { 'clusterRef': '8500000060080E500023C7340036035F515B78FD', 'label': 'FOREIGN HOST GROUP', } HOST_GROUPS = [MULTIATTACH_HOST_GROUP, FOREIGN_HOST_GROUP] SSC_POOLS = [ { "poolId": "0400000060080E5000290D8000009C9955828DD2", "name": "DDP", "pool": { "sequenceNum": 2, "offline": False, "raidLevel": "raidDiskPool", "worldWideName": "60080E5000290D8000009C9955828DD2", "volumeGroupRef": "0400000060080E5000290D8000009C9955828DD2", "reserved1": "000000000000000000000000", "reserved2": "", "trayLossProtection": False, "label": "DDP", "state": "complete", "spindleSpeedMatch": True, "spindleSpeed": 7200, "isInaccessible": False, "securityType": "none", "drawerLossProtection": True, "protectionInformationCapable": False, "protectionInformationCapabilities": { "protectionInformationCapable": True, "protectionType": "type2Protection" }, "volumeGroupData": { "type": "diskPool", "diskPoolData": { "reconstructionReservedDriveCount": 1, "reconstructionReservedAmt": "2992518463488", "reconstructionReservedDriveCountCurrent": 1, "poolUtilizationWarningThreshold": 100, "poolUtilizationCriticalThreshold": 100, "poolUtilizationState": "utilizationOptimal", "unusableCapacity": "0", "degradedReconstructPriority": "high", "criticalReconstructPriority": "highest", "backgroundOperationPriority": "low", "allocGranularity": "4294967296" } }, "usage": "standard", "driveBlockFormat": "allNative", "reservedSpaceAllocated": True, "usedSpace": "13653701033984", "totalRaidedSpace": "23459111370752", "extents": [ { "sectorOffset": "0", "rawCapacity": "9805410336768", "raidLevel": "raidDiskPool", "volumeGroupRef": "0400000060080E5000290D8000009C9955828DD2", "freeExtentRef": "0301000060080E5000290D8000009C9955828DD2", "reserved1": "000000000000000000000000", "reserved2": "" } ], "largestFreeExtentSize": "9805410336768", "raidStatus": "optimal", "freeSpace": "9805410336768", "drivePhysicalType": "sas", "driveMediaType": "hdd", "normalizedSpindleSpeed": "spindleSpeed7200", "id": "0400000060080E5000290D8000009C9955828DD2", "diskPool": True, "name": "DDP" }, "flashCacheCapable": True, "dataAssuranceCapable": True, "encrypted": False, "thinProvisioningCapable": True, "spindleSpeed": "spindleSpeed7200", "raidLevel": "raidDiskPool", "availableFreeExtentCapacities": [ "9805410336768" ] }, { "poolId": "0400000060080E5000290D8000009CBA55828E96", "name": "pool_raid1", "pool": { "sequenceNum": 6, "offline": False, "raidLevel": "raid1", 
"worldWideName": "60080E5000290D8000009CBA55828E96", "volumeGroupRef": "0400000060080E5000290D8000009CBA55828E96", "reserved1": "000000000000000000000000", "reserved2": "", "trayLossProtection": False, "label": "pool_raid1", "state": "complete", "spindleSpeedMatch": True, "spindleSpeed": 10000, "isInaccessible": False, "securityType": "none", "drawerLossProtection": True, "protectionInformationCapable": False, "protectionInformationCapabilities": { "protectionInformationCapable": True, "protectionType": "type2Protection" }, "volumeGroupData": { "type": "unknown", "diskPoolData": None }, "usage": "standard", "driveBlockFormat": "allNative", "reservedSpaceAllocated": True, "usedSpace": "2978559819776", "totalRaidedSpace": "6662444097536", "extents": [ { "sectorOffset": "387891200", "rawCapacity": "3683884277760", "raidLevel": "raid1", "volumeGroupRef": "0400000060080E5000290D8000009CBA55828E96", "freeExtentRef": "030000B360080E5000290D8000009CBA55828E96", "reserved1": "000000000000000000000000", "reserved2": "" } ], "largestFreeExtentSize": "3683884277760", "raidStatus": "optimal", "freeSpace": "3683884277760", "drivePhysicalType": "sas", "driveMediaType": "hdd", "normalizedSpindleSpeed": "spindleSpeed10k", "id": "0400000060080E5000290D8000009CBA55828E96", "diskPool": False, "name": "pool_raid1" }, "flashCacheCapable": False, "dataAssuranceCapable": True, "encrypted": False, "thinProvisioningCapable": False, "spindleSpeed": "spindleSpeed10k", "raidLevel": "raid1", "availableFreeExtentCapacities": [ "3683884277760" ] }, { "poolId": "0400000060080E5000290D8000009CAB55828E51", "name": "pool_raid6", "pool": { "sequenceNum": 3, "offline": False, "raidLevel": "raid6", "worldWideName": "60080E5000290D8000009CAB55828E51", "volumeGroupRef": "0400000060080E5000290D8000009CAB55828E51", "reserved1": "000000000000000000000000", "reserved2": "", "trayLossProtection": False, "label": "pool_raid6", "state": "complete", "spindleSpeedMatch": True, "spindleSpeed": 15000, "isInaccessible": False, "securityType": "enabled", "drawerLossProtection": False, "protectionInformationCapable": False, "protectionInformationCapabilities": { "protectionInformationCapable": True, "protectionType": "type2Protection" }, "volumeGroupData": { "type": "unknown", "diskPoolData": None }, "usage": "standard", "driveBlockFormat": "allNative", "reservedSpaceAllocated": True, "usedSpace": "16413217521664", "totalRaidedSpace": "16637410312192", "extents": [ { "sectorOffset": "1144950784", "rawCapacity": "224192790528", "raidLevel": "raid6", "volumeGroupRef": "0400000060080E5000290D8000009CAB55828E51", "freeExtentRef": "0300005960080E5000290D8000009CAB55828E51", "reserved1": "000000000000000000000000", "reserved2": "" } ], "largestFreeExtentSize": "224192790528", "raidStatus": "optimal", "freeSpace": "224192790528", "drivePhysicalType": "sas", "driveMediaType": "hdd", "normalizedSpindleSpeed": "spindleSpeed15k", "id": "0400000060080E5000290D8000009CAB55828E51", "diskPool": False, "name": "pool_raid6" }, "flashCacheCapable": False, "dataAssuranceCapable": True, "encrypted": True, "thinProvisioningCapable": False, "spindleSpeed": "spindleSpeed15k", "raidLevel": "raid6", "availableFreeExtentCapacities": [ "224192790528" ] } ] STORAGE_POOLS = [ssc_pool['pool'] for ssc_pool in SSC_POOLS] VOLUMES = [ { "offline": False, "extremeProtection": False, "volumeHandle": 2, "raidLevel": "raid0", "sectorOffset": "0", "worldWideName": "60080E50002998A00000945355C37C19", "label": "1", "blkSize": 512, "capacity": "10737418240", "reconPriority": 1, 
"segmentSize": 131072, "action": "initializing", "cache": { "cwob": False, "enterpriseCacheDump": False, "mirrorActive": True, "mirrorEnable": True, "readCacheActive": True, "readCacheEnable": True, "writeCacheActive": True, "writeCacheEnable": True, "cacheFlushModifier": "flush10Sec", "readAheadMultiplier": 1 }, "mediaScan": { "enable": False, "parityValidationEnable": False }, "volumeRef": "0200000060080E50002998A00000945355C37C19", "status": "optimal", "volumeGroupRef": "0400000060080E50002998A00000945255C37C14", "currentManager": "070000000000000000000001", "preferredManager": "070000000000000000000001", "perms": { "mapToLUN": True, "snapShot": True, "format": True, "reconfigure": True, "mirrorPrimary": True, "mirrorSecondary": True, "copySource": True, "copyTarget": True, "readable": True, "writable": True, "rollback": True, "mirrorSync": True, "newImage": True, "allowDVE": True, "allowDSS": True, "concatVolumeMember": True, "flashReadCache": True, "asyncMirrorPrimary": True, "asyncMirrorSecondary": True, "pitGroup": True, "cacheParametersChangeable": True, "allowThinManualExpansion": False, "allowThinGrowthParametersChange": False, "allowVaulting": False, "allowRestore": False }, "mgmtClientAttribute": 0, "dssPreallocEnabled": True, "dssMaxSegmentSize": 2097152, "preReadRedundancyCheckEnabled": False, "protectionInformationCapable": False, "protectionType": "type1Protection", "applicationTagOwned": False, "untrustworthy": 0, "volumeUse": "standardVolume", "volumeFull": False, "volumeCopyTarget": False, "volumeCopySource": False, "pitBaseVolume": False, "asyncMirrorTarget": False, "asyncMirrorSource": False, "remoteMirrorSource": False, "remoteMirrorTarget": False, "diskPool": False, "flashCached": False, "increasingBy": "0", "metadata": [], "dataAssurance": True, "name": "1", "id": "0200000060080E50002998A00000945355C37C19", "wwn": "60080E50002998A00000945355C37C19", "objectType": "volume", "mapped": False, "preferredControllerId": "070000000000000000000001", "totalSizeInBytes": "10737418240", "onlineVolumeCopy": False, "listOfMappings": [], "currentControllerId": "070000000000000000000001", "cacheSettings": { "cwob": False, "enterpriseCacheDump": False, "mirrorActive": True, "mirrorEnable": True, "readCacheActive": True, "readCacheEnable": True, "writeCacheActive": True, "writeCacheEnable": True, "cacheFlushModifier": "flush10Sec", "readAheadMultiplier": 1 }, "thinProvisioned": False }, { "volumeHandle": 16385, "worldWideName": "60080E500029347000001D7B55C3791E", "label": "2", "allocationGranularity": 128, "capacity": "53687091200", "reconPriority": 1, "volumeRef": "3A00000060080E500029347000001D7B55C3791E", "status": "optimal", "repositoryRef": "3600000060080E500029347000001D7955C3791D", "currentManager": "070000000000000000000002", "preferredManager": "070000000000000000000002", "perms": { "mapToLUN": True, "snapShot": False, "format": True, "reconfigure": False, "mirrorPrimary": False, "mirrorSecondary": False, "copySource": True, "copyTarget": False, "readable": True, "writable": True, "rollback": True, "mirrorSync": True, "newImage": True, "allowDVE": True, "allowDSS": True, "concatVolumeMember": False, "flashReadCache": True, "asyncMirrorPrimary": True, "asyncMirrorSecondary": True, "pitGroup": True, "cacheParametersChangeable": True, "allowThinManualExpansion": False, "allowThinGrowthParametersChange": False, "allowVaulting": False, "allowRestore": False }, "mgmtClientAttribute": 0, "preReadRedundancyCheckEnabled": False, "protectionType": "type0Protection", 
"applicationTagOwned": True, "maxVirtualCapacity": "69269232549888", "initialProvisionedCapacity": "4294967296", "currentProvisionedCapacity": "4294967296", "provisionedCapacityQuota": "55834574848", "growthAlertThreshold": 85, "expansionPolicy": "automatic", "volumeCache": { "cwob": False, "enterpriseCacheDump": False, "mirrorActive": True, "mirrorEnable": True, "readCacheActive": True, "readCacheEnable": True, "writeCacheActive": True, "writeCacheEnable": True, "cacheFlushModifier": "flush10Sec", "readAheadMultiplier": 0 }, "offline": False, "volumeFull": False, "volumeGroupRef": "0400000060080E50002998A00000945155C37C08", "blkSize": 512, "storageVolumeRef": "0200000060080E500029347000001D7855C3791D", "volumeCopyTarget": False, "volumeCopySource": False, "pitBaseVolume": False, "asyncMirrorTarget": False, "asyncMirrorSource": False, "remoteMirrorSource": False, "remoteMirrorTarget": False, "flashCached": False, "mediaScan": { "enable": False, "parityValidationEnable": False }, "metadata": [], "dataAssurance": False, "name": "2", "id": "3A00000060080E500029347000001D7B55C3791E", "wwn": "60080E500029347000001D7B55C3791E", "objectType": "thinVolume", "mapped": False, "diskPool": True, "preferredControllerId": "070000000000000000000002", "totalSizeInBytes": "53687091200", "onlineVolumeCopy": False, "listOfMappings": [], "currentControllerId": "070000000000000000000002", "segmentSize": 131072, "cacheSettings": { "cwob": False, "enterpriseCacheDump": False, "mirrorActive": True, "mirrorEnable": True, "readCacheActive": True, "readCacheEnable": True, "writeCacheActive": True, "writeCacheEnable": True, "cacheFlushModifier": "flush10Sec", "readAheadMultiplier": 0 }, "thinProvisioned": True } ] VOLUME = VOLUMES[0] STORAGE_POOL = { 'label': 'DDP', 'id': 'fakevolgroupref', 'volumeGroupRef': 'fakevolgroupref', 'raidLevel': 'raidDiskPool', 'usedSpace': '16413217521664', 'totalRaidedSpace': '16637410312192', } INITIATOR_NAME = 'iqn.1998-01.com.vmware:localhost-28a58148' INITIATOR_NAME_2 = 'iqn.1998-01.com.vmware:localhost-28a58149' INITIATOR_NAME_3 = 'iqn.1998-01.com.vmware:localhost-28a58150' WWPN = '20130080E5322230' WWPN_2 = '20230080E5322230' FC_TARGET_WWPNS = [ '500a098280feeba5', '500a098290feeba5', '500a098190feeba5', '500a098180feeba5' ] FC_I_T_MAP = { '20230080E5322230': [ '500a098280feeba5', '500a098290feeba5' ], '20130080E5322230': [ '500a098190feeba5', '500a098180feeba5' ] } FC_FABRIC_MAP = { 'fabricB': { 'target_port_wwn_list': [ '500a098190feeba5', '500a098180feeba5' ], 'initiator_port_wwn_list': [ '20130080E5322230' ] }, 'fabricA': { 'target_port_wwn_list': [ '500a098290feeba5', '500a098280feeba5' ], 'initiator_port_wwn_list': [ '20230080E5322230' ] } } HOST = { 'isSAControlled': False, 'confirmLUNMappingCreation': False, 'label': 'stlrx300s7-55', 'isLargeBlockFormatHost': False, 'clusterRef': '8500000060080E500023C7340036035F515B78FC', 'protectionInformationCapableAccessMethod': False, 'ports': [], 'hostRef': '8400000060080E500023C73400300381515BFBA3', 'hostTypeIndex': 6, 'hostSidePorts': [{ 'label': 'NewStore', 'type': 'iscsi', 'address': INITIATOR_NAME}] } HOST_2 = { 'isSAControlled': False, 'confirmLUNMappingCreation': False, 'label': 'stlrx300s7-55', 'isLargeBlockFormatHost': False, 'clusterRef': utils.NULL_REF, 'protectionInformationCapableAccessMethod': False, 'ports': [], 'hostRef': '8400000060080E500023C73400300381515BFBA5', 'hostTypeIndex': 6, 'hostSidePorts': [{ 'label': 'NewStore', 'type': 'iscsi', 'address': INITIATOR_NAME_2}] } # HOST_3 has all lun_ids in use. 
HOST_3 = { 'isSAControlled': False, 'confirmLUNMappingCreation': False, 'label': 'stlrx300s7-55', 'isLargeBlockFormatHost': False, 'clusterRef': '8500000060080E500023C73400360351515B78FC', 'protectionInformationCapableAccessMethod': False, 'ports': [], 'hostRef': '8400000060080E501023C73400800381515BFBA5', 'hostTypeIndex': 6, 'hostSidePorts': [{ 'label': 'NewStore', 'type': 'iscsi', 'address': INITIATOR_NAME_3}], } VOLUME_MAPPING = { 'lunMappingRef': '8800000000000000000000000000000000000000', 'lun': 0, 'ssid': 16384, 'perms': 15, 'volumeRef': VOLUME['volumeRef'], 'type': 'all', 'mapRef': HOST['hostRef'] } # VOLUME_MAPPING_3 corresponding to HOST_3 has all lun_ids in use. VOLUME_MAPPING_3 = { 'lunMappingRef': '8800000000000000000000000000000000000000', 'lun': range(255), 'ssid': 16384, 'perms': 15, 'volumeRef': VOLUME['volumeRef'], 'type': 'all', 'mapRef': HOST_3['hostRef'], } VOLUME_MAPPING_TO_MULTIATTACH_GROUP = copy.deepcopy(VOLUME_MAPPING) VOLUME_MAPPING_TO_MULTIATTACH_GROUP.update( {'mapRef': MULTIATTACH_HOST_GROUP['clusterRef']} ) STORAGE_SYSTEM = { 'chassisSerialNumber': 1, 'fwVersion': '08.10.15.00', 'freePoolSpace': 11142431623168, 'driveCount': 24, 'hostSparesUsed': 0, 'id': '1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b', 'hotSpareSizeAsString': '0', 'wwn': '60080E500023C73400000000515AF323', 'passwordStatus': 'valid', 'parameters': { 'minVolSize': 1048576, 'maxSnapshotsPerBase': 16, 'maxDrives': 192, 'maxVolumes': 512, 'maxVolumesPerGroup': 256, 'maxMirrors': 0, 'maxMappingsPerVolume': 1, 'maxMappableLuns': 256, 'maxVolCopys': 511, 'maxSnapshots': 256 }, 'hotSpareCount': 0, 'hostSpareCountInStandby': 0, 'status': 'needsattn', 'trayCount': 1, 'usedPoolSpaceAsString': '5313000380416', 'ip2': '10.63.165.216', 'ip1': '10.63.165.215', 'freePoolSpaceAsString': '11142431623168', 'types': 'SAS', 'name': 'stle2600-7_8', 'hotSpareSize': 0, 'usedPoolSpace': 5313000380416, 'driveTypes': ['sas'], 'unconfiguredSpaceByDriveType': {}, 'unconfiguredSpaceAsStrings': '0', 'model': '2650', 'unconfiguredSpace': 0 } SNAPSHOT_GROUP = { 'id': '3300000060080E500023C7340000098D5294AC9A', 'status': 'optimal', 'autoDeleteLimit': 0, 'maxRepositoryCapacity': '-65536', 'rollbackStatus': 'none', 'unusableRepositoryCapacity': '0', 'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A', 'clusterSize': 65536, 'label': 'C6JICISVHNG2TFZX4XB5ZWL7F', 'maxBaseCapacity': '476187142128128', 'repositoryVolume': '3600000060080E500023BB3400001FA952CEF12C', 'fullWarnThreshold': 99, 'repFullPolicy': 'purgepit', 'action': 'none', 'rollbackPriority': 'medium', 'creationPendingStatus': 'none', 'consistencyGroupRef': '0000000000000000000000000000000000000000', 'volumeHandle': 49153, 'consistencyGroup': False, 'baseVolume': '0200000060080E500023C734000009825294A534', 'snapshotCount': 32 } SNAPSHOT_IMAGE = { 'id': '3400000060080E500023BB3400631F335294A5A8', 'baseVol': '0200000060080E500023C734000009825294A534', 'status': 'optimal', 'pitCapacity': '2147483648', 'pitTimestamp': '1389315375', 'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A', 'creationMethod': 'user', 'repositoryCapacityUtilization': '2818048', 'activeCOW': True, 'isRollbackSource': False, 'pitRef': '3400000060080E500023BB3400631F335294A5A8', 'pitSequenceNumber': '19', 'consistencyGroupId': '0000000000000000000000000000000000000000', } SNAPSHOT_VOLUME = { 'id': '35000000600A0980006077F80000F8BF566581AA', 'viewRef': '35000000600A0980006077F80000F8BF566581AA', 'worldWideName': '600A0980006077F80000F8BF566581AA', 'baseVol': '02000000600A0980006077F80000F89B56657E26', 
'basePIT': '0000000000000000000000000000000000000000', 'boundToPIT': False, 'accessMode': 'readOnly', 'label': 'UZJ45SLUKNGWRF3QZHBTOG4C4E_DEL', 'status': 'stopped', 'currentManager': '070000000000000000000001', 'preferredManager': '070000000000000000000001', 'repositoryVolume': '0000000000000000000000000000000000000000', 'fullWarnThreshold': 0, 'viewTime': '1449453419', 'viewSequenceNumber': '2104', 'volumeHandle': 16510, 'clusterSize': 0, 'maxRepositoryCapacity': '0', 'unusableRepositoryCapacity': '0', 'membership': { 'viewType': 'individual', 'cgViewRef': None }, 'mgmtClientAttribute': 0, 'offline': False, 'volumeFull': False, 'repositoryCapacity': '0', 'baseVolumeCapacity': '1073741824', 'totalSizeInBytes': '0', 'consistencyGroupId': None, 'volumeCopyTarget': False, 'cloneCopy': False, 'volumeCopySource': False, 'pitBaseVolume': False, 'asyncMirrorTarget': False, 'asyncMirrorSource': False, 'protectionType': 'type0Protection', 'remoteMirrorSource': False, 'remoteMirrorTarget': False, 'wwn': '600A0980006077F80000F8BF566581AA', 'listOfMappings': [], 'mapped': False, 'currentControllerId': '070000000000000000000001', 'preferredControllerId': '070000000000000000000001', 'onlineVolumeCopy': False, 'objectType': 'pitView', 'name': 'UZJ45SLUKNGWRF3QZHBTOG4C4E', } FAKE_BACKEND_STORE = { 'key': 'cinder-snapshots', 'value': '{"3300000060080E50003416400000E90D56B047E5":"2"}' } FAKE_CINDER_VOLUME = { 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1, 'volume_name': 'lun1', 'host': 'hostname@backend#DDP', 'os_type': 'linux', 'provider_location': 'lun1', 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'provider_auth': 'provider a b', 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None, 'migration_status': None, 'attach_status': "detached" } FAKE_CINDER_SNAPSHOT = { 'id': '78f95b9d-3f02-4781-a512-1a1c921d48a1', 'volume': FAKE_CINDER_VOLUME, 'provider_id': '3400000060080E500023BB3400631F335294A5A8' } HARDWARE_INVENTORY_SINGLE_CONTROLLER = { 'controllers': [ { 'modelName': '2752', 'serialNumber': '021436001321' } ] } HARDWARE_INVENTORY = { 'controllers': [ { 'modelName': '2752', 'serialNumber': '021436000943' }, { 'modelName': '2752', 'serialNumber': '021436001321' } ], 'iscsiPorts': [ { 'controllerId': '070000000000000000000002', 'ipv4Enabled': True, 'ipv4Data': { 'ipv4Address': '0.0.0.0', 'ipv4AddressConfigMethod': 'configStatic', 'ipv4VlanId': { 'isEnabled': False, 'value': 0 }, 'ipv4AddressData': { 'ipv4Address': '172.20.123.66', 'ipv4SubnetMask': '255.255.255.0', 'configState': 'configured', 'ipv4GatewayAddress': '0.0.0.0' } }, 'tcpListenPort': 3260, 'interfaceRef': '2202040000000000000000000000000000000000', 'iqn': 'iqn.1992-01.com.lsi:2365.60080e500023c73400000000515af323' } ], 'fibrePorts': [ { "channel": 1, "loopID": 126, "speed": 800, "hardAddress": 6, "nodeName": "20020080E5322230", "portName": "20130080E5322230", "portId": "011700", "topology": "fabric", "part": "PM8032 ", "revision": 8, "chanMiswire": False, "esmMiswire": False, "linkStatus": "up", "isDegraded": False, "speedControl": "auto", "maxSpeed": 800, "speedNegError": False, "reserved1": "000000000000000000000000", "reserved2": "", "ddsChannelState": 0, "ddsStateReason": 0, "ddsStateWho": 0, "isLocal": True, "channelPorts": [], "currentInterfaceSpeed": "speed8gig", "maximumInterfaceSpeed": "speed8gig", "interfaceRef": "2202020000000000000000000000000000000000", "physicalLocation": { "trayRef": "0000000000000000000000000000000000000000", "slot": 0, "locationParent": { 
"refType": "generic", "controllerRef": None, "symbolRef": "0000000000000000000000000000000000000000", "typedReference": None }, "locationPosition": 0 }, "isTrunkCapable": False, "trunkMiswire": False, "protectionInformationCapable": True, "controllerId": "070000000000000000000002", "interfaceId": "2202020000000000000000000000000000000000", "addressId": "20130080E5322230", "niceAddressId": "20:13:00:80:E5:32:22:30" }, { "channel": 2, "loopID": 126, "speed": 800, "hardAddress": 7, "nodeName": "20020080E5322230", "portName": "20230080E5322230", "portId": "011700", "topology": "fabric", "part": "PM8032 ", "revision": 8, "chanMiswire": False, "esmMiswire": False, "linkStatus": "up", "isDegraded": False, "speedControl": "auto", "maxSpeed": 800, "speedNegError": False, "reserved1": "000000000000000000000000", "reserved2": "", "ddsChannelState": 0, "ddsStateReason": 0, "ddsStateWho": 0, "isLocal": True, "channelPorts": [], "currentInterfaceSpeed": "speed8gig", "maximumInterfaceSpeed": "speed8gig", "interfaceRef": "2202030000000000000000000000000000000000", "physicalLocation": { "trayRef": "0000000000000000000000000000000000000000", "slot": 0, "locationParent": { "refType": "generic", "controllerRef": None, "symbolRef": "0000000000000000000000000000000000000000", "typedReference": None }, "locationPosition": 0 }, "isTrunkCapable": False, "trunkMiswire": False, "protectionInformationCapable": True, "controllerId": "070000000000000000000002", "interfaceId": "2202030000000000000000000000000000000000", "addressId": "20230080E5322230", "niceAddressId": "20:23:00:80:E5:32:22:30" }, ] } FAKE_POOL_ACTION_PROGRESS = [ { "volumeRef": "0200000060080E50002998A00000945355C37C19", "progressPercentage": 55, "estimatedTimeToCompletion": 1, "currentAction": "initializing" }, { "volumeRef": "0200000060080E50002998A00000945355C37C18", "progressPercentage": 0, "estimatedTimeToCompletion": 0, "currentAction": "progressDve" }, ] FAKE_RESOURCE_URL = '/devmgr/v2/devmgr/utils/about' FAKE_APP_VERSION = '2015.2|2015.2.dev59|vendor|Linux-3.13.0-24-generic' FAKE_BACKEND = 'eseriesiSCSI' FAKE_CINDER_HOST = 'ubuntu-1404' FAKE_SERIAL_NUMBERS = ['021436000943', '021436001321'] FAKE_SERIAL_NUMBER = ['021436001321'] FAKE_DEFAULT_SERIAL_NUMBER = ['unknown', 'unknown'] FAKE_DEFAULT_MODEL = 'unknown' FAKE_ABOUT_RESPONSE = { 'runningAsProxy': True, 'version': '01.53.9010.0005', 'systemId': 'a89355ab-692c-4d4a-9383-e249095c3c0', } FAKE_CONTROLLERS = [ {'serialNumber': FAKE_SERIAL_NUMBERS[0], 'modelName': '2752'}, {'serialNumber': FAKE_SERIAL_NUMBERS[1], 'modelName': '2752'}] FAKE_SINGLE_CONTROLLER = [{'serialNumber': FAKE_SERIAL_NUMBERS[1]}] FAKE_KEY = ('openstack-%s-%s-%s' % (FAKE_CINDER_HOST, FAKE_SERIAL_NUMBERS[0], FAKE_SERIAL_NUMBERS[1])) FAKE_ASUP_DATA = { 'category': 'provisioning', 'app-version': FAKE_APP_VERSION, 'event-source': 'Cinder driver NetApp_iSCSI_ESeries', 'event-description': 'OpenStack Cinder connected to E-Series proxy', 'system-version': '08.10.15.00', 'computer-name': FAKE_CINDER_HOST, 'model': FAKE_CONTROLLERS[0]['modelName'], 'controller2-serial': FAKE_CONTROLLERS[1]['serialNumber'], 'controller1-serial': FAKE_CONTROLLERS[0]['serialNumber'], 'chassis-serial-number': FAKE_SERIAL_NUMBER[0], 'operating-mode': 'proxy', } GET_ASUP_RETURN = { 'model': FAKE_CONTROLLERS[0]['modelName'], 'serial_numbers': FAKE_SERIAL_NUMBERS, 'firmware_version': FAKE_ASUP_DATA['system-version'], 'chassis_sn': FAKE_ASUP_DATA['chassis-serial-number'], } FAKE_POST_INVOKE_DATA = ('POST', '/key-values/%s' % FAKE_KEY, 
json.dumps(FAKE_ASUP_DATA)) VOLUME_COPY_JOB = { "status": "complete", "cloneCopy": True, "pgRef": "3300000060080E500023C73400000ACA52D29454", "volcopyHandle": 49160, "idleTargetWriteProt": True, "copyPriority": "priority2", "volcopyRef": "1800000060080E500023C73400000ACF52D29466", "worldWideName": "60080E500023C73400000ACF52D29466", "copyCompleteTime": "0", "sourceVolume": "3500000060080E500023C73400000ACE52D29462", "currentManager": "070000000000000000000002", "copyStartTime": "1389551671", "reserved1": "00000000", "targetVolume": "0200000060080E500023C73400000A8C52D10675", } FAKE_ENDPOINT_HTTP = 'http://host:80/endpoint' FAKE_ENDPOINT_HTTPS = 'https://host:8443/endpoint' FAKE_INVOC_MSG = 'success' FAKE_CLIENT_PARAMS = { 'scheme': 'http', 'host': '127.0.0.1', 'port': 8080, 'service_path': '/devmgr/vn', 'username': 'rw', 'password': 'rw', } FAKE_CONSISTENCY_GROUP = { 'cgRef': '2A000000600A0980006077F8008702F45480F41A', 'label': '5BO5GPO4PFGRPMQWEXGTILSAUI', 'repFullPolicy': 'failbasewrites', 'fullWarnThreshold': 75, 'autoDeleteLimit': 0, 'rollbackPriority': 'medium', 'uniqueSequenceNumber': [8940, 8941, 8942], 'creationPendingStatus': 'none', 'name': '5BO5GPO4PFGRPMQWEXGTILSAUI', 'id': '2A000000600A0980006077F8008702F45480F41A' } FAKE_CONSISTENCY_GROUP_MEMBER = { 'consistencyGroupId': '2A000000600A0980006077F8008702F45480F41A', 'volumeId': '02000000600A0980006077F8000002F55480F421', 'volumeWwn': '600A0980006077F8000002F55480F421', 'baseVolumeName': 'I5BHHNILUJGZHEUD4S36GCOQYA', 'clusterSize': 65536, 'totalRepositoryVolumes': 1, 'totalRepositoryCapacity': '4294967296', 'usedRepositoryCapacity': '5636096', 'fullWarnThreshold': 75, 'totalSnapshotImages': 3, 'totalSnapshotVolumes': 2, 'autoDeleteSnapshots': False, 'autoDeleteLimit': 0, 'pitGroupId': '33000000600A0980006077F8000002F85480F435', 'repositoryVolume': '36000000600A0980006077F8000002F75480F435' } FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME = { 'id': '2C00000060080E500034194F002C96A256BD50F9', 'name': '6TRZHKDG75DVLBC2JU5J647RME', 'cgViewRef': '2C00000060080E500034194F002C96A256BD50F9', 'groupRef': '2A00000060080E500034194F0087969856BD2D67', 'label': '6TRZHKDG75DVLBC2JU5J647RME', 'viewTime': '1455221060', 'viewSequenceNumber': '10', } def list_snapshot_groups(numGroups): snapshots = [] for n in range(0, numGroups): s = copy.deepcopy(SNAPSHOT_GROUP) s['label'] = s['label'][:-1] + str(n) snapshots.append(s) return snapshots def create_configuration_eseries(): config = conf.Configuration(None) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_eseries_opts) config.netapp_storage_protocol = 'iscsi' config.netapp_login = 'rw' config.netapp_password = 'rw' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'http' config.netapp_server_port = '8080' config.netapp_storage_pools = 'DDP' config.netapp_storage_family = 'eseries' config.netapp_sa_password = 'saPass' config.netapp_controller_ips = '10.11.12.13,10.11.12.14' config.netapp_webservice_path = '/devmgr/v2' config.netapp_enable_multiattach = False return config def deepcopy_return_value_method_decorator(fn): """Returns a deepcopy of the returned value of the wrapped function.""" def decorator(*args, **kwargs): return copy.deepcopy(fn(*args, **kwargs)) return decorator def deepcopy_return_value_class_decorator(cls): """Wraps 'non-protected' 
methods of a class with decorator. Wraps all 'non-protected' methods of a class with the deepcopy_return_value_method_decorator decorator. """ class NewClass(cls): def __getattribute__(self, attr_name): obj = super(NewClass, self).__getattribute__(attr_name) if (hasattr(obj, '__call__') and not attr_name.startswith('_') and not isinstance(obj, mock.Mock)): return deepcopy_return_value_method_decorator(obj) return obj return NewClass @deepcopy_return_value_class_decorator class FakeEseriesClient(object): features = na_utils.Features() def __init__(self, *args, **kwargs): self.features.add_feature('AUTOSUPPORT') self.features.add_feature('SSC_API_V2') self.features.add_feature('REST_1_3_RELEASE') self.features.add_feature('REST_1_4_RELEASE') def list_storage_pools(self): return STORAGE_POOLS def register_storage_system(self, *args, **kwargs): return { 'freePoolSpace': '17055871480319', 'driveCount': 24, 'wwn': '60080E500023C73400000000515AF323', 'id': '1', 'hotSpareSizeAsString': '0', 'hostSparesUsed': 0, 'types': '', 'hostSpareCountInStandby': 0, 'status': 'optimal', 'trayCount': 1, 'usedPoolSpaceAsString': '37452115456', 'ip2': '10.63.165.216', 'ip1': '10.63.165.215', 'freePoolSpaceAsString': '17055871480319', 'hotSpareCount': 0, 'hotSpareSize': '0', 'name': 'stle2600-7_8', 'usedPoolSpace': '37452115456', 'driveTypes': ['sas'], 'unconfiguredSpaceByDriveType': {}, 'unconfiguredSpaceAsStrings': '0', 'model': '2650', 'unconfiguredSpace': '0' } def list_volume(self, volume_id): return VOLUME def list_volumes(self): return [VOLUME] def delete_volume(self, vol): pass def create_host_group(self, name): return MULTIATTACH_HOST_GROUP def get_host_group(self, ref): return MULTIATTACH_HOST_GROUP def list_host_groups(self): return [MULTIATTACH_HOST_GROUP, FOREIGN_HOST_GROUP] def get_host_group_by_name(self, name, *args, **kwargs): host_groups = self.list_host_groups() return [host_group for host_group in host_groups if host_group['label'] == name][0] def set_host_group_for_host(self, *args, **kwargs): pass def create_host_with_ports(self, *args, **kwargs): return HOST def list_hosts(self): return [HOST, HOST_2] def get_host(self, *args, **kwargs): return HOST def create_volume(self, *args, **kwargs): return VOLUME def create_volume_mapping(self, *args, **kwargs): return VOLUME_MAPPING def get_volume_mappings(self): return [VOLUME_MAPPING] def get_volume_mappings_for_volume(self, volume): return [VOLUME_MAPPING] def get_volume_mappings_for_host(self, host_ref): return [VOLUME_MAPPING] def get_volume_mappings_for_host_group(self, hg_ref): return [VOLUME_MAPPING] def delete_volume_mapping(self): return def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id): return {'lun': lun_id} def list_storage_system(self): return STORAGE_SYSTEM def list_storage_systems(self): return [STORAGE_SYSTEM] def list_snapshot_groups(self): return [SNAPSHOT_GROUP] def list_snapshot_images(self): return [SNAPSHOT_IMAGE] def list_snapshot_image(self, *args, **kwargs): return SNAPSHOT_IMAGE def create_cg_snapshot_view(self, *args, **kwargs): return SNAPSHOT_VOLUME def list_host_types(self): return [ { 'id': '4', 'code': 'AIX', 'name': 'AIX', 'index': 4 }, { 'id': '5', 'code': 'IRX', 'name': 'IRX', 'index': 5 }, { 'id': '6', 'code': 'LnxALUA', 'name': 'LnxALUA', 'index': 6 } ] def list_hardware_inventory(self): return HARDWARE_INVENTORY def get_eseries_api_info(self, verify=False): return 'Proxy', '1.53.9010.0005' def set_counter(self, key, value): pass def add_autosupport_data(self, *args): pass def 
get_serial_numbers(self): return FAKE_ASUP_DATA.get('controller1-serial'), FAKE_ASUP_DATA.get( 'controller2-serial') def get_model_name(self): pass def api_operating_mode(self): pass def get_firmware_version(self): return FAKE_ASUP_DATA['system-version'] def create_volume_copy_job(self, *args, **kwargs): return VOLUME_COPY_JOB def list_vol_copy_job(self, *args, **kwargs): return VOLUME_COPY_JOB def delete_vol_copy_job(self, *args, **kwargs): pass def create_snapshot_image(self, *args, **kwargs): return SNAPSHOT_IMAGE def create_snapshot_volume(self, *args, **kwargs): return SNAPSHOT_VOLUME def list_snapshot_volumes(self, *args, **kwargs): return [SNAPSHOT_VOLUME] def list_snapshot_volume(self, *args, **kwargs): return SNAPSHOT_IMAGE def create_snapshot_group(self, *args, **kwargs): return SNAPSHOT_GROUP def list_snapshot_group(self, *args, **kwargs): return SNAPSHOT_GROUP def delete_snapshot_volume(self, *args, **kwargs): pass def list_target_wwpns(self, *args, **kwargs): return [WWPN_2] def update_stored_system_password(self, *args, **kwargs): pass def update_snapshot_volume(self, *args, **kwargs): return SNAPSHOT_VOLUME def delete_snapshot_image(self, *args, **kwargs): pass def delete_snapshot_group(self, *args, **kwargs): pass def restart_snapshot_volume(self, *args, **kwargs): pass def create_consistency_group(self, *args, **kwargs): return FAKE_CONSISTENCY_GROUP def delete_consistency_group(self, *args, **kwargs): pass def list_consistency_groups(self, *args, **kwargs): return [FAKE_CONSISTENCY_GROUP] def remove_consistency_group_member(self, *args, **kwargs): pass def add_consistency_group_member(self, *args, **kwargs): pass def list_backend_store(self, key): return {} def save_backend_store(self, key, val): pass def create_consistency_group_snapshot(self, *args, **kwargs): return [SNAPSHOT_IMAGE] def get_consistency_group_snapshots(self, *args, **kwargs): return [SNAPSHOT_IMAGE] def delete_consistency_group_snapshot(self, *args, **kwargs): pass cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/0000775000567000056710000000000012701406543025742 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py0000664000567000056710000017774312701406250031455 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. # Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. # Copyright (c) 2016 Chuck Fouts. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
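# The tests below stub collaborators in two ways: with class-level
# @mock.patch.object decorators, and with the cinder test-base helper
# self.mock_object(target, attribute, new), which applies
# mock.patch.object and registers a cleanup so the patch is undone
# automatically after each test. A minimal sketch of the helper-based
# pattern (the argument values here are illustrative only, not taken
# from a specific test):
#
#     mock_create = self.mock_object(self.library, '_create_lun')
#     self.library.create_volume(fake.VOLUME)
#     mock_create.assert_called_once_with(fake.POOL_NAME, fake.LUN_NAME,
#                                         mock.ANY, fake.LUN_METADATA, None)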
""" Mock unit tests for the NetApp block storage library """ import copy import uuid import mock from oslo_log import versionutils from oslo_utils import units import six from cinder import exception from cinder.i18n import _, _LW from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import utils as volume_utils class NetAppBlockStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppBlockStorageLibraryTestCase, self).setUp() kwargs = {'configuration': self.get_config_base()} self.library = block_base.NetAppBlockStorageLibrary( 'driver', 'protocol', **kwargs) self.library.zapi_client = mock.Mock() self.zapi_client = self.library.zapi_client self.mock_request = mock.Mock() def tearDown(self): super(NetAppBlockStorageLibraryTestCase, self).tearDown() def get_config_base(self): return na_fakes.create_configuration() @mock.patch.object(versionutils, 'report_deprecated_feature') def test_get_reserved_percentage_default_multipler(self, mock_report): default = 1.2 reserved_percentage = 20.0 self.library.configuration.netapp_size_multiplier = default self.library.configuration.reserved_percentage = reserved_percentage result = self.library._get_reserved_percentage() self.assertEqual(reserved_percentage, result) self.assertFalse(mock_report.called) @mock.patch.object(versionutils, 'report_deprecated_feature') def test_get_reserved_percentage(self, mock_report): multiplier = 2.0 self.library.configuration.netapp_size_multiplier = multiplier result = self.library._get_reserved_percentage() reserved_ratio = round(1 - (1 / multiplier), 2) reserved_percentage = 100 * int(reserved_ratio) self.assertEqual(reserved_percentage, result) msg = _LW('The "netapp_size_multiplier" configuration option is ' 'deprecated and will be removed in the Mitaka release. 
' 'Please set "reserved_percentage = %d" instead.') % ( result) mock_report.assert_called_once_with(block_base.LOG, msg) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr', mock.Mock(return_value={'Volume': 'FAKE_CMODE_VOL1'})) def test_get_pool(self): pool = self.library.get_pool({'name': 'volume-fake-uuid'}) self.assertEqual('FAKE_CMODE_VOL1', pool) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr', mock.Mock(return_value=None)) def test_get_pool_no_metadata(self): pool = self.library.get_pool({'name': 'volume-fake-uuid'}) self.assertIsNone(pool) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr', mock.Mock(return_value=dict())) def test_get_pool_volume_unknown(self): pool = self.library.get_pool({'name': 'volume-fake-uuid'}) self.assertIsNone(pool) def test_create_volume(self): volume_size_in_bytes = int(fake.SIZE) * units.Gi self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(na_utils, 'log_extra_spec_warnings') self.mock_object(block_base, 'LOG') self.mock_object(volume_utils, 'extract_host', mock.Mock( return_value=fake.POOL_NAME)) self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock(return_value=None)) self.mock_object(self.library, '_create_lun') self.mock_object(self.library, '_create_lun_handle') self.mock_object(self.library, '_add_lun_to_table') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.create_volume(fake.VOLUME) self.library._create_lun.assert_called_once_with( fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes, fake.LUN_METADATA, None) self.assertEqual(0, self.library. _mark_qos_policy_group_for_deletion.call_count) self.assertEqual(0, block_base.LOG.error.call_count) def test_create_volume_no_pool(self): self.mock_object(volume_utils, 'extract_host', mock.Mock( return_value=None)) self.assertRaises(exception.InvalidHost, self.library.create_volume, fake.VOLUME) def test_create_volume_exception_path(self): self.mock_object(block_base, 'LOG') self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock(return_value=None)) self.mock_object(self.library, '_create_lun', mock.Mock( side_effect=Exception)) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.assertRaises(exception.VolumeBackendAPIException, self.library.create_volume, fake.VOLUME) self.assertEqual(1, self.library. _mark_qos_policy_group_for_deletion.call_count) self.assertEqual(1, block_base.LOG.exception.call_count) def test_create_volume_no_pool_provided_by_scheduler(self): fake_volume = copy.deepcopy(fake.VOLUME) # Set up fake volume whose 'host' field is missing pool information. 
fake_volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME) self.assertRaises(exception.InvalidHost, self.library.create_volume, fake_volume) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_or_create_igroup') def test_map_lun(self, mock_get_or_create_igroup, mock_get_lun_attr): os = 'linux' protocol = 'fcp' self.library.host_type = 'linux' mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, 'iscsi') self.zapi_client.map_lun.return_value = '1' lun_id = self.library._map_lun('fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) self.assertEqual('1', lun_id) mock_get_or_create_igroup.assert_called_once_with( fake.FC_FORMATTED_INITIATORS, protocol, os) self.zapi_client.map_lun.assert_called_once_with( fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_or_create_igroup') @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_map_lun_mismatch_host_os( self, mock_get_or_create_igroup, mock_get_lun_attr): os = 'windows' protocol = 'fcp' self.library.host_type = 'linux' mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, 'iscsi') self.library._map_lun('fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) mock_get_or_create_igroup.assert_called_once_with( fake.FC_FORMATTED_INITIATORS, protocol, self.library.host_type) self.zapi_client.map_lun.assert_called_once_with( fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None) self.assertEqual(1, block_base.LOG.warning.call_count) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_or_create_igroup') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_find_mapped_lun_igroup') def test_map_lun_preexisting(self, mock_find_mapped_lun_igroup, mock_get_or_create_igroup, mock_get_lun_attr): os = 'linux' protocol = 'fcp' mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, 'iscsi') mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2') self.zapi_client.map_lun.side_effect = netapp_api.NaApiError lun_id = self.library._map_lun( 'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) self.assertEqual('2', lun_id) mock_find_mapped_lun_igroup.assert_called_once_with( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_or_create_igroup') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_find_mapped_lun_igroup') def test_map_lun_api_error(self, mock_find_mapped_lun_igroup, mock_get_or_create_igroup, mock_get_lun_attr): os = 'linux' protocol = 'fcp' mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, 'iscsi') mock_find_mapped_lun_igroup.return_value = (None, None) self.zapi_client.map_lun.side_effect = netapp_api.NaApiError self.assertRaises(netapp_api.NaApiError, self.library._map_lun, 'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_find_mapped_lun_igroup') def test_unmap_lun(self, 
mock_find_mapped_lun_igroup): mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, 1) self.library._unmap_lun(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.zapi_client.unmap_lun.assert_called_once_with(fake.LUN_PATH, fake.IGROUP1_NAME) def test_find_mapped_lun_igroup(self): self.assertRaises(NotImplementedError, self.library._find_mapped_lun_igroup, fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) def test_has_luns_mapped_to_initiators(self): self.zapi_client.has_luns_mapped_to_initiators.return_value = True self.assertTrue(self.library._has_luns_mapped_to_initiators( fake.FC_FORMATTED_INITIATORS)) self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( fake.FC_FORMATTED_INITIATORS) def test_get_or_create_igroup_preexisting(self): self.zapi_client.get_igroup_by_initiators.return_value = [fake.IGROUP1] self.library._create_igroup_add_initiators = mock.Mock() igroup_name, host_os, ig_type = self.library._get_or_create_igroup( fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux') self.assertEqual(fake.IGROUP1_NAME, igroup_name) self.assertEqual('linux', host_os) self.assertEqual('fcp', ig_type) self.zapi_client.get_igroup_by_initiators.assert_called_once_with( fake.FC_FORMATTED_INITIATORS) self.assertEqual( 0, self.library._create_igroup_add_initiators.call_count) @mock.patch.object(uuid, 'uuid4', mock.Mock(return_value=fake.UUID1)) def test_get_or_create_igroup_none_preexisting(self): """This method also tests _create_igroup_add_initiators.""" self.zapi_client.get_igroup_by_initiators.return_value = [] igroup_name, os, ig_type = self.library._get_or_create_igroup( fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux') self.assertEqual('openstack-' + fake.UUID1, igroup_name) self.zapi_client.create_igroup.assert_called_once_with( igroup_name, 'fcp', 'linux') self.assertEqual(len(fake.FC_FORMATTED_INITIATORS), self.zapi_client.add_igroup_initiator.call_count) self.assertEqual('linux', os) self.assertEqual('fcp', ig_type) def test_get_fc_target_wwpns(self): self.assertRaises(NotImplementedError, self.library._get_fc_target_wwpns) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_build_initiator_target_map') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_map_lun') def test_initialize_connection_fc(self, mock_map_lun, mock_build_initiator_target_map): self.maxDiff = None mock_map_lun.return_value = '1' mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS, fake.FC_I_T_MAP, 4) target_info = self.library.initialize_connection_fc(fake.FC_VOLUME, fake.FC_CONNECTOR) self.assertDictEqual(target_info, fake.FC_TARGET_INFO) mock_map_lun.assert_called_once_with( 'fake_volume', fake.FC_FORMATTED_INITIATORS, 'fcp', None) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_build_initiator_target_map') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_map_lun') def test_initialize_connection_fc_no_wwpns( self, mock_map_lun, mock_build_initiator_target_map): mock_map_lun.return_value = '1' mock_build_initiator_target_map.return_value = (None, None, 0) self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_fc, fake.FC_VOLUME, fake.FC_CONNECTOR) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_has_luns_mapped_to_initiators') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun, mock_has_luns_mapped_to_initiators): mock_get_lun_attr.return_value = 
{'Path': fake.LUN_PATH} mock_unmap_lun.return_value = None mock_has_luns_mapped_to_initiators.return_value = True target_info = self.library.terminate_connection_fc(fake.FC_VOLUME, fake.FC_CONNECTOR) self.assertDictEqual(target_info, fake.FC_TARGET_INFO_EMPTY) mock_unmap_lun.assert_called_once_with(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_build_initiator_target_map') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_has_luns_mapped_to_initiators') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') def test_terminate_connection_fc_no_more_luns( self, mock_get_lun_attr, mock_unmap_lun, mock_has_luns_mapped_to_initiators, mock_build_initiator_target_map): mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH} mock_unmap_lun.return_value = None mock_has_luns_mapped_to_initiators.return_value = False mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS, fake.FC_I_T_MAP, 4) target_info = self.library.terminate_connection_fc(fake.FC_VOLUME, fake.FC_CONNECTOR) self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_fc_target_wwpns') def test_build_initiator_target_map_no_lookup_service( self, mock_get_fc_target_wwpns): self.library.lookup_service = None mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS (target_wwpns, init_targ_map, num_paths) = \ self.library._build_initiator_target_map(fake.FC_CONNECTOR) self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns)) self.assertDictEqual(fake.FC_I_T_MAP_COMPLETE, init_targ_map) self.assertEqual(0, num_paths) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_fc_target_wwpns') def test_build_initiator_target_map_with_lookup_service( self, mock_get_fc_target_wwpns): self.library.lookup_service = mock.Mock() self.library.lookup_service.get_device_mapping_from_network.\ return_value = fake.FC_FABRIC_MAP mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS (target_wwpns, init_targ_map, num_paths) = \ self.library._build_initiator_target_map(fake.FC_CONNECTOR) self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns)) self.assertDictEqual(fake.FC_I_T_MAP, init_targ_map) self.assertEqual(4, num_paths) @mock.patch.object(na_utils, 'check_flags') def test_do_setup_san_configured(self, mock_check_flags): self.library.configuration.netapp_lun_ostype = 'windows' self.library.configuration.netapp_host_type = 'solaris' self.library.configuration.netapp_lun_space_reservation = 'disabled' self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertEqual('windows', self.library.lun_ostype) self.assertEqual('solaris', self.library.host_type) @mock.patch.object(na_utils, 'check_flags') def test_do_setup_san_unconfigured(self, mock_check_flags): self.library.configuration.netapp_lun_ostype = None self.library.configuration.netapp_host_type = None self.library.configuration.netapp_lun_space_reservation = 'enabled' self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertEqual('linux', self.library.lun_ostype) self.assertEqual('linux', self.library.host_type) def test_do_setup_space_reservation_disabled(self): self.mock_object(na_utils, 'check_flags') self.library.configuration.netapp_lun_ostype = None self.library.configuration.netapp_host_type = None 
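        # Note the normalization asserted by this test and the next one:
        # the configured netapp_lun_space_reservation value ('disabled' or
        # 'enabled') is translated to the lowercase string 'false' or
        # 'true' stored in self.library.lun_space_reservation.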
self.library.configuration.netapp_lun_space_reservation = 'disabled' self.library.do_setup(mock.Mock()) self.assertEqual('false', self.library.lun_space_reservation) def test_do_setup_space_reservation_enabled(self): self.mock_object(na_utils, 'check_flags') self.library.configuration.netapp_lun_ostype = None self.library.configuration.netapp_host_type = None self.library.configuration.netapp_lun_space_reservation = 'enabled' self.library.do_setup(mock.Mock()) self.assertEqual('true', self.library.lun_space_reservation) def test_get_existing_vol_manage_missing_id_path(self): self.assertRaises(exception.ManageExistingInvalidReference, self.library._get_existing_vol_with_manage_ref, {}) def test_get_existing_vol_manage_not_found(self): self.zapi_client.get_lun_by_args.return_value = [] self.assertRaises(exception.ManageExistingInvalidReference, self.library._get_existing_vol_with_manage_ref, {'source-id': 'src_id', 'source-name': 'lun_path'}) self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_extract_lun_info', mock.Mock(return_value=block_base.NetAppLun( 'lun0', 'lun0', '3', {'UUID': 'src_id'}))) def test_get_existing_vol_manage_lun(self): self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1'] lun = self.library._get_existing_vol_with_manage_ref( {'source-id': 'src_id', 'path': 'lun_path'}) self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count) self.library._extract_lun_info.assert_called_once_with('lun0') self.assertEqual('lun0', lun.name) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_existing_vol_with_manage_ref', mock.Mock(return_value=block_base.NetAppLun( 'handle', 'name', '1073742824', {}))) def test_manage_existing_get_size(self): size = self.library.manage_existing_get_size( {'id': 'vol_id'}, {'ref': 'ref'}) self.assertEqual(2, size) self.library._get_existing_vol_with_manage_ref.assert_called_once_with( {'ref': 'ref'}) @mock.patch.object(block_base.LOG, 'info') def test_unmanage(self, log): mock_lun = block_base.NetAppLun('handle', 'name', '1', {'Path': 'p', 'UUID': 'uuid'}) self.library._get_lun_from_table = mock.Mock(return_value=mock_lun) self.library.unmanage({'name': 'vol'}) self.library._get_lun_from_table.assert_called_once_with('vol') self.assertEqual(1, log.call_count) def test_check_vol_type_for_lun(self): self.assertRaises(NotImplementedError, self.library._check_volume_type_for_lun, 'vol', 'lun', 'existing_ref', {}) def test_is_lun_valid_on_storage(self): self.assertTrue(self.library._is_lun_valid_on_storage('lun')) def test_initialize_connection_iscsi(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', mock.Mock(return_value=fake.ISCSI_LUN['lun_id'])) self.zapi_client.get_iscsi_target_details.return_value = ( target_details_list) self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_preferred_target_from_list', mock.Mock(return_value=target_details_list[1])) self.zapi_client.get_iscsi_service_details.return_value = ( fake.ISCSI_SERVICE_IQN) self.mock_object( na_utils, 'get_iscsi_connection_properties', mock.Mock(return_value=fake.ISCSI_CONNECTION_PROPERTIES)) target_info = self.library.initialize_connection_iscsi(volume, connector) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_method'], target_info['data']['auth_method']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_password'], 
target_info['data']['auth_password']) self.assertTrue('auth_password' in target_info['data']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['discovery_auth_method'], target_info['data']['discovery_auth_method']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data'] ['discovery_auth_password'], target_info['data']['discovery_auth_password']) self.assertTrue('auth_password' in target_info['data']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data'] ['discovery_auth_username'], target_info['data']['discovery_auth_username']) self.assertEqual(fake.ISCSI_CONNECTION_PROPERTIES, target_info) block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with( fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']], 'iscsi', None) self.zapi_client.get_iscsi_target_details.assert_called_once_with() block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\ .assert_called_once_with( target_details_list) self.zapi_client.get_iscsi_service_details.assert_called_once_with() def test_initialize_connection_iscsi_no_target_list(self): volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', mock.Mock(return_value=fake.ISCSI_LUN['lun_id'])) self.zapi_client.get_iscsi_target_details.return_value = None self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_preferred_target_from_list') self.mock_object( na_utils, 'get_iscsi_connection_properties', mock.Mock(return_value=fake.ISCSI_CONNECTION_PROPERTIES)) self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_iscsi, volume, connector) self.assertEqual( 0, block_base.NetAppBlockStorageLibrary ._get_preferred_target_from_list.call_count) self.assertEqual( 0, self.zapi_client.get_iscsi_service_details.call_count) self.assertEqual( 0, na_utils.get_iscsi_connection_properties.call_count) def test_initialize_connection_iscsi_no_preferred_target(self): volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', mock.Mock(return_value=fake.ISCSI_LUN['lun_id'])) self.zapi_client.get_iscsi_target_details.return_value = None self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_preferred_target_from_list', mock.Mock(return_value=None)) self.mock_object(na_utils, 'get_iscsi_connection_properties') self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_iscsi, volume, connector) self.assertEqual(0, self.zapi_client .get_iscsi_service_details.call_count) self.assertEqual(0, na_utils.get_iscsi_connection_properties .call_count) def test_initialize_connection_iscsi_no_iscsi_service_details(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', mock.Mock(return_value=fake.ISCSI_LUN['lun_id'])) self.zapi_client.get_iscsi_target_details.return_value = ( target_details_list) self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_preferred_target_from_list', mock.Mock(return_value=target_details_list[1])) self.zapi_client.get_iscsi_service_details.return_value = None self.mock_object(na_utils, 'get_iscsi_connection_properties') self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_iscsi, volume, connector) block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with( fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']], 'iscsi', 
None) self.zapi_client.get_iscsi_target_details.assert_called_once_with() block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\ .assert_called_once_with(target_details_list) def test_get_target_details_list(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST result = self.library._get_preferred_target_from_list( target_details_list) self.assertEqual(target_details_list[0], result) def test_get_preferred_target_from_empty_list(self): target_details_list = [] result = self.library._get_preferred_target_from_list( target_details_list) self.assertIsNone(result) def test_get_preferred_target_from_list_with_one_interface_disabled(self): target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST) target_details_list[0]['interface-enabled'] = 'false' result = self.library._get_preferred_target_from_list( target_details_list) self.assertEqual(target_details_list[1], result) def test_get_preferred_target_from_list_with_all_interfaces_disabled(self): target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST) for target in target_details_list: target['interface-enabled'] = 'false' result = self.library._get_preferred_target_from_list( target_details_list) self.assertEqual(target_details_list[0], result) def test_get_preferred_target_from_list_with_filter(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST filter = [target_detail['address'] for target_detail in target_details_list[1:]] result = self.library._get_preferred_target_from_list( target_details_list, filter) self.assertEqual(target_details_list[1], result) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_setup_error_invalid_lun_os(self): self.library.configuration.netapp_lun_ostype = 'unknown' self.library.do_setup(mock.Mock()) self.assertRaises(exception.NetAppDriverException, self.library.check_for_setup_error) msg = _("Invalid value for NetApp configuration" " option netapp_lun_ostype.") block_base.LOG.error.assert_called_once_with(msg) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_setup_error_invalid_host_type(self): self.library.configuration.netapp_lun_ostype = 'linux' self.library.configuration.netapp_host_type = 'future_os' self.library.do_setup(mock.Mock()) self.assertRaises(exception.NetAppDriverException, self.library.check_for_setup_error) msg = _("Invalid value for NetApp configuration" " option netapp_host_type.") block_base.LOG.error.assert_called_once_with(msg) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) def test_check_for_setup_error_both_config(self): self.library.configuration.netapp_lun_ostype = 'linux' self.library.configuration.netapp_host_type = 'linux' self.library.do_setup(mock.Mock()) self.zapi_client.get_lun_list.return_value = ['lun1'] self.library._extract_and_populate_luns = mock.Mock() self.library.check_for_setup_error() self.library._extract_and_populate_luns.assert_called_once_with( ['lun1']) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) def test_check_for_setup_error_no_os_host(self): self.library.configuration.netapp_lun_ostype = None self.library.configuration.netapp_host_type = None self.library.do_setup(mock.Mock()) self.zapi_client.get_lun_list.return_value = ['lun1'] self.library._extract_and_populate_luns = mock.Mock() self.library.check_for_setup_error() self.library._extract_and_populate_luns.assert_called_once_with( ['lun1']) def test_delete_volume(self): mock_delete_lun = 
self.mock_object(self.library, '_delete_lun') self.library.delete_volume(fake.VOLUME) mock_delete_lun.assert_called_once_with(fake.LUN_NAME) def test_delete_lun(self): mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr') mock_get_lun_attr.return_value = fake.LUN_METADATA self.library.zapi_client = mock.Mock() self.library.lun_table = fake.LUN_TABLE self.library._delete_lun(fake.LUN_NAME) mock_get_lun_attr.assert_called_once_with( fake.LUN_NAME, 'metadata') self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH) def test_delete_lun_no_metadata(self): self.mock_object(self.library, '_get_lun_attr', mock.Mock( return_value=None)) self.library.zapi_client = mock.Mock() self.mock_object(self.library, 'zapi_client') self.library._delete_lun(fake.LUN_NAME) self.library._get_lun_attr.assert_called_once_with( fake.LUN_NAME, 'metadata') self.assertEqual(0, self.library.zapi_client.destroy_lun.call_count) self.assertEqual(0, self.zapi_client. mark_qos_policy_group_for_deletion.call_count) def test_delete_snapshot(self): mock_delete_lun = self.mock_object(self.library, '_delete_lun') self.library.delete_snapshot(fake.SNAPSHOT) mock_delete_lun.assert_called_once_with(fake.SNAPSHOT_NAME) def test_clone_source_to_destination(self): self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock( return_value=fake.EXTRA_SPECS)) self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock( return_value=fake.QOS_POLICY_GROUP_INFO)) self.mock_object(self.library, '_clone_lun') self.mock_object(self.library, '_extend_volume') self.mock_object(self.library, 'delete_volume') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.lun_space_reservation = 'false' self.library._clone_source_to_destination(fake.CLONE_SOURCE, fake.CLONE_DESTINATION) na_utils.get_volume_extra_specs.assert_called_once_with( fake.CLONE_DESTINATION) self.library._setup_qos_for_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.EXTRA_SPECS) self.library._clone_lun.assert_called_once_with( fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, space_reserved='false', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.library._extend_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE, fake.QOS_POLICY_GROUP_NAME) self.assertEqual(0, self.library.delete_volume.call_count) self.assertEqual(0, self.library. 
_mark_qos_policy_group_for_deletion.call_count) def test_clone_source_to_destination_exception_path(self): self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock( return_value=fake.EXTRA_SPECS)) self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock( return_value=fake.QOS_POLICY_GROUP_INFO)) self.mock_object(self.library, '_clone_lun') self.mock_object(self.library, '_extend_volume', mock.Mock( side_effect=Exception)) self.mock_object(self.library, 'delete_volume') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.lun_space_reservation = 'true' self.assertRaises(exception.VolumeBackendAPIException, self.library._clone_source_to_destination, fake.CLONE_SOURCE, fake.CLONE_DESTINATION) na_utils.get_volume_extra_specs.assert_called_once_with( fake.CLONE_DESTINATION) self.library._setup_qos_for_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.EXTRA_SPECS) self.library._clone_lun.assert_called_once_with( fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, space_reserved='true', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.library._extend_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE, fake.QOS_POLICY_GROUP_NAME) self.assertEqual(1, self.library.delete_volume.call_count) self.assertEqual(1, self.library. _mark_qos_policy_group_for_deletion.call_count) def test_create_lun(self): self.assertRaises(NotImplementedError, self.library._create_lun, fake.VOLUME_ID, fake.LUN_ID, fake.SIZE, fake.LUN_METADATA) def test_clone_lun(self): self.assertRaises(NotImplementedError, self.library._clone_lun, fake.VOLUME_ID, 'new-' + fake.VOLUME_ID) def test_create_volume_from_snapshot(self): mock_do_clone = self.mock_object(self.library, '_clone_source_to_destination') source = { 'name': fake.SNAPSHOT['name'], 'size': fake.SNAPSHOT['volume_size'] } self.library.create_volume_from_snapshot(fake.VOLUME, fake.SNAPSHOT) mock_do_clone.assert_has_calls([ mock.call(source, fake.VOLUME)]) def test_create_cloned_volume(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object(self.library, '_get_lun_from_table') mock_get_lun_from_table.return_value = fake_lun mock_do_clone = self.mock_object(self.library, '_clone_source_to_destination') source = { 'name': fake_lun.name, 'size': fake.VOLUME_REF['size'] } self.library.create_cloned_volume(fake.VOLUME, fake.VOLUME_REF) mock_do_clone.assert_has_calls([ mock.call(source, fake.VOLUME)]) def test_extend_volume(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', mock.Mock(return_value=fake.EXTRA_SPECS)) mock_setup_qos_for_volume = self.mock_object( self.library, '_setup_qos_for_volume', mock.Mock(return_value=fake.QOS_POLICY_GROUP_INFO)) mock_extend_volume = self.mock_object(self.library, '_extend_volume') self.library.extend_volume(fake.VOLUME, new_size) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) mock_setup_qos_for_volume.assert_called_once_with(volume_copy, fake.EXTRA_SPECS) mock_extend_volume.assert_called_once_with(fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME) def test_extend_volume_api_error(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', mock.Mock(return_value=fake.EXTRA_SPECS)) mock_setup_qos_for_volume = 
self.mock_object( self.library, '_setup_qos_for_volume', mock.Mock(return_value=fake.QOS_POLICY_GROUP_INFO)) mock_extend_volume = self.mock_object( self.library, '_extend_volume', mock.Mock(side_effect=netapp_api.NaApiError)) self.assertRaises(netapp_api.NaApiError, self.library.extend_volume, fake.VOLUME, new_size) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) mock_setup_qos_for_volume.assert_has_calls([ mock.call(volume_copy, fake.EXTRA_SPECS), mock.call(fake.VOLUME, fake.EXTRA_SPECS)]) mock_extend_volume.assert_called_once_with( fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME) def test__extend_volume_direct(self): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE * 2 new_size_bytes = new_size * units.Gi max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi fake_volume = copy.copy(fake.VOLUME) fake_volume['size'] = new_size fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', mock.Mock(return_value=fake_lun)) fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', mock.Mock(return_value=fake_lun_geometry)) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {fake.VOLUME['name']: fake_lun} self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy') mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name']) mock_get_lun_geometry.assert_called_once_with( fake.LUN_METADATA['Path']) mock_do_direct_resize.assert_called_once_with( fake.LUN_METADATA['Path'], six.text_type(new_size_bytes)) self.assertFalse(mock_do_sub_clone_resize.called) self.assertEqual(six.text_type(new_size_bytes), self.library.lun_table[fake.VOLUME['name']].size) def test__extend_volume_clone(self): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE * 20 new_size_bytes = new_size * units.Gi max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi fake_volume = copy.copy(fake.VOLUME) fake_volume['size'] = new_size fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', mock.Mock(return_value=fake_lun)) fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', mock.Mock(return_value=fake_lun_geometry)) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {fake.VOLUME['name']: fake_lun} self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy') mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name']) mock_get_lun_geometry.assert_called_once_with( fake.LUN_METADATA['Path']) self.assertFalse(mock_do_direct_resize.called) mock_do_sub_clone_resize.assert_called_once_with( fake.LUN_METADATA['Path'], six.text_type(new_size_bytes), qos_policy_group_name='fake_qos_policy') self.assertEqual(six.text_type(new_size_bytes), self.library.lun_table[fake.VOLUME['name']].size) def test__extend_volume_no_change(self): current_size = fake.LUN_SIZE 
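        # No-change path: the requested size equals the current LUN size,
        # so the resize should short-circuit; none of the geometry lookup,
        # direct resize, or sub-clone resize mocks may be called, as the
        # assertFalse checks below verify.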
current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi fake_volume = copy.copy(fake.VOLUME) fake_volume['size'] = new_size fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', mock.Mock(return_value=fake_lun)) fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', mock.Mock(return_value=fake_lun_geometry)) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {fake_volume['name']: fake_lun} self.library._extend_volume(fake_volume, new_size, 'fake_qos_policy') mock_get_lun_from_table.assert_called_once_with(fake_volume['name']) self.assertFalse(mock_get_lun_geometry.called) self.assertFalse(mock_do_direct_resize.called) self.assertFalse(mock_do_sub_clone_resize.called) def test_do_sub_clone_resize(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', mock.Mock(return_value=fake_lun)) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', mock.Mock(return_value='off')) mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', mock.Mock(return_value=block_count)) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.library._do_sub_clone_resize(fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) mock_clone_lun.assert_called_once_with( fake.LUN_NAME, new_lun_name, block_count=block_count) mock_post_sub_clone_resize.assert_called_once_with(fake.LUN_PATH) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_compression_on(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', mock.Mock(return_value=fake_lun)) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', mock.Mock(return_value='on')) mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', mock.Mock(return_value=block_count)) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') 
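        # With compression reported 'on' for the containing volume, the
        # sub-LUN clone resize is rejected immediately: the driver raises
        # VolumeBackendAPIException before counting blocks or creating and
        # cloning a replacement LUN, as the assertions below confirm.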
self.assertRaises(exception.VolumeBackendAPIException, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') self.assertFalse(mock_get_lun_block_count.called) self.assertFalse(mock_create_lun.called) self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_no_blocks(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 block_count = 0 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', mock.Mock(return_value=fake_lun)) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', mock.Mock(return_value='off')) mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', mock.Mock(return_value=block_count)) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(exception.VolumeBackendAPIException, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) self.assertFalse(mock_create_lun.called) self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_create_error(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', mock.Mock(return_value=fake_lun)) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', mock.Mock(return_value='off')) mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', mock.Mock(return_value=block_count)) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun', mock.Mock(side_effect=netapp_api.NaApiError)) mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(netapp_api.NaApiError, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_clone_error(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) 
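        # Failure-cleanup path: _clone_lun is made to raise after the new
        # LUN has been created, so the driver must destroy the partially
        # created LUN; the mock_destroy_lun assertion at the end of this
        # test verifies that cleanup.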
new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME new_lun_path = '/vol/vol0/%s' % new_lun_name block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', mock.Mock(return_value=fake_lun)) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', mock.Mock(return_value='off')) mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', mock.Mock(return_value=block_count)) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object( self.library, '_clone_lun', mock.Mock(side_effect=netapp_api.NaApiError)) mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(netapp_api.NaApiError, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) mock_clone_lun.assert_called_once_with( fake.LUN_NAME, new_lun_name, block_count=block_count) self.assertFalse(mock_post_sub_clone_resize.called) mock_destroy_lun.assert_called_once_with(new_lun_path) def test_configure_chap_generate_username_and_password(self): """Ensure that a CHAP username and password are generated.""" initiator_name = fake.ISCSI_CONNECTOR['initiator'] username, password = self.library._configure_chap(initiator_name) self.assertEqual(na_utils.DEFAULT_CHAP_USER_NAME, username) self.assertIsNotNone(password) self.assertEqual(len(password), na_utils.CHAP_SECRET_LENGTH) def test_add_chap_properties(self): """Ensure that CHAP properties are added to the properties dictionary """ properties = {'data': {}} self.library._add_chap_properties(properties, 'user1', 'pass1') data = properties['data'] self.assertEqual('CHAP', data['auth_method']) self.assertEqual('user1', data['auth_username']) self.assertEqual('pass1', data['auth_password']) self.assertEqual('CHAP', data['discovery_auth_method']) self.assertEqual('user1', data['discovery_auth_username']) self.assertEqual('pass1', data['discovery_auth_password']) def test_create_cgsnapshot(self): snapshot = fake.CG_SNAPSHOT snapshot['volume'] = fake.CG_VOLUME mock_extract_host = self.mock_object( volume_utils, 'extract_host', mock.Mock(return_value=fake.POOL_NAME)) mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_busy = self.mock_object(self.library, '_handle_busy_snapshot') self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot]) mock_extract_host.assert_called_once_with(fake.CG_VOLUME['host'], level='pool') self.zapi_client.create_cg_snapshot.assert_called_once_with( set([fake.POOL_NAME]), fake.CG_SNAPSHOT_ID) mock_clone_lun.assert_called_once_with( fake.CG_VOLUME_NAME, fake.CG_SNAPSHOT_NAME, source_snapshot=fake.CG_SNAPSHOT_ID) mock_busy.assert_called_once_with(fake.POOL_NAME, fake.CG_SNAPSHOT_ID) def test_delete_cgsnapshot(self): mock_delete_snapshot = self.mock_object( self.library, '_delete_lun') self.library.delete_cgsnapshot(fake.CG_SNAPSHOT, [fake.CG_SNAPSHOT]) mock_delete_snapshot.assert_called_once_with(fake.CG_SNAPSHOT['name']) def test_delete_cgsnapshot_not_found(self): self.mock_object(block_base, 
    def test_delete_cgsnapshot_not_found(self):
        self.mock_object(block_base, 'LOG')
        self.mock_object(self.library, '_get_lun_attr',
                         mock.Mock(return_value=None))

        self.library.delete_cgsnapshot(fake.CG_SNAPSHOT, [fake.CG_SNAPSHOT])

        self.assertEqual(0, block_base.LOG.error.call_count)
        self.assertEqual(1, block_base.LOG.warning.call_count)
        self.assertEqual(0, block_base.LOG.info.call_count)

    def test_create_volume_with_cg(self):
        volume_size_in_bytes = int(fake.CG_VOLUME_SIZE) * units.Gi
        self._create_volume_test_helper()

        self.library.create_volume(fake.CG_VOLUME)

        self.library._create_lun.assert_called_once_with(
            fake.POOL_NAME, fake.CG_VOLUME_NAME, volume_size_in_bytes,
            fake.CG_LUN_METADATA, None)
        self.assertEqual(0, self.library.
                         _mark_qos_policy_group_for_deletion.call_count)
        self.assertEqual(0, block_base.LOG.error.call_count)

    def _create_volume_test_helper(self):
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(na_utils, 'log_extra_spec_warnings')
        self.mock_object(block_base, 'LOG')
        self.mock_object(volume_utils, 'extract_host',
                         mock.Mock(return_value=fake.POOL_NAME))
        self.mock_object(self.library, '_setup_qos_for_volume',
                         mock.Mock(return_value=None))
        self.mock_object(self.library, '_create_lun')
        self.mock_object(self.library, '_create_lun_handle')
        self.mock_object(self.library, '_add_lun_to_table')
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')

    def test_create_consistency_group(self):
        model_update = self.library.create_consistencygroup(
            fake.CONSISTENCY_GROUP)
        self.assertEqual('available', model_update['status'])

    def test_delete_consistencygroup_volume_delete_failure(self):
        self.mock_object(block_base, 'LOG')
        self.mock_object(self.library, '_delete_lun',
                         mock.Mock(side_effect=Exception))

        model_update, volumes = self.library.delete_consistencygroup(
            fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])

        self.assertEqual('deleted', model_update['status'])
        self.assertEqual('error_deleting', volumes[0]['status'])
        self.assertEqual(1, block_base.LOG.exception.call_count)

    def test_delete_consistencygroup_not_found(self):
        self.mock_object(block_base, 'LOG')
        self.mock_object(self.library, '_get_lun_attr',
                         mock.Mock(return_value=None))

        model_update, volumes = self.library.delete_consistencygroup(
            fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])

        self.assertEqual(0, block_base.LOG.error.call_count)
        self.assertEqual(1, block_base.LOG.warning.call_count)
        self.assertEqual(0, block_base.LOG.info.call_count)
        self.assertEqual('deleted', model_update['status'])
        self.assertEqual('deleted', volumes[0]['status'])

    def test_create_consistencygroup_from_src_cg_snapshot(self):
        mock_clone_source_to_destination = self.mock_object(
            self.library, '_clone_source_to_destination')

        self.library.create_consistencygroup_from_src(
            fake.CONSISTENCY_GROUP, [fake.VOLUME],
            cgsnapshot=fake.CG_SNAPSHOT, snapshots=[fake.CG_VOLUME_SNAPSHOT])

        clone_source_to_destination_args = {
            'name': fake.CG_SNAPSHOT['name'],
            'size': fake.CG_SNAPSHOT['volume_size'],
        }
        mock_clone_source_to_destination.assert_called_once_with(
            clone_source_to_destination_args, fake.VOLUME)

    def test_create_consistencygroup_from_src_cg(self):
        class fake_lun_name(object):
            pass
        fake_lun_name_instance = fake_lun_name()
        fake_lun_name_instance.name = fake.SOURCE_CG_VOLUME['name']
        self.mock_object(self.library, '_get_lun_from_table', mock.Mock(
            return_value=fake_lun_name_instance)
        )
        mock_clone_source_to_destination = self.mock_object(
            self.library, '_clone_source_to_destination')

        self.library.create_consistencygroup_from_src(
            fake.CONSISTENCY_GROUP, [fake.VOLUME],
            source_cg=fake.SOURCE_CONSISTENCY_GROUP,
            source_vols=[fake.SOURCE_CG_VOLUME])

        clone_source_to_destination_args = {
            'name': fake.SOURCE_CG_VOLUME['name'],
            'size': fake.SOURCE_CG_VOLUME['size'],
        }
        mock_clone_source_to_destination.assert_called_once_with(
            clone_source_to_destination_args, fake.VOLUME)

    def test_handle_busy_snapshot(self):
        self.mock_object(block_base, 'LOG')
        mock_get_snapshot = self.mock_object(
            self.zapi_client, 'get_snapshot',
            mock.Mock(return_value=fake.SNAPSHOT))

        self.library._handle_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME)

        self.assertEqual(1, block_base.LOG.info.call_count)
        mock_get_snapshot.assert_called_once_with(fake.FLEXVOL,
                                                  fake.SNAPSHOT_NAME)
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/0000775000567000056710000000000012701406543030243 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py0000664000567000056710000000000012701406250032335 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py0000664000567000056710000004633312701406250033763 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ddt
import mock

from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \
    import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.performance import perf_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode


@ddt.ddt
class PerformanceCmodeLibraryTestCase(test.TestCase):

    def setUp(self):
        super(PerformanceCmodeLibraryTestCase, self).setUp()

        with mock.patch.object(perf_cmode.PerformanceCmodeLibrary,
                               '_init_counter_info'):
            self.zapi_client = mock.Mock()
            self.perf_library = perf_cmode.PerformanceCmodeLibrary(
                self.zapi_client)
            self.perf_library.system_object_name = 'system'
            self.perf_library.avg_processor_busy_base_counter_name = (
                'cpu_elapsed_time1')

        self._set_up_fake_pools()

    def _set_up_fake_pools(self):

        class test_volume(object):
            self.id = None
            self.aggr = None

        volume1 = test_volume()
        volume1.id = {'name': 'pool1'}
        volume1.aggr = {'name': 'aggr1'}
        volume2 = test_volume()
        volume2.id = {'name': 'pool2'}
        volume2.aggr = {'name': 'aggr2'}
        volume3 = test_volume()
        volume3.id = {'name': 'pool3'}
        volume3.aggr = {'name': 'aggr2'}
        self.fake_volumes = [volume1, volume2, volume3]

        self.fake_aggrs = set(['aggr1', 'aggr2', 'aggr3'])
        self.fake_nodes = set(['node1', 'node2'])
        self.fake_aggr_node_map = {
            'aggr1': 'node1',
            'aggr2': 'node2',
            'aggr3': 'node2',
        }

    def test_init_counter_info_not_supported(self):
        self.zapi_client.features.SYSTEM_METRICS = False
        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False
        mock_get_base_counter_name = self.mock_object(
            self.perf_library, '_get_base_counter_name')

        self.perf_library._init_counter_info()

        self.assertIsNone(self.perf_library.system_object_name)
        self.assertIsNone(
            self.perf_library.avg_processor_busy_base_counter_name)
        self.assertFalse(mock_get_base_counter_name.called)

    @ddt.data({
        'system_constituent': False,
        'base_counter': 'cpu_elapsed_time1',
    }, {
        'system_constituent': True,
        'base_counter': 'cpu_elapsed_time',
    })
    @ddt.unpack
    def test_init_counter_info_api_error(self, system_constituent,
                                         base_counter):
        self.zapi_client.features.SYSTEM_METRICS = True
        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = (
            system_constituent)
        self.mock_object(self.perf_library,
                         '_get_base_counter_name',
                         mock.Mock(side_effect=netapp_api.NaApiError))

        self.perf_library._init_counter_info()

        self.assertEqual(
            base_counter,
            self.perf_library.avg_processor_busy_base_counter_name)

    def test_init_counter_info_system(self):
        self.zapi_client.features.SYSTEM_METRICS = True
        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False
        mock_get_base_counter_name = self.mock_object(
            self.perf_library, '_get_base_counter_name',
            mock.Mock(return_value='cpu_elapsed_time1'))

        self.perf_library._init_counter_info()

        self.assertEqual('system', self.perf_library.system_object_name)
        self.assertEqual(
            'cpu_elapsed_time1',
            self.perf_library.avg_processor_busy_base_counter_name)
        mock_get_base_counter_name.assert_called_once_with(
            'system', 'avg_processor_busy')

    def test_init_counter_info_system_constituent(self):
        self.zapi_client.features.SYSTEM_METRICS = False
        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = True
        mock_get_base_counter_name = self.mock_object(
            self.perf_library, '_get_base_counter_name',
            mock.Mock(return_value='cpu_elapsed_time'))

        self.perf_library._init_counter_info()

        self.assertEqual('system:constituent',
                         self.perf_library.system_object_name)
        self.assertEqual(
            'cpu_elapsed_time',
            self.perf_library.avg_processor_busy_base_counter_name)
        mock_get_base_counter_name.assert_called_once_with(
            'system:constituent', 'avg_processor_busy')

    def test_update_performance_cache(self):
        self.perf_library.performance_counters = {
            'node1': list(range(11, 21)),
            'node2': list(range(21, 31)),
        }

        mock_get_aggregates_for_pools = self.mock_object(
            self.perf_library, '_get_aggregates_for_pools',
            mock.Mock(return_value=self.fake_aggrs))
        mock_get_nodes_for_aggregates = self.mock_object(
            self.perf_library, '_get_nodes_for_aggregates',
            mock.Mock(return_value=(self.fake_nodes,
                                    self.fake_aggr_node_map)))
        mock_get_node_utilization_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_counters',
            mock.Mock(side_effect=[21, 31]))
        mock_get_node_utilization = self.mock_object(
            self.perf_library, '_get_node_utilization',
            mock.Mock(side_effect=[25, 75]))

        self.perf_library.update_performance_cache(self.fake_volumes)

        expected_performance_counters = {
            'node1': list(range(12, 22)),
            'node2': list(range(22, 32)),
        }
        self.assertEqual(expected_performance_counters,
                         self.perf_library.performance_counters)

        expected_pool_utilization = {'pool1': 25, 'pool2': 75, 'pool3': 75}
        self.assertEqual(expected_pool_utilization,
                         self.perf_library.pool_utilization)

        mock_get_aggregates_for_pools.assert_called_once_with(
            self.fake_volumes)
        mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs)
        mock_get_node_utilization_counters.assert_has_calls([
            mock.call('node1'), mock.call('node2')])
        mock_get_node_utilization.assert_has_calls([
            mock.call(12, 21, 'node1'), mock.call(22, 31, 'node2')])
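    # The cache-update tests encode the library's sliding-window behavior:
    # each refresh appends one new counter sample per node and drops the
    # oldest (range(11, 21) becomes range(12, 22)), and utilization is
    # then derived from the oldest and newest samples of the window
    # (mock.call(12, 21, 'node1')). A rough sketch of the same
    # bookkeeping, using hypothetical names rather than the library code:
    #
    #     counters.append(new_sample)        # newest sample last
    #     if len(counters) > window_size:    # bounded history
    #         counters.pop(0)                # evict the oldest
    #     utilization = compute(counters[0], counters[-1])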
    def test_update_performance_cache_first_pass(self):
        mock_get_aggregates_for_pools = self.mock_object(
            self.perf_library, '_get_aggregates_for_pools',
            mock.Mock(return_value=self.fake_aggrs))
        mock_get_nodes_for_aggregates = self.mock_object(
            self.perf_library, '_get_nodes_for_aggregates',
            mock.Mock(return_value=(self.fake_nodes,
                                    self.fake_aggr_node_map)))
        mock_get_node_utilization_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_counters',
            mock.Mock(side_effect=[11, 21]))
        mock_get_node_utilization = self.mock_object(
            self.perf_library, '_get_node_utilization',
            mock.Mock(side_effect=[25, 75]))

        self.perf_library.update_performance_cache(self.fake_volumes)

        expected_performance_counters = {'node1': [11], 'node2': [21]}
        self.assertEqual(expected_performance_counters,
                         self.perf_library.performance_counters)

        expected_pool_utilization = {
            'pool1': perf_base.DEFAULT_UTILIZATION,
            'pool2': perf_base.DEFAULT_UTILIZATION,
            'pool3': perf_base.DEFAULT_UTILIZATION,
        }
        self.assertEqual(expected_pool_utilization,
                         self.perf_library.pool_utilization)

        mock_get_aggregates_for_pools.assert_called_once_with(
            self.fake_volumes)
        mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs)
        mock_get_node_utilization_counters.assert_has_calls([
            mock.call('node1'), mock.call('node2')])
        self.assertFalse(mock_get_node_utilization.called)

    def test_update_performance_cache_unknown_nodes(self):
        self.perf_library.performance_counters = {
            'node1': range(11, 21),
            'node2': range(21, 31),
        }

        mock_get_aggregates_for_pools = self.mock_object(
            self.perf_library, '_get_aggregates_for_pools',
            mock.Mock(return_value=self.fake_aggrs))
        mock_get_nodes_for_aggregates = self.mock_object(
            self.perf_library, '_get_nodes_for_aggregates',
            mock.Mock(return_value=(set(), {})))
        mock_get_node_utilization_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_counters',
            mock.Mock(side_effect=[11, 21]))
        mock_get_node_utilization = self.mock_object(
            self.perf_library, '_get_node_utilization',
            mock.Mock(side_effect=[25, 75]))

        self.perf_library.update_performance_cache(self.fake_volumes)

        expected_performance_counters = {
            'node1': range(11, 21),
            'node2': range(21, 31),
        }
        self.assertEqual(expected_performance_counters,
                         self.perf_library.performance_counters)

        expected_pool_utilization = {
            'pool1': perf_base.DEFAULT_UTILIZATION,
            'pool2': perf_base.DEFAULT_UTILIZATION,
            'pool3': perf_base.DEFAULT_UTILIZATION,
        }
        self.assertEqual(expected_pool_utilization,
                         self.perf_library.pool_utilization)

        mock_get_aggregates_for_pools.assert_called_once_with(
            self.fake_volumes)
        mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs)
        self.assertFalse(mock_get_node_utilization_counters.called)
        self.assertFalse(mock_get_node_utilization.called)

    def test_update_performance_cache_counters_unavailable(self):
        self.perf_library.performance_counters = {
            'node1': range(11, 21),
            'node2': range(21, 31),
        }

        mock_get_aggregates_for_pools = self.mock_object(
            self.perf_library, '_get_aggregates_for_pools',
            mock.Mock(return_value=self.fake_aggrs))
        mock_get_nodes_for_aggregates = self.mock_object(
            self.perf_library, '_get_nodes_for_aggregates',
            mock.Mock(return_value=(self.fake_nodes,
                                    self.fake_aggr_node_map)))
        mock_get_node_utilization_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_counters',
            mock.Mock(side_effect=[None, None]))
        mock_get_node_utilization = self.mock_object(
            self.perf_library, '_get_node_utilization',
            mock.Mock(side_effect=[25, 75]))

        self.perf_library.update_performance_cache(self.fake_volumes)

        expected_performance_counters = {
            'node1': range(11, 21),
            'node2': range(21, 31),
        }
        self.assertEqual(expected_performance_counters,
                         self.perf_library.performance_counters)

        expected_pool_utilization = {
            'pool1': perf_base.DEFAULT_UTILIZATION,
            'pool2': perf_base.DEFAULT_UTILIZATION,
            'pool3': perf_base.DEFAULT_UTILIZATION,
        }
        self.assertEqual(expected_pool_utilization,
                         self.perf_library.pool_utilization)

        mock_get_aggregates_for_pools.assert_called_once_with(
            self.fake_volumes)
        mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs)
        mock_get_node_utilization_counters.assert_has_calls([
            mock.call('node1'), mock.call('node2')])
        self.assertFalse(mock_get_node_utilization.called)

    def test_update_performance_cache_not_supported(self):
        self.zapi_client.features.SYSTEM_METRICS = False
        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False

        mock_get_aggregates_for_pools = self.mock_object(
            self.perf_library, '_get_aggregates_for_pools')

        self.perf_library.update_performance_cache(self.fake_volumes)

        expected_performance_counters = {}
        self.assertEqual(expected_performance_counters,
                         self.perf_library.performance_counters)

        expected_pool_utilization = {}
        self.assertEqual(expected_pool_utilization,
                         self.perf_library.pool_utilization)

        self.assertFalse(mock_get_aggregates_for_pools.called)

    @ddt.data({'pool': 'pool1', 'expected': 10.0},
              {'pool': 'pool3', 'expected': perf_base.DEFAULT_UTILIZATION})
    @ddt.unpack
    def test_get_node_utilization_for_pool(self, pool, expected):
        self.perf_library.pool_utilization = {'pool1': 10.0, 'pool2': 15.0}

        result = self.perf_library.get_node_utilization_for_pool(pool)

        self.assertAlmostEqual(expected, result)

    def test_get_aggregates_for_pools(self):

        class test_volume(object):
            self.aggr = None

        volume1 = test_volume()
        volume1.aggr = {'name': 'aggr1'}
        volume2 = test_volume()
        volume2.aggr = {'name': 'aggr2'}
        volume3 = test_volume()
        volume3.aggr = {'name': 'aggr2'}
        volumes = [volume1, volume2, volume3]

        result = self.perf_library._get_aggregates_for_pools(volumes)

        expected_aggregate_names = set(['aggr1', 'aggr2'])
        self.assertEqual(expected_aggregate_names, result)

    def test_get_nodes_for_aggregates(self):
        aggregate_names = ['aggr1', 'aggr2', 'aggr3']
        aggregate_nodes = ['node1', 'node2', 'node2']

        mock_get_node_for_aggregate = self.mock_object(
            self.zapi_client, 'get_node_for_aggregate',
            mock.Mock(side_effect=aggregate_nodes))

        result = self.perf_library._get_nodes_for_aggregates(aggregate_names)

        self.assertEqual(2, len(result))
        result_node_names, result_aggr_node_map = result

        expected_node_names = set(['node1', 'node2'])
        expected_aggr_node_map = dict(zip(aggregate_names, aggregate_nodes))
        self.assertEqual(expected_node_names, result_node_names)
        self.assertEqual(expected_aggr_node_map, result_aggr_node_map)
        mock_get_node_for_aggregate.assert_has_calls([
            mock.call('aggr1'), mock.call('aggr2'), mock.call('aggr3')])

    def test_get_node_utilization_counters(self):
        mock_get_node_utilization_system_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_system_counters',
            mock.Mock(return_value=['A', 'B', 'C']))
        mock_get_node_utilization_wafl_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_wafl_counters',
            mock.Mock(return_value=['D', 'E', 'F']))
        mock_get_node_utilization_processor_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_processor_counters',
            mock.Mock(return_value=['G', 'H', 'I']))

        result = self.perf_library._get_node_utilization_counters(fake.NODE)

        expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
        self.assertEqual(expected, result)
        mock_get_node_utilization_system_counters.assert_called_once_with(
            fake.NODE)
        mock_get_node_utilization_wafl_counters.assert_called_once_with(
            fake.NODE)
        mock_get_node_utilization_processor_counters.assert_called_once_with(
            fake.NODE)

    def test_get_node_utilization_counters_api_error(self):
        self.mock_object(self.perf_library,
                         '_get_node_utilization_system_counters',
                         mock.Mock(side_effect=netapp_api.NaApiError))

        result = self.perf_library._get_node_utilization_counters(fake.NODE)

        self.assertIsNone(result)

    def test_get_node_utilization_system_counters(self):
        mock_get_performance_instance_uuids = self.mock_object(
            self.zapi_client, 'get_performance_instance_uuids',
            mock.Mock(return_value=fake.SYSTEM_INSTANCE_UUIDS))
        mock_get_performance_counters = self.mock_object(
            self.zapi_client, 'get_performance_counters',
            mock.Mock(return_value=fake.SYSTEM_COUNTERS))

        result = self.perf_library._get_node_utilization_system_counters(
            fake.NODE)

        self.assertEqual(fake.SYSTEM_COUNTERS, result)
        mock_get_performance_instance_uuids.assert_called_once_with(
            'system', fake.NODE)
        mock_get_performance_counters.assert_called_once_with(
            'system', fake.SYSTEM_INSTANCE_UUIDS,
            ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time'])

    def test_get_node_utilization_wafl_counters(self):
        mock_get_performance_instance_uuids = self.mock_object(
            self.zapi_client, 'get_performance_instance_uuids',
            mock.Mock(return_value=fake.WAFL_INSTANCE_UUIDS))
        mock_get_performance_counters = self.mock_object(
            self.zapi_client, 'get_performance_counters',
            mock.Mock(return_value=fake.WAFL_COUNTERS))
        mock_get_performance_counter_info = self.mock_object(
            self.zapi_client, 'get_performance_counter_info',
            mock.Mock(return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO))

        result = self.perf_library._get_node_utilization_wafl_counters(
            fake.NODE)

        self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result)
        mock_get_performance_instance_uuids.assert_called_once_with(
            'wafl', fake.NODE)
        mock_get_performance_counters.assert_called_once_with(
            'wafl', fake.WAFL_INSTANCE_UUIDS,
            ['total_cp_msecs', 'cp_phase_times'])
        mock_get_performance_counter_info.assert_called_once_with(
            'wafl', 'cp_phase_times')

    def test_get_node_utilization_processor_counters(self):
        mock_get_performance_instance_uuids = self.mock_object(
            self.zapi_client, 'get_performance_instance_uuids',
            mock.Mock(return_value=fake.PROCESSOR_INSTANCE_UUIDS))
        mock_get_performance_counters = self.mock_object(
            self.zapi_client, 'get_performance_counters',
            mock.Mock(return_value=fake.PROCESSOR_COUNTERS))
        self.mock_object(
            self.zapi_client, 'get_performance_counter_info',
            mock.Mock(return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO))

        result = self.perf_library._get_node_utilization_processor_counters(
            fake.NODE)

        self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result)
        mock_get_performance_instance_uuids.assert_called_once_with(
            'processor', fake.NODE)
        mock_get_performance_counters.assert_called_once_with(
            'processor', fake.PROCESSOR_INSTANCE_UUIDS,
            ['domain_busy', 'processor_elapsed_time'])
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_7mode.py0000664000567000056710000002501112701406250033675 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ddt
import mock

from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \
    import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp.dataontap.performance import perf_base


@ddt.ddt
class Performance7modeLibraryTestCase(test.TestCase):

    def setUp(self):
        super(Performance7modeLibraryTestCase, self).setUp()

        with mock.patch.object(perf_7mode.Performance7modeLibrary,
                               '_init_counter_info'):
            self.zapi_client = mock.Mock()
            self.zapi_client.get_system_name.return_value = fake.NODE
            self.perf_library = perf_7mode.Performance7modeLibrary(
                self.zapi_client)
            self.perf_library.system_object_name = 'system'
            self.perf_library.avg_processor_busy_base_counter_name = (
                'cpu_elapsed_time1')

    def test_init_counter_info_not_supported(self):
        self.zapi_client.features.SYSTEM_METRICS = False
        mock_get_base_counter_name = self.mock_object(
            self.perf_library, '_get_base_counter_name')

        self.perf_library._init_counter_info()

        self.assertIsNone(self.perf_library.system_object_name)
        self.assertIsNone(
            self.perf_library.avg_processor_busy_base_counter_name)
        self.assertFalse(mock_get_base_counter_name.called)

    def test_init_counter_info_api_error(self):
        self.zapi_client.features.SYSTEM_METRICS = True
        mock_get_base_counter_name = self.mock_object(
            self.perf_library, '_get_base_counter_name',
            mock.Mock(side_effect=netapp_api.NaApiError))

        self.perf_library._init_counter_info()

        self.assertEqual('system', self.perf_library.system_object_name)
        self.assertEqual(
            'cpu_elapsed_time1',
            self.perf_library.avg_processor_busy_base_counter_name)
        mock_get_base_counter_name.assert_called_once_with(
            'system', 'avg_processor_busy')

    def test_init_counter_info_system(self):
        self.zapi_client.features.SYSTEM_METRICS = True
        mock_get_base_counter_name = self.mock_object(
            self.perf_library, '_get_base_counter_name',
            mock.Mock(return_value='cpu_elapsed_time1'))

        self.perf_library._init_counter_info()

        self.assertEqual('system', self.perf_library.system_object_name)
        self.assertEqual(
            'cpu_elapsed_time1',
            self.perf_library.avg_processor_busy_base_counter_name)
        mock_get_base_counter_name.assert_called_once_with(
            'system', 'avg_processor_busy')

    def test_update_performance_cache(self):
        self.perf_library.performance_counters = list(range(11, 21))

        mock_get_node_utilization_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_counters',
            mock.Mock(return_value=21))
        mock_get_node_utilization = self.mock_object(
            self.perf_library, '_get_node_utilization',
            mock.Mock(return_value=25))

        self.perf_library.update_performance_cache()

        self.assertEqual(list(range(12, 22)),
                         self.perf_library.performance_counters)
        self.assertEqual(25, self.perf_library.utilization)
        mock_get_node_utilization_counters.assert_called_once_with()
        mock_get_node_utilization.assert_called_once_with(12, 21, fake.NODE)
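    # Unlike the cluster-mode library, which keeps a dict of counter
    # windows keyed by node name, the 7-mode library tracks a single
    # node: performance_counters is one flat list and utilization is a
    # single scalar. The same sliding-window update applies, as the test
    # above shows (range(11, 21) becomes range(12, 22) after a refresh).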
    def test_update_performance_cache_first_pass(self):
        mock_get_node_utilization_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_counters',
            mock.Mock(return_value=11))
        mock_get_node_utilization = self.mock_object(
            self.perf_library, '_get_node_utilization',
            mock.Mock(return_value=25))

        self.perf_library.update_performance_cache()

        self.assertEqual([11], self.perf_library.performance_counters)
        mock_get_node_utilization_counters.assert_called_once_with()
        self.assertFalse(mock_get_node_utilization.called)

    def test_update_performance_cache_counters_unavailable(self):
        self.perf_library.performance_counters = list(range(11, 21))
        self.perf_library.utilization = 55.0

        mock_get_node_utilization_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_counters',
            mock.Mock(return_value=None))
        mock_get_node_utilization = self.mock_object(
            self.perf_library, '_get_node_utilization',
            mock.Mock(return_value=25))

        self.perf_library.update_performance_cache()

        self.assertEqual(list(range(11, 21)),
                         self.perf_library.performance_counters)
        self.assertEqual(55.0, self.perf_library.utilization)
        mock_get_node_utilization_counters.assert_called_once_with()
        self.assertFalse(mock_get_node_utilization.called)

    def test_update_performance_cache_not_supported(self):
        self.zapi_client.features.SYSTEM_METRICS = False

        mock_get_node_utilization_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_counters')

        self.perf_library.update_performance_cache()

        self.assertEqual([], self.perf_library.performance_counters)
        self.assertEqual(perf_base.DEFAULT_UTILIZATION,
                         self.perf_library.utilization)
        self.assertFalse(mock_get_node_utilization_counters.called)

    def test_get_node_utilization(self):
        self.perf_library.utilization = 47.1

        result = self.perf_library.get_node_utilization()

        self.assertEqual(47.1, result)

    def test_get_node_utilization_counters(self):
        mock_get_node_utilization_system_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_system_counters',
            mock.Mock(return_value=['A', 'B', 'C']))
        mock_get_node_utilization_wafl_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_wafl_counters',
            mock.Mock(return_value=['D', 'E', 'F']))
        mock_get_node_utilization_processor_counters = self.mock_object(
            self.perf_library, '_get_node_utilization_processor_counters',
            mock.Mock(return_value=['G', 'H', 'I']))

        result = self.perf_library._get_node_utilization_counters()

        expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
        self.assertEqual(expected, result)
        mock_get_node_utilization_system_counters.assert_called_once_with()
        mock_get_node_utilization_wafl_counters.assert_called_once_with()
        mock_get_node_utilization_processor_counters.assert_called_once_with()

    def test_get_node_utilization_counters_api_error(self):
        self.mock_object(self.perf_library,
                         '_get_node_utilization_system_counters',
                         mock.Mock(side_effect=netapp_api.NaApiError))

        result = self.perf_library._get_node_utilization_counters()

        self.assertIsNone(result)

    def test_get_node_utilization_system_counters(self):
        mock_get_performance_instance_names = self.mock_object(
            self.zapi_client, 'get_performance_instance_names',
            mock.Mock(return_value=fake.SYSTEM_INSTANCE_NAMES))
        mock_get_performance_counters = self.mock_object(
            self.zapi_client, 'get_performance_counters',
            mock.Mock(return_value=fake.SYSTEM_COUNTERS))

        result = self.perf_library._get_node_utilization_system_counters()

        self.assertEqual(fake.SYSTEM_COUNTERS, result)
        mock_get_performance_instance_names.assert_called_once_with('system')
        mock_get_performance_counters.assert_called_once_with(
            'system', fake.SYSTEM_INSTANCE_NAMES,
            ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time'])
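    # As the two tests above pin down, _get_node_utilization_counters is
    # just a concatenation of the three counter families (system + wafl +
    # processor), and a None result signals that the node's counters
    # could not be fetched. A rough equivalent, sketched with the same
    # method names, not copied from the library source:
    #
    #     try:
    #         return (self._get_node_utilization_system_counters() +
    #                 self._get_node_utilization_wafl_counters() +
    #                 self._get_node_utilization_processor_counters())
    #     except netapp_api.NaApiError:
    #         return None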
    def test_get_node_utilization_wafl_counters(self):
        mock_get_performance_instance_names = self.mock_object(
            self.zapi_client, 'get_performance_instance_names',
            mock.Mock(return_value=fake.WAFL_INSTANCE_NAMES))
        mock_get_performance_counters = self.mock_object(
            self.zapi_client, 'get_performance_counters',
            mock.Mock(return_value=fake.WAFL_COUNTERS))
        mock_get_performance_counter_info = self.mock_object(
            self.zapi_client, 'get_performance_counter_info',
            mock.Mock(return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO))

        result = self.perf_library._get_node_utilization_wafl_counters()

        self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result)
        mock_get_performance_instance_names.assert_called_once_with('wafl')
        mock_get_performance_counters.assert_called_once_with(
            'wafl', fake.WAFL_INSTANCE_NAMES,
            ['total_cp_msecs', 'cp_phase_times'])
        mock_get_performance_counter_info.assert_called_once_with(
            'wafl', 'cp_phase_times')

    def test_get_node_utilization_processor_counters(self):
        mock_get_performance_instance_names = self.mock_object(
            self.zapi_client, 'get_performance_instance_names',
            mock.Mock(return_value=fake.PROCESSOR_INSTANCE_NAMES))
        mock_get_performance_counters = self.mock_object(
            self.zapi_client, 'get_performance_counters',
            mock.Mock(return_value=fake.PROCESSOR_COUNTERS))
        self.mock_object(
            self.zapi_client, 'get_performance_counter_info',
            mock.Mock(return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO))

        result = self.perf_library._get_node_utilization_processor_counters()

        self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result)
        mock_get_performance_instance_names.assert_called_once_with(
            'processor')
        mock_get_performance_counters.assert_called_once_with(
            'processor', fake.PROCESSOR_INSTANCE_NAMES,
            ['domain_busy', 'processor_elapsed_time'])
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py0000664000567000056710000005331312701406250031706 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NODE = 'cluster1-01'

COUNTERS_T1 = [
    {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'avg_processor_busy': '29078861388',
        'instance-name': 'system',
        'timestamp': '1453573776',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'cpu_elapsed_time': '1063283283681',
        'instance-name': 'system',
        'timestamp': '1453573776',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'cpu_elapsed_time1': '1063283283681',
        'instance-name': 'system',
        'timestamp': '1453573776',
    }, {
        'cp_phase_times:p2a_snap': '714',
        'cp_phase_times:p4_finish': '14897',
        'cp_phase_times:setup': '581',
        'cp_phase_times:p2a_dlog1': '6019',
        'cp_phase_times:p2a_dlog2': '2328',
        'cp_phase_times:p2v_cont': '2479',
        'cp_phase_times:p2v_volinfo': '1138',
        'cp_phase_times:p2v_bm': '3484',
        'cp_phase_times:p2v_fsinfo': '2031',
        'cp_phase_times:p2a_inofile': '356',
        'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,'
                          '427,1058,354,3484,5135,1460,1138,2479,356,1373'
                          ',6019,9,2328,2257,229,493,1275,0,6059,714,530215,'
                          '21603833,0,0,3286,11075940,22001,14897,36',
        'cp_phase_times:p2v_dlog2': '377',
        'instance-name': 'wafl',
        'cp_phase_times:p3_wait': '0',
        'cp_phase_times:p2a_bm': '6059',
        'cp_phase_times:p1_quota': '498',
        'cp_phase_times:p2v_inofile': '839',
        'cp_phase_times:p2a_refcount': '493',
        'cp_phase_times:p2a_fsinfo': '2257',
        'cp_phase_times:p2a_hyabc': '0',
        'cp_phase_times:p2a_volinfo': '530215',
        'cp_phase_times:pre_p0': '5007',
        'cp_phase_times:p2a_hya': '9',
        'cp_phase_times:p0_snap_del': '1840',
        'cp_phase_times:p2a_ino': '1373',
        'cp_phase_times:p2v_df_scores_sub': '354',
        'cp_phase_times:p2v_ino_pub': '799',
        'cp_phase_times:p2a_ipu_bitmap_grow': '229',
        'cp_phase_times:p2v_refcount': '427',
        'timestamp': '1453573776',
        'cp_phase_times:p2v_dlog1': '0',
        'cp_phase_times:p2_finish': '0',
        'cp_phase_times:p1_clean': '9832',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'cp_phase_times:p3a_volinfo': '11075940',
        'cp_phase_times:p2a_topaa': '1275',
        'cp_phase_times:p2_flush': '21603833',
        'cp_phase_times:p2v_df_scores': '1460',
        'cp_phase_times:ipu_disk_add': '0',
        'cp_phase_times:p2v_snap': '5135',
        'cp_phase_times:p5_finish': '36',
        'cp_phase_times:p2v_ino_pri': '1336',
        'cp_phase_times:p3v_volinfo': '3286',
        'cp_phase_times:p2v_topaa': '1058',
        'cp_phase_times:p3_finish': '22001',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'total_cp_msecs': '33309624',
        'instance-name': 'wafl',
        'timestamp': '1453573776',
    }, {
        'domain_busy:kahuna': '2712467226',
        'timestamp': '1453573777',
        'domain_busy:cifs': '434036',
        'domain_busy:raid_exempt': '28',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'domain_busy:target': '6460782',
        'domain_busy:nwk_exempt': '20',
        'domain_busy:raid': '722094140',
        'domain_busy:storage': '2253156562',
        'instance-name': 'processor0',
        'domain_busy:cluster': '34',
        'domain_busy:wafl_xcleaner': '51275254',
        'domain_busy:wafl_exempt': '1243553699',
        'domain_busy:protocol': '54',
        'domain_busy': '1028851855595,2712467226,2253156562,5688808118,'
                       '722094140,28,6460782,59,434036,1243553699,51275254,'
                       '61237441,34,54,11,20,5254181873,13656398235,452215',
        'domain_busy:nwk_legacy': '5254181873',
        'domain_busy:dnscache': '59',
        'domain_busy:exempt': '5688808118',
        'domain_busy:hostos': '13656398235',
        'domain_busy:sm_exempt': '61237441',
        'domain_busy:nwk_exclusive': '11',
        'domain_busy:idle': '1028851855595',
        'domain_busy:ssan_exempt': '452215',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'processor_elapsed_time': '1063283843318',
        'instance-name': 'processor0',
        'timestamp': '1453573777',
    }, {
        'domain_busy:kahuna': '1978024846',
        'timestamp': '1453573777',
        'domain_busy:cifs': '318584',
        'domain_busy:raid_exempt': '0',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'domain_busy:target': '3330956',
        'domain_busy:nwk_exempt': '0',
        'domain_busy:raid': '722235930',
        'domain_busy:storage': '1498890708',
        'instance-name': 'processor1',
        'domain_busy:cluster': '0',
        'domain_busy:wafl_xcleaner': '50122685',
        'domain_busy:wafl_exempt': '1265921369',
        'domain_busy:protocol': '0',
        'domain_busy': '1039557880852,1978024846,1498890708,3734060289,'
                       '722235930,0,3330956,0,318584,1265921369,50122685,'
                       '36417362,0,0,0,0,2815252976,10274810484,393451',
        'domain_busy:nwk_legacy': '2815252976',
        'domain_busy:dnscache': '0',
        'domain_busy:exempt': '3734060289',
        'domain_busy:hostos': '10274810484',
        'domain_busy:sm_exempt': '36417362',
        'domain_busy:nwk_exclusive': '0',
        'domain_busy:idle': '1039557880852',
        'domain_busy:ssan_exempt': '393451',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'processor_elapsed_time': '1063283843321',
        'instance-name': 'processor1',
        'timestamp': '1453573777',
    }
]

COUNTERS_T2 = [
    {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'avg_processor_busy': '29081228905',
        'instance-name': 'system',
        'timestamp': '1453573834',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'cpu_elapsed_time': '1063340792148',
        'instance-name': 'system',
        'timestamp': '1453573834',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'cpu_elapsed_time1': '1063340792148',
        'instance-name': 'system',
        'timestamp': '1453573834',
    }, {
        'cp_phase_times:p2a_snap': '714',
        'cp_phase_times:p4_finish': '14897',
        'cp_phase_times:setup': '581',
        'cp_phase_times:p2a_dlog1': '6019',
        'cp_phase_times:p2a_dlog2': '2328',
        'cp_phase_times:p2v_cont': '2479',
        'cp_phase_times:p2v_volinfo': '1138',
        'cp_phase_times:p2v_bm': '3484',
        'cp_phase_times:p2v_fsinfo': '2031',
        'cp_phase_times:p2a_inofile': '356',
        'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,'
                          '427,1058,354,3484,5135,1460,1138,2479,356,1373,'
                          '6019,9,2328,2257,229,493,1275,0,6059,714,530215,'
                          '21604863,0,0,3286,11076392,22001,14897,36',
        'cp_phase_times:p2v_dlog2': '377',
        'instance-name': 'wafl',
        'cp_phase_times:p3_wait': '0',
        'cp_phase_times:p2a_bm': '6059',
        'cp_phase_times:p1_quota': '498',
        'cp_phase_times:p2v_inofile': '839',
        'cp_phase_times:p2a_refcount': '493',
        'cp_phase_times:p2a_fsinfo': '2257',
        'cp_phase_times:p2a_hyabc': '0',
        'cp_phase_times:p2a_volinfo': '530215',
        'cp_phase_times:pre_p0': '5007',
        'cp_phase_times:p2a_hya': '9',
        'cp_phase_times:p0_snap_del': '1840',
        'cp_phase_times:p2a_ino': '1373',
        'cp_phase_times:p2v_df_scores_sub': '354',
        'cp_phase_times:p2v_ino_pub': '799',
        'cp_phase_times:p2a_ipu_bitmap_grow': '229',
        'cp_phase_times:p2v_refcount': '427',
        'timestamp': '1453573834',
        'cp_phase_times:p2v_dlog1': '0',
        'cp_phase_times:p2_finish': '0',
        'cp_phase_times:p1_clean': '9832',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'cp_phase_times:p3a_volinfo': '11076392',
        'cp_phase_times:p2a_topaa': '1275',
        'cp_phase_times:p2_flush': '21604863',
        'cp_phase_times:p2v_df_scores': '1460',
        'cp_phase_times:ipu_disk_add': '0',
        'cp_phase_times:p2v_snap': '5135',
        'cp_phase_times:p5_finish': '36',
        'cp_phase_times:p2v_ino_pri': '1336',
        'cp_phase_times:p3v_volinfo': '3286',
        'cp_phase_times:p2v_topaa': '1058',
        'cp_phase_times:p3_finish': '22001',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'total_cp_msecs': '33311106',
        'instance-name': 'wafl',
        'timestamp': '1453573834',
    }, {
        'domain_busy:kahuna': '2712629374',
        'timestamp': '1453573834',
        'domain_busy:cifs': '434036',
        'domain_busy:raid_exempt': '28',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'domain_busy:target': '6461082',
        'domain_busy:nwk_exempt': '20',
        'domain_busy:raid': '722136824',
        'domain_busy:storage': '2253260824',
        'instance-name': 'processor0',
        'domain_busy:cluster': '34',
        'domain_busy:wafl_xcleaner': '51277506',
        'domain_busy:wafl_exempt': '1243637154',
        'domain_busy:protocol': '54',
        'domain_busy': '1028906640232,2712629374,2253260824,5689093500,'
                       '722136824,28,6461082,59,434036,1243637154,51277506,'
                       '61240335,34,54,11,20,5254491236,13657992139,452215',
        'domain_busy:nwk_legacy': '5254491236',
        'domain_busy:dnscache': '59',
        'domain_busy:exempt': '5689093500',
        'domain_busy:hostos': '13657992139',
        'domain_busy:sm_exempt': '61240335',
        'domain_busy:nwk_exclusive': '11',
        'domain_busy:idle': '1028906640232',
        'domain_busy:ssan_exempt': '452215',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'processor_elapsed_time': '1063341351916',
        'instance-name': 'processor0',
        'timestamp': '1453573834',
    }, {
        'domain_busy:kahuna': '1978217049',
        'timestamp': '1453573834',
        'domain_busy:cifs': '318584',
        'domain_busy:raid_exempt': '0',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'domain_busy:target': '3331147',
        'domain_busy:nwk_exempt': '0',
        'domain_busy:raid': '722276805',
        'domain_busy:storage': '1498984059',
        'instance-name': 'processor1',
        'domain_busy:cluster': '0',
        'domain_busy:wafl_xcleaner': '50126176',
        'domain_busy:wafl_exempt': '1266039846',
        'domain_busy:protocol': '0',
        'domain_busy': '1039613222253,1978217049,1498984059,3734279672,'
                       '722276805,0,3331147,0,318584,1266039846,50126176,'
                       '36419297,0,0,0,0,2815435865,10276068104,393451',
        'domain_busy:nwk_legacy': '2815435865',
        'domain_busy:dnscache': '0',
        'domain_busy:exempt': '3734279672',
        'domain_busy:hostos': '10276068104',
        'domain_busy:sm_exempt': '36419297',
        'domain_busy:nwk_exclusive': '0',
        'domain_busy:idle': '1039613222253',
        'domain_busy:ssan_exempt': '393451',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'processor_elapsed_time': '1063341351919',
        'instance-name': 'processor1',
        'timestamp': '1453573834',
    },
]

SYSTEM_INSTANCE_UUIDS = ['cluster1-01:kernel:system']
SYSTEM_INSTANCE_NAMES = ['system']

SYSTEM_COUNTERS = [
    {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'avg_processor_busy': '27877641199',
        'instance-name': 'system',
        'timestamp': '1453524928',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'cpu_elapsed_time': '1014438541279',
        'instance-name': 'system',
        'timestamp': '1453524928',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'cpu_elapsed_time1': '1014438541279',
        'instance-name': 'system',
        'timestamp': '1453524928',
    },
]

WAFL_INSTANCE_UUIDS = ['cluster1-01:kernel:wafl']
WAFL_INSTANCE_NAMES = ['wafl']

WAFL_COUNTERS = [
    {
        'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,'
                          '418,1048,344,3344,4867,1397,1101,2380,356,1318,'
                          '5954,9,2236,2190,228,476,1221,0,5838,696,515588,'
                          '20542954,0,0,3122,10567367,20696,13982,36',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'instance-name': 'wafl',
        'timestamp': '1453523339',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'total_cp_msecs': '31721222',
        'instance-name': 'wafl',
        'timestamp': '1453523339',
    },
]

WAFL_CP_PHASE_TIMES_COUNTER_INFO = {
    'labels': [
        'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA',
        'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI',
        'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT',
        'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP',
        'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE',
        'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO',
        'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC',
        'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH',
        'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH',
        'P5_FINISH',
    ],
    'name': 'cp_phase_times',
}

EXPANDED_WAFL_COUNTERS = [
    {
        'cp_phase_times:p2a_snap': '696',
        'cp_phase_times:p4_finish': '13982',
        'cp_phase_times:setup': '563',
        'cp_phase_times:p2a_dlog1': '5954',
        'cp_phase_times:p2a_dlog2': '2236',
        'cp_phase_times:p2v_cont': '2380',
        'cp_phase_times:p2v_volinfo': '1101',
        'cp_phase_times:p2v_bm': '3344',
        'cp_phase_times:p2v_fsinfo': '1937',
        'cp_phase_times:p2a_inofile': '356',
        'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,'
                          '418,1048,344,3344,4867,1397,1101,2380,356,1318,'
                          '5954,9,2236,2190,228,476,1221,0,5838,696,515588,'
                          '20542954,0,0,3122,10567367,20696,13982,36',
        'cp_phase_times:p2v_dlog2': '359',
        'instance-name': 'wafl',
        'cp_phase_times:p3_wait': '0',
        'cp_phase_times:p2a_bm': '5838',
        'cp_phase_times:p1_quota': '469',
        'cp_phase_times:p2v_inofile': '821',
        'cp_phase_times:p2a_refcount': '476',
        'cp_phase_times:p2a_fsinfo': '2190',
        'cp_phase_times:p2a_hyabc': '0',
        'cp_phase_times:p2a_volinfo': '515588',
        'cp_phase_times:pre_p0': '4844',
        'cp_phase_times:p2a_hya': '9',
        'cp_phase_times:p0_snap_del': '1731',
        'cp_phase_times:p2a_ino': '1318',
        'cp_phase_times:p2v_df_scores_sub': '344',
        'cp_phase_times:p2v_ino_pub': '763',
        'cp_phase_times:p2a_ipu_bitmap_grow': '228',
        'cp_phase_times:p2v_refcount': '418',
        'timestamp': '1453523339',
        'cp_phase_times:p2v_dlog1': '0',
        'cp_phase_times:p2_finish': '0',
        'cp_phase_times:p1_clean': '9676',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'cp_phase_times:p3a_volinfo': '10567367',
        'cp_phase_times:p2a_topaa': '1221',
        'cp_phase_times:p2_flush': '20542954',
        'cp_phase_times:p2v_df_scores': '1397',
        'cp_phase_times:ipu_disk_add': '0',
        'cp_phase_times:p2v_snap': '4867',
        'cp_phase_times:p5_finish': '36',
        'cp_phase_times:p2v_ino_pri': '1282',
        'cp_phase_times:p3v_volinfo': '3122',
        'cp_phase_times:p2v_topaa': '1048',
        'cp_phase_times:p3_finish': '20696',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'total_cp_msecs': '31721222',
        'instance-name': 'wafl',
        'timestamp': '1453523339',
    },
]

PROCESSOR_INSTANCE_UUIDS = [
    'cluster1-01:kernel:processor0',
    'cluster1-01:kernel:processor1',
]
PROCESSOR_INSTANCE_NAMES = ['processor0', 'processor1']

PROCESSOR_COUNTERS = [
    {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'domain_busy': '980648687811,2597164534,2155400686,5443901498,'
                       '690280568,28,6180773,59,413895,1190100947,48989575,'
                       '58549809,34,54,11,20,5024141791,13136260754,452215',
        'instance-name': 'processor0',
        'timestamp': '1453524150',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'processor_elapsed_time': '1013660714257',
        'instance-name': 'processor0',
        'timestamp': '1453524150',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'domain_busy': '990957980543,1891766637,1433411516,3572427934,'
                       '691372324,0,3188648,0,305947,1211235777,47954620,'
                       '34832715,0,0,0,0,2692084482,9834648927,393451',
        'instance-name': 'processor1',
        'timestamp': '1453524150',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'processor_elapsed_time': '1013660714261',
        'instance-name': 'processor1',
        'timestamp': '1453524150',
    },
]

PROCESSOR_DOMAIN_BUSY_COUNTER_INFO = {
    'labels': [
        'idle', 'kahuna', 'storage', 'exempt', 'raid', 'raid_exempt',
        'target', 'dnscache', 'cifs', 'wafl_exempt', 'wafl_xcleaner',
        'sm_exempt', 'cluster', 'protocol', 'nwk_exclusive', 'nwk_exempt',
        'nwk_legacy', 'hostOS', 'ssan_exempt',
    ],
    'name': 'domain_busy',
}

EXPANDED_PROCESSOR_COUNTERS = [
    {
        'domain_busy:kahuna': '2597164534',
        'timestamp': '1453524150',
        'domain_busy:cifs': '413895',
        'domain_busy:raid_exempt': '28',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'domain_busy:target': '6180773',
        'domain_busy:nwk_exempt': '20',
        'domain_busy:raid': '690280568',
        'domain_busy:storage': '2155400686',
        'instance-name': 'processor0',
        'domain_busy:cluster': '34',
        'domain_busy:wafl_xcleaner': '48989575',
        'domain_busy:wafl_exempt': '1190100947',
        'domain_busy:protocol': '54',
        'domain_busy': '980648687811,2597164534,2155400686,5443901498,'
                       '690280568,28,6180773,59,413895,1190100947,48989575,'
                       '58549809,34,54,11,20,5024141791,13136260754,452215',
        'domain_busy:nwk_legacy': '5024141791',
        'domain_busy:dnscache': '59',
        'domain_busy:exempt': '5443901498',
        'domain_busy:hostos': '13136260754',
        'domain_busy:sm_exempt': '58549809',
        'domain_busy:nwk_exclusive': '11',
        'domain_busy:idle': '980648687811',
        'domain_busy:ssan_exempt': '452215',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'processor_elapsed_time': '1013660714257',
        'instance-name': 'processor0',
        'timestamp': '1453524150',
    }, {
        'domain_busy:kahuna': '1891766637',
        'timestamp': '1453524150',
        'domain_busy:cifs': '305947',
        'domain_busy:raid_exempt': '0',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'domain_busy:target': '3188648',
        'domain_busy:nwk_exempt': '0',
        'domain_busy:raid': '691372324',
        'domain_busy:storage': '1433411516',
        'instance-name': 'processor1',
        'domain_busy:cluster': '0',
        'domain_busy:wafl_xcleaner': '47954620',
        'domain_busy:wafl_exempt': '1211235777',
        'domain_busy:protocol': '0',
        'domain_busy': '990957980543,1891766637,1433411516,3572427934,'
                       '691372324,0,3188648,0,305947,1211235777,47954620,'
                       '34832715,0,0,0,0,2692084482,9834648927,393451',
        'domain_busy:nwk_legacy': '2692084482',
        'domain_busy:dnscache': '0',
        'domain_busy:exempt': '3572427934',
        'domain_busy:hostos': '9834648927',
        'domain_busy:sm_exempt': '34832715',
        'domain_busy:nwk_exclusive': '0',
        'domain_busy:idle': '990957980543',
        'domain_busy:ssan_exempt': '393451',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'processor_elapsed_time': '1013660714261',
        'instance-name': 'processor1',
        'timestamp': '1453524150',
    },
]
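# The EXPANDED_* fixtures above are the raw counters with their flattened
# array values ('cp_phase_times', 'domain_busy') broken out into one
# 'counter:label' key per label, using the 'labels' lists from the
# *_COUNTER_INFO dicts (labels are lowercased, e.g. 'hostOS' ->
# 'domain_busy:hostos'). Roughly, per counter dict (a sketch, not the
# library code):
#
#     values = counter['domain_busy'].split(',')
#     for label, value in zip(counter_info['labels'], values):
#         counter['domain_busy:' + label.lower()] = value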
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py0000664000567000056710000003607412701406250033607 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ddt
import mock

from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \
    import fakes as fake
from cinder.volume.drivers.netapp.dataontap.performance import perf_base


@ddt.ddt
class PerformanceLibraryTestCase(test.TestCase):

    def setUp(self):
        super(PerformanceLibraryTestCase, self).setUp()

        with mock.patch.object(perf_base.PerformanceLibrary,
                               '_init_counter_info'):
            self.zapi_client = mock.Mock()
            self.perf_library = perf_base.PerformanceLibrary(self.zapi_client)
            self.perf_library.system_object_name = 'system'
            self.perf_library.avg_processor_busy_base_counter_name = (
                'cpu_elapsed_time1')

    def test_init(self):
        mock_zapi_client = mock.Mock()
        mock_init_counter_info = self.mock_object(
            perf_base.PerformanceLibrary, '_init_counter_info')

        library = perf_base.PerformanceLibrary(mock_zapi_client)

        self.assertEqual(mock_zapi_client, library.zapi_client)
        mock_init_counter_info.assert_called_once_with()

    def test_init_counter_info(self):
        self.perf_library._init_counter_info()

        self.assertIsNone(self.perf_library.system_object_name)
        self.assertIsNone(
            self.perf_library.avg_processor_busy_base_counter_name)

    def test_get_node_utilization_kahuna_overutilized(self):
        mock_get_kahuna_utilization = self.mock_object(
            self.perf_library, '_get_kahuna_utilization',
            mock.Mock(return_value=61.0))
        mock_get_average_cpu_utilization = self.mock_object(
            self.perf_library, '_get_average_cpu_utilization',
            mock.Mock(return_value=25.0))

        result = self.perf_library._get_node_utilization('fake1', 'fake2',
                                                         'fake_node')

        self.assertAlmostEqual(100.0, result)
        mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
        self.assertFalse(mock_get_average_cpu_utilization.called)

    @ddt.data({'cpu': -0.01, 'cp_time': 10000, 'poll_time': 0},
              {'cpu': 1.01, 'cp_time': 0, 'poll_time': 1000},
              {'cpu': 0.50, 'cp_time': 0, 'poll_time': 0})
    @ddt.unpack
    def test_get_node_utilization_zero_time(self, cpu, cp_time, poll_time):
        mock_get_kahuna_utilization = self.mock_object(
            self.perf_library, '_get_kahuna_utilization',
            mock.Mock(return_value=59.0))
        mock_get_average_cpu_utilization = self.mock_object(
            self.perf_library, '_get_average_cpu_utilization',
            mock.Mock(return_value=cpu))
        mock_get_total_consistency_point_time = self.mock_object(
            self.perf_library, '_get_total_consistency_point_time',
            mock.Mock(return_value=cp_time))
        mock_get_consistency_point_p2_flush_time = self.mock_object(
            self.perf_library, '_get_consistency_point_p2_flush_time',
            mock.Mock(return_value=cp_time))
        mock_get_total_time = self.mock_object(
            self.perf_library, '_get_total_time',
            mock.Mock(return_value=poll_time))
        mock_get_adjusted_consistency_point_time = self.mock_object(
            self.perf_library, '_get_adjusted_consistency_point_time')

        result = self.perf_library._get_node_utilization('fake1', 'fake2',
                                                         'fake_node')

        expected = max(min(100.0, 100.0 * cpu), 0)
        self.assertEqual(expected, result)
        mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
        mock_get_average_cpu_utilization.assert_called_once_with('fake1',
                                                                 'fake2')
        mock_get_total_consistency_point_time.assert_called_once_with('fake1',
                                                                      'fake2')
        mock_get_consistency_point_p2_flush_time.assert_called_once_with(
            'fake1', 'fake2')
        mock_get_total_time.assert_called_once_with('fake1', 'fake2',
                                                    'total_cp_msecs')
        self.assertFalse(mock_get_adjusted_consistency_point_time.called)

    @ddt.data({'cpu': 0.75, 'adjusted_cp_time': 8000, 'expected': 80},
              {'cpu': 0.80, 'adjusted_cp_time': 7500, 'expected': 80},
              {'cpu': 0.50, 'adjusted_cp_time': 11000, 'expected': 100})
    @ddt.unpack
    def test_get_node_utilization(self, cpu, adjusted_cp_time, expected):
        mock_get_kahuna_utilization = self.mock_object(
            self.perf_library, '_get_kahuna_utilization',
            mock.Mock(return_value=59.0))
        mock_get_average_cpu_utilization = self.mock_object(
            self.perf_library, '_get_average_cpu_utilization',
            mock.Mock(return_value=cpu))
        mock_get_total_consistency_point_time = self.mock_object(
            self.perf_library, '_get_total_consistency_point_time',
            mock.Mock(return_value=90.0))
        mock_get_consistency_point_p2_flush_time = self.mock_object(
            self.perf_library, '_get_consistency_point_p2_flush_time',
            mock.Mock(return_value=50.0))
        mock_get_total_time = self.mock_object(
            self.perf_library, '_get_total_time',
            mock.Mock(return_value=10000))
        mock_get_adjusted_consistency_point_time = self.mock_object(
            self.perf_library, '_get_adjusted_consistency_point_time',
            mock.Mock(return_value=adjusted_cp_time))

        result = self.perf_library._get_node_utilization('fake1', 'fake2',
                                                         'fake_node')

        self.assertEqual(expected, result)
        mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
        mock_get_average_cpu_utilization.assert_called_once_with('fake1',
                                                                 'fake2')
        mock_get_total_consistency_point_time.assert_called_once_with('fake1',
                                                                      'fake2')
        mock_get_consistency_point_p2_flush_time.assert_called_once_with(
            'fake1', 'fake2')
        mock_get_total_time.assert_called_once_with('fake1', 'fake2',
                                                    'total_cp_msecs')
        mock_get_adjusted_consistency_point_time.assert_called_once_with(
            90.0, 50.0)

    def test_get_node_utilization_calculation_error(self):
        self.mock_object(self.perf_library,
                         '_get_kahuna_utilization',
                         mock.Mock(return_value=59.0))
        self.mock_object(self.perf_library,
                         '_get_average_cpu_utilization',
                         mock.Mock(return_value=25.0))
        self.mock_object(self.perf_library,
                         '_get_total_consistency_point_time',
                         mock.Mock(return_value=90.0))
        self.mock_object(self.perf_library,
                         '_get_consistency_point_p2_flush_time',
                         mock.Mock(return_value=50.0))
        self.mock_object(self.perf_library,
                         '_get_total_time',
                         mock.Mock(return_value=10000))
        self.mock_object(self.perf_library,
                         '_get_adjusted_consistency_point_time',
                         mock.Mock(side_effect=ZeroDivisionError))

        result = self.perf_library._get_node_utilization('fake1', 'fake2',
                                                         'fake_node')

        self.assertEqual(perf_base.DEFAULT_UTILIZATION, result)

    def test_get_kahuna_utilization(self):
        mock_get_performance_counter = self.mock_object(
            self.perf_library,
            '_get_performance_counter_average_multi_instance',
            mock.Mock(return_value=[0.2, 0.3]))

        result = self.perf_library._get_kahuna_utilization('fake_t1',
                                                           'fake_t2')

        self.assertAlmostEqual(50.0, result)
        mock_get_performance_counter.assert_called_once_with(
            'fake_t1', 'fake_t2', 'domain_busy:kahuna',
            'processor_elapsed_time')
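    # Taken together, the ddt cases above pin down the utilization
    # heuristic: a node whose kahuna domain is busy more than roughly 60%
    # of the time is reported as fully utilized (100.0); otherwise the
    # result is driven by the larger of CPU busyness and adjusted
    # consistency-point time over the polling interval, clamped to
    # [0, 100] (e.g. cpu=0.75 with adjusted_cp_time=8000 out of a
    # 10000-unit total yields 80). This reading is inferred from the test
    # data, not quoted from the library source.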
    def test_get_average_cpu_utilization(self):
        mock_get_performance_counter_average = self.mock_object(
            self.perf_library, '_get_performance_counter_average',
            mock.Mock(return_value=0.45))

        result = self.perf_library._get_average_cpu_utilization('fake_t1',
                                                                'fake_t2')

        self.assertAlmostEqual(0.45, result)
        mock_get_performance_counter_average.assert_called_once_with(
            'fake_t1', 'fake_t2', 'avg_processor_busy', 'cpu_elapsed_time1')

    def test_get_total_consistency_point_time(self):
        mock_get_performance_counter_delta = self.mock_object(
            self.perf_library, '_get_performance_counter_delta',
            mock.Mock(return_value=500))

        result = self.perf_library._get_total_consistency_point_time(
            'fake_t1', 'fake_t2')

        self.assertEqual(500, result)
        mock_get_performance_counter_delta.assert_called_once_with(
            'fake_t1', 'fake_t2', 'total_cp_msecs')

    def test_get_consistency_point_p2_flush_time(self):
        mock_get_performance_counter_delta = self.mock_object(
            self.perf_library, '_get_performance_counter_delta',
            mock.Mock(return_value=500))

        result = self.perf_library._get_consistency_point_p2_flush_time(
            'fake_t1', 'fake_t2')

        self.assertEqual(500, result)
        mock_get_performance_counter_delta.assert_called_once_with(
            'fake_t1', 'fake_t2', 'cp_phase_times:p2_flush')

    def test_get_total_time(self):
        mock_find_performance_counter_timestamp = self.mock_object(
            self.perf_library, '_find_performance_counter_timestamp',
            mock.Mock(side_effect=[100, 105]))

        result = self.perf_library._get_total_time('fake_t1', 'fake_t2',
                                                   'fake_counter')

        self.assertEqual(5000, result)
        mock_find_performance_counter_timestamp.assert_has_calls([
            mock.call('fake_t1', 'fake_counter'),
            mock.call('fake_t2', 'fake_counter')])

    def test_get_adjusted_consistency_point_time(self):
        result = self.perf_library._get_adjusted_consistency_point_time(
            500, 200)

        self.assertAlmostEqual(250, result)

    def test_get_performance_counter_delta(self):
        result = self.perf_library._get_performance_counter_delta(
            fake.COUNTERS_T1, fake.COUNTERS_T2, 'total_cp_msecs')

        self.assertEqual(1482, result)

    def test_get_performance_counter_average(self):
        result = self.perf_library._get_performance_counter_average(
            fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna',
            'processor_elapsed_time', 'processor0')

        self.assertAlmostEqual(0.00281954360981, result)

    def test_get_performance_counter_average_multi_instance(self):
        result = (
            self.perf_library._get_performance_counter_average_multi_instance(
                fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna',
                'processor_elapsed_time'))

        expected = [0.002819543609809441, 0.0033421611147606135]
        self.assertAlmostEqual(expected, result)

    def test_find_performance_counter_value(self):
        result = self.perf_library._find_performance_counter_value(
            fake.COUNTERS_T1, 'domain_busy:kahuna',
            instance_name='processor0')

        self.assertEqual('2712467226', result)

    def test_find_performance_counter_value_not_found(self):
        self.assertRaises(
            exception.NotFound,
            self.perf_library._find_performance_counter_value,
            fake.COUNTERS_T1, 'invalid', instance_name='processor0')

    def test_find_performance_counter_timestamp(self):
        result = self.perf_library._find_performance_counter_timestamp(
            fake.COUNTERS_T1, 'domain_busy')

        self.assertEqual('1453573777', result)

    def test_find_performance_counter_timestamp_not_found(self):
        self.assertRaises(
            exception.NotFound,
            self.perf_library._find_performance_counter_timestamp,
            fake.COUNTERS_T1, 'invalid', instance_name='processor0')

    def test_expand_performance_array(self):
        counter_info = {
            'labels': ['idle', 'kahuna', 'storage', 'exempt'],
            'name': 'domain_busy',
        }
        self.zapi_client.get_performance_counter_info = mock.Mock(
            return_value=counter_info)

        counter = {
            'node-name': 'cluster1-01',
            'instance-uuid': 'cluster1-01:kernel:processor0',
            'domain_busy': '969142314286,2567571412,2131582146,5383861579',
'processor0', 'timestamp': '1453512244', } self.perf_library._expand_performance_array('wafl', 'domain_busy', counter) modified_counter = { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': '969142314286,2567571412,2131582146,5383861579', 'instance-name': 'processor0', 'timestamp': '1453512244', 'domain_busy:idle': '969142314286', 'domain_busy:kahuna': '2567571412', 'domain_busy:storage': '2131582146', 'domain_busy:exempt': '5383861579', } self.assertEqual(modified_counter, counter) def test_get_base_counter_name(self): counter_info = { 'base-counter': 'cpu_elapsed_time', 'labels': [], 'name': 'avg_processor_busy', } self.zapi_client.get_performance_counter_info = mock.Mock( return_value=counter_info) result = self.perf_library._get_base_counter_name( 'system:constituent', 'avg_processor_busy') self.assertEqual('cpu_elapsed_time', result) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py0000664000567000056710000001356312701406250031237 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp 7mode NFS storage driver """ import ddt import mock from os_brick.remotefs import remotefs as remotefs_brick from oslo_utils import units from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes from cinder import utils from cinder.volume.drivers.netapp.dataontap import nfs_7mode from cinder.volume.drivers.netapp import utils as na_utils @ddt.ddt class NetApp7modeNfsDriverTestCase(test.TestCase): def setUp(self): super(NetApp7modeNfsDriverTestCase, self).setUp() kwargs = {'configuration': self.get_config_7mode()} with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): with mock.patch.object(remotefs_brick, 'RemoteFsClient', return_value=mock.Mock()): self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs) self.driver._mounted_shares = [fake.NFS_SHARE] self.driver.ssc_vols = True self.driver.zapi_client = mock.Mock() self.driver.perf_library = mock.Mock() def get_config_7mode(self): config = na_fakes.create_configuration_cmode() config.netapp_storage_protocol = 'nfs' config.netapp_login = 'root' config.netapp_password = 'pass' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'http' config.netapp_server_port = '80' return config @ddt.data({'nfs_sparsed_volumes': True}, {'nfs_sparsed_volumes': False}) @ddt.unpack def test_get_pool_stats(self, nfs_sparsed_volumes): self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes thick = not nfs_sparsed_volumes total_capacity_gb = na_utils.round_down( fake.TOTAL_BYTES // units.Gi, '0.01') free_capacity_gb = na_utils.round_down( fake.AVAILABLE_BYTES // units.Gi, '0.01') provisioned_capacity_gb = total_capacity_gb - free_capacity_gb capacity = { 'reserved_percentage': fake.RESERVED_PERCENTAGE, 'max_over_subscription_ratio': 
fake.MAX_OVER_SUBSCRIPTION_RATIO, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, } self.mock_object(self.driver, '_get_share_capacity_info', mock.Mock(return_value=capacity)) self.mock_object(self.driver.perf_library, 'get_node_utilization', mock.Mock(return_value=30.0)) result = self.driver._get_pool_stats(filter_function='filter', goodness_function='goodness') expected = [{'pool_name': '192.168.99.24:/fake/export/path', 'QoS_support': False, 'thick_provisioning_support': thick, 'thin_provisioning_support': not thick, 'free_capacity_gb': 12.0, 'total_capacity_gb': 4468.0, 'reserved_percentage': 7, 'max_over_subscription_ratio': 19.0, 'provisioned_capacity_gb': 4456.0, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness'}] self.assertEqual(expected, result) def test_shortlist_del_eligible_files(self): mock_get_path_for_export = self.mock_object( self.driver.zapi_client, 'get_actual_path_for_export') mock_get_path_for_export.return_value = fake.FLEXVOL mock_get_file_usage = self.mock_object( self.driver.zapi_client, 'get_file_usage') mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0] expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file in fake.FILE_LIST] result = self.driver._shortlist_del_eligible_files( fake.NFS_SHARE, fake.FILE_LIST) self.assertEqual(expected, result) def test_shortlist_del_eligible_files_empty_list(self): mock_get_export_ip_path = self.mock_object( self.driver, '_get_export_ip_path') mock_get_export_ip_path.return_value = ('', '/export_path') mock_get_path_for_export = self.mock_object( self.driver.zapi_client, 'get_actual_path_for_export') mock_get_path_for_export.return_value = fake.FLEXVOL result = self.driver._shortlist_del_eligible_files( fake.NFS_SHARE, []) self.assertEqual([], result) @ddt.data({'has_space': True, 'expected': True}, {'has_space': False, 'expected': False}) @ddt.unpack def test_is_share_clone_compatible(self, has_space, expected): mock_share_has_space_for_clone = self.mock_object( self.driver, '_share_has_space_for_clone') mock_share_has_space_for_clone.return_value = has_space result = self.driver._is_share_clone_compatible(fake.VOLUME, fake.NFS_SHARE) self.assertEqual(expected, result) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/__init__.py0000664000567000056710000000000012701406250030034 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py0000664000567000056710000006645512701406250031553 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
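# [Editor's note] The 7-mode NFS pool-stats expectations above (total 4468.0,
# free 12.0, provisioned 4456.0) come from converting raw byte counts to GiB
# and truncating. A sketch of that arithmetic with hypothetical byte counts
# (fake.TOTAL_BYTES / fake.AVAILABLE_BYTES are not shown here); treating
# na_utils.round_down as decimal truncation is an assumption matching the
# two-decimal '0.01' argument used in the tests:

from decimal import Decimal, ROUND_DOWN

GiB = 1 << 30  # bytes per GiB

def _sketch_round_down(value, precision='0.01'):
    """Truncate a value toward zero at the given decimal step."""
    return float(Decimal(str(value)).quantize(Decimal(precision),
                                              rounding=ROUND_DOWN))

total_bytes, available_bytes = 4798283464704, 12884901888   # hypothetical
total_gb = _sketch_round_down(total_bytes // GiB)           # 4468.0
free_gb = _sketch_round_down(available_bytes // GiB)        # 12.0
provisioned_gb = total_gb - free_gb                         # 4456.0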
""" Mock unit tests for the NetApp block storage 7-mode library """ import ddt from lxml import etree import mock from cinder import exception from cinder import test import cinder.tests.unit.volume.drivers.netapp.dataontap.client.fakes \ as client_fakes import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import block_7mode from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode from cinder.volume.drivers.netapp import utils as na_utils @ddt.ddt class NetAppBlockStorage7modeLibraryTestCase(test.TestCase): """Test case for NetApp's 7-Mode iSCSI library.""" def setUp(self): super(NetAppBlockStorage7modeLibraryTestCase, self).setUp() kwargs = {'configuration': self.get_config_7mode()} self.library = block_7mode.NetAppBlockStorage7modeLibrary( 'driver', 'protocol', **kwargs) self.library.zapi_client = mock.Mock() self.zapi_client = self.library.zapi_client self.library.perf_library = mock.Mock() self.library.vfiler = mock.Mock() # Deprecated option self.library.configuration.netapp_volume_list = None def tearDown(self): super(NetAppBlockStorage7modeLibraryTestCase, self).tearDown() def get_config_7mode(self): config = na_fakes.create_configuration_7mode() config.netapp_storage_protocol = 'iscsi' config.netapp_login = 'admin' config.netapp_password = 'pass' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'http' config.netapp_server_port = '80' return config @mock.patch.object(perf_7mode, 'Performance7modeLibrary', mock.Mock()) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.MagicMock(return_value=(1, 20))) @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary, '_get_root_volume_name') @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary, '_do_partner_setup') @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup') def test_do_setup(self, super_do_setup, mock_do_partner_setup, mock_get_root_volume_name): self.mock_object(client_base.Client, '_init_ssh_client') mock_get_root_volume_name.return_value = 'vol0' context = mock.Mock() self.library.do_setup(context) super_do_setup.assert_called_once_with(context) mock_do_partner_setup.assert_called_once_with() mock_get_root_volume_name.assert_called_once_with() @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.MagicMock(return_value=(1, 20))) def test_do_partner_setup(self): self.mock_object(client_base.Client, '_init_ssh_client') self.library.configuration.netapp_partner_backend_name = 'partner' self.library._do_partner_setup() self.assertIsNotNone(self.library.partner_zapi_client) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.MagicMock(return_value=(1, 20))) def test_do_partner_setup_no_partner(self): self.mock_object(client_base.Client, '_init_ssh_client') self.library._do_partner_setup() self.assertFalse(hasattr(self.library, 'partner_zapi_client')) @mock.patch.object( block_base.NetAppBlockStorageLibrary, 'check_for_setup_error') def test_check_for_setup_error(self, super_check_for_setup_error): self.zapi_client.get_ontapi_version.return_value = (1, 9) self.mock_object(self.library, '_refresh_volume_info') self.library.volume_list = ['open1', 'open2'] self.library.check_for_setup_error() 
super_check_for_setup_error.assert_called_once_with() def test_check_for_setup_error_no_filtered_pools(self): self.zapi_client.get_ontapi_version.return_value = (1, 9) self.mock_object(self.library, '_refresh_volume_info') self.library.volume_list = [] self.assertRaises(exception.NetAppDriverException, self.library.check_for_setup_error) def test_check_for_setup_error_too_old(self): self.zapi_client.get_ontapi_version.return_value = (1, 8) self.assertRaises(exception.VolumeBackendAPIException, self.library.check_for_setup_error) def test_find_mapped_lun_igroup(self): response = netapp_api.NaElement(etree.XML(""" %(initiator-group-name)s %(initiator-group-type)s 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 linux 0 false false true true true 21:00:00:24:ff:40:6c:c3 21:00:00:24:ff:40:6c:c2 Centos 2 """ % fake.IGROUP1)) initiators = fake.FC_FORMATTED_INITIATORS self.zapi_client.get_lun_map.return_value = response (igroup, lun_id) = self.library._find_mapped_lun_igroup('path', initiators) self.assertEqual(fake.IGROUP1_NAME, igroup) self.assertEqual('2', lun_id) def test_find_mapped_lun_igroup_initiator_mismatch(self): response = netapp_api.NaElement(etree.XML(""" openstack-igroup1 fcp 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 linux 0 false false true true true 21:00:00:24:ff:40:6c:c3 2 """)) initiators = fake.FC_FORMATTED_INITIATORS self.zapi_client.get_lun_map.return_value = response (igroup, lun_id) = self.library._find_mapped_lun_igroup('path', initiators) self.assertIsNone(igroup) self.assertIsNone(lun_id) def test_find_mapped_lun_igroup_no_igroups(self): response = netapp_api.NaElement(etree.XML(""" """)) initiators = fake.FC_FORMATTED_INITIATORS self.zapi_client.get_lun_map.return_value = response (igroup, lun_id) = self.library._find_mapped_lun_igroup('path', initiators) self.assertIsNone(igroup) self.assertIsNone(lun_id) def test_find_mapped_lun_igroup_raises(self): self.zapi_client.get_lun_map.side_effect = netapp_api.NaApiError initiators = fake.FC_FORMATTED_INITIATORS self.assertRaises(netapp_api.NaApiError, self.library._find_mapped_lun_igroup, 'path', initiators) def test_has_luns_mapped_to_initiators_local_map(self): initiator_list = fake.FC_FORMATTED_INITIATORS self.zapi_client.has_luns_mapped_to_initiators.return_value = True self.library.partner_zapi_client = mock.Mock() result = self.library._has_luns_mapped_to_initiators(initiator_list) self.assertTrue(result) self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( initiator_list) self.assertEqual(0, self.library.partner_zapi_client. 
has_luns_mapped_to_initiators.call_count) def test_has_luns_mapped_to_initiators_partner_map(self): initiator_list = fake.FC_FORMATTED_INITIATORS self.zapi_client.has_luns_mapped_to_initiators.return_value = False self.library.partner_zapi_client = mock.Mock() self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ return_value = True result = self.library._has_luns_mapped_to_initiators(initiator_list) self.assertTrue(result) self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( initiator_list) self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ assert_called_with(initiator_list) def test_has_luns_mapped_to_initiators_no_maps(self): initiator_list = fake.FC_FORMATTED_INITIATORS self.zapi_client.has_luns_mapped_to_initiators.return_value = False self.library.partner_zapi_client = mock.Mock() self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ return_value = False result = self.library._has_luns_mapped_to_initiators(initiator_list) self.assertFalse(result) self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( initiator_list) self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ assert_called_with(initiator_list) def test_has_luns_mapped_to_initiators_no_partner(self): initiator_list = fake.FC_FORMATTED_INITIATORS self.zapi_client.has_luns_mapped_to_initiators.return_value = False self.library.partner_zapi_client = mock.Mock() self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ return_value = True result = self.library._has_luns_mapped_to_initiators( initiator_list, include_partner=False) self.assertFalse(result) self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( initiator_list) self.assertEqual(0, self.library.partner_zapi_client. has_luns_mapped_to_initiators.call_count) def test_clone_lun_zero_block_count(self): """Test for when clone lun is not passed a block count.""" self.library._get_lun_attr = mock.Mock(return_value={ 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'}) self.library.zapi_client = mock.Mock() self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN] self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false') self.library.zapi_client.clone_lun.assert_called_once_with( '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, source_snapshot=None, src_block=0) def test_clone_lun_blocks(self): """Test for when clone lun is passed block information.""" block_count = 10 src_block = 10 dest_block = 30 self.library._get_lun_attr = mock.Mock(return_value={ 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'}) self.library.zapi_client = mock.Mock() self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN] self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false', block_count=block_count, src_block=src_block, dest_block=dest_block) self.library.zapi_client.clone_lun.assert_called_once_with( '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=block_count, dest_block=dest_block, src_block=src_block, source_snapshot=None) def test_clone_lun_no_space_reservation(self): """Test for when space_reservation is not passed.""" self.library._get_lun_attr = mock.Mock(return_value={ 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'}) self.library.lun_space_reservation = 'false' self.library.zapi_client = mock.Mock() self.library.zapi_client.get_lun_by_args.return_value = 
[fake.FAKE_LUN] self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN') self.library.zapi_client.clone_lun.assert_called_once_with( '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, source_snapshot=None) def test_clone_lun_qos_supplied(self): """Test for qos supplied in clone lun invocation.""" self.assertRaises(exception.VolumeDriverException, self.library._clone_lun, 'fakeLUN', 'newFakeLUN', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) def test_get_fc_target_wwpns(self): ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0], fake.FC_FORMATTED_TARGET_WWPNS[1]] ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2], fake.FC_FORMATTED_TARGET_WWPNS[3]] self.zapi_client.get_fc_target_wwpns.return_value = ports1 self.library.partner_zapi_client = mock.Mock() self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \ ports2 result = self.library._get_fc_target_wwpns() self.assertSetEqual(set(fake.FC_FORMATTED_TARGET_WWPNS), set(result)) def test_get_fc_target_wwpns_no_partner(self): ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0], fake.FC_FORMATTED_TARGET_WWPNS[1]] ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2], fake.FC_FORMATTED_TARGET_WWPNS[3]] self.zapi_client.get_fc_target_wwpns.return_value = ports1 self.library.partner_zapi_client = mock.Mock() self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \ ports2 result = self.library._get_fc_target_wwpns(include_partner=False) self.assertSetEqual(set(ports1), set(result)) @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary, '_refresh_volume_info', mock.Mock()) @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary, '_get_pool_stats', mock.Mock()) def test_vol_stats_calls_provide_ems(self): self.library.zapi_client.provide_ems = mock.Mock() self.library.get_volume_stats(refresh=True) self.assertEqual(1, self.library.zapi_client.provide_ems.call_count) def test_create_lun(self): self.library.vol_refresh_voluntary = False self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) self.library.zapi_client.create_lun.assert_called_once_with( fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA, None) self.assertTrue(self.library.vol_refresh_voluntary) def test_create_lun_with_qos_policy_group(self): self.assertRaises(exception.VolumeDriverException, self.library._create_lun, fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) def test_check_volume_type_for_lun_legacy_qos_not_supported(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.library._check_volume_type_for_lun, na_fakes.VOLUME, {}, {}, na_fakes.LEGACY_EXTRA_SPECS) self.assertEqual(0, mock_get_volume_type.call_count) def test_check_volume_type_for_lun_no_volume_type(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = None mock_get_backend_spec = self.mock_object( na_utils, 'get_backend_qos_spec_from_volume_type') self.library._check_volume_type_for_lun(na_fakes.VOLUME, {}, {}, None) self.assertEqual(0, mock_get_backend_spec.call_count) def test_check_volume_type_for_lun_qos_spec_not_supported(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = na_fakes.VOLUME_TYPE mock_get_backend_spec = self.mock_object( na_utils, 
'get_backend_qos_spec_from_volume_type') mock_get_backend_spec.return_value = na_fakes.QOS_SPEC self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.library._check_volume_type_for_lun, na_fakes.VOLUME, {}, {}, na_fakes.EXTRA_SPECS) def test_get_preferred_target_from_list(self): result = self.library._get_preferred_target_from_list( fake.ISCSI_TARGET_DETAILS_LIST) self.assertEqual(fake.ISCSI_TARGET_DETAILS_LIST[0], result) def test_mark_qos_policy_group_for_deletion(self): result = self.library._mark_qos_policy_group_for_deletion( fake.QOS_POLICY_GROUP_INFO) self.assertIsNone(result) def test_setup_qos_for_volume(self): result = self.library._setup_qos_for_volume(fake.VOLUME, fake.EXTRA_SPECS) self.assertIsNone(result) def test_manage_existing_lun_same_name(self): mock_lun = block_base.NetAppLun('handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'}) self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=mock_lun) self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(na_utils, 'log_extra_spec_warnings') self.library._check_volume_type_for_lun = mock.Mock() self.library._add_lun_to_table = mock.Mock() self.zapi_client.move_lun = mock.Mock() self.library.manage_existing({'name': 'name'}, {'ref': 'ref'}) self.library._get_existing_vol_with_manage_ref.assert_called_once_with( {'ref': 'ref'}) self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) self.assertEqual(1, self.library._add_lun_to_table.call_count) self.assertEqual(0, self.zapi_client.move_lun.call_count) def test_manage_existing_lun_new_path(self): mock_lun = block_base.NetAppLun( 'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'}) self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=mock_lun) self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(na_utils, 'log_extra_spec_warnings') self.library._check_volume_type_for_lun = mock.Mock() self.library._add_lun_to_table = mock.Mock() self.zapi_client.move_lun = mock.Mock() self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'}) self.assertEqual( 2, self.library._get_existing_vol_with_manage_ref.call_count) self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) self.assertEqual(1, self.library._add_lun_to_table.call_count) self.zapi_client.move_lun.assert_called_once_with( '/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume') def test_get_pool_stats_no_volumes(self): self.library.vols = [] result = self.library._get_pool_stats() self.assertListEqual([], result) @ddt.data({'netapp_lun_space_reservation': 'enabled'}, {'netapp_lun_space_reservation': 'disabled'}) @ddt.unpack def test_get_pool_stats(self, netapp_lun_space_reservation): self.library.volume_list = ['vol0', 'vol1', 'vol2'] self.library.root_volume_name = 'vol0' self.library.reserved_percentage = 5 self.library.max_over_subscription_ratio = 10.0 self.library.configuration.netapp_lun_space_reservation = ( netapp_lun_space_reservation) self.library.vols = netapp_api.NaElement( client_fakes.VOLUME_LIST_INFO_RESPONSE).get_child_by_name( 'volumes').get_children() self.library.perf_library.get_node_utilization = ( mock.Mock(return_value=30.0)) thick = netapp_lun_space_reservation == 'enabled' result = self.library._get_pool_stats(filter_function='filter', goodness_function='goodness') expected = [{ 'pool_name': 'vol1', 'consistencygroup_support': True, 'QoS_support': False, 'thin_provisioning_support': not thick, 'thick_provisioning_support': thick, 'provisioned_capacity_gb': 2.94, 
'free_capacity_gb': 1339.27, 'total_capacity_gb': 1342.21, 'reserved_percentage': 5, 'max_over_subscription_ratio': 10.0, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', }] self.assertEqual(expected, result) def test_get_filtered_pools_invalid_conf(self): """Verify an exception is raised if the regex pattern is invalid.""" self.library.configuration.netapp_pool_name_search_pattern = '(.+' self.assertRaises(exception.InvalidConfigurationValue, self.library._get_filtered_pools) @ddt.data('.*?3$|mix.+', '(.+?[0-9]+) ', '^.+3$', '^[a-z].*?[^4]$') def test_get_filtered_pools_match_select_pools(self, patterns): self.library.vols = fake.FAKE_7MODE_VOLUME['all'] self.library.configuration.netapp_pool_name_search_pattern = patterns filtered_pools = self.library._get_filtered_pools() self.assertEqual( fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'), filtered_pools[0] ) self.assertEqual( fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'), filtered_pools[1] ) @ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed3, open1234', '.+') def test_get_filtered_pools_match_all_pools(self, patterns): self.library.vols = fake.FAKE_7MODE_VOLUME['all'] self.library.configuration.netapp_pool_name_search_pattern = patterns filtered_pools = self.library._get_filtered_pools() self.assertEqual( fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'), filtered_pools[0] ) self.assertEqual( fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'), filtered_pools[1] ) self.assertEqual( fake.FAKE_7MODE_VOLUME['all'][2].get_child_content('name'), filtered_pools[2] ) @ddt.data('abc|stackopen|openstack|abc.*', 'abc', 'stackopen, openstack, open', '^$') def test_get_filtered_pools_non_matching_patterns(self, patterns): self.library.vols = fake.FAKE_7MODE_VOLUME['all'] self.library.configuration.netapp_pool_name_search_pattern = patterns filtered_pools = self.library._get_filtered_pools() self.assertListEqual([], filtered_pools) def test_get_pool_stats_no_ssc_vols(self): self.library.vols = {} pools = self.library._get_pool_stats() self.assertListEqual([], pools) def test_get_pool_stats_with_filtered_pools(self): self.library.vols = fake.FAKE_7MODE_VOL1 self.library.volume_list = [ fake.FAKE_7MODE_VOL1[0].get_child_content('name') ] self.library.root_volume_name = '' self.library.perf_library.get_node_utilization = ( mock.Mock(return_value=30.0)) pools = self.library._get_pool_stats(filter_function='filter', goodness_function='goodness') self.assertListEqual(fake.FAKE_7MODE_POOLS, pools) def test_get_pool_stats_no_filtered_pools(self): self.library.vols = fake.FAKE_7MODE_VOL1 self.library.volume_list = ['open1', 'open2'] self.library.root_volume_name = '' pools = self.library._get_pool_stats() self.assertListEqual([], pools) def test_delete_volume(self): self.library.vol_refresh_voluntary = False mock_super_delete_volume = self.mock_object( block_base.NetAppBlockStorageLibrary, 'delete_volume') self.library.delete_volume(fake.VOLUME) mock_super_delete_volume.assert_called_once_with(fake.VOLUME) self.assertTrue(self.library.vol_refresh_voluntary) def test_delete_snapshot(self): self.library.vol_refresh_voluntary = False mock_super_delete_snapshot = self.mock_object( block_base.NetAppBlockStorageLibrary, 'delete_snapshot') self.library.delete_snapshot(fake.SNAPSHOT) mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT) self.assertTrue(self.library.vol_refresh_voluntary) 
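# [Editor's note] The pattern-filtering tests in this file imply a simple
# rule: keep a pool when netapp_pool_name_search_pattern matches its name.
# A minimal sketch consistent with the @ddt expectations above; treating a
# comma-separated value as an end-anchored alternation (legacy exact-name
# list) while using a plain value as a start-anchored regex is an inference
# from those expectations -- in the driver this normalization lives in a
# utility helper, not in _get_filtered_pools itself:

import re

def _sketch_pool_filter(pool_names, search_pattern):
    """Keep pools whose names satisfy the configured search pattern."""
    if ',' in search_pattern:
        # Legacy comma-separated list: exact names, anchored at both ends.
        alternatives = '|'.join(p.strip() for p in search_pattern.split(','))
        regex = re.compile('(%s)$' % alternatives)
    else:
        # Plain regular expression; re.match anchors it at the start.
        regex = re.compile(search_pattern)
    return [name for name in pool_names if regex.match(name)]

pools = ['open123', 'mixed3', 'open1234']   # assumed fake 7-mode pool names
print(_sketch_pool_filter(pools, '^.+3$'))  # ['open123', 'mixed3']
print(_sketch_pool_filter(pools, ''))       # all three (empty pattern)
print(_sketch_pool_filter(pools, 'stackopen, openstack, open'))  # []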
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py0000664000567000056710000005170512701406250031136 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp NFS storage driver """ import os import copy import ddt import mock from os_brick.remotefs import remotefs as remotefs_brick from oslo_utils import units from cinder import exception from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume.drivers import nfs @ddt.ddt class NetAppNfsDriverTestCase(test.TestCase): def setUp(self): super(NetAppNfsDriverTestCase, self).setUp() configuration = mock.Mock() configuration.reserved_percentage = 0 configuration.nfs_mount_point_base = '/mnt/test' configuration.reserved_percentage = 0 configuration.max_over_subscription_ratio = 1.1 kwargs = {'configuration': configuration} with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): with mock.patch.object(remotefs_brick, 'RemoteFsClient', return_value=mock.Mock()): self.driver = nfs_base.NetAppNfsDriver(**kwargs) self.driver.ssc_enabled = False self.driver.db = mock.Mock() @mock.patch.object(nfs.NfsDriver, 'do_setup') @mock.patch.object(na_utils, 'check_flags') def test_do_setup(self, mock_check_flags, mock_super_do_setup): self.driver.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertTrue(mock_super_do_setup.called) def test_get_share_capacity_info(self): mock_get_capacity = self.mock_object(self.driver, '_get_capacity_info') mock_get_capacity.return_value = fake.CAPACITY_VALUES expected_total_capacity_gb = na_utils.round_down( fake.TOTAL_BYTES / units.Gi, '0.01') expected_free_capacity_gb = (na_utils.round_down( fake.AVAILABLE_BYTES / units.Gi, '0.01')) expected_reserved_percentage = round( self.driver.configuration.reserved_percentage) result = self.driver._get_share_capacity_info(fake.NFS_SHARE) self.assertEqual(expected_total_capacity_gb, result['total_capacity_gb']) self.assertEqual(expected_free_capacity_gb, result['free_capacity_gb']) self.assertEqual(expected_reserved_percentage, round(result['reserved_percentage'])) def test_get_capacity_info_ipv4_share(self): expected = fake.CAPACITY_VALUES self.driver.zapi_client = mock.Mock() get_capacity = self.driver.zapi_client.get_flexvol_capacity get_capacity.return_value = fake.CAPACITY_VALUES result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV4) self.assertEqual(expected, result) get_capacity.assert_has_calls([ mock.call(fake.EXPORT_PATH)]) def test_get_capacity_info_ipv6_share(self): expected = fake.CAPACITY_VALUES self.driver.zapi_client = mock.Mock() get_capacity = 
self.driver.zapi_client.get_flexvol_capacity get_capacity.return_value = fake.CAPACITY_VALUES result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV6) self.assertEqual(expected, result) get_capacity.assert_has_calls([ mock.call(fake.EXPORT_PATH)]) def test_create_volume(self): self.mock_object(self.driver, '_ensure_shares_mounted') self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(self.driver, '_do_create_volume') self.mock_object(self.driver, '_do_qos_for_volume') update_ssc = self.mock_object(self.driver, '_update_stale_vols') expected = {'provider_location': fake.NFS_SHARE} result = self.driver.create_volume(fake.NFS_VOLUME) self.assertEqual(expected, result) self.assertEqual(0, update_ssc.call_count) def test_create_volume_no_pool(self): volume = copy.deepcopy(fake.NFS_VOLUME) volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME) self.mock_object(self.driver, '_ensure_shares_mounted') self.assertRaises(exception.InvalidHost, self.driver.create_volume, volume) def test_create_volume_exception(self): self.mock_object(self.driver, '_ensure_shares_mounted') self.mock_object(na_utils, 'get_volume_extra_specs') mock_create = self.mock_object(self.driver, '_do_create_volume') mock_create.side_effect = Exception update_ssc = self.mock_object(self.driver, '_update_stale_vols') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, fake.NFS_VOLUME) self.assertEqual(0, update_ssc.call_count) def test_create_volume_from_snapshot(self): provider_location = fake.POOL_NAME snapshot = fake.CLONE_SOURCE self.mock_object(self.driver, '_clone_source_to_destination_volume', mock.Mock(return_value=provider_location)) result = self.driver.create_cloned_volume(fake.NFS_VOLUME, snapshot) self.assertEqual(provider_location, result) def test_clone_source_to_destination_volume(self): self.mock_object(self.driver, '_get_volume_location', mock.Mock( return_value=fake.POOL_NAME)) self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock( return_value=fake.EXTRA_SPECS)) self.mock_object( self.driver, '_clone_with_extension_check') self.mock_object(self.driver, '_do_qos_for_volume') expected = {'provider_location': fake.POOL_NAME} result = self.driver._clone_source_to_destination_volume( fake.CLONE_SOURCE, fake.CLONE_DESTINATION) self.assertEqual(expected, result) def test_clone_source_to_destination_volume_with_do_qos_exception(self): self.mock_object(self.driver, '_get_volume_location', mock.Mock( return_value=fake.POOL_NAME)) self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock( return_value=fake.EXTRA_SPECS)) self.mock_object( self.driver, '_clone_with_extension_check') self.mock_object(self.driver, '_do_qos_for_volume', mock.Mock( side_effect=Exception)) self.assertRaises( exception.VolumeBackendAPIException, self.driver._clone_source_to_destination_volume, fake.CLONE_SOURCE, fake.CLONE_DESTINATION) def test_clone_with_extension_check_equal_sizes(self): clone_source = copy.deepcopy(fake.CLONE_SOURCE) clone_source['size'] = fake.VOLUME['size'] self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(self.driver, 'local_path') mock_discover = self.mock_object(self.driver, '_discover_file_till_timeout') mock_discover.return_value = True self.mock_object(self.driver, '_set_rw_permissions') mock_extend_volume = self.mock_object(self.driver, 'extend_volume') self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME) self.assertEqual(0, mock_extend_volume.call_count) def 
test_clone_with_extension_check_unequal_sizes(self): clone_source = copy.deepcopy(fake.CLONE_SOURCE) clone_source['size'] = fake.VOLUME['size'] + 1 self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(self.driver, 'local_path') mock_discover = self.mock_object(self.driver, '_discover_file_till_timeout') mock_discover.return_value = True self.mock_object(self.driver, '_set_rw_permissions') mock_extend_volume = self.mock_object(self.driver, 'extend_volume') self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME) self.assertEqual(1, mock_extend_volume.call_count) def test_clone_with_extension_check_extend_exception(self): clone_source = copy.deepcopy(fake.CLONE_SOURCE) clone_source['size'] = fake.VOLUME['size'] + 1 self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(self.driver, 'local_path') mock_discover = self.mock_object(self.driver, '_discover_file_till_timeout') mock_discover.return_value = True self.mock_object(self.driver, '_set_rw_permissions') mock_extend_volume = self.mock_object(self.driver, 'extend_volume') mock_extend_volume.side_effect = Exception mock_cleanup = self.mock_object(self.driver, '_cleanup_volume_on_failure') self.assertRaises(exception.CinderException, self.driver._clone_with_extension_check, clone_source, fake.NFS_VOLUME) self.assertEqual(1, mock_cleanup.call_count) def test_clone_with_extension_check_no_discovery(self): self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(self.driver, 'local_path') self.mock_object(self.driver, '_set_rw_permissions') mock_discover = self.mock_object(self.driver, '_discover_file_till_timeout') mock_discover.return_value = False self.assertRaises(exception.CinderException, self.driver._clone_with_extension_check, fake.CLONE_SOURCE, fake.NFS_VOLUME) def test_create_cloned_volume(self): provider_location = fake.POOL_NAME src_vref = fake.CLONE_SOURCE self.mock_object(self.driver, '_clone_source_to_destination_volume', mock.Mock(return_value=provider_location)) result = self.driver.create_cloned_volume(fake.NFS_VOLUME, src_vref) self.assertEqual(provider_location, result) def test_do_qos_for_volume(self): self.assertRaises(NotImplementedError, self.driver._do_qos_for_volume, fake.NFS_VOLUME, fake.EXTRA_SPECS) def test_cleanup_volume_on_failure(self): path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) mock_local_path = self.mock_object(self.driver, 'local_path') mock_local_path.return_value = path mock_exists_check = self.mock_object(os.path, 'exists') mock_exists_check.return_value = True mock_delete = self.mock_object(self.driver, '_delete_file_at_path') self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME) mock_delete.assert_has_calls([mock.call(path)]) def test_cleanup_volume_on_failure_no_path(self): self.mock_object(self.driver, 'local_path') mock_exists_check = self.mock_object(os.path, 'exists') mock_exists_check.return_value = False mock_delete = self.mock_object(self.driver, '_delete_file_at_path') self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME) self.assertEqual(0, mock_delete.call_count) def test_get_vol_for_share(self): self.assertRaises(NotImplementedError, self.driver._get_vol_for_share, fake.NFS_SHARE) def test_get_export_ip_path_volume_id_provided(self): mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip') mock_get_host_ip.return_value = fake.IPV4_ADDRESS mock_get_export_path = self.mock_object( self.driver, '_get_export_path') mock_get_export_path.return_value = fake.EXPORT_PATH expected = 
(fake.IPV4_ADDRESS, fake.EXPORT_PATH) result = self.driver._get_export_ip_path(fake.VOLUME_ID) self.assertEqual(expected, result) def test_get_export_ip_path_share_provided(self): expected = (fake.SHARE_IP, fake.EXPORT_PATH) result = self.driver._get_export_ip_path(share=fake.NFS_SHARE) self.assertEqual(expected, result) def test_get_export_ip_path_volume_id_and_share_provided(self): mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip') mock_get_host_ip.return_value = fake.IPV4_ADDRESS mock_get_export_path = self.mock_object( self.driver, '_get_export_path') mock_get_export_path.return_value = fake.EXPORT_PATH expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH) result = self.driver._get_export_ip_path( fake.VOLUME_ID, fake.NFS_SHARE) self.assertEqual(expected, result) def test_get_export_ip_path_no_args(self): self.assertRaises(exception.InvalidInput, self.driver._get_export_ip_path) def test_get_host_ip(self): mock_get_provider_location = self.mock_object( self.driver, '_get_provider_location') mock_get_provider_location.return_value = fake.NFS_SHARE expected = fake.SHARE_IP result = self.driver._get_host_ip(fake.VOLUME_ID) self.assertEqual(expected, result) def test_get_export_path(self): mock_get_provider_location = self.mock_object( self.driver, '_get_provider_location') mock_get_provider_location.return_value = fake.NFS_SHARE expected = fake.EXPORT_PATH result = self.driver._get_export_path(fake.VOLUME_ID) self.assertEqual(expected, result) def test_extend_volume(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) self.mock_object(self.driver, 'local_path', mock.Mock(return_value=path)) mock_resize_image_file = self.mock_object(self.driver, '_resize_image_file') mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', mock.Mock(return_value=fake.EXTRA_SPECS)) mock_do_qos_for_volume = self.mock_object(self.driver, '_do_qos_for_volume') self.driver.extend_volume(fake.VOLUME, new_size) mock_resize_image_file.assert_called_once_with(path, new_size) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) mock_do_qos_for_volume.assert_called_once_with(volume_copy, fake.EXTRA_SPECS, cleanup=False) def test_extend_volume_resize_error(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) self.mock_object(self.driver, 'local_path', mock.Mock(return_value=path)) mock_resize_image_file = self.mock_object( self.driver, '_resize_image_file', mock.Mock(side_effect=netapp_api.NaApiError)) mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', mock.Mock(return_value=fake.EXTRA_SPECS)) mock_do_qos_for_volume = self.mock_object(self.driver, '_do_qos_for_volume') self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, fake.VOLUME, new_size) mock_resize_image_file.assert_called_once_with(path, new_size) self.assertFalse(mock_get_volume_extra_specs.called) self.assertFalse(mock_do_qos_for_volume.called) def test_extend_volume_qos_error(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) self.mock_object(self.driver, 'local_path', mock.Mock(return_value=path)) mock_resize_image_file = self.mock_object(self.driver, '_resize_image_file') mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', 
mock.Mock(return_value=fake.EXTRA_SPECS)) mock_do_qos_for_volume = self.mock_object( self.driver, '_do_qos_for_volume', mock.Mock(side_effect=netapp_api.NaApiError)) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, fake.VOLUME, new_size) mock_resize_image_file.assert_called_once_with(path, new_size) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) mock_do_qos_for_volume.assert_called_once_with(volume_copy, fake.EXTRA_SPECS, cleanup=False) def test_is_share_clone_compatible(self): self.assertRaises(NotImplementedError, self.driver._is_share_clone_compatible, fake.NFS_VOLUME, fake.NFS_SHARE) @ddt.data( {'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True}, {'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False}, {'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False}, {'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True}, {'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True}, {'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False}, ) @ddt.unpack def test_share_has_space_for_clone(self, size, thin, over, res, expected): total_bytes = 20 * units.Gi available_bytes = 12 * units.Gi with mock.patch.object(self.driver, '_get_capacity_info', return_value=( total_bytes, available_bytes)): with mock.patch.object(self.driver, 'max_over_subscription_ratio', over): with mock.patch.object(self.driver, 'reserved_percentage', res): result = self.driver._share_has_space_for_clone( fake.NFS_SHARE, size, thin=thin) self.assertEqual(expected, result) @ddt.data( {'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True}, {'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False}, {'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False}, {'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True}, {'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True}, {'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False}, ) @ddt.unpack @mock.patch.object(nfs_base.NetAppNfsDriver, '_get_capacity_info') def test_share_has_space_for_clone2(self, mock_get_capacity, size, thin, over, res, expected): total_bytes = 20 * units.Gi available_bytes = 12 * units.Gi mock_get_capacity.return_value = (total_bytes, available_bytes) with mock.patch.object(self.driver, 'max_over_subscription_ratio', over): with mock.patch.object(self.driver, 'reserved_percentage', res): result = self.driver._share_has_space_for_clone( fake.NFS_SHARE, size, thin=thin) self.assertEqual(expected, result) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py0000664000567000056710000006616312701406250031317 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
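# [Editor's note] The two _share_has_space_for_clone tables at the end of
# test_nfs_base.py above (20 GiB total / 12 GiB free) determine the capacity
# check. A minimal sketch reproducing every expected outcome; the order of
# the reserve deduction and the over-subscription multiplier is inferred
# from those outcomes, not copied from the driver:

GiB = 1 << 30

def _sketch_share_has_space_for_clone(total_bytes, available_bytes,
                                      size_gib, thin, over_ratio,
                                      reserved_percent):
    reserved = int(round(total_bytes * reserved_percent / 100.0))
    available = max(0, available_bytes - reserved)
    if thin:
        available = available * over_ratio  # thin clones may oversubscribe
    return available >= size_gib * GiB

T, A = 20 * GiB, 12 * GiB
assert _sketch_share_has_space_for_clone(T, A, 12, False, 1.0, 0)
assert not _sketch_share_has_space_for_clone(T, A, 12, True, 1.0, 5)
assert _sketch_share_has_space_for_clone(T, A, 240, True, 20.0, 0)
assert not _sketch_share_has_space_for_clone(T, A, 241, True, 20.0, 0)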
""" Mock unit tests for the NetApp cmode nfs storage driver """ import ddt import mock from os_brick.remotefs import remotefs as remotefs_brick from oslo_service import loopingcall from oslo_utils import units from cinder import exception from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap import nfs_cmode from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap import ssc_cmode from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume.drivers import nfs from cinder.volume import utils as volume_utils @ddt.ddt class NetAppCmodeNfsDriverTestCase(test.TestCase): def setUp(self): super(NetAppCmodeNfsDriverTestCase, self).setUp() kwargs = {'configuration': self.get_config_cmode()} with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): with mock.patch.object(remotefs_brick, 'RemoteFsClient', return_value=mock.Mock()): self.driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs) self.driver._mounted_shares = [fake.NFS_SHARE] self.driver.ssc_vols = True self.driver.vserver = fake.VSERVER_NAME self.driver.ssc_enabled = True self.driver.perf_library = mock.Mock() def get_config_cmode(self): config = na_fakes.create_configuration_cmode() config.netapp_storage_protocol = 'nfs' config.netapp_login = 'admin' config.netapp_password = 'pass' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'http' config.netapp_server_port = '80' config.netapp_vserver = fake.VSERVER_NAME return config @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) @mock.patch.object(client_cmode, 'Client', mock.Mock()) @mock.patch.object(nfs.NfsDriver, 'do_setup') @mock.patch.object(na_utils, 'check_flags') def test_do_setup(self, mock_check_flags, mock_super_do_setup): self.driver.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertTrue(mock_super_do_setup.called) @ddt.data({'thin': True, 'nfs_sparsed_volumes': True}, {'thin': True, 'nfs_sparsed_volumes': False}, {'thin': False, 'nfs_sparsed_volumes': True}, {'thin': False, 'nfs_sparsed_volumes': False}) @ddt.unpack def test_get_pool_stats(self, thin, nfs_sparsed_volumes): class test_volume(object): pass test_volume = test_volume() test_volume.id = {'vserver': 'openstack', 'name': 'vola'} test_volume.aggr = { 'disk_type': 'SSD', 'ha_policy': 'cfo', 'junction': '/vola', 'name': 'aggr1', 'raid_type': 'raiddp', } test_volume.export = {'path': fake.NFS_SHARE} test_volume.sis = {'dedup': False, 'compression': False} test_volume.state = { 'status': 'online', 'vserver_root': False, 'junction_active': True, } test_volume.qos = {'qos_policy_group': None} ssc_map = { 'mirrored': {}, 'dedup': {}, 'compression': {}, 'thin': {test_volume if thin else None}, 'all': [test_volume], } self.driver.ssc_vols = ssc_map self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes netapp_thin = 'true' if thin else 'false' netapp_thick = 'false' if thin else 'true' thick = not thin and not nfs_sparsed_volumes total_capacity_gb = na_utils.round_down( fake.TOTAL_BYTES // units.Gi, '0.01') free_capacity_gb = na_utils.round_down( fake.AVAILABLE_BYTES // 
units.Gi, '0.01') provisioned_capacity_gb = total_capacity_gb - free_capacity_gb capacity = { 'reserved_percentage': fake.RESERVED_PERCENTAGE, 'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, } self.mock_object(self.driver, '_get_share_capacity_info', mock.Mock(return_value=capacity)) self.driver.perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=30.0)) result = self.driver._get_pool_stats(filter_function='filter', goodness_function='goodness') expected = [{'pool_name': '192.168.99.24:/fake/export/path', 'netapp_unmirrored': 'true', 'QoS_support': True, 'thick_provisioning_support': thick, 'netapp_thick_provisioned': netapp_thick, 'netapp_nocompression': 'true', 'thin_provisioning_support': not thick, 'free_capacity_gb': 12.0, 'netapp_thin_provisioned': netapp_thin, 'total_capacity_gb': 4468.0, 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'false', 'reserved_percentage': 7, 'netapp_raid_type': 'raiddp', 'netapp_disk_type': 'SSD', 'netapp_nodedup': 'true', 'max_over_subscription_ratio': 19.0, 'provisioned_capacity_gb': 4456.0, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness'}] self.assertEqual(expected, result) def test_check_for_setup_error(self): super_check_for_setup_error = self.mock_object( nfs_base.NetAppNfsDriver, 'check_for_setup_error') mock_check_ssc_api_permissions = self.mock_object( ssc_cmode, 'check_ssc_api_permissions') mock_start_periodic_tasks = self.mock_object( self.driver, '_start_periodic_tasks') self.driver.zapi_client = mock.Mock() self.driver.check_for_setup_error() self.assertEqual(1, super_check_for_setup_error.call_count) mock_check_ssc_api_permissions.assert_called_once_with( self.driver.zapi_client) self.assertEqual(1, mock_start_periodic_tasks.call_count) def test_delete_volume(self): fake_provider_location = 'fake_provider_location' fake_volume = {'provider_location': fake_provider_location} self.mock_object(self.driver, '_delete_backing_file_for_volume') self.mock_object(na_utils, 'get_valid_qos_policy_group_info') self.driver.zapi_client = mock.Mock() mock_prov_deprov = self.mock_object(self.driver, '_post_prov_deprov_in_ssc') self.driver.delete_volume(fake_volume) mock_prov_deprov.assert_called_once_with(fake_provider_location) def test_delete_volume_exception_path(self): fake_provider_location = 'fake_provider_location' fake_volume = {'provider_location': fake_provider_location} self.mock_object(self.driver, '_delete_backing_file_for_volume') self.mock_object(na_utils, 'get_valid_qos_policy_group_info') self.driver.zapi_client = mock.Mock(side_effect=[Exception]) mock_prov_deprov = self.mock_object(self.driver, '_post_prov_deprov_in_ssc') self.driver.delete_volume(fake_volume) mock_prov_deprov.assert_called_once_with(fake_provider_location) def test_delete_backing_file_for_volume(self): mock_filer_delete = self.mock_object(self.driver, '_delete_volume_on_filer') mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume') self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME) mock_filer_delete.assert_called_once_with(fake.NFS_VOLUME) self.assertEqual(0, mock_super_delete.call_count) def test_delete_backing_file_for_volume_exception_path(self): mock_filer_delete = self.mock_object(self.driver, '_delete_volume_on_filer') mock_filer_delete.side_effect = [Exception] mock_super_delete = 
self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume') self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME) mock_filer_delete.assert_called_once_with(fake.NFS_VOLUME) mock_super_delete.assert_called_once_with(fake.NFS_VOLUME) def test_delete_volume_on_filer(self): mock_get_vs_ip = self.mock_object(self.driver, '_get_export_ip_path') mock_get_vs_ip.return_value = (fake.VSERVER_NAME, '/%s' % fake.FLEXVOL) self.driver.zapi_client = mock.Mock() mock_zapi_delete = self.driver.zapi_client.delete_file self.driver._delete_volume_on_filer(fake.NFS_VOLUME) mock_zapi_delete.assert_called_once_with( '/vol/%s/%s' % (fake.FLEXVOL, fake.NFS_VOLUME['name'])) def test_delete_snapshot(self): mock_get_location = self.mock_object(self.driver, '_get_provider_location') mock_get_location.return_value = fake.PROVIDER_LOCATION mock_delete_backing = self.mock_object( self.driver, '_delete_backing_file_for_snapshot') mock_prov_deprov = self.mock_object(self.driver, '_post_prov_deprov_in_ssc') self.driver.delete_snapshot(fake.test_snapshot) mock_delete_backing.assert_called_once_with(fake.test_snapshot) mock_prov_deprov.assert_called_once_with(fake.PROVIDER_LOCATION) def test_delete_backing_file_for_snapshot(self): mock_filer_delete = self.mock_object( self.driver, '_delete_snapshot_on_filer') mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_snapshot') self.driver._delete_backing_file_for_snapshot(fake.test_snapshot) mock_filer_delete.assert_called_once_with(fake.test_snapshot) self.assertEqual(0, mock_super_delete.call_count) def test_delete_backing_file_for_snapshot_exception_path(self): mock_filer_delete = self.mock_object( self.driver, '_delete_snapshot_on_filer') mock_filer_delete.side_effect = [Exception] mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_snapshot') self.driver._delete_backing_file_for_snapshot(fake.test_snapshot) mock_filer_delete.assert_called_once_with(fake.test_snapshot) mock_super_delete.assert_called_once_with(fake.test_snapshot) def test_delete_snapshot_on_filer(self): mock_get_vs_ip = self.mock_object(self.driver, '_get_export_ip_path') mock_get_vs_ip.return_value = (fake.VSERVER_NAME, '/%s' % fake.FLEXVOL) self.driver.zapi_client = mock.Mock() mock_zapi_delete = self.driver.zapi_client.delete_file self.driver._delete_snapshot_on_filer(fake.test_snapshot) mock_zapi_delete.assert_called_once_with( '/vol/%s/%s' % (fake.FLEXVOL, fake.test_snapshot['name'])) def test_do_qos_for_volume_no_exception(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO self.driver.zapi_client = mock.Mock() mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group mock_set_policy = self.mock_object(self.driver, '_set_qos_policy_group_on_volume') mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') mock_cleanup = self.mock_object(self.driver, '_cleanup_volume_on_failure') self.driver._do_qos_for_volume(fake.NFS_VOLUME, fake.EXTRA_SPECS) mock_get_info.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) mock_provision_qos.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO)]) mock_set_policy.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO)]) self.assertEqual(0, mock_error_log.call_count) self.assertEqual(0, mock_debug_log.call_count) self.assertEqual(0, mock_cleanup.call_count) def test_do_qos_for_volume_exception_w_cleanup(self): mock_get_info = 
self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO self.driver.zapi_client = mock.Mock() mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group mock_set_policy = self.mock_object(self.driver, '_set_qos_policy_group_on_volume') mock_set_policy.side_effect = netapp_api.NaApiError mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') mock_cleanup = self.mock_object(self.driver, '_cleanup_volume_on_failure') self.assertRaises(netapp_api.NaApiError, self.driver._do_qos_for_volume, fake.NFS_VOLUME, fake.EXTRA_SPECS) mock_get_info.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) mock_provision_qos.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO)]) mock_set_policy.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO)]) self.assertEqual(1, mock_error_log.call_count) self.assertEqual(1, mock_debug_log.call_count) mock_cleanup.assert_has_calls([ mock.call(fake.NFS_VOLUME)]) def test_do_qos_for_volume_exception_no_cleanup(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.side_effect = exception.Invalid self.driver.zapi_client = mock.Mock() mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group mock_set_policy = self.mock_object(self.driver, '_set_qos_policy_group_on_volume') mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') mock_cleanup = self.mock_object(self.driver, '_cleanup_volume_on_failure') self.assertRaises(exception.Invalid, self.driver._do_qos_for_volume, fake.NFS_VOLUME, fake.EXTRA_SPECS, cleanup=False) mock_get_info.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) self.assertEqual(0, mock_provision_qos.call_count) self.assertEqual(0, mock_set_policy.call_count) self.assertEqual(1, mock_error_log.call_count) self.assertEqual(0, mock_debug_log.call_count) self.assertEqual(0, mock_cleanup.call_count) def test_set_qos_policy_group_on_volume(self): mock_get_name_from_info = self.mock_object( na_utils, 'get_qos_policy_group_name_from_info') mock_get_name_from_info.return_value = fake.QOS_POLICY_GROUP_NAME mock_extract_host = self.mock_object(volume_utils, 'extract_host') mock_extract_host.return_value = fake.NFS_SHARE self.driver.zapi_client = mock.Mock() mock_get_flex_vol_name =\ self.driver.zapi_client.get_vol_by_junc_vserver mock_get_flex_vol_name.return_value = fake.FLEXVOL mock_file_assign_qos = self.driver.zapi_client.file_assign_qos self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO) mock_get_name_from_info.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO)]) mock_extract_host.assert_has_calls([ mock.call(fake.NFS_HOST_STRING, level='pool')]) mock_get_flex_vol_name.assert_has_calls([ mock.call(fake.VSERVER_NAME, fake.EXPORT_PATH)]) mock_file_assign_qos.assert_has_calls([ mock.call(fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, fake.NFS_VOLUME['name'])]) def test_set_qos_policy_group_on_volume_no_info(self): mock_get_name_from_info = self.mock_object( na_utils, 'get_qos_policy_group_name_from_info') mock_extract_host = self.mock_object(volume_utils, 'extract_host') self.driver.zapi_client = mock.Mock() mock_get_flex_vol_name =\ self.driver.zapi_client.get_vol_by_junc_vserver mock_file_assign_qos = self.driver.zapi_client.file_assign_qos self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME, None) 
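# [Editor's note] A compact sketch of the QoS-assignment flow these tests
# assert: resolve the pool (NFS share) from the volume's host string, map the
# export's junction path to its FlexVol, then attach the policy group to the
# backing file. Helper names come from the mocks; splitting the share on the
# last ':' to recover the export path is an assumption (the no-info and
# no-name variants around this point supply the early returns):

def _sketch_set_qos_policy_group_on_volume(driver, volume, qos_info):
    if qos_info is None:
        return                                       # no QoS requested
    policy_name = na_utils.get_qos_policy_group_name_from_info(qos_info)
    if policy_name is None:
        return                                       # info without a policy
    share = volume_utils.extract_host(volume['host'], level='pool')
    export_path = share.rsplit(':', 1)[1]            # 'host:/export' -> '/export'
    flexvol = driver.zapi_client.get_vol_by_junc_vserver(driver.vserver,
                                                         export_path)
    driver.zapi_client.file_assign_qos(flexvol, policy_name, volume['name'])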
self.assertEqual(0, mock_get_name_from_info.call_count) self.assertEqual(0, mock_extract_host.call_count) self.assertEqual(0, mock_get_flex_vol_name.call_count) self.assertEqual(0, mock_file_assign_qos.call_count) def test_set_qos_policy_group_on_volume_no_name(self): mock_get_name_from_info = self.mock_object( na_utils, 'get_qos_policy_group_name_from_info') mock_get_name_from_info.return_value = None mock_extract_host = self.mock_object(volume_utils, 'extract_host') self.driver.zapi_client = mock.Mock() mock_get_flex_vol_name =\ self.driver.zapi_client.get_vol_by_junc_vserver mock_file_assign_qos = self.driver.zapi_client.file_assign_qos self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO) mock_get_name_from_info.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO)]) self.assertEqual(0, mock_extract_host.call_count) self.assertEqual(0, mock_get_flex_vol_name.call_count) self.assertEqual(0, mock_file_assign_qos.call_count) def test_unmanage(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO self.driver.zapi_client = mock.Mock() mock_mark_for_deletion =\ self.driver.zapi_client.mark_qos_policy_group_for_deletion super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage') self.driver.unmanage(fake.NFS_VOLUME) mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)]) mock_mark_for_deletion.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO)]) super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)]) def test_unmanage_invalid_qos(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.side_effect = exception.Invalid super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage') self.driver.unmanage(fake.NFS_VOLUME) mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)]) super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)]) def test_create_volume(self): self.mock_object(self.driver, '_ensure_shares_mounted') self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(self.driver, '_do_create_volume') self.mock_object(self.driver, '_do_qos_for_volume') update_ssc = self.mock_object(self.driver, '_update_stale_vols') self.mock_object(self.driver, '_get_vol_for_share') expected = {'provider_location': fake.NFS_SHARE} result = self.driver.create_volume(fake.NFS_VOLUME) self.assertEqual(expected, result) self.assertEqual(1, update_ssc.call_count) def test_create_volume_exception(self): self.mock_object(self.driver, '_ensure_shares_mounted') self.mock_object(na_utils, 'get_volume_extra_specs') mock_create = self.mock_object(self.driver, '_do_create_volume') mock_create.side_effect = Exception update_ssc = self.mock_object(self.driver, '_update_stale_vols') self.mock_object(self.driver, '_get_vol_for_share') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, fake.NFS_VOLUME) self.assertEqual(1, update_ssc.call_count) def test_start_periodic_tasks(self): self.driver.zapi_client = mock.Mock() mock_remove_unused_qos_policy_groups = self.mock_object( self.driver.zapi_client, 'remove_unused_qos_policy_groups') harvest_qos_periodic_task = mock.Mock() mock_loopingcall = self.mock_object( loopingcall, 'FixedIntervalLoopingCall', mock.Mock(side_effect=[harvest_qos_periodic_task])) self.driver._start_periodic_tasks() mock_loopingcall.assert_has_calls([ mock.call(mock_remove_unused_qos_policy_groups)]) 
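        # FixedIntervalLoopingCall is patched with a one-item side_effect
        # list, so the single expected instantiation hands back
        # harvest_qos_periodic_task; the assertion below verifies that the
        # driver actually starts the QoS policy group harvesting loop.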
self.assertTrue(harvest_qos_periodic_task.start.called) @ddt.data( {'space': True, 'ssc': True, 'match': True, 'expected': True}, {'space': True, 'ssc': True, 'match': False, 'expected': False}, {'space': True, 'ssc': False, 'match': True, 'expected': True}, {'space': True, 'ssc': False, 'match': False, 'expected': True}, {'space': False, 'ssc': True, 'match': True, 'expected': False}, {'space': False, 'ssc': True, 'match': False, 'expected': False}, {'space': False, 'ssc': False, 'match': True, 'expected': False}, {'space': False, 'ssc': False, 'match': False, 'expected': False}, ) @ddt.unpack @mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver, '_is_share_vol_type_match') @mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver, '_share_has_space_for_clone') @mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver, '_is_volume_thin_provisioned') def test_is_share_clone_compatible(self, mock_is_volume_thin_provisioned, mock_share_has_space_for_clone, mock_is_share_vol_type_match, space, ssc, match, expected): mock_share_has_space_for_clone.return_value = space mock_is_share_vol_type_match.return_value = match with mock.patch.object(self.driver, 'ssc_enabled', ssc): result = self.driver._is_share_clone_compatible(fake.VOLUME, fake.NFS_SHARE) self.assertEqual(expected, result) @ddt.data( {'sparsed': True, 'ssc': True, 'vol_thin': True, 'expected': True}, {'sparsed': True, 'ssc': True, 'vol_thin': False, 'expected': True}, {'sparsed': True, 'ssc': False, 'vol_thin': True, 'expected': True}, {'sparsed': True, 'ssc': False, 'vol_thin': False, 'expected': True}, {'sparsed': False, 'ssc': True, 'vol_thin': True, 'expected': True}, {'sparsed': False, 'ssc': True, 'vol_thin': False, 'expected': False}, {'sparsed': False, 'ssc': False, 'vol_thin': True, 'expected': False}, {'sparsed': False, 'ssc': False, 'vol_thin': False, 'expected': False}, ) @ddt.unpack def test_is_volume_thin_provisioned( self, sparsed, ssc, vol_thin, expected): fake_volume = object() ssc_vols = {'thin': {fake_volume if vol_thin else None}} with mock.patch.object(self.driver, 'ssc_enabled', ssc): with mock.patch.object(self.driver, 'ssc_vols', ssc_vols): with mock.patch.object(self.driver.configuration, 'nfs_sparsed_volumes', sparsed): result = self.driver._is_volume_thin_provisioned( fake_volume) self.assertEqual(expected, result) @ddt.data( {'ssc': True, 'share': fake.NFS_SHARE, 'vol': fake.test_volume}, {'ssc': True, 'share': fake.NFS_SHARE, 'vol': None}, {'ssc': True, 'share': None, 'vol': fake.test_volume}, {'ssc': True, 'share': None, 'vol': None}, {'ssc': False, 'share': fake.NFS_SHARE, 'vol': fake.test_volume}, {'ssc': False, 'share': fake.NFS_SHARE, 'vol': None}, {'ssc': False, 'share': None, 'vol': fake.test_volume}, {'ssc': False, 'share': None, 'vol': None}, ) @ddt.unpack def test_post_prov_deprov_in_ssc(self, ssc, share, vol): with mock.patch.object(self.driver, 'ssc_enabled', ssc): with mock.patch.object( self.driver, '_get_vol_for_share') as mock_get_vol: with mock.patch.object( self.driver, '_update_stale_vols') as mock_update: mock_get_vol.return_value = vol self.driver._post_prov_deprov_in_ssc(share) if ssc and share and vol: mock_update.assert_called_once_with(volume=vol) else: self.assertEqual(0, mock_update.call_count) def test_get_vol_for_share(self): fake_volume = fake.test_volume ssc_vols = {'all': {fake_volume}} with mock.patch.object(self.driver, 'ssc_vols', ssc_vols): result = self.driver._get_vol_for_share(fake.NFS_SHARE) self.assertEqual(fake.test_volume, result) def test_get_vol_for_share_no_ssc_vols(self): with 
mock.patch.object(self.driver, 'ssc_vols', None): self.assertIsNone(self.driver._get_vol_for_share(fake.NFS_SHARE)) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py0000664000567000056710000007341012701406250031614 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp block storage C-mode library """ import ddt import mock from oslo_service import loopingcall from cinder import exception from cinder import test import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap import ssc_cmode from cinder.volume.drivers.netapp import utils as na_utils @ddt.ddt class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): """Test case for NetApp's C-Mode iSCSI library.""" def setUp(self): super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp() kwargs = {'configuration': self.get_config_cmode()} self.library = block_cmode.NetAppBlockStorageCmodeLibrary( 'driver', 'protocol', **kwargs) self.library.zapi_client = mock.Mock() self.zapi_client = self.library.zapi_client self.library.perf_library = mock.Mock() self.library.vserver = mock.Mock() self.library.ssc_vols = None self.fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME, fake.SIZE, None) self.fake_snapshot_lun = block_base.NetAppLun( fake.SNAPSHOT_LUN_HANDLE, fake.SNAPSHOT_NAME, fake.SIZE, None) self.mock_object(self.library, 'lun_table') self.library.lun_table = { fake.LUN_NAME: self.fake_lun, fake.SNAPSHOT_NAME: self.fake_snapshot_lun, } self.mock_object(block_base.NetAppBlockStorageLibrary, 'delete_volume') def tearDown(self): super(NetAppBlockStorageCmodeLibraryTestCase, self).tearDown() def get_config_cmode(self): config = na_fakes.create_configuration_cmode() config.netapp_storage_protocol = 'iscsi' config.netapp_login = 'admin' config.netapp_password = 'pass' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'https' config.netapp_server_port = '443' config.netapp_vserver = 'openstack' return config @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.MagicMock(return_value=(1, 20))) @mock.patch.object(na_utils, 'check_flags') @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup') def test_do_setup(self, super_do_setup, mock_check_flags): 
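        # do_setup should delegate to the base library's do_setup and
        # validate the required config flags exactly once; the patches
        # applied above stub out the ONTAPI version probe and the
        # performance library so no real backend is contacted.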
self.mock_object(client_base.Client, '_init_ssh_client') context = mock.Mock() self.library.do_setup(context) super_do_setup.assert_called_once_with(context) self.assertEqual(1, mock_check_flags.call_count) def test_check_for_setup_error(self): super_check_for_setup_error = self.mock_object( block_base.NetAppBlockStorageLibrary, 'check_for_setup_error') mock_check_ssc_api_permissions = self.mock_object( ssc_cmode, 'check_ssc_api_permissions') mock_start_periodic_tasks = self.mock_object( self.library, '_start_periodic_tasks') self.mock_object(ssc_cmode, 'refresh_cluster_ssc') self.mock_object(self.library, '_get_filtered_pools', mock.Mock(return_value=fake.FAKE_CMODE_POOLS)) self.library.check_for_setup_error() self.assertEqual(1, super_check_for_setup_error.call_count) mock_check_ssc_api_permissions.assert_called_once_with( self.library.zapi_client) self.assertEqual(1, mock_start_periodic_tasks.call_count) def test_check_for_setup_error_no_filtered_pools(self): self.mock_object(block_base.NetAppBlockStorageLibrary, 'check_for_setup_error') self.mock_object(ssc_cmode, 'check_ssc_api_permissions') self.mock_object(self.library, '_start_periodic_tasks') self.mock_object(ssc_cmode, 'refresh_cluster_ssc') self.mock_object(self.library, '_get_filtered_pools', mock.Mock(return_value=[])) self.assertRaises(exception.NetAppDriverException, self.library.check_for_setup_error) def test_find_mapped_lun_igroup(self): igroups = [fake.IGROUP1] self.zapi_client.get_igroup_by_initiators.return_value = igroups lun_maps = [{'initiator-group': fake.IGROUP1_NAME, 'lun-id': '1', 'vserver': fake.VSERVER_NAME}] self.zapi_client.get_lun_map.return_value = lun_maps (igroup, lun_id) = self.library._find_mapped_lun_igroup( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.assertEqual(fake.IGROUP1_NAME, igroup) self.assertEqual('1', lun_id) def test_find_mapped_lun_igroup_initiator_mismatch(self): self.zapi_client.get_igroup_by_initiators.return_value = [] lun_maps = [{'initiator-group': fake.IGROUP1_NAME, 'lun-id': '1', 'vserver': fake.VSERVER_NAME}] self.zapi_client.get_lun_map.return_value = lun_maps (igroup, lun_id) = self.library._find_mapped_lun_igroup( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.assertIsNone(igroup) self.assertIsNone(lun_id) def test_find_mapped_lun_igroup_name_mismatch(self): igroups = [{'initiator-group-os-type': 'linux', 'initiator-group-type': 'fcp', 'initiator-group-name': 'igroup2'}] self.zapi_client.get_igroup_by_initiators.return_value = igroups lun_maps = [{'initiator-group': fake.IGROUP1_NAME, 'lun-id': '1', 'vserver': fake.VSERVER_NAME}] self.zapi_client.get_lun_map.return_value = lun_maps (igroup, lun_id) = self.library._find_mapped_lun_igroup( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.assertIsNone(igroup) self.assertIsNone(lun_id) def test_find_mapped_lun_igroup_no_igroup_prefix(self): igroups = [{'initiator-group-os-type': 'linux', 'initiator-group-type': 'fcp', 'initiator-group-name': 'igroup2'}] self.zapi_client.get_igroup_by_initiators.return_value = igroups lun_maps = [{'initiator-group': 'igroup2', 'lun-id': '1', 'vserver': fake.VSERVER_NAME}] self.zapi_client.get_lun_map.return_value = lun_maps (igroup, lun_id) = self.library._find_mapped_lun_igroup( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.assertIsNone(igroup) self.assertIsNone(lun_id) def test_clone_lun_zero_block_count(self): """Test for when clone lun is not passed a block count.""" self.library._get_lun_attr = mock.Mock(return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() 
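        # With no block count supplied, _clone_lun should fall through to
        # whole-LUN semantics: the clone_lun ZAPI call asserted below must
        # be issued with block_count=0 and zero src/dest block offsets.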
self.library.zapi_client.get_lun_by_args.return_value = [ mock.Mock(spec=netapp_api.NaElement)] lun = fake.FAKE_LUN self.library._get_lun_by_args = mock.Mock(return_value=[lun]) self.library._add_lun_to_table = mock.Mock() self.library._update_stale_vols = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false') self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, source_snapshot=None) def test_clone_lun_blocks(self): """Test for when clone lun is passed block information.""" block_count = 10 src_block = 10 dest_block = 30 self.library._get_lun_attr = mock.Mock(return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() self.library.zapi_client.get_lun_by_args.return_value = [ mock.Mock(spec=netapp_api.NaElement)] lun = fake.FAKE_LUN self.library._get_lun_by_args = mock.Mock(return_value=[lun]) self.library._add_lun_to_table = mock.Mock() self.library._update_stale_vols = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false', block_count=block_count, src_block=src_block, dest_block=dest_block) self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=block_count, dest_block=dest_block, src_block=src_block, qos_policy_group_name=None, source_snapshot=None) def test_clone_lun_no_space_reservation(self): """Test for when space_reservation is not passed.""" self.library._get_lun_attr = mock.Mock(return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() self.library.lun_space_reservation = 'false' self.library.zapi_client.get_lun_by_args.return_value = [ mock.Mock(spec=netapp_api.NaElement)] lun = fake.FAKE_LUN self.library._get_lun_by_args = mock.Mock(return_value=[lun]) self.library._add_lun_to_table = mock.Mock() self.library._update_stale_vols = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN') self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, source_snapshot=None) def test_get_fc_target_wwpns(self): ports = [fake.FC_FORMATTED_TARGET_WWPNS[0], fake.FC_FORMATTED_TARGET_WWPNS[1]] self.zapi_client.get_fc_target_wwpns.return_value = ports result = self.library._get_fc_target_wwpns() self.assertSetEqual(set(ports), set(result)) @mock.patch.object(ssc_cmode, 'refresh_cluster_ssc', mock.Mock()) @mock.patch.object(block_cmode.NetAppBlockStorageCmodeLibrary, '_get_pool_stats', mock.Mock()) def test_vol_stats_calls_provide_ems(self): self.library.zapi_client.provide_ems = mock.Mock() self.library.get_volume_stats(refresh=True) self.assertEqual(1, self.library.zapi_client.provide_ems.call_count) def test_create_lun(self): self.library._update_stale_vols = mock.Mock() self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) self.library.zapi_client.create_lun.assert_called_once_with( fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA, None) self.assertEqual(1, self.library._update_stale_vols.call_count) @mock.patch.object(ssc_cmode, 'get_volumes_for_specs') @mock.patch.object(ssc_cmode, 'get_cluster_latest_ssc') def test_check_volume_type_for_lun_fail(self, get_ssc, get_vols): self.library.ssc_vols = ['vol'] fake_extra_specs = {'specs': 's'} get_vols.return_value = [ssc_cmode.NetAppVolume(name='name', vserver='vs')] mock_lun = block_base.NetAppLun('handle', 'name', '1', {'Volume': 'fake', 
'Path': '/vol/lun'}) self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.library._check_volume_type_for_lun, {'vol': 'vol'}, mock_lun, {'ref': 'ref'}, fake_extra_specs) get_vols.assert_called_with(['vol'], {'specs': 's'}) self.assertEqual(1, get_ssc.call_count) def test_get_preferred_target_from_list(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST operational_addresses = [ target['address'] for target in target_details_list[2:]] self.zapi_client.get_operational_network_interface_addresses = ( mock.Mock(return_value=operational_addresses)) result = self.library._get_preferred_target_from_list( target_details_list) self.assertEqual(target_details_list[2], result) def test_get_pool_stats_no_volumes(self): self.library.ssc_vols = [] result = self.library._get_pool_stats() self.assertListEqual([], result) @ddt.data({'thin': True, 'netapp_lun_space_reservation': 'enabled'}, {'thin': True, 'netapp_lun_space_reservation': 'disabled'}, {'thin': False, 'netapp_lun_space_reservation': 'enabled'}, {'thin': False, 'netapp_lun_space_reservation': 'disabled'}) @ddt.unpack def test_get_pool_stats(self, thin, netapp_lun_space_reservation): class test_volume(object): self.id = None self.aggr = None test_volume = test_volume() test_volume.id = {'vserver': 'openstack', 'name': 'vola'} test_volume.aggr = { 'disk_type': 'SSD', 'ha_policy': 'cfo', 'junction': '/vola', 'name': 'aggr1', 'raid_type': 'raiddp' } test_volume.space = { 'size_total_bytes': '10737418240', 'space-guarantee': 'file', 'size_avl_bytes': '2147483648', 'space-guarantee-enabled': False, 'thin_provisioned': False } test_volume.sis = {'dedup': False, 'compression': False} test_volume.state = { 'status': 'online', 'vserver_root': False, 'junction_active': True } test_volume.qos = {'qos_policy_group': None} ssc_map = { 'mirrored': {}, 'dedup': {}, 'compression': {}, 'thin': {test_volume if thin else None}, 'all': [test_volume] } self.library.ssc_vols = ssc_map self.library.reserved_percentage = 5 self.library.max_over_subscription_ratio = 10 self.library.configuration.netapp_lun_space_reservation = ( netapp_lun_space_reservation) self.library.perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=30.0)) netapp_thin = 'true' if thin else 'false' netapp_thick = 'false' if thin else 'true' thick = not thin and (netapp_lun_space_reservation == 'enabled') result = self.library._get_pool_stats(filter_function='filter', goodness_function='goodness') expected = [{'pool_name': 'vola', 'consistencygroup_support': True, 'netapp_unmirrored': 'true', 'QoS_support': True, 'thin_provisioning_support': not thick, 'thick_provisioning_support': thick, 'provisioned_capacity_gb': 8.0, 'netapp_thick_provisioned': netapp_thick, 'netapp_nocompression': 'true', 'free_capacity_gb': 2.0, 'netapp_thin_provisioned': netapp_thin, 'total_capacity_gb': 10.0, 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'false', 'reserved_percentage': 5, 'max_over_subscription_ratio': 10.0, 'netapp_raid_type': 'raiddp', 'netapp_disk_type': 'SSD', 'netapp_nodedup': 'true', 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness'}] self.assertEqual(expected, result) def test_delete_volume(self): self.mock_object(block_base.NetAppLun, 'get_metadata_property', mock.Mock(return_value=fake.POOL_NAME)) self.mock_object(self.library, '_update_stale_vols') self.mock_object(na_utils, 'get_valid_qos_policy_group_info', mock.Mock( return_value=fake.QOS_POLICY_GROUP_INFO)) self.mock_object(self.library, 
'_mark_qos_policy_group_for_deletion') self.library.delete_volume(fake.VOLUME) self.assertEqual(1, block_base.NetAppLun.get_metadata_property.call_count) block_base.NetAppBlockStorageLibrary.delete_volume\ .assert_called_once_with(fake.VOLUME) na_utils.get_valid_qos_policy_group_info.assert_called_once_with( fake.VOLUME) self.library._mark_qos_policy_group_for_deletion\ .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) self.assertEqual(1, self.library._update_stale_vols.call_count) def test_delete_volume_no_netapp_vol(self): self.mock_object(block_base.NetAppLun, 'get_metadata_property', mock.Mock(return_value=None)) self.mock_object(self.library, '_update_stale_vols') self.mock_object(na_utils, 'get_valid_qos_policy_group_info', mock.Mock( return_value=fake.QOS_POLICY_GROUP_INFO)) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.delete_volume(fake.VOLUME) block_base.NetAppLun.get_metadata_property.assert_called_once_with( 'Volume') block_base.NetAppBlockStorageLibrary.delete_volume\ .assert_called_once_with(fake.VOLUME) self.library._mark_qos_policy_group_for_deletion\ .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) self.assertEqual(0, self.library._update_stale_vols.call_count) def test_delete_volume_get_valid_qos_policy_group_info_exception(self): self.mock_object(block_base.NetAppLun, 'get_metadata_property', mock.Mock(return_value=fake.NETAPP_VOLUME)) self.mock_object(self.library, '_update_stale_vols') self.mock_object(na_utils, 'get_valid_qos_policy_group_info', mock.Mock(side_effect=exception.Invalid)) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.delete_volume(fake.VOLUME) block_base.NetAppLun.get_metadata_property.assert_called_once_with( 'Volume') block_base.NetAppBlockStorageLibrary.delete_volume\ .assert_called_once_with(fake.VOLUME) self.library._mark_qos_policy_group_for_deletion\ .assert_called_once_with(None) self.assertEqual(1, self.library._update_stale_vols.call_count) def test_delete_snapshot(self): self.mock_object(block_base.NetAppLun, 'get_metadata_property', mock.Mock(return_value=fake.NETAPP_VOLUME)) mock_super_delete_snapshot = self.mock_object( block_base.NetAppBlockStorageLibrary, 'delete_snapshot') mock_update_stale_vols = self.mock_object(self.library, '_update_stale_vols') self.library.delete_snapshot(fake.SNAPSHOT) mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT) self.assertTrue(mock_update_stale_vols.called) def test_delete_snapshot_no_netapp_vol(self): self.mock_object(block_base.NetAppLun, 'get_metadata_property', mock.Mock(return_value=None)) mock_super_delete_snapshot = self.mock_object( block_base.NetAppBlockStorageLibrary, 'delete_snapshot') mock_update_stale_vols = self.mock_object(self.library, '_update_stale_vols') self.library.delete_snapshot(fake.SNAPSHOT) mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT) self.assertFalse(mock_update_stale_vols.called) def test_setup_qos_for_volume(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', mock.Mock( return_value=fake.QOS_POLICY_GROUP_INFO)) self.mock_object(self.zapi_client, 'provision_qos_policy_group') result = self.library._setup_qos_for_volume(fake.VOLUME, fake.EXTRA_SPECS) self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result) self.zapi_client.provision_qos_policy_group.\ assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) def test_setup_qos_for_volume_exception_path(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', mock.Mock( 
side_effect=exception.Invalid)) self.mock_object(self.zapi_client, 'provision_qos_policy_group') self.assertRaises(exception.VolumeBackendAPIException, self.library._setup_qos_for_volume, fake.VOLUME, fake.EXTRA_SPECS) self.assertEqual(0, self.zapi_client. provision_qos_policy_group.call_count) def test_mark_qos_policy_group_for_deletion(self): self.mock_object(self.zapi_client, 'mark_qos_policy_group_for_deletion') self.library._mark_qos_policy_group_for_deletion( fake.QOS_POLICY_GROUP_INFO) self.zapi_client.mark_qos_policy_group_for_deletion\ .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) def test_unmanage(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', mock.Mock(return_value=fake.QOS_POLICY_GROUP_INFO)) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage') self.library.unmanage(fake.VOLUME) na_utils.get_valid_qos_policy_group_info.assert_called_once_with( fake.VOLUME) self.library._mark_qos_policy_group_for_deletion\ .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with( fake.VOLUME) def test_unmanage_w_invalid_qos_policy(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', mock.Mock(side_effect=exception.Invalid)) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage') self.library.unmanage(fake.VOLUME) na_utils.get_valid_qos_policy_group_info.assert_called_once_with( fake.VOLUME) self.library._mark_qos_policy_group_for_deletion\ .assert_called_once_with(None) block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with( fake.VOLUME) def test_manage_existing_lun_same_name(self): mock_lun = block_base.NetAppLun('handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'}) self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=mock_lun) self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(na_utils, 'log_extra_spec_warnings') self.library._check_volume_type_for_lun = mock.Mock() self.library._setup_qos_for_volume = mock.Mock() self.mock_object(na_utils, 'get_qos_policy_group_name_from_info', mock.Mock(return_value=fake.QOS_POLICY_GROUP_NAME)) self.library._add_lun_to_table = mock.Mock() self.zapi_client.move_lun = mock.Mock() mock_set_lun_qos_policy_group = self.mock_object( self.zapi_client, 'set_lun_qos_policy_group') self.library.manage_existing({'name': 'name'}, {'ref': 'ref'}) self.library._get_existing_vol_with_manage_ref.assert_called_once_with( {'ref': 'ref'}) self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) self.assertEqual(1, self.library._add_lun_to_table.call_count) self.assertEqual(0, self.zapi_client.move_lun.call_count) self.assertEqual(1, mock_set_lun_qos_policy_group.call_count) def test_manage_existing_lun_new_path(self): mock_lun = block_base.NetAppLun( 'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'}) self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=mock_lun) self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(na_utils, 'log_extra_spec_warnings') self.library._check_volume_type_for_lun = mock.Mock() self.library._add_lun_to_table = mock.Mock() self.zapi_client.move_lun = mock.Mock() self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'}) self.assertEqual( 2, self.library._get_existing_vol_with_manage_ref.call_count) self.assertEqual(1, 
self.library._check_volume_type_for_lun.call_count) self.assertEqual(1, self.library._add_lun_to_table.call_count) self.zapi_client.move_lun.assert_called_once_with( '/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume') def test_start_periodic_tasks(self): mock_remove_unused_qos_policy_groups = self.mock_object( self.zapi_client, 'remove_unused_qos_policy_groups') harvest_qos_periodic_task = mock.Mock() mock_loopingcall = self.mock_object( loopingcall, 'FixedIntervalLoopingCall', mock.Mock(side_effect=[harvest_qos_periodic_task])) self.library._start_periodic_tasks() mock_loopingcall.assert_has_calls([ mock.call(mock_remove_unused_qos_policy_groups)]) self.assertTrue(harvest_qos_periodic_task.start.called) @ddt.data('open+|demix+', 'open.+', '.+\d', '^((?!mix+).)*$', 'open123, open321') def test_get_filtered_pools_match_selected_pools(self, patterns): self.library.ssc_vols = fake.FAKE_CMODE_VOLUME self.library.configuration.netapp_pool_name_search_pattern = patterns filtered_pools = self.library._get_filtered_pools() self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][0].id['name'], filtered_pools[0].id['name']) self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][2].id['name'], filtered_pools[1].id['name']) @ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed, open321', '.*?') def test_get_filtered_pools_match_all_pools(self, patterns): self.library.ssc_vols = fake.FAKE_CMODE_VOLUME self.library.configuration.netapp_pool_name_search_pattern = patterns filtered_pools = self.library._get_filtered_pools() self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][0].id['name'], filtered_pools[0].id['name']) self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][1].id['name'], filtered_pools[1].id['name']) self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][2].id['name'], filtered_pools[2].id['name']) def test_get_filtered_pools_invalid_conf(self): """Verify an exception is raised if the regex pattern is invalid""" self.library.configuration.netapp_pool_name_search_pattern = '(.+' self.assertRaises(exception.InvalidConfigurationValue, self.library._get_filtered_pools) @ddt.data('abc|stackopen|openstack|abc*', 'abc', 'stackopen', 'openstack', 'abc*', '^$') def test_get_filtered_pools_non_matching_patterns(self, patterns): self.library.ssc_vols = fake.FAKE_CMODE_VOLUME self.library.configuration.netapp_pool_name_search_pattern = patterns filtered_pools = self.library._get_filtered_pools() self.assertListEqual([], filtered_pools) @ddt.data({}, None) def test_get_pool_stats_no_ssc_vols(self, vols): self.library.ssc_vols = vols pools = self.library._get_pool_stats() self.assertListEqual([], pools) def test_get_pool_stats_with_filtered_pools(self): self.library.ssc_vols = fake.ssc_map self.mock_object(self.library, '_get_filtered_pools', mock.Mock(return_value=[fake.FAKE_CMODE_VOL1])) self.library.perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=30.0)) pools = self.library._get_pool_stats(filter_function='filter', goodness_function='goodness') self.assertListEqual(fake.FAKE_CMODE_POOLS, pools) def test_get_pool_stats_no_filtered_pools(self): self.library.ssc_vols = fake.ssc_map self.mock_object(self.library, '_get_filtered_pools', mock.Mock(return_value=[])) pools = self.library._get_pool_stats() self.assertListEqual([], pools) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py0000664000567000056710000003311112701406250027377 0ustar jenkinsjenkins00000000000000# Copyright (c) - 2014, Clinton Knight. All rights reserved. # Copyright (c) - 2015, Tom Barron. All rights reserved. 
# Copyright (c) - 2016 Chuck Fouts. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap import ssc_cmode VOLUME_ID = 'f10d1a84-9b7b-427e-8fec-63c48b509a56' LUN_ID = 'ee6b4cc7-477b-4016-aa0c-7127b4e3af86' LUN_HANDLE = 'fake_lun_handle' LUN_NAME = 'lun1' LUN_SIZE = 3 LUN_TABLE = {LUN_NAME: None} SIZE = 1024 HOST_NAME = 'fake.host.name' BACKEND_NAME = 'fake_backend_name' POOL_NAME = 'aggr1' SHARE_IP = '192.168.99.24' EXPORT_PATH = '/fake/export/path' NFS_SHARE = '%s:%s' % (SHARE_IP, EXPORT_PATH) HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, POOL_NAME) NFS_HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, NFS_SHARE) FLEXVOL = 'openstack-flexvol' NFS_FILE_PATH = 'nfsvol' PATH = '/vol/%s/%s' % (POOL_NAME, LUN_NAME) LUN_METADATA = { 'OsType': None, 'SpaceReserved': 'true', 'Path': PATH, 'Qtree': None, 'Volume': POOL_NAME, } VOLUME = { 'name': LUN_NAME, 'size': SIZE, 'id': VOLUME_ID, 'host': HOST_STRING, } NFS_VOLUME = { 'name': NFS_FILE_PATH, 'size': SIZE, 'id': VOLUME_ID, 'host': NFS_HOST_STRING, } NETAPP_VOLUME = 'fake_netapp_volume' UUID1 = '12345678-1234-5678-1234-567812345678' LUN_PATH = '/vol/vol0/%s' % LUN_NAME VSERVER_NAME = 'openstack-vserver' FC_VOLUME = {'name': 'fake_volume'} FC_INITIATORS = ['21000024ff406cc3', '21000024ff406cc2'] FC_FORMATTED_INITIATORS = ['21:00:00:24:ff:40:6c:c3', '21:00:00:24:ff:40:6c:c2'] FC_TARGET_WWPNS = ['500a098280feeba5', '500a098290feeba5', '500a098190feeba5', '500a098180feeba5'] FC_FORMATTED_TARGET_WWPNS = ['50:0a:09:82:80:fe:eb:a5', '50:0a:09:82:90:fe:eb:a5', '50:0a:09:81:90:fe:eb:a5', '50:0a:09:81:80:fe:eb:a5'] FC_CONNECTOR = {'ip': '1.1.1.1', 'host': 'fake_host', 'wwnns': ['20000024ff406cc3', '20000024ff406cc2'], 'wwpns': ['21000024ff406cc3', '21000024ff406cc2']} FC_I_T_MAP = {'21000024ff406cc3': ['500a098280feeba5', '500a098290feeba5'], '21000024ff406cc2': ['500a098190feeba5', '500a098180feeba5']} FC_I_T_MAP_COMPLETE = {'21000024ff406cc3': FC_TARGET_WWPNS, '21000024ff406cc2': FC_TARGET_WWPNS} FC_FABRIC_MAP = {'fabricB': {'target_port_wwn_list': ['500a098190feeba5', '500a098180feeba5'], 'initiator_port_wwn_list': ['21000024ff406cc2']}, 'fabricA': {'target_port_wwn_list': ['500a098290feeba5', '500a098280feeba5'], 'initiator_port_wwn_list': ['21000024ff406cc3']}} FC_TARGET_INFO = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': 1, 'initiator_target_map': FC_I_T_MAP, 'target_wwn': FC_TARGET_WWPNS, 'target_discovered': True}} FC_TARGET_INFO_EMPTY = {'driver_volume_type': 'fibre_channel', 'data': {}} FC_TARGET_INFO_UNMAP = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': FC_TARGET_WWPNS, 'initiator_target_map': FC_I_T_MAP}} IGROUP1_NAME = 'openstack-igroup1' IGROUP1 = { 'initiator-group-os-type': 'linux', 'initiator-group-type': 'fcp', 'initiator-group-name': IGROUP1_NAME, } ISCSI_VOLUME = { 'name': 'fake_volume', 'id': 'fake_id', 'provider_auth': 'fake provider auth', } 
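# Illustrative sketch only (not part of the upstream fixtures): the
# HOST_STRING and NFS_HOST_STRING constants above follow Cinder's
# 'host@backend#pool' convention, which volume_utils.extract_host picks
# apart in the drivers. A minimal stand-in for that decomposition,
# assuming only the standard '@' and '#' delimiters, looks like this:
def _split_host_string(host):
    """Return (host, backend, pool) parsed from 'host@backend#pool'."""
    host_part, _, rest = host.partition('@')
    backend, _, pool = rest.partition('#')
    return host_part, backend, pool

# e.g. _split_host_string(HOST_STRING) == (HOST_NAME, BACKEND_NAME, POOL_NAME)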
ISCSI_LUN = {'name': ISCSI_VOLUME, 'lun_id': 42} ISCSI_SERVICE_IQN = 'fake_iscsi_service_iqn' ISCSI_CONNECTION_PROPERTIES = { 'data': { 'auth_method': 'fake_method', 'auth_password': 'auth', 'auth_username': 'provider', 'discovery_auth_method': 'fake_method', 'discovery_auth_username': 'provider', 'discovery_auth_password': 'auth', 'target_discovered': False, 'target_iqn': ISCSI_SERVICE_IQN, 'target_lun': 42, 'target_portal': '1.2.3.4:3260', 'volume_id': 'fake_id', }, 'driver_volume_type': 'iscsi', } ISCSI_CONNECTOR = { 'ip': '1.1.1.1', 'host': 'fake_host', 'initiator': 'fake_initiator_iqn', } ISCSI_TARGET_DETAILS_LIST = [ {'address': '5.6.7.8', 'port': '3260'}, {'address': '1.2.3.4', 'port': '3260'}, {'address': '99.98.97.96', 'port': '3260'}, ] IPV4_ADDRESS = '192.168.14.2' IPV6_ADDRESS = 'fe80::6e40:8ff:fe8a:130' NFS_SHARE_IPV4 = IPV4_ADDRESS + ':' + EXPORT_PATH NFS_SHARE_IPV6 = IPV6_ADDRESS + ':' + EXPORT_PATH RESERVED_PERCENTAGE = 7 MAX_OVER_SUBSCRIPTION_RATIO = 19.0 TOTAL_BYTES = 4797892092432 AVAILABLE_BYTES = 13479932478 CAPACITY_VALUES = (TOTAL_BYTES, AVAILABLE_BYTES) IGROUP1 = {'initiator-group-os-type': 'linux', 'initiator-group-type': 'fcp', 'initiator-group-name': IGROUP1_NAME} QOS_SPECS = {} EXTRA_SPECS = {} MAX_THROUGHPUT = '21734278B/s' QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' QOS_POLICY_GROUP_INFO_LEGACY = { 'legacy': 'legacy-' + QOS_POLICY_GROUP_NAME, 'spec': None, } QOS_POLICY_GROUP_SPEC = { 'max_throughput': MAX_THROUGHPUT, 'policy_name': QOS_POLICY_GROUP_NAME, } QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC} CLONE_SOURCE_NAME = 'fake_clone_source_name' CLONE_SOURCE_ID = 'fake_clone_source_id' CLONE_SOURCE_SIZE = 1024 CLONE_SOURCE = { 'size': CLONE_SOURCE_SIZE, 'name': CLONE_SOURCE_NAME, 'id': CLONE_SOURCE_ID, } CLONE_DESTINATION_NAME = 'fake_clone_destination_name' CLONE_DESTINATION_SIZE = 1041 CLONE_DESTINATION_ID = 'fake_clone_destination_id' CLONE_DESTINATION = { 'size': CLONE_DESTINATION_SIZE, 'name': CLONE_DESTINATION_NAME, 'id': CLONE_DESTINATION_ID, } SNAPSHOT_NAME = 'fake_snapshot_name' SNAPSHOT_LUN_HANDLE = 'fake_snapshot_lun_handle' SNAPSHOT = { 'name': SNAPSHOT_NAME, 'volume_size': SIZE, 'volume_id': 'fake_volume_id', 'busy': False, } VOLUME_REF = {'name': 'fake_vref_name', 'size': 42} FAKE_CMODE_POOLS = [ { 'QoS_support': True, 'consistencygroup_support': True, 'free_capacity_gb': 3.72, 'netapp_compression': u'true', 'netapp_dedup': u'true', 'netapp_disk_type': 'SSD', 'netapp_mirrored': u'true', 'netapp_nocompression': u'false', 'netapp_nodedup': u'false', 'netapp_raid_type': 'raiddp', 'netapp_thick_provisioned': u'false', 'netapp_thin_provisioned': u'true', 'netapp_unmirrored': u'false', 'pool_name': 'open123', 'reserved_percentage': 0, 'total_capacity_gb': 4.65, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'provisioned_capacity_gb': 0.93, 'max_over_subscription_ratio': 20.0, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', } ] FAKE_CMODE_VOLUME = { 'all': [ssc_cmode.NetAppVolume(name='open123', vserver='vs'), ssc_cmode.NetAppVolume(name='mixed', vserver='vs'), ssc_cmode.NetAppVolume(name='open321', vserver='vs')], } FAKE_7MODE_VOLUME = { 'all': [ netapp_api.NaElement( etree.XML(""" open123 """)), netapp_api.NaElement( etree.XML(""" mixed3 """)), netapp_api.NaElement( etree.XML(""" open1234 """)) ], } FAKE_CMODE_VOL1 = ssc_cmode.NetAppVolume(name='open123', vserver='openstack') FAKE_CMODE_VOL1.state['vserver_root'] = False FAKE_CMODE_VOL1.state['status'] = 
'online' FAKE_CMODE_VOL1.state['junction_active'] = True FAKE_CMODE_VOL1.space['size_avl_bytes'] = '4000000000' FAKE_CMODE_VOL1.space['size_total_bytes'] = '5000000000' FAKE_CMODE_VOL1.space['space-guarantee-enabled'] = False FAKE_CMODE_VOL1.space['space-guarantee'] = 'file' FAKE_CMODE_VOL1.space['thin_provisioned'] = True FAKE_CMODE_VOL1.mirror['mirrored'] = True FAKE_CMODE_VOL1.qos['qos_policy_group'] = None FAKE_CMODE_VOL1.aggr['name'] = 'aggr1' FAKE_CMODE_VOL1.aggr['junction'] = '/vola' FAKE_CMODE_VOL1.sis['dedup'] = True FAKE_CMODE_VOL1.sis['compression'] = True FAKE_CMODE_VOL1.aggr['raid_type'] = 'raiddp' FAKE_CMODE_VOL1.aggr['ha_policy'] = 'cfo' FAKE_CMODE_VOL1.aggr['disk_type'] = 'SSD' ssc_map = { 'mirrored': [FAKE_CMODE_VOL1], 'dedup': [FAKE_CMODE_VOL1], 'compression': [FAKE_CMODE_VOL1], 'thin': [FAKE_CMODE_VOL1], 'all': [FAKE_CMODE_VOL1], } FILE_LIST = ['file1', 'file2', 'file3'] FAKE_LUN = netapp_api.NaElement.create_node_with_children( 'lun-info', **{'alignment': 'indeterminate', 'block-size': '512', 'comment': '', 'creation-timestamp': '1354536362', 'is-space-alloc-enabled': 'false', 'is-space-reservation-enabled': 'true', 'mapped': 'false', 'multiprotocol-type': 'linux', 'online': 'true', 'path': '/vol/fakeLUN/fakeLUN', 'prefix-size': '0', 'qtree': '', 'read-only': 'false', 'serial-number': '2FfGI$APyN68', 'share-state': 'none', 'size': '20971520', 'size-used': '0', 'staging': 'false', 'suffix-size': '0', 'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412', 'volume': 'fakeLUN', 'vserver': 'fake_vserver'}) FAKE_7MODE_VOL1 = [netapp_api.NaElement( etree.XML(""" open123 online 0 0 0 false false """))] FAKE_7MODE_POOLS = [ { 'pool_name': 'open123', 'consistencygroup_support': True, 'QoS_support': False, 'reserved_percentage': 0, 'total_capacity_gb': 0.0, 'free_capacity_gb': 0.0, 'max_over_subscription_ratio': 20.0, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'provisioned_capacity_gb': 0.0, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', } ] CG_VOLUME_NAME = 'fake_cg_volume' CG_GROUP_NAME = 'fake_consistency_group' SOURCE_CG_VOLUME_NAME = 'fake_source_cg_volume' CG_VOLUME_ID = 'fake_cg_volume_id' CG_VOLUME_SIZE = 100 SOURCE_CG_VOLUME_ID = 'fake_source_cg_volume_id' CONSISTENCY_GROUP_NAME = 'fake_cg' SOURCE_CONSISTENCY_GROUP_ID = 'fake_source_cg_id' CONSISTENCY_GROUP_ID = 'fake_cg_id' CG_SNAPSHOT_ID = 'fake_cg_snapshot_id' CG_SNAPSHOT_NAME = 'snapshot-' + CG_SNAPSHOT_ID CG_VOLUME_SNAPSHOT_ID = 'fake_cg_volume_snapshot_id' CG_LUN_METADATA = { 'OsType': None, 'Path': '/vol/aggr1/fake_cg_volume', 'SpaceReserved': 'true', 'Qtree': None, 'Volume': POOL_NAME, } SOURCE_CG_VOLUME = { 'name': SOURCE_CG_VOLUME_NAME, 'size': CG_VOLUME_SIZE, 'id': SOURCE_CG_VOLUME_ID, 'host': 'hostname@backend#cdot', 'consistencygroup_id': None, 'status': 'fake_status', } CG_VOLUME = { 'name': CG_VOLUME_NAME, 'size': 100, 'id': CG_VOLUME_ID, 'host': 'hostname@backend#cdot', 'consistencygroup_id': CONSISTENCY_GROUP_ID, 'status': 'fake_status', } SOURCE_CONSISTENCY_GROUP = { 'id': SOURCE_CONSISTENCY_GROUP_ID, 'status': 'fake_status', } CONSISTENCY_GROUP = { 'id': CONSISTENCY_GROUP_ID, 'status': 'fake_status', 'name': CG_GROUP_NAME, } CG_SNAPSHOT = { 'id': CG_SNAPSHOT_ID, 'name': CG_SNAPSHOT_NAME, 'volume_size': CG_VOLUME_SIZE, 'consistencygroup_id': CONSISTENCY_GROUP_ID, 'status': 'fake_status', 'volume_id': 'fake_source_volume_id', } CG_VOLUME_SNAPSHOT = { 'name': CG_SNAPSHOT_NAME, 'volume_size': CG_VOLUME_SIZE, 'cgsnapshot_id': CG_SNAPSHOT_ID, 'id': 
CG_VOLUME_SNAPSHOT_ID, 'status': 'fake_status', 'volume_id': CG_VOLUME_ID, } class test_volume(object): pass test_volume = test_volume() test_volume.id = {'vserver': 'openstack', 'name': 'vola'} test_volume.aggr = { 'disk_type': 'SSD', 'ha_policy': 'cfo', 'junction': '/vola', 'name': 'aggr1', 'raid_type': 'raiddp', } test_volume.export = {'path': NFS_SHARE} test_volume.sis = {'dedup': False, 'compression': False} test_volume.state = { 'status': 'online', 'vserver_root': False, 'junction_active': True, } test_volume.qos = {'qos_policy_group': None} class test_snapshot(object): pass def __getitem__(self, key): return getattr(self, key) PROVIDER_LOCATION = 'fake_provider_location' test_snapshot = test_snapshot() test_snapshot.id = 'fake_snap_id' test_snapshot.name = 'snapshot-%s' % test_snapshot.id test_snapshot.volume_id = 'fake_volume_id' test_snapshot.provider_location = PROVIDER_LOCATION cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_driver_interfaces.py0000664000567000056710000000617412701406250034226 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp block storage driver interfaces """ import mock from cinder import test from cinder.volume.drivers.netapp.dataontap import block_7mode from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.volume.drivers.netapp.dataontap import fc_7mode from cinder.volume.drivers.netapp.dataontap import fc_cmode from cinder.volume.drivers.netapp.dataontap import iscsi_7mode from cinder.volume.drivers.netapp.dataontap import iscsi_cmode class NetAppBlockStorageDriverInterfaceTestCase(test.TestCase): def setUp(self): super(NetAppBlockStorageDriverInterfaceTestCase, self).setUp() self.mock_object(block_cmode.NetAppBlockStorageCmodeLibrary, '__init__', mock.Mock(return_value=None)) self.mock_object(block_7mode.NetAppBlockStorage7modeLibrary, '__init__', mock.Mock(return_value=None)) self.iscsi_7mode_driver = iscsi_7mode.NetApp7modeISCSIDriver() self.iscsi_cmode_driver = iscsi_cmode.NetAppCmodeISCSIDriver() self.fc_7mode_driver = fc_7mode.NetApp7modeFibreChannelDriver() self.fc_cmode_driver = fc_cmode.NetAppCmodeFibreChannelDriver() def test_driver_interfaces_match(self): """Ensure the NetApp block storage driver interfaces match. The four block storage Cinder drivers from NetApp (iSCSI/FC, 7-mode/C-mode) are merely passthrough shim layers atop a common block storage library. Bugs have been introduced when a Cinder method was exposed via a subset of those driver shims. This test ensures they remain in sync and the library features are uniformly available in the four drivers. 
""" # Get local functions of each driver interface iscsi_7mode = self._get_local_functions(self.iscsi_7mode_driver) iscsi_cmode = self._get_local_functions(self.iscsi_cmode_driver) fc_7mode = self._get_local_functions(self.fc_7mode_driver) fc_cmode = self._get_local_functions(self.fc_cmode_driver) # Ensure NetApp block storage driver shims are identical self.assertSetEqual(iscsi_7mode, iscsi_cmode) self.assertSetEqual(iscsi_7mode, fc_7mode) self.assertSetEqual(iscsi_7mode, fc_cmode) def _get_local_functions(self, obj): """Get function names of an object without superclass functions.""" return set([key for key, value in type(obj).__dict__.items() if callable(value)]) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/0000775000567000056710000000000012701406543027220 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py0000664000567000056710000010333212701406250033177 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from lxml import etree import mock import paramiko import six from cinder import exception from cinder import ssh_utils from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_7mode from cinder.volume.drivers.netapp import utils as netapp_utils CONNECTION_INFO = {'hostname': 'hostname', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd'} class NetApp7modeClientTestCase(test.TestCase): def setUp(self): super(NetApp7modeClientTestCase, self).setUp() self.fake_volume = six.text_type(uuid.uuid4()) self.mock_object(client_7mode.Client, '_init_ssh_client') with mock.patch.object(client_7mode.Client, 'get_ontapi_version', return_value=(1, 20)): self.client = client_7mode.Client([self.fake_volume], **CONNECTION_INFO) self.client.ssh_client = mock.MagicMock() self.client.connection = mock.MagicMock() self.connection = self.client.connection self.fake_lun = six.text_type(uuid.uuid4()) def tearDown(self): super(NetApp7modeClientTestCase, self).tearDown() def test_get_iscsi_target_details_no_targets(self): response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response target_list = self.client.get_iscsi_target_details() self.assertEqual([], target_list) def test_get_iscsi_target_details(self): expected_target = { "address": "127.0.0.1", "port": "1337", "tpgroup-tag": "7777", } response = netapp_api.NaElement( etree.XML(""" %(address)s %(port)s %(tpgroup-tag)s """ % expected_target)) 
self.connection.invoke_successfully.return_value = response target_list = self.client.get_iscsi_target_details() self.assertEqual([expected_target], target_list) def test_get_iscsi_service_details_with_no_iscsi_service(self): response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response iqn = self.client.get_iscsi_service_details() self.assertIsNone(iqn) def test_get_iscsi_service_details(self): expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1' response = netapp_api.NaElement( etree.XML(""" %s """ % expected_iqn)) self.connection.invoke_successfully.return_value = response iqn = self.client.get_iscsi_service_details() self.assertEqual(expected_iqn, iqn) def test_get_lun_list(self): response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response luns = self.client.get_lun_list() self.assertEqual(2, len(luns)) def test_get_igroup_by_initiators_none_found(self): initiators = fake.FC_FORMATTED_INITIATORS[0] response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response igroup = self.client.get_igroup_by_initiators(initiators) self.assertEqual([], igroup) def test_get_igroup_by_initiators(self): initiators = [fake.FC_FORMATTED_INITIATORS[0]] response = netapp_api.NaElement( etree.XML(""" %(initiator-group-name)s %(initiator-group-type)s 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 linux 0 false false true true true 21:00:00:24:ff:40:6c:c3 """ % fake.IGROUP1)) self.connection.invoke_successfully.return_value = response igroups = self.client.get_igroup_by_initiators(initiators) # make these lists of dicts comparable using hashable dictionaries igroups = set( [netapp_utils.hashabledict(igroup) for igroup in igroups]) expected = set([netapp_utils.hashabledict(fake.IGROUP1)]) self.assertSetEqual(igroups, expected) def test_get_igroup_by_initiators_multiple(self): initiators = fake.FC_FORMATTED_INITIATORS response = netapp_api.NaElement( etree.XML(""" %(initiator-group-name)s %(initiator-group-type)s 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 linux 21:00:00:24:ff:40:6c:c3 21:00:00:24:ff:40:6c:c2 openstack-igroup2 fcp 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 linux 21:00:00:24:ff:40:6c:c2 """ % fake.IGROUP1)) self.connection.invoke_successfully.return_value = response igroups = self.client.get_igroup_by_initiators(initiators) # make these lists of dicts comparable using hashable dictionaries igroups = set( [netapp_utils.hashabledict(igroup) for igroup in igroups]) expected = set([netapp_utils.hashabledict(fake.IGROUP1)]) self.assertSetEqual(igroups, expected) def test_clone_lun(self): fake_clone_start = netapp_api.NaElement( etree.XML(""" 1337 volume-uuid """)) fake_clone_status = netapp_api.NaElement( etree.XML(""" completed """)) self.connection.invoke_successfully.side_effect = [fake_clone_start, fake_clone_status] self.client.clone_lun('path', 'new_path', 'fakeLUN', 'newFakeLUN') self.assertEqual(2, self.connection.invoke_successfully.call_count) def test_clone_lun_api_error(self): fake_clone_start = netapp_api.NaElement( etree.XML(""" 1337 volume-uuid """)) fake_clone_status = netapp_api.NaElement( etree.XML(""" error """)) self.connection.invoke_successfully.side_effect = [fake_clone_start, fake_clone_status] self.assertRaises(netapp_api.NaApiError, self.client.clone_lun, 'path', 'new_path', 'fakeLUN', 'newFakeLUN') def test_clone_lun_multiple_zapi_calls(self): # Max block-ranges per call = 32, max blocks per range = 2^24 # Force 2 calls bc = 2 ** 24 * 32 * 2 
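        # Each clone-start request carries at most 32 block ranges of
        # 2**24 blocks apiece, so bc = 2 * (32 * 2**24) must be split
        # across two clone-start calls; with one status poll per clone,
        # four invoke_successfully calls are expected in total.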
fake_clone_start = netapp_api.NaElement( etree.XML(""" 1337 volume-uuid """)) fake_clone_status = netapp_api.NaElement( etree.XML(""" completed """)) self.connection.invoke_successfully.side_effect = [fake_clone_start, fake_clone_status, fake_clone_start, fake_clone_status] self.client.clone_lun('path', 'new_path', 'fakeLUN', 'newFakeLUN', block_count=bc) self.assertEqual(4, self.connection.invoke_successfully.call_count) def test_clone_lun_wait_for_clone_to_finish(self): # Max block-ranges per call = 32, max blocks per range = 2^24 # Force 2 calls bc = 2 ** 24 * 32 * 2 fake_clone_start = netapp_api.NaElement( etree.XML(""" 1337 volume-uuid """)) fake_clone_status = netapp_api.NaElement( etree.XML(""" running """)) fake_clone_status_completed = netapp_api.NaElement( etree.XML(""" completed """)) fake_responses = [fake_clone_start, fake_clone_status, fake_clone_status_completed, fake_clone_start, fake_clone_status_completed] self.connection.invoke_successfully.side_effect = fake_responses with mock.patch('time.sleep') as mock_sleep: self.client.clone_lun('path', 'new_path', 'fakeLUN', 'newFakeLUN', block_count=bc) mock_sleep.assert_called_once_with(1) self.assertEqual(5, self.connection.invoke_successfully.call_count) def test_get_lun_by_args(self): response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response luns = self.client.get_lun_by_args() self.assertEqual(1, len(luns)) def test_get_lun_by_args_no_lun_found(self): response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response luns = self.client.get_lun_by_args() self.assertEqual(0, len(luns)) def test_get_lun_by_args_with_args_specified(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response lun = self.client.get_lun_by_args(path=path) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] lun_info_args = actual_request.get_children() # Assert request is made with correct arguments self.assertEqual('path', lun_info_args[0].get_name()) self.assertEqual(path, lun_info_args[0].get_content()) self.assertEqual(1, len(lun)) def test_get_filer_volumes(self): response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response volumes = self.client.get_filer_volumes() self.assertEqual(1, len(volumes)) def test_get_filer_volumes_no_volumes(self): response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = response volumes = self.client.get_filer_volumes() self.assertEqual([], volumes) def test_get_lun_map(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) self.connection.invoke_successfully.return_value = mock.Mock() self.client.get_lun_map(path=path) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] lun_info_args = actual_request.get_children() # Assert request is made with correct arguments self.assertEqual('path', lun_info_args[0].get_name()) self.assertEqual(path, lun_info_args[0].get_content()) def test_set_space_reserve(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) self.connection.invoke_successfully.return_value = mock.Mock() self.client.set_space_reserve(path, 'true') __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] lun_info_args = actual_request.get_children() # Assert request is 
made with correct arguments self.assertEqual('path', lun_info_args[0].get_name()) self.assertEqual(path, lun_info_args[0].get_content()) self.assertEqual('enable', lun_info_args[1].get_name()) self.assertEqual('true', lun_info_args[1].get_content()) def test_get_actual_path_for_export(self): fake_export_path = 'fake_export_path' expected_actual_pathname = 'fake_actual_pathname' response = netapp_api.NaElement( etree.XML(""" %(path)s """ % {'path': expected_actual_pathname})) self.connection.invoke_successfully.return_value = response actual_pathname = self.client.get_actual_path_for_export( fake_export_path) __, __, _kwargs = self.connection.invoke_successfully.mock_calls[0] enable_tunneling = _kwargs['enable_tunneling'] self.assertEqual(expected_actual_pathname, actual_pathname) self.assertTrue(enable_tunneling) def test_clone_file(self): expected_src_path = "fake_src_path" expected_dest_path = "fake_dest_path" fake_volume_id = '0309c748-0d94-41f0-af46-4fbbd76686cf' fake_clone_op_id = 'c22ad299-ecec-4ec0-8de4-352b887bfce2' fake_clone_id_response = netapp_api.NaElement( etree.XML(""" %(volume)s %(clone_id)s """ % {'volume': fake_volume_id, 'clone_id': fake_clone_op_id})) fake_clone_list_response = netapp_api.NaElement( etree.XML(""" %(volume)s %(clone_id)s %(clone_id)s completed """ % {'volume': fake_volume_id, 'clone_id': fake_clone_op_id})) self.connection.invoke_successfully.side_effect = [ fake_clone_id_response, fake_clone_list_response] self.client.clone_file(expected_src_path, expected_dest_path) __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] enable_tunneling = _kwargs['enable_tunneling'] actual_src_path = actual_request \ .get_child_by_name('source-path').get_content() actual_dest_path = actual_request.get_child_by_name( 'destination-path').get_content() self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) self.assertEqual(actual_request.get_child_by_name( 'destination-exists'), None) self.assertTrue(enable_tunneling) def test_clone_file_when_clone_fails(self): """Ensure clone is cleaned up on failure.""" expected_src_path = "fake_src_path" expected_dest_path = "fake_dest_path" fake_volume_id = '0309c748-0d94-41f0-af46-4fbbd76686cf' fake_clone_op_id = 'c22ad299-ecec-4ec0-8de4-352b887bfce2' fake_clone_id_response = netapp_api.NaElement( etree.XML(""" %(volume)s %(clone_id)s """ % {'volume': fake_volume_id, 'clone_id': fake_clone_op_id})) fake_clone_list_response = netapp_api.NaElement( etree.XML(""" %(volume)s %(clone_id)s %(clone_id)s failed """ % {'volume': fake_volume_id, 'clone_id': fake_clone_op_id})) fake_clone_clear_response = mock.Mock() self.connection.invoke_successfully.side_effect = [ fake_clone_id_response, fake_clone_list_response, fake_clone_clear_response] self.assertRaises(netapp_api.NaApiError, self.client.clone_file, expected_src_path, expected_dest_path) __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] enable_tunneling = _kwargs['enable_tunneling'] actual_src_path = actual_request \ .get_child_by_name('source-path').get_content() actual_dest_path = actual_request.get_child_by_name( 'destination-path').get_content() self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) self.assertEqual(actual_request.get_child_by_name( 'destination-exists'), None) self.assertTrue(enable_tunneling) __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[1] 
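        # mock_calls[1] is the clone-list-status poll that reported
        # 'failed'; the unpacking below digs the clone-id out of that
        # request to confirm the driver polled the same clone operation it
        # started, before the clone-clear cleanup call is checked last.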
actual_request = _args[0] enable_tunneling = _kwargs['enable_tunneling'] actual_clone_id = actual_request.get_child_by_name('clone-id') actual_clone_id_info = actual_clone_id.get_child_by_name( 'clone-id-info') actual_clone_op_id = actual_clone_id_info.get_child_by_name( 'clone-op-id').get_content() actual_volume_uuid = actual_clone_id_info.get_child_by_name( 'volume-uuid').get_content() self.assertEqual(fake_clone_op_id, actual_clone_op_id) self.assertEqual(fake_volume_id, actual_volume_uuid) self.assertTrue(enable_tunneling) # Ensure that the clone-clear call is made upon error __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[2] actual_request = _args[0] enable_tunneling = _kwargs['enable_tunneling'] actual_clone_id = actual_request \ .get_child_by_name('clone-id').get_content() self.assertEqual(fake_clone_op_id, actual_clone_id) self.assertTrue(enable_tunneling) def test_get_file_usage(self): expected_bytes = "2048" fake_path = 'fake_path' response = netapp_api.NaElement( etree.XML(""" %(unique-bytes)s """ % {'unique-bytes': expected_bytes})) self.connection.invoke_successfully.return_value = response actual_bytes = self.client.get_file_usage(fake_path) self.assertEqual(expected_bytes, actual_bytes) def test_get_ifconfig(self): expected_response = mock.Mock() self.connection.invoke_successfully.return_value = expected_response actual_response = self.client.get_ifconfig() __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] self.assertEqual('net-ifconfig-get', actual_request.get_name()) self.assertEqual(expected_response, actual_response) def test_get_fc_target_wwpns(self): wwpn1 = '50:0a:09:81:90:fe:eb:a5' wwpn2 = '50:0a:09:82:90:fe:eb:a5' response = netapp_api.NaElement( etree.XML(""" %(wwpn1)s true 1a %(wwpn2)s true 1b """ % {'wwpn1': wwpn1, 'wwpn2': wwpn2})) self.connection.invoke_successfully.return_value = response wwpns = self.client.get_fc_target_wwpns() self.assertSetEqual(set(wwpns), set([wwpn1, wwpn2])) def test_get_flexvol_capacity(self): expected_total_bytes = 1000 expected_available_bytes = 750 fake_flexvol_path = '/fake/vol' response = netapp_api.NaElement( etree.XML(""" %(total_bytes)s %(available_bytes)s """ % {'total_bytes': expected_total_bytes, 'available_bytes': expected_available_bytes})) self.connection.invoke_successfully.return_value = response total_bytes, available_bytes = ( self.client.get_flexvol_capacity(fake_flexvol_path)) self.assertEqual(expected_total_bytes, total_bytes) self.assertEqual(expected_available_bytes, available_bytes) def test_get_performance_instance_names(self): mock_send_request = self.mock_object(self.client, 'send_request') mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE) result = self.client.get_performance_instance_names('processor') expected = ['processor0', 'processor1'] self.assertEqual(expected, result) perf_object_instance_list_info_args = {'objectname': 'processor'} mock_send_request.assert_called_once_with( 'perf-object-instance-list-info', perf_object_instance_list_info_args, enable_tunneling=False) def test_get_performance_counters(self): mock_send_request = self.mock_object(self.client, 'send_request') mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE) instance_names = ['system'] counter_names = ['avg_processor_busy'] result = self.client.get_performance_counters('system', instance_names, counter_names) expected = [ { 'avg_processor_busy': 
'13215732322', 'instance-name': 'system', 'timestamp': '1454146292', } ] self.assertEqual(expected, result) perf_object_get_instances_args = { 'objectname': 'system', 'instances': [ {'instance': instance} for instance in instance_names ], 'counters': [ {'counter': counter} for counter in counter_names ], } mock_send_request.assert_called_once_with( 'perf-object-get-instances', perf_object_get_instances_args, enable_tunneling=False) def test_get_system_name(self): mock_send_request = self.mock_object(self.client, 'send_request') mock_send_request.return_value = netapp_api.NaElement( fake_client.SYSTEM_GET_INFO_RESPONSE) result = self.client.get_system_name() self.assertEqual(fake_client.NODE_NAME, result) def test_check_iscsi_initiator_exists_when_no_initiator_exists(self): self.connection.invoke_successfully = mock.Mock( side_effect=netapp_api.NaApiError) initiator = fake_client.INITIATOR_IQN initiator_exists = self.client.check_iscsi_initiator_exists(initiator) self.assertFalse(initiator_exists) def test_check_iscsi_initiator_exists_when_initiator_exists(self): self.connection.invoke_successfully = mock.Mock() initiator = fake_client.INITIATOR_IQN initiator_exists = self.client.check_iscsi_initiator_exists(initiator) self.assertTrue(initiator_exists) def test_set_iscsi_chap_authentication(self): ssh = mock.Mock(paramiko.SSHClient) sshpool = mock.Mock(ssh_utils.SSHPool) self.client.ssh_client.ssh_pool = sshpool self.mock_object(self.client.ssh_client, 'execute_command') sshpool.item().__enter__ = mock.Mock(return_value=ssh) sshpool.item().__exit__ = mock.Mock(return_value=False) self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN, fake_client.USER_NAME, fake_client.PASSWORD) command = ('iscsi security add -i iqn.2015-06.com.netapp:fake_iqn ' '-s CHAP -p passw0rd -n fake_user') self.client.ssh_client.execute_command.assert_has_calls( [mock.call(ssh, command)] ) def test_get_snapshot_if_snapshot_present_not_busy(self): expected_vol_name = fake.SNAPSHOT['volume_id'] expected_snapshot_name = fake.SNAPSHOT['name'] response = netapp_api.NaElement( fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_7MODE) self.connection.invoke_successfully.return_value = response snapshot = self.client.get_snapshot(expected_vol_name, expected_snapshot_name) self.assertEqual(expected_vol_name, snapshot['volume']) self.assertEqual(expected_snapshot_name, snapshot['name']) self.assertEqual(set([]), snapshot['owners']) self.assertFalse(snapshot['busy']) def test_get_snapshot_if_snapshot_present_busy(self): expected_vol_name = fake.SNAPSHOT['volume_id'] expected_snapshot_name = fake.SNAPSHOT['name'] response = netapp_api.NaElement( fake_client.SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_7MODE) self.connection.invoke_successfully.return_value = response snapshot = self.client.get_snapshot(expected_vol_name, expected_snapshot_name) self.assertEqual(expected_vol_name, snapshot['volume']) self.assertEqual(expected_snapshot_name, snapshot['name']) self.assertEqual(set([]), snapshot['owners']) self.assertTrue(snapshot['busy']) def test_get_snapshot_if_snapshot_not_present(self): expected_vol_name = fake.SNAPSHOT['volume_id'] expected_snapshot_name = fake.SNAPSHOT['name'] response = netapp_api.NaElement(fake_client.SNAPSHOT_NOT_PRESENT_7MODE) self.connection.invoke_successfully.return_value = response self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot, expected_vol_name, expected_snapshot_name) 
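# ---------------------------------------------------------------------------
# Aside (not part of the original test file): the clone and paging tests above
# all rely on one mock pattern -- assigning a list to ``side_effect`` so each
# successive call to the mocked ``invoke_successfully`` returns the next canned
# response, which lets a single mock drive a start-clone / poll-status /
# completed flow.  A minimal, self-contained sketch of that pattern; the
# ``fake_api`` name and the response strings are illustrative assumptions only.
import mock  # on Python 3, ``from unittest import mock`` behaves the same

fake_api = mock.Mock()
fake_api.invoke_successfully.side_effect = ['clone-started', 'running',
                                            'completed']

# Each call consumes one queued response, in order.
assert fake_api.invoke_successfully() == 'clone-started'
assert fake_api.invoke_successfully() == 'running'
assert fake_api.invoke_successfully() == 'completed'
assert fake_api.invoke_successfully.call_count == 3
# ---------------------------------------------------------------------------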
cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/__init__.py0000664000567000056710000000000012701406250031312 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py0000664000567000056710000006227012701406250031404 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for NetApp API layer
"""

import ddt
from lxml import etree
import mock
import paramiko
import six
from six.moves import urllib

from cinder import exception
from cinder.i18n import _
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
    fakes as zapi_fakes)
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api


@ddt.ddt
class NetAppApiServerTests(test.TestCase):
    """Test case for NetApp API server methods"""

    def setUp(self):
        self.root = netapp_api.NaServer('127.0.0.1')
        super(NetAppApiServerTests, self).setUp()

    @ddt.data(None, 'ftp')
    def test_set_transport_type_value_error(self, transport_type):
        """Tests setting an invalid transport type"""
        self.assertRaises(ValueError, self.root.set_transport_type,
                          transport_type)

    @ddt.data({'params': {'transport_type': 'http',
                          'server_type_filer': 'filer'}},
              {'params': {'transport_type': 'http',
                          'server_type_filer': 'xyz'}},
              {'params': {'transport_type': 'https',
                          'server_type_filer': 'filer'}},
              {'params': {'transport_type': 'https',
                          'server_type_filer': 'xyz'}})
    @ddt.unpack
    def test_set_transport_type_valid(self, params):
        """Tests setting a valid transport type"""
        self.root._server_type = params['server_type_filer']
        mock_invoke = self.mock_object(self.root, 'set_port')

        self.root.set_transport_type(params['transport_type'])

        expected_call_args = zapi_fakes.FAKE_CALL_ARGS_LIST
        self.assertTrue(mock_invoke.call_args in expected_call_args)

    @ddt.data('stor', 'STORE', '')
    def test_set_server_type_value_error(self, server_type):
        """Tests Value Error on setting the wrong server type"""
        self.assertRaises(ValueError, self.root.set_server_type,
                          server_type)

    @ddt.data('!&', '80na', '')
    def test_set_port__value_error(self, port):
        """Tests Value Error on trying to set port with a non-integer"""
        self.assertRaises(ValueError, self.root.set_port, port)

    @ddt.data('!&', '80na', '')
    def test_set_timeout_value_error(self, timeout):
        """Tests Value Error on trying to set timeout with a non-integer"""
        self.assertRaises(ValueError, self.root.set_timeout, timeout)

    @ddt.data({'params': {'major': 1, 'minor': '20a'}},
              {'params': {'major': '20a', 'minor': 1}},
              {'params': {'major': '!*', 'minor': '20a'}})
    @ddt.unpack
    def test_set_api_version_value_error(self, params):
        """Tests Value Error on setting non-integer version"""
        self.assertRaises(ValueError, self.root.set_api_version, **params)

    def test_set_api_version_valid(self):
        """Tests setting a valid API version"""
        args = {'major': '20', 'minor': 1}
        expected_call_args_list = [mock.call('20'), mock.call(1)]
        mock_invoke = self.mock_object(six, 'text_type',
                                       mock.Mock(return_value='str'))

        self.root.set_api_version(**args)

        self.assertEqual(expected_call_args_list,
                         mock_invoke.call_args_list)

    @ddt.data({'params': {'result': zapi_fakes.FAKE_RESULT_API_ERR_REASON}},
              {'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_INVALID}},
              {'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_VALID}})
    @ddt.unpack
    def test_invoke_successfully_naapi_error(self, params):
        """Tests invoke successfully raising NaApiError"""
        self.mock_object(self.root, 'send_http_request',
                         mock.Mock(return_value=params['result']))

        self.assertRaises(netapp_api.NaApiError,
                          self.root.invoke_successfully,
                          zapi_fakes.FAKE_NA_ELEMENT)

    def test_invoke_successfully_no_error(self):
        """Tests invoke successfully with no errors"""
        self.mock_object(self.root, 'send_http_request', mock.Mock(
            return_value=zapi_fakes.FAKE_RESULT_SUCCESS))

        self.assertEqual(zapi_fakes.FAKE_RESULT_SUCCESS.to_string(),
                         self.root.invoke_successfully(
                             zapi_fakes.FAKE_NA_ELEMENT).to_string())

    def test__create_request(self):
        """Tests method _create_request"""
        self.root._ns = zapi_fakes.FAKE_XML_STR
        self.root._api_version = '1.20'
        self.mock_object(self.root, '_enable_tunnel_request')
        self.mock_object(netapp_api.NaElement, 'add_child_elem')
        self.mock_object(netapp_api.NaElement, 'to_string',
                         mock.Mock(return_value=zapi_fakes.FAKE_XML_STR))
        mock_invoke = self.mock_object(urllib.request, 'Request')

        self.root._create_request(zapi_fakes.FAKE_NA_ELEMENT, True)

        self.assertTrue(mock_invoke.called)

    @ddt.data({'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_5}},
              {'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_14}})
    @ddt.unpack
    def test__enable_tunnel_request__value_error(self, params):
        """Tests value errors with creating tunnel request"""
        self.assertRaises(ValueError,
                          params['server']._enable_tunnel_request, 'test')

    def test__enable_tunnel_request_valid(self):
        """Tests creating tunnel request with correct values"""
        netapp_elem = zapi_fakes.FAKE_NA_ELEMENT
        server = zapi_fakes.FAKE_NA_SERVER_API_1_20
        mock_invoke = self.mock_object(netapp_elem, 'add_attr')
        expected_call_args = [mock.call('vfiler', 'filer'),
                              mock.call('vfiler', 'server')]

        server._enable_tunnel_request(netapp_elem)

        self.assertEqual(expected_call_args, mock_invoke.call_args_list)

    def test__parse_response__naapi_error(self):
        """Tests NaApiError on no response"""
        self.assertRaises(netapp_api.NaApiError,
                          self.root._parse_response, None)

    def test__parse_response_no_error(self):
        """Tests parse function with appropriate response"""
        mock_invoke = self.mock_object(etree, 'XML', mock.Mock(
            return_value='xml'))

        self.root._parse_response(zapi_fakes.FAKE_XML_STR)

        mock_invoke.assert_called_with(zapi_fakes.FAKE_XML_STR)

    def test__build_opener_not_implemented_error(self):
        """Tests whether certificate style authorization raises Exception"""
        self.root._auth_style = 'not_basic_auth'

        self.assertRaises(NotImplementedError, self.root._build_opener)

    def test__build_opener_valid(self):
        """Tests whether build opener works with valid parameters"""
        self.root._auth_style = 'basic_auth'
        mock_invoke = self.mock_object(urllib.request, 'build_opener')

        self.root._build_opener()

        self.assertTrue(mock_invoke.called)

    @ddt.data(None, zapi_fakes.FAKE_XML_STR)
    def test_send_http_request_value_error(self, na_element):
        """Tests whether invalid NaElement 
parameter causes error""" self.assertRaises(ValueError, self.root.send_http_request, na_element) def test_send_http_request_http_error(self): """Tests handling of HTTPError""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', mock.Mock( return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT))) self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', mock.Mock( side_effect=urllib.error.HTTPError(url='', hdrs='', fp=None, code='401', msg='httperror'))) self.assertRaises(netapp_api.NaApiError, self.root.send_http_request, na_element) def test_send_http_request_unknown_exception(self): """Tests handling of Unknown Exception""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', mock.Mock( return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT))) self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', mock.Mock( side_effect=Exception)) self.assertRaises(netapp_api.NaApiError, self.root.send_http_request, na_element) def test_send_http_request_valid(self): """Tests the method send_http_request with valid parameters""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.root._trace = True self.mock_object(self.root, '_create_request', mock.Mock( return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT))) self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root, '_get_result', mock.Mock( return_value=zapi_fakes.FAKE_NA_ELEMENT)) opener_mock = self.mock_object( self.root._opener, 'open', mock.Mock()) opener_mock.read.side_effect = ['resp1', 'resp2'] self.root.send_http_request(na_element) class NetAppApiElementTransTests(test.TestCase): """Test case for NetApp API element translations.""" def setUp(self): super(NetAppApiElementTransTests, self).setUp() def test_translate_struct_dict_unique_key(self): """Tests if dict gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'} root.translate_struct(child) self.assertEqual(3, len(root.get_children())) self.assertEqual('v1', root.get_child_content('e1')) self.assertEqual('v2', root.get_child_content('e2')) self.assertEqual('v3', root.get_child_content('e3')) def test_translate_struct_dict_nonunique_key(self): """Tests if list/dict gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}] root.translate_struct(child) self.assertEqual(3, len(root.get_children())) children = root.get_children() for c in children: if c.get_name() == 'e1': self.assertIn(c.get_content(), ['v1', 'v3']) else: self.assertEqual('v2', c.get_content()) def test_translate_struct_list(self): """Tests if list gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = ['e1', 'e2'] root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_struct_tuple(self): """Tests if tuple gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = ('e1', 'e2') root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) 
self.assertIsNone(root.get_child_content('e2')) def test_translate_invalid_struct(self): """Tests if invalid data structure raises exception.""" root = netapp_api.NaElement('root') child = 'random child element' self.assertRaises(ValueError, root.translate_struct, child) def test_setter_builtin_types(self): """Tests str, int, float get converted to NaElement.""" root = netapp_api.NaElement('root') root['e1'] = 'v1' root['e2'] = 1 root['e3'] = 2.0 root['e4'] = 8 self.assertEqual(4, len(root.get_children())) self.assertEqual('v1', root.get_child_content('e1')) self.assertEqual('1', root.get_child_content('e2')) self.assertEqual('2.0', root.get_child_content('e3')) self.assertEqual('8', root.get_child_content('e4')) def test_setter_na_element(self): """Tests na_element gets appended as child.""" root = netapp_api.NaElement('root') root['e1'] = netapp_api.NaElement('nested') self.assertEqual(1, len(root.get_children())) e1 = root.get_child_by_name('e1') self.assertIsInstance(e1, netapp_api.NaElement) self.assertIsInstance(e1.get_child_by_name('nested'), netapp_api.NaElement) def test_setter_child_dict(self): """Tests dict is appended as child to root.""" root = netapp_api.NaElement('root') root['d'] = {'e1': 'v1', 'e2': 'v2'} e1 = root.get_child_by_name('d') self.assertIsInstance(e1, netapp_api.NaElement) sub_ch = e1.get_children() self.assertEqual(2, len(sub_ch)) for c in sub_ch: self.assertIn(c.get_name(), ['e1', 'e2']) if c.get_name() == 'e1': self.assertEqual('v1', c.get_content()) else: self.assertEqual('v2', c.get_content()) def test_setter_child_list_tuple(self): """Tests list/tuple are appended as child to root.""" root = netapp_api.NaElement('root') root['l'] = ['l1', 'l2'] root['t'] = ('t1', 't2') l = root.get_child_by_name('l') self.assertIsInstance(l, netapp_api.NaElement) t = root.get_child_by_name('t') self.assertIsInstance(t, netapp_api.NaElement) for le in l.get_children(): self.assertIn(le.get_name(), ['l1', 'l2']) for te in t.get_children(): self.assertIn(te.get_name(), ['t1', 't2']) def test_setter_no_value(self): """Tests key with None value.""" root = netapp_api.NaElement('root') root['k'] = None self.assertIsNone(root.get_child_content('k')) def test_setter_invalid_value(self): """Tests invalid value raises exception.""" root = netapp_api.NaElement('root') try: root['k'] = netapp_api.NaServer('localhost') except Exception as e: if not isinstance(e, TypeError): self.fail(_('Error not a TypeError.')) def test_setter_invalid_key(self): """Tests invalid value raises exception.""" root = netapp_api.NaElement('root') try: root[None] = 'value' except Exception as e: if not isinstance(e, KeyError): self.fail(_('Error not a KeyError.')) def test_getter_key_error(self): """Tests invalid key raises exception""" root = netapp_api.NaElement('root') self.mock_object(root, 'get_child_by_name', mock.Mock(return_value=None)) self.mock_object(root, 'has_attr', mock.Mock(return_value=None)) self.assertRaises(KeyError, netapp_api.NaElement.__getitem__, root, '123') def test_getter_na_element_list(self): """Tests returning NaElement list""" root = netapp_api.NaElement('root') root['key'] = ['val1', 'val2'] self.assertEqual(root.get_child_by_name('key').get_name(), root.__getitem__('key').get_name()) def test_getter_child_text(self): """Tests NaElement having no children""" root = netapp_api.NaElement('root') root.set_content('FAKE_CONTENT') self.mock_object(root, 'get_child_by_name', mock.Mock(return_value=root)) self.assertEqual('FAKE_CONTENT', root.__getitem__('root')) def 
test_getter_child_attr(self): """Tests invalid key raises exception""" root = netapp_api.NaElement('root') root.add_attr('val', 'FAKE_VALUE') self.assertEqual('FAKE_VALUE', root.__getitem__('val')) def test_add_node_with_children(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(netapp_api.NaElement, 'create_node_with_children', mock.Mock(return_value=zapi_fakes.FAKE_INVOKE_DATA)) mock_invoke = self.mock_object(root, 'add_child_elem') root.add_node_with_children('options') mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA) def test_create_node_with_children(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(root, 'add_new_child', mock.Mock(return_value='abc')) self.assertEqual(zapi_fakes.FAKE_XML1, root.create_node_with_children( 'options', test1=zapi_fakes.FAKE_XML_STR, test2=zapi_fakes.FAKE_XML_STR).to_string()) def test_add_new_child(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(netapp_api.NaElement, '_convert_entity_refs', mock.Mock(return_value=zapi_fakes.FAKE_INVOKE_DATA)) root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA) self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string()) def test_get_attr_names_empty_attr(self): """Tests _elements.attrib being empty""" root = netapp_api.NaElement('root') self.assertEqual([], root.get_attr_names()) def test_get_attr_names(self): """Tests _elements.attrib being non-empty""" root = netapp_api.NaElement('root') root.add_attr('attr1', 'a1') root.add_attr('attr2', 'a2') self.assertEqual(['attr1', 'attr2'], root.get_attr_names()) @ddt.ddt class NetAppApiInvokeTests(test.TestCase): """Test Cases for api request creation and invocation""" def setUp(self): super(NetAppApiInvokeTests, self).setUp() @ddt.data(None, zapi_fakes.FAKE_XML_STR) def test_invoke_api_invalid_input(self, na_server): """Tests Zapi Invocation Type Error""" na_server = None api_name = zapi_fakes.FAKE_API_NAME invoke_generator = netapp_api.invoke_api(na_server, api_name) self.assertRaises(exception.InvalidInput, next, invoke_generator) @ddt.data({'params': {'na_server': zapi_fakes.FAKE_NA_SERVER, 'api_name': zapi_fakes.FAKE_API_NAME}}, {'params': {'na_server': zapi_fakes.FAKE_NA_SERVER, 'api_name': zapi_fakes.FAKE_API_NAME, 'api_family': 'cm', 'query': zapi_fakes.FAKE_QUERY, 'des_result': zapi_fakes.FAKE_DES_ATTR, 'additional_elems': None, 'is_iter': True}}) @ddt.unpack def test_invoke_api_valid(self, params): """Test invoke_api with valid naserver""" self.mock_object(netapp_api, 'create_api_request', mock.Mock( return_value='success')) self.mock_object(netapp_api.NaServer, 'invoke_successfully', mock.Mock( return_value=netapp_api.NaElement('success'))) invoke_generator = netapp_api.invoke_api(**params) self.assertEqual(netapp_api.NaElement('success').to_string(), next(invoke_generator).to_string()) def test_create_api_request(self): """"Tests creating api request""" self.mock_object(netapp_api.NaElement, 'translate_struct') self.mock_object(netapp_api.NaElement, 'add_child_elem') params = {'api_name': zapi_fakes.FAKE_API_NAME, 'query': zapi_fakes.FAKE_QUERY, 'des_result': zapi_fakes.FAKE_DES_ATTR, 'additional_elems': zapi_fakes.FAKE_XML_STR, 'is_iter': True, 'tag': 'tag'} self.assertEqual(zapi_fakes.FAKE_API_NAME_ELEMENT.to_string(), netapp_api.create_api_request(**params).to_string()) @ddt.ddt class SSHUtilTests(test.TestCase): """Test Cases for SSH API invocation.""" def 
setUp(self): super(SSHUtilTests, self).setUp() self.mock_object(netapp_api.SSHUtil, '_init_ssh_pool') self.sshutil = netapp_api.SSHUtil('127.0.0.1', 'fake_user', 'fake_password') def test_execute_command(self): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files( paramiko.ChannelFile) self.mock_object(ssh, 'exec_command', mock.Mock(return_value=(stdin, stdout, stderr))) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') stdout_read = self.mock_object(stdout, 'read', mock.Mock(return_value='')) self.sshutil.execute_command(ssh, 'ls') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with() def test_execute_read_exception(self): ssh = mock.Mock(paramiko.SSHClient) exec_command = self.mock_object(ssh, 'exec_command') exec_command.side_effect = paramiko.SSHException('Failure') wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') self.assertRaises(paramiko.SSHException, self.sshutil.execute_command, ssh, 'ls') wait_on_stdout.assert_not_called() @ddt.data('Password:', 'Password: ', 'Password: \n\n') def test_execute_command_with_prompt(self, response): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel) stdout_read = self.mock_object(stdout.channel, 'recv', mock.Mock(return_value=response)) stdin_write = self.mock_object(stdin, 'write') self.mock_object(ssh, 'exec_command', mock.Mock(return_value=(stdin, stdout, stderr))) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') self.sshutil.execute_command_with_prompt(ssh, 'sudo ls', 'Password:', 'easypass') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with(999) stdin_write.assert_called_once_with('easypass' + '\n') def test_execute_command_unexpected_response(self): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel) stdout_read = self.mock_object(stdout.channel, 'recv', mock.Mock(return_value='bad response')) self.mock_object(ssh, 'exec_command', mock.Mock(return_value=(stdin, stdout, stderr))) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') self.assertRaises(exception.VolumeBackendAPIException, self.sshutil.execute_command_with_prompt, ssh, 'sudo ls', 'Password:', 'easypass') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with(999) def test_wait_on_stdout(self): stdout = mock.Mock() stdout.channel = mock.Mock(paramiko.Channel) exit_status = self.mock_object(stdout.channel, 'exit_status_ready', mock.Mock(return_value=False)) self.sshutil._wait_on_stdout(stdout, 1) exit_status.assert_any_call() self.assertTrue(exit_status.call_count > 2) def _mock_ssh_channel_files(self, channel): stdin = mock.Mock() stdin.channel = mock.Mock(channel) stdout = mock.Mock() stdout.channel = mock.Mock(channel) stderr = mock.Mock() stderr.channel = mock.Mock(channel) return stdin, stdout, stderr cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py0000664000567000056710000014662712701406250033271 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from lxml import etree import mock import paramiko import six from cinder import exception from cinder import ssh_utils from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp import utils as netapp_utils CONNECTION_INFO = {'hostname': 'hostname', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd', 'vserver': 'fake_vserver'} class NetAppCmodeClientTestCase(test.TestCase): def setUp(self): super(NetAppCmodeClientTestCase, self).setUp() self.mock_object(client_cmode.Client, '_init_ssh_client') with mock.patch.object(client_cmode.Client, 'get_ontapi_version', return_value=(1, 20)): self.client = client_cmode.Client(**CONNECTION_INFO) self.client.ssh_client = mock.MagicMock() self.client.connection = mock.MagicMock() self.connection = self.client.connection self.vserver = CONNECTION_INFO['vserver'] self.fake_volume = six.text_type(uuid.uuid4()) self.fake_lun = six.text_type(uuid.uuid4()) self.mock_send_request = self.mock_object(self.client, 'send_request') def tearDown(self): super(NetAppCmodeClientTestCase, self).tearDown() def _mock_api_error(self, code='fake'): return mock.Mock(side_effect=netapp_api.NaApiError(code=code)) def test_has_records(self): result = self.client._has_records(netapp_api.NaElement( fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE)) self.assertTrue(result) def test_has_records_not_found(self): result = self.client._has_records( netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)) self.assertFalse(result) def test_get_iscsi_target_details_no_targets(self): response = netapp_api.NaElement( etree.XML(""" 1 """)) self.connection.invoke_successfully.return_value = response target_list = self.client.get_iscsi_target_details() self.assertEqual([], target_list) def test_get_iscsi_target_details(self): expected_target = { "address": "127.0.0.1", "port": "1337", "interface-enabled": "true", "tpgroup-tag": "7777", } response = netapp_api.NaElement( etree.XML(""" 1 %(address)s %(port)s %(interface-enabled)s %(tpgroup-tag)s """ % expected_target)) self.connection.invoke_successfully.return_value = response target_list = self.client.get_iscsi_target_details() self.assertEqual([expected_target], target_list) def test_get_iscsi_service_details_with_no_iscsi_service(self): response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response iqn = self.client.get_iscsi_service_details() self.assertIsNone(iqn) def test_get_iscsi_service_details(self): expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1' response = netapp_api.NaElement( etree.XML(""" 1 %s """ % expected_iqn)) self.connection.invoke_successfully.return_value = response iqn = 
self.client.get_iscsi_service_details() self.assertEqual(expected_iqn, iqn) def test_get_lun_list(self): response = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.return_value = response luns = self.client.get_lun_list() self.assertEqual(2, len(luns)) def test_get_lun_list_with_multiple_pages(self): response = netapp_api.NaElement( etree.XML(""" 2 fake-next """)) response_2 = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.side_effect = [response, response_2] luns = self.client.get_lun_list() self.assertEqual(4, len(luns)) def test_get_lun_map_no_luns_mapped(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response lun_map = self.client.get_lun_map(path) self.assertEqual([], lun_map) def test_get_lun_map(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) expected_lun_map = { "initiator-group": "igroup", "lun-id": "1337", "vserver": "vserver", } response = netapp_api.NaElement( etree.XML(""" 1 %(lun-id)s %(initiator-group)s %(vserver)s """ % expected_lun_map)) self.connection.invoke_successfully.return_value = response lun_map = self.client.get_lun_map(path) self.assertEqual([expected_lun_map], lun_map) def test_get_lun_map_multiple_pages(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) expected_lun_map = { "initiator-group": "igroup", "lun-id": "1337", "vserver": "vserver", } response = netapp_api.NaElement( etree.XML(""" 1 %(lun-id)s %(initiator-group)s %(vserver)s blah """ % expected_lun_map)) response_2 = netapp_api.NaElement( etree.XML(""" 1 %(lun-id)s %(initiator-group)s %(vserver)s """ % expected_lun_map)) self.connection.invoke_successfully.side_effect = [response, response_2] lun_map = self.client.get_lun_map(path) self.assertEqual([expected_lun_map, expected_lun_map], lun_map) def test_get_igroup_by_initiator_none_found(self): initiator = 'initiator' response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response igroup = self.client.get_igroup_by_initiators([initiator]) self.assertEqual([], igroup) def test_get_igroup_by_initiators(self): initiators = ['11:22:33:44:55:66:77:88'] expected_igroup = { 'initiator-group-os-type': 'default', 'initiator-group-type': 'fcp', 'initiator-group-name': 'openstack-igroup1', } response = netapp_api.NaElement( etree.XML(""" true %(initiator-group-name)s default false 0 %(initiator-group-type)s true f8aa707a-57fa-11e4-ad08-123478563412 false 11:22:33:44:55:66:77:88 cinder-iscsi 1 """ % expected_igroup)) self.connection.invoke_successfully.return_value = response igroups = self.client.get_igroup_by_initiators(initiators) # make these lists of dicts comparable using hashable dictionaries igroups = set( [netapp_utils.hashabledict(igroup) for igroup in igroups]) expected = set([netapp_utils.hashabledict(expected_igroup)]) self.assertSetEqual(igroups, expected) def test_get_igroup_by_initiators_multiple(self): initiators = ['11:22:33:44:55:66:77:88', '88:77:66:55:44:33:22:11'] expected_igroup = { 'initiator-group-os-type': 'default', 'initiator-group-type': 'fcp', 'initiator-group-name': 'openstack-igroup1', } response = netapp_api.NaElement( etree.XML(""" true %(initiator-group-name)s default false 0 %(initiator-group-type)s true f8aa707a-57fa-11e4-ad08-123478563412 false 11:22:33:44:55:66:77:88 88:77:66:55:44:33:22:11 cinder-iscsi 1 """ % expected_igroup)) 
self.connection.invoke_successfully.return_value = response igroups = self.client.get_igroup_by_initiators(initiators) # make these lists of dicts comparable using hashable dictionaries igroups = set( [netapp_utils.hashabledict(igroup) for igroup in igroups]) expected = set([netapp_utils.hashabledict(expected_igroup)]) self.assertSetEqual(igroups, expected) def test_get_igroup_by_initiators_multiple_pages(self): initiator = '11:22:33:44:55:66:77:88' expected_igroup1 = { 'initiator-group-os-type': 'default', 'initiator-group-type': 'fcp', 'initiator-group-name': 'openstack-igroup1', } expected_igroup2 = { 'initiator-group-os-type': 'default', 'initiator-group-type': 'fcp', 'initiator-group-name': 'openstack-igroup2', } response_1 = netapp_api.NaElement( etree.XML(""" true %(initiator-group-name)s default false 0 %(initiator-group-type)s true f8aa707a-57fa-11e4-ad08-123478563412 false 11:22:33:44:55:66:77:88 cinder-iscsi 12345 1 """ % expected_igroup1)) response_2 = netapp_api.NaElement( etree.XML(""" true %(initiator-group-name)s default false 0 %(initiator-group-type)s true f8aa707a-57fa-11e4-ad08-123478563412 false 11:22:33:44:55:66:77:88 cinder-iscsi 1 """ % expected_igroup2)) self.connection.invoke_successfully.side_effect = [response_1, response_2] igroups = self.client.get_igroup_by_initiators([initiator]) # make these lists of dicts comparable using hashable dictionaries igroups = set( [netapp_utils.hashabledict(igroup) for igroup in igroups]) expected = set([netapp_utils.hashabledict(expected_igroup1), netapp_utils.hashabledict(expected_igroup2)]) self.assertSetEqual(igroups, expected) def test_clone_lun(self): self.client.clone_lun( 'volume', 'fakeLUN', 'newFakeLUN', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.assertEqual(1, self.connection.invoke_successfully.call_count) def test_clone_lun_multiple_zapi_calls(self): """Test for when lun clone requires more than one zapi call.""" # Max block-ranges per call = 32, max blocks per range = 2^24 # Force 2 calls bc = 2 ** 24 * 32 * 2 self.client.clone_lun('volume', 'fakeLUN', 'newFakeLUN', block_count=bc) self.assertEqual(2, self.connection.invoke_successfully.call_count) def test_get_lun_by_args(self): response = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.return_value = response lun = self.client.get_lun_by_args() self.assertEqual(1, len(lun)) def test_get_lun_by_args_no_lun_found(self): response = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.return_value = response lun = self.client.get_lun_by_args() self.assertEqual(0, len(lun)) def test_get_lun_by_args_with_args_specified(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) response = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.return_value = response lun = self.client.get_lun_by_args(path=path) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] query = actual_request.get_child_by_name('query') lun_info_args = query.get_child_by_name('lun-info').get_children() # Assert request is made with correct arguments self.assertEqual('path', lun_info_args[0].get_name()) self.assertEqual(path, lun_info_args[0].get_content()) self.assertEqual(1, len(lun)) def test_file_assign_qos(self): api_args = { 'volume': fake.FLEXVOL, 'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME, 'file': fake.NFS_FILE_PATH, 'vserver': self.vserver } self.client.file_assign_qos( fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, fake.NFS_FILE_PATH) 
self.mock_send_request.assert_has_calls([ mock.call('file-assign-qos', api_args, False)]) def test_set_lun_qos_policy_group(self): api_args = { 'path': fake.LUN_PATH, 'qos-policy-group': fake.QOS_POLICY_GROUP_NAME, } self.client.set_lun_qos_policy_group( fake.LUN_PATH, fake.QOS_POLICY_GROUP_NAME) self.mock_send_request.assert_has_calls([ mock.call('lun-set-qos-policy-group', api_args)]) def test_provision_qos_policy_group_no_qos_policy_group_info(self): self.client.provision_qos_policy_group(qos_policy_group_info=None) self.assertEqual(0, self.connection.qos_policy_group_create.call_count) def test_provision_qos_policy_group_legacy_qos_policy_group_info(self): self.client.provision_qos_policy_group( qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY) self.assertEqual(0, self.connection.qos_policy_group_create.call_count) def test_provision_qos_policy_group_with_qos_spec_create(self): self.mock_object(self.client, 'qos_policy_group_exists', mock.Mock(return_value=False)) self.mock_object(self.client, 'qos_policy_group_create') self.mock_object(self.client, 'qos_policy_group_modify') self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO) self.client.qos_policy_group_create.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)]) self.assertFalse(self.client.qos_policy_group_modify.called) def test_provision_qos_policy_group_with_qos_spec_modify(self): self.mock_object(self.client, 'qos_policy_group_exists', mock.Mock(return_value=True)) self.mock_object(self.client, 'qos_policy_group_create') self.mock_object(self.client, 'qos_policy_group_modify') self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO) self.assertFalse(self.client.qos_policy_group_create.called) self.client.qos_policy_group_modify.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)]) def test_qos_policy_group_exists(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE) result = self.client.qos_policy_group_exists( fake.QOS_POLICY_GROUP_NAME) api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': fake.QOS_POLICY_GROUP_NAME, }, }, 'desired-attributes': { 'qos-policy-group-info': { 'policy-group': None, }, }, } self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-get-iter', api_args, False)]) self.assertTrue(result) def test_qos_policy_group_exists_not_found(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.NO_RECORDS_RESPONSE) result = self.client.qos_policy_group_exists( fake.QOS_POLICY_GROUP_NAME) self.assertFalse(result) def test_qos_policy_group_create(self): api_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, 'max-throughput': fake.MAX_THROUGHPUT, 'vserver': self.vserver, } self.client.qos_policy_group_create( fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT) self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-create', api_args, False)]) def test_qos_policy_group_modify(self): api_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, 'max-throughput': fake.MAX_THROUGHPUT, } self.client.qos_policy_group_modify( fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT) self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-modify', api_args, False)]) def test_qos_policy_group_delete(self): api_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME } self.client.qos_policy_group_delete( fake.QOS_POLICY_GROUP_NAME) self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-delete', api_args, 
False)]) def test_qos_policy_group_rename(self): new_name = 'new-' + fake.QOS_POLICY_GROUP_NAME api_args = { 'policy-group-name': fake.QOS_POLICY_GROUP_NAME, 'new-name': new_name, } self.client.qos_policy_group_rename( fake.QOS_POLICY_GROUP_NAME, new_name) self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-rename', api_args, False)]) def test_mark_qos_policy_group_for_deletion_no_qos_policy_group_info(self): mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') mock_remove = self.mock_object(self.client, 'remove_unused_qos_policy_groups') self.client.mark_qos_policy_group_for_deletion( qos_policy_group_info=None) self.assertEqual(0, mock_rename.call_count) self.assertEqual(0, mock_remove.call_count) def test_mark_qos_policy_group_for_deletion_legacy_qos_policy(self): mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') mock_remove = self.mock_object(self.client, 'remove_unused_qos_policy_groups') self.client.mark_qos_policy_group_for_deletion( qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY) self.assertEqual(0, mock_rename.call_count) self.assertEqual(1, mock_remove.call_count) def test_mark_qos_policy_group_for_deletion_w_qos_spec(self): mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') mock_remove = self.mock_object(self.client, 'remove_unused_qos_policy_groups') mock_log = self.mock_object(client_cmode.LOG, 'warning') new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME self.client.mark_qos_policy_group_for_deletion( qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO) mock_rename.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)]) self.assertEqual(0, mock_log.call_count) self.assertEqual(1, mock_remove.call_count) def test_mark_qos_policy_group_for_deletion_exception_path(self): mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') mock_rename.side_effect = netapp_api.NaApiError mock_remove = self.mock_object(self.client, 'remove_unused_qos_policy_groups') mock_log = self.mock_object(client_cmode.LOG, 'warning') new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME self.client.mark_qos_policy_group_for_deletion( qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO) mock_rename.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)]) self.assertEqual(1, mock_log.call_count) self.assertEqual(1, mock_remove.call_count) def test_remove_unused_qos_policy_groups(self): mock_log = self.mock_object(client_cmode.LOG, 'debug') api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': 'deleted_cinder_*', 'vserver': self.vserver, } }, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } self.client.remove_unused_qos_policy_groups() self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-delete-iter', api_args, False)]) self.assertEqual(0, mock_log.call_count) def test_remove_unused_qos_policy_groups_api_error(self): mock_log = self.mock_object(client_cmode.LOG, 'debug') api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': 'deleted_cinder_*', 'vserver': self.vserver, } }, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } self.mock_send_request.side_effect = netapp_api.NaApiError self.client.remove_unused_qos_policy_groups() self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-delete-iter', api_args, False)]) self.assertEqual(1, mock_log.call_count) 
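# ---------------------------------------------------------------------------
# Aside (not part of the original test file): the QoS policy group tests above
# share one shape -- build the nested ZAPI argument dict by hand, call the
# client method, then verify the mocked ``send_request`` saw exactly that
# dict.  A minimal sketch of that verification pattern; ``delete_policy`` and
# ``fake_send_request`` are illustrative stand-ins, not cinder APIs.
import mock


def delete_policy(send_request, name):
    # Mirrors the shape of qos_policy_group_delete: API name, flat argument
    # dict, and an enable_tunneling flag as the third positional argument.
    send_request('qos-policy-group-delete', {'policy-group': name}, False)


fake_send_request = mock.Mock()
delete_policy(fake_send_request, 'openstack-abc123')
fake_send_request.assert_called_once_with(
    'qos-policy-group-delete', {'policy-group': 'openstack-abc123'}, False)
# ---------------------------------------------------------------------------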
@mock.patch('cinder.volume.drivers.netapp.utils.resolve_hostname', return_value='192.168.1.101') def test_get_if_info_by_ip_not_found(self, mock_resolve_hostname): fake_ip = '192.168.1.101' response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response self.assertRaises(exception.NotFound, self.client.get_if_info_by_ip, fake_ip) @mock.patch('cinder.volume.drivers.netapp.utils.resolve_hostname', return_value='192.168.1.101') def test_get_if_info_by_ip(self, mock_resolve_hostname): fake_ip = '192.168.1.101' response = netapp_api.NaElement( etree.XML(""" 1 """)) self.connection.invoke_successfully.return_value = response results = self.client.get_if_info_by_ip(fake_ip) self.assertEqual(1, len(results)) def test_get_vol_by_junc_vserver_not_found(self): fake_vserver = 'fake_vserver' fake_junc = 'fake_junction_path' response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response self.assertRaises(exception.NotFound, self.client.get_vol_by_junc_vserver, fake_vserver, fake_junc) def test_get_vol_by_junc_vserver(self): fake_vserver = 'fake_vserver' fake_junc = 'fake_junction_path' expected_flex_vol = 'fake_flex_vol' response = netapp_api.NaElement( etree.XML(""" 1 %(flex_vol)s """ % {'flex_vol': expected_flex_vol})) self.connection.invoke_successfully.return_value = response actual_flex_vol = self.client.get_vol_by_junc_vserver(fake_vserver, fake_junc) self.assertEqual(expected_flex_vol, actual_flex_vol) def test_clone_file(self): expected_flex_vol = "fake_flex_vol" expected_src_path = "fake_src_path" expected_dest_path = "fake_dest_path" self.connection.get_api_version.return_value = (1, 20) self.client.clone_file(expected_flex_vol, expected_src_path, expected_dest_path, self.vserver) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] actual_flex_vol = actual_request.get_child_by_name('volume') \ .get_content() actual_src_path = actual_request \ .get_child_by_name('source-path').get_content() actual_dest_path = actual_request.get_child_by_name( 'destination-path').get_content() self.assertEqual(expected_flex_vol, actual_flex_vol) self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) self.assertEqual(actual_request.get_child_by_name( 'destination-exists'), None) def test_clone_file_when_destination_exists(self): expected_flex_vol = "fake_flex_vol" expected_src_path = "fake_src_path" expected_dest_path = "fake_dest_path" self.connection.get_api_version.return_value = (1, 20) self.client.clone_file(expected_flex_vol, expected_src_path, expected_dest_path, self.vserver, dest_exists=True) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] actual_flex_vol = actual_request.get_child_by_name('volume') \ .get_content() actual_src_path = actual_request \ .get_child_by_name('source-path').get_content() actual_dest_path = actual_request.get_child_by_name( 'destination-path').get_content() self.assertEqual(expected_flex_vol, actual_flex_vol) self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) self.assertEqual('true', actual_request.get_child_by_name( 'destination-exists').get_content()) def test_clone_file_when_destination_exists_and_version_less_than_1_20( self): expected_flex_vol = "fake_flex_vol" expected_src_path = "fake_src_path" expected_dest_path = "fake_dest_path" self.connection.get_api_version.return_value = 
(1, 19) self.client.clone_file(expected_flex_vol, expected_src_path, expected_dest_path, self.vserver, dest_exists=True) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] actual_flex_vol = actual_request.get_child_by_name('volume') \ .get_content() actual_src_path = actual_request \ .get_child_by_name('source-path').get_content() actual_dest_path = actual_request.get_child_by_name( 'destination-path').get_content() self.assertEqual(expected_flex_vol, actual_flex_vol) self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) self.assertIsNone(actual_request.get_child_by_name( 'destination-exists')) def test_get_file_usage(self): expected_bytes = "2048" fake_vserver = 'fake_vserver' fake_path = 'fake_path' response = netapp_api.NaElement( etree.XML(""" %(unique-bytes)s """ % {'unique-bytes': expected_bytes})) self.connection.invoke_successfully.return_value = response actual_bytes = self.client.get_file_usage(fake_vserver, fake_path) self.assertEqual(expected_bytes, actual_bytes) def test_get_operational_network_interface_addresses(self): expected_result = ['1.2.3.4', '99.98.97.96'] api_response = netapp_api.NaElement( fake_client.GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE) self.mock_send_request.return_value = api_response address_list = ( self.client.get_operational_network_interface_addresses()) self.assertEqual(expected_result, address_list) def test_get_flexvol_capacity(self): expected_total_size = 1000 expected_available_size = 750 fake_flexvol_path = '/fake/vol' api_response = netapp_api.NaElement( etree.XML(""" %(available_size)s %(total_size)s """ % {'available_size': expected_available_size, 'total_size': expected_total_size})) self.mock_send_request.return_value = api_response total_size, available_size = ( self.client.get_flexvol_capacity(fake_flexvol_path)) self.assertEqual(expected_total_size, total_size) self.assertEqual(expected_available_size, available_size) def test_get_aggregates(self): api_response = netapp_api.NaElement( fake_client.AGGR_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._get_aggregates() self.client.send_request.assert_has_calls([ mock.call('aggr-get-iter', {}, enable_tunneling=False)]) self.assertListEqual( [aggr.to_string() for aggr in api_response.get_child_by_name( 'attributes-list').get_children()], [aggr.to_string() for aggr in result]) def test_get_aggregates_with_filters(self): api_response = netapp_api.NaElement( fake_client.AGGR_GET_SPACE_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-space-attributes': { 'size-total': None, 'size-available': None, } } } result = self.client._get_aggregates( aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES, desired_attributes=desired_attributes) aggr_get_iter_args = { 'query': { 'aggr-attributes': { 'aggregate-name': '|'.join( fake_client.VOLUME_AGGREGATE_NAMES), } }, 'desired-attributes': desired_attributes } self.client.send_request.assert_has_calls([ mock.call('aggr-get-iter', aggr_get_iter_args, enable_tunneling=False)]) self.assertListEqual( [aggr.to_string() for aggr in api_response.get_child_by_name( 'attributes-list').get_children()], [aggr.to_string() for aggr in result]) def test_get_aggregates_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) 
self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._get_aggregates() self.client.send_request.assert_has_calls([ mock.call('aggr-get-iter', {}, enable_tunneling=False)]) self.assertListEqual([], result) def test_get_node_for_aggregate(self): api_response = netapp_api.NaElement( fake_client.AGGR_GET_NODE_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', mock.Mock(return_value=api_response)) result = self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-ownership-attributes': { 'home-name': None, }, }, } self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], desired_attributes=desired_attributes)]) self.assertEqual(fake_client.NODE_NAME, result) def test_get_node_for_aggregate_none_requested(self): result = self.client.get_node_for_aggregate(None) self.assertIsNone(result) def test_get_node_for_aggregate_api_not_found(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EAPINOTFOUND))) result = self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) def test_get_node_for_aggregate_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.get_node_for_aggregate, fake_client.VOLUME_AGGREGATE_NAME) def test_get_node_for_aggregate_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) def test_get_performance_instance_uuids(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE) result = self.client.get_performance_instance_uuids( 'system', fake_client.NODE_NAME) expected = [fake_client.NODE_NAME + ':kernel:system'] self.assertEqual(expected, result) perf_object_instance_list_info_iter_args = { 'objectname': 'system', 'query': { 'instance-info': { 'uuid': fake_client.NODE_NAME + ':*', } } } self.mock_send_request.assert_called_once_with( 'perf-object-instance-list-info-iter', perf_object_instance_list_info_iter_args, enable_tunneling=False) def test_get_performance_counters(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE) instance_uuids = [ fake_client.NODE_NAMES[0] + ':kernel:system', fake_client.NODE_NAMES[1] + ':kernel:system', ] counter_names = ['avg_processor_busy'] result = self.client.get_performance_counters('system', instance_uuids, counter_names) expected = [ { 'avg_processor_busy': '5674745133134', 'instance-name': 'system', 'instance-uuid': instance_uuids[0], 'node-name': fake_client.NODE_NAMES[0], 'timestamp': '1453412013', }, { 'avg_processor_busy': '4077649009234', 'instance-name': 'system', 'instance-uuid': instance_uuids[1], 'node-name': fake_client.NODE_NAMES[1], 'timestamp': '1453412013' }, ] self.assertEqual(expected, result) perf_object_get_instances_args = { 'objectname': 'system', 'instance-uuids': [ {'instance-uuid': instance_uuid} for instance_uuid in instance_uuids ], 'counters': [ {'counter': counter} for counter in counter_names ], } 
self.mock_send_request.assert_called_once_with( 'perf-object-get-instances', perf_object_get_instances_args, enable_tunneling=False) def test_check_iscsi_initiator_exists_when_no_initiator_exists(self): self.connection.invoke_successfully = mock.Mock( side_effect=netapp_api.NaApiError) initiator = fake_client.INITIATOR_IQN initiator_exists = self.client.check_iscsi_initiator_exists(initiator) self.assertFalse(initiator_exists) def test_check_iscsi_initiator_exists_when_initiator_exists(self): self.connection.invoke_successfully = mock.Mock() initiator = fake_client.INITIATOR_IQN initiator_exists = self.client.check_iscsi_initiator_exists(initiator) self.assertTrue(initiator_exists) def test_set_iscsi_chap_authentication_no_previous_initiator(self): self.connection.invoke_successfully = mock.Mock() self.mock_object(self.client, 'check_iscsi_initiator_exists', mock.Mock(return_value=False)) ssh = mock.Mock(paramiko.SSHClient) sshpool = mock.Mock(ssh_utils.SSHPool) self.client.ssh_client.ssh_pool = sshpool self.mock_object(self.client.ssh_client, 'execute_command_with_prompt') sshpool.item().__enter__ = mock.Mock(return_value=ssh) sshpool.item().__exit__ = mock.Mock(return_value=False) self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN, fake_client.USER_NAME, fake_client.PASSWORD) command = ('iscsi security create -vserver fake_vserver ' '-initiator-name iqn.2015-06.com.netapp:fake_iqn ' '-auth-type CHAP -user-name fake_user') self.client.ssh_client.execute_command_with_prompt.assert_has_calls( [mock.call(ssh, command, 'Password:', fake_client.PASSWORD)] ) def test_set_iscsi_chap_authentication_with_preexisting_initiator(self): self.connection.invoke_successfully = mock.Mock() self.mock_object(self.client, 'check_iscsi_initiator_exists', mock.Mock(return_value=True)) ssh = mock.Mock(paramiko.SSHClient) sshpool = mock.Mock(ssh_utils.SSHPool) self.client.ssh_client.ssh_pool = sshpool self.mock_object(self.client.ssh_client, 'execute_command_with_prompt') sshpool.item().__enter__ = mock.Mock(return_value=ssh) sshpool.item().__exit__ = mock.Mock(return_value=False) self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN, fake_client.USER_NAME, fake_client.PASSWORD) command = ('iscsi security modify -vserver fake_vserver ' '-initiator-name iqn.2015-06.com.netapp:fake_iqn ' '-auth-type CHAP -user-name fake_user') self.client.ssh_client.execute_command_with_prompt.assert_has_calls( [mock.call(ssh, command, 'Password:', fake_client.PASSWORD)] ) def test_set_iscsi_chap_authentication_with_ssh_exception(self): self.connection.invoke_successfully = mock.Mock() self.mock_object(self.client, 'check_iscsi_initiator_exists', mock.Mock(return_value=True)) ssh = mock.Mock(paramiko.SSHClient) sshpool = mock.Mock(ssh_utils.SSHPool) self.client.ssh_client.ssh_pool = sshpool sshpool.item().__enter__ = mock.Mock(return_value=ssh) sshpool.item().__enter__.side_effect = paramiko.SSHException( 'Connection Failure') sshpool.item().__exit__ = mock.Mock(return_value=False) self.assertRaises(exception.VolumeBackendAPIException, self.client.set_iscsi_chap_authentication, fake_client.INITIATOR_IQN, fake_client.USER_NAME, fake_client.PASSWORD) def test_get_snapshot_if_snapshot_present_not_busy(self): expected_vol_name = fake.SNAPSHOT['volume_id'] expected_snapshot_name = fake.SNAPSHOT['name'] response = netapp_api.NaElement( fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE) self.mock_send_request.return_value = response snapshot = self.client.get_snapshot(expected_vol_name, 
                                            expected_snapshot_name)

        self.assertEqual(expected_vol_name, snapshot['volume'])
        self.assertEqual(expected_snapshot_name, snapshot['name'])
        self.assertEqual(set([]), snapshot['owners'])
        self.assertFalse(snapshot['busy'])

    def test_get_snapshot_if_snapshot_present_busy(self):
        expected_vol_name = fake.SNAPSHOT['volume_id']
        expected_snapshot_name = fake.SNAPSHOT['name']
        response = netapp_api.NaElement(
            fake_client.SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE)
        self.mock_send_request.return_value = response

        snapshot = self.client.get_snapshot(expected_vol_name,
                                            expected_snapshot_name)

        self.assertEqual(expected_vol_name, snapshot['volume'])
        self.assertEqual(expected_snapshot_name, snapshot['name'])
        self.assertEqual(set([]), snapshot['owners'])
        self.assertTrue(snapshot['busy'])

    def test_get_snapshot_if_snapshot_not_present(self):
        expected_vol_name = fake.SNAPSHOT['volume_id']
        expected_snapshot_name = fake.SNAPSHOT['name']
        response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_send_request.return_value = response

        self.assertRaises(exception.SnapshotNotFound,
                          self.client.get_snapshot,
                          expected_vol_name,
                          expected_snapshot_name)

cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py

# Copyright (c) 2014 Alex Meade.  All rights reserved.
# Copyright (c) 2015 Tom Barron.  All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.
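# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original suite: the tests in this
# file all follow one stub-and-verify pattern -- replace the client's
# transport with a mock, call the method under test, then assert on the
# exact request that would have gone over the wire.  Everything below is
# hypothetical and self-contained; only the mock library is assumed.
import mock as _sketch_mock


class _SketchClient(object):
    """Hypothetical client whose only transport hook is send_request()."""

    def get_thing(self, name):
        return self.send_request('thing-get', {'name': name})


def _sketch_stub_and_verify():
    client = _SketchClient()
    # Stub the transport so no real call is made.
    client.send_request = _sketch_mock.Mock(return_value={'name': 'x'})
    result = client.get_thing('x')
    # Verify both the outgoing request and the returned payload.
    client.send_request.assert_called_once_with('thing-get', {'name': 'x'})
    assert result == {'name': 'x'}
# ---------------------------------------------------------------------------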
import uuid

from lxml import etree
import mock
import six

from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
    fakes as fake_client)
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base


CONNECTION_INFO = {'hostname': 'hostname',
                   'transport_type': 'https',
                   'port': 443,
                   'username': 'admin',
                   'password': 'passw0rd'}


class NetAppBaseClientTestCase(test.TestCase):

    def setUp(self):
        super(NetAppBaseClientTestCase, self).setUp()

        self.mock_object(client_base, 'LOG')
        self.mock_object(client_base.Client, '_init_ssh_client')
        self.client = client_base.Client(**CONNECTION_INFO)
        self.client.connection = mock.MagicMock()
        self.client.ssh_client = mock.MagicMock()
        self.connection = self.client.connection
        self.fake_volume = six.text_type(uuid.uuid4())
        self.fake_lun = six.text_type(uuid.uuid4())
        self.fake_size = '1024'
        self.fake_metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
        self.mock_send_request = self.mock_object(self.client,
                                                  'send_request')

    def tearDown(self):
        super(NetAppBaseClientTestCase, self).tearDown()

    def test_get_ontapi_version(self):
        version_response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                            <major-version>1</major-version>
                            <minor-version>19</minor-version>
                          </results>"""))
        self.connection.invoke_successfully.return_value = version_response

        major, minor = self.client.get_ontapi_version(cached=False)

        self.assertEqual('1', major)
        self.assertEqual('19', minor)

    def test_get_ontapi_version_cached(self):
        self.connection.get_api_version.return_value = (1, 20)

        major, minor = self.client.get_ontapi_version()

        self.assertEqual(1, self.connection.get_api_version.call_count)
        self.assertEqual(1, major)
        self.assertEqual(20, minor)

    def test_check_is_naelement(self):
        element = netapp_api.NaElement('name')

        self.assertIsNone(self.client.check_is_naelement(element))
        self.assertRaises(ValueError, self.client.check_is_naelement, None)

    def test_create_lun(self):
        expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)

        with mock.patch.object(netapp_api.NaElement,
                               'create_node_with_children',
                               ) as mock_create_node:
            self.client.create_lun(self.fake_volume,
                                   self.fake_lun,
                                   self.fake_size,
                                   self.fake_metadata)

            mock_create_node.assert_called_once_with(
                'lun-create-by-size',
                **{'path': expected_path,
                   'size': self.fake_size,
                   'ostype': self.fake_metadata['OsType'],
                   'space-reservation-enabled':
                   self.fake_metadata['SpaceReserved']})
            self.connection.invoke_successfully.assert_called_once_with(
                mock.ANY, True)

    def test_create_lun_with_qos_policy_group_name(self):
        expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
        expected_qos_group_name = 'qos_1'
        mock_request = mock.Mock()

        with mock.patch.object(netapp_api.NaElement,
                               'create_node_with_children',
                               return_value=mock_request
                               ) as mock_create_node:
            self.client.create_lun(
                self.fake_volume,
                self.fake_lun,
                self.fake_size,
                self.fake_metadata,
                qos_policy_group_name=expected_qos_group_name)

            mock_create_node.assert_called_once_with(
                'lun-create-by-size',
                **{'path': expected_path,
                   'size': self.fake_size,
                   'ostype': self.fake_metadata['OsType'],
                   'space-reservation-enabled':
                   self.fake_metadata['SpaceReserved']})
            mock_request.add_new_child.assert_called_once_with(
                'qos-policy-group', expected_qos_group_name)
            self.connection.invoke_successfully.assert_called_once_with(
                mock.ANY, True)

    def test_create_lun_raises_on_failure(self):
        self.connection.invoke_successfully = mock.Mock(
            side_effect=netapp_api.NaApiError)
self.assertRaises(netapp_api.NaApiError, self.client.create_lun, self.fake_volume, self.fake_lun, self.fake_size, self.fake_metadata) def test_destroy_lun(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.destroy_lun(path) mock_create_node.assert_called_once_with( 'lun-destroy', **{'path': path}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_destroy_lun_force(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) mock_request = mock.Mock() with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', return_value=mock_request ) as mock_create_node: self.client.destroy_lun(path) mock_create_node.assert_called_once_with('lun-destroy', **{'path': path}) mock_request.add_new_child.assert_called_once_with('force', 'true') self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_map_lun(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' expected_lun_id = 'my_lun' mock_response = mock.Mock() self.connection.invoke_successfully.return_value = mock_response mock_response.get_child_content.return_value = expected_lun_id with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: actual_lun_id = self.client.map_lun(path, igroup) mock_create_node.assert_called_once_with( 'lun-map', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) self.assertEqual(expected_lun_id, actual_lun_id) def test_map_lun_with_lun_id(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' expected_lun_id = 'my_lun' mock_response = mock.Mock() self.connection.invoke_successfully.return_value = mock_response mock_response.get_child_content.return_value = expected_lun_id with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: actual_lun_id = self.client.map_lun(path, igroup, lun_id=expected_lun_id) mock_create_node.assert_called_once_with( 'lun-map', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) self.assertEqual(expected_lun_id, actual_lun_id) def test_map_lun_with_api_error(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError() with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.assertRaises(netapp_api.NaApiError, self.client.map_lun, path, igroup) mock_create_node.assert_called_once_with( 'lun-map', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_unmap_lun(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' mock_response = mock.Mock() self.connection.invoke_successfully.return_value = mock_response with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.unmap_lun(path, igroup) mock_create_node.assert_called_once_with( 'lun-unmap', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_unmap_lun_with_api_error(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError() 
with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.assertRaises(netapp_api.NaApiError, self.client.unmap_lun, path, igroup) mock_create_node.assert_called_once_with( 'lun-unmap', **{'path': path, 'initiator-group': igroup}) def test_unmap_lun_already_unmapped(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' EINVALIDINPUTERROR = '13115' self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError(code=EINVALIDINPUTERROR) with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.unmap_lun(path, igroup) mock_create_node.assert_called_once_with( 'lun-unmap', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_unmap_lun_lun_not_mapped_in_group(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' EVDISK_ERROR_NO_SUCH_LUNMAP = '9016' self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError(code=EVDISK_ERROR_NO_SUCH_LUNMAP) with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.unmap_lun(path, igroup) mock_create_node.assert_called_once_with( 'lun-unmap', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_create_igroup(self): igroup = 'igroup' with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.create_igroup(igroup) mock_create_node.assert_called_once_with( 'igroup-create', **{'initiator-group-name': igroup, 'initiator-group-type': 'iscsi', 'os-type': 'default'}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_add_igroup_initiator(self): igroup = 'igroup' initiator = 'initiator' with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.add_igroup_initiator(igroup, initiator) mock_create_node.assert_called_once_with( 'igroup-add', **{'initiator-group-name': igroup, 'initiator': initiator}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_do_direct_resize(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) new_size = 1024 mock_request = mock.Mock() with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', return_value=mock_request ) as mock_create_node: self.client.do_direct_resize(path, new_size) mock_create_node.assert_called_once_with( 'lun-resize', **{'path': path, 'size': new_size}) mock_request.add_new_child.assert_called_once_with( 'force', 'true') self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_do_direct_resize_not_forced(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) new_size = 1024 mock_request = mock.Mock() with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', return_value=mock_request ) as mock_create_node: self.client.do_direct_resize(path, new_size, force=False) mock_create_node.assert_called_once_with( 'lun-resize', **{'path': path, 'size': new_size}) self.assertFalse(mock_request.add_new_child.called) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_get_lun_geometry(self): expected_keys = set(['size', 'bytes_per_sector', 'sectors_per_track', 'tracks_per_cylinder', 'cylinders', 'max_resize']) path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) mock_response = mock.Mock() 
self.connection.invoke_successfully.return_value = mock_response geometry = self.client.get_lun_geometry(path) self.assertEqual(expected_keys, set(geometry.keys())) def test_get_lun_geometry_with_api_error(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError() geometry = self.client.get_lun_geometry(path) self.assertEqual({}, geometry) def test_get_volume_options(self): fake_response = netapp_api.NaElement('volume') fake_response.add_node_with_children('options', test='blah') self.connection.invoke_successfully.return_value = fake_response options = self.client.get_volume_options('volume') self.assertEqual(1, len(options)) def test_get_volume_options_with_no_options(self): fake_response = netapp_api.NaElement('options') self.connection.invoke_successfully.return_value = fake_response options = self.client.get_volume_options('volume') self.assertEqual([], options) def test_move_lun(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) new_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) fake_response = netapp_api.NaElement('options') self.connection.invoke_successfully.return_value = fake_response self.client.move_lun(path, new_path) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_get_igroup_by_initiators(self): self.assertRaises(NotImplementedError, self.client.get_igroup_by_initiators, fake.FC_FORMATTED_INITIATORS) def test_get_fc_target_wwpns(self): self.assertRaises(NotImplementedError, self.client.get_fc_target_wwpns) def test_has_luns_mapped_to_initiator(self): initiator = fake.FC_FORMATTED_INITIATORS[0] version_response = netapp_api.NaElement( etree.XML(""" /vol/cinder1/volume-9be956b3-9854-4a5c-a7f5-13a16da52c9c openstack-4b57a80b-ebca-4d27-bd63-48ac5408d08b 0 /vol/cinder1/volume-ac90433c-a560-41b3-9357-7f3f80071eb5 openstack-4b57a80b-ebca-4d27-bd63-48ac5408d08b 1 """)) self.connection.invoke_successfully.return_value = version_response self.assertTrue(self.client._has_luns_mapped_to_initiator(initiator)) def test_has_luns_mapped_to_initiator_not_mapped(self): initiator = fake.FC_FORMATTED_INITIATORS[0] version_response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = version_response self.assertFalse(self.client._has_luns_mapped_to_initiator(initiator)) @mock.patch.object(client_base.Client, '_has_luns_mapped_to_initiator') def test_has_luns_mapped_to_initiators(self, mock_has_luns_mapped_to_initiator): initiators = fake.FC_FORMATTED_INITIATORS mock_has_luns_mapped_to_initiator.return_value = True self.assertTrue(self.client.has_luns_mapped_to_initiators(initiators)) @mock.patch.object(client_base.Client, '_has_luns_mapped_to_initiator') def test_has_luns_mapped_to_initiators_not_mapped( self, mock_has_luns_mapped_to_initiator): initiators = fake.FC_FORMATTED_INITIATORS mock_has_luns_mapped_to_initiator.return_value = False self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators)) def test_get_performance_counter_info(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE) result = self.client.get_performance_counter_info('wafl', 'cp_phase_times') expected = { 'name': 'cp_phase_times', 'base-counter': 'total_cp_msecs', 'labels': fake_client.PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS, } self.assertEqual(expected, result) perf_object_counter_list_info_args = {'objectname': 'wafl'} 
self.mock_send_request.assert_called_once_with( 'perf-object-counter-list-info', perf_object_counter_list_info_args, enable_tunneling=False) def test_get_performance_counter_info_not_found(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE) self.assertRaises(exception.NotFound, self.client.get_performance_counter_info, 'wafl', 'invalid') def test_delete_snapshot(self): api_args = { 'volume': fake.SNAPSHOT['volume_id'], 'snapshot': fake.SNAPSHOT['name'], } self.mock_object(self.client, 'send_request') self.client.delete_snapshot(api_args['volume'], api_args['snapshot']) asserted_api_args = { 'volume': api_args['volume'], 'snapshot': api_args['snapshot'], } self.client.send_request.assert_called_once_with('snapshot-delete', asserted_api_args) def test_create_cg_snapshot(self): self.mock_object(self.client, '_start_cg_snapshot', mock.Mock( return_value=fake.CONSISTENCY_GROUP_ID)) self.mock_object(self.client, '_commit_cg_snapshot') self.client.create_cg_snapshot([fake.CG_VOLUME_NAME], fake.CG_SNAPSHOT_NAME) self.client._commit_cg_snapshot.assert_called_once_with( fake.CONSISTENCY_GROUP_ID) def test_start_cg_snapshot(self): snapshot_init = { 'snapshot': fake.CG_SNAPSHOT_NAME, 'timeout': 'relaxed', 'volumes': [{'volume-name': fake.CG_VOLUME_NAME}], } self.mock_object(self.client, 'send_request') self.client._start_cg_snapshot([fake.CG_VOLUME_NAME], snapshot_init['snapshot']) self.client.send_request.assert_called_once_with('cg-start', snapshot_init) def test_commit_cg_snapshot(self): snapshot_commit = {'cg-id': fake.CG_VOLUME_ID} self.mock_object(self.client, 'send_request') self.client._commit_cg_snapshot(snapshot_commit['cg-id']) self.client.send_request.assert_called_once_with( 'cg-commit', {'cg-id': snapshot_commit['cg-id']}) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py0000664000567000056710000007053712701406250030672 0ustar jenkinsjenkins00000000000000# Copyright (c) - 2015, Tom Barron. All rights reserved. # Copyright (c) - 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
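# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: the constants below
# are canned ONTAP API responses built by %-substituting values into XML
# templates and parsing them with lxml.  A minimal, self-contained version
# of the same idiom (the element names here are made up):
from lxml import etree as _sketch_etree

_SKETCH_RESPONSE = _sketch_etree.XML("""
    <results status="passed">
        <node-name>%(node)s</node-name>
    </results>
""" % {'node': 'fake_node1'})

# The parsed tree can then be queried like any other element tree.
assert _SKETCH_RESPONSE.findtext('node-name') == 'fake_node1'
# ---------------------------------------------------------------------------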
from lxml import etree import mock from six.moves import urllib from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake import cinder.volume.drivers.netapp.dataontap.client.api as netapp_api FAKE_VOL_XML = b""" open123 online 0 0 0 false false """ FAKE_XML1 = b"""\ abc\ abc\ """ FAKE_XML2 = b"""somecontent""" FAKE_NA_ELEMENT = netapp_api.NaElement(etree.XML(FAKE_VOL_XML)) FAKE_INVOKE_DATA = 'somecontent' FAKE_XML_STR = 'abc' FAKE_API_NAME = 'volume-get-iter' FAKE_API_NAME_ELEMENT = netapp_api.NaElement(FAKE_API_NAME) FAKE_NA_SERVER_STR = '127.0.0.1' FAKE_NA_SERVER = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5 = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5.set_vfiler('filer') FAKE_NA_SERVER_API_1_5.set_api_version(1, 5) FAKE_NA_SERVER_API_1_14 = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_14.set_vserver('server') FAKE_NA_SERVER_API_1_14.set_api_version(1, 14) FAKE_NA_SERVER_API_1_20 = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_20.set_vfiler('filer') FAKE_NA_SERVER_API_1_20.set_vserver('server') FAKE_NA_SERVER_API_1_20.set_api_version(1, 20) FAKE_QUERY = {'volume-attributes': None} FAKE_DES_ATTR = {'volume-attributes': ['volume-id-attributes', 'volume-space-attributes', 'volume-state-attributes', 'volume-qos-attributes']} FAKE_CALL_ARGS_LIST = [mock.call(80), mock.call(8088), mock.call(443), mock.call(8488)] FAKE_RESULT_API_ERR_REASON = netapp_api.NaElement('result') FAKE_RESULT_API_ERR_REASON.add_attr('errno', '000') FAKE_RESULT_API_ERR_REASON.add_attr('reason', 'fake_reason') FAKE_RESULT_API_ERRNO_INVALID = netapp_api.NaElement('result') FAKE_RESULT_API_ERRNO_INVALID.add_attr('errno', '000') FAKE_RESULT_API_ERRNO_VALID = netapp_api.NaElement('result') FAKE_RESULT_API_ERRNO_VALID.add_attr('errno', '14956') FAKE_RESULT_SUCCESS = netapp_api.NaElement('result') FAKE_RESULT_SUCCESS.add_attr('status', 'passed') FAKE_HTTP_OPENER = urllib.request.build_opener() INITIATOR_IQN = 'iqn.2015-06.com.netapp:fake_iqn' USER_NAME = 'fake_user' PASSWORD = 'passw0rd' ENCRYPTED_PASSWORD = 'B351F145DA527445' NO_RECORDS_RESPONSE = etree.XML(""" 0 """) GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE = etree.XML(""" 2
%(address1)s
%(address2)s
""" % {"address1": "1.2.3.4", "address2": "99.98.97.96"}) QOS_POLICY_GROUP_GET_ITER_RESPONSE = etree.XML(""" 30KB/S 1 53 fake_qos_policy_group_name user_defined 12496028-b641-11e5-abbd-123478563412 cinder-iscsi 1 """) VOLUME_LIST_INFO_RESPONSE = etree.XML(""" vol0 64_bit online 1441193750528 3161096192 1438032654336 0 vfiler0 aggr0 volume true false false false vol1 64_bit online 1441193750528 3161096192 1438032654336 0 vfiler0 aggr0 volume true false false false vol2 64_bit offline 1441193750528 3161096192 1438032654336 0 vfiler0 aggr0 volume true false false false vol3 64_bit online 1441193750528 3161096192 1438032654336 0 vfiler0 aggr0 volume true false false false """) SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE = etree.XML(""" %(snapshot_name)s False %(vol_name)s 1 """ % { 'snapshot_name': fake.SNAPSHOT['name'], 'vol_name': fake.SNAPSHOT['volume_id'], }) SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE = etree.XML(""" %(snapshot_name)s True %(vol_name)s 1 """ % { 'snapshot_name': fake.SNAPSHOT['name'], 'vol_name': fake.SNAPSHOT['volume_id'], }) SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_7MODE = etree.XML(""" %(snapshot_name)s False %(vol_name)s """ % { 'snapshot_name': fake.SNAPSHOT['name'], 'vol_name': fake.SNAPSHOT['volume_id'], }) SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_7MODE = etree.XML(""" %(snapshot_name)s True %(vol_name)s """ % { 'snapshot_name': fake.SNAPSHOT['name'], 'vol_name': fake.SNAPSHOT['volume_id'], }) SNAPSHOT_NOT_PRESENT_7MODE = etree.XML(""" NOT_THE_RIGHT_SNAPSHOT false %(vol_name)s """ % {'vol_name': fake.SNAPSHOT['volume_id']}) NO_RECORDS_RESPONSE = etree.XML(""" 0 """) NODE_NAME = 'fake_node1' NODE_NAMES = ('fake_node1', 'fake_node2') VOLUME_AGGREGATE_NAME = 'fake_aggr1' VOLUME_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2') AGGR_GET_ITER_RESPONSE = etree.XML(""" false 64_bit 1758646411 aggr 512 30384 96 30384 30384 30384 243191 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 3 cfo true false true false false false unmirrored online 1 true false /%(aggr1)s/plex0 normal,active block false false false /%(aggr1)s/plex0/rg0 0 0 0 on 16 raid_dp, normal raid_dp online false 0 0 true true 0 0 0 0 0 0 0 0 0 245760 0 95 45670400 943718400 898048000 0 898048000 897802240 1 0 0 %(aggr1)s 15863632-ea49-49a8-9c88-2bd2d57c6d7a cluster3-01 unknown false 64_bit 706602229 aggr 528 31142 96 31142 31142 31142 1945584 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 10 sfo false false true false false false unmirrored online 1 true false /%(aggr2)s/plex0 normal,active block false false false /%(aggr2)s/plex0/rg0 0 0 block false false false /%(aggr2)s/plex0/rg1 0 0 0 on 8 raid4, normal raid4 online false 0 0 true true 0 0 0 0 0 0 0 0 0 425984 0 15 6448431104 7549747200 1101316096 0 1101316096 1100890112 2 0 0 %(aggr2)s 2a741934-1aaf-42dd-93ca-aaf231be108a cluster3-01 not_striped 2 """ % { 'aggr1': VOLUME_AGGREGATE_NAMES[0], 'aggr2': VOLUME_AGGREGATE_NAMES[1], }) AGGR_GET_SPACE_RESPONSE = etree.XML(""" /%(aggr1)s/plex0 /%(aggr1)s/plex0/rg0 45670400 943718400 898048000 %(aggr1)s /%(aggr2)s/plex0 /%(aggr2)s/plex0/rg0 /%(aggr2)s/plex0/rg1 4267659264 7549747200 3282087936 %(aggr2)s 2 """ % { 'aggr1': VOLUME_AGGREGATE_NAMES[0], 'aggr2': VOLUME_AGGREGATE_NAMES[1], }) AGGR_GET_NODE_RESPONSE = etree.XML(""" %(node)s %(aggr)s 1 """ % { 'aggr': VOLUME_AGGREGATE_NAME, 'node': NODE_NAME, }) PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [ 'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 
'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE', 'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO', 'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH', ] PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE = etree.XML(""" No. of times 8.3 names are accessed per second. access_8_3_names diag rate per_sec Array of counts of different types of CPs wafl_timer generated CP snapshot generated CP wafl_avail_bufs generated CP dirty_blk_cnt generated CP full NV-log generated CP,back-to-back CP flush generated CP,sync generated CP deferred back-to-back CP low mbufs generated CP low datavecs generated CP nvlog replay takeover time limit CP cp_count diag delta array none total_cp_msecs Array of percentage time spent in different phases of CP %(labels)s cp_phase_times diag percent array percent """ % {'labels': ','.join(PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS)}) PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML(""" avg_processor_busy 5674745133134 system %(node1)s:kernel:system avg_processor_busy 4077649009234 system %(node2)s:kernel:system 1453412013 """ % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]}) PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE = etree.XML(""" 1454146292 system avg_processor_busy 13215732322 """) PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML(""" system %(node)s:kernel:system 1 """ % {'node': NODE_NAME}) PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE = etree.XML(""" processor0 processor1 """) SYSTEM_GET_INFO_RESPONSE = etree.XML(""" %(node)s 4082368508 SIMBOX SIMBOX NetApp 4082368508 2593 NetApp VSim 999999 2 1599 0x40661 15 2199023255552 17592186044416 500 true """ % {'node': NODE_NAME}) ISCSI_INITIATOR_GET_AUTH_ELEM = etree.XML(""" %s """ % INITIATOR_IQN) ISCSI_INITIATOR_AUTH_LIST_INFO_FAILURE = etree.XML(""" """ % INITIATOR_IQN) cinder-8.0.0/cinder/tests/unit/volume/drivers/netapp/fakes.py0000664000567000056710000000762012701406250025432 0ustar jenkinsjenkins00000000000000# Copyright (c) - 2014, Clinton Knight All rights reserved. # Copyright (c) - 2015, Alex Meade. All Rights Reserved. # Copyright (c) - 2015, Rushil Chugh. All Rights Reserved. # Copyright (c) - 2015, Tom Barron. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinder.volume import configuration as conf import cinder.volume.drivers.netapp.options as na_opts ISCSI_FAKE_LUN_ID = 1 ISCSI_FAKE_IQN = 'iqn.1993-08.org.debian:01:10' ISCSI_FAKE_ADDRESS = '10.63.165.216' ISCSI_FAKE_PORT = '2232' ISCSI_FAKE_VOLUME = {'id': 'fake_id'} ISCSI_FAKE_TARGET = {} ISCSI_FAKE_TARGET['address'] = ISCSI_FAKE_ADDRESS ISCSI_FAKE_TARGET['port'] = ISCSI_FAKE_PORT ISCSI_FAKE_VOLUME = {'id': 'fake_id', 'provider_auth': 'None stack password'} FC_ISCSI_TARGET_INFO_DICT = {'target_discovered': False, 'target_portal': '10.63.165.216:2232', 'target_iqn': ISCSI_FAKE_IQN, 'target_lun': ISCSI_FAKE_LUN_ID, 'volume_id': ISCSI_FAKE_VOLUME['id'], 'auth_method': 'None', 'auth_username': 'stack', 'auth_password': 'password'} VOLUME_NAME = 'fake_volume_name' VOLUME_ID = 'fake_volume_id' VOLUME_TYPE_ID = 'fake_volume_type_id' VOLUME = { 'name': VOLUME_NAME, 'size': 42, 'id': VOLUME_ID, 'host': 'fake_host@fake_backend#fake_pool', 'volume_type_id': VOLUME_TYPE_ID, } SNAPSHOT_NAME = 'fake_snapshot_name' SNAPSHOT_ID = 'fake_snapshot_id' SNAPSHOT = { 'name': SNAPSHOT_NAME, 'id': SNAPSHOT_ID, 'volume_id': VOLUME_ID, 'volume_name': VOLUME_NAME, 'volume_size': 42, } QOS_SPECS = {} EXTRA_SPECS = {} MAX_THROUGHPUT = '21734278B/s' QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' LEGACY_EXTRA_SPECS = {'netapp:qos_policy_group': QOS_POLICY_GROUP_NAME} LEGACY_QOS = { 'policy_name': QOS_POLICY_GROUP_NAME, } QOS_POLICY_GROUP_SPEC = { 'max_throughput': MAX_THROUGHPUT, 'policy_name': 'openstack-%s' % VOLUME_ID, } QOS_POLICY_GROUP_INFO_NONE = {'legacy': None, 'spec': None} QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC} LEGACY_QOS_POLICY_GROUP_INFO = { 'legacy': LEGACY_QOS, 'spec': None, } INVALID_QOS_POLICY_GROUP_INFO = { 'legacy': LEGACY_QOS, 'spec': QOS_POLICY_GROUP_SPEC, } QOS_SPECS_ID = 'fake_qos_specs_id' QOS_SPEC = {'maxBPS': 21734278} OUTER_BACKEND_QOS_SPEC = { 'id': QOS_SPECS_ID, 'specs': QOS_SPEC, 'consumer': 'back-end', } OUTER_FRONTEND_QOS_SPEC = { 'id': QOS_SPECS_ID, 'specs': QOS_SPEC, 'consumer': 'front-end', } OUTER_BOTH_QOS_SPEC = { 'id': QOS_SPECS_ID, 'specs': QOS_SPEC, 'consumer': 'both', } VOLUME_TYPE = {'id': VOLUME_TYPE_ID, 'qos_specs_id': QOS_SPECS_ID} def create_configuration(): config = conf.Configuration(None) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) return config def create_configuration_7mode(): config = create_configuration() config.append_config_values(na_opts.netapp_7mode_opts) return config def create_configuration_cmode(): config = create_configuration() config.append_config_values(na_opts.netapp_cluster_opts) return config cinder-8.0.0/cinder/tests/unit/volume/drivers/test_datera.py0000664000567000056710000004370112701406257025360 0ustar jenkinsjenkins00000000000000# Copyright 2015 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
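# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: the setUp() below
# uses the patcher form of mock.patch -- start() activates the patch for
# the whole test, and addCleanup(patcher.stop) guarantees it is undone
# even if the test fails.  A minimal, self-contained version of that idiom
# (the patched target is just an arbitrary stdlib function):
import unittest

import mock


class _PatcherIdiomSketch(unittest.TestCase):
    def setUp(self):
        patcher = mock.patch('os.path.exists')
        self.mock_exists = patcher.start()
        self.addCleanup(patcher.stop)

    def test_exists_is_patched(self):
        import os.path
        self.mock_exists.return_value = True
        self.assertTrue(os.path.exists('/no/such/path'))
# ---------------------------------------------------------------------------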
import mock from cinder import context from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers import datera from cinder.volume import volume_types DEFAULT_STORAGE_NAME = datera.DEFAULT_STORAGE_NAME DEFAULT_VOLUME_NAME = datera.DEFAULT_VOLUME_NAME class DateraVolumeTestCase(test.TestCase): def setUp(self): super(DateraVolumeTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.san_ip = '127.0.0.1' self.cfg.san_is_local = True self.cfg.datera_api_token = 'secret' self.cfg.datera_api_port = '7717' self.cfg.datera_api_version = '1' self.cfg.datera_num_replicas = '2' self.cfg.san_login = 'user' self.cfg.san_password = 'pass' mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = datera.DateraDriver(execute=mock_exec, configuration=self.cfg) self.driver.set_initialized() self.volume = _stub_volume() self.api_patcher = mock.patch('cinder.volume.drivers.datera.' 'DateraDriver._issue_api_request') self.mock_api = self.api_patcher.start() self.addCleanup(self.api_patcher.stop) def test_volume_create_success(self): self.mock_api.return_value = stub_single_ai self.assertIsNone(self.driver.create_volume(self.volume)) def test_volume_create_fails(self): self.mock_api.side_effect = exception.DateraAPIException self.assertRaises(exception.DateraAPIException, self.driver.create_volume, self.volume) def test_volume_create_delay(self): """Verify after 1st retry volume becoming available is a success.""" def _progress_api_return(mock_api): if mock_api.retry_count == 1: _bad_vol_ai = stub_single_ai.copy() _bad_vol_ai['storage_instances'][ DEFAULT_STORAGE_NAME]['volumes'][DEFAULT_VOLUME_NAME][ 'op_status'] = 'unavailable' return _bad_vol_ai else: self.mock_api.retry_count += 1 return stub_single_ai self.mock_api.retry_count = 0 self.mock_api.return_value = _progress_api_return(self.mock_api) self.assertEqual(1, self.mock_api.retry_count) self.assertIsNone(self.driver.create_volume(self.volume)) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_with_extra_specs(self, mock_get_type): self.mock_api.return_value = stub_single_ai mock_get_type.return_value = { 'name': u'The Best', 'qos_specs_id': None, 'deleted': False, 'created_at': '2015-08-14 04:18:11', 'updated_at': None, 'extra_specs': { u'volume_backend_name': u'datera', u'qos:max_iops_read': u'2000', u'qos:max_iops_write': u'4000', u'qos:max_iops_total': u'4000' }, 'is_public': True, 'deleted_at': None, 'id': u'dffb4a83-b8fb-4c19-9f8c-713bb75db3b1', 'description': None } mock_volume = _stub_volume( volume_type_id='dffb4a83-b8fb-4c19-9f8c-713bb75db3b1' ) self.assertIsNone(self.driver.create_volume(mock_volume)) self.assertTrue(mock_get_type.called) def test_create_cloned_volume_success(self): source_volume = _stub_volume( id='7f91abfa-7964-41ed-88fc-207c3a290b4f', display_name='foo' ) self.assertIsNone(self.driver.create_cloned_volume(self.volume, source_volume)) def test_create_cloned_volume_fails(self): self.mock_api.side_effect = exception.DateraAPIException source_volume = _stub_volume( id='7f91abfa-7964-41ed-88fc-207c3a290b4f', display_name='foo' ) self.assertRaises(exception.DateraAPIException, self.driver.create_cloned_volume, self.volume, source_volume) def test_delete_volume_success(self): self.assertIsNone(self.driver.delete_volume(self.volume)) def test_delete_volume_not_found(self): self.mock_api.side_effect = exception.NotFound self.assertIsNone(self.driver.delete_volume(self.volume)) def 
test_delete_volume_fails(self): self.mock_api.side_effect = exception.DateraAPIException self.assertRaises(exception.DateraAPIException, self.driver.delete_volume, self.volume) def test_ensure_export_success(self): self.mock_api.side_effect = self._generate_fake_api_request() ctxt = context.get_admin_context() expected = { 'provider_location': '172.28.94.11:3260 iqn.2013-05.com.daterainc' ':c20aba21-6ef6-446b-b374-45733b4883ba--ST' '--storage-1:01:sn:34e5b20fbadd3abb 0'} self.assertEqual(expected, self.driver.ensure_export(ctxt, self.volume, None)) def test_ensure_export_fails(self): self.mock_api.side_effect = exception.DateraAPIException ctxt = context.get_admin_context() self.assertRaises(exception.DateraAPIException, self.driver.ensure_export, ctxt, self.volume, None) def test_create_export_target_does_not_exist_success(self): self.mock_api.side_effect = self._generate_fake_api_request( targets_exist=False) ctxt = context.get_admin_context() expected = { 'provider_location': '172.28.94.11:3260 iqn.2013-05.com.daterainc' ':c20aba21-6ef6-446b-b374-45733b4883ba--ST' '--storage-1:01:sn:34e5b20fbadd3abb 0'} self.assertEqual(expected, self.driver.create_export(ctxt, self.volume, None)) def test_create_export_fails(self): self.mock_api.side_effect = exception.DateraAPIException ctxt = context.get_admin_context() self.assertRaises(exception.DateraAPIException, self.driver.create_export, ctxt, self.volume, None) def test_detach_volume_success(self): self.mock_api.return_value = {} ctxt = context.get_admin_context() volume = _stub_volume(status='in-use') self.assertIsNone(self.driver.detach_volume(ctxt, volume)) def test_detach_volume_fails(self): self.mock_api.side_effect = exception.DateraAPIException ctxt = context.get_admin_context() volume = _stub_volume(status='in-use') self.assertRaises(exception.DateraAPIException, self.driver.detach_volume, ctxt, volume) def test_detach_volume_not_found(self): self.mock_api.side_effect = exception.NotFound ctxt = context.get_admin_context() volume = _stub_volume(status='in-use') self.assertIsNone(self.driver.detach_volume(ctxt, volume)) def test_create_snapshot_success(self): snapshot = _stub_snapshot(volume_id=self.volume['id']) self.assertIsNone(self.driver.create_snapshot(snapshot)) def test_create_snapshot_fails(self): self.mock_api.side_effect = exception.DateraAPIException snapshot = _stub_snapshot(volume_id=self.volume['id']) self.assertRaises(exception.DateraAPIException, self.driver.create_snapshot, snapshot) def test_delete_snapshot_success(self): snapshot = _stub_snapshot(volume_id=self.volume['id']) self.assertIsNone(self.driver.delete_snapshot(snapshot)) def test_delete_snapshot_not_found(self): self.mock_api.side_effect = [stub_return_snapshots, exception.NotFound] snapshot = _stub_snapshot(self.volume['id']) self.assertIsNone(self.driver.delete_snapshot(snapshot)) def test_delete_snapshot_fails(self): self.mock_api.side_effect = exception.DateraAPIException snapshot = _stub_snapshot(volume_id=self.volume['id']) self.assertRaises(exception.DateraAPIException, self.driver.delete_snapshot, snapshot) def test_create_volume_from_snapshot_success(self): snapshot = _stub_snapshot(volume_id=self.volume['id']) self.mock_api.side_effect = [stub_return_snapshots, None] self.assertIsNone( self.driver.create_volume_from_snapshot(self.volume, snapshot)) def test_create_volume_from_snapshot_fails(self): self.mock_api.side_effect = exception.DateraAPIException snapshot = _stub_snapshot(volume_id=self.volume['id']) 
self.assertRaises(exception.DateraAPIException, self.driver.create_volume_from_snapshot, self.volume, snapshot) def test_extend_volume_success(self): volume = _stub_volume(size=1) self.assertIsNone(self.driver.extend_volume(volume, 2)) def test_extend_volume_fails(self): self.mock_api.side_effect = exception.DateraAPIException volume = _stub_volume(size=1) self.assertRaises(exception.DateraAPIException, self.driver.extend_volume, volume, 2) def test_login_successful(self): self.mock_api.return_value = { 'key': 'dd2469de081346c28ac100e071709403' } self.assertIsNone(self.driver._login()) self.assertEqual(1, self.mock_api.call_count) def test_login_unsuccessful(self): self.mock_api.side_effect = exception.NotAuthorized self.assertRaises(exception.NotAuthorized, self.driver._login) self.assertEqual(1, self.mock_api.call_count) def _generate_fake_api_request(self, targets_exist=True): def _fake_api_request(resource_type, method='get', resource=None, body=None, action=None, sensitive=False): if resource_type.split('/')[-1] == 'storage-1': return stub_get_export elif resource_type == 'app_instances': return stub_single_ai elif (resource_type.split('/')[-1] == 'c20aba21-6ef6-446b-b374-45733b4883ba'): return stub_app_instance[ 'c20aba21-6ef6-446b-b374-45733b4883ba'] return _fake_api_request stub_create_export = { "_ipColl": ["172.28.121.10", "172.28.120.10"], "acls": {}, "activeServers": {"4594953e-f97f-e111-ad85-001e6738c0f0": "1"}, "ctype": "TC_BLOCK_ISCSI", "endpointsExt1": { "4594953e-f97f-e111-ad85-001e6738c0f0": { "ipHigh": 0, "ipLow": "192421036", "ipStr": "172.28.120.11", "ipV": 4, "name": "", "network": 24 } }, "endpointsExt2": { "4594953e-f97f-e111-ad85-001e6738c0f0": { "ipHigh": 0, "ipLow": "192486572", "ipStr": "172.28.121.11", "ipV": 4, "name": "", "network": 24 } }, "inodes": {"c20aba21-6ef6-446b-b374-45733b4883ba": "1"}, "name": "", "networkPort": 0, "serverAllocation": "TS_ALLOC_COMPLETED", "servers": {"4594953e-f97f-e111-ad85-001e6738c0f0": "1"}, "targetAllocation": "TS_ALLOC_COMPLETED", "targetIds": { "4594953e-f97f-e111-ad85-001e6738c0f0": { "ids": [{ "dev": None, "id": "iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe" }] } }, "typeName": "TargetIscsiConfig", "uuid": "7071efd7-9f22-4996-8f68-47e9ab19d0fd" } stub_app_instance = { "c20aba21-6ef6-446b-b374-45733b4883ba": { "admin_state": "online", "clone_src": {}, "create_mode": "openstack", "descr": "", "health": "ok", "name": "c20aba21-6ef6-446b-b374-45733b4883ba", "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba", "storage_instances": { "storage-1": { "access": { "ips": [ "172.28.94.11" ], "iqn": "iqn.2013-05.com.daterainc:c20aba21-6ef6-446b-" "b374-45733b4883ba--ST--storage-1:01:sn:" "34e5b20fbadd3abb", "path": "/app_instances/c20aba21-6ef6-446b-b374" "-45733b4883ba/storage_instances/storage-1/access" }, "access_control": { "initiator_groups": [], "initiators": [], "path": "/app_instances/c20aba21-6ef6-446b-b374-" "45733b4883ba/storage_instances/storage-1" "/access_control" }, "access_control_mode": "allow_all", "active_initiators": [], "active_storage_nodes": [ "/storage_nodes/1c4feac4-17c7-478b-8928-c76e8ec80b72" ], "admin_state": "online", "auth": { "initiator_pswd": "", "initiator_user_name": "", "path": "/app_instances/c20aba21-6ef6-446b-b374-" "45733b4883ba/storage_instances/storage-1/auth", "target_pswd": "", "target_user_name": "", "type": "none" }, "creation_type": "user", "descr": "c20aba21-6ef6-446b-b374-45733b4883ba__ST__storage-1", "name": "storage-1", "path": "/app_instances/c20aba21-6ef6-446b-b374-" 
"45733b4883ba/storage_instances/storage-1", "uuid": "b9897b84-149f-43c7-b19c-27d6af8fa815", "volumes": { "volume-1": { "capacity_in_use": 0, "name": "volume-1", "op_state": "available", "path": "/app_instances/c20aba21-6ef6-446b-b374-" "45733b4883ba/storage_instances/storage-1" "/volumes/volume-1", "replica_count": 3, "size": 500, "snapshot_policies": {}, "snapshots": { "1445384931.322468627": { "op_state": "available", "path": "/app_instances/c20aba21-6ef6-446b" "-b374-45733b4883ba/storage_instances" "/storage-1/volumes/volume-1/snapshots" "/1445384931.322468627", "uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c" } }, "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" } } } }, "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" } } stub_get_export = stub_app_instance[ 'c20aba21-6ef6-446b-b374-45733b4883ba']['storage_instances']['storage-1'] stub_single_ai = stub_app_instance['c20aba21-6ef6-446b-b374-45733b4883ba'] stub_return_snapshots = \ { "1446076293.118600738": { "op_state": "available", "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" "/storage_instances/storage-1/volumes/volume-1/snapshots/" "1446076293.118600738", "uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c" }, "1446076384.00607846": { "op_state": "available", "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" "/storage_instances/storage-1/volumes/volume-1/snapshots/" "1446076384.00607846", "uuid": "25b4b959-c30a-45f2-a90c-84a40f34f0a1" } } def _stub_datera_volume(*args, **kwargs): return { "status": "available", "name": "test", "num_replicas": "2", "parent": "00000000-0000-0000-0000-000000000000", "size": "1024", "sub_type": "IS_ORIGINAL", "uuid": "10305aa4-1343-4363-86fe-f49eb421a48c", "snapshots": [], "snapshot_configs": [], "targets": [ kwargs.get('targets', "744e1bd8-d741-4919-86cd-806037d98c8a"), ] } def _stub_volume(*args, **kwargs): uuid = u'c20aba21-6ef6-446b-b374-45733b4883ba' name = u'volume-00000001' size = 1 volume = {} volume['id'] = kwargs.get('id', uuid) volume['display_name'] = kwargs.get('display_name', name) volume['size'] = kwargs.get('size', size) volume['provider_location'] = kwargs.get('provider_location', None) volume['volume_type_id'] = kwargs.get('volume_type_id', None) return volume def _stub_snapshot(*args, **kwargs): uuid = u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c' name = u'snapshot-00000001' volume = {} volume['id'] = kwargs.get('id', uuid) volume['display_name'] = kwargs.get('display_name', name) volume['volume_id'] = kwargs.get('volume_id', None) return volume cinder-8.0.0/cinder/tests/unit/volume/flows/0000775000567000056710000000000012701406543022154 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/flows/__init__.py0000664000567000056710000000000012701406250024246 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/volume/flows/test_manage_volume_flow.py0000664000567000056710000000345112701406250027431 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests for manage_existing TaskFlow """ from cinder import context from cinder import test from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.flows import fake_volume_api from cinder.volume.flows.api import manage_existing class ManageVolumeFlowTestCase(test.TestCase): def setUp(self): super(ManageVolumeFlowTestCase, self).setUp() self.ctxt = context.get_admin_context() self.counter = float(0) def test_cast_manage_existing(self): volume = fake_volume.fake_volume_type_obj(self.ctxt) spec = { 'name': 'name', 'description': 'description', 'host': 'host', 'ref': 'ref', 'volume_type': 'volume_type', 'metadata': 'metadata', 'availability_zone': 'availability_zone', 'bootable': 'bootable', 'volume_id': volume.id, } # Fake objects assert specs task = manage_existing.ManageCastTask( fake_volume_api.FakeSchedulerRpcAPI(spec, self), fake_volume_api.FakeDb()) create_what = spec.copy() create_what.update({'volume': volume}) create_what.pop('volume_id') task.execute(self.ctxt, **create_what) cinder-8.0.0/cinder/tests/unit/volume/flows/fake_volume_api.py0000664000567000056710000000453212701406250025653 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class FakeVolumeAPI(object): def __init__(self, expected_spec, test_inst): self.expected_spec = expected_spec self.test_inst = test_inst def create_volume(self, ctxt, volume, host, request_spec, filter_properties, allow_reschedule=True, snapshot_id=None, image_id=None, source_volid=None, source_replicaid=None): self.test_inst.assertEqual(self.expected_spec, request_spec) self.test_inst.assertEqual(request_spec['source_volid'], source_volid) self.test_inst.assertEqual(request_spec['snapshot_id'], snapshot_id) self.test_inst.assertEqual(request_spec['image_id'], image_id) self.test_inst.assertEqual(request_spec['source_replicaid'], source_replicaid) class FakeSchedulerRpcAPI(object): def __init__(self, expected_spec, test_inst): self.expected_spec = expected_spec self.test_inst = test_inst def create_volume(self, ctxt, topic, volume_id, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, volume=None): self.test_inst.assertEqual(self.expected_spec, request_spec) def manage_existing(self, context, volume_topic, volume_id, request_spec=None): self.test_inst.assertEqual(self.expected_spec, request_spec) class FakeDb(object): def volume_get(self, *args, **kwargs): return {'host': 'barf'} def volume_update(self, *args, **kwargs): return {'host': 'farb'} def snapshot_get(self, *args, **kwargs): return {'volume_id': 1} def consistencygroup_get(self, *args, **kwargs): return {'consistencygroup_id': 1} cinder-8.0.0/cinder/tests/unit/volume/flows/test_create_volume_flow.py0000664000567000056710000012775212701406250027457 0ustar jenkinsjenkins00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for create_volume TaskFlow """ import ddt import mock from oslo_utils import imageutils from cinder import context from cinder import exception from cinder import test from cinder.tests.unit import fake_consistencygroup from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit.keymgr import mock_key_mgr from cinder.tests.unit import utils from cinder.tests.unit.volume.flows import fake_volume_api from cinder.volume.flows.api import create_volume from cinder.volume.flows.manager import create_volume as create_volume_manager @ddt.ddt class CreateVolumeFlowTestCase(test.TestCase): def time_inc(self): self.counter += 1 return self.counter def setUp(self): super(CreateVolumeFlowTestCase, self).setUp() self.ctxt = context.get_admin_context() # Ensure that time.time() always returns more than the last time it was # called to avoid div by zero errors. self.counter = float(0) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.volume.utils.extract_host') @mock.patch('time.time') @mock.patch('cinder.objects.ConsistencyGroup.get_by_id') def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time, mock_extract_host, volume_get_by_id): mock_time.side_effect = self.time_inc volume = fake_volume.fake_volume_obj(self.ctxt) volume_get_by_id.return_value = volume props = {} cg_obj = (fake_consistencygroup. fake_consistencyobject_obj(self.ctxt, consistencygroup_id=1, host='host@backend#pool')) consistencygroup_get_by_id.return_value = cg_obj spec = {'volume_id': None, 'volume': None, 'source_volid': None, 'snapshot_id': None, 'image_id': None, 'source_replicaid': None, 'consistencygroup_id': None, 'cgsnapshot_id': None} # Fake objects assert specs task = create_volume.VolumeCastTask( fake_volume_api.FakeSchedulerRpcAPI(spec, self), fake_volume_api.FakeVolumeAPI(spec, self), fake_volume_api.FakeDb()) task._cast_create_volume(self.ctxt, spec, props) spec = {'volume_id': volume.id, 'volume': volume, 'source_volid': 2, 'snapshot_id': 3, 'image_id': 4, 'source_replicaid': 5, 'consistencygroup_id': 5, 'cgsnapshot_id': None} # Fake objects assert specs task = create_volume.VolumeCastTask( fake_volume_api.FakeSchedulerRpcAPI(spec, self), fake_volume_api.FakeVolumeAPI(spec, self), fake_volume_api.FakeDb()) task._cast_create_volume(self.ctxt, spec, props) consistencygroup_get_by_id.assert_called_once_with(self.ctxt, 5) mock_extract_host.assert_called_once_with('host@backend#pool') @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' 
'_get_volume_type_id') def test_extract_volume_request_from_image_encrypted( self, fake_get_volume_type_id, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 1 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = True self.assertRaises(exception.InvalidInput, task.execute, self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' '_get_volume_type_id') def test_extract_volume_request_from_image( self, fake_get_type_id, fake_get_qos, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 2 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() volume_type = 'type1' task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_type_id.return_value = 1 fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zone': 'nova', 'volume_type': volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'source_replicaid': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, } self.assertEqual(expected_result, result) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' '_get_volume_type_id') def test_extract_availability_zone_without_fallback( self, fake_get_type_id, fake_get_qos, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 3 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() volume_type = 'type1' task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_type_id.return_value = 1 fake_get_qos.return_value = {'qos_specs': None} self.assertRaises(exception.InvalidInput, task.execute, self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='notnova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' 
'_get_volume_type_id') def test_extract_availability_zone_with_fallback( self, fake_get_type_id, fake_get_qos, fake_is_encrypted): self.override_config('allow_availability_zone_fallback', True) fake_image_service = fake_image.FakeImageService() image_id = 4 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() volume_type = 'type1' task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_type_id.return_value = 1 fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='does_not_exist', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zone': 'nova', 'volume_type': volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'source_replicaid': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, } self.assertEqual(expected_result, result) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' '_get_volume_type_id') def test_extract_volume_request_from_image_with_qos_specs( self, fake_get_type_id, fake_get_qos, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 5 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() volume_type = 'type1' task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_type_id.return_value = 1 fake_qos_spec = {'specs': {'fake_key': 'fake'}} fake_get_qos.return_value = {'qos_specs': fake_qos_spec} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zone': 'nova', 'volume_type': volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': {'fake_key': 'fake'}, 'source_replicaid': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, } self.assertEqual(expected_result, result) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_types.get_default_volume_type') @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' 
'_get_volume_type_id') def test_extract_image_volume_type_from_image( self, fake_get_type_id, fake_get_vol_type, fake_get_def_vol_type, fake_get_qos, fake_is_encrypted): image_volume_type = 'type_from_image' fake_image_service = fake_image.FakeImageService() image_id = 6 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 image_meta['properties'] = {} image_meta['properties']['cinder_img_volume_type'] = image_volume_type fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_type_id.return_value = 1 fake_get_vol_type.return_value = image_volume_type fake_get_def_vol_type.return_value = 'fake_vol_type' fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zone': 'nova', 'volume_type': image_volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'source_replicaid': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, } self.assertEqual(expected_result, result) @mock.patch('cinder.db.volume_type_get_by_name') @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_types.get_default_volume_type') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' '_get_volume_type_id') def test_extract_image_volume_type_from_image_invalid_type( self, fake_get_type_id, fake_get_def_vol_type, fake_get_qos, fake_is_encrypted, fake_db_get_vol_type): image_volume_type = 'invalid' fake_image_service = fake_image.FakeImageService() image_id = 7 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 image_meta['properties'] = {} image_meta['properties']['cinder_img_volume_type'] = image_volume_type fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_type_id.return_value = 1 fake_get_def_vol_type.return_value = 'fake_vol_type' fake_db_get_vol_type.side_effect = ( exception.VolumeTypeNotFoundByName(volume_type_name='invalid')) fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zone': 'nova', 'volume_type': 'fake_vol_type', 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'source_replicaid': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, } self.assertEqual(expected_result, result) @mock.patch('cinder.db.volume_type_get_by_name') @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_types.get_default_volume_type') @mock.patch('cinder.volume.flows.api.create_volume.' 
'ExtractVolumeRequestTask.' '_get_volume_type_id') @ddt.data((8, None), (9, {'cinder_img_volume_type': None})) @ddt.unpack def test_extract_image_volume_type_from_image_properties_error( self, image_id, fake_img_properties, fake_get_type_id, fake_get_def_vol_type, fake_get_qos, fake_is_encrypted, fake_db_get_vol_type): fake_image_service = fake_image.FakeImageService() image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 image_meta['properties'] = fake_img_properties fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_type_id.return_value = 1 fake_get_def_vol_type.return_value = 'fake_vol_type' fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zone': 'nova', 'volume_type': 'fake_vol_type', 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'source_replicaid': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, } self.assertEqual(expected_result, result) @mock.patch('cinder.db.volume_type_get_by_name') @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_types.get_default_volume_type') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' '_get_volume_type_id') def test_extract_image_volume_type_from_image_invalid_input( self, fake_get_type_id, fake_get_def_vol_type, fake_get_qos, fake_is_encrypted, fake_db_get_vol_type): fake_image_service = fake_image.FakeImageService() image_id = 10 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'inactive' fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_mgr.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_type_id.return_value = 1 fake_get_def_vol_type.return_value = 'fake_vol_type' fake_get_qos.return_value = {'qos_specs': None} self.assertRaises(exception.InvalidInput, task.execute, self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, source_replica=None, consistencygroup=None, cgsnapshot=None) class CreateVolumeFlowManagerTestCase(test.TestCase): def setUp(self): super(CreateVolumeFlowManagerTestCase, self).setUp() self.ctxt = context.get_admin_context() @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' 
'_handle_bootable_volume_glance_meta') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_create_from_snapshot(self, snapshot_get_by_id, volume_get_by_id, handle_bootable): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_volume_manager = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver) volume_db = {'bootable': True} volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = volume_obj fake_manager._create_from_snapshot(self.ctxt, volume_obj, snapshot_obj.id) fake_driver.create_volume_from_snapshot.assert_called_once_with( volume_obj, snapshot_obj) handle_bootable.assert_called_once_with(self.ctxt, volume_obj.id, snapshot_id=snapshot_obj.id) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_create_from_snapshot_update_failure(self, snapshot_get_by_id): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_volume_manager = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver) volume = fake_volume.fake_db_volume() snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt) snapshot_get_by_id.return_value = snapshot_obj fake_db.volume_get.side_effect = exception.CinderException self.assertRaises(exception.MetadataUpdateFailure, fake_manager._create_from_snapshot, self.ctxt, volume, snapshot_obj.id) fake_driver.create_volume_from_snapshot.assert_called_once_with( volume, snapshot_obj) class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase): def setUp(self): super(CreateVolumeFlowManagerGlanceCinderBackendCase, self).setUp() self.ctxt = context.get_admin_context() @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' 
'_handle_bootable_volume_glance_meta') def test_create_from_image_volume(self, handle_bootable, mock_fetch_img, format='raw', owner=None, location=True): self.flags(allowed_direct_url_schemes=['cinder']) mock_fetch_img.return_value = mock.MagicMock( spec=utils.get_file_spec()) fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( mock.MagicMock(), fake_db, fake_driver) fake_image_service = mock.MagicMock() volume = fake_volume.fake_volume_obj(self.ctxt) image_volume = fake_volume.fake_volume_obj(self.ctxt, volume_metadata={}) image_id = '34e54c31-3bc8-5c1d-9fff-2225bcce4b59' url = 'cinder://%s' % image_volume['id'] image_location = None if location: image_location = (url, [{'url': url, 'metadata': {}}]) image_meta = {'id': image_id, 'container_format': 'bare', 'disk_format': format, 'owner': owner or self.ctxt.project_id} fake_driver.clone_image.return_value = (None, False) fake_db.volume_get_all_by_host.return_value = [image_volume] fake_manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, fake_image_service) if format == 'raw' and not owner and location: fake_driver.create_cloned_volume.assert_called_once_with( volume, image_volume) handle_bootable.assert_called_once_with(self.ctxt, volume['id'], image_id=image_id, image_meta=image_meta) else: self.assertFalse(fake_driver.create_cloned_volume.called) def test_create_from_image_volume_in_qcow2_format(self): self.test_create_from_image_volume(format='qcow2') def test_create_from_image_volume_of_other_owner(self): self.test_create_from_image_volume(owner='fake-owner') def test_create_from_image_volume_without_location(self): self.test_create_from_image_volume(location=False) @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_handle_bootable_volume_glance_meta') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_create_from_source_volume') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' 
'_create_from_image_download') @mock.patch('cinder.context.get_internal_tenant_context') class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase): def setUp(self): super(CreateVolumeFlowManagerImageCacheTestCase, self).setUp() self.ctxt = context.get_admin_context() self.mock_db = mock.MagicMock() self.mock_driver = mock.MagicMock() self.mock_cache = mock.MagicMock() self.mock_image_service = mock.MagicMock() self.mock_volume_manager = mock.MagicMock() self.internal_context = self.ctxt self.internal_context.user_id = 'abc123' self.internal_context.project_id = 'def456' def test_create_from_image_clone_image_and_skip_cache( self, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): self.mock_driver.clone_image.return_value = (None, True) volume = fake_volume.fake_volume_obj(self.ctxt) image_location = 'someImageLocationStr' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_meta = mock.Mock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure clone_image is always called even if the cache is enabled self.assertTrue(self.mock_driver.clone_image.called) # Create from source shouldn't happen if clone_image succeeds self.assertFalse(mock_create_from_src.called) # The image download should not happen if clone_image succeeds self.assertFalse(mock_create_from_img_dl.called) mock_handle_bootable.assert_called_once_with( self.ctxt, volume['id'], image_id=image_id, image_meta=image_meta ) def test_create_from_image_cannot_use_cache( self, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): mock_get_internal_context.return_value = None self.mock_driver.clone_image.return_value = (None, False) volume = fake_volume.fake_volume_obj(self.ctxt) image_location = 'someImageLocationStr' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_meta = { 'properties': { 'virtual_size': '2147483648' } } manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure clone_image is always called self.assertTrue(self.mock_driver.clone_image.called) # Create from source shouldn't happen if cache cannot be used. self.assertFalse(mock_create_from_src.called) # The image download should happen if clone fails and we can't use the # image-volume cache. 
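# NOTE: Illustrative sketch only -- not part of the original tests (the
# assertion that follows still belongs to the cannot-use-cache case above).
# As a hedged summary, the decision cascade these cache tests exercise in
# CreateVolumeFromSpecTask._create_from_image looks roughly like this; the
# helper and its names are hypothetical simplifications:
def _sketch_image_source_fallbacks(driver, cache, internal_ctxt):
    # 1. The driver always gets first shot at cloning straight from the
    #    image location.
    _model_update, cloned = driver.clone_image()
    if cloned:
        return 'clone_image'
    # 2. The image-volume cache is consulted only when it is configured and
    #    an internal tenant context could be obtained.
    if cache is None or internal_ctxt is None:
        return 'download'
    # 3. A cache hit clones from the cached image-volume; a miss falls back
    #    to downloading and then seeds a new cache entry.
    if cache.get_entry():
        return 'clone_from_cached_image_volume'
    return 'download_then_seed_cache'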
mock_create_from_img_dl.assert_called_once_with( self.ctxt, volume, image_location, image_id, self.mock_image_service ) # This should not attempt to use a minimal size volume self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) mock_handle_bootable.assert_called_once_with( self.ctxt, volume['id'], image_id=image_id, image_meta=image_meta ) def test_create_from_image_cache_hit( self, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): self.mock_driver.clone_image.return_value = (None, False) image_volume_id = '70a599e0-31e7-49b7-b260-868f441e862b' self.mock_cache.get_entry.return_value = { 'volume_id': image_volume_id } volume = fake_volume.fake_volume_obj(self.ctxt) image_location = 'someImageLocationStr' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_meta = mock.Mock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure clone_image is always called even if the cache is enabled self.assertTrue(self.mock_driver.clone_image.called) # For a cache hit it should only clone from the image-volume mock_create_from_src.assert_called_once_with(self.ctxt, volume, image_volume_id) # The image download should not happen when we get a cache hit self.assertFalse(mock_create_from_img_dl.called) mock_handle_bootable.assert_called_once_with( self.ctxt, volume['id'], image_id=image_id, image_meta=image_meta ) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_from_image_cache_miss( self, mock_qemu_info, mock_volume_get, mock_volume_update, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): mock_get_internal_context.return_value = self.ctxt mock_fetch_img.return_value = mock.MagicMock( spec=utils.get_file_spec()) image_info = imageutils.QemuImgInfo() image_info.virtual_size = '2147483648' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None volume = fake_volume.fake_volume_obj(self.ctxt, size=10, host='foo@bar#pool') mock_volume_get.return_value = volume image_location = 'someImageLocationStr' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_meta = mock.MagicMock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure clone_image is always called self.assertTrue(self.mock_driver.clone_image.called) # The image download should happen if clone fails and # we get a cache miss mock_create_from_img_dl.assert_called_once_with( self.ctxt, mock.ANY, image_location, image_id, self.mock_image_service ) # The volume size should be reduced to virtual_size and then put back mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2}) mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10}) # Make sure created a new cache entry (self.mock_volume_manager. 
_create_image_cache_volume_entry.assert_called_once_with( self.ctxt, volume, image_id, image_meta)) mock_handle_bootable.assert_called_once_with( self.ctxt, volume['id'], image_id=image_id, image_meta=image_meta ) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_from_image_cache_miss_error_downloading( self, mock_qemu_info, mock_volume_get, mock_volume_update, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): mock_fetch_img.return_value = mock.MagicMock() image_info = imageutils.QemuImgInfo() image_info.virtual_size = '2147483648' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None volume = fake_volume.fake_volume_obj(self.ctxt, size=10, host='foo@bar#pool') mock_volume_get.return_value = volume mock_create_from_img_dl.side_effect = exception.CinderException() image_location = 'someImageLocationStr' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_meta = mock.MagicMock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) self.assertRaises( exception.CinderException, manager._create_from_image, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service ) # Make sure clone_image is always called self.assertTrue(self.mock_driver.clone_image.called) # The image download should happen if clone fails and # we get a cache miss mock_create_from_img_dl.assert_called_once_with( self.ctxt, mock.ANY, image_location, image_id, self.mock_image_service ) # The volume size should be reduced to virtual_size and then put back, # especially if there is an exception while creating the volume. self.assertEqual(2, mock_volume_update.call_count) mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2}) mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10}) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) def test_create_from_image_no_internal_context( self, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): self.mock_driver.clone_image.return_value = (None, False) mock_get_internal_context.return_value = None volume = fake_volume.fake_volume_obj(self.ctxt) image_location = 'someImageLocationStr' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_meta = { 'properties': { 'virtual_size': '2147483648' } } manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure clone_image is always called self.assertTrue(self.mock_driver.clone_image.called) # Create from source shouldn't happen if cache cannot be used. self.assertFalse(mock_create_from_src.called) # The image download should happen if clone fails and we can't use the # image-volume cache due to not having an internal context available. 
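# NOTE: Illustrative sketch only -- not part of the original tests (the
# assertion that follows it still belongs to the no-internal-context case
# above). The error-path test earlier in this class pairs a mock
# side_effect with assertRaises to prove the compensating volume_update
# (restoring the requested size) still runs when the download blows up; a
# minimal standalone form of that pattern, with all names hypothetical:
def _sketch_cleanup_still_runs_on_failure(testcase, mock_download,
                                          mock_update, run_flow):
    # Arrange: make the downstream download call fail.
    mock_download.side_effect = exception.CinderException()
    # Act/assert: the failure must propagate out of the flow...
    testcase.assertRaises(exception.CinderException, run_flow)
    # ...while the recorded calls show the size was already put back.
    mock_update.assert_any_call(mock.ANY, mock.ANY, {'size': 10})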
mock_create_from_img_dl.assert_called_once_with( self.ctxt, volume, image_location, image_id, self.mock_image_service ) # This should not attempt to use a minimal size volume self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) mock_handle_bootable.assert_called_once_with( self.ctxt, volume['id'], image_id=image_id, image_meta=image_meta ) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_from_image_cache_miss_error_size_invalid( self, mock_qemu_info, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): mock_fetch_img.return_value = mock.MagicMock() image_info = imageutils.QemuImgInfo() image_info.virtual_size = '2147483648' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None volume = fake_volume.fake_volume_obj(self.ctxt, size=1, host='foo@bar#pool') image_volume = fake_volume.fake_db_volume(size=2) self.mock_db.volume_create.return_value = image_volume image_location = 'someImageLocationStr' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_meta = mock.MagicMock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) self.assertRaises( exception.ImageUnacceptable, manager._create_from_image, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service ) # The volume size should NOT be changed when in this case self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) cinder-8.0.0/cinder/tests/unit/utils.py0000664000567000056710000002147112701406250021225 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import socket import sys import uuid from oslo_service import loopingcall from oslo_utils import timeutils import oslo_versionedobjects from cinder import context from cinder import db from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake def get_test_admin_context(): return context.get_admin_context() def create_volume(ctxt, host='test_host', display_name='test_volume', display_description='this is a test volume', status='available', migration_status=None, size=1, availability_zone='fake_az', volume_type_id=None, replication_status='disabled', replication_extended_status=None, replication_driver_data=None, consistencygroup_id=None, previous_status=None, **kwargs): """Create a volume object in the DB.""" vol = {} vol['size'] = size vol['host'] = host vol['user_id'] = ctxt.user_id vol['project_id'] = ctxt.project_id vol['status'] = status if migration_status: vol['migration_status'] = migration_status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = 'detached' vol['availability_zone'] = availability_zone if consistencygroup_id: vol['consistencygroup_id'] = consistencygroup_id if volume_type_id: vol['volume_type_id'] = volume_type_id for key in kwargs: vol[key] = kwargs[key] vol['replication_status'] = replication_status if replication_extended_status: vol['replication_extended_status'] = replication_extended_status if replication_driver_data: vol['replication_driver_data'] = replication_driver_data if previous_status: vol['previous_status'] = previous_status volume = objects.Volume(ctxt, **vol) volume.create() return volume def attach_volume(ctxt, volume_id, instance_uuid, attached_host, mountpoint, mode='rw'): now = timeutils.utcnow() values = {} values['volume_id'] = volume_id values['attached_host'] = attached_host values['mountpoint'] = mountpoint values['attach_time'] = now attachment = db.volume_attach(ctxt, values) return db.volume_attached(ctxt, attachment['id'], instance_uuid, attached_host, mountpoint, mode) def create_snapshot(ctxt, volume_id, display_name='test_snapshot', display_description='this is a test snapshot', cgsnapshot_id = None, status='creating', **kwargs): vol = db.volume_get(ctxt, volume_id) snap = objects.Snapshot(ctxt) snap.volume_id = volume_id snap.user_id = ctxt.user_id or fake.user_id snap.project_id = ctxt.project_id or fake.project_id snap.status = status snap.volume_size = vol['size'] snap.display_name = display_name snap.display_description = display_description snap.cgsnapshot_id = cgsnapshot_id snap.create() return snap def create_consistencygroup(ctxt, host='test_host@fakedrv#fakepool', name='test_cg', description='this is a test cg', status=fields.ConsistencyGroupStatus.AVAILABLE, availability_zone='fake_az', volume_type_id=None, cgsnapshot_id=None, source_cgid=None, **kwargs): """Create a consistencygroup object in the DB.""" cg = objects.ConsistencyGroup(ctxt) cg.host = host cg.user_id = ctxt.user_id or fake.user_id cg.project_id = ctxt.project_id or fake.project_id cg.status = status cg.name = name cg.description = description cg.availability_zone = availability_zone if volume_type_id: cg.volume_type_id = volume_type_id cg.cgsnapshot_id = cgsnapshot_id cg.source_cgid = source_cgid for key in kwargs: setattr(cg, key, kwargs[key]) cg.create() return cg def create_cgsnapshot(ctxt, consistencygroup_id, name='test_cgsnapshot', description='this is a test cgsnapshot', status='creating', **kwargs): """Create a cgsnapshot object in the 
DB.""" cgsnap = objects.CGSnapshot(ctxt) cgsnap.user_id = ctxt.user_id or fake.user_id cgsnap.project_id = ctxt.project_id or fake.project_id cgsnap.status = status cgsnap.name = name cgsnap.description = description cgsnap.consistencygroup_id = consistencygroup_id for key in kwargs: setattr(cgsnap, key, kwargs[key]) cgsnap.create() return cgsnap def create_backup(ctxt, volume_id, display_name='test_backup', display_description='This is a test backup', status=fields.BackupStatus.CREATING, parent_id=None, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, data_timestamp=None): backup = {} backup['volume_id'] = volume_id backup['user_id'] = ctxt.user_id backup['project_id'] = ctxt.project_id backup['host'] = socket.gethostname() backup['availability_zone'] = '1' backup['display_name'] = display_name backup['display_description'] = display_description backup['container'] = 'fake' backup['status'] = status backup['fail_reason'] = '' backup['service'] = 'fake' backup['parent_id'] = parent_id backup['size'] = 5 * 1024 * 1024 backup['object_count'] = 22 backup['temp_volume_id'] = temp_volume_id backup['temp_snapshot_id'] = temp_snapshot_id backup['snapshot_id'] = snapshot_id backup['data_timestamp'] = data_timestamp return db.backup_create(ctxt, backup) class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall): def start(self, interval, **kwargs): kwargs['initial_delay'] = 0 return super(ZeroIntervalLoopingCall, self).start(0, **kwargs) def replace_obj_loader(testcase, obj): def fake_obj_load_attr(self, name): # This will raise KeyError for non existing fields as expected field = self.fields[name] if field.default != oslo_versionedobjects.fields.UnspecifiedDefault: value = field.default elif field.nullable: value = None elif isinstance(field, oslo_versionedobjects.fields.StringField): value = '' elif isinstance(field, oslo_versionedobjects.fields.IntegerField): value = 1 elif isinstance(field, oslo_versionedobjects.fields.UUIDField): value = uuid.uuid4() setattr(self, name, value) testcase.addCleanup(setattr, obj, 'obj_load_attr', obj.obj_load_attr) obj.obj_load_attr = fake_obj_load_attr file_spec = None def get_file_spec(): """Return a Python 2 and 3 compatible version of a 'file' spec. This is to be used anywhere that you need to do something such as mock.MagicMock(spec=file) to mock out something with the file attributes. Due to the 'file' built-in method being removed in Python 3 we need to do some special handling for it. """ global file_spec # set on first use if file_spec is None: if sys.version_info[0] == 3: import _io file_spec = list(set(dir(_io.TextIOWrapper)).union( set(dir(_io.BytesIO)))) else: file_spec = file def generate_timeout_series(timeout): """Generate a series of times that exceeds the given timeout. Yields a series of fake time.time() floating point numbers such that the difference between each pair in the series just exceeds the timeout value that is passed in. Useful for mocking time.time() in methods that otherwise wait for timeout seconds. """ iteration = 0 while True: iteration += 1 yield (iteration * timeout) + iteration cinder-8.0.0/cinder/tests/unit/test_hitachi_hbsd_horcm_fc.py0000664000567000056710000012556512701406257025415 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, 2015, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Self test for Hitachi Block Storage Driver """ import mock from cinder import exception from cinder import test from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.hitachi import hbsd_basiclib from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_fc from cinder.volume.drivers.hitachi import hbsd_horcm def _exec_raidcom(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_vals.get(args) def _exec_raidcom_get_ldev_no_stdout(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_get_ldev_no_stdout.get(args) def _exec_raidcom_get_ldev_no_nml(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_get_ldev_no_nml.get(args) def _exec_raidcom_get_ldev_no_open_v(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_get_ldev_no_open_v.get(args) def _exec_raidcom_get_ldev_no_hdp(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_get_ldev_no_hdp.get(args) def _exec_raidcom_get_ldev_pair(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_get_ldev_pair.get(args) def _exec_raidcom_get_ldev_permit(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_get_ldev_permit.get(args) def _exec_raidcom_get_ldev_invalid_size(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_get_ldev_invalid_size.get(args) def _exec_raidcom_get_ldev_num_port(*args, **kargs): return HBSDHORCMFCDriverTest.horcm_get_ldev_num_port.get(args) class HBSDHORCMFCDriverTest(test.TestCase): """Test HBSDHORCMFCDriver.""" raidqry_result = "DUMMY\n\ Ver&Rev: 01-31-03/06" raidcom_get_host_grp_result = "DUMMY\n\ CL1-A 0 HBSD-127.0.0.1 None -\n\ CL1-A 1 - None -" raidcom_get_result = "LDEV : 0\n\ VOL_TYPE : OPEN-V-CVS\n\ LDEV : 1\n\ VOL_TYPE : NOT DEFINED" raidcom_get_result2 = "DUMMY\n\ LDEV : 1\n\ DUMMY\n\ DUMMY\n\ VOL_TYPE : OPEN-V-CVS\n\ VOL_ATTR : CVS : HDP\n\ VOL_Capacity(BLK) : 2097152\n\ NUM_PORT : 0\n\ STS : NML" raidcom_get_result3 = "Serial# : 210944\n\ LDEV : 0\n\ SL : 0\n\ CL : 0\n\ VOL_TYPE : NOT DEFINED\n\ VOL_Capacity(BLK) : 2098560\n\ NUM_LDEV : 1\n\ LDEVs : 0\n\ NUM_PORT : 3\n\ PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\ F_POOLID : NONE\n\ VOL_ATTR : CVS\n\ RAID_LEVEL : RAID5\n\ RAID_TYPE : 3D+1P\n\ NUM_GROUP : 1\n\ RAID_GROUPs : 01-01\n\ DRIVE_TYPE : DKR5C-J600SS\n\ DRIVE_Capa : 1143358736\n\ LDEV_NAMING : test\n\ STS : NML\n\ OPE_TYPE : NONE\n\ OPE_RATE : 100\n\ MP# : 0\n\ SSID : 0004" raidcom_get_command_status_result = "HANDLE SSB1 SSB2 ERR_CNT\ Serial# Description\n\ 00d4 - - 0 210944 -" raidcom_get_result4 = "Serial# : 210944\n\ LDEV : 0\n\ SL : 0\n\ CL : 0\n\ VOL_TYPE : DEFINED\n\ VOL_Capacity(BLK) : 2098560\n\ NUM_LDEV : 1\n\ LDEVs : 0\n\ NUM_PORT : 3\n\ PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\ F_POOLID : NONE\n\ VOL_ATTR : CVS\n\ RAID_LEVEL : RAID5\n\ RAID_TYPE : 3D+1P\n\ NUM_GROUP : 1\n\ RAID_GROUPs : 01-01\n\ DRIVE_TYPE : DKR5C-J600SS\n\ DRIVE_Capa : 1143358736\n\ LDEV_NAMING : test\n\ STS : NML\n\ OPE_TYPE : NONE\n\ OPE_RATE : 100\n\ MP# : 0\n\ SSID : 0004" raidcom_get_copy_grp_result = "DUMMY\n\ HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31P - - None\n\ 
HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31S - - None" raidcom_get_device_grp_result1 = "DUMMY\n\ HBSD-127.0.0.1None1A31P HBSD-ldev-0-2 0 None" raidcom_get_device_grp_result2 = "DUMMY\n\ HBSD-127.0.0.1None1A31S HBSD-ldev-0-2 2 None" raidcom_get_snapshot_result = "DUMMY\n\ HBSD-sanp P-VOL PSUS None 0 3 3 18 100 G--- 53ee291f\n\ HBSD-sanp P-VOL PSUS None 0 4 4 18 100 G--- 53ee291f" raidcom_dp_pool_result = "DUMMY \n\ 030 POLN 0 6006 6006 75 80 1 14860 32 167477" raidcom_port_result = "DUMMY\n\ CL1-A FIBRE TAR AUT 01 Y PtoP Y 0 None 50060E801053C2E0 -" raidcom_port_result2 = "DUMMY\n\ CL1-A 12345678912345aa None -\n\ CL1-A 12345678912345bb None -" raidcom_host_grp_result = "DUMMY\n\ CL1-A 0 HBSD-127.0.0.1 None LINUX/IRIX" raidcom_hba_wwn_result = "DUMMY\n\ CL1-A 0 HBSD-127.0.0.1 12345678912345aa None -" raidcom_get_lun_result = "DUMMY\n\ CL1-A 0 LINUX/IRIX 254 1 5 - None" pairdisplay_result = "DUMMY\n\ HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 L CL1-A-0 0 0 0 None 0 P-VOL PSUS None 2\ -\n\ HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 R CL1-A-0 0 0 0 None 2 S-VOL SSUS - 0 -" pairdisplay_result2 = "DUMMY\n\ HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 L CL1-A-1 0 0 0 None 1 P-VOL PAIR None 1\ -\n\ HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 R CL1-A-1 0 0 0 None 1 S-VOL PAIR - 1 -" horcm_vals = { ('raidqry', u'-h'): [0, "%s" % raidqry_result, ""], ('raidcom', '-login user pasword'): [0, "", ""], ('raidcom', u'get host_grp -port CL1-A -key host_grp'): [0, "%s" % raidcom_get_host_grp_result, ""], ('raidcom', u'add host_grp -port CL1-A-1 -host_grp_name HBSD-pair00'): [0, "", ""], ('raidcom', u'add host_grp -port CL1-A-1 -host_grp_name HBSD-127.0.0.2'): [0, "", ""], ('raidcom', u'delete host_grp -port CL1-A-1 HBSD-127.0.0.2'): [1, "", ""], ('raidcom', 'get ldev -ldev_id 0 -cnt 2'): [0, "%s" % raidcom_get_result, ""], ('raidcom', 'add ldev -pool 30 -ldev_id 1 -capacity 128G -emulation OPEN-V'): [0, "", ""], ('raidcom', 'add ldev -pool 30 -ldev_id 1 -capacity 256G -emulation OPEN-V'): [1, "", "SSB=0x2E22,0x0001"], ('raidcom', 'get command_status'): [0, "%s" % raidcom_get_command_status_result, ""], ('raidcom', 'get ldev -ldev_id 1'): [0, "%s" % raidcom_get_result2, ""], ('raidcom', 'get ldev -ldev_id 1 -check_status NML -time 120'): [0, "", ""], ('raidcom', 'get snapshot -ldev_id 0'): [0, "", ""], ('raidcom', 'get snapshot -ldev_id 1'): [0, "%s" % raidcom_get_snapshot_result, ""], ('raidcom', 'get snapshot -ldev_id 2'): [0, "", ""], ('raidcom', 'get snapshot -ldev_id 3'): [0, "", ""], ('raidcom', 'get copy_grp'): [0, "%s" % raidcom_get_copy_grp_result, ""], ('raidcom', 'delete ldev -ldev_id 0'): [0, "", ""], ('raidcom', 'delete ldev -ldev_id 1'): [0, "", ""], ('raidcom', 'delete ldev -ldev_id 2'): [1, "", "error"], ('raidcom', 'delete ldev -ldev_id 3'): [1, "", "SSB=0x2E20,0x0000"], ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30P'): [0, "", ""], ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30S'): [0, "", ""], ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31P'): [0, "%s" % raidcom_get_device_grp_result1, ""], ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31S'): [0, "%s" % raidcom_get_device_grp_result2, ""], ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -CLI'): [0, "", ""], ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -CLI'): [0, "", ""], ('pairdisplay', '-g HBSD-127.0.0.1None1A31 -CLI'): [0, "%s" % pairdisplay_result, ""], ('pairdisplay', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -CLI'): [0, "%s" % pairdisplay_result, ""], 
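# NOTE: Illustrative comment -- not part of the original fixture. Every
# entry in horcm_vals maps an exact (command, argument-string) tuple to a
# canned [return_code, stdout, stderr] triple; _exec_raidcom() is wired in
# as a mock side_effect and simply looks the positional args up in this
# dict. A hypothetical standalone equivalent of that dispatch:
#
#     fake_exec = mock.Mock(
#         side_effect=lambda *args, **kw: horcm_vals.get(args))
#     rc, stdout, stderr = fake_exec('raidcom', 'get dp_pool')
#
# Commands missing from the table return None, so an unexpected invocation
# fails loudly as soon as the driver tries to unpack the result.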
('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -CLI'): [0, "%s" % pairdisplay_result2, ""], ('raidcom', 'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \ HBSD-ldev-0-1 -ldev_id 0'): [0, "", ""], ('raidcom', 'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \ HBSD-ldev-0-1 -ldev_id 1'): [0, "", ""], ('raidcom', 'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \ HBSD-ldev-1-1 -ldev_id 1'): [0, "", ""], ('raidcom', 'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \ HBSD-ldev-1-1 -ldev_id 1'): [0, "", ""], ('raidcom', 'add copy_grp -copy_grp_name HBSD-127.0.0.1None1A30 \ HBSD-127.0.0.1None1A30P HBSD-127.0.0.1None1A30S -mirror_id 0'): [0, "", ""], ('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 \ -split -fq quick -c 3 -vl'): [0, "", ""], ('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 \ -split -fq quick -c 3 -vl'): [0, "", ""], ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowait'): [4, "", ""], ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowaits'): [4, "", ""], ('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowait'): [1, "", ""], ('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowaits'): [1, "", ""], ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowait'): [4, "", ""], ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowaits'): [200, "", ""], ('pairsplit', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -S'): [0, "", ""], ('raidcom', 'extend ldev -ldev_id 0 -capacity 128G'): [0, "", ""], ('raidcom', 'get dp_pool'): [0, "%s" % raidcom_dp_pool_result, ""], ('raidcom', 'get port'): [0, "%s" % raidcom_port_result, ""], ('raidcom', 'get port -port CL1-A'): [0, "%s" % raidcom_port_result2, ""], ('raidcom', 'get host_grp -port CL1-A'): [0, "%s" % raidcom_host_grp_result, ""], ('raidcom', 'get hba_wwn -port CL1-A-0'): [0, "%s" % raidcom_hba_wwn_result, ""], ('raidcom', 'get hba_wwn -port CL1-A-1'): [0, "", ""], ('raidcom', 'add hba_wwn -port CL1-A-0 -hba_wwn 12345678912345bb'): [0, "", ""], ('raidcom', 'add hba_wwn -port CL1-A-1 -hba_wwn 12345678912345bb'): [1, "", ""], ('raidcom', u'get lun -port CL1-A-0'): [0, "%s" % raidcom_get_lun_result, ""], ('raidcom', u'get lun -port CL1-A-1'): [0, "", ""], ('raidcom', u'add lun -port CL1-A-0 -ldev_id 0 -lun_id 0'): [0, "", ""], ('raidcom', u'add lun -port CL1-A-0 -ldev_id 1 -lun_id 0'): [0, "", ""], ('raidcom', u'add lun -port CL1-A-1 -ldev_id 0 -lun_id 0'): [0, "", ""], ('raidcom', u'add lun -port CL1-A-1 -ldev_id 1 -lun_id 0'): [0, "", ""], ('raidcom', u'delete lun -port CL1-A-0 -ldev_id 0'): [0, "", ""], ('raidcom', u'delete lun -port CL1-A-0 -ldev_id 1'): [0, "", ""], ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 0'): [0, "", ""], ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 2'): [0, "", ""], ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 1'): [1, "", ""]} horcm_get_ldev_no_stdout = { ('raidcom', 'get ldev -ldev_id 1'): [0, "", ""]} raidcom_get_ldev_no_nml = "DUMMY\n\ LDEV : 1\n\ DUMMY\n\ DUMMY\n\ VOL_TYPE : OPEN-V-CVS\n\ VOL_ATTR : CVS : HDP\n\ VOL_Capacity(BLK) : 2097152\n\ NUM_PORT : 0\n\ STS :" horcm_get_ldev_no_nml = { ('raidcom', 'get ldev -ldev_id 1'): [0, "%s" % raidcom_get_ldev_no_nml, ""]} raidcom_get_ldev_no_open_v = "DUMMY\n\ LDEV : 1\n\ DUMMY\n\ DUMMY\n\ VOL_TYPE : CVS\n\ VOL_ATTR : CVS : HDP\n\ VOL_Capacity(BLK) : 2097152\n\ NUM_PORT : 0\n\ STS : NML" horcm_get_ldev_no_open_v = { ('raidcom', 'get ldev -ldev_id 1'): [0, "%s" % raidcom_get_ldev_no_open_v, ""]} raidcom_get_ldev_no_hdp = 
"DUMMY\n\ LDEV : 1\n\ DUMMY\n\ DUMMY\n\ VOL_TYPE : OPEN-V-CVS\n\ VOL_ATTR : CVS :\n\ VOL_Capacity(BLK) : 2097152\n\ NUM_PORT : 0\n\ STS : NML" horcm_get_ldev_no_hdp = { ('raidcom', 'get ldev -ldev_id 1'): [0, "%s" % raidcom_get_ldev_no_hdp, ""]} raidcom_get_ldev_pair = "DUMMY\n\ LDEV : 1\n\ DUMMY\n\ DUMMY\n\ VOL_TYPE : OPEN-V-CVS\n\ VOL_ATTR : HORC : HDP\n\ VOL_Capacity(BLK) : 2097152\n\ NUM_PORT : 0\n\ STS : NML" horcm_get_ldev_pair = { ('raidcom', 'get ldev -ldev_id 1'): [0, "%s" % raidcom_get_ldev_pair, ""]} raidcom_get_ldev_permit = "DUMMY\n\ LDEV : 1\n\ DUMMY\n\ DUMMY\n\ VOL_TYPE : OPEN-V-CVS\n\ VOL_ATTR : XXX : HDP\n\ VOL_Capacity(BLK) : 2097152\n\ NUM_PORT : 0\n\ STS : NML" horcm_get_ldev_permit = { ('raidcom', 'get ldev -ldev_id 1'): [0, "%s" % raidcom_get_ldev_permit, ""]} raidcom_get_ldev_invalid_size = "DUMMY\n\ LDEV : 1\n\ DUMMY\n\ DUMMY\n\ VOL_TYPE : OPEN-V-CVS\n\ VOL_ATTR : CVS : HDP\n\ VOL_Capacity(BLK) : 2097151\n\ NUM_PORT : 0\n\ STS : NML" horcm_get_ldev_invalid_size = { ('raidcom', 'get ldev -ldev_id 1'): [0, "%s" % raidcom_get_ldev_invalid_size, ""]} raidcom_get_ldev_num_port = "DUMMY\n\ LDEV : 1\n\ DUMMY\n\ DUMMY\n\ VOL_TYPE : OPEN-V-CVS\n\ VOL_ATTR : CVS : HDP\n\ VOL_Capacity(BLK) : 2097152\n\ NUM_PORT : 1\n\ STS : NML" horcm_get_ldev_num_port = { ('raidcom', 'get ldev -ldev_id 1'): [0, "%s" % raidcom_get_ldev_num_port, ""]} # The following information is passed on to tests, when creating a volume _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0', 'provider_location': '0', 'name': 'test', 'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'} test_volume = {'name': 'test_volume', 'size': 128, 'id': 'test-volume', 'provider_location': '1', 'status': 'available'} test_volume_error = {'name': 'test_volume', 'size': 256, 'id': 'test-volume', 'status': 'creating'} test_volume_error2 = {'name': 'test_volume2', 'size': 128, 'id': 'test-volume2', 'provider_location': '1', 'status': 'available'} test_volume_error3 = {'name': 'test_volume3', 'size': 128, 'id': 'test-volume3', 'volume_metadata': [{'key': 'type', 'value': 'V-VOL'}], 'provider_location': '1', 'status': 'available'} test_volume_error4 = {'name': 'test_volume4', 'size': 128, 'id': 'test-volume2', 'provider_location': '3', 'status': 'available'} test_volume_error5 = {'name': 'test_volume', 'size': 256, 'id': 'test-volume', 'provider_location': '1', 'status': 'available'} test_snapshot = {'volume_name': 'test', 'size': 128, 'volume_size': 128, 'name': 'test-snap', 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, 'provider_location': '0', 'status': 'available'} test_snapshot_error = {'volume_name': 'test', 'size': 128, 'volume_size': 128, 'name': 'test-snap', 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, 'provider_location': '2', 'status': 'available'} test_snapshot_error2 = {'volume_name': 'test', 'size': 128, 'volume_size': 128, 'name': 'test-snap', 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, 'provider_location': '1', 'status': 'available'} SERIAL_NUM = '210944' test_existing_ref = {'ldev': '1', 'serial_number': SERIAL_NUM} test_existing_none_ldev_ref = {'ldev': None, 'serial_number': SERIAL_NUM} test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'serial_number': SERIAL_NUM} test_existing_no_ldev_ref = {'serial_number': SERIAL_NUM} test_existing_none_serial_ref = {'ldev': '1', 'serial_number': None} test_existing_invalid_serial_ref = {'ldev': '1', 'serial_number': '999999'} test_existing_no_serial_ref = {'ldev': '1'} def __init__(self, *args, **kwargs): 
super(HBSDHORCMFCDriverTest, self).__init__(*args, **kwargs) @mock.patch.object(utils, 'brick_get_connector_properties', return_value={'ip': '127.0.0.1', 'wwpns': ['12345678912345aa']}) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(utils, 'execute', return_value=['%s' % raidqry_result, '']) def setUp(self, arg1, arg2, arg3, arg4): super(HBSDHORCMFCDriverTest, self).setUp() self._setup_config() self._setup_driver() self.driver.check_param() self.driver.common.pair_flock = hbsd_basiclib.NopLock() self.driver.common.command = hbsd_horcm.HBSDHORCM(self.configuration) self.driver.common.command.horcmgr_flock = hbsd_basiclib.NopLock() self.driver.common.create_lock_file() self.driver.common.command.connect_storage() self.driver.max_hostgroups = \ self.driver.common.command.get_max_hostgroups() self.driver.add_hostgroup() self.driver.output_param_to_log() self.driver.do_setup_status.set() def _setup_config(self): self.configuration = mock.Mock(conf.Configuration) self.configuration.hitachi_pool_id = 30 self.configuration.hitachi_thin_pool_id = 31 self.configuration.hitachi_target_ports = "CL1-A" self.configuration.hitachi_debug_level = 0 self.configuration.hitachi_serial_number = "None" self.configuration.hitachi_unit_name = None self.configuration.hitachi_group_request = True self.configuration.hitachi_group_range = None self.configuration.hitachi_zoning_request = False self.configuration.config_group = "None" self.configuration.hitachi_ldev_range = "0-1" self.configuration.hitachi_default_copy_method = 'FULL' self.configuration.hitachi_copy_check_interval = 1 self.configuration.hitachi_async_copy_check_interval = 1 self.configuration.hitachi_copy_speed = 3 self.configuration.hitachi_horcm_add_conf = True self.configuration.hitachi_horcm_numbers = "409,419" self.configuration.hitachi_horcm_user = "user" self.configuration.hitachi_horcm_password = "pasword" self.configuration.hitachi_horcm_resource_lock_timeout = 600 def _setup_driver(self): self.driver = hbsd_fc.HBSDFCDriver( configuration=self.configuration) context = None db = None self.driver.common = hbsd_common.HBSDCommon( self.configuration, self.driver, context, db) # API test cases @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def test_create_volume(self, arg1, arg2, arg3): """test create_volume.""" ret = self.driver.create_volume(self._VOLUME) vol = self._VOLUME.copy() vol['provider_location'] = ret['provider_location'] self.assertEqual('1', vol['provider_location']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def test_create_volume_error(self, arg1, arg2, arg3): """test create_volume.""" self.assertRaises(exception.HBSDError, self.driver.create_volume, self.test_volume_error) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def test_get_volume_stats(self, arg1, arg2): """test get_volume_stats.""" stats = self.driver.get_volume_stats(True) self.assertEqual('Hitachi', stats['vendor_name']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def 
test_get_volume_stats_error(self, arg1, arg2): """test get_volume_stats.""" self.configuration.hitachi_pool_id = 29 stats = self.driver.get_volume_stats(True) self.assertEqual({}, stats) self.configuration.hitachi_pool_id = 30 @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_extend_volume(self, arg1, arg2, arg3, arg4): """test extend_volume.""" self.driver.extend_volume(self._VOLUME, 256) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_extend_volume_error(self, arg1, arg2, arg3, arg4): """test extend_volume.""" self.assertRaises(exception.HBSDError, self.driver.extend_volume, self.test_volume_error3, 256) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_delete_volume(self, arg1, arg2, arg3, arg4): """test delete_volume.""" self.driver.delete_volume(self._VOLUME) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_delete_volume_error(self, arg1, arg2, arg3, arg4): """test delete_volume.""" self.driver.delete_volume(self.test_volume_error4) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', return_value={'dummy_snapshot_meta': 'snapshot_meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', return_value=_VOLUME) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5, arg6, arg7): """test create_snapshot.""" ret = self.driver.create_volume(self._VOLUME) ret = self.driver.create_snapshot(self.test_snapshot) self.assertEqual('1', ret['provider_location']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', return_value={'dummy_snapshot_meta': 'snapshot_meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', return_value=_VOLUME) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5, arg6, 
arg7): """test create_snapshot.""" ret = self.driver.create_volume(self.test_volume) ret = self.driver.create_snapshot(self.test_snapshot_error) self.assertEqual('1', ret['provider_location']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_delete_snapshot(self, arg1, arg2, arg3, arg4): """test delete_snapshot.""" self.driver.delete_snapshot(self.test_snapshot) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_delete_snapshot_error(self, arg1, arg2, arg3, arg4): """test delete_snapshot.""" self.assertRaises(exception.HBSDCmdError, self.driver.delete_snapshot, self.test_snapshot_error) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_create_volume_from_snapshot(self, arg1, arg2, arg3, arg4, arg5): """test create_volume_from_snapshot.""" vol = self.driver.create_volume_from_snapshot(self.test_volume, self.test_snapshot) self.assertIsNotNone(vol) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3, arg4, arg5): """test create_volume_from_snapshot.""" self.assertRaises(exception.HBSDError, self.driver.create_volume_from_snapshot, self.test_volume_error5, self.test_snapshot_error2) return @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', return_value=_VOLUME) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_create_cloned_volume(self, arg1, arg2, arg3, arg4, arg5, arg6): """test create_cloned_volume.""" vol = self.driver.create_cloned_volume(self.test_volume, self._VOLUME) self.assertEqual('1', vol['provider_location']) return @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', return_value=_VOLUME) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', 
return_value=[0, "", ""]) @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', return_value=[0, "", ""]) def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4, arg5, arg6): """test create_cloned_volume.""" self.assertRaises(exception.HBSDCmdError, self.driver.create_cloned_volume, self.test_volume, self.test_volume_error2) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def test_initialize_connection(self, arg1, arg2): """test initialize connection.""" connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], 'ip': '127.0.0.1'} rc = self.driver.initialize_connection(self._VOLUME, connector) self.assertEqual('fibre_channel', rc['driver_volume_type']) self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) self.assertEqual(0, rc['data']['target_lun']) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def test_initialize_connection_error(self, arg1, arg2): """test initialize connection.""" connector = {'wwpns': ['12345678912345bb'], 'ip': '127.0.0.2'} self.assertRaises(exception.HBSDError, self.driver.initialize_connection, self._VOLUME, connector) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def test_terminate_connection(self, arg1, arg2): """test terminate connection.""" connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], 'ip': '127.0.0.1'} rc = self.driver.terminate_connection(self._VOLUME, connector) self.assertEqual('fibre_channel', rc['driver_volume_type']) self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def test_terminate_connection_error(self, arg1, arg2): """test terminate connection.""" connector = {'ip': '127.0.0.1'} self.assertRaises(exception.HBSDError, self.driver.terminate_connection, self._VOLUME, connector) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) def test_manage_existing(self, arg1, arg2): self.configuration.hitachi_serial_number = self.SERIAL_NUM rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref) self.assertEqual(1, rc['provider_location']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size(self, arg1, arg2, arg3): self.configuration.hitachi_serial_number = self.SERIAL_NUM size = self.driver.manage_existing_get_size(self._VOLUME, self.test_existing_ref) self.assertEqual(1, size) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', side_effect=_exec_raidcom) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size_none_ldev_ref(self, arg1, arg2, arg3): self.configuration.hitachi_serial_number = self.SERIAL_NUM self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_none_ldev_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', 
                       side_effect=_exec_raidcom)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2,
                                                       arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_invalid_ldev_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_no_ldev_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_none_serial_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_none_serial_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_invalid_serial_ref(self, arg1, arg2,
                                                         arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_invalid_serial_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_no_serial_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_no_serial_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
                       return_value=[0, "", ""])
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
                       return_value=[0, "", ""])
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom)
    def test_unmanage(self, arg1, arg2, arg3, arg4):
        self.driver.unmanage(self._VOLUME)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom)
    def test_unmanage_busy(self, arg1, arg2):
        self.assertRaises(exception.HBSDVolumeIsBusy,
                          self.driver.unmanage, self.test_volume_error3)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom_get_ldev_no_stdout)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_get_ldev_no_stdout(self, arg1, arg2,
                                                         arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
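    # NOTE(editor): throughout these tests, stacked mock.patch.object
    # decorators hand their mocks to the test method bottom-up: the decorator
    # nearest the ``def`` supplies the first argument. A minimal sketch of the
    # pattern (the method and argument names below are illustrative only):
    #
    #     @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    #     @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    #     def test_example(self, metadata_mock, lock_mock):
    #         # metadata_mock patches '_update_volume_metadata' (bottom
    #         # decorator); lock_mock patches 'get_process_lock' (top).
    #         pass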
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom_get_ldev_no_nml)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_get_ldev_no_nml(self, arg1, arg2, arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom_get_ldev_no_open_v)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_get_ldev_no_open_v(self, arg1, arg2,
                                                         arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom_get_ldev_no_hdp)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_get_ldev_no_hdp(self, arg1, arg2, arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom_get_ldev_pair)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_get_ldev_pair(self, arg1, arg2, arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom_get_ldev_permit)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_get_ldev_permit(self, arg1, arg2, arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom_get_ldev_invalid_size)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_get_ldev_invalid_size(self, arg1, arg2,
                                                            arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
                       side_effect=_exec_raidcom_get_ldev_num_port)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_get_ldev_num_port(self, arg1, arg2,
                                                        arg3):
        self.configuration.hitachi_serial_number = self.SERIAL_NUM
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    def test_invalid_resource_lock_timeout_below_limit(self):
        self.configuration.hitachi_horcm_resource_lock_timeout = -1
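        # NOTE(editor): -1 here, and 7201 in the companion test below, are
        # assumed to sit just outside the accepted range for
        # hitachi_horcm_resource_lock_timeout (apparently 0-7200 seconds,
        # i.e. at most two hours), so check_param is expected to raise
        # HBSDError at both boundaries.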
self.assertRaises(exception.HBSDError, self.driver.check_param) def test_invalid_resource_lock_timeout_over_limit(self): self.configuration.hitachi_horcm_resource_lock_timeout = 7201 self.assertRaises(exception.HBSDError, self.driver.check_param) cinder-8.0.0/cinder/tests/unit/test_hpelefthand.py0000664000567000056710000027435612701406250023422 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014-2016 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for OpenStack Cinder volume drivers.""" import json import mock from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder import test from cinder.tests.unit import fake_hpe_lefthand_client as hpelefthandclient from cinder.volume.drivers.hpe import hpe_lefthand_iscsi from cinder.volume import volume_types hpeexceptions = hpelefthandclient.hpeexceptions GOODNESS_FUNCTION = \ "capabilities.capacity_utilization < 0.6? 100 : 25" FILTER_FUNCTION = \ "capabilities.total_volumes < 400 && capabilities.capacity_utilization" HPELEFTHAND_SAN_SSH_CON_TIMEOUT = 44 HPELEFTHAND_SAN_SSH_PRIVATE = 'foobar' HPELEFTHAND_API_URL = 'http://fake.foo:8080/lhos' HPELEFTHAND_API_URL2 = 'http://fake2.foo2:8080/lhos' HPELEFTHAND_SSH_IP = 'fake.foo' HPELEFTHAND_SSH_IP2 = 'fake2.foo2' HPELEFTHAND_USERNAME = 'foo1' HPELEFTHAND_PASSWORD = 'bar2' HPELEFTHAND_SSH_PORT = 16022 HPELEFTHAND_CLUSTER_NAME = 'CloudCluster1' VOLUME_TYPE_ID_REPLICATED = 'be9181f1-4040-46f2-8298-e7532f2bf9db' FAKE_FAILOVER_HOST = 'fakefailover@foo#destfakepool' REPLICATION_BACKEND_ID = 'target' class HPELeftHandBaseDriver(object): cluster_id = 1 volume_name = "fakevolume" volume_name_repl = "fakevolume_replicated" volume_id = 1 volume = { 'name': volume_name, 'display_name': 'Foo Volume', 'provider_location': ('10.0.1.6 iqn.2003-10.com.lefthandnetworks:' 'group01:25366:fakev 0'), 'id': volume_id, 'provider_auth': None, 'size': 1} volume_replicated = { 'name': volume_name_repl, 'display_name': 'Foo Volume', 'provider_location': ('10.0.1.6 iqn.2003-10.com.lefthandnetworks:' 'group01:25366:fakev 0'), 'id': volume_id, 'provider_auth': None, 'size': 1, 'volume_type': 'replicated', 'volume_type_id': VOLUME_TYPE_ID_REPLICATED, 'replication_driver_data': ('{"location": "' + HPELEFTHAND_API_URL + '"}')} repl_targets = [{'backend_id': 'target', 'managed_backend_name': FAKE_FAILOVER_HOST, 'hpelefthand_api_url': HPELEFTHAND_API_URL2, 'hpelefthand_username': HPELEFTHAND_USERNAME, 'hpelefthand_password': HPELEFTHAND_PASSWORD, 'hpelefthand_clustername': HPELEFTHAND_CLUSTER_NAME, 'hpelefthand_ssh_port': HPELEFTHAND_SSH_PORT, 'ssh_conn_timeout': HPELEFTHAND_SAN_SSH_CON_TIMEOUT, 'san_private_key': HPELEFTHAND_SAN_SSH_PRIVATE, 'cluster_id': 6, 'cluster_vip': '10.0.1.6'}] repl_targets_unmgd = [{'backend_id': 'target', 'hpelefthand_api_url': HPELEFTHAND_API_URL2, 'hpelefthand_username': HPELEFTHAND_USERNAME, 'hpelefthand_password': HPELEFTHAND_PASSWORD, 'hpelefthand_clustername': 
HPELEFTHAND_CLUSTER_NAME, 'hpelefthand_ssh_port': HPELEFTHAND_SSH_PORT, 'ssh_conn_timeout': HPELEFTHAND_SAN_SSH_CON_TIMEOUT, 'san_private_key': HPELEFTHAND_SAN_SSH_PRIVATE, 'cluster_id': 6, 'cluster_vip': '10.0.1.6'}] list_rep_targets = [{'backend_id': REPLICATION_BACKEND_ID}] serverName = 'fakehost' server_id = 0 server_uri = '/lhos/servers/0' snapshot_name = "fakeshapshot" snapshot_id = 3 snapshot = { 'id': snapshot_id, 'name': snapshot_name, 'display_name': 'fakesnap', 'volume_name': volume_name, 'volume': volume} cloned_volume_name = "clone_volume" cloned_volume = {'name': cloned_volume_name, 'size': 1} cloned_volume_extend = {'name': cloned_volume_name, 'size': 5} cloned_snapshot_name = "clonedshapshot" cloned_snapshot_id = 5 cloned_snapshot = { 'name': cloned_snapshot_name, 'volume_name': volume_name} volume_type_id = 4 init_iqn = 'iqn.1993-08.org.debian:01:222' volume_type = {'name': 'gold', 'deleted': False, 'updated_at': None, 'extra_specs': {'hpelh:provisioning': 'thin', 'hpelh:ao': 'true', 'hpelh:data_pl': 'r-0'}, 'deleted_at': None, 'id': 'gold'} old_volume_type = {'name': 'gold', 'deleted': False, 'updated_at': None, 'extra_specs': {'hplh:provisioning': 'thin', 'hplh:ao': 'true', 'hplh:data_pl': 'r-0'}, 'deleted_at': None, 'id': 'gold'} connector = { 'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'host': serverName} driver_startup_call_stack = [ mock.call.login('foo1', 'bar2'), mock.call.getClusterByName('CloudCluster1'), mock.call.setSSHOptions( HPELEFTHAND_SSH_IP, HPELEFTHAND_USERNAME, HPELEFTHAND_PASSWORD, missing_key_policy='AutoAddPolicy', privatekey=HPELEFTHAND_SAN_SSH_PRIVATE, known_hosts_file=mock.ANY, port=HPELEFTHAND_SSH_PORT, conn_timeout=HPELEFTHAND_SAN_SSH_CON_TIMEOUT), ] class TestHPELeftHandISCSIDriver(HPELeftHandBaseDriver, test.TestCase): CONSIS_GROUP_ID = '3470cc4c-63b3-4c7a-8120-8a0693b45838' CGSNAPSHOT_ID = '5351d914-6c90-43e7-9a8e-7e84610927da' class fake_consistencygroup_object(object): volume_type_id = '371c64d5-b92a-488c-bc14-1e63cef40e08' name = 'cg_name' cgsnapshot_id = None id = '3470cc4c-63b3-4c7a-8120-8a0693b45838' description = 'consistency group' class fake_cgsnapshot_object(object): consistencygroup_id = '3470cc4c-63b3-4c7a-8120-8a0693b45838' description = 'cgsnapshot' id = '5351d914-6c90-43e7-9a8e-7e84610927da' readOnly = False def default_mock_conf(self): mock_conf = mock.MagicMock() mock_conf.hpelefthand_api_url = HPELEFTHAND_API_URL mock_conf.hpelefthand_username = HPELEFTHAND_USERNAME mock_conf.hpelefthand_password = HPELEFTHAND_PASSWORD mock_conf.hpelefthand_ssh_port = HPELEFTHAND_SSH_PORT mock_conf.ssh_conn_timeout = HPELEFTHAND_SAN_SSH_CON_TIMEOUT mock_conf.san_private_key = HPELEFTHAND_SAN_SSH_PRIVATE mock_conf.hpelefthand_iscsi_chap_enabled = False mock_conf.hpelefthand_debug = False mock_conf.hpelefthand_clustername = "CloudCluster1" mock_conf.goodness_function = GOODNESS_FUNCTION mock_conf.filter_function = FILTER_FUNCTION mock_conf.reserved_percentage = 25 def safe_get(attr): try: return mock_conf.__getattribute__(attr) except AttributeError: return None mock_conf.safe_get = safe_get return mock_conf @mock.patch('hpelefthandclient.client.HPELeftHandClient', spec=True) def setup_driver(self, _mock_client, config=None): if config is None: config = self.default_mock_conf() _mock_client.return_value.getClusterByName.return_value = { 'id': 1, 'virtualIPAddresses': [{'ipV4Address': '10.0.1.6'}]} _mock_client.return_value.getCluster.return_value = { 'spaceTotal': units.Gi * 500, 'spaceAvailable': units.Gi * 250} 
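        # NOTE(editor): these mocked capacity figures feed the stats math that
        # test_get_volume_stats asserts later: with 500 GiB total and 250 GiB
        # available, capacity utilization is (500 - 250) / 500 * 100 = 50.0
        # percent. A standalone sketch of that calculation:
        #
        #     from oslo_utils import units
        #     total, avail = units.Gi * 500, units.Gi * 250
        #     cap_util = float(total - avail) / float(total) * 100
        #     assert cap_util == 50.0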
_mock_client.return_value.getApiVersion.return_value = '1.2' _mock_client.return_value.getIPFromCluster.return_value = '1.1.1.1' self.driver = hpe_lefthand_iscsi.HPELeftHandISCSIDriver( configuration=config) self.driver.do_setup(None) self.cluster_name = config.hpelefthand_clustername return _mock_client.return_value @mock.patch('hpelefthandclient.version', "1.0.0") def test_unsupported_client_version(self): self.assertRaises(exception.InvalidInput, self.setup_driver) @mock.patch('hpelefthandclient.version', "3.0.0") def test_supported_client_version(self): self.setup_driver() def test_create_volume(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumes.return_value = {'total': 1, 'members': []} # mock return value of createVolume mock_client.createVolume.return_value = { 'iscsiIqn': self.connector['initiator']} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute driver volume_info = self.driver.create_volume(self.volume) self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', volume_info['provider_location']) expected = self.driver_startup_call_stack + [ mock.call.createVolume( 'fakevolume', 1, units.Gi, {'isThinProvisioned': True, 'clusterName': 'CloudCluster1'}), mock.call.logout()] mock_client.assert_has_calls(expected) # mock HTTPServerError mock_client.createVolume.side_effect =\ hpeexceptions.HTTPServerError() # ensure the raised exception is a cinder exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) @mock.patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'hpelh:provisioning': 'full'}}) def test_create_volume_with_es(self, _mock_volume_type): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() volume_with_vt = self.volume volume_with_vt['volume_type_id'] = 1 # mock return value of createVolume mock_client.createVolume.return_value = { 'iscsiIqn': self.connector['initiator']} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute create_volume volume_info = self.driver.create_volume(volume_with_vt) self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', volume_info['provider_location']) expected = self.driver_startup_call_stack + [ mock.call.createVolume( 'fakevolume', 1, units.Gi, {'isThinProvisioned': False, 'clusterName': 'CloudCluster1'}), mock.call.logout()] mock_client.assert_has_calls(expected) @mock.patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': (HPELeftHandBaseDriver. 
old_volume_type['extra_specs'])}) def test_create_volume_old_volume_type(self, _mock_volume_type): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumes.return_value = {'total': 1, 'members': []} # mock return value of createVolume mock_client.createVolume.return_value = { 'iscsiIqn': self.connector['initiator']} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute driver volume_info = self.driver.create_volume(self.volume) self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', volume_info['provider_location']) expected = self.driver_startup_call_stack + [ mock.call.createVolume( 'fakevolume', 1, units.Gi, {'isThinProvisioned': True, 'clusterName': 'CloudCluster1'}), mock.call.logout()] mock_client.assert_has_calls(expected) # mock HTTPServerError mock_client.createVolume.side_effect =\ hpeexceptions.HTTPServerError() # ensure the raised exception is a cinder exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) def test_delete_volume(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() # mock return value of getVolumeByName mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute delete_volume del_volume = self.volume del_volume['volume_type_id'] = None self.driver.delete_volume(del_volume) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.deleteVolume(self.volume_id), mock.call.logout()] mock_client.assert_has_calls(expected) # mock HTTPNotFound (volume not found) mock_client.getVolumeByName.side_effect =\ hpeexceptions.HTTPNotFound() # no exception should escape method self.driver.delete_volume(del_volume) # mock HTTPConflict mock_client.deleteVolume.side_effect = hpeexceptions.HTTPConflict() # ensure the raised exception is a cinder exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, {}) def test_extend_volume(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() # mock return value of getVolumeByName mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute extend_volume self.driver.extend_volume(self.volume, 2) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.modifyVolume(1, {'size': 2 * units.Gi}), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) # mock HTTPServerError (array failure) mock_client.modifyVolume.side_effect =\ hpeexceptions.HTTPServerError() # ensure the raised exception is a cinder exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, 2) def test_initialize_connection(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() # mock return value of 
getVolumeByName mock_client.getServerByName.side_effect = hpeexceptions.HTTPNotFound() mock_client.createServer.return_value = {'id': self.server_id} mock_client.getVolumeByName.return_value = { 'id': self.volume_id, 'iscsiSessions': None } mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute initialize_connection result = self.driver.initialize_connection( self.volume, self.connector) # validate self.assertEqual('iscsi', result['driver_volume_type']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(self.volume_id, result['data']['volume_id']) self.assertTrue('auth_method' not in result['data']) expected = self.driver_startup_call_stack + [ mock.call.getServerByName('fakehost'), mock.call.createServer ( 'fakehost', 'iqn.1993-08.org.debian:01:222', None ), mock.call.getVolumeByName('fakevolume'), mock.call.addServerAccess(1, 0), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) # mock HTTPServerError (array failure) mock_client.createServer.side_effect =\ hpeexceptions.HTTPServerError() # ensure the raised exception is a cinder exception self.assertRaises( exception.VolumeBackendAPIException, self.driver.initialize_connection, self.volume, self.connector) def test_initialize_connection_session_exists(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() # mock return value of getVolumeByName mock_client.getServerByName.side_effect = hpeexceptions.HTTPNotFound() mock_client.createServer.return_value = {'id': self.server_id} mock_client.getVolumeByName.return_value = { 'id': self.volume_id, 'iscsiSessions': [{'server': {'uri': self.server_uri}}] } mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute initialize_connection result = self.driver.initialize_connection( self.volume, self.connector) # validate self.assertEqual('iscsi', result['driver_volume_type']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(self.volume_id, result['data']['volume_id']) self.assertTrue('auth_method' not in result['data']) expected = self.driver_startup_call_stack + [ mock.call.getServerByName('fakehost'), mock.call.createServer ( 'fakehost', 'iqn.1993-08.org.debian:01:222', None ), mock.call.getVolumeByName('fakevolume'), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) def test_initialize_connection_with_chaps(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() # mock return value of getVolumeByName mock_client.getServerByName.side_effect = hpeexceptions.HTTPNotFound() mock_client.createServer.return_value = { 'id': self.server_id, 'chapAuthenticationRequired': True, 'chapTargetSecret': 'dont_tell'} mock_client.getVolumeByName.return_value = { 'id': self.volume_id, 'iscsiSessions': None } mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute initialize_connection result = self.driver.initialize_connection( self.volume, self.connector) # validate self.assertEqual('iscsi', 
result['driver_volume_type']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(self.volume_id, result['data']['volume_id']) self.assertEqual('CHAP', result['data']['auth_method']) expected = self.driver_startup_call_stack + [ mock.call.getServerByName('fakehost'), mock.call.createServer ( 'fakehost', 'iqn.1993-08.org.debian:01:222', None ), mock.call.getVolumeByName('fakevolume'), mock.call.addServerAccess(1, 0), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) def test_terminate_connection(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getServerByName.return_value = { 'id': self.server_id, 'name': self.serverName} mock_client.findServerVolumes.return_value = [{'id': self.volume_id}] mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute terminate_connection self.driver.terminate_connection(self.volume, self.connector) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.getServerByName('fakehost'), mock.call.findServerVolumes('fakehost'), mock.call.removeServerAccess(1, 0), mock.call.deleteServer(0)] # validate call chain mock_client.assert_has_calls(expected) mock_client.getVolumeByName.side_effect = ( hpeexceptions.HTTPNotFound()) # ensure the raised exception is a cinder exception self.assertRaises( exception.VolumeBackendAPIException, self.driver.terminate_connection, self.volume, self.connector) def test_terminate_connection_multiple_volumes_on_server(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getServerByName.return_value = { 'id': self.server_id, 'name': self.serverName} mock_client.findServerVolumes.return_value = [ {'id': self.volume_id}, {'id': 99999}] mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute terminate_connection self.driver.terminate_connection(self.volume, self.connector) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.getServerByName('fakehost'), mock.call.findServerVolumes('fakehost'), mock.call.removeServerAccess(1, 0)] # validate call chain mock_client.assert_has_calls(expected) self.assertFalse(mock_client.deleteServer.called) mock_client.getVolumeByName.side_effect = ( hpeexceptions.HTTPNotFound()) # ensure the raised exception is a cinder exception self.assertRaises( exception.VolumeBackendAPIException, self.driver.terminate_connection, self.volume, self.connector) def test_create_snapshot(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute create_snapshot self.driver.create_snapshot(self.snapshot) 
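            # NOTE(editor): the validation below follows the pattern used by
            # every test in this class: build the expected list from
            # driver_startup_call_stack (login, getClusterByName,
            # setSSHOptions) plus the per-operation REST calls, then check it
            # with assert_has_calls, which requires the calls to appear in
            # that order (extra calls before or after are tolerated). A
            # minimal, self-contained illustration of the mechanism:
            #
            #     client = mock.Mock()
            #     client.login('u', 'p')
            #     client.createSnapshot('snap', 1, {'inheritAccess': True})
            #     client.assert_has_calls([
            #         mock.call.login('u', 'p'),
            #         mock.call.createSnapshot('snap', 1,
            #                                  {'inheritAccess': True})])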
mock_client.getVolumes.return_value = {'total': 1, 'members': []} expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.createSnapshot( 'fakeshapshot', 1, {'inheritAccess': True}), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) # mock HTTPServerError (array failure) mock_client.getVolumeByName.side_effect =\ hpeexceptions.HTTPNotFound() # ensure the raised exception is a cinder exception self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot) def test_delete_snapshot(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute delete_snapshot self.driver.delete_snapshot(self.snapshot) expected = self.driver_startup_call_stack + [ mock.call.getSnapshotByName('fakeshapshot'), mock.call.deleteSnapshot(3), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) mock_client.getSnapshotByName.side_effect =\ hpeexceptions.HTTPNotFound() # no exception is thrown, just error msg is logged self.driver.delete_snapshot(self.snapshot) # mock HTTPServerError (array failure) ex = hpeexceptions.HTTPServerError({'message': 'Some message.'}) mock_client.getSnapshotByName.side_effect = ex # ensure the raised exception is a cinder exception self.assertRaises( exception.VolumeBackendAPIException, self.driver.delete_snapshot, self.snapshot) # mock HTTPServerError because the snap is in use ex = hpeexceptions.HTTPServerError({ 'message': 'Hey, dude cannot be deleted because it is a clone point' ' duh.'}) mock_client.getSnapshotByName.side_effect = ex # ensure the raised exception is a cinder exception self.assertRaises( exception.SnapshotIsBusy, self.driver.delete_snapshot, self.snapshot) def test_create_volume_from_snapshot(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id} mock_client.cloneSnapshot.return_value = { 'iscsiIqn': self.connector['initiator']} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute create_volume_from_snapshot model_update = self.driver.create_volume_from_snapshot( self.volume, self.snapshot) expected_iqn = 'iqn.1993-08.org.debian:01:222 0' expected_location = "10.0.1.6:3260,1 %s" % expected_iqn self.assertEqual(expected_location, model_update['provider_location']) expected = self.driver_startup_call_stack + [ mock.call.getSnapshotByName('fakeshapshot'), mock.call.cloneSnapshot('fakevolume', 3), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) def test_create_cloned_volume(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.cloneVolume.return_value = { 'iscsiIqn': self.connector['initiator']} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with 
mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute create_cloned_volume model_update = self.driver.create_cloned_volume( self.cloned_volume, self.volume) expected_iqn = 'iqn.1993-08.org.debian:01:222 0' expected_location = "10.0.1.6:3260,1 %s" % expected_iqn self.assertEqual(expected_location, model_update['provider_location']) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.cloneVolume('clone_volume', 1), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) def test_create_cloned_volume_extend(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.cloneVolume.return_value = { 'iscsiIqn': self.connector['initiator']} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute create_cloned_volume with extend model_update = self.driver.create_cloned_volume( self.cloned_volume_extend, self.volume) expected_iqn = 'iqn.1993-08.org.debian:01:222 0' expected_location = "10.0.1.6:3260,1 %s" % expected_iqn self.assertEqual(expected_location, model_update['provider_location']) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.cloneVolume('clone_volume', 1), mock.call.login('foo1', 'bar2'), mock.call.getClusterByName('CloudCluster1'), mock.call.setSSHOptions( HPELEFTHAND_SSH_IP, HPELEFTHAND_USERNAME, HPELEFTHAND_PASSWORD, missing_key_policy='AutoAddPolicy', privatekey=HPELEFTHAND_SAN_SSH_PRIVATE, known_hosts_file=mock.ANY, port=HPELEFTHAND_SSH_PORT, conn_timeout=HPELEFTHAND_SAN_SSH_CON_TIMEOUT), mock.call.getVolumeByName('clone_volume'), mock.call.modifyVolume(self.volume_id, {'size': 5 * units.Gi}), mock.call.logout(), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_extra_spec_mapping(self, _mock_get_volume_type): # setup drive with default configuration self.setup_driver() # 2 extra specs we don't care about, and # 1 that will get mapped _mock_get_volume_type.return_value = { 'extra_specs': { 'foo:bar': 'fake', 'bar:foo': 1234, 'hpelh:provisioning': 'full'}} volume_with_vt = self.volume volume_with_vt['volume_type_id'] = self.volume_type_id # get the extra specs of interest from this volume's volume type volume_extra_specs = self.driver._get_volume_extra_specs( volume_with_vt) extra_specs = self.driver._get_lh_extra_specs( volume_extra_specs, hpe_lefthand_iscsi.extra_specs_key_map.keys()) # map the extra specs key/value pairs to key/value pairs # used as optional configuration values by the LeftHand backend optional = self.driver._map_extra_specs(extra_specs) self.assertDictMatch({'isThinProvisioned': False}, optional) @mock.patch.object(volume_types, 'get_volume_type') def test_extra_spec_mapping_invalid_value(self, _mock_get_volume_type): # setup drive with default configuration self.setup_driver() volume_with_vt = self.volume volume_with_vt['volume_type_id'] = self.volume_type_id _mock_get_volume_type.return_value = { 'extra_specs': { # r-07 is an invalid value for hpelh:ao 'hpelh:data_pl': 'r-07', 'hpelh:ao': 'true'}} # get the extra specs of interest from this volume's volume type 
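        # NOTE(editor): 'r-07' is an invalid hpelh:data_pl (data placement)
        # value, so the mapping below is expected to drop it and keep only the
        # hpelh:ao translation. A rough sketch of that filtering, assuming a
        # key map and a set of valid placement levels along these lines (the
        # real ones live in hpe_lefthand_iscsi; names here are assumptions):
        #
        #     key_map = {'hpelh:ao': 'isAdaptiveOptimizationEnabled'}
        #     valid_pl = ('r-0', 'r-5', 'r-10-2')
        #     specs = {'hpelh:data_pl': 'r-07', 'hpelh:ao': 'true'}
        #     optional = {}
        #     if specs.get('hpelh:data_pl') in valid_pl:
        #         optional['dataProtectionLevel'] = specs['hpelh:data_pl']
        #     if 'hpelh:ao' in specs:
        #         optional[key_map['hpelh:ao']] = (specs['hpelh:ao'] == 'true')
        #     assert optional == {'isAdaptiveOptimizationEnabled': True}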
volume_extra_specs = self.driver._get_volume_extra_specs( volume_with_vt) extra_specs = self.driver._get_lh_extra_specs( volume_extra_specs, hpe_lefthand_iscsi.extra_specs_key_map.keys()) # map the extra specs key/value pairs to key/value pairs # used as optional configuration values by the LeftHand backend optional = self.driver._map_extra_specs(extra_specs) # {'hpelh:ao': 'true'} should map to # {'isAdaptiveOptimizationEnabled': True} # without hpelh:data_pl since r-07 is an invalid value self.assertDictMatch({'isAdaptiveOptimizationEnabled': True}, optional) def test_retype_with_no_LH_extra_specs(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumes.return_value = {'total': 1, 'members': []} ctxt = context.get_admin_context() host = {'host': self.serverName} key_specs_old = {'foo': False, 'bar': 2, 'error': True} key_specs_new = {'foo': True, 'bar': 5, 'error': False} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = dict.copy(self.volume) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client self.driver.retype(ctxt, volume, new_type, diff, host) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) def test_retype_with_only_LH_extra_specs(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} ctxt = context.get_admin_context() host = {'host': self.serverName} key_specs_old = {'hpelh:provisioning': 'thin'} key_specs_new = {'hpelh:provisioning': 'full', 'hpelh:ao': 'true'} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = dict.copy(self.volume) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client self.driver.retype(ctxt, volume, new_type, diff, host) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.modifyVolume( 1, { 'isThinProvisioned': False, 'isAdaptiveOptimizationEnabled': True}), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) def test_retype_with_both_extra_specs(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} ctxt = context.get_admin_context() host = {'host': self.serverName} key_specs_old = 
{'hpelh:provisioning': 'full', 'foo': 'bar'} key_specs_new = {'hpelh:provisioning': 'thin', 'foo': 'foobar'} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = dict.copy(self.volume) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client self.driver.retype(ctxt, volume, new_type, diff, host) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.modifyVolume(1, {'isThinProvisioned': True}), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) def test_retype_same_extra_specs(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} ctxt = context.get_admin_context() host = {'host': self.serverName} key_specs_old = {'hpelh:provisioning': 'full', 'hpelh:ao': 'true'} key_specs_new = {'hpelh:provisioning': 'full', 'hpelh:ao': 'false'} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = dict.copy(self.volume) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client self.driver.retype(ctxt, volume, new_type, diff, host) expected = self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.modifyVolume( 1, {'isAdaptiveOptimizationEnabled': False}), mock.call.logout()] # validate call chain mock_client.assert_has_calls(expected) def test_migrate_no_location(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() host = {'host': self.serverName, 'capabilities': {}} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client (migrated, update) = self.driver.migrate_volume( None, self.volume, host) self.assertFalse(migrated) mock_client.assert_has_calls([]) self.assertEqual(0, len(mock_client.method_calls)) def test_migrate_incorrect_vip(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getClusterByName.return_value = { "virtualIPAddresses": [{ "ipV4Address": "10.10.10.10", "ipV4NetMask": "255.255.240.0"}], "id": self.cluster_id} mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} location = (self.driver.DRIVER_LOCATION % { 'cluster': 'New_CloudCluster', 'vip': '10.10.10.111'}) host = { 'host': self.serverName, 'capabilities': {'location_info': location}} with 
mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client (migrated, update) = self.driver.migrate_volume( None, self.volume, host) self.assertFalse(migrated) expected = self.driver_startup_call_stack + [ mock.call.getClusterByName('New_CloudCluster'), mock.call.logout()] mock_client.assert_has_calls(expected) # and nothing else self.assertEqual( len(expected), len(mock_client.method_calls)) def test_migrate_with_location(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getClusterByName.return_value = { "virtualIPAddresses": [{ "ipV4Address": "10.10.10.111", "ipV4NetMask": "255.255.240.0"}], "id": self.cluster_id} mock_client.getVolumeByName.return_value = {'id': self.volume_id, 'iscsiSessions': None} mock_client.getVolume.return_value = {'snapshots': { 'resource': None}} mock_client.getVolumes.return_value = {'total': 1, 'members': []} location = (self.driver.DRIVER_LOCATION % { 'cluster': 'New_CloudCluster', 'vip': '10.10.10.111'}) host = { 'host': self.serverName, 'capabilities': {'location_info': location}} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client (migrated, update) = self.driver.migrate_volume( None, self.volume, host) self.assertTrue(migrated) expected = self.driver_startup_call_stack + [ mock.call.getClusterByName('New_CloudCluster'), mock.call.logout()] + self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.getVolume( 1, 'fields=snapshots,snapshots[resource[members[name]]]'), mock.call.modifyVolume(1, {'clusterName': 'New_CloudCluster'}), mock.call.logout()] mock_client.assert_has_calls(expected) # and nothing else self.assertEqual( len(expected), len(mock_client.method_calls)) def test_migrate_with_Snapshots(self): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() mock_client.getClusterByName.return_value = { "virtualIPAddresses": [{ "ipV4Address": "10.10.10.111", "ipV4NetMask": "255.255.240.0"}], "id": self.cluster_id} mock_client.getVolumeByName.return_value = { 'id': self.volume_id, 'iscsiSessions': None} mock_client.getVolume.return_value = {'snapshots': { 'resource': 'snapfoo'}} mock_client.getVolumes.return_value = {'total': 1, 'members': []} location = (self.driver.DRIVER_LOCATION % { 'cluster': 'New_CloudCluster', 'vip': '10.10.10.111'}) host = { 'host': self.serverName, 'capabilities': {'location_info': location}} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client (migrated, update) = self.driver.migrate_volume( None, self.volume, host) self.assertFalse(migrated) expected = self.driver_startup_call_stack + [ mock.call.getClusterByName('New_CloudCluster'), mock.call.logout()] + self.driver_startup_call_stack + [ mock.call.getVolumeByName('fakevolume'), mock.call.getVolume( 1, 'fields=snapshots,snapshots[resource[members[name]]]'), mock.call.logout()] mock_client.assert_has_calls(expected) # and nothing else self.assertEqual( len(expected), len(mock_client.method_calls)) def test_update_migrated_volume(self): mock_client = self.setup_driver() volume_id = 'fake_vol_id' clone_id = 'fake_clone_id' fake_old_volume = {'id': volume_id} provider_location = 'foo' fake_new_volume = {'id': clone_id, '_name_id': clone_id, 
'provider_location': provider_location} original_volume_status = 'available' with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client actual_update = self.driver.update_migrated_volume( context.get_admin_context(), fake_old_volume, fake_new_volume, original_volume_status) expected_update = {'_name_id': None, 'provider_location': None} self.assertEqual(expected_update, actual_update) def test_update_migrated_volume_attached(self): mock_client = self.setup_driver() volume_id = 'fake_vol_id' clone_id = 'fake_clone_id' fake_old_volume = {'id': volume_id} provider_location = 'foo' fake_new_volume = {'id': clone_id, '_name_id': clone_id, 'provider_location': provider_location} original_volume_status = 'in-use' with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client actual_update = self.driver.update_migrated_volume( context.get_admin_context(), fake_old_volume, fake_new_volume, original_volume_status) expected_update = {'_name_id': fake_new_volume['_name_id'], 'provider_location': provider_location} self.assertEqual(expected_update, actual_update) @mock.patch.object(volume_types, 'get_volume_type', return_value={'extra_specs': {'hpelh:ao': 'true'}}) def test_create_volume_with_ao_true(self, _mock_volume_type): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() volume_with_vt = self.volume volume_with_vt['volume_type_id'] = 1 # mock return value of createVolume mock_client.createVolume.return_value = { 'iscsiIqn': self.connector['initiator']} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client volume_info = self.driver.create_volume(volume_with_vt) self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', volume_info['provider_location']) # make sure createVolume is called without # isAdaptiveOptimizationEnabled == true expected = self.driver_startup_call_stack + [ mock.call.createVolume( 'fakevolume', 1, units.Gi, {'isThinProvisioned': True, 'clusterName': 'CloudCluster1'}), mock.call.logout()] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type', return_value={'extra_specs': {'hpelh:ao': 'false'}}) def test_create_volume_with_ao_false(self, _mock_volume_type): # setup drive with default configuration # and return the mock HTTP LeftHand client mock_client = self.setup_driver() volume_with_vt = self.volume volume_with_vt['volume_type_id'] = 1 # mock return value of createVolume mock_client.createVolume.return_value = { 'iscsiIqn': self.connector['initiator']} mock_client.getVolumes.return_value = {'total': 1, 'members': []} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client volume_info = self.driver.create_volume(volume_with_vt) self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', volume_info['provider_location']) # make sure createVolume is called with # isAdaptiveOptimizationEnabled == false expected = self.driver_startup_call_stack + [ mock.call.createVolume( 'fakevolume', 1, units.Gi, {'isThinProvisioned': True, 'clusterName': 'CloudCluster1', 'isAdaptiveOptimizationEnabled': False}), mock.call.logout()] mock_client.assert_has_calls(expected) def 
test_get_existing_volume_ref_name(self): self.setup_driver() existing_ref = {'source-name': self.volume_name} result = self.driver._get_existing_volume_ref_name( existing_ref) self.assertEqual(self.volume_name, result) existing_ref = {'bad-key': 'foo'} self.assertRaises( exception.ManageExistingInvalidReference, self.driver._get_existing_volume_ref_name, existing_ref) def test_manage_existing(self): mock_client = self.setup_driver() self.driver.api_version = "1.1" volume = {'display_name': 'Foo Volume', 'volume_type': None, 'volume_type_id': None, 'id': '12345'} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = { "type": "volume", "total": 1, "members": [{ "id": self.volume_id, "clusterName": self.cluster_name, "size": 1 }] } existing_ref = {'source-name': self.volume_name} expected_obj = {'display_name': 'Foo Volume'} obj = self.driver.manage_existing(volume, existing_ref) mock_client.assert_has_calls( self.driver_startup_call_stack + [ mock.call.getVolumeByName(self.volume_name), mock.call.logout()] + self.driver_startup_call_stack + [ mock.call.modifyVolume(self.volume_id, {'name': 'volume-12345'}), mock.call.logout()]) self.assertEqual(expected_obj, obj) @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing_retype(self, _mock_volume_types): mock_client = self.setup_driver() _mock_volume_types.return_value = { 'name': 'gold', 'id': 'gold-id', 'extra_specs': { 'hpelh:provisioning': 'thin', 'hpelh:ao': 'true', 'hpelh:data_pl': 'r-0', 'volume_type': self.volume_type}} self.driver.api_version = "1.1" volume = {'display_name': 'Foo Volume', 'host': 'stack@lefthand#lefthand', 'volume_type': 'gold', 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '12345'} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = { "type": "volume", "total": 1, "members": [{ "id": self.volume_id, "clusterName": self.cluster_name, "size": 1 }] } existing_ref = {'source-name': self.volume_name} expected_obj = {'display_name': 'Foo Volume'} obj = self.driver.manage_existing(volume, existing_ref) mock_client.assert_has_calls( self.driver_startup_call_stack + [ mock.call.getVolumeByName(self.volume_name), mock.call.logout()] + self.driver_startup_call_stack + [ mock.call.modifyVolume(self.volume_id, {'name': 'volume-12345'}), mock.call.logout()]) self.assertEqual(expected_obj, obj) @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing_retype_exception(self, _mock_volume_types): mock_client = self.setup_driver() _mock_volume_types.return_value = { 'name': 'gold', 'id': 'gold-id', 'extra_specs': { 'hpelh:provisioning': 'thin', 'hpelh:ao': 'true', 'hpelh:data_pl': 'r-0', 'volume_type': self.volume_type}} self.driver.retype = mock.Mock( side_effect=exception.VolumeNotFound(volume_id="fake")) self.driver.api_version = "1.1" volume = {'display_name': 'Foo Volume', 'host': 'stack@lefthand#lefthand', 'volume_type': 'gold', 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '12345'} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client mock_client.getVolumeByName.return_value = {'id': 
self.volume_id} mock_client.getVolumes.return_value = { "type": "volume", "total": 1, "members": [{ "id": self.volume_id, "clusterName": self.cluster_name, "size": 1 }] } existing_ref = {'source-name': self.volume_name} self.assertRaises(exception.VolumeNotFound, self.driver.manage_existing, volume, existing_ref) mock_client.assert_has_calls( self.driver_startup_call_stack + [ mock.call.getVolumeByName(self.volume_name), mock.call.logout()] + self.driver_startup_call_stack + [ mock.call.modifyVolume(self.volume_id, {'name': 'volume-12345'}), mock.call.logout()] + self.driver_startup_call_stack + [ mock.call.modifyVolume(self.volume_id, {'name': 'fakevolume'}), mock.call.logout()]) def test_manage_existing_volume_type_exception(self): mock_client = self.setup_driver() self.driver.api_version = "1.1" volume = {'display_name': 'Foo Volume', 'volume_type': 'gold', 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '12345'} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = { "type": "volume", "total": 1, "members": [{ "id": self.volume_id, "clusterName": self.cluster_name, "size": 1 }] } existing_ref = {'source-name': self.volume_name} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, volume=volume, existing_ref=existing_ref) mock_client.assert_has_calls( self.driver_startup_call_stack + [ mock.call.getVolumeByName(self.volume_name), mock.call.logout()]) def test_manage_existing_snapshot(self): mock_client = self.setup_driver() self.driver.api_version = "1.1" volume = { 'id': '111', } snapshot = { 'display_name': 'Foo Snap', 'id': '12345', 'volume': volume, 'volume_id': '111', } with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client mock_client.getSnapshotByName.return_value = { 'id': self.snapshot_id } mock_client.getSnapshotParentVolume.return_value = { 'name': 'volume-111' } existing_ref = {'source-name': self.snapshot_name} expected_obj = {'display_name': 'Foo Snap'} obj = self.driver.manage_existing_snapshot(snapshot, existing_ref) mock_client.assert_has_calls( self.driver_startup_call_stack + [ mock.call.getSnapshotByName(self.snapshot_name), mock.call.getSnapshotParentVolume(self.snapshot_name), mock.call.modifySnapshot(self.snapshot_id, {'name': 'snapshot-12345'}), mock.call.logout()]) self.assertEqual(expected_obj, obj) def test_manage_existing_snapshot_failed_over_volume(self): mock_client = self.setup_driver() self.driver.api_version = "1.1" volume = { 'id': self.volume_id, 'replication_status': 'failed-over', } snapshot = { 'display_name': 'Foo Snap', 'id': '12345', 'volume': volume, } existing_ref = {'source-name': self.snapshot_name} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client self.assertRaises(exception.InvalidInput, self.driver.manage_existing_snapshot, snapshot=snapshot, existing_ref=existing_ref) def test_manage_existing_get_size(self): mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'size': 2147483648} self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client mock_client.getVolumes.return_value = { 
"type": "volume", "total": 1, "members": [{ "id": self.volume_id, "clusterName": self.cluster_name, "size": 1 }] } volume = {} existing_ref = {'source-name': self.volume_name} size = self.driver.manage_existing_get_size(volume, existing_ref) expected_size = 2 expected = [mock.call.getVolumeByName(existing_ref['source-name']), mock.call.logout()] mock_client.assert_has_calls( self.driver_startup_call_stack + expected) self.assertEqual(expected_size, size) def test_manage_existing_get_size_invalid_reference(self): mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'size': 2147483648} self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client volume = {} existing_ref = {'source-name': "volume-12345"} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume=volume, existing_ref=existing_ref) mock_client.assert_has_calls([]) existing_ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume=volume, existing_ref=existing_ref) mock_client.assert_has_calls([]) def test_manage_existing_get_size_invalid_input(self): mock_client = self.setup_driver() mock_client.getVolumeByName.side_effect = ( hpeexceptions.HTTPNotFound('fake')) self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client mock_client.getVolumes.return_value = { "type": "volume", "total": 1, "members": [{ "id": self.volume_id, "clusterName": self.cluster_name, "size": 1 }] } volume = {} existing_ref = {'source-name': self.volume_name} self.assertRaises(exception.InvalidInput, self.driver.manage_existing_get_size, volume=volume, existing_ref=existing_ref) expected = [mock.call.getVolumeByName(existing_ref['source-name'])] mock_client.assert_has_calls( self.driver_startup_call_stack + expected) def test_manage_existing_snapshot_get_size(self): mock_client = self.setup_driver() mock_client.getSnapshotByName.return_value = {'size': 2147483648} self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client snapshot = {} existing_ref = {'source-name': self.snapshot_name} size = self.driver.manage_existing_snapshot_get_size(snapshot, existing_ref) expected_size = 2 expected = [mock.call.getSnapshotByName( existing_ref['source-name']), mock.call.logout()] mock_client.assert_has_calls( self.driver_startup_call_stack + expected) self.assertEqual(expected_size, size) def test_manage_existing_snapshot_get_size_invalid_reference(self): mock_client = self.setup_driver() mock_client.getSnapshotByName.return_value = {'size': 2147483648} self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client snapshot = {} existing_ref = {'source-name': "snapshot-12345"} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snapshot=snapshot, existing_ref=existing_ref) mock_client.assert_has_calls([]) existing_ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snapshot=snapshot, existing_ref=existing_ref) mock_client.assert_has_calls([]) def 
test_manage_existing_snapshot_get_size_invalid_input(self): mock_client = self.setup_driver() mock_client.getSnapshotByName.side_effect = ( hpeexceptions.HTTPNotFound('fake')) self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client snapshot = {} existing_ref = {'source-name': self.snapshot_name} self.assertRaises(exception.InvalidInput, self.driver.manage_existing_snapshot_get_size, snapshot=snapshot, existing_ref=existing_ref) expected = [mock.call.getSnapshotByName( existing_ref['source-name'])] mock_client.assert_has_calls( self.driver_startup_call_stack + expected) def test_unmanage(self): mock_client = self.setup_driver() mock_client.getVolumeByName.return_value = {'id': self.volume_id} # mock return value of getVolumes mock_client.getVolumes.return_value = { "type": "volume", "total": 1, "members": [{ "id": self.volume_id, "clusterName": self.cluster_name, "size": 1 }] } self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client self.driver.unmanage(self.volume) new_name = 'unm-' + str(self.volume['id']) expected = [ mock.call.getVolumeByName(self.volume['name']), mock.call.modifyVolume(self.volume['id'], {'name': new_name}), mock.call.logout() ] mock_client.assert_has_calls( self.driver_startup_call_stack + expected) def test_unmanage_snapshot(self): mock_client = self.setup_driver() volume = { 'id': self.volume_id, } snapshot = { 'name': self.snapshot_name, 'display_name': 'Foo Snap', 'volume': volume, 'id': self.snapshot_id, } mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id, } self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client self.driver.unmanage_snapshot(snapshot) new_name = 'ums-' + str(self.snapshot_id) expected = [ mock.call.getSnapshotByName(snapshot['name']), mock.call.modifySnapshot(self.snapshot_id, {'name': new_name}), mock.call.logout() ] mock_client.assert_has_calls( self.driver_startup_call_stack + expected) def test_unmanage_snapshot_failed_over_volume(self): mock_client = self.setup_driver() volume = { 'id': self.volume_id, 'replication_status': 'failed-over', } snapshot = { 'name': self.snapshot_name, 'display_name': 'Foo Snap', 'volume': volume, 'id': self.snapshot_id, } mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id, } self.driver.api_version = "1.1" with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client self.assertRaises(exception.SnapshotIsBusy, self.driver.unmanage_snapshot, snapshot=snapshot) def test_api_version(self): self.setup_driver() self.driver.api_version = "1.1" self.driver._check_api_version() self.driver.api_version = "1.0" self.assertRaises(exception.InvalidInput, self.driver._check_api_version) def test_get_volume_stats(self): # set up driver with default config mock_client = self.setup_driver() # mock return value of getVolumes mock_client.getVolumes.return_value = { "type": "volume", "total": 1, "members": [{ "id": 12345, "clusterName": self.cluster_name, "size": 1 * units.Gi }] } with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # execute driver stats = 
self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual(GOODNESS_FUNCTION, stats['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['filter_function']) self.assertEqual(1, int(stats['total_volumes'])) self.assertTrue(stats['thin_provisioning_support']) self.assertTrue(stats['thick_provisioning_support']) self.assertEqual(1, int(stats['provisioned_capacity_gb'])) self.assertEqual(25, int(stats['reserved_percentage'])) cap_util = ( float(units.Gi * 500 - units.Gi * 250) / float(units.Gi * 500) ) * 100 self.assertEqual(cap_util, float(stats['capacity_utilization'])) expected = self.driver_startup_call_stack + [ mock.call.getCluster(1), mock.call.getVolumes(fields=['members[id]', 'members[clusterName]', 'members[size]'], cluster=self.cluster_name), mock.call.logout()] mock_client.assert_has_calls(expected) def test_create_consistencygroup(self): ctxt = context.get_admin_context() # set up driver with default config mock_client = self.setup_driver() with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # create a consistency group group = self.fake_consistencygroup_object() cg = self.driver.create_consistencygroup(ctxt, group) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg['status']) def test_delete_consistencygroup(self): ctxt = context.get_admin_context() # set up driver with default config mock_client = self.setup_driver() mock_volume = mock.MagicMock() volumes = [mock_volume] with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # create a consistency group group = self.fake_consistencygroup_object() cg = self.driver.create_consistencygroup(ctxt, group) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg['status']) # delete the consistency group group.status = fields.ConsistencyGroupStatus.DELETING cg, vols = self.driver.delete_consistencygroup(ctxt, group, volumes) self.assertEqual(fields.ConsistencyGroupStatus.DELETING, cg['status']) def test_update_consistencygroup_add_vol_delete_cg(self): ctxt = context.get_admin_context() # set up driver with default config mock_client = self.setup_driver() mock_volume = mock.MagicMock() volumes = [mock_volume] mock_client.getVolumes.return_value = {'total': 1, 'members': []} # mock return value of createVolume mock_client.createVolume.return_value = { 'iscsiIqn': self.connector['initiator']} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # create a consistency group group = self.fake_consistencygroup_object() cg = self.driver.create_consistencygroup(ctxt, group) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg['status']) # add volume to consistency group cg = self.driver.update_consistencygroup( ctxt, group, add_volumes=[self.volume], remove_volumes=None) # delete the consistency group group.status = fields.ConsistencyGroupStatus.DELETING cg, vols = self.driver.delete_consistencygroup(ctxt, group, volumes) self.assertEqual(fields.ConsistencyGroupStatus.DELETING, cg['status']) def test_update_consistencygroup_remove_vol_delete_cg(self): ctxt = context.get_admin_context() # set up driver with default config mock_client = self.setup_driver() mock_volume = mock.MagicMock() volumes = [mock_volume] mock_client.getVolumes.return_value = {'total': 1, 'members': []} # mock return value of 
createVolume mock_client.createVolume.return_value = { 'iscsiIqn': self.connector['initiator']} with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # create a consistency group group = self.fake_consistencygroup_object() cg = self.driver.create_consistencygroup(ctxt, group) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg['status']) # add volume to consistency group cg = self.driver.update_consistencygroup( ctxt, group, add_volumes=[self.volume], remove_volumes=None) # remove volume from consistency group cg = self.driver.update_consistencygroup( ctxt, group, add_volumes=None, remove_volumes=[self.volume]) # delete the consistency group group.status = fields.ConsistencyGroupStatus.DELETING cg, vols = self.driver.delete_consistencygroup(ctxt, group, volumes) self.assertEqual(fields.ConsistencyGroupStatus.DELETING, cg['status']) def test_create_cgsnapshot(self): ctxt = context.get_admin_context() # set up driver with default config mock_client = self.setup_driver() mock_client.getVolumes.return_value = {'total': 1, 'members': []} mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_snap = mock.MagicMock() mock_snap.volumeName = self.volume_name expected_snaps = [mock_snap] with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # create a consistency group group = self.fake_consistencygroup_object() cg = self.driver.create_consistencygroup(ctxt, group) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg['status']) # create volume and add it to the consistency group self.driver.update_consistencygroup( ctxt, group, add_volumes=[self.volume], remove_volumes=None) # create the consistency group snapshot cgsnapshot = self.fake_cgsnapshot_object() cgsnap, snaps = self.driver.create_cgsnapshot( ctxt, cgsnapshot, expected_snaps) self.assertEqual('available', cgsnap['status']) def test_delete_cgsnapshot(self): ctxt = context.get_admin_context() # set up driver with default config mock_client = self.setup_driver() mock_client.getVolumes.return_value = {'total': 1, 'members': []} mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_snap = mock.MagicMock() mock_snap.volumeName = self.volume_name expected_snaps = [mock_snap] with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup: mock_do_setup.return_value = mock_client # create a consistency group group = self.fake_consistencygroup_object() cg = self.driver.create_consistencygroup(ctxt, group) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg['status']) # create volume and add it to the consistency group self.driver.update_consistencygroup( ctxt, group, add_volumes=[self.volume], remove_volumes=None) # delete the consistency group snapshot cgsnapshot = self.fake_cgsnapshot_object() cgsnapshot.status = 'deleting' cgsnap, snaps = self.driver.delete_cgsnapshot( ctxt, cgsnapshot, expected_snaps) self.assertEqual('deleting', cgsnap['status']) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated(self, _mock_get_volume_type): # set up driver with default config conf = self.default_mock_conf() conf.replication_device = self.repl_targets_unmgd mock_client = self.setup_driver(config=conf) mock_client.createVolume.return_value = { 'iscsiIqn': self.connector['initiator']} mock_client.doesRemoteSnapshotScheduleExist.return_value = False
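# NOTE (editorial sketch, not part of the original test): the
# replication_device entries assigned to conf above are plain dicts of
# remote-array settings. A hypothetical unmanaged target in the spirit of
# self.repl_targets_unmgd (defined earlier in this file) could look like the
# following; every key except the assumed 'backend_id' appears elsewhere in
# these tests:
#
#     repl_target = {'backend_id': REPLICATION_BACKEND_ID,
#                    'hpelefthand_api_url': 'https://1.1.1.1:8080/lhos',
#                    'hpelefthand_username': 'foo1',
#                    'hpelefthand_password': 'bar2',
#                    'hpelefthand_ssh_port': '16022'}
#     conf.replication_device = [repl_target]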
mock_replicated_client = self.setup_driver(config=conf) _mock_get_volume_type.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True'}} with mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup, \ mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_replication_client') as mock_replication_client: mock_do_setup.return_value = mock_client mock_replication_client.return_value = mock_replicated_client return_model = self.driver.create_volume(self.volume_replicated) expected = [ mock.call.createVolume( 'fakevolume_replicated', 1, units.Gi, {'isThinProvisioned': True, 'clusterName': 'CloudCluster1'}), mock.call.doesRemoteSnapshotScheduleExist( 'fakevolume_replicated_SCHED_Pri'), mock.call.createRemoteSnapshotSchedule( 'fakevolume_replicated', 'fakevolume_replicated_SCHED', 1800, '1970-01-01T00:00:00Z', 5, 'CloudCluster1', 5, 'fakevolume_replicated', '1.1.1.1', 'foo1', 'bar2'), mock.call.logout()] mock_client.assert_has_calls( self.driver_startup_call_stack + expected) prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0' rep_data = json.dumps({"location": HPELEFTHAND_API_URL}) self.assertEqual({'replication_status': 'enabled', 'replication_driver_data': rep_data, 'provider_location': prov_location}, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_delete_volume_replicated(self, _mock_get_volume_type): # set up driver with default config conf = self.default_mock_conf() conf.replication_device = self.repl_targets mock_client = self.setup_driver(config=conf) mock_client.getVolumeByName.return_value = {'id': self.volume_id} mock_client.getVolumes.return_value = {'total': 1, 'members': []} mock_replicated_client = self.setup_driver(config=conf) _mock_get_volume_type.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True'}} with mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup, \ mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_replication_client') as mock_replication_client: mock_do_setup.return_value = mock_client mock_replication_client.return_value = mock_replicated_client self.driver.delete_volume(self.volume_replicated) expected = [ mock.call.deleteRemoteSnapshotSchedule( 'fakevolume_replicated_SCHED'), mock.call.getVolumeByName('fakevolume_replicated'), mock.call.deleteVolume(1)] mock_client.assert_has_calls( self.driver_startup_call_stack + expected) @mock.patch.object(volume_types, 'get_volume_type') def test_failover_host(self, _mock_get_volume_type): ctxt = context.get_admin_context() # set up driver with default config conf = self.default_mock_conf() conf.replication_device = self.repl_targets mock_client = self.setup_driver(config=conf) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getVolumeByName.return_value = { 'iscsiIqn': self.connector['initiator']} _mock_get_volume_type.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True'}} with mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup, \ mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_replication_client') as mock_replication_client: mock_do_setup.return_value = mock_client mock_replication_client.return_value = mock_replicated_client invalid_backend_id = 'INVALID' # Test invalid secondary target. 
self.assertRaises( exception.InvalidReplicationTarget, self.driver.failover_host, ctxt, [self.volume_replicated], invalid_backend_id) # Test a successful failover. return_model = self.driver.failover_host( context.get_admin_context(), [self.volume_replicated], REPLICATION_BACKEND_ID) prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0' expected_model = (REPLICATION_BACKEND_ID, [{'updates': {'replication_status': 'failed-over', 'provider_location': prov_location}, 'volume_id': 1}]) self.assertEqual(expected_model, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_replication_failback_host_ready(self, _mock_get_volume_type): # set up driver with default config conf = self.default_mock_conf() conf.replication_device = self.repl_targets_unmgd mock_client = self.setup_driver(config=conf) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getVolumeByName.return_value = { 'iscsiIqn': self.connector['initiator'], 'isPrimary': True} mock_replicated_client.getRemoteSnapshotSchedule.return_value = ( ['', 'HP StoreVirtual LeftHand OS Command Line Interface', '(C) Copyright 2007-2016', '', 'RESPONSE', ' result 0', ' period 1800', ' paused false']) _mock_get_volume_type.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True'}} with mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup, \ mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_replication_client') as mock_replication_client: mock_do_setup.return_value = mock_client mock_replication_client.return_value = mock_replicated_client volume = self.volume_replicated.copy() rep_data = json.dumps({"primary_config_group": "failover_group"}) volume['replication_driver_data'] = rep_data return_model = self.driver.failover_host( context.get_admin_context(), [volume], 'default') prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0' expected_model = (None, [{'updates': {'replication_status': 'available', 'provider_location': prov_location}, 'volume_id': 1}]) self.assertEqual(expected_model, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_replication_failback_host_not_ready(self, _mock_get_volume_type): # set up driver with default config conf = self.default_mock_conf() conf.replication_device = self.repl_targets_unmgd mock_client = self.setup_driver(config=conf) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getVolumeByName.return_value = { 'iscsiIqn': self.connector['initiator'], 'isPrimary': False} mock_replicated_client.getRemoteSnapshotSchedule.return_value = ( ['', 'HP StoreVirtual LeftHand OS Command Line Interface', '(C) Copyright 2007-2016', '', 'RESPONSE', ' result 0', ' period 1800', ' paused true']) _mock_get_volume_type.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True'}} with mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_client') as mock_do_setup, \ mock.patch.object( hpe_lefthand_iscsi.HPELeftHandISCSIDriver, '_create_replication_client') as mock_replication_client: mock_do_setup.return_value = mock_client mock_replication_client.return_value = mock_replicated_client volume = self.volume_replicated.copy() self.assertRaises( exception.InvalidReplicationTarget, self.driver.failover_host, context.get_admin_context(), [volume], 'default') def test__create_replication_client(self): # set up driver with default config self.setup_driver() # Ensure creating a 
replication client works without specifying # ssh_conn_timeout or san_private_key. remote_array = { 'hpelefthand_api_url': 'https://1.1.1.1:8080/lhos', 'hpelefthand_username': 'user', 'hpelefthand_password': 'password', 'hpelefthand_ssh_port': '16022'} cl = self.driver._create_replication_client(remote_array) cl.setSSHOptions.assert_called_with( '1.1.1.1', 'user', 'password', conn_timeout=30, known_hosts_file=mock.ANY, missing_key_policy='AutoAddPolicy', port='16022', privatekey='') # Verify we can create a replication client with custom values for # ssh_conn_timeout and san_private_key. cl.reset_mock() remote_array['ssh_conn_timeout'] = 45 remote_array['san_private_key'] = 'foobarkey' cl = self.driver._create_replication_client(remote_array) cl.setSSHOptions.assert_called_with( '1.1.1.1', 'user', 'password', conn_timeout=45, known_hosts_file=mock.ANY, missing_key_policy='AutoAddPolicy', port='16022', privatekey='foobarkey') cinder-8.0.0/cinder/tests/unit/__init__.py0000664000567000056710000000316512701406250021624 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`cinder.tests.unit` -- Cinder Unittests ===================================================== .. automodule:: cinder.tests.unit :platform: Unix """ import eventlet from six.moves import builtins from cinder import objects eventlet.monkey_patch() # See http://code.google.com/p/python-nose/issues/detail?id=373 # The code below enables nosetests to work with i18n _() blocks setattr(builtins, '_', lambda x: x) # NOTE(alaski): Make sure this is done after eventlet monkey patching, otherwise # the threading.local() store used in oslo_messaging will be initialized to # threadlocal storage rather than greenthread local. This will cause context # sets and deletes in that storage to clobber each other. # NOTE(comstud): Make sure we have all of the objects loaded. We do this # at module import time, because we may be using mock decorators in our # tests that run at import time. objects.register_all() cinder-8.0.0/cinder/tests/unit/test_emc_vnx.py0000664000567000056710000107566212701406257022576 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
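# NOTE (editorial sketch, not part of the original module): the tests in this
# file fake the naviseccli command line by pairing command tuples with
# (stdout, return_code) result tuples -- see SUCCEED and FAKE_ERROR_RETURN
# just below, and DriverTestCaseBase.driverSetup() further down. A
# hypothetical pairing using fixture helpers defined later in this file:
#
#     td = EMCVNXCLIDriverTestData()
#     commands = [td.LUN_PROPERTY_ALL_CMD('volume-1')]
#     results = [td.LUN_PROPERTY('volume-1')]
#     fake_cli = self.driverSetup(commands, results)
#     # unmatched commands fall through to standard_fake_command_execute()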
import ddt import json import os import re import mock from oslo_concurrency import processutils import six from cinder import context from cinder import exception from cinder.objects import fields from cinder import test from cinder.tests.unit import fake_consistencygroup from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import utils from cinder.volume import configuration as conf from cinder.volume.drivers.emc import emc_cli_fc from cinder.volume.drivers.emc import emc_cli_iscsi from cinder.volume.drivers.emc import emc_vnx_cli from cinder.zonemanager import fc_san_lookup_service as fc_service from mock import patch SUCCEED = ("", 0) FAKE_ERROR_RETURN = ("FAKE ERROR", 255) VERSION = emc_vnx_cli.EMCVnxCliBase.VERSION build_replication_data = ( emc_vnx_cli.EMCVnxCliBase._build_replication_driver_data) REPLICATION_KEYS = emc_vnx_cli.EMCVnxCliBase.REPLICATION_KEYS def build_provider_location(lun_id, lun_type, base_lun_name=None, system=None): pl_dict = {'system': 'FNM11111' if system is None else system, 'type': six.text_type(lun_type), 'id': six.text_type(lun_id), 'base_lun_name': six.text_type(base_lun_name), 'version': VERSION} return '|'.join([k + '^' + pl_dict[k] for k in pl_dict]) def build_migration_dest_name(src_name): return src_name + '_dest' class EMCVNXCLIDriverTestData(object): base_lun_name = 'volume-1' replication_metadata = {'host': 'host@backendsec#unit_test_pool', 'system': 'fake_serial'} test_volume = { 'status': 'creating', 'name': 'volume-1', 'size': 1, 'volume_name': 'volume-1', 'id': '1', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'provider_location': build_provider_location(1, 'lun', base_lun_name), 'display_name': 'volume-1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': None } test_legacy_volume = { 'name': 'volume-1', 'size': 1, 'volume_name': 'volume-1', 'id': '1', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'provider_location': 'system^FNM11111|type^lun|id^1', 'display_name': 'volume-1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': None } test_volume_clone_cg = { 'name': 'volume-1', 'size': 1, 'volume_name': 'volume-1', 'id': '1', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'volume-1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': None, 'provider_location': build_provider_location(1, 'lun', base_lun_name), } test_volume_cg = { 'name': 'volume-1', 'size': 1, 'volume_name': 'volume-1', 'id': '1', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'volume-1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': 'cg_id', 'provider_location': build_provider_location(1, 'lun', base_lun_name), } test_volume_rw = { 'name': 'volume-1', 'size': 1, 'volume_name': 'volume-1', 'id': '1', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'volume-1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': None, 'provider_location': build_provider_location(1, 'lun', base_lun_name), } test_volume2 = { 'name': 'volume-2', 'size': 1, 'volume_name': 'volume-2', 'id': '2', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'volume-2', 
'consistencygroup_id': None, 'display_description': 'test volume', 'volume_type_id': None, 'provider_location': build_provider_location(1, 'lun', 'volume-2')} volume_in_cg = { 'name': 'volume-2', 'size': 1, 'volume_name': 'volume-2', 'id': '2', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'volume-1_in_cg', 'consistencygroup_id': 'consistencygroup_id', 'display_description': 'test volume', 'provider_location': build_provider_location(1, 'lun', 'volume-2'), 'volume_type_id': None} volume2_in_cg = { 'name': 'volume-3', 'size': 1, 'volume_name': 'volume-3', 'id': '3', 'provider_auth': None, 'project_id': 'project', 'display_name': 'volume-3_in_cg', 'provider_location': build_provider_location(3, 'lun', 'volume-3'), 'consistencygroup_id': 'consistencygroup_id', 'display_description': 'test volume', 'volume_type_id': None} test_volume_with_type = { 'name': 'volume-1', 'size': 1, 'volume_name': 'volume-1', 'id': 1, 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'thin_vol', 'consistencygroup_id': None, 'display_description': 'vol with type', 'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231', 'provider_location': build_provider_location(1, 'smp', 'volume-1'), 'volume_metadata': [{'key': 'snapcopy', 'value': 'True'}]} test_failed_volume = { 'name': 'volume-4', 'size': 1, 'volume_name': 'volume-4', 'id': '4', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'failed_vol', 'consistencygroup_id': None, 'display_description': 'test failed volume', 'volume_type_id': None} test_volume1_in_sg = { 'name': 'volume-4', 'size': 1, 'volume_name': 'volume-4', 'id': '4', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'failed_vol', 'display_description': 'Volume 1 in SG', 'volume_type_id': None, 'provider_location': build_provider_location(4, 'lun', 'volume-4', 'fakesn')} test_volume2_in_sg = { 'name': 'volume-5', 'size': 1, 'volume_name': 'volume-5', 'id': '5', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'failed_vol', 'display_description': 'Volume 2 in SG', 'volume_type_id': None, 'provider_location': build_provider_location(3, 'lun', 'volume-5', 'fakesn')} test_snapshot = { 'name': 'snapshot-4444', 'size': 1, 'id': '4444', 'volume_name': test_volume['name'], 'volume': test_volume, 'volume_size': 1, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'project_id': 'project'} test_snapshot1 = { 'name': 'snapshot-5555', 'size': 1, 'id': '5555', 'volume_name': test_volume['name'], 'volume': test_volume, 'volume_size': 1, 'project_id': 'project'} test_clone = { 'name': 'volume-2', 'size': 1, 'id': '2', 'volume_name': 'volume-2', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'volume-2', 'consistencygroup_id': None, 'display_description': 'volume created from snapshot', 'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8', 'provider_location': None, 'volume_metadata': [{'key': 'snapcopy', 'value': 'True'}]} test_clone_cg = { 'name': 'volume-2', 'size': 1, 'id': '2', 'volume_name': 'volume-2', 'provider_auth': None, 'host': "host@backendsec#unit_test_pool", 'project_id': 'project', 'display_name': 'volume-2', 'consistencygroup_id': 'consistencygroup_id', 'display_description': 'volume created from snapshot', 'volume_type_id': None, 'provider_location': 
build_provider_location(2, 'lun', 'volume-2', 'fakesn')} test_volume3 = { 'migration_status': None, 'availability_zone': 'nova', 'id': '3', 'name': 'volume-3', 'size': 2, 'status': 'available', 'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8', 'deleted': False, 'host': "host@backendsec#unit_test_pool", 'source_volid': None, 'provider_auth': None, 'display_name': 'vol-test02', 'attach_status': 'detached', 'volume_type': [], 'volume_attachment': [], 'provider_location': build_provider_location(1, 'lun', 'volume-3'), '_name_id': None, 'metadata': {}} test_volume4 = {'migration_status': None, 'availability_zone': 'nova', 'id': '4', 'name': 'volume-4', 'size': 2, 'status': 'available', 'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8', 'deleted': False, 'provider_location': build_provider_location(4, 'lun', 'volume-4'), 'host': 'ubuntu-server12@array_backend_1', 'source_volid': None, 'provider_auth': None, 'display_name': 'vol-test02', 'volume_attachment': [], 'attach_status': 'detached', 'volume_type': [], '_name_id': None, 'metadata': {}} test_volume5 = {'migration_status': None, 'availability_zone': 'nova', 'id': '5', 'name_id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25', 'name': 'volume-5', 'size': 1, 'status': 'available', 'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8', 'deleted': False, 'provider_location': build_provider_location(5, 'lun', 'volume-5'), 'host': 'ubuntu-server12@array_backend_1#unit_test_pool', 'source_volid': None, 'provider_auth': None, 'display_name': 'vol-test05', 'volume_attachment': [], 'attach_status': 'detached', 'volume_type': [], '_name_id': None, 'metadata': {}} test_volume_replication = { 'migration_status': None, 'availability_zone': 'nova', 'id': '5', 'name_id': None, 'name': 'volume-5', 'size': 1, 'status': 'available', 'volume_type_id': 'rep_type_id', 'deleted': False, 'provider_location': build_provider_location(5, 'lun', 'volume-5'), 'host': 'ubuntu-server12@array_backend_1#unit_test_pool', 'source_volid': None, 'provider_auth': None, 'display_name': 'vol-test05', 'volume_attachment': [], 'attach_status': 'detached', 'volume_type': [], 'replication_driver_data': '', 'replication_status': 'enabled', '_name_id': None, 'metadata': replication_metadata} test_replication_failover = { 'migration_status': None, 'availability_zone': 'nova', 'id': '5', 'name_id': None, 'name': 'volume-5', 'size': 1, 'status': 'available', 'volume_type_id': 'rep_type_id', 'deleted': False, 'provider_location': build_provider_location(5, 'lun', 'volume-5'), 'host': 'ubuntu-server12@array_backend_1#unit_test_pool', 'source_volid': None, 'provider_auth': None, 'display_name': 'vol-test05', 'volume_attachment': [], 'attach_status': 'detached', 'volume_type': [], 'replication_driver_data': '', 'replication_status': 'failed-over', '_name_id': None, 'metadata': replication_metadata} test_new_type = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'thin'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} test_replication_type = {'name': 'rep_type', 'extra_specs': {'replication_enbled': ' True'}, 'id': 'rep_type_id'} test_diff = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provisioning': ('thick', 'thin')}} test_host = {'host': 'ubuntu-server12@pool_backend_1#POOL_SAS1', 'capabilities': {'pool_name': 'POOL_SAS1', 'location_info': 'POOL_SAS1|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} connector = { 'ip': '10.0.0.2', 'initiator': 
'iqn.1993-08.org.debian:01:222', 'wwpns': ["1234567890123456", "1234567890543216"], 'wwnns': ["2234567890123456", "2234567890543216"], 'host': 'fakehost'} test_new_type2 = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:pool': 'POOL_SAS2'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} test_diff2 = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:pool': ('POOL_SAS1', 'POOL_SAS2')}} test_host2 = {'host': 'ubuntu-server12@array_backend_1', 'capabilities': {'location_info': '|FNM00124500890', 'volume_backend_name': 'array_backend_1', 'storage_protocol': 'iSCSI'}} test_cg = {'id': 'consistencygroup_id', 'name': 'group_name', 'status': fields.ConsistencyGroupStatus.DELETING} test_cg_with_type = {'id': 'consistencygroup_id', 'name': 'group_name', 'status': fields.ConsistencyGroupStatus.CREATING, 'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231,' '19fdd0dd-03b3-4d7c-b541-f4df46f308c8,'} test_cgsnapshot = { 'consistencygroup_id': 'consistencygroup_id', 'id': 'cgsnapshot_id', 'status': 'available'} test_member_cgsnapshot = { 'name': 'snapshot-1111', 'size': 1, 'id': '1111', 'volume': test_volume, 'volume_name': 'volume-1', 'volume_size': 1, 'consistencygroup_id': 'consistencygroup_id', 'cgsnapshot_id': 'cgsnapshot_id', 'project_id': 'project' } test_member_cgsnapshot2 = { 'name': 'snapshot-2222', 'size': 1, 'id': '2222', 'volume': test_volume2, 'volume_name': 'volume-2', 'volume_size': 1, 'consistencygroup_id': 'consistencygroup_id', 'cgsnapshot_id': 'cgsnapshot_id', 'project_id': 'project' } test_lun_id = 1 test_existing_ref = {'source-id': test_lun_id} test_existing_ref_source_name = {'source-name': 'volume-1'} test_pool_name = 'unit_test_pool' device_map = { '1122334455667788': { 'initiator_port_wwn_list': ['123456789012345', '123456789054321'], 'target_port_wwn_list': ['1122334455667777']}} i_t_map = {'123456789012345': ['1122334455667777'], '123456789054321': ['1122334455667777']} POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool', '-userCap', '-availableCap', '-state', '-prcntFullThreshold') POOL_PROPERTY_W_FASTCACHE_CMD = ('storagepool', '-list', '-name', 'unit_test_pool', '-availableCap', '-userCap', '-state', '-subscribedCap', '-prcntFullThreshold', '-fastcache') def POOL_GET_ALL_CMD(self, withfastcache=False): if withfastcache: return ('storagepool', '-list', '-availableCap', '-userCap', '-state', '-subscribedCap', '-prcntFullThreshold', '-fastcache') else: return ('storagepool', '-list', '-availableCap', '-userCap', '-state', '-subscribedCap', '-prcntFullThreshold') def POOL_GET_ALL_RESULT(self, withfastcache=False): if withfastcache: return ("Pool Name: unit_test_pool\n" "Pool ID: 0\n" "Percent Full Threshold: 70\n" "User Capacity (Blocks): 6881061888\n" "User Capacity (GBs): 3281.146\n" "Available Capacity (Blocks): 6512292864\n" "Available Capacity (GBs): 3105.303\n" "Total Subscribed Capacity (GBs): 536.140\n" "FAST Cache: Enabled\n" "State: Ready\n" "\n" "Pool Name: unit_test_pool2\n" "Pool ID: 1\n" "Percent Full Threshold: 70\n" "User Capacity (Blocks): 8598306816\n" "User Capacity (GBs): 4099.992\n" "Available Capacity (Blocks): 8356663296\n" "Available Capacity (GBs): 3984.768\n" "Total Subscribed Capacity (GBs): 636.240\n" "FAST Cache: Disabled\n" "State: Ready\n", 0) else: return ("Pool Name: unit_test_pool\n" "Pool ID: 0\n" "Percent Full Threshold: 70\n" "User Capacity (Blocks): 6881061888\n" "User Capacity (GBs): 3281.146\n" "Available Capacity (Blocks): 6512292864\n" "Available Capacity 
(GBs): 3105.303\n" "Total Subscribed Capacity (GBs): 536.140\n" "State: Ready\n" "\n" "Pool Name: unit_test_pool2\n" "Pool ID: 1\n" "Percent Full Threshold: 70\n" "User Capacity (Blocks): 8598306816\n" "User Capacity (GBs): 4099.992\n" "Available Capacity (Blocks): 8356663296\n" "Available Capacity (GBs): 3984.768\n" "Total Subscribed Capacity (GBs): 636.240\n" "State: Ready\n", 0) def POOL_GET_STATE_RESULT(self, pools): output = [] for i, po in enumerate(pools): if i != 0: output.append("\n") output.append("Pool Name: %s" % po['pool_name']) output.append("Pool ID: %s" % i) output.append("State: %s" % po['state']) return ("\n".join(output), 0) def POOL_GET_ALL_STATES_TEST(self, states=['Ready']): output = "" for i, stat in enumerate(states): out = ("Pool Name: Pool_" + str(i) + "\n" "Pool ID: " + str(i) + "\n" "Percent Full Threshold: 70\n" "User Capacity (Blocks): 8598306816\n" "User Capacity (GBs): 4099.992\n" "Available Capacity (Blocks): 8356663296\n" "Available Capacity (GBs): 3984.768\n" "FAST Cache: Enabled\n" "State: " + stat + "\n\n") output += out return (output, 0) def SNAP_NOT_EXIST(self): return ("Could not retrieve the specified (Snapshot).\n " "The (Snapshot) may not exist", 9) NDU_LIST_CMD = ('ndu', '-list') NDU_LIST_RESULT = ("Name of the software package: -Compression " + "Name of the software package: -Deduplication " + "Name of the software package: -FAST " + "Name of the software package: -FASTCache " + "Name of the software package: -ThinProvisioning " "Name of the software package: -VNXSnapshots " "Name of the software package: -MirrorView/S", 0) NDU_LIST_RESULT_WO_LICENSE = ( "Name of the software package: -Unisphere ", 0) MIGRATE_PROPERTY_MIGRATING = """\ Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d Source LU ID: 63950 Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest Dest LU ID: 136 Migration Rate: high Current State: MIGRATING Percent Complete: 50 Time Remaining: 0 second(s) """ MIGRATE_PROPERTY_STOPPED = """\ Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d Source LU ID: 63950 Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest Dest LU ID: 136 Migration Rate: high Current State: STOPPED - Destination full Percent Complete: 60 Time Remaining: 0 second(s) """ LIST_LUN_1_SPECS = """ LOGICAL UNIT NUMBER 1 Name: os-044e89e9-3aeb-46eb-a1b0-946f0a13545c Pool Name: unit_test_pool Is Thin LUN: No Is Compressed: No Deduplication State: Off Deduplication Status: OK(0x0) Tiering Policy: Auto Tier Initial Tier: Highest Available """ LIST_LUN_1_ALL = """ LOGICAL UNIT NUMBER 1 Name: os-044e89e9-3aeb-46eb-a1b0-946f0a13545c Current Owner: SP A User Capacity (Blocks): 46137344 User Capacity (GBs): 1.000 Pool Name: unit_test_pool Current State: Ready Status: OK(0x0) Is Faulted: false Is Transitioning: false Current Operation: None Current Operation State: N/A Current Operation Status: N/A Current Operation Percent Completed: 0 Is Thin LUN: No Is Compressed: No Deduplication State: Off Deduplication Status: OK(0x0) Tiering Policy: Auto Tier Initial Tier: Highest Available Attached Snapshot: N/A """ def SNAP_MP_CREATE_CMD(self, name='volume-1', source='volume-1'): return ('lun', '-create', '-type', 'snap', '-primaryLunName', source, '-name', name) def SNAP_ATTACH_CMD(self, name='volume-1', snapName='snapshot-4444'): return ('lun', '-attach', '-name', name, '-snapName', snapName) def SNAP_DELETE_CMD(self, name): return ('snap', '-destroy', '-id', name, '-o') def SNAP_CREATE_CMD(self, name): return ('snap', '-create', '-res', 1, 
'-name', name, '-allowReadWrite', 'yes', '-allowAutoDelete', 'no') def SNAP_MODIFY_CMD(self, name, rw): return ('snap', '-modify', '-id', name, '-allowReadWrite', rw, '-allowAutoDelete', 'yes') def SNAP_LIST_CMD(self, res_id=1): cmd = ('snap', '-list', '-res', int(res_id)) return cmd def LUN_DELETE_CMD(self, name): return ('lun', '-destroy', '-name', name, '-forceDetach', '-o') def LUN_EXTEND_CMD(self, name, newsize): return ('lun', '-expand', '-name', name, '-capacity', newsize, '-sq', 'gb', '-o', '-ignoreThresholds') def LUN_PROPERTY_POOL_CMD(self, lunname): return ('lun', '-list', '-name', lunname, '-poolName') def LUN_PROPERTY_ALL_CMD(self, lunname): return ('lun', '-list', '-name', lunname, '-state', '-status', '-opDetails', '-userCap', '-owner', '-attachedSnapshot') @staticmethod def LUN_RENAME_CMD(lun_id, lun_name): return ('lun', '-modify', '-l', int(lun_id), '-newName', lun_name, '-o') @staticmethod def LUN_LIST_ALL_CMD(lun_id): return ('lun', '-list', '-l', int(lun_id), '-attachedSnapshot', '-userCap', '-dedupState', '-initialTier', '-isCompressed', '-isThinLUN', '-opDetails', '-owner', '-poolName', '-state', '-status', '-tieringPolicy') @staticmethod def LUN_LIST_SPECS_CMD(lun_id): return ('lun', '-list', '-l', int(lun_id), '-poolName', '-isThinLUN', '-isCompressed', '-dedupState', '-initialTier', '-tieringPolicy') @staticmethod def LUN_MODIFY_TIER(lun_id, tier=None, policy=None): if tier is None: tier = 'highestAvailable' if policy is None: policy = 'highestAvailable' return ('lun', '-modify', '-l', lun_id, '-o', '-initialTier', tier, '-tieringPolicy', policy) def MIGRATION_CMD(self, src_id=1, dest_id=1, rate='high'): cmd = ("migrate", "-start", "-source", src_id, "-dest", dest_id, "-rate", rate, "-o") return cmd def MIGRATION_VERIFY_CMD(self, src_id): return ("migrate", "-list", "-source", src_id) def MIGRATION_CANCEL_CMD(self, src_id): return ("migrate", "-cancel", "-source", src_id, '-o') def GETPORT_CMD(self): return ("connection", "-getport", "-address", "-vlanid") def PINGNODE_CMD(self, sp, portid, vportid, ip): return ("connection", "-pingnode", "-sp", sp, '-portid', portid, "-vportid", vportid, "-address", ip, '-count', '1') def GETFCPORT_CMD(self): return ('port', '-list', '-sp') def CONNECTHOST_CMD(self, hostname, gname): return ('storagegroup', '-connecthost', '-host', hostname, '-gname', gname, '-o') def ENABLE_COMPRESSION_CMD(self, lun_id): return ('compression', '-on', '-l', lun_id, '-ignoreThresholds', '-o') def STORAGEGROUP_LIST_CMD(self, gname=None): if gname: return ('storagegroup', '-list', '-gname', gname, '-host', '-iscsiAttributes') else: return ('storagegroup', '-list') def STORAGEGROUP_REMOVEHLU_CMD(self, gname, hlu): return ('storagegroup', '-removehlu', '-hlu', hlu, '-gname', gname, '-o') def SNAP_COPY_CMD(self, src_snap, snap_name): return ('snap', '-copy', '-id', src_snap, '-name', snap_name, '-ignoreMigrationCheck', '-ignoreDeduplicationCheck') def ALLOW_READWRITE_ON_SNAP_CMD(self, snap_name): return ('snap', '-modify', '-id', snap_name, '-allowReadWrite', 'yes', '-allowAutoDelete', 'yes') def MODIFY_TIERING_CMD(self, lun_name, tiering): cmd = ['lun', '-modify', '-name', lun_name, '-o'] cmd.extend(self.tiering_values[tiering]) return tuple(cmd) provisioning_values = { 'thin': ['-type', 'Thin'], 'thick': ['-type', 'NonThin'], 'compressed': ['-type', 'Thin'], 'deduplicated': ['-type', 'Thin', '-deduplication', 'on']} tiering_values = { 'starthighthenauto': [ '-initialTier', 'highestAvailable', '-tieringPolicy', 'autoTier'], 'auto': [ '-initialTier', 
'optimizePool', '-tieringPolicy', 'autoTier'], 'highestavailable': [ '-initialTier', 'highestAvailable', '-tieringPolicy', 'highestAvailable'], 'lowestavailable': [ '-initialTier', 'lowestAvailable', '-tieringPolicy', 'lowestAvailable'], 'nomovement': [ '-initialTier', 'optimizePool', '-tieringPolicy', 'noMovement']} def LUN_CREATION_CMD(self, name, size, pool, provisioning=None, tiering=None, ignore_thresholds=False, poll=True): initial = ['lun', '-create', '-capacity', size, '-sq', 'gb', '-poolName', pool, '-name', name] if not poll: initial = ['-np'] + initial if provisioning: initial.extend(self.provisioning_values[provisioning]) else: initial.extend(self.provisioning_values['thick']) if tiering: initial.extend(self.tiering_values[tiering]) if ignore_thresholds: initial.append('-ignoreThresholds') return tuple(initial) def CHECK_FASTCACHE_CMD(self, storage_pool): return ('storagepool', '-list', '-name', storage_pool, '-fastcache') def CREATE_CONSISTENCYGROUP_CMD(self, cg_name, members=None): create_cmd = ('snap', '-group', '-create', '-name', cg_name, '-allowSnapAutoDelete', 'no') if not members: return create_cmd else: return create_cmd + ('-res', ','.join(map(six.text_type, members))) def DELETE_CONSISTENCYGROUP_CMD(self, cg_name): return ('-np', 'snap', '-group', '-destroy', '-id', cg_name) def ADD_LUN_TO_CG_CMD(self, cg_name, lun_id): return ('snap', '-group', '-addmember', '-id', cg_name, '-res', lun_id) def CREATE_CG_SNAPSHOT(self, cg_name, snap_name): return ('-np', 'snap', '-create', '-res', cg_name, '-resType', 'CG', '-name', snap_name, '-allowReadWrite', 'yes', '-allowAutoDelete', 'no') def DELETE_CG_SNAPSHOT(self, snap_name): return ('-np', 'snap', '-destroy', '-id', snap_name, '-o') def GET_CG_BY_NAME_CMD(self, cg_name): return ('snap', '-group', '-list', '-id', cg_name) def GET_SNAP(self, snap_name): return ('snap', '-list', '-id', snap_name) def REMOVE_LUNS_FROM_CG_CMD(self, cg_name, remove_ids): return ('snap', '-group', '-rmmember', '-id', cg_name, '-res', ','.join(remove_ids)) def REPLACE_LUNS_IN_CG_CMD(self, cg_name, new_ids): return ('snap', '-group', '-replmember', '-id', cg_name, '-res', ','.join(new_ids)) # Replication related commands def MIRROR_CREATE_CMD(self, mirror_name, lun_id): return ('mirror', '-sync', '-create', '-name', mirror_name, '-lun', lun_id, '-usewriteintentlog', '-o') def MIRROR_DESTROY_CMD(self, mirror_name): return ('mirror', '-sync', '-destroy', '-name', mirror_name, '-force', '-o') def MIRROR_ADD_IMAGE_CMD(self, mirror_name, sp_ip, lun_id): return ('mirror', '-sync', '-addimage', '-name', mirror_name, '-arrayhost', sp_ip, '-lun', lun_id, '-recoverypolicy', 'auto', '-syncrate', 'high') def MIRROR_REMOVE_IMAGE_CMD(self, mirror_name, image_uid): return ('mirror', '-sync', '-removeimage', '-name', mirror_name, '-imageuid', image_uid, '-o') def MIRROR_FRACTURE_IMAGE_CMD(self, mirror_name, image_uid): return ('mirror', '-sync', '-fractureimage', '-name', mirror_name, '-imageuid', image_uid, '-o') def MIRROR_SYNC_IMAGE_CMD(self, mirror_name, image_uid): return ('mirror', '-sync', '-syncimage', '-name', mirror_name, '-imageuid', image_uid, '-o') def MIRROR_PROMOTE_IMAGE_CMD(self, mirror_name, image_uid): return ('mirror', '-sync', '-promoteimage', '-name', mirror_name, '-imageuid', image_uid, '-o') def MIRROR_LIST_CMD(self, mirror_name): return ('mirror', '-sync', '-list', '-name', mirror_name) # Mirror related output def MIRROR_LIST_RESULT(self, mirror_name, mirror_state='Synchronized'): return ("""MirrorView Name: %(name)s MirrorView Description: 
MirrorView UID: 50:06:01:60:B6:E0:1C:F4:0E:00:00:00:00:00:00:00 Logical Unit Numbers: 37 Remote Mirror Status: Mirrored MirrorView State: Active MirrorView Faulted: NO MirrorView Transitioning: NO Quiesce Threshold: 60 Minimum number of images required: 0 Image Size: 2097152 Image Count: 2 Write Intent Log Used: YES Images: Image UID: 50:06:01:60:B6:E0:1C:F4 Is Image Primary: YES Logical Unit UID: 60:06:01:60:13:00:3E:00:14:FA:3C:8B:A5:98:E5:11 Image Condition: Primary Image Preferred SP: A Image UID: 50:06:01:60:88:60:05:FE Is Image Primary: NO Logical Unit UID: 60:06:01:60:41:C4:3D:00:B2:D5:33:DB:C7:98:E5:11 Image State: %(state)s Image Condition: Normal Recovery Policy: Automatic Preferred SP: A Synchronization Rate: High Image Faulted: NO Image Transitioning: NO Synchronizing Progress(%%): 100 """ % {'name': mirror_name, 'state': mirror_state}, 0) def MIRROR_LIST_ERROR_RESULT(self, mirror_name): return ("Getting mirror list failed. Mirror not found", 145) def MIRROR_CREATE_ERROR_RESULT(self, mirror_name): return ( "Error: mirrorview command failed\n" "Mirror name already in use", 67) def MIRROR_DESTROY_ERROR_RESULT(self, mirror_name): return ("Destroying mirror failed. Mirror not found", 145) def MIRROR_ADD_IMAGE_ERROR_RESULT(self): return ( "Adding sync mirror image failed. Invalid LUN number\n" "LUN does not exist or Specified LU not available " "for mirroring.", 169) def MIRROR_PROMOTE_IMAGE_ERROR_RESULT(self): return ( "Error: mirrorview command failed\n" "UID of the secondary image to be promoted is not local to " "this array.Mirrorview can't promote a secondary image not " "local to this array. Make sure you are sending the promote " "command to the correct array where the secondary image is " "located. (0x7105824e)", 78) # Test Objects def CONSISTENCY_GROUP_VOLUMES(self): volumes = [] volumes.append(self.test_volume) volumes.append(self.test_volume) return volumes def SNAPS_IN_SNAP_GROUP(self): snaps = [] snaps.append(self.test_snapshot) snaps.append(self.test_snapshot) return snaps def VOLUMES_NOT_IN_CG(self): add_volumes = [] add_volumes.append(self.test_volume4) add_volumes.append(self.test_volume5) return add_volumes def VOLUMES_IN_CG(self): remove_volumes = [] remove_volumes.append(self.volume_in_cg) remove_volumes.append(self.volume2_in_cg) return remove_volumes def CG_PROPERTY(self, cg_name): return """ Name: %(cg_name)s Description: Allow auto delete: No Member LUN ID(s): 1, 3 State: Ready """ % {'cg_name': cg_name}, 0 def CG_NOT_FOUND(self): return ("Cannot find the consistency group. \n\n", 13) def CG_REPL_ERROR(self): return """ The specified LUN is already a member of another consistency group. (0x716d8045) """, 71 def LUN_PREP_ERROR(self): return ("The operation cannot be performed because " "the LUN is 'Preparing'. Wait for the LUN's " "Current Operation to complete 'Preparing' " "and retry the operation. 
(0x712d8e0e)", 14) POOL_PROPERTY = ( "Pool Name: unit_test_pool\n" "Pool ID: 1\n" "Percent Full Threshold: 70\n" "User Capacity (Blocks): 6881061888\n" "User Capacity (GBs): 3281.146\n" "Available Capacity (Blocks): 6832207872\n" "Available Capacity (GBs): 3257.851\n" "State: Ready\n" "\n", 0) POOL_PROPERTY_W_FASTCACHE = ( "Pool Name: unit_test_pool\n" "Pool ID: 1\n" "Percent Full Threshold: 70\n" "User Capacity (Blocks): 6881061888\n" "User Capacity (GBs): 3281.146\n" "Available Capacity (Blocks): 6832207872\n" "Available Capacity (GBs): 3257.851\n" "Total Subscribed Capacity (GBs): 636.240\n" "FAST Cache: Enabled\n" "State: Ready\n\n", 0) ALL_PORTS = ("SP: A\n" + "Port ID: 4\n" + "Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" + "iSCSI Alias: 0215.a4\n\n" + "Virtual Port ID: 0\n" + "VLAN ID: Disabled\n" + "IP Address: 10.244.214.118\n\n" + "SP: A\n" + "Port ID: 5\n" + "Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a5\n" + "iSCSI Alias: 0215.a5\n" + "SP: A\n" + "Port ID: 0\n" + "Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a0\n" + "iSCSI Alias: 0215.a0\n\n" + "Virtual Port ID: 0\n" + "VLAN ID: Disabled\n" + "IP Address: 10.244.214.119\n\n" + "SP: B\n" + "Port ID: 2\n" + "Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.b2\n" + "iSCSI Alias: 0215.b2\n\n" + "Virtual Port ID: 0\n" + "VLAN ID: Disabled\n" + "IP Address: 10.244.214.120\n\n", 0) WHITE_LIST_PORTS = ("""SP: A Port ID: 0 Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a0 iSCSI Alias: 0235.a7 Virtual Port ID: 0 VLAN ID: Disabled IP Address: 192.168.3.52 SP: A Port ID: 9 Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a9 iSCSI Alias: 0235.a9 SP: A Port ID: 4 Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a4 iSCSI Alias: 0235.a4 SP: B Port ID: 2 Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.b2 iSCSI Alias: 0235.b6 Virtual Port ID: 0 VLAN ID: Disabled IP Address: 192.168.4.53 """, 0) iscsi_connection_info = { 'data': {'target_discovered': True, 'target_iqn': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4', 'target_lun': 2, 'target_portal': '10.244.214.118:3260', 'target_iqns': ['iqn.1992-04.com.emc:cx.fnm00124000215.a4'], 'target_luns': [2], 'target_portals': ['10.244.214.118:3260'], 'volume_id': '1'}, 'driver_volume_type': 'iscsi'} iscsi_connection_info_mp = { 'data': {'target_discovered': True, 'target_iqns': [ 'iqn.1992-04.com.emc:cx.fnm00124000215.a4', 'iqn.1992-04.com.emc:cx.fnm00124000215.a5'], 'target_iqn': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4', 'target_luns': [2, 2], 'target_lun': 2, 'target_portals': [ '10.244.214.118:3260', '10.244.214.119:3260'], 'target_portal': '10.244.214.118:3260', 'volume_id': '1'}, 'driver_volume_type': 'iscsi'} PING_OK = ("Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" + "Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" + "Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" + "Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n", 0) FC_PORTS = ("Information about each SPPORT:\n" + "\n" + "SP Name: SP A\n" + "SP Port ID: 0\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:60:08:60:01:95\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: YES\n" + "Switch UID: 10:00:00:05:1E:72:EC:A6:" + "20:46:00:05:1E:72:EC:A6\n" + "SP Source ID: 272896\n" + "\n" + "SP Name: SP B\n" + "SP Port ID: 4\n" + "SP UID: iqn.1992-04.com.emc:cx." 
+ "fnm00124000215.b4\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: Not Applicable\n" + "\n" + "SP Name: SP A\n" + "SP Port ID: 2\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:62:08:60:01:95\n" + "Link Status: Down\n" + "Port Status: Online\n" + "Switch Present: NO\n" + "\n" + "SP Name: SP B\n" + "SP Port ID: 2\n" + "SP UID: 50:06:01:60:88:60:08:0F:" "50:06:01:6A:08:60:08:0F\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: YES\n" + "Switch UID: 10:00:50:EB:1A:03:3F:59:" "20:11:50:EB:1A:03:3F:59\n" + "SP Source ID: 69888\n", 0) FAKEHOST_PORTS = ( "Information about each HBA:\n" + "\n" + "HBA UID: 20:00:00:90:FA:53:46:41:12:34:" + "56:78:90:12:34:56\n" + "Server Name: fakehost\n" + "Server IP Address: 10.0.0.2" + "HBA Model Description:\n" + "HBA Vendor Description:\n" + "HBA Device Driver Name:\n" + "Information about each port of this HBA:\n\n" + " SP Name: SP A\n" + " SP Port ID: 0\n" + " HBA Devicename:\n" + " Trusted: NO\n" + " Logged In: YES\n" + " Defined: YES\n" + " Initiator Type: 3\n" + " StorageGroup Name: fakehost\n\n" + " SP Name: SP A\n" + " SP Port ID: 2\n" + " HBA Devicename:\n" + " Trusted: NO\n" + " Logged In: YES\n" + " Defined: YES\n" + " Initiator Type: 3\n" + " StorageGroup Name: fakehost\n\n" + " SP Name: SP B\n" + " SP Port ID: 2\n" + " HBA Devicename:\n" + " Trusted: NO\n" + " Logged In: YES\n" + " Defined: YES\n" + " Initiator Type: 3\n" + " StorageGroup Name: fakehost\n\n" "Information about each SPPORT:\n" + "\n" + "SP Name: SP A\n" + "SP Port ID: 0\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:60:08:60:01:95\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: YES\n" + "Switch UID: 10:00:00:05:1E:72:EC:A6:" + "20:46:00:05:1E:72:EC:A6\n" + "SP Source ID: 272896\n" + "\n" + "SP Name: SP B\n" + "SP Port ID: 4\n" + "SP UID: iqn.1992-04.com.emc:cx." 
+ "fnm00124000215.b4\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: Not Applicable\n" + "\n" + "SP Name: SP A\n" + "SP Port ID: 2\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:62:08:60:01:95\n" + "Link Status: Down\n" + "Port Status: Online\n" + "Switch Present: NO\n" + "\n" + "SP Name: SP B\n" + "SP Port ID: 2\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:6A:08:60:08:0F\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: YES\n" + "Switch UID: 10:00:00:05:1E:72:EC:A6:" + "20:46:00:05:1E:72:EC:A6\n" + "SP Source ID: 272896\n", 0) def LUN_PROPERTY(self, name, is_thin=False, has_snap=False, size=1, state='Ready', faulted='false', operation='None', lunid=1, pool_name='unit_test_pool'): return (""" LOGICAL UNIT NUMBER %(lunid)s Name: %(name)s UID: 60:06:01:60:09:20:32:00:13:DF:B4:EF:C2:63:E3:11 Current Owner: SP A Default Owner: SP A Allocation Owner: SP A Attached Snapshot: %(has_snap)s User Capacity (Blocks): 2101346304 User Capacity (GBs): %(size)d Consumed Capacity (Blocks): 2149576704 Consumed Capacity (GBs): 1024.998 Pool Name: %(pool_name)s Current State: %(state)s Status: OK(0x0) Is Faulted: %(faulted)s Is Transitioning: false Current Operation: %(operation)s Current Operation State: N/A Current Operation Status: N/A Current Operation Percent Completed: 0 Is Thin LUN: %(is_thin)s""" % { 'lunid': lunid, 'name': name, 'has_snap': 'FakeSnap' if has_snap else 'N/A', 'size': size, 'pool_name': pool_name, 'state': state, 'faulted': faulted, 'operation': operation, 'is_thin': 'Yes' if is_thin else 'No'}, 0) def STORAGE_GROUP_ISCSI_FC_HBA(self, sgname): return ("""\ Storage Group Name: %s Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D HBA/SP Pairs: HBA UID SP Name SPPort ------- ------- ------ iqn.1993-08.org.debian:01:222 SP A 4 Host name: fakehost SPPort: A-4v0 Initiator IP: fakeip TPGT: 3 ISID: fakeid 22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56 SP B 2 Host name: fakehost2 SPPort: B-2v0 Initiator IP: N/A TPGT: 0 ISID: N/A 22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16 SP B 2 Host name: fakehost2 SPPort: B-2v0 Initiator IP: N/A TPGT: 0 ISID: N/A HLU/ALU Pairs: HLU Number ALU Number ---------- ---------- 1 1 Shareable: YES""" % sgname, 0) def STORAGE_GROUP_NO_MAP(self, sgname): return ("""\ Storage Group Name: %s Storage Group UID: 27:D2:BE:C1:9B:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D Shareable: YES""" % sgname, 0) def STORAGE_GROUP_HAS_MAP(self, sgname): return ("""\ Storage Group Name: %s Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D HBA/SP Pairs: HBA UID SP Name SPPort ------- ------- ------ iqn.1993-08.org.debian:01:222 SP A 4 Host name: fakehost SPPort: A-4v0 Initiator IP: fakeip TPGT: 3 ISID: fakeid HLU/ALU Pairs: HLU Number ALU Number ---------- ---------- 1 1 Shareable: YES""" % sgname, 0) def STORAGE_GROUP_HAS_MAP_ISCSI(self, sgname): return ("""\ Storage Group Name: %s Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D HBA/SP Pairs: HBA UID SP Name SPPort ------- ------- ------ iqn.1993-08.org.debian:01:222 SP A 2 Host name: fakehost SPPort: A-2v0 Initiator IP: fakeip TPGT: 3 ISID: fakeid iqn.1993-08.org.debian:01:222 SP A 0 Host name: fakehost SPPort: A-0v0 Initiator IP: fakeip TPGT: 3 ISID: fakeid iqn.1993-08.org.debian:01:222 SP B 2 Host name: fakehost SPPort: B-2v0 Initiator IP: fakeip TPGT: 3 ISID: fakeid HLU/ALU Pairs: HLU Number ALU Number ---------- ---------- 1 1 Shareable: YES""" % sgname, 0) def STORAGE_GROUP_HAS_MAP_MP(self, sgname): return ("""\ Storage Group Name: 
    def STORAGE_GROUP_HAS_MAP_MP(self, sgname):
        return ("""\
Storage Group Name: %s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
Host name: fakehost
SPPort: A-4v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
iqn.1993-08.org.debian:01:222 SP A 5
Host name: fakehost
SPPort: A-5v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 1
Shareable: YES""" % sgname, 0)

    def STORAGE_GROUP_HAS_MAP_2(self, sgname):
        return ("""\
Storage Group Name: %s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
Host name: fakehost
SPPort: A-4v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 1
2 3
Shareable: YES""" % sgname, 0)

    def POOL_FEATURE_INFO_POOL_LUNS_CMD(self):
        cmd = ('storagepool', '-feature', '-info',
               '-maxPoolLUNs', '-numPoolLUNs')
        return cmd

    def POOL_FEATURE_INFO_POOL_LUNS(self, max, total):
        return (('Max. Pool LUNs: %s\n' % max) +
                ('Total Number of Pool LUNs: %s\n' % total), 0)

    def STORAGE_GROUPS_HAS_MAP(self, sgname1, sgname2):
        return ("""
Storage Group Name: irrelative
Storage Group UID: 9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:5741c6307e60 SP A 6
Host name: fakehost
SPPort: A-6v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
Storage Group Name: %(sgname1)s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
Host name: fakehost
SPPort: A-4v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
31 3
41 4
Shareable: YES
Storage Group Name: %(sgname2)s
Storage Group UID: 9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:5741c6307e60 SP A 6
Host name: fakehost
SPPort: A-6v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
32 3
42 4
Shareable: YES""" % {'sgname1': sgname1,
                     'sgname2': sgname2}, 0)
" "Host LUN/LUN mapping still exists.", 0) def set_path_cmd(self, gname, hba, sp, spport, vport=None, ip=None): if vport is None: return ('storagegroup', '-setpath', '-gname', gname, '-hbauid', hba, '-sp', sp, '-spport', spport, '-ip', ip, '-host', gname, '-o') return ('storagegroup', '-setpath', '-gname', gname, '-hbauid', hba, '-sp', sp, '-spport', spport, '-spvport', vport, '-ip', ip, '-host', gname, '-o') @staticmethod def convert_snapshot(snapshot, expected_attrs=['volume']): if expected_attrs: snapshot = snapshot.copy() snapshot['volume'] = fake_volume.fake_volume_obj( None, **snapshot['volume']) snap = fake_snapshot.fake_snapshot_obj( None, expected_attrs=expected_attrs, **snapshot) return snap @staticmethod def convert_volume(volume): vol = fake_volume.fake_volume_obj( None, **volume) return vol class DriverTestCaseBase(test.TestCase): def setUp(self): super(DriverTestCaseBase, self).setUp() self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'command_execute', self.fake_command_execute_for_driver_setup) self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'get_array_serial', mock.Mock(return_value={'array_serial': 'fake_serial'})) self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1)) self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01) self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01) self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli' self.configuration.san_ip = '10.0.0.1' self.configuration.storage_vnx_pool_name = 'unit_test_pool' self.configuration.san_login = 'sysadmin' self.configuration.san_password = 'sysadmin' self.configuration.initiator_auto_registration = True self.configuration.check_max_pool_luns_threshold = False self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get({'storage_vnx_pool_names': 'unit_test_pool', 'volume_backend_name': 'namedbackend'})) self.testData = EMCVNXCLIDriverTestData() self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \ '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 ' self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}' self.configuration.ignore_pool_full_threshold = False def driverSetup(self, commands=tuple(), results=tuple()): self.driver = self.generate_driver(self.configuration) fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.Mock(side_effect=fake_command_execute) self.driver.cli._client.command_execute = fake_cli return fake_cli def generate_driver(self, conf): raise NotImplementedError def get_command_execute_simulator(self, commands=tuple(), results=tuple()): assert(len(commands) == len(results)) def fake_command_execute(*args, **kwargv): for i in range(len(commands)): if args == commands[i]: if isinstance(results[i], list): if len(results[i]) > 0: ret = results[i][0] del results[i][0] return ret else: return results[i] return self.standard_fake_command_execute(*args, **kwargv) return fake_command_execute def standard_fake_command_execute(self, *args, **kwargv): standard_commands = [ self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.LUN_PROPERTY_ALL_CMD('volume-2'), self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-2')), self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'), self.testData.LUN_PROPERTY_ALL_CMD('snapshot-4444'), self.testData.POOL_PROPERTY_CMD] standard_results = [ self.testData.LUN_PROPERTY('volume-1'), self.testData.LUN_PROPERTY('volume-2'), 
    def standard_fake_command_execute(self, *args, **kwargv):
        standard_commands = [
            self.testData.LUN_PROPERTY_ALL_CMD('volume-1'),
            self.testData.LUN_PROPERTY_ALL_CMD('volume-2'),
            self.testData.LUN_PROPERTY_ALL_CMD(
                build_migration_dest_name('volume-2')),
            self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
            self.testData.LUN_PROPERTY_ALL_CMD('snapshot-4444'),
            self.testData.POOL_PROPERTY_CMD]

        standard_results = [
            self.testData.LUN_PROPERTY('volume-1'),
            self.testData.LUN_PROPERTY('volume-2'),
            self.testData.LUN_PROPERTY(
                build_migration_dest_name('volume-2')),
            self.testData.LUN_PROPERTY('vol-vol1'),
            self.testData.LUN_PROPERTY('snapshot-4444'),
            self.testData.POOL_PROPERTY]

        standard_default = SUCCEED
        for i in range(len(standard_commands)):
            if args == standard_commands[i]:
                return standard_results[i]

        return standard_default

    def fake_command_execute_for_driver_setup(self, *command, **kwargv):
        if (command == ('connection', '-getport', '-address', '-vlanid') or
                command == ('connection', '-getport', '-vlanid')):
            return self.testData.ALL_PORTS
        elif command == ('storagepool', '-list', '-state'):
            return self.testData.POOL_GET_STATE_RESULT(
                [{'pool_name': self.testData.test_pool_name,
                  'state': "Ready"},
                 {'pool_name': "unit_test_pool2", 'state': "Ready"}])
        if command == self.testData.GETFCPORT_CMD():
            return self.testData.FC_PORTS
        else:
            return SUCCEED

    def fake_safe_get(self, values):
        def _safe_get(key):
            return values.get(key)
        return _safe_get


@ddt.ddt
class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
    def generate_driver(self, conf):
        return emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)

    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    def test_create_destroy_volume_without_extra_spec(self):
        fake_cli = self.driverSetup()
        self.driver.create_volume(self.testData.test_volume)
        self.driver.delete_volume(self.testData.test_volume)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'volume-1', 1, 'unit_test_pool', 'thick', None,
                poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'),
                      poll=False),
            mock.call(*self.testData.LUN_DELETE_CMD('volume-1'))]
        fake_cli.assert_has_calls(expect_cmd)

    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    def test_create_volume_ignore_thresholds(self):
        self.configuration.ignore_pool_full_threshold = True
        fake_cli = self.driverSetup()
        self.driver.create_volume(self.testData.test_volume)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'volume-1', 1, 'unit_test_pool', 'thick', None,
                ignore_thresholds=True, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)

    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
    def test_create_volume_compressed(self):
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'),
                    self.testData.LUN_PROPERTY_ALL_CMD('volume-1'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('volume-1', True),
                   self.testData.LUN_PROPERTY('volume-1', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # verification
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'volume-1', 1, 'unit_test_pool', 'compressed', None,
                poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'volume-1'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'volume-1'), poll=True),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                1))]
        fake_cli.assert_has_calls(expect_cmd)
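    # The tests in this class follow one pattern: driverSetup() swaps the
    # NaviSecCLI client for a mock.Mock, the driver API under test is
    # invoked, and the expected CLI conversation is then verified with
    # Mock.assert_has_calls(), e.g.
    #   fake_cli.assert_has_calls(
    #       [mock.call(*self.testData.LUN_DELETE_CMD('volume-1'))])
    # assert_has_calls() only requires the listed calls to appear in order;
    # extra interleaved calls are tolerated (an observation about the mock
    # library, not something these tests assert).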
"get_volume_type_extra_specs", mock.Mock(return_value={'provisioning:type': 'thin', 'storagetype:provisioning': 'thick'})) def test_create_volume_thin(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('volume-1', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] # case self.driver.create_volume(self.testData.test_volume_with_type) # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-1', 1, 'unit_test_pool', 'thin', None, poll=False)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'volume-1'), poll=False)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( 'oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'provisioning:type': 'thick'})) def test_create_volume_thick(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('volume-1', False), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] # case self.driver.create_volume(self.testData.test_volume_with_type) # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-1', 1, 'unit_test_pool', 'thick', None, poll=False)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'volume-1'), poll=False)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'compressed', 'storagetype:tiering': 'HighestAvailable'})) def test_create_volume_compressed_tiering_highestavailable(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('volume-1', True), self.testData.LUN_PROPERTY('volume-1', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] # case self.driver.create_volume(self.testData.test_volume_with_type) # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-1', 1, 'unit_test_pool', 'compressed', 'highestavailable', poll=False)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'volume-1'), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'volume-1'), poll=True), mock.call(*self.testData.ENABLE_COMPRESSION_CMD( 1))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'})) def test_create_volume_deduplicated(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('volume-1', True), self.testData.LUN_PROPERTY('volume-1', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] # case self.driver.create_volume(self.testData.test_volume_with_type) # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-1', 1, 'unit_test_pool', 'deduplicated', None, poll=False))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:tiering': 'Auto'})) def test_create_volume_tiering_auto(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('volume-1', True), self.testData.LUN_PROPERTY('volume-1', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] # case self.driver.create_volume(self.testData.test_volume_with_type) # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-1', 1, 'unit_test_pool', None, 'auto', poll=False))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:tiering': 'Auto', 'storagetype:provisioning': 'Deduplicated'})) def test_create_volume_deduplicated_tiering_auto(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('volume-1', True), self.testData.NDU_LIST_RESULT] self.driverSetup(commands, results) ex = self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, self.testData.test_volume_with_type) self.assertTrue( re.match(r".*deduplicated and auto tiering can't be both enabled", ex.msg)) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'Compressed'})) def test_create_volume_compressed_no_enabler(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('volume-1', True), ('No package', 0)] self.driverSetup(commands, results) ex = self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, self.testData.test_volume_with_type) self.assertTrue( re.match(r".*Compression Enabler is not installed", ex.msg)) def test_get_volume_stats(self): commands = [self.testData.NDU_LIST_CMD, self.testData.POOL_GET_ALL_CMD(True)] results = [self.testData.NDU_LIST_RESULT, self.testData.POOL_GET_ALL_RESULT(True)] self.driverSetup(commands, results) stats = self.driver.get_volume_stats(True) self.assertTrue(stats['driver_version'] == VERSION, "driver_version is incorrect") self.assertTrue( stats['storage_protocol'] == 'iSCSI', "storage_protocol is incorrect") self.assertTrue( stats['vendor_name'] == "EMC", "vendor name is incorrect") self.assertTrue( stats['volume_backend_name'] == "namedbackend", "volume backend name is incorrect") pool_stats = stats['pools'][0] expected_pool_stats = { 'free_capacity_gb': 3105.303, 'reserved_percentage': 32, 'location_info': 'unit_test_pool|fake_serial', 'total_capacity_gb': 3281.146, 'provisioned_capacity_gb': 536.14, 'compression_support': 'True', 'deduplication_support': 'True', 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'max_over_subscription_ratio': 20.0, 'consistencygroup_support': 'True', 'replication_enabled': False, 'replication_targets': [], 'pool_name': 'unit_test_pool', 'fast_cache_enabled': True, 'fast_support': 'True'} self.assertEqual(expected_pool_stats, pool_stats) def test_get_volume_stats_ignore_threshold(self): commands = [self.testData.NDU_LIST_CMD, self.testData.POOL_GET_ALL_CMD(True)] results = [self.testData.NDU_LIST_RESULT, self.testData.POOL_GET_ALL_RESULT(True)] self.driverSetup(commands, results) self.driver.cli.ignore_pool_full_threshold = True stats = self.driver.get_volume_stats(True) pool_stats = stats['pools'][0] self.assertEqual(2, pool_stats['reserved_percentage']) def test_get_volume_stats_reserved_percentage_from_conf(self): commands = [self.testData.NDU_LIST_CMD, self.testData.POOL_GET_ALL_CMD(True)] results = [self.testData.NDU_LIST_RESULT, self.testData.POOL_GET_ALL_RESULT(True)] self.configuration.reserved_percentage = 22 self.driverSetup(commands, results) self.driver.cli.ignore_pool_full_threshold = True stats = self.driver.get_volume_stats(True) pool_stats = stats['pools'][0] self.assertEqual(22, pool_stats['reserved_percentage']) def test_get_volume_stats_too_many_luns(self): commands = [self.testData.NDU_LIST_CMD, self.testData.POOL_GET_ALL_CMD(True), self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()] results = [self.testData.NDU_LIST_RESULT, self.testData.POOL_GET_ALL_RESULT(True), self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)] fake_cli = self.driverSetup(commands, results) self.driver.cli.check_max_pool_luns_threshold = True stats = self.driver.get_volume_stats(True) pool_stats = stats['pools'][0] self.assertTrue( pool_stats['free_capacity_gb'] == 0, "free_capacity_gb is incorrect") expect_cmd = [ mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(), poll=False)] fake_cli.assert_has_calls(expect_cmd) self.driver.cli.check_max_pool_luns_threshold = False stats = self.driver.get_volume_stats(True) pool_stats = stats['pools'][0] 
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration_timeout(self):
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '10.244.213.142'.
Message : Error occurred because connection refused. \
Unable to establish a secure connection to the Management Server.
"""
        FAKE_ERROR_MSG = FAKE_ERROR_MSG.replace('\n', ' ')
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
        results = [(FAKE_ERROR_MSG, 255),
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    (FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not currently migrating',
                     23)]]
        fake_cli = self.driverSetup(commands, results)
        fakehost = {'capabilities': {'location_info':
                                     'unit_test_pool2|fake_serial',
                                     'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                         fakehost)[0]
        self.assertTrue(ret)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)

    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration(self):
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
        results = [SUCCEED,
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not '
                     'currently migrating', 23)]]
        fake_cli = self.driverSetup(commands, results)
        fake_host = {'capabilities': {'location_info':
                                      'unit_test_pool2|fake_serial',
                                      'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                         fake_host)[0]
        self.assertTrue(ret)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
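    # Migration completion in the two tests above is detected by polling
    # MIGRATION_VERIFY_CMD until naviseccli answers 'The specified source
    # LUN is not currently migrating' with return code 23, which the helper
    # treats as "migration session gone", i.e. finished.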
"CommandLineHelper.create_lun_by_cmd", mock.Mock( return_value={'lun_id': 1})) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock( side_effect=[1, 1])) def test_volume_migration_with_rate(self): test_volume_asap = self.testData.test_volume.copy() test_volume_asap.update({'metadata': {'migrate_rate': 'asap'}}) commands = [self.testData.MIGRATION_CMD(rate="asap"), self.testData.MIGRATION_VERIFY_CMD(1)] FAKE_MIGRATE_PROPERTY = """\ Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d Source LU ID: 63950 Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest Dest LU ID: 136 Migration Rate: ASAP Current State: MIGRATED Percent Complete: 100 Time Remaining: 0 second(s) """ results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0), ('The specified source LUN is not ' 'currently migrating', 23)]] fake_cli = self.driverSetup(commands, results) fake_host = {'capabilities': {'location_info': 'unit_test_pool2|fake_serial', 'storage_protocol': 'iSCSI'}} ret = self.driver.migrate_volume(None, test_volume_asap, fake_host)[0] self.assertTrue(ret) # verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(rate='asap'), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=False)] fake_cli.assert_has_calls(expect_cmd) @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli." "CommandLineHelper.create_lun_by_cmd", mock.Mock( return_value={'lun_id': 5})) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:tiering': 'Auto'})) def test_volume_migration_02(self): commands = [self.testData.MIGRATION_CMD(5, 5), self.testData.MIGRATION_VERIFY_CMD(5)] FAKE_MIGRATE_PROPERTY = """\ Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d Source LU ID: 63950 Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest Dest LU ID: 136 Migration Rate: high Current State: MIGRATED Percent Complete: 100 Time Remaining: 0 second(s) """ results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0), ('The specified source LUN is not currently migrating', 23)]] fake_cli = self.driverSetup(commands, results) fakehost = {'capabilities': {'location_info': 'unit_test_pool2|fake_serial', 'storage_protocol': 'iSCSI'}} ret = self.driver.migrate_volume(None, self.testData.test_volume5, fakehost)[0] self.assertTrue(ret) # verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(5), poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(5), poll=False)] fake_cli.assert_has_calls(expect_cmd) @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli." "CommandLineHelper.create_lun_by_cmd", mock.Mock( return_value={'lun_id': 1})) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock( side_effect=[1, 1])) def test_volume_migration_failed(self): commands = [self.testData.MIGRATION_CMD()] results = [FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) fakehost = {'capabilities': {'location_info': 'unit_test_pool2|fake_serial', 'storage_protocol': 'iSCSI'}} ret = self.driver.migrate_volume(None, self.testData.test_volume, fakehost)[0] self.assertFalse(ret) # verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True, poll=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli." 
"CommandLineHelper.create_lun_by_cmd", mock.Mock( return_value={'lun_id': 1})) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock( side_effect=[1, 1])) def test_volume_migration_stopped(self): commands = [self.testData.MIGRATION_CMD(), self.testData.MIGRATION_VERIFY_CMD(1), self.testData.MIGRATION_CANCEL_CMD(1)] results = [SUCCEED, [(self.testData.MIGRATE_PROPERTY_MIGRATING, 0), (self.testData.MIGRATE_PROPERTY_STOPPED, 0), ('The specified source LUN is not ' 'currently migrating', 23)], SUCCEED] fake_cli = self.driverSetup(commands, results) fake_host = {'capabilities': {'location_info': 'unit_test_pool2|fake_serial', 'storage_protocol': 'iSCSI'}} self.assertRaisesRegex(exception.VolumeBackendAPIException, "Migration of LUN 1 has been stopped or" " faulted.", self.driver.migrate_volume, None, self.testData.test_volume, fake_host) expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=False), mock.call(*self.testData.MIGRATION_CANCEL_CMD(1)), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=False)] fake_cli.assert_has_calls(expect_cmd) @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli." "CommandLineHelper.create_lun_by_cmd", mock.Mock( return_value={'lun_id': 1})) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock( side_effect=[1, 1])) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:tiering': 'Auto'})) def test_volume_migration_smp(self): commands = [self.testData.MIGRATION_CMD(), self.testData.MIGRATION_VERIFY_CMD(1)] FAKE_MIGRATE_PROPERTY = """\ Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d Source LU ID: 63950 Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest Dest LU ID: 136 Migration Rate: high Current State: MIGRATED Percent Complete: 100 Time Remaining: 0 second(s) """ results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0), ('The specified source LUN is not ' 'currently migrating', 23)]] fake_cli = self.driverSetup(commands, results) fake_host = {'capabilities': {'location_info': 'unit_test_pool2|fake_serial', 'storage_protocol': 'iSCSI'}} vol = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume) vol['provider_location'] = 'system^FNM11111|type^smp|id^1' vol['volume_metadata'] = [{'key': 'snapcopy', 'value': 'True'}] tmp_snap = "snap-as-vol-%s" % vol['id'] ret = self.driver.migrate_volume(None, vol, fake_host) self.assertTrue(ret[0]) self.assertIn('type^lun', ret[1]['provider_location']) # verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=False), mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap), poll=True)] fake_cli.assert_has_calls(expect_cmd) def test_create_destroy_volume_snapshot(self): fake_cli = self.driverSetup() # case self.driver.create_snapshot(self.testData.test_snapshot) self.driver.delete_snapshot(self.testData.test_snapshot) # verification expect_cmd = [mock.call(*self.testData.SNAP_CREATE_CMD( 'snapshot-4444'), poll=False), mock.call(*self.testData.SNAP_DELETE_CMD( 'snapshot-4444'), poll=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_snapshot_preparing_volume(self): commands 
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_snapshot_preparing_volume(self):
        commands = [self.testData.SNAP_CREATE_CMD('snapshot-4444'),
                    self.testData.LUN_PROPERTY_ALL_CMD('volume-1')]
        results = [[self.testData.LUN_PREP_ERROR(), SUCCEED],
                   [self.testData.LUN_PROPERTY('volume-1', size=1,
                                               operation='Preparing'),
                    self.testData.LUN_PROPERTY('volume-1', size=1,
                                               operation='Optimizing'),
                    self.testData.LUN_PROPERTY('volume-1', size=1,
                                               operation='None')]]
        fake_cli = self.driverSetup(commands, results)

        self.driver.create_snapshot(self.testData.test_snapshot)
        expected = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot-4444'),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'),
                              poll=True),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'),
                              poll=False),
                    mock.call(*self.testData.SNAP_CREATE_CMD('snapshot-4444'),
                              poll=False)]
        fake_cli.assert_has_calls(expected)

    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch('random.randint', mock.Mock(return_value=0))
    def test_initialize_connection(self):
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.PING_OK]

        fake_cli = self.driverSetup(commands, results)

        connection_info = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector)

        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'A', 4, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'A', 0, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'B', 2, 0, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                          '10.0.0.2'))]
        fake_cli.assert_has_calls(expected)

        # Test for manual registration
        self.configuration.initiator_auto_registration = False

        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
        results = [
            [("No group", 83),
             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
            ('', 0),
            self.testData.PING_OK
        ]
        fake_cli = self.driverSetup(commands, results)
        test_volume_rw = self.testData.test_volume_rw
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            self.testData.connector)

        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False),
                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                          '10.0.0.2'))]
        fake_cli.assert_has_calls(expected)

        # Test No Ping
        self.configuration.iscsi_initiators = None

        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost')]
        results = [
            [("No group", 83),
             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
            ('', 0)]
        fake_cli = self.driverSetup(commands, results)
        test_volume_rw = self.testData.test_volume_rw.copy()
        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            self.testData.connector)

        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False)]
        fake_cli.assert_has_calls(expected)
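    # test_initialize_connection above covers three setups: auto
    # registration (one 'storagegroup -setpath' per target port), manual
    # registration ('storagegroup -connecthost'), and no iscsi_initiators
    # configured, in which case the PINGNODE reachability check is skipped
    # entirely.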
    @mock.patch('random.randint', mock.Mock(return_value=0))
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'CommandLineHelper.ping_node',
                mock.Mock(return_value=True))
    @mock.patch('random.shuffle', mock.Mock(return_value=0))
    def test_initialize_connection_multipath(self):
        self.configuration.initiator_auto_registration = False
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [self.testData.STORAGE_GROUP_HAS_MAP_MP('fakehost')]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.iscsi_targets = {
            'A': [
                {'Port WWN': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
                 'SP': 'A',
                 'Port ID': 4,
                 'Virtual Port ID': 0,
                 'IP Address': '10.244.214.118'},
                {'Port WWN': 'iqn.1992-04.com.emc:cx.fnm00124000215.a5',
                 'SP': 'A',
                 'Port ID': 5,
                 'Virtual Port ID': 0,
                 'IP Address': '10.244.214.119'}],
            'B': []}
        test_volume_rw = self.testData.test_volume_rw.copy()
        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
        connector_m = dict(self.testData.connector)
        connector_m['multipath'] = True
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            connector_m)

        self.assertEqual(self.testData.iscsi_connection_info_mp,
                         connection_info)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False)]
        fake_cli.assert_has_calls(expected)
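    # With connector['multipath'] set, the driver hands back every iSCSI
    # target it knows for the owning SP (self.driver.cli.iscsi_targets)
    # rather than a single pinged portal, which is what produces the
    # iscsi_connection_info_mp payload asserted above.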
    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            return_value=3))
    @mock.patch('random.randint', mock.Mock(return_value=0))
    def test_initialize_connection_exist(self):
        """Test initialize connection when the LUN is already in the SG.

        A LUN can be added to the SG right before the attach, so it may
        not exist in the first SG query.
        """
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        self.configuration.max_luns_per_storage_group = 2
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    ('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
                     '-gname', 'fakehost', '-o'),
                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
        results = [[self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
                    self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')],
                   ("fakeerror", 23),
                   self.testData.PING_OK]

        fake_cli = self.driverSetup(commands, results)

        iscsi_data = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector
        )
        self.assertTrue(iscsi_data['data']['target_lun'] == 2,
                        "iSCSI initialize connection returned wrong HLU")
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                          '10.0.0.2'))]
        fake_cli.assert_has_calls(expected)

    @mock.patch('random.randint', mock.Mock(return_value=0))
    def test_initialize_connection_iscsi_white_list(self):
        self.configuration.io_port_list = 'a-0-0,B-2-0'
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')]]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.iscsi_targets = {'A': [{'SP': 'A',
                                                'Port ID': 0,
                                                'Virtual Port ID': 0,
                                                'Port WWN': 'fake_iqn',
                                                'IP Address': '192.168.1.1'}],
                                         'B': [{'SP': 'B',
                                                'Port ID': 2,
                                                'Virtual Port ID': 0,
                                                'Port WWN': 'fake_iqn1',
                                                'IP Address': '192.168.1.2'}]}
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'A', 0, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'B', 2, 0, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'EMCVnxCliBase._build_pool_stats',
                mock.Mock(return_value=None))
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'CommandLineHelper.get_pool',
                mock.Mock(return_value={'total_capacity_gb': 0.0,
                                        'free_capacity_gb': 0.0}))
    def test_update_iscsi_io_ports(self):
        self.configuration.io_port_list = 'a-0-0,B-2-0'
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.GETPORT_CMD()]
        results = [self.testData.WHITE_LIST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.update_volume_stats()
        expected = [mock.call(*self.testData.GETPORT_CMD(), poll=False)]
        fake_cli.assert_has_calls(expected)
        io_ports = self.driver.cli.iscsi_targets
        self.assertEqual((0, 'iqn.1992-04.com.emc:cx.fnmxxx.a0'),
                         (io_ports['A'][0]['Port ID'],
                          io_ports['A'][0]['Port WWN']))
        self.assertEqual((2, 'iqn.1992-04.com.emc:cx.fnmxxx.b2'),
                         (io_ports['B'][0]['Port ID'],
                          io_ports['B'][0]['Port WWN']))

    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            return_value=4))
    @mock.patch('random.randint', mock.Mock(return_value=0))
    def test_initialize_connection_no_hlu_left_1(self):
        """Test initialize connection with no HLU in the first SG query.

        There is no free HLU in the first SG query, but HLUs are
        available once the full poll completes.
        """
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        self.configuration.max_luns_per_storage_group = 2
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    ('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
                     '-gname', 'fakehost', '-o'),
                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
        results = [[self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   ("", 0),
                   self.testData.PING_OK]

        fake_cli = self.driverSetup(commands, results)

        iscsi_data = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector)
        self.assertTrue(iscsi_data['data']['target_lun'] == 2,
                        "iSCSI initialize connection returned wrong HLU")
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                          u'10.0.0.2'))]
        fake_cli.assert_has_calls(expected)

    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            return_value=4))
    @mock.patch('random.randint', mock.Mock(return_value=0))
    def test_initialize_connection_no_hlu_left_2(self):
        """Test initialize connection with no HLU left."""
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        self.configuration.max_luns_per_storage_group = 2
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [
            [self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
             self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')]
        ]

        fake_cli = self.driverSetup(commands, results)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          self.testData.test_volume,
                          self.testData.connector)
        expected = [
            mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                      poll=False),
            mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                      poll=True),
        ]
        fake_cli.assert_has_calls(expected)
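    # The two no_hlu_left tests above pin down HLU allocation: with
    # max_luns_per_storage_group = 2 the driver retries with a polled
    # (non-cached) SG query before giving up; only when the polled view is
    # also full does it raise VolumeBackendAPIException.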
    @mock.patch('os.path.exists', return_value=True)
    def test_terminate_connection(self, _mock_exists):
        self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
            configuration=self.configuration)
        cli_helper = self.driver.cli._client
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {1: 16, 2: 88, 3: 47}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        lun_info = {'lun_name': "unit_test_lun",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        self.driver.terminate_connection(self.testData.test_volume,
                                         self.testData.connector)
        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
            16, self.testData.connector["host"])

    def test_create_volume_cli_failed(self):
        commands = [self.testData.LUN_CREATION_CMD(
            'volume-4', 1, 'unit_test_pool', None, None, poll=False)]
        results = [FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)

        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.create_volume,
                          self.testData.test_failed_volume)
        expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
            'volume-4', 1, 'unit_test_pool', None, None, poll=False))]
        fake_cli.assert_has_calls(expect_cmd)

    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_create_faulted_volume(self):
        volume_name = 'faulted_volume'
        cmd_create = self.testData.LUN_CREATION_CMD(
            volume_name, 1, 'unit_test_pool', None, None, poll=False)
        cmd_list_preparing = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
        commands = [cmd_create, cmd_list_preparing]
        results = [SUCCEED,
                   [self.testData.LUN_PROPERTY(name=volume_name,
                                               state='Faulted',
                                               faulted='true',
                                               operation='Preparing'),
                    self.testData.LUN_PROPERTY(name=volume_name,
                                               state='Faulted',
                                               faulted='true',
                                               operation='None')]]
        fake_cli = self.driverSetup(commands, results)
        faulted_volume = self.testData.test_volume.copy()
        faulted_volume.update({'name': volume_name})
        self.driver.create_volume(faulted_volume)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                volume_name, 1, 'unit_test_pool', None, None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)

    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_create_offline_volume(self):
        volume_name = 'offline_volume'
        cmd_create = self.testData.LUN_CREATION_CMD(
            volume_name, 1, 'unit_test_pool', None, None, poll=False)
        cmd_list = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
        commands = [cmd_create, cmd_list]
        results = [SUCCEED,
                   self.testData.LUN_PROPERTY(name=volume_name,
                                              state='Offline',
                                              faulted='true')]
        self.driverSetup(commands, results)
        offline_volume = self.testData.test_volume.copy()
        offline_volume.update({'name': volume_name})
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               "Volume %s was created in VNX, but in"
                               " Offline state." % volume_name,
                               self.driver.create_volume,
                               offline_volume)
    def test_create_volume_snapshot_failed(self):
        test_snapshot = EMCVNXCLIDriverTestData.convert_snapshot(
            self.testData.test_snapshot1)
        commands = [self.testData.SNAP_CREATE_CMD(test_snapshot.name)]
        results = [FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)
        # case
        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.create_snapshot,
                          test_snapshot)
        # verification
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_CREATE_CMD(test_snapshot.name),
                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)

    @ddt.data('high', 'asap', 'low', 'medium')
    def test_create_volume_from_snapshot(self, migrate_rate):
        test_snapshot = EMCVNXCLIDriverTestData.convert_snapshot(
            self.testData.test_snapshot)
        test_volume = EMCVNXCLIDriverTestData.convert_volume(
            self.testData.test_volume2)
        test_volume.metadata = {'migrate_rate': migrate_rate}
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
            build_migration_dest_name(test_volume.name))
        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD(
            build_migration_dest_name(test_volume.name))
        output_dest = self.testData.LUN_PROPERTY(
            build_migration_dest_name(test_volume.name))
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1, rate=migrate_rate)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_migrate,
                   output_migrate_verify]
        fake_cli1 = self.driverSetup(commands, results)

        self.driver.create_volume_from_snapshot(test_volume, test_snapshot)
        expect_cmd1 = [
            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
                name=test_volume.name, source=test_snapshot.volume_name),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(test_volume.name),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name=test_volume.name, snapName=test_snapshot.name)),
            mock.call(*self.testData.LUN_CREATION_CMD(
                build_migration_dest_name(test_volume.name), 1,
                'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                build_migration_dest_name(test_volume.name)), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                build_migration_dest_name(test_volume.name)), poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1, rate=migrate_rate),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True)]
        fake_cli1.assert_has_calls(expect_cmd1)

        self.configuration.ignore_pool_full_threshold = True
        fake_cli2 = self.driverSetup(commands, results)
        self.driver.create_volume_from_snapshot(test_volume, test_snapshot)
        expect_cmd2 = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                build_migration_dest_name(test_volume.name), 1,
                'unit_test_pool', None, None,
                ignore_thresholds=True))]
        fake_cli2.assert_has_calls(expect_cmd2)
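    # create_volume_from_snapshot above is a three-step dance: create a
    # mount point (SNAP_MP_CREATE_CMD) and attach the snapshot to it,
    # create the real destination LUN, then MIGRATION_CMD the mount point
    # onto that LUN at the rate taken from the volume's 'migrate_rate'
    # metadata.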
"get_volume_type_extra_specs", mock.Mock(return_value={'provisioning:type': 'thick'})) def test_create_volume_from_snapshot_smp(self): fake_cli = self.driverSetup() test_snap = EMCVNXCLIDriverTestData.convert_snapshot( self.testData.test_snapshot) new_volume = self.testData.test_volume_with_type.copy() new_volume['name_id'] = new_volume['id'] vol = self.driver.create_volume_from_snapshot( new_volume, test_snap) self.assertIn('type^smp', vol['provider_location']) expect_cmd = [ mock.call( *self.testData.SNAP_COPY_CMD( src_snap=test_snap.name, snap_name='snap-as-vol-%s' % test_snap.volume.id)), mock.call( *self.testData.SNAP_MODIFY_CMD( name='snap-as-vol-%s' % test_snap.volume.id, rw='yes')), mock.call( *self.testData.SNAP_MP_CREATE_CMD( name=new_volume['name'], source=test_snap.volume_name), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(new_volume['name']), poll=True), mock.call( *self.testData.SNAP_ATTACH_CMD( name=new_volume['name'], snapName='snap-as-vol-%s' % test_snap.volume.id))] fake_cli.assert_has_calls(expect_cmd) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_create_volume_from_snapshot_sync_failed(self): cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('vol2')) cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('vol2')) output_dest = self.testData.LUN_PROPERTY( build_migration_dest_name('vol2')) cmd_migrate = self.testData.MIGRATION_CMD(1, 1) cmd_detach_lun = ('lun', '-detach', '-name', 'volume-2', '-o') output_migrate = ("", 0) cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) cmd_migrate_cancel = self.testData.MIGRATION_CANCEL_CMD(1) output_migrate_cancel = ("", 0) commands = [cmd_dest, cmd_dest_np, cmd_migrate, cmd_migrate_verify, cmd_migrate_cancel] results = [output_dest, output_dest, output_migrate, [FAKE_ERROR_RETURN, output_migrate_verify], output_migrate_cancel] fake_cli = self.driverSetup(commands, results) new_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume2) src_snapshot = EMCVNXCLIDriverTestData.convert_snapshot( self.testData.test_snapshot) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, new_volume, src_snapshot) expect_cmd = [ mock.call( *self.testData.SNAP_MP_CREATE_CMD( name='volume-2', source='volume-1'), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-2'), poll=True), mock.call( *self.testData.SNAP_ATTACH_CMD( name='volume-2', snapName=src_snapshot.name)), mock.call(*self.testData.LUN_CREATION_CMD( build_migration_dest_name('volume-2'), 1, 'unit_test_pool', None, None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-2')), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-2')), poll=False), mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True), mock.call(*self.testData.MIGRATION_CANCEL_CMD(1)), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=False), mock.call(*self.testData.LUN_DELETE_CMD( build_migration_dest_name('volume-2'))), mock.call(*cmd_detach_lun), mock.call(*self.testData.LUN_DELETE_CMD('volume-2'))] fake_cli.assert_has_calls(expect_cmd) def test_create_vol_from_snap_failed_in_migrate_lun(self): cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('vol2')) 
    def test_create_vol_from_snap_failed_in_migrate_lun(self):
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
            build_migration_dest_name('vol2'))
        output_dest = self.testData.LUN_PROPERTY(
            build_migration_dest_name('vol2'))
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        cmd_detach_lun = ('lun', '-detach', '-name', 'volume-2', '-o')
        commands = [cmd_dest, cmd_migrate]
        results = [output_dest, FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)
        test_snapshot = EMCVNXCLIDriverTestData.convert_snapshot(
            self.testData.test_snapshot)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          self.testData.test_volume2,
                          test_snapshot)
        expect_cmd = [
            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
                name='volume-2', source='volume-1'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-2'),
                      poll=True),
            mock.call(*self.testData.SNAP_ATTACH_CMD(
                name='volume-2', snapName=test_snapshot.name)),
            mock.call(*self.testData.LUN_CREATION_CMD(
                build_migration_dest_name('volume-2'), 1,
                'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                build_migration_dest_name('volume-2')), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                build_migration_dest_name('volume-2')), poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      poll=True,
                      retry_disable=True),
            mock.call(*self.testData.LUN_DELETE_CMD(
                build_migration_dest_name('volume-2'))),
            mock.call(*cmd_detach_lun),
            mock.call(*self.testData.LUN_DELETE_CMD('volume-2'))]
        fake_cli.assert_has_calls(expect_cmd)

    @ddt.data('high', 'asap', 'low', 'medium')
    def test_create_cloned_volume(self, migrate_rate):
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
            build_migration_dest_name('volume-2'))
        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD(
            build_migration_dest_name('volume-2'))
        output_dest = self.testData.LUN_PROPERTY(
            build_migration_dest_name('volume-2'))
        cmd_clone = self.testData.LUN_PROPERTY_ALL_CMD("volume-2")
        output_clone = self.testData.LUN_PROPERTY("volume-2")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1, rate=migrate_rate)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [cmd_dest, cmd_dest_p, cmd_clone, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_clone, output_migrate,
                   output_migrate_verify]
        fake_cli = self.driverSetup(commands, results)

        volume = self.testData.test_volume.copy()
        volume['id'] = '2'
        volume = EMCVNXCLIDriverTestData.convert_volume(volume)
        # Make sure this size is used
        volume.size = 10
        volume.metadata = {'migrate_rate': migrate_rate}
        self.driver.create_cloned_volume(volume, self.testData.test_volume)
        tmp_snap = 'tmp-snap-' + volume.id
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_CREATE_CMD(tmp_snap), poll=False),
            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
                name='volume-2', source='volume-1'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-2'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='volume-2', snapName=tmp_snap)),
            mock.call(*self.testData.LUN_CREATION_CMD(
                build_migration_dest_name('volume-2'), 10,
                'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                build_migration_dest_name('volume-2')), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                build_migration_dest_name('volume-2')), poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1, rate=migrate_rate),
                      poll=True,
                      retry_disable=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True),
            mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
                      poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
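    # Cloning (above) reuses the snapshot path: a temporary
    # 'tmp-snap-<id>' snapshot of the source feeds the mount point, the
    # destination LUN is created with the *new* volume's size (10 GB here,
    # not the source's), and the temporary snapshot is deleted once the
    # migration has finished.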
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'provisioning:type': 'thick'}))
    def test_create_cloned_volume_smp(self):
        fake_cli = self.driverSetup()
        test_clone = self.testData.test_clone.copy()
        test_clone['name_id'] = test_clone['id']
        vol = self.driver.create_cloned_volume(
            test_clone, self.testData.test_volume_with_type)
        self.assertIn('type^smp', vol['provider_location'])
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_CREATE_CMD(
                    name='snap-as-vol-%s' % '2'),
                poll=False),
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='volume-2',
                    source='volume-1'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-2'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='volume-2',
                    snapName='snap-as-vol-%s' % '2'))]
        fake_cli.assert_has_calls(expect_cmd)

    def test_delete_volume_failed(self):
        commands = [self.testData.LUN_DELETE_CMD('volume-4')]
        results = [FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)

        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.delete_volume,
                          self.testData.test_failed_volume)
        expected = [mock.call(*self.testData.LUN_DELETE_CMD('volume-4'))]
        fake_cli.assert_has_calls(expected)

    def test_delete_volume_in_sg_failed(self):
        commands = [self.testData.LUN_DELETE_CMD('volume-4'),
                    self.testData.LUN_DELETE_CMD('volume-5')]
        results = [self.testData.LUN_DELETE_IN_SG_ERROR(),
                   self.testData.LUN_DELETE_IN_SG_ERROR(False)]
        self.driverSetup(commands, results)
        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.delete_volume,
                          self.testData.test_volume1_in_sg)
        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.delete_volume,
                          self.testData.test_volume2_in_sg)

    def test_delete_volume_in_sg_force(self):
        commands = [self.testData.LUN_DELETE_CMD('volume-4'),
                    self.testData.STORAGEGROUP_LIST_CMD(),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
                                                             '41'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
                                                             '42'),
                    self.testData.LUN_DELETE_CMD('volume-5'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
                                                             '31'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
                                                             '32')]
        results = [[self.testData.LUN_DELETE_IN_SG_ERROR(),
                    SUCCEED],
                   self.testData.STORAGE_GROUPS_HAS_MAP('fakehost1',
                                                        'fakehost2'),
                   SUCCEED,
                   SUCCEED,
                   [self.testData.LUN_DELETE_IN_SG_ERROR(False),
                    SUCCEED],
                   SUCCEED,
                   SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.force_delete_lun_in_sg = True
        self.driver.delete_volume(self.testData.test_volume1_in_sg)
        self.driver.delete_volume(self.testData.test_volume2_in_sg)
        expected = [mock.call(*self.testData.LUN_DELETE_CMD('volume-4')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
                              poll=True),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost1', '41'), poll=False),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost2', '42'), poll=False),
                    mock.call(*self.testData.LUN_DELETE_CMD('volume-4')),
                    mock.call(*self.testData.LUN_DELETE_CMD('volume-5')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
                              poll=True),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost1', '31'), poll=False),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost2', '32'), poll=False),
                    mock.call(*self.testData.LUN_DELETE_CMD('volume-5'))]
        fake_cli.assert_has_calls(expected)
"get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'compressed'})) def test_delete_volume_smp(self): fake_cli = self.driverSetup() vol = self.testData.test_volume_with_type.copy() vol['metadata'] = [{'key': 'snapcopy', 'value': 'True'}] vol['provider_location'] = 'system^FNM11111|type^smp|id^1' vol['name_id'] = vol['id'] tmp_snap = 'snap-as-vol-%s' % vol['id'] self.driver.delete_volume(vol) expected = [mock.call(*self.testData.LUN_DELETE_CMD(vol['name'])), mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap), poll=True)] fake_cli.assert_has_calls(expected) def test_extend_volume(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1')] results = [self.testData.LUN_PROPERTY('volume-1', size=2)] fake_cli = self.driverSetup(commands, results) # case self.driver.extend_volume(self.testData.test_volume, 2) expected = [mock.call(*self.testData.LUN_EXTEND_CMD('volume-1', 2), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), poll=False)] fake_cli.assert_has_calls(expected) def test_extend_volume_has_snapshot(self): commands = [self.testData.LUN_EXTEND_CMD('volume-4', 2)] results = [FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) self.assertRaises(exception.EMCVnxCLICmdError, self.driver.extend_volume, self.testData.test_failed_volume, 2) expected = [mock.call(*self.testData.LUN_EXTEND_CMD('volume-4', 2), poll=False)] fake_cli.assert_has_calls(expected) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_extend_volume_failed(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-4')] results = [self.testData.LUN_PROPERTY('volume-4', size=2)] fake_cli = self.driverSetup(commands, results) self.driver.cli._client.timeout = 0 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.testData.test_failed_volume, 3) expected = [ mock.call( *self.testData.LUN_EXTEND_CMD('volume-4', 3), poll=False), mock.call( *self.testData.LUN_PROPERTY_ALL_CMD('volume-4'), poll=False)] fake_cli.assert_has_calls(expected) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_extend_preparing_volume(self): commands = [self.testData.LUN_EXTEND_CMD('volume-1', 2), self.testData.LUN_PROPERTY_ALL_CMD('volume-1')] results = [[self.testData.LUN_PREP_ERROR(), SUCCEED], [self.testData.LUN_PROPERTY('volume-1', size=1, operation='Preparing'), self.testData.LUN_PROPERTY('volume-1', size=1, operation='Optimizing'), self.testData.LUN_PROPERTY('volume-1', size=1, operation='None'), self.testData.LUN_PROPERTY('volume-1', size=2)]] fake_cli = self.driverSetup(commands, results) self.driver.extend_volume(self.testData.test_volume, 2) expected = [mock.call(*self.testData.LUN_EXTEND_CMD('volume-1', 2), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), poll=True), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), poll=False), mock.call(*self.testData.LUN_EXTEND_CMD('volume-1', 2), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), poll=False)] fake_cli.assert_has_calls(expected) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={})) def test_manage_existing(self): data = self.testData test_volume = data.test_volume_with_type lun_rename_cmd = data.LUN_RENAME_CMD( test_volume['id'], test_volume['name']) lun_list_cmd = data.LUN_LIST_ALL_CMD(test_volume['id']) commands = (lun_rename_cmd, lun_list_cmd) results = (SUCCEED, (data.LIST_LUN_1_ALL, 0)) self.configuration.storage_vnx_pool_name = ( self.testData.test_pool_name) fake_cli = self.driverSetup(commands, results) self.driver.manage_existing( self.testData.test_volume_with_type, self.testData.test_existing_ref) expected = [mock.call(*lun_rename_cmd, poll=False)] fake_cli.assert_has_calls(expected) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={})) def test_manage_existing_source_name(self): data = self.testData test_volume = data.test_volume_with_type lun_rename_cmd = data.LUN_RENAME_CMD( test_volume['id'], test_volume['name']) lun_list_cmd = data.LUN_LIST_ALL_CMD(test_volume['id']) commands = (lun_rename_cmd, lun_list_cmd) results = (SUCCEED, (data.LIST_LUN_1_ALL, 0)) fake_cli = self.driverSetup(commands, results) self.driver.manage_existing( data.test_volume_with_type, data.test_existing_ref_source_name) expected = [mock.call(*lun_rename_cmd, poll=False)] fake_cli.assert_has_calls(expected) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={ 'storagetype:provisioning': 'compressed', 'compression_support': 'True'})) @mock.patch("time.time", mock.Mock(return_value=123)) def test_manage_existing_success_retype_with_migration(self): data = self.testData test_volume = EMCVNXCLIDriverTestData.convert_volume( data.test_volume_with_type) test_volume.metadata = {} test_volume.provider_location = build_provider_location( 1, 'lun', test_volume.name) lun_rename_cmd = data.LUN_RENAME_CMD( test_volume['id'], test_volume['name']) lun_list_cmd = data.LUN_LIST_ALL_CMD(test_volume['id']) snap_existing_cmd = data.SNAP_LIST_CMD(test_volume['id']) new_lun_name = test_volume['name'] + '-123' lun_create_cmd = data.LUN_CREATION_CMD( new_lun_name, 1, 'unit_test_pool', 'compressed') lun3_status_cmd = data.LUN_PROPERTY_ALL_CMD(new_lun_name) compression_cmd = data.ENABLE_COMPRESSION_CMD(3) lun1_status_cmd = data.LUN_PROPERTY_ALL_CMD(test_volume['name']) migration_cmd = data.MIGRATION_CMD(1, 3) migration_verify_cmd = data.MIGRATION_VERIFY_CMD(1) commands = (lun_list_cmd, snap_existing_cmd, lun_create_cmd, lun3_status_cmd, compression_cmd, lun1_status_cmd, migration_cmd, migration_verify_cmd, lun_rename_cmd) cmd_success = ('', 0) migrate_verify = ('The specified source LUN ' 'is not currently migrating', 23) lun3_status = data.LUN_PROPERTY(new_lun_name, lunid=3) lun1_status = data.LUN_PROPERTY(test_volume['name'], lunid=1) results = ((data.LIST_LUN_1_ALL, 0), ('no snap', 1023), cmd_success, lun3_status, cmd_success, lun1_status, cmd_success, migrate_verify, cmd_success) fake_cli = self.driverSetup(commands, results) self.driver.manage_existing( test_volume, {'source-id': 1}) expected = [mock.call(*lun_list_cmd, poll=False), mock.call(*snap_existing_cmd, poll=False), mock.call(*lun_create_cmd), mock.call(*lun3_status_cmd, poll=False), mock.call(*lun3_status_cmd, poll=False), mock.call(*lun3_status_cmd, poll=True), mock.call(*compression_cmd), mock.call(*migration_cmd, poll=True, retry_disable=True), mock.call(*migration_verify_cmd, poll=True), mock.call(*lun_rename_cmd, poll=False)] fake_cli.assert_has_calls(expected) @mock.patch( 
"cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={ 'storagetype:provisioning': 'thick', 'storagetype:tiering': 'nomovement'})) @mock.patch("time.time", mock.Mock(return_value=1)) def test_manage_existing_success_retype_change_tier(self): data = self.testData test_volume = data.test_volume_with_type lun_rename_cmd = data.LUN_RENAME_CMD( test_volume['id'], test_volume['name']) lun_list_cmd = data.LUN_LIST_ALL_CMD(test_volume['id']) lun_tier_cmd = data.LUN_MODIFY_TIER(data.test_lun_id, 'optimizePool', 'noMovement') commands = (lun_rename_cmd, lun_list_cmd, lun_tier_cmd) cmd_success = ('', 0) results = (cmd_success, (data.LIST_LUN_1_ALL, 0), cmd_success) fake_cli = self.driverSetup(commands, results) self.driver.manage_existing( data.test_volume_with_type, {'source-id': 1}) expected = [mock.call(*lun_list_cmd, poll=False), mock.call(*lun_tier_cmd), mock.call(*lun_rename_cmd, poll=False)] fake_cli.assert_has_calls(expected) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={})) def test_manage_existing_lun_in_another_pool(self): data = self.testData get_lun_cmd = ('lun', '-list', '-l', data.test_lun_id, '-state', '-userCap', '-owner', '-attachedSnapshot', '-poolName') lun_list_cmd = data.LUN_LIST_SPECS_CMD(data.test_lun_id) invalid_pool_name = "fake_pool" commands = (get_lun_cmd, lun_list_cmd) lun_properties = data.LUN_PROPERTY('lun_name', pool_name=invalid_pool_name) results = (lun_properties, (data.LIST_LUN_1_SPECS, 0)) self.configuration.storage_vnx_pool_name = invalid_pool_name fake_cli = self.driverSetup(commands, results) # mock the command executor ex = self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self.testData.test_volume_with_type, self.testData.test_existing_ref) self.assertTrue( re.match(r'.*not managed by the host', ex.msg)) expected = [mock.call(*get_lun_cmd, poll=True)] fake_cli.assert_has_calls(expected) def test_manage_existing_get_size(self): get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id, '-state', '-userCap', '-owner', '-attachedSnapshot', '-poolName') test_size = 2 commands = [get_lun_cmd] results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)] self.configuration.storage_vnx_pool_name = ( self.testData.test_pool_name) fake_cli = self.driverSetup(commands, results) get_size = self.driver.manage_existing_get_size( self.testData.test_volume_with_type, self.testData.test_existing_ref) expected = [mock.call(*get_lun_cmd, poll=True)] assert get_size == test_size fake_cli.assert_has_calls(expected) # Test the function with invalid reference. invaild_ref = {'fake': 'fake_ref'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self.testData.test_volume_with_type, invaild_ref) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'compressed'})) def test_retype_compressed_to_deduplicated(self): """Unit test for retype compressed to deduplicated.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('compressed', 'deduplicated')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'deduplicated'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD(), cmd_migrate_verify] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023), output_migrate_verify] fake_cli1 = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd1 = [ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False), mock.call(*self.testData.LUN_CREATION_CMD( 'volume-3-123456', 2, 'unit_test_pool', 'deduplicated', None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-3-123456'), poll=False), mock.call(*self.testData.MIGRATION_CMD(1, None), retry_disable=True, poll=True)] fake_cli1.assert_has_calls(expect_cmd1) self.configuration.ignore_pool_full_threshold = True fake_cli2 = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd2 = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-3-123456', 2, 'unit_test_pool', 'deduplicated', None, ignore_thresholds=True))] fake_cli2.assert_has_calls(expect_cmd2) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(side_effect=[{'provisioning:type': 'thin'}, {'provisioning:type': 'thick'}])) def test_retype_turn_on_compression_and_autotiering(self): """Unit test for retype a volume to compressed and auto tiering.""" new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'compressed', 'storagetype:tiering': 'auto'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'host@backendsec#unit_test_pool', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD()] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) # Retype a thin volume to a compressed volume self.driver.retype(None, self.testData.test_volume3, new_type_data, None, host_test_data) expect_cmd = [ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False), mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)), mock.call(*self.testData.MODIFY_TIERING_CMD('volume-3', 'auto')) ] fake_cli.assert_has_calls(expect_cmd) # Retype a thick volume to a compressed volume self.driver.retype(None, self.testData.test_volume3, new_type_data, None, host_test_data) fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'provisioning:type': 'thin'})) def test_retype_turn_on_compression_volume_has_snap(self): """Unit test for retype a volume which has snap to compressed.""" new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'compressed'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'host@backendsec#unit_test_pool', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD()] results = [self.testData.NDU_LIST_RESULT, ('Has snap', 0)] self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) # Retype a thin volume which has a snap to a compressed volume retyped = self.driver.retype(None, self.testData.test_volume3, new_type_data, None, host_test_data) self.assertFalse(retyped, "Retype should failed due to " "the volume has snapshot") @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." 
+ "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'deduplicated', 'storagetype:pool': 'unit_test_pool'})) def test_retype_pool_changed_dedup_to_compressed_auto(self): """Test retype from dedup to compressed and auto tiering. Unit test for retype dedup to compressed and auto tiering and pool changed """ diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('deduplicated', 'compressed'), 'storagetype:tiering': (None, 'auto'), 'storagetype:pool': ('unit_test_pool', 'unit_test_pool2')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'compressed', 'storagetype:tiering': 'auto', 'storagetype:pool': 'unit_test_pool2'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1#unit_test_pool2', 'capabilities': {'location_info': 'unit_test_pool2|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD(), self.testData.MIGRATION_VERIFY_CMD(1)] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023), ('The specified source LUN is not currently migrating', 23)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd = [ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False), mock.call(*self.testData.LUN_CREATION_CMD( 'volume-3-123456', 2, 'unit_test_pool2', 'compressed', 'auto')), mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)), mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'compressed', 'storagetype:pool': 'unit_test_pool', 'storagetype:tiering': 'auto'})) def test_retype_compressed_auto_to_compressed_nomovement(self): """Unit test for retype only tiering changed.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:tiering': ('auto', 'nomovement')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'compressed', 'storagetype:tiering': 'nomovement', 'storagetype:pool': 'unit_test_pool'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = { 'host': 'host@backendsec#unit_test_pool', 'capabilities': { 'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD()] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd = [ mock.call( 'lun', '-modify', '-name', 'volume-3', '-o', '-initialTier', 'optimizePool', '-tieringPolicy', 'noMovement')] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'thin', 'storagetype:pool': 'unit_test_pool'})) def test_retype_compressed_to_thin_cross_array(self): """Unit test for retype cross array.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('compressed', 'thin')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'thin', 'storagetype:pool': 'unit_test_pool'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = { 'host': 'ubuntu-server12@pool_backend_2#unit_test_pool', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500891', 'volume_backend_name': 'pool_backend_2', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD()] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) retyped = self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) self.assertFalse(retyped, "Retype should failed due to" " different protocol or array") @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'thin', 'storagetype:tiering': 'auto', 'storagetype:pool': 'unit_test_pool'})) def test_retype_thin_auto_to_dedup_diff_procotol(self): """Unit test for retype different procotol.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('thin', 'deduplicated'), 'storagetype:tiering': ('auto', None)}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'deduplicated', 'storagetype:pool': 'unit_test_pool'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = { 'host': 'ubuntu-server12@pool_backend_2#unit_test_pool', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_2', 'storage_protocol': 'FC'}} commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD(), self.testData.MIGRATION_VERIFY_CMD(1)] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023), ('The specified source LUN is not currently migrating', 23)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd = [ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False), mock.call(*self.testData.LUN_CREATION_CMD( 'volume-3-123456', 2, 'unit_test_pool', 'deduplicated', None)), mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'thin', 'storagetype:tiering': 'auto', 'storagetype:pool': 'unit_test_pool'})) def test_retype_thin_auto_has_snap_to_thick_highestavailable(self): """Unit test for retype volume has snap when need migration.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('thin', None), 'storagetype:tiering': ('auto', 'highestAvailable')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:tiering': 'highestAvailable', 'storagetype:pool': 'unit_test_pool'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = { 'host': 'ubuntu-server12@pool_backend_1#unit_test_pool', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD()] results = [self.testData.NDU_LIST_RESULT, ('Has snap', 0)] self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) retyped = self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) self.assertFalse(retyped, "Retype should failed due to" " different protocol or array") @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'thin', 'storagetype:tiering': 'auto', 'storagetype:pool': 'unit_test_pool'})) def test_retype_thin_auto_to_thin_auto(self): """Unit test for retype volume which has no change.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:tiering': 'auto', 'storagetype:provisioning': 'thin'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = { 'host': 'ubuntu-server12@pool_backend_1#unit_test_pool', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD] results = [self.testData.NDU_LIST_RESULT] self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." "migrate_lun_with_verification", mock.Mock(return_value=True)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." "create_lun_with_advance_feature", mock.Mock(return_value={'lun_id': '1'})) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'thin', 'copytype:snap': 'true'})) def test_retype_copytype_snap_true_to_false(self): diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'copytype:snap': ('true', 'false')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'thin', 'copytype:snap': 'false'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) commands = [self.testData.NDU_LIST_CMD, self.testData.SNAP_LIST_CMD(), cmd_migrate_verify] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023), output_migrate_verify] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) vol = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume3) vol['provider_location'] = 'system^FNM11111|type^smp|id^1' vol['volume_metadata'] = [{'key': 'snapcopy', 'value': 'True'}] tmp_snap = 'snap-as-vol-%s' % vol['id'] ret = self.driver.retype(None, vol, new_type_data, diff_data, host_test_data) self.assertTrue(type(ret) == tuple) self.assertTrue(ret[0]) self.assertIn('type^lun', ret[1]['provider_location']) expect_cmd = [ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False), mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap), poll=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'fast_cache_enabled': 'True'})) def test_create_volume_with_fastcache(self): """Test creating volume with fastcache enabled.""" commands = [self.testData.NDU_LIST_CMD, self.testData.POOL_PROPERTY_W_FASTCACHE_CMD, self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), ] results = [self.testData.NDU_LIST_RESULT, self.testData.POOL_PROPERTY_W_FASTCACHE, self.testData.LUN_PROPERTY('volume-1', True), ] fake_cli = self.driverSetup(commands, results) lun_info = {'lun_name': "volume-1", 'lun_id': 1, 'pool': "unit_test_pool", 'attached_snapshot': "N/A", 'owner': "A", 'total_capacity_gb': 1.0, 'state': "Ready", 'status': 'OK(0x0)', 'operation': 'None' } cli_helper = self.driver.cli._client cli_helper.command_execute = fake_cli cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info) cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache") cli_helper.get_pool_list = mock.Mock(return_value=[{ 'lun_nums': 1000, 'total_capacity_gb': 10, 'free_capacity_gb': 5, 'provisioned_capacity_gb': 8, 'pool_name': "unit_test_pool", 'fast_cache_enabled': 'True', 'state': 'Ready', 'pool_full_threshold': 70.0}]) self.driver.update_volume_stats() self.driver.create_volume(self.testData.test_volume_with_type) pool_stats = self.driver.cli.stats['pools'][0] self.assertEqual('True', pool_stats['fast_cache_enabled']) expect_cmd = [ mock.call('connection', '-getport', '-address', '-vlanid', poll=False), mock.call('-np', 'lun', '-create', '-capacity', 1, '-sq', 'gb', '-poolName', self.testData.test_pool_name, '-name', 'volume-1', '-type', 'NonThin')] fake_cli.assert_has_calls(expect_cmd) def test_get_lun_id_provider_location_exists(self): """Test function get_lun_id.""" self.driverSetup() volume_01 = { 'name': 'vol_01', 'size': 1, 'volume_name': 'vol_01', 'id': '1', 'name_id': '1', 'provider_location': 'system^FNM11111|type^lun|id^4', 'project_id': 'project', 'display_name': 'vol_01', 'display_description': 'test volume', 'volume_type_id': None} self.assertEqual(4, self.driver.cli.get_lun_id(volume_01)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." 
+ "get_lun_by_name", mock.Mock(return_value={'lun_id': 2})) def test_get_lun_id_provider_location_has_no_lun_id(self): """Test function get_lun_id.""" self.driverSetup() volume_02 = { 'name': 'vol_02', 'size': 1, 'volume_name': 'vol_02', 'id': '2', 'provider_location': 'system^FNM11111|type^lun|', 'project_id': 'project', 'display_name': 'vol_02', 'display_description': 'test volume', 'volume_type_id': None} self.assertEqual(2, self.driver.cli.get_lun_id(volume_02)) def test_create_consistency_group(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name), self.testData.GET_CG_BY_NAME_CMD(cg_name)] results = [SUCCEED, self.testData.CG_PROPERTY(cg_name)] fake_cli = self.driverSetup(commands, results) model_update = self.driver.create_consistencygroup( None, self.testData.test_cg) self.assertDictMatch({'status': ( fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) expect_cmd = [ mock.call( *self.testData.CREATE_CONSISTENCYGROUP_CMD( cg_name), poll=False), mock.call( *self.testData.GET_CG_BY_NAME_CMD(cg_name))] fake_cli.assert_has_calls(expect_cmd) def test_create_consistency_group_retry(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name), self.testData.GET_CG_BY_NAME_CMD(cg_name)] results = [SUCCEED, [self.testData.CG_NOT_FOUND(), self.testData.CG_PROPERTY(cg_name)]] fake_cli = self.driverSetup(commands, results) model_update = self.driver.create_consistencygroup( None, self.testData.test_cg) self.assertDictMatch({'status': ( fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) expect_cmd = [ mock.call( *self.testData.CREATE_CONSISTENCYGROUP_CMD( cg_name), poll=False), mock.call( *self.testData.GET_CG_BY_NAME_CMD(cg_name)), mock.call( *self.testData.GET_CG_BY_NAME_CMD(cg_name))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.volume_types.get_volume_type_extra_specs", mock.Mock(side_effect=[{'storagetype:provisioning': 'thin'}, {'storagetype:provisioning': 'compressed'}])) def test_create_consistency_group_failed_with_compression(self): self.driverSetup([], []) self.assertRaisesRegex(exception.VolumeBackendAPIException, "Failed to create consistency group " "consistencygroup_id " "because VNX consistency group cannot " "accept compressed LUNs as members.", self.driver.create_consistencygroup, None, self.testData.test_cg_with_type) def test_delete_consistency_group(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.DELETE_CONSISTENCYGROUP_CMD(cg_name), self.testData.LUN_DELETE_CMD('volume-1')] results = [SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) self.driver.delete_consistencygroup( None, self.testData.test_cg, self.testData.CONSISTENCY_GROUP_VOLUMES()) expect_cmd = [ mock.call( *self.testData.DELETE_CONSISTENCYGROUP_CMD( cg_name)), mock.call(*self.testData.LUN_DELETE_CMD('volume-1')), mock.call(*self.testData.LUN_DELETE_CMD('volume-1'))] fake_cli.assert_has_calls(expect_cmd) def test_create_cgsnapshot(self): cgsnapshot = self.testData.test_cgsnapshot['id'] cg_name = self.testData.test_cgsnapshot['consistencygroup_id'] commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot), self.testData.GET_SNAP(cgsnapshot)] results = [SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) snapshot_obj = fake_snapshot.fake_snapshot_obj( self.testData.SNAPS_IN_SNAP_GROUP()) snapshot_obj.consistencygroup_id = cg_name self.driver.create_cgsnapshot(None, self.testData.test_cgsnapshot, [snapshot_obj]) expect_cmd = [ 
mock.call( *self.testData.CREATE_CG_SNAPSHOT( cg_name, cgsnapshot)), mock.call( *self.testData.GET_SNAP(cgsnapshot))] fake_cli.assert_has_calls(expect_cmd) def test_create_cgsnapshot_retry(self): cgsnapshot = self.testData.test_cgsnapshot['id'] cg_name = self.testData.test_cgsnapshot['consistencygroup_id'] commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot), self.testData.GET_SNAP(cgsnapshot)] results = [SUCCEED, [self.testData.SNAP_NOT_EXIST(), SUCCEED]] fake_cli = self.driverSetup(commands, results) snapshot_obj = fake_snapshot.fake_snapshot_obj( self.testData.SNAPS_IN_SNAP_GROUP()) snapshot_obj.consistencygroup_id = cg_name self.driver.create_cgsnapshot(None, self.testData.test_cgsnapshot, [snapshot_obj]) expect_cmd = [ mock.call( *self.testData.CREATE_CG_SNAPSHOT( cg_name, cgsnapshot)), mock.call( *self.testData.GET_SNAP(cgsnapshot)), mock.call( *self.testData.GET_SNAP(cgsnapshot))] fake_cli.assert_has_calls(expect_cmd) def test_delete_cgsnapshot(self): snap_name = self.testData.test_cgsnapshot['id'] commands = [self.testData.DELETE_CG_SNAPSHOT(snap_name)] results = [SUCCEED] fake_cli = self.driverSetup(commands, results) snapshot_obj = fake_snapshot.fake_snapshot_obj( self.testData.SNAPS_IN_SNAP_GROUP()) cg_name = self.testData.test_cgsnapshot['consistencygroup_id'] snapshot_obj.consistencygroup_id = cg_name self.driver.delete_cgsnapshot(None, self.testData.test_cgsnapshot, [snapshot_obj]) expect_cmd = [ mock.call( *self.testData.DELETE_CG_SNAPSHOT( snap_name))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) def test_add_volume_to_cg(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.ADD_LUN_TO_CG_CMD('cg_id', 1), ] results = [self.testData.LUN_PROPERTY('volume-1', True), SUCCEED] fake_cli = self.driverSetup(commands, results) self.driver.create_volume(self.testData.test_volume_cg) expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-1', 1, 'unit_test_pool', None, None, poll=False)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), poll=False), mock.call(*self.testData.ADD_LUN_TO_CG_CMD( 'cg_id', 1), poll=False)] fake_cli.assert_has_calls(expect_cmd) def test_create_cloned_volume_from_consistency_group(self): cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-1')) cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-1')) output_dest = self.testData.LUN_PROPERTY( build_migration_dest_name('volume-1')) cmd_migrate = self.testData.MIGRATION_CMD(1, 1) output_migrate = ("", 0) cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) cg_name = self.testData.test_cgsnapshot['consistencygroup_id'] commands = [cmd_dest, cmd_dest_p, cmd_migrate, cmd_migrate_verify] results = [output_dest, output_dest, output_migrate, output_migrate_verify] fake_cli = self.driverSetup(commands, results) test_volume_clone_cg = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_clone_cg) self.driver.create_cloned_volume(test_volume_clone_cg, self.testData.test_clone_cg) tmp_cgsnapshot = 'tmp-snap-' + self.testData.test_volume['id'] expect_cmd = [ mock.call( *self.testData.CREATE_CG_SNAPSHOT(cg_name, tmp_cgsnapshot)), mock.call( *self.testData.GET_SNAP(tmp_cgsnapshot)), mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='volume-1', source='volume-2'), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), 
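                  # poll=True below mirrors the polling flag the driver is
                  # expected to pass for this LUN property query.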
poll=True), mock.call( *self.testData.SNAP_ATTACH_CMD( name='volume-1', snapName=tmp_cgsnapshot)), mock.call(*self.testData.LUN_CREATION_CMD( build_migration_dest_name('volume-1'), 1, 'unit_test_pool', None, None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-1')), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-1')), poll=False), mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True), mock.call(*self.testData.DELETE_CG_SNAPSHOT(tmp_cgsnapshot))] fake_cli.assert_has_calls(expect_cmd) def test_create_volume_from_cgsnapshot(self): cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('vol2')) cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('vol2')) output_dest = self.testData.LUN_PROPERTY( build_migration_dest_name('vol2')) cmd_migrate = self.testData.MIGRATION_CMD(1, 1) output_migrate = ("", 0) cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) commands = [cmd_dest, cmd_dest_np, cmd_migrate, cmd_migrate_verify] results = [output_dest, output_dest, output_migrate, output_migrate_verify] fake_cli = self.driverSetup(commands, results) test_snapshot = EMCVNXCLIDriverTestData.convert_snapshot( self.testData.test_member_cgsnapshot) self.driver.create_volume_from_snapshot( self.testData.volume_in_cg, test_snapshot) expect_cmd = [ mock.call( *self.testData.SNAP_MP_CREATE_CMD( name='volume-2', source='volume-1'), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-2'), poll=True), mock.call( *self.testData.SNAP_ATTACH_CMD( name='volume-2', snapName='cgsnapshot_id')), mock.call(*self.testData.LUN_CREATION_CMD( build_migration_dest_name('volume-2'), 1, 'unit_test_pool', None, None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-2')), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( build_migration_dest_name('volume-2')), poll=False), mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True, poll=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True)] fake_cli.assert_has_calls(expect_cmd) def test_update_consistencygroup(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)] results = [self.testData.CG_PROPERTY(cg_name)] fake_cli = self.driverSetup(commands, results) (model_update, add_vols, remove_vols) = ( self.driver.update_consistencygroup(None, self.testData.test_cg, self.testData. 
VOLUMES_NOT_IN_CG(), self.testData.VOLUMES_IN_CG())) expect_cmd = [ mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD( cg_name, ['4', '5']), poll=False)] fake_cli.assert_has_calls(expect_cmd) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status']) def test_update_consistencygroup_remove_all(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)] results = [self.testData.CG_PROPERTY(cg_name)] fake_cli = self.driverSetup(commands, results) (model_update, add_vols, remove_vols) = ( self.driver.update_consistencygroup(None, self.testData.test_cg, None, self.testData.VOLUMES_IN_CG())) expect_cmd = [ mock.call(*self.testData.REMOVE_LUNS_FROM_CG_CMD( cg_name, ['1', '3']), poll=False)] fake_cli.assert_has_calls(expect_cmd) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status']) def test_update_consistencygroup_remove_not_in_cg(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)] results = [self.testData.CG_PROPERTY(cg_name)] fake_cli = self.driverSetup(commands, results) (model_update, add_vols, remove_vols) = ( self.driver.update_consistencygroup(None, self.testData.test_cg, None, self.testData. VOLUMES_NOT_IN_CG())) expect_cmd = [ mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD( cg_name, ['1', '3']), poll=False)] fake_cli.assert_has_calls(expect_cmd) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status']) def test_update_consistencygroup_error(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name), self.testData.REPLACE_LUNS_IN_CG_CMD( cg_name, ['1', '3'])] results = [self.testData.CG_PROPERTY(cg_name), self.testData.CG_REPL_ERROR()] fake_cli = self.driverSetup(commands, results) self.assertRaises(exception.EMCVnxCLICmdError, self.driver.update_consistencygroup, None, self.testData.test_cg, [], self.testData.VOLUMES_NOT_IN_CG()) expect_cmd = [ mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD( cg_name, ['1', '3']), poll=False)] fake_cli.assert_has_calls(expect_cmd) def test_create_consistencygroup_from_cgsnapshot(self): output_migrate_verify = ('The specified source LUN ' 'is not currently migrating.', 23) new_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) new_cg.id = 'new_cg_id' vol1_in_new_cg = self.testData.test_volume_cg.copy() vol1_in_new_cg.update( {'name': 'volume-1_in_cg', 'id': '111111', 'consistencygroup_id': 'new_cg_id', 'provider_location': None}) vol2_in_new_cg = self.testData.test_volume_cg.copy() vol2_in_new_cg.update( {'name': 'volume-2_in_cg', 'id': '222222', 'consistencygroup_id': 'new_cg_id', 'provider_location': None}) src_cgsnap = self.testData.test_cgsnapshot snap1_in_src_cgsnap = EMCVNXCLIDriverTestData.convert_snapshot( self.testData.test_member_cgsnapshot) snap2_in_src_cgsnap = EMCVNXCLIDriverTestData.convert_snapshot( self.testData.test_member_cgsnapshot2) copied_snap_name = 'temp_snapshot_for_%s' % new_cg['id'] td = self.testData commands = [td.SNAP_COPY_CMD(src_cgsnap['id'], copied_snap_name), td.ALLOW_READWRITE_ON_SNAP_CMD(copied_snap_name), td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'], self.testData.test_volume['name']), td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'], copied_snap_name), td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest', vol1_in_new_cg['size'], 'unit_test_pool', 'thin', None), td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'), td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']), td.MIGRATION_CMD(6231, 1), 
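                    # Each command above is paired positionally with the
                    # corresponding canned output in `results` below.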
td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'], self.testData.test_volume2['name']), td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'], copied_snap_name), td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest', vol2_in_new_cg['size'], 'unit_test_pool', 'thin', None), td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'), td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']), td.MIGRATION_CMD(6232, 2), td.MIGRATION_VERIFY_CMD(6231), td.MIGRATION_VERIFY_CMD(6232), td.CREATE_CONSISTENCYGROUP_CMD(new_cg['id'], [6231, 6232]), td.DELETE_CG_SNAPSHOT(copied_snap_name) ] results = [SUCCEED, SUCCEED, SUCCEED, SUCCEED, SUCCEED, td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest', lunid=1), td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231), SUCCEED, SUCCEED, SUCCEED, SUCCEED, td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest', lunid=2), td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232), SUCCEED, output_migrate_verify, output_migrate_verify, SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) cg_model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src( None, new_cg, [vol1_in_new_cg, vol2_in_new_cg], cgsnapshot=src_cgsnap, snapshots=[snap1_in_src_cgsnap, snap2_in_src_cgsnap], source_cg=None, source_vols=None)) self.assertEqual(2, len(volumes_model_update)) self.assertIn('id^%s' % 6231, volumes_model_update[0]['provider_location']) self.assertIn('id^%s' % 6232, volumes_model_update[1]['provider_location']) expect_cmd = [ mock.call(*td.SNAP_COPY_CMD(src_cgsnap['id'], copied_snap_name)), mock.call(*td.ALLOW_READWRITE_ON_SNAP_CMD(copied_snap_name)), mock.call(*td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'], self.testData.test_volume['name']), poll=False), mock.call(*td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']), poll=True), mock.call(*td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'], copied_snap_name)), mock.call(*td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest', vol1_in_new_cg['size'], 'unit_test_pool', 'thick', None)), mock.call(*td.LUN_PROPERTY_ALL_CMD( vol1_in_new_cg['name'] + '_dest'), poll=False), mock.call(*td.LUN_PROPERTY_ALL_CMD( vol1_in_new_cg['name'] + '_dest'), poll=False), mock.call(*td.MIGRATION_CMD(6231, 1), poll=True, retry_disable=True), mock.call(*td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'], self.testData.test_volume2['name']), poll=False), mock.call(*td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']), poll=True), mock.call(*td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'], copied_snap_name)), mock.call(*td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest', vol2_in_new_cg['size'], 'unit_test_pool', 'thick', None)), mock.call(*td.LUN_PROPERTY_ALL_CMD( vol2_in_new_cg['name'] + '_dest'), poll=False), mock.call(*td.LUN_PROPERTY_ALL_CMD( vol2_in_new_cg['name'] + '_dest'), poll=False), mock.call(*td.MIGRATION_CMD(6232, 2), poll=True, retry_disable=True), mock.call(*td.MIGRATION_VERIFY_CMD(6231), poll=True), mock.call(*td.MIGRATION_VERIFY_CMD(6232), poll=True), mock.call(*td.CREATE_CONSISTENCYGROUP_CMD( new_cg['id'], [6231, 6232]), poll=True), mock.call(*td.GET_CG_BY_NAME_CMD(new_cg.id)), mock.call(*td.DELETE_CG_SNAPSHOT(copied_snap_name))] self.assertEqual(expect_cmd, fake_cli.call_args_list) def test_create_cg_from_src_failed_without_source(self): new_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) vol1_in_new_cg = self.testData.test_volume_cg self.driverSetup() self.assertRaises( exception.InvalidInput, self.driver.create_consistencygroup_from_src, new_cg, [vol1_in_new_cg], None, None, None, None) def
test_create_cg_from_src_failed_with_multiple_sources(self): new_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) vol1_in_new_cg = self.testData.test_volume_cg src_cgsnap = self.testData.test_cgsnapshot snap1_in_src_cgsnap = fake_snapshot.fake_snapshot_obj( None, **self.testData.test_member_cgsnapshot) src_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) src_cg.id = 'fake_source_cg' vol1_in_src_cg = {'id': 'fake_volume', 'consistencygroup_id': src_cg.id} self.driverSetup() self.assertRaises( exception.InvalidInput, self.driver.create_consistencygroup_from_src, new_cg, [vol1_in_new_cg], src_cgsnap, [snap1_in_src_cgsnap], src_cg, [vol1_in_src_cg]) def test_create_cg_from_src_failed_with_invalid_source(self): new_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) src_cgsnap = self.testData.test_cgsnapshot vol1_in_new_cg = self.testData.test_volume_cg src_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) src_cg.id = 'fake_source_cg' self.driverSetup() self.assertRaises( exception.InvalidInput, self.driver.create_consistencygroup_from_src, new_cg, [vol1_in_new_cg], src_cgsnap, None, src_cg, None) def test_create_cg_from_cgsnapshot_migrate_failed(self): new_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) new_cg.id = 'new_cg_id' vol1_in_new_cg = self.testData.test_volume_cg.copy() vol1_in_new_cg.update( {'name': 'volume-1_in_cg', 'id': '111111', 'consistencygroup_id': 'new_cg_id', 'provider_location': None}) vol2_in_new_cg = self.testData.test_volume_cg.copy() vol2_in_new_cg.update( {'name': 'volume-2_in_cg', 'id': '222222', 'consistencygroup_id': 'new_cg_id', 'provider_location': None}) src_cgsnap = self.testData.test_cgsnapshot snap1_in_src_cgsnap = EMCVNXCLIDriverTestData.convert_snapshot( self.testData.test_member_cgsnapshot) snap2_in_src_cgsnap = EMCVNXCLIDriverTestData.convert_snapshot( self.testData.test_member_cgsnapshot2) copied_snap_name = 'temp_snapshot_for_%s' % new_cg['id'] td = self.testData commands = [td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'), td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']), td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'), td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']), td.MIGRATION_CMD(6232, 2)] results = [td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest', lunid=1), td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231), td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest', lunid=2), td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232), FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) self.assertRaisesRegex(exception.VolumeBackendAPIException, 'Migrate volume failed', self.driver.create_consistencygroup_from_src, None, new_cg, [vol1_in_new_cg, vol2_in_new_cg], cgsnapshot=src_cgsnap, snapshots=[snap1_in_src_cgsnap, snap2_in_src_cgsnap], source_cg=None, source_vols=None) expect_cmd = [ mock.call(*self.testData.LUN_DELETE_CMD( vol2_in_new_cg['name'] + '_dest')), mock.call('lun', '-detach', '-name', vol2_in_new_cg['name'], '-o'), mock.call(*self.testData.LUN_DELETE_CMD(vol2_in_new_cg['name'])), mock.call(*self.testData.LUN_DELETE_CMD( vol1_in_new_cg['name'] + '_dest')), mock.call('lun', '-detach', '-name', vol1_in_new_cg['name'], '-o'), mock.call(*self.testData.LUN_DELETE_CMD(vol1_in_new_cg['name'])), mock.call(*td.SNAP_DELETE_CMD(copied_snap_name), poll=True)] fake_cli.assert_has_calls(expect_cmd) def test_create_consistencygroup_from_cg(self): 
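        # Creating a CG from a source CG mirrors the cgsnapshot path: a
        # temporary cgsnapshot is taken on the source CG, each member is
        # cloned from it via an SMP plus migration, and the temporary
        # snapshot is deleted at the end.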
output_migrate_verify = ('The specified source LUN ' 'is not currently migrating.', 23) new_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) new_cg.id = 'new_cg_id' vol1_in_new_cg = self.testData.test_volume_cg.copy() vol1_in_new_cg.update( {'name': 'volume-1_in_cg', 'id': '111111', 'consistencygroup_id': 'new_cg_id', 'provider_location': None}) vol2_in_new_cg = self.testData.test_volume_cg.copy() vol2_in_new_cg.update( {'name': 'volume-2_in_cg', 'id': '222222', 'consistencygroup_id': 'new_cg_id', 'provider_location': None}) src_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) src_cg.id = 'src_cg_id' vol1_in_src_cg = self.testData.test_volume_cg.copy() vol1_in_src_cg.update( {'name': 'volume-1_in_src_cg', 'id': '111110000', 'consistencygroup_id': 'src_cg_id', 'provider_location': build_provider_location( 1, 'lun', 'volume-1_in_src_cg')}) vol2_in_src_cg = self.testData.test_volume_cg.copy() vol2_in_src_cg.update( {'name': 'volume-2_in_src_cg', 'id': '222220000', 'consistencygroup_id': 'src_cg_id', 'provider_location': build_provider_location( 2, 'lun', 'volume-2_in_src_cg')}) temp_snap_name = 'temp_snapshot_for_%s' % new_cg['id'] td = self.testData commands = [td.CREATE_CG_SNAPSHOT(src_cg['id'], temp_snap_name), td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'], vol1_in_src_cg['name']), td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'], temp_snap_name), td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest', vol1_in_new_cg['size'], 'unit_test_pool', 'thin', None), td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'), td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']), td.MIGRATION_CMD(6231, 1), td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'], vol2_in_src_cg['name']), td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'], temp_snap_name), td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest', vol2_in_new_cg['size'], 'unit_test_pool', 'thin', None), td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'), td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']), td.MIGRATION_CMD(6232, 2), td.MIGRATION_VERIFY_CMD(6231), td.MIGRATION_VERIFY_CMD(6232), td.CREATE_CONSISTENCYGROUP_CMD(new_cg['id'], [6231, 6232]), td.DELETE_CG_SNAPSHOT(temp_snap_name) ] results = [SUCCEED, SUCCEED, SUCCEED, SUCCEED, td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest', lunid=1), td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231), SUCCEED, SUCCEED, SUCCEED, SUCCEED, td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest', lunid=2), td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232), SUCCEED, output_migrate_verify, output_migrate_verify, SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) cg_model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src( None, new_cg, [vol1_in_new_cg, vol2_in_new_cg], cgsnapshot=None, snapshots=None, source_cg=src_cg, source_vols=[vol1_in_src_cg, vol2_in_src_cg])) self.assertEqual(2, len(volumes_model_update)) self.assertIn('id^%s' % 6231, volumes_model_update[0]['provider_location']) self.assertIn('id^%s' % 6232, volumes_model_update[1]['provider_location']) delete_temp_snap_cmd = [ mock.call(*td.DELETE_CG_SNAPSHOT(temp_snap_name))] fake_cli.assert_has_calls(delete_temp_snap_cmd) @mock.patch.object(emc_vnx_cli, 'LOG') @mock.patch.object(emc_vnx_cli.CommandLineHelper, 'delete_cgsnapshot') def test_delete_temp_cgsnapshot_failed_will_not_raise_exception( self, mock_delete_cgsnapshot, mock_logger): temp_snap_name = 'fake_temp' self.driverSetup() mock_delete_cgsnapshot.side_effect = exception.EMCVnxCLICmdError(
cmd='fake_cmd', rc=200, out='fake_output') self.driver.cli._delete_temp_cgsnap(temp_snap_name) mock_delete_cgsnapshot.assert_called_once_with(temp_snap_name) self.assertTrue(mock_logger.warning.called) @mock.patch.object(emc_vnx_cli.CreateSMPTask, 'execute', mock.Mock(side_effect=exception.EMCVnxCLICmdError( cmd='fake_cmd', rc=20, out='fake_output'))) @mock.patch.object(emc_vnx_cli.CreateSMPTask, 'revert', mock.Mock()) def test_create_consistencygroup_from_cg_roll_back(self): new_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) new_cg.id = 'new_cg_id' vol1_in_new_cg = self.testData.test_volume_cg.copy() vol1_in_new_cg.update( {'name': 'volume-1_in_cg', 'id': '111111', 'consistencygroup_id': 'new_cg_id', 'provider_location': None}) src_cg = fake_consistencygroup.fake_consistencyobject_obj( None, **self.testData.test_cg) src_cg.id = 'src_cg_id' vol1_in_src_cg = self.testData.test_volume_cg.copy() vol1_in_src_cg.update( {'name': 'volume-1_in_src_cg', 'id': '111110000', 'consistencygroup_id': 'src_cg_id', 'provider_location': build_provider_location( 1, 'lun', 'volume-1_in_src_cg')}) temp_snap_name = 'temp_snapshot_for_%s' % new_cg['id'] td = self.testData commands = [td.CREATE_CG_SNAPSHOT(src_cg['id'], temp_snap_name), td.DELETE_CG_SNAPSHOT(temp_snap_name)] results = [SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) self.assertRaises( exception.EMCVnxCLICmdError, self.driver.create_consistencygroup_from_src, None, new_cg, [vol1_in_new_cg], cgsnapshot=None, snapshots=None, source_cg=src_cg, source_vols=[vol1_in_src_cg]) rollback_cmd = [ mock.call(*td.DELETE_CG_SNAPSHOT(temp_snap_name))] fake_cli.assert_has_calls(rollback_cmd) def test_deregister_initiator(self): fake_cli = self.driverSetup() self.driver.cli.destroy_empty_sg = True self.driver.cli.itor_auto_dereg = True cli_helper = self.driver.cli._client data = {'storage_group_name': "fakehost", 'storage_group_uid': "2F:D4:00:00:00:00:00:" "00:00:00:FF:E5:3A:03:FD:6D", 'lunmap': {1: 16}} cli_helper.get_storage_group = mock.Mock( return_value=data) lun_info = {'lun_name': "unit_test_lun", 'lun_id': 1, 'pool': "unit_test_pool", 'attached_snapshot': "N/A", 'owner': "A", 'total_capacity_gb': 1.0, 'state': "Ready"} cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info) cli_helper.remove_hlu_from_storagegroup = mock.Mock() cli_helper.disconnect_host_from_storage_group = mock.Mock() cli_helper.delete_storage_group = mock.Mock() self.driver.terminate_connection(self.testData.test_volume, self.testData.connector) expect_cmd = [ mock.call('port', '-removeHBA', '-hbauid', self.testData.connector['initiator'], '-o')] fake_cli.assert_has_calls(expect_cmd) def test_unmanage(self): self.driverSetup() try: self.driver.unmanage(self.testData.test_volume) except NotImplementedError: self.fail('Interface unmanage need to be implemented') @mock.patch("random.shuffle", mock.Mock()) def test_find_available_iscsi_targets_without_pingnode(self): self.configuration.iscsi_initiators = None self.driverSetup() port_a1 = {'Port WWN': 'fake_iqn_a1', 'SP': 'A', 'Port ID': 1, 'Virtual Port ID': 0, 'IP Address': 'fake_ip_a1'} port_a2 = {'Port WWN': 'fake_iqn_a2', 'SP': 'A', 'Port ID': 2, 'Virtual Port ID': 0, 'IP Address': 'fake_ip_a2'} port_b1 = {'Port WWN': 'fake_iqn_b1', 'SP': 'B', 'Port ID': 1, 'Virtual Port ID': 0, 'IP Address': 'fake_ip_b1'} all_targets = {'A': [port_a1, port_a2], 'B': [port_b1]} targets = self.driver.cli._client.find_available_iscsi_targets( 'fakehost', {('A', 2, 0), ('B', 1, 0)}, 
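            # The second argument is the set of registered (SP, Port ID,
            # Virtual Port ID) tuples for the host; candidate targets are
            # selected from all_targets against this set.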
all_targets) self.assertTrue(port_a2 in targets) self.assertTrue(port_b1 in targets) @mock.patch.object(emc_vnx_cli.CommandLineHelper, 'ping_node') def test_find_available_iscsi_targets_with_pingnode(self, ping_node): self.configuration.iscsi_initiators = ( '{"fakehost": ["10.0.0.2"]}') self.driverSetup() port_a1 = {'Port WWN': 'fake_iqn_a1', 'SP': 'A', 'Port ID': 1, 'Virtual Port ID': 0, 'IP Address': 'fake_ip_a1'} port_a2 = {'Port WWN': 'fake_iqn_a2', 'SP': 'A', 'Port ID': 2, 'Virtual Port ID': 0, 'IP Address': 'fake_ip_a2'} port_b1 = {'Port WWN': 'fake_iqn_b1', 'SP': 'B', 'Port ID': 1, 'Virtual Port ID': 0, 'IP Address': 'fake_ip_b1'} all_targets = {'A': [port_a1, port_a2], 'B': [port_b1]} ping_node.side_effect = [False, False, True] targets = self.driver.cli._client.find_available_iscsi_targets( 'fakehost', {('A', 2, 0), ('A', 1, 0), ('B', 1, 0)}, all_targets) self.assertTrue(port_a1 in targets) self.assertTrue(port_a2 in targets) self.assertTrue(port_b1 in targets) @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.' 'EMCVnxCliBase.get_lun_owner', mock.Mock(return_value='A')) @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.' 'CommandLineHelper.get_registered_spport_set', mock.Mock()) @mock.patch.object(emc_vnx_cli.CommandLineHelper, 'find_available_iscsi_targets') def test_vnx_get_iscsi_properties(self, find_available_iscsi_targets): self.driverSetup() port_a1 = {'Port WWN': 'fake_iqn_a1', 'SP': 'A', 'Port ID': 1, 'Virtual Port ID': 0, 'IP Address': 'fake_ip_a1'} port_b1 = {'Port WWN': 'fake_iqn_b1', 'SP': 'B', 'Port ID': 1, 'Virtual Port ID': 0, 'IP Address': 'fake_ip_b1'} find_available_iscsi_targets.return_value = [port_a1, port_b1] connect_info = self.driver.cli.vnx_get_iscsi_properties( self.testData.test_volume, self.testData.connector, 1, '') expected_info = { 'target_discovered': True, 'target_iqns': [ 'fake_iqn_a1', 'fake_iqn_b1'], 'target_iqn': 'fake_iqn_a1', 'target_luns': [1, 1], 'target_lun': 1, 'target_portals': [ 'fake_ip_a1:3260', 'fake_ip_b1:3260'], 'target_portal': 'fake_ip_a1:3260', 'volume_id': '1'} self.assertEqual(expected_info, connect_info) def test_update_migrated_volume(self): self.driverSetup() expected_update = {'provider_location': self.testData.test_volume2['provider_location'], 'metadata': {'snapcopy': 'False'}} model_update = self.driver.update_migrated_volume( None, self.testData.test_volume, self.testData.test_volume2, 'available') self.assertDictMatch(expected_update, model_update) class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase): def setUp(self): super(EMCVNXCLIDArrayBasedDriverTestCase, self).setUp() self.configuration.safe_get = self.fake_safe_get( {'storage_vnx_pool_names': None, 'volume_backend_name': 'namedbackend'}) def generate_driver(self, conf): driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf) return driver def test_get_volume_stats(self): commands = [self.testData.NDU_LIST_CMD, self.testData.POOL_GET_ALL_CMD(True)] results = [self.testData.NDU_LIST_RESULT, self.testData.POOL_GET_ALL_RESULT(True)] self.driverSetup(commands, results) stats = self.driver.get_volume_stats(True) self.assertTrue(stats['driver_version'] == VERSION, "driver_version is incorrect") self.assertTrue( stats['storage_protocol'] == 'iSCSI', "storage_protocol is not correct") self.assertTrue( stats['vendor_name'] == "EMC", "vendor name is not correct") self.assertTrue( stats['volume_backend_name'] == "namedbackend", "volume backend name is not correct") self.assertEqual(2, len(stats['pools'])) pool_stats1 = stats['pools'][0] expected_pool_stats1 
= { 'free_capacity_gb': 3105.303, 'reserved_percentage': 32, 'location_info': 'unit_test_pool|fake_serial', 'total_capacity_gb': 3281.146, 'provisioned_capacity_gb': 536.140, 'compression_support': 'True', 'deduplication_support': 'True', 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'consistencygroup_support': 'True', 'replication_enabled': False, 'replication_targets': [], 'pool_name': 'unit_test_pool', 'max_over_subscription_ratio': 20.0, 'fast_cache_enabled': True, 'fast_support': 'True'} self.assertEqual(expected_pool_stats1, pool_stats1) pool_stats2 = stats['pools'][1] expected_pool_stats2 = { 'free_capacity_gb': 3984.768, 'reserved_percentage': 32, 'location_info': 'unit_test_pool2|fake_serial', 'total_capacity_gb': 4099.992, 'provisioned_capacity_gb': 636.240, 'compression_support': 'True', 'deduplication_support': 'True', 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'consistencygroup_support': 'True', 'replication_enabled': False, 'replication_targets': [], 'pool_name': 'unit_test_pool2', 'max_over_subscription_ratio': 20.0, 'fast_cache_enabled': False, 'fast_support': 'True'} self.assertEqual(expected_pool_stats2, pool_stats2) def test_get_volume_stats_wo_fastcache(self): commands = (self.testData.NDU_LIST_CMD, self.testData.POOL_GET_ALL_CMD(False)) results = (self.testData.NDU_LIST_RESULT_WO_LICENSE, self.testData.POOL_GET_ALL_RESULT(False)) self.driverSetup(commands, results) stats = self.driver.get_volume_stats(True) self.assertEqual(2, len(stats['pools'])) pool_stats1 = stats['pools'][0] expected_pool_stats1 = { 'free_capacity_gb': 3105.303, 'reserved_percentage': 32, 'location_info': 'unit_test_pool|fake_serial', 'total_capacity_gb': 3281.146, 'provisioned_capacity_gb': 536.140, 'compression_support': 'False', 'deduplication_support': 'False', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'consistencygroup_support': 'False', 'pool_name': 'unit_test_pool', 'replication_enabled': False, 'replication_targets': [], 'max_over_subscription_ratio': 20.0, 'fast_cache_enabled': 'False', 'fast_support': 'False'} self.assertEqual(expected_pool_stats1, pool_stats1) pool_stats2 = stats['pools'][1] expected_pool_stats2 = { 'free_capacity_gb': 3984.768, 'reserved_percentage': 32, 'location_info': 'unit_test_pool2|fake_serial', 'total_capacity_gb': 4099.992, 'provisioned_capacity_gb': 636.240, 'compression_support': 'False', 'deduplication_support': 'False', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'consistencygroup_support': 'False', 'replication_enabled': False, 'replication_targets': [], 'pool_name': 'unit_test_pool2', 'max_over_subscription_ratio': 20.0, 'fast_cache_enabled': 'False', 'fast_support': 'False'} self.assertEqual(expected_pool_stats2, pool_stats2) def test_get_volume_stats_storagepool_states(self): commands = (self.testData.POOL_GET_ALL_CMD(False),) results = (self.testData.POOL_GET_ALL_STATES_TEST (['Initializing', 'Ready', 'Faulted', 'Offline', 'Deleting']),) self.driverSetup(commands, results) stats = self.driver.get_volume_stats(True) self.assertTrue( stats['pools'][0]['free_capacity_gb'] == 0, "free_capacity_gb is incorrect") self.assertTrue( stats['pools'][1]['free_capacity_gb'] != 0, "free_capacity_gb is incorrect") self.assertTrue( stats['pools'][2]['free_capacity_gb'] != 0, "free_capacity_gb is incorrect") self.assertTrue( stats['pools'][3]['free_capacity_gb'] == 0, "free_capacity_gb is incorrect") self.assertTrue( stats['pools'][4]['free_capacity_gb'] == 0, 
"free_capacity_gb is incorrect") @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'})) def test_create_volume_deduplicated(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1')] results = [self.testData.LUN_PROPERTY('volume-1', True)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] # Case self.driver.create_volume(self.testData.test_volume_with_type) # Verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-1', 1, 'unit_test_pool', 'deduplicated', None, poll=False)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), poll=False)] fake_cli.assert_has_calls(expect_cmd) def test_get_pool(self): testVolume = self.testData.test_volume_with_type commands = [self.testData.LUN_PROPERTY_POOL_CMD(testVolume['name'])] results = [self.testData.LUN_PROPERTY(testVolume['name'], False)] fake_cli = self.driverSetup(commands, results) pool = self.driver.get_pool(testVolume) self.assertEqual('unit_test_pool', pool) fake_cli.assert_has_calls( [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD( testVolume['name']), poll=False)]) def test_get_target_pool_for_cloned_volme(self): testSrcVolume = self.testData.test_volume testNewVolume = self.testData.test_volume2 fake_cli = self.driverSetup() pool = self.driver.cli.get_target_storagepool(testNewVolume, testSrcVolume) self.assertEqual('unit_test_pool', pool) self.assertFalse(fake_cli.called) def test_get_target_pool_for_clone_legacy_volme(self): testSrcVolume = self.testData.test_legacy_volume testNewVolume = self.testData.test_volume2 commands = [self.testData.LUN_PROPERTY_POOL_CMD(testSrcVolume['name'])] results = [self.testData.LUN_PROPERTY(testSrcVolume['name'], False)] fake_cli = self.driverSetup(commands, results) pool = self.driver.cli.get_target_storagepool(testNewVolume, testSrcVolume) self.assertEqual('unit_test_pool', pool) fake_cli.assert_has_calls( [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD( testSrcVolume['name']), poll=False)]) def test_manage_existing_get_size(self): get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id, '-state', '-userCap', '-owner', '-attachedSnapshot', '-poolName') test_size = 2 commands = [get_lun_cmd] results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)] fake_cli = self.driverSetup(commands, results) test_volume = self.testData.test_volume2.copy() test_volume['host'] = "host@backendsec#unit_test_pool" get_size = self.driver.manage_existing_get_size( test_volume, self.testData.test_existing_ref) expected = [mock.call(*get_lun_cmd, poll=True)] self.assertEqual(test_size, get_size) fake_cli.assert_has_calls(expected) def test_manage_existing_get_size_incorrect_pool(self): """Test manage_existing function of driver with an invalid pool.""" get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id, '-state', '-userCap', '-owner', '-attachedSnapshot', '-poolName') commands = [get_lun_cmd] results = [self.testData.LUN_PROPERTY('lun_name')] fake_cli = self.driverSetup(commands, results) test_volume = self.testData.test_volume2.copy() test_volume['host'] = "host@backendsec#fake_pool" ex = self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, test_volume, self.testData.test_existing_ref) self.assertTrue( re.match(r'.*not managed by the host', ex.msg)) expected = 
[mock.call(*get_lun_cmd, poll=True)] fake_cli.assert_has_calls(expected) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={})) def test_manage_existing(self): data = self.testData test_volume = data.test_volume_with_type lun_rename_cmd = data.LUN_RENAME_CMD( test_volume['id'], test_volume['name']) lun_list_cmd = data.LUN_LIST_ALL_CMD(test_volume['id']) commands = lun_rename_cmd, lun_list_cmd results = SUCCEED, (data.LIST_LUN_1_SPECS, 0) fake_cli = self.driverSetup(commands, results) self.driver.manage_existing( self.testData.test_volume_with_type, self.testData.test_existing_ref) expected = [mock.call(*lun_rename_cmd, poll=False)] fake_cli.assert_has_calls(expected) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'storagetype:provisioning': 'Compressed', 'storagetype:pool': 'unit_test_pool'})) def test_create_compression_volume(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.LUN_PROPERTY_ALL_CMD('volume-1'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('volume-1', True), self.testData.LUN_PROPERTY('volume-1', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.stats['compression_support'] = 'True' self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] # Case self.driver.create_volume(self.testData.test_volume_with_type) # Verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'volume-1', 1, 'unit_test_pool', 'compressed', None, poll=False)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'volume-1'), poll=False), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'volume-1'), poll=True), mock.call(*self.testData.ENABLE_COMPRESSION_CMD( 1))] fake_cli.assert_has_calls(expect_cmd) def test_get_registered_spport_set(self): self.driverSetup() spport_set = self.driver.cli._client.get_registered_spport_set( 'iqn.1993-08.org.debian:01:222', 'fakehost', self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')[0]) self.assertEqual({('A', 2, 0), ('A', 0, 0), ('B', 2, 0)}, spport_set) def test_validate_iscsi_port(self): self.driverSetup() port_list = ( "SP: A\n" "Port ID: 6\n" "Port WWN: iqn.fake.a6\n" "iSCSI Alias: 1111.a6\n" "\n" "Virtual Port ID: 0\n" "VLAN ID: Disabled\n" "\n" "SP: B\n" "Port ID: 7\n" "Port WWN: iqn.fake.b7\n" "iSCSI Alias: 0235.b7" "\n" "Virtual Port ID: 0\n" "VLAN ID: Disabled\n" "\n" "Virtual Port ID: 1\n" "VLAN ID: 200\n" "\n\n") self.assertFalse(self.driver.cli._validate_iscsi_port( 'A', 5, 0, port_list)) self.assertTrue(self.driver.cli._validate_iscsi_port( 'A', 6, 0, port_list)) self.assertFalse(self.driver.cli._validate_iscsi_port( 'A', 6, 2, port_list)) self.assertTrue(self.driver.cli._validate_iscsi_port( 'B', 7, 1, port_list)) self.assertTrue(self.driver.cli._validate_iscsi_port( 'B', 7, 0, port_list)) self.assertFalse(self.driver.cli._validate_iscsi_port( 'B', 7, 2, port_list)) class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase): def generate_driver(self, conf): return emc_cli_fc.EMCCLIFCDriver(configuration=conf) @mock.patch( "oslo_concurrency.processutils.execute", mock.Mock( return_value=( "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0))) @mock.patch('random.randint', mock.Mock(return_value=0)) def test_initialize_connection_fc_auto_reg(self): # Test for auto registration test_volume = self.testData.test_volume.copy() 
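        # The provider_location literal set below follows the caret/pipe
        # layout exercised throughout these tests: each '|'-separated token
        # is a '<key>^<value>' pair (system, type, id, and in some tests a
        # base LUN name).  A minimal sketch of how such a value can be
        # unpacked -- parse_provider_location is a hypothetical helper shown
        # only to document the format, not the driver's own parsing code:
        #
        #     def parse_provider_location(pl):
        #         # 'system^fakesn|type^lun|id^1' ->
        #         # {'system': 'fakesn', 'type': 'lun', 'id': '1'}
        #         return dict(part.split('^', 1) for part in pl.split('|'))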
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('port', '-list', '-sp'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
                                    ':12:34:56', 'A', '0', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
                                    ':12:34:56', 'B', '2', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:90'
                                    ':54:32:16', 'A', '0', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:90'
                                    ':54:32:16', 'B', '2', None, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')
                    ]
        fake_cli.assert_has_calls(expected)

        # Test for manual registration
        self.configuration.initiator_auto_registration = False

        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_NO_MAP('fakehost')],
                   ('', 0),
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')
                    ]
        fake_cli.assert_has_calls(expected)

    @mock.patch(
        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService."
+ "get_device_mapping_from_network", mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map)) @mock.patch('random.randint', mock.Mock(return_value=0)) def test_initialize_connection_fc_auto_zoning(self): # Test for auto zoning self.configuration.zoning_mode = 'fabric' self.configuration.initiator_auto_registration = False commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'), self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'), self.testData.GETFCPORT_CMD()] results = [[("No group", 83), self.testData.STORAGE_GROUP_NO_MAP('fakehost'), self.testData.STORAGE_GROUP_HAS_MAP('fakehost')], ('', 0), self.testData.FC_PORTS] fake_cli = self.driverSetup(commands, results) self.driver.cli.zonemanager_lookup_service = ( fc_service.FCSanLookupService(configuration=self.configuration)) conn_info = self.driver.initialize_connection( self.testData.test_volume, self.testData.connector) self.assertEqual(EMCVNXCLIDriverTestData.i_t_map, conn_info['data']['initiator_target_map']) self.assertEqual(['1122334455667777'], conn_info['data']['target_wwn']) expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'), poll=False), mock.call('storagegroup', '-create', '-gname', 'fakehost'), mock.call('storagegroup', '-connecthost', '-host', 'fakehost', '-gname', 'fakehost', '-o'), mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'), poll=True), mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1, '-gname', 'fakehost', '-o', poll=False), mock.call('storagegroup', '-list', '-gname', 'fakehost', poll=True), mock.call('port', '-list', '-sp')] fake_cli.assert_has_calls(expected) @mock.patch('random.randint', mock.Mock(return_value=0)) def test_initialize_connection_fc_white_list(self): self.configuration.io_port_list = 'a-0,B-2' test_volume = self.testData.test_volume.copy() test_volume['provider_location'] = 'system^fakesn|type^lun|id^1' self.configuration.initiator_auto_registration = True commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'), self.testData.GETFCPORT_CMD(), ('port', '-list', '-gname', 'fakehost')] results = [[("No group", 83), self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')], self.testData.FC_PORTS, self.testData.FAKEHOST_PORTS] fake_cli = self.driverSetup(commands, results) data = self.driver.initialize_connection( test_volume, self.testData.connector) expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'), poll=False), mock.call('storagegroup', '-create', '-gname', 'fakehost'), mock.call(*self.testData.set_path_cmd( 'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:' '90:12:34:56', 'A', 0, None, '10.0.0.2')), mock.call(*self.testData.set_path_cmd( 'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:' '90:12:34:56', 'B', 2, None, '10.0.0.2')), mock.call(*self.testData.set_path_cmd( 'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78' ':90:54:32:16', 'A', 0, None, '10.0.0.2')), mock.call(*self.testData.set_path_cmd( 'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78' ':90:54:32:16', 'B', 2, None, '10.0.0.2')), mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'), poll=True), mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1, '-gname', 'fakehost', '-o', poll=False), mock.call('port', '-list', '-gname', 'fakehost')] fake_cli.assert_has_calls(expected) self.assertEqual(set(['5006016A0860080F', '5006016008600195']), set(data['data']['target_wwn'])) @mock.patch('random.randint', mock.Mock(return_value=0)) def test_initialize_connection_fc_port_registered_wl(self): self.configuration.io_port_list = 'a-0,B-2' test_volume = 
self.testData.test_volume.copy() test_volume['provider_location'] = 'system^fakesn|type^lun|id^1' self.configuration.initiator_auto_registration = True commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'), self.testData.GETFCPORT_CMD(), ('port', '-list', '-gname', 'fakehost')] results = [self.testData.STORAGE_GROUP_ISCSI_FC_HBA('fakehost'), self.testData.FC_PORTS, self.testData.FAKEHOST_PORTS] fake_cli = self.driverSetup(commands, results) data = self.driver.initialize_connection( test_volume, self.testData.connector) expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'), poll=False), mock.call(*self.testData.set_path_cmd( 'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90' ':12:34:56', 'A', 0, None, '10.0.0.2')), mock.call(*self.testData.set_path_cmd( 'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:' '90:54:32:16', 'A', 0, None, '10.0.0.2')), mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'), poll=True), mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1, '-gname', 'fakehost', '-o', poll=False), mock.call('port', '-list', '-gname', 'fakehost')] fake_cli.assert_has_calls(expected) self.assertEqual(set(['5006016A0860080F', '5006016008600195']), set(data['data']['target_wwn'])) @mock.patch( "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." + "get_device_mapping_from_network", mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map)) def test_terminate_connection_remove_zone_false(self): self.driver = emc_cli_fc.EMCCLIFCDriver( configuration=self.configuration) cli_helper = self.driver.cli._client data = {'storage_group_name': "fakehost", 'storage_group_uid': "2F:D4:00:00:00:00:00:" "00:00:00:FF:E5:3A:03:FD:6D", 'lunmap': {1: 16, 2: 88, 3: 47}} cli_helper.get_storage_group = mock.Mock( return_value=data) cli_helper.remove_hlu_from_storagegroup = mock.Mock() self.driver.cli.zonemanager_lookup_service = ( fc_service.FCSanLookupService(configuration=self.configuration)) connection_info = self.driver.terminate_connection( self.testData.test_volume, self.testData.connector) self.assertFalse(connection_info['data'], 'connection_info data should not be None.') cli_helper.remove_hlu_from_storagegroup.assert_called_once_with( 16, self.testData.connector["host"]) @mock.patch( "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." 
+ "get_device_mapping_from_network", mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map)) def test_terminate_connection_remove_zone_true(self): self.driver = emc_cli_fc.EMCCLIFCDriver( configuration=self.configuration) cli_helper = self.driver.cli._client data = {'storage_group_name': "fakehost", 'storage_group_uid': "2F:D4:00:00:00:00:00:" "00:00:00:FF:E5:3A:03:FD:6D", 'lunmap': {}} cli_helper.get_storage_group = mock.Mock( return_value=data) cli_helper.remove_hlu_from_storagegroup = mock.Mock() self.driver.cli.zonemanager_lookup_service = ( fc_service.FCSanLookupService(configuration=self.configuration)) connection_info = self.driver.terminate_connection( self.testData.test_volume, self.testData.connector) self.assertTrue('initiator_target_map' in connection_info['data'], 'initiator_target_map should be populated.') self.assertEqual(EMCVNXCLIDriverTestData.i_t_map, connection_info['data']['initiator_target_map']) def test_get_volume_stats(self): commands = [self.testData.NDU_LIST_CMD, self.testData.POOL_GET_ALL_CMD(True)] results = [self.testData.NDU_LIST_RESULT, self.testData.POOL_GET_ALL_RESULT(True)] self.driverSetup(commands, results) stats = self.driver.get_volume_stats(True) self.assertTrue(stats['driver_version'] == VERSION, "driver_version is incorrect") self.assertTrue( stats['storage_protocol'] == 'FC', "storage_protocol is incorrect") self.assertTrue( stats['vendor_name'] == "EMC", "vendor name is incorrect") self.assertTrue( stats['volume_backend_name'] == "namedbackend", "volume backend name is incorrect") pool_stats = stats['pools'][0] expected_pool_stats = { 'free_capacity_gb': 3105.303, 'reserved_percentage': 32, 'location_info': 'unit_test_pool|fake_serial', 'total_capacity_gb': 3281.146, 'provisioned_capacity_gb': 536.14, 'compression_support': 'True', 'deduplication_support': 'True', 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'max_over_subscription_ratio': 20.0, 'consistencygroup_support': 'True', 'replication_enabled': False, 'replication_targets': [], 'pool_name': 'unit_test_pool', 'fast_cache_enabled': True, 'fast_support': 'True'} self.assertEqual(expected_pool_stats, pool_stats) def test_get_volume_stats_too_many_luns(self): commands = (self.testData.NDU_LIST_CMD, self.testData.POOL_GET_ALL_CMD(True), self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()) results = (self.testData.NDU_LIST_RESULT, self.testData.POOL_GET_ALL_RESULT(True), self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)) fake_cli = self.driverSetup(commands, results) self.driver.cli.check_max_pool_luns_threshold = True stats = self.driver.get_volume_stats(True) pool_stats = stats['pools'][0] self.assertTrue( pool_stats['free_capacity_gb'] == 0, "free_capacity_gb is incorrect") expect_cmd = [ mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(), poll=False)] fake_cli.assert_has_calls(expect_cmd) self.driver.cli.check_max_pool_luns_threshold = False stats = self.driver.get_volume_stats(True) pool_stats = stats['pools'][0] self.assertTrue(stats['driver_version'] is not None, "driver_version is incorrect") self.assertTrue( pool_stats['free_capacity_gb'] == 3105.303, "free_capacity_gb is incorrect") def test_deregister_initiator(self): fake_cli = self.driverSetup() self.driver.cli.destroy_empty_sg = True self.driver.cli.itor_auto_dereg = True cli_helper = self.driver.cli._client data = {'storage_group_name': "fakehost", 'storage_group_uid': "2F:D4:00:00:00:00:00:" "00:00:00:FF:E5:3A:03:FD:6D", 'lunmap': {1: 16}} cli_helper.get_storage_group = mock.Mock( 
            return_value=data)
        lun_info = {'lun_name': "unit_test_lun",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        cli_helper.disconnect_host_from_storage_group = mock.Mock()
        cli_helper.delete_storage_group = mock.Mock()
        self.driver.terminate_connection(self.testData.test_volume,
                                         self.testData.connector)
        fc_itor_1 = '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56'
        fc_itor_2 = '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16'
        expect_cmd = [
            mock.call('port', '-removeHBA', '-hbauid', fc_itor_1, '-o'),
            mock.call('port', '-removeHBA', '-hbauid', fc_itor_2, '-o')]
        fake_cli.assert_has_calls(expect_cmd)


class EMCVNXCLIToggleSPTestData(object):
    def FAKE_COMMAND_PREFIX(self, sp_address):
        return ('/opt/Navisphere/bin/naviseccli', '-address', sp_address,
                '-user', 'sysadmin', '-password', 'sysadmin',
                '-scope', 'global')


@mock.patch('time.sleep')
class EMCVNXCLIToggleSPTestCase(test.TestCase):
    def setUp(self):
        super(EMCVNXCLIToggleSPTestCase, self).setUp()
        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
        self.configuration.san_ip = '10.10.10.10'
        self.configuration.san_secondary_ip = "10.10.10.11"
        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
        self.configuration.san_login = 'sysadmin'
        self.configuration.san_password = 'sysadmin'
        self.configuration.default_timeout = 1
        self.configuration.max_luns_per_storage_group = 10
        self.configuration.destroy_empty_storage_group = 10
        self.configuration.storage_vnx_authentication_type = "global"
        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
        self.configuration.zoning_mode = None
        self.configuration.storage_vnx_security_file_dir = ""
        self.configuration.config_group = 'toggle-backend'
        self.cli_client = emc_vnx_cli.CommandLineHelper(
            configuration=self.configuration)
        self.test_data = EMCVNXCLIToggleSPTestData()

    def test_no_sp_toggle(self, time_mock):
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_SUCCESS_RETURN = ('success', 0)
        FAKE_COMMAND = ('list', 'pool')
        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN]
        with mock.patch('cinder.utils.execute') as mock_utils:
            mock_utils.side_effect = SIDE_EFFECTS
            self.cli_client.command_execute(*FAKE_COMMAND)
            self.assertEqual("10.10.10.10",
                             self.cli_client.active_storage_ip)
            expected = [
                mock.call(*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                          + FAKE_COMMAND), check_exit_code=True)]
            mock_utils.assert_has_calls(expected)
        time_mock.assert_not_called()

    def test_toggle_sp_with_server_unavailable(self, time_mock):
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_ERROR_MSG = """\
Error occurred during HTTP request/response from the target: '10.244.213.142'.
Message : HTTP/1.1 503 Service Unavailable"""
        FAKE_SUCCESS_RETURN = ('success', 0)
        FAKE_COMMAND = ('list', 'pool')
        SIDE_EFFECTS = [processutils.ProcessExecutionError(
            exit_code=255, stdout=FAKE_ERROR_MSG),
            FAKE_SUCCESS_RETURN]
        with mock.patch('cinder.utils.execute') as mock_utils:
            mock_utils.side_effect = SIDE_EFFECTS
            self.cli_client.command_execute(*FAKE_COMMAND)
            self.assertEqual("10.10.10.11",
                             self.cli_client.active_storage_ip)
            expected = [
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                        + FAKE_COMMAND),
                    check_exit_code=True),
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
                        + FAKE_COMMAND),
                    check_exit_code=True)]
            mock_utils.assert_has_calls(expected)
        time_mock.assert_has_calls([mock.call(30)])

    def test_toggle_sp_with_server_unavailable_max_retry(self, time_mock):
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_ERROR_MSG = ("Error occurred during HTTP request/response "
                          "from the target: '10.244.213.142'.\n"
                          "Message : HTTP/1.1 503 Service Unavailable")
        FAKE_COMMAND = ('list', 'pool')
        SIDE_EFFECTS = [processutils.ProcessExecutionError(
            exit_code=255, stdout=FAKE_ERROR_MSG)] * 5
        with mock.patch('cinder.utils.execute') as mock_utils:
            mock_utils.side_effect = SIDE_EFFECTS
            self.assertRaisesRegex(exception.EMCSPUnavailableException,
                                   '.*Error occurred during HTTP request',
                                   self.cli_client.command_execute,
                                   *FAKE_COMMAND)
            self.assertEqual("10.10.10.11",
                             self.cli_client.active_storage_ip)
            expected = [
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                        + FAKE_COMMAND),
                    check_exit_code=True),
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
                        + FAKE_COMMAND),
                    check_exit_code=True)]
            mock_utils.assert_has_calls(expected)
        time_mock.assert_has_calls([mock.call(30)] * 4)

    def test_toggle_sp_with_end_of_data(self, time_mock):
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_ERROR_MSG = ("Error occurred during HTTP request/response "
                          "from the target: '10.244.213.142'.\n"
                          "Message : HTTP/1.1 503 Service Unavailable")
        FAKE_SUCCESS_RETURN = ('success', 0)
        FAKE_COMMAND = ('list', 'pool')
        SIDE_EFFECTS = [processutils.ProcessExecutionError(
            exit_code=255, stdout=FAKE_ERROR_MSG),
            FAKE_SUCCESS_RETURN]
        with mock.patch('cinder.utils.execute') as mock_utils:
            mock_utils.side_effect = SIDE_EFFECTS
            self.cli_client.command_execute(*FAKE_COMMAND)
            self.assertEqual("10.10.10.11",
                             self.cli_client.active_storage_ip)
            expected = [
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                        + FAKE_COMMAND),
                    check_exit_code=True),
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
                        + FAKE_COMMAND),
                    check_exit_code=True)]
            mock_utils.assert_has_calls(expected)
        time_mock.assert_has_calls([mock.call(30)])

    def test_toggle_sp_with_connection_refused(self, time_mock):
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '10.244.213.142'.
Message : Error occurred because connection refused. \
Unable to establish a secure connection to the Management Server.
""" FAKE_SUCCESS_RETURN = ('success', 0) FAKE_COMMAND = ('list', 'pool') SIDE_EFFECTS = [processutils.ProcessExecutionError( exit_code=255, stdout=FAKE_ERROR_MSG), FAKE_SUCCESS_RETURN] with mock.patch('cinder.utils.execute') as mock_utils: mock_utils.side_effect = SIDE_EFFECTS self.cli_client.command_execute(*FAKE_COMMAND) self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip) expected = [ mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10') + FAKE_COMMAND), check_exit_code=True), mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11') + FAKE_COMMAND), check_exit_code=True)] mock_utils.assert_has_calls(expected) time_mock.assert_has_calls([mock.call(30)]) def test_toggle_sp_with_connection_error(self, time_mock): self.cli_client.active_storage_ip = '10.10.10.10' FAKE_ERROR_MSG = """\ A network error occurred while trying to connect: '192.168.1.56'. Message : Error occurred because of time out""" FAKE_SUCCESS_RETURN = ('success', 0) FAKE_COMMAND = ('list', 'pool') SIDE_EFFECTS = [processutils.ProcessExecutionError( exit_code=255, stdout=FAKE_ERROR_MSG), FAKE_SUCCESS_RETURN] with mock.patch('cinder.utils.execute') as mock_utils: mock_utils.side_effect = SIDE_EFFECTS self.cli_client.command_execute(*FAKE_COMMAND) self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip) expected = [ mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10') + FAKE_COMMAND), check_exit_code=True), mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11') + FAKE_COMMAND), check_exit_code=True)] mock_utils.assert_has_calls(expected) time_mock.assert_has_calls([mock.call(30)]) class EMCVNXCLIBackupTestCase(DriverTestCaseBase): """Provides cli-level and client-level mock test.""" def driverSetup(self): self.context = context.get_admin_context() self.driver = self.generate_driver(self.configuration) self.driver.cli._client = mock.Mock() self.snapshot = fake_snapshot.fake_snapshot_obj( self.context, **self.testData.test_snapshot) volume = fake_volume.fake_volume_obj(self.context) self.snapshot.volume = volume return self.driver.cli._client def generate_driver(self, conf): driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf) return driver @patch.object(emc_vnx_cli.EMCVnxCliBase, 'terminate_connection') def test_terminate_connection_snapshot(self, terminate_connection): fake_client = self.driverSetup() connector = self.testData.connector smp_name = 'tmp-smp-' + self.snapshot['id'] volume = {'name': smp_name} self.driver.terminate_connection_snapshot( self.snapshot, connector) terminate_connection.assert_called_once_with( volume, connector) fake_client.detach_mount_point.assert_called_once_with( smp_name) @patch.object(emc_vnx_cli.EMCVnxCliBase, 'initialize_connection') def test_initialize_connection_snapshot(self, initialize_connection): fake_client = self.driverSetup() connector = self.testData.connector smp_name = 'tmp-smp-' + self.snapshot['id'] self.driver.initialize_connection_snapshot( self.snapshot, connector) fake_client.attach_mount_point.assert_called_once_with( smp_name, self.snapshot['name']) volume = {'name': smp_name, 'id': self.snapshot['id']} initialize_connection.assert_called_once_with( volume, connector) def test_create_export_snapshot(self): fake_client = self.driverSetup() connector = self.testData.connector smp_name = 'tmp-smp-' + self.snapshot['id'] self.driver.create_export_snapshot( None, self.snapshot, connector) fake_client.create_mount_point.assert_called_once_with( self.snapshot['volume_name'], smp_name) 
@patch.object(emc_vnx_cli.EMCVnxCliBase, 'delete_volume') def test_remove_export_snapshot(self, delete_volume): self.driverSetup() smp_name = 'tmp-smp-' + self.snapshot['id'] self.driver.remove_export_snapshot(None, self.snapshot) volume = {'name': smp_name, 'provider_location': None} delete_volume.assert_called_once_with(volume, True) class EMCVNXCLIMultiPoolsTestCase(DriverTestCaseBase): def generate_driver(self, conf): driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf) return driver def fake_command_execute_for_driver_setup(self, *command, **kwargv): if command == ('connection', '-getport', '-address', '-vlanid'): return self.testData.ALL_PORTS elif command == ('storagepool', '-list', '-state'): return self.testData.POOL_GET_STATE_RESULT([ {'pool_name': self.testData.test_pool_name, 'state': "Ready"}, {'pool_name': "unit_test_pool2", 'state': "Ready"}, {'pool_name': "unit_test_pool3", 'state': "Ready"}, {'pool_name': "unit_text_pool4", 'state': "Ready"}]) else: return SUCCEED def test_storage_pool_names_option(self): self.configuration.safe_get = self.fake_safe_get( {'storage_vnx_pool_names': "unit_test_pool, unit_test_pool3", 'volume_backend_name': 'namedbackend'}) driver = self.generate_driver(self.configuration) self.assertEqual(set(["unit_test_pool", "unit_test_pool3"]), driver.cli.storage_pools) self.configuration.safe_get = self.fake_safe_get( {'storage_vnx_pool_names': "unit_test_pool2,", 'volume_backend_name': 'namedbackend'}) driver = self.generate_driver(self.configuration) self.assertEqual(set(["unit_test_pool2"]), driver.cli.storage_pools) self.configuration.safe_get = self.fake_safe_get( {'storage_vnx_pool_names': "unit_test_pool3", 'volume_backend_name': 'namedbackend'}) driver = self.generate_driver(self.configuration) self.assertEqual(set(["unit_test_pool3"]), driver.cli.storage_pools) def test_configured_pool_does_not_exist(self): self.configuration.safe_get = self.fake_safe_get( {'storage_vnx_pool_names': "unit_test_pool2, unit_test_pool_none2", 'volume_backend_name': 'namedbackend'}) driver = self.generate_driver(self.configuration) self.assertEqual(set(["unit_test_pool2"]), driver.cli.storage_pools) self.configuration.safe_get = self.fake_safe_get( {'storage_vnx_pool_names': "unit_test_pool_none1", "unit_test_pool_none2" 'volume_backend_name': 'namedbackend'}) self.assertRaises(exception.VolumeBackendAPIException, self.generate_driver, self.configuration) def test_no_storage_pool_is_configured(self): self.configuration.safe_get = self.fake_safe_get( {'storage_vnx_pool_names': None, 'volume_backend_name': 'namedbackend'}) driver = self.generate_driver(self.configuration) self.assertEqual(set(), driver.cli.storage_pools) @patch.object(emc_vnx_cli.EMCVnxCliBase, 'enablers', mock.PropertyMock(return_value=['-MirrorView/S'])) class EMCVNXCLIDriverReplicationV2TestCase(DriverTestCaseBase): def setUp(self): super(EMCVNXCLIDriverReplicationV2TestCase, self).setUp() self.backend_id = 'fake_serial' self.configuration.replication_device = [{ 'backend_id': self.backend_id, 'san_ip': '192.168.1.2', 'san_login': 'admin', 'san_password': 'admin', 'san_secondary_ip': '192.168.2.2', 'storage_vnx_authentication_type': 'global', 'storage_vnx_security_file_dir': None}] def generate_driver(self, conf, active_backend_id=None): return emc_cli_iscsi.EMCCLIISCSIDriver( configuration=conf, active_backend_id=active_backend_id) def _build_mirror_name(self, volume_id): return 'mirror_' + volume_id @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'replication_enabled': ' True'})) def test_create_volume_with_replication(self): rep_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) mirror_name = self._build_mirror_name(rep_volume.id) commands = [self.testData.MIRROR_CREATE_CMD(mirror_name, 5), self.testData.MIRROR_ADD_IMAGE_CMD( mirror_name, '192.168.1.2', 5)] results = [SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers.append('-MirrorView/S') with mock.patch.object( emc_vnx_cli.CommandLineHelper, 'create_lun_with_advance_feature', mock.Mock(return_value={'lun_id': 5})): model_update = self.driver.create_volume(rep_volume) self.assertTrue(model_update['replication_status'] == 'enabled') self.assertTrue(model_update['replication_driver_data'] == build_replication_data(self.configuration)) self.assertDictMatch({'system': self.backend_id, 'snapcopy': 'False'}, model_update['metadata']) fake_cli.assert_has_calls( [mock.call(*self.testData.MIRROR_CREATE_CMD(mirror_name, 5), poll=True), mock.call(*self.testData.MIRROR_ADD_IMAGE_CMD( mirror_name, '192.168.1.2', 5), poll=True)]) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'replication_enabled': ' True'})) def test_create_replication_mirror_exists(self): rep_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) mirror_name = self._build_mirror_name(rep_volume.id) commands = [self.testData.MIRROR_CREATE_CMD(mirror_name, 5), self.testData.MIRROR_ADD_IMAGE_CMD( mirror_name, '192.168.1.2', 5)] results = [self.testData.MIRROR_CREATE_ERROR_RESULT(mirror_name), SUCCEED] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers.append('-MirrorView/S') with mock.patch.object( emc_vnx_cli.CommandLineHelper, 'create_lun_with_advance_feature', mock.Mock(return_value={'lun_id': 5})): model_update = self.driver.create_volume(rep_volume) self.assertTrue(model_update['replication_status'] == 'enabled') self.assertTrue(model_update['replication_driver_data'] == build_replication_data(self.configuration)) fake_cli.assert_has_calls( [mock.call(*self.testData.MIRROR_CREATE_CMD(mirror_name, 5), poll=True), mock.call(*self.testData.MIRROR_ADD_IMAGE_CMD( mirror_name, '192.168.1.2', 5), poll=True)]) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'replication_enabled': ' True'})) def test_create_replication_add_image_error(self): rep_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) mirror_name = self._build_mirror_name(rep_volume.id) commands = [self.testData.MIRROR_CREATE_CMD(mirror_name, 5), self.testData.MIRROR_ADD_IMAGE_CMD( mirror_name, '192.168.1.2', 5), self.testData.LUN_DELETE_CMD(rep_volume.name), self.testData.MIRROR_DESTROY_CMD(mirror_name)] results = [SUCCEED, ("Add Image Error", 25), SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) self.driver.cli._mirror._secondary_client.command_execute = fake_cli with mock.patch.object( emc_vnx_cli.CommandLineHelper, 'create_lun_with_advance_feature', mock.Mock(return_value={'lun_id': 5})): self.assertRaisesRegex(exception.EMCVnxCLICmdError, 'Add Image Error', self.driver.create_volume, rep_volume) fake_cli.assert_has_calls( [mock.call(*self.testData.MIRROR_CREATE_CMD(mirror_name, 5), poll=True), mock.call(*self.testData.MIRROR_ADD_IMAGE_CMD( mirror_name, '192.168.1.2', 5), poll=True), mock.call(*self.testData.LUN_DELETE_CMD(rep_volume.name)), mock.call(*self.testData.MIRROR_DESTROY_CMD(mirror_name), poll=True)]) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'replication_enabled': ' True'})) def test_failover_replication_from_primary(self): rep_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) mirror_name = self._build_mirror_name(rep_volume.id) image_uid = '50:06:01:60:88:60:05:FE' commands = [self.testData.MIRROR_LIST_CMD(mirror_name), self.testData.MIRROR_PROMOTE_IMAGE_CMD( mirror_name, image_uid)] results = [self.testData.MIRROR_LIST_RESULT(mirror_name), SUCCEED] fake_cli = self.driverSetup(commands, results) rep_volume.replication_driver_data = build_replication_data( self.configuration) rep_volume.metadata = self.testData.replication_metadata self.driver.cli._mirror._secondary_client.command_execute = fake_cli back_id, model_update = self.driver.failover_host( None, [rep_volume], self.backend_id) fake_cli.assert_has_calls([ mock.call(*self.testData.MIRROR_LIST_CMD(mirror_name), poll=True), mock.call(*self.testData.MIRROR_PROMOTE_IMAGE_CMD(mirror_name, image_uid), poll=False)]) self.assertEqual( build_provider_location( '1', 'lun', rep_volume.name, self.backend_id), model_update[0]['updates']['provider_location']) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'replication_enabled': ' True'})) def test_failover_replication_from_secondary(self): rep_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) mirror_name = self._build_mirror_name(rep_volume.id) image_uid = '50:06:01:60:88:60:05:FE' commands = [self.testData.MIRROR_LIST_CMD(mirror_name), self.testData.MIRROR_PROMOTE_IMAGE_CMD( mirror_name, image_uid)] results = [self.testData.MIRROR_LIST_RESULT(mirror_name), SUCCEED] fake_cli = self.driverSetup(commands, results) rep_volume.replication_driver_data = build_replication_data( self.configuration) rep_volume.metadata = self.testData.replication_metadata driver_data = json.loads(rep_volume.replication_driver_data) driver_data['is_primary'] = False rep_volume.replication_driver_data = json.dumps(driver_data) self.driver.cli._mirror._secondary_client.command_execute = fake_cli with mock.patch( 'cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper') \ as fake_remote: fake_remote.return_value = self.driver.cli._client backend_id, data = self.driver.failover_host( None, [rep_volume], 'default') updates = data[0]['updates'] rep_status = updates['replication_status'] self.assertEqual('enabled', rep_status) fake_cli.assert_has_calls([ mock.call(*self.testData.MIRROR_LIST_CMD(mirror_name), poll=True), mock.call(*self.testData.MIRROR_PROMOTE_IMAGE_CMD(mirror_name, image_uid), poll=False)]) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'replication_enabled': ' True'})) def test_failover_replication_invalid_backend_id(self): rep_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) self._build_mirror_name(rep_volume.id) fake_cli = self.driverSetup([], []) rep_volume.replication_driver_data = build_replication_data( self.configuration) rep_volume.metadata = self.testData.replication_metadata driver_data = json.loads(rep_volume.replication_driver_data) driver_data['is_primary'] = False rep_volume.replication_driver_data = json.dumps(driver_data) self.driver.cli._mirror._secondary_client.command_execute = fake_cli with mock.patch( 'cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper') \ as fake_remote: fake_remote.return_value = self.driver.cli._client invalid = 'invalid_backend_id' self.assertRaisesRegex(exception.VolumeBackendAPIException, "Invalid secondary_backend_id specified", self.driver.failover_host, None, [rep_volume], invalid) @mock.patch( "cinder.volume.volume_types." 
"get_volume_type_extra_specs", mock.Mock(return_value={'replication_enabled': ' True'})) def test_failover_already_promoted(self): rep_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) mirror_name = self._build_mirror_name(rep_volume.id) image_uid = '50:06:01:60:88:60:05:FE' commands = [self.testData.MIRROR_LIST_CMD(mirror_name), self.testData.MIRROR_PROMOTE_IMAGE_CMD( mirror_name, image_uid)] results = [self.testData.MIRROR_LIST_RESULT(mirror_name), self.testData.MIRROR_PROMOTE_IMAGE_ERROR_RESULT()] fake_cli = self.driverSetup(commands, results) rep_volume.replication_driver_data = build_replication_data( self.configuration) rep_volume.metadata = self.testData.replication_metadata self.driver.cli._mirror._secondary_client.command_execute = fake_cli new_backend_id, model_updates = self.driver.failover_host( None, [rep_volume], self.backend_id) self.assertEqual(rep_volume.id, model_updates[0]['volume_id']) self.assertEqual('error', model_updates[0]['updates']['replication_status']) fake_cli.assert_has_calls([ mock.call(*self.testData.MIRROR_LIST_CMD(mirror_name), poll=True), mock.call(*self.testData.MIRROR_PROMOTE_IMAGE_CMD(mirror_name, image_uid), poll=False)]) @mock.patch( "cinder.volume.volume_types." "get_volume_type_extra_specs", mock.Mock(return_value={'replication_enabled': ' True'})) def test_delete_volume_with_rep(self): rep_volume = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) mirror_name = self._build_mirror_name(rep_volume.id) image_uid = '50:06:01:60:88:60:05:FE' commands = [self.testData.MIRROR_LIST_CMD(mirror_name), self.testData.MIRROR_FRACTURE_IMAGE_CMD(mirror_name, image_uid), self.testData.MIRROR_REMOVE_IMAGE_CMD(mirror_name, image_uid), self.testData.MIRROR_DESTROY_CMD(mirror_name)] results = [self.testData.MIRROR_LIST_RESULT(mirror_name), SUCCEED, SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) self.driver.cli._mirror._secondary_client.command_execute = fake_cli vol = EMCVNXCLIDriverTestData.convert_volume( self.testData.test_volume_replication) vol.replication_driver_data = build_replication_data( self.configuration) with mock.patch.object( emc_vnx_cli.CommandLineHelper, 'delete_lun', mock.Mock(return_value=None)): self.driver.delete_volume(vol) expected = [mock.call(*self.testData.MIRROR_LIST_CMD(mirror_name), poll=False), mock.call(*self.testData.MIRROR_FRACTURE_IMAGE_CMD( mirror_name, image_uid), poll=False), mock.call(*self.testData.MIRROR_REMOVE_IMAGE_CMD( mirror_name, image_uid), poll=False), mock.call(*self.testData.MIRROR_DESTROY_CMD(mirror_name), poll=False)] fake_cli.assert_has_calls(expected) def test_build_client_with_invalid_id(self): self.driverSetup([], []) self.assertRaisesRegex( exception.VolumeBackendAPIException, 'replication_device with backend_id .* is missing.', self.driver.cli._build_client, 'invalid_backend_id') def test_build_client_with_id(self): self.driverSetup([], []) cli_client = self.driver.cli._build_client( active_backend_id='fake_serial') self.assertEqual('192.168.1.2', cli_client.active_storage_ip) self.assertEqual('192.168.1.2', cli_client.primary_storage_ip) VNXError = emc_vnx_cli.VNXError class VNXErrorTest(test.TestCase): def test_has_error(self): output = "The specified snapshot name is already in use. (0x716d8005)" self.assertTrue(VNXError.has_error(output)) def test_has_error_with_specific_error(self): output = "The specified snapshot name is already in use. 
(0x716d8005)" has_error = VNXError.has_error(output, VNXError.SNAP_NAME_EXISTED) self.assertTrue(has_error) has_error = VNXError.has_error(output, VNXError.LUN_ALREADY_EXPANDED) self.assertFalse(has_error) def test_has_error_not_found(self): output = "Cannot find the consistency group." has_error = VNXError.has_error(output) self.assertTrue(has_error) has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND) self.assertTrue(has_error) def test_has_error_not_exist(self): output = "The specified snapshot does not exist." has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND) self.assertTrue(has_error) output = "The (pool lun) may not exist." has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND) self.assertTrue(has_error) def test_has_error_multi_line(self): output = """Could not retrieve the specified (pool lun). The (pool lun) may not exist.""" has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND) self.assertTrue(has_error) def test_has_error_regular_string_false(self): output = "Cannot unbind LUN because it's contained in a Storage Group." has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND) self.assertFalse(has_error) def test_has_error_multi_errors(self): output = "Cannot unbind LUN because it's contained in a Storage Group." has_error = VNXError.has_error(output, VNXError.LUN_IN_SG, VNXError.GENERAL_NOT_FOUND) self.assertTrue(has_error) output = "Cannot unbind LUN because it's contained in a Storage Group." has_error = VNXError.has_error(output, VNXError.LUN_ALREADY_EXPANDED, VNXError.LUN_NOT_MIGRATING) self.assertFalse(has_error) VNXProvisionEnum = emc_vnx_cli.VNXProvisionEnum class VNXProvisionEnumTest(test.TestCase): def test_get_opt(self): opt = VNXProvisionEnum.get_opt(VNXProvisionEnum.DEDUPED) self.assertEqual('-type Thin -deduplication on', ' '.join(opt)) def test_get_opt_not_available(self): self.assertRaises(ValueError, VNXProvisionEnum.get_opt, 'na') VNXTieringEnum = emc_vnx_cli.VNXTieringEnum class VNXTieringEnumTest(test.TestCase): def test_get_opt(self): opt = VNXTieringEnum.get_opt(VNXTieringEnum.HIGH_AUTO) self.assertEqual( '-initialTier highestAvailable -tieringPolicy autoTier', ' '.join(opt)) def test_get_opt_not_available(self): self.assertRaises(ValueError, VNXTieringEnum.get_opt, 'na') VNXLun = emc_vnx_cli.VNXLun class VNXLunTest(test.TestCase): def test_lun_id_setter_str_input(self): lun = VNXLun() lun.lun_id = '5' self.assertEqual(5, lun.lun_id) def test_lun_id_setter_dict_input(self): lun = VNXLun() lun.lun_id = {'lun_id': 12} self.assertEqual(12, lun.lun_id) def test_lun_id_setter_str_error(self): lun = VNXLun() self.assertRaises(ValueError, setattr, lun, 'lun_id', '12a') def test_lun_provision_default(self): lun = VNXLun() lun.provision = {} self.assertEqual(VNXProvisionEnum.THICK, lun.provision) def test_lun_provision_thin(self): lun = VNXLun() lun.provision = {'is_thin_lun': True, 'is_compressed': False, 'dedup_state': False} self.assertEqual(VNXProvisionEnum.THIN, lun.provision) def test_lun_provision_compressed(self): lun = VNXLun() lun.provision = {'is_thin_lun': True, 'is_compressed': True, 'dedup_state': False} self.assertEqual(VNXProvisionEnum.COMPRESSED, lun.provision) def test_lun_provision_dedup(self): lun = VNXLun() lun.provision = {'is_thin_lun': True, 'is_compressed': False, 'dedup_state': True} self.assertEqual(VNXProvisionEnum.DEDUPED, lun.provision) def test_lun_provision_str_not_valid(self): lun = VNXLun() self.assertRaises(ValueError, setattr, lun, 'provision', 'invalid') def 
test_lun_provision_plain_str(self): lun = VNXLun() lun.provision = VNXProvisionEnum.DEDUPED self.assertEqual(VNXProvisionEnum.DEDUPED, lun.provision) def test_lun_tier_default(self): lun = VNXLun() self.assertEqual(VNXTieringEnum.HIGH_AUTO, lun.tier) def test_lun_tier_invalid_str(self): lun = VNXLun() self.assertRaises(ValueError, setattr, lun, 'tier', 'invalid') def test_lun_tier_plain_str(self): lun = VNXLun() lun.tier = VNXTieringEnum.NO_MOVE self.assertEqual(VNXTieringEnum.NO_MOVE, lun.tier) def test_lun_tier_highest_available(self): lun = VNXLun() lun.tier = {'tiering_policy': 'Auto Tier', 'initial_tier': 'Highest Available'} self.assertEqual(VNXTieringEnum.HIGH_AUTO, lun.tier) def test_lun_tier_auto(self): lun = VNXLun() lun.tier = {'tiering_policy': 'Auto Tier', 'initial_tier': 'Optimize Pool'} self.assertEqual(VNXTieringEnum.AUTO, lun.tier) def test_lun_tier_high(self): lun = VNXLun() lun.tier = {'tiering_policy': 'Highest Available', 'initial_tier': 'Highest Available'} self.assertEqual(VNXTieringEnum.HIGH, lun.tier) def test_lun_tier_low(self): lun = VNXLun() lun.tier = {'tiering_policy': 'Lowest Available', 'initial_tier': 'Lowest Available'} self.assertEqual(VNXTieringEnum.LOW, lun.tier) def test_lun_tier_no_move_high_tier(self): lun = VNXLun() lun.tier = {'tiering_policy': 'No Movement', 'initial_tier': 'Highest Available'} self.assertEqual(VNXTieringEnum.NO_MOVE, lun.tier) def test_lun_tier_no_move_optimize_pool(self): lun = VNXLun() lun.tier = {'tiering_policy': 'No Movement', 'initial_tier': 'Optimize Pool'} self.assertEqual(VNXTieringEnum.NO_MOVE, lun.tier) def test_update(self): lun = VNXLun() lun.lun_id = 19 lun.update({ 'lun_name': 'test_lun', 'lun_id': 19, 'total_capacity_gb': 1.0, 'is_thin_lun': True, 'is_compressed': False, 'dedup_state': True, 'tiering_policy': 'No Movement', 'initial_tier': 'Optimize Pool'}) self.assertEqual(1.0, lun.capacity) self.assertEqual(VNXProvisionEnum.DEDUPED, lun.provision) self.assertEqual(VNXTieringEnum.NO_MOVE, lun.tier) Dict = emc_vnx_cli.Dict class DictTest(test.TestCase): def test_get_attr(self): result = Dict() result['a'] = 'A' self.assertEqual('A', result.a) self.assertEqual('A', result['a']) def test_get_attr_not_exists(self): result = Dict() self.assertRaises(AttributeError, getattr, result, 'a') VNXCliParser = emc_vnx_cli.VNXCliParser PropertyDescriptor = emc_vnx_cli.PropertyDescriptor class DemoParser(VNXCliParser): A = PropertyDescriptor('-a', 'Prop A (name)', 'prop_a') B = PropertyDescriptor('-b', 'Prop B:') C = PropertyDescriptor('-c', 'Prop C') ID = PropertyDescriptor(None, 'ID:') class VNXCliParserTest(test.TestCase): def test_get_property_options(self): options = DemoParser.get_property_options() self.assertEqual('-a -b -c', ' '.join(options)) def test_parse(self): output = """ ID: test Prop A (Name): ab (c) Prop B: d ef """ parsed = DemoParser.parse( output, [DemoParser.A, DemoParser.ID, DemoParser.C]) self.assertEqual('ab (c)', parsed.prop_a) self.assertIsNone(parsed.prop_c) self.assertEqual('test', parsed.id) self.assertRaises(AttributeError, getattr, parsed, 'prop_b') VNXLunProperties = emc_vnx_cli.VNXLunProperties class VNXLunPropertiesTest(test.TestCase): def test_parse(self): output = """ LOGICAL UNIT NUMBER 19 Name: test_lun User Capacity (Blocks): 2097152 User Capacity (GBs): 1.000 Pool Name: Pool4File Is Thin LUN: Yes Is Compressed: No Deduplication State: Off Deduplication Status: OK(0x0) Tiering Policy: No Movement Initial Tier: Optimize Pool """ parser = VNXLunProperties() parsed = parser.parse(output) 
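        # VNXLunProperties maps each labeled line of the naviseccli output
        # above to a typed attribute via its PropertyDescriptor entries
        # (compare DemoParser earlier in this file), so the assertions below
        # check type conversion as well as extraction.  Illustrative mapping,
        # inferred from those assertions rather than from the parser source:
        #
        #     'LOGICAL UNIT NUMBER 19'      -> parsed.lun_id == 19 (int)
        #     'User Capacity (GBs): 1.000'  -> parsed.total_capacity_gb == 1.0
        #     'Is Thin LUN: Yes'            -> parsed.is_thin_lun is True
        #     'Deduplication State: Off'    -> parsed.dedup_state is False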
self.assertEqual('test_lun', parsed.lun_name) self.assertEqual(19, parsed.lun_id) self.assertEqual(1.0, parsed.total_capacity_gb) self.assertTrue(parsed.is_thin_lun) self.assertFalse(parsed.is_compressed) self.assertFalse(parsed.dedup_state) self.assertEqual('No Movement', parsed.tiering_policy) self.assertEqual('Optimize Pool', parsed.initial_tier) self.assertIsNone(parsed['state']) VNXPoolProperties = emc_vnx_cli.VNXPoolProperties class VNXPoolPropertiesTest(test.TestCase): def test_parse(self): output = """ Pool Name: Pool4File Pool ID: 1 Raid Type: Mixed Percent Full Threshold: 70 Description: Disk Type: Mixed State: Ready Status: OK(0x0) Current Operation: None Current Operation State: N/A Current Operation Status: N/A Current Operation Percent Completed: 0 Raw Capacity (Blocks): 6398264602 Raw Capacity (GBs): 3050.930 User Capacity (Blocks): 4885926912 User Capacity (GBs): 2329.792 Consumed Capacity (Blocks): 1795516416 Consumed Capacity (GBs): 856.169 Available Capacity (Blocks): 3090410496 Available Capacity (GBs): 1473.623 Percent Full: 36.749 Total Subscribed Capacity (Blocks): 5666015232 Total Subscribed Capacity (GBs): 2701.767 Percent Subscribed: 115.966 Oversubscribed by (Blocks): 780088320 Oversubscribed by (GBs): 371.975 """ parser = VNXPoolProperties() pool = parser.parse(output) self.assertEqual('Ready', pool.state) self.assertEqual(1, pool.pool_id) self.assertEqual(2329.792, pool.total_capacity_gb) self.assertEqual(1473.623, pool.free_capacity_gb) self.assertIsNone(pool.fast_cache_enabled) self.assertEqual('Pool4File', pool.pool_name) self.assertEqual(2701.767, pool.provisioned_capacity_gb) self.assertEqual(70, pool.pool_full_threshold) cinder-8.0.0/cinder/tests/unit/test_hitachi_hnas_backend.py0000664000567000056710000007451312701406250025222 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import time import mock from oslo_concurrency import processutils as putils from oslo_config import cfg from cinder import exception from cinder import test from cinder import utils from cinder.volume.drivers.hitachi import hnas_backend from cinder.volume.drivers.hitachi import hnas_nfs as nfs CONF = cfg.CONF HNAS_RESULT1 = "\n\ FS ID FS Label FS Permanent ID EVS ID EVS Label\n\ ----- ----------- ------------------ ------ ---------\n\ 1026 gold 0xaadee0e035cfc0b7 1 EVSTest1\n\ 1025 fs01-husvm 0xaada5dff78668800 1 EVSTest1\n\ 1027 large-files 0xaadee0ef012a0d54 1 EVSTest1\n\ 1028 platinun 0xaadee1ea49d1a32c 1 EVSTest1\n\ 1029 test_hdp 0xaadee09634acfcac 1 EVSTest1\n\ 1030 cinder1 0xaadfcf742fba644e 1 EVSTest1\n\ 1031 cinder2 0xaadfcf7e0769a6bc 1 EVSTest1\n\ 1024 fs02-husvm 0xaac8715e2e9406cd 2 EVSTest2\n\ \n" HNAS_RESULT2 = "cluster MAC: 83-68-96-AA-DA-5D" HNAS_RESULT3 = "\n\ Model: HNAS 4040 \n\ Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\ Hardware: NAS Platform (M2SEKW1339109) \n\ board MMB1 \n\ mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\ board MFB1 \n\ mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \ RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \ WD v00E2 DI v001A FC v0002 \n\ Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\ board MCP \n\ Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\ \n" HNAS_RESULT4 = "\n\ EVS Type Label IP Address Mask Port \n\ ---------- --------------- ------------------ --------------- ------\n\ admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\ admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\ evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\ evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\ evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\ \n" HNAS_RESULT5 = "\n\ ID Label EVS Size Used Snapshots Deduped\ Avail Thin ThinSize ThinAvail \ FS Type \n\ ---- ----------- --- ------- ------------- --------- -------\ - ------------- ---- -------- --------- ---------------------\ ------------- \n\ 1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA \ 228 GB (91%) No 32 KB,\ WFS-2,128 DSBs\n\ 1026 gold 1 19.9 GB 2.30 GB (12% NA 0 B (0%)\ 17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\ dedupe enabled\n\ 1027 large-files 1 19.8 GB 2.43 GB (12%) 0 B (0%) NA \ 17.3 GB (88%) No 32 KB,\ WFS-2,128 DSBs\n\ 1028 platinun 1 19.9 GB 2.30 GB (12%) NA 0 B (0%)\ 17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\ dedupe enabled\n\ 1029 silver 1 19.9 GB 3.19 GB (16%) 0 B (0%) NA \ 6.7 GB (84%) No 4 KB,\ WFS-2,128 DSBs\n\ 1030 cinder1 1 40.8 GB 2.24 GB (5%) 0 B (0%) NA \ 38.5 GB (95%) No 4 KB,\ WFS-2,128 DSBs\n\ 1031 cinder2 1 39.8 GB 2.23 GB (6%) 0 B (0%) NA \ 37.6 GB (94%) No 4 KB,\ WFS-2,128 DSBs\n\ 1024 fs02-husvm 2 49.8 GB 3.54 GB (7%) 0 B (0%) NA \ 46.2 GB (93%) No 32 KB,\ WFS-2,128 DSBs\n\ 1032 test 2 3.97 GB 2.12 GB (53%) 0 B (0%) NA \ 1.85 GB (47%) No 4 KB,\ WFS-2,128 DSBs\n\ 1058 huge_FS 7 1.50 TB Not determined\n\ 1053 fs-unmounted 4 108 GB Not mounted \ NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\ WFS-2,128 DSBs,dedupe enabled\n\ \n" HNAS_RESULT6 = "\n\ ID Label EVS Size Used Snapshots Deduped Avail \ Thin ThinSize ThinAvail FS Type\n\ ---- ---------- --- ------ ------------ --------- ------- ------------ \ ---- -------- --------- --------------------\n\ 1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \ No 32 KB,WFS-2,128 DSBs\n\ \n" HNAS_RESULT7 = "\n\ Export configuration: \n\ Export name: /export01-husvm \n\ Export path: /export01-husvm \n\ File system label: test_hdp \n\ File system size: 250 GB \n\ File system free space: 228 GB \n\ File system 
state: \n\ formatted = Yes \n\ mounted = Yes \n\ failed = No \n\ thin provisioned = No \n\ Access snapshots: Yes \n\ Display snapshots: Yes \n\ Read Caching: Disabled \n\ Disaster recovery setting: \n\ Recovered = No \n\ Transfer setting = Use file system default \n\ \n" HNAS_RESULT8 = "Logical unit creation started at 2014-12-24 00:38:30+00:00." HNAS_RESULT9 = "Logical unit deleted successfully." HNAS_RESULT10 = "" HNAS_RESULT11 = "Logical unit expansion started at 2014-12-24 01:25:03+00:00." HNAS_RESULT12 = "\n\ Alias : test_iqn \n\ Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\ Comment : \n\ Secret : test_secret \n\ Authentication : Enabled \n\ Logical units : No logical units. \n\ \n" HNAS_RESULT13 = "Logical unit added successfully." HNAS_RESULT14 = "Logical unit removed successfully." HNAS_RESULT15 = "Target created successfully." HNAS_RESULT16 = "" HNAS_RESULT17 = "\n\ EVS Type Label IP Address Mask Port \n\ ---------- --------------- ------------------ --------------- ------\n\ evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\ evs 2 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\ \n" HNAS_RESULT18 = "Version: 11.1.3225.01\n\ Directory: /u/u60/_Eng_Axalon_SMU/OfficialBuilds/fish/angel/3225.01/main/bin/\ x86_64_linux-bart_libc-2.7_release\n\ Date: Feb 22 2013, 04:10:09\n\ \n" HNAS_RESULT19 = " ID Label Size Used Snapshots \ Deduped Avail Thin ThinSize ThinAvail FS Type\n\ ---- ------------- ------- ------------- --------- ------- -------------\ ---- -------- --------- -------------------\n\ 1025 fs01-husvm 250 GB 47.1 GB (19%) 0 B (0%) NA 203 GB (81%)\ No 4 KB,WFS-2,128 DSBs\n\ 1047 manage_test02 19.9 GB 9.29 GB (47%) 0 B (0%) NA 10.6 GB (53%)\ No 4 KB,WFS-2,128 DSBs\n\ 1058 huge_FS 7 1.50 TB Not determined\n\ 1053 fs-unmounted 4 108 GB Not mounted \ NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\ WFS-2,128 DSBs,dedupe enabled\n\ \n" HNAS_RESULT20 = "\n\ Alias : test_iqn \n\ Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\ Comment : \n\ Secret : \n\ Authentication : Enabled \n\ Logical units : No logical units. \n\ \n" HNAS_RESULT20 = "Target does not exist." HNAS_RESULT21 = "Target created successfully." HNAS_RESULT22 = "Failed to establish SSC connection" HNAS_RESULT23 = "\n\ Alias : cinder-Gold\n\ Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-gold\n\ Comment :\n\ Secret : None\n\ Authentication : Enabled\n\ Logical units : No logical units.\n\ Access configuration :\n\ \n\ Alias : cinder-GoldIsh\n\ Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-goldish\n\ Comment :\n\ Secret : None\n\ Authentication : Enabled\n\ Logical units : No logical units.\n\ Access configuration :\n\ \n\ Alias : cinder-default\n\ Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default\n\ Comment :\n\ Secret : pxr6U37LZZJBoMc\n\ Authentication : Disabled\n\ Logical units : Logical units :\n\ \n\ LUN Logical Unit\n\ ---- --------------------------------\n\ 0 volume-8ddd1a54-9daf-4fa5-842...\n\ 1 volume-99da7ae7-1e7f-4d57-8bf...\n\ \n\ Access configuration :\n\ " HNAS_RESULT24 = "Logical unit modified successfully." HNAS_RESULT25 = "Current selected file system: HNAS-iSCSI-TEST, number(32)." 
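# HNAS_RESULT26 below mimics 'iscsi-lu list' output for a single logical
# unit; the HNAS_CMDS table that follows maps complete command tuples to
# (stdout, stderr) pairs so that m_run_cmd can stand in for
# HnasBackend.run_cmd throughout these tests.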
HNAS_RESULT26 = "Name : volume-test \n\ Comment: \n\ Path : /.cinder/volume-test.iscsi \n\ Size : 2 GB \n\ File System : fs1 \n\ File System Mounted : YES \n\ Logical Unit Mounted: No" HNAS_RESULT27 = "Connection reset" HNAS_CMDS = { ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsfs', 'list'): ["%s" % HNAS_RESULT1, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'cluster-getmac',): ["%s" % HNAS_RESULT2, ""], ('ssh', '-version',): ["%s" % HNAS_RESULT18, ""], ('ssh', '-u', 'supervisor', '-p', 'supervisor', '0.0.0.0', 'ver',): ["%s" % HNAS_RESULT3, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'ver',): ["%s" % HNAS_RESULT3, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-l'): ["%s" % HNAS_RESULT4, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a'): ["%s" % HNAS_RESULT5, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-f', 'test_hdp'): ["%s" % HNAS_RESULT6, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'for-each-evs', '-q', 'nfs-export', 'list'): ["%s" % HNAS_RESULT7, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-lu', 'add', '-e', 'test_name', 'test_hdp', '/.cinder/test_name.iscsi', '1M'): ["%s" % HNAS_RESULT8, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-lu', 'del', '-d', '-f', 'test_lun'): ["%s" % HNAS_RESULT9, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'file-clone-create', '-f', 'fs01-husvm', '/.cinder/test_lu.iscsi', 'cloned_lu'): ["%s" % HNAS_RESULT10, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-lu', 'expand', 'expanded_lu', '1M'): ["%s" % HNAS_RESULT11, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-target', 'list', 'test_iqn'): ["%s" % HNAS_RESULT12, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-target', 'addlu', 'test_iqn', 'test_lun', '0'): ["%s" % HNAS_RESULT13, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-target', 'dellu', 'test_iqn', 0): ["%s" % HNAS_RESULT14, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-target', 'add', 'myTarget', 'secret'): ["%s" % HNAS_RESULT15, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-target', 'mod', '-s', 'test_secret', '-a', 'enable', 'test_iqn'): ["%s" % HNAS_RESULT15, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-lu', 'clone', '-e', 'test_lu', 'test_clone', '/.cinder/test_clone.iscsi'): ["%s" % HNAS_RESULT16, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-e', '1'): ["%s" % HNAS_RESULT17, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-target', 'list'): ["%s" % HNAS_RESULT23, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-target', 'addlu', 'cinder-default', 'volume-8ddd1a54-0000-0000-0000', '2'): ["%s" % HNAS_RESULT13, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'selectfs', 'fs01-husvm'): ["%s" % HNAS_RESULT25, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-lu', 'list', 'test_lun'): ["%s" % HNAS_RESULT26, ""], ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', '1', 'iscsi-lu', 'mod', '-n', 'vol_test', 'new_vol_test'): ["%s" % 
HNAS_RESULT24, ""] } DRV_CONF = {'ssh_enabled': 'True', 'mgmt_ip0': '0.0.0.0', 'cluster_admin_ip0': None, 'ssh_port': '22', 'ssh_private_key': 'test_key', 'username': 'supervisor', 'password': 'supervisor'} UTILS_EXEC_OUT = ["output: test_cmd", ""] def m_run_cmd(*args, **kargs): return HNAS_CMDS.get(args) class HDSHNASBendTest(test.TestCase): def __init__(self, *args, **kwargs): super(HDSHNASBendTest, self).__init__(*args, **kwargs) @mock.patch.object(nfs, 'factory_bend') def setUp(self, m_factory_bend): super(HDSHNASBendTest, self).setUp() self.hnas_bend = hnas_backend.HnasBackend(DRV_CONF) @mock.patch('six.moves.builtins.open') @mock.patch('os.path.isfile', return_value=True) @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('paramiko.SSHClient') @mock.patch.object(putils, 'ssh_execute', return_value=(HNAS_RESULT5, '')) @mock.patch.object(utils, 'execute') @mock.patch.object(time, 'sleep') def test_run_cmd(self, m_sleep, m_utl, m_ssh, m_ssh_cli, m_pvt_key, m_file, m_open): save_hkey_file = CONF.ssh_hosts_key_file save_spath = CONF.state_path CONF.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts' CONF.state_path = '/var/lib/cinder' # Test main flow self.hnas_bend.drv_configs['ssh_enabled'] = 'True' out, err = self.hnas_bend.run_cmd('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a') self.assertIn('fs01-husvm', out) self.assertIn('WFS-2,128 DSBs', out) # Test exception throwing when not using SSH m_utl.side_effect = putils.ProcessExecutionError(stdout='', stderr=HNAS_RESULT22, exit_code=255) self.hnas_bend.drv_configs['ssh_enabled'] = 'False' self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd, 'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a') m_utl.side_effect = putils.ProcessExecutionError(stdout='', stderr=HNAS_RESULT27, exit_code=255) self.hnas_bend.drv_configs['ssh_enabled'] = 'False' self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd, 'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a') # Test exception throwing when using SSH m_ssh.side_effect = putils.ProcessExecutionError(stdout='', stderr=HNAS_RESULT22, exit_code=255) self.hnas_bend.drv_configs['ssh_enabled'] = 'True' self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd, 'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a') CONF.state_path = save_spath CONF.ssh_hosts_key_file = save_hkey_file @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) @mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT) def test_get_version(self, m_cmd, m_exec): out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor", "supervisor") self.assertIn('11.2.3319.14', out) self.assertIn('83-68-96-AA-DA-5D', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_get_version_ssh_cluster(self, m_cmd): self.hnas_bend.drv_configs['ssh_enabled'] = 'True' self.hnas_bend.drv_configs['cluster_admin_ip0'] = '1.1.1.1' out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor", "supervisor") self.assertIn('11.2.3319.14', out) self.assertIn('83-68-96-AA-DA-5D', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) @mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT) def test_get_version_ssh_disable(self, m_cmd, m_exec): self.hnas_bend.drv_configs['ssh_enabled'] = 'False' out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor", "supervisor") self.assertIn('11.2.3319.14', out) self.assertIn('83-68-96-AA-DA-5D', out) 
self.assertIn('Utility_version', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_get_iscsi_info(self, m_execute): out = self.hnas_bend.get_iscsi_info("ssh", "0.0.0.0", "supervisor", "supervisor") self.assertIn('172.24.44.20', out) self.assertIn('172.24.44.21', out) self.assertIn('10.0.0.20', out) self.assertEqual(4, len(out.split('\n'))) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd') def test_get_hdp_info(self, m_run_cmd): # Test when there are two or more EVSs m_run_cmd.return_value = (HNAS_RESULT5, "") out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor", "supervisor") self.assertEqual(10, len(out.split('\n'))) self.assertIn('gold', out) self.assertIn('silver', out) line1 = out.split('\n')[0] self.assertEqual(12, len(line1.split())) # Test when there is only one EVS m_run_cmd.return_value = (HNAS_RESULT19, "") out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor", "supervisor") self.assertEqual(3, len(out.split('\n'))) self.assertIn('fs01-husvm', out) self.assertIn('manage_test02', out) line1 = out.split('\n')[0] self.assertEqual(12, len(line1.split())) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_get_nfs_info(self, m_run_cmd): out = self.hnas_bend.get_nfs_info("ssh", "0.0.0.0", "supervisor", "supervisor") self.assertEqual(2, len(out.split('\n'))) self.assertIn('/export01-husvm', out) self.assertIn('172.24.44.20', out) self.assertIn('10.0.0.20', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_create_lu(self, m_cmd): out = self.hnas_bend.create_lu("ssh", "0.0.0.0", "supervisor", "supervisor", "test_hdp", "1", "test_name") self.assertIn('successfully created', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_delete_lu(self, m_cmd): out = self.hnas_bend.delete_lu("ssh", "0.0.0.0", "supervisor", "supervisor", "test_hdp", "test_lun") self.assertIn('deleted successfully', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_create_dup(self, m_cmd): out = self.hnas_bend.create_dup("ssh", "0.0.0.0", "supervisor", "supervisor", "test_lu", "test_hdp", "1", "test_clone") self.assertIn('successfully created', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_file_clone(self, m_cmd): out = self.hnas_bend.file_clone("ssh", "0.0.0.0", "supervisor", "supervisor", "fs01-husvm", "/.cinder/test_lu.iscsi", "cloned_lu") self.assertIn('LUN cloned_lu HDP', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_extend_vol(self, m_cmd): out = self.hnas_bend.extend_vol("ssh", "0.0.0.0", "supervisor", "supervisor", "test_hdp", "test_lun", "1", "expanded_lu") self.assertIn('successfully extended', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_add_iscsi_conn(self, m_cmd): out = self.hnas_bend.add_iscsi_conn("ssh", "0.0.0.0", "supervisor", "supervisor", "volume-8ddd1a54-0000-0000-0000", "test_hdp", "test_port", "cinder-default", "test_init") self.assertIn('successfully paired', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_del_iscsi_conn(self, m_cmd): out = self.hnas_bend.del_iscsi_conn("ssh", "0.0.0.0", "supervisor", "supervisor", "1", "test_iqn", 0) self.assertIn('already deleted', out) @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=0) 
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd') def test_get_targetiqn(self, m_cmd, m_get_evs): m_cmd.side_effect = [[HNAS_RESULT12, '']] out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor", "supervisor", "test_iqn", "test_hdp", "test_secret") self.assertEqual('test_iqn', out) m_cmd.side_effect = [[HNAS_RESULT20, ''], [HNAS_RESULT21, '']] out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor", "supervisor", "test_iqn2", "test_hdp", "test_secret") self.assertEqual('test_iqn2', out) m_cmd.side_effect = [[HNAS_RESULT20, ''], [HNAS_RESULT21, '']] out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor", "supervisor", "test_iqn3", "test_hdp", "") self.assertEqual('test_iqn3', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_set_targetsecret(self, m_execute): self.hnas_bend.set_targetsecret("ssh", "0.0.0.0", "supervisor", "supervisor", "test_iqn", "test_hdp", "test_secret") @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd') def test_get_targetsecret(self, m_run_cmd): # Test when the target has a secret m_run_cmd.return_value = (HNAS_RESULT12, "") out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor", "supervisor", "test_iqn", "test_hdp") self.assertEqual('test_secret', out) # Test when the target doesn't have a secret m_run_cmd.return_value = (HNAS_RESULT20, "") out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor", "supervisor", "test_iqn", "test_hdp") self.assertEqual('', out) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd') def test_get_targets(self, m_run_cmd): # Test normal behaviour m_run_cmd.return_value = (HNAS_RESULT23, "") tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor", "supervisor", 1) self.assertEqual(3, len(tgt_list)) self.assertEqual(2, len(tgt_list[2]['luns'])) # Test calling with an explicit target alias tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor", "supervisor", 1, 'cinder-default') self.assertEqual(1, len(tgt_list)) self.assertEqual(2, len(tgt_list[0]['luns'])) # Test an error in the backend command m_run_cmd.side_effect = putils.ProcessExecutionError tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor", "supervisor", 1) self.assertEqual(0, len(tgt_list)) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_check_targets(self, m_run_cmd): result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0", "supervisor", "supervisor", "test_hdp", "cinder-default") self.assertTrue(result) self.assertEqual('cinder-default', tgt['alias']) result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0", "supervisor", "supervisor", "test_hdp", "cinder-no-target") self.assertFalse(result) self.assertIsNone(tgt) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', side_effect=m_run_cmd) def test_check_lu(self, m_run_cmd): ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor", "supervisor", "volume-8ddd1a54-9daf-4fa5-842", "test_hdp") result, lunid, tgt = ret self.assertTrue(result) self.assertEqual('0', lunid) ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor", "supervisor", "volume-8ddd1a54-0000-0000-000", "test_hdp") result, lunid, tgt = ret self.assertFalse(result) @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', return_value=(HNAS_RESULT26, "")) def test_get_existing_lu_info(self, m_run_cmd, m_get_evs): out = self.hnas_bend.get_existing_lu_info("ssh", "0.0.0.0", "supervisor", "supervisor", "fs01-husvm", 
"test_lun") m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'fs01-husvm') m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', 1, 'iscsi-lu', 'list', 'test_lun') self.assertEqual(HNAS_RESULT26, out) @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1) @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd', return_value=(HNAS_RESULT24, "")) def test_rename_existing_lu(self, m_run_cmd, m_get_evs): out = self.hnas_bend.rename_existing_lu("ssh", "0.0.0.0", "supervisor", "supervisor", "fs01-husvm", "vol_test", "new_vol_test") m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'fs01-husvm') m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs', 1, 'iscsi-lu', 'mod', '-n', 'vol_test', 'new_vol_test') self.assertEqual(HNAS_RESULT24, out) cinder-8.0.0/cinder/tests/unit/test_api.py0000664000567000056710000000511712701406250021674 0ustar jenkinsjenkins00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the API endpoint.""" import six from six.moves import http_client import webob class FakeHttplibSocket(object): """A fake socket implementation for http_client.HTTPResponse, trivial.""" def __init__(self, response_string): self.response_string = response_string self._buffer = six.StringIO(response_string) def makefile(self, _mode, _other): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """A fake http_client.HTTPConnection for boto. requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into the http_client.HTTPResponse that boto expects. """ def __init__(self, app, host, is_secure=False): self.app = app self.host = host def request(self, method, path, data, headers): req = webob.Request.blank(path) req.method = method req.body = data req.headers = headers req.headers['Accept'] = 'text/html' req.host = self.host # Call the WSGI app, get the HTTP response resp = str(req.get_response(self.app)) # For some reason, the response doesn't have "HTTP/1.0 " prepended; I # guess that's a function the web server usually provides. 
resp = "HTTP/1.0 %s" % resp self.sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(self.sock) # NOTE(vish): boto is accessing private variables for some reason self._HTTPConnection__response = self.http_response self.http_response.begin() def getresponse(self): return self.http_response def getresponsebody(self): return self.sock.response_string def close(self): """Required for compatibility with boto/tornado.""" pass cinder-8.0.0/cinder/tests/unit/fake_hpe_lefthand_client.py0000664000567000056710000000173412701406250025032 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014-2016 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Fake HPE client for testing LeftHand without installing the client.""" import sys import mock from cinder.tests.unit import fake_hpe_client_exceptions as hpeexceptions hpelefthand = mock.Mock() hpelefthand.version = "2.1.0" hpelefthand.exceptions = hpeexceptions sys.modules['hpelefthandclient'] = hpelefthand cinder-8.0.0/cinder/tests/unit/fake_consistencygroup.py0000664000567000056710000000336512701406250024473 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import objects from cinder.tests.unit import fake_constants as fake def fake_db_consistencygroup(**updates): db_values = { 'id': fake.consistency_group_id, 'user_id': fake.user_id, 'project_id': fake.project_id, 'host': 'FakeHost', 'volumes': [], } for name, field in objects.ConsistencyGroup.fields.items(): if name in db_values: continue if field.nullable: db_values[name] = None elif field.default != fields.UnspecifiedDefault: db_values[name] = field.default else: raise Exception('fake_db_consistencygroup needs help with %s' % name) if updates: db_values.update(updates) return db_values def fake_consistencyobject_obj(context, **updates): return objects.ConsistencyGroup._from_db_object(context, objects.ConsistencyGroup(), fake_db_consistencygroup( **updates)) cinder-8.0.0/cinder/tests/unit/test_backup_tsm.py0000664000567000056710000003105012701406250023246 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Tests for volume backup to IBM Tivoli Storage Manager (TSM). """ import json import os import posix from oslo_concurrency import processutils as putils from oslo_utils import timeutils from cinder.backup.drivers import tsm from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder import test from cinder.tests.unit import fake_constants as fake from cinder import utils SIM = None VOLUME_PATH = '/dev/null' class TSMBackupSimulator(object): """Simulates TSM dsmc command. The simulator simulates the execution of the 'dsmc' command. This allows the TSM backup test to succeed even if TSM is not installed. """ def __init__(self): self._backup_list = {} self._hardlinks = [] self._next_cmd_error = { 'backup': '', } self._intro_msg = ('IBM Tivoli Storage Manager\n' 'Command Line Backup-Archive Client Interface\n' '...\n\n') def _cmd_backup(self, **kwargs): # simulates the execution of the dsmc backup command ret_msg = self._intro_msg path = kwargs['path'] ret_msg += ('Image backup of volume \'%s\'\n\n' 'Total number of objects inspected: 1\n' % path) if self._next_cmd_error['backup'] == 'fail': ret_msg += ('ANS1228E Sending of object \'%s\' ' 'failed\n' % path) ret_msg += ('ANS1063E The specified path is not a valid file ' 'system or logical volume name.') self._next_cmd_error['backup'] = '' retcode = 12 else: ret_msg += 'Total number of objects backed up: 1' if path not in self._backup_list: self._backup_list[path] = [] else: self._backup_list[path][-1]['active'] = False date = timeutils.utcnow() datestr = date.strftime("%m/%d/%Y %H:%M:%S") self._backup_list[path].append({'date': datestr, 'active': True}) retcode = 0 return (ret_msg, '', retcode) def _backup_exists(self, path): if path not in self._backup_list: return ('ANS4000E Error processing \'%s\': file space does ' 'not exist.' 
% path) return 'OK' def _cmd_restore(self, **kwargs): ret_msg = self._intro_msg path = kwargs['path'] exists = self._backup_exists(path) if exists == 'OK': ret_msg += ('Total number of objects restored: 1\n' 'Total number of objects failed: 0') retcode = 0 else: ret_msg += exists retcode = 12 return (ret_msg, '', retcode) def _cmd_delete(self, **kwargs): # simulates the execution of the dsmc delete command ret_msg = self._intro_msg path = kwargs['path'] exists = self._backup_exists(path) if exists == 'OK': ret_msg += ('Total number of objects deleted: 1\n' 'Total number of objects failed: 0') retcode = 0 index = len(self._backup_list[path]) - 1 del self._backup_list[path][index] if not len(self._backup_list[path]): del self._backup_list[path] else: ret_msg += exists retcode = 12 return (ret_msg, '', retcode) def _cmd_to_dict(self, arg_list): """Convert a command list into a kwargs dict (assumes a properly formed command).""" ret = {'cmd': arg_list[0], 'type': arg_list[1], 'path': arg_list[-1]} for i in range(2, len(arg_list) - 1): arg = arg_list[i].split('=') if len(arg) == 1: ret[arg[0]] = True else: ret[arg[0]] = arg[1] return ret def _exec_dsmc_cmd(self, cmd): """Simulates the execution of the dsmc command.""" cmd_switch = {'backup': self._cmd_backup, 'restore': self._cmd_restore, 'delete': self._cmd_delete} kwargs = self._cmd_to_dict(cmd) if kwargs['cmd'] != 'dsmc' or kwargs['type'] not in cmd_switch: raise putils.ProcessExecutionError(exit_code=1, stdout='', stderr='Not dsmc command', cmd=' '.join(cmd)) out, err, ret = cmd_switch[kwargs['type']](**kwargs) return (out, err, ret) def exec_cmd(self, cmd): """Simulates the execution of dsmc, rm, and ln commands.""" if cmd[0] == 'dsmc': out, err, ret = self._exec_dsmc_cmd(cmd) elif cmd[0] == 'ln': dest = cmd[2] out = '' if dest in self._hardlinks: err = ('ln: failed to create hard link `%s\': ' 'File exists' % dest) ret = 1 else: self._hardlinks.append(dest) err = '' ret = 0 elif cmd[0] == 'rm': dest = cmd[2] out = '' if dest not in self._hardlinks: err = ('rm: cannot remove `%s\': No such file or ' 'directory' % dest) ret = 1 else: index = self._hardlinks.index(dest) del self._hardlinks[index] err = '' ret = 0 else: raise putils.ProcessExecutionError(exit_code=1, stdout='', stderr='Unsupported command', cmd=' '.join(cmd)) return (out, err, ret) def error_injection(self, cmd, error): self._next_cmd_error[cmd] = error def fake_exec(*cmd, **kwargs): # Only boolean check_exit_code values are supported check_exit_code = kwargs.pop('check_exit_code', True) global SIM out, err, ret = SIM.exec_cmd(cmd) if ret and check_exit_code: raise putils.ProcessExecutionError( exit_code=-1, stdout=out, stderr=err, cmd=' '.join(cmd)) return (out, err) def fake_stat_image(path): # Simulate stat to return the mode of a block device; # st_mode (the first item in the sequence) # matches the mode of a block device return posix.stat_result((25008, 5753, 5, 1, 0, 6, 0, 1375881199, 1375881197, 1375881197)) def fake_stat_file(path): # Simulate stat to return the mode of a regular file; # st_mode (the first item in the sequence) # matches the mode of a regular file return posix.stat_result((33188, 5753, 5, 1, 0, 6, 0, 1375881199, 1375881197, 1375881197)) def fake_stat_illegal(path): # Simulate stat to return a mode that is neither a regular file nor a # block device (a directory), which the driver rejects as an invalid # backup mode return posix.stat_result((17407, 5753, 5, 1, 0, 6, 0, 1375881199, 1375881197, 1375881197)) class BackupTSMTestCase(test.TestCase): def setUp(self): super(BackupTSMTestCase, 
self).setUp() global SIM SIM = TSMBackupSimulator() self.sim = SIM self.ctxt = context.get_admin_context() self.driver = tsm.TSMBackupDriver(self.ctxt) self.stubs.Set(utils, 'execute', fake_exec) self.stubs.Set(os, 'stat', fake_stat_image) def _create_volume_db_entry(self, volume_id): vol = {'id': volume_id, 'size': 1, 'status': 'available'} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, backup_id, mode): if mode == 'file': backup_path = VOLUME_PATH else: backup_path = '/dev/backup-%s' % backup_id service_metadata = json.dumps({'backup_mode': mode, 'backup_path': backup_path}) backup = {'id': backup_id, 'size': 1, 'container': 'test-container', 'volume_id': fake.volume_id, 'service_metadata': service_metadata, 'user_id': fake.user_id, 'project_id': fake.project_id, } return db.backup_create(self.ctxt, backup)['id'] def test_backup_image(self): volume_id = fake.volume_id mode = 'image' self._create_volume_db_entry(volume_id) backup_id1 = fake.backup_id backup_id2 = fake.backup2_id backup_id3 = fake.backup3_id self._create_backup_db_entry(backup_id1, mode) self._create_backup_db_entry(backup_id2, mode) self._create_backup_db_entry(backup_id3, mode) with open(VOLUME_PATH, 'w+') as volume_file: # Create two backups of the volume backup1 = objects.Backup.get_by_id(self.ctxt, backup_id1) self.driver.backup(backup1, volume_file) backup2 = objects.Backup.get_by_id(self.ctxt, backup_id2) self.driver.backup(backup2, volume_file) # Create a backup that fails fail_back = objects.Backup.get_by_id(self.ctxt, backup_id3) self.sim.error_injection('backup', 'fail') self.assertRaises(exception.InvalidBackup, self.driver.backup, fail_back, volume_file) # Try to restore one, then the other self.driver.restore(backup1, volume_id, volume_file) self.driver.restore(backup2, volume_id, volume_file) # Delete both backups self.driver.delete(backup2) self.driver.delete(backup1) def test_backup_file(self): volume_id = fake.volume_id mode = 'file' self.stubs.Set(os, 'stat', fake_stat_file) self._create_volume_db_entry(volume_id) self._create_backup_db_entry(fake.backup_id, mode) self._create_backup_db_entry(fake.backup2_id, mode) with open(VOLUME_PATH, 'w+') as volume_file: # Create two backups of the volume backup1 = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.driver.backup(backup1, volume_file) backup2 = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) self.driver.backup(backup2, volume_file) # Create a backup that fails self._create_backup_db_entry(fake.backup3_id, mode) fail_back = objects.Backup.get_by_id(self.ctxt, fake.backup3_id) self.sim.error_injection('backup', 'fail') self.assertRaises(exception.InvalidBackup, self.driver.backup, fail_back, volume_file) # Try to restore one, then the other self.driver.restore(backup1, volume_id, volume_file) self.driver.restore(backup2, volume_id, volume_file) # Delete both backups self.driver.delete(backup1) self.driver.delete(backup2) def test_backup_invalid_mode(self): volume_id = fake.volume_id mode = 'illegal' self.stubs.Set(os, 'stat', fake_stat_illegal) self._create_volume_db_entry(volume_id) self._create_backup_db_entry(fake.backup_id, mode) with open(VOLUME_PATH, 'w+') as volume_file: # Create two backups of the volume backup1 = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertRaises(exception.InvalidBackup, self.driver.backup, backup1, volume_file) self.assertRaises(exception.InvalidBackup, self.driver.restore, backup1, volume_id, volume_file) self.assertRaises(exception.InvalidBackup, 
self.driver.delete, backup1) cinder-8.0.0/cinder/tests/unit/test_prophetstor_dpl.py0000664000567000056710000007760012701406250024361 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 ProphetStor, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import errno import re import mock from oslo_utils import units from six.moves import http_client from cinder import exception from cinder.objects import fields from cinder import test from cinder.tests.unit import fake_snapshot from cinder.volume import configuration as conf from cinder.volume.drivers.prophetstor import dpl_iscsi as DPLDRIVER from cinder.volume.drivers.prophetstor import dplcommon as DPLCOMMON POOLUUID = 'ac33fc6e417440d5a1ef27d7231e1cc4' VOLUMEUUID = 'a000000000000000000000000000001' INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa' DATA_IN_VOLUME = {'id': VOLUMEUUID} DATA_IN_CONNECTOR = {'initiator': INITIATOR} DATA_SERVER_INFO = 0, { 'metadata': {'vendor': 'ProphetStor', 'version': '1.5'}} DATA_POOLS = 0, { 'children': [POOLUUID] } DATA_POOLINFO = 0, { 'capabilitiesURI': '', 'children': [], 'childrenrange': '', 'completionStatus': 'Complete', 'metadata': {'available_capacity': 4294967296, 'ctime': 1390551362349, 'vendor': 'prophetstor', 'version': '1.5', 'display_description': 'Default Pool', 'display_name': 'default_pool', 'event_uuid': '4f7c4d679a664857afa4d51f282a516a', 'physical_device': {'cache': [], 'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'], 'log': [], 'spare': []}, 'pool_uuid': POOLUUID, 'properties': {'raid_level': 'raid0'}, 'state': 'Online', 'used_capacity': 0, 'total_capacity': 4294967296, 'zpool_guid': '8173612007304181810'}, 'objectType': 'application/cdmi-container', 'percentComplete': 100} DATA_ASSIGNVDEV = 0, { 'children': [], 'childrenrange': '', 'completionStatus': 'Complete', 'domainURI': '', 'exports': {'Network/iSCSI': [ {'logical_unit_name': '', 'logical_unit_number': '101', 'permissions': [INITIATOR], 'portals': ['172.31.1.210:3260'], 'target_identifier': 'iqn.2013-09.com.prophetstor:hypervisor.886423051816' }]}, 'metadata': {'ctime': 0, 'event_uuid': 'c11e90287e9348d0b4889695f1ec4be5', 'type': 'volume'}, 'objectID': '', 'objectName': 'd827e23d403f4f12bb208a6fec208fd8', 'objectType': 'application/cdmi-container', 'parentID': '8daa374670af447e8efea27e16bf84cd', 'parentURI': '/dpl_volume', 'snapshots': [] } DATA_OUTPUT = 0, None MOD_OUTPUT = {'status': 'available'} DATA_IN_GROUP = {'id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', 'name': 'group123', 'description': 'des123', 'status': ''} DATA_IN_VOLUME = {'id': 'abc123', 'display_name': 'abc123', 'display_description': '', 'size': 1, 'host': "hostname@backend#%s" % POOLUUID} DATA_IN_VOLUME_VG = {'id': 'abc123', 'display_name': 'abc123', 'display_description': '', 'size': 1, 'consistencygroup_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', 'status': 'available', 'host': "hostname@backend#%s" % POOLUUID} DATA_IN_REMOVE_VOLUME_VG = { 'id': 'fe2dbc515810451dab2f8c8a48d15bee', 'display_name': 'fe2dbc515810451dab2f8c8a48d15bee', 
'display_description': '', 'size': 1, 'consistencygroup_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', 'status': 'available', 'host': "hostname@backend#%s" % POOLUUID} DATA_IN_VOLUME1 = {'id': 'abc456', 'display_name': 'abc456', 'display_description': '', 'size': 1, 'host': "hostname@backend#%s" % POOLUUID} DATA_IN_CG_SNAPSHOT = { 'consistencygroup_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', 'id': 'cgsnapshot1', 'name': 'cgsnapshot1', 'description': 'cgsnapshot1', 'status': ''} DATA_IN_SNAPSHOT = {'id': 'snapshot1', 'volume_id': 'abc123', 'display_name': 'snapshot1', 'display_description': ''} DATA_OUT_SNAPSHOT_CG = { 'id': 'snapshot1', 'volume_id': 'abc123', 'display_name': 'snapshot1', 'display_description': '', 'cgsnapshot_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee'} DATA_OUT_CG = { "objectType": "application/cdmi-container", "objectID": "fe2dbc515810451dab2f8c8a48d15bee", "objectName": "", "parentURI": "/dpl_volgroup", "parentID": "fe2dbc515810451dab2f8c8a48d15bee", "domainURI": "", "capabilitiesURI": "", "completionStatus": "Complete", "percentComplete": 100, "metadata": { "type": "volume|snapshot|replica", "volume_group_uuid": "", "origin_uuid": "", "snapshot_uuid": "", "display_name": "", "display_description": "", "ctime": 12345678, "total_capacity": 1024, "snapshot_used_capacity": 0, "maximum_snapshot": 1024, "snapshot_quota": 0, "state": "", "properties": { "snapshot_rotation": True, } }, "childrenrange": "", "children": [ "fe2dbc515810451dab2f8c8a48d15bee", ], } class TestProphetStorDPLVolume(test.TestCase): def _gen_snapshot_url(self, vdevid, snapshotid): snapshot_url = '/%s/%s/%s' % (vdevid, DPLCOMMON.DPL_OBJ_SNAPSHOT, snapshotid) return snapshot_url def setUp(self): super(TestProphetStorDPLVolume, self).setUp() self.dplcmd = DPLCOMMON.DPLVolume('1.1.1.1', 8356, 'admin', 'password') self.DPL_MOCK = mock.MagicMock() self.dplcmd.objCmd = self.DPL_MOCK self.DPL_MOCK.send_cmd.return_value = DATA_OUTPUT def test_getserverinfo(self): self.dplcmd.get_server_info() self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_SYSTEM), None, [http_client.OK, http_client.ACCEPTED]) def test_createvdev(self): self.dplcmd.create_vdev(DATA_IN_VOLUME['id'], DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], POOLUUID, int(DATA_IN_VOLUME['size']) * units.Gi) metadata = {} metadata['display_name'] = DATA_IN_VOLUME['display_name'] metadata['display_description'] = DATA_IN_VOLUME['display_description'] metadata['pool_uuid'] = POOLUUID metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi metadata['maximum_snapshot'] = 1024 metadata['properties'] = dict(thin_provision=True) params = {} params['metadata'] = metadata self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def test_extendvdev(self): self.dplcmd.extend_vdev(DATA_IN_VOLUME['id'], DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], int(DATA_IN_VOLUME['size']) * units.Gi) metadata = {} metadata['display_name'] = DATA_IN_VOLUME['display_name'] metadata['display_description'] = DATA_IN_VOLUME['display_description'] metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi metadata['maximum_snapshot'] = 1024 params = {} params['metadata'] = metadata self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, 
DATA_IN_VOLUME['id']), params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def test_deletevdev(self): self.dplcmd.delete_vdev(DATA_IN_VOLUME['id'], True) metadata = {} params = {} metadata['force'] = True params['metadata'] = metadata self.DPL_MOCK.send_cmd.assert_called_once_with( 'DELETE', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND, http_client.NO_CONTENT]) def test_createvdevfromsnapshot(self): self.dplcmd.create_vdev_from_snapshot( DATA_IN_VOLUME['id'], DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], DATA_IN_SNAPSHOT['id'], POOLUUID) metadata = {} params = {} metadata['snapshot_operation'] = 'copy' metadata['display_name'] = DATA_IN_VOLUME['display_name'] metadata['display_description'] = DATA_IN_VOLUME['display_description'] metadata['pool_uuid'] = POOLUUID metadata['maximum_snapshot'] = 1024 metadata['properties'] = dict(thin_provision=True) params['metadata'] = metadata params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], DATA_IN_SNAPSHOT['id']) self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def test_getpool(self): self.dplcmd.get_pool(POOLUUID) self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL, POOLUUID), None, [http_client.OK, http_client.ACCEPTED]) def test_clonevdev(self): self.dplcmd.clone_vdev( DATA_IN_VOLUME['id'], DATA_IN_VOLUME1['id'], POOLUUID, DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], int(DATA_IN_VOLUME['size']) * units.Gi ) metadata = {} params = {} metadata["snapshot_operation"] = "clone" metadata["display_name"] = DATA_IN_VOLUME['display_name'] metadata["display_description"] = DATA_IN_VOLUME['display_description'] metadata["pool_uuid"] = POOLUUID metadata["total_capacity"] = int(DATA_IN_VOLUME['size']) * units.Gi metadata['maximum_snapshot'] = 1024 metadata['properties'] = dict(thin_provision=True) params["metadata"] = metadata params["copy"] = DATA_IN_VOLUME['id'] self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME1['id']), params, [http_client.OK, http_client.CREATED, http_client.ACCEPTED]) def test_createvdevsnapshot(self): self.dplcmd.create_vdev_snapshot( DATA_IN_VOLUME['id'], DATA_IN_SNAPSHOT['id'], DATA_IN_SNAPSHOT['display_name'], DATA_IN_SNAPSHOT['display_description'] ) metadata = {} params = {} metadata['display_name'] = DATA_IN_SNAPSHOT['display_name'] metadata['display_description'] = ( DATA_IN_SNAPSHOT['display_description']) params['metadata'] = metadata params['snapshot'] = DATA_IN_SNAPSHOT['id'] self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [http_client.OK, http_client.CREATED, http_client.ACCEPTED]) def test_getvdev(self): self.dplcmd.get_vdev(DATA_IN_VOLUME['id']) self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), None, [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND]) def test_getvdevstatus(self): self.dplcmd.get_vdev_status(DATA_IN_VOLUME['id'], '123456') self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/?event_uuid=%s' % 
(DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id'], '123456'), None, [http_client.OK, http_client.NOT_FOUND]) def test_getpoolstatus(self): self.dplcmd.get_pool_status(POOLUUID, '123456') self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL, POOLUUID, '123456'), None, [http_client.OK, http_client.NOT_FOUND]) def test_assignvdev(self): self.dplcmd.assign_vdev( DATA_IN_VOLUME['id'], 'iqn.1993-08.org.debian:01:test1', '', '1.1.1.1:3260', 0 ) params = {} metadata = {} exports = {} metadata['export_operation'] = 'assign' exports['Network/iSCSI'] = {} target_info = {} target_info['logical_unit_number'] = 0 target_info['logical_unit_name'] = '' permissions = [] portals = [] portals.append('1.1.1.1:3260') permissions.append('iqn.1993-08.org.debian:01:test1') target_info['permissions'] = permissions target_info['portals'] = portals exports['Network/iSCSI'] = target_info params['metadata'] = metadata params['exports'] = exports self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def test_unassignvdev(self): self.dplcmd.unassign_vdev(DATA_IN_VOLUME['id'], 'iqn.1993-08.org.debian:01:test1', '') params = {} metadata = {} exports = {} metadata['export_operation'] = 'unassign' params['metadata'] = metadata exports['Network/iSCSI'] = {} exports['Network/iSCSI']['target_identifier'] = '' permissions = [] permissions.append('iqn.1993-08.org.debian:01:test1') exports['Network/iSCSI']['permissions'] = permissions params['exports'] = exports self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT, http_client.NOT_FOUND]) def test_deletevdevsnapshot(self): self.dplcmd.delete_vdev_snapshot(DATA_IN_VOLUME['id'], DATA_IN_SNAPSHOT['id']) params = {} params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], DATA_IN_SNAPSHOT['id']) self.DPL_MOCK.send_cmd.assert_called_once_with( 'DELETE', '/%s/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id'], DPLCOMMON.DPL_OBJ_SNAPSHOT, DATA_IN_SNAPSHOT['id']), None, [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT, http_client.NOT_FOUND]) def test_listvdevsnapshots(self): self.dplcmd.list_vdev_snapshots(DATA_IN_VOLUME['id']) self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id'], DPLCOMMON.DPL_OBJ_SNAPSHOT), None, [http_client.OK]) class TestProphetStorDPLDriver(test.TestCase): def __init__(self, method): super(TestProphetStorDPLDriver, self).__init__(method) def _conver_uuid2hex(self, strID): return strID.replace('-', '') def setUp(self): super(TestProphetStorDPLDriver, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.san_ip = '1.1.1.1' self.configuration.dpl_port = 8356 self.configuration.san_login = 'admin' self.configuration.san_password = 'password' self.configuration.dpl_pool = POOLUUID self.configuration.iscsi_port = 3260 self.configuration.san_is_local = False self.configuration.san_thin_provision = True self.context = '' self.DPL_MOCK = mock.MagicMock() self.DB_MOCK = mock.MagicMock() self.dpldriver = DPLDRIVER.DPLISCSIDriver( configuration=self.configuration) self.dpldriver.dpl = self.DPL_MOCK 
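# Both the DPL REST client and (next line) the Cinder DB layer are replaced
# with MagicMocks, so each test can assert the exact backend calls the
# driver makes without talking to a live ProphetStor array.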
self.dpldriver.db = self.DB_MOCK self.dpldriver.do_setup(self.context) def test_get_volume_stats(self): self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO self.DPL_MOCK.get_server_info.return_value = DATA_SERVER_INFO res = self.dpldriver.get_volume_stats(True) self.assertEqual('ProphetStor', res['vendor_name']) self.assertEqual('1.5', res['driver_version']) pool = res["pools"][0] self.assertEqual(4, pool['total_capacity_gb']) self.assertEqual(4, pool['free_capacity_gb']) self.assertEqual(0, pool['reserved_percentage']) self.assertFalse(pool['QoS_support']) def test_create_volume(self): self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT self.dpldriver.create_volume(DATA_IN_VOLUME) self.DPL_MOCK.create_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id']), DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], self.configuration.dpl_pool, int(DATA_IN_VOLUME['size']) * units.Gi, True) def test_create_volume_without_pool(self): fake_volume = copy.deepcopy(DATA_IN_VOLUME) self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT self.configuration.dpl_pool = "" fake_volume['host'] = "host@backend" # missing pool self.assertRaises(exception.InvalidHost, self.dpldriver.create_volume, volume=fake_volume) def test_create_volume_with_configuration_pool(self): fake_volume = copy.deepcopy(DATA_IN_VOLUME) fake_volume['host'] = "host@backend" # missing pool self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT self.dpldriver.create_volume(fake_volume) self.DPL_MOCK.create_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id']), DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], self.configuration.dpl_pool, int(DATA_IN_VOLUME['size']) * units.Gi, True) def test_create_volume_of_group(self): self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT self.dpldriver.create_volume(DATA_IN_VOLUME_VG) self.DPL_MOCK.create_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id']), DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], self.configuration.dpl_pool, int(DATA_IN_VOLUME['size']) * units.Gi, True) self.DPL_MOCK.join_vg.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME_VG['id']), self._conver_uuid2hex( DATA_IN_VOLUME_VG['consistencygroup_id'])) def test_delete_volume(self): self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT self.dpldriver.delete_volume(DATA_IN_VOLUME) self.DPL_MOCK.delete_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id'])) def test_delete_volume_of_group(self): self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT self.DPL_MOCK.leave_vg.return_volume = DATA_OUTPUT self.dpldriver.delete_volume(DATA_IN_VOLUME_VG) self.DPL_MOCK.leave_vg.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME_VG['id']), self._conver_uuid2hex(DATA_IN_GROUP['id']) ) self.DPL_MOCK.delete_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id'])) def test_create_volume_from_snapshot(self): self.DPL_MOCK.create_vdev_from_snapshot.return_value = DATA_OUTPUT self.dpldriver.create_volume_from_snapshot(DATA_IN_VOLUME, DATA_IN_SNAPSHOT) self.DPL_MOCK.create_vdev_from_snapshot.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id']), DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']), self.configuration.dpl_pool, True) def test_create_cloned_volume(self): self.DPL_MOCK.clone_vdev.return_value = DATA_OUTPUT 
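# Cloning is expected to be delegated to a single clone_vdev call with the
# source and new volume UUIDs hex-normalized, as asserted below: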
self.dpldriver.create_cloned_volume(DATA_IN_VOLUME1, DATA_IN_VOLUME) self.DPL_MOCK.clone_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id']), self._conver_uuid2hex(DATA_IN_VOLUME1['id']), self.configuration.dpl_pool, DATA_IN_VOLUME1['display_name'], DATA_IN_VOLUME1['display_description'], int(DATA_IN_VOLUME1['size']) * units.Gi, True) def test_create_snapshot(self): self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT self.dpldriver.create_snapshot(DATA_IN_SNAPSHOT) self.DPL_MOCK.create_vdev_snapshot.assert_called_once_with( self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']), DATA_IN_SNAPSHOT['display_name'], DATA_IN_SNAPSHOT['display_description']) def test_delete_snapshot(self): self.DPL_MOCK.delete_vdev_snapshot.return_value = DATA_OUTPUT self.dpldriver.delete_snapshot(DATA_IN_SNAPSHOT) self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with( self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), self._conver_uuid2hex(DATA_IN_SNAPSHOT['id'])) def test_initialize_connection(self): self.DPL_MOCK.assign_vdev.return_value = DATA_ASSIGNVDEV self.DPL_MOCK.get_vdev.return_value = DATA_ASSIGNVDEV res = self.dpldriver.initialize_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) self.assertEqual('iscsi', res['driver_volume_type']) self.assertEqual(101, res['data']['target_lun']) self.assertTrue(res['data']['target_discovered']) self.assertEqual('172.31.1.210:3260', res['data']['target_portal']) self.assertEqual( 'iqn.2013-09.com.prophetstor:hypervisor.886423051816', res['data']['target_iqn']) def test_terminate_connection(self): self.DPL_MOCK.unassign_vdev.return_value = DATA_OUTPUT self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) self.DPL_MOCK.unassign_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id']), DATA_IN_CONNECTOR['initiator']) def test_terminate_connection_volume_detached(self): self.DPL_MOCK.unassign_vdev.return_value = errno.ENODATA, None self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) self.DPL_MOCK.unassign_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id']), DATA_IN_CONNECTOR['initiator']) def test_terminate_connection_failed(self): self.DPL_MOCK.unassign_vdev.return_value = errno.EFAULT, None ex = self.assertRaises( exception.VolumeBackendAPIException, self.dpldriver.terminate_connection, volume=DATA_IN_VOLUME, connector=DATA_IN_CONNECTOR) self.assertTrue( re.match(r".*Flexvisor failed", ex.msg)) def test_get_pool_info(self): self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO _, res = self.dpldriver._get_pool_info(POOLUUID) self.assertEqual(4294967296, res['metadata']['available_capacity']) self.assertEqual(1390551362349, res['metadata']['ctime']) self.assertEqual('Default Pool', res['metadata']['display_description']) self.assertEqual('default_pool', res['metadata']['display_name']) self.assertEqual('4f7c4d679a664857afa4d51f282a516a', res['metadata']['event_uuid']) self.assertEqual( {'cache': [], 'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'], 'log': [], 'spare': []}, res['metadata']['physical_device']) self.assertEqual(POOLUUID, res['metadata']['pool_uuid']) self.assertEqual( {'raid_level': 'raid0'}, res['metadata']['properties']) self.assertEqual('Online', res['metadata']['state']) self.assertEqual(4294967296, res['metadata']['total_capacity']) self.assertEqual('8173612007304181810', res['metadata']['zpool_guid']) def test_create_consistency_group(self): self.DPL_MOCK.create_vg.return_value = DATA_OUTPUT 
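# create_consistencygroup should map the Cinder group onto a DPL volume
# group (one create_vg call) and report the group as available: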
model_update = self.dpldriver.create_consistencygroup(self.context, DATA_IN_GROUP) self.DPL_MOCK.create_vg.assert_called_once_with( self._conver_uuid2hex(DATA_IN_GROUP['id']), DATA_IN_GROUP['name'], DATA_IN_GROUP['description']) self.assertDictMatch({'status': ( fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) def test_delete_consistency_group(self): self.DB_MOCK.volume_get_all_by_group.return_value = ( [DATA_IN_VOLUME_VG]) self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT self.DPL_MOCK.delete_cg.return_value = DATA_OUTPUT model_update, volumes = self.dpldriver.delete_consistencygroup( self.context, DATA_IN_GROUP, []) self.DPL_MOCK.delete_vg.assert_called_once_with( self._conver_uuid2hex(DATA_IN_GROUP['id'])) self.DPL_MOCK.delete_vdev.assert_called_once_with( self._conver_uuid2hex((DATA_IN_VOLUME_VG['id']))) self.assertDictMatch({'status': ( fields.ConsistencyGroupStatus.DELETED)}, model_update) def test_update_consistencygroup(self): self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT add_vol = DATA_IN_VOLUME_VG remove_vol = DATA_IN_REMOVE_VOLUME_VG (model_update, add_vols, remove_vols) = ( self.dpldriver.update_consistencygroup(self.context, DATA_IN_GROUP, [add_vol], [remove_vol])) self.DPL_MOCK.join_vg.assert_called_once_with( self._conver_uuid2hex(add_vol['id']), self._conver_uuid2hex(DATA_IN_GROUP['id'])) self.DPL_MOCK.leave_vg.assert_called_once_with( self._conver_uuid2hex(remove_vol['id']), self._conver_uuid2hex(DATA_IN_GROUP['id'])) self.assertDictMatch({'status': ( fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) def test_update_consistencygroup_exception_join(self): self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) self.DPL_MOCK.join_vg.return_value = -1, None self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT add_vol = DATA_IN_VOLUME_VG self.assertRaises(exception.VolumeBackendAPIException, self.dpldriver.update_consistencygroup, context=None, group=DATA_IN_GROUP, add_volumes=[add_vol], remove_volumes=None) def test_update_consistencygroup_exception_leave(self): self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) self.DPL_MOCK.leave_vg.return_value = -1, None remove_vol = DATA_IN_REMOVE_VOLUME_VG self.assertRaises(exception.VolumeBackendAPIException, self.dpldriver.update_consistencygroup, context=None, group=DATA_IN_GROUP, add_volumes=None, remove_volumes=[remove_vol]) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_create_consistency_group_snapshot(self, get_all_for_cgsnapshot): snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context) snapshot_obj.consistencygroup_id = \ DATA_IN_CG_SNAPSHOT['consistencygroup_id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT model_update, snapshots = self.dpldriver.create_cgsnapshot( self.context, snapshot_obj, []) self.assertDictMatch({'status': 'available'}, model_update) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_delete_consistency_group_snapshot(self, get_all_for_cgsnapshot): snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context) snapshot_obj.consistencygroup_id = \ DATA_IN_CG_SNAPSHOT['consistencygroup_id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.DPL_MOCK.delete_cgsnapshot.return_value = DATA_OUTPUT model_update, snapshots = self.dpldriver.delete_cgsnapshot( self.context, DATA_IN_CG_SNAPSHOT, []) 
self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with( self._conver_uuid2hex(DATA_IN_CG_SNAPSHOT['consistencygroup_id']), self._conver_uuid2hex(DATA_IN_CG_SNAPSHOT['id']), True) self.assertDictMatch({'status': 'deleted'}, model_update) cinder-8.0.0/cinder/tests/unit/test_xio.py0000664000567000056710000016336712701406250021736 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 X-IO Technologies. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_log import log as logging from cinder import context from cinder import exception from cinder import test from cinder.tests.unit import utils from cinder.volume.drivers import xio from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger("cinder.volume.driver") ISE_IP1 = '10.12.12.1' ISE_IP2 = '10.11.12.2' ISE_ISCSI_IP1 = '1.2.3.4' ISE_ISCSI_IP2 = '1.2.3.5' ISE_GID = 'isegid' ISE_IQN = ISE_GID ISE_WWN1 = ISE_GID + '1' ISE_WWN2 = ISE_GID + '2' ISE_WWN3 = ISE_GID + '3' ISE_WWN4 = ISE_GID + '4' ISE_TARGETS = [ISE_WWN1, ISE_WWN2, ISE_WWN3, ISE_WWN4] ISE_INIT_TARGET_MAP = {'init_wwn1': ISE_TARGETS, 'init_wwn2': ISE_TARGETS} VOLUME_SIZE = 10 NEW_VOLUME_SIZE = 20 VOLUME1 = {'id': '1', 'name': 'volume1', 'size': VOLUME_SIZE, 'volume_type_id': 'type1'} VOLUME2 = {'id': '2', 'name': 'volume2', 'size': VOLUME_SIZE, 'volume_type_id': 'type2', 'provider_auth': 'CHAP abc abc'} VOLUME3 = {'id': '3', 'name': 'volume3', 'size': VOLUME_SIZE, 'volume_type_id': None} SNAPSHOT1 = {'name': 'snapshot1', 'volume_name': VOLUME1['name'], 'volume_type_id': 'type3'} CLONE1 = {'id': '3', 'name': 'clone1', 'size': VOLUME_SIZE, 'volume_type_id': 'type4'} HOST1 = 'host1' HOST2 = 'host2' ISCSI_CONN1 = {'initiator': 'init_iqn1', 'host': HOST1} ISCSI_CONN2 = {'initiator': 'init_iqn2', 'host': HOST2} FC_CONN1 = {'wwpns': ['init_wwn1', 'init_wwn2'], 'host': HOST1} FC_CONN2 = {'wwpns': ['init_wwn3', 'init_wwn4'], 'host': HOST2} ISE_HTTP_IP = 'http://' + ISE_IP1 ISE_HOST_LOCATION = '/storage/hosts/1' ISE_HOST_LOCATION_URL = ISE_HTTP_IP + ISE_HOST_LOCATION ISE_VOLUME1_LOCATION = '/storage/volumes/volume1' ISE_VOLUME1_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME1_LOCATION ISE_VOLUME2_LOCATION = '/storage/volumes/volume2' ISE_VOLUME2_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME2_LOCATION ISE_VOLUME3_LOCATION = '/storage/volumes/volume3' ISE_VOLUME3_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME3_LOCATION ISE_SNAPSHOT_LOCATION = '/storage/volumes/snapshot1' ISE_SNAPSHOT_LOCATION_URL = ISE_HTTP_IP + ISE_SNAPSHOT_LOCATION ISE_CLONE_LOCATION = '/storage/volumes/clone1' ISE_CLONE_LOCATION_URL = ISE_HTTP_IP + ISE_CLONE_LOCATION ISE_ALLOCATION_LOCATION = '/storage/allocations/a1' ISE_ALLOCATION_LOCATION_URL = ISE_HTTP_IP + ISE_ALLOCATION_LOCATION ISE_GET_QUERY_XML =\ """ ABC12345 %s %s """ % (ISE_IP1, ISE_IP2) ISE_GET_QUERY_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_QUERY_XML.split())} ISE_GET_QUERY_NO_CAP_XML =\ """ ABC12345 %s %s """ % (ISE_IP1, ISE_IP2) ISE_GET_QUERY_NO_CAP_RESP =\ {'status': 200, 'location': 
'', 'content': " ".join(ISE_GET_QUERY_NO_CAP_XML.split())} ISE_GET_QUERY_NO_CTRL_XML =\ """ ABC12345 """ ISE_GET_QUERY_NO_CTRL_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_QUERY_NO_CTRL_XML.split())} ISE_GET_QUERY_NO_IP_XML =\ """ ABC12345 """ ISE_GET_QUERY_NO_IP_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_QUERY_NO_IP_XML.split())} ISE_GET_QUERY_NO_GID_XML =\ """ %s %s """ % (ISE_IP1, ISE_IP2) ISE_GET_QUERY_NO_GID_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_QUERY_NO_GID_XML.split())} ISE_GET_QUERY_NO_CLONE_XML =\ """ ABC12345 %s %s """ % (ISE_IP1, ISE_IP2) ISE_GET_QUERY_NO_CLONE_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_QUERY_NO_CLONE_XML.split())} ISE_GET_STORAGE_POOLS_XML =\ """ Pool 1 1
None
60 30 45 0 40 0 100 volgid volgid2
""" ISE_GET_STORAGE_POOLS_RESP =\ {'status': 200, 'location': 'Pool location', 'content': " ".join(ISE_GET_STORAGE_POOLS_XML.split())} ISE_GET_VOL_STATUS_NO_VOL_NODE_XML =\ """""" ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP =\ {'status': 200, 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, 'content': " ".join(ISE_GET_VOL_STATUS_NO_VOL_NODE_XML.split())} ISE_GET_VOL_STATUS_NO_STATUS_XML =\ """ """ % (ISE_VOLUME1_LOCATION_URL) ISE_GET_VOL_STATUS_NO_STATUS_RESP =\ {'status': 200, 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, 'content': " ".join(ISE_GET_VOL_STATUS_NO_STATUS_XML.split())} ISE_GET_VOL1_STATUS_XML =\ """
Prepared
10
""" % (ISE_VOLUME1_LOCATION_URL) ISE_GET_VOL1_STATUS_RESP =\ {'status': 200, 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, 'content': " ".join(ISE_GET_VOL1_STATUS_XML.split())} ISE_GET_VOL2_STATUS_XML =\ """
Prepared
""" % (ISE_VOLUME2_LOCATION_URL) ISE_GET_VOL2_STATUS_RESP =\ {'status': 200, 'location': 'u%s' % ISE_VOLUME2_LOCATION_URL, 'content': " ".join(ISE_GET_VOL2_STATUS_XML.split())} ISE_GET_VOL3_STATUS_XML =\ """
Prepared
""" % (ISE_VOLUME3_LOCATION_URL) ISE_GET_VOL3_STATUS_RESP =\ {'status': 200, 'location': 'u%s' % ISE_VOLUME3_LOCATION_URL, 'content': " ".join(ISE_GET_VOL3_STATUS_XML.split())} ISE_GET_SNAP1_STATUS_XML =\ """
Prepared
""" % (ISE_SNAPSHOT_LOCATION_URL) ISE_GET_SNAP1_STATUS_RESP =\ {'status': 200, 'location': 'u%s' % ISE_SNAPSHOT_LOCATION_URL, 'content': " ".join(ISE_GET_SNAP1_STATUS_XML.split())} ISE_GET_CLONE1_STATUS_XML =\ """
Prepared
""" % (ISE_CLONE_LOCATION_URL) ISE_GET_CLONE1_STATUS_RESP =\ {'status': 200, 'location': 'u%s' % ISE_CLONE_LOCATION_URL, 'content': " ".join(ISE_GET_CLONE1_STATUS_XML.split())} ISE_CREATE_VOLUME_XML = """""" ISE_CREATE_VOLUME_RESP =\ {'status': 201, 'location': ISE_VOLUME1_LOCATION_URL, 'content': " ".join(ISE_CREATE_VOLUME_XML.split())} ISE_GET_IONETWORKS_XML =\ """ """ ISE_GET_IONETWORKS_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_IONETWORKS_XML.split())} ISE_GET_IONETWORKS_CHAP_XML =\ """ abc abc """ ISE_GET_IONETWORKS_CHAP_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_IONETWORKS_CHAP_XML.split())} ISE_DELETE_VOLUME_XML = """""" ISE_DELETE_VOLUME_RESP =\ {'status': 204, 'location': '', 'content': " ".join(ISE_DELETE_VOLUME_XML.split())} ISE_GET_ALLOC_WITH_EP_XML =\ """ %s %s 1 """ %\ (ISE_ALLOCATION_LOCATION_URL, VOLUME1['name'], HOST1) ISE_GET_ALLOC_WITH_EP_RESP =\ {'status': 200, 'location': ISE_ALLOCATION_LOCATION_URL, 'content': " ".join(ISE_GET_ALLOC_WITH_EP_XML.split())} ISE_GET_ALLOC_WITH_NO_ALLOC_XML =\ """""" % ISE_ALLOCATION_LOCATION_URL ISE_GET_ALLOC_WITH_NO_ALLOC_RESP =\ {'status': 200, 'location': ISE_ALLOCATION_LOCATION_URL, 'content': " ".join(ISE_GET_ALLOC_WITH_NO_ALLOC_XML.split())} ISE_DELETE_ALLOC_XML = """""" ISE_DELETE_ALLOC_RESP =\ {'status': 204, 'location': '', 'content': " ".join(ISE_DELETE_ALLOC_XML.split())} ISE_GET_HOSTS_NOHOST_XML =\ """""" ISE_GET_HOSTS_NOHOST_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_HOSTS_NOHOST_XML.split())} ISE_GET_HOSTS_HOST1_XML =\ """ "OPENSTACK" %s 1 init_wwn1 init_wwn2 init_iqn1 """ % HOST1 ISE_GET_HOSTS_HOST1_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_HOSTS_HOST1_XML.split())} ISE_GET_HOSTS_HOST1_HOST_TYPE_XML =\ """ "WINDOWS" %s 1 init_wwn1 init_wwn2 init_iqn1 """ % HOST1 ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_HOSTS_HOST1_HOST_TYPE_XML.split())} ISE_GET_HOSTS_HOST2_XML =\ """ %s 2 init_wwn3 init_wwn4 init_iqn2 """ % HOST2 ISE_GET_HOSTS_HOST2_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_HOSTS_HOST2_XML.split())} ISE_CREATE_HOST_XML =\ """""" ISE_CREATE_HOST_RESP =\ {'status': 201, 'location': 'http://ip/storage/hosts/host1', 'content': " ".join(ISE_CREATE_HOST_XML.split())} ISE_CREATE_ALLOC_XML =\ """""" ISE_CREATE_ALLOC_RESP =\ {'status': 201, 'location': ISE_ALLOCATION_LOCATION_URL, 'content': " ".join(ISE_CREATE_ALLOC_XML.split())} ISE_GET_ENDPOINTS_XML =\ """ isegid iSCSI ise1 a1 isegid Fibre Channel ise1 a1 """ % (ISE_ALLOCATION_LOCATION_URL, ISE_ALLOCATION_LOCATION_URL) ISE_GET_ENDPOINTS_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_ENDPOINTS_XML.split())} ISE_GET_CONTROLLERS_XML =\ """ %s isegid %s %s %s isegid %s %s """ % (ISE_ISCSI_IP1, ISE_WWN1, ISE_WWN2, ISE_ISCSI_IP2, ISE_WWN3, ISE_WWN4) ISE_GET_CONTROLLERS_RESP =\ {'status': 200, 'location': '', 'content': " ".join(ISE_GET_CONTROLLERS_XML.split())} ISE_CREATE_SNAPSHOT_XML = """""" ISE_CREATE_SNAPSHOT_RESP =\ {'status': 201, 'location': ISE_SNAPSHOT_LOCATION_URL, 'content': " ".join(ISE_CREATE_SNAPSHOT_XML.split())} ISE_PREP_SNAPSHOT_XML = """""" ISE_PREP_SNAPSHOT_RESP =\ {'status': 202, 'location': ISE_SNAPSHOT_LOCATION_URL, 'content': " ".join(ISE_PREP_SNAPSHOT_XML.split())} ISE_MODIFY_VOLUME_XML = """""" ISE_MODIFY_VOLUME_RESP =\ {'status': 201, 'location': ISE_VOLUME1_LOCATION_URL, 'content': " ".join(ISE_MODIFY_VOLUME_XML.split())} ISE_MODIFY_HOST_XML = """""" 
ISE_MODIFY_HOST_RESP =\ {'status': 201, 'location': ISE_HOST_LOCATION_URL, 'content': " ".join(ISE_MODIFY_HOST_XML.split())} ISE_BAD_CONNECTION_RESP =\ {'status': 0, 'location': '', 'content': " "} ISE_400_RESP =\ {'status': 400, 'location': '', 'content': ""} ISE_GET_VOL_STATUS_404_XML = \ """VOLUME not found.""" ISE_GET_VOL_STATUS_404_RESP =\ {'status': 404, 'location': '', 'content': " ".join(ISE_GET_VOL_STATUS_404_XML.split())} ISE_400_INVALID_STATE_XML = \ """Not in a valid state.""" ISE_400_INVALID_STATE_RESP =\ {'status': 400, 'location': '', 'content': " ".join(ISE_400_INVALID_STATE_XML.split())} ISE_409_CONFLICT_XML = \ """Conflict""" ISE_409_CONFLICT_RESP =\ {'status': 409, 'location': '', 'content': " ".join(ISE_409_CONFLICT_XML.split())} DRIVER = "cinder.volume.drivers.xio.XIOISEDriver" @mock.patch(DRIVER + "._opener", autospec=True) class XIOISEDriverTestCase(object): # Test cases for X-IO volume driver def setUp(self): super(XIOISEDriverTestCase, self).setUp() # set good default values self.configuration = mock.Mock() self.configuration.san_ip = ISE_IP1 self.configuration.san_user = 'fakeuser' self.configuration.san_password = 'fakepass' self.configuration.iscsi_ip_address = ISE_ISCSI_IP1 self.configuration.driver_use_ssl = False self.configuration.ise_completion_retries = 30 self.configuration.ise_connection_retries = 5 self.configuration.ise_retry_interval = 1 self.configuration.volume_backend_name = 'ise1' self.driver = None self.protocol = '' self.connector = None self.connection_failures = 0 self.hostgid = '' self.use_response_table = 1 def setup_test(self, protocol): self.protocol = protocol # set good default values if self.protocol == 'iscsi': self.configuration.ise_protocol = protocol self.connector = ISCSI_CONN1 self.hostgid = self.connector['initiator'] elif self.protocol == 'fibre_channel': self.configuration.ise_protocol = protocol self.connector = FC_CONN1 self.hostgid = self.connector['wwpns'][0] def setup_driver(self): # this setups up driver object with previously set configuration values if self.configuration.ise_protocol == 'iscsi': self.driver =\ xio.XIOISEISCSIDriver(configuration=self.configuration) elif self.configuration.ise_protocol == 'fibre_channel': self.driver =\ xio.XIOISEFCDriver(configuration=self.configuration) elif self.configuration.ise_protocol == 'test_prot': # if test_prot specified override with correct protocol # used to bypass protocol specific driver self.configuration.ise_protocol = self.protocol self.driver = xio.XIOISEDriver(configuration=self.configuration) else: # Invalid protocol type raise exception.Invalid() ################################# # UNIT TESTS # ################################# def test_do_setup(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) self.driver.do_setup(None) def test_negative_do_setup_no_clone_support(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_NO_CLONE_RESP]) self.assertRaises(exception.XIODriverException, self.driver.do_setup, None) def test_negative_do_setup_no_capabilities(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_NO_CAP_RESP]) self.assertRaises(exception.XIODriverException, self.driver.do_setup, None) def test_negative_do_setup_no_ctrl(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_NO_CTRL_RESP]) self.assertRaises(exception.XIODriverException, self.driver.do_setup, None) def test_negative_do_setup_no_ipaddress(self, mock_req): self.setup_driver() 
mock_req.side_effect = iter([ISE_GET_QUERY_NO_IP_RESP]) self.driver.do_setup(None) def test_negative_do_setup_bad_globalid_none(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_NO_GID_RESP]) self.assertRaises(exception.XIODriverException, self.driver.do_setup, None) def test_check_for_setup_error(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) self.setup_driver() self.driver.check_for_setup_error() def test_negative_do_setup_bad_ip(self, mock_req): # set san_ip to bad value self.configuration.san_ip = '' mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) self.setup_driver() self.assertRaises(exception.XIODriverException, self.driver.check_for_setup_error) def test_negative_do_setup_bad_user_blank(self, mock_req): # set san_user to bad value self.configuration.san_login = '' mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) self.setup_driver() self.assertRaises(exception.XIODriverException, self.driver.check_for_setup_error) def test_negative_do_setup_bad_password_blank(self, mock_req): # set san_password to bad value self.configuration.san_password = '' mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) self.setup_driver() self.assertRaises(exception.XIODriverException, self.driver.check_for_setup_error) def test_get_volume_stats(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_STORAGE_POOLS_RESP]) backend_name = self.configuration.volume_backend_name if self.configuration.ise_protocol == 'iscsi': protocol = 'iSCSI' else: protocol = 'fibre_channel' exp_result = {'vendor_name': "X-IO", 'driver_version': "1.1.4", 'volume_backend_name': backend_name, 'reserved_percentage': 0, 'total_capacity_gb': 100, 'free_capacity_gb': 60, 'QoS_support': True, 'affinity': True, 'thin': False, 'pools': [{'pool_ise_name': "Pool 1", 'pool_name': "1", 'status': "Operational", 'status_details': "None", 'free_capacity_gb': 60, 'free_capacity_gb_raid_0': 60, 'free_capacity_gb_raid_1': 30, 'free_capacity_gb_raid_5': 45, 'allocated_capacity_gb': 40, 'allocated_capacity_gb_raid_0': 0, 'allocated_capacity_gb_raid_1': 40, 'allocated_capacity_gb_raid_5': 0, 'health': 100, 'media': "Hybrid", 'total_capacity_gb': 100, 'QoS_support': True, 'reserved_percentage': 0}], 'active_volumes': 2, 'storage_protocol': protocol} act_result = self.driver.get_volume_stats(True) self.assertDictMatch(exp_result, act_result) def test_get_volume_stats_ssl(self, mock_req): self.configuration.driver_use_ssl = True self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_STORAGE_POOLS_RESP]) self.driver.get_volume_stats(True) def test_negative_get_volume_stats_bad_primary(self, mock_req): self.configuration.ise_connection_retries = 1 self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_BAD_CONNECTION_RESP, ISE_GET_STORAGE_POOLS_RESP]) self.driver.get_volume_stats(True) def test_create_volume(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) VOLUME1['volume_type_id'] = type_ref['id'] self.setup_driver() if self.configuration.ise_protocol == 'iscsi': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_CREATE_VOLUME_RESP, ISE_GET_VOL1_STATUS_RESP, 
ISE_GET_IONETWORKS_RESP]) exp_result = {} exp_result = {"provider_auth": ""} act_result = self.driver.create_volume(VOLUME1) self.assertDictMatch(exp_result, act_result) elif self.configuration.ise_protocol == 'fibre_channel': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_CREATE_VOLUME_RESP, ISE_GET_VOL1_STATUS_RESP]) self.driver.create_volume(VOLUME1) def test_create_volume_chap(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) VOLUME1['volume_type_id'] = type_ref['id'] self.setup_driver() if self.configuration.ise_protocol == 'iscsi': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_CREATE_VOLUME_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_GET_IONETWORKS_CHAP_RESP]) exp_result = {} exp_result = {"provider_auth": "CHAP abc abc"} act_result = self.driver.create_volume(VOLUME1) self.assertDictMatch(exp_result, act_result) elif self.configuration.ise_protocol == 'fibre_channel': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_CREATE_VOLUME_RESP, ISE_GET_VOL1_STATUS_RESP]) self.driver.create_volume(VOLUME1) def test_create_volume_type_none(self, mock_req): self.setup_driver() if self.configuration.ise_protocol == 'iscsi': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_CREATE_VOLUME_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_GET_IONETWORKS_RESP]) elif self.configuration.ise_protocol == 'fibre_channel': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_CREATE_VOLUME_RESP, ISE_GET_VOL1_STATUS_RESP]) self.driver.create_volume(VOLUME3) def test_delete_volume(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_DELETE_ALLOC_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_DELETE_VOLUME_RESP, ISE_GET_VOL_STATUS_404_RESP]) self.setup_driver() self.driver.delete_volume(VOLUME1) def test_delete_volume_delayed(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_DELETE_ALLOC_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_DELETE_VOLUME_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_GET_VOL_STATUS_404_RESP]) self.setup_driver() self.driver.delete_volume(VOLUME1) def test_delete_volume_timeout(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_DELETE_ALLOC_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_DELETE_VOLUME_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_GET_VOL1_STATUS_RESP]) self.configuration.ise_completion_retries = 3 self.setup_driver() self.driver.delete_volume(VOLUME1) def test_delete_volume_none_existing(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_DELETE_ALLOC_RESP, ISE_GET_VOL1_STATUS_RESP]) self.setup_driver() self.driver.delete_volume(VOLUME2) def test_initialize_connection_positive(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST2_RESP, ISE_CREATE_HOST_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_CREATE_ALLOC_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_GET_CONTROLLERS_RESP]) self.setup_driver() exp_result = {} if self.configuration.ise_protocol == 'iscsi': exp_result = {"driver_volume_type": "iscsi", "data": {"target_lun": 1, "volume_id": '1', "target_discovered": False, "target_iqn": ISE_IQN, "target_portal": 
ISE_ISCSI_IP1 + ":3260"}} elif self.configuration.ise_protocol == 'fibre_channel': exp_result = {"driver_volume_type": "fibre_channel", "data": {"target_lun": 1, "volume_id": '1', "target_discovered": True, "initiator_target_map": ISE_INIT_TARGET_MAP, "target_wwn": ISE_TARGETS}} act_result =\ self.driver.initialize_connection(VOLUME1, self.connector) self.assertDictMatch(exp_result, act_result) def test_initialize_connection_positive_host_type(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP, ISE_MODIFY_HOST_RESP, ISE_CREATE_ALLOC_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_GET_CONTROLLERS_RESP]) self.setup_driver() exp_result = {} if self.configuration.ise_protocol == 'iscsi': exp_result = {"driver_volume_type": "iscsi", "data": {"target_lun": 1, "volume_id": '1', "target_discovered": False, "target_iqn": ISE_IQN, "target_portal": ISE_ISCSI_IP1 + ":3260"}} elif self.configuration.ise_protocol == 'fibre_channel': exp_result = {"driver_volume_type": "fibre_channel", "data": {"target_lun": 1, "volume_id": '1', "target_discovered": True, "initiator_target_map": ISE_INIT_TARGET_MAP, "target_wwn": ISE_TARGETS}} act_result =\ self.driver.initialize_connection(VOLUME1, self.connector) self.assertDictMatch(exp_result, act_result) def test_initialize_connection_positive_chap(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST2_RESP, ISE_CREATE_HOST_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_CREATE_ALLOC_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_GET_CONTROLLERS_RESP]) self.setup_driver() exp_result = {} if self.configuration.ise_protocol == 'iscsi': exp_result = {"driver_volume_type": "iscsi", "data": {"target_lun": 1, "volume_id": '2', "target_discovered": False, "target_iqn": ISE_IQN, "target_portal": ISE_ISCSI_IP1 + ":3260", 'auth_method': 'CHAP', 'auth_username': 'abc', 'auth_password': 'abc'}} elif self.configuration.ise_protocol == 'fibre_channel': exp_result = {"driver_volume_type": "fibre_channel", "data": {"target_lun": 1, "volume_id": '2', "target_discovered": True, "initiator_target_map": ISE_INIT_TARGET_MAP, "target_wwn": ISE_TARGETS}} act_result =\ self.driver.initialize_connection(VOLUME2, self.connector) self.assertDictMatch(exp_result, act_result) def test_initialize_connection_negative_no_host(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST2_RESP, ISE_CREATE_HOST_RESP, ISE_GET_HOSTS_HOST2_RESP]) self.setup_driver() self.assertRaises(exception.XIODriverException, self.driver.initialize_connection, VOLUME2, self.connector) def test_initialize_connection_negative_host_type(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP, ISE_400_RESP]) self.setup_driver() self.assertRaises(exception.XIODriverException, self.driver.initialize_connection, VOLUME2, self.connector) def test_terminate_connection_positive(self, mock_req): self.setup_driver() if self.configuration.ise_protocol == 'iscsi': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_DELETE_ALLOC_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_DELETE_ALLOC_RESP]) elif self.configuration.ise_protocol == 'fibre_channel': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_DELETE_ALLOC_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_GET_CONTROLLERS_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_DELETE_ALLOC_RESP]) self.driver.terminate_connection(VOLUME1, self.connector) def 
test_terminate_connection_positive_noalloc(self, mock_req): self.setup_driver() if self.configuration.ise_protocol == 'iscsi': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_DELETE_ALLOC_RESP]) elif self.configuration.ise_protocol == 'fibre_channel': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, ISE_GET_CONTROLLERS_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_DELETE_ALLOC_RESP]) self.driver.terminate_connection(VOLUME1, self.connector) def test_negative_terminate_connection_bad_host(self, mock_req): self.setup_driver() test_connector = {} if self.configuration.ise_protocol == 'iscsi': test_connector['initiator'] = 'bad_iqn' test_connector['host'] = '' mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST1_RESP]) elif self.configuration.ise_protocol == 'fibre_channel': test_connector['wwpns'] = 'bad_wwn' test_connector['host'] = '' mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_HOSTS_HOST1_RESP, ISE_GET_CONTROLLERS_RESP]) self.driver.terminate_connection(VOLUME1, test_connector) def test_create_snapshot(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) SNAPSHOT1['volume_type_id'] = type_ref['id'] mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_PREP_SNAPSHOT_RESP, ISE_GET_SNAP1_STATUS_RESP, ISE_CREATE_SNAPSHOT_RESP, ISE_GET_SNAP1_STATUS_RESP]) self.setup_driver() self.driver.create_snapshot(SNAPSHOT1) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_negative_create_snapshot_invalid_state_recover(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) SNAPSHOT1['volume_type_id'] = type_ref['id'] mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_400_INVALID_STATE_RESP, ISE_PREP_SNAPSHOT_RESP, ISE_GET_SNAP1_STATUS_RESP, ISE_CREATE_SNAPSHOT_RESP, ISE_GET_SNAP1_STATUS_RESP]) self.setup_driver() self.driver.create_snapshot(SNAPSHOT1) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_negative_create_snapshot_invalid_state_norecover(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) SNAPSHOT1['volume_type_id'] = type_ref['id'] mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_400_INVALID_STATE_RESP, 
ISE_400_INVALID_STATE_RESP, ISE_400_INVALID_STATE_RESP, ISE_400_INVALID_STATE_RESP, ISE_400_INVALID_STATE_RESP]) self.configuration.ise_completion_retries = 5 self.setup_driver() self.assertRaises(exception.XIODriverException, self.driver.create_snapshot, SNAPSHOT1) def test_negative_create_snapshot_conflict(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) SNAPSHOT1['volume_type_id'] = type_ref['id'] mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_409_CONFLICT_RESP]) self.configuration.ise_completion_retries = 1 self.setup_driver() self.assertRaises(exception.XIODriverException, self.driver.create_snapshot, SNAPSHOT1) def test_delete_snapshot(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_ALLOC_WITH_EP_RESP, ISE_DELETE_ALLOC_RESP, ISE_GET_SNAP1_STATUS_RESP, ISE_DELETE_VOLUME_RESP]) self.setup_driver() self.driver.delete_snapshot(SNAPSHOT1) def test_clone_volume(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) VOLUME1['volume_type_id'] = type_ref['id'] mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_PREP_SNAPSHOT_RESP, ISE_GET_SNAP1_STATUS_RESP, ISE_CREATE_SNAPSHOT_RESP, ISE_GET_SNAP1_STATUS_RESP]) self.setup_driver() self.driver.create_cloned_volume(CLONE1, VOLUME1) def test_extend_volume(self, mock_req): mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_MODIFY_VOLUME_RESP]) self.setup_driver() self.driver.extend_volume(VOLUME1, NEW_VOLUME_SIZE) def test_retype_volume(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) VOLUME1['volume_type_id'] = type_ref['id'] # New volume type extra_specs = {"Feature:Pool": "1", "Feature:Raid": "5", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT2', extra_specs) specs = {'qos:minIOPS': '30', 'qos:maxIOPS': '3000', 'qos:burstIOPS': '10000'} qos = qos_specs.create(ctxt, 'fake-qos2', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_MODIFY_VOLUME_RESP]) self.setup_driver() self.driver.retype(ctxt, VOLUME1, type_ref, 0, 0) def test_create_volume_from_snapshot(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} 
qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) SNAPSHOT1['volume_type_id'] = type_ref['id'] mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_SNAP1_STATUS_RESP, ISE_PREP_SNAPSHOT_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_CREATE_SNAPSHOT_RESP, ISE_GET_VOL1_STATUS_RESP]) self.setup_driver() self.driver.create_volume_from_snapshot(VOLUME1, SNAPSHOT1) def test_manage_existing(self, mock_req): ctxt = context.get_admin_context() extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1", "Affinity:Type": "flash", "Alloc:Type": "thick"} type_ref = volume_types.create(ctxt, 'VT1', extra_specs) specs = {'qos:minIOPS': '20', 'qos:maxIOPS': '2000', 'qos:burstIOPS': '5000'} qos = qos_specs.create(ctxt, 'fake-qos', specs) qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) VOLUME1['volume_type_id'] = type_ref['id'] self.setup_driver() if self.configuration.ise_protocol == 'iscsi': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_MODIFY_VOLUME_RESP, ISE_GET_IONETWORKS_RESP]) elif self.configuration.ise_protocol == 'fibre_channel': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_MODIFY_VOLUME_RESP]) self.driver.manage_existing(VOLUME1, {'source-name': 'testvol'}) def test_manage_existing_no_source_name(self, mock_req): self.setup_driver() if self.configuration.ise_protocol == 'iscsi': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_MODIFY_VOLUME_RESP, ISE_GET_IONETWORKS_RESP]) elif self.configuration.ise_protocol == 'fibre_channel': mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP, ISE_MODIFY_VOLUME_RESP]) self.assertRaises(exception.XIODriverException, self.driver.manage_existing, VOLUME1, {}) def test_manage_existing_get_size(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP]) exp_result = 10 act_result = \ self.driver.manage_existing_get_size(VOLUME1, {'source-name': 'a'}) self.assertEqual(exp_result, act_result) def test_manage_existing_get_size_no_source_name(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP]) self.assertRaises(exception.XIODriverException, self.driver.manage_existing_get_size, VOLUME1, {}) def test_unmanage(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL1_STATUS_RESP]) self.driver.unmanage(VOLUME1) def test_negative_unmanage_no_volume_status_xml(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL_STATUS_NO_STATUS_RESP]) self.driver.unmanage(VOLUME1) def test_negative_unmanage_no_volume_xml(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP]) self.assertRaises(exception.XIODriverException, self.driver.unmanage, VOLUME1) def test_negative_unmanage_non_existing_volume(self, mock_req): self.setup_driver() mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ISE_GET_VOL_STATUS_404_RESP]) self.assertRaises(exception.XIODriverException, self.driver.unmanage, VOLUME1) class XIOISEISCSIDriverTestCase(XIOISEDriverTestCase, test.TestCase): def setUp(self): super(XIOISEISCSIDriverTestCase, self).setUp() self.setup_test('iscsi') class XIOISEFCDriverTestCase(XIOISEDriverTestCase, test.TestCase): def setUp(self): super(XIOISEFCDriverTestCase, self).setUp() self.setup_test('fibre_channel') 
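The two TestCase classes that close the file above show the pattern this driver suite relies on (and which MigrationsMixin in the next file uses as well): the shared test body is written as a mixin that is deliberately not a test.TestCase, and one thin concrete subclass per protocol mixes it into the real TestCase. A minimal self-contained sketch of that pattern, using plain unittest — FakeDriver, ProtocolDriverTests and the class names below are illustrative stand-ins, not cinder code:

import unittest


class FakeDriver(object):
    """Stand-in driver whose reported connection type depends on protocol."""

    def __init__(self, protocol):
        self.protocol = protocol

    def connection_type(self):
        # Mirror the iscsi/fibre_channel split the X-IO tests switch on.
        return 'iscsi' if self.protocol == 'iscsi' else 'fibre_channel'


class ProtocolDriverTests(object):
    # Deliberately NOT a unittest.TestCase: the runner would otherwise
    # collect every test a third time on the bare mixin, which has no
    # protocol configured.

    def setup_test(self, protocol):
        self.expected = protocol
        self.driver = FakeDriver(protocol)

    def test_connection_type_matches_protocol(self):
        self.assertEqual(self.expected, self.driver.connection_type())


class ISCSIDriverTests(ProtocolDriverTests, unittest.TestCase):
    def setUp(self):
        super(ISCSIDriverTests, self).setUp()
        self.setup_test('iscsi')


class FCDriverTests(ProtocolDriverTests, unittest.TestCase):
    def setUp(self):
        super(FCDriverTests, self).setUp()
        self.setup_test('fibre_channel')


if __name__ == '__main__':
    unittest.main()

Running this module with python -m unittest executes the shared test twice, once per concrete subclass, which is exactly how XIOISEISCSIDriverTestCase and XIOISEFCDriverTestCase reuse the whole XIOISEDriverTestCase body.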
cinder-8.0.0/cinder/tests/unit/test_migrations.py0000664000567000056710000011456312701406250023305 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. This test case reads the configuration file test_migrations.conf for database connection settings to use in the tests. For each connection found in the config file, the test case runs a series of test cases to ensure that migrations work properly both upgrading and downgrading, and that no data loss occurs if possible. """ import os import uuid import fixtures from migrate.versioning import api as migration_api from migrate.versioning import repository from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations from oslo_db.sqlalchemy import utils as db_utils import sqlalchemy from cinder.db import migration import cinder.db.sqlalchemy.migrate_repo class MigrationsMixin(test_migrations.WalkVersionsMixin): """Test sqlalchemy-migrate migrations.""" BOOL_TYPE = sqlalchemy.types.BOOLEAN TIME_TYPE = sqlalchemy.types.DATETIME INTEGER_TYPE = sqlalchemy.types.INTEGER VARCHAR_TYPE = sqlalchemy.types.VARCHAR @property def INIT_VERSION(self): return migration.INIT_VERSION @property def REPOSITORY(self): migrate_file = cinder.db.sqlalchemy.migrate_repo.__file__ return repository.Repository( os.path.abspath(os.path.dirname(migrate_file))) @property def migration_api(self): return migration_api @property def migrate_engine(self): return self.engine def get_table_ref(self, engine, name, metadata): metadata.bind = engine return sqlalchemy.Table(name, metadata, autoload=True) class BannedDBSchemaOperations(fixtures.Fixture): """Ban some operations for migrations""" def __init__(self, banned_resources=None): super(MigrationsMixin.BannedDBSchemaOperations, self).__init__() self._banned_resources = banned_resources or [] @staticmethod def _explode(resource, op): print('%s.%s()' % (resource, op)) # noqa raise Exception( 'Operation %s.%s() is not allowed in a database migration' % ( resource, op)) def setUp(self): super(MigrationsMixin.BannedDBSchemaOperations, self).setUp() for thing in self._banned_resources: self.useFixture(fixtures.MonkeyPatch( 'sqlalchemy.%s.drop' % thing, lambda *a, **k: self._explode(thing, 'drop'))) self.useFixture(fixtures.MonkeyPatch( 'sqlalchemy.%s.alter' % thing, lambda *a, **k: self._explode(thing, 'alter'))) def migrate_up(self, version, with_data=False): # NOTE(dulek): This is a list of migrations where we allow dropping # things. The rules for adding things here are very very specific. # Insight on how to drop things from the DB in a backward-compatible # manner is provided in Cinder's developer documentation. # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE WITHOUT CARE exceptions = [ # NOTE(dulek): 62 alters the column type from boolean to integer to # fix the bug 1518363. 
If we've followed the guidelines for live # schema upgrades we would end up either waiting 3 releases to fix # a simple bug or trigger a rebuild index operation in migration # (because constraint was impossible to delete without deleting # other foreign key constraints). Either way it's harsh... We've # decided to go with alter to minimise upgrade impact. The only # consequence for deployments running recent MySQL is inability # to perform volume-type-access modifications while running this # migration. 62, # NOTE(dulek): 66 sets reservations.usage_id to nullable. This is # 100% backward compatible and according to MySQL docs such ALTER # is performed with the same restrictions as column addition, which # we of course allow. 66, ] # NOTE(dulek): We only started requiring things be additive in # Mitaka, so ignore all migrations before that point. MITAKA_START = 61 if version >= MITAKA_START and version not in exceptions: banned = ['Table', 'Column'] else: banned = None with MigrationsMixin.BannedDBSchemaOperations(banned): super(MigrationsMixin, self).migrate_up(version, with_data) def _pre_upgrade_004(self, engine): """Change volume types to UUID """ data = { 'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1', 'volume_type_id': 1}, {'id': str(uuid.uuid4()), 'host': 'test2', 'volume_type_id': 1}, {'id': str(uuid.uuid4()), 'host': 'test3', 'volume_type_id': 3}, ], 'volume_types': [{'name': 'vtype1'}, {'name': 'vtype2'}, {'name': 'vtype3'}, ], 'volume_type_extra_specs': [{'volume_type_id': 1, 'key': 'v1', 'value': 'hotep', }, {'volume_type_id': 1, 'key': 'v2', 'value': 'bending rodrigez', }, {'volume_type_id': 2, 'key': 'v3', 'value': 'bending rodrigez', }, ]} volume_types = db_utils.get_table(engine, 'volume_types') for vtype in data['volume_types']: r = volume_types.insert().values(vtype).execute() vtype['id'] = r.inserted_primary_key[0] volume_type_es = db_utils.get_table(engine, 'volume_type_extra_specs') for vtes in data['volume_type_extra_specs']: r = volume_type_es.insert().values(vtes).execute() vtes['id'] = r.inserted_primary_key[0] volumes = db_utils.get_table(engine, 'volumes') for vol in data['volumes']: r = volumes.insert().values(vol).execute() vol['id'] = r.inserted_primary_key[0] return data def _check_004(self, engine, data): volumes = db_utils.get_table(engine, 'volumes') v1 = volumes.select(volumes.c.id == data['volumes'][0]['id'] ).execute().first() v2 = volumes.select(volumes.c.id == data['volumes'][1]['id'] ).execute().first() v3 = volumes.select(volumes.c.id == data['volumes'][2]['id'] ).execute().first() volume_types = db_utils.get_table(engine, 'volume_types') vt1 = volume_types.select(volume_types.c.name == data['volume_types'][0]['name'] ).execute().first() vt2 = volume_types.select(volume_types.c.name == data['volume_types'][1]['name'] ).execute().first() vt3 = volume_types.select(volume_types.c.name == data['volume_types'][2]['name'] ).execute().first() vtes = db_utils.get_table(engine, 'volume_type_extra_specs') vtes1 = vtes.select(vtes.c.key == data['volume_type_extra_specs'][0]['key'] ).execute().first() vtes2 = vtes.select(vtes.c.key == data['volume_type_extra_specs'][1]['key'] ).execute().first() vtes3 = vtes.select(vtes.c.key == data['volume_type_extra_specs'][2]['key'] ).execute().first() self.assertEqual(v1['volume_type_id'], vt1['id']) self.assertEqual(v2['volume_type_id'], vt1['id']) self.assertEqual(v3['volume_type_id'], vt3['id']) self.assertEqual(vtes1['volume_type_id'], vt1['id']) self.assertEqual(vtes2['volume_type_id'], vt1['id']) 
self.assertEqual(vtes3['volume_type_id'], vt2['id']) def _check_005(self, engine, data): """Test that adding source_volid column works correctly.""" volumes = db_utils.get_table(engine, 'volumes') self.assertIsInstance(volumes.c.source_volid.type, self.VARCHAR_TYPE) def _check_006(self, engine, data): snapshots = db_utils.get_table(engine, 'snapshots') self.assertIsInstance(snapshots.c.provider_location.type, self.VARCHAR_TYPE) def _check_007(self, engine, data): snapshots = db_utils.get_table(engine, 'snapshots') fkey, = snapshots.c.volume_id.foreign_keys self.assertIsNotNone(fkey) def _pre_upgrade_008(self, engine): self.assertFalse(engine.dialect.has_table(engine.connect(), "backups")) def _check_008(self, engine, data): """Test that adding and removing the backups table works correctly.""" self.assertTrue(engine.dialect.has_table(engine.connect(), "backups")) backups = db_utils.get_table(engine, 'backups') self.assertIsInstance(backups.c.created_at.type, self.TIME_TYPE) self.assertIsInstance(backups.c.updated_at.type, self.TIME_TYPE) self.assertIsInstance(backups.c.deleted_at.type, self.TIME_TYPE) self.assertIsInstance(backups.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(backups.c.id.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.volume_id.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.user_id.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.project_id.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.host.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.availability_zone.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.display_name.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.display_description.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.container.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.status.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.fail_reason.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.service_metadata.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.service.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.size.type, self.INTEGER_TYPE) self.assertIsInstance(backups.c.object_count.type, self.INTEGER_TYPE) def _check_009(self, engine, data): """Test adding snapshot_metadata table works correctly.""" self.assertTrue(engine.dialect.has_table(engine.connect(), "snapshot_metadata")) snapshot_metadata = db_utils.get_table(engine, 'snapshot_metadata') self.assertIsInstance(snapshot_metadata.c.created_at.type, self.TIME_TYPE) self.assertIsInstance(snapshot_metadata.c.updated_at.type, self.TIME_TYPE) self.assertIsInstance(snapshot_metadata.c.deleted_at.type, self.TIME_TYPE) self.assertIsInstance(snapshot_metadata.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(snapshot_metadata.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(snapshot_metadata.c.id.type, self.INTEGER_TYPE) self.assertIsInstance(snapshot_metadata.c.snapshot_id.type, self.VARCHAR_TYPE) self.assertIsInstance(snapshot_metadata.c.key.type, self.VARCHAR_TYPE) self.assertIsInstance(snapshot_metadata.c.value.type, self.VARCHAR_TYPE) def _check_010(self, engine, data): """Test adding transfers table works correctly.""" self.assertTrue(engine.dialect.has_table(engine.connect(), "transfers")) transfers = db_utils.get_table(engine, 'transfers') self.assertIsInstance(transfers.c.created_at.type, self.TIME_TYPE) self.assertIsInstance(transfers.c.updated_at.type, self.TIME_TYPE) self.assertIsInstance(transfers.c.deleted_at.type, self.TIME_TYPE) 
self.assertIsInstance(transfers.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(transfers.c.id.type, self.VARCHAR_TYPE) self.assertIsInstance(transfers.c.volume_id.type, self.VARCHAR_TYPE) self.assertIsInstance(transfers.c.display_name.type, self.VARCHAR_TYPE) self.assertIsInstance(transfers.c.salt.type, self.VARCHAR_TYPE) self.assertIsInstance(transfers.c.crypt_hash.type, self.VARCHAR_TYPE) self.assertIsInstance(transfers.c.expires_at.type, self.TIME_TYPE) def _check_011(self, engine, data): """Test that adding bootable column to volumes works correctly.""" volumes = db_utils.get_table(engine, 'volumes') self.assertIn('bootable', volumes.c) self.assertIsInstance(volumes.c.bootable.type, self.BOOL_TYPE) def _check_012(self, engine, data): """Test that adding attached_host column works correctly.""" volumes = db_utils.get_table(engine, 'volumes') self.assertIsInstance(volumes.c.attached_host.type, self.VARCHAR_TYPE) def _check_013(self, engine, data): """Test that adding provider_geometry column works correctly.""" volumes = db_utils.get_table(engine, 'volumes') self.assertIsInstance(volumes.c.provider_geometry.type, self.VARCHAR_TYPE) def _check_014(self, engine, data): """Test that adding _name_id column works correctly.""" volumes = db_utils.get_table(engine, 'volumes') self.assertIsInstance(volumes.c._name_id.type, self.VARCHAR_TYPE) def _check_015(self, engine, data): """Test removing migrations table works correctly.""" self.assertFalse(engine.dialect.has_table(engine.connect(), "migrations")) def _check_016(self, engine, data): """Test that dropping xen storage manager tables works correctly.""" self.assertFalse(engine.dialect.has_table(engine.connect(), 'sm_flavors')) self.assertFalse(engine.dialect.has_table(engine.connect(), 'sm_backend_config')) self.assertFalse(engine.dialect.has_table(engine.connect(), 'sm_volume')) def _check_017(self, engine, data): """Test that added encryption information works correctly.""" # encryption key UUID volumes = db_utils.get_table(engine, 'volumes') self.assertIn('encryption_key_id', volumes.c) self.assertIsInstance(volumes.c.encryption_key_id.type, self.VARCHAR_TYPE) snapshots = db_utils.get_table(engine, 'snapshots') self.assertIn('encryption_key_id', snapshots.c) self.assertIsInstance(snapshots.c.encryption_key_id.type, self.VARCHAR_TYPE) self.assertIn('volume_type_id', snapshots.c) self.assertIsInstance(snapshots.c.volume_type_id.type, self.VARCHAR_TYPE) # encryption types table encryption = db_utils.get_table(engine, 'encryption') self.assertIsInstance(encryption.c.volume_type_id.type, self.VARCHAR_TYPE) self.assertIsInstance(encryption.c.cipher.type, self.VARCHAR_TYPE) self.assertIsInstance(encryption.c.key_size.type, self.INTEGER_TYPE) self.assertIsInstance(encryption.c.provider.type, self.VARCHAR_TYPE) def _check_018(self, engine, data): """Test that added qos_specs table works correctly.""" self.assertTrue(engine.dialect.has_table( engine.connect(), "quality_of_service_specs")) qos_specs = db_utils.get_table(engine, 'quality_of_service_specs') self.assertIsInstance(qos_specs.c.created_at.type, self.TIME_TYPE) self.assertIsInstance(qos_specs.c.updated_at.type, self.TIME_TYPE) self.assertIsInstance(qos_specs.c.deleted_at.type, self.TIME_TYPE) self.assertIsInstance(qos_specs.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(qos_specs.c.id.type, self.VARCHAR_TYPE) self.assertIsInstance(qos_specs.c.specs_id.type, self.VARCHAR_TYPE) self.assertIsInstance(qos_specs.c.key.type, self.VARCHAR_TYPE) self.assertIsInstance(qos_specs.c.value.type,
self.VARCHAR_TYPE) def _check_019(self, engine, data): """Test that adding migration_status column works correctly.""" volumes = db_utils.get_table(engine, 'volumes') self.assertIsInstance(volumes.c.migration_status.type, self.VARCHAR_TYPE) def _check_020(self, engine, data): """Test adding volume_admin_metadata table works correctly.""" self.assertTrue(engine.dialect.has_table(engine.connect(), "volume_admin_metadata")) volume_admin_metadata = db_utils.get_table(engine, 'volume_admin_metadata') self.assertIsInstance(volume_admin_metadata.c.created_at.type, self.TIME_TYPE) self.assertIsInstance(volume_admin_metadata.c.updated_at.type, self.TIME_TYPE) self.assertIsInstance(volume_admin_metadata.c.deleted_at.type, self.TIME_TYPE) self.assertIsInstance(volume_admin_metadata.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(volume_admin_metadata.c.id.type, self.INTEGER_TYPE) self.assertIsInstance(volume_admin_metadata.c.volume_id.type, self.VARCHAR_TYPE) self.assertIsInstance(volume_admin_metadata.c.key.type, self.VARCHAR_TYPE) self.assertIsInstance(volume_admin_metadata.c.value.type, self.VARCHAR_TYPE) def _verify_quota_defaults(self, engine): quota_class_metadata = db_utils.get_table(engine, 'quota_classes') num_defaults = quota_class_metadata.count().\ where(quota_class_metadata.c.class_name == 'default').\ execute().scalar() self.assertEqual(3, num_defaults) def _check_021(self, engine, data): """Test adding default data for quota classes works correctly.""" self._verify_quota_defaults(engine) def _check_022(self, engine, data): """Test that adding disabled_reason column works correctly.""" services = db_utils.get_table(engine, 'services') self.assertIsInstance(services.c.disabled_reason.type, self.VARCHAR_TYPE) def _check_023(self, engine, data): """Test that adding reservations index works correctly.""" reservations = db_utils.get_table(engine, 'reservations') index_columns = [] for idx in reservations.indexes: if idx.name == 'reservations_deleted_expire_idx': index_columns = idx.columns.keys() break self.assertEqual(sorted(['deleted', 'expire']), sorted(index_columns)) def _check_024(self, engine, data): """Test adding replication columns to volume table.""" volumes = db_utils.get_table(engine, 'volumes') self.assertIsInstance(volumes.c.replication_status.type, self.VARCHAR_TYPE) self.assertIsInstance(volumes.c.replication_extended_status.type, self.VARCHAR_TYPE) self.assertIsInstance(volumes.c.replication_driver_data.type, self.VARCHAR_TYPE) def _check_025(self, engine, data): """Test adding table and columns for consistencygroups.""" # Test consistencygroup_id is in Table volumes metadata = sqlalchemy.MetaData() volumes = self.get_table_ref(engine, 'volumes', metadata) self.assertIsInstance(volumes.c.consistencygroup_id.type, self.VARCHAR_TYPE) # Test cgsnapshot_id is in Table snapshots snapshots = self.get_table_ref(engine, 'snapshots', metadata) self.assertIsInstance(snapshots.c.cgsnapshot_id.type, self.VARCHAR_TYPE) # Test Table consistencygroups exists self.assertTrue(engine.dialect.has_table(engine.connect(), "consistencygroups")) consistencygroups = self.get_table_ref(engine, 'consistencygroups', metadata) self.assertIsInstance(consistencygroups.c.created_at.type, self.TIME_TYPE) self.assertIsInstance(consistencygroups.c.updated_at.type, self.TIME_TYPE) self.assertIsInstance(consistencygroups.c.deleted_at.type, self.TIME_TYPE) self.assertIsInstance(consistencygroups.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(consistencygroups.c.id.type, self.VARCHAR_TYPE) 
self.assertIsInstance(consistencygroups.c.user_id.type, self.VARCHAR_TYPE) self.assertIsInstance(consistencygroups.c.project_id.type, self.VARCHAR_TYPE) self.assertIsInstance(consistencygroups.c.host.type, self.VARCHAR_TYPE) self.assertIsInstance(consistencygroups.c.availability_zone.type, self.VARCHAR_TYPE) self.assertIsInstance(consistencygroups.c.name.type, self.VARCHAR_TYPE) self.assertIsInstance(consistencygroups.c.description.type, self.VARCHAR_TYPE) self.assertIsInstance(consistencygroups.c.volume_type_id.type, self.VARCHAR_TYPE) self.assertIsInstance(consistencygroups.c.status.type, self.VARCHAR_TYPE) # Test Table cgsnapshots exists self.assertTrue(engine.dialect.has_table(engine.connect(), "cgsnapshots")) cgsnapshots = self.get_table_ref(engine, 'cgsnapshots', metadata) self.assertIsInstance(cgsnapshots.c.created_at.type, self.TIME_TYPE) self.assertIsInstance(cgsnapshots.c.updated_at.type, self.TIME_TYPE) self.assertIsInstance(cgsnapshots.c.deleted_at.type, self.TIME_TYPE) self.assertIsInstance(cgsnapshots.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(cgsnapshots.c.id.type, self.VARCHAR_TYPE) self.assertIsInstance(cgsnapshots.c.user_id.type, self.VARCHAR_TYPE) self.assertIsInstance(cgsnapshots.c.project_id.type, self.VARCHAR_TYPE) self.assertIsInstance(cgsnapshots.c.consistencygroup_id.type, self.VARCHAR_TYPE) self.assertIsInstance(cgsnapshots.c.name.type, self.VARCHAR_TYPE) self.assertIsInstance(cgsnapshots.c.description.type, self.VARCHAR_TYPE) self.assertIsInstance(cgsnapshots.c.status.type, self.VARCHAR_TYPE) # Verify foreign keys are created fkey, = volumes.c.consistencygroup_id.foreign_keys self.assertEqual(consistencygroups.c.id, fkey.column) self.assertEqual(1, len(volumes.foreign_keys)) fkey, = snapshots.c.cgsnapshot_id.foreign_keys self.assertEqual(cgsnapshots.c.id, fkey.column) fkey, = snapshots.c.volume_id.foreign_keys self.assertEqual(volumes.c.id, fkey.column) # 2 foreign keys in Table snapshots self.assertEqual(2, len(snapshots.foreign_keys)) def _pre_upgrade_026(self, engine): """Test adding default data for consistencygroups quota class.""" quota_class_metadata = db_utils.get_table(engine, 'quota_classes') num_defaults = quota_class_metadata.count().\ where(quota_class_metadata.c.class_name == 'default').\ execute().scalar() self.assertEqual(3, num_defaults) def _check_026(self, engine, data): quota_class_metadata = db_utils.get_table(engine, 'quota_classes') num_defaults = quota_class_metadata.count().\ where(quota_class_metadata.c.class_name == 'default').\ execute().scalar() self.assertEqual(4, num_defaults) def _check_032(self, engine, data): """Test adding volume_type_projects table works correctly.""" volume_type_projects = db_utils.get_table(engine, 'volume_type_projects') self.assertIsInstance(volume_type_projects.c.created_at.type, self.TIME_TYPE) self.assertIsInstance(volume_type_projects.c.updated_at.type, self.TIME_TYPE) self.assertIsInstance(volume_type_projects.c.deleted_at.type, self.TIME_TYPE) self.assertIsInstance(volume_type_projects.c.deleted.type, self.BOOL_TYPE) self.assertIsInstance(volume_type_projects.c.id.type, self.INTEGER_TYPE) self.assertIsInstance(volume_type_projects.c.volume_type_id.type, self.VARCHAR_TYPE) self.assertIsInstance(volume_type_projects.c.project_id.type, self.VARCHAR_TYPE) volume_types = db_utils.get_table(engine, 'volume_types') self.assertIsInstance(volume_types.c.is_public.type, self.BOOL_TYPE) def _check_033(self, engine, data): """Test adding encryption_id column to encryption table.""" encryptions = 
db_utils.get_table(engine, 'encryption')
        self.assertIsInstance(encryptions.c.encryption_id.type,
                              self.VARCHAR_TYPE)

    def _check_034(self, engine, data):
        """Test adding description columns to volume_types table."""
        volume_types = db_utils.get_table(engine, 'volume_types')
        self.assertIsInstance(volume_types.c.description.type,
                              self.VARCHAR_TYPE)

    def _check_035(self, engine, data):
        volumes = db_utils.get_table(engine, 'volumes')
        self.assertIsInstance(volumes.c.provider_id.type,
                              self.VARCHAR_TYPE)

    def _check_036(self, engine, data):
        snapshots = db_utils.get_table(engine, 'snapshots')
        self.assertIsInstance(snapshots.c.provider_id.type,
                              self.VARCHAR_TYPE)

    def _check_037(self, engine, data):
        consistencygroups = db_utils.get_table(engine, 'consistencygroups')
        self.assertIsInstance(consistencygroups.c.cgsnapshot_id.type,
                              self.VARCHAR_TYPE)

    def _check_038(self, engine, data):
        """Test adding and removing driver_initiator_data table."""
        has_table = engine.dialect.has_table(engine.connect(),
                                             "driver_initiator_data")
        self.assertTrue(has_table)
        private_data = db_utils.get_table(
            engine,
            'driver_initiator_data'
        )
        self.assertIsInstance(private_data.c.created_at.type,
                              self.TIME_TYPE)
        self.assertIsInstance(private_data.c.updated_at.type,
                              self.TIME_TYPE)
        self.assertIsInstance(private_data.c.id.type,
                              self.INTEGER_TYPE)
        self.assertIsInstance(private_data.c.initiator.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(private_data.c.namespace.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(private_data.c.key.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(private_data.c.value.type,
                              self.VARCHAR_TYPE)

    def _check_039(self, engine, data):
        backups = db_utils.get_table(engine, 'backups')
        self.assertIsInstance(backups.c.parent_id.type,
                              self.VARCHAR_TYPE)

    def _check_040(self, engine, data):
        volumes = db_utils.get_table(engine, 'volumes')
        self.assertNotIn('instance_uuid', volumes.c)
        self.assertNotIn('attached_host', volumes.c)
        self.assertNotIn('attach_time', volumes.c)
        self.assertNotIn('mountpoint', volumes.c)
        self.assertIsInstance(volumes.c.multiattach.type,
                              self.BOOL_TYPE)
        attachments = db_utils.get_table(engine, 'volume_attachment')
        self.assertIsInstance(attachments.c.attach_mode.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(attachments.c.instance_uuid.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(attachments.c.attached_host.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(attachments.c.mountpoint.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(attachments.c.attach_status.type,
                              self.VARCHAR_TYPE)

    def _check_041(self, engine, data):
        """Test that adding modified_at column works correctly."""
        services = db_utils.get_table(engine, 'services')
        self.assertIsInstance(services.c.modified_at.type,
                              self.TIME_TYPE)

    def _check_048(self, engine, data):
        quotas = db_utils.get_table(engine, 'quotas')
        self.assertIsInstance(quotas.c.allocated.type,
                              self.INTEGER_TYPE)

    def _check_049(self, engine, data):
        backups = db_utils.get_table(engine, 'backups')
        self.assertIsInstance(backups.c.temp_volume_id.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(backups.c.temp_snapshot_id.type,
                              self.VARCHAR_TYPE)

    def _check_050(self, engine, data):
        volumes = db_utils.get_table(engine, 'volumes')
        self.assertIsInstance(volumes.c.previous_status.type,
                              self.VARCHAR_TYPE)

    def _check_051(self, engine, data):
        consistencygroups = db_utils.get_table(engine, 'consistencygroups')
        self.assertIsInstance(consistencygroups.c.source_cgid.type,
                              self.VARCHAR_TYPE)

    def _check_052(self, engine, data):
        snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_auth.type, self.VARCHAR_TYPE) def _check_053(self, engine, data): services = db_utils.get_table(engine, 'services') self.assertIsInstance(services.c.rpc_current_version.type, self.VARCHAR_TYPE) self.assertIsInstance(services.c.rpc_available_version.type, self.VARCHAR_TYPE) self.assertIsInstance(services.c.object_current_version.type, self.VARCHAR_TYPE) self.assertIsInstance(services.c.object_available_version.type, self.VARCHAR_TYPE) def _check_054(self, engine, data): backups = db_utils.get_table(engine, 'backups') self.assertIsInstance(backups.c.num_dependent_backups.type, self.INTEGER_TYPE) def _check_055(self, engine, data): """Test adding image_volume_cache_entries table.""" has_table = engine.dialect.has_table(engine.connect(), "image_volume_cache_entries") self.assertTrue(has_table) private_data = db_utils.get_table( engine, 'image_volume_cache_entries' ) self.assertIsInstance(private_data.c.id.type, self.INTEGER_TYPE) self.assertIsInstance(private_data.c.host.type, self.VARCHAR_TYPE) self.assertIsInstance(private_data.c.image_id.type, self.VARCHAR_TYPE) self.assertIsInstance(private_data.c.image_updated_at.type, self.TIME_TYPE) self.assertIsInstance(private_data.c.volume_id.type, self.VARCHAR_TYPE) self.assertIsInstance(private_data.c.size.type, self.INTEGER_TYPE) self.assertIsInstance(private_data.c.last_used.type, self.TIME_TYPE) def _check_061(self, engine, data): backups = db_utils.get_table(engine, 'backups') self.assertIsInstance(backups.c.snapshot_id.type, self.VARCHAR_TYPE) self.assertIsInstance(backups.c.data_timestamp.type, self.TIME_TYPE) def _check_062(self, engine, data): volume_type_projects = db_utils.get_table(engine, 'volume_type_projects') self.assertIsInstance(volume_type_projects.c.id.type, self.INTEGER_TYPE) def _check_064(self, engine, data): backups = db_utils.get_table(engine, 'backups') self.assertIsInstance(backups.c.restore_volume_id.type, self.VARCHAR_TYPE) def _check_065(self, engine, data): services = db_utils.get_table(engine, 'services') self.assertIsInstance(services.c.replication_status.type, self.VARCHAR_TYPE) self.assertIsInstance(services.c.frozen.type, self.BOOL_TYPE) self.assertIsInstance(services.c.active_backend_id.type, self.VARCHAR_TYPE) def _check_066(self, engine, data): reservations = db_utils.get_table(engine, 'reservations') self.assertIsInstance(reservations.c.allocated_id.type, self.INTEGER_TYPE) def _check_067(self, engine, data): iscsi_targets = db_utils.get_table(engine, 'iscsi_targets') fkey, = iscsi_targets.c.volume_id.foreign_keys self.assertIsNotNone(fkey) def test_walk_versions(self): self.walk_versions(False, False) class TestSqliteMigrations(test_base.DbTestCase, MigrationsMixin): pass class TestMysqlMigrations(test_base.MySQLOpportunisticTestCase, MigrationsMixin): BOOL_TYPE = sqlalchemy.dialects.mysql.TINYINT def test_mysql_innodb(self): """Test that table creation on mysql only builds InnoDB tables.""" # add this to the global lists to make reset work with it, it's removed # automatically in tearDown so no need to clean it up here. # sanity check migration.db_sync(engine=self.migrate_engine) total = self.migrate_engine.execute( "SELECT count(*) " "from information_schema.TABLES " "where TABLE_SCHEMA='{0}'".format( self.migrate_engine.url.database)) self.assertGreater(total.scalar(), 0, msg="No tables found. 
Wrong schema?") noninnodb = self.migrate_engine.execute( "SELECT count(*) " "from information_schema.TABLES " "where TABLE_SCHEMA='openstack_citest' " "and ENGINE!='InnoDB' " "and TABLE_NAME!='migrate_version'") count = noninnodb.scalar() self.assertEqual(count, 0, "%d non InnoDB tables created" % count) class TestPostgresqlMigrations(test_base.PostgreSQLOpportunisticTestCase, MigrationsMixin): TIME_TYPE = sqlalchemy.types.TIMESTAMP cinder-8.0.0/cinder/tests/unit/test_ibm_flashsystem.py0000664000567000056710000013756212701406250024326 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Tests for the IBM FlashSystem volume driver.""" import mock from oslo_concurrency import processutils from oslo_utils import units import six import random import re from cinder import context from cinder import exception from cinder import test from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import flashsystem_fc from cinder.volume import utils as volume_utils from cinder.volume import volume_types class FlashSystemManagementSimulator(object): def __init__(self): # Default protocol is FC self._protocol = 'FC' self._volumes_list = {} self._hosts_list = {} self._mappings_list = {} self._next_cmd_error = { 'lsnode': '', 'lssystem': '', 'lsmdiskgrp': '' } self._errors = { # CMMVC50000 is a fake error which indicates that command has not # got expected results. This error represents kinds of CLI errors. 
'CMMVC50000': ('', 'CMMVC50000 The command can not be executed ' 'successfully.') } @staticmethod def _find_unused_id(d): ids = [] for v in d.values(): ids.append(int(v['id'])) ids.sort() for index, n in enumerate(ids): if n > index: return six.text_type(index) return six.text_type(len(ids)) @staticmethod def _is_invalid_name(name): if re.match(r'^[a-zA-Z_][\w ._-]*$', name): return False return True @staticmethod def _cmd_to_dict(arg_list): no_param_args = [ 'bytes', 'force' ] one_param_args = [ 'delim', 'hbawwpn', 'host', 'iogrp', 'iscsiname', 'mdiskgrp', 'name', 'scsi', 'size', 'unit' ] # All commands should begin with svcinfo or svctask if arg_list[0] not in ('svcinfo', 'svctask') or len(arg_list) < 2: raise exception.InvalidInput(reason=six.text_type(arg_list)) ret = {'cmd': arg_list[1]} arg_list.pop(0) skip = False for i in range(1, len(arg_list)): if skip: skip = False continue if arg_list[i][0] == '-': param = arg_list[i][1:] if param in no_param_args: ret[param] = True elif param in one_param_args: ret[param] = arg_list[i + 1] skip = True else: raise exception.InvalidInput( reason=('unrecognized argument %s') % arg_list[i]) else: ret['obj'] = arg_list[i] return ret @staticmethod def _print_cmd_info(rows, delim=' ', nohdr=False, **kwargs): """Generic function for printing information.""" if nohdr: del rows[0] for index in range(len(rows)): rows[index] = delim.join(rows[index]) return ('%s' % '\n'.join(rows), '') @staticmethod def _convert_units_bytes(num, unit): unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] unit_index = 0 while unit.lower() != unit_array[unit_index].lower(): num = num * 1024 unit_index += 1 return six.text_type(num) def _cmd_lshost(self, **kwargs): """lshost command. svcinfo lshost -delim ! svcinfo lshost -delim ! """ if 'obj' not in kwargs: rows = [] rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) for host in self._hosts_list.values(): rows.append([host['id'], host['host_name'], '1', '1', 'degraded']) if len(rows) > 1: return self._print_cmd_info(rows=rows, **kwargs) else: return ('', '') else: host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC50000'] host = self._hosts_list[host_name] rows = [] rows.append(['id', host['id']]) rows.append(['name', host['host_name']]) rows.append(['port_count', '1']) rows.append(['type', 'generic']) rows.append(['mask', '1111']) rows.append(['iogrp_count', '1']) rows.append(['status', 'degraded']) for port in host['iscsi_names']: rows.append(['iscsi_name', port]) rows.append(['node_logged_in_count', '0']) rows.append(['state', 'offline']) for port in host['wwpns']: rows.append(['WWPN', port]) rows.append(['node_logged_in_count', '0']) rows.append(['state', 'active']) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_lshostvdiskmap(self, **kwargs): """svcinfo lshostvdiskmap -delim ! 
""" if 'obj' not in kwargs: return self._errors['CMMVC50000'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC50000'] rows = [] rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name', 'vdisk_UID']) for mapping in self._mappings_list.values(): if (host_name == '') or (mapping['host'] == host_name): volume = self._volumes_list[mapping['vol']] rows.append([mapping['id'], mapping['host'], mapping['lun'], volume['id'], volume['name'], volume['vdisk_UID']]) return self._print_cmd_info(rows=rows, **kwargs) def _cmd_lsmdiskgrp(self, **kwargs): """svcinfo lsmdiskgrp -gui -bytes -delim ! """ status = 'online' if self._next_cmd_error['lsmdiskgrp'] == 'error': self._next_cmd_error['lsmdiskgrp'] = '' return self._errors['CMMVC50000'] if self._next_cmd_error['lsmdiskgrp'] == 'status=offline': self._next_cmd_error['lsmdiskgrp'] = '' status = 'offline' rows = [None] * 2 rows[0] = ['id', 'status', 'mdisk_count', 'vdisk_count', 'capacity', 'free_capacity', 'virtual_capacity', 'used_capacity', 'real_capacity', 'encrypted', 'type', 'encrypt'] rows[1] = ['0', status, '1', '0', '3573412790272', '3529432325160', '1693247906775', '277841182', '38203734097', 'no', 'parent', 'no'] if kwargs['obj'] == 'mdiskgrp0': row = rows[1] else: return self._errors['CMMVC50000'] objrows = [] for idx, val in enumerate(rows[0]): objrows.append([val, row[idx]]) if 'delim' in kwargs: for index in range(len(objrows)): objrows[index] = kwargs['delim'].join(objrows[index]) return ('%s' % '\n'.join(objrows), '') def _cmd_lsnode(self, **kwargs): """lsnode command. svcinfo lsnode -delim ! svcinfo lsnode -delim ! """ if self._protocol == 'FC' or self._protocol == 'both': port_status = 'active' else: port_status = 'unconfigured' rows1 = [None] * 7 rows1[0] = ['name', 'node1'] rows1[1] = ['port_id', '000000000000001'] rows1[2] = ['port_status', port_status] rows1[3] = ['port_speed', '8Gb'] rows1[4] = ['port_id', '000000000000001'] rows1[5] = ['port_status', port_status] rows1[6] = ['port_speed', '8Gb'] rows2 = [None] * 7 rows2[0] = ['name', 'node2'] rows2[1] = ['port_id', '000000000000002'] rows2[2] = ['port_status', port_status] rows2[3] = ['port_speed', '8Gb'] rows2[4] = ['port_id', '000000000000002'] rows2[5] = ['port_status', port_status] rows2[6] = ['port_speed', 'N/A'] rows3 = [None] * 3 rows3[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', 'IO_group_id', 'IO_group_name', 'config_node', 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', 'panel_name', 'enclosure_id', 'canister_id', 'enclosure_serial_number'] rows3[1] = ['1', 'node1', '', '0123456789ABCDEF', 'online', '0', 'io_grp0', 'yes', '', 'TR1', 'naa.0123456789ABCDEF', '', '01-1', '1', '1', 'H441028'] rows3[2] = ['2', 'node2', '', '0123456789ABCDEF', 'online', '0', 'io_grp0', 'no', '', 'TR1', 'naa.0123456789ABCDEF', '', '01-2', '1', '2', 'H441028'] if self._next_cmd_error['lsnode'] == 'error': self._next_cmd_error['lsnode'] = '' return self._errors['CMMVC50000'] rows = None if 'obj' not in kwargs: rows = rows3 elif kwargs['obj'] == '1': rows = rows1 elif kwargs['obj'] == '2': rows = rows2 else: return self._errors['CMMVC50000'] if self._next_cmd_error['lsnode'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsnode'] = '' return self._print_cmd_info(rows=rows, delim=kwargs.get('delim', None)) def _cmd_lssystem(self, **kwargs): """svcinfo lssystem -delim !""" open_access_enabled = 'off' if self._next_cmd_error['lssystem'] == 'error': self._next_cmd_error['lssystem'] = '' return 
self._errors['CMMVC50000'] if self._next_cmd_error['lssystem'] == 'open_access_enabled=on': self._next_cmd_error['lssystem'] = '' open_access_enabled = 'on' rows = [None] * 3 rows[0] = ['id', '0123456789ABCDEF'] rows[1] = ['name', 'flashsystem_1.2.3.4'] rows[2] = ['open_access_enabled', open_access_enabled] return self._print_cmd_info(rows=rows, **kwargs) def _cmd_lsportfc(self, **kwargs): """svcinfo lsportfc""" if self._protocol == 'FC' or self._protocol == 'both': status = 'active' else: status = 'unconfigured' rows = [None] * 3 rows[0] = ['id', 'canister_id', 'adapter_id', 'port_id', 'type', 'port_speed', 'node_id', 'node_name', 'WWPN', 'nportid', 'status', 'attachment', 'topology'] rows[1] = ['0', '1', '1', '1', 'fc', '8Gb', '1', 'node_1', 'AABBCCDDEEFF0011', '000000', status, 'host', 'al'] rows[2] = ['1', '1', '1', '1', 'fc', '8Gb', '1', 'node_1', 'AABBCCDDEEFF0010', '000000', status, 'host', 'al'] return self._print_cmd_info(rows=rows, **kwargs) def _cmd_lsportip(self, **kwargs): """svcinfo lsportip""" if self._protocol == 'iSCSI' or self._protocol == 'both': IP_address1 = '192.168.1.10' IP_address2 = '192.168.1.11' state = 'online' speed = '8G' else: IP_address1 = '' IP_address2 = '' state = '' speed = '' rows = [None] * 3 rows[0] = ['id', 'node_id', 'node_name', 'canister_id', 'adapter_id', 'port_id', 'IP_address', 'mask', 'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed', 'failover', 'link_state', 'host', 'host_6', 'vlan', 'vlan_6', 'adapter_location', 'adapter_port_id'] rows[1] = ['1', '1', 'node1', '0', '0', '0', IP_address1, '', '', '', '0', '', '11:22:33:44:55:AA', '', state, speed, 'no', 'active', '', '', '', '', '0', '0'] rows[2] = ['2', '2', 'node2', '0', '0', '0', IP_address2, '', '', '', '0', '', '11:22:33:44:55:BB', '', state, speed, 'no', 'active', '', '', '', '', '0', '0'] return self._print_cmd_info(rows=rows, **kwargs) def _cmd_lsvdisk(self, **kwargs): """cmd: svcinfo lsvdisk -gui -bytes -delim ! """ if 'obj' not in kwargs or ( 'delim' not in kwargs) or ( 'bytes' not in kwargs): return self._errors['CMMVC50000'] if kwargs['obj'] not in self._volumes_list: return self._errors['CMMVC50000'] vol = self._volumes_list[kwargs['obj']] rows = [] rows.append(['id', vol['id']]) rows.append(['name', vol['name']]) rows.append(['status', vol['status']]) rows.append(['capacity', vol['capacity']]) rows.append(['vdisk_UID', vol['vdisk_UID']]) rows.append(['udid', '']) rows.append(['open_access_scsi_id', '1']) rows.append(['parent_mdisk_grp_id', '0']) rows.append(['parent_mdisk_grp_name', 'mdiskgrp0']) for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_lsvdiskhostmap(self, **kwargs): """svcinfo lsvdiskhostmap -delim ! 
""" if 'obj' not in kwargs or ( 'delim' not in kwargs): return self._errors['CMMVC50000'] vdisk_name = kwargs['obj'] if vdisk_name not in self._volumes_list: return self._errors['CMMVC50000'] rows = [] rows.append(['id', 'name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID', 'IO_group_id', 'IO_group_name']) mappings_found = 0 for mapping in self._mappings_list.values(): if (mapping['vol'] == vdisk_name): mappings_found += 1 volume = self._volumes_list[mapping['vol']] host = self._hosts_list[mapping['host']] rows.append([volume['id'], volume['name'], '1', host['id'], host['host_name'], volume['vdisk_UID'], '0', 'mdiskgrp0']) if mappings_found: return self._print_cmd_info(rows=rows, **kwargs) else: return ('', '') def _cmd_expandvdisksize(self, **kwargs): """svctask expandvdisksize -size -unit gb """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] vol_name = kwargs['obj'].strip('\'\"') if 'size' not in kwargs: return self._errors['CMMVC50000'] size = int(kwargs['size']) if vol_name not in self._volumes_list: return self._errors['CMMVC50000'] curr_size = int(self._volumes_list[vol_name]['capacity']) addition = size * units.Gi self._volumes_list[vol_name]['capacity'] = six.text_type( curr_size + addition) return ('', '') def _cmd_mkvdisk(self, **kwargs): """mkvdisk command. svctask mkvdisk -name -mdiskgrp -iogrp -size -unit """ if 'name' not in kwargs or ( 'size' not in kwargs) or ( 'unit' not in kwargs): return self._errors['CMMVC50000'] vdisk_info = {} vdisk_info['id'] = self._find_unused_id(self._volumes_list) vdisk_info['name'] = kwargs['name'].strip('\'\"') vdisk_info['status'] = 'online' vdisk_info['capacity'] = self._convert_units_bytes( int(kwargs['size']), kwargs['unit']) vdisk_info['vdisk_UID'] = ('60050760') + ('0' * 14) + vdisk_info['id'] if vdisk_info['name'] in self._volumes_list: return self._errors['CMMVC50000'] else: self._volumes_list[vdisk_info['name']] = vdisk_info return ('Virtual Disk, id [%s], successfully created' % (vdisk_info['id']), '') def _cmd_rmvdisk(self, **kwargs): """svctask rmvdisk -force """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] vdisk_name = kwargs['obj'].strip('\'\"') if vdisk_name not in self._volumes_list: return self._errors['CMMVC50000'] del self._volumes_list[vdisk_name] return ('', '') def _add_port_to_host(self, host_info, **kwargs): if 'iscsiname' in kwargs: added_key = 'iscsi_names' added_val = kwargs['iscsiname'].strip('\'\"') elif 'hbawwpn' in kwargs: added_key = 'wwpns' added_val = kwargs['hbawwpn'].strip('\'\"') else: return self._errors['CMMVC50000'] host_info[added_key].append(added_val) for v in self._hosts_list.values(): if v['id'] == host_info['id']: continue for port in v[added_key]: if port == added_val: return self._errors['CMMVC50000'] return ('', '') def _cmd_mkhost(self, **kwargs): """mkhost command. 
svctask mkhost -force -hbawwpn -name svctask mkhost -force -iscsiname -name """ if 'name' not in kwargs: return self._errors['CMMVC50000'] host_name = kwargs['name'].strip('\'\"') if self._is_invalid_name(host_name): return self._errors['CMMVC50000'] if host_name in self._hosts_list: return self._errors['CMMVC50000'] host_info = {} host_info['id'] = self._find_unused_id(self._hosts_list) host_info['host_name'] = host_name host_info['iscsi_names'] = [] host_info['wwpns'] = [] out, err = self._add_port_to_host(host_info, **kwargs) if not len(err): self._hosts_list[host_name] = host_info return ('Host, id [%s], successfully created' % (host_info['id']), '') else: return (out, err) def _cmd_addhostport(self, **kwargs): """addhostport command. svctask addhostport -force -hbawwpn svctask addhostport -force -iscsiname """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC50000'] host_info = self._hosts_list[host_name] return self._add_port_to_host(host_info, **kwargs) def _cmd_rmhost(self, **kwargs): """svctask rmhost """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC50000'] for v in self._mappings_list.values(): if (v['host'] == host_name): return self._errors['CMMVC50000'] del self._hosts_list[host_name] return ('', '') def _cmd_mkvdiskhostmap(self, **kwargs): """svctask mkvdiskhostmap -host -scsi """ mapping_info = {} mapping_info['id'] = self._find_unused_id(self._mappings_list) if 'host' not in kwargs or ( 'scsi' not in kwargs) or ( 'obj' not in kwargs): return self._errors['CMMVC50000'] mapping_info['host'] = kwargs['host'].strip('\'\"') mapping_info['lun'] = kwargs['scsi'].strip('\'\"') mapping_info['vol'] = kwargs['obj'].strip('\'\"') if mapping_info['vol'] not in self._volumes_list: return self._errors['CMMVC50000'] if mapping_info['host'] not in self._hosts_list: return self._errors['CMMVC50000'] if mapping_info['vol'] in self._mappings_list: return self._errors['CMMVC50000'] for v in self._mappings_list.values(): if ((v['host'] == mapping_info['host']) and (v['lun'] == mapping_info['lun'])): return self._errors['CMMVC50000'] for v in self._mappings_list.values(): if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs): return self._errors['CMMVC50000'] self._mappings_list[mapping_info['id']] = mapping_info return ('Virtual Disk to Host map, id [%s], successfully created' % (mapping_info['id']), '') def _cmd_rmvdiskhostmap(self, **kwargs): """svctask rmvdiskhostmap -host """ if 'host' not in kwargs or 'obj' not in kwargs: return self._errors['CMMVC50000'] host = kwargs['host'].strip('\'\"') vdisk = kwargs['obj'].strip('\'\"') mapping_ids = [] for v in self._mappings_list.values(): if v['vol'] == vdisk: mapping_ids.append(v['id']) if not mapping_ids: return self._errors['CMMVC50000'] this_mapping = None for mapping_id in mapping_ids: if self._mappings_list[mapping_id]['host'] == host: this_mapping = mapping_id if this_mapping is None: return self._errors['CMMVC50000'] del self._mappings_list[this_mapping] return ('', '') def set_protocol(self, protocol): self._protocol = protocol def execute_command(self, cmd, check_exit_code=True): try: kwargs = self._cmd_to_dict(cmd) except exception.InvalidInput: return self._errors['CMMVC50000'] command = kwargs['cmd'] del kwargs['cmd'] func = getattr(self, '_cmd_' + command) out, err = func(**kwargs) if 
(check_exit_code) and (len(err) != 0): raise processutils.ProcessExecutionError(exit_code=1, stdout=out, stderr=err, cmd=command) return (out, err) def error_injection(self, cmd, error): self._next_cmd_error[cmd] = error class FlashSystemFakeDriver(flashsystem_fc.FlashSystemFCDriver): def __init__(self, *args, **kwargs): super(FlashSystemFakeDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _ssh(self, cmd, check_exit_code=True): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class FlashSystemDriverTestCase(test.TestCase): def _set_flag(self, flag, value): group = self.driver.configuration.config_group self.driver.configuration.set_override(flag, value, group) def _reset_flags(self): self.driver.configuration.local_conf.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) def _generate_vol_info(self, vol_name, vol_size=10, vol_status='available'): rand_id = six.text_type(random.randint(10000, 99999)) if not vol_name: vol_name = 'test_volume%s' % rand_id return {'name': vol_name, 'size': vol_size, 'id': '%s' % rand_id, 'volume_type_id': None, 'status': vol_status, 'mdisk_grp_name': 'mdiskgrp0'} def _generate_snap_info(self, vol_name, vol_id, vol_size, vol_status, snap_status='available'): rand_id = six.text_type(random.randint(10000, 99999)) return {'name': 'test_snap_%s' % rand_id, 'id': rand_id, 'volume': {'name': vol_name, 'id': vol_id, 'size': vol_size, 'status': vol_status}, 'volume_size': vol_size, 'status': snap_status, 'mdisk_grp_name': 'mdiskgrp0'} def setUp(self): super(FlashSystemDriverTestCase, self).setUp() self._def_flags = {'san_ip': 'hostname', 'san_login': 'username', 'san_password': 'password', 'flashsystem_connection_protocol': 'FC', 'flashsystem_multihostmap_enabled': True} self.connector = { 'host': 'flashsystem', 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], 'wwpns': ['abcd000000000001', 'abcd000000000002'], 'initiator': 'iqn.123456'} self.sim = FlashSystemManagementSimulator() self.driver = FlashSystemFakeDriver( configuration=conf.Configuration(None)) self.driver.set_fake_storage(self.sim) self._reset_flags() self.ctxt = context.get_admin_context() self.driver.do_setup(None) self.driver.check_for_setup_error() self.sleeppatch = mock.patch('eventlet.greenthread.sleep') self.sleeppatch.start() def tearDown(self): self.sleeppatch.stop() super(FlashSystemDriverTestCase, self).tearDown() def test_flashsystem_do_setup(self): # case 1: cmd lssystem encounters error self.sim.error_injection('lssystem', 'error') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # case 2: open_access_enabled is not off self.sim.error_injection('lssystem', 'open_access_enabled=on') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # case 3: cmd lsmdiskgrp encounters error self.sim.error_injection('lsmdiskgrp', 'error') self.assertRaises(exception.InvalidInput, self.driver.do_setup, None) # case 4: status is not online self.sim.error_injection('lsmdiskgrp', 'status=offline') self.assertRaises(exception.InvalidInput, self.driver.do_setup, None) # case 5: cmd lsnode encounters error self.sim.error_injection('lsnode', 'error') self.assertRaises(processutils.ProcessExecutionError, self.driver.do_setup, None) # case 6: cmd lsnode header does not match self.sim.error_injection('lsnode', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # case 7: set as FC 
self.sim.set_protocol('FC')
        self.driver.do_setup(None)
        self.assertEqual('FC', self.driver._protocol)

        # case 8: no configured nodes available
        self.sim.set_protocol('unknown')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.do_setup, None)

        # clear environment
        self.sim.set_protocol('FC')
        self.driver.do_setup(None)

    def test_flashsystem_check_for_setup_error(self):
        self._set_flag('san_ip', '')
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('san_ssh_port', '')
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('san_login', '')
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('san_password', None)
        self._set_flag('san_private_key', None)
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('flashsystem_connection_protocol', 'foo')
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        # clear environment
        self.driver.do_setup(None)

    def test_flashsystem_validate_connector(self):
        conn_neither = {'host': 'host'}
        conn_iscsi = {'host': 'host', 'initiator': 'foo'}
        conn_fc = {'host': 'host', 'wwpns': 'bar'}
        conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}

        protocol = self.driver._protocol

        # case 1: when protocol is FC
        self.driver._protocol = 'FC'
        self.driver.validate_connector(conn_fc)
        self.driver.validate_connector(conn_both)
        self.assertRaises(exception.InvalidConnectorException,
                          self.driver.validate_connector, conn_iscsi)
        self.assertRaises(exception.InvalidConnectorException,
                          self.driver.validate_connector, conn_neither)

        # clear environment
        self.driver._protocol = protocol

    def test_flashsystem_volumes(self):
        # case 1: create volume
        vol = self._generate_vol_info(None)
        self.driver.create_volume(vol)

        # check whether the volume was created successfully
        attributes = self.driver._get_vdisk_attributes(vol['name'])
        attr_size = float(attributes['capacity']) / units.Gi
        self.assertEqual(float(vol['size']), attr_size)

        # case 2: create volume with an empty return value
        with mock.patch.object(FlashSystemFakeDriver,
                               '_ssh') as mock_ssh:
            mock_ssh.return_value = ("", "")
            vol1 = self._generate_vol_info(None)
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.create_volume, vol1)

        # case 3: create volume with an error return value
        with mock.patch.object(FlashSystemFakeDriver,
                               '_ssh') as mock_ssh:
            mock_ssh.return_value = ("CMMVC6070E",
                                     "An invalid or duplicated "
                                     "parameter has been detected.")
            vol2 = self._generate_vol_info(None)
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.create_volume, vol2)

        # case 4: delete volume
        self.driver.delete_volume(vol)

        # case 5: delete volume that doesn't exist (expected not to fail)
        vol_no_exist = self._generate_vol_info(None)
        self.driver.delete_volume(vol_no_exist)

    def test_flashsystem_extend_volume(self):
        vol = self._generate_vol_info(None)
        self.driver.create_volume(vol)
        self.driver.extend_volume(vol, '200')
        attrs = self.driver._get_vdisk_attributes(vol['name'])
        vol_size = int(attrs['capacity']) / units.Gi
        self.assertAlmostEqual(vol_size, 200)

        # clear environment
        self.driver.delete_volume(vol)

    def test_flashsystem_connection(self):
        # case 1: initialize_connection/terminate_connection for good path
        vol1 = self._generate_vol_info(None)
        self.driver.create_volume(vol1)
        self.driver.initialize_connection(vol1, self.connector)
        self.driver.terminate_connection(vol1, self.connector)

        # case 2: when the volume does not exist
        vol2 = self._generate_vol_info(None)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          vol2, self.connector)

        # case 3: _get_vdisk_map_properties raises exception
        with mock.patch.object(flashsystem_fc.FlashSystemFCDriver,
                               '_get_vdisk_map_properties') as get_properties:
            get_properties.side_effect = exception.VolumeBackendAPIException
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.initialize_connection,
                              vol1, self.connector)

        # case 4: terminate_connection with no host
        with mock.patch.object(flashsystem_fc.FlashSystemFCDriver,
                               '_get_hostvdisk_mappings') as mock_host:
            mock_host.return_value = {}
            vol3 = self._generate_vol_info(None)
            self.driver.create_volume(vol3)
            self.driver.initialize_connection(vol3, self.connector)
            return_value = self.driver.terminate_connection(vol3,
                                                            self.connector)
            self.assertNotEqual({}, return_value['data'])

        # case 5: terminate_connection with host
        vol4 = self._generate_vol_info(None)
        self.driver.create_volume(vol4)
        self.driver.initialize_connection(vol4, self.connector)
        vol5 = self._generate_vol_info(None)
        self.driver.create_volume(vol5)
        self.driver.initialize_connection(vol5, self.connector)
        return_value = self.driver.terminate_connection(vol4,
                                                        self.connector)
        self.assertEqual({}, return_value['data'])

        # clear environment
        self.driver.delete_volume(vol1)
        self.driver.delete_volume(vol2)
        self.driver.delete_volume(vol3)
        self.driver.delete_volume(vol4)
        self.driver.delete_volume(vol5)

    @mock.patch.object(flashsystem_fc.FlashSystemFCDriver,
                       '_create_and_copy_vdisk_data')
    def test_flashsystem_create_snapshot(self, _create_and_copy_vdisk_data):
        # case 1: good path
        vol1 = self._generate_vol_info(None)
        snap1 = self._generate_snap_info(vol1['name'],
                                         vol1['id'],
                                         vol1['size'],
                                         vol1['status'])
        self.driver.create_snapshot(snap1)

        # case 2: when volume status is error
        vol2 = self._generate_vol_info(None, vol_status='error')
        snap2 = self._generate_snap_info(vol2['name'],
                                         vol2['id'],
                                         vol2['size'],
                                         vol2['status'])
        self.assertRaises(exception.InvalidVolume,
                          self.driver.create_snapshot, snap2)

    @mock.patch.object(flashsystem_fc.FlashSystemFCDriver,
                       '_delete_vdisk')
    def test_flashsystem_delete_snapshot(self, _delete_vdisk):
        vol1 = self._generate_vol_info(None)
        snap1 = self._generate_snap_info(vol1['name'],
                                         vol1['id'],
                                         vol1['size'],
                                         vol1['status'])
        self.driver.delete_snapshot(snap1)

    @mock.patch.object(flashsystem_fc.FlashSystemFCDriver,
                       '_create_and_copy_vdisk_data')
    def test_flashsystem_create_volume_from_snapshot(
            self, _create_and_copy_vdisk_data):
        # case 1: good path
        vol = self._generate_vol_info(None)
        snap = self._generate_snap_info(vol['name'],
                                        vol['id'],
                                        vol['size'],
                                        vol['status'])
        self.driver.create_volume_from_snapshot(vol, snap)

        # case 2: when size does not match
        vol = self._generate_vol_info(None, vol_size=100)
        snap = self._generate_snap_info(vol['name'],
                                        vol['id'],
                                        200,
                                        vol['status'])
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.create_volume_from_snapshot,
                          vol, snap)

        # case 3: when snapshot status is not available
        vol = self._generate_vol_info(None)
        snap = self._generate_snap_info(vol['name'],
                                        vol['id'],
                                        vol['size'],
                                        vol['status'],
                                        snap_status='error')
        self.assertRaises(exception.InvalidSnapshot,
                          self.driver.create_volume_from_snapshot,
                          vol, snap)

    @mock.patch.object(flashsystem_fc.FlashSystemFCDriver,
                       '_create_and_copy_vdisk_data')
    def test_flashsystem_create_cloned_volume(
            self, _create_and_copy_vdisk_data):
        # case
1: good path vol1 = self._generate_vol_info(None) vol2 = self._generate_vol_info(None) self.driver.create_cloned_volume(vol2, vol1) # case 2: when size does not match vol1 = self._generate_vol_info(None, vol_size=10) vol2 = self._generate_vol_info(None, vol_size=20) self.assertRaises(exception.VolumeDriverException, self.driver.create_cloned_volume, vol2, vol1) def test_flashsystem_get_volume_stats(self): # case 1: good path self._set_flag('reserved_percentage', 25) pool = 'mdiskgrp0' backend_name = 'flashsystem_1.2.3.4' + '_' + pool stats = self.driver.get_volume_stats() self.assertEqual(25, stats['reserved_percentage']) self.assertEqual('IBM', stats['vendor_name']) self.assertEqual('FC', stats['storage_protocol']) self.assertEqual(backend_name, stats['volume_backend_name']) self._reset_flags() # case 2: when lsmdiskgrp returns error self.sim.error_injection('lsmdiskgrp', 'error') self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_volume_stats, refresh=True) @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_copy_vdisk_data') def test_flashsystem_create_and_copy_vdisk_data(self, _copy_vdisk_data): # case 1: when volume does not exist vol1 = self._generate_vol_info(None) vol2 = self._generate_vol_info(None) self.assertRaises(exception.VolumeBackendAPIException, self.driver._create_and_copy_vdisk_data, vol1['name'], vol1['id'], vol2['name'], vol2['id']) # case 2: good path self.driver.create_volume(vol1) self.driver._create_and_copy_vdisk_data( vol1['name'], vol1['id'], vol2['name'], vol2['id']) self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) # case 3: _copy_vdisk_data raises exception self.driver.create_volume(vol1) _copy_vdisk_data.side_effect = exception.VolumeBackendAPIException self.assertRaises( exception.VolumeBackendAPIException, self.driver._create_and_copy_vdisk_data, vol1['name'], vol1['id'], vol2['name'], vol2['id']) self.assertEqual(set(), self.driver._vdisk_copy_in_progress) # clear environment self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) @mock.patch.object(volume_utils, 'copy_volume') @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_scan_device') @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_remove_device') @mock.patch.object(utils, 'brick_get_connector_properties') def test_flashsystem_copy_vdisk_data(self, _connector, _remove_device, _scan_device, copy_volume): connector = _connector.return_value = self.connector vol1 = self._generate_vol_info(None) vol2 = self._generate_vol_info(None) self.driver.create_volume(vol1) self.driver.create_volume(vol2) # case 1: no mapped before copy self.driver._copy_vdisk_data( vol1['name'], vol1['id'], vol2['name'], vol2['id']) (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) self.assertFalse(v1_mapped) self.assertFalse(v2_mapped) # case 2: mapped before copy self.driver.initialize_connection(vol1, connector) self.driver.initialize_connection(vol2, connector) self.driver._copy_vdisk_data( vol1['name'], vol1['id'], vol2['name'], vol2['id']) (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) self.assertTrue(v1_mapped) self.assertTrue(v2_mapped) self.driver.terminate_connection(vol1, connector) self.driver.terminate_connection(vol2, connector) # case 3: no mapped before copy, raise exception when scan _scan_device.side_effect = exception.VolumeBackendAPIException self.assertRaises( 
            exception.VolumeBackendAPIException,
            self.driver._copy_vdisk_data,
            vol1['name'], vol1['id'], vol2['name'], vol2['id'])
        (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
        (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
        self.assertFalse(v1_mapped)
        self.assertFalse(v2_mapped)

        # case 4: no mapped before copy, raise exception when copy
        copy_volume.side_effect = exception.VolumeBackendAPIException
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver._copy_vdisk_data,
            vol1['name'], vol1['id'], vol2['name'], vol2['id'])
        (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
        (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
        self.assertFalse(v1_mapped)
        self.assertFalse(v2_mapped)

        # clear environment
        self.driver.delete_volume(vol1)
        self.driver.delete_volume(vol2)

    def test_flashsystem_connector_to_hostname_prefix(self):
        # Invalid characters will be translated to '-'

        # case 1: host name is unicode with invalid characters
        conn = {'host': u'unicode.test}.abc{.abc'}
        self.assertEqual(u'unicode.test-.abc-.abc',
                         self.driver._connector_to_hostname_prefix(conn))

        # case 2: host name is string with invalid characters
        conn = {'host': 'string.test}.abc{.abc'}
        self.assertEqual('string.test-.abc-.abc',
                         self.driver._connector_to_hostname_prefix(conn))

        # case 3: host name is neither unicode nor string
        conn = {'host': 12345}
        self.assertRaises(exception.NoValidHost,
                          self.driver._connector_to_hostname_prefix,
                          conn)

        # case 4: a host name starting with a number will be translated
        conn = {'host': '192.168.1.1'}
        self.assertEqual('_192.168.1.1',
                         self.driver._connector_to_hostname_prefix(conn))

    def test_flashsystem_create_host(self):
        # case 1: create host
        conn = {
            'host': 'flashsystem',
            'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
            'wwpns': ['abcd000000000001', 'abcd000000000002'],
            'initiator': 'iqn.123456'}
        host = self.driver._create_host(conn)

        # case 2: create host that already exists
        self.assertRaises(processutils.ProcessExecutionError,
                          self.driver._create_host,
                          conn)

        # case 3: delete host
        self.driver._delete_host(host)

        # case 4: create host with empty ports
        conn = {'host': 'flashsystem', 'wwpns': []}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._create_host,
                          conn)

    def test_flashsystem_find_host_exhaustive(self):
        # case 1: create host and find it
        conn1 = {
            'host': 'flashsystem-01',
            'wwnns': ['1111111111abcdef', '1111111111abcdeg'],
            'wwpns': ['1111111111000001', '1111111111000002'],
            'initiator': 'iqn.111111'}
        conn2 = {
            'host': 'flashsystem-02',
            'wwnns': ['2222222222abcdef', '2222222222abcdeg'],
            'wwpns': ['2222222222000001', '2222222222000002'],
            'initiator': 'iqn.222222'}
        conn3 = {
            'host': 'flashsystem-03',
            'wwnns': ['3333333333abcdef', '3333333333abcdeg'],
            'wwpns': ['3333333333000001', '3333333333000002'],
            'initiator': 'iqn.333333'}
        host1 = self.driver._create_host(conn1)
        host2 = self.driver._create_host(conn2)
        self.assertEqual(
            host2,
            self.driver._find_host_exhaustive(conn2, [host1, host2]))
        self.assertIsNone(self.driver._find_host_exhaustive(conn3,
                                                            [host1, host2]))

        # case 2: hosts contains non-existent host info
        with mock.patch.object(FlashSystemFakeDriver,
                               '_ssh') as mock_ssh:
            mock_ssh.return_value = ("pass", "")
            self.driver._find_host_exhaustive(conn1, [host2])
            self.assertFalse(mock_ssh.called)

        # clear environment
        self.driver._delete_host(host1)
        self.driver._delete_host(host2)

    def test_flashsystem_get_vdisk_params(self):
        # case 1: use default params
        self.driver._get_vdisk_params(None)

        # case 2: use extra params from type
        opts1 = {'storage_protocol': 'FC'}
        opts2 = {'capabilities:storage_protocol': 'FC'}
        opts3 = {'storage_protocol': 'iSCSI'}
        type1 = volume_types.create(self.ctxt, 'opts1', opts1)
        type2 = volume_types.create(self.ctxt, 'opts2', opts2)
        type3 = volume_types.create(self.ctxt, 'opts3', opts3)
        self.assertEqual(
            'FC',
            self.driver._get_vdisk_params(type1['id'])['protocol'])
        self.assertEqual(
            'FC',
            self.driver._get_vdisk_params(type2['id'])['protocol'])
        self.assertRaises(exception.InvalidInput,
                          self.driver._get_vdisk_params,
                          type3['id'])

        # clear environment
        volume_types.destroy(self.ctxt, type1['id'])
        volume_types.destroy(self.ctxt, type2['id'])

    def test_flashsystem_map_vdisk_to_host(self):
        # case 1: no host found
        vol1 = self._generate_vol_info(None)
        self.driver.create_volume(vol1)
        self.assertEqual(
            # lun id should begin with 1
            1,
            self.driver._map_vdisk_to_host(vol1['name'], self.connector))

        # case 2: host already exists
        vol2 = self._generate_vol_info(None)
        self.driver.create_volume(vol2)
        self.assertEqual(
            # lun id should be sequential
            2,
            self.driver._map_vdisk_to_host(vol2['name'], self.connector))

        # case 3: test if already mapped
        self.assertEqual(
            1,
            self.driver._map_vdisk_to_host(vol1['name'], self.connector))

        # clear environment
        self.driver._unmap_vdisk_from_host(vol1['name'], self.connector)
        self.driver._unmap_vdisk_from_host(vol2['name'], self.connector)
        self.driver.delete_volume(vol1)
        self.driver.delete_volume(vol2)

        # case 4: if there is no vdisk mapped to the host,
        # the host should be removed
        self.assertIsNone(self.driver._get_host_from_connector(self.connector))
cinder-8.0.0/cinder/tests/unit/test_dellfc.py0000664000567000056710000007566212701406250022367 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from cinder import context
from cinder import exception
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.dell import dell_storagecenter_fc


# We patch these here as they are used by every test to keep
# from trying to contact a Dell Storage Center.
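# Editor's aside (not part of the original file): stacked mock.patch
# decorators apply bottom-up, so each decorated test method receives the
# generated mocks in reverse order of declaration; the three class-level
# patches that follow therefore arrive last in every test signature in
# this class (mock_close_connection, mock_open_connection, mock_init).
# A minimal, self-contained sketch of that mock behaviour (the Api class
# and demo function are hypothetical):
#
#     import mock
#
#     class Api(object):
#         def open_connection(self):
#             return 'real'
#
#     @mock.patch.object(Api, 'open_connection', return_value='fake')
#     def demo(mock_open_connection):
#         # The patched method now returns the mock's configured value,
#         # and the generated mock is passed in as an argument.
#         assert Api().open_connection() == 'fake'
#         assert mock_open_connection.call_count == 1
#
#     demo()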
@mock.patch.object(dell_storagecenter_api.HttpClient, '__init__', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'open_connection') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'close_connection') class DellSCSanFCDriverTestCase(test.TestCase): VOLUME = {u'instanceId': u'64702.4829', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 4831, u'objectType': u'ScVolume', u'index': 4829, u'volumeFolderPath': u'dopnstktst/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'statusMessage': u'', u'status': u'Down', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'opnstktst', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe0000000000000012df', u'active': False, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-000012df', u'replayAllowed': False, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} SCSERVER = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'opnstktst/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'opnstktst', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } MAPPING = {u'instanceId': u'64702.2183', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'lunUsed': [1], u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.4829', u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'objectType': u'ScVolume'}, u'connectivity': u'Up', u'readOnly': False, u'objectType': u'ScMappingProfile', u'hostCache': False, u'mappedVia': u'Server', u'mapCount': 2, u'instanceName': u'4829-47', u'lunRequested': u'N/A' } def setUp(self): super(DellSCSanFCDriverTestCase, self).setUp() # configuration is a mock. A mock is pretty much a blank # slate. I believe mock's done in setup are not happy time # mocks. So we just do a few things like driver config here. 
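        # Editor's aside (not part of the original file): because
        # `configuration` below is a bare mock.Mock(), any attribute the
        # driver reads is auto-created on first access and is truthy by
        # default; only the options the tests rely on are assigned real
        # values. A minimal sketch of that Mock behaviour (`cfg` and the
        # option names are hypothetical):
        #
        #     import mock
        #
        #     cfg = mock.Mock()
        #     cfg.san_ip = '192.168.0.1'    # explicit, as done below
        #     assert cfg.san_ip == '192.168.0.1'
        #     # Never-assigned options come back as auto-created Mocks:
        #     assert isinstance(cfg.some_unset_option, mock.Mock)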
self.configuration = mock.Mock() self.configuration.san_is_local = False self.configuration.san_ip = "192.168.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "pwd" self.configuration.dell_sc_ssn = 64702 self.configuration.dell_sc_server_folder = 'opnstktst' self.configuration.dell_sc_volume_folder = 'opnstktst' self.configuration.dell_sc_api_port = 3033 self._context = context.get_admin_context() self.driver = dell_storagecenter_fc.DellStorageCenterFCDriver( configuration=self.configuration) self.driver.do_setup(None) self.driver._stats = {'QoS_support': False, 'volume_backend_name': 'dell-1', 'free_capacity_gb': 12123, 'driver_version': '1.0.1', 'total_capacity_gb': 12388, 'reserved_percentage': 0, 'vendor_name': 'Dell', 'storage_protocol': 'FC'} # Start with none. Add in the specific tests later. # Mock tests bozo this. self.driver.backends = None self.driver.replication_enabled = False self.volid = '5729f1db-4c45-416c-bc15-c8ea13a4465d' self.volume_name = "volume" + self.volid self.connector = {'ip': '192.168.0.77', 'host': 'cinderfc-vm', 'wwnns': ['20000024ff30441c', '20000024ff30441d'], 'initiator': 'iqn.1993-08.org.debian:01:e1b1312f9e1', 'wwpns': ['21000024ff30441c', '21000024ff30441d']} @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_server_multiple_hbas', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPING) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) def test_initialize_connection(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name} connector = self.connector res = self.driver.initialize_connection(volume, connector) expected = {'data': {'discard': True, 'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}, 'target_discovered': True, 'target_lun': 1, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') # verify find_volume has been called and that is has been called twice mock_find_volume.assert_any_call(self.volume_name) assert mock_find_volume.call_count == 2 @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPING) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_no_wwns(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name} connector = self.connector 
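        # Editor's aside (not part of the original file): the assertion
        # below uses the callable form of assertRaises: the method under
        # test and its arguments are passed in, and unittest invokes them
        # and verifies that the expected exception is raised. A standalone
        # sketch of the same pattern (the boom function is hypothetical):
        #
        #     import unittest
        #
        #     class AssertRaisesExample(unittest.TestCase):
        #         def test_callable_form(self):
        #             def boom(value):
        #                 raise ValueError(value)
        #             self.assertRaises(ValueError, boom, 'bad input')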
self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_server_multiple_hbas', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPING) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_no_server(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPING) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_vol_not_found(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_map_vol_fail(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): # Test case where map_volume returns None (no mappings) volume = {'id': self.volume_name} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_volume_count', return_value=1) def test_terminate_connection(self, 
mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name} connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_server(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_volume(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(None, [], {})) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_wwns(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = self.connector # self.assertRaises(exception.VolumeBackendAPIException, # 
self.driver.terminate_connection, # volume, # connector) res = self.driver.terminate_connection(volume, connector) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_volume_count', return_value=1) def test_terminate_connection_failure(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_volume_count', return_value=0) def test_terminate_connection_vol_count_zero(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): # Test case where get_volume_count is zero volume = {'id': self.volume_name} connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) expected = {'data': {'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_update_volume_stats_with_refresh(self, mock_get_storage_usage, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(True) self.assertEqual('FC', stats['storage_protocol']) mock_get_storage_usage.assert_called_once_with() @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_get_volume_stats_no_refresh(self, 
mock_get_storage_usage, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(False) self.assertEqual('FC', stats['storage_protocol']) assert mock_get_storage_usage.called is False cinder-8.0.0/cinder/tests/unit/cast_as_call.py0000664000567000056710000000222012701406250022464 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock def mock_cast_as_call(obj=None): """Use this to mock `cast` as calls. :param obj: Either an instance of RPCClient or an instance of _Context. """ orig_prepare = obj.prepare def prepare(*args, **kwargs): cctxt = orig_prepare(*args, **kwargs) mock_cast_as_call(obj=cctxt) # woo, recurse! return cctxt prepare_patch = mock.patch.object(obj, 'prepare').start() prepare_patch.side_effect = prepare cast_patch = mock.patch.object(obj, 'cast').start() cast_patch.side_effect = obj.call cinder-8.0.0/cinder/tests/unit/test_hacking.py0000664000567000056710000004734112701406250022534 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import textwrap import mock import pep8 from cinder.hacking import checks from cinder import test @ddt.ddt class HackingTestCase(test.TestCase): """This class tests the hacking checks in cinder.hacking.checks This class ensures that Cinder's hacking checks are working by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. - Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. 
lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. """ def test_no_vi_headers(self): lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n', 'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n', 'Line 11\n'] self.assertIsNone(checks.no_vi_headers( "Test string foo", 1, lines)) self.assertEqual(2, len(list(checks.no_vi_headers( "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", 2, lines)))) self.assertEqual(2, len(list(checks.no_vi_headers( "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", 8, lines)))) self.assertIsNone(checks.no_vi_headers( "Test end string for vi", 9, lines)) # vim header outside of boundary (first/last 5 lines) self.assertIsNone(checks.no_vi_headers( "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", 6, lines)) def test_no_translate_debug_logs(self): self.assertEqual(1, len(list(checks.no_translate_debug_logs( "LOG.debug(_('foo'))", "cinder/scheduler/foo.py")))) self.assertEqual(0, len(list(checks.no_translate_debug_logs( "LOG.debug('foo')", "cinder/scheduler/foo.py")))) self.assertEqual(0, len(list(checks.no_translate_debug_logs( "LOG.info(_('foo'))", "cinder/scheduler/foo.py")))) def test_check_explicit_underscore_import(self): self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "cinder.tests.unit/other_files.py")))) self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _", "cinder.tests.unit/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "cinder.tests.unit/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _LE, _, _LW", "cinder.tests.unit/other_files2.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files2.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "_ = translations.ugettext", "cinder.tests.unit/other_files3.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files3.py")))) # Complete code coverage by falling through all checks self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "LOG.info('My info message')", "cinder.tests.unit/other_files4.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _LW", "cinder.tests.unit/other_files5.py")))) self.assertEqual(1, 
len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files5.py")))) # We are patching pep8 so that only the check under test is actually # installed. @mock.patch('pep8._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def _run_check(self, code, checker, filename=None): pep8.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pep8.Checker(filename=filename, lines=lines) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): actual_errors = [e[:3] for e in self._run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) def _assert_has_no_errors(self, code, checker, filename=None): self._assert_has_errors(code, checker, filename=filename) def test_logging_format_args(self): checker = checks.CheckLoggingFormatArgs code = """ import logging LOG = logging.getLogger() LOG.info("Message without a second argument.") LOG.critical("Message with %s arguments.", 'two') LOG.debug("Volume %s caught fire and is at %d degrees C and" " climbing.", 'volume1', 500) """ self._assert_has_no_errors(code, checker) code = """ import logging LOG = logging.getLogger() LOG.{0}("Volume %s caught fire and is at %d degrees C and " "climbing.", ('volume1', 500)) """ for method in checker.LOG_METHODS: self._assert_has_errors(code.format(method), checker, expected_errors=[(4, 21, 'C310')]) code = """ import logging LOG = logging.getLogger() LOG.log(logging.DEBUG, "Volume %s caught fire and is at %d" " degrees C and climbing.", ('volume1', 500)) """ self._assert_has_errors(code, checker, expected_errors=[(4, 37, 'C310')]) def test_opt_type_registration_args(self): checker = checks.CheckOptRegistrationArgs code = """ CONF.register_opts([opt1, opt2, opt3]) CONF.register_opts((opt4, opt5)) CONF.register_opt(lonely_opt) CONF.register_opts([OPT1, OPT2], group="group_of_opts") CONF.register_opt(single_opt, group=blah) """ self._assert_has_no_errors(code, checker) code = """ CONF.register_opt([opt4, opt5, opt6]) CONF.register_opt((opt7, opt8)) CONF.register_opts(lonely_opt) CONF.register_opt((an_opt, another_opt)) """ self._assert_has_errors(code, checker, expected_errors=[(1, 18, 'C311'), (2, 19, 'C311'), (3, 19, 'C311'), (4, 19, 'C311')]) code = """ CONF.register_opt(single_opt) CONF.register_opts(other_opt) CONF.register_opt(multiple_opts) tuple_opts = (one_opt, two_opt) CONF.register_opts(tuple_opts) """ self._assert_has_errors(code, checker, expected_errors=[(2, 19, 'C311'), (3, 18, 'C311')]) def test_str_unicode_exception(self): checker = checks.CheckForStrUnicodeExc code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: p = str(e) return p """ errors = [(5, 16, 'N325')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): try: p = unicode(a) + str(b) except ValueError as e: p = e return p """ self._assert_has_no_errors(code, checker) code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: p = unicode(e) return p """ errors = [(5, 20, 'N325')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: try: p = unicode(a) + unicode(b) except ValueError as ve: p = str(e) + str(ve) p = e return p """ errors = [(8, 20, 'N325'), (8, 29, 'N325')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): try: 
p = str(a) + str(b) except ValueError as e: try: p = unicode(a) + unicode(b) except ValueError as ve: p = str(e) + unicode(ve) p = str(e) return p """ errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')] self._assert_has_errors(code, checker, expected_errors=errors) def test_check_no_log_audit(self): self.assertEqual(1, len(list(checks.check_no_log_audit( "LOG.audit('My test audit log')")))) self.assertEqual(0, len(list(checks.check_no_log_audit( "LOG.info('My info test log.')")))) def test_no_mutable_default_args(self): self.assertEqual(0, len(list(checks.no_mutable_default_args( "def foo (bar):")))) self.assertEqual(1, len(list(checks.no_mutable_default_args( "def foo (bar=[]):")))) self.assertEqual(1, len(list(checks.no_mutable_default_args( "def foo (bar={}):")))) def test_oslo_namespace_imports_check(self): self.assertEqual(1, len(list(checks.check_oslo_namespace_imports( "from oslo.concurrency import foo")))) self.assertEqual(0, len(list(checks.check_oslo_namespace_imports( "from oslo_concurrency import bar")))) self.assertEqual(1, len(list(checks.check_oslo_namespace_imports( "from oslo.db import foo")))) self.assertEqual(0, len(list(checks.check_oslo_namespace_imports( "from oslo_db import bar")))) self.assertEqual(1, len(list(checks.check_oslo_namespace_imports( "from oslo.config import foo")))) self.assertEqual(0, len(list(checks.check_oslo_namespace_imports( "from oslo_config import bar")))) self.assertEqual(1, len(list(checks.check_oslo_namespace_imports( "from oslo.utils import foo")))) self.assertEqual(0, len(list(checks.check_oslo_namespace_imports( "from oslo_utils import bar")))) self.assertEqual(1, len(list(checks.check_oslo_namespace_imports( "from oslo.serialization import foo")))) self.assertEqual(0, len(list(checks.check_oslo_namespace_imports( "from oslo_serialization import bar")))) self.assertEqual(1, len(list(checks.check_oslo_namespace_imports( "from oslo.log import foo")))) self.assertEqual(0, len(list(checks.check_oslo_namespace_imports( "from oslo_log import bar")))) def test_no_contextlib_nested(self): self.assertEqual(1, len(list(checks.check_no_contextlib_nested( "with contextlib.nested(")))) self.assertEqual(1, len(list(checks.check_no_contextlib_nested( " with nested(")))) self.assertEqual(0, len(list(checks.check_no_contextlib_nested( "with my.nested(")))) self.assertEqual(0, len(list(checks.check_no_contextlib_nested( "with foo as bar")))) def test_check_datetime_now(self): self.assertEqual(1, len(list(checks.check_datetime_now( "datetime.now", False)))) self.assertEqual(0, len(list(checks.check_datetime_now( "timeutils.utcnow", False)))) def test_check_datetime_now_noqa(self): self.assertEqual(0, len(list(checks.check_datetime_now( "datetime.now() # noqa", True)))) def test_check_timeutils_strtime(self): self.assertEqual(1, len(list(checks.check_timeutils_strtime( "timeutils.strtime")))) self.assertEqual(0, len(list(checks.check_timeutils_strtime( "strftime")))) def test_validate_log_translations(self): self.assertEqual(1, len(list(checks.validate_log_translations( "LOG.info('foo')", "foo.py")))) self.assertEqual(1, len(list(checks.validate_log_translations( "LOG.warning('foo')", "foo.py")))) self.assertEqual(1, len(list(checks.validate_log_translations( "LOG.error('foo')", "foo.py")))) self.assertEqual(1, len(list(checks.validate_log_translations( "LOG.exception('foo')", "foo.py")))) self.assertEqual(0, len(list(checks.validate_log_translations( "LOG.info('foo')", "cinder/tests/foo.py")))) self.assertEqual(0, 
len(list(checks.validate_log_translations( "LOG.info(_LI('foo')", "foo.py")))) self.assertEqual(0, len(list(checks.validate_log_translations( "LOG.warning(_LW('foo')", "foo.py")))) self.assertEqual(0, len(list(checks.validate_log_translations( "LOG.error(_LE('foo')", "foo.py")))) self.assertEqual(0, len(list(checks.validate_log_translations( "LOG.exception(_LE('foo')", "foo.py")))) def test_check_unicode_usage(self): self.assertEqual(1, len(list(checks.check_unicode_usage( "unicode(msg)", False)))) self.assertEqual(0, len(list(checks.check_unicode_usage( "unicode(msg) # noqa", True)))) def test_no_print_statements(self): self.assertEqual(0, len(list(checks.check_no_print_statements( "a line with no print statement", "cinder/file.py", False)))) self.assertEqual(1, len(list(checks.check_no_print_statements( "print('My print statement')", "cinder/file.py", False)))) self.assertEqual(0, len(list(checks.check_no_print_statements( "print('My print statement in cinder/cmd, which is ok.')", "cinder/cmd/file.py", False)))) self.assertEqual(0, len(list(checks.check_no_print_statements( "print('My print statement that I just must have.')", "cinder.tests.unit/file.py", True)))) self.assertEqual(1, len(list(checks.check_no_print_statements( "print ('My print with space')", "cinder/volume/anotherFile.py", False)))) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " dict()")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) def test_validate_assertIsNone(self): test_value = None self.assertEqual(0, len(list(checks.validate_assertIsNone( "assertIsNone(None)")))) self.assertEqual(1, len(list(checks.validate_assertIsNone( "assertEqual(None, %s)" % test_value)))) def test_validate_assertTrue(self): test_value = True self.assertEqual(0, len(list(checks.validate_assertTrue( "assertTrue(True)")))) self.assertEqual(1, len(list(checks.validate_assertTrue( "assertEqual(True, %s)" % test_value)))) @ddt.unpack @ddt.data( (1, 'LOG.info', "cinder/tests/unit/fake.py", False), (1, 'LOG.warning', "cinder/tests/fake.py", False), (1, 'LOG.error', "cinder/tests/fake.py", False), (1, 'LOG.exception', "cinder/tests/fake.py", False), (1, 'LOG.debug', "cinder/tests/fake.py", False), (0, 'LOG.info.assert_called_once_with', "cinder/tests/fake.py", False), (0, 'some.LOG.error.call', "cinder/tests/fake.py", False), (0, 'LOG.warning', "cinder/tests/unit/fake.py", True), (0, 'LOG.warning', "cinder/tests/unit/integrated/fake.py", False)) def test_no_test_log(self, first, second, third, fourth): 
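        # @ddt.data feeds each tuple above in as one parameterized test run
        # and @ddt.unpack splats it into arguments: 'first' is the expected
        # violation count, 'second' the logging call under test, 'third'
        # the filename, and 'fourth' the noqa flag passed to no_test_log.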
self.assertEqual(first, len(list(checks.no_test_log( "%s('arg')" % second, third, fourth)))) cinder-8.0.0/cinder/tests/unit/test_hpe_xp_fc.py0000664000567000056710000007622312701406250023064 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from cinder import exception from cinder.objects import fields from cinder import test from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers.hpe import hpe_xp_fc from cinder.volume.drivers.hpe import hpe_xp_opts from oslo_utils import importutils NORMAL_LDEV_TYPE = 'Normal' POOL_INFO = {'30': {'total_gb': 'infinite', 'free_gb': 'infinite'}} EXISTING_POOL_REF = { '101': {'size': 128} } class HPEXPFakeCommon(object): """Fake HPEXP Common.""" def __init__(self, conf, storage_protocol, **kwargs): self.conf = conf self.volumes = {} self.snapshots = {} self._stats = {} self.POOL_SIZE = 1000 self.LDEV_MAX = 1024 self.driver_info = { 'hba_id': 'wwpns', 'hba_id_type': 'World Wide Name', 'msg_id': {'target': 308}, 'volume_backend_name': 'HPEXPFC', 'volume_opts': hpe_xp_opts.FC_VOLUME_OPTS, 'volume_type': 'fibre_channel', } self.storage_info = { 'protocol': storage_protocol, 'pool_id': None, 'ldev_range': None, 'ports': [], 'compute_ports': [], 'wwns': {}, 'output_first': True } def create_volume(self, volume): if volume['size'] > self.POOL_SIZE: raise exception.VolumeBackendAPIException( data='The volume size (%s) exceeds the pool size (%s).' % (volume['size'], self.POOL_SIZE)) newldev = self._available_ldev() self.volumes[newldev] = volume return { 'provider_location': newldev, 'metadata': { 'ldev': newldev, 'type': NORMAL_LDEV_TYPE } } def _available_ldev(self): for i in range(1, self.LDEV_MAX): if self.volume_exists({'provider_location': str(i)}) is False: return str(i) raise exception.VolumeBackendAPIException( data='Failed to get an available logical device.') def volume_exists(self, volume): return self.volumes.get(volume['provider_location'], None) is not None def delete_volume(self, volume): vol = self.volumes.get(volume['provider_location'], None) if vol is not None: if vol.get('is_busy') is True: raise exception.VolumeIsBusy(volume_name=volume['name']) del self.volumes[volume['provider_location']] def create_snapshot(self, snapshot): src_vref = self.volumes.get(snapshot["volume_id"]) if not src_vref: raise exception.VolumeBackendAPIException( data='The %(type)s %(id)s source to be replicated was not ' 'found.' 
% {'type': 'snapshot', 'id': (snapshot.get('id'))}) newldev = self._available_ldev() self.volumes[newldev] = snapshot return {'provider_location': newldev} def delete_snapshot(self, snapshot): snap = self.volumes.get(snapshot['provider_location'], None) if snap is not None: if snap.get('is_busy') is True: raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) del self.volumes[snapshot['provider_location']] def get_volume_stats(self, refresh=False): if refresh: d = {} d['volume_backend_name'] = self.driver_info['volume_backend_name'] d['vendor_name'] = 'Hewlett Packard Enterprise' d['driver_version'] = '1.1.0' d['storage_protocol'] = self.storage_info['protocol'] pool_info = POOL_INFO.get(self.conf.hpexp_pool) if pool_info is None: return self._stats d['total_capacity_gb'] = pool_info['total_gb'] d['free_capacity_gb'] = pool_info['free_gb'] d['allocated_capacity_gb'] = 0 d['reserved_percentage'] = 0 d['QoS_support'] = False self._stats = d return self._stats def create_volume_from_snapshot(self, volume, snapshot): ldev = snapshot.get('provider_location') if self.volumes.get(ldev) is None: raise exception.VolumeBackendAPIException( data='The %(type)s %(id)s source to be replicated ' 'was not found.' % {'type': 'snapshot', 'id': snapshot['id']}) if volume['size'] != snapshot['volume_size']: raise exception.VolumeBackendAPIException( data='The specified operation is not supported. ' 'The volume size must be the same as the source %(type)s. ' '(volume: %(volume_id)s)' % {'type': 'snapshot', 'volume_id': volume['id']}) newldev = self._available_ldev() self.volumes[newldev] = volume return { 'provider_location': newldev, 'metadata': { 'ldev': newldev, 'type': NORMAL_LDEV_TYPE, 'snapshot': snapshot['id'] } } def create_cloned_volume(self, volume, src_vref): ldev = src_vref.get('provider_location') if self.volumes.get(ldev) is None: raise exception.VolumeBackendAPIException( data='The %(type)s %(id)s source to be replicated was not ' 'found.' % {'type': 'volume', 'id': src_vref.get('id')}) if volume['size'] != src_vref['size']: raise exception.VolumeBackendAPIException( data='The specified operation is not supported. ' 'The volume size must be the same as the source %(type)s. ' '(volume: %(volume_id)s)' % {'type': 'volume', 'volume_id': volume['id']}) newldev = self._available_ldev() self.volumes[newldev] = volume return { 'provider_location': newldev, 'metadata': { 'ldev': newldev, 'type': NORMAL_LDEV_TYPE, 'volume': src_vref['id'] } } def extend_volume(self, volume, new_size): ldev = volume.get('provider_location') if not self.volumes.get(ldev): raise exception.VolumeBackendAPIException( data='The volume %(volume_id)s to be extended was not found.' % {'volume_id': volume['id']}) if new_size > self.POOL_SIZE: raise exception.VolumeBackendAPIException( data='The volume size (%s) exceeds the pool size (%s).' % (new_size, self.POOL_SIZE)) self.volumes[ldev]['size'] = new_size def manage_existing(self, volume, existing_ref): ldev = existing_ref.get('source-id') return { 'provider_location': ldev, 'metadata': { 'ldev': ldev, 'type': NORMAL_LDEV_TYPE } } def manage_existing_get_size(self, dummy_volume, existing_ref): ldev = existing_ref.get('source-id') if not EXISTING_POOL_REF.get(ldev): raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason='No valid value is ' 'specified for "source-id". 
A valid value ' 'must be specified for "source-id" to manage the volume.') size = EXISTING_POOL_REF[ldev]['size'] return size def unmanage(self, volume): vol = self.volumes.get(volume['provider_location'], None) if vol is not None: if vol.get('is_busy') is True: raise exception.VolumeIsBusy( volume_name=volume['provider_location']) del self.volumes[volume['provider_location']] def get_pool_id(self): pool = self.conf.hpexp_pool if pool.isdigit(): return int(pool) return None def do_setup(self, context): self.ctxt = context self.storage_info['pool_id'] = self.get_pool_id() if self.storage_info['pool_id'] is None: raise exception.VolumeBackendAPIException( data='A pool could not be found. (pool: %(pool)s)' % {'pool': self.conf.hpexp_pool}) def initialize_connection(self, volume, connector): ldev = volume.get('provider_location') if not self.volumes.get(ldev): raise exception.VolumeBackendAPIException( data='The volume %(volume_id)s to be mapped was not found.' % {'volume_id': volume['id']}) self.volumes[ldev]['attached'] = connector return { 'driver_volume_type': self.driver_info['volume_type'], 'data': { 'target_discovered': True, 'target_lun': volume['id'], 'access_mode': 'rw', 'multipath': True, 'target_wwn': ['50060E801053C2E0'], 'initiator_target_map': { u'2388000087e1a2e0': ['50060E801053C2E0']}, } } def terminate_connection(self, volume, connector): ldev = volume.get('provider_location') if not self.volumes.get(ldev): return if not self.is_volume_attached(volume, connector): raise exception.VolumeBackendAPIException( data='Volume not found for %s' % ldev) del self.volumes[volume['provider_location']]['attached'] for vol in self.volumes: if 'attached' in self.volumes[vol]: return return { 'driver_volume_type': self.driver_info['volume_type'], 'data': { 'target_lun': volume['id'], 'target_wwn': ['50060E801053C2E0'], 'initiator_target_map': { u'2388000087e1a2e0': ['50060E801053C2E0']}, } } def is_volume_attached(self, volume, connector): if not self.volume_exists(volume): return False return (self.volumes[volume['provider_location']].get('attached', None) == connector) def copy_volume_data(self, context, src_vol, dest_vol, remote=None): pass def copy_image_to_volume(self, context, volume, image_service, image_id): pass def restore_backup(self, context, backup, volume, backup_service): pass class HPEXPFCDriverTest(test.TestCase): """Test HPEXPFCDriver.""" _VOLUME = {'size': 128, 'name': 'test1', 'id': 'id1', 'status': 'available'} _VOLUME2 = {'size': 128, 'name': 'test2', 'id': 'id2', 'status': 'available'} _VOLUME3 = {'size': 256, 'name': 'test2', 'id': 'id3', 'status': 'available'} _VOLUME_BACKUP = {'size': 128, 'name': 'backup-test', 'id': 'id-backup', 'provider_location': '0', 'status': fields.BackupStatus.AVAILABLE} _TEST_SNAPSHOT = {'volume_name': 'test', 'size': 128, 'volume_size': 128, 'name': 'test-snap', 'volume_id': '1', 'id': 'test-snap-0', 'status': 'available'} _TOO_BIG_VOLUME_SIZE = 100000 def __init__(self, *args, **kwargs): super(HPEXPFCDriverTest, self).__init__(*args, **kwargs) def setUp(self): self._setup_config() self._setup_driver() super(HPEXPFCDriverTest, self).setUp() def _setup_config(self): self.configuration = mock.Mock(conf.Configuration) self.configuration.hpexp_storage_id = "00000" self.configuration.hpexp_pool = "30" @mock.patch.object(importutils, 'import_object', return_value=None) def _setup_driver(self, arg1): self.driver = hpe_xp_fc.HPEXPFCDriver(configuration=self.configuration) self.driver.common = HPEXPFakeCommon(self.configuration, 'FC') 
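        # Swap the real backend helper for the in-memory HPEXPFakeCommon
        # defined above, so every driver entry point can be exercised
        # without talking to array hardware.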
self.driver.do_setup(None) # API test cases def test_create_volume(self): """Test create_volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.create_volume(volume) volume['provider_location'] = rc['provider_location'] has_volume = self.driver.common.volume_exists(volume) self.assertTrue(has_volume) def test_create_volume_error_on_no_pool_space(self): """Test create_volume is error on no pool space.""" update = { 'size': self._TOO_BIG_VOLUME_SIZE, 'name': 'test', 'id': 'id1', 'status': 'available' } volume = fake_volume.fake_db_volume(**update) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) def test_create_volume_error_on_no_available_ldev(self): """Test create_volume is error on no available ldev.""" for i in range(1, 1024): volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.create_volume(volume) self.assertEqual(str(i), rc['provider_location']) volume = fake_volume.fake_db_volume(**self._VOLUME) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) def test_delete_volume(self): """Test delete_volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.create_volume(volume) volume['provider_location'] = rc['provider_location'] self.driver.delete_volume(volume) has_volume = self.driver.common.volume_exists(volume) self.assertFalse(has_volume) def test_delete_volume_on_non_existing_volume(self): """Test delete_volume on non existing volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) volume['provider_location'] = '1' has_volume = self.driver.common.volume_exists(volume) self.assertFalse(has_volume) self.driver.delete_volume(volume) def test_delete_volume_error_on_busy_volume(self): """Test delete_volume is error on busy volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.create_volume(volume) self.driver.common.volumes[rc['provider_location']]['is_busy'] = True volume['provider_location'] = rc['provider_location'] self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, volume) def test_create_snapshot(self): """Test create_snapshot.""" volume = fake_volume.fake_db_volume(**self._VOLUME) self.driver.create_volume(volume) snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) rc = self.driver.create_snapshot(snapshot) snapshot['provider_location'] = rc['provider_location'] has_volume = self.driver.common.volume_exists(snapshot) self.assertTrue(has_volume) def test_create_snapshot_error_on_non_src_ref(self): """Test create_snapshot is error on non source reference.""" snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snapshot) def test_delete_snapshot(self): """Test delete_snapshot.""" volume = fake_volume.fake_db_volume(**self._VOLUME) self.driver.create_volume(volume) snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) rc = self.driver.create_snapshot(snapshot) snapshot['provider_location'] = rc['provider_location'] rc = self.driver.delete_snapshot(snapshot) has_volume = self.driver.common.volume_exists(snapshot) self.assertFalse(has_volume) def test_delete_snapshot_error_on_busy_snapshot(self): """Test delete_snapshot is error on busy snapshot.""" volume = fake_volume.fake_db_volume(**self._VOLUME) self.driver.create_volume(volume) snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) rc = self.driver.create_snapshot(snapshot) 
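        # Flag the backing LDEV as busy; the fake common will then raise
        # SnapshotIsBusy when delete_snapshot() is attempted.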
self.driver.common.volumes[rc['provider_location']]['is_busy'] = True snapshot['provider_location'] = rc['provider_location'] self.assertRaises(exception.SnapshotIsBusy, self.driver.delete_snapshot, snapshot) def test_delete_snapshot_on_non_existing_snapshot(self): """Test delete_snapshot on non existing snapshot.""" snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) snapshot['provider_location'] = '1' self.driver.delete_snapshot(snapshot) def test_create_volume_from_snapshot(self): """Test create_volume_from_snapshot.""" volume = fake_volume.fake_db_volume(**self._VOLUME) self.driver.create_volume(volume) snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) rc_snap = self.driver.create_snapshot(snapshot) snapshot['provider_location'] = rc_snap['provider_location'] volume2 = fake_volume.fake_db_volume(**self._VOLUME2) rc = self.driver.create_volume_from_snapshot(volume2, snapshot) volume2['provider_location'] = rc['provider_location'] has_volume = self.driver.common.volume_exists(volume2) self.assertTrue(has_volume) def test_create_volume_from_snapshot_error_on_non_existing_snapshot(self): """Test create_volume_from_snapshot is error on non existing snapshot. """ volume2 = fake_volume.fake_db_volume(**self._VOLUME2) snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) snapshot['provider_location'] = '1' self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volume2, snapshot) def test_create_volume_from_snapshot_error_on_diff_size(self): """Test create_volume_from_snapshot is error on different size.""" volume = fake_volume.fake_db_volume(**self._VOLUME) self.driver.create_volume(volume) snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) rc_snap = self.driver.create_snapshot(snapshot) snapshot['provider_location'] = rc_snap['provider_location'] volume3 = fake_volume.fake_db_volume(**self._VOLUME3) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volume3, snapshot) def test_create_cloned_volume(self): """Test create_cloned_volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc_vol = self.driver.create_volume(volume) volume['provider_location'] = rc_vol['provider_location'] volume2 = fake_volume.fake_db_volume(**self._VOLUME2) rc = self.driver.create_cloned_volume(volume2, volume) volume2['provider_location'] = rc['provider_location'] has_volume = self.driver.common.volume_exists(volume2) self.assertTrue(has_volume) def test_create_cloned_volume_error_on_non_existing_volume(self): """Test create_cloned_volume is error on non existing volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) volume['provider_location'] = '1' volume2 = fake_volume.fake_db_volume(**self._VOLUME2) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume2, volume) def test_create_cloned_volume_error_on_diff_size(self): """Test create_cloned_volume is error on different size.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc_vol = self.driver.create_volume(volume) volume['provider_location'] = rc_vol['provider_location'] volume3 = fake_volume.fake_db_volume(**self._VOLUME3) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume3, volume) def test_get_volume_stats(self): """Test get_volume_stats.""" rc = self.driver.get_volume_stats(True) self.assertEqual("Hewlett Packard Enterprise", rc['vendor_name']) def test_get_volume_stats_error_on_non_existing_pool_id(self): """Test 
get_volume_stats is error on non existing pool id.""" self.configuration.hpexp_pool = 29 rc = self.driver.get_volume_stats(True) self.assertEqual({}, rc) @mock.patch.object(driver.FibreChannelDriver, 'copy_image_to_volume') def test_copy_image_to_volume(self, arg1): """Test copy_image_to_volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc_vol = self.driver.create_volume(volume) volume['provider_location'] = rc_vol['provider_location'] self.driver.copy_image_to_volume(None, volume, None, None) arg1.assert_called_with(None, volume, None, None) @mock.patch.object(driver.FibreChannelDriver, 'copy_image_to_volume', side_effect=exception.CinderException) def test_copy_image_to_volume_error(self, arg1): """Test copy_image_to_volume is error.""" volume = fake_volume.fake_db_volume(**self._VOLUME) volume['provider_location'] = '1' self.assertRaises(exception.CinderException, self.driver.copy_image_to_volume, None, volume, None, None) arg1.assert_called_with(None, volume, None, None) @mock.patch.object(driver.FibreChannelDriver, 'restore_backup') def test_restore_backup(self, arg1): """Test restore_backup.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc_vol = self.driver.create_volume(volume) volume['provider_location'] = rc_vol['provider_location'] volume_backup = fake_volume.fake_db_volume(**self._VOLUME_BACKUP) self.driver.restore_backup(None, volume_backup, volume, None) arg1.assert_called_with(None, volume_backup, volume, None) @mock.patch.object(driver.FibreChannelDriver, 'restore_backup', side_effect=exception.CinderException) def test_restore_backup_error(self, arg1): """Test restore_backup is error.""" volume = fake_volume.fake_db_volume(**self._VOLUME) volume['provider_location'] = '1' volume_backup = fake_volume.fake_db_volume(**self._VOLUME_BACKUP) self.assertRaises(exception.CinderException, self.driver.restore_backup, None, volume_backup, volume, None) arg1.assert_called_with(None, volume_backup, volume, None) def test_extend_volume(self): """Test extend_volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.create_volume(volume) volume['provider_location'] = rc['provider_location'] new_size = 256 self.driver.extend_volume(volume, new_size) actual = self.driver.common.volumes[rc['provider_location']]['size'] self.assertEqual(new_size, actual) def test_extend_volume_error_on_non_existing_volume(self): """Test extend_volume is error on non existing volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) volume['provider_location'] = '1' self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, volume, 256) def test_extend_volume_error_on_no_pool_space(self): """Test extend_volume is error on no pool space.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.create_volume(volume) volume['provider_location'] = rc['provider_location'] self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, volume, self._TOO_BIG_VOLUME_SIZE) def test_manage_existing(self): """Test manage_existing.""" existing_ref = {'source-id': '101'} volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.manage_existing(volume, existing_ref) self.assertEqual('101', rc['provider_location']) def test_manage_existing_with_none_sourceid(self): """Test manage_existing is error with no source-id.""" existing_ref = {'source-id': None} volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.manage_existing(volume, existing_ref) self.assertIsNone(rc['provider_location']) def 
test_manage_existing_get_size(self): """Test manage_existing_get_size.""" existing_ref = {'source-id': '101'} volume = fake_volume.fake_db_volume(**self._VOLUME) return_size = self.driver.manage_existing_get_size( volume, existing_ref) self.assertEqual(EXISTING_POOL_REF['101']['size'], return_size) def test_manage_existing_get_size_with_none_sourceid(self): """Test manage_existing_get_size is error with no source-id.""" existing_ref = {'source-id': None} volume = fake_volume.fake_db_volume(**self._VOLUME) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, existing_ref) def test_unmanage(self): """Test unmanage.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.create_volume(volume) volume['provider_location'] = rc['provider_location'] self.assertTrue(self.driver.common.volume_exists(volume)) self.driver.unmanage(volume) self.assertFalse(self.driver.common.volume_exists(volume)) def test_unmanage_error_on_busy_volume(self): """Test unmanage is error on busy volume.""" volume = fake_volume.fake_db_volume(**self._VOLUME) rc = self.driver.create_volume(volume) ldev = rc['provider_location'] self.driver.common.volumes[ldev]['is_busy'] = True self.assertRaises(exception.VolumeIsBusy, self.driver.unmanage, {'provider_location': ldev}) def test_initialize_connection(self): """Test initialize_connection.""" connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], 'ip': '127.0.0.1'} volume = fake_volume.fake_db_volume(**self._VOLUME) rc_vol = self.driver.create_volume(volume) volume['provider_location'] = rc_vol['provider_location'] conn_info = self.driver.initialize_connection(volume, connector) self.assertIn('data', conn_info) self.assertIn('initiator_target_map', conn_info['data']) is_attached = self.driver.common.is_volume_attached(volume, connector) self.assertTrue(is_attached) self.driver.terminate_connection(volume, connector) self.driver.delete_volume(volume) def test_initialize_connection_error_on_non_exisiting_volume(self): """Test initialize_connection is error on non existing volume.""" connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], 'ip': '127.0.0.1'} volume = fake_volume.fake_db_volume(**self._VOLUME) volume['provider_location'] = '1' self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) def test_terminate_connection_on_non_last_volume(self): """Test terminate_connection on non last volume.""" connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], 'ip': '127.0.0.1'} last_volume = fake_volume.fake_db_volume(**self._VOLUME) last_rc_vol = self.driver.create_volume(last_volume) last_volume['provider_location'] = last_rc_vol['provider_location'] self.driver.initialize_connection(last_volume, connector) volume = fake_volume.fake_db_volume(**self._VOLUME) rc_vol = self.driver.create_volume(volume) volume['provider_location'] = rc_vol['provider_location'] self.driver.initialize_connection(volume, connector) conn_info = self.driver.terminate_connection(volume, connector) self.assertNotIn('data', conn_info) is_attached = self.driver.common.is_volume_attached(volume, connector) self.assertFalse(is_attached) self.driver.delete_volume(volume) self.driver.terminate_connection(last_volume, connector) self.driver.delete_volume(last_volume) def test_terminate_connection_on_non_existing_volume(self): """Test terminate_connection on non existing volume.""" connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], 'ip': '127.0.0.1'} 
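        # provider_location '1' below was never created in the fake
        # backend, so terminate_connection() is expected to return quietly
        # rather than raise.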
volume = fake_volume.fake_db_volume(**self._VOLUME) volume['provider_location'] = '1' self.driver.terminate_connection(volume, connector) def test_terminate_connection_error_on_non_initialized_volume(self): """Test terminate_connection is error on non initialized volume.""" connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], 'ip': '127.0.0.1'} volume = fake_volume.fake_db_volume(**self._VOLUME) rc_vol = self.driver.create_volume(volume) volume['provider_location'] = rc_vol['provider_location'] self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) def test_terminate_connection_last_volume(self): """Test terminate_connection on last volume on a host.""" connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], 'ip': '127.0.0.1'} volume = fake_volume.fake_db_volume(**self._VOLUME) rc_vol = self.driver.create_volume(volume) volume['provider_location'] = rc_vol['provider_location'] self.driver.initialize_connection(volume, connector) conn_info = self.driver.terminate_connection(volume, connector) self.assertIn('data', conn_info) self.assertIn('initiator_target_map', conn_info['data']) is_attached = self.driver.common.is_volume_attached(volume, connector) self.assertFalse(is_attached) self.driver.delete_volume(volume) def test_do_setup_error_on_invalid_pool_id(self): """Test do_setup is error on invalid pool id.""" self.configuration.hpexp_pool = 'invalid' self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) cinder-8.0.0/cinder/tests/unit/test_ssh_utils.py0000664000567000056710000003071612701406250023143 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
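# A minimal usage sketch of the pool under test (host, credentials and
# command are illustrative, not part of this module): SSHPool lends out
# paramiko clients as a context manager and returns them on exit.
#
#     pool = ssh_utils.SSHPool("10.0.0.5", 22, 10, "admin",
#                              password="secret", min_size=1, max_size=4)
#     with pool.item() as ssh:
#         stdin, stdout, stderr = ssh.exec_command("vgs")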
import mock import paramiko import uuid from oslo_config import cfg from cinder import exception from cinder import ssh_utils from cinder import test CONF = cfg.CONF class FakeSock(object): def settimeout(self, timeout): pass class FakeTransport(object): def __init__(self): self.active = True self.sock = FakeSock() def set_keepalive(self, timeout): pass def is_active(self): return self.active class FakeSSHClient(object): def __init__(self): self.id = uuid.uuid4() self.transport = FakeTransport() def set_missing_host_key_policy(self, policy): self.policy = policy def load_system_host_keys(self): self.system_host_keys = 'system_host_keys' def load_host_keys(self, hosts_key_file): self.hosts_key_file = hosts_key_file def connect(self, ip, port=22, username=None, password=None, pkey=None, timeout=10): pass def get_transport(self): return self.transport def get_policy(self): return self.policy def get_host_keys(self): return '127.0.0.1 ssh-rsa deadbeef' def close(self): pass def __call__(self, *args, **kwargs): pass class SSHPoolTestCase(test.TestCase): """Unit test for SSH Connection Pool.""" @mock.patch('cinder.ssh_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_sshpool_remove(self, mock_isfile, mock_sshclient, mock_open, mock_conf): ssh_to_remove = mock.MagicMock() mock_sshclient.side_effect = [mock.MagicMock(), ssh_to_remove, mock.MagicMock()] mock_conf.ssh_hosts_key_file.return_value = 'dummy' sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=3, max_size=3) self.assertIn(ssh_to_remove, list(sshpool.free_items)) sshpool.remove(ssh_to_remove) self.assertNotIn(ssh_to_remove, list(sshpool.free_items)) @mock.patch('cinder.ssh_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_sshpool_remove_object_not_in_pool(self, mock_isfile, mock_sshclient, mock_open, mock_conf): # create an SSH Client that is not a part of sshpool. 
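        # Only the two MagicMocks queued in side_effect ever enter the
        # pool (min_size=2), so the extra client created below can never
        # appear in free_items; remove() must leave the pool unchanged
        # instead of raising.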
ssh_to_remove = mock.MagicMock() mock_sshclient.side_effect = [mock.MagicMock(), mock.MagicMock()] mock_conf.ssh_hosts_key_file.return_value = 'dummy' sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=2, max_size=2) listBefore = list(sshpool.free_items) self.assertNotIn(ssh_to_remove, listBefore) sshpool.remove(ssh_to_remove) self.assertEqual(listBefore, list(sshpool.free_items)) @mock.patch('cinder.ssh_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_ssh_default_hosts_key_file(self, mock_isfile, mock_sshclient, mock_open, mock_conf): mock_ssh = mock.MagicMock() mock_sshclient.return_value = mock_ssh mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts' # create with customized setting sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) host_key_files = sshpool.hosts_key_file self.assertEqual('/var/lib/cinder/ssh_known_hosts', host_key_files) mock_ssh.load_host_keys.assert_called_once_with( '/var/lib/cinder/ssh_known_hosts') @mock.patch('cinder.ssh_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_ssh_host_key_file_kwargs(self, mock_isfile, mock_sshclient, mock_open, mock_conf): mock_ssh = mock.MagicMock() mock_sshclient.return_value = mock_ssh mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts' # create with customized setting sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1, hosts_key_file='dummy_host_keyfile') host_key_files = sshpool.hosts_key_file self.assertIn('dummy_host_keyfile', host_key_files) self.assertIn('/var/lib/cinder/ssh_known_hosts', host_key_files) expected = [ mock.call.load_host_keys('dummy_host_keyfile'), mock.call.load_host_keys('/var/lib/cinder/ssh_known_hosts')] mock_ssh.assert_has_calls(expected, any_order=True) @mock.patch('cinder.ssh_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('os.path.isfile', return_value=True) @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('paramiko.SSHClient') def test_single_ssh_connect(self, mock_sshclient, mock_pkey, mock_isfile, mock_open, mock_conf): mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts' # create with password sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with sshpool.item() as ssh: first_id = ssh.id with sshpool.item() as ssh: second_id = ssh.id self.assertEqual(first_id, second_id) self.assertEqual(1, mock_sshclient.return_value.connect.call_count) # create with private key sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", privatekey="test", min_size=1, max_size=1) self.assertEqual(2, mock_sshclient.return_value.connect.call_count) # attempt to create with no password or private key self.assertRaises(paramiko.SSHException, ssh_utils.SSHPool, "127.0.0.1", 22, 10, "test", min_size=1, max_size=1) @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') def test_closed_reopened_ssh_connections(self, mock_sshclient, mock_open): mock_sshclient.return_value = FakeSSHClient() sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=4) with sshpool.item() as ssh: mock_sshclient.reset_mock() first_id = ssh.id with sshpool.item() as ssh: second_id = ssh.id ssh.get_transport().active = False sshpool.remove(ssh) self.assertEqual(first_id, 
second_id) # create a new client mock_sshclient.return_value = FakeSSHClient() with sshpool.item() as ssh: third_id = ssh.id self.assertNotEqual(first_id, third_id) @mock.patch('cinder.ssh_utils.CONF') @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') def test_missing_ssh_hosts_key_config(self, mock_sshclient, mock_open, mock_conf): mock_sshclient.return_value = FakeSSHClient() mock_conf.ssh_hosts_key_file = None # create with password self.assertRaises(exception.ParameterNotFound, ssh_utils.SSHPool, "127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') def test_create_default_known_hosts_file(self, mock_sshclient, mock_open): mock_sshclient.return_value = FakeSSHClient() CONF.state_path = '/var/lib/cinder' CONF.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts' default_file = '/var/lib/cinder/ssh_known_hosts' ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with ssh_pool.item() as ssh: mock_open.assert_called_once_with(default_file, 'a') ssh_pool.remove(ssh) @mock.patch('os.path.isfile', return_value=False) @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') def test_ssh_missing_hosts_key_file(self, mock_sshclient, mock_open, mock_isfile): mock_sshclient.return_value = FakeSSHClient() CONF.ssh_hosts_key_file = '/tmp/blah' self.assertNotIn(CONF.state_path, CONF.ssh_hosts_key_file) self.assertRaises(exception.InvalidInput, ssh_utils.SSHPool, "127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) @mock.patch.multiple('cinder.ssh_utils.CONF', strict_ssh_host_key_policy=True, ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts') @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_ssh_strict_host_key_policy(self, mock_isfile, mock_sshclient, mock_open): mock_sshclient.return_value = FakeSSHClient() # create with customized setting sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with sshpool.item() as ssh: self.assertTrue(isinstance(ssh.get_policy(), paramiko.RejectPolicy)) @mock.patch('six.moves.builtins.open') @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_ssh_not_strict_host_key_policy(self, mock_isfile, mock_sshclient, mock_open): mock_sshclient.return_value = FakeSSHClient() CONF.strict_ssh_host_key_policy = False # create with customized setting sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with sshpool.item() as ssh: self.assertTrue(isinstance(ssh.get_policy(), paramiko.AutoAddPolicy)) cinder-8.0.0/cinder/tests/unit/test_blockbridge.py0000664000567000056710000005003112701406250023365 0ustar jenkinsjenkins00000000000000# Copyright 2015 Blockbridge Networks, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
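# A sketch of the mocking strategy used throughout this module
# (illustrative only): each test patches six.moves.http_client's
# HTTPSConnection so the Blockbridge REST client never touches the
# network, then asserts on the exact request the driver would have sent.
#
#     with mock.patch("six.moves.http_client.HTTPSConnection") as conn:
#         rsp = conn.return_value.getresponse.return_value
#         rsp.status = 200
#         rsp.read.return_value = '{}'
#         ...  # exercise the driver, then inspect conn.return_value.request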
""" Blockbridge EPS iSCSI Volume Driver Tests """ import base64 try: from unittest import mock except ImportError: import mock from oslo_serialization import jsonutils from oslo_utils import units import six from six.moves import http_client from six.moves import urllib from cinder import context from cinder import exception from cinder import test from cinder.volume import configuration as conf import cinder.volume.drivers.blockbridge as bb DEFAULT_POOL_NAME = "OpenStack" DEFAULT_POOL_QUERY = "+openstack" FIXTURE_VOL_EXPORT_OK = """{ "target_ip":"127.0.0.1", "target_port":3260, "target_iqn":"iqn.2009-12.com.blockbridge:t-pjxczxh-t001", "target_lun":0, "initiator_login":"mock-user-abcdef123456" } """ POOL_STATS_WITHOUT_USAGE = { 'driver_version': '1.3.0', 'pools': [{ 'filter_function': None, 'free_capacity_gb': 'unknown', 'goodness_function': None, 'location_info': 'BlockbridgeDriver:unknown:OpenStack', 'max_over_subscription_ratio': None, 'pool_name': 'OpenStack', 'thin_provisioning_support': True, 'reserved_percentage': 0, 'total_capacity_gb': 'unknown'}, ], 'storage_protocol': 'iSCSI', 'vendor_name': 'Blockbridge', 'volume_backend_name': 'BlockbridgeISCSIDriver', } def common_mocks(f): """Decorator to set mocks common to all tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. """ def _common_inner_inner1(inst, *args, **kwargs): @mock.patch("six.moves.http_client.HTTPSConnection", autospec=True) def _common_inner_inner2(mock_conn): inst.mock_httplib = mock_conn inst.mock_conn = mock_conn.return_value inst.mock_response = mock.Mock() inst.mock_response.read.return_value = '{}' inst.mock_response.status = 200 inst.mock_conn.request.return_value = True inst.mock_conn.getresponse.return_value = inst.mock_response return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 class BlockbridgeISCSIDriverTestCase(test.TestCase): def setUp(self): super(BlockbridgeISCSIDriverTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.blockbridge_api_host = 'ut-api.blockbridge.com' self.cfg.blockbridge_api_port = None self.cfg.blockbridge_auth_scheme = 'token' self.cfg.blockbridge_auth_token = '0//kPIw7Ck7PUkPSKY...' 
self.cfg.blockbridge_pools = {DEFAULT_POOL_NAME: DEFAULT_POOL_QUERY} self.cfg.blockbridge_default_pool = None self.cfg.filter_function = None self.cfg.goodness_function = None def _cfg_safe_get(arg): return getattr(self.cfg, arg, None) self.cfg.safe_get.side_effect = _cfg_safe_get mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.real_client = bb.BlockbridgeAPIClient(configuration=self.cfg) self.mock_client = mock.Mock(wraps=self.real_client) self.driver = bb.BlockbridgeISCSIDriver(execute=mock_exec, client=self.mock_client, configuration=self.cfg) self.user_id = '2c13bc8ef717015fda1e12e70dab24654cb6a6da' self.project_id = '62110b9d37f1ff3ea1f51e75812cb92ed9a08b28' self.volume_name = u'testvol-1' self.volume_id = '6546b9e9-1980-4241-a4e9-0ad9d382c032' self.volume_size = 1 self.volume = dict( name=self.volume_name, size=self.volume_size, id=self.volume_id, user_id=self.user_id, project_id=self.project_id, host='fake-host') self.snapshot_name = u'testsnap-1' self.snapshot_id = '207c12af-85a7-4da6-8d39-a7457548f965' self.snapshot = dict( volume_name=self.volume_name, name=self.snapshot_name, id=self.snapshot_id, volume_id='55ff8a46-c35f-4ca3-9991-74e1697b220e', user_id=self.user_id, project_id=self.project_id) self.connector = dict( initiator='iqn.1994-05.com.redhat:6a528422b61') self.driver.do_setup(context.get_admin_context()) @common_mocks def test_http_mock_success(self): self.mock_response.read.return_value = '{}' self.mock_response.status = 200 conn = http_client.HTTPSConnection('whatever', None) conn.request('GET', '/blah', '{}', {}) rsp = conn.getresponse() self.assertEqual('{}', rsp.read()) self.assertEqual(200, rsp.status) @common_mocks def test_http_mock_failure(self): mock_body = '{"error": "no results matching query", "status": 413}' self.mock_response.read.return_value = mock_body self.mock_response.status = 413 conn = http_client.HTTPSConnection('whatever', None) conn.request('GET', '/blah', '{}', {}) rsp = conn.getresponse() self.assertEqual(mock_body, rsp.read()) self.assertEqual(413, rsp.status) @common_mocks def test_cfg_api_host(self): with mock.patch.object(self.cfg, 'blockbridge_api_host', 'test.host'): self.driver.get_volume_stats(True) self.mock_httplib.assert_called_once_with('test.host', None) @common_mocks def test_cfg_api_port(self): with mock.patch.object(self.cfg, 'blockbridge_api_port', 1234): self.driver.get_volume_stats(True) self.mock_httplib.assert_called_once_with( self.cfg.blockbridge_api_host, 1234) @common_mocks def test_cfg_api_auth_scheme_password(self): self.cfg.blockbridge_auth_scheme = 'password' self.cfg.blockbridge_auth_user = 'mock-user' self.cfg.blockbridge_auth_password = 'mock-password' with mock.patch.object(self.driver, 'hostname', 'mock-hostname'): self.driver.get_volume_stats(True) creds = "%s:%s" % (self.cfg.blockbridge_auth_user, self.cfg.blockbridge_auth_password) if six.PY3: creds = creds.encode('utf-8') b64_creds = base64.encodestring(creds).decode('ascii') else: b64_creds = base64.encodestring(creds) params = dict( hostname='mock-hostname', version=self.driver.VERSION, backend_name='BlockbridgeISCSIDriver', pool='OpenStack', query='+openstack') headers = { 'Accept': 'application/vnd.blockbridge-3+json', 'Authorization': "Basic %s" % b64_creds.replace("\n", ""), 'User-Agent': "cinder-volume/%s" % self.driver.VERSION, } self.mock_conn.request.assert_called_once_with( 'GET', mock.ANY, None, headers) # Parse the URL instead of comparing directly both URLs. 
# On Python 3, parameters are formatted in a random order because # of the hash randomization. conn_url = self.mock_conn.request.call_args[0][1] conn_params = dict(urllib.parse.parse_qsl(conn_url.split("?", 1)[1])) self.assertTrue(conn_url.startswith("/api/cinder/status?"), repr(conn_url)) self.assertEqual(params, conn_params) @common_mocks def test_create_volume(self): self.driver.create_volume(self.volume) url = "/volumes/%s" % self.volume_id create_params = dict( name=self.volume_name, query=DEFAULT_POOL_QUERY, capacity=self.volume_size * units.Gi) kwargs = dict( method='PUT', params=create_params, user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) full_url = "/api/cinder" + url raw_body = jsonutils.dumps(create_params) tsk_header = "ext_auth=keystone/%(project_id)s/%(user_id)s" % kwargs authz_header = "Bearer %s" % self.cfg.blockbridge_auth_token headers = { 'X-Blockbridge-Task': tsk_header, 'Accept': 'application/vnd.blockbridge-3+json', 'Content-Type': 'application/json', 'Authorization': authz_header, 'User-Agent': "cinder-volume/%s" % self.driver.VERSION, } self.mock_conn.request.assert_called_once_with( 'PUT', full_url, raw_body, headers) @common_mocks def test_create_volume_no_results(self): mock_body = '{"message": "no results matching query", "status": 413}' self.mock_response.read.return_value = mock_body self.mock_response.status = 413 self.assertRaisesRegex(exception.VolumeBackendAPIException, "no results matching query", self.driver.create_volume, self.volume) create_params = dict( name=self.volume_name, query=DEFAULT_POOL_QUERY, capacity=self.volume_size * units.Gi) kwargs = dict( method='PUT', params=create_params, user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with( "/volumes/%s" % self.volume_id, **kwargs) @common_mocks def test_create_volume_from_snapshot(self): self.driver.create_volume_from_snapshot(self.volume, self.snapshot) vol_src = dict( snapshot_id=self.snapshot_id, volume_id=self.snapshot['volume_id']) create_params = dict( name=self.volume_name, capacity=self.volume_size * units.Gi, src=vol_src) kwargs = dict( method='PUT', params=create_params, user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with( "/volumes/%s" % self.volume_id, **kwargs) @common_mocks def test_create_volume_from_snapshot_overquota(self): mock_body = '{"message": "over quota", "status": 413}' self.mock_response.read.return_value = mock_body self.mock_response.status = 413 self.assertRaisesRegex(exception.VolumeBackendAPIException, "over quota", self.driver.create_volume_from_snapshot, self.volume, self.snapshot) vol_src = dict( snapshot_id=self.snapshot_id, volume_id=self.snapshot['volume_id']) create_params = dict( name=self.volume_name, capacity=self.volume_size * units.Gi, src=vol_src) kwargs = dict( method='PUT', params=create_params, user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with( "/volumes/%s" % self.volume_id, **kwargs) @common_mocks def test_create_cloned_volume(self): src_vref = dict( name='cloned_volume_source', size=self.volume_size, id='5d734467-5d77-461c-b5ac-5009dbeaa5d5', user_id=self.user_id, project_id=self.project_id) self.driver.create_cloned_volume(self.volume, src_vref) create_params = dict( name=self.volume_name, capacity=self.volume_size * units.Gi, src=dict(volume_id=src_vref['id'])) kwargs = dict( method='PUT', params=create_params, user_id=self.user_id, 
project_id=self.project_id) self.mock_client.submit.assert_called_once_with( "/volumes/%s" % self.volume_id, **kwargs) @common_mocks def test_create_cloned_volume_overquota(self): mock_body = '{"message": "over quota", "status": 413}' self.mock_response.read.return_value = mock_body self.mock_response.status = 413 src_vref = dict( name='cloned_volume_source', size=self.volume_size, id='5d734467-5d77-461c-b5ac-5009dbeaa5d5', user_id=self.user_id, project_id=self.project_id) self.assertRaisesRegex(exception.VolumeBackendAPIException, "over quota", self.driver.create_cloned_volume, self.volume, src_vref) create_params = dict( name=self.volume_name, capacity=self.volume_size * units.Gi, src=dict(volume_id=src_vref['id'])) kwargs = dict( method='PUT', params=create_params, user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with( "/volumes/%s" % self.volume_id, **kwargs) @common_mocks def test_extend_volume(self): self.driver.extend_volume(self.volume, 2) url = "/volumes/%s" % self.volume_id kwargs = dict( action='grow', method='POST', params=dict(capacity=(2 * units.Gi)), user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) @common_mocks def test_extend_volume_overquota(self): mock_body = '{"message": "over quota", "status": 413}' self.mock_response.read.return_value = mock_body self.mock_response.status = 413 self.assertRaisesRegex(exception.VolumeBackendAPIException, "over quota", self.driver.extend_volume, self.volume, 2) url = "/volumes/%s" % self.volume_id kwargs = dict( action='grow', method='POST', params=dict(capacity=(2 * units.Gi)), user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) @common_mocks def test_delete_volume(self): self.driver.delete_volume(self.volume) url = "/volumes/%s" % self.volume_id kwargs = dict( method='DELETE', user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) @common_mocks def test_create_snapshot(self): self.driver.create_snapshot(self.snapshot) url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'], self.snapshot['id']) create_params = dict( name=self.snapshot_name) kwargs = dict( method='PUT', params=create_params, user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) @common_mocks def test_create_snapshot_overquota(self): mock_body = '{"message": "over quota", "status": 413}' self.mock_response.read.return_value = mock_body self.mock_response.status = 413 self.assertRaisesRegex(exception.VolumeBackendAPIException, "over quota", self.driver.create_snapshot, self.snapshot) url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'], self.snapshot['id']) create_params = dict( name=self.snapshot_name) kwargs = dict( method='PUT', params=create_params, user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) @common_mocks def test_delete_snapshot(self): self.driver.delete_snapshot(self.snapshot) url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'], self.snapshot['id']) kwargs = dict( method='DELETE', user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) @common_mocks @mock.patch('cinder.volume.utils.generate_username') @mock.patch('cinder.volume.utils.generate_password') def test_initialize_connection(self, mock_generate_password, 
mock_generate_username): mock_generate_username.return_value = 'mock-user-abcdef123456' mock_generate_password.return_value = 'mock-password-abcdef123456' self.mock_response.read.return_value = FIXTURE_VOL_EXPORT_OK self.mock_response.status = 200 props = self.driver.initialize_connection(self.volume, self.connector) expected_props = dict( driver_volume_type="iscsi", data=dict( auth_method="CHAP", auth_username='mock-user-abcdef123456', auth_password='mock-password-abcdef123456', target_discovered=False, target_iqn="iqn.2009-12.com.blockbridge:t-pjxczxh-t001", target_lun=0, target_portal="127.0.0.1:3260", volume_id=self.volume_id)) self.assertEqual(expected_props, props) ini_name = urllib.parse.quote(self.connector["initiator"], "") url = "/volumes/%s/exports/%s" % (self.volume_id, ini_name) params = dict( chap_user="mock-user-abcdef123456", chap_secret="mock-password-abcdef123456") kwargs = dict( method='PUT', params=params, user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) @common_mocks def test_terminate_connection(self): self.driver.terminate_connection(self.volume, self.connector) ini_name = urllib.parse.quote(self.connector["initiator"], "") url = "/volumes/%s/exports/%s" % (self.volume_id, ini_name) kwargs = dict( method='DELETE', user_id=self.user_id, project_id=self.project_id) self.mock_client.submit.assert_called_once_with(url, **kwargs) @common_mocks def test_get_volume_stats_without_usage(self): with mock.patch.object(self.driver, 'hostname', 'mock-hostname'): self.driver.get_volume_stats(True) p = { 'query': '+openstack', 'pool': 'OpenStack', 'hostname': 'mock-hostname', 'version': '1.3.0', 'backend_name': 'BlockbridgeISCSIDriver', } self.mock_client.submit.assert_called_once_with('/status', params=p) self.assertEqual(POOL_STATS_WITHOUT_USAGE, self.driver._stats) @common_mocks def test_get_volume_stats_forbidden(self): self.mock_response.status = 403 self.assertRaisesRegex(exception.NotAuthorized, "Insufficient privileges", self.driver.get_volume_stats, True) @common_mocks def test_get_volume_stats_unauthorized(self): self.mock_response.status = 401 self.assertRaisesRegex(exception.NotAuthorized, "Invalid credentials", self.driver.get_volume_stats, True) cinder-8.0.0/cinder/tests/unit/test_glusterfs.py0000664000567000056710000023071712701406250023147 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
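# ---------------------------------------------------------------------------
# Editor's aside: test_initialize_connection further up compares against a
# dict of this shape. A hedged sketch (CHAP values and target details copied
# from the test fixtures, purely illustrative) of the connection-properties
# payload an iSCSI driver hands back to the attaching service:
def example_iscsi_connection_properties(volume_id):
    # keys mirror expected_props in test_initialize_connection above
    return {
        'driver_volume_type': 'iscsi',
        'data': {
            'auth_method': 'CHAP',
            'auth_username': 'mock-user-abcdef123456',
            'auth_password': 'mock-password-abcdef123456',
            'target_discovered': False,
            'target_iqn': 'iqn.2009-12.com.blockbridge:t-pjxczxh-t001',
            'target_lun': 0,
            'target_portal': '127.0.0.1:3260',
            'volume_id': volume_id,
        },
    }
# ---------------------------------------------------------------------------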
"""Unit tests for the GlusterFS driver module.""" import errno import os import six import tempfile import time import traceback import mock import os_brick from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_utils import imageutils from oslo_utils import units from cinder import compute from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import test from cinder import utils from cinder.volume import driver as base_driver from cinder.volume.drivers import glusterfs from cinder.volume.drivers import remotefs as remotefs_drv CONF = cfg.CONF class DumbVolume(object): fields = {} def __setitem__(self, key, value): self.fields[key] = value def __getitem__(self, item): return self.fields[item] class FakeDb(object): msg = "Tests are broken: mock this out." def volume_get(self, *a, **kw): raise Exception(self.msg) def snapshot_get_all_for_volume(self, *a, **kw): """Mock this if you want results from it.""" return [] class GlusterFsDriverTestCase(test.TestCase): """Test case for GlusterFS driver.""" TEST_EXPORT1 = 'glusterfs-host1:/export' TEST_EXPORT2 = 'glusterfs-host2:/export' TEST_EXPORT2_OPTIONS = '-o backupvolfile-server=glusterfs-backup1' TEST_SIZE_IN_GB = 1 TEST_MNT_POINT = '/mnt/glusterfs' TEST_MNT_POINT_BASE = '/mnt/test' TEST_LOCAL_PATH = '/mnt/glusterfs/volume-123' TEST_FILE_NAME = 'test.txt' TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' TEST_TMP_FILE = '/tmp/tempfile' VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' VOLUME_NAME = 'volume-%s' % VOLUME_UUID SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca' SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede' def setUp(self): super(GlusterFsDriverTestCase, self).setUp() self._configuration = mock.MagicMock() self._configuration.glusterfs_shares_config = \ self.TEST_SHARES_CONFIG_FILE self._configuration.glusterfs_mount_point_base = \ self.TEST_MNT_POINT_BASE self._configuration.nas_secure_file_permissions = 'false' self._configuration.nas_secure_file_operations = 'false' self._configuration.nas_ip = None self._configuration.nas_share_path = None self._configuration.nas_mount_options = None self._driver =\ glusterfs.GlusterfsDriver(configuration=self._configuration, db=FakeDb()) self._driver.shares = {} compute.API = mock.MagicMock() def assertRaisesAndMessageMatches( self, excClass, msg, callableObj, *args, **kwargs): """Ensure that 'excClass' was raised and its message contains 'msg'.""" caught = False try: callableObj(*args, **kwargs) except Exception as exc: caught = True self.assertEqual(excClass, type(exc), 'Wrong exception caught: %s Stacktrace: %s' % (exc, traceback.format_exc())) self.assertIn(msg, six.text_type(exc)) if not caught: self.fail('Expected raised exception but nothing caught.') def test_local_path(self): """local_path common use case.""" self.override_config("glusterfs_mount_point_base", self.TEST_MNT_POINT_BASE) drv = self._driver volume = DumbVolume() volume['id'] = self.VOLUME_UUID volume['provider_location'] = self.TEST_EXPORT1 volume['name'] = 'volume-123' self.assertEqual( '/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc/volume-123', drv.local_path(volume)) def test_mount_glusterfs(self): """_mount_glusterfs common case usage.""" drv = self._driver with mock.patch.object(os_brick.remotefs.remotefs.RemoteFsClient, 'mount') as mock_mount: drv._mount_glusterfs(self.TEST_EXPORT1) mock_mount.assert_called_once_with(self.TEST_EXPORT1, []) def 
test_mount_glusterfs_should_reraise_exception_on_failure(self): """_mount_glusterfs should reraise exception if mount fails.""" drv = self._driver with mock.patch.object(os_brick.remotefs.remotefs.RemoteFsClient, 'mount') as mock_mount: mock_mount.side_effect = exception.GlusterfsException() self.assertRaises(exception.GlusterfsException, drv._mount_glusterfs, self.TEST_EXPORT1) def test_get_hash_str(self): """_get_hash_str should calculation correct value.""" drv = self._driver self.assertEqual('ab03ab34eaca46a5fb81878f7e9b91fc', drv._get_hash_str(self.TEST_EXPORT1)) def test_get_mount_point_for_share(self): """_get_mount_point_for_share should call RemoteFsClient.""" drv = self._driver hashed_path = '/mnt/test/abcdefabcdef' with mock.patch.object(os_brick.remotefs.remotefs.RemoteFsClient, 'get_mount_point') as mock_get_mount_point: mock_get_mount_point.return_value = hashed_path result = drv._get_mount_point_for_share(self.TEST_EXPORT1) self.assertEqual(hashed_path, result) def test_get_available_capacity_with_df(self): """_get_available_capacity should calculate correct value.""" drv = self._driver df_total_size = 2620544 df_avail = 1490560 df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n' df_data = 'glusterfs-host:/export %d 996864 %d 41%% /mnt' % \ (df_total_size, df_avail) df_output = df_head + df_data with mock.patch.object(drv, '_get_mount_point_for_share') as \ mock_get_mount_point_for_share,\ mock.patch.object(drv, '_execute') as mock_execute: mock_get_mount_point_for_share.\ return_value = self.TEST_MNT_POINT mock_execute.return_value = (df_output, None) result = drv._get_available_capacity(self.TEST_EXPORT1) self.assertEqual((df_avail, df_total_size), result) def test_get_provisioned_capacity(self): """_get_provisioned_size should calculate correct value.""" drv = self._driver drv.shares = {'127.7.7.7:/gluster1': None} with mock.patch.object(drv, '_get_mount_point_for_share') as \ mock_get_mount_point_for_share,\ mock.patch.object(drv, '_execute') as mock_execute: mock_get_mount_point_for_share.\ return_value = self.TEST_MNT_POINT mock_execute.return_value = ("3221225472 /mount/point", '') provisioned_capacity = drv._get_provisioned_capacity() self.assertEqual(3.0, provisioned_capacity) def test_update_volume_stats_thin(self): """_update_volume_stats_thin with qcow2 files.""" drv = self._driver rfsdriver = remotefs_drv.RemoteFSSnapDriver with mock.patch.object(rfsdriver, '_update_volume_stats') as \ mock_update_volume_stats,\ mock.patch.object(drv, '_get_provisioned_capacity') as \ mock_get_provisioned_capacity: data = {'total_capacity_gb': 10.0, 'free_capacity_gb': 2.0} drv._stats = data drv.configuration.nas_volume_prov_type = 'thin' drv.configuration.max_over_subscription_ratio = 20.0 mock_get_provisioned_capacity.return_value = 8.0 drv._update_volume_stats() data['max_over_subscription_ratio'] = 20.0 data['thick_provisioning_support'] = False data['thin_provisioning_support'] = True self.assertEqual(data, drv._stats) self.assertTrue(mock_get_provisioned_capacity.called) self.assertTrue(mock_update_volume_stats.called) def test_update_volume_stats_thick(self): """_update_volume_stats_thick with raw files.""" drv = self._driver rfsdriver = remotefs_drv.RemoteFSSnapDriver with mock.patch.object(rfsdriver, '_update_volume_stats') as \ mock_update_volume_stats: data = {'total_capacity_gb': 10.0, 'free_capacity_gb': 2.0} drv._stats = data drv.configuration.nas_volume_prov_type = 'thick' drv.configuration.max_over_subscription_ratio = 20.0 
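# ---------------------------------------------------------------------------
# Editor's aside: self-contained sketches of the two helpers exercised a few
# tests back. get_hash_str assumes the driver md5-hashes the share string to
# name its mount point (consistent with the fixture digest for
# 'glusterfs-host1:/export'); parse_df assumes the one-header-row df layout
# used in test_get_available_capacity_with_df. Neither is the driver's
# literal code.
import hashlib


def get_hash_str(share):
    if isinstance(share, str):
        share = share.encode('utf-8')
    return hashlib.md5(share).hexdigest()


def parse_df(df_output):
    # 'Filesystem 1K-blocks Used Available Use% Mounted on' header, one row;
    # returns (available, total) in df's block units, as the test expects
    fields = df_output.splitlines()[1].split()
    total, available = int(fields[1]), int(fields[3])
    return available, total
# ---------------------------------------------------------------------------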
drv._update_volume_stats() data['provisioned_capacity_gb'] = 8.0 data['max_over_subscription_ratio'] = 20.0 data['thick_provisioning_support'] = True data['thin_provisioning_support'] = False self.assertEqual(data, drv._stats) self.assertTrue(mock_update_volume_stats.called) def test_load_shares_config(self): drv = self._driver drv.configuration.glusterfs_shares_config = ( self.TEST_SHARES_CONFIG_FILE) with mock.patch.object(drv, '_read_config_file') as \ mock_read_config_file: config_data = [] config_data.append(self.TEST_EXPORT1) config_data.append('#' + self.TEST_EXPORT2) config_data.append(self.TEST_EXPORT2 + ' ' + self.TEST_EXPORT2_OPTIONS) config_data.append('broken:share_format') config_data.append('') mock_read_config_file.return_value = config_data drv._load_shares_config(drv.configuration.glusterfs_shares_config) self.assertIn(self.TEST_EXPORT1, drv.shares) self.assertIn(self.TEST_EXPORT2, drv.shares) self.assertEqual(2, len(drv.shares)) self.assertEqual(self.TEST_EXPORT2_OPTIONS, drv.shares[self.TEST_EXPORT2]) def test_ensure_share_mounted(self): """_ensure_share_mounted simple use case.""" drv = self._driver with mock.patch.object(utils, 'get_file_mode') as \ mock_get_file_mode,\ mock.patch.object(utils, 'get_file_gid') as mock_get_file_gid,\ mock.patch.object(drv, '_execute') as mock_execute,\ mock.patch.object(drv, '_ensure_share_writable') as \ mock_ensure_share_writable,\ mock.patch.object(drv, '_get_mount_point_for_share') as \ mock_get_mount_point_for_share,\ mock.patch.object(drv, '_mount_glusterfs') as \ mock_mount_glusterfs: mock_get_mount_point_for_share.return_value = self.TEST_MNT_POINT mock_get_file_mode.return_value = 0o777 mock_get_file_gid.return_value = 333333 drv._ensure_share_mounted(self.TEST_EXPORT1) mock_get_file_mode.assert_called_once_with(self.TEST_MNT_POINT) mock_get_file_gid.assert_called_once_with(self.TEST_MNT_POINT) mock_ensure_share_writable.assert_called_once_with( self.TEST_MNT_POINT) self.assertTrue(mock_ensure_share_writable.called) self.assertTrue(mock_mount_glusterfs.called) self.assertTrue(mock_execute.called) def test_ensure_shares_mounted_should_save_mounting_successfully(self): """_ensure_shares_mounted should save share if mounted with success.""" drv = self._driver with mock.patch.object(drv, '_read_config_file') as \ mock_read_config_file,\ mock.patch.object(drv, '_ensure_share_mounted') as \ mock_ensure_share_mounted: config_data = [] config_data.append(self.TEST_EXPORT1) mock_read_config_file.return_value = config_data drv._ensure_shares_mounted() mock_ensure_share_mounted.\ assert_called_once_with(self.TEST_EXPORT1) self.assertEqual(1, len(drv._mounted_shares)) self.assertEqual(self.TEST_EXPORT1, drv._mounted_shares[0]) def test_ensure_shares_mounted_should_not_save_mounting_with_error(self): """_ensure_shares_mounted should not save share if failed to mount.""" drv = self._driver with mock.patch.object(drv, '_read_config_file') as \ mock_read_config_file,\ mock.patch.object(drv, '_ensure_share_mounted') as \ mock_ensure_share_mounted: config_data = [] config_data.append(self.TEST_EXPORT1) mock_read_config_file.return_value = config_data mock_ensure_share_mounted.side_effect = Exception() drv._ensure_shares_mounted() self.assertEqual(0, len(drv._mounted_shares)) def test_setup_should_throw_error_if_shares_config_not_configured(self): """do_setup should throw error if shares config is not configured.""" drv = self._driver drv.configuration.glusterfs_shares_config = None 
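# ---------------------------------------------------------------------------
# Editor's aside: a simplified sketch of the parsing that
# test_load_shares_config just above pins down: blank lines and '#' comments
# are skipped, the first token is the share address and the remainder its
# mount options. The real driver also rejects malformed addresses (the
# 'broken:share_format' entry); the naive ':/' check below only
# approximates that validation.
def load_shares_config(config_lines):
    shares = {}
    for raw in config_lines:
        line = raw.strip()
        if not line or line.startswith('#'):
            continue
        parts = line.split(None, 1)
        if ':/' not in parts[0]:
            continue  # crude stand-in for the driver's format validation
        shares[parts[0]] = parts[1] if len(parts) > 1 else None
    return shares
# ---------------------------------------------------------------------------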
self.assertRaisesAndMessageMatches(exception.GlusterfsException, 'no Gluster config file configured', drv.do_setup, mock.MagicMock()) @mock.patch.object(os.path, 'exists') def test_setup_should_throw_exception_if_client_is_not_installed( self, mock_exists): """do_setup should throw exception if client is not installed.""" drv = self._driver self.override_config("glusterfs_shares_config", self.TEST_SHARES_CONFIG_FILE) with mock.patch.object(drv, '_execute') as mock_execute: mock_exists.return_value = True mock_execute.side_effect = OSError(errno.ENOENT, 'No such file or directory') self.assertRaisesAndMessageMatches(exception.GlusterfsException, 'mount.glusterfs is not ' 'installed', drv.do_setup, mock.MagicMock()) def _fake_load_shares_config(self, config): self._driver.shares = {'127.7.7.7:/gluster1': None} def _fake_NamedTemporaryFile(self, prefix=None, dir=None): raise OSError('Permission denied!') @mock.patch.object(os, 'getegid') @mock.patch.object(os.path, 'exists') def test_setup_set_share_permissions(self, mock_exists, mock_getegid): drv = self._driver self.override_config("glusterfs_shares_config", self.TEST_SHARES_CONFIG_FILE) with mock.patch.object(drv, '_execute') as mock_execute,\ mock.patch.object(utils, 'get_file_gid') as \ mock_get_file_gid,\ mock.patch.object(utils, 'get_file_mode') as \ mock_get_file_mode,\ mock.patch.object(tempfile, 'NamedTemporaryFile') as \ mock_named_temp,\ mock.patch.object(os_brick.remotefs.remotefs.RemoteFsClient, 'mount') as mock_mount: drv._load_shares_config = self._fake_load_shares_config mock_named_temp.return_value = self._fake_NamedTemporaryFile mock_exists.return_value = True mock_get_file_gid.return_value = 33333 mock_get_file_mode.return_value = 0o000 mock_getegid.return_value = 888 drv.do_setup(mock.MagicMock()) expected = [ mock.call('mount.glusterfs', check_exit_code=False), mock.call('umount', '/mnt/test/8f0473c9ad824b8b6a27264b9cacb005', run_as_root=True), mock.call('chgrp', 888, '/mnt/test/8f0473c9ad824b8b6a27264b9cacb005', run_as_root=True), mock.call('chmod', 'g+w', '/mnt/test/8f0473c9ad824b8b6a27264b9cacb005', run_as_root=True)] self.assertEqual(expected, mock_execute.mock_calls) mock_mount.assert_called_once_with('127.7.7.7:/gluster1', []) def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self): """_find_share should throw error if there is no mounted shares.""" drv = self._driver drv._mounted_shares = [] self.assertRaises(exception.GlusterfsNoSharesMounted, drv._find_share, self.TEST_SIZE_IN_GB) def test_find_share(self): """_find_share simple use case.""" drv = self._driver drv._mounted_shares = [self.TEST_EXPORT1, self.TEST_EXPORT2] with mock.patch.object(drv, '_get_available_capacity') as \ mock_get_available_capacity: capacity = {self.TEST_EXPORT1: (2 * units.Gi, 5 * units.Gi), self.TEST_EXPORT2: (3 * units.Gi, 10 * units.Gi)} def capacity_side_effect(*args, **kwargs): return capacity[args[0]] mock_get_available_capacity.side_effect = capacity_side_effect self.assertEqual(self.TEST_EXPORT2, drv._find_share(self.TEST_SIZE_IN_GB)) def test_find_share_should_throw_error_if_there_is_no_enough_place(self): """_find_share should throw error if there is no share to host vol.""" drv = self._driver drv._mounted_shares = [self.TEST_EXPORT1, self.TEST_EXPORT2] with mock.patch.object(drv, '_get_available_capacity') as \ mock_get_available_capacity: capacity = {self.TEST_EXPORT1: (0, 5 * units.Gi), self.TEST_EXPORT2: (0, 10 * units.Gi)} def capacity_side_effect(*args, **kwargs): return capacity[args[0]] 
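# ---------------------------------------------------------------------------
# Editor's aside: a sketch of the placement rule the _find_share tests
# around this point encode. get_capacity(share) returns
# (available_bytes, total_bytes), as the mocked _get_available_capacity
# does; the RuntimeErrors are generic stand-ins for
# GlusterfsNoSharesMounted / GlusterfsNoSuitableShareFound.
GIB = 1024 ** 3


def find_share(mounted_shares, get_capacity, size_gb):
    if not mounted_shares:
        raise RuntimeError('no Gluster shares mounted')
    # pick the share with the most free space ...
    best = max(mounted_shares, key=lambda share: get_capacity(share)[0])
    # ... and fail if even that one cannot hold the requested volume
    if get_capacity(best)[0] < size_gb * GIB:
        raise RuntimeError('no share can host a %dG volume' % size_gb)
    return best
# ---------------------------------------------------------------------------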
mock_get_available_capacity.side_effect = capacity_side_effect self.assertRaises(exception.GlusterfsNoSuitableShareFound, drv._find_share, self.TEST_SIZE_IN_GB) def _simple_volume(self, id=None): volume = DumbVolume() volume['provider_location'] = self.TEST_EXPORT1 if id is None: volume['id'] = self.VOLUME_UUID else: volume['id'] = id # volume['name'] mirrors format from db/sqlalchemy/models.py volume['name'] = 'volume-%s' % volume['id'] volume['size'] = 10 volume['status'] = 'available' return volume def test_create_thin_volume(self): drv = self._driver volume = self._simple_volume() self._configuration.nas_volume_prov_type = 'thin' with mock.patch.object(drv, '_create_qcow2_file') as \ mock_create_qcow2_file,\ mock.patch.object(drv, '_set_rw_permissions_for_all') as \ mock_set_rw_permissions_for_all: drv._do_create_volume(volume) volume_path = drv.local_path(volume) volume_size = volume['size'] mock_create_qcow2_file.assert_called_once_with(volume_path, volume_size) mock_set_rw_permissions_for_all.\ assert_called_once_with(volume_path) def test_create_thick_fallocate_volume(self): drv = self._driver volume = self._simple_volume() self._configuration.nas_volume_prov_type = 'thick' with mock.patch.object(drv, '_fallocate') as \ mock_fallocate,\ mock.patch.object(drv, '_set_rw_permissions_for_all') as \ mock_set_rw_permissions_for_all: drv._do_create_volume(volume) volume_path = drv.local_path(volume) volume_size = volume['size'] mock_fallocate.assert_called_once_with(volume_path, volume_size) mock_set_rw_permissions_for_all.\ assert_called_once_with(volume_path) def test_create_thick_dd_volume(self): drv = self._driver volume = self._simple_volume() self._configuration.nas_volume_prov_type = 'thick' with mock.patch.object(drv, '_fallocate') as \ mock_fallocate,\ mock.patch.object(drv, '_create_regular_file') as \ mock_create_regular_file,\ mock.patch.object(drv, '_set_rw_permissions_for_all') as \ mock_set_rw_permissions_for_all: mock_fallocate.side_effect = putils.ProcessExecutionError( stderr='Fallocate: Operation not supported.') drv._do_create_volume(volume) volume_path = drv.local_path(volume) volume_size = volume['size'] mock_fallocate.assert_called_once_with(volume_path, volume_size) mock_create_regular_file.assert_called_once_with(volume_path, volume_size) mock_set_rw_permissions_for_all.\ assert_called_once_with(volume_path) def test_create_volume_should_ensure_glusterfs_mounted(self): """create_volume ensures shares provided in config are mounted.""" drv = self._driver with mock.patch.object(drv, '_find_share') as mock_find_share,\ mock.patch.object(drv, '_do_create_volume') as \ mock_do_create_volume,\ mock.patch.object(drv, '_ensure_shares_mounted') as \ mock_ensure_shares_mounted: volume = DumbVolume() volume['size'] = self.TEST_SIZE_IN_GB volume['id'] = self.VOLUME_UUID drv.create_volume(volume) self.assertTrue(mock_ensure_shares_mounted.called) self.assertTrue(mock_do_create_volume.called) self.assertTrue(mock_find_share.called) def test_create_volume_should_return_provider_location(self): """create_volume should return provider_location with found share.""" drv = self._driver with mock.patch.object(drv, '_find_share') as mock_find_share,\ mock.patch.object(drv, '_do_create_volume') as \ mock_do_create_volume,\ mock.patch.object(drv, '_ensure_shares_mounted') as \ mock_ensure_shares_mounted: mock_find_share.return_value = self.TEST_EXPORT1 volume = DumbVolume() volume['size'] = self.TEST_SIZE_IN_GB volume['id'] = self.VOLUME_UUID result = drv.create_volume(volume) 
self.assertEqual(self.TEST_EXPORT1, result['provider_location']) self.assertTrue(mock_ensure_shares_mounted.called) self.assertTrue(mock_do_create_volume.called) def test_create_cloned_volume(self): drv = self._driver with mock.patch.object(drv, '_create_snapshot') as \ mock_create_snapshot,\ mock.patch.object(drv, '_delete_snapshot') as \ mock_delete_snapshot,\ mock.patch.object(drv, '_copy_volume_from_snapshot') as \ mock_copy_volume_from_snapshot: volume = self._simple_volume() src_vref = self._simple_volume() src_vref['id'] = '375e32b2-804a-49f2-b282-85d1d5a5b9e1' src_vref['name'] = 'volume-%s' % src_vref['id'] volume_ref = {'id': volume['id'], 'name': volume['name'], 'status': volume['status'], 'provider_location': volume['provider_location'], 'size': volume['size']} snap_ref = {'volume_name': src_vref['name'], 'name': 'clone-snap-%s' % src_vref['id'], 'size': src_vref['size'], 'volume_size': src_vref['size'], 'volume_id': src_vref['id'], 'id': 'tmp-snap-%s' % src_vref['id'], 'volume': src_vref} drv.create_cloned_volume(volume, src_vref) mock_create_snapshot.assert_called_once_with(snap_ref) mock_copy_volume_from_snapshot.\ assert_called_once_with(snap_ref, volume_ref, volume['size']) self.assertTrue(mock_delete_snapshot.called) @mock.patch('oslo_utils.fileutils.delete_if_exists') def test_delete_volume(self, mock_delete_if_exists): volume = self._simple_volume() volume_filename = 'volume-%s' % self.VOLUME_UUID volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename) info_file = volume_path + '.info' with mock.patch.object(self._driver, '_ensure_share_mounted') as \ mock_ensure_share_mounted,\ mock.patch.object(self._driver, '_local_volume_dir') as \ mock_local_volume_dir,\ mock.patch.object(self._driver, 'get_active_image_from_info') as \ mock_active_image_from_info,\ mock.patch.object(self._driver, '_execute') as \ mock_execute,\ mock.patch.object(self._driver, '_local_path_volume') as \ mock_local_path_volume,\ mock.patch.object(self._driver, '_local_path_volume_info') as \ mock_local_path_volume_info: mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_active_image_from_info.return_value = volume_filename mock_local_path_volume.return_value = volume_path mock_local_path_volume_info.return_value = info_file self._driver.delete_volume(volume) mock_ensure_share_mounted.assert_called_once_with( volume['provider_location']) mock_local_volume_dir.assert_called_once_with(volume) mock_active_image_from_info.assert_called_once_with(volume) mock_execute.assert_called_once_with('rm', '-f', volume_path, run_as_root=True) mock_local_path_volume_info.assert_called_once_with(volume) mock_local_path_volume.assert_called_once_with(volume) mock_delete_if_exists.assert_any_call(volume_path) mock_delete_if_exists.assert_any_call(info_file) def test_refresh_mounts(self): with mock.patch.object(self._driver, '_unmount_shares') as \ mock_unmount_shares,\ mock.patch.object(self._driver, '_ensure_shares_mounted') as \ mock_ensure_shares_mounted: self._driver._refresh_mounts() self.assertTrue(mock_unmount_shares.called) self.assertTrue(mock_ensure_shares_mounted.called) def test_refresh_mounts_with_excp(self): with mock.patch.object(self._driver, '_unmount_shares') as \ mock_unmount_shares,\ mock.patch.object(self._driver, '_ensure_shares_mounted') as \ mock_ensure_shares_mounted,\ mock.patch.object(glusterfs, 'LOG') as mock_logger: mock_stderr = _("umount: : target is busy") mock_unmount_shares.side_effect = \ putils.ProcessExecutionError(stderr=mock_stderr) self._driver._refresh_mounts() 
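# ---------------------------------------------------------------------------
# Editor's aside: a hedged reconstruction (not the driver's exact code) of
# the error handling that test_do_umount_with_excp2 below pins down: with
# ignore_not_mounted set, an 'umount: ...: not mounted' failure is
# swallowed, while anything else ('target is busy') propagates.
from oslo_concurrency import processutils as putils


def do_umount(ignore_not_mounted, mount_path):
    try:
        putils.execute('umount', mount_path, run_as_root=True)
    except putils.ProcessExecutionError as exc:
        if ignore_not_mounted and 'not mounted' in (exc.stderr or ''):
            return  # already unmounted; the real driver logs and moves on
        raise
# ---------------------------------------------------------------------------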
self.assertTrue(mock_unmount_shares.called) self.assertTrue(mock_logger.warning.called) self.assertTrue(mock_ensure_shares_mounted.called) mock_unmount_shares.reset_mock() mock_ensure_shares_mounted.reset_mock() mock_logger.reset_mock() mock_logger.warning.reset_mock() mock_stderr = _("umount: : some other error") mock_unmount_shares.side_effect = \ putils.ProcessExecutionError(stderr=mock_stderr) self.assertRaises(putils.ProcessExecutionError, self._driver._refresh_mounts) self.assertTrue(mock_unmount_shares.called) self.assertFalse(mock_ensure_shares_mounted.called) def test_unmount_shares_with_excp(self): self._driver.shares = {'127.7.7.7:/gluster1': None} with mock.patch.object(self._driver, '_load_shares_config') as \ _mock_load_shares_config,\ mock.patch.object(self._driver, '_do_umount') as \ mock_do_umount,\ mock.patch.object(glusterfs, 'LOG') as \ mock_logger: mock_do_umount.side_effect = Exception() self._driver._unmount_shares() self.assertTrue(mock_do_umount.called) self.assertTrue(mock_logger.warning.called) self.assertFalse(mock_logger.debug.called) self.assertTrue(_mock_load_shares_config.called) def test_unmount_shares_1share(self): self._driver.shares = {'127.7.7.7:/gluster1': None} with mock.patch.object(self._driver, '_load_shares_config') as \ _mock_load_shares_config,\ mock.patch.object(self._driver, '_do_umount') as \ mock_do_umount: self._driver._unmount_shares() self.assertTrue(mock_do_umount.called) mock_do_umount.assert_called_once_with(True, '127.7.7.7:/gluster1') self.assertTrue(_mock_load_shares_config.called) def test_unmount_shares_2share(self): self._driver.shares = {'127.7.7.7:/gluster1': None, '127.7.7.8:/gluster2': None} with mock.patch.object(self._driver, '_load_shares_config') as \ _mock_load_shares_config,\ mock.patch.object(self._driver, '_do_umount') as \ mock_do_umount: self._driver._unmount_shares() mock_do_umount.assert_any_call(True, '127.7.7.7:/gluster1') mock_do_umount.assert_any_call(True, '127.7.7.8:/gluster2') self.assertTrue(_mock_load_shares_config.called) def test_do_umount(self): test_share = '127.7.7.7:/gluster1' test_hashpath = '/hashed/mnt/path' with mock.patch.object(self._driver, '_get_mount_point_for_share') as \ mock_get_mntp_share,\ mock.patch.object(putils, 'execute') as mock_execute: mock_get_mntp_share.return_value = test_hashpath self._driver._do_umount(True, test_share) self.assertTrue(mock_get_mntp_share.called) self.assertTrue(mock_execute.called) mock_get_mntp_share.assert_called_once_with(test_share) cmd = ['umount', test_hashpath] self.assertEqual(cmd[0], mock_execute.call_args[0][0]) self.assertEqual(cmd[1], mock_execute.call_args[0][1]) self.assertTrue(mock_execute.call_args[1]['run_as_root']) mock_get_mntp_share.reset_mock() mock_get_mntp_share.return_value = test_hashpath mock_execute.reset_mock() self._driver._do_umount(False, test_share) self.assertTrue(mock_get_mntp_share.called) self.assertTrue(mock_execute.called) mock_get_mntp_share.assert_called_once_with(test_share) cmd = ['umount', test_hashpath] self.assertEqual(cmd[0], mock_execute.call_args[0][0]) self.assertEqual(cmd[1], mock_execute.call_args[0][1]) self.assertTrue(mock_execute.call_args[1]['run_as_root']) def test_do_umount_with_excp1(self): test_share = '127.7.7.7:/gluster1' test_hashpath = '/hashed/mnt/path' with mock.patch.object(self._driver, '_get_mount_point_for_share') as \ mock_get_mntp_share,\ mock.patch.object(putils, 'execute') as mock_execute,\ mock.patch.object(glusterfs, 'LOG') as mock_logger: mock_get_mntp_share.return_value = 
test_hashpath mock_execute.side_effect = putils.ProcessExecutionError self.assertRaises(putils.ProcessExecutionError, self._driver._do_umount, False, test_share) mock_logger.reset_mock() mock_logger.info.reset_mock() mock_logger.error.reset_mock() mock_execute.side_effect = putils.ProcessExecutionError try: self._driver._do_umount(False, test_share) except putils.ProcessExecutionError: self.assertFalse(mock_logger.info.called) self.assertTrue(mock_logger.error.called) except Exception as e: self.fail('Unexpected exception thrown:', e) else: self.fail('putils.ProcessExecutionError not thrown') def test_do_umount_with_excp2(self): test_share = '127.7.7.7:/gluster1' test_hashpath = '/hashed/mnt/path' with mock.patch.object(self._driver, '_get_mount_point_for_share') as \ mock_get_mntp_share,\ mock.patch.object(putils, 'execute') as mock_execute,\ mock.patch.object(glusterfs, 'LOG') as mock_logger: mock_get_mntp_share.return_value = test_hashpath mock_stderr = _("umount: %s: not mounted") % test_hashpath mock_execute.side_effect = putils.ProcessExecutionError( stderr=mock_stderr) self._driver._do_umount(True, test_share) self.assertTrue(mock_logger.info.called) self.assertFalse(mock_logger.error.called) mock_logger.reset_mock() mock_logger.info.reset_mock() mock_logger.error.reset_mock() mock_stderr = _("umount: %s: target is busy") %\ (test_hashpath) mock_execute.side_effect = putils.ProcessExecutionError( stderr=mock_stderr) self.assertRaises(putils.ProcessExecutionError, self._driver._do_umount, True, test_share) mock_logger.reset_mock() mock_logger.info.reset_mock() mock_logger.error.reset_mock() mock_stderr = _('umount: %s: target is busy') %\ (test_hashpath) mock_execute.side_effect = putils.ProcessExecutionError( stderr=mock_stderr) try: self._driver._do_umount(True, test_share) except putils.ProcessExecutionError: self.assertFalse(mock_logger.info.called) self.assertTrue(mock_logger.error.called) except Exception as e: self.fail('Unexpected exception thrown:', e) else: self.fail('putils.ProcessExecutionError not thrown') def test_delete_should_ensure_share_mounted(self): """delete_volume should ensure that corresponding share is mounted.""" drv = self._driver with mock.patch.object(drv, '_execute') as mock_execute,\ mock.patch.object(drv, '_ensure_share_mounted') as \ mock_ensure_share_mounted: volume = DumbVolume() volume['id'] = self.VOLUME_UUID volume['name'] = 'volume-123' volume['provider_location'] = self.TEST_EXPORT1 drv.delete_volume(volume) mock_ensure_share_mounted.\ assert_called_once_with(self.TEST_EXPORT1) self.assertTrue(mock_execute.called) def test_delete_should_not_delete_if_provider_location_not_provided(self): """delete_volume shouldn't delete if provider_location missed.""" drv = self._driver with mock.patch.object(drv, '_execute') as mock_execute,\ mock.patch.object(drv, '_ensure_share_mounted') as \ mock_ensure_share_mounted: volume = DumbVolume() volume['id'] = self.VOLUME_UUID volume['name'] = 'volume-123' volume['provider_location'] = None drv.delete_volume(volume) self.assertFalse(mock_ensure_share_mounted.called) self.assertFalse(mock_execute.called) def test_read_info_file(self): drv = self._driver with mock.patch.object(drv, '_read_file') as mock_read_file: hashed = drv._get_hash_str(self.TEST_EXPORT1) volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, hashed, self.VOLUME_UUID) info_path = '%s%s' % (volume_path, '.info') mock_read_file.return_value = '{"%(id)s": "volume-%(id)s"}' %\ {'id': self.VOLUME_UUID} volume = DumbVolume() volume['id'] = 
self.VOLUME_UUID volume['name'] = 'volume-%s' % self.VOLUME_UUID info = drv._read_info_file(info_path) self.assertEqual('volume-%s' % self.VOLUME_UUID, info[self.VOLUME_UUID]) def test_extend_volume(self): drv = self._driver volume = self._simple_volume() qemu_img_info_output = """image: volume-%s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 473K """ % self.VOLUME_UUID img_info = imageutils.QemuImgInfo(qemu_img_info_output) with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info,\ mock.patch.object(self._driver, '_local_volume_dir') as \ mock_local_volume_dir,\ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info,\ mock.patch.object(image_utils, 'resize_image') as \ mock_resize_image: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_qemu_img_info.return_value = img_info drv.extend_volume(volume, 3) self.assertTrue(mock_resize_image.called) def test_extend_volume_with_snapshot(self): drv = self._driver volume = self._simple_volume() snap_file = 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID) qemu_img_info_output = """image: %s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 473K """ % snap_file img_info = imageutils.QemuImgInfo(qemu_img_info_output) with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info,\ mock.patch.object(self._driver, '_local_volume_dir') as \ mock_local_volume_dir,\ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info,\ mock.patch.object(image_utils, 'resize_image') as \ mock_resize_image: mock_get_active_image_from_info.return_value = snap_file mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_qemu_img_info.return_value = img_info snap_path = '%s/%s' % (self.TEST_MNT_POINT, snap_file) drv.extend_volume(volume, 3) mock_resize_image.assert_called_once_with(snap_path, 3) def test_create_snapshot_online(self): drv = self._driver volume = self._simple_volume() volume['status'] = 'in-use' hashed = drv._get_hash_str(self.TEST_EXPORT1) volume_file = 'volume-%s' % self.VOLUME_UUID volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, hashed, volume_file) ctxt = context.RequestContext('fake_user', 'fake_project') snap_ref = {'name': 'test snap (online)', 'volume_id': self.VOLUME_UUID, 'volume': volume, 'id': self.SNAP_UUID, 'context': ctxt, 'status': 'asdf', 'progress': 'asdf'} snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) with mock.patch.object(drv, '_do_create_snapshot') as \ mock_do_create_snapshot,\ mock.patch.object(db, 'snapshot_get') as mock_snapshot_get,\ mock.patch.object(drv, '_nova') as mock_nova,\ mock.patch.object(time, 'sleep') as mock_sleep: create_info = {'snapshot_id': snap_ref['id'], 'type': 'qcow2', 'new_file': snap_file} snap_ref_progress = snap_ref.copy() snap_ref_progress['status'] = 'creating' snap_ref_progress_0p = snap_ref_progress.copy() snap_ref_progress_0p['progress'] = '0%' snap_ref_progress_50p = snap_ref_progress.copy() snap_ref_progress_50p['progress'] = '50%' snap_ref_progress_90p = snap_ref_progress.copy() snap_ref_progress_90p['progress'] = '90%' mock_snapshot_get.side_effect = [ snap_ref_progress_0p, snap_ref_progress_50p, snap_ref_progress_90p ] drv._create_snapshot_online(snap_ref, snap_file, snap_path) mock_do_create_snapshot.\ assert_called_once_with(snap_ref, snap_file, snap_path) mock_nova.create_volume_snapshot.\ 
assert_called_once_with(ctxt, self.VOLUME_UUID, create_info) self.assertTrue(mock_sleep.called) def test_create_snapshot_online_novafailure(self): drv = self._driver volume = self._simple_volume() volume['status'] = 'in-use' hashed = drv._get_hash_str(self.TEST_EXPORT1) volume_file = 'volume-%s' % self.VOLUME_UUID volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, hashed, volume_file) ctxt = context.RequestContext('fake_user', 'fake_project') snap_ref = {'name': 'test snap (online)', 'volume_id': self.VOLUME_UUID, 'volume': volume, 'id': self.SNAP_UUID, 'context': ctxt} snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) with mock.patch.object(drv, '_do_create_snapshot') as mock_do_create_snapshot,\ mock.patch.object(db, 'snapshot_get') as mock_snapshot_get,\ mock.patch.object(drv, '_nova') as mock_nova,\ mock.patch.object(time, 'sleep') as mock_sleep: snap_ref_progress = snap_ref.copy() snap_ref_progress['status'] = 'creating' snap_ref_progress_0p = snap_ref_progress.copy() snap_ref_progress_0p['progress'] = '0%' snap_ref_progress_50p = snap_ref_progress.copy() snap_ref_progress_50p['progress'] = '50%' snap_ref_progress_99p = snap_ref_progress.copy() snap_ref_progress_99p['progress'] = '99%' snap_ref_progress_99p['status'] = 'error' mock_snapshot_get.side_effect = [ snap_ref_progress_0p, snap_ref_progress_50p, snap_ref_progress_99p ] self.assertRaisesAndMessageMatches( exception.RemoteFSException, 'Nova returned "error" status while creating snapshot.', drv._create_snapshot_online, snap_ref, snap_file, snap_path) self.assertTrue(mock_sleep.called) self.assertTrue(mock_nova.create_volume_snapshot.called) self.assertTrue(mock_do_create_snapshot.called) def test_delete_snapshot_online_1(self): """Delete the newest snapshot, with only one snap present.""" drv = self._driver volume = self._simple_volume() volume['status'] = 'in-use' ctxt = context.RequestContext('fake_user', 'fake_project') snap_ref = {'name': 'test snap to delete (online)', 'volume_id': self.VOLUME_UUID, 'volume': volume, 'id': self.SNAP_UUID, 'context': ctxt} hashed = drv._get_hash_str(self.TEST_EXPORT1) volume_file = 'volume-%s' % self.VOLUME_UUID volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed) volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, hashed, volume_file) info_path = '%s.info' % volume_path snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) with mock.patch.object(drv, '_execute') as mock_execute,\ mock.patch.object(db, 'snapshot_get') as mock_snapshot_get,\ mock.patch.object(drv, '_nova') as mock_nova,\ mock.patch.object(time, 'sleep') as mock_sleep,\ mock.patch.object(drv, '_read_info_file') as \ mock_read_info_file,\ mock.patch.object(drv, '_write_info_file') as \ mock_write_info_file,\ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info,\ mock.patch.object(drv, '_ensure_share_writable') as \ mock_ensure_share_writable: snap_info = {'active': snap_file, self.SNAP_UUID: snap_file} mock_read_info_file.return_value = snap_info qemu_img_info_output = """image: %s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 173K backing file: %s """ % (snap_file, volume_file) img_info = imageutils.QemuImgInfo(qemu_img_info_output) vol_qemu_img_info_output = """image: %s file format: raw virtual size: 1.0G (1073741824 bytes) disk size: 173K """ % volume_file volume_img_info = imageutils.QemuImgInfo(vol_qemu_img_info_output) paths = {snap_path: img_info, volume_path: 
volume_img_info} def img_info_side_effect(*args, **kwargs): return paths[args[0]] mock_qemu_img_info.side_effect = img_info_side_effect delete_info = { 'type': 'qcow2', 'merge_target_file': None, 'file_to_merge': None, 'volume_id': self.VOLUME_UUID } snap_ref_progress = snap_ref.copy() snap_ref_progress['status'] = 'deleting' snap_ref_progress_0p = snap_ref_progress.copy() snap_ref_progress_0p['progress'] = '0%' snap_ref_progress_50p = snap_ref_progress.copy() snap_ref_progress_50p['progress'] = '50%' snap_ref_progress_90p = snap_ref_progress.copy() snap_ref_progress_90p['progress'] = '90%' mock_snapshot_get.side_effect = [ snap_ref_progress_0p, snap_ref_progress_50p, snap_ref_progress_90p ] drv.delete_snapshot(snap_ref) mock_ensure_share_writable.assert_called_once_with(volume_dir) mock_nova.delete_volume_snapshot.\ assert_called_once_with(ctxt, self.SNAP_UUID, delete_info) mock_write_info_file.assert_called_once_with(info_path, snap_info) mock_execute.assert_called_once_with('rm', '-f', volume_path, run_as_root=True) self.assertTrue(mock_ensure_share_writable.called) self.assertTrue(mock_write_info_file.called) self.assertTrue(mock_sleep.called) self.assertTrue(mock_nova.delete_volume_snapshot.called) self.assertTrue(mock_execute.called) def test_delete_snapshot_online_2(self): """Delete the middle of 3 snapshots.""" drv = self._driver volume = self._simple_volume() volume['status'] = 'in-use' ctxt = context.RequestContext('fake_user', 'fake_project') snap_ref = {'name': 'test snap to delete (online)', 'volume_id': self.VOLUME_UUID, 'volume': volume, 'id': self.SNAP_UUID, 'context': ctxt} hashed = drv._get_hash_str(self.TEST_EXPORT1) volume_file = 'volume-%s' % self.VOLUME_UUID volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed) volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, hashed, volume_file) info_path = '%s.info' % volume_path snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) snap_file_2 = '%s.%s' % (volume_file, self.SNAP_UUID_2) with mock.patch.object(drv, '_execute') as mock_execute,\ mock.patch.object(db, 'snapshot_get') as \ mock_snapshot_get,\ mock.patch.object(drv, '_nova') as \ mock_nova,\ mock.patch.object(time, 'sleep') as \ mock_sleep,\ mock.patch.object(drv, '_read_info_file') as \ mock_read_info_file,\ mock.patch.object(drv, '_write_info_file') as \ mock_write_info_file,\ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info,\ mock.patch.object(drv, '_ensure_share_writable') as \ mock_ensure_share_writable: snap_info = {'active': snap_file_2, self.SNAP_UUID: snap_file, self.SNAP_UUID_2: snap_file_2} mock_read_info_file.return_value = snap_info qemu_img_info_output = """image: %s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 173K backing file: %s """ % (snap_file, volume_file) img_info = imageutils.QemuImgInfo(qemu_img_info_output) vol_qemu_img_info_output = """image: %s file format: raw virtual size: 1.0G (1073741824 bytes) disk size: 173K """ % volume_file volume_img_info = imageutils.QemuImgInfo(vol_qemu_img_info_output) paths = {snap_path: img_info, volume_path: volume_img_info} def img_info_side_effect(*args, **kwargs): return paths[args[0]] mock_qemu_img_info.side_effect = img_info_side_effect delete_info = {'type': 'qcow2', 'merge_target_file': volume_file, 'file_to_merge': snap_file, 'volume_id': self.VOLUME_UUID} snap_ref_progress = snap_ref.copy() snap_ref_progress['status'] = 'deleting' snap_ref_progress_0p = snap_ref_progress.copy() 
snap_ref_progress_0p['progress'] = '0%' snap_ref_progress_50p = snap_ref_progress.copy() snap_ref_progress_50p['progress'] = '50%' snap_ref_progress_90p = snap_ref_progress.copy() snap_ref_progress_90p['progress'] = '90%' mock_snapshot_get.side_effect = [ snap_ref_progress_0p, snap_ref_progress_50p, snap_ref_progress_90p] drv.delete_snapshot(snap_ref) mock_ensure_share_writable.assert_called_once_with(volume_dir) mock_nova.delete_volume_snapshot.\ assert_called_once_with(ctxt, self.SNAP_UUID, delete_info) mock_write_info_file.assert_called_once_with(info_path, snap_info) mock_execute.assert_called_once_with('rm', '-f', snap_path, run_as_root=True) self.assertTrue(mock_ensure_share_writable.called) self.assertTrue(mock_write_info_file.called) self.assertTrue(mock_sleep.called) self.assertTrue(mock_nova.delete_volume_snapshot.called) self.assertTrue(mock_execute.called) def test_delete_snapshot_online_novafailure(self): """Delete the newest snapshot.""" drv = self._driver volume = self._simple_volume() volume['status'] = 'in-use' ctxt = context.RequestContext('fake_user', 'fake_project') snap_ref = {'name': 'test snap to delete (online)', 'volume_id': self.VOLUME_UUID, 'volume': volume, 'id': self.SNAP_UUID, 'context': ctxt} hashed = drv._get_hash_str(self.TEST_EXPORT1) volume_file = 'volume-%s' % self.VOLUME_UUID volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, hashed, volume_file) snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) with mock.patch.object(drv, '_execute') as mock_execute,\ mock.patch.object(drv, '_do_create_snapshot') as \ mock_do_create_snapshot,\ mock.patch.object(db, 'snapshot_get') as \ mock_snapshot_get,\ mock.patch.object(drv, '_nova') as \ mock_nova,\ mock.patch.object(time, 'sleep') as \ mock_sleep,\ mock.patch.object(drv, '_read_info_file') as \ mock_read_info_file,\ mock.patch.object(drv, '_write_info_file') as \ mock_write_info_file,\ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info,\ mock.patch.object(drv, '_ensure_share_writable') as \ mock_ensure_share_writable: snap_info = {'active': snap_file, self.SNAP_UUID: snap_file} mock_read_info_file.return_value = snap_info qemu_img_info_output = """image: %s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 173K backing file: %s """ % (snap_file, volume_file) img_info = imageutils.QemuImgInfo(qemu_img_info_output) vol_qemu_img_info_output = """image: %s file format: raw virtual size: 1.0G (1073741824 bytes) disk size: 173K """ % volume_file volume_img_info = imageutils.QemuImgInfo(vol_qemu_img_info_output) paths = {snap_path: img_info, volume_path: volume_img_info} def img_info_side_effect(*args, **kwargs): return paths[args[0]] mock_qemu_img_info.side_effect = img_info_side_effect snap_ref_progress = snap_ref.copy() snap_ref_progress['status'] = 'deleting' snap_ref_progress_0p = snap_ref_progress.copy() snap_ref_progress_0p['progress'] = '0%' snap_ref_progress_50p = snap_ref_progress.copy() snap_ref_progress_50p['progress'] = '50%' snap_ref_progress_90p = snap_ref_progress.copy() snap_ref_progress_90p['status'] = 'error_deleting' snap_ref_progress_90p['progress'] = '90%' mock_snapshot_get.side_effect = [ snap_ref_progress_0p, snap_ref_progress_50p, snap_ref_progress_90p] self.assertRaisesAndMessageMatches(exception.RemoteFSException, 'Unable to delete snapshot', drv.delete_snapshot, snap_ref) self.assertTrue(mock_ensure_share_writable.called) self.assertFalse(mock_write_info_file.called) 
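# ---------------------------------------------------------------------------
# Editor's aside: the online-snapshot tests in this stretch feed a sequence
# of snapshot records at '0%', '50%' and then '90%' (or an error status)
# into a polling loop that sleeps between reads. A minimal sketch of such a
# loop; get_snapshot and the terminal conditions are assumptions inferred
# from what the mocks expect, not cinder's literal logic.
import time


def wait_for_snapshot(get_snapshot, snapshot_id, interval=1):
    while True:
        snap = get_snapshot(snapshot_id)
        if snap['status'].startswith('error'):
            raise RuntimeError('Nova returned "%s" status' % snap['status'])
        if snap.get('progress') == '90%':
            return snap  # the mocks above treat 90% as the terminal read
        time.sleep(interval)
# ---------------------------------------------------------------------------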
            self.assertTrue(mock_sleep.called)
            self.assertFalse(mock_nova.called)
            self.assertFalse(mock_do_create_snapshot.called)
            self.assertFalse(mock_execute.called)

    def test_get_backing_chain_for_path(self):
        drv = self._driver
        self.override_config('glusterfs_mount_point_base',
                             self.TEST_MNT_POINT_BASE)

        volume = self._simple_volume()
        vol_filename = volume['name']
        vol_filename_2 = volume['name'] + '.abcd'
        vol_filename_3 = volume['name'] + '.efef'
        hashed = drv._get_hash_str(self.TEST_EXPORT1)
        vol_dir = '%s/%s' % (self.TEST_MNT_POINT_BASE, hashed)
        vol_path = '%s/%s' % (vol_dir, vol_filename)
        vol_path_2 = '%s/%s' % (vol_dir, vol_filename_2)
        vol_path_3 = '%s/%s' % (vol_dir, vol_filename_3)

        with mock.patch.object(drv, '_local_volume_dir') as \
                mock_local_volume_dir,\
                mock.patch.object(image_utils, 'qemu_img_info') as \
                mock_qemu_img_info:
            qemu_img_output_base = """image: %(image_name)s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
"""
            qemu_img_output = """image: %(image_name)s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %(backing_file)s
"""
            qemu_img_output_1 = qemu_img_output_base %\
                {'image_name': vol_filename}
            qemu_img_output_2 = qemu_img_output %\
                {'image_name': vol_filename_2,
                 'backing_file': vol_filename}
            qemu_img_output_3 = qemu_img_output %\
                {'image_name': vol_filename_3,
                 'backing_file': vol_filename_2}

            info_1 = imageutils.QemuImgInfo(qemu_img_output_1)
            info_2 = imageutils.QemuImgInfo(qemu_img_output_2)
            info_3 = imageutils.QemuImgInfo(qemu_img_output_3)

            img_infos = {vol_path_3: info_3,
                         vol_path_2: info_2,
                         vol_path: info_1}

            def img_info_side_effect(*args, **kwargs):
                return img_infos[args[0]]

            mock_qemu_img_info.side_effect = img_info_side_effect
            mock_local_volume_dir.return_value = vol_dir

            chain = drv._get_backing_chain_for_path(volume, vol_path_3)

            # Verify chain contains all expected data
            item_1 = drv._get_matching_backing_file(chain, vol_filename)
            self.assertEqual(vol_filename_2, item_1['filename'])
            chain.remove(item_1)
            item_2 = drv._get_matching_backing_file(chain, vol_filename_2)
            self.assertEqual(vol_filename_3, item_2['filename'])
            chain.remove(item_2)
            self.assertEqual(1, len(chain))
            self.assertEqual(vol_filename, chain[0]['filename'])

    def test_copy_volume_from_snapshot(self):
        drv = self._driver

        with mock.patch.object(image_utils, 'convert_image') as \
                mock_convert_image,\
                mock.patch.object(drv, '_read_info_file') as \
                mock_read_info_file,\
                mock.patch.object(image_utils, 'qemu_img_info') as \
                mock_qemu_img_info,\
                mock.patch.object(drv, '_set_rw_permissions_for_all') as \
                mock_set_rw_permissions:
            dest_volume = self._simple_volume(
                'c1073000-0000-0000-0000-0000000c1073')
            src_volume = self._simple_volume()

            vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
                                   drv._get_hash_str(self.TEST_EXPORT1))
            src_vol_path = os.path.join(vol_dir, src_volume['name'])
            dest_vol_path = os.path.join(vol_dir, dest_volume['name'])

            snapshot = {'volume_name': src_volume['name'],
                        'name': 'clone-snap-%s' % src_volume['id'],
                        'size': src_volume['size'],
                        'volume_size': src_volume['size'],
                        'volume_id': src_volume['id'],
                        'id': 'tmp-snap-%s' % src_volume['id'],
                        'volume': src_volume}

            snap_file = dest_volume['name'] + '.' + snapshot['id']
            size = dest_volume['size']
            mock_read_info_file.return_value = {'active': snap_file,
                                                snapshot['id']: snap_file}

            qemu_img_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (snap_file, src_volume['name'])
            img_info = imageutils.QemuImgInfo(qemu_img_output)

            mock_qemu_img_info.return_value = img_info

            drv._copy_volume_from_snapshot(snapshot, dest_volume, size)

            mock_convert_image.assert_called_once_with(src_vol_path,
                                                       dest_vol_path, 'raw')
            mock_set_rw_permissions.assert_called_once_with(dest_vol_path)

    def test_create_volume_from_snapshot(self):
        drv = self._driver

        src_volume = self._simple_volume()

        snap_ref = {'volume_name': src_volume['name'],
                    'name': 'clone-snap-%s' % src_volume['id'],
                    'size': src_volume['size'],
                    'volume_size': src_volume['size'],
                    'volume_id': src_volume['id'],
                    'id': 'tmp-snap-%s' % src_volume['id'],
                    'volume': src_volume,
                    'status': 'available'}

        new_volume = DumbVolume()
        new_volume['id'] = self.VOLUME_UUID
        new_volume['size'] = snap_ref['size']

        with mock.patch.object(drv, '_ensure_shares_mounted') as \
                mock_ensure_shares_mounted,\
                mock.patch.object(drv, '_find_share') as \
                mock_find_share, \
                mock.patch.object(drv, '_do_create_volume') as \
                mock_do_create_volume, \
                mock.patch.object(drv, '_copy_volume_from_snapshot') as \
                mock_copy_volume:
            mock_find_share.return_value = self.TEST_EXPORT1

            drv.create_volume_from_snapshot(new_volume, snap_ref)

            self.assertTrue(mock_ensure_shares_mounted.called)
            mock_do_create_volume.assert_called_once_with(new_volume)
            mock_copy_volume.assert_called_once_with(snap_ref,
                                                     new_volume,
                                                     new_volume['size'])

    def test_initialize_connection(self):
        drv = self._driver

        volume = self._simple_volume()
        qemu_img_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume['name']
        img_info = imageutils.QemuImgInfo(qemu_img_output)

        with mock.patch.object(drv, 'get_active_image_from_info') as \
                mock_get_active_image_from_info,\
                mock.patch.object(image_utils, 'qemu_img_info') as \
                mock_qemu_img_info:
            mock_get_active_image_from_info.return_value = volume['name']
            mock_qemu_img_info.return_value = img_info

            conn_info = drv.initialize_connection(volume, None)

            self.assertEqual('raw', conn_info['data']['format'])
            self.assertEqual('glusterfs', conn_info['driver_volume_type'])
            self.assertEqual(volume['name'], conn_info['data']['name'])
            self.assertEqual(self.TEST_MNT_POINT_BASE,
                             conn_info['mount_point_base'])

    def test_get_mount_point_base(self):
        drv = self._driver

        self.assertEqual(self.TEST_MNT_POINT_BASE,
                         drv._get_mount_point_base())

    def test_backup_volume(self):
        """Backup a volume with no snapshots."""
        drv = self._driver

        with mock.patch.object(drv.db, 'volume_get') as mock_volume_get,\
                mock.patch.object(drv, 'get_active_image_from_info') as \
                mock_get_active_image_from_info,\
                mock.patch.object(drv, '_qemu_img_info') as \
                mock_qemu_img_info,\
                mock.patch.object(base_driver.BaseVD, 'backup_volume') as \
                mock_backup_volume:
            ctxt = context.RequestContext('fake_user', 'fake_project')
            volume = self._simple_volume()
            backup = {'volume_id': volume['id']}
            mock_volume_get.return_value = volume
            mock_get_active_image_from_info.return_value = '/some/path'

            info = imageutils.QemuImgInfo()
            info.file_format = 'raw'
            mock_qemu_img_info.return_value = info

            drv.backup_volume(ctxt, backup, mock.MagicMock())

            self.assertTrue(mock_backup_volume.called)

    def test_backup_volume_previous_snap(self):
        """Backup a volume that previously had a snapshot.
Snapshot was deleted, snap_info is different from above. """ drv = self._driver with mock.patch.object(drv.db, 'volume_get') as mock_volume_get,\ mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info,\ mock.patch.object(drv, '_qemu_img_info') as \ mock_qemu_img_info,\ mock.patch.object(base_driver.BaseVD, 'backup_volume') as \ mock_backup_volume: ctxt = context.RequestContext('fake_user', 'fake_project') volume = self._simple_volume() backup = {'volume_id': volume['id']} mock_volume_get.return_value = volume mock_get_active_image_from_info.return_value = '/some/file2' info = imageutils.QemuImgInfo() info.file_format = 'raw' mock_qemu_img_info.return_value = info drv.backup_volume(ctxt, backup, mock.MagicMock()) self.assertTrue(mock_backup_volume.called) def test_backup_snap_failure_1(self): """Backup fails if snapshot exists (database).""" drv = self._driver with mock.patch.object(drv.db, 'snapshot_get_all_for_volume') as \ mock_snapshot_get_all_for_volume: ctxt = context.RequestContext('fake_user', 'fake_project') volume = self._simple_volume() backup = {'volume_id': volume['id']} mock_snapshot_get_all_for_volume.return_value = [ {'snap1': 'a'}, {'snap2': 'b'} ] self.assertRaises(exception.InvalidVolume, drv.backup_volume, ctxt, backup, mock.MagicMock()) def test_backup_snap_failure_2(self): """Backup fails if snapshot exists (on-disk).""" drv = self._driver with mock.patch.object(drv.db, 'volume_get') as mock_volume_get,\ mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_qemu_img_info') as \ mock_qemu_img_info: ctxt = context.RequestContext('fake_user', 'fake_project') volume = self._simple_volume() backup = {'volume_id': volume['id']} mock_volume_get.return_value = volume mock_get_active_image_from_info.return_value = '/some/path/file2' info = imageutils.QemuImgInfo() info.file_format = 'raw' info.backing_file = 'file1' mock_qemu_img_info.return_value = info self.assertRaises(exception.InvalidVolume, drv.backup_volume, ctxt, backup, mock.MagicMock()) def test_backup_failure_unsupported_format(self): """Attempt to backup a volume with a qcow2 base.""" drv = self._driver with mock.patch.object(drv.db, 'volume_get') as mock_volume_get,\ mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info,\ mock.patch.object(drv, '_qemu_img_info') as mock_qemu_img_info: ctxt = context.RequestContext('fake_user', 'fake_project') volume = self._simple_volume() backup = {'volume_id': volume['id']} mock_volume_get.return_value = volume mock_get_active_image_from_info.return_value = '/some/path' info = imageutils.QemuImgInfo() info.file_format = 'qcow2' self.assertRaises(exception.InvalidVolume, drv.backup_volume, ctxt, backup, mock.MagicMock()) mock_volume_get.return_value = volume mock_qemu_img_info.return_value = info self.assertRaises(exception.InvalidVolume, drv.backup_volume, ctxt, backup, mock.MagicMock()) def test_copy_volume_to_image_raw_image(self): drv = self._driver volume = self._simple_volume() volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name']) image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ 
mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """image: %s file format: raw virtual size: 1.0G (1073741824 bytes) disk size: 173K """ % volume['name'] img_info = imageutils.QemuImgInfo(qemu_img_output) mock_qemu_img_info.return_value = img_info upload_path = volume_path drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_once_with(volume) mock_qemu_img_info.assert_called_once_with(volume_path) mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path) self.assertEqual(1, mock_create_temporary_file.call_count) def test_copy_volume_to_image_qcow2_image(self): """Upload a qcow2 image file which has to be converted to raw first.""" drv = self._driver volume = self._simple_volume() volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name']) image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'convert_image') as \ mock_convert_image, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """image: %s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 173K """ % volume['name'] img_info = imageutils.QemuImgInfo(qemu_img_output) mock_qemu_img_info.return_value = img_info upload_path = self.TEST_TMP_FILE drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_with(volume) mock_qemu_img_info.assert_called_once_with(volume_path) mock_convert_image.assert_called_once_with( volume_path, upload_path, 'raw') mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path) self.assertEqual(1, mock_create_temporary_file.call_count) def test_copy_volume_to_image_snapshot_exists(self): """Upload an active snapshot which has to be converted to raw first.""" drv = self._driver volume = self._simple_volume() volume_path = '%s/volume-%s' % (self.TEST_MNT_POINT, self.VOLUME_UUID) volume_filename = 'volume-%s' % self.VOLUME_UUID image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'convert_image') as \ mock_convert_image, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT 
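            # The active image in this test is a qcow2 snapshot with a
            # backing file, so it cannot be uploaded as-is; the driver is
            # expected to convert it to a temporary raw file first, which
            # is why upload_path below is the temporary file rather than
            # the volume path.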
mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """image: volume-%s.%s file format: qcow2 virtual size: 1.0G (1073741824 bytes) disk size: 173K backing file: %s """ % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename) img_info = imageutils.QemuImgInfo(qemu_img_output) mock_qemu_img_info.return_value = img_info upload_path = self.TEST_TMP_FILE drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_with(volume) mock_qemu_img_info.assert_called_once_with(volume_path) mock_convert_image.assert_called_once_with( volume_path, upload_path, 'raw') mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path) self.assertEqual(1, mock_create_temporary_file.call_count) def test_migrate_volume_is_there(self): """Ensure that driver.migrate_volume() is there.""" drv = self._driver ctxt = context.RequestContext('fake_user', 'fake_project') volume = self._simple_volume() ret = drv.migrate_volume(ctxt, volume, mock.sentinel.host) self.assertEqual((False, None), ret) def test_manage_existing_is_there(self): """Ensure that driver.manage_existing() is there.""" drv = self._driver volume = self._simple_volume(id=mock.sentinel.manage_id) self.assertRaises(NotImplementedError, drv.manage_existing, volume, mock.sentinel.existing_ref) def test_unmanage_is_there(self): """Ensure that driver.unmanage() is there.""" drv = self._driver volume = self._simple_volume(id=mock.sentinel.unmanage_id) self.assertRaises(NotImplementedError, drv.unmanage, volume) cinder-8.0.0/cinder/tests/unit/test_backup_driver_base.py0000664000567000056710000004020412701406250024731 0ustar jenkinsjenkins00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for the backup service base driver. 
""" import uuid import mock from oslo_serialization import jsonutils from cinder.backup import driver from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder import test from cinder.tests.unit.backup import fake_service _backup_db_fields = ['id', 'user_id', 'project_id', 'volume_id', 'host', 'availability_zone', 'display_name', 'display_description', 'container', 'status', 'fail_reason', 'service_metadata', 'service', 'size', 'object_count'] class BackupBaseDriverTestCase(test.TestCase): def _create_volume_db_entry(self, id, size): vol = {'id': id, 'size': size, 'status': 'available'} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, backupid, volid, size, userid=str(uuid.uuid4()), projectid=str(uuid.uuid4())): backup = {'id': backupid, 'size': size, 'volume_id': volid, 'user_id': userid, 'project_id': projectid} return db.backup_create(self.ctxt, backup)['id'] def setUp(self): super(BackupBaseDriverTestCase, self).setUp() self.ctxt = context.get_admin_context() self.volume_id = str(uuid.uuid4()) self.backup_id = str(uuid.uuid4()) self._create_backup_db_entry(self.backup_id, self.volume_id, 1) self._create_volume_db_entry(self.volume_id, 1) self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id) self.driver = fake_service.FakeBackupService(self.ctxt) def test_get_metadata(self): json_metadata = self.driver.get_metadata(self.volume_id) metadata = jsonutils.loads(json_metadata) self.assertEqual(2, metadata['version']) def test_put_metadata(self): metadata = {'version': 1} self.driver.put_metadata(self.volume_id, jsonutils.dumps(metadata)) def test_get_put_metadata(self): json_metadata = self.driver.get_metadata(self.volume_id) self.driver.put_metadata(self.volume_id, json_metadata) def test_export_record(self): export_record = self.driver.export_record(self.backup) self.assertDictEqual({}, export_record) def test_import_record(self): export_record = {'key1': 'value1'} self.assertIsNone(self.driver.import_record(self.backup, export_record)) class BackupMetadataAPITestCase(test.TestCase): def _create_volume_db_entry(self, id, size, display_name, display_description): vol = {'id': id, 'size': size, 'status': 'available', 'display_name': display_name, 'display_description': display_description} return db.volume_create(self.ctxt, vol)['id'] def setUp(self): super(BackupMetadataAPITestCase, self).setUp() self.ctxt = context.get_admin_context() self.volume_id = str(uuid.uuid4()) self.volume_display_name = 'vol-1' self.volume_display_description = 'test vol' self._create_volume_db_entry(self.volume_id, 1, self.volume_display_name, self.volume_display_description) self.bak_meta_api = driver.BackupMetadataAPI(self.ctxt) def _add_metadata(self, vol_meta=False, vol_glance_meta=False): if vol_meta: # Add some VolumeMetadata db.volume_metadata_update(self.ctxt, self.volume_id, {'fee': 'fi'}, False) db.volume_metadata_update(self.ctxt, self.volume_id, {'fo': 'fum'}, False) if vol_glance_meta: # Add some GlanceMetadata db.volume_glance_metadata_create(self.ctxt, self.volume_id, 'disk_format', 'bare') db.volume_glance_metadata_create(self.ctxt, self.volume_id, 'container_type', 'ovf') def test_get(self): # Volume won't have anything other than base by default meta = self.bak_meta_api.get(self.volume_id) s1 = set(jsonutils.loads(meta).keys()) s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META] self.assertEqual(set(), s1.symmetric_difference(s2)) self._add_metadata(vol_glance_meta=True) meta = 
self.bak_meta_api.get(self.volume_id) s1 = set(jsonutils.loads(meta).keys()) s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] self.assertEqual(set(), s1.symmetric_difference(s2)) self._add_metadata(vol_meta=True) meta = self.bak_meta_api.get(self.volume_id) s1 = set(jsonutils.loads(meta).keys()) s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META, self.bak_meta_api.TYPE_TAG_VOL_META] self.assertEqual(set(), s1.symmetric_difference(s2)) def test_put(self): meta = self.bak_meta_api.get(self.volume_id) self.bak_meta_api.put(self.volume_id, meta) self._add_metadata(vol_glance_meta=True) meta = self.bak_meta_api.get(self.volume_id) self.bak_meta_api.put(self.volume_id, meta) self._add_metadata(vol_meta=True) meta = self.bak_meta_api.get(self.volume_id) self.bak_meta_api.put(self.volume_id, meta) def test_put_invalid_version(self): container = jsonutils.dumps({'version': 3}) self.assertRaises(exception.BackupMetadataUnsupportedVersion, self.bak_meta_api.put, self.volume_id, container) def test_v1_restore_factory(self): fact = self.bak_meta_api._v1_restore_factory() keys = [self.bak_meta_api.TYPE_TAG_VOL_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] self.assertEqual(set([]), set(keys).symmetric_difference(set(fact.keys()))) meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META: {'display_name': 'vol-2', 'display_description': 'description'}, self.bak_meta_api.TYPE_TAG_VOL_META: {}, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}} for f in fact: func = fact[f][0] fields = fact[f][1] func(meta_container[f], self.volume_id, fields) vol = db.volume_get(self.ctxt, self.volume_id) self.assertEqual(self.volume_display_name, vol['display_name']) self.assertEqual(self.volume_display_description, vol['display_description']) def test_v2_restore_factory(self): fact = self.bak_meta_api._v2_restore_factory() keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META, self.bak_meta_api.TYPE_TAG_VOL_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] self.assertEqual(set([]), set(keys).symmetric_difference(set(fact.keys()))) for f in fact: func = fact[f][0] fields = fact[f][1] func({}, self.volume_id, fields) def test_restore_vol_glance_meta(self): # Fields is an empty list for _restore_vol_glance_meta method. fields = [] container = {} self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id, fields) self._add_metadata(vol_glance_meta=True) self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id, fields) def test_restore_vol_meta(self): # Fields is an empty list for _restore_vol_meta method. fields = [] container = {} self.bak_meta_api._save_vol_meta(container, self.volume_id) # Extract volume metadata from container. metadata = container.get('volume-metadata', {}) self.bak_meta_api._restore_vol_meta(metadata, self.volume_id, fields) self._add_metadata(vol_meta=True) self.bak_meta_api._save_vol_meta(container, self.volume_id) # Extract volume metadata from container. metadata = container.get('volume-metadata', {}) self.bak_meta_api._restore_vol_meta(metadata, self.volume_id, fields) def test_restore_vol_base_meta(self): # Fields is a list with 'encryption_key_id' for # _restore_vol_base_meta method. 
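        # The tests below drive _restore_vol_base_meta() with
        # 'encryption_key_id' in the field list and check the volume-type
        # validation it triggers for the encrypted/unencrypted combinations.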
fields = ['encryption_key_id'] container = {} self.bak_meta_api._save_vol_base_meta(container, self.volume_id) self.bak_meta_api._restore_vol_base_meta(container, self.volume_id, fields) def _create_encrypted_volume_db_entry(self, id, type_id, encrypted): if encrypted: vol = {'id': id, 'size': 1, 'status': 'available', 'volume_type_id': type_id, 'encryption_key_id': 'fake_id'} else: vol = {'id': id, 'size': 1, 'status': 'available', 'volume_type_id': type_id, 'encryption_key_id': None} return db.volume_create(self.ctxt, vol)['id'] def test_restore_encrypted_vol_to_different_volume_type(self): fields = ['encryption_key_id'] container = {} # Create an encrypted volume enc_vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), 'enc_vol_type', True) # Create a second encrypted volume, of a different volume type enc_vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), 'enc_vol_type2', True) # Backup the first volume and attempt to restore to the second self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id) self.assertRaises(exception.EncryptedBackupOperationFailed, self.bak_meta_api._restore_vol_base_meta, container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], enc_vol2_id, fields) def test_restore_unencrypted_vol_to_different_volume_type(self): fields = ['encryption_key_id'] container = {} # Create an unencrypted volume vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), 'vol_type1', False) # Create a second unencrypted volume, of a different volume type vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), 'vol_type2', False) # Backup the first volume and restore to the second self.bak_meta_api._save_vol_base_meta(container, vol1_id) self.bak_meta_api._restore_vol_base_meta( container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], vol2_id, fields) self.assertNotEqual( db.volume_get(self.ctxt, vol1_id)['volume_type_id'], db.volume_get(self.ctxt, vol2_id)['volume_type_id']) def test_restore_encrypted_vol_to_same_volume_type(self): fields = ['encryption_key_id'] container = {} # Create an encrypted volume enc_vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), 'enc_vol_type', True) # Create an encrypted volume of the same type enc_vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), 'enc_vol_type', True) # Backup the first volume and restore to the second self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id) self.bak_meta_api._restore_vol_base_meta( container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], enc_vol2_id, fields) def test_restore_encrypted_vol_to_none_type_source_type_unavailable(self): fields = ['encryption_key_id'] container = {} enc_vol_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), 'enc_vol_type', True) undef_vol_id = self._create_encrypted_volume_db_entry( str(uuid.uuid4()), None, False) self.bak_meta_api._save_vol_base_meta(container, enc_vol_id) self.assertRaises(exception.EncryptedBackupOperationFailed, self.bak_meta_api._restore_vol_base_meta, container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], undef_vol_id, fields) def test_restore_encrypted_vol_to_none_type_source_type_available(self): fields = ['encryption_key_id'] container = {} db.volume_type_create(self.ctxt, {'id': 'enc_vol_type_id', 'name': 'enc_vol_type'}) enc_vol_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), 'enc_vol_type_id', True) undef_vol_id = self._create_encrypted_volume_db_entry( str(uuid.uuid4()), None, False) self.bak_meta_api._save_vol_base_meta(container, enc_vol_id) 
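        # Unlike the *_unavailable case above, the source volume type still
        # exists here, so the restore is expected to succeed and to carry
        # the source's volume_type_id over to the untyped destination
        # (asserted below).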
self.bak_meta_api._restore_vol_base_meta( container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], undef_vol_id, fields) self.assertEqual( db.volume_get(self.ctxt, undef_vol_id)['volume_type_id'], db.volume_get(self.ctxt, enc_vol_id)['volume_type_id']) def test_filter(self): metadata = {'a': 1, 'b': 2, 'c': 3} self.assertEqual(metadata, self.bak_meta_api._filter(metadata, [])) self.assertEqual({'b': 2}, self.bak_meta_api._filter(metadata, ['b'])) self.assertEqual({}, self.bak_meta_api._filter(metadata, ['d'])) self.assertEqual({'a': 1, 'b': 2}, self.bak_meta_api._filter(metadata, ['a', 'b'])) def test_save_vol_glance_meta(self): container = {} self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) def test_save_vol_meta(self): container = {} self.bak_meta_api._save_vol_meta(container, self.volume_id) def test_save_vol_base_meta(self): container = {} self.bak_meta_api._save_vol_base_meta(container, self.volume_id) def test_is_serializable(self): data = {'foo': 'bar'} if self.bak_meta_api._is_serializable(data): jsonutils.dumps(data) def test_is_not_serializable(self): data = {'foo': 'bar'} with mock.patch.object(jsonutils, 'dumps') as mock_dumps: mock_dumps.side_effect = TypeError self.assertFalse(self.bak_meta_api._is_serializable(data)) mock_dumps.assert_called_once_with(data) cinder-8.0.0/cinder/tests/unit/test_volume_types_extra_specs.py0000664000567000056710000001174612701406250026263 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
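# Illustrative sketch (not part of the original file): the extra-specs
# round trip exercised by the tests below, assuming an admin context
# `ctxt` and an existing volume type id `type_id`:
#
#     db.volume_type_extra_specs_update_or_create(
#         ctxt, type_id, dict(vol_extra3='4'))
#     specs = db.volume_type_extra_specs_get(ctxt, type_id)
#     assert specs['vol_extra3'] == '4'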
""" Unit Tests for volume types extra specs code """ from cinder import context from cinder import db from cinder import test class VolumeTypeExtraSpecsTestCase(test.TestCase): def setUp(self): super(VolumeTypeExtraSpecsTestCase, self).setUp() self.context = context.get_admin_context() self.vol_type1 = dict(name="TEST: Regular volume test") self.vol_type1_specs = dict(vol_extra1="value1", vol_extra2="value2", vol_extra3=3) self.vol_type1['extra_specs'] = self.vol_type1_specs ref = db.volume_type_create(self.context, self.vol_type1) self.addCleanup(db.volume_type_destroy, context.get_admin_context(), self.vol_type1['id']) self.volume_type1_id = ref.id for k, v in self.vol_type1_specs.items(): self.vol_type1_specs[k] = str(v) self.vol_type2_noextra = dict(name="TEST: Volume type without extra") ref = db.volume_type_create(self.context, self.vol_type2_noextra) self.addCleanup(db.volume_type_destroy, context.get_admin_context(), self.vol_type2_noextra['id']) self.vol_type2_id = ref.id def test_volume_type_specs_get(self): expected_specs = self.vol_type1_specs.copy() actual_specs = db.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_delete(self): expected_specs = self.vol_type1_specs.copy() del expected_specs['vol_extra2'] db.volume_type_extra_specs_delete(context.get_admin_context(), self.volume_type1_id, 'vol_extra2') actual_specs = db.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_update(self): expected_specs = self.vol_type1_specs.copy() expected_specs['vol_extra3'] = "4" db.volume_type_extra_specs_update_or_create( context.get_admin_context(), self.volume_type1_id, dict(vol_extra3=4)) actual_specs = db.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_create(self): expected_specs = self.vol_type1_specs.copy() expected_specs['vol_extra4'] = 'value4' expected_specs['vol_extra5'] = 'value5' db.volume_type_extra_specs_update_or_create( context.get_admin_context(), self.volume_type1_id, dict(vol_extra4="value4", vol_extra5="value5")) actual_specs = db.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(expected_specs, actual_specs) def test_volume_type_get_with_extra_specs(self): volume_type = db.volume_type_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(self.vol_type1_specs, volume_type['extra_specs']) volume_type = db.volume_type_get( context.get_admin_context(), self.vol_type2_id) self.assertEqual({}, volume_type['extra_specs']) def test_volume_type_get_by_name_with_extra_specs(self): volume_type = db.volume_type_get_by_name( context.get_admin_context(), self.vol_type1['name']) self.assertEqual(self.vol_type1_specs, volume_type['extra_specs']) volume_type = db.volume_type_get_by_name( context.get_admin_context(), self.vol_type2_noextra['name']) self.assertEqual({}, volume_type['extra_specs']) def test_volume_type_get_all(self): expected_specs = self.vol_type1_specs.copy() types = db.volume_type_get_all(context.get_admin_context()) self.assertEqual(expected_specs, types[self.vol_type1['name']]['extra_specs']) self.assertEqual({}, types[self.vol_type2_noextra['name']]['extra_specs']) cinder-8.0.0/cinder/tests/unit/test_storwize_svc.py0000664000567000056710000066731612701406257023712 0ustar 
jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Tests for the IBM Storwize family and SVC volume driver. """ import paramiko import random import re import time import uuid import mock from oslo_concurrency import processutils from oslo_utils import importutils from oslo_utils import units import six from cinder import context from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import ssh_utils from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import utils as testutils from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm.storwize_svc import ( replication as storwize_rep) from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi from cinder.volume import qos_specs from cinder.volume import utils as volume_utils from cinder.volume import volume_types SVC_POOLS = ['openstack', 'openstack1'] def _get_test_pool(get_all=False): if get_all: return SVC_POOLS else: return SVC_POOLS[0] class StorwizeSVCManagementSimulator(object): def __init__(self, pool_name): self._flags = {'storwize_svc_volpool_name': pool_name} self._volumes_list = {} self._hosts_list = {} self._mappings_list = {} self._fcmappings_list = {} self._fcconsistgrp_list = {} self._other_pools = {'openstack2': {}, 'openstack3': {}} self._next_cmd_error = { 'lsportip': '', 'lsfabric': '', 'lsiscsiauth': '', 'lsnodecanister': '', 'mkvdisk': '', 'lsvdisk': '', 'lsfcmap': '', 'prestartfcmap': '', 'startfcmap': '', 'rmfcmap': '', 'lslicense': '', 'lsguicapabilities': '', } self._errors = { 'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'), 'CMMVC6035E': ('', 'CMMVC6035E The action failed as the ' 'object already exists.'), 'CMMVC5753E': ('', 'CMMVC5753E The specified object does not ' 'exist or is not a suitable candidate.'), 'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'), 'CMMVC6581E': ('', 'CMMVC6581E The command has failed because ' 'the maximum number of allowed iSCSI ' 'qualified names (IQNs) has been reached, ' 'or the IQN is already assigned or is not ' 'valid.'), 'CMMVC5754E': ('', 'CMMVC5754E The specified object does not ' 'exist, or the name supplied does not meet ' 'the naming rules.'), 'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was ' 'not created because the VDisk is already ' 'mapped to a host.'), 'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was ' 'not created because a VDisk is already ' 'mapped to this host with this SCSI LUN.'), 'CMMVC5840E': ('', 'CMMVC5840E The virtual disk (VDisk) was ' 'not deleted because it is mapped to a ' 'host or because it is part of a FlashCopy ' 'or Remote Copy mapping, or is involved in ' 'an image mode migrate.'), 'CMMVC6527E': ('', 
'CMMVC6527E The name that you have entered ' 'is not valid. The name can contain letters, ' 'numbers, spaces, periods, dashes, and ' 'underscores. The name must begin with a ' 'letter or an underscore. The name must not ' 'begin or end with a space.'), 'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or ' 'more of the configured port names is in a ' 'mapping.'), 'CMMVC5924E': ('', 'CMMVC5924E The FlashCopy mapping was not ' 'created because the source and target ' 'virtual disks (VDisks) are different sizes.'), 'CMMVC6303E': ('', 'CMMVC6303E The create failed because the ' 'source and target VDisks are the same.'), 'CMMVC7050E': ('', 'CMMVC7050E The command failed because at ' 'least one node in the I/O group does not ' 'support compressed VDisks.'), 'CMMVC6430E': ('', 'CMMVC6430E The command failed because the ' 'target and source managed disk groups must ' 'be different.'), 'CMMVC6353E': ('', 'CMMVC6353E The command failed because the ' 'copy specified does not exist.'), 'CMMVC6446E': ('', 'The command failed because the managed disk ' 'groups have different extent sizes.'), # Catch-all for invalid state transitions: 'CMMVC5903E': ('', 'CMMVC5903E The FlashCopy mapping was not ' 'changed because the mapping or consistency ' 'group is another state.'), 'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported ' 'parameter.'), } self._fc_transitions = {'begin': {'make': 'idle_or_copied'}, 'idle_or_copied': {'prepare': 'preparing', 'delete': 'end', 'delete_force': 'end'}, 'preparing': {'flush_failed': 'stopped', 'wait': 'prepared'}, 'end': None, 'stopped': {'prepare': 'preparing', 'delete_force': 'end'}, 'prepared': {'stop': 'stopped', 'start': 'copying'}, 'copying': {'wait': 'idle_or_copied', 'stop': 'stopping'}, # Assume the worst case where stopping->stopped # rather than stopping idle_or_copied 'stopping': {'wait': 'stopped'}, } self._fc_cg_transitions = {'begin': {'make': 'empty'}, 'empty': {'add': 'idle_or_copied'}, 'idle_or_copied': {'prepare': 'preparing', 'delete': 'end', 'delete_force': 'end'}, 'preparing': {'flush_failed': 'stopped', 'wait': 'prepared'}, 'end': None, 'stopped': {'prepare': 'preparing', 'delete_force': 'end'}, 'prepared': {'stop': 'stopped', 'start': 'copying', 'delete_force': 'end', 'delete': 'end'}, 'copying': {'wait': 'idle_or_copied', 'stop': 'stopping', 'delete_force': 'end', 'delete': 'end'}, # Assume the case where stopping->stopped # rather than stopping idle_or_copied 'stopping': {'wait': 'stopped'}, } def _state_transition(self, function, fcmap): if (function == 'wait' and 'wait' not in self._fc_transitions[fcmap['status']]): return ('', '') if fcmap['status'] == 'copying' and function == 'wait': if fcmap['copyrate'] != '0': if fcmap['progress'] == '0': fcmap['progress'] = '50' else: fcmap['progress'] = '100' fcmap['status'] = 'idle_or_copied' return ('', '') else: try: curr_state = fcmap['status'] fcmap['status'] = self._fc_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5903E'] def _fc_cg_state_transition(self, function, fc_consistgrp): if (function == 'wait' and 'wait' not in self._fc_transitions[fc_consistgrp['status']]): return ('', '') try: curr_state = fc_consistgrp['status'] fc_consistgrp['status'] \ = self._fc_cg_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5903E'] # Find an unused ID @staticmethod def _find_unused_id(d): ids = [] for v in d.values(): ids.append(int(v['id'])) ids.sort() for index, n in enumerate(ids): if n > index: 
return six.text_type(index) return six.text_type(len(ids)) # Check if name is valid @staticmethod def _is_invalid_name(name): if re.match(r'^[a-zA-Z_][\w ._-]*$', name): return False return True # Convert argument string to dictionary @staticmethod def _cmd_to_dict(arg_list): no_param_args = [ 'autodelete', 'bytes', 'compressed', 'force', 'nohdr', 'nofmtdisk' ] one_param_args = [ 'chapsecret', 'cleanrate', 'copy', 'copyrate', 'delim', 'easytier', 'filtervalue', 'grainsize', 'hbawwpn', 'host', 'iogrp', 'iscsiname', 'mdiskgrp', 'name', 'rsize', 'scsi', 'size', 'source', 'target', 'unit', 'vdisk', 'warning', 'wwpn', 'primary', 'consistgrp' ] no_or_one_param_args = [ 'autoexpand', ] # Handle the special case of lsnode which is a two-word command # Use the one word version of the command internally if arg_list[0] in ('svcinfo', 'svctask'): if arg_list[1] == 'lsnode': if len(arg_list) > 4: # e.g. svcinfo lsnode -delim ! ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]} else: ret = {'cmd': 'lsnodecanister'} else: ret = {'cmd': arg_list[1]} arg_list.pop(0) else: ret = {'cmd': arg_list[0]} skip = False for i in range(1, len(arg_list)): if skip: skip = False continue if arg_list[i][0] == '-': if arg_list[i][1:] in no_param_args: ret[arg_list[i][1:]] = True elif arg_list[i][1:] in one_param_args: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True elif arg_list[i][1:] in no_or_one_param_args: if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-': ret[arg_list[i][1:]] = True else: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True else: raise exception.InvalidInput( reason=_('unrecognized argument %s') % arg_list[i]) else: ret['obj'] = arg_list[i] return ret @staticmethod def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs): """Generic function for printing information.""" if nohdr: del rows[0] for index in range(len(rows)): rows[index] = delim.join(rows[index]) return ('%s' % '\n'.join(rows), '') @staticmethod def _print_info_obj_cmd(header, row, delim=' ', nohdr=False): """Generic function for printing information for a specific object.""" objrows = [] for idx, val in enumerate(header): objrows.append([val, row[idx]]) if nohdr: for index in range(len(objrows)): objrows[index] = ' '.join(objrows[index][1:]) for index in range(len(objrows)): objrows[index] = delim.join(objrows[index]) return ('%s' % '\n'.join(objrows), '') @staticmethod def _convert_bytes_units(bytestr): num = int(bytestr) unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] unit_index = 0 while num > 1024: num = num / 1024 unit_index += 1 return '%d%s' % (num, unit_array[unit_index]) @staticmethod def _convert_units_bytes(num, unit): unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] unit_index = 0 while unit.lower() != unit_array[unit_index].lower(): num = num * 1024 unit_index += 1 return six.text_type(num) def _cmd_lslicense(self, **kwargs): rows = [None] * 3 rows[0] = ['used_compression_capacity', '0.08'] rows[1] = ['license_compression_capacity', '0'] if self._next_cmd_error['lslicense'] == 'no_compression': self._next_cmd_error['lslicense'] = '' rows[2] = ['license_compression_enclosures', '0'] else: rows[2] = ['license_compression_enclosures', '1'] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lsguicapabilities(self, **kwargs): rows = [None] if self._next_cmd_error['lsguicapabilities'] == 'no_compression': self._next_cmd_error['lsguicapabilities'] = '' rows[0] = ['license_scheme', '0'] else: rows[0] = ['license_scheme', '9846'] return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the 
correct syntax def _cmd_lssystem(self, **kwargs): rows = [None] * 3 rows[0] = ['id', '0123456789ABCDEF'] rows[1] = ['name', 'storwize-svc-sim'] rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)'] return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax, assume -bytes passed def _cmd_lsmdiskgrp(self, **kwargs): pool_num = len(self._flags['storwize_svc_volpool_name']) rows = [] rows.append(['id', 'name', 'status', 'mdisk_count', 'vdisk_count', 'capacity', 'extent_size', 'free_capacity', 'virtual_capacity', 'used_capacity', 'real_capacity', 'overallocation', 'warning', 'easy_tier', 'easy_tier_status']) for i in range(pool_num): row_data = [str(i + 1), self._flags['storwize_svc_volpool_name'][i], 'online', '1', six.text_type(len(self._volumes_list)), '3573412790272', '256', '3529926246400', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive'] rows.append(row_data) rows.append([str(pool_num + 1), 'openstack2', 'online', '1', '0', '3573412790272', '256', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive']) rows.append([str(pool_num + 2), 'openstack3', 'online', '1', '0', '3573412790272', '128', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive']) if 'obj' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: pool_name = kwargs['obj'].strip('\'\"') if pool_name == kwargs['obj']: raise exception.InvalidInput( reason=_('obj missing quotes %s') % kwargs['obj']) elif pool_name in self._flags['storwize_svc_volpool_name']: for each_row in rows: if pool_name in each_row: row = each_row break elif pool_name == 'openstack2': row = rows[-2] elif pool_name == 'openstack3': row = rows[-1] else: return self._errors['CMMVC5754E'] objrows = [] for idx, val in enumerate(rows[0]): objrows.append([val, row[idx]]) if 'nohdr' in kwargs: for index in range(len(objrows)): objrows[index] = ' '.join(objrows[index][1:]) if 'delim' in kwargs: for index in range(len(objrows)): objrows[index] = kwargs['delim'].join(objrows[index]) return ('%s' % '\n'.join(objrows), '') def _get_mdiskgrp_id(self, mdiskgrp): grp_num = len(self._flags['storwize_svc_volpool_name']) if mdiskgrp in self._flags['storwize_svc_volpool_name']: for i in range(grp_num): if mdiskgrp == self._flags['storwize_svc_volpool_name'][i]: return i + 1 elif mdiskgrp == 'openstack2': return grp_num + 1 elif mdiskgrp == 'openstack3': return grp_num + 2 else: return None # Print mostly made-up stuff in the correct syntax def _cmd_lsnodecanister(self, **kwargs): rows = [None] * 3 rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', 'IO_group_id', 'IO_group_name', 'config_node', 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', 'panel_name', 'enclosure_id', 'canister_id', 'enclosure_serial_number'] rows[1] = ['1', 'node1', '', '123456789ABCDEF0', 'online', '0', 'io_grp0', 'yes', '123456789ABCDEF0', '100', 'iqn.1982-01.com.ibm:1234.sim.node1', '', '01-1', '1', '1', '0123ABC'] rows[2] = ['2', 'node2', '', '123456789ABCDEF1', 'online', '0', 'io_grp0', 'no', '123456789ABCDEF1', '100', 'iqn.1982-01.com.ibm:1234.sim.node2', '', '01-2', '1', '2', '0123ABC'] if self._next_cmd_error['lsnodecanister'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsnodecanister'] = '' if self._next_cmd_error['lsnodecanister'] == 'remove_field': for row in rows: row.pop(0) self._next_cmd_error['lsnodecanister'] = '' return self._print_info_cmd(rows=rows, **kwargs) # Print information of 
every single node of SVC def _cmd_lsnode(self, **kwargs): node_infos = dict() node_infos['1'] = r'''id!1 name!node1 port_id!500507680210C744 port_status!active port_speed!8Gb port_id!500507680220C744 port_status!active port_speed!8Gb ''' node_infos['2'] = r'''id!2 name!node2 port_id!500507680220C745 port_status!active port_speed!8Gb port_id!500507680230C745 port_status!inactive port_speed!N/A ''' node_id = kwargs.get('node_id', None) stdout = node_infos.get(node_id, '') return stdout, '' # Print made up stuff for the ports def _cmd_lsportfc(self, **kwargs): node_1 = [None] * 7 node_1[0] = ['id', 'fc_io_port_id', 'port_id', 'type', 'port_speed', 'node_id', 'node_name', 'WWPN', 'nportid', 'status', 'attachment'] node_1[1] = ['0', '1', '1', 'fc', '8Gb', '1', 'node1', '5005076802132ADE', '012E00', 'active', 'switch'] node_1[2] = ['1', '2', '2', 'fc', '8Gb', '1', 'node1', '5005076802232ADE', '012E00', 'active', 'switch'] node_1[3] = ['2', '3', '3', 'fc', '8Gb', '1', 'node1', '5005076802332ADE', '9B0600', 'active', 'switch'] node_1[4] = ['3', '4', '4', 'fc', '8Gb', '1', 'node1', '5005076802432ADE', '012A00', 'active', 'switch'] node_1[5] = ['4', '5', '5', 'fc', '8Gb', '1', 'node1', '5005076802532ADE', '014A00', 'active', 'switch'] node_1[6] = ['5', '6', '4', 'ethernet', 'N/A', '1', 'node1', '5005076802632ADE', '000000', 'inactive_unconfigured', 'none'] node_2 = [None] * 7 node_2[0] = ['id', 'fc_io_port_id', 'port_id', 'type', 'port_speed', 'node_id', 'node_name', 'WWPN', 'nportid', 'status', 'attachment'] node_2[1] = ['6', '7', '7', 'fc', '8Gb', '2', 'node2', '5005086802132ADE', '012E00', 'active', 'switch'] node_2[2] = ['7', '8', '8', 'fc', '8Gb', '2', 'node2', '5005086802232ADE', '012E00', 'active', 'switch'] node_2[3] = ['8', '9', '9', 'fc', '8Gb', '2', 'node2', '5005086802332ADE', '9B0600', 'active', 'switch'] node_2[4] = ['9', '10', '10', 'fc', '8Gb', '2', 'node2', '5005086802432ADE', '012A00', 'active', 'switch'] node_2[5] = ['10', '11', '11', 'fc', '8Gb', '2', 'node2', '5005086802532ADE', '014A00', 'active', 'switch'] node_2[6] = ['11', '12', '12', 'ethernet', 'N/A', '2', 'node2', '5005086802632ADE', '000000', 'inactive_unconfigured', 'none'] node_infos = [node_1, node_2] node_id = int(kwargs['filtervalue'].split('=')[1]) - 1 return self._print_info_cmd(rows=node_infos[node_id], **kwargs) # Print mostly made-up stuff in the correct syntax def _cmd_lsportip(self, **kwargs): if self._next_cmd_error['lsportip'] == 'ip_no_config': self._next_cmd_error['lsportip'] = '' ip_addr1 = '' ip_addr2 = '' gw = '' else: ip_addr1 = '1.234.56.78' ip_addr2 = '1.234.56.79' gw = '1.234.56.1' rows = [None] * 17 rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask', 'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed', 'failover'] rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'no'] rows[2] = ['1', '1', 'node1', '', '', '', '', '', '', '01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes'] rows[3] = ['2', '1', 'node1', '', '', '', '', '', '', '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'no'] rows[4] = ['2', '1', 'node1', '', '', '', '', '', '', '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'yes'] rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no'] rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes'] rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no'] rows[8] = ['4', '1', 
'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes'] rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'no'] rows[10] = ['1', '2', 'node2', '', '', '', '', '', '', '01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes'] rows[11] = ['2', '2', 'node2', '', '', '', '', '', '', '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', 'no'] rows[12] = ['2', '2', 'node2', '', '', '', '', '', '', '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', 'yes'] rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no'] rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes'] rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no'] rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes'] if self._next_cmd_error['lsportip'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsportip'] = '' if self._next_cmd_error['lsportip'] == 'remove_field': for row in rows: row.pop(1) self._next_cmd_error['lsportip'] = '' return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lsfabric(self, **kwargs): host_name = kwargs['host'].strip('\'\"') if 'host' in kwargs else None target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None host_infos = [] for hv in self._hosts_list.values(): if (not host_name) or (hv['host_name'] == host_name): if not target_wwpn or target_wwpn in hv['wwpns']: host_infos.append(hv) break if not len(host_infos): return ('', '') rows = [] rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name', 'local_wwpn', 'local_port', 'local_nportid', 'state', 'name', 'cluster_name', 'type']) for host_info in host_infos: for wwpn in host_info['wwpns']: rows.append([wwpn, '123456', host_info['id'], 'nodeN', 'AABBCCDDEEFF0011', '1', '0123ABC', 'active', host_info['host_name'], '', 'host']) if self._next_cmd_error['lsfabric'] == 'header_mismatch': rows[0].pop(0) self._next_cmd_error['lsfabric'] = '' if self._next_cmd_error['lsfabric'] == 'remove_field': for row in rows: row.pop(0) self._next_cmd_error['lsfabric'] = '' return self._print_info_cmd(rows=rows, **kwargs) # Create a vdisk def _cmd_mkvdisk(self, **kwargs): # We only save the id/uid, name, and size - all else will be made up volume_info = {} volume_info['id'] = self._find_unused_id(self._volumes_list) volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id'] mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') if mdiskgrp == kwargs['mdiskgrp']: raise exception.InvalidInput( reason=_('mdiskgrp missing quotes %s') % kwargs['mdiskgrp']) mdiskgrp_id = self._get_mdiskgrp_id(mdiskgrp) volume_info['mdisk_grp_name'] = mdiskgrp volume_info['mdisk_grp_id'] = str(mdiskgrp_id) if 'name' in kwargs: volume_info['name'] = kwargs['name'].strip('\'\"') else: volume_info['name'] = 'vdisk' + volume_info['id'] # Assume size and unit are given, store it in bytes capacity = int(kwargs['size']) unit = kwargs['unit'] volume_info['capacity'] = self._convert_units_bytes(capacity, unit) volume_info['IO_group_id'] = kwargs['iogrp'] volume_info['IO_group_name'] = 'io_grp%s' % kwargs['iogrp'] if 'easytier' in kwargs: if kwargs['easytier'] == 'on': volume_info['easy_tier'] = 'on' else: volume_info['easy_tier'] = 'off' if 'rsize' in kwargs: volume_info['formatted'] = 'no' # Fake numbers volume_info['used_capacity'] = '786432' volume_info['real_capacity'] = '21474816' volume_info['free_capacity'] = '38219264' if 'warning' in kwargs: volume_info['warning'] = 
kwargs['warning'].rstrip('%') else: volume_info['warning'] = '80' if 'autoexpand' in kwargs: volume_info['autoexpand'] = 'on' else: volume_info['autoexpand'] = 'off' if 'grainsize' in kwargs: volume_info['grainsize'] = kwargs['grainsize'] else: volume_info['grainsize'] = '32' if 'compressed' in kwargs: volume_info['compressed_copy'] = 'yes' else: volume_info['compressed_copy'] = 'no' else: volume_info['used_capacity'] = volume_info['capacity'] volume_info['real_capacity'] = volume_info['capacity'] volume_info['free_capacity'] = '0' volume_info['warning'] = '' volume_info['autoexpand'] = '' volume_info['grainsize'] = '' volume_info['compressed_copy'] = 'no' volume_info['formatted'] = 'yes' if 'nofmtdisk' in kwargs: if kwargs['nofmtdisk']: volume_info['formatted'] = 'no' vol_cp = {'id': '0', 'status': 'online', 'sync': 'yes', 'primary': 'yes', 'mdisk_grp_id': str(mdiskgrp_id), 'mdisk_grp_name': mdiskgrp, 'easy_tier': volume_info['easy_tier'], 'compressed_copy': volume_info['compressed_copy']} volume_info['copies'] = {'0': vol_cp} if volume_info['name'] in self._volumes_list: return self._errors['CMMVC6035E'] else: self._volumes_list[volume_info['name']] = volume_info return ('Virtual Disk, id [%s], successfully created' % (volume_info['id']), '') # Delete a vdisk def _cmd_rmvdisk(self, **kwargs): force = True if 'force' in kwargs else False if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] if not force: for mapping in self._mappings_list.values(): if mapping['vol'] == vol_name: return self._errors['CMMVC5840E'] for fcmap in self._fcmappings_list.values(): if ((fcmap['source'] == vol_name) or (fcmap['target'] == vol_name)): return self._errors['CMMVC5840E'] del self._volumes_list[vol_name] return ('', '') def _cmd_expandvdisksize(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') # Assume unit is gb if 'size' not in kwargs: return self._errors['CMMVC5707E'] size = int(kwargs['size']) if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] curr_size = int(self._volumes_list[vol_name]['capacity']) addition = size * units.Gi self._volumes_list[vol_name]['capacity'] = ( six.text_type(curr_size + addition)) return ('', '') def _get_fcmap_info(self, vol_name): ret_vals = { 'fc_id': '', 'fc_name': '', 'fc_map_count': '0', } for fcmap in self._fcmappings_list.values(): if ((fcmap['source'] == vol_name) or (fcmap['target'] == vol_name)): ret_vals['fc_id'] = fcmap['id'] ret_vals['fc_name'] = fcmap['name'] ret_vals['fc_map_count'] = '1' return ret_vals # List information about vdisks def _cmd_lsvdisk(self, **kwargs): rows = [] rows.append(['id', 'name', 'IO_group_id', 'IO_group_name', 'status', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity', 'type', 'FC_id', 'FC_name', 'RC_id', 'RC_name', 'vdisk_UID', 'fc_map_count', 'copy_count', 'fast_write_state', 'se_copy_count', 'RC_change']) for vol in self._volumes_list.values(): if (('filtervalue' not in kwargs) or (kwargs['filtervalue'] == 'name=' + vol['name']) or (kwargs['filtervalue'] == 'vdisk_UID=' + vol['uid'])): fcmap_info = self._get_fcmap_info(vol['name']) if 'bytes' in kwargs: cap = self._convert_bytes_units(vol['capacity']) else: cap = vol['capacity'] rows.append([six.text_type(vol['id']), vol['name'], vol['IO_group_id'], vol['IO_group_name'], 'online', '0', _get_test_pool(), cap, 'striped', fcmap_info['fc_id'], fcmap_info['fc_name'], '', '', vol['uid'], 
fcmap_info['fc_map_count'], '1', 'empty', '1', 'no']) if 'obj' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: if kwargs['obj'] not in self._volumes_list: return self._errors['CMMVC5754E'] vol = self._volumes_list[kwargs['obj']] fcmap_info = self._get_fcmap_info(vol['name']) cap = vol['capacity'] cap_u = vol['used_capacity'] cap_r = vol['real_capacity'] cap_f = vol['free_capacity'] if 'bytes' not in kwargs: for item in [cap, cap_u, cap_r, cap_f]: item = self._convert_bytes_units(item) rows = [] rows.append(['id', six.text_type(vol['id'])]) rows.append(['name', vol['name']]) rows.append(['IO_group_id', vol['IO_group_id']]) rows.append(['IO_group_name', vol['IO_group_name']]) rows.append(['status', 'online']) rows.append(['capacity', cap]) rows.append(['formatted', vol['formatted']]) rows.append(['mdisk_id', '']) rows.append(['mdisk_name', '']) rows.append(['FC_id', fcmap_info['fc_id']]) rows.append(['FC_name', fcmap_info['fc_name']]) rows.append(['RC_id', '']) rows.append(['RC_name', '']) rows.append(['vdisk_UID', vol['uid']]) rows.append(['throttling', '0']) if self._next_cmd_error['lsvdisk'] == 'blank_pref_node': rows.append(['preferred_node_id', '']) self._next_cmd_error['lsvdisk'] = '' elif self._next_cmd_error['lsvdisk'] == 'no_pref_node': self._next_cmd_error['lsvdisk'] = '' else: rows.append(['preferred_node_id', '1']) rows.append(['fast_write_state', 'empty']) rows.append(['cache', 'readwrite']) rows.append(['udid', '']) rows.append(['fc_map_count', fcmap_info['fc_map_count']]) rows.append(['sync_rate', '50']) rows.append(['copy_count', '1']) rows.append(['se_copy_count', '0']) rows.append(['mirror_write_priority', 'latency']) rows.append(['RC_change', 'no']) for copy in vol['copies'].values(): rows.append(['copy_id', copy['id']]) rows.append(['status', copy['status']]) rows.append(['primary', copy['primary']]) rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) rows.append(['type', 'striped']) rows.append(['used_capacity', cap_u]) rows.append(['real_capacity', cap_r]) rows.append(['free_capacity', cap_f]) rows.append(['easy_tier', copy['easy_tier']]) rows.append(['compressed_copy', copy['compressed_copy']]) rows.append(['autoexpand', vol['autoexpand']]) rows.append(['warning', vol['warning']]) rows.append(['grainsize', vol['grainsize']]) if 'nohdr' in kwargs: for index in range(len(rows)): rows[index] = ' '.join(rows[index][1:]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_lsiogrp(self, **kwargs): rows = [None] * 6 rows[0] = ['id', 'name', 'node_count', 'vdisk_count', 'host_count'] rows[1] = ['0', 'io_grp0', '2', '0', '4'] rows[2] = ['1', 'io_grp1', '2', '0', '4'] rows[3] = ['2', 'io_grp2', '0', '0', '4'] rows[4] = ['3', 'io_grp3', '0', '0', '4'] rows[5] = ['4', 'recovery_io_grp', '0', '0', '0'] return self._print_info_cmd(rows=rows, **kwargs) def _add_port_to_host(self, host_info, **kwargs): if 'iscsiname' in kwargs: added_key = 'iscsi_names' added_val = kwargs['iscsiname'].strip('\'\"') elif 'hbawwpn' in kwargs: added_key = 'wwpns' added_val = kwargs['hbawwpn'].strip('\'\"') else: return self._errors['CMMVC5707E'] host_info[added_key].append(added_val) for v in self._hosts_list.values(): if v['id'] == host_info['id']: continue for port in v[added_key]: if port == added_val: return self._errors['CMMVC6581E'] return ('', '') # Make a host def _cmd_mkhost(self, **kwargs): host_info = {} host_info['id'] = 
    # Make a host
    def _cmd_mkhost(self, **kwargs):
        host_info = {}
        host_info['id'] = self._find_unused_id(self._hosts_list)

        if 'name' in kwargs:
            host_name = kwargs['name'].strip('\'\"')
        else:
            host_name = 'host' + six.text_type(host_info['id'])

        if self._is_invalid_name(host_name):
            return self._errors['CMMVC6527E']

        if host_name in self._hosts_list:
            return self._errors['CMMVC6035E']

        host_info['host_name'] = host_name
        host_info['iscsi_names'] = []
        host_info['wwpns'] = []

        out, err = self._add_port_to_host(host_info, **kwargs)
        if not len(err):
            self._hosts_list[host_name] = host_info
            return ('Host, id [%s], successfully created' %
                    (host_info['id']), '')
        else:
            return (out, err)

    # Add ports to an existing host
    def _cmd_addhostport(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        host_name = kwargs['obj'].strip('\'\"')

        if host_name not in self._hosts_list:
            return self._errors['CMMVC5753E']

        host_info = self._hosts_list[host_name]
        return self._add_port_to_host(host_info, **kwargs)

    # Change host properties
    def _cmd_chhost(self, **kwargs):
        if 'chapsecret' not in kwargs:
            return self._errors['CMMVC5707E']
        # Store the supplied secret, not the host name
        secret = kwargs['chapsecret'].strip('\'\"')

        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        host_name = kwargs['obj'].strip('\'\"')

        if host_name not in self._hosts_list:
            return self._errors['CMMVC5753E']

        self._hosts_list[host_name]['chapsecret'] = secret
        return ('', '')

    # Remove a host
    def _cmd_rmhost(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']

        host_name = kwargs['obj'].strip('\'\"')
        if host_name not in self._hosts_list:
            return self._errors['CMMVC5753E']

        for v in self._mappings_list.values():
            if (v['host'] == host_name):
                return self._errors['CMMVC5871E']

        del self._hosts_list[host_name]
        return ('', '')

    # List information about hosts
    def _cmd_lshost(self, **kwargs):
        if 'obj' not in kwargs:
            rows = []
            rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])

            found = False
            for host in self._hosts_list.values():
                filterstr = 'name=' + host['host_name']
                if (('filtervalue' not in kwargs) or
                        (kwargs['filtervalue'] == filterstr)):
                    rows.append([host['id'], host['host_name'], '1', '4',
                                 'offline'])
                    found = True
            if found:
                return self._print_info_cmd(rows=rows, **kwargs)
            else:
                return ('', '')
        else:
            host_name = kwargs['obj'].strip('\'\"')
            if host_name not in self._hosts_list:
                return self._errors['CMMVC5754E']
            host = self._hosts_list[host_name]
            rows = []
            rows.append(['id', host['id']])
            rows.append(['name', host['host_name']])
            rows.append(['port_count', '1'])
            rows.append(['type', 'generic'])
            rows.append(['mask', '1111'])
            rows.append(['iogrp_count', '4'])
            rows.append(['status', 'online'])
            for port in host['iscsi_names']:
                rows.append(['iscsi_name', port])
                rows.append(['node_logged_in_count', '0'])
                rows.append(['state', 'offline'])
            for port in host['wwpns']:
                rows.append(['WWPN', port])
                rows.append(['node_logged_in_count', '0'])
                rows.append(['state', 'active'])

            if 'nohdr' in kwargs:
                for index in range(len(rows)):
                    rows[index] = ' '.join(rows[index][1:])

            if 'delim' in kwargs:
                for index in range(len(rows)):
                    rows[index] = kwargs['delim'].join(rows[index])

            return ('%s' % '\n'.join(rows), '')
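    # Illustrative sketch (hypothetical names, not executed here): the CHAP
    # secret stored by _cmd_chhost is exactly what _cmd_lsiscsiauth below
    # reports back, so a round trip looks like:
    #
    #     sim = StorwizeSVCManagementSimulator(['openstack'])
    #     sim._cmd_mkhost(name='demo', iscsiname='iqn.demo:01')
    #     sim._cmd_chhost(obj='demo', chapsecret='s3cret')
    #     out, _ = sim._cmd_lsiscsiauth(delim='!')
    #     # one row per host: type!id!name!iscsi_auth_method!iscsi_chap_secret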
    # List iSCSI authorization information about hosts
    def _cmd_lsiscsiauth(self, **kwargs):
        if self._next_cmd_error['lsiscsiauth'] == 'no_info':
            self._next_cmd_error['lsiscsiauth'] = ''
            return ('', '')
        rows = []
        rows.append(['type', 'id', 'name', 'iscsi_auth_method',
                     'iscsi_chap_secret'])

        for host in self._hosts_list.values():
            method = 'none'
            secret = ''
            if 'chapsecret' in host:
                method = 'chap'
                secret = host['chapsecret']

            rows.append(['host', host['id'], host['host_name'], method,
                         secret])
        return self._print_info_cmd(rows=rows, **kwargs)

    # Create a vdisk-host mapping
    def _cmd_mkvdiskhostmap(self, **kwargs):
        mapping_info = {}
        mapping_info['id'] = self._find_unused_id(self._mappings_list)
        if 'host' not in kwargs:
            return self._errors['CMMVC5707E']
        mapping_info['host'] = kwargs['host'].strip('\'\"')

        if 'scsi' not in kwargs:
            return self._errors['CMMVC5707E']
        mapping_info['lun'] = kwargs['scsi'].strip('\'\"')

        if 'obj' not in kwargs:
            return self._errors['CMMVC5707E']
        mapping_info['vol'] = kwargs['obj'].strip('\'\"')

        if mapping_info['vol'] not in self._volumes_list:
            return self._errors['CMMVC5753E']

        if mapping_info['host'] not in self._hosts_list:
            return self._errors['CMMVC5754E']

        if mapping_info['vol'] in self._mappings_list:
            return self._errors['CMMVC6071E']

        for v in self._mappings_list.values():
            if ((v['host'] == mapping_info['host']) and
                    (v['lun'] == mapping_info['lun'])):
                return self._errors['CMMVC5879E']

        for v in self._mappings_list.values():
            if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs):
                return self._errors['CMMVC6071E']

        self._mappings_list[mapping_info['id']] = mapping_info
        return ('Virtual Disk to Host map, id [%s], successfully created'
                % (mapping_info['id']), '')

    # Delete a vdisk-host mapping
    def _cmd_rmvdiskhostmap(self, **kwargs):
        if 'host' not in kwargs:
            return self._errors['CMMVC5707E']
        host = kwargs['host'].strip('\'\"')

        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        vol = kwargs['obj'].strip('\'\"')

        mapping_ids = []
        for v in self._mappings_list.values():
            if v['vol'] == vol:
                mapping_ids.append(v['id'])
        if not mapping_ids:
            return self._errors['CMMVC5753E']

        this_mapping = None
        for mapping_id in mapping_ids:
            if self._mappings_list[mapping_id]['host'] == host:
                this_mapping = mapping_id
        if this_mapping is None:
            return self._errors['CMMVC5753E']

        del self._mappings_list[this_mapping]
        return ('', '')

    # List information about host->vdisk mappings
    def _cmd_lshostvdiskmap(self, **kwargs):
        host_name = kwargs['obj'].strip('\'\"')

        if host_name not in self._hosts_list:
            return self._errors['CMMVC5754E']

        rows = []
        rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
                     'vdisk_UID'])

        for mapping in self._mappings_list.values():
            if (host_name == '') or (mapping['host'] == host_name):
                volume = self._volumes_list[mapping['vol']]
                rows.append([mapping['id'], mapping['host'], mapping['lun'],
                             volume['id'], volume['name'], volume['uid']])
        return self._print_info_cmd(rows=rows, **kwargs)

    # List information about vdisk->host mappings
    def _cmd_lsvdiskhostmap(self, **kwargs):
        mappings_found = 0
        vdisk_name = kwargs['obj']

        if vdisk_name not in self._volumes_list:
            return self._errors['CMMVC5753E']

        rows = []
        rows.append(['id', 'name', 'SCSI_id', 'host_id', 'host_name',
                     'vdisk_UID', 'IO_group_id', 'IO_group_name'])

        for mapping in self._mappings_list.values():
            if (mapping['vol'] == vdisk_name):
                mappings_found += 1
                volume = self._volumes_list[mapping['vol']]
                host = self._hosts_list[mapping['host']]
                rows.append([volume['id'], volume['name'], mapping['lun'],
                             host['id'], host['host_name'], volume['uid'],
                             volume['IO_group_id'], volume['IO_group_name']])

        if mappings_found:
            return self._print_info_cmd(rows=rows, **kwargs)
        else:
            return ('', '')
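    # Illustrative sketch (hypothetical values, assuming 'vol1' and 'demo'
    # already exist in the simulator; not executed here): the mapping
    # commands round-trip, so a LUN created with mkvdiskhostmap shows up in
    # both map listings until rmvdiskhostmap removes it:
    #
    #     sim._cmd_mkvdiskhostmap(obj='vol1', host='demo', scsi='0')
    #     out, _ = sim._cmd_lshostvdiskmap(obj='demo', delim='!')
    #     out, _ = sim._cmd_lsvdiskhostmap(obj='vol1', delim='!')
    #     sim._cmd_rmvdiskhostmap(obj='vol1', host='demo')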
    # Create a FlashCopy mapping
    def _cmd_mkfcmap(self, **kwargs):
        source = ''
        target = ''
        copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50'

        if 'source' not in kwargs:
            return self._errors['CMMVC5707E']
        source = kwargs['source'].strip('\'\"')
        if source not in self._volumes_list:
            return self._errors['CMMVC5754E']

        if 'target' not in kwargs:
            return self._errors['CMMVC5707E']
        target = kwargs['target'].strip('\'\"')
        if target not in self._volumes_list:
            return self._errors['CMMVC5754E']

        if source == target:
            return self._errors['CMMVC6303E']

        if (self._volumes_list[source]['capacity'] !=
                self._volumes_list[target]['capacity']):
            return self._errors['CMMVC5754E']

        fcmap_info = {}
        fcmap_info['source'] = source
        fcmap_info['target'] = target
        fcmap_info['id'] = self._find_unused_id(self._fcmappings_list)
        fcmap_info['name'] = 'fcmap' + fcmap_info['id']
        fcmap_info['copyrate'] = copyrate
        fcmap_info['progress'] = '0'
        fcmap_info['autodelete'] = True if 'autodelete' in kwargs else False
        fcmap_info['status'] = 'idle_or_copied'

        # Add fcmap to consistency group
        if 'consistgrp' in kwargs:
            consistgrp = kwargs['consistgrp']

            # If it is a digit, assume it is the cg id;
            # otherwise it is the cg name
            cg_id = 0
            if not consistgrp.isdigit():
                for consistgrp_key in self._fcconsistgrp_list.keys():
                    if (self._fcconsistgrp_list[consistgrp_key]['name'] ==
                            consistgrp):
                        cg_id = consistgrp_key
                        fcmap_info['consistgrp'] = consistgrp_key
                        break
            else:
                if int(consistgrp) in self._fcconsistgrp_list.keys():
                    cg_id = int(consistgrp)

            # If we cannot find an existing consistgrp id, return a
            # does-not-exist error
            if not cg_id:
                return self._errors['CMMVC5754E']

            fcmap_info['consistgrp'] = cg_id
            # Add fcmap to consistgrp
            self._fcconsistgrp_list[cg_id]['fcmaps'][fcmap_info['id']] = (
                fcmap_info['name'])
            self._fc_cg_state_transition('add',
                                         self._fcconsistgrp_list[cg_id])

        self._fcmappings_list[fcmap_info['id']] = fcmap_info

        return ('FlashCopy Mapping, id [' + fcmap_info['id'] +
                '], successfully created', '')

    def _cmd_prestartfcmap(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        id_num = kwargs['obj']

        if self._next_cmd_error['prestartfcmap'] == 'bad_id':
            id_num = -1
            self._next_cmd_error['prestartfcmap'] = ''

        try:
            fcmap = self._fcmappings_list[id_num]
        except KeyError:
            return self._errors['CMMVC5753E']

        return self._state_transition('prepare', fcmap)

    def _cmd_startfcmap(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        id_num = kwargs['obj']

        if self._next_cmd_error['startfcmap'] == 'bad_id':
            id_num = -1
            self._next_cmd_error['startfcmap'] = ''

        try:
            fcmap = self._fcmappings_list[id_num]
        except KeyError:
            return self._errors['CMMVC5753E']

        return self._state_transition('start', fcmap)

    def _cmd_stopfcmap(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        id_num = kwargs['obj']

        try:
            fcmap = self._fcmappings_list[id_num]
        except KeyError:
            return self._errors['CMMVC5753E']

        return self._state_transition('stop', fcmap)

    def _cmd_rmfcmap(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        id_num = kwargs['obj']
        force = True if 'force' in kwargs else False

        if self._next_cmd_error['rmfcmap'] == 'bad_id':
            id_num = -1
            self._next_cmd_error['rmfcmap'] = ''

        try:
            fcmap = self._fcmappings_list[id_num]
        except KeyError:
            return self._errors['CMMVC5753E']

        function = 'delete_force' if force else 'delete'
        ret = self._state_transition(function, fcmap)
        if fcmap['status'] == 'end':
            del self._fcmappings_list[id_num]
        return ret

    def _cmd_lsvdiskfcmappings(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5707E']
        vdisk = kwargs['obj']
        rows = []
        rows.append(['id', 'name'])
        for v in self._fcmappings_list.values():
            if v['source'] == vdisk or v['target'] == vdisk:
                rows.append([v['id'], v['name']])
        return self._print_info_cmd(rows=rows, **kwargs)
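    # Illustrative sketch (hypothetical volumes of equal capacity, not
    # executed here): a FlashCopy mapping walks the prepare/start state
    # machine driven by _state_transition, the same path the driver
    # exercises through the SSH layer:
    #
    #     out, _ = sim._cmd_mkfcmap(source='vol1', target='vol2')
    #     fc_id = out.split('[')[1].split(']')[0]
    #     sim._cmd_prestartfcmap(obj=fc_id)   # idle_or_copied -> preparing
    #     sim._cmd_startfcmap(obj=fc_id)      # prepared -> copying
    #     # each lsfcmap call then advances progress via 'wait' transitions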
    def _cmd_chfcmap(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5707E']
        id_num = kwargs['obj']

        try:
            fcmap = self._fcmappings_list[id_num]
        except KeyError:
            return self._errors['CMMVC5753E']

        for key in ['name', 'copyrate', 'autodelete']:
            if key in kwargs:
                fcmap[key] = kwargs[key]
        return ('', '')

    def _cmd_lsfcmap(self, **kwargs):
        rows = []
        rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name',
                     'target_vdisk_id', 'target_vdisk_name', 'group_id',
                     'group_name', 'status', 'progress', 'copy_rate',
                     'clean_progress', 'incremental', 'partner_FC_id',
                     'partner_FC_name', 'restoring', 'start_time',
                     'rc_controlled'])

        # Assume we always get a filtervalue argument
        filter_key = kwargs['filtervalue'].split('=')[0]
        filter_value = kwargs['filtervalue'].split('=')[1]
        to_delete = []
        for k, v in self._fcmappings_list.items():
            if six.text_type(v[filter_key]) == filter_value:
                source = self._volumes_list[v['source']]
                target = self._volumes_list[v['target']]
                self._state_transition('wait', v)

                if self._next_cmd_error['lsfcmap'] == 'speed_up':
                    self._next_cmd_error['lsfcmap'] = ''
                    curr_state = v['status']
                    while self._state_transition('wait', v) == ("", ""):
                        if curr_state == v['status']:
                            break
                        curr_state = v['status']

                if ((v['status'] == 'idle_or_copied' and v['autodelete'] and
                     v['progress'] == '100') or (v['status'] == 'end')):
                    to_delete.append(k)
                else:
                    rows.append([v['id'], v['name'], source['id'],
                                 source['name'], target['id'],
                                 target['name'], '', '', v['status'],
                                 v['progress'], v['copyrate'], '100', 'off',
                                 '', '', 'no', '', 'no'])

        for d in to_delete:
            del self._fcmappings_list[d]

        return self._print_info_cmd(rows=rows, **kwargs)

    # Create a FlashCopy consistency group
    def _cmd_mkfcconsistgrp(self, **kwargs):
        fcconsistgrp_info = {}
        fcconsistgrp_info['id'] = self._find_unused_id(
            self._fcconsistgrp_list)

        if 'name' in kwargs:
            fcconsistgrp_info['name'] = kwargs['name'].strip('\'\"')
        else:
            fcconsistgrp_info['name'] = 'fccstgrp' + fcconsistgrp_info['id']

        if 'autodelete' in kwargs:
            fcconsistgrp_info['autodelete'] = True
        else:
            fcconsistgrp_info['autodelete'] = False
        fcconsistgrp_info['status'] = 'empty'
        fcconsistgrp_info['start_time'] = None
        fcconsistgrp_info['fcmaps'] = {}

        self._fcconsistgrp_list[fcconsistgrp_info['id']] = fcconsistgrp_info

        return ('FlashCopy Consistency Group, id [' +
                fcconsistgrp_info['id'] + '], successfully created', '')

    def _cmd_prestartfcconsistgrp(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        cg_name = kwargs['obj']

        cg_id = 0
        for cg_id in self._fcconsistgrp_list.keys():
            if cg_name == self._fcconsistgrp_list[cg_id]['name']:
                break

        return self._fc_cg_state_transition('prepare',
                                            self._fcconsistgrp_list[cg_id])

    def _cmd_startfcconsistgrp(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        cg_name = kwargs['obj']

        cg_id = 0
        for cg_id in self._fcconsistgrp_list.keys():
            if cg_name == self._fcconsistgrp_list[cg_id]['name']:
                break

        return self._fc_cg_state_transition('start',
                                            self._fcconsistgrp_list[cg_id])

    def _cmd_stopfcconsistgrp(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        id_num = kwargs['obj']

        try:
            fcconsistgrps = self._fcconsistgrp_list[id_num]
        except KeyError:
            return self._errors['CMMVC5753E']

        return self._fc_cg_state_transition('stop', fcconsistgrps)
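    # Illustrative sketch (hypothetical names, not executed here): the
    # consistency group commands address a group by name and fan state
    # changes out to every fcmap in the group via _fc_cg_state_transition:
    #
    #     sim._cmd_mkfcconsistgrp(name='cg1')
    #     sim._cmd_mkfcmap(source='vol1', target='vol2', consistgrp='cg1')
    #     sim._cmd_prestartfcconsistgrp(obj='cg1')
    #     sim._cmd_startfcconsistgrp(obj='cg1')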
    def _cmd_rmfcconsistgrp(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        cg_name = kwargs['obj']
        force = True if 'force' in kwargs else False

        cg_id = 0
        for cg_id in self._fcconsistgrp_list.keys():
            if cg_name == self._fcconsistgrp_list[cg_id]['name']:
                break
        if not cg_id:
            return self._errors['CMMVC5753E']
        fcconsistgrps = self._fcconsistgrp_list[cg_id]

        function = 'delete_force' if force else 'delete'
        ret = self._fc_cg_state_transition(function, fcconsistgrps)
        if fcconsistgrps['status'] == 'end':
            del self._fcconsistgrp_list[cg_id]
        return ret

    def _cmd_lsfcconsistgrp(self, **kwargs):
        rows = []

        if 'obj' not in kwargs:
            rows.append(['id', 'name', 'status', 'start_time'])

            for fcconsistgrp in self._fcconsistgrp_list.values():
                rows.append([fcconsistgrp['id'],
                             fcconsistgrp['name'],
                             fcconsistgrp['status'],
                             fcconsistgrp['start_time']])
            return self._print_info_cmd(rows=rows, **kwargs)
        else:
            fcconsistgrp = None
            cg_id = 0
            for cg_id in self._fcconsistgrp_list.keys():
                if self._fcconsistgrp_list[cg_id]['name'] == kwargs['obj']:
                    fcconsistgrp = self._fcconsistgrp_list[cg_id]
            rows = []
            rows.append(['id', six.text_type(cg_id)])
            rows.append(['name', fcconsistgrp['name']])
            rows.append(['status', fcconsistgrp['status']])
            rows.append(['autodelete',
                         six.text_type(fcconsistgrp['autodelete'])])
            rows.append(['start_time',
                         six.text_type(fcconsistgrp['start_time'])])

            for fcmap_id in fcconsistgrp['fcmaps'].keys():
                rows.append(['FC_mapping_id', six.text_type(fcmap_id)])
                rows.append(['FC_mapping_name',
                             fcconsistgrp['fcmaps'][fcmap_id]])

            if 'delim' in kwargs:
                for index in range(len(rows)):
                    rows[index] = kwargs['delim'].join(rows[index])
            self._fc_cg_state_transition('wait', fcconsistgrp)
            return ('%s' % '\n'.join(rows), '')

    def _cmd_migratevdisk(self, **kwargs):
        if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs:
            return self._errors['CMMVC5707E']
        mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
        vdisk = kwargs['vdisk'].strip('\'\"')

        if vdisk in self._volumes_list:
            curr_mdiskgrp = self._volumes_list
        else:
            for pool in self._other_pools:
                if vdisk in pool:
                    curr_mdiskgrp = pool
                    break
            else:
                return self._errors['CMMVC5754E']

        if mdiskgrp == self._flags['storwize_svc_volpool_name']:
            tgt_mdiskgrp = self._volumes_list
        elif mdiskgrp == 'openstack2':
            tgt_mdiskgrp = self._other_pools['openstack2']
        elif mdiskgrp == 'openstack3':
            tgt_mdiskgrp = self._other_pools['openstack3']
        else:
            return self._errors['CMMVC5754E']

        if curr_mdiskgrp == tgt_mdiskgrp:
            return self._errors['CMMVC6430E']

        vol = curr_mdiskgrp[vdisk]
        tgt_mdiskgrp[vdisk] = vol
        del curr_mdiskgrp[vdisk]
        return ('', '')

    def _cmd_addvdiskcopy(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        vol_name = kwargs['obj'].strip('\'\"')
        if vol_name not in self._volumes_list:
            return self._errors['CMMVC5753E']
        vol = self._volumes_list[vol_name]
        if 'mdiskgrp' not in kwargs:
            return self._errors['CMMVC5707E']
        mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
        if mdiskgrp == kwargs['mdiskgrp']:
            raise exception.InvalidInput(
                reason=_('mdiskgrp missing quotes %s') % kwargs['mdiskgrp'])

        copy_info = {}
        copy_info['id'] = self._find_unused_id(vol['copies'])
        copy_info['status'] = 'online'
        copy_info['sync'] = 'no'
        copy_info['primary'] = 'no'
        copy_info['mdisk_grp_name'] = mdiskgrp
        copy_info['mdisk_grp_id'] = str(self._get_mdiskgrp_id(mdiskgrp))

        if 'easytier' in kwargs:
            if kwargs['easytier'] == 'on':
                copy_info['easy_tier'] = 'on'
            else:
                copy_info['easy_tier'] = 'off'
        if 'rsize' in kwargs:
            if 'compressed' in kwargs:
                copy_info['compressed_copy'] = 'yes'
            else:
                copy_info['compressed_copy'] = 'no'
        vol['copies'][copy_info['id']] = copy_info
        return ('Vdisk [%(vid)s] copy [%(cid)s] successfully created' %
                {'vid': vol['id'], 'cid': copy_info['id']}, '')
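    # Illustrative sketch (hypothetical pool name, not executed here): a copy
    # added with addvdiskcopy starts out with sync == 'no'; the simulator
    # flips it to 'yes' once lsvdiskcopy / lsvdisksyncprogress has reported
    # on it, mimicking a mirror that catches up between polls.  Note the
    # quoted mdiskgrp, which addvdiskcopy enforces:
    #
    #     sim._cmd_addvdiskcopy(obj='vol1', mdiskgrp="'openstack2'")
    #     out, _ = sim._cmd_lsvdisksyncprogress(obj='vol1')  # 50% first poll
    #     out, _ = sim._cmd_lsvdisksyncprogress(obj='vol1')  # 100% after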
    def _cmd_lsvdiskcopy(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5804E']
        name = kwargs['obj']
        vol = self._volumes_list[name]
        rows = []
        rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'status', 'sync',
                     'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity',
                     'type', 'se_copy', 'easy_tier', 'easy_tier_status',
                     'compressed_copy'])
        for copy in vol['copies'].values():
            rows.append([vol['id'], vol['name'], copy['id'],
                         copy['status'], copy['sync'], copy['primary'],
                         copy['mdisk_grp_id'], copy['mdisk_grp_name'],
                         vol['capacity'], 'striped', 'yes',
                         copy['easy_tier'], 'inactive',
                         copy['compressed_copy']])
        if 'copy' not in kwargs:
            return self._print_info_cmd(rows=rows, **kwargs)
        else:
            copy_id = kwargs['copy'].strip('\'\"')
            if copy_id not in vol['copies']:
                return self._errors['CMMVC6353E']
            copy = vol['copies'][copy_id]
            rows = []
            rows.append(['vdisk_id', vol['id']])
            rows.append(['vdisk_name', vol['name']])
            rows.append(['capacity', vol['capacity']])
            rows.append(['copy_id', copy['id']])
            rows.append(['status', copy['status']])
            rows.append(['sync', copy['sync']])
            copy['sync'] = 'yes'
            rows.append(['primary', copy['primary']])
            rows.append(['mdisk_grp_id', copy['mdisk_grp_id']])
            rows.append(['mdisk_grp_name', copy['mdisk_grp_name']])
            rows.append(['easy_tier', copy['easy_tier']])
            rows.append(['easy_tier_status', 'inactive'])
            rows.append(['compressed_copy', copy['compressed_copy']])

            if 'delim' in kwargs:
                for index in range(len(rows)):
                    rows[index] = kwargs['delim'].join(rows[index])

            return ('%s' % '\n'.join(rows), '')

    def _cmd_rmvdiskcopy(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        vol_name = kwargs['obj'].strip('\'\"')
        if 'copy' not in kwargs:
            return self._errors['CMMVC5707E']
        copy_id = kwargs['copy'].strip('\'\"')
        if vol_name not in self._volumes_list:
            return self._errors['CMMVC5753E']
        vol = self._volumes_list[vol_name]
        if copy_id not in vol['copies']:
            return self._errors['CMMVC6353E']

        del vol['copies'][copy_id]
        return ('', '')

    def _cmd_chvdisk(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        vol_name = kwargs['obj'].strip('\'\"')
        vol = self._volumes_list[vol_name]
        kwargs.pop('obj')

        params = ['name', 'warning', 'udid', 'autoexpand', 'easytier',
                  'primary']
        for key, value in kwargs.items():
            if key == 'easytier':
                vol['easy_tier'] = value
                continue
            if key == 'warning':
                vol['warning'] = value.rstrip('%')
                continue
            if key == 'name':
                vol['name'] = value
                del self._volumes_list[vol_name]
                self._volumes_list[value] = vol
            if key == 'primary':
                if value == '0':
                    self._volumes_list[vol_name]['copies']['0']['primary']\
                        = 'yes'
                    self._volumes_list[vol_name]['copies']['1']['primary']\
                        = 'no'
                elif value == '1':
                    self._volumes_list[vol_name]['copies']['0']['primary']\
                        = 'no'
                    self._volumes_list[vol_name]['copies']['1']['primary']\
                        = 'yes'
                else:
                    err = self._errors['CMMVC6353E'][1] % {'VALUE': key}
                    return ('', err)
            if key in params:
                vol[key] = value
            else:
                err = self._errors['CMMVC5709E'][1] % {'VALUE': key}
                return ('', err)
        return ('', '')

    def _cmd_movevdisk(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        vol_name = kwargs['obj'].strip('\'\"')
        vol = self._volumes_list[vol_name]

        if 'iogrp' not in kwargs:
            return self._errors['CMMVC5707E']

        iogrp = kwargs['iogrp']
        if iogrp.isdigit():
            vol['IO_group_id'] = iogrp
            vol['IO_group_name'] = 'io_grp%s' % iogrp
        else:
            vol['IO_group_id'] = iogrp[6:]
            vol['IO_group_name'] = iogrp
        return ('', '')

    def _cmd_addvdiskaccess(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        return ('', '')

    def _cmd_rmvdiskaccess(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5701E']
        return ('', '')
    # List vdisk sync process
    def _cmd_lsvdisksyncprogress(self, **kwargs):
        if 'obj' not in kwargs:
            return self._errors['CMMVC5804E']
        name = kwargs['obj']
        copy_id = kwargs.get('copy', None)
        vol = self._volumes_list[name]
        rows = []
        rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress',
                     'estimated_completion_time'])
        copy_found = False
        for copy in vol['copies'].values():
            if not copy_id or copy_id == copy['id']:
                copy_found = True
                row = [vol['id'], name, copy['id']]
                if copy['sync'] == 'yes':
                    row.extend(['100', ''])
                else:
                    row.extend(['50', '140210115226'])
                    copy['sync'] = 'yes'
                rows.append(row)
        if not copy_found:
            return self._errors['CMMVC5804E']
        return self._print_info_cmd(rows=rows, **kwargs)

    def _add_host_to_list(self, connector):
        host_info = {}
        host_info['id'] = self._find_unused_id(self._hosts_list)
        host_info['host_name'] = connector['host']
        host_info['iscsi_names'] = []
        host_info['wwpns'] = []
        if 'initiator' in connector:
            host_info['iscsi_names'].append(connector['initiator'])
        if 'wwpns' in connector:
            host_info['wwpns'] = host_info['wwpns'] + connector['wwpns']
        self._hosts_list[connector['host']] = host_info

    def _host_in_list(self, host_name):
        for k in self._hosts_list:
            if k.startswith(host_name):
                return k
        return None

    # The main function to run commands on the management simulator
    def execute_command(self, cmd, check_exit_code=True):
        try:
            kwargs = self._cmd_to_dict(cmd)
        except IndexError:
            return self._errors['CMMVC5707E']

        command = kwargs['cmd']
        del kwargs['cmd']
        func = getattr(self, '_cmd_' + command)
        out, err = func(**kwargs)

        if (check_exit_code) and (len(err) != 0):
            raise processutils.ProcessExecutionError(exit_code=1,
                                                     stdout=out,
                                                     stderr=err,
                                                     cmd=' '.join(cmd))

        return (out, err)

    # After calling this function, the next call to the specified command
    # will result in the error specified
    def error_injection(self, cmd, error):
        self._next_cmd_error[cmd] = error

    def change_vdiskcopy_attr(self, vol_name, key, value, copy="primary"):
        if copy == 'primary':
            self._volumes_list[vol_name]['copies']['0'][key] = value
        elif copy == 'secondary':
            self._volumes_list[vol_name]['copies']['1'][key] = value
        else:
            msg = _("The copy should be primary or secondary")
            raise exception.InvalidInput(reason=msg)


class StorwizeSVCISCSIFakeDriver(storwize_svc_iscsi.StorwizeSVCISCSIDriver):
    def __init__(self, *args, **kwargs):
        super(StorwizeSVCISCSIFakeDriver, self).__init__(*args, **kwargs)

    def set_fake_storage(self, fake):
        self.fake_storage = fake

    def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
        utils.check_ssh_injection(cmd)
        ret = self.fake_storage.execute_command(cmd, check_exit_code)

        return ret


class StorwizeSVCFcFakeDriver(storwize_svc_fc.StorwizeSVCFCDriver):
    def __init__(self, *args, **kwargs):
        super(StorwizeSVCFcFakeDriver, self).__init__(*args, **kwargs)

    def set_fake_storage(self, fake):
        self.fake_storage = fake

    def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
        utils.check_ssh_injection(cmd)
        ret = self.fake_storage.execute_command(cmd, check_exit_code)

        return ret
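# Illustrative sketch (not executed at import time): execute_command() turns
# an SSH argv into kwargs via _cmd_to_dict and dispatches to the matching
# _cmd_* method, which is what lets the fake drivers above reuse the real
# drivers' SSH plumbing unchanged.  error_injection() arms _next_cmd_error
# for a single follow-up call, which is how the tests below simulate flaky
# storage:
#
#     sim = StorwizeSVCManagementSimulator(['openstack'])
#     out, err = sim.execute_command(['svcinfo', 'lsiogrp', '-delim', '!'],
#                                    check_exit_code=False)
#     sim.error_injection('lsvdisk', 'blank_pref_node')  # next lsvdisk only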
class StorwizeSVCISCSIDriverTestCase(test.TestCase):
    @mock.patch.object(time, 'sleep')
    def setUp(self, mock_sleep):
        super(StorwizeSVCISCSIDriverTestCase, self).setUp()
        self.USESIM = True
        if self.USESIM:
            self.iscsi_driver = StorwizeSVCISCSIFakeDriver(
                configuration=conf.Configuration(None))
            self._def_flags = {'san_ip': 'hostname',
                               'san_login': 'user',
                               'san_password': 'pass',
                               'storwize_svc_volpool_name': ['openstack'],
                               'storwize_svc_flashcopy_timeout': 20,
                               'storwize_svc_flashcopy_rate': 49,
                               'storwize_svc_multipath_enabled': False,
                               'storwize_svc_allow_tenant_qos': True}
            wwpns = [
                six.text_type(random.randint(0, 9999999999999999)).zfill(16),
                six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
            initiator = 'test.initiator.%s' % six.text_type(
                random.randint(10000, 99999))
            self._connector = {'ip': '1.234.56.78',
                               'host': 'storwize-svc-test',
                               'wwpns': wwpns,
                               'initiator': initiator}
            self.sim = StorwizeSVCManagementSimulator(['openstack'])

            self.iscsi_driver.set_fake_storage(self.sim)
            self.ctxt = context.get_admin_context()

        self._reset_flags()
        self.ctxt = context.get_admin_context()
        db_driver = self.iscsi_driver.configuration.db_driver
        self.db = importutils.import_module(db_driver)
        self.iscsi_driver.db = self.db
        self.iscsi_driver.do_setup(None)
        self.iscsi_driver.check_for_setup_error()
        self.iscsi_driver._helpers.check_fcmapping_interval = 0

    def _set_flag(self, flag, value):
        group = self.iscsi_driver.configuration.config_group
        self.iscsi_driver.configuration.set_override(flag, value, group)

    def _reset_flags(self):
        self.iscsi_driver.configuration.local_conf.reset()
        for k, v in self._def_flags.items():
            self._set_flag(k, v)

    def _create_volume(self, **kwargs):
        pool = _get_test_pool()
        prop = {'host': 'openstack@svc#%s' % pool,
                'size': 1}
        for p in prop.keys():
            if p not in kwargs:
                kwargs[p] = prop[p]
        vol = testutils.create_volume(self.ctxt, **kwargs)
        self.iscsi_driver.create_volume(vol)
        return vol

    def _delete_volume(self, volume):
        self.iscsi_driver.delete_volume(volume)
        self.db.volume_destroy(self.ctxt, volume['id'])

    def _generate_vol_info(self, vol_name, vol_id):
        pool = _get_test_pool()
        rand_id = six.text_type(random.randint(10000, 99999))
        if vol_name:
            return {'name': 'snap_volume%s' % rand_id,
                    'volume_name': vol_name,
                    'id': rand_id,
                    'volume_id': vol_id,
                    'volume_size': 10,
                    'mdisk_grp_name': pool}
        else:
            return {'name': 'test_volume%s' % rand_id,
                    'size': 10,
                    'id': rand_id,
                    'volume_type_id': None,
                    'mdisk_grp_name': pool,
                    'host': 'openstack@svc#%s' % pool}

    def _assert_vol_exists(self, name, exists):
        is_vol_defined = self.iscsi_driver._helpers.is_vdisk_defined(name)
        self.assertEqual(exists, is_vol_defined)

    def test_storwize_svc_iscsi_validate_connector(self):
        conn_neither = {'host': 'host'}
        conn_iscsi = {'host': 'host', 'initiator': 'foo'}
        conn_fc = {'host': 'host', 'wwpns': 'bar'}
        conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}

        self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI'])
        self.iscsi_driver.validate_connector(conn_iscsi)
        self.iscsi_driver.validate_connector(conn_both)
        self.assertRaises(exception.InvalidConnectorException,
                          self.iscsi_driver.validate_connector, conn_fc)
        self.assertRaises(exception.InvalidConnectorException,
                          self.iscsi_driver.validate_connector, conn_neither)

        self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI', 'FC'])
        self.iscsi_driver.validate_connector(conn_iscsi)
        self.iscsi_driver.validate_connector(conn_both)
        self.assertRaises(exception.InvalidConnectorException,
                          self.iscsi_driver.validate_connector, conn_neither)
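    # Illustrative note (sketch only): validate_connector() accepts only a
    # connector carrying the key(s) for an enabled protocol; with just iSCSI
    # enabled, a hypothetical connector needs an 'initiator':
    #
    #     conn = {'host': 'demo', 'initiator': 'iqn.1993-08.org.debian:01:x'}
    #     driver.validate_connector(conn)              # passes
    #     driver.validate_connector({'host': 'demo'})  # raises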
    def test_storwize_terminate_iscsi_connection(self):
        # Create an iSCSI volume
        volume_iSCSI = self._create_volume()
        extra_spec = {'capabilities:storage_protocol': '<in> iSCSI'}
        vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec)
        volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id']

        connector = {'host': 'storwize-svc-host',
                     'wwnns': ['20000090fa17311e', '20000090fa17311f'],
                     'wwpns': ['ff00000000000000', 'ff00000000000001'],
                     'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}

        self.iscsi_driver.initialize_connection(volume_iSCSI, connector)
        self.iscsi_driver.terminate_connection(volume_iSCSI, connector)

    def test_storwize_svc_iscsi_host_maps(self):
        # Create two volumes to be used in mappings
        ctxt = context.get_admin_context()

        volume1 = self._generate_vol_info(None, None)
        self.iscsi_driver.create_volume(volume1)
        volume2 = self._generate_vol_info(None, None)
        self.iscsi_driver.create_volume(volume2)

        # Create the volume types we need
        types = {}
        for protocol in ['iSCSI']:
            opts = {'storage_protocol': '<in> ' + protocol}
            types[protocol] = volume_types.create(ctxt, protocol, opts)

        expected = {'iSCSI': {'driver_volume_type': 'iscsi',
                              'data': {'target_discovered': False,
                                       'target_iqn':
                                           'iqn.1982-01.com.ibm:1234.sim.node1',
                                       'target_portal': '1.234.56.78:3260',
                                       'target_lun': 0,
                                       'auth_method': 'CHAP',
                                       'discovery_auth_method': 'CHAP'}}}

        volume1['volume_type_id'] = types[protocol]['id']
        volume2['volume_type_id'] = types[protocol]['id']

        # Check case where no hosts exist
        if self.USESIM:
            ret = self.iscsi_driver._helpers.get_host_from_connector(
                self._connector)
            self.assertIsNone(ret)

        # Make sure that the volumes have been created
        self._assert_vol_exists(volume1['name'], True)
        self._assert_vol_exists(volume2['name'], True)

        # Initialize connection from the first volume to a host
        ret = self.iscsi_driver.initialize_connection(
            volume1, self._connector)
        self.assertEqual(expected[protocol]['driver_volume_type'],
                         ret['driver_volume_type'])
        for k, v in expected[protocol]['data'].items():
            self.assertEqual(v, ret['data'][k])

        # Initialize again, should notice it and do nothing
        ret = self.iscsi_driver.initialize_connection(
            volume1, self._connector)
        self.assertEqual(expected[protocol]['driver_volume_type'],
                         ret['driver_volume_type'])
        for k, v in expected[protocol]['data'].items():
            self.assertEqual(v, ret['data'][k])

        # Try to delete the 1st volume (should fail because it is mapped)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.iscsi_driver.delete_volume,
                          volume1)

        ret = self.iscsi_driver.terminate_connection(volume1,
                                                     self._connector)
        if self.USESIM:
            ret = self.iscsi_driver._helpers.get_host_from_connector(
                self._connector)
            self.assertIsNone(ret)

        # Check cases with no auth set for host
        if self.USESIM:
            for auth_enabled in [True, False]:
                for host_exists in ['yes-auth', 'yes-noauth', 'no']:
                    self._set_flag('storwize_svc_iscsi_chap_enabled',
                                   auth_enabled)
                    case = 'en' + six.text_type(
                        auth_enabled) + 'ex' + six.text_type(host_exists)
                    conn_na = {'initiator': 'test:init:%s' %
                                            random.randint(10000, 99999),
                               'ip': '11.11.11.11',
                               'host': 'host-%s' % case}
                    if host_exists.startswith('yes'):
                        self.sim._add_host_to_list(conn_na)
                        if host_exists == 'yes-auth':
                            kwargs = {'chapsecret': 'foo',
                                      'obj': conn_na['host']}
                            self.sim._cmd_chhost(**kwargs)
                    volume1['volume_type_id'] = types['iSCSI']['id']

                    init_ret = self.iscsi_driver.initialize_connection(
                        volume1, conn_na)
                    host_name = self.sim._host_in_list(conn_na['host'])
                    chap_ret = (
                        self.iscsi_driver._helpers.get_chap_secret_for_host(
                            host_name))
                    if auth_enabled or host_exists == 'yes-auth':
                        self.assertIn('auth_password', init_ret['data'])
                        self.assertIsNotNone(chap_ret)
                    else:
                        self.assertNotIn('auth_password', init_ret['data'])
                        self.assertIsNone(chap_ret)
                    self.iscsi_driver.terminate_connection(volume1, conn_na)
        self._set_flag('storwize_svc_iscsi_chap_enabled', True)

        # Test no preferred node
        if self.USESIM:
            self.sim.error_injection('lsvdisk', 'no_pref_node')
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.iscsi_driver.initialize_connection,
                              volume1, self._connector)

        # Initialize connection from the second volume to the host with no
        # preferred node set if in simulation mode, otherwise, just
        # another initialize connection.
        if self.USESIM:
            self.sim.error_injection('lsvdisk', 'blank_pref_node')
        self.iscsi_driver.initialize_connection(volume2, self._connector)

        # Try to remove connection from host that doesn't exist (should fail)
        conn_no_exist = self._connector.copy()
        conn_no_exist['initiator'] = 'i_dont_exist'
        conn_no_exist['wwpns'] = ['0000000000000000']
        self.assertRaises(exception.VolumeDriverException,
                          self.iscsi_driver.terminate_connection,
                          volume1,
                          conn_no_exist)

        # Try to remove connection from volume that isn't mapped (should
        # print a message but NOT fail)
        unmapped_vol = self._generate_vol_info(None, None)
        self.iscsi_driver.create_volume(unmapped_vol)
        self.iscsi_driver.terminate_connection(unmapped_vol, self._connector)
        self.iscsi_driver.delete_volume(unmapped_vol)

        # Remove the mapping from the 1st volume and delete it
        self.iscsi_driver.terminate_connection(volume1, self._connector)
        self.iscsi_driver.delete_volume(volume1)
        self._assert_vol_exists(volume1['name'], False)

        # Make sure our host still exists
        host_name = self.iscsi_driver._helpers.get_host_from_connector(
            self._connector)
        self.assertIsNotNone(host_name)

        # Remove the mapping from the 2nd volume. The host should
        # be automatically removed because there are no more mappings.
        self.iscsi_driver.terminate_connection(volume2, self._connector)

        # Check if we successfully terminate connections when the host is not
        # specified (see bug #1244257)
        fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
        self.iscsi_driver.initialize_connection(volume2, self._connector)
        host_name = self.iscsi_driver._helpers.get_host_from_connector(
            self._connector)
        self.assertIsNotNone(host_name)
        self.iscsi_driver.terminate_connection(volume2, fake_conn)
        host_name = self.iscsi_driver._helpers.get_host_from_connector(
            self._connector)
        self.assertIsNone(host_name)
        self.iscsi_driver.delete_volume(volume2)
        self._assert_vol_exists(volume2['name'], False)

        # Delete volume types that we created
        for protocol in ['iSCSI']:
            volume_types.destroy(ctxt, types[protocol]['id'])

        # Check if our host still exists (it should not)
        if self.USESIM:
            ret = (
                self.iscsi_driver._helpers.get_host_from_connector(
                    self._connector))
            self.assertIsNone(ret)
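    # Illustrative note (sketch only): the multihost flag gates mapping one
    # volume to a second host; the test below flips it both ways:
    #
    #     self._set_flag('storwize_svc_multihostmap_enabled', False)
    #     # initialize_connection(volume, conn2) now raises CinderException
    #     self._set_flag('storwize_svc_multihostmap_enabled', True)
    #     # ... and the same call now succeeds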
    def test_storwize_svc_iscsi_multi_host_maps(self):
        # We can't test connecting to multiple hosts from a single host when
        # using real storage
        if not self.USESIM:
            return

        # Create a volume to be used in mappings
        ctxt = context.get_admin_context()
        volume = self._generate_vol_info(None, None)
        self.iscsi_driver.create_volume(volume)

        # Create volume types for protocols
        types = {}
        for protocol in ['iSCSI']:
            opts = {'storage_protocol': '<in> ' + protocol}
            types[protocol] = volume_types.create(ctxt, protocol, opts)

        # Create a connector for the second 'host'
        wwpns = [six.text_type(random.randint(0, 9999999999999999)).zfill(16),
                 six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
        initiator = 'test.initiator.%s' % six.text_type(
            random.randint(10000, 99999))
        conn2 = {'ip': '1.234.56.79',
                 'host': 'storwize-svc-test2',
                 'wwpns': wwpns,
                 'initiator': initiator}

        # Check protocols for iSCSI
        volume['volume_type_id'] = types[protocol]['id']

        # Make sure that the volume has been created
        self._assert_vol_exists(volume['name'], True)

        self.iscsi_driver.initialize_connection(volume, self._connector)

        self._set_flag('storwize_svc_multihostmap_enabled', False)
        self.assertRaises(
            exception.CinderException,
            self.iscsi_driver.initialize_connection, volume, conn2)

        self._set_flag('storwize_svc_multihostmap_enabled', True)
        self.iscsi_driver.initialize_connection(volume, conn2)

        self.iscsi_driver.terminate_connection(volume, conn2)
        self.iscsi_driver.terminate_connection(volume, self._connector)

    def test_add_vdisk_copy_iscsi(self):
        # Ensure only iSCSI is available
        self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI'])
        volume = self._generate_vol_info(None, None)
        self.iscsi_driver.create_volume(volume)
        self.iscsi_driver.add_vdisk_copy(volume['name'], 'fake-pool', None)


class StorwizeSVCFcDriverTestCase(test.TestCase):
    @mock.patch.object(time, 'sleep')
    def setUp(self, mock_sleep):
        super(StorwizeSVCFcDriverTestCase, self).setUp()
        self.USESIM = True
        if self.USESIM:
            self.fc_driver = StorwizeSVCFcFakeDriver(
                configuration=conf.Configuration(None))
            self._def_flags = {'san_ip': 'hostname',
                               'san_login': 'user',
                               'san_password': 'pass',
                               'storwize_svc_volpool_name': SVC_POOLS,
                               'storwize_svc_flashcopy_timeout': 20,
                               'storwize_svc_flashcopy_rate': 49,
                               'storwize_svc_multipath_enabled': False,
                               'storwize_svc_allow_tenant_qos': True}
            wwpns = [
                six.text_type(random.randint(0, 9999999999999999)).zfill(16),
                six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
            initiator = 'test.initiator.%s' % six.text_type(
                random.randint(10000, 99999))
            self._connector = {'ip': '1.234.56.78',
                               'host': 'storwize-svc-test',
                               'wwpns': wwpns,
                               'initiator': initiator}
            self.sim = StorwizeSVCManagementSimulator(SVC_POOLS)

            self.fc_driver.set_fake_storage(self.sim)
            self.ctxt = context.get_admin_context()

        self._reset_flags()
        self.ctxt = context.get_admin_context()
        db_driver = self.fc_driver.configuration.db_driver
        self.db = importutils.import_module(db_driver)
        self.fc_driver.db = self.db
        self.fc_driver.do_setup(None)
        self.fc_driver.check_for_setup_error()
        self.fc_driver._helpers.check_fcmapping_interval = 0

    def _set_flag(self, flag, value):
        group = self.fc_driver.configuration.config_group
        self.fc_driver.configuration.set_override(flag, value, group)

    def _reset_flags(self):
        self.fc_driver.configuration.local_conf.reset()
        for k, v in self._def_flags.items():
            self._set_flag(k, v)

    def _create_volume(self, **kwargs):
        pool = _get_test_pool()
        prop = {'host': 'openstack@svc#%s' % pool,
                'size': 1}
        for p in prop.keys():
            if p not in kwargs:
                kwargs[p] = prop[p]
        vol = testutils.create_volume(self.ctxt, **kwargs)
        self.fc_driver.create_volume(vol)
        return vol

    def _delete_volume(self, volume):
        self.fc_driver.delete_volume(volume)
        self.db.volume_destroy(self.ctxt, volume['id'])

    def _generate_vol_info(self, vol_name, vol_id):
        pool = _get_test_pool()
        rand_id = six.text_type(random.randint(10000, 99999))
        if vol_name:
            return {'name': 'snap_volume%s' % rand_id,
                    'volume_name': vol_name,
                    'id': rand_id,
                    'volume_id': vol_id,
                    'volume_size': 10,
                    'mdisk_grp_name': pool}
        else:
            return {'name': 'test_volume%s' % rand_id,
                    'size': 10,
                    'id': '%s' % rand_id,
                    'volume_type_id': None,
                    'mdisk_grp_name': pool,
                    'host': 'openstack@svc#%s' % pool}

    def _assert_vol_exists(self, name, exists):
        is_vol_defined = self.fc_driver._helpers.is_vdisk_defined(name)
        self.assertEqual(exists, is_vol_defined)

    def test_storwize_get_host_with_fc_connection(self):
        # Create an FC host
        del self._connector['initiator']
        helper = self.fc_driver._helpers
        host_name = helper.create_host(self._connector)

        # Remove the first wwpn from the connector, and then try to get
        # the host
        wwpns = self._connector['wwpns']
        wwpns.remove(wwpns[0])
        host_name = helper.get_host_from_connector(self._connector)

        self.assertIsNotNone(host_name)
    def test_storwize_initiator_multiple_wwpns_connected(self):
        # Create a test volume
        volume = self._create_volume()

        # Fibre Channel volume type
        extra_spec = {'capabilities:storage_protocol': '<in> FC'}
        vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)

        volume['volume_type_id'] = vol_type['id']

        # Make sure that the volumes have been created
        self._assert_vol_exists(volume['name'], True)

        # Set up one WWPN that won't match and one that will.
        self.fc_driver._state['storage_nodes']['1']['WWPN'] = [
            '123456789ABCDEF0', 'AABBCCDDEEFF0010']

        wwpns = ['ff00000000000000', 'ff00000000000001']
        connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}

        with mock.patch.object(storwize_svc_common.StorwizeHelpers,
                               'get_conn_fc_wwpns') as get_mappings:
            mapped_wwpns = ['AABBCCDDEEFF0001', 'AABBCCDDEEFF0002',
                            'AABBCCDDEEFF0010', 'AABBCCDDEEFF0012']
            get_mappings.return_value = mapped_wwpns

            # Initialize the connection
            init_ret = self.fc_driver.initialize_connection(volume, connector)

            # Make sure we return all wwpns which were mapped as part of the
            # connection
            self.assertEqual(mapped_wwpns,
                             init_ret['data']['target_wwn'])

    def test_storwize_svc_fc_validate_connector(self):
        conn_neither = {'host': 'host'}
        conn_iscsi = {'host': 'host', 'initiator': 'foo'}
        conn_fc = {'host': 'host', 'wwpns': 'bar'}
        conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}

        self.fc_driver._state['enabled_protocols'] = set(['FC'])
        self.fc_driver.validate_connector(conn_fc)
        self.fc_driver.validate_connector(conn_both)
        self.assertRaises(exception.InvalidConnectorException,
                          self.fc_driver.validate_connector, conn_iscsi)
        self.assertRaises(exception.InvalidConnectorException,
                          self.fc_driver.validate_connector, conn_neither)

        self.fc_driver._state['enabled_protocols'] = set(['iSCSI', 'FC'])
        self.fc_driver.validate_connector(conn_fc)
        self.fc_driver.validate_connector(conn_both)
        self.assertRaises(exception.InvalidConnectorException,
                          self.fc_driver.validate_connector, conn_neither)

    def test_storwize_terminate_fc_connection(self):
        # Create an FC volume
        volume_fc = self._create_volume()
        extra_spec = {'capabilities:storage_protocol': '<in> FC'}
        vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec)
        volume_fc['volume_type_id'] = vol_type_fc['id']

        connector = {'host': 'storwize-svc-host',
                     'wwnns': ['20000090fa17311e', '20000090fa17311f'],
                     'wwpns': ['ff00000000000000', 'ff00000000000001'],
                     'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}

        self.fc_driver.initialize_connection(volume_fc, connector)
        self.fc_driver.terminate_connection(volume_fc, connector)
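    # Illustrative note (sketch only): an FC initiator_target_map pairs each
    # initiator WWPN with the list of target WWPNs it may zone to, e.g.:
    #
    #     {'ff00000000000000': ['AABBCCDDEEFF0011'],
    #      'ff00000000000001': ['AABBCCDDEEFF0011']}
    #
    # which is exactly the shape the next test asserts on.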
    def test_storwize_initiator_target_map(self):
        # Create a test volume
        volume = self._create_volume()

        # Fibre Channel volume type
        extra_spec = {'capabilities:storage_protocol': '<in> FC'}
        vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)

        volume['volume_type_id'] = vol_type['id']

        # Make sure that the volumes have been created
        self._assert_vol_exists(volume['name'], True)

        wwpns = ['ff00000000000000', 'ff00000000000001']
        connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}

        # Initialise the connection
        init_ret = self.fc_driver.initialize_connection(volume, connector)

        # Check that the initiator_target_map is as expected
        init_data = {'driver_volume_type': 'fibre_channel',
                     'data': {'initiator_target_map':
                              {'ff00000000000000': ['AABBCCDDEEFF0011'],
                               'ff00000000000001': ['AABBCCDDEEFF0011']},
                              'target_discovered': False,
                              'target_lun': 0,
                              'target_wwn': ['AABBCCDDEEFF0011'],
                              'volume_id': volume['id']
                              }
                     }

        self.assertEqual(init_data, init_ret)

        # Terminate connection
        term_ret = self.fc_driver.terminate_connection(volume, connector)

        # Check that the initiator_target_map is as expected
        term_data = {'driver_volume_type': 'fibre_channel',
                     'data': {'initiator_target_map':
                              {'ff00000000000000': ['5005076802432ADE',
                                                    '5005076802332ADE',
                                                    '5005076802532ADE',
                                                    '5005076802232ADE',
                                                    '5005076802132ADE',
                                                    '5005086802132ADE',
                                                    '5005086802332ADE',
                                                    '5005086802532ADE',
                                                    '5005086802232ADE',
                                                    '5005086802432ADE'],
                               'ff00000000000001': ['5005076802432ADE',
                                                    '5005076802332ADE',
                                                    '5005076802532ADE',
                                                    '5005076802232ADE',
                                                    '5005076802132ADE',
                                                    '5005086802132ADE',
                                                    '5005086802332ADE',
                                                    '5005086802532ADE',
                                                    '5005086802232ADE',
                                                    '5005086802432ADE']}
                              }
                     }

        self.assertItemsEqual(term_data, term_ret)

    def test_storwize_svc_fc_host_maps(self):
        # Create two volumes to be used in mappings
        ctxt = context.get_admin_context()

        volume1 = self._generate_vol_info(None, None)
        self.fc_driver.create_volume(volume1)
        volume2 = self._generate_vol_info(None, None)
        self.fc_driver.create_volume(volume2)

        # Create the volume types we need
        types = {}
        for protocol in ['FC']:
            opts = {'storage_protocol': '<in> ' + protocol}
            types[protocol] = volume_types.create(ctxt, protocol, opts)

        expected = {'FC': {'driver_volume_type': 'fibre_channel',
                           'data': {'target_lun': 0,
                                    'target_wwn': ['AABBCCDDEEFF0011'],
                                    'target_discovered': False}}}

        volume1['volume_type_id'] = types[protocol]['id']
        volume2['volume_type_id'] = types[protocol]['id']

        # Check case where no hosts exist
        if self.USESIM:
            ret = self.fc_driver._helpers.get_host_from_connector(
                self._connector)
            self.assertIsNone(ret)

        # Make sure that the volumes have been created
        self._assert_vol_exists(volume1['name'], True)
        self._assert_vol_exists(volume2['name'], True)

        # Initialize connection from the first volume to a host
        ret = self.fc_driver.initialize_connection(
            volume1, self._connector)
        self.assertEqual(expected[protocol]['driver_volume_type'],
                         ret['driver_volume_type'])
        for k, v in expected[protocol]['data'].items():
            self.assertEqual(v, ret['data'][k])

        # Initialize again, should notice it and do nothing
        ret = self.fc_driver.initialize_connection(
            volume1, self._connector)
        self.assertEqual(expected[protocol]['driver_volume_type'],
                         ret['driver_volume_type'])
        for k, v in expected[protocol]['data'].items():
            self.assertEqual(v, ret['data'][k])

        # Try to delete the 1st volume (should fail because it is mapped)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.fc_driver.delete_volume,
                          volume1)

        # Check bad output from lsfabric for the 2nd volume
        if protocol == 'FC' and self.USESIM:
            for error in ['remove_field', 'header_mismatch']:
                self.sim.error_injection('lsfabric', error)
                self.assertRaises(exception.VolumeBackendAPIException,
                                  self.fc_driver.initialize_connection,
                                  volume2, self._connector)

            with mock.patch.object(storwize_svc_common.StorwizeHelpers,
                                   'get_conn_fc_wwpns') as conn_fc_wwpns:
                conn_fc_wwpns.return_value = []
                ret = self.fc_driver.initialize_connection(volume2,
                                                           self._connector)

        ret = self.fc_driver.terminate_connection(volume1, self._connector)
        if protocol == 'FC' and self.USESIM:
            # For the first volume detach, ret['data'] should be empty;
            # only ret['driver_volume_type'] is returned
            self.assertEqual({}, ret['data'])
            self.assertEqual('fibre_channel', ret['driver_volume_type'])
            ret = self.fc_driver.terminate_connection(volume2,
                                                      self._connector)
            self.assertEqual('fibre_channel', ret['driver_volume_type'])
            # wwpn is randomly created
            self.assertNotEqual({}, ret['data'])
        if self.USESIM:
            ret = self.fc_driver._helpers.get_host_from_connector(
                self._connector)
            self.assertIsNone(ret)

        # Test no preferred node
        if self.USESIM:
            self.sim.error_injection('lsvdisk', 'no_pref_node')
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.fc_driver.initialize_connection,
                              volume1, self._connector)

        # Initialize connection from the second volume to the host with no
        # preferred node set if in simulation mode, otherwise, just
        # another initialize connection.
        if self.USESIM:
            self.sim.error_injection('lsvdisk', 'blank_pref_node')
        self.fc_driver.initialize_connection(volume2, self._connector)

        # Try to remove connection from host that doesn't exist (should fail)
        conn_no_exist = self._connector.copy()
        conn_no_exist['initiator'] = 'i_dont_exist'
        conn_no_exist['wwpns'] = ['0000000000000000']
        self.assertRaises(exception.VolumeDriverException,
                          self.fc_driver.terminate_connection,
                          volume1,
                          conn_no_exist)

        # Try to remove connection from volume that isn't mapped (should
        # print a message but NOT fail)
        unmapped_vol = self._generate_vol_info(None, None)
        self.fc_driver.create_volume(unmapped_vol)
        self.fc_driver.terminate_connection(unmapped_vol, self._connector)
        self.fc_driver.delete_volume(unmapped_vol)

        # Remove the mapping from the 1st volume and delete it
        self.fc_driver.terminate_connection(volume1, self._connector)
        self.fc_driver.delete_volume(volume1)
        self._assert_vol_exists(volume1['name'], False)

        # Make sure our host still exists
        host_name = self.fc_driver._helpers.get_host_from_connector(
            self._connector)
        self.assertIsNotNone(host_name)

        # Remove the mapping from the 2nd volume. The host should
        # be automatically removed because there are no more mappings.
        self.fc_driver.terminate_connection(volume2, self._connector)

        # Check if we successfully terminate connections when the host is not
        # specified (see bug #1244257)
        fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
        self.fc_driver.initialize_connection(volume2, self._connector)
        host_name = self.fc_driver._helpers.get_host_from_connector(
            self._connector)
        self.assertIsNotNone(host_name)
        self.fc_driver.terminate_connection(volume2, fake_conn)
        host_name = self.fc_driver._helpers.get_host_from_connector(
            self._connector)
        self.assertIsNone(host_name)
        self.fc_driver.delete_volume(volume2)
        self._assert_vol_exists(volume2['name'], False)

        # Delete volume types that we created
        for protocol in ['FC']:
            volume_types.destroy(ctxt, types[protocol]['id'])

        # Check if our host still exists (it should not)
        if self.USESIM:
            ret = (self.fc_driver._helpers.get_host_from_connector(
                self._connector))
            self.assertIsNone(ret)
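    # Illustrative note (sketch only): terminate_connection() removes the
    # host definition once its last mapping is gone, so a hypothetical
    # sequence looks like:
    #
    #     driver.initialize_connection(volume, connector)
    #     driver._helpers.get_host_from_connector(connector)  # a host name
    #     driver.terminate_connection(volume, connector)
    #     driver._helpers.get_host_from_connector(connector)  # None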
    def test_storwize_svc_fc_multi_host_maps(self):
        # We can't test connecting to multiple hosts from a single host when
        # using real storage
        if not self.USESIM:
            return

        # Create a volume to be used in mappings
        ctxt = context.get_admin_context()
        volume = self._generate_vol_info(None, None)
        self.fc_driver.create_volume(volume)

        # Create volume types for protocols
        types = {}
        for protocol in ['FC']:
            opts = {'storage_protocol': '<in> ' + protocol}
            types[protocol] = volume_types.create(ctxt, protocol, opts)

        # Create a connector for the second 'host'
        wwpns = [six.text_type(random.randint(0, 9999999999999999)).zfill(16),
                 six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
        initiator = 'test.initiator.%s' % six.text_type(
            random.randint(10000, 99999))
        conn2 = {'ip': '1.234.56.79',
                 'host': 'storwize-svc-test2',
                 'wwpns': wwpns,
                 'initiator': initiator}

        # Check protocols for FC
        volume['volume_type_id'] = types[protocol]['id']

        # Make sure that the volume has been created
        self._assert_vol_exists(volume['name'], True)

        self.fc_driver.initialize_connection(volume, self._connector)

        self._set_flag('storwize_svc_multihostmap_enabled', False)
        self.assertRaises(
            exception.CinderException,
            self.fc_driver.initialize_connection, volume, conn2)

        self._set_flag('storwize_svc_multihostmap_enabled', True)
        self.fc_driver.initialize_connection(volume, conn2)

        self.fc_driver.terminate_connection(volume, conn2)
        self.fc_driver.terminate_connection(volume, self._connector)

    def test_add_vdisk_copy_fc(self):
        # Ensure only FC is available
        self.fc_driver._state['enabled_protocols'] = set(['FC'])
        volume = self._generate_vol_info(None, None)
        self.fc_driver.create_volume(volume)
        self.fc_driver.add_vdisk_copy(volume['name'], 'fake-pool', None)


class StorwizeSVCCommonDriverTestCase(test.TestCase):
    @mock.patch.object(time, 'sleep')
    def setUp(self, mock_sleep):
        super(StorwizeSVCCommonDriverTestCase, self).setUp()
        self.USESIM = True
        if self.USESIM:
            self.driver = StorwizeSVCISCSIFakeDriver(
                configuration=conf.Configuration(None))
            self._driver = storwize_svc_iscsi.StorwizeSVCISCSIDriver(
                configuration=conf.Configuration(None))
            self._def_flags = {'san_ip': 'hostname',
                               'storwize_san_secondary_ip': 'secondaryname',
                               'san_login': 'user',
                               'san_password': 'pass',
                               'storwize_svc_volpool_name': SVC_POOLS,
                               'storwize_svc_flashcopy_timeout': 20,
                               'storwize_svc_flashcopy_rate': 49,
                               'storwize_svc_allow_tenant_qos': True}
            wwpns = [
                six.text_type(random.randint(0, 9999999999999999)).zfill(16),
                six.text_type(random.randint(0, 9999999999999999)).zfill(16)]
            initiator = 'test.initiator.%s' % six.text_type(
                random.randint(10000, 99999))
            self._connector = {'ip': '1.234.56.78',
                               'host': 'storwize-svc-test',
                               'wwpns': wwpns,
                               'initiator': initiator}
            self.sim = StorwizeSVCManagementSimulator(SVC_POOLS)

            self.driver.set_fake_storage(self.sim)
            self.ctxt = context.get_admin_context()

        self._reset_flags()
        self.ctxt = context.get_admin_context()
        db_driver = self.driver.configuration.db_driver
        self.db = importutils.import_module(db_driver)
        self.driver.db = self.db
        self.driver.do_setup(None)
        self.driver.check_for_setup_error()
        self.driver._helpers.check_fcmapping_interval = 0

    def _set_flag(self, flag, value):
        group = self.driver.configuration.config_group
        self.driver.configuration.set_override(flag, value, group)

    def _reset_flags(self):
        self.driver.configuration.local_conf.reset()
        for k, v in self._def_flags.items():
            self._set_flag(k, v)

    def _assert_vol_exists(self, name, exists):
        is_vol_defined = self.driver._helpers.is_vdisk_defined(name)
        self.assertEqual(exists, is_vol_defined)
    def test_storwize_svc_connectivity(self):
        # Make sure we detect if the pool doesn't exist
        no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999)
        self._set_flag('storwize_svc_volpool_name', no_exist_pool)
        self.assertRaises(exception.InvalidInput,
                          self.driver.do_setup, None)
        self._reset_flags()

        # Check the case where the user didn't configure IP addresses
        # as well as receiving unexpected results from the storage
        if self.USESIM:
            self.sim.error_injection('lsnodecanister', 'header_mismatch')
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.do_setup, None)
            self.sim.error_injection('lsnodecanister', 'remove_field')
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.do_setup, None)
            self.sim.error_injection('lsportip', 'header_mismatch')
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.do_setup, None)
            self.sim.error_injection('lsportip', 'remove_field')
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.do_setup, None)

        # Check with bad parameters
        self._set_flag('san_ip', '')
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('san_password', None)
        self._set_flag('san_private_key', None)
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('storwize_svc_vol_grainsize', 42)
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('storwize_svc_vol_compression', True)
        self._set_flag('storwize_svc_vol_rsize', -1)
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('storwize_svc_vol_rsize', 2)
        self._set_flag('storwize_svc_vol_nofmtdisk', True)
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        self._set_flag('storwize_svc_vol_iogrp', 5)
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)
        self._reset_flags()

        if self.USESIM:
            self.sim.error_injection('lslicense', 'no_compression')
            self.sim.error_injection('lsguicapabilities', 'no_compression')
            self._set_flag('storwize_svc_vol_compression', True)
            self.driver.do_setup(None)
            self.assertRaises(exception.InvalidInput,
                              self.driver.check_for_setup_error)
            self._reset_flags()

        # Finally, check with good parameters
        self.driver.do_setup(None)

    @mock.patch.object(ssh_utils, 'SSHPool')
    @mock.patch.object(processutils, 'ssh_execute')
    def test_run_ssh_set_up_with_san_ip(self, mock_ssh_execute,
                                        mock_ssh_pool):
        ssh_cmd = ['svcinfo']
        self._driver._run_ssh(ssh_cmd)

        mock_ssh_pool.assert_called_once_with(
            self._driver.configuration.san_ip,
            self._driver.configuration.san_ssh_port,
            self._driver.configuration.ssh_conn_timeout,
            self._driver.configuration.san_login,
            password=self._driver.configuration.san_password,
            privatekey=self._driver.configuration.san_private_key,
            min_size=self._driver.configuration.ssh_min_pool_conn,
            max_size=self._driver.configuration.ssh_max_pool_conn)

    @mock.patch.object(ssh_utils, 'SSHPool')
    @mock.patch.object(processutils, 'ssh_execute')
    def test_run_ssh_set_up_with_secondary_ip(self, mock_ssh_execute,
                                              mock_ssh_pool):
        mock_ssh_pool.side_effect = [paramiko.SSHException, mock.MagicMock()]
        ssh_cmd = ['svcinfo']
        self._driver._run_ssh(ssh_cmd)

        mock_ssh_pool.assert_called_with(
            self._driver.configuration.storwize_san_secondary_ip,
            self._driver.configuration.san_ssh_port,
            self._driver.configuration.ssh_conn_timeout,
            self._driver.configuration.san_login,
            password=self._driver.configuration.san_password,
            privatekey=self._driver.configuration.san_private_key,
            min_size=self._driver.configuration.ssh_min_pool_conn,
            max_size=self._driver.configuration.ssh_max_pool_conn)

    @mock.patch.object(random, 'randint', mock.Mock(return_value=0))
    @mock.patch.object(ssh_utils, 'SSHPool')
    @mock.patch.object(processutils, 'ssh_execute')
    def test_run_ssh_fail_to_secondary_ip(self, mock_ssh_execute,
                                          mock_ssh_pool):
        mock_ssh_execute.side_effect = [processutils.ProcessExecutionError,
                                        mock.MagicMock()]
        ssh_cmd = ['svcinfo']
        self._driver._run_ssh(ssh_cmd)

        mock_ssh_pool.assert_called_with(
            self._driver.configuration.storwize_san_secondary_ip,
            self._driver.configuration.san_ssh_port,
            self._driver.configuration.ssh_conn_timeout,
            self._driver.configuration.san_login,
            password=self._driver.configuration.san_password,
            privatekey=self._driver.configuration.san_private_key,
            min_size=self._driver.configuration.ssh_min_pool_conn,
            max_size=self._driver.configuration.ssh_max_pool_conn)
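    # Illustrative note (sketch only): the failover tests here drive
    # _run_ssh's retry path with ordered side effects; the first entry
    # fails, the second answers, and the assert checks which IP won, e.g.:
    #
    #     mock_ssh_pool.side_effect = [paramiko.SSHException,
    #                                  mock.MagicMock()]
    #     # -> the pool is rebuilt against storwize_san_secondary_ip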
    @mock.patch.object(ssh_utils, 'SSHPool')
    @mock.patch.object(processutils, 'ssh_execute')
    def test_run_secondary_ip_ssh_fail_to_san_ip(self, mock_ssh_execute,
                                                 mock_ssh_pool):
        mock_ssh_pool.side_effect = [
            paramiko.SSHException,
            mock.MagicMock(
                ip=self._driver.configuration.storwize_san_secondary_ip),
            mock.MagicMock()]
        mock_ssh_execute.side_effect = [processutils.ProcessExecutionError,
                                        mock.MagicMock()]
        ssh_cmd = ['svcinfo']
        self._driver._run_ssh(ssh_cmd)

        mock_ssh_pool.assert_called_with(
            self._driver.configuration.san_ip,
            self._driver.configuration.san_ssh_port,
            self._driver.configuration.ssh_conn_timeout,
            self._driver.configuration.san_login,
            password=self._driver.configuration.san_password,
            privatekey=self._driver.configuration.san_private_key,
            min_size=self._driver.configuration.ssh_min_pool_conn,
            max_size=self._driver.configuration.ssh_max_pool_conn)

    def _generate_vol_info(self, vol_name, vol_id):
        pool = _get_test_pool()
        rand_id = six.text_type(random.randint(10000, 99999))
        if vol_name:
            return {'name': 'snap_volume%s' % rand_id,
                    'volume_name': vol_name,
                    'id': rand_id,
                    'volume_id': vol_id,
                    'volume_size': 10,
                    'mdisk_grp_name': pool}
        else:
            return {'name': 'test_volume%s' % rand_id,
                    'size': 10,
                    'id': '%s' % rand_id,
                    'volume_type_id': None,
                    'mdisk_grp_name': pool,
                    'host': 'openstack@svc#%s' % pool}

    def _create_volume(self, **kwargs):
        pool = _get_test_pool()
        prop = {'host': 'openstack@svc#%s' % pool,
                'size': 1}
        for p in prop.keys():
            if p not in kwargs:
                kwargs[p] = prop[p]
        vol = testutils.create_volume(self.ctxt, **kwargs)
        self.driver.create_volume(vol)
        return vol

    def _delete_volume(self, volume):
        self.driver.delete_volume(volume)
        self.db.volume_destroy(self.ctxt, volume['id'])

    def _create_consistencygroup_in_db(self, **kwargs):
        cg = testutils.create_consistencygroup(self.ctxt, **kwargs)
        return cg

    def _create_consistencegroup(self, **kwargs):
        cg = self._create_consistencygroup_in_db(**kwargs)

        model_update = self.driver.create_consistencygroup(self.ctxt, cg)
        self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
                         model_update['status'],
                         "CG creation failed")
        return cg
No cgsnapshot " "will be created.") raise exception.InvalidConsistencyGroup(reason=msg) for volume in volumes: testutils.create_snapshot(self.ctxt, volume['id'], cg_snapshot.id, cg_snapshot.name, cg_snapshot.id, "creating") return cg_snapshot def _create_cgsnapshot(self, cg_id, **kwargs): cg_snapshot = self._create_cgsnapshot_in_db(cg_id, **kwargs) model_update, snapshots = ( self.driver.create_cgsnapshot(self.ctxt, cg_snapshot, [])) self.assertEqual('available', model_update['status'], "CGSnapshot created failed") for snapshot in snapshots: self.assertEqual('available', snapshot['status']) snapshots = ( self.db.snapshot_get_all_for_cgsnapshot(self.ctxt.elevated(), cg_snapshot['id'])) return cg_snapshot, snapshots def _create_test_vol(self, opts): ctxt = testutils.get_test_admin_context() type_ref = volume_types.create(ctxt, 'testtype', opts) volume = self._generate_vol_info(None, None) type_id = type_ref['id'] type_ref = volume_types.get_volume_type(ctxt, type_id) volume['volume_type_id'] = type_id volume['volume_type'] = type_ref self.driver.create_volume(volume) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.driver.delete_volume(volume) volume_types.destroy(ctxt, type_ref['id']) return attrs def _get_default_opts(self): opt = {'rsize': 2, 'warning': 0, 'autoexpand': True, 'grainsize': 256, 'compression': False, 'easytier': True, 'iogrp': 0, 'qos': None, 'replication': False, 'stretched_cluster': None, 'nofmtdisk': False} return opt @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos') @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_vdisk_params') def test_storwize_svc_create_volume_with_qos(self, get_vdisk_params, add_vdisk_qos): vol = testutils.create_volume(self.ctxt) fake_opts = self._get_default_opts() # If the qos is empty, chvdisk should not be called # for create_volume. get_vdisk_params.return_value = fake_opts self.driver.create_volume(vol) self._assert_vol_exists(vol['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol) # If the qos is not empty, chvdisk should be called # for create_volume. 
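# A note on what the qos dict means here (illustrative, not asserted by this
# test): on a real Storwize/SVC backend the 'IOThrottling' value becomes a
# vdisk I/O rate limit, applied by the add_vdisk_qos helper through the SVC
# CLI (conceptually along the lines of 'svctask chvdisk -rate 5000 <vdisk>';
# the exact flags are an assumption). The test itself only verifies whether
# the helper is invoked.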
fake_opts['qos'] = {'IOThrottling': 5000} get_vdisk_params.return_value = fake_opts self.driver.create_volume(vol) self._assert_vol_exists(vol['name'], True) add_vdisk_qos.assert_called_once_with(vol['name'], fake_opts['qos']) self.driver.delete_volume(vol) self._assert_vol_exists(vol['name'], False) def test_storwize_svc_snapshots(self): vol1 = self._create_volume() snap1 = self._generate_vol_info(vol1['name'], vol1['id']) # Test timeout and volume cleanup self._set_flag('storwize_svc_flashcopy_timeout', 1) self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) self._reset_flags() # Test prestartfcmap failing with mock.patch.object( storwize_svc_common.StorwizeSSH, 'prestartfcmap') as prestart: prestart.side_effect = exception.VolumeBackendAPIException self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.sim.error_injection('startfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) self.sim.error_injection('prestartfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) # Test successful snapshot self.driver.create_snapshot(snap1) self._assert_vol_exists(snap1['name'], True) # Try to create a snapshot from a non-existing volume - should fail snap_novol = self._generate_vol_info('undefined-vol', '12345') self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, snap_novol) # We support deleting a volume that has snapshots, so delete the volume # first self.driver.delete_volume(vol1) self.driver.delete_snapshot(snap1) def test_storwize_svc_create_volfromsnap_clone(self): vol1 = self._create_volume() snap1 = self._generate_vol_info(vol1['name'], vol1['id']) self.driver.create_snapshot(snap1) vol2 = self._generate_vol_info(None, None) vol3 = testutils.create_volume(self.ctxt) vol4 = testutils.create_volume(self.ctxt) # Try to create a volume from a non-existing snapshot snap_novol = self._generate_vol_info('undefined-vol', '12345') vol_novol = self._generate_vol_info(None, None) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume_from_snapshot, vol_novol, snap_novol) # Fail the snapshot with mock.patch.object( storwize_svc_common.StorwizeSSH, 'prestartfcmap') as prestart: prestart.side_effect = exception.VolumeBackendAPIException self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, vol2, snap1) self._assert_vol_exists(vol2['name'], False) # Try to create where source size != target size vol2['size'] += 1 self.assertRaises(exception.InvalidInput, self.driver.create_volume_from_snapshot, vol2, snap1) self._assert_vol_exists(vol2['name'], False) vol2['size'] -= 1 # Succeed if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) # Try to clone where source size > target size vol2['size'] = vol3['size'] + 1 self.assertRaises(exception.InvalidInput, self.driver.create_cloned_volume, vol3, vol2) self._assert_vol_exists(vol3['name'], False) # Try to clone where source size = target size vol2['size'] = vol3['size'] if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_cloned_volume(vol3, vol2) if
self.USESIM: # validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['target'] == vol2['name']: self.assertEqual('49', fcmap['copyrate']) self._assert_vol_exists(vol3['name'], True) # Try to clone where source size < target size vol4['size'] = vol2['size'] + 1 if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_cloned_volume(vol4, vol2) if self.USESIM: # Validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['target'] == vol2['name']: self.assertEqual('49', fcmap['copyrate']) self._assert_vol_exists(vol4['name'], True) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol4) self._assert_vol_exists(vol4['name'], False) self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos') def test_storwize_svc_create_volfromsnap_clone_with_qos(self, add_vdisk_qos): vol1 = self._create_volume() snap1 = self._generate_vol_info(vol1['name'], vol1['id']) self.driver.create_snapshot(snap1) vol2 = self._generate_vol_info(None, None) vol3 = self._generate_vol_info(None, None) fake_opts = self._get_default_opts() # Succeed if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') # If the qos is empty, chvdisk should not be called # for create_volume_from_snapshot. with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: get_vdisk_params.return_value = fake_opts self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol2) # If the qos is not empty, chvdisk should be called # for create_volume_from_snapshot. fake_opts['qos'] = {'IOThrottling': 5000} get_vdisk_params.return_value = fake_opts self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) add_vdisk_qos.assert_called_once_with(vol2['name'], fake_opts['qos']) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') # If the qos is empty, chvdisk should not be called # for create_cloned_volume. add_vdisk_qos.reset_mock() fake_opts['qos'] = None get_vdisk_params.return_value = fake_opts self.driver.create_cloned_volume(vol3, vol2) self._assert_vol_exists(vol3['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol3) # If the qos is not empty, chvdisk should be called # for create_cloned_volume.
fake_opts['qos'] = {'IOThrottling': 5000} get_vdisk_params.return_value = fake_opts self.driver.create_cloned_volume(vol3, vol2) self._assert_vol_exists(vol3['name'], True) add_vdisk_qos.assert_called_once_with(vol3['name'], fake_opts['qos']) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) def test_storwize_svc_volumes(self): # Create a first volume volume = self._generate_vol_info(None, None) self.driver.create_volume(volume) self.driver.ensure_export(None, volume) # Do nothing self.driver.create_export(None, volume, {}) self.driver.remove_export(None, volume) # Make sure volume attributes are as they should be attributes = self.driver._helpers.get_vdisk_attributes(volume['name']) attr_size = float(attributes['capacity']) / units.Gi # bytes to GB self.assertEqual(attr_size, float(volume['size'])) pool = _get_test_pool() self.assertEqual(attributes['mdisk_grp_name'], pool) # Try to create the volume again (should fail) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Try to delete a volume that doesn't exist (should not fail) vol_no_exist = {'name': 'i_dont_exist', 'id': '111111'} self.driver.delete_volume(vol_no_exist) # Ensure export for volume that doesn't exist (should not fail) self.driver.ensure_export(None, vol_no_exist) # Delete the volume self.driver.delete_volume(volume) def test_storwize_svc_volume_params(self): # Option test matrix # Option Value Covered by test # # rsize -1 1 # rsize 2 2,3 # warning 0 2 # warning 80 3 # autoexpand True 2 # autoexpand False 3 # grainsize 32 2 # grainsize 256 3 # compression True 4 # compression False 2,3 # easytier True 1,3 # easytier False 2 # iogrp 0 1 # iogrp 1 2 # nofmtdisk False 1 # nofmtdisk True 1 opts_list = [] chck_list = [] opts_list.append({'rsize': -1, 'easytier': True, 'iogrp': 0}) chck_list.append({'free_capacity': '0', 'easy_tier': 'on', 'IO_group_id': '0'}) opts_list.append({'rsize': -1, 'nofmtdisk': False}) chck_list.append({'formatted': 'yes'}) opts_list.append({'rsize': -1, 'nofmtdisk': True}) chck_list.append({'formatted': 'no'}) test_iogrp = 1 if self.USESIM else 0 opts_list.append({'rsize': 2, 'compression': False, 'warning': 0, 'autoexpand': True, 'grainsize': 32, 'easytier': False, 'iogrp': test_iogrp}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', 'warning': '0', 'autoexpand': 'on', 'grainsize': '32', 'easy_tier': 'off', 'IO_group_id': six.text_type(test_iogrp)}) opts_list.append({'rsize': 2, 'compression': False, 'warning': 80, 'autoexpand': False, 'grainsize': 256, 'easytier': True}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', 'warning': '80', 'autoexpand': 'off', 'grainsize': '256', 'easy_tier': 'on'}) opts_list.append({'rsize': 2, 'compression': True}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'yes'}) for idx in range(len(opts_list)): attrs = self._create_test_vol(opts_list[idx]) for k, v in chck_list[idx].items(): try: if k[0] == '-': k = k[1:] self.assertNotEqual(v, attrs[k]) else: self.assertEqual(v, attrs[k]) except processutils.ProcessExecutionError as e: if 'CMMVC7050E' not in e.stderr: raise def test_storwize_svc_unicode_host_and_volume_names(self): # We'll check with iSCSI only - nothing 
protocol-dependent here self.driver.do_setup(None) rand_id = random.randint(10000, 99999) pool = _get_test_pool() volume1 = {'name': u'unicode1_volume%s' % rand_id, 'size': 2, 'id': 1, 'volume_type_id': None, 'host': 'openstack@svc#%s' % pool} self.driver.create_volume(volume1) self._assert_vol_exists(volume1['name'], True) self.assertRaises(exception.VolumeDriverException, self.driver._helpers.create_host, {'host': 12345}) # Add a host first to make life interesting (this host and # conn['host'] should be translated to the same prefix, and the # initiator should differentiate tmpconn1 = {'initiator': u'unicode:initiator1.%s' % rand_id, 'ip': '10.10.10.10', 'host': u'unicode.foo}.bar{.baz-%s' % rand_id} self.driver._helpers.create_host(tmpconn1) # Add a host with a different prefix tmpconn2 = {'initiator': u'unicode:initiator2.%s' % rand_id, 'ip': '10.10.10.11', 'host': u'unicode.hello.world-%s' % rand_id} self.driver._helpers.create_host(tmpconn2) conn = {'initiator': u'unicode:initiator3.%s' % rand_id, 'ip': '10.10.10.12', 'host': u'unicode.foo}.bar}.baz-%s' % rand_id} self.driver.initialize_connection(volume1, conn) host_name = self.driver._helpers.get_host_from_connector(conn) self.assertIsNotNone(host_name) self.driver.terminate_connection(volume1, conn) host_name = self.driver._helpers.get_host_from_connector(conn) self.assertIsNone(host_name) self.driver.delete_volume(volume1) # Clean up temporary hosts for tmpconn in [tmpconn1, tmpconn2]: host_name = self.driver._helpers.get_host_from_connector(tmpconn) self.assertIsNotNone(host_name) self.driver._helpers.delete_host(host_name) def test_storwize_svc_delete_volume_snapshots(self): # Create a volume with two snapshots master = self._create_volume() # Fail creating a snapshot - will force delete the snapshot if self.USESIM and False: snap = self._generate_vol_info(master['name'], master['id']) self.sim.error_injection('startfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap) self._assert_vol_exists(snap['name'], False) # Delete a snapshot snap = self._generate_vol_info(master['name'], master['id']) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.driver.delete_snapshot(snap) self._assert_vol_exists(snap['name'], False) # Delete a volume with snapshots (regular) snap = self._generate_vol_info(master['name'], master['id']) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.driver.delete_volume(master) self._assert_vol_exists(master['name'], False) # Fail create volume from snapshot - will force delete the volume if self.USESIM: volfs = self._generate_vol_info(None, None) self.sim.error_injection('startfcmap', 'bad_id') self.sim.error_injection('lsfcmap', 'speed_up') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volfs, snap) self._assert_vol_exists(volfs['name'], False) # Create volume from snapshot and delete it volfs = self._generate_vol_info(None, None) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(volfs, snap) self._assert_vol_exists(volfs['name'], True) self.driver.delete_volume(volfs) self._assert_vol_exists(volfs['name'], False) # Create volume from snapshot and delete the snapshot volfs = self._generate_vol_info(None, None) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(volfs, snap) self.driver.delete_snapshot(snap) 
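# Deleting the snapshot while a volume cloned from it still exists relies on
# the driver cleaning up the dependent FlashCopy mapping before removing the
# vdisk (a behavioural assumption of this test, not asserted directly); the
# check below only confirms the snapshot vdisk is gone.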
self._assert_vol_exists(snap['name'], False) # Fail create clone - will force delete the target volume if self.USESIM: clone = self._generate_vol_info(None, None) self.sim.error_injection('startfcmap', 'bad_id') self.sim.error_injection('lsfcmap', 'speed_up') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, clone, volfs) self._assert_vol_exists(clone['name'], False) # Create the clone, delete the source and target clone = self._generate_vol_info(None, None) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_cloned_volume(clone, volfs) self._assert_vol_exists(clone['name'], True) self.driver.delete_volume(volfs) self._assert_vol_exists(volfs['name'], False) self.driver.delete_volume(clone) self._assert_vol_exists(clone['name'], False) def test_storwize_svc_get_volume_stats(self): self._set_flag('reserved_percentage', 25) stats = self.driver.get_volume_stats() for each_pool in stats['pools']: self.assertIn(each_pool['pool_name'], self._def_flags['storwize_svc_volpool_name']) self.assertLessEqual(each_pool['free_capacity_gb'], each_pool['total_capacity_gb']) self.assertLessEqual(each_pool['allocated_capacity_gb'], each_pool['total_capacity_gb']) self.assertEqual(25, each_pool['reserved_percentage']) if self.USESIM: expected = 'storwize-svc-sim' self.assertEqual(expected, stats['volume_backend_name']) for each_pool in stats['pools']: self.assertIn(each_pool['pool_name'], self._def_flags['storwize_svc_volpool_name']) self.assertAlmostEqual(3328.0, each_pool['total_capacity_gb']) self.assertAlmostEqual(3287.5, each_pool['free_capacity_gb']) self.assertAlmostEqual(25.0, each_pool['allocated_capacity_gb']) def test_get_pool(self): ctxt = testutils.get_test_admin_context() type_ref = volume_types.create(ctxt, 'testtype', None) volume = self._generate_vol_info(None, None) type_id = type_ref['id'] type_ref = volume_types.get_volume_type(ctxt, type_id) volume['volume_type_id'] = type_id volume['volume_type'] = type_ref self.driver.create_volume(volume) self.assertEqual(volume['mdisk_grp_name'], self.driver.get_pool(volume)) self.driver.delete_volume(volume) volume_types.destroy(ctxt, type_ref['id']) def test_storwize_svc_extend_volume(self): volume = self._create_volume() self.driver.extend_volume(volume, '13') attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) snap = self._generate_vol_info(volume['name'], volume['id']) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.assertRaises(exception.VolumeDriverException, self.driver.extend_volume, volume, '16') self.driver.delete_snapshot(snap) self.driver.delete_volume(volume) @mock.patch.object(storwize_rep.StorwizeSVCReplicationGlobalMirror, 'create_relationship') @mock.patch.object(storwize_rep.StorwizeSVCReplicationGlobalMirror, 'extend_target_volume') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_storwize_svc_extend_volume_replication(self, get_relationship, delete_relationship, extend_target_volume, create_relationship): fake_target = mock.Mock() rep_type = 'global' self.driver.replications[rep_type] = ( self.driver.replication_factory(rep_type, fake_target)) volume = self._create_volume() volume['replication_status'] = 'enabled' fake_target_vol = 'vol-target-id' get_relationship.return_value = {'aux_vdisk_name': fake_target_vol} 
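# Expected flow, verified by the assertions just below: the driver drops the
# mirror relationship, extends the source vdisk, extends the target by the
# size delta (13 GB requested minus the 1 GB the volume was created with,
# hence the expected argument of 12), and then re-creates the relationship.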
with mock.patch.object( self.driver, '_get_volume_replicated_type_mirror') as mirror_type: mirror_type.return_value = 'global' self.driver.extend_volume(volume, '13') attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) delete_relationship.assert_called_once_with(volume) extend_target_volume.assert_called_once_with(fake_target_vol, 12) create_relationship.assert_called_once_with(volume, fake_target_vol) self.driver.delete_volume(volume) def test_storwize_svc_extend_volume_replication_failover(self): volume = self._create_volume() volume['replication_status'] = 'failed-over' with mock.patch.object( self.driver, '_get_volume_replicated_type_mirror') as mirror_type: mirror_type.return_value = 'global' self.driver.extend_volume(volume, '13') attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) self.driver.delete_volume(volume) def _check_loc_info(self, capabilities, expected): host = {'host': 'foo', 'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1} ctxt = context.get_admin_context() moved, model_update = self.driver.migrate_volume(ctxt, vol, host) self.assertEqual(expected['moved'], moved) self.assertEqual(expected['model_update'], model_update) def test_storwize_svc_migrate_bad_loc_info(self): self._check_loc_info({}, {'moved': False, 'model_update': None}) cap = {'location_info': 'foo'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) cap = {'location_info': 'FooDriver:foo:bar'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) cap = {'location_info': 'StorwizeSVCDriver:foo:bar'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) def test_storwize_svc_volume_migrate(self): # Make sure we don't call migrate_volume_vdiskcopy self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack2') cap = {'location_info': loc, 'extent_size': '256'} host = {'host': 'openstack@svc#openstack2', 'capabilities': cap} ctxt = context.get_admin_context() volume = self._create_volume() volume['volume_type_id'] = None self.driver.migrate_volume(ctxt, volume, host) self._delete_volume(volume) def test_storwize_svc_get_vdisk_params(self): self.driver.do_setup(None) fake_qos = {'qos:IOThrottling': 5000} expected_qos = {'IOThrottling': 5000} fake_opts = self._get_default_opts() # The parameters returned should be the same as the default options, # if the QoS is empty. vol_type_empty_qos = self._create_volume_type_qos(True, None) type_id = vol_type_empty_qos['id'] params = self.driver._get_vdisk_params(type_id, volume_type=vol_type_empty_qos, volume_metadata=None) self.assertEqual(fake_opts, params) volume_types.destroy(self.ctxt, type_id) # If the QoS is set via the qos association with the volume type, # qos value should be set in the returned parameters. vol_type_qos = self._create_volume_type_qos(False, fake_qos) type_id = vol_type_qos['id'] # If type_id is not none and volume_type is none, it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is not none and volume_type is not none, it should # work fine.
params = self.driver._get_vdisk_params(type_id, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is none and volume_type is not none, it should work fine. params = self.driver._get_vdisk_params(None, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If both type_id and volume_type are none, no qos will be returned # in the parameter. params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=None) self.assertIsNone(params['qos']) qos_spec = volume_types.get_volume_type_qos_specs(type_id) volume_types.destroy(self.ctxt, type_id) qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id']) # If the QoS is set via the extra specs in the volume type, # qos value should be set in the returned parameters. vol_type_qos = self._create_volume_type_qos(True, fake_qos) type_id = vol_type_qos['id'] # If type_id is not none and volume_type is none, it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is not none and volume_type is not none, # it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is none and volume_type is not none, # it should work fine. params = self.driver._get_vdisk_params(None, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If both type_id and volume_type are none, no qos will be returned # in the parameter. params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=None) self.assertIsNone(params['qos']) volume_types.destroy(self.ctxt, type_id) # If the QoS is set in the volume metadata, # qos value should be set in the returned parameters. metadata = [{'key': 'qos:IOThrottling', 'value': 4000}] expected_qos_metadata = {'IOThrottling': 4000} params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=metadata) self.assertEqual(expected_qos_metadata, params['qos']) # If the QoS is set both in the metadata and the volume type, the one # in the volume type will take effect. vol_type_qos = self._create_volume_type_qos(True, fake_qos) type_id = vol_type_qos['id'] params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=metadata) self.assertEqual(expected_qos, params['qos']) volume_types.destroy(self.ctxt, type_id) # If the QoS is set both via the qos association and the # extra specs, the one from the qos association will take effect.
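# Taken together, the cases above and below pin down the precedence of QoS
# sources in _get_vdisk_params: qos-specs association over volume-type extra
# specs over volume metadata.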
fake_qos_associate = {'qos:IOThrottling': 6000} expected_qos_associate = {'IOThrottling': 6000} vol_type_qos = self._create_volume_type_qos_both(fake_qos, fake_qos_associate) type_id = vol_type_qos['id'] params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos_associate, params['qos']) qos_spec = volume_types.get_volume_type_qos_specs(type_id) volume_types.destroy(self.ctxt, type_id) qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'disable_vdisk_qos') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'update_vdisk_qos') def test_storwize_svc_retype_no_copy(self, update_vdisk_qos, disable_vdisk_qos): self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host['host'] new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertEqual('on', attrs['easy_tier'], 'Volume retype failed') self.assertEqual('5', attrs['warning'], 'Volume retype failed') self.assertEqual('off', attrs['autoexpand'], 'Volume retype failed') self.driver.delete_volume(volume) fake_opts = self._get_default_opts() fake_opts_old = self._get_default_opts() fake_opts_old['qos'] = {'IOThrottling': 4000} fake_opts_qos = self._get_default_opts() fake_opts_qos['qos'] = {'IOThrottling': 5000} self.driver.create_volume(volume) with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for both the source and target volumes, # update_vdisk_qos and disable_vdisk_qos will not be called for # retype. get_vdisk_params.side_effect = [fake_opts, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is specified for both source and target volumes, # update_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called.
get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for source and specified for target volume, # update_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for target volume and specified for source # volume, update_vdisk_qos will not be called for retype, and # disable_vdisk_qos will be called. get_vdisk_params.side_effect = [fake_opts_qos, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) disable_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.driver.delete_volume(volume) def test_storwize_svc_retype_only_change_iogrp(self): self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'iogrp': 0} key_specs_new = {'iogrp': 1} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host['host'] new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertEqual('1', attrs['IO_group_id'], 'Volume retype ' 'failed') self.driver.delete_volume(volume) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'disable_vdisk_qos') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'update_vdisk_qos') def test_storwize_svc_retype_need_copy(self, update_vdisk_qos, disable_vdisk_qos): self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'compression': True, 'iogrp': 0} key_specs_new = {'compression': False, 'iogrp': 1} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type volume['host'] = host['host'] new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertEqual('no', attrs['compressed_copy']) self.assertEqual('1', attrs['IO_group_id'], 'Volume retype ' 'failed') self.driver.delete_volume(volume) fake_opts = self._get_default_opts() fake_opts_old = self._get_default_opts() fake_opts_old['qos'] = {'IOThrottling': 4000} fake_opts_qos = self._get_default_opts() fake_opts_qos['qos'] = {'IOThrottling': 5000} self.driver.create_volume(volume) with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for both the source and target volumes, # update_vdisk_qos and disable_vdisk_qos will not be called for # retype. get_vdisk_params.side_effect = [fake_opts, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is specified for both source and target volumes, # update_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for source and specified for target volume, # update_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for target volume and specified for source # volume, update_vdisk_qos will not be called for retype, and # disable_vdisk_qos will be called.
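# Summary of the four QoS retype combinations exercised in this test:
# source qos | target qos | update_vdisk_qos | disable_vdisk_qos
# empty | empty | not called | not called
# set | set | called | not called
# empty | set | called | not called
# set | empty | not called | called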
get_vdisk_params.side_effect = [fake_opts_qos, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) disable_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.driver.delete_volume(volume) def test_set_storage_code_level_success(self): res = self.driver._helpers.get_system_info() if self.USESIM: self.assertEqual((7, 2, 0, 0), res['code_level'], 'Get code level error') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'rename_vdisk') def test_storwize_update_migrated_volume(self, rename_vdisk): ctxt = testutils.get_test_admin_context() backend_volume = self._create_volume() volume = self._create_volume() model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'available') rename_vdisk.assert_called_once_with(backend_volume.name, volume.name) self.assertEqual({'_name_id': None}, model_update) rename_vdisk.reset_mock() rename_vdisk.side_effect = exception.VolumeBackendAPIException model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'available') self.assertEqual({'_name_id': backend_volume.id}, model_update) rename_vdisk.reset_mock() rename_vdisk.side_effect = exception.VolumeBackendAPIException model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'attached') self.assertEqual({'_name_id': backend_volume.id}, model_update) def test_storwize_vdisk_copy_ops(self): ctxt = testutils.get_test_admin_context() volume = self._create_volume() driver = self.driver dest_pool = volume_utils.extract_host(volume['host'], 'pool') new_ops = driver._helpers.add_vdisk_copy(volume['name'], dest_pool, None, self.driver._state, self.driver.configuration) self.driver._add_vdisk_copy_op(ctxt, volume, new_ops) admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume['id']) self.assertEqual(":".join(x for x in new_ops), admin_metadata['vdiskcopyops'], 'Storwize driver add vdisk copy error.') self.driver._check_volume_copy_ops() self.driver._rm_vdisk_copy_op(ctxt, volume, new_ops[0], new_ops[1]) admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume['id']) self.assertIsNone(admin_metadata.get('vdiskcopyops', None), 'Storwize driver delete vdisk copy error') self._delete_volume(volume) def test_storwize_delete_with_vdisk_copy_ops(self): volume = self._create_volume() self.driver._vdiskcopyops = {volume['id']: [('0', '1')]} with mock.patch.object(self.driver, '_vdiskcopyops_loop'): self.assertIn(volume['id'], self.driver._vdiskcopyops) self.driver.delete_volume(volume) self.assertNotIn(volume['id'], self.driver._vdiskcopyops) def test_storwize_create_volume_with_replication_disable(self): volume = self._generate_vol_info(None, None) model_update = self.driver.create_volume(volume) self.assertIsNone(model_update) model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertIsNone(model_update) def test_storwize_create_volume_with_strech_cluster_replication(self): # Set replication flag, set pool openstack2 for secondary volume. self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2') # Create a type for replication.
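# The replication-enabled type used below carries the extra spec
# {'capabilities:replication': ' True'} (see _create_replication_volume_type
# later in this class); that spec is what steers the volume onto the
# stretched-cluster code path.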
volume = self._generate_vol_info(None, None) volume_type = self._create_replication_volume_type(True) volume['volume_type_id'] = volume_type['id'] self.driver.do_setup(self.ctxt) model_update = self.driver.create_volume(volume) self.assertEqual('copying', model_update['replication_status']) volume['replication_status'] = 'copying' volume['replication_extended_status'] = None model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('copying', model_update['replication_status']) # Primary copy offline, secondary copy online, data consistent self.sim.change_vdiskcopy_attr(volume['name'], 'status', 'offline') model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('active-stop', model_update['replication_status']) # Primary copy offline, secondary copy online, data inconsistent self.sim.change_vdiskcopy_attr(volume['name'], 'sync', 'No', copy="secondary") model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('error', model_update['replication_status']) # Primary copy online, secondary copy offline, data consistent self.sim.change_vdiskcopy_attr(volume['name'], 'sync', 'yes', copy="secondary") self.sim.change_vdiskcopy_attr(volume['name'], 'status', 'offline', copy="secondary") self.sim.change_vdiskcopy_attr(volume['name'], 'status', 'online') model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('error', model_update['replication_status']) # Primary copy online, secondary copy offline, data inconsistent self.sim.change_vdiskcopy_attr(volume['name'], 'sync', 'no', copy="secondary") model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('error', model_update['replication_status']) # Primary copy offline, secondary copy offline, data consistent self.sim.change_vdiskcopy_attr(volume['name'], 'sync', 'yes', copy="secondary") self.sim.change_vdiskcopy_attr(volume['name'], 'status', 'offline', copy="primary") model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('error', model_update['replication_status']) # Primary copy offline, secondary copy offline, data inconsistent self.sim.change_vdiskcopy_attr(volume['name'], 'sync', 'no', copy="secondary") model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('error', model_update['replication_status']) # Primary copy online, secondary copy online, data inconsistent self.sim.change_vdiskcopy_attr(volume['name'], 'status', 'online', copy="secondary") self.sim.change_vdiskcopy_attr(volume['name'], 'status', 'online', copy="primary") self.sim.change_vdiskcopy_attr(volume['name'], 'sync', 'no', copy="secondary") model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('copying', model_update['replication_status']) # Primary copy online, secondary copy online, data consistent self.sim.change_vdiskcopy_attr(volume['name'], 'sync', 'yes', copy="secondary") model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertEqual('active', model_update['replication_status']) # Check the volume copy created on pool openstack2. attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertIn('openstack2', attrs['mdisk_grp_name']) primary_status = attrs['primary'] self.driver.promote_replica(self.ctxt, volume) # After promote_replica, primary copy should be switched.
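# 'primary' is reported per copy, so promotion is checked with the
# index-swapped assertions that follow: the former secondary copy must now
# report itself as primary, and vice versa.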
attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertEqual(primary_status[0], attrs['primary'][1]) self.assertEqual(primary_status[1], attrs['primary'][0]) self.driver.delete_volume(volume) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertIsNone(attrs) def test_storwize_create_cloned_volume_with_strech_cluster_replica(self): # Set replication flag, set pool openstack2 for secondary volume. self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2') self.driver.do_setup(self.ctxt) # Create a source volume. src_volume = self._generate_vol_info(None, None) self.driver.create_volume(src_volume) # Create a type for replication. volume = self._generate_vol_info(None, None) volume_type = self._create_replication_volume_type(True) volume['volume_type_id'] = volume_type['id'] # Create a cloned volume from source volume. model_update = self.driver.create_cloned_volume(volume, src_volume) self.assertEqual('copying', model_update['replication_status']) # Check the replication volume created on pool openstack2. attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertIn('openstack2', attrs['mdisk_grp_name']) def test_storwize_create_snapshot_volume_with_strech_cluster_replica(self): # Set replication flag, set pool openstack2 for secondary volume. self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2') self.driver.do_setup(self.ctxt) vol1 = self._create_volume() snap = self._generate_vol_info(vol1['name'], vol1['id']) self.driver.create_snapshot(snap) # Create a type for replication. vol2 = self._generate_vol_info(None, None) volume_type = self._create_replication_volume_type(True) vol2['volume_type_id'] = volume_type['id'] model_update = self.driver.create_volume_from_snapshot(vol2, snap) self._assert_vol_exists(vol2['name'], True) self.assertEqual('copying', model_update['replication_status']) # Check the replication volume created on pool openstack2.
attrs = self.driver._helpers.get_vdisk_attributes(vol2['name']) self.assertIn('openstack2', attrs['mdisk_grp_name']) def test_storwize_retype_with_strech_cluster_replication(self): self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2') self.driver.do_setup(self.ctxt) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() disable_type = self._create_replication_volume_type(False) enable_type = self._create_replication_volume_type(True) diff, _equal = volume_types.volume_types_diff(ctxt, disable_type['id'], enable_type['id']) volume = self._generate_vol_info(None, None) volume['host'] = host['host'] volume['volume_type_id'] = disable_type['id'] volume['volume_type'] = disable_type volume['replication_status'] = None volume['replication_extended_status'] = None # Create volume which is not volume replication self.driver.create_volume(volume) # volume should be DB object in this parameter model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertIs('error', model_update['replication_status']) # Enable replica self.driver.retype(ctxt, volume, enable_type, diff, host) model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertIs('copying', model_update['replication_status']) self.driver.delete_volume(volume) def test_storwize_retype_from_none_to_strech_cluster_replication(self): self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2') self.driver.do_setup(self.ctxt) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() volume = self._generate_vol_info(None, None) volume['volume_type_id'] = None volume['volume_type'] = None volume['replication_status'] = "disabled" volume['replication_extended_status'] = None volume['host'] = host['host'] # Create volume which is not volume replication model_update = self.driver.create_volume(volume) self.assertIsNone(model_update) # volume should be DB object in this parameter model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertIsNone(model_update) enable_type = self._create_replication_volume_type(True) diff, _equal = volume_types.volume_types_diff(ctxt, None, enable_type['id']) # Enable replica self.driver.retype(ctxt, volume, enable_type, diff, host) # In DB replication_status will be updated volume['replication_status'] = None model_update = self.driver.get_replication_status(self.ctxt, volume) self.assertIs('copying', model_update['replication_status']) self.driver.delete_volume(volume) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_storwize_consistency_group_snapshot(self): cg_type = self._create_consistency_group_volume_type() self.ctxt.user_id = fake.user_id self.ctxt.project_id = fake.project_id cg = self._create_consistencygroup_in_db(volume_type_id=cg_type['id']) model_update = self.driver.create_consistencygroup(self.ctxt, cg) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status'], "CG created failed") # Add volumes to CG self._create_volume(volume_type_id=cg_type['id'], consistencygroup_id=cg['id']) 
self._create_volume(volume_type_id=cg_type['id'], consistencygroup_id=cg['id']) self._create_volume(volume_type_id=cg_type['id'], consistencygroup_id=cg['id']) cg_snapshot = self._create_cgsnapshot_in_db(cg['id']) model_update = self.driver.create_cgsnapshot(self.ctxt, cg_snapshot, []) self.assertEqual('available', model_update[0]['status'], "CGSnapshot created failed") for snapshot in model_update[1]: self.assertEqual('available', snapshot['status']) model_update = self.driver.delete_consistencygroup(self.ctxt, cg, []) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_storwize_consistency_group_from_src_invalid(self): # Invalid input case for create cg from src cg_type = self._create_consistency_group_volume_type() self.ctxt.user_id = fake.user_id self.ctxt.project_id = fake.project_id # create cg in db cg = self._create_consistencygroup_in_db(volume_type_id=cg_type['id']) # create volumes in db vol1 = testutils.create_volume(self.ctxt, volume_type_id=cg_type['id'], consistencygroup_id=cg['id']) vol2 = testutils.create_volume(self.ctxt, volume_type_id=cg_type['id'], consistencygroup_id=cg['id']) volumes = [vol1, vol2] source_cg = self._create_consistencegroup(volume_type_id=cg_type['id']) # Add volumes to source CG src_vol1 = self._create_volume(volume_type_id=cg_type['id'], consistencygroup_id=source_cg['id']) src_vol2 = self._create_volume(volume_type_id=cg_type['id'], consistencygroup_id=source_cg['id']) source_vols = [src_vol1, src_vol2] cgsnapshot, snapshots = self._create_cgsnapshot(source_cg['id']) # Create cg from src with null input self.assertRaises(exception.InvalidInput, self.driver.create_consistencygroup_from_src, self.ctxt, cg, volumes, None, None, None, None) # Create cg from src with source_cg and empty source_vols self.assertRaises(exception.InvalidInput, self.driver.create_consistencygroup_from_src, self.ctxt, cg, volumes, None, None, source_cg, None) # Create cg from src with source_vols and empty source_cg self.assertRaises(exception.InvalidInput, self.driver.create_consistencygroup_from_src, self.ctxt, cg, volumes, None, None, None, source_vols) # Create cg from src with cgsnapshot and empty snapshots self.assertRaises(exception.InvalidInput, self.driver.create_consistencygroup_from_src, self.ctxt, cg, volumes, cgsnapshot, None, None, None) # Create cg from src with snapshots and empty cgsnapshot self.assertRaises(exception.InvalidInput, self.driver.create_consistencygroup_from_src, self.ctxt, cg, volumes, None, snapshots, None, None) model_update = self.driver.delete_consistencygroup(self.ctxt, cg, []) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) model_update = ( self.driver.delete_consistencygroup(self.ctxt, source_cg, [])) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) model_update = ( self.driver.delete_consistencygroup(self.ctxt, cgsnapshot, [])) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def 
test_storwize_consistency_group_from_src(self): # Valid case for create cg from src cg_type = self._create_consistency_group_volume_type() self.ctxt.user_id = fake.user_id self.ctxt.project_id = fake.project_id pool = _get_test_pool() # Create cg in db cg = self._create_consistencygroup_in_db(volume_type_id=cg_type['id']) # Create volumes in db testutils.create_volume(self.ctxt, volume_type_id=cg_type['id'], consistencygroup_id=cg['id'], host='openstack@svc#%s' % pool) testutils.create_volume(self.ctxt, volume_type_id=cg_type['id'], consistencygroup_id=cg['id'], host='openstack@svc#%s' % pool) volumes = ( self.db.volume_get_all_by_group(self.ctxt.elevated(), cg['id'])) # Create source CG source_cg = self._create_consistencegroup(volume_type_id=cg_type['id']) # Add volumes to source CG self._create_volume(volume_type_id=cg_type['id'], consistencygroup_id=source_cg['id']) self._create_volume(volume_type_id=cg_type['id'], consistencygroup_id=source_cg['id']) source_vols = self.db.volume_get_all_by_group( self.ctxt.elevated(), source_cg['id']) # Create cgsnapshot cgsnapshot, snapshots = self._create_cgsnapshot(source_cg['id']) # Create cg from source cg model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src(self.ctxt, cg, volumes, None, None, source_cg, source_vols)) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status'], "CG create from src created failed") for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) model_update = self.driver.delete_consistencygroup(self.ctxt, cg, []) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update[0]['status']) for each_vol in model_update[1]: self.assertEqual('deleted', each_vol['status']) # Create cg from cg snapshot model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src(self.ctxt, cg, volumes, cgsnapshot, snapshots, None, None)) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status'], "CG create from src created failed") for each_vol in volumes: self.assertEqual('available', each_vol['status']) model_update = self.driver.delete_consistencygroup(self.ctxt, cg, []) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update[0]['status']) for each_vol in model_update[1]: self.assertEqual('deleted', each_vol['status']) model_update = self.driver.delete_consistencygroup(self.ctxt, cgsnapshot, []) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) model_update = self.driver.delete_consistencygroup(self.ctxt, source_cg, []) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, model_update[0]['status']) for each_vol in model_update[1]: self.assertEqual('deleted', each_vol['status']) def _create_volume_type_qos(self, extra_specs, fake_qos): # Generate a QoS volume type for volume. 
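# The helper below models the two ways a volume type can carry QoS, matching
# the cases tested above: extra specs written directly on the type (e.g.
# {'qos:IOThrottling': 5000}) or a qos_specs object associated with the type
# through qos_specs.associate_qos_with_type().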
if extra_specs: spec = fake_qos type_ref = volume_types.create(self.ctxt, "qos_extra_specs", spec) else: type_ref = volume_types.create(self.ctxt, "qos_associate", None) if fake_qos: qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return qos_type def _create_volume_type_qos_both(self, fake_qos, fake_qos_associate): type_ref = volume_types.create(self.ctxt, "qos_extra_specs", fake_qos) qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos_associate) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return qos_type def _create_replication_volume_type(self, enable): # Generate a volume type for volume replication. if enable: spec = {'capabilities:replication': ' True'} type_ref = volume_types.create(self.ctxt, "replication_1", spec) else: spec = {'capabilities:replication': ' False'} type_ref = volume_types.create(self.ctxt, "replication_2", spec) replication_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return replication_type def _create_consistency_group_volume_type(self): # Generate a volume type for volume consistencygroup. spec = {'capabilities:consistencygroup_support': ' True'} type_ref = volume_types.create(self.ctxt, "cg", spec) cg_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return cg_type def _get_vdisk_uid(self, vdisk_name): """Return vdisk_UID for given vdisk. Given a vdisk by name, performs an lsvdisk command that extracts the vdisk_UID parameter and returns it. Returns None if the specified vdisk does not exist. """ vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name, delim='!') # Iterate through each row until we find the vdisk_UID entry for row in vdisk_properties.split('\n'): words = row.split('!') if words[0] == 'vdisk_UID': return words[1] return None def _create_volume_and_return_uid(self, volume_name): """Creates a volume and returns its UID. Creates a volume with the specified name, and returns the UID that the Storwize controller allocated for it. We do this by executing a create_volume and then calling into the simulator to perform an lsvdisk directly. """ volume = self._generate_vol_info(None, None) self.driver.create_volume(volume) return (volume, self._get_vdisk_uid(volume['name'])) def test_manage_existing_get_size_bad_ref(self): """Error on manage with bad reference. This test case attempts to manage an existing volume but passes in a bad reference that the Storwize driver doesn't understand. We expect an exception to be raised. """ volume = self._generate_vol_info(None, None) ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_get_size_bad_uid(self): """Error when the specified UUID does not exist.""" volume = self._generate_vol_info(None, None) ref = {'source-id': 'bad_uid'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_get_size_bad_name(self): """Error when the specified name does not exist.""" volume = self._generate_vol_info(None, None) ref = {'source-name': 'bad_name'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_bad_ref(self): """Error on manage with bad reference.
This test case attempts to manage an existing volume but passes in a bad reference that the Storwize driver doesn't understand. We expect an exception to be raised. """ # Error when neither UUID nor name are specified. volume = self._generate_vol_info(None, None) ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) # Error when the specified UUID does not exist. volume = self._generate_vol_info(None, None) ref = {'source-id': 'bad_uid'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) # Error when the specified name does not exist. volume = self._generate_vol_info(None, None) ref = {'source-name': 'bad_name'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_copy_attrs') def test_manage_existing_mismatch(self, get_vdisk_copy_attrs): ctxt = testutils.get_test_admin_context() _volume, uid = self._create_volume_and_return_uid('manage_test') opts = {'rsize': -1} type_thick_ref = volume_types.create(ctxt, 'testtype1', opts) opts = {'rsize': 2} type_thin_ref = volume_types.create(ctxt, 'testtype2', opts) opts = {'rsize': 2, 'compression': True} type_comp_ref = volume_types.create(ctxt, 'testtype3', opts) opts = {'rsize': -1, 'iogrp': 1} type_iogrp_ref = volume_types.create(ctxt, 'testtype4', opts) new_volume = self._generate_vol_info(None, None) ref = {'source-name': _volume['name']} fake_copy_thin = self._get_default_opts() fake_copy_thin['autoexpand'] = 'on' fake_copy_comp = self._get_default_opts() fake_copy_comp['autoexpand'] = 'on' fake_copy_comp['compressed_copy'] = 'yes' fake_copy_thick = self._get_default_opts() fake_copy_thick['autoexpand'] = '' fake_copy_thick['compressed_copy'] = 'no' fake_copy_no_comp = self._get_default_opts() fake_copy_no_comp['compressed_copy'] = 'no' valid_iogrp = self.driver._state['available_iogrps'] self.driver._state['available_iogrps'] = [9999] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self.driver._state['available_iogrps'] = valid_iogrp get_vdisk_copy_attrs.side_effect = [fake_copy_thin, fake_copy_thick, fake_copy_no_comp, fake_copy_comp, fake_copy_thick, fake_copy_thick ] new_volume['volume_type_id'] = type_thick_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thin_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_comp_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thin_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_iogrp_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thick_ref['id'] no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999) new_volume['host'] = 'openstack@svc#%s' % no_exist_pool self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self._reset_flags() volume_types.destroy(ctxt, type_thick_ref['id']) volume_types.destroy(ctxt, type_comp_ref['id']) 
        volume_types.destroy(ctxt, type_iogrp_ref['id'])

    def test_manage_existing_good_uid_not_mapped(self):
        """Tests managing a volume with no mappings.

        This test case attempts to manage an existing volume by UID, and
        we expect it to succeed.  We verify that the backend volume was
        renamed to have the name of the Cinder volume that we asked for it
        to be associated with.
        """
        # Create a volume as a way of getting a vdisk created, and find
        # out the UID of that vdisk.
        _volume, uid = self._create_volume_and_return_uid('manage_test')

        # Descriptor of the Cinder volume that we want to own the vdisk
        # referenced by uid.
        new_volume = self._generate_vol_info(None, None)

        # Submit the request to manage it.
        ref = {'source-id': uid}
        size = self.driver.manage_existing_get_size(new_volume, ref)
        self.assertEqual(10, size)
        self.driver.manage_existing(new_volume, ref)

        # Assert that there is a disk named after the new volume that has
        # the ID that we passed in, indicating that the disk has been
        # renamed.
        uid_of_new_volume = self._get_vdisk_uid(new_volume['name'])
        self.assertEqual(uid, uid_of_new_volume)

    def test_manage_existing_good_name_not_mapped(self):
        """Tests managing a volume with no mappings.

        This test case attempts to manage an existing volume by name, and
        we expect it to succeed.  We verify that the backend volume was
        renamed to have the name of the Cinder volume that we asked for it
        to be associated with.
        """
        # Create a volume as a way of getting a vdisk created, and find
        # out the UID of that vdisk.
        _volume, uid = self._create_volume_and_return_uid('manage_test')

        # Descriptor of the Cinder volume that we want to own the vdisk
        # referenced by uid.
        new_volume = self._generate_vol_info(None, None)

        # Submit the request to manage it.
        ref = {'source-name': _volume['name']}
        size = self.driver.manage_existing_get_size(new_volume, ref)
        self.assertEqual(10, size)
        self.driver.manage_existing(new_volume, ref)

        # Assert that there is a disk named after the new volume that has
        # the ID that we passed in, indicating that the disk has been
        # renamed.
        uid_of_new_volume = self._get_vdisk_uid(new_volume['name'])
        self.assertEqual(uid, uid_of_new_volume)

    def test_manage_existing_mapped(self):
        """Tests managing a mapped volume with no override.

        This test case attempts to manage an existing volume by UID, but
        the volume is mapped to a host, so we expect to see an exception
        raised.
        """
        # Create a volume as a way of getting a vdisk created, and find
        # out the UID of that vdisk.
        volume, uid = self._create_volume_and_return_uid('manage_test')

        # Map a host to the disk
        conn = {'initiator': u'unicode:initiator3',
                'ip': '10.10.10.12',
                'host': u'unicode.foo}.bar}.baz'}
        self.driver.initialize_connection(volume, conn)

        # Descriptor of the Cinder volume that we want to own the vdisk
        # referenced by uid.
        volume = self._generate_vol_info(None, None)
        ref = {'source-id': uid}

        # Attempt to manage this disk, and expect an exception because
        # the volume is already mapped.
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, volume, ref)

        ref = {'source-name': volume['name']}
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, volume, ref)

    def test_manage_existing_good_uid_mapped_with_override(self):
        """Tests managing a mapped volume with override.

        This test case attempts to manage an existing volume by UID, when
        it is already mapped to a host, but the ref specifies that this is
        OK.
        We verify that the backend volume was renamed to have the name of
        the Cinder volume that we asked for it to be associated with.
        """
        # Create a volume as a way of getting a vdisk created, and find
        # out the UID of that vdisk.
        volume, uid = self._create_volume_and_return_uid('manage_test')

        # Map a host to the disk
        conn = {'initiator': u'unicode:initiator3',
                'ip': '10.10.10.12',
                'host': u'unicode.foo}.bar}.baz'}
        self.driver.initialize_connection(volume, conn)

        # Descriptor of the Cinder volume that we want to own the vdisk
        # referenced by uid.
        new_volume = self._generate_vol_info(None, None)

        # Submit the request to manage it, specifying that it is OK to
        # manage a volume that is already attached.
        ref = {'source-id': uid, 'manage_if_in_use': True}
        size = self.driver.manage_existing_get_size(new_volume, ref)
        self.assertEqual(10, size)
        self.driver.manage_existing(new_volume, ref)

        # Assert that there is a disk named after the new volume that has
        # the ID that we passed in, indicating that the disk has been
        # renamed.
        uid_of_new_volume = self._get_vdisk_uid(new_volume['name'])
        self.assertEqual(uid, uid_of_new_volume)

    def test_manage_existing_good_name_mapped_with_override(self):
        """Tests managing a mapped volume with override.

        This test case attempts to manage an existing volume by name, when
        it is already mapped to a host, but the ref specifies that this is
        OK.  We verify that the backend volume was renamed to have the name
        of the Cinder volume that we asked for it to be associated with.
        """
        # Create a volume as a way of getting a vdisk created, and find
        # out the UID of that vdisk.
        volume, uid = self._create_volume_and_return_uid('manage_test')

        # Map a host to the disk
        conn = {'initiator': u'unicode:initiator3',
                'ip': '10.10.10.12',
                'host': u'unicode.foo}.bar}.baz'}
        self.driver.initialize_connection(volume, conn)

        # Descriptor of the Cinder volume that we want to own the vdisk
        # referenced by uid.
        new_volume = self._generate_vol_info(None, None)

        # Submit the request to manage it, specifying that it is OK to
        # manage a volume that is already attached.
        ref = {'source-name': volume['name'], 'manage_if_in_use': True}
        size = self.driver.manage_existing_get_size(new_volume, ref)
        self.assertEqual(10, size)
        self.driver.manage_existing(new_volume, ref)

        # Assert that there is a disk named after the new volume that has
        # the ID that we passed in, indicating that the disk has been
        # renamed.
        uid_of_new_volume = self._get_vdisk_uid(new_volume['name'])
        self.assertEqual(uid, uid_of_new_volume)


class CLIResponseTestCase(test.TestCase):
    def test_empty(self):
        self.assertEqual(0, len(
            storwize_svc_common.CLIResponse('')))
        self.assertEqual(0, len(
            storwize_svc_common.CLIResponse(('', 'stderr'))))

    def test_header(self):
        raw = r'''id!name
1!node1
2!node2
'''
        resp = storwize_svc_common.CLIResponse(raw, with_header=True)
        self.assertEqual(2, len(resp))
        self.assertEqual('1', resp[0]['id'])
        self.assertEqual('2', resp[1]['id'])

    def test_select(self):
        raw = r'''id!123
name!Bill
name!Bill2
age!30
home address!s1
home address!s2
id!
7
name!John
name!John2
age!40
home address!s3
home address!s4
'''
        resp = storwize_svc_common.CLIResponse(raw, with_header=False)
        self.assertEqual([('s1', 'Bill', 's1'),
                          ('s2', 'Bill2', 's2'),
                          ('s3', 'John', 's3'),
                          ('s4', 'John2', 's4')],
                         list(resp.select('home address', 'name',
                                          'home address')))

    def test_lsnode_all(self):
        raw = r'''id!name!UPS_serial_number!WWNN!status
1!node1!!500507680200C744!online
2!node2!!500507680200C745!online
'''
        resp = storwize_svc_common.CLIResponse(raw)
        self.assertEqual(2, len(resp))
        self.assertEqual('1', resp[0]['id'])
        self.assertEqual('500507680200C744', resp[0]['WWNN'])
        self.assertEqual('2', resp[1]['id'])
        self.assertEqual('500507680200C745', resp[1]['WWNN'])

    def test_lsnode_single(self):
        raw = r'''id!1
port_id!500507680210C744
port_status!active
port_speed!8Gb
port_id!500507680240C744
port_status!inactive
port_speed!8Gb
'''
        resp = storwize_svc_common.CLIResponse(raw, with_header=False)
        self.assertEqual(1, len(resp))
        self.assertEqual('1', resp[0]['id'])
        self.assertEqual([('500507680210C744', 'active'),
                          ('500507680240C744', 'inactive')],
                         list(resp.select('port_id', 'port_status')))


class StorwizeHelpersTestCase(test.TestCase):
    def setUp(self):
        super(StorwizeHelpersTestCase, self).setUp()
        self.storwize_svc_common = storwize_svc_common.StorwizeHelpers(None)

    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lslicense')
    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsguicapabilities')
    def test_compression_enabled(self, lsguicapabilities, lslicense):
        fake_license_without_keys = {}
        fake_license = {
            'license_compression_enclosures': '1',
            'license_compression_capacity': '1'
        }
        fake_license_scheme = {
            'license_scheme': '9846'
        }
        fake_license_invalid_scheme = {
            'license_scheme': '0000'
        }

        lslicense.side_effect = [fake_license_without_keys,
                                 fake_license_without_keys,
                                 fake_license,
                                 fake_license_without_keys]
        lsguicapabilities.side_effect = [fake_license_without_keys,
                                         fake_license_invalid_scheme,
                                         fake_license_scheme]
        self.assertFalse(self.storwize_svc_common.compression_enabled())

        self.assertFalse(self.storwize_svc_common.compression_enabled())

        self.assertTrue(self.storwize_svc_common.compression_enabled())

        self.assertTrue(self.storwize_svc_common.compression_enabled())


class StorwizeSSHTestCase(test.TestCase):
    def setUp(self):
        super(StorwizeSSHTestCase, self).setUp()
        self.storwize_ssh = storwize_svc_common.StorwizeSSH(None)

    def test_mkvdiskhostmap(self):
        # mkvdiskhostmap should not be returning anything
        with mock.patch.object(
                storwize_svc_common.StorwizeSSH,
                'run_ssh_check_created') as run_ssh_check_created:
            run_ssh_check_created.return_value = None
            ret = self.storwize_ssh.mkvdiskhostmap('HOST1', 9999, 511, False)
            self.assertIsNone(ret)
            ret = self.storwize_ssh.mkvdiskhostmap('HOST2', 9999, 511, True)
            self.assertIsNone(ret)
            ex = exception.VolumeBackendAPIException(data='CMMVC6071E')
            run_ssh_check_created.side_effect = ex
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.storwize_ssh.mkvdiskhostmap,
                              'HOST3', 9999, 511, True)


class StorwizeSVCReplicationMirrorTestCase(test.TestCase):

    rep_type = 'global'
    mirror_class = storwize_rep.StorwizeSVCReplicationGlobalMirror

    def setUp(self):
        super(StorwizeSVCReplicationMirrorTestCase, self).setUp()
        self.svc_driver = storwize_svc_iscsi.StorwizeSVCISCSIDriver(
            configuration=conf.Configuration(None))
        extra_spec_rep_type = '<in> ' + self.rep_type
        fake_target = {"managed_backend_name": "second_host@sv2#sv2",
                       "replication_mode": self.rep_type,
                       "backend_id": "svc_id_target",
                       "san_ip": "192.168.10.23",
                       "san_login": "admin",
                       "san_password": "admin",
                       "pool_name": "cinder_target"}
        self.fake_targets = [fake_target]
        self.driver = self.mirror_class(self.svc_driver, fake_target,
                                        storwize_svc_common.StorwizeHelpers)
        self.svc_driver.configuration.set_override('replication_device',
                                                   self.fake_targets)
        self.svc_driver._replication_targets = self.fake_targets
        self.svc_driver._replication_enabled = True
        self.svc_driver.replications[self.rep_type] = (
            self.svc_driver.replication_factory(self.rep_type, fake_target))
        self.ctxt = context.get_admin_context()
        self.fake_volume_id = six.text_type(uuid.uuid4())
        pool = _get_test_pool()
        self.volume = {'name': 'volume-%s' % self.fake_volume_id,
                       'size': 10,
                       'id': '%s' % self.fake_volume_id,
                       'volume_type_id': None,
                       'mdisk_grp_name': 'openstack',
                       'replication_status': 'disabled',
                       'replication_extended_status': None,
                       'volume_metadata': None,
                       'host': 'openstack@svc#%s' % pool}
        spec = {'replication_enabled': '<is> True',
                'replication_type': extra_spec_rep_type}
        type_ref = volume_types.create(self.ctxt, "replication", spec)
        self.replication_type = volume_types.get_volume_type(self.ctxt,
                                                             type_ref['id'])
        self.volume['volume_type_id'] = self.replication_type['id']
        self.volume['volume_type'] = self.replication_type
        self.volumes = [self.volume]

    def test_storwize_do_replication_setup(self):
        self.svc_driver.configuration.set_override('san_ip', "192.168.10.23")
        self.svc_driver.configuration.set_override('replication_device',
                                                   self.fake_targets)
        self.svc_driver._do_replication_setup()

    def test_storwize_do_replication_setup_unmanaged(self):
        fake_target = {"replication_mode": self.rep_type,
                       "backend_id": "svc_id_target",
                       "san_ip": "192.168.10.23",
                       "san_login": "admin",
                       "san_password": "admin",
                       "pool_name": "cinder_target"}
        fake_targets = [fake_target]
        self.svc_driver.configuration.set_override('san_ip', "192.168.10.23")
        self.svc_driver.configuration.set_override('replication_device',
                                                   fake_targets)
        self.assertRaises(exception.InvalidConfigurationValue,
                          self.svc_driver._do_replication_setup)

    @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_vdisk')
    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
                       'get_vdisk_params')
    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(mirror_class, 'volume_replication_setup')
    def test_storwize_create_volume_with_mirror_replication(
            self, rep_setup, ctx, get_vdisk_params, create_vdisk):
        ctx.return_value = self.ctxt
        get_vdisk_params.return_value = {'replication': None, 'qos': None}
        self.svc_driver.create_volume(self.volume)
        rep_setup.assert_called_once_with(self.ctxt, self.volume)

    @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_copy')
    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
                       'get_vdisk_params')
    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(mirror_class, 'volume_replication_setup')
    def test_storwize_create_volume_from_snap_with_mirror_replication(
            self, rep_setup, ctx, get_vdisk_params, create_copy):
        ctx.return_value = self.ctxt
        get_vdisk_params.return_value = {'replication': None, 'qos': None}
        snapshot = {'id': 'snapshot-id',
                    'name': 'snapshot-name',
                    'volume_size': 10}
        model_update = self.svc_driver.create_volume_from_snapshot(
            self.volume, snapshot)
        rep_setup.assert_called_once_with(self.ctxt, self.volume)
        self.assertEqual({'replication_status': 'enabled'}, model_update)

    @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_copy')
    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
                       'get_vdisk_params')
    @mock.patch.object(context, 'get_admin_context')
@mock.patch.object(mirror_class, 'volume_replication_setup') def test_storwize_clone_volume_with_mirror_replication( self, rep_setup, ctx, get_vdisk_params, create_copy): ctx.return_value = self.ctxt get_vdisk_params.return_value = {'replication': None, 'qos': None} rand_id = six.text_type(random.randint(10000, 99999)) pool = _get_test_pool() target_volume = {'name': 'test_volume%s' % rand_id, 'size': 10, 'id': '%s' % rand_id, 'volume_type_id': None, 'mdisk_grp_name': 'openstack', 'replication_status': 'disabled', 'replication_extended_status': None, 'volume_metadata': None, 'host': 'openstack@svc#%s' % pool} target_volume['volume_type_id'] = self.replication_type['id'] target_volume['volume_type'] = self.replication_type model_update = self.svc_driver.create_cloned_volume( target_volume, self.volume) rep_setup.assert_called_once_with(self.ctxt, target_volume) self.assertEqual({'replication_status': 'enabled'}, model_update) @mock.patch.object(mirror_class, 'failover_volume_host') def test_storwize_failover_host(self, failover_volume_host): fake_secondary = 'svc_id_target' target_id, volume_list = self.svc_driver.failover_host(self.ctxt, self.volumes, fake_secondary) expected_list = [{'updates': {'replication_status': 'failed-over'}, 'volume_id': self.fake_volume_id}] expected_calls = [mock.call(self.ctxt, self.volume, fake_secondary)] failover_volume_host.assert_has_calls(expected_calls) self.assertEqual(fake_secondary, target_id) self.assertEqual(expected_list, volume_list) @mock.patch.object(mirror_class, '_partnership_validate_create') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') def test_establish_target_partnership(self, get_system_info, partnership_validate_create): source_system_name = 'source_vol' target_system_name = 'target_vol' self.svc_driver.configuration.set_override('san_ip', "192.168.10.21") get_system_info.side_effect = [{'system_name': source_system_name}, {'system_name': target_system_name}] self.driver.establish_target_partnership() expected_calls = [mock.call(self.svc_driver._helpers, 'target_vol', '192.168.10.23'), mock.call(self.driver.target_helpers, 'source_vol', '192.168.10.21')] partnership_validate_create.assert_has_calls(expected_calls) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'switch_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_failover_volume_host(self, get_relationship_info, switch_relationship): fake_vol = {'id': '21345678-1234-5678-1234-567812345683'} context = mock.Mock secondary = 'svc_id_target' get_relationship_info.return_value = ( {'aux_vdisk_name': 'replica-12345678-1234-5678-1234-567812345678', 'name': 'RC_name'}) self.driver.failover_volume_host(context, fake_vol, secondary) get_relationship_info.assert_called_once_with(fake_vol) switch_relationship.assert_called_once_with('RC_name') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'switch_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_failover_volume_host_relation_error(self, get_relationship_info, switch_relationship): fake_vol = {'id': '21345678-1234-5678-1234-567812345683'} context = mock.Mock get_relationship_info.side_effect = Exception secondary = 'svc_id_target' self.assertRaises(exception.VolumeDriverException, self.driver.failover_volume_host, context, fake_vol, secondary) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'switch_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 
'get_relationship_info') def test_failover_volume_host_switch_error(self, get_relationship_info, switch_relationship): fake_vol = {'id': '21345678-1234-5678-1234-567812345683'} context = mock.Mock secondary = 'svc_id_target' get_relationship_info.return_value = ( {'aux_vdisk_name': 'replica-12345678-1234-5678-1234-567812345678', 'RC_name': 'RC_name'}) switch_relationship.side_effect = Exception self.assertRaises(exception.VolumeDriverException, self.driver.failover_volume_host, context, fake_vol, secondary) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'switch_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_failover_volume_host_backend_mismatch(self, get_relationship_info, switch_relationship): fake_vol = {'id': '21345678-1234-5678-1234-567812345683'} context = mock.Mock secondary = 'wrong_id' get_relationship_info.return_value = ( {'aux_vdisk_name': 'replica-12345678-1234-5678-1234-567812345678', 'RC_name': 'RC_name'}) updates = self.driver.failover_volume_host(context, fake_vol, secondary) self.assertFalse(get_relationship_info.called) self.assertFalse(switch_relationship.called) self.assertIsNone(updates) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'switch_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_replication_failback(self, get_relationship_info, switch_relationship): fake_vol = mock.Mock() get_relationship_info.return_value = {'id': 'rel_id', 'name': 'rc_name'} self.driver.replication_failback(fake_vol) get_relationship_info.assert_called_once_with(fake_vol) switch_relationship.assert_called_once_with('rc_name', aux=False) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_get_relationship_status_valid(self, get_relationship_info): fake_vol = mock.Mock() get_relationship_info.return_value = {'state': 'synchronized'} status = self.driver.get_relationship_status(fake_vol) get_relationship_info.assert_called_once_with(fake_vol) self.assertEqual('synchronized', status) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_get_relationship_status_none(self, get_relationship_info): fake_vol = mock.Mock() get_relationship_info.return_value = None status = self.driver.get_relationship_status(fake_vol) get_relationship_info.assert_called_once_with(fake_vol) self.assertIsNone(status) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_get_relationship_status_exception(self, get_relationship_info): fake_vol = {'id': 'vol-id'} get_relationship_info.side_effect = exception.VolumeDriverException status = self.driver.get_relationship_status(fake_vol) get_relationship_info.assert_called_once_with(fake_vol) self.assertIsNone(status) class StorwizeSVCReplicationMetroMirrorTestCase( StorwizeSVCReplicationMirrorTestCase): rep_type = 'metro' mirror_class = storwize_rep.StorwizeSVCReplicationMetroMirror def setUp(self): super(StorwizeSVCReplicationMetroMirrorTestCase, self).setUp() cinder-8.0.0/cinder/tests/unit/test_utils.py0000664000567000056710000015516112701406257022277 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import functools
import os
import time

import mock
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_utils import timeutils
import six
from six.moves import range
import webob.exc

import cinder
from cinder import exception
from cinder import test
from cinder import utils

CONF = cfg.CONF


class ExecuteTestCase(test.TestCase):
    @mock.patch('cinder.utils.processutils.execute')
    def test_execute(self, mock_putils_exe):
        output = utils.execute('a', 1, foo='bar')
        self.assertEqual(mock_putils_exe.return_value, output)
        mock_putils_exe.assert_called_once_with('a', 1, foo='bar')

    @mock.patch('cinder.utils.get_root_helper')
    @mock.patch('cinder.utils.processutils.execute')
    def test_execute_root(self, mock_putils_exe, mock_get_helper):
        output = utils.execute('a', 1, foo='bar', run_as_root=True)
        self.assertEqual(mock_putils_exe.return_value, output)
        mock_helper = mock_get_helper.return_value
        mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
                                                run_as_root=True,
                                                root_helper=mock_helper)

    @mock.patch('cinder.utils.get_root_helper')
    @mock.patch('cinder.utils.processutils.execute')
    def test_execute_root_and_helper(self, mock_putils_exe, mock_get_helper):
        mock_helper = mock.Mock()
        output = utils.execute('a', 1, foo='bar', run_as_root=True,
                               root_helper=mock_helper)
        self.assertEqual(mock_putils_exe.return_value, output)
        self.assertFalse(mock_get_helper.called)
        mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
                                                run_as_root=True,
                                                root_helper=mock_helper)


class GenericUtilsTestCase(test.TestCase):
    @mock.patch('os.path.exists', return_value=True)
    def test_find_config(self, mock_exists):
        path = '/etc/cinder/cinder.conf'
        cfgpath = utils.find_config(path)
        self.assertEqual(path, cfgpath)

        mock_exists.return_value = False
        self.assertRaises(exception.ConfigNotFound,
                          utils.find_config, path)

    def test_as_int(self):
        test_obj_int = '2'
        test_obj_float = '2.2'
        for obj in [test_obj_int, test_obj_float]:
            self.assertEqual(2, utils.as_int(obj))

        obj = 'not_a_number'
        self.assertEqual(obj, utils.as_int(obj))
        self.assertRaises(TypeError,
                          utils.as_int,
                          obj, quiet=False)

    def test_check_exclusive_options(self):
        utils.check_exclusive_options()
        utils.check_exclusive_options(something=None,
                                      pretty_keys=True,
                                      unit_test=True)

        self.assertRaises(exception.InvalidInput,
                          utils.check_exclusive_options,
                          test=True,
                          unit=False,
                          pretty_keys=True)

        self.assertRaises(exception.InvalidInput,
                          utils.check_exclusive_options,
                          test=True,
                          unit=False,
                          pretty_keys=False)

    def test_require_driver_initialized(self):
        driver = mock.Mock()
        driver.initialized = True
        utils.require_driver_initialized(driver)

        driver.initialized = False
        self.assertRaises(exception.DriverNotInitialized,
                          utils.require_driver_initialized, driver)

    def test_hostname_unicode_sanitization(self):
        hostname = u"\u7684.test.example.com"
        self.assertEqual("test.example.com",
                         utils.sanitize_hostname(hostname))

    def test_hostname_sanitize_periods(self):
        hostname = "....test.example.com..."
        self.assertEqual("test.example.com",
                         utils.sanitize_hostname(hostname))
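    # A rough sketch of the sanitization behaviour the hostname tests
    # above and below pin down (assumed logic, not cinder's exact
    # implementation): fold to ASCII, turn spaces and underscores into
    # dashes, drop all other disallowed characters, then trim leading
    # and trailing '.' and '-'.
    @staticmethod
    def _sanitize_hostname_sketch(hostname):
        import re
        import unicodedata
        hostname = unicodedata.normalize('NFKD', six.text_type(hostname))
        hostname = hostname.encode('ascii', 'ignore').decode('ascii')
        hostname = re.sub('[ _]', '-', hostname)
        hostname = re.sub('[^-\w.]', '', hostname)
        return hostname.strip('.-')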
self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_dashes(self): hostname = "----test.example.com---" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_characters(self): hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" self.assertEqual("91----test-host.example.com-0", utils.sanitize_hostname(hostname)) def test_hostname_translate(self): hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" self.assertEqual("hello", utils.sanitize_hostname(hostname)) def test_is_valid_boolstr(self): self.assertTrue(utils.is_valid_boolstr(True)) self.assertTrue(utils.is_valid_boolstr('trUe')) self.assertTrue(utils.is_valid_boolstr(False)) self.assertTrue(utils.is_valid_boolstr('faLse')) self.assertTrue(utils.is_valid_boolstr('yeS')) self.assertTrue(utils.is_valid_boolstr('nO')) self.assertTrue(utils.is_valid_boolstr('y')) self.assertTrue(utils.is_valid_boolstr('N')) self.assertTrue(utils.is_valid_boolstr(1)) self.assertTrue(utils.is_valid_boolstr('1')) self.assertTrue(utils.is_valid_boolstr(0)) self.assertTrue(utils.is_valid_boolstr('0')) @mock.patch('os.path.join', side_effect=lambda x, y: '/'.join((x, y))) def test_make_dev_path(self, mock_join): self.assertEqual('/dev/xvda', utils.make_dev_path('xvda')) self.assertEqual('/dev/xvdb1', utils.make_dev_path('xvdb', 1)) self.assertEqual('/foo/xvdc1', utils.make_dev_path('xvdc', 1, '/foo')) @mock.patch('cinder.utils.execute') def test_read_file_as_root(self, mock_exec): out = mock.Mock() err = mock.Mock() mock_exec.return_value = (out, err) test_filepath = '/some/random/path' output = utils.read_file_as_root(test_filepath) mock_exec.assert_called_once_with('cat', test_filepath, run_as_root=True) self.assertEqual(out, output) @mock.patch('cinder.utils.execute', side_effect=putils.ProcessExecutionError) def test_read_file_as_root_fails(self, mock_exec): test_filepath = '/some/random/path' self.assertRaises(exception.FileNotFound, utils.read_file_as_root, test_filepath) @mock.patch('tempfile.NamedTemporaryFile') @mock.patch.object(os, 'open') @mock.patch.object(os, 'fdatasync') @mock.patch.object(os, 'fsync') @mock.patch.object(os, 'rename') @mock.patch.object(os, 'close') @mock.patch.object(os.path, 'isfile') @mock.patch.object(os, 'unlink') def test_write_configfile(self, mock_unlink, mock_isfile, mock_close, mock_rename, mock_fsync, mock_fdatasync, mock_open, mock_tmp): filename = 'foo' directory = '/some/random/path' filepath = os.path.join(directory, filename) expected = ('\n\n' ' backing-store %(bspath)s\n' ' driver iscsi\n' ' incominguser chap_foo chap_bar\n' ' bsoflags foo\n' ' write-cache bar\n' '\n' % {'id': filename, 'bspath': filepath}) # Normal case utils.robust_file_write(directory, filename, expected) mock_open.assert_called_once_with(directory, os.O_DIRECTORY) mock_rename.assert_called_once_with(mock.ANY, filepath) self.assertEqual( expected.encode('utf-8'), mock_tmp.return_value.__enter__.return_value.write.call_args[0][0] ) # Failure to write persistent file. 
        # Failure to write persistent file.
        tempfile = '/some/tempfile'
        mock_tmp.return_value.__enter__.return_value.name = tempfile
        mock_rename.side_effect = OSError
        self.assertRaises(OSError,
                          utils.robust_file_write,
                          directory, filename, mock.MagicMock())
        mock_isfile.assert_called_once_with(tempfile)
        mock_unlink.assert_called_once_with(tempfile)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_service_is_up(self, mock_utcnow):
        fts_func = datetime.datetime.fromtimestamp
        fake_now = 1000
        down_time = 5
        self.flags(service_down_time=down_time)
        mock_utcnow.return_value = fts_func(fake_now)

        # Up (equal)
        service = {'updated_at': fts_func(fake_now - down_time),
                   'created_at': fts_func(fake_now - down_time)}
        result = utils.service_is_up(service)
        self.assertTrue(result)

        # Up
        service = {'updated_at': fts_func(fake_now - down_time + 1),
                   'created_at': fts_func(fake_now - down_time + 1)}
        result = utils.service_is_up(service)
        self.assertTrue(result)

        # Down
        service = {'updated_at': fts_func(fake_now - down_time - 1),
                   'created_at': fts_func(fake_now - down_time - 1)}
        result = utils.service_is_up(service)
        self.assertFalse(result)

    def test_safe_parse_xml(self):

        normal_body = ('<?xml version="1.0" ?>'
                       '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')

        def killer_body():
            return (("""<!DOCTYPE x [
                    <!ENTITY a "%(a)s">
                    <!ENTITY b "%(b)s">
                    <!ENTITY c "%(c)s">]>
                <foo>
                    <bar>
                        <v1>%(d)s</v1>
                    </bar>
                </foo>""") % {
                'a': 'A' * 10,
                'b': '&a;' * 10,
                'c': '&b;' * 10,
                'd': '&c;' * 9999,
            }).strip()

        dom = utils.safe_minidom_parse_string(normal_body)
        # Some versions of minidom inject extra newlines so we ignore them
        result = str(dom.toxml()).replace('\n', '')
        self.assertEqual(normal_body, result)

        self.assertRaises(ValueError,
                          utils.safe_minidom_parse_string,
                          killer_body())

    def test_check_ssh_injection(self):
        cmd_list = ['ssh', '-D', 'my_name@name_of_remote_computer']
        self.assertIsNone(utils.check_ssh_injection(cmd_list))

        cmd_list = ['echo', '"quoted arg with space"']
        self.assertIsNone(utils.check_ssh_injection(cmd_list))

        cmd_list = ['echo', "'quoted arg with space'"]
        self.assertIsNone(utils.check_ssh_injection(cmd_list))

    def test_check_ssh_injection_on_error(self):
        with_unquoted_space = ['ssh', 'my_name@ name_of_remote_computer']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          with_unquoted_space)

        with_danger_chars = ['||', 'my_name@name_of_remote_computer']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          with_danger_chars)

        with_danger_char = [';', 'my_name@name_of_remote_computer']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          with_danger_char)

        with_special = ['cmd', 'virus;ls']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection, with_special)

        quoted_with_unescaped = ['cmd', '"arg\"withunescaped"']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          quoted_with_unescaped)

        bad_before_quotes = ['cmd', 'virus;"quoted argument"']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          bad_before_quotes)

        bad_after_quotes = ['echo', '"quoted argument";rm -rf']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          bad_after_quotes)

        bad_within_quotes = ['echo', "'quoted argument `rm -rf`'"]
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          bad_within_quotes)

        with_multiple_quotes = ['echo', '"quoted";virus;"quoted"']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          with_multiple_quotes)

        with_multiple_quotes = ['echo', '"quoted";virus;\'quoted\'']
        self.assertRaises(exception.SSHInjectionThreat,
                          utils.check_ssh_injection,
                          with_multiple_quotes)

    @mock.patch('os.stat')
    def test_get_file_mode(self, mock_stat):
        class
stat_result(object): st_mode = 0o777 st_gid = 33333 test_file = '/var/tmp/made_up_file' mock_stat.return_value = stat_result mode = utils.get_file_mode(test_file) self.assertEqual(0o777, mode) mock_stat.assert_called_once_with(test_file) @mock.patch('os.stat') def test_get_file_gid(self, mock_stat): class stat_result(object): st_mode = 0o777 st_gid = 33333 test_file = '/var/tmp/made_up_file' mock_stat.return_value = stat_result gid = utils.get_file_gid(test_file) self.assertEqual(33333, gid) mock_stat.assert_called_once_with(test_file) @mock.patch('cinder.utils.CONF') def test_get_root_helper(self, mock_conf): mock_conf.rootwrap_config = '/path/to/conf' self.assertEqual('sudo cinder-rootwrap /path/to/conf', utils.get_root_helper()) def test_list_of_dicts_to_dict(self): a = {'id': '1', 'color': 'orange'} b = {'id': '2', 'color': 'blue'} c = {'id': '3', 'color': 'green'} lst = [a, b, c] resp = utils.list_of_dicts_to_dict(lst, 'id') self.assertEqual(c['id'], resp['3']['id']) class TemporaryChownTestCase(test.TestCase): @mock.patch('os.stat') @mock.patch('os.getuid', return_value=1234) @mock.patch('cinder.utils.execute') def test_get_uid(self, mock_exec, mock_getuid, mock_stat): mock_stat.return_value.st_uid = 5678 test_filename = 'a_file' with utils.temporary_chown(test_filename): mock_exec.assert_called_once_with('chown', 1234, test_filename, run_as_root=True) mock_getuid.assert_called_once_with() mock_stat.assert_called_once_with(test_filename) calls = [mock.call('chown', 1234, test_filename, run_as_root=True), mock.call('chown', 5678, test_filename, run_as_root=True)] mock_exec.assert_has_calls(calls) @mock.patch('os.stat') @mock.patch('os.getuid', return_value=1234) @mock.patch('cinder.utils.execute') def test_supplied_owner_uid(self, mock_exec, mock_getuid, mock_stat): mock_stat.return_value.st_uid = 5678 test_filename = 'a_file' with utils.temporary_chown(test_filename, owner_uid=9101): mock_exec.assert_called_once_with('chown', 9101, test_filename, run_as_root=True) self.assertFalse(mock_getuid.called) mock_stat.assert_called_once_with(test_filename) calls = [mock.call('chown', 9101, test_filename, run_as_root=True), mock.call('chown', 5678, test_filename, run_as_root=True)] mock_exec.assert_has_calls(calls) @mock.patch('os.stat') @mock.patch('os.getuid', return_value=5678) @mock.patch('cinder.utils.execute') def test_matching_uid(self, mock_exec, mock_getuid, mock_stat): mock_stat.return_value.st_uid = 5678 test_filename = 'a_file' with utils.temporary_chown(test_filename): pass mock_getuid.assert_called_once_with() mock_stat.assert_called_once_with(test_filename) self.assertFalse(mock_exec.called) class TempdirTestCase(test.TestCase): @mock.patch('tempfile.mkdtemp') @mock.patch('shutil.rmtree') def test_tempdir(self, mock_rmtree, mock_mkdtemp): with utils.tempdir(a='1', b=2) as td: self.assertEqual(mock_mkdtemp.return_value, td) self.assertFalse(mock_rmtree.called) mock_mkdtemp.assert_called_once_with(a='1', b=2) mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value) @mock.patch('tempfile.mkdtemp') @mock.patch('shutil.rmtree', side_effect=OSError) def test_tempdir_error(self, mock_rmtree, mock_mkdtemp): with utils.tempdir(a='1', b=2) as td: self.assertEqual(mock_mkdtemp.return_value, td) self.assertFalse(mock_rmtree.called) mock_mkdtemp.assert_called_once_with(a='1', b=2) mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value) class WalkClassHierarchyTestCase(test.TestCase): def test_walk_class_hierarchy(self): class A(object): pass class B(A): pass class C(A): pass 
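        # For reference, a depth-first subclass walk that is consistent
        # with the class tree built here and the expectations below (a
        # sketch, not the cinder.utils implementation):
        def _walk_sketch(clazz, encountered=None):
            if encountered is None:
                encountered = []
            for subclass in clazz.__subclasses__():
                if subclass not in encountered:
                    encountered.append(subclass)
                    for grandchild in _walk_sketch(subclass, encountered):
                        yield grandchild
                    yield subclass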
class D(B): pass class E(A): pass class_pairs = zip((D, B, E), utils.walk_class_hierarchy(A, encountered=[C])) for actual, expected in class_pairs: self.assertEqual(expected, actual) class_pairs = zip((D, B, C, E), utils.walk_class_hierarchy(A)) for actual, expected in class_pairs: self.assertEqual(expected, actual) class GetDiskOfPartitionTestCase(test.TestCase): def test_devpath_is_diskpath(self): devpath = '/some/path' st_mock = mock.Mock() output = utils._get_disk_of_partition(devpath, st_mock) self.assertEqual('/some/path', output[0]) self.assertIs(st_mock, output[1]) with mock.patch('os.stat') as mock_stat: devpath = '/some/path' output = utils._get_disk_of_partition(devpath) mock_stat.assert_called_once_with(devpath) self.assertEqual(devpath, output[0]) self.assertIs(mock_stat.return_value, output[1]) @mock.patch('os.stat', side_effect=OSError) def test_stat_oserror(self, mock_stat): st_mock = mock.Mock() devpath = '/some/path1' output = utils._get_disk_of_partition(devpath, st_mock) mock_stat.assert_called_once_with('/some/path') self.assertEqual(devpath, output[0]) self.assertIs(st_mock, output[1]) @mock.patch('stat.S_ISBLK', return_value=True) @mock.patch('os.stat') def test_diskpath_is_block_device(self, mock_stat, mock_isblk): st_mock = mock.Mock() devpath = '/some/path1' output = utils._get_disk_of_partition(devpath, st_mock) self.assertEqual('/some/path', output[0]) self.assertEqual(mock_stat.return_value, output[1]) @mock.patch('stat.S_ISBLK', return_value=False) @mock.patch('os.stat') def test_diskpath_is_not_block_device(self, mock_stat, mock_isblk): st_mock = mock.Mock() devpath = '/some/path1' output = utils._get_disk_of_partition(devpath, st_mock) self.assertEqual(devpath, output[0]) self.assertEqual(st_mock, output[1]) class GetBlkdevMajorMinorTestCase(test.TestCase): @mock.patch('os.stat') def test_get_file_size(self, mock_stat): class stat_result(object): st_mode = 0o777 st_size = 1074253824 test_file = '/var/tmp/made_up_file' mock_stat.return_value = stat_result size = utils.get_file_size(test_file) self.assertEqual(size, stat_result.st_size) mock_stat.assert_called_once_with(test_file) @mock.patch('os.stat') def test_get_blkdev_major_minor(self, mock_stat): class stat_result(object): st_mode = 0o60660 st_rdev = os.makedev(253, 7) test_device = '/dev/made_up_blkdev' mock_stat.return_value = stat_result dev = utils.get_blkdev_major_minor(test_device) self.assertEqual('253:7', dev) mock_stat.assert_called_once_with(test_device) @mock.patch('os.stat') @mock.patch.object(utils, 'execute') def _test_get_blkdev_major_minor_file(self, test_partition, mock_exec, mock_stat): mock_exec.return_value = ( 'Filesystem Size Used Avail Use%% Mounted on\n' '%s 4096 2048 2048 50%% /tmp\n' % test_partition, None) test_file = '/tmp/file' test_disk = '/dev/made_up_disk' class stat_result_file(object): st_mode = 0o660 class stat_result_partition(object): st_mode = 0o60660 st_rdev = os.makedev(8, 65) class stat_result_disk(object): st_mode = 0o60660 st_rdev = os.makedev(8, 64) def fake_stat(path): try: return {test_file: stat_result_file, test_partition: stat_result_partition, test_disk: stat_result_disk}[path] except KeyError: raise OSError mock_stat.side_effect = fake_stat dev = utils.get_blkdev_major_minor(test_file) mock_stat.assert_any_call(test_file) mock_exec.assert_called_once_with('df', test_file) if test_partition.startswith('/'): mock_stat.assert_any_call(test_partition) mock_stat.assert_any_call(test_disk) return dev def test_get_blkdev_major_minor_file(self): dev = 
self._test_get_blkdev_major_minor_file('/dev/made_up_disk1') self.assertEqual('8:64', dev) def test_get_blkdev_major_minor_file_nfs(self): dev = self._test_get_blkdev_major_minor_file('nfs-server:/export/path') self.assertIsNone(dev) @mock.patch('os.stat') @mock.patch('stat.S_ISCHR', return_value=False) @mock.patch('stat.S_ISBLK', return_value=False) def test_get_blkdev_failure(self, mock_isblk, mock_ischr, mock_stat): path = '/some/path' self.assertRaises(exception.Error, utils.get_blkdev_major_minor, path, lookup_for_file=False) mock_stat.assert_called_once_with(path) mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode) mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode) @mock.patch('os.stat') @mock.patch('stat.S_ISCHR', return_value=True) @mock.patch('stat.S_ISBLK', return_value=False) def test_get_blkdev_is_chr(self, mock_isblk, mock_ischr, mock_stat): path = '/some/path' output = utils.get_blkdev_major_minor(path, lookup_for_file=False) mock_stat.assert_called_once_with(path) mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode) mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode) self.assertIs(None, output) class MonkeyPatchTestCase(test.TestCase): """Unit test for utils.monkey_patch().""" def setUp(self): super(MonkeyPatchTestCase, self).setUp() self.example_package = 'cinder.tests.unit.monkey_patch_example.' self.flags( monkey_patch=True, monkey_patch_modules=[self.example_package + 'example_a' + ':' + self.example_package + 'example_decorator']) def test_monkey_patch(self): utils.monkey_patch() cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION = [] from cinder.tests.unit.monkey_patch_example import example_a from cinder.tests.unit.monkey_patch_example import example_b self.assertEqual('Example function', example_a.example_function_a()) exampleA = example_a.ExampleClassA() exampleA.example_method() ret_a = exampleA.example_method_add(3, 5) self.assertEqual(8, ret_a) self.assertEqual('Example function', example_b.example_function_b()) exampleB = example_b.ExampleClassB() exampleB.example_method() ret_b = exampleB.example_method_add(3, 5) self.assertEqual(8, ret_b) package_a = self.example_package + 'example_a.' self.assertTrue( package_a + 'example_function_a' in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertTrue( package_a + 'ExampleClassA.example_method' in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertTrue( package_a + 'ExampleClassA.example_method_add' in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) package_b = self.example_package + 'example_b.' 
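        # Conceptually, monkey_patch() resolves each configured
        # "module:decorator" pair and rewraps the module's callables so
        # the assertions in this test can observe the calls.  A
        # bare-bones sketch of that rewrapping step (hypothetical
        # helper, not the cinder.utils implementation):
        def _decorate_module_sketch(module, decorator):
            import inspect
            for name, func in inspect.getmembers(module, inspect.isfunction):
                setattr(module, name,
                        decorator('%s.%s' % (module.__name__, name), func))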
self.assertFalse( package_b + 'example_function_b' in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertFalse( package_b + 'ExampleClassB.example_method' in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertFalse( package_b + 'ExampleClassB.example_method_add' in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) class AuditPeriodTest(test.TestCase): def setUp(self): super(AuditPeriodTest, self).setUp() test_time = datetime.datetime(second=23, minute=12, hour=8, day=5, month=3, year=2012) patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = test_time def test_hour(self): begin, end = utils.last_completed_audit_period(unit='hour') self.assertEqual(datetime.datetime(hour=7, day=5, month=3, year=2012), begin) self.assertEqual(datetime.datetime(hour=8, day=5, month=3, year=2012), end) def test_hour_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='hour@10') self.assertEqual(datetime.datetime(minute=10, hour=7, day=5, month=3, year=2012), begin) self.assertEqual(datetime.datetime(minute=10, hour=8, day=5, month=3, year=2012), end) def test_hour_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='hour@30') self.assertEqual(datetime.datetime(minute=30, hour=6, day=5, month=3, year=2012), begin) self.assertEqual(datetime.datetime(minute=30, hour=7, day=5, month=3, year=2012), end) def test_day(self): begin, end = utils.last_completed_audit_period(unit='day') self.assertEqual(datetime.datetime(day=4, month=3, year=2012), begin) self.assertEqual(datetime.datetime(day=5, month=3, year=2012), end) def test_day_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='day@6') self.assertEqual(datetime.datetime(hour=6, day=4, month=3, year=2012), begin) self.assertEqual(datetime.datetime(hour=6, day=5, month=3, year=2012), end) def test_day_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='day@10') self.assertEqual(datetime.datetime(hour=10, day=3, month=3, year=2012), begin) self.assertEqual(datetime.datetime(hour=10, day=4, month=3, year=2012), end) def test_month(self): begin, end = utils.last_completed_audit_period(unit='month') self.assertEqual(datetime.datetime(day=1, month=2, year=2012), begin) self.assertEqual(datetime.datetime(day=1, month=3, year=2012), end) def test_month_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='month@2') self.assertEqual(datetime.datetime(day=2, month=2, year=2012), begin) self.assertEqual(datetime.datetime(day=2, month=3, year=2012), end) def test_month_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='month@15') self.assertEqual(datetime.datetime(day=15, month=1, year=2012), begin) self.assertEqual(datetime.datetime(day=15, month=2, year=2012), end) @mock.patch('oslo_utils.timeutils.utcnow', return_value=datetime.datetime(day=1, month=1, year=2012)) def test_month_jan_day_first(self, mock_utcnow): begin, end = utils.last_completed_audit_period(unit='month') self.assertEqual(datetime.datetime(day=1, month=11, year=2011), begin) self.assertEqual(datetime.datetime(day=1, month=12, year=2011), end) @mock.patch('oslo_utils.timeutils.utcnow', return_value=datetime.datetime(day=2, month=1, year=2012)) def test_month_jan_day_not_first(self, mock_utcnow): begin, end = utils.last_completed_audit_period(unit='month') 
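        # Worked example of the month arithmetic this case pins down:
        # with "now" frozen at 2012-01-02, the last *completed* month is
        # December 2011, i.e. the period [2011-12-01, 2012-01-01).
        example_now = datetime.datetime(day=2, month=1, year=2012)
        example_end = example_now.replace(day=1, hour=0, minute=0,
                                          second=0, microsecond=0)
        example_begin = (example_end -
                         datetime.timedelta(days=1)).replace(day=1)
        assert example_begin == datetime.datetime(day=1, month=12, year=2011)
        assert example_end == datetime.datetime(day=1, month=1, year=2012)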
self.assertEqual(datetime.datetime(day=1, month=12, year=2011), begin) self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end) def test_year(self): begin, end = utils.last_completed_audit_period(unit='year') self.assertEqual(datetime.datetime(day=1, month=1, year=2011), begin) self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end) def test_year_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='year@2') self.assertEqual(datetime.datetime(day=1, month=2, year=2011), begin) self.assertEqual(datetime.datetime(day=1, month=2, year=2012), end) def test_year_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='year@6') self.assertEqual(datetime.datetime(day=1, month=6, year=2010), begin) self.assertEqual(datetime.datetime(day=1, month=6, year=2011), end) def test_invalid_unit(self): self.assertRaises(ValueError, utils.last_completed_audit_period, unit='invalid_unit') @mock.patch('cinder.utils.CONF') def test_uses_conf_unit(self, mock_conf): mock_conf.volume_usage_audit_period = 'hour' begin1, end1 = utils.last_completed_audit_period() self.assertEqual(60.0 * 60, (end1 - begin1).total_seconds()) mock_conf.volume_usage_audit_period = 'day' begin2, end2 = utils.last_completed_audit_period() self.assertEqual(60.0 * 60 * 24, (end2 - begin2).total_seconds()) class BrickUtils(test.TestCase): """Unit test to test the brick utility wrapper functions.""" @mock.patch('cinder.utils.CONF') @mock.patch('os_brick.initiator.connector.get_connector_properties') @mock.patch('cinder.utils.get_root_helper') def test_brick_get_connector_properties(self, mock_helper, mock_get, mock_conf): mock_conf.my_ip = '1.2.3.4' output = utils.brick_get_connector_properties() mock_helper.assert_called_once_with() mock_get.assert_called_once_with(mock_helper.return_value, '1.2.3.4', False, False) self.assertEqual(mock_get.return_value, output) @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory') @mock.patch('cinder.utils.get_root_helper') def test_brick_get_connector(self, mock_helper, mock_factory): output = utils.brick_get_connector('protocol') mock_helper.assert_called_once_with() self.assertEqual(mock_factory.return_value, output) mock_factory.assert_called_once_with( 'protocol', mock_helper.return_value, driver=None, execute=putils.execute, use_multipath=False, device_scan_attempts=3) class StringLengthTestCase(test.TestCase): def test_check_string_length(self): self.assertIsNone(utils.check_string_length( 'test', 'name', max_length=255)) self.assertRaises(exception.InvalidInput, utils.check_string_length, 11, 'name', max_length=255) self.assertRaises(exception.InvalidInput, utils.check_string_length, '', 'name', min_length=1) self.assertRaises(exception.InvalidInput, utils.check_string_length, 'a' * 256, 'name', max_length=255) class AddVisibleAdminMetadataTestCase(test.TestCase): def test_add_visible_admin_metadata_visible_key_only(self): admin_metadata = [{"key": "invisible_key", "value": "invisible_value"}, {"key": "readonly", "value": "visible"}, {"key": "attached_mode", "value": "visible"}] metadata = [{"key": "key", "value": "value"}, {"key": "readonly", "value": "existing"}] volume = {'volume_admin_metadata': admin_metadata, 'volume_metadata': metadata} utils.add_visible_admin_metadata(volume) self.assertEqual([{"key": "key", "value": "value"}, {"key": "readonly", "value": "visible"}, {"key": "attached_mode", "value": "visible"}], volume['volume_metadata']) admin_metadata = {"invisible_key": 
"invisible_value", "readonly": "visible", "attached_mode": "visible"} metadata = {"key": "value", "readonly": "existing"} volume = {'admin_metadata': admin_metadata, 'metadata': metadata} utils.add_visible_admin_metadata(volume) self.assertEqual({'key': 'value', 'attached_mode': 'visible', 'readonly': 'visible'}, volume['metadata']) def test_add_visible_admin_metadata_no_visible_keys(self): admin_metadata = [ {"key": "invisible_key1", "value": "invisible_value1"}, {"key": "invisible_key2", "value": "invisible_value2"}, {"key": "invisible_key3", "value": "invisible_value3"}] metadata = [{"key": "key", "value": "value"}] volume = {'volume_admin_metadata': admin_metadata, 'volume_metadata': metadata} utils.add_visible_admin_metadata(volume) self.assertEqual([{"key": "key", "value": "value"}], volume['volume_metadata']) admin_metadata = {"invisible_key1": "invisible_value1", "invisible_key2": "invisible_value2", "invisible_key3": "invisible_value3"} metadata = {"key": "value"} volume = {'admin_metadata': admin_metadata, 'metadata': metadata} utils.add_visible_admin_metadata(volume) self.assertEqual({'key': 'value'}, volume['metadata']) def test_add_visible_admin_metadata_no_existing_metadata(self): admin_metadata = [{"key": "invisible_key", "value": "invisible_value"}, {"key": "readonly", "value": "visible"}, {"key": "attached_mode", "value": "visible"}] volume = {'volume_admin_metadata': admin_metadata} utils.add_visible_admin_metadata(volume) self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'}, volume['metadata']) admin_metadata = {"invisible_key": "invisible_value", "readonly": "visible", "attached_mode": "visible"} volume = {'admin_metadata': admin_metadata} utils.add_visible_admin_metadata(volume) self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'}, volume['metadata']) class InvalidFilterTestCase(test.TestCase): def test_admin_allows_all_options(self): ctxt = mock.Mock(name='context') ctxt.is_admin = True filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} allowed_search_options = ('allowed1', 'allowed2') allowed_orig = ('allowed1', 'allowed2') utils.remove_invalid_filter_options(ctxt, filters, allowed_search_options) self.assertEqual(allowed_orig, allowed_search_options) self.assertEqual(fltrs_orig, filters) def test_admin_allows_some_options(self): ctxt = mock.Mock(name='context') ctxt.is_admin = False filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} allowed_search_options = ('allowed1', 'allowed2') allowed_orig = ('allowed1', 'allowed2') utils.remove_invalid_filter_options(ctxt, filters, allowed_search_options) self.assertEqual(allowed_orig, allowed_search_options) self.assertNotEqual(fltrs_orig, filters) self.assertEqual(allowed_search_options, tuple(sorted(filters.keys()))) class IsBlkDeviceTestCase(test.TestCase): @mock.patch('stat.S_ISBLK', return_value=True) @mock.patch('os.stat') def test_is_blk_device(self, mock_os_stat, mock_S_ISBLK): dev = 'some_device' self.assertTrue(utils.is_blk_device(dev)) @mock.patch('stat.S_ISBLK', return_value=False) @mock.patch('os.stat') def test_not_is_blk_device(self, mock_os_stat, mock_S_ISBLK): dev = 'not_some_device' self.assertFalse(utils.is_blk_device(dev)) @mock.patch('stat.S_ISBLK', side_effect=Exception) @mock.patch('os.stat') def test_fail_is_blk_device(self, mock_os_stat, mock_S_ISBLK): dev = 'device_exception' 
self.assertFalse(utils.is_blk_device(dev)) class WrongException(Exception): pass class TestRetryDecorator(test.TestCase): def setUp(self): super(TestRetryDecorator, self).setUp() def test_no_retry_required(self): self.counter = 0 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval=2, retries=3, backoff_rate=2) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_no_retry_required_random(self): self.counter = 0 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval=2, retries=3, backoff_rate=2, wait_random=True) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_retries_once(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval, retries, backoff_rate) def fails_once(): self.counter += 1 if self.counter < 2: raise exception.VolumeBackendAPIException(data='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) mock_sleep.assert_called_with(interval * backoff_rate) def test_retries_once_random(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval, retries, backoff_rate, wait_random=True) def fails_once(): self.counter += 1 if self.counter < 2: raise exception.VolumeBackendAPIException(data='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) self.assertTrue(mock_sleep.called) def test_limit_is_reached(self): self.counter = 0 retries = 3 interval = 2 backoff_rate = 4 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval, retries, backoff_rate) def always_fails(): self.counter += 1 raise exception.VolumeBackendAPIException(data='fake') self.assertRaises(exception.VolumeBackendAPIException, always_fails) self.assertEqual(retries, self.counter) expected_sleep_arg = [] for i in range(retries): if i > 0: interval *= backoff_rate expected_sleep_arg.append(float(interval)) mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg)) def test_wrong_exception_no_retry(self): with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException) def raise_unexpected_error(): raise WrongException("wrong exception") self.assertRaises(WrongException, raise_unexpected_error) self.assertFalse(mock_sleep.called) class LogTracingTestCase(test.TestCase): def test_utils_setup_tracing(self): self.mock_object(utils, 'LOG') utils.setup_tracing(None) self.assertFalse(utils.TRACE_API) self.assertFalse(utils.TRACE_METHOD) self.assertEqual(0, utils.LOG.warning.call_count) utils.setup_tracing(['method']) self.assertFalse(utils.TRACE_API) self.assertTrue(utils.TRACE_METHOD) self.assertEqual(0, utils.LOG.warning.call_count) utils.setup_tracing(['method', 'api']) self.assertTrue(utils.TRACE_API) self.assertTrue(utils.TRACE_METHOD) self.assertEqual(0, utils.LOG.warning.call_count) def 
test_utils_setup_tracing_invalid_key(self): self.mock_object(utils, 'LOG') utils.setup_tracing(['fake']) self.assertFalse(utils.TRACE_API) self.assertFalse(utils.TRACE_METHOD) self.assertEqual(1, utils.LOG.warning.call_count) def test_utils_setup_tracing_valid_and_invalid_key(self): self.mock_object(utils, 'LOG') utils.setup_tracing(['method', 'fake']) self.assertFalse(utils.TRACE_API) self.assertTrue(utils.TRACE_METHOD) self.assertEqual(1, utils.LOG.warning.call_count) def test_trace_no_tracing(self): self.mock_object(utils, 'LOG') @utils.trace_method def _trace_test_method(*args, **kwargs): return 'OK' utils.setup_tracing(None) result = _trace_test_method() self.assertEqual('OK', result) self.assertEqual(0, utils.LOG.debug.call_count) def test_utils_trace_method(self): self.mock_object(utils, 'LOG') @utils.trace_method def _trace_test_method(*args, **kwargs): return 'OK' utils.setup_tracing(['method']) result = _trace_test_method() self.assertEqual('OK', result) self.assertEqual(2, utils.LOG.debug.call_count) def test_utils_trace_api(self): self.mock_object(utils, 'LOG') @utils.trace_api def _trace_test_api(*args, **kwargs): return 'OK' utils.setup_tracing(['api']) result = _trace_test_api() self.assertEqual('OK', result) self.assertEqual(2, utils.LOG.debug.call_count) def test_utils_trace_method_default_logger(self): mock_log = self.mock_object(utils, 'LOG') @utils.trace_method def _trace_test_method_custom_logger(*args, **kwargs): return 'OK' utils.setup_tracing(['method']) result = _trace_test_method_custom_logger() self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) def test_utils_trace_method_inner_decorator(self): mock_logging = self.mock_object(utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) def _test_decorator(f): def blah(*args, **kwargs): return f(*args, **kwargs) return blah @_test_decorator @utils.trace_method def _trace_test_method(*args, **kwargs): return 'OK' utils.setup_tracing(['method']) result = _trace_test_method(self) self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) # Ensure the correct function name was logged for call in mock_log.debug.call_args_list: self.assertTrue('_trace_test_method' in str(call)) self.assertFalse('blah' in str(call)) def test_utils_trace_method_outer_decorator(self): mock_logging = self.mock_object(utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) def _test_decorator(f): def blah(*args, **kwargs): return f(*args, **kwargs) return blah @utils.trace_method @_test_decorator def _trace_test_method(*args, **kwargs): return 'OK' utils.setup_tracing(['method']) result = _trace_test_method(self) self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) # Ensure the incorrect function name was logged for call in mock_log.debug.call_args_list: self.assertFalse('_trace_test_method' in str(call)) self.assertTrue('blah' in str(call)) def test_utils_trace_method_outer_decorator_with_functools(self): mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True self.mock_object(utils.logging, 'getLogger', mock_log) mock_log = self.mock_object(utils, 'LOG') def _test_decorator(f): @functools.wraps(f) def wraps(*args, **kwargs): return f(*args, **kwargs) return wraps @utils.trace_method @_test_decorator def _trace_test_method(*args, **kwargs): return 'OK' utils.setup_tracing(['method']) result = 

    def test_utils_trace_method_default_logger(self):
        mock_log = self.mock_object(utils, 'LOG')

        @utils.trace_method
        def _trace_test_method_custom_logger(*args, **kwargs):
            return 'OK'

        utils.setup_tracing(['method'])

        result = _trace_test_method_custom_logger()

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)

    def test_utils_trace_method_inner_decorator(self):
        mock_logging = self.mock_object(utils, 'logging')
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        mock_logging.getLogger = mock.Mock(return_value=mock_log)

        def _test_decorator(f):
            def blah(*args, **kwargs):
                return f(*args, **kwargs)
            return blah

        @_test_decorator
        @utils.trace_method
        def _trace_test_method(*args, **kwargs):
            return 'OK'

        utils.setup_tracing(['method'])

        result = _trace_test_method(self)

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)
        # Ensure the correct function name was logged
        for call in mock_log.debug.call_args_list:
            self.assertTrue('_trace_test_method' in str(call))
            self.assertFalse('blah' in str(call))

    def test_utils_trace_method_outer_decorator(self):
        mock_logging = self.mock_object(utils, 'logging')
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        mock_logging.getLogger = mock.Mock(return_value=mock_log)

        def _test_decorator(f):
            def blah(*args, **kwargs):
                return f(*args, **kwargs)
            return blah

        @utils.trace_method
        @_test_decorator
        def _trace_test_method(*args, **kwargs):
            return 'OK'

        utils.setup_tracing(['method'])

        result = _trace_test_method(self)

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)
        # Ensure the incorrect function name was logged: the outer decorator
        # does not use functools.wraps, so the wrapper name leaks through
        for call in mock_log.debug.call_args_list:
            self.assertFalse('_trace_test_method' in str(call))
            self.assertTrue('blah' in str(call))

    def test_utils_trace_method_outer_decorator_with_functools(self):
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        self.mock_object(utils.logging, 'getLogger', mock_log)
        mock_log = self.mock_object(utils, 'LOG')

        def _test_decorator(f):
            @functools.wraps(f)
            def wraps(*args, **kwargs):
                return f(*args, **kwargs)
            return wraps

        @utils.trace_method
        @_test_decorator
        def _trace_test_method(*args, **kwargs):
            return 'OK'

        utils.setup_tracing(['method'])

        result = _trace_test_method()

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)
        # Ensure the correct function name was logged, since functools.wraps
        # preserves it
        for call in mock_log.debug.call_args_list:
            self.assertTrue('_trace_test_method' in str(call))
            self.assertFalse('wraps' in str(call))

    def test_utils_trace_method_with_exception(self):
        self.LOG = self.mock_object(utils, 'LOG')

        @utils.trace_method
        def _trace_test_method(*args, **kwargs):
            raise exception.APITimeout('test message')

        utils.setup_tracing(['method'])

        self.assertRaises(exception.APITimeout, _trace_test_method)

        exception_log = self.LOG.debug.call_args_list[1]
        self.assertTrue('exception' in str(exception_log))
        self.assertTrue('test message' in str(exception_log))

    def test_utils_trace_method_with_time(self):
        mock_logging = self.mock_object(utils, 'logging')
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        mock_logging.getLogger = mock.Mock(return_value=mock_log)

        mock_time = mock.Mock(side_effect=[3.1, 6])
        self.mock_object(time, 'time', mock_time)

        @utils.trace_method
        def _trace_test_method(*args, **kwargs):
            return 'OK'

        utils.setup_tracing(['method'])

        result = _trace_test_method(self)

        self.assertEqual('OK', result)
        return_log = mock_log.debug.call_args_list[1]
        # The call took 6 - 3.1 = 2.9s, reported in ms as 2900
        self.assertTrue('2900' in str(return_log))

    def test_utils_trace_wrapper_class(self):
        mock_logging = self.mock_object(utils, 'logging')
        mock_log = mock.Mock()
        mock_log.isEnabledFor = lambda x: True
        mock_logging.getLogger = mock.Mock(return_value=mock_log)

        utils.setup_tracing(['method'])

        @six.add_metaclass(utils.TraceWrapperMetaclass)
        class MyClass(object):
            def trace_test_method(self):
                return 'OK'

        test_class = MyClass()
        result = test_class.trace_test_method()

        self.assertEqual('OK', result)
        self.assertEqual(2, mock_log.debug.call_count)

    def test_utils_calculate_virtual_free_capacity_with_thick(self):
        host_stat = {'total_capacity_gb': 30.01,
                     'free_capacity_gb': 28.01,
                     'provisioned_capacity_gb': 2.0,
                     'max_over_subscription_ratio': 1.0,
                     'thin_provisioning_support': False,
                     'thick_provisioning_support': True,
                     'reserved_percentage': 5}

        free = utils.calculate_virtual_free_capacity(
            host_stat['total_capacity_gb'],
            host_stat['free_capacity_gb'],
            host_stat['provisioned_capacity_gb'],
            host_stat['thin_provisioning_support'],
            host_stat['max_over_subscription_ratio'],
            host_stat['reserved_percentage'])

        self.assertEqual(27.01, free)

    def test_utils_calculate_virtual_free_capacity_with_thin(self):
        host_stat = {'total_capacity_gb': 20.01,
                     'free_capacity_gb': 18.01,
                     'provisioned_capacity_gb': 2.0,
                     'max_over_subscription_ratio': 2.0,
                     'thin_provisioning_support': True,
                     'thick_provisioning_support': False,
                     'reserved_percentage': 5}

        free = utils.calculate_virtual_free_capacity(
            host_stat['total_capacity_gb'],
            host_stat['free_capacity_gb'],
            host_stat['provisioned_capacity_gb'],
            host_stat['thin_provisioning_support'],
            host_stat['max_over_subscription_ratio'],
            host_stat['reserved_percentage'])

        self.assertEqual(37.02, free)
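
# Added worked example for the two capacity tests above.  The expected
# values are consistent with the reserved-percentage formula, where the
# reserved space is rounded down to whole GiB:
#   thick: 28.01 - floor(30.01 * 5 / 100)             = 28.01 - 1.0 = 27.01
#   thin:  20.01 * 2.0 - 2.0 - floor(20.01 * 5 / 100) = 40.02 - 3.0 = 37.02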


class Comparable(utils.ComparableMixin):
    def __init__(self, value):
        self.value = value

    def _cmpkey(self):
        return self.value


class TestComparableMixin(test.TestCase):
    def setUp(self):
        super(TestComparableMixin, self).setUp()
        self.one = Comparable(1)
        self.two = Comparable(2)

    def test_lt(self):
        self.assertTrue(self.one < self.two)
        self.assertFalse(self.two < self.one)
        self.assertFalse(self.one < self.one)

    def test_le(self):
        self.assertTrue(self.one <= self.two)
        self.assertFalse(self.two <= self.one)
        self.assertTrue(self.one <= self.one)

    def test_eq(self):
        self.assertFalse(self.one == self.two)
        self.assertFalse(self.two == self.one)
        self.assertTrue(self.one == self.one)

    def test_ge(self):
        self.assertFalse(self.one >= self.two)
        self.assertTrue(self.two >= self.one)
        self.assertTrue(self.one >= self.one)

    def test_gt(self):
        self.assertFalse(self.one > self.two)
        self.assertTrue(self.two > self.one)
        self.assertFalse(self.one > self.one)

    def test_ne(self):
        self.assertTrue(self.one != self.two)
        self.assertTrue(self.two != self.one)
        self.assertFalse(self.one != self.one)

    def test_compare(self):
        self.assertEqual(NotImplemented,
                         self.one._compare(1, self.one._cmpkey))


class TestValidateInteger(test.TestCase):
    def test_validate_integer_greater_than_max_int_limit(self):
        value = (2 ** 31) + 1
        self.assertRaises(webob.exc.HTTPBadRequest,
                          utils.validate_integer,
                          value, 'limit', min_value=-1, max_value=(2 ** 31))

    def test_validate_integer_less_than_min_int_limit(self):
        value = -12
        self.assertRaises(webob.exc.HTTPBadRequest,
                          utils.validate_integer,
                          value, 'limit', min_value=-1, max_value=(2 ** 31))

    def test_validate_integer_invalid_limit(self):
        value = "should_be_int"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          utils.validate_integer,
                          value, 'limit', min_value=-1, max_value=(2 ** 31))
cinder-8.0.0/cinder/tests/unit/test_drbdmanagedrv.py0000664000567000056710000005252012701406250023723 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 LINBIT HA Solutions GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import eventlet
import six
import sys
import time

import mock
from oslo_utils import importutils
from oslo_utils import timeutils

from cinder import context
from cinder import test
from cinder.volume import configuration as conf


class mock_dbus(object):
    def __init__(self):
        pass

    @staticmethod
    def Array(defaults, signature=None):
        return defaults


class mock_dm_consts(object):
    TQ_GET_PATH = "get_path"

    NODE_ADDR = "addr"

    CSTATE_PREFIX = "cstate:"
    TSTATE_PREFIX = "tstate:"

    FLAG_UPD_POOL = "upd_pool"
    FLAG_UPDATE = "update"
    FLAG_DRBDCTRL = "drbdctrl"
    FLAG_STORAGE = "storage"
    FLAG_EXTERNAL = "external"
    FLAG_DEPLOY = "deploy"
    FLAG_DISKLESS = "diskless"
    FLAG_CONNECT = "connect"
    FLAG_UPD_CON = "upd_con"
    FLAG_RECONNECT = "reconnect"
    FLAG_OVERWRITE = "overwrite"
    FLAG_DISCARD = "discard"
    FLAG_UPD_CONFIG = "upd_config"
    FLAG_STANDBY = "standby"
    FLAG_QIGNORE = "qignore"
    FLAG_REMOVE = "remove"

    AUX_PROP_PREFIX = "aux:"

    BOOL_TRUE = "true"
    BOOL_FALSE = "false"

    VOL_ID = "vol_id"


class mock_dm_exc(object):
    DM_SUCCESS = 0
    DM_INFO = 1
    DM_EEXIST = 101
    DM_ENOENT = 102
    DM_ERROR = 1000


class mock_dm_utils(object):
    @staticmethod
    def _aux_prop_name(key):
        if six.text_type(key).startswith(mock_dm_consts.AUX_PROP_PREFIX):
            return key[len(mock_dm_consts.AUX_PROP_PREFIX):]
        else:
            return None

    @staticmethod
    def aux_props_to_dict(props):
        aux_props = {}
        for (key, val) in props.items():
            aux_key = mock_dm_utils._aux_prop_name(key)
            if aux_key is not None:
                aux_props[aux_key] = val
        return aux_props

    @staticmethod
    def dict_to_aux_props(props):
        aux_props = {}
        for (key, val) in props.items():
            aux_key = mock_dm_consts.AUX_PROP_PREFIX + six.text_type(key)
            aux_props[aux_key] = six.text_type(val)
        return aux_props


def public_keys(c):
    return [n for n in c.__dict__.keys() if not n.startswith("_")]


sys.modules['dbus'] = mock_dbus
sys.modules['drbdmanage'] = collections.namedtuple(
    'module', ['consts', 'exceptions', 'utils'])
sys.modules['drbdmanage.utils'] = collections.namedtuple(
    'module', public_keys(mock_dm_utils))
sys.modules['drbdmanage.consts'] = collections.namedtuple(
    'module', public_keys(mock_dm_consts))
sys.modules['drbdmanage.exceptions'] = collections.namedtuple(
    'module', public_keys(mock_dm_exc))

import cinder.volume.drivers.drbdmanagedrv as drv

drv.dbus = mock_dbus
drv.dm_const = mock_dm_consts
drv.dm_utils = mock_dm_utils
drv.dm_exc = mock_dm_exc


def create_configuration(object):
    # Note: mock.MockObject and mock.IgnoreArg do not exist in the mock
    # library (they are mox APIs); use a spec'd Mock and mock.ANY instead.
    configuration = mock.Mock(spec=conf.Configuration)
    configuration.san_is_local = False
    configuration.append_config_values(mock.ANY)
    return configuration
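
# Added note: the sys.modules stubbing above lets the driver module be
# imported on hosts that have neither dbus nor drbdmanage installed.
# namedtuple is used only as a cheap ad-hoc namespace, e.g. (illustrative
# sketch, not part of the original file):
#
#     sys.modules['drbdmanage'] = collections.namedtuple(
#         'module', ['consts'])          # satisfies "import drbdmanage"
#
# The drv.dm_* assignments afterwards rebind the driver's own aliases to
# the mock classes defined above.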


class DrbdManageFakeDriver(object):
    resources = {}

    def __init__(self):
        self.calls = []

    def run_external_plugin(self, name, props):
        self.calls.append(["run_external_plugin", name, props])

        call_okay = [[mock_dm_exc.DM_SUCCESS, "ACK", []]]
        not_done_yet = (call_okay,
                        dict(timeout=mock_dm_consts.BOOL_FALSE,
                             result=mock_dm_consts.BOOL_FALSE))
        success = (call_okay,
                   dict(timeout=mock_dm_consts.BOOL_FALSE,
                        result=mock_dm_consts.BOOL_TRUE))
        got_timeout = (call_okay,
                       dict(timeout=mock_dm_consts.BOOL_TRUE,
                            result=mock_dm_consts.BOOL_FALSE))

        if "retry" not in props:
            # Fake success, to not slow tests down
            return success

        if props["retry"] > 1:
            props["retry"] -= 1
            return not_done_yet

        if props.get("run-into-timeout"):
            return got_timeout

        return success

    def list_resources(self, res, serial, prop, req):
        self.calls.append(["list_resources", res, prop, req])
        if ('aux:cinder-id' in prop and
                prop['aux:cinder-id'].startswith("deadbeef")):
            return ([[mock_dm_exc.DM_ENOENT, "none", []]],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", dict(prop))])

    def create_resource(self, res, props):
        self.calls.append(["create_resource", res, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def create_volume(self, res, size, props):
        self.calls.append(["create_volume", res, size, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []],
                [mock_dm_exc.DM_INFO, "create_volume",
                 [(mock_dm_consts.VOL_ID, '2')]]]

    def auto_deploy(self, res, red, delta, site_clients):
        self.calls.append(["auto_deploy", res, red, delta, site_clients])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []] * red]

    def list_volumes(self, res, ser, prop, req):
        self.calls.append(["list_volumes", res, ser, prop, req])
        if ('aux:cinder-id' in prop and
                prop['aux:cinder-id'].startswith("deadbeef")):
            return ([[mock_dm_exc.DM_SUCCESS, "none", []]],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", dict(), [(2, dict(prop))])
                     ])

    def remove_volume(self, res, nr, force):
        self.calls.append(["remove_volume", res, nr, force])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def text_query(self, cmd):
        self.calls.append(["text_query", cmd])
        if cmd[0] == mock_dm_consts.TQ_GET_PATH:
            return ([(mock_dm_exc.DM_SUCCESS, "ack", [])], ['/dev/drbd0'])
        return ([(mock_dm_exc.DM_ERROR, 'unknown command', [])], [])

    def list_assignments(self, nodes, res, ser, prop, req):
        self.calls.append(["list_assignments", nodes, res, ser, prop, req])
        if ('aux:cinder-id' in prop and
                prop['aux:cinder-id'].startswith("deadbeef")):
            return ([[mock_dm_exc.DM_SUCCESS, "none", []]],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("node", "res", dict(), [(2, dict(prop))])
                     ])

    def create_snapshot(self, res, snap, nodes, props):
        self.calls.append(["create_snapshot", res, snap, nodes, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def list_snapshots(self, res, sn, serial, prop, req):
        self.calls.append(["list_snapshots", res, sn, serial, prop, req])
        if ('aux:cinder-id' in prop and
                prop['aux:cinder-id'].startswith("deadbeef")):
            return ([[mock_dm_exc.DM_SUCCESS, "none", []]],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", [("snap", dict(prop))])
                     ])

    def remove_snapshot(self, res, snap, force):
        self.calls.append(["remove_snapshot", res, snap, force])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def resize_volume(self, res, vol, ser, size, delta):
        self.calls.append(["resize_volume", res, vol, ser, size, delta])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def restore_snapshot(self, res, snap, new, rprop, vprops):
        self.calls.append(["restore_snapshot", res, snap, new, rprop, vprops])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def assign(self, host, resource, props):
        self.calls.append(["assign", host, resource, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def create_node(self, name, prop):
        self.calls.append(["create_node", name, prop])
        if name.startswith('EXIST'):
            return [(mock_dm_exc.DM_EEXIST, "none", [])]
        else:
            return [(mock_dm_exc.DM_SUCCESS, "ack", [])]
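
# Added note on the fake above: each method mirrors the drbdmanage DBus API
# by returning a list of (return-code, message, details) triples, plus
# payload data for the list_* calls, and records every invocation in
# self.calls so the tests can assert on call order.  A cinder id starting
# with "deadbeef" serves as the sentinel for "object does not exist".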


class DrbdManageIscsiTestCase(test.TestCase):
    def _fake_safe_get(self, key):
        if key == 'iscsi_helper':
            return 'fake'

        if key.endswith('_policy'):
            return '{}'

        return None

    @staticmethod
    def _fake_sleep(amount):
        pass

    def setUp(self):
        self.ctxt = context.get_admin_context()
        self._mock = mock.Mock()
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.san_is_local = True
        self.configuration.reserved_percentage = 1

        super(DrbdManageIscsiTestCase, self).setUp()

        self.stubs.Set(importutils, 'import_object',
                       self.fake_import_object)

        self.stubs.Set(drv.DrbdManageBaseDriver,
                       'call_or_reconnect',
                       self.fake_issue_dbus_call)
        self.stubs.Set(drv.DrbdManageBaseDriver,
                       'dbus_connect',
                       self.fake_issue_dbus_connect)
        self.stubs.Set(drv.DrbdManageBaseDriver,
                       '_wait_for_node_assignment',
                       self.fake_wait_node_assignment)

        self.configuration.safe_get = self._fake_safe_get

        self.stubs.Set(eventlet, 'sleep', self._fake_sleep)

    # Infrastructure
    def fake_import_object(self, what, configuration, db, executor):
        return None

    def fake_issue_dbus_call(self, fn, *args):
        return fn(*args)

    def fake_wait_node_assignment(self, *args, **kwargs):
        return True

    def fake_issue_dbus_connect(self):
        self.odm = DrbdManageFakeDriver()

    def call_or_reconnect(self, method, *params):
        return method(*params)

    def fake_is_external_node(self, name):
        return False

    # Tests per se

    def test_create_volume(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'deadbeef-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}

        dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration)
        dmd.drbdmanage_devs_on_controller = False
        dmd.odm = DrbdManageFakeDriver()
        dmd.create_volume(testvol)
        self.assertEqual("create_resource", dmd.odm.calls[0][0])
        self.assertEqual("list_volumes", dmd.odm.calls[1][0])
        self.assertEqual("create_volume", dmd.odm.calls[2][0])
        self.assertEqual(1048576, dmd.odm.calls[2][2])
        self.assertEqual("auto_deploy", dmd.odm.calls[3][0])
        self.assertEqual(5, len(dmd.odm.calls))

    def test_create_volume_controller_all_vols(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'deadbeef-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}

        dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration)
        dmd.drbdmanage_devs_on_controller = True
        dmd.odm = DrbdManageFakeDriver()
        dmd.create_volume(testvol)
        self.assertEqual(6, len(dmd.odm.calls))
        self.assertEqual("create_resource", dmd.odm.calls[0][0])
        self.assertEqual("list_volumes", dmd.odm.calls[1][0])
        self.assertEqual("create_volume", dmd.odm.calls[2][0])
        self.assertEqual(1048576, dmd.odm.calls[2][2])
        self.assertEqual("auto_deploy", dmd.odm.calls[3][0])
        self.assertEqual("run_external_plugin", dmd.odm.calls[4][0])
        self.assertEqual("assign", dmd.odm.calls[5][0])

    def test_delete_volume(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}

        dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.delete_volume(testvol)
        self.assertEqual("list_volumes", dmd.odm.calls[0][0])
        self.assertEqual(testvol['id'], dmd.odm.calls[0][3]["aux:cinder-id"])
        self.assertEqual("remove_volume", dmd.odm.calls[1][0])

    def test_local_path(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}

        dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()

        data = dmd.local_path(testvol)
        self.assertTrue(data.startswith("/dev/drbd"))
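
    # Added note: the 1048576 asserted in the create_volume tests above is
    # the 1 GiB test volume expressed in KiB (1 * 1024 * 1024), which appears
    # to be the unit the drbdmanage create_volume call works in.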
self.assertEqual("create_snapshot", dmd.odm.calls[2][0]) self.assertTrue('node' in dmd.odm.calls[2][3]) def test_delete_snapshot(self): testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.delete_snapshot(testsnap) self.assertEqual("list_snapshots", dmd.odm.calls[0][0]) self.assertEqual("remove_snapshot", dmd.odm.calls[1][0]) def test_extend_volume(self): testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', 'volume_type_id': 'drbdmanage', 'created_at': timeutils.utcnow()} dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.extend_volume(testvol, 5) self.assertEqual("list_volumes", dmd.odm.calls[0][0]) self.assertEqual(testvol['id'], dmd.odm.calls[0][3]["aux:cinder-id"]) self.assertEqual("resize_volume", dmd.odm.calls[1][0]) self.assertEqual("res", dmd.odm.calls[1][1]) self.assertEqual(2, dmd.odm.calls[1][2]) self.assertEqual(-1, dmd.odm.calls[1][3]) self.assertEqual(5242880, dmd.odm.calls[1][4]) def test_create_cloned_volume(self): srcvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', 'volume_type_id': 'drbdmanage', 'created_at': timeutils.utcnow()} newvol = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.create_cloned_volume(newvol, srcvol) self.assertEqual("list_volumes", dmd.odm.calls[0][0]) self.assertEqual("list_assignments", dmd.odm.calls[1][0]) self.assertEqual("create_snapshot", dmd.odm.calls[2][0]) self.assertEqual("run_external_plugin", dmd.odm.calls[3][0]) self.assertEqual("list_snapshots", dmd.odm.calls[4][0]) self.assertEqual("restore_snapshot", dmd.odm.calls[5][0]) self.assertEqual("run_external_plugin", dmd.odm.calls[6][0]) self.assertEqual("list_snapshots", dmd.odm.calls[7][0]) self.assertEqual("remove_snapshot", dmd.odm.calls[8][0]) def test_create_cloned_volume_larger_size(self): srcvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', 'volume_type_id': 'drbdmanage', 'created_at': timeutils.utcnow()} newvol = {'size': 5, 'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) dmd.odm = DrbdManageFakeDriver() dmd.create_cloned_volume(newvol, srcvol) self.assertEqual("list_volumes", dmd.odm.calls[0][0]) self.assertEqual("list_assignments", dmd.odm.calls[1][0]) self.assertEqual("create_snapshot", dmd.odm.calls[2][0]) self.assertEqual("run_external_plugin", dmd.odm.calls[3][0]) self.assertEqual("list_snapshots", dmd.odm.calls[4][0]) self.assertEqual("restore_snapshot", dmd.odm.calls[5][0]) self.assertEqual("run_external_plugin", dmd.odm.calls[6][0]) self.assertEqual("list_snapshots", dmd.odm.calls[7][0]) self.assertEqual("remove_snapshot", dmd.odm.calls[8][0]) # resize image checks self.assertEqual("list_volumes", dmd.odm.calls[9][0]) self.assertEqual(newvol['id'], dmd.odm.calls[9][3]["aux:cinder-id"]) self.assertEqual("resize_volume", dmd.odm.calls[10][0]) self.assertEqual("res", dmd.odm.calls[10][1]) self.assertEqual(2, dmd.odm.calls[10][2]) self.assertEqual(-1, dmd.odm.calls[10][3]) self.assertEqual(5242880, dmd.odm.calls[10][4]) class DrbdManageDrbdTestCase(DrbdManageIscsiTestCase): def setUp(self): super(DrbdManageDrbdTestCase, self).setUp() self.stubs.Set(drv.DrbdManageDrbdDriver, 


class DrbdManageDrbdTestCase(DrbdManageIscsiTestCase):
    def setUp(self):
        super(DrbdManageDrbdTestCase, self).setUp()

        self.stubs.Set(drv.DrbdManageDrbdDriver,
                       '_is_external_node',
                       self.fake_is_external_node)

    def test_drbd_create_export(self):
        volume = {'project_id': 'testprjid',
                  'name': 'testvol',
                  'size': 1,
                  'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                  'volume_type_id': 'drbdmanage',
                  'created_at': timeutils.utcnow()}

        connector = {'host': 'node99',
                     'ip': '127.0.0.99'}

        dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()

        x = dmd.create_export({}, volume, connector)
        self.assertEqual("list_volumes", dmd.odm.calls[0][0])
        self.assertEqual("create_node", dmd.odm.calls[1][0])
        self.assertEqual("assign", dmd.odm.calls[2][0])
        # local_path
        self.assertEqual("list_volumes", dmd.odm.calls[3][0])
        self.assertEqual("text_query", dmd.odm.calls[4][0])
        self.assertEqual("local", x["driver_volume_type"])


class DrbdManageCommonTestCase(DrbdManageIscsiTestCase):
    def setUp(self):
        super(DrbdManageCommonTestCase, self).setUp()

    def test_drbd_policy_loop_timeout(self):
        dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()

        res = dmd._call_policy_plugin('void', {},
                                      {'retry': 4,
                                       'run-into-timeout': True})
        self.assertFalse(res)
        self.assertEqual(4, len(dmd.odm.calls))
        self.assertEqual("run_external_plugin", dmd.odm.calls[0][0])
        self.assertEqual("run_external_plugin", dmd.odm.calls[1][0])
        self.assertEqual("run_external_plugin", dmd.odm.calls[2][0])
        self.assertEqual("run_external_plugin", dmd.odm.calls[3][0])

    def test_drbd_policy_loop_success(self):
        dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()

        res = dmd._call_policy_plugin('void',
                                      {'base': 'data', 'retry': 4},
                                      {'override': 'xyz'})
        self.assertTrue(res)
        self.assertEqual(4, len(dmd.odm.calls))
        self.assertEqual("run_external_plugin", dmd.odm.calls[0][0])
        self.assertEqual("run_external_plugin", dmd.odm.calls[1][0])
        self.assertEqual("run_external_plugin", dmd.odm.calls[2][0])
        self.assertEqual("run_external_plugin", dmd.odm.calls[3][0])

    def test_drbd_policy_loop_simple(self):
        dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()

        res = dmd._call_policy_plugin('policy-name',
                                      {'base': "value",
                                       'over': "ignore"},
                                      {'over': "ride",
                                       'starttime': 0})
        self.assertTrue(res)
        self.assertEqual(1, len(dmd.odm.calls))
        self.assertEqual("run_external_plugin", dmd.odm.calls[0][0])
        self.assertEqual('policy-name', dmd.odm.calls[0][1])
        incoming = dmd.odm.calls[0][2]
        self.assertGreaterEqual(4, abs(float(incoming['starttime']) -
                                       time.time()))
        self.assertEqual('value', incoming['base'])
        self.assertEqual('ride', incoming['over'])
cinder-8.0.0/cinder/tests/unit/test_service.py0000664000567000056710000003505512701406250022567 0ustar jenkinsjenkins00000000000000
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Unit Tests for remote procedure calls using queue """ import mock from oslo_concurrency import processutils from oslo_config import cfg from oslo_db import exception as db_exc from cinder import context from cinder import db from cinder import exception from cinder import manager from cinder import objects from cinder import rpc from cinder import service from cinder import test test_service_opts = [ cfg.StrOpt("fake_manager", default="cinder.tests.unit.test_service.FakeManager", help="Manager for testing"), cfg.StrOpt("test_service_listen", help="Host to bind test service to"), cfg.IntOpt("test_service_listen_port", default=0, help="Port number to bind test service to"), ] CONF = cfg.CONF CONF.register_opts(test_service_opts) class FakeManager(manager.Manager): """Fake manager for tests.""" def __init__(self, host=None, db_driver=None, service_name=None): super(FakeManager, self).__init__(host=host, db_driver=db_driver) def test_method(self): return 'manager' class ExtendedService(service.Service): def test_method(self): return 'service' class ServiceManagerTestCase(test.TestCase): """Test cases for Services.""" def test_message_gets_to_manager(self): serv = service.Service('test', 'test', 'test', 'cinder.tests.unit.test_service.FakeManager') serv.start() self.assertEqual('manager', serv.test_method()) def test_override_manager_method(self): serv = ExtendedService('test', 'test', 'test', 'cinder.tests.unit.test_service.FakeManager') serv.start() self.assertEqual('service', serv.test_method()) @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'test': '1.5'}) @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'test': '1.3'}) def test_reset(self): serv = service.Service('test', 'test', 'test', 'cinder.tests.unit.test_service.FakeManager') serv.start() serv.reset() self.assertEqual({}, rpc.LAST_OBJ_VERSIONS) self.assertEqual({}, rpc.LAST_RPC_VERSIONS) class ServiceFlagsTestCase(test.TestCase): def test_service_enabled_on_create_based_on_flag(self): self.flags(enable_new_services=True) host = 'foo' binary = 'cinder-fake' app = service.Service.create(host=host, binary=binary) app.start() app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) self.assertFalse(ref['disabled']) def test_service_disabled_on_create_based_on_flag(self): self.flags(enable_new_services=False) host = 'foo' binary = 'cinder-fake' app = service.Service.create(host=host, binary=binary) app.start() app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) self.assertTrue(ref['disabled']) class ServiceTestCase(test.TestCase): """Test cases for Services.""" def setUp(self): super(ServiceTestCase, self).setUp() self.host = 'foo' self.binary = 'cinder-fake' self.topic = 'fake' def test_create(self): # NOTE(vish): Create was moved out of mock replay to make sure that # the looping calls are created in StartService. 


class ServiceTestCase(test.TestCase):
    """Test cases for Services."""

    def setUp(self):
        super(ServiceTestCase, self).setUp()
        self.host = 'foo'
        self.binary = 'cinder-fake'
        self.topic = 'fake'

    def test_create(self):
        # NOTE(vish): Create was moved out of mock replay to make sure that
        #             the looping calls are created in StartService.
        app = service.Service.create(host=self.host,
                                     binary=self.binary,
                                     topic=self.topic)

        self.assertTrue(app)

    def test_report_state_newly_disconnected(self):
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        with mock.patch.object(objects.service, 'db') as mock_db:
            mock_db.service_get_by_args.side_effect = exception.NotFound()
            mock_db.service_create.return_value = service_ref
            mock_db.service_get.side_effect = db_exc.DBConnectionError()

            serv = service.Service(
                self.host,
                self.binary,
                self.topic,
                'cinder.tests.unit.test_service.FakeManager'
            )
            serv.start()
            serv.report_state()
            self.assertTrue(serv.model_disconnected)
            self.assertFalse(mock_db.service_update.called)

    def test_report_state_disconnected_DBError(self):
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        with mock.patch.object(objects.service, 'db') as mock_db:
            mock_db.service_get_by_args.side_effect = exception.NotFound()
            mock_db.service_create.return_value = service_ref
            mock_db.service_get.side_effect = db_exc.DBError()

            serv = service.Service(
                self.host,
                self.binary,
                self.topic,
                'cinder.tests.unit.test_service.FakeManager'
            )
            serv.start()
            serv.report_state()
            self.assertTrue(serv.model_disconnected)
            self.assertFalse(mock_db.service_update.called)

    def test_report_state_newly_connected(self):
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        with mock.patch.object(objects.service, 'db') as mock_db,\
                mock.patch('cinder.db.sqlalchemy.api.get_by_id') as get_by_id:
            mock_db.service_get_by_args.side_effect = exception.NotFound()
            mock_db.service_create.return_value = service_ref
            get_by_id.return_value = service_ref

            serv = service.Service(
                self.host,
                self.binary,
                self.topic,
                'cinder.tests.unit.test_service.FakeManager'
            )
            serv.start()
            serv.model_disconnected = True
            serv.report_state()

            self.assertFalse(serv.model_disconnected)
            self.assertTrue(mock_db.service_update.called)

    def test_report_state_manager_not_working(self):
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        with mock.patch('cinder.db') as mock_db:
            mock_db.service_get.return_value = service_ref

            serv = service.Service(
                self.host,
                self.binary,
                self.topic,
                'cinder.tests.unit.test_service.FakeManager'
            )
            serv.manager.is_working = mock.Mock(return_value=False)
            serv.start()
            serv.report_state()

            serv.manager.is_working.assert_called_once_with()
            self.assertFalse(mock_db.service_update.called)

    def test_service_with_long_report_interval(self):
        self.override_config('service_down_time', 10)
        self.override_config('report_interval', 10)
        service.Service.create(
            binary="test_service",
            manager="cinder.tests.unit.test_service.FakeManager")
        self.assertEqual(25, CONF.service_down_time)

    @mock.patch.object(rpc, 'get_server')
    @mock.patch('cinder.db')
    def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc):
        serv = service.Service(
            self.host,
            self.binary,
            self.topic,
            'cinder.tests.unit.test_service.FakeManager'
        )
        serv.start()
        serv.stop()
        serv.wait()
        serv.rpcserver.start.assert_called_once_with()
        serv.rpcserver.stop.assert_called_once_with()
        serv.rpcserver.wait.assert_called_once_with()

    @mock.patch('cinder.service.Service.report_state')
    @mock.patch('cinder.service.Service.periodic_tasks')
    @mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(rpc, 'get_server')
    @mock.patch('cinder.db')
    def test_service_stop_waits_for_timers(self, mock_db, mock_rpc,
                                           mock_loopcall, mock_periodic,
                                           mock_report):
        """Test that we wait for loopcalls only if stop succeeds."""
        serv = service.Service(
            self.host,
            self.binary,
            self.topic,
            'cinder.tests.unit.test_service.FakeManager',
            report_interval=5,
            periodic_interval=10,
        )

        # One of the loopcalls will raise an exception on stop
        mock_loopcall.side_effect = (
            mock.Mock(**{'stop.side_effect': Exception}),
            mock.Mock())

        serv.start()
        serv.stop()
        serv.wait()
        serv.rpcserver.start.assert_called_once_with()
        serv.rpcserver.stop.assert_called_once_with()
        serv.rpcserver.wait.assert_called_once_with()

        # The first loopcall will have failed on the stop call, so we will not
        # have waited for it to stop
        self.assertEqual(1, serv.timers[0].start.call_count)
        self.assertEqual(1, serv.timers[0].stop.call_count)
        self.assertFalse(serv.timers[0].wait.called)

        # We will wait for the second loopcall
        self.assertEqual(1, serv.timers[1].start.call_count)
        self.assertEqual(1, serv.timers[1].stop.call_count)
        self.assertEqual(1, serv.timers[1].wait.call_count)


class TestWSGIService(test.TestCase):
    def setUp(self):
        super(TestWSGIService, self).setUp()

    @mock.patch('oslo_service.wsgi.Loader')
    def test_service_random_port(self, mock_loader):
        test_service = service.WSGIService("test_service")
        self.assertEqual(0, test_service.port)
        test_service.start()
        self.assertNotEqual(0, test_service.port)
        test_service.stop()
        self.assertTrue(mock_loader.called)

    @mock.patch('oslo_service.wsgi.Loader')
    def test_reset_pool_size_to_default(self, mock_loader):
        test_service = service.WSGIService("test_service")
        test_service.start()

        # Stopping the service, which in turn sets pool size to 0
        test_service.stop()
        self.assertEqual(0, test_service.server._pool.size)

        # Resetting pool size to default
        test_service.reset()
        test_service.start()
        self.assertEqual(cfg.CONF.wsgi_default_pool_size,
                         test_service.server._pool.size)
        self.assertTrue(mock_loader.called)

    @mock.patch('oslo_service.wsgi.Loader')
    def test_workers_set_default(self, mock_loader):
        self.override_config('osapi_volume_listen_port',
                             CONF.test_service_listen_port)
        test_service = service.WSGIService("osapi_volume")
        self.assertEqual(processutils.get_worker_count(),
                         test_service.workers)
        self.assertTrue(mock_loader.called)

    @mock.patch('oslo_service.wsgi.Loader')
    def test_workers_set_good_user_setting(self, mock_loader):
        self.override_config('osapi_volume_listen_port',
                             CONF.test_service_listen_port)
        self.override_config('osapi_volume_workers', 8)
        test_service = service.WSGIService("osapi_volume")
        self.assertEqual(8, test_service.workers)
        self.assertTrue(mock_loader.called)

    @mock.patch('oslo_service.wsgi.Loader')
    def test_workers_set_zero_user_setting(self, mock_loader):
        self.override_config('osapi_volume_listen_port',
                             CONF.test_service_listen_port)
        self.override_config('osapi_volume_workers', 0)
        test_service = service.WSGIService("osapi_volume")
        # If a value less than 1 is used, defaults to number of procs
        # available
        self.assertEqual(processutils.get_worker_count(),
                         test_service.workers)
        self.assertTrue(mock_loader.called)

    @mock.patch('oslo_service.wsgi.Loader')
    def test_workers_set_negative_user_setting(self, mock_loader):
        self.override_config('osapi_volume_workers', -1)
        self.assertRaises(exception.InvalidInput,
                          service.WSGIService,
                          "osapi_volume")
        self.assertTrue(mock_loader.called)
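
# Added note on the worker tests above: per these cases, an unset or zero
# osapi_volume_workers falls back to processutils.get_worker_count() (the
# CPU count), a negative value is rejected with InvalidInput, and only an
# explicit positive setting is used as-is.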


class OSCompatibilityTestCase(test.TestCase):
    def _test_service_launcher(self, fake_os):
        # Note(lpetrut): The cinder-volume service needs to be spawned
        # differently on Windows due to an eventlet bug. For this reason,
        # we must check the process launcher used.
        fake_process_launcher = mock.MagicMock()
        with mock.patch('os.name', fake_os):
            with mock.patch('cinder.service.process_launcher',
                            fake_process_launcher):
                launcher = service.get_launcher()
                if fake_os == 'nt':
                    self.assertEqual(service.Launcher, type(launcher))
                else:
                    self.assertEqual(fake_process_launcher(), launcher)

    def test_process_launcher_on_windows(self):
        self._test_service_launcher('nt')

    def test_process_launcher_on_linux(self):
        self._test_service_launcher('posix')
cinder-8.0.0/cinder/tests/unit/test_backup_google.py0000664000567000056710000005632012701406250023726 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2016 Vedams Inc.
# Copyright (C) 2016 Google Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Google Backup code.
"""

import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import zlib

import mock
from oslo_utils import units

from cinder.backup.drivers import google as google_dr
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_google_client
from cinder.tests.unit.backup import fake_google_client2
from cinder.tests.unit import fake_constants as fake


class FakeMD5(object):
    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def digest(cls):
        return 'gcscindermd5'

    @classmethod
    def hexdigest(cls):
        return 'gcscindermd5'


class FakeObjectName(object):
    @classmethod
    def _fake_generate_object_name_prefix(cls, backup):
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup.id)
        volume = 'volume_%s' % (backup.volume_id)
        prefix = volume + '_' + backup_name
        return prefix


def gcs_client(func):
    @mock.patch.object(google_dr.client, 'GoogleCredentials',
                       fake_google_client.FakeGoogleCredentials)
    @mock.patch.object(google_dr.discovery, 'build',
                       fake_google_client.FakeGoogleDiscovery.Build)
    @mock.patch.object(google_dr, 'GoogleMediaIoBaseDownload',
                       fake_google_client.FakeGoogleMediaIoBaseDownload)
    @mock.patch.object(hashlib, 'md5', FakeMD5)
    def func_wrapper(self, *args, **kwargs):
        return func(self, *args, **kwargs)

    return func_wrapper


def gcs_client2(func):
    @mock.patch.object(google_dr.client, 'GoogleCredentials',
                       fake_google_client2.FakeGoogleCredentials)
    @mock.patch.object(google_dr.discovery, 'build',
                       fake_google_client2.FakeGoogleDiscovery.Build)
    @mock.patch.object(google_dr, 'GoogleMediaIoBaseDownload',
                       fake_google_client2.FakeGoogleMediaIoBaseDownload)
    @mock.patch.object(google_dr.GoogleBackupDriver,
                       '_generate_object_name_prefix',
                       FakeObjectName._fake_generate_object_name_prefix)
    @mock.patch.object(hashlib, 'md5', FakeMD5)
    def func_wrapper(self, *args, **kwargs):
        return func(self, *args, **kwargs)

    return func_wrapper


def fake_backup_metadata(self, backup, object_meta):
    raise exception.BackupDriverException(message=_('fake'))


def fake_delete(self, backup):
    raise exception.BackupOperationError()


def _fake_delete_object(self, bucket_name, object_name):
    raise AssertionError('delete_object method should not be called.')


class GoogleBackupDriverTestCase(test.TestCase):
    """Test Case for Google"""

    _DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df'

    def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID):
        vol = {'id': volume_id,
               'size': 1,
               'status': 'available'}
        return db.volume_create(self.ctxt, vol)['id']

    def _create_backup_db_entry(self,
                                volume_id=_DEFAULT_VOLUME_ID,
                                container=google_dr.CONF.backup_gcs_bucket,
                                parent_id=None,
                                service_metadata=None):

        try:
            db.volume_get(self.ctxt, volume_id)
        except exception.NotFound:
            self._create_volume_db_entry(volume_id=volume_id)

        kwargs = {'size': 1,
                  'container': container,
                  'volume_id': volume_id,
                  'parent_id': parent_id,
                  'user_id': fake.user_id,
                  'project_id': fake.project_id,
                  'service_metadata': service_metadata,
                  }
        backup = objects.Backup(context=self.ctxt, **kwargs)
        backup.create()
        return backup

    def setUp(self):
        super(GoogleBackupDriverTestCase, self).setUp()
        self.flags(backup_gcs_bucket='gcscinderbucket')
        self.flags(backup_gcs_credential_file='test-file')
        self.flags(backup_gcs_project_id='test-gcs')
        self.ctxt = context.get_admin_context()
        self.volume_file = tempfile.NamedTemporaryFile()
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(self.volume_file.close)
        # Remove tempdir.
        self.addCleanup(shutil.rmtree, self.temp_dir)
        for _i in range(0, 64):
            self.volume_file.write(os.urandom(units.Ki))

    @gcs_client
    def test_backup(self):
        volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2'
        container_name = 'test-bucket'
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        result = service.backup(backup, self.volume_file)
        self.assertIsNone(result)

    @gcs_client
    def test_backup_uncompressed(self):
        volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039'
        backup = self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='none')
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)

    @gcs_client
    def test_backup_bz2(self):
        volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c'
        backup = self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='bz2')
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)

    @gcs_client
    def test_backup_zlib(self):
        volume_id = '5cea0535-b6fb-4531-9a38-000000bea094'
        backup = self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='zlib')
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)

    @gcs_client
    def test_backup_default_container(self):
        volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=None)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)
        self.assertEqual('gcscinderbucket', backup.container)

    @gcs_client
    @mock.patch('cinder.backup.drivers.google.GoogleBackupDriver.'
                '_send_progress_end')
    @mock.patch('cinder.backup.drivers.google.GoogleBackupDriver.'
                '_send_progress_notification')
    def test_backup_default_container_notify(self, _send_progress,
                                             _send_progress_end):
        volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a'
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=None)
        # If the backup_object_number_per_notification is set to 1,
        # the _send_progress method will be called for sure.
        google_dr.CONF.set_override("backup_object_number_per_notification",
                                    1)
        google_dr.CONF.set_override("backup_gcs_enable_progress_timer",
                                    False)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)
        self.assertTrue(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

        # If the backup_object_number_per_notification is increased to
        # another value, the _send_progress method will not be called.
        _send_progress.reset_mock()
        _send_progress_end.reset_mock()
        google_dr.CONF.set_override("backup_object_number_per_notification",
                                    10)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)
        self.assertFalse(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

        # If the timer is enabled, the _send_progress will be called,
        # since the timer can trigger the progress notification.
        _send_progress.reset_mock()
        _send_progress_end.reset_mock()
        google_dr.CONF.set_override("backup_object_number_per_notification",
                                    10)
        google_dr.CONF.set_override("backup_gcs_enable_progress_timer",
                                    True)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)
        self.assertTrue(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

    @gcs_client
    def test_backup_custom_container(self):
        volume_id = '1da9859e-77e5-4731-bd58-000000ca119e'
        container_name = 'fake99'
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)
        self.assertEqual(container_name, backup.container)

    @gcs_client2
    def test_backup_shafile(self):
        volume_id = '6465dad4-22af-48f7-8a1a-000000218907'

        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service.backup(backup, self.volume_file)
        self.assertEqual(container_name, backup.container)

        # Verify sha contents
        content1 = service._read_sha256file(backup)
        self.assertEqual(64 * units.Ki / content1['chunk_size'],
                         len(content1['sha256s']))

    @gcs_client2
    def test_backup_cmp_shafiles(self):
        volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2'

        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service1 = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service1.backup(backup, self.volume_file)
        self.assertEqual(container_name, backup.container)

        # Create incremental backup with no change to contents
        deltabackup = self._create_backup_db_entry(volume_id=volume_id,
                                                   container=container_name,
                                                   parent_id=backup.id)
        service2 = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service2.backup(deltabackup, self.volume_file)
        self.assertEqual(container_name, deltabackup.container)

        # Compare shas from both files
        content1 = service1._read_sha256file(backup)
        content2 = service2._read_sha256file(deltabackup)
        self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
        self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
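
    # Added note for the delta tests below: with backup_gcs_object_size set
    # to 8 KiB and backup_gcs_block_size to 1 KiB, each sha256s entry covers
    # one 1 KiB block, so a write at byte offset N KiB changes the sha at
    # index N (indices 16 and 32 in the first test, 16 and 20 in the second).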

    @gcs_client2
    def test_backup_delta_two_objects_change(self):
        volume_id = '30dab288-265a-4583-9abe-000000d42c67'

        self.flags(backup_gcs_object_size=8 * units.Ki)
        self.flags(backup_gcs_block_size=units.Ki)

        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service1 = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service1.backup(backup, self.volume_file)
        self.assertEqual(container_name, backup.container)

        # Create incremental backup after changing the contents of
        # two objects
        self.volume_file.seek(2 * 8 * units.Ki)
        self.volume_file.write(os.urandom(units.Ki))
        self.volume_file.seek(4 * 8 * units.Ki)
        self.volume_file.write(os.urandom(units.Ki))

        deltabackup = self._create_backup_db_entry(volume_id=volume_id,
                                                   container=container_name,
                                                   parent_id=backup.id)
        service2 = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service2.backup(deltabackup, self.volume_file)
        self.assertEqual(container_name, deltabackup.container)

        content1 = service1._read_sha256file(backup)
        content2 = service2._read_sha256file(deltabackup)

        # Verify that two shas are changed at index 16 and 32
        self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
        self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32])

    @gcs_client2
    def test_backup_delta_two_blocks_in_object_change(self):
        volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba'

        self.flags(backup_gcs_object_size=8 * units.Ki)
        self.flags(backup_gcs_block_size=units.Ki)

        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)

        service1 = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service1.backup(backup, self.volume_file)
        self.assertEqual(container_name, backup.container)

        # Create incremental backup after changing two blocks within
        # one object
        self.volume_file.seek(16 * units.Ki)
        self.volume_file.write(os.urandom(units.Ki))
        self.volume_file.seek(20 * units.Ki)
        self.volume_file.write(os.urandom(units.Ki))

        deltabackup = self._create_backup_db_entry(volume_id=volume_id,
                                                   container=container_name,
                                                   parent_id=backup.id)
        service2 = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service2.backup(deltabackup, self.volume_file)
        self.assertEqual(container_name, deltabackup.container)

        # Verify that two shas are changed at index 16 and 20
        content1 = service1._read_sha256file(backup)
        content2 = service2._read_sha256file(deltabackup)
        self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
        self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])

    @gcs_client
    def test_create_backup_fail(self):
        volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2'
        container_name = 'gcs_api_failure'
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        self.assertRaises(exception.GCSApiFailure,
                          service.backup,
                          backup, self.volume_file)

    @gcs_client
    def test_create_backup_fail2(self):
        volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2'
        container_name = 'gcs_oauth2_failure'
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        self.assertRaises(exception.GCSOAuth2Failure,
                          service.backup,
                          backup, self.volume_file)

    @gcs_client
    @mock.patch.object(google_dr.GoogleBackupDriver, '_backup_metadata',
                       fake_backup_metadata)
    def test_backup_backup_metadata_fail(self):
        """Test of when an exception occurs in backup().

        In backup(), after an exception occurs in self._backup_metadata(),
        we want to check the process of an exception handler.
        """
        volume_id = '020d9142-339c-4876-a445-000000f1520c'

        backup = self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='none')
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        # We expect that an exception be notified directly.
        self.assertRaises(exception.BackupDriverException,
                          service.backup,
                          backup, self.volume_file)

    @gcs_client
    @mock.patch.object(google_dr.GoogleBackupDriver, '_backup_metadata',
                       fake_backup_metadata)
    @mock.patch.object(google_dr.GoogleBackupDriver, 'delete',
                       fake_delete)
    def test_backup_backup_metadata_fail2(self):
        """Test of when an exception occurs in an exception handler.

        In backup(), after an exception occurs in self._backup_metadata(),
        we want to check the process when the second exception occurs in
        self.delete().
        """
        volume_id = '2164421d-f181-4db7-b9bd-000000eeb628'

        backup = self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='none')
        service = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        # We expect that the second exception is notified.
        self.assertRaises(exception.BackupOperationError,
                          service.backup,
                          backup, self.volume_file)

    @gcs_client
    def test_restore(self):
        volume_id = 'c2a81f09-f480-4325-8424-00000071685b'
        backup = self._create_backup_db_entry(volume_id=volume_id)
        service = google_dr.GoogleBackupDriver(self.ctxt)

        with tempfile.NamedTemporaryFile() as volume_file:
            service.restore(backup, volume_id, volume_file)

    @gcs_client
    def test_restore_fail(self):
        volume_id = 'c2a81f09-f480-4325-8424-00000071685b'
        container_name = 'gcs_connection_failure'
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service = google_dr.GoogleBackupDriver(self.ctxt)

        with tempfile.NamedTemporaryFile() as volume_file:
            self.assertRaises(exception.GCSConnectionFailure,
                              service.restore,
                              backup, volume_id, volume_file)

    @gcs_client2
    def test_restore_delta(self):
        volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e'

        self.flags(backup_gcs_object_size=8 * units.Ki)
        self.flags(backup_gcs_block_size=units.Ki)

        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              container=container_name)
        service1 = google_dr.GoogleBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        service1.backup(backup, self.volume_file)

        # Create incremental backup after changing the volume contents
        self.volume_file.seek(16 * units.Ki)
        self.volume_file.write(os.urandom(units.Ki))
        self.volume_file.seek(20 * units.Ki)
        self.volume_file.write(os.urandom(units.Ki))

        deltabackup = self._create_backup_db_entry(volume_id=volume_id,
                                                   container=container_name,
                                                   parent_id=backup.id)
        self.volume_file.seek(0)
        service2 = google_dr.GoogleBackupDriver(self.ctxt)
        service2.backup(deltabackup, self.volume_file, True)

        with tempfile.NamedTemporaryFile() as restored_file:
            service2.restore(deltabackup, volume_id,
                             restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,
                            restored_file.name))

    @gcs_client
    def test_delete(self):
        volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31'
        object_prefix = 'test_prefix'
        backup = self._create_backup_db_entry(volume_id=volume_id,
                                              service_metadata=object_prefix)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        service.delete(backup)

    @gcs_client
    @mock.patch.object(google_dr.GoogleBackupDriver, 'delete_object',
                       _fake_delete_object)
    def test_delete_without_object_prefix(self):
        volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1'
        backup = self._create_backup_db_entry(volume_id=volume_id)
        service = google_dr.GoogleBackupDriver(self.ctxt)
        service.delete(backup)

    @gcs_client
    def test_get_compressor(self):
        service = google_dr.GoogleBackupDriver(self.ctxt)
        compressor = service._get_compressor('None')
        self.assertIsNone(compressor)
        compressor = service._get_compressor('zlib')
        self.assertEqual(zlib, compressor)
        compressor = service._get_compressor('bz2')
        self.assertEqual(bz2, compressor)
        self.assertRaises(ValueError, service._get_compressor, 'fake')

    @gcs_client
    def test_prepare_output_data_effective_compression(self):
        service = google_dr.GoogleBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = b'\0' * 128

        result = service._prepare_output_data(fake_data)

        self.assertEqual('zlib', result[0])
        # Compare the compressed payload, not the (algorithm, data) tuple
        self.assertTrue(len(result[1]) < len(fake_data))

    @gcs_client
    def test_prepare_output_data_no_compression(self):
        self.flags(backup_compression_algorithm='none')
        service = google_dr.GoogleBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = b'\0' * 128

        result = service._prepare_output_data(fake_data)

        self.assertEqual('none', result[0])
        self.assertEqual(fake_data, result[1])

    @gcs_client
    def test_prepare_output_data_ineffective_compression(self):
        service = google_dr.GoogleBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = b'\0' * 128
        # Pre-compress so that compression in the driver will be ineffective.
        already_compressed_data = service.compressor.compress(fake_data)

        result = service._prepare_output_data(already_compressed_data)

        self.assertEqual('none', result[0])
        self.assertEqual(already_compressed_data, result[1])
cinder-8.0.0/cinder/tests/unit/test_scality.py0000664000567000056710000003766212701406250022603 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Scality
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the Scality SOFS Volume Driver.
"""
""" import errno import os import mock from oslo_utils import imageutils from six.moves import urllib from cinder import context from cinder import exception from cinder import test from cinder.volume import configuration as conf import cinder.volume.drivers.scality as driver _FAKE_VOLUME = {'name': 'volume-a79d463e-1fd5-11e5-a6ff-5b81bfee8544', 'id': 'a79d463e-1fd5-11e5-a6ff-5b81bfee8544', 'provider_location': 'fake_share'} _FAKE_SNAPSHOT = {'id': 'ae3d6da2-1fd5-11e5-967f-1b8cf3b401ab', 'volume': _FAKE_VOLUME, 'status': 'available', 'provider_location': None, 'volume_size': 1, 'name': 'snapshot-ae3d6da2-1fd5-11e5-967f-1b8cf3b401ab'} _FAKE_BACKUP = {'id': '914849d2-2585-11e5-be54-d70ca0c343d6', 'volume_id': _FAKE_VOLUME['id']} _FAKE_MNT_POINT = '/tmp' _FAKE_SOFS_CONFIG = '/etc/sfused.conf' _FAKE_VOLUME_DIR = 'cinder/volumes' _FAKE_VOL_BASEDIR = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_DIR, '00') _FAKE_VOL_PATH = os.path.join(_FAKE_VOL_BASEDIR, _FAKE_VOLUME['name']) _FAKE_SNAP_PATH = os.path.join(_FAKE_VOL_BASEDIR, _FAKE_SNAPSHOT['name']) _FAKE_MOUNTS_TABLE = [['tmpfs /dev/shm\n'], ['fuse ' + _FAKE_MNT_POINT + '\n']] class ScalityDriverTestCase(test.TestCase): """Test case for the Scality driver.""" def setUp(self): super(ScalityDriverTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.scality_sofs_mount_point = _FAKE_MNT_POINT self.cfg.scality_sofs_config = _FAKE_SOFS_CONFIG self.cfg.scality_sofs_volume_dir = _FAKE_VOLUME_DIR self.drv = driver.ScalityDriver(configuration=self.cfg) self.drv.db = mock.Mock() @mock.patch.object(driver.urllib.request, 'urlopen') @mock.patch('os.access') def test_check_for_setup_error(self, mock_os_access, mock_urlopen): self.drv.check_for_setup_error() mock_urlopen.assert_called_once_with('file://%s' % _FAKE_SOFS_CONFIG, timeout=5) mock_os_access.assert_called_once_with('/sbin/mount.sofs', os.X_OK) def test_check_for_setup_error_with_no_sofs_config(self): self.cfg.scality_sofs_config = '' self.drv = driver.ScalityDriver(configuration=self.cfg) self.assertRaises(exception.VolumeBackendAPIException, self.drv.check_for_setup_error) exec_patcher = mock.patch.object(self.drv, '_execute', mock.MagicMock()) exec_patcher.start() self.addCleanup(exec_patcher.stop) @mock.patch.object(driver.urllib.request, 'urlopen') def test_check_for_setup_error_with_urlerror(self, mock_urlopen): # Add a Unicode char to be sure that the exception is properly # handled even if it contains Unicode chars mock_urlopen.side_effect = urllib.error.URLError(u'\u9535') self.assertRaises(exception.VolumeBackendAPIException, self.drv.check_for_setup_error) @mock.patch.object(driver.urllib.request, 'urlopen') def test_check_for_setup_error_with_httperror(self, mock_urlopen): mock_urlopen.side_effect = urllib.error.HTTPError(*[None] * 5) self.assertRaises(exception.VolumeBackendAPIException, self.drv.check_for_setup_error) @mock.patch.object(driver.urllib.request, 'urlopen', mock.Mock()) @mock.patch('os.access') def test_check_for_setup_error_with_no_mountsofs(self, mock_os_access): mock_os_access.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self.drv.check_for_setup_error) mock_os_access.assert_called_once_with('/sbin/mount.sofs', os.X_OK) def test_load_shares_config(self): self.assertEqual({}, self.drv.shares) self.drv._load_shares_config() self.assertEqual({_FAKE_VOLUME_DIR: None}, self.drv.shares) def test_get_mount_point_for_share(self): self.assertEqual(_FAKE_VOL_BASEDIR, self.drv._get_mount_point_for_share()) 
@mock.patch("cinder.volume.utils.read_proc_mounts") @mock.patch("oslo_concurrency.processutils.execute") def test_ensure_share_mounted_when_mount_failed(self, mock_execute, mock_read_proc_mounts): mock_read_proc_mounts.return_value = ['tmpfs /dev/shm\n'] self.assertRaises(exception.VolumeBackendAPIException, self.drv._ensure_share_mounted) self.assertEqual(2, mock_read_proc_mounts.call_count) self.assertEqual(1, mock_execute.call_count) @mock.patch("cinder.volume.utils.read_proc_mounts") @mock.patch("oslo_concurrency.processutils.execute") @mock.patch("oslo_utils.fileutils.ensure_tree") @mock.patch("os.symlink") def test_ensure_shares_mounted(self, mock_symlink, mock_ensure_tree, mock_execute, mock_read_proc_mounts): self.assertEqual([], self.drv._mounted_shares) mock_read_proc_mounts.side_effect = _FAKE_MOUNTS_TABLE self.drv._ensure_shares_mounted() self.assertEqual([_FAKE_VOLUME_DIR], self.drv._mounted_shares) self.assertEqual(2, mock_read_proc_mounts.call_count) mock_symlink.assert_called_once_with('.', _FAKE_VOL_BASEDIR) self.assertEqual(2, mock_ensure_tree.call_count) self.assertEqual(1, mock_execute.call_count) expected_args = ('mount', '-t', 'sofs', _FAKE_SOFS_CONFIG, _FAKE_MNT_POINT) self.assertEqual(expected_args, mock_execute.call_args[0]) @mock.patch("cinder.volume.utils.read_proc_mounts") @mock.patch("oslo_concurrency.processutils.execute") @mock.patch("oslo_utils.fileutils.ensure_tree", mock.Mock()) @mock.patch("os.symlink", mock.Mock()) def test_ensure_shares_mounted_when_sofs_mounted(self, mock_execute, mock_read_proc_mounts): mock_read_proc_mounts.return_value = _FAKE_MOUNTS_TABLE[1] self.drv._ensure_shares_mounted() # Because SOFS is mounted from the beginning, we shouldn't read # /proc/mounts more than once. mock_read_proc_mounts.assert_called_once_with() self.assertFalse(mock_execute.called) def test_find_share_when_no_shares_mounted(self): self.assertRaises(exception.RemoteFSNoSharesMounted, self.drv._find_share, 'ignored') @mock.patch("cinder.volume.utils.read_proc_mounts") @mock.patch("oslo_concurrency.processutils.execute") @mock.patch("oslo_utils.fileutils.ensure_tree") @mock.patch("os.symlink") def test_find_share(self, mock_symlink, mock_ensure_tree, mock_execute, mock_read_proc_mounts): mock_read_proc_mounts.side_effect = _FAKE_MOUNTS_TABLE self.drv._ensure_shares_mounted() self.assertEqual(_FAKE_VOLUME_DIR, self.drv._find_share('ignored')) self.assertEqual(2, mock_read_proc_mounts.call_count) self.assertEqual(1, mock_execute.call_count) expected_args = ('mount', '-t', 'sofs', _FAKE_SOFS_CONFIG, _FAKE_MNT_POINT) self.assertEqual(expected_args, mock_execute.call_args[0]) mock_symlink.assert_called_once_with('.', _FAKE_VOL_BASEDIR) self.assertEqual(mock_ensure_tree.call_args_list, [mock.call(_FAKE_MNT_POINT), mock.call(os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_DIR))]) def test_get_volume_stats(self): with mock.patch.object(self.cfg, 'safe_get') as mock_safe_get: mock_safe_get.return_value = 'fake_backend_name' stats = self.drv.get_volume_stats() self.assertEqual(self.drv.VERSION, stats['driver_version']) self.assertEqual(mock_safe_get.return_value, stats['volume_backend_name']) mock_safe_get.assert_called_once_with('volume_backend_name') @mock.patch("cinder.image.image_utils.qemu_img_info") def test_initialize_connection(self, mock_qemu_img_info): info = imageutils.QemuImgInfo() info.file_format = 'raw' info.image = _FAKE_VOLUME['name'] mock_qemu_img_info.return_value = info with mock.patch.object(self.drv, 'get_active_image_from_info') as \ 
mock_get_active_image_from_info: mock_get_active_image_from_info.return_value = _FAKE_VOLUME['name'] conn_info = self.drv.initialize_connection(_FAKE_VOLUME, None) expected_conn_info = { 'driver_volume_type': driver.ScalityDriver.driver_volume_type, 'mount_point_base': _FAKE_MNT_POINT, 'data': { 'export': _FAKE_VOLUME['provider_location'], 'name': _FAKE_VOLUME['name'], 'sofs_path': 'cinder/volumes/00/' + _FAKE_VOLUME['name'], 'format': 'raw' } } self.assertEqual(expected_conn_info, conn_info) mock_get_active_image_from_info.assert_called_once_with(_FAKE_VOLUME) mock_qemu_img_info.assert_called_once_with(_FAKE_VOL_PATH) @mock.patch("cinder.image.image_utils.resize_image") @mock.patch("cinder.image.image_utils.qemu_img_info") def test_extend_volume(self, mock_qemu_img_info, mock_resize_image): info = imageutils.QemuImgInfo() info.file_format = 'raw' mock_qemu_img_info.return_value = info self.drv.extend_volume(_FAKE_VOLUME, 2) mock_qemu_img_info.assert_called_once_with(_FAKE_VOL_PATH) mock_resize_image.assert_called_once_with(_FAKE_VOL_PATH, 2) @mock.patch("cinder.image.image_utils.qemu_img_info") def test_extend_volume_with_invalid_format(self, mock_qemu_img_info): info = imageutils.QemuImgInfo() info.file_format = 'vmdk' mock_qemu_img_info.return_value = info self.assertRaises(exception.InvalidVolume, self.drv.extend_volume, _FAKE_VOLUME, 2) @mock.patch("cinder.image.image_utils.resize_image") @mock.patch("cinder.image.image_utils.convert_image") def test_copy_volume_from_snapshot_with_ioerror(self, mock_convert_image, mock_resize_image): with mock.patch.object(self.drv, '_read_info_file') as \ mock_read_info_file, \ mock.patch.object(self.drv, '_set_rw_permissions_for_all') as \ mock_set_rw_permissions: mock_read_info_file.side_effect = IOError(errno.ENOENT, '') self.drv._copy_volume_from_snapshot(_FAKE_SNAPSHOT, _FAKE_VOLUME, 1) mock_read_info_file.assert_called_once_with("%s.info" % _FAKE_VOL_PATH) mock_convert_image.assert_called_once_with(_FAKE_SNAP_PATH, _FAKE_VOL_PATH, 'raw', run_as_root=True) mock_set_rw_permissions.assert_called_once_with(_FAKE_VOL_PATH) mock_resize_image.assert_called_once_with(_FAKE_VOL_PATH, 1) @mock.patch("cinder.image.image_utils.resize_image") @mock.patch("cinder.image.image_utils.convert_image") @mock.patch("cinder.image.image_utils.qemu_img_info") def test_copy_volume_from_snapshot(self, mock_qemu_img_info, mock_convert_image, mock_resize_image): new_volume = {'name': 'volume-3fa63b02-1fe5-11e5-b492-abf97a8fb23b', 'id': '3fa63b02-1fe5-11e5-b492-abf97a8fb23b', 'provider_location': 'fake_share'} new_vol_path = os.path.join(_FAKE_VOL_BASEDIR, new_volume['name']) info = imageutils.QemuImgInfo() info.file_format = 'raw' info.backing_file = _FAKE_VOL_PATH mock_qemu_img_info.return_value = info with mock.patch.object(self.drv, '_read_info_file') as \ mock_read_info_file, \ mock.patch.object(self.drv, '_set_rw_permissions_for_all') as \ mock_set_rw_permissions: self.drv._copy_volume_from_snapshot(_FAKE_SNAPSHOT, new_volume, 1) mock_read_info_file.assert_called_once_with("%s.info" % _FAKE_VOL_PATH) mock_convert_image.assert_called_once_with(_FAKE_VOL_PATH, new_vol_path, 'raw', run_as_root=True) mock_set_rw_permissions.assert_called_once_with(new_vol_path) mock_resize_image.assert_called_once_with(new_vol_path, 1) @mock.patch("cinder.image.image_utils.qemu_img_info") @mock.patch("cinder.utils.temporary_chown") @mock.patch("six.moves.builtins.open") def test_backup_volume(self, mock_open, mock_temporary_chown, mock_qemu_img_info): """Backup a volume with no 
snapshots.""" info = imageutils.QemuImgInfo() info.file_format = 'raw' mock_qemu_img_info.return_value = info backup = {'volume_id': _FAKE_VOLUME['id']} mock_backup_service = mock.MagicMock() self.drv.db.volume_get.return_value = _FAKE_VOLUME self.drv.backup_volume(context, backup, mock_backup_service) mock_qemu_img_info.assert_called_once_with(_FAKE_VOL_PATH) mock_temporary_chown.assert_called_once_with(_FAKE_VOL_PATH) mock_open.assert_called_once_with(_FAKE_VOL_PATH) mock_backup_service.backup.assert_called_once_with( backup, mock_open().__enter__()) @mock.patch("cinder.image.image_utils.qemu_img_info") def test_backup_volume_with_non_raw_volume(self, mock_qemu_img_info): info = imageutils.QemuImgInfo() info.file_format = 'qcow2' mock_qemu_img_info.return_value = info self.drv.db.volume_get.return_value = _FAKE_VOLUME self.assertRaises(exception.InvalidVolume, self.drv.backup_volume, context, _FAKE_BACKUP, mock.MagicMock()) mock_qemu_img_info.assert_called_once_with(_FAKE_VOL_PATH) @mock.patch("cinder.image.image_utils.qemu_img_info") def test_backup_volume_with_backing_file(self, mock_qemu_img_info): info = imageutils.QemuImgInfo() info.file_format = 'raw' info.backing_file = 'fake.img' mock_qemu_img_info.return_value = info backup = {'volume_id': _FAKE_VOLUME['id']} self.drv.db.volume_get.return_value = _FAKE_VOLUME self.assertRaises(exception.InvalidVolume, self.drv.backup_volume, context, backup, mock.MagicMock()) mock_qemu_img_info.assert_called_once_with(_FAKE_VOL_PATH) @mock.patch("cinder.utils.temporary_chown") @mock.patch("six.moves.builtins.open") def test_restore_backup(self, mock_open, mock_temporary_chown): mock_backup_service = mock.MagicMock() self.drv.restore_backup(context, _FAKE_BACKUP, _FAKE_VOLUME, mock_backup_service) mock_temporary_chown.assert_called_once_with(_FAKE_VOL_PATH) mock_open.assert_called_once_with(_FAKE_VOL_PATH, 'wb') mock_backup_service.restore.assert_called_once_with( _FAKE_BACKUP, _FAKE_VOLUME['id'], mock_open().__enter__()) cinder-8.0.0/cinder/tests/unit/fake_service.py0000664000567000056710000000337312701406250022514 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from oslo_versionedobjects import fields from cinder import objects def fake_db_service(**updates): NOW = timeutils.utcnow().replace(microsecond=0) db_service = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'host': 'fake-host', 'binary': 'fake-service', 'topic': 'fake-service-topic', 'report_count': 1, 'disabled': False, 'disabled_reason': None, 'modified_at': NOW, } for name, field in objects.Service.fields.items(): if name in db_service: continue if field.nullable: db_service[name] = None elif field.default != fields.UnspecifiedDefault: db_service[name] = field.default else: raise Exception('fake_db_service needs help with %s.'
% name) if updates: db_service.update(updates) return db_service def fake_service_obj(context, **updates): return objects.Service._from_db_object(context, objects.Service(), fake_db_service(**updates)) cinder-8.0.0/cinder/tests/unit/test_conf.py0000664000567000056710000000571712701406250022056 0ustar jenkinsjenkins00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from cinder import test CONF = cfg.CONF CONF.register_opt(cfg.StrOpt('conf_unittest', default='foo', help='for testing purposes only')) class ConfigTestCase(test.TestCase): def setUp(self): super(ConfigTestCase, self).setUp() def test_declare(self): self.assertNotIn('answer', CONF) CONF.import_opt('answer', 'cinder.tests.unit.declare_conf') self.assertIn('answer', CONF) self.assertEqual(42, CONF.answer) # Make sure we don't overwrite anything CONF.set_override('answer', 256) self.assertEqual(256, CONF.answer) CONF.import_opt('answer', 'cinder.tests.unit.declare_conf') self.assertEqual(256, CONF.answer) def test_runtime_and_unknown_conf(self): self.assertNotIn('runtime_answer', CONF) import cinder.tests.unit.runtime_conf # noqa self.assertIn('runtime_answer', CONF) self.assertEqual(54, CONF.runtime_answer) def test_long_vs_short_conf(self): CONF.clear() CONF.register_cli_opt(cfg.StrOpt('duplicate_answer_long', default='val', help='desc')) CONF.register_cli_opt(cfg.IntOpt('duplicate_answer', default=50, help='desc')) argv = ['--duplicate_answer=60'] CONF(argv, default_config_files=[]) self.assertEqual(60, CONF.duplicate_answer) self.assertEqual('val', CONF.duplicate_answer_long) def test_conf_leak_left(self): self.assertEqual('foo', CONF.conf_unittest) self.flags(conf_unittest='bar') self.assertEqual('bar', CONF.conf_unittest) def test_conf_leak_right(self): self.assertEqual('foo', CONF.conf_unittest) self.flags(conf_unittest='bar') self.assertEqual('bar', CONF.conf_unittest) def test_conf_overrides(self): self.assertEqual('foo', CONF.conf_unittest) self.flags(conf_unittest='bar') self.assertEqual('bar', CONF.conf_unittest) CONF.reset() self.assertEqual('foo', CONF.conf_unittest) cinder-8.0.0/cinder/tests/unit/test_cloudbyte.py0000664000567000056710000014754312701406250023127 0ustar jenkinsjenkins00000000000000# Copyright 2015 CloudByte Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
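# Illustrative sketch (not from the original file): every test in this module
# follows the same dispatch pattern -- each canned elasticenter JSON reply is
# parsed once into the MAP_COMMAND_TO_FAKE_RESPONSE dict defined below, and
# the driver's _api_request_for_cloudbyte method is mocked with a side-effect
# function keyed on the command name:
#
#     def _side_effect_api_req(self, cmd, params, version='1.0'):
#         return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
#
# Failure paths swap in variants that return an empty dict for one specific
# command, which is what drives the assertRaises branches.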
""" Test class for cloudbyte's cinder driver. This involves mocking of elasticenter's json responses when a method of this driver is unit tested. """ import json import mock import testtools from testtools import matchers from cinder import context from cinder import exception from cinder.volume import configuration as conf from cinder.volume.drivers.cloudbyte import cloudbyte from cinder.volume import qos_specs from cinder.volume import volume_types # A fake list account response of cloudbyte's elasticenter FAKE_LIST_ACCOUNT_RESPONSE = """{ "listAccountResponse" : { "count":1 , "account" : [{ "id": "d13a4e9e-0c05-4d2d-8a5e-5efd3ef058e0", "name": "CustomerA", "simpleid": 1, "description": "None", "iqnname": "iqn.2014-05.cvsacc1", "availIOPS": 508, "totaliops": 2000, "usedIOPS": 1492, "volumes": [], "storageBuckets": [], "tsms": [], "qosgroups": [], "filesystemslist": [], "currentUsedSpace": 53179, "currentAvailableSpace": 1249349, "currentThroughput": 156, "currentIOPS": 33, "currentLatency": 0, "currentThrottle": 0, "numericquota": 3145728.0, "currentnumericquota": 1253376.0, "currentavailablequota": 1892352.0, "revisionnumber": 1 }] }}""" # A fake list tsm response of cloudbyte's elasticenter FAKE_LIST_TSM_RESPONSE = """{ "listTsmResponse" : { "count":1 , "listTsm" : [{ "id": "955eaf34-4221-3a77-82d0-99113b126fa8", "simpleid": 2, "name": "openstack", "ipaddress": "172.16.50.40", "accountname": "CustomerA", "sitename": "BLR", "clustername": "HAGrp1", "controllerName": "Controller", "controlleripaddress": "172.16.50.6", "clusterstatus": "Online", "hapoolstatus": "ONLINE", "hapoolname": "pool", "hapoolavailiops": 1700, "hapoolgrace": true, "hapoolavailtput": 6800, "poollatency": 10, "accountid": "d13a4e9e-0c05-4d2d-8a5e-5efd3ef058e0", "controllerid": "8c2f7084-99c0-36e6-9cb7-205e3ba4c813", "poolid": "adcbef8f-2193-3f2c-9bb1-fcaf977ae0fc", "datasetid": "87a23025-f2b2-39e9-85ac-9cda15bfed1a", "storageBuckets": [], "currentUsedSpace": 16384, "currentAvailableSpace": 188416, "currentTotalSpace": 204800, "currentThroughput": 12, "tpcontrol": "true", "currentIOPS": 0, "iopscontrol": "true", "gracecontrol": "false", "currentLatency": 0, "currentThrottle": 0, "iops": "1000", "availIOPS": "500", "availThroughput": "2000", "usedIOPS": "500", "usedThroughput": "2000", "throughput": "4000", "latency": "15", "graceallowed": true, "numericquota": 1048576.0, "currentnumericquota": 204800.0, "availablequota": 843776.0, "blocksize": "4", "type": "1", "iqnname": "iqn.2014-05.cvsacc1.openstack", "interfaceName": "em0", "revisionnumber": 0, "status": "Online", "subnet": "16", "managedstate": "Available", "configurationstate": "sync", "offlinenodes": "", "pooltakeover": "noTakeOver", "totalprovisionquota": "536576", "haNodeStatus": "Available", "ispooltakeoveronpartialfailure": true, "filesystemslist": [], "volumes": [], "qosgrouplist": [] }] }}""" # A fake add QOS group response of cloudbyte's elasticenter FAKE_ADD_QOS_GROUP_RESPONSE = """{ "addqosgroupresponse" : { "qosgroup" : { "id": "d73662ac-6db8-3b2c-981a-012af4e2f7bd", "name": "QoS_DS1acc1openstacktsm", "tsmid": "8146146e-f67b-3942-8074-3074599207a4", "controllerid": "f1603e87-d1e6-3dcb-a549-7a6e77f82d86", "poolid": "73b567c0-e57d-37b5-b765-9d70725f59af", "parentid": "81ebdcbb-f73b-3337-8f32-222820e6acb9", "tsmName": "openstacktsm", "offlinenodes": "", "sitename": "site1", "clustername": "HA1", "controllerName": "node1", "clusterstatus": "Online", "currentThroughput": 0, "currentIOPS": 0, "currentLatency": 0, "currentThrottle": 0, "iopsvalue": 
"(0/100)", "throughputvalue": "(0/400)", "iops": "100", "iopscontrol": "true", "throughput": "400", "tpcontrol": "true", "blocksize": "4k", "latency": "15", "graceallowed": false, "type": "1", "revisionnumber": 0, "managedstate": "Available", "configurationstate": "init", "standardproviops": 0, "operatingblocksize": 0, "operatingcachehit": 0, "operatingiops": 0, "standardoperatingiops": 0 } }}""" # A fake create volume response of cloudbyte's elasticenter FAKE_CREATE_VOLUME_RESPONSE = """{ "createvolumeresponse" : { "jobid": "f94e2257-9515-4a44-add0-4b16cb1bcf67" }}""" # A fake query async job response of cloudbyte's elasticenter FAKE_QUERY_ASYNC_JOB_RESULT_RESPONSE = """{ "queryasyncjobresultresponse" : { "accountid": "e8aca633-7bce-4ab7-915a-6d8847248467", "userid": "a83d1030-1b85-40f7-9479-f40e4dbdd5d5", "cmd": "com.cloudbyte.api.commands.CreateVolumeCmd", "msg": "5", "jobstatus": 1, "jobprocstatus": 0, "jobresultcode": 0, "jobresulttype": "object", "jobresult": { "storage": { "id": "92cfd601-bc1f-3fa7-8322-c492099f3326", "name": "DS1", "simpleid": 20, "compression": "off", "sync": "always", "noofcopies": 1, "recordsize": "4k", "deduplication": "off", "quota": "10G", "path": "devpool1/acc1openstacktsm/DS1", "tsmid": "8146146e-f67b-3942-8074-3074599207a4", "poolid": "73b567c0-e57d-37b5-b765-9d70725f59af", "mountpoint": "acc1DS1", "currentUsedSpace": 0, "currentAvailableSpace": 0, "currentTotalSpace": 0, "currentThroughput": 0, "currentIOPS": 0, "currentLatency": 0, "currentThrottle": 0, "tsmName": "openstacktsm", "hapoolname": "devpool1", "revisionnumber": 0, "blocklength": "512B", "nfsenabled": false, "cifsenabled": false, "iscsienabled": true, "fcenabled": false } }, "created": "2014-06-16 15:49:49", "jobid": "f94e2257-9515-4a44-add0-4b16cb1bcf67" }}""" # A fake list filesystem response of cloudbyte's elasticenter FAKE_LIST_FILE_SYSTEM_RESPONSE = """{ "listFilesystemResponse" : { "count":1 , "filesystem" : [{ "id": "c93df32e-3a99-3491-8e10-cf318a7f9b7f", "name": "c93df32e3a9934918e10cf318a7f9b7f", "simpleid": 34, "type": "filesystem", "revisionnumber": 1, "path": "/cvsacc1DS1", "clusterid": "8b404f12-7975-4e4e-8549-7abeba397fc9", "clusterstatus": "Online", "Tsmid": "955eaf34-4221-3a77-82d0-99113b126fa8", "tsmType": "1", "accountid": "d13a4e9e-0c05-4d2d-8a5e-5efd3ef058e0", "poolid": "adcbef8f-2193-3f2c-9bb1-fcaf977ae0fc", "controllerid": "8c2f7084-99c0-36e6-9cb7-205e3ba4c813", "groupid": "663923c9-084b-3778-b13d-72f23d046b8d", "parentid": "08de7c14-62af-3992-8407-28f5f053e59b", "compression": "off", "sync": "always", "noofcopies": 1, "recordsize": "4k", "deduplication": "off", "quota": "1T", "unicode": "off", "casesensitivity": "sensitive", "readonly": false, "nfsenabled": true, "cifsenabled": false, "iscsienabled": false, "fcenabled": false, "currentUsedSpace": 19968, "currentAvailableSpace": 1028608, "currentTotalSpace": 1048576, "currentThroughput": 0, "currentIOPS": 0, "currentLatency": 0, "currentThrottle": 0, "numericquota": 1048576.0, "status": "Online", "managedstate": "Available", "configurationstate": "sync", "tsmName": "cvstsm1", "ipaddress": "172.16.50.35", "sitename": "BLR", "clustername": "HAGrp1", "controllerName": "Controller", "hapoolname": "pool", "hapoolgrace": true, "tsmgrace": true, "tsmcontrolgrace": "false", "accountname": "CustomerA", "groupname": "QoS_DS1cvsacc1cvstsm1", "iops": "500", "blocksize": "4", "throughput": "2000", "latency": "15", "graceallowed": false, "offlinenodes": "", "tpcontrol": "true", "iopscontrol": "true", "tsmAvailIops": "8", 
"tsmAvailTput": "32", "iqnname": "", "mountpoint": "cvsacc1DS1", "pooltakeover": "noTakeOver", "volumeaccessible": "true", "localschedulecount": 0 }] }}""" # A fake list storage snapshot response of cloudbyte's elasticenter FAKE_LIST_STORAGE_SNAPSHOTS_RESPONSE = """{ "listDatasetSnapshotsResponse" : { "count":1 , "snapshot" : [{ "name": "snap_c60890b1f23646f29e6d51e6e592cee6", "path": "DS1@snap_c60890b1f23646f29e6d51e6e592cee6", "availMem": "-", "usedMem": "0", "refer": "26K", "mountpoint": "-", "timestamp": "Mon Jun 16 2014 14:41", "clones": 0, "pooltakeover": "noTakeOver", "managedstate": "Available" }] }}""" # A fake delete storage snapshot response of cloudbyte's elasticenter FAKE_DELETE_STORAGE_SNAPSHOT_RESPONSE = """{ "deleteSnapshotResponse" : { "DeleteSnapshot" : { "status": "success" } }}""" # A fake update volume iscsi service response of cloudbyte's elasticenter FAKE_UPDATE_VOLUME_ISCSI_SERVICE_RESPONSE = ( """{ "updatingvolumeiscsidetails" : { "viscsioptions" : { "id": "0426c04a-8fac-30e8-a8ad-ddab2f08013a", "volume_id": "12371e7c-392b-34b9-ac43-073b3c85f1d1", "ag_id": "4459248d-e9f1-3d2a-b7e8-b5d9ce587fc1", "ig_id": "527bd65b-ebec-39ce-a5e9-9dd1106cc0fc", "iqnname": "iqn.2014-06.acc1.openstacktsm:acc1DS1", "authmethod": "None", "status": true, "usn": "12371e7c392b34b9ac43073b3c85f1d1", "initialdigest": "Auto", "queuedepth": "32", "inqproduct": 0, "inqrevision": 0, "blocklength": "512B" }} }""") # A fake list iscsi initiator response of cloudbyte's elasticenter FAKE_LIST_ISCSI_INITIATOR_RESPONSE = """{ "listInitiatorsResponse" : { "count":2 , "initiator" : [{ "id": "527bd65b-ebec-39ce-a5e9-9dd1106cc0fc", "accountid": "86c5251a-9044-4690-b924-0d97627aeb8c", "name": "ALL", "netmask": "ALL", "initiatorgroup": "ALL" },{ "id": "203e0235-1d5a-3130-9204-98e3f642a564", "accountid": "86c5251a-9044-4690-b924-0d97627aeb8c", "name": "None", "netmask": "None", "initiatorgroup": "None" }] }}""" # A fake delete file system response of cloudbyte's elasticenter FAKE_DELETE_FILE_SYSTEM_RESPONSE = """{ "deleteFileSystemResponse" : { "jobid": "e1fe861a-17e3-41b5-ae7c-937caac62cdf" }}""" # A fake create storage snapshot response of cloudbyte's elasticenter FAKE_CREATE_STORAGE_SNAPSHOT_RESPONSE = ( """{ "createStorageSnapshotResponse" : { "StorageSnapshot" : { "id": "21d7a92a-f15e-3f5b-b981-cb30697b8028", "name": "snap_c60890b1f23646f29e6d51e6e592cee6", "usn": "21d7a92af15e3f5bb981cb30697b8028", "lunusn": "12371e7c392b34b9ac43073b3c85f1d1", "lunid": "12371e7c-392b-34b9-ac43-073b3c85f1d1", "scsiEnabled": false }} }""") # A fake list volume iscsi service response of cloudbyte's elasticenter FAKE_LIST_VOLUME_ISCSI_SERVICE_RESPONSE = ( """{ "listVolumeiSCSIServiceResponse" : { "count":1 , "iSCSIService" : [{ "id": "67ddcbf4-6887-3ced-8695-7b9cdffce885", "volume_id": "c93df32e-3a99-3491-8e10-cf318a7f9b7f", "ag_id": "4459248d-e9f1-3d2a-b7e8-b5d9ce587fc1", "ig_id": "203e0235-1d5a-3130-9204-98e3f642a564", "iqnname": "iqn.2014-06.acc1.openstacktsm:acc1DS1", "authmethod": "None", "status": true, "usn": "92cfd601bc1f3fa78322c492099f3326", "initialdigest": "Auto", "queuedepth": "32", "inqproduct": 0, "inqrevision": 0, "blocklength": "512B" }] }}""") # A fake clone dataset snapshot response of cloudbyte's elasticenter FAKE_CLONE_DATASET_SNAPSHOT_RESPONSE = """{ "cloneDatasetSnapshot" : { "filesystem" : { "id": "dcd46a57-e3f4-3fc1-8dd8-2e658d9ebb11", "name": "DS1Snap1clone1", "simpleid": 21, "type": "volume", "revisionnumber": 1, "path": "iqn.2014-06.acc1.openstacktsm:acc1DS1Snap1clone1", "clusterid": 
"0ff44329-9a69-4611-bac2-6eaf1b08bb18", "clusterstatus": "Online", "Tsmid": "8146146e-f67b-3942-8074-3074599207a4", "tsmType": "1", "accountid": "86c5251a-9044-4690-b924-0d97627aeb8c", "poolid": "73b567c0-e57d-37b5-b765-9d70725f59af", "controllerid": "f1603e87-d1e6-3dcb-a549-7a6e77f82d86", "groupid": "d73662ac-6db8-3b2c-981a-012af4e2f7bd", "parentid": "81ebdcbb-f73b-3337-8f32-222820e6acb9", "compression": "off", "sync": "always", "noofcopies": 1, "recordsize": "4k", "deduplication": "off", "quota": "10G", "unicode": "off", "casesensitivity": "sensitive", "readonly": false, "nfsenabled": false, "cifsenabled": false, "iscsienabled": true, "fcenabled": false, "currentUsedSpace": 0, "currentAvailableSpace": 10240, "currentTotalSpace": 10240, "currentThroughput": 0, "currentIOPS": 0, "currentLatency": 0, "currentThrottle": 0, "numericquota": 10240.0, "status": "Online", "managedstate": "Available", "configurationstate": "sync", "tsmName": "openstacktsm", "ipaddress": "20.10.22.56", "sitename": "site1", "clustername": "HA1", "controllerName": "node1", "hapoolname": "devpool1", "hapoolgrace": true, "tsmgrace": true, "tsmcontrolgrace": "false", "accountname": "acc1", "groupname": "QoS_DS1acc1openstacktsm", "iops": "100", "blocksize": "4k", "throughput": "400", "latency": "15", "graceallowed": false, "offlinenodes": "", "tpcontrol": "true", "iopscontrol": "true", "tsmAvailIops": "700", "tsmAvailTput": "2800", "iqnname": "iqn.2014-06.acc1.openstacktsm:acc1DS1Snap1clone1", "mountpoint": "acc1DS1Snap1clone1", "blocklength": "512B", "volumeaccessible": "true", "localschedulecount": 0 } }}""" # A fake update filesystem response of cloudbyte's elasticenter FAKE_UPDATE_FILE_SYSTEM_RESPONSE = """{ "updatefilesystemresponse" : { "count":1 , "filesystem" : [{ "id": "92cfd601-bc1f-3fa7-8322-c492099f3326", "name": "DS1", "simpleid": 20, "type": "volume", "revisionnumber": 1, "path": "iqn.2014-06.acc1.openstacktsm:acc1DS1", "clusterid": "0ff44329-9a69-4611-bac2-6eaf1b08bb18", "clusterstatus": "Online", "Tsmid": "8146146e-f67b-3942-8074-3074599207a4", "tsmType": "1", "accountid": "86c5251a-9044-4690-b924-0d97627aeb8c", "poolid": "73b567c0-e57d-37b5-b765-9d70725f59af", "controllerid": "f1603e87-d1e6-3dcb-a549-7a6e77f82d86", "groupid": "d73662ac-6db8-3b2c-981a-012af4e2f7bd", "parentid": "81ebdcbb-f73b-3337-8f32-222820e6acb9", "compression": "off", "sync": "always", "noofcopies": 1, "recordsize": "4k", "deduplication": "off", "quota": "12G", "unicode": "off", "casesensitivity": "sensitive", "readonly": false, "nfsenabled": false, "cifsenabled": false, "iscsienabled": true, "fcenabled": false, "currentUsedSpace": 0, "currentAvailableSpace": 10240, "currentTotalSpace": 10240, "currentThroughput": 0, "currentIOPS": 0, "currentLatency": 0, "currentThrottle": 0, "numericquota": 12288.0, "status": "Online", "managedstate": "Available", "configurationstate": "sync", "tsmName": "openstacktsm", "ipaddress": "20.10.22.56", "sitename": "site1", "clustername": "HA1", "controllerName": "node1", "hapoolname": "devpool1", "hapoolgrace": true, "tsmgrace": true, "tsmcontrolgrace": "false", "accountname": "acc1", "groupname": "QoS_DS1acc1openstacktsm", "iops": "100", "blocksize": "4k", "throughput": "400", "latency": "15", "graceallowed": false, "offlinenodes": "", "tpcontrol": "true", "iopscontrol": "true", "tsmAvailIops": "700", "tsmAvailTput": "2800", "iqnname": "iqn.2014-06.acc1.openstacktsm:acc1DS1", "mountpoint": "acc1DS1", "blocklength": "512B", "volumeaccessible": "true", "localschedulecount": 0 }] }}""" # A fake update QOS 
group response of cloudbyte's elasticenter FAKE_UPDATE_QOS_GROUP_RESPONSE = """{ "updateqosresponse" : { "count":1 , "qosgroup" : [{ "id": "d73662ac-6db8-3b2c-981a-012af4e2f7bd", "name": "QoS_DS1acc1openstacktsm", "tsmid": "8146146e-f67b-3942-8074-3074599207a4", "controllerid": "f1603e87-d1e6-3dcb-a549-7a6e77f82d86", "poolid": "73b567c0-e57d-37b5-b765-9d70725f59af", "parentid": "81ebdcbb-f73b-3337-8f32-222820e6acb9", "tsmName": "openstacktsm", "offlinenodes": "", "sitename": "site1", "clustername": "HA1", "controllerName": "node1", "clusterstatus": "Online", "currentThroughput": 0, "currentIOPS": 0, "currentLatency": 0, "currentThrottle": 0, "iopsvalue": "(0/101)", "throughputvalue": "(0/404)", "iops": "101", "iopscontrol": "true", "throughput": "404", "tpcontrol": "true", "blocksize": "4k", "latency": "15", "graceallowed": true, "type": "1", "revisionnumber": 2, "managedstate": "Available", "configurationstate": "sync", "status": "Online", "standardproviops": 0, "operatingblocksize": 0, "operatingcachehit": 0, "operatingiops": 0, "standardoperatingiops": 0 }] }}""" # A fake list iSCSI auth user response of cloudbyte's elasticenter FAKE_LIST_ISCSI_AUTH_USER_RESPONSE = """{ "listiSCSIAuthUsersResponse" : { "count":1 , "authuser" : [{ "id": "53d00164-a974-31b8-a854-bd346a8ea937", "accountid": "12d41531-c41a-4ab7-abe2-ce0db2570119", "authgroupid": "537744eb-c594-3145-85c0-96079922b894", "chapusername": "fakeauthgroupchapuser", "chappassword": "fakeauthgroupchapsecret", "mutualchapusername": "fakeauthgroupmutualchapuser", "mutualchappassword": "fakeauthgroupmutualchapsecret" }] }}""" # A fake list iSCSI auth group response of cloudbyte's elasticenter FAKE_LIST_ISCSI_AUTH_GROUP_RESPONSE = """{ "listiSCSIAuthGroupResponse" : { "count":2 , "authgroup" : [{ "id": "32d935ee-a60f-3681-b792-d8ccfe7e8e7f", "name": "None", "comment": "None" }, { "id": "537744eb-c594-3145-85c0-96079922b894", "name": "fakeauthgroup", "comment": "Fake Auth Group For Openstack " }] }}""" # This dict maps the http commands of elasticenter # with its respective fake responses MAP_COMMAND_TO_FAKE_RESPONSE = {} MAP_COMMAND_TO_FAKE_RESPONSE['deleteFileSystem'] = ( json.loads(FAKE_DELETE_FILE_SYSTEM_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["listFileSystem"] = ( json.loads(FAKE_LIST_FILE_SYSTEM_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["deleteSnapshot"] = ( json.loads(FAKE_DELETE_STORAGE_SNAPSHOT_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["updateVolumeiSCSIService"] = ( json.loads(FAKE_UPDATE_VOLUME_ISCSI_SERVICE_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["createStorageSnapshot"] = ( json.loads(FAKE_CREATE_STORAGE_SNAPSHOT_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["listAccount"] = ( json.loads(FAKE_LIST_ACCOUNT_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["listTsm"] = ( json.loads(FAKE_LIST_TSM_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["addQosGroup"] = ( json.loads(FAKE_ADD_QOS_GROUP_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["queryAsyncJobResult"] = ( json.loads(FAKE_QUERY_ASYNC_JOB_RESULT_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["createVolume"] = ( json.loads(FAKE_CREATE_VOLUME_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["listVolumeiSCSIService"] = ( json.loads(FAKE_LIST_VOLUME_ISCSI_SERVICE_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["listiSCSIInitiator"] = ( json.loads(FAKE_LIST_ISCSI_INITIATOR_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['cloneDatasetSnapshot'] = ( json.loads(FAKE_CLONE_DATASET_SNAPSHOT_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['updateFileSystem'] = ( json.loads(FAKE_UPDATE_FILE_SYSTEM_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['updateQosGroup'] = 
( json.loads(FAKE_UPDATE_QOS_GROUP_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['listStorageSnapshots'] = ( json.loads(FAKE_LIST_STORAGE_SNAPSHOTS_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['listiSCSIAuthUser'] = ( json.loads(FAKE_LIST_ISCSI_AUTH_USER_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['listiSCSIAuthGroup'] = ( json.loads(FAKE_LIST_ISCSI_AUTH_GROUP_RESPONSE)) class CloudByteISCSIDriverTestCase(testtools.TestCase): def setUp(self): super(CloudByteISCSIDriverTestCase, self).setUp() self._configure_driver() self.ctxt = context.get_admin_context() def _configure_driver(self): configuration = conf.Configuration(None, None) # initialize the elasticenter iscsi driver self.driver = cloudbyte.CloudByteISCSIDriver( configuration=configuration) # override some parts of driver configuration self.driver.configuration.cb_tsm_name = 'openstack' self.driver.configuration.cb_account_name = 'CustomerA' self.driver.configuration.cb_auth_group = 'fakeauthgroup' self.driver.configuration.cb_apikey = 'G4ZUB39WH7lbiZhPhL3nbd' self.driver.configuration.san_ip = '172.16.51.30' def _side_effect_api_req(self, cmd, params, version='1.0'): """This is a side effect function. The return value is determined based on cmd argument. The signature matches exactly with the method it tries to mock. """ return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_create_vol(self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'createVolume': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_delete_file_system( self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'deleteFileSystem': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_query_asyncjob_response( self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'queryAsyncJobResult': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_query_asyncjob( self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'queryAsyncJobResult': return {'queryasyncjobresultresponse': {'jobstatus': 0}} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_list_tsm(self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'listTsm': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _none_response_to_list_tsm(self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'listTsm': return {"listTsmResponse": {}} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_list_iscsi_auth_group(self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'listiSCSIAuthGroup': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_list_iscsi_auth_user(self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'listiSCSIAuthUser': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_enable_chap(self): """This is a side effect function.""" self.driver.cb_use_chap = True def _side_effect_disable_chap(self): """This is a side effect function.""" self.driver.cb_use_chap = False def _side_effect_api_req_to_list_filesystem( self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'listFileSystem': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _fake_api_req_to_list_filesystem( self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'listFileSystem': return {"listFilesystemResponse": 
{"filesystem": [{}]}} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_list_vol_iscsi_service( self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'listVolumeiSCSIService': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_api_req_to_list_iscsi_initiator( self, cmd, params, version='1.0'): """This is a side effect function.""" if cmd == 'listiSCSIInitiator': return {} return MAP_COMMAND_TO_FAKE_RESPONSE[cmd] def _side_effect_create_vol_from_snap(self, cloned_volume, snapshot): """This is a side effect function.""" return {} def _side_effect_create_snapshot(self, snapshot): """This is a side effect function.""" model_update = {} model_update['provider_id'] = "devpool1/acc1openstacktsm/DS1@DS1Snap1" return model_update def _side_effect_get_connection(self, host, url): """This is a side effect function.""" return_obj = {} return_obj['http_status'] = 200 # mock the response data return_obj['data'] = MAP_COMMAND_TO_FAKE_RESPONSE['listTsm'] return_obj['error'] = None return return_obj def _side_effect_get_err_connection(self, host, url): """This is a side effect function.""" return_obj = {} return_obj['http_status'] = 500 # mock the response data return_obj['data'] = None return_obj['error'] = "Http status: 500, Error: Elasticenter " "is not available." return return_obj def _side_effect_get_err_connection2(self, host, url): """This is a side effect function.""" msg = ("Error executing CloudByte API %(cmd)s , Error: %(err)s" % {'cmd': 'MockTest', 'err': 'Error'}) raise exception.VolumeBackendAPIException(msg) def _get_fake_volume_id(self): # Get the filesystems fs_list = MAP_COMMAND_TO_FAKE_RESPONSE['listFileSystem'] filesystems = fs_list['listFilesystemResponse']['filesystem'] # Get the volume id from the first filesystem volume_id = filesystems[0]['id'] return volume_id def _fake_get_volume_type(self, ctxt, type_id): fake_type = {'qos_specs_id': 'fake-id', 'extra_specs': {'qos:iops': '100000'}, 'id': 'fake-volume-type-id'} return fake_type def _fake_get_qos_spec(self, ctxt, spec_id): fake_qos_spec = {'id': 'fake-qos-spec-id', 'specs': {'iops': '1000', 'graceallowed': 'true', 'readonly': 'true'}} return fake_qos_spec @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_execute_and_get_response_details') def test_api_request_for_cloudbyte(self, mock_conn): # Test - I # configure the mocks with respective side-effects mock_conn.side_effect = self._side_effect_get_connection # run the test data = self.driver._api_request_for_cloudbyte('listTsm', {}) # assert the data attributes self.assertEqual(1, data['listTsmResponse']['count']) # Test - II # configure the mocks with side-effects mock_conn.reset_mock() mock_conn.side_effect = self._side_effect_get_err_connection # run the test with testtools.ExpectedException( exception.VolumeBackendAPIException, 'Bad or unexpected response from the storage volume ' 'backend API: Failed to execute CloudByte API'): self.driver._api_request_for_cloudbyte('listTsm', {}) # Test - III # configure the mocks with side-effects mock_conn.reset_mock() mock_conn.side_effect = self._side_effect_get_err_connection2 # run the test with testtools.ExpectedException( exception.VolumeBackendAPIException, 'Error executing CloudByte API'): self.driver._api_request_for_cloudbyte('listTsm', {}) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_delete_volume(self, mock_api_req): # prepare the dependencies fake_volume_id = self._get_fake_volume_id() volume = {'id': 
fake_volume_id, 'provider_id': fake_volume_id} # Test-I mock_api_req.side_effect = self._side_effect_api_req # run the test self.driver.delete_volume(volume) # assert that 7 api calls were invoked self.assertEqual(7, mock_api_req.call_count) # Test-II # reset & re-configure mock volume['provider_id'] = None mock_api_req.reset_mock() mock_api_req.side_effect = self._side_effect_api_req # run the test self.driver.delete_volume(volume) # assert that no api calls were invoked self.assertEqual(0, mock_api_req.call_count) # Test-III # re-configure the dependencies volume['provider_id'] = fake_volume_id # reset & re-configure mock mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = ( self._side_effect_api_req_to_delete_file_system) # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, volume) # assert that 6 api calls were invoked self.assertEqual(6, mock_api_req.call_count) # Test - IV # reset the mocks mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = ( self._side_effect_api_req_to_query_asyncjob_response) # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, volume) # assert that 7 api calls were invoked self.assertEqual(7, mock_api_req.call_count) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_delete_snapshot(self, mock_api_req): snapshot = { 'id': 'SomeID', 'provider_id': 'devpool1/acc1openstacktsm/DS1@DS1Snap1', 'display_name': 'DS1Snap1', 'volume_id': 'SomeVol', 'volume': { 'display_name': 'DS1' } } # Test - I # now run the test self.driver.delete_snapshot(snapshot) # assert that 1 api call was invoked self.assertEqual(1, mock_api_req.call_count) # Test - II # reconfigure the dependencies snapshot['provider_id'] = None # reset & reconfigure the mock mock_api_req.reset_mock() mock_api_req.side_effect = self._side_effect_api_req # now run the test self.driver.delete_snapshot(snapshot) # assert that no api calls were invoked self.assertEqual(0, mock_api_req.call_count) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_create_snapshot(self, mock_api_req): # prepare the dependencies fake_volume_id = self._get_fake_volume_id() snapshot = { 'id': 'c60890b1-f236-46f2-9e6d-51e6e592cee6', 'display_name': 'DS1Snap1', 'volume_id': 'SomeVol', 'volume': { 'display_name': 'DS1', 'provider_id': fake_volume_id } } # Test - I # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test model_update = self.driver.create_snapshot(snapshot) # assert that 2 api calls were invoked self.assertEqual(2, mock_api_req.call_count) self.assertEqual('DS1@snap_c60890b1f23646f29e6d51e6e592cee6', model_update['provider_id']) # Test - II # reconfigure the dependencies snapshot['volume']['provider_id'] = None # reset & reconfigure the mock mock_api_req.reset_mock() mock_api_req.side_effect = self._side_effect_api_req # now run the test & assert the exception with testtools.ExpectedException( exception.VolumeBackendAPIException, 'Bad or unexpected response from the storage volume ' 'backend API: Failed to create snapshot'): self.driver.create_snapshot(snapshot) # assert that no api calls were invoked self.assertEqual(0, mock_api_req.call_count) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_create_volume(self, mock_api_req): 
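        # Roadmap (added for readability; not in the original file): this test
        # exercises ten scenarios against the same mocked API. Tests I/II
        # create a volume with CHAP enabled and then disabled and assert the
        # returned provider details; Tests III-X each break one dependency
        # (missing size, empty createVolume reply, empty listFileSystem reply,
        # empty listVolumeiSCSIService reply, empty listiSCSIInitiator reply,
        # empty listTsm reply, failed queryAsyncJobResult) and assert that
        # VolumeBackendAPIException is raised.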
# prepare the dependencies fake_volume_id = self._get_fake_volume_id() volume = { 'id': fake_volume_id, 'size': 22, 'volume_type_id': None } # Test - I # enable CHAP self._side_effect_enable_chap() # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test provider_details = self.driver.create_volume(volume) # assert equality checks for certain configuration attributes self.assertEqual( 'openstack', self.driver.configuration.cb_tsm_name) self.assertEqual( 'CustomerA', self.driver.configuration.cb_account_name) self.assertEqual( 'fakeauthgroup', self.driver.configuration.cb_auth_group) # assert the result self.assertEqual( 'CHAP fakeauthgroupchapuser fakeauthgroupchapsecret', provider_details['provider_auth']) self.assertThat( provider_details['provider_location'], matchers.Contains('172.16.50.35:3260')) # assert the invoked api calls to CloudByte Storage self.assertEqual(11, mock_api_req.call_count) # Test - II # reset the mock mock_api_req.reset_mock() # disable CHAP self._side_effect_disable_chap() # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test provider_details = self.driver.create_volume(volume) # assert equality checks for certain configuration attributes self.assertEqual( 'openstack', self.driver.configuration.cb_tsm_name) self.assertEqual( 'CustomerA', self.driver.configuration.cb_account_name) # assert the result self.assertIsNone(provider_details['provider_auth']) self.assertThat( provider_details['provider_location'], matchers.Contains('172.16.50.35:3260')) # assert the invoked api calls to CloudByte Storage self.assertEqual(9, mock_api_req.call_count) # Test - III # reconfigure the dependencies volume['id'] = 'NotExists' del volume['size'] # reset & reconfigure the mock mock_api_req.reset_mock() mock_api_req.side_effect = self._side_effect_api_req # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Test - IV # reconfigure the dependencies volume['id'] = 'abc' # reset the mocks mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = self._side_effect_api_req_to_create_vol # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Test - V # reconfigure the dependencies # reset the mocks mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = self._side_effect_api_req_to_list_filesystem # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Test - VI volume['id'] = fake_volume_id # reconfigure the dependencies # reset the mocks mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = ( self._side_effect_api_req_to_list_vol_iscsi_service) # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Test - VII # reconfigure the dependencies # reset the mocks mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = ( self._side_effect_api_req_to_list_iscsi_initiator) # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Test - VIII volume['id'] = fake_volume_id volume['size'] = 22 # reconfigure the dependencies # reset the mocks 
mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = ( self._none_response_to_list_tsm) # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Test - IX volume['id'] = fake_volume_id volume['size'] = 22 # reconfigure the dependencies # reset the mocks mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = ( self._side_effect_api_req_to_create_vol) # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Test - X # reset the mocks mock_api_req.reset_mock() # configure or re-configure the mocks mock_api_req.side_effect = ( self._side_effect_api_req_to_query_asyncjob_response) # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') @mock.patch.object(cloudbyte.CloudByteISCSIDriver, 'create_volume_from_snapshot') @mock.patch.object(cloudbyte.CloudByteISCSIDriver, 'create_snapshot') def test_create_cloned_volume(self, mock_create_snapshot, mock_create_vol_from_snap, mock_api_req): # prepare the input test data fake_volume_id = self._get_fake_volume_id() src_volume = {'display_name': 'DS1Snap1'} cloned_volume = { 'source_volid': fake_volume_id, 'id': 'SomeNewID', 'display_name': 'CloneOfDS1Snap1' } # Test - I # configure the mocks with respective sideeffects mock_api_req.side_effect = self._side_effect_api_req mock_create_vol_from_snap.side_effect = ( self._side_effect_create_vol_from_snap) mock_create_snapshot.side_effect = ( self._side_effect_create_snapshot) # now run the test self.driver.create_cloned_volume(cloned_volume, src_volume) # assert that n api calls were invoked self.assertEqual(0, mock_api_req.call_count) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_create_volume_from_snapshot(self, mock_api_req): # prepare the input test data fake_volume_id = self._get_fake_volume_id() snapshot = { 'volume_id': fake_volume_id, 'provider_id': 'devpool1/acc1openstacktsm/DS1@DS1Snap1', 'id': 'SomeSnapID', 'volume': { 'provider_id': fake_volume_id } } cloned_volume = { 'display_name': 'CloneOfDS1Snap1', 'id': 'ClonedVolID' } # Test - I # enable CHAP self._side_effect_enable_chap() # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test provider_details = ( self.driver.create_volume_from_snapshot(cloned_volume, snapshot)) # assert the result self.assertEqual( 'CHAP fakeauthgroupchapuser fakeauthgroupchapsecret', provider_details['provider_auth']) self.assertEqual( '20.10.22.56:3260 ' 'iqn.2014-06.acc1.openstacktsm:acc1DS1Snap1clone1 0', provider_details['provider_location']) # assert the invoked api calls to CloudByte Storage self.assertEqual(4, mock_api_req.call_count) # Test - II # reset the mocks mock_api_req.reset_mock() # disable CHAP self._side_effect_disable_chap() # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test provider_details = ( self.driver.create_volume_from_snapshot(cloned_volume, snapshot)) # assert the result self.assertIsNone(provider_details['provider_auth']) self.assertEqual( '20.10.22.56:3260 ' 'iqn.2014-06.acc1.openstacktsm:acc1DS1Snap1clone1 0', provider_details['provider_location']) # assert n api calls were invoked 
self.assertEqual(1, mock_api_req.call_count) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_extend_volume(self, mock_api_req): # prepare the input test data fake_volume_id = self._get_fake_volume_id() volume = { 'id': 'SomeID', 'provider_id': fake_volume_id } new_size = '2' # Test - I # configure the mock with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test self.driver.extend_volume(volume, new_size) # assert n api calls were invoked self.assertEqual(1, mock_api_req.call_count) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_create_export(self, mock_api_req): # Test - I # enable CHAP self._side_effect_enable_chap() # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test model_update = self.driver.create_export({}, {}, {}) # assert the result self.assertEqual('CHAP fakeauthgroupchapuser fakeauthgroupchapsecret', model_update['provider_auth']) # Test - II # reset the mocks mock_api_req.reset_mock() # disable CHAP self._side_effect_disable_chap() # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test model_update = self.driver.create_export({}, {}, {}) # assert the result self.assertIsNone(model_update['provider_auth']) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_ensure_export(self, mock_api_req): # Test - I # enable CHAP self._side_effect_enable_chap() # configure the mock with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test model_update = self.driver.ensure_export({}, {}) # assert the result to have a provider_auth attribute self.assertEqual('CHAP fakeauthgroupchapuser fakeauthgroupchapsecret', model_update['provider_auth']) # Test - II # reset the mocks mock_api_req.reset_mock() # disable CHAP self._side_effect_disable_chap() # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req # now run the test model_update = self.driver.create_export({}, {}, {}) # assert the result self.assertIsNone(model_update['provider_auth']) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') def test_get_volume_stats(self, mock_api_req): # configure the mock with a side-effect mock_api_req.side_effect = self._side_effect_api_req # Test - I # run the test vol_stats = self.driver.get_volume_stats() # assert 0 api calls were invoked self.assertEqual(0, mock_api_req.call_count) # Test - II # run the test with refresh as True vol_stats = self.driver.get_volume_stats(refresh=True) # assert n api calls were invoked self.assertEqual(1, mock_api_req.call_count) # assert the result attributes with respective values self.assertEqual(1024.0, vol_stats['total_capacity_gb']) self.assertEqual(824.0, vol_stats['free_capacity_gb']) self.assertEqual(0, vol_stats['reserved_percentage']) self.assertEqual('CloudByte', vol_stats['vendor_name']) self.assertEqual('iSCSI', vol_stats['storage_protocol']) # Test - III # configure the mocks with side-effect mock_api_req.reset_mock() mock_api_req.side_effect = self._side_effect_api_req_to_list_tsm # run the test with refresh as True with testtools.ExpectedException( exception.VolumeBackendAPIException, "Bad or unexpected response from the storage volume " "backend API: No response was received from CloudByte " "storage list tsm API call."): 
self.driver.get_volume_stats(refresh=True) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') @mock.patch.object(volume_types, 'get_volume_type') @mock.patch.object(qos_specs, 'get_qos_specs') def test_retype(self, get_qos_spec, get_volume_type, mock_api_req): # prepare the input test data fake_new_type = {'id': 'fake-new-type-id'} fake_volume_id = self._get_fake_volume_id() volume = { 'id': 'SomeID', 'provider_id': fake_volume_id } # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req get_qos_spec.side_effect = self._fake_get_qos_spec get_volume_type.side_effect = self._fake_get_volume_type self.assertTrue(self.driver.retype(self.ctxt, volume, fake_new_type, None, None)) # assert the invoked api calls self.assertEqual(3, mock_api_req.call_count) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') @mock.patch.object(volume_types, 'get_volume_type') @mock.patch.object(qos_specs, 'get_qos_specs') def test_retype_without_provider_id(self, get_qos_spec, get_volume_type, mock_api_req): # prepare the input test data fake_new_type = {'id': 'fake-new-type-id'} volume = {'id': 'SomeID'} # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req get_qos_spec.side_effect = self._fake_get_qos_spec get_volume_type.side_effect = self._fake_get_volume_type # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.retype, self.ctxt, volume, fake_new_type, None, None) @mock.patch.object(cloudbyte.CloudByteISCSIDriver, '_api_request_for_cloudbyte') @mock.patch.object(volume_types, 'get_volume_type') @mock.patch.object(qos_specs, 'get_qos_specs') def test_retype_without_filesystem(self, get_qos_spec, get_volume_type, mock_api_req): # prepare the input test data fake_new_type = {'id': 'fake-new-type-id'} fake_volume_id = self._get_fake_volume_id() volume = { 'id': 'SomeID', 'provider_id': fake_volume_id } # configure the mocks with respective side-effects mock_api_req.side_effect = self._side_effect_api_req get_qos_spec.side_effect = self._fake_get_qos_spec get_volume_type.side_effect = self._fake_get_volume_type mock_api_req.side_effect = self._fake_api_req_to_list_filesystem # Now run the test & assert the exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.retype, self.ctxt, volume, fake_new_type, None, None) cinder-8.0.0/cinder/tests/unit/test_dothill.py0000664000567000056710000010363412701406250022565 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
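# Illustrative sketch (not from the original file): the client tests below
# stub the HTTP layer rather than the XML parser. requests.get is patched so
# that response.text.encode() yields a canned XML document, which
# DotHillClient then parses with lxml.etree exactly as it would a live reply:
#
#     @mock.patch('requests.get')
#     def test_login(self, mock_requests_get):
#         m = mock.Mock()
#         m.text.encode.side_effect = [resp_login]
#         mock_requests_get.return_value = m
#         self.client.login()
#         self.assertEqual(session_key, self.client._session_key)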
# """Unit tests for OpenStack Cinder DotHill driver.""" from lxml import etree import mock import requests from cinder import exception from cinder import test from cinder.volume.drivers.dothill import dothill_client as dothill from cinder.volume.drivers.dothill import dothill_common from cinder.volume.drivers.dothill import dothill_fc from cinder.volume.drivers.dothill import dothill_iscsi from cinder.zonemanager import utils as fczm_utils session_key = '12a1626754554a21d85040760c81b' resp_login = ''' success 0 12a1626754554a21d85040760c81b 1''' resp_badlogin = ''' error 1 Authentication failure 1''' response_ok = ''' some data 0 ''' response_not_ok = ''' Error Message 1 ''' response_stats_linear = ''' 3863830528 3863830528 ''' response_stats_virtual = ''' 3863830528 3863830528 ''' response_no_lun = '''''' response_lun = ''' 1 4''' response_ports = ''' FC id1 Disconnected FC id2 Up iSCSI id3 10.0.0.10 Disconnected iSCSI id4 10.0.0.11 Up iSCSI id5 10.0.0.12 Up ''' response_ports_linear = response_ports % {'ip': 'primary-ip-address'} response_ports_virtual = response_ports % {'ip': 'ip-address'} invalid_xml = '''''' malformed_xml = '''''' fake_xml = '''''' stats_low_space = {'free_capacity_gb': 10, 'total_capacity_gb': 100} stats_large_space = {'free_capacity_gb': 90, 'total_capacity_gb': 100} vol_id = 'fceec30e-98bc-4ce5-85ff-d7309cc17cc2' test_volume = {'id': vol_id, 'name_id': None, 'display_name': 'test volume', 'name': 'volume', 'size': 10} test_retype_volume = {'attach_status': 'available', 'id': vol_id, 'name_id': None, 'display_name': 'test volume', 'name': 'volume', 'size': 10} test_host = {'capabilities': {'location_info': 'DotHillVolumeDriver:xxxxx:dg02:A'}} test_snap = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'volume': {'name_id': None}, 'volume_id': vol_id, 'display_name': 'test volume', 'name': 'volume', 'size': 10} encoded_volid = 'v_O7DDpi8TOWF_9cwnMF' encoded_snapid = 's_O7DDpi8TOWF_9cwnMF' dest_volume = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'source_volid': vol_id, 'display_name': 'test volume', 'name': 'volume', 'size': 10} attached_volume = {'id': vol_id, 'display_name': 'test volume', 'name': 'volume', 'size': 10, 'status': 'in-use', 'attach_status': 'attached'} attaching_volume = {'id': vol_id, 'display_name': 'test volume', 'name': 'volume', 'size': 10, 'status': 'attaching', 'attach_status': 'attached'} detached_volume = {'id': vol_id, 'name_id': None, 'display_name': 'test volume', 'name': 'volume', 'size': 10, 'status': 'available', 'attach_status': 'detached'} connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'wwpns': ["111111111111111", "111111111111112"], 'wwnns': ["211111111111111", "211111111111112"], 'host': 'fakehost'} invalid_connector = {'ip': '10.0.0.2', 'initiator': '', 'wwpns': [], 'wwnns': [], 'host': 'fakehost'} class TestDotHillClient(test.TestCase): def setUp(self): super(TestDotHillClient, self).setUp() self.login = 'manage' self.passwd = '!manage' self.ip = '10.0.0.1' self.protocol = 'http' self.ssl_verify = False self.client = dothill.DotHillClient(self.ip, self.login, self.passwd, self.protocol, self.ssl_verify) @mock.patch('requests.get') def test_login(self, mock_requests_get): m = mock.Mock() m.text.encode.side_effect = [resp_login] mock_requests_get.return_value = m self.client.login() self.assertEqual(session_key, self.client._session_key) m.text.encode.side_effect = [resp_badlogin] self.assertRaises(exception.DotHillAuthenticationError, self.client.login) def test_build_request_url(self): url = 
self.client._build_request_url('/path') self.assertEqual('http://10.0.0.1/api/path', url) url = self.client._build_request_url('/path', arg1='val1') self.assertEqual('http://10.0.0.1/api/path/arg1/val1', url) url = self.client._build_request_url('/path', arg_1='val1') self.assertEqual('http://10.0.0.1/api/path/arg-1/val1', url) url = self.client._build_request_url('/path', 'arg1') self.assertEqual('http://10.0.0.1/api/path/arg1', url) url = self.client._build_request_url('/path', 'arg1', arg2='val2') self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1', url) url = self.client._build_request_url('/path', 'arg1', 'arg3', arg2='val2') self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1/arg3', url) @mock.patch('requests.get') def test_request(self, mock_requests_get): self.client._session_key = session_key m = mock.Mock() m.text.encode.side_effect = [response_ok, malformed_xml, requests.exceptions. RequestException("error")] mock_requests_get.return_value = m ret = self.client._request('/path') self.assertTrue(type(ret) == etree._Element) self.assertRaises(exception.DotHillConnectionError, self.client._request, '/path') self.assertRaises(exception.DotHillConnectionError, self.client._request, '/path') def test_assert_response_ok(self): ok_tree = etree.XML(response_ok) not_ok_tree = etree.XML(response_not_ok) invalid_tree = etree.XML(invalid_xml) ret = self.client._assert_response_ok(ok_tree) self.assertIsNone(ret) self.assertRaises(exception.DotHillRequestError, self.client._assert_response_ok, not_ok_tree) self.assertRaises(exception.DotHillRequestError, self.client._assert_response_ok, invalid_tree) @mock.patch.object(dothill.DotHillClient, '_request') def test_backend_exists(self, mock_request): mock_request.side_effect = [exception.DotHillRequestError, fake_xml] self.assertFalse(self.client.backend_exists('backend_name', 'linear')) self.assertTrue(self.client.backend_exists('backend_name', 'linear')) @mock.patch.object(dothill.DotHillClient, '_request') def test_backend_stats(self, mock_request): stats = {'free_capacity_gb': 1979, 'total_capacity_gb': 1979} linear = etree.XML(response_stats_linear) virtual = etree.XML(response_stats_virtual) mock_request.side_effect = [linear, virtual] self.assertEqual(stats, self.client.backend_stats('OpenStack', 'linear')) self.assertEqual(stats, self.client.backend_stats('A', 'virtual')) @mock.patch.object(dothill.DotHillClient, '_request') def test_get_lun(self, mock_request): mock_request.side_effect = [etree.XML(response_no_lun), etree.XML(response_lun)] ret = self.client._get_first_available_lun_for_host("fakehost") self.assertEqual(1, ret) ret = self.client._get_first_available_lun_for_host("fakehost") self.assertEqual(2, ret) @mock.patch.object(dothill.DotHillClient, '_request') def test_get_ports(self, mock_request): mock_request.side_effect = [etree.XML(response_ports)] ret = self.client.get_active_target_ports() self.assertEqual([{'port-type': 'FC', 'target-id': 'id2', 'status': 'Up'}, {'port-type': 'iSCSI', 'target-id': 'id4', 'status': 'Up'}, {'port-type': 'iSCSI', 'target-id': 'id5', 'status': 'Up'}], ret) @mock.patch.object(dothill.DotHillClient, '_request') def test_get_fc_ports(self, mock_request): mock_request.side_effect = [etree.XML(response_ports)] ret = self.client.get_active_fc_target_ports() self.assertEqual(['id2'], ret) @mock.patch.object(dothill.DotHillClient, '_request') def test_get_iscsi_iqns(self, mock_request): mock_request.side_effect = [etree.XML(response_ports)] ret = 
self.client.get_active_iscsi_target_iqns() self.assertEqual(['id4', 'id5'], ret) @mock.patch.object(dothill.DotHillClient, '_request') def test_get_iscsi_portals(self, mock_request): portals = {'10.0.0.12': 'Up', '10.0.0.11': 'Up'} mock_request.side_effect = [etree.XML(response_ports_linear), etree.XML(response_ports_virtual)] ret = self.client.get_active_iscsi_target_portals() self.assertEqual(portals, ret) ret = self.client.get_active_iscsi_target_portals() self.assertEqual(portals, ret) class FakeConfiguration1(object): dothill_backend_name = 'OpenStack' dothill_backend_type = 'linear' san_ip = '10.0.0.1' san_login = 'manage' san_password = '!manage' dothill_api_protocol = 'http' def safe_get(self, key): return 'fakevalue' class FakeConfiguration2(FakeConfiguration1): dothill_iscsi_ips = ['10.0.0.11'] use_chap_auth = None class TestFCDotHillCommon(test.TestCase): def setUp(self): super(TestFCDotHillCommon, self).setUp() self.config = FakeConfiguration1() self.common = dothill_common.DotHillCommon(self.config) self.common.client_login = mock.MagicMock() self.common.client_logout = mock.MagicMock() self.common.serialNumber = "xxxxx" self.common.owner = "A" self.connector_element = "wwpns" @mock.patch.object(dothill.DotHillClient, 'get_serial_number') @mock.patch.object(dothill.DotHillClient, 'get_owner_info') @mock.patch.object(dothill.DotHillClient, 'backend_exists') def test_do_setup(self, mock_backend_exists, mock_owner_info, mock_serial_number): mock_backend_exists.side_effect = [False, True] mock_owner_info.return_value = "A" mock_serial_number.return_value = "xxxxx" self.assertRaises(exception.DotHillInvalidBackend, self.common.do_setup, None) self.assertIsNone(self.common.do_setup(None)) mock_backend_exists.assert_called_with(self.common.backend_name, self.common.backend_type) mock_owner_info.assert_called_with(self.common.backend_name, self.common.backend_type) def test_vol_name(self): self.assertEqual(encoded_volid, self.common._get_vol_name(vol_id)) self.assertEqual(encoded_snapid, self.common._get_snap_name(vol_id)) def test_check_flags(self): class FakeOptions(object): def __init__(self, d): for k, v in d.items(): self.__dict__[k] = v options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'}) required_flags = ['opt1', 'opt2'] ret = self.common.check_flags(options, required_flags) self.assertIsNone(ret) options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'}) required_flags = ['opt1', 'opt2', 'opt3'] self.assertRaises(exception.Invalid, self.common.check_flags, options, required_flags) def test_assert_connector_ok(self): self.assertRaises(exception.InvalidInput, self.common._assert_connector_ok, invalid_connector, self.connector_element) self.assertIsNone(self.common._assert_connector_ok( connector, self.connector_element)) @mock.patch.object(dothill.DotHillClient, 'backend_stats') def test_update_volume_stats(self, mock_stats): mock_stats.side_effect = [exception.DotHillRequestError, stats_large_space] self.assertRaises(exception.Invalid, self.common._update_volume_stats) mock_stats.assert_called_with(self.common.backend_name, self.common.backend_type) ret = self.common._update_volume_stats() self.assertIsNone(ret) self.assertEqual({'driver_version': self.common.VERSION, 'pools': [{'QoS_support': False, 'free_capacity_gb': 90, 'location_info': 'DotHillVolumeDriver:xxxxx:OpenStack:A', 'pool_name': 'OpenStack', 'total_capacity_gb': 100}], 'storage_protocol': None, 'vendor_name': 'DotHill', 'volume_backend_name': None}, self.common.stats) @mock.patch.object(dothill.DotHillClient, 
'create_volume') def test_create_volume(self, mock_create): mock_create.side_effect = [exception.DotHillRequestError, None] self.assertRaises(exception.Invalid, self.common.create_volume, test_volume) ret = self.common.create_volume(test_volume) self.assertIsNone(ret) mock_create.assert_called_with(encoded_volid, "%sGB" % test_volume['size'], self.common.backend_name, self.common.backend_type) @mock.patch.object(dothill.DotHillClient, 'delete_volume') def test_delete_volume(self, mock_delete): not_found_e = exception.DotHillRequestError( 'The volume was not found on this system.') mock_delete.side_effect = [not_found_e, exception.DotHillRequestError, None] self.assertIsNone(self.common.delete_volume(test_volume)) self.assertRaises(exception.Invalid, self.common.delete_volume, test_volume) self.assertIsNone(self.common.delete_volume(test_volume)) mock_delete.assert_called_with(encoded_volid) @mock.patch.object(dothill.DotHillClient, 'copy_volume') @mock.patch.object(dothill.DotHillClient, 'backend_stats') def test_create_cloned_volume(self, mock_stats, mock_copy): mock_stats.side_effect = [stats_low_space, stats_large_space, stats_large_space] self.assertRaises(exception.DotHillNotEnoughSpace, self.common.create_cloned_volume, dest_volume, detached_volume) self.assertFalse(mock_copy.called) mock_copy.side_effect = [exception.DotHillRequestError, None] self.assertRaises(exception.Invalid, self.common.create_cloned_volume, dest_volume, detached_volume) ret = self.common.create_cloned_volume(dest_volume, detached_volume) self.assertIsNone(ret) mock_copy.assert_called_with(encoded_volid, 'vqqqqqqqqqqqqqqqqqqq', self.common.backend_name, self.common.backend_type) @mock.patch.object(dothill.DotHillClient, 'copy_volume') @mock.patch.object(dothill.DotHillClient, 'backend_stats') def test_create_volume_from_snapshot(self, mock_stats, mock_copy): mock_stats.side_effect = [stats_low_space, stats_large_space, stats_large_space] self.assertRaises(exception.DotHillNotEnoughSpace, self.common.create_volume_from_snapshot, dest_volume, test_snap) mock_copy.side_effect = [exception.DotHillRequestError, None] self.assertRaises(exception.Invalid, self.common.create_volume_from_snapshot, dest_volume, test_snap) ret = self.common.create_volume_from_snapshot(dest_volume, test_snap) self.assertIsNone(ret) mock_copy.assert_called_with('sqqqqqqqqqqqqqqqqqqq', 'vqqqqqqqqqqqqqqqqqqq', self.common.backend_name, self.common.backend_type) @mock.patch.object(dothill.DotHillClient, 'extend_volume') def test_extend_volume(self, mock_extend): mock_extend.side_effect = [exception.DotHillRequestError, None] self.assertRaises(exception.Invalid, self.common.extend_volume, test_volume, 20) ret = self.common.extend_volume(test_volume, 20) self.assertIsNone(ret) mock_extend.assert_called_with(encoded_volid, '10GB') @mock.patch.object(dothill.DotHillClient, 'create_snapshot') def test_create_snapshot(self, mock_create): mock_create.side_effect = [exception.DotHillRequestError, None] self.assertRaises(exception.Invalid, self.common.create_snapshot, test_snap) ret = self.common.create_snapshot(test_snap) self.assertIsNone(ret) mock_create.assert_called_with(encoded_volid, 'sqqqqqqqqqqqqqqqqqqq') @mock.patch.object(dothill.DotHillClient, 'delete_snapshot') def test_delete_snapshot(self, mock_delete): not_found_e = exception.DotHillRequestError( 'The volume was not found on this system.') mock_delete.side_effect = [not_found_e, exception.DotHillRequestError, None] self.assertIsNone(self.common.delete_snapshot(test_snap)) 
self.assertRaises(exception.Invalid, self.common.delete_snapshot, test_snap) self.assertIsNone(self.common.delete_snapshot(test_snap)) mock_delete.assert_called_with('sqqqqqqqqqqqqqqqqqqq') @mock.patch.object(dothill.DotHillClient, 'map_volume') def test_map_volume(self, mock_map): mock_map.side_effect = [exception.DotHillRequestError, 10] self.assertRaises(exception.Invalid, self.common.map_volume, test_volume, connector, self.connector_element) lun = self.common.map_volume(test_volume, connector, self.connector_element) self.assertEqual(10, lun) mock_map.assert_called_with(encoded_volid, connector, self.connector_element) @mock.patch.object(dothill.DotHillClient, 'unmap_volume') def test_unmap_volume(self, mock_unmap): mock_unmap.side_effect = [exception.DotHillRequestError, None] self.assertRaises(exception.Invalid, self.common.unmap_volume, test_volume, connector, self.connector_element) ret = self.common.unmap_volume(test_volume, connector, self.connector_element) self.assertIsNone(ret) mock_unmap.assert_called_with(encoded_volid, connector, self.connector_element) @mock.patch.object(dothill.DotHillClient, 'copy_volume') @mock.patch.object(dothill.DotHillClient, 'delete_volume') @mock.patch.object(dothill.DotHillClient, 'modify_volume_name') def test_retype(self, mock_modify, mock_delete, mock_copy): mock_copy.side_effect = [exception.DotHillRequestError, None] self.assertRaises(exception.Invalid, self.common.migrate_volume, test_retype_volume, test_host) ret = self.common.migrate_volume(test_retype_volume, test_host) self.assertEqual((True, None), ret) ret = self.common.migrate_volume(test_retype_volume, {'capabilities': {}}) self.assertEqual((False, None), ret) @mock.patch.object(dothill_common.DotHillCommon, '_get_vol_name') @mock.patch.object(dothill.DotHillClient, 'modify_volume_name') def test_manage_existing(self, mock_modify, mock_volume): existing_ref = {'source-name': 'xxxx'} mock_modify.side_effect = [exception.DotHillRequestError, None] self.assertRaises(exception.Invalid, self.common.manage_existing, test_volume, existing_ref) ret = self.common.manage_existing(test_volume, existing_ref) self.assertIsNone(ret) @mock.patch.object(dothill.DotHillClient, 'get_volume_size') def test_manage_existing_get_size(self, mock_volume): existing_ref = {'source-name': 'xxxx'} mock_volume.side_effect = [exception.DotHillRequestError, 1] self.assertRaises(exception.Invalid, self.common.manage_existing_get_size, None, existing_ref) ret = self.common.manage_existing_get_size(None, existing_ref) self.assertEqual(1, ret) class TestISCSIDotHillCommon(TestFCDotHillCommon): def setUp(self): super(TestISCSIDotHillCommon, self).setUp() self.connector_element = 'initiator' class TestDotHillFC(test.TestCase): @mock.patch.object(dothill_common.DotHillCommon, 'do_setup') def setUp(self, mock_setup): super(TestDotHillFC, self).setUp() self.vendor_name = 'DotHill' mock_setup.return_value = True def fake_init(self, *args, **kwargs): super(dothill_fc.DotHillFCDriver, self).__init__() self.common = None self.configuration = FakeConfiguration1() self.lookup_service = fczm_utils.create_lookup_service() dothill_fc.DotHillFCDriver.__init__ = fake_init self.driver = dothill_fc.DotHillFCDriver() self.driver.do_setup(None) def _test_with_mock(self, mock, method, args, expected=None): func = getattr(self.driver, method) mock.side_effect = [exception.Invalid(), None] self.assertRaises(exception.Invalid, func, *args) self.assertEqual(expected, func(*args)) @mock.patch.object(dothill_common.DotHillCommon, 
'create_volume') def test_create_volume(self, mock_create): self._test_with_mock(mock_create, 'create_volume', [None]) @mock.patch.object(dothill_common.DotHillCommon, 'create_cloned_volume') def test_create_cloned_volume(self, mock_create): self._test_with_mock(mock_create, 'create_cloned_volume', [None, None]) @mock.patch.object(dothill_common.DotHillCommon, 'create_volume_from_snapshot') def test_create_volume_from_snapshot(self, mock_create): self._test_with_mock(mock_create, 'create_volume_from_snapshot', [None, None]) @mock.patch.object(dothill_common.DotHillCommon, 'delete_volume') def test_delete_volume(self, mock_delete): self._test_with_mock(mock_delete, 'delete_volume', [None]) @mock.patch.object(dothill_common.DotHillCommon, 'create_snapshot') def test_create_snapshot(self, mock_create): self._test_with_mock(mock_create, 'create_snapshot', [None]) @mock.patch.object(dothill_common.DotHillCommon, 'delete_snapshot') def test_delete_snapshot(self, mock_delete): self._test_with_mock(mock_delete, 'delete_snapshot', [None]) @mock.patch.object(dothill_common.DotHillCommon, 'extend_volume') def test_extend_volume(self, mock_extend): self._test_with_mock(mock_extend, 'extend_volume', [None, 10]) @mock.patch.object(dothill_common.DotHillCommon, 'client_logout') @mock.patch.object(dothill_common.DotHillCommon, 'get_active_fc_target_ports') @mock.patch.object(dothill_common.DotHillCommon, 'map_volume') @mock.patch.object(dothill_common.DotHillCommon, 'client_login') def test_initialize_connection(self, mock_login, mock_map, mock_ports, mock_logout): mock_login.return_value = None mock_logout.return_value = None mock_map.side_effect = [exception.Invalid, 1] mock_ports.side_effect = [['id1']] self.assertRaises(exception.Invalid, self.driver.initialize_connection, test_volume, connector) mock_map.assert_called_with(test_volume, connector, 'wwpns') ret = self.driver.initialize_connection(test_volume, connector) self.assertEqual({'driver_volume_type': 'fibre_channel', 'data': {'initiator_target_map': { '111111111111111': ['id1'], '111111111111112': ['id1']}, 'target_wwn': ['id1'], 'target_lun': 1, 'target_discovered': True}}, ret) @mock.patch.object(dothill_common.DotHillCommon, 'unmap_volume') @mock.patch.object(dothill.DotHillClient, 'list_luns_for_host') def test_terminate_connection(self, mock_list, mock_unmap): mock_unmap.side_effect = [exception.Invalid, 1] mock_list.side_effect = ['yes'] actual = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertRaises(exception.Invalid, self.driver.terminate_connection, test_volume, connector) mock_unmap.assert_called_with(test_volume, connector, 'wwpns') ret = self.driver.terminate_connection(test_volume, connector) self.assertEqual(actual, ret) @mock.patch.object(dothill_common.DotHillCommon, 'get_volume_stats') def test_get_volume_stats(self, mock_stats): stats = {'storage_protocol': None, 'driver_version': self.driver.VERSION, 'volume_backend_name': None, 'vendor_name': self.vendor_name, 'pools': [{'free_capacity_gb': 90, 'reserved_percentage': 0, 'total_capacity_gb': 100, 'QoS_support': False, 'location_info': 'xx:xx:xx:xx', 'pool_name': 'x'}]} mock_stats.side_effect = [exception.Invalid, stats, stats] self.assertRaises(exception.Invalid, self.driver.get_volume_stats, False) ret = self.driver.get_volume_stats(False) self.assertEqual(stats, ret) ret = self.driver.get_volume_stats(True) self.assertEqual(stats, ret) mock_stats.assert_called_with(True) @mock.patch.object(dothill_common.DotHillCommon, 'retype') def test_retype(self, 
mock_retype): mock_retype.side_effect = [exception.Invalid, True, False] args = [None, None, None, None, None] self.assertRaises(exception.Invalid, self.driver.retype, *args) self.assertTrue(self.driver.retype(*args)) self.assertFalse(self.driver.retype(*args)) @mock.patch.object(dothill_common.DotHillCommon, 'manage_existing') def test_manage_existing(self, mock_manage_existing): self._test_with_mock(mock_manage_existing, 'manage_existing', [None, None]) @mock.patch.object(dothill_common.DotHillCommon, 'manage_existing_get_size') def test_manage_size(self, mock_manage_size): mock_manage_size.side_effect = [exception.Invalid, 1] self.assertRaises(exception.Invalid, self.driver.manage_existing_get_size, None, None) self.assertEqual(1, self.driver.manage_existing_get_size(None, None)) class TestDotHillISCSI(TestDotHillFC): @mock.patch.object(dothill_common.DotHillCommon, 'do_setup') def setUp(self, mock_setup): super(TestDotHillISCSI, self).setUp() self.vendor_name = 'DotHill' mock_setup.return_value = True def fake_init(self, *args, **kwargs): super(dothill_iscsi.DotHillISCSIDriver, self).__init__() self.common = None self.configuration = FakeConfiguration2() self.iscsi_ips = ['10.0.0.11'] dothill_iscsi.DotHillISCSIDriver.__init__ = fake_init self.driver = dothill_iscsi.DotHillISCSIDriver() self.driver.do_setup(None) @mock.patch.object(dothill_common.DotHillCommon, 'client_logout') @mock.patch.object(dothill_common.DotHillCommon, 'get_active_iscsi_target_portals') @mock.patch.object(dothill_common.DotHillCommon, 'get_active_iscsi_target_iqns') @mock.patch.object(dothill_common.DotHillCommon, 'map_volume') @mock.patch.object(dothill_common.DotHillCommon, 'client_login') def test_initialize_connection(self, mock_login, mock_map, mock_iqns, mock_portals, mock_logout): mock_login.return_value = None mock_logout.return_value = None mock_map.side_effect = [exception.Invalid, 1] self.driver.iscsi_ips = ['10.0.0.11'] self.driver.initialize_iscsi_ports() mock_iqns.side_effect = [['id2']] mock_portals.return_value = {'10.0.0.11': 'Up', '10.0.0.12': 'Up'} self.assertRaises(exception.Invalid, self.driver.initialize_connection, test_volume, connector) mock_map.assert_called_with(test_volume, connector, 'initiator') ret = self.driver.initialize_connection(test_volume, connector) self.assertEqual({'driver_volume_type': 'iscsi', 'data': {'target_iqn': 'id2', 'target_lun': 1, 'target_discovered': True, 'target_portal': '10.0.0.11:3260'}}, ret) @mock.patch.object(dothill_common.DotHillCommon, 'unmap_volume') def test_terminate_connection(self, mock_unmap): mock_unmap.side_effect = [exception.Invalid, 1] self.assertRaises(exception.Invalid, self.driver.terminate_connection, test_volume, connector) mock_unmap.assert_called_with(test_volume, connector, 'initiator') ret = self.driver.terminate_connection(test_volume, connector) self.assertIsNone(ret) cinder-8.0.0/cinder/tests/unit/monkey_patch_example/0000775000567000056710000000000012701406543023707 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/monkey_patch_example/example_a.py0000664000567000056710000000162512701406250026213 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module A for testing utils.monkey_patch().""" def example_function_a(): return 'Example function' class ExampleClassA(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 cinder-8.0.0/cinder/tests/unit/monkey_patch_example/__init__.py0000664000567000056710000000213012701406250026007 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module for testing utils.monkey_patch().""" CALLED_FUNCTION = [] def example_decorator(name, function): """decorator for notify which is used from utils.monkey_patch(). :param name: name of the function :param function: - object of the function :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): CALLED_FUNCTION.append(name) return function(*args, **kwarg) return wrapped_func cinder-8.0.0/cinder/tests/unit/monkey_patch_example/example_b.py0000664000567000056710000000162612701406250026215 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module B for testing utils.monkey_patch().""" def example_function_b(): return 'Example function' class ExampleClassB(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 cinder-8.0.0/cinder/tests/unit/test_evaluator.py0000664000567000056710000001427112701406250023126 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from cinder import exception from cinder.scheduler.evaluator import evaluator from cinder import test class EvaluatorTestCase(test.TestCase): def test_simple_integer(self): self.assertEqual(2, evaluator.evaluate("1+1")) self.assertEqual(9, evaluator.evaluate("2+3+4")) self.assertEqual(23, evaluator.evaluate("11+12")) self.assertEqual(30, evaluator.evaluate("5*6")) self.assertEqual(2, evaluator.evaluate("22/11")) self.assertEqual(38, evaluator.evaluate("109-71")) self.assertEqual( 493, evaluator.evaluate("872 - 453 + 44 / 22 * 4 + 66")) def test_simple_float(self): self.assertEqual(2.0, evaluator.evaluate("1.0 + 1.0")) self.assertEqual(2.5, evaluator.evaluate("1.5 + 1.0")) self.assertEqual(3.0, evaluator.evaluate("1.5 * 2.0")) def test_int_float_mix(self): self.assertEqual(2.5, evaluator.evaluate("1.5 + 1")) self.assertEqual(4.25, evaluator.evaluate("8.5 / 2")) self.assertEqual(5.25, evaluator.evaluate("10/4+0.75 + 2")) def test_negative_numbers(self): self.assertEqual(-2, evaluator.evaluate("-2")) self.assertEqual(-1, evaluator.evaluate("-2+1")) self.assertEqual(3, evaluator.evaluate("5+-2")) def test_exponent(self): self.assertEqual(8, evaluator.evaluate("2^3")) self.assertEqual(-8, evaluator.evaluate("-2 ^ 3")) self.assertEqual(15.625, evaluator.evaluate("2.5 ^ 3")) self.assertEqual(8, evaluator.evaluate("4 ^ 1.5")) def test_function(self): self.assertEqual(5, evaluator.evaluate("abs(-5)")) self.assertEqual(2, evaluator.evaluate("abs(2)")) self.assertEqual(1, evaluator.evaluate("min(1, 100)")) self.assertEqual(100, evaluator.evaluate("max(1, 100)")) def test_parentheses(self): self.assertEqual(1, evaluator.evaluate("(1)")) self.assertEqual(-1, evaluator.evaluate("(-1)")) self.assertEqual(2, evaluator.evaluate("(1+1)")) self.assertEqual(15, evaluator.evaluate("(1+2) * 5")) self.assertEqual(3, evaluator.evaluate("(1+2)*(3-1)/((1+(2-1)))")) self.assertEqual( -8.0, evaluator. evaluate("((1.0 / 0.5) * (2)) *(-2)")) def test_comparisons(self): self.assertTrue(evaluator.evaluate("1 < 2")) self.assertTrue(evaluator.evaluate("2 > 1")) self.assertTrue(evaluator.evaluate("2 != 1")) self.assertFalse(evaluator.evaluate("1 > 2")) self.assertFalse(evaluator.evaluate("2 < 1")) self.assertFalse(evaluator.evaluate("2 == 1")) self.assertTrue(evaluator.evaluate("(1 == 1) == !(1 == 2)")) def test_logic_ops(self): self.assertTrue(evaluator.evaluate("(1 == 1) AND (2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) and (2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) && (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) && (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) OR (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) or (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) || (5 == 2)")) self.assertFalse(evaluator.evaluate("(5 == 1) || (5 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND NOT (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND not (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND !(2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) AND NOT (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) OR NOT (2 == 2) " "AND (5 == 5)")) def test_ternary_conditional(self): self.assertEqual(5, evaluator.evaluate("(1 < 2) ? 5 : 10")) self.assertEqual(10, evaluator.evaluate("(1 > 2) ? 
5 : 10")) def test_variables_dict(self): stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407} request = {'iops': 500, 'size': 4} self.assertEqual(1500, evaluator.evaluate("stats.iops + request.iops", stats=stats, request=request)) def test_missing_var(self): stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407} request = {'iops': 500, 'size': 4} self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "foo.bob + 5", stats=stats, request=request) self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "stats.bob + 5", stats=stats, request=request) self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "fake.var + 1", stats=stats, request=request, fake=None) def test_bad_expression(self): self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "1/*1") def test_nonnumber_comparison(self): nonnumber = {'test': 'foo'} request = {'test': 'bar'} self.assertRaises( exception.EvaluatorParseException, evaluator.evaluate, "nonnumber.test != request.test", nonnumber=nonnumber, request=request) def test_div_zero(self): self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "7 / 0") cinder-8.0.0/cinder/tests/unit/test_nexenta_edge.py0000664000567000056710000001415212701406257023557 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from cinder import context from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.nexentaedge import iscsi NEDGE_URL = 'service/isc/iscsi' NEDGE_BUCKET = 'c/t/bk' NEDGE_SERVICE = 'isc' NEDGE_BLOCKSIZE = 4096 NEDGE_CHUNKSIZE = 16384 MOCK_VOL = { 'id': 'vol1', 'name': 'vol1', 'size': 1 } MOCK_VOL2 = { 'id': 'vol2', 'name': 'vol2', 'size': 1 } MOCK_SNAP = { 'id': 'snap1', 'name': 'snap1', 'volume_name': 'vol1' } NEW_VOL_SIZE = 2 ISCSI_TARGET_NAME = 'iscsi_target_name' ISCSI_TARGET_STATUS = 'Target 1: ' + ISCSI_TARGET_NAME class TestNexentaEdgeISCSIDriver(test.TestCase): def setUp(self): super(TestNexentaEdgeISCSIDriver, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.nexenta_client_address = '0.0.0.0' self.cfg.nexenta_rest_address = '0.0.0.0' self.cfg.nexenta_rest_port = 8080 self.cfg.nexenta_rest_protocol = 'http' self.cfg.nexenta_iscsi_target_portal_port = 3260 self.cfg.nexenta_rest_user = 'admin' self.cfg.nexenta_rest_password = 'admin' self.cfg.nexenta_lun_container = NEDGE_BUCKET self.cfg.nexenta_iscsi_service = NEDGE_SERVICE self.cfg.nexenta_blocksize = NEDGE_BLOCKSIZE self.cfg.nexenta_chunksize = NEDGE_CHUNKSIZE mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = iscsi.NexentaEdgeISCSIDriver(execute=mock_exec, configuration=self.cfg) self.api_patcher = mock.patch('cinder.volume.drivers.nexenta.' 'nexentaedge.jsonrpc.' 
'NexentaEdgeJSONProxy.__call__') self.mock_api = self.api_patcher.start() self.mock_api.return_value = { 'data': {'value': ISCSI_TARGET_STATUS} } self.driver.do_setup(context.get_admin_context()) self.addCleanup(self.api_patcher.stop) def test_check_do_setup(self): self.assertEqual(ISCSI_TARGET_NAME, self.driver.target_name) def test_create_volume(self): self.driver.create_volume(MOCK_VOL) self.mock_api.assert_called_with(NEDGE_URL, { 'objectPath': NEDGE_BUCKET + '/' + MOCK_VOL['id'], 'volSizeMB': MOCK_VOL['size'] * 1024, 'blockSize': NEDGE_BLOCKSIZE, 'chunkSize': NEDGE_CHUNKSIZE }) def test_create_volume_fail(self): self.mock_api.side_effect = RuntimeError self.assertRaises(RuntimeError, self.driver.create_volume, MOCK_VOL) def test_delete_volume(self): self.driver.delete_volume(MOCK_VOL) self.mock_api.assert_called_with(NEDGE_URL, { 'objectPath': NEDGE_BUCKET + '/' + MOCK_VOL['id'] }) def test_delete_volume_fail(self): self.mock_api.side_effect = RuntimeError self.assertRaises(RuntimeError, self.driver.delete_volume, MOCK_VOL) def test_extend_volume(self): self.driver.extend_volume(MOCK_VOL, NEW_VOL_SIZE) self.mock_api.assert_called_with(NEDGE_URL + '/resize', { 'objectPath': NEDGE_BUCKET + '/' + MOCK_VOL['id'], 'newSizeMB': NEW_VOL_SIZE * 1024 }) def test_extend_volume_fail(self): self.mock_api.side_effect = RuntimeError self.assertRaises(RuntimeError, self.driver.extend_volume, MOCK_VOL, NEW_VOL_SIZE) def test_create_snapshot(self): self.driver.create_snapshot(MOCK_SNAP) self.mock_api.assert_called_with(NEDGE_URL + '/snapshot', { 'objectPath': NEDGE_BUCKET + '/' + MOCK_VOL['id'], 'snapName': MOCK_SNAP['id'] }) def test_create_snapshot_fail(self): self.mock_api.side_effect = RuntimeError self.assertRaises(RuntimeError, self.driver.create_snapshot, MOCK_SNAP) def test_delete_snapshot(self): self.driver.delete_snapshot(MOCK_SNAP) self.mock_api.assert_called_with(NEDGE_URL + '/snapshot', { 'objectPath': NEDGE_BUCKET + '/' + MOCK_VOL['id'], 'snapName': MOCK_SNAP['id'] }) def test_delete_snapshot_fail(self): self.mock_api.side_effect = RuntimeError self.assertRaises(RuntimeError, self.driver.delete_snapshot, MOCK_SNAP) def test_create_volume_from_snapshot(self): self.driver.create_volume_from_snapshot(MOCK_VOL2, MOCK_SNAP) self.mock_api.assert_called_with(NEDGE_URL + '/snapshot/clone', { 'objectPath': NEDGE_BUCKET + '/' + MOCK_SNAP['volume_name'], 'clonePath': NEDGE_BUCKET + '/' + MOCK_VOL2['id'], 'snapName': MOCK_SNAP['id'] }) def test_create_volume_from_snapshot_fail(self): self.mock_api.side_effect = RuntimeError self.assertRaises(RuntimeError, self.driver.create_volume_from_snapshot, MOCK_VOL2, MOCK_SNAP) def test_create_cloned_volume(self): self.driver.create_cloned_volume(MOCK_VOL2, MOCK_VOL) self.mock_api.assert_called_with(NEDGE_URL, { 'objectPath': NEDGE_BUCKET + '/' + MOCK_VOL2['id'], 'volSizeMB': MOCK_VOL2['size'] * 1024, 'blockSize': NEDGE_BLOCKSIZE, 'chunkSize': NEDGE_CHUNKSIZE }) def test_create_cloned_volume_fail(self): self.mock_api.side_effect = RuntimeError self.assertRaises(RuntimeError, self.driver.create_cloned_volume, MOCK_VOL2, MOCK_VOL) cinder-8.0.0/cinder/tests/unit/objects/0000775000567000056710000000000012701406543021144 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/objects/test_backup.py0000664000567000056710000002171012701406250024016 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects from cinder.tests.unit import utils fake_backup = { 'id': fake.backup_id, 'volume_id': fake.volume_id, 'status': fields.BackupStatus.CREATING, 'size': 1, 'display_name': 'fake_name', 'display_description': 'fake_description', 'user_id': fake.user_id, 'project_id': fake.project_id, 'temp_volume_id': None, 'temp_snapshot_id': None, 'snapshot_id': None, 'data_timestamp': None, 'restore_volume_id': None, } class TestBackup(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.get_by_id', return_value=fake_backup) def test_get_by_id(self, backup_get): backup = objects.Backup.get_by_id(self.context, fake.user_id) self._compare(self, fake_backup, backup) backup_get.assert_called_once_with(self.context, models.Backup, fake.user_id) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): query = mock.Mock() filter_by = mock.Mock() filter_by.first.return_value = None query.filter_by.return_value = filter_by model_query.return_value = query self.assertRaises(exception.BackupNotFound, objects.Backup.get_by_id, self.context, 123) @mock.patch('cinder.db.backup_create', return_value=fake_backup) def test_create(self, backup_create): backup = objects.Backup(context=self.context) backup.create() self.assertEqual(fake_backup['id'], backup.id) self.assertEqual(fake_backup['volume_id'], backup.volume_id) @mock.patch('cinder.db.backup_update') def test_save(self, backup_update): backup = objects.Backup._from_db_object( self.context, objects.Backup(), fake_backup) backup.display_name = 'foobar' backup.save() backup_update.assert_called_once_with(self.context, backup.id, {'display_name': 'foobar'}) @mock.patch('cinder.db.backup_destroy') def test_destroy(self, backup_destroy): backup = objects.Backup(context=self.context, id=fake.backup_id) backup.destroy() self.assertTrue(backup_destroy.called) admin_context = backup_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) def test_obj_field_temp_volume_snapshot_id(self): backup = objects.Backup(context=self.context, temp_volume_id='2', temp_snapshot_id='3') self.assertEqual('2', backup.temp_volume_id) self.assertEqual('3', backup.temp_snapshot_id) def test_obj_field_snapshot_id(self): backup = objects.Backup(context=self.context, snapshot_id='2') self.assertEqual('2', backup.snapshot_id) def test_obj_field_restore_volume_id(self): backup = objects.Backup(context=self.context, restore_volume_id='2') self.assertEqual('2', backup.restore_volume_id) def test_import_record(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=fake.backup_id, parent_id=None, num_dependent_backups=0) export_string = backup.encode_record() imported_backup = objects.Backup.decode_record(export_string) # Make sure we don't lose data when converting from string 
self.assertDictEqual(self._expected_backup(backup), imported_backup) def test_import_record_additional_info(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=fake.backup_id, parent_id=None, num_dependent_backups=0) extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}} extra_info_copy = extra_info.copy() export_string = backup.encode_record(extra_info=extra_info) imported_backup = objects.Backup.decode_record(export_string) # Dictionary passed should not be modified self.assertDictEqual(extra_info_copy, extra_info) # Make sure we don't lose data when converting from string and that # extra info is still there expected = self._expected_backup(backup) expected['extra_info'] = extra_info self.assertDictEqual(expected, imported_backup) def _expected_backup(self, backup): record = {name: field.to_primitive(backup, name, getattr(backup, name)) for name, field in backup.fields.items()} return record def test_import_record_additional_info_cant_overwrite(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=fake.backup_id, parent_id=None, num_dependent_backups=0) export_string = backup.encode_record(id='fake_id') imported_backup = objects.Backup.decode_record(export_string) # Make sure the extra_info can't overwrite basic data self.assertDictEqual(self._expected_backup(backup), imported_backup) def test_import_record_decoding_error(self): export_string = '123456' self.assertRaises(exception.InvalidInput, objects.Backup.decode_record, export_string) def test_import_record_parsing_error(self): export_string = '' self.assertRaises(exception.InvalidInput, objects.Backup.decode_record, export_string) @mock.patch('cinder.db.sqlalchemy.api.backup_get') def test_refresh(self, backup_get): db_backup1 = fake_backup.copy() db_backup2 = db_backup1.copy() db_backup2['display_name'] = 'foobar' # On the second backup_get, return the backup with an updated # display_name backup_get.side_effect = [db_backup1, db_backup2] backup = objects.Backup.get_by_id(self.context, fake.backup_id) self._compare(self, db_backup1, backup) # display_name was updated, so a backup refresh should have a new value # for that field backup.refresh() self._compare(self, db_backup2, backup) if six.PY3: call_bool = mock.call.__bool__() else: call_bool = mock.call.__nonzero__() backup_get.assert_has_calls([mock.call(self.context, fake.backup_id), call_bool, mock.call(self.context, fake.backup_id)]) class TestBackupList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup]) def test_get_all(self, backup_get_all): backups = objects.BackupList.get_all(self.context) self.assertEqual(1, len(backups)) TestBackup._compare(self, fake_backup, backups[0]) @mock.patch('cinder.db.backup_get_all_by_project', return_value=[fake_backup]) def test_get_all_by_project(self, get_all_by_project): backups = objects.BackupList.get_all_by_project( self.context, self.project_id) self.assertEqual(1, len(backups)) TestBackup._compare(self, fake_backup, backups[0]) @mock.patch('cinder.db.backup_get_all_by_host', return_value=[fake_backup]) def test_get_all_for_volume(self, get_all_by_host): fake_volume_obj = fake_volume.fake_volume_obj(self.context) backups = objects.BackupList.get_all_by_host(self.context, fake_volume_obj.id) self.assertEqual(1, len(backups)) TestBackup._compare(self, fake_backup, backups[0]) @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup]) def test_get_all_tenants(self, 
backup_get_all): search_opts = {'all_tenants': 1} backups = objects.BackupList.get_all(self.context, search_opts) self.assertEqual(1, len(backups)) TestBackup._compare(self, fake_backup, backups[0]) cinder-8.0.0/cinder/tests/unit/objects/test_objects.py0000664000567000056710000000764212701406250024212 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fixture from cinder import db from cinder.objects import base from cinder import test # NOTE: The hashes in this list should only be changed if they come with a # corresponding version bump in the affected objects. object_data = { 'Backup': '1.4-cae44fe34d5a870110ba93adebc1edca', 'BackupImport': '1.4-cae44fe34d5a870110ba93adebc1edca', 'BackupList': '1.0-24591dabe26d920ce0756fe64cd5f3aa', 'CGSnapshot': '1.0-78b91e76cb4c56e9cf5c9c41e208c05a', 'CGSnapshotList': '1.0-e8c3f4078cd0ee23487b34d173eec776', 'ConsistencyGroup': '1.2-3aeb6b25664057e8078bd6d45bf23e0a', 'ConsistencyGroupList': '1.1-73916823b697dfa0c7f02508d87e0f28', 'Service': '1.3-66c8e1683f58546c54551e9ff0a3b111', 'ServiceList': '1.1-cb758b200f0a3a90efabfc5aa2ffb627', 'Snapshot': '1.0-404c1a8b48a808aa0b7cc92cd3ec1e57', 'SnapshotList': '1.0-71661e7180ef6cc51501704a9bea4bf1', 'Volume': '1.3-264388ec57bc4c3353c89f93bebf9482', 'VolumeAttachment': '1.0-8fc9a9ac6f554fdf2a194d25dbf28a3b', 'VolumeAttachmentList': '1.0-307d2b6c8dd55ef854f6386898e9e98e', 'VolumeList': '1.1-03ba6cb8c546683e64e15c50042cb1a3', 'VolumeType': '1.0-dd980cfd1eef2dcce941a981eb469fc8', 'VolumeTypeList': '1.1-8a1016c03570dc13b9a33fe04a6acb2c', } class TestObjectVersions(test.TestCase): def test_versions(self): checker = fixture.ObjectVersionChecker( base.CinderObjectRegistry.obj_classes()) expected, actual = checker.test_hashes(object_data) self.assertEqual(expected, actual, 'Some objects have changed; please make sure the ' 'versions have been bumped, and then update their ' 'hashes in the object_data map in this test module.') def test_versions_history(self): classes = base.CinderObjectRegistry.obj_classes() versions = base.OBJ_VERSIONS.get_current_versions() expected = {} actual = {} for name, cls in classes.items(): if name not in versions: expected[name] = cls[0].VERSION elif cls[0].VERSION != versions[name]: expected[name] = cls[0].VERSION actual[name] = versions[name] self.assertEqual(expected, actual, 'Some objects versions have changed; please make ' 'sure a new objects history version was added in ' 'cinder.objects.base.OBJ_VERSIONS.') def test_object_nullable_match_db(self): # This test is to keep nullable of every field in corresponding # db model and object match. def _check_table_matched(db_model, cls): for column in db_model.__table__.columns: if column.name in cls.fields: self.assertEqual( column.nullable, cls.fields[column.name].nullable, 'Column %(c)s in table %(t)s not match.' 
% {'c': column.name, 't': name}) classes = base.CinderObjectRegistry.obj_classes() for name, cls in classes.items(): if not issubclass(cls[0], base.ObjectListBase): db_model = db.get_model_for_versioned_object(cls[0]) _check_table_matched(db_model, cls[0]) cinder-8.0.0/cinder/tests/unit/objects/test_volume_type.py0000664000567000056710000001362612701406250025130 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects class TestVolumeType(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') def test_get_by_id(self, volume_type_get): db_volume_type = fake_volume.fake_db_volume_type() volume_type_get.return_value = db_volume_type volume_type = objects.VolumeType.get_by_id(self.context, fake.volume_type_id) self._compare(self, db_volume_type, volume_type) @mock.patch('cinder.volume.volume_types.create') def test_create(self, volume_type_create): db_volume_type = fake_volume.fake_db_volume_type() volume_type_create.return_value = db_volume_type volume_type = objects.VolumeType(context=self.context) volume_type.name = db_volume_type['name'] volume_type.extra_specs = db_volume_type['extra_specs'] volume_type.is_public = db_volume_type['is_public'] volume_type.projects = db_volume_type['projects'] volume_type.description = db_volume_type['description'] volume_type.create() volume_type_create.assert_called_once_with( self.context, db_volume_type['name'], db_volume_type['extra_specs'], db_volume_type['is_public'], db_volume_type['projects'], db_volume_type['description']) @mock.patch('cinder.volume.volume_types.update') def test_save(self, volume_type_update): db_volume_type = fake_volume.fake_db_volume_type() volume_type = objects.VolumeType._from_db_object(self.context, objects.VolumeType(), db_volume_type) volume_type.description = 'foobar' volume_type.save() volume_type_update.assert_called_once_with(self.context, volume_type.id, volume_type.name, volume_type.description) @mock.patch('cinder.volume.volume_types.destroy') def test_destroy(self, volume_type_destroy): db_volume_type = fake_volume.fake_db_volume_type() volume_type = objects.VolumeType._from_db_object(self.context, objects.VolumeType(), db_volume_type) volume_type.destroy() self.assertTrue(volume_type_destroy.called) admin_context = volume_type_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') def test_refresh(self, volume_type_get): db_type1 = fake_volume.fake_db_volume_type() db_type2 = db_type1.copy() db_type2['description'] = 'foobar' # updated description volume_type_get.side_effect = [db_type1, db_type2] volume_type = objects.VolumeType.get_by_id(self.context, fake.volume_type_id) self._compare(self, db_type1, volume_type) # description was updated, so a volume type 
refresh should have a new # value for that field volume_type.refresh() self._compare(self, db_type2, volume_type) if six.PY3: call_bool = mock.call.__bool__() else: call_bool = mock.call.__nonzero__() volume_type_get.assert_has_calls([mock.call(self.context, fake.volume_type_id), call_bool, mock.call(self.context, fake.volume_type_id)]) class TestVolumeTypeList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.volume.volume_types.get_all_types') def test_get_all(self, get_all_types): db_volume_type = fake_volume.fake_db_volume_type() get_all_types.return_value = {db_volume_type['name']: db_volume_type} volume_types = objects.VolumeTypeList.get_all(self.context) self.assertEqual(1, len(volume_types)) TestVolumeType._compare(self, db_volume_type, volume_types[0]) @mock.patch('cinder.volume.volume_types.get_all_types') def test_get_all_with_pagination(self, get_all_types): db_volume_type = fake_volume.fake_db_volume_type() get_all_types.return_value = {db_volume_type['name']: db_volume_type} volume_types = objects.VolumeTypeList.get_all(self.context, filters={'is_public': True}, marker=None, limit=1, sort_keys='id', sort_dirs='desc', offset=None) self.assertEqual(1, len(volume_types)) TestVolumeType._compare(self, db_volume_type, volume_types[0]) cinder-8.0.0/cinder/tests/unit/objects/test_volume.py0000664000567000056710000005205112701406257024071 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
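# Several tests below share the refresh idiom also used in
# test_volume_type.py above: the DB getter is scripted to return a stale row
# first and an updated row second, so ``refresh()`` must surface the updated
# field on the second lookup. A compact sketch of the scripting half of that
# idiom (illustrative only; ``db_get`` and the row dicts are hypothetical
# stand-ins for the real DB API):
#
#     import mock
#
#     db_get = mock.Mock(side_effect=[
#         {'id': 'v1', 'display_name': 'old'},   # initial load
#         {'id': 'v1', 'display_name': 'new'},   # row as re-read on refresh
#     ])
#     assert 'old' == db_get('v1')['display_name']
#     assert 'new' == db_get('v1')['display_name']
#     db_get.assert_has_calls([mock.call('v1'), mock.call('v1')])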
import ddt import mock import six from cinder import context from cinder import exception from cinder import objects from cinder.tests.unit import fake_consistencygroup from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects @ddt.ddt class TestVolume(test_objects.BaseObjectsTestCase): @staticmethod def _compare(test, db, obj): db = {k: v for k, v in db.items() if not k.endswith('metadata') or k.startswith('volume')} test_objects.BaseObjectsTestCase._compare(test, db, obj) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_get_by_id(self, volume_get): db_volume = fake_volume.fake_db_volume() volume_get.return_value = db_volume volume = objects.Volume.get_by_id(self.context, fake.volume_id) volume_get.assert_called_once_with(self.context, fake.volume_id) self._compare(self, db_volume, volume) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): pf = model_query().options().options().options().options().options() pf.filter_by().first.return_value = None self.assertRaises(exception.VolumeNotFound, objects.Volume.get_by_id, self.context, 123) @mock.patch('cinder.db.volume_create') def test_create(self, volume_create): db_volume = fake_volume.fake_db_volume() volume_create.return_value = db_volume volume = objects.Volume(context=self.context) volume.create() self.assertEqual(db_volume['id'], volume.id) @mock.patch('cinder.db.volume_update') def test_save(self, volume_update): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.save() volume_update.assert_called_once_with(self.context, volume.id, {'display_name': 'foobar'}) @mock.patch('cinder.db.volume_metadata_update', return_value={'key1': 'value1'}) @mock.patch('cinder.db.volume_update') def test_save_with_metadata(self, volume_update, metadata_update): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.metadata = {'key1': 'value1'} self.assertEqual({'display_name': 'foobar', 'metadata': {'key1': 'value1'}}, volume.obj_get_changes()) volume.save() volume_update.assert_called_once_with(self.context, volume.id, {'display_name': 'foobar'}) metadata_update.assert_called_once_with(self.context, volume.id, {'key1': 'value1'}, True) @mock.patch('cinder.db.volume_admin_metadata_update', return_value={'key1': 'value1'}) @mock.patch('cinder.db.volume_update') def test_save_with_admin_metadata(self, volume_update, admin_metadata_update): # Test with no admin context db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.admin_metadata = {'key1': 'value1'} volume.save() self.assertFalse(admin_metadata_update.called) # Test with admin context admin_context = context.RequestContext(self.user_id, self.project_id, is_admin=True) volume = objects.Volume._from_db_object(admin_context, objects.Volume(), db_volume) volume.admin_metadata = {'key1': 'value1'} volume.save() admin_metadata_update.assert_called_once_with( admin_context, volume.id, {'key1': 'value1'}, True) def test_save_with_glance_metadata(self): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' 
volume.glance_metadata = {'key1': 'value1'} self.assertRaises(exception.ObjectActionError, volume.save) def test_save_with_consistencygroup(self): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.consistencygroup = objects.ConsistencyGroup() self.assertRaises(exception.ObjectActionError, volume.save) def test_save_with_snapshots(self): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.snapshots = objects.SnapshotList() self.assertRaises(exception.ObjectActionError, volume.save) @mock.patch('cinder.db.volume_destroy') def test_destroy(self, volume_destroy): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.destroy() self.assertTrue(volume_destroy.called) admin_context = volume_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) def test_obj_fields(self): volume = objects.Volume(context=self.context, id=fake.volume_id, name_id=fake.volume_name_id) self.assertEqual(['name', 'name_id', 'volume_metadata', 'volume_admin_metadata', 'volume_glance_metadata'], volume.obj_extra_fields) self.assertEqual('volume-%s' % fake.volume_name_id, volume.name) self.assertEqual(fake.volume_name_id, volume.name_id) def test_obj_field_previous_status(self): volume = objects.Volume(context=self.context, previous_status='backing-up') self.assertEqual('backing-up', volume.previous_status) @mock.patch('cinder.db.volume_metadata_delete') def test_delete_metadata_key(self, metadata_delete): volume = objects.Volume(self.context, id=fake.volume_id) volume.metadata = {'key1': 'value1', 'key2': 'value2'} self.assertEqual({}, volume._orig_metadata) volume.delete_metadata_key('key2') self.assertEqual({'key1': 'value1'}, volume.metadata) metadata_delete.assert_called_once_with(self.context, fake.volume_id, 'key2') @mock.patch('cinder.db.volume_metadata_get') @mock.patch('cinder.db.volume_glance_metadata_get') @mock.patch('cinder.db.volume_admin_metadata_get') @mock.patch('cinder.objects.volume_type.VolumeType.get_by_id') @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.' 'get_all_by_volume_id') @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_volume') def test_obj_load_attr(self, mock_sl_get_all_for_volume, mock_cg_get_by_id, mock_va_get_all_by_vol, mock_vt_get_by_id, mock_admin_metadata_get, mock_glance_metadata_get, mock_metadata_get): volume = objects.Volume._from_db_object( self.context, objects.Volume(), fake_volume.fake_db_volume()) # Test metadata lazy-loaded field metadata = {'foo': 'bar'} mock_metadata_get.return_value = metadata self.assertEqual(metadata, volume.metadata) mock_metadata_get.assert_called_once_with(self.context, volume.id) # Test glance_metadata lazy-loaded field glance_metadata = [{'key': 'foo', 'value': 'bar'}] mock_glance_metadata_get.return_value = glance_metadata self.assertEqual({'foo': 'bar'}, volume.glance_metadata) mock_glance_metadata_get.assert_called_once_with( self.context, volume.id) # Test volume_type lazy-loaded field # Case1. volume.volume_type_id = None self.assertIsNone(volume.volume_type) # Case2. 
volume2.volume_type_id = 1 fake2 = fake_volume.fake_db_volume() fake2.update({'volume_type_id': fake.volume_id}) volume2 = objects.Volume._from_db_object( self.context, objects.Volume(), fake2) volume_type = objects.VolumeType(context=self.context, id=fake.volume_type_id) mock_vt_get_by_id.return_value = volume_type self.assertEqual(volume_type, volume2.volume_type) mock_vt_get_by_id.assert_called_once_with(self.context, volume2.volume_type_id) # Test consistencygroup lazy-loaded field consistencygroup = objects.ConsistencyGroup( context=self.context, id=fake.consistency_group_id) mock_cg_get_by_id.return_value = consistencygroup self.assertEqual(consistencygroup, volume.consistencygroup) mock_cg_get_by_id.assert_called_once_with(self.context, volume.consistencygroup_id) # Test snapshots lazy-loaded field snapshots = objects.SnapshotList(context=self.context, id=fake.snapshot_id) mock_sl_get_all_for_volume.return_value = snapshots self.assertEqual(snapshots, volume.snapshots) mock_sl_get_all_for_volume.assert_called_once_with(self.context, volume.id) # Test volume_attachment lazy-loaded field va_objs = [objects.VolumeAttachment(context=self.context, id=i) for i in [fake.object_id, fake.object2_id, fake.object3_id]] va_list = objects.VolumeAttachmentList(context=self.context, objects=va_objs) mock_va_get_all_by_vol.return_value = va_list self.assertEqual(va_list, volume.volume_attachment) mock_va_get_all_by_vol.assert_called_once_with(self.context, volume.id) # Test admin_metadata lazy-loaded field - user context adm_metadata = {'bar': 'foo'} mock_admin_metadata_get.return_value = adm_metadata self.assertEqual({}, volume.admin_metadata) self.assertFalse(mock_admin_metadata_get.called) # Test admin_metadata lazy-loaded field - admin context adm_context = self.context.elevated() volume = objects.Volume._from_db_object(adm_context, objects.Volume(), fake_volume.fake_db_volume()) adm_metadata = {'bar': 'foo'} mock_admin_metadata_get.return_value = adm_metadata self.assertEqual(adm_metadata, volume.admin_metadata) mock_admin_metadata_get.assert_called_once_with(adm_context, volume.id) def test_from_db_object_with_all_expected_attributes(self): expected_attrs = ['metadata', 'admin_metadata', 'glance_metadata', 'volume_type', 'volume_attachment', 'consistencygroup'] db_metadata = [{'key': 'foo', 'value': 'bar'}] db_admin_metadata = [{'key': 'admin_foo', 'value': 'admin_bar'}] db_glance_metadata = [{'key': 'glance_foo', 'value': 'glance_bar'}] db_volume_type = fake_volume.fake_db_volume_type() db_volume_attachments = fake_volume.fake_db_volume_attachment() db_consistencygroup = fake_consistencygroup.fake_db_consistencygroup() db_snapshots = fake_snapshot.fake_db_snapshot() db_volume = fake_volume.fake_db_volume( volume_metadata=db_metadata, volume_admin_metadata=db_admin_metadata, volume_glance_metadata=db_glance_metadata, volume_type=db_volume_type, volume_attachment=[db_volume_attachments], consistencygroup=db_consistencygroup, snapshots=[db_snapshots], ) volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume, expected_attrs) self.assertEqual({'foo': 'bar'}, volume.metadata) self.assertEqual({'admin_foo': 'admin_bar'}, volume.admin_metadata) self.assertEqual({'glance_foo': 'glance_bar'}, volume.glance_metadata) self._compare(self, db_volume_type, volume.volume_type) self._compare(self, db_volume_attachments, volume.volume_attachment) self._compare(self, db_consistencygroup, volume.consistencygroup) self._compare(self, db_snapshots, volume.snapshots) 
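    # Editor's sketch (illustrative, not part of the original suite; the
    # helper name is hypothetical): the tests around this point rely on the
    # eager/lazy split in Volume -- attributes named in expected_attrs are
    # translated by _from_db_object() up front, everything else is fetched
    # on first access through obj_load_attr().
    def _sketch_expected_attrs_usage(self):
        db_volume = fake_volume.fake_db_volume(
            volume_metadata=[{'key': 'foo', 'value': 'bar'}])
        volume = objects.Volume._from_db_object(
            self.context, objects.Volume(), db_volume,
            expected_attrs=['metadata'])
        # 'metadata' was eager-loaded from the fake row above, so reading it
        # here does not hit cinder.db.volume_metadata_get; an attribute that
        # was not listed (e.g. glance_metadata) would trigger a DB lookup.
        return volume.metadata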
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={}) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_refresh(self, volume_get, volume_metadata_get): db_volume1 = fake_volume.fake_db_volume() db_volume2 = db_volume1.copy() db_volume2['display_name'] = 'foobar' # On the second volume_get, return the volume with an updated # display_name volume_get.side_effect = [db_volume1, db_volume2] volume = objects.Volume.get_by_id(self.context, fake.volume_id) self._compare(self, db_volume1, volume) # display_name was updated, so a volume refresh should have a new value # for that field volume.refresh() self._compare(self, db_volume2, volume) if six.PY3: call_bool = mock.call.__bool__() else: call_bool = mock.call.__nonzero__() volume_get.assert_has_calls([mock.call(self.context, fake.volume_id), call_bool, mock.call(self.context, fake.volume_id)]) def test_metadata_aliases(self): volume = objects.Volume(context=self.context) # metadata<->volume_metadata volume.metadata = {'abc': 'def'} self.assertEqual([{'key': 'abc', 'value': 'def'}], volume.volume_metadata) md = [{'key': 'def', 'value': 'abc'}] volume.volume_metadata = md self.assertEqual({'def': 'abc'}, volume.metadata) # admin_metadata<->volume_admin_metadata volume.admin_metadata = {'foo': 'bar'} self.assertEqual([{'key': 'foo', 'value': 'bar'}], volume.volume_admin_metadata) volume.volume_admin_metadata = [{'key': 'xyz', 'value': '42'}] self.assertEqual({'xyz': '42'}, volume.admin_metadata) # glance_metadata<->volume_glance_metadata volume.glance_metadata = {'jkl': 'mno'} self.assertEqual([{'key': 'jkl', 'value': 'mno'}], volume.volume_glance_metadata) volume.volume_glance_metadata = [{'key': 'prs', 'value': 'tuw'}] self.assertEqual({'prs': 'tuw'}, volume.glance_metadata) @mock.patch('cinder.db.volume_metadata_update', return_value={}) @mock.patch('cinder.db.volume_update') @ddt.data({'src_vol_type_id': fake.volume_type_id, 'dest_vol_type_id': fake.volume_type2_id}, {'src_vol_type_id': None, 'dest_vol_type_id': fake.volume_type2_id}) @ddt.unpack def test_finish_volume_migration(self, volume_update, metadata_update, src_vol_type_id, dest_vol_type_id): src_volume_db = fake_volume.fake_db_volume( **{'id': fake.volume_id, 'volume_type_id': src_vol_type_id}) if src_vol_type_id: src_volume_db['volume_type'] = fake_volume.fake_db_volume_type( id=src_vol_type_id) dest_volume_db = fake_volume.fake_db_volume( **{'id': fake.volume2_id, 'volume_type_id': dest_vol_type_id}) if dest_vol_type_id: dest_volume_db['volume_type'] = fake_volume.fake_db_volume_type( id=dest_vol_type_id) expected_attrs = objects.Volume._get_expected_attrs(self.context) src_volume = objects.Volume._from_db_object( self.context, objects.Volume(), src_volume_db, expected_attrs=expected_attrs) dest_volume = objects.Volume._from_db_object( self.context, objects.Volume(), dest_volume_db, expected_attrs=expected_attrs) updated_dest_volume = src_volume.finish_volume_migration( dest_volume) self.assertEqual('deleting', updated_dest_volume.migration_status) self.assertEqual('migration src for ' + src_volume.id, updated_dest_volume.display_description) self.assertEqual(src_volume.id, updated_dest_volume._name_id) self.assertTrue(volume_update.called) ctxt, vol_id, updates = volume_update.call_args[0] self.assertNotIn('volume_type', updates) # Ensure that the destination volume type has not been overwritten self.assertEqual(dest_vol_type_id, getattr(updated_dest_volume, 'volume_type_id')) # Ignore these attributes, since they were updated by # finish_volume_migration 
ignore_keys = ('id', 'provider_location', '_name_id', 'migration_status', 'display_description', 'status', 'volume_type_id', 'volume_glance_metadata', 'volume_type') dest_vol_dict = {k: updated_dest_volume[k] for k in updated_dest_volume.keys() if k not in ignore_keys} src_vol_dict = {k: src_volume[k] for k in src_volume.keys() if k not in ignore_keys} self.assertEqual(src_vol_dict, dest_vol_dict) def test_volume_with_metadata_serialize_deserialize_no_changes(self): updates = {'volume_glance_metadata': [{'key': 'foo', 'value': 'bar'}], 'expected_attrs': ['glance_metadata']} volume = fake_volume.fake_volume_obj(self.context, **updates) serializer = objects.base.CinderObjectSerializer() serialized_volume = serializer.serialize_entity(self.context, volume) volume = serializer.deserialize_entity(self.context, serialized_volume) self.assertDictEqual({}, volume.obj_get_changes()) class TestVolumeList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.volume_get_all') def test_get_all(self, volume_get_all): db_volume = fake_volume.fake_db_volume() volume_get_all.return_value = [db_volume] volumes = objects.VolumeList.get_all(self.context, mock.sentinel.marker, mock.sentinel.limit, mock.sentinel.sort_key, mock.sentinel.sort_dir) self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) @mock.patch('cinder.db.volume_get_all_by_host') def test_get_by_host(self, get_all_by_host): db_volume = fake_volume.fake_db_volume() get_all_by_host.return_value = [db_volume] volumes = objects.VolumeList.get_all_by_host( self.context, 'fake-host') self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) @mock.patch('cinder.db.volume_get_all_by_group') def test_get_by_group(self, get_all_by_group): db_volume = fake_volume.fake_db_volume() get_all_by_group.return_value = [db_volume] volumes = objects.VolumeList.get_all_by_group( self.context, 'fake-host') self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) @mock.patch('cinder.db.volume_get_all_by_project') def test_get_by_project(self, get_all_by_project): db_volume = fake_volume.fake_db_volume() get_all_by_project.return_value = [db_volume] volumes = objects.VolumeList.get_all_by_project( self.context, mock.sentinel.project_id, mock.sentinel.marker, mock.sentinel.limit, mock.sentinel.sorted_keys, mock.sentinel.sorted_dirs, mock.sentinel.filters) self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) cinder-8.0.0/cinder/tests/unit/objects/test_fields.py0000664000567000056710000001032312701406250024015 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.objects import fields from cinder import test class FakeFieldType(fields.FieldType): def coerce(self, obj, attr, value): return '*%s*' % value def to_primitive(self, obj, attr, value): return '!%s!' 
% value def from_primitive(self, obj, attr, value): return value[1:-1] class TestField(test.TestCase): def setUp(self): super(TestField, self).setUp() self.field = fields.Field(FakeFieldType()) self.coerce_good_values = [('foo', '*foo*')] self.coerce_bad_values = [] self.to_primitive_values = [('foo', '!foo!')] self.from_primitive_values = [('!foo!', 'foo')] def test_coerce_good_values(self): for in_val, out_val in self.coerce_good_values: self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val)) def test_coerce_bad_values(self): for in_val in self.coerce_bad_values: self.assertRaises((TypeError, ValueError), self.field.coerce, 'obj', 'attr', in_val) def test_to_primitive(self): for in_val, prim_val in self.to_primitive_values: self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr', in_val)) def test_from_primitive(self): class ObjectLikeThing(object): _context = 'context' for prim_val, out_val in self.from_primitive_values: self.assertEqual(out_val, self.field.from_primitive( ObjectLikeThing, 'attr', prim_val)) def test_stringify(self): self.assertEqual('123', self.field.stringify(123)) class TestBackupStatus(TestField): def setUp(self): super(TestBackupStatus, self).setUp() self.field = fields.BackupStatusField() self.coerce_good_values = [('error', 'error'), ('error_deleting', 'error_deleting'), ('creating', 'creating'), ('available', 'available'), ('deleting', 'deleting'), ('deleted', 'deleted'), ('restoring', 'restoring')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'error'", self.field.stringify('error')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'not_a_status') class TestConsistencyGroupStatus(TestField): def setUp(self): super(TestConsistencyGroupStatus, self).setUp() self.field = fields.ConsistencyGroupStatusField() self.coerce_good_values = [('error', 'error'), ('available', 'available'), ('creating', 'creating'), ('deleting', 'deleting'), ('deleted', 'deleted'), ('updating', 'updating'), ('in-use', 'in-use'), ('error_deleting', 'error_deleting')] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'error'", self.field.stringify('error')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'not_a_status') cinder-8.0.0/cinder/tests/unit/objects/__init__.py0000664000567000056710000000425512701406250023256 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
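# Editor's sketch (illustrative, not part of the original tree):
# test_fields.py above pins down the oslo.versionedobjects FieldType
# contract -- coerce() runs on attribute assignment, to_primitive() and
# from_primitive() on (de)serialization. A custom field honoring that
# contract, assuming only the public oslo.versionedobjects API, could look
# like:
#
#     import six
#     from oslo_versionedobjects import fields as ovo_fields
#
#     class LowerCaseString(ovo_fields.FieldType):
#         def coerce(self, obj, attr, value):
#             # Normalize on assignment; reject non-strings, like the
#             # coerce_bad_values cases exercised above.
#             if not isinstance(value, six.string_types):
#                 raise ValueError('%s must be a string' % attr)
#             return value.lower()
#
#     # Usable wherever a Field is expected:
#     # fields = {'name': ovo_fields.Field(LowerCaseString())}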
from oslo_utils import timeutils from cinder import context from cinder import exception from cinder.objects import base as obj_base from cinder import test class BaseObjectsTestCase(test.TestCase): def setUp(self): super(BaseObjectsTestCase, self).setUp() self.user_id = 'fake-user' self.project_id = 'fake-project' self.context = context.RequestContext(self.user_id, self.project_id, is_admin=False) # We only test local right now. # TODO(mriedem): Testing remote would be nice... self.assertIsNone(obj_base.CinderObject.indirection_api) # TODO(mriedem): Replace this with # oslo_versionedobjects.fixture.compare_obj when that is in a released # version of o.vo. @staticmethod def _compare(test, db, obj): for field, value in db.items(): try: getattr(obj, field) except (AttributeError, exception.CinderException, NotImplementedError): # NotImplementedError: ignore "Cannot load 'projects' in the # base class" error continue if field in ('modified_at', 'created_at', 'updated_at', 'deleted_at') and db[field]: test.assertEqual(db[field], timeutils.normalize_time(obj[field])) elif isinstance(obj[field], obj_base.ObjectListBase): test.assertEqual(db[field], obj[field].objects) else: test.assertEqual(db[field], obj[field]) cinder-8.0.0/cinder/tests/unit/objects/test_service.py0000664000567000056710000001716012701406250024215 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
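# Editor's note (sketch, illustrative only): _compare() defined in this
# package's __init__ above is the workhorse of these tests -- it walks every
# column of a fake DB row, normalizes the datetime columns, unwraps
# ObjectListBase values, and asserts field-by-field equality. The typical
# call shape, using the fake_service fixture imported below:
#
#     db_service = fake_service.fake_db_service()
#     service = objects.Service._from_db_object(
#         self.context, objects.Service(), db_service)
#     self._compare(self, db_service, service)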
import mock import six from cinder import objects from cinder.tests.unit import fake_service from cinder.tests.unit import objects as test_objects class TestService(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_get_by_id(self, service_get): db_service = fake_service.fake_db_service() service_get.return_value = db_service service = objects.Service.get_by_id(self.context, 1) self._compare(self, db_service, service) service_get.assert_called_once_with(self.context, 1) @mock.patch('cinder.db.service_get_by_host_and_topic') def test_get_by_host_and_topic(self, service_get_by_host_and_topic): db_service = fake_service.fake_db_service() service_get_by_host_and_topic.return_value = db_service service = objects.Service.get_by_host_and_topic( self.context, 'fake-host', 'fake-topic') self._compare(self, db_service, service) service_get_by_host_and_topic.assert_called_once_with( self.context, 'fake-host', 'fake-topic') @mock.patch('cinder.db.service_get_by_args') def test_get_by_args(self, service_get_by_args): db_service = fake_service.fake_db_service() service_get_by_args.return_value = db_service service = objects.Service.get_by_args( self.context, 'fake-host', 'fake-key') self._compare(self, db_service, service) service_get_by_args.assert_called_once_with( self.context, 'fake-host', 'fake-key') @mock.patch('cinder.db.service_create') def test_create(self, service_create): db_service = fake_service.fake_db_service() service_create.return_value = db_service service = objects.Service(context=self.context) service.create() self.assertEqual(db_service['id'], service.id) service_create.assert_called_once_with(self.context, {}) @mock.patch('cinder.db.service_update') def test_save(self, service_update): db_service = fake_service.fake_db_service() service = objects.Service._from_db_object( self.context, objects.Service(), db_service) service.topic = 'foobar' service.save() service_update.assert_called_once_with(self.context, service.id, {'topic': 'foobar'}) @mock.patch('cinder.db.service_destroy') def test_destroy(self, service_destroy): db_service = fake_service.fake_db_service() service = objects.Service._from_db_object( self.context, objects.Service(), db_service) with mock.patch.object(service._context, 'elevated') as elevated_ctx: service.destroy() service_destroy.assert_called_once_with(elevated_ctx(), 123) @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_refresh(self, service_get): db_service1 = fake_service.fake_db_service() db_service2 = db_service1.copy() db_service2['availability_zone'] = 'foobar' # On the second service_get, return the service with an updated # availability_zone service_get.side_effect = [db_service1, db_service2] service = objects.Service.get_by_id(self.context, 123) self._compare(self, db_service1, service) # availability_zone was updated, so a service refresh should have a # new value for that field service.refresh() self._compare(self, db_service2, service) if six.PY3: call_bool = mock.call.__bool__() else: call_bool = mock.call.__nonzero__() service_get.assert_has_calls([mock.call(self.context, 123), call_bool, mock.call(self.context, 123)]) @mock.patch('cinder.db.service_get_all_by_binary') def _test_get_minimum_version(self, services_update, expected, service_get_all_by_binary): services = [fake_service.fake_db_service(**s) for s in services_update] service_get_all_by_binary.return_value = services min_rpc = objects.Service.get_minimum_rpc_version(self.context, 'foo') self.assertEqual(expected[0], min_rpc) 
min_obj = objects.Service.get_minimum_obj_version(self.context, 'foo') self.assertEqual(expected[1], min_obj) service_get_all_by_binary.assert_has_calls( [mock.call(self.context, 'foo', disabled=None)] * 2) @mock.patch('cinder.db.service_get_all_by_binary') def test_get_minimum_version(self, service_get_all_by_binary): services_update = [ {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, {'rpc_current_version': '1.1', 'object_current_version': '1.2'}, {'rpc_current_version': '2.0', 'object_current_version': '2.5'}, ] expected = ('1.0', '1.2') self._test_get_minimum_version(services_update, expected) @mock.patch('cinder.db.service_get_all_by_binary') def test_get_minimum_version_liberty(self, service_get_all_by_binary): services_update = [ {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, {'rpc_current_version': '1.1', 'object_current_version': None}, {'rpc_current_version': None, 'object_current_version': '2.5'}, ] expected = ('liberty', 'liberty') self._test_get_minimum_version(services_update, expected) class TestServiceList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.service_get_all') def test_get_all(self, service_get_all): db_service = fake_service.fake_db_service() service_get_all.return_value = [db_service] filters = {'host': 'host', 'binary': 'foo', 'disabled': False} services = objects.ServiceList.get_all(self.context, filters) service_get_all.assert_called_once_with(self.context, filters) self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) @mock.patch('cinder.db.service_get_all_by_topic') def test_get_all_by_topic(self, service_get_all_by_topic): db_service = fake_service.fake_db_service() service_get_all_by_topic.return_value = [db_service] services = objects.ServiceList.get_all_by_topic( self.context, 'foo', 'bar') service_get_all_by_topic.assert_called_once_with( self.context, 'foo', disabled='bar') self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) @mock.patch('cinder.db.service_get_all_by_binary') def test_get_all_by_binary(self, service_get_all_by_binary): db_service = fake_service.fake_db_service() service_get_all_by_binary.return_value = [db_service] services = objects.ServiceList.get_all_by_binary( self.context, 'foo', 'bar') service_get_all_by_binary.assert_called_once_with( self.context, 'foo', disabled='bar') self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) cinder-8.0.0/cinder/tests/unit/objects/test_base.py0000664000567000056710000006232112701406250023466 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
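# Editor's sketch (illustrative, not part of the original tree; the helper
# name is hypothetical): the get_minimum_*_version tests above encode the
# rolling-upgrade rule -- report the lowest version any service is running,
# and treat a None report (a pre-Mitaka, i.e. Liberty, service) as pinning
# the whole fleet to the 'liberty' alias.

def _minimum_version_sketch(reported_versions):
    """Return the lowest 'X.Y' version string, or 'liberty' if any is None."""
    if any(version is None for version in reported_versions):
        return 'liberty'
    return min(reported_versions,
               key=lambda ver: tuple(int(part) for part in ver.split('.')))

# _minimum_version_sketch(['1.0', '1.1', '2.0'])  # -> '1.0'
# _minimum_version_sketch(['1.3', '1.2', '2.5'])  # -> '1.2'
# _minimum_version_sketch(['1.0', None])          # -> 'liberty'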
import datetime import mock import uuid from iso8601 import iso8601 from oslo_utils import versionutils from oslo_versionedobjects import fields from sqlalchemy import sql from cinder import context from cinder import db from cinder.db.sqlalchemy import models from cinder import objects from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import objects as test_objects @objects.base.CinderObjectRegistry.register_if(False) class TestObject(objects.base.CinderObject): VERSION = '1.1' fields = { 'scheduled_at': objects.base.fields.DateTimeField(nullable=True), 'uuid': objects.base.fields.UUIDField(), 'text': objects.base.fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): super(TestObject, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): primitive.pop('text', None) class TestCinderObject(test_objects.BaseObjectsTestCase): """Tests methods from CinderObject.""" def setUp(self): super(TestCinderObject, self).setUp() self.obj = TestObject( scheduled_at=None, uuid=uuid.uuid4(), text='text') self.obj.obj_reset_changes() def test_cinder_obj_get_changes_no_changes(self): self.assertDictEqual({}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_other_changes(self): self.obj.text = 'text2' self.assertDictEqual({'text': 'text2'}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_datetime_no_tz(self): now = datetime.datetime.utcnow() self.obj.scheduled_at = now self.assertDictEqual({'scheduled_at': now}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_datetime_tz_utc(self): now_tz = iso8601.parse_date('2015-06-26T22:00:01Z') now = now_tz.replace(tzinfo=None) self.obj.scheduled_at = now_tz self.assertDictEqual({'scheduled_at': now}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_datetime_tz_non_utc_positive(self): now_tz = iso8601.parse_date('2015-06-26T22:00:01+01') now = now_tz.replace(tzinfo=None) - datetime.timedelta(hours=1) self.obj.scheduled_at = now_tz self.assertDictEqual({'scheduled_at': now}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_datetime_tz_non_utc_negative(self): now_tz = iso8601.parse_date('2015-06-26T10:00:01-05') now = now_tz.replace(tzinfo=None) + datetime.timedelta(hours=5) self.obj.scheduled_at = now_tz self.assertDictEqual({'scheduled_at': now}, self.obj.cinder_obj_get_changes()) def test_refresh(self): @objects.base.CinderObjectRegistry.register_if(False) class MyTestObject(objects.base.CinderObject, objects.base.CinderObjectDictCompat, objects.base.CinderComparableObject): fields = {'id': fields.UUIDField(), 'name': fields.StringField()} test_obj = MyTestObject(id=fake.object_id, name='foo') refresh_obj = MyTestObject(id=fake.object_id, name='bar') with mock.patch( 'cinder.objects.base.CinderObject.get_by_id') as get_by_id: get_by_id.return_value = refresh_obj test_obj.refresh() self._compare(self, refresh_obj, test_obj) def test_refresh_no_id_field(self): @objects.base.CinderObjectRegistry.register_if(False) class MyTestObjectNoId(objects.base.CinderObject, objects.base.CinderObjectDictCompat, objects.base.CinderComparableObject): fields = {'uuid': fields.UUIDField()} test_obj = MyTestObjectNoId(uuid=fake.object_id, name='foo') self.assertRaises(NotImplementedError, test_obj.refresh) class TestCinderComparableObject(test_objects.BaseObjectsTestCase): def test_comparable_objects(self): 
@objects.base.CinderObjectRegistry.register class MyComparableObj(objects.base.CinderObject, objects.base.CinderObjectDictCompat, objects.base.CinderComparableObject): fields = {'foo': fields.Field(fields.Integer())} class NonVersionedObject(object): pass obj1 = MyComparableObj(foo=1) obj2 = MyComparableObj(foo=1) obj3 = MyComparableObj(foo=2) obj4 = NonVersionedObject() self.assertTrue(obj1 == obj2) self.assertFalse(obj1 == obj3) self.assertFalse(obj1 == obj4) self.assertNotEqual(obj1, None) class TestCinderObjectConditionalUpdate(test.TestCase): def setUp(self): super(TestCinderObjectConditionalUpdate, self).setUp() self.context = context.get_admin_context() def _create_volume(self): vol = { 'display_description': 'Test Desc', 'size': 1, 'status': 'available', 'availability_zone': 'az', 'host': 'dummy', 'attach_status': 'no', } volume = objects.Volume(context=self.context, **vol) volume.create() return volume def _create_snapshot(self, volume): snapshot = objects.Snapshot(context=self.context, volume_id=volume.id) snapshot.create() return snapshot def _check_volume(self, volume, status, size, reload=False, dirty_keys=(), **kwargs): if reload: volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual(status, volume.status) self.assertEqual(size, volume.size) dirty = volume.cinder_obj_get_changes() self.assertEqual(list(dirty_keys), list(dirty.keys())) for key, value in kwargs.items(): self.assertEqual(value, getattr(volume, key)) def test_conditional_update_non_iterable_expected(self): volume = self._create_volume() # We also check that we can check for None values self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 2}, {'status': 'available', 'migration_status': None})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 2) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True) def test_conditional_update_non_iterable_expected_model_field(self): volume = self._create_volume() # We also check that we can check for None values self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 2, 'previous_status': volume.model.status}, {'status': 'available', 'migration_status': None})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 2, previous_status='available') # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True, previous_status='available') def test_conditional_update_non_iterable_expected_save_all(self): volume = self._create_volume() volume.size += 1 # We also check that we can check for not None values self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': 'available', 'availability_zone': volume.Not(None)}, save_all=True)) # Check that the object in memory has been updated and that the size # is not a dirty key self._check_volume(volume, 'deleting', 2) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True) def test_conditional_update_non_iterable_expected_dont_save_all(self): volume = self._create_volume() volume.size += 1 self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': 'available'}, save_all=False)) # Check that the object in memory has been updated with the new status # but that size has not been saved and is a dirty key self._check_volume(volume, 'deleting', 2, False, ['size']) # Check that the volume in the DB also has been updated but not the # size 
self._check_volume(volume, 'deleting', 1, True) def test_conditional_update_fail_non_iterable_expected_save_all(self): volume = self._create_volume() volume.size += 1 self.assertFalse(volume.conditional_update( {'status': 'available'}, {'status': 'deleting'}, save_all=True)) # Check that the object in memory has not been updated and that the # size is still a dirty key self._check_volume(volume, 'available', 2, False, ['size']) # Check that the volume in the DB hasn't been updated self._check_volume(volume, 'available', 1, True) def test_default_conditional_update_non_iterable_expected(self): volume = self._create_volume() self.assertTrue(volume.conditional_update({'status': 'deleting'})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 1, True) def test_default_conditional_fail_update_non_iterable_expected(self): volume_in_db = self._create_volume() volume = objects.Volume.get_by_id(self.context, volume_in_db.id) volume_in_db.size += 1 volume_in_db.save() # This will fail because size in DB is different self.assertFalse(volume.conditional_update({'status': 'deleting'})) # Check that the object in memory has not been updated self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed the status but has # the size we changed before the conditional update self._check_volume(volume_in_db, 'available', 2, True) def test_default_conditional_update_non_iterable_expected_with_dirty(self): volume_in_db = self._create_volume() volume = objects.Volume.get_by_id(self.context, volume_in_db.id) volume_in_db.size += 1 volume_in_db.save() volume.size = 33 # This will fail because even though we have excluded the size from # the default condition when we dirtied it in the volume object, we # still have the last update timestamp that will be included in the # condition self.assertFalse(volume.conditional_update({'status': 'deleting'})) # Check that the object in memory has not been updated self._check_volume(volume, 'available', 33, False, ['size']) # Check that the volume in the DB hasn't changed the status but has # the size we changed before the conditional update self._check_volume(volume_in_db, 'available', 2, True) def test_conditional_update_negated_non_iterable_expected(self): volume = self._create_volume() self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 2}, {'status': db.Not('in-use'), 'size': db.Not(2)})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 2) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True) def test_conditional_update_non_iterable_expected_filter(self): # Volume we want to change volume = self._create_volume() # Another volume that has no snapshots volume2 = self._create_volume() # A volume with snapshots volume3 = self._create_volume() self._create_snapshot(volume3) # Update only if it has no snapshots filters = (~sql.exists().where( models.Snapshot.volume_id == models.Volume.id),) self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 2}, {'status': 'available'}, filters)) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 2) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True) # Check that the other volumes in the DB haven't changed self._check_volume(volume2, 'available', 1, True)
self._check_volume(volume3, 'available', 1, True) def test_conditional_update_iterable_expected(self): volume = self._create_volume() self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 20}, {'status': ('error', 'available'), 'size': range(10)})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 20) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 20, True) def test_conditional_update_negated_iterable_expected(self): volume = self._create_volume() self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 20}, {'status': db.Not(('creating', 'in-use')), 'size': range(10)})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 20) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 20, True) def test_conditional_update_fail_non_iterable_expected(self): volume = self._create_volume() self.assertFalse(volume.conditional_update( {'status': 'deleting'}, {'status': 'available', 'size': 2})) # Check that the object in memory hasn't changed self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed either self._check_volume(volume, 'available', 1, True) def test_conditional_update_fail_negated_non_iterable_expected(self): volume = self._create_volume() result = volume.conditional_update({'status': 'deleting'}, {'status': db.Not('in-use'), 'size': 2}) self.assertFalse(result) # Check that the object in memory hasn't changed self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed either self._check_volume(volume, 'available', 1, True) def test_conditional_update_fail_iterable_expected(self): volume = self._create_volume() self.assertFalse(volume.conditional_update( {'status': 'available'}, {'status': ('error', 'creating'), 'size': range(2, 10)})) # Check that the object in memory hasn't changed self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed either self._check_volume(volume, 'available', 1, True) def test_conditional_update_fail_negated_iterable_expected(self): volume = self._create_volume() self.assertFalse(volume.conditional_update( {'status': 'error'}, {'status': db.Not(('available', 'in-use')), 'size': range(2, 10)})) # Check that the object in memory hasn't changed self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed either self._check_volume(volume, 'available', 1, True) def test_conditional_update_fail_non_iterable_expected_filter(self): # Volume we want to change volume = self._create_volume() self._create_snapshot(volume) # A volume that has no snapshots volume2 = self._create_volume() # Another volume with snapshots volume3 = self._create_volume() self._create_snapshot(volume3) # Update only if it has no snapshots filters = (~sql.exists().where( models.Snapshot.volume_id == models.Volume.id),) self.assertFalse(volume.conditional_update( {'status': 'deleting', 'size': 2}, {'status': 'available'}, filters)) # Check that the object in memory hasn't been updated self._check_volume(volume, 'available', 1) # Check that none of the volumes in the DB have been updated self._check_volume(volume, 'available', 1, True) self._check_volume(volume2, 'available', 1, True) def test_conditional_update_non_iterable_case_value(self): # Volume we want to change and has snapshots volume = self._create_volume()
self._create_snapshot(volume) # Filter that checks if a volume has snapshots has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) # We want the updated value to depend on whether it has snapshots or # not case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') self.assertTrue(volume.conditional_update({'status': case_values}, {'status': 'available'})) # Check that the object in memory has been updated self._check_volume(volume, 'has-snapshot', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'has-snapshot', 1, True) def test_conditional_update_non_iterable_case_value_else(self): # Volume we want to change volume = self._create_volume() # Filter that checks if a volume has snapshots has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) # We want the updated value to depend on whether it has snapshots or # not case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') self.assertTrue(volume.conditional_update({'status': case_values}, {'status': 'available'})) # Check that the object in memory has been updated self._check_volume(volume, 'no-snapshot', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'no-snapshot', 1, True) def test_conditional_update_non_iterable_case_value_fail(self): # Volume we want to change doesn't have snapshots volume = self._create_volume() # Filter that checks if a volume has snapshots has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) # We want the updated value to depend on whether it has snapshots or # not case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') # We won't update because volume status is available self.assertFalse(volume.conditional_update({'status': case_values}, {'status': 'deleting'})) # Check that the object in memory has not been updated self._check_volume(volume, 'available', 1) # Check that the volume in the DB also hasn't been updated either self._check_volume(volume, 'available', 1, True) def test_conditional_update_iterable_with_none_expected(self): volume = self._create_volume() # We also check that we can check for None values in an iterable self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': (None, 'available'), 'migration_status': (None, 'finished')})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 1, True) def test_conditional_update_iterable_with_not_none_expected(self): volume = self._create_volume() # We also check that we can check for None values in a negated iterable self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': volume.Not((None, 'in-use'))})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 1, True) def test_conditional_update_iterable_with_not_includes_null(self): volume = self._create_volume() # We also check that negation includes None values by default like we # do in Python and not like MySQL does self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': 'available', 'migration_status': volume.Not(('migrating', 'error'))})) # Check that the object in memory has been updated self._check_volume(volume, 
'deleting', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 1, True) def test_conditional_update_iterable_with_not_includes_null_fails(self): volume = self._create_volume() # We also check that negation excludes None values if we ask it to self.assertFalse(volume.conditional_update( {'status': 'deleting'}, {'status': 'available', 'migration_status': volume.Not(('migrating', 'error'), auto_none=False)})) # Check that the object in memory has not been updated self._check_volume(volume, 'available', 1, False) # Check that the volume in the DB hasn't been updated self._check_volume(volume, 'available', 1, True) def test_conditional_update_use_operation_in_value(self): volume = self._create_volume() expected_size = volume.size + 1 # We also check that using fields in requested changes will work as # expected self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': volume.model.size + 1}, {'status': 'available'})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', expected_size, False) # Check that the volume in the DB has also been updated self._check_volume(volume, 'deleting', expected_size, True) class TestCinderDictObject(test_objects.BaseObjectsTestCase): @objects.base.CinderObjectRegistry.register_if(False) class TestDictObject(objects.base.CinderObjectDictCompat, objects.base.CinderObject): obj_extra_fields = ['foo'] fields = { 'abc': fields.StringField(nullable=True), 'def': fields.IntegerField(nullable=True), } @property def foo(self): return 42 def test_dict_objects(self): obj = self.TestDictObject() self.assertIsNone(obj.get('non_existing')) self.assertEqual('val', obj.get('abc', 'val')) self.assertIsNone(obj.get('abc')) obj.abc = 'val2' self.assertEqual('val2', obj.get('abc', 'val')) self.assertEqual(42, obj.get('foo')) self.assertEqual(42, obj.get('foo', None)) self.assertTrue('foo' in obj) self.assertTrue('abc' in obj) self.assertFalse('def' in obj) @mock.patch('cinder.objects.base.OBJ_VERSIONS', {'1.0': {'TestObject': '1.0'}, '1.1': {'TestObject': '1.1'}, }) class TestCinderObjectSerializer(test_objects.BaseObjectsTestCase): def setUp(self): super(TestCinderObjectSerializer, self).setUp() self.obj = TestObject(scheduled_at=None, uuid=uuid.uuid4(), text='text') def test_serialize_entity_backport(self): serializer = objects.base.CinderObjectSerializer('1.0') primitive = serializer.serialize_entity(self.context, self.obj) self.assertEqual('1.0', primitive['versioned_object.version']) def test_serialize_entity_unknown_version(self): serializer = objects.base.CinderObjectSerializer('0.9') primitive = serializer.serialize_entity(self.context, self.obj) self.assertEqual('1.1', primitive['versioned_object.version']) cinder-8.0.0/cinder/tests/unit/objects/test_consistencygroup.py0000664000567000056710000002526312701406250026176 0ustar jenkinsjenkins00000000000000# Copyright 2015 Yahoo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
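# Editor's note (sketch, illustrative only): the serializer tests just above
# show the on-the-wire version negotiation -- CinderObjectSerializer is
# pinned to a release cap, maps it through OBJ_VERSIONS to a per-object
# target version, and serialize_entity() backports with
# obj_make_compatible(), which drops fields the older version never had
# (TestObject pops 'text' below 1.1). In outline:
#
#     serializer = objects.base.CinderObjectSerializer('1.0')
#     primitive = serializer.serialize_entity(ctxt, obj)  # obj is v1.1
#     primitive['versioned_object.version']  # -> '1.0'
#     # and 'text' is absent from primitive['versioned_object.data']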
import mock import six from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import objects as test_objects fake_consistencygroup = { 'id': fake.consistency_group_id, 'user_id': fake.user_id, 'project_id': fake.project_id, 'host': 'fake_host', 'availability_zone': 'fake_az', 'name': 'fake_name', 'description': 'fake_description', 'volume_type_id': fake.volume_type_id, 'status': fields.ConsistencyGroupStatus.CREATING, 'cgsnapshot_id': fake.cgsnapshot_id, 'source_cgid': None, } fake_cgsnapshot = { 'id': fake.cgsnapshot_id, 'user_id': fake.user_id, 'project_id': fake.project_id, 'name': 'fake_name', 'description': 'fake_description', 'status': 'creating', 'consistencygroup_id': fake.consistency_group_id, } class TestConsistencyGroup(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get', return_value=fake_consistencygroup) def test_get_by_id(self, consistencygroup_get): consistencygroup = objects.ConsistencyGroup.get_by_id( self.context, fake.consistency_group_id) self._compare(self, fake_consistencygroup, consistencygroup) consistencygroup_get.assert_called_once_with( self.context, fake.consistency_group_id) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): model_query().filter_by().first.return_value = None self.assertRaises(exception.ConsistencyGroupNotFound, objects.ConsistencyGroup.get_by_id, self.context, 123) @mock.patch('cinder.db.consistencygroup_create', return_value=fake_consistencygroup) def test_create(self, consistencygroup_create): fake_cg = fake_consistencygroup.copy() del fake_cg['id'] consistencygroup = objects.ConsistencyGroup(context=self.context, **fake_cg) consistencygroup.create() self._compare(self, fake_consistencygroup, consistencygroup) def test_create_with_id_except_exception(self, ): consistencygroup = objects.ConsistencyGroup( context=self.context, **{'id': fake.consistency_group_id}) self.assertRaises(exception.ObjectActionError, consistencygroup.create) @mock.patch('cinder.db.consistencygroup_update') def test_save(self, consistencygroup_update): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) consistencygroup.status = fields.ConsistencyGroupStatus.AVAILABLE consistencygroup.save() consistencygroup_update.assert_called_once_with( self.context, consistencygroup.id, {'status': fields.ConsistencyGroupStatus.AVAILABLE}) def test_save_with_cgsnapshots(self): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) cgsnapshots_objs = [objects.CGSnapshot(context=self.context, id=i) for i in [fake.cgsnapshot_id, fake.cgsnapshot2_id, fake.cgsnapshot3_id]] cgsnapshots = objects.CGSnapshotList(objects=cgsnapshots_objs) consistencygroup.name = 'foobar' consistencygroup.cgsnapshots = cgsnapshots self.assertEqual({'name': 'foobar', 'cgsnapshots': cgsnapshots}, consistencygroup.obj_get_changes()) self.assertRaises(exception.ObjectActionError, consistencygroup.save) def test_save_with_volumes(self): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) volumes_objs = [objects.Volume(context=self.context, id=i) for i in [fake.volume_id, fake.volume2_id, fake.volume3_id]] volumes = objects.VolumeList(objects=volumes_objs) consistencygroup.name = 'foobar' 
consistencygroup.volumes = volumes self.assertEqual({'name': 'foobar', 'volumes': volumes}, consistencygroup.obj_get_changes()) self.assertRaises(exception.ObjectActionError, consistencygroup.save) @mock.patch('cinder.objects.cgsnapshot.CGSnapshotList.get_all_by_group') @mock.patch('cinder.objects.volume.VolumeList.get_all_by_group') def test_obj_load_attr(self, mock_vol_get_all_by_group, mock_cgsnap_get_all_by_group): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) # Test cgsnapshots lazy-loaded field cgsnapshots_objs = [objects.CGSnapshot(context=self.context, id=i) for i in [fake.cgsnapshot_id, fake.cgsnapshot2_id, fake.cgsnapshot3_id]] cgsnapshots = objects.CGSnapshotList(context=self.context, objects=cgsnapshots_objs) mock_cgsnap_get_all_by_group.return_value = cgsnapshots self.assertEqual(cgsnapshots, consistencygroup.cgsnapshots) mock_cgsnap_get_all_by_group.assert_called_once_with( self.context, consistencygroup.id) # Test volumes lazy-loaded field volume_objs = [objects.Volume(context=self.context, id=i) for i in [fake.volume_id, fake.volume2_id, fake.volume3_id]] volumes = objects.VolumeList(context=self.context, objects=volume_objs) mock_vol_get_all_by_group.return_value = volumes self.assertEqual(volumes, consistencygroup.volumes) mock_vol_get_all_by_group.assert_called_once_with(self.context, consistencygroup.id) @mock.patch('cinder.db.consistencygroup_destroy') def test_destroy(self, consistencygroup_destroy): consistencygroup = objects.ConsistencyGroup( context=self.context, id=fake.consistency_group_id) consistencygroup.destroy() self.assertTrue(consistencygroup_destroy.called) admin_context = consistencygroup_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get') def test_refresh(self, consistencygroup_get): db_cg1 = fake_consistencygroup.copy() db_cg2 = db_cg1.copy() db_cg2['description'] = 'foobar' # On the second consistencygroup_get, return the ConsistencyGroup with # an updated description consistencygroup_get.side_effect = [db_cg1, db_cg2] cg = objects.ConsistencyGroup.get_by_id(self.context, fake.consistency_group_id) self._compare(self, db_cg1, cg) # description was updated, so a ConsistencyGroup refresh should have a # new value for that field cg.refresh() self._compare(self, db_cg2, cg) if six.PY3: call_bool = mock.call.__bool__() else: call_bool = mock.call.__nonzero__() consistencygroup_get.assert_has_calls([ mock.call( self.context, fake.consistency_group_id), call_bool, mock.call( self.context, fake.consistency_group_id)]) class TestConsistencyGroupList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.consistencygroup_get_all', return_value=[fake_consistencygroup]) def test_get_all(self, consistencygroup_get_all): consistencygroups = objects.ConsistencyGroupList.get_all(self.context) self.assertEqual(1, len(consistencygroups)) TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) @mock.patch('cinder.db.consistencygroup_get_all_by_project', return_value=[fake_consistencygroup]) def test_get_all_by_project(self, consistencygroup_get_all_by_project): consistencygroups = objects.ConsistencyGroupList.get_all_by_project( self.context, self.project_id) self.assertEqual(1, len(consistencygroups)) TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) @mock.patch('cinder.db.consistencygroup_get_all', return_value=[fake_consistencygroup]) def 
test_get_all_with_pagination(self, consistencygroup_get_all): consistencygroups = objects.ConsistencyGroupList.get_all( self.context, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') self.assertEqual(1, len(consistencygroups)) consistencygroup_get_all.assert_called_once_with( self.context, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) @mock.patch('cinder.db.consistencygroup_get_all_by_project', return_value=[fake_consistencygroup]) def test_get_all_by_project_with_pagination( self, consistencygroup_get_all_by_project): consistencygroups = objects.ConsistencyGroupList.get_all_by_project( self.context, self.project_id, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') self.assertEqual(1, len(consistencygroups)) consistencygroup_get_all_by_project.assert_called_once_with( self.context, self.project_id, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) cinder-8.0.0/cinder/tests/unit/objects/test_snapshot.py0000664000567000056710000003527712701406250024425 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
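# Editor's note (sketch, illustrative only): the pagination tests just above
# show that the ConsistencyGroupList getters pass filters, marker, limit,
# offset and the sort arguments straight through to the db API. The
# caller-side shape is therefore:
#
#     groups = objects.ConsistencyGroupList.get_all(
#         ctxt, filters={'status': 'available'}, marker=None, limit=10,
#         offset=None, sort_keys='id', sort_dirs='asc')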
import copy import mock import six from oslo_log import log as logging from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects LOG = logging.getLogger(__name__) fake_db_snapshot = fake_snapshot.fake_db_snapshot( cgsnapshot_id=fake.cgsnapshot_id) del fake_db_snapshot['metadata'] del fake_db_snapshot['volume'] # NOTE(andrey-mp): make Snapshot object here to check object algorithms fake_snapshot_obj = { 'id': fake.snapshot_id, 'volume_id': fake.volume_id, 'status': "creating", 'progress': '0%', 'volume_size': 1, 'display_name': 'fake_name', 'display_description': 'fake_description', 'metadata': {}, } class TestSnapshot(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.get_by_id', return_value=fake_db_snapshot) def test_get_by_id(self, snapshot_get): snapshot = objects.Snapshot.get_by_id(self.context, 1) self._compare(self, fake_snapshot_obj, snapshot) snapshot_get.assert_called_once_with(self.context, models.Snapshot, 1) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): query = model_query().options().options().filter_by().first query.return_value = None self.assertRaises(exception.SnapshotNotFound, objects.Snapshot.get_by_id, self.context, 123) def test_reset_changes(self): snapshot = objects.Snapshot() snapshot.metadata = {'key1': 'value1'} self.assertEqual({}, snapshot._orig_metadata) snapshot.obj_reset_changes(['metadata']) self.assertEqual({'key1': 'value1'}, snapshot._orig_metadata) @mock.patch('cinder.db.snapshot_create', return_value=fake_db_snapshot) def test_create(self, snapshot_create): snapshot = objects.Snapshot(context=self.context) snapshot.create() self.assertEqual(fake_snapshot_obj['id'], snapshot.id) self.assertEqual(fake_snapshot_obj['volume_id'], snapshot.volume_id) @mock.patch('cinder.db.snapshot_create') def test_create_with_provider_id(self, snapshot_create): snapshot_create.return_value = copy.deepcopy(fake_db_snapshot) snapshot_create.return_value['provider_id'] = fake.provider_id snapshot = objects.Snapshot(context=self.context) snapshot.create() self.assertEqual(fake.provider_id, snapshot.provider_id) @mock.patch('cinder.db.snapshot_update') def test_save(self, snapshot_update): snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_db_snapshot) snapshot.display_name = 'foobar' snapshot.save() snapshot_update.assert_called_once_with(self.context, snapshot.id, {'display_name': 'foobar'}) @mock.patch('cinder.db.snapshot_metadata_update', return_value={'key1': 'value1'}) @mock.patch('cinder.db.snapshot_update') def test_save_with_metadata(self, snapshot_update, snapshot_metadata_update): snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_db_snapshot) snapshot.display_name = 'foobar' snapshot.metadata = {'key1': 'value1'} self.assertEqual({'display_name': 'foobar', 'metadata': {'key1': 'value1'}}, snapshot.obj_get_changes()) snapshot.save() snapshot_update.assert_called_once_with(self.context, snapshot.id, {'display_name': 'foobar'}) snapshot_metadata_update.assert_called_once_with(self.context, fake.snapshot_id, {'key1': 'value1'}, True) @mock.patch('cinder.db.snapshot_destroy') def test_destroy(self, snapshot_destroy): snapshot = objects.Snapshot(context=self.context, id=fake.snapshot_id) snapshot.destroy() 
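# Editor's note: destroy() delegates to db.snapshot_destroy with the snapshot's context and id, as asserted below.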
snapshot_destroy.assert_called_once_with(self.context, fake.snapshot_id) @mock.patch('cinder.db.snapshot_metadata_delete') def test_delete_metadata_key(self, snapshot_metadata_delete): snapshot = objects.Snapshot(self.context, id=fake.snapshot_id) snapshot.metadata = {'key1': 'value1', 'key2': 'value2'} self.assertEqual({}, snapshot._orig_metadata) snapshot.delete_metadata_key(self.context, 'key2') self.assertEqual({'key1': 'value1'}, snapshot.metadata) snapshot_metadata_delete.assert_called_once_with(self.context, fake.snapshot_id, 'key2') def test_obj_fields(self): volume = objects.Volume(context=self.context, id=fake.volume_id, _name_id=fake.volume_name_id) snapshot = objects.Snapshot(context=self.context, id=fake.volume_id, volume=volume) self.assertEqual(['name', 'volume_name'], snapshot.obj_extra_fields) self.assertEqual('snapshot-%s' % fake.volume_id, snapshot.name) self.assertEqual('volume-%s' % fake.volume_name_id, snapshot.volume_name) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.objects.cgsnapshot.CGSnapshot.get_by_id') def test_obj_load_attr(self, cgsnapshot_get_by_id, volume_get_by_id): snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_db_snapshot) # Test volume lazy-loaded field volume = objects.Volume(context=self.context, id=fake.volume_id) volume_get_by_id.return_value = volume self.assertEqual(volume, snapshot.volume) volume_get_by_id.assert_called_once_with(self.context, snapshot.volume_id) # Test cgsnapshot lazy-loaded field cgsnapshot = objects.CGSnapshot(context=self.context, id=fake.cgsnapshot_id) cgsnapshot_get_by_id.return_value = cgsnapshot self.assertEqual(cgsnapshot, snapshot.cgsnapshot) cgsnapshot_get_by_id.assert_called_once_with(self.context, snapshot.cgsnapshot_id) @mock.patch('cinder.db.snapshot_data_get_for_project') def test_snapshot_data_get_for_project(self, snapshot_data_get): snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_db_snapshot) volume_type_id = mock.sentinel.volume_type_id snapshot.snapshot_data_get_for_project(self.context, self.project_id, volume_type_id) snapshot_data_get.assert_called_once_with(self.context, self.project_id, volume_type_id) @mock.patch('cinder.db.sqlalchemy.api.snapshot_get') def test_refresh(self, snapshot_get): db_snapshot1 = fake_snapshot.fake_db_snapshot() db_snapshot2 = db_snapshot1.copy() db_snapshot2['display_name'] = 'foobar' # On the second snapshot_get, return the snapshot with an updated # display_name snapshot_get.side_effect = [db_snapshot1, db_snapshot2] snapshot = objects.Snapshot.get_by_id(self.context, fake.snapshot_id) self._compare(self, db_snapshot1, snapshot) # display_name was updated, so a snapshot refresh should have a new # value for that field snapshot.refresh() self._compare(self, db_snapshot2, snapshot) if six.PY3: call_bool = mock.call.__bool__() else: call_bool = mock.call.__nonzero__() snapshot_get.assert_has_calls([ mock.call(self.context, fake.snapshot_id), call_bool, mock.call(self.context, fake.snapshot_id)]) class TestSnapshotList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all', return_value=[fake_db_snapshot]) def test_get_all(self, snapshot_get_all, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj search_opts = mock.sentinel.search_opts snapshots = objects.SnapshotList.get_all( self.context, search_opts) self.assertEqual(1, 
len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) snapshot_get_all.assert_called_once_with(self.context, search_opts, None, None, None, None, None) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_by_host', return_value=[fake_db_snapshot]) def test_get_by_host(self, get_by_host, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshots = objects.SnapshotList.get_by_host( self.context, 'fake-host') self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all_by_project', return_value=[fake_db_snapshot]) def test_get_all_by_project(self, get_all_by_project, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj search_opts = mock.sentinel.search_opts snapshots = objects.SnapshotList.get_all_by_project( self.context, self.project_id, search_opts) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) get_all_by_project.assert_called_once_with(self.context, self.project_id, search_opts, None, None, None, None, None) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all_for_volume', return_value=[fake_db_snapshot]) def test_get_all_for_volume(self, get_all_for_volume, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshots = objects.SnapshotList.get_all_for_volume( self.context, fake_volume_obj.id) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_active_by_window', return_value=[fake_db_snapshot]) def test_get_active_by_window(self, get_active_by_window, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshots = objects.SnapshotList.get_active_by_window( self.context, mock.sentinel.begin, mock.sentinel.end) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all_for_cgsnapshot', return_value=[fake_db_snapshot]) def test_get_all_for_cgsnapshot(self, get_all_for_cgsnapshot, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshots = objects.SnapshotList.get_all_for_cgsnapshot( self.context, mock.sentinel.cgsnapshot_id) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all') def test_get_all_without_metadata(self, snapshot_get_all, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshot = copy.deepcopy(fake_db_snapshot) del snapshot['snapshot_metadata'] snapshot_get_all.return_value = [snapshot] search_opts = mock.sentinel.search_opts self.assertRaises(exception.MetadataAbsent, objects.SnapshotList.get_all, self.context, search_opts) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all') def test_get_all_with_metadata(self, 
snapshot_get_all, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj db_snapshot = copy.deepcopy(fake_db_snapshot) db_snapshot['snapshot_metadata'] = [{'key': 'fake_key', 'value': 'fake_value'}] snapshot_get_all.return_value = [db_snapshot] search_opts = mock.sentinel.search_opts snapshots = objects.SnapshotList.get_all( self.context, search_opts) self.assertEqual(1, len(snapshots)) snapshot_obj = copy.deepcopy(fake_snapshot_obj) snapshot_obj['metadata'] = {'fake_key': 'fake_value'} TestSnapshot._compare(self, snapshot_obj, snapshots[0]) snapshot_get_all.assert_called_once_with(self.context, search_opts, None, None, None, None, None) cinder-8.0.0/cinder/tests/unit/objects/test_cgsnapshot.py0000664000567000056710000001711312701406250024724 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from cinder import exception from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import objects as test_objects from cinder.tests.unit.objects.test_consistencygroup import \ fake_consistencygroup fake_cgsnapshot = { 'id': fake.cgsnapshot_id, 'user_id': fake.user_id, 'project_id': fake.project_id, 'name': 'fake_name', 'description': 'fake_description', 'status': 'creating', 'consistencygroup_id': fake.consistency_group_id, } class TestCGSnapshot(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_get', return_value=fake_cgsnapshot) def test_get_by_id(self, cgsnapshot_get): cgsnapshot = objects.CGSnapshot.get_by_id(self.context, fake.cgsnapshot_id) self._compare(self, fake_cgsnapshot, cgsnapshot) @mock.patch('cinder.db.cgsnapshot_create', return_value=fake_cgsnapshot) def test_create(self, cgsnapshot_create): fake_cgsnap = fake_cgsnapshot.copy() del fake_cgsnap['id'] cgsnapshot = objects.CGSnapshot(context=self.context, **fake_cgsnap) cgsnapshot.create() self._compare(self, fake_cgsnapshot, cgsnapshot) def test_create_with_id_except_exception(self): cgsnapshot = objects.CGSnapshot(context=self.context, **{'id': fake.consistency_group_id}) self.assertRaises(exception.ObjectActionError, cgsnapshot.create) @mock.patch('cinder.db.cgsnapshot_update') def test_save(self, cgsnapshot_update): cgsnapshot = objects.CGSnapshot._from_db_object( self.context, objects.CGSnapshot(), fake_cgsnapshot) cgsnapshot.status = 'active' cgsnapshot.save() cgsnapshot_update.assert_called_once_with(self.context, cgsnapshot.id, {'status': 'active'}) @mock.patch('cinder.db.consistencygroup_update', return_value=fake_consistencygroup) @mock.patch('cinder.db.cgsnapshot_update') def test_save_with_consistencygroup(self, cgsnapshot_update, cgsnapshot_cg_update): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) cgsnapshot = objects.CGSnapshot._from_db_object( self.context, objects.CGSnapshot(), fake_cgsnapshot) cgsnapshot.name = 
'foobar' cgsnapshot.consistencygroup = consistencygroup self.assertEqual({'name': 'foobar', 'consistencygroup': consistencygroup}, cgsnapshot.obj_get_changes()) self.assertRaises(exception.ObjectActionError, cgsnapshot.save) @mock.patch('cinder.db.cgsnapshot_destroy') def test_destroy(self, cgsnapshot_destroy): cgsnapshot = objects.CGSnapshot(context=self.context, id=fake.cgsnapshot_id) cgsnapshot.destroy() self.assertTrue(cgsnapshot_destroy.called) admin_context = cgsnapshot_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_obj_load_attr(self, snapshotlist_get_for_cgs, consistencygroup_get_by_id): cgsnapshot = objects.CGSnapshot._from_db_object( self.context, objects.CGSnapshot(), fake_cgsnapshot) # Test consistencygroup lazy-loaded field consistencygroup = objects.ConsistencyGroup( context=self.context, id=fake.consistency_group_id) consistencygroup_get_by_id.return_value = consistencygroup self.assertEqual(consistencygroup, cgsnapshot.consistencygroup) consistencygroup_get_by_id.assert_called_once_with( self.context, cgsnapshot.consistencygroup_id) # Test snapshots lazy-loaded field snapshots_objs = [objects.Snapshot(context=self.context, id=i) for i in [fake.snapshot_id, fake.snapshot2_id, fake.snapshot3_id]] snapshots = objects.SnapshotList(context=self.context, objects=snapshots_objs) snapshotlist_get_for_cgs.return_value = snapshots self.assertEqual(snapshots, cgsnapshot.snapshots) snapshotlist_get_for_cgs.assert_called_once_with( self.context, cgsnapshot.id) @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_get') def test_refresh(self, cgsnapshot_get): db_cgsnapshot1 = fake_cgsnapshot.copy() db_cgsnapshot2 = db_cgsnapshot1.copy() db_cgsnapshot2['description'] = 'foobar' # On the second cgsnapshot_get, return the CGSnapshot with an updated # description cgsnapshot_get.side_effect = [db_cgsnapshot1, db_cgsnapshot2] cgsnapshot = objects.CGSnapshot.get_by_id(self.context, fake.cgsnapshot_id) self._compare(self, db_cgsnapshot1, cgsnapshot) # description was updated, so a CGSnapshot refresh should have a new # value for that field cgsnapshot.refresh() self._compare(self, db_cgsnapshot2, cgsnapshot) if six.PY3: call_bool = mock.call.__bool__() else: call_bool = mock.call.__nonzero__() cgsnapshot_get.assert_has_calls([mock.call(self.context, fake.cgsnapshot_id), call_bool, mock.call(self.context, fake.cgsnapshot_id)]) class TestCGSnapshotList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.cgsnapshot_get_all', return_value=[fake_cgsnapshot]) def test_get_all(self, cgsnapshot_get_all): cgsnapshots = objects.CGSnapshotList.get_all(self.context) self.assertEqual(1, len(cgsnapshots)) TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) @mock.patch('cinder.db.cgsnapshot_get_all_by_project', return_value=[fake_cgsnapshot]) def test_get_all_by_project(self, cgsnapshot_get_all_by_project): cgsnapshots = objects.CGSnapshotList.get_all_by_project( self.context, self.project_id) self.assertEqual(1, len(cgsnapshots)) TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) @mock.patch('cinder.db.cgsnapshot_get_all_by_group', return_value=[fake_cgsnapshot]) def test_get_all_by_group(self, cgsnapshot_get_all_by_group): cgsnapshots = objects.CGSnapshotList.get_all_by_group( self.context, self.project_id) self.assertEqual(1, len(cgsnapshots)) TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) 
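# NOTE(editor): a minimal standalone sketch, not part of the cinder tree.
# The refresh() tests in the object test files above all lean on the same
# mock feature: giving side_effect a list makes consecutive calls to the
# patched DB API return successive elements, so the test can watch the
# object pick up an updated row. All names below ('ctxt', 'uuid',
# 'display_name') are toy values; only the external `mock` package already
# imported throughout these tests is assumed:
import mock

db_get = mock.Mock(side_effect=[{'display_name': 'old'},
                                {'display_name': 'new'}])
# The first call sees the original row, the second the updated one.
assert db_get('ctxt', 'uuid')['display_name'] == 'old'
assert db_get('ctxt', 'uuid')['display_name'] == 'new'
db_get.assert_has_calls([mock.call('ctxt', 'uuid'),
                         mock.call('ctxt', 'uuid')])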
cinder-8.0.0/cinder/tests/unit/objects/test_volume_attachment.py0000664000567000056710000001077412701406250026300 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects class TestVolumeAttachment(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.volume_attachment_get') def test_get_by_id(self, volume_attachment_get): db_attachment = fake_volume.fake_db_volume_attachment() volume_attachment_get.return_value = db_attachment attachment = objects.VolumeAttachment.get_by_id(self.context, fake.attachment_id) self._compare(self, db_attachment, attachment) @mock.patch('cinder.db.volume_attachment_update') def test_save(self, volume_attachment_update): attachment = fake_volume.fake_volume_attachment_obj(self.context) attachment.attach_status = 'attaching' attachment.save() volume_attachment_update.assert_called_once_with( self.context, attachment.id, {'attach_status': 'attaching'}) @mock.patch('cinder.db.sqlalchemy.api.volume_attachment_get') def test_refresh(self, attachment_get): db_attachment1 = fake_volume.fake_db_volume_attachment() db_attachment2 = db_attachment1.copy() db_attachment2['mountpoint'] = '/dev/sdc' # On the second volume_attachment_get, return the volume attachment # with an updated mountpoint attachment_get.side_effect = [db_attachment1, db_attachment2] attachment = objects.VolumeAttachment.get_by_id(self.context, fake.attachment_id) self._compare(self, db_attachment1, attachment) # mountpoint was updated, so a volume attachment refresh should have a # new value for that field attachment.refresh() self._compare(self, db_attachment2, attachment) if six.PY3: call_bool = mock.call.__bool__() else: call_bool = mock.call.__nonzero__() attachment_get.assert_has_calls([mock.call(self.context, fake.attachment_id), call_bool, mock.call(self.context, fake.attachment_id)]) class TestVolumeAttachmentList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.volume_attachment_get_used_by_volume_id') def test_get_all_by_volume_id(self, get_used_by_volume_id): db_attachment = fake_volume.fake_db_volume_attachment() get_used_by_volume_id.return_value = [db_attachment] attachments = objects.VolumeAttachmentList.get_all_by_volume_id( self.context, mock.sentinel.volume_id) self.assertEqual(1, len(attachments)) TestVolumeAttachment._compare(self, db_attachment, attachments[0]) @mock.patch('cinder.db.volume_attachment_get_by_host') def test_get_all_by_host(self, get_by_host): db_attachment = fake_volume.fake_db_volume_attachment() get_by_host.return_value = [db_attachment] attachments = objects.VolumeAttachmentList.get_all_by_host( self.context, mock.sentinel.volume_id, mock.sentinel.host) self.assertEqual(1, len(attachments)) TestVolumeAttachment._compare(self, db_attachment, attachments[0]) 
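    # NOTE(editor): the instance-uuid case below follows the same shape as
    # the two list tests above: patch the matching cinder.db query to return
    # one canned attachment row, call the VolumeAttachmentList classmethod
    # under test, and compare the single result against the db row via
    # _compare().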
@mock.patch('cinder.db.volume_attachment_get_by_instance_uuid') def test_get_all_by_instance_uuid(self, get_by_instance_uuid): db_attachment = fake_volume.fake_db_volume_attachment() get_by_instance_uuid.return_value = [db_attachment] attachments = objects.VolumeAttachmentList.get_all_by_instance_uuid( self.context, mock.sentinel.volume_id, mock.sentinel.uuid) self.assertEqual(1, len(attachments)) TestVolumeAttachment._compare(self, db_attachment, attachments[0]) cinder-8.0.0/cinder/tests/unit/policy.json0000664000567000056710000001123212701406257021706 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "admin_api": "is_admin:True", "admin_or_owner": "is_admin:True or project_id:%(project_id)s", "volume:create": "", "volume:get": "rule:admin_or_owner", "volume:get_all": "", "volume:get_volume_metadata": "", "volume:get_volume_image_metadata": "", "volume:delete_volume_metadata": "", "volume:update_volume_metadata": "", "volume:get_volume_admin_metadata": "rule:admin_api", "volume:update_volume_admin_metadata": "rule:admin_api", "volume:delete": "", "volume:update": "", "volume:attach": "", "volume:detach": "", "volume:reserve_volume": "", "volume:unreserve_volume": "", "volume:begin_detaching": "", "volume:roll_detaching": "", "volume:initialize_connection": "", "volume:terminate_connection": "", "volume:create_snapshot": "", "volume:delete_snapshot": "", "volume:get_snapshot": "", "volume:get_all_snapshots": "", "volume:update_snapshot": "", "volume:extend": "", "volume:migrate_volume": "rule:admin_api", "volume:migrate_volume_completion": "rule:admin_api", "volume:update_readonly_flag": "", "volume:retype": "", "volume:copy_volume_to_image": "", "volume:enable_replication": "rule:admin_api", "volume:disable_replication": "rule:admin_api", "volume:failover_replication": "rule:admin_api", "volume:list_replication_targets": "rule:admin_api", "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", "volume_extension:backup_admin_actions:force_delete": "rule:admin_api", "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", "volume_extension:volume_admin_actions:force_detach": "rule:admin_api", "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", "volume_extension:volume_actions:upload_image": "", "volume_extension:types_manage": "", "volume_extension:types_extra_specs": "", "volume_extension:access_types_qos_specs_id": "rule:admin_api", "volume_extension:access_types_extra_specs": "rule:admin_api", "volume_extension:volume_type_access": "", "volume_extension:volume_type_access:addProjectAccess": "rule:admin_api", "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api", "volume_extension:volume_type_encryption": "rule:admin_api", "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", "volume_extension:qos_specs_manage": "", "volume_extension:extended_snapshot_attributes": "", "volume_extension:volume_image_metadata": "", "volume_extension:volume_host_attribute": "rule:admin_api", "volume_extension:volume_tenant_attribute": "rule:admin_api", "volume_extension:volume_mig_status_attribute": "rule:admin_api", "volume_extension:hosts": "rule:admin_api", "volume_extension:quotas:show": "", 
"volume_extension:quotas:update": "", "volume_extension:quotas:delete": "", "volume_extension:quota_classes": "", "volume_extension:services:index": "", "volume_extension:services:update" : "rule:admin_api", "volume_extension:volume_manage": "rule:admin_api", "volume_extension:volume_unmanage": "rule:admin_api", "volume_extension:capabilities": "rule:admin_api", "limits_extension:used_limits": "", "snapshot_extension:snapshot_actions:update_snapshot_status": "", "snapshot_extension:snapshot_manage": "rule:admin_api", "snapshot_extension:snapshot_unmanage": "rule:admin_api", "volume:create_transfer": "", "volume:accept_transfer": "", "volume:delete_transfer": "", "volume:get_all_transfers": "", "backup:create" : "", "backup:delete": "", "backup:get": "", "backup:get_all": "", "backup:restore": "", "backup:backup-import": "rule:admin_api", "backup:backup-export": "rule:admin_api", "volume_extension:replication:promote": "rule:admin_api", "volume_extension:replication:reenable": "rule:admin_api", "consistencygroup:create" : "", "consistencygroup:delete": "", "consistencygroup:update": "", "consistencygroup:get": "", "consistencygroup:get_all": "", "consistencygroup:create_cgsnapshot" : "", "consistencygroup:delete_cgsnapshot": "", "consistencygroup:get_cgsnapshot": "", "consistencygroup:get_all_cgsnapshots": "", "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api" } cinder-8.0.0/cinder/tests/unit/test_block_device.py0000664000567000056710000005176512701406250023546 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import cfg from cinder import context from cinder import db import cinder.exception from cinder.objects import snapshot as obj_snap from cinder.objects import volume as obj_volume import cinder.test from cinder.tests.unit import fake_constants as fake from cinder.volume import configuration as conf from cinder.volume.drivers import block_device from cinder.volume import utils as volutils class TestBlockDeviceDriver(cinder.test.TestCase): def setUp(self): fake_opt = [cfg.StrOpt('fake_opt', default='fake', help='fake option')] super(TestBlockDeviceDriver, self).setUp() self.configuration = conf.Configuration(fake_opt, 'fake_group') self.configuration.available_devices = ['/dev/loop1', '/dev/loop2'] self.configuration.iscsi_helper = 'tgtadm' self.host = 'localhost' self.configuration.iscsi_port = 3260 self.configuration.volume_dd_blocksize = 1234 self.drv = block_device.BlockDeviceDriver( configuration=self.configuration, host='localhost', db=db) def test_initialize_connection(self): TEST_VOLUME1 = obj_volume.Volume(host='localhost1', provider_location='1 2 3 /dev/loop1', provider_auth='a b c', attached_mode='rw', id=fake.volume_id) TEST_CONNECTOR = {'host': 'localhost1'} data = self.drv.initialize_connection(TEST_VOLUME1, TEST_CONNECTOR) expected_data = {'data': {'device_path': '/dev/loop1'}, 'driver_volume_type': 'local'} self.assertEqual(expected_data, data) @mock.patch('cinder.volume.driver.ISCSIDriver.initialize_connection') def test_initialize_connection_different_hosts(self, _init_conn): TEST_CONNECTOR = {'host': 'localhost1'} TEST_VOLUME2 = obj_volume.Volume(host='localhost2', provider_location='1 2 3 /dev/loop2', provider_auth='d e f', attached_mode='rw', id=fake.volume2_id) _init_conn.return_value = 'data' data = self.drv.initialize_connection(TEST_VOLUME2, TEST_CONNECTOR) expected_data = {'data': {'auth_method': 'd', 'auth_password': 'f', 'auth_username': 'e', 'encrypted': False, 'target_discovered': False, 'target_iqn': '2', 'target_lun': 3, 'target_portal': '1', 'volume_id': fake.volume2_id}} self.assertEqual(expected_data['data'], data['data']) @mock.patch('cinder.volume.drivers.block_device.BlockDeviceDriver.' 
'local_path', return_value=None) @mock.patch('cinder.volume.utils.clear_volume') def test_delete_not_volume_provider_location(self, _clear_volume, _local_path): TEST_VOLUME2 = obj_volume.Volume(provider_location=None) self.drv.delete_volume(TEST_VOLUME2) _local_path.assert_called_once_with(TEST_VOLUME2) @mock.patch('os.path.exists', return_value=True) @mock.patch('cinder.volume.utils.clear_volume') def test_delete_volume_path_exist(self, _clear_volume, _exists): TEST_VOLUME = obj_volume.Volume(name_id=fake.volume_name_id, size=1, provider_location='/dev/loop1', display_name='vol1', status='available') with mock.patch.object(self.drv, 'local_path', return_value='/dev/loop1') as lp_mocked: with mock.patch.object(self.drv, '_get_devices_sizes', return_value={'/dev/loop1': 1}) as \ gds_mocked: volutils.clear_volume(gds_mocked, lp_mocked) self.drv.delete_volume(TEST_VOLUME) lp_mocked.assert_called_once_with(TEST_VOLUME) gds_mocked.assert_called_once_with(['/dev/loop1']) self.assertTrue(_exists.called) self.assertTrue(_clear_volume.called) def test_delete_path_is_not_in_list_of_available_devices(self): TEST_VOLUME2 = obj_volume.Volume(provider_location='/dev/loop0') with mock.patch.object(self.drv, 'local_path', return_value='/dev/loop0') as lp_mocked: self.drv.delete_volume(TEST_VOLUME2) lp_mocked.assert_called_once_with(TEST_VOLUME2) def test__update_provider_location(self): TEST_VOLUME = obj_volume.Volume(name_id=fake.volume_name_id, size=1, display_name='vol1') with mock.patch.object(obj_volume.Volume, 'update') as update_mocked, \ mock.patch.object(obj_volume.Volume, 'save') as save_mocked: self.drv._update_provider_location(TEST_VOLUME, 'dev_path') self.assertEqual(1, update_mocked.call_count) save_mocked.assert_called_once_with() def test_create_volume(self): TEST_VOLUME = obj_volume.Volume(name_id=fake.volume_name_id, size=1, display_name='vol1') with mock.patch.object(self.drv, 'find_appropriate_size_device', return_value='dev_path') as fasd_mocked: with mock.patch.object(self.drv, '_update_provider_location') as \ upl_mocked: self.drv.create_volume(TEST_VOLUME) fasd_mocked.assert_called_once_with(TEST_VOLUME.size) upl_mocked.assert_called_once_with(TEST_VOLUME, 'dev_path') def test_update_volume_stats(self): with mock.patch.object(self.drv, '_devices_sizes', return_value={'/dev/loop1': 1024, '/dev/loop2': 1024}) as \ ds_mocked: with mock.patch.object(self.drv, '_get_used_devices') as \ gud_mocked: self.drv._update_volume_stats() self.assertEqual({'total_capacity_gb': 2, 'free_capacity_gb': 2, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'vendor_name': "Open Source", 'driver_version': self.drv.VERSION, 'storage_protocol': 'unknown', 'volume_backend_name': 'BlockDeviceDriver', }, self.drv._stats) gud_mocked.assert_called_once_with() ds_mocked.assert_called_once_with() @mock.patch('cinder.volume.utils.copy_volume') def test_create_cloned_volume(self, _copy_volume): TEST_SRC = obj_volume.Volume(id=fake.volume_id, name_id=fake.volume_name_id, size=1, provider_location='/dev/loop1') TEST_VOLUME = obj_volume.Volume(name_id=fake.volume2_name_id, size=1, display_name='vol1') with mock.patch.object(self.drv, 'find_appropriate_size_device', return_value='/dev/loop2') as fasd_mocked: with mock.patch.object(self.drv, '_get_devices_sizes', return_value={'/dev/loop2': 2}) as \ gds_mocked: with mock.patch.object(self.drv, 'local_path', return_value='/dev/loop1') as \ lp_mocked: with mock.patch.object(self.drv, '_update_provider_location') as \ upl_mocked: 
volutils.copy_volume('/dev/loop1', fasd_mocked, 2, mock.sentinel, execute=self.drv._execute) self.drv.create_cloned_volume(TEST_VOLUME, TEST_SRC) fasd_mocked.assert_called_once_with(TEST_SRC.size) lp_mocked.assert_called_once_with(TEST_SRC) gds_mocked.assert_called_once_with(['/dev/loop2']) upl_mocked.assert_called_once_with( TEST_VOLUME, '/dev/loop2') @mock.patch.object(cinder.image.image_utils, 'fetch_to_raw') def test_copy_image_to_volume(self, _fetch_to_raw): TEST_VOLUME = obj_volume.Volume(name_id=fake.volume_name_id, size=1, provider_location='/dev/loop1') TEST_IMAGE_SERVICE = "image_service" TEST_IMAGE_ID = "image_id" with mock.patch.object(self.drv, 'local_path', return_value='/dev/loop1') as lp_mocked: self.drv.copy_image_to_volume(context, TEST_VOLUME, TEST_IMAGE_SERVICE, TEST_IMAGE_ID) lp_mocked.assert_called_once_with(TEST_VOLUME) _fetch_to_raw.assert_called_once_with(context, TEST_IMAGE_SERVICE, TEST_IMAGE_ID, '/dev/loop1', 1234, size=1) def test_copy_volume_to_image(self): TEST_VOLUME = {'provider_location': '/dev/loop1'} TEST_IMAGE_SERVICE = "image_service" TEST_IMAGE_META = "image_meta" with mock.patch.object(cinder.image.image_utils, 'upload_volume') as \ _upload_volume: with mock.patch.object(self.drv, 'local_path') as _local_path: _local_path.return_value = '/dev/loop1' self.drv.copy_volume_to_image(context, TEST_VOLUME, TEST_IMAGE_SERVICE, TEST_IMAGE_META) self.assertTrue(_local_path.called) _upload_volume.assert_called_once_with(context, TEST_IMAGE_SERVICE, TEST_IMAGE_META, '/dev/loop1') def test_get_used_devices(self): TEST_VOLUME1 = {'host': 'localhost', 'provider_location': '/dev/loop1'} TEST_VOLUME2 = {'host': 'localhost', 'provider_location': '/dev/loop2'} def fake_local_path(vol): return vol['provider_location'].split()[-1] with mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', return_value=[TEST_VOLUME1, TEST_VOLUME2]),\ mock.patch.object(obj_snap.SnapshotList, 'get_by_host', return_value=[]): with mock.patch.object(context, 'get_admin_context'): with mock.patch.object(self.drv, 'local_path', return_value=fake_local_path): path1 = self.drv.local_path(TEST_VOLUME1) path2 = self.drv.local_path(TEST_VOLUME2) self.assertEqual(set([path1, path2]), self.drv._get_used_devices()) def test_get_devices_sizes(self): dev_paths = ['/dev/loop1', '/dev/loop2', '/dev/loop3'] out = '4294967296\n2147483648\n3221225472\nn' with mock.patch.object(self.drv, '_execute', return_value=(out, None)) as _execute: actual = self.drv._get_devices_sizes(dev_paths) self.assertEqual(3, len(actual)) self.assertEqual({'/dev/loop1': 4096, '/dev/loop2': 2048, '/dev/loop3': 3072}, actual) _execute.assert_called_once_with('blockdev', '--getsize64', *dev_paths, run_as_root=True) def test_devices_sizes(self): with mock.patch.object(self.drv, '_get_devices_sizes') as \ _get_dvc_size: _get_dvc_size.return_value = {'/dev/loop1': 1, '/dev/loop2': 1} self.assertEqual(2, len(self.drv._devices_sizes())) self.assertEqual({'/dev/loop1': 1, '/dev/loop2': 1}, self.drv._devices_sizes()) def test_find_appropriate_size_device_no_free_disks(self): size = 1 with mock.patch.object(self.drv, '_devices_sizes') as _dvc_sizes: with mock.patch.object(self.drv, '_get_used_devices') as \ _get_used_dvc: _dvc_sizes.return_value = {'/dev/loop1': 1, '/dev/loop2': 1} _get_used_dvc.return_value = set(['/dev/loop1', '/dev/loop2']) self.assertRaises(cinder.exception.CinderException, self.drv.find_appropriate_size_device, size) def test_find_appropriate_size_device_not_big_enough_disk(self): size = 2948 with 
mock.patch.object(self.drv, '_devices_sizes') as _dvc_sizes: with mock.patch.object(self.drv, '_get_used_devices') as \ _get_used_dvc: _dvc_sizes.return_value = {'/dev/loop1': 1024, '/dev/loop2': 1924} _get_used_dvc.return_value = set(['/dev/loop1']) self.assertRaises(cinder.exception.CinderException, self.drv.find_appropriate_size_device, size) def test_find_appropriate_size_device(self): size = 1 with mock.patch.object(self.drv, '_devices_sizes') as _dvc_sizes: with mock.patch.object(self.drv, '_get_used_devices') as \ _get_used_dvc: _dvc_sizes.return_value = {'/dev/loop1': 2048, '/dev/loop2': 1024} _get_used_dvc.return_value = set() self.assertEqual('/dev/loop2', self.drv.find_appropriate_size_device(size)) def test_extend_volume_exists(self): TEST_VOLUME = {'name': 'vol1', 'id': 123} with mock.patch.object(self.drv, '_get_devices_sizes', return_value={'/dev/loop1': 1024}) as \ mock_get_size: with mock.patch.object(self.drv, 'local_path', return_value='/dev/loop1') as lp_mocked: self.assertRaises(cinder.exception.CinderException, self.drv.extend_volume, TEST_VOLUME, 2) lp_mocked.assert_called_once_with(TEST_VOLUME) mock_get_size.assert_called_once_with(['/dev/loop1']) @mock.patch('cinder.volume.utils.copy_volume') def test_create_snapshot(self, _copy_volume): TEST_VOLUME = obj_volume.Volume(id=fake.volume_id, name_id=fake.volume_name_id, size=1, display_name='vol1', status='available', provider_location='/dev/loop1') TEST_SNAP = obj_snap.Snapshot(id=fake.snapshot_id, volume_id=fake.volume_id, volume_size=1024, provider_location='/dev/loop2', volume=TEST_VOLUME) with mock.patch.object(self.drv, 'find_appropriate_size_device', return_value='/dev/loop2') as fasd_mocked: with mock.patch.object(self.drv, '_get_devices_sizes', return_value={'/dev/loop2': 1024}) as \ gds_mocked: with mock.patch.object(self.drv, '_update_provider_location') as \ upl_mocked: volutils.copy_volume('/dev/loop1', fasd_mocked, 1024, mock.sentinel, execute=self.drv._execute) self.drv.create_snapshot(TEST_SNAP) fasd_mocked.assert_called_once_with(TEST_SNAP.volume_size) gds_mocked.assert_called_once_with(['/dev/loop2']) upl_mocked.assert_called_once_with( TEST_SNAP, '/dev/loop2') def test_create_snapshot_with_not_available_volume(self): TEST_VOLUME = obj_volume.Volume(id=fake.volume_id, name_id=fake.volume_name_id, size=1, display_name='vol1', status='in use', provider_location='/dev/loop1') TEST_SNAP = obj_snap.Snapshot(id=fake.snapshot_id, volume_id=fake.volume_id, volume_size=1024, provider_location='/dev/loop2', volume=TEST_VOLUME) self.assertRaises(cinder.exception.CinderException, self.drv.create_snapshot, TEST_SNAP) @mock.patch('cinder.volume.utils.copy_volume') def test_create_volume_from_snapshot(self, _copy_volume): TEST_SNAP = obj_snap.Snapshot(volume_id=fake.volume_id, volume_size=1024, provider_location='/dev/loop1') TEST_VOLUME = obj_volume.Volume(id=fake.volume_id, name_id=fake.volume_name_id, size=1, display_name='vol1', provider_location='/dev/loop2') with mock.patch.object(self.drv, 'find_appropriate_size_device', return_value='/dev/loop2') as fasd_mocked: with mock.patch.object(self.drv, '_get_devices_sizes', return_value={'/dev/loop2': 1024}) as \ gds_mocked: with mock.patch.object(self.drv, '_update_provider_location') as \ upl_mocked: volutils.copy_volume('/dev/loop1', fasd_mocked, 1024, mock.sentinel, execute=self.drv._execute) self.drv.create_volume_from_snapshot( TEST_VOLUME, TEST_SNAP) fasd_mocked.assert_called_once_with( TEST_SNAP.volume_size) gds_mocked.assert_called_once_with(['/dev/loop2']) 
upl_mocked.assert_called_once_with( TEST_VOLUME, '/dev/loop2') @mock.patch('os.path.exists', return_value=True) @mock.patch('cinder.volume.utils.clear_volume') def test_delete_snapshot(self, _clear_volume, _exists): TEST_SNAP = obj_snap.Snapshot(volume_id=fake.volume_id, provider_location='/dev/loop1', status='available') with mock.patch.object(self.drv, 'local_path', return_value='/dev/loop1') as lp_mocked: with mock.patch.object(self.drv, '_get_devices_sizes', return_value={'/dev/loop1': 1}) as \ gds_mocked: volutils.clear_volume(gds_mocked, lp_mocked) self.drv.delete_snapshot(TEST_SNAP) lp_mocked.assert_called_once_with(TEST_SNAP) gds_mocked.assert_called_once_with(['/dev/loop1']) self.assertTrue(_exists.called) self.assertTrue(_clear_volume.called) cinder-8.0.0/cinder/tests/unit/zonemanager/0000775000567000056710000000000012701406543022021 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py0000664000567000056710000002410512701406250027417 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for Brocade fc zone driver.""" import mock from oslo_config import cfg from oslo_utils import importutils import paramiko import requests from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as driver _active_cfg_before_add = {} _active_cfg_before_delete = { 'zones': { 'openstack10008c7cff523b0120240002ac000a50': ( ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50']), 't_zone': ['1,0']}, 'active_zone_config': 'cfg1'} _activate = True _zone_name = 'openstack10008c7cff523b0120240002ac000a50' _target_ns_map = {'100000051e55a100': ['20240002ac000a50']} _initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} _zone_map_to_add = {'openstack10008c7cff523b0120240002ac000a50': ( ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])} _initiator_target_map = {'10008c7cff523b01': ['20240002ac000a50']} _device_map_to_verify = { '100000051e55a100': { 'initiator_port_wwn_list': [ '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} _fabric_wwn = '100000051e55a100' class BrcdFcZoneDriverBaseTest(object): def setup_config(self, is_normal, mode): fc_test_opts = [ cfg.StrOpt('fc_fabric_address_BRCD_FAB_1', default='10.24.48.213', help='FC Fabric names'), ] configuration = conf.Configuration(fc_test_opts) # fill up config configuration.zoning_mode = 'fabric' configuration.zone_driver = ('cinder.tests.unit.zonemanager.' 'test_brcd_fc_zone_driver.' 'FakeBrcdFCZoneDriver') configuration.brcd_sb_connector = ('cinder.tests.unit.zonemanager.' 'test_brcd_fc_zone_driver' '.FakeBrcdFCZoneClientCLI') configuration.zoning_policy = 'initiator-target' configuration.zone_activate = True configuration.zone_name_prefix = 'openstack' configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager.' 'test_brcd_fc_zone_driver.' 
'FakeBrcdFCSanLookupService') configuration.fc_fabric_names = 'BRCD_FAB_1' configuration.fc_fabric_address_BRCD_FAB_1 = '10.24.48.213' configuration.fc_southbound_connector = 'CLI' if is_normal: configuration.fc_fabric_user_BRCD_FAB_1 = 'admin' else: configuration.fc_fabric_user_BRCD_FAB_1 = 'invaliduser' configuration.fc_fabric_password_BRCD_FAB_1 = 'password' if mode == 1: configuration.zoning_policy_BRCD_FAB_1 = 'initiator-target' elif mode == 2: configuration.zoning_policy_BRCD_FAB_1 = 'initiator' else: configuration.zoning_policy_BRCD_FAB_1 = 'initiator-target' configuration.zone_activate_BRCD_FAB_1 = True configuration.zone_name_prefix_BRCD_FAB_1 = 'openstack_fab1' configuration.principal_switch_wwn_BRCD_FAB_1 = '100000051e55a100' return configuration class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase): def setUp(self): super(TestBrcdFcZoneDriver, self).setUp() # setup config for normal flow self.setup_driver(self.setup_config(True, 1)) GlobalVars._zone_state = [] def setup_driver(self, config): self.driver = importutils.import_object( 'cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' '.BrcdFCZoneDriver', configuration=config) def fake__get_active_zone_set(self, brcd_sb_connector, fabric_ip): return GlobalVars._active_cfg def get_client(self, protocol='HTTPS'): conn = ('cinder.tests.unit.zonemanager.test_brcd_fc_zone_driver.' + ('FakeBrcdFCZoneClientCLI' if protocol == "CLI" else 'FakeBrcdHttpFCZoneClient')) client = importutils.import_object( conn, ipaddress="10.24.48.213", username="admin", password="password", key="/home/stack/.ssh/id_rsa", port=22, vfid="2", protocol=protocol ) return client def fake_get_san_context(self, target_wwn_list): fabric_map = {} return fabric_map @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_add_connection(self, get_southbound_client_mock): """Normal flow for i-t mode.""" GlobalVars._is_normal_test = True GlobalVars._zone_state = [] GlobalVars._active_cfg = _active_cfg_before_add get_southbound_client_mock.return_value = self.get_client("HTTPS") self.driver.add_connection('BRCD_FAB_1', _initiator_target_map) self.assertTrue(_zone_name in GlobalVars._zone_state) @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_delete_connection(self, get_southbound_client_mock): GlobalVars._is_normal_test = True get_southbound_client_mock.return_value = self.get_client("CLI") GlobalVars._active_cfg = _active_cfg_before_delete self.driver.delete_connection( 'BRCD_FAB_1', _initiator_target_map) self.assertFalse(_zone_name in GlobalVars._zone_state) @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_add_connection_for_initiator_mode(self, get_southbound_client_mk): """Normal flow for i mode.""" GlobalVars._is_normal_test = True get_southbound_client_mk.return_value = self.get_client("CLI") GlobalVars._active_cfg = _active_cfg_before_add self.setup_driver(self.setup_config(True, 2)) self.driver.add_connection('BRCD_FAB_1', _initiator_target_map) self.assertTrue(_zone_name in GlobalVars._zone_state) @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_delete_connection_for_initiator_mode(self, get_southbound_client_mk): GlobalVars._is_normal_test = True get_southbound_client_mk.return_value = self.get_client("HTTPS") GlobalVars._active_cfg = _active_cfg_before_delete self.setup_driver(self.setup_config(True, 2)) self.driver.delete_connection( 'BRCD_FAB_1', _initiator_target_map) self.assertFalse(_zone_name in GlobalVars._zone_state) def 
test_add_connection_for_invalid_fabric(self): """Test abnormal flows.""" GlobalVars._is_normal_test = True GlobalVars._active_cfg = _active_cfg_before_add GlobalVars._is_normal_test = False self.setup_driver(self.setup_config(False, 1)) self.assertRaises(exception.FCZoneDriverException, self.driver.add_connection, 'BRCD_FAB_1', _initiator_target_map) def test_delete_connection_for_invalid_fabric(self): GlobalVars._active_cfg = _active_cfg_before_delete GlobalVars._is_normal_test = False self.setup_driver(self.setup_config(False, 1)) self.assertRaises(exception.FCZoneDriverException, self.driver.delete_connection, 'BRCD_FAB_1', _initiator_target_map) class FakeClient(object): def get_active_zone_set(self): return GlobalVars._active_cfg def add_zones(self, zones, isActivate, active_zone_set): GlobalVars._zone_state.extend(zones.keys()) def delete_zones(self, zone_names, isActivate, active_zone_set): zone_list = zone_names.split(';') GlobalVars._zone_state = [ x for x in GlobalVars._zone_state if x not in zone_list] def is_supported_firmware(self): return True def get_nameserver_info(self): return _target_ns_map def close_connection(self): pass def cleanup(self): pass class FakeBrcdFCZoneClientCLI(FakeClient): def __init__(self, ipaddress, username, password, port, key, vfid, protocol): self.firmware_supported = True if not GlobalVars._is_normal_test: raise paramiko.SSHException("Unable to connect to fabric.") class FakeBrcdHttpFCZoneClient(FakeClient): def __init__(self, ipaddress, username, password, port, key, vfid, protocol): self.firmware_supported = True if not GlobalVars._is_normal_test: raise requests.exceptions.HTTPError("Unable to connect to fabric") class FakeBrcdFCSanLookupService(object): def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): device_map = {} initiators = [] targets = [] for i in initiator_wwn_list: if i in _initiator_ns_map[_fabric_wwn]: initiators.append(i) for t in target_wwn_list: if t in _target_ns_map[_fabric_wwn]: targets.append(t) device_map[_fabric_wwn] = { 'initiator_port_wwn_list': initiators, 'target_port_wwn_list': targets} return device_map class GlobalVars(object): global _active_cfg _active_cfg = {} global _zone_state _zone_state = list() global _is_normal_test _is_normal_test = True cinder-8.0.0/cinder/tests/unit/zonemanager/test_driverutils.py0000664000567000056710000001261012701406250026001 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
# """Unit tests for friendly zone name.""" import ddt import string from cinder import test from cinder.zonemanager.drivers import driver_utils TEST_CHAR_SET = string.ascii_letters + string.digits @ddt.ddt class TestDriverUtils(test.TestCase): @ddt.data('OSHost10010008c7cff523b01AMCEArray20240002ac000a50') def test_get_friendly_zone_name_valid_hostname_storagesystem(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", "OS_Host100", 'AMCE' '_Array', "openstack", TEST_CHAR_SET)) @ddt.data('openstack10008c7cff523b0120240002ac000a50') def test_get_friendly_zone_name_hostname_storagesystem_none(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", None, None, "openstack", TEST_CHAR_SET)) @ddt.data('openstack10008c7cff523b0120240002ac000a50') def test_get_friendly_zone_name_storagesystem_none(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", "OS_Host100", None, "openstack", TEST_CHAR_SET)) @ddt.data('openstack10008c7cff523b0120240002ac000a50') def test_get_friendly_zone_name_hostname_none(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", None, "AMCE_Array", "openstack", TEST_CHAR_SET)) @ddt.data('OSHost10010008c7cff523b01') def test_get_friendly_zone_name_initiator_mode(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator', "10:00:8c:7c:ff:52:3b:01", None, "OS_Host100", None, "openstack", TEST_CHAR_SET)) @ddt.data('openstack10008c7cff523b01') def test_get_friendly_zone_name_initiator_mode_hostname_none(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator', "10:00:8c:7c:ff:52:3b:01", None, None, None, "openstack", TEST_CHAR_SET)) @ddt.data('OSHost100XXXX10008c7cff523b01AMCEArrayYYYY20240002ac000a50') def test_get_friendly_zone_name_storagename_length_too_long(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", "OS_Host100XXXXXXXXXX", "AMCE_ArrayYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY" "YYYY", "openstack", TEST_CHAR_SET)) @ddt.data('OSHost100XXXX10008c7cff523b01AMCEArrayYYYY20240002ac000a50') def test_get_friendly_zone_name_max_length(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", "OS_Host100XXXXXXXXXX", "AMCE_ArrayYYYYYYYYYY", "openstack", TEST_CHAR_SET)) @ddt.data('OSHost100XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX10008c7cff523b01') def test_get_friendly_zone_name_initiator_mode_hostname_max_length(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator', "10:00:8c:7c:ff:52:3b:01", None, 'OS_Host100XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' 'XXXXX', None, "openstack", TEST_CHAR_SET)) @ddt.data('openstack110008c7cff523b0120240002ac000a50') def test_get_friendly_zone_name_invalid_characters(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", None, "AMCE_Array", "open-stack*1_", TEST_CHAR_SET)) cinder-8.0.0/cinder/tests/unit/zonemanager/test_brcd_lookup_service.py0000664000567000056710000000671712701406250027463 0ustar jenkinsjenkins00000000000000# 
(c) Copyright 2013 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for fc san lookup service.""" from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.zonemanager import fc_san_lookup_service as san_service _target_ns_map = {'100000051e55a100': ['20240002ac000a50']} _initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} _device_map_to_verify = { '100000051e55a100': { 'initiator_port_wwn_list': [ '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} _fabric_wwn = '100000051e55a100' class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase): def setUp(self): super(TestFCSanLookupService, self).setUp() self.configuration = self.setup_config() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) def setup_config(self): configuration = conf.Configuration(None) # fill up config configuration.fc_san_lookup_service = ( 'cinder.tests.unit.zonemanager.test_brcd_lookup_service.' 'FakeBrcdFCSanLookupService') return configuration def test_get_device_mapping_from_network(self): GlobalParams._is_normal_test = True initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictMatch(_device_map_to_verify, device_map) def test_get_device_mapping_from_network_for_invalid_config(self): GlobalParams._is_normal_test = False initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] self.assertRaises(exception.FCSanLookupServiceException, self.get_device_mapping_from_network, initiator_list, target_list) class FakeBrcdFCSanLookupService(object): def __init__(self, **kwargs): pass def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): if not GlobalParams._is_normal_test: raise exception.FCSanLookupServiceException("Error") device_map = {} initiators = [] targets = [] for i in initiator_wwn_list: if (i in _initiator_ns_map[_fabric_wwn]): initiators.append(i) for t in target_wwn_list: if (t in _target_ns_map[_fabric_wwn]): targets.append(t) device_map[_fabric_wwn] = { 'initiator_port_wwn_list': initiators, 'target_port_wwn_list': targets} return device_map class GlobalParams(object): global _is_normal_test _is_normal_test = True cinder-8.0.0/cinder/tests/unit/zonemanager/__init__.py0000664000567000056710000000000012701406250024113 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/zonemanager/test_volume_driver.py0000664000567000056710000000753612701406250026322 0ustar jenkinsjenkins00000000000000# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for Volume Manager.""" import mock from cinder import test from cinder.tests.unit import fake_driver from cinder import utils from cinder.volume import configuration as conf from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver from cinder.zonemanager import fc_zone_manager class TestVolumeDriver(test.TestCase): def setUp(self): super(TestVolumeDriver, self).setUp() self.driver = fake_driver.FakeFibreChannelDriver() brcd_fc_zone_driver.BrcdFCZoneDriver = mock.Mock() self.addCleanup(self._cleanup) def _cleanup(self): self.driver = None def __init__(self, *args, **kwargs): super(TestVolumeDriver, self).__init__(*args, **kwargs) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) @mock.patch.object(utils, 'require_driver_initialized') def test_initialize_connection_with_decorator(self, utils_mock, opt_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'add_connection')\ as add_zone_mock: with mock.patch.object(conf.Configuration, 'safe_get')\ as mock_safe_get: mock_safe_get.return_value = 'fabric' conn_info = self.driver.initialize_connection(None, None) add_zone_mock.assert_called_once_with(conn_info) @mock.patch.object(utils, 'require_driver_initialized') def test_initialize_connection_no_decorator(self, utils_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'add_connection')\ as add_zone_mock: with mock.patch.object(conf.Configuration, 'safe_get')\ as mock_safe_get: mock_safe_get.return_value = 'fabric' self.driver.no_zone_initialize_connection(None, None) assert not add_zone_mock.called @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) @mock.patch.object(utils, 'require_driver_initialized') def test_terminate_connection_with_decorator(self, utils_mock, opt_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'delete_connection') as remove_zone_mock: with mock.patch.object(conf.Configuration, 'safe_get')\ as mock_safe_get: mock_safe_get.return_value = 'fabric' conn_info = self.driver.terminate_connection(None, None) remove_zone_mock.assert_called_once_with(conn_info) @mock.patch.object(utils, 'require_driver_initialized') def test_terminate_connection_no_decorator(self, utils_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'delete_connection') as remove_zone_mock: with mock.patch.object(conf.Configuration, 'safe_get')\ as mock_safe_get: mock_safe_get.return_value = 'fabric' self.driver.no_zone_terminate_connection(None, None) assert not remove_zone_mock.called cinder-8.0.0/cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py0000664000567000056710000003423512701406257030245 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for brcd fc zone client cli.""" import mock from oslo_concurrency import processutils from cinder import exception from cinder import test from cinder.zonemanager.drivers.brocade import (brcd_fc_zone_client_cli as client_cli) import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant nsshow = '20:1a:00:05:1e:e8:e3:29' switch_data = [' N 011a00;2,3;20:1a:00:05:1e:e8:e3:29;\ 20:1a:00:05:1e:e8:e3:29;na', ' Fabric Port Name: 20:1a:00:05:1e:e8:e3:29'] cfgactvshow = ['Effective configuration:\n', ' cfg:\tOpenStack_Cfg\t\n', ' zone:\topenstack50060b0000c26604201900051ee8e329\t\n', '\t\t50:06:0b:00:00:c2:66:04\n', '\t\t20:19:00:05:1e:e8:e3:29\n'] active_zoneset = { 'zones': { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}, 'active_zone_config': 'OpenStack_Cfg'} active_zoneset_multiple_zones = { 'zones': { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'], 'openstack50060b0000c26602201900051ee8e327': ['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']}, 'active_zone_config': 'OpenStack_Cfg'} new_zone_memb_same = { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']} new_zone_memb_not_same = { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:30']} new_zone = {'openstack10000012345678902001009876543210': ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']} new_zones = {'openstack10000012345678902001009876543210': ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'], 'openstack10000011111111112001001111111111': ['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']} zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329' supported_firmware = ['Kernel: 2.6', 'Fabric OS: v7.0.1'] unsupported_firmware = ['Fabric OS: v6.2.1'] class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase): def setUp(self): super(TestBrcdFCZoneClientCLI, self).setUp() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info') def test_get_active_zone_set(self, get_switch_info_mock): cmd_list = [zone_constant.GET_ACTIVE_ZONE_CFG] get_switch_info_mock.return_value = cfgactvshow active_zoneset_returned = self.get_active_zone_set() get_switch_info_mock.assert_called_once_with(cmd_list) self.assertDictMatch(active_zoneset, active_zoneset_returned) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test_get_active_zone_set_ssh_error(self, run_ssh_mock): run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(exception.BrocadeZoningCliException, self.get_active_zone_set) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save') def test_add_zones_new_zone_no_activate(self, cfg_save_mock, apply_zone_change_mock, get_active_zs_mock): 
get_active_zs_mock.return_value = active_zoneset self.add_zones(new_zones, False, None) self.assertEqual(2, get_active_zs_mock.call_count) self.assertEqual(3, apply_zone_change_mock.call_count) cfg_save_mock.assert_called_once_with() @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') def test_add_zones_new_zone_activate(self, activate_zoneset_mock, apply_zone_change_mock, get_active_zs_mock): get_active_zs_mock.return_value = active_zoneset self.add_zones(new_zone, True, active_zoneset) self.assertEqual(2, apply_zone_change_mock.call_count) activate_zoneset_mock.assert_called_once_with( active_zoneset['active_zone_config']) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'delete_zones') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test_add_zone_exists_memb_same(self, apply_zone_change_mock, activate_zoneset_mock, delete_zones_mock, get_active_zs_mock): get_active_zs_mock.return_value = active_zoneset self.add_zones(new_zone_memb_same, True, active_zoneset) self.assertEqual(0, apply_zone_change_mock.call_count) self.assertEqual(0, delete_zones_mock.call_count) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'delete_zones') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test_add_zone_exists_memb_not_same(self, apply_zone_change_mock, activate_zoneset_mock, delete_zones_mock, get_active_zs_mock): get_active_zs_mock.return_value = active_zoneset self.add_zones(new_zone_memb_not_same, True, active_zoneset) self.assertEqual(2, apply_zone_change_mock.call_count) self.assertEqual(1, delete_zones_mock.call_count) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') def test_activate_zoneset(self, ssh_execute_mock): ssh_execute_mock.return_value = True return_value = self.activate_zoneset('zoneset1') self.assertTrue(return_value) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') def test_deactivate_zoneset(self, ssh_execute_mock): ssh_execute_mock.return_value = True return_value = self.deactivate_zoneset() self.assertTrue(return_value) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save') def test_delete_zones_activate_false(self, cfg_save_mock, apply_zone_change_mock): with mock.patch.object(self, '_zone_delete') as zone_delete_mock: self.delete_zones(zone_names_to_delete, False, active_zoneset_multiple_zones) self.assertEqual(1, apply_zone_change_mock.call_count) zone_delete_mock.assert_called_once_with(zone_names_to_delete) cfg_save_mock.assert_called_once_with() @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') def test_delete_zones_activate_true(self, activate_zs_mock, apply_zone_change_mock): with mock.patch.object(self, '_zone_delete') \ as zone_delete_mock: self.delete_zones(zone_names_to_delete, True, active_zoneset_multiple_zones) self.assertEqual(1, apply_zone_change_mock.call_count) zone_delete_mock.assert_called_once_with(zone_names_to_delete) 
activate_zs_mock.assert_called_once_with( active_zoneset['active_zone_config']) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info') def test_get_nameserver_info(self, get_switch_info_mock): ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29'] get_switch_info_mock.return_value = (switch_data) ns_info_list = self.get_nameserver_info() self.assertEqual(ns_info_list_expected, ns_info_list) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test_get_nameserver_info_ssh_error(self, run_ssh_mock): run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(exception.BrocadeZoningCliException, self.get_nameserver_info) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') def test__cfg_save(self, ssh_execute_mock): cmd_list = [zone_constant.CFG_SAVE] self._cfg_save() ssh_execute_mock.assert_called_once_with(cmd_list, True, 1) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test__zone_delete(self, apply_zone_change_mock): zone_name = 'testzone' cmd_list = ['zonedelete', '"testzone"'] self._zone_delete(zone_name) apply_zone_change_mock.assert_called_once_with(cmd_list) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test__cfg_trans_abort(self, apply_zone_change_mock): cmd_list = [zone_constant.CFG_ZONE_TRANS_ABORT] with mock.patch.object(self, '_is_trans_abortable') \ as is_trans_abortable_mock: is_trans_abortable_mock.return_value = True self._cfg_trans_abort() is_trans_abortable_mock.assert_called_once_with() apply_zone_change_mock.assert_called_once_with(cmd_list) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test__is_trans_abortable_true(self, run_ssh_mock): cmd_list = [zone_constant.CFG_SHOW_TRANS] run_ssh_mock.return_value = (Stream(zone_constant.TRANS_ABORTABLE), None) data = self._is_trans_abortable() self.assertTrue(data) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test__is_trans_abortable_ssh_error(self, run_ssh_mock): run_ssh_mock.return_value = (Stream(), Stream()) self.assertRaises(exception.BrocadeZoningCliException, self._is_trans_abortable) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test__is_trans_abortable_false(self, run_ssh_mock): cmd_list = [zone_constant.CFG_SHOW_TRANS] cfgtransshow = 'There is no outstanding zoning transaction' run_ssh_mock.return_value = (Stream(cfgtransshow), None) data = self._is_trans_abortable() self.assertFalse(data) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test_apply_zone_change(self, run_ssh_mock): cmd_list = [zone_constant.CFG_SAVE] run_ssh_mock.return_value = (None, None) self.apply_zone_change(cmd_list) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test__get_switch_info(self, run_ssh_mock): cmd_list = [zone_constant.NS_SHOW] nsshow_list = [nsshow] run_ssh_mock.return_value = (Stream(nsshow), Stream()) switch_data = self._get_switch_info(cmd_list) self.assertEqual(nsshow_list, switch_data) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) def test__parse_ns_output(self): invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29'] expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29'] return_wwn_list = self._parse_ns_output(switch_data) self.assertEqual(expected_wwn_list, return_wwn_list) self.assertRaises(exception.InvalidParameterValue, 
self._parse_ns_output, invalid_switch_data) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') def test_is_supported_firmware(self, exec_shell_cmd_mock): exec_shell_cmd_mock.return_value = (supported_firmware, None) self.assertTrue(self.is_supported_firmware()) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock): exec_shell_cmd_mock.return_value = (unsupported_firmware, None) self.assertFalse(self.is_supported_firmware()) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock): exec_shell_cmd_mock.return_value = (None, Stream()) self.assertFalse(self.is_supported_firmware()) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock): exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(exception.BrocadeZoningCliException, self.is_supported_firmware) class Channel(object): def recv_exit_status(self): return 0 class Stream(object): def __init__(self, buffer=''): self.buffer = buffer self.channel = Channel() def readlines(self): return self.buffer def splitlines(self): return self.buffer.splitlines() def close(self): pass def flush(self): self.buffer = '' cinder-8.0.0/cinder/tests/unit/zonemanager/test_cisco_fc_san_lookup_service.py0000664000567000056710000001364212701406250031155 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
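# NOTE: a minimal sketch (not part of cinder) of how the Channel/Stream
# fakes defined at the bottom of these zonemanager test modules are
# consumed. They stand in for the file-like objects that paramiko's
# exec_command() returns: the code under test only reads
# channel.recv_exit_status() and calls splitlines()/close(). The helper
# name parse_switch_output below is hypothetical.


def parse_switch_output(stdout):
    # Accepts either a real paramiko channel file or the Stream fake:
    # require a zero exit status from the remote command, then hand the
    # buffered text back as a list of lines.
    if stdout.channel.recv_exit_status() != 0:
        raise RuntimeError('remote command failed')
    try:
        return stdout.splitlines()
    finally:
        stdout.close()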
# """Unit tests for Cisco fc san lookup service.""" import mock from oslo_config import cfg from cinder import exception from cinder import test from cinder.volume import configuration as conf import cinder.zonemanager.drivers.cisco.cisco_fc_san_lookup_service \ as cisco_lookup import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant from cinder.zonemanager import utils as zm_utils nsshow = '20:1a:00:05:1e:e8:e3:29' switch_data = ['VSAN 304\n', '------------------------------------------------------\n', 'FCID TYPE PWWN (VENDOR) \n', '------------------------------------------------------\n', '0x030001 N 20:1a:00:05:1e:e8:e3:29 (Cisco) ipfc\n', '0x030101 NL 10:00:00:00:77:99:60:2c (Interphase)\n', '0x030200 N 10:00:00:49:c9:28:c7:01\n'] nsshow_data = ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'] _device_map_to_verify = { '304': { 'initiator_port_wwn_list': ['10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} class TestCiscoFCSanLookupService(cisco_lookup.CiscoFCSanLookupService, test.TestCase): def setUp(self): super(TestCiscoFCSanLookupService, self).setUp() self.configuration = conf.Configuration(None) self.configuration.set_default('fc_fabric_names', 'CISCO_FAB_2', 'fc-zone-manager') self.configuration.fc_fabric_names = 'CISCO_FAB_2' self.create_configuration() self.fabric_vsan = '304' # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) def create_configuration(self): fc_fabric_opts = [] fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_address', default='172.24.173.142', help='')) fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_user', default='admin', help='')) fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_password', default='admin1234', help='', secret=True)) fc_fabric_opts.append(cfg.PortOpt('cisco_fc_fabric_port', default=22, help='')) fc_fabric_opts.append(cfg.StrOpt('cisco_zoning_vsan', default='304', help='')) config = conf.Configuration(fc_fabric_opts, 'CISCO_FAB_2') self.fabric_configs = {'CISCO_FAB_2': config} @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, 'get_nameserver_info') def test_get_device_mapping_from_network(self, get_nameserver_info_mock): initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] get_nameserver_info_mock.return_value = (nsshow_data) device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictMatch(_device_map_to_verify, device_map) @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, '_get_switch_info') def test_get_nameserver_info(self, get_switch_data_mock): ns_info_list = [] ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29', '10:00:00:49:c9:28:c7:01'] get_switch_data_mock.return_value = (switch_data) ns_info_list = self.get_nameserver_info('304') self.assertEqual(ns_info_list_expected, ns_info_list) def test_parse_ns_output(self): invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29'] return_wwn_list = [] expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29', '10:00:00:49:c9:28:c7:01'] return_wwn_list = self._parse_ns_output(switch_data) self.assertEqual(expected_wwn_list, return_wwn_list) self.assertRaises(exception.InvalidParameterValue, self._parse_ns_output, invalid_switch_data) def test_get_formatted_wwn(self): wwn_list = ['10008c7cff523b01'] return_wwn_list = [] expected_wwn_list = ['10:00:8c:7c:ff:52:3b:01'] return_wwn_list.append(zm_utils.get_formatted_wwn(wwn_list[0])) self.assertEqual(expected_wwn_list, return_wwn_list) 
@mock.patch.object(cisco_lookup.CiscoFCSanLookupService, '_run_ssh') def test__get_switch_info(self, run_ssh_mock): cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan, ' | no-more'] nsshow_list = [nsshow] run_ssh_mock.return_value = (Stream(nsshow), Stream()) switch_data = self._get_switch_info(cmd_list) self.assertEqual(nsshow_list, switch_data) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) class Channel(object): def recv_exit_status(self): return 0 class Stream(object): def __init__(self, buffer=''): self.buffer = buffer self.channel = Channel() def readlines(self): return self.buffer def splitlines(self): return self.buffer.splitlines() def close(self): pass def flush(self): self.buffer = '' cinder-8.0.0/cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py0000664000567000056710000006453312701406250030452 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for brcd fc zone client http(s).""" import time import mock from mock import patch from cinder import exception from cinder import test from cinder.zonemanager.drivers.brocade import (brcd_http_fc_zone_client as client) import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant cfgs = {'openstack_cfg': 'zone1;zone2'} cfgs_to_delete = { 'openstack_cfg': 'zone1;zone2;openstack50060b0000c26604201900051ee8e329'} zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} zones_to_delete = { 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', 'openstack50060b0000c26604201900051ee8e329': '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'} alias = {} qlps = {} ifas = {} parsed_raw_zoneinfo = "" random_no = '' session = None active_cfg = 'openstack_cfg' activate = True no_activate = False vf_enable = True ns_info = ['10:00:00:05:1e:7c:64:96'] nameserver_info = """ NSInfo Page
--BEGIN NS INFO

2;8;020800;N    ;10:00:00:05:1e:7c:64:96;20:00:00:05:1e:7c:64:96;[89]""" \
"""Brocade-825 | 3.0.4.09 | DCM-X3650-94 | Microsoft Windows Server 2003 R2"""\
    """| Service Pack 2";FCP ;      3;20:08:00:05:1e:89:54:a0;"""\
    """0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0;000000;port8"""\
    """
--END NS INFO

""" mocked_zone_string = 'zonecfginfo=openstack_cfg zone1;zone2 '\ 'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\ 'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\ 'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\ 'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ 'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ 'openstack_cfg null &saveonly=false' mocked_zone_string_no_activate = 'zonecfginfo=openstack_cfg zone1;zone2 '\ 'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\ 'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\ 'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\ 'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ 'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c &saveonly=true' zone_string_to_post = "zonecfginfo=openstack_cfg "\ "openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\ "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ "openstack50060b0000c26604201900051ee8e329 "\ "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\ "openstack_cfg null &saveonly=false" zone_string_to_post_no_activate = "zonecfginfo=openstack_cfg "\ "openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\ "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ "openstack50060b0000c26604201900051ee8e329 "\ "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 &saveonly=true" zone_string_to_post_invalid_request = "zonecfginfo=openstack_cfg "\ "openstack50060b0000c26604201900051ee8e32900000000000000000000000000;"\ "zone1;zone2 openstack50060b0000c26604201900051ee8e329000000000000000000000"\ "00000 50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\ "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 &saveonly=true" zone_string_del_to_post = "zonecfginfo=openstack_cfg zone1;zone2"\ " zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ "openstack_cfg null &saveonly=false" zone_string_del_to_post_no_active = "zonecfginfo=openstack_cfg zone1;zone2"\ " zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 &saveonly=true" zone_post_page = """
--BEGIN ZONE_TXN_INFO
txnId=34666
adId=0
user=admin
roleUser=admin
openTxnOwner=
openTxnId=0
openTxnAbortable=0
txnStarttime=1421916354
txnEndtime=1421916355
currStateInt=4
prevStateInt=3
actionInt=5
currState=done
prevState=progress
action=error
sessionId=5892021
selfAborted=false
status=done
errorCode=-1
errorMessage=Name too long
--END ZONE_TXN_INFO
""" zone_post_page_no_error = """
--BEGIN ZONE_TXN_INFO
txnId=34666
adId=0
user=admin
roleUser=admin
openTxnOwner=
openTxnId=0
openTxnAbortable=0
txnStarttime=1421916354
txnEndtime=1421916355
currStateInt=4
prevStateInt=3
actionInt=5
currState=done
prevState=progress
action=error
sessionId=5892021
selfAborted=false
status=done
errorCode=0
errorMessage=
--END ZONE_TXN_INFO
""" secinfo_resp = """
--BEGIN SECINFO
SECURITY = OFF
RANDOM = 6281590
DefaultPasswdBitmap = 0
primaryFCS = no
switchType = 66
resource = 10.24.48.210
REALM = FC Switch Administration
AUTHMETHOD = Custom_Basic
hasUpfrontLogin=yes
AUTHVERSION = 1
vfEnabled=false
vfSupported=true
--END SECINFO
""" authenticate_resp = """
--BEGIN AUTHENTICATE
authenticated = yes
username=admin
userrole=admin
adCapable=1
currentAD=AD0
trueADEnvironment=0
adId=0
adList=ALL
contextType=0
--END AUTHENTICATE
""" un_authenticate_resp = """ Authentication
--BEGIN AUTHENTICATE
authenticated = no
errCode = -3
authType = Custom_Basic
realm = FC Switch Administration
--END AUTHENTICATE
""" switch_page_resp = """
--BEGIN SWITCH INFORMATION
didOffset=96
swFWVersion=v7.3.0b_rc1_bld06
swDomain=2
--END SWITCH INFORMATION
""" switch_page_invalid_firm = """
--BEGIN SWITCH INFORMATION
didOffset=96
swFWVersion=v6.1.1
swDomain=2
--END SWITCH INFORMATION
""" parsed_value = """ didOffset=96 swFWVersion=v7.3.0b_rc1_bld06 swDomain=2 """ parsed_session_info_vf = """ sessionId=524461483 user=admin userRole=admin isAdminRole=Yes authSource=0 sessionIp=172.26.1.146 valid=yes adName= adId=128 adCapable=1 currentAD=AD0 currentADId=0 homeAD=AD0 trueADEnvironment=0 adList= adIdList= pfAdmin=0 switchIsMember=0 definedADList=AD0,Physical Fabric definedADIdList=0,255, effectiveADList=AD0,Physical Fabric rc=0 err= contextType=1 vfEnabled=true vfSupported=true HomeVF=128 sessionLFId=2 isContextManageable=1 manageableLFList=2,128, activeLFList=128,2, """ session_info_vf = """
--BEGIN SESSION
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=2
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
--END SESSION
""" session_info_vf_not_changed = """
--BEGIN SESSION
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
User-Agent=Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML,
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=128
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
--END SESSION
""" session_info_AD = """ Webtools Session Info
--BEGIN SESSION
sessionId=-2096740776
user=
userRole=root
isAdminRole=No
authSource=0
sessionIp=
User-Agent=
valid=no
adName=
adId=0
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=1
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=-2
err=Could not obtain session data from store
contextType=0
--END SESSION
""" zone_info = """ Zone Configuration Information
--BEGIN ZONE CHANGE
LastZoneChangeTime=1421926251
--END ZONE CHANGE
isZoneTxnSupported=true
ZoneLicense=true
QuickLoopLicense=true
DefZoneStatus=noaccess
McDataDefaultZone=false
McDataSafeZone=false
AvailableZoneSize=1043890
--BEGIN ZONE INFO
openstack_cfg zone1;zone2 """\
"""zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 """\
    """zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 """\
    """alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 """\
    """qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
    """fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
    """openstack_cfg null 1045274"""\
    """--END ZONE INFO
""" active_zone_set = { 'zones': {'zone1': ['20:01:00:05:33:0e:96:15', '20:00:00:05:33:0e:93:11'], 'zone2': ['20:01:00:05:33:0e:96:14', '20:00:00:05:33:0e:93:11']}, 'active_zone_config': 'openstack_cfg'} updated_zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', 'test_updated_zone': '20:01:00:05:33:0e:96:10;20:00:00:05:33:0e:93:11'} updated_cfgs = {'openstack_cfg': 'test_updated_zone;zone1;zone2'} valid_zone_name = "openstack50060b0000c26604201900051ee8e329" class TestBrcdHttpFCZoneClient(client.BrcdHTTPFCZoneClient, test.TestCase): def setUp(self): self.auth_header = "YWRtaW46cGFzc3dvcmQ6NDM4ODEyNTIw" self.switch_user = "admin" self.switch_pwd = "password" self.protocol = "HTTPS" self.conn = None self.alias = {} self.qlps = {} self.ifas = {} self.parsed_raw_zoneinfo = "" self.random_no = '' self.session = None super(TestBrcdHttpFCZoneClient, self).setUp() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_create_auth_token(self, connect_mock): connect_mock.return_value = secinfo_resp self.assertEqual("Custom_Basic YWRtaW46cGFzc3dvcmQ6NjI4MTU5MA==", self.create_auth_token()) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_authenticate(self, connect_mock): connect_mock.return_value = authenticate_resp self.assertEqual( (True, "Custom_Basic YWRtaW46eHh4Og=="), self.authenticate()) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_authenticate_failed(self, connect_mock): connect_mock.return_value = un_authenticate_resp self.assertRaises( exception.BrocadeZoningHttpException, self.authenticate) def test_get_parsed_data(self): valid_delimiter1 = zone_constant.SWITCHINFO_BEGIN valid_delimiter2 = zone_constant.SWITCHINFO_END invalid_delimiter = "--END SWITCH INFORMATION1" self.assertEqual(parsed_value, self.get_parsed_data( switch_page_resp, valid_delimiter1, valid_delimiter2)) self.assertRaises(exception.BrocadeZoningHttpException, self.get_parsed_data, switch_page_resp, valid_delimiter1, invalid_delimiter) self.assertRaises(exception.BrocadeZoningHttpException, self.get_parsed_data, switch_page_resp, invalid_delimiter, valid_delimiter2) def test_get_nvp_value(self): valid_keyname = zone_constant.FIRMWARE_VERSION invalid_keyname = "swFWVersion1" self.assertEqual( "v7.3.0b_rc1_bld06", self.get_nvp_value(parsed_value, valid_keyname)) self.assertRaises(exception.BrocadeZoningHttpException, self.get_nvp_value, parsed_value, invalid_keyname) def test_get_managable_vf_list(self): manageable_list = ['2', '128'] self.assertEqual( manageable_list, self.get_managable_vf_list(session_info_vf)) self.assertRaises(exception.BrocadeZoningHttpException, self.get_managable_vf_list, session_info_AD) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled') def test_check_change_vf_context_vf_enabled(self, is_vf_enabled_mock): is_vf_enabled_mock.return_value = (True, session_info_vf) self.vfid = None self.assertRaises( exception.BrocadeZoningHttpException, self.check_change_vf_context) self.vfid = "2" with mock.patch.object(self, 'change_vf_context') \ as change_vf_context_mock: self.check_change_vf_context() change_vf_context_mock.assert_called_once_with( self.vfid, session_info_vf) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled') def test_check_change_vf_context_vf_disabled(self, is_vf_enabled_mock): is_vf_enabled_mock.return_value = (False, session_info_AD) 
self.vfid = "128" self.assertRaises( exception.BrocadeZoningHttpException, self.check_change_vf_context) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') @mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_change_vf_context_valid(self, connect_mock, get_managable_vf_list_mock): get_managable_vf_list_mock.return_value = ['2', '128'] connect_mock.return_value = session_info_vf self.assertIsNone(self.change_vf_context("2", session_info_vf)) data = zone_constant.CHANGE_VF.format(vfid="2") headers = {zone_constant.AUTH_HEADER: self.auth_header} connect_mock.assert_called_once_with( zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, data, headers) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') @mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_change_vf_context_vf_not_changed(self, connect_mock, get_managable_vf_list_mock): get_managable_vf_list_mock.return_value = ['2', '128'] connect_mock.return_value = session_info_vf_not_changed self.assertRaises(exception.BrocadeZoningHttpException, self.change_vf_context, "2", session_info_vf) data = zone_constant.CHANGE_VF.format(vfid="2") headers = {zone_constant.AUTH_HEADER: self.auth_header} connect_mock.assert_called_once_with( zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, data, headers) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') def test_change_vf_context_vfid_not_managaed(self, get_managable_vf_list_mock): get_managable_vf_list_mock.return_value = ['2', '128'] self.assertRaises(exception.BrocadeZoningHttpException, self.change_vf_context, "12", session_info_vf) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_is_supported_firmware(self, connect_mock): connect_mock.return_value = switch_page_resp self.assertTrue(self.is_supported_firmware()) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_is_supported_firmware_invalid(self, connect_mock): connect_mock.return_value = switch_page_invalid_firm self.assertFalse(self.is_supported_firmware()) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_get_active_zone_set(self, connect_mock): connect_mock.return_value = zone_info returned_zone_map = self.get_active_zone_set() self.assertDictMatch(active_zone_set, returned_zone_map) def test_form_zone_string(self): new_alias = { 'alia1': '10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'} new_qlps = {'qlp': '10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'} new_ifas = {'fa1': '20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'} self.assertEqual(mocked_zone_string, self.form_zone_string( cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, True)) self.assertEqual(mocked_zone_string_no_activate, self.form_zone_string( cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, False)) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_add_zones_activate(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs.copy() self.zones = zones.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg add_zones_info = {valid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } self.add_zones(add_zones_info, True) post_zone_data_mock.assert_called_once_with(zone_string_to_post) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_add_zones_invalid_zone_name(self, post_zone_data_mock): post_zone_data_mock.return_value = ("-1", "Name Too Long") self.cfgs = cfgs.copy() self.zones = 
zones.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg invalid_zone_name = valid_zone_name + "00000000000000000000000000" add_zones_info = {invalid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } self.assertRaises( exception.BrocadeZoningHttpException, self.add_zones, add_zones_info, False) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_add_zones_no_activate(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs.copy() self.zones = zones.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg add_zones_info = {valid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } self.add_zones(add_zones_info, False) post_zone_data_mock.assert_called_once_with( zone_string_to_post_no_activate) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_delete_zones_activate(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs_to_delete.copy() self.zones = zones_to_delete.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg delete_zones_info = valid_zone_name self.delete_zones(delete_zones_info, True) post_zone_data_mock.assert_called_once_with(zone_string_del_to_post) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_delete_zones_no_activate(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs_to_delete.copy() self.zones = zones_to_delete.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg delete_zones_info = valid_zone_name self.delete_zones(delete_zones_info, False) post_zone_data_mock.assert_called_once_with( zone_string_del_to_post_no_active) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_delete_zones_invalid_zone_name(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs_to_delete.copy() self.zones = zones_to_delete.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg delete_zones_info = 'openstack50060b0000c26604201900051ee8e32' self.assertRaises(exception.BrocadeZoningHttpException, self.delete_zones, delete_zones_info, False) @patch.object(time, 'sleep') @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_post_zone_data(self, connect_mock, sleep_mock): connect_mock.return_value = zone_post_page self.assertEqual( ("-1", "Name too long"), self.post_zone_data(zone_string_to_post)) connect_mock.return_value = zone_post_page_no_error self.assertEqual(("0", ""), self.post_zone_data(zone_string_to_post)) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_get_nameserver_info(self, connect_mock): connect_mock.return_value = nameserver_info self.assertEqual(ns_info, self.get_nameserver_info()) @patch.object(client.BrcdHTTPFCZoneClient, 'get_session_info') def test_is_vf_enabled(self, get_session_info_mock): get_session_info_mock.return_value = session_info_vf self.assertEqual((True, parsed_session_info_vf), self.is_vf_enabled()) def test_delete_update_zones_cfgs(self): cfgs = {'openstack_cfg': 'zone1;zone2'} zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} delete_zones_info = valid_zone_name self.assertEqual( (zones, cfgs, active_cfg), self.delete_update_zones_cfgs( 
cfgs_to_delete.copy(), zones_to_delete.copy(), delete_zones_info, active_cfg)) cfgs = {'openstack_cfg': 'zone2'} zones = {'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} delete_zones_info = valid_zone_name + ";zone1" self.assertEqual( (zones, cfgs, active_cfg), self.delete_update_zones_cfgs( cfgs_to_delete.copy(), zones_to_delete.copy(), delete_zones_info, active_cfg)) def test_add_update_zones_cfgs(self): add_zones_info = {valid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } updated_cfgs = { 'openstack_cfg': valid_zone_name + ';zone1;zone2'} updated_zones = { 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', valid_zone_name: '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'} self.assertEqual((updated_zones, updated_cfgs, active_cfg), self.add_update_zones_cfgs( cfgs.copy(), zones.copy(), add_zones_info, active_cfg, "openstack_cfg")) add_zones_info = {valid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'], 'test4': ['20:06:0b:00:00:b2:66:07', '20:10:00:05:1e:b8:c3:19'] } updated_cfgs = { 'openstack_cfg': 'test4;openstack50060b0000c26604201900051ee8e329;zone1;zone2'} updated_zones = { 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', valid_zone_name: '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29', 'test4': '20:06:0b:00:00:b2:66:07;20:10:00:05:1e:b8:c3:19'} self.assertEqual( (updated_zones, updated_cfgs, active_cfg), self.add_update_zones_cfgs( cfgs.copy(), zones.copy(), add_zones_info, active_cfg, "openstack_cfg")) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_get_zone_info(self, connect_mock): connect_mock.return_value = zone_info self.get_zone_info() self.assertEqual({'openstack_cfg': 'zone1;zone2'}, self.cfgs) self.assertEqual( {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}, self.zones) self.assertEqual('openstack_cfg', self.active_cfg) self.assertEqual( {'alia1': '10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'}, self.alias) self.assertEqual( {'fa1': '20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}, self.ifas) self.assertEqual( {'qlp': '10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}, self.qlps) cinder-8.0.0/cinder/tests/unit/zonemanager/test_cisco_fc_zone_client_cli.py0000664000567000056710000003012312701406250030414 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
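# NOTE: test__cfg_save_with_retry and test__cfg_save_with_error below
# assume that _cfg_save() retries the 'copy running-config
# startup-config' SSH call up to five times before re-raising. A
# minimal sketch of that retry shape under those assumptions; the
# helper name save_config_with_retry, the attempt-count default and
# the sleep interval are illustrative, not taken from the driver.

import time


def save_config_with_retry(client, attempts=5, interval=1):
    cmd = ['copy', 'running-config', 'startup-config']
    for attempt in range(attempts):
        try:
            # Mirrors the _run_ssh(cmd_list, check_exit_code) calls the
            # tests assert on.
            return client._run_ssh(cmd, True)
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(interval)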
# """Unit tests for Cisco fc zone client cli.""" import time import mock from oslo_concurrency import processutils from six.moves import range from cinder import exception from cinder import test from cinder.zonemanager.drivers.cisco \ import cisco_fc_zone_client_cli as cli import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant nsshow = '20:1a:00:05:1e:e8:e3:29' switch_data = ['VSAN 303\n', '----------------------------------------------------------\n', 'FCID TYPE PWWN (VENDOR) FC4-TYPE:FEATURE\n', '----------------------------------------------------------\n', '0x030001 N 20:1a:00:05:1e:e8:e3:29 (Cisco) ipfc\n', '0x030101 NL 10:00:00:00:77:99:60:2c (Interphase)\n', '0x030200 NL 10:00:00:49:c9:28:c7:01\n'] cfgactv = ['zoneset name OpenStack_Cfg vsan 303\n', 'zone name openstack50060b0000c26604201900051ee8e329 vsan 303\n', 'pwwn 50:06:0b:00:00:c2:66:04\n', 'pwwn 20:19:00:05:1e:e8:e3:29\n'] active_zoneset = { 'zones': { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}, 'active_zone_config': 'OpenStack_Cfg'} zoning_status_data_basic = [ 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', ' mode: basic merge-control: allow\n', ' session: none\n', ' hard-zoning: enabled broadcast: unsupported\n', ' smart-zoning: disabled\n', ' rscn-format: fabric-address\n', 'Default zone:\n', ' qos: none broadcast: unsupported ronly: unsupported\n', 'Full Zoning Database :\n', ' DB size: 220 bytes\n', ' Zonesets:2 Zones:2 Aliases: 0\n', 'Active Zoning Database :\n', ' DB size: 80 bytes\n', ' Name: test-zs-test Zonesets:1 Zones:1\n', 'Status:\n'] zoning_status_basic = {'mode': 'basic', 'session': 'none'} zoning_status_data_enhanced_nosess = [ 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', ' mode: enhanced merge-control: allow\n', ' session: none\n', ' hard-zoning: enabled broadcast: unsupported\n', ' smart-zoning: disabled\n', ' rscn-format: fabric-address\n', 'Default zone:\n', ' qos: none broadcast: unsupported ronly: unsupported\n', 'Full Zoning Database :\n', ' DB size: 220 bytes\n', ' Zonesets:2 Zones:2 Aliases: 0\n', 'Active Zoning Database :\n', ' DB size: 80 bytes\n', ' Name: test-zs-test Zonesets:1 Zones:1\n', 'Status:\n'] zoning_status_enhanced_nosess = {'mode': 'enhanced', 'session': 'none'} zoning_status_data_enhanced_sess = [ 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', ' mode: enhanced merge-control: allow\n', ' session: otherthannone\n', ' hard-zoning: enabled broadcast: unsupported\n', ' smart-zoning: disabled\n', ' rscn-format: fabric-address\n', 'Default zone:\n', ' qos: none broadcast: unsupported ronly: unsupported\n', 'Full Zoning Database :\n', ' DB size: 220 bytes\n', ' Zonesets:2 Zones:2 Aliases: 0\n', 'Active Zoning Database :\n', ' DB size: 80 bytes\n', ' Name: test-zs-test Zonesets:1 Zones:1\n', 'Status:\n'] zoning_status_enhanced_sess = {'mode': 'enhanced', 'session': 'otherthannone'} active_zoneset_multiple_zones = { 'zones': { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'], 'openstack10000012345678902001009876543210': ['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']}, 'active_zone_config': 'OpenStack_Cfg'} new_zone = {'openstack10000012345678902001009876543210': ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']} new_zones = {'openstack10000012345678902001009876543210': ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'], 'openstack10000011111111112001001111111111': 
['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']} zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329' class TestCiscoFCZoneClientCLI(cli.CiscoFCZoneClientCLI, test.TestCase): def setUp(self): super(TestCiscoFCZoneClientCLI, self).setUp() self.fabric_vsan = '303' # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_active_zone_set(self, get_switch_info_mock): cmd_list = [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan, ' | no-more'] get_switch_info_mock.return_value = cfgactv active_zoneset_returned = self.get_active_zone_set() get_switch_info_mock.assert_called_once_with(cmd_list) self.assertDictMatch(active_zoneset, active_zoneset_returned) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') def test_get_active_zone_set_ssh_error(self, run_ssh_mock): run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(exception.CiscoZoningCliException, self.get_active_zone_set) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_zoning_status_basic(self, get_zoning_status_mock): cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] get_zoning_status_mock.return_value = zoning_status_data_basic zoning_status_returned = self.get_zoning_status() get_zoning_status_mock.assert_called_once_with(cmd_list) self.assertDictMatch(zoning_status_basic, zoning_status_returned) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_zoning_status_enhanced_nosess(self, get_zoning_status_mock): cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] get_zoning_status_mock.return_value =\ zoning_status_data_enhanced_nosess zoning_status_returned = self.get_zoning_status() get_zoning_status_mock.assert_called_once_with(cmd_list) self.assertDictMatch(zoning_status_enhanced_nosess, zoning_status_returned) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_zoning_status_enhanced_sess(self, get_zoning_status_mock): cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] get_zoning_status_mock.return_value = zoning_status_data_enhanced_sess zoning_status_returned = self.get_zoning_status() get_zoning_status_mock.assert_called_once_with(cmd_list) self.assertDictMatch(zoning_status_enhanced_sess, zoning_status_returned) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_nameserver_info(self, get_switch_info_mock): ns_info_list = [] ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29'] get_switch_info_mock.return_value = (switch_data) ns_info_list = self.get_nameserver_info() self.assertEqual(ns_info_list_expected, ns_info_list) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') def test_get_nameserver_info_ssh_error(self, run_ssh_mock): run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(exception.CiscoZoningCliException, self.get_nameserver_info) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') def test__cfg_save(self, run_ssh_mock): cmd_list = ['copy', 'running-config', 'startup-config'] self._cfg_save() run_ssh_mock.assert_called_once_with(cmd_list, True) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') @mock.patch.object(time, 'sleep') def test__cfg_save_with_retry(self, mock_sleep, run_ssh_mock): cmd_list = ['copy', 'running-config', 'startup-config'] run_ssh_mock.side_effect = [ processutils.ProcessExecutionError, ('', None) ] self._cfg_save() self.assertEqual(2, 
run_ssh_mock.call_count) run_ssh_mock.assert_has_calls([ mock.call(cmd_list, True), mock.call(cmd_list, True) ]) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') @mock.patch.object(time, 'sleep') def test__cfg_save_with_error(self, mock_sleep, run_ssh_mock): cmd_list = ['copy', 'running-config', 'startup-config'] run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(processutils.ProcessExecutionError, self._cfg_save) expected_num_calls = 5 expected_calls = [] for i in range(expected_num_calls): expected_calls.append(mock.call(cmd_list, True)) self.assertEqual(expected_num_calls, run_ssh_mock.call_count) run_ssh_mock.assert_has_calls(expected_calls) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') def test__get_switch_info(self, run_ssh_mock): cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan] nsshow_list = [nsshow] run_ssh_mock.return_value = (Stream(nsshow), Stream()) switch_data = self._get_switch_info(cmd_list) self.assertEqual(nsshow_list, switch_data) run_ssh_mock.assert_called_once_with(cmd_list, True) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_ssh_execute') @mock.patch.object(cli.CiscoFCZoneClientCLI, '_cfg_save') def test__add_zones_with_update(self, ssh_execute_mock, cfg_save_mock): self.add_zones(new_zone, False, self.fabric_vsan, active_zoneset_multiple_zones, zoning_status_basic) self.assertEqual(2, ssh_execute_mock.call_count) self.assertEqual(2, cfg_save_mock.call_count) def test__parse_ns_output(self): return_wwn_list = [] expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29'] return_wwn_list = self._parse_ns_output(switch_data) self.assertEqual(expected_wwn_list, return_wwn_list) class TestCiscoFCZoneClientCLISSH(test.TestCase): def setUp(self): super(TestCiscoFCZoneClientCLISSH, self).setUp() self.client = cli.CiscoFCZoneClientCLI(None, None, None, None, None) self.client.sshpool = mock.MagicMock() self.mock_ssh = self.client.sshpool.item().__enter__() @mock.patch('oslo_concurrency.processutils.ssh_execute') def test__run_ssh(self, mock_execute): mock_execute.return_value = 'ssh output' ret = self.client._run_ssh(['cat', 'foo']) self.assertEqual('ssh output', ret) mock_execute.assert_called_once_with(self.mock_ssh, 'cat foo', check_exit_code=True) @mock.patch('oslo_concurrency.processutils.ssh_execute') def test__run_ssh_with_error(self, mock_execute): mock_execute.side_effect = processutils.ProcessExecutionError() self.assertRaises(processutils.ProcessExecutionError, self.client._run_ssh, ['cat', 'foo']) class Channel(object): def recv_exit_status(self): return 0 class Stream(object): def __init__(self, buffer=''): self.buffer = buffer self.channel = Channel() def readlines(self): return self.buffer def splitlines(self): return self.buffer.splitlines() def close(self): pass def flush(self): self.buffer = '' cinder-8.0.0/cinder/tests/unit/zonemanager/test_fc_zone_manager.py0000664000567000056710000001067312701406250026551 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for FC Zone Manager.""" import mock from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.zonemanager.drivers import fc_zone_driver from cinder.zonemanager import fc_zone_manager fabric_name = 'BRCD_FAB_3' init_target_map = {'10008c7cff523b01': ['20240002ac000a50']} conn_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '20240002ac000a50', 'initiator_target_map': { '10008c7cff523b01': ['20240002ac000a50'] } } } fabric_map = {'BRCD_FAB_3': ['20240002ac000a50']} target_list = ['20240002ac000a50'] class TestFCZoneManager(test.TestCase): @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def setUp(self, opt_mock): super(TestFCZoneManager, self).setUp() config = conf.Configuration(None) config.fc_fabric_names = fabric_name def fake_build_driver(self): self.driver = mock.Mock(fc_zone_driver.FCZoneDriver) self.stubs.Set(fc_zone_manager.ZoneManager, '_build_driver', fake_build_driver) self.zm = fc_zone_manager.ZoneManager(configuration=config) self.configuration = conf.Configuration(None) self.configuration.fc_fabric_names = fabric_name self.driver = mock.Mock(fc_zone_driver.FCZoneDriver) def __init__(self, *args, **kwargs): super(TestFCZoneManager, self).__init__(*args, **kwargs) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def test_add_connection(self, opt_mock): with mock.patch.object(self.zm.driver, 'add_connection')\ as add_connection_mock: self.zm.driver.get_san_context.return_value = fabric_map self.zm.add_connection(conn_info) self.zm.driver.get_san_context.assert_called_once_with(target_list) add_connection_mock.assert_called_once_with(fabric_name, init_target_map, None, None) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def test_add_connection_error(self, opt_mock): with mock.patch.object(self.zm.driver, 'add_connection')\ as add_connection_mock: add_connection_mock.side_effect = exception.FCZoneDriverException self.assertRaises(exception.ZoneManagerException, self.zm.add_connection, conn_info) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def test_delete_connection(self, opt_mock): with mock.patch.object(self.zm.driver, 'delete_connection')\ as delete_connection_mock: self.zm.driver.get_san_context.return_value = fabric_map self.zm.delete_connection(conn_info) self.zm.driver.get_san_context.assert_called_once_with(target_list) delete_connection_mock.assert_called_once_with(fabric_name, init_target_map, None, None) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def test_delete_connection_error(self, opt_mock): with mock.patch.object(self.zm.driver, 'delete_connection')\ as del_connection_mock: del_connection_mock.side_effect = exception.FCZoneDriverException self.assertRaises(exception.ZoneManagerException, self.zm.delete_connection, conn_info) cinder-8.0.0/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py0000664000567000056710000001726312701406250027614 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for Cisco FC zone driver.""" from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import importutils from cinder import exception from cinder import test from cinder.volume import configuration as conf _active_cfg_before_add = {} _active_cfg_before_delete = { 'zones': { 'openstack10008c7cff523b0120240002ac000a50': ( ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])}, 'active_zone_config': 'cfg1'} _activate = True _zone_name = 'openstack10008c7cff523b0120240002ac000a50' _target_ns_map = {'100000051e55a100': ['20240002ac000a50']} _zoning_status = {'mode': 'basis', 'session': 'none'} _initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} _zone_map_to_add = {'openstack10008c7cff523b0120240002ac000a50': ( ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])} _initiator_target_map = {'10008c7cff523b01': ['20240002ac000a50']} _device_map_to_verify = { '304': { 'initiator_port_wwn_list': [ '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} _fabric_wwn = '304' class CiscoFcZoneDriverBaseTest(object): def setup_config(self, is_normal, mode): fc_test_opts = [ cfg.StrOpt('fc_fabric_address_CISCO_FAB_1', default='10.24.48.213', help='FC Fabric names'), ] configuration = conf.Configuration(fc_test_opts) # fill up config configuration.zoning_mode = 'fabric' configuration.zone_driver = ('cinder.tests.unit.zonemanager.' 'test_cisco_fc_zone_driver.' 'FakeCiscoFCZoneDriver') configuration.cisco_sb_connector = ('cinder.tests.unit.zonemanager.' 'test_cisco_fc_zone_driver' '.FakeCiscoFCZoneClientCLI') configuration.zoning_policy = 'initiator-target' configuration.zone_activate = True configuration.zone_name_prefix = 'openstack' configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager.' 'test_cisco_fc_zone_driver.' 
'FakeCiscoFCSanLookupService') configuration.fc_fabric_names = 'CISCO_FAB_1' configuration.fc_fabric_address_CISCO_FAB_1 = '172.21.60.220' if (is_normal): configuration.fc_fabric_user_CISCO_FAB_1 = 'admin' else: configuration.fc_fabric_user_CISCO_FAB_1 = 'invaliduser' configuration.fc_fabric_password_CISCO_FAB_1 = 'admin1234' if (mode == 1): configuration.zoning_policy_CISCO_FAB_1 = 'initiator-target' elif (mode == 2): configuration.zoning_policy_CISCO_FAB_1 = 'initiator' else: configuration.zoning_policy_CISCO_FAB_1 = 'initiator-target' configuration.zone_activate_CISCO_FAB_1 = True configuration.zone_name_prefix_CISCO_FAB_1 = 'openstack' configuration.zoning_vsan_CISCO_FAB_1 = '304' return configuration class TestCiscoFcZoneDriver(CiscoFcZoneDriverBaseTest, test.TestCase): def setUp(self): super(TestCiscoFcZoneDriver, self).setUp() # setup config for normal flow self.setup_driver(self.setup_config(True, 1)) GlobalVars._zone_state = [] def setup_driver(self, config): self.driver = importutils.import_object( 'cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver' '.CiscoFCZoneDriver', configuration=config) def fake_get_active_zone_set(self, fabric_ip, fabric_user, fabric_pwd, zoning_vsan): return GlobalVars._active_cfg def fake_get_san_context(self, target_wwn_list): fabric_map = {} return fabric_map def test_delete_connection(self): GlobalVars._is_normal_test = True GlobalVars._active_cfg = _active_cfg_before_delete self.driver.delete_connection( 'CISCO_FAB_1', _initiator_target_map) self.assertFalse(_zone_name in GlobalVars._zone_state) def test_delete_connection_for_initiator_mode(self): GlobalVars._is_normal_test = True GlobalVars._active_cfg = _active_cfg_before_delete self.setup_driver(self.setup_config(True, 2)) self.driver.delete_connection( 'CISCO_FAB_1', _initiator_target_map) self.assertFalse(_zone_name in GlobalVars._zone_state) def test_add_connection_for_invalid_fabric(self): """Test abnormal flows.""" GlobalVars._is_normal_test = True GlobalVars._active_cfg = _active_cfg_before_add GlobalVars._is_normal_test = False self.setup_driver(self.setup_config(False, 1)) self.assertRaises(exception.FCZoneDriverException, self.driver.add_connection, 'CISCO_FAB_1', _initiator_target_map) def test_delete_connection_for_invalid_fabric(self): GlobalVars._active_cfg = _active_cfg_before_delete GlobalVars._is_normal_test = False self.setup_driver(self.setup_config(False, 1)) self.assertRaises(exception.FCZoneDriverException, self.driver.delete_connection, 'CISCO_FAB_1', _initiator_target_map) class FakeCiscoFCZoneClientCLI(object): def __init__(self, ipaddress, username, password, port, vsan): if not GlobalVars._is_normal_test: raise processutils.ProcessExecutionError( "Unable to connect to fabric") def get_active_zone_set(self): return GlobalVars._active_cfg def add_zones(self, zones, isActivate): GlobalVars._zone_state.extend(zones.keys()) def delete_zones(self, zone_names, isActivate): zone_list = zone_names.split(';') GlobalVars._zone_state = [ x for x in GlobalVars._zone_state if x not in zone_list] def get_nameserver_info(self): return _target_ns_map def get_zoning_status(self): return _zoning_status def close_connection(self): pass def cleanup(self): pass class FakeCiscoFCSanLookupService(object): def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): device_map = {} initiators = [] targets = [] for i in initiator_wwn_list: if (i in _initiator_ns_map[_fabric_wwn]): initiators.append(i) for t in target_wwn_list: if (t in _target_ns_map[_fabric_wwn]): 
targets.append(t) device_map[_fabric_wwn] = { 'initiator_port_wwn_list': initiators, 'target_port_wwn_list': targets} return device_map class GlobalVars(object): global _active_cfg _active_cfg = {} global _zone_state _zone_state = list() global _is_normal_test _is_normal_test = True global _zoning_status _zoning_status = {} cinder-8.0.0/cinder/tests/unit/zonemanager/test_brcd_fc_san_lookup_service.py0000664000567000056710000001415412701406250030766 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for brcd fc san lookup service.""" import mock from oslo_concurrency import processutils as putils from oslo_config import cfg from cinder import exception from cinder import ssh_utils from cinder import test from cinder.volume import configuration as conf import cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service \ as brcd_lookup from cinder.zonemanager.drivers.brocade import fc_zone_constants parsed_switch_port_wwns = ['20:1a:00:05:1e:e8:e3:29', '10:00:00:90:fa:34:40:f6'] switch_data = (""" Type Pid COS PortName NodeName TTL(sec) N 011a00; 2,3; %(port_1)s; 20:1a:00:05:1e:e8:e3:29; na FC4s: FCP PortSymb: [26] "222222 - 1:1:1 - LPe12442" NodeSymb: [32] "SomeSym 7211" Fabric Port Name: 20:1a:00:05:1e:e8:e3:29 Permanent Port Name: 22:22:00:22:ac:00:bc:b0 Port Index: 0 Share Area: No Device Shared in Other AD: No Redirect: No Partial: No LSAN: No N 010100; 2,3; %(port_2)s; 20:00:00:00:af:00:00:af; na FC4s: FCP PortSymb: [26] "333333 - 1:1:1 - LPe12442" NodeSymb: [32] "SomeSym 2222" Fabric Port Name: 10:00:00:90:fa:34:40:f6 Permanent Port Name: 22:22:00:22:ac:00:bc:b0 Port Index: 0 Share Area: No Device Shared in Other AD: No Redirect: No Partial: No LSAN: No""" % {'port_1': parsed_switch_port_wwns[0], 'port_2': parsed_switch_port_wwns[1]}) _device_map_to_verify = { 'BRCD_FAB_2': { 'initiator_port_wwn_list': [parsed_switch_port_wwns[1].replace(':', '')], 'target_port_wwn_list': [parsed_switch_port_wwns[0].replace(':', '')]}} class TestBrcdFCSanLookupService(brcd_lookup.BrcdFCSanLookupService, test.TestCase): def setUp(self): super(TestBrcdFCSanLookupService, self).setUp() self.configuration = conf.Configuration(None) self.configuration.set_default('fc_fabric_names', 'BRCD_FAB_2', 'fc-zone-manager') self.configuration.fc_fabric_names = 'BRCD_FAB_2' self.create_configuration() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) def create_configuration(self): fc_fabric_opts = [] fc_fabric_opts.append(cfg.StrOpt('fc_fabric_address', default='10.24.49.100', help='')) fc_fabric_opts.append(cfg.StrOpt('fc_fabric_user', default='admin', help='')) fc_fabric_opts.append(cfg.StrOpt('fc_fabric_password', default='password', help='', secret=True)) fc_fabric_opts.append(cfg.PortOpt('fc_fabric_port', default=22, help='')) fc_fabric_opts.append(cfg.StrOpt('principal_switch_wwn', default='100000051e55a100', help='')) config = 
conf.Configuration(fc_fabric_opts, 'BRCD_FAB_2') self.fabric_configs = {'BRCD_FAB_2': config} @mock.patch.object(brcd_lookup.BrcdFCSanLookupService, 'get_nameserver_info') @mock.patch('cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service' '.ssh_utils.SSHPool') def test_get_device_mapping_from_network(self, mock_ssh_pool, get_nameserver_info_mock): initiator_list = [parsed_switch_port_wwns[1]] target_list = [parsed_switch_port_wwns[0], '20240002ac000a40'] get_nameserver_info_mock.return_value = parsed_switch_port_wwns device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictMatch(_device_map_to_verify, device_map) @mock.patch.object(brcd_lookup.BrcdFCSanLookupService, '_get_switch_data') def test_get_nameserver_info(self, get_switch_data_mock): ns_info_list = [] get_switch_data_mock.return_value = (switch_data) # get_switch_data will be called twice with the results appended ns_info_list_expected = (parsed_switch_port_wwns + parsed_switch_port_wwns) ns_info_list = self.get_nameserver_info(None) self.assertEqual(ns_info_list_expected, ns_info_list) @mock.patch.object(putils, 'ssh_execute', return_value=(switch_data, '')) @mock.patch.object(ssh_utils.SSHPool, 'item') def test__get_switch_data(self, ssh_pool_mock, ssh_execute_mock): actual_switch_data = self._get_switch_data(ssh_pool_mock, fc_zone_constants.NS_SHOW) self.assertEqual(actual_switch_data, switch_data) ssh_execute_mock.side_effect = putils.ProcessExecutionError() self.assertRaises(exception.FCSanLookupServiceException, self._get_switch_data, ssh_pool_mock, fc_zone_constants.NS_SHOW) def test__parse_ns_output(self): invalid_switch_data = ' N 011a00;20:1a:00:05:1e:e8:e3:29' return_wwn_list = [] return_wwn_list = self._parse_ns_output(switch_data) self.assertEqual(parsed_switch_port_wwns, return_wwn_list) self.assertRaises(exception.InvalidParameterValue, self._parse_ns_output, invalid_switch_data) cinder-8.0.0/cinder/tests/unit/zonemanager/test_cisco_lookup_service.py0000664000567000056710000000702012701406250027635 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
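# NOTE: every lookup-service test in this directory checks the same
# device-map shape: {fabric: {'initiator_port_wwn_list': [...],
# 'target_port_wwn_list': [...]}}, keeping only WWNs that the fabric's
# name server actually reports. A minimal sketch of that filtering;
# build_device_map and fabric_ns_wwns are hypothetical names standing
# in for the driver's name-server query result.


def build_device_map(fabric, fabric_ns_wwns, initiators, targets):
    ns = set(fabric_ns_wwns)
    return {
        fabric: {
            'initiator_port_wwn_list': [i for i in initiators if i in ns],
            'target_port_wwn_list': [t for t in targets if t in ns],
        }
    }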
# """Unit tests for Cisco FC san lookup service.""" from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.zonemanager import fc_san_lookup_service as san_service _target_ns_map = {'100000051e55a100': ['20240002ac000a50']} _initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} _device_map_to_verify = { '100000051e55a100': { 'initiator_port_wwn_list': [ '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} _fabric_wwn = '100000051e55a100' class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase): def setUp(self): super(TestFCSanLookupService, self).setUp() self.configuration = self.setup_config() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) def setup_config(self): configuration = conf.Configuration(None) # fill up config configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager' '.test_cisco_lookup_service' '.FakeCiscoFCSanLookupService') return configuration def test_get_device_mapping_from_network(self): GlobalParams._is_normal_test = True initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictMatch(_device_map_to_verify, device_map) def test_get_device_mapping_from_network_for_invalid_config(self): GlobalParams._is_normal_test = False initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] self.assertRaises(exception.FCSanLookupServiceException, self.get_device_mapping_from_network, initiator_list, target_list) class FakeCiscoFCSanLookupService(object): def __init__(self, **kwargs): pass def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): if not GlobalParams._is_normal_test: raise exception.FCSanLookupServiceException("Error") device_map = {} initiators = [] targets = [] for i in initiator_wwn_list: if (i in _initiator_ns_map[_fabric_wwn]): initiators.append(i) for t in target_wwn_list: if (t in _target_ns_map[_fabric_wwn]): targets.append(t) device_map[_fabric_wwn] = { 'initiator_port_wwn_list': initiators, 'target_port_wwn_list': targets} return device_map class GlobalParams(object): global _is_normal_test _is_normal_test = True cinder-8.0.0/cinder/tests/unit/test_hitachi_hbsd_snm2_iscsi.py0000664000567000056710000006767712701406257025717 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """ Self test for Hitachi Block Storage Driver """ import mock from cinder import exception from cinder import test from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.hitachi import hbsd_basiclib from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_iscsi from cinder.volume.drivers.hitachi import hbsd_snm2 def _exec_hsnm(*args, **kargs): return HBSDSNM2ISCSIDriverTest.hsnm_vals.get(args) def _exec_hsnm_init(*args, **kargs): return HBSDSNM2ISCSIDriverTest.hsnm_vals_init.get(args) class HBSDSNM2ISCSIDriverTest(test.TestCase): """Test HBSDSNM2ISCSIDriver.""" audppool_result = " DP RAID \ Current Utilization Current Over Replication\ Available Current Replication Rotational \ \ Stripe \ Needing Preparation\n\ Pool Tier Mode Level Total Capacity Consumed Capacity \ Percent Provisioning Percent Capacity \ Utilization Percent Type Speed Encryption Status \ \ Reconstruction Progress Size Capacity\n\ 30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \ 1% 24835% 532.0 GB \ 1% SAS 10000rpm N/A Normal \ N/A \ 256KB 0.0 GB" aureplicationlocal_result = "Pair Name LUN Pair \ LUN Status Copy Type Group \ Point-in-Time MU Number\n\ 0 10 0 Split( 99%) \ ShadowImage ---:Ungrouped N/A\ " auluref_result = " Stripe RAID DP Tier \ RAID Rotational Number\n\ LU Capacity Size Group Pool Mode Level Type\ Speed of Paths Status\n\ 0 2097152 blocks 256KB 0 0 Enable 0 Normal" auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \ Name Port Name Host Group\n\ HBSD-00 10000000C97BCE7A 001:HBSD-01\n\ Assigned WWN\n Name Port Name \ Host Group\n abcdefg 10000000C97BCE7A \ 001:HBSD-01" autargetini_result = "Port 00 Target Security ON\n\ Target Name \ iSCSI Name\n\ 001:HBSD-01 \ iqn" autargetini_result2 = "Port 00 Target Security ON\n\ Target Name \ iSCSI Name" autargetmap_result = "Mapping Mode = ON\n\ Port Target H-LUN LUN\n\ 00 001:HBSD-01 0 1000" auiscsi_result = "Port 00\n\ Port Number : 3260\n\ Keep Alive Timer[sec.] 
: 60\n\ MTU : 1500\n\ Transfer Rate : 1Gbps\n\ Link Status : Link Up\n\ Ether Address : 00:00:87:33:D1:3E\n\ IPv4\n\ IPv4 Address : 192.168.0.1\n\ IPv4 Subnet Mask : 255.255.252.0\n\ IPv4 Default Gateway : 0.0.0.0\n\ IPv6 Status : Disable\n\ Connecting Hosts : 0\n\ Result : Normal\n\ VLAN Status : Disable\n\ VLAN ID : N/A\n\ Header Digest : Enable\n\ Data Digest : Enable\n\ Window Scale : Disable" autargetdef_result = "Port 00\n\ Authentication Mutual\n\ Target Method CHAP Algorithm \ Authentication\n\ 001:T000 None --- ---\n\ User Name : ---\n\ iSCSI Name : iqn-target" hsnm_vals = { ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""], ('aureplicationlocal', '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'): [0, "", ""], ('aureplicationlocal', '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'): [1, "", ""], ('aureplicationlocal', '-unit None -refer -pvol 1'): [0, "%s" % aureplicationlocal_result, ""], ('aureplicationlocal', '-unit None -refer -pvol 3'): [1, "", "DMEC002015"], ('aureplicationlocal', '-unit None -refer -svol 3'): [1, "", "DMEC002015"], ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'): [0, "", ""], ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 1'): [1, "", ""], ('auluchgsize', '-unit None -lu 1 -size 256g'): [0, "", ""], ('auludel', '-unit None -lu 1 -f'): [0, "", ""], ('auludel', '-unit None -lu 3 -f'): [1, "", ""], ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""], ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""], ('auluref', '-unit None'): [0, "%s" % auluref_result, ""], ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""], ('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""], ('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""], ('autargetini', '-unit None -refer'): [0, "%s" % autargetini_result, ""], ('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'): [0, "", ""], ('autargetmap', '-unit None -refer'): [0, "%s" % autargetmap_result, ""], ('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \ -authmethod None'): [0, "", ""], ('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 \ -iname iqnX.target -authmethod None'): [1, "", ""], ('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \ -ReportFullPortalList enable'): [0, "", ""], ('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""], ('autargetdef', '-unit None -refer'): [0, "%s" % autargetdef_result, ""]} hsnm_vals_init = { ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""], ('aureplicationlocal', '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'): [0, 0, ""], ('aureplicationlocal', '-unit None -refer -pvol 1'): [0, "%s" % aureplicationlocal_result, ""], ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'): [0, 0, ""], ('auluchgsize', '-unit None -lu 1 -size 256g'): [0, 0, ""], ('auludel', '-unit None -lu 1 -f'): [0, "", ""], ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""], ('auluref', '-unit None'): [0, "%s" % auluref_result, ""], ('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""], ('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""], ('autargetini', '-unit None -refer'): [0, "%s" % autargetini_result2, ""], ('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'): [0, "", ""], ('autargetmap', '-unit None -refer'): [0, "%s" % autargetmap_result, ""], ('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \ 
-authmethod None'): [0, "", ""], ('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \ -ReportFullPortalList enable'): [0, "", ""], ('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""], ('autargetdef', '-unit None -refer'): [0, "%s" % autargetdef_result, ""], ('auman', '-help'): [0, "Version 27.50", ""]} # The following information is passed on to tests, when creating a volume _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0', 'provider_location': '1', 'name': 'test', 'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'} test_volume = {'name': 'test_volume', 'size': 128, 'id': 'test-volume-0', 'provider_location': '1', 'status': 'available'} test_volume_error = {'name': 'test_volume_error', 'size': 256, 'id': 'test-volume-error', 'provider_location': '3', 'status': 'available'} test_volume_error1 = {'name': 'test_volume_error', 'size': 128, 'id': 'test-volume-error', 'provider_location': None, 'status': 'available'} test_volume_error2 = {'name': 'test_volume_error', 'size': 256, 'id': 'test-volume-error', 'provider_location': '1', 'status': 'available'} test_volume_error3 = {'name': 'test_volume3', 'size': 128, 'id': 'test-volume3', 'volume_metadata': [{'key': 'type', 'value': 'V-VOL'}], 'provider_location': '1', 'status': 'available'} test_volume_error4 = {'name': 'test_volume4', 'size': 128, 'id': 'test-volume2', 'provider_location': '3', 'status': 'available'} test_snapshot = {'volume_name': 'test', 'size': 128, 'volume_size': 128, 'name': 'test-snap', 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, 'provider_location': '1', 'status': 'available'} test_snapshot_error2 = {'volume_name': 'test', 'size': 128, 'volume_size': 128, 'name': 'test-snap', 'volume_id': 0, 'id': 'test-snap-0', 'volume': test_volume_error, 'provider_location': None, 'status': 'available'} UNIT_NAME = 'HUS110_91122819' test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME} test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME} test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME} test_existing_no_ldev_ref = {'unit_name': UNIT_NAME} test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None} test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'} test_existing_no_unit_ref = {'ldev': '0'} def __init__(self, *args, **kwargs): super(HBSDSNM2ISCSIDriverTest, self).__init__(*args, **kwargs) @mock.patch.object(utils, 'brick_get_connector_properties', return_value={'ip': '0.0.0.0', 'initiator': 'iqn'}) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm_init) @mock.patch.object(utils, 'execute', return_value=['', '']) def setUp(self, args1, arg2, arg3, arg4): super(HBSDSNM2ISCSIDriverTest, self).setUp() self._setup_config() self._setup_driver() self.driver.check_param() self.driver.common.create_lock_file() self.driver.common.command.connect_storage() self.driver.max_hostgroups = \ self.driver.common.command.get_max_hostgroups() self.driver.add_hostgroup() self.driver.output_param_to_log() self.driver.do_setup_status.set() def _setup_config(self): self.configuration = mock.Mock(conf.Configuration) self.configuration.hitachi_pool_id = 30 self.configuration.hitachi_thin_pool_id = 31 self.configuration.hitachi_target_ports = "00" self.configuration.hitachi_debug_level = 0 self.configuration.hitachi_serial_number = None self.configuration.hitachi_unit_name = "None" self.configuration.hitachi_group_request = True self.configuration.hitachi_group_range = 
"0-1" self.configuration.config_group = "None" self.configuration.hitachi_ldev_range = "0-100" self.configuration.hitachi_default_copy_method = 'FULL' self.configuration.hitachi_copy_check_interval = 1 self.configuration.hitachi_async_copy_check_interval = 1 self.configuration.hitachi_copy_speed = 3 self.configuration.hitachi_auth_method = None self.configuration.hitachi_auth_user = "HBSD-CHAP-user" self.configuration.hitachi_auth_password = "HBSD-CHAP-password" self.configuration.hitachi_add_chap_user = "False" def _setup_driver(self): self.driver = hbsd_iscsi.HBSDISCSIDriver( configuration=self.configuration) context = None db = None self.driver.common = hbsd_common.HBSDCommon( self.configuration, self.driver, context, db) self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration) self.driver.common.horcmgr_flock = \ self.driver.common.command.set_horcmgr_flock() # API test cases @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_create_volume(self, arg1, arg2, arg3): """test create_volume.""" ret = self.driver.create_volume(self._VOLUME) vol = self._VOLUME.copy() vol['provider_location'] = ret['provider_location'] self.assertEqual('1', vol['provider_location']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_create_volume_error(self, arg1, arg2, arg3): """test create_volume.""" self.assertRaises(exception.HBSDCmdError, self.driver.create_volume, self.test_volume_error) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_get_volume_stats(self, arg1, arg2): """test get_volume_stats.""" stats = self.driver.get_volume_stats(True) self.assertEqual('Hitachi', stats['vendor_name']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_get_volume_stats_error(self, arg1, arg2): """test get_volume_stats.""" self.configuration.hitachi_pool_id = 29 stats = self.driver.get_volume_stats(True) self.assertEqual({}, stats) self.configuration.hitachi_pool_id = 30 @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_extend_volume(self, arg1, arg2): """test extend_volume.""" self.driver.extend_volume(self._VOLUME, 256) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_extend_volume_error(self, arg1, arg2): """test extend_volume.""" self.assertRaises(exception.HBSDError, self.driver.extend_volume, self.test_volume_error3, 256) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_delete_volume(self, arg1, arg2): """test delete_volume.""" self.driver.delete_volume(self._VOLUME) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_delete_volume_error(self, arg1, arg2): """test delete_volume.""" self.assertRaises(exception.HBSDCmdError, self.driver.delete_volume, self.test_volume_error4) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=_VOLUME)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
        """test create_snapshot."""
        ret = self.driver.create_volume(self._VOLUME)
        ret = self.driver.create_snapshot(self.test_snapshot)
        self.assertEqual('1', ret['provider_location'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=test_volume_error)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
        """test create_snapshot."""
        self.assertRaises(exception.HBSDCmdError,
                          self.driver.create_snapshot,
                          self.test_snapshot_error2)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_delete_snapshot(self, arg1, arg2):
        """test delete_snapshot."""
        self.driver.delete_snapshot(self.test_snapshot)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_delete_snapshot_error(self, arg1, arg2):
        """test delete_snapshot."""
        self.driver.delete_snapshot(self.test_snapshot_error2)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
        """test create_volume_from_snapshot."""
        vol = self.driver.create_volume_from_snapshot(self._VOLUME,
                                                      self.test_snapshot)
        self.assertIsNotNone(vol)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
        """test create_volume_from_snapshot."""
        self.assertRaises(exception.HBSDError,
                          self.driver.create_volume_from_snapshot,
                          self.test_volume_error2, self.test_snapshot)
        return

    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=_VOLUME)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
        """test create_cloned_volume."""
        vol = self.driver.create_cloned_volume(self._VOLUME,
                                               self.test_snapshot)
        self.assertIsNotNone(vol)
        return

    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=test_volume_error1)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
        """test create_cloned_volume."""
        self.assertRaises(exception.HBSDError,
                          self.driver.create_cloned_volume,
                          self._VOLUME, self.test_volume_error1)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_initialize_connection(self, arg1, arg2):
        """test initialize connection."""
        connector = {
            'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': 'iqn'}
        rc = self.driver.initialize_connection(self._VOLUME, connector)
        self.assertEqual('iscsi', rc['driver_volume_type'])
        self.assertEqual('iqn-target', rc['data']['target_iqn'])
        self.assertEqual(1, rc['data']['target_lun'])
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_initialize_connection_error(self, arg1, arg2):
        """test initialize connection."""
        connector = {
            'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': 'iqnX'}
        self.assertRaises(exception.HBSDError,
                          self.driver.initialize_connection,
                          self._VOLUME, connector)
        return

    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_terminate_connection(self, arg1):
        """test terminate connection."""
        connector = {
            'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': 'iqn'}
        self.driver.terminate_connection(self._VOLUME, connector)
        return

    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_terminate_connection_error(self, arg1):
        """test terminate connection."""
        connector = {'ip': '0.0.0.0'}
        self.assertRaises(exception.HBSDError,
                          self.driver.terminate_connection,
                          self._VOLUME, connector)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_manage_existing(self, arg1, arg2):
        rc = self.driver.manage_existing(self._VOLUME,
                                         self.test_existing_ref)
        self.assertEqual(0, rc['provider_location'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        size = self.driver.manage_existing_get_size(self._VOLUME,
                                                    self.test_existing_ref)
        self.assertEqual(1, size)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_none_ldev_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2,
                                                       arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_invalid_ldev_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_no_ldev_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_none_unit_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2,
                                                       arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_invalid_unit_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3):
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_no_unit_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_unmanage(self, arg1, arg2):
        self.driver.unmanage(self._VOLUME)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm)
    def test_unmanage_busy(self, arg1, arg2):
        self.assertRaises(exception.HBSDVolumeIsBusy,
                          self.driver.unmanage, self.test_volume_error3)
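

# Editor's sketch (illustrative only, not part of the original test module):
# the tests above stub HBSDSNM2.exec_hsnm with side_effect=_exec_hsnm, so
# every mocked CLI invocation is resolved by a plain dict lookup keyed on the
# positional argument tuple, returning a canned [return_code, stdout, stderr]
# triple (and None for any unexpected command). The standalone names below
# are hypothetical restatements of that pattern.
_canned_results = {('auluref', '-unit None'): [0, 'stdout text', '']}


def _fake_exec(*args, **kwargs):
    # Unknown argument tuples fall through to None, exactly as dict.get()
    # does in _exec_hsnm above.
    return _canned_results.get(args)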
"""Unit tests for image utils.""" import math import mock from oslo_concurrency import processutils from oslo_utils import units from cinder import exception from cinder.image import image_utils from cinder import test from cinder.volume import throttling class TestQemuImgInfo(test.TestCase): @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info(self, mock_exec, mock_info): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) output = image_utils.qemu_img_info(test_path) mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img', 'info', test_path, run_as_root=True) self.assertEqual(mock_info.return_value, output) @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_not_root(self, mock_exec, mock_info): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) output = image_utils.qemu_img_info(test_path, run_as_root=False) mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img', 'info', test_path, run_as_root=False) self.assertEqual(mock_info.return_value, output) @mock.patch('cinder.image.image_utils.os') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_on_nt(self, mock_exec, mock_info, mock_os): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) mock_os.name = 'nt' output = image_utils.qemu_img_info(test_path) mock_exec.assert_called_once_with('qemu-img', 'info', test_path, run_as_root=True) self.assertEqual(mock_info.return_value, output) @mock.patch('cinder.utils.execute') def test_get_qemu_img_version(self, mock_exec): mock_out = "qemu-img version 2.0.0" mock_err = mock.sentinel.err mock_exec.return_value = (mock_out, mock_err) expected_version = [2, 0, 0] version = image_utils.get_qemu_img_version() mock_exec.assert_called_once_with('qemu-img', '--help', check_exit_code=False) self.assertEqual(expected_version, version) @mock.patch.object(image_utils, 'get_qemu_img_version') def test_validate_qemu_img_version(self, mock_get_qemu_img_version): fake_current_version = [1, 8] mock_get_qemu_img_version.return_value = fake_current_version minimum_version = '1.8' image_utils.check_qemu_img_version(minimum_version) mock_get_qemu_img_version.assert_called_once_with() @mock.patch.object(image_utils, 'get_qemu_img_version') def _test_validate_unsupported_qemu_img_version(self, mock_get_qemu_img_version, current_version=None): mock_get_qemu_img_version.return_value = current_version minimum_version = '2.0' self.assertRaises(exception.VolumeBackendAPIException, image_utils.check_qemu_img_version, minimum_version) mock_get_qemu_img_version.assert_called_once_with() def test_validate_qemu_img_version_not_installed(self): self._test_validate_unsupported_qemu_img_version() def test_validate_older_qemu_img_version(self): self._test_validate_unsupported_qemu_img_version( current_version=[1, 8]) class TestConvertImage(test.TestCase): @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=True) def test_defaults_block_dev_with_size_info(self, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format 

class TestConvertImage(test.TestCase):
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.utils.is_blk_device', return_value=True)
    def test_defaults_block_dev_with_size_info(self, mock_isblk,
                                               mock_exec, mock_info):
        source = mock.sentinel.source
        dest = mock.sentinel.dest
        out_format = mock.sentinel.out_format
        mock_info.return_value.virtual_size = 1048576
        throttle = throttling.Throttle(prefix=['cgcmd'])

        with mock.patch('cinder.volume.utils.check_for_odirect_support',
                        return_value=True):
            output = image_utils.convert_image(source, dest, out_format,
                                               throttle=throttle)

            self.assertIsNone(output)
            mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert',
                                              '-t', 'none', '-O', out_format,
                                              source, dest, run_as_root=True)

        mock_exec.reset_mock()

        with mock.patch('cinder.volume.utils.check_for_odirect_support',
                        return_value=False):
            output = image_utils.convert_image(source, dest, out_format)

            self.assertIsNone(output)
            mock_exec.assert_called_once_with('qemu-img', 'convert', '-O',
                                              out_format, source, dest,
                                              run_as_root=True)

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.utils.is_blk_device', return_value=True)
    def test_defaults_block_dev_without_size_info(self, mock_isblk,
                                                  mock_exec, mock_info):
        source = mock.sentinel.source
        dest = mock.sentinel.dest
        out_format = mock.sentinel.out_format
        mock_info.side_effect = ValueError
        throttle = throttling.Throttle(prefix=['cgcmd'])

        with mock.patch('cinder.volume.utils.check_for_odirect_support',
                        return_value=True):
            output = image_utils.convert_image(source, dest, out_format,
                                               throttle=throttle)

            mock_info.assert_called_once_with(source, run_as_root=True)
            self.assertIsNone(output)
            mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert',
                                              '-t', 'none', '-O', out_format,
                                              source, dest, run_as_root=True)

        mock_exec.reset_mock()

        with mock.patch('cinder.volume.utils.check_for_odirect_support',
                        return_value=False):
            output = image_utils.convert_image(source, dest, out_format)

            self.assertIsNone(output)
            mock_exec.assert_called_once_with('qemu-img', 'convert', '-O',
                                              out_format, source, dest,
                                              run_as_root=True)

    @mock.patch('cinder.volume.utils.check_for_odirect_support',
                return_value=True)
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.utils.is_blk_device', return_value=False)
    def test_defaults_not_block_dev_with_size_info(self, mock_isblk,
                                                   mock_exec, mock_info,
                                                   mock_odirect):
        source = mock.sentinel.source
        dest = mock.sentinel.dest
        out_format = mock.sentinel.out_format
        mock_info.return_value.virtual_size = 1048576

        output = image_utils.convert_image(source, dest, out_format)

        self.assertIsNone(output)
        mock_exec.assert_called_once_with('qemu-img', 'convert', '-O',
                                          out_format, source, dest,
                                          run_as_root=True)

    @mock.patch('cinder.volume.utils.check_for_odirect_support',
                return_value=True)
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.utils.is_blk_device', return_value=False)
    def test_defaults_not_block_dev_without_size_info(self, mock_isblk,
                                                      mock_exec, mock_info,
                                                      mock_odirect):
        source = mock.sentinel.source
        dest = mock.sentinel.dest
        out_format = mock.sentinel.out_format
        mock_info.side_effect = ValueError

        output = image_utils.convert_image(source, dest, out_format)

        self.assertIsNone(output)
        mock_exec.assert_called_once_with('qemu-img', 'convert', '-O',
                                          out_format, source, dest,
                                          run_as_root=True)
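
# Editor's sketch (illustrative only, not part of the original test module):
# the TestConvertImage cases above assert that '-t none' is appended only
# when O_DIRECT is usable, and that a throttle prefix such as 'cgcmd' is
# prepended to the command. Assembling an argv along those lines can be
# sketched as below; the helper is an assumption about the command's shape,
# not the cinder implementation itself.
def _build_convert_cmd(source, dest, out_format,
                       odirect_supported, prefix=()):
    cmd = list(prefix) + ['qemu-img', 'convert']
    if odirect_supported:
        # Bypass the host page cache when the destination supports O_DIRECT.
        cmd += ['-t', 'none']
    cmd += ['-O', out_format, source, dest]
    return cmd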

class TestResizeImage(test.TestCase):
    @mock.patch('cinder.utils.execute')
    def test_defaults(self, mock_exec):
        source = mock.sentinel.source
        size = mock.sentinel.size
        output = image_utils.resize_image(source, size)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('qemu-img', 'resize', source,
                                          'sentinel.sizeG',
                                          run_as_root=False)

    @mock.patch('cinder.utils.execute')
    def test_run_as_root(self, mock_exec):
        source = mock.sentinel.source
        size = mock.sentinel.size
        output = image_utils.resize_image(source, size, run_as_root=True)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('qemu-img', 'resize', source,
                                          'sentinel.sizeG', run_as_root=True)


class TestFetch(test.TestCase):
    @mock.patch('os.stat')
    @mock.patch('cinder.image.image_utils.fileutils')
    def test_defaults(self, mock_fileutils, mock_stat):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        path = 'test_path'
        _user_id = mock.sentinel._user_id
        _project_id = mock.sentinel._project_id
        mock_open = mock.mock_open()
        mock_stat.return_value.st_size = 1048576

        with mock.patch('cinder.image.image_utils.open',
                        new=mock_open, create=True):
            output = image_utils.fetch(ctxt, image_service, image_id, path,
                                       _user_id, _project_id)
        self.assertIsNone(output)
        image_service.download.assert_called_once_with(ctxt, image_id,
                                                       mock_open.return_value)
        mock_open.assert_called_once_with(path, 'wb')
        mock_fileutils.remove_path_on_error.assert_called_once_with(path)
        (mock_fileutils.remove_path_on_error.return_value.__enter__
            .assert_called_once_with())
        (mock_fileutils.remove_path_on_error.return_value.__exit__
            .assert_called_once_with(None, None, None))


class TestVerifyImage(test.TestCase):
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_defaults(self, mock_fetch, mock_fileutils, mock_info):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        mock_data = mock_info.return_value
        mock_data.file_format = 'test_format'
        mock_data.backing_file = None

        output = image_utils.fetch_verify_image(ctxt, image_service,
                                                image_id, dest)
        self.assertIsNone(output)
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           dest, None, None)
        mock_info.assert_called_once_with(dest, run_as_root=True)
        mock_fileutils.remove_path_on_error.assert_called_once_with(dest)
        (mock_fileutils.remove_path_on_error.return_value.__enter__
            .assert_called_once_with())
        (mock_fileutils.remove_path_on_error.return_value.__exit__
            .assert_called_once_with(None, None, None))

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_kwargs(self, mock_fetch, mock_fileutils, mock_info):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 2
        run_as_root = mock.sentinel.run_as_root
        mock_data = mock_info.return_value
        mock_data.file_format = 'test_format'
        mock_data.backing_file = None
        mock_data.virtual_size = 1

        output = image_utils.fetch_verify_image(
            ctxt, image_service, image_id, dest, user_id=user_id,
            project_id=project_id, size=size, run_as_root=run_as_root)
        self.assertIsNone(output)
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           dest, None, None)
        mock_info.assert_called_once_with(dest, run_as_root=run_as_root)
        mock_fileutils.remove_path_on_error.assert_called_once_with(dest)
        (mock_fileutils.remove_path_on_error.return_value.__enter__
            .assert_called_once_with())
        (mock_fileutils.remove_path_on_error.return_value.__exit__
            .assert_called_once_with(None, None, None))

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_format_error(self, mock_fetch, mock_fileutils, mock_info):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        mock_data = mock_info.return_value
        mock_data.file_format = None
        mock_data.backing_file = None

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          ctxt, image_service, image_id, dest)

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_backing_file_error(self, mock_fetch, mock_fileutils, mock_info):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        mock_data = mock_info.return_value
        mock_data.file_format = 'test_format'
        mock_data.backing_file = 'test_backing_file'

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          ctxt, image_service, image_id, dest)

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_size_error(self, mock_fetch, mock_fileutils, mock_info):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        size = 1
        mock_data = mock_info.return_value
        mock_data.file_format = 'test_format'
        mock_data.backing_file = None
        mock_data.virtual_size = 2

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          ctxt, image_service, image_id, dest, size=size)


class TestTemporaryDir(test.TestCase):
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('cinder.image.image_utils.utils.tempdir')
    def test_conv_dir_exists(self, mock_tempdir, mock_exists, mock_make,
                             mock_conf):
        mock_conf.image_conversion_dir = mock.sentinel.conv_dir

        output = image_utils.temporary_dir()

        self.assertFalse(mock_make.called)
        mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir)
        self.assertEqual(output, mock_tempdir.return_value)

    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('cinder.image.image_utils.utils.tempdir')
    def test_create_conv_dir(self, mock_tempdir, mock_exists, mock_make,
                             mock_conf):
        mock_conf.image_conversion_dir = mock.sentinel.conv_dir

        output = image_utils.temporary_dir()

        mock_make.assert_called_once_with(mock.sentinel.conv_dir)
        mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir)
        self.assertEqual(output, mock_tempdir.return_value)

    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('cinder.image.image_utils.utils.tempdir')
    def test_no_conv_dir(self, mock_tempdir, mock_exists, mock_make,
                         mock_conf):
        mock_conf.image_conversion_dir = None

        output = image_utils.temporary_dir()

        self.assertFalse(mock_make.called)
        mock_tempdir.assert_called_once_with(dir=None)
        self.assertEqual(output, mock_tempdir.return_value)
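
# Editor's sketch (illustrative only, not part of the original test module):
# the TestTemporaryDir cases above pin down temporary_dir()'s contract: use
# the configured image_conversion_dir when set (creating it if missing),
# otherwise fall back to the system default. A minimal stdlib-only
# equivalent, offered as an assumption rather than the cinder code:
import os
import tempfile


def _temporary_dir(conversion_dir=None):
    """Create a scratch directory, honoring a configured conversion dir."""
    if conversion_dir and not os.path.exists(conversion_dir):
        os.makedirs(conversion_dir)
    # dir=None defers to the platform default temp location.
    return tempfile.TemporaryDirectory(dir=conversion_dir)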

class TestUploadVolume(test.TestCase):
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('six.moves.builtins.open')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.os')
    def test_diff_format(self, mock_os, mock_temp, mock_convert, mock_info,
                         mock_open, mock_conf):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_meta = {'id': 'test_id',
                      'disk_format': mock.sentinel.disk_format}
        volume_path = mock.sentinel.volume_path
        mock_os.name = 'posix'
        data = mock_info.return_value
        data.file_format = mock.sentinel.disk_format
        data.backing_file = None
        temp_file = mock_temp.return_value.__enter__.return_value

        output = image_utils.upload_volume(ctxt, image_service, image_meta,
                                           volume_path)

        self.assertIsNone(output)
        mock_convert.assert_called_once_with(volume_path,
                                             temp_file,
                                             mock.sentinel.disk_format,
                                             run_as_root=True)
        mock_info.assert_called_with(temp_file, run_as_root=True)
        self.assertEqual(2, mock_info.call_count)
        mock_open.assert_called_once_with(temp_file, 'rb')
        image_service.update.assert_called_once_with(
            ctxt, image_meta['id'], {},
            mock_open.return_value.__enter__.return_value)

    @mock.patch('cinder.image.image_utils.utils.temporary_chown')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('six.moves.builtins.open')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.os')
    def test_same_format(self, mock_os, mock_temp, mock_convert, mock_info,
                         mock_open, mock_conf, mock_chown):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_meta = {'id': 'test_id',
                      'disk_format': 'raw'}
        volume_path = mock.sentinel.volume_path
        mock_os.name = 'posix'
        mock_os.access.return_value = False

        output = image_utils.upload_volume(ctxt, image_service, image_meta,
                                           volume_path)

        self.assertIsNone(output)
        self.assertFalse(mock_convert.called)
        self.assertFalse(mock_info.called)
        mock_chown.assert_called_once_with(volume_path)
        mock_open.assert_called_once_with(volume_path)
        image_service.update.assert_called_once_with(
            ctxt, image_meta['id'], {},
            mock_open.return_value.__enter__.return_value)

    @mock.patch('cinder.image.image_utils.utils.temporary_chown')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('six.moves.builtins.open')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.os')
    def test_same_format_on_nt(self, mock_os, mock_temp, mock_convert,
                               mock_info, mock_open, mock_conf, mock_chown):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_meta = {'id': 'test_id',
                      'disk_format': 'raw'}
        volume_path = mock.sentinel.volume_path
        mock_os.name = 'nt'
        mock_os.access.return_value = False

        output = image_utils.upload_volume(ctxt, image_service, image_meta,
                                           volume_path)

        self.assertIsNone(output)
        self.assertFalse(mock_convert.called)
        self.assertFalse(mock_info.called)
        mock_open.assert_called_once_with(volume_path, 'rb')
        image_service.update.assert_called_once_with(
            ctxt, image_meta['id'], {},
            mock_open.return_value.__enter__.return_value)

    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('six.moves.builtins.open')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.os')
    def test_convert_error(self, mock_os, mock_temp, mock_convert, mock_info,
                           mock_open, mock_conf):
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_meta = {'id': 'test_id',
                      'disk_format': mock.sentinel.disk_format}
        volume_path = mock.sentinel.volume_path
        mock_os.name = 'posix'
        data = mock_info.return_value
        data.file_format = mock.sentinel.other_disk_format
        data.backing_file = None
        temp_file = mock_temp.return_value.__enter__.return_value

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.upload_volume,
                          ctxt, image_service, image_meta, volume_path)
        mock_convert.assert_called_once_with(volume_path,
                                             temp_file,
                                             mock.sentinel.disk_format,
                                             run_as_root=True)
        mock_info.assert_called_with(temp_file, run_as_root=True)
        self.assertEqual(2, mock_info.call_count)
        self.assertFalse(image_service.update.called)


class TestFetchToVhd(test.TestCase):
    @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
    def test_defaults(self, mock_fetch_to):
        ctxt = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        blocksize = mock.sentinel.blocksize

        output = image_utils.fetch_to_vhd(ctxt, image_service, image_id,
                                          dest, blocksize)
        self.assertIsNone(output)
        mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id,
                                              dest, 'vpc', blocksize, None,
                                              None, run_as_root=True)

    @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
    def test_kwargs(self, mock_fetch_to):
        ctxt = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        run_as_root = mock.sentinel.run_as_root

        output = image_utils.fetch_to_vhd(ctxt, image_service, image_id,
                                          dest, blocksize, user_id=user_id,
                                          project_id=project_id,
                                          run_as_root=run_as_root)
        self.assertIsNone(output)
        mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id,
                                              dest, 'vpc', blocksize,
                                              user_id, project_id,
                                              run_as_root=run_as_root)


class TestFetchToRaw(test.TestCase):
    @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
    def test_defaults(self, mock_fetch_to):
        ctxt = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        blocksize = mock.sentinel.blocksize

        output = image_utils.fetch_to_raw(ctxt, image_service, image_id,
                                          dest, blocksize)
        self.assertIsNone(output)
        mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id,
                                              dest, 'raw', blocksize, None,
                                              None, None, run_as_root=True)

    @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
    def test_kwargs(self, mock_fetch_to):
        ctxt = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = mock.sentinel.size
        run_as_root = mock.sentinel.run_as_root

        output = image_utils.fetch_to_raw(ctxt, image_service, image_id,
                                          dest, blocksize, user_id=user_id,
                                          project_id=project_id, size=size,
                                          run_as_root=run_as_root)
        self.assertIsNone(output)
        mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id,
                                              dest, 'raw', blocksize,
                                              user_id, project_id, size,
                                              run_as_root=run_as_root)
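
# Editor's sketch (illustrative only, not part of the original test module):
# TestFetchToVhd and TestFetchToRaw above verify nothing beyond argument
# forwarding -- both helpers are thin wrappers that call
# fetch_to_volume_format() with a fixed on-disk format ('vpc' or 'raw').
# That delegation pattern can be restated as the hypothetical factory below.
def _fetch_to_format(fetch_impl, fixed_format):
    """Bind fetch_impl to one volume format, forwarding everything else."""
    def wrapper(ctxt, image_service, image_id, dest, blocksize, **kwargs):
        return fetch_impl(ctxt, image_service, image_id, dest,
                          fixed_format, blocksize, **kwargs)
    return wrapper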

class TestFetchToVolumeFormat(test.TestCase):
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_defaults(self, mock_conf, mock_temp, mock_info, mock_fetch,
                      mock_is_xen, mock_repl_xen, mock_copy, mock_convert):
        ctxt = mock.sentinel.context
        ctxt.user_id = mock.sentinel.user_id
        image_service = mock.Mock(temp_images=None)
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize

        data = mock_info.return_value
        data.file_format = volume_format
        data.backing_file = None
        data.virtual_size = 1234
        tmp = mock_temp.return_value.__enter__.return_value

        output = image_utils.fetch_to_volume_format(ctxt, image_service,
                                                    image_id, dest,
                                                    volume_format, blocksize)

        self.assertIsNone(output)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=True),
            mock.call(tmp, run_as_root=True),
            mock.call(dest, run_as_root=True)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, None, None)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        mock_convert.assert_called_once_with(tmp, dest, volume_format,
                                             run_as_root=True)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_kwargs(self, mock_conf, mock_temp, mock_info, mock_fetch,
                    mock_is_xen, mock_repl_xen, mock_copy, mock_convert):
        ctxt = mock.sentinel.context
        image_service = mock.Mock(temp_images=None)
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        ctxt.user_id = user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root

        data = mock_info.return_value
        data.file_format = volume_format
        data.backing_file = None
        data.virtual_size = 1234
        tmp = mock_temp.return_value.__enter__.return_value

        output = image_utils.fetch_to_volume_format(
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)

        self.assertIsNone(output)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(dest, run_as_root=run_as_root)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        mock_convert.assert_called_once_with(tmp, dest, volume_format,
                                             run_as_root=run_as_root)
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_temporary_images(self, mock_conf, mock_temp, mock_info,
                              mock_fetch, mock_is_xen, mock_repl_xen,
                              mock_copy, mock_convert):
        ctxt = mock.sentinel.context
        ctxt.user_id = mock.sentinel.user_id
        image_service = mock.Mock(temp_images=None)
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize

        data = mock_info.return_value
        data.file_format = volume_format
        data.backing_file = None
        data.virtual_size = 1234
        tmp = mock.sentinel.tmp
        dummy = mock.sentinel.dummy
        mock_temp.return_value.__enter__.side_effect = [tmp, dummy]

        with image_utils.TemporaryImages.fetch(image_service, ctxt,
                                               image_id) as tmp_img:
            self.assertEqual(tmp_img, tmp)
            output = image_utils.fetch_to_volume_format(ctxt, image_service,
                                                        image_id, dest,
                                                        volume_format,
                                                        blocksize)

        self.assertIsNone(output)
        image_service.show.assert_called_once_with(ctxt, image_id)
        self.assertEqual(2, mock_temp.call_count)
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=True),
            mock.call(dummy, run_as_root=True),
            mock.call(tmp, run_as_root=True),
            mock.call(dest, run_as_root=True)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, None, None)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        mock_convert.assert_called_once_with(tmp, dest, volume_format,
                                             run_as_root=True)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info',
                side_effect=processutils.ProcessExecutionError)
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_no_qemu_img_and_is_raw(self, mock_conf, mock_temp, mock_info,
                                    mock_fetch, mock_is_xen, mock_repl_xen,
                                    mock_copy, mock_convert):
        ctxt = mock.sentinel.context
        image_service = mock.Mock(temp_images=None)
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        ctxt.user_id = user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root

        tmp = mock_temp.return_value.__enter__.return_value
        image_service.show.return_value = {'disk_format': 'raw',
                                           'size': 41126400}
        image_size_m = math.ceil(41126400 / units.Mi)

        output = image_utils.fetch_to_volume_format(
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)

        self.assertIsNone(output)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_called_once_with(tmp, run_as_root=run_as_root)
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        self.assertFalse(mock_repl_xen.called)
        mock_copy.assert_called_once_with(tmp, dest, image_size_m,
                                          blocksize)
        self.assertFalse(mock_convert.called)
@mock.patch('cinder.image.image_utils.CONF') def test_no_qemu_img_not_raw(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root tmp = mock_temp.return_value.__enter__.return_value image_service.show.return_value = {'disk_format': 'not_raw'} self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_called_once_with(tmp, run_as_root=run_as_root) self.assertFalse(mock_fetch.called) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_image', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_no_qemu_img_no_metadata(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root tmp = mock_temp.return_value.__enter__.return_value image_service.show.return_value = None self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_called_once_with(tmp, run_as_root=run_as_root) self.assertFalse(mock_fetch.called) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_image', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_size_error(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = 
mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 1234 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 4321 * 1024 ** 3 tmp = mock_temp.return_value.__enter__.return_value self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_image', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_qemu_img_parse_error(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = None data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_image', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_backing_file_error(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = 
mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = mock.sentinel.backing_file data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_image', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def _test_format_name_mismatch(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, legacy_format_name=False): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = 'vhd' blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = 'vpc' if legacy_format_name else 'raw' data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value if legacy_format_name: image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) else: self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root), mock.call(dest, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, run_as_root=run_as_root) def test_format_mismatch(self): self._test_format_name_mismatch() def test_format_name_mismatch_same_format(self): # Make sure no exception is raised because of qemu-img still using # the legacy 'vpc' format name if 'vhd' is requested. 
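        # qemu-img reports VHD images under their historical 'vpc' format
        # name, so a source reported as 'vpc' already has the requested 'vhd'
        # format and must not trip the format-mismatch check.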
self._test_format_name_mismatch(legacy_format_name=True) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_image', return_value=True) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.CONF') def test_xenserver_to_vhd(self, mock_conf, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with() mock_info.assert_has_calls([ mock.call(tmp, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root), mock.call(dest, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) mock_repl_xen.assert_called_once_with(tmp) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, run_as_root=run_as_root) class TestXenserverUtils(test.TestCase): @mock.patch('cinder.image.image_utils.is_xenserver_format') def test_is_xenserver_image(self, mock_format): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id output = image_utils.is_xenserver_image(ctxt, image_service, image_id) image_service.show.assert_called_once_with(ctxt, image_id) mock_format.assert_called_once_with(image_service.show.return_value) self.assertEqual(mock_format.return_value, output) def test_is_xenserver_format(self): image_meta1 = {'disk_format': 'vhd', 'container_format': 'ovf'} self.assertTrue(image_utils.is_xenserver_format(image_meta1)) image_meta2 = {'disk_format': 'test_disk_format', 'container_format': 'test_cont_format'} self.assertFalse(image_utils.is_xenserver_format(image_meta2)) @mock.patch('cinder.image.image_utils.utils.execute') def test_extract_targz(self, mock_exec): name = mock.sentinel.archive_name target = mock.sentinel.target output = image_utils.extract_targz(name, target) mock_exec.assert_called_once_with('tar', '-xzf', name, '-C', target) self.assertIsNone(output) class TestVhdUtils(test.TestCase): @mock.patch('cinder.image.image_utils.utils.execute') def test_set_vhd_parent(self, mock_exec): vhd_path = mock.sentinel.vhd_path parentpath = mock.sentinel.parentpath output = image_utils.set_vhd_parent(vhd_path, parentpath) mock_exec.assert_called_once_with('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath) self.assertIsNone(output) @mock.patch('cinder.image.image_utils.set_vhd_parent') def test_fix_vhd_chain(self, mock_set_parent): vhd_chain = (mock.sentinel.first, mock.sentinel.second, 
                     mock.sentinel.third, mock.sentinel.fourth,
                     mock.sentinel.fifth)
        output = image_utils.fix_vhd_chain(vhd_chain)
        self.assertIsNone(output)
        mock_set_parent.assert_has_calls([
            mock.call(mock.sentinel.first, mock.sentinel.second),
            mock.call(mock.sentinel.second, mock.sentinel.third),
            mock.call(mock.sentinel.third, mock.sentinel.fourth),
            mock.call(mock.sentinel.fourth, mock.sentinel.fifth)])

    @mock.patch('cinder.image.image_utils.utils.execute',
                return_value=(98765.43210, mock.sentinel.error))
    def test_get_vhd_size(self, mock_exec):
        vhd_path = mock.sentinel.vhd_path

        output = image_utils.get_vhd_size(vhd_path)

        mock_exec.assert_called_once_with('vhd-util', 'query', '-n', vhd_path,
                                          '-v')
        self.assertEqual(98765, output)

    @mock.patch('cinder.image.image_utils.utils.execute')
    def test_resize_vhd(self, mock_exec):
        vhd_path = mock.sentinel.vhd_path
        size = 387549349
        journal = mock.sentinel.journal

        output = image_utils.resize_vhd(vhd_path, size, journal)

        self.assertIsNone(output)
        mock_exec.assert_called_once_with('vhd-util', 'resize', '-n', vhd_path,
                                          '-s', str(size), '-j', journal)

    @mock.patch('cinder.image.image_utils.utils.execute')
    def test_coalesce_vhd(self, mock_exec):
        vhd_path = mock.sentinel.vhd_path

        output = image_utils.coalesce_vhd(vhd_path)

        self.assertIsNone(output)
        mock_exec.assert_called_once_with('vhd-util', 'coalesce', '-n',
                                          vhd_path)

    @mock.patch('cinder.image.image_utils.coalesce_vhd')
    @mock.patch('cinder.image.image_utils.resize_vhd')
    @mock.patch('cinder.image.image_utils.get_vhd_size')
    @mock.patch('cinder.image.image_utils.utils.execute')
    def test_coalesce_chain(self, mock_exec, mock_size, mock_resize,
                            mock_coal):
        vhd_chain = (mock.sentinel.first, mock.sentinel.second,
                     mock.sentinel.third, mock.sentinel.fourth,
                     mock.sentinel.fifth)

        output = image_utils.coalesce_chain(vhd_chain)

        self.assertEqual(mock.sentinel.fifth, output)
        mock_size.assert_has_calls([
            mock.call(mock.sentinel.first),
            mock.call(mock.sentinel.second),
            mock.call(mock.sentinel.third),
            mock.call(mock.sentinel.fourth)])
        mock_resize.assert_has_calls([
            mock.call(mock.sentinel.second, mock_size.return_value, mock.ANY),
            mock.call(mock.sentinel.third, mock_size.return_value, mock.ANY),
            mock.call(mock.sentinel.fourth, mock_size.return_value, mock.ANY),
            mock.call(mock.sentinel.fifth, mock_size.return_value, mock.ANY)])
        mock_coal.assert_has_calls([
            mock.call(mock.sentinel.first),
            mock.call(mock.sentinel.second),
            mock.call(mock.sentinel.third),
            mock.call(mock.sentinel.fourth)])

    @mock.patch('cinder.image.image_utils.os.path')
    def test_discover_vhd_chain(self, mock_path):
        directory = '/some/test/directory'
        mock_path.join.side_effect = lambda x, y: '/'.join((x, y))
        mock_path.exists.side_effect = (True, True, True, False)

        output = image_utils.discover_vhd_chain(directory)

        expected_output = ['/some/test/directory/0.vhd',
                           '/some/test/directory/1.vhd',
                           '/some/test/directory/2.vhd']
        self.assertEqual(expected_output, output)

    @mock.patch('cinder.image.image_utils.temporary_dir')
    @mock.patch('cinder.image.image_utils.os.rename')
    @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists')
    @mock.patch('cinder.image.image_utils.coalesce_chain')
    @mock.patch('cinder.image.image_utils.fix_vhd_chain')
    @mock.patch('cinder.image.image_utils.discover_vhd_chain')
    @mock.patch('cinder.image.image_utils.extract_targz')
    def test_replace_xenserver_image_with_coalesced_vhd(
            self, mock_targz, mock_discover, mock_fix, mock_coal, mock_delete,
            mock_rename, mock_temp):
        image_file = mock.sentinel.image_file
        tmp = mock_temp.return_value.__enter__.return_value

        output = image_utils.replace_xenserver_image_with_coalesced_vhd(
            image_file)

        self.assertIsNone(output)
        mock_targz.assert_called_once_with(image_file, tmp)
        mock_discover.assert_called_once_with(tmp)
        mock_fix.assert_called_once_with(mock_discover.return_value)
        mock_coal.assert_called_once_with(mock_discover.return_value)
        mock_delete.assert_called_once_with(image_file)
        mock_rename.assert_called_once_with(mock_coal.return_value,
                                            image_file)
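# test_replace_xenserver_image_with_coalesced_vhd pins down the expected
# pipeline order: extract the tarball into a temporary directory, discover
# the numbered VHD chain, repair its parent links, coalesce it, then swap
# the result in for the original image file. test_discover_vhd_chain in turn
# fixes the probing contract for that chain; a minimal sketch of a loop
# meeting those expectations (not necessarily the code in
# cinder.image.image_utils):
#
#     def discover_chain(directory):
#         chain, index = [], 0
#         while os.path.exists(os.path.join(directory, '%d.vhd' % index)):
#             chain.append(os.path.join(directory, '%d.vhd' % index))
#             index += 1
#         return chain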
class TestCreateTemporaryFile(test.TestCase):
    @mock.patch('cinder.image.image_utils.os.close')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.os.path.exists')
    @mock.patch('cinder.image.image_utils.os.makedirs')
    @mock.patch('cinder.image.image_utils.tempfile.mkstemp')
    def test_create_temporary_file_no_dir(self, mock_mkstemp, mock_dirs,
                                          mock_path, mock_conf, mock_close):
        mock_conf.image_conversion_dir = None
        fd = mock.sentinel.file_descriptor
        path = mock.sentinel.absolute_pathname
        mock_mkstemp.return_value = (fd, path)

        output = image_utils.create_temporary_file()

        self.assertEqual(path, output)
        mock_mkstemp.assert_called_once_with(dir=None)
        mock_close.assert_called_once_with(fd)

    @mock.patch('cinder.image.image_utils.os.close')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True)
    @mock.patch('cinder.image.image_utils.os.makedirs')
    @mock.patch('cinder.image.image_utils.tempfile.mkstemp')
    def test_create_temporary_file_with_dir(self, mock_mkstemp, mock_dirs,
                                            mock_path, mock_conf, mock_close):
        conv_dir = mock.sentinel.image_conversion_dir
        mock_conf.image_conversion_dir = conv_dir
        fd = mock.sentinel.file_descriptor
        path = mock.sentinel.absolute_pathname
        mock_mkstemp.return_value = (fd, path)

        output = image_utils.create_temporary_file()

        self.assertEqual(path, output)
        self.assertFalse(mock_dirs.called)
        mock_mkstemp.assert_called_once_with(dir=conv_dir)
        mock_close.assert_called_once_with(fd)

    @mock.patch('cinder.image.image_utils.os.close')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.os.path.exists', return_value=False)
    @mock.patch('cinder.image.image_utils.os.makedirs')
    @mock.patch('cinder.image.image_utils.tempfile.mkstemp')
    def test_create_temporary_file_and_dir(self, mock_mkstemp, mock_dirs,
                                           mock_path, mock_conf, mock_close):
        conv_dir = mock.sentinel.image_conversion_dir
        mock_conf.image_conversion_dir = conv_dir
        fd = mock.sentinel.file_descriptor
        path = mock.sentinel.absolute_pathname
        mock_mkstemp.return_value = (fd, path)

        output = image_utils.create_temporary_file()

        self.assertEqual(path, output)
        mock_dirs.assert_called_once_with(conv_dir)
        mock_mkstemp.assert_called_once_with(dir=conv_dir)
        mock_close.assert_called_once_with(fd)


class TestTemporaryFileContextManager(test.TestCase):
    @mock.patch('cinder.image.image_utils.create_temporary_file',
                return_value=mock.sentinel.temporary_file)
    @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists')
    def test_temporary_file(self, mock_delete, mock_create):
        with image_utils.temporary_file() as tmp_file:
            self.assertEqual(mock.sentinel.temporary_file, tmp_file)
            self.assertFalse(mock_delete.called)
        mock_delete.assert_called_once_with(mock.sentinel.temporary_file)
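# The context-manager test above encodes the cleanup contract callers rely
# on: fileutils.delete_if_exists must not run while the block is active and
# must run exactly once after it exits, so the temporary file never outlives
# its user. Typical call site, with a hypothetical helper:
#
#     with image_utils.temporary_file() as tmp:
#         write_image_data(tmp)  # hypothetical
#     # tmp has been reclaimed here.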
cinder-8.0.0/cinder/tests/unit/test_hpe3par.py0000664000567000056710000106034512701406250022472 0ustar jenkinsjenkins00000000000000# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for OpenStack Cinder volume drivers."""

import mock
import ast

from oslo_config import cfg
from oslo_utils import units

from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_hpe_3par_client as hpe3parclient
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.hpe import hpe_3par_fc as hpefcdriver
from cinder.volume.drivers.hpe import hpe_3par_iscsi as hpedriver
from cinder.volume import qos_specs
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types

hpeexceptions = hpe3parclient.hpeexceptions

CONF = cfg.CONF

HPE3PAR_CPG = 'OpenStackCPG'
HPE3PAR_CPG2 = 'fakepool'
HPE3PAR_CPG_QOS = 'qospool'
HPE3PAR_CPG_SNAP = 'OpenStackCPGSnap'
HPE3PAR_USER_NAME = 'testUser'
HPE3PAR_USER_PASS = 'testPassword'
HPE3PAR_SAN_IP = '2.2.2.2'
HPE3PAR_SAN_SSH_PORT = 999
HPE3PAR_SAN_SSH_CON_TIMEOUT = 44
HPE3PAR_SAN_SSH_PRIVATE = 'foobar'

GOODNESS_FUNCTION = \
    "stats.capacity_utilization < 0.6? 100:25"
FILTER_FUNCTION = \
    "stats.total_volumes < 400 && stats.capacity_utilization < 0.8"

CHAP_USER_KEY = "HPQ-cinder-CHAP-name"
CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret"

FLASH_CACHE_ENABLED = 1
FLASH_CACHE_DISABLED = 2

# Input/output (total read/write) operations per second.
THROUGHPUT = 'throughput'

# Data processed (total read/write) per unit time: kilobytes per second.
BANDWIDTH = 'bandwidth'

# Response time (total read/write): microseconds.
LATENCY = 'latency'

# IO size (total read/write): kilobytes.
IO_SIZE = 'io_size' # Queue length for processing IO requests QUEUE_LENGTH = 'queue_length' # Average busy percentage AVG_BUSY_PERC = 'avg_busy_perc' # replication constants HPE3PAR_CPG_REMOTE = 'DestOpenStackCPG' HPE3PAR_CPG2_REMOTE = 'destfakepool' HPE3PAR_CPG_MAP = 'OpenStackCPG:DestOpenStackCPG fakepool:destfakepool' SYNC_MODE = 1 PERIODIC_MODE = 2 SYNC_PERIOD = 900 class Comment(object): def __init__(self, expected): self.expected = expected def __eq__(self, actual): return (dict(ast.literal_eval(actual)) == self.expected) class HPE3PARBaseDriver(object): VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7' SRC_CG_VOLUME_ID = 'bd21d11b-c765-4c68-896c-6b07f63cfcb6' CLONE_ID = 'd03338a9-9115-48a3-8dfc-000000000000' VOLUME_TYPE_ID_REPLICATED = 'be9181f1-4040-46f2-8298-e7532f2bf9db' VOLUME_TYPE_ID_DEDUP = 'd03338a9-9115-48a3-8dfc-11111111111' VOLUME_TYPE_ID_FLASH_CACHE = 'd03338a9-9115-48a3-8dfc-22222222222' VOLUME_NAME = 'volume-' + VOLUME_ID SRC_CG_VOLUME_NAME = 'volume-' + SRC_CG_VOLUME_ID VOLUME_NAME_3PAR = 'osv-0DM4qZEVSKON-DXN-NwVpw' SNAPSHOT_ID = '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31' SNAPSHOT_NAME = 'snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31' VOLUME_3PAR_NAME = 'osv-0DM4qZEVSKON-DXN-NwVpw' SNAPSHOT_3PAR_NAME = 'oss-L4I73ONuTci9Fd4ceij-MQ' RCG_3PAR_NAME = 'rcg-0DM4qZEVSKON-DXN-N' CONSIS_GROUP_ID = '6044fedf-c889-4752-900f-2039d247a5df' CONSIS_GROUP_NAME = 'vvs-YET.38iJR1KQDyA50kel3w' SRC_CONSIS_GROUP_ID = '7d7dfa02-ac6e-48cb-96af-8a0cd3008d47' SRC_CONSIS_GROUP_NAME = 'vvs-fX36AqxuSMuWr4oM0wCNRw' CGSNAPSHOT_ID = 'e91c5ed5-daee-4e84-8724-1c9e31e7a1f2' CGSNAPSHOT_BASE_NAME = 'oss-6Rxe1druToSHJByeMeeh8g' CLIENT_ID = "12345" REPLICATION_CLIENT_ID = "54321" REPLICATION_BACKEND_ID = 'target' # fake host on the 3par FAKE_HOST = 'fakehost' FAKE_CINDER_HOST = 'fakehost@foo#' + HPE3PAR_CPG USER_ID = '2689d9a913974c008b1d859013f23607' PROJECT_ID = 'fac88235b9d64685a3530f73e490348f' VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156' FAKE_DESC = 'test description name' FAKE_FC_PORTS = [{'portPos': {'node': 7, 'slot': 1, 'cardPort': 1}, 'portWWN': '0987654321234', 'protocol': 1, 'mode': 2, 'linkState': 4}, {'portPos': {'node': 6, 'slot': 1, 'cardPort': 1}, 'portWWN': '123456789000987', 'protocol': 1, 'mode': 2, 'linkState': 4}] QOS = {'qos:maxIOPS': '1000', 'qos:maxBWS': '50', 'qos:minIOPS': '100', 'qos:minBWS': '25', 'qos:latency': '25', 'qos:priority': 'low'} QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'low'} VVS_NAME = "myvvs" FAKE_ISCSI_PORT = {'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}, 'protocol': 2, 'mode': 2, 'IPAddr': '1.1.1.2', 'iSCSIName': ('iqn.2000-05.com.3pardata:' '21810002ac00383d'), 'linkState': 4} volume = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': None} volume_src_cg = {'name': SRC_CG_VOLUME_NAME, 'id': SRC_CG_VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': None} volume_replicated = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'replication_status': 'disabled', 'provider_location': CLIENT_ID, 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': 'replicated', 'volume_type_id': VOLUME_TYPE_ID_REPLICATED} replication_targets = [{'backend_id': REPLICATION_BACKEND_ID, 'cpg_map': HPE3PAR_CPG_MAP, 'hpe3par_api_url': 'https://1.1.1.1/api/v1', 'hpe3par_username': HPE3PAR_USER_NAME, 'hpe3par_password': 
HPE3PAR_USER_PASS, 'san_ip': HPE3PAR_SAN_IP, 'san_login': HPE3PAR_USER_NAME, 'san_password': HPE3PAR_USER_PASS, 'san_ssh_port': HPE3PAR_SAN_SSH_PORT, 'ssh_conn_timeout': HPE3PAR_SAN_SSH_CON_TIMEOUT, 'san_private_key': HPE3PAR_SAN_SSH_PRIVATE}] list_rep_targets = [{'backend_id': 'target'}] volume_encrypted = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': None, 'encryption_key_id': 'fake_key'} volume_dedup = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': 'dedup', 'volume_type_id': VOLUME_TYPE_ID_DEDUP} volume_pool = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': volume_utils.append_host(FAKE_HOST, HPE3PAR_CPG2), 'volume_type': None, 'volume_type_id': None} volume_qos = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': 'gold'} volume_flash_cache = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': VOLUME_TYPE_ID_FLASH_CACHE} snapshot = {'name': SNAPSHOT_NAME, 'id': SNAPSHOT_ID, 'user_id': USER_ID, 'project_id': PROJECT_ID, 'volume_id': VOLUME_ID_SNAP, 'volume_name': VOLUME_NAME, 'status': 'creating', 'progress': '0%', 'volume_size': 2, 'display_name': 'fakesnap', 'display_description': FAKE_DESC, 'volume': volume} wwn = ["123456789012345", "123456789054321"] connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'wwpns': [wwn[0], wwn[1]], 'wwnns': ["223456789012345", "223456789054321"], 'host': FAKE_HOST, 'multipath': False} connector_multipath_enabled = {'ip': '10.0.0.2', 'initiator': ('iqn.1993-08.org' '.debian:01:222'), 'wwpns': [wwn[0], wwn[1]], 'wwnns': ["223456789012345", "223456789054321"], 'host': FAKE_HOST, 'multipath': True} volume_type = {'name': 'gold', 'deleted': False, 'updated_at': None, 'extra_specs': {'cpg': HPE3PAR_CPG2, 'qos:maxIOPS': '1000', 'qos:maxBWS': '50', 'qos:minIOPS': '100', 'qos:minBWS': '25', 'qos:latency': '25', 'qos:priority': 'low'}, 'deleted_at': None, 'id': 'gold'} volume_type_replicated = {'name': 'replicated', 'deleted': False, 'updated_at': None, 'extra_specs': {'replication_enabled': ' True'}, 'deleted_at': None, 'id': VOLUME_TYPE_ID_REPLICATED} volume_type_dedup = {'name': 'dedup', 'deleted': False, 'updated_at': None, 'extra_specs': {'cpg': HPE3PAR_CPG2, 'provisioning': 'dedup'}, 'deleted_at': None, 'id': VOLUME_TYPE_ID_DEDUP} volume_type_flash_cache = {'name': 'flash-cache-on', 'deleted': False, 'updated_at': None, 'extra_specs': {'cpg': HPE3PAR_CPG2, 'hpe3par:flash_cache': 'true'}, 'deleted_at': None, 'id': VOLUME_TYPE_ID_FLASH_CACHE} flash_cache_3par_keys = {'flash_cache': 'true'} cpgs = [ {'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]}, 'incrementMiB': 8192}, 'SAUsage': {'rawTotalMiB': 24576, 'rawUsedMiB': 768, 'totalMiB': 8192, 'usedMiB': 256}, 'SDGrowth': {'LDLayout': {'RAIDType': 4, 'diskPatterns': [{'diskType': 2}]}, 'incrementMiB': 32768}, 'SDUsage': {'rawTotalMiB': 49152, 'rawUsedMiB': 1023, 'totalMiB': 36864, 'usedMiB': 1024 * 1}, 'UsrUsage': {'rawTotalMiB': 57344, 'rawUsedMiB': 43349, 'totalMiB': 43008, 'usedMiB': 1024 * 20}, 'additionalStates': [], 'degradedStates': [], 'failedStates': [], 'id': 5, 'name': HPE3PAR_CPG, 'numFPVVs': 2, 'numTPVVs': 0, 'numTDVVs': 1, 'state': 1, 'uuid': 
'29c214aa-62b9-41c8-b198-543f6cf24edf'}] TASK_DONE = 1 TASK_ACTIVE = 2 STATUS_DONE = {'status': 1} STATUS_ACTIVE = {'status': 2} mock_client_conf = { 'PORT_MODE_TARGET': 2, 'PORT_STATE_READY': 4, 'PORT_PROTO_ISCSI': 2, 'PORT_PROTO_FC': 1, 'TASK_DONE': TASK_DONE, 'TASK_ACTIVE': TASK_ACTIVE, 'HOST_EDIT_ADD': 1, 'CHAP_INITIATOR': 1, 'CHAP_TARGET': 2, 'getPorts.return_value': { 'members': FAKE_FC_PORTS + [FAKE_ISCSI_PORT] } } RETYPE_VVS_NAME = "yourvvs" RETYPE_HOST = { u'host': u'mark-stack1@3parfc', u'capabilities': { 'QoS_support': True, u'location_info': u'HPE3PARDriver:1234567:MARK_TEST_CPG', u'timestamp': u'2014-06-04T19:03:32.485540', u'allocated_capacity_gb': 0, u'volume_backend_name': u'3parfc', u'free_capacity_gb': u'infinite', u'driver_version': u'3.0.0', u'total_capacity_gb': u'infinite', u'reserved_percentage': 0, u'vendor_name': u'Hewlett Packard Enterprise', u'storage_protocol': u'FC' } } RETYPE_HOST_NOT3PAR = { u'host': u'mark-stack1@3parfc', u'capabilities': { u'location_info': u'XXXDriverXXX:1610771:MARK_TEST_CPG', } } RETYPE_QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'high'} RETYPE_VOLUME_TYPE_ID = "FakeVolId" RETYPE_VOLUME_TYPE_0 = { 'name': 'red', 'id': RETYPE_VOLUME_TYPE_ID, 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs': RETYPE_VVS_NAME, 'qos': RETYPE_QOS_SPECS, 'tpvv': True, 'tdvv': False, 'volume_type': volume_type } } RETYPE_VOLUME_TYPE_1 = { 'name': 'white', 'id': RETYPE_VOLUME_TYPE_ID, 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs': VVS_NAME, 'qos': QOS, 'tpvv': True, 'tdvv': False, 'volume_type': volume_type } } RETYPE_VOLUME_TYPE_2 = { 'name': 'blue', 'id': RETYPE_VOLUME_TYPE_ID, 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs': RETYPE_VVS_NAME, 'qos': RETYPE_QOS_SPECS, 'tpvv': True, 'tdvv': False, 'volume_type': volume_type } } RETYPE_VOLUME_TYPE_3 = { 'name': 'purple', 'id': RETYPE_VOLUME_TYPE_ID, 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs': RETYPE_VVS_NAME, 'qos': RETYPE_QOS_SPECS, 'tpvv': False, 'tdvv': True, 'volume_type': volume_type } } RETYPE_VOLUME_TYPE_BAD_PERSONA = { 'name': 'bad_persona', 'id': 'any_id', 'extra_specs': { 'hpe3par:persona': '99 - invalid' } } RETYPE_VOLUME_TYPE_BAD_CPG = { 'name': 'bad_cpg', 'id': 'any_id', 'extra_specs': { 'cpg': 'bogus', 'snap_cpg': 'bogus', 'hpe3par:persona': '2 - Generic-ALUA' } } MANAGE_VOLUME_INFO = { 'userCPG': 'testUserCpg0', 'snapCPG': 'testSnapCpg0', 'provisioningType': 1, 'comment': "{'display_name': 'Foo Volume'}" } MV_INFO_WITH_NO_SNAPCPG = { 'userCPG': 'testUserCpg0', 'provisioningType': 1, 'comment': "{'display_name': 'Foo Volume'}" } RETYPE_TEST_COMMENT = "{'retype_test': 'test comment'}" RETYPE_VOLUME_INFO_0 = { 'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Retype Vol0', 'size': 1, 'host': RETYPE_HOST, 'userCPG': 'testUserCpg0', 'snapCPG': 'testSnapCpg0', 'provisioningType': 1, 'comment': RETYPE_TEST_COMMENT } RETYPE_TEST_COMMENT_1 = "{'retype_test': 'test comment 1'}" RETYPE_VOLUME_INFO_1 = { 'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Retype Vol1', 'size': 1, 'host': RETYPE_HOST, 'userCPG': HPE3PAR_CPG, 'snapCPG': HPE3PAR_CPG_SNAP, 'provisioningType': 1, 'comment': RETYPE_TEST_COMMENT } RETYPE_TEST_COMMENT_2 = "{'retype_test': 'test comment 2'}" RETYPE_VOLUME_INFO_2 = { 'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Retype Vol2', 'size': 1, 'host': RETYPE_HOST, 'userCPG': HPE3PAR_CPG, 'snapCPG': 
HPE3PAR_CPG_SNAP, 'provisioningType': 3, 'comment': RETYPE_TEST_COMMENT } # Test for when we don't get a snapCPG. RETYPE_VOLUME_INFO_NO_SNAP = { 'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Retype Vol2', 'size': 1, 'host': RETYPE_HOST, 'userCPG': 'testUserCpg2', 'provisioningType': 1, 'comment': '{}' } RETYPE_CONF = { 'TASK_ACTIVE': TASK_ACTIVE, 'TASK_DONE': TASK_DONE, 'getTask.return_value': STATUS_DONE, 'getStorageSystemInfo.return_value': {'id': CLIENT_ID, 'serialNumber': '1234567'}, 'getVolume.return_value': RETYPE_VOLUME_INFO_0, 'modifyVolume.return_value': ("anyResponse", {'taskid': 1}) } # 3PAR retype currently doesn't use the diff. Existing code and fresh info # from the array work better for the most part. Some use of the diff was # intentionally removed to make _retype more usable for other use cases. RETYPE_DIFF = None wsapi_version_312 = {'major': 1, 'build': 30102422, 'minor': 3, 'revision': 1} wsapi_version_for_dedup = {'major': 1, 'build': 30201120, 'minor': 4, 'revision': 1} wsapi_version_for_flash_cache = {'major': 1, 'build': 30201200, 'minor': 4, 'revision': 2} wsapi_version_for_remote_copy = {'major': 1, 'build': 30202290, 'minor': 5, 'revision': 0} # Use this to point to latest version of wsapi wsapi_version_latest = wsapi_version_for_remote_copy standard_login = [ mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), mock.call.setSSHOptions( HPE3PAR_SAN_IP, HPE3PAR_USER_NAME, HPE3PAR_USER_PASS, missing_key_policy='AutoAddPolicy', privatekey=HPE3PAR_SAN_SSH_PRIVATE, known_hosts_file=mock.ANY, port=HPE3PAR_SAN_SSH_PORT, conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT)] get_id_login = [ mock.call.getWsApiVersion(), mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), mock.call.setSSHOptions( HPE3PAR_SAN_IP, HPE3PAR_USER_NAME, HPE3PAR_USER_PASS, missing_key_policy='AutoAddPolicy', privatekey=HPE3PAR_SAN_SSH_PRIVATE, known_hosts_file=mock.ANY, port=HPE3PAR_SAN_SSH_PORT, conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT), mock.call.getStorageSystemInfo()] standard_logout = [ mock.call.logout()] class fake_consistencygroup_object(object): def __init__(self, cg_id='6044fedf-c889-4752-900f-2039d247a5df'): self.id = cg_id self.volume_type_id = '49fa96b5-828e-4653-b622-873a1b7e6f1c' self.name = 'cg_name' self.cgsnapshot_id = None self.host = 'fakehost@foo#OpenStackCPG' self.description = 'consistency group' class fake_cgsnapshot_object(object): def __init__(self, cgsnap_id='e91c5ed5-daee-4e84-8724-1c9e31e7a1f2'): self.id = cgsnap_id self.consistencygroup_id = '6044fedf-c889-4752-900f-2039d247a5df' self.description = 'cgsnapshot' self.readOnly = False def setup_configuration(self): configuration = mock.MagicMock() configuration.hpe3par_debug = False configuration.hpe3par_username = HPE3PAR_USER_NAME configuration.hpe3par_password = HPE3PAR_USER_PASS configuration.hpe3par_api_url = 'https://1.1.1.1/api/v1' configuration.hpe3par_cpg = [HPE3PAR_CPG, HPE3PAR_CPG2] configuration.hpe3par_cpg_snap = HPE3PAR_CPG_SNAP configuration.iscsi_ip_address = '1.1.1.2' configuration.iscsi_port = '1234' configuration.san_ip = HPE3PAR_SAN_IP configuration.san_login = HPE3PAR_USER_NAME configuration.san_password = HPE3PAR_USER_PASS configuration.san_ssh_port = HPE3PAR_SAN_SSH_PORT configuration.ssh_conn_timeout = HPE3PAR_SAN_SSH_CON_TIMEOUT configuration.san_private_key = HPE3PAR_SAN_SSH_PRIVATE configuration.hpe3par_snapshot_expiration = "" configuration.hpe3par_snapshot_retention = "" configuration.hpe3par_iscsi_ips = [] configuration.hpe3par_iscsi_chap_enabled = False configuration.goodness_function 
= GOODNESS_FUNCTION configuration.filter_function = FILTER_FUNCTION configuration.image_volume_cache_enabled = False configuration.replication_device = None return configuration @mock.patch( 'hpe3parclient.client.HPE3ParClient', spec=True, ) def setup_mock_client(self, _m_client, driver, conf=None, m_conf=None): _m_client = _m_client.return_value # Configure the base constants, defaults etc... _m_client.configure_mock(**self.mock_client_conf) _m_client.getWsApiVersion.return_value = self.wsapi_version_latest # If m_conf, drop those over the top of the base_conf. if m_conf is not None: _m_client.configure_mock(**m_conf) if conf is None: conf = self.setup_configuration() self.driver = driver(configuration=conf) self.driver.do_setup(None) return _m_client @mock.patch('hpe3parclient.version', "3.0.9") def test_unsupported_client_version(self): self.assertRaises(exception.InvalidInput, self.setup_driver) def test_ssh_options(self): expected_hosts_key_file = "test_hosts_key_file" orig_ssh_hosts_key_file = CONF.ssh_hosts_key_file orig_strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy CONF.ssh_hosts_key_file = expected_hosts_key_file CONF.strict_ssh_host_key_policy = False self.ctxt = context.get_admin_context() mock_client = self.setup_mock_client( driver=hpefcdriver.HPE3PARFCDriver) CONF.ssh_hosts_key_file = orig_ssh_hosts_key_file CONF.strict_ssh_host_key_policy = orig_strict_ssh_host_key_policy expected = [ mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), mock.call.setSSHOptions( HPE3PAR_SAN_IP, HPE3PAR_USER_NAME, HPE3PAR_USER_PASS, privatekey=HPE3PAR_SAN_SSH_PRIVATE, known_hosts_file=expected_hosts_key_file, missing_key_policy="AutoAddPolicy", port=HPE3PAR_SAN_SSH_PORT, conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2)] mock_client.assert_has_calls( expected + self.standard_logout) def test_ssh_options_strict(self): expected_hosts_key_file = "test_hosts_key_file" orig_ssh_hosts_key_file = CONF.ssh_hosts_key_file orig_strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy CONF.ssh_hosts_key_file = expected_hosts_key_file CONF.strict_ssh_host_key_policy = True self.ctxt = context.get_admin_context() mock_client = self.setup_mock_client( driver=hpefcdriver.HPE3PARFCDriver) CONF.ssh_hosts_key_file = orig_ssh_hosts_key_file CONF.strict_ssh_host_key_policy = orig_strict_ssh_host_key_policy expected = [ mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), mock.call.setSSHOptions( HPE3PAR_SAN_IP, HPE3PAR_USER_NAME, HPE3PAR_USER_PASS, privatekey=HPE3PAR_SAN_SSH_PRIVATE, known_hosts_file=expected_hosts_key_file, missing_key_policy="RejectPolicy", port=HPE3PAR_SAN_SSH_PORT, conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected + self.standard_logout) def test_task_waiter(self): task_statuses = [self.STATUS_ACTIVE, self.STATUS_ACTIVE] def side_effect(*args): return task_statuses and task_statuses.pop(0) or self.STATUS_DONE conf = {'getTask.side_effect': side_effect} mock_client = self.setup_driver(mock_conf=conf) task_id = 1234 interval = .001 with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() waiter = common.TaskWaiter(mock_client, task_id, interval) status = waiter.wait_for_task() expected = [ mock.call.getTask(task_id), mock.call.getTask(task_id), mock.call.getTask(task_id) ] mock_client.assert_has_calls(expected) 
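            # Two ACTIVE statuses precede the DONE status in the side effect,
            # so the waiter polls getTask exactly three times before it
            # returns.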
self.assertEqual(self.STATUS_DONE, status) def test_create_volume(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.create_volume(self.volume) comment = Comment({ "display_name": "Foo Volume", "type": "OpenStack", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}) expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP})] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) def test_create_volume_in_pool(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client return_model = self.driver.create_volume(self.volume_pool) comment = Comment({ "display_name": "Foo Volume", "type": "OpenStack", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}) expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG2, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP})] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_unsupported_dedup_volume_type(self, _mock_volume_types): mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312) _mock_volume_types.return_value = { 'name': 'dedup', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'provisioning': 'dedup', 'volume_type': self.volume_type_dedup}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.assertRaises(exception.InvalidInput, common.get_volume_settings_from_type_id, self.VOLUME_TYPE_ID_DEDUP, "mock") @mock.patch.object(volume_types, 'get_volume_type') def test_get_snap_cpg_from_volume_type(self, _mock_volume_types): mock_client = self.setup_driver() expected_type_snap_cpg = "type_snap_cpg" _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': expected_type_snap_cpg, 'volume_type': self.volume_type}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() result = common.get_volume_settings_from_type_id( "mock", self.driver.configuration.hpe3par_cpg) self.assertEqual(expected_type_snap_cpg, result['snap_cpg']) @mock.patch.object(volume_types, 'get_volume_type') def test_get_snap_cpg_from_volume_type_cpg(self, _mock_volume_types): mock_client = self.setup_driver() expected_cpg = 'use_extra_specs_cpg' _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'cpg': expected_cpg, 'volume_type': self.volume_type}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() result = common.get_volume_settings_from_type_id( "mock", 
self.driver.configuration.hpe3par_cpg) self.assertEqual(self.driver.configuration.hpe3par_cpg_snap, result['snap_cpg']) @mock.patch.object(volume_types, 'get_volume_type') def test_get_snap_cpg_from_volume_type_conf_snap_cpg( self, _mock_volume_types): _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'volume_type': self.volume_type}} conf = self.setup_configuration() expected_snap_cpg = conf.hpe3par_cpg_snap mock_client = self.setup_driver(config=conf) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() result = common.get_volume_settings_from_type_id( "mock", self.driver.configuration.hpe3par_cpg) self.assertEqual(expected_snap_cpg, result['snap_cpg']) @mock.patch.object(volume_types, 'get_volume_type') def test_get_snap_cpg_from_volume_type_conf_cpg( self, _mock_volume_types): _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'volume_type': self.volume_type}} conf = self.setup_configuration() conf.hpe3par_cpg_snap = None expected_cpg = conf.hpe3par_cpg mock_client = self.setup_driver(config=conf) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() result = common.get_volume_settings_from_type_id( "mock", self.driver.configuration.hpe3par_cpg) self.assertEqual(expected_cpg, result['snap_cpg']) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_qos(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'tpvv': True, 'tdvv': False, 'volume_type': self.volume_type}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client return_model = self.driver.create_volume(self.volume_qos) comment = Comment({ "volume_type_name": "gold", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "gold", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP})] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated_periodic(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 
'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client return_model = self.driver.create_volume(self.volume_replicated) comment = Comment({ "volume_type_name": "replicated", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) backend_id = self.replication_targets[0]['backend_id'] expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.createRemoteCopyGroup( self.RCG_3PAR_NAME, [{'userCPG': HPE3PAR_CPG_REMOTE, 'targetName': backend_id, 'mode': PERIODIC_MODE, 'snapCPG': HPE3PAR_CPG_REMOTE}], {'localUserCPG': HPE3PAR_CPG, 'localSnapCPG': HPE3PAR_CPG_SNAP}), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': backend_id}], optional={'volumeAutoCreation': True}), mock.call.modifyRemoteCopyGroup( self.RCG_3PAR_NAME, {'targets': [{'syncPeriod': SYNC_PERIOD, 'targetName': backend_id}]}), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertEqual({'replication_status': 'enabled', 'provider_location': self.CLIENT_ID}, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_delete_volume_replicated_failedover(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.return_value = ( {'targets': [{'targetName': 'tgt'}]}) mock_client.getCPG.return_value = {'domain': None} mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client volume = self.volume_replicated.copy() volume['replication_status'] = 'failed-over' self.driver.delete_volume(volume) rcg_name = self.RCG_3PAR_NAME + ".r" + self.CLIENT_ID expected = [ mock.call.getRemoteCopyGroup(rcg_name), mock.call.toggleRemoteCopyConfigMirror( 'tgt', mirror_config=False), mock.call.stopRemoteCopy(rcg_name), mock.call.removeVolumeFromRemoteCopyGroup( 
rcg_name, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeRemoteCopyGroup(rcg_name), mock.call.deleteVolume(self.VOLUME_3PAR_NAME), mock.call.toggleRemoteCopyConfigMirror( 'tgt', mirror_config=True)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated_sync(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client return_model = self.driver.create_volume(self.volume_replicated) comment = Comment({ "volume_type_name": "replicated", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) backend_id = self.replication_targets[0]['backend_id'] expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.createRemoteCopyGroup( self.RCG_3PAR_NAME, [{'userCPG': HPE3PAR_CPG_REMOTE, 'targetName': backend_id, 'mode': SYNC_MODE, 'snapCPG': HPE3PAR_CPG_REMOTE}], {'localUserCPG': HPE3PAR_CPG, 'localSnapCPG': HPE3PAR_CPG_SNAP}), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': backend_id}], optional={'volumeAutoCreation': True}), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertEqual({'replication_status': 'enabled', 'provider_location': self.CLIENT_ID}, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_dedup(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() _mock_volume_types.return_value = { 'name': 'dedup', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'provisioning': 'dedup', 'volume_type': self.volume_type_dedup}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client return_model = 
self.driver.create_volume(self.volume_dedup) comment = Comment({ "volume_type_name": "dedup", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "d03338a9-9115-48a3-8dfc-11111111111", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': False, 'tdvv': True, 'snapCPG': HPE3PAR_CPG_SNAP})] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_flash_cache(self, _mock_volume_types): # Setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} _mock_volume_types.return_value = { 'name': 'flash-cache-on', 'extra_specs': { 'cpg': HPE3PAR_CPG2, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'tpvv': True, 'tdvv': False, 'hpe3par:flash_cache': 'true', 'volume_type': self.volume_type_flash_cache}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} mock_client.FLASH_CACHE_ENABLED = FLASH_CACHE_ENABLED mock_client.FLASH_CACHE_DISABLED = FLASH_CACHE_DISABLED return_model = self.driver.create_volume(self.volume_flash_cache) comment = Comment({ "volume_type_name": "flash-cache-on", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "d03338a9-9115-48a3-8dfc-22222222222", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None), mock.call.createQoSRules( 'vvs-0DM4qZEVSKON-DXN-NwVpw', {'priority': 2} ), mock.call.modifyVolumeSet( 'vvs-0DM4qZEVSKON-DXN-NwVpw', flashCachePolicy=1), mock.call.addVolumeToVolumeSet( 'vvs-0DM4qZEVSKON-DXN-NwVpw', 'osv-0DM4qZEVSKON-DXN-NwVpw')] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_unsupported_flash_cache_volume(self, _mock_volume_types): mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312) _mock_volume_types.return_value = { 'name': 'flash-cache-on', 'extra_specs': { 'cpg': HPE3PAR_CPG2, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'tpvv': True, 'tdvv': False, 'hpe3par:flash_cache': 'true', 'volume_type': self.volume_type_flash_cache}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.assertRaises(exception.InvalidInput, common.get_flash_cache_policy, self.flash_cache_3par_keys) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_not_3par(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) with 
mock.patch.object(hpecommon.HPE3PARCommon,
                  '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client

            self.assertRaises(exception.InvalidHost,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST_NOT3PAR)

            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_volume_not_found(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client

            self.assertRaises(hpeexceptions.HTTPNotFound,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_specs_error_reverts_snap_cpg(self, _mock_volume_types):
        _mock_volume_types.side_effect = [
            self.RETYPE_VOLUME_TYPE_1,
            self.RETYPE_VOLUME_TYPE_0]
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_0

        # Fail the QOS setting to test the revert of the snap CPG rename.
        mock_client.addVolumeToVolumeSet.side_effect = \
            hpeexceptions.HTTPForbidden

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client

            self.assertRaises(hpeexceptions.HTTPForbidden,
                              self.driver.retype,
                              self.ctxt,
                              {'id': self.VOLUME_ID},
                              self.RETYPE_VOLUME_TYPE_0,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            old_settings = {
                'snapCPG': self.RETYPE_VOLUME_INFO_0['snapCPG'],
                'comment': self.RETYPE_VOLUME_INFO_0['comment']}
            new_settings = {
                'snapCPG': (
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg']),
                'comment': mock.ANY}

            expected = [
                mock.call.modifyVolume(self.VOLUME_3PAR_NAME, new_settings)
            ]
            mock_client.assert_has_calls(expected)
            expected = [
                mock.call.modifyVolume(self.VOLUME_3PAR_NAME, old_settings)
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_revert_comment(self, _mock_volume_types):
        _mock_volume_types.side_effect = [
            self.RETYPE_VOLUME_TYPE_2,
            self.RETYPE_VOLUME_TYPE_1]
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_1

        # Fail the volume set delete to test the revert of the comment.
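        # deleteVolumeSet raising HTTPForbidden aborts the retype midway; the
        # assertion below then requires a modifyVolume call restoring the
        # volume's original snapCPG and comment.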
        mock_client.deleteVolumeSet.side_effect = hpeexceptions.HTTPForbidden

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(hpeexceptions.HTTPForbidden,
                              self.driver.retype,
                              self.ctxt,
                              {'id': self.VOLUME_ID},
                              self.RETYPE_VOLUME_TYPE_2,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            original = {
                'snapCPG': self.RETYPE_VOLUME_INFO_1['snapCPG'],
                'comment': self.RETYPE_VOLUME_INFO_1['comment']}

            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw', original)]
            mock_client.assert_has_calls(expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_different_array(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getStorageSystemInfo.return_value = {
            'id': self.CLIENT_ID,
            'serialNumber': 'XXXXXXX'}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.InvalidHost,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo()]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_across_cpg_domains(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getCPG.side_effect = [
            {'domain': 'domain1'},
            {'domain': 'domain2'},
        ]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.Invalid3PARDomain,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg'])
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_across_snap_cpg_domains(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getCPG.side_effect = [
            {'domain': 'cpg_domain'},
            {'domain': 'cpg_domain'},
            {'domain': 'snap_cpg_domain_1'},
        ]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.Invalid3PARDomain,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg'])
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_to_bad_persona(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_BAD_PERSONA
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.InvalidInput,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_BAD_PERSONA,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_tune(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS)
        type_ref = volume_types.create(self.ctxt,
                                       "type1", {"qos:maxIOPS": "100",
                                                 "qos:maxBWS": "50",
                                                 "qos:minIOPS": "10",
                                                 "qos:minBWS": "20",
                                                 "qos:latency": "5",
                                                 "qos:priority": "high"})
        qos_specs.associate_qos_with_type(self.ctxt,
                                          qos_ref['id'],
                                          type_ref['id'])

        type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])

        volume = {'id': HPE3PARBaseDriver.CLONE_ID}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            retyped = self.driver.retype(
                self.ctxt, volume, type_ref, None, self.RETYPE_HOST)
            self.assertTrue(retyped)

            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA',
                                       {'comment': mock.ANY,
                                        'snapCPG': 'OpenStackCPGSnap'}),
                mock.call.deleteVolumeSet('vvs-0DM4qZEVSKON-AAAAAAAAA'),
                mock.call.addVolumeToVolumeSet('myvvs',
                                               'osv-0DM4qZEVSKON-AAAAAAAAA'),
                mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA',
                                       {'action': 6,
                                        'userCPG': 'OpenStackCPG',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(1)
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_qos_spec(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        cpg = "any_cpg"
        snap_cpg = "any_cpg"

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            common._retype(self.volume,
                           HPE3PARBaseDriver.VOLUME_3PAR_NAME,
                           "old_type", "old_type_id",
                           HPE3PARBaseDriver.RETYPE_HOST,
                           None, cpg, cpg, snap_cpg, snap_cpg,
                           True, False, False, True, None, None,
                           self.QOS_SPECS, self.RETYPE_QOS_SPECS,
                           None, None, "{}")

            expected = [
                mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None),
                mock.call.createQoSRules(
                    'vvs-0DM4qZEVSKON-DXN-NwVpw',
                    {'ioMinGoal': 100, 'ioMaxLimit': 1000,
                     'bwMinGoalKB': 25600, 'bwMaxLimitKB': 51200,
                     'priority': 3, 'latencyGoal': 25}),
                mock.call.addVolumeToVolumeSet(
                    'vvs-0DM4qZEVSKON-DXN-NwVpw',
                    'osv-0DM4qZEVSKON-DXN-NwVpw')]
            mock_client.assert_has_calls(expected)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_dedup(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_3
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        cpg = "any_cpg"
        snap_cpg = "any_cpg"

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            common._retype(self.volume,
                           HPE3PARBaseDriver.VOLUME_3PAR_NAME,
                           "old_type", "old_type_id",
                           HPE3PARBaseDriver.RETYPE_HOST,
                           None, cpg, cpg, snap_cpg, snap_cpg,
                           True, False, False, True, None, None,
                           self.QOS_SPECS, self.RETYPE_QOS_SPECS,
                           None, None, "{}")

            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw',
                                       {'action': 6,
                                        'userCPG': 'any_cpg',
                                        'conversionOperation': 3,
                                        'tuneOperation': 1}),
                mock.call.getTask(1)]
            mock_client.assert_has_calls(expected)

    def test_delete_volume(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.delete_volume(self.volume)

            expected = [mock.call.deleteVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_delete_volume_replicated(self, _mock_volume_types):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}

        _mock_volume_types.return_value = {
            'name': 'replicated',
            'extra_specs': {
                'cpg': HPE3PAR_CPG_QOS,
                'snap_cpg': HPE3PAR_CPG_SNAP,
                'vvs_name': self.VVS_NAME,
                'qos': self.QOS,
                'replication_enabled': ' True',
                'replication:mode': 'periodic',
                'replication:sync_period': '900',
                'volume_type': self.volume_type_replicated}}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.delete_volume(self.volume_replicated)

            expected = [
                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME),
                mock.call.removeVolumeFromRemoteCopyGroup(
                    self.RCG_3PAR_NAME,
                    self.VOLUME_3PAR_NAME,
                    removeFromTarget=True),
                mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME),
                mock.call.deleteVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.get_id_login +
                self.standard_logout +
                self.standard_login +
                expected +
                self.standard_logout)

    def test_create_cloned_volume(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'name': mock.ANY}
        mock_client.copyVolume.return_value = {'taskid': 1}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                      'id': HPE3PARBaseDriver.CLONE_ID,
                      'display_name': 'Foo Volume',
                      'size': 2,
                      'host': volume_utils.append_host(self.FAKE_HOST,
                                                       HPE3PAR_CPG2),
                      'source_volid': HPE3PARBaseDriver.VOLUME_ID}
            src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID,
                        'name': HPE3PARBaseDriver.VOLUME_NAME,
                        'size': 2}
            model_update = self.driver.create_cloned_volume(volume, src_vref)
            self.assertIsNone(model_update)

            common = hpecommon.HPE3PARCommon(None)
            vol_name = common._get_3par_vol_name(src_vref['id'])
            # snapshot name is random
            snap_name = mock.ANY
            optional = mock.ANY

            expected = [
                mock.call.createSnapshot(snap_name, vol_name, optional),
                mock.call.getVolume(snap_name),
                mock.call.copyVolume(
                    snap_name,
                    'osv-0DM4qZEVSKON-AAAAAAAAA',
                    HPE3PAR_CPG2,
                    {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
                     'tdvv': False, 'online': True})]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_create_cloned_volume_offline_copy(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'name': mock.ANY}
        task_id = 1
        mock_client.copyVolume.return_value = {'taskid': task_id}
        mock_client.getTask.return_value = {'status': 1}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                      'id': HPE3PARBaseDriver.CLONE_ID,
                      'display_name': 'Foo Volume',
                      'size': 5,
                      'host': volume_utils.append_host(self.FAKE_HOST,
                                                       HPE3PAR_CPG2),
                      'source_volid': HPE3PARBaseDriver.VOLUME_ID}
            src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID,
                        'name': HPE3PARBaseDriver.VOLUME_NAME,
                        'size': 2}
            model_update = self.driver.create_cloned_volume(volume, src_vref)
            self.assertIsNone(model_update)

            common = hpecommon.HPE3PARCommon(None)
            vol_name = common._get_3par_vol_name(volume['id'])
            src_vol_name = common._get_3par_vol_name(src_vref['id'])
            optional = {'priority': 1}
            comment = mock.ANY

            expected = [
                mock.call.createVolume(vol_name, 'fakepool',
                                       5120, comment),
                mock.call.copyVolume(
                    src_vol_name, vol_name, None, optional=optional),
                mock.call.getTask(task_id),
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_create_cloned_qos_volume(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'name': mock.ANY}
        mock_client.copyVolume.return_value = {'taskid': 1}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            src_vref = {'id': HPE3PARBaseDriver.CLONE_ID,
                        'name': HPE3PARBaseDriver.VOLUME_NAME,
                        'size': 2}
            volume = self.volume_qos.copy()
            host = "TEST_HOST"
            pool = "TEST_POOL"
            volume_host = volume_utils.append_host(host, pool)
            expected_cpg = pool
            volume['id'] = HPE3PARBaseDriver.VOLUME_ID
            volume['host'] = volume_host
            volume['source_volid'] = HPE3PARBaseDriver.CLONE_ID
            model_update = self.driver.create_cloned_volume(volume, src_vref)
            self.assertIsNone(model_update)

            # creation of the temp snapshot
            common = hpecommon.HPE3PARCommon(None)
            snap_name = mock.ANY
            vol_name = common._get_3par_vol_name(src_vref['id'])
            optional = mock.ANY

            expected = [
                mock.call.createSnapshot(snap_name, vol_name, optional),
                mock.call.getVolume(snap_name),
                mock.call.getCPG(expected_cpg),
                mock.call.copyVolume(
                    snap_name,
                    self.VOLUME_3PAR_NAME,
                    expected_cpg,
                    {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
                     'tdvv': False, 'online': True})]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_migrate_volume(self):
        conf = {
            'getStorageSystemInfo.return_value': {
                'id': self.CLIENT_ID,
                'serialNumber': '1234'},
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
        }

        mock_client = self.setup_driver(mock_conf=conf)
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE

        volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                  'id': HPE3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'volume_type_id': None,
                  'size': 2,
                  'status': 'available',
                  'host': HPE3PARBaseDriver.FAKE_HOST,
                  'source_volid': HPE3PARBaseDriver.VOLUME_ID}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume_name_3par = common._encode_name(volume['id'])

            loc_info = 'HPE3PARDriver:1234:CPG-FC1'
            host = {'host': 'stack@3parfc1#CPG-FC1',
                    'capabilities': {'location_info': loc_info}}

            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            self.assertEqual((True, None), result)

            osv_matcher = 'osv-' + volume_name_3par

            comment = Comment({"qos": {}, "display_name": "Foo Volume"})

            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': comment,
                     'snapCPG': HPE3PAR_CPG_SNAP}),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'CPG-FC1',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(mock.ANY)
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_with_type(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2

        conf = {
            'getStorageSystemInfo.return_value': {
                'id': self.CLIENT_ID,
                'serialNumber': '1234'},
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
        }

        mock_client = self.setup_driver(mock_conf=conf)
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE

        display_name = 'Foo Volume'
        volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                  'id': HPE3PARBaseDriver.CLONE_ID,
                  'display_name': display_name,
                  "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'],
                  'size': 2,
                  'status': 'available',
                  'host': HPE3PARBaseDriver.FAKE_HOST,
                  'source_volid': HPE3PARBaseDriver.VOLUME_ID}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume_name_3par = common._encode_name(volume['id'])

            loc_info = 'HPE3PARDriver:1234:CPG-FC1'
            instance_host = 'stack@3parfc1#CPG-FC1'
            host = {'host': instance_host,
                    'capabilities': {'location_info': loc_info}}

            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            # when the host and pool are the same we'll get None
            self.assertEqual((True, None), result)

            osv_matcher = 'osv-' + volume_name_3par

            expected_comment = Comment({
                "display_name": display_name,
                "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'],
                "volume_type_name": self.RETYPE_VOLUME_TYPE_2['name'],
                "vvs": self.RETYPE_VOLUME_TYPE_2['extra_specs']['vvs']
            })

            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': expected_comment,
                     'snapCPG': self.RETYPE_VOLUME_TYPE_2
                     ['extra_specs']['snap_cpg']}),
                mock.call.modifyVolume(
                    osv_matcher,
                    {'action': 6,
                     'userCPG': 'CPG-FC1',
                     'conversionOperation': 1,
                     'tuneOperation': 1}),
                mock.call.getTask(mock.ANY)
            ]
            mock_client.assert_has_calls(
                expected + self.standard_logout)

    def test_migrate_volume_diff_host(self):
        conf = {
            'getStorageSystemInfo.return_value': {
                'id': self.CLIENT_ID,
                'serialNumber': 'different'},
        }
        mock_client = self.setup_driver(mock_conf=conf)

        volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                  'id': HPE3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'volume_type_id': None,
                  'size': 2,
                  'status': 'available',
                  'host': HPE3PARBaseDriver.FAKE_HOST,
                  'source_volid': HPE3PARBaseDriver.VOLUME_ID}

        loc_info = 'HPE3PARDriver:1234:CPG-FC1'
        host = {'host': 'stack@3parfc1',
                'capabilities': {'location_info': loc_info}}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
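            # The mocked backend reports serialNumber 'different', which does
            # not match the '1234' embedded in location_info, so
            # migrate_volume() is expected to decline with (False, None) and
            # leave the copy to Cinder's generic migration path.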
            mock_create_client.return_value = mock_client
            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            self.assertEqual((False, None), result)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_diff_domain(self, _mock_volume_types):
        _mock_volume_types.return_value = self.volume_type

        conf = {
            'getStorageSystemInfo.return_value': {
                'id': self.CLIENT_ID,
                'serialNumber': '1234'},
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
        }

        mock_client = self.setup_driver(mock_conf=conf)
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE

        volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                  'id': HPE3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'volume_type_id': None,
                  'size': 2,
                  'status': 'available',
                  'host': HPE3PARBaseDriver.FAKE_HOST,
                  'source_volid': HPE3PARBaseDriver.VOLUME_ID}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume_name_3par = common._encode_name(volume['id'])

            loc_info = 'HPE3PARDriver:1234:CPG-FC1'
            host = {'host': 'stack@3parfc1#CPG-FC1',
                    'capabilities': {'location_info': loc_info}}

            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            self.assertEqual((True, None), result)

            osv_matcher = 'osv-' + volume_name_3par

            comment = Comment({"qos": {}, "display_name": "Foo Volume"})

            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': comment,
                     'snapCPG': HPE3PAR_CPG_SNAP}),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'CPG-FC1',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(mock.ANY),
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_attached(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                  'volume_type_id': None,
                  'id': HPE3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'size': 2,
                  'status': 'in-use',
                  'host': HPE3PARBaseDriver.FAKE_HOST,
                  'source_volid': HPE3PARBaseDriver.VOLUME_ID}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()

            volume_name_3par = common._encode_name(volume['id'])
            osv_matcher = 'osv-' + volume_name_3par

            loc_info = 'HPE3PARDriver:1234567:CPG-FC1'

            protocol = "FC"
            if self.properties['driver_volume_type'] == "iscsi":
                protocol = "iSCSI"

            host = {'host': 'stack@3parfc1',
                    'capabilities': {'location_info': loc_info,
                                     'storage_protocol': protocol}}

            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)

            new_comment = Comment({
                "qos": {},
                "retype_test": "test comment",
            })
            expected = [
                mock.call.modifyVolume(osv_matcher,
                                       {'comment': new_comment,
                                        'snapCPG': 'OpenStackCPGSnap'}),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'OpenStackCPG',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(1),
                mock.call.logout()
            ]
            mock_client.assert_has_calls(expected)

            self.assertIsNotNone(result)
            self.assertEqual((True, {'host': 'stack@3parfc1#OpenStackCPG'}),
                             result)
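    # NOTE: an in-use volume cannot be swapped out underneath an instance,
    # so the attached-migration test above expects the driver to retype the
    # volume in place and report the new host
    # ('stack@3parfc1#OpenStackCPG') back to Cinder instead.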
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_attached_diff_protocol(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        protocol = "OTHER"

        volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                  'volume_type_id': None,
                  'id': HPE3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'size': 2,
                  'status': 'in-use',
                  'host': HPE3PARBaseDriver.FAKE_HOST,
                  'source_volid': HPE3PARBaseDriver.VOLUME_ID}

        loc_info = 'HPE3PARDriver:1234567:CPG-FC1'
        host = {'host': 'stack@3parfc1',
                'capabilities': {'location_info': loc_info,
                                 'storage_protocol': protocol}}

        result = self.driver.migrate_volume(context.get_admin_context(),
                                            volume, host)
        self.assertIsNotNone(result)
        self.assertEqual((False, None), result)

        expected = []
        mock_client.assert_has_calls(expected)

    def test_update_migrated_volume(self):
        mock_client = self.setup_driver()
        fake_old_volume = {'id': self.VOLUME_ID}
        provider_location = 'foo'
        fake_new_volume = {'id': self.CLONE_ID,
                           '_name_id': self.CLONE_ID,
                           'provider_location': provider_location}
        original_volume_status = 'available'

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            actual_update = self.driver.update_migrated_volume(
                context.get_admin_context(), fake_old_volume,
                fake_new_volume, original_volume_status)

            expected_update = {'_name_id': None,
                               'provider_location': None}
            self.assertEqual(expected_update, actual_update)

    def test_update_migrated_volume_attached(self):
        mock_client = self.setup_driver()
        fake_old_volume = {'id': self.VOLUME_ID}
        provider_location = 'foo'
        fake_new_volume = {'id': self.CLONE_ID,
                           '_name_id': self.CLONE_ID,
                           'provider_location': provider_location}
        original_volume_status = 'in-use'

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            actual_update = self.driver.update_migrated_volume(
                context.get_admin_context(), fake_old_volume,
                fake_new_volume, original_volume_status)

            expected_update = {'_name_id': fake_new_volume['_name_id'],
                               'provider_location': provider_location}
            self.assertEqual(expected_update, actual_update)

    def test_attach_volume(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.attach_volume(context.get_admin_context(),
                                      self.volume,
                                      'abcdef',
                                      'newhost',
                                      '/dev/vdb')

            expected = [
                mock.call.setVolumeMetaData(
                    self.VOLUME_3PAR_NAME,
                    'HPQ-CS-instance_uuid',
                    'abcdef')]
            mock_client.assert_has_calls(expected)

            # test the exception
            mock_client.setVolumeMetaData.side_effect = Exception('Custom ex')
            self.assertRaises(exception.CinderException,
                              self.driver.attach_volume,
                              context.get_admin_context(),
                              self.volume,
                              'abcdef',
                              'newhost',
                              '/dev/vdb')

    def test_detach_volume(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.detach_volume(context.get_admin_context(),
                                      self.volume, None)
            expected = [
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, 'HPQ-CS-instance_uuid')]
            mock_client.assert_has_calls(expected)

    def test_create_snapshot(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_snapshot(self.snapshot)

            comment = Comment({
                "volume_id": "761fc5e5-5191-4ec7-aeba-33e36de44156",
                "display_name": "fakesnap",
                "description": "test description name",
                "volume_name":
                "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
            })
            expected = [
                mock.call.createSnapshot(
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    'osv-dh-F5VGRTseuujPjbeRBVg',
                    {
                        'comment': comment,
                        'readOnly': True})]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_delete_snapshot(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.delete_snapshot(self.snapshot)

            expected = [
                mock.call.deleteVolume('oss-L4I73ONuTci9Fd4ceij-MQ')]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_delete_snapshot_in_use(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_snapshot(self.snapshot)
            self.driver.create_volume_from_snapshot(self.volume,
                                                    self.snapshot)

            ex = hpeexceptions.HTTPConflict("In use")
            mock_client.deleteVolume = mock.Mock(side_effect=ex)

            # Deleting the snapshot that a volume is dependent on should fail
            self.assertRaises(exception.SnapshotIsBusy,
                              self.driver.delete_snapshot,
                              self.snapshot)

    def test_delete_snapshot_not_found(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_snapshot(self.snapshot)

            try:
                ex = hpeexceptions.HTTPNotFound("not found")
                mock_client.deleteVolume = mock.Mock(side_effect=ex)
                self.driver.delete_snapshot(self.snapshot)
            except Exception:
                self.fail("Deleting a snapshot that is missing should act "
                          "as if it worked.")

    def test_create_volume_from_snapshot(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            model_update = self.driver.create_volume_from_snapshot(
                self.volume, self.snapshot)
            self.assertEqual({}, model_update)

            comment = Comment({
                "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",
                "display_name": "Foo Volume",
                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
            })
            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    {
                        'comment': comment,
                        'readOnly': False})]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            volume = self.volume.copy()
            volume['size'] = 1
            self.assertRaises(exception.InvalidInput,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)

    def test_create_volume_from_snapshot_and_extend(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        conf = {
            'getTask.return_value': {
                'status': 1},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {}
        }

        mock_client = self.setup_driver(mock_conf=conf)

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()

            volume = self.volume.copy()
            volume['size'] = self.volume['size'] + 10

            model_update = self.driver.create_volume_from_snapshot(
                volume, self.snapshot)
            self.assertIsNone(model_update)

            comment = Comment({
                "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",
                "display_name": "Foo Volume",
                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
            })

            volume_name_3par = common._encode_name(volume['id'])
            osv_matcher = 'osv-' + volume_name_3par
            omv_matcher = 'omv-' + volume_name_3par

            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    {
                        'comment': comment,
                        'readOnly': False}),
                mock.call.copyVolume(
                    osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY),
                mock.call.getTask(mock.ANY),
                mock.call.getVolume(osv_matcher),
                mock.call.deleteVolume(osv_matcher),
                mock.call.modifyVolume(omv_matcher,
                                       {'newName': osv_matcher}),
                mock.call.growVolume(osv_matcher, 10 * 1024)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_create_volume_from_snapshot_and_extend_with_qos(
            self, _mock_volume_types):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        conf = {
            'getTask.return_value': {
                'status': 1},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {}
        }

        mock_client = self.setup_driver(mock_conf=conf)
        _mock_volume_types.return_value = {
            'name': 'gold',
            'extra_specs': {
                'cpg': HPE3PAR_CPG_QOS,
                'snap_cpg': HPE3PAR_CPG_SNAP,
                'vvs_name': self.VVS_NAME,
                'qos': self.QOS,
                'tpvv': True,
                'tdvv': False,
                'volume_type': self.volume_type}}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()

            volume = self.volume_qos.copy()
            volume['size'] = self.volume['size'] + 10

            model_update = self.driver.create_volume_from_snapshot(
                volume, self.snapshot)
            self.assertIsNone(model_update)

            comment = Comment({
                "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",
                "display_name": "Foo Volume",
                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
            })

            volume_name_3par = common._encode_name(volume['id'])
            osv_matcher = 'osv-' + volume_name_3par
            omv_matcher = 'omv-' + volume_name_3par

            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    {
                        'comment': comment,
                        'readOnly': False}),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.copyVolume(
                    osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY),
                mock.call.getTask(mock.ANY),
                mock.call.getVolume(osv_matcher),
                mock.call.deleteVolume(osv_matcher),
                mock.call.modifyVolume(omv_matcher,
                                       {'newName': osv_matcher}),
                mock.call.growVolume(osv_matcher, 10 * 1024)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_create_volume_from_snapshot_and_extend_copy_fail(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        conf = {
            'getTask.return_value': {
                'status': 4,
                'failure message': 'out of disk space'},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {}
        }

        mock_client = self.setup_driver(mock_conf=conf)

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            volume = self.volume.copy()
            volume['size'] = self.volume['size'] + 10

            self.assertRaises(exception.CinderException,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_create_volume_from_snapshot_qos(self, _mock_volume_types):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            _mock_volume_types.return_value = {
                'name': 'gold',
                'extra_specs': {
                    'cpg': HPE3PAR_CPG,
                    'snap_cpg': HPE3PAR_CPG_SNAP,
                    'vvs_name': self.VVS_NAME,
                    'qos': self.QOS,
                    'tpvv': True,
                    'tdvv': False,
                    'volume_type': self.volume_type}}
            self.driver.create_volume_from_snapshot(
                self.volume_qos,
                self.snapshot)

            comment = Comment({
                "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",
                "display_name": "Foo Volume",
                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
            })
            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    {
                        'comment': comment,
                        'readOnly': False})]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            volume = self.volume.copy()
            volume['size'] = 1
            self.assertRaises(exception.InvalidInput,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)

    def test_terminate_connection(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.terminate_connection(
                self.volume,
                self.connector,
                force=True)

            expected = [
                mock.call.queryHost(iqns=[self.connector['initiator']]),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.deleteVLUN(
                    self.VOLUME_3PAR_NAME,
                    None,
                    self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.deleteHost(self.FAKE_HOST),
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, CHAP_USER_KEY),
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_update_volume_key_value_pair(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        key = 'a'
        value = 'b'

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            common.update_volume_key_value_pair(
                self.volume,
                key,
                value)

            expected = [
                mock.call.setVolumeMetaData(
                    self.VOLUME_3PAR_NAME, key, value)]
            mock_client.assert_has_calls(expected)

            # check exception
            mock_client.setVolumeMetaData.side_effect = Exception('fake')
            self.assertRaises(exception.VolumeBackendAPIException,
                              common.update_volume_key_value_pair,
                              self.volume,
                              None,
                              'b')

    def test_clear_volume_key_value_pair(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
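            # Both key/value helpers are thin wrappers over the WSAPI volume
            # metadata calls, so the only behaviour worth asserting is the
            # setVolumeMetaData / removeVolumeMetaData traffic on the mock.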
            mock_create_client.return_value = mock_client
            key = 'a'
            common = self.driver._login()
            common.clear_volume_key_value_pair(self.volume, key)

            expected = [
                mock.call.removeVolumeMetaData(self.VOLUME_3PAR_NAME, key)]
            mock_client.assert_has_calls(expected)

    def test_extend_volume(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            grow_size = 3
            old_size = self.volume['size']
            new_size = old_size + grow_size
            self.driver.extend_volume(self.volume, str(new_size))
            growth_size_mib = grow_size * units.Ki

            expected = [
                mock.call.growVolume(self.VOLUME_3PAR_NAME, growth_size_mib)]
            mock_client.assert_has_calls(expected)

    def test_extend_volume_non_base(self):
        extend_ex = hpeexceptions.HTTPForbidden(error={'code': 150})
        conf = {
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {},
            # Throw an exception first time only
            'growVolume.side_effect': [extend_ex, None],
        }

        mock_client = self.setup_driver(mock_conf=conf)

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            grow_size = 3
            old_size = self.volume['size']
            new_size = old_size + grow_size
            self.driver.extend_volume(self.volume, str(new_size))

            self.assertEqual(2, mock_client.growVolume.call_count)

    def test_extend_volume_non_base_failure(self):
        extend_ex = hpeexceptions.HTTPForbidden(error={'code': 150})
        conf = {
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {},
            # Always fail
            'growVolume.side_effect': extend_ex
        }

        mock_client = self.setup_driver(mock_conf=conf)

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            grow_size = 3
            old_size = self.volume['size']
            new_size = old_size + grow_size
            self.assertRaises(hpeexceptions.HTTPForbidden,
                              self.driver.extend_volume,
                              self.volume,
                              str(new_size))

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_extend_volume_replicated(self, _mock_volume_types):
        # Managed vs. unmanaged and periodic vs. sync are not relevant when
        # extending a replicated volume type.
        # We will use managed and periodic as the default.
        conf = self.setup_configuration()
        self.replication_targets[0]['replication_mode'] = 'periodic'
        conf.replication_device = self.replication_targets
        mock_client = self.setup_driver(config=conf)
        mock_client.getStorageSystemInfo.return_value = (
            {'id': self.CLIENT_ID})

        _mock_volume_types.return_value = {
            'name': 'replicated',
            'extra_specs': {
                'cpg': HPE3PAR_CPG,
                'snap_cpg': HPE3PAR_CPG_SNAP,
                'replication_enabled': ' True',
                'replication:mode': 'periodic',
                'replication:sync_period': '900',
                'volume_type': self.volume_type_replicated}}

        with mock.patch.object(
                hpecommon.HPE3PARCommon,
                '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client

            grow_size = 3
            old_size = self.volume_replicated['size']
            new_size = old_size + grow_size

            # Test a successful extend.
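            # Remote copy must be stopped around the grow and restarted even
            # on failure; the requested growth is sent to the array in MiB
            # (grow_size GiB * 1024).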
            self.driver.extend_volume(
                self.volume_replicated,
                new_size)
            expected = [
                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME),
                mock.call.growVolume(self.VOLUME_3PAR_NAME,
                                     grow_size * 1024),
                mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.get_id_login +
                self.standard_logout +
                self.standard_login +
                expected +
                self.standard_logout)

            # Test an unsuccessful extend. growVolume will fail but remote
            # copy should still be started again.
            mock_client.growVolume.side_effect = (
                hpeexceptions.HTTPForbidden("Error: The volume cannot be "
                                            "extended."))
            self.assertRaises(
                hpeexceptions.HTTPForbidden,
                self.driver.extend_volume,
                self.volume_replicated,
                new_size)
            expected = [
                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME),
                mock.call.growVolume(self.VOLUME_3PAR_NAME,
                                     grow_size * 1024),
                mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.get_id_login +
                self.standard_logout +
                self.standard_login +
                expected +
                self.standard_logout)

    def test_get_ports(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getPorts.return_value = {
            'members': [
                {'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
                 'protocol': 2,
                 'IPAddr': '10.10.120.252',
                 'linkState': 4,
                 'device': [],
                 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
                 'mode': 2,
                 'HWAddr': '2C27D75375D2',
                 'type': 8},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'protocol': 2,
                 'IPAddr': '10.10.220.253',
                 'linkState': 4,
                 'device': [],
                 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
                 'mode': 2,
                 'HWAddr': '2C27D75375D6',
                 'type': 8},
                {'portWWN': '20210002AC00383D',
                 'protocol': 1,
                 'linkState': 4,
                 'mode': 2,
                 'device': ['cage2'],
                 'nodeWWN': '20210002AC00383D',
                 'type': 2,
                 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            ports = common.get_ports()['members']
            self.assertEqual(3, len(ports))

    def test_get_by_qos_spec_with_scoping(self):
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS)
            type_ref = volume_types.create(self.ctxt,
                                           "type1", {"qos:maxIOPS": "100",
                                                     "qos:maxBWS": "50",
                                                     "qos:minIOPS": "10",
                                                     "qos:minBWS": "20",
                                                     "qos:latency": "5",
                                                     "qos:priority": "high"})
            qos_specs.associate_qos_with_type(self.ctxt,
                                              qos_ref['id'],
                                              type_ref['id'])
            type_ref = volume_types.get_volume_type(self.ctxt,
                                                    type_ref['id'])
            qos = common._get_qos_by_volume_type(type_ref)
            self.assertEqual({'maxIOPS': '1000',
                              'maxBWS': '50',
                              'minIOPS': '100',
                              'minBWS': '25',
                              'latency': '25',
                              'priority': 'low'}, qos)

    def test_get_by_qos_spec(self):
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            qos_ref = qos_specs.create(
                self.ctxt,
                'qos-specs-1',
                self.QOS_SPECS)
            type_ref = volume_types.create(self.ctxt,
                                           "type1", {"qos:maxIOPS": "100",
                                                     "qos:maxBWS": "50",
                                                     "qos:minIOPS": "10",
                                                     "qos:minBWS": "20",
                                                     "qos:latency": "5",
                                                     "qos:priority": "high"})
            qos_specs.associate_qos_with_type(self.ctxt,
                                              qos_ref['id'],
                                              type_ref['id'])
            type_ref = volume_types.get_volume_type(self.ctxt,
                                                    type_ref['id'])
            qos = common._get_qos_by_volume_type(type_ref)
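            # The qos_specs associated with the type take precedence over the
            # scoped "qos:" extra specs, so the expected values below come
            # from the qos_specs object, not the "100"/"50" set on type1.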
self.assertEqual({'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'low'}, qos) def test_get_by_qos_by_type_only(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() type_ref = volume_types.create(self.ctxt, "type1", {"qos:maxIOPS": "100", "qos:maxBWS": "50", "qos:minIOPS": "10", "qos:minBWS": "20", "qos:latency": "5", "qos:priority": "high"}) type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) qos = common._get_qos_by_volume_type(type_ref) self.assertEqual({'maxIOPS': '100', 'maxBWS': '50', 'minIOPS': '10', 'minBWS': '20', 'latency': '5', 'priority': 'high'}, qos) def test_create_vlun(self): host = 'fake-host' lun_id = 11 nsp = '1:2:3' mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client location = ("%(name)s,%(lunid)s,%(host)s,%(nsp)s" % {'name': self.VOLUME_NAME, 'lunid': lun_id, 'host': host, 'nsp': nsp}) mock_client.createVLUN.return_value = location expected_info = {'volume_name': self.VOLUME_NAME, 'lun_id': lun_id, 'host_name': host, 'nsp': nsp} common = self.driver._login() vlun_info = common._create_3par_vlun( self.VOLUME_NAME, host, nsp) self.assertEqual(expected_info, vlun_info) location = ("%(name)s,%(lunid)s,%(host)s" % {'name': self.VOLUME_NAME, 'lunid': lun_id, 'host': host}) mock_client.createVLUN.return_value = location expected_info = {'volume_name': self.VOLUME_NAME, 'lun_id': lun_id, 'host_name': host} vlun_info = common._create_3par_vlun( self.VOLUME_NAME, host, None) self.assertEqual(expected_info, vlun_info) def test__get_existing_volume_ref_name(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) ums_matcher = common._get_3par_ums_name(self.volume['id']) existing_ref = {'source-name': unm_matcher} result = common._get_existing_volume_ref_name(existing_ref) self.assertEqual(unm_matcher, result) existing_ref = {'source-id': self.volume['id']} result = common._get_existing_volume_ref_name(existing_ref) self.assertEqual(unm_matcher, result) existing_ref = {'source-id': self.volume['id']} result = common._get_existing_volume_ref_name(existing_ref, True) self.assertEqual(ums_matcher, result) existing_ref = {'bad-key': 'foo'} self.assertRaises( exception.ManageExistingInvalidReference, common._get_existing_volume_ref_name, existing_ref) @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing(self, _mock_volume_types): _mock_volume_types.return_value = self.volume_type mock_client = self.setup_driver() new_comment = Comment({ "display_name": "Foo Volume", "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", "type": "OpenStack", }) volume = {'display_name': None, 'host': self.FAKE_CINDER_HOST, 'volume_type': 'gold', 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') 
as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = common._get_3par_vol_name(volume['id']) vvs_matcher = common._get_3par_vvs_name(volume['id']) existing_ref = {'source-name': unm_matcher} expected_obj = {'display_name': 'Foo Volume'} obj = self.driver.manage_existing(volume, existing_ref) expected_manage = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment}), ] retype_comment_qos = Comment({ "display_name": "Foo Volume", "volume_type_name": self.volume_type['name'], "volume_type_id": self.volume_type['id'], "qos": { 'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'low' } }) expected_snap_cpg = HPE3PAR_CPG_SNAP expected_retype_modify = [ mock.call.modifyVolume(osv_matcher, {'comment': retype_comment_qos, 'snapCPG': expected_snap_cpg}), mock.call.deleteVolumeSet(vvs_matcher), ] expected_retype_specs = [ mock.call.createVolumeSet(vvs_matcher, None), mock.call.createQoSRules( vvs_matcher, {'ioMinGoal': 100, 'ioMaxLimit': 1000, 'bwMinGoalKB': 25600, 'priority': 1, 'latencyGoal': 25, 'bwMaxLimitKB': 51200}), mock.call.addVolumeToVolumeSet(vvs_matcher, osv_matcher), mock.call.modifyVolume( osv_matcher, {'action': 6, 'userCPG': HPE3PAR_CPG, 'conversionOperation': 1, 'tuneOperation': 1}), mock.call.getTask(1) ] mock_client.assert_has_calls(self.standard_login + expected_manage) mock_client.assert_has_calls(expected_retype_modify) mock_client.assert_has_calls( expected_retype_specs + self.standard_logout) self.assertEqual(expected_obj, obj) @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing_with_no_snap_cpg(self, _mock_volume_types): _mock_volume_types.return_value = self.volume_type mock_client = self.setup_driver() new_comment = Comment({ "display_name": "Foo Volume", "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", "type": "OpenStack", }) volume = {'display_name': None, 'host': 'my-stack1@3parxxx#CPGNOTUSED', 'volume_type': 'gold', 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.return_value = self.MV_INFO_WITH_NO_SNAPCPG mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = common._get_3par_vol_name(volume['id']) existing_ref = {'source-name': unm_matcher} expected_obj = {'display_name': 'Foo Volume'} obj = self.driver.manage_existing(volume, existing_ref) expected_manage = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume( existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment, # manage_existing() should be setting # blank snapCPG to the userCPG 'snapCPG': 'testUserCpg0'}) ] mock_client.assert_has_calls(self.standard_login + expected_manage) self.assertEqual(expected_obj, obj) @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing_vvs(self, _mock_volume_types): test_volume_type = self.RETYPE_VOLUME_TYPE_2 vvs = test_volume_type['extra_specs']['vvs'] _mock_volume_types.return_value = 
test_volume_type mock_client = self.setup_driver() mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE id = '007abcde-7579-40bc-8f90-a20b3902283e' new_comment = Comment({ "display_name": "Test Volume", "name": ("volume-%s" % id), "volume_id": id, "type": "OpenStack", }) volume = {'display_name': 'Test Volume', 'host': 'my-stack1@3parxxx#CPGNOTUSED', 'volume_type': 'gold', 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': id} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = common._get_3par_vol_name(volume['id']) vvs_matcher = common._get_3par_vvs_name(volume['id']) existing_ref = {'source-name': unm_matcher} obj = self.driver.manage_existing(volume, existing_ref) expected_obj = {'display_name': 'Test Volume'} expected_manage = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment}) ] retype_comment_vvs = Comment({ "display_name": "Foo Volume", "volume_type_name": test_volume_type['name'], "volume_type_id": test_volume_type['id'], "vvs": vvs }) expected_retype = [ mock.call.modifyVolume(osv_matcher, {'comment': retype_comment_vvs, 'snapCPG': 'OpenStackCPGSnap'}), mock.call.deleteVolumeSet(vvs_matcher), mock.call.addVolumeToVolumeSet(vvs, osv_matcher), mock.call.modifyVolume(osv_matcher, {'action': 6, 'userCPG': 'CPGNOTUSED', 'conversionOperation': 1, 'tuneOperation': 1}), mock.call.getTask(1) ] mock_client.assert_has_calls(self.standard_login + expected_manage) mock_client.assert_has_calls( expected_retype + self.standard_logout) self.assertEqual(expected_obj, obj) def test_manage_existing_no_volume_type(self): mock_client = self.setup_driver() comment = repr({"display_name": "Foo Volume"}) new_comment = Comment({ "type": "OpenStack", "display_name": "Foo Volume", "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", }) volume = {'display_name': None, 'volume_type': None, 'volume_type_id': None, 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.return_value = {'comment': comment, 'userCPG': 'testUserCpg0'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = common._get_3par_vol_name(volume['id']) existing_ref = {'source-name': unm_matcher} obj = self.driver.manage_existing(volume, existing_ref) expected_obj = {'display_name': 'Foo Volume'} expected = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment, # manage_existing() should be setting # blank snapCPG to the userCPG 'snapCPG': 'testUserCpg0'}) ] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertEqual(expected_obj, obj) volume['display_name'] = 'Test Volume' obj = self.driver.manage_existing(volume, existing_ref) expected_obj = {'display_name': 'Test Volume'} expected = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': 
new_comment,
                    # manage_existing() should be setting
                    # blank snapCPG to the userCPG
                    'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            self.assertEqual(expected_obj, obj)

            mock_client.getVolume.return_value = {'userCPG': 'testUserCpg0'}
            volume['display_name'] = None
            common = self.driver._login()

            obj = self.driver.manage_existing(volume, existing_ref)

            expected_obj = {'display_name': None}
            expected = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(
                    existing_ref['source-name'],
                    {'newName': osv_matcher,
                     'comment': new_comment,
                     # manage_existing() should be setting
                     # blank snapCPG to the userCPG
                     'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            self.assertEqual(expected_obj, obj)

    def test_manage_existing_invalid_input(self):
        mock_client = self.setup_driver()
        volume = {'display_name': None,
                  'volume_type': None,
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}

        mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake')

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            existing_ref = {'source-name': unm_matcher}

            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing,
                              volume=volume,
                              existing_ref=existing_ref)

            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_manage_existing_volume_type_exception(self):
        mock_client = self.setup_driver()
        comment = repr({"display_name": "Foo Volume"})
        volume = {'display_name': None,
                  'volume_type': 'gold',
                  'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}

        mock_client.getVolume.return_value = {'comment': comment}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            existing_ref = {'source-name': unm_matcher}

            self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                              self.driver.manage_existing,
                              volume=volume,
                              existing_ref=existing_ref)

            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_manage_existing_retype_exception(self, _mock_volume_types):
        mock_client = self.setup_driver()
        _mock_volume_types.return_value = {
            'name': 'gold',
            'id': 'gold-id',
            'extra_specs': {
                'cpg': HPE3PAR_CPG,
                'snap_cpg': HPE3PAR_CPG_SNAP,
                'vvs_name': self.VVS_NAME,
                'qos': self.QOS,
                'tpvv': True,
                'tdvv': False,
                'volume_type': self.volume_type}}

        volume = {'display_name': None,
                  'host': 'stack1@3pariscsi#POOL1',
                  'volume_type': 'gold',
                  'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}

        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        mock_client.getCPG.side_effect = [
            {'domain': 'domain1'},
            {'domain': 'domain2'},
            {'domain': 'domain3'},
        ]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            osv_matcher = common._get_3par_vol_name(volume['id'])
            existing_ref = {'source-name': unm_matcher}

            self.assertRaises(exception.Invalid3PARDomain,
                              self.driver.manage_existing,
                              volume=volume,
                              existing_ref=existing_ref)

            expected = [
                mock.call.getVolume(unm_matcher),
                mock.call.modifyVolume(
                    unm_matcher, {
                        'newName': osv_matcher,
                        'comment': mock.ANY}),
                mock.call.getCPG('POOL1'),
                mock.call.getVolume(osv_matcher),
                mock.call.getCPG('testUserCpg0'),
                mock.call.getCPG('POOL1'),
                mock.call.modifyVolume(
                    osv_matcher, {'newName': unm_matcher,
                                  'comment': self.MANAGE_VOLUME_INFO
                                  ['comment']})
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_manage_existing_snapshot(self):
        mock_client = self.setup_driver()

        new_comment = Comment({
            "display_name": "snap",
            "volume_name": self.VOLUME_NAME,
            "volume_id": self.VOLUME_ID,
            "description": "",
        })

        volume = {'id': self.VOLUME_ID}

        snapshot = {
            'display_name': None,
            'id': self.SNAPSHOT_ID,
            'volume': volume,
        }

        mock_client.getVolume.return_value = {
            "comment": "{'display_name': 'snap'}",
            'copyOf': self.VOLUME_NAME_3PAR,
        }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            oss_matcher = common._get_3par_snap_name(snapshot['id'])
            ums_matcher = common._get_3par_ums_name(snapshot['id'])

            existing_ref = {'source-name': ums_matcher}
            expected_obj = {'display_name': 'snap'}

            obj = self.driver.manage_existing_snapshot(snapshot, existing_ref)

            expected = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': oss_matcher,
                                        'comment': new_comment}),
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            self.assertEqual(expected_obj, obj)

    def test_manage_existing_snapshot_invalid_parent(self):
        mock_client = self.setup_driver()

        volume = {'id': self.VOLUME_ID}

        snapshot = {
            'display_name': None,
            'id': '007dbfce-7579-40bc-8f90-a20b3902283e',
            'volume': volume,
        }

        mock_client.getVolume.return_value = {
            "comment": "{'display_name': 'snap'}",
            'copyOf': 'fake-invalid',
        }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            ums_matcher = common._get_3par_ums_name(snapshot['id'])

            existing_ref = {'source-name': ums_matcher}

            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing_snapshot,
                              snapshot=snapshot,
                              existing_ref=existing_ref)

            expected = [
                mock.call.getVolume(existing_ref['source-name']),
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_manage_existing_snapshot_failed_over_volume(self):
        mock_client = self.setup_driver()

        volume = {
            'id': self.VOLUME_ID,
            'replication_status': 'failed-over',
        }

        snapshot = {
            'display_name': None,
            'id': '007dbfce-7579-40bc-8f90-a20b3902283e',
            'volume': volume,
        }

        mock_client.getVolume.return_value = {
            "comment": "{'display_name': 'snap'}",
            'copyOf': self.VOLUME_NAME_3PAR,
        }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            ums_matcher = common._get_3par_ums_name(snapshot['id'])

            existing_ref = {'source-name': ums_matcher}

            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing_snapshot,
                              snapshot=snapshot,
                              existing_ref=existing_ref)

    def test_manage_existing_get_size(self):
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'sizeMiB': 2048}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            volume = {}
            existing_ref = {'source-name': unm_matcher}

            size = self.driver.manage_existing_get_size(volume, existing_ref)

            expected_size = 2
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            self.assertEqual(expected_size, size)
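    # NOTE: a minimal sketch, not taken from the driver source, of the size
    # convention the get-size tests above and below rely on: the 3PAR WSAPI
    # reports 'sizeMiB', while Cinder sizes are whole GiB, so the driver is
    # expected to convert along the lines of
    #
    #     size_gib = int(math.ceil(size_mib / 1024.0))  # 2048 MiB -> 2 GiB
    #
    # The tests only pin the 2048 MiB -> 2 GiB case; the exact rounding
    # shown here is an assumption.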
    def test_manage_existing_get_size_invalid_reference(self):
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            volume = {}
            existing_ref = {'source-name': self.VOLUME_3PAR_NAME}

            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)

            mock_client.assert_has_calls(
                self.standard_login + self.standard_logout)

            existing_ref = {}

            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)

            mock_client.assert_has_calls(
                self.standard_login + self.standard_logout)

    def test_manage_existing_get_size_invalid_input(self):
        mock_client = self.setup_driver()
        mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake')

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            volume = {}
            existing_ref = {'source-name': unm_matcher}

            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)

            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_manage_existing_snapshot_get_size(self):
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'sizeMiB': 2048}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            ums_matcher = common._get_3par_ums_name(self.snapshot['id'])
            snapshot = {}
            existing_ref = {'source-name': ums_matcher}

            size = self.driver.manage_existing_snapshot_get_size(
                snapshot, existing_ref)

            expected_size = 2
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            self.assertEqual(expected_size, size)

    def test_manage_existing_snapshot_get_size_invalid_reference(self):
        mock_client = self.setup_driver()

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            snapshot = {}
            existing_ref = {'source-name': self.SNAPSHOT_3PAR_NAME}

            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.driver.manage_existing_snapshot_get_size,
                              snapshot=snapshot,
                              existing_ref=existing_ref)

            mock_client.assert_has_calls(
                self.standard_login + self.standard_logout)

            existing_ref = {}

            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.driver.manage_existing_snapshot_get_size,
                              snapshot=snapshot,
                              existing_ref=existing_ref)

            mock_client.assert_has_calls(
                self.standard_login + self.standard_logout)

    def test_manage_existing_snapshot_get_size_invalid_input(self):
        mock_client = self.setup_driver()
        mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake')

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            ums_matcher = common._get_3par_ums_name(self.snapshot['id'])
            snapshot = {}
            existing_ref = {'source-name': ums_matcher}

            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing_snapshot_get_size,
                              snapshot=snapshot,
                              existing_ref=existing_ref)

            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_unmanage(self):
        mock_client = self.setup_driver()
        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.driver.unmanage(self.volume)

            osv_matcher = common._get_3par_vol_name(self.volume['id'])
            unm_matcher = common._get_3par_unm_name(self.volume['id'])

            expected = [
                mock.call.modifyVolume(osv_matcher, {'newName': unm_matcher})
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_unmanage_snapshot(self):
        mock_client = self.setup_driver()
        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.driver.unmanage_snapshot(self.snapshot)

            oss_matcher = common._get_3par_snap_name(self.snapshot['id'])
            ums_matcher = common._get_3par_ums_name(self.snapshot['id'])

            expected = [
                mock.call.modifyVolume(oss_matcher, {'newName': ums_matcher})
            ]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_unmanage_snapshot_failed_over_volume(self):
        mock_client = self.setup_driver()

        volume = {'replication_status': 'failed-over', }
        snapshot = {'id': self.SNAPSHOT_ID,
                    'display_name': 'fake_snap',
                    'volume': volume, }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client

            self.assertRaises(exception.SnapshotIsBusy,
                              self.driver.unmanage_snapshot,
                              snapshot=snapshot)

    def test__safe_hostname(self):
        long_hostname = "abc123abc123abc123abc123abc123abc123"
        fixed_hostname = "abc123abc123abc123abc123abc123a"
        common = hpecommon.HPE3PARCommon(None)
        safe_host = common._safe_hostname(long_hostname)
        self.assertEqual(fixed_hostname, safe_host)
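    # NOTE: test__safe_hostname above only pins the length limit: 3PAR host
    # names may be at most 31 characters, so the 36-character hostname is
    # cut to its first 31 characters. A hypothetical sketch of just that
    # behaviour (any further sanitising by _safe_hostname is not asserted):
    #
    #     safe_host = hostname[:31]  # 36-char name -> first 31 chars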
    def test_create_consistency_group(self):
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}

        comment = Comment({
            'display_name': 'cg_name',
            'consistency_group_id': self.CONSIS_GROUP_ID,
            'description': 'consistency group',
        })

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            mock_client.getCPG.return_value = {'domain': None}

            # create a consistency group
            group = self.fake_consistencygroup_object()
            self.driver.create_consistencygroup(context.get_admin_context(),
                                                group)

            expected = [
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.createVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    domain=None,
                    comment=comment)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

    def test_create_consistency_group_from_src(self):
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
        volume = self.volume

        cgsnap_comment = Comment({
            "consistency_group_id": "6044fedf-c889-4752-900f-2039d247a5df",
            "description": "cgsnapshot",
            "cgsnapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2",
        })

        cgsnap_optional = (
            {'comment': cgsnap_comment,
             'readOnly': False})

        cg_comment = Comment({
            'display_name': 'cg_name',
            'consistency_group_id': self.CONSIS_GROUP_ID,
            'description': 'consistency group'})

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            mock_client.getCPG.return_value = {'domain': None}

            # create a consistency group
            group = self.fake_consistencygroup_object()
            self.driver.create_consistencygroup(context.get_admin_context(),
                                                group)

            expected = [
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.createVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    domain=None,
                    comment=cg_comment)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # add a volume to the consistency group
            self.driver.update_consistencygroup(context.get_admin_context(),
                                                group,
                                                add_volumes=[volume],
                                                remove_volumes=[])

            expected = [
                mock.call.addVolumeToVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    self.VOLUME_NAME_3PAR)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # create a snapshot of the consistency group
            cgsnapshot = self.fake_cgsnapshot_object()
            self.driver.create_cgsnapshot(context.get_admin_context(),
                                          cgsnapshot, [])

            expected = [
                mock.call.createSnapshotOfVolumeSet(
                    self.CGSNAPSHOT_BASE_NAME + "-@count@",
                    self.CONSIS_GROUP_NAME,
                    optional=cgsnap_optional)]

            # create a consistency group from the cgsnapshot
            self.driver.create_consistencygroup_from_src(
                context.get_admin_context(), group,
                [volume], cgsnapshot=cgsnapshot,
                snapshots=[self.snapshot])

            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

    def test_create_consistency_group_from_src_cg(self):
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
        volume = self.volume
        source_volume = self.volume_src_cg

        cgsnap_optional = (
            {'expirationHours': 1})

        cg_comment = Comment({
            'display_name': 'cg_name',
            'consistency_group_id': self.CONSIS_GROUP_ID,
            'description': 'consistency group'})

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            mock_client.getCPG.return_value = {'domain': None}

            group = self.fake_consistencygroup_object()
            source_group = self.fake_consistencygroup_object(
                cg_id=self.SRC_CONSIS_GROUP_ID)

            expected = [
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.createVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    domain=None,
                    comment=cg_comment),
                mock.call.createSnapshotOfVolumeSet(
                    mock.ANY,
                    self.SRC_CONSIS_GROUP_NAME,
                    optional=cgsnap_optional),
                mock.call.copyVolume(
                    mock.ANY,
                    self.VOLUME_NAME_3PAR,
                    HPE3PAR_CPG,
                    {'snapCPG': HPE3PAR_CPG, 'online': True}),
                mock.call.addVolumeToVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    self.VOLUME_NAME_3PAR)]

            # Create a consistency group from a source consistency group.
            self.driver.create_consistencygroup_from_src(
                context.get_admin_context(), group,
                [volume], source_cg=source_group,
                source_vols=[source_volume])

            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

    def test_delete_consistency_group(self):
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}

        comment = Comment({
            'display_name': 'cg_name',
            'consistency_group_id': self.CONSIS_GROUP_ID,
            'description': 'consistency group'})

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            mock_client.getCPG.return_value = {'domain': None}

            # create a consistency group
            group = self.fake_consistencygroup_object()
            self.driver.create_consistencygroup(context.get_admin_context(),
                                                group)

            expected = [
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.createVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    domain=None,
                    comment=comment)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # remove the consistency group
            group.status = fields.ConsistencyGroupStatus.DELETING
            self.driver.delete_consistencygroup(context.get_admin_context(),
                                                group, [])

            expected = [
                mock.call.deleteVolumeSet(
                    self.CONSIS_GROUP_NAME)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

    def test_update_consistency_group_add_vol(self):
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
        volume = self.volume

        comment = Comment({
            'display_name': 'cg_name',
            'consistency_group_id': self.CONSIS_GROUP_ID,
            'description': 'consistency group'})

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            mock_client.getCPG.return_value = {'domain': None}

            # create a consistency group
            group = self.fake_consistencygroup_object()
            self.driver.create_consistencygroup(context.get_admin_context(),
                                                group)

            expected = [
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.createVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    domain=None,
                    comment=comment)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # add a volume to the consistency group
            self.driver.update_consistencygroup(context.get_admin_context(),
                                                group,
                                                add_volumes=[volume],
                                                remove_volumes=[])

            expected = [
                mock.call.addVolumeToVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    self.VOLUME_NAME_3PAR)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

    def test_update_consistency_group_remove_vol(self):
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
        volume = self.volume

        comment = Comment({
            'display_name': 'cg_name',
            'consistency_group_id': self.CONSIS_GROUP_ID,
            'description': 'consistency group'})

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            mock_client.getCPG.return_value = {'domain': None}

            # create a consistency group
            group = self.fake_consistencygroup_object()
            self.driver.create_consistencygroup(context.get_admin_context(),
                                                group)

            expected = [
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.createVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    domain=None,
                    comment=comment)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # add a volume to the consistency group
            self.driver.update_consistencygroup(context.get_admin_context(),
                                                group,
                                                add_volumes=[volume],
                                                remove_volumes=[])

            expected = [
                mock.call.addVolumeToVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    self.VOLUME_NAME_3PAR)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # remove the volume from the consistency group
            self.driver.update_consistencygroup(context.get_admin_context(),
                                                group,
                                                add_volumes=[],
                                                remove_volumes=[volume])

            expected = [
                mock.call.removeVolumeFromVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    self.VOLUME_NAME_3PAR)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

    def test_create_cgsnapshot(self):
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
        volume = self.volume

        cg_comment = Comment({
            'display_name': 'cg_name',
            'consistency_group_id': self.CONSIS_GROUP_ID,
            'description': 'consistency group'})

        cgsnap_comment = Comment({
            "consistency_group_id": "6044fedf-c889-4752-900f-2039d247a5df",
            "description": "cgsnapshot",
            "cgsnapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2"})

        cgsnap_optional = (
            {'comment': cgsnap_comment,
             'readOnly': False})

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            mock_client.getCPG.return_value = {'domain': None}

            # create a consistency group
            group = self.fake_consistencygroup_object()
            self.driver.create_consistencygroup(context.get_admin_context(),
                                                group)

            expected = [
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.createVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    domain=None,
                    comment=cg_comment)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # add a volume to the consistency group
            self.driver.update_consistencygroup(context.get_admin_context(),
                                                group,
                                                add_volumes=[volume],
                                                remove_volumes=[])

            expected = [
                mock.call.addVolumeToVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    self.VOLUME_NAME_3PAR)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # create a snapshot of the consistency group
            cgsnapshot = self.fake_cgsnapshot_object()
            self.driver.create_cgsnapshot(context.get_admin_context(),
                                          cgsnapshot, [])

            expected = [
                mock.call.createSnapshotOfVolumeSet(
                    self.CGSNAPSHOT_BASE_NAME + "-@count@",
                    self.CONSIS_GROUP_NAME,
                    optional=cgsnap_optional)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

    def test_delete_cgsnapshot(self):
        mock_client = self.setup_driver()
        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
        volume = self.volume
        cgsnapshot = self.fake_cgsnapshot_object()

        cg_comment = Comment({
            'display_name': 'cg_name',
            'consistency_group_id': self.CONSIS_GROUP_ID,
            'description': 'consistency group'})

        cgsnap_comment = Comment({
            "consistency_group_id": "6044fedf-c889-4752-900f-2039d247a5df",
            "description": "cgsnapshot",
            "cgsnapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2"})

        cgsnap_optional = {'comment': cgsnap_comment,
                           'readOnly': False}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            mock_client.getCPG.return_value = {'domain': None}

            # create a consistency group
            group = self.fake_consistencygroup_object()
            self.driver.create_consistencygroup(context.get_admin_context(),
                                                group)

            expected = [
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.createVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    domain=None,
                    comment=cg_comment)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # add a volume to the consistency group
            self.driver.update_consistencygroup(context.get_admin_context(),
                                                group,
                                                add_volumes=[volume],
                                                remove_volumes=[])

            expected = [
                mock.call.addVolumeToVolumeSet(
                    self.CONSIS_GROUP_NAME,
                    self.VOLUME_NAME_3PAR)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()

            # create a snapshot of the consistency group
            self.driver.create_cgsnapshot(context.get_admin_context(),
                                          cgsnapshot, [])

            expected = [
                mock.call.createSnapshotOfVolumeSet(
                    self.CGSNAPSHOT_BASE_NAME + "-@count@",
                    self.CONSIS_GROUP_NAME,
                    optional=cgsnap_optional)]

            # delete the snapshot of the consistency group
            cgsnapshot.status = 'deleting'
            self.driver.delete_cgsnapshot(context.get_admin_context(),
                                          cgsnapshot, [])

            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
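    # NOTE: the cgsnapshot tests above build the snapshot-set name as
    # CGSNAPSHOT_BASE_NAME + "-@count@". "@count@" is a 3PAR-side name
    # pattern that the array itself expands per member volume of the set;
    # the tests deliberately assert the unexpanded placeholder. That the
    # expansion happens on the array is an assumption here, not something
    # these tests verify.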
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_failover_host(self, _mock_volume_types):
        # periodic vs. sync is not relevant when conducting a failover. We
        # will just use periodic.
        conf = self.setup_configuration()
        self.replication_targets[0]['replication_mode'] = 'periodic'
        conf.replication_device = self.replication_targets
        mock_client = self.setup_driver(config=conf)
        mock_client.getStorageSystemInfo.return_value = (
            {'id': self.CLIENT_ID})
        mock_replicated_client = self.setup_driver(config=conf)
        mock_replicated_client.getStorageSystemInfo.return_value = (
            {'id': self.REPLICATION_CLIENT_ID})
        _mock_volume_types.return_value = {
            'name': 'replicated',
            'extra_specs': {
                'replication_enabled': ' True',
                'replication:mode': 'periodic',
                'replication:sync_period': '900',
                'volume_type': self.volume_type_replicated}}

        with mock.patch.object(
                hpecommon.HPE3PARCommon,
                '_create_client') as mock_create_client, \
            mock.patch.object(
                hpecommon.HPE3PARCommon,
                '_create_replication_client') as mock_replication_client:
            mock_create_client.return_value = mock_client
            mock_replication_client.return_value = mock_replicated_client
            valid_backend_id = (
                self.replication_targets[0]['backend_id'])
            invalid_backend_id = 'INVALID'

            volumes = [self.volume_replicated]
            # Test invalid secondary target.
            self.assertRaises(
                exception.InvalidReplicationTarget,
                self.driver.failover_host,
                context.get_admin_context(),
                volumes,
                invalid_backend_id)

            # Test no secondary target.
            self.assertRaises(
                exception.InvalidReplicationTarget,
                self.driver.failover_host,
                context.get_admin_context(),
                volumes,
                None)

            # Test a successful failover.
            expected_model = (self.REPLICATION_BACKEND_ID,
                              [{'updates': {'replication_status':
                                            'failed-over'},
                                'volume_id': self.VOLUME_ID}])
            return_model = self.driver.failover_host(
                context.get_admin_context(),
                volumes,
                valid_backend_id)
            expected = [
                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
            self.assertEqual(expected_model, return_model)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_replication_failback_ready(self, _mock_volume_types):
        # Managed vs. unmanaged and periodic vs. sync are not relevant when
        # failing back a volume.
        # We will use managed and periodic as the default.
        conf = self.setup_configuration()
        self.replication_targets[0]['replication_mode'] = 'periodic'
        conf.replication_device = self.replication_targets
        mock_client = self.setup_driver(config=conf)
        mock_client.getStorageSystemInfo.return_value = (
            {'id': self.CLIENT_ID})
        mock_replicated_client = self.setup_driver(config=conf)
        mock_replicated_client.getStorageSystemInfo.return_value = (
            {'id': self.REPLICATION_CLIENT_ID})
        _mock_volume_types.return_value = {
            'name': 'replicated',
            'extra_specs': {
                'replication_enabled': ' True',
                'replication:mode': 'periodic',
                'replication:sync_period': '900',
                'volume_type': self.volume_type_replicated}}

        with mock.patch.object(
                hpecommon.HPE3PARCommon,
                '_create_client') as mock_create_client, \
            mock.patch.object(
                hpecommon.HPE3PARCommon,
                '_create_replication_client') as mock_replication_client:
            mock_create_client.return_value = mock_client
            mock_replication_client.return_value = mock_replicated_client

            # Test a successful fail-back.
            volume = self.volume_replicated.copy()
            volume['replication_status'] = 'failed-over'
            return_model = self.driver.failover_host(
                context.get_admin_context(),
                [volume],
                'default')
            expected_model = (None,
                              [{'updates': {'replication_status':
                                            'available'},
                                'volume_id': self.VOLUME_ID}])
            self.assertEqual(expected_model, return_model)

    @mock.patch.object(volume_types, 'get_volume_type')
    def test_replication_failback_not_ready(self, _mock_volume_types):
        # Managed vs. unmanaged and periodic vs. sync are not relevant when
        # failing back a volume.
        # We will use managed and periodic as the default.
        conf = self.setup_configuration()
        self.replication_targets[0]['replication_mode'] = 'periodic'
        conf.replication_device = self.replication_targets
        mock_client = self.setup_driver(config=conf)
        mock_client.getStorageSystemInfo.return_value = (
            {'id': self.CLIENT_ID})
        mock_replicated_client = self.setup_driver(config=conf)
        mock_replicated_client.getStorageSystemInfo.return_value = (
            {'id': self.REPLICATION_CLIENT_ID})
        _mock_volume_types.return_value = {
            'name': 'replicated',
            'extra_specs': {
                'replication_enabled': ' True',
                'replication:mode': 'periodic',
                'replication:sync_period': '900',
                'volume_type': self.volume_type_replicated}}

        with mock.patch.object(
                hpecommon.HPE3PARCommon,
                '_create_client') as mock_create_client, \
            mock.patch.object(
                hpecommon.HPE3PARCommon,
                '_create_replication_client') as mock_replication_client:
            mock_create_client.return_value = mock_client
            mock_client.getRemoteCopyGroup.side_effect = (
                exception.VolumeBackendAPIException(
                    "Error: Remote Copy Group not Ready."))
            mock_replication_client.return_value = mock_replicated_client

            # Test an unsuccessful fail-back.
            volume = self.volume_replicated.copy()
            volume['replication_status'] = 'failed-over'

            self.assertRaises(
                exception.InvalidReplicationTarget,
                self.driver.failover_host,
                context.get_admin_context(),
                [volume],
                'default')
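# NOTE: the failover tests above encode the Cinder replication v2.1
# contract: failover_host() returns a two-tuple of the now-active backend
# id and a list of per-volume updates. A minimal sketch of that shape:
#
#     active_backend_id, volume_updates = driver.failover_host(
#         ctxt, volumes, secondary_id)
#     # volume_updates == [{'volume_id': ..., 'updates': {...}}, ...]
#
# Failing back is requested by passing the reserved id 'default'.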

class TestHPE3PARFCDriver(HPE3PARBaseDriver, test.TestCase):

    properties = {
        'driver_volume_type': 'fibre_channel',
        'data': {
            'encrypted': False,
            'target_lun': 90,
            'target_wwn': ['0987654321234', '123456789000987'],
            'target_discovered': True,
            'initiator_target_map': {'123456789012345':
                                     ['0987654321234', '123456789000987'],
                                     '123456789054321':
                                     ['0987654321234', '123456789000987'],
                                     }}}

    def setup_driver(self, config=None, mock_conf=None, wsapi_version=None):
        self.ctxt = context.get_admin_context()
        mock_client = self.setup_mock_client(
            conf=config,
            m_conf=mock_conf,
            driver=hpefcdriver.HPE3PARFCDriver)

        if wsapi_version:
            mock_client.getWsApiVersion.return_value = (
                wsapi_version)
        else:
            mock_client.getWsApiVersion.return_value = (
                self.wsapi_version_latest)

        expected = [
            mock.call.getCPG(HPE3PAR_CPG),
            mock.call.getCPG(HPE3PAR_CPG2)]
        mock_client.assert_has_calls(
            self.standard_login + expected + self.standard_logout)
        mock_client.reset_mock()
        return mock_client

    def test_initialize_connection(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
             'FCPaths': [{'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 1,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[0]},
                         {'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 0,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[1]}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            [{'active': True,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': 90, 'type': 0}]]

        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': 90,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume,
                self.connector)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST,
                    lun=None),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            self.assertDictMatch(self.properties, result)

    @mock.patch('cinder.zonemanager.utils.create_lookup_service')
    def test_initialize_connection_with_lookup_single_nsp(self, mock_lookup):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        class fake_lookup_object(object):
            def get_device_mapping_from_network(self, connector, target_wwns):
                fake_map = {
                    'FAB_1': {
                        'target_port_wwn_list': ['0987654321234'],
                        'initiator_port_wwn_list': ['123456789012345']
                    }
                }
                return fake_map
        mock_lookup.return_value = fake_lookup_object()
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
             'FCPaths': [{'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 1,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[0]}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            [{'active': True,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': 90, 'type': 0,
              'portPos': {'cardPort': 1, 'node': 7, 'slot': 1}, }]]

        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': 90,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location

        connector = {'ip': '10.0.0.2',
                     'initiator': 'iqn.1993-08.org.debian:01:222',
                     'wwpns': [self.wwn[0]],
                     'wwnns': ["223456789012345"],
                     'host': self.FAKE_HOST}

        expected_properties = {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'encrypted': False,
                'target_lun': 90,
                'target_wwn': ['0987654321234'],
                'target_discovered': True,
                'initiator_target_map': {'123456789012345':
                                         ['0987654321234']
                                         }}}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(self.volume, connector)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.ANY,
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST,
                    portPos={'node': 7, 'slot': 1, 'cardPort': 1},
                    lun=None),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            self.assertDictMatch(expected_properties, result)

    def test_initialize_connection_encrypted(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
             'FCPaths': [{'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 1,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[0]},
                         {'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 0,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[1]}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            [{'active': True,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': 90, 'type': 0}]]

        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': 90,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume_encrypted,
                self.connector)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST,
                    lun=None),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            expected_properties = self.properties
            expected_properties['data']['encrypted'] = True
            self.assertDictMatch(expected_properties, result)

    def test_terminate_connection(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()

        effects = [
            [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME,
              'lun': None, 'type': 0}],
            hpeexceptions.HTTPNotFound,
            hpeexceptions.HTTPNotFound]

        mock_client.getHostVLUNs.side_effect = effects
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }

        expected = [
            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteVLUN(
                self.VOLUME_3PAR_NAME,
                None,
                self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteHost(self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.getPorts()]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            self.assertIn('data', conn_info)
            self.assertIn('initiator_target_map', conn_info['data'])
            mock_client.reset_mock()

            mock_client.getHostVLUNs.side_effect = effects

            # mock some deleteHost exceptions that are handled
            delete_with_vlun = hpeexceptions.HTTPConflict(
                error={'message': "has exported VLUN"})
            delete_with_hostset = hpeexceptions.HTTPConflict(
                error={'message': "host is a member of a set"})
            mock_client.deleteHost = mock.Mock(
                side_effect=[delete_with_vlun, delete_with_hostset])

            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()
            mock_client.getHostVLUNs.side_effect = effects

            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    @mock.patch('cinder.zonemanager.utils.create_lookup_service')
    def test_terminate_connection_with_lookup(self, mock_lookup):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        class fake_lookup_object(object):
            def get_device_mapping_from_network(self, connector, target_wwns):
                fake_map = {
                    'FAB_1': {
                        'target_port_wwn_list': ['0987654321234'],
                        'initiator_port_wwn_list': ['123456789012345']
                    }
                }
                return fake_map
        mock_lookup.return_value = fake_lookup_object()
        mock_client = self.setup_driver()

        effects = [
            [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME,
              'lun': None, 'type': 0}],
            hpeexceptions.HTTPNotFound,
            hpeexceptions.HTTPNotFound]

        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }

        mock_client.getHostVLUNs.side_effect = effects

        expected = [
            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteVLUN(
                self.VOLUME_3PAR_NAME,
                None,
                self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteHost(self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.getPorts()]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            self.assertIn('data', conn_info)
            self.assertIn('initiator_target_map', conn_info['data'])
            mock_client.reset_mock()

            mock_client.getHostVLUNs.side_effect = effects

            # mock some deleteHost exceptions that are handled
            delete_with_vlun = hpeexceptions.HTTPConflict(
                error={'message': "has exported VLUN"})
            delete_with_hostset = hpeexceptions.HTTPConflict(
                error={'message': "host is a member of a set"})
            mock_client.deleteHost = mock.Mock(
                side_effect=[delete_with_vlun, delete_with_hostset])

            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)
            mock_client.reset_mock()
            mock_client.getHostVLUNs.side_effect = effects

            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

    def test_terminate_connection_more_vols(self):
        mock_client = self.setup_driver()
        # mock more than one vlun on the host (don't even try to remove host)
        mock_client.getHostVLUNs.return_value = \
            [
                {'active': True,
                 'volumeName': self.VOLUME_3PAR_NAME,
                 'lun': None, 'type': 0},
                {'active': True,
                 'volumeName': 'there-is-another-volume',
                 'lun': None, 'type': 0},
            ]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }

        expect_less = [
            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteVLUN(
                self.VOLUME_3PAR_NAME,
                None,
                self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST)]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login + expect_less + self.standard_logout)
            self.assertNotIn('initiator_target_map', conn_info['data'])

    def test_get_3par_host_from_wwn_iqn(self):
        mock_client = self.setup_driver()
        mock_client.getHosts.return_value = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'driverVersion': None,
                         'firmwareVersion': None,
                         'hostSpeed': 0,
                         'model': None,
                         'portPos': {'cardPort': 1, 'node': 1,
                                     'slot': 2},
                         'vendor': None,
                         'wwn': '123ab6789012345'}]}
        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            hostname = mock_client._get_3par_hostname_from_wwn_iqn(
                wwns=['123AB6789012345', '123CD6789054321'],
                iqns=None)
            self.assertIsNotNone(hostname)
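    # NOTE: the stats tests below convert the WSAPI's MiB figures to GiB
    # with const = 0.0009765625, i.e. 1/1024, for example:
    #
    #     total_capacity_gb = limit_mib * (1 / 1024.0)  # 8192 MiB -> 8 GiB
    #
    # and derive capacity_utilization as (total - free) / total * 100,
    # a percentage.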
    def test_get_volume_stats1(self):
        # setup_mock_client driver with the configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.filter_function = FILTER_FUNCTION
        config.goodness_function = GOODNESS_FUNCTION
        mock_client = self.setup_driver(config=config)
        mock_client.getCPG.return_value = self.cpgs[0]
        # Purposely left out the Priority Optimization license in
        # getStorageSystemInfo to test that QoS_support returns False.
        mock_client.getStorageSystemInfo.return_value = {
            'id': self.CLIENT_ID,
            'serialNumber': '1234',
            'licenseInfo': {
                'licenses': [{'name': 'Remote Copy'},
                             {'name': 'Thin Provisioning (102400G)'}]
            }
        }

        # cpg has no limit
        mock_client.getCPGAvailableSpace.return_value = {
            "capacityEfficiency": {u'compaction': 594.4},
            "rawFreeMiB": 1024.0 * 6,
            "usableFreeMiB": 1024.0 * 3
        }
        stat_capabilities = {
            THROUGHPUT: 0,
            BANDWIDTH: 0,
            LATENCY: 0,
            IO_SIZE: 0,
            QUEUE_LENGTH: 0,
            AVG_BUSY_PERC: 0
        }
        mock_client.getCPGStatData.return_value = stat_capabilities

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()

            stats = self.driver.get_volume_stats(True)
            const = 0.0009765625
            self.assertEqual('FC', stats['storage_protocol'])
            self.assertEqual('12345', stats['array_id'])
            self.assertTrue(stats['pools'][0]['thin_provisioning_support'])
            self.assertTrue(stats['pools'][0]['thick_provisioning_support'])
            self.assertFalse(stats['pools'][0]['QoS_support'])
            self.assertEqual(86.0,
                             stats['pools'][0]['provisioned_capacity_gb'])
            self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb'])
            self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb'])
            self.assertEqual(87.5, stats['pools'][0]['capacity_utilization'])
            self.assertEqual(3, stats['pools'][0]['total_volumes'])
            self.assertEqual(GOODNESS_FUNCTION,
                             stats['pools'][0]['goodness_function'])
            self.assertEqual(FILTER_FUNCTION,
                             stats['pools'][0]['filter_function'])
            self.assertEqual(stat_capabilities[THROUGHPUT],
                             stats['pools'][0][THROUGHPUT])
            self.assertEqual(stat_capabilities[BANDWIDTH],
                             stats['pools'][0][BANDWIDTH])
            self.assertEqual(stat_capabilities[LATENCY],
                             stats['pools'][0][LATENCY])
            self.assertEqual(stat_capabilities[IO_SIZE],
                             stats['pools'][0][IO_SIZE])
            self.assertEqual(stat_capabilities[QUEUE_LENGTH],
                             stats['pools'][0][QUEUE_LENGTH])
            self.assertEqual(stat_capabilities[AVG_BUSY_PERC],
                             stats['pools'][0][AVG_BUSY_PERC])

            expected = [
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getCPGStatData(HPE3PAR_CPG, 'daily', '7d'),
                mock.call.getCPGAvailableSpace(HPE3PAR_CPG),
                mock.call.getCPG(HPE3PAR_CPG2),
                mock.call.getCPGStatData(HPE3PAR_CPG2, 'daily', '7d'),
                mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

            stats = self.driver.get_volume_stats(True)
            self.assertEqual('FC', stats['storage_protocol'])
            self.assertEqual('12345', stats['array_id'])
            self.assertTrue(stats['pools'][0]['thin_provisioning_support'])
            self.assertTrue(stats['pools'][0]['thick_provisioning_support'])
            self.assertFalse(stats['pools'][0]['QoS_support'])
            self.assertEqual(86.0,
                             stats['pools'][0]['provisioned_capacity_gb'])
            self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb'])
            self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb'])
            self.assertEqual(87.5, stats['pools'][0]['capacity_utilization'])
            self.assertEqual(3, stats['pools'][0]['total_volumes'])
            self.assertEqual(GOODNESS_FUNCTION,
                             stats['pools'][0]['goodness_function'])
            self.assertEqual(FILTER_FUNCTION,
                             stats['pools'][0]['filter_function'])
            self.assertEqual(stat_capabilities[THROUGHPUT],
                             stats['pools'][0][THROUGHPUT])
            self.assertEqual(stat_capabilities[BANDWIDTH],
                             stats['pools'][0][BANDWIDTH])
            self.assertEqual(stat_capabilities[LATENCY],
                             stats['pools'][0][LATENCY])
            self.assertEqual(stat_capabilities[IO_SIZE],
                             stats['pools'][0][IO_SIZE])
            self.assertEqual(stat_capabilities[QUEUE_LENGTH],
                             stats['pools'][0][QUEUE_LENGTH])
            self.assertEqual(stat_capabilities[AVG_BUSY_PERC],
                             stats['pools'][0][AVG_BUSY_PERC])

            cpg2 = self.cpgs[0].copy()
            cpg2.update({'SDGrowth': {'limitMiB': 8192}})
            mock_client.getCPG.return_value = cpg2

            stats = self.driver.get_volume_stats(True)
            self.assertEqual('FC', stats['storage_protocol'])
            self.assertEqual('12345', stats['array_id'])
            self.assertTrue(stats['pools'][0]['thin_provisioning_support'])
            self.assertTrue(stats['pools'][0]['thick_provisioning_support'])
            self.assertFalse(stats['pools'][0]['QoS_support'])
            total_capacity_gb = 8192 * const
            self.assertEqual(total_capacity_gb,
                             stats['pools'][0]['total_capacity_gb'])
            free_capacity_gb = int(
                (8192 - (self.cpgs[0]['UsrUsage']['usedMiB'] +
                         self.cpgs[0]['SDUsage']['usedMiB'])) * const)
            self.assertEqual(free_capacity_gb,
                             stats['pools'][0]['free_capacity_gb'])
            provisioned_capacity_gb = int(
                (self.cpgs[0]['UsrUsage']['totalMiB'] +
                 self.cpgs[0]['SAUsage']['totalMiB'] +
                 self.cpgs[0]['SDUsage']['totalMiB']) * const)
            self.assertEqual(provisioned_capacity_gb,
                             stats['pools'][0]['provisioned_capacity_gb'])
            cap_util = (float(total_capacity_gb - free_capacity_gb) /
                        float(total_capacity_gb)) * 100
            self.assertEqual(cap_util,
                             stats['pools'][0]['capacity_utilization'])
            self.assertEqual(3, stats['pools'][0]['total_volumes'])
            self.assertEqual(GOODNESS_FUNCTION,
                             stats['pools'][0]['goodness_function'])
            self.assertEqual(FILTER_FUNCTION,
                             stats['pools'][0]['filter_function'])
            self.assertEqual(stat_capabilities[THROUGHPUT],
                             stats['pools'][0][THROUGHPUT])
            self.assertEqual(stat_capabilities[BANDWIDTH],
                             stats['pools'][0][BANDWIDTH])
            self.assertEqual(stat_capabilities[LATENCY],
                             stats['pools'][0][LATENCY])
            self.assertEqual(stat_capabilities[IO_SIZE],
                             stats['pools'][0][IO_SIZE])
            self.assertEqual(stat_capabilities[QUEUE_LENGTH],
                             stats['pools'][0][QUEUE_LENGTH])
            self.assertEqual(stat_capabilities[AVG_BUSY_PERC],
                             stats['pools'][0][AVG_BUSY_PERC])

            common.client.deleteCPG(HPE3PAR_CPG)
            common.client.createCPG(HPE3PAR_CPG, {})

    def test_get_volume_stats2(self):
        # Testing when the API_VERSION is incompatible with getCPGStatData
        srstatld_api_version = 30201200
        pre_srstatld_api_version = srstatld_api_version - 1
        wsapi = {'build': pre_srstatld_api_version}
        config = self.setup_configuration()
        config.filter_function = FILTER_FUNCTION
        config.goodness_function = GOODNESS_FUNCTION
        mock_client = self.setup_driver(config=config, wsapi_version=wsapi)
        mock_client.getCPG.return_value = self.cpgs[0]
        # Purposely left out the Thin Provisioning license in
        # getStorageSystemInfo to test that thin_provisioning_support returns
        # False.
        mock_client.getStorageSystemInfo.return_value = {
            'id': self.CLIENT_ID,
            'serialNumber': '1234',
            'licenseInfo': {
                'licenses': [{'name': 'Remote Copy'},
                             {'name': 'Priority Optimization'}]
            }
        }

        # cpg has no limit
        mock_client.getCPGAvailableSpace.return_value = {
            "capacityEfficiency": {u'compaction': 594.4},
            "rawFreeMiB": 1024.0 * 6,
            "usableFreeMiB": 1024.0 * 3
        }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver._login()

            stats = self.driver.get_volume_stats(True)
            self.assertEqual('FC', stats['storage_protocol'])
            self.assertEqual('12345', stats['array_id'])
            self.assertFalse(stats['pools'][0]['thin_provisioning_support'])
            self.assertTrue(stats['pools'][0]['QoS_support'])
            self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb'])
            self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb'])
            self.assertEqual(87.5, stats['pools'][0]['capacity_utilization'])
            self.assertEqual(3, stats['pools'][0]['total_volumes'])
            self.assertEqual(GOODNESS_FUNCTION,
                             stats['pools'][0]['goodness_function'])
            self.assertEqual(FILTER_FUNCTION,
                             stats['pools'][0]['filter_function'])
            self.assertIsNone(stats['pools'][0][THROUGHPUT])
            self.assertIsNone(stats['pools'][0][BANDWIDTH])
            self.assertIsNone(stats['pools'][0][LATENCY])
            self.assertIsNone(stats['pools'][0][IO_SIZE])
            self.assertIsNone(stats['pools'][0][QUEUE_LENGTH])
            self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC])

            expected = [
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getCPGAvailableSpace(HPE3PAR_CPG),
                mock.call.getCPG(HPE3PAR_CPG2),
                mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

    def test_get_volume_stats3(self):
        # Testing when the client version is incompatible with getCPGStatData
        # setup_mock_client driver with the configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.filter_function = FILTER_FUNCTION
        config.goodness_function = GOODNESS_FUNCTION
        mock_client = self.setup_driver(config=config,
                                        wsapi_version=self.wsapi_version_312)
        mock_client.getCPG.return_value = self.cpgs[0]
        mock_client.getStorageSystemInfo.return_value = {
            'id': self.CLIENT_ID,
            'serialNumber': '1234'
        }

        # cpg has no limit
        mock_client.getCPGAvailableSpace.return_value = {
            "capacityEfficiency": {u'compaction': 594.4},
            "rawFreeMiB": 1024.0 * 6,
            "usableFreeMiB": 1024.0 * 3
        }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver._login()

            stats = self.driver.get_volume_stats(True)
            self.assertEqual('FC', stats['storage_protocol'])
            self.assertEqual('12345', stats['array_id'])
            self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb'])
            self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb'])
            self.assertEqual(87.5, stats['pools'][0]['capacity_utilization'])
            self.assertEqual(3, stats['pools'][0]['total_volumes'])
            self.assertEqual(GOODNESS_FUNCTION,
                             stats['pools'][0]['goodness_function'])
            self.assertEqual(FILTER_FUNCTION,
                             stats['pools'][0]['filter_function'])
            self.assertIsNone(stats['pools'][0][THROUGHPUT])
            self.assertIsNone(stats['pools'][0][BANDWIDTH])
            self.assertIsNone(stats['pools'][0][LATENCY])
            self.assertIsNone(stats['pools'][0][IO_SIZE])
            self.assertIsNone(stats['pools'][0][QUEUE_LENGTH])
            self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC])

            expected = [
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getCPGAvailableSpace(HPE3PAR_CPG),
                mock.call.getCPG(HPE3PAR_CPG2),
                mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)
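    # NOTE: the host tests below all exercise the same flow that the
    # driver's _create_host() is expected to implement: look the host up by
    # name (getHost), fall back to a WWN search (queryHost) on a 404,
    # create it (createHost) if neither finds one, and otherwise add any
    # missing WWNs via modifyHost with pathOperation=1 (add). This summary
    # is inferred from the mocked call sequences, not from the driver code.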
    def test_create_host(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
             'FCPaths': [{'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 1,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[0]},
                         {'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 0,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[1]}]}]
        mock_client.queryHost.return_value = None
        mock_client.getVLUN.return_value = {'lun': 186}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.createHost(
                    self.FAKE_HOST,
                    FCWwns=['123456789012345', '123456789054321'],
                    optional={'domain': None, 'persona': 2}),
                mock.call.getHost(self.FAKE_HOST)]

            mock_client.assert_has_calls(expected)

            self.assertEqual(self.FAKE_HOST, host['name'])

    def test_create_invalid_host(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('Host not found.'), {
                'name': 'fakehost.foo',
                'FCPaths': [{'wwn': '123456789012345'}, {
                    'wwn': '123456789054321'}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': 'fakehost.foo'
            }]
        }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common, self.volume, self.connector)

            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.getHost('fakehost.foo')]

            mock_client.assert_has_calls(expected)

            self.assertEqual('fakehost.foo', host['name'])

    def test_create_modify_host(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [{
            'name': self.FAKE_HOST, 'FCPaths': []},
            {'name': self.FAKE_HOST,
             'FCPaths': [{'wwn': '123456789012345'}, {
                 'wwn': '123456789054321'}]}]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common, self.volume, self.connector)
            # On Python 3, hash is randomized, and so set() is used to get
            # the expected order
            fcwwns = list(set(('123456789054321', '123456789012345')))
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.modifyHost('fakehost',
                                     {'FCWWNs': fcwwns,
                                      'pathOperation': 1}),
                mock.call.getHost('fakehost')]

            mock_client.assert_has_calls(expected)

            self.assertEqual(self.FAKE_HOST, host['name'])
            self.assertEqual(2, len(host['FCPaths']))

    def test_modify_host_with_new_wwn(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        getHost_ret1 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789054321'}]}
        getHost_ret2 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789012345'},
                        {'wwn': '123456789054321'}]}
        mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common, self.volume, self.connector)

            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.modifyHost(
                    'fakehost', {
                        'FCWWNs': ['123456789012345'],
                        'pathOperation': 1}),
                mock.call.getHost('fakehost')]

            mock_client.assert_has_calls(expected)

            self.assertEqual(self.FAKE_HOST, host['name'])
            self.assertEqual(2, len(host['FCPaths']))

    def test_modify_host_with_unknown_wwn_and_new_wwn(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        getHost_ret1 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789054321'},
                        {'wwn': 'xxxxxxxxxxxxxxx'}]}
        getHost_ret2 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789012345'},
                        {'wwn': '123456789054321'},
                        {'wwn': 'xxxxxxxxxxxxxxx'}]}
        mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common, self.volume, self.connector)

            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.modifyHost(
                    'fakehost', {
                        'FCWWNs': ['123456789012345'],
                        'pathOperation': 1}),
                mock.call.getHost('fakehost')]

            mock_client.assert_has_calls(expected)

            self.assertEqual(self.FAKE_HOST, host['name'])
            self.assertEqual(3, len(host['FCPaths']))


class TestHPE3PARISCSIDriver(HPE3PARBaseDriver, test.TestCase):

    TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d'
    TARGET_LUN = 186

    properties = {
        'driver_volume_type': 'iscsi',
        'data': {'encrypted': False,
                 'target_discovered': True,
                 'target_iqn': TARGET_IQN,
                 'target_lun': TARGET_LUN,
                 'target_portal': '1.1.1.2:1234'}}

    multipath_properties = {
        'driver_volume_type': 'iscsi',
        'data': {'encrypted': False,
                 'target_discovered': True,
                 'target_iqns': [TARGET_IQN],
                 'target_luns': [TARGET_LUN],
                 'target_portals': ['1.1.1.2:1234']}}

    def setup_driver(self, config=None, mock_conf=None, wsapi_version=None):
        self.ctxt = context.get_admin_context()
        mock_client = self.setup_mock_client(
            conf=config,
            m_conf=mock_conf,
            driver=hpedriver.HPE3PARISCSIDriver)

        if wsapi_version:
            mock_client.getWsApiVersion.return_value = (
                wsapi_version)
        else:
            mock_client.getWsApiVersion.return_value = (
                self.wsapi_version_latest)

        expected_get_cpgs = [
            mock.call.getCPG(HPE3PAR_CPG),
            mock.call.getCPG(HPE3PAR_CPG2)]
        expected_get_ports = [mock.call.getPorts()]
        mock_client.assert_has_calls(
            self.standard_login + expected_get_cpgs +
            self.standard_logout + self.standard_login +
            expected_get_ports + self.standard_logout)
        mock_client.reset_mock()
        return mock_client

    def test_initialize_connection(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = [
            [{'hostname': self.FAKE_HOST,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': self.TARGET_LUN,
              'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}],
            [{'active': True,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': self.TARGET_LUN, 'type': 0}]]

        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': self.TARGET_LUN,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume,
                self.connector)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            self.assertDictMatch(self.properties, result)

    def test_initialize_connection_multipath(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            [{'active': True,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': self.TARGET_LUN, 'type': 0,
              'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}]]

        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': self.TARGET_LUN,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location
        mock_client.getiSCSIPorts.return_value = [{
            'IPAddr': '1.1.1.2',
            'iSCSIName': self.TARGET_IQN,
        }]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume,
                self.connector_multipath_enabled)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getiSCSIPorts(
                    state=self.mock_client_conf['PORT_STATE_READY']),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST,
                    portPos=self.FAKE_ISCSI_PORT['portPos'],
                    lun=None),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            self.assertDictMatch(self.multipath_properties, result)

    def test_initialize_connection_multipath_existing_nsp(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = [
            [{'hostname': self.FAKE_HOST,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': self.TARGET_LUN,
              'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}],
            [{'active': True,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': self.TARGET_LUN, 'type': 0}]]
        mock_client.getiSCSIPorts.return_value = [{
            'IPAddr': '1.1.1.2',
            'iSCSIName': self.TARGET_IQN,
        }]

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume,
                self.connector_multipath_enabled)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getiSCSIPorts(
                    state=self.mock_client_conf['PORT_STATE_READY']),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            self.assertDictMatch(self.multipath_properties, result)

    def test_initialize_connection_encrypted(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpeexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = [
            [{'hostname': self.FAKE_HOST,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': self.TARGET_LUN,
              'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}],
            [{'active': True,
              'volumeName': self.VOLUME_3PAR_NAME,
              'lun': self.TARGET_LUN, 'type': 0}]]

        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': self.TARGET_LUN,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume_encrypted,
                self.connector)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login + expected + self.standard_logout)

            expected_properties = self.properties
            expected_properties['data']['encrypted'] = True
            self.assertDictMatch(expected_properties, result)

    def test_get_volume_stats(self):
        # setup_mock_client driver with the configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.filter_function = FILTER_FUNCTION
        config.goodness_function = GOODNESS_FUNCTION
        mock_client = self.setup_driver(config=config)
        mock_client.getCPG.return_value = self.cpgs[0]
        mock_client.getStorageSystemInfo.return_value = {
            'id': self.CLIENT_ID,
            'serialNumber': '1234'
        }

        # cpg has no limit
        mock_client.getCPGAvailableSpace.return_value = {
            "capacityEfficiency": {u'compaction': 594.4},
            "rawFreeMiB": 1024.0 * 6,
            "usableFreeMiB": 1024.0 * 3
        }
        stat_capabilities = {
            THROUGHPUT: 0,
            BANDWIDTH: 0,
            LATENCY: 0,
            IO_SIZE: 0,
            QUEUE_LENGTH: 0,
            AVG_BUSY_PERC: 0
        }
        mock_client.getCPGStatData.return_value = stat_capabilities

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client

            stats = self.driver.get_volume_stats(True)
            const = 0.0009765625
            self.assertEqual('iSCSI', stats['storage_protocol'])
            self.assertEqual('12345', stats['array_id'])
            self.assertTrue(stats['pools'][0]['thin_provisioning_support'])
            self.assertTrue(stats['pools'][0]['thick_provisioning_support'])
            self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb'])
            self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb'])
            self.assertEqual(86.0,
                             stats['pools'][0]['provisioned_capacity_gb'])
            self.assertEqual(87.5, stats['pools'][0]['capacity_utilization'])
            self.assertEqual(3, stats['pools'][0]['total_volumes'])
            self.assertEqual(GOODNESS_FUNCTION,
                             stats['pools'][0]['goodness_function'])
            self.assertEqual(FILTER_FUNCTION,
                             stats['pools'][0]['filter_function'])
            self.assertEqual(stat_capabilities[THROUGHPUT],
                             stats['pools'][0][THROUGHPUT])
            self.assertEqual(stat_capabilities[BANDWIDTH],
                             stats['pools'][0][BANDWIDTH])
            self.assertEqual(stat_capabilities[LATENCY],
                             stats['pools'][0][LATENCY])
            self.assertEqual(stat_capabilities[IO_SIZE],
                             stats['pools'][0][IO_SIZE])
            self.assertEqual(stat_capabilities[QUEUE_LENGTH],
                             stats['pools'][0][QUEUE_LENGTH])
            self.assertEqual(stat_capabilities[AVG_BUSY_PERC],
                             stats['pools'][0][AVG_BUSY_PERC])

            expected = [
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(HPE3PAR_CPG),
                mock.call.getCPGStatData(HPE3PAR_CPG, 'daily', '7d'),
                mock.call.getCPGAvailableSpace(HPE3PAR_CPG),
                mock.call.getCPG(HPE3PAR_CPG2),
                mock.call.getCPGStatData(HPE3PAR_CPG2, 'daily', '7d'),
                mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
            mock_client.assert_has_calls(
                self.get_id_login + self.standard_logout +
                self.standard_login + expected + self.standard_logout)

            cpg2 = self.cpgs[0].copy()
            cpg2.update({'SDGrowth': {'limitMiB': 8192}})
            mock_client.getCPG.return_value = cpg2

            stats = self.driver.get_volume_stats(True)
            self.assertEqual('iSCSI', stats['storage_protocol'])
            self.assertEqual('12345', stats['array_id'])
            self.assertTrue(stats['pools'][0]['thin_provisioning_support'])
            self.assertTrue(stats['pools'][0]['thick_provisioning_support'])
            total_capacity_gb = 8192 * const
            self.assertEqual(total_capacity_gb,
                             stats['pools'][0]['total_capacity_gb'])
            free_capacity_gb = int(
                (8192 - (self.cpgs[0]['UsrUsage']['usedMiB'] +
                         self.cpgs[0]['SDUsage']['usedMiB'])) * const)
            self.assertEqual(free_capacity_gb,
                             stats['pools'][0]['free_capacity_gb'])
            cap_util = (float(total_capacity_gb - free_capacity_gb) /
                        float(total_capacity_gb)) * 100
            self.assertEqual(cap_util,
                             stats['pools'][0]['capacity_utilization'])
            provisioned_capacity_gb = int(
                (self.cpgs[0]['UsrUsage']['totalMiB'] +
                 self.cpgs[0]['SAUsage']['totalMiB'] +
                 self.cpgs[0]['SDUsage']['totalMiB']) * const)
            self.assertEqual(provisioned_capacity_gb,
stats['pools'][0]['provisioned_capacity_gb']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertEqual(stat_capabilities[THROUGHPUT], stats['pools'][0][THROUGHPUT]) self.assertEqual(stat_capabilities[BANDWIDTH], stats['pools'][0][BANDWIDTH]) self.assertEqual(stat_capabilities[LATENCY], stats['pools'][0][LATENCY]) self.assertEqual(stat_capabilities[IO_SIZE], stats['pools'][0][IO_SIZE]) self.assertEqual(stat_capabilities[QUEUE_LENGTH], stats['pools'][0][QUEUE_LENGTH]) self.assertEqual(stat_capabilities[AVG_BUSY_PERC], stats['pools'][0][AVG_BUSY_PERC]) def test_get_volume_stats2(self): # Testing when the API_VERSION is incompatible with getCPGStatData srstatld_api_version = 30201200 pre_srstatld_api_version = srstatld_api_version - 1 wsapi = {'build': pre_srstatld_api_version} config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config, wsapi_version=wsapi) mock_client.getCPG.return_value = self.cpgs[0] mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234' } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 3 } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() stats = self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertIsNone(stats['pools'][0][THROUGHPUT]) self.assertIsNone(stats['pools'][0][BANDWIDTH]) self.assertIsNone(stats['pools'][0][LATENCY]) self.assertIsNone(stats['pools'][0][IO_SIZE]) self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) def test_get_volume_stats3(self): # Testing when the client version is incompatible with getCPGStatData # setup_mock_client drive with the configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config, wsapi_version=self.wsapi_version_312) mock_client.getCPG.return_value = self.cpgs[0] mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234' } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 3 } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as 
mock_create_client: mock_create_client.return_value = mock_client self.driver._login() stats = self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertIsNone(stats['pools'][0][THROUGHPUT]) self.assertIsNone(stats['pools'][0][BANDWIDTH]) self.assertIsNone(stats['pools'][0][LATENCY]) self.assertIsNone(stats['pools'][0][IO_SIZE]) self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) def test_create_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = None mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.createHost( self.FAKE_HOST, optional={'domain': None, 'persona': 2}, iscsiNames=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertIsNone(auth_username) self.assertIsNone(auth_password) def test_create_host_chap_enabled(self): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = None mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN} expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-user', 'chapSecret': 'test-pass' } def get_side_effect(*args): data = {'value': None} if args[1] == CHAP_USER_KEY: data['value'] = 'test-user' elif args[1] == CHAP_PASS_KEY: data['value'] = 'test-pass' return data mock_client.getVolumeMetaData.side_effect = get_side_effect with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: 
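# Aside: get_side_effect() above makes one mocked method answer
# per-key, which is how these tests model the driver's CHAP metadata
# lookups. A standalone sketch of the same mock.side_effect dispatch
# (the key strings and `META` here are illustrative):
import mock

META = {'CHAP-name': 'test-user', 'CHAP-secret': 'test-pass'}

client = mock.MagicMock()
client.getVolumeMetaData.side_effect = (
    lambda volume_name, key: {'value': META.get(key)})

assert client.getVolumeMetaData('osv-x', 'CHAP-name')['value'] == 'test-user'
assert client.getVolumeMetaData('osv-x', 'CHAP-secret')['value'] == 'test-pass'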
mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.createHost( self.FAKE_HOST, optional={'domain': None, 'persona': 2}, iscsiNames=['iqn.1993-08.org.debian:01:222']), mock.call.modifyHost( 'fakehost', expected_mod_request), mock.call.getHost(self.FAKE_HOST) ] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertEqual('test-user', auth_username) self.assertEqual('test-pass', auth_password) def test_create_invalid_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('Host not found.'), {'name': 'fakehost.foo'}] mock_client.queryHost.return_value = { 'members': [{ 'name': 'fakehost.foo' }] } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost('fakehost.foo')] mock_client.assert_has_calls(expected) self.assertEqual('fakehost.foo', host['name']) self.assertIsNone(auth_username) self.assertIsNone(auth_password) def test_create_invalid_host_chap_enabled(self): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('Host not found.'), {'name': 'fakehost.foo'}] mock_client.queryHost.return_value = { 'members': [{ 'name': 'fakehost.foo' }] } def get_side_effect(*args): data = {'value': None} if args[1] == CHAP_USER_KEY: data['value'] = 'test-user' elif args[1] == CHAP_PASS_KEY: data['value'] = 'test-pass' return data mock_client.getVolumeMetaData.side_effect = get_side_effect expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-user', 'chapSecret': 'test-pass' } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.getHost(self.FAKE_HOST), 
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.modifyHost( 'fakehost.foo', expected_mod_request), mock.call.getHost('fakehost.foo') ] mock_client.assert_has_calls(expected) self.assertEqual('fakehost.foo', host['name']) self.assertEqual('test-user', auth_username) self.assertEqual('test-pass', auth_password) def test_create_modify_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ {'name': self.FAKE_HOST, 'FCPaths': []}, {'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789012345'}, {'wwn': '123456789054321'}]}] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.modifyHost( self.FAKE_HOST, {'pathOperation': 1, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertIsNone(auth_username) self.assertIsNone(auth_password) self.assertEqual(2, len(host['FCPaths'])) def test_create_modify_host_chap_enabled(self): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ {'name': self.FAKE_HOST, 'FCPaths': []}, {'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789012345'}, {'wwn': '123456789054321'}]}] def get_side_effect(*args): data = {'value': None} if args[1] == CHAP_USER_KEY: data['value'] = 'test-user' elif args[1] == CHAP_PASS_KEY: data['value'] = 'test-pass' return data mock_client.getVolumeMetaData.side_effect = get_side_effect expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-user', 'chapSecret': 'test-pass' } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.getHost(self.FAKE_HOST), mock.call.modifyHost( self.FAKE_HOST, {'pathOperation': 1, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.modifyHost( self.FAKE_HOST, expected_mod_request ), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertEqual('test-user', auth_username) self.assertEqual('test-pass', auth_password) self.assertEqual(2, len(host['FCPaths'])) def test_get_least_used_nsp_for_host_single(self): # setup_mock_client drive with default configuration # and return the mock HTTP 
3PAR client
        # Set up a single iSCSI IP
        conf = self.setup_configuration()
        conf.hpe3par_iscsi_ips = ["10.10.220.253"]
        mock_client = self.setup_driver(config=conf)

        mock_client.getPorts.return_value = PORTS_RET
        mock_client.getVLUNs.return_value = VLUNS1_RET

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.driver.initialize_iscsi_ports(common)

            nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
            self.assertEqual("1:8:1", nsp)

    def test_get_least_used_nsp_for_host_new(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        # Set up two iSCSI IPs
        conf = self.setup_configuration()
        conf.hpe3par_iscsi_ips = ["10.10.220.252", "10.10.220.253"]
        mock_client = self.setup_driver(config=conf)

        mock_client.getPorts.return_value = PORTS_RET
        mock_client.getVLUNs.return_value = VLUNS1_RET

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.driver.initialize_iscsi_ports(common)

            # Host 'newhost' does not yet have any iscsi paths,
            # so the 'least used' is returned
            nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
            self.assertEqual("1:8:2", nsp)

    def test_get_least_used_nsp_for_host_reuse(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        # Set up two iSCSI IPs
        conf = self.setup_configuration()
        conf.hpe3par_iscsi_ips = ["10.10.220.252", "10.10.220.253"]
        mock_client = self.setup_driver(config=conf)

        mock_client.getPorts.return_value = PORTS_RET
        mock_client.getVLUNs.return_value = VLUNS1_RET

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.driver.initialize_iscsi_ports(common)

            # hosts 'foo' and 'bar' already have active iscsi paths
            # the same one should be used
            nsp = self.driver._get_least_used_nsp_for_host(common, 'foo')
            self.assertEqual("1:8:2", nsp)

            nsp = self.driver._get_least_used_nsp_for_host(common, 'bar')
            self.assertEqual("1:8:1", nsp)

    def test_get_least_used_nsp_for_host_fc(self):
        # setup_mock_client driver with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getPorts.return_value = PORTS1_RET
        mock_client.getVLUNs.return_value = VLUNS5_RET

        # Set up two iSCSI IPs
        iscsi_ips = ["10.10.220.252", "10.10.220.253"]
        self.driver.configuration.hpe3par_iscsi_ips = iscsi_ips

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.driver.initialize_iscsi_ports(common)

            nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
            # the FC port (0:6:3 in PORTS1_RET) must never be selected
            self.assertNotEqual("0:6:3", nsp)
            self.assertEqual("1:8:1", nsp)

    def test_invalid_iscsi_ip(self):
        config = self.setup_configuration()
        config.hpe3par_iscsi_ips = ['10.10.220.250', '10.10.220.251']
        config.iscsi_ip_address = '10.10.10.10'
        mock_conf = {
            'getPorts.return_value': {
                'members': [
                    {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
                     'protocol': 2,
                     'IPAddr': '10.10.220.252',
                     'linkState': 4,
                     'device': [],
                     'iSCSIName': self.TARGET_IQN,
                     'mode': 2,
                     'HWAddr': '2C27D75375D2',
                     'type': 8},
                    {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                     'protocol': 2,
                     'IPAddr': '10.10.220.253',
                     'linkState': 4,
                     'device': [],
                     'iSCSIName': self.TARGET_IQN,
                     'mode': 2,
                     'HWAddr':
'2C27D75375D6', 'type': 8}]}} # no valid ip addr should be configured. self.assertRaises(exception.InvalidInput, self.setup_driver, config=config, mock_conf=mock_conf) def test_get_least_used_nsp(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() ports = [ {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 2}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}] mock_client.getVLUNs.return_value = {'members': ports} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() # in use count vluns = common.client.getVLUNs() nsp = self.driver._get_least_used_nsp(common, vluns['members'], ['0:2:1', '1:8:1']) self.assertEqual('1:8:1', nsp) ports = [ {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}] mock_client.getVLUNs.return_value = {'members': ports} # in use count common = self.driver._login() vluns = common.client.getVLUNs() nsp = self.driver._get_least_used_nsp(common, vluns['members'], ['0:2:1', '1:2:1']) self.assertEqual('1:2:1', nsp) ports = [ {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}] mock_client.getVLUNs.return_value = {'members': ports} # in use count common = self.driver._login() vluns = common.client.getVLUNs() nsp = self.driver._get_least_used_nsp(common, vluns['members'], ['1:1:1', '1:2:1']) self.assertEqual('1:1:1', nsp) def test_set_3par_chaps(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() expected = [] self.driver._set_3par_chaps( common, 'test-host', 'test-vol', 'test-host', 'pass') mock_client.assert_has_calls(expected) # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR 
client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-host', 'chapSecret': 'fake' } expected = [ mock.call.modifyHost('test-host', expected_mod_request) ] self.driver._set_3par_chaps( common, 'test-host', 'test-vol', 'test-host', 'fake') mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.utils.generate_password') def test_do_export(self, mock_utils): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_utils.return_value = 'random-pass' mock_client.getHostVLUNs.return_value = [ {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0, 'remoteName': 'iqn.1993-08.org.debian:01:222'} ] mock_client.getHost.return_value = { 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', 'initiatorChapEnabled': True } mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [] expected_model = {'provider_auth': None} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) mock_client.reset_mock() # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_utils.return_value = 'random-pass' mock_client.getHostVLUNs.return_value = [ {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0, 'remoteName': 'iqn.1993-08.org.debian:01:222'} ] mock_client.getHost.return_value = { 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', 'initiatorChapEnabled': True } mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [ mock.call.getHostVLUNs('test-host'), mock.call.getHost('test-host'), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') ] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) @mock.patch('cinder.volume.utils.generate_password') def test_do_export_host_not_found(self, mock_utils): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_utils.return_value = 
"random-pass" mock_client.getHostVLUNs.side_effect = hpeexceptions.HTTPNotFound( 'fake') mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [ mock.call.getHostVLUNs('test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') ] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) @mock.patch('cinder.volume.utils.generate_password') def test_do_export_host_chap_disabled(self, mock_utils): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_utils.return_value = 'random-pass' mock_client.getHostVLUNs.return_value = [ {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0, 'remoteName': 'iqn.1993-08.org.debian:01:222'} ] mock_client.getHost.return_value = { 'name': 'fake-host', 'initiatorChapEnabled': False } mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [ mock.call.getHostVLUNs('test-host'), mock.call.getHost('test-host'), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') ] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) @mock.patch('cinder.volume.utils.generate_password') def test_do_export_no_active_vluns(self, mock_utils): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_utils.return_value = "random-pass" mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0, 'remoteName': 'iqn.1993-08.org.debian:01:222'} ] mock_client.getHost.return_value = { 'name': 'fake-host', 'initiatorChapEnabled': True } mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [ mock.call.getHostVLUNs('test-host'), mock.call.getHost('test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') ] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume) 
mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) def test_ensure_export(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_client.getAllVolumeMetaData.return_value = { 'total': 0, 'members': [] } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client model = self.driver.ensure_export(None, volume) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw') ] expected_model = {'provider_auth': None} mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, model) mock_client.getAllVolumeMetaData.return_value = { 'total': 2, 'members': [ { 'creationTimeSec': 1406074222, 'value': 'fake-host', 'key': CHAP_USER_KEY, 'creationTime8601': '2014-07-22T17:10:22-07:00' }, { 'creationTimeSec': 1406074222, 'value': 'random-pass', 'key': CHAP_PASS_KEY, 'creationTime8601': '2014-07-22T17:10:22-07:00' } ] } model = self.driver.ensure_export(None, volume) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw') ] expected_model = {'provider_auth': "CHAP fake-host random-pass"} mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, model) def test_ensure_export_missing_volume(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound( 'fake') with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client model = self.driver.ensure_export(None, volume) expected = [mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw')] expected_model = None mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, model) @mock.patch.object(volume_types, 'get_volume_type') def test_get_volume_settings_default_pool(self, _mock_volume_types): _mock_volume_types.return_value = { 'name': 'gold', 'id': 'gold-id', 'extra_specs': {}} mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume = {'host': 'test-host@3pariscsi#pool_foo', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} pool = volume_utils.extract_host(volume['host'], 'pool') model = common.get_volume_settings_from_type_id('gold-id', pool) self.assertEqual('pool_foo', model['cpg']) def test_get_model_update(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model_update = common._get_model_update('xxx@yyy#zzz', 'CPG') self.assertEqual({'host': 'xxx@yyy#CPG'}, model_update) VLUNS5_RET = ({'members': [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True}]}) PORTS_RET = ({'members': [{'portPos': {'node': 1, 'slot': 8, 
'cardPort': 2}, 'protocol': 2, 'IPAddr': '10.10.220.252', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D2', 'type': 8}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'protocol': 2, 'IPAddr': '10.10.220.253', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D6', 'type': 8}]}) VLUNS1_RET = ({'members': [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'hostname': 'foo', 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'hostname': 'bar', 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'hostname': 'bar', 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'hostname': 'bar', 'active': True}]}) PORTS1_RET = ({'members': [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, 'protocol': 2, 'IPAddr': '10.10.120.252', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D2', 'type': 8}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'protocol': 2, 'IPAddr': '10.10.220.253', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D6', 'type': 8}, {'portWWN': '20210002AC00383D', 'protocol': 1, 'linkState': 4, 'mode': 2, 'device': ['cage2'], 'nodeWWN': '20210002AC00383D', 'type': 2, 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]}) cinder-8.0.0/cinder/tests/unit/test_vmware_volumeops.py0000664000567000056710000026154512701406250024546 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suite for VMware VMDK driver volumeops module. 
""" import ddt import mock from oslo_utils import units from oslo_vmware import exceptions from oslo_vmware import vim_util from cinder import test from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions from cinder.volume.drivers.vmware import volumeops @ddt.ddt class VolumeOpsTestCase(test.TestCase): """Unit tests for volumeops module.""" MAX_OBJECTS = 100 def setUp(self): super(VolumeOpsTestCase, self).setUp() self.session = mock.MagicMock() self.vops = volumeops.VMwareVolumeOps(self.session, self.MAX_OBJECTS) def test_split_datastore_path(self): test1 = '[datastore1] myfolder/mysubfolder/myvm.vmx' (datastore, folder, file_name) = volumeops.split_datastore_path(test1) self.assertEqual('datastore1', datastore) self.assertEqual('myfolder/mysubfolder/', folder) self.assertEqual('myvm.vmx', file_name) test2 = '[datastore2 ] myfolder/myvm.vmdk' (datastore, folder, file_name) = volumeops.split_datastore_path(test2) self.assertEqual('datastore2', datastore) self.assertEqual('myfolder/', folder) self.assertEqual('myvm.vmdk', file_name) test3 = 'myfolder/myvm.vmdk' self.assertRaises(IndexError, volumeops.split_datastore_path, test3) def vm(self, val): """Create a mock vm in retrieve result format.""" vm = mock.MagicMock() prop = mock.Mock(spec=object) prop.val = val vm.propSet = [prop] return vm def test_get_backing(self): name = 'mock-backing' # Test no result self.session.invoke_api.return_value = None result = self.vops.get_backing(name) self.assertIsNone(result) self.session.invoke_api.assert_called_once_with(vim_util, 'get_objects', self.session.vim, 'VirtualMachine', self.MAX_OBJECTS) # Test single result vm = self.vm(name) vm.obj = mock.sentinel.vm_obj retrieve_result = mock.Mock(spec=object) retrieve_result.objects = [vm] self.session.invoke_api.return_value = retrieve_result self.vops.cancel_retrieval = mock.Mock(spec=object) result = self.vops.get_backing(name) self.assertEqual(mock.sentinel.vm_obj, result) self.session.invoke_api.assert_called_with(vim_util, 'get_objects', self.session.vim, 'VirtualMachine', self.MAX_OBJECTS) self.vops.cancel_retrieval.assert_called_once_with(retrieve_result) # Test multiple results retrieve_result2 = mock.Mock(spec=object) retrieve_result2.objects = [vm('1'), vm('2'), vm('3')] self.session.invoke_api.return_value = retrieve_result2 self.vops.continue_retrieval = mock.Mock(spec=object) self.vops.continue_retrieval.return_value = retrieve_result result = self.vops.get_backing(name) self.assertEqual(mock.sentinel.vm_obj, result) self.session.invoke_api.assert_called_with(vim_util, 'get_objects', self.session.vim, 'VirtualMachine', self.MAX_OBJECTS) self.vops.continue_retrieval.assert_called_once_with(retrieve_result2) self.vops.cancel_retrieval.assert_called_with(retrieve_result) def test_delete_backing(self): backing = mock.sentinel.backing task = mock.sentinel.task self.session.invoke_api.return_value = task self.vops.delete_backing(backing) self.session.invoke_api.assert_called_once_with(self.session.vim, "Destroy_Task", backing) self.session.wait_for_task(task) def test_get_host(self): instance = mock.sentinel.instance host = mock.sentinel.host self.session.invoke_api.return_value = host result = self.vops.get_host(instance) self.assertEqual(host, result) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, instance, 'runtime.host') def _host_runtime_info( self, connection_state='connected', in_maintenance=False): return mock.Mock(connectionState=connection_state, 
inMaintenanceMode=in_maintenance) def test_is_host_usable(self): self.session.invoke_api.return_value = self._host_runtime_info() self.assertTrue(self.vops.is_host_usable(mock.sentinel.host)) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, mock.sentinel.host, 'runtime') def test_is_host_usable_with_disconnected_host(self): self.session.invoke_api.return_value = self._host_runtime_info( connection_state='disconnected') self.assertFalse(self.vops.is_host_usable(mock.sentinel.host)) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, mock.sentinel.host, 'runtime') def test_is_host_usable_with_host_in_maintenance(self): self.session.invoke_api.return_value = self._host_runtime_info( in_maintenance=True) self.assertFalse(self.vops.is_host_usable(mock.sentinel.host)) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, mock.sentinel.host, 'runtime') def test_get_hosts(self): hosts = mock.sentinel.hosts self.session.invoke_api.return_value = hosts result = self.vops.get_hosts() self.assertEqual(hosts, result) self.session.invoke_api.assert_called_once_with(vim_util, 'get_objects', self.session.vim, 'HostSystem', self.MAX_OBJECTS) def test_continue_retrieval(self): retrieve_result = mock.sentinel.retrieve_result self.session.invoke_api.return_value = retrieve_result result = self.vops.continue_retrieval(retrieve_result) self.assertEqual(retrieve_result, result) self.session.invoke_api.assert_called_once_with(vim_util, 'continue_retrieval', self.session.vim, retrieve_result) def test_cancel_retrieval(self): retrieve_result = mock.sentinel.retrieve_result self.session.invoke_api.return_value = retrieve_result result = self.vops.cancel_retrieval(retrieve_result) self.assertIsNone(result) self.session.invoke_api.assert_called_once_with(vim_util, 'cancel_retrieval', self.session.vim, retrieve_result) def test_is_usable(self): mount_info = mock.Mock(spec=object) mount_info.accessMode = "readWrite" mount_info.mounted = True mount_info.accessible = True self.assertTrue(self.vops._is_usable(mount_info)) del mount_info.mounted self.assertTrue(self.vops._is_usable(mount_info)) mount_info.accessMode = "readonly" self.assertFalse(self.vops._is_usable(mount_info)) mount_info.accessMode = "readWrite" mount_info.mounted = False self.assertFalse(self.vops._is_usable(mount_info)) mount_info.mounted = True mount_info.accessible = False self.assertFalse(self.vops._is_usable(mount_info)) del mount_info.accessible self.assertFalse(self.vops._is_usable(mount_info)) def _create_host_mounts(self, access_mode, host, set_accessible=True, is_accessible=True, mounted=True): """Create host mount value of datastore with single mount info. 
:param access_mode: string specifying the read/write permission :param set_accessible: specify whether accessible property should be set :param is_accessible: boolean specifying whether the datastore is accessible to host :param host: managed object reference of the connected host :return: list of host mount info """ mntInfo = mock.Mock(spec=object) mntInfo.accessMode = access_mode if set_accessible: mntInfo.accessible = is_accessible else: del mntInfo.accessible mntInfo.mounted = mounted host_mount = mock.Mock(spec=object) host_mount.key = host host_mount.mountInfo = mntInfo host_mounts = mock.Mock(spec=object) host_mounts.DatastoreHostMount = [host_mount] return host_mounts def test_get_connected_hosts(self): with mock.patch.object(self.vops, 'get_summary') as get_summary: datastore = mock.sentinel.datastore summary = mock.Mock(spec=object) get_summary.return_value = summary summary.accessible = False hosts = self.vops.get_connected_hosts(datastore) self.assertEqual([], hosts) summary.accessible = True host = mock.Mock(spec=object) host.value = mock.sentinel.host host_mounts = self._create_host_mounts("readWrite", host) self.session.invoke_api.return_value = host_mounts hosts = self.vops.get_connected_hosts(datastore) self.assertEqual([mock.sentinel.host], hosts) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, datastore, 'host') del host_mounts.DatastoreHostMount hosts = self.vops.get_connected_hosts(datastore) self.assertEqual([], hosts) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_connected_hosts') def test_is_datastore_accessible(self, get_connected_hosts): host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 get_connected_hosts.return_value = [host_1, host_2] ds = mock.sentinel.datastore host = mock.Mock(value=mock.sentinel.host_1) self.assertTrue(self.vops.is_datastore_accessible(ds, host)) get_connected_hosts.assert_called_once_with(ds) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'get_connected_hosts')
    def test_is_datastore_accessible_with_inaccessible(self,
                                                       get_connected_hosts):
        host_1 = mock.sentinel.host_1
        get_connected_hosts.return_value = [host_1]

        ds = mock.sentinel.datastore
        host = mock.Mock(value=mock.sentinel.host_2)
        self.assertFalse(self.vops.is_datastore_accessible(ds, host))
        get_connected_hosts.assert_called_once_with(ds)

    def test_is_valid(self):
        with mock.patch.object(self.vops, 'get_summary') as get_summary:
            summary = mock.Mock(spec=object)
            get_summary.return_value = summary

            datastore = mock.sentinel.datastore
            host = mock.Mock(spec=object)
            host.value = mock.sentinel.host

            def _is_valid(host_mounts, is_valid):
                self.session.invoke_api.return_value = host_mounts
                result = self.vops._is_valid(datastore, host)
                self.assertEqual(is_valid, result)
                self.session.invoke_api.assert_called_with(
                    vim_util, 'get_object_property', self.session.vim,
                    datastore, 'host')

            # Test positive cases
            summary.maintenanceMode = 'normal'
            summary.accessible = True
            _is_valid(self._create_host_mounts("readWrite", host), True)

            # Test negative cases
            _is_valid(self._create_host_mounts("Inaccessible", host), False)
            _is_valid(self._create_host_mounts("readWrite", host, True,
                                               False), False)
            _is_valid(self._create_host_mounts("readWrite", host, True, True,
                                               False), False)

            summary.accessible = False
            _is_valid(self._create_host_mounts("readWrite", host, False),
                      False)

            summary.accessible = True
            summary.maintenanceMode = 'inMaintenance'
            _is_valid(self._create_host_mounts("readWrite", host), False)

    def test_get_dss_rp(self):
        with mock.patch.object(self.vops, 'get_summary') as get_summary:
            summary = mock.Mock(spec=object)
            summary.accessible = True
            summary.maintenanceMode = 'normal'
            get_summary.return_value = summary

            # build out props to be returned by 1st invoke_api call
            datastore_prop = mock.Mock(spec=object)
            datastore_prop.name = 'datastore'
            datastore_prop.val = mock.Mock(spec=object)
            datastore_prop.val.ManagedObjectReference = [mock.sentinel.ds1,
                                                         mock.sentinel.ds2]
            compute_resource_prop = mock.Mock(spec=object)
            compute_resource_prop.name = 'parent'
            compute_resource_prop.val = mock.sentinel.compute_resource
            elem = mock.Mock(spec=object)
            elem.propSet = [datastore_prop, compute_resource_prop]
            props = [elem]
            # build out host_mounts to be returned by 2nd invoke_api call
            host = mock.Mock(spec=object)
            host.value = mock.sentinel.host
            host_mounts = self._create_host_mounts("readWrite", host)
            # build out resource_pool to be returned by 3rd invoke_api call
            resource_pool = mock.sentinel.resource_pool

            # set return values for each call of invoke_api
            self.session.invoke_api.side_effect = [props,
                                                   host_mounts,
                                                   host_mounts,
                                                   resource_pool]

            # invoke function and verify results
            (dss_actual, rp_actual) = self.vops.get_dss_rp(host)
            self.assertEqual([mock.sentinel.ds1, mock.sentinel.ds2],
                             dss_actual)
            self.assertEqual(resource_pool, rp_actual)

            # invoke function with no valid datastore
            summary.maintenanceMode = 'inMaintenance'
            self.session.invoke_api.side_effect = [props,
                                                   host_mounts,
                                                   host_mounts,
                                                   resource_pool]
            self.assertRaises(exceptions.VimException,
                              self.vops.get_dss_rp,
                              host)

            # Clear side effects.
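# Aside: assigning a list to side_effect (as in test_get_dss_rp above)
# makes the mock yield one item per call and raise StopIteration once
# the list is exhausted, which is why each scenario assigns a fresh
# list and why the cleanup below resets side_effect to None.
# Standalone illustration:
import mock

api = mock.MagicMock(side_effect=['first', 'second'])
assert api() == 'first'
assert api() == 'second'
# a third api() call would now raise StopIteration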
self.session.invoke_api.side_effect = None def test_get_parent(self): # Not recursive child = mock.Mock(spec=object) child._type = 'Parent' ret = self.vops._get_parent(child, 'Parent') self.assertEqual(child, ret) # Recursive parent = mock.Mock(spec=object) parent._type = 'Parent' child = mock.Mock(spec=object) child._type = 'Child' self.session.invoke_api.return_value = parent ret = self.vops._get_parent(child, 'Parent') self.assertEqual(parent, ret) self.session.invoke_api.assert_called_with(vim_util, 'get_object_property', self.session.vim, child, 'parent') def test_get_dc(self): # set up hierarchy of objects dc = mock.Mock(spec=object) dc._type = 'Datacenter' o1 = mock.Mock(spec=object) o1._type = 'mockType1' o1.parent = dc o2 = mock.Mock(spec=object) o2._type = 'mockType2' o2.parent = o1 # mock out invoke_api behaviour to fetch parent def mock_invoke_api(vim_util, method, vim, the_object, arg): return the_object.parent self.session.invoke_api.side_effect = mock_invoke_api ret = self.vops.get_dc(o2) self.assertEqual(dc, ret) # Clear side effects. self.session.invoke_api.side_effect = None def test_get_vmfolder(self): self.session.invoke_api.return_value = mock.sentinel.ret ret = self.vops.get_vmfolder(mock.sentinel.dc) self.assertEqual(mock.sentinel.ret, ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, mock.sentinel.dc, 'vmFolder') def test_create_folder_with_concurrent_create(self): parent_folder = mock.sentinel.parent_folder child_name = 'child_folder' prop_val_1 = mock.Mock(ManagedObjectReference=[]) child_folder = mock.Mock(_type='Folder') prop_val_2 = mock.Mock(ManagedObjectReference=[child_folder]) self.session.invoke_api.side_effect = [prop_val_1, exceptions.DuplicateName, prop_val_2, child_name] ret = self.vops.create_folder(parent_folder, child_name) self.assertEqual(child_folder, ret) expected_invoke_api = [mock.call(vim_util, 'get_object_property', self.session.vim, parent_folder, 'childEntity'), mock.call(self.session.vim, 'CreateFolder', parent_folder, name=child_name), mock.call(vim_util, 'get_object_property', self.session.vim, parent_folder, 'childEntity'), mock.call(vim_util, 'get_object_property', self.session.vim, child_folder, 'name')] self.assertEqual(expected_invoke_api, self.session.invoke_api.mock_calls) def test_create_folder_with_empty_vmfolder(self): """Test create_folder when the datacenter vmFolder is empty""" child_folder = mock.sentinel.child_folder self.session.invoke_api.side_effect = [None, child_folder] parent_folder = mock.sentinel.parent_folder child_name = 'child_folder' ret = self.vops.create_folder(parent_folder, child_name) self.assertEqual(child_folder, ret) expected_calls = [mock.call(vim_util, 'get_object_property', self.session.vim, parent_folder, 'childEntity'), mock.call(self.session.vim, 'CreateFolder', parent_folder, name=child_name)] self.assertEqual(expected_calls, self.session.invoke_api.call_args_list) def test_create_folder_not_present(self): """Test create_folder when child not present.""" prop_val = mock.Mock(spec=object) child_folder = mock.sentinel.child_folder self.session.invoke_api.side_effect = [prop_val, child_folder] child_name = 'child_folder' parent_folder = mock.sentinel.parent_folder ret = self.vops.create_folder(parent_folder, child_name) self.assertEqual(child_folder, ret) expected_invoke_api = [mock.call(vim_util, 'get_object_property', self.session.vim, parent_folder, 'childEntity'), mock.call(self.session.vim, 'CreateFolder', parent_folder, name=child_name)] 
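# Aside: test_create_folder_with_concurrent_create above pins down
# create_folder's race handling -- on DuplicateName it re-reads
# childEntity and returns the folder another caller just created. A
# standalone sketch of that create-or-get shape (names here are
# illustrative, not the volumeops implementation):
class DuplicateNameError(Exception):
    pass

def create_or_get(create, find_existing):
    try:
        return create()
    except DuplicateNameError:
        # lost the race: another caller created the folder first
        return find_existing()

def racing_create():
    raise DuplicateNameError()

assert create_or_get(lambda: 'created', lambda: 'existing') == 'created'
assert create_or_get(racing_create, lambda: 'existing') == 'existing'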
self.assertEqual(expected_invoke_api, self.session.invoke_api.mock_calls) # Clear side effects. self.session.invoke_api.side_effect = None def test_create_folder_already_present(self): """Test create_folder when child already present.""" parent_folder = mock.sentinel.parent_folder child_name = 'child_folder' prop_val = mock.Mock(spec=object) child_entity_1 = mock.Mock(spec=object) child_entity_1._type = 'Folder' child_entity_1_name = 'SomeOtherName' child_entity_2 = mock.Mock(spec=object) child_entity_2._type = 'Folder' child_entity_2_name = child_name prop_val.ManagedObjectReference = [child_entity_1, child_entity_2] self.session.invoke_api.side_effect = [prop_val, child_entity_1_name, child_entity_2_name] ret = self.vops.create_folder(parent_folder, child_name) self.assertEqual(child_entity_2, ret) expected_invoke_api = [mock.call(vim_util, 'get_object_property', self.session.vim, parent_folder, 'childEntity'), mock.call(vim_util, 'get_object_property', self.session.vim, child_entity_1, 'name'), mock.call(vim_util, 'get_object_property', self.session.vim, child_entity_2, 'name')] self.assertEqual(expected_invoke_api, self.session.invoke_api.mock_calls) # Clear side effects. self.session.invoke_api.side_effect = None def test_create_folder_with_special_characters(self): """Test create_folder with names containing special characters.""" # Test folder already exists case. child_entity_1 = mock.Mock(_type='Folder') child_entity_1_name = 'cinder-volumes' child_entity_2 = mock.Mock(_type='Folder') child_entity_2_name = '%2fcinder-volumes' prop_val = mock.Mock(ManagedObjectReference=[child_entity_1, child_entity_2]) self.session.invoke_api.side_effect = [prop_val, child_entity_1_name, child_entity_2_name] parent_folder = mock.sentinel.parent_folder child_name = '/cinder-volumes' ret = self.vops.create_folder(parent_folder, child_name) self.assertEqual(child_entity_2, ret) # Test non-existing folder case. child_entity_2_name = '%25%25cinder-volumes' new_child_folder = mock.sentinel.new_child_folder self.session.invoke_api.side_effect = [prop_val, child_entity_1_name, child_entity_2_name, new_child_folder] child_name = '%cinder-volumes' ret = self.vops.create_folder(parent_folder, child_name) self.assertEqual(new_child_folder, ret) self.session.invoke_api.assert_called_with(self.session.vim, 'CreateFolder', parent_folder, name=child_name) # Reset side effects. self.session.invoke_api.side_effect = None def test_create_folder_with_duplicate_name(self): parent_folder = mock.sentinel.parent_folder child_name = 'child_folder' prop_val_1 = mock.Mock(spec=object) prop_val_1.ManagedObjectReference = [] child_entity_2 = mock.Mock(spec=object) child_entity_2._type = 'Folder' prop_val_2 = mock.Mock(spec=object) prop_val_2.ManagedObjectReference = [child_entity_2] child_entity_2_name = child_name self.session.invoke_api.side_effect = [ prop_val_1, exceptions.DuplicateName, prop_val_2, child_entity_2_name] ret = self.vops.create_folder(parent_folder, child_name) self.assertEqual(child_entity_2, ret) expected_invoke_api = [mock.call(vim_util, 'get_object_property', self.session.vim, parent_folder, 'childEntity'), mock.call(self.session.vim, 'CreateFolder', parent_folder, name=child_name), mock.call(vim_util, 'get_object_property', self.session.vim, parent_folder, 'childEntity'), mock.call(vim_util, 'get_object_property', self.session.vim, child_entity_2, 'name')] self.assertEqual(expected_invoke_api, self.session.invoke_api.mock_calls) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'get_vmfolder') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'create_folder') def test_create_vm_inventory_folder(self, create_folder, get_vmfolder): vm_folder_1 = mock.sentinel.vm_folder_1 get_vmfolder.return_value = vm_folder_1 folder_1a = mock.sentinel.folder_1a folder_1b = mock.sentinel.folder_1b create_folder.side_effect = [folder_1a, folder_1b] datacenter_1 = mock.Mock(value='dc-1') path_comp = ['a', 'b'] ret = self.vops.create_vm_inventory_folder(datacenter_1, path_comp) self.assertEqual(folder_1b, ret) get_vmfolder.assert_called_once_with(datacenter_1) exp_calls = [mock.call(vm_folder_1, 'a'), mock.call(folder_1a, 'b')] self.assertEqual(exp_calls, create_folder.call_args_list) exp_cache = {'/dc-1': vm_folder_1, '/dc-1/a': folder_1a, '/dc-1/a/b': folder_1b} self.assertEqual(exp_cache, self.vops._folder_cache) # Test cache get_vmfolder.reset_mock() create_folder.reset_mock() folder_1c = mock.sentinel.folder_1c create_folder.side_effect = [folder_1c] path_comp = ['a', 'c'] ret = self.vops.create_vm_inventory_folder(datacenter_1, path_comp) self.assertEqual(folder_1c, ret) self.assertFalse(get_vmfolder.called) exp_calls = [mock.call(folder_1a, 'c')] self.assertEqual(exp_calls, create_folder.call_args_list) exp_cache = {'/dc-1': vm_folder_1, '/dc-1/a': folder_1a, '/dc-1/a/b': folder_1b, '/dc-1/a/c': folder_1c} self.assertEqual(exp_cache, self.vops._folder_cache) # Test cache with different datacenter get_vmfolder.reset_mock() create_folder.reset_mock() vm_folder_2 = mock.sentinel.vm_folder_2 get_vmfolder.return_value = vm_folder_2 folder_2a = mock.sentinel.folder_2a folder_2b = mock.sentinel.folder_2b create_folder.side_effect = [folder_2a, folder_2b] datacenter_2 = mock.Mock(value='dc-2') path_comp = ['a', 'b'] ret = self.vops.create_vm_inventory_folder(datacenter_2, path_comp) self.assertEqual(folder_2b, ret) get_vmfolder.assert_called_once_with(datacenter_2) exp_calls = [mock.call(vm_folder_2, 'a'), mock.call(folder_2a, 'b')] self.assertEqual(exp_calls, create_folder.call_args_list) exp_cache = {'/dc-1': vm_folder_1, '/dc-1/a': folder_1a, '/dc-1/a/b': folder_1b, '/dc-1/a/c': folder_1c, '/dc-2': vm_folder_2, '/dc-2/a': folder_2a, '/dc-2/a/b': folder_2b } self.assertEqual(exp_cache, self.vops._folder_cache) def test_create_disk_backing_thin(self): backing = mock.Mock() del backing.eagerlyScrub cf = self.session.vim.client.factory cf.create.return_value = backing disk_type = 'thin' ret = self.vops._create_disk_backing(disk_type, None) self.assertEqual(backing, ret) self.assertIsInstance(ret.thinProvisioned, bool) self.assertTrue(ret.thinProvisioned) self.assertEqual('', ret.fileName) self.assertEqual('persistent', ret.diskMode) def test_create_disk_backing_thick(self): backing = mock.Mock() del backing.eagerlyScrub del backing.thinProvisioned cf = self.session.vim.client.factory cf.create.return_value = backing disk_type = 'thick' ret = self.vops._create_disk_backing(disk_type, None) self.assertEqual(backing, ret) self.assertEqual('', ret.fileName) self.assertEqual('persistent', ret.diskMode) def test_create_disk_backing_eager_zeroed_thick(self): backing = mock.Mock() del backing.thinProvisioned cf = self.session.vim.client.factory cf.create.return_value = backing disk_type = 'eagerZeroedThick' ret = self.vops._create_disk_backing(disk_type, None) self.assertEqual(backing, ret) self.assertIsInstance(ret.eagerlyScrub, bool) self.assertTrue(ret.eagerlyScrub) self.assertEqual('', ret.fileName) self.assertEqual('persistent', ret.diskMode) def 
test_create_virtual_disk_config_spec(self): cf = self.session.vim.client.factory cf.create.side_effect = lambda *args: mock.Mock() size_kb = units.Ki controller_key = 200 disk_type = 'thick' spec = self.vops._create_virtual_disk_config_spec(size_kb, disk_type, controller_key, None) cf.create.side_effect = None self.assertEqual('add', spec.operation) self.assertEqual('create', spec.fileOperation) device = spec.device self.assertEqual(size_kb, device.capacityInKB) self.assertEqual(-101, device.key) self.assertEqual(0, device.unitNumber) self.assertEqual(controller_key, device.controllerKey) backing = device.backing self.assertEqual('', backing.fileName) self.assertEqual('persistent', backing.diskMode) def test_create_specs_for_ide_disk_add(self): factory = self.session.vim.client.factory factory.create.side_effect = lambda *args: mock.Mock() size_kb = 1 disk_type = 'thin' adapter_type = 'ide' ret = self.vops._create_specs_for_disk_add(size_kb, disk_type, adapter_type) factory.create.side_effect = None self.assertEqual(1, len(ret)) self.assertEqual(units.Ki, ret[0].device.capacityInKB) self.assertEqual(200, ret[0].device.controllerKey) expected = [mock.call.create('ns0:VirtualDeviceConfigSpec'), mock.call.create('ns0:VirtualDisk'), mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo')] factory.create.assert_has_calls(expected, any_order=True) def test_create_specs_for_scsi_disk_add(self): factory = self.session.vim.client.factory factory.create.side_effect = lambda *args: mock.Mock() size_kb = 2 * units.Ki disk_type = 'thin' adapter_type = 'lsiLogicsas' ret = self.vops._create_specs_for_disk_add(size_kb, disk_type, adapter_type) factory.create.side_effect = None self.assertEqual(2, len(ret)) self.assertEqual('noSharing', ret[1].device.sharedBus) self.assertEqual(size_kb, ret[0].device.capacityInKB) expected = [mock.call.create('ns0:VirtualLsiLogicSASController'), mock.call.create('ns0:VirtualDeviceConfigSpec'), mock.call.create('ns0:VirtualDisk'), mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo'), mock.call.create('ns0:VirtualDeviceConfigSpec')] factory.create.assert_has_calls(expected, any_order=True) def test_get_create_spec_disk_less(self): factory = self.session.vim.client.factory factory.create.side_effect = lambda *args: mock.Mock() name = mock.sentinel.name ds_name = mock.sentinel.ds_name profile_id = mock.sentinel.profile_id option_key = mock.sentinel.key option_value = mock.sentinel.value extra_config = {option_key: option_value} ret = self.vops._get_create_spec_disk_less(name, ds_name, profile_id, extra_config) factory.create.side_effect = None self.assertEqual(name, ret.name) self.assertEqual('[%s]' % ds_name, ret.files.vmPathName) self.assertEqual("vmx-08", ret.version) self.assertEqual(profile_id, ret.vmProfile[0].profileId) self.assertEqual(1, len(ret.extraConfig)) self.assertEqual(option_key, ret.extraConfig[0].key) self.assertEqual(option_value, ret.extraConfig[0].value) expected = [mock.call.create('ns0:VirtualMachineFileInfo'), mock.call.create('ns0:VirtualMachineConfigSpec'), mock.call.create('ns0:VirtualMachineDefinedProfileSpec'), mock.call.create('ns0:OptionValue')] factory.create.assert_has_calls(expected, any_order=True) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_create_spec_disk_less') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_create_specs_for_disk_add') def test_get_create_spec(self, create_specs_for_disk_add, get_create_spec_disk_less): name = 'vol-1' size_kb = 1024 disk_type = 'thin' ds_name = 'nfs-1' profileId = mock.sentinel.profile_id adapter_type = 'busLogic' extra_config = mock.sentinel.extra_config self.vops.get_create_spec(name, size_kb, disk_type, ds_name, profileId, adapter_type, extra_config) get_create_spec_disk_less.assert_called_once_with( name, ds_name, profileId=profileId, extra_config=extra_config) create_specs_for_disk_add.assert_called_once_with( size_kb, disk_type, adapter_type) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_create_spec') def test_create_backing(self, get_create_spec): create_spec = mock.sentinel.create_spec get_create_spec.return_value = create_spec task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock(spec=object) task_info.result = mock.sentinel.result self.session.wait_for_task.return_value = task_info name = 'backing_name' size_kb = mock.sentinel.size_kb disk_type = mock.sentinel.disk_type adapter_type = mock.sentinel.adapter_type folder = mock.sentinel.folder resource_pool = mock.sentinel.resource_pool host = mock.sentinel.host ds_name = mock.sentinel.ds_name profile_id = mock.sentinel.profile_id extra_config = mock.sentinel.extra_config ret = self.vops.create_backing(name, size_kb, disk_type, folder, resource_pool, host, ds_name, profile_id, adapter_type, extra_config) self.assertEqual(mock.sentinel.result, ret) get_create_spec.assert_called_once_with( name, size_kb, disk_type, ds_name, profileId=profile_id, adapter_type=adapter_type, extra_config=extra_config) self.session.invoke_api.assert_called_once_with(self.session.vim, 'CreateVM_Task', folder, config=create_spec, pool=resource_pool, host=host) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_get_create_spec_disk_less') def test_create_backing_disk_less(self, get_create_spec_disk_less): create_spec = mock.sentinel.create_spec get_create_spec_disk_less.return_value = create_spec task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock(spec=object) task_info.result = mock.sentinel.result self.session.wait_for_task.return_value = task_info name = 'backing_name' folder = mock.sentinel.folder resource_pool = mock.sentinel.resource_pool host = mock.sentinel.host ds_name = mock.sentinel.ds_name profile_id = mock.sentinel.profile_id extra_config = mock.sentinel.extra_config ret = self.vops.create_backing_disk_less(name, folder, resource_pool, host, ds_name, profile_id, extra_config) self.assertEqual(mock.sentinel.result, ret) get_create_spec_disk_less.assert_called_once_with( name, ds_name, profileId=profile_id, extra_config=extra_config) self.session.invoke_api.assert_called_once_with(self.session.vim, 'CreateVM_Task', folder, config=create_spec, pool=resource_pool, host=host) self.session.wait_for_task.assert_called_once_with(task) def test_get_datastore(self): backing = mock.sentinel.backing datastore = mock.Mock(spec=object) datastore.ManagedObjectReference = [mock.sentinel.ds] self.session.invoke_api.return_value = datastore ret = self.vops.get_datastore(backing) self.assertEqual(mock.sentinel.ds, ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, backing, 'datastore') def test_get_summary(self): datastore = mock.sentinel.datastore summary = mock.sentinel.summary self.session.invoke_api.return_value = summary ret = self.vops.get_summary(datastore) self.assertEqual(summary, ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, datastore, 'summary') def test_get_relocate_spec(self): delete_disk_attribute = True def _create_side_effect(type): obj = mock.Mock() if type == "ns0:VirtualDiskFlatVer2BackingInfo": del obj.eagerlyScrub elif (type == "ns0:VirtualMachineRelocateSpec" and delete_disk_attribute): del obj.disk else: pass return obj factory = self.session.vim.client.factory factory.create.side_effect = _create_side_effect datastore = mock.sentinel.datastore resource_pool = mock.sentinel.resource_pool host = mock.sentinel.host disk_move_type = mock.sentinel.disk_move_type ret = self.vops._get_relocate_spec(datastore, resource_pool, host, disk_move_type) self.assertEqual(datastore, ret.datastore) self.assertEqual(resource_pool, ret.pool) self.assertEqual(host, ret.host) self.assertEqual(disk_move_type, ret.diskMoveType) # Test with disk locator. delete_disk_attribute = False disk_type = 'thin' disk_device = mock.Mock() ret = self.vops._get_relocate_spec(datastore, resource_pool, host, disk_move_type, disk_type, disk_device) factory.create.side_effect = None self.assertEqual(datastore, ret.datastore) self.assertEqual(resource_pool, ret.pool) self.assertEqual(host, ret.host) self.assertEqual(disk_move_type, ret.diskMoveType) self.assertIsInstance(ret.disk, list) self.assertEqual(1, len(ret.disk)) disk_locator = ret.disk[0] self.assertEqual(datastore, disk_locator.datastore) self.assertEqual(disk_device.key, disk_locator.diskId) backing = disk_locator.diskBackingInfo self.assertIsInstance(backing.thinProvisioned, bool) self.assertTrue(backing.thinProvisioned) self.assertEqual('', backing.fileName) self.assertEqual('persistent', backing.diskMode) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_get_disk_device') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_relocate_spec') def test_relocate_backing(self, get_relocate_spec, get_disk_device): disk_device = mock.sentinel.disk_device get_disk_device.return_value = disk_device spec = mock.sentinel.relocate_spec get_relocate_spec.return_value = spec task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing datastore = mock.sentinel.datastore resource_pool = mock.sentinel.resource_pool host = mock.sentinel.host disk_type = mock.sentinel.disk_type self.vops.relocate_backing(backing, datastore, resource_pool, host, disk_type) # Verify calls disk_move_type = 'moveAllDiskBackingsAndAllowSharing' get_disk_device.assert_called_once_with(backing) get_relocate_spec.assert_called_once_with(datastore, resource_pool, host, disk_move_type, disk_type, disk_device) self.session.invoke_api.assert_called_once_with(self.session.vim, 'RelocateVM_Task', backing, spec=spec) self.session.wait_for_task.assert_called_once_with(task) def test_move_backing_to_folder(self): task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing folder = mock.sentinel.folder self.vops.move_backing_to_folder(backing, folder) # Verify calls self.session.invoke_api.assert_called_once_with(self.session.vim, 'MoveIntoFolder_Task', folder, list=[backing]) self.session.wait_for_task.assert_called_once_with(task) def test_create_snapshot_operation(self): task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock(spec=object) task_info.result = mock.sentinel.result self.session.wait_for_task.return_value = task_info backing = mock.sentinel.backing name = mock.sentinel.name desc = mock.sentinel.description quiesce = True ret = self.vops.create_snapshot(backing, name, desc, quiesce) self.assertEqual(mock.sentinel.result, ret) self.session.invoke_api.assert_called_once_with(self.session.vim, 'CreateSnapshot_Task', backing, name=name, description=desc, memory=False, quiesce=quiesce) self.session.wait_for_task.assert_called_once_with(task) def test_get_snapshot_from_tree(self): volops = volumeops.VMwareVolumeOps name = mock.sentinel.name # Test snapshot == 'None' ret = volops._get_snapshot_from_tree(name, None) self.assertIsNone(ret) # Test root == snapshot snapshot = mock.sentinel.snapshot node = mock.Mock(spec=object) node.name = name node.snapshot = snapshot ret = volops._get_snapshot_from_tree(name, node) self.assertEqual(snapshot, ret) # Test root.childSnapshotList == None root = mock.Mock(spec=object) root.name = 'root' del root.childSnapshotList ret = volops._get_snapshot_from_tree(name, root) self.assertIsNone(ret) # Test root.child == snapshot root.childSnapshotList = [node] ret = volops._get_snapshot_from_tree(name, root) self.assertEqual(snapshot, ret) def test_get_snapshot(self): # build out the root snapshot tree snapshot_name = mock.sentinel.snapshot_name snapshot = mock.sentinel.snapshot root = mock.Mock(spec=object) root.name = 'root' node = mock.Mock(spec=object) node.name = snapshot_name node.snapshot = snapshot root.childSnapshotList = [node] # Test rootSnapshotList is not None snapshot_tree = mock.Mock(spec=object) snapshot_tree.rootSnapshotList = [root] self.session.invoke_api.return_value = snapshot_tree backing = mock.sentinel.backing ret = self.vops.get_snapshot(backing, snapshot_name) self.assertEqual(snapshot, ret) self.session.invoke_api.assert_called_with(vim_util, 'get_object_property', self.session.vim, 
backing, 'snapshot') # Test rootSnapshotList == None snapshot_tree.rootSnapshotList = None ret = self.vops.get_snapshot(backing, snapshot_name) self.assertIsNone(ret) self.session.invoke_api.assert_called_with(vim_util, 'get_object_property', self.session.vim, backing, 'snapshot') def test_snapshot_exists(self): backing = mock.sentinel.backing invoke_api = self.session.invoke_api invoke_api.return_value = None self.assertFalse(self.vops.snapshot_exists(backing)) invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, backing, 'snapshot') snapshot = mock.Mock() invoke_api.return_value = snapshot snapshot.rootSnapshotList = None self.assertFalse(self.vops.snapshot_exists(backing)) snapshot.rootSnapshotList = [mock.Mock()] self.assertTrue(self.vops.snapshot_exists(backing)) def test_delete_snapshot(self): backing = mock.sentinel.backing snapshot_name = mock.sentinel.snapshot_name # Test snapshot is None with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot: get_snapshot.return_value = None self.vops.delete_snapshot(backing, snapshot_name) get_snapshot.assert_called_once_with(backing, snapshot_name) # Test snapshot is not None snapshot = mock.sentinel.snapshot task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot: get_snapshot.return_value = snapshot self.vops.delete_snapshot(backing, snapshot_name) get_snapshot.assert_called_with(backing, snapshot_name) invoke_api.assert_called_once_with(self.session.vim, 'RemoveSnapshot_Task', snapshot, removeChildren=False) self.session.wait_for_task.assert_called_once_with(task) def test_get_folder(self): folder = mock.sentinel.folder backing = mock.sentinel.backing with mock.patch.object(self.vops, '_get_parent') as get_parent: get_parent.return_value = folder ret = self.vops._get_folder(backing) self.assertEqual(folder, ret) get_parent.assert_called_once_with(backing, 'Folder') def _verify_extra_config(self, option_values, key, value): self.assertEqual(1, len(option_values)) self.assertEqual(key, option_values[0].key) self.assertEqual(value, option_values[0].value) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_relocate_spec') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_disk_device') def test_get_clone_spec(self, get_disk_device, get_relocate_spec): factory = self.session.vim.client.factory factory.create.side_effect = lambda *args: mock.Mock() relocate_spec = mock.sentinel.relocate_spec get_relocate_spec.return_value = relocate_spec # Test with empty disk type. datastore = mock.sentinel.datastore disk_move_type = mock.sentinel.disk_move_type snapshot = mock.sentinel.snapshot disk_type = None backing = mock.sentinel.backing host = mock.sentinel.host rp = mock.sentinel.rp key = mock.sentinel.key value = mock.sentinel.value extra_config = {key: value} ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot, backing, disk_type, host, rp, extra_config) self.assertEqual(relocate_spec, ret.location) self.assertFalse(ret.powerOn) self.assertFalse(ret.template) self.assertEqual(snapshot, ret.snapshot) get_relocate_spec.assert_called_once_with(datastore, rp, host, disk_move_type, disk_type, None) self._verify_extra_config(ret.config.extraConfig, key, value) # Test with non-empty disk type. 
disk_device = mock.sentinel.disk_device get_disk_device.return_value = disk_device disk_type = 'thin' ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot, backing, disk_type, host, rp, extra_config) factory.create.side_effect = None self.assertEqual(relocate_spec, ret.location) self.assertFalse(ret.powerOn) self.assertFalse(ret.template) self.assertEqual(snapshot, ret.snapshot) get_disk_device.assert_called_once_with(backing) get_relocate_spec.assert_called_with(datastore, rp, host, disk_move_type, disk_type, disk_device) self._verify_extra_config(ret.config.extraConfig, key, value) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_folder') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_clone_spec') def _test_clone_backing( self, clone_type, folder, get_clone_spec, get_folder): backing_folder = mock.sentinel.backing_folder get_folder.return_value = backing_folder clone_spec = mock.sentinel.clone_spec get_clone_spec.return_value = clone_spec task = mock.sentinel.task self.session.invoke_api.return_value = task clone = mock.sentinel.clone self.session.wait_for_task.return_value = mock.Mock(result=clone) name = mock.sentinel.name backing = mock.sentinel.backing snapshot = mock.sentinel.snapshot datastore = mock.sentinel.datastore disk_type = mock.sentinel.disk_type host = mock.sentinel.host resource_pool = mock.sentinel.resource_pool extra_config = mock.sentinel.extra_config ret = self.vops.clone_backing( name, backing, snapshot, clone_type, datastore, disk_type=disk_type, host=host, resource_pool=resource_pool, extra_config=extra_config, folder=folder) if folder: self.assertFalse(get_folder.called) else: get_folder.assert_called_once_with(backing) if clone_type == 'linked': exp_disk_move_type = 'createNewChildDiskBacking' else: exp_disk_move_type = 'moveAllDiskBackingsAndDisallowSharing' get_clone_spec.assert_called_once_with( datastore, exp_disk_move_type, snapshot, backing, disk_type, host=host, resource_pool=resource_pool, extra_config=extra_config) exp_folder = folder if folder else backing_folder self.session.invoke_api.assert_called_once_with( self.session.vim, 'CloneVM_Task', backing, folder=exp_folder, name=name, spec=clone_spec) self.session.wait_for_task.assert_called_once_with(task) self.assertEqual(clone, ret) @ddt.data('linked', 'full') def test_clone_backing(self, clone_type): self._test_clone_backing(clone_type, mock.sentinel.folder) def test_clone_backing_with_empty_folder(self): self._test_clone_backing('linked', None) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_create_specs_for_disk_add') def test_attach_disk_to_backing(self, create_spec): reconfig_spec = mock.Mock() self.session.vim.client.factory.create.return_value = reconfig_spec disk_add_config_specs = mock.Mock() create_spec.return_value = disk_add_config_specs task = mock.Mock() self.session.invoke_api.return_value = task backing = mock.Mock() size_in_kb = units.Ki disk_type = "thin" adapter_type = "ide" vmdk_ds_file_path = mock.Mock() self.vops.attach_disk_to_backing(backing, size_in_kb, disk_type, adapter_type, vmdk_ds_file_path) self.assertEqual(disk_add_config_specs, reconfig_spec.deviceChange) create_spec.assert_called_once_with(size_in_kb, disk_type, adapter_type, vmdk_ds_file_path) self.session.invoke_api.assert_called_once_with(self.session.vim, "ReconfigVM_Task", backing, spec=reconfig_spec) self.session.wait_for_task.assert_called_once_with(task) def test_create_spec_for_disk_remove(self): disk_spec = mock.Mock() self.session.vim.client.factory.create.return_value = disk_spec disk_device = mock.sentinel.disk_device self.vops._create_spec_for_disk_remove(disk_device) self.session.vim.client.factory.create.assert_called_once_with( 'ns0:VirtualDeviceConfigSpec') self.assertEqual('remove', disk_spec.operation) self.assertEqual(disk_device, disk_spec.device) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_spec_for_disk_remove') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_reconfigure_backing') def test_detach_disk_from_backing(self, reconfigure_backing, create_spec): disk_spec = mock.sentinel.disk_spec create_spec.return_value = disk_spec reconfig_spec = mock.Mock() self.session.vim.client.factory.create.return_value = reconfig_spec backing = mock.sentinel.backing disk_device = mock.sentinel.disk_device self.vops.detach_disk_from_backing(backing, disk_device) create_spec.assert_called_once_with(disk_device) self.session.vim.client.factory.create.assert_called_once_with( 'ns0:VirtualMachineConfigSpec') self.assertEqual([disk_spec], reconfig_spec.deviceChange) reconfigure_backing.assert_called_once_with(backing, reconfig_spec) def test_rename_backing(self): task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing new_name = mock.sentinel.new_name self.vops.rename_backing(backing, new_name) self.session.invoke_api.assert_called_once_with(self.session.vim, "Rename_Task", backing, newName=new_name) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_disk_device') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_reconfigure_backing') def test_update_backing_disk_uuid(self, reconfigure_backing, get_disk_device): disk_spec = mock.Mock() reconfig_spec = mock.Mock() self.session.vim.client.factory.create.side_effect = [disk_spec, reconfig_spec] disk_device = mock.Mock() get_disk_device.return_value = disk_device self.vops.update_backing_disk_uuid(mock.sentinel.backing, mock.sentinel.disk_uuid) get_disk_device.assert_called_once_with(mock.sentinel.backing) self.assertEqual(mock.sentinel.disk_uuid, disk_device.backing.uuid) self.assertEqual('edit', disk_spec.operation) self.assertEqual(disk_device, disk_spec.device) self.assertEqual([disk_spec], reconfig_spec.deviceChange) reconfigure_backing.assert_called_once_with(mock.sentinel.backing, reconfig_spec) exp_factory_create_calls = [mock.call('ns0:VirtualDeviceConfigSpec'), mock.call('ns0:VirtualMachineConfigSpec')] self.assertEqual(exp_factory_create_calls, self.session.vim.client.factory.create.call_args_list) def test_change_backing_profile(self): # Test change to empty profile. reconfig_spec = mock.Mock() empty_profile_spec = mock.sentinel.empty_profile_spec self.session.vim.client.factory.create.side_effect = [ reconfig_spec, empty_profile_spec] task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing unique_profile_id = mock.sentinel.unique_profile_id profile_id = mock.Mock(uniqueId=unique_profile_id) self.vops.change_backing_profile(backing, profile_id) self.assertEqual([empty_profile_spec], reconfig_spec.vmProfile) self.session.invoke_api.assert_called_once_with(self.session.vim, "ReconfigVM_Task", backing, spec=reconfig_spec) self.session.wait_for_task.assert_called_once_with(task) # Test change to non-empty profile. profile_spec = mock.Mock() self.session.vim.client.factory.create.side_effect = [ reconfig_spec, profile_spec] self.session.invoke_api.reset_mock() self.session.wait_for_task.reset_mock() self.vops.change_backing_profile(backing, profile_id) self.assertEqual([profile_spec], reconfig_spec.vmProfile) self.assertEqual(unique_profile_id, reconfig_spec.vmProfile[0].profileId) self.session.invoke_api.assert_called_once_with(self.session.vim, "ReconfigVM_Task", backing, spec=reconfig_spec) self.session.wait_for_task.assert_called_once_with(task) # Clear side effects. 
self.session.vim.client.factory.create.side_effect = None def test_delete_file(self): file_mgr = mock.sentinel.file_manager self.session.vim.service_content.fileManager = file_mgr task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task # Test delete file file_path = mock.sentinel.file_path datacenter = mock.sentinel.datacenter self.vops.delete_file(file_path, datacenter) # verify calls invoke_api.assert_called_once_with(self.session.vim, 'DeleteDatastoreFile_Task', file_mgr, name=file_path, datacenter=datacenter) self.session.wait_for_task.assert_called_once_with(task) def test_create_datastore_folder(self): file_manager = mock.sentinel.file_manager self.session.vim.service_content.fileManager = file_manager invoke_api = self.session.invoke_api ds_name = "nfs" folder_path = "test/" datacenter = mock.sentinel.datacenter self.vops.create_datastore_folder(ds_name, folder_path, datacenter) invoke_api.assert_called_once_with(self.session.vim, 'MakeDirectory', file_manager, name="[nfs] test/", datacenter=datacenter) def test_create_datastore_folder_with_existing_folder(self): file_manager = mock.sentinel.file_manager self.session.vim.service_content.fileManager = file_manager invoke_api = self.session.invoke_api invoke_api.side_effect = exceptions.FileAlreadyExistsException ds_name = "nfs" folder_path = "test/" datacenter = mock.sentinel.datacenter self.vops.create_datastore_folder(ds_name, folder_path, datacenter) invoke_api.assert_called_once_with(self.session.vim, 'MakeDirectory', file_manager, name="[nfs] test/", datacenter=datacenter) invoke_api.side_effect = None def test_create_datastore_folder_with_invoke_api_error(self): file_manager = mock.sentinel.file_manager self.session.vim.service_content.fileManager = file_manager invoke_api = self.session.invoke_api invoke_api.side_effect = exceptions.VimFaultException( ["FileFault"], "error") ds_name = "nfs" folder_path = "test/" datacenter = mock.sentinel.datacenter self.assertRaises(exceptions.VimFaultException, self.vops.create_datastore_folder, ds_name, folder_path, datacenter) invoke_api.assert_called_once_with(self.session.vim, 'MakeDirectory', file_manager, name="[nfs] test/", datacenter=datacenter) invoke_api.side_effect = None def test_get_path_name(self): path = mock.Mock(spec=object) path_name = mock.sentinel.vm_path_name path.vmPathName = path_name invoke_api = self.session.invoke_api invoke_api.return_value = path backing = mock.sentinel.backing ret = self.vops.get_path_name(backing) self.assertEqual(path_name, ret) invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, backing, 'config.files') def test_get_entity_name(self): entity_name = mock.sentinel.entity_name invoke_api = self.session.invoke_api invoke_api.return_value = entity_name entity = mock.sentinel.entity ret = self.vops.get_entity_name(entity) self.assertEqual(entity_name, ret) invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, entity, 'name') def test_get_vmdk_path(self): # Setup hardware_devices for test device = mock.Mock() device.__class__.__name__ = 'VirtualDisk' backing = mock.Mock() backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo' backing.fileName = mock.sentinel.vmdk_path device.backing = backing invoke_api = self.session.invoke_api invoke_api.return_value = [device] # Test get_vmdk_path ret = self.vops.get_vmdk_path(backing) self.assertEqual(mock.sentinel.vmdk_path, ret) invoke_api.assert_called_once_with(vim_util, 'get_object_property', 
                                           self.session.vim, backing,
                                           'config.hardware.device')

        backing.__class__.__name__ = 'VirtualDiskSparseVer2BackingInfo'
        self.assertRaises(AssertionError,
                          self.vops.get_vmdk_path, backing)

        # Test with no disk device.
        invoke_api.return_value = []
        self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException,
                          self.vops.get_vmdk_path, backing)

    def test_get_disk_size(self):
        # Test with valid disk device.
        device = mock.Mock()
        device.__class__.__name__ = 'VirtualDisk'
        disk_size_bytes = 1024
        device.capacityInKB = disk_size_bytes / units.Ki
        invoke_api = self.session.invoke_api
        invoke_api.return_value = [device]
        self.assertEqual(disk_size_bytes,
                         self.vops.get_disk_size(mock.sentinel.backing))

        # Test with no disk device.
        invoke_api.return_value = []
        self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException,
                          self.vops.get_disk_size,
                          mock.sentinel.backing)

    def test_create_virtual_disk(self):
        task = mock.Mock()
        invoke_api = self.session.invoke_api
        invoke_api.return_value = task
        spec = mock.Mock()
        factory = self.session.vim.client.factory
        factory.create.return_value = spec
        disk_mgr = self.session.vim.service_content.virtualDiskManager
        dc_ref = mock.Mock()
        vmdk_ds_file_path = mock.Mock()
        size_in_kb = 1024
        adapter_type = 'ide'
        disk_type = 'thick'

        self.vops.create_virtual_disk(dc_ref, vmdk_ds_file_path, size_in_kb,
                                      adapter_type, disk_type)

        self.assertEqual(volumeops.VirtualDiskAdapterType.IDE,
                         spec.adapterType)
        self.assertEqual(volumeops.VirtualDiskType.PREALLOCATED,
                         spec.diskType)
        self.assertEqual(size_in_kb, spec.capacityKb)
        invoke_api.assert_called_once_with(self.session.vim,
                                           'CreateVirtualDisk_Task',
                                           disk_mgr,
                                           name=vmdk_ds_file_path,
                                           datacenter=dc_ref,
                                           spec=spec)
        self.session.wait_for_task.assert_called_once_with(task)

    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
                'create_virtual_disk')
    @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'delete_file') def test_create_flat_extent_virtual_disk_descriptor(self, delete_file, create_virtual_disk): dc_ref = mock.Mock() path = mock.Mock() size_in_kb = 1024 adapter_type = 'ide' disk_type = 'thick' self.vops.create_flat_extent_virtual_disk_descriptor(dc_ref, path, size_in_kb, adapter_type, disk_type) create_virtual_disk.assert_called_once_with( dc_ref, path.get_descriptor_ds_file_path(), size_in_kb, adapter_type, disk_type) delete_file.assert_called_once_with( path.get_flat_extent_ds_file_path(), dc_ref) def test_copy_vmdk_file(self): task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager src_dc_ref = mock.sentinel.src_dc_ref src_vmdk_file_path = mock.sentinel.src_vmdk_file_path dest_dc_ref = mock.sentinel.dest_dc_ref dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path, dest_dc_ref) invoke_api.assert_called_once_with(self.session.vim, 'CopyVirtualDisk_Task', disk_mgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=dest_dc_ref, force=True) self.session.wait_for_task.assert_called_once_with(task) def test_copy_vmdk_file_with_default_dest_datacenter(self): task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager src_dc_ref = mock.sentinel.src_dc_ref src_vmdk_file_path = mock.sentinel.src_vmdk_file_path dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path) invoke_api.assert_called_once_with(self.session.vim, 'CopyVirtualDisk_Task', disk_mgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=src_dc_ref, force=True) self.session.wait_for_task.assert_called_once_with(task) def test_move_vmdk_file(self): task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager src_dc_ref = mock.sentinel.src_dc_ref src_vmdk_file_path = mock.sentinel.src_vmdk_file_path dest_dc_ref = mock.sentinel.dest_dc_ref dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path self.vops.move_vmdk_file(src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path, dest_dc_ref=dest_dc_ref) invoke_api.assert_called_once_with(self.session.vim, 'MoveVirtualDisk_Task', disk_mgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=dest_dc_ref, force=True) self.session.wait_for_task.assert_called_once_with(task) def test_delete_vmdk_file(self): task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager dc_ref = self.session.dc_ref vmdk_file_path = self.session.vmdk_file self.vops.delete_vmdk_file(vmdk_file_path, dc_ref) invoke_api.assert_called_once_with(self.session.vim, 'DeleteVirtualDisk_Task', disk_mgr, name=vmdk_file_path, datacenter=dc_ref) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('oslo_vmware.pbm.get_profiles_by_ids') @mock.patch('oslo_vmware.pbm.get_profiles') def test_get_profile(self, get_profiles, get_profiles_by_ids): profile_ids = [mock.sentinel.profile_id] get_profiles.return_value = profile_ids profile_name = mock.sentinel.profile_name profile = mock.Mock() profile.name = 
profile_name get_profiles_by_ids.return_value = [profile] backing = mock.sentinel.backing self.assertEqual(profile_name, self.vops.get_profile(backing)) get_profiles.assert_called_once_with(self.session, backing) get_profiles_by_ids.assert_called_once_with(self.session, profile_ids) @mock.patch('oslo_vmware.pbm.get_profiles_by_ids') @mock.patch('oslo_vmware.pbm.get_profiles') def test_get_profile_with_no_profile(self, get_profiles, get_profiles_by_ids): get_profiles.return_value = [] backing = mock.sentinel.backing self.assertIsNone(self.vops.get_profile(backing)) get_profiles.assert_called_once_with(self.session, backing) self.assertFalse(get_profiles_by_ids.called) def test_extend_virtual_disk(self): """Test volumeops.extend_virtual_disk.""" task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager fake_size = 5 fake_size_in_kb = fake_size * units.Mi fake_name = 'fake_volume_0000000001' fake_dc = mock.sentinel.datacenter self.vops.extend_virtual_disk(fake_size, fake_name, fake_dc) invoke_api.assert_called_once_with(self.session.vim, "ExtendVirtualDisk_Task", disk_mgr, name=fake_name, datacenter=fake_dc, newCapacityKb=fake_size_in_kb, eagerZero=False) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_all_clusters') def test_get_cluster_refs(self, get_all_clusters): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 clusters = {"cls_1": cls_1, "cls_2": cls_2} get_all_clusters.return_value = clusters self.assertEqual({"cls_2": cls_2}, self.vops.get_cluster_refs(["cls_2"])) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_all_clusters') def test_get_cluster_refs_with_invalid_cluster(self, get_all_clusters): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 clusters = {"cls_1": cls_1, "cls_2": cls_2} get_all_clusters.return_value = clusters self.assertRaises(vmdk_exceptions.ClusterNotFoundException, self.vops.get_cluster_refs, ["cls_1", "cls_3"]) def test_get_cluster_hosts(self): host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 hosts = mock.Mock(ManagedObjectReference=[host_1, host_2]) self.session.invoke_api.return_value = hosts cluster = mock.sentinel.cluster ret = self.vops.get_cluster_hosts(cluster) self.assertEqual([host_1, host_2], ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, cluster, 'host') def test_get_cluster_hosts_with_no_host(self): self.session.invoke_api.return_value = None cluster = mock.sentinel.cluster ret = self.vops.get_cluster_hosts(cluster) self.assertEqual([], ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, cluster, 'host') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'continue_retrieval', return_value=None) def test_get_all_clusters(self, continue_retrieval): prop_1 = mock.Mock(val='test_cluster_1') cls_1 = mock.Mock(propSet=[prop_1], obj=mock.sentinel.mor_1) prop_2 = mock.Mock(val='/test_cluster_2') cls_2 = mock.Mock(propSet=[prop_2], obj=mock.sentinel.mor_2) retrieve_result = mock.Mock(objects=[cls_1, cls_2]) self.session.invoke_api.return_value = retrieve_result ret = self.vops._get_all_clusters() exp = {'test_cluster_1': mock.sentinel.mor_1, '/test_cluster_2': mock.sentinel.mor_2} self.assertEqual(exp, ret) self.session.invoke_api.assert_called_once_with( vim_util, 'get_objects', self.session.vim, 'ClusterComputeResource', self.MAX_OBJECTS) continue_retrieval.assert_called_once_with(retrieve_result) def test_get_entity_by_inventory_path(self): self.session.invoke_api.return_value = mock.sentinel.ref path = mock.sentinel.path ret = self.vops.get_entity_by_inventory_path(path) self.assertEqual(mock.sentinel.ref, ret) self.session.invoke_api.assert_called_once_with( self.session.vim, "FindByInventoryPath", self.session.vim.service_content.searchIndex, inventoryPath=path) def test_get_disk_devices(self): disk_device = mock.Mock() disk_device.__class__.__name__ = 'VirtualDisk' controller_device = mock.Mock() controller_device.__class__.__name__ = 'VirtualLSILogicController' devices = mock.Mock() devices.__class__.__name__ = "ArrayOfVirtualDevice" devices.VirtualDevice = [disk_device, controller_device] self.session.invoke_api.return_value = devices vm = mock.sentinel.vm self.assertEqual([disk_device], self.vops._get_disk_devices(vm)) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, vm, 'config.hardware.device') def _create_disk_device(self, file_name): backing = mock.Mock(fileName=file_name) backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo' return mock.Mock(backing=backing) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_get_disk_devices') def test_get_disk_device(self, get_disk_devices): dev_1 = self._create_disk_device('[ds1] foo/foo.vmdk') dev_2 = self._create_disk_device('[ds1] foo/foo_1.vmdk') get_disk_devices.return_value = [dev_1, dev_2] vm = mock.sentinel.vm self.assertEqual(dev_2, self.vops.get_disk_device(vm, '[ds1] foo/foo_1.vmdk')) get_disk_devices.assert_called_once_with(vm) class VirtualDiskPathTest(test.TestCase): """Unit tests for VirtualDiskPath.""" def setUp(self): super(VirtualDiskPathTest, self).setUp() self._path = volumeops.VirtualDiskPath("nfs", "A/B/", "disk") def test_get_datastore_file_path(self): self.assertEqual("[nfs] A/B/disk.vmdk", self._path.get_datastore_file_path("nfs", "A/B/disk.vmdk")) def test_get_descriptor_file_path(self): self.assertEqual("A/B/disk.vmdk", self._path.get_descriptor_file_path()) def test_get_descriptor_ds_file_path(self): self.assertEqual("[nfs] A/B/disk.vmdk", self._path.get_descriptor_ds_file_path()) class FlatExtentVirtualDiskPathTest(test.TestCase): """Unit tests for FlatExtentVirtualDiskPath.""" def setUp(self): super(FlatExtentVirtualDiskPathTest, self).setUp() self._path = volumeops.FlatExtentVirtualDiskPath("nfs", "A/B/", "disk") def test_get_flat_extent_file_path(self): self.assertEqual("A/B/disk-flat.vmdk", self._path.get_flat_extent_file_path()) def test_get_flat_extent_ds_file_path(self): self.assertEqual("[nfs] A/B/disk-flat.vmdk", self._path.get_flat_extent_ds_file_path()) class VirtualDiskTypeTest(test.TestCase): """Unit tests for VirtualDiskType.""" def test_is_valid(self): self.assertTrue(volumeops.VirtualDiskType.is_valid("thick")) self.assertTrue(volumeops.VirtualDiskType.is_valid("thin")) self.assertTrue(volumeops.VirtualDiskType.is_valid("eagerZeroedThick")) self.assertFalse(volumeops.VirtualDiskType.is_valid("preallocated")) def test_validate(self): volumeops.VirtualDiskType.validate("thick") volumeops.VirtualDiskType.validate("thin") volumeops.VirtualDiskType.validate("eagerZeroedThick") self.assertRaises(vmdk_exceptions.InvalidDiskTypeException, volumeops.VirtualDiskType.validate, "preallocated") def test_get_virtual_disk_type(self): self.assertEqual("preallocated", volumeops.VirtualDiskType.get_virtual_disk_type( "thick")) self.assertEqual("thin", volumeops.VirtualDiskType.get_virtual_disk_type( "thin")) self.assertEqual("eagerZeroedThick", volumeops.VirtualDiskType.get_virtual_disk_type( "eagerZeroedThick")) self.assertRaises(vmdk_exceptions.InvalidDiskTypeException, volumeops.VirtualDiskType.get_virtual_disk_type, "preallocated") class VirtualDiskAdapterTypeTest(test.TestCase): """Unit tests for VirtualDiskAdapterType.""" def test_is_valid(self): self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("lsiLogic")) self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("busLogic")) self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid( "lsiLogicsas")) self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("ide")) self.assertFalse(volumeops.VirtualDiskAdapterType.is_valid("pvscsi")) def test_validate(self): volumeops.VirtualDiskAdapterType.validate("lsiLogic") volumeops.VirtualDiskAdapterType.validate("busLogic") volumeops.VirtualDiskAdapterType.validate("lsiLogicsas") volumeops.VirtualDiskAdapterType.validate("ide") self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, volumeops.VirtualDiskAdapterType.validate, "pvscsi") def test_get_adapter_type(self): self.assertEqual("lsiLogic", volumeops.VirtualDiskAdapterType.get_adapter_type( "lsiLogic")) self.assertEqual("busLogic", 
volumeops.VirtualDiskAdapterType.get_adapter_type( "busLogic")) self.assertEqual("lsiLogic", volumeops.VirtualDiskAdapterType.get_adapter_type( "lsiLogicsas")) self.assertEqual("ide", volumeops.VirtualDiskAdapterType.get_adapter_type( "ide")) self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, volumeops.VirtualDiskAdapterType.get_adapter_type, "pvscsi") class ControllerTypeTest(test.TestCase): """Unit tests for ControllerType.""" def test_get_controller_type(self): self.assertEqual(volumeops.ControllerType.LSI_LOGIC, volumeops.ControllerType.get_controller_type( 'lsiLogic')) self.assertEqual(volumeops.ControllerType.BUS_LOGIC, volumeops.ControllerType.get_controller_type( 'busLogic')) self.assertEqual(volumeops.ControllerType.LSI_LOGIC_SAS, volumeops.ControllerType.get_controller_type( 'lsiLogicsas')) self.assertEqual(volumeops.ControllerType.IDE, volumeops.ControllerType.get_controller_type( 'ide')) self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, volumeops.ControllerType.get_controller_type, 'invalid_type') def test_is_scsi_controller(self): self.assertTrue(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.LSI_LOGIC)) self.assertTrue(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.BUS_LOGIC)) self.assertTrue(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.LSI_LOGIC_SAS)) self.assertFalse(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.IDE)) cinder-8.0.0/cinder/tests/unit/fake_hpe_3par_client.py0000664000567000056710000000170412701406250024107 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Fake HPE client for testing 3PAR without installing the client.""" import sys import mock from cinder.tests.unit import fake_hpe_client_exceptions as hpeexceptions hpe3par = mock.Mock() hpe3par.version = "4.2.0" hpe3par.exceptions = hpeexceptions sys.modules['hpe3parclient'] = hpe3par cinder-8.0.0/cinder/tests/unit/test_backup_swift.py0000664000567000056710000010641112701406250023603 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Backup swift code. 
""" import bz2 import ddt import filecmp import hashlib import os import shutil import tempfile import zlib import mock from oslo_config import cfg from swiftclient import client as swift from cinder.backup.drivers import swift as swift_dr from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import test from cinder.tests.unit.backup import fake_swift_client from cinder.tests.unit.backup import fake_swift_client2 from cinder.tests.unit import fake_constants as fake CONF = cfg.CONF ANY = mock.ANY def fake_md5(arg): class result(object): def hexdigest(self): return 'fake-md5-sum' ret = result() return ret @ddt.ddt class BackupSwiftTestCase(test.TestCase): """Test Case for swift.""" _DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df' def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): vol = {'id': volume_id, 'size': 1, 'status': 'available'} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, volume_id=_DEFAULT_VOLUME_ID, container='test-container', backup_id=fake.backup_id, parent_id=None, service_metadata=None): try: db.volume_get(self.ctxt, volume_id) except exception.NotFound: self._create_volume_db_entry(volume_id=volume_id) backup = {'id': backup_id, 'size': 1, 'container': container, 'volume_id': volume_id, 'parent_id': parent_id, 'user_id': fake.user_id, 'project_id': fake.project_id, 'service_metadata': service_metadata, } return db.backup_create(self.ctxt, backup)['id'] def setUp(self): super(BackupSwiftTestCase, self).setUp() service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'publicURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'publicURL': u'http://example.com'}]}] self.ctxt = context.get_admin_context() self.ctxt.service_catalog = service_catalog self.stubs.Set(swift, 'Connection', fake_swift_client.FakeSwiftClient.Connection) self.stubs.Set(hashlib, 'md5', fake_md5) self.volume_file = tempfile.NamedTemporaryFile() self.temp_dir = tempfile.mkdtemp() self.addCleanup(self.volume_file.close) # Remove tempdir. 
self.addCleanup(shutil.rmtree, self.temp_dir) for _i in range(0, 64): self.volume_file.write(os.urandom(1024)) notify_patcher = mock.patch( 'cinder.volume.utils.notify_about_backup_usage') notify_patcher.start() self.addCleanup(notify_patcher.stop) def test_backup_swift_url(self): self.ctxt.service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'adminURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'publicURL': u'http://example.com'}]}] self.assertRaises(exception.BackupDriverException, swift_dr.SwiftBackupDriver, self.ctxt) def test_backup_swift_auth_url(self): self.ctxt.service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'publicURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'adminURL': u'http://example.com'}]}] self.assertRaises(exception.BackupDriverException, swift_dr.SwiftBackupDriver, self.ctxt) def test_backup_swift_url_conf(self): self.ctxt.service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'adminURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'publicURL': u'http://example.com'}]}] self.ctxt.project_id = fake.project_id self.override_config("backup_swift_url", "http://public.example.com/") backup = swift_dr.SwiftBackupDriver(self.ctxt) self.assertEqual("%s%s" % (CONF.backup_swift_url, self.ctxt.project_id), backup.swift_url) def test_backup_swift_auth_url_conf(self): self.ctxt.service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'publicURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'adminURL': u'http://example.com'}]}] self.ctxt.project_id = fake.project_id self.override_config("backup_swift_auth_url", "http://public.example.com/") backup = swift_dr.SwiftBackupDriver(self.ctxt) self.assertEqual("%s%s" % (CONF.backup_swift_auth_url, self.ctxt.project_id), backup.auth_url) def test_backup_swift_info(self): self.override_config("swift_catalog_info", "dummy") self.assertRaises(exception.BackupDriverException, swift_dr.SwiftBackupDriver, self.ctxt) @ddt.data( {'auth': 'single_user', 'insecure': True}, {'auth': 'single_user', 'insecure': False}, {'auth': 'per_user', 'insecure': True}, {'auth': 'per_user', 'insecure': False}, ) @ddt.unpack def test_backup_swift_auth_insecure(self, auth, insecure): self.override_config("backup_swift_auth_insecure", insecure) self.override_config('backup_swift_auth', auth) if auth == 'single_user': self.override_config('backup_swift_user', 'swift-user') mock_connection = self.mock_object(swift, 'Connection') swift_dr.SwiftBackupDriver(self.ctxt) if auth == 'single_user': mock_connection.assert_called_once_with(insecure=insecure, authurl=ANY, auth_version=ANY, tenant_name=ANY, user=ANY, key=ANY, retries=ANY, starting_backoff=ANY, cacert=ANY) else: mock_connection.assert_called_once_with(insecure=insecure, retries=ANY, preauthurl=ANY, preauthtoken=ANY, starting_backoff=ANY, cacert=ANY) def test_backup_uncompressed(self): volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) def test_backup_bz2(self): volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c' 
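
        # Illustrative sketch (hypothetical helper, not the Swift driver's
        # actual code): the compression tests around here drive the
        # backup_compression_algorithm option; a minimal dispatcher for the
        # values these tests use, relying on the bz2/zlib modules imported
        # at the top of this file, might read:
        def _example_get_compressor(algorithm):
            compressors = {'none': None, 'bz2': bz2, 'zlib': zlib}
            return compressors[algorithm]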
self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='bz2') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) def test_backup_zlib(self): volume_id = '5cea0535-b6fb-4531-9a38-000000bea094' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zlib') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) @mock.patch.object(db, 'backup_update', wraps=db.backup_update) def test_backup_default_container(self, backup_update_mock): volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' self._create_backup_db_entry(volume_id=volume_id, container=None) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual('volumebackups', backup['container']) self.assertEqual(3, backup_update_mock.call_count) @mock.patch.object(db, 'backup_update', wraps=db.backup_update) def test_backup_db_container(self, backup_update_mock): volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' self._create_backup_db_entry(volume_id=volume_id, container='existing_name') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual('existing_name', backup['container']) # Make sure we are not making a DB update when we are using the same # value that's already in the DB. self.assertEqual(2, backup_update_mock.call_count) @mock.patch.object(db, 'backup_update', wraps=db.backup_update) def test_backup_driver_container(self, backup_update_mock): volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' self._create_backup_db_entry(volume_id=volume_id, container=None) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) with mock.patch.object(service, 'update_container_name', return_value='driver_name'): service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual('driver_name', backup['container']) self.assertEqual(3, backup_update_mock.call_count) @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.' '_send_progress_end') @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.' '_send_progress_notification') def test_backup_default_container_notify(self, _send_progress, _send_progress_end): volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a' self._create_backup_db_entry(volume_id=volume_id, container=None) # If the backup_object_number_per_notification is set to 1, # the _send_progress method will be called for sure. 
CONF.set_override("backup_object_number_per_notification", 1) CONF.set_override("backup_swift_enable_progress_timer", False) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the backup_object_number_per_notification is increased to # another value, the _send_progress method will not be called. _send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 10) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) self.assertFalse(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the timer is enabled, the _send_progress will be called, # since the timer can trigger the progress notification. _send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 10) CONF.set_override("backup_swift_enable_progress_timer", True) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) def test_backup_custom_container(self): volume_id = '1da9859e-77e5-4731-bd58-000000ca119e' container_name = 'fake99' self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual(container_name, backup['container']) def test_backup_shafile(self): volume_id = '6465dad4-22af-48f7-8a1a-000000218907' def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix # Raise a pseudo exception.BackupDriverException. self.stubs.Set(swift_dr.SwiftBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name) self.stubs.Set(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertEqual(container_name, backup['container']) # Verify sha contents content1 = service._read_sha256file(backup) self.assertEqual(64 * 1024 / content1['chunk_size'], len(content1['sha256s'])) def test_backup_cmp_shafiles(self): volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2' def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix # Raise a pseudo exception.BackupDriverException. 
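
        # Illustrative aside: for a backup dict such as
        # {'id': 'b1', 'volume_id': 'v1'}, the fake above produces
        # 'volume_v1_az_fake_backup_b1' (volume id first, then the
        # availability zone and backup id); a quick self-check:
        assert _fake_generate_object_name_prefix(
            None, {'id': 'b1', 'volume_id': 'v1'}
        ) == 'volume_v1_az_fake_backup_b1'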
    def test_backup_cmp_shafiles(self):
        volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2'

        def _fake_generate_object_name_prefix(self, backup):
            az = 'az_fake'
            backup_name = '%s_backup_%s' % (az, backup['id'])
            volume = 'volume_%s' % (backup['volume_id'])
            prefix = volume + '_' + backup_name
            return prefix

        # Stub out the object name prefix generator so the object layout
        # in the fake Swift container is deterministic.
        self.stubs.Set(swift_dr.SwiftBackupDriver,
                       '_generate_object_name_prefix',
                       _fake_generate_object_name_prefix)
        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=fake.backup_id)
        self.stubs.Set(swift, 'Connection',
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        self.assertEqual(container_name, backup['container'])

        # Create incremental backup with no change to contents
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=fake.backup2_id,
                                     parent_id=fake.backup_id)
        self.stubs.Set(swift, 'Connection',
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id)
        service.backup(deltabackup, self.volume_file)
        deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id)
        self.assertEqual(container_name, deltabackup['container'])

        # Compare shas from both files
        content1 = service._read_sha256file(backup)
        content2 = service._read_sha256file(deltabackup)
        self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
        self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
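    # Illustrative sketch (an addition, not part of the upstream suite):
    # the delta tests below assert exactly this kind of positional
    # difference between a parent backup's digest list and an incremental
    # backup's digest list. The helper name is hypothetical.
    @staticmethod
    def _example_changed_indexes(parent_shas, delta_shas):
        # Pair up digests positionally and keep the indexes that differ.
        return [i for i, (a, b) in enumerate(zip(parent_shas, delta_shas))
                if a != b]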
    def test_backup_delta_two_objects_change(self):
        volume_id = '30dab288-265a-4583-9abe-000000d42c67'

        def _fake_generate_object_name_prefix(self, backup):
            az = 'az_fake'
            backup_name = '%s_backup_%s' % (az, backup['id'])
            volume = 'volume_%s' % (backup['volume_id'])
            prefix = volume + '_' + backup_name
            return prefix

        # Stub out the object name prefix generator so the object layout
        # in the fake Swift container is deterministic.
        self.stubs.Set(swift_dr.SwiftBackupDriver,
                       '_generate_object_name_prefix',
                       _fake_generate_object_name_prefix)
        self.flags(backup_swift_object_size=8 * 1024)
        self.flags(backup_swift_block_size=1024)
        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=fake.backup_id)
        self.stubs.Set(swift, 'Connection',
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        self.assertEqual(container_name, backup['container'])

        # Create incremental backup after changing the contents of two
        # distinct backup objects.
        self.volume_file.seek(2 * 8 * 1024)
        self.volume_file.write(os.urandom(1024))
        self.volume_file.seek(4 * 8 * 1024)
        self.volume_file.write(os.urandom(1024))
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=fake.backup2_id,
                                     parent_id=fake.backup_id)
        self.stubs.Set(swift, 'Connection',
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id)
        service.backup(deltabackup, self.volume_file)
        deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id)
        self.assertEqual(container_name, deltabackup['container'])
        content1 = service._read_sha256file(backup)
        content2 = service._read_sha256file(deltabackup)
        # Verify that two shas are changed at index 16 and 32
        self.assertNotEqual(content1['sha256s'][16],
                            content2['sha256s'][16])
        self.assertNotEqual(content1['sha256s'][32],
                            content2['sha256s'][32])
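    # Illustrative sketch (an addition, not part of the upstream suite):
    # why the writes above land at digest indexes 16 and 32. With
    # backup_swift_block_size=1024, the digest index of a byte offset is
    # simply offset // block_size. The helper name is hypothetical.
    @staticmethod
    def _example_sha_index(offset, block_size=1024):
        # Each block_size-byte slice of the volume owns one digest slot.
        return offset // block_size
    # e.g. _example_sha_index(2 * 8 * 1024) == 16 and
    # _example_sha_index(4 * 8 * 1024) == 32.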
    def test_backup_delta_two_blocks_in_object_change(self):
        volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba'

        def _fake_generate_object_name_prefix(self, backup):
            az = 'az_fake'
            backup_name = '%s_backup_%s' % (az, backup['id'])
            volume = 'volume_%s' % (backup['volume_id'])
            prefix = volume + '_' + backup_name
            return prefix

        # Stub out the object name prefix generator so the object layout
        # in the fake Swift container is deterministic.
        self.stubs.Set(swift_dr.SwiftBackupDriver,
                       '_generate_object_name_prefix',
                       _fake_generate_object_name_prefix)
        self.flags(backup_swift_object_size=8 * 1024)
        self.flags(backup_swift_block_size=1024)
        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=fake.backup_id)
        self.stubs.Set(swift, 'Connection',
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        self.assertEqual(container_name, backup['container'])

        # Create incremental backup after changing two blocks within a
        # single backup object.
        self.volume_file.seek(16 * 1024)
        self.volume_file.write(os.urandom(1024))
        self.volume_file.seek(20 * 1024)
        self.volume_file.write(os.urandom(1024))
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=fake.backup2_id,
                                     parent_id=fake.backup_id)
        self.stubs.Set(swift, 'Connection',
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id)
        service.backup(deltabackup, self.volume_file)
        deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id)
        self.assertEqual(container_name, deltabackup['container'])

        # Verify that two shas are changed at index 16 and 20
        content1 = service._read_sha256file(backup)
        content2 = service._read_sha256file(deltabackup)
        self.assertNotEqual(content1['sha256s'][16],
                            content2['sha256s'][16])
        self.assertNotEqual(content1['sha256s'][20],
                            content2['sha256s'][20])

    def test_create_backup_put_object_wraps_socket_error(self):
        volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2'
        container_name = 'socket_error_on_put'
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        self.assertRaises(exception.SwiftConnectionFailed,
                          service.backup,
                          backup, self.volume_file)

    def test_backup_backup_metadata_fail(self):
        """Test of when an exception occurs in backup().

        In backup(), after an exception occurs in
        self._backup_metadata(), we want to check the process of an
        exception handler.
        """
        volume_id = '020d9142-339c-4876-a445-000000f1520c'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='none')
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)

        def fake_backup_metadata(self, backup, object_meta):
            raise exception.BackupDriverException(message=_('fake'))

        # Raise a pseudo exception.BackupDriverException.
        self.stubs.Set(swift_dr.SwiftBackupDriver,
                       '_backup_metadata',
                       fake_backup_metadata)

        # We expect the exception to be propagated directly.
        self.assertRaises(exception.BackupDriverException,
                          service.backup,
                          backup, self.volume_file)
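    # Illustrative sketch (an addition, not part of the upstream suite):
    # the shape of error handling the two *_metadata_fail tests exercise.
    # Both callables here are hypothetical stand-ins, not the driver's
    # real methods.
    @staticmethod
    def _example_backup_with_cleanup(do_backup_metadata, do_delete):
        try:
            do_backup_metadata()
        except Exception:
            # Cleanup may itself raise; in that case the cleanup error
            # propagates instead of the original one, which is what
            # test_backup_backup_metadata_fail2 asserts.
            do_delete()
            raise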
""" volume_id = '2164421d-f181-4db7-b9bd-000000eeb628' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(message=_('fake')) # Raise a pseudo exception.BackupDriverException. self.stubs.Set(swift_dr.SwiftBackupDriver, '_backup_metadata', fake_backup_metadata) def fake_delete(self, backup): raise exception.BackupOperationError() # Raise a pseudo exception.BackupOperationError. self.stubs.Set(swift_dr.SwiftBackupDriver, 'delete', fake_delete) # We expect that the second exception is notified. self.assertRaises(exception.BackupOperationError, service.backup, backup, self.volume_file) def test_restore(self): volume_id = 'c2a81f09-f480-4325-8424-00000071685b' self._create_backup_db_entry(volume_id=volume_id) service = swift_dr.SwiftBackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.restore(backup, volume_id, volume_file) def test_restore_delta(self): volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e' def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix # Raise a pseudo exception.BackupDriverException. self.stubs.Set(swift_dr.SwiftBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_swift_object_size=8 * 1024) self.flags(backup_swift_block_size=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup_id) self.stubs.Set(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) service.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.backup2_id, parent_id=fake.backup_id) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) service.backup(deltabackup, self.volume_file, True) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.backup2_id) service.restore(backup, volume_id, restored_file) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) def test_restore_wraps_socket_error(self): volume_id = 'c1160de7-2774-4f20-bf14-0000001ac139' container_name = 'socket_error_on_get' self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = swift_dr.SwiftBackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id) self.assertRaises(exception.SwiftConnectionFailed, service.restore, backup, volume_id, volume_file) def test_restore_unsupported_version(self): volume_id = '390db8c1-32d3-42ca-82c9-00000010c703' container_name = 'unsupported_version' 
    def test_delete(self):
        volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31'
        object_prefix = 'test_prefix'
        self._create_backup_db_entry(volume_id=volume_id,
                                     service_metadata=object_prefix)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        service.delete(backup)

    def test_delete_wraps_socket_error(self):
        volume_id = 'f74cb6fa-2900-40df-87ac-0000000f72ea'
        container_name = 'socket_error_on_delete'
        object_prefix = 'test_prefix'
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     service_metadata=object_prefix)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        self.assertRaises(exception.SwiftConnectionFailed,
                          service.delete,
                          backup)

    def test_delete_without_object_prefix(self):
        volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1'

        def _fake_delete_object(self, container, object_name):
            raise AssertionError('delete_object method should not be '
                                 'called.')

        self.stubs.Set(swift_dr.SwiftBackupDriver,
                       'delete_object',
                       _fake_delete_object)

        self._create_backup_db_entry(volume_id=volume_id)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        backup = objects.Backup.get_by_id(self.ctxt, fake.backup_id)
        service.delete(backup)

    def test_get_compressor(self):
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        compressor = service._get_compressor('None')
        self.assertIsNone(compressor)
        compressor = service._get_compressor('zlib')
        self.assertEqual(zlib, compressor)
        compressor = service._get_compressor('bz2')
        self.assertEqual(bz2, compressor)
        self.assertRaises(ValueError, service._get_compressor, 'fake')

    def test_prepare_output_data_effective_compression(self):
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = b'\0' * 128

        result = service._prepare_output_data(fake_data)

        self.assertEqual('zlib', result[0])
        self.assertTrue(len(result[1]) < len(fake_data))

    def test_prepare_output_data_no_compression(self):
        self.flags(backup_compression_algorithm='none')
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = b'\0' * 128

        result = service._prepare_output_data(fake_data)

        self.assertEqual('none', result[0])
        self.assertEqual(fake_data, result[1])

    def test_prepare_output_data_ineffective_compression(self):
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = b'\0' * 128
        # Pre-compress so that compression in the driver will be
        # ineffective.
        already_compressed_data = service.compressor.compress(fake_data)

        result = service._prepare_output_data(already_compressed_data)

        self.assertEqual('none', result[0])
        self.assertEqual(already_compressed_data, result[1])
cinder-8.0.0/cinder/tests/unit/compute/0000775000567000056710000000000012701406543021167 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/compute/__init__.py0000664000567000056710000000000012701406250023261 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/compute/test_nova.py0000664000567000056710000001261312701406250023541 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from cinder.compute import nova from cinder import context from cinder import test class NovaClientTestCase(test.TestCase): def setUp(self): super(NovaClientTestCase, self).setUp() self.ctx = context.RequestContext('regularuser', 'e3f0833dc08b4cea', auth_token='token', is_admin=False) self.ctx.service_catalog = \ [{'type': 'compute', 'name': 'nova', 'endpoints': [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]}, {'type': 'identity', 'name': 'keystone', 'endpoints': [{'publicURL': 'http://keystonehost:5000/v2.0'}]}] self.override_config('nova_endpoint_template', 'http://novahost:8774/v2/%(project_id)s') self.override_config('nova_endpoint_admin_template', 'http://novaadmhost:4778/v2/%(project_id)s') self.override_config('os_privileged_user_name', 'adminuser') self.override_config('os_privileged_user_password', 'strongpassword') @mock.patch('novaclient.client.Client') def test_nova_client_regular(self, p_client): nova.novaclient(self.ctx) p_client.assert_called_once_with( nova.NOVA_API_VERSION, 'regularuser', 'token', None, region_name=None, auth_url='http://novahost:8774/v2/e3f0833dc08b4cea', insecure=False, endpoint_type='publicURL', cacert=None, timeout=None, extensions=nova.nova_extensions) @mock.patch('novaclient.client.Client') def test_nova_client_admin_endpoint(self, p_client): nova.novaclient(self.ctx, admin_endpoint=True) p_client.assert_called_once_with( nova.NOVA_API_VERSION, 'regularuser', 'token', None, region_name=None, auth_url='http://novaadmhost:4778/v2/e3f0833dc08b4cea', insecure=False, endpoint_type='adminURL', cacert=None, timeout=None, extensions=nova.nova_extensions) @mock.patch('novaclient.client.Client') def test_nova_client_privileged_user(self, p_client): nova.novaclient(self.ctx, privileged_user=True) p_client.assert_called_once_with( nova.NOVA_API_VERSION, 'adminuser', 'strongpassword', None, region_name=None, auth_url='http://keystonehost:5000/v2.0', insecure=False, endpoint_type='publicURL', cacert=None, timeout=None, extensions=nova.nova_extensions) @mock.patch('novaclient.client.Client') def test_nova_client_privileged_user_custom_auth_url(self, p_client): self.override_config('os_privileged_user_auth_url', 'http://privatekeystonehost:5000/v2.0') nova.novaclient(self.ctx, privileged_user=True) p_client.assert_called_once_with( nova.NOVA_API_VERSION, 'adminuser', 'strongpassword', None, region_name=None, auth_url='http://privatekeystonehost:5000/v2.0', insecure=False, endpoint_type='publicURL', cacert=None, timeout=None, extensions=nova.nova_extensions) @mock.patch('novaclient.client.Client') def test_nova_client_custom_region(self, p_client): self.override_config('os_region_name', 'farfaraway') nova.novaclient(self.ctx) p_client.assert_called_once_with( nova.NOVA_API_VERSION, 'regularuser', 'token', None, region_name='farfaraway', auth_url='http://novahost:8774/v2/e3f0833dc08b4cea', insecure=False, endpoint_type='publicURL', cacert=None, timeout=None, extensions=nova.nova_extensions) class 
FakeNovaClient(object): class Volumes(object): def __getattr__(self, item): return None def __init__(self): self.volumes = self.Volumes() def create_volume_snapshot(self, *args, **kwargs): pass def delete_volume_snapshot(self, *args, **kwargs): pass class NovaApiTestCase(test.TestCase): def setUp(self): super(NovaApiTestCase, self).setUp() self.api = nova.API() self.novaclient = FakeNovaClient() self.ctx = context.get_admin_context() def test_update_server_volume(self): with mock.patch.object(nova, 'novaclient') as mock_novaclient, \ mock.patch.object(self.novaclient.volumes, 'update_server_volume') as \ mock_update_server_volume: mock_novaclient.return_value = self.novaclient self.api.update_server_volume(self.ctx, 'server_id', 'attach_id', 'new_volume_id') mock_novaclient.assert_called_once_with(self.ctx) mock_update_server_volume.assert_called_once_with( 'server_id', 'attach_id', 'new_volume_id' ) cinder-8.0.0/cinder/tests/unit/conf_fixture.py0000664000567000056710000000454212701406250022560 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg CONF = cfg.CONF CONF.import_opt('policy_file', 'cinder.policy', group='oslo_policy') CONF.import_opt('volume_driver', 'cinder.volume.manager') CONF.import_opt('xiv_ds8k_proxy', 'cinder.volume.drivers.ibm.xiv_ds8k') CONF.import_opt('backup_driver', 'cinder.backup.manager') CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='keymgr') CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') def_vol_type = 'fake_vol_type' def set_defaults(conf): conf.set_default('default_volume_type', def_vol_type) conf.set_default('volume_driver', 'cinder.tests.unit.fake_driver.FakeISCSIDriver') conf.set_default('iscsi_helper', 'fake') conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake') conf.set_default('connection', 'sqlite://', group='database') conf.set_default('sqlite_synchronous', False, group='database') conf.set_default('policy_file', 'cinder.tests.unit/policy.json', group='oslo_policy') conf.set_default( 'xiv_ds8k_proxy', 'cinder.tests.unit.test_ibm_xiv_ds8k.XIVDS8KFakeProxyDriver') conf.set_default('backup_driver', 'cinder.tests.unit.backup.fake_service') conf.set_default('fixed_key', default='0' * 64, group='keymgr') conf.set_default('scheduler_driver', 'cinder.scheduler.filter_scheduler.FilterScheduler') conf.set_default('state_path', os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..', '..'))) conf.set_default('policy_dirs', [], group='oslo_policy') conf.set_default('auth_strategy', 'noauth') cinder-8.0.0/cinder/tests/unit/test_volume_transfer.py0000664000567000056710000002711012701406250024333 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit Tests for volume transfers.""" import datetime import mock from cinder import context from cinder import exception from cinder import objects from cinder import quota from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import utils from cinder.transfer import api as transfer_api QUOTAS = quota.QUOTAS class VolumeTransferTestCase(test.TestCase): """Test cases for volume transfer code.""" def setUp(self): super(VolumeTransferTestCase, self).setUp() self.ctxt = context.RequestContext(user_id=fake.user_id, project_id=fake.project_id) self.updated_at = datetime.datetime(1, 1, 1, 1, 1, 1) @mock.patch('cinder.volume.utils.notify_about_volume_usage') def test_transfer_volume_create_delete(self, mock_notify): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) response = tx_api.create(self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', volume['status'], 'Unexpected state') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) tx_api.delete(self.ctxt, response['id']) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('available', volume['status'], 'Unexpected state') calls = [mock.call(self.ctxt, mock.ANY, "transfer.delete.start"), mock.call(self.ctxt, mock.ANY, "transfer.delete.end")] mock_notify.assert_has_calls(calls) self.assertEqual(4, mock_notify.call_count) def test_transfer_invalid_volume(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, status='in-use', updated_at=self.updated_at) self.assertRaises(exception.InvalidVolume, tx_api.create, self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('in-use', volume['status'], 'Unexpected state') @mock.patch('cinder.volume.utils.notify_about_volume_usage') def test_transfer_accept_invalid_authkey(self, mock_notify): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', volume['status'], 'Unexpected state') self.assertRaises(exception.TransferNotFound, tx_api.accept, self.ctxt, '2', transfer['auth_key']) self.assertRaises(exception.InvalidAuthKey, tx_api.accept, self.ctxt, transfer['id'], 'wrong') @mock.patch('cinder.volume.utils.notify_about_volume_usage') def test_transfer_accept_invalid_volume(self, mock_notify): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', 
volume['status'], 'Unexpected state') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) volume.status = 'wrong' volume.save() self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) volume.status = 'awaiting-transfer' volume.save() # Because the InvalidVolume exception is raised in tx_api, so there is # only transfer.accept.start called and missing transfer.accept.end. calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")] mock_notify.assert_has_calls(calls) self.assertEqual(3, mock_notify.call_count) @mock.patch('cinder.volume.utils.notify_about_volume_usage') def test_transfer_accept_volume_in_consistencygroup(self, mock_notify): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() consistencygroup = utils.create_consistencygroup(self.ctxt) volume = utils.create_volume(self.ctxt, updated_at=self.updated_at, consistencygroup_id= consistencygroup.id) transfer = tx_api.create(self.ctxt, volume.id, 'Description') self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) @mock.patch.object(QUOTAS, "reserve") @mock.patch.object(QUOTAS, "add_volume_type_opts") @mock.patch('cinder.volume.utils.notify_about_volume_usage') def test_transfer_accept(self, mock_notify, mock_quota_voltype, mock_quota_reserve): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.volume_type_id, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') self.ctxt.user_id = fake.user2_id self.ctxt.project_id = fake.project2_id response = tx_api.accept(self.ctxt, transfer['id'], transfer['auth_key']) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual(fake.project2_id, volume.project_id) self.assertEqual(fake.user2_id, volume.user_id) self.assertEqual(response['volume_id'], volume.id, 'Unexpected volume id in response.') self.assertEqual(response['id'], transfer['id'], 'Unexpected transfer id in response.') calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"), mock.call(self.ctxt, mock.ANY, "transfer.accept.end")] mock_notify.assert_has_calls(calls) # The notify_about_volume_usage is called twice at create(), # and twice at accept(). 
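        # (Illustrative note, an addition to the original comments)
        # assert_has_calls only verifies that the listed calls appear, in
        # order, somewhere among the mock's calls; the exact total is
        # pinned separately by the call_count assertion below.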
self.assertEqual(4, mock_notify.call_count) # Check QUOTAS reservation calls # QUOTAS.add_volume_type_opts reserve_opt = {'volumes': 1, 'gigabytes': 1} release_opt = {'volumes': -1, 'gigabytes': -1} calls = [mock.call(self.ctxt, reserve_opt, fake.volume_type_id), mock.call(self.ctxt, release_opt, fake.volume_type_id)] mock_quota_voltype.assert_has_calls(calls) # QUOTAS.reserve calls = [mock.call(mock.ANY, **reserve_opt), mock.call(mock.ANY, project_id=fake.project_id, **release_opt)] mock_quota_reserve.assert_has_calls(calls) @mock.patch.object(QUOTAS, "reserve") @mock.patch.object(QUOTAS, "add_volume_type_opts") @mock.patch('cinder.volume.utils.notify_about_volume_usage') def test_transfer_accept_over_quota(self, mock_notify, mock_quota_voltype, mock_quota_reserve): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.volume_type_id, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') fake_overs = ['volumes_lvmdriver-3'] fake_quotas = {'gigabytes_lvmdriver-3': 1, 'volumes_lvmdriver-3': 10} fake_usages = {'gigabytes_lvmdriver-3': {'reserved': 0, 'in_use': 1}, 'volumes_lvmdriver-3': {'reserved': 0, 'in_use': 1}} mock_quota_reserve.side_effect = exception.OverQuota( overs=fake_overs, quotas=fake_quotas, usages=fake_usages) self.ctxt.user_id = fake.user2_id self.ctxt.project_id = fake.project2_id self.assertRaises(exception.VolumeLimitExceeded, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) def test_transfer_get(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume['id'], 'Description') t = tx_api.get(self.ctxt, transfer['id']) self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') ts = tx_api.get_all(self.ctxt) self.assertEqual(1, len(ts), 'Unexpected number of transfers.') nctxt = context.RequestContext(user_id=fake.user2_id, project_id=fake.project2_id) utils.create_volume(nctxt, updated_at=self.updated_at) self.assertRaises(exception.TransferNotFound, tx_api.get, nctxt, transfer['id']) ts = tx_api.get_all(nctxt) self.assertEqual(0, len(ts), 'Unexpected transfers listed.') @mock.patch('cinder.volume.utils.notify_about_volume_usage') def test_delete_transfer_with_deleted_volume(self, mock_notify): # create a volume volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) # create a transfer tx_api = transfer_api.API() transfer = tx_api.create(self.ctxt, volume['id'], 'Description') t = tx_api.get(self.ctxt, transfer['id']) self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) # force delete volume volume.destroy() # Make sure transfer has been deleted. self.assertRaises(exception.TransferNotFound, tx_api.get, self.ctxt, transfer['id']) cinder-8.0.0/cinder/tests/unit/test_volume_throttling.py0000664000567000056710000000564112701406250024712 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for volume copy throttling helpers.""" import mock from cinder import test from cinder import utils from cinder.volume import throttling class ThrottleTestCase(test.TestCase): def test_NoThrottle(self): with throttling.Throttle().subcommand('volume1', 'volume2') as cmd: self.assertEqual([], cmd['prefix']) @mock.patch.object(utils, 'get_blkdev_major_minor') def test_BlkioCgroup(self, mock_major_minor): def fake_get_blkdev_major_minor(path): return {'src_volume1': "253:0", 'dst_volume1': "253:1", 'src_volume2': "253:2", 'dst_volume2': "253:3"}[path] mock_major_minor.side_effect = fake_get_blkdev_major_minor self.exec_cnt = 0 def fake_execute(*cmd, **kwargs): cmd_set = ['cgset', '-r', 'blkio.throttle.%s_bps_device=%s %d', 'fake_group'] set_order = [None, ('read', '253:0', 1024), ('write', '253:1', 1024), # a nested job starts; bps limit are set to the half ('read', '253:0', 512), ('read', '253:2', 512), ('write', '253:1', 512), ('write', '253:3', 512), # a nested job ends; bps limit is resumed ('read', '253:0', 1024), ('write', '253:1', 1024)] if set_order[self.exec_cnt] is None: self.assertEqual(('cgcreate', '-g', 'blkio:fake_group'), cmd) else: cmd_set[2] %= set_order[self.exec_cnt] self.assertEqual(tuple(cmd_set), cmd) self.exec_cnt += 1 with mock.patch.object(utils, 'execute', side_effect=fake_execute): throttle = throttling.BlkioCgroup(1024, 'fake_group') with throttle.subcommand('src_volume1', 'dst_volume1') as cmd: self.assertEqual(['cgexec', '-g', 'blkio:fake_group'], cmd['prefix']) # a nested job with throttle.subcommand('src_volume2', 'dst_volume2') as cmd: self.assertEqual(['cgexec', '-g', 'blkio:fake_group'], cmd['prefix']) cinder-8.0.0/cinder/tests/unit/declare_conf.py0000664000567000056710000000152112701406250022463 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('answer', default=42, help='test conf')) cinder-8.0.0/cinder/tests/unit/test_context.py0000664000567000056710000001320212701406257022610 0ustar jenkinsjenkins00000000000000 # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from cinder import context from cinder import test class ContextTestCase(test.TestCase): def test_request_context_sets_is_admin(self): ctxt = context.RequestContext('111', '222', roles=['admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_sets_is_admin_upcase(self): ctxt = context.RequestContext('111', '222', roles=['Admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_read_deleted(self): ctxt = context.RequestContext('111', '222', read_deleted='yes') self.assertEqual('yes', ctxt.read_deleted) ctxt.read_deleted = 'no' self.assertEqual('no', ctxt.read_deleted) def test_request_context_read_deleted_invalid(self): self.assertRaises(ValueError, context.RequestContext, '111', '222', read_deleted=True) ctxt = context.RequestContext('111', '222') self.assertRaises(ValueError, setattr, ctxt, 'read_deleted', True) def test_request_context_elevated(self): user_context = context.RequestContext( 'fake_user', 'fake_project', admin=False) self.assertFalse(user_context.is_admin) admin_context = user_context.elevated() self.assertFalse(user_context.is_admin) self.assertTrue(admin_context.is_admin) self.assertFalse('admin' in user_context.roles) self.assertTrue('admin' in admin_context.roles) def test_service_catalog_nova_and_swift(self): service_catalog = [ {u'type': u'compute', u'name': u'nova'}, {u'type': u's3', u'name': u's3'}, {u'type': u'image', u'name': u'glance'}, {u'type': u'volume', u'name': u'cinder'}, {u'type': u'ec2', u'name': u'ec2'}, {u'type': u'object-store', u'name': u'swift'}, {u'type': u'identity', u'name': u'keystone'}, {u'type': None, u'name': u'S_withtypeNone'}, {u'type': u'co', u'name': u'S_partofcompute'}] compute_catalog = [{u'type': u'compute', u'name': u'nova'}] object_catalog = [{u'name': u'swift', u'type': u'object-store'}] ctxt = context.RequestContext('111', '222', service_catalog=service_catalog) self.assertEqual(3, len(ctxt.service_catalog)) return_compute = [v for v in ctxt.service_catalog if v['type'] == u'compute'] return_object = [v for v in ctxt.service_catalog if v['type'] == u'object-store'] self.assertEqual(compute_catalog, return_compute) self.assertEqual(object_catalog, return_object) def test_user_identity(self): ctx = context.RequestContext("user", "tenant", domain="domain", user_domain="user-domain", project_domain="project-domain") self.assertEqual('user tenant domain user-domain project-domain', ctx.to_dict()["user_identity"]) @mock.patch('cinder.context.CONF') def test_cinder_internal_context(self, mock_conf): project_id = 'ec729e9946bc43c39ece6dfa7de70eea' user_id = 'c466a48309794261b64a4f02cfcc3d64' mock_conf.cinder_internal_tenant_project_id = project_id mock_conf.cinder_internal_tenant_user_id = user_id ctx = context.get_internal_tenant_context() self.assertEqual(user_id, ctx.user_id) self.assertEqual(project_id, ctx.project_id) @mock.patch('cinder.context.CONF') def test_cinder_internal_context_missing_user(self, mock_conf): project_id = 'ec729e9946bc43c39ece6dfa7de70eea' user_id = None mock_conf.cinder_internal_tenant_project_id = project_id mock_conf.cinder_internal_tenant_user_id = user_id ctx = 
context.get_internal_tenant_context() self.assertIsNone(ctx) @mock.patch('cinder.context.CONF') def test_cinder_internal_context_missing_project(self, mock_conf): project_id = None user_id = 'c466a48309794261b64a4f02cfcc3d64' mock_conf.cinder_internal_tenant_project_id = project_id mock_conf.cinder_internal_tenant_user_id = user_id ctx = context.get_internal_tenant_context() self.assertIsNone(ctx) @mock.patch('cinder.context.CONF') def test_cinder_internal_context_missing_all(self, mock_conf): project_id = None user_id = None mock_conf.cinder_internal_tenant_project_id = project_id mock_conf.cinder_internal_tenant_user_id = user_id ctx = context.get_internal_tenant_context() self.assertIsNone(ctx) cinder-8.0.0/cinder/tests/unit/test_api_urlmap.py0000664000567000056710000003102112701406250023245 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for cinder.api.urlmap.py """ from cinder.api import urlmap from cinder import test class TestParseFunctions(test.TestCase): def test_unquote_header_value_without_quotes(self): arg = 'TestString' result = urlmap.unquote_header_value(arg) self.assertEqual(arg, result) def test_unquote_header_value_with_quotes(self): result = urlmap.unquote_header_value('"TestString"') self.assertEqual('TestString', result) def test_parse_list_header(self): arg = 'token, "quoted value"' result = urlmap.parse_list_header(arg) self.assertEqual(['token', 'quoted value'], result) def test_parse_options_header(self): result = urlmap.parse_options_header('Content-Type: text/html;' ' mimetype=text/html') self.assertEqual(('Content-Type:', {'mimetype': 'text/html'}), result) def test_parse_options_header_without_value(self): result = urlmap.parse_options_header(None) self.assertEqual(('', {}), result) class TestAccept(test.TestCase): def test_best_match_ValueError(self): arg = 'text/html; q=some_invalud_value' accept = urlmap.Accept(arg) self.assertEqual((None, {}), accept.best_match(['text/html'])) def test_best_match(self): arg = '*/*; q=0.7, application/json; q=0.7, text/html; q=-0.8' accept = urlmap.Accept(arg) self.assertEqual(('application/json', {'q': '0.7'}), accept.best_match(['application/json', 'application/xml', 'text/html'])) def test_match_mask_one_asterisk(self): arg = 'text/*; q=0.7' accept = urlmap.Accept(arg) self.assertEqual(('text/html', {'q': '0.7'}), accept.best_match(['text/html'])) def test_match_mask_two_asterisk(self): arg = '*/*; q=0.7' accept = urlmap.Accept(arg) self.assertEqual(('text/html', {'q': '0.7'}), accept.best_match(['text/html'])) def test_match_mask_no_asterisk(self): arg = 'application/json; q=0.7' accept = urlmap.Accept(arg) self.assertEqual((None, {}), accept.best_match(['text/html'])) def test_content_type_params(self): arg = "application/xml; q=0.1, application/json; q=0.2," \ " text/html; q=0.3" accept = urlmap.Accept(arg) self.assertEqual({'q': '0.2'}, accept.content_type_params('application/json')) def 
test_content_type_params_wrong_content_type(self): arg = 'application/xml; q=0.1, text/html; q=0.1' accept = urlmap.Accept(arg) self.assertEqual({}, accept.content_type_params('application/json')) class TestUrlMapFactory(test.TestCase): def setUp(self): super(TestUrlMapFactory, self).setUp() self.global_conf = {'not_found_app': 'app_global', 'domain hoobar.com port 10 /': 'some_app_global'} self.loader = self.mox.CreateMockAnything() def test_not_found_app_in_local_conf(self): local_conf = {'not_found_app': 'app_local', 'domain foobar.com port 20 /': 'some_app_local'} self.loader.get_app('app_local', global_conf=self.global_conf).\ AndReturn('app_local_loader') self.loader.get_app('some_app_local', global_conf=self.global_conf).\ AndReturn('some_app_loader') self.mox.ReplayAll() expected_urlmap = urlmap.URLMap(not_found_app='app_local_loader') expected_urlmap['http://foobar.com:20'] = 'some_app_loader' self.assertEqual(expected_urlmap, urlmap.urlmap_factory(self.loader, self.global_conf, **local_conf)) def test_not_found_app_not_in_local_conf(self): local_conf = {'domain foobar.com port 20 /': 'some_app_local'} self.loader.get_app('app_global', global_conf=self.global_conf).\ AndReturn('app_global_loader') self.loader.get_app('some_app_local', global_conf=self.global_conf).\ AndReturn('some_app_returned_by_loader') self.mox.ReplayAll() expected_urlmap = urlmap.URLMap(not_found_app='app_global_loader') expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\ '_by_loader' self.assertEqual(expected_urlmap, urlmap.urlmap_factory(self.loader, self.global_conf, **local_conf)) def test_not_found_app_is_none(self): local_conf = {'not_found_app': None, 'domain foobar.com port 20 /': 'some_app_local'} self.loader.get_app('some_app_local', global_conf=self.global_conf).\ AndReturn('some_app_returned_by_loader') self.mox.ReplayAll() expected_urlmap = urlmap.URLMap(not_found_app=None) expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\ '_by_loader' self.assertEqual(expected_urlmap, urlmap.urlmap_factory(self.loader, self.global_conf, **local_conf)) class TestURLMap(test.TestCase): def setUp(self): super(TestURLMap, self).setUp() self.urlmap = urlmap.URLMap() self.input_environ = {'HTTP_ACCEPT': "application/json;" "version=9.0", 'REQUEST_METHOD': "GET", 'CONTENT_TYPE': 'application/xml', 'SCRIPT_NAME': '/scriptname', 'PATH_INFO': "/resource.xml"} self.environ = {'HTTP_ACCEPT': "application/json;" "version=9.0", 'REQUEST_METHOD': "GET", 'CONTENT_TYPE': 'application/xml', 'SCRIPT_NAME': '/scriptname/app_url', 'PATH_INFO': "/resource.xml"} def test_match_with_applications(self): self.urlmap[('http://10.20.30.40:50', '/path/somepath')] = 'app' self.assertEqual((None, None), self.urlmap._match('20.30.40.50', '20', 'path/somepath')) def test_match_without_applications(self): self.assertEqual((None, None), self.urlmap._match('host', 20, 'app_url/somepath')) def test_match_path_info_equals_app_url(self): self.urlmap[('http://20.30.40.50:60', '/app_url/somepath')] = 'app' self.assertEqual(('app', '/app_url/somepath'), self.urlmap._match('http://20.30.40.50', '60', '/app_url/somepath')) def test_match_path_info_equals_app_url_many_app(self): self.urlmap[('http://20.30.40.50:60', '/path')] = 'app1' self.urlmap[('http://20.30.40.50:60', '/path/somepath')] = 'app2' self.urlmap[('http://20.30.40.50:60', '/path/somepath/elsepath')] = \ 'app3' self.assertEqual(('app3', '/path/somepath/elsepath'), self.urlmap._match('http://20.30.40.50', '60', '/path/somepath/elsepath')) def test_set_script_name(self): 
app = self.mox.CreateMockAnything() start_response = self.mox.CreateMockAnything() app.__call__(self.environ, start_response).AndReturn('value') self.mox.ReplayAll() wrap = self.urlmap._set_script_name(app, '/app_url') self.assertEqual('value', wrap(self.input_environ, start_response)) def test_munge_path(self): app = self.mox.CreateMockAnything() start_response = self.mox.CreateMockAnything() app.__call__(self.environ, start_response).AndReturn('value') self.mox.ReplayAll() wrap = self.urlmap._munge_path(app, '/app_url/resource.xml', '/app_url') self.assertEqual('value', wrap(self.input_environ, start_response)) def test_content_type_strategy_without_version(self): self.assertIsNone(self.urlmap._content_type_strategy('host', 20, self.environ)) def test_content_type_strategy_with_version(self): environ = {'HTTP_ACCEPT': "application/vnd.openstack.melange+xml;" "version=9.0", 'REQUEST_METHOD': "GET", 'PATH_INFO': "/resource.xml", 'CONTENT_TYPE': 'application/xml; version=2.0'} self.urlmap[('http://10.20.30.40:50', '/v2.0')] = 'app' self.mox.StubOutWithMock(self.urlmap, '_set_script_name') self.urlmap._set_script_name('app', '/v2.0').AndReturn('value') self.mox.ReplayAll() self.assertEqual('value', self.urlmap._content_type_strategy( 'http://10.20.30.40', '50', environ)) def test_path_strategy_wrong_path_info(self): self.assertEqual((None, None, None), self.urlmap._path_strategy('http://10.20.30.40', '50', '/resource')) def test_path_strategy_mime_type_only(self): self.assertEqual(('application/xml', None, None), self.urlmap._path_strategy('http://10.20.30.40', '50', '/resource.xml')) def test_path_strategy(self): self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app' self.mox.StubOutWithMock(self.urlmap, '_munge_path') self.urlmap._munge_path('app', '/path/elsepath/resource.xml', '/path/elsepath').AndReturn('value') self.mox.ReplayAll() self.assertEqual( ('application/xml', 'value', '/path/elsepath'), self.urlmap._path_strategy('http://10.20.30.40', '50', '/path/elsepath/resource.xml')) def test_path_strategy_wrong_mime_type(self): self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app' self.mox.StubOutWithMock(self.urlmap, '_munge_path') self.urlmap._munge_path('app', '/path/elsepath/resource.abc', '/path/elsepath').AndReturn('value') self.mox.ReplayAll() self.assertEqual( (None, 'value', '/path/elsepath'), self.urlmap._path_strategy('http://10.20.30.40', '50', '/path/elsepath/resource.abc')) def test_accept_strategy_version_not_in_params(self): environ = {'HTTP_ACCEPT': "application/xml; q=0.1, application/json; " "q=0.2", 'REQUEST_METHOD': "GET", 'PATH_INFO': "/resource.xml", 'CONTENT_TYPE': 'application/xml; version=2.0'} self.assertEqual(('application/xml', None), self.urlmap._accept_strategy('http://10.20.30.40', '50', environ, ['application/xml'])) def test_accept_strategy_version(self): environ = {'HTTP_ACCEPT': "application/xml; q=0.1; version=1.0," "application/json; q=0.2; version=2.0", 'REQUEST_METHOD': "GET", 'PATH_INFO': "/resource.xml", 'CONTENT_TYPE': 'application/xml; version=2.0'} self.urlmap[('http://10.20.30.40:50', '/v1.0')] = 'app' self.mox.StubOutWithMock(self.urlmap, '_set_script_name') self.urlmap._set_script_name('app', '/v1.0').AndReturn('value') self.mox.ReplayAll() self.assertEqual(('application/xml', 'value'), self.urlmap._accept_strategy('http://10.20.30.40', '50', environ, ['application/xml'])) cinder-8.0.0/cinder/tests/unit/test_nfs.py0000664000567000056710000014120012701406250021703 0ustar jenkinsjenkins00000000000000# Copyright (c) 
2012 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the NFS driver module.""" import ddt import errno import os import mock from oslo_utils import units from cinder import exception from cinder.image import image_utils from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers import nfs from cinder.volume.drivers import remotefs class DumbVolume(object): # TODO(eharney): replace this with an autospecced mock class fields = {} def __setitem__(self, key, value): self.fields[key] = value def __getitem__(self, item): return self.fields[item] class RemoteFsDriverTestCase(test.TestCase): TEST_FILE_NAME = 'test.txt' TEST_EXPORT = 'nas-host1:/export' TEST_MNT_POINT = '/mnt/nas' def setUp(self): super(RemoteFsDriverTestCase, self).setUp() self._driver = remotefs.RemoteFSDriver() self.configuration = mock.Mock(conf.Configuration) self.configuration.append_config_values(mock.ANY) self.configuration.nas_secure_file_permissions = 'false' self.configuration.nas_secure_file_operations = 'false' self.configuration.max_over_subscription_ratio = 1.0 self.configuration.reserved_percentage = 5 self._driver = remotefs.RemoteFSDriver( configuration=self.configuration) mock_exc = mock.patch.object(self._driver, '_execute') self._execute = mock_exc.start() self.addCleanup(mock_exc.stop) def test_create_sparsed_file(self): self._driver._create_sparsed_file('/path', 1) self._execute.assert_called_once_with('truncate', '-s', '1G', '/path', run_as_root=True) def test_create_regular_file(self): self._driver._create_regular_file('/path', 1) self._execute.assert_called_once_with('dd', 'if=/dev/zero', 'of=/path', 'bs=1M', 'count=1024', run_as_root=True) def test_create_qcow2_file(self): file_size = 1 self._driver._create_qcow2_file('/path', file_size) self._execute.assert_called_once_with('qemu-img', 'create', '-f', 'qcow2', '-o', 'preallocation=metadata', '/path', '%s' % str(file_size * units.Gi), run_as_root=True) def test_set_rw_permissions_for_all(self): self._driver._set_rw_permissions_for_all('/path') self._execute.assert_called_once_with('chmod', 'ugo+rw', '/path', run_as_root=True) @mock.patch.object(remotefs, 'LOG') def test_set_rw_permissions_with_secure_file_permissions(self, LOG): self._driver._mounted_shares = [self.TEST_EXPORT] self.configuration.nas_secure_file_permissions = 'true' self._driver._set_rw_permissions(self.TEST_FILE_NAME) self.assertFalse(LOG.warning.called) @mock.patch.object(remotefs, 'LOG') def test_set_rw_permissions_without_secure_file_permissions(self, LOG): self.configuration.nas_secure_file_permissions = 'false' self._driver._set_rw_permissions(self.TEST_FILE_NAME) self.assertTrue(LOG.warning.called) warn_msg = "%(path)s is being set with open permissions: %(perm)s" LOG.warning.assert_called_once_with( warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'}) @mock.patch('os.path.join') @mock.patch('os.path.isfile', return_value=False) def test_determine_nas_security_options_when_auto_and_new_install( 
self, mock_isfile, mock_join): """Test the setting of the NAS Security Option In this test case, we will create the marker file. No pre-existing Cinder volumes are found during bootup. """ self._driver._mounted_shares = [self.TEST_EXPORT] file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT is_new_install = True self._driver._ensure_shares_mounted = mock.Mock() nas_mount = self._driver._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) mock_join.return_value = file_path secure_file_permissions = 'auto' nas_option = self._driver._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('true', nas_option) secure_file_operations = 'auto' nas_option = self._driver._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('true', nas_option) @mock.patch('os.path.join') @mock.patch('os.path.isfile') def test_determine_nas_security_options_when_auto_and_new_install_exists( self, isfile, join): """Test the setting of the NAS Security Option In this test case, the marker file already exists. Cinder volumes found during bootup. """ drv = self._driver drv._mounted_shares = [self.TEST_EXPORT] file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT is_new_install = False drv._ensure_shares_mounted = mock.Mock() nas_mount = drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) join.return_value = file_path isfile.return_value = True secure_file_permissions = 'auto' nas_option = drv._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('true', nas_option) secure_file_operations = 'auto' nas_option = drv._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('true', nas_option) @mock.patch('os.path.join') @mock.patch('os.path.isfile') def test_determine_nas_security_options_when_auto_and_old_install(self, isfile, join): """Test the setting of the NAS Security Option In this test case, the marker file does not exist. There are also pre-existing Cinder volumes. """ drv = self._driver drv._mounted_shares = [self.TEST_EXPORT] file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT is_new_install = False drv._ensure_shares_mounted = mock.Mock() nas_mount = drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) join.return_value = file_path isfile.return_value = False secure_file_permissions = 'auto' nas_option = drv._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('false', nas_option) secure_file_operations = 'auto' nas_option = drv._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('false', nas_option) def test_determine_nas_security_options_when_admin_set_true(self): """Test the setting of the NAS Security Option In this test case, the Admin set the flag to 'true'.
""" drv = self._driver drv._mounted_shares = [self.TEST_EXPORT] is_new_install = False drv._ensure_shares_mounted = mock.Mock() nas_mount = drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) secure_file_permissions = 'true' nas_option = drv._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('true', nas_option) secure_file_operations = 'true' nas_option = drv._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('true', nas_option) def test_determine_nas_security_options_when_admin_set_false(self): """Test the setting of the NAS Security Option In this test case, the Admin set the flag to 'false'. """ drv = self._driver drv._mounted_shares = [self.TEST_EXPORT] is_new_install = False drv._ensure_shares_mounted = mock.Mock() nas_mount = drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) secure_file_permissions = 'false' nas_option = drv._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('false', nas_option) secure_file_operations = 'false' nas_option = drv._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('false', nas_option) @mock.patch.object(remotefs, 'LOG') def test_set_nas_security_options(self, LOG): """Test setting of NAS Security options. The RemoteFS driver will force set options to false. The derived objects will provide an inherited interface to properly set options. """ drv = self._driver is_new_install = False drv.set_nas_security_options(is_new_install) self.assertEqual('false', drv.configuration.nas_secure_file_operations) self.assertEqual('false', drv.configuration.nas_secure_file_permissions) self.assertTrue(LOG.warning.called) def test_secure_file_operations_enabled_true(self): """Test nas_secure_file_operations = 'true' Networked file system based drivers may support secure file operations. This test verifies the settings when secure. """ drv = self._driver self.configuration.nas_secure_file_operations = 'true' ret_flag = drv.secure_file_operations_enabled() self.assertTrue(ret_flag) def test_secure_file_operations_enabled_false(self): """Test nas_secure_file_operations = 'false' Networked file system based drivers may support secure file operations. This test verifies the settings when not secure. 
""" drv = self._driver self.configuration.nas_secure_file_operations = 'false' ret_flag = drv.secure_file_operations_enabled() self.assertFalse(ret_flag) @ddt.ddt class NfsDriverTestCase(test.TestCase): """Test case for NFS driver.""" TEST_NFS_HOST = 'nfs-host1' TEST_NFS_SHARE_PATH = '/export' TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH) TEST_NFS_EXPORT2 = 'nfs-host2:/export' TEST_NFS_EXPORT2_OPTIONS = '-o intr' TEST_SIZE_IN_GB = 1 TEST_MNT_POINT = '/mnt/nfs' TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt' TEST_MNT_POINT_BASE = '/mnt/test' TEST_LOCAL_PATH = '/mnt/nfs/volume-123' TEST_FILE_NAME = 'test.txt' TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this' TEST_MNT_POINT_SPACES = '/ 0 0 0 /foo' def setUp(self): super(NfsDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.append_config_values(mock.ANY) self.configuration.max_over_subscription_ratio = 1.0 self.configuration.reserved_percentage = 5 self.configuration.nfs_shares_config = None self.configuration.nfs_sparsed_volumes = True self.configuration.nfs_reserved_percentage = 5.0 self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE self.configuration.nfs_mount_options = None self.configuration.nfs_mount_attempts = 3 self.configuration.nfs_qcow2_volumes = False self.configuration.nas_secure_file_permissions = 'false' self.configuration.nas_secure_file_operations = 'false' self.configuration.nas_ip = None self.configuration.nas_share_path = None self.configuration.nas_mount_options = None self.configuration.volume_dd_blocksize = '1M' self._driver = nfs.NfsDriver(configuration=self.configuration) self._driver.shares = {} mock_exc = mock.patch.object(self._driver, '_execute') self._execute = mock_exc.start() self.addCleanup(mock_exc.stop) def test_local_path(self): """local_path common use case.""" self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE drv = self._driver volume = DumbVolume() volume['provider_location'] = self.TEST_NFS_EXPORT1 volume['name'] = 'volume-123' self.assertEqual( '/mnt/test/2f4f60214cf43c595666dd815f0360a4/volume-123', drv.local_path(volume)) @mock.patch.object(image_utils, 'qemu_img_info') @mock.patch.object(image_utils, 'resize_image') @mock.patch.object(image_utils, 'fetch_to_raw') def test_copy_image_to_volume(self, mock_fetch, mock_resize, mock_qemu): """resize_image common case usage.""" drv = self._driver TEST_IMG_SOURCE = 'foo.img' volume = {'size': self.TEST_SIZE_IN_GB, 'name': TEST_IMG_SOURCE} with mock.patch.object(drv, 'local_path', return_value=TEST_IMG_SOURCE): data = mock.Mock() data.virtual_size = 1 * units.Gi mock_qemu.return_value = data drv.copy_image_to_volume(None, volume, None, None) mock_fetch.assert_called_once_with( None, None, None, TEST_IMG_SOURCE, mock.ANY, run_as_root=True, size=self.TEST_SIZE_IN_GB) mock_resize.assert_called_once_with(TEST_IMG_SOURCE, self.TEST_SIZE_IN_GB, run_as_root=True) def test_get_mount_point_for_share(self): """_get_mount_point_for_share should calculate correct value.""" drv = self._driver self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4', drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1)) def test_get_mount_point_for_share_given_extra_slash_in_state_path(self): """_get_mount_point_for_share should calculate correct value.""" # This test gets called with the extra slash self.configuration.nfs_mount_point_base = ( 
self.TEST_MNT_POINT_BASE_EXTRA_SLASH) # The driver gets called with the correct configuration and removes # the extra slash drv = nfs.NfsDriver(configuration=self.configuration) self.assertEqual('/opt/stack/data/cinder/mnt', drv.base) self.assertEqual( '/opt/stack/data/cinder/mnt/2f4f60214cf43c595666dd815f0360a4', drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1)) def test_get_capacity_info(self): """_get_capacity_info should calculate correct value.""" drv = self._driver stat_total_size = 2620544 stat_avail = 2129984 stat_output = '1 %d %d' % (stat_total_size, stat_avail) du_used = 490560 du_output = '%d /mnt' % du_used with mock.patch.object( drv, '_get_mount_point_for_share') as mock_get_mount: mock_get_mount.return_value = self.TEST_MNT_POINT self._execute.side_effect = [(stat_output, None), (du_output, None)] self.assertEqual((stat_total_size, stat_avail, du_used), drv._get_capacity_info(self.TEST_NFS_EXPORT1)) mock_get_mount.assert_called_once_with(self.TEST_NFS_EXPORT1) calls = [mock.call('stat', '-f', '-c', '%S %b %a', self.TEST_MNT_POINT, run_as_root=True), mock.call('du', '-sb', '--apparent-size', '--exclude', '*snapshot*', self.TEST_MNT_POINT, run_as_root=True)] self._execute.assert_has_calls(calls) def test_get_capacity_info_for_share_and_mount_point_with_spaces(self): """_get_capacity_info should calculate correct value.""" drv = self._driver stat_total_size = 2620544 stat_avail = 2129984 stat_output = '1 %d %d' % (stat_total_size, stat_avail) du_used = 490560 du_output = '%d /mnt' % du_used with mock.patch.object( drv, '_get_mount_point_for_share') as mock_get_mount: mock_get_mount.return_value = self.TEST_MNT_POINT_SPACES self._execute.side_effect = [(stat_output, None), (du_output, None)] self.assertEqual((stat_total_size, stat_avail, du_used), drv._get_capacity_info( self.TEST_NFS_EXPORT_SPACES)) mock_get_mount.assert_called_once_with( self.TEST_NFS_EXPORT_SPACES) calls = [mock.call('stat', '-f', '-c', '%S %b %a', self.TEST_MNT_POINT_SPACES, run_as_root=True), mock.call('du', '-sb', '--apparent-size', '--exclude', '*snapshot*', self.TEST_MNT_POINT_SPACES, run_as_root=True)] self._execute.assert_has_calls(calls) def test_load_shares_config(self): drv = self._driver drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE with mock.patch.object( drv, '_read_config_file') as mock_read_config: config_data = [] config_data.append(self.TEST_NFS_EXPORT1) config_data.append('#' + self.TEST_NFS_EXPORT2) config_data.append('') config_data.append(self.TEST_NFS_EXPORT2 + ' ' + self.TEST_NFS_EXPORT2_OPTIONS) config_data.append('broken:share_format') mock_read_config.return_value = config_data drv._load_shares_config(drv.configuration.nfs_shares_config) mock_read_config.assert_called_once_with( self.TEST_SHARES_CONFIG_FILE) self.assertIn(self.TEST_NFS_EXPORT1, drv.shares) self.assertIn(self.TEST_NFS_EXPORT2, drv.shares) self.assertEqual(2, len(drv.shares)) self.assertEqual(self.TEST_NFS_EXPORT2_OPTIONS, drv.shares[self.TEST_NFS_EXPORT2]) def test_load_shares_config_nas_opts(self): drv = self._driver drv.configuration.nas_ip = self.TEST_NFS_HOST drv.configuration.nas_share_path = self.TEST_NFS_SHARE_PATH drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE drv._load_shares_config(drv.configuration.nfs_shares_config) self.assertIn(self.TEST_NFS_EXPORT1, drv.shares) self.assertEqual(1, len(drv.shares)) def test_ensure_shares_mounted_should_save_mounting_successfully(self): """_ensure_shares_mounted should save share if mounted with success.""" drv = self._driver 
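# The hard-coded '2f4f60214cf43c595666dd815f0360a4' directories asserted
# above are, on the assumption that the RemoteFS layer derives mount points
# by MD5-hashing the export string, just the digest of 'nfs-host1:/export'
# joined onto the mount base. A minimal sketch (the helper name is
# illustrative, not the driver's actual API):
#
#     import hashlib
#     import os.path
#
#     def mount_point_for_share(base, share):
#         # e.g. base='/mnt/test', share='nfs-host1:/export'
#         return os.path.join(base, hashlib.md5(share.encode()).hexdigest())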
config_data = [] config_data.append(self.TEST_NFS_EXPORT1) drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE with mock.patch.object( drv, '_read_config_file') as mock_read_config: with mock.patch.object( drv, '_ensure_share_mounted') as mock_ensure: mock_read_config.return_value = config_data drv._ensure_shares_mounted() self.assertEqual(1, len(drv._mounted_shares)) mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1) @mock.patch.object(remotefs, 'LOG') def test_ensure_shares_mounted_should_not_save_mounting_with_error(self, LOG): """_ensure_shares_mounted should not save share if failed to mount.""" drv = self._driver config_data = [] config_data.append(self.TEST_NFS_EXPORT1) drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE with mock.patch.object( drv, '_read_config_file') as mock_read_config: with mock.patch.object( drv, '_ensure_share_mounted') as mock_ensure: mock_read_config.return_value = config_data mock_ensure.side_effect = Exception() drv._ensure_shares_mounted() self.assertEqual(0, len(drv._mounted_shares)) mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1) def test_find_share_should_throw_error_if_there_is_no_mounted_share(self): """_find_share should throw error if there are no mounted shares.""" drv = self._driver drv._mounted_shares = [] self.assertRaises(exception.NfsNoSharesMounted, drv._find_share, self.TEST_SIZE_IN_GB) def test_find_share(self): """_find_share simple use case.""" drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] with mock.patch.object( drv, '_get_capacity_info') as mock_get_capacity_info: mock_get_capacity_info.side_effect = [ (5 * units.Gi, 2 * units.Gi, 2 * units.Gi), (10 * units.Gi, 3 * units.Gi, 1 * units.Gi)] self.assertEqual(self.TEST_NFS_EXPORT2, drv._find_share(self.TEST_SIZE_IN_GB)) calls = [mock.call(self.TEST_NFS_EXPORT1), mock.call(self.TEST_NFS_EXPORT2)] mock_get_capacity_info.assert_has_calls(calls) self.assertEqual(2, mock_get_capacity_info.call_count) def test_find_share_should_throw_error_if_there_is_not_enough_space(self): """_find_share should throw error if there is no share to host vol.""" drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] with mock.patch.object( drv, '_get_capacity_info') as mock_get_capacity_info: mock_get_capacity_info.side_effect = [ (5 * units.Gi, 0, 5 * units.Gi), (10 * units.Gi, 0, 10 * units.Gi)] self.assertRaises(exception.NfsNoSuitableShareFound, drv._find_share, self.TEST_SIZE_IN_GB) calls = [mock.call(self.TEST_NFS_EXPORT1), mock.call(self.TEST_NFS_EXPORT2)] mock_get_capacity_info.assert_has_calls(calls) self.assertEqual(2, mock_get_capacity_info.call_count)
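# A rough sketch of the selection rule the _find_share cases above assert:
# among the mounted shares, pick the one reporting the most available space
# for a request that fits, and fail when none qualifies. This is a
# simplified stand-in under assumed semantics, not the driver's actual
# implementation.
def _pick_share(capacities, requested_gb):
    # capacities: share -> (total, available, allocated), all in bytes
    fitting = [(available, share)
               for share, (total, available, allocated)
               in capacities.items()
               if available >= requested_gb * 1024 ** 3]
    if not fitting:
        raise ValueError('no suitable share found')
    # max() on (available, share) tuples returns the roomiest share
    return max(fitting)[1]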
def _simple_volume(self): volume = DumbVolume() volume['provider_location'] = '127.0.0.1:/mnt' volume['name'] = 'volume_name' volume['size'] = 10 return volume def test_create_sparsed_volume(self): drv = self._driver volume = self._simple_volume() self.override_config('nfs_sparsed_volumes', True) with mock.patch.object( drv, '_create_sparsed_file') as mock_create_sparsed_file: with mock.patch.object( drv, '_set_rw_permissions') as mock_set_rw_permissions: drv._do_create_volume(volume) mock_create_sparsed_file.assert_called_once_with(mock.ANY, mock.ANY) mock_set_rw_permissions.assert_called_once_with(mock.ANY) def test_create_nonsparsed_volume(self): drv = self._driver self.configuration.nfs_sparsed_volumes = False volume = self._simple_volume() self.override_config('nfs_sparsed_volumes', False) with mock.patch.object( drv, '_create_regular_file') as mock_create_regular_file: with mock.patch.object( drv, '_set_rw_permissions') as mock_set_rw_permissions: drv._do_create_volume(volume) mock_create_regular_file.assert_called_once_with(mock.ANY, mock.ANY) mock_set_rw_permissions.assert_called_once_with(mock.ANY) @mock.patch.object(nfs, 'LOG') def test_create_volume_should_ensure_nfs_mounted(self, mock_log): """create_volume ensures shares provided in config are mounted.""" drv = self._driver drv._find_share = mock.Mock() drv._do_create_volume = mock.Mock() with mock.patch.object( drv, '_ensure_shares_mounted') as mock_ensure_share: volume = DumbVolume() volume['size'] = self.TEST_SIZE_IN_GB drv.create_volume(volume) mock_ensure_share.assert_called_once_with() @mock.patch.object(nfs, 'LOG') def test_create_volume_should_return_provider_location(self, mock_log): """create_volume should return provider_location with found share.""" drv = self._driver drv._ensure_shares_mounted = mock.Mock() drv._do_create_volume = mock.Mock() with mock.patch.object(drv, '_find_share') as mock_find_share: mock_find_share.return_value = self.TEST_NFS_EXPORT1 volume = DumbVolume() volume['size'] = self.TEST_SIZE_IN_GB result = drv.create_volume(volume) self.assertEqual(self.TEST_NFS_EXPORT1, result['provider_location']) mock_find_share.assert_called_once_with(self.TEST_SIZE_IN_GB) def test_delete_volume(self): """delete_volume simple test case.""" drv = self._driver drv._ensure_share_mounted = mock.Mock() volume = DumbVolume() volume['name'] = 'volume-123' volume['provider_location'] = self.TEST_NFS_EXPORT1 with mock.patch.object(drv, 'local_path') as mock_local_path: mock_local_path.return_value = self.TEST_LOCAL_PATH drv.delete_volume(volume) mock_local_path.assert_called_once_with(volume) self._execute.assert_called_once_with('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True) def test_delete_should_ensure_share_mounted(self): """delete_volume should ensure that corresponding share is mounted.""" drv = self._driver volume = DumbVolume() volume['name'] = 'volume-123' volume['provider_location'] = self.TEST_NFS_EXPORT1 with mock.patch.object( drv, '_ensure_share_mounted') as mock_ensure_share: drv.delete_volume(volume) mock_ensure_share.assert_called_once_with(self.TEST_NFS_EXPORT1) def test_delete_should_not_delete_if_provider_location_not_provided(self): """delete_volume shouldn't delete if provider_location is missing.""" drv = self._driver volume = DumbVolume() volume['name'] = 'volume-123' volume['provider_location'] = None with mock.patch.object(drv, '_ensure_share_mounted'): drv.delete_volume(volume) self.assertFalse(self._execute.called) def test_get_volume_stats(self): """get_volume_stats must fill the correct values.""" drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] with mock.patch.object( drv, '_ensure_shares_mounted') as mock_ensure_share: with mock.patch.object( drv, '_get_capacity_info') as mock_get_capacity_info: mock_get_capacity_info.side_effect = [ (10 * units.Gi, 2 * units.Gi, 2 * units.Gi), (20 * units.Gi, 3 * units.Gi, 3 * units.Gi)] drv._ensure_shares_mounted() drv.get_volume_stats() calls = [mock.call(self.TEST_NFS_EXPORT1), mock.call(self.TEST_NFS_EXPORT2)] mock_get_capacity_info.assert_has_calls(calls) self.assertTrue(mock_ensure_share.called) self.assertEqual(30.0, drv._stats['total_capacity_gb']) self.assertEqual(5.0, drv._stats['free_capacity_gb']) self.assertEqual(5, drv._stats['reserved_percentage']) self.assertTrue(drv._stats['sparse_copy_volume']) def test_get_volume_stats_with_non_zero_reserved_percentage(self):
"""get_volume_stats must fill the correct values.""" self.configuration.reserved_percentage = 10.0 drv = nfs.NfsDriver(configuration=self.configuration) drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] with mock.patch.object( drv, '_ensure_shares_mounted') as mock_ensure_share: with mock.patch.object( drv, '_get_capacity_info') as mock_get_capacity_info: mock_get_capacity_info.side_effect = [ (10 * units.Gi, 2 * units.Gi, 2 * units.Gi), (20 * units.Gi, 3 * units.Gi, 3 * units.Gi)] drv._ensure_shares_mounted() drv.get_volume_stats() calls = [mock.call(self.TEST_NFS_EXPORT1), mock.call(self.TEST_NFS_EXPORT2)] mock_get_capacity_info.assert_has_calls(calls) self.assertTrue(mock_ensure_share.called) self.assertEqual(30.0, drv._stats['total_capacity_gb']) self.assertEqual(5.0, drv._stats['free_capacity_gb']) self.assertEqual(10.0, drv._stats['reserved_percentage']) @ddt.data(True, False) def test_update_volume_stats(self, thin): self._driver.configuration.max_over_subscription_ratio = 20.0 self._driver.configuration.reserved_percentage = 5.0 self._driver.configuration.nfs_sparsed_volumes = thin remotefs_volume_stats = { 'volume_backend_name': 'fake_backend_name', 'vendor_name': 'fake_vendor', 'driver_version': 'fake_version', 'storage_protocol': 'NFS', 'total_capacity_gb': 100.0, 'free_capacity_gb': 20.0, 'reserved_percentage': 5.0, 'QoS_support': False, } self.mock_object(remotefs.RemoteFSDriver, '_update_volume_stats') self._driver._stats = remotefs_volume_stats mock_get_provisioned_capacity = self.mock_object( self._driver, '_get_provisioned_capacity', mock.Mock(return_value=25.0)) self._driver._update_volume_stats() nfs_added_volume_stats = { 'provisioned_capacity_gb': 25.0 if thin else 80.0, 'max_over_subscription_ratio': 20.0, 'reserved_percentage': 5.0, 'thin_provisioning_support': thin, 'thick_provisioning_support': not thin, } expected = remotefs_volume_stats expected.update(nfs_added_volume_stats) self.assertEqual(expected, self._driver._stats) self.assertEqual(thin, mock_get_provisioned_capacity.called) def _check_is_share_eligible(self, total_size, total_available, total_allocated, requested_volume_size): with mock.patch.object(self._driver, '_get_capacity_info')\ as mock_get_capacity_info: mock_get_capacity_info.return_value = (total_size, total_available, total_allocated) return self._driver._is_share_eligible('fake_share', requested_volume_size) def test_is_share_eligible(self): total_size = 100.0 * units.Gi total_available = 90.0 * units.Gi total_allocated = 10.0 * units.Gi requested_volume_size = 1 # GiB self.assertTrue(self._check_is_share_eligible(total_size, total_available, total_allocated, requested_volume_size)) def test_share_eligibility_with_reserved_percentage(self): total_size = 100.0 * units.Gi total_available = 4.0 * units.Gi total_allocated = 96.0 * units.Gi requested_volume_size = 1 # GiB # Check used > used_ratio statement entered self.assertFalse(self._check_is_share_eligible(total_size, total_available, total_allocated, requested_volume_size)) def test_is_share_eligible_above_oversub_ratio(self): total_size = 100.0 * units.Gi total_available = 10.0 * units.Gi total_allocated = 90.0 * units.Gi requested_volume_size = 10 # GiB # Check apparent_available <= requested_volume_size statement entered self.assertFalse(self._check_is_share_eligible(total_size, total_available, total_allocated, requested_volume_size)) def test_is_share_eligible_reserved_space_above_oversub_ratio(self): total_size = 100.0 * units.Gi total_available = 10.0 * units.Gi 
total_allocated = 100.0 * units.Gi requested_volume_size = 1 # GiB # Check total_allocated / total_size >= oversub_ratio # statement entered self.assertFalse(self._check_is_share_eligible(total_size, total_available, total_allocated, requested_volume_size)) def test_extend_volume(self): """Extend a volume by 1.""" drv = self._driver volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1, 'provider_location': 'nfs_share'} path = 'path' newSize = volume['size'] + 1 with mock.patch.object(image_utils, 'resize_image') as resize: with mock.patch.object(drv, 'local_path', return_value=path): with mock.patch.object(drv, '_is_share_eligible', return_value=True): with mock.patch.object(drv, '_is_file_size_equal', return_value=True): drv.extend_volume(volume, newSize) resize.assert_called_once_with(path, newSize, run_as_root=True) def test_extend_volume_failure(self): """Error during extend operation.""" drv = self._driver volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1, 'provider_location': 'nfs_share'} with mock.patch.object(image_utils, 'resize_image'): with mock.patch.object(drv, 'local_path', return_value='path'): with mock.patch.object(drv, '_is_share_eligible', return_value=True): with mock.patch.object(drv, '_is_file_size_equal', return_value=False): self.assertRaises(exception.ExtendVolumeError, drv.extend_volume, volume, 2) def test_extend_volume_insufficient_space(self): """Insufficient space on nfs_share during extend operation.""" drv = self._driver volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1, 'provider_location': 'nfs_share'} with mock.patch.object(image_utils, 'resize_image'): with mock.patch.object(drv, 'local_path', return_value='path'): with mock.patch.object(drv, '_is_share_eligible', return_value=False): with mock.patch.object(drv, '_is_file_size_equal', return_value=False): self.assertRaises(exception.ExtendVolumeError, drv.extend_volume, volume, 2) def test_is_file_size_equal(self): """File sizes are equal.""" drv = self._driver path = 'fake/path' size = 2 data = mock.MagicMock() data.virtual_size = size * units.Gi with mock.patch.object(image_utils, 'qemu_img_info', return_value=data): self.assertTrue(drv._is_file_size_equal(path, size)) def test_is_file_size_equal_false(self): """File sizes are not equal.""" drv = self._driver path = 'fake/path' size = 2 data = mock.MagicMock() data.virtual_size = (size + 1) * units.Gi with mock.patch.object(image_utils, 'qemu_img_info', return_value=data): self.assertFalse(drv._is_file_size_equal(path, size)) @mock.patch.object(nfs, 'LOG') def test_set_nas_security_options_when_true(self, LOG): """Test higher level setting of NAS Security options. The NFS driver overrides the base method with a driver specific version. """ drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] is_new_install = True drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._determine_nas_security_option_setting = mock.Mock( return_value='true') drv.set_nas_security_options(is_new_install) self.assertEqual('true', drv.configuration.nas_secure_file_operations) self.assertEqual('true', drv.configuration.nas_secure_file_permissions) self.assertFalse(LOG.warning.called) @mock.patch.object(nfs, 'LOG') def test_set_nas_security_options_when_false(self, LOG): """Test higher level setting of NAS Security options. The NFS driver overrides the base method with a driver specific version. 
""" drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] is_new_install = False drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._determine_nas_security_option_setting = mock.Mock( return_value='false') drv.set_nas_security_options(is_new_install) self.assertEqual('false', drv.configuration.nas_secure_file_operations) self.assertEqual('false', drv.configuration.nas_secure_file_permissions) self.assertTrue(LOG.warning.called) def test_set_nas_security_options_exception_if_no_mounted_shares(self): """Ensure proper exception is raised if there are no mounted shares.""" drv = self._driver drv._ensure_shares_mounted = mock.Mock() drv._mounted_shares = [] is_new_cinder_install = 'does not matter' self.assertRaises(exception.NfsNoSharesMounted, drv.set_nas_security_options, is_new_cinder_install) def test_ensure_share_mounted(self): """Case where the mount works the first time.""" self.mock_object(self._driver._remotefsclient, 'mount') drv = self._driver drv.configuration.nfs_mount_attempts = 3 drv.shares = {self.TEST_NFS_EXPORT1: ''} drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) drv._remotefsclient.mount.called_once() @mock.patch('time.sleep') def test_ensure_share_mounted_exception(self, _mock_sleep): """Make the configured number of attempts when mounts fail.""" num_attempts = 3 self.mock_object(self._driver._remotefsclient, 'mount', mock.Mock(side_effect=Exception)) drv = self._driver drv.configuration.nfs_mount_attempts = num_attempts drv.shares = {self.TEST_NFS_EXPORT1: ''} self.assertRaises(exception.NfsException, drv._ensure_share_mounted, self.TEST_NFS_EXPORT1) self.assertEqual(num_attempts, drv._remotefsclient.mount.call_count) def test_ensure_share_mounted_at_least_one_attempt(self): """Make at least one mount attempt even if configured for less.""" min_num_attempts = 1 num_attempts = 0 self.mock_object(self._driver._remotefsclient, 'mount', mock.Mock(side_effect=Exception)) drv = self._driver drv.configuration.nfs_mount_attempts = num_attempts drv.shares = {self.TEST_NFS_EXPORT1: ''} self.assertRaises(exception.NfsException, drv._ensure_share_mounted, self.TEST_NFS_EXPORT1) self.assertEqual(min_num_attempts, drv._remotefsclient.mount.call_count) class NfsDriverDoSetupTestCase(test.TestCase): def setUp(self): super(NfsDriverDoSetupTestCase, self).setUp() self.context = mock.Mock() self.create_configuration() def create_configuration(self): config = conf.Configuration(None) config.append_config_values(nfs.nfs_opts) self.configuration = config def test_setup_should_throw_error_if_shares_config_not_configured(self): """do_setup should throw error if shares config is not configured.""" self.override_config('nfs_shares_config', None) drv = nfs.NfsDriver(configuration=self.configuration) mock_os_path_exists = self.mock_object(os.path, 'exists') with self.assertRaisesRegex(exception.NfsException, ".*no NFS config file configured.*"): drv.do_setup(self.context) self.assertEqual(0, mock_os_path_exists.call_count) def test_setup_should_throw_error_if_shares_file_does_not_exist(self): """do_setup should throw error if shares file does not exist.""" drv = nfs.NfsDriver(configuration=self.configuration) mock_os_path_exists = self.mock_object(os.path, 'exists') mock_os_path_exists.return_value = False with self.assertRaisesRegex(exception.NfsException, "NFS config file.*doesn't exist"): drv.do_setup(self.context) mock_os_path_exists.assert_has_calls( [mock.call(self.configuration.nfs_shares_config)]) def 
test_setup_should_throw_exception_if_nfs_client_is_not_installed(self): """do_setup should throw error if nfs client is not installed.""" drv = nfs.NfsDriver(configuration=self.configuration) mock_os_path_exists = self.mock_object(os.path, 'exists') mock_os_path_exists.return_value = True mock_execute = self.mock_object(drv, '_execute') mock_execute.side_effect = OSError( errno.ENOENT, 'No such file or directory.') with self.assertRaisesRegex(exception.NfsException, 'mount.nfs is not installed'): drv.do_setup(self.context) mock_os_path_exists.assert_has_calls( [mock.call(self.configuration.nfs_shares_config)]) mock_execute.assert_has_calls( [mock.call('mount.nfs', check_exit_code=False, run_as_root=True)]) def test_setup_should_throw_exception_if_mount_nfs_command_fails(self): """do_setup should throw error if mount.nfs fails with OSError This test covers the OSError path when mount.nfs is installed. """ drv = nfs.NfsDriver(configuration=self.configuration) mock_os_path_exists = self.mock_object(os.path, 'exists') mock_os_path_exists.return_value = True mock_execute = self.mock_object(drv, '_execute') mock_execute.side_effect = OSError( errno.EPERM, 'Operation... BROKEN') with self.assertRaisesRegex(OSError, '.*Operation... BROKEN'): drv.do_setup(self.context) mock_os_path_exists.assert_has_calls( [mock.call(self.configuration.nfs_shares_config)]) mock_execute.assert_has_calls( [mock.call('mount.nfs', check_exit_code=False, run_as_root=True)]) @mock.patch.object(os, 'rename') def test_update_migrated_available_volume(self, rename_volume): self._test_update_migrated_volume('available', rename_volume) @mock.patch.object(os, 'rename') def test_update_migrated_available_volume_rename_fail(self, rename_volume): self._test_update_migrated_volume('available', rename_volume, rename_exception=True) @mock.patch.object(os, 'rename') def test_update_migrated_in_use_volume(self, rename_volume): self._test_update_migrated_volume('in-use', rename_volume) def _test_update_migrated_volume(self, volume_status, rename_volume, rename_exception=False): drv = nfs.NfsDriver(configuration=self.configuration) fake_volume_id = 'vol1' fake_new_volume_id = 'vol2' fake_provider_source = 'fake_provider_source' fake_provider = 'fake_provider' base_dir = '/dir_base/' volume_name_template = 'volume-%s' original_volume_name = volume_name_template % fake_volume_id current_name = volume_name_template % fake_new_volume_id original_volume_path = base_dir + original_volume_name current_path = base_dir + current_name fake_volume = {'size': 1, 'id': fake_volume_id, 'provider_location': fake_provider_source, '_name_id': None} fake_new_volume = {'size': 1, 'id': fake_new_volume_id, 'provider_location': fake_provider, '_name_id': None} with mock.patch.object(drv, 'local_path') as local_path: local_path.return_value = base_dir + current_name if volume_status == 'in-use': update = drv.update_migrated_volume(self.context, fake_volume, fake_new_volume, volume_status) self.assertEqual({'_name_id': fake_new_volume_id, 'provider_location': fake_provider}, update) elif rename_exception: rename_volume.side_effect = OSError update = drv.update_migrated_volume(self.context, fake_volume, fake_new_volume, volume_status) rename_volume.assert_called_once_with(current_path, original_volume_path) self.assertEqual({'_name_id': fake_new_volume_id, 'provider_location': fake_provider}, update) else: update = drv.update_migrated_volume(self.context, fake_volume, fake_new_volume, volume_status) rename_volume.assert_called_once_with(current_path, 
original_volume_path) self.assertEqual({'_name_id': None, 'provider_location': fake_provider}, update) def test_retype_is_there(self): """Ensure that driver.retype() is there.""" drv = nfs.NfsDriver(configuration=self.configuration) v1 = DumbVolume() ret = drv.retype(self.context, v1, mock.sentinel.new_type, mock.sentinel.diff, mock.sentinel.host) self.assertEqual((False, None), ret) cinder-8.0.0/cinder/tests/unit/test_quota_utils.py0000664000567000056710000001670212701406250023476 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from cinder import context from cinder import exception from cinder import quota_utils from cinder import test from keystoneclient import exceptions from keystonemiddleware import auth_token from oslo_config import cfg from oslo_config import fixture as config_fixture CONF = cfg.CONF class QuotaUtilsTest(test.TestCase): class FakeProject(object): def __init__(self, id='foo', parent_id=None): self.id = id self.parent_id = parent_id self.subtree = None self.parents = None self.domain_id = 'default' def setUp(self): super(QuotaUtilsTest, self).setUp() self.auth_url = 'http://localhost:5000' self.context = context.RequestContext('fake_user', 'fake_proj_id') self.fixture = self.useFixture(config_fixture.Config(auth_token.CONF)) self.fixture.config(auth_uri=self.auth_url, group='keystone_authtoken') @mock.patch('keystoneclient.client.Client') @mock.patch('keystoneclient.session.Session') def test_keystone_client_instantiation(self, ksclient_session, ksclient_class): quota_utils._keystone_client(self.context) ksclient_class.assert_called_once_with(auth_url=self.auth_url, session=ksclient_session(), version=(3, 0)) @mock.patch('keystoneclient.client.Client') def test_get_project_keystoneclient_v2(self, ksclient_class): keystoneclient = ksclient_class.return_value keystoneclient.version = 'v2.0' expected_project = quota_utils.GenericProjectInfo( self.context.project_id, 'v2.0') project = quota_utils.get_project_hierarchy( self.context, self.context.project_id) self.assertEqual(expected_project.__dict__, project.__dict__) @mock.patch('keystoneclient.client.Client') def test_get_project_keystoneclient_v3(self, ksclient_class): keystoneclient = ksclient_class.return_value keystoneclient.version = 'v3' returned_project = self.FakeProject(self.context.project_id, 'bar') del returned_project.subtree keystoneclient.projects.get.return_value = returned_project expected_project = quota_utils.GenericProjectInfo( self.context.project_id, 'v3', 'bar') project = quota_utils.get_project_hierarchy( self.context, self.context.project_id) self.assertEqual(expected_project.__dict__, project.__dict__)
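# For orientation before the subtree case below: with subtree_as_ids=True,
# keystone v3 returns the project subtree as a nested dict keyed by project
# id, and get_project_hierarchy() is expected to carry that mapping through
# unchanged on the returned GenericProjectInfo. The ids here come from this
# test's fakes; the deeper tree is hypothetical and only shows the nesting.
example_subtree = {'baz': {'quux': None}}  # 'baz' has one child; 'quux' is a leaf
deeper_subtree = {'baz': {'quux': {'corge': None}}}  # hypothetical extra level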
@mock.patch('keystoneclient.client.Client') def test_get_project_keystoneclient_v3_with_subtree(self, ksclient_class): keystoneclient = ksclient_class.return_value keystoneclient.version = 'v3' returned_project = self.FakeProject(self.context.project_id, 'bar') subtree_dict = {'baz': {'quux': None}} returned_project.subtree = subtree_dict keystoneclient.projects.get.return_value = returned_project expected_project = quota_utils.GenericProjectInfo( self.context.project_id, 'v3', 'bar', subtree_dict) project = quota_utils.get_project_hierarchy( self.context, self.context.project_id, subtree_as_ids=True) keystoneclient.projects.get.assert_called_once_with( self.context.project_id, parents_as_ids=False, subtree_as_ids=True) self.assertEqual(expected_project.__dict__, project.__dict__) def _setup_mock_ksclient(self, mock_client, version='v3', subtree=None, parents=None): keystoneclient = mock_client.return_value keystoneclient.version = version proj = self.FakeProject(self.context.project_id) proj.subtree = subtree if parents: proj.parents = parents proj.parent_id = next(iter(parents.keys())) keystoneclient.projects.get.return_value = proj @mock.patch('keystoneclient.client.Client') def test__filter_domain_id_from_parents_domain_as_parent( self, mock_client): # Test with a top level project (domain is direct parent) self._setup_mock_ksclient(mock_client, parents={'default': None}) project = quota_utils.get_project_hierarchy( self.context, self.context.project_id, parents_as_ids=True) self.assertIsNone(project.parent_id) self.assertIsNone(project.parents) @mock.patch('keystoneclient.client.Client') def test__filter_domain_id_from_parents_domain_as_grandparent( self, mock_client): # Test with a child project (domain is more than a parent) self._setup_mock_ksclient(mock_client, parents={'bar': {'default': None}}) project = quota_utils.get_project_hierarchy( self.context, self.context.project_id, parents_as_ids=True) self.assertEqual('bar', project.parent_id) self.assertEqual({'bar': None}, project.parents) @mock.patch('keystoneclient.client.Client') def test__filter_domain_id_from_parents_no_domain_in_parents( self, mock_client): # Test that if the topmost parent is not a domain (to simulate an older # keystone version) nothing gets removed from the tree parents = {'bar': {'foo': None}} self._setup_mock_ksclient(mock_client, parents=parents) project = quota_utils.get_project_hierarchy( self.context, self.context.project_id, parents_as_ids=True) self.assertEqual('bar', project.parent_id) self.assertEqual(parents, project.parents) @mock.patch('keystoneclient.client.Client') def test__filter_domain_id_from_parents_no_parents( self, mock_client): # Test that if no parents are present (to simulate an older # keystone version) things don't blow up self._setup_mock_ksclient(mock_client) project = quota_utils.get_project_hierarchy( self.context, self.context.project_id, parents_as_ids=True) self.assertIsNone(project.parent_id) self.assertIsNone(project.parents) @mock.patch('cinder.quota_utils._keystone_client') def test_validate_nested_projects_with_keystone_v2(self, _keystone_client): _keystone_client.side_effect = exceptions.VersionNotAvailable self.assertRaises(exception.CinderException, quota_utils.validate_setup_for_nested_quota_use, self.context, [], None) @mock.patch('cinder.quota_utils._keystone_client') def test_validate_nested_projects_non_cloud_admin(self, _keystone_client): # Covers the case of a non-cloud-admin user or an old policy.json _keystone_client.side_effect = exceptions.Forbidden self.assertRaises(exception.CinderException, quota_utils.validate_setup_for_nested_quota_use, self.context, [], None) cinder-8.0.0/cinder/tests/unit/keymgr/0000775000567000056710000000000012701406543021011 5ustar
jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/keymgr/__init__.py0000664000567000056710000000000012701406250023103 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/keymgr/test_key_mgr.py0000664000567000056710000000207312701406250024054 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the key manager. """ from cinder import test class KeyManagerTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(KeyManagerTestCase, self).__init__(*args, **kwargs) def _create_key_manager(self): raise NotImplementedError() def setUp(self): super(KeyManagerTestCase, self).setUp() self.key_mgr = self._create_key_manager() cinder-8.0.0/cinder/tests/unit/keymgr/mock_key_mgr.py0000664000567000056710000001026312701406250024026 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A mock implementation of a key manager that stores keys in a dictionary. This key manager implementation is primarily intended for testing. In particular, it does not store keys persistently. Lack of a centralized key store also makes this implementation unsuitable for use among different services. Note: Instantiating this class multiple times will create separate key stores. Keys created in one instance will not be accessible from other instances of this class. """ import array import binascii import uuid from cinder import exception from cinder.keymgr import key from cinder.keymgr import key_mgr from cinder.volume import utils class MockKeyManager(key_mgr.KeyManager): """Mocking manager for integration tests. This mock key manager implementation supports all the methods specified by the key manager interface. This implementation stores keys within a dictionary, and as a result, it is not acceptable for use across different services. Side effects (e.g., raising exceptions) for each method are handled as specified by the key manager interface. This key manager is not suitable for use in production deployments. 
""" def __init__(self): self.keys = {} def _generate_hex_key(self, **kwargs): key_length = kwargs.get('key_length', 256) # hex digit => 4 bits hex_encoded = utils.generate_password(length=key_length // 4, symbolgroups='0123456789ABCDEF') return hex_encoded def _generate_key(self, **kwargs): _hex = self._generate_hex_key(**kwargs) key_bytes = array.array('B', binascii.unhexlify(_hex)).tolist() return key.SymmetricKey('AES', key_bytes) def create_key(self, ctxt, **kwargs): """Creates a key. This implementation returns a UUID for the created key. A NotAuthorized exception is raised if the specified context is None. """ if ctxt is None: raise exception.NotAuthorized() key = self._generate_key(**kwargs) return self.store_key(ctxt, key) def _generate_key_id(self): key_id = str(uuid.uuid4()) while key_id in self.keys: key_id = str(uuid.uuid4()) return key_id def store_key(self, ctxt, key, **kwargs): """Stores (i.e., registers) a key with the key manager.""" if ctxt is None: raise exception.NotAuthorized() key_id = self._generate_key_id() self.keys[key_id] = key return key_id def copy_key(self, ctxt, key_id, **kwargs): if ctxt is None: raise exception.NotAuthorized() copied_key_id = self._generate_key_id() self.keys[copied_key_id] = self.keys[key_id] return copied_key_id def get_key(self, ctxt, key_id, **kwargs): """Retrieves the key identified by the specified id. This implementation returns the key that is associated with the specified UUID. A NotAuthorized exception is raised if the specified context is None; a KeyError is raised if the UUID is invalid. """ if ctxt is None: raise exception.NotAuthorized() return self.keys[key_id] def delete_key(self, ctxt, key_id, **kwargs): """Deletes the key identified by the specified id. A NotAuthorized exception is raised if the context is None and a KeyError is raised if the UUID is invalid. """ if ctxt is None: raise exception.NotAuthorized() del self.keys[key_id] cinder-8.0.0/cinder/tests/unit/keymgr/test_barbican.py0000664000567000056710000002657212701406250024172 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the barbican key manager. 
""" import array import base64 import binascii import mock from oslo_config import cfg from cinder import exception from cinder.keymgr import barbican from cinder.keymgr import key as keymgr_key from cinder.tests.unit.keymgr import test_key_mgr CONF = cfg.CONF CONF.import_opt('encryption_auth_url', 'cinder.keymgr.key_mgr', group='keymgr') CONF.import_opt('encryption_api_url', 'cinder.keymgr.key_mgr', group='keymgr') class BarbicanKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return barbican.BarbicanKeyManager() def setUp(self): super(BarbicanKeyManagerTestCase, self).setUp() # Create fake auth_token self.ctxt = mock.Mock() self.ctxt.auth_token = "fake_token" self.ctxt.project_id = "fake_project_id" # Create mock barbican client self._build_mock_barbican() # Create a key_id, secret_ref, pre_hex, and hex to use self.key_id = "d152fa13-2b41-42ca-a934-6c21566c0f40" self.secret_ref = self.key_mgr._create_secret_ref(self.key_id, self.mock_barbican) self.pre_hex = "AIDxQp2++uAbKaTVDMXFYIu8PIugJGqkK0JLqkU0rhY=" self.hex = ("0080f1429dbefae01b29a4d50cc5c5608bbc3c8ba0246aa42b424baa4" "534ae16") self.original_api_url = CONF.keymgr.encryption_api_url self.addCleanup(self._restore) def _restore(self): if hasattr(self, 'original_key'): keymgr_key.SymmetricKey = self.original_key if hasattr(self, 'original_base64'): base64.b64encode = self.original_base64 if hasattr(self, 'original_api_url'): CONF.keymgr.encryption_api_url = self.original_api_url def _build_mock_barbican(self): self.mock_barbican = mock.MagicMock(name='mock_barbican') # Set commonly used methods self.get = self.mock_barbican.secrets.get self.delete = self.mock_barbican.secrets.delete self.store = self.mock_barbican.secrets.store self.create = self.mock_barbican.secrets.create self.key_mgr._barbican_client = self.mock_barbican self.key_mgr._current_context = self.ctxt def _build_mock_symKey(self): self.mock_symKey = mock.Mock() def fake_sym_key(alg, key): self.mock_symKey.get_encoded.return_value = key self.mock_symKey.get_algorithm.return_value = alg return self.mock_symKey self.original_key = keymgr_key.SymmetricKey keymgr_key.SymmetricKey = fake_sym_key def _build_mock_base64(self): def fake_base64_b64encode(string): return self.pre_hex self.original_base64 = base64.b64encode base64.b64encode = fake_base64_b64encode def test_copy_key(self): # Create metadata for original secret original_secret_metadata = mock.Mock() original_secret_metadata.algorithm = 'fake_algorithm' original_secret_metadata.bit_length = 'fake_bit_length' original_secret_metadata.name = 'original_name' original_secret_metadata.expiration = 'fake_expiration' original_secret_metadata.mode = 'fake_mode' content_types = {'default': 'fake_type'} original_secret_metadata.content_types = content_types original_secret_data = mock.Mock() original_secret_metadata.payload = original_secret_data self.get.return_value = original_secret_metadata # Create the mock key self._build_mock_symKey() # Copy the original self.key_mgr.copy_key(self.ctxt, self.key_id) # Assert proper methods were called self.get.assert_called_once_with(self.secret_ref) self.create.assert_called_once_with( original_secret_metadata.name, self.mock_symKey.get_encoded(), content_types['default'], 'base64', original_secret_metadata.algorithm, original_secret_metadata.bit_length, None, original_secret_metadata.mode, original_secret_metadata.expiration) self.create.return_value.store.assert_called_once_with() def test_copy_null_context(self): self.key_mgr._barbican_client = 
None self.assertRaises(exception.NotAuthorized, self.key_mgr.copy_key, None, self.key_id) def test_create_key(self): # Create order_ref_url and assign return value order_ref_url = ("http://localhost:9311/v1/None/orders/" "4fe939b7-72bc-49aa-bd1e-e979589858af") key_order = mock.Mock() self.mock_barbican.orders.create_key.return_value = key_order key_order.submit.return_value = order_ref_url # Create order and assign return value order = mock.Mock() order.secret_ref = self.secret_ref self.mock_barbican.orders.get.return_value = order # Create the key, get the UUID returned_uuid = self.key_mgr.create_key(self.ctxt) self.mock_barbican.orders.get.assert_called_once_with(order_ref_url) self.assertEqual(self.key_id, returned_uuid) def test_create_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.NotAuthorized, self.key_mgr.create_key, None) def test_delete_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.NotAuthorized, self.key_mgr.delete_key, None, self.key_id) def test_delete_key(self): self.key_mgr.delete_key(self.ctxt, self.key_id) self.delete.assert_called_once_with(self.secret_ref) def test_delete_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.delete_key, self.ctxt, None) def test_get_key(self): self._build_mock_base64() content_type = 'application/octet-stream' key = self.key_mgr.get_key(self.ctxt, self.key_id, content_type) self.get.assert_called_once_with(self.secret_ref) encoded = array.array('B', binascii.unhexlify(self.hex)).tolist() self.assertEqual(encoded, key.get_encoded()) def test_get_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.NotAuthorized, self.key_mgr.get_key, None, self.key_id) def test_get_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.get_key, self.ctxt, None) def test_store_key_base64(self): # Create Key to store secret_key = array.array('B', [0x01, 0x02, 0xA0, 0xB3]).tolist() _key = keymgr_key.SymmetricKey('AES', secret_key) # Define the return values secret = mock.Mock() self.create.return_value = secret secret.store.return_value = self.secret_ref # Store the Key returned_uuid = self.key_mgr.store_key(self.ctxt, _key, bit_length=32) self.create.assert_called_once_with('Cinder Volume Key', b'AQKgsw==', 'application/octet-stream', 'base64', 'AES', 32, None, 'CBC', None) self.assertEqual(self.key_id, returned_uuid) def test_store_key_plaintext(self): # Create the plaintext key secret_key_text = "This is a test text key." 
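# Quick arithmetic check of the payload asserted in test_store_key_base64
# above: the four key bytes 0x01 0x02 0xA0 0xB3 base64-encode to exactly
# b'AQKgsw==':
#
#     import base64
#     base64.b64encode(bytes([0x01, 0x02, 0xA0, 0xB3]))  # -> b'AQKgsw=='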
_key = keymgr_key.SymmetricKey('AES', secret_key_text) # Store the Key self.key_mgr.store_key(self.ctxt, _key, payload_content_type='text/plain', payload_content_encoding=None) self.create.assert_called_once_with('Cinder Volume Key', secret_key_text, 'text/plain', None, 'AES', 256, None, 'CBC', None) self.create.return_value.store.assert_called_once_with() def test_store_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.NotAuthorized, self.key_mgr.store_key, None, None) def test_null_project_id(self): self.key_mgr._barbican_client = None self.ctxt.project_id = None self.assertRaises(exception.KeyManagerError, self.key_mgr.create_key, self.ctxt) def test_ctxt_without_project_id(self): self.key_mgr._barbican_client = None del self.ctxt.project_id self.assertRaises(exception.KeyManagerError, self.key_mgr.create_key, self.ctxt) @mock.patch('cinder.keymgr.barbican.identity.v3.Token') @mock.patch('cinder.keymgr.barbican.session.Session') @mock.patch('cinder.keymgr.barbican.barbican_client.Client') def test_ctxt_with_project_id(self, mock_client, mock_session, mock_token): # set client to None so that client creation will occur self.key_mgr._barbican_client = None # mock the return values mock_auth = mock.Mock() mock_token.return_value = mock_auth mock_sess = mock.Mock() mock_session.return_value = mock_sess # mock the endpoint mock_endpoint = mock.Mock() self.key_mgr._barbican_endpoint = mock_endpoint self.key_mgr.create_key(self.ctxt) # assert proper calls occurred, including with project_id mock_token.assert_called_once_with( auth_url=CONF.keymgr.encryption_auth_url, token=self.ctxt.auth_token, project_id=self.ctxt.project_id) mock_session.assert_called_once_with(auth=mock_auth) mock_client.assert_called_once_with(session=mock_sess, endpoint=mock_endpoint) def test_parse_barbican_api_url(self): # assert that the correct format is handled correctly CONF.keymgr.encryption_api_url = "http://host:port/v1/" dummy = barbican.BarbicanKeyManager() self.assertEqual(dummy._barbican_endpoint, "http://host:port") # assert that invalid api url formats will raise an exception CONF.keymgr.encryption_api_url = "http://host:port/" self.assertRaises(exception.KeyManagerError, barbican.BarbicanKeyManager) CONF.keymgr.encryption_api_url = "http://host:port/secrets" self.assertRaises(exception.KeyManagerError, barbican.BarbicanKeyManager) cinder-8.0.0/cinder/tests/unit/keymgr/test_not_implemented_key_mgr.py0000664000567000056710000000337112701406250027321 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the not implemented key manager. 
""" from cinder.keymgr import not_implemented_key_mgr from cinder.tests.unit.keymgr import test_key_mgr class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return not_implemented_key_mgr.NotImplementedKeyManager() def setUp(self): super(NotImplementedKeyManagerTestCase, self).setUp() def test_create_key(self): self.assertRaises(NotImplementedError, self.key_mgr.create_key, None) def test_store_key(self): self.assertRaises(NotImplementedError, self.key_mgr.store_key, None, None) def test_copy_key(self): self.assertRaises(NotImplementedError, self.key_mgr.copy_key, None, None) def test_get_key(self): self.assertRaises(NotImplementedError, self.key_mgr.get_key, None, None) def test_delete_key(self): self.assertRaises(NotImplementedError, self.key_mgr.delete_key, None, None) cinder-8.0.0/cinder/tests/unit/keymgr/fake.py0000664000567000056710000000147512701406250022273 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake key manager.""" from cinder.tests.unit.keymgr import mock_key_mgr def fake_api(): return mock_key_mgr.MockKeyManager() cinder-8.0.0/cinder/tests/unit/keymgr/test_conf_key_mgr.py0000664000567000056710000001042712701406250025063 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the conf key manager. 
""" import array import binascii from oslo_config import cfg from cinder import context from cinder import exception from cinder.keymgr import conf_key_mgr from cinder.keymgr import key from cinder.tests.unit.keymgr import test_key_mgr CONF = cfg.CONF CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='keymgr') class ConfKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def __init__(self, *args, **kwargs): super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs) self._hex_key = '1' * 64 def _create_key_manager(self): CONF.set_default('fixed_key', default=self._hex_key, group='keymgr') return conf_key_mgr.ConfKeyManager() def setUp(self): super(ConfKeyManagerTestCase, self).setUp() self.ctxt = context.RequestContext('fake', 'fake') self.key_id = '00000000-0000-0000-0000-000000000000' encoded = array.array('B', binascii.unhexlify(self._hex_key)).tolist() self.key = key.SymmetricKey('AES', encoded) def test___init__(self): self.assertEqual(self.key_id, self.key_mgr.key_id) def test_create_key(self): key_id_1 = self.key_mgr.create_key(self.ctxt) key_id_2 = self.key_mgr.create_key(self.ctxt) # ensure that the UUIDs are the same self.assertEqual(key_id_1, key_id_2) def test_create_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.create_key, None) def test_store_key(self): key_id = self.key_mgr.store_key(self.ctxt, self.key) actual_key = self.key_mgr.get_key(self.ctxt, key_id) self.assertEqual(self.key, actual_key) def test_store_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.store_key, None, self.key) def test_store_key_invalid(self): encoded = self.key.get_encoded() inverse_key = key.SymmetricKey('AES', [~b for b in encoded]) self.assertRaises(exception.KeyManagerError, self.key_mgr.store_key, self.ctxt, inverse_key) def test_copy_key(self): key_id = self.key_mgr.create_key(self.ctxt) key = self.key_mgr.get_key(self.ctxt, key_id) copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) self.assertEqual(key_id, copied_key_id) self.assertEqual(key, copied_key) def test_copy_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.copy_key, None, None) def test_delete_key(self): key_id = self.key_mgr.create_key(self.ctxt) self.key_mgr.delete_key(self.ctxt, key_id) # cannot delete key -- might have lingering references self.assertEqual(self.key, self.key_mgr.get_key(self.ctxt, self.key_id)) def test_delete_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.delete_key, None, None) def test_delete_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.delete_key, self.ctxt, None) def test_get_key(self): self.assertEqual(self.key, self.key_mgr.get_key(self.ctxt, self.key_id)) def test_get_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.get_key, None, None) def test_get_unknown_key(self): self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None) cinder-8.0.0/cinder/tests/unit/keymgr/test_mock_key_mgr.py0000664000567000056710000000707512701406250025074 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the mock key manager. """ import array from cinder import context from cinder import exception from cinder.keymgr import key as keymgr_key from cinder.tests.unit.keymgr import mock_key_mgr from cinder.tests.unit.keymgr import test_key_mgr class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return mock_key_mgr.MockKeyManager() def setUp(self): super(MockKeyManagerTestCase, self).setUp() self.ctxt = context.RequestContext('fake', 'fake') def test_create_key(self): key_id_1 = self.key_mgr.create_key(self.ctxt) key_id_2 = self.key_mgr.create_key(self.ctxt) # ensure that the UUIDs are unique self.assertNotEqual(key_id_1, key_id_2) def test_create_key_with_length(self): for length in [64, 128, 256]: key_id = self.key_mgr.create_key(self.ctxt, key_length=length) key = self.key_mgr.get_key(self.ctxt, key_id) self.assertEqual(length // 8, len(key.get_encoded())) def test_create_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.create_key, None) def test_store_key(self): secret_key = array.array('B', b'\x00' * 32).tolist() _key = keymgr_key.SymmetricKey('AES', secret_key) key_id = self.key_mgr.store_key(self.ctxt, _key) actual_key = self.key_mgr.get_key(self.ctxt, key_id) self.assertEqual(_key, actual_key) def test_store_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.store_key, None, None) def test_copy_key(self): key_id = self.key_mgr.create_key(self.ctxt) key = self.key_mgr.get_key(self.ctxt, key_id) copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) self.assertNotEqual(key_id, copied_key_id) self.assertEqual(key, copied_key) def test_copy_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.copy_key, None, None) def test_get_key(self): pass def test_get_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.get_key, None, None) def test_get_unknown_key(self): self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None) def test_delete_key(self): key_id = self.key_mgr.create_key(self.ctxt) self.key_mgr.delete_key(self.ctxt, key_id) self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id) def test_delete_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.delete_key, None, None) def test_delete_unknown_key(self): self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None) cinder-8.0.0/cinder/tests/unit/keymgr/test_key.py0000664000567000056710000000347312701406250023214 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
cinder-8.0.0/cinder/tests/unit/keymgr/test_key.py0000664000567000056710000000347312701406250023214 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Test cases for the key classes.
"""

from cinder.keymgr import key
from cinder import test


class KeyTestCase(test.TestCase):

    def _create_key(self):
        raise NotImplementedError()

    def setUp(self):
        super(KeyTestCase, self).setUp()

        self.key = self._create_key()


class SymmetricKeyTestCase(KeyTestCase):

    def _create_key(self):
        return key.SymmetricKey(self.algorithm, self.encoded)

    def setUp(self):
        self.algorithm = 'AES'
        self.encoded = [0] * 32

        super(SymmetricKeyTestCase, self).setUp()

    def test_get_algorithm(self):
        self.assertEqual(self.algorithm, self.key.get_algorithm())

    def test_get_format(self):
        self.assertEqual('RAW', self.key.get_format())

    def test_get_encoded(self):
        self.assertEqual(self.encoded, self.key.get_encoded())

    def test___eq__(self):
        self.assertTrue(self.key == self.key)

        self.assertFalse(self.key is None)
        self.assertFalse(None == self.key)

    def test___ne__(self):
        self.assertFalse(self.key != self.key)

        self.assertTrue(self.key is not None)
        self.assertTrue(None != self.key)
cinder-8.0.0/cinder/tests/unit/fake_backup.py0000664000567000056710000000357712701406250022327 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_versionedobjects import fields

from cinder import objects
from cinder.objects import fields as c_fields
from cinder.tests.unit import fake_constants as fake


def fake_db_backup(**updates):
    db_backup = {
        'id': fake.backup_id,
        'user_id': fake.user_id,
        'project_id': fake.project_id,
        'volume_id': fake.volume_id,
        'status': c_fields.BackupStatus.CREATING,
        'host': 'fake_host',
        'display_name': 'fake_name',
        'size': 5,
        'display_description': 'fake_description',
        'service_metadata': 'fake_metadata',
        'service': 'fake_service',
        'object_count': 5,
        'num_dependent_backups': 0,
    }

    for name, field in objects.Backup.fields.items():
        if name in db_backup:
            continue
        if field.nullable:
            db_backup[name] = None
        elif field.default != fields.UnspecifiedDefault:
            db_backup[name] = field.default
        else:
            raise Exception('fake_db_backup needs help with %s' % name)

    if updates:
        db_backup.update(updates)

    return db_backup


def fake_backup_obj(context, **updates):
    return objects.Backup._from_db_object(context, objects.Backup(),
                                          fake_db_backup(**updates))
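# ---------------------------------------------------------------------------
# Illustrative usage (editorial addition): fake_db_backup() fills every field
# of objects.Backup with a null/default/fake value and lets keyword overrides
# win, so a test can build a realistic Backup object in one line. The status
# and size overrides below are assumptions picked for illustration:
#
#     backup = fake_backup_obj(ctxt,
#                              status=c_fields.BackupStatus.AVAILABLE,
#                              size=10)
#     assert backup.size == 10
# ---------------------------------------------------------------------------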
cinder-8.0.0/cinder/tests/unit/scheduler/0000775000567000056710000000000012701406543021471 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/scheduler/test_goodness_weigher.py0000664000567000056710000001475712701406250026446 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Goodness Weigher.
"""

from cinder.scheduler.weights import goodness
from cinder import test
from cinder.tests.unit.scheduler import fakes


class GoodnessWeigherTestCase(test.TestCase):
    def setUp(self):
        super(GoodnessWeigherTestCase, self).setUp()

    def test_goodness_weigher_with_no_goodness_function(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'foo': '50'
            }
        })

        weight_properties = {}
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(0, weight)

    def test_goodness_weigher_passing_host(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'goodness_function': '100'
            }
        })
        host_state_2 = fakes.FakeHostState('host2', {
            'host': 'host2.example.com',
            'capabilities': {
                'goodness_function': '0'
            }
        })
        host_state_3 = fakes.FakeHostState('host3', {
            'host': 'host3.example.com',
            'capabilities': {
                'goodness_function': '100 / 2'
            }
        })

        weight_properties = {}
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(100, weight)
        weight = weigher._weigh_object(host_state_2, weight_properties)
        self.assertEqual(0, weight)
        weight = weigher._weigh_object(host_state_3, weight_properties)
        self.assertEqual(50, weight)

    def test_goodness_weigher_capabilities_substitution(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'foo': 50,
                'goodness_function': '10 + capabilities.foo'
            }
        })

        weight_properties = {}
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(60, weight)

    def test_goodness_weigher_extra_specs_substitution(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'goodness_function': '10 + extra.foo'
            }
        })

        weight_properties = {
            'volume_type': {
                'extra_specs': {
                    'foo': 50
                }
            }
        }
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(60, weight)

    def test_goodness_weigher_volume_substitution(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'goodness_function': '10 + volume.foo'
            }
        })

        weight_properties = {
            'request_spec': {
                'volume_properties': {
                    'foo': 50
                }
            }
        }
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(60, weight)

    def test_goodness_weigher_qos_substitution(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'goodness_function': '10 + qos.foo'
            }
        })

        weight_properties = {
            'qos_specs': {
                'foo': 50
            }
        }
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(60, weight)

    def test_goodness_weigher_stats_substitution(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'goodness_function': 'stats.free_capacity_gb > 20'
            },
            'free_capacity_gb': 50
        })

        weight_properties = {}
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(100, weight)

    def test_goodness_weigher_invalid_substitution(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'goodness_function': '10 + stats.my_val'
            },
            'foo': 50
        })

        weight_properties = {}
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(0, weight)

    def test_goodness_weigher_host_rating_out_of_bounds(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'goodness_function': '-10'
            }
        })
        host_state_2 = fakes.FakeHostState('host2', {
            'host': 'host2.example.com',
            'capabilities': {
                'goodness_function': '200'
            }
        })

        weight_properties = {}
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(0, weight)
        weight = weigher._weigh_object(host_state_2, weight_properties)
        self.assertEqual(0, weight)

    def test_goodness_weigher_invalid_goodness_function(self):
        weigher = goodness.GoodnessWeigher()
        host_state = fakes.FakeHostState('host1', {
            'host': 'host.example.com',
            'capabilities': {
                'goodness_function': '50 / 0'
            }
        })

        weight_properties = {}
        weight = weigher._weigh_object(host_state, weight_properties)
        self.assertEqual(0, weight)
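# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): a simplified model of the behavior
# the GoodnessWeigher tests above pin down -- substitute host facts into the
# configured expression, map boolean results to 0/100, and reject anything
# that does not evaluate to a 0-100 rating. The real weigher uses cinder's
# safe expression evaluator rather than eval(); this stand-in is for
# illustration only.
import types


def goodness_weight(expression, **facts):
    namespaces = {name: types.SimpleNamespace(**values)
                  for name, values in facts.items()}
    try:
        result = eval(expression, {'__builtins__': {}}, namespaces)
    except Exception:
        return 0                        # '50 / 0', unknown variables, ...
    if isinstance(result, bool):        # 'stats.free_capacity_gb > 20'
        return 100 if result else 0
    if not 0 <= result <= 100:          # '-10' and '200' are both rejected
        return 0
    return result

# goodness_weight('10 + capabilities.foo', capabilities={'foo': 50}) == 60
# ---------------------------------------------------------------------------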
""" import mock from oslo_config import cfg from cinder import context from cinder.db.sqlalchemy import api from cinder.scheduler import weights from cinder import test from cinder.tests.unit.scheduler import fakes from cinder.volume import utils CONF = cfg.CONF def fake_volume_data_get_for_host(context, host, count_only=False): host = utils.extract_host(host) if host == 'host1': return 1 elif host == 'host2': return 2 elif host == 'host3': return 3 elif host == 'host4': return 4 elif host == 'host5': return 5 else: return 6 class VolumeNumberWeigherTestCase(test.TestCase): def setUp(self): super(VolumeNumberWeigherTestCase, self).setUp() self.context = context.get_admin_context() self.host_manager = fakes.FakeHostManager() self.weight_handler = weights.HostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {'context': self.context} return self.weight_handler.get_weighed_objects( [weights.volume_number.VolumeNumberWeigher], hosts, weight_properties)[0] @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, disabled=disabled) host_states = self.host_manager.get_all_host_states(ctxt) _mock_service_get_all_by_topic.assert_called_once_with( ctxt, CONF.volume_topic, disabled=disabled) return host_states def test_volume_number_weight_multiplier1(self): self.flags(volume_number_multiplier=-1.0) hostinfo_list = self._get_all_hosts() # host1: 1 volume Norm=0.0 # host2: 2 volumes # host3: 3 volumes # host4: 4 volumes # host5: 5 volumes Norm=-1.0 # so, host1 should win: with mock.patch.object(api, 'volume_data_get_for_host', fake_volume_data_get_for_host): weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(0.0, weighed_host.weight) self.assertEqual('host1', utils.extract_host(weighed_host.obj.host)) def test_volume_number_weight_multiplier2(self): self.flags(volume_number_multiplier=1.0) hostinfo_list = self._get_all_hosts() # host1: 1 volume Norm=0 # host2: 2 volumes # host3: 3 volumes # host4: 4 volumes # host5: 5 volumes Norm=1 # so, host5 should win: with mock.patch.object(api, 'volume_data_get_for_host', fake_volume_data_get_for_host): weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(1.0, weighed_host.weight) self.assertEqual('host5', utils.extract_host(weighed_host.obj.host)) cinder-8.0.0/cinder/tests/unit/scheduler/test_scheduler_options.py0000664000567000056710000001170012701406250026625 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For PickledScheduler. 
""" import datetime from oslo_serialization import jsonutils import six from cinder.scheduler import scheduler_options from cinder import test class FakeSchedulerOptions(scheduler_options.SchedulerOptions): def __init__(self, last_checked, now, file_old, file_now, data, filedata): super(FakeSchedulerOptions, self).__init__() # Change internals ... self.last_modified = file_old self.last_checked = last_checked self.data = data # For overrides ... self._time_now = now self._file_now = file_now self._file_data = filedata self.file_was_loaded = False def _get_file_timestamp(self, filename): return self._file_now def _get_file_handle(self, filename): self.file_was_loaded = True return six.StringIO(self._file_data) def _get_time_now(self): return self._time_now class SchedulerOptionsTestCase(test.TestCase): def test_get_configuration_first_time_no_flag(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, fake.get_configuration()) self.assertFalse(fake.file_was_loaded) def test_get_configuration_first_time_empty_file(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) jdata = "" fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_first_time_happy_day(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_second_time_no_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_too_fast(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2011, 1, 1, 1, 1, 2) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(old_data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) 
cinder-8.0.0/cinder/tests/unit/scheduler/fake_hosts.py0000664000567000056710000000320112701406250024160 0ustar jenkinsjenkins00000000000000# Copyright 2012 Intel Inc, OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Fakes For filters tests.
"""

import six


class FakeHostManager(object):
    """Defines fake hosts.

    host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
    host2: free_ram_mb=2048-512=1536  free_disk_gb=2048-512=1536
    host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
    host4: free_ram_mb=8192 free_disk_gb=8192
    """
    def __init__(self):
        self.service_states = {
            'host1': {
                'compute': {'host_memory_free': 1073741824},
            },
            'host2': {
                'compute': {'host_memory_free': 2147483648},
            },
            'host3': {
                'compute': {'host_memory_free': 3221225472},
            },
            'host4': {
                'compute': {'host_memory_free': 8589934592},
            },
        }


class FakeHostState(object):
    def __init__(self, host, attribute_dict):
        self.host = host
        for (key, val) in six.iteritems(attribute_dict):
            setattr(self, key, val)
cinder-8.0.0/cinder/tests/unit/scheduler/test_base_filter.py0000664000567000056710000001315512701406250025361 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslotest import moxstubout

from cinder.scheduler import base_filter
from cinder import test


class TestBaseFilter(test.TestCase):

    def setUp(self):
        super(TestBaseFilter, self).setUp()
        self.mox = self.useFixture(moxstubout.MoxStubout()).mox
        self.filter = base_filter.BaseFilter()

    def test_filter_one_is_called(self):
        filters = [1, 2, 3, 4]
        filter_properties = {'x': 'y'}

        self.mox.StubOutWithMock(self.filter, '_filter_one')
        self.filter._filter_one(1, filter_properties).AndReturn(False)
        self.filter._filter_one(2, filter_properties).AndReturn(True)
        self.filter._filter_one(3, filter_properties).AndReturn(True)
        self.filter._filter_one(4, filter_properties).AndReturn(False)

        self.mox.ReplayAll()

        result = list(self.filter.filter_all(filters, filter_properties))
        self.assertEqual([2, 3], result)


class FakeExtension(object):

    def __init__(self, plugin):
        self.plugin = plugin


class BaseFakeFilter(base_filter.BaseFilter):
    pass


class FakeFilter1(BaseFakeFilter):
    """Derives from BaseFakeFilter and has a fake entry point defined.

    Entry point is returned by fake ExtensionManager.
    Should be included in the output of all_classes.
    """
    pass


class FakeFilter2(BaseFakeFilter):
    """Derives from BaseFakeFilter but has no entry point.

    Should not be included in all_classes.
    """
    pass


class FakeFilter3(base_filter.BaseFilter):
    """Does not derive from BaseFakeFilter.

    Should not be included.
    """
    pass


class FakeFilter4(BaseFakeFilter):
    """Derives from BaseFakeFilter and has an entry point.

    Should be included.
    """
    pass


class FakeFilter5(BaseFakeFilter):
    """Derives from BaseFakeFilter but has no entry point.

    Should not be included.
    """
    run_filter_once_per_request = True
    pass


class FakeExtensionManager(list):

    def __init__(self, namespace):
        classes = [FakeFilter1, FakeFilter3, FakeFilter4]
        exts = map(FakeExtension, classes)
        super(FakeExtensionManager, self).__init__(exts)
        self.namespace = namespace


class TestBaseFilterHandler(test.TestCase):

    def setUp(self):
        super(TestBaseFilterHandler, self).setUp()
        self.stubs = self.useFixture(moxstubout.MoxStubout()).stubs
        self.stubs.Set(base_filter.base_handler.extension, 'ExtensionManager',
                       FakeExtensionManager)
        self.handler = base_filter.BaseFilterHandler(BaseFakeFilter,
                                                     'fake_filters')

    def test_get_all_classes(self):
        # In order for a FakeFilter to be returned by get_all_classes, it has
        # to comply with these rules:
        # * It must be derived from BaseFakeFilter
        #   AND
        # * It must have a python entrypoint assigned (returned by
        #   FakeExtensionManager)
        expected = [FakeFilter1, FakeFilter4]
        result = self.handler.get_all_classes()
        self.assertEqual(expected, result)

    def _get_filtered_objects(self, filter_classes, index=0):
        filter_objs_initial = [1, 2, 3, 4]
        filter_properties = {'x': 'y'}
        return self.handler.get_filtered_objects(filter_classes,
                                                 filter_objs_initial,
                                                 filter_properties,
                                                 index)

    @mock.patch.object(FakeFilter4, 'filter_all')
    @mock.patch.object(FakeFilter3, 'filter_all', return_value=None)
    def test_get_filtered_objects_return_none(self, fake3_filter_all,
                                              fake4_filter_all):
        filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3,
                          FakeFilter4]
        result = self._get_filtered_objects(filter_classes)
        self.assertIsNone(result)
        self.assertFalse(fake4_filter_all.called)

    def test_get_filtered_objects(self):
        filter_objs_expected = [1, 2, 3, 4]
        filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3,
                          FakeFilter4]
        result = self._get_filtered_objects(filter_classes)
        self.assertEqual(filter_objs_expected, result)

    def test_get_filtered_objects_with_filter_run_once(self):
        filter_objs_expected = [1, 2, 3, 4]
        filter_classes = [FakeFilter5]

        with mock.patch.object(FakeFilter5, 'filter_all',
                               return_value=filter_objs_expected
                               ) as fake5_filter_all:
            result = self._get_filtered_objects(filter_classes)
            self.assertEqual(filter_objs_expected, result)
            self.assertEqual(1, fake5_filter_all.call_count)

            result = self._get_filtered_objects(filter_classes, index=1)
            self.assertEqual(filter_objs_expected, result)
            self.assertEqual(1, fake5_filter_all.call_count)

            result = self._get_filtered_objects(filter_classes, index=2)
            self.assertEqual(filter_objs_expected, result)
            self.assertEqual(1, fake5_filter_all.call_count)
cinder-8.0.0/cinder/tests/unit/scheduler/__init__.py0000664000567000056710000000000012701406250023563 0ustar jenkinsjenkins00000000000000
cinder-8.0.0/cinder/tests/unit/scheduler/test_host_manager.py0000664000567000056710000006530212701406250025552 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Tests For HostManager
"""

from datetime import datetime

import mock
from oslo_config import cfg
from oslo_utils import timeutils

from cinder import exception
from cinder import objects
from cinder.scheduler import filters
from cinder.scheduler import host_manager
from cinder import test
from cinder.tests.unit.objects import test_service


CONF = cfg.CONF


class FakeFilterClass1(filters.BaseHostFilter):
    def host_passes(self, host_state, filter_properties):
        pass


class FakeFilterClass2(filters.BaseHostFilter):
    def host_passes(self, host_state, filter_properties):
        pass


class HostManagerTestCase(test.TestCase):
    """Test case for HostManager class."""

    def setUp(self):
        super(HostManagerTestCase, self).setUp()
        self.host_manager = host_manager.HostManager()
        self.fake_hosts = [host_manager.HostState('fake_host%s' % x)
                           for x in range(1, 5)]

    def test_choose_host_filters_not_found(self):
        self.flags(scheduler_default_filters='FakeFilterClass3')
        self.host_manager.filter_classes = [FakeFilterClass1,
                                            FakeFilterClass2]
        self.assertRaises(exception.SchedulerHostFilterNotFound,
                          self.host_manager._choose_host_filters, None)

    def test_choose_host_filters(self):
        self.flags(scheduler_default_filters=['FakeFilterClass2'])
        self.host_manager.filter_classes = [FakeFilterClass1,
                                            FakeFilterClass2]

        # Test 'volume' returns 1 correct function
        filter_classes = self.host_manager._choose_host_filters(None)
        self.assertEqual(1, len(filter_classes))
        self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)

    @mock.patch('cinder.scheduler.host_manager.HostManager.'
                '_choose_host_filters')
    def test_get_filtered_hosts(self, _mock_choose_host_filters):
        filter_class = FakeFilterClass1
        mock_func = mock.Mock()
        mock_func.return_value = True
        filter_class._filter_one = mock_func
        _mock_choose_host_filters.return_value = [filter_class]

        fake_properties = {'moo': 1, 'cow': 2}
        expected = []
        for fake_host in self.fake_hosts:
            expected.append(mock.call(fake_host, fake_properties))

        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self.assertEqual(expected, mock_func.call_args_list)
        self.assertEqual(set(self.fake_hosts), set(result))

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_update_service_capabilities(self, _mock_utcnow):
        service_states = self.host_manager.service_states
        self.assertDictMatch({}, service_states)

        _mock_utcnow.side_effect = [31337, 31338, 31339]

        host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1)
        host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1)
        host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1)

        service_name = 'volume'
        self.host_manager.update_service_capabilities(service_name, 'host1',
                                                      host1_volume_capabs)
        self.host_manager.update_service_capabilities(service_name, 'host2',
                                                      host2_volume_capabs)
        self.host_manager.update_service_capabilities(service_name, 'host3',
                                                      host3_volume_capabs)

        # Make sure dictionary isn't re-assigned
        self.assertEqual(service_states, self.host_manager.service_states)
        # Make sure original dictionary wasn't copied
        self.assertEqual(1, host1_volume_capabs['timestamp'])

        host1_volume_capabs['timestamp'] = 31337
        host2_volume_capabs['timestamp'] = 31338
        host3_volume_capabs['timestamp'] = 31339

        expected = {'host1': host1_volume_capabs,
                    'host2': host2_volume_capabs,
                    'host3': host3_volume_capabs}
        self.assertDictMatch(expected, service_states)

    @mock.patch('cinder.utils.service_is_up')
    @mock.patch('cinder.db.service_get_all_by_topic')
    def test_has_all_capabilities(self, _mock_service_get_all_by_topic,
                                  _mock_service_is_up):
        _mock_service_is_up.return_value = True
        services = [
            dict(id=1, host='host1', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow()),
            dict(id=2, host='host2', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow()),
            dict(id=3, host='host3', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow()),
        ]
        _mock_service_get_all_by_topic.return_value = services
        # Create host_manager again to let db.service_get_all_by_topic mock
        # run
        self.host_manager = host_manager.HostManager()
        self.assertFalse(self.host_manager.has_all_capabilities())

        host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1)
        host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1)
        host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1)

        service_name = 'volume'
        self.host_manager.update_service_capabilities(service_name, 'host1',
                                                      host1_volume_capabs)
        self.assertFalse(self.host_manager.has_all_capabilities())
        self.host_manager.update_service_capabilities(service_name, 'host2',
                                                      host2_volume_capabs)
        self.assertFalse(self.host_manager.has_all_capabilities())
        self.host_manager.update_service_capabilities(service_name, 'host3',
                                                      host3_volume_capabs)
        self.assertTrue(self.host_manager.has_all_capabilities())

    @mock.patch('cinder.db.service_get_all_by_topic')
    @mock.patch('cinder.utils.service_is_up')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_update_and_get_pools(self, _mock_utcnow,
                                  _mock_service_is_up,
                                  _mock_service_get_all_by_topic):
        """Test interaction between update and get_pools.

        This test verifies that each time that get_pools is called it gets
        the latest copy of service_capabilities, which is timestamped with
        the current date/time.
        """
        context = 'fake_context'
        dates = [datetime.fromtimestamp(400), datetime.fromtimestamp(401),
                 datetime.fromtimestamp(402)]
        _mock_utcnow.side_effect = dates

        services = [
            # This is the first call to utcnow()
            dict(id=1, host='host1', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow()),
        ]

        mocked_service_states = {
            'host1': dict(volume_backend_name='AAA',
                          total_capacity_gb=512, free_capacity_gb=200,
                          timestamp=None, reserved_percentage=0),
        }

        _mock_service_get_all_by_topic.return_value = services
        _mock_service_is_up.return_value = True
        _mock_warning = mock.Mock()
        host_manager.LOG.warn = _mock_warning

        host_volume_capabs = dict(free_capacity_gb=4321)

        service_name = 'volume'
        with mock.patch.dict(self.host_manager.service_states,
                             mocked_service_states):
            self.host_manager.update_service_capabilities(service_name,
                                                          'host1',
                                                          host_volume_capabs)
            res = self.host_manager.get_pools(context)
            self.assertEqual(1, len(res))
            self.assertEqual(dates[1], res[0]['capabilities']['timestamp'])

            self.host_manager.update_service_capabilities(service_name,
                                                          'host1',
                                                          host_volume_capabs)
            res = self.host_manager.get_pools(context)
            self.assertEqual(1, len(res))
            self.assertEqual(dates[2], res[0]['capabilities']['timestamp'])
    @mock.patch('cinder.db.service_get_all_by_topic')
    @mock.patch('cinder.utils.service_is_up')
    def test_get_all_host_states(self, _mock_service_is_up,
                                 _mock_service_get_all_by_topic):
        context = 'fake_context'
        topic = CONF.volume_topic

        services = [
            dict(id=1, host='host1', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow(),
                 binary=None, deleted=False, created_at=None,
                 modified_at=None, report_count=0, deleted_at=None,
                 disabled_reason=None),
            dict(id=2, host='host2', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow(),
                 binary=None, deleted=False, created_at=None,
                 modified_at=None, report_count=0, deleted_at=None,
                 disabled_reason=None),
            dict(id=3, host='host3', topic='volume', disabled=False,
                 availability_zone='zone2', updated_at=timeutils.utcnow(),
                 binary=None, deleted=False, created_at=None,
                 modified_at=None, report_count=0, deleted_at=None,
                 disabled_reason=None),
            dict(id=4, host='host4', topic='volume', disabled=False,
                 availability_zone='zone3', updated_at=timeutils.utcnow(),
                 binary=None, deleted=False, created_at=None,
                 modified_at=None, report_count=0, deleted_at=None,
                 disabled_reason=None),
        ]

        service_objs = []
        for db_service in services:
            service_obj = objects.Service()
            service_objs.append(objects.Service._from_db_object(context,
                                                                service_obj,
                                                                db_service))

        service_states = {
            'host1': dict(volume_backend_name='AAA',
                          total_capacity_gb=512, free_capacity_gb=200,
                          timestamp=None, reserved_percentage=0,
                          provisioned_capacity_gb=312),
            'host2': dict(volume_backend_name='BBB',
                          total_capacity_gb=256, free_capacity_gb=100,
                          timestamp=None, reserved_percentage=0,
                          provisioned_capacity_gb=156),
            'host3': dict(volume_backend_name='CCC',
                          total_capacity_gb=10000, free_capacity_gb=700,
                          timestamp=None, reserved_percentage=0,
                          provisioned_capacity_gb=9300),
        }

        # First test: service_is_up is always True, host4 has no capabilities
        self.host_manager.service_states = service_states
        _mock_service_get_all_by_topic.return_value = services
        _mock_service_is_up.return_value = True
        _mock_warning = mock.Mock()
        host_manager.LOG.warning = _mock_warning

        # Get all states
        self.host_manager.get_all_host_states(context)
        _mock_service_get_all_by_topic.assert_called_with(context,
                                                          topic,
                                                          disabled=False)
        expected = []
        for service in service_objs:
            expected.append(mock.call(service))
        self.assertEqual(expected, _mock_service_is_up.call_args_list)

        # Get host_state_map and make sure we have the first 3 hosts
        host_state_map = self.host_manager.host_state_map
        self.assertEqual(3, len(host_state_map))
        for i in range(3):
            volume_node = services[i]
            host = volume_node['host']
            test_service.TestService._compare(self, volume_node,
                                              host_state_map[host].service)

        # Second test: Now service_is_up returns False for host3
        _mock_service_is_up.reset_mock()
        _mock_service_is_up.side_effect = [True, True, False, True]
        _mock_service_get_all_by_topic.reset_mock()
        _mock_warning.reset_mock()

        # Get all states, make sure host 3 is reported as down
        self.host_manager.get_all_host_states(context)
        _mock_service_get_all_by_topic.assert_called_with(context,
                                                          topic,
                                                          disabled=False)
        self.assertEqual(expected, _mock_service_is_up.call_args_list)
        self.assertTrue(_mock_warning.call_count > 0)

        # Get host_state_map and make sure we have the first 2 hosts (host3
        # is down, host4 is missing capabilities)
        host_state_map = self.host_manager.host_state_map
        self.assertEqual(2, len(host_state_map))
        for i in range(2):
            volume_node = services[i]
            host = volume_node['host']
            test_service.TestService._compare(self, volume_node,
                                              host_state_map[host].service)

    @mock.patch('cinder.db.service_get_all_by_topic')
    @mock.patch('cinder.utils.service_is_up')
    def test_get_pools(self, _mock_service_is_up,
                       _mock_service_get_all_by_topic):
        context = 'fake_context'

        services = [
            dict(id=1, host='host1', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow()),
            dict(id=2, host='host2@back1', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow()),
            dict(id=3, host='host2@back2', topic='volume', disabled=False,
                 availability_zone='zone2', updated_at=timeutils.utcnow()),
        ]

        mocked_service_states = {
            'host1': dict(volume_backend_name='AAA',
                          total_capacity_gb=512, free_capacity_gb=200,
                          timestamp=None, reserved_percentage=0,
                          provisioned_capacity_gb=312),
            'host2@back1': dict(volume_backend_name='BBB',
                                total_capacity_gb=256, free_capacity_gb=100,
                                timestamp=None, reserved_percentage=0,
                                provisioned_capacity_gb=156),
            'host2@back2': dict(volume_backend_name='CCC',
                                total_capacity_gb=10000, free_capacity_gb=700,
                                timestamp=None, reserved_percentage=0,
                                provisioned_capacity_gb=9300),
        }

        _mock_service_get_all_by_topic.return_value = services
        _mock_service_is_up.return_value = True
        _mock_warning = mock.Mock()
        host_manager.LOG.warn = _mock_warning

        with mock.patch.dict(self.host_manager.service_states,
                             mocked_service_states):
            res = self.host_manager.get_pools(context)

            # check if get_pools returns all 3 pools
            self.assertEqual(3, len(res))

            expected = [
                {
                    'name': 'host1#AAA',
                    'capabilities': {
                        'timestamp': None,
                        'volume_backend_name': 'AAA',
                        'free_capacity_gb': 200,
                        'driver_version': None,
                        'total_capacity_gb': 512,
                        'reserved_percentage': 0,
                        'vendor_name': None,
                        'storage_protocol': None,
                        'provisioned_capacity_gb': 312},
                },
                {
                    'name': 'host2@back1#BBB',
                    'capabilities': {
                        'timestamp': None,
                        'volume_backend_name': 'BBB',
                        'free_capacity_gb': 100,
                        'driver_version': None,
                        'total_capacity_gb': 256,
                        'reserved_percentage': 0,
                        'vendor_name': None,
                        'storage_protocol': None,
                        'provisioned_capacity_gb': 156},
                },
                {
                    'name': 'host2@back2#CCC',
                    'capabilities': {
                        'timestamp': None,
                        'volume_backend_name': 'CCC',
                        'free_capacity_gb': 700,
                        'driver_version': None,
                        'total_capacity_gb': 10000,
                        'reserved_percentage': 0,
                        'vendor_name': None,
                        'storage_protocol': None,
                        'provisioned_capacity_gb': 9300},
                }
            ]

            def sort_func(data):
                return data['name']

            self.assertEqual(len(expected), len(res))
            self.assertEqual(sorted(expected, key=sort_func),
                             sorted(res, key=sort_func))
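# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): the pool names asserted in
# test_get_pools() above compose the service host, an optional backend, and
# the pool as 'host[@backend]#pool' (e.g. 'host2@back1#BBB'). A hypothetical
# helper modelling that composition, for illustration only:
def format_pool_name(host, backend=None, pool=None):
    name = host if backend is None else '%s@%s' % (host, backend)
    return name if pool is None else '%s#%s' % (name, pool)

# format_pool_name('host2', 'back1', 'BBB') == 'host2@back1#BBB'
# ---------------------------------------------------------------------------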
class HostStateTestCase(test.TestCase):
    """Test case for HostState class."""

    def test_update_from_volume_capability_nopool(self):
        fake_host = host_manager.HostState('host1')
        self.assertIsNone(fake_host.free_capacity_gb)

        volume_capability = {'total_capacity_gb': 1024,
                             'free_capacity_gb': 512,
                             'provisioned_capacity_gb': 512,
                             'reserved_percentage': 0,
                             'timestamp': None}

        fake_host.update_from_volume_capability(volume_capability)
        # Backend level stats remain uninitialized
        self.assertEqual(0, fake_host.total_capacity_gb)
        self.assertIsNone(fake_host.free_capacity_gb)
        # Pool stats has been updated
        self.assertEqual(1024, fake_host.pools['_pool0'].total_capacity_gb)
        self.assertEqual(512, fake_host.pools['_pool0'].free_capacity_gb)
        self.assertEqual(512,
                         fake_host.pools['_pool0'].provisioned_capacity_gb)

        # Test update for existing host state
        volume_capability.update(dict(total_capacity_gb=1000))
        fake_host.update_from_volume_capability(volume_capability)
        self.assertEqual(1000, fake_host.pools['_pool0'].total_capacity_gb)

        # Test update for existing host state with different backend name
        volume_capability.update(dict(volume_backend_name='magic'))
        fake_host.update_from_volume_capability(volume_capability)
        self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb)
        self.assertEqual(512, fake_host.pools['magic'].free_capacity_gb)
        self.assertEqual(512,
                         fake_host.pools['magic'].provisioned_capacity_gb)
        # 'pool0' becomes nonactive pool, and is deleted
        self.assertRaises(KeyError, lambda: fake_host.pools['pool0'])

    def test_update_from_volume_capability_with_pools(self):
        fake_host = host_manager.HostState('host1')
        self.assertIsNone(fake_host.free_capacity_gb)
        capability = {
            'volume_backend_name': 'Local iSCSI',
            'vendor_name': 'OpenStack',
            'driver_version': '1.0.1',
            'storage_protocol': 'iSCSI',
            'pools': [
                {'pool_name': '1st pool',
                 'total_capacity_gb': 500,
                 'free_capacity_gb': 230,
                 'allocated_capacity_gb': 270,
                 'provisioned_capacity_gb': 270,
                 'QoS_support': 'False',
                 'reserved_percentage': 0,
                 'dying_disks': 100,
                 'super_hero_1': 'spider-man',
                 'super_hero_2': 'flash',
                 'super_hero_3': 'neoncat',
                 },
                {'pool_name': '2nd pool',
                 'total_capacity_gb': 1024,
                 'free_capacity_gb': 1024,
                 'allocated_capacity_gb': 0,
                 'provisioned_capacity_gb': 0,
                 'QoS_support': 'False',
                 'reserved_percentage': 0,
                 'dying_disks': 200,
                 'super_hero_1': 'superman',
                 'super_hero_2': 'Hulk',
                 }
            ],
            'timestamp': None,
        }

        fake_host.update_from_volume_capability(capability)

        self.assertEqual('Local iSCSI', fake_host.volume_backend_name)
        self.assertEqual('iSCSI', fake_host.storage_protocol)
        self.assertEqual('OpenStack', fake_host.vendor_name)
        self.assertEqual('1.0.1', fake_host.driver_version)

        # Backend level stats remain uninitialized
        self.assertEqual(0, fake_host.total_capacity_gb)
        self.assertIsNone(fake_host.free_capacity_gb)
        # Pool stats has been updated
        self.assertEqual(2, len(fake_host.pools))

        self.assertEqual(500, fake_host.pools['1st pool'].total_capacity_gb)
        self.assertEqual(230, fake_host.pools['1st pool'].free_capacity_gb)
        self.assertEqual(270,
                         fake_host.pools['1st pool'].provisioned_capacity_gb)
        self.assertEqual(1024, fake_host.pools['2nd pool'].total_capacity_gb)
        self.assertEqual(1024, fake_host.pools['2nd pool'].free_capacity_gb)
        self.assertEqual(0,
                         fake_host.pools['2nd pool'].provisioned_capacity_gb)

        capability = {
            'volume_backend_name': 'Local iSCSI',
            'vendor_name': 'OpenStack',
            'driver_version': '1.0.2',
            'storage_protocol': 'iSCSI',
            'pools': [
                {'pool_name': '3rd pool',
                 'total_capacity_gb': 10000,
                 'free_capacity_gb': 10000,
                 'allocated_capacity_gb': 0,
                 'provisioned_capacity_gb': 0,
                 'QoS_support': 'False',
                 'reserved_percentage': 0,
                 },
            ],
            'timestamp': None,
        }

        # test update HostState Record
        fake_host.update_from_volume_capability(capability)

        self.assertEqual('1.0.2', fake_host.driver_version)

        # Non-active pool stats has been removed
        self.assertEqual(1, len(fake_host.pools))

        self.assertRaises(KeyError, lambda: fake_host.pools['1st pool'])
        self.assertRaises(KeyError, lambda: fake_host.pools['2nd pool'])

        self.assertEqual(10000, fake_host.pools['3rd pool'].total_capacity_gb)
        self.assertEqual(10000, fake_host.pools['3rd pool'].free_capacity_gb)
        self.assertEqual(0,
                         fake_host.pools['3rd pool'].provisioned_capacity_gb)

    def test_update_from_volume_infinite_capability(self):
        fake_host = host_manager.HostState('host1')
        self.assertIsNone(fake_host.free_capacity_gb)

        volume_capability = {'total_capacity_gb': 'infinite',
                             'free_capacity_gb': 'infinite',
                             'reserved_percentage': 0,
                             'timestamp': None}

        fake_host.update_from_volume_capability(volume_capability)
        # Backend level stats remain uninitialized
        self.assertEqual(0, fake_host.total_capacity_gb)
        self.assertIsNone(fake_host.free_capacity_gb)
        # Pool stats has been updated
        self.assertEqual(
            'infinite',
            fake_host.pools['_pool0'].total_capacity_gb)
        self.assertEqual(
            'infinite',
            fake_host.pools['_pool0'].free_capacity_gb)

    def test_update_from_volume_unknown_capability(self):
        fake_host = host_manager.HostState('host1')
        self.assertIsNone(fake_host.free_capacity_gb)

        volume_capability = {'total_capacity_gb': 'infinite',
                             'free_capacity_gb': 'unknown',
                             'reserved_percentage': 0,
                             'timestamp': None}

        fake_host.update_from_volume_capability(volume_capability)
        # Backend level stats remain uninitialized
        self.assertEqual(0, fake_host.total_capacity_gb)
        self.assertIsNone(fake_host.free_capacity_gb)
        # Pool stats has been updated
        self.assertEqual(
            'infinite',
            fake_host.pools['_pool0'].total_capacity_gb)
        self.assertEqual(
            'unknown',
            fake_host.pools['_pool0'].free_capacity_gb)

    def test_update_from_empty_volume_capability(self):
        fake_host = host_manager.HostState('host1')

        vol_cap = {'timestamp': None}

        fake_host.update_from_volume_capability(vol_cap)
        self.assertEqual(0, fake_host.total_capacity_gb)
        self.assertIsNone(fake_host.free_capacity_gb)
        # Pool stats has been updated
        self.assertEqual(0,
                         fake_host.pools['_pool0'].total_capacity_gb)
        self.assertEqual(0,
                         fake_host.pools['_pool0'].free_capacity_gb)
        self.assertEqual(0,
                         fake_host.pools['_pool0'].provisioned_capacity_gb)


class PoolStateTestCase(test.TestCase):
    """Test case for PoolState class."""

    def test_update_from_volume_capability(self):
        fake_pool = host_manager.PoolState('host1', None, 'pool0')
        self.assertIsNone(fake_pool.free_capacity_gb)

        volume_capability = {'total_capacity_gb': 1024,
                             'free_capacity_gb': 512,
                             'reserved_percentage': 0,
                             'provisioned_capacity_gb': 512,
                             'timestamp': None,
                             'cap1': 'val1',
                             'cap2': 'val2'}

        fake_pool.update_from_volume_capability(volume_capability)
        self.assertEqual('host1#pool0', fake_pool.host)
        self.assertEqual('pool0', fake_pool.pool_name)
        self.assertEqual(1024, fake_pool.total_capacity_gb)
        self.assertEqual(512, fake_pool.free_capacity_gb)
        self.assertEqual(512, fake_pool.provisioned_capacity_gb)

        self.assertDictMatch(volume_capability, fake_pool.capabilities)
cinder-8.0.0/cinder/tests/unit/scheduler/test_weights.py0000664000567000056710000000357712701406250024563 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler weights.
"""

from cinder.scheduler import base_weight
from cinder import test


class TestWeightHandler(test.TestCase):
    def test_no_multiplier(self):
        class FakeWeigher(base_weight.BaseWeigher):
            def _weigh_object(self, *args, **kwargs):
                pass

        self.assertEqual(1.0,
                         FakeWeigher().weight_multiplier())

    def test_no_weight_object(self):
        class FakeWeigher(base_weight.BaseWeigher):
            def weight_multiplier(self, *args, **kwargs):
                pass
        self.assertRaises(TypeError, FakeWeigher)

    def test_normalization(self):
        # weight_list, expected_result, minval, maxval
        map_ = (
            ((), (), None, None),
            ((0.0, 0.0), (0.0, 0.0), None, None),
            ((1.0, 1.0), (0.0, 0.0), None, None),
            ((20.0, 50.0), (0.0, 1.0), None, None),
            ((20.0, 50.0), (0.0, 0.375), None, 100.0),
            ((20.0, 50.0), (0.4, 1.0), 0.0, None),
            ((20.0, 50.0), (0.2, 0.5), 0.0, 100.0),
        )
        for seq, result, minval, maxval in map_:
            ret = base_weight.normalize(seq, minval=minval, maxval=maxval)
            self.assertEqual(result, tuple(ret))
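# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): a minimal model of the normalize()
# behavior the table above pins down -- scale each weight into [0, 1] relative
# to the given (or observed) bounds, and collapse to zeros when the range is
# empty. The real helper lives in cinder/scheduler/base_weight.py.
def normalize(weight_list, minval=None, maxval=None):
    if not weight_list:
        return ()
    if maxval is None:
        maxval = max(weight_list)
    if minval is None:
        minval = min(weight_list)
    range_ = maxval - minval
    if range_ == 0:
        return [0.0] * len(weight_list)     # e.g. (1.0, 1.0) -> (0.0, 0.0)
    return ((weight - minval) / range_ for weight in weight_list)

# normalize((20.0, 50.0)) yields (0.0, 1.0); with maxval=100.0 it yields
# (0.0, 0.375), matching the expectations exercised above.
# ---------------------------------------------------------------------------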
""" import mock from oslo_serialization import jsonutils from requests import exceptions as request_exceptions from cinder.compute import nova from cinder import context from cinder import db from cinder import exception from cinder.scheduler import filters from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.scheduler import fakes from cinder.tests.unit import utils class HostFiltersTestCase(test.TestCase): """Test case for host filters.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext(fake.user_id, fake.project_id) # This has a side effect of testing 'get_filter_classes' # when specifying a method (in this case, our standard filters) filter_handler = filters.HostFilterHandler('cinder.scheduler.filters') classes = filter_handler.get_all_classes() self.class_map = {} for cls in classes: self.class_map[cls.__name__] = cls class CapacityFilterTestCase(HostFiltersTestCase): def setUp(self): super(CapacityFilterTestCase, self).setUp() self.json_query = jsonutils.dumps( ['and', ['>=', '$free_capacity_gb', 1024], ['>=', '$total_capacity_gb', 10 * 1024]]) @mock.patch('cinder.utils.service_is_up') def test_filter_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_current_host_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'vol_exists_on': 'host1'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 100, 'free_capacity_gb': 10, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 200, 'free_capacity_gb': 120, 'reserved_percentage': 20, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails_free_capacity_None(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': None, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_passes_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'infinite', 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_passes_unknown(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties 
= {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'unknown', 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_passes_total_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'infinite', 'total_capacity_gb': 'infinite', 'reserved_percentage': 0, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_passes_total_unknown(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'unknown', 'total_capacity_gb': 'unknown', 'reserved_percentage': 0, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails_total_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 'infinite', 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails_total_unknown(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 'unknown', 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails_total_zero(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 0, 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_thin_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 500, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_thin_true_passes2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 3000, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' 
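    # Editorial note on the thin-provisioning cases that follow: with
    # thin_provisioning_support the filter sizes requests against virtual
    # capacity, i.e. total_capacity_gb * max_over_subscription_ratio. In
    # the next test, total=500 and ratio=20 advertise 10000 GB, so
    # provisioned 7000 GB plus a requested 3000 GB still fits, while later
    # cases show ratios <= 1.0 or exhausted virtual space being rejected.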
False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 7000, 'max_over_subscription_ratio': 20, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_thin_false_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' False', 'capabilities:thick_provisioning_support': ' True'} service = {'disabled': False} # If "thin_provisioning_support" is False, # "max_over_subscription_ratio" will be ignored. host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 300, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 5, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_over_subscription_less_than_1(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 200, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 0.8, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_over_subscription_equal_to_1(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 150, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_over_subscription_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 700, 'max_over_subscription_ratio': 1.5, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_over_subscription_fails2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() 
filter_properties = {'size': 2000, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 30, 'provisioned_capacity_gb': 9000, 'max_over_subscription_ratio': 20, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_true_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 1000, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_false_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' False', 'capabilities:thick_provisioning_support': ' True'} service = {'disabled': False} # If "thin_provisioning_support" is False, # "max_over_subscription_ratio" will be ignored. host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 5, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' True'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 0, 'provisioned_capacity_gb': 800, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_thick_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' True'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 125, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def 
test_filter_reserved_thin_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 80, 'provisioned_capacity_gb': 600, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_thick_true_fails2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' True'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 99, 'provisioned_capacity_gb': 1000, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_thick_true_passes2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' True'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) class AffinityFilterTestCase(HostFiltersTestCase): @mock.patch('cinder.utils.service_is_up') def test_different_filter_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['DifferentBackendFilter']() service = {'disabled': False} host = fakes.FakeHostState('host1:pool0', {'free_capacity_gb': '1000', 'updated_at': None, 'service': service}) volume = utils.create_volume(self.context, host='host1:pool1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_different_filter_legacy_volume_hint_passes( self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['DifferentBackendFilter']() service = {'disabled': False} host = fakes.FakeHostState('host1:pool0', {'free_capacity_gb': '1000', 'updated_at': None, 'service': service}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_different_filter_non_list_fails(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host2', {}) volume = utils.create_volume(self.context, host='host2') 
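# ----------------------------------------------------------------------
# Illustrative aside (not part of the original test file): a minimal
# sketch of the capacity arithmetic the CapacityFilter tests above
# exercise. `virtual_free_gb` is a hypothetical helper name; the real
# logic lives in cinder/scheduler/filters/capacity_filter.py and also
# handles 'infinite'/'unknown' capacities, which this sketch does not.
import math

def virtual_free_gb(total, free, provisioned, ratio, reserved_pct,
                    thin=True):
    """Capacity (GiB) the scheduler can still promise on a backend."""
    reserved = math.floor(total * reserved_pct / 100.0)
    if thin and ratio >= 1.0:
        # Thin provisioning: weigh against over-subscribed capacity.
        return total * ratio - provisioned - reserved
    # Thick provisioning (or ratio < 1): only real free space counts.
    return free - reserved

# Mirrors test_filter_over_subscription_fails above:
# 500 * 1.5 - 700 - floor(500 * 0.05) = 25 GiB < the requested 100 GiB.
assert virtual_free_gb(500, 200, 700, 1.5, 5) == 25
# (test_different_filter_non_list_fails continues below)
# ----------------------------------------------------------------------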
vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': vol_id}} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_different_filter_fails(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_different_filter_handles_none(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': None} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_different_filter_handles_deleted_instance(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id db.volume_destroy(utils.get_test_admin_context(), vol_id) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_different_filter_fail_nonuuid_hint(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': "NOT-a-valid-UUID", }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_different_filter_handles_multiple_uuids(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1#pool0', {}) volume1 = utils.create_volume(self.context, host='host1:pool1') vol_id1 = volume1.id volume2 = utils.create_volume(self.context, host='host1:pool3') vol_id2 = volume2.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id1, vol_id2], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_different_filter_handles_invalid_uuids(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host2') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id, "NOT-a-valid-UUID"], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_same_filter_no_list_passes(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': vol_id}} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_same_filter_passes(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1#pool0') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_same_filter_legacy_vol_fails(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = 
{'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_same_filter_fails(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1#pool1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_same_filter_vol_list_pass(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) volume1 = utils.create_volume(self.context, host='host1') vol_id1 = volume1.id volume2 = utils.create_volume(self.context, host='host2') vol_id2 = volume2.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id1, vol_id2], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_same_filter_handles_none(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': None} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_same_filter_handles_deleted_instance(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host2') vol_id = volume.id db.volume_destroy(utils.get_test_admin_context(), vol_id) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_same_filter_fail_nonuuid_hint(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': "NOT-a-valid-UUID", }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) class DriverFilterTestCase(HostFiltersTestCase): def test_passing_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 1', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_failing_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 2', } }) filter_properties = {'volume_type': {}} self.assertFalse(filt_cls.host_passes(host1, filter_properties)) def test_no_filter_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': None, } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_not_implemented(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': {} }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_no_volume_extra_specs(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 1', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_extra_spec_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 
'capabilities': { 'filter_function': 'extra.var == 1', } }) filter_properties = { 'volume_type': { 'extra_specs': { 'var': 1, } } } self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_stats_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'total_capacity_gb': 100, 'capabilities': { 'filter_function': 'stats.total_capacity_gb < 200', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_volume_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'volume.size < 5', } }) filter_properties = { 'request_spec': { 'volume_properties': { 'size': 1 } } } self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_qos_spec_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'qos.var == 1', } }) filter_properties = { 'qos_specs': { 'var': 1 } } self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_exception_caught(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 / 0 == 0', } }) filter_properties = {} self.assertFalse(filt_cls.host_passes(host1, filter_properties)) def test_function_empty_qos(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'qos.maxiops == 1', } }) filter_properties = { 'qos_specs': None } self.assertFalse(filt_cls.host_passes(host1, filter_properties)) def test_capabilities(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'foo': 10, 'filter_function': 'capabilities.foo == 10', }, }) filter_properties = {} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_wrong_capabilities(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'bar': 10, 'filter_function': 'capabilities.foo == 10', }, }) filter_properties = {} self.assertFalse(filt_cls.host_passes(host1, filter_properties)) class InstanceLocalityFilterTestCase(HostFiltersTestCase): def setUp(self): super(InstanceLocalityFilterTestCase, self).setUp() self.override_config('nova_endpoint_template', 'http://novahost:8774/v2/%(project_id)s') self.context.service_catalog = \ [{'type': 'compute', 'name': 'nova', 'endpoints': [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]}, {'type': 'identity', 'name': 'keystone', 'endpoints': [{'publicURL': 'http://keystonehost:5000/v2.0'}]}] @mock.patch('novaclient.client.discover_extensions') @mock.patch('cinder.compute.nova.novaclient') def test_same_host(self, _mock_novaclient, fake_extensions): _mock_novaclient.return_value = fakes.FakeNovaClient() fake_extensions.return_value = ( fakes.FakeNovaClient().list_extensions.show_all()) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) uuid = nova.novaclient().servers.create('host1') filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': uuid}} self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('novaclient.client.discover_extensions') @mock.patch('cinder.compute.nova.novaclient') def test_different_host(self, _mock_novaclient, fake_extensions): _mock_novaclient.return_value = 
fakes.FakeNovaClient() fake_extensions.return_value = ( fakes.FakeNovaClient().list_extensions.show_all()) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) uuid = nova.novaclient().servers.create('host2') filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': uuid}} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_handles_none(self): filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context, 'scheduler_hints': None} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_invalid_uuid(self): filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': 'e29b11d4-not-valid-a716'}} self.assertRaises(exception.InvalidUUID, filt_cls.host_passes, host, filter_properties) @mock.patch('cinder.compute.nova.novaclient') def test_nova_no_extended_server_attributes(self, _mock_novaclient): _mock_novaclient.return_value = fakes.FakeNovaClient( ext_srv_attr=False) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) uuid = nova.novaclient().servers.create('host1') filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': uuid}} self.assertRaises(exception.CinderException, filt_cls.host_passes, host, filter_properties) @mock.patch('cinder.compute.nova.novaclient') def test_nova_down_does_not_alter_other_filters(self, _mock_novaclient): # Simulate Nova API is not available _mock_novaclient.side_effect = Exception filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context, 'size': 100} self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('novaclient.client.discover_extensions') @mock.patch('requests.request') def test_nova_timeout(self, _mock_request, fake_extensions): # Simulate a HTTP timeout _mock_request.side_effect = request_exceptions.Timeout fake_extensions.return_value = ( fakes.FakeNovaClient().list_extensions.show_all()) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = \ {'context': self.context, 'scheduler_hints': {'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'}} self.assertRaises(exception.APITimeout, filt_cls.host_passes, host, filter_properties) class TestFilter(filters.BaseHostFilter): pass class TestBogusFilter(object): """Class that doesn't inherit from BaseHostFilter.""" pass class ExtraSpecsOpsTestCase(test.TestCase): def _do_extra_specs_ops_test(self, value, req, matches): assertion = self.assertTrue if matches else self.assertFalse assertion(filters.extra_specs_ops.match(value, req)) def test_extra_specs_matches_simple(self): self._do_extra_specs_ops_test( value='1', req='1', matches=True) def test_extra_specs_fails_simple(self): self._do_extra_specs_ops_test( value='', req='1', matches=False) def test_extra_specs_fails_simple2(self): self._do_extra_specs_ops_test( value='3', req='1', matches=False) def test_extra_specs_fails_simple3(self): self._do_extra_specs_ops_test( value='222', req='2', matches=False) def test_extra_specs_fails_with_bogus_ops(self): self._do_extra_specs_ops_test( value='4', req='> 2', matches=False) def test_extra_specs_matches_with_op_eq(self): self._do_extra_specs_ops_test( value='123', req='= 123', 
            matches=True)

    def test_extra_specs_matches_with_op_eq2(self):
        self._do_extra_specs_ops_test(value='124', req='= 123', matches=True)

    def test_extra_specs_fails_with_op_eq(self):
        self._do_extra_specs_ops_test(value='34', req='= 234', matches=False)

    def test_extra_specs_fails_with_op_eq3(self):
        self._do_extra_specs_ops_test(value='34', req='=', matches=False)

    def test_extra_specs_matches_with_op_seq(self):
        self._do_extra_specs_ops_test(value='123', req='s== 123', matches=True)

    def test_extra_specs_fails_with_op_seq(self):
        self._do_extra_specs_ops_test(value='1234', req='s== 123', matches=False)

    def test_extra_specs_matches_with_op_sneq(self):
        self._do_extra_specs_ops_test(value='1234', req='s!= 123', matches=True)

    def test_extra_specs_fails_with_op_sneq(self):
        self._do_extra_specs_ops_test(value='123', req='s!= 123', matches=False)

    def test_extra_specs_fails_with_op_sge(self):
        self._do_extra_specs_ops_test(value='1000', req='s>= 234', matches=False)

    def test_extra_specs_fails_with_op_sle(self):
        self._do_extra_specs_ops_test(value='1234', req='s<= 1000', matches=False)

    def test_extra_specs_fails_with_op_sl(self):
        self._do_extra_specs_ops_test(value='2', req='s< 12', matches=False)

    def test_extra_specs_fails_with_op_sg(self):
        self._do_extra_specs_ops_test(value='12', req='s> 2', matches=False)

    def test_extra_specs_matches_with_op_in(self):
        self._do_extra_specs_ops_test(value='12311321', req='<in> 11', matches=True)

    def test_extra_specs_matches_with_op_in2(self):
        self._do_extra_specs_ops_test(value='12311321', req='<in> 12311321', matches=True)

    def test_extra_specs_matches_with_op_in3(self):
        self._do_extra_specs_ops_test(value='12311321', req='<in> 12311321 <in>', matches=True)

    def test_extra_specs_fails_with_op_in(self):
        self._do_extra_specs_ops_test(value='12310321', req='<in> 11', matches=False)

    def test_extra_specs_fails_with_op_in2(self):
        self._do_extra_specs_ops_test(value='12310321', req='<in> 11 <in>', matches=False)

    def test_extra_specs_matches_with_op_is(self):
        self._do_extra_specs_ops_test(value=True, req='<is> True', matches=True)

    def test_extra_specs_matches_with_op_is2(self):
        self._do_extra_specs_ops_test(value=False, req='<is> False', matches=True)

    def test_extra_specs_matches_with_op_is3(self):
        self._do_extra_specs_ops_test(value=False, req='<is> Nonsense', matches=True)

    def test_extra_specs_fails_with_op_is(self):
        self._do_extra_specs_ops_test(value=True, req='<is> False', matches=False)

    def test_extra_specs_fails_with_op_is2(self):
        self._do_extra_specs_ops_test(value=False, req='<is> True', matches=False)

    def test_extra_specs_matches_with_op_or(self):
        self._do_extra_specs_ops_test(value='12', req='<or> 11 <or> 12', matches=True)

    def test_extra_specs_matches_with_op_or2(self):
        self._do_extra_specs_ops_test(value='12', req='<or> 11 <or> 12 <or>', matches=True)

    def test_extra_specs_fails_with_op_or(self):
        self._do_extra_specs_ops_test(value='13', req='<or> 11 <or> 12', matches=False)

    def test_extra_specs_fails_with_op_or2(self):
        self._do_extra_specs_ops_test(value='13', req='<or> 11 <or> 12 <or>', matches=False)

    def test_extra_specs_matches_with_op_le(self):
        self._do_extra_specs_ops_test(value='2', req='<= 10', matches=True)

    def test_extra_specs_fails_with_op_le(self):
        self._do_extra_specs_ops_test(value='3', req='<= 2', matches=False)

    def test_extra_specs_matches_with_op_ge(self):
        self._do_extra_specs_ops_test(value='3', req='>= 1', matches=True)

    def test_extra_specs_fails_with_op_ge(self):
        self._do_extra_specs_ops_test(value='2', req='>= 3', matches=False)


class BasicFiltersTestCase(HostFiltersTestCase):
    """Test case for host filters."""
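# ----------------------------------------------------------------------
# Illustrative aside (not part of the original test file): a rough,
# assumption-labelled sketch of the extra-specs operator mini-language
# the ExtraSpecsOpsTestCase above exercises. Note that the bare numeric
# '=' operator conventionally means "equal to or greater than", which is
# why value='124' matches req='= 123'. The real matcher is
# cinder.scheduler.filters.extra_specs_ops.match; this is not it, and
# only a few operators are sketched (unknown ones fall back to equality).

def sketch_match(value, req):
    words = req.split()
    if not words:
        return str(value) == req
    op, args = words[0], words[1:]
    if op == '=':                      # numeric: equal to or greater
        return args != [] and float(value) >= float(args[0])
    if op == 's==':                    # literal string equality
        return str(value) == args[0]
    if op == '<in>':                   # substring containment
        return args[0] in str(value)
    if op == '<or>':                   # any listed alternative
        return str(value) in args
    return str(value) == req           # no recognized operator

assert sketch_match('124', '= 123')
assert not sketch_match('34', '= 234')
assert sketch_match('12311321', '<in> 11')
assert sketch_match('12', '<or> 11 <or> 12')
# (BasicFiltersTestCase continues below)
# ----------------------------------------------------------------------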
def setUp(self): super(BasicFiltersTestCase, self).setUp() self.json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024]]) def test_all_filters(self): # Double check at least a couple of known filters exist self.assertTrue('JsonFilter' in self.class_map) self.assertTrue('CapabilitiesFilter' in self.class_map) self.assertTrue('AvailabilityZoneFilter' in self.class_map) self.assertTrue('IgnoreAttemptedHostsFilter' in self.class_map) def _do_test_type_filter_extra_specs(self, ecaps, especs, passes): filt_cls = self.class_map['CapabilitiesFilter']() capabilities = {'enabled': True} capabilities.update(ecaps) service = {'disabled': False} filter_properties = {'resource_type': {'name': 'fake_type', 'extra_specs': especs}} host = fakes.FakeHostState('host1', {'free_capacity_gb': 1024, 'capabilities': capabilities, 'service': service}) assertion = self.assertTrue if passes else self.assertFalse assertion(filt_cls.host_passes(host, filter_properties)) def test_capability_filter_passes_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) def test_capability_filter_fails_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '222'}, passes=False) def test_capability_filter_passes_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '<= 8'}, passes=True) def test_capability_filter_fails_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '>= 8'}, passes=False) def test_capability_filter_passes_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=True) def test_capability_filter_passes_fakescope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}, 'opt2': 5}, especs={'scope_lv1:opt1': '= 2', 'opt2': '>= 3'}, passes=True) def test_capability_filter_fails_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '<= 2'}, passes=False) def test_capability_filter_passes_multi_level_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'scope_lv1': {'scope_lv2': {'opt1': 10}}}}, especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, passes=True) def test_capability_filter_fails_wrong_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=False) def test_json_filter_passes(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_json_filter_passes_with_no_query(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 0, 'free_disk_mb': 0, 'capabilities': capabilities}) 
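# ----------------------------------------------------------------------
# Illustrative aside (not part of the original test file): the scoped
# extra specs above ('capabilities:scope_lv0:scope_lv1:...:opt1') are
# resolved by walking nested capability dicts one scope at a time. A
# minimal sketch of that walk, assuming the filter simply descends dict
# levels (the helper name `resolve_scope` is made up):

def resolve_scope(capabilities, key):
    parts = key.split(':')
    if parts[0] == 'capabilities':     # leading scope names the source
        parts = parts[1:]
    node = capabilities
    for part in parts:
        if not isinstance(node, dict) or part not in node:
            return None                # wrong scope: spec cannot match
        node = node[part]
    return node

caps = {'scope_lv0': {'scope_lv1': {'scope_lv2': {'opt1': 10}}}}
assert resolve_scope(caps,
                     'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1') == 10
assert resolve_scope(caps, 'capabilities:scope_lv1:opt1') is None
# (test_json_filter_passes_with_no_query continues below)
# ----------------------------------------------------------------------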
self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_json_filter_fails_on_memory(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1023, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_fails_on_disk(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': (200 * 1024) - 1, 'capabilities': capabilities}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_fails_on_caps_disabled(self): filt_cls = self.class_map['JsonFilter']() json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], '$capabilities.enabled']) filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': json_query}} capabilities = {'enabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_fails_on_service_disabled(self): filt_cls = self.class_map['JsonFilter']() json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], ['not', '$service.disabled']]) filter_properties = {'resource_type': {'memory_mb': 1024, 'local_gb': 200}, 'scheduler_hints': {'query': json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_happy_day(self): """Test json filter more thoroughly.""" filt_cls = self.class_map['JsonFilter']() raw = ['and', '$capabilities.enabled', ['=', '$capabilities.opt1', 'match'], ['or', ['and', ['<', '$free_ram_mb', 30], ['<', '$free_disk_mb', 300]], ['and', ['>', '$free_ram_mb', 30], ['>', '$free_disk_mb', 300]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 10, 'free_disk_mb': 200, 'capabilities': capabilities, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) # Fails due to capabilities being disabled capabilities = {'enabled': False, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) # Fails due to being exact memory/disk we don't want capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', 
{'free_ram_mb': 30, 'free_disk_mb': 300, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) # Fails due to memory lower but disk higher capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) # Fails due to capabilities 'opt1' not equal capabilities = {'enabled': True, 'opt1': 'no-match'} service = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_basic_operators(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) # (operator, arguments, expected_result) ops_to_test = [ ['=', [1, 1], True], ['=', [1, 2], False], ['<', [1, 2], True], ['<', [1, 1], False], ['<', [2, 1], False], ['>', [2, 1], True], ['>', [2, 2], False], ['>', [2, 3], False], ['<=', [1, 2], True], ['<=', [1, 1], True], ['<=', [2, 1], False], ['>=', [2, 1], True], ['>=', [2, 2], True], ['>=', [2, 3], False], ['in', [1, 1], True], ['in', [1, 1, 2, 3], True], ['in', [4, 1, 2, 3], False], ['not', [True], False], ['not', [False], True], ['or', [True, False], True], ['or', [False, False], False], ['and', [True, True], True], ['and', [False, False], False], ['and', [True, False], False], # Nested ((True or False) and (2 > 1)) == Passes ['and', [['or', True, False], ['>', 2, 1]], True]] for (op, args, expected) in ops_to_test: raw = [op] + args filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertEqual(expected, filt_cls.host_passes(host, filter_properties)) # This results in [False, True, False, True] and if any are True # then it passes... 
raw = ['not', True, False, True, False] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) # This results in [False, False, False] and if any are True # then it passes...which this doesn't raw = ['not', True, True, True] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_unknown_operator_raises(self): filt_cls = self.class_map['JsonFilter']() raw = ['!=', 1, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) self.assertRaises(KeyError, filt_cls.host_passes, host, filter_properties) def test_json_filter_empty_filters_pass(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = [] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) raw = {} filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_json_filter_invalid_num_arguments_fails(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.host_passes(host, filter_properties)) raw = ['>', 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_unknown_variable_ignored(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = ['=', '$........', 1, 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) raw = ['=', '$foo', 2, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) @staticmethod def _make_zone_request(zone, is_admin=False): ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) return { 'context': ctxt, 'request_spec': { 'resource_properties': { 'availability_zone': zone } } } def test_availability_zone_filter_same(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = self._make_zone_request('nova') host = fakes.FakeHostState('host1', {'service': service}) self.assertTrue(filt_cls.host_passes(host, request)) def test_availability_zone_filter_different(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = self._make_zone_request('bad') host = fakes.FakeHostState('host1', {'service': service}) self.assertFalse(filt_cls.host_passes(host, request)) def test_availability_zone_filter_empty(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = {} host = fakes.FakeHostState('host1', {'service': service}) self.assertTrue(filt_cls.host_passes(host, request)) def test_ignore_attempted_hosts_filter_disabled(self): # Test case where re-scheduling is disabled. 
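# ----------------------------------------------------------------------
# Illustrative aside (not part of the original test file): the JsonFilter
# queries above are JSON s-expressions evaluated recursively. A minimal
# evaluator sketch under the assumption of a simple operator table; the
# real filter also resolves '$variable' references against host state.
import json

_OPS = {
    '=': lambda args: args[0] == args[1],
    '<': lambda args: args[0] < args[1],
    '>': lambda args: args[0] > args[1],
    'not': lambda args: [not a for a in args],  # note: returns a list
    'or': lambda args: any(args),
    'and': lambda args: all(args),
}

def sketch_eval(node):
    if not isinstance(node, list) or not node:
        return node
    op, raw_args = node[0], node[1:]
    return _OPS[op]([sketch_eval(a) for a in raw_args])

def sketch_passes(query):
    result = sketch_eval(json.loads(query))
    if isinstance(result, list):
        # A list result (e.g. from 'not') passes if any element is true.
        result = any(result)
    return bool(result)

# Nested ((True or False) and (2 > 1)) passes, as in ops_to_test above:
assert sketch_passes('["and", ["or", true, false], [">", 2, 1]]')
assert not sketch_passes('["not", true, true, true]')
# (test_ignore_attempted_hosts_filter_disabled continues below)
# ----------------------------------------------------------------------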
        filt_cls = self.class_map['IgnoreAttemptedHostsFilter']()
        host = fakes.FakeHostState('host1', {})
        filter_properties = {}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_ignore_attempted_hosts_filter_pass(self):
        # Node not previously tried.
        filt_cls = self.class_map['IgnoreAttemptedHostsFilter']()
        host = fakes.FakeHostState('host1', {})
        attempted = dict(num_attempts=2, hosts=['host2'])
        filter_properties = dict(retry=attempted)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_ignore_attempted_hosts_filter_fail(self):
        # Node was already tried.
        filt_cls = self.class_map['IgnoreAttemptedHostsFilter']()
        host = fakes.FakeHostState('host1', {})
        attempted = dict(num_attempts=2, hosts=['host1'])
        filter_properties = dict(retry=attempted)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

cinder-8.0.0/cinder/tests/unit/scheduler/test_scheduler.py

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Tests For Scheduler
"""

import mock
from oslo_config import cfg

from cinder import context
from cinder import db
from cinder import exception
from cinder.objects import fields
from cinder.scheduler import driver
from cinder.scheduler import filter_scheduler
from cinder.scheduler import manager
from cinder import test
from cinder.tests.unit import fake_consistencygroup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils

CONF = cfg.CONF


class SchedulerManagerTestCase(test.TestCase):
    """Test case for scheduler manager."""

    manager_cls = manager.SchedulerManager
    driver_cls = driver.Scheduler
    driver_cls_name = 'cinder.scheduler.driver.Scheduler'

    class AnException(Exception):
        pass

    def setUp(self):
        super(SchedulerManagerTestCase, self).setUp()
        self.flags(scheduler_driver=self.driver_cls_name)
        self.manager = self.manager_cls()
        self.manager._startup_delay = False
        self.context = context.get_admin_context()
        self.topic = 'fake_topic'
        self.fake_args = (1, 2, 3)
        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}

    def test_1_correct_init(self):
        # Correct scheduler driver
        manager = self.manager
        self.assertIsInstance(manager.driver, self.driver_cls)

    @mock.patch('eventlet.sleep')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities')
    def test_init_host_with_rpc(self, publish_capabilities_mock, sleep_mock):
        self.manager._startup_delay = True
        self.manager.init_host_with_rpc()
        publish_capabilities_mock.assert_called_once_with(mock.ANY)
        sleep_mock.assert_called_once_with(CONF.periodic_interval)
        self.assertFalse(self.manager._startup_delay)

    @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-volume': '1.3'})
    @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-volume': '1.5'})
    def test_reset(self):
        mgr = self.manager_cls()
        volume_rpcapi =
mgr.driver.volume_rpcapi self.assertEqual('1.3', volume_rpcapi.client.version_cap) self.assertEqual('1.5', volume_rpcapi.client.serializer._base.version_cap) mgr.reset() volume_rpcapi = mgr.driver.volume_rpcapi self.assertIsNone(volume_rpcapi.client.version_cap) self.assertIsNone(volume_rpcapi.client.serializer._base.version_cap) @mock.patch('cinder.scheduler.driver.Scheduler.' 'update_service_capabilities') def test_update_service_capabilities_empty_dict(self, _mock_update_cap): # Test no capabilities passes empty dictionary service = 'fake_service' host = 'fake_host' self.manager.update_service_capabilities(self.context, service_name=service, host=host) _mock_update_cap.assert_called_once_with(service, host, {}) @mock.patch('cinder.scheduler.driver.Scheduler.' 'update_service_capabilities') def test_update_service_capabilities_correct(self, _mock_update_cap): # Test capabilities passes correctly service = 'fake_service' host = 'fake_host' capabilities = {'fake_capability': 'fake_value'} self.manager.update_service_capabilities(self.context, service_name=service, host=host, capabilities=capabilities) _mock_update_cap.assert_called_once_with(service, host, capabilities) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('cinder.db.volume_update') def test_create_volume_exception_puts_volume_in_error_state( self, _mock_volume_update, _mock_sched_create): # Test NoValidHost exception behavior for create_volume. # Puts the volume in 'error' state and eats the exception. _mock_sched_create.side_effect = exception.NoValidHost(reason="") volume = fake_volume.fake_volume_obj(self.context) topic = 'fake_topic' request_spec = {'volume_id': volume.id} self.manager.create_volume(self.context, topic, volume.id, request_spec=request_spec, filter_properties={}, volume=volume) _mock_volume_update.assert_called_once_with(self.context, volume.id, {'status': 'error'}) _mock_sched_create.assert_called_once_with(self.context, request_spec, {}) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('eventlet.sleep') def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create): volume = fake_volume.fake_volume_obj(self.context) topic = 'fake_topic' request_spec = {'volume_id': volume.id} self.manager.create_volume(self.context, topic, volume.id, request_spec=request_spec, filter_properties={}, volume=volume) _mock_sched_create.assert_called_once_with(self.context, request_spec, {}) self.assertFalse(_mock_sleep.called) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('cinder.scheduler.driver.Scheduler.is_ready') @mock.patch('eventlet.sleep') def test_create_volume_delay_scheduled_after_3_tries(self, _mock_sleep, _mock_is_ready, _mock_sched_create): self.manager._startup_delay = True volume = fake_volume.fake_volume_obj(self.context) topic = 'fake_topic' request_spec = {'volume_id': volume.id} _mock_is_ready.side_effect = [False, False, True] self.manager.create_volume(self.context, topic, volume.id, request_spec=request_spec, filter_properties={}, volume=volume) _mock_sched_create.assert_called_once_with(self.context, request_spec, {}) calls = [mock.call(1)] * 2 _mock_sleep.assert_has_calls(calls) self.assertEqual(2, _mock_sleep.call_count) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('cinder.scheduler.driver.Scheduler.is_ready') @mock.patch('eventlet.sleep') def test_create_volume_delay_scheduled_in_1_try(self, _mock_sleep, _mock_is_ready, _mock_sched_create): 
self.manager._startup_delay = True volume = fake_volume.fake_volume_obj(self.context) topic = 'fake_topic' request_spec = {'volume_id': volume.id} _mock_is_ready.return_value = True self.manager.create_volume(self.context, topic, volume.id, request_spec=request_spec, filter_properties={}, volume=volume) _mock_sched_create.assert_called_once_with(self.context, request_spec, {}) self.assertFalse(_mock_sleep.called) @mock.patch('cinder.db.volume_get') @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters') @mock.patch('cinder.db.volume_update') def test_migrate_volume_exception_returns_volume_state( self, _mock_volume_update, _mock_host_passes, _mock_volume_get): # Test NoValidHost exception behavior for migrate_volume_to_host. # Puts the volume in 'error_migrating' state and eats the exception. fake_updates = {'migration_status': 'error'} self._test_migrate_volume_exception_returns_volume_state( _mock_volume_update, _mock_host_passes, _mock_volume_get, 'available', fake_updates) @mock.patch('cinder.db.volume_get') @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters') @mock.patch('cinder.db.volume_update') def test_migrate_volume_exception_returns_volume_state_maintenance( self, _mock_volume_update, _mock_host_passes, _mock_volume_get): fake_updates = {'status': 'available', 'migration_status': 'error'} self._test_migrate_volume_exception_returns_volume_state( _mock_volume_update, _mock_host_passes, _mock_volume_get, 'maintenance', fake_updates) def _test_migrate_volume_exception_returns_volume_state( self, _mock_volume_update, _mock_host_passes, _mock_volume_get, status, fake_updates): volume = tests_utils.create_volume(self.context, status=status, previous_status='available') fake_volume_id = volume.id topic = 'fake_topic' request_spec = {'volume_id': fake_volume_id} _mock_host_passes.side_effect = exception.NoValidHost(reason="") _mock_volume_get.return_value = volume self.manager.migrate_volume_to_host(self.context, topic, fake_volume_id, 'host', True, request_spec=request_spec, filter_properties={}) _mock_volume_update.assert_called_once_with(self.context, fake_volume_id, fake_updates) _mock_host_passes.assert_called_once_with(self.context, 'host', request_spec, {}) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.db.volume_attachment_get_used_by_volume_id') def test_retype_volume_exception_returns_volume_state( self, _mock_vol_attachment_get, _mock_vol_update): # Test NoValidHost exception behavior for retype. # Puts the volume in original state and eats the exception. 
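# ----------------------------------------------------------------------
# Illustrative aside (not part of the original test file): the stacked
# @mock.patch decorators used throughout these tests apply bottom-up,
# so the decorator closest to the function supplies the *first* mock
# argument. That is why test_retype_volume_exception_returns_volume_state
# above receives _mock_vol_attachment_get before _mock_vol_update. A
# self-contained sketch (os.getpid/os.getcwd are arbitrary targets):
import os

import mock

@mock.patch('os.getcwd')        # outermost -> last argument
@mock.patch('os.getpid')        # innermost -> first argument
def _demo(mock_getpid, mock_getcwd):
    mock_getpid.return_value = 42
    mock_getcwd.return_value = '/nowhere'
    return os.getpid(), os.getcwd()

assert _demo() == (42, '/nowhere')
# (test_retype_volume_exception_returns_volume_state continues below)
# ----------------------------------------------------------------------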
volume = tests_utils.create_volume(self.context, status='retyping', previous_status='in-use') instance_uuid = '12345678-1234-5678-1234-567812345678' volume_attach = tests_utils.attach_volume(self.context, volume.id, instance_uuid, None, '/dev/fake') _mock_vol_attachment_get.return_value = [volume_attach] topic = 'fake_topic' request_spec = {'volume_id': volume.id, 'volume_type': {'id': 3}, 'migration_policy': 'on-demand'} _mock_vol_update.return_value = {'status': 'in-use'} _mock_find_retype_host = mock.Mock( side_effect=exception.NoValidHost(reason="")) orig_retype = self.manager.driver.find_retype_host self.manager.driver.find_retype_host = _mock_find_retype_host self.manager.retype(self.context, topic, volume.id, request_spec=request_spec, filter_properties={}, volume=volume) _mock_find_retype_host.assert_called_once_with(self.context, request_spec, {}, 'on-demand') _mock_vol_update.assert_called_once_with(self.context, volume.id, {'status': 'in-use'}) self.manager.driver.find_retype_host = orig_retype def test_create_consistencygroup_exceptions(self): with mock.patch.object(filter_scheduler.FilterScheduler, 'schedule_create_consistencygroup') as mock_cg: original_driver = self.manager.driver consistencygroup_obj = \ fake_consistencygroup.fake_consistencyobject_obj(self.context) self.manager.driver = filter_scheduler.FilterScheduler LOG = self.mock_object(manager, 'LOG') self.stubs.Set(db, 'consistencygroup_update', mock.Mock()) ex = exception.CinderException('test') mock_cg.side_effect = ex group_id = fake.consistency_group_id self.assertRaises(exception.CinderException, self.manager.create_consistencygroup, self.context, 'volume', consistencygroup_obj) self.assertTrue(LOG.exception.call_count > 0) db.consistencygroup_update.assert_called_once_with( self.context, group_id, {'status': ( fields.ConsistencyGroupStatus.ERROR)}) mock_cg.reset_mock() LOG.exception.reset_mock() db.consistencygroup_update.reset_mock() mock_cg.side_effect = exception.NoValidHost( reason="No weighed hosts available") self.manager.create_consistencygroup( self.context, 'volume', consistencygroup_obj) self.assertTrue(LOG.error.call_count > 0) db.consistencygroup_update.assert_called_once_with( self.context, group_id, {'status': ( fields.ConsistencyGroupStatus.ERROR)}) self.manager.driver = original_driver class SchedulerTestCase(test.TestCase): """Test case for base scheduler driver class.""" # So we can subclass this test and re-use tests if we need. driver_cls = driver.Scheduler def setUp(self): super(SchedulerTestCase, self).setUp() self.driver = self.driver_cls() self.context = context.RequestContext(fake.user_id, fake.project_id) self.topic = 'fake_topic' @mock.patch('cinder.scheduler.driver.Scheduler.' 'update_service_capabilities') def test_update_service_capabilities(self, _mock_update_cap): service_name = 'fake_service' host = 'fake_host' capabilities = {'fake_capability': 'fake_value'} self.driver.update_service_capabilities(service_name, host, capabilities) _mock_update_cap.assert_called_once_with(service_name, host, capabilities) @mock.patch('cinder.scheduler.host_manager.HostManager.' 'has_all_capabilities', return_value=False) def test_is_ready(self, _mock_has_caps): ready = self.driver.is_ready() _mock_has_caps.assert_called_once_with() self.assertFalse(ready) class SchedulerDriverBaseTestCase(SchedulerTestCase): """Test schedule driver class. Test cases for base scheduler driver class methods that will fail if the driver is changed. 
""" def test_unimplemented_schedule(self): fake_args = (1, 2, 3) fake_kwargs = {'cat': 'meow'} self.assertRaises(NotImplementedError, self.driver.schedule, self.context, self.topic, 'schedule_something', *fake_args, **fake_kwargs) class SchedulerDriverModuleTestCase(test.TestCase): """Test case for scheduler driver module methods.""" def setUp(self): super(SchedulerDriverModuleTestCase, self).setUp() self.context = context.RequestContext(fake.user_id, fake.project_id) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update): volume = fake_volume.fake_volume_obj(self.context) _mock_volume_get.return_value = volume driver.volume_update_db(self.context, volume.id, 'fake_host') scheduled_at = volume.scheduled_at.replace(tzinfo=None) _mock_vol_update.assert_called_once_with( self.context, volume.id, {'host': 'fake_host', 'scheduled_at': scheduled_at}) cinder-8.0.0/cinder/tests/unit/scheduler/test_capacity_weigher.py0000664000567000056710000003415012701406250026407 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Capacity Weigher. """ import mock from oslo_config import cfg from cinder import context from cinder.scheduler import weights from cinder import test from cinder.tests.unit.scheduler import fakes from cinder.volume import utils CONF = cfg.CONF class CapacityWeigherTestCase(test.TestCase): def setUp(self): super(CapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = weights.HostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_hosts(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {'size': 1} return self.weight_handler.get_weighed_objects( [weights.capacity.CapacityWeigher], hosts, weight_properties) @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, disabled=disabled) host_states = self.host_manager.get_all_host_states(ctxt) _mock_service_get_all_by_topic.assert_called_once_with( ctxt, CONF.volume_topic, disabled=disabled) return host_states # If thin_provisioning_support = False, use the following formula: # free = free_space - math.floor(total * reserved) # Otherwise, use the following formula: # free = (total * host_state.max_over_subscription_ratio # - host_state.provisioned_capacity_gb # - math.floor(total * reserved)) def test_default_of_spreading_first(self): hostinfo_list = self._get_all_hosts() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=1024-math.floor(1024*0.1)=922 # Norm=0.837837837838 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=2048*1.5-1748-math.floor(2048*0.1)=1120 # Norm=1.0 # host3: thin_provisioning_support = False # 
free_capacity_gb=512, free=256-512*0=256 # Norm=0.292383292383 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=2048*1.0-2047-math.floor(2048*0.05)=-101 # Norm=0.0 # host5: free_capacity_gb=unknown free=-1 # Norm=0.0819000819001 # so, host2 should win: weighed_host = self._get_weighed_hosts(hostinfo_list)[0] self.assertEqual(1.0, weighed_host.weight) self.assertEqual('host2', utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_multiplier1(self): self.flags(capacity_weight_multiplier=-1.0) hostinfo_list = self._get_all_hosts() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=-(1024-math.floor(1024*0.1))=-922 # Norm=-0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=-(256-512*0)=-256 # Norm=--0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=-(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=unknown free=-float('inf') # Norm=-1.0 # so, host4 should win: weighed_host = self._get_weighed_hosts(hostinfo_list)[0] self.assertEqual(0.0, weighed_host.weight) self.assertEqual('host4', utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_multiplier2(self): self.flags(capacity_weight_multiplier=2.0) hostinfo_list = self._get_all_hosts() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))*2=1844 # Norm=1.67567567568 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240 # Norm=2.0 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)*2=512 # Norm=0.584766584767 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202 # Norm=0.0 # host5: free_capacity_gb=unknown free=-2 # Norm=0.1638001638 # so, host2 should win: weighed_host = self._get_weighed_hosts(hostinfo_list)[0] self.assertEqual(1.0 * 2, weighed_host.weight) self.assertEqual('host2', utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_no_unknown_or_infinite(self): self.flags(capacity_weight_multiplier=-1.0) del self.host_manager.service_states['host5'] hostinfo_list = self._get_all_hosts() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm=-0.837837837838 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-1.0 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.292383292383 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(hostinfo_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', utils.extract_host(best_host.obj.host)) # and host2 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host2', utils.extract_host(worst_host.obj.host)) def test_capacity_weight_free_unknown(self): self.flags(capacity_weight_multiplier=-1.0) self.host_manager.service_states['host5'] = { 'total_capacity_gb': 3000, 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 
'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': None} hostinfo_list = self._get_all_hosts() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm= -0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=unknown free=3000 # Norm=-1.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(hostinfo_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', utils.extract_host(best_host.obj.host)) # and host5 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host5', utils.extract_host(worst_host.obj.host)) def test_capacity_weight_cap_unknown(self): self.flags(capacity_weight_multiplier=-1.0) self.host_manager.service_states['host5'] = { 'total_capacity_gb': 'unknown', 'free_capacity_gb': 3000, 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': None} hostinfo_list = self._get_all_hosts() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm= -0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=3000 free=unknown # Norm=-1.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(hostinfo_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', utils.extract_host(best_host.obj.host)) # and host5 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host5', utils.extract_host(worst_host.obj.host)) def test_capacity_weight_free_infinite(self): self.flags(capacity_weight_multiplier=-1.0) self.host_manager.service_states['host5'] = { 'total_capacity_gb': 3000, 'free_capacity_gb': 'infinite', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': None} hostinfo_list = self._get_all_hosts() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm= -0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=infinite free=3000 # 
Norm=-1.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(hostinfo_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', utils.extract_host(best_host.obj.host)) # and host5 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host5', utils.extract_host(worst_host.obj.host)) def test_capacity_weight_cap_infinite(self): self.flags(capacity_weight_multiplier=-1.0) self.host_manager.service_states['host5'] = { 'total_capacity_gb': 'infinite', 'free_capacity_gb': 3000, 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': None} hostinfo_list = self._get_all_hosts() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=-(1024-math.floor(1024*0.1))=-922 # Norm=-0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=-(256-512*0)=-256 # Norm=-0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=-(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: total_capacity_gb=infinite free=-float('inf') # Norm=-1.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(hostinfo_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', utils.extract_host(best_host.obj.host)) # and host5 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host5', utils.extract_host(worst_host.obj.host)) cinder-8.0.0/cinder/tests/unit/scheduler/test_chance_weigher.py0000664000567000056710000000504612701406250026035 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Chance Weigher.
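These tests exercise ChanceWeigher._weigh_object directly and also verify that HostManager can load the weigher through its entry-point mechanism without losing any hosts.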
""" import mock from cinder.scheduler import host_manager from cinder.scheduler.weights import chance from cinder import test class ChanceWeigherTestCase(test.TestCase): def setUp(self): super(ChanceWeigherTestCase, self).setUp() def fake_random(self, reset=False): if reset: self.not_random_float = 0.0 else: self.not_random_float += 1.0 return self.not_random_float @mock.patch('random.random') def test_chance_weigher(self, _mock_random): # stub random.random() to verify the ChanceWeigher # is using random.random() (repeated calls to weigh should # return incrementing weights) weigher = chance.ChanceWeigher() _mock_random.side_effect = self.fake_random self.fake_random(reset=True) host_state = {'host': 'host.example.com', 'free_capacity_gb': 99999} weight = weigher._weigh_object(host_state, None) self.assertEqual(1.0, weight) weight = weigher._weigh_object(host_state, None) self.assertEqual(2.0, weight) weight = weigher._weigh_object(host_state, None) self.assertEqual(3.0, weight) def test_host_manager_choosing_chance_weigher(self): # ensure HostManager can load the ChanceWeigher # via the entry points mechanism hm = host_manager.HostManager() weighers = hm._choose_host_weighers('ChanceWeigher') self.assertEqual(1, len(weighers)) self.assertEqual(weighers[0], chance.ChanceWeigher) def test_use_of_chance_weigher_via_host_manager(self): # ensure we don't lose any hosts when weighing with # the ChanceWeigher hm = host_manager.HostManager() fake_hosts = [host_manager.HostState('fake_host%s' % x) for x in range(1, 5)] weighed_hosts = hm.get_weighed_hosts(fake_hosts, {}, 'ChanceWeigher') self.assertEqual(4, len(weighed_hosts)) cinder-8.0.0/cinder/tests/unit/scheduler/test_rpcapi.py0000664000567000056710000002402112701406250024352 0ustar jenkinsjenkins00000000000000 # Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for cinder.scheduler.rpcapi """ import copy import mock from cinder import context from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import test class SchedulerRpcAPITestCase(test.TestCase): def setUp(self): super(SchedulerRpcAPITestCase, self).setUp() def tearDown(self): super(SchedulerRpcAPITestCase, self).tearDown() def _test_scheduler_api(self, method, rpc_method, fanout=False, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = scheduler_rpcapi.SchedulerAPI() expected_retval = 'foo' if rpc_method == 'call' else None target = { "fanout": fanout, "version": kwargs.pop('version', rpcapi.RPC_API_VERSION) } expected_msg = copy.deepcopy(kwargs) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval with mock.patch.object(rpcapi.client, "prepare") as mock_prepared: mock_prepared.side_effect = _fake_prepare_method with mock.patch.object(rpcapi.client, rpc_method) as mock_method: mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [ctxt, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) for kwarg, value in self.fake_kwargs.items(): self.assertEqual(expected_msg[kwarg], value) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_update_service_capabilities(self, can_send_version): self._test_scheduler_api('update_service_capabilities', rpc_method='cast', service_name='fake_name', host='fake_host', capabilities='fake_capabilities', fanout=True, version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_update_service_capabilities_old(self, can_send_version): self._test_scheduler_api('update_service_capabilities', rpc_method='cast', service_name='fake_name', host='fake_host', capabilities='fake_capabilities', fanout=True, version='1.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_volume(self, can_send_version): self._test_scheduler_api('create_volume', rpc_method='cast', topic='topic', volume_id='volume_id', snapshot_id='snapshot_id', image_id='image_id', request_spec='fake_request_spec', filter_properties='filter_properties', volume='volume', version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_create_volume_old(self, can_send_version): # Tests backwards compatibility with older clients self._test_scheduler_api('create_volume', rpc_method='cast', topic='topic', volume_id='volume_id', snapshot_id='snapshot_id', image_id='image_id', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.2') can_send_version.assert_has_calls([mock.call('2.0'), mock.call('1.9')]) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_migrate_volume_to_host(self, can_send_version): self._test_scheduler_api('migrate_volume_to_host', rpc_method='cast', topic='topic', volume_id='volume_id', host='host', force_host_copy=True, request_spec='fake_request_spec', 
filter_properties='filter_properties', volume='volume', version='2.0') can_send_version.assert_called_once_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_migrate_volume_to_host_old(self, can_send_version): self._test_scheduler_api('migrate_volume_to_host', rpc_method='cast', topic='topic', volume_id='volume_id', host='host', force_host_copy=True, request_spec='fake_request_spec', filter_properties='filter_properties', volume='volume', version='1.3') can_send_version.assert_has_calls([mock.call('2.0'), mock.call('1.11')]) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_retype(self, can_send_version): self._test_scheduler_api('retype', rpc_method='cast', topic='topic', volume_id='volume_id', request_spec='fake_request_spec', filter_properties='filter_properties', volume='volume', version='2.0') can_send_version.assert_called_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_retype_old(self, can_send_version): self._test_scheduler_api('retype', rpc_method='cast', topic='topic', volume_id='volume_id', request_spec='fake_request_spec', filter_properties='filter_properties', volume='volume', version='1.4') can_send_version.assert_has_calls([mock.call('2.0'), mock.call('1.10')]) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_manage_existing(self, can_send_version): self._test_scheduler_api('manage_existing', rpc_method='cast', topic='topic', volume_id='volume_id', request_spec='fake_request_spec', filter_properties='filter_properties', version='2.0') can_send_version.assert_called_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_manage_existing_old(self, can_send_version): self._test_scheduler_api('manage_existing', rpc_method='cast', topic='topic', volume_id='volume_id', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.5') can_send_version.assert_called_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_get_pools(self, can_send_version): self._test_scheduler_api('get_pools', rpc_method='call', filters=None, version='2.0') can_send_version.assert_called_with('2.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_get_pools_old(self, can_send_version): self._test_scheduler_api('get_pools', rpc_method='call', filters=None, version='1.7') can_send_version.assert_called_with('2.0') cinder-8.0.0/cinder/tests/unit/scheduler/test_filter_scheduler.py0000664000567000056710000005360612701406250026432 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Filter Scheduler. 
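Covers volume and consistency group scheduling, retry bookkeeping, and retype host selection, all running against the fake host manager defined in fakes.py.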
""" import mock from cinder import context from cinder import exception from cinder.scheduler import filter_scheduler from cinder.scheduler import host_manager from cinder.tests.unit.scheduler import fakes from cinder.tests.unit.scheduler import test_scheduler from cinder.volume import utils class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Test case for Filter Scheduler.""" driver_cls = filter_scheduler.FilterScheduler def test_create_consistencygroup_no_hosts(self): # Ensure empty hosts result in NoValidHosts exception. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type1', 'extra_specs': {}}} request_spec2 = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type2', 'extra_specs': {}}} request_spec_list = [request_spec, request_spec2] self.assertRaises(exception.NoValidHost, sched.schedule_create_consistencygroup, fake_context, 'faki-id1', request_spec_list, {}) @mock.patch('cinder.db.service_get_all_by_topic') def test_schedule_consistencygroup(self, _mock_service_get_all_by_topic): # Make sure _schedule_group() can find host successfully. sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) specs = {'capabilities:consistencygroup_support': ' True'} request_spec = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type1', 'extra_specs': specs}} request_spec2 = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type2', 'extra_specs': specs}} request_spec_list = [request_spec, request_spec2] weighed_host = sched._schedule_group(fake_context, request_spec_list, {}) self.assertIsNotNone(weighed_host.obj) self.assertTrue(_mock_service_get_all_by_topic.called) @mock.patch('cinder.db.service_get_all_by_topic') def test_schedule_consistencygroup_no_cg_support_in_extra_specs( self, _mock_service_get_all_by_topic): # Make sure _schedule_group() can find host successfully even # when consistencygroup_support is not specified in volume type's # extra specs sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type1', 'extra_specs': {}}} request_spec2 = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type2', 'extra_specs': {}}} request_spec_list = [request_spec, request_spec2] weighed_host = sched._schedule_group(fake_context, request_spec_list, {}) self.assertIsNotNone(weighed_host.obj) self.assertTrue(_mock_service_get_all_by_topic.called) def test_create_volume_no_hosts(self): # Ensure empty hosts/child_zones result in NoValidHosts exception. 
sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_id': ['fake-id1']} self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, request_spec, {}) def test_create_volume_no_hosts_invalid_req(self): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') # request_spec is missing 'volume_id' request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}} self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, request_spec, {}) def test_create_volume_no_volume_type(self): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') # request_spec is missing 'volume_type' request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_id': ['fake-id1']} self.assertRaises(exception.InvalidVolumeType, sched.schedule_create_volume, fake_context, request_spec, {}) @mock.patch('cinder.scheduler.host_manager.HostManager.' 'get_all_host_states') def test_create_volume_non_admin(self, _mock_get_all_host_states): # Test creating a volume locally using create_volume, passing # a non-admin context. DB actions should work. self.was_admin = False def fake_get(ctxt): # Make sure this is called with admin context, even though # we're using user context below. self.was_admin = ctxt.is_admin return {} sched = fakes.FakeFilterScheduler() _mock_get_all_host_states.side_effect = fake_get fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_id': ['fake-id1']} self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, request_spec, {}) self.assertTrue(self.was_admin) @mock.patch('cinder.db.service_get_all_by_topic') def test_schedule_happy_day(self, _mock_service_get_all_by_topic): # Make sure there's nothing glaringly wrong with _schedule() # by doing a happy day pass through. sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host.obj) self.assertTrue(_mock_service_get_all_by_topic.called) @mock.patch('cinder.db.service_get_all_by_topic') def test_create_volume_clear_host_different_with_cg(self, _mock_service_get_all): # Ensure we clear those hosts whose backend is not same as # consistencygroup's backend. sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fakes.mock_host_manager_db_calls(_mock_service_get_all) fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'CG_backend': 'host@lvmdriver'} weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIsNone(weighed_host) @mock.patch('cinder.db.service_get_all_by_topic') def test_create_volume_host_same_as_cg(self, _mock_service_get_all): # Ensure we don't clear the host whose backend is same as # consistencygroup's backend. 
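# CG_backend is 'host1' here, which matches host1's backend, so host1 is
# kept and the scheduler should pick its pool (host1#lvm1, per fakes.py).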
sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fakes.mock_host_manager_db_calls(_mock_service_get_all) fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'CG_backend': 'host1'} weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertEqual('host1#lvm1', weighed_host.obj.host) def test_max_attempts(self): self.flags(scheduler_max_attempts=4) sched = fakes.FakeFilterScheduler() self.assertEqual(4, sched._max_attempts()) def test_invalid_max_attempts(self): self.flags(scheduler_max_attempts=0) self.assertRaises(exception.InvalidParameterValue, fakes.FakeFilterScheduler) def test_retry_disabled(self): # Retry info should not get populated when re-scheduling is off. self.flags(scheduler_max_attempts=1) sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} filter_properties = {} sched._schedule(self.context, request_spec, filter_properties=filter_properties) # Should not have retry info in the populated filter properties. self.assertNotIn("retry", filter_properties) def test_retry_attempt_one(self): # Test retry logic on initial scheduling attempt. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} filter_properties = {} sched._schedule(self.context, request_spec, filter_properties=filter_properties) num_attempts = filter_properties['retry']['num_attempts'] self.assertEqual(1, num_attempts) def test_retry_attempt_two(self): # Test retry logic when re-scheduling. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} retry = dict(num_attempts=1) filter_properties = dict(retry=retry) sched._schedule(self.context, request_spec, filter_properties=filter_properties) num_attempts = filter_properties['retry']['num_attempts'] self.assertEqual(2, num_attempts) def test_retry_exceeded_max_attempts(self): # Test for necessary explosion when max retries is exceeded. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} retry = dict(num_attempts=2) filter_properties = dict(retry=retry) self.assertRaises(exception.NoValidHost, sched._schedule, self.context, request_spec, filter_properties=filter_properties) def test_add_retry_host(self): retry = dict(num_attempts=1, hosts=[]) filter_properties = dict(retry=retry) host = "fakehost" sched = fakes.FakeFilterScheduler() sched._add_retry_host(filter_properties, host) hosts = filter_properties['retry']['hosts'] self.assertEqual(1, len(hosts)) self.assertEqual(host, hosts[0]) def test_post_select_populate(self): # Test addition of certain filter props after a node is selected. 
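# The retry dict is how a later re-schedule learns which hosts were
# already attempted; the selected host must be appended to
# retry['hosts'] without disturbing the host state itself.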
retry = {'hosts': [], 'num_attempts': 1} filter_properties = {'retry': retry} sched = fakes.FakeFilterScheduler() host_state = host_manager.HostState('host') host_state.total_capacity_gb = 1024 sched._post_select_populate_filter_properties(filter_properties, host_state) self.assertEqual('host', filter_properties['retry']['hosts'][0]) self.assertEqual(1024, host_state.total_capacity_gb) def _host_passes_filters_setup(self, mock_obj): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(mock_obj) return (sched, fake_context) @mock.patch('cinder.db.service_get_all_by_topic') def test_host_passes_filters_happy_day(self, _mock_service_get_topic): """Do a successful pass through with host_passes_filters().""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) request_spec = {'volume_id': 1, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} ret_host = sched.host_passes_filters(ctx, 'host1#lvm1', request_spec, {}) self.assertEqual('host1', utils.extract_host(ret_host.host)) self.assertTrue(_mock_service_get_topic.called) @mock.patch('cinder.db.service_get_all_by_topic') def test_host_passes_filters_default_pool_happy_day( self, _mock_service_get_topic): """Do a successful pass through with host_passes_filters().""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) request_spec = {'volume_id': 1, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} ret_host = sched.host_passes_filters(ctx, 'host5#_pool0', request_spec, {}) self.assertEqual('host5', utils.extract_host(ret_host.host)) self.assertTrue(_mock_service_get_topic.called) @mock.patch('cinder.db.service_get_all_by_topic') def test_host_passes_filters_no_capacity(self, _mock_service_get_topic): """Fail the host due to insufficient capacity.""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) request_spec = {'volume_id': 1, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1024}} self.assertRaises(exception.NoValidHost, sched.host_passes_filters, ctx, 'host1#lvm1', request_spec, {}) self.assertTrue(_mock_service_get_topic.called) @mock.patch('cinder.db.service_get_all_by_topic') def test_retype_policy_never_migrate_pass(self, _mock_service_get_topic): # Retype should pass if current host passes filters and # policy=never. host4 doesn't have enough space to hold an additional # 200GB, but it is already the host of this volume and should not be # counted twice. sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm4'} request_spec = {'volume_id': 1, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4#lvm4'}} host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='never') self.assertEqual('host4', utils.extract_host(host_state.host)) @mock.patch('cinder.db.service_get_all_by_topic') def test_retype_with_pool_policy_never_migrate_pass( self, _mock_service_get_topic): # Retype should pass if current host passes filters and # policy=never. The volume lives on pool host3#lvm3, and the scheduler # must keep the pool suffix in the host it returns instead of # collapsing it to the bare backend.
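# For reference, fakes.FakeHostManager gives host3 total_capacity_gb=512,
# free_capacity_gb=256 and reserved_percentage=0, so the 200GB volume
# already sitting on host3#lvm3 passes the capacity check in place.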
sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm3'} request_spec = {'volume_id': 1, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host3#lvm3'}} host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='never') self.assertEqual('host3#lvm3', host_state.host) @mock.patch('cinder.db.service_get_all_by_topic') def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic): # Retype should fail if current host doesn't pass filters and # policy=never. sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} request_spec = {'volume_id': 1, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4'}} self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, request_spec, filter_properties={}, migration_policy='never') @mock.patch('cinder.db.service_get_all_by_topic') def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic): # Retype should pass if current host fails filters but another host # is suitable when policy=on-demand. sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} request_spec = {'volume_id': 1, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4'}} host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='on-demand') self.assertEqual('host1', utils.extract_host(host_state.host)) @mock.patch('cinder.db.service_get_all_by_topic') def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic): # Retype should fail if current host doesn't pass filters and # no other suitable candidates exist even if policy=on-demand. sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} request_spec = {'volume_id': 1, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 2048, 'host': 'host4'}} self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, request_spec, filter_properties={}, migration_policy='on-demand') cinder-8.0.0/cinder/tests/unit/scheduler/fakes.py0000664000567000056710000001472512701406250023140 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Scheduler tests. 
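The FakeHostManager service states defined here back the capacity, allocated-capacity and chance weigher tests; host5 deliberately reports 'infinite'/'unknown' capacity to exercise those code paths.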
""" from oslo_utils import timeutils from oslo_utils import uuidutils from cinder.scheduler import filter_scheduler from cinder.scheduler import host_manager class FakeFilterScheduler(filter_scheduler.FilterScheduler): def __init__(self, *args, **kwargs): super(FakeFilterScheduler, self).__init__(*args, **kwargs) self.host_manager = host_manager.HostManager() class FakeHostManager(host_manager.HostManager): def __init__(self): super(FakeHostManager, self).__init__() self.service_states = { 'host1': {'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 10, 'volume_backend_name': 'lvm1', 'timestamp': None}, 'host2': {'total_capacity_gb': 2048, 'free_capacity_gb': 300, 'allocated_capacity_gb': 1748, 'provisioned_capacity_gb': 1748, 'max_over_subscription_ratio': 1.5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 10, 'volume_backend_name': 'lvm2', 'timestamp': None}, 'host3': {'total_capacity_gb': 512, 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'max_over_subscription_ratio': 2.0, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 0, 'volume_backend_name': 'lvm3', 'timestamp': None}, 'host4': {'total_capacity_gb': 2048, 'free_capacity_gb': 200, 'allocated_capacity_gb': 1848, 'provisioned_capacity_gb': 2047, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'volume_backend_name': 'lvm4', 'timestamp': None, 'consistencygroup_support': True}, 'host5': {'total_capacity_gb': 'infinite', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': None}, } class FakeHostState(host_manager.HostState): def __init__(self, host, attribute_dict): super(FakeHostState, self).__init__(host) for (key, val) in attribute_dict.items(): setattr(self, key, val) class FakeNovaClient(object): class Server(object): def __init__(self, host): self.uuid = uuidutils.generate_uuid() self.host = host setattr(self, 'OS-EXT-SRV-ATTR:host', host) class ServerManager(object): def __init__(self): self._servers = [] def create(self, host): self._servers.append(FakeNovaClient.Server(host)) return self._servers[-1].uuid def get(self, server_uuid): for s in self._servers: if s.uuid == server_uuid: return s return None def list(self, detailed=True, search_opts=None): matching = list(self._servers) if search_opts: for opt, val in search_opts.items(): matching = [m for m in matching if getattr(m, opt, None) == val] return matching class ListExtResource(object): def __init__(self, ext_name): self.name = ext_name class ListExtManager(object): def __init__(self, ext_srv_attr=True): self.ext_srv_attr = ext_srv_attr def show_all(self): if self.ext_srv_attr: return [ FakeNovaClient.ListExtResource('ExtendedServerAttributes')] return [] def __init__(self, ext_srv_attr=True): self.servers = FakeNovaClient.ServerManager() self.list_extensions = FakeNovaClient.ListExtManager( ext_srv_attr=ext_srv_attr) def mock_host_manager_db_calls(mock_obj, disabled=None): services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', 
updated_at=timeutils.utcnow()), dict(id=2, host='host2', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3', topic='volume', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow()), dict(id=4, host='host4', topic='volume', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), dict(id=5, host='host5', topic='volume', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), ] if disabled is None: mock_obj.return_value = services else: mock_obj.return_value = [service for service in services if service['disabled'] == disabled] cinder-8.0.0/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py0000664000567000056710000000772312701406250030425 0ustar jenkinsjenkins00000000000000# Copyright 2013 eBay Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Allocated Capacity Weigher. """ import mock from oslo_config import cfg from cinder import context from cinder.scheduler import weights from cinder import test from cinder.tests.unit.scheduler import fakes from cinder.volume import utils CONF = cfg.CONF class AllocatedCapacityWeigherTestCase(test.TestCase): def setUp(self): super(AllocatedCapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = weights.HostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {} return self.weight_handler.get_weighed_objects( [weights.capacity.AllocatedCapacityWeigher], hosts, weight_properties)[0] @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, disabled=disabled) host_states = self.host_manager.get_all_host_states(ctxt) _mock_service_get_all_by_topic.assert_called_once_with( ctxt, CONF.volume_topic, disabled=disabled) return host_states def test_default_of_spreading_first(self): hostinfo_list = self._get_all_hosts() # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host2: allocated_capacity_gb=1748, weight=-1748 # host3: allocated_capacity_gb=256, weight=-256 # host4: allocated_capacity_gb=1848, weight=-1848 Norm=-1.0 # host5: allocated_capacity_gb=1548, weight=-1548 # so, host1 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(0.0, weighed_host.weight) self.assertEqual( 'host1', utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_multiplier1(self): self.flags(allocated_capacity_weight_multiplier=1.0) hostinfo_list = self._get_all_hosts() # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host2: allocated_capacity_gb=1748, weight=1748 # host3: allocated_capacity_gb=256, weight=256 # host4: allocated_capacity_gb=1848, weight=1848 Norm=1.0 # host5: allocated_capacity_gb=1548, weight=1548 # so, host4 should win: weighed_host =
self._get_weighed_host(hostinfo_list) self.assertEqual(1.0, weighed_host.weight) self.assertEqual( 'host4', utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_multiplier2(self): self.flags(allocated_capacity_weight_multiplier=-2.0) hostinfo_list = self._get_all_hosts() # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host2: allocated_capacity_gb=1748, weight=-3496 # host3: allocated_capacity_gb=256, weight=-512 # host4: allocated_capacity_gb=1848, weight=-3696 Norm=-2.0 # host5: allocated_capacity_gb=1548, weight=-3096 # so, host1 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(0.0, weighed_host.weight) self.assertEqual( 'host1', utils.extract_host(weighed_host.obj.host)) cinder-8.0.0/cinder/tests/unit/test_qos_specs.py0000664000567000056710000003556412701406250023131 0ustar jenkinsjenkins00000000000000 # Copyright (c) 2013 eBay Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for qos specs internal API """ import time from oslo_db import exception as db_exc from cinder import context from cinder import db from cinder import exception from cinder import test from cinder.volume import qos_specs from cinder.volume import volume_types def fake_db_qos_specs_create(context, values): if values['name'] == 'DupQoSName': raise exception.QoSSpecsExists(specs_id=values['name']) elif values['name'] == 'FailQoSName': raise db_exc.DBError() pass class QoSSpecsTestCase(test.TestCase): """Test cases for qos specs code.""" def setUp(self): super(QoSSpecsTestCase, self).setUp() self.ctxt = context.get_admin_context() def _create_qos_specs(self, name, values=None): """Create a transfer object.""" if values: specs = dict(name=name, qos_specs=values) else: specs = {'name': name, 'qos_specs': { 'consumer': 'back-end', 'key1': 'value1', 'key2': 'value2'}} return db.qos_specs_create(self.ctxt, specs)['id'] def test_create(self): input = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} ref = qos_specs.create(self.ctxt, 'FakeName', input) specs = qos_specs.get_qos_specs(self.ctxt, ref['id']) expected = (dict(consumer='back-end')) expected.update(dict(id=ref['id'])) expected.update(dict(name='FakeName')) del input['consumer'] expected.update(dict(specs=input)) self.assertDictMatch(expected, specs) self.stubs.Set(db, 'qos_specs_create', fake_db_qos_specs_create) # qos specs must have unique name self.assertRaises(exception.QoSSpecsExists, qos_specs.create, self.ctxt, 'DupQoSName', input) input.update({'consumer': 'FakeConsumer'}) # consumer must be one of: front-end, back-end, both self.assertRaises(exception.InvalidQoSSpecs, qos_specs.create, self.ctxt, 'QoSName', input) del input['consumer'] # able to catch DBError self.assertRaises(exception.QoSSpecsCreateFailed, qos_specs.create, self.ctxt, 'FailQoSName', input) def test_update(self): def fake_db_update(context, specs_id, values): raise db_exc.DBError() input = {'key1': 'value1', 'consumer': 'WrongPlace'} # consumer must be one of: front-end, back-end, both
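# 'WrongPlace' is not an accepted consumer value (front-end, back-end or
# both), so update() must reject the specs before touching the database.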
self.assertRaises(exception.InvalidQoSSpecs, qos_specs.update, self.ctxt, 'fake_id', input) input['consumer'] = 'front-end' # qos specs must exists self.assertRaises(exception.QoSSpecsNotFound, qos_specs.update, self.ctxt, 'fake_id', input) specs_id = self._create_qos_specs('Name', input) qos_specs.update(self.ctxt, specs_id, {'key1': 'newvalue1', 'key2': 'value2'}) specs = qos_specs.get_qos_specs(self.ctxt, specs_id) self.assertEqual('newvalue1', specs['specs']['key1']) self.assertEqual('value2', specs['specs']['key2']) self.stubs.Set(db, 'qos_specs_update', fake_db_update) self.assertRaises(exception.QoSSpecsUpdateFailed, qos_specs.update, self.ctxt, 'fake_id', input) def test_delete(self): def fake_db_associations_get(context, id): if id == 'InUse': return True else: return False def fake_db_delete(context, id): if id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) def fake_disassociate_all(context, id): pass self.stubs.Set(db, 'qos_specs_associations_get', fake_db_associations_get) self.stubs.Set(qos_specs, 'disassociate_all', fake_disassociate_all) self.stubs.Set(db, 'qos_specs_delete', fake_db_delete) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.delete, self.ctxt, None) self.assertRaises(exception.QoSSpecsNotFound, qos_specs.delete, self.ctxt, 'NotFound') self.assertRaises(exception.QoSSpecsInUse, qos_specs.delete, self.ctxt, 'InUse') # able to delete in-use qos specs if force=True qos_specs.delete(self.ctxt, 'InUse', force=True) def test_delete_keys(self): def fake_db_qos_delete_key(context, id, key): if key == 'NotFound': raise exception.QoSSpecsKeyNotFound(specs_id=id, specs_key=key) else: pass def fake_qos_specs_get(context, id): if id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) else: pass value = dict(consumer='front-end', foo='Foo', bar='Bar', zoo='tiger') specs_id = self._create_qos_specs('QoSName', value) qos_specs.delete_keys(self.ctxt, specs_id, ['foo', 'bar']) del value['consumer'] del value['foo'] del value['bar'] expected = {'name': 'QoSName', 'id': specs_id, 'consumer': 'front-end', 'specs': value} specs = qos_specs.get_qos_specs(self.ctxt, specs_id) self.assertDictMatch(expected, specs) self.stubs.Set(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.stubs.Set(db, 'qos_specs_item_delete', fake_db_qos_delete_key) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.delete_keys, self.ctxt, None, []) self.assertRaises(exception.QoSSpecsNotFound, qos_specs.delete_keys, self.ctxt, 'NotFound', []) self.assertRaises(exception.QoSSpecsKeyNotFound, qos_specs.delete_keys, self.ctxt, 'Found', ['NotFound']) self.assertRaises(exception.QoSSpecsKeyNotFound, qos_specs.delete_keys, self.ctxt, 'Found', ['foo', 'bar', 'NotFound']) def test_get_associations(self): def fake_db_associate_get(context, id): if id == 'Trouble': raise db_exc.DBError() return [{'name': 'type-1', 'id': 'id-1'}, {'name': 'type-2', 'id': 'id-2'}] self.stubs.Set(db, 'qos_specs_associations_get', fake_db_associate_get) expected1 = {'association_type': 'volume_type', 'name': 'type-1', 'id': 'id-1'} expected2 = {'association_type': 'volume_type', 'name': 'type-2', 'id': 'id-2'} res = qos_specs.get_associations(self.ctxt, 'specs-id') self.assertIn(expected1, res) self.assertIn(expected2, res) self.assertRaises(exception.CinderException, qos_specs.get_associations, self.ctxt, 'Trouble') def test_associate_qos_with_type(self): def fake_qos_specs_get(context, id): if id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) else: pass def fake_db_associate(context, id, 
type_id): if id == 'Trouble': raise db_exc.DBError() elif type_id == 'NotFound': raise exception.VolumeTypeNotFound(volume_type_id=type_id) pass def fake_vol_type_qos_get(type_id): if type_id == 'Invalid': return {'qos_specs': {'id': 'Invalid'}} else: return {'qos_specs': None} type_ref = volume_types.create(self.ctxt, 'TypeName') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(1, len(res)) self.assertEqual('TypeName', res[0]['name']) self.assertEqual(type_ref['id'], res[0]['id']) self.stubs.Set(db, 'qos_specs_associate', fake_db_associate) self.stubs.Set(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.stubs.Set(volume_types, 'get_volume_type_qos_specs', fake_vol_type_qos_get) self.assertRaises(exception.VolumeTypeNotFound, qos_specs.associate_qos_with_type, self.ctxt, 'specs-id', 'NotFound') self.assertRaises(exception.QoSSpecsAssociateFailed, qos_specs.associate_qos_with_type, self.ctxt, 'Trouble', 'id') self.assertRaises(exception.QoSSpecsNotFound, qos_specs.associate_qos_with_type, self.ctxt, 'NotFound', 'id') self.assertRaises(exception.InvalidVolumeType, qos_specs.associate_qos_with_type, self.ctxt, 'specs-id', 'Invalid') def test_disassociate_qos_specs(self): def fake_qos_specs_get(context, id): if id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) else: pass def fake_db_disassociate(context, id, type_id): if id == 'Trouble': raise db_exc.DBError() elif type_id == 'NotFound': raise exception.VolumeTypeNotFound(volume_type_id=type_id) pass type_ref = volume_types.create(self.ctxt, 'TypeName') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(1, len(res)) qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(0, len(res)) self.stubs.Set(db, 'qos_specs_disassociate', fake_db_disassociate) self.stubs.Set(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.assertRaises(exception.VolumeTypeNotFound, qos_specs.disassociate_qos_specs, self.ctxt, 'specs-id', 'NotFound') self.assertRaises(exception.QoSSpecsDisassociateFailed, qos_specs.disassociate_qos_specs, self.ctxt, 'Trouble', 'id') def test_disassociate_all(self): def fake_db_disassociate_all(context, id): if id == 'Trouble': raise db_exc.DBError() pass def fake_qos_specs_get(context, id): if id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) else: pass type1_ref = volume_types.create(self.ctxt, 'TypeName1') type2_ref = volume_types.create(self.ctxt, 'TypeName2') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type1_ref['id']) qos_specs.associate_qos_with_type(self.ctxt, specs_id, type2_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(2, len(res)) qos_specs.disassociate_all(self.ctxt, specs_id) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(0, len(res)) self.stubs.Set(db, 'qos_specs_disassociate_all', fake_db_disassociate_all) self.stubs.Set(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.assertRaises(exception.QoSSpecsDisassociateFailed, qos_specs.disassociate_all, self.ctxt, 'Trouble') def test_get_all_specs(self): input = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'consumer': 'both'} specs_id1 = 
self._create_qos_specs('Specs1', input) input.update({'key4': 'value4'}) specs_id2 = self._create_qos_specs('Specs2', input) expected1 = { 'id': specs_id1, 'name': 'Specs1', 'consumer': 'both', 'specs': {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}} expected2 = { 'id': specs_id2, 'name': 'Specs2', 'consumer': 'both', 'specs': {'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': 'value4'}} res = qos_specs.get_all_specs(self.ctxt) self.assertEqual(2, len(res)) self.assertIn(expected1, res) self.assertIn(expected2, res) def test_get_qos_specs(self): one_time_value = str(int(time.time())) input = {'key1': one_time_value, 'key2': 'value2', 'key3': 'value3', 'consumer': 'both'} id = self._create_qos_specs('Specs1', input) specs = qos_specs.get_qos_specs(self.ctxt, id) self.assertEqual(one_time_value, specs['specs']['key1']) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.get_qos_specs, self.ctxt, None) def test_get_qos_specs_by_name(self): one_time_value = str(int(time.time())) input = {'key1': one_time_value, 'key2': 'value2', 'key3': 'value3', 'consumer': 'back-end'} self._create_qos_specs(one_time_value, input) specs = qos_specs.get_qos_specs_by_name(self.ctxt, one_time_value) self.assertEqual(one_time_value, specs['specs']['key1']) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.get_qos_specs_by_name, self.ctxt, None) cinder-8.0.0/cinder/tests/unit/test_nexenta5_iscsi.py0000664000567000056710000003536612701406257024064 0ustar jenkinsjenkins00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
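# These driver tests run entirely against a mock transport: setUp() below
# replaces jsonrpc.NexentaJSONProxy with a Mock, so no NexentaStor 5
# appliance and no real REST traffic are involved.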
""" Unit tests for OpenStack Cinder volume driver """ import mock from mock import patch from oslo_serialization import jsonutils from oslo_utils import units import requests from cinder import context from cinder import db from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.ns5 import iscsi from cinder.volume.drivers.nexenta.ns5 import jsonrpc class TestNexentaISCSIDriver(test.TestCase): TEST_VOLUME_NAME = 'volume1' TEST_VOLUME_NAME2 = 'volume2' TEST_SNAPSHOT_NAME = 'snapshot1' TEST_VOLUME_REF = { 'name': TEST_VOLUME_NAME, 'size': 1, 'id': '1', 'status': 'available' } TEST_VOLUME_REF2 = { 'name': TEST_VOLUME_NAME2, 'size': 1, 'id': '2', 'status': 'in-use' } TEST_SNAPSHOT_REF = { 'name': TEST_SNAPSHOT_NAME, 'volume_name': TEST_VOLUME_NAME, 'volume_id': '1' } def __init__(self, method): super(TestNexentaISCSIDriver, self).__init__(method) def setUp(self): super(TestNexentaISCSIDriver, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.ctxt = context.get_admin_context() self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_host = '1.1.1.1' self.cfg.nexenta_user = 'admin' self.cfg.nexenta_password = 'nexenta' self.cfg.nexenta_volume = 'cinder' self.cfg.nexenta_rest_port = 2000 self.cfg.nexenta_rest_protocol = 'http' self.cfg.nexenta_iscsi_target_portal_port = 8080 self.cfg.nexenta_target_prefix = 'iqn:' self.cfg.nexenta_target_group_prefix = 'cinder/' self.cfg.nexenta_ns5_blocksize = 32 self.cfg.nexenta_sparse = True self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.reserved_percentage = 20 self.cfg.nexenta_volume = 'pool' self.cfg.nexenta_volume_group = 'dsg' self.nef_mock = mock.Mock() self.stubs.Set(jsonrpc, 'NexentaJSONProxy', lambda *_, **__: self.nef_mock) self.drv = iscsi.NexentaISCSIDriver( configuration=self.cfg) self.drv.db = db self.drv.do_setup(self.ctxt) def _create_volume_db_entry(self): vol = { 'id': '1', 'size': 1, 'status': 'available', 'provider_location': self.TEST_VOLUME_NAME } return db.volume_create(self.ctxt, vol)['id'] def test_do_setup(self): self.nef_mock.post.side_effect = exception.NexentaException( 'Could not create volume group') self.assertRaises( exception.NexentaException, self.drv.do_setup, self.ctxt) self.nef_mock.post.side_effect = exception.NexentaException( '{"code": "EEXIST"}') self.assertIsNone(self.drv.do_setup(self.ctxt)) def test_check_for_setup_error(self): self.nef_mock.get.return_value = { 'data': [{'name': 'iscsit', 'state': 'offline'}]} self.assertRaises( exception.NexentaException, self.drv.check_for_setup_error) self.nef_mock.get.side_effect = exception.NexentaException() self.assertRaises(LookupError, self.drv.check_for_setup_error) def test_create_volume(self): self.drv.create_volume(self.TEST_VOLUME_REF) url = 'storage/pools/pool/volumeGroups/dsg/volumes' self.nef_mock.post.assert_called_with(url, { 'name': self.TEST_VOLUME_REF['name'], 'volumeSize': 1 * units.Gi, 'volumeBlockSize': 32768, 'sparseVolume': self.cfg.nexenta_sparse}) def test_delete_volume(self): self.nef_mock.delete.side_effect = exception.NexentaException() self.assertIsNone(self.drv.delete_volume(self.TEST_VOLUME_REF)) url = 'storage/pools/pool/volumeGroups' data = {'name': 'dsg', 'volumeBlockSize': 32768} self.nef_mock.post.assert_called_with(url, data) def test_extend_volume(self): self.nef_mock.put.side_effect = exception.NexentaException() self.assertRaises( exception.NexentaException, self.drv.extend_volume, 
self.TEST_VOLUME_REF, 2) def test_delete_snapshot(self): self._create_volume_db_entry() self.nef_mock.delete.side_effect = exception.NexentaException('EBUSY') self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) url = ('storage/pools/pool/volumeGroups/dsg/' 'volumes/volume-1/snapshots/snapshot1') self.nef_mock.delete.assert_called_with(url) self.nef_mock.delete.side_effect = exception.NexentaException('Error') self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) self.nef_mock.delete.assert_called_with(url) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver.create_snapshot') @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver.delete_snapshot') @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver.create_volume_from_snapshot') def test_create_cloned_volume(self, crt_vol, dlt_snap, crt_snap): self._create_volume_db_entry() vol = self.TEST_VOLUME_REF2 src_vref = self.TEST_VOLUME_REF crt_vol.side_effect = exception.NexentaException() dlt_snap.side_effect = exception.NexentaException() self.assertRaises( exception.NexentaException, self.drv.create_cloned_volume, vol, src_vref) def test_create_snapshot(self): self._create_volume_db_entry() self.drv.create_snapshot(self.TEST_SNAPSHOT_REF) url = 'storage/pools/pool/volumeGroups/dsg/volumes/volume-1/snapshots' self.nef_mock.post.assert_called_with( url, {'name': 'snapshot1'}) def test_get_target_by_alias(self): self.nef_mock.get.return_value = {'data': []} self.assertIsNone(self.drv._get_target_by_alias('1.1.1.1-0')) self.nef_mock.get.return_value = {'data': [{'name': 'iqn-0'}]} self.assertEqual( {'name': 'iqn-0'}, self.drv._get_target_by_alias('1.1.1.1-0')) def test_target_group_exists(self): self.nef_mock.get.return_value = {'data': []} self.assertFalse( self.drv._target_group_exists({'data': [{'name': 'iqn-0'}]})) self.nef_mock.get.return_value = {'data': [{'name': '1.1.1.1-0'}]} self.assertTrue(self.drv._target_group_exists( {'data': [{'name': 'iqn-0'}]})) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_target_by_alias') def test_create_target(self, target): self.nef_mock.get.return_value = {} target.return_value = {'name': 'iqn-0'} self.assertEqual('iqn-0', self.drv._create_target(0)) target.return_value = None self.assertRaises(TypeError, self.drv._create_target, 0) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._create_target') def test_get_target_name(self, target_name): self._create_volume_db_entry() self.drv.targets = {} target_name.return_value = 'iqn-0' self.drv.targets['iqn-0'] = [] self.assertEqual( 'iqn-0', self.drv._get_target_name(self.TEST_VOLUME_REF)) volume = self.TEST_VOLUME_REF volume['provider_location'] = '1.1.1.1:8080,1 iqn-0 0' self.nef_mock.get.return_value = {'data': [{'alias': '1.1.1.1-0'}]} self.assertEqual( 'iqn-0', self.drv._get_target_name(self.TEST_VOLUME_REF)) self.assertEqual('1.1.1.1-0', self.drv.targetgroups['iqn-0']) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._create_target') def test_get_targetgroup_name(self, target_name): self.TEST_VOLUME_REF['provider_location'] = '1.1.1.1:8080,1 iqn-0 0' self._create_volume_db_entry() target_name = 'iqn-0' self.drv.targetgroups[target_name] = '1.1.1.1-0' self.assertEqual( '1.1.1.1-0', self.drv._get_targetgroup_name(self.TEST_VOLUME_REF)) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 
'NexentaISCSIDriver._get_targetgroup_name') def test_get_lun_id(self, targetgroup): targetgroup.return_value = '1.1.1.1-0' self.nef_mock.get.return_value = {'data': [{'guid': '0'}]} self.assertEqual('0', self.drv._get_lun_id(self.TEST_VOLUME_REF)) self.nef_mock.get.return_value = {} self.assertRaises( LookupError, self.drv._get_lun_id, self.TEST_VOLUME_REF) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_lun_id') def test_lu_exists(self, lun_id): lun_id.return_value = '0' self.assertTrue(self.drv._lu_exists(self.TEST_VOLUME_REF)) lun_id.side_effect = LookupError self.assertFalse(self.drv._lu_exists(self.TEST_VOLUME_REF)) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_lun_id') @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_targetgroup_name') def test_get_lun(self, targetgroup, lun_id): lun_id.return_value = '0' targetgroup.return_value = '1.1.1.1-0' self.nef_mock.get.return_value = {'data': [{'lunNumber': 0}]} self.assertEqual(0, self.drv._get_lun(self.TEST_VOLUME_REF)) self.nef_mock.get.return_value = {} self.assertRaises( LookupError, self.drv._get_lun, self.TEST_VOLUME_REF) lun_id.side_effect = LookupError() self.assertIsNone(self.drv._get_lun(self.TEST_VOLUME_REF)) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_target_name') @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_targetgroup_name') @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._lu_exists') @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_lun') def test_do_export(self, get_lun, lu_exists, targetgroup, target): target.return_value = 'iqn-0' targetgroup.return_value = '1.1.1.1-0' lu_exists.return_value = False get_lun.return_value = 0 self.assertEqual( {'provider_location': '1.1.1.1:8080,1 iqn-0 0'}, self.drv._do_export({}, self.TEST_VOLUME_REF)) @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_targetgroup_name') @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 
'NexentaISCSIDriver._get_lun_id') def test_remove_export(self, lun_id, tg_name): lun_id.return_value = '0' tg_name.return_value = '1.1.1.1-0' self.nef_mock.delete.side_effect = exception.NexentaException( 'No such logical unit in target group') self.assertIsNone( self.drv.remove_export(self.ctxt, self.TEST_VOLUME_REF)) self.nef_mock.delete.side_effect = exception.NexentaException( 'Error') self.assertRaises( exception.NexentaException, self.drv.remove_export, self.ctxt, self.TEST_VOLUME_REF) lun_id.side_effect = LookupError() self.assertIsNone( self.drv.remove_export(self.ctxt, self.TEST_VOLUME_REF)) def test_update_volume_stats(self): self.nef_mock.get.return_value = { 'bytesAvailable': 10 * units.Gi, 'bytesUsed': 2 * units.Gi } location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % { 'driver': self.drv.__class__.__name__, 'host': self.cfg.nexenta_host, 'pool': self.cfg.nexenta_volume, 'group': self.cfg.nexenta_volume_group, } stats = { 'vendor_name': 'Nexenta', 'dedup': self.cfg.nexenta_dataset_dedup, 'compression': self.cfg.nexenta_dataset_compression, 'description': self.cfg.nexenta_dataset_description, 'driver_version': self.drv.VERSION, 'storage_protocol': 'iSCSI', 'total_capacity_gb': 10, 'free_capacity_gb': 8, 'reserved_percentage': self.cfg.reserved_percentage, 'QoS_support': False, 'volume_backend_name': self.drv.backend_name, 'location_info': location_info, 'iscsi_target_portal_port': ( self.cfg.nexenta_iscsi_target_portal_port), 'nef_url': self.drv.nef.url } self.drv._update_volume_stats() self.assertEqual(stats, self.drv._stats) class TestNexentaJSONProxy(test.TestCase): def __init__(self, method): super(TestNexentaJSONProxy, self).__init__(method) @patch('requests.Response.close') @patch('requests.get') @patch('requests.post') def test_call(self, post, get, close): nef_get = jsonrpc.NexentaJSONProxy( 'http', '1.1.1.1', '8080', 'user', 'pass', method='get') nef_post = jsonrpc.NexentaJSONProxy( 'http', '1.1.1.1', '8080', 'user', 'pass', method='post') data = {'key': 'value'} get.return_value = requests.Response() post.return_value = requests.Response() get.return_value.__setstate__({ 'status_code': 200, '_content': jsonutils.dumps(data)}) self.assertEqual({'key': 'value'}, nef_get('url')) get.return_value.__setstate__({ 'status_code': 201, '_content': ''}) self.assertEqual('Success', nef_get('url')) data2 = {'links': [{'href': 'redirect_url'}]} post.return_value.__setstate__({ 'status_code': 202, '_content': jsonutils.dumps(data2)}) get.return_value.__setstate__({ 'status_code': 200, '_content': jsonutils.dumps(data)}) self.assertEqual({'key': 'value'}, nef_post('url')) get.return_value.__setstate__({ 'status_code': 200, '_content': ''}) self.assertEqual('Success', nef_post('url', data)) get.return_value.__setstate__({ 'status_code': 400, '_content': jsonutils.dumps({'code': 'ENOENT'})}) self.assertRaises(exception.NexentaException, lambda: nef_get('url')) cinder-8.0.0/cinder/tests/unit/image/0000775000567000056710000000000012701406543020575 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/image/__init__.py0000664000567000056710000000000012701406250022667 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/image/test_cache.py0000664000567000056710000002646012701406250023254 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Pure Storage, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import timedelta import mock from oslo_utils import timeutils from cinder import context as ctxt from cinder.image import cache as image_cache from cinder import test class ImageVolumeCacheTestCase(test.TestCase): def setUp(self): super(ImageVolumeCacheTestCase, self).setUp() self.mock_db = mock.Mock() self.mock_volume_api = mock.Mock() self.context = ctxt.get_admin_context() def _build_cache(self, max_gb=0, max_count=0): cache = image_cache.ImageVolumeCache(self.mock_db, self.mock_volume_api, max_gb, max_count) cache.notifier = self.notifier return cache def _build_entry(self, size=10): entry = { 'id': 1, 'host': 'test@foo#bar', 'image_id': 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2', 'image_updated_at': timeutils.utcnow(with_timezone=True), 'volume_id': '70a599e0-31e7-49b7-b260-868f441e862b', 'size': size, 'last_used': timeutils.utcnow(with_timezone=True) } return entry def test_get_by_image_volume(self): cache = self._build_cache() ret = {'id': 1} volume_id = '70a599e0-31e7-49b7-b260-868f441e862b' self.mock_db.image_volume_cache_get_by_volume_id.return_value = ret entry = cache.get_by_image_volume(self.context, volume_id) self.assertEqual(ret, entry) self.mock_db.image_volume_cache_get_by_volume_id.return_value = None entry = cache.get_by_image_volume(self.context, volume_id) self.assertIsNone(entry) def test_evict(self): cache = self._build_cache() entry = self._build_entry() cache.evict(self.context, entry) self.mock_db.image_volume_cache_delete.assert_called_once_with( self.context, entry['volume_id'] ) msg = self.notifier.notifications[0] self.assertEqual('image_volume_cache.evict', msg['event_type']) self.assertEqual('INFO', msg['priority']) self.assertEqual(entry['host'], msg['payload']['host']) self.assertEqual(entry['image_id'], msg['payload']['image_id']) self.assertEqual(1, len(self.notifier.notifications)) def test_get_entry(self): cache = self._build_cache() entry = self._build_entry() volume_ref = { 'host': 'foo@bar#whatever' } image_meta = { 'is_public': True, 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', 'properties': { 'virtual_size': '1.7' }, 'updated_at': entry['image_updated_at'] } (self.mock_db. image_volume_cache_get_and_update_last_used.return_value) = entry found_entry = cache.get_entry(self.context, volume_ref, entry['image_id'], image_meta) self.assertDictMatch(entry, found_entry) (self.mock_db. 
image_volume_cache_get_and_update_last_used.assert_called_once_with)( self.context, entry['image_id'], volume_ref['host'] ) msg = self.notifier.notifications[0] self.assertEqual('image_volume_cache.hit', msg['event_type']) self.assertEqual('INFO', msg['priority']) self.assertEqual(entry['host'], msg['payload']['host']) self.assertEqual(entry['image_id'], msg['payload']['image_id']) self.assertEqual(1, len(self.notifier.notifications)) def test_get_entry_not_exists(self): cache = self._build_cache() volume_ref = { 'host': 'foo@bar#whatever' } image_meta = { 'is_public': True, 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', 'properties': { 'virtual_size': '1.7' }, 'updated_at': timeutils.utcnow(with_timezone=True) } image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' (self.mock_db. image_volume_cache_get_and_update_last_used.return_value) = None found_entry = cache.get_entry(self.context, volume_ref, image_id, image_meta) self.assertIsNone(found_entry) msg = self.notifier.notifications[0] self.assertEqual('image_volume_cache.miss', msg['event_type']) self.assertEqual('INFO', msg['priority']) self.assertEqual(volume_ref['host'], msg['payload']['host']) self.assertEqual(image_id, msg['payload']['image_id']) self.assertEqual(1, len(self.notifier.notifications)) def test_get_entry_needs_update(self): cache = self._build_cache() entry = self._build_entry() volume_ref = { 'host': 'foo@bar#whatever' } image_meta = { 'is_public': True, 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', 'properties': { 'virtual_size': '1.7' }, 'updated_at': entry['image_updated_at'] + timedelta(hours=2) } (self.mock_db. image_volume_cache_get_and_update_last_used.return_value) = entry mock_volume = mock.Mock() self.mock_db.volume_get.return_value = mock_volume found_entry = cache.get_entry(self.context, volume_ref, entry['image_id'], image_meta) # Expect that the cache entry is not returned and the image-volume # for it is deleted. 
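        # NOTE: the staleness check exercised here reduces to a timestamp
        # comparison inside ImageVolumeCache.get_entry(); a minimal sketch
        # of the idea (not the exact source) is:
        #
        #     if image_meta['updated_at'] > entry['image_updated_at']:
        #         # Glance image changed after we cached it: delete the
        #         # cached image-volume and treat this lookup as a miss.
        #         self._delete_image_volume(context, entry)
        #         entry = None
        #
        # which is why this test builds image_meta with updated_at two
        # hours after the cached entry's image_updated_at.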
self.assertIsNone(found_entry) self.mock_volume_api.delete.assert_called_with(self.context, mock_volume) msg = self.notifier.notifications[0] self.assertEqual('image_volume_cache.miss', msg['event_type']) self.assertEqual('INFO', msg['priority']) self.assertEqual(volume_ref['host'], msg['payload']['host']) self.assertEqual(entry['image_id'], msg['payload']['image_id']) self.assertEqual(1, len(self.notifier.notifications)) def test_create_cache_entry(self): cache = self._build_cache() entry = self._build_entry() volume_ref = { 'id': entry['volume_id'], 'host': entry['host'], 'size': entry['size'] } image_meta = { 'updated_at': entry['image_updated_at'] } self.mock_db.image_volume_cache_create.return_value = entry created_entry = cache.create_cache_entry(self.context, volume_ref, entry['image_id'], image_meta) self.assertEqual(entry, created_entry) self.mock_db.image_volume_cache_create.assert_called_once_with( self.context, entry['host'], entry['image_id'], entry['image_updated_at'].replace(tzinfo=None), entry['volume_id'], entry['size'] ) def test_ensure_space_unlimited(self): cache = self._build_cache(max_gb=0, max_count=0) host = 'foo@bar#whatever' has_space = cache.ensure_space(self.context, 0, host) self.assertTrue(has_space) has_space = cache.ensure_space(self.context, 500, host) self.assertTrue(has_space) def test_ensure_space_no_entries(self): cache = self._build_cache(max_gb=100, max_count=10) host = 'foo@bar#whatever' self.mock_db.image_volume_cache_get_all_for_host.return_value = [] has_space = cache.ensure_space(self.context, 5, host) self.assertTrue(has_space) has_space = cache.ensure_space(self.context, 101, host) self.assertFalse(has_space) def test_ensure_space_need_gb(self): cache = self._build_cache(max_gb=30, max_count=10) mock_delete = mock.patch.object(cache, '_delete_image_volume').start() host = 'foo@bar#whatever' entries = [] entry1 = self._build_entry(size=12) entries.append(entry1) entry2 = self._build_entry(size=5) entries.append(entry2) entry3 = self._build_entry(size=10) entries.append(entry3) self.mock_db.image_volume_cache_get_all_for_host.return_value = entries has_space = cache.ensure_space(self.context, 15, host) self.assertTrue(has_space) self.assertEqual(2, mock_delete.call_count) mock_delete.assert_any_call(self.context, entry2) mock_delete.assert_any_call(self.context, entry3) def test_ensure_space_need_count(self): cache = self._build_cache(max_gb=30, max_count=2) mock_delete = mock.patch.object(cache, '_delete_image_volume').start() host = 'foo@bar#whatever' entries = [] entry1 = self._build_entry(size=10) entries.append(entry1) entry2 = self._build_entry(size=5) entries.append(entry2) self.mock_db.image_volume_cache_get_all_for_host.return_value = entries has_space = cache.ensure_space(self.context, 12, host) self.assertTrue(has_space) self.assertEqual(1, mock_delete.call_count) mock_delete.assert_any_call(self.context, entry2) def test_ensure_space_need_gb_and_count(self): cache = self._build_cache(max_gb=30, max_count=3) mock_delete = mock.patch.object(cache, '_delete_image_volume').start() host = 'foo@bar#whatever' entries = [] entry1 = self._build_entry(size=10) entries.append(entry1) entry2 = self._build_entry(size=5) entries.append(entry2) entry3 = self._build_entry(size=12) entries.append(entry3) self.mock_db.image_volume_cache_get_all_for_host.return_value = entries has_space = cache.ensure_space(self.context, 16, host) self.assertTrue(has_space) self.assertEqual(2, mock_delete.call_count) mock_delete.assert_any_call(self.context, entry2) 
        mock_delete.assert_any_call(self.context, entry3)

    def test_ensure_space_cant_free_enough_gb(self):
        cache = self._build_cache(max_gb=30, max_count=10)
        mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
        host = 'foo@bar#whatever'
        # NOTE: _build_entry() returns a single entry dict; it must be
        # wrapped in a list here (list(dict) would only yield the keys).
        entries = [self._build_entry(size=25)]
        self.mock_db.image_volume_cache_get_all_for_host.return_value = entries
        has_space = cache.ensure_space(self.context, 50, host)
        self.assertFalse(has_space)
        mock_delete.assert_not_called()
cinder-8.0.0/cinder/tests/unit/image/test_glance.py0000664000567000056710000007356112701406257023455 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

import glanceclient.exc
import mock
from oslo_config import cfg

from cinder import context
from cinder import exception
from cinder.image import glance
from cinder import test
from cinder.tests.unit.glance import stubs as glance_stubs

CONF = cfg.CONF


class NullWriter(object):
    """Used to test ImageService.get which takes a writer object."""

    def write(self, *arg, **kwargs):
        pass


class TestGlanceSerializer(test.TestCase):
    def test_serialize(self):
        metadata = {'name': 'image1',
                    'is_public': True,
                    'foo': 'bar',
                    'properties': {
                        'prop1': 'propvalue1',
                        'mappings': [
                            {'device': 'bbb'},
                            {'device': 'yyy'}],
                        'block_device_mapping': [
                            {'device_name': '/dev/fake'},
                            {'device_name': '/dev/fake0'}]}}

        converted_expected = {
            'name': 'image1',
            'is_public': True,
            'foo': 'bar',
            'properties': {
                'prop1': 'propvalue1',
                'mappings': '[{"device": "bbb"}, '
                            '{"device": "yyy"}]',
                'block_device_mapping': '[{"device_name": "/dev/fake"}, '
                                        '{"device_name": "/dev/fake0"}]'}}
        converted = glance._convert_to_string(metadata)
        self.assertEqual(converted_expected, converted)
        self.assertEqual(metadata, glance._convert_from_string(converted))


class TestGlanceImageService(test.TestCase):
    """Tests the Glance image service.

    At a high level, the translations involved are:

    1. Glance -> ImageService - This is needed so we can support
       multiple ImageServices (Glance, Local, etc)

    2. ImageService -> API - This is needed so we can support multiple
       APIs (OpenStack, EC2)
    """

    NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
    NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"

    class tzinfo(datetime.tzinfo):
        @staticmethod
        def utcoffset(*args, **kwargs):
            return datetime.timedelta()

    NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22,
                                     tzinfo=tzinfo())

    def setUp(self):
        super(TestGlanceImageService, self).setUp()
        client = glance_stubs.StubGlanceClient()
        self.service = self._create_image_service(client)
        self.context = context.RequestContext('fake', 'fake', auth_token=True)
        self.stubs.Set(glance.time, 'sleep', lambda s: None)

    def _create_image_service(self, client):
        def _fake_create_glance_client(context, netloc, use_ssl, version):
            return client
        self.stubs.Set(glance, '_create_glance_client',
                       _fake_create_glance_client)

        client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292)
        return glance.GlanceImageService(client=client_wrapper)

    @staticmethod
    def _make_fixture(**kwargs):
        fixture = {'name': None,
                   'properties': {},
                   'status': None,
                   'is_public': None}
        fixture.update(kwargs)
        return fixture

    def _make_datetime_fixture(self):
        return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
                                  updated_at=self.NOW_GLANCE_FORMAT,
                                  deleted_at=self.NOW_GLANCE_FORMAT)

    def test_create_with_instance_id(self):
        """Ensure instance_id is persisted as an image-property."""
        fixture = {'name': 'test image',
                   'is_public': False,
                   'properties': {'instance_id': '42', 'user_id': 'fake'}}
        image_id = self.service.create(self.context, fixture)['id']

        image_meta = self.service.show(self.context, image_id)
        expected = {
            'id': image_id,
            'name': 'test image',
            'is_public': False,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {'instance_id': '42', 'user_id': 'fake'},
            'owner': None,
        }
        self.assertDictMatch(expected, image_meta)

        image_metas = self.service.detail(self.context)
        self.assertDictMatch(expected, image_metas[0])

    def test_create_without_instance_id(self):
        """Test creating images without instance_id.

        Ensure we can create an image without having to specify an
        instance_id. Public images are an example of an image not tied to an
        instance.
""" fixture = {'name': 'test image', 'is_public': False} image_id = self.service.create(self.context, fixture)['id'] expected = { 'id': image_id, 'name': 'test image', 'is_public': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, } actual = self.service.show(self.context, image_id) self.assertDictMatch(expected, actual) def test_create(self): fixture = self._make_fixture(name='test image') num_images = len(self.service.detail(self.context)) image_id = self.service.create(self.context, fixture)['id'] self.assertIsNotNone(image_id) self.assertEqual(num_images + 1, len(self.service.detail(self.context))) def test_create_and_show_non_existing_image(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] self.assertIsNotNone(image_id) self.assertRaises(exception.ImageNotFound, self.service.show, self.context, 'bad image id') def test_detail_private_image(self): fixture = self._make_fixture(name='test image') fixture['is_public'] = False properties = {'owner_id': 'proj1'} fixture['properties'] = properties self.service.create(self.context, fixture) proj = self.context.project_id self.context.project_id = 'proj1' image_metas = self.service.detail(self.context) self.context.project_id = proj self.assertEqual(1, len(image_metas)) self.assertEqual('test image', image_metas[0]['name']) self.assertFalse(image_metas[0]['is_public']) def test_detail_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[1]) self.assertEqual(8, len(image_metas)) i = 2 for meta in image_metas: expected = { 'id': ids[i], 'status': None, 'is_public': None, 'name': 'TestImage %d' % (i), 'properties': {}, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'owner': None, } self.assertDictMatch(expected, meta) i = i + 1 def test_detail_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, limit=5) self.assertEqual(5, len(image_metas)) def test_detail_default_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context) for i, meta in enumerate(image_metas): self.assertEqual(meta['name'], 'TestImage %d' % (i)) def test_detail_marker_and_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[3], limit=5) self.assertEqual(5, len(image_metas)) i = 4 for meta in image_metas: expected = { 'id': ids[i], 'status': None, 'is_public': None, 'name': 'TestImage %d' % (i), 'properties': {}, 'size': None, 'min_disk': None, 'min_ram': 
None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'owner': None, } self.assertDictMatch(expected, meta) i = i + 1 def test_detail_invalid_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) self.assertRaises(exception.Invalid, self.service.detail, self.context, marker='invalidmarker') def test_update(self): fixture = self._make_fixture(name='test image') image = self.service.create(self.context, fixture) image_id = image['id'] fixture['name'] = 'new image name' self.service.update(self.context, image_id, fixture) new_image_data = self.service.show(self.context, image_id) self.assertEqual('new image name', new_image_data['name']) def test_update_v2(self): self.flags(glance_api_version=2) self.test_update() def test_update_with_data(self): fixture = self._make_fixture(name='test image') image = self.service.create(self.context, fixture) image_id = image['id'] data = '*' * 256 self.service.update(self.context, image_id, fixture, data=data) new_image_data = self.service.show(self.context, image_id) self.assertEqual(256, new_image_data['size']) def test_update_with_data_v2(self): self.flags(glance_api_version=2) self.test_update_with_data() def test_delete(self): fixture1 = self._make_fixture(name='test image 1') fixture2 = self._make_fixture(name='test image 2') fixtures = [fixture1, fixture2] num_images = len(self.service.detail(self.context)) self.assertEqual(0, num_images) ids = [] for fixture in fixtures: new_id = self.service.create(self.context, fixture)['id'] ids.append(new_id) num_images = len(self.service.detail(self.context)) self.assertEqual(2, num_images) self.service.delete(self.context, ids[0]) num_images = len(self.service.detail(self.context)) self.assertEqual(1, num_images) def test_show_passes_through_to_client(self): fixture = self._make_fixture(name='image1', is_public=True) image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'image1', 'is_public': True, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, } self.assertEqual(expected, image_meta) def test_show_raises_when_no_authtoken_in_the_context(self): fixture = self._make_fixture(name='image1', is_public=False, properties={'one': 'two'}) image_id = self.service.create(self.context, fixture)['id'] self.context.auth_token = False self.assertRaises(exception.ImageNotFound, self.service.show, self.context, image_id) def test_detail_passes_through_to_client(self): fixture = self._make_fixture(name='image10', is_public=True) image_id = self.service.create(self.context, fixture)['id'] image_metas = self.service.detail(self.context) expected = [ { 'id': image_id, 'name': 'image10', 'is_public': True, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, }, ] self.assertEqual(expected, image_metas) def test_show_makes_datetimes(self): fixture = 
self._make_datetime_fixture() image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) self.assertEqual(self.NOW_DATETIME, image_meta['created_at']) self.assertEqual(self.NOW_DATETIME, image_meta['updated_at']) def test_detail_makes_datetimes(self): fixture = self._make_datetime_fixture() self.service.create(self.context, fixture) image_meta = self.service.detail(self.context)[0] self.assertEqual(self.NOW_DATETIME, image_meta['created_at']) self.assertEqual(self.NOW_DATETIME, image_meta['updated_at']) def test_download_with_retries(self): tries = [0] class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that fails the first time, then succeeds.""" def get(self, image_id): if tries[0] == 0: tries[0] = 1 raise glanceclient.exc.ServiceUnavailable('') else: return {} client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() # When retries are disabled, we should get an exception self.flags(glance_num_retries=0) self.assertRaises(exception.GlanceConnectionFailed, service.download, self.context, image_id, writer) # Now lets enable retries. No exception should happen now. tries = [0] self.flags(glance_num_retries=1) service.download(self.context, image_id, writer) def test_client_forbidden_converts_to_imagenotauthed(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a Forbidden exception.""" def get(self, image_id): raise glanceclient.exc.Forbidden(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotAuthorized, service.download, self.context, image_id, writer) def test_client_httpforbidden_converts_to_imagenotauthed(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a HTTPForbidden exception.""" def get(self, image_id): raise glanceclient.exc.HTTPForbidden(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotAuthorized, service.download, self.context, image_id, writer) def test_client_notfound_converts_to_imagenotfound(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a NotFound exception.""" def get(self, image_id): raise glanceclient.exc.NotFound(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotFound, service.download, self.context, image_id, writer) def test_client_httpnotfound_converts_to_imagenotfound(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a HTTPNotFound exception.""" def get(self, image_id): raise glanceclient.exc.HTTPNotFound(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotFound, service.download, self.context, image_id, writer) @mock.patch('six.moves.builtins.open') @mock.patch('shutil.copyfileobj') def test_download_from_direct_file(self, mock_copyfileobj, mock_open): fixture = self._make_fixture(name='test image', locations=[{'url': 'file:///tmp/test'}]) image_id = self.service.create(self.context, fixture)['id'] writer = NullWriter() self.flags(allowed_direct_url_schemes=['file']) 
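        # NOTE: with 'file' in allowed_direct_url_schemes, the download path
        # is expected to bypass the glance HTTP API and copy straight from
        # the image's local file location -- conceptually something like
        # (a sketch, not the exact source):
        #
        #     with open('/tmp/test') as image_file:
        #         shutil.copyfileobj(image_file, writer)
        #
        # hence the assertion below that shutil.copyfileobj was called once
        # with the writer.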
self.flags(glance_api_version=2) self.service.download(self.context, image_id, writer) mock_copyfileobj.assert_called_once_with(mock.ANY, writer) @mock.patch('six.moves.builtins.open') @mock.patch('shutil.copyfileobj') def test_download_from_direct_file_non_file(self, mock_copyfileobj, mock_open): fixture = self._make_fixture(name='test image', direct_url='swift+http://test/image') image_id = self.service.create(self.context, fixture)['id'] writer = NullWriter() self.flags(allowed_direct_url_schemes=['file']) self.flags(glance_api_version=2) self.service.download(self.context, image_id, writer) self.assertIsNone(mock_copyfileobj.call_args) def test_glance_client_image_id(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] (_service, same_id) = glance.get_remote_image_service(self.context, image_id) self.assertEqual(same_id, image_id) def test_glance_client_image_ref(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] image_url = 'http://something-less-likely/%s' % image_id (service, same_id) = glance.get_remote_image_service(self.context, image_url) self.assertEqual(same_id, image_id) self.assertEqual('something-less-likely', service._client.netloc) for ipv6_url in ('[::1]', '::1', '[::1]:444'): image_url = 'http://%s/%s' % (ipv6_url, image_id) (service, same_id) = glance.get_remote_image_service(self.context, image_url) self.assertEqual(same_id, image_id) self.assertEqual(ipv6_url, service._client.netloc) def test_extracting_missing_attributes(self): """Verify behavior from glance objects that are missing attributes This fakes the image class and is missing the checksum and name attribute as the client would return if they're not set in the database. Regression test for bug #1308058. 
""" class MyFakeGlanceImage(glance_stubs.FakeImage): def __init__(self, metadata): IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', 'container_format', 'id', 'created_at', 'updated_at', 'deleted', 'status', 'min_disk', 'min_ram', 'is_public'] raw = dict.fromkeys(IMAGE_ATTRIBUTES) raw.update(metadata) self.__dict__['raw'] = raw metadata = { 'id': 1, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, } image = MyFakeGlanceImage(metadata) actual = glance._extract_attributes(image) expected = { 'id': 1, 'name': None, 'is_public': None, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, } self.assertEqual(expected, actual) @mock.patch('cinder.image.glance.CONF') def test_extracting_v2_boot_properties(self, config): config.glance_api_version = 2 config.glance_num_retries = 0 metadata = { 'id': 1, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'kernel_id': 'foo', 'ramdisk_id': 'bar', } image = glance_stubs.FakeImage(metadata) client = glance_stubs.StubGlanceClient() service = self._create_image_service(client) service._image_schema = glance_stubs.FakeSchema() actual = service._translate_from_glance('fake_context', image) expected = { 'id': 1, 'name': None, 'is_public': None, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'disk_format': None, 'container_format': None, 'checksum': None, 'deleted': None, 'status': None, 'properties': {'kernel_id': 'foo', 'ramdisk_id': 'bar'}, 'owner': None, 'created_at': None, 'updated_at': None } self.assertEqual(expected, actual) def test_translate_to_glance(self): self.flags(glance_api_version=1) client = glance_stubs.StubGlanceClient() service = self._create_image_service(client) metadata = { 'id': 1, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'properties': {'kernel_id': 'foo', 'ramdisk_id': 'bar', 'x_billinginfo': '123'}, } actual = service._translate_to_glance(metadata) expected = metadata self.assertEqual(expected, actual) def test_translate_to_glance_v2(self): self.flags(glance_api_version=2) client = glance_stubs.StubGlanceClient() service = self._create_image_service(client) metadata = { 'id': 1, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'properties': {'kernel_id': 'foo', 'ramdisk_id': 'bar', 'x_billinginfo': '123'}, } actual = service._translate_to_glance(metadata) expected = { 'id': 1, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'kernel_id': 'foo', 'ramdisk_id': 'bar', 'x_billinginfo': '123', } self.assertEqual(expected, actual) class TestGlanceClientVersion(test.TestCase): """Tests the version of the glance client generated.""" @mock.patch('cinder.image.glance.glanceclient.Client') def test_glance_version_by_flag(self, _mockglanceclient): """Test glance version set by flag is honoured.""" glance.GlanceClientWrapper('fake', 'fake_host', 9292) self.assertEqual('1', _mockglanceclient.call_args[0][0]) self.flags(glance_api_version=2) glance.GlanceClientWrapper('fake', 'fake_host', 9292) self.assertEqual('2', _mockglanceclient.call_args[0][0]) CONF.reset() @mock.patch('cinder.image.glance.glanceclient.Client') def test_glance_version_by_arg(self, _mockglanceclient): """Test glance version set by arg to GlanceClientWrapper""" glance.GlanceClientWrapper('fake', 'fake_host', 9292, version=1) self.assertEqual('1', _mockglanceclient.call_args[0][0]) glance.GlanceClientWrapper('fake', 'fake_host', 9292, version=2) self.assertEqual('2', 
_mockglanceclient.call_args[0][0]) @mock.patch('cinder.image.glance.glanceclient.Client') def test_call_glance_version_by_arg(self, _mockglanceclient): """Test glance version set by arg to GlanceClientWrapper""" glance_wrapper = glance.GlanceClientWrapper() glance_wrapper.call('fake_context', 'method', version=2) self.assertEqual('2', _mockglanceclient.call_args[0][0]) def _create_failing_glance_client(info): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that fails the first time, then succeeds.""" def get(self, image_id): info['num_calls'] += 1 if info['num_calls'] == 1: raise glanceclient.exc.ServiceUnavailable('') return {} return MyGlanceStubClient() class TestGlanceImageServiceClient(test.TestCase): def setUp(self): super(TestGlanceImageServiceClient, self).setUp() self.context = context.RequestContext('fake', 'fake', auth_token=True) self.stubs.Set(glance.time, 'sleep', lambda s: None) def test_create_glance_client(self): self.flags(auth_strategy='keystone') self.flags(glance_request_timeout=60) class MyGlanceStubClient(object): def __init__(inst, version, *args, **kwargs): self.assertEqual('1', version) self.assertEqual("http://fake_host:9292", args[0]) self.assertTrue(kwargs['token']) self.assertEqual(60, kwargs['timeout']) self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', False) self.assertIsInstance(client, MyGlanceStubClient) def test_create_glance_client_auth_strategy_is_not_keystone(self): self.flags(auth_strategy='noauth') self.flags(glance_request_timeout=60) class MyGlanceStubClient(object): def __init__(inst, version, *args, **kwargs): self.assertEqual('1', version) self.assertEqual('http://fake_host:9292', args[0]) self.assertNotIn('token', kwargs) self.assertEqual(60, kwargs['timeout']) self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', False) self.assertIsInstance(client, MyGlanceStubClient) def test_create_glance_client_glance_request_default_timeout(self): self.flags(auth_strategy='keystone') self.flags(glance_request_timeout=None) class MyGlanceStubClient(object): def __init__(inst, version, *args, **kwargs): self.assertEqual("1", version) self.assertEqual("http://fake_host:9292", args[0]) self.assertTrue(kwargs['token']) self.assertNotIn('timeout', kwargs) self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', False) self.assertIsInstance(client, MyGlanceStubClient) cinder-8.0.0/cinder/tests/unit/image/fake.py0000664000567000056710000002074612701406257022070 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
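# NOTE: this module provides an in-memory, dict-backed stand-in for the
# glance-backed image service, pre-seeded with seven fixture images. A
# typical way tests wire it in (using the helpers defined at the bottom of
# this module) looks roughly like:
#
#     from cinder.tests.unit.image import fake
#
#     fake.stub_out_image_service(self.stubs)   # patch glance lookups
#     image_service = fake.FakeImageService()
#     meta = image_service.show(ctx, '155d900f-4e14-4e4c-a73d-069cbf4541e6')
#
#     fake.FakeImageService_reset()             # restore pristine fixtures
#
# Everything below is the fake itself.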
"""Implementation of a fake image service.""" import copy import datetime import mock import uuid from cinder import exception import cinder.image.glance class _FakeImageService(object): """Mock (fake) image service for unit testing.""" def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3) image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'raw', 'disk_format': 'raw', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64'}} image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}} image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': None, 'disk_format': None, 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}} image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}} image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': { 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'ramdisk_id': None}} image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', 'name': 'fakeimage6', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64', 'auto_disk_config': 'False'}} image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b', 'name': 'fakeimage7', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64', 'auto_disk_config': 'True'}} self.create(None, image1) self.create(None, image2) self.create(None, image3) self.create(None, image4) self.create(None, image5) self.create(None, image6) self.create(None, image7) self._imagedata = {} self.temp_images = mock.MagicMock() super(_FakeImageService, self).__init__() # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) def download(self, context, image_id, data): self.show(context, image_id) data.write(self._imagedata.get(image_id, '')) def show(self, context, image_id): """Get data about specified image. 
Returns a dict containing image data for the given opaque image id. """ image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) raise exception.ImageNotFound(image_id=image_id) def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. """ image_id = str(metadata.get('id', uuid.uuid4())) metadata['id'] = image_id if image_id in self.images: raise exception.Duplicate() self.images[image_id] = copy.deepcopy(metadata) if data: self._imagedata[image_id] = data.read() return self.images[image_id] def update(self, context, image_id, metadata, data=None, purge_props=False): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. """ if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) if purge_props: self.images[image_id] = copy.deepcopy(metadata) else: image = self.images[image_id] try: image['properties'].update(metadata.pop('properties')) except Exception: pass image.update(metadata) return self.images[image_id] def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. """ removed = self.images.pop(image_id, None) if not removed: raise exception.ImageNotFound(image_id=image_id) def get_location(self, context, image_id): if image_id in self.images: return 'fake_location' return None def add_location(self, context, image_id, url, metadata): self.update(context, image_id, {'locations': [{'url': url, 'metadata': metadata}]}) return True _fakeImageService = _FakeImageService() def FakeImageService(): return _fakeImageService def FakeImageService_reset(): global _fakeImageService _fakeImageService = _FakeImageService() def stub_out_image_service(stubs): stubs.Set(cinder.image.glance, 'get_remote_image_service', lambda x, y: (FakeImageService(), y)) stubs.Set(cinder.image.glance, 'get_default_image_service', lambda: FakeImageService()) cinder-8.0.0/cinder/tests/unit/test_vmware_datastore.py0000664000567000056710000004740712701406250024502 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for datastore module. 
""" import mock from oslo_utils import units from oslo_vmware import exceptions from cinder import test from cinder.volume.drivers.vmware import datastore as ds_sel from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions class DatastoreTest(test.TestCase): """Unit tests for Datastore.""" def setUp(self): super(DatastoreTest, self).setUp() self._session = mock.Mock() self._vops = mock.Mock() self._ds_sel = ds_sel.DatastoreSelector(self._vops, self._session) @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') def test_get_profile_id(self, get_profile_id_by_name): profile_id = mock.sentinel.profile_id get_profile_id_by_name.return_value = profile_id profile_name = mock.sentinel.profile_name self.assertEqual(profile_id, self._ds_sel.get_profile_id(profile_name)) get_profile_id_by_name.assert_called_once_with(self._session, profile_name) @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') def test_get_profile_id_with_invalid_profile(self, get_profile_id_by_name): get_profile_id_by_name.return_value = None profile_name = mock.sentinel.profile_name self.assertRaises(vmdk_exceptions.ProfileNotFoundException, self._ds_sel.get_profile_id, profile_name) get_profile_id_by_name.assert_called_once_with(self._session, profile_name) def _create_datastore(self, moref): return mock.Mock(value=moref) def _create_summary( self, ds, free_space=units.Mi, _type=ds_sel.DatastoreType.VMFS, capacity=2 * units.Mi): return mock.Mock(datastore=ds, freeSpace=free_space, type=_type, capacity=capacity) def _create_host(self, value): host = mock.Mock(spec=['_type', 'value']) host._type = 'HostSystem' host.value = value return host def test_filter_datastores_with_unsupported_type(self): ds_1 = self._create_datastore('ds-1') ds_2 = self._create_datastore('ds-2') datastores = [ds_1, ds_2] self._vops.get_summary.side_effect = [ self._create_summary(ds_1), self._create_summary(ds_2, _type='foo')] res = self._ds_sel._filter_datastores( datastores, units.Ki, None, None, None) self.assertEqual(1, len(res)) self.assertEqual(ds_1, res[0].datastore) @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' '_filter_by_profile') def test_filter_datastores(self, filter_by_profile): # Test with empty datastore list. datastores = [] size_bytes = 2 * units.Mi profile_id = mock.sentinel.profile_id hard_anti_affinity_datastores = None hard_affinity_ds_types = None self.assertEqual([], self._ds_sel._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types)) # Test with single datastore with hard anti-affinity. ds_1 = self._create_datastore('ds-1') datastores = [ds_1] hard_anti_affinity_datastores = [ds_1.value] self.assertEqual([], self._ds_sel._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types)) # Extend previous case with a profile non-compliant datastore. ds_2 = self._create_datastore('ds-2') datastores.append(ds_2) filter_by_profile.return_value = [] self.assertEqual([], self._ds_sel._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types)) filter_by_profile.assert_called_once_with([ds_2], profile_id) # Extend previous case with a less free space datastore. 
ds_3 = self._create_datastore('ds-3') datastores.append(ds_3) filter_by_profile.return_value = [ds_3] free_space_list = [units.Mi] type_list = [ds_sel.DatastoreType.NFS] self._vops.get_summary.side_effect = ( lambda ds: self._create_summary(ds, free_space_list.pop(0), type_list.pop(0))) self.assertEqual([], self._ds_sel._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types)) # Extend previous case with a datastore not satisfying hard affinity # datastore type requirement. ds_4 = self._create_datastore('ds-4') datastores.append(ds_4) filter_by_profile.return_value = [ds_3, ds_4] free_space_list = [units.Mi, 4 * units.Mi] type_list = [ds_sel.DatastoreType.NFS, ds_sel.DatastoreType.VSAN] hard_affinity_ds_types = [ds_sel.DatastoreType.NFS] self.assertEqual([], self._ds_sel._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types)) # Modify the previous case to remove hard affinity datastore type # requirement. free_space_list = [units.Mi, 4 * units.Mi] type_list = [ds_sel.DatastoreType.NFS, ds_sel.DatastoreType.VSAN] hard_affinity_ds_types = None res = self._ds_sel._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types) self.assertTrue(len(res) == 1) self.assertEqual(ds_4, res[0].datastore) # Extend the previous case by adding a datastore satisfying # hard affinity datastore type requirement. ds_5 = self._create_datastore('ds-5') datastores.append(ds_5) filter_by_profile.return_value = [ds_3, ds_4, ds_5] free_space_list = [units.Mi, 4 * units.Mi, 5 * units.Mi] type_list = [ds_sel.DatastoreType.NFS, ds_sel.DatastoreType.VSAN, ds_sel.DatastoreType.VMFS] hard_affinity_ds_types = [ds_sel.DatastoreType.VMFS] res = self._ds_sel._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types) self.assertTrue(len(res) == 1) self.assertEqual(ds_5, res[0].datastore) # Modify the previous case to have two datastores satisfying # hard affinity datastore type requirement. free_space_list = [units.Mi, 4 * units.Mi, 5 * units.Mi] type_list = [ds_sel.DatastoreType.NFS, ds_sel.DatastoreType.VSAN, ds_sel.DatastoreType.VSAN] hard_affinity_ds_types = [ds_sel.DatastoreType.VSAN] res = self._ds_sel._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types) self.assertTrue(len(res) == 2) self.assertEqual(ds_4, res[0].datastore) self.assertEqual(ds_5, res[1].datastore) # Clear side effects. self._vops.get_summary.side_effect = None def test_select_best_summary(self): # No tie-- all datastores with different host mount count. 
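        # NOTE: _select_best_summary prefers the datastore mounted on the
        # most hosts, breaking ties by picking the lower space utilization,
        # where (sketch):
        #
        #     utilization = (capacity - freeSpace) / capacity
        #
        # e.g. summary_3 below: (4Mi - 1Mi) / 4Mi = 3/4, which matches the
        # asserted best_utilization.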
summary_1 = self._create_summary(mock.sentinel.ds_1, free_space=units.Mi, capacity=2 * units.Mi) summary_2 = self._create_summary(mock.sentinel.ds_2, free_space=units.Mi, capacity=3 * units.Mi) summary_3 = self._create_summary(mock.sentinel.ds_3, free_space=units.Mi, capacity=4 * units.Mi) host_1 = self._create_host('host-1') host_2 = self._create_host('host-2') host_3 = self._create_host('host-3') connected_hosts = {mock.sentinel.ds_1: [host_1.value], mock.sentinel.ds_2: [host_1.value, host_2.value], mock.sentinel.ds_3: [host_1.value, host_2.value, host_3.value]} self._vops.get_connected_hosts.side_effect = ( lambda summary: connected_hosts[summary]) summaries = [summary_1, summary_2, summary_3] (best_summary, best_utilization) = self._ds_sel._select_best_summary( summaries) self.assertEqual(summary_3, best_summary) self.assertEqual(3 / 4.0, best_utilization) # Tie-- two datastores with max host mount count. summary_4 = self._create_summary(mock.sentinel.ds_4, free_space=2 * units.Mi, capacity=4 * units.Mi) connected_hosts[mock.sentinel.ds_4] = ( connected_hosts[mock.sentinel.ds_3]) summaries.append(summary_4) (best_summary, best_utilization) = self._ds_sel._select_best_summary( summaries) self.assertEqual(summary_4, best_summary) self.assertEqual(1 / 2.0, best_utilization) # Clear side effects. self._vops.get_connected_hosts.side_effect = None @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 'get_profile_id') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' '_filter_datastores') def test_select_datastore(self, filter_datastores, get_profile_id): # Test with no hosts. size_bytes = units.Ki req = {self._ds_sel.SIZE_BYTES: size_bytes} self._vops.get_hosts.return_value = mock.Mock(objects=[]) self.assertEqual((), self._ds_sel.select_datastore(req)) self._vops.get_hosts.assert_called_once_with() # Test with single host with no valid datastores. host_1 = self._create_host('host-1') self._vops.get_hosts.return_value = mock.Mock( objects=[mock.Mock(obj=host_1)]) self._vops.continue_retrieval.return_value = None self._vops.get_dss_rp.side_effect = exceptions.VimException('error') self.assertEqual((), self._ds_sel.select_datastore(req)) self._vops.get_dss_rp.assert_called_once_with(host_1) # Test with three hosts and vCenter connection problem while fetching # datastores for the second host. self._vops.get_dss_rp.reset_mock() host_2 = self._create_host('host-2') host_3 = self._create_host('host-3') self._vops.get_hosts.return_value = mock.Mock( objects=[mock.Mock(obj=host_1), mock.Mock(obj=host_2), mock.Mock(obj=host_3)]) self._vops.get_dss_rp.side_effect = [ exceptions.VimException('no valid datastores'), exceptions.VimConnectionException('connection error')] self.assertRaises(exceptions.VimConnectionException, self._ds_sel.select_datastore, req) get_dss_rp_exp_calls = [mock.call(host_1), mock.call(host_2)] self.assertEqual(get_dss_rp_exp_calls, self._vops.get_dss_rp.call_args_list) # Modify previous case to return datastores for second and third host, # where none of them meet the requirements which include a storage # profile and affinity requirements. 
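        # NOTE: the requirement keys consumed by select_datastore are the
        # DatastoreSelector constants used below; a request dict is shaped
        # roughly like:
        #
        #     req = {
        #         ds_sel.DatastoreSelector.SIZE_BYTES: size_bytes,
        #         ds_sel.DatastoreSelector.HARD_AFFINITY_DS_TYPE: [...],
        #         ds_sel.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [...],
        #         ds_sel.DatastoreSelector.PROFILE_NAME: profile_name,
        #         ds_sel.DatastoreSelector.PREF_UTIL_THRESH: 0.4,
        #     }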
        aff_ds_types = [ds_sel.DatastoreType.VMFS]
        req[ds_sel.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = aff_ds_types
        ds_1a = mock.sentinel.ds_1a
        anti_affinity_ds = [ds_1a]
        req[ds_sel.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = anti_affinity_ds
        profile_name = mock.sentinel.profile_name
        req[ds_sel.DatastoreSelector.PROFILE_NAME] = profile_name
        profile_id = mock.sentinel.profile_id
        get_profile_id.return_value = profile_id
        ds_2a = mock.sentinel.ds_2a
        ds_2b = mock.sentinel.ds_2b
        ds_3a = mock.sentinel.ds_3a
        self._vops.get_dss_rp.reset_mock()
        rp_2 = mock.sentinel.rp_2
        rp_3 = mock.sentinel.rp_3
        self._vops.get_dss_rp.side_effect = [
            exceptions.VimException('no valid datastores'),
            ([ds_2a, ds_2b], rp_2),
            ([ds_3a], rp_3)]
        filter_datastores.return_value = []
        self.assertEqual((), self._ds_sel.select_datastore(req))
        get_profile_id.assert_called_once_with(profile_name)
        get_dss_rp_exp_calls.append(mock.call(host_3))
        self.assertEqual(get_dss_rp_exp_calls,
                         self._vops.get_dss_rp.call_args_list)
        filter_datastores_exp_calls = [
            mock.call([ds_2a, ds_2b], size_bytes, profile_id,
                      anti_affinity_ds, aff_ds_types),
            mock.call([ds_3a], size_bytes, profile_id, anti_affinity_ds,
                      aff_ds_types)]
        self.assertEqual(filter_datastores_exp_calls,
                         filter_datastores.call_args_list)

        # Modify previous case to have a non-empty summary list after
        # filtering with preferred utilization threshold unset.
        self._vops.get_dss_rp.side_effect = [
            exceptions.VimException('no valid datastores'),
            ([ds_2a, ds_2b], rp_2),
            ([ds_3a], rp_3)]
        summary_2b = self._create_summary(ds_2b, free_space=0.5 * units.Mi,
                                          capacity=units.Mi)
        filter_datastores.side_effect = [[summary_2b]]
        self._vops.get_connected_hosts.return_value = [host_1]
        self.assertEqual((host_2, rp_2, summary_2b),
                         self._ds_sel.select_datastore(req))

        # Modify previous case to have a preferred utilization threshold
        # satisfied by one datastore.
        self._vops.get_dss_rp.side_effect = [
            exceptions.VimException('no valid datastores'),
            ([ds_2a, ds_2b], rp_2),
            ([ds_3a], rp_3)]
        req[ds_sel.DatastoreSelector.PREF_UTIL_THRESH] = 0.4
        summary_3a = self._create_summary(ds_3a, free_space=0.7 * units.Mi,
                                          capacity=units.Mi)
        filter_datastores.side_effect = [[summary_2b], [summary_3a]]
        self.assertEqual((host_3, rp_3, summary_3a),
                         self._ds_sel.select_datastore(req))

        # Modify previous case to have a preferred utilization threshold
        # which cannot be satisfied.
        self._vops.get_dss_rp.side_effect = [
            exceptions.VimException('no valid datastores'),
            ([ds_2a, ds_2b], rp_2),
            ([ds_3a], rp_3)]
        filter_datastores.side_effect = [[summary_2b], [summary_3a]]
        req[ds_sel.DatastoreSelector.PREF_UTIL_THRESH] = 0.2
        summary_2b.freeSpace = 0.75 * units.Mi
        self.assertEqual((host_2, rp_2, summary_2b),
                         self._ds_sel.select_datastore(req))

        # Clear side effects.
        self._vops.get_dss_rp.side_effect = None

    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
'_filter_datastores') def test_select_datastore_with_single_host(self, filter_datastores): host = self._create_host('host-1') req = {self._ds_sel.SIZE_BYTES: units.Gi} ds = mock.sentinel.ds rp = mock.sentinel.rp self._vops.get_dss_rp.return_value = ([ds], rp) summary = self._create_summary(ds, free_space=2 * units.Gi, capacity=3 * units.Gi) filter_datastores.return_value = [summary] self._vops.get_connected_hosts.return_value = [host.value] self.assertEqual((host, rp, summary), self._ds_sel.select_datastore(req, [host])) # reset mocks self._vops.get_dss_rp.reset_mock() self._vops.get_dss_rp.return_value = None self._vops.get_connected_hosts.reset_mock() self._vops.get_connected_hosts.return_value = None def test_select_datastore_with_empty_host_list(self): size_bytes = units.Ki req = {self._ds_sel.SIZE_BYTES: size_bytes} self._vops.get_hosts.return_value = mock.Mock(objects=[]) self.assertEqual((), self._ds_sel.select_datastore(req, hosts=[])) self._vops.get_hosts.assert_called_once_with() @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' '_filter_by_profile') def test_is_datastore_compliant(self, filter_by_profile, get_profile_id_by_name): # Test with empty profile. profile_name = None datastore = mock.sentinel.datastore self.assertTrue(self._ds_sel.is_datastore_compliant(datastore, profile_name)) # Test with invalid profile. profile_name = mock.sentinel.profile_name get_profile_id_by_name.return_value = None self.assertRaises(vmdk_exceptions.ProfileNotFoundException, self._ds_sel.is_datastore_compliant, datastore, profile_name) get_profile_id_by_name.assert_called_once_with(self._session, profile_name) # Test with valid profile and non-compliant datastore. get_profile_id_by_name.reset_mock() profile_id = mock.sentinel.profile_id get_profile_id_by_name.return_value = profile_id filter_by_profile.return_value = [] self.assertFalse(self._ds_sel.is_datastore_compliant(datastore, profile_name)) get_profile_id_by_name.assert_called_once_with(self._session, profile_name) filter_by_profile.assert_called_once_with([datastore], profile_id) # Test with valid profile and compliant datastore. get_profile_id_by_name.reset_mock() filter_by_profile.reset_mock() filter_by_profile.return_value = [datastore] self.assertTrue(self._ds_sel.is_datastore_compliant(datastore, profile_name)) get_profile_id_by_name.assert_called_once_with(self._session, profile_name) filter_by_profile.assert_called_once_with([datastore], profile_id) def test_get_all_hosts(self): host_1 = self._create_host('host-1') host_2 = self._create_host('host-2') hosts = mock.Mock(objects=[mock.Mock(obj=host_1), mock.Mock(obj=host_2)]) self._vops.get_hosts.return_value = hosts self._vops.continue_retrieval.return_value = None # host_1 is usable and host_2 is not usable self._vops.is_host_usable.side_effect = [True, False] ret = self._ds_sel._get_all_hosts() self.assertEqual([host_1], ret) self._vops.get_hosts.assert_called_once_with() self._vops.continue_retrieval.assert_called_once_with(hosts) exp_calls = [mock.call(host_1), mock.call(host_2)] self.assertEqual(exp_calls, self._vops.is_host_usable.call_args_list) cinder-8.0.0/cinder/tests/unit/test_tintri.py0000664000567000056710000003033412701406250022433 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Tintri. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver test for Tintri storage. """ import mock from oslo_utils import units from cinder import context from cinder import exception from cinder import test from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import utils as cinder_utils from cinder.volume.drivers.tintri import TClient from cinder.volume.drivers.tintri import TintriDriver class FakeImage(object): def __init__(self): self.id = 'image-id' self.name = 'image-name' self.properties = {'provider_location': 'nfs://share'} def __getitem__(self, key): return self.__dict__[key] class TintriDriverTestCase(test.TestCase): def setUp(self): super(TintriDriverTestCase, self).setUp() self.context = context.get_admin_context() kwargs = {'configuration': self.create_configuration()} self._driver = TintriDriver(**kwargs) self._driver._hostname = 'host' self._driver._username = 'user' self._driver._password = 'password' self._driver._api_version = 'v310' self._driver._image_cache_expiry = 30 self._provider_location = 'localhost:/share' self._driver._mounted_shares = [self._provider_location] self.fake_stubs() def create_configuration(self): configuration = mock.Mock() configuration.nfs_mount_point_base = '/mnt/test' configuration.nfs_mount_options = None configuration.nas_mount_options = None return configuration def fake_stubs(self): self.stubs.Set(TClient, 'login', self.fake_login) self.stubs.Set(TClient, 'logout', self.fake_logout) self.stubs.Set(TClient, 'get_snapshot', self.fake_get_snapshot) self.stubs.Set(TClient, 'get_image_snapshots_to_date', self.fake_get_image_snapshots_to_date) self.stubs.Set(TintriDriver, '_move_cloned_volume', self.fake_move_cloned_volume) self.stubs.Set(TintriDriver, '_get_provider_location', self.fake_get_provider_location) self.stubs.Set(TintriDriver, '_set_rw_permissions', self.fake_set_rw_permissions) self.stubs.Set(TintriDriver, '_is_volume_present', self.fake_is_volume_present) self.stubs.Set(TintriDriver, '_is_share_vol_compatible', self.fake_is_share_vol_compatible) self.stubs.Set(TintriDriver, '_is_file_size_equal', self.fake_is_file_size_equal) def fake_login(self, user_name, password): return 'session-id' def fake_logout(self): pass def fake_get_snapshot(self, volume_id): return 'snapshot-id' def fake_get_image_snapshots_to_date(self, date): return [{'uuid': {'uuid': 'image_snapshot-id'}}] def fake_move_cloned_volume(self, clone_name, volume_id, share=None): pass def fake_get_provider_location(self, volume_path): return self._provider_location def fake_set_rw_permissions(self, path): pass def fake_is_volume_present(self, volume_path): return True def fake_is_share_vol_compatible(self, volume, share): return True def fake_is_file_size_equal(self, path, size): return True @mock.patch.object(TClient, 'create_snapshot', mock.Mock(return_value='12345')) def test_create_snapshot(self): snapshot = fake_snapshot.fake_snapshot_obj(self.context) volume = fake_volume.fake_volume_obj(self.context) provider_id = '12345' snapshot.volume = volume with mock.patch('cinder.objects.snapshot.Snapshot.save'): self.assertEqual({'provider_id': '12345'}, 
self._driver.create_snapshot(snapshot)) self.assertEqual(provider_id, snapshot.provider_id) @mock.patch.object(TClient, 'create_snapshot', mock.Mock( side_effect=exception.VolumeDriverException)) def test_create_snapshot_failure(self): snapshot = fake_snapshot.fake_snapshot_obj(self.context) volume = fake_volume.fake_volume_obj(self.context) snapshot.volume = volume self.assertRaises(exception.VolumeDriverException, self._driver.create_snapshot, snapshot) @mock.patch.object(TClient, 'delete_snapshot', mock.Mock()) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_cleanup_cache(self): self.assertFalse(self._driver.cache_cleanup) timer = self._driver._initiate_image_cache_cleanup() # wait for cache cleanup to complete timer.wait() self.assertFalse(self._driver.cache_cleanup) @mock.patch.object(TClient, 'delete_snapshot', mock.Mock( side_effect=exception.VolumeDriverException)) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= cinder_utils.ZeroIntervalLoopingCall) def test_cleanup_cache_delete_fail(self): self.assertFalse(self._driver.cache_cleanup) timer = self._driver._initiate_image_cache_cleanup() # wait for cache cleanup to complete timer.wait() self.assertFalse(self._driver.cache_cleanup) @mock.patch.object(TClient, 'delete_snapshot', mock.Mock()) def test_delete_snapshot(self): snapshot = fake_snapshot.fake_snapshot_obj(self.context) snapshot.provider_id = 'snapshot-id' self.assertIsNone(self._driver.delete_snapshot(snapshot)) @mock.patch.object(TClient, 'delete_snapshot', mock.Mock( side_effect=exception.VolumeDriverException)) def test_delete_snapshot_failure(self): snapshot = fake_snapshot.fake_snapshot_obj(self.context) snapshot.provider_id = 'snapshot-id' self.assertRaises(exception.VolumeDriverException, self._driver.delete_snapshot, snapshot) @mock.patch.object(TClient, 'clone_volume', mock.Mock()) def test_create_volume_from_snapshot(self): snapshot = fake_snapshot.fake_snapshot_obj(self.context) volume = fake_volume.fake_volume_obj(self.context) self.assertEqual({'provider_location': self._provider_location}, self._driver.create_volume_from_snapshot( volume, snapshot)) @mock.patch.object(TClient, 'clone_volume', mock.Mock( side_effect=exception.VolumeDriverException)) def test_create_volume_from_snapshot_failure(self): snapshot = fake_snapshot.fake_snapshot_obj(self.context) volume = fake_volume.fake_volume_obj(self.context) self.assertRaises(exception.VolumeDriverException, self._driver.create_volume_from_snapshot, volume, snapshot) @mock.patch.object(TClient, 'clone_volume', mock.Mock()) @mock.patch.object(TClient, 'create_snapshot', mock.Mock()) def test_create_cloned_volume(self): volume = fake_volume.fake_volume_obj(self.context) self.assertEqual({'provider_location': self._provider_location}, self._driver.create_cloned_volume(volume, volume)) @mock.patch.object(TClient, 'clone_volume', mock.Mock( side_effect=exception.VolumeDriverException)) @mock.patch.object(TClient, 'create_snapshot', mock.Mock()) def test_create_cloned_volume_failure(self): volume = fake_volume.fake_volume_obj(self.context) self.assertRaises(exception.VolumeDriverException, self._driver.create_cloned_volume, volume, volume) @mock.patch.object(TClient, 'clone_volume', mock.Mock()) def test_clone_image(self): volume = fake_volume.fake_volume_obj(self.context) self.assertEqual(({'provider_location': self._provider_location, 'bootable': True}, True), self._driver.clone_image( None, volume, 'image-name', 
FakeImage().__dict__, None)) @mock.patch.object(TClient, 'clone_volume', mock.Mock( side_effect=exception.VolumeDriverException)) def test_clone_image_failure(self): volume = fake_volume.fake_volume_obj(self.context) self.assertEqual(({'provider_location': None, 'bootable': False}, False), self._driver.clone_image( None, volume, 'image-name', FakeImage().__dict__, None)) def test_manage_existing(self): volume = fake_volume.fake_volume_obj(self.context) existing = {'source-name': self._provider_location + '/' + volume.name} with mock.patch('os.path.isfile', return_value=True): self.assertEqual({'provider_location': self._provider_location}, self._driver.manage_existing(volume, existing)) def test_manage_existing_invalid_ref(self): existing = fake_volume.fake_volume_obj(self.context) volume = fake_volume.fake_volume_obj(self.context) self.assertRaises(exception.ManageExistingInvalidReference, self._driver.manage_existing, volume, existing) def test_manage_existing_not_found(self): volume = fake_volume.fake_volume_obj(self.context) existing = {'source-name': self._provider_location + '/' + volume.name} with mock.patch('os.path.isfile', return_value=False): self.assertRaises(exception.ManageExistingInvalidReference, self._driver.manage_existing, volume, existing) @mock.patch.object(TintriDriver, '_move_file', mock.Mock( return_value=False)) def test_manage_existing_move_failure(self): volume = fake_volume.fake_volume_obj(self.context) existing = {'source-name': self._provider_location + '/source-volume'} with mock.patch('os.path.isfile', return_value=True): self.assertRaises(exception.VolumeDriverException, self._driver.manage_existing, volume, existing) def test_manage_existing_get_size(self): volume = fake_volume.fake_volume_obj(self.context) existing = {'source-name': self._provider_location + '/' + volume.name} file = mock.Mock(st_size=123 * units.Gi) with mock.patch('os.path.isfile', return_value=True): with mock.patch('os.stat', return_value=file): self.assertEqual(float(file.st_size / units.Gi), self._driver.manage_existing_get_size( volume, existing)) def test_manage_existing_get_size_failure(self): volume = fake_volume.fake_volume_obj(self.context) existing = {'source-name': self._provider_location + '/' + volume.name} with mock.patch('os.path.isfile', return_value=True): with mock.patch('os.stat', side_effect=OSError): self.assertRaises(exception.VolumeDriverException, self._driver.manage_existing_get_size, volume, existing) def test_unmanage(self): volume = fake_volume.fake_volume_obj(self.context) volume.provider_location = self._provider_location self._driver.unmanage(volume) def test_retype(self): volume = fake_volume.fake_volume_obj(self.context) retype, update = self._driver.retype(None, volume, None, None, None) self.assertTrue(retype) self.assertIsNone(update) cinder-8.0.0/cinder/tests/unit/fake_vmem_client.py0000664000567000056710000000336312701406250023355 0ustar jenkinsjenkins00000000000000# Copyright 2014 Violin Memory, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Fake VMEM REST client for testing drivers. """ import sys import mock # The following gymnastics to fake an exception class globally is done because # we want to globally model and make available certain exceptions. If we do # not do this, then the real-driver's import will not see our fakes. class NoMatchingObjectIdError(Exception): pass error = mock.Mock() error.NoMatchingObjectIdError = NoMatchingObjectIdError core = mock.Mock() core.attach_mock(error, 'error') vmemclient = mock.Mock() vmemclient.__version__ = "unknown" vmemclient.attach_mock(core, 'core') sys.modules['vmemclient'] = vmemclient mock_client_conf = [ 'basic', 'basic.login', 'basic.get_node_values', 'basic.save_config', 'lun', 'lun.export_lun', 'lun.unexport_lun', 'snapshot', 'snapshot.export_lun_snapshot', 'snapshot.unexport_lun_snapshot', 'iscsi', 'iscsi.bind_ip_to_target', 'iscsi.create_iscsi_target', 'iscsi.delete_iscsi_target', 'igroup', 'client', 'client.get_client_info', 'client.create_client', 'client.delete_client', 'adapter', 'adapter.get_fc_info' ] cinder-8.0.0/cinder/tests/unit/test_pure.py0000664000567000056710000031407312701406250022102 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Pure Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy import sys import ddt import mock from oslo_utils import units from cinder import exception from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume def fake_retry(exceptions, interval=1, retries=3, backoff_rate=2): def _decorator(f): return f return _decorator patch_retry = mock.patch('cinder.utils.retry', fake_retry) patch_retry.start() sys.modules['purestorage'] = mock.Mock() from cinder.volume.drivers import pure # Only mock utils.retry for cinder.volume.drivers.pure import patch_retry.stop() DRIVER_PATH = "cinder.volume.drivers.pure" BASE_DRIVER_OBJ = DRIVER_PATH + ".PureBaseVolumeDriver" ISCSI_DRIVER_OBJ = DRIVER_PATH + ".PureISCSIDriver" FC_DRIVER_OBJ = DRIVER_PATH + ".PureFCDriver" ARRAY_OBJ = DRIVER_PATH + ".FlashArray" GET_ARRAY_PRIMARY = {"version": "99.9.9", "revision": "201411230504+8a400f7", "array_name": "pure_target1", "id": "primary_array_id"} GET_ARRAY_SECONDARY = {"version": "99.9.9", "revision": "201411230504+8a400f7", "array_name": "pure_target2", "id": "secondary_array_id"} REPLICATION_TARGET_TOKEN = "12345678-abcd-1234-abcd-1234567890ab" REPLICATION_PROTECTION_GROUP = "cinder-group" REPLICATION_INTERVAL_IN_SEC = 900 REPLICATION_RETENTION_SHORT_TERM = 14400 REPLICATION_RETENTION_LONG_TERM = 6 REPLICATION_RETENTION_LONG_TERM_PER_DAY = 3 PRIMARY_MANAGEMENT_IP = GET_ARRAY_PRIMARY["array_name"] API_TOKEN = "12345678-abcd-1234-abcd-1234567890ab" VOLUME_BACKEND_NAME = "Pure_iSCSI" ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"] FC_PORT_NAMES = ["ct0.fc2", "ct0.fc3", "ct1.fc2", "ct1.fc3"] ISCSI_IPS = ["10.0.0." 
+ str(i + 1) for i in range(len(ISCSI_PORT_NAMES))] FC_WWNS = ["21000024ff59fe9" + str(i + 1) for i in range(len(FC_PORT_NAMES))] HOSTNAME = "computenode1" PURE_HOST_NAME = pure.PureBaseVolumeDriver._generate_purity_host_name(HOSTNAME) PURE_HOST = { "name": PURE_HOST_NAME, "hgroup": None, "iqn": [], "wwn": [], } REST_VERSION = "1.2" VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba" VOLUME_TYPE_ID = "357aa1f1-4f9c-4f10-acec-626af66425ba" VOLUME = { "name": "volume-" + VOLUME_ID, "id": VOLUME_ID, "display_name": "fake_volume", "size": 2, "host": "irrelevant", "volume_type": None, "volume_type_id": VOLUME_TYPE_ID, "replication_status": None, "consistencygroup_id": None, "provider_location": GET_ARRAY_PRIMARY["id"] } VOLUME_PURITY_NAME = VOLUME['name'] + '-cinder' VOLUME_WITH_CGROUP = VOLUME.copy() VOLUME_WITH_CGROUP['consistencygroup_id'] = \ "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" SRC_VOL_ID = "dc7a294d-5964-4379-a15f-ce5554734efc" SRC_VOL = { "name": "volume-" + SRC_VOL_ID, "id": SRC_VOL_ID, "display_name": 'fake_src', "size": 2, "host": "irrelevant", "volume_type": None, "volume_type_id": None, "consistencygroup_id": None, } SNAPSHOT_ID = "04fe2f9a-d0c4-4564-a30d-693cc3657b47" SNAPSHOT = { "name": "snapshot-" + SNAPSHOT_ID, "id": SNAPSHOT_ID, "volume_id": SRC_VOL_ID, "volume_name": "volume-" + SRC_VOL_ID, "volume_size": 2, "display_name": "fake_snapshot", "cgsnapshot_id": None, } SNAPSHOT_PURITY_NAME = SRC_VOL["name"] + '-cinder.' + SNAPSHOT["name"] SNAPSHOT_WITH_CGROUP = SNAPSHOT.copy() SNAPSHOT_WITH_CGROUP['cgsnapshot_id'] = \ "4a2f7e3a-312a-40c5-96a8-536b8a0fe075" INITIATOR_IQN = "iqn.1993-08.org.debian:01:222" INITIATOR_WWN = "5001500150015081" ISCSI_CONNECTOR = {"initiator": INITIATOR_IQN, "host": HOSTNAME} FC_CONNECTOR = {"wwpns": {INITIATOR_WWN}, "host": HOSTNAME} TARGET_IQN = "iqn.2010-06.com.purestorage:flasharray.12345abc" TARGET_WWN = "21000024ff59fe94" TARGET_PORT = "3260" INITIATOR_TARGET_MAP =\ { # _build_initiator_target_map() calls list(set()) on the list, # we must also call list(set()) to get the exact same order '5001500150015081': list(set(FC_WWNS)), } DEVICE_MAPPING =\ { "fabric": {'initiator_port_wwn_list': {INITIATOR_WWN}, 'target_port_wwn_list': FC_WWNS }, } ISCSI_PORTS = [{"name": name, "iqn": TARGET_IQN, "portal": ip + ":" + TARGET_PORT, "wwn": None, } for name, ip in zip(ISCSI_PORT_NAMES, ISCSI_IPS)] FC_PORTS = [{"name": name, "iqn": None, "portal": None, "wwn": wwn, } for name, wwn in zip(FC_PORT_NAMES, FC_WWNS)] NON_ISCSI_PORT = { "name": "ct0.fc1", "iqn": None, "portal": None, "wwn": "5001500150015081", } PORTS_WITH = ISCSI_PORTS + [NON_ISCSI_PORT] PORTS_WITHOUT = [NON_ISCSI_PORT] VOLUME_CONNECTIONS = [ {"host": "h1", "name": VOLUME["name"] + "-cinder"}, {"host": "h2", "name": VOLUME["name"] + "-cinder"}, ] TOTAL_CAPACITY = 50.0 USED_SPACE = 32.1 PROVISIONED_CAPACITY = 70.0 DEFAULT_OVER_SUBSCRIPTION = 20 SPACE_INFO = { "capacity": TOTAL_CAPACITY * units.Gi, "total": USED_SPACE * units.Gi, } SPACE_INFO_EMPTY = { "capacity": TOTAL_CAPACITY * units.Gi, "total": 0, } PERF_INFO = { 'writes_per_sec': 318, 'usec_per_write_op': 255, 'output_per_sec': 234240, 'reads_per_sec': 15, 'input_per_sec': 2827943, 'time': '2015-12-17T21:50:55Z', 'usec_per_read_op': 192, 'queue_depth': 4, } PERF_INFO_RAW = [PERF_INFO] ISCSI_CONNECTION_INFO = { "driver_volume_type": "iscsi", "data": { "target_discovered": False, "discard": True, "target_luns": [1, 1, 1, 1], "target_iqns": [TARGET_IQN, TARGET_IQN, TARGET_IQN, TARGET_IQN], "target_portals": [ISCSI_IPS[0] + ":" + TARGET_PORT, 
ISCSI_IPS[1] + ":" + TARGET_PORT, ISCSI_IPS[2] + ":" + TARGET_PORT, ISCSI_IPS[3] + ":" + TARGET_PORT], }, } FC_CONNECTION_INFO = { "driver_volume_type": "fibre_channel", "data": { "target_wwn": FC_WWNS, "target_lun": 1, "target_discovered": True, "initiator_target_map": INITIATOR_TARGET_MAP, "discard": True, }, } PURE_SNAPSHOT = { "created": "2015-05-27T17:34:33Z", "name": "vol1.snap1", "serial": "8343DFDE2DAFBE40000115E4", "size": 3221225472, "source": "vol1" } PURE_PGROUP = { "hgroups": None, "hosts": None, "name": "pg1", "source": "pure01", "targets": None, "volumes": ["v1"] } PGROUP_ON_TARGET_NOT_ALLOWED = { "name": "array1:replicated_pgroup", "hgroups": None, "source": "array1", "hosts": None, "volumes": ["array1:replicated_volume"], "time_remaining": None, "targets": [{"name": "array2", "allowed": False}]} PGROUP_ON_TARGET_ALLOWED = { "name": "array1:replicated_pgroup", "hgroups": None, "source": "array1", "hosts": None, "volumes": ["array1:replicated_volume"], "time_remaining": None, "targets": [{"name": "array2", "allowed": True}]} CONNECTED_ARRAY = { "id": "6b1a7ce3-da61-0d86-65a7-9772cd259fef", "version": "99.9.9", "connected": True, "management_address": "10.42.10.229", "replication_address": "192.168.10.229", "type": ["replication"], "array_name": "3rd-pure-generic2"} REPLICATED_PGSNAPS = [ { "name": "array1:cinder-repl-pg.3", "created": "2014-12-04T22:59:38Z", "started": "2014-12-04T22:59:38Z", "completed": "2014-12-04T22:59:39Z", "source": "array1:cinder-repl-pg", "logical_data_transferred": 0, "progress": 1.0, "data_transferred": 318 }, { "name": "array1:cinder-repl-pg.2", "created": "2014-12-04T21:59:38Z", "started": "2014-12-04T21:59:38Z", "completed": "2014-12-04T21:59:39Z", "source": "array1:cinder-repl-pg", "logical_data_transferred": 0, "progress": 1.0, "data_transferred": 318 }, { "name": "array1:cinder-repl-pg.1", "created": "2014-12-04T20:59:38Z", "started": "2014-12-04T20:59:38Z", "completed": "2014-12-04T20:59:39Z", "source": "array1:cinder-repl-pg", "logical_data_transferred": 0, "progress": 1.0, "data_transferred": 318 }] REPLICATED_VOLUME_OBJS = [ fake_volume.fake_volume_obj(None, id=fake.volume_id), fake_volume.fake_volume_obj(None, id=fake.volume2_id), fake_volume.fake_volume_obj(None, id=fake.volume3_id), ] REPLICATED_VOLUME_SNAPS = [ { "source": "array1:volume-%s-cinder" % fake.volume_id, "serial": "BBA481C01639104E0001D5F7", "created": "2014-12-04T22:59:38Z", "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.volume_id, "size": 1048576 }, { "source": "array1:volume-%s-cinder" % fake.volume2_id, "serial": "BBA481C01639104E0001D5F8", "created": "2014-12-04T22:59:38Z", "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.volume2_id, "size": 1048576 }, { "source": "array1:volume-%s-cinder" % fake.volume3_id, "serial": "BBA481C01639104E0001D5F9", "created": "2014-12-04T22:59:38Z", "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.volume3_id, "size": 1048576 } ] NON_REPLICATED_VOL_TYPE = {"is_public": True, "extra_specs": {}, "name": "volume_type_1", "id": VOLUME_TYPE_ID} REPLICATED_VOL_TYPE = {"is_public": True, "extra_specs": {pure.EXTRA_SPECS_REPL_ENABLED: " True"}, "name": "volume_type_2", "id": VOLUME_TYPE_ID} class FakePureStorageHTTPError(Exception): def __init__(self, target=None, rest_version=None, code=None, headers=None, text=None): self.target = target self.rest_version = rest_version self.code = code self.headers = headers self.text = text class PureDriverTestCase(test.TestCase): def setUp(self): super(PureDriverTestCase, 
self).setUp() self.mock_config = mock.Mock() self.mock_config.san_ip = PRIMARY_MANAGEMENT_IP self.mock_config.pure_api_token = API_TOKEN self.mock_config.volume_backend_name = VOLUME_BACKEND_NAME self.mock_config.safe_get.return_value = None self.mock_config.pure_eradicate_on_delete = False self.mock_config.driver_ssl_cert_verify = False self.mock_config.driver_ssl_cert_path = None self.array = mock.Mock() self.array.get.return_value = GET_ARRAY_PRIMARY self.array.array_name = GET_ARRAY_PRIMARY["array_name"] self.array.array_id = GET_ARRAY_PRIMARY["id"] self.array2 = mock.Mock() self.array2.array_name = GET_ARRAY_SECONDARY["array_name"] self.array2.array_id = GET_ARRAY_SECONDARY["id"] self.array2.get.return_value = GET_ARRAY_SECONDARY self.purestorage_module = pure.purestorage self.purestorage_module.VERSION = '1.4.0' self.purestorage_module.PureHTTPError = FakePureStorageHTTPError def fake_get_array(*args, **kwargs): if 'action' in kwargs and kwargs['action'] == 'monitor': return PERF_INFO_RAW if 'space' in kwargs and kwargs['space'] is True: return SPACE_INFO def assert_error_propagates(self, mocks, func, *args, **kwargs): """Assert that errors from mocks propagate to func. Fail if exceptions raised by mocks are not seen when calling func(*args, **kwargs). Ensure that we are really seeing exceptions from the mocks by failing if just running func(*args, **kwargs) raises an exception itself. """ func(*args, **kwargs) for mock_func in mocks: original_side_effect = mock_func.side_effect mock_func.side_effect = [exception.PureDriverException( reason='reason')] self.assertRaises(exception.PureDriverException, func, *args, **kwargs) mock_func.side_effect = original_side_effect class PureBaseSharedDriverTestCase(PureDriverTestCase): def setUp(self): super(PureBaseSharedDriverTestCase, self).setUp() self.driver = pure.PureBaseVolumeDriver(configuration=self.mock_config) self.driver._array = self.array self.array.get_rest_version.return_value = '1.4' self.purestorage_module.FlashArray.side_effect = None self.array2.get_rest_version.return_value = '1.4' def tearDown(self): super(PureBaseSharedDriverTestCase, self).tearDown() @ddt.ddt class PureBaseVolumeDriverTestCase(PureBaseSharedDriverTestCase): def setUp(self): super(PureBaseVolumeDriverTestCase, self).setUp() def _setup_mocks_for_replication(self): # Mock config values self.mock_config.pure_replica_interval_default = ( REPLICATION_INTERVAL_IN_SEC) self.mock_config.pure_replica_retention_short_term_default = ( REPLICATION_RETENTION_SHORT_TERM) self.mock_config.pure_replica_retention_long_term_default = ( REPLICATION_RETENTION_LONG_TERM) self.mock_config.pure_replica_retention_long_term_per_day_default = ( REPLICATION_RETENTION_LONG_TERM_PER_DAY) self.mock_config.safe_get.return_value = [ {"backend_id": self.driver._array.array_id, "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123"}] @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') def test_parse_replication_configs_single_target( self, mock_setup_repl_pgroups, mock_generate_replication_retention): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention mock_setup_repl_pgroups.return_value = None # Test single array configured self.mock_config.safe_get.return_value = [ {"backend_id": self.driver._array.id, "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123"}] self.purestorage_module.FlashArray.return_value = self.array 
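# A minimal, illustrative sketch (not part of the original suite) of the
# mock wiring used above and in the multi-target test below: with the
# `mock` library, `return_value` makes every call to the fake FlashArray
# constructor return the same object, while `side_effect` set to a list
# hands out one item per call. All names here are hypothetical.
import mock

flash_array_cls = mock.Mock()
primary = mock.Mock(name='primary')
secondary = mock.Mock(name='secondary')
# One fake array per constructor call, in order.
flash_array_cls.side_effect = [primary, secondary]
assert flash_array_cls('1.2.3.4', api_token='abc123') is primary
assert flash_array_cls('1.2.3.5', api_token='abc124') is secondary
# A plain return_value would instead hand back `primary` on every call.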
self.driver.parse_replication_configs() self.assertEqual(1, len(self.driver._replication_target_arrays)) self.assertEqual(self.array, self.driver._replication_target_arrays[0]) only_target_array = self.driver._replication_target_arrays[0] self.assertEqual(self.driver._array.id, only_target_array._backend_id) @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') def test_parse_replication_configs_multiple_target( self, mock_setup_repl_pgroups, mock_generate_replication_retention): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention mock_setup_repl_pgroups.return_value = None # Test multiple arrays configured self.mock_config.safe_get.return_value = [ {"backend_id": GET_ARRAY_PRIMARY["id"], "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123"}, {"backend_id": GET_ARRAY_SECONDARY["id"], "managed_backend_name": None, "san_ip": "1.2.3.5", "api_token": "abc124"}] self.purestorage_module.FlashArray.side_effect = \ [self.array, self.array2] self.driver.parse_replication_configs() self.assertEqual(2, len(self.driver._replication_target_arrays)) self.assertEqual(self.array, self.driver._replication_target_arrays[0]) first_target_array = self.driver._replication_target_arrays[0] self.assertEqual(GET_ARRAY_PRIMARY["id"], first_target_array._backend_id) self.assertEqual( self.array2, self.driver._replication_target_arrays[1]) second_target_array = self.driver._replication_target_arrays[1] self.assertEqual(GET_ARRAY_SECONDARY["id"], second_target_array._backend_id) @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_do_setup_replicated(self, mock_get_volume_type, mock_setup_repl_pgroups, mock_generate_replication_retention): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention mock_get_volume_type.return_value = REPLICATED_VOL_TYPE self._setup_mocks_for_replication() self.array2.get.return_value = GET_ARRAY_SECONDARY self.array.get.return_value = GET_ARRAY_PRIMARY self.purestorage_module.FlashArray.side_effect = [self.array, self.array2] self.driver.do_setup(None) self.assertEqual(self.array, self.driver._array) self.assertEqual(1, len(self.driver._replication_target_arrays)) self.assertEqual(self.array2, self.driver._replication_target_arrays[0]) calls = [ mock.call(self.array, [self.array2], 'cinder-group', REPLICATION_INTERVAL_IN_SEC, retention) ] mock_setup_repl_pgroups.assert_has_calls(calls) def test_generate_purity_host_name(self): result = self.driver._generate_purity_host_name( "really-long-string-thats-a-bit-too-long") self.assertTrue(result.startswith("really-long-string-that-")) self.assertTrue(result.endswith("-cinder")) self.assertEqual(63, len(result)) self.assertTrue(pure.GENERATED_NAME.match(result)) result = self.driver._generate_purity_host_name("!@#$%^-invalid&*") self.assertTrue(result.startswith("invalid---")) self.assertTrue(result.endswith("-cinder")) self.assertEqual(49, len(result)) self.assertTrue(pure.GENERATED_NAME.match(result)) @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) def test_create_volume(self, mock_is_replicated_type): mock_is_replicated_type.return_value = False self.driver.create_volume(VOLUME) self.array.create_volume.assert_called_with( VOLUME["name"] + "-cinder", 2 * units.Gi) 
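# A standalone, hedged rendition (with hypothetical stand-ins `backend`
# and `do_work`) of the pattern that assert_error_propagates(), called
# just below, captures: first prove the operation succeeds, then inject
# a failure into each collaborator and prove the caller re-raises it
# instead of swallowing it, restoring the mock afterwards.
import mock

backend = mock.Mock()

def do_work():
    return backend.call()

do_work()  # must succeed before any fault is injected
backend.call.side_effect = RuntimeError('injected')
try:
    do_work()
except RuntimeError:
    pass  # good: the mocked failure surfaced through the caller
else:
    raise AssertionError('error was swallowed')
backend.call.side_effect = None  # restore, as the helper does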
self.assert_error_propagates([self.array.create_volume], self.driver.create_volume, VOLUME) @mock.patch(BASE_DRIVER_OBJ + "._add_volume_to_consistency_group", autospec=True) @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) def test_create_volume_with_cgroup(self, mock_is_replicated_type, mock_add_to_cgroup): vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder" mock_is_replicated_type.return_value = False self.driver.create_volume(VOLUME_WITH_CGROUP) mock_add_to_cgroup\ .assert_called_with(self.driver, VOLUME_WITH_CGROUP['consistencygroup_id'], vol_name) @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) def test_create_volume_from_snapshot(self, mock_is_replicated_type): vol_name = VOLUME["name"] + "-cinder" snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"] mock_is_replicated_type.return_value = False # Branch where extend unneeded self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) self.array.copy_volume.assert_called_with(snap_name, vol_name) self.assertFalse(self.array.extend_volume.called) self.assert_error_propagates( [self.array.copy_volume], self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT) self.assertFalse(self.array.extend_volume.called) # Branch where extend needed SNAPSHOT["volume_size"] = 1 # resize so smaller than VOLUME self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) expected = [mock.call.copy_volume(snap_name, vol_name), mock.call.extend_volume(vol_name, 2 * units.Gi)] self.array.assert_has_calls(expected) self.assert_error_propagates( [self.array.copy_volume, self.array.extend_volume], self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT) SNAPSHOT["volume_size"] = 2 # reset size @mock.patch(BASE_DRIVER_OBJ + "._get_snap_name") def test_create_volume_from_snapshot_cant_get_name(self, mock_get_name): mock_get_name.return_value = None self.assertRaises(exception.PureDriverException, self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT) @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name_from_snapshot") def test_create_volume_from_cgsnapshot_cant_get_name(self, mock_get_name): mock_get_name.return_value = None self.assertRaises(exception.PureDriverException, self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT_WITH_CGROUP) @mock.patch(BASE_DRIVER_OBJ + "._add_volume_to_consistency_group", autospec=True) @mock.patch(BASE_DRIVER_OBJ + "._extend_if_needed", autospec=True) @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name_from_snapshot") @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) def test_create_volume_from_cgsnapshot(self, mock_is_replicated_type, mock_get_snap_name, mock_extend_if_needed, mock_add_to_cgroup): vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder" snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \ "e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075."\ + vol_name mock_get_snap_name.return_value = snap_name mock_is_replicated_type.return_value = False self.driver.create_volume_from_snapshot(VOLUME_WITH_CGROUP, SNAPSHOT_WITH_CGROUP) self.array.copy_volume.assert_called_with(snap_name, vol_name) self.assertTrue(mock_get_snap_name.called) self.assertTrue(mock_extend_if_needed.called) self.driver.create_volume_from_snapshot(VOLUME_WITH_CGROUP, SNAPSHOT_WITH_CGROUP) mock_add_to_cgroup\ .assert_called_with(self.driver, VOLUME_WITH_CGROUP['consistencygroup_id'], vol_name) # Tests cloning a volume that is not replicated type @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) def 
test_create_cloned_volume(self, mock_is_replicated_type): vol_name = VOLUME["name"] + "-cinder" src_name = SRC_VOL["name"] + "-cinder" mock_is_replicated_type.return_value = False # Branch where extend unneeded self.driver.create_cloned_volume(VOLUME, SRC_VOL) self.array.copy_volume.assert_called_with(src_name, vol_name) self.assertFalse(self.array.extend_volume.called) self.assert_error_propagates( [self.array.copy_volume], self.driver.create_cloned_volume, VOLUME, SRC_VOL) self.assertFalse(self.array.extend_volume.called) # Branch where extend needed SRC_VOL["size"] = 1 # resize so smaller than VOLUME self.driver.create_cloned_volume(VOLUME, SRC_VOL) expected = [mock.call.copy_volume(src_name, vol_name), mock.call.extend_volume(vol_name, 2 * units.Gi)] self.array.assert_has_calls(expected) self.assert_error_propagates( [self.array.copy_volume, self.array.extend_volume], self.driver.create_cloned_volume, VOLUME, SRC_VOL) SRC_VOL["size"] = 2 # reset size # Tests cloning a volume that is part of a consistency group @mock.patch(BASE_DRIVER_OBJ + "._add_volume_to_consistency_group", autospec=True) @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) def test_create_cloned_volume_with_cgroup(self, mock_is_replicated_type, mock_add_to_cgroup): vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder" mock_is_replicated_type.return_value = False self.driver.create_cloned_volume(VOLUME_WITH_CGROUP, SRC_VOL) mock_add_to_cgroup\ .assert_called_with(self.driver, VOLUME_WITH_CGROUP['consistencygroup_id'], vol_name) def test_delete_volume_already_deleted(self): self.array.list_volume_private_connections.side_effect = \ self.purestorage_module.PureHTTPError( code=400, text="Volume does not exist" ) self.driver.delete_volume(VOLUME) self.assertFalse(self.array.destroy_volume.called) self.assertFalse(self.array.eradicate_volume.called) # Testing case where array.destroy_volume returns an exception # because volume has already been deleted self.array.list_volume_private_connections.side_effect = None self.array.list_volume_private_connections.return_value = {} self.array.destroy_volume.side_effect = \ self.purestorage_module.PureHTTPError( code=400, text="Volume does not exist" ) self.driver.delete_volume(VOLUME) self.assertTrue(self.array.destroy_volume.called) self.assertFalse(self.array.eradicate_volume.called) def test_delete_volume(self): vol_name = VOLUME["name"] + "-cinder" self.array.list_volume_private_connections.return_value = {} self.driver.delete_volume(VOLUME) expected = [mock.call.destroy_volume(vol_name)] self.array.assert_has_calls(expected) self.assertFalse(self.array.eradicate_volume.called) self.array.destroy_volume.side_effect = ( self.purestorage_module.PureHTTPError(code=400, text="does not " "exist")) self.driver.delete_volume(VOLUME) self.array.destroy_volume.side_effect = None self.assert_error_propagates([self.array.destroy_volume], self.driver.delete_volume, VOLUME) def test_delete_volume_eradicate_now(self): vol_name = VOLUME["name"] + "-cinder" self.array.list_volume_private_connections.return_value = {} self.mock_config.pure_eradicate_on_delete = True self.driver.delete_volume(VOLUME) expected = [mock.call.destroy_volume(vol_name), mock.call.eradicate_volume(vol_name)] self.array.assert_has_calls(expected) def test_delete_connected_volume(self): vol_name = VOLUME["name"] + "-cinder" host_name_a = "ha" host_name_b = "hb" self.array.list_volume_private_connections.return_value = [{ "host": host_name_a, "lun": 7, "name": vol_name, "size": 3221225472, }, { "host": 
host_name_b, "lun": 2, "name": vol_name, "size": 3221225472, }] self.driver.delete_volume(VOLUME) expected = [mock.call.list_volume_private_connections(vol_name), mock.call.disconnect_host(host_name_a, vol_name), mock.call.disconnect_host(host_name_b, vol_name), mock.call.destroy_volume(vol_name)] self.array.assert_has_calls(expected) def test_create_snapshot(self): vol_name = SRC_VOL["name"] + "-cinder" self.driver.create_snapshot(SNAPSHOT) self.array.create_snapshot.assert_called_with( vol_name, suffix=SNAPSHOT["name"] ) self.assert_error_propagates([self.array.create_snapshot], self.driver.create_snapshot, SNAPSHOT) def test_delete_snapshot(self): snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"] self.driver.delete_snapshot(SNAPSHOT) expected = [mock.call.destroy_volume(snap_name)] self.array.assert_has_calls(expected) self.assertFalse(self.array.eradicate_volume.called) self.array.destroy_volume.side_effect = ( self.purestorage_module.PureHTTPError(code=400, text="does not " "exist")) self.driver.delete_snapshot(SNAPSHOT) self.array.destroy_volume.side_effect = None self.assert_error_propagates([self.array.destroy_volume], self.driver.delete_snapshot, SNAPSHOT) def test_delete_snapshot_eradicate_now(self): snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"] self.mock_config.pure_eradicate_on_delete = True self.driver.delete_snapshot(SNAPSHOT) expected = [mock.call.destroy_volume(snap_name), mock.call.eradicate_volume(snap_name)] self.array.assert_has_calls(expected) @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) def test_terminate_connection(self, mock_host): vol_name = VOLUME["name"] + "-cinder" mock_host.return_value = {"name": "some-host"} # Branch with manually created host self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) self.array.disconnect_host.assert_called_with("some-host", vol_name) self.assertFalse(self.array.list_host_connections.called) self.assertFalse(self.array.delete_host.called) # Branch with host added to host group self.array.reset_mock() self.array.list_host_connections.return_value = [] mock_host.return_value = PURE_HOST.copy() mock_host.return_value.update(hgroup="some-group") self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) self.assertTrue(self.array.list_host_connections.called) self.assertTrue(self.array.delete_host.called) # Branch with host still having connected volumes self.array.reset_mock() self.array.list_host_connections.return_value = [ {"lun": 2, "name": PURE_HOST_NAME, "vol": "some-vol"}] mock_host.return_value = PURE_HOST self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) self.array.list_host_connections.assert_called_with(PURE_HOST_NAME, private=True) self.assertFalse(self.array.delete_host.called) # Branch where host gets deleted self.array.reset_mock() self.array.list_host_connections.return_value = [] self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) self.array.list_host_connections.assert_called_with(PURE_HOST_NAME, private=True) self.array.delete_host.assert_called_with(PURE_HOST_NAME) # Branch where connection is missing and the host is still deleted self.array.reset_mock() self.array.disconnect_host.side_effect = \ self.purestorage_module.PureHTTPError(code=400, text="is not connected") self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) 
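# The surrounding branches pin down an idempotent-cleanup contract: a
# 400 "is not connected"/"does not exist" error from Purity is treated
# as already-done, while any other error (e.g. a 500) must propagate.
# A minimal sketch of that contract, assuming only the hypothetical
# stand-ins `FakeHTTPError` and `destroy` (not the driver's real code):
class FakeHTTPError(Exception):
    def __init__(self, code, text):
        super(FakeHTTPError, self).__init__(text)
        self.code = code
        self.text = text

def delete_quietly(destroy, name):
    try:
        destroy(name)
    except FakeHTTPError as err:
        already_gone = ('does not exist' in err.text or
                        'is not connected' in err.text)
        if err.code == 400 and already_gone:
            return  # already gone: deletion is idempotent
        raise  # 500s and unexpected 400s must surface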
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) self.array.list_host_connections.assert_called_with(PURE_HOST_NAME, private=True) self.array.delete_host.assert_called_with(PURE_HOST_NAME) # Branch where an unexpected exception occurs self.array.reset_mock() self.array.disconnect_host.side_effect = \ self.purestorage_module.PureHTTPError( code=500, text="Some other error" ) self.assertRaises(self.purestorage_module.PureHTTPError, self.driver.terminate_connection, VOLUME, ISCSI_CONNECTOR) self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) self.assertFalse(self.array.list_host_connections.called) self.assertFalse(self.array.delete_host.called) @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) def test_terminate_connection_host_deleted(self, mock_host): vol_name = VOLUME["name"] + "-cinder" mock_host.return_value = PURE_HOST.copy() self.array.reset_mock() self.array.list_host_connections.return_value = [] self.array.delete_host.side_effect = \ self.purestorage_module.PureHTTPError(code=400, text='Host does not exist.') self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) self.array.list_host_connections.assert_called_with(PURE_HOST_NAME, private=True) self.array.delete_host.assert_called_once_with(PURE_HOST_NAME) def test_extend_volume(self): vol_name = VOLUME["name"] + "-cinder" self.driver.extend_volume(VOLUME, 3) self.array.extend_volume.assert_called_with(vol_name, 3 * units.Gi) self.assert_error_propagates([self.array.extend_volume], self.driver.extend_volume, VOLUME, 3) def test_get_pgroup_name_from_id(self): id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" expected_name = "consisgroup-%s-cinder" % id actual_name = self.driver._get_pgroup_name_from_id(id) self.assertEqual(expected_name, actual_name) def test_get_pgroup_snap_suffix(self): cgsnap = mock.Mock() cgsnap.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" expected_suffix = "cgsnapshot-%s-cinder" % cgsnap.id actual_suffix = self.driver._get_pgroup_snap_suffix(cgsnap) self.assertEqual(expected_suffix, actual_suffix) def test_get_pgroup_snap_name(self): cg_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" cgsnap_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe075" mock_cgsnap = mock.Mock() mock_cgsnap.consistencygroup_id = cg_id mock_cgsnap.id = cgsnap_id expected_name = "consisgroup-%(cg)s-cinder.cgsnapshot-%(snap)s-cinder"\ % {"cg": cg_id, "snap": cgsnap_id} actual_name = self.driver._get_pgroup_snap_name(mock_cgsnap) self.assertEqual(expected_name, actual_name) def test_get_pgroup_snap_name_from_snapshot(self): cgsnapshot_id = 'b919b266-23b4-4b83-9a92-e66031b9a921' volume_name = 'volume-a3b8b294-8494-4a72-bec7-9aadec561332' cg_id = '0cfc0e4e-5029-4839-af20-184fbc42a9ed' pgsnap_name_base = ( 'consisgroup-%s-cinder.cgsnapshot-%s-cinder.%s-cinder') pgsnap_name = pgsnap_name_base % (cg_id, cgsnapshot_id, volume_name) self.driver.db = mock.MagicMock() mock_cgsnap = mock.MagicMock() mock_cgsnap.id = cgsnapshot_id mock_cgsnap.consistencygroup_id = cg_id self.driver.db.cgsnapshot_get.return_value = mock_cgsnap mock_snap = mock.Mock() mock_snap.cgsnapshot_id = cgsnapshot_id mock_snap.volume_name = volume_name actual_name = self.driver._get_pgroup_snap_name_from_snapshot( mock_snap ) self.assertEqual(pgsnap_name, actual_name) def test_create_consistencygroup(self): mock_cgroup = mock.Mock() mock_cgroup.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" model_update = self.driver.create_consistencygroup(None, mock_cgroup) expected_name = 
self.driver._get_pgroup_name_from_id(mock_cgroup.id) self.array.create_pgroup.assert_called_with(expected_name) self.assertEqual({'status': 'available'}, model_update) self.assert_error_propagates( [self.array.create_pgroup], self.driver.create_consistencygroup, None, mock_cgroup) @mock.patch(BASE_DRIVER_OBJ + ".create_volume_from_snapshot") @mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup") def test_create_consistencygroup_from_cgsnapshot(self, mock_create_cg, mock_create_vol): mock_context = mock.Mock() mock_group = mock.Mock() mock_cgsnapshot = mock.Mock() mock_snapshots = [mock.Mock() for i in range(5)] mock_volumes = [mock.Mock() for i in range(5)] self.driver.create_consistencygroup_from_src( mock_context, mock_group, mock_volumes, cgsnapshot=mock_cgsnapshot, snapshots=mock_snapshots, source_cg=None, source_vols=None ) mock_create_cg.assert_called_with(mock_context, mock_group) expected_calls = [mock.call(vol, snap) for vol, snap in zip(mock_volumes, mock_snapshots)] mock_create_vol.assert_has_calls(expected_calls, any_order=True) self.assert_error_propagates( [mock_create_vol, mock_create_cg], self.driver.create_consistencygroup_from_src, mock_context, mock_group, mock_volumes, cgsnapshot=mock_cgsnapshot, snapshots=mock_snapshots, source_cg=None, source_vols=None ) @mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup") def test_create_consistencygroup_from_cg(self, mock_create_cg): num_volumes = 5 mock_context = mock.MagicMock() mock_group = mock.MagicMock() mock_source_cg = mock.MagicMock() mock_volumes = [mock.MagicMock() for i in range(num_volumes)] mock_source_vols = [mock.MagicMock() for i in range(num_volumes)] self.driver.create_consistencygroup_from_src( mock_context, mock_group, mock_volumes, source_cg=mock_source_cg, source_vols=mock_source_vols ) mock_create_cg.assert_called_with(mock_context, mock_group) self.assertTrue(self.array.create_pgroup_snapshot.called) self.assertEqual(num_volumes, self.array.copy_volume.call_count) self.assertEqual(num_volumes, self.array.set_pgroup.call_count) self.assertTrue(self.array.destroy_pgroup.called) @mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup") def test_create_consistencygroup_from_cg_with_error(self, mock_create_cg): num_volumes = 5 mock_context = mock.MagicMock() mock_group = mock.MagicMock() mock_source_cg = mock.MagicMock() mock_volumes = [mock.MagicMock() for i in range(num_volumes)] mock_source_vols = [mock.MagicMock() for i in range(num_volumes)] self.array.copy_volume.side_effect = FakePureStorageHTTPError() self.assertRaises( FakePureStorageHTTPError, self.driver.create_consistencygroup_from_src, mock_context, mock_group, mock_volumes, source_cg=mock_source_cg, source_vols=mock_source_vols ) mock_create_cg.assert_called_with(mock_context, mock_group) self.assertTrue(self.array.create_pgroup_snapshot.called) # Make sure that the temp snapshot is cleaned up even when copying # the volume fails! 
self.assertTrue(self.array.destroy_pgroup.called) @mock.patch(BASE_DRIVER_OBJ + ".delete_volume", autospec=True) def test_delete_consistencygroup(self, mock_delete_volume): mock_cgroup = mock.MagicMock() mock_cgroup.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" mock_cgroup['status'] = "deleted" mock_context = mock.Mock() mock_volume = mock.MagicMock() model_update, volumes = self.driver.delete_consistencygroup( mock_context, mock_cgroup, [mock_volume]) expected_name = self.driver._get_pgroup_name_from_id(mock_cgroup.id) self.array.destroy_pgroup.assert_called_with(expected_name) self.assertFalse(self.array.eradicate_pgroup.called) expected_volume_updates = [{ 'id': mock_volume.id, 'status': 'deleted' }] self.assertEqual(expected_volume_updates, volumes) self.assertEqual(mock_cgroup['status'], model_update['status']) mock_delete_volume.assert_called_with(self.driver, mock_volume) self.array.destroy_pgroup.side_effect = \ self.purestorage_module.PureHTTPError( code=400, text="Protection group has been destroyed." ) self.driver.delete_consistencygroup(mock_context, mock_cgroup, [mock_volume]) self.array.destroy_pgroup.assert_called_with(expected_name) self.assertFalse(self.array.eradicate_pgroup.called) mock_delete_volume.assert_called_with(self.driver, mock_volume) self.array.destroy_pgroup.side_effect = \ self.purestorage_module.PureHTTPError( code=400, text="Protection group does not exist" ) self.driver.delete_consistencygroup(mock_context, mock_cgroup, [mock_volume]) self.array.destroy_pgroup.assert_called_with(expected_name) self.assertFalse(self.array.eradicate_pgroup.called) mock_delete_volume.assert_called_with(self.driver, mock_volume) self.array.destroy_pgroup.side_effect = \ self.purestorage_module.PureHTTPError( code=400, text="Some other error" ) self.assertRaises(self.purestorage_module.PureHTTPError, self.driver.delete_consistencygroup, mock_context, mock_volume, [mock_volume]) self.array.destroy_pgroup.side_effect = \ self.purestorage_module.PureHTTPError( code=500, text="Another different error" ) self.assertRaises(self.purestorage_module.PureHTTPError, self.driver.delete_consistencygroup, mock_context, mock_volume, [mock_volume]) self.array.destroy_pgroup.side_effect = None self.assert_error_propagates( [self.array.destroy_pgroup], self.driver.delete_consistencygroup, mock_context, mock_cgroup, [mock_volume] ) def _create_mock_cg(self): mock_group = mock.MagicMock() mock_group.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" mock_group.status = "Available" mock_group.cg_name = "consisgroup-" + mock_group.id + "-cinder" return mock_group def test_update_consistencygroup(self): mock_group = self._create_mock_cg() add_vols = [ {'name': 'vol1'}, {'name': 'vol2'}, {'name': 'vol3'}, ] expected_addvollist = [vol['name'] + '-cinder' for vol in add_vols] remove_vols = [ {'name': 'vol4'}, {'name': 'vol5'}, ] expected_remvollist = [vol['name'] + '-cinder' for vol in remove_vols] self.driver.update_consistencygroup(mock.Mock(), mock_group, add_vols, remove_vols) self.array.set_pgroup.assert_called_with( mock_group.cg_name, addvollist=expected_addvollist, remvollist=expected_remvollist ) def test_update_consistencygroup_no_add_vols(self): mock_group = self._create_mock_cg() expected_addvollist = [] remove_vols = [ {'name': 'vol4'}, {'name': 'vol5'}, ] expected_remvollist = [vol['name'] + '-cinder' for vol in remove_vols] self.driver.update_consistencygroup(mock.Mock(), mock_group, None, remove_vols) self.array.set_pgroup.assert_called_with( mock_group.cg_name, addvollist=expected_addvollist, 
remvollist=expected_remvollist ) def test_update_consistencygroup_no_remove_vols(self): mock_group = self._create_mock_cg() add_vols = [ {'name': 'vol1'}, {'name': 'vol2'}, {'name': 'vol3'}, ] expected_addvollist = [vol['name'] + '-cinder' for vol in add_vols] expected_remvollist = [] self.driver.update_consistencygroup(mock.Mock(), mock_group, add_vols, None) self.array.set_pgroup.assert_called_with( mock_group.cg_name, addvollist=expected_addvollist, remvollist=expected_remvollist ) def test_update_consistencygroup_no_vols(self): mock_group = self._create_mock_cg() self.driver.update_consistencygroup(mock.Mock(), mock_group, None, None) self.array.set_pgroup.assert_called_with( mock_group.cg_name, addvollist=[], remvollist=[] ) def test_create_cgsnapshot(self): mock_cgsnap = mock.Mock() mock_cgsnap.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" mock_cgsnap.consistencygroup_id = \ "4a2f7e3a-312a-40c5-96a8-536b8a0fe075" mock_context = mock.Mock() mock_snap = mock.MagicMock() model_update, snapshots = self.driver.create_cgsnapshot(mock_context, mock_cgsnap, [mock_snap]) cg_id = mock_cgsnap.consistencygroup_id expected_pgroup_name = self.driver._get_pgroup_name_from_id(cg_id) expected_snap_suffix = self.driver._get_pgroup_snap_suffix(mock_cgsnap) self.array.create_pgroup_snapshot\ .assert_called_with(expected_pgroup_name, suffix=expected_snap_suffix) self.assertEqual({'status': 'available'}, model_update) expected_snapshot_update = [{ 'id': mock_snap.id, 'status': 'available' }] self.assertEqual(expected_snapshot_update, snapshots) self.assert_error_propagates( [self.array.create_pgroup_snapshot], self.driver.create_cgsnapshot, mock_context, mock_cgsnap, []) @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name", spec=pure.PureBaseVolumeDriver._get_pgroup_snap_name) def test_delete_cgsnapshot(self, mock_get_snap_name): snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \ "e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075" mock_get_snap_name.return_value = snap_name mock_cgsnap = mock.Mock() mock_cgsnap.status = 'deleted' mock_context = mock.Mock() mock_snap = mock.Mock() model_update, snapshots = self.driver.delete_cgsnapshot(mock_context, mock_cgsnap, [mock_snap]) self.array.destroy_pgroup.assert_called_with(snap_name) self.assertFalse(self.array.eradicate_pgroup.called) self.assertEqual({'status': mock_cgsnap.status}, model_update) expected_snapshot_update = [{ 'id': mock_snap.id, 'status': 'deleted' }] self.assertEqual(expected_snapshot_update, snapshots) self.array.destroy_pgroup.side_effect = \ self.purestorage_module.PureHTTPError( code=400, text="Protection group snapshot has been destroyed." 
) self.driver.delete_cgsnapshot(mock_context, mock_cgsnap, [mock_snap]) self.array.destroy_pgroup.assert_called_with(snap_name) self.assertFalse(self.array.eradicate_pgroup.called) self.array.destroy_pgroup.side_effect = \ self.purestorage_module.PureHTTPError( code=400, text="Protection group snapshot does not exist" ) self.driver.delete_cgsnapshot(mock_context, mock_cgsnap, [mock_snap]) self.array.destroy_pgroup.assert_called_with(snap_name) self.assertFalse(self.array.eradicate_pgroup.called) self.array.destroy_pgroup.side_effect = \ self.purestorage_module.PureHTTPError( code=400, text="Some other error" ) self.assertRaises(self.purestorage_module.PureHTTPError, self.driver.delete_cgsnapshot, mock_context, mock_cgsnap, [mock_snap]) self.array.destroy_pgroup.side_effect = \ self.purestorage_module.PureHTTPError( code=500, text="Another different error" ) self.assertRaises(self.purestorage_module.PureHTTPError, self.driver.delete_cgsnapshot, mock_context, mock_cgsnap, [mock_snap]) self.array.destroy_pgroup.side_effect = None self.assert_error_propagates( [self.array.destroy_pgroup], self.driver.delete_cgsnapshot, mock_context, mock_cgsnap, [mock_snap] ) @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name", spec=pure.PureBaseVolumeDriver._get_pgroup_snap_name) def test_delete_cgsnapshot_eradicate_now(self, mock_get_snap_name): snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \ "e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075" mock_get_snap_name.return_value = snap_name self.mock_config.pure_eradicate_on_delete = True model_update, snapshots = self.driver.delete_cgsnapshot(mock.Mock(), mock.Mock(), [mock.Mock()]) self.array.destroy_pgroup.assert_called_once_with(snap_name) self.array.eradicate_pgroup.assert_called_once_with(snap_name) def test_manage_existing(self): ref_name = 'vol1' volume_ref = {'name': ref_name} self.array.list_volume_private_connections.return_value = [] vol_name = VOLUME['name'] + '-cinder' self.driver.manage_existing(VOLUME, volume_ref) self.array.list_volume_private_connections.assert_called_with(ref_name) self.array.rename_volume.assert_called_with(ref_name, vol_name) def test_manage_existing_error_propagates(self): self.array.list_volume_private_connections.return_value = [] self.assert_error_propagates( [self.array.list_volume_private_connections, self.array.rename_volume], self.driver.manage_existing, VOLUME, {'name': 'vol1'} ) def test_manage_existing_bad_ref(self): self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, VOLUME, {'bad_key': 'bad_value'}) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, VOLUME, {'name': ''}) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, VOLUME, {'name': None}) self.array.get_volume.side_effect = \ self.purestorage_module.PureHTTPError( text="Volume does not exist.", code=400 ) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, VOLUME, {'name': 'non-existing-volume'}) def test_manage_existing_with_connected_hosts(self): ref_name = 'vol1' self.array.list_volume_private_connections.return_value = \ ["host1", "host2"] self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, VOLUME, {'name': ref_name}) self.array.list_volume_private_connections.assert_called_with(ref_name) self.assertFalse(self.array.rename_volume.called) def test_manage_existing_get_size(self): ref_name = 'vol1' volume_ref = {'name': ref_name} expected_size = 5 
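# The magic number on the next line is exactly 5 GiB in bytes: Purity
# reports sizes in bytes, and the driver is expected to convert to
# gibibytes. A quick, hedged check of the arithmetic, assuming only
# oslo_utils:
from oslo_utils import units

assert 5 * units.Gi == 5368709120  # units.Gi == 2 ** 30
assert 5368709120 / units.Gi == 5.0  # bytes -> GiB, as asserted below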
self.array.get_volume.return_value = {"size": 5368709120} size = self.driver.manage_existing_get_size(VOLUME, volume_ref) self.assertEqual(expected_size, size) self.array.get_volume.assert_called_with(ref_name, snap=False) def test_manage_existing_get_size_error_propagates(self): self.array.get_volume.return_value = mock.MagicMock() self.assert_error_propagates([self.array.get_volume], self.driver.manage_existing_get_size, VOLUME, {'name': 'vol1'}) def test_manage_existing_get_size_bad_ref(self): self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, VOLUME, {'bad_key': 'bad_value'}) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, VOLUME, {'name': ''}) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, VOLUME, {'name': None}) def test_unmanage(self): vol_name = VOLUME['name'] + "-cinder" unmanaged_vol_name = vol_name + "-unmanaged" self.driver.unmanage(VOLUME) self.array.rename_volume.assert_called_with(vol_name, unmanaged_vol_name) def test_unmanage_error_propagates(self): self.assert_error_propagates([self.array.rename_volume], self.driver.unmanage, VOLUME) def test_unmanage_with_deleted_volume(self): vol_name = VOLUME['name'] + "-cinder" unmanaged_vol_name = vol_name + "-unmanaged" self.array.rename_volume.side_effect = \ self.purestorage_module.PureHTTPError( text="Volume does not exist.", code=400 ) self.driver.unmanage(VOLUME) self.array.rename_volume.assert_called_with(vol_name, unmanaged_vol_name) def test_manage_existing_snapshot(self): ref_name = PURE_SNAPSHOT['name'] snap_ref = {'name': ref_name} self.array.get_volume.return_value = [PURE_SNAPSHOT] self.driver.manage_existing_snapshot(SNAPSHOT, snap_ref) self.array.rename_volume.assert_called_once_with(ref_name, SNAPSHOT_PURITY_NAME) self.array.get_volume.assert_called_with(PURE_SNAPSHOT['source'], snap=True) def test_manage_existing_snapshot_multiple_snaps_on_volume(self): ref_name = PURE_SNAPSHOT['name'] snap_ref = {'name': ref_name} pure_snaps = [PURE_SNAPSHOT] for i in range(5): snap = PURE_SNAPSHOT.copy() snap['name'] += str(i) pure_snaps.append(snap) self.array.get_volume.return_value = pure_snaps self.driver.manage_existing_snapshot(SNAPSHOT, snap_ref) self.array.rename_volume.assert_called_once_with(ref_name, SNAPSHOT_PURITY_NAME) def test_manage_existing_snapshot_error_propagates(self): self.array.get_volume.return_value = [PURE_SNAPSHOT] self.assert_error_propagates( [self.array.rename_volume], self.driver.manage_existing_snapshot, SNAPSHOT, {'name': PURE_SNAPSHOT['name']} ) def test_manage_existing_snapshot_bad_ref(self): self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, SNAPSHOT, {'bad_key': 'bad_value'}) def test_manage_existing_snapshot_empty_ref(self): self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, SNAPSHOT, {'name': ''}) def test_manage_existing_snapshot_none_ref(self): self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, SNAPSHOT, {'name': None}) def test_manage_existing_snapshot_volume_ref_not_exist(self): self.array.get_volume.side_effect = \ self.purestorage_module.PureHTTPError( text="Volume does not exist.", code=400 ) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, SNAPSHOT, {'name': 'non-existing-volume.snap1'}) def test_manage_existing_snapshot_ref_not_exist(self): ref_name = 
    def test_manage_existing_snapshot_ref_not_exist(self):
        ref_name = PURE_SNAPSHOT['name'] + '-fake'
        snap_ref = {'name': ref_name}
        self.array.get_volume.return_value = [PURE_SNAPSHOT]
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_snapshot,
                          SNAPSHOT, snap_ref)

    def test_manage_existing_snapshot_bad_api_version(self):
        self.array.get_rest_version.return_value = '1.3'
        self.assertRaises(exception.PureDriverException,
                          self.driver.manage_existing_snapshot,
                          SNAPSHOT, {'name': PURE_SNAPSHOT['name']})

    def test_manage_existing_snapshot_get_size(self):
        ref_name = PURE_SNAPSHOT['name']
        snap_ref = {'name': ref_name}
        self.array.get_volume.return_value = [PURE_SNAPSHOT]

        size = self.driver.manage_existing_snapshot_get_size(SNAPSHOT,
                                                             snap_ref)
        expected_size = 3.0
        self.assertEqual(expected_size, size)
        self.array.get_volume.assert_called_with(PURE_SNAPSHOT['source'],
                                                 snap=True)

    def test_manage_existing_snapshot_get_size_error_propagates(self):
        self.array.get_volume.return_value = [PURE_SNAPSHOT]
        self.assert_error_propagates(
            [self.array.get_volume],
            self.driver.manage_existing_snapshot_get_size,
            SNAPSHOT, {'name': PURE_SNAPSHOT['name']}
        )

    def test_manage_existing_snapshot_get_size_bad_ref(self):
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_snapshot_get_size,
                          SNAPSHOT, {'bad_key': 'bad_value'})

    def test_manage_existing_snapshot_get_size_empty_ref(self):
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_snapshot_get_size,
                          SNAPSHOT, {'name': ''})

    def test_manage_existing_snapshot_get_size_none_ref(self):
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_snapshot_get_size,
                          SNAPSHOT, {'name': None})

    def test_manage_existing_snapshot_get_size_volume_ref_not_exist(self):
        self.array.get_volume.side_effect = \
            self.purestorage_module.PureHTTPError(
                text="Volume does not exist.",
                code=400
            )
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_snapshot_get_size,
                          SNAPSHOT, {'name': 'non-existing-volume.snap1'})

    def test_manage_existing_snapshot_get_size_bad_api_version(self):
        self.array.get_rest_version.return_value = '1.3'
        self.assertRaises(exception.PureDriverException,
                          self.driver.manage_existing_snapshot_get_size,
                          SNAPSHOT, {'name': PURE_SNAPSHOT['name']})

    def test_unmanage_snapshot(self):
        unmanaged_snap_name = SNAPSHOT_PURITY_NAME + "-unmanaged"
        self.driver.unmanage_snapshot(SNAPSHOT)
        self.array.rename_volume.assert_called_with(SNAPSHOT_PURITY_NAME,
                                                    unmanaged_snap_name)

    def test_unmanage_snapshot_error_propagates(self):
        self.assert_error_propagates([self.array.rename_volume],
                                     self.driver.unmanage_snapshot,
                                     SNAPSHOT)

    def test_unmanage_snapshot_with_deleted_snapshot(self):
        unmanaged_snap_name = SNAPSHOT_PURITY_NAME + "-unmanaged"
        self.array.rename_volume.side_effect = \
            self.purestorage_module.PureHTTPError(
                text="Snapshot does not exist.",
                code=400
            )

        self.driver.unmanage_snapshot(SNAPSHOT)

        self.array.rename_volume.assert_called_with(SNAPSHOT_PURITY_NAME,
                                                    unmanaged_snap_name)

    def test_unmanage_snapshot_bad_api_version(self):
        self.array.get_rest_version.return_value = '1.3'
        self.assertRaises(exception.PureDriverException,
                          self.driver.unmanage_snapshot,
                          SNAPSHOT)

    def _test_retype_repl(self, mock_is_repl, is_vol_repl,
                          repl_capability):
        mock_is_repl.return_value = is_vol_repl
        context = mock.MagicMock()
        volume = fake_volume.fake_volume_obj(context)
        new_type = {
            'extra_specs': {
                pure.EXTRA_SPECS_REPL_ENABLED: '<is> ' + str(repl_capability)
            }
        }

        actual = self.driver.retype(context, volume, new_type, None, None)
        expected = (True, None)
        self.assertEqual(expected, actual)
        return context, volume
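    # The retype helper above builds the replication flag in the scheduler
    # filter syntax Cinder uses for boolean extra specs: the literal string
    # '<is> True' (or '<is> False'). An illustrative check in that style
    # (hypothetical helper, not the driver's own code):
    @staticmethod
    def _example_is_replicated_spec(extra_specs):
        return extra_specs.get(pure.EXTRA_SPECS_REPL_ENABLED) == '<is> True'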
    @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type',
                autospec=True)
    def test_retype_repl_to_repl(self, mock_is_replicated_type):
        self._test_retype_repl(mock_is_replicated_type, True, True)

    @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type',
                autospec=True)
    def test_retype_non_repl_to_non_repl(self, mock_is_replicated_type):
        self._test_retype_repl(mock_is_replicated_type, False, False)

    @mock.patch(BASE_DRIVER_OBJ + '._enable_replication')
    @mock.patch(BASE_DRIVER_OBJ + '._disable_replication')
    @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type',
                autospec=True)
    def test_retype_non_repl_to_repl(self,
                                     mock_is_replicated_type,
                                     mock_replication_disable,
                                     mock_replication_enable):
        context, volume = self._test_retype_repl(mock_is_replicated_type,
                                                 False, True)
        self.assertFalse(mock_replication_disable.called)
        mock_replication_enable.assert_called_with(volume)

    @mock.patch(BASE_DRIVER_OBJ + '._enable_replication')
    @mock.patch(BASE_DRIVER_OBJ + '._disable_replication')
    @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type',
                autospec=True)
    def test_retype_repl_to_non_repl(self,
                                     mock_is_replicated_type,
                                     mock_replication_disable,
                                     mock_replication_enable):
        context, volume = self._test_retype_repl(mock_is_replicated_type,
                                                 True, False)
        self.assertFalse(mock_replication_enable.called)
        mock_replication_disable.assert_called_with(volume)

    @mock.patch('cinder.volume.volume_types.get_volume_type')
    def test_is_vol_replicated_no_extra_specs(self, mock_get_vol_type):
        mock_get_vol_type.return_value = NON_REPLICATED_VOL_TYPE
        volume = fake_volume.fake_volume_obj(mock.MagicMock())
        actual = self.driver._is_volume_replicated_type(volume)
        self.assertFalse(actual)

    @mock.patch('cinder.volume.volume_types.get_volume_type')
    def test_is_vol_replicated_has_repl_extra_specs(self,
                                                    mock_get_vol_type):
        mock_get_vol_type.return_value = REPLICATED_VOL_TYPE
        volume = fake_volume.fake_volume_obj(mock.MagicMock())
        actual = self.driver._is_volume_replicated_type(volume)
        self.assertTrue(actual)

    @mock.patch('cinder.volume.volume_types.get_volume_type')
    def test_is_vol_replicated_has_other_extra_specs(self,
                                                     mock_get_vol_type):
        vtype_test = deepcopy(NON_REPLICATED_VOL_TYPE)
        vtype_test["extra_specs"] = {"some_key": "some_value"}
        mock_get_vol_type.return_value = vtype_test
        volume = fake_volume.fake_volume_obj(mock.MagicMock())
        actual = self.driver._is_volume_replicated_type(volume)
        self.assertFalse(actual)

    def test_does_pgroup_exist_not_exists(self):
        self.array.get_pgroup.side_effect = (
            self.purestorage_module.PureHTTPError(code=400,
                                                  text="does not exist"))
        exists = self.driver._does_pgroup_exist(self.array, "some_pgroup")
        self.assertFalse(exists)

    def test_does_pgroup_exist_exists(self):
        self.array.get_pgroup.side_effect = None
        self.array.get_pgroup.return_value = PGROUP_ON_TARGET_NOT_ALLOWED
        exists = self.driver._does_pgroup_exist(self.array, "some_pgroup")
        self.assertTrue(exists)

    def test_does_pgroup_exist_error_propagates(self):
        self.assert_error_propagates([self.array.get_pgroup],
                                     self.driver._does_pgroup_exist,
                                     self.array,
                                     "some_pgroup")

    @mock.patch(BASE_DRIVER_OBJ + "._does_pgroup_exist")
    def test_wait_until_target_group_setting_propagates_ready(
            self, mock_exists):
        mock_exists.return_value = True
        self.driver._wait_until_target_group_setting_propagates(
            self.array, "some_pgroup")

    @mock.patch(BASE_DRIVER_OBJ + "._does_pgroup_exist")
    def test_wait_until_target_group_setting_propagates_not_ready(
            self, mock_exists):
        mock_exists.return_value = False
        self.assertRaises(
            exception.PureDriverException,
            self.driver._wait_until_target_group_setting_propagates,
            self.array,
            "some_pgroup"
        )
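    # The two "propagates" tests above only pin the end states: the mocked
    # existence check returns True (success) or False (PureDriverException).
    # A generic poll-until-ready loop consistent with that contract might
    # look like this sketch; the timeout and interval values are assumptions,
    # not taken from the driver:
    @staticmethod
    def _example_wait_until(check, timeout=30, interval=2):
        import time
        deadline = time.time() + timeout
        while time.time() < deadline:
            if check():
                return
            time.sleep(interval)
        raise exception.PureDriverException(
            reason="condition did not propagate in time")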
    def test_wait_until_source_array_allowed_ready(self):
        self.array.get_pgroup.return_value = PGROUP_ON_TARGET_ALLOWED
        self.driver._wait_until_source_array_allowed(
            self.array, "some_pgroup")

    def test_wait_until_source_array_allowed_not_ready(self):
        self.array.get_pgroup.return_value = PGROUP_ON_TARGET_NOT_ALLOWED
        self.assertRaises(
            exception.PureDriverException,
            self.driver._wait_until_source_array_allowed,
            self.array,
            "some_pgroup"
        )

    @mock.patch('cinder.volume.volume_types.get_volume_type')
    def test_create_volume_replicated(self, mock_get_volume_type):
        mock_get_volume_type.return_value = REPLICATED_VOL_TYPE
        self._setup_mocks_for_replication()
        self.driver._array = self.array
        self.driver._array.array_name = GET_ARRAY_PRIMARY["array_name"]
        self.driver._array.array_id = GET_ARRAY_PRIMARY["id"]
        self.driver._replication_target_arrays = [mock.Mock()]
        self.driver._replication_target_arrays[0].array_name = (
            GET_ARRAY_SECONDARY["array_name"])
        self.driver.create_volume(VOLUME)
        self.array.create_volume.assert_called_with(
            VOLUME["name"] + "-cinder", 2 * units.Gi)
        self.array.set_pgroup.assert_called_with(
            REPLICATION_PROTECTION_GROUP,
            addvollist=[VOLUME["name"] + "-cinder"])

    def test_find_failover_target_no_repl_targets(self):
        self.driver._replication_target_arrays = []
        self.assertRaises(exception.PureDriverException,
                          self.driver._find_failover_target,
                          None)

    @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap')
    def test_find_failover_target_secondary_specified(self, mock_get_snap):
        mock_backend_1 = mock.Mock()
        mock_backend_2 = mock.Mock()
        secondary_id = 'foo'
        mock_backend_2._backend_id = secondary_id
        self.driver._replication_target_arrays = [mock_backend_1,
                                                  mock_backend_2]
        mock_get_snap.return_value = REPLICATED_PGSNAPS[0]
        array, pg_snap = self.driver._find_failover_target(secondary_id)
        self.assertEqual(mock_backend_2, array)
        self.assertEqual(REPLICATED_PGSNAPS[0], pg_snap)

    def test_find_failover_target_secondary_specified_not_found(self):
        mock_backend = mock.Mock()
        mock_backend._backend_id = 'not_foo'
        self.driver._replication_target_arrays = [mock_backend]
        self.assertRaises(exception.InvalidReplicationTarget,
                          self.driver._find_failover_target,
                          'foo')

    @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap')
    def test_find_failover_target_secondary_specified_no_pgsnap(
            self, mock_get_snap):
        mock_backend = mock.Mock()
        secondary_id = 'foo'
        mock_backend._backend_id = secondary_id
        self.driver._replication_target_arrays = [mock_backend]
        mock_get_snap.return_value = None
        self.assertRaises(exception.PureDriverException,
                          self.driver._find_failover_target,
                          secondary_id)

    @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap')
    def test_find_failover_target_no_secondary_specified(self,
                                                         mock_get_snap):
        mock_backend_1 = mock.Mock()
        mock_backend_2 = mock.Mock()
        self.driver._replication_target_arrays = [mock_backend_1,
                                                  mock_backend_2]
        mock_get_snap.return_value = REPLICATED_PGSNAPS[0]
        array, pg_snap = self.driver._find_failover_target(None)
        self.assertEqual(mock_backend_1, array)
        self.assertEqual(REPLICATED_PGSNAPS[0], pg_snap)
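    # Taken together, the failover-target cases pin down a selection rule:
    # with a secondary_id, pick the target whose _backend_id matches
    # (InvalidReplicationTarget otherwise); with no secondary_id, take the
    # first target that has a replicated pg snapshot. A compact sketch of
    # that rule (hypothetical helper, not the driver implementation):
    @staticmethod
    def _example_pick_target(targets, secondary_id, get_snap):
        if secondary_id:
            targets = [t for t in targets
                       if t._backend_id == secondary_id]
            if not targets:
                raise exception.InvalidReplicationTarget(reason=secondary_id)
        for target in targets:
            snap = get_snap(target)
            if snap:
                return target, snap
        raise exception.PureDriverException(reason="no usable pg snapshot")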
    @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap')
    def test_find_failover_target_no_secondary_specified_missing_pgsnap(
            self, mock_get_snap):
        mock_backend_1 = mock.Mock()
        mock_backend_2 = mock.Mock()
        self.driver._replication_target_arrays = [mock_backend_1,
                                                  mock_backend_2]
        mock_get_snap.side_effect = [None, REPLICATED_PGSNAPS[0]]
        array, pg_snap = self.driver._find_failover_target(None)
        self.assertEqual(mock_backend_2, array)
        self.assertEqual(REPLICATED_PGSNAPS[0], pg_snap)

    @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap')
    def test_find_failover_target_no_secondary_specified_no_pgsnap(
            self, mock_get_snap):
        mock_backend = mock.Mock()
        self.driver._replication_target_arrays = [mock_backend]
        mock_get_snap.return_value = None
        self.assertRaises(exception.PureDriverException,
                          self.driver._find_failover_target,
                          None)

    @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap')
    def test_find_failover_target_error_propagates_secondary_specified(
            self, mock_get_snap):
        mock_backend = mock.Mock()
        mock_backend._backend_id = 'foo'
        self.driver._replication_target_arrays = [mock_backend]
        self.assert_error_propagates(
            [mock_get_snap],
            self.driver._find_failover_target,
            'foo'
        )

    @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap')
    def test_find_failover_target_error_propagates_no_secondary(
            self, mock_get_snap):
        self.driver._replication_target_arrays = [mock.Mock()]
        self.assert_error_propagates(
            [mock_get_snap],
            self.driver._find_failover_target,
            None
        )

    @mock.patch('cinder.volume.volume_types.get_volume_type')
    def test_enable_replication_if_needed_success(
            self, mock_get_volume_type):
        mock_get_volume_type.return_value = REPLICATED_VOL_TYPE
        self.driver._enable_replication_if_needed(self.array, VOLUME)

        self.array.set_pgroup.assert_called_with(
            self.driver._replication_pg_name,
            addvollist=[VOLUME_PURITY_NAME]
        )

    @mock.patch('cinder.volume.volume_types.get_volume_type')
    def test_enable_replication_if_needed_not_repl_type(
            self, mock_get_volume_type):
        mock_get_volume_type.return_value = NON_REPLICATED_VOL_TYPE
        self.driver._enable_replication_if_needed(self.array, VOLUME)
        self.assertFalse(self.array.set_pgroup.called)

    @mock.patch('cinder.volume.volume_types.get_volume_type')
    def test_enable_replication_if_needed_already_repl(
            self, mock_get_volume_type):
        mock_get_volume_type.return_value = REPLICATED_VOL_TYPE
        self.array.set_pgroup.side_effect = FakePureStorageHTTPError(
            code=400, text='already belongs to')
        self.driver._enable_replication_if_needed(self.array, VOLUME)

        self.array.set_pgroup.assert_called_with(
            self.driver._replication_pg_name,
            addvollist=[VOLUME_PURITY_NAME]
        )

    @mock.patch('cinder.volume.volume_types.get_volume_type')
    def test_enable_replication_if_needed_error_propagates(
            self, mock_get_volume_type):
        mock_get_volume_type.return_value = REPLICATED_VOL_TYPE
        self.driver._enable_replication_if_needed(self.array, VOLUME)
        self.assert_error_propagates(
            [self.array.set_pgroup],
            self.driver._enable_replication,
            self.array, VOLUME
        )
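    # The "already_repl" case above treats one specific backend failure --
    # HTTP 400 with 'already belongs to' in the message -- as success, which
    # is what makes enabling replication idempotent. The usual shape of such
    # a guard, sketched under those assumptions (PureHTTPError is assumed to
    # expose .code and .text, matching how the tests construct it):
    def _example_add_to_pgroup(self, array, pg_name, vol_name):
        try:
            array.set_pgroup(pg_name, addvollist=[vol_name])
        except self.purestorage_module.PureHTTPError as err:
            if not (err.code == 400 and 'already belongs to' in err.text):
                raise  # anything else is a real failure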
    @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray')
    @mock.patch(BASE_DRIVER_OBJ + '._find_failover_target')
    def test_failover(self, mock_find_failover_target, mock_get_array):
        secondary_device_id = 'foo'
        self.array2._backend_id = secondary_device_id
        self.driver._replication_target_arrays = [self.array2]

        array2_v1_3 = mock.Mock()
        array2_v1_3._backend_id = secondary_device_id
        array2_v1_3.array_name = GET_ARRAY_SECONDARY['array_name']
        array2_v1_3.array_id = GET_ARRAY_SECONDARY['id']
        array2_v1_3.version = '1.3'
        mock_get_array.return_value = array2_v1_3

        target_array = self.array2
        target_array.copy_volume = mock.Mock()

        mock_find_failover_target.return_value = (
            target_array,
            REPLICATED_PGSNAPS[1]
        )

        array2_v1_3.get_volume.return_value = REPLICATED_VOLUME_SNAPS

        context = mock.MagicMock()
        new_active_id, volume_updates = self.driver.failover_host(
            context,
            REPLICATED_VOLUME_OBJS,
            None
        )

        self.assertEqual(secondary_device_id, new_active_id)
        self.assertEqual([], volume_updates)

        calls = []
        for snap in REPLICATED_VOLUME_SNAPS:
            vol_name = snap['name'].split('.')[-1]
            calls.append(mock.call(
                snap['name'],
                vol_name,
                overwrite=True
            ))
        target_array.copy_volume.assert_has_calls(calls, any_order=True)

    @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray')
    @mock.patch(BASE_DRIVER_OBJ + '._find_failover_target')
    def test_failover_error_propagates(self, mock_find_failover_target,
                                       mock_get_array):
        mock_find_failover_target.return_value = (
            self.array2,
            REPLICATED_PGSNAPS[1]
        )

        array2_v1_3 = mock.Mock()
        array2_v1_3.array_name = GET_ARRAY_SECONDARY['array_name']
        array2_v1_3.array_id = GET_ARRAY_SECONDARY['id']
        array2_v1_3.version = '1.3'
        mock_get_array.return_value = array2_v1_3

        array2_v1_3.get_volume.return_value = REPLICATED_VOLUME_SNAPS

        self.assert_error_propagates(
            [mock_find_failover_target,
             mock_get_array,
             array2_v1_3.get_volume,
             self.array2.copy_volume],
            self.driver.failover_host,
            mock.Mock(), REPLICATED_VOLUME_OBJS, None
        )

    def test_disable_replication_success(self):
        self.driver._disable_replication(VOLUME)
        self.array.set_pgroup.assert_called_with(
            self.driver._replication_pg_name,
            remvollist=[VOLUME_PURITY_NAME]
        )

    def test_disable_replication_error_propagates(self):
        self.assert_error_propagates(
            [self.array.set_pgroup],
            self.driver._disable_replication,
            VOLUME
        )

    def test_disable_replication_already_disabled(self):
        self.array.set_pgroup.side_effect = FakePureStorageHTTPError(
            code=400, text='could not be found')
        self.driver._disable_replication(VOLUME)
        self.array.set_pgroup.assert_called_with(
            self.driver._replication_pg_name,
            remvollist=[VOLUME_PURITY_NAME]
        )

    @ddt.data(
        dict(version='1.5.0'),
        dict(version='2.0.0'),
        dict(version='1.4.1'),
    )
    @ddt.unpack
    def test_get_flasharray_verify_https(self, version):
        self.purestorage_module.VERSION = version
        san_ip = '1.2.3.4'
        api_token = 'abcdef'
        cert_path = '/my/ssl/certs'
        self.purestorage_module.FlashArray.return_value = mock.MagicMock()

        self.driver._get_flasharray(san_ip,
                                    api_token,
                                    verify_https=True,
                                    ssl_cert_path=cert_path)
        self.purestorage_module.FlashArray.assert_called_with(
            san_ip,
            api_token=api_token,
            rest_version=None,
            verify_https=True,
            ssl_cert=cert_path
        )

    def test_get_flasharray_dont_verify_https_version_too_old(self):
        self.purestorage_module.VERSION = '1.4.0'
        san_ip = '1.2.3.4'
        api_token = 'abcdef'
        self.purestorage_module.FlashArray.return_value = mock.MagicMock()

        self.driver._get_flasharray(san_ip,
                                    api_token,
                                    verify_https=False,
                                    ssl_cert_path=None)
        self.purestorage_module.FlashArray.assert_called_with(
            san_ip,
            api_token=api_token,
            rest_version=None
        )

    def test_get_flasharray_verify_https_version_too_old(self):
        self.purestorage_module.VERSION = '1.4.0'
        san_ip = '1.2.3.4'
        api_token = 'abcdef'
        self.purestorage_module.FlashArray.return_value = mock.MagicMock()

        self.assertRaises(
            exception.PureDriverException,
            self.driver._get_flasharray,
            san_ip,
            api_token,
            verify_https=True,
            ssl_cert_path='/my/ssl/certs'
        )
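# The three _get_flasharray tests above imply a version gate: the SSL
# verification kwargs are only passed to purestorage.FlashArray when the
# client library is new enough (1.4.1/1.5.0/2.0.0 pass; 1.4.0 raises if
# verification was requested). A sketch of that gate, with the 1.4.1
# cutoff treated as an assumption drawn from the test data:
def _example_supports_verify_https(purestorage_module):
    """Illustrative version check inferred from the ddt cases above."""
    from distutils.version import LooseVersion
    return (LooseVersion(purestorage_module.VERSION) >=
            LooseVersion('1.4.1'))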
class PureISCSIDriverTestCase(PureDriverTestCase):

    def setUp(self):
        super(PureISCSIDriverTestCase, self).setUp()
        self.mock_config.use_chap_auth = False
        self.driver = pure.PureISCSIDriver(configuration=self.mock_config)
        self.driver._array = self.array

    def test_get_host(self):
        good_host = PURE_HOST.copy()
        good_host.update(iqn=["another-wrong-iqn", INITIATOR_IQN])
        bad_host = {"name": "bad-host", "iqn": ["wrong-iqn"]}
        self.array.list_hosts.return_value = [bad_host]
        real_result = self.driver._get_host(self.array, ISCSI_CONNECTOR)
        self.assertIs(None, real_result)
        self.array.list_hosts.return_value.append(good_host)
        real_result = self.driver._get_host(self.array, ISCSI_CONNECTOR)
        self.assertEqual(good_host, real_result)
        self.assert_error_propagates([self.array.list_hosts],
                                     self.driver._get_host,
                                     self.array,
                                     ISCSI_CONNECTOR)

    @mock.patch(ISCSI_DRIVER_OBJ + "._connect")
    @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports")
    def test_initialize_connection(self, mock_get_iscsi_ports,
                                   mock_connection):
        mock_get_iscsi_ports.return_value = ISCSI_PORTS
        lun = 1
        connection = {
            "vol": VOLUME["name"] + "-cinder",
            "lun": lun,
        }
        mock_connection.return_value = connection
        result = deepcopy(ISCSI_CONNECTION_INFO)

        real_result = self.driver.initialize_connection(VOLUME,
                                                        ISCSI_CONNECTOR)
        self.assertDictMatch(result, real_result)
        mock_get_iscsi_ports.assert_called_with()
        mock_connection.assert_called_with(VOLUME, ISCSI_CONNECTOR, None)

        self.assert_error_propagates([mock_get_iscsi_ports,
                                      mock_connection],
                                     self.driver.initialize_connection,
                                     VOLUME, ISCSI_CONNECTOR)

    @mock.patch(ISCSI_DRIVER_OBJ + "._connect")
    @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports")
    def test_initialize_connection_with_auth(self, mock_get_iscsi_ports,
                                             mock_connection):
        auth_type = "CHAP"
        chap_username = ISCSI_CONNECTOR["host"]
        chap_password = "password"
        mock_get_iscsi_ports.return_value = ISCSI_PORTS
        initiator_update = [{"key": pure.CHAP_SECRET_KEY,
                             "value": chap_password}]
        mock_connection.return_value = {
            "vol": VOLUME["name"] + "-cinder",
            "lun": 1,
            "auth_username": chap_username,
            "auth_password": chap_password,
        }
        result = deepcopy(ISCSI_CONNECTION_INFO)
        result["data"]["auth_method"] = auth_type
        result["data"]["auth_username"] = chap_username
        result["data"]["auth_password"] = chap_password

        self.mock_config.use_chap_auth = True

        # Branch where no credentials were generated
        real_result = self.driver.initialize_connection(VOLUME,
                                                        ISCSI_CONNECTOR)
        mock_connection.assert_called_with(VOLUME, ISCSI_CONNECTOR, None)
        self.assertDictMatch(result, real_result)

        # Branch where new credentials were generated
        mock_connection.return_value["initiator_update"] = initiator_update
        result["initiator_update"] = initiator_update
        real_result = self.driver.initialize_connection(VOLUME,
                                                        ISCSI_CONNECTOR)
        mock_connection.assert_called_with(VOLUME, ISCSI_CONNECTOR, None)
        self.assertDictMatch(result, real_result)

        self.assert_error_propagates([mock_get_iscsi_ports,
                                      mock_connection],
                                     self.driver.initialize_connection,
                                     VOLUME, ISCSI_CONNECTOR)

    @mock.patch(ISCSI_DRIVER_OBJ + "._connect")
    @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports")
    def test_initialize_connection_multipath(self,
                                             mock_get_iscsi_ports,
                                             mock_connection):
        mock_get_iscsi_ports.return_value = ISCSI_PORTS
        lun = 1
        connection = {
            "vol": VOLUME["name"] + "-cinder",
            "lun": lun,
        }
        mock_connection.return_value = connection
        multipath_connector = deepcopy(ISCSI_CONNECTOR)
        multipath_connector["multipath"] = True
        result = deepcopy(ISCSI_CONNECTION_INFO)

        real_result = self.driver.initialize_connection(
            VOLUME, multipath_connector)
        self.assertDictMatch(result, real_result)
        mock_get_iscsi_ports.assert_called_with()
        mock_connection.assert_called_with(VOLUME, multipath_connector,
                                           None)

        multipath_connector["multipath"] = False
        self.driver.initialize_connection(VOLUME, multipath_connector)

    def test_get_target_iscsi_ports(self):
        self.array.list_ports.return_value = ISCSI_PORTS
        ret = self.driver._get_target_iscsi_ports()
        self.assertEqual(ISCSI_PORTS, ret)
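    # The CHAP tests above exercise two branches: reuse a stored secret
    # from initiator_data, or generate one and hand it back to Cinder via
    # "initiator_update". A condensed sketch of that decision; everything
    # other than pure.CHAP_SECRET_KEY and generate_password is hypothetical:
    @staticmethod
    def _example_get_chap_secret(initiator_data):
        for entry in initiator_data or []:
            if entry["key"] == pure.CHAP_SECRET_KEY:
                return entry["value"], None  # reuse the stored secret
        from cinder.volume import utils as volume_utils
        secret = volume_utils.generate_password()
        update = {"set_values": {pure.CHAP_SECRET_KEY: secret}}
        return secret, update  # caller passes update back to Cinder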
    def test_get_target_iscsi_ports_with_iscsi_and_fc(self):
        self.array.list_ports.return_value = PORTS_WITH
        ret = self.driver._get_target_iscsi_ports()
        self.assertEqual(ISCSI_PORTS, ret)

    def test_get_target_iscsi_ports_with_no_ports(self):
        # Should raise an exception if there are no ports
        self.array.list_ports.return_value = []
        self.assertRaises(exception.PureDriverException,
                          self.driver._get_target_iscsi_ports)

    def test_get_target_iscsi_ports_with_only_fc_ports(self):
        # Should raise an exception if there are no iscsi ports
        self.array.list_ports.return_value = PORTS_WITHOUT
        self.assertRaises(exception.PureDriverException,
                          self.driver._get_target_iscsi_ports)

    @mock.patch("cinder.volume.utils.generate_password", autospec=True)
    @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True)
    @mock.patch(ISCSI_DRIVER_OBJ + "._generate_purity_host_name",
                spec=True)
    def test_connect(self, mock_generate, mock_host, mock_gen_secret):
        vol_name = VOLUME["name"] + "-cinder"
        result = {"vol": vol_name, "lun": 1}

        # Branch where host already exists
        mock_host.return_value = PURE_HOST
        self.array.connect_host.return_value = {"vol": vol_name, "lun": 1}
        real_result = self.driver._connect(VOLUME, ISCSI_CONNECTOR, None)
        self.assertEqual(result, real_result)
        mock_host.assert_called_with(self.driver, self.array,
                                     ISCSI_CONNECTOR)
        self.assertFalse(mock_generate.called)
        self.assertFalse(self.array.create_host.called)
        self.array.connect_host.assert_called_with(PURE_HOST_NAME,
                                                   vol_name)

        # Branch where new host is created
        mock_host.return_value = None
        mock_generate.return_value = PURE_HOST_NAME
        real_result = self.driver._connect(VOLUME, ISCSI_CONNECTOR, None)
        mock_host.assert_called_with(self.driver, self.array,
                                     ISCSI_CONNECTOR)
        mock_generate.assert_called_with(HOSTNAME)
        self.array.create_host.assert_called_with(PURE_HOST_NAME,
                                                  iqnlist=[INITIATOR_IQN])
        self.assertEqual(result, real_result)

        mock_generate.reset_mock()
        self.array.reset_mock()
        self.assert_error_propagates(
            [mock_host, mock_generate,
             self.array.connect_host,
             self.array.create_host],
            self.driver._connect, VOLUME, ISCSI_CONNECTOR, None)

        self.mock_config.use_chap_auth = True
        chap_user = ISCSI_CONNECTOR["host"]
        chap_password = "sOmEseCr3t"

        # Branch where chap is used and credentials already exist
        initiator_data = [{"key": pure.CHAP_SECRET_KEY,
                           "value": chap_password}]
        real_result = self.driver._connect(VOLUME, ISCSI_CONNECTOR,
                                           initiator_data)
        result["auth_username"] = chap_user
        result["auth_password"] = chap_password
        self.assertDictMatch(result, real_result)
        self.array.set_host.assert_called_with(PURE_HOST_NAME,
                                               host_user=chap_user,
                                               host_password=chap_password)

        # Branch where chap is used and credentials are generated
        mock_gen_secret.return_value = chap_password
        real_result = self.driver._connect(VOLUME, ISCSI_CONNECTOR, None)
        result["auth_username"] = chap_user
        result["auth_password"] = chap_password
        result["initiator_update"] = {
            "set_values": {
                pure.CHAP_SECRET_KEY: chap_password
            },
        }
        self.assertDictMatch(result, real_result)
        self.array.set_host.assert_called_with(PURE_HOST_NAME,
                                               host_user=chap_user,
                                               host_password=chap_password)

    @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True)
    def test_connect_already_connected(self, mock_host):
        mock_host.return_value = PURE_HOST
        expected = {"host": PURE_HOST_NAME, "lun": 1}
        self.array.list_volume_private_connections.return_value = \
            [expected, {"host": "extra", "lun": 2}]
        self.array.connect_host.side_effect = \
            self.purestorage_module.PureHTTPError(
                code=400,
                text="Connection already exists"
            )
        actual = self.driver._connect(VOLUME, ISCSI_CONNECTOR, None)
        self.assertEqual(expected, actual)
        self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
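    # "Already connected" above is another swallowed backend error: a 400
    # "Connection already exists" from connect_host is resolved by looking
    # the existing connection up instead of failing, and only re-raised if
    # the lookup cannot confirm it. A sketch of that pattern consistent
    # with the three already-connected tests (hypothetical helper; the
    # list_volume_private_connections argument is an assumption):
    def _example_connect_once(self, array, host_name, vol_name):
        try:
            return array.connect_host(host_name, vol_name)
        except self.purestorage_module.PureHTTPError as err:
            if err.code == 400 and "Connection already exists" in err.text:
                for conn in array.list_volume_private_connections(vol_name):
                    if conn["host"] == host_name:
                        return conn
                raise exception.PureDriverException(
                    reason="connection reported but not found")
            raise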
    @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True)
    def test_connect_already_connected_list_hosts_empty(self, mock_host):
        mock_host.return_value = PURE_HOST
        self.array.list_volume_private_connections.return_value = {}
        self.array.connect_host.side_effect = \
            self.purestorage_module.PureHTTPError(
                code=400,
                text="Connection already exists"
            )
        self.assertRaises(exception.PureDriverException,
                          self.driver._connect, VOLUME,
                          ISCSI_CONNECTOR, None)
        self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)

    @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True)
    def test_connect_already_connected_list_hosts_exception(self,
                                                            mock_host):
        mock_host.return_value = PURE_HOST
        self.array.list_volume_private_connections.side_effect = \
            self.purestorage_module.PureHTTPError(code=400, text="")
        self.array.connect_host.side_effect = \
            self.purestorage_module.PureHTTPError(
                code=400,
                text="Connection already exists"
            )
        self.assertRaises(self.purestorage_module.PureHTTPError,
                          self.driver._connect, VOLUME, ISCSI_CONNECTOR,
                          None)
        self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)


class PureFCDriverTestCase(PureDriverTestCase):

    def setUp(self):
        super(PureFCDriverTestCase, self).setUp()
        self.driver = pure.PureFCDriver(configuration=self.mock_config)
        self.driver._array = self.array
        self.driver._lookup_service = mock.Mock()

    def test_get_host(self):
        good_host = PURE_HOST.copy()
        good_host.update(wwn=["another-wrong-wwn", INITIATOR_WWN])
        bad_host = {"name": "bad-host", "wwn": ["wrong-wwn"]}
        self.array.list_hosts.return_value = [bad_host]
        actual_result = self.driver._get_host(self.array, FC_CONNECTOR)
        self.assertIs(None, actual_result)
        self.array.list_hosts.return_value.append(good_host)
        actual_result = self.driver._get_host(self.array, FC_CONNECTOR)
        self.assertEqual(good_host, actual_result)
        self.assert_error_propagates([self.array.list_hosts],
                                     self.driver._get_host,
                                     self.array,
                                     FC_CONNECTOR)
    @mock.patch(FC_DRIVER_OBJ + "._connect")
    def test_initialize_connection(self, mock_connection):
        lookup_service = self.driver._lookup_service
        (lookup_service.get_device_mapping_from_network.
         return_value) = DEVICE_MAPPING
        mock_connection.return_value = {"vol": VOLUME["name"] + "-cinder",
                                        "lun": 1,
                                        }
        self.array.list_ports.return_value = FC_PORTS
        actual_result = self.driver.initialize_connection(VOLUME,
                                                          FC_CONNECTOR)
        self.assertDictMatch(FC_CONNECTION_INFO, actual_result)

    @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True)
    @mock.patch(FC_DRIVER_OBJ + "._generate_purity_host_name", spec=True)
    def test_connect(self, mock_generate, mock_host):
        vol_name = VOLUME["name"] + "-cinder"
        result = {"vol": vol_name, "lun": 1}

        # Branch where host already exists
        mock_host.return_value = PURE_HOST
        self.array.connect_host.return_value = {"vol": vol_name, "lun": 1}
        real_result = self.driver._connect(VOLUME, FC_CONNECTOR)
        self.assertEqual(result, real_result)
        mock_host.assert_called_with(self.driver, self.array, FC_CONNECTOR)
        self.assertFalse(mock_generate.called)
        self.assertFalse(self.array.create_host.called)
        self.array.connect_host.assert_called_with(PURE_HOST_NAME,
                                                   vol_name)

        # Branch where new host is created
        mock_host.return_value = None
        mock_generate.return_value = PURE_HOST_NAME
        real_result = self.driver._connect(VOLUME, FC_CONNECTOR)
        mock_host.assert_called_with(self.driver, self.array, FC_CONNECTOR)
        mock_generate.assert_called_with(HOSTNAME)
        self.array.create_host.assert_called_with(PURE_HOST_NAME,
                                                  wwnlist={INITIATOR_WWN})
        self.assertEqual(result, real_result)

        mock_generate.reset_mock()
        self.array.reset_mock()
        self.assert_error_propagates(
            [mock_host, mock_generate,
             self.array.connect_host,
             self.array.create_host],
            self.driver._connect, VOLUME, FC_CONNECTOR)

    @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True)
    def test_connect_already_connected(self, mock_host):
        mock_host.return_value = PURE_HOST
        expected = {"host": PURE_HOST_NAME, "lun": 1}
        self.array.list_volume_private_connections.return_value = \
            [expected, {"host": "extra", "lun": 2}]
        self.array.connect_host.side_effect = \
            self.purestorage_module.PureHTTPError(
                code=400,
                text="Connection already exists"
            )
        actual = self.driver._connect(VOLUME, FC_CONNECTOR)
        self.assertEqual(expected, actual)
        self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)

    @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True)
    def test_connect_already_connected_list_hosts_empty(self, mock_host):
        mock_host.return_value = PURE_HOST
        self.array.list_volume_private_connections.return_value = {}
        self.array.connect_host.side_effect = \
            self.purestorage_module.PureHTTPError(
                code=400,
                text="Connection already exists"
            )
        self.assertRaises(exception.PureDriverException,
                          self.driver._connect, VOLUME, FC_CONNECTOR)
        self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)

    @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True)
    def test_connect_already_connected_list_hosts_exception(self,
                                                            mock_host):
        mock_host.return_value = PURE_HOST
        self.array.list_volume_private_connections.side_effect = \
            self.purestorage_module.PureHTTPError(code=400, text="")
        self.array.connect_host.side_effect = \
            self.purestorage_module.PureHTTPError(
                code=400,
                text="Connection already exists"
            )
        self.assertRaises(self.purestorage_module.PureHTTPError,
                          self.driver._connect, VOLUME, FC_CONNECTOR)
        self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)


@ddt.ddt
class PureVolumeUpdateStatsTestCase(PureBaseSharedDriverTestCase):

    def setUp(self):
        super(PureVolumeUpdateStatsTestCase, self).setUp()
        self.array.get.side_effect = self.fake_get_array
    @ddt.data(dict(used=10,
                   provisioned=100,
                   config_ratio=5,
                   expected_ratio=5,
                   auto=False),
              dict(used=10,
                   provisioned=100,
                   config_ratio=5,
                   expected_ratio=10,
                   auto=True),
              dict(used=0,
                   provisioned=100,
                   config_ratio=5,
                   expected_ratio=5,
                   auto=True),
              dict(used=10,
                   provisioned=0,
                   config_ratio=5,
                   expected_ratio=5,
                   auto=True))
    @ddt.unpack
    def test_get_thin_provisioning(self,
                                   used,
                                   provisioned,
                                   config_ratio,
                                   expected_ratio,
                                   auto):
        self.mock_config.pure_automatic_max_oversubscription_ratio = auto
        self.mock_config.max_over_subscription_ratio = config_ratio
        actual_ratio = self.driver._get_thin_provisioning(provisioned,
                                                          used)
        self.assertEqual(expected_ratio, actual_ratio)
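    # The four ddt cases above fully determine the ratio rule: with
    # automatic oversubscription enabled and non-zero usage, the ratio is
    # provisioned/used; otherwise the configured ratio wins. Restated as a
    # standalone sketch (hypothetical function that mirrors the test data):
    @staticmethod
    def _example_thin_ratio(provisioned, used, config_ratio, auto):
        if auto and used > 0 and provisioned > 0:
            return float(provisioned) / used  # e.g. 100/10 -> 10
        return config_ratio                   # e.g. the configured 5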
    @mock.patch(BASE_DRIVER_OBJ + '.get_goodness_function')
    @mock.patch(BASE_DRIVER_OBJ + '.get_filter_function')
    @mock.patch(BASE_DRIVER_OBJ + '._get_provisioned_space')
    @mock.patch(BASE_DRIVER_OBJ + '._get_thin_provisioning')
    def test_get_volume_stats(self, mock_get_thin_provisioning,
                              mock_get_space,
                              mock_get_filter,
                              mock_get_goodness):
        filter_function = 'capabilities.total_volumes < 10'
        goodness_function = '90'
        num_hosts = 20
        num_snaps = 175
        num_pgroups = 15
        reserved_percentage = 12

        self.array.list_hosts.return_value = [PURE_HOST] * num_hosts
        self.array.list_volumes.return_value = [PURE_SNAPSHOT] * num_snaps
        self.array.list_pgroups.return_value = [PURE_PGROUP] * num_pgroups
        self.mock_config.reserved_percentage = reserved_percentage
        mock_get_space.return_value = (PROVISIONED_CAPACITY * units.Gi, 100)
        mock_get_filter.return_value = filter_function
        mock_get_goodness.return_value = goodness_function
        mock_get_thin_provisioning.return_value = (PROVISIONED_CAPACITY /
                                                   USED_SPACE)

        expected_result = {
            'volume_backend_name': VOLUME_BACKEND_NAME,
            'vendor_name': 'Pure Storage',
            'driver_version': self.driver.VERSION,
            'storage_protocol': None,
            'consistencygroup_support': True,
            'thin_provisioning_support': True,
            'multiattach': True,
            'total_capacity_gb': TOTAL_CAPACITY,
            'free_capacity_gb': TOTAL_CAPACITY - USED_SPACE,
            'reserved_percentage': reserved_percentage,
            'provisioned_capacity': PROVISIONED_CAPACITY,
            'max_over_subscription_ratio': (PROVISIONED_CAPACITY /
                                            USED_SPACE),
            'filter_function': filter_function,
            'goodness_function': goodness_function,
            'total_volumes': 100,
            'total_snapshots': num_snaps,
            'total_hosts': num_hosts,
            'total_pgroups': num_pgroups,
            'writes_per_sec': PERF_INFO['writes_per_sec'],
            'reads_per_sec': PERF_INFO['reads_per_sec'],
            'input_per_sec': PERF_INFO['input_per_sec'],
            'output_per_sec': PERF_INFO['output_per_sec'],
            'usec_per_read_op': PERF_INFO['usec_per_read_op'],
            'usec_per_write_op': PERF_INFO['usec_per_write_op'],
            'queue_depth': PERF_INFO['queue_depth'],
            'replication_enabled': False,
            'replication_type': ['async'],
            'replication_count': 0,
            'replication_targets': [],
        }

        real_result = self.driver.get_volume_stats(refresh=True)
        self.assertDictMatch(expected_result, real_result)

        # Make sure when refresh=False we are using cached values and not
        # sending additional requests to the array.
        self.array.reset_mock()
        real_result = self.driver.get_volume_stats(refresh=False)
        self.assertDictMatch(expected_result, real_result)
        self.assertFalse(self.array.get.called)
        self.assertFalse(self.array.list_volumes.called)
        self.assertFalse(self.array.list_hosts.called)
        self.assertFalse(self.array.list_pgroups.called)
cinder-8.0.0/cinder/tests/unit/test_fixtures.py0000664000567000056710000000460712701406250022777 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures as fx
from oslo_log import log as logging
import testtools

from cinder.tests import fixtures


class TestLogging(testtools.TestCase):
    def test_default_logging(self):
        stdlog = self.useFixture(fixtures.StandardLogging())
        root = logging.getLogger()
        # there should be a null handler as well at DEBUG
        self.assertEqual(2, len(root.handlers), root.handlers)
        log = logging.getLogger(__name__)
        log.info("at info")
        log.debug("at debug")
        self.assertIn("at info", stdlog.logger.output)
        self.assertNotIn("at debug", stdlog.logger.output)

        # broken debug messages should still explode, even though we
        # aren't logging them in the regular handler
        self.assertRaises(TypeError, log.debug, "this is broken %s %s",
                          "foo")

        # and, ensure that one of the terrible log messages isn't
        # output at info
        warn_log = logging.getLogger('migrate.versioning.api')
        warn_log.info("warn_log at info, should be skipped")
        warn_log.error("warn_log at error")
        self.assertIn("warn_log at error", stdlog.logger.output)
        self.assertNotIn("warn_log at info", stdlog.logger.output)

    def test_debug_logging(self):
        self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))

        stdlog = self.useFixture(fixtures.StandardLogging())
        root = logging.getLogger()
        # there should no longer be a null handler
        self.assertEqual(1, len(root.handlers), root.handlers)
        log = logging.getLogger(__name__)
        log.info("at info")
        log.debug("at debug")
        self.assertIn("at info", stdlog.logger.output)
        self.assertIn("at debug", stdlog.logger.output)
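
# The fixture behaviour pinned down above: by default DEBUG messages and
# chatty 'migrate.versioning.api' INFO messages are filtered out, and
# setting OS_DEBUG=1 before installing the fixture turns DEBUG capture on.
# A sketch of how a test might opt in, following the same pattern as
# test_debug_logging (the helper name is hypothetical):
def _example_enable_debug_capture(test_case):
    test_case.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
    return test_case.useFixture(fixtures.StandardLogging())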
""" Tests for Violin Memory 7000 Series All-Flash Array Common Driver """ import math import mock from oslo_utils import units from cinder import context from cinder import exception from cinder import test from cinder.tests.unit import fake_vmem_client as vmemclient from cinder.volume import configuration as conf from cinder.volume.drivers.violin import v7000_common from cinder.volume import volume_types VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba" VOLUME = {"name": "volume-" + VOLUME_ID, "id": VOLUME_ID, "display_name": "fake_volume", "size": 2, "host": "irrelevant", "volume_type": None, "volume_type_id": None, } SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb" SNAPSHOT = {"name": "snapshot-" + SNAPSHOT_ID, "id": SNAPSHOT_ID, "volume_id": VOLUME_ID, "volume_name": "volume-" + VOLUME_ID, "volume_size": 2, "display_name": "fake_snapshot", } SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" SRC_VOL = {"name": "volume-" + SRC_VOL_ID, "id": SRC_VOL_ID, "display_name": "fake_src_vol", "size": 2, "host": "irrelevant", "volume_type": None, "volume_type_id": None, } INITIATOR_IQN = "iqn.1111-22.org.debian:11:222" CONNECTOR = {"initiator": INITIATOR_IQN} class V7000CommonTestCase(test.TestCase): """Test case for Violin drivers.""" def setUp(self): super(V7000CommonTestCase, self).setUp() self.conf = self.setup_configuration() self.driver = v7000_common.V7000Common(self.conf) self.driver.container = 'myContainer' self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022' self.stats = {} def tearDown(self): super(V7000CommonTestCase, self).tearDown() def setup_configuration(self): config = mock.Mock(spec=conf.Configuration) config.volume_backend_name = 'v7000_common' config.san_ip = '1.1.1.1' config.san_login = 'admin' config.san_password = '' config.san_thin_provision = False config.san_is_local = False config.gateway_mga = '2.2.2.2' config.gateway_mgb = '3.3.3.3' config.use_igroups = False config.violin_request_timeout = 300 config.container = 'myContainer' return config @mock.patch('vmemclient.open') def setup_mock_client(self, _m_client, m_conf=None): """Create a fake backend communication factory. The xg-tools creates a Concerto connection object (for V7000 devices) and returns it for use on a call to vmemclient.open(). 
""" # configure the concerto object mock with defaults _m_concerto = mock.Mock(name='Concerto', version='1.1.1', spec=vmemclient.mock_client_conf) # if m_conf, clobber the defaults with it if m_conf: _m_concerto.configure_mock(**m_conf) # set calls to vmemclient.open() to return this mocked concerto object _m_client.return_value = _m_concerto return _m_client def setup_mock_concerto(self, m_conf=None): """Create a fake Concerto communication object.""" _m_concerto = mock.Mock(name='Concerto', version='1.1.1', spec=vmemclient.mock_client_conf) if m_conf: _m_concerto.configure_mock(**m_conf) return _m_concerto def test_check_for_setup_error(self): """No setup errors are found.""" self.driver.vmem_mg = self.setup_mock_concerto() self.driver._is_supported_vmos_version = mock.Mock(return_value=True) result = self.driver.check_for_setup_error() self.driver._is_supported_vmos_version.assert_called_with( self.driver.vmem_mg.version) self.assertIsNone(result) def test_create_lun(self): """Lun is successfully created.""" response = {'success': True, 'msg': 'Create resource successfully.'} size_in_mb = VOLUME['size'] * units.Ki conf = { 'lun.create_lun.return_value': response, } self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) self.driver._send_cmd = mock.Mock(return_value=response) result = self.driver._create_lun(VOLUME) self.driver._send_cmd.assert_called_with( self.driver.vmem_mg.lun.create_lun, 'Create resource successfully.', VOLUME['id'], size_in_mb, False, False, size_in_mb, storage_pool=None) self.assertIsNone(result) def test_create_dedup_lun(self): """Lun is successfully created.""" vol = VOLUME.copy() vol['size'] = 100 vol['volume_type_id'] = '1' response = {'success': True, 'msg': 'Create resource successfully.'} size_in_mb = vol['size'] * units.Ki full_size_mb = size_in_mb conf = { 'lun.create_lun.return_value': response, } self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) self.driver._send_cmd = mock.Mock(return_value=response) # simulate extra specs of {'thin': 'true', 'dedupe': 'true'} self.driver._get_volume_type_extra_spec = mock.Mock( return_value="True") self.driver._get_violin_extra_spec = mock.Mock( return_value=None) result = self.driver._create_lun(vol) self.driver._send_cmd.assert_called_with( self.driver.vmem_mg.lun.create_lun, 'Create resource successfully.', VOLUME['id'], size_in_mb / 10, True, True, full_size_mb, storage_pool=None) self.assertIsNone(result) def test_fail_extend_dedup_lun(self): """Volume extend fails when new size would shrink the volume.""" failure = exception.VolumeDriverException vol = VOLUME.copy() vol['volume_type_id'] = '1' size_in_mb = vol['size'] * units.Ki self.driver.vmem_mg = self.setup_mock_concerto() # simulate extra specs of {'thin': 'true', 'dedupe': 'true'} self.driver._get_volume_type_extra_spec = mock.Mock( return_value="True") self.assertRaises(failure, self.driver._extend_lun, vol, size_in_mb) def test_create_non_dedup_lun(self): """Lun is successfully created.""" vol = VOLUME.copy() vol['size'] = 100 vol['volume_type_id'] = '1' response = {'success': True, 'msg': 'Create resource successfully.'} size_in_mb = vol['size'] * units.Ki full_size_mb = size_in_mb conf = { 'lun.create_lun.return_value': response, } self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) self.driver._send_cmd = mock.Mock(return_value=response) # simulate extra specs of {'thin': 'false', 'dedupe': 'false'} self.driver._get_volume_type_extra_spec = mock.Mock( return_value="False") self.driver._get_violin_extra_spec = mock.Mock( 
    def test_create_lun_fails(self):
        """Array returns error that the lun already exists."""
        response = {'success': False,
                    'msg': 'Duplicate Virtual Device name. '
                           'Error: 0x90010022'}

        conf = {
            'lun.create_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)

        self.assertIsNone(self.driver._create_lun(VOLUME))

    def test_create_lun_on_a_storage_pool(self):
        """Lun is successfully created."""
        vol = VOLUME.copy()
        vol['size'] = 100
        vol['volume_type_id'] = '1'

        response = {'success': True, 'msg': 'Create resource successfully.'}
        size_in_mb = vol['size'] * units.Ki
        full_size_mb = size_in_mb

        conf = {
            'lun.create_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)
        self.driver._get_volume_type_extra_spec = mock.Mock(
            return_value="False")

        # simulates extra specs: {'storage_pool', 'StoragePool'}
        self.driver._get_violin_extra_spec = mock.Mock(
            return_value="StoragePool")

        result = self.driver._create_lun(vol)

        self.driver._send_cmd.assert_called_with(
            self.driver.vmem_mg.lun.create_lun,
            'Create resource successfully.', VOLUME['id'],
            size_in_mb, False, False, full_size_mb,
            storage_pool="StoragePool")
        self.assertIsNone(result)

    def test_delete_lun(self):
        """Lun is deleted successfully."""
        response = {'success': True, 'msg': 'Delete resource successfully'}
        success_msgs = ['Delete resource successfully', '']

        conf = {
            'lun.delete_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)
        self.driver._delete_lun_snapshot_bookkeeping = mock.Mock()

        result = self.driver._delete_lun(VOLUME)

        self.driver._send_cmd.assert_called_with(
            self.driver.vmem_mg.lun.delete_lun,
            success_msgs, VOLUME['id'], True)
        self.driver._delete_lun_snapshot_bookkeeping.assert_called_with(
            VOLUME['id'])
        self.assertIsNone(result)

    # TODO(rlucio) More delete lun failure cases to be added after
    # collecting the possible responses from Concerto

    def test_extend_lun(self):
        """Volume extend completes successfully."""
        new_volume_size = 10
        change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki

        response = {'success': True,
                    'message': 'Expand resource successfully'}

        conf = {
            'lun.extend_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)

        result = self.driver._extend_lun(VOLUME, new_volume_size)

        self.driver._send_cmd.assert_called_with(
            self.driver.vmem_mg.lun.extend_lun,
            response['message'], VOLUME['id'], change_in_size_mb)
        self.assertIsNone(result)

    def test_extend_lun_new_size_is_too_small(self):
        """Volume extend fails when new size would shrink the volume."""
        new_volume_size = 0
        change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki

        response = {'success': False,
                    'msg': 'Invalid size. Error: 0x0902000c'}
        failure = exception.ViolinBackendErr

        conf = {
            'lun.resize_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._send_cmd = mock.Mock(
            side_effect=failure(message='fail'))

        self.assertRaises(failure, self.driver._extend_lun,
                          VOLUME, change_in_size_mb)
    def test_create_volume_from_snapshot(self):
        """Create a new cinder volume from a given snapshot of a lun."""
        object_id = '12345'
        vdev_id = 11111
        response = {'success': True,
                    'object_id': object_id,
                    'msg': 'Copy TimeMark successfully.'}
        lun_info = {'virtualDeviceID': vdev_id}
        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'

        conf = {
            'lun.copy_snapshot_to_new_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._compress_snapshot_id = mock.Mock(
            return_value=compressed_snap_id)
        self.driver.vmem_mg.lun.get_lun_info = mock.Mock(
            return_value=lun_info)
        self.driver._wait_for_lun_or_snap_copy = mock.Mock()

        result = self.driver._create_volume_from_snapshot(SNAPSHOT, VOLUME)

        self.driver.vmem_mg.lun.copy_snapshot_to_new_lun.assert_called_with(
            source_lun=SNAPSHOT['volume_id'],
            source_snapshot_comment=compressed_snap_id,
            destination=VOLUME['id'], storage_pool=None)
        self.driver.vmem_mg.lun.get_lun_info.assert_called_with(
            object_id=object_id)
        self.driver._wait_for_lun_or_snap_copy.assert_called_with(
            SNAPSHOT['volume_id'], dest_vdev_id=vdev_id)
        self.assertIsNone(result)

    def test_create_volume_from_snapshot_on_a_storage_pool(self):
        """Create a new cinder volume from a given snapshot of a lun."""
        dest_vol = VOLUME.copy()
        dest_vol['size'] = 100
        dest_vol['volume_type_id'] = '1'
        object_id = '12345'
        vdev_id = 11111
        response = {'success': True,
                    'object_id': object_id,
                    'msg': 'Copy TimeMark successfully.'}
        lun_info = {'virtualDeviceID': vdev_id}
        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'

        conf = {
            'lun.copy_snapshot_to_new_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._compress_snapshot_id = mock.Mock(
            return_value=compressed_snap_id)
        self.driver.vmem_mg.lun.get_lun_info = mock.Mock(
            return_value=lun_info)
        self.driver._wait_for_lun_or_snap_copy = mock.Mock()

        # simulates extra specs: {'storage_pool', 'StoragePool'}
        self.driver._get_violin_extra_spec = mock.Mock(
            return_value="StoragePool")

        result = self.driver._create_volume_from_snapshot(SNAPSHOT,
                                                          dest_vol)

        self.assertIsNone(result)
    def test_create_volume_from_snapshot_fails(self):
        """Array returns error that the lun already exists."""
        response = {'success': False,
                    'msg': 'Duplicate Virtual Device name. '
                           'Error: 0x90010022'}
        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
        failure = exception.ViolinBackendErrExists

        conf = {
            'lun.copy_snapshot_to_new_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)
        self.driver._compress_snapshot_id = mock.Mock(
            return_value=compressed_snap_id)

        self.driver._send_cmd = mock.Mock(
            side_effect=failure(message='fail'))

        self.assertRaises(failure,
                          self.driver._create_volume_from_snapshot,
                          SNAPSHOT, VOLUME)

    def test_create_lun_from_lun(self):
        """lun full clone to new volume completes successfully."""
        object_id = '12345'
        response = {'success': True,
                    'object_id': object_id,
                    'msg': 'Copy Snapshot resource successfully'}

        conf = {
            'lun.copy_lun_to_new_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._ensure_snapshot_resource_area = mock.Mock()
        self.driver._wait_for_lun_or_snap_copy = mock.Mock()

        result = self.driver._create_lun_from_lun(SRC_VOL, VOLUME)

        self.driver._ensure_snapshot_resource_area.assert_called_with(
            SRC_VOL['id'])
        self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with(
            source=SRC_VOL['id'], destination=VOLUME['id'],
            storage_pool=None)
        self.driver._wait_for_lun_or_snap_copy.assert_called_with(
            SRC_VOL['id'], dest_obj_id=object_id)
        self.assertIsNone(result)

    def test_create_lun_from_lun_on_a_storage_pool(self):
        """lun full clone to new volume completes successfully."""
        dest_vol = VOLUME.copy()
        dest_vol['size'] = 100
        dest_vol['volume_type_id'] = '1'
        object_id = '12345'
        response = {'success': True,
                    'object_id': object_id,
                    'msg': 'Copy Snapshot resource successfully'}

        conf = {
            'lun.copy_lun_to_new_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._ensure_snapshot_resource_area = mock.Mock()
        self.driver._wait_for_lun_or_snap_copy = mock.Mock()

        # simulates extra specs: {'storage_pool', 'StoragePool'}
        self.driver._get_violin_extra_spec = mock.Mock(
            return_value="StoragePool")

        result = self.driver._create_lun_from_lun(SRC_VOL, dest_vol)

        self.driver._ensure_snapshot_resource_area.assert_called_with(
            SRC_VOL['id'])
        self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with(
            source=SRC_VOL['id'], destination=dest_vol['id'],
            storage_pool="StoragePool")
        self.driver._wait_for_lun_or_snap_copy.assert_called_with(
            SRC_VOL['id'], dest_obj_id=object_id)
        self.assertIsNone(result)
    def test_create_lun_from_lun_fails(self):
        """lun full clone to new volume fails."""
        failure = exception.ViolinBackendErr
        response = {'success': False,
                    'msg': 'Snapshot Resource is not created '
                           'for this virtual device. Error: 0x0901008c'}

        conf = {
            'lun.copy_lun_to_new_lun.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._ensure_snapshot_resource_area = mock.Mock()
        self.driver._send_cmd = mock.Mock(
            side_effect=failure(message='fail'))

        self.assertRaises(failure, self.driver._create_lun_from_lun,
                          SRC_VOL, VOLUME)

    def test_send_cmd(self):
        """Command callback completes successfully."""
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        response = {'success': True, 'msg': 'Operation successful'}

        request_func = mock.Mock(return_value=response)

        result = self.driver._send_cmd(request_func, success_msg,
                                       request_args)

        self.assertEqual(response, result)

    def test_send_cmd_request_timed_out(self):
        """The callback retry timeout hits immediately."""
        failure = exception.ViolinRequestRetryTimeout
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        self.conf.violin_request_timeout = 0

        request_func = mock.Mock()

        self.assertRaises(failure, self.driver._send_cmd,
                          request_func, success_msg, request_args)

    def test_send_cmd_response_has_no_message(self):
        """The callback returns no message on the first call."""
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        response1 = {'success': True, 'msg': None}
        response2 = {'success': True, 'msg': 'success'}

        request_func = mock.Mock(side_effect=[response1, response2])

        self.assertEqual(response2, self.driver._send_cmd(
            request_func, success_msg, request_args))

    def test_check_error_code(self):
        """Return an exception for a valid error code."""
        failure = exception.ViolinBackendErr
        response = {'success': False, 'msg': 'Error: 0x90000000'}
        self.assertRaises(failure, self.driver._check_error_code,
                          response)

    def test_check_error_code_non_fatal_error(self):
        """Returns no exception for a non-fatal error code."""
        response = {'success': False, 'msg': 'Error: 0x9001003c'}
        self.assertIsNone(self.driver._check_error_code(response))

    def test_compress_snapshot_id(self):
        test_snap_id = "12345678-abcd-1234-cdef-0123456789ab"
        expected = "12345678abcd1234cdef0123456789ab"
        self.assertEqual(32, len(expected))

        result = self.driver._compress_snapshot_id(test_snap_id)
        self.assertEqual(expected, result)

    def test_ensure_snapshot_resource_area(self):
        result_dict = {'success': True, 'res': 'Successful'}

        self.driver.vmem_mg = self.setup_mock_concerto()
        snap = self.driver.vmem_mg.snapshot
        snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False)
        snap.create_snapshot_resource = mock.Mock(return_value=result_dict)

        with mock.patch('cinder.db.sqlalchemy.api.volume_get',
                        return_value=VOLUME):
            result = self.driver._ensure_snapshot_resource_area(VOLUME_ID)

        self.assertIsNone(result)
        snap.lun_has_a_snapshot_resource.assert_called_with(lun=VOLUME_ID)
        snap.create_snapshot_resource.assert_called_with(
            lun=VOLUME_ID,
            size=int(math.ceil(0.2 * (VOLUME['size'] * 1024))),
            enable_notification=False,
            policy=v7000_common.CONCERTO_DEFAULT_SRA_POLICY,
            enable_expansion=
            v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
            expansion_threshold=
            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
            expansion_increment=
            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
            expansion_max_size=
            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
            enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
            storage_pool=None)
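    # _compress_snapshot_id, as pinned by test_compress_snapshot_id above,
    # is just the snapshot UUID with the dashes stripped (36 chars -> 32),
    # which fits the backend's comment field. One-liner equivalent
    # (illustrative):
    #
    #   "12345678-abcd-1234-cdef-0123456789ab".replace('-', '')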
    def test_ensure_snapshot_resource_area_with_storage_pool(self):
        dest_vol = VOLUME.copy()
        dest_vol['size'] = 2
        dest_vol['volume_type_id'] = '1'

        result_dict = {'success': True, 'res': 'Successful'}

        self.driver.vmem_mg = self.setup_mock_concerto()
        snap = self.driver.vmem_mg.snapshot
        snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False)
        snap.create_snapshot_resource = mock.Mock(return_value=result_dict)

        # simulates extra specs: {'storage_pool', 'StoragePool'}
        self.driver._get_violin_extra_spec = mock.Mock(
            return_value="StoragePool")

        with mock.patch('cinder.db.sqlalchemy.api.volume_get',
                        return_value=dest_vol):
            result = self.driver._ensure_snapshot_resource_area(VOLUME_ID)

        self.assertIsNone(result)
        snap.lun_has_a_snapshot_resource.assert_called_with(lun=VOLUME_ID)
        snap.create_snapshot_resource.assert_called_with(
            lun=VOLUME_ID,
            size=int(math.ceil(0.2 * (VOLUME['size'] * 1024))),
            enable_notification=False,
            policy=v7000_common.CONCERTO_DEFAULT_SRA_POLICY,
            enable_expansion=
            v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
            expansion_threshold=
            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
            expansion_increment=
            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
            expansion_max_size=
            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
            enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
            storage_pool="StoragePool")

    def test_ensure_snapshot_resource_policy(self):
        result_dict = {'success': True, 'res': 'Successful'}

        self.driver.vmem_mg = self.setup_mock_concerto()
        snap = self.driver.vmem_mg.snapshot
        snap.lun_has_a_snapshot_policy = mock.Mock(return_value=False)
        snap.create_snapshot_policy = mock.Mock(return_value=result_dict)

        result = self.driver._ensure_snapshot_policy(VOLUME_ID)
        self.assertIsNone(result)
        snap.lun_has_a_snapshot_policy.assert_called_with(lun=VOLUME_ID)

        snap.create_snapshot_policy.assert_called_with(
            lun=VOLUME_ID,
            max_snapshots=v7000_common.CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS,
            enable_replication=False,
            enable_snapshot_schedule=False,
            enable_cdp=False,
            retention_mode=v7000_common.CONCERTO_DEFAULT_POLICY_RETENTION_MODE)

    def test_delete_lun_snapshot_bookkeeping(self):
        result_dict = {'success': True, 'res': 'Successful'}

        self.driver.vmem_mg = self.setup_mock_concerto()
        snap = self.driver.vmem_mg.snapshot
        snap.get_snapshots = mock.Mock(
            return_value=[],
            side_effect=vmemclient.core.error.NoMatchingObjectIdError)
        snap.delete_snapshot_policy = mock.Mock(return_value=result_dict)
        snap.delete_snapshot_resource = mock.Mock()

        result = self.driver._delete_lun_snapshot_bookkeeping(
            volume_id=VOLUME_ID)

        self.assertIsNone(result)
        snap.get_snapshots.assert_called_with(VOLUME_ID)
        snap.delete_snapshot_policy.assert_called_with(lun=VOLUME_ID)
        snap.delete_snapshot_resource.assert_called_with(lun=VOLUME_ID)

    def test_create_lun_snapshot(self):
        response = {'success': True, 'msg': 'Create TimeMark successfully'}

        self.driver.vmem_mg = self.setup_mock_concerto()
        self.driver._ensure_snapshot_resource_area = (
            mock.Mock(return_value=True))
        self.driver._ensure_snapshot_policy = mock.Mock(return_value=True)
        self.driver._send_cmd = mock.Mock(return_value=response)

        with mock.patch('cinder.db.sqlalchemy.api.volume_get',
                        return_value=VOLUME):
            result = self.driver._create_lun_snapshot(SNAPSHOT)

        self.assertIsNone(result)

        self.driver._ensure_snapshot_resource_area.assert_called_with(
            VOLUME_ID)
        self.driver._ensure_snapshot_policy.assert_called_with(VOLUME_ID)
        self.driver._send_cmd.assert_called_with(
            self.driver.vmem_mg.snapshot.create_lun_snapshot,
            'Create TimeMark successfully',
            lun=VOLUME_ID,
            comment=self.driver._compress_snapshot_id(SNAPSHOT_ID),
            priority=v7000_common.CONCERTO_DEFAULT_PRIORITY,
            enable_notification=False)

    def test_delete_lun_snapshot(self):
        response = {'success': True,
                    'msg': 'Delete TimeMark successfully'}
        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'

        self.driver.vmem_mg = self.setup_mock_concerto()
        self.driver._send_cmd = mock.Mock(return_value=response)
        self.driver._compress_snapshot_id = mock.Mock(
            return_value=compressed_snap_id)

        self.assertIsNone(self.driver._delete_lun_snapshot(SNAPSHOT))

        self.driver._send_cmd.assert_called_with(
            self.driver.vmem_mg.snapshot.delete_lun_snapshot,
            'Delete TimeMark successfully',
            lun=VOLUME_ID,
            comment=compressed_snap_id)
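    # Both resource-area tests assert the same sizing rule: the snapshot
    # resource area is created at 20% of the volume's capacity, with GB
    # converted to MB first. For the 2 GB test volume:
    #
    #   int(math.ceil(0.2 * (2 * 1024))) == 410  # MB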
    def test_delete_lun_snapshot(self):
        response = {'success': True, 'msg': 'Delete TimeMark successfully'}
        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'

        self.driver.vmem_mg = self.setup_mock_concerto()
        self.driver._send_cmd = mock.Mock(return_value=response)
        self.driver._compress_snapshot_id = mock.Mock(
            return_value=compressed_snap_id)

        self.assertIsNone(self.driver._delete_lun_snapshot(SNAPSHOT))
        self.driver._send_cmd.assert_called_with(
            self.driver.vmem_mg.snapshot.delete_lun_snapshot,
            'Delete TimeMark successfully',
            lun=VOLUME_ID,
            comment=compressed_snap_id)

    def test_wait_for_lun_or_snap_copy_completes_for_snap(self):
        """Waiting for a snapshot to copy succeeds."""
        vdev_id = 11111
        response = (vdev_id, None, 100)

        conf = {
            'snapshot.get_snapshot_copy_status.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)

        result = self.driver._wait_for_lun_or_snap_copy(
            SRC_VOL['id'], dest_vdev_id=vdev_id)

        (self.driver.vmem_mg.snapshot.get_snapshot_copy_status.
         assert_called_with(SRC_VOL['id']))
        self.assertTrue(result)

    def test_wait_for_lun_or_snap_copy_completes_for_lun(self):
        """Waiting for a LUN to copy succeeds."""
        object_id = '12345'
        response = (object_id, None, 100)

        conf = {
            'lun.get_lun_copy_status.return_value': response,
        }
        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)

        result = self.driver._wait_for_lun_or_snap_copy(
            SRC_VOL['id'], dest_obj_id=object_id)

        self.driver.vmem_mg.lun.get_lun_copy_status.assert_called_with(
            SRC_VOL['id'])
        self.assertTrue(result)

    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_get_volume_type_extra_spec(self, m_get_volume_type,
                                        m_get_admin_context):
        """Volume_type extra specs are found successfully."""
        vol = VOLUME.copy()
        vol['volume_type_id'] = 1
        volume_type = {'extra_specs': {'override:test_key': 'test_value'}}
        m_get_admin_context.return_value = None
        m_get_volume_type.return_value = volume_type

        result = self.driver._get_volume_type_extra_spec(vol, 'test_key')

        m_get_admin_context.assert_called_with()
        m_get_volume_type.assert_called_with(None, vol['volume_type_id'])
        self.assertEqual('test_value', result)

    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_get_violin_extra_spec(self, m_get_volume_type,
                                   m_get_admin_context):
        """Violin-prefixed volume_type extra specs are found successfully."""
        vol = VOLUME.copy()
        vol['volume_type_id'] = 1
        volume_type = {'extra_specs': {'violin:test_key': 'test_value'}}
        m_get_admin_context.return_value = None
        m_get_volume_type.return_value = volume_type

        result = self.driver._get_volume_type_extra_spec(vol, 'test_key')

        m_get_admin_context.assert_called_with()
        m_get_volume_type.assert_called_with(None, vol['volume_type_id'])
        self.assertEqual('test_value', result)

cinder-8.0.0/cinder/tests/unit/test_huawei_drivers.py

# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for huawei drivers.""" import copy import ddt import json import mock import re import tempfile import time from xml.dom import minidom from oslo_log import log as logging from cinder import exception from cinder import test from cinder.tests.unit import utils from cinder.volume import configuration as conf from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import fc_zone_helper from cinder.volume.drivers.huawei import huawei_conf from cinder.volume.drivers.huawei import huawei_driver from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import hypermetro from cinder.volume.drivers.huawei import replication from cinder.volume.drivers.huawei import rest_client from cinder.volume.drivers.huawei import smartx LOG = logging.getLogger(__name__) hypermetro_devices = """{ "remote_device": { "RestURL": "http://192.0.2.69:8082/deviceManager/rest", "UserName": "admin", "UserPassword": "Admin@storage1", "StoragePool": "StoragePool001", "domain_name": "hypermetro-domain", "remote_target_ip": "192.0.2.241" } } """ test_volume = { 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'host': 'ubuntu001@backend001#OpenStack_Pool', 'provider_location': '11', 'status': 'available', 'admin_metadata': {'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'}, } fake_smartx_value = {'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', 'thin_provisioning_support': 'true', 'thick_provisioning_support': False, 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test', } fake_hypermetro_opts = {'hypermetro': 'true', 'smarttier': False, 'smartcache': False, 'smartpartition': False, 'thin_provisioning_support': False, 'thick_provisioning_support': False, } hyper_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'host': 'ubuntu@huawei#OpenStack_Pool', 'provider_location': '11', 'volume_metadata': [{'key': 'hypermetro_id', 'value': '1'}, {'key': 'remote_lun_id', 'value': '11'}], 'admin_metadata': {}, } sync_replica_specs = {'replication_enabled': ' True', 'replication_type': ' sync'} async_replica_specs = {'replication_enabled': ' True', 'replication_type': ' async'} TEST_PAIR_ID = "3400a30d844d0004" replication_volume = { 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'host': 'ubuntu@huawei#OpenStack_Pool', 'provider_location': '11', 'admin_metadata': {'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'}, 'replication_status': 'disabled', 'replication_driver_data': '{"pair_id": "%s", "rmt_lun_id": "1"}' % TEST_PAIR_ID, } test_snap = { 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 1, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': 
'21ec7341-9256-497b-97d9-ef48edcf0635', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'provider_location': '11', 'volume': {'provider_location': '12', 'admin_metadata': { 'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'}}, } test_host = {'host': 'ubuntu001@backend001#OpenStack_Pool', 'capabilities': {'smartcache': True, 'location_info': '210235G7J20000000000', 'QoS_support': True, 'pool_name': 'OpenStack_Pool', 'timestamp': '2015-07-13T11:41:00.513549', 'smartpartition': True, 'allocated_capacity_gb': 0, 'volume_backend_name': 'HuaweiFCDriver', 'free_capacity_gb': 20.0, 'driver_version': '1.1.0', 'total_capacity_gb': 20.0, 'smarttier': True, 'hypermetro': True, 'reserved_percentage': 0, 'vendor_name': None, 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'storage_protocol': 'FC', } } test_new_type = { 'name': u'new_type', 'qos_specs_id': None, 'deleted': False, 'created_at': None, 'updated_at': None, 'extra_specs': { 'smarttier': ' true', 'smartcache': ' true', 'smartpartition': ' true', 'thin_provisioning_support': ' true', 'thick_provisioning_support': ' False', 'policy': '2', 'smartcache:cachename': 'cache-test', 'smartpartition:partitionname': 'partition-test', }, 'is_public': True, 'deleted_at': None, 'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f', 'description': None, } test_new_replication_type = { 'name': u'new_type', 'qos_specs_id': None, 'deleted': False, 'created_at': None, 'updated_at': None, 'extra_specs': { 'replication_enabled': ' True', 'replication_type': ' sync', }, 'is_public': True, 'deleted_at': None, 'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f', 'description': None, } hypermetro_devices = """ { "remote_device": { "RestURL": "http://192.0.2.69:8082/deviceManager/rest", "UserName":"admin", "UserPassword":"Admin@storage2", "StoragePool":"StoragePool001", "domain_name":"hypermetro_test"} } """ FAKE_FIND_POOL_RESPONSE = {'CAPACITY': '985661440', 'ID': '0', 'TOTALCAPACITY': '985661440'} FAKE_CREATE_VOLUME_RESPONSE = {"ID": "1", "NAME": "5mFHcBv4RkCcD+JyrWc0SA", "WWN": '6643e8c1004c5f6723e9f454003'} FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'wwpns': ['10000090fa0d6754'], 'wwnns': ['10000090fa0d6755'], 'host': 'ubuntuc', } smarttier_opts = {'smarttier': 'true', 'smartpartition': False, 'smartcache': False, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'policy': '3', 'readcachepolicy': '1', 'writecachepolicy': None, } fake_fabric_mapping = { 'swd1': { 'target_port_wwn_list': ['2000643e8c4c5f66'], 'initiator_port_wwn_list': ['10000090fa0d6754'] } } CHANGE_OPTS = {'policy': ('1', '2'), 'partitionid': (['1', 'partition001'], ['2', 'partition002']), 'cacheid': (['1', 'cache001'], ['2', 'cache002']), 'qos': (['11', {'MAXIOPS': '100', 'IOType': '1'}], {'MAXIOPS': '100', 'IOType': '2', 'MIN': 1, 'LATENCY': 1}), 'host': ('ubuntu@huawei#OpenStack_Pool', 'ubuntu@huawei#OpenStack_Pool'), 'LUNType': ('0', '1'), } # A fake response of create a host FAKE_CREATE_HOST_RESPONSE = """ { "error": { "code": 0 }, "data":{"NAME": "ubuntuc001", "ID": "1"} } """ # A fake response of success response storage FAKE_COMMON_SUCCESS_RESPONSE = """ { "error": { "code": 0 }, "data":{} } """ # A fake response of login huawei storage FAKE_GET_LOGIN_STORAGE_RESPONSE = """ { "error": { "code": 0 }, "data": { "username": "admin", "iBaseToken": "2001031430", "deviceid": "210235G7J20000000000" } } """ # A fake response of login out huawei storage 
FAKE_LOGIN_OUT_STORAGE_RESPONSE = """ { "error": { "code": 0 }, "data": { "ID": 11 } } """ # A fake response of mock storage pool info FAKE_STORAGE_POOL_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "USERFREECAPACITY": "985661440", "ID": "0", "NAME": "OpenStack_Pool", "USERTOTALCAPACITY": "985661440" }] } """ # A fake response of lun or lungroup response FAKE_LUN_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": { "ID": "1", "NAME": "5mFHcBv4RkCcD+JyrWc0SA", "WWN": "6643e8c1004c5f6723e9f454003", "DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "27", "ALLOCTYPE": "1", "CAPACITY": "2097152" } } """ FAKE_LUN_GET_SUCCESS_RESPONSE = """ { "error": { "code": 0 }, "data": { "ID": "11", "IOCLASSID": "11", "NAME": "5mFHcBv4RkCcD+JyrWc0SA", "DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635", "RUNNINGSTATUS": "10", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "27", "LUNLIST": "", "ALLOCTYPE": "1", "CAPACITY": "2097152", "WRITEPOLICY": "1", "MIRRORPOLICY": "0", "PREFETCHPOLICY": "1", "PREFETCHVALUE": "20", "DATATRANSFERPOLICY": "1", "READCACHEPOLICY": "2", "WRITECACHEPOLICY": "5", "OWNINGCONTROLLER": "0B", "SMARTCACHEPARTITIONID": "", "CACHEPARTITIONID": "", "WWN": "6643e8c1004c5f6723e9f454003", "PARENTNAME": "OpenStack_Pool" } } """ FAKE_QUERY_ALL_LUN_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "ID": "1", "NAME": "IexzQZJWSXuX2e9I7c8GNQ" }] } """ FAKE_LUN_ASSOCIATE_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "ID":"11" }] } """ FAKE_QUERY_LUN_GROUP_INFO_RESPONSE = """ { "error": { "code":0 }, "data":[{ "NAME":"OpenStack_LunGroup_1", "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", "ID":"11", "TYPE":256 }] } """ FAKE_QUERY_LUN_GROUP_RESPONSE = """ { "error": { "code":0 }, "data":{ "NAME":"5mFHcBv4RkCcD+JyrWc0SA", "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", "ID":"11", "TYPE":256 } } """ FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE = """ { "error":{ "code":0 }, "data":{ "NAME":"5mFHcBv4RkCcD+JyrWc0SA", "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", "ID":"11", "TYPE":256 } } """ FAKE_LUN_COUNT_RESPONSE = """ { "data":{ "COUNT":"0" }, "error":{ "code":0, "description":"0" } } """ # A fake response of snapshot list response FAKE_SNAPSHOT_LIST_INFO_RESPONSE = """ { "error": { "code": 0, "description": "0" }, "data": [{ "ID": 11, "NAME": "wr_LMKAjS7O_VtsEIREGYw" }, { "ID": 12, "NAME": "SDFAJSDFLKJ" }, { "ID": 13, "NAME": "s1Ew5v36To-hR2txJitX5Q" }] } """ # A fake response of create snapshot response FAKE_CREATE_SNAPSHOT_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": { "ID": 11, "NAME": "YheUoRwbSX2BxN7" } } """ # A fake response of get snapshot response FAKE_GET_SNAPSHOT_INFO_RESPONSE = """ { "error": { "code": 0, "description": "0" }, "data": { "ID": 11, "NAME": "YheUoRwbSX2BxN7" } } """ # A fake response of get iscsi response FAKE_GET_ISCSI_INFO_RESPONSE = """ { "data": [{ "ETHPORTID": "139267", "ID": "iqn.oceanstor:21004846fb8ca15f::22003:192.0.2.244", "TPGT": "8196", "TYPE": 249 }, { "ETHPORTID": "139268", "ID": "iqn.oceanstor:21004846fb8ca15f::22003:192.0.2.244", "TPGT": "8196", "TYPE": 249 } ], "error": { "code": 0, "description": "0" } } """ # A fake response of get eth info response FAKE_GET_ETH_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "PARENTTYPE": 209, "MACADDRESS": "00:22:a1:0a:79:57", "ETHNEGOTIATE": "-1", "ERRORPACKETS": "0", "IPV4ADDR": "192.0.2.2", "IPV6GATEWAY": "", "IPV6MASK": "0", "OVERFLOWEDPACKETS": "0", "ISCSINAME": "P0", "HEALTHSTATUS": "1", "ETHDUPLEX": "2", "ID": "16909568", "LOSTPACKETS": "0", "TYPE": 213, "NAME": "P0", 
"INIORTGT": "4", "RUNNINGSTATUS": "10", "IPV4GATEWAY": "", "BONDNAME": "", "STARTTIME": "1371684218", "SPEED": "1000", "ISCSITCPPORT": "0", "IPV4MASK": "255.255.0.0", "IPV6ADDR": "", "LOGICTYPE": "0", "LOCATION": "ENG0.A5.P0", "MTU": "1500", "PARENTID": "1.5" }, { "PARENTTYPE": 209, "MACADDRESS": "00:22:a1:0a:79:57", "ETHNEGOTIATE": "-1", "ERRORPACKETS": "0", "IPV4ADDR": "192.0.2.1", "IPV6GATEWAY": "", "IPV6MASK": "0", "OVERFLOWEDPACKETS": "0", "ISCSINAME": "P0", "HEALTHSTATUS": "1", "ETHDUPLEX": "2", "ID": "16909568", "LOSTPACKETS": "0", "TYPE": 213, "NAME": "P0", "INIORTGT": "4", "RUNNINGSTATUS": "10", "IPV4GATEWAY": "", "BONDNAME": "", "STARTTIME": "1371684218", "SPEED": "1000", "ISCSITCPPORT": "0", "IPV4MASK": "255.255.0.0", "IPV6ADDR": "", "LOGICTYPE": "0", "LOCATION": "ENG0.A5.P3", "MTU": "1500", "PARENTID": "1.5" }] } """ FAKE_GET_ETH_ASSOCIATE_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "IPV4ADDR": "192.0.2.1", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "10" }, { "IPV4ADDR": "192.0.2.2", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "10" } ] } """ # A fake response of get iscsi device info response FAKE_GET_ISCSI_DEVICE_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "CMO_ISCSI_DEVICE_NAME": "iqn.2006-08.com.huawei:oceanstor:21000022a:" }] } """ # A fake response of get iscsi device info response FAKE_GET_ALL_HOST_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "PARENTTYPE": 245, "NAME": "ubuntuc", "DESCRIPTION": "", "RUNNINGSTATUS": "1", "IP": "", "PARENTNAME": "", "OPERATIONSYSTEM": "0", "LOCATION": "", "HEALTHSTATUS": "1", "MODEL": "", "ID": "1", "PARENTID": "", "NETWORKNAME": "", "TYPE": 21 }, { "PARENTTYPE": 245, "NAME": "ubuntu", "DESCRIPTION": "", "RUNNINGSTATUS": "1", "IP": "", "PARENTNAME": "", "OPERATIONSYSTEM": "0", "LOCATION": "", "HEALTHSTATUS": "1", "MODEL": "", "ID": "2", "PARENTID": "", "NETWORKNAME": "", "TYPE": 21 }] } """ # A fake response of get host or hostgroup info response FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "NAME":"ubuntuc", "DESCRIPTION":"", "ID":"0", "TYPE":14 }, {"NAME":"OpenStack_HostGroup_1", "DESCRIPTION":"", "ID":"0", "TYPE":14 } ] } """ FAKE_GET_HOST_GROUP_INFO_RESPONSE = """ { "error": { "code": 0 }, "data":{ "NAME":"ubuntuc", "DESCRIPTION":"", "ID":"0", "TYPE":14 } } """ # A fake response of lun copy info response FAKE_GET_LUN_COPY_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": { "COPYSTOPTIME": "-1", "HEALTHSTATUS": "1", "NAME": "w1PSNvu6RumcZMmSh4/l+Q==", "RUNNINGSTATUS": "36", "DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==", "ID": "0", "LUNCOPYTYPE": "1", "COPYPROGRESS": "0", "COPYSPEED": "2", "TYPE": 219, "COPYSTARTTIME": "-1" } } """ # A fake response of lun copy list info response FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "COPYSTOPTIME": "1372209335", "HEALTHSTATUS": "1", "NAME": "w1PSNvu6RumcZMmSh4/l+Q==", "RUNNINGSTATUS": "40", "DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==", "ID": "0", "LUNCOPYTYPE": "1", "COPYPROGRESS": "100", "COPYSPEED": "2", "TYPE": 219, "COPYSTARTTIME": "1372209329" }] } """ # A fake response of mappingview info response FAKE_GET_MAPPING_VIEW_INFO_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "WORKMODE":"255", "HEALTHSTATUS":"1", "NAME":"OpenStack_Mapping_View_1", "RUNNINGSTATUS":"27", "DESCRIPTION":"", "ENABLEINBANDCOMMAND":"true", "ID":"1", "INBANDLUNWWN":"", "TYPE":245 }, { "WORKMODE":"255", "HEALTHSTATUS":"1", "NAME":"YheUoRwbSX2BxN767nvLSw", "RUNNINGSTATUS":"27", "DESCRIPTION":"", "ENABLEINBANDCOMMAND":"true", "ID":"2", 
"INBANDLUNWWN": "", "TYPE": 245 }] } """ FAKE_GET_MAPPING_VIEW_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "WORKMODE":"255", "HEALTHSTATUS":"1", "NAME":"mOWtSXnaQKi3hpB3tdFRIQ", "RUNNINGSTATUS":"27", "DESCRIPTION":"", "ENABLEINBANDCOMMAND":"true", "ID":"11", "INBANDLUNWWN":"", "TYPE": 245, "AVAILABLEHOSTLUNIDLIST": "" }] } """ FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE = """ { "error":{ "code":0 }, "data":{ "WORKMODE":"255", "HEALTHSTATUS":"1", "NAME":"mOWtSXnaQKi3hpB3tdFRIQ", "RUNNINGSTATUS":"27", "DESCRIPTION":"", "ENABLEINBANDCOMMAND":"true", "ID":"1", "INBANDLUNWWN":"", "TYPE":245, "AVAILABLEHOSTLUNIDLIST": "[1]" } } """ FAKE_FC_INFO_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "HEALTHSTATUS":"1", "NAME":"", "MULTIPATHTYPE":"1", "ISFREE":"true", "RUNNINGSTATUS":"27", "ID":"10000090fa0d6754", "OPERATIONSYSTEM":"255", "TYPE":223 }, { "HEALTHSTATUS":"1", "NAME":"", "MULTIPATHTYPE":"1", "ISFREE":"true", "RUNNINGSTATUS":"27", "ID":"10000090fa0d6755", "OPERATIONSYSTEM":"255", "TYPE":223 }] } """ FAKE_ISCSI_INITIATOR_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "CHAPNAME":"mm-user", "HEALTHSTATUS":"1", "ID":"iqn.1993-08.org.debian:01:9073aba6c6f", "ISFREE":"true", "MULTIPATHTYPE":"1", "NAME":"", "OPERATIONSYSTEM":"255", "RUNNINGSTATUS":"28", "TYPE":222, "USECHAP":"true" }] } """ FAKE_HOST_LINK_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "PARENTTYPE":21, "TARGET_ID":"0000000000000000", "INITIATOR_NODE_WWN":"20000090fa0d6754", "INITIATOR_TYPE":"223", "RUNNINGSTATUS":"27", "PARENTNAME":"ubuntuc", "INITIATOR_ID":"10000090fa0d6754", "TARGET_PORT_WWN":"24000022a10a2a39", "HEALTHSTATUS":"1", "INITIATOR_PORT_WWN":"10000090fa0d6754", "ID":"010000090fa0d675-0000000000110400", "TARGET_NODE_WWN":"21000022a10a2a39", "PARENTID":"1", "CTRL_ID":"0", "TYPE":255, "TARGET_TYPE":"212" }] } """ FAKE_PORT_GROUP_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "ID":11, "NAME": "portgroup-test" }] } """ FAKE_ERROR_INFO_RESPONSE = """ { "error":{ "code":31755596 } } """ FAKE_ERROR_CONNECT_RESPONSE = """ { "error":{ "code":-403 } } """ FAKE_ERROR_LUN_INFO_RESPONSE = """ { "error":{ "code":0 }, "data":{ "ID":"11", "IOCLASSID":"11", "NAME":"5mFHcBv4RkCcD+JyrWc0SA", "ALLOCTYPE": "0", "DATATRANSFERPOLICY": "0", "SMARTCACHEPARTITIONID": "0", "CACHEPARTITIONID": "0" } } """ FAKE_GET_FC_INI_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "ID":"10000090fa0d6754", "ISFREE":"true" }] } """ FAKE_SYSTEM_VERSION_RESPONSE = """ { "error":{ "code": 0 }, "data":{ "PRODUCTVERSION": "V100R001C10", "wwn": "21003400a30d844d" } } """ FAKE_GET_LUN_MIGRATION_RESPONSE = """ { "data":[{"ENDTIME":"1436816174", "ID":"9", "PARENTID":"11", "PARENTNAME":"xmRBHMlVRruql5vwthpPXQ", "PROCESS":"-1", "RUNNINGSTATUS":"76", "SPEED":"2", "STARTTIME":"1436816111", "TARGETLUNID":"1", "TARGETLUNNAME":"4924891454902893639", "TYPE":253, "WORKMODE":"0" }], "error":{"code":0, "description":"0"} } """ FAKE_HYPERMETRODOMAIN_RESPONSE = """ { "error":{ "code": 0 }, "data":{ "PRODUCTVERSION": "V100R001C10", "ID": "11", "NAME": "hypermetro_test", "RUNNINGSTATUS": "42" } } """ FAKE_HYPERMETRO_RESPONSE = """ { "error":{ "code": 0 }, "data":{ "PRODUCTVERSION": "V100R001C10", "ID": "11", "NAME": "hypermetro_test", "RUNNINGSTATUS": "1", "HEALTHSTATUS": "1" } } """ FAKE_QOS_INFO_RESPONSE = """ { "error":{ "code": 0 }, "data":{ "ID": "11" } } """ FAKE_GET_FC_PORT_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "RUNNINGSTATUS":"10", "WWN":"2000643e8c4c5f66", "PARENTID":"0A.1", "ID": "1114368", "RUNSPEED": "16000" }, { "RUNNINGSTATUS":"10", 
"WWN":"2009643e8c4c5f67", "PARENTID":"0A.1", "ID": "1114369", "RUNSPEED": "16000" }] } """ FAKE_SMARTCACHEPARTITION_RESPONSE = """ { "error":{ "code":0 }, "data":{ "ID":"11", "NAME":"cache-name" } } """ FAKE_CONNECT_FC_RESPONCE = { "driver_volume_type": 'fibre_channel', "data": { "target_wwn": ["10000090fa0d6754"], "target_lun": "1", "volume_id": "21ec7341-9256-497b-97d9-ef48edcf0635" } } FAKE_METRO_INFO_RESPONCE = { "error": { "code": 0 }, "data": { "PRODUCTVERSION": "V100R001C10", "ID": "11", "NAME": "hypermetro_test", "RUNNINGSTATUS": "42" } } # mock login info map MAP_COMMAND_TO_FAKE_RESPONSE = {} MAP_COMMAND_TO_FAKE_RESPONSE['/xx/sessions'] = ( FAKE_GET_LOGIN_STORAGE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/sessions'] = ( FAKE_LOGIN_OUT_STORAGE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/POST'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION?range=[0-256]/GET'] = ( FAKE_GET_LUN_MIGRATION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock storage info map MAP_COMMAND_TO_FAKE_RESPONSE['/storagepool'] = ( FAKE_STORAGE_POOL_RESPONSE) # mock lun info map MAP_COMMAND_TO_FAKE_RESPONSE['/lun'] = ( FAKE_LUN_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/GET'] = ( FAKE_LUN_GET_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/GET'] = ( FAKE_LUN_GET_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun?range=[0-65535]/GET'] = ( FAKE_QUERY_ALL_LUN_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=12/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?ID=1&TYPE=11&ASSOCIATEOBJTYPE=21' '&ASSOCIATEOBJID=0/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21' '&ASSOCIATEOBJID=1/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition?ID=1' '&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=11' '/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup?range=[0-8191]/GET'] = ( FAKE_QUERY_LUN_GROUP_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup'] = ( FAKE_QUERY_LUN_GROUP_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate'] = ( FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUNGroup/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=1/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_LUN_COUNT_RESPONSE) 
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/expand/PUT'] = ( FAKE_LUN_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=12&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=12/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock snapshot info map MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot'] = ( FAKE_CREATE_SNAPSHOT_INFO_RESPONSE) # mock snapshot info map MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/GET'] = ( FAKE_GET_SNAPSHOT_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/activate'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/stop/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot?range=[0-32767]/GET'] = ( FAKE_SNAPSHOT_LIST_INFO_RESPONSE) # mock QoS info map MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/GET'] = ( FAKE_LUN_GET_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/active/11/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/'] = ( FAKE_QOS_INFO_RESPONSE) # mock iscsi info map MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_tgt_port/GET'] = ( FAKE_GET_ISCSI_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/GET'] = ( FAKE_GET_ETH_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE' '=257&ASSOCIATEOBJID=11/GET'] = ( FAKE_GET_ETH_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsidevicename'] = ( FAKE_GET_ISCSI_DEVICE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator?range=[0-256]/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/POST'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/PUT'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/remove_iscsi_from_host/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/' 'iqn.1993-08.debian:01:ec2bff7ac3a3/PUT'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) # mock host info map MAP_COMMAND_TO_FAKE_RESPONSE['/host?range=[0-65535]/GET'] = ( FAKE_GET_ALL_HOST_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host'] = ( FAKE_CREATE_HOST_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup?range=[0-8191]/GET'] = ( FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup'] = ( FAKE_GET_HOST_GROUP_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0' '&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=1' '/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0' '&ASSOCIATEOBJID=0/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&' 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/0/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&' 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/associate'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock copy info map MAP_COMMAND_TO_FAKE_RESPONSE['/luncopy'] = ( FAKE_GET_LUN_COPY_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY?range=[0-1023]/GET'] = ( FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE) 
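# A short illustration (not part of the fixtures) of the response envelope
# every canned payload above follows: an 'error' dict whose code 0 means
# success, plus an optional 'data' member; the drivers treat any non-zero
# code as a backend failure.
_example_envelope = json.loads(FAKE_COMMON_SUCCESS_RESPONSE)
assert _example_envelope['error']['code'] == 0
assert 'data' in _example_envelope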
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/start/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/0/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock mapping view info map MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview?range=[0-8191]/GET'] = ( FAKE_GET_MAPPING_VIEW_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/PUT'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/1/GET'] = ( FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/lungroup?TYPE=256&' 'ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' 'ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=11/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' 'ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=11/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate?ASSOCIATEOBJTYPE=245&' 'ASSOCIATEOBJID=1&range=[0-8191]/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock FC info map MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?ISFREE=true&' 'range=[0-8191]/GET'] = ( FAKE_FC_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock FC info map MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?ISFREE=true&' 'range=[0-8191]/GET'] = ( FAKE_FC_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/GET'] = ( FAKE_FC_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host_link?INITIATOR_TYPE=223' '&INITIATOR_PORT_WWN=10000090fa0d6754/GET'] = ( FAKE_HOST_LINK_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup?range=[0-8191]&TYPE=257/GET'] = ( FAKE_PORT_GROUP_RESPONSE) # mock system info map MAP_COMMAND_TO_FAKE_RESPONSE['/system//GET'] = ( FAKE_SYSTEM_VERSION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]/GET'] = ( FAKE_GET_FC_INI_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['fc_initiator?range=[0-256]/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition/POST'] = ( FAKE_SYSTEM_VERSION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]&PARENTID=1/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/0/GET'] = ( FAKE_SMARTCACHEPARTITION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/REMOVE_ASSOCIATE/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/cachepartition/0/GET'] = ( FAKE_SMARTCACHEPARTITION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroDomain?range=[0-32]/GET'] = ( FAKE_HYPERMETRODOMAIN_RESPONSE) 
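# A brief sketch (helper name invented for illustration) of the ranged-query
# convention used by keys like '/HyperMetroDomain?range=[0-32]/GET' above:
# list endpoints embed the pagination range directly in the key, so a fake
# lookup only matches when the client requests exactly that range.
def _example_ranged_key(resource, start, end, method='GET'):
    return '/%s?range=[%d-%d]/%s' % (resource, start, end, method)

assert (_example_ranged_key('HyperMetroDomain', 0, 32) ==
        '/HyperMetroDomain?range=[0-32]/GET')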
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/POST'] = ( FAKE_HYPERMETRODOMAIN_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/GET'] = ( FAKE_HYPERMETRODOMAIN_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/disable_hcpair/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/1/GET'] = ( FAKE_HYPERMETRO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair?range=[0-65535]/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/splitmirror?range=[0-512]/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) FAKE_GET_PORTG_BY_VIEW = """ { "data": [{ "DESCRIPTION": "Please do NOT modify this. Engine ID: 0", "ID": "0", "NAME": "OpenStack_PortGroup_1", "TYPE": 257 }], "error": { "code": 0 } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/mappingview?TYPE=257&AS' 'SOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = ( FAKE_GET_PORTG_BY_VIEW) FAKE_GET_PORT_BY_PORTG = """ { "data":[{ "CONFSPEED":"0","FCCONFMODE":"3", "FCRUNMODE":"0","HEALTHSTATUS":"1","ID":"2000643e8c4c5f66", "MAXSUPPORTSPEED":"16000","NAME":"P0","PARENTID":"0B.1", "PARENTTYPE":209,"RUNNINGSTATUS":"10","RUNSPEED":"8000", "WWN":"2000643e8c4c5f66" }], "error":{ "code":0,"description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate/portgroup?TYPE=212&ASSOCI' 'ATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = ( FAKE_GET_PORT_BY_PORTG) FAKE_GET_PORTG = """ { "data": { "TYPE": 257, "NAME": "OpenStack_PortGroup_1", "DESCRIPTION": "Please DO NOT change thefollowing message: 0", "ID": "0" }, "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/0/GET'] = FAKE_GET_PORTG MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/0/PUT'] = FAKE_GET_PORTG MAP_COMMAND_TO_FAKE_RESPONSE['/port/associate/portgroup/POST'] = ( FAKE_GET_PORT_BY_PORTG) MAP_COMMAND_TO_FAKE_RESPONSE['/port/associate/portgroup?ID=0&TYPE=257&ASSOCIA' 'TEOBJTYPE=212&ASSOCIATEOBJID=2000643e8c4c5f66/DE' 'LETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) FAKE_CREATE_PORTG = """ { "data": { "DESCRIPTION": "Please DO NOT change the following message: 0", "ID": "0", "NAME": "OpenStack_PortGroup_1", "TYPE": 257 }, "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/PortGroup/POST'] = FAKE_CREATE_PORTG FAKE_GET_PORTG_FROM_PORT = """ { "data": [{ "TYPE": 257, "NAME": "OpenStack_PortGroup_1", "DESCRIPTION": "PleaseDONOTchangethefollowingmessage: 0", "ID": "0" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/fc_port?TYPE=257&ASSOCIA' 'TEOBJTYPE=212&ASSOCIATEOBJID=1114368/GET'] = ( FAKE_GET_PORTG_FROM_PORT) FAKE_GET_VIEW_BY_PORTG = """ { "data": [{ "ASSOCIATEOBJID": "0", "COUNT": "0", "ASSOCIATEOBJTYPE": "0", "INBANDLUNWWN": "", "FORFILESYSTEM": "false", "ID": "2", "ENABLEINBANDCOMMAND": "false", "NAME": "OpenStack_Mapping_View_1", "WORKMODE": "0", "TYPE": 245, "HOSTLUNID": "0", "DESCRIPTION": "" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/portgroup?TYPE=245&ASS' 'OCIATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = ( FAKE_GET_VIEW_BY_PORTG) FAKE_GET_LUNG_BY_VIEW = """ { "data": [{ "TYPE": 256, "NAME": "OpenStack_LunGroup_1", "DESCRIPTION": "OpenStack_LunGroup_1", "ID": "1" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate/mappingview?TYPE=256&ASSO' 'CIATEOBJTYPE=245&ASSOCIATEOBJID=2/GET'] = ( FAKE_GET_LUNG_BY_VIEW) FAKE_LUN_COUNT_RESPONSE_1 = """ 
{ "data":{ "COUNT":"2" }, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOB' 'JTYPE=256&ASSOCIATEOBJID=1/GET'] = ( FAKE_LUN_COUNT_RESPONSE_1) FAKE_PORTS_IN_PG_RESPONSE = """ { "data": [{ "ID": "1114114", "WWN": "2002643e8c4c5f66" }, { "ID": "1114113", "WWN": "2001643e8c4c5f66" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=' '257&ASSOCIATEOBJID=0/GET'] = ( FAKE_PORTS_IN_PG_RESPONSE) # Replication response FAKE_GET_REMOTEDEV_RESPONSE = """ { "data":[{ "ARRAYTYPE":"1", "HEALTHSTATUS":"1", "ID":"0", "NAME":"Huawei.Storage", "RUNNINGSTATUS":"1", "WWN":"21003400a30d844d" }], "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/remote_device/GET'] = ( FAKE_GET_REMOTEDEV_RESPONSE) FAKE_CREATE_PAIR_RESPONSE = """ { "data":{ "ID":"%s" }, "error":{ "code":0, "description":"0" } } """ % TEST_PAIR_ID MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/POST'] = ( FAKE_CREATE_PAIR_RESPONSE) FAKE_DELETE_PAIR_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/DELETE' % TEST_PAIR_ID] = ( FAKE_DELETE_PAIR_RESPONSE) FAKE_SET_PAIR_ACCESS_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/PUT' % TEST_PAIR_ID] = ( FAKE_SET_PAIR_ACCESS_RESPONSE) FAKE_GET_PAIR_NORMAL_RESPONSE = """ { "data":{ "REPLICATIONMODEL": "1", "RUNNINGSTATUS": "1", "SECRESACCESS": "2", "HEALTHSTATUS": "1", "ISPRIMARY": "true" }, "error":{ "code":0, "description":"0" } } """ FAKE_GET_PAIR_SPLIT_RESPONSE = """ { "data":{ "REPLICATIONMODEL": "1", "RUNNINGSTATUS": "26", "SECRESACCESS": "2", "ISPRIMARY": "true" }, "error":{ "code":0, "description":"0" } } """ FAKE_GET_PAIR_SYNC_RESPONSE = """ { "data":{ "REPLICATIONMODEL": "1", "RUNNINGSTATUS": "23", "SECRESACCESS": "2" }, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/GET' % TEST_PAIR_ID] = ( FAKE_GET_PAIR_NORMAL_RESPONSE) FAKE_SYNC_PAIR_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/sync/PUT'] = ( FAKE_SYNC_PAIR_RESPONSE) FAKE_SPLIT_PAIR_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/split/PUT'] = ( FAKE_SPLIT_PAIR_RESPONSE) FAKE_SWITCH_PAIR_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/switch/PUT'] = ( FAKE_SWITCH_PAIR_RESPONSE) def Fake_sleep(time): pass REPLICA_BACKEND_ID = 'huawei-replica-1' class FakeHuaweiConf(huawei_conf.HuaweiConf): def __init__(self, conf, protocol): self.conf = conf self.protocol = protocol def safe_get(self, key): try: return getattr(self.conf, key) except Exception: return def update_config_value(self): setattr(self.conf, 'volume_backend_name', 'huawei_storage') setattr(self.conf, 'san_address', ['http://192.0.2.69:8082/deviceManager/rest/']) setattr(self.conf, 'san_user', 'admin') setattr(self.conf, 'san_password', 'Admin@storage') setattr(self.conf, 'san_product', 'V3') setattr(self.conf, 'san_protocol', self.protocol) setattr(self.conf, 'lun_type', constants.THICK_LUNTYPE) setattr(self.conf, 'lun_ready_wait_interval', 2) setattr(self.conf, 'lun_copy_wait_interval', 2) setattr(self.conf, 'lun_timeout', 43200) setattr(self.conf, 'lun_write_type', '1') setattr(self.conf, 'lun_mirror_switch', '1') 
setattr(self.conf, 'lun_prefetch_type', '1') setattr(self.conf, 'lun_prefetch_value', '0') setattr(self.conf, 'lun_policy', '0') setattr(self.conf, 'lun_read_cache_policy', '2') setattr(self.conf, 'lun_write_cache_policy', '5') setattr(self.conf, 'storage_pools', ['OpenStack_Pool']) setattr(self.conf, 'iscsi_default_target_ip', ['192.0.2.68']) setattr(self.conf, 'metro_san_address', ['https://192.0.2.240:8088/deviceManager/rest/']) setattr(self.conf, 'metro_storage_pools', 'StoragePool001') setattr(self.conf, 'metro_san_user', 'admin') setattr(self.conf, 'metro_san_password', 'Admin@storage1') setattr(self.conf, 'metro_domain_name', 'hypermetro_test') iscsi_info = {'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'TargetIP': '192.0.2.2', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', 'TargetPortGroup': 'portgroup-test', } setattr(self.conf, 'iscsi_info', [iscsi_info]) targets = [{'backend_id': REPLICA_BACKEND_ID, 'storage_pool': 'OpenStack_Pool', 'san_address': 'https://192.0.2.69:8088/deviceManager/rest/', 'san_user': 'admin', 'san_password': 'Admin@storage1'}] setattr(self.conf, 'replication_device', targets) setattr(self.conf, 'safe_get', self.safe_get) class FakeClient(rest_client.RestClient): def __init__(self, configuration): san_address = configuration.san_address san_user = configuration.san_user san_password = configuration.san_password rest_client.RestClient.__init__(self, configuration, san_address, san_user, san_password) self.test_fail = False self.test_multi_url_flag = False self.cache_not_exist = False self.partition_not_exist = False def _get_snapshotid_by_name(self, snapshot_name): return "11" def _check_snapshot_exist(self, snapshot_id): return True def get_partition_id_by_name(self, name): if self.partition_not_exist: return None return "11" def get_cache_id_by_name(self, name): if self.cache_not_exist: return None return "11" def add_lun_to_cache(self, lunid, cache_id): pass def do_call(self, url=False, data=None, method=None, calltimeout=4): url = url.replace('http://192.0.2.69:8082/deviceManager/rest', '') command = url.replace('/210235G7J20000000000/', '') data = json.dumps(data) if data else None if method: command = command + "/" + method for item in MAP_COMMAND_TO_FAKE_RESPONSE.keys(): if command == item: data = MAP_COMMAND_TO_FAKE_RESPONSE[item] if self.test_fail: data = FAKE_ERROR_INFO_RESPONSE if command == 'lun/11/GET': data = FAKE_ERROR_LUN_INFO_RESPONSE self.test_fail = False if self.test_multi_url_flag: data = FAKE_ERROR_CONNECT_RESPONSE self.test_multi_url_flag = False return json.loads(data) class FakeReplicaPairManager(replication.ReplicaPairManager): def _init_rmt_client(self): self.rmt_client = FakeClient(self.conf) class FakeISCSIStorage(huawei_driver.HuaweiISCSIDriver): """Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver.""" def __init__(self, configuration): self.configuration = configuration self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI') self.active_backend_id = None self.replica = None def do_setup(self): self.metro_flag = True self.huawei_conf.update_config_value() self.get_local_and_remote_dev_conf() self.client = FakeClient(configuration=self.configuration) self.rmt_client = FakeClient(configuration=self.configuration) self.replica_client = FakeClient(configuration=self.configuration) self.metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) self.replica = FakeReplicaPairManager(self.client, self.replica_client, self.configuration) class 
FakeFCStorage(huawei_driver.HuaweiFCDriver): """Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver.""" def __init__(self, configuration): self.configuration = configuration self.fcsan = None self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI') self.active_backend_id = None self.replica = None def do_setup(self): self.metro_flag = True self.huawei_conf.update_config_value() self.get_local_and_remote_dev_conf() self.client = FakeClient(configuration=self.configuration) self.rmt_client = FakeClient(configuration=self.configuration) self.replica_client = FakeClient(configuration=self.configuration) self.metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) self.replica = FakeReplicaPairManager(self.client, self.replica_client, self.configuration) @ddt.ddt class HuaweiISCSIDriverTestCase(test.TestCase): def setUp(self): super(HuaweiISCSIDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.hypermetro_devices = hypermetro_devices self.stubs.Set(time, 'sleep', Fake_sleep) self.driver = FakeISCSIStorage(configuration=self.configuration) self.driver.do_setup() self.portgroup = 'portgroup-test' self.iscsi_iqns = ['iqn.2006-08.com.huawei:oceanstor:21000022a:' ':20503:192.0.2.1', 'iqn.2006-08.com.huawei:oceanstor:21000022a:' ':20500:192.0.2.2'] self.target_ips = ['192.0.2.1', '192.0.2.2'] self.portgroup_id = 11 self.driver.client.login() def test_login_success(self): device_id = self.driver.client.login() self.assertEqual('210235G7J20000000000', device_id) def test_check_volume_exist_on_array(self): test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'host': 'ubuntu001@backend001#OpenStack_Pool', 'provider_location': None, } self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', mock.Mock(return_value=None)) self.driver._check_volume_exist_on_array( test_volume, constants.VOLUME_NOT_EXISTS_WARN) def test_create_volume_success(self): # Have pool info in the volume. test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'host': 'ubuntu001@backend001#OpenStack_Pool', 'provider_location': '11', 'admin_metadata': {}, } lun_info = self.driver.create_volume(test_volume) self.assertEqual('1', lun_info['provider_location']) # No pool info in the volume. 
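        # (the 'host' below, 'ubuntu001@backend001', carries no '#pool'
        # suffix, so this case exercises the driver's pool-selection
        # fallback path)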
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'host': 'ubuntu001@backend001', 'provider_location': '11', 'admin_metadata': {}, } lun_info = self.driver.create_volume(test_volume) self.assertEqual('1', lun_info['provider_location']) def test_delete_volume_success(self): self.driver.delete_volume(test_volume) def test_create_snapshot_success(self): lun_info = self.driver.create_snapshot(test_snap) self.assertEqual(11, lun_info['provider_location']) test_snap['volume']['provider_location'] = '' lun_info = self.driver.create_snapshot(test_snap) self.assertEqual(11, lun_info['provider_location']) test_snap['volume']['provider_location'] = None lun_info = self.driver.create_snapshot(test_snap) self.assertEqual(11, lun_info['provider_location']) def test_delete_snapshot_success(self): self.driver.delete_snapshot(test_snap) def test_create_volume_from_snapsuccess(self): self.mock_object( huawei_driver.HuaweiBaseDriver, '_get_volume_type', mock.Mock(return_value={'extra_specs': sync_replica_specs})) self.mock_object(replication.ReplicaCommonDriver, 'sync') model_update = self.driver.create_volume_from_snapshot(test_volume, test_volume) self.assertEqual('1', model_update['provider_location']) driver_data = {'pair_id': TEST_PAIR_ID, 'rmt_lun_id': '1'} driver_data = replication.to_string(driver_data) self.assertEqual(driver_data, model_update['replication_driver_data']) self.assertEqual('available', model_update['replication_status']) def test_initialize_connection_success(self): iscsi_properties = self.driver.initialize_connection(test_volume, FakeConnector) self.assertEqual(1, iscsi_properties['data']['target_lun']) def test_terminate_connection_success(self): self.driver.terminate_connection(test_volume, FakeConnector) def test_get_volume_status(self): data = self.driver.get_volume_stats() self.assertEqual('2.0.5', data['driver_version']) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={"CAPACITY": 6291456}) @mock.patch.object(rest_client.RestClient, 'extend_lun') def test_extend_volume_size_equal(self, mock_extend, mock_lun_info): self.driver.extend_volume(test_volume, 3) self.assertEqual(0, mock_extend.call_count) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={"CAPACITY": 5291456}) @mock.patch.object(rest_client.RestClient, 'extend_lun') def test_extend_volume_success(self, mock_extend, mock_lun_info): self.driver.extend_volume(test_volume, 3) self.assertEqual(1, mock_extend.call_count) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={"CAPACITY": 7291456}) def test_extend_volume_fail(self, mock_lun_info): self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, test_volume, 3) def test_extend_nonexistent_volume(self): test_volume = { 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635' } self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', mock.Mock(return_value=None)) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, test_volume, 3) @ddt.data({'admin_metadata': {'huawei_lun_wwn': '1'}, 'id': '21ec7341-9256-497b-97d9-ef48edcf0635'}, {'volume_admin_metadata': [{'key': 'huawei_lun_wwn', 'value': 
'1'}], 'id': '21ec7341-9256-497b-97d9-ef48edcf0635'}) def test_get_admin_metadata(self, volume_data): expected_value = {'huawei_lun_wwn': '1'} admin_metadata = huawei_utils.get_admin_metadata(volume_data) self.assertEqual(expected_value, admin_metadata) def test_login_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.client.login) def test_create_snapshot_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, test_snap) def test_create_volume_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, test_volume) def test_delete_volume_fail(self): self.driver.client.test_fail = True self.driver.delete_volume(test_volume) def test_delete_snapshot_fail(self): self.driver.client.test_fail = True self.driver.delete_snapshot(test_snap) def test_delete_snapshot_with_snapshot_nonexistent(self): fake_snap = { 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 1, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'provider_location': None, } self.driver.delete_snapshot(fake_snap) def test_initialize_connection_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, test_volume, FakeConnector) def test_lun_is_associated_to_lungroup(self): self.driver.client.associate_lun_to_lungroup('11', '11') result = self.driver.client._is_lun_associated_to_lungroup('11', '11') self.assertTrue(result) def test_lun_is_not_associated_to_lun_group(self): self.driver.client.associate_lun_to_lungroup('12', '12') self.driver.client.remove_lun_from_lungroup('12', '12') result = self.driver.client._is_lun_associated_to_lungroup('12', '12') self.assertFalse(result) def test_get_tgtip(self): portg_id = self.driver.client.get_tgt_port_group(self.portgroup) target_ip = self.driver.client._get_tgt_ip_from_portgroup(portg_id) self.assertEqual(self.target_ips, target_ip) def test_find_chap_info(self): tmp_dict = {} tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3' tmp_dict['CHAPinfo'] = 'mm-user;mm-user@storage' iscsi_info = [tmp_dict] initiator_name = FakeConnector['initiator'] chapinfo = self.driver.client.find_chap_info(iscsi_info, initiator_name) chap_username, chap_password = chapinfo.split(';') self.assertEqual('mm-user', chap_username) self.assertEqual('mm-user@storage', chap_password) def test_find_alua_info(self): tmp_dict = {} tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3' tmp_dict['ALUA'] = '1' iscsi_info = [tmp_dict] initiator_name = FakeConnector['initiator'] type = self.driver.client._find_alua_info(iscsi_info, initiator_name) self.assertEqual('1', type) def test_get_pool_info(self): pools = [{"NAME": "test001", "ID": "0", "USERFREECAPACITY": "36", "USERTOTALCAPACITY": "48", "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE}, {"NAME": "test002", "ID": "1", "USERFREECAPACITY": "37", "USERTOTALCAPACITY": "49", "USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE}, {"NAME": "test003", "ID": "0", "USERFREECAPACITY": "36", "DATASPACE": "35", "USERTOTALCAPACITY": "48", "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE}] pool_name = 'test001' test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48'} pool_info = 
self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) pool_name = 'test002' test_info = {} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) pool_name = 'test000' test_info = {} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) pool_name = 'test003' test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48'} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) def test_get_smartx_specs_opts(self): smartx_opts = smartx.SmartX().get_smartx_specs_opts(smarttier_opts) self.assertEqual('3', smartx_opts['policy']) @mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type', return_value={'MAXIOPS': '100', 'IOType': '2'}) def test_create_smartqos(self, mock_qos_value): lun_info = self.driver.create_volume(test_volume) self.assertEqual('1', lun_info['provider_location']) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', 'thin_provisioning_support': 'true', 'thick_provisioning_support': 'false', 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test'}) def test_create_smartx(self, mock_volume_types, mock_add_lun_to_partition): lun_info = self.driver.create_volume(test_volume) self.assertEqual('1', lun_info['provider_location']) def test_find_available_qos(self): qos = {'MAXIOPS': '100', 'IOType': '2'} fake_qos_info_response_equal = { "error": { "code": 0 }, "data": [{ "ID": "11", "MAXIOPS": "100", "LATENCY": "0", "IOType": "2", "FSLIST": u'[""]', 'RUNNINGSTATUS': "2", "NAME": "OpenStack_57_20151225102851", "LUNLIST": u'["1", "2", "3", "4", "5", "6", "7", "8", "9",\ "10", ,"11", "12", "13", "14", "15", "16", "17", "18", "19",\ "20", ,"21", "22", "23", "24", "25", "26", "27", "28", "29",\ "30", ,"31", "32", "33", "34", "35", "36", "37", "38", "39",\ "40", ,"41", "42", "43", "44", "45", "46", "47", "48", "49",\ "50", ,"51", "52", "53", "54", "55", "56", "57", "58", "59",\ "60", ,"61", "62", "63", "64"]' }] } # Number of LUNs in QoS is equal to 64 with mock.patch.object(rest_client.RestClient, 'get_qos', return_value=fake_qos_info_response_equal): (qos_id, lun_list) = self.driver.client.find_available_qos(qos) self.assertEqual((None, []), (qos_id, lun_list)) # Number of LUNs in QoS is less than 64 fake_qos_info_response_less = { "error": { "code": 0 }, "data": [{ "ID": "11", "MAXIOPS": "100", "LATENCY": "0", "IOType": "2", "FSLIST": u'[""]', 'RUNNINGSTATUS': "2", "NAME": "OpenStack_57_20151225102851", "LUNLIST": u'["0", "1", "2"]' }] } with mock.patch.object(rest_client.RestClient, 'get_qos', return_value=fake_qos_info_response_less): (qos_id, lun_list) = self.driver.client.find_available_qos(qos) self.assertEqual(("11", u'["0", "1", "2"]'), (qos_id, lun_list)) @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) def test_create_hypermetro_success(self, mock_volume_ready, mock_hyper_domain, 
mock_pool_info, mock_all_pool_info, mock_login_return): metadata = {"hypermetro_id": '11', "remote_lun_id": '1'} lun_info = self.driver.create_volume(hyper_volume) self.assertEqual(metadata, lun_info['metadata']) @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) @mock.patch.object(hypermetro.HuaweiHyperMetro, '_create_hypermetro_pair') @mock.patch.object(rest_client.RestClient, 'delete_lun') def test_create_hypermetro_fail(self, mock_delete_lun, mock_hyper_pair_info, mock_volume_ready, mock_hyper_domain, mock_pool_info, mock_all_pool_info, mock_hypermetro_opts): self.driver.client.login() mock_hyper_pair_info.side_effect = exception.VolumeBackendAPIException( data='Create hypermetro error.') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, hyper_volume) mock_delete_lun.assert_called_with('1') @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value={}) def test_create_hypermetro_remote_pool_none_fail(self, mock_pool_info, mock_all_pool_info): param = {'TYPE': '11', 'PARENTID': ''} self.driver.client.login() self.assertRaises(exception.VolumeBackendAPIException, self.driver.metro.create_hypermetro, '2', param) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'create_lun', return_value={'CAPACITY': '2097152', 'DESCRIPTION': '2f0635', 'HEALTHSTATUS': '1', 'ALLOCTYPE': '1', 'WWN': '6643e8c1004c5f6723e9f454003', 'ID': '1', 'RUNNINGSTATUS': '27', 'NAME': '5mFHcBv4RkCcD'}) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) def test_create_hypermetro_remote_pool_parentid(self, mock_volume_ready, mock_hyper_domain, mock_create_lun, mock_pool_info, mock_all_pool_info): param = {'TYPE': '11', 'PARENTID': ''} self.driver.metro.create_hypermetro('2', param) lun_PARENTID = mock_create_lun.call_args[0][0]['PARENTID'] self.assertEqual(FAKE_FIND_POOL_RESPONSE['ID'], lun_PARENTID) @mock.patch.object(huawei_driver.huawei_utils, 'get_volume_metadata', return_value={'hypermetro_id': '3400a30d844d0007', 'remote_lun_id': '1'}) def test_hypermetro_none_map_info_fail(self, mock_metadata): self.assertRaises(exception.VolumeBackendAPIException, self.driver.metro.connect_volume_fc, test_volume, FakeConnector) @mock.patch.object(rest_client.RestClient, 'check_lun_exist', return_value=True) @mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist', return_value=True) @mock.patch.object(rest_client.RestClient, 'delete_hypermetro', return_value=FAKE_COMMON_SUCCESS_RESPONSE) @mock.patch.object(rest_client.RestClient, 'delete_lun', return_value=None) def test_delete_hypermetro_success(self, mock_delete_lun, mock_delete_hypermetro, mock_check_hyermetro, mock_lun_exit): self.driver.delete_volume(hyper_volume) 
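    # A brief illustration (not an original test) of the stacked
    # mock.patch.object ordering relied on throughout this class: the
    # decorator closest to the function patches first, so the mock
    # arguments arrive bottom-up after 'self'.
    @mock.patch.object(rest_client.RestClient, 'delete_lun')
    @mock.patch.object(rest_client.RestClient, 'check_lun_exist',
                       return_value=True)
    def test_patch_ordering_sketch(self, mock_lun_exist, mock_delete_lun):
        self.assertTrue(mock_lun_exist.return_value)
        self.assertFalse(mock_delete_lun.called)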
@mock.patch.object(rest_client.RestClient, 'check_lun_exist', return_value=True) @mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist', return_value=True) @mock.patch.object(rest_client.RestClient, 'get_hypermetro_by_id', return_value=FAKE_METRO_INFO_RESPONCE) @mock.patch.object(rest_client.RestClient, 'delete_hypermetro') @mock.patch.object(rest_client.RestClient, 'delete_lun', return_value=None) def test_delete_hypermetro_fail(self, mock_delete_lun, mock_delete_hypermetro, mock_metro_info, mock_check_hyermetro, mock_lun_exit): mock_delete_hypermetro.side_effect = ( exception.VolumeBackendAPIException(data='Delete hypermetro ' 'error.')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, hyper_volume) mock_delete_lun.assert_called_with('11') def test_manage_existing_get_size_invalid_reference(self): # Can't find LUN by source-name. external_ref = {'source-name': 'LUN1'} with mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value=None): ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, test_volume, external_ref) self.assertIsNotNone(re.search('please check the source-name ' 'or source-id', ex.msg)) # Can't find LUN by source-id. external_ref = {'source-id': 'ID1'} with mock.patch.object(rest_client.RestClient, 'get_lun_info') as m_gt: m_gt.side_effect = exception.VolumeBackendAPIException( data='Error') self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, test_volume, external_ref) self.assertIsNotNone(re.search('please check the source-name ' 'or source-id', ex.msg)) def test_manage_existing_get_size_improper_lunsize(self): # LUN size is not multiple of 1 GB. external_ref = {'source-id': 'ID1'} with mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097150}): ex = self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, test_volume, external_ref) self.assertIsNotNone( re.search('Volume size must be multiple of 1 GB', ex.msg)) @ddt.data({'source-id': 'ID1'}, {'source-name': 'LUN1'}, {'source-name': 'LUN1', 'source-id': 'ID1'}) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_get_size_success(self, mock_get_lun_id_by_name, mock_get_lun_info, external_ref): size = self.driver.manage_existing_get_size(test_volume, external_ref) self.assertEqual(1, size) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'StoragePool001'}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_pool_mismatch(self, mock_get_by_name, mock_get_info): # LUN does not belong to the specified pool. 
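        # Note on the pattern used in the manage_existing* tests above and
        # below: cinder's base TestCase derives from testtools, where
        # assertRaises returns the caught exception, enabling
        # 'ex = self.assertRaises(...)' followed by checks on ex.msg. The
        # plain-unittest equivalent would be the context-manager form:
        #
        #     with self.assertRaises(ValueError) as ctx:
        #         int('not-a-number')
        #     self.assertIn('invalid literal', str(ctx.exception))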
with mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value={'PARENTNAME': 'StoragePool001'}): test_volume = {'host': 'ubuntu-204@v3r3#StoragePool002', 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf'} external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, external_ref) self.assertIsNotNone(re.search('The specified LUN does not belong' ' to the given pool', ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'StoragePool001'}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_lun_abnormal(self, mock_get_by_name, mock_get_info): # Status is not normal. ret = {'PARENTNAME': "StoragePool001", 'HEALTHSTATUS': '2'} with mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value=ret): test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001', 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf'} external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, external_ref) self.assertIsNotNone(re.search('LUN status is not normal', ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs', return_value=[{'LOCALOBJID': 'ID1'}]) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'StoragePool001', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_with_hypermetro(self, mock_get_by_name, mock_get_info, mock_get_hyper_pairs): test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001', 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf'} # Exists in a HyperMetroPair. 
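        # manage_existing refuses LUNs that already participate in array
        # features (HyperMetro pairs, SplitMirror, migration tasks, LUN
        # groups), as the next tests exercise one by one. Roughly, the guard
        # being tested here looks like this (an illustrative sketch, not the
        # driver's actual code):
        #
        #     for pair in client.get_hypermetro_pairs():
        #         if pair.get('LOCALOBJID') == lun_id:
        #             raise exception.ManageExistingInvalidReference(...)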
with mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs', return_value=[{'LOCALOBJID': 'ID1'}]): external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, external_ref) self.assertIsNotNone(re.search('HyperMetroPair', ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs') @mock.patch.object(rest_client.RestClient, 'rename_lun') @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'StoragePool001', 'HEALTHSTATUS': constants.STATUS_HEALTH, 'WWN': '6643e8c1004c5f6723e9f454003'}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_with_lower_version(self, mock_get_by_name, mock_get_info, mock_rename, mock_get_hyper_pairs): test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf', 'admin_metadata': { 'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'}} mock_get_hyper_pairs.side_effect = ( exception.VolumeBackendAPIException(data='err')) external_ref = {'source-name': 'LUN1'} model_update = self.driver.manage_existing(test_volume, external_ref) expected_val = { 'admin_metadata': { 'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003' }, 'provider_location': 'ID1'} self.assertEqual(expected_val, model_update) @ddt.data([[{'PRILUNID': 'ID1'}], []], [[{'PRILUNID': 'ID2'}], ['ID1', 'ID2']]) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'StoragePool001', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_with_splitmirror(self, ddt_data, mock_get_by_name, mock_get_info): test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001', 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf', 'id': '21ec7341-9256-497b-97d9-ef48edcf'} # Exists in a SplitMirror. with mock.patch.object(rest_client.RestClient, 'get_split_mirrors', return_value=ddt_data[0]), \ mock.patch.object(rest_client.RestClient, 'get_target_luns', return_value=ddt_data[1]): external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, external_ref) self.assertIsNotNone(re.search('SplitMirror', ex.msg)) @ddt.data([{'PARENTID': 'ID1'}], [{'TARGETLUNID': 'ID1'}]) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'StoragePool001', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_under_migration(self, ddt_data, mock_get_by_name, mock_get_info): test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001', 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf', 'id': '21ec7341-9256-497b-97d9-ef48edcf'} # Exists in a migration task. 
with mock.patch.object(rest_client.RestClient, 'get_migration_task', return_value=ddt_data): external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, external_ref) self.assertIsNotNone(re.search('migration', ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'StoragePool001', 'SNAPSHOTIDS': [], 'ISADD2LUNGROUP': 'true', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_with_lungroup(self, mock_get_by_name, mock_get_info): # Already in LUN group. test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001', 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf'} external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, external_ref) self.assertIsNotNone(re.search('Already exists in a LUN group', ex.msg)) @ddt.data({'source-name': 'LUN1'}, {'source-id': 'ID1'}) @mock.patch.object(rest_client.RestClient, 'rename_lun') @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value={'PARENTNAME': 'StoragePool001', 'SNAPSHOTIDS': [], 'ID': 'ID1', 'HEALTHSTATUS': constants.STATUS_HEALTH, 'WWN': '6643e8c1004c5f6723e9f454003'}) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ALLOCTYPE': 1}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_success(self, mock_get_by_name, mock_get_info, mock_check_lun, mock_rename, external_ref): test_volume = { 'host': 'ubuntu-204@v3r3#StoragePool001', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf', 'admin_metadata': { 'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003' } } model_update = self.driver.manage_existing(test_volume, external_ref) expected_val = { 'admin_metadata': { 'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003' }, 'provider_location': 'ID1'} self.assertEqual(expected_val, model_update) @ddt.data([None, 0], ['ID1', 1]) @mock.patch.object(rest_client.RestClient, 'rename_lun') def test_unmanage(self, ddt_data, mock_rename): test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635'} with mock.patch.object(huawei_driver.HuaweiBaseDriver, '_check_volume_exist_on_array', return_value=ddt_data[0]): self.driver.unmanage(test_volume) self.assertEqual(ddt_data[1], mock_rename.call_count) @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', return_value={'ID': 'ID1', 'NAME': 'test1', 'PARENTID': '12', 'USERCAPACITY': 2097152, 'HEALTHSTATUS': '2'}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_abnormal(self, mock_get_by_name, mock_get_info): with mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_snapshot_info_by_ref', return_value={'HEALTHSTATUS': '2', 'PARENTID': '12'}): test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume': {'provider_location': '12'}} external_ref = {'source-name': 'test1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, test_snapshot, external_ref) self.assertIsNotNone(re.search('Snapshot status is not normal', ex.msg)) @mock.patch.object(rest_client.RestClient, 
'get_snapshot_info', return_value={'ID': 'ID1', 'EXPOSEDTOINITIATOR': 'true', 'NAME': 'test1', 'PARENTID': '12', 'USERCAPACITY': 2097152, 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_with_lungroup(self, mock_get_by_name, mock_get_info): # Already in LUN group. test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume': {'provider_location': '12'}} external_ref = {'source-name': 'test1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, test_snapshot, external_ref) self.assertIsNotNone(re.search('Snapshot is exposed to initiator', ex.msg)) @mock.patch.object(rest_client.RestClient, 'rename_snapshot') @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_snapshot_info_by_ref', return_value={'ID': 'ID1', 'EXPOSEDTOINITIATOR': 'false', 'NAME': 'test1', 'PARENTID': '12', 'USERCAPACITY': 2097152, 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', return_value={'ID': 'ID1', 'EXPOSEDTOINITIATOR': 'false', 'NAME': 'test1', 'PARENTID': '12', 'USERCAPACITY': 2097152, 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_success(self, mock_get_by_name, mock_get_info, mock_check_snapshot, mock_rename): test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume': {'provider_location': '12'}} external_ref = {'source-name': 'test1'} model_update = self.driver.manage_existing_snapshot(test_snapshot, external_ref) self.assertEqual({'provider_location': 'ID1'}, model_update) test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume': {'provider_location': '12'}} external_ref = {'source-id': 'ID1'} model_update = self.driver.manage_existing_snapshot(test_snapshot, external_ref) self.assertEqual({'provider_location': 'ID1'}, model_update) @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', return_value={'ID': 'ID1', 'EXPOSEDTOINITIATOR': 'false', 'NAME': 'test1', 'USERCAPACITY': 2097152, 'PARENTID': '11', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_mismatch_lun(self, mock_get_by_name, mock_get_info): external_ref = {'source-name': 'test1'} test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume': {'provider_location': '12'}} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, test_snapshot, external_ref) self.assertIsNotNone(re.search("Snapshot doesn't belong to volume", ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', return_value={'USERCAPACITY': 2097152}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_get_size_success(self, mock_get_id_by_name, mock_get_info): external_ref = {'source-name': 'test1', 'source-id': 'ID1'} test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume': {'provider_location': '12'}} size = 
self.driver.manage_existing_snapshot_get_size(test_snapshot, external_ref) self.assertEqual(1, size) external_ref = {'source-name': 'test1'} test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume': {'provider_location': '12'}} size = self.driver.manage_existing_snapshot_get_size(test_snapshot, external_ref) self.assertEqual(1, size) external_ref = {'source-id': 'ID1'} test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume': {'provider_location': '12'}} size = self.driver.manage_existing_snapshot_get_size(test_snapshot, external_ref) self.assertEqual(1, size) @mock.patch.object(rest_client.RestClient, 'rename_snapshot') def test_unmanage_snapshot(self, mock_rename): test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635'} with mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value=None): self.driver.unmanage_snapshot(test_snapshot) self.assertEqual(0, mock_rename.call_count) with mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1'): self.driver.unmanage_snapshot(test_snapshot) self.assertEqual(1, mock_rename.call_count) @ddt.data(sync_replica_specs, async_replica_specs) def test_create_replication_success(self, mock_type): self.mock_object(replication.ReplicaCommonDriver, 'sync') self.mock_object( huawei_driver.HuaweiBaseDriver, '_get_volume_type', mock.Mock(return_value={'extra_specs': mock_type})) model_update = self.driver.create_volume(replication_volume) driver_data = {'pair_id': TEST_PAIR_ID, 'rmt_lun_id': '1'} driver_data = replication.to_string(driver_data) self.assertEqual(driver_data, model_update['replication_driver_data']) self.assertEqual('available', model_update['replication_status']) @ddt.data( [ rest_client.RestClient, 'get_array_info', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')) ], [ rest_client.RestClient, 'get_remote_devices', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')) ], [ rest_client.RestClient, 'get_remote_devices', mock.Mock(return_value={}) ], [ replication.ReplicaPairManager, 'wait_volume_online', mock.Mock(side_effect=[ None, exception.VolumeBackendAPIException(data='err')]) ], [ rest_client.RestClient, 'create_pair', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')) ], [ replication.ReplicaCommonDriver, 'sync', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')) ], ) @ddt.unpack def test_create_replication_fail(self, mock_module, mock_func, mock_value): self.mock_object( huawei_driver.HuaweiBaseDriver, '_get_volume_type', mock.Mock(return_value={'extra_specs': sync_replica_specs})) self.mock_object(replication.ReplicaPairManager, '_delete_pair') self.mock_object(mock_module, mock_func, mock_value) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, replication_volume) def test_delete_replication_success(self): self.mock_object(replication.ReplicaCommonDriver, 'split') self.mock_object( huawei_driver.HuaweiBaseDriver, '_get_volume_type', mock.Mock(return_value={'extra_specs': sync_replica_specs})) self.driver.delete_volume(replication_volume) self.mock_object(rest_client.RestClient, 'check_lun_exist', mock.Mock(return_value=False)) self.driver.delete_volume(replication_volume) def test_wait_volume_online(self): replica = FakeReplicaPairManager(self.driver.client, 
self.driver.replica_client, self.configuration) lun_info = {'ID': '11'} replica.wait_volume_online(self.driver.client, lun_info) offline_status = {'RUNNINGSTATUS': '28'} replica.wait_volume_online(self.driver.client, lun_info) with mock.patch.object(rest_client.RestClient, 'get_lun_info', offline_status): self.assertRaises(exception.VolumeBackendAPIException, replica.wait_volume_online, self.driver.client, lun_info) def test_wait_second_access(self): pair_id = '1' access_ro = constants.REPLICA_SECOND_RO access_rw = constants.REPLICA_SECOND_RW op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) self.mock_object(replication.PairOp, 'get_replica_info', mock.Mock(return_value={'SECRESACCESS': access_ro})) self.mock_object(huawei_utils.time, 'time', mock.Mock( side_effect = utils.generate_timeout_series( constants.DEFAULT_REPLICA_WAIT_TIMEOUT))) common_driver.wait_second_access(pair_id, access_ro) self.assertRaises(exception.VolumeBackendAPIException, common_driver.wait_second_access, pair_id, access_rw) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_wait_replica_ready(self): normal_status = { 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL, 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL } split_status = { 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_SPLIT, 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL } sync_status = { 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_SYNC, 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL } pair_id = '1' op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) with mock.patch.object(replication.PairOp, 'get_replica_info', mock.Mock(return_value=normal_status)): common_driver.wait_replica_ready(pair_id) with mock.patch.object( replication.PairOp, 'get_replica_info', mock.Mock(side_effect=[sync_status, normal_status])): common_driver.wait_replica_ready(pair_id) with mock.patch.object(replication.PairOp, 'get_replica_info', mock.Mock(return_value=split_status)): self.assertRaises(exception.VolumeBackendAPIException, common_driver.wait_replica_ready, pair_id) def test_failover_to_current(self): driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update = driver.failover_host( None, [test_volume], 'default') self.assertTrue(driver.active_backend_id in ('', None)) self.assertTrue(old_client == driver.client) self.assertTrue(old_replica_client == driver.replica_client) self.assertTrue(old_replica == driver.replica) self.assertEqual('default', secondary_id) self.assertEqual(0, len(volumes_update)) def test_failover_normal_volumes(self): driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update = driver.failover_host( None, [test_volume], REPLICA_BACKEND_ID) self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id) self.assertTrue(old_client == driver.replica_client) self.assertTrue(old_replica_client == driver.client) self.assertFalse(old_replica == driver.replica) self.assertEqual(REPLICA_BACKEND_ID, secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] 
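        # failover_host returns (secondary_id, volumes_update); each entry
        # in volumes_update pairs a volume id with its model updates. The
        # shape, inferred from the assertions in these tests (values
        # illustrative):
        #
        #     {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
        #      'updates': {'status': 'error',
        #                  'metadata': {'old_status': 'available'}}}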
self.assertEqual(test_volume['id'], v_id)
        self.assertEqual('error', v_update['status'])
        self.assertEqual(test_volume['status'],
                         v_update['metadata']['old_status'])

    def test_failback_to_current(self):
        driver = FakeISCSIStorage(configuration=self.configuration)
        driver.active_backend_id = REPLICA_BACKEND_ID
        driver.do_setup()
        old_client = driver.client
        old_replica_client = driver.replica_client
        old_replica = driver.replica
        secondary_id, volumes_update = driver.failover_host(
            None, [test_volume], REPLICA_BACKEND_ID)
        self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
        self.assertTrue(old_client == driver.client)
        self.assertTrue(old_replica_client == driver.replica_client)
        self.assertTrue(old_replica == driver.replica)
        self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
        self.assertEqual(0, len(volumes_update))

    def test_failback_normal_volumes(self):
        volume = copy.deepcopy(test_volume)
        volume['status'] = 'error'
        volume['metadata'] = {'old_status': 'available'}
        driver = FakeISCSIStorage(configuration=self.configuration)
        driver.active_backend_id = REPLICA_BACKEND_ID
        driver.do_setup()
        old_client = driver.client
        old_replica_client = driver.replica_client
        old_replica = driver.replica
        secondary_id, volumes_update = driver.failover_host(
            None, [volume], 'default')
        self.assertTrue(driver.active_backend_id in ('', None))
        self.assertTrue(old_client == driver.replica_client)
        self.assertTrue(old_replica_client == driver.client)
        self.assertFalse(old_replica == driver.replica)
        self.assertEqual('default', secondary_id)
        self.assertEqual(1, len(volumes_update))
        v_id = volumes_update[0]['volume_id']
        v_update = volumes_update[0]['updates']
        self.assertEqual(volume['id'], v_id)
        self.assertEqual('available', v_update['status'])
        self.assertFalse('old_status' in v_update['metadata'])

    def test_failover_replica_volumes(self):
        driver = FakeISCSIStorage(configuration=self.configuration)
        driver.do_setup()
        old_client = driver.client
        old_replica_client = driver.replica_client
        old_replica = driver.replica
        self.mock_object(replication.ReplicaCommonDriver, 'failover')
        self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
                         mock.Mock(
                             return_value={'replication_enabled': 'true'}))
        secondary_id, volumes_update = driver.failover_host(
            None, [replication_volume], REPLICA_BACKEND_ID)
        self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
        self.assertTrue(old_client == driver.replica_client)
        self.assertTrue(old_replica_client == driver.client)
        self.assertFalse(old_replica == driver.replica)
        self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
        self.assertEqual(1, len(volumes_update))
        v_id = volumes_update[0]['volume_id']
        v_update = volumes_update[0]['updates']
        self.assertEqual(replication_volume['id'], v_id)
        self.assertEqual('1', v_update['provider_location'])
        self.assertEqual('failed-over', v_update['replication_status'])
        new_drv_data = {'pair_id': TEST_PAIR_ID,
                        'rmt_lun_id': replication_volume['provider_location']}
        new_drv_data = replication.to_string(new_drv_data)
        self.assertEqual(new_drv_data, v_update['replication_driver_data'])

    @ddt.data({}, {'pair_id': TEST_PAIR_ID})
    def test_failover_replica_volumes_invalid_drv_data(self, mock_drv_data):
        volume = copy.deepcopy(replication_volume)
        volume['replication_driver_data'] = replication.to_string(
            mock_drv_data)
        driver = FakeISCSIStorage(configuration=self.configuration)
        driver.do_setup()
        old_client = driver.client
        old_replica_client = driver.replica_client
        old_replica = driver.replica
        self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
mock.Mock( return_value={'replication_enabled': 'true'})) secondary_id, volumes_update = driver.failover_host( None, [volume], REPLICA_BACKEND_ID) self.assertTrue(driver.active_backend_id == REPLICA_BACKEND_ID) self.assertTrue(old_client == driver.replica_client) self.assertTrue(old_replica_client == driver.client) self.assertFalse(old_replica == driver.replica) self.assertEqual(REPLICA_BACKEND_ID, secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(volume['id'], v_id) self.assertEqual('error', v_update['replication_status']) def test_failback_replica_volumes(self): self.mock_object(replication.ReplicaCommonDriver, 'enable') self.mock_object(replication.ReplicaCommonDriver, 'wait_replica_ready') self.mock_object(replication.ReplicaCommonDriver, 'failover') self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', mock.Mock( return_value={'replication_enabled': 'true'})) volume = copy.deepcopy(replication_volume) driver = FakeISCSIStorage(configuration=self.configuration) driver.active_backend_id = REPLICA_BACKEND_ID driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update = driver.failover_host( None, [volume], 'default') self.assertTrue(driver.active_backend_id in ('', None)) self.assertTrue(old_client == driver.replica_client) self.assertTrue(old_replica_client == driver.client) self.assertFalse(old_replica == driver.replica) self.assertEqual('default', secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(replication_volume['id'], v_id) self.assertEqual('1', v_update['provider_location']) self.assertEqual('available', v_update['replication_status']) new_drv_data = {'pair_id': TEST_PAIR_ID, 'rmt_lun_id': replication_volume['provider_location']} new_drv_data = replication.to_string(new_drv_data) self.assertEqual(new_drv_data, v_update['replication_driver_data']) @ddt.data({}, {'pair_id': TEST_PAIR_ID}) def test_failback_replica_volumes_invalid_drv_data(self, mock_drv_data): self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', mock.Mock( return_value={'replication_enabled': 'true'})) volume = copy.deepcopy(replication_volume) volume['replication_driver_data'] = replication.to_string( mock_drv_data) driver = FakeISCSIStorage(configuration=self.configuration) driver.active_backend_id = REPLICA_BACKEND_ID driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update = driver.failover_host( None, [volume], 'default') self.assertTrue(driver.active_backend_id in ('', None)) self.assertTrue(old_client == driver.replica_client) self.assertTrue(old_replica_client == driver.client) self.assertFalse(old_replica == driver.replica) self.assertEqual('default', secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(replication_volume['id'], v_id) self.assertEqual('error', v_update['replication_status']) @mock.patch.object(replication.PairOp, 'is_primary', side_effect=[False, True]) @mock.patch.object(replication.ReplicaCommonDriver, 'split') @mock.patch.object(replication.ReplicaCommonDriver, 'unprotect_second') def test_replication_driver_enable_success(self, mock_unprotect, mock_split, mock_is_primary): replica_id = 
TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) common_driver.enable(replica_id) self.assertTrue(mock_unprotect.called) self.assertTrue(mock_split.called) self.assertTrue(mock_is_primary.called) @mock.patch.object(replication.PairOp, 'is_primary', return_value=False) @mock.patch.object(replication.ReplicaCommonDriver, 'split') def test_replication_driver_failover_success(self, mock_split, mock_is_primary): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) common_driver.failover(replica_id) self.assertTrue(mock_split.called) self.assertTrue(mock_is_primary.called) @mock.patch.object(replication.PairOp, 'is_primary', return_value=True) def test_replication_driver_failover_fail(self, mock_is_primary): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) self.assertRaises( exception.VolumeBackendAPIException, common_driver.failover, replica_id) @ddt.data(constants.REPLICA_SECOND_RW, constants.REPLICA_SECOND_RO) def test_replication_driver_protect_second(self, mock_access): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) self.mock_object(replication.ReplicaCommonDriver, 'wait_second_access') self.mock_object( replication.PairOp, 'get_replica_info', mock.Mock(return_value={'SECRESACCESS': mock_access})) common_driver.protect_second(replica_id) common_driver.unprotect_second(replica_id) def test_replication_driver_sync(self): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) async_normal_status = { 'REPLICATIONMODEL': constants.REPLICA_ASYNC_MODEL, 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL, 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL } self.mock_object(replication.ReplicaCommonDriver, 'protect_second') self.mock_object(replication.PairOp, 'get_replica_info', mock.Mock(return_value=async_normal_status)) common_driver.sync(replica_id, True) common_driver.sync(replica_id, False) def test_replication_driver_split(self): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) self.mock_object(replication.ReplicaCommonDriver, 'wait_expect_state') self.mock_object(replication.PairOp, 'split', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err'))) common_driver.split(replica_id) def test_replication_base_op(self): replica_id = '1' op = replication.AbsReplicaOp(None) op.create() op.delete(replica_id) op.protect_second(replica_id) op.unprotect_second(replica_id) op.sync(replica_id) op.split(replica_id) op.switch(replica_id) op.is_primary({}) op.get_replica_info(replica_id) op._is_status(None, {'key': 'volue'}, None) @mock.patch.object(rest_client.RestClient, 'call', return_value={"error": {"code": 0}}) def test_get_tgt_port_group_no_portg_exist(self, mock_call): portg = self.driver.client.get_tgt_port_group('test_portg') self.assertIsNone(portg) def test_get_tgt_iqn_from_rest_match(self): match_res = { 'data': [{ 'TYPE': 249, 'ID': '0+iqn.2006-08.com: 210048cee9d: 111.111.111.19,t,0x01' }, { 'TYPE': 249, 'ID': '0+iqn.2006-08.com: 210048cee9d: 111.111.111.191,t,0x01' }], 'error': { 'code': 0 } } ip = '111.111.111.19' expected_iqn = 
'iqn.2006-08.com: 210048cee9d: 111.111.111.19' self.mock_object(rest_client.RestClient, 'call', mock.Mock(return_value=match_res)) iqn = self.driver.client._get_tgt_iqn_from_rest(ip) self.assertEqual(expected_iqn, iqn) def test_get_tgt_iqn_from_rest_mismatch(self): match_res = { 'data': [{ 'TYPE': 249, 'ID': '0+iqn.2006-08.com: 210048cee9d: 192.0.2.191,t,0x01' }, { 'TYPE': 249, 'ID': '0+iqn.2006-08.com: 210048cee9d: 192.0.2.192,t,0x01' }], 'error': { 'code': 0 } } ip = '192.0.2.19' self.mock_object(rest_client.RestClient, 'call', mock.Mock(return_value=match_res)) iqn = self.driver.client._get_tgt_iqn_from_rest(ip) self.assertIsNone(iqn) class FCSanLookupService(object): def get_device_mapping_from_network(self, initiator_list, target_list): return fake_fabric_mapping class HuaweiFCDriverTestCase(test.TestCase): def setUp(self): super(HuaweiFCDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) self.huawei_conf = FakeHuaweiConf(self.configuration, 'FC') self.configuration.hypermetro_devices = hypermetro_devices self.stubs.Set(time, 'sleep', Fake_sleep) driver = FakeFCStorage(configuration=self.configuration) self.driver = driver self.driver.do_setup() self.driver.client.login() def test_login_success(self): device_id = self.driver.client.login() self.assertEqual('210235G7J20000000000', device_id) def test_create_volume_success(self): lun_info = self.driver.create_volume(test_volume) self.assertEqual('1', lun_info['provider_location']) def test_delete_volume_success(self): self.driver.delete_volume(test_volume) def test_create_snapshot_success(self): lun_info = self.driver.create_snapshot(test_snap) self.assertEqual(11, lun_info['provider_location']) test_snap['volume']['provider_location'] = '' lun_info = self.driver.create_snapshot(test_snap) self.assertEqual(11, lun_info['provider_location']) test_snap['volume']['provider_location'] = None lun_info = self.driver.create_snapshot(test_snap) self.assertEqual(11, lun_info['provider_location']) def test_delete_snapshot_success(self): self.driver.delete_snapshot(test_snap) def test_create_volume_from_snapsuccess(self): lun_info = self.driver.create_volume_from_snapshot(test_volume, test_volume) self.assertEqual('1', lun_info['provider_location']) def test_initialize_connection_success(self): iscsi_properties = self.driver.initialize_connection(test_volume, FakeConnector) self.assertEqual(1, iscsi_properties['data']['target_lun']) def test_hypermetro_connection_success(self): self.mock_object(rest_client.RestClient, 'find_array_version', mock.Mock(return_value='V300R003C00')) fc_properties = self.driver.initialize_connection(hyper_volume, FakeConnector) self.assertEqual(1, fc_properties['data']['target_lun']) def test_terminate_connection_success(self): self.driver.client.terminateFlag = True self.driver.terminate_connection(test_volume, FakeConnector) self.assertTrue(self.driver.client.terminateFlag) def test_terminate_connection_hypermetro_in_metadata(self): self.driver.terminate_connection(hyper_volume, FakeConnector) def test_get_volume_status(self): remote_device_info = {"ARRAYTYPE": "1", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "10"} self.mock_object( replication.ReplicaPairManager, 'get_remote_device_by_wwn', mock.Mock(return_value=remote_device_info)) data = self.driver.get_volume_stats() self.assertEqual('2.0.5', data['driver_version']) self.assertTrue(data['pools'][0]['replication_enabled']) self.assertListEqual(['sync', 'async'], data['pools'][0]['replication_type']) self.mock_object( 
replication.ReplicaPairManager, 'get_remote_device_by_wwn', mock.Mock(return_value={})) data = self.driver.get_volume_stats() self.assertNotIn('replication_enabled', data['pools'][0]) self.mock_object( replication.ReplicaPairManager, 'try_get_remote_wwn', mock.Mock(return_value={})) data = self.driver.get_volume_stats() self.assertEqual('2.0.5', data['driver_version']) self.assertNotIn('replication_enabled', data['pools'][0]) def test_extend_volume(self): self.driver.extend_volume(test_volume, 3) def test_login_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.client.login) def test_create_snapshot_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, test_snap) def test_create_volume_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, test_volume) def test_delete_volume_fail(self): self.driver.client.test_fail = True self.driver.delete_volume(test_volume) def test_delete_snapshot_fail(self): self.driver.client.test_fail = True self.driver.delete_snapshot(test_snap) def test_initialize_connection_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, test_volume, FakeConnector) def test_lun_is_associated_to_lungroup(self): self.driver.client.associate_lun_to_lungroup('11', '11') result = self.driver.client._is_lun_associated_to_lungroup('11', '11') self.assertTrue(result) def test_lun_is_not_associated_to_lun_group(self): self.driver.client.associate_lun_to_lungroup('12', '12') self.driver.client.remove_lun_from_lungroup('12', '12') result = self.driver.client._is_lun_associated_to_lungroup('12', '12') self.assertFalse(result) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') def test_migrate_volume_success(self, mock_add_lun_to_partition): # Migrate volume without new type. empty_dict = {} moved, model_update = self.driver.migrate_volume(None, test_volume, test_host, None) self.assertTrue(moved) self.assertEqual(empty_dict, model_update) # Migrate volume with new type. empty_dict = {} new_type = {'extra_specs': {'smarttier': ' true', 'smartcache': ' true', 'smartpartition': ' true', 'thin_provisioning_support': ' true', 'thick_provisioning_support': ' False', 'policy': '2', 'smartcache:cachename': 'cache-test', 'smartpartition:partitionname': 'partition-test'}} moved, model_update = self.driver.migrate_volume(None, test_volume, test_host, new_type) self.assertTrue(moved) self.assertEqual(empty_dict, model_update) def test_migrate_volume_fail(self): self.driver.client.test_fail = True # Migrate volume without new type. self.assertRaises(exception.VolumeBackendAPIException, self.driver.migrate_volume, None, test_volume, test_host, None) # Migrate volume with new type. new_type = {'extra_specs': {'smarttier': ' true', 'smartcache': ' true', 'thin_provisioning_support': ' true', 'thick_provisioning_support': ' False', 'policy': '2', 'smartcache:cachename': 'cache-test', 'partitionname': 'partition-test'}} self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.migrate_volume, None, test_volume, test_host, new_type) def test_check_migration_valid(self): is_valid = self.driver._check_migration_valid(test_host, test_volume) self.assertTrue(is_valid) # No pool_name in capabilities. 
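        # More broadly, the invalid hosts below cover the conditions
        # _check_migration_valid requires (as inferred from these cases):
        # capabilities must carry a non-empty pool_name, location_info must
        # match the current array's serial number, and storage_protocol must
        # match the driver's protocol whenever the volume is attached.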
invalid_host1 = {'host': 'ubuntu001@backend002#OpenStack_Pool', 'capabilities': {'location_info': '210235G7J20000000000', 'allocated_capacity_gb': 0, 'volume_backend_name': 'HuaweiFCDriver', 'storage_protocol': 'FC'}} is_valid = self.driver._check_migration_valid(invalid_host1, test_volume) self.assertFalse(is_valid) # location_info in capabilities is not matched. invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool', 'capabilities': {'location_info': '210235G7J20000000001', 'allocated_capacity_gb': 0, 'pool_name': 'OpenStack_Pool', 'volume_backend_name': 'HuaweiFCDriver', 'storage_protocol': 'FC'}} is_valid = self.driver._check_migration_valid(invalid_host2, test_volume) self.assertFalse(is_valid) # storage_protocol is not match current protocol and volume status is # 'in-use'. volume_in_use = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_attachment': 'in-use', 'provider_location': '11'} invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool', 'capabilities': {'location_info': '210235G7J20000000001', 'allocated_capacity_gb': 0, 'pool_name': 'OpenStack_Pool', 'volume_backend_name': 'HuaweiFCDriver', 'storage_protocol': 'iSCSI'}} is_valid = self.driver._check_migration_valid(invalid_host2, volume_in_use) self.assertFalse(is_valid) # pool_name is empty. invalid_host3 = {'host': 'ubuntu001@backend002#OpenStack_Pool', 'capabilities': {'location_info': '210235G7J20000000001', 'allocated_capacity_gb': 0, 'pool_name': '', 'volume_backend_name': 'HuaweiFCDriver', 'storage_protocol': 'iSCSI'}} is_valid = self.driver._check_migration_valid(invalid_host3, test_volume) self.assertFalse(is_valid) @mock.patch.object(rest_client.RestClient, 'rename_lun') def test_update_migrated_volume_success(self, mock_rename_lun): original_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0635'} current_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0636'} model_update = self.driver.update_migrated_volume(None, original_volume, current_volume, 'available') self.assertEqual({'_name_id': None}, model_update) @mock.patch.object(rest_client.RestClient, 'rename_lun') def test_update_migrated_volume_fail(self, mock_rename_lun): mock_rename_lun.side_effect = exception.VolumeBackendAPIException( data='Error occurred.') original_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0635'} current_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0636', '_name_id': '21ec7341-9256-497b-97d9-ef48edcf0637'} model_update = self.driver.update_migrated_volume(None, original_volume, current_volume, 'available') self.assertEqual({'_name_id': '21ec7341-9256-497b-97d9-ef48edcf0637'}, model_update) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') def test_retype_volume_success(self, mock_add_lun_to_partition): retype = self.driver.retype(None, test_volume, test_new_type, None, test_host) self.assertTrue(retype) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') @mock.patch.object( huawei_driver.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) def test_retype_replication_volume_success(self, mock_get_type, mock_add_lun_to_partition): retype = self.driver.retype(None, test_volume, test_new_replication_type, None, test_host) self.assertTrue(retype) def test_retype_volume_cache_fail(self): self.driver.client.cache_not_exist = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.retype, None, 
test_volume, test_new_type, None, test_host) def test_retype_volume_partition_fail(self): self.driver.client.partition_not_exist = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.retype, None, test_volume, test_new_type, None, test_host) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') def test_retype_volume_fail(self, mock_add_lun_to_partition): mock_add_lun_to_partition.side_effect = ( exception.VolumeBackendAPIException(data='Error occurred.')) retype = self.driver.retype(None, test_volume, test_new_type, None, test_host) self.assertFalse(retype) @mock.patch.object(rest_client.RestClient, 'get_all_engines', return_value=[{'NODELIST': '["0A","0B"]', 'ID': '0'}]) def test_build_ini_targ_map_engie_recorded(self, mock_engines): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( ['10000090fa0d6754'], '1', '11') target_port_wwns = ['2000643e8c4c5f66'] self.assertEqual(target_port_wwns, tgt_wwns) self.assertEqual({}, init_targ_map) @mock.patch.object(rest_client.RestClient, 'get_all_engines', return_value=[{'NODELIST': '["0A"]', 'ID': '0'}, {'NODELIST': '["0B"]', 'ID': '1'}]) def test_build_ini_targ_map_engie_not_recorded(self, mock_engines): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( ['10000090fa0d6754'], '1', '11') expected_wwns = ['2000643e8c4c5f66'] expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} self.assertEqual(expected_wwns, tgt_wwns) self.assertEqual(expected_map, init_targ_map) @mock.patch.object(rest_client.RestClient, 'get_all_engines', return_value=[{'NODELIST': '["0A", "0B"]', 'ID': '0'}]) def test_build_ini_targ_map_no_map(self, mock_engines): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) # Host with id '5' has no map on the array. 
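        # The zone helper returns (tgt_wwns, portg_id, init_targ_map); the
        # map is keyed by initiator WWN with a list of target port WWNs,
        # e.g. {'10000090fa0d6754': ['2000643e8c4c5f66']} (taken from the
        # expected values asserted in these tests).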
(tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( ['10000090fa0d6754'], '5', '11') expected_wwns = ['2000643e8c4c5f66'] expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} self.assertEqual(expected_wwns, tgt_wwns) self.assertEqual(expected_map, init_targ_map) def test_get_init_targ_map(self): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) (tgt_wwns, portg_id, init_targ_map) = zone_helper.get_init_targ_map( ['10000090fa0d6754'], '1') expected_wwns = ['2000643e8c4c5f66'] expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} self.assertEqual(expected_wwns, tgt_wwns) self.assertEqual(expected_map, init_targ_map) def test_multi_resturls_success(self): self.driver.client.test_multi_url_flag = True lun_info = self.driver.create_volume(test_volume) self.assertEqual('1', lun_info['provider_location']) def test_get_id_from_result(self): result = {} name = 'test_name' key = 'NAME' re = self.driver.client._get_id_from_result(result, name, key) self.assertIsNone(re) result = {'data': {}} re = self.driver.client._get_id_from_result(result, name, key) self.assertIsNone(re) result = {'data': [{'COUNT': 1, 'ID': '1'}, {'COUNT': 2, 'ID': '2'}]} re = self.driver.client._get_id_from_result(result, name, key) self.assertIsNone(re) result = {'data': [{'NAME': 'test_name1', 'ID': '1'}, {'NAME': 'test_name2', 'ID': '2'}]} re = self.driver.client._get_id_from_result(result, name, key) self.assertIsNone(re) result = {'data': [{'NAME': 'test_name', 'ID': '1'}, {'NAME': 'test_name2', 'ID': '2'}]} re = self.driver.client._get_id_from_result(result, name, key) self.assertEqual('1', re) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value={'ID': 1, 'CAPACITY': 110362624, 'TOTALCAPACITY': 209715200}) def test_get_capacity(self, mock_get_pool_info): expected_pool_capacity = {'total_capacity': 100.0, 'free_capacity': 52.625} pool_capacity = self.driver.client._get_capacity(None, None) self.assertEqual(expected_pool_capacity, pool_capacity) @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) @mock.patch.object(hypermetro.HuaweiHyperMetro, '_create_hypermetro_pair', return_value={"ID": '11', "NAME": 'hypermetro-pair'}) @mock.patch.object(rest_client.RestClient, 'logout', return_value=None) def test_create_hypermetro_success(self, mock_hypermetro_opts, mock_login_return, mock_all_pool_info, mock_pool_info, mock_hyper_domain, mock_volume_ready, mock_logout): metadata = {"hypermetro_id": '11', "remote_lun_id": '1'} lun_info = self.driver.create_volume(hyper_volume) self.assertEqual(metadata, lun_info['metadata']) @mock.patch.object(rest_client.RestClient, 'call', return_value={"data": [{"RUNNINGSTATUS": "27", "ID": '1'}, {"RUNNINGSTATUS": "26", "ID": '2'}], "error": {"code": 0}}) def test_get_online_free_wwns(self, mock_call): wwns = self.driver.client.get_online_free_wwns() self.assertEqual(['1'], wwns) @mock.patch.object(rest_client.RestClient, 'call', return_value={"data": {"ID": 1}, "error": {"code": 0}}) def test_rename_lun(self, mock_call): des = 'This LUN is renamed.' 
new_name = 'test_name'
        self.driver.client.rename_lun('1', new_name, des)
        self.assertEqual(1, mock_call.call_count)
        url = "/lun/1"
        data = {"NAME": new_name, "DESCRIPTION": des}
        mock_call.assert_called_once_with(url, data, "PUT")

    @mock.patch.object(rest_client.RestClient, 'call',
                       return_value={"data": {}})
    def test_is_host_associated_to_hostgroup_no_data(self, mock_call):
        res = self.driver.client.is_host_associated_to_hostgroup('1')
        self.assertFalse(res)

    @mock.patch.object(rest_client.RestClient, 'call',
                       return_value={"data": {'ISADD2HOSTGROUP': 'true'}})
    def test_is_host_associated_to_hostgroup_true(self, mock_call):
        res = self.driver.client.is_host_associated_to_hostgroup('1')
        self.assertTrue(res)

    @mock.patch.object(rest_client.RestClient, 'call',
                       return_value={"data": {'ISADD2HOSTGROUP': 'false'}})
    def test_is_host_associated_to_hostgroup_false(self, mock_call):
        res = self.driver.client.is_host_associated_to_hostgroup('1')
        self.assertFalse(res)


class HuaweiConfTestCase(test.TestCase):
    def setUp(self):
        super(HuaweiConfTestCase, self).setUp()

        self.tmp_dir = tempfile.mkdtemp()
        self.fake_xml_file = self.tmp_dir + '/cinder_huawei_conf.xml'

        self.conf = mock.Mock()
        self.conf.cinder_huawei_conf_file = self.fake_xml_file
        self.huawei_conf = huawei_conf.HuaweiConf(self.conf)

    def _create_fake_conf_file(self):
        """Create a fake config file.

        Huawei storage uses a customized XML configuration file to set
        its storage-specific parameters, so the unit tests need to
        simulate such a configuration file.
        """
        doc = minidom.Document()

        config = doc.createElement('config')
        doc.appendChild(config)

        storage = doc.createElement('Storage')
        config.appendChild(storage)
        url = doc.createElement('RestURL')
        url_text = doc.createTextNode('http://192.0.2.69:8082/'
                                      'deviceManager/rest/')
        url.appendChild(url_text)
        storage.appendChild(url)
        username = doc.createElement('UserName')
        username_text = doc.createTextNode('admin')
        username.appendChild(username_text)
        storage.appendChild(username)
        password = doc.createElement('UserPassword')
        password_text = doc.createTextNode('Admin@storage')
        password.appendChild(password_text)
        storage.appendChild(password)
        product = doc.createElement('Product')
        product_text = doc.createTextNode('V3')
        product.appendChild(product_text)
        storage.appendChild(product)
        protocol = doc.createElement('Protocol')
        protocol_text = doc.createTextNode('iSCSI')
        protocol.appendChild(protocol_text)
        storage.appendChild(protocol)

        lun = doc.createElement('LUN')
        config.appendChild(lun)
        luntype = doc.createElement('LUNType')
        luntype_text = doc.createTextNode('Thick')
        luntype.appendChild(luntype_text)
        lun.appendChild(luntype)
        lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
        lun_ready_wait_interval_text = doc.createTextNode('2')
        lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
        lun.appendChild(lun_ready_wait_interval)
        lun_copy_wait_interval = doc.createElement('LUNcopyWaitInterval')
        lun_copy_wait_interval_text = doc.createTextNode('2')
        lun_copy_wait_interval.appendChild(lun_copy_wait_interval_text)
        lun.appendChild(lun_copy_wait_interval)
        timeout = doc.createElement('Timeout')
        timeout_text = doc.createTextNode('43200')
        timeout.appendChild(timeout_text)
        lun.appendChild(timeout)
        write_type = doc.createElement('WriteType')
        write_type_text = doc.createTextNode('1')
        write_type.appendChild(write_type_text)
        lun.appendChild(write_type)
        mirror_switch = doc.createElement('MirrorSwitch')
        mirror_switch_text = doc.createTextNode('1')
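        # For reference, the document assembled in this method serializes to
        # a config file shaped like the following (abridged; the values come
        # from the createTextNode calls above and below):
        #
        #   <config>
        #     <Storage>
        #       <RestURL>http://192.0.2.69:8082/deviceManager/rest/</RestURL>
        #       <UserName>admin</UserName>
        #       <UserPassword>Admin@storage</UserPassword>
        #       <Product>V3</Product>
        #       <Protocol>iSCSI</Protocol>
        #     </Storage>
        #     <LUN>
        #       <LUNType>Thick</LUNType>
        #       ...
        #       <StoragePool>OpenStack_Pool</StoragePool>
        #     </LUN>
        #     <iSCSI>
        #       <DefaultTargetIP>192.0.2.68</DefaultTargetIP>
        #       <Initiator Name="iqn.1993-08.debian:01:ec2bff7ac3a3" .../>
        #     </iSCSI>
        #   </config>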
mirror_switch.appendChild(mirror_switch_text) lun.appendChild(mirror_switch) prefetch = doc.createElement('Prefetch') prefetch.setAttribute('Type', '1') prefetch.setAttribute('Value', '0') lun.appendChild(prefetch) pool = doc.createElement('StoragePool') pool_text = doc.createTextNode('OpenStack_Pool') pool.appendChild(pool_text) lun.appendChild(pool) iscsi = doc.createElement('iSCSI') config.appendChild(iscsi) defaulttargetip = doc.createElement('DefaultTargetIP') defaulttargetip_text = doc.createTextNode('192.0.2.68') defaulttargetip.appendChild(defaulttargetip_text) iscsi.appendChild(defaulttargetip) initiator = doc.createElement('Initiator') initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3') initiator.setAttribute('TargetIP', '192.0.2.2') initiator.setAttribute('CHAPinfo', 'mm-user;mm-user@storage') initiator.setAttribute('ALUA', '1') initiator.setAttribute('TargetPortGroup', 'PortGroup001') iscsi.appendChild(initiator) fakefile = open(self.conf.cinder_huawei_conf_file, 'w') fakefile.write(doc.toprettyxml(indent='')) fakefile.close() cinder-8.0.0/cinder/tests/unit/test_volume_configuration.py0000664000567000056710000000371012701406250025356 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the configuration wrapper in volume drivers.""" from oslo_config import cfg from cinder import test from cinder.volume import configuration volume_opts = [ cfg.StrOpt('str_opt', default='STR_OPT'), cfg.BoolOpt('bool_opt', default=False) ] more_volume_opts = [ cfg.IntOpt('int_opt', default=1), ] CONF = cfg.CONF CONF.register_opts(volume_opts) CONF.register_opts(more_volume_opts) class VolumeConfigurationTest(test.TestCase): def test_group_grafts_opts(self): c = configuration.Configuration(volume_opts, config_group='foo') self.assertEqual(c.str_opt, CONF.foo.str_opt) self.assertEqual(c.bool_opt, CONF.foo.bool_opt) def test_opts_no_group(self): c = configuration.Configuration(volume_opts) self.assertEqual(c.str_opt, CONF.str_opt) self.assertEqual(c.bool_opt, CONF.bool_opt) def test_grafting_multiple_opts(self): c = configuration.Configuration(volume_opts, config_group='foo') c.append_config_values(more_volume_opts) self.assertEqual(c.str_opt, CONF.foo.str_opt) self.assertEqual(c.bool_opt, CONF.foo.bool_opt) self.assertEqual(c.int_opt, CONF.foo.int_opt) def test_safe_get(self): c = configuration.Configuration(volume_opts, config_group='foo') self.assertIsNone(c.safe_get('none_opt')) cinder-8.0.0/cinder/tests/unit/test_test.py0000664000567000056710000000525712701406250022107 0ustar jenkinsjenkins00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the testing base code.""" import mock from oslo_config import cfg import oslo_messaging as messaging from cinder import rpc from cinder import test class IsolationTestCase(test.TestCase): """Ensure that things are cleaned up after failed tests. These tests don't really do much here, but if isolation fails a bunch of other tests should fail. """ def test_service_isolation(self): self.start_service('volume') def test_rpc_consumer_isolation(self): class NeverCalled(object): def __getattribute__(*args): assert False, "I should never get called." server = rpc.get_server(messaging.Target(topic='volume', server=cfg.CONF.host), endpoints=[NeverCalled()]) server.start() class MockAssertTestCase(test.TestCase): """Ensure that valid mock assert methods are used.""" def test_assert_has_calls(self): mock_call = mock.MagicMock(return_value=None) mock_call(1) mock_call(2) mock_call.assert_has_calls([mock.call(1), mock.call(2)]) def test_assert_any_call(self): mock_call = mock.MagicMock(return_value=None) mock_call(1) mock_call(2) mock_call(3) mock_call.assert_any_call(1) def test_assert_called_with(self): mock_call = mock.MagicMock(return_value=None) mock_call(1, 'foo', a='123') mock_call.assert_called_with(1, 'foo', a='123') def test_assert_called_once_with(self): mock_call = mock.MagicMock(return_value=None) mock_call(1, 'foobar', a='123') mock_call.assert_called_once_with(1, 'foobar', a='123') def test_invalid_assert_calls(self): mock_call = mock.MagicMock() self.assertRaises(AttributeError, lambda: mock_call.assert_called) self.assertRaises(AttributeError, lambda: mock_call.assert_once_called_with) cinder-8.0.0/cinder/tests/unit/brick/0000775000567000056710000000000012701406543020605 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/brick/fake_lvm.py0000664000567000056710000000334412701406257022751 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
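# FakeBrickLVM below stands in for cinder.brick.local_dev.lvm.LVM so that
# LVM-related tests can run without shelling out to real LVM tools. Unlike
# brick.LVM, it takes no root_helper argument and keeps only the vg_name
# plus canned size data. A typical (illustrative) use in a test:
#
#     self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
#                                                   False, None, 'default')
#     self.assertEqual('5.00', self.volume.driver.vg.vg_size)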
class FakeBrickLVM(object): """Logs and records calls, for unit tests.""" def __init__(self, vg_name, create, pv_list, vtype, execute=None): super(FakeBrickLVM, self).__init__() self.vg_size = '5.00' self.vg_free_space = '5.00' self.vg_name = vg_name def supports_thin_provisioning(self): return False def get_volumes(self): return ['fake-volume'] def get_volume(self, name): return ['name'] def get_all_physical_volumes(self, vg_name=None): return [] def get_physical_volumes(self): return [] def update_volume_group_info(self): pass def create_thin_pool(self, name=None, size_str=0): pass def create_volume(self, name, size_str, lv_type='default', mirror_count=0): pass def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): pass def delete(self, name): pass def revert(self, snapshot_name): pass def lv_has_snapshot(self, name): return False def activate_lv(self, lv, is_snapshot=False, permanent=False): pass def rename_volume(self, lv_name, new_name): pass cinder-8.0.0/cinder/tests/unit/brick/test_brick_lvm.py0000664000567000056710000003542612701406257024176 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from mox3 import mox from oslo_concurrency import processutils from cinder.brick.local_dev import lvm as brick from cinder import exception from cinder import test from cinder.volume import configuration as conf def create_configuration(): configuration = mox.MockObject(conf.Configuration) configuration.append_config_values(mox.IgnoreArg()) return configuration class BrickLvmTestCase(test.TestCase): def setUp(self): self.configuration = mox.MockObject(conf.Configuration) self.configuration.volume_group_name = 'fake-vg' super(BrickLvmTestCase, self).setUp() # Stub processutils.execute for static methods self.stubs.Set(processutils, 'execute', self.fake_execute) self.vg = brick.LVM(self.configuration.volume_group_name, 'sudo', False, None, 'default', self.fake_execute) def failed_fake_execute(obj, *cmd, **kwargs): return ("\n", "fake-error") def fake_pretend_lvm_version(obj, *cmd, **kwargs): return (" LVM version: 2.03.00 (2012-03-06)\n", "") def fake_old_lvm_version(obj, *cmd, **kwargs): # Does not support thin prov or snap activation return (" LVM version: 2.02.65(2) (2012-03-06)\n", "") def fake_customised_lvm_version(obj, *cmd, **kwargs): return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "") def fake_execute(obj, *cmd, **kwargs): cmd_string = ', '.join(cmd) data = "\n" if ('env, LC_ALL=C, vgs, --noheadings, --unit=g, -o, name' == cmd_string): data = " fake-vg\n" data += " some-other-vg\n" elif ('env, LC_ALL=C, vgs, --noheadings, -o, name, fake-vg' == cmd_string): data = " fake-vg\n" elif 'env, LC_ALL=C, vgs, --version' in cmd_string: data = " LVM version: 2.02.95(2) (2012-03-06)\n" elif ('env, LC_ALL=C, vgs, --noheadings, -o, uuid, fake-vg' in cmd_string): data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \ '-o, 
name,size,free,lv_count,uuid, ' \ '--separator, :, --nosuffix' in cmd_string: data = (" test-prov-cap-vg-unit:10.00:10.00:0:" "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n") if 'test-prov-cap-vg-unit' in cmd_string: return (data, "") data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:" "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n") if 'test-prov-cap-vg-no-unit' in cmd_string: return (data, "") data = " fake-vg:10.00:10.00:0:"\ "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" if 'fake-vg' in cmd_string: return (data, "") data += " fake-vg-2:10.00:10.00:0:"\ "lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n" data += " fake-vg-3:10.00:10.00:0:"\ "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n" elif ('env, LC_ALL=C, lvs, --noheadings, ' '--unit=g, -o, vg_name,name,size, --nosuffix, ' 'fake-vg/lv-nothere' in cmd_string): raise processutils.ProcessExecutionError( stderr="One or more specified logical volume(s) not found.") elif ('env, LC_ALL=C, lvs, --noheadings, ' '--unit=g, -o, vg_name,name,size, --nosuffix, ' 'fake-vg/lv-newerror' in cmd_string): raise processutils.ProcessExecutionError( stderr="Failed to find logical volume \"fake-vg/lv-newerror\"") elif ('env, LC_ALL=C, lvs, --noheadings, ' '--unit=g, -o, vg_name,name,size' in cmd_string): if 'fake-unknown' in cmd_string: raise processutils.ProcessExecutionError( stderr="One or more volume(s) not found." ) if 'test-prov-cap-vg-unit' in cmd_string: data = " fake-vg test-prov-cap-pool-unit 9.50g\n" data += " fake-vg fake-volume-1 1.00g\n" data += " fake-vg fake-volume-2 2.00g\n" elif 'test-prov-cap-vg-no-unit' in cmd_string: data = " fake-vg test-prov-cap-pool-no-unit 9.50\n" data += " fake-vg fake-volume-1 1.00\n" data += " fake-vg fake-volume-2 2.00\n" elif 'test-found-lv-name' in cmd_string: data = " fake-vg test-found-lv-name 9.50\n" else: data = " fake-vg fake-1 1.00g\n" data += " fake-vg fake-2 1.00g\n" elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in cmd_string): if 'test-volumes' in cmd_string: data = ' wi-a-' else: data = ' owi-a-' elif 'env, LC_ALL=C, pvs, --noheadings' in cmd_string: data = " fake-vg|/dev/sda|10.00|1.00\n" data += " fake-vg|/dev/sdb|10.00|1.00\n" data += " fake-vg|/dev/sdc|10.00|8.99\n" data += " fake-vg-2|/dev/sdd|10.00|9.99\n" elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \ ', -o, size,data_percent, --separator, :' in cmd_string: if 'test-prov-cap-pool' in cmd_string: data = " 9.5:20\n" else: data = " 9:12\n" elif 'lvcreate, -T, -L, ' in cmd_string: pass elif 'lvcreate, -T, -V, ' in cmd_string: pass elif 'lvcreate, -n, ' in cmd_string: pass elif 'lvcreate, --name, ' in cmd_string: pass elif 'lvextend, -L, ' in cmd_string: pass else: raise AssertionError('unexpected command called: %s' % cmd_string) return (data, "") def test_create_lv_snapshot(self): self.assertIsNone(self.vg.create_lv_snapshot('snapshot-1', 'fake-1')) self.mox.StubOutWithMock(self.vg, 'get_volume') self.vg.get_volume('fake-non-existent').AndReturn(None) self.mox.ReplayAll() try: self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent') except exception.VolumeDeviceNotFound as e: self.assertEqual('fake-non-existent', e.kwargs['device']) else: self.fail("Exception not raised") def test_vg_exists(self): self.assertTrue(self.vg._vg_exists()) def test_get_vg_uuid(self): self.assertEqual('kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1', self.vg._get_vg_uuid()[0]) def test_get_all_volumes(self): out = self.vg.get_volumes() self.assertEqual('fake-1', out[0]['name']) self.assertEqual('1.00g', out[0]['size']) self.assertEqual('fake-vg', out[0]['vg']) def 
test_get_volume(self): self.assertEqual('fake-1', self.vg.get_volume('fake-1')['name']) def test_get_volume_none(self): self.assertIsNone(self.vg.get_volume('fake-unknown')) def test_get_lv_info_notfound(self): # lv-nothere will raise lvm < 2.102.112 exception self.assertEqual( [], self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='lv-nothere') ) # lv-newerror will raise lvm > 2.102.112 exception self.assertEqual( [], self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='lv-newerror') ) def test_get_lv_info_found(self): lv_info = [{'size': '9.50', 'name': 'test-found-lv-name', 'vg': 'fake-vg'}] self.assertEqual( lv_info, self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='test-found-lv-name') ) def test_get_lv_info_no_lv_name(self): lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'}, {'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}] self.assertEqual( lv_info, self.vg.get_lv_info( 'sudo', vg_name='fake-vg') ) def test_get_all_physical_volumes(self): # Filtered VG version pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg') self.assertEqual(3, len(pvs)) # Non-Filtered, all VG's pvs = self.vg.get_all_physical_volumes('sudo') self.assertEqual(4, len(pvs)) def test_get_physical_volumes(self): pvs = self.vg.get_physical_volumes() self.assertEqual(3, len(pvs)) def test_get_volume_groups(self): self.assertEqual(3, len(self.vg.get_all_volume_groups('sudo'))) self.assertEqual(1, len(self.vg.get_all_volume_groups('sudo', 'fake-vg'))) def test_thin_support(self): # lvm.supports_thin() is a static method and doesn't # use the self._executor fake we pass in on init # so we need to stub processutils.execute appropriately self.stubs.Set(processutils, 'execute', self.fake_execute) self.assertTrue(self.vg.supports_thin_provisioning('sudo')) self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version) self.assertTrue(self.vg.supports_thin_provisioning('sudo')) self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version) self.assertFalse(self.vg.supports_thin_provisioning('sudo')) self.stubs.Set(processutils, 'execute', self.fake_customised_lvm_version) self.assertTrue(self.vg.supports_thin_provisioning('sudo')) def test_snapshot_lv_activate_support(self): self.vg._supports_snapshot_lv_activation = None self.stubs.Set(processutils, 'execute', self.fake_execute) self.assertTrue(self.vg.supports_snapshot_lv_activation) self.vg._supports_snapshot_lv_activation = None self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version) self.assertFalse(self.vg.supports_snapshot_lv_activation) self.vg._supports_snapshot_lv_activation = None def test_lvchange_ignskipact_support_yes(self): """Tests if lvchange -K is available via a lvm2 version check.""" self.vg._supports_lvchange_ignoreskipactivation = None self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version) self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation) self.vg._supports_lvchange_ignoreskipactivation = None self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version) self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation) self.vg._supports_lvchange_ignoreskipactivation = None def test_thin_pool_creation(self): # The size of fake-vg volume group is 10g, so the calculated thin # pool size should be 9.5g (95% of 10g). self.assertEqual("9.5g", self.vg.create_thin_pool()) # Passing a size parameter should result in a thin pool of that exact # size. 
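# create_thin_pool hands the size string to LVM unchanged, which is why
# fractional sizes such as '1.2g' are expected back verbatim below.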
for size in ("1g", "1.2g", "1.75g"): self.assertEqual(size, self.vg.create_thin_pool(size_str=size)) def test_thin_pool_provisioned_capacity(self): self.vg.vg_thin_pool = "test-prov-cap-pool-unit" self.vg.vg_name = 'test-prov-cap-vg-unit' self.assertEqual( "9.5g", self.vg.create_thin_pool(name=self.vg.vg_thin_pool)) self.assertEqual("9.50", self.vg.vg_thin_pool_size) self.assertEqual(7.6, self.vg.vg_thin_pool_free_space) self.assertEqual(3.0, self.vg.vg_provisioned_capacity) self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit" self.vg.vg_name = 'test-prov-cap-vg-no-unit' self.assertEqual( "9.5g", self.vg.create_thin_pool(name=self.vg.vg_thin_pool)) self.assertEqual("9.50", self.vg.vg_thin_pool_size) self.assertEqual(7.6, self.vg.vg_thin_pool_free_space) self.assertEqual(3.0, self.vg.vg_provisioned_capacity) def test_thin_pool_free_space(self): # The size of fake-vg-pool is 9g and the allocated data sums up to # 12% so the calculated free space should be 7.92 self.assertEqual(float("7.92"), self.vg._get_thin_pool_free_space("fake-vg", "fake-vg-pool")) def test_volume_create_after_thin_creation(self): """Test self.vg.vg_thin_pool is set to pool_name See bug #1220286 for more info. """ vg_name = "vg-name" pool_name = vg_name + "-pool" pool_path = "%s/%s" % (vg_name, pool_name) def executor(obj, *cmd, **kwargs): self.assertEqual(pool_path, cmd[-1]) self.vg._executor = executor self.vg.create_thin_pool(pool_name, "1G") self.vg.create_volume("test", "1G", lv_type='thin') self.assertEqual(pool_name, self.vg.vg_thin_pool) def test_lv_has_snapshot(self): self.assertTrue(self.vg.lv_has_snapshot('fake-vg')) self.assertFalse(self.vg.lv_has_snapshot('test-volumes')) def test_activate_lv(self): self.mox.StubOutWithMock(self.vg, '_execute') self.vg._supports_lvchange_ignoreskipactivation = True self.vg._execute('lvchange', '-a', 'y', '--yes', '-K', 'fake-vg/my-lv', root_helper='sudo', run_as_root=True) self.mox.ReplayAll() self.vg.activate_lv('my-lv') self.mox.VerifyAll() def test_get_mirrored_available_capacity(self): self.assertEqual(2.0, self.vg.vg_mirror_free_space(1)) def test_lv_extend(self): self.vg.deactivate_lv = mock.MagicMock() # Extend lv with snapshot and make sure deactivate called self.vg.create_volume("test", "1G") self.vg.extend_volume("test", "2G") self.vg.deactivate_lv.assert_called_once_with('test') self.vg.deactivate_lv.reset_mock() # Extend lv without snapshot so deactivate should not be called self.vg.create_volume("test", "1G") self.vg.vg_name = "test-volumes" self.vg.extend_volume("test", "2G") self.assertFalse(self.vg.deactivate_lv.called) cinder-8.0.0/cinder/tests/unit/brick/__init__.py0000664000567000056710000000000012701406250022677 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/test_sheepdog.py0000664000567000056710000020136212701406257022730 0ustar jenkinsjenkins00000000000000 # Copyright (c) 2013 Zelin.io # Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import errno import mock from oslo_concurrency import processutils from oslo_utils import importutils from oslo_utils import units from cinder.backup import driver as backup_driver from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import test from cinder.tests.unit import fake_backup from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers import sheepdog SHEEP_ADDR = '127.0.0.1' SHEEP_PORT = 7000 class SheepdogDriverTestDataGenerator(object): def __init__(self): self.TEST_VOLUME = self._make_fake_volume(self.TEST_VOL_DATA) self.TEST_CLONED_VOLUME = self._make_fake_volume( self.TEST_CLONED_VOL_DATA) self.TEST_SNAPSHOT = self._make_fake_snapshot( self.TEST_SNAPSHOT_DATA, self.TEST_VOLUME) self.TEST_BACKUP_VOLUME = self._make_fake_backup_volume( self.TEST_BACKUP_VOL_DATA) def sheepdog_cmd_error(self, cmd, exit_code, stdout, stderr): return (('(Command: %(cmd)s) ' '(Return Code: %(exit_code)s) ' '(Stdout: %(stdout)s) ' '(Stderr: %(stderr)s)') % {'cmd': cmd, 'exit_code': exit_code, 'stdout': stdout.replace('\n', '\\n'), 'stderr': stderr.replace('\n', '\\n')}) def _make_fake_volume(self, volume_data): return fake_volume.fake_volume_obj(context.get_admin_context(), **volume_data) def _make_fake_snapshot(self, snapshot_data, src_volume): snapshot_obj = fake_snapshot.fake_snapshot_obj( context.get_admin_context(), **snapshot_data) snapshot_obj.volume = src_volume return snapshot_obj def _make_fake_backup_volume(self, backup_data): return fake_backup.fake_backup_obj(context.get_admin_context(), **backup_data) def cmd_dog_vdi_create(self, name, size): return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'create', name, '%sG' % size, '-a', SHEEP_ADDR, '-p', SHEEP_PORT) def cmd_dog_vdi_delete(self, name): return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'delete', name, '-a', SHEEP_ADDR, '-p', SHEEP_PORT) def cmd_dog_vdi_create_snapshot(self, vdiname, snapname): return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'snapshot', '-s', snapname, '-a', SHEEP_ADDR, '-p', SHEEP_PORT, vdiname) def cmd_dog_vdi_delete_snapshot(self, vdiname, snapname): return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'delete', '-s', snapname, '-a', SHEEP_ADDR, '-p', SHEEP_PORT, vdiname) def cmd_qemuimg_vdi_clone(self, src_vdiname, src_snapname, dst_vdiname, size): return ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-b', 'sheepdog:%(addr)s:%(port)s:%(src_vdiname)s:%(src_snapname)s' % {'addr': SHEEP_ADDR, 'port': SHEEP_PORT, 'src_vdiname': src_vdiname, 'src_snapname': src_snapname}, 'sheepdog:%(addr)s:%(port)s:%(dst_vdiname)s' % {'addr': SHEEP_ADDR, 'port': SHEEP_PORT, 'dst_vdiname': dst_vdiname}, '%sG' % size) def cmd_dog_vdi_resize(self, name, size): return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'resize', name, size, '-a', SHEEP_ADDR, '-p', SHEEP_PORT) def cmd_dog_node_info(self): return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'node', 'info', '-a', SHEEP_ADDR, '-p', SHEEP_PORT, '-r') CMD_DOG_CLUSTER_INFO = ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'cluster', 'info', '-a', SHEEP_ADDR, '-p', SHEEP_PORT) TEST_VOL_DATA = { 'size': 1, 'id': '00000000-0000-0000-0000-000000000001', 'provider_auth': None, 'host': 'host@backendsec#unit_test_pool', 'project_id': 'project', 'provider_location': 'location', 'display_name': 'vol1', 'display_description': 'unit test volume', 'volume_type_id': 
None, 'consistencygroup_id': None, } TEST_CLONED_VOL_DATA = { 'size': 2, 'id': '00000000-0000-0000-0000-000000000003', 'provider_auth': None, 'host': 'host@backendsec#unit_test_pool', 'project_id': 'project', 'provider_location': 'location', 'display_name': 'vol3', 'display_description': 'unit test cloned volume', 'volume_type_id': None, 'consistencygroup_id': None, } TEST_SNAPSHOT_DATA = { 'id': '00000000-0000-0000-0000-000000000002', } TEST_BACKUP_VOL_DATA = { 'volume_id': '00000000-0000-0000-0000-000000000001', } COLLIE_NODE_INFO = """ 0 107287605248 3623897354 3% Total 107287605248 3623897354 3% 54760833024 """ COLLIE_CLUSTER_INFO_0_5 = """\ Cluster status: running Cluster created at Tue Jun 25 19:51:41 2013 Epoch Time Version 2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002] """ COLLIE_CLUSTER_INFO_0_6 = """\ Cluster status: running, auto-recovery enabled Cluster created at Tue Jun 25 19:51:41 2013 Epoch Time Version 2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002] """ DOG_CLUSTER_RUNNING = """\ Cluster status: running, auto-recovery enabled Cluster created at Thu Jun 18 17:24:56 2015 Epoch Time Version [Host:Port:V-Nodes,,,] 2015-06-18 17:24:56 1 [127.0.0.1:7000:128, 127.0.0.1:7001:128,\ 127.0.0.1:7002:128] """ DOG_CLUSTER_INFO_TO_BE_FORMATTED = """\ Cluster status: Waiting for cluster to be formatted """ DOG_CLUSTER_INFO_WAITING_OTHER_NODES = """\ Cluster status: Waiting for other nodes to join cluster Cluster created at Thu Jun 18 17:24:56 2015 Epoch Time Version [Host:Port:V-Nodes,,,] 2015-06-18 17:24:56 1 [127.0.0.1:7000:128, 127.0.0.1:7001:128] """ DOG_CLUSTER_INFO_SHUTTING_DOWN = """\ Cluster status: System is shutting down """ DOG_VDI_CREATE_VDI_ALREADY_EXISTS = """\ Failed to create VDI %(vdiname)s: VDI exists already """ DOG_VDI_SNAPSHOT_VDI_NOT_FOUND = """\ Failed to create snapshot for volume-00000000-0000-0000-0000-000000000001: \ No VDI found """ DOG_VDI_SNAPSHOT_ALREADY_EXISTED = """\ Failed to create snapshot for volume-00000000-0000-0000-0000-000000000001, \ maybe snapshot id (0) or tag (snapshot-00000000-0000-0000-0000-000000000002) \ is existed """ DOG_VDI_SNAPSHOT_TAG_NOT_FOUND = """\ Failed to open VDI volume-00000000-0000-0000-0000-000000000001 \ (snapshot id: 0 snapshot tag: snapshot-00000000-0000-0000-0000-000000000002): \ Failed to find requested tag """ DOG_VDI_SNAPSHOT_VOLUME_NOT_FOUND = """\ Failed to open VDI volume-00000000-0000-0000-0000-000000000001 \ (snapshot id: 0 snapshot tag: snapshot-00000000-0000-0000-0000-000000000002): \ No VDI found """ DOG_VDI_RESIZE_SIZE_SHRINK = """\ Shrinking VDIs is not implemented """ DOG_VDI_RESIZE_TOO_LARGE = """\ New VDI size is too large. 
This volume's max size is 4398046511104 """ DOG_COMMAND_ERROR_VDI_NOT_EXISTS = """\ Failed to open VDI %(vdiname)s (snapshot id: 0 snapshot tag: ): No VDI found """ DOG_COMMAND_ERROR_FAIL_TO_CONNECT = """\ failed to connect to 127.0.0.1:7000: Connection refused failed to connect to 127.0.0.1:7000: Connection refused Failed to get node list """ QEMU_IMG_VDI_ALREADY_EXISTS = """\ qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000001: \ VDI exists already, """ QEMU_IMG_VDI_NOT_FOUND = """\ qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000003: \ cannot get vdi info, No vdi found, \ volume-00000000-0000-0000-0000-000000000001 \ snapshot-00000000-0000-0000-0000-000000000002 """ QEMU_IMG_SNAPSHOT_NOT_FOUND = """\ qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000003: \ cannot get vdi info, Failed to find the requested tag, \ volume-00000000-0000-0000-0000-000000000001 \ snapshot-00000000-0000-0000-0000-000000000002 """ QEMU_IMG_SIZE_TOO_LARGE = """\ qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000001: \ An image is too large. The maximum image size is 4096GB """ QEMU_IMG_FAILED_TO_CONNECT = """\ qemu-img: sheepdog::volume-00000000-0000-0000-0000-000000000001: \ Failed to connect socket: Connection refused """ class FakeImageService(object): def download(self, context, image_id, path): pass class SheepdogIOWrapperTestCase(test.TestCase): def setUp(self): super(SheepdogIOWrapperTestCase, self).setUp() self.volume = {'name': 'volume-2f9b2ff5-987b-4412-a91c-23caaf0d5aff'} self.snapshot_name = 'snapshot-bf452d80-068a-43d7-ba9f-196cf47bd0be' self.vdi_wrapper = sheepdog.SheepdogIOWrapper( SHEEP_ADDR, SHEEP_PORT, self.volume) self.snapshot_wrapper = sheepdog.SheepdogIOWrapper( SHEEP_ADDR, SHEEP_PORT, self.volume, self.snapshot_name) self.execute = mock.MagicMock() self.mock_object(processutils, 'execute', self.execute) def test_init(self): self.assertEqual(self.volume['name'], self.vdi_wrapper._vdiname) self.assertIsNone(self.vdi_wrapper._snapshot_name) self.assertEqual(0, self.vdi_wrapper._offset) self.assertEqual(self.snapshot_name, self.snapshot_wrapper._snapshot_name) def test_execute(self): cmd = ('cmd1', 'arg1') data = 'data1' self.vdi_wrapper._execute(cmd, data) self.execute.assert_called_once_with(*cmd, process_input=data) def test_execute_error(self): cmd = ('cmd1', 'arg1') data = 'data1' self.mock_object(processutils, 'execute', mock.MagicMock(side_effect=OSError)) args = (cmd, data) self.assertRaises(exception.VolumeDriverException, self.vdi_wrapper._execute, *args) def test_read_vdi(self): self.vdi_wrapper.read() self.execute.assert_called_once_with( 'dog', 'vdi', 'read', '-a', SHEEP_ADDR, '-p', SHEEP_PORT, self.volume['name'], 0, process_input=None) def test_read_vdi_invalid(self): self.vdi_wrapper._valid = False self.assertRaises(exception.VolumeDriverException, self.vdi_wrapper.read) def test_write_vdi(self): data = 'data1' self.vdi_wrapper.write(data) self.execute.assert_called_once_with( 'dog', 'vdi', 'write', '-a', SHEEP_ADDR, '-p', SHEEP_PORT, self.volume['name'], 0, len(data), process_input=data) self.assertEqual(len(data), self.vdi_wrapper.tell()) def test_write_vdi_invalid(self): self.vdi_wrapper._valid = False self.assertRaises(exception.VolumeDriverException, self.vdi_wrapper.write, 'dummy_data') def test_read_snapshot(self): self.snapshot_wrapper.read() self.execute.assert_called_once_with( 'dog', 'vdi', 'read', '-a', SHEEP_ADDR, '-p', SHEEP_PORT, '-s', self.snapshot_name, self.volume['name'], 0, process_input=None) def test_seek(self): 
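# SheepdogIOWrapper mimics a Python file object: seek() with the default
# whence=0 is absolute, whence=1 is relative to the current offset, and
# seeking to a negative offset raises IOError, as asserted below.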
self.vdi_wrapper.seek(12345) self.assertEqual(12345, self.vdi_wrapper.tell()) self.vdi_wrapper.seek(-2345, whence=1) self.assertEqual(10000, self.vdi_wrapper.tell()) # This results in negative offset. self.assertRaises(IOError, self.vdi_wrapper.seek, -20000, whence=1) def test_seek_invalid(self): seek_num = 12345 self.vdi_wrapper._valid = False self.assertRaises(exception.VolumeDriverException, self.vdi_wrapper.seek, seek_num) def test_flush(self): # flush does nothing. self.vdi_wrapper.flush() self.assertFalse(self.execute.called) def test_fileno(self): self.assertRaises(IOError, self.vdi_wrapper.fileno) class SheepdogClientTestCase(test.TestCase): def setUp(self): super(SheepdogClientTestCase, self).setUp() self._cfg = conf.Configuration(None) self._cfg.sheepdog_store_address = SHEEP_ADDR self._cfg.sheepdog_store_port = SHEEP_PORT self.driver = sheepdog.SheepdogDriver(configuration=self._cfg) db_driver = self.driver.configuration.db_driver self.db = importutils.import_module(db_driver) self.driver.db = self.db self.driver.do_setup(None) self.test_data = SheepdogDriverTestDataGenerator() self.client = self.driver.client self._addr = SHEEP_ADDR self._port = SHEEP_PORT self._vdiname = self.test_data.TEST_VOLUME.name self._vdisize = self.test_data.TEST_VOLUME.size self._src_vdiname = self.test_data.TEST_SNAPSHOT.volume_name self._snapname = self.test_data.TEST_SNAPSHOT.name self._dst_vdiname = self.test_data.TEST_CLONED_VOLUME.name self._dst_vdisize = self.test_data.TEST_CLONED_VOLUME.size @mock.patch.object(utils, 'execute') def test_run_dog_success(self, fake_execute): args = ('cluster', 'info') expected_cmd = self.test_data.CMD_DOG_CLUSTER_INFO fake_execute.return_value = ('', '') self.client._run_dog(*args) fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(utils, 'execute') @mock.patch.object(sheepdog, 'LOG') def test_run_dog_command_not_found(self, fake_logger, fake_execute): args = ('cluster', 'info') expected_msg = 'No such file or directory' expected_errno = errno.ENOENT fake_execute.side_effect = OSError(expected_errno, expected_msg) self.assertRaises(OSError, self.client._run_dog, *args) self.assertTrue(fake_logger.error.called) @mock.patch.object(utils, 'execute') @mock.patch.object(sheepdog, 'LOG') def test_run_dog_operation_not_permitted(self, fake_logger, fake_execute): args = ('cluster', 'info') expected_msg = 'Operation not permitted' expected_errno = errno.EPERM fake_execute.side_effect = OSError(expected_errno, expected_msg) self.assertRaises(OSError, self.client._run_dog, *args) self.assertTrue(fake_logger.error.called) @mock.patch.object(utils, 'execute') @mock.patch.object(sheepdog, 'LOG') def test_run_dog_fail_to_connect(self, fake_logger, fake_execute): args = ('cluster', 'info') cmd = self.test_data.CMD_DOG_CLUSTER_INFO exit_code = 2 stdout = 'stdout dummy' stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT expected_reason = (_('Failed to connect to sheep daemon. 
' 'addr: %(addr)s, port: %(port)s'), {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}) fake_execute.side_effect = processutils.ProcessExecutionError( cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) ex = self.assertRaises(exception.SheepdogError, self.client._run_dog, *args) self.assertEqual(expected_reason, ex.kwargs['reason']) @mock.patch.object(utils, 'execute') @mock.patch.object(sheepdog, 'LOG') def test_run_dog_unknown_error(self, fake_logger, fake_execute): args = ('cluster', 'info') cmd = self.test_data.CMD_DOG_CLUSTER_INFO exit_code = 1 stdout = 'stdout dummy' stderr = 'stderr dummy' expected_msg = self.test_data.sheepdog_cmd_error( cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = processutils.ProcessExecutionError( cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) ex = self.assertRaises(exception.SheepdogCmdError, self.client._run_dog, *args) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(utils, 'execute') def test_run_qemu_img_success(self, fake_execute): # multiple parts of the args match the 'sheepdog:' prefix and, # unfortunately, so does the volume name itself expected_cmd = ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-b', 'sheepdog:%(addr)s:%(port)s:sheepdog:snap' % {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}, 'sheepdog:%(addr)s:%(port)s:clone' % {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}, '10G') fake_execute.return_value = ('', '') self.client._run_qemu_img('create', '-b', 'sheepdog:sheepdog:snap', 'sheepdog:clone', '10G') fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(utils, 'execute') @mock.patch.object(sheepdog, 'LOG') def test_run_qemu_img_command_not_found(self, fake_logger, fake_execute): args = ('create', 'dummy') expected_msg = 'No such file or directory' expected_errno = errno.ENOENT fake_execute.side_effect = OSError(expected_errno, expected_msg) self.assertRaises(OSError, self.client._run_qemu_img, *args) self.assertTrue(fake_logger.error.called) @mock.patch.object(utils, 'execute') @mock.patch.object(sheepdog, 'LOG') def test_run_qemu_img_unknown_os_error(self, fake_logger, fake_execute): args = ('create', 'dummy') expected_msg = 'unknown' expected_errno = errno.EPERM fake_execute.side_effect = OSError(expected_errno, expected_msg) self.assertRaises(OSError, self.client._run_qemu_img, *args) self.assertTrue(fake_logger.error.called) @mock.patch.object(utils, 'execute') @mock.patch.object(sheepdog, 'LOG') def test_run_qemu_img_execution_error(self, fake_logger, fake_execute): args = ('create', 'dummy') cmd = ('qemu-img', 'create', 'dummy') exit_code = 1 stdout = 'stdout dummy' stderr = 'stderr dummy' expected_msg = self.test_data.sheepdog_cmd_error( cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = processutils.ProcessExecutionError( cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) ex = self.assertRaises(exception.SheepdogCmdError, self.client._run_qemu_img, *args) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_check_cluster_status_success(self, fake_logger, fake_execute): stdout = self.test_data.DOG_CLUSTER_RUNNING stderr = '' expected_cmd = ('cluster', 'info') fake_execute.return_value = (stdout, stderr) self.client.check_cluster_status() fake_execute.assert_called_once_with(*expected_cmd) self.assertTrue(fake_logger.debug.called) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def 
test_check_cluster_status_v0_5(self, fake_execute): stdout = self.test_data.COLLIE_CLUSTER_INFO_0_5 stderr = '' fake_execute.return_value = (stdout, stderr) self.client.check_cluster_status() @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_check_cluster_status_v0_6(self, fake_execute): stdout = self.test_data.COLLIE_CLUSTER_INFO_0_6 stderr = '' fake_execute.return_value = (stdout, stderr) self.client.check_cluster_status() @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_check_cluster_status_not_formatted(self, fake_logger, fake_execute): stdout = self.test_data.DOG_CLUSTER_INFO_TO_BE_FORMATTED stderr = '' expected_reason = _('Cluster is not formatted. ' 'You should probably perform ' '"dog cluster format".') fake_execute.return_value = (stdout, stderr) ex = self.assertRaises(exception.SheepdogError, self.client.check_cluster_status) self.assertEqual(expected_reason, ex.kwargs['reason']) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_check_cluster_status_waiting_to_join_cluster(self, fake_logger, fake_execute): stdout = self.test_data.DOG_CLUSTER_INFO_WAITING_OTHER_NODES stderr = '' expected_reason = _('Waiting for all nodes to join cluster. ' 'Ensure all sheep daemons are running.') fake_execute.return_value = (stdout, stderr) ex = self.assertRaises(exception.SheepdogError, self.client.check_cluster_status) self.assertEqual(expected_reason, ex.kwargs['reason']) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_check_cluster_status_shutting_down(self, fake_logger, fake_execute): stdout = self.test_data.DOG_CLUSTER_INFO_SHUTTING_DOWN stderr = '' expected_reason = _('Invalid sheepdog cluster status.') fake_execute.return_value = (stdout, stderr) ex = self.assertRaises(exception.SheepdogError, self.client.check_cluster_status) self.assertEqual(expected_reason, ex.kwargs['reason']) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_check_cluster_status_unknown_error(self, fake_logger, fake_execute): cmd = self.test_data.CMD_DOG_CLUSTER_INFO exit_code = 2 stdout = 'stdout_dummy' stderr = 'stdout_dummy' expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) ex = self.assertRaises(exception.SheepdogCmdError, self.client.check_cluster_status) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_create_success(self, fake_execute): expected_cmd = ('vdi', 'create', self._vdiname, '%sG' % self._vdisize) fake_execute.return_value = ('', '') self.client.create(self._vdiname, self._vdisize) fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_create_vdi_already_exists(self, fake_logger, fake_execute): cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize) exit_code = 1 stdout = '' stderr = (self.test_data.DOG_VDI_CREATE_VDI_ALREADY_EXISTS % {'vdiname': self._vdiname}) expected_msg = self.test_data.sheepdog_cmd_error( cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = 
self.assertRaises(exception.SheepdogCmdError, self.client.create, self._vdiname, self._vdisize) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_create_unknown_error(self, fake_logger, fake_execute): cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize) exit_code = 1 stdout = 'stdout_dummy' stderr = 'stderr_dummy' expected_msg = self.test_data.sheepdog_cmd_error( cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.create, self._vdiname, self._vdisize) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_delete_success(self, fake_execute): expected_cmd = ('vdi', 'delete', self._vdiname) fake_execute.return_value = ('', '') self.client.delete(self._vdiname) fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_delete_vdi_not_found(self, fake_logger, fake_execute): stdout = '' stderr = (self.test_data.DOG_COMMAND_ERROR_VDI_NOT_EXISTS % {'vdiname': self._vdiname}) fake_execute.return_value = (stdout, stderr) self.client.delete(self._vdiname) self.assertTrue(fake_logger.warning.called) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_delete_fail_to_connect_bugcase(self, fake_execute): # NOTE(tishizaki): Sheepdog's bug case. # details are written to Sheepdog driver code. stdout = '' stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT expected_reason = (_('Failed to connect to sheep daemon. 
' 'addr: %(addr)s, port: %(port)s'), {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}) fake_execute.return_value = (stdout, stderr) ex = self.assertRaises(exception.SheepdogError, self.client.delete, self._vdiname) self.assertEqual(expected_reason, ex.kwargs['reason']) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_delete_unknown_error(self, fake_logger, fake_execute): cmd = self.test_data.cmd_dog_vdi_delete(self._vdiname) exit_code = 2 stdout = 'stdout_dummy' stderr = 'stderr_dummy' expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.delete, self._vdiname) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_create_snapshot_success(self, fake_execute): args = (self._src_vdiname, self._snapname) expected_cmd = ('vdi', 'snapshot', '-s', self._snapname, self._src_vdiname) fake_execute.return_value = ('', '') self.client.create_snapshot(*args) fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_create_snapshot_vdi_not_found(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname) cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args) exit_code = 1 stdout = '' stderr = self.test_data.DOG_VDI_SNAPSHOT_VDI_NOT_FOUND expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.create_snapshot, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_create_snapshot_snap_name_already_used(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname) cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args) exit_code = 1 stdout = 'stdout_dummy' stderr = self.test_data.DOG_VDI_SNAPSHOT_ALREADY_EXISTED expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.create_snapshot, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_create_snapshot_unknown_error(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname) cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args) exit_code = 1 stdout = 'stdout_dummy' stderr = 'unknown_error' expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, 
self.client.create_snapshot, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_delete_snapshot_success(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname) expected_cmd = ('vdi', 'delete', '-s', self._snapname, self._src_vdiname) fake_execute.return_value = ('', '') self.client.delete_snapshot(*args) fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_delete_snapshot_not_found(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname) stdout = '' stderr = self.test_data.DOG_VDI_SNAPSHOT_TAG_NOT_FOUND fake_execute.return_value = (stdout, stderr) self.client.delete_snapshot(*args) self.assertTrue(fake_logger.warning.called) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_delete_snapshot_vdi_not_found(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname) stdout = '' stderr = self.test_data.DOG_VDI_SNAPSHOT_VOLUME_NOT_FOUND fake_execute.return_value = (stdout, stderr) self.client.delete_snapshot(*args) self.assertTrue(fake_logger.warning.called) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_delete_snapshot_fail_to_connect_bugcase(self, fake_logger, fake_execute): # NOTE(tishizaki): Sheepdog's bug case. # details are written to Sheepdog driver code. args = (self._src_vdiname, self._snapname) stdout = '' stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT expected_reason = (_('Failed to connect to sheep daemon. ' 'addr: %(addr)s, port: %(port)s'), {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}) fake_execute.return_value = (stdout, stderr) ex = self.assertRaises(exception.SheepdogError, self.client.delete_snapshot, *args) self.assertEqual(expected_reason, ex.kwargs['reason']) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_delete_snapshot_unknown_error(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname) cmd = self.test_data.cmd_dog_vdi_delete_snapshot(*args) exit_code = 2 stdout = 'stdout_dummy' stderr = 'unknown_error' expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.delete_snapshot, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') def test_clone_success(self, fake_execute): args = (self._src_vdiname, self._snapname, self._dst_vdiname, self._dst_vdisize) src_volume = 'sheepdog:%(src_vdiname)s:%(snapname)s' % { 'src_vdiname': self._src_vdiname, 'snapname': self._snapname} dst_volume = 'sheepdog:%s' % self._dst_vdiname expected_cmd = ('create', '-b', src_volume, dst_volume, '%sG' % self._dst_vdisize) fake_execute.return_code = ("", "") self.client.clone(*args) fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') @mock.patch.object(sheepdog, 'LOG') def test_clone_fail_to_connect(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname, self._dst_vdiname, 
self._dst_vdisize) cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) exit_code = 2 stdout = 'stdout_dummy' stderr = self.test_data.QEMU_IMG_FAILED_TO_CONNECT expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') @mock.patch.object(sheepdog, 'LOG') def test_clone_dst_vdi_already_exists(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname, self._dst_vdiname, self._dst_vdisize) cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) exit_code = 2 stdout = 'stdout_dummy' stderr = self.test_data.QEMU_IMG_VDI_ALREADY_EXISTS expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') @mock.patch.object(sheepdog, 'LOG') def test_clone_src_vdi_not_found(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname, self._dst_vdiname, self._dst_vdisize) cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) exit_code = 2 stdout = 'stdout_dummy' stderr = self.test_data.QEMU_IMG_VDI_NOT_FOUND expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') @mock.patch.object(sheepdog, 'LOG') def test_clone_src_snapshot_not_found(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname, self._dst_vdiname, self._dst_vdisize) cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) exit_code = 2 stdout = 'stdout_dummy' stderr = self.test_data.QEMU_IMG_SNAPSHOT_NOT_FOUND expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') @mock.patch.object(sheepdog, 'LOG') def test_clone_too_large_volume_size(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname, self._dst_vdiname, self._dst_vdisize) cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) exit_code = 2 stdout = 'stdout_dummy' stderr = self.test_data.QEMU_IMG_SIZE_TOO_LARGE expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = 
exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') @mock.patch.object(sheepdog, 'LOG') def test_clone_unknown_error(self, fake_logger, fake_execute): args = (self._src_vdiname, self._snapname, self._dst_vdiname, self._dst_vdisize) cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) exit_code = 2 stdout = 'stdout_dummy' stderr = 'stderr_dummy' expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, *args) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_resize_success(self, fake_execute): expected_cmd = ('vdi', 'resize', self._vdiname, 10 * 1024 ** 3) fake_execute.return_value = ('', '') self.client.resize(self._vdiname, 10) fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_resize_vdi_not_found(self, fake_logger, fake_execute): cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 10 * 1024 ** 3) exit_code = 1 stdout = 'stdout_dummy' stderr = (self.test_data.DOG_COMMAND_ERROR_VDI_NOT_EXISTS % {'vdiname': self._vdiname}) expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.resize, self._vdiname, 1) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_resize_shrinking_not_supported(self, fake_logger, fake_execute): cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 1 * 1024 ** 3) exit_code = 1 stdout = 'stdout_dummy' stderr = self.test_data.DOG_VDI_RESIZE_SIZE_SHRINK expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.resize, self._vdiname, 1) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_resize_too_large_size(self, fake_logger, fake_execute): cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 5 * 1024 ** 4) exit_code = 64 stdout = 'stdout_dummy' stderr = self.test_data.DOG_VDI_RESIZE_TOO_LARGE expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, 
self.client.resize, self._vdiname, 5120) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_resize_unknown_error(self, fake_logger, fake_execute): cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 10 * 1024 ** 3) exit_code = 2 stdout = 'stdout_dummy' stderr = 'stderr_dummy' expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.resize, self._vdiname, 10) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_get_volume_stats_success(self, fake_execute): expected_cmd = ('node', 'info', '-r') fake_execute.return_value = (self.test_data.COLLIE_NODE_INFO, '') self.client.get_volume_stats() fake_execute.assert_called_once_with(*expected_cmd) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') @mock.patch.object(sheepdog, 'LOG') def test_get_volume_stats_unknown_error(self, fake_logger, fake_execute): cmd = self.test_data.cmd_dog_node_info() exit_code = 2 stdout = 'stdout_dummy' stderr = 'stderr_dummy' expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) fake_execute.side_effect = exception.SheepdogCmdError( cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), stderr=stderr.replace('\n', '\\n')) ex = self.assertRaises(exception.SheepdogCmdError, self.client.get_volume_stats) self.assertTrue(fake_logger.error.called) self.assertEqual(expected_msg, ex.msg) class SheepdogDriverTestCase(test.TestCase): def setUp(self): super(SheepdogDriverTestCase, self).setUp() self._cfg = conf.Configuration(None) self._cfg.sheepdog_store_address = SHEEP_ADDR self._cfg.sheepdog_store_port = SHEEP_PORT self.driver = sheepdog.SheepdogDriver(configuration=self._cfg) db_driver = self.driver.configuration.db_driver self.db = importutils.import_module(db_driver) self.driver.db = self.db self.driver.do_setup(None) self.test_data = SheepdogDriverTestDataGenerator() self.client = self.driver.client self._addr = SHEEP_ADDR self._port = SHEEP_PORT self._vdiname = self.test_data.TEST_VOLUME.name self._vdisize = self.test_data.TEST_VOLUME.size self._src_vdiname = self.test_data.TEST_SNAPSHOT.volume_name self._snapname = self.test_data.TEST_SNAPSHOT.name self._dst_vdiname = self.test_data.TEST_CLONED_VOLUME.name self._dst_vdisize = self.test_data.TEST_CLONED_VOLUME.size @mock.patch.object(sheepdog.SheepdogClient, 'check_cluster_status') def test_check_for_setup_error(self, fake_execute): self.driver.check_for_setup_error() fake_execute.assert_called_once_with() @mock.patch.object(sheepdog.SheepdogClient, 'create') def test_create_volume(self, fake_execute): self.driver.create_volume(self.test_data.TEST_VOLUME) fake_execute.assert_called_once_with(self._vdiname, self._vdisize) @mock.patch.object(sheepdog.SheepdogClient, 'delete') def test_delete_volume(self, fake_execute): self.driver.delete_volume(self.test_data.TEST_VOLUME) fake_execute.assert_called_once_with(self._vdiname) @mock.patch.object(sheepdog.SheepdogClient, 'get_volume_stats') def test_update_volume_stats(self, fake_execute): fake_execute.return_value = self.test_data.COLLIE_NODE_INFO expected = dict( 
volume_backend_name='sheepdog', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='sheepdog', total_capacity_gb=float(107287605248) / units.Gi, free_capacity_gb=float(107287605248 - 3623897354) / units.Gi, reserved_percentage=0, QoS_support=False) actual = self.driver.get_volume_stats(True) self.assertDictMatch(expected, actual) @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') def test_copy_image_to_volume(self, fake_run_dog): @contextlib.contextmanager def fake_temp_file(): class FakeTmp(object): def __init__(self, name): self.name = name yield FakeTmp('test').name def fake_try_execute(obj, *command, **kwargs): return True self.stubs.Set(image_utils, 'temporary_file', fake_temp_file) self.stubs.Set(image_utils, 'fetch_verify_image', lambda w, x, y, z: None) self.stubs.Set(image_utils, 'convert_image', lambda x, y, z: None) self.stubs.Set(sheepdog.SheepdogDriver, '_try_execute', fake_try_execute) fake_run_dog.return_value = ('fake_stdout', 'fake_stderr') self.driver.copy_image_to_volume(None, self.test_data.TEST_VOLUME, FakeImageService(), None) def test_copy_volume_to_image(self): fake_context = {} fake_volume = {'name': 'volume-00000001'} fake_image_service = mock.Mock() fake_image_service_update = mock.Mock() fake_image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} patch = mock.patch.object with patch(self.driver, '_try_execute') as fake_try_execute: with patch(fake_image_service, 'update') as fake_image_service_update: self.driver.copy_volume_to_image(fake_context, fake_volume, fake_image_service, fake_image_meta) expected_cmd = ('qemu-img', 'convert', '-f', 'raw', '-t', 'none', '-O', 'raw', 'sheepdog:%s:%s:%s' % ( self._addr, self._port, fake_volume['name']), mock.ANY) fake_try_execute.assert_called_once_with(*expected_cmd) fake_image_service_update.assert_called_once_with( fake_context, fake_image_meta['id'], mock.ANY, mock.ANY) def test_copy_volume_to_image_nonexistent_volume(self): fake_context = {} fake_volume = { 'name': 'nonexistent-volume-82c4539e-c2a5-11e4-a293-0aa186c60fe0'} fake_image_service = mock.Mock() fake_image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} # The command is expected to fail, so we don't want to retry it. 
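# (_try_execute retries failed commands until they succeed; binding it to
# the plain _execute lets the expected failure surface on the first try.)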
self.driver._try_execute = self.driver._execute args = (fake_context, fake_volume, fake_image_service, fake_image_meta) expected_errors = (processutils.ProcessExecutionError, OSError) self.assertRaises(expected_errors, self.driver.copy_volume_to_image, *args) @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') @mock.patch.object(sheepdog.SheepdogClient, 'clone') @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') def test_create_cloned_volume(self, fake_delete_snapshot, fake_clone, fake_create_snapshot): src_vol = self.test_data.TEST_VOLUME cloned_vol = self.test_data.TEST_CLONED_VOLUME self.driver.create_cloned_volume(cloned_vol, src_vol) snapshot_name = src_vol.name + '-temp-snapshot' fake_create_snapshot.assert_called_once_with(src_vol.name, snapshot_name) fake_clone.assert_called_once_with(src_vol.name, snapshot_name, cloned_vol.name, cloned_vol.size) fake_delete_snapshot.assert_called_once_with(src_vol.name, snapshot_name) @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') @mock.patch.object(sheepdog.SheepdogClient, 'clone') @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') @mock.patch.object(sheepdog, 'LOG') def test_create_cloned_volume_failure(self, fake_logger, fake_delete_snapshot, fake_clone, fake_create_snapshot): src_vol = self.test_data.TEST_VOLUME cloned_vol = self.test_data.TEST_CLONED_VOLUME snapshot_name = src_vol.name + '-temp-snapshot' fake_clone.side_effect = exception.SheepdogCmdError( cmd='dummy', exit_code=1, stdout='dummy', stderr='dummy') self.assertRaises(exception.SheepdogCmdError, self.driver.create_cloned_volume, cloned_vol, src_vol) fake_delete_snapshot.assert_called_once_with(src_vol.name, snapshot_name) self.assertTrue(fake_logger.error.called) @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') def test_create_snapshot(self, fake_create_snapshot): snapshot = self.test_data.TEST_SNAPSHOT self.driver.create_snapshot(snapshot) fake_create_snapshot.assert_called_once_with(snapshot.volume_name, snapshot.name) @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') def test_delete_snapshot(self, fake_delete_snapshot): snapshot = self.test_data.TEST_SNAPSHOT self.driver.delete_snapshot(snapshot) fake_delete_snapshot.assert_called_once_with(snapshot.volume_name, snapshot.name) def test_clone_image_success(self): context = {} image_id = "caa4ffd0-fake-fake-fake-f8631a807f5a" image_location = ('sheepdog://192.168.1.111:7000:%s' % image_id, None) image_meta = {'id': image_id, 'size': 1, 'disk_format': 'raw'} image_service = '' patch = mock.patch.object with patch(self.driver, '_try_execute', return_value=True): with patch(self.driver, 'create_cloned_volume'): with patch(self.client, 'resize'): model_updated, cloned = self.driver.clone_image( context, self.test_data.TEST_CLONED_VOLUME, image_location, image_meta, image_service) self.assertTrue(cloned) self.assertEqual("sheepdog:%s:%s:%s" % ( self._addr, self._port, self.test_data.TEST_CLONED_VOLUME.name), model_updated['provider_location']) def test_clone_image_failure(self): context = {} fake_vol = {} image_location = ('image_location', None) image_meta = {} image_service = '' with mock.patch.object(self.driver, '_is_cloneable', lambda *args: False): result = self.driver.clone_image( context, fake_vol, image_location, image_meta, image_service) self.assertEqual(({}, False), result) def test_is_cloneable(self): uuid = '87f1b01c-f46c-4537-bd5d-23962f5f4316' location = 'sheepdog://ip:port:%s' % uuid image_meta = {'id': uuid, 'size': 1, 'disk_format': 
'raw'} invalid_image_meta = {'id': uuid, 'size': 1, 'disk_format': 'iso'} with mock.patch.object(self.driver, '_try_execute') as try_execute: self.assertTrue( self.driver._is_cloneable(location, image_meta)) expected_cmd = ('collie', 'vdi', 'list', '--address', 'ip', '--port', 'port', uuid) try_execute.assert_called_once_with(*expected_cmd) # check returning False without executing a command self.assertFalse( self.driver._is_cloneable('invalid-location', image_meta)) self.assertFalse( self.driver._is_cloneable(location, invalid_image_meta)) self.assertEqual(1, try_execute.call_count) error = processutils.ProcessExecutionError with mock.patch.object(self.driver, '_try_execute', side_effect=error) as fail_try_execute: self.assertFalse( self.driver._is_cloneable(location, image_meta)) fail_try_execute.assert_called_once_with(*expected_cmd) def test_create_volume_from_snapshot(self): dst_volume = self.test_data.TEST_CLONED_VOLUME snapshot = self.test_data.TEST_SNAPSHOT with mock.patch.object(self.client, 'clone') as fake_execute: self.driver.create_volume_from_snapshot(dst_volume, snapshot) fake_execute.assert_called_once_with(self._src_vdiname, self._snapname, self._dst_vdiname, self._dst_vdisize) def test_initialize_connection(self): fake_volume = self.test_data.TEST_VOLUME expected = { 'driver_volume_type': 'sheepdog', 'data': { 'name': fake_volume.name, 'hosts': ["127.0.0.1"], 'ports': ["7000"], } } actual = self.driver.initialize_connection(fake_volume, None) self.assertDictMatch(expected, actual) @mock.patch.object(sheepdog.SheepdogClient, 'resize') @mock.patch.object(sheepdog, 'LOG') def test_extend_volume(self, fake_logger, fake_execute): self.driver.extend_volume(self.test_data.TEST_VOLUME, 10) fake_execute.assert_called_once_with(self._vdiname, 10) self.assertTrue(fake_logger.debug.called) @mock.patch.object(db, 'volume_get') @mock.patch.object(sheepdog.SheepdogDriver, '_try_execute') @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') @mock.patch.object(backup_driver, 'BackupDriver') @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') def test_backup_volume_success(self, fake_delete_snapshot, fake_backup_service, fake_create_snapshot, fake_execute, fake_volume_get): fake_context = {} fake_volume = self.test_data.TEST_VOLUME fake_backup = self.test_data.TEST_BACKUP_VOLUME fake_backup_service = mock.Mock() fake_volume_get.return_value = fake_volume self.driver.backup_volume(fake_context, fake_backup, fake_backup_service) self.assertEqual(1, fake_create_snapshot.call_count) self.assertEqual(2, fake_delete_snapshot.call_count) self.assertEqual(fake_create_snapshot.call_args, fake_delete_snapshot.call_args) call_args, call_kwargs = fake_backup_service.backup.call_args call_backup, call_sheepdog_fd = call_args self.assertEqual(fake_backup, call_backup) self.assertIsInstance(call_sheepdog_fd, sheepdog.SheepdogIOWrapper) @mock.patch.object(db, 'volume_get') @mock.patch.object(sheepdog.SheepdogDriver, '_try_execute') @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') @mock.patch.object(backup_driver, 'BackupDriver') @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') def test_backup_volume_fail_to_create_snap(self, fake_delete_snapshot, fake_backup_service, fake_create_snapshot, fake_execute, fake_volume_get): fake_context = {} fake_volume = self.test_data.TEST_VOLUME fake_backup = self.test_data.TEST_BACKUP_VOLUME fake_volume_get.return_value = fake_volume fake_create_snapshot.side_effect = exception.SheepdogCmdError( cmd='dummy', exit_code=1, 
stdout='dummy', stderr='dummy') self.assertRaises(exception.SheepdogError, self.driver.backup_volume, fake_context, fake_backup, fake_backup_service) self.assertEqual(1, fake_create_snapshot.call_count) self.assertEqual(1, fake_delete_snapshot.call_count) self.assertEqual(fake_create_snapshot.call_args, fake_delete_snapshot.call_args) @mock.patch.object(db, 'volume_get') @mock.patch.object(sheepdog.SheepdogDriver, '_try_execute') @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') @mock.patch.object(backup_driver, 'BackupDriver') @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') def test_backup_volume_fail_to_backup_vol(self, fake_delete_snapshot, fake_backup_service, fake_create_snapshot, fake_execute, fake_volume_get): fake_context = {} fake_volume = self.test_data.TEST_VOLUME fake_backup = self.test_data.TEST_BACKUP_VOLUME fake_volume_get.return_value = fake_volume class BackupError(Exception): pass fake_backup_service.backup.side_effect = BackupError() self.assertRaises(BackupError, self.driver.backup_volume, fake_context, fake_backup, fake_backup_service) self.assertEqual(1, fake_create_snapshot.call_count) self.assertEqual(2, fake_delete_snapshot.call_count) self.assertEqual(fake_create_snapshot.call_args, fake_delete_snapshot.call_args) @mock.patch.object(backup_driver, 'BackupDriver') def test_restore_backup(self, fake_backup_service): fake_context = {} fake_backup = self.test_data.TEST_BACKUP_VOLUME fake_volume = self.test_data.TEST_VOLUME self.driver.restore_backup( fake_context, fake_backup, fake_volume, fake_backup_service) call_args, call_kwargs = fake_backup_service.restore.call_args call_backup, call_volume_id, call_sheepdog_fd = call_args self.assertEqual(fake_backup, call_backup) self.assertEqual(fake_volume.id, call_volume_id) self.assertIsInstance(call_sheepdog_fd, sheepdog.SheepdogIOWrapper) cinder-8.0.0/cinder/tests/unit/test_san.py0000664000567000056710000000473512701406250021711 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
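# Illustrative sketch (an assumption based on the assertions below, not a
# verbatim excerpt of san.SanDriver): san_execute is expected to join the
# command list into a single string and run it over a pooled SSH
# connection, roughly:
#
#     cmd = ' '.join(cmd_list)          # ['uname', '-s'] -> 'uname -s'
#     with self.sshpool.item() as ssh_item:
#         processutils.ssh_execute(ssh_item, cmd, check_exit_code=None)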
#

import mock

from cinder import test
from cinder.volume import configuration
from cinder.volume.drivers.san import san


class SanDriverTestCase(test.TestCase):
    """Tests for SAN driver."""

    def __init__(self, *args, **kwargs):
        super(SanDriverTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        super(SanDriverTestCase, self).setUp()
        self.configuration = mock.Mock(spec=configuration.Configuration)
        self.configuration.san_is_local = False
        self.configuration.san_ip = "10.0.0.1"
        self.configuration.san_login = "admin"
        self.configuration.san_password = "password"
        self.configuration.san_ssh_port = 22
        self.configuration.san_thin_provision = True
        self.configuration.san_private_key = 'private_key'
        self.configuration.ssh_min_pool_conn = 1
        self.configuration.ssh_max_pool_conn = 5
        self.configuration.ssh_conn_timeout = 30

    class fake_san_driver(san.SanDriver):
        # Minimal concrete stand-ins for san.SanDriver's abstract
        # interface; they are never exercised directly by the test below.
        def initialize_connection(self, *args, **kwargs):
            pass

        def create_volume(self, *args, **kwargs):
            pass

        def delete_volume(self, *args, **kwargs):
            pass

        def terminate_connection(self, *args, **kwargs):
            pass

    @mock.patch.object(san.processutils, 'ssh_execute')
    @mock.patch.object(san.ssh_utils, 'SSHPool')
    @mock.patch.object(san.utils, 'check_ssh_injection')
    def test_ssh_formatted_command(self, mock_check_ssh_injection,
                                   mock_ssh_pool, mock_ssh_execute):
        driver = self.fake_san_driver(configuration=self.configuration)
        cmd_list = ['uname', '-s']
        expected_cmd = 'uname -s'
        driver.san_execute(*cmd_list)
        # fetch the same mocked item that san_execute used from the pool
        with driver.sshpool.item() as ssh_item:
            mock_ssh_execute.assert_called_with(ssh_item, expected_cmd,
                                                check_exit_code=None)
cinder-8.0.0/cinder/tests/unit/test_nimble.py0000664000567000056710000012770712701406250022403 0ustar jenkinsjenkins00000000000000# Nimble Storage, Inc. (c) 2013-2014
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
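# Orientation sketch for the fixtures below (inferred from this module's
# client_mock_decorator, not from the driver itself): each test builds the
# driver against a fully mocked SOAP client, roughly:
#
#     mock_client_class.Client.return_value = mock_client_service
#     mock_client_service.service.login.return_value = \
#         FAKE_POSITIVE_LOGIN_RESPONSE_1      # yields session id 'a9b9aba7'
#     driver = nimble.NimbleISCSIDriver(configuration=configuration)
#     driver.do_setup(None)
#
# after which individual calls (createVol, getVolInfo, getNetConfig, ...)
# are primed with the FAKE_* response dictionaries defined below.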
import sys import mock from oslo_config import cfg from cinder import exception from cinder.objects import volume as obj_volume from cinder import test from cinder.volume.drivers import nimble from cinder.volume import volume_types CONF = cfg.CONF NIMBLE_CLIENT = 'cinder.volume.drivers.nimble.client' NIMBLE_URLLIB2 = 'six.moves.urllib.request' NIMBLE_RANDOM = 'cinder.volume.drivers.nimble.random' FAKE_ENUM_STRING = """ """ FAKE_POSITIVE_LOGIN_RESPONSE_1 = {'err-list': {'err-list': [{'code': 0}]}, 'authInfo': {'sid': "a9b9aba7"}} FAKE_POSITIVE_LOGIN_RESPONSE_2 = {'err-list': {'err-list': [{'code': 0}]}, 'authInfo': {'sid': "a9f3eba7"}} FAKE_POSITIVE_NETCONFIG_RESPONSE = { 'config': {'subnet-list': [{'label': "data1", 'subnet-id': {'type': 3}, 'discovery-ip': "172.18.108.21"}, {'label': "mgmt-data", 'subnet-id': {'type': 4}, 'discovery-ip': "10.18.108.55"}]}, 'err-list': {'err-list': [{'code': 0}]}} FAKE_NEGATIVE_NETCONFIG_RESPONSE = {'err-list': {'err-list': [{'code': 13}]}} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE = {'err-list': {'err-list': [{'code': 0}]}, 'name': "openstack-test11"} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_ENCRYPTION = {'err-list': {'err-list': [{'code': 0}]}, 'name': "openstack-test-encryption"} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERFPOLICY = {'err-list': {'err-list': [{'code': 0}]}, 'name': "openstack-test-perfpolicy"} FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE = {'err-list': {'err-list': [{'code': 17}]}, 'name': "openstack-test11"} FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE_ENCRYPTION = {'err-list': {'err-list': [{'code': 17}]}, 'name': "openstack-test-encryption"} FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE_PERFPOLICY = {'err-list': {'err-list': [{'code': 17}]}, 'name': "openstack-test-perfpolicy"} FAKE_GENERIC_POSITIVE_RESPONSE = {'err-list': {'err-list': [{'code': 0}]}} FAKE_POSITIVE_GROUP_CONFIG_RESPONSE = { 'err-list': {'err-list': [{'code': 0}]}, 'info': {'usableCapacity': 8016883089408, 'volUsageCompressed': 2938311843, 'snapUsageCompressed': 36189, 'unusedReserve': 0, 'spaceInfoValid': True}} FAKE_IGROUP_LIST_RESPONSE = { 'err-list': {'err-list': [{'code': 0}]}, 'initiatorgrp-list': [ {'initiator-list': [{'name': 'test-initiator1'}, {'name': 'test-initiator2'}], 'name': 'test-igrp1'}, {'initiator-list': [{'name': 'test-initiator1'}], 'name': 'test-igrp2'}]} FAKE_GET_VOL_INFO_RESPONSE = { 'err-list': {'err-list': [{'code': 0}]}, 'vol': {'target-name': 'iqn.test', 'name': 'test_vol', 'agent-type': 1, 'online': False}} FAKE_GET_VOL_INFO_ONLINE = { 'err-list': {'err-list': [{'code': 0}]}, 'vol': {'target-name': 'iqn.test', 'name': 'test_vol', 'agent-type': 1, 'online': True}} FAKE_GET_VOL_INFO_ERROR = { 'err-list': {'err-list': [{'code': 2}]}, 'vol': {'target-name': 'iqn.test'}} FAKE_GET_VOL_INFO_RESPONSE_WITH_SET_AGENT_TYPE = { 'err-list': {'err-list': [{'code': 0}]}, 'vol': {'target-name': 'iqn.test', 'name': 'test_vol', 'agent-type': 5}} FAKE_TYPE_ID = 12345 def create_configuration(username, password, ip_address, pool_name=None, subnet_label=None, thin_provision=True): configuration = mock.Mock() configuration.san_login = username configuration.san_password = password configuration.san_ip = ip_address configuration.san_thin_provision = thin_provision configuration.nimble_pool_name = pool_name configuration.nimble_subnet_label = subnet_label configuration.safe_get.return_value = 'NIMBLE' return configuration class NimbleDriverBaseTestCase(test.TestCase): """Base Class for the NimbleDriver Tests.""" def setUp(self): super(NimbleDriverBaseTestCase, self).setUp() self.mock_client_service 
= None self.mock_client_class = None self.driver = None @staticmethod def client_mock_decorator(configuration): def client_mock_wrapper(func): def inner_client_mock( self, mock_client_class, mock_urllib2, *args, **kwargs): self.mock_client_class = mock_client_class self.mock_client_service = mock.MagicMock(name='Client') self.mock_client_class.Client.return_value = \ self.mock_client_service mock_wsdl = mock_urllib2.urlopen.return_value mock_wsdl.read = mock.MagicMock() mock_wsdl.read.return_value = FAKE_ENUM_STRING self.driver = nimble.NimbleISCSIDriver( configuration=configuration) self.mock_client_service.service.login.return_value = \ FAKE_POSITIVE_LOGIN_RESPONSE_1 self.driver.do_setup(None) func(self, *args, **kwargs) return inner_client_mock return client_mock_wrapper def tearDown(self): super(NimbleDriverBaseTestCase, self).tearDown() class NimbleDriverLoginTestCase(NimbleDriverBaseTestCase): """Tests do_setup api.""" @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_do_setup_positive(self): expected_call_list = [ mock.call.Client( 'https://10.18.108.55/wsdl/NsGroupManagement.wsdl', username='nimble', password='nimble_pass')] self.assertEqual(self.mock_client_class.method_calls, expected_call_list) expected_call_list = [mock.call.set_options( location='https://10.18.108.55:5391/soap'), mock.call.service.login( req={'username': 'nimble', 'password': 'nimble_pass'})] self.assertEqual( self.mock_client_service.method_calls, expected_call_list) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_expire_session_id(self): self.mock_client_service.service.login.return_value = \ FAKE_POSITIVE_LOGIN_RESPONSE_2 self.mock_client_service.service.getNetConfig = mock.MagicMock( side_effect=[ FAKE_NEGATIVE_NETCONFIG_RESPONSE, FAKE_POSITIVE_NETCONFIG_RESPONSE]) self.driver.APIExecutor.get_netconfig("active") expected_call_list = [mock.call.set_options( location='https://10.18.108.55:5391/soap'), mock.call.service.login( req={ 'username': 'nimble', 'password': 'nimble_pass'}), mock.call.service.getNetConfig( request={'name': 'active', 'sid': 'a9b9aba7'}), mock.call.service.login( req={'username': 'nimble', 'password': 'nimble_pass'}), mock.call.service.getNetConfig( request={'name': 'active', 'sid': 'a9f3eba7'})] self.assertEqual( self.mock_client_service.method_calls, expected_call_list) class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): """Tests volume related api's.""" @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_positive(self): self.mock_client_service.service.createVol.return_value = \ FAKE_CREATE_VOLUME_POSITIVE_RESPONSE self.mock_client_service.service.getVolInfo.return_value = \ FAKE_GET_VOL_INFO_RESPONSE 
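        # The getNetConfig stub below supplies the discovery IP
        # (172.18.108.21) that, combined with the iqn.test target name from
        # getVolInfo, yields the expected provider_location
        # '172.18.108.21:3260 iqn.test 0'.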
self.mock_client_service.service.getNetConfig.return_value = \ FAKE_POSITIVE_NETCONFIG_RESPONSE self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test 0', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''})) self.mock_client_service.service.createVol.assert_called_once_with( request={ 'attr': {'snap-quota': sys.maxsize, 'warn-level': 858993459, 'name': 'testvolume', 'reserve': 0, 'online': True, 'pool-name': 'default', 'size': 1073741824, 'quota': 1073741824, 'perfpol-name': 'default', 'description': '', 'agent-type': 5, 'encryptionAttr': {'cipher': 3}, 'multi-initiator': 'false'}, 'sid': 'a9b9aba7'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes', 'nimble:multi-initiator': 'false'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_encryption_positive(self): self.mock_client_service.service.createVol.return_value = \ FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_ENCRYPTION self.mock_client_service.service.getVolInfo.return_value = \ FAKE_GET_VOL_INFO_RESPONSE self.mock_client_service.service.getNetConfig.return_value = \ FAKE_POSITIVE_NETCONFIG_RESPONSE self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test 0', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume-encryption', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''})) mock_volume_type = volume_types.get_volume_type_extra_specs mock_volume_type.assert_called_once_with(FAKE_TYPE_ID) self.mock_client_service.service.createVol.assert_called_once_with( request={ 'attr': {'snap-quota': sys.maxsize, 'warn-level': 858993459, 'name': 'testvolume-encryption', 'reserve': 0, 'online': True, 'pool-name': 'default', 'size': 1073741824, 'quota': 1073741824, 'perfpol-name': 'default', 'description': '', 'agent-type': 5, 'encryptionAttr': {'cipher': 2}, 'multi-initiator': 'false'}, 'sid': 'a9b9aba7'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'VMware ESX', 'nimble:encryption': 'no', 'nimble:multi-initiator': 'false'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_perfpolicy_positive(self): self.mock_client_service.service.createVol.return_value = \ FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERFPOLICY self.mock_client_service.service.getVolInfo.return_value = \ FAKE_GET_VOL_INFO_RESPONSE self.mock_client_service.service.getNetConfig.return_value = \ FAKE_POSITIVE_NETCONFIG_RESPONSE self.assertEqual( {'provider_location': '172.18.108.21:3260 iqn.test 0', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume-perfpolicy', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''})) mock_volume_type = volume_types.get_volume_type_extra_specs mock_volume_type.assert_called_once_with(FAKE_TYPE_ID) 
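        # Judging by the fixtures in this module, 'nimble:encryption': 'yes'
        # is expected to translate to encryptionAttr cipher 2 in the
        # createVol request checked below, while unencrypted volumes use
        # cipher 3.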
self.mock_client_service.service.createVol.assert_called_once_with( request={ 'attr': {'snap-quota': sys.maxsize, 'warn-level': 858993459, 'name': 'testvolume-perfpolicy', 'reserve': 0, 'online': True, 'pool-name': 'default', 'size': 1073741824, 'quota': 1073741824, 'perfpol-name': 'VMware ESX', 'description': '', 'agent-type': 5, 'encryptionAttr': {'cipher': 3}, 'multi-initiator': 'false'}, 'sid': 'a9b9aba7'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'no', 'nimble:multi-initiator': 'true'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_multi_initiator_positive(self): self.mock_client_service.service.createVol.return_value = \ FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERFPOLICY self.mock_client_service.service.getVolInfo.return_value = \ FAKE_GET_VOL_INFO_RESPONSE self.mock_client_service.service.getNetConfig.return_value = \ FAKE_POSITIVE_NETCONFIG_RESPONSE self.assertEqual( {'provider_location': '172.18.108.21:3260 iqn.test 0', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume-perfpolicy', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''})) mock_volume_type = volume_types.get_volume_type_extra_specs mock_volume_type.assert_called_once_with(FAKE_TYPE_ID) self.mock_client_service.service.createVol.assert_called_once_with( request={ 'attr': {'snap-quota': sys.maxsize, 'warn-level': 858993459, 'name': 'testvolume-perfpolicy', 'reserve': 0, 'online': True, 'pool-name': 'default', 'size': 1073741824, 'quota': 1073741824, 'perfpol-name': 'default', 'description': '', 'agent-type': 5, 'encryptionAttr': {'cipher': 3}, 'multi-initiator': 'true'}, 'sid': 'a9b9aba7'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_negative(self): self.mock_client_service.service.createVol.return_value = \ FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, {'name': 'testvolume', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_encryption_negative(self): self.mock_client_service.service.createVol.return_value = \ FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE_ENCRYPTION self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, {'name': 'testvolume-encryption', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_perfpolicy_negative(self): 
self.mock_client_service.service.createVol.return_value = \ FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE_PERFPOLICY self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, {'name': 'testvolume-perfpolicy', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_delete_volume(self): self.mock_client_service.service.onlineVol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.mock_client_service.service.deleteVol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.mock_client_service.service.dissocProtPol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.driver.delete_volume({'name': 'testvolume'}) expected_calls = [mock.call.service.onlineVol( request={ 'online': False, 'name': 'testvolume', 'sid': 'a9b9aba7'}), mock.call.service.dissocProtPol( request={'vol-name': 'testvolume', 'sid': 'a9b9aba7'}), mock.call.service.deleteVol( request={'name': 'testvolume', 'sid': 'a9b9aba7'})] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_extend_volume(self): self.mock_client_service.service.editVol.return_value = \ FAKE_CREATE_VOLUME_POSITIVE_RESPONSE self.driver.extend_volume({'name': 'testvolume'}, 5) self.mock_client_service.service.editVol.assert_called_once_with( request={'attr': {'size': 5368709120, 'snap-quota': sys.maxsize, 'warn-level': 4294967296, 'reserve': 0, 'quota': 5368709120}, 'mask': 884, 'name': 'testvolume', 'sid': 'a9b9aba7'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes', 'nimble:multi-initiator': 'false'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*', False)) @mock.patch(NIMBLE_RANDOM) def test_create_cloned_volume(self, mock_random): mock_random.sample.return_value = 'abcdefghijkl' self.mock_client_service.service.snapVol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.mock_client_service.service.cloneVol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.mock_client_service.service.getVolInfo.return_value = \ FAKE_GET_VOL_INFO_RESPONSE self.mock_client_service.service.getNetConfig.return_value = \ FAKE_POSITIVE_NETCONFIG_RESPONSE self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test 0', 'provider_auth': None}, self.driver.create_cloned_volume({'name': 'volume', 'size': 5, 'volume_type_id': FAKE_TYPE_ID}, {'name': 'testvolume', 'size': 5})) expected_calls = [mock.call.service.snapVol( request={ 'vol': 'testvolume', 'snapAttr': {'name': 'openstack-clone-volume-abcdefghijkl', 'description': ''}, 'sid': 'a9b9aba7'}), mock.call.service.cloneVol( request={ 'snap-name': 'openstack-clone-volume-abcdefghijkl', 'attr': {'snap-quota': sys.maxsize, 'name': 'volume', 'quota': 5368709120, 'reserve': 5368709120, 'online': True, 
'warn-level': 4294967296, 'encryptionAttr': {'cipher': 2}, 'multi-initiator': 'false', 'perfpol-name': 'default', 'agent-type': 5}, 'name': 'testvolume', 'sid': 'a9b9aba7'})] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_positive(self): self.mock_client_service.service.getNetConfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.mock_client_service.service.getVolInfo.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.service.onlineVol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.service.editVol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE) self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test 0', 'provider_auth': None}, self.driver.manage_existing({'name': 'volume-abcdef'}, {'source-name': 'test-vol'})) expected_calls = [ mock.call.service.editVol( request={ 'attr': { 'name': 'volume-abcdef', 'agent-type': 5}, 'mask': 262145, 'name': 'test-vol', 'sid': 'a9b9aba7'}), mock.call.service.onlineVol( request={'online': True, 'name': 'volume-abcdef', 'sid': 'a9b9aba7'} ) ] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_which_is_online(self): self.mock_client_service.service.getNetConfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.mock_client_service.service.getVolInfo.return_value = ( FAKE_GET_VOL_INFO_ONLINE) self.assertRaises( exception.InvalidVolume, self.driver.manage_existing, {'name': 'volume-abcdef'}, {'source-name': 'test-vol'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_with_improper_ref(self): self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing, {'name': 'volume-abcdef'}, {'source-id': 'test-vol'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_with_nonexistant_volume(self): self.mock_client_service.service.getVolInfo.return_value = ( FAKE_GET_VOL_INFO_ERROR) self.assertRaises( exception.VolumeBackendAPIException, self.driver.manage_existing, {'name': 'volume-abcdef'}, {'source-name': 'test-vol'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_with_wrong_agent_type(self): self.mock_client_service.service.getVolInfo.return_value = ( FAKE_GET_VOL_INFO_RESPONSE_WITH_SET_AGENT_TYPE) self.assertRaises( exception.ManageExistingAlreadyManaged, self.driver.manage_existing, 
{'id': 'abcdef', 'name': 'volume-abcdef'}, {'source-name': 'test-vol'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_unmanage_volume_positive(self): self.mock_client_service.service.getVolInfo.return_value = ( FAKE_GET_VOL_INFO_RESPONSE_WITH_SET_AGENT_TYPE) self.mock_client_service.service.editVol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE) self.driver.unmanage({'name': 'volume-abcdef'}) expected_calls = [ mock.call.service.editVol( request={'attr': {'agent-type': 1}, 'mask': 262144, 'name': 'volume-abcdef', 'sid': 'a9b9aba7'}), mock.call.service.onlineVol( request={'online': False, 'name': 'volume-abcdef', 'sid': 'a9b9aba7'} ) ] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_unmanage_with_invalid_volume(self): self.mock_client_service.service.getVolInfo.return_value = ( FAKE_GET_VOL_INFO_ERROR) self.assertRaises( exception.VolumeBackendAPIException, self.driver.unmanage, {'name': 'volume-abcdef'} ) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_unmanage_with_invalid_agent_type(self): self.mock_client_service.service.getVolInfo.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.assertRaises( exception.InvalidVolume, self.driver.unmanage, {'name': 'volume-abcdef'} ) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_get_volume_stats(self): self.mock_client_service.service.getGroupConfig.return_value = \ FAKE_POSITIVE_GROUP_CONFIG_RESPONSE expected_res = {'driver_version': '2.0.2', 'vendor_name': 'Nimble', 'volume_backend_name': 'NIMBLE', 'storage_protocol': 'iSCSI', 'pools': [{'pool_name': 'NIMBLE', 'total_capacity_gb': 7466.30419921875, 'free_capacity_gb': 7463.567649364471, 'reserved_percentage': 0, 'QoS_support': False}]} self.assertEqual( expected_res, self.driver.get_volume_stats(refresh=True)) class NimbleDriverSnapshotTestCase(NimbleDriverBaseTestCase): """Tests snapshot related api's.""" @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_snapshot(self): self.mock_client_service.service.snapVol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.driver.create_snapshot( {'volume_name': 'testvolume', 'name': 'testvolume-snap1', 'display_name': '', 'display_description': ''}) self.mock_client_service.service.snapVol.assert_called_once_with( request={'vol': 'testvolume', 'snapAttr': {'name': 'testvolume-snap1', 'description': '' }, 'sid': 'a9b9aba7'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) 
@mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_delete_snapshot(self): self.mock_client_service.service.onlineSnap.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.mock_client_service.service.deleteSnap.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.driver.delete_snapshot( {'volume_name': 'testvolume', 'name': 'testvolume-snap1'}) expected_calls = [mock.call.service.onlineSnap( request={ 'vol': 'testvolume', 'online': False, 'name': 'testvolume-snap1', 'sid': 'a9b9aba7'}), mock.call.service.deleteSnap(request={'vol': 'testvolume', 'name': 'testvolume-snap1', 'sid': 'a9b9aba7'})] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes', 'nimble:multi-initiator': 'false'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_from_snapshot(self): self.mock_client_service.service.cloneVol.return_value = \ FAKE_GENERIC_POSITIVE_RESPONSE self.mock_client_service.service.getVolInfo.return_value = \ FAKE_GET_VOL_INFO_RESPONSE self.mock_client_service.service.getNetConfig.return_value = \ FAKE_POSITIVE_NETCONFIG_RESPONSE self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test 0', 'provider_auth': None}, self.driver.create_volume_from_snapshot( {'name': 'clone-testvolume', 'size': 2, 'volume_type_id': FAKE_TYPE_ID}, {'volume_name': 'testvolume', 'name': 'testvolume-snap1', 'volume_size': 1})) expected_calls = [ mock.call.service.cloneVol( request={'snap-name': 'testvolume-snap1', 'attr': {'snap-quota': sys.maxsize, 'name': 'clone-testvolume', 'quota': 1073741824, 'online': True, 'reserve': 0, 'warn-level': 858993459, 'perfpol-name': 'default', 'encryptionAttr': {'cipher': 2}, 'multi-initiator': 'false', 'agent-type': 5}, 'name': 'testvolume', 'sid': 'a9b9aba7'}), mock.call.service.editVol( request={'attr': {'size': 2147483648, 'snap-quota': sys.maxsize, 'warn-level': 1717986918, 'reserve': 0, 'quota': 2147483648}, 'mask': 884, 'name': 'clone-testvolume', 'sid': 'a9b9aba7'})] self.mock_client_service.assert_has_calls(expected_calls) class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase): """Tests Connection related api's.""" @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_initialize_connection_igroup_exist(self): self.mock_client_service.service.getInitiatorGrpList.return_value = \ FAKE_IGROUP_LIST_RESPONSE expected_res = { 'driver_volume_type': 'iscsi', 'data': { 'target_lun': '14', 'volume_id': 12, 'target_iqn': '13', 'target_discovered': False, 'target_portal': '12'}} self.assertEqual( expected_res, self.driver.initialize_connection( {'name': 'test-volume', 'provider_location': '12 13 14', 'id': 12}, {'initiator': 'test-initiator1'})) expected_call_list = [mock.call.set_options( location='https://10.18.108.55:5391/soap'), mock.call.service.login( req={ 'username': 'nimble', 
                'password': 'nimble_pass'}),
            mock.call.service.getInitiatorGrpList(
                request={'sid': 'a9b9aba7'}),
            mock.call.service.addVolAcl(
                request={'volname': 'test-volume', 'apply-to': 3,
                         'chapuser': '*', 'initiatorgrp': 'test-igrp2',
                         'sid': 'a9b9aba7'})]
        self.assertEqual(
            self.mock_client_service.method_calls, expected_call_list)

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    @mock.patch(NIMBLE_RANDOM)
    def test_initialize_connection_igroup_not_exist(self, mock_random):
        mock_random.sample.return_value = 'abcdefghijkl'
        self.mock_client_service.service.getInitiatorGrpList.return_value = \
            FAKE_IGROUP_LIST_RESPONSE
        expected_res = {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_lun': '14',
                'volume_id': 12,
                'target_iqn': '13',
                'target_discovered': False,
                'target_portal': '12'}}
        self.assertEqual(
            expected_res,
            self.driver.initialize_connection(
                {'name': 'test-volume',
                 'provider_location': '12 13 14',
                 'id': 12},
                {'initiator': 'test-initiator3'}))
        expected_calls = [
            mock.call.service.getInitiatorGrpList(
                request={'sid': 'a9b9aba7'}),
            mock.call.service.createInitiatorGrp(
                request={
                    'attr': {'initiator-list': [{'name': 'test-initiator3',
                                                 'label': 'test-initiator3'}],
                             'name': 'openstack-abcdefghijkl'},
                    'sid': 'a9b9aba7'}),
            mock.call.service.addVolAcl(
                request={'volname': 'test-volume', 'apply-to': 3,
                         'chapuser': '*',
                         'initiatorgrp': 'openstack-abcdefghijkl',
                         'sid': 'a9b9aba7'})]
        self.mock_client_service.assert_has_calls(expected_calls)

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_terminate_connection_positive(self):
        self.mock_client_service.service.getInitiatorGrpList.return_value = \
            FAKE_IGROUP_LIST_RESPONSE
        self.driver.terminate_connection(
            {'name': 'test-volume',
             'provider_location': '12 13 14',
             'id': 12},
            {'initiator': 'test-initiator1'})
        expected_calls = [mock.call.service.getInitiatorGrpList(
            request={'sid': 'a9b9aba7'}),
            mock.call.service.removeVolAcl(
                request={'volname': 'test-volume', 'apply-to': 3,
                         'chapuser': '*',
                         'initiatorgrp': {'initiator-list':
                                          [{'name': 'test-initiator1'}]},
                         'sid': 'a9b9aba7'})]
        self.mock_client_service.assert_has_calls(expected_calls)

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @mock.patch.object(obj_volume.VolumeList, 'get_all',
                       mock.Mock(return_value=[]))
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
    def test_terminate_connection_negative(self):
        self.mock_client_service.service.getInitiatorGrpList.return_value = \
            FAKE_IGROUP_LIST_RESPONSE
        self.assertRaises(
            exception.VolumeDriverException,
            self.driver.terminate_connection,
            {'name': 'test-volume',
             'provider_location': '12 13 14',
             'id': 12},
            {'initiator': 'test-initiator3'})
cinder-8.0.0/cinder/tests/unit/test_volume_glance_metadata.py0000664000567000056710000002064312701406250025604 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2011 University of Southern California
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Unit Tests for volume Glance metadata code
"""

from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake


class VolumeGlanceMetadataTestCase(test.TestCase):

    def setUp(self):
        super(VolumeGlanceMetadataTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        objects.register_all()

    def test_vol_glance_metadata_bad_vol_id(self):
        ctxt = context.get_admin_context()
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_glance_metadata_create,
                          ctxt, fake.volume_id, 'key1', 'value1')
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_glance_metadata_get, ctxt, fake.volume_id)
        db.volume_glance_metadata_delete_by_volume(ctxt, fake.volume2_id)

    def test_vol_update_glance_metadata(self):
        ctxt = context.get_admin_context()
        db.volume_create(ctxt, {'id': fake.volume_id})
        db.volume_create(ctxt, {'id': fake.volume2_id})
        db.volume_glance_metadata_create(ctxt, fake.volume_id,
                                         'key1', 'value1')
        db.volume_glance_metadata_create(ctxt, fake.volume2_id,
                                         'key1', 'value1')
        db.volume_glance_metadata_create(ctxt, fake.volume2_id,
                                         'key2', 'value2')
        db.volume_glance_metadata_create(ctxt, fake.volume2_id,
                                         'key3', 123)

        expected_metadata_1 = {'volume_id': fake.volume_id,
                               'key': 'key1',
                               'value': 'value1'}

        metadata = db.volume_glance_metadata_get(ctxt, fake.volume_id)
        self.assertEqual(1, len(metadata))
        for key, value in expected_metadata_1.items():
            self.assertEqual(value, metadata[0][key])

        expected_metadata_2 = ({'volume_id': fake.volume2_id,
                                'key': 'key1',
                                'value': 'value1'},
                               {'volume_id': fake.volume2_id,
                                'key': 'key2',
                                'value': 'value2'},
                               {'volume_id': fake.volume2_id,
                                'key': 'key3',
                                'value': '123'})

        metadata = db.volume_glance_metadata_get(ctxt, fake.volume2_id)
        self.assertEqual(3, len(metadata))
        for expected, meta in zip(expected_metadata_2, metadata):
            for key, value in expected.items():
                self.assertEqual(value, meta[key])

        self.assertRaises(exception.GlanceMetadataExists,
                          db.volume_glance_metadata_create,
                          ctxt, fake.volume_id, 'key1', 'value1a')

        metadata = db.volume_glance_metadata_get(ctxt, fake.volume_id)
        self.assertEqual(1, len(metadata))
        for key, value in expected_metadata_1.items():
            self.assertEqual(value, metadata[0][key])

    def test_vols_get_glance_metadata(self):
        ctxt = context.get_admin_context()
        db.volume_create(ctxt, {'id': fake.volume_id})
        db.volume_create(ctxt, {'id': fake.volume2_id})
        db.volume_create(ctxt, {'id': '3'})
        db.volume_glance_metadata_create(ctxt, fake.volume_id,
                                         'key1', 'value1')
        db.volume_glance_metadata_create(ctxt, fake.volume2_id,
                                         'key2', 'value2')
        db.volume_glance_metadata_create(ctxt, fake.volume2_id,
                                         'key22', 'value22')

        metadata = db.volume_glance_metadata_get_all(ctxt)
        self.assertEqual(3, len(metadata))
        self._assert_metadata_equals(fake.volume_id, 'key1', 'value1',
                                     metadata[0])
        self._assert_metadata_equals(fake.volume2_id, 'key2',
'value2', metadata[1]) self._assert_metadata_equals(fake.volume2_id, 'key22', 'value22', metadata[2]) def _assert_metadata_equals(self, volume_id, key, value, observed): self.assertEqual(volume_id, observed.volume_id) self.assertEqual(key, observed.key) self.assertEqual(value, observed.value) def test_vol_delete_glance_metadata(self): ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': fake.volume_id}) db.volume_glance_metadata_delete_by_volume(ctxt, fake.volume_id) db.volume_glance_metadata_create(ctxt, fake.volume_id, 'key1', 'value1') db.volume_glance_metadata_delete_by_volume(ctxt, fake.volume_id) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_glance_metadata_get, ctxt, fake.volume_id) def test_vol_glance_metadata_copy_to_snapshot(self): ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': fake.volume_id}) snap = objects.Snapshot(ctxt, volume_id=fake.volume_id) snap.create() db.volume_glance_metadata_create(ctxt, fake.volume_id, 'key1', 'value1') db.volume_glance_metadata_copy_to_snapshot(ctxt, snap.id, fake.volume_id) expected_meta = {'snapshot_id': snap.id, 'key': 'key1', 'value': 'value1'} for meta in db.volume_snapshot_glance_metadata_get(ctxt, snap.id): for (key, value) in expected_meta.items(): self.assertEqual(value, meta[key]) snap.destroy() def test_vol_glance_metadata_copy_from_volume_to_volume(self): ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': fake.volume_id}) db.volume_create(ctxt, {'id': fake.volume2_id, 'source_volid': fake.volume_id}) db.volume_glance_metadata_create(ctxt, fake.volume_id, 'key1', 'value1') db.volume_glance_metadata_copy_from_volume_to_volume(ctxt, fake.volume_id, fake.volume2_id) expected_meta = {'key': 'key1', 'value': 'value1'} for meta in db.volume_glance_metadata_get(ctxt, fake.volume2_id): for (key, value) in expected_meta.items(): self.assertEqual(value, meta[key]) def test_volume_glance_metadata_copy_to_volume(self): vol1 = db.volume_create(self.ctxt, {}) vol2 = db.volume_create(self.ctxt, {}) db.volume_glance_metadata_create(self.ctxt, vol1['id'], 'm1', 'v1') snapshot = objects.Snapshot(self.ctxt, volume_id=vol1['id']) snapshot.create() db.volume_glance_metadata_copy_to_snapshot(self.ctxt, snapshot.id, vol1['id']) db.volume_glance_metadata_copy_to_volume(self.ctxt, vol2['id'], snapshot.id) metadata = db.volume_glance_metadata_get(self.ctxt, vol2['id']) metadata = {m['key']: m['value'] for m in metadata} self.assertEqual({'m1': 'v1'}, metadata) def test_volume_snapshot_glance_metadata_get_nonexistent(self): vol = db.volume_create(self.ctxt, {}) snapshot = objects.Snapshot(self.ctxt, volume_id=vol['id']) snapshot.create() self.assertRaises(exception.GlanceMetadataNotFound, db.volume_snapshot_glance_metadata_get, self.ctxt, snapshot.id) snapshot.destroy() cinder-8.0.0/cinder/tests/unit/fake_hpe_client_exceptions.py0000664000567000056710000000601412701406250025422 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """Fake HPE client exceptions to use when mocking HPE clients.""" class UnsupportedVersion(Exception): """Unsupported version of the client.""" pass class ClientException(Exception): """The base exception class for these fake exceptions.""" _error_code = None _error_desc = None _error_ref = None _debug1 = None _debug2 = None def __init__(self, error=None): if error: if 'code' in error: self._error_code = error['code'] if 'desc' in error: self._error_desc = error['desc'] if 'ref' in error: self._error_ref = error['ref'] if 'debug1' in error: self._debug1 = error['debug1'] if 'debug2' in error: self._debug2 = error['debug2'] def get_code(self): return self._error_code def get_description(self): return self._error_desc def get_ref(self): return self._error_ref def __str__(self): formatted_string = self.message if self.http_status: formatted_string += " (HTTP %s)" % self.http_status if self._error_code: formatted_string += " %s" % self._error_code if self._error_desc: formatted_string += " - %s" % self._error_desc if self._error_ref: formatted_string += " - %s" % self._error_ref if self._debug1: formatted_string += " (1: '%s')" % self._debug1 if self._debug2: formatted_string += " (2: '%s')" % self._debug2 return formatted_string class HTTPConflict(Exception): http_status = 409 message = "Conflict" def __init__(self, error=None): if error and 'message' in error: self._error_desc = error['message'] def get_description(self): return self._error_desc class HTTPNotFound(Exception): http_status = 404 message = "Not found" class HTTPForbidden(ClientException): http_status = 403 message = "Forbidden" class HTTPBadRequest(Exception): http_status = 400 message = "Bad request" class HTTPServerError(Exception): http_status = 500 message = "Error" def __init__(self, error=None): if error and 'message' in error: self._error_desc = error['message'] def get_description(self): return self._error_desc cinder-8.0.0/cinder/tests/unit/test_zfssa.py0000664000567000056710000022672412701406250022262 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
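# Orientation sketch (inferred from the setUp fixtures below): factory_zfssa
# is patched so that ZFSSAISCSIDriver.do_setup() receives a MagicMock built
# with spec=rest.ZFSSAApi, roughly:
#
#     _factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSAApi)
#     self.drv = iscsi.ZFSSAISCSIDriver(configuration=self.configuration)
#     self.drv.do_setup({})
#
# so each test can prime self.drv.zfssa.<api_call>.return_value without
# touching a real appliance.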
"""Unit tests for Oracle's ZFSSA Cinder volume driver.""" from datetime import date import json import math import mock from oslo_utils import units import six from cinder import context from cinder import exception from cinder.image import image_utils from cinder import test from cinder.tests.unit import fake_utils from cinder.tests.unit import utils from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers import remotefs from cinder.volume.drivers.zfssa import restclient as client from cinder.volume.drivers.zfssa import webdavclient from cinder.volume.drivers.zfssa import zfssaiscsi as iscsi from cinder.volume.drivers.zfssa import zfssanfs from cinder.volume.drivers.zfssa import zfssarest as rest nfs_logbias = 'latency' nfs_compression = 'off' zfssa_cache_dir = 'os-cinder-cache' no_virtsize_img = { 'id': 'no_virtsize_img_id1234', 'size': 654321, 'updated_at': date(2015, 1, 1), } small_img = { 'id': 'small_id1234', 'size': 654321, 'virtual_size': 2361393152, 'updated_at': date(2015, 1, 1), } large_img = { 'id': 'large_id5678', 'size': 50000000, 'virtual_size': 11806965760, 'updated_at': date(2015, 2, 2), } fakespecs = { 'prop1': 'prop1_val', 'prop2': 'prop2_val', } small_img_props = { 'size': 3, } img_props_nfs = { 'image_id': small_img['id'], 'updated_at': small_img['updated_at'].isoformat(), 'size': 3, 'name': '%(dir)s/os-cache-vol-%(name)s' % ({'dir': zfssa_cache_dir, 'name': small_img['id']}) } fakecontext = 'fakecontext' img_service = 'fakeimgservice' img_location = 'fakeimglocation' class ImgInfo(object): def __init__(self, vsize): self.virtual_size = vsize class FakeResponse(object): def __init__(self, statuscode, data='data'): self.status = statuscode self.data = data class FakeSSL(object): def _create_unverified_context(self): return 'fakecontext' class TestZFSSAISCSIDriver(test.TestCase): test_vol = { 'name': 'cindervol', 'size': 3, 'id': 1, 'provider_location': 'fake_location 1 2', 'provider_auth': 'fake_auth user pass', } test_vol2 = { 'name': 'cindervol2', 'size': 5, 'id': 2, 'provider_location': 'fake_location 3 4', 'provider_auth': 'fake_auth user pass', } test_snap = { 'name': 'cindersnap', 'volume_name': test_vol['name'] } test_vol_snap = { 'name': 'cindersnapvol', 'size': test_vol['size'] } def __init__(self, method): super(TestZFSSAISCSIDriver, self).__init__(method) @mock.patch.object(iscsi, 'factory_zfssa') def setUp(self, _factory_zfssa): super(TestZFSSAISCSIDriver, self).setUp() self._create_fake_config() _factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSAApi) iscsi.ZFSSAISCSIDriver._execute = fake_utils.fake_execute self.drv = iscsi.ZFSSAISCSIDriver(configuration=self.configuration) self.drv.do_setup({}) def _create_fake_config(self): self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.san_ip = '1.1.1.1' self.configuration.san_login = 'user' self.configuration.san_password = 'passwd' self.configuration.zfssa_pool = 'pool' self.configuration.zfssa_project = 'project' self.configuration.zfssa_lun_volblocksize = '8k' self.configuration.zfssa_lun_sparse = 'false' self.configuration.zfssa_lun_logbias = 'latency' self.configuration.zfssa_lun_compression = 'off' self.configuration.zfssa_initiator_group = 'test-init-grp1' self.configuration.zfssa_initiator = \ 'iqn.1-0.org.deb:01:d7, iqn.1-0.org.deb:01:d9' self.configuration.zfssa_initiator_user = '' self.configuration.zfssa_initiator_password = '' self.configuration.zfssa_initiator_config = "{'test-init-grp1':[{'iqn':\ 
'iqn.1-0.org.deb:01:d7','user':'','password':''}],'test-init-grp\
2':[{'iqn':'iqn.1-0.org.deb:01:d9','user':'','password':''}]}"
        self.configuration.zfssa_target_group = 'test-target-grp1'
        self.configuration.zfssa_target_user = ''
        self.configuration.zfssa_target_password = ''
        self.configuration.zfssa_target_portal = '1.1.1.1:3260'
        self.configuration.zfssa_target_interfaces = 'e1000g0'
        self.configuration.zfssa_rest_timeout = 60
        self.configuration.volume_backend_name = 'fake_zfssa'
        self.configuration.zfssa_enable_local_cache = True
        self.configuration.zfssa_cache_project = zfssa_cache_dir
        self.configuration.safe_get = self.fake_safe_get
        self.configuration.zfssa_replication_ip = '1.1.1.1'
        self.configuration.zfssa_manage_policy = 'loose'

    def _util_migrate_volume_exceptions(self):
        self.drv.zfssa.get_lun.return_value = (
            {'targetgroup': 'test-target-grp1'})
        self.drv.zfssa.get_asn.return_value = (
            '9a2b5a0f-e3af-6d14-9578-8825f229dc89')
        self.drv.tgt_zfssa.get_asn.return_value = (
            '9a2b5a0f-e3af-6d14-9578-8825f229dc89')
        targets = {'targets': [{'hostname': '2.2.2.2',
                                'address': '2.2.2.2:216',
                                'label': '2.2.2.2',
                                'asn':
                                '9a2b5a0f-e3af-6d14-9578-8825f229dc89'}]}
        self.drv.zfssa.get_replication_targets.return_value = targets
        self.drv.zfssa.edit_inherit_replication_flag.return_value = {}
        self.drv.zfssa.create_replication_action.return_value = 'action-123'
        self.drv.zfssa.send_repl_update.return_value = True

    def test_migrate_volume(self):
        self._util_migrate_volume_exceptions()
        volume = self.test_vol
        volume.update({'host': 'fake_host',
                       'status': 'available',
                       'name': 'vol-1',
                       'source_volid': self.test_vol['id']})
        loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2'
        host = {'host': 'stack@zfssa_iscsi#fake_zfssa',
                'capabilities': {'vendor_name': 'Oracle',
                                 'storage_protocol': 'iSCSI',
                                 'location_info': loc_info}}
        ctxt = context.get_admin_context()

        # Test the normal case
        result = self.drv.migrate_volume(ctxt, volume, host)
        self.assertEqual((True, None), result)

        # Test when volume status is not available
        volume['status'] = 'in-use'
        result = self.drv.migrate_volume(ctxt, volume, host)
        self.assertEqual((False, None), result)
        volume['status'] = 'available'

        # Test when vendor is not Oracle
        host['capabilities']['vendor_name'] = 'elcarO'
        result = self.drv.migrate_volume(ctxt, volume, host)
        self.assertEqual((False, None), result)
        host['capabilities']['vendor_name'] = 'Oracle'

        # Test when storage protocol is not iSCSI
        host['capabilities']['storage_protocol'] = 'not_iSCSI'
        result = self.drv.migrate_volume(ctxt, volume, host)
        self.assertEqual((False, None), result)
        host['capabilities']['storage_protocol'] = 'iSCSI'

        # Test when location_info is incorrect
        host['capabilities']['location_info'] = ''
        result = self.drv.migrate_volume(ctxt, volume, host)
        self.assertEqual((False, None), result)
        host['capabilities']['location_info'] = loc_info

        # Test if replication ip and replication target's address don't match
        invalid_loc_info = (
            '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:9.9.9.9')
        host['capabilities']['location_info'] = invalid_loc_info
        result = self.drv.migrate_volume(ctxt, volume, host)
        self.assertEqual((False, None), result)
        host['capabilities']['location_info'] = loc_info

        # Test if no targets are returned
        self.drv.zfssa.get_replication_targets.return_value = {'targets': []}
        result = self.drv.migrate_volume(ctxt, volume, host)
        self.assertEqual((False, None), result)

    def test_migrate_volume_uninherit_exception(self):
        self._util_migrate_volume_exceptions()
        volume = self.test_vol
        volume.update({'host': 'fake_host',
                       'status': 'available',
                       'name': 'vol-1',
'source_volid': self.test_vol['id']}) loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' host = {'host': 'stack@zfssa_iscsi#fake_zfssa', 'capabilities': {'vendor_name': 'Oracle', 'storage_protocol': 'iSCSI', 'location_info': loc_info}} ctxt = context.get_admin_context() self.drv.zfssa.edit_inherit_replication_flag.side_effect = ( exception.VolumeBackendAPIException(data='uninherit ex')) self.assertRaises(exception.VolumeBackendAPIException, self.drv.migrate_volume, ctxt, volume, host) def test_migrate_volume_create_action_exception(self): self._util_migrate_volume_exceptions() volume = self.test_vol volume.update({'host': 'fake_host', 'status': 'available', 'name': 'vol-1', 'source_volid': self.test_vol['id']}) loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' host = {'host': 'stack@zfssa_iscsi#fake_zfssa', 'capabilities': {'vendor_name': 'Oracle', 'storage_protocol': 'iSCSI', 'location_info': loc_info}} ctxt = context.get_admin_context() self.drv.zfssa.create_replication_action.side_effect = ( exception.VolumeBackendAPIException(data= 'failed to create action')) self.assertRaises(exception.VolumeBackendAPIException, self.drv.migrate_volume, ctxt, volume, host) def test_migrate_volume_send_update_exception(self): self._util_migrate_volume_exceptions() volume = self.test_vol volume.update({'host': 'fake_host', 'status': 'available', 'name': 'vol-1', 'source_volid': self.test_vol['id']}) loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' host = {'host': 'stack@zfssa_iscsi#fake_zfssa', 'capabilities': {'vendor_name': 'Oracle', 'storage_protocol': 'iSCSI', 'location_info': loc_info}} ctxt = context.get_admin_context() self.drv.zfssa.send_repl_update.side_effect = ( exception.VolumeBackendAPIException(data='failed to send update')) self.assertRaises(exception.VolumeBackendAPIException, self.drv.migrate_volume, ctxt, volume, host) def test_migrate_volume_sever_repl_exception(self): self._util_migrate_volume_exceptions() volume = self.test_vol volume.update({'host': 'fake_host', 'status': 'available', 'name': 'vol-1', 'source_volid': self.test_vol['id']}) loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' host = {'host': 'stack@zfssa_iscsi#fake_zfssa', 'capabilities': {'vendor_name': 'Oracle', 'storage_protocol': 'iSCSI', 'location_info': loc_info}} ctxt = context.get_admin_context() self.drv.tgt_zfssa.sever_replication.side_effect = ( exception.VolumeBackendAPIException(data= 'failed to sever replication')) self.assertRaises(exception.VolumeBackendAPIException, self.drv.migrate_volume, ctxt, volume, host) def test_create_delete_volume(self): self.drv.zfssa.get_lun.return_value = {'guid': '00000000000000000000000000000', 'number': 0, 'initiatorgroup': 'default', 'size': 1, 'nodestroy': False} lcfg = self.configuration self.drv.create_volume(self.test_vol) self.drv.zfssa.create_lun.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_vol['name'], six.text_type(self.test_vol['size']) + 'g', lcfg.zfssa_target_group, mock.ANY) self.drv.delete_volume(self.test_vol) self.drv.zfssa.get_lun.assert_called_once_with(lcfg.zfssa_pool, lcfg.zfssa_project, self.test_vol['name']) self.drv.zfssa.delete_lun.assert_called_once_with( pool=lcfg.zfssa_pool, project=lcfg.zfssa_project, lun=self.test_vol['name']) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_check_origin') def test_delete_cache_volume(self, _check_origin): lcfg = self.configuration lun2del = { 'guid': '00000000000000000000000000000', 'number': 0,
'initiatorgroup': 'default', 'size': 1, 'nodestroy': False, 'origin': { 'project': lcfg.zfssa_cache_project, 'snapshot': 'image-%s' % small_img['id'], 'share': 'os-cache-vol-%s' % small_img['id'], } } self.drv.zfssa.get_lun.return_value = lun2del self.drv.delete_volume(self.test_vol) self.drv._check_origin.assert_called_once_with(lun2del, self.test_vol['name']) def test_check_origin(self): lcfg = self.configuration lun2del = { 'guid': '00000000000000000000000000000', 'number': 0, 'initiatorgroup': 'default', 'size': 1, 'nodestroy': False, 'origin': { 'project': lcfg.zfssa_cache_project, 'snapshot': 'image-%s' % small_img['id'], 'share': 'os-cache-vol-%s' % small_img['id'], } } cache = lun2del['origin'] self.drv.zfssa.num_clones.return_value = 0 self.drv._check_origin(lun2del, 'volname') self.drv.zfssa.delete_lun.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache['share']) def test_create_delete_snapshot(self): self.drv.zfssa.num_clones.return_value = 0 lcfg = self.configuration self.drv.create_snapshot(self.test_snap) self.drv.zfssa.create_snapshot.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_snap['volume_name'], self.test_snap['name']) self.drv.delete_snapshot(self.test_snap) self.drv.zfssa.delete_snapshot.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_snap['volume_name'], self.test_snap['name']) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_clone_size') def test_create_volume_from_snapshot(self, _verify_clone_size): self.drv._verify_clone_size.return_value = True lcfg = self.configuration self.drv.create_snapshot(self.test_snap) self.drv.zfssa.create_snapshot.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_snap['volume_name'], self.test_snap['name']) self.drv.create_volume_from_snapshot(self.test_vol_snap, self.test_snap) self.drv._verify_clone_size.assert_called_once_with( self.test_snap, self.test_vol_snap['size'] * units.Gi) self.drv.zfssa.clone_snapshot.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_snap['volume_name'], self.test_snap['name'], lcfg.zfssa_project, self.test_vol_snap['name']) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_provider_info') def test_volume_attach_detach(self, _get_provider_info): lcfg = self.configuration test_target_iqn = 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd' stub_val = {'provider_location': '%s %s 0' % (lcfg.zfssa_target_portal, test_target_iqn)} self.drv._get_provider_info.return_value = stub_val connector = dict(initiator='iqn.1-0.org.deb:01:d7') props = self.drv.initialize_connection(self.test_vol, connector) self.drv._get_provider_info.assert_called_once_with(self.test_vol) self.assertEqual('iscsi', props['driver_volume_type']) self.assertEqual(self.test_vol['id'], props['data']['volume_id']) self.assertEqual(lcfg.zfssa_target_portal, props['data']['target_portal']) self.assertEqual(test_target_iqn, props['data']['target_iqn']) self.assertEqual(0, props['data']['target_lun']) self.assertFalse(props['data']['target_discovered']) self.drv.terminate_connection(self.test_vol, '') self.drv.zfssa.set_lun_initiatorgroup.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_vol['name'], '') def test_volume_attach_detach_negative(self): self.drv.zfssa.get_initiator_initiatorgroup.return_value = [] connector = dict(initiator='iqn.1-0.org.deb:01:d7') self.assertRaises(exception.VolumeBackendAPIException, self.drv.initialize_connection, self.test_vol, connector) def test_get_volume_stats(self): 
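# The stubbed byte counts below surface as whole GiB in the reported stats:
# the (2 * units.Gi, 3 * units.Gi) tuple from get_project_stats ends up as
# free_capacity_gb == 2 and total_capacity_gb == 3 in the assertions that
# follow.  (Derived from the assertions in this test, not from driver docs.)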
self.drv.zfssa.get_project_stats.return_value = 2 * units.Gi,\ 3 * units.Gi self.drv.zfssa.get_pool_details.return_value = \ {"profile": "mirror:log_stripe"} lcfg = self.configuration stats = self.drv.get_volume_stats(refresh=True) self.drv.zfssa.get_project_stats.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project) self.drv.zfssa.get_pool_details.assert_called_once_with( lcfg.zfssa_pool) self.assertEqual('Oracle', stats['vendor_name']) self.assertEqual(self.configuration.volume_backend_name, stats['volume_backend_name']) self.assertEqual(self.drv.VERSION, stats['driver_version']) self.assertEqual(self.drv.protocol, stats['storage_protocol']) self.assertEqual(0, stats['reserved_percentage']) self.assertFalse(stats['QoS_support']) self.assertEqual(3, stats['total_capacity_gb']) self.assertEqual(2, stats['free_capacity_gb']) self.assertEqual('mirror:log_stripe', stats['zfssa_poolprofile']) self.assertEqual('8k', stats['zfssa_volblocksize']) self.assertEqual('false', stats['zfssa_sparse']) self.assertEqual('off', stats['zfssa_compression']) self.assertEqual('latency', stats['zfssa_logbias']) self.drv.zfssa.get_pool_details.return_value = {"profile": "raidz2"} stats = self.drv.get_volume_stats(refresh=True) self.assertEqual('raidz2', stats['zfssa_poolprofile']) def test_extend_volume(self): lcfg = self.configuration self.drv.extend_volume(self.test_vol, 3) self.drv.zfssa.set_lun_props.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_vol['name'], volsize= 3 * units.Gi) @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs') def test_get_voltype_specs(self, get_volume_type_extra_specs): volume_type_id = mock.sentinel.volume_type_id volume = {'volume_type_id': volume_type_id} get_volume_type_extra_specs.return_value = { 'zfssa:volblocksize': '128k', 'zfssa:compression': 'gzip' } ret = self.drv._get_voltype_specs(volume) self.assertEqual('128k', ret.get('volblocksize')) self.assertEqual(self.configuration.zfssa_lun_sparse, ret.get('sparse')) self.assertEqual('gzip', ret.get('compression')) self.assertEqual(self.configuration.zfssa_lun_logbias, ret.get('logbias')) def tearDown(self): super(TestZFSSAISCSIDriver, self).tearDown() def fake_safe_get(self, value): try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(image_utils, 'qemu_img_info') @mock.patch.object(image_utils.TemporaryImages, 'fetch') @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_cache_volume') def test_clone_image_negative(self, _verify_cache_volume, _fetch, _info): # Disabling local cache feature: self.configuration.zfssa_enable_local_cache = False _fetch.return_value = mock.MagicMock(spec=utils.get_file_spec()) _info.return_value = ImgInfo(small_img['virtual_size']) self.assertEqual((None, False), self.drv.clone_image(fakecontext, self.test_vol, img_location, small_img, img_service)) self.configuration.zfssa_enable_local_cache = True # Creating a volume smaller than image: _info.return_value = ImgInfo(large_img['virtual_size']) self.assertEqual((None, False), self.drv.clone_image(fakecontext, self.test_vol, img_location, large_img, img_service)) # Exception raised in _verify_cache_image _info.return_value = ImgInfo(small_img['virtual_size']) self.drv._verify_cache_volume.side_effect = ( exception.VolumeBackendAPIException('fakeerror')) self.assertEqual((None, False), self.drv.clone_image(fakecontext, self.test_vol, img_location, small_img, img_service)) @mock.patch.object(image_utils, 'qemu_img_info') 
@mock.patch.object(image_utils.TemporaryImages, 'fetch') @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_voltype_specs') @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_cache_volume') @mock.patch.object(iscsi.ZFSSAISCSIDriver, 'extend_volume') def test_clone_image(self, _extend_vol, _verify_cache, _get_specs, _fetch, _info): lcfg = self.configuration cache_vol = 'os-cache-vol-%s' % small_img['id'] cache_snap = 'image-%s' % small_img['id'] self.drv._get_voltype_specs.return_value = fakespecs.copy() self.drv._verify_cache_volume.return_value = cache_vol, cache_snap _fetch.return_value = mock.MagicMock(spec=utils.get_file_spec()) _info.return_value = ImgInfo(small_img['virtual_size']) model, cloned = self.drv.clone_image(fakecontext, self.test_vol2, img_location, small_img, img_service) self.drv._verify_cache_volume.assert_called_once_with(fakecontext, small_img, img_service, fakespecs, small_img_props) self.drv.zfssa.clone_snapshot.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache_vol, cache_snap, lcfg.zfssa_project, self.test_vol2['name']) self.drv.extend_volume.assert_called_once_with(self.test_vol2, self.test_vol2['size']) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_create_cache_volume') def test_verify_cache_vol_no_cache_vol(self, _create_cache_vol): vol_name = 'os-cache-vol-%s' % small_img['id'] self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound( volume_id=vol_name) self.drv._verify_cache_volume(fakecontext, small_img, img_service, fakespecs, small_img_props) self.drv._create_cache_volume.assert_called_once_with(fakecontext, small_img, img_service, fakespecs, small_img_props) def test_verify_cache_vol_no_cache_snap(self): snap_name = 'image-%s' % small_img['id'] self.drv.zfssa.get_lun_snapshot.side_effect = ( exception.SnapshotNotFound(snapshot_id=snap_name)) self.assertRaises(exception.VolumeBackendAPIException, self.drv._verify_cache_volume, fakecontext, small_img, img_service, fakespecs, small_img_props) def test_verify_cache_vol_stale_vol(self): self.drv.zfssa.get_lun_snapshot.return_value = {'numclones': 5} self.assertRaises(exception.VolumeBackendAPIException, self.drv._verify_cache_volume, fakecontext, small_img, img_service, fakespecs, small_img_props) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_create_cache_volume') def test_verify_cache_vol_updated_vol(self, _create_cache_vol): lcfg = self.configuration updated_vol = { 'updated_at': date(3000, 12, 12), 'image_id': 'updated_id', } cachevol_name = 'os-cache-vol-%s' % small_img['id'] self.drv.zfssa.get_lun.return_value = updated_vol self.drv.zfssa.get_lun_snapshot.return_value = {'numclones': 0} self.drv._verify_cache_volume(fakecontext, small_img, img_service, fakespecs, small_img_props) self.drv.zfssa.delete_lun.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_cache_project, cachevol_name) self.drv._create_cache_volume.assert_called_once_with(fakecontext, small_img, img_service, fakespecs, small_img_props) @mock.patch.object(driver.BaseVD, 'copy_image_to_volume') def test_create_cache_volume(self, _copy_image): lcfg = self.configuration virtual_size = int(small_img['virtual_size']) volsize = math.ceil(float(virtual_size) / units.Gi) lunsize = "%sg" % six.text_type(int(volsize)) volname = 'os-cache-vol-%s' % small_img['id'] snapname = 'image-%s' % small_img['id'] cachevol_props = { 'cache_name': volname, 'snap_name': snapname, } cachevol_props.update(small_img_props) cache_vol = { 'name': volname, 'id': small_img['id'], 'size': volsize, } lun_props = { 'custom:image_id': 
small_img['id'], 'custom:updated_at': ( six.text_type(small_img['updated_at'].isoformat())), } lun_props.update(fakespecs) self.drv._create_cache_volume(fakecontext, small_img, img_service, fakespecs, cachevol_props) self.drv.zfssa.create_lun.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache_vol['name'], lunsize, lcfg.zfssa_target_group, lun_props) _copy_image.assert_called_once_with(fakecontext, cache_vol, img_service, small_img['id']) self.drv.zfssa.create_snapshot.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache_vol['name'], snapname) def test_create_cache_vol_negative(self): lcfg = self.configuration volname = 'os-cache-vol-%s' % small_img['id'] snapname = 'image-%s' % small_img['id'] cachevol_props = { 'cache_name': volname, 'snap_name': snapname, } cachevol_props.update(small_img) self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound( volume_id=volname) self.assertRaises(exception.VolumeBackendAPIException, self.drv._create_cache_volume, fakecontext, small_img, img_service, fakespecs, cachevol_props) self.drv.zfssa.delete_lun.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_cache_project, volname) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol') @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_volume_to_manage') def test_volume_manage(self, _get_existing_vol, _verify_volume_to_manage): lcfg = self.configuration lcfg.zfssa_manage_policy = 'loose' test_vol = self.test_vol self.drv._get_existing_vol.return_value = test_vol self.drv._verify_volume_to_manage.return_value = None self.drv.zfssa.set_lun_props.return_value = True self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'}, {'source-name': 'volume-567'})) self.drv._get_existing_vol.assert_called_once_with({'source-name': 'volume-567'}) self.drv._verify_volume_to_manage.assert_called_once_with(test_vol) self.drv.zfssa.set_lun_props.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, test_vol['name'], name='volume-123', schema={"custom:cinder_managed": True}) # Case when zfssa_manage_policy is 'loose' and 'cinder_managed' is # set to False.
test_vol.update({'cinder_managed': False}) self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'}, {'source-name': 'volume-567'})) # Another case is when the zfssa_manage_policy is set to 'strict' lcfg.zfssa_manage_policy = 'strict' test_vol.update({'cinder_managed': False}) self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'}, {'source-name': 'volume-567'})) def test_volume_manage_negative(self): lcfg = self.configuration lcfg.zfssa_manage_policy = 'strict' test_vol = self.test_vol if 'cinder_managed' in test_vol: del test_vol['cinder_managed'] self.drv.zfssa.get_lun.return_value = test_vol self.assertRaises(exception.InvalidInput, self.drv.manage_existing, {'name': 'cindervol'}, {'source-name': 'volume-567'}) test_vol.update({'cinder_managed': True}) self.drv.zfssa.get_lun.return_value = test_vol self.assertRaises(exception.ManageExistingAlreadyManaged, self.drv.manage_existing, {'name': 'cindervol'}, {'source-name': 'volume-567'}) test_vol.update({'cinder_managed': False}) self.drv.zfssa.get_lun.return_value = test_vol self.assertRaises(exception.ManageExistingInvalidReference, self.drv.manage_existing, {'name': 'cindervol'}, {'source-id': 'volume-567'}) lcfg.zfssa_manage_policy = 'loose' self.assertRaises(exception.ManageExistingInvalidReference, self.drv.manage_existing, {'name': 'cindervol'}, {'source-id': 'volume-567'}) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_volume_to_manage') def test_volume_manage_negative_api_exception(self, _verify_volume_to_manage): lcfg = self.configuration lcfg.zfssa_manage_policy = 'loose' self.drv.zfssa.get_lun.return_value = self.test_vol self.drv._verify_volume_to_manage.return_value = None self.drv.zfssa.set_lun_props.side_effect = \ exception.VolumeBackendAPIException(data='fake exception') self.assertRaises(exception.VolumeBackendAPIException, self.drv.manage_existing, {'name': 'volume-123'}, {'source-name': 'volume-567'}) def test_volume_unmanage(self): lcfg = self.configuration self.drv.zfssa.set_lun_props.return_value = True self.assertIsNone(self.drv.unmanage({'name': 'volume-123'})) self.drv.zfssa.set_lun_props.assert_called_once_with( lcfg.zfssa_pool, lcfg.zfssa_project, 'volume-123', name='unmanaged-volume-123', schema={"custom:cinder_managed": False}) def test_volume_unmanage_negative(self): self.drv.zfssa.set_lun_props.side_effect = \ exception.VolumeBackendAPIException(data='fake exception') self.assertRaises(exception.VolumeBackendAPIException, self.drv.unmanage, {'name': 'volume-123'}) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol') def test_manage_existing_get_size(self, _get_existing_vol): test_vol = self.test_vol test_vol['size'] = 3 * units.Gi self.drv._get_existing_vol.return_value = test_vol self.assertEqual(3, self.drv.manage_existing_get_size( {'name': 'volume-123'}, {'source-name': 'volume-567'})) @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol') def test_manage_existing_get_size_negative(self, _get_existing_vol): self.drv._get_existing_vol.side_effect = \ exception.VolumeNotFound(volume_id='123') self.assertRaises(exception.VolumeNotFound, self.drv.manage_existing_get_size, {'name': 'volume-123'}, {'source-name': 'volume-567'}) class TestZFSSANFSDriver(test.TestCase): test_vol = { 'name': 'test-vol', 'id': '1', 'size': 3, 'provider_location': 'fakelocation', } test_snap = { 'name': 'cindersnap', 'volume_name': test_vol['name'], 'volume_size': test_vol['size'] } test_vol_snap = { 'name': 'cindersnapvol', 'size': test_vol['size'] } def __init__(self, method): 
super(TestZFSSANFSDriver, self).__init__(method) @mock.patch.object(zfssanfs, 'factory_zfssa') def setUp(self, _factory_zfssa): super(TestZFSSANFSDriver, self).setUp() self._create_fake_config() _factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSANfsApi) self.drv = zfssanfs.ZFSSANFSDriver(configuration=self.configuration) self.drv._execute = fake_utils.fake_execute self.drv.do_setup({}) def _create_fake_config(self): self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.reserved_percentage = 0 self.configuration.max_over_subscription_ratio = 20.0 self.configuration.san_ip = '1.1.1.1' self.configuration.san_login = 'user' self.configuration.san_password = 'passwd' self.configuration.zfssa_data_ip = '2.2.2.2' self.configuration.zfssa_https_port = '443' self.configuration.zfssa_nfs_pool = 'pool' self.configuration.zfssa_nfs_project = 'nfs_project' self.configuration.zfssa_nfs_share = 'nfs_share' self.configuration.zfssa_nfs_share_logbias = nfs_logbias self.configuration.zfssa_nfs_share_compression = nfs_compression self.configuration.zfssa_nfs_mount_options = '' self.configuration.zfssa_rest_timeout = '30' self.configuration.zfssa_enable_local_cache = True self.configuration.zfssa_cache_directory = zfssa_cache_dir self.configuration.nfs_sparsed_volumes = 'true' self.configuration.zfssa_manage_policy = 'strict' def test_migrate_volume(self): self.drv.zfssa.get_asn.return_value = ( '9a2b5a0f-e3af-6d14-9578-8825f229dc89') volume = self.test_vol volume.update({'host': 'fake_host', 'status': 'available', 'name': 'vol-1', 'source_volid': self.test_vol['id']}) loc_info = '9a2b5a0f-e3af-6d14-9578-8825f229dc89:nfs_share' host = {'host': 'stack@zfssa_nfs#fake_zfssa', 'capabilities': {'vendor_name': 'Oracle', 'storage_protocol': 'nfs', 'location_info': loc_info}} ctxt = context.get_admin_context() # Test the normal case result = self.drv.migrate_volume(ctxt, volume, host) self.assertEqual((True, None), result) # Test when volume status is not available volume['status'] = 'in-use' result = self.drv.migrate_volume(ctxt, volume, host) self.assertEqual((False, None), result) volume['status'] = 'available' # Test when vendor is not Oracle host['capabilities']['vendor_name'] = 'elcarO' result = self.drv.migrate_volume(ctxt, volume, host) self.assertEqual((False, None), result) host['capabilities']['vendor_name'] = 'Oracle' # Test when storage protocol is not nfs host['capabilities']['storage_protocol'] = 'not_nfs' result = self.drv.migrate_volume(ctxt, volume, host) self.assertEqual((False, None), result) host['capabilities']['storage_protocol'] = 'nfs' # Test when location_info is incorrect host['capabilities']['location_info'] = '' result = self.drv.migrate_volume(ctxt, volume, host) self.assertEqual((False, None), result) host['capabilities']['location_info'] = loc_info # Test case when source and target asn don't match invalid_loc_info = ( 'fake_asn*https://2.2.2.2:/shares/export/nfs_share*nfs_share') host['capabilities']['location_info'] = invalid_loc_info result = self.drv.migrate_volume(ctxt, volume, host) self.assertEqual((False, None), result) # Test case when source and target shares names are different invalid_loc_info = ( '9a2b5a0f-e3af-6d14-9578-8825f229dc89*' + 'https://tgt:/shares/export/nfs_share*nfs_share_1') host['capabilities']['location_info'] = invalid_loc_info result = self.drv.migrate_volume(ctxt, volume, host) self.assertEqual((False, None), result) def test_create_delete_snapshot(self): lcfg = self.configuration self.drv.create_snapshot(self.test_snap)
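# NFS volumes have no LUNs, so the driver models a volume snapshot as a
# share-level ZFS snapshot plus a WebDAV copy of the backing file; both
# backend calls are asserted below.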
self.drv.zfssa.create_snapshot.assert_called_once_with( lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share, mock.ANY) self.drv.zfssa.create_snapshot_of_volume_file.assert_called_once_with( src_file=mock.ANY, dst_file=self.test_snap['name']) self.drv.delete_snapshot(self.test_snap) self.drv.zfssa.delete_snapshot_of_volume_file.assert_called_with( src_file=self.test_snap['name']) def test_create_volume_from_snapshot(self): self.drv.create_snapshot(self.test_snap) with mock.patch.object(self.drv, '_ensure_shares_mounted'): self.drv.create_volume_from_snapshot(self.test_vol_snap, self.test_snap, method='COPY') self.drv.zfssa.create_volume_from_snapshot_file.\ assert_called_once_with(src_file=self.test_snap['name'], dst_file=self.test_vol_snap['name'], method='COPY') def test_get_volume_stats(self): lcfg = self.configuration self.drv._mounted_shares = ['nfs_share'] with mock.patch.object(self.drv, '_ensure_shares_mounted'): with mock.patch.object(self.drv, '_get_share_capacity_info') as \ mock_get_share_capacity_info: mock_get_share_capacity_info.return_value = (1073741824, 9663676416) self.drv.zfssa.get_pool_details.return_value = \ {"profile": "mirror:log_stripe"} self.drv.zfssa.get_share.return_value = {"compression": "lzjb", "encryption": "off", "logbias": "latency"} stats = self.drv.get_volume_stats(refresh=True) self.drv.zfssa.get_pool_details.assert_called_once_with( lcfg.zfssa_nfs_pool) self.drv.zfssa.get_share.assert_called_with( lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share) self.assertEqual(1, stats['free_capacity_gb']) self.assertEqual(10, stats['total_capacity_gb']) self.assertEqual('mirror:log_stripe', stats['zfssa_poolprofile']) self.assertEqual('lzjb', stats['zfssa_compression']) self.assertEqual('true', stats['zfssa_sparse']) self.assertEqual('off', stats['zfssa_encryption']) self.assertEqual('latency', stats['zfssa_logbias']) self.drv.zfssa.get_pool_details.return_value = \ {"profile": "mirror3"} stats = self.drv.get_volume_stats(refresh=True) self.assertEqual('mirror3', stats['zfssa_poolprofile']) def tearDown(self): super(TestZFSSANFSDriver, self).tearDown() @mock.patch.object(remotefs.RemoteFSDriver, 'delete_volume') @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_check_origin') def test_delete_volume(self, _check_origin, _delete_vol): self.drv.zfssa.get_volume.side_effect = self._get_volume_side_effect self.drv.delete_volume(self.test_vol) _delete_vol.assert_called_once_with(self.test_vol) self.drv._check_origin.assert_called_once_with(img_props_nfs['name']) def _get_volume_side_effect(self, *args, **kwargs): lcfg = self.configuration volname = six.text_type(args[0]) if volname.startswith(lcfg.zfssa_cache_directory): return {'numclones': 0} else: return {'origin': img_props_nfs['name']} def test_check_origin(self): self.drv.zfssa.get_volume.side_effect = self._get_volume_side_effect self.drv._check_origin(img_props_nfs['name']) self.drv.zfssa.delete_file.assert_called_once_with( img_props_nfs['name']) @mock.patch.object(image_utils, 'qemu_img_info') @mock.patch.object(image_utils.TemporaryImages, 'fetch') @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_cache_volume') @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'create_cloned_volume') def test_clone_image_negative(self, _create_clone, _verify_cache_volume, _fetch, _info): _fetch.return_value = mock.MagicMock(spec=utils.get_file_spec()) _info.return_value = ImgInfo(small_img['virtual_size']) # Disabling local cache feature: self.configuration.zfssa_enable_local_cache = False 
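# Returning (None, False) from clone_image tells Cinder that the backend did
# not clone the image, so the generic image-copy path is used instead; every
# negative case below must therefore yield (None, False) rather than raise.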
self.assertEqual((None, False), self.drv.clone_image(fakecontext, self.test_vol, img_location, small_img, img_service)) self.configuration.zfssa_enable_local_cache = True # Creating a volume smaller than image: _info.return_value = ImgInfo(large_img['virtual_size']) self.assertEqual((None, False), self.drv.clone_image(fakecontext, self.test_vol, img_location, large_img, img_service)) # Exception raised in _verify_cache_image _info.return_value = ImgInfo(small_img['virtual_size']) self.drv._verify_cache_volume.side_effect = ( exception.VolumeBackendAPIException('fakeerror')) self.assertEqual((None, False), self.drv.clone_image(fakecontext, self.test_vol, img_location, small_img, img_service)) @mock.patch.object(image_utils, 'qemu_img_info') @mock.patch.object(image_utils.TemporaryImages, 'fetch') @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'create_cloned_volume') @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_cache_volume') @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'extend_volume') def test_clone_image(self, _extend_vol, _verify_cache, _create_clone, _fetch, _info): _fetch.return_value = mock.MagicMock(spec=utils.get_file_spec()) _info.return_value = ImgInfo(small_img['virtual_size']) self.drv._verify_cache_volume.return_value = img_props_nfs['name'] prov_loc = {'provider_location': self.test_vol['provider_location']} self.drv.create_cloned_volume.return_value = prov_loc self.assertEqual((prov_loc, True), self.drv.clone_image(fakecontext, self.test_vol, img_location, small_img, img_service)) self.drv._verify_cache_volume.assert_called_once_with(fakecontext, small_img, img_service, img_props_nfs) cache_vol = { 'name': img_props_nfs['name'], 'size': 3, 'id': small_img['id'], } self.drv.create_cloned_volume.assert_called_once_with(self.test_vol, cache_vol) @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_create_cache_volume') def test_verify_cache_vol_no_cache_vol(self, _create_cache_vol): self.drv.zfssa.get_volume.side_effect = exception.VolumeNotFound( volume_id=img_props_nfs['name']) self.drv._verify_cache_volume(fakecontext, small_img, img_service, img_props_nfs) self.drv._create_cache_volume.assert_called_once_with(fakecontext, small_img, img_service, img_props_nfs) def test_verify_cache_vol_stale_vol(self): self.drv.zfssa.get_volume.return_value = { 'numclones': 5, 'updated_at': small_img['updated_at'].isoformat(), 'image_id': 'wrong_id', } self.assertRaises(exception.VolumeBackendAPIException, self.drv._verify_cache_volume, fakecontext, small_img, img_service, img_props_nfs) @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_create_cache_volume') @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'delete_volume') def test_verify_cache_vol_updated_vol(self, _del_vol, _create_cache_vol): updated_vol = { 'updated_at': date(3000, 12, 12), 'image_id': 'updated_id', 'numclones': 0, } self.drv.zfssa.get_volume.return_value = updated_vol self.drv._verify_cache_volume(fakecontext, small_img, img_service, img_props_nfs) cache_vol = { 'provider_location': mock.ANY, 'name': img_props_nfs['name'], } self.drv.delete_volume.assert_called_once_with(cache_vol) self.drv._create_cache_volume.assert_called_once_with(fakecontext, small_img, img_service, img_props_nfs) @mock.patch.object(remotefs.RemoteFSDriver, 'copy_image_to_volume') @mock.patch.object(remotefs.RemoteFSDriver, 'create_volume') def test_create_cache_volume(self, _create_vol, _copy_image): virtual_size = int(small_img['virtual_size']) volsize = math.ceil(float(virtual_size) / units.Gi) cache_vol = { 'name': img_props_nfs['name'], 'size': volsize, 
'provider_location': mock.ANY, } self.drv._create_cache_volume(fakecontext, small_img, img_service, img_props_nfs) _create_vol.assert_called_once_with(cache_vol) _copy_image.assert_called_once_with(fakecontext, cache_vol, img_service, small_img['id']) def test_create_cache_vol_negative(self): self.drv.zfssa.get_lun.side_effect = ( exception.VolumeBackendAPIException) self.assertRaises(exception.VolumeBackendAPIException, self.drv._create_cache_volume, fakecontext, small_img, img_service, img_props_nfs) self.drv.zfssa.delete_file.assert_called_once_with( img_props_nfs['name']) def test_volume_manage(self): lcfg = self.configuration lcfg.zfssa_manage_policy = 'loose' test_vol = self.test_vol self.drv.zfssa.get_volume.return_value = test_vol self.drv.zfssa.rename_volume.return_value = None self.drv.zfssa.set_file_props.return_value = None self.drv.mount_path = lcfg.zfssa_data_ip + ':' + 'fake_mountpoint' self.assertEqual({'provider_location': self.drv.mount_path}, self.drv.manage_existing({'name': 'volume-123'}, {'source-name': 'volume-567'})) self.drv.zfssa.get_volume.assert_called_once_with('volume-567') self.drv.zfssa.rename_volume.assert_called_once_with('volume-567', 'volume-123') self.drv.zfssa.set_file_props.assert_called_once_with( 'volume-123', {'cinder_managed': 'True'}) # Test when 'zfssa_manage_policy' is set to 'strict'. lcfg.zfssa_manage_policy = 'strict' test_vol.update({'cinder_managed': 'False'}) self.drv.zfssa.get_volume.return_value = test_vol self.assertEqual({'provider_location': self.drv.mount_path}, self.drv.manage_existing({'name': 'volume-123'}, {'source-name': 'volume-567'})) def test_volume_manage_negative_no_source_name(self): self.assertRaises(exception.ManageExistingInvalidReference, self.drv.manage_existing, {'name': 'volume-123'}, {'source-id': 'volume-567'}) def test_volume_manage_negative_backend_exception(self): self.drv.zfssa.get_volume.side_effect = \ exception.VolumeNotFound(volume_id='volume-567') self.assertRaises(exception.InvalidInput, self.drv.manage_existing, {'name': 'volume-123'}, {'source-name': 'volume-567'}) def test_volume_manage_negative_verify_fail(self): lcfg = self.configuration lcfg.zfssa_manage_policy = 'strict' test_vol = self.test_vol test_vol['cinder_managed'] = '' self.drv.zfssa.get_volume.return_value = test_vol self.assertRaises(exception.InvalidInput, self.drv.manage_existing, {'name': 'volume-123'}, {'source-name': 'volume-567'}) test_vol.update({'cinder_managed': 'True'}) self.drv.zfssa.get_volume.return_value = test_vol self.assertRaises(exception.ManageExistingAlreadyManaged, self.drv.manage_existing, {'name': 'volume-123'}, {'source-name': 'volume-567'}) @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_volume_to_manage') def test_volume_manage_negative_rename_fail(self, _verify_volume_to_manage): test_vol = self.test_vol test_vol.update({'cinder_managed': 'False'}) self.drv.zfssa.get_volume.return_value = test_vol self.drv._verify_volume_to_manage.return_value = None self.drv.zfssa.rename_volume.side_effect = \ exception.VolumeBackendAPIException(data="fake exception") self.assertRaises(exception.VolumeBackendAPIException, self.drv.manage_existing, {'name': 'volume-123'}, {'source-name': 'volume-567'}) @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_volume_to_manage') def test_volume_manage_negative_set_prop_fail(self, _verify_volume_to_manage): test_vol = self.test_vol test_vol.update({'cinder_managed': 'False'}) self.drv.zfssa.get_volume.return_value = test_vol self.drv._verify_volume_to_manage.return_value = None 
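# Here rename_volume succeeds but stamping the 'cinder_managed' file
# property fails, so manage_existing is expected to surface the backend
# exception rather than report success.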
self.drv.zfssa.rename_volume.return_value = None self.drv.zfssa.set_file_props.side_effect = \ exception.VolumeBackendAPIException(data="fake exception") self.assertRaises(exception.VolumeBackendAPIException, self.drv.manage_existing, {'name': 'volume-123'}, {'source-name': 'volume-567'}) def test_volume_unmanage(self): test_vol = self.test_vol test_vol.update({'cinder_managed': 'True'}) self.drv.zfssa.rename_volume.return_value = None self.drv.zfssa.set_file_props.return_value = None self.assertIsNone(self.drv.unmanage(test_vol)) new_vol_name = 'unmanaged-' + test_vol['name'] self.drv.zfssa.rename_volume.assert_called_once_with(test_vol['name'], new_vol_name) self.drv.zfssa.set_file_props.assert_called_once_with( new_vol_name, {'cinder_managed': 'False'}) def test_volume_unmanage_negative_rename_fail(self): test_vol = self.test_vol test_vol.update({'cinder_managed': 'True'}) self.drv.zfssa.rename_volume.side_effect = \ exception.VolumeBackendAPIException(data="fake exception") self.drv.zfssa.set_file_props.return_value = None self.assertRaises(exception.VolumeBackendAPIException, self.drv.unmanage, test_vol) def test_volume_unmanage_negative_set_prop_fail(self): test_vol = self.test_vol test_vol.update({'cinder_managed': 'True'}) self.drv.zfssa.rename_volume.return_value = None self.drv.zfssa.set_file_props.side_effect = \ exception.VolumeBackendAPIException(data="fake exception") self.assertRaises(exception.VolumeBackendAPIException, self.drv.unmanage, test_vol) @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_get_mount_point_for_share') def test_manage_existing_get_size(self, _get_mount_point_for_share): self.drv._get_mount_point_for_share.return_value = \ '/fake/mnt/fake_share/' self.drv._mounted_shares = [] self.drv._mounted_shares.append('fake_share') file = mock.Mock(st_size=123 * units.Gi) with mock.patch('os.path.isfile', return_value=True): with mock.patch('os.stat', return_value=file): self.assertEqual(float(file.st_size / units.Gi), self.drv.manage_existing_get_size( {'name': 'volume-123'}, {'source-name': 'volume-567'})) @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_get_mount_point_for_share') def test_manage_existing_get_size_negative(self, _get_mount_point_for_share): self.drv._get_mount_point_for_share.return_value = \ '/fake/mnt/fake_share/' self.drv._mounted_shares = [] self.drv._mounted_shares.append('fake_share') with mock.patch('os.path.isfile', return_value=True): with mock.patch('os.stat', side_effect=OSError): self.assertRaises(exception.VolumeBackendAPIException, self.drv.manage_existing_get_size, {'name': 'volume-123'}, {'source-name': 'volume-567'}) class TestZFSSAApi(test.TestCase): @mock.patch.object(rest, 'factory_restclient') def setUp(self, _restclient): super(TestZFSSAApi, self).setUp() self.host = 'fakehost' self.user = 'fakeuser' self.url = None self.pool = 'fakepool' self.project = 'fakeproject' self.vol = 'fakevol' self.snap = 'fakesnapshot' self.clone = 'fakeclone' self.targetalias = 'fakealias' _restclient.return_value = mock.MagicMock(spec=client.RestClientURL) self.zfssa = rest.ZFSSAApi() self.zfssa.set_host('fakehost') self.pool_url = '/api/storage/v1/pools/' def _create_response(self, status, data='data'): response = FakeResponse(status, data) return response def test_create_project(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.OK) self.zfssa.create_project(self.pool, self.project) expected_svc = self.pool_url + self.pool + '/projects/' + self.project self.zfssa.rclient.get.assert_called_with(expected_svc) def 
test_create_initiator(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.OK) initiator = 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd' alias = 'init-group' self.zfssa.create_initiator(initiator, alias) self.zfssa.rclient.get.assert_called_with( '/api/san/v1/iscsi/initiators/alias=' + alias) def test_create_target(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.NOT_FOUND) ret_val = json.dumps( {'target': {'iqn': 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd'}}) self.zfssa.rclient.post.return_value = self._create_response( client.Status.CREATED, ret_val) alias = 'tgt-group' self.zfssa.create_target(alias) self.zfssa.rclient.post.assert_called_with('/api/san/v1/iscsi/targets', {'alias': alias}) def test_get_target(self): ret_val = json.dumps( {'target': {'href': 'fake_href', 'alias': 'tgt-group', 'iqn': 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd', 'targetchapuser': '', 'targetchapsecret': '', 'interfaces': ['nge0']}}) self.zfssa.rclient.get.return_value = self._create_response( client.Status.OK, ret_val) ret = self.zfssa.get_target('tgt-group') self.zfssa.rclient.get.assert_called_once_with( '/api/san/v1/iscsi/targets/alias=tgt-group') self.assertEqual('iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd', ret) def test_verify_pool(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.OK) self.zfssa.verify_pool(self.pool) self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool) def test_verify_project(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.NOT_FOUND) self.assertRaises(exception.VolumeBackendAPIException, self.zfssa.verify_project, self.pool, self.project) def test_verify_initiator(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.OK) self.zfssa.verify_initiator('iqn.1-0.org.deb:01:d7') self.zfssa.rclient.get.assert_called_with( '/api/san/v1/iscsi/initiators/iqn.1-0.org.deb:01:d7') def test_verify_target(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.BAD_REQUEST) self.assertRaises(exception.VolumeBackendAPIException, self.zfssa.verify_target, self.targetalias) def test_create_delete_lun(self): arg = json.dumps({'name': self.vol, 'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'}) self.zfssa.rclient.post.return_value = self._create_response( client.Status.CREATED, data=arg) self.zfssa.create_lun(self.pool, self.project, self.vol, 1, 'tgt-grp', None) expected_arg = {'name': self.vol, 'volsize': 1, 'targetgroup': 'tgt-grp', 'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'} self.zfssa.rclient.post.assert_called_with( self.pool_url + self.pool + '/projects/' + self.project + '/luns', expected_arg) self.zfssa.rclient.delete.return_value = self._create_response( client.Status.NO_CONTENT) self.zfssa.delete_lun(self.pool, self.project, self.vol) self.zfssa.rclient.delete.assert_called_with( self.pool_url + self.pool + '/projects/' + self.project + '/luns/' + self.vol) def test_create_delete_snapshot(self): self.zfssa.rclient.post.return_value = self._create_response( client.Status.CREATED) self.zfssa.create_snapshot(self.pool, self.project, self.vol, self.snap) expected_arg = {'name': self.snap} self.zfssa.rclient.post.assert_called_with( self.pool_url + self.pool + '/projects/' + self.project + '/luns/' + self.vol + '/snapshots', expected_arg) self.zfssa.rclient.delete.return_value = self._create_response( client.Status.NO_CONTENT) self.zfssa.delete_snapshot(self.pool, 
self.project, self.vol, self.snap) self.zfssa.rclient.delete.assert_called_with( self.pool_url + self.pool + '/projects/' + self.project + '/luns/' + self.vol + '/snapshots/' + self.snap) def test_clone_snapshot(self): self.zfssa.rclient.put.return_value = self._create_response( client.Status.CREATED) self.zfssa.clone_snapshot(self.pool, self.project, self.vol, self.snap, self.project, self.clone) expected_svc = '/api/storage/v1/pools/' + self.pool + '/projects/' + \ self.project + '/luns/' + self.vol + '/snapshots/' + self.snap + \ '/clone' expected_arg = {'project': self.project, 'share': self.clone, 'nodestroy': True} self.zfssa.rclient.put.assert_called_with(expected_svc, expected_arg) def test_get_project_stats(self): ret_val = json.dumps({"project": {"name": self.project, "space_available": 15754895360, "space_total": 25754895360, "dedup": False, "logbias": "latency", "encryption": "off"}}) self.zfssa.rclient.get.return_value = self._create_response( client.Status.OK, ret_val) self.zfssa.get_project_stats(self.pool, self.project) expected_svc = '/api/storage/v1/pools/' + self.pool + '/projects/' + \ self.project self.zfssa.rclient.get.assert_called_with(expected_svc) self.zfssa.rclient.get.return_value = self._create_response( client.Status.NOT_FOUND) self.assertRaises(exception.VolumeBackendAPIException, self.zfssa.get_project_stats, self.pool, self.project) class TestZFSSANfsApi(test.TestCase): @mock.patch.object(rest, 'factory_restclient') def setUp(self, _restclient): super(TestZFSSANfsApi, self).setUp() self.host = 'fakehost' self.user = 'fakeuser' self.url = None self.pool = 'fakepool' self.project = 'fakeproject' self.share = 'fakeshare' self.snap = 'fakesnapshot' self.targetalias = 'fakealias' _restclient.return_value = mock.MagicMock(spec=client.RestClientURL) self.webdavclient = mock.MagicMock(spec=webdavclient.ZFSSAWebDAVClient) self.zfssa = rest.ZFSSANfsApi() self.zfssa.set_host('fakehost') self.pool_url = '/api/storage/v1/pools/' def _create_response(self, status, data='data'): response = FakeResponse(status, data) return response def test_verify_share(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.OK) self.zfssa.verify_share(self.pool, self.project, self.share) self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool + '/projects/' + self.project + '/filesystems/' + self.share) def test_create_delete_snapshot(self): self.zfssa.rclient.post.return_value = self._create_response( client.Status.CREATED) self.zfssa.create_snapshot(self.pool, self.project, self.share, self.snap) expected_arg = {'name': self.snap} self.zfssa.rclient.post.assert_called_with( self.pool_url + self.pool + '/projects/' + self.project + '/filesystems/' + self.share + '/snapshots', expected_arg) self.zfssa.rclient.delete.return_value = self._create_response( client.Status.NO_CONTENT) self.zfssa.delete_snapshot(self.pool, self.project, self.share, self.snap) self.zfssa.rclient.delete.assert_called_with( self.pool_url + self.pool + '/projects/' + self.project + '/filesystems/' + self.share + '/snapshots/' + self.snap) def create_delete_snapshot_of_volume_file(self): src_file = "fake_src_file" dst_file = "fake_dst_file" self.zfssa.create_snapshot_of_volume_file(src_file=src_file, dst_file=dst_file) self.zfssa.webdavclient.request.assert_called_once_with( src_file=src_file, dst_file=dst_file, method='COPY') self.zfssa.delete_snapshot_of_volume_file(src_file=src_file) self.zfssa.webdavclient.request.assert_called_once_with( src_file=src_file, method='DELETE') 
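# For orientation: the ZFSSAWebDAVClient mocked in these tests ultimately
# issues plain HTTP requests with WebDAV verbs (RFC 4918).  The helper below
# is a minimal, hypothetical sketch of such a COPY request using only the
# Python 3 standard library; the name and parameters are illustrative and
# are not part of the driver's API.
def _example_webdav_copy(base_url, src_file, dst_file):
    import urllib.request
    # COPY duplicates the resource named in the URL to the location given in
    # the Destination header.
    req = urllib.request.Request(base_url + src_file, method='COPY')
    req.add_header('Destination', base_url + dst_file)
    # A 201 Created response indicates the server performed the copy.
    return urllib.request.urlopen(req)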
def test_get_share(self): ret_val = json.dumps({'filesystem': 'test_fs'}) self.zfssa.rclient.get.return_value = self._create_response( client.Status.OK, ret_val) ret = self.zfssa.get_share(self.pool, self.project, self.share) self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool + '/projects/' + self.project + '/filesystems/' + self.share) self.assertEqual('test_fs', ret) def test_create_share(self): self.zfssa.rclient.get.return_value = self._create_response( client.Status.NOT_FOUND) self.zfssa.rclient.post.return_value = self._create_response( client.Status.BAD_REQUEST) self.assertRaises(exception.VolumeBackendAPIException, self.zfssa.create_share, self.pool, self.project, self.share, {}) @mock.patch.object(rest.ZFSSANfsApi, '_change_service_state') @mock.patch.object(rest.ZFSSANfsApi, 'verify_service') def test_enable_disable_modify_service(self, verify_service, _change_service_state): self.zfssa.enable_service('http') self.zfssa._change_service_state.assert_called_with( 'http', state='enable') self.zfssa.verify_service.assert_called_with('http') self.zfssa.disable_service('http') self.zfssa._change_service_state.assert_called_with( 'http', state='disable') self.zfssa.verify_service.assert_called_with('http', status='offline') ret_val = json.dumps({'service': { "href": "/api/service/v1/services/http", "<status>": "online", "require_login": False, "protocols": "http/https", "listen_port": 81, "https_port": 443}}) self.zfssa.rclient.put.return_value = self._create_response( client.Status.ACCEPTED, ret_val) args = {'listen_port': 81} self.zfssa.modify_service('http', args) self.zfssa.rclient.put.assert_called_with('/api/service/v1/services/http', args) class TestRestClientURL(test.TestCase): def setUp(self): super(TestRestClientURL, self).setUp() self.timeout = 60 self.url = '1.1.1.1' self.client = client.RestClientURL(self.url, timeout=self.timeout) @mock.patch.object(client.RestClientURL, 'request') def test_post(self, _request): path = '/api/storage/v1/pools' body = {'name': 'fakepool'} self.client.post(path, body=body) self.client.request.assert_called_with(path, 'POST', body) @mock.patch.object(client.RestClientURL, 'request') def test_get(self, _request): path = '/api/storage/v1/pools' self.client.get(path) self.client.request.assert_called_with(path, 'GET') @mock.patch.object(client.RestClientURL, 'request') def test_put(self, _request): path = '/api/storage/v1/pools' body = {'name': 'fakepool'} self.client.put(path, body=body) self.client.request.assert_called_with(path, 'PUT', body) @mock.patch.object(client.RestClientURL, 'request') def test_delete(self, _request): path = '/api/storage/v1/pools' self.client.delete(path) self.client.request.assert_called_with(path, 'DELETE') @mock.patch.object(client.RestClientURL, 'request') def test_head(self, _request): path = '/api/storage/v1/pools' self.client.head(path) self.client.request.assert_called_with(path, 'HEAD') @mock.patch.object(client, 'RestResult') @mock.patch.object(client.urllib.request, 'Request') @mock.patch.object(client.urllib.request, 'urlopen') def test_request(self, _urlopen, _Request, _RestResult): path = '/api/storage/v1/pools' _urlopen.return_value = mock.Mock() self.client.request(path, mock.ANY) _Request.assert_called_with(self.url + path, None, self.client.headers) self.assertEqual(1, _urlopen.call_count) _RestResult.assert_called_with(response=mock.ANY) @mock.patch.object(client, 'RestResult') @mock.patch.object(client.urllib.request, 'Request') @mock.patch.object(client.urllib.request, 'urlopen')
@mock.patch.object(client, 'ssl', new_callable=FakeSSL) def test_ssl_with_context(self, _ssl, _urlopen, _Request, _RestResult): """Test PEP476 certificate opt_out fix. """ path = '/api/storage/v1/pools' _urlopen.return_value = mock.Mock() self.client.request(path, mock.ANY) _urlopen.assert_called_once_with(mock.ANY, timeout=self.timeout, context='fakecontext') @mock.patch.object(client, 'RestResult') @mock.patch.object(client.urllib.request, 'Request') @mock.patch.object(client.urllib.request, 'urlopen') @mock.patch.object(client, 'ssl', new_callable=object) def test_ssl_no_context(self, _ssl, _urlopen, _Request, _RestResult): """Verify the PEP476 fix backward compatibility. """ path = '/api/storage/v1/pools' _urlopen.return_value = mock.Mock() self.client.request(path, mock.ANY) _urlopen.assert_called_once_with(mock.ANY, timeout=self.timeout) cinder-8.0.0/cinder/tests/unit/test_ibm_flashsystem_iscsi.py0000664000567000056710000002263112701406250025506 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Tests for the IBM FlashSystem iSCSI volume driver. """ import mock import six import random from cinder import context from cinder import exception from cinder import test from cinder.tests.unit import test_ibm_flashsystem as fscommon from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import flashsystem_iscsi from cinder.volume import volume_types class FlashSystemManagementSimulator(fscommon.FlashSystemManagementSimulator): def __init__(self): # Default protocol is iSCSI self._protocol = 'iSCSI' self._volumes_list = {} self._hosts_list = {} self._mappings_list = {} self._next_cmd_error = { 'lsnode': '', 'lssystem': '', 'lsmdiskgrp': '' } self._errors = { # CMMVC50000 is a fake error which indicates that command has not # got expected results. This error represents kinds of CLI errors. 
'CMMVC50000': ('', 'CMMVC50000 The command can not be executed ' 'successfully.') } class FlashSystemFakeISCSIDriver(flashsystem_iscsi.FlashSystemISCSIDriver): def __init__(self, *args, **kwargs): super(FlashSystemFakeISCSIDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _ssh(self, cmd, check_exit_code=True): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class FlashSystemISCSIDriverTestCase(test.TestCase): def _set_flag(self, flag, value): group = self.driver.configuration.config_group self.driver.configuration.set_override(flag, value, group) def _reset_flags(self): self.driver.configuration.local_conf.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) def _generate_vol_info(self, vol_name, vol_size=10, vol_status='available'): rand_id = six.text_type(random.randint(10000, 99999)) if not vol_name: vol_name = 'test_volume%s' % rand_id return {'name': vol_name, 'size': vol_size, 'id': '%s' % rand_id, 'volume_type_id': None, 'status': vol_status, 'mdisk_grp_name': 'mdiskgrp0'} def _generate_snap_info(self, vol_name, vol_id, vol_size, vol_status, snap_status='available'): rand_id = six.text_type(random.randint(10000, 99999)) return {'name': 'test_snap_%s' % rand_id, 'id': rand_id, 'volume': {'name': vol_name, 'id': vol_id, 'size': vol_size, 'status': vol_status}, 'volume_size': vol_size, 'status': snap_status, 'mdisk_grp_name': 'mdiskgrp0'} def setUp(self): super(FlashSystemISCSIDriverTestCase, self).setUp() self._def_flags = {'san_ip': 'hostname', 'san_login': 'username', 'san_password': 'password', 'flashsystem_connection_protocol': 'iSCSI', 'flashsystem_multihostmap_enabled': True, 'iscsi_ip_address': '192.168.1.10', 'flashsystem_iscsi_portid': 1} self.connector = { 'host': 'flashsystem', 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], 'wwpns': ['abcd000000000001', 'abcd000000000002'], 'initiator': 'iqn.123456'} self.sim = FlashSystemManagementSimulator() self.driver = FlashSystemFakeISCSIDriver( configuration=conf.Configuration(None)) self.driver.set_fake_storage(self.sim) self._reset_flags() self.ctxt = context.get_admin_context() self.driver.do_setup(None) self.driver.check_for_setup_error() self.sleeppatch = mock.patch('eventlet.greenthread.sleep') self.sleeppatch.start() def tearDown(self): self.sleeppatch.stop() super(FlashSystemISCSIDriverTestCase, self).tearDown() def test_flashsystem_do_setup(self): # case 1: set as iSCSI self.sim.set_protocol('iSCSI') self._set_flag('flashsystem_connection_protocol', 'iSCSI') self.driver.do_setup(None) self.assertEqual('iSCSI', self.driver._protocol) # clear environment self.sim.set_protocol('iSCSI') self._reset_flags() def test_flashsystem_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'foo'} conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} protocol = self.driver._protocol # case 1: when protocol is iSCSI self.driver._protocol = 'iSCSI' self.driver.validate_connector(conn_iscsi) self.driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.driver.validate_connector, conn_neither) # clear environment self.driver._protocol = protocol def test_flashsystem_connection(self): # case 1: initialize_connection/terminate_connection with iSCSI self.sim.set_protocol('iSCSI') self._set_flag('flashsystem_connection_protocol', 'iSCSI') self.driver.do_setup(None) vol1 = self._generate_vol_info(None) 
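# The calls that follow drive a full create/attach/detach cycle against the
# management simulator; an unexpected command would surface as the fake
# CMMVC50000 CLI error defined in the simulator above.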
self.driver.create_volume(vol1) self.driver.initialize_connection(vol1, self.connector) self.driver.terminate_connection(vol1, self.connector) # clear environment self.driver.delete_volume(vol1) self.sim.set_protocol('iSCSI') self._reset_flags() def test_flashsystem_create_host(self): # case 1: create host with iqn self.sim.set_protocol('iSCSI') self._set_flag('flashsystem_connection_protocol', 'iSCSI') self.driver.do_setup(None) conn = { 'host': 'flashsystem', 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], 'wwpns': ['abcd000000000001', 'abcd000000000002'], 'initiator': 'iqn.123456'} host = self.driver._create_host(conn) # case 2: delete host self.driver._delete_host(host) # clear environment self.sim.set_protocol('iSCSI') self._reset_flags() def test_flashsystem_get_vdisk_params(self): # case 1: use default params self.driver._get_vdisk_params(None) # case 2: use extra params from type opts1 = {'storage_protocol': 'iSCSI'} opts2 = {'capabilities:storage_protocol': 'iSCSI'} opts3 = {'storage_protocol': 'FC'} type1 = volume_types.create(self.ctxt, 'opts1', opts1) type2 = volume_types.create(self.ctxt, 'opts2', opts2) type3 = volume_types.create(self.ctxt, 'opts3', opts3) self.assertEqual( 'iSCSI', self.driver._get_vdisk_params(type1['id'])['protocol']) self.assertEqual( 'iSCSI', self.driver._get_vdisk_params(type2['id'])['protocol']) self.assertRaises(exception.InvalidInput, self.driver._get_vdisk_params, type3['id']) # clear environment volume_types.destroy(self.ctxt, type1['id']) volume_types.destroy(self.ctxt, type2['id']) volume_types.destroy(self.ctxt, type3['id']) def test_flashsystem_map_vdisk_to_host(self): # case 1: no host found vol1 = self._generate_vol_info(None) self.driver.create_volume(vol1) self.assertEqual( # lun id should begin with 1 1, self.driver._map_vdisk_to_host(vol1['name'], self.connector)) # case 2: host already exists vol2 = self._generate_vol_info(None) self.driver.create_volume(vol2) self.assertEqual( # lun id should be sequential 2, self.driver._map_vdisk_to_host(vol2['name'], self.connector)) # case 3: test if already mapped self.assertEqual( 1, self.driver._map_vdisk_to_host(vol1['name'], self.connector)) # clean environment self.driver._unmap_vdisk_from_host(vol1['name'], self.connector) self.driver._unmap_vdisk_from_host(vol2['name'], self.connector) self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) # case 4: If there is no vdisk mapped to host, host should be removed self.assertIsNone(self.driver._get_host_from_connector(self.connector)) cinder-8.0.0/cinder/tests/unit/runtime_conf.py0000664000567000056710000000153112701406250022550 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
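# The module below registers a single test option with oslo.config.  Once
# registered, the option is read as an attribute of the global CONF object;
# illustrative usage (not part of this module):
#
#     from cinder.tests.unit import runtime_conf
#     assert runtime_conf.CONF.runtime_answer == 54
#
# A value parsed from the command line or a configuration file would
# override the default of 54.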
from oslo_config import cfg CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test conf')) cinder-8.0.0/cinder/tests/unit/test_netapp.py0000664000567000056710000016536512701406250022426 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for NetApp volume driver.""" from lxml import etree import mock import six from six.moves import BaseHTTPServer from six.moves import http_client from cinder import exception from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes from cinder.volume import configuration as conf from cinder.volume.drivers.netapp import common from cinder.volume.drivers.netapp.dataontap import block_7mode from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.volume.drivers.netapp.dataontap.client import client_7mode from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap import ssc_cmode from cinder.volume.drivers.netapp import options from cinder.volume.drivers.netapp import utils FAKE_CONNECTION_HTTP = { 'transport_type': 'http', 'username': 'admin', 'password': 'pass', 'hostname': '127.0.0.1', 'port': None, 'vserver': 'openstack', } def create_configuration(): configuration = conf.Configuration(None) configuration.append_config_values(options.netapp_connection_opts) configuration.append_config_values(options.netapp_transport_opts) configuration.append_config_values(options.netapp_basicauth_opts) configuration.append_config_values(options.netapp_cluster_opts) configuration.append_config_values(options.netapp_7mode_opts) configuration.append_config_values(options.netapp_provisioning_opts) return configuration class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """HTTP handler that doesn't spam the log.""" def log_message(self, format, *args): pass class FakeHttplibSocket(object): """A fake socket implementation for http_client.HTTPResponse.""" def __init__(self, value): self._rbuffer = six.BytesIO(value) self._wbuffer = six.BytesIO() oldclose = self._wbuffer.close def newclose(): self.result = self._wbuffer.getvalue() oldclose() self._wbuffer.close = newclose def makefile(self, mode, *args): """Returns the socket's internal buffer""" if mode == 'r' or mode == 'rb': return self._rbuffer if mode == 'w' or mode == 'wb': return self._wbuffer def close(self): pass RESPONSE_PREFIX_DIRECT_CMODE = b""" """ RESPONSE_PREFIX_DIRECT_7MODE = b""" """ RESPONSE_PREFIX_DIRECT = b""" """ RESPONSE_SUFFIX_DIRECT = b"""""" class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler): """HTTP handler that fakes enough stuff to allow the driver to run.""" def do_GET(s): """Respond 
to a GET request.""" if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return s.send_response(200) s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() out = s.wfile out.write('' '') def do_POST(s): # noqa """Respond to a POST request.""" if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return request_xml = s.rfile.read(int(s.headers['Content-Length'])) root = etree.fromstring(request_xml) body = [x for x in root.iterchildren()] request = body[0] tag = request.tag api = etree.QName(tag).localname or tag if 'lun-get-iter' == api: tag = \ FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') if tag is None: body = """ indeterminate 512 1354536362 false true falselinux true/vol/navneet/lun1 0 false2FfGI$APyN68 none20971520 0false 0 cec1f3d7-3d41-11e2-9cf4-123478563412 navneetben_vserver <lun-get-iter-key-td> <key-0>ben_vserver</key-0> <key-1>/vol/navneet/lun2</key-1> <key-2>navneet</key-2> <key-3></key-3> <key-4>lun2</key-4> </lun-get-iter-key-td> 1""" else: body = """ indeterminate 512 1354536362 false true falselinux true/vol/navneet/lun3 0 false2FfGI$APyN68 none20971520 0false 0 cec1f3d7-3d41-11e2-9cf4-123478563412 navneetben_vserver 1""" elif 'volume-get-iter' == api: tag = \ FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') if tag is None: body = """ iscsi Openstack 214748364 true falseonline nfsvol openstack 247483648 true falseonline <volume-get-iter-key-td> <key-0>openstack</key-0> <key-1>nfsvol</key-1> </volume-get-iter-key-td> 2""" else: body = """ iscsi Openstack 4147483648 true falseonline nfsvol openstack 8147483648 true falseonline 2""" elif 'lun-create-by-size' == api: body = """ 22020096""" elif 'lun-destroy' == api: body = """""" elif 'igroup-get-iter' == api: init_found = True query = FakeDirectCMODEServerHandler._get_child_by_name(request, 'query') if query is not None: igroup_info = FakeDirectCMODEServerHandler._get_child_by_name( query, 'initiator-group-info') if igroup_info is not None: inits = FakeDirectCMODEServerHandler._get_child_by_name( igroup_info, 'initiators') if inits is not None: init_info = \ FakeDirectCMODEServerHandler._get_child_by_name( inits, 'initiator-info') init_name = \ FakeDirectCMODEServerHandler._get_child_content( init_info, 'initiator-name') if init_name == 'iqn.1993-08.org.debian:01:10': init_found = True else: init_found = False if init_found: tag = \ FakeDirectCMODEServerHandler._get_child_by_name( request, 'tag') if tag is None: body = """ openstack-01f5297b-00f7-4170-bf30-69b1314b2118 windows iscsi iqn.1993-08.org.debian:01:10 openstack <igroup-get-iter-key-td> <key-0>openstack</key-0> <key-1> openstack-01f5297b-00f7-4170-bf30-69b1314b2118< /key-1> </igroup-get-iter-key-td> 1""" else: body = """ openstack-01f5297b-00f7-4170-bf30-69b1314b2118 linux iscsi iqn.1993-08.org.debian:01:10 openstack 1""" else: body = """ 0 """ elif 'lun-map-get-iter' == api: tag = \ FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') if tag is None: body = """ openstack-44c5e7e1-3306-4800-9623-259e57d56a83 948ae304-06e9-11e2 0 5587e563-06e9-11e2-9cf4-123478563412 /vol/openvol/lun1 openstack <lun-map-get-iter-key-td> <key-0>openstack</key-0> <key-1>openstack-01f5297b-00f7-4170-bf30-69b1314b2118< /key-1> </lun-map-get-iter-key-td> 1 """ else: body = """ openstack-44c5e7e1-3306-4800-9623-259e57d56a83 948ae304-06e9-11e2 0 5587e563-06e9-11e2-9cf4-123478563412 /vol/openvol/lun1 openstack 1 """ elif 'lun-map' 
== api: body = """1 """ elif 'lun-get-geometry' == api: body = """256 512 3221225472512 2147483648 256""" elif 'iscsi-service-get-iter' == api: body = """ openstack true iqn.1992-08.com.netapp:sn.fa9:vs.105 openstack 1""" elif 'iscsi-interface-get-iter' == api: body = """ fas3170rre-cmode-01 e1b-1165 iscsi_data_if 10.63.165.216 3260true 5 iscsi_data_if 1038 openstack 1""" elif 'igroup-create' == api: body = """""" elif 'igroup-add' == api: body = """""" elif 'clone-create' == api: body = """""" elif 'lun-unmap' == api: body = """""" elif 'system-get-ontapi-version' == api: body = """ 1 19 """ elif 'vserver-get-iter' == api: body = """ vserver node 1""" elif 'ems-autosupport-log' == api: body = """""" elif 'lun-resize' == api: body = """""" elif 'lun-get-geometry' == api: body = """ 1 2 8 2 4 5 """ elif 'volume-options-list-info' == api: body = """ """ elif 'lun-move' == api: body = """""" else: # Unknown API s.send_response(500) s.end_headers return s.send_response(200) s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE) s.wfile.write(RESPONSE_PREFIX_DIRECT) if isinstance(body, six.text_type): body = body.encode('utf-8') s.wfile.write(body) s.wfile.write(RESPONSE_SUFFIX_DIRECT) @staticmethod def _get_child_by_name(self, name): for child in self.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return child return None @staticmethod def _get_child_content(self, name): """Get the content of the child.""" for child in self.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return child.text return None class FakeDirectCmodeHTTPConnection(object): """A fake http_client.HTTPConnection for netapp tests Requests made via this connection actually get translated and routed into the fake direct handler above, we then turn the response into the http_client.HTTPResponse that the caller expects. 
""" def __init__(self, host, timeout=None): self.host = host def request(self, method, path, data=None, headers=None): if not headers: headers = {} req_str = '%s %s HTTP/1.1\r\n' % (method, path) for key, value in headers.items(): req_str += "%s: %s\r\n" % (key, value) if isinstance(req_str, six.text_type): req_str = req_str.encode('latin1') if data: req_str += b'\r\n' + data # NOTE(vish): normally the http transport normalizes from unicode sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) # NOTE(vish): stop the server from trying to look up address from # the fake socket FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1' self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None) self.sock = FakeHttplibSocket(sock.result) self.http_response = http_client.HTTPResponse(self.sock) def set_debuglevel(self, level): pass def getresponse(self): self.http_response.begin() return self.http_response def getresponsebody(self): return self.sock.result def close(self): pass class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase): """Test case for NetAppISCSIDriver""" volume = {'name': 'lun1', 'size': 2, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'lun1', 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None, 'host': 'hostname@backend#vol1'} snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1', 'volume_size': 2, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} snapshot_fail = {'name': 'snapshot2', 'size': 2, 'volume_name': 'lun1', 'volume_size': 1, 'project_id': 'project'} volume_sec = {'name': 'vol_snapshot', 'size': 2, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'lun1', 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} volume_clone = {'name': 'cl_sm', 'size': 3, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'cl_sm', 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} volume_clone_large = {'name': 'cl_lg', 'size': 6, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'cl_lg', 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None} connector = {'initiator': 'iqn.1993-08.org.debian:01:10'} vol_fail = {'name': 'lun_fail', 'size': 10000, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'lun1', 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None, 'host': 'hostname@backend#vol1'} vol1 = ssc_cmode.NetAppVolume('lun1', 'openstack') vol1.state['vserver_root'] = False vol1.state['status'] = 'online' vol1.state['junction_active'] = True vol1.space['size_avl_bytes'] = '4000000000' vol1.space['size_total_bytes'] = '5000000000' vol1.space['space-guarantee-enabled'] = False vol1.space['space-guarantee'] = 'file' vol1.space['thin_provisioned'] = True vol1.mirror['mirrored'] = True vol1.qos['qos_policy_group'] = None vol1.aggr['name'] = 'aggr1' vol1.aggr['junction'] = '/vola' vol1.sis['dedup'] = True vol1.sis['compression'] = True vol1.aggr['raid_type'] = 'raiddp' vol1.aggr['ha_policy'] = 'cfo' vol1.aggr['disk_type'] = 'SSD' ssc_map = {'mirrored': set([vol1]), 'dedup': set([vol1]), 'compression': 
set([vol1]), 'thin': set([vol1]), 'all': set([vol1])} def setUp(self): super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp() self._custom_setup() def _custom_setup(self): self.stubs.Set( ssc_cmode, 'refresh_cluster_ssc', lambda a, b, c, synchronous: None) self.mock_object(utils, 'OpenStackInfo') self.mock_object(perf_7mode, 'Performance7modeLibrary') self.mock_object(client_base.Client, '_init_ssh_client') configuration = self._set_config(create_configuration()) driver = common.NetAppDriver(configuration=configuration) self.stubs.Set(http_client, 'HTTPConnection', FakeDirectCmodeHTTPConnection) driver.do_setup(context='') self.driver = driver self.mock_object(self.driver.library.zapi_client, '_init_ssh_client') self.driver.ssc_vols = self.ssc_map def _set_config(self, configuration): configuration.netapp_storage_protocol = 'iscsi' configuration.netapp_login = 'admin' configuration.netapp_password = 'pass' configuration.netapp_server_hostname = '127.0.0.1' configuration.netapp_transport_type = 'http' configuration.netapp_server_port = None configuration.netapp_vserver = 'openstack' return configuration def test_connect(self): self.driver.library.zapi_client = mock.MagicMock() self.driver.library.zapi_client.get_ontapi_version.return_value = \ (1, 20) self.mock_object(block_cmode.NetAppBlockStorageCmodeLibrary, '_get_filtered_pools', mock.Mock(return_value=fakes.FAKE_CMODE_POOLS)) self.driver.check_for_setup_error() def test_do_setup_all_default(self): self.mock_object(utils, 'OpenStackInfo') configuration = self._set_config(create_configuration()) driver = common.NetAppDriver(configuration=configuration) mock_client = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') mock_client.assert_called_with(**FAKE_CONNECTION_HTTP) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) def test_do_setup_http_default_port(self): self.mock_object(utils, 'OpenStackInfo') configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'http' driver = common.NetAppDriver(configuration=configuration) mock_client = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') mock_client.assert_called_with(**FAKE_CONNECTION_HTTP) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) def test_do_setup_https_default_port(self): self.mock_object(utils, 'OpenStackInfo') configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'https' driver = common.NetAppDriver(configuration=configuration) driver.library._get_root_volume_name = mock.Mock() mock_client = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') FAKE_CONNECTION_HTTPS = dict(FAKE_CONNECTION_HTTP, transport_type='https') mock_client.assert_called_with(**FAKE_CONNECTION_HTTPS) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) def test_do_setup_http_non_default_port(self): self.mock_object(utils, 'OpenStackInfo') configuration = self._set_config(create_configuration()) configuration.netapp_server_port = 81 driver = common.NetAppDriver(configuration=configuration) mock_client = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') FAKE_CONNECTION_HTTP_PORT = dict(FAKE_CONNECTION_HTTP, 
port=81) mock_client.assert_called_with(**FAKE_CONNECTION_HTTP_PORT) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) def test_do_setup_https_non_default_port(self): self.mock_object(utils, 'OpenStackInfo') configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'https' configuration.netapp_server_port = 446 driver = common.NetAppDriver(configuration=configuration) driver.library._get_root_volume_name = mock.Mock() mock_client = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') FAKE_CONNECTION_HTTPS_PORT = dict(FAKE_CONNECTION_HTTP, port=446, transport_type='https') mock_client.assert_called_with(**FAKE_CONNECTION_HTTPS_PORT) def test_create_destroy(self): self.driver.create_volume(self.volume) self.driver.delete_volume(self.volume) def test_create_vol_snapshot_destroy(self): self.driver.create_volume(self.volume) self.mock_object(client_7mode.Client, '_check_clone_status') self.mock_object(self.driver.library, '_clone_lun') self.driver.create_snapshot(self.snapshot) self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot) self.driver.delete_snapshot(self.snapshot) self.driver.delete_volume(self.volume) def test_map_unmap(self): self.mock_object(client_cmode.Client, 'get_igroup_by_initiators') self.mock_object(client_cmode.Client, 'get_iscsi_target_details') self.mock_object(client_cmode.Client, 'get_iscsi_service_details') self.mock_object(self.driver.library, '_get_or_create_igroup') self.mock_object(self.driver.library, '_map_lun') self.mock_object(self.driver.library, '_unmap_lun') FAKE_PREFERRED_TARGET = {'address': 'http://host:8080', 'port': 80} FAKE_CONN_PROPERTIES = {'driver_volume_type': 'iscsi', 'data': 'test'} self.mock_object(self.driver.library, '_get_preferred_target_from_list', mock.Mock(return_value=FAKE_PREFERRED_TARGET)) self.mock_object(common.na_utils, 'get_iscsi_connection_properties', mock.Mock(return_value=FAKE_CONN_PROPERTIES)) self.mock_object(client_cmode.Client, 'get_operational_network_interface_addresses', mock.Mock(return_value=[])) self.driver.create_volume(self.volume) updates = self.driver.create_export(None, self.volume, {}) self.assertTrue(updates['provider_location']) self.volume['provider_location'] = updates['provider_location'] connection_info = self.driver.initialize_connection(self.volume, self.connector) self.assertEqual('iscsi', connection_info['driver_volume_type']) properties = connection_info['data'] if not properties: raise AssertionError('Target portal is none') self.driver.terminate_connection(self.volume, self.connector) self.driver.delete_volume(self.volume) def test_cloned_volume_destroy(self): self.driver.create_volume(self.volume) self.mock_object(self.driver.library, '_clone_lun') self.driver.create_cloned_volume(self.snapshot, self.volume) self.driver.delete_volume(self.snapshot) self.driver.delete_volume(self.volume) def test_map_by_creating_igroup(self): FAKE_IGROUP_INFO = {'initiator-group-name': 'debian', 'initiator-group-os-type': 'linux', 'initiator-group-type': 'igroup'} FAKE_PREFERRED_TARGET = {'address': 'http://host:8080', 'port': 80} FAKE_CONN_PROPERTIES = {'driver_volume_type': 'iscsi', 'data': 'test'} self.mock_object(client_cmode.Client, 'get_igroup_by_initiators', mock.Mock(return_value=[FAKE_IGROUP_INFO])) self.mock_object(client_cmode.Client, 'get_operational_network_interface_addresses', mock.Mock(return_value=[])) 
self.mock_object(client_cmode.Client, 'get_iscsi_target_details') self.mock_object(client_cmode.Client, 'get_iscsi_service_details') self.mock_object(self.driver.library, '_get_preferred_target_from_list', mock.Mock(return_value=FAKE_PREFERRED_TARGET)) self.mock_object(common.na_utils, 'get_iscsi_connection_properties', mock.Mock(return_value=FAKE_CONN_PROPERTIES)) self.driver.create_volume(self.volume) updates = self.driver.create_export(None, self.volume, {}) self.assertTrue(updates['provider_location']) self.volume['provider_location'] = updates['provider_location'] connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'} connection_info = self.driver.initialize_connection(self.volume, connector_new) self.assertEqual('iscsi', connection_info['driver_volume_type']) properties = connection_info['data'] if not properties: raise AssertionError('Target portal is none') def test_vol_stats(self): self.mock_object(client_base.Client, 'provide_ems') mock_update_vol_stats = self.mock_object(self.driver.library, '_update_volume_stats') self.driver.get_volume_stats(refresh=True) self.assertEqual(mock_update_vol_stats.call_count, 1) def test_create_vol_snapshot_diff_size_resize(self): self.driver.create_volume(self.volume) self.mock_object(self.driver.library, '_clone_source_to_destination') self.mock_object(self.driver.library, '_clone_lun') self.driver.create_snapshot(self.snapshot) self.driver.create_volume_from_snapshot( self.volume_clone, self.snapshot) self.driver.delete_snapshot(self.snapshot) self.driver.delete_volume(self.volume) def test_create_vol_snapshot_diff_size_subclone(self): self.driver.create_volume(self.volume) self.mock_object(self.driver.library, '_clone_lun') self.mock_object(self.driver.library, '_clone_source_to_destination') self.driver.create_snapshot(self.snapshot) self.driver.create_volume_from_snapshot( self.volume_clone_large, self.snapshot) self.driver.delete_snapshot(self.snapshot) self.driver.delete_volume(self.volume) def test_extend_vol_same_size(self): self.driver.create_volume(self.volume) self.driver.extend_volume(self.volume, self.volume['size']) def test_extend_vol_direct_resize(self): self.mock_object(self.driver.library.zapi_client, 'get_lun_geometry', mock.Mock(return_value=None)) self.mock_object(self.driver.library, '_do_sub_clone_resize') self.driver.create_volume(self.volume) self.driver.extend_volume(self.volume, 3) def test_extend_vol_sub_lun_clone(self): self.mock_object(self.driver.library.zapi_client, 'get_lun_geometry', mock.Mock(return_value=None)) self.mock_object(self.driver.library, '_do_sub_clone_resize') self.driver.create_volume(self.volume) self.driver.extend_volume(self.volume, 4) class NetAppDriverNegativeTestCase(test.TestCase): """Test case for NetAppDriver""" def setUp(self): super(NetAppDriverNegativeTestCase, self).setUp() def test_incorrect_family(self): self.mock_object(utils, 'OpenStackInfo') configuration = create_configuration() configuration.netapp_storage_family = 'xyz_abc' try: common.NetAppDriver(configuration=configuration) raise AssertionError('Wrong storage family is getting accepted.') except exception.InvalidInput: pass def test_incorrect_protocol(self): self.mock_object(utils, 'OpenStackInfo') configuration = create_configuration() configuration.netapp_storage_family = 'ontap' configuration.netapp_storage_protocol = 'ontap' try: common.NetAppDriver(configuration=configuration) raise AssertionError('Wrong storage protocol is getting accepted.') except exception.InvalidInput: pass class 
FakeDirect7MODEServerHandler(FakeHTTPRequestHandler): """HTTP handler that fakes enough stuff to allow the driver to run.""" def do_GET(s): """Respond to a GET request.""" if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return s.send_response(200) s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() out = s.wfile out.write('' '') def do_POST(s): """Respond to a POST request.""" if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return request_xml = s.rfile.read(int(s.headers['Content-Length'])) root = etree.fromstring(request_xml) body = [x for x in root.iterchildren()] request = body[0] tag = request.tag api = etree.QName(tag).localname or tag if 'lun-list-info' == api: body = """ false false /vol/vol1/lun1 20971520 true false false false none linux e867d844-c2c0-11e0-9282-00a09825b3b5 P3lgP4eTyaNl 512 true 0 indeterminate /vol/vol1/lun1 20971520 true false false false none linux 8e1e9284-c288-11e0-9282-00a09825b3b5 P3lgP4eTc3lp 512 true 0 indeterminate """ elif 'volume-list-info' == api: body = """ vol0 019c8f7a-9243-11e0-9281-00a09825b3b5 flex 32_bit online 576914493440 13820354560 563094110208 2 20 140848264 0 0 0 0 20907162 7010 518 31142 31142 0 false aggr0 disabled idle idle for 70:36:44 regular sun-sat@0 Mon Aug 8 09:34:15 EST 2011 Mon Aug 8 09:34:15 EST 2011 0 0 0 0 0 0 0 0 0 0 false volume true 14 raid_dp,sis block true false false false false unmirrored 3 1 /aggr0/plex0 true false vol1 2d50ecf4-c288-11e0-9282-00a09825b3b5 flex 32_bit online 42949672960 44089344 42905583616 0 20 10485760 8192 8192 0 0 1556480 110 504 31142 31142 0 false aggr1 disabled idle idle for 89:19:59 regular sun-sat@0 Sun Aug 7 14:51:00 EST 2011 Sun Aug 7 14:51:00 EST 2011 0 0 0 0 0 0 0 0 0 0 false volume true 7 raid4,sis block true false false false false unmirrored 2 1 /aggr1/plex0 true false """ elif 'volume-options-list-info' == api: body = """ snapmirrored off root false ha_policy cfo striping not_striped compression off """ elif 'lun-create-by-size' == api: body = """ 22020096""" elif 'lun-destroy' == api: body = """""" elif 'igroup-list-info' == api: body = """ openstack-8bc96490 iscsi b8e1d274-c378-11e0 linux 0 false false false true iqn.1993-08.org.debian:01:10 iscsi_group iscsi ccb8cbe4-c36f linux 0 false false false true iqn.1993-08.org.debian:01:10ca """ elif 'lun-map-list-info' == api: body = """ """ elif 'lun-map' == api: body = """1 """ elif 'iscsi-node-get-name' == api: body = """ iqn.1992-08.com.netapp:sn.135093938 """ elif 'iscsi-portal-list-info' == api: body = """ 10.61.176.156 3260 1000 e0a """ elif 'igroup-create' == api: body = """""" elif 'igroup-add' == api: body = """""" elif 'clone-start' == api: body = """ 2d50ecf4-c288-11e0-9282-00a09825b3b5 11 """ elif 'clone-list-status' == api: body = """ completed """ elif 'lun-unmap' == api: body = """""" elif 'system-get-ontapi-version' == api: body = """ 1 8 """ elif 'lun-set-space-reservation-info' == api: body = """""" elif 'ems-autosupport-log' == api: body = """""" elif 'lun-resize' == api: body = """""" elif 'lun-get-geometry' == api: body = """ 1 2 8 2 4 5 """ elif 'volume-options-list-info' == api: body = """ """ elif 'lun-move' == api: body = """""" else: # Unknown API s.send_response(500) s.end_headers return s.send_response(200) s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() s.wfile.write(RESPONSE_PREFIX_DIRECT_7MODE) s.wfile.write(RESPONSE_PREFIX_DIRECT) if isinstance(body, 
six.text_type): body = body.encode('utf-8') s.wfile.write(body) s.wfile.write(RESPONSE_SUFFIX_DIRECT) class FakeDirect7modeHTTPConnection(object): """A fake http_client.HTTPConnection for netapp tests. Requests made via this connection actually get translated and routed into the fake direct handler above; we then turn the response into the http_client.HTTPResponse that the caller expects. """ def __init__(self, host, timeout=None): self.host = host def request(self, method, path, data=None, headers=None): if not headers: headers = {} req_str = '%s %s HTTP/1.1\r\n' % (method, path) for key, value in headers.items(): req_str += "%s: %s\r\n" % (key, value) if isinstance(req_str, six.text_type): req_str = req_str.encode('latin1') if data: req_str += b'\r\n' + data # NOTE(vish): normally the http transport normalizes from unicode sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) # NOTE(vish): stop the server from trying to look up address from # the fake socket FakeDirect7MODEServerHandler.address_string = lambda x: '127.0.0.1' self.app = FakeDirect7MODEServerHandler(sock, '127.0.0.1:80', None) self.sock = FakeHttplibSocket(sock.result) self.http_response = http_client.HTTPResponse(self.sock) def set_debuglevel(self, level): pass def getresponse(self): self.http_response.begin() return self.http_response def getresponsebody(self): return self.sock.result def close(self): pass class NetAppDirect7modeISCSIDriverTestCase_NV(test.TestCase): """Test case for NetAppISCSIDriver without vfiler.""" volume = { 'name': 'lun1', 'size': 2, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'lun1', 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'lun1', 'volume_type_id': None, 'host': 'hostname@backend#vol1', } def setUp(self): super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp() self._custom_setup() def _custom_setup(self): self.mock_object(utils, 'OpenStackInfo') configuration = self._set_config(create_configuration()) driver = common.NetAppDriver(configuration=configuration) self.mock_object(client_base.Client, '_init_ssh_client') self.stubs.Set(http_client, 'HTTPConnection', FakeDirect7modeHTTPConnection) self.mock_object(driver.library, '_get_root_volume_name', mock.Mock( return_value='root')) self.mock_object(perf_7mode, 'Performance7modeLibrary') driver.do_setup(context='') driver.root_volume_name = 'root' self.driver = driver def _set_config(self, configuration): configuration.netapp_storage_family = 'ontap_7mode' configuration.netapp_storage_protocol = 'iscsi' configuration.netapp_login = 'admin' configuration.netapp_password = 'pass' configuration.netapp_server_hostname = '127.0.0.1' configuration.netapp_transport_type = 'http' configuration.netapp_server_port = None return configuration def test_create_on_select_vol(self): self.driver.volume_list = ['vol0', 'vol1'] self.driver.create_volume(self.volume) self.driver.delete_volume(self.volume) self.driver.volume_list = [] def test_connect(self): self.driver.library.zapi_client = mock.MagicMock() self.driver.library.zapi_client.get_ontapi_version.\ return_value = (1, 20) self.mock_object(block_7mode.NetAppBlockStorage7modeLibrary, '_get_filtered_pools', mock.Mock(return_value=fakes.FAKE_7MODE_POOLS)) self.driver.check_for_setup_error() def test_check_for_setup_error_version(self): drv = self.driver self.mock_object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=None)) # check that an exception is raised when the version is not found
self.assertRaises(exception.VolumeBackendAPIException, drv.check_for_setup_error) self.mock_object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 8))) # check that an exception is raised when the version is not supported self.assertRaises(exception.VolumeBackendAPIException, drv.check_for_setup_error) class NetAppDirect7modeISCSIDriverTestCase_WV( NetAppDirect7modeISCSIDriverTestCase_NV): """Test case for NetAppISCSIDriver with vfiler.""" def setUp(self): super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp() def _custom_setup(self): self.mock_object(utils, 'OpenStackInfo') configuration = self._set_config(create_configuration()) driver = common.NetAppDriver(configuration=configuration) self.mock_object(client_base.Client, '_init_ssh_client') self.stubs.Set(http_client, 'HTTPConnection', FakeDirect7modeHTTPConnection) self.mock_object(driver.library, '_get_root_volume_name', mock.Mock(return_value='root')) self.mock_object(perf_7mode, 'Performance7modeLibrary') driver.do_setup(context='') self.driver = driver self.driver.root_volume_name = 'root' def _set_config(self, configuration): configuration.netapp_storage_family = 'ontap_7mode' configuration.netapp_storage_protocol = 'iscsi' configuration.netapp_login = 'admin' configuration.netapp_password = 'pass' configuration.netapp_server_hostname = '127.0.0.1' configuration.netapp_transport_type = 'http' configuration.netapp_server_port = None configuration.netapp_vfiler = 'openstack' return configuration cinder-8.0.0/cinder/tests/unit/test_volume_types.py0000664000567000056710000005724012701406250023662 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
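# The test cases below repeatedly exercise one create/fetch/destroy cycle on
# the volume_types API. A condensed sketch of that pattern (the standalone
# helper is illustrative only; it assumes an admin RequestContext like the
# one built in setUp below):
def _example_volume_type_lifecycle(ctxt):
    from cinder.volume import volume_types
    # Create a type with extra specs, read it back by name, then remove it.
    type_ref = volume_types.create(ctxt, 'example-type', {'key1': 'val1'})
    fetched = volume_types.get_volume_type_by_name(ctxt, 'example-type')
    assert fetched['extra_specs'] == {'key1': 'val1'}
    volume_types.destroy(ctxt, type_ref['id'])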
"""Unit Tests for volume types code.""" import datetime import mock import time from oslo_config import cfg from cinder import context from cinder import db from cinder.db.sqlalchemy import api as db_api from cinder.db.sqlalchemy import models from cinder import exception from cinder import test from cinder.tests.unit import conf_fixture from cinder.volume import qos_specs from cinder.volume import volume_types class VolumeTypeTestCase(test.TestCase): """Test cases for volume type code.""" def setUp(self): super(VolumeTypeTestCase, self).setUp() self.ctxt = context.get_admin_context() self.vol_type1_name = str(int(time.time())) self.vol_type1_specs = dict(type="physical drive", drive_type="SAS", size="300", rpm="7200", visible="True") self.vol_type1_description = self.vol_type1_name + '_desc' def test_volume_type_create_then_destroy(self): """Ensure volume types can be created and deleted.""" prev_all_vtypes = volume_types.get_all_types(self.ctxt) # create type_ref = volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs, description=self.vol_type1_description) new = volume_types.get_volume_type_by_name(self.ctxt, self.vol_type1_name) self.assertEqual(self.vol_type1_description, new['description']) for k, v in self.vol_type1_specs.items(): self.assertEqual(v, new['extra_specs'][k], 'one of fields does not match') new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(len(prev_all_vtypes) + 1, len(new_all_vtypes), 'drive type was not created') # update new_type_name = self.vol_type1_name + '_updated' new_type_desc = self.vol_type1_description + '_updated' type_ref_updated = volume_types.update(self.ctxt, type_ref.id, new_type_name, new_type_desc) self.assertEqual(new_type_name, type_ref_updated['name']) self.assertEqual(new_type_desc, type_ref_updated['description']) # destroy volume_types.destroy(self.ctxt, type_ref['id']) new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(prev_all_vtypes, new_all_vtypes, 'drive type was not deleted') @mock.patch('cinder.quota.VolumeTypeQuotaEngine.' 'update_quota_resource') def test_update_volume_type_name(self, mock_update_quota): type_ref = volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs, description=self.vol_type1_description) new_type_name = self.vol_type1_name + '_updated' volume_types.update(self.ctxt, type_ref.id, new_type_name, None) mock_update_quota.assert_called_once_with(self.ctxt, self.vol_type1_name, new_type_name) volume_types.destroy(self.ctxt, type_ref.id) def test_volume_type_create_then_destroy_with_non_admin(self): """Ensure volume types can be created and deleted by non-admin user. If a non-admn user is authorized at API, volume type operations should be permitted. 
""" prev_all_vtypes = volume_types.get_all_types(self.ctxt) self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) # create type_ref = volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs, description=self.vol_type1_description) new = volume_types.get_volume_type_by_name(self.ctxt, self.vol_type1_name) self.assertEqual(self.vol_type1_description, new['description']) new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(len(prev_all_vtypes) + 1, len(new_all_vtypes), 'drive type was not created') # update new_type_name = self.vol_type1_name + '_updated' new_type_desc = self.vol_type1_description + '_updated' type_ref_updated = volume_types.update(self.ctxt, type_ref.id, new_type_name, new_type_desc) self.assertEqual(new_type_name, type_ref_updated['name']) self.assertEqual(new_type_desc, type_ref_updated['description']) # destroy volume_types.destroy(self.ctxt, type_ref['id']) new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(prev_all_vtypes, new_all_vtypes, 'drive type was not deleted') def test_create_volume_type_with_invalid_params(self): """Ensure exception will be returned.""" vol_type_invalid_specs = "invalid_extra_specs" self.assertRaises(exception.VolumeTypeCreateFailed, volume_types.create, self.ctxt, self.vol_type1_name, vol_type_invalid_specs) def test_get_all_volume_types(self): """Ensures that all volume types can be retrieved.""" session = db_api.get_session() total_volume_types = session.query(models.VolumeTypes).count() vol_types = volume_types.get_all_types(self.ctxt) self.assertEqual(total_volume_types, len(vol_types)) def test_get_default_volume_type(self): """Ensures default volume type can be retrieved.""" volume_types.create(self.ctxt, conf_fixture.def_vol_type, {}) default_vol_type = volume_types.get_default_volume_type() self.assertEqual(conf_fixture.def_vol_type, default_vol_type.get('name')) def test_default_volume_type_missing_in_db(self): """Test default volume type is missing in database. Ensures proper exception raised if default volume type is not in database. 
""" default_vol_type = volume_types.get_default_volume_type() self.assertEqual({}, default_vol_type) def test_get_default_volume_type_under_non_default(self): cfg.CONF.set_default('default_volume_type', None) self.assertEqual({}, volume_types.get_default_volume_type()) def test_non_existent_vol_type_shouldnt_delete(self): """Ensures that volume type creation fails with invalid args.""" self.assertRaises(exception.VolumeTypeNotFound, volume_types.destroy, self.ctxt, "sfsfsdfdfs") def test_volume_type_with_volumes_shouldnt_delete(self): """Ensures volume type deletion with associated volumes fail.""" type_ref = volume_types.create(self.ctxt, self.vol_type1_name) db.volume_create(self.ctxt, {'id': '1', 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'display_description': 'Test Desc', 'size': 20, 'status': 'available', 'volume_type_id': type_ref['id']}) self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, self.ctxt, type_ref['id']) def test_repeated_vol_types_shouldnt_raise(self): """Ensures that volume duplicates don't raise.""" new_name = self.vol_type1_name + "dup" type_ref = volume_types.create(self.ctxt, new_name) volume_types.destroy(self.ctxt, type_ref['id']) type_ref = volume_types.create(self.ctxt, new_name) def test_invalid_volume_types_params(self): """Ensures that volume type creation fails with invalid args.""" self.assertRaises(exception.InvalidVolumeType, volume_types.destroy, self.ctxt, None) self.assertRaises(exception.InvalidVolumeType, volume_types.get_volume_type, self.ctxt, None) self.assertRaises(exception.InvalidVolumeType, volume_types.get_volume_type_by_name, self.ctxt, None) def test_volume_type_get_by_id_and_name(self): """Ensure volume types get returns same entry.""" volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs) new = volume_types.get_volume_type_by_name(self.ctxt, self.vol_type1_name) new2 = volume_types.get_volume_type(self.ctxt, new['id']) self.assertEqual(new, new2) def test_volume_type_search_by_extra_spec(self): """Ensure volume types get by extra spec returns correct type.""" volume_types.create(self.ctxt, "type1", {"key1": "val1", "key2": "val2"}) volume_types.create(self.ctxt, "type2", {"key2": "val2", "key3": "val3"}) volume_types.create(self.ctxt, "type3", {"key3": "another_value", "key4": "val4"}) vol_types = volume_types.get_all_types( self.ctxt, filters={'extra_specs': {"key1": "val1"}}) self.assertEqual(1, len(vol_types)) self.assertIn("type1", vol_types.keys()) self.assertEqual({"key1": "val1", "key2": "val2"}, vol_types['type1']['extra_specs']) vol_types = volume_types.get_all_types( self.ctxt, filters={'extra_specs': {"key2": "val2"}}) self.assertEqual(2, len(vol_types)) self.assertIn("type1", vol_types.keys()) self.assertIn("type2", vol_types.keys()) vol_types = volume_types.get_all_types( self.ctxt, filters={'extra_specs': {"key3": "val3"}}) self.assertEqual(1, len(vol_types)) self.assertIn("type2", vol_types.keys()) def test_volume_type_search_by_extra_spec_multiple(self): """Ensure volume types get by extra spec returns correct type.""" volume_types.create(self.ctxt, "type1", {"key1": "val1", "key2": "val2", "key3": "val3"}) volume_types.create(self.ctxt, "type2", {"key2": "val2", "key3": "val3"}) volume_types.create(self.ctxt, "type3", {"key1": "val1", "key3": "val3", "key4": "val4"}) vol_types = volume_types.get_all_types( self.ctxt, filters={'extra_specs': {"key1": "val1", "key3": "val3"}}) self.assertEqual(2, len(vol_types)) self.assertIn("type1", vol_types.keys()) self.assertIn("type3", 
vol_types.keys()) self.assertEqual({"key1": "val1", "key2": "val2", "key3": "val3"}, vol_types['type1']['extra_specs']) self.assertEqual({"key1": "val1", "key3": "val3", "key4": "val4"}, vol_types['type3']['extra_specs']) def test_is_encrypted(self): volume_type = volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') self.assertFalse(volume_types.is_encrypted(self.ctxt, volume_type_id)) encryption = { 'control_location': 'front-end', 'provider': 'fake_provider', } db_api.volume_type_encryption_create(self.ctxt, volume_type_id, encryption) self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id)) def test_add_access(self): project_id = '456' vtype = volume_types.create(self.ctxt, 'type1', is_public=False) vtype_id = vtype.get('id') volume_types.add_volume_type_access(self.ctxt, vtype_id, project_id) vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id) self.assertIn(project_id, [a.project_id for a in vtype_access]) def test_remove_access(self): project_id = '456' vtype = volume_types.create(self.ctxt, 'type1', projects=['456'], is_public=False) vtype_id = vtype.get('id') volume_types.remove_volume_type_access(self.ctxt, vtype_id, project_id) vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id) self.assertNotIn(project_id, vtype_access) def test_add_access_with_non_admin(self): self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) project_id = '456' vtype = volume_types.create(self.ctxt, 'type1', is_public=False) vtype_id = vtype.get('id') volume_types.add_volume_type_access(self.ctxt, vtype_id, project_id) vtype_access = db.volume_type_access_get_all(self.ctxt.elevated(), vtype_id) self.assertIn(project_id, [a.project_id for a in vtype_access]) def test_remove_access_with_non_admin(self): self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) project_id = '456' vtype = volume_types.create(self.ctxt, 'type1', projects=['456'], is_public=False) vtype_id = vtype.get('id') volume_types.remove_volume_type_access(self.ctxt, vtype_id, project_id) vtype_access = db.volume_type_access_get_all(self.ctxt.elevated(), vtype_id) self.assertNotIn(project_id, vtype_access) def test_get_volume_type_qos_specs(self): qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}) type_ref = volume_types.create(self.ctxt, "type1", {"key2": "val2", "key3": "val3"}) res = volume_types.get_volume_type_qos_specs(type_ref['id']) self.assertIsNone(res['qos_specs']) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) expected = {'qos_specs': {'id': qos_ref['id'], 'name': 'qos-specs-1', 'consumer': 'back-end', 'specs': { 'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}}} res = volume_types.get_volume_type_qos_specs(type_ref['id']) self.assertDictMatch(expected, res) def test_volume_types_diff(self): # type_ref 1 and 2 have the same extra_specs, while 3 has different keyvals1 = {"key1": "val1", "key2": "val2"} keyvals2 = {"key1": "val0", "key2": "val2"} type_ref1 = volume_types.create(self.ctxt, "type1", keyvals1) type_ref2 = volume_types.create(self.ctxt, "type2", keyvals1) type_ref3 = volume_types.create(self.ctxt, "type3", keyvals2) # Check equality with only extra_specs diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref2['id']) self.assertTrue(same) self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref3['id']) self.assertFalse(same) self.assertEqual(('val1', 'val0'), 
diff['extra_specs']['key1']) # qos_ref 1 and 2 have the same specs, while 3 has different qos_keyvals1 = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'} qos_keyvals2 = {'k1': 'v0', 'k2': 'v2', 'k3': 'v3'} qos_ref1 = qos_specs.create(self.ctxt, 'qos-specs-1', qos_keyvals1) qos_ref2 = qos_specs.create(self.ctxt, 'qos-specs-2', qos_keyvals1) qos_ref3 = qos_specs.create(self.ctxt, 'qos-specs-3', qos_keyvals2) # Check equality with qos specs too qos_specs.associate_qos_with_type(self.ctxt, qos_ref1['id'], type_ref1['id']) qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'], type_ref2['id']) diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref2['id']) self.assertTrue(same) self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) self.assertEqual(('v1', 'v1'), diff['qos_specs']['k1']) qos_specs.disassociate_qos_specs(self.ctxt, qos_ref2['id'], type_ref2['id']) qos_specs.associate_qos_with_type(self.ctxt, qos_ref3['id'], type_ref2['id']) diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref2['id']) self.assertFalse(same) self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) self.assertEqual(('v1', 'v0'), diff['qos_specs']['k1']) qos_specs.disassociate_qos_specs(self.ctxt, qos_ref3['id'], type_ref2['id']) qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'], type_ref2['id']) # And add encryption for good measure enc_keyvals1 = {'cipher': 'c1', 'key_size': 256, 'provider': 'p1', 'control_location': 'front-end', 'encryption_id': 'uuid1'} enc_keyvals2 = {'cipher': 'c1', 'key_size': 128, 'provider': 'p1', 'control_location': 'front-end', 'encryption_id': 'uuid2'} db.volume_type_encryption_create(self.ctxt, type_ref1['id'], enc_keyvals1) db.volume_type_encryption_create(self.ctxt, type_ref2['id'], enc_keyvals2) diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref2['id']) self.assertFalse(same) self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) self.assertEqual(('v1', 'v1'), diff['qos_specs']['k1']) self.assertEqual((256, 128), diff['encryption']['key_size']) # Check diff equals type specs when one type is None diff, same = volume_types.volume_types_diff(self.ctxt, None, type_ref1['id']) self.assertFalse(same) self.assertEqual({'key1': (None, 'val1'), 'key2': (None, 'val2')}, diff['extra_specs']) self.assertEqual({'consumer': (None, 'back-end'), 'k1': (None, 'v1'), 'k2': (None, 'v2'), 'k3': (None, 'v3')}, diff['qos_specs']) self.assertEqual({'cipher': (None, 'c1'), 'control_location': (None, 'front-end'), 'deleted': (None, False), 'key_size': (None, 256), 'provider': (None, 'p1'), 'encryption_id': (None, 'uuid1')}, diff['encryption']) def test_encryption_create(self): volume_type = volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') encryption = { 'control_location': 'front-end', 'provider': 'fake_provider', } db_api.volume_type_encryption_create(self.ctxt, volume_type_id, encryption) self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id)) def test_get_volume_type_encryption(self): volume_type = volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') encryption = { 'control_location': 'front-end', 'provider': 'fake_provider', } db.volume_type_encryption_create(self.ctxt, volume_type_id, encryption) ret = volume_types.get_volume_type_encryption(self.ctxt, volume_type_id) self.assertIsNotNone(ret) def test_get_volume_type_encryption_without_volume_type_id(self): ret = volume_types.get_volume_type_encryption(self.ctxt, None) 
self.assertIsNone(ret) def test_check_public_volume_type_failed(self): project_id = '456' volume_type = volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') self.assertRaises(exception.InvalidVolumeType, volume_types.add_volume_type_access, self.ctxt, volume_type_id, project_id) self.assertRaises(exception.InvalidVolumeType, volume_types.remove_volume_type_access, self.ctxt, volume_type_id, project_id) def test_check_private_volume_type(self): volume_type = volume_types.create(self.ctxt, "type1", is_public=False) volume_type_id = volume_type.get('id') self.assertFalse(volume_types.is_public_volume_type(self.ctxt, volume_type_id)) def test_ensure_no_extra_specs_for_non_admin(self): # non-admin users shouldn't get extra-specs back in type-get/list etc ctxt = context.RequestContext('average-joe', 'd802f078-0af1-4e6b-8c02-7fac8d4339aa', auth_token='token', is_admin=False) volume_types.create(self.ctxt, "type-test", is_public=False) vtype = volume_types.get_volume_type_by_name(ctxt, 'type-test') self.assertIsNone(vtype.get('extra_specs', None)) def test_ensure_extra_specs_for_admin(self): # admin users should get extra-specs back in type-get/list etc volume_types.create(self.ctxt, "type-test", is_public=False) vtype = volume_types.get_volume_type_by_name(self.ctxt, 'type-test') self.assertIsNotNone(vtype.get('extra_specs', None)) cinder-8.0.0/cinder/tests/unit/test_netapp_nfs.py0000664000567000056710000021547712701406250023274 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
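# Connection fixtures in this module are merged in two styles: via
# itertools.chain over dict items (SEVEN_MODE_CONNECTION_INFO) and via
# dict(base, key=value) (the FAKE_*_HTTPS variants). A minimal sketch showing
# the two forms are equivalent; the literals here are stand-ins, not the
# module's real fixtures:
import itertools
_base = {'hostname': 'fake_host', 'port': 443}
_chain_merge = dict(itertools.chain(_base.items(),
                                    {'vfiler': 'test_vfiler'}.items()))
_kwarg_merge = dict(_base, vfiler='test_vfiler')
assert _chain_merge == _kwarg_merge == {'hostname': 'fake_host',
                                        'port': 443,
                                        'vfiler': 'test_vfiler'}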
"""Unit tests for the NetApp-specific NFS driver module.""" import itertools import os import shutil import unittest from lxml import etree import mock from mox3 import mox as mox_lib import six from cinder import exception from cinder.image import image_utils from cinder import test from cinder import utils as cinder_utils from cinder.volume import configuration as conf from cinder.volume.drivers.netapp import common from cinder.volume.drivers.netapp.dataontap import (nfs_7mode as netapp_nfs_7mode) from cinder.volume.drivers.netapp.dataontap import (nfs_cmode as netapp_nfs_cmode) from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_7mode from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap import ssc_cmode from cinder.volume.drivers.netapp import utils CONNECTION_INFO = { 'hostname': 'fake_host', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd', } FAKE_CONNECTION_INFO_HTTP = { 'hostname': '127.0.0.1', 'transport_type': 'http', 'port': None, 'username': 'admin', 'password': 'pass', 'vserver': 'openstack', } FAKE_CONNECTION_INFO_HTTPS = dict(FAKE_CONNECTION_INFO_HTTP, transport_type='https') FAKE_7MODE_CONNECTION_INFO_HTTP = dict(FAKE_CONNECTION_INFO_HTTP) FAKE_7MODE_CONNECTION_INFO_HTTP.pop('vserver') FAKE_7MODE_CONNECTION_INFO_HTTP['vfiler'] = 'test_vfiler' FAKE_7MODE_CONNECTION_INFO_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTP, transport_type='https') SEVEN_MODE_CONNECTION_INFO = dict( itertools.chain(CONNECTION_INFO.items(), {'vfiler': 'test_vfiler'}.items())) FAKE_VSERVER = 'fake_vserver' def create_configuration(): configuration = mox_lib.MockObject(conf.Configuration) configuration.append_config_values(mox_lib.IgnoreArg()) configuration.max_over_subscription_ratio = 20.0 configuration.reserved_percentage = 0 configuration.nfs_mount_point_base = '/mnt/test' configuration.nfs_mount_options = None configuration.nas_mount_options = None configuration.netapp_server_hostname = CONNECTION_INFO['hostname'] configuration.netapp_transport_type = CONNECTION_INFO['transport_type'] configuration.netapp_server_port = CONNECTION_INFO['port'] configuration.netapp_login = CONNECTION_INFO['username'] configuration.netapp_password = CONNECTION_INFO['password'] configuration.netapp_vfiler = SEVEN_MODE_CONNECTION_INFO['vfiler'] return configuration class FakeVolume(object): def __init__(self, host='', size=0): self.size = size self.id = hash(self) self.name = None self.host = host def __getitem__(self, key): return self.__dict__[key] def __setitem__(self, key, val): self.__dict__[key] = val class FakeSnapshot(object): def __init__(self, volume_size=0): self.volume_name = None self.name = None self.volume_id = None self.volume_size = volume_size self.user_id = None self.status = None def __getitem__(self, key): return self.__dict__[key] class FakeResponse(object): def __init__(self, status): """Initialize FakeResponse. 
:param status: Either 'failed' or 'passed' """ self.Status = status if status == 'failed': self.Reason = 'Sample error' class NetAppCmodeNfsDriverTestCase(test.TestCase): """Test direct NetApp C Mode driver.""" TEST_NFS_HOST = 'nfs-host1' TEST_NFS_SHARE_PATH = '/export' TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH) TEST_NFS_EXPORT2 = 'nfs-host2:/export' TEST_MNT_POINT = '/mnt/nfs' def setUp(self): super(NetAppCmodeNfsDriverTestCase, self).setUp() self._custom_setup() def _custom_setup(self): self.mock_object(utils, 'OpenStackInfo') kwargs = {} kwargs['netapp_mode'] = 'proxy' kwargs['configuration'] = create_configuration() self.mock_object(nfs_base, 'LOG') self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs) self._driver.zapi_client = mock.Mock() config = self._driver.configuration config.netapp_vserver = FAKE_VSERVER def test_create_snapshot(self): """Test snapshot can be created and deleted.""" mox = self.mox drv = self._driver mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) mox.ReplayAll() drv.create_snapshot(FakeSnapshot()) mox.VerifyAll() def test_create_volume_from_snapshot(self): """Tests volume creation from snapshot.""" drv = self._driver mox = self.mox location = '127.0.0.1:/nfs' host = 'hostname@backend#' + location volume = FakeVolume(host, 1) snapshot = FakeSnapshot(1) expected_result = {'provider_location': location} mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') mox.StubOutWithMock(drv, '_get_volume_location') mox.StubOutWithMock(drv, 'local_path') mox.StubOutWithMock(drv, '_discover_file_till_timeout') mox.StubOutWithMock(drv, '_set_rw_permissions') drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) drv._get_volume_location(mox_lib.IgnoreArg()).AndReturn(location) drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt') drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True) drv._set_rw_permissions(mox_lib.IgnoreArg()) mox.ReplayAll() self.mock_object(drv, '_do_qos_for_volume') self.mock_object(utils, 'get_volume_extra_specs') loc = drv.create_volume_from_snapshot(volume, snapshot) self.assertEqual(expected_result, loc) mox.VerifyAll() @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup') @mock.patch.object(client_cmode.Client, '__init__', return_value=None) def test_do_setup(self, mock_client_init, mock_super_do_setup): context = mock.Mock() self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') self._driver.do_setup(context) mock_client_init.assert_called_once_with(vserver=FAKE_VSERVER, **CONNECTION_INFO) mock_super_do_setup.assert_called_once_with(context) @mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error') @mock.patch.object(ssc_cmode, 'check_ssc_api_permissions') def test_check_for_setup_error(self, mock_ssc_api_permission_check, mock_super_check_for_setup_error): self._driver.zapi_client = mock.Mock() self._driver.check_for_setup_error() mock_ssc_api_permission_check.assert_called_once_with( self._driver.zapi_client) mock_super_check_for_setup_error.assert_called_once_with() def _prepare_clone_mock(self, status): drv = self._driver mox = self.mox volume = FakeVolume() setattr(volume, 'provider_location', '127.0.0.1:/nfs') drv.zapi_client = mox.CreateMockAnything() mox.StubOutWithMock(drv, '_get_host_ip') mox.StubOutWithMock(drv, '_get_export_path') mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc') 
        drv.zapi_client.get_if_info_by_ip('127.0.0.1').AndReturn(
            self._prepare_info_by_ip_response())
        drv.zapi_client.get_vol_by_junc_vserver('openstack', '/nfs').AndReturn(
            'nfsvol')
        drv.zapi_client.clone_file('nfsvol', 'volume_name', 'clone_name',
                                   'openstack')
        drv._get_host_ip(mox_lib.IgnoreArg()).AndReturn('127.0.0.1')
        drv._get_export_path(mox_lib.IgnoreArg()).AndReturn('/nfs')
        drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())

        return mox

    def _prepare_info_by_ip_response(self):
        res = """<attributes-list>
                    <net-interface-info>
                        <address>127.0.0.1</address>
                        <administrative-status>up</administrative-status>
                        <current-node>fas3170rre-cmode-01</current-node>
                        <current-port>e1b-1165</current-port>
                        <data-protocols>
                            <data-protocol>nfs</data-protocol>
                        </data-protocols>
                        <dns-domain-name>none</dns-domain-name>
                        <failover-group/>
                        <failover-policy>disabled</failover-policy>
                        <firewall-policy>data</firewall-policy>
                        <home-node>fas3170rre-cmode-01</home-node>
                        <home-port>e1b-1165</home-port>
                        <interface-name>nfs_data1</interface-name>
                        <is-auto-revert>false</is-auto-revert>
                        <is-home>true</is-home>
                        <netmask>255.255.255.0</netmask>
                        <netmask-length>24</netmask-length>
                        <operational-status>up</operational-status>
                        <role>data</role>
                        <routing-group-name>c10.63.165.0/24
                        </routing-group-name>
                        <use-failover-group>disabled</use-failover-group>
                        <vserver>openstack</vserver>
                    </net-interface-info>
                 </attributes-list>
""" response_el = etree.XML(res) return netapp_api.NaElement(response_el).get_children() def test_clone_backing_file_for_volume(self): drv = self._driver mox = self._prepare_clone_mock('pass') mox.ReplayAll() volume_name = 'volume_name' clone_name = 'clone_name' volume_id = volume_name + six.text_type(hash(volume_name)) share = 'ip:/share' drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id, share) mox.VerifyAll() def test_register_img_in_cache_noshare(self): volume = {'id': '1', 'name': 'testvol'} volume['provider_location'] = '10.61.170.1:/share/path' drv = self._driver mox = self.mox mox.StubOutWithMock(drv, '_do_clone_rel_img_cache') drv._do_clone_rel_img_cache('testvol', 'img-cache-12345', '10.61.170.1:/share/path', 'img-cache-12345') mox.ReplayAll() drv._register_image_in_cache(volume, '12345') mox.VerifyAll() def test_register_img_in_cache_with_share(self): volume = {'id': '1', 'name': 'testvol'} volume['provider_location'] = '10.61.170.1:/share/path' drv = self._driver mox = self.mox mox.StubOutWithMock(drv, '_do_clone_rel_img_cache') drv._do_clone_rel_img_cache('testvol', 'img-cache-12345', '10.61.170.1:/share/path', 'img-cache-12345') mox.ReplayAll() drv._register_image_in_cache(volume, '12345') mox.VerifyAll() def test_find_image_in_cache_no_shares(self): drv = self._driver drv._mounted_shares = [] result = drv._find_image_in_cache('image_id') if not result: pass else: self.fail('Return result is unexpected') def test_find_image_in_cache_shares(self): drv = self._driver mox = self.mox drv._mounted_shares = ['testshare'] mox.StubOutWithMock(drv, '_get_mount_point_for_share') mox.StubOutWithMock(os.path, 'isfile') drv._get_mount_point_for_share('testshare').AndReturn('/mnt') os.path.isfile('/mnt/img-cache-id').AndReturn(True) mox.ReplayAll() result = drv._find_image_in_cache('id') (share, file_name) = result[0] mox.VerifyAll() drv._mounted_shares.remove('testshare') if (share == 'testshare' and file_name == 'img-cache-id'): pass else: self.fail('Return result is unexpected') def test_find_old_cache_files_notexists(self): drv = self._driver mox = self.mox cmd = ['find', '/mnt', '-maxdepth', '1', '-name', 'img-cache*', '-amin', '+720'] setattr(drv.configuration, 'expiry_thres_minutes', 720) mox.StubOutWithMock(drv, '_get_mount_point_for_share') mox.StubOutWithMock(drv, '_execute') drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt') drv._execute(*cmd, run_as_root=True).AndReturn((None, '')) mox.ReplayAll() res = drv._find_old_cache_files('share') mox.VerifyAll() if len(res) == 0: pass else: self.fail('No files expected but got return values.') def test_find_old_cache_files_exists(self): drv = self._driver mox = self.mox cmd = ['find', '/mnt', '-maxdepth', '1', '-name', 'img-cache*', '-amin', '+720'] setattr(drv.configuration, 'expiry_thres_minutes', '720') files = '/mnt/img-id1\n/mnt/img-id2\n' r_files = ['img-id1', 'img-id2'] mox.StubOutWithMock(drv, '_get_mount_point_for_share') mox.StubOutWithMock(drv, '_execute') mox.StubOutWithMock(drv, '_shortlist_del_eligible_files') drv._get_mount_point_for_share('share').AndReturn('/mnt') drv._execute(*cmd, run_as_root=True).AndReturn((files, None)) drv._shortlist_del_eligible_files( mox_lib.IgnoreArg(), r_files).AndReturn(r_files) mox.ReplayAll() res = drv._find_old_cache_files('share') mox.VerifyAll() if len(res) == len(r_files): for f in res: r_files.remove(f) else: self.fail('Returned files not same as expected.') def test_delete_files_till_bytes_free_success(self): drv = self._driver mox = self.mox 
        files = [('img-cache-1', 230), ('img-cache-2', 380)]

        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_delete_file_at_path')

        drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
        drv._delete_file_at_path('/mnt/img-cache-2').AndReturn(True)
        drv._delete_file_at_path('/mnt/img-cache-1').AndReturn(True)
        mox.ReplayAll()

        drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
        mox.VerifyAll()

    def test_clean_image_cache_exec(self):
        drv = self._driver
        mox = self.mox
        drv.configuration.thres_avl_size_perc_start = 20
        drv.configuration.thres_avl_size_perc_stop = 50
        drv._mounted_shares = ['testshare']

        mox.StubOutWithMock(drv, '_find_old_cache_files')
        mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
        mox.StubOutWithMock(drv, '_get_capacity_info')

        drv._get_capacity_info('testshare').AndReturn((100, 19))
        drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
        drv._delete_files_till_bytes_free(
            ['f1', 'f2'], 'testshare', bytes_to_free=31)
        mox.ReplayAll()

        drv._clean_image_cache()
        mox.VerifyAll()

        drv._mounted_shares.remove('testshare')
        if not drv.cleaning:
            pass
        else:
            self.fail('Clean image cache failed.')

    def test_clean_image_cache_noexec(self):
        drv = self._driver
        mox = self.mox
        drv.configuration.thres_avl_size_perc_start = 20
        drv.configuration.thres_avl_size_perc_stop = 50
        drv._mounted_shares = ['testshare']

        mox.StubOutWithMock(drv, '_get_capacity_info')

        drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
        mox.ReplayAll()

        drv._clean_image_cache()
        mox.VerifyAll()

        drv._mounted_shares.remove('testshare')
        if not drv.cleaning:
            pass
        else:
            self.fail('Clean image cache failed.')

    def test_clone_image_fromcache(self):
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}

        mox.StubOutWithMock(utils, 'get_volume_extra_specs')
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        mox.StubOutWithMock(drv, '_post_clone_image')
        mox.StubOutWithMock(drv, '_is_share_clone_compatible')

        utils.get_volume_extra_specs(mox_lib.IgnoreArg())
        drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn(
            [('share', 'file_name')])
        drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
                                       mox_lib.IgnoreArg()).AndReturn(True)
        drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
        drv._post_clone_image(volume)

        mox.ReplayAll()
        drv.clone_image('', volume, ('image_location', None),
                        {'id': 'image_id'}, '')
        mox.VerifyAll()

    def get_img_info(self, format):
        class img_info(object):
            def __init__(self, fmt):
                self.file_format = fmt

        return img_info(format)

    def test_clone_image_cloneableshare_nospace(self):
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}

        mox.StubOutWithMock(utils, 'get_volume_extra_specs')
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_is_share_clone_compatible')

        utils.get_volume_extra_specs(mox_lib.IgnoreArg())
        drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(
            mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
                                       mox_lib.IgnoreArg()).AndReturn(False)

        mox.ReplayAll()
        (prop, cloned) = drv.clone_image(
            '', volume, ('nfs://127.0.0.1:/share/img-id', None),
            {'id': 'image_id'}, '')
        mox.VerifyAll()
        if not cloned and not prop['provider_location']:
            pass
        else:
            self.fail('Expected not cloned, got cloned.')

    def test_clone_image_cloneableshare_raw(self):
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol',
'size': '20'} mox.StubOutWithMock(utils, 'get_volume_extra_specs') mox.StubOutWithMock(drv, '_find_image_in_cache') mox.StubOutWithMock(drv, '_is_cloneable_share') mox.StubOutWithMock(drv, '_get_mount_point_for_share') mox.StubOutWithMock(image_utils, 'qemu_img_info') mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') mox.StubOutWithMock(drv, '_discover_file_till_timeout') mox.StubOutWithMock(drv, '_set_rw_permissions') mox.StubOutWithMock(drv, '_resize_image_file') mox.StubOutWithMock(drv, '_is_share_clone_compatible') utils.get_volume_extra_specs(mox_lib.IgnoreArg()) drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([]) drv._is_cloneable_share( mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share') drv._is_share_clone_compatible(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn(True) drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt') image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\ AndReturn(self.get_img_info('raw')) drv._clone_backing_file_for_volume( 'img-id', 'vol', share='127.0.0.1:/share', volume_id=None) drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt') drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True) drv._set_rw_permissions('/mnt/vol') drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg()) mox.ReplayAll() drv.clone_image( '', volume, ('nfs://127.0.0.1:/share/img-id', None), {'id': 'image_id'}, '') mox.VerifyAll() def test_clone_image_cloneableshare_notraw(self): drv = self._driver mox = self.mox volume = {'name': 'vol', 'size': '20'} mox.StubOutWithMock(utils, 'get_volume_extra_specs') mox.StubOutWithMock(drv, '_find_image_in_cache') mox.StubOutWithMock(drv, '_is_cloneable_share') mox.StubOutWithMock(drv, '_get_mount_point_for_share') mox.StubOutWithMock(image_utils, 'qemu_img_info') mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') mox.StubOutWithMock(drv, '_discover_file_till_timeout') mox.StubOutWithMock(drv, '_set_rw_permissions') mox.StubOutWithMock(drv, '_resize_image_file') mox.StubOutWithMock(image_utils, 'convert_image') mox.StubOutWithMock(drv, '_register_image_in_cache') mox.StubOutWithMock(drv, '_is_share_clone_compatible') utils.get_volume_extra_specs(mox_lib.IgnoreArg()) drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([]) drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn( '127.0.0.1:/share') drv._is_share_clone_compatible(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn(True) drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt') image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\ AndReturn(self.get_img_info('notraw')) image_utils.convert_image(mox_lib.IgnoreArg(), mox_lib.IgnoreArg(), 'raw', run_as_root=True) image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\ AndReturn(self.get_img_info('raw')) drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt') drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True) drv._set_rw_permissions('/mnt/vol') drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg()) mox.ReplayAll() drv.clone_image( '', volume, ('nfs://127.0.0.1/share/img-id', None), {'id': 'image_id'}, '') mox.VerifyAll() def test_clone_image_file_not_discovered(self): drv = self._driver mox = self.mox volume = {'name': 'vol', 'size': '20'} mox.StubOutWithMock(utils, 'get_volume_extra_specs') mox.StubOutWithMock(drv, '_find_image_in_cache') mox.StubOutWithMock(drv, '_is_cloneable_share') mox.StubOutWithMock(drv, 
'_get_mount_point_for_share') mox.StubOutWithMock(image_utils, 'qemu_img_info') mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') mox.StubOutWithMock(drv, '_discover_file_till_timeout') mox.StubOutWithMock(image_utils, 'convert_image') mox.StubOutWithMock(drv, '_register_image_in_cache') mox.StubOutWithMock(drv, '_is_share_clone_compatible') mox.StubOutWithMock(drv, '_do_qos_for_volume') mox.StubOutWithMock(drv, 'local_path') utils.get_volume_extra_specs(mox_lib.IgnoreArg()) drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([]) drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn( '127.0.0.1:/share') drv._is_share_clone_compatible(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn(True) drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt') image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\ AndReturn(self.get_img_info('notraw')) image_utils.convert_image(mox_lib.IgnoreArg(), mox_lib.IgnoreArg(), 'raw', run_as_root=True) image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\ AndReturn(self.get_img_info('raw')) drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol') drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(False) mox.ReplayAll() vol_dict, result = drv.clone_image( '', volume, ('nfs://127.0.0.1/share/img-id', None), {'id': 'image_id'}, '') mox.VerifyAll() self.assertFalse(result) self.assertFalse(vol_dict['bootable']) self.assertIsNone(vol_dict['provider_location']) def test_clone_image_resizefails(self): drv = self._driver mox = self.mox volume = {'name': 'vol', 'size': '20'} mox.StubOutWithMock(utils, 'get_volume_extra_specs') mox.StubOutWithMock(drv, '_find_image_in_cache') mox.StubOutWithMock(drv, '_is_cloneable_share') mox.StubOutWithMock(drv, '_get_mount_point_for_share') mox.StubOutWithMock(image_utils, 'qemu_img_info') mox.StubOutWithMock(drv, '_clone_backing_file_for_volume') mox.StubOutWithMock(drv, '_discover_file_till_timeout') mox.StubOutWithMock(drv, '_set_rw_permissions') mox.StubOutWithMock(drv, '_resize_image_file') mox.StubOutWithMock(image_utils, 'convert_image') mox.StubOutWithMock(drv, '_do_qos_for_volume') mox.StubOutWithMock(drv, '_register_image_in_cache') mox.StubOutWithMock(drv, '_is_share_clone_compatible') mox.StubOutWithMock(drv, 'local_path') utils.get_volume_extra_specs(mox_lib.IgnoreArg()) drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([]) drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn( '127.0.0.1:/share') drv._is_share_clone_compatible(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn(True) drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt') image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\ AndReturn(self.get_img_info('notraw')) image_utils.convert_image(mox_lib.IgnoreArg(), mox_lib.IgnoreArg(), 'raw', run_as_root=True) image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\ AndReturn(self.get_img_info('raw')) drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol') drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True) drv._set_rw_permissions('/mnt/vol') drv._resize_image_file( mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndRaise(exception.InvalidResults()) mox.ReplayAll() vol_dict, result = drv.clone_image( '', volume, 
('nfs://127.0.0.1/share/img-id', None), {'id': 'image_id'}, '') mox.VerifyAll() self.assertFalse(result) self.assertFalse(vol_dict['bootable']) self.assertIsNone(vol_dict['provider_location']) def test_is_cloneable_share_badformats(self): drv = self._driver strgs = ['10.61.666.22:/share/img', 'nfs://10.61.666.22:/share/img', 'nfs://10.61.666.22//share/img', 'nfs://com.netapp.com:/share/img', 'nfs://com.netapp.com//share/img', 'com.netapp.com://share/im\g', 'http://com.netapp.com://share/img', 'nfs://com.netapp.com:/share/img', 'nfs://com.netapp.com:8080//share/img' 'nfs://com.netapp.com//img', 'nfs://[ae::sr::ty::po]/img'] for strg in strgs: res = drv._is_cloneable_share(strg) if res: msg = 'Invalid format matched for url %s.' % strg self.fail(msg) def test_is_cloneable_share_goodformat1(self): drv = self._driver mox = self.mox strg = 'nfs://10.61.222.333/share/img' mox.StubOutWithMock(drv, '_check_share_in_use') drv._check_share_in_use(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn('share') mox.ReplayAll() drv._is_cloneable_share(strg) mox.VerifyAll() def test_is_cloneable_share_goodformat2(self): drv = self._driver mox = self.mox strg = 'nfs://10.61.222.333:8080/share/img' mox.StubOutWithMock(drv, '_check_share_in_use') drv._check_share_in_use(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn('share') mox.ReplayAll() drv._is_cloneable_share(strg) mox.VerifyAll() def test_is_cloneable_share_goodformat3(self): drv = self._driver mox = self.mox strg = 'nfs://com.netapp:8080/share/img' mox.StubOutWithMock(drv, '_check_share_in_use') drv._check_share_in_use(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn('share') mox.ReplayAll() drv._is_cloneable_share(strg) mox.VerifyAll() def test_is_cloneable_share_goodformat4(self): drv = self._driver mox = self.mox strg = 'nfs://netapp.com/share/img' mox.StubOutWithMock(drv, '_check_share_in_use') drv._check_share_in_use(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn('share') mox.ReplayAll() drv._is_cloneable_share(strg) mox.VerifyAll() def test_is_cloneable_share_goodformat5(self): drv = self._driver mox = self.mox strg = 'nfs://netapp.com/img' mox.StubOutWithMock(drv, '_check_share_in_use') drv._check_share_in_use(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn('share') mox.ReplayAll() drv._is_cloneable_share(strg) mox.VerifyAll() def test_check_share_in_use_no_conn(self): drv = self._driver share = drv._check_share_in_use(None, '/dir') if share: self.fail('Unexpected share detected.') def test_check_share_in_use_invalid_conn(self): drv = self._driver share = drv._check_share_in_use(':8989', '/dir') if share: self.fail('Unexpected share detected.') def test_check_share_in_use_incorrect_host(self): drv = self._driver mox = self.mox mox.StubOutWithMock(utils, 'resolve_hostname') utils.resolve_hostname(mox_lib.IgnoreArg()).AndRaise(Exception()) mox.ReplayAll() share = drv._check_share_in_use('incorrect:8989', '/dir') mox.VerifyAll() if share: self.fail('Unexpected share detected.') def test_check_share_in_use_success(self): drv = self._driver mox = self.mox drv._mounted_shares = ['127.0.0.1:/dir/share'] mox.StubOutWithMock(utils, 'resolve_hostname') mox.StubOutWithMock(drv, '_share_match_for_ip') utils.resolve_hostname(mox_lib.IgnoreArg()).AndReturn('10.22.33.44') drv._share_match_for_ip( '10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share') mox.ReplayAll() share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share') mox.VerifyAll() if not share: self.fail('Expected share not detected') def test_construct_image_url_loc(self): 
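        # _construct_image_nfs_url should keep only Glance location entries
        # whose metadata describes an NFS share (a share_location plus a
        # mountpoint) and turn each of them into an nfs:// URL.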
drv = self._driver img_loc = (None, # Valid metdata [{'metadata': {'share_location': 'nfs://host/path', 'mountpoint': '/opt/stack/data/glance', 'id': 'abc-123', 'type': 'nfs'}, 'url': 'file:///opt/stack/data/glance/image-id-0'}, # missing metadata {'metadata': {}, 'url': 'file:///opt/stack/data/glance/image-id-1'}, # missing location_type {'metadata': {'location_type': None}, 'url': 'file:///opt/stack/data/glance/image-id-2'}, # non-nfs location_type {'metadata': {'location_type': 'not-NFS'}, 'url': 'file:///opt/stack/data/glance/image-id-3'}, # missing share_location {'metadata': {'location_type': 'nfs', 'share_location': None}, 'url': 'file:///opt/stack/data/glance/image-id-4'}, # missing mountpoint {'metadata': {'location_type': 'nfs', 'share_location': 'nfs://host/path', # Pre-kilo we documented "mount_point" 'mount_point': '/opt/stack/data/glance'}, 'url': 'file:///opt/stack/data/glance/image-id-5'}, # Valid metadata {'metadata': {'share_location': 'nfs://host/path', 'mountpoint': '/opt/stack/data/glance', 'id': 'abc-123', 'type': 'nfs'}, 'url': 'file:///opt/stack/data/glance/image-id-6'}]) locations = drv._construct_image_nfs_url(img_loc) self.assertIn("nfs://host/path/image-id-0", locations) self.assertIn("nfs://host/path/image-id-6", locations) self.assertEqual(2, len(locations)) def test_construct_image_url_direct(self): drv = self._driver img_loc = ("nfs://host/path/image-id", None) locations = drv._construct_image_nfs_url(img_loc) self.assertIn("nfs://host/path/image-id", locations) def test_get_pool(self): pool = self._driver.get_pool({'provider_location': 'fake-share'}) self.assertEqual('fake-share', pool) def _set_config(self, configuration): configuration.netapp_storage_family = 'ontap_cluster' configuration.netapp_storage_protocol = 'nfs' configuration.netapp_login = 'admin' configuration.netapp_password = 'pass' configuration.netapp_server_hostname = '127.0.0.1' configuration.netapp_transport_type = 'http' configuration.netapp_server_port = None configuration.netapp_vserver = 'openstack' configuration.nfs_shares_config = '/nfs' return configuration @mock.patch.object(utils, 'get_volume_extra_specs') def test_check_volume_type_mismatch(self, get_specs): if not hasattr(self._driver, 'vserver'): return unittest.skip("Test only applies to cmode driver") get_specs.return_value = {'thin_volume': 'true'} self._driver._is_share_vol_type_match = mock.Mock(return_value=False) self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self._driver._check_volume_type, 'vol', 'share', 'file') get_specs.assert_called_once_with('vol') self._driver._is_share_vol_type_match.assert_called_once_with( 'vol', 'share', 'file') @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_all_default(self): configuration = self._set_config(create_configuration()) driver = common.NetAppDriver(configuration=configuration) mock_invoke = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_http_default_port(self): configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'http' driver = common.NetAppDriver(configuration=configuration) 
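        # common.NetAppDriver acts as a factory: it reads the storage family
        # and protocol from the configuration and returns the matching
        # concrete driver (here, the cmode NFS driver).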
mock_invoke = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_https_default_port(self): configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'https' driver = common.NetAppDriver(configuration=configuration) mock_invoke = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTPS) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_http_non_default_port(self): configuration = self._set_config(create_configuration()) configuration.netapp_server_port = 81 driver = common.NetAppDriver(configuration=configuration) mock_invoke = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_CONNECTION_INFO_HTTP, port=81) mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_https_non_default_port(self): configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'https' configuration.netapp_server_port = 446 driver = common.NetAppDriver(configuration=configuration) mock_invoke = self.mock_object(client_cmode, 'Client') self.mock_object(perf_cmode, 'PerformanceCmodeLibrary') driver.do_setup(context='') FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_CONNECTION_INFO_HTTPS, port=446) mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS) @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') def test_convert_vol_ref_share_name_to_share_ip(self, mock_hostname): drv = self._driver share = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name') modified_share = '10.12.142.11:/export/test_file_name' modified_vol_ref = drv._convert_vol_ref_share_name_to_share_ip(share) self.assertEqual(modified_share, modified_vol_ref) @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') @mock.patch.object(os.path, 'isfile', return_value=True) def test_get_share_mount_and_vol_from_vol_ref(self, mock_isfile, mock_hostname): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name') vol_ref = {'source-name': vol_path} drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) (share, mount, file_path) = \ drv._get_share_mount_and_vol_from_vol_ref(vol_ref) self.assertEqual(self.TEST_NFS_EXPORT1, share) self.assertEqual(self.TEST_MNT_POINT, mount) self.assertEqual('test_file_name', file_path) @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') def test_get_share_mount_and_vol_from_vol_ref_with_bad_ref(self, mock_hostname): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] vol_ref = {'source-id': '1234546'} drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( 
return_value=self.TEST_MNT_POINT) self.assertRaises(exception.ManageExistingInvalidReference, drv._get_share_mount_and_vol_from_vol_ref, vol_ref) @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') def test_get_share_mount_and_vol_from_vol_ref_where_not_found(self, mock_host): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] vol_path = "%s/%s" % (self.TEST_NFS_EXPORT2, 'test_file_name') vol_ref = {'source-name': vol_path} drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) self.assertRaises(exception.ManageExistingInvalidReference, drv._get_share_mount_and_vol_from_vol_ref, vol_ref) @mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11') def test_get_share_mount_and_vol_from_vol_ref_where_is_dir(self, mock_host): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] vol_ref = {'source-name': self.TEST_NFS_EXPORT2} drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) self.assertRaises(exception.ManageExistingInvalidReference, drv._get_share_mount_and_vol_from_vol_ref, vol_ref) @mock.patch.object(cinder_utils, 'get_file_size', return_value=1073741824) def test_manage_existing_get_size(self, get_file_size): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] test_file = 'test_file_name' volume = FakeVolume() volume['name'] = 'file-new-managed-123' volume['id'] = 'volume-new-managed-123' vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) vol_ref = {'source-name': vol_path} drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, test_file)) vol_size = drv.manage_existing_get_size(volume, vol_ref) self.assertEqual(1, vol_size) @mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824) def test_manage_existing_get_size_round_up(self, get_file_size): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] test_file = 'test_file_name' volume = FakeVolume() volume['name'] = 'file-new-managed-123' volume['id'] = 'volume-new-managed-123' vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) vol_ref = {'source-name': vol_path} drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, test_file)) vol_size = drv.manage_existing_get_size(volume, vol_ref) self.assertEqual(2, vol_size) @mock.patch.object(cinder_utils, 'get_file_size', return_value='badfloat') def test_manage_existing_get_size_error(self, get_size): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] test_file = 'test_file_name' volume = FakeVolume() volume['name'] = 'file-new-managed-123' volume['id'] = 'volume-new-managed-123' vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) vol_ref = {'source-name': vol_path} drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, test_file)) self.assertRaises(exception.VolumeBackendAPIException, drv.manage_existing_get_size, volume, vol_ref) @mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824) def 
test_manage_existing(self, get_file_size): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] test_file = 'test_file_name' volume = FakeVolume() volume['name'] = 'file-new-managed-123' volume['id'] = 'volume-new-managed-123' vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) vol_ref = {'source-name': vol_path} drv._check_volume_type = mock.Mock() self.stubs.Set(drv, '_execute', mock.Mock()) drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, test_file)) shutil.move = mock.Mock() mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs') mock_get_specs.return_value = {} self.mock_object(drv, '_do_qos_for_volume') location = drv.manage_existing(volume, vol_ref) self.assertEqual(self.TEST_NFS_EXPORT1, location['provider_location']) drv._check_volume_type.assert_called_once_with( volume, self.TEST_NFS_EXPORT1, test_file, {}) @mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824) def test_manage_existing_move_fails(self, get_file_size): drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] test_file = 'test_file_name' volume = FakeVolume() volume['name'] = 'volume-new-managed-123' volume['id'] = 'volume-new-managed-123' vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file) vol_ref = {'source-name': vol_path} mock_check_volume_type = drv._check_volume_type = mock.Mock() drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, test_file)) drv._execute = mock.Mock(side_effect=OSError) mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs') mock_get_specs.return_value = {} self.mock_object(drv, '_do_qos_for_volume') self.assertRaises(exception.VolumeBackendAPIException, drv.manage_existing, volume, vol_ref) mock_check_volume_type.assert_called_once_with( volume, self.TEST_NFS_EXPORT1, test_file, {}) @mock.patch.object(nfs_base, 'LOG') def test_unmanage(self, mock_log): drv = self._driver self.mock_object(utils, 'get_valid_qos_policy_group_info') volume = FakeVolume() volume['id'] = '123' volume['provider_location'] = '/share' drv.unmanage(volume) self.assertEqual(1, mock_log.info.call_count) class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase): """Test direct NetApp C Mode driver only and not inherit.""" def setUp(self): super(NetAppCmodeNfsDriverOnlyTestCase, self).setUp() self._custom_setup() def _custom_setup(self): self.mock_object(utils, 'OpenStackInfo') kwargs = {} kwargs['netapp_mode'] = 'proxy' kwargs['configuration'] = create_configuration() self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs) self._driver.ssc_enabled = True self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path' self._driver.zapi_client = mock.Mock() self.mock_object(netapp_nfs_cmode, 'LOG') self._fake_empty_qos_policy_group_info = { 'legacy': None, 'spec': None, } self._fake_legacy_qos_policy_group_info = { 'legacy': { 'policy_name': 'qos_policy_1' }, 'spec': None, } @mock.patch.object(utils, 'LOG', mock.Mock()) def test_create_volume(self): drv = self._driver drv.ssc_enabled = False fake_extra_specs = {} fake_share = 'localhost:myshare' host = 'hostname@backend#' + fake_share mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs') mock_get_specs.return_value = 
fake_extra_specs self.mock_object(drv, '_ensure_shares_mounted') self.mock_object(drv, '_do_create_volume') mock_get_qos_info =\ self.mock_object(utils, 'get_valid_qos_policy_group_info') mock_get_qos_info.return_value = self._fake_empty_qos_policy_group_info volume_info = self._driver.create_volume(FakeVolume(host, 1)) self.assertEqual(fake_share, volume_info.get('provider_location')) self.assertEqual(0, utils.LOG.warning.call_count) def test_create_volume_no_pool_specified(self): drv = self._driver drv.ssc_enabled = False host = 'hostname@backend' # missing pool with mock.patch.object(drv, '_ensure_shares_mounted'): self.assertRaises(exception.InvalidHost, self._driver.create_volume, FakeVolume(host, 1)) def test_create_volume_with_legacy_qos_policy(self): drv = self._driver drv.ssc_enabled = False fake_extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'} fake_share = 'localhost:myshare' host = 'hostname@backend#' + fake_share fake_volume = FakeVolume(host, 1) mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs') mock_get_specs.return_value = fake_extra_specs mock_get_qos_info =\ self.mock_object(utils, 'get_valid_qos_policy_group_info') mock_get_qos_info.return_value =\ self._fake_legacy_qos_policy_group_info self.mock_object(drv, '_ensure_shares_mounted') self.mock_object(drv, '_do_create_volume') mock_set_qos = self.mock_object(drv, '_set_qos_policy_group_on_volume') volume_info = self._driver.create_volume(fake_volume) self.assertEqual('localhost:myshare', volume_info.get('provider_location')) mock_set_qos.assert_called_once_with( fake_volume, self._fake_legacy_qos_policy_group_info) def test_copy_img_to_vol_copyoffload_success(self): drv = self._driver context = object() volume = {'id': 'vol_id', 'name': 'name'} image_service = object() image_id = 'image_id' drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) drv._copy_from_img_service = mock.Mock() drv._get_provider_location = mock.Mock(return_value='share') drv._get_vol_for_share = mock.Mock(return_value='vol') drv._update_stale_vols = mock.Mock() drv.copy_image_to_volume(context, volume, image_service, image_id) drv._copy_from_img_service.assert_called_once_with(context, volume, image_service, image_id) drv._update_stale_vols.assert_called_once_with('vol') def test_copy_img_to_vol_copyoffload_failure(self): drv = self._driver context = object() volume = {'id': 'vol_id', 'name': 'name'} image_service = object() image_id = 'image_id' drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) drv._copy_from_img_service = mock.Mock(side_effect=Exception()) nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() drv._get_provider_location = mock.Mock(return_value='share') drv._get_vol_for_share = mock.Mock(return_value='vol') drv._update_stale_vols = mock.Mock() drv.copy_image_to_volume(context, volume, image_service, image_id) drv._copy_from_img_service.assert_called_once_with(context, volume, image_service, image_id) nfs_base.NetAppNfsDriver.copy_image_to_volume. 
\ assert_called_once_with(context, volume, image_service, image_id) drv._update_stale_vols.assert_called_once_with('vol') def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self): drv = self._driver context = object() volume = {'id': 'vol_id', 'name': 'name'} image_service = mock.Mock() image_service.get_location.return_value = (mock.Mock(), mock.Mock()) image_service.show.return_value = {'size': 0} image_id = 'image_id' drv._client = mock.Mock() drv._client.get_api_version = mock.Mock(return_value=(1, 20)) drv._find_image_in_cache = mock.Mock(return_value=[]) drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"]) drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test", "dr")) drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1") drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') drv._get_host_ip = mock.Mock() drv._get_provider_location = mock.Mock() drv._get_export_path = mock.Mock(return_value="dr") drv._check_share_can_hold_size = mock.Mock() # Raise error as if the copyoffload file can not be found drv._clone_file_dst_exists = mock.Mock(side_effect=OSError()) # Verify the original error is propagated self.assertRaises(OSError, drv._copy_from_img_service, context, volume, image_service, image_id) def test_copyoffload_frm_cache_success(self): drv = self._driver context = object() volume = {'id': 'vol_id', 'name': 'name'} image_service = object() image_id = 'image_id' drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() drv._get_provider_location = mock.Mock(return_value='share') drv._get_vol_for_share = mock.Mock(return_value='vol') drv._update_stale_vols = mock.Mock() drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')]) drv._copy_from_cache = mock.Mock(return_value=True) drv.copy_image_to_volume(context, volume, image_service, image_id) drv._copy_from_cache.assert_called_once_with(volume, image_id, [('share', 'img')]) def test_copyoffload_frm_img_service_success(self): drv = self._driver context = object() volume = {'id': 'vol_id', 'name': 'name'} image_service = object() image_id = 'image_id' drv._client = mock.Mock() drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() drv._get_provider_location = mock.Mock(return_value='share') drv._get_vol_for_share = mock.Mock(return_value='vol') drv._update_stale_vols = mock.Mock() drv._find_image_in_cache = mock.Mock(return_value=False) drv._copy_from_img_service = mock.Mock() drv.copy_image_to_volume(context, volume, image_service, image_id) drv._copy_from_img_service.assert_called_once_with(context, volume, image_service, image_id) def test_cache_copyoffload_workflow_success(self): drv = self._driver volume = {'id': 'vol_id', 'name': 'name', 'size': 1} image_id = 'image_id' cache_result = [('ip1:/openstack', 'img-cache-imgid')] drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') drv._get_host_ip = mock.Mock(return_value='ip2') drv._get_export_path = mock.Mock(return_value='/exp_path') drv._execute = mock.Mock() drv._register_image_in_cache = mock.Mock() drv._get_provider_location = mock.Mock(return_value='/share') drv._post_clone_image = mock.Mock() copied = drv._copy_from_cache(volume, image_id, cache_result) self.assertTrue(copied) drv._get_ip_verify_on_cluster.assert_any_call('ip1') drv._get_export_path.assert_called_with('vol_id') drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1', 
            '/openstack/img-cache-imgid', '/exp_path/name',
            run_as_root=False, check_exit_code=0)
        drv._post_clone_image.assert_called_with(volume)
        drv._get_provider_location.assert_called_with('vol_id')

    @mock.patch.object(image_utils, 'qemu_img_info')
    def test_img_service_raw_copyoffload_workflow_success(
            self, mock_qemu_img_info):
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        context = object()
        image_service = mock.Mock()
        image_service.get_location.return_value = ('nfs://ip1/openstack/img',
                                                   None)
        image_service.show.return_value = {'size': 1, 'disk_format': 'raw'}
        drv._check_get_nfs_path_segs = (
            mock.Mock(return_value=('ip1', '/openstack')))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._execute = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        drv._discover_file_till_timeout = mock.Mock(return_value=True)
        img_inf = mock.Mock()
        img_inf.file_format = 'raw'
        mock_qemu_img_info.return_value = img_inf
        drv._check_share_can_hold_size = mock.Mock()
        drv._move_nfs_file = mock.Mock(return_value=True)
        drv._delete_file_at_path = mock.Mock()
        drv._clone_file_dst_exists = mock.Mock()
        drv._post_clone_image = mock.Mock()

        drv._copy_from_img_service(context, volume, image_service, image_id)

        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._check_share_can_hold_size.assert_called_with('share', 1)
        self.assertEqual(1, drv._execute.call_count)
        drv._post_clone_image.assert_called_with(volume)

    @mock.patch.object(image_utils, 'convert_image')
    @mock.patch.object(image_utils, 'qemu_img_info')
    @mock.patch('os.path.exists')
    def test_img_service_qcow2_copyoffload_workflow_success(
            self, mock_exists, mock_qemu_img_info, mock_cvrt_image):
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        context = object()
        image_service = mock.Mock()
        image_service.get_location.return_value = ('nfs://ip1/openstack/img',
                                                   None)
        image_service.show.return_value = {'size': 1,
                                           'disk_format': 'qcow2'}
        drv._check_get_nfs_path_segs = (
            mock.Mock(return_value=('ip1', '/openstack')))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._execute = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        img_inf = mock.Mock()
        img_inf.file_format = 'raw'
        mock_qemu_img_info.return_value = img_inf
        drv._check_share_can_hold_size = mock.Mock()
        drv._move_nfs_file = mock.Mock(return_value=True)
        drv._delete_file_at_path = mock.Mock()
        drv._clone_file_dst_exists = mock.Mock()
        drv._post_clone_image = mock.Mock()

        drv._copy_from_img_service(context, volume, image_service, image_id)

        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._check_share_can_hold_size.assert_called_with('share', 1)
        self.assertEqual(1, mock_cvrt_image.call_count)
        self.assertEqual(1, drv._execute.call_count)
        self.assertEqual(2, drv._delete_file_at_path.call_count)
        self.assertEqual(1, drv._clone_file_dst_exists.call_count)
        drv._post_clone_image.assert_called_with(volume)


class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
    """Test direct NetApp 7 Mode driver."""

    def _custom_setup(self):
        self.mock_object(utils, 'OpenStackInfo')
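        # Stub the loggers before building the driver so the inherited
        # cmode tests are not polluted by real log output.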
self.mock_object(common.na_utils, 'LOG') self.mock_object(nfs_base, 'LOG') self._driver = netapp_nfs_7mode.NetApp7modeNfsDriver( configuration=create_configuration()) self._driver.zapi_client = mock.Mock() def _prepare_delete_snapshot_mock(self, snapshot_exists): drv = self._driver mox = self.mox mox.StubOutWithMock(drv, '_get_provider_location') mox.StubOutWithMock(drv, '_volume_not_present') if snapshot_exists: mox.StubOutWithMock(drv, '_execute') mox.StubOutWithMock(drv, '_get_volume_path') drv._get_provider_location(mox_lib.IgnoreArg()) drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\ .AndReturn(not snapshot_exists) if snapshot_exists: drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) drv._execute('rm', None, run_as_root=True) mox.ReplayAll() return mox def test_create_volume_no_pool_specified(self): drv = self._driver drv.ssc_enabled = False host = 'hostname@backend' # missing pool with mock.patch.object(drv, '_ensure_shares_mounted'): self.assertRaises(exception.InvalidHost, self._driver.create_volume, FakeVolume(host, 1)) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup') @mock.patch.object(client_7mode.Client, '__init__', return_value=None) def test_do_setup(self, mock_client_init, mock_super_do_setup): context = mock.Mock() self.mock_object(perf_7mode, 'Performance7modeLibrary') self._driver.do_setup(context) mock_client_init.assert_called_once_with(**SEVEN_MODE_CONNECTION_INFO) mock_super_do_setup.assert_called_once_with(context) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_all_default(self): configuration = self._set_config(create_configuration()) driver = common.NetAppDriver(configuration=configuration) mock_invoke = self.mock_object(client_7mode, 'Client') self.mock_object(perf_7mode, 'Performance7modeLibrary') driver.do_setup(context='') mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_http_default_port(self): configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'http' driver = common.NetAppDriver(configuration=configuration) mock_invoke = self.mock_object(client_7mode, 'Client') self.mock_object(perf_7mode, 'Performance7modeLibrary') driver.do_setup(context='') mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_https_default_port(self): configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'https' driver = common.NetAppDriver(configuration=configuration) mock_invoke = self.mock_object(client_7mode, 'Client') self.mock_object(perf_7mode, 'Performance7modeLibrary') driver.do_setup(context='') mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTPS) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_http_non_default_port(self): configuration = self._set_config(create_configuration()) configuration.netapp_server_port = 81 driver = common.NetAppDriver(configuration=configuration) mock_invoke = 
self.mock_object(client_7mode, 'Client') self.mock_object(perf_7mode, 'Performance7modeLibrary') driver.do_setup(context='') FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_7MODE_CONNECTION_INFO_HTTP, port=81) mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock()) def test_do_setup_https_non_default_port(self): configuration = self._set_config(create_configuration()) configuration.netapp_transport_type = 'https' configuration.netapp_server_port = 446 driver = common.NetAppDriver(configuration=configuration) mock_invoke = self.mock_object(client_7mode, 'Client') self.mock_object(perf_7mode, 'Performance7modeLibrary') driver.do_setup(context='') FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTPS, port=446) mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS) @mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error') def test_check_for_setup_error(self, mock_super_check_for_setup_error): self._driver.zapi_client.get_ontapi_version.return_value = (1, 20) self.assertIsNone(self._driver.check_for_setup_error()) mock_super_check_for_setup_error.assert_called_once_with() def test_check_for_setup_error_old_version(self): self._driver.zapi_client.get_ontapi_version.return_value = (1, 8) self.assertRaises(exception.VolumeBackendAPIException, self._driver.check_for_setup_error) def test_check_for_setup_error_no_version(self): self._driver.zapi_client.get_ontapi_version.return_value = None self.assertRaises(exception.VolumeBackendAPIException, self._driver.check_for_setup_error) def _prepare_clone_mock(self, status): drv = self._driver mox = self.mox volume = FakeVolume() setattr(volume, 'provider_location', '127.0.0.1:/nfs') mox.StubOutWithMock(drv, '_get_export_ip_path') drv._get_export_ip_path( mox_lib.IgnoreArg(), mox_lib.IgnoreArg()).AndReturn(('127.0.0.1', '/nfs')) return mox def test_clone_backing_file_for_volume_clear(self): drv = self._driver mox = self._prepare_clone_mock('fail') drv.zapi_client = mox.CreateMockAnything() drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn( '/vol/vol1/nfs') drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) mox.ReplayAll() volume_name = 'volume_name' clone_name = 'clone_name' volume_id = volume_name + six.text_type(hash(volume_name)) try: drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id) except Exception as e: if isinstance(e, netapp_api.NaApiError): pass else: raise mox.VerifyAll() def test_get_pool(self): pool = self._driver.get_pool({'provider_location': 'fake-share'}) self.assertEqual('fake-share', pool) def _set_config(self, configuration): super(NetApp7modeNfsDriverTestCase, self)._set_config( configuration) configuration.netapp_storage_family = 'ontap_7mode' return configuration def test_clone_backing_file_for_volume(self): drv = self._driver mox = self._prepare_clone_mock('pass') drv.zapi_client = mox.CreateMockAnything() drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn( '/vol/vol1/nfs') drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg()) mox.ReplayAll() volume_name = 'volume_name' clone_name = 'clone_name' volume_id = volume_name + six.text_type(hash(volume_name)) share = 'ip:/share' drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id, share) mox.VerifyAll() cinder-8.0.0/cinder/tests/unit/test_dellsc.py0000664000567000056710000036434712701406257022415 0ustar 
jenkinsjenkins00000000000000# Copyright (c) 2014 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import uuid from cinder import context from cinder import exception from cinder.objects import fields from cinder import test from cinder.volume.drivers.dell import dell_storagecenter_api from cinder.volume.drivers.dell import dell_storagecenter_iscsi from cinder.volume import volume_types # We patch these here as they are used by every test to keep # from trying to contact a Dell Storage Center. @mock.patch.object(dell_storagecenter_api.HttpClient, '__init__', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'open_connection', return_value=mock.MagicMock()) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'close_connection') class DellSCSanISCSIDriverTestCase(test.TestCase): VOLUME = {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} SCSERVER = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'devstacksrv/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'devstacksrv', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } MAPPINGS = [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', 
u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] RPLAY = {u'scSerialNumber': 64702, u'globalIndex': u'64702-46-250', u'description': u'Cinder Clone Replay', u'parent': {u'instanceId': u'64702.46.249', u'instanceName': u'64702-46-249', u'objectType': u'ScReplay'}, u'instanceId': u'64702.46.250', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'12/09/2014 03:52:08 PM', u'createVolume': {u'instanceId': u'64702.46', u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', u'objectType': u'ScVolume'}, u'expireTime': u'12/09/2014 04:52:08 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7910, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'12/09/2014 03:52:08 PM', u'size': u'0.0 Bytes' } SCRPLAYPROFILE = {u'ruleCount': 0, u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', u'volumeCount': 0, u'scName': u'Storage Center 64702', u'notes': u'Created by Dell Cinder Driver', u'scSerialNumber': 64702, u'userCreated': True, u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', u'instanceId': u'64702.11', u'enforceReplayCreationTimeout': False, u'replayCreationTimeout': 20, u'objectType': u'ScReplayProfile', u'type': u'Consistent', u'expireIncompleteReplaySets': True} IQN = 'iqn.2002-03.com.compellent:5000D31000000001' ISCSI_PROPERTIES = {'access_mode': 'rw', 'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe44'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260', u'192.168.0.22:3260']} def setUp(self): super(DellSCSanISCSIDriverTestCase, self).setUp() # configuration is a mock. A mock is pretty much a blank # slate. I believe mock's done in setup are not happy time # mocks. So we just do a few things like driver config here. 
self.configuration = mock.Mock() self.configuration.san_is_local = False self.configuration.san_ip = "192.168.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "mmm" self.configuration.dell_sc_ssn = 12345 self.configuration.dell_sc_server_folder = 'opnstktst' self.configuration.dell_sc_volume_folder = 'opnstktst' self.configuration.dell_sc_api_port = 3033 self.configuration.iscsi_ip_address = '192.168.1.1' self.configuration.iscsi_port = 3260 self._context = context.get_admin_context() self.driver = dell_storagecenter_iscsi.DellStorageCenterISCSIDriver( configuration=self.configuration) self.driver.do_setup(None) self.driver._stats = {'QoS_support': False, 'volume_backend_name': 'dell-1', 'free_capacity_gb': 12123, 'driver_version': '1.0.1', 'total_capacity_gb': 12388, 'reserved_percentage': 0, 'vendor_name': 'Dell', 'storage_protocol': 'iSCSI'} # Start with none. Add in the specific tests later. # Mock tests bozo this. self.driver.backends = None self.driver.replication_enabled = False self.volid = str(uuid.uuid4()) self.volume_name = "volume" + self.volid self.connector = { 'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:2227dab76162', 'host': 'fakehost'} self.connector_multipath = { 'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:2227dab76162', 'host': 'fakehost', 'multipath': True} self.access_record_output = [ "ID Initiator Ipaddress AuthMethod UserName Apply-To", "--- --------------- ------------- ---------- ---------- --------", "1 iqn.1993-08.org.debian:01:222 *.*.*.* none both", " 7dab76162"] self.fake_iqn = 'iqn.2002-03.com.compellent:5000D31000000001' self.properties = { 'target_discovered': True, 'target_portal': '%s:3260' % self.driver.configuration.dell_sc_iscsi_ip, 'target_iqn': self.fake_iqn, 'volume_id': 1} self._model_update = { 'provider_location': "%s:3260,1 %s 0" % (self.driver.configuration.dell_sc_iscsi_ip, self.fake_iqn) # , # 'provider_auth': 'CHAP %s %s' % ( # self.configuration.eqlx_chap_login, # self.configuration.eqlx_chap_password) } @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_get_volume_extra_specs') def test__create_replications(self, mock_get_volume_extra_specs, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends mock_get_volume_extra_specs.return_value = { 'replication_enabled': ' True'} model_update = {'replication_status': 'enabled', 'replication_driver_data': '12345,67890'} vol = {'id': 'guid', 'replication_driver_data': ''} scvol = {'name': 'guid'} self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'otherqos'}] mock_api = mock.MagicMock() mock_api.create_replication = mock.MagicMock( return_value={'instanceId': '1'}) # Create regular replication test. res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_replication.assert_any_call( scvol, '12345', 'cinderqos', False, None, False) mock_api.create_replication.assert_any_call( scvol, '67890', 'otherqos', False, None, False) self.assertEqual(model_update, res) # Create replication with activereplay set. 
mock_get_volume_extra_specs.return_value = { 'replication:activereplay': ' True', 'replication_enabled': ' True'} res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_replication.assert_any_call( scvol, '12345', 'cinderqos', False, None, True) mock_api.create_replication.assert_any_call( scvol, '67890', 'otherqos', False, None, True) self.assertEqual(model_update, res) # Create replication with sync set. mock_get_volume_extra_specs.return_value = { 'replication:activereplay': ' True', 'replication_enabled': ' True', 'replication_type': ' sync'} res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_replication.assert_any_call( scvol, '12345', 'cinderqos', True, None, True) mock_api.create_replication.assert_any_call( scvol, '67890', 'otherqos', True, None, True) self.assertEqual(model_update, res) # Create replication with disk folder set. self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos', 'diskfolder': 'ssd'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'otherqos', 'diskfolder': 'ssd'}] mock_get_volume_extra_specs.return_value = { 'replication:activereplay': ' True', 'replication_enabled': ' True', 'replication_type': ' sync'} res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_replication.assert_any_call( scvol, '12345', 'cinderqos', True, 'ssd', True) mock_api.create_replication.assert_any_call( scvol, '67890', 'otherqos', True, 'ssd', True) self.assertEqual(model_update, res) # Failed to create replication test. mock_api.create_replication.return_value = None self.assertRaises(exception.VolumeBackendAPIException, self.driver._create_replications, mock_api, vol, scvol) # Replication not enabled test mock_get_volume_extra_specs.return_value = {} res = self.driver._create_replications(mock_api, vol, scvol) self.assertEqual({}, res) self.driver.backends = backends @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_get_volume_extra_specs') def test__delete_replications(self, mock_get_volume_extra_specs, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends vol = {'id': 'guid'} scvol = {'instanceId': '1'} mock_api = mock.MagicMock() mock_api.delete_replication = mock.MagicMock() mock_api.find_volume = mock.MagicMock(return_value=scvol) # Start replication disabled. Should fail immediately. mock_get_volume_extra_specs.return_value = {} self.driver._delete_replications(mock_api, vol) self.assertFalse(mock_api.delete_replication.called) # Replication enabled. No replications listed. mock_get_volume_extra_specs.return_value = { 'replication_enabled': ' True'} vol = {'id': 'guid', 'replication_driver_data': ''} self.driver._delete_replications(mock_api, vol) self.assertFalse(mock_api.delete_replication.called) # Something to call. 
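        # Illustrative note (not in the original test): per the asserts
        # below, the driver stores its replication targets as a
        # comma-separated SSN string and calls the API once per target with
        # the SSN as an int. A hedged sketch of that parsing:
        #
        #     for ssn in vol['replication_driver_data'].split(','):
        #         mock_api.delete_replication(scvol, int(ssn))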
vol = {'id': 'guid', 'replication_driver_data': '12345,67890'} self.driver._delete_replications(mock_api, vol) mock_api.delete_replication.assert_any_call(scvol, 12345) mock_api.delete_replication.assert_any_call(scvol, 67890) self.driver.backends = backends @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) def test_create_volume(self, mock_find_sc, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with(self.volume_name, 1, None, None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value='fake') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_cg_volumes') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) def test_create_volume_consistency_group(self, mock_find_sc, mock_create_volume, mock_update_cg_volumes, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1, 'consistencygroup_id': 'guid'} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with(self.volume_name, 1, None, None) self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_update_cg_volumes.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:storageprofile': 'HighPriority'}) def test_create_volume_storage_profile(self, mock_extra, mock_find_sc, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1, 'volume_type_id': 'abc'} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with(self.volume_name, 1, "HighPriority", None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:replayprofiles': 'Daily'}) def test_create_volume_replay_profiles(self, mock_extra, mock_find_sc, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1, 'volume_type_id': 'abc'} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with(self.volume_name, 1, None, 'Daily') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications', return_value={'replication_status': 'enabled', 'replication_driver_data': 'ssn'}) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) def test_create_volume_replication(self, mock_find_sc, mock_create_replications, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1} ret = self.driver.create_volume(volume) self.assertEqual({'replication_status': 'enabled', 'replication_driver_data': 
'ssn'}, ret) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume') @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) def test_create_volume_replication_raises(self, mock_find_sc, mock_create_replications, mock_delete_volume, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1} mock_create_replications.side_effect = ( exception.VolumeBackendAPIException(data='abc')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) self.assertTrue(mock_delete_volume.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume') def test_create_volume_failure(self, mock_delete_volume, mock_find_sc, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) self.assertTrue(mock_delete_volume.called) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_delete_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) def test_delete_volume(self, mock_find_sc, mock_delete_volume, mock_delete_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1} self.driver.delete_volume(volume) mock_delete_volume.assert_called_once_with(self.volume_name) self.assertTrue(mock_delete_replications.called) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_delete_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) def test_delete_volume_failure(self, mock_find_sc, mock_delete_volume, mock_delete_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name, 'size': 1} self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, volume) self.assertTrue(mock_delete_replications.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPINGS[0]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_iscsi_properties', return_value=ISCSI_PROPERTIES) def test_initialize_connection(self, mock_find_iscsi_props, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name} connector = self.connector data = 
self.driver.initialize_connection(volume, connector) self.assertEqual('iscsi', data['driver_volume_type']) # verify find_volume has been called and that it has been called twice mock_find_volume.assert_any_call(self.volume_name) self.assertEqual(2, mock_find_volume.call_count) expected = {'data': self.ISCSI_PROPERTIES, 'driver_volume_type': 'iscsi'} self.assertEqual(expected, data, 'Unexpected return value') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPINGS[0]) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_iscsi_properties', return_value=ISCSI_PROPERTIES) def test_initialize_connection_multi_path(self, mock_find_iscsi_props, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): # Test case where connection is multipath volume = {'id': self.volume_name} connector = self.connector_multipath data = self.driver.initialize_connection(volume, connector) self.assertEqual('iscsi', data['driver_volume_type']) # verify find_volume has been called and that it has been called twice mock_find_volume.assert_any_call(self.volume_name) self.assertEqual(2, mock_find_volume.call_count) props = self.ISCSI_PROPERTIES expected = {'data': props, 'driver_volume_type': 'iscsi'} self.assertEqual(expected, data, 'Unexpected return value') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_iscsi_properties', return_value=None) def test_initialize_connection_no_iqn(self, mock_find_iscsi_properties, mock_map_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name} connector = {} mock_find_iscsi_properties.side_effect = Exception('abc') self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_iscsi_properties', return_value=None) def test_initialize_connection_no_server(self, mock_find_iscsi_properties, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name} connector = {}
self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_iscsi_properties', return_value=None) def test_initialize_connection_vol_not_found(self, mock_find_iscsi_properties, mock_map_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = {} self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'map_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_iscsi_properties', return_value=ISCSI_PROPERTIES) def test_initialize_connection_map_vol_fail(self, mock_find_iscsi_props, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): # Test case where map_volume returns None (no mappings) volume = {'id': self.volume_name} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) def test_terminate_connection(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name} connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) self.assertIsNone(res, 'None expected') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) def test_terminate_connection_no_server(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = {'initiator': ''} self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', 
return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=True) def test_terminate_connection_no_volume(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = {'initiator': ''} self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_server', return_value=SCSERVER) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmap_volume', return_value=False) def test_terminate_connection_failure(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name} connector = {'initiator': ''} self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_replay', return_value='fake') def test_create_snapshot(self, mock_create_replay, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': self.volume_name, 'id': self.volume_name} self.driver.create_snapshot(snapshot) self.assertEqual('available', snapshot['status']) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_replay', return_value=None) def test_create_snapshot_no_volume(self, mock_create_replay, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': self.volume_name, 'id': self.volume_name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snapshot) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_replay', return_value=None) def test_create_snapshot_failure(self, mock_create_replay, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': self.volume_name, 'id': self.volume_name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snapshot) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) 
@mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay', return_value='fake') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_view_volume', return_value=VOLUME) def test_create_volume_from_snapshot(self, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_find_sc, mock_find_replay_profile, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): model_update = {'something': 'something'} mock_create_replications.return_value = model_update volume = {'id': 'fake'} snapshot = {'id': 'fake', 'volume_id': 'fake'} res = self.driver.create_volume_from_snapshot(volume, snapshot) mock_create_view_volume.assert_called_once_with('fake', 'fake', None) self.assertTrue(mock_find_replay.called) self.assertTrue(mock_find_volume.called) self.assertFalse(mock_find_replay_profile.called) # This just makes sure that we created the replications. self.assertTrue(mock_create_replications.called) self.assertEqual(model_update, res) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value='fake') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_cg_volumes') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay', return_value='fake') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_view_volume', return_value=VOLUME) def test_create_volume_from_snapshot_cg(self, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_find_sc, mock_update_cg_volumes, mock_find_replay_profile, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): model_update = {'something': 'something'} mock_create_replications.return_value = model_update volume = {'id': 'fake', 'consistencygroup_id': 'guid'} snapshot = {'id': 'fake', 'volume_id': 'fake'} res = self.driver.create_volume_from_snapshot(volume, snapshot) mock_create_view_volume.assert_called_once_with('fake', 'fake', None) self.assertTrue(mock_find_replay.called) self.assertTrue(mock_find_volume.called) self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_update_cg_volumes.called) # This just makes sure that we created the replications. self.assertTrue(mock_create_replications.called) self.assertEqual(model_update, res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay', return_value='fake') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_view_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume') def test_create_volume_from_snapshot_failed(self, mock_delete_volume, mock_create_view_volume, mock_find_replay_profile, mock_find_replay, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 'fake'} snapshot = {'id': 'fake', 'volume_id': 'fake'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volume, snapshot) self.assertTrue(mock_find_replay.called)
self.assertTrue(mock_find_volume.called) self.assertFalse(mock_find_replay_profile.called) self.assertTrue(mock_delete_volume.called) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay', return_value='fake') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_view_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume') def test_create_volume_from_snapshot_failed_replication( self, mock_delete_volume, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_find_sc, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): mock_create_replications.side_effect = ( exception.VolumeBackendAPIException(data='abc')) volume = {'id': 'fake'} snapshot = {'id': 'fake', 'volume_id': 'fake'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volume, snapshot) self.assertTrue(mock_delete_volume.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_view_volume', return_value=VOLUME) def test_create_volume_from_snapshot_no_replay(self, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 'fake'} snapshot = {'id': 'fake', 'volume_id': 'fake'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volume, snapshot) self.assertTrue(mock_find_volume.called) self.assertTrue(mock_find_replay.called) self.assertFalse(mock_create_view_volume.called) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications', return_value={}) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_cloned_volume', return_value=VOLUME) def test_create_cloned_volume(self, mock_create_cloned_volume, mock_find_volume, mock_find_sc, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name + '_clone', 'size': 1} src_vref = {'id': self.volume_name, 'size': 1} ret = self.driver.create_cloned_volume(volume, src_vref) mock_create_cloned_volume.assert_called_once_with( self.volume_name + '_clone', self.VOLUME, None) self.assertTrue(mock_find_volume.called) self.assertEqual({}, ret) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications', return_value={}) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_cloned_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'expand_volume', return_value=VOLUME) 
def test_create_cloned_volume_expand(self, mock_expand_volume, mock_create_cloned_volume, mock_find_volume, mock_find_sc, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name + '_clone', 'size': 2} src_vref = {'id': self.volume_name, 'size': 1} ret = self.driver.create_cloned_volume(volume, src_vref) mock_create_cloned_volume.assert_called_once_with( self.volume_name + '_clone', self.VOLUME, None) self.assertTrue(mock_find_volume.called) self.assertEqual({}, ret) self.assertTrue(mock_expand_volume.called) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications', return_value={}) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_cloned_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume') def test_create_cloned_volume_failed(self, mock_delete_volume, mock_create_cloned_volume, mock_find_volume, mock_find_sc, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name + '_clone'} src_vref = {'id': self.volume_name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume, src_vref) self.assertTrue(mock_delete_volume.called) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications', return_value={}) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_cloned_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'expand_volume') def test_create_cloned_volume_expand_failed(self, mock_expand_volume, mock_delete_volume, mock_create_cloned_volume, mock_find_volume, mock_find_sc, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name + '_clone', 'size': 2} src_vref = {'id': self.volume_name, 'size': 1} mock_create_replications.side_effect = ( exception.VolumeBackendAPIException(data='abc')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume, src_vref) self.assertTrue(mock_delete_volume.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_volume') @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_cloned_volume', return_value=VOLUME) def test_create_cloned_volume_replication_fail(self, mock_create_cloned_volume, mock_find_volume, mock_find_sc, mock_create_replications, mock_delete_volume, mock_close_connection, mock_open_connection, mock_init): mock_create_replications.side_effect = ( exception.VolumeBackendAPIException(data='abc')) volume = {'id': self.volume_name + '_clone', 'size': 1} src_vref = {'id': self.volume_name, 'size': 1} 
self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume, src_vref) self.assertTrue(mock_delete_volume.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value='fake') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_cg_volumes') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_cloned_volume', return_value=VOLUME) def test_create_cloned_volume_consistency_group(self, mock_create_cloned_volume, mock_find_volume, mock_find_sc, mock_update_cg_volumes, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name + '_clone', 'consistencygroup_id': 'guid', 'size': 1} src_vref = {'id': self.volume_name, 'size': 1} self.driver.create_cloned_volume(volume, src_vref) mock_create_cloned_volume.assert_called_once_with( self.volume_name + '_clone', self.VOLUME, None) self.assertTrue(mock_find_volume.called) self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_update_cg_volumes.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_cloned_volume', return_value=VOLUME) def test_create_cloned_volume_no_volume(self, mock_create_cloned_volume, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': self.volume_name + '_clone'} src_vref = {'id': self.volume_name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume, src_vref) self.assertTrue(mock_find_volume.called) self.assertFalse(mock_create_cloned_volume.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_replay', return_value=True) def test_delete_snapshot(self, mock_delete_replay, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': self.volume_name, 'id': self.volume_name} self.driver.delete_snapshot(snapshot) mock_delete_replay.assert_called_once_with( self.VOLUME, self.volume_name) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_replay', return_value=True) def test_delete_snapshot_no_volume(self, mock_delete_replay, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': self.volume_name, 'id': self.volume_name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, snapshot) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) def test_ensure_export(self, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): context = {} volume = {'id': self.VOLUME.get(u'name')} 
self.driver.ensure_export(context, volume) mock_find_volume.assert_called_once_with( self.VOLUME.get(u'name')) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) def test_ensure_export_failed(self, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): context = {} volume = {'id': self.VOLUME.get(u'name')} self.assertRaises(exception.VolumeBackendAPIException, self.driver.ensure_export, context, volume) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) def test_ensure_export_no_volume(self, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): context = {} volume = {'id': self.VOLUME.get(u'name')} self.assertRaises(exception.VolumeBackendAPIException, self.driver.ensure_export, context, volume) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'expand_volume', return_value=VOLUME) def test_extend_volume(self, mock_expand_volume, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name, 'size': 1} new_size = 2 self.driver.extend_volume(volume, new_size) mock_expand_volume.assert_called_once_with(self.VOLUME, new_size) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'expand_volume', return_value=None) def test_extend_volume_no_volume(self, mock_expand_volume, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'name': self.volume_name, 'size': 1} new_size = 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, volume, new_size) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_update_volume_stats_with_refresh(self, mock_get_storage_usage, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertTrue(mock_get_storage_usage.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_update_volume_stats_with_refresh_and_repl( self, mock_get_storage_usage, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends replication_enabled = self.driver.replication_enabled self.driver.backends = [{'a': 'a'}, {'b': 'b'}, {'c': 'c'}] self.driver.replication_enabled = True stats = self.driver.get_volume_stats(True) self.assertEqual(3, stats['replication_count']) self.assertEqual(['async', 'sync'], stats['replication_type']) self.assertTrue(stats['replication_enabled']) self.assertTrue(mock_get_storage_usage.called)
self.driver.backends = backends self.driver.replication_enabled = replication_enabled @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=64702) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_get_volume_stats_no_refresh(self, mock_get_storage_usage, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(False) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertFalse(mock_get_storage_usage.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'rename_volume', return_value=True) def test_update_migrated_volume(self, mock_rename_volume, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 111} backend_volume = {'id': 112} model_update = {'_name_id': None} rt = self.driver.update_migrated_volume(None, volume, backend_volume, 'available') mock_rename_volume.assert_called_once_with(self.VOLUME, volume['id']) self.assertEqual(model_update, rt) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'rename_volume', return_value=False) def test_update_migrated_volume_rename_fail(self, mock_rename_volume, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 111} backend_volume = {'id': 112, '_name_id': 113} rt = self.driver.update_migrated_volume(None, volume, backend_volume, 'available') mock_rename_volume.assert_called_once_with(self.VOLUME, volume['id']) self.assertEqual({'_name_id': 113}, rt) def test_update_migrated_volume_no_volume_id(self, mock_close_connection, mock_open_connection, mock_init): volume = {'id': None} backend_volume = {'id': 112, '_name_id': 113} rt = self.driver.update_migrated_volume(None, volume, backend_volume, 'available') self.assertEqual({'_name_id': 113}, rt) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', return_value=12345) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) def test_update_migrated_volume_no_backend_id(self, mock_find_volume, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 111} backend_volume = {'id': None, '_name_id': None} rt = self.driver.update_migrated_volume(None, volume, backend_volume, 'available') mock_find_sc.assert_called_once_with() mock_find_volume.assert_called_once_with(None) self.assertEqual({'_name_id': None}, rt) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_replay_profile', return_value=SCRPLAYPROFILE) def test_create_consistencygroup(self, mock_create_replay_profile, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'} self.driver.create_consistencygroup(context, group) mock_create_replay_profile.assert_called_once_with(group['id']) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'create_replay_profile', return_value=None) def test_create_consistencygroup_fail(self, mock_create_replay_profile, mock_close_connection, mock_open_connection,
mock_init): context = {} group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_consistencygroup, context, group) mock_create_replay_profile.assert_called_once_with(group['id']) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_replay_profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, 'delete_volume') def test_delete_consistencygroup(self, mock_delete_volume, mock_find_replay_profile, mock_delete_replay_profile, mock_close_connection, mock_open_connection, mock_init): mock_volume = mock.MagicMock() expected_volumes = [mock_volume] context = {} group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3', 'status': fields.ConsistencyGroupStatus.DELETED} model_update, volumes = self.driver.delete_consistencygroup( context, group, [mock_volume]) mock_find_replay_profile.assert_called_once_with(group['id']) mock_delete_replay_profile.assert_called_once_with(self.SCRPLAYPROFILE) mock_delete_volume.assert_called_once_with(mock_volume) self.assertEqual(group['status'], model_update['status']) self.assertEqual(expected_volumes, volumes) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_replay_profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=None) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, 'delete_volume') def test_delete_consistencygroup_not_found(self, mock_delete_volume, mock_find_replay_profile, mock_delete_replay_profile, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3', 'status': fields.ConsistencyGroupStatus.DELETED} model_update, volumes = self.driver.delete_consistencygroup(context, group, []) mock_find_replay_profile.assert_called_once_with(group['id']) self.assertFalse(mock_delete_replay_profile.called) self.assertFalse(mock_delete_volume.called) self.assertEqual(group['status'], model_update['status']) self.assertEqual([], volumes) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_cg_volumes', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) def test_update_consistencygroup(self, mock_find_replay_profile, mock_update_cg_volumes, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'} add_volumes = [{'id': '101'}] remove_volumes = [{'id': '102'}] rt1, rt2, rt3 = self.driver.update_consistencygroup(context, group, add_volumes, remove_volumes) mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE, add_volumes, remove_volumes) mock_find_replay_profile.assert_called_once_with(group['id']) self.assertIsNone(rt1) self.assertIsNone(rt2) self.assertIsNone(rt3) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=None) def test_update_consistencygroup_not_found(self, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'} add_volumes = [{'id': '101'}] remove_volumes = [{'id': '102'}] self.assertRaises(exception.VolumeBackendAPIException, self.driver.update_consistencygroup, context, group, add_volumes, remove_volumes) mock_find_replay_profile.assert_called_once_with(group['id']) 
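    # Illustrative note (not part of the original suite): stacked
    # @mock.patch.object decorators are applied bottom-up, so the mock for
    # the decorator nearest the method arrives first in the argument list,
    # which is why the parameter order in these tests mirrors the decorator
    # order in reverse. A minimal sketch, using a hypothetical Api class:
    #
    #     @mock.patch.object(Api, 'find')    # outermost -> last mock arg
    #     @mock.patch.object(Api, 'create')  # innermost -> first mock arg
    #     def test_something(self, mock_create, mock_find):
    #         ...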
@mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_cg_volumes', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) def test_update_consistencygroup_error(self, mock_find_replay_profile, mock_update_cg_volumes, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'} add_volumes = [{'id': '101'}] remove_volumes = [{'id': '102'}] self.assertRaises(exception.VolumeBackendAPIException, self.driver.update_consistencygroup, context, group, add_volumes, remove_volumes) mock_find_replay_profile.assert_called_once_with(group['id']) mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE, add_volumes, remove_volumes) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'snap_cg_replay', return_value={'instanceId': '100'}) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) def test_create_cgsnapshot(self, mock_find_replay_profile, mock_snap_cg_replay, mock_close_connection, mock_open_connection, mock_init): mock_snapshot = mock.MagicMock() mock_snapshot.id = '1' expected_snapshots = [{'id': '1', 'status': 'available'}] context = {} cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3', 'id': '100'} model_update, snapshots = self.driver.create_cgsnapshot( context, cggrp, [mock_snapshot]) mock_find_replay_profile.assert_called_once_with( cggrp['consistencygroup_id']) mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, cggrp['id'], 0) self.assertEqual('available', model_update['status']) self.assertEqual(expected_snapshots, snapshots) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=None) def test_create_cgsnapshot_profile_not_found(self, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): context = {} cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3', 'id': '100'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cgsnapshot, context, cggrp, []) mock_find_replay_profile.assert_called_once_with( cggrp['consistencygroup_id']) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'snap_cg_replay', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) def test_create_cgsnapshot_fail(self, mock_find_replay_profile, mock_snap_cg_replay, mock_close_connection, mock_open_connection, mock_init): context = {} cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3', 'id': '100'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cgsnapshot, context, cggrp, []) mock_find_replay_profile.assert_called_once_with( cggrp['consistencygroup_id']) mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, cggrp['id'], 0) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_cg_replay', return_value=True) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) def test_delete_cgsnapshot(self, mock_find_replay_profile, mock_delete_cg_replay, mock_close_connection, mock_open_connection, mock_init): mock_snapshot = mock.MagicMock() expected_snapshots = [mock_snapshot] context = {} cgsnap = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3', 'id': '100', 'status': 'deleted'} model_update, snapshots = self.driver.delete_cgsnapshot( 
context, cgsnap, [mock_snapshot]) mock_find_replay_profile.assert_called_once_with( cgsnap['consistencygroup_id']) mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, cgsnap['id']) self.assertEqual({'status': cgsnap['status']}, model_update) self.assertEqual(expected_snapshots, snapshots) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_cg_replay') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=None) def test_delete_cgsnapshot_profile_not_found(self, mock_find_replay_profile, mock_delete_cg_replay, mock_close_connection, mock_open_connection, mock_init): mock_snapshot = mock.MagicMock() expected_snapshots = [mock_snapshot] context = {} cgsnap = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3', 'id': '100', 'status': 'deleted'} model_update, snapshots = self.driver.delete_cgsnapshot( context, cgsnap, [mock_snapshot]) mock_find_replay_profile.assert_called_once_with( cgsnap['consistencygroup_id']) self.assertFalse(mock_delete_cg_replay.called) self.assertEqual({'status': cgsnap['status']}, model_update) self.assertEqual(expected_snapshots, snapshots) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'delete_cg_replay', return_value=False) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) def test_delete_cgsnapshot_profile_failed_delete(self, mock_find_replay_profile, mock_delete_cg_replay, mock_close_connection, mock_open_connection, mock_init): context = {} cgsnap = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3', 'id': '100', 'status': 'available'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_cgsnapshot, context, cgsnap, []) mock_find_replay_profile.assert_called_once_with( cgsnap['consistencygroup_id']) mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, cgsnap['id']) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value={'id': 'guid'}) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'manage_existing') def test_manage_existing(self, mock_manage_existing, mock_create_replications, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): # Very little to do in this one. The call is sent # straight down. volume = {'id': 'guid'} existing_ref = {'source-name': 'imavolumename'} self.driver.manage_existing(volume, existing_ref) mock_manage_existing.assert_called_once_with(volume['id'], existing_ref) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value={'id': 'guid'}) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'manage_existing') def test_manage_existing_id(self, mock_manage_existing, mock_create_replications, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): # Very little to do in this one. The call is sent # straight down. 
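        # Illustrative note (not in the original test): judging by this and
        # the surrounding manage_existing tests, "straight down" means the
        # driver only validates existing_ref before delegating, roughly
        # (hedged sketch):
        #
        #     if ('source-name' not in existing_ref and
        #             'source-id' not in existing_ref):
        #         raise exception.ManageExistingInvalidReference(...)
        #     api.manage_existing(volume['id'], existing_ref)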
volume = {'id': 'guid'} existing_ref = {'source-id': 'imadeviceid'} self.driver.manage_existing(volume, existing_ref) mock_manage_existing.assert_called_once_with(volume['id'], existing_ref) def test_manage_existing_bad_ref(self, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 'guid'} existing_ref = {'banana-name': 'imavolumename'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, existing_ref) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_unmanaged_volume_size', return_value=4) def test_manage_existing_get_size(self, mock_get_unmanaged_volume_size, mock_close_connection, mock_open_connection, mock_init): # Almost nothing to test here. Just that we call our function. volume = {'id': 'guid'} existing_ref = {'source-name': 'imavolumename'} res = self.driver.manage_existing_get_size(volume, existing_ref) mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref) # The above is 4GB and change. self.assertEqual(4, res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'get_unmanaged_volume_size', return_value=4) def test_manage_existing_get_size_id(self, mock_get_unmanaged_volume_size, mock_close_connection, mock_open_connection, mock_init): # Almost nothing to test here. Just that we call our function. volume = {'id': 'guid'} existing_ref = {'source-id': 'imadeviceid'} res = self.driver.manage_existing_get_size(volume, existing_ref) mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref) # The above is 4GB and change. self.assertEqual(4, res) def test_manage_existing_get_size_bad_ref(self, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 'guid'} existing_ref = {'banana-name': 'imavolumename'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, existing_ref) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_storage_profile') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_replay_profiles') @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_replicate_active_replay') def test_retype_not_our_extra_specs(self, mock_update_replicate_active_replay, mock_create_replications, mock_update_replay_profile, mock_update_storage_profile, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': None}, None) self.assertTrue(res) self.assertFalse(mock_update_replicate_active_replay.called) self.assertFalse(mock_create_replications.called) self.assertFalse(mock_update_replay_profile.called) self.assertFalse(mock_update_storage_profile.called) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_replay_profiles') def test_retype_replay_profiles(self, mock_update_replay_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_update_replay_profiles.side_effect = [True, False] # Normal successful run. res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'storagetype:replayprofiles': ['A', 'B']}}, None) mock_update_replay_profiles.assert_called_once_with(self.VOLUME, 'B') self.assertTrue(res) # Run fails. 
Make sure this returns False. res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'storagetype:replayprofiles': ['B', 'A']}}, None) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_create_replications', return_value={'replication_status': 'enabled', 'replication_driver_data': '54321'}) @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_delete_replications') def test_retype_create_replications(self, mock_delete_replications, mock_create_replications, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'replication_enabled': [False, True]}}, None) self.assertTrue(mock_create_replications.called) self.assertFalse(mock_delete_replications.called) self.assertEqual({'replication_status': 'enabled', 'replication_driver_data': '54321'}, res) res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'replication_enabled': [True, False]}}, None) self.assertTrue(mock_delete_replications.called) self.assertEqual({'replication_status': 'disabled', 'replication_driver_data': ''}, res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'update_replicate_active_replay') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) def test_retype_active_replay(self, mock_find_volume, mock_update_replicate_active_replay, mock_close_connection, mock_open_connection, mock_init): # Success, Success, Not called and fail. mock_update_replicate_active_replay.side_effect = [True, True, False] res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'replication:activereplay': ['', ' True']}}, None) self.assertTrue(res) res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'replication:activereplay': [' True', '']}}, None) self.assertTrue(res) res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'replication:activereplay': ['', '']}}, None) self.assertTrue(res) res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'replication:activereplay': ['', ' True']}}, None) self.assertFalse(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) def test_retype_same(self, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): res = self.driver.retype( None, {'id': 'guid'}, None, {'extra_specs': {'storagetype:storageprofile': ['A', 'A']}}, None) self.assertTrue(res) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmanage') def test_unmanage(self, mock_unmanage, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 'guid'} self.driver.unmanage(volume) mock_find_volume.assert_called_once_with(volume['id']) mock_unmanage.assert_called_once_with(self.VOLUME) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_volume', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'unmanage') def test_unmanage_volume_not_found(self, mock_unmanage, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 'guid'} self.driver.unmanage(volume) mock_find_volume.assert_called_once_with(volume['id']) self.assertFalse(mock_unmanage.called) 
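    # Illustrative note (not part of the original suite): in the retype
    # tests in this class, diff['extra_specs'] maps each spec key to a
    # two-element [old, new] list, mirroring the diff the volume manager
    # hands to retype(). A hedged sketch of consuming one entry:
    #
    #     current, requested = diff['extra_specs'][
    #         'storagetype:storageprofile']
    #     if current != requested:
    #         api.update_storage_profile(scvolume, requested)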
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_storage_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_retype(self,
                    mock_find_sc,
                    mock_find_volume,
                    mock_update_storage_profile,
                    mock_close_connection,
                    mock_open_connection,
                    mock_init):
        res = self.driver.retype(
            None, {'id': 'volid'}, None,
            {'extra_specs': {'storagetype:storageprofile': ['A', 'B']}},
            None)
        mock_update_storage_profile.assert_called_once_with(
            self.VOLUME, 'B')
        self.assertTrue(res)

    def test__parse_secondary(self,
                              mock_close_connection,
                              mock_open_connection,
                              mock_init):
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'qosnode': 'cinderqos'}]
        mock_api = mock.MagicMock()
        # Good run. Secondary in replication_driver_data and backend. sc up.
        destssn = self.driver._parse_secondary(mock_api, '67890')
        self.assertEqual(67890, destssn)
        # Bad run. Secondary not in backend.
        destssn = self.driver._parse_secondary(mock_api, '99999')
        self.assertIsNone(destssn)
        # Good run.
        destssn = self.driver._parse_secondary(mock_api, '12345')
        self.assertEqual(12345, destssn)
        self.driver.backends = backends

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc')
    def test__parse_secondary_sc_down(self,
                                      mock_find_sc,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'qosnode': 'cinderqos'}]
        mock_api = mock.MagicMock()
        # Bad run. Good selection. SC down.
        mock_api.find_sc = mock.MagicMock(
            side_effect=exception.VolumeBackendAPIException(data='1234'))
        destssn = self.driver._parse_secondary(mock_api, '12345')
        self.assertIsNone(destssn)
        self.driver.backends = backends

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'break_replication')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_parse_secondary')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'remove_mappings')
    def test_failover_host(self,
                           mock_remove_mappings,
                           mock_find_volume,
                           mock_parse_secondary,
                           mock_break_replication,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        self.driver.replication_enabled = False
        self.driver.failed_over = False
        volumes = [{'id': 'guid1', 'replication_driver_data': '12345'},
                   {'id': 'guid2', 'replication_driver_data': '12345'}]
        # No run. Not doing repl. Should raise.
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.failover_host,
                          {}, volumes, '12345')
        # Good run
        self.driver.replication_enabled = True
        mock_parse_secondary.return_value = 12345
        expected_destssn = 12345
        expected_volume_update = [{'volume_id': 'guid1', 'updates':
                                   {'replication_status': 'failed-over'}},
                                  {'volume_id': 'guid2', 'updates':
                                   {'replication_status': 'failed-over'}}]
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Good run. Not all volumes replicated.
        volumes = [{'id': 'guid1', 'replication_driver_data': '12345'},
                   {'id': 'guid2', 'replication_driver_data': ''}]
        expected_volume_update = [{'volume_id': 'guid1', 'updates':
                                   {'replication_status': 'failed-over'}},
                                  {'volume_id': 'guid2', 'updates':
                                   {'status': 'error'}}]
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Good run. Not all volumes replicated. No replication_driver_data.
        volumes = [{'id': 'guid1', 'replication_driver_data': '12345'},
                   {'id': 'guid2'}]
        expected_volume_update = [{'volume_id': 'guid1', 'updates':
                                   {'replication_status': 'failed-over'}},
                                  {'volume_id': 'guid2', 'updates':
                                   {'status': 'error'}}]
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Good run. No volumes replicated. No replication_driver_data.
        volumes = [{'id': 'guid1'}, {'id': 'guid2'}]
        expected_volume_update = [{'volume_id': 'guid1', 'updates':
                                   {'status': 'error'}},
                                  {'volume_id': 'guid2', 'updates':
                                   {'status': 'error'}}]
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Secondary not found.
        mock_parse_secondary.return_value = None
        self.assertRaises(exception.InvalidInput,
                          self.driver.failover_host,
                          {}, volumes, '54321')
        # Already failed over.
        self.driver.failed_over = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.failover_host,
                          {}, volumes, '12345')
        self.driver.replication_enabled = False

    def test__get_unmanaged_replay(self,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        mock_api = mock.MagicMock()
        existing_ref = None
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver._get_unmanaged_replay,
                          mock_api,
                          'guid',
                          existing_ref)
        existing_ref = {'source-id': 'Not a source-name'}
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver._get_unmanaged_replay,
                          mock_api,
                          'guid',
                          existing_ref)
        existing_ref = {'source-name': 'name'}
        mock_api.find_volume = mock.MagicMock(return_value=None)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_unmanaged_replay,
                          mock_api,
                          'guid',
                          existing_ref)
        mock_api.find_volume.return_value = {'instanceId': '1'}
        mock_api.find_replay = mock.MagicMock(return_value=None)
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver._get_unmanaged_replay,
                          mock_api,
                          'guid',
                          existing_ref)
        mock_api.find_replay.return_value = {'instanceId': 2}
        ret = self.driver._get_unmanaged_replay(mock_api, 'guid',
                                                existing_ref)
        self.assertEqual({'instanceId': 2}, ret)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_unmanaged_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'manage_replay')
    def test_manage_existing_snapshot(self,
                                      mock_manage_replay,
                                      mock_get_unmanaged_replay,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        snapshot = {'volume_id': 'guida', 'id': 'guidb'}
        existing_ref = {'source-name': 'name'}
        screplay = {'description': 'name'}
        mock_get_unmanaged_replay.return_value = screplay
        mock_manage_replay.return_value = True
        self.driver.manage_existing_snapshot(snapshot, existing_ref)
        self.assertEqual(1, mock_get_unmanaged_replay.call_count)
        mock_manage_replay.assert_called_once_with(screplay, 'guidb')
        mock_manage_replay.return_value = False
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_snapshot,
                          snapshot,
                          existing_ref)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_unmanaged_replay')
    def test_manage_existing_snapshot_get_size(self,
                                               mock_get_unmanaged_replay,
                                               mock_close_connection,
                                               mock_open_connection,
                                               mock_init):
        snapshot = {'volume_id': 'a', 'id': 'b'}
        existing_ref = {'source-name': 'name'}
        # Good size.
        mock_get_unmanaged_replay.return_value = {'size':
                                                  '1.073741824E9 Bytes'}
        ret = self.driver.manage_existing_snapshot_get_size(snapshot,
                                                            existing_ref)
        self.assertEqual(1, ret)
        # Not on 1GB boundaries.
        mock_get_unmanaged_replay.return_value = {'size':
                                                  '2.073741824E9 Bytes'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_snapshot_get_size,
                          snapshot,
                          existing_ref)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmanage_replay')
    def test_unmanage_snapshot(self,
                               mock_unmanage_replay,
                               mock_find_replay,
                               mock_find_volume,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        snapshot = {'volume_id': 'guida', 'id': 'guidb'}
        screplay = {'description': 'guidb'}
        mock_find_volume.return_value = None
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.unmanage_snapshot,
                          snapshot)
        mock_find_volume.return_value = {'name': 'guida'}
        mock_find_replay.return_value = None
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.unmanage_snapshot,
                          snapshot)
        mock_find_replay.return_value = screplay
        self.driver.unmanage_snapshot(snapshot)
        mock_unmanage_replay.assert_called_once_with(screplay)
cinder-8.0.0/cinder/tests/unit/test_nexenta5_nfs.py0000664000567000056710000001424312701406250023520 0ustar jenkinsjenkins00000000000000# Copyright 2016 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
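# The tests in this module never touch a real NexentaStor 5 appliance:
# setUp() below replaces jsonrpc.NexentaJSONProxy with a plain mock, so
# every REST call the driver makes (nef.get / nef.post / nef.delete) is
# recorded and asserted against instead of being sent over HTTP.  A
# minimal sketch of the same stubbing pattern (illustrative only, names
# taken from the test code that follows):
#
#     nef_mock = mock.Mock()
#     self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
#                    lambda *_, **__: nef_mock)
#     ...
#     nef_mock.post.assert_called_with(url, data)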
""" Unit tests for OpenStack Cinder volume driver """ import mock from mock import patch from cinder import context from cinder import db from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta.ns5 import nfs class TestNexentaNfsDriver(test.TestCase): TEST_SHARE = 'host1:/pool/share' TEST_SHARE2_OPTIONS = '-o intr' TEST_FILE_NAME = 'test.txt' TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf' TEST_SNAPSHOT_NAME = 'snapshot1' TEST_VOLUME_NAME = 'volume1' TEST_VOLUME_NAME2 = 'volume2' TEST_VOLUME = { 'name': TEST_VOLUME_NAME, 'id': '1', 'size': 1, 'status': 'available', 'provider_location': TEST_SHARE } TEST_VOLUME2 = { 'name': TEST_VOLUME_NAME2, 'size': 1, 'id': '2', 'status': 'in-use' } TEST_SNAPSHOT = { 'name': TEST_SNAPSHOT_NAME, 'volume_name': TEST_VOLUME_NAME, 'volume_id': '1' } TEST_SHARE_SVC = 'svc:/network/nfs/server:default' def setUp(self): super(TestNexentaNfsDriver, self).setUp() self.ctxt = context.get_admin_context() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_mount_point_base = '$state_path/mnt' self.cfg.nexenta_sparsed_volumes = True self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.nfs_mount_point_base = '/mnt/test' self.cfg.nfs_mount_attempts = 3 self.cfg.nas_mount_options = 'vers=4' self.cfg.reserved_percentage = 20 self.cfg.nexenta_rest_protocol = 'http' self.cfg.nexenta_rest_port = 8080 self.cfg.nexenta_user = 'user' self.cfg.nexenta_password = 'pass' self.cfg.max_over_subscription_ratio = 20.0 self.cfg.nas_ip = '1.1.1.1' self.cfg.nas_share_path = 'pool/share' self.nef_mock = mock.Mock() self.stubs.Set(jsonrpc, 'NexentaJSONProxy', lambda *_, **__: self.nef_mock) self.drv = nfs.NexentaNfsDriver(configuration=self.cfg) self.drv.db = db self.drv.do_setup(self.ctxt) def _create_volume_db_entry(self): vol = { 'id': '1', 'size': 1, 'status': 'available', 'provider_location': self.TEST_SHARE } return db.volume_create(self.ctxt, vol)['id'] def test_check_for_setup_error(self): self.nef_mock.get.return_value = {'data': []} self.assertRaises( LookupError, lambda: self.drv.check_for_setup_error()) def test_initialize_connection(self): data = { 'export': self.TEST_VOLUME['provider_location'], 'name': 'volume'} self.assertEqual({ 'driver_volume_type': self.drv.driver_volume_type, 'data': data }, self.drv.initialize_connection(self.TEST_VOLUME, None)) @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 'NexentaNfsDriver._create_regular_file') @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 'NexentaNfsDriver._create_sparsed_file') @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 'NexentaNfsDriver._ensure_share_mounted') @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 'NexentaNfsDriver._share_folder') def test_do_create_volume(self, share, ensure, sparsed, regular): ensure.return_value = True share.return_value = True self.nef_mock.get.return_value = 'on' self.drv._do_create_volume(self.TEST_VOLUME) url = 'storage/pools/pool/filesystems' data = { 'name': 'share/volume1', 'compressionMode': 'on', 'dedupMode': 'off', } self.nef_mock.post.assert_called_with(url, data) @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 
'NexentaNfsDriver._ensure_share_mounted') def test_delete_volume(self, ensure): self._create_volume_db_entry() self.nef_mock.get.return_value = {} self.drv.delete_volume(self.TEST_VOLUME) self.nef_mock.delete.assert_called_with( 'storage/pools/pool/filesystems/share%2Fvolume1?snapshots=true') def test_create_snapshot(self): self._create_volume_db_entry() self.drv.create_snapshot(self.TEST_SNAPSHOT) url = 'storage/pools/pool/filesystems/share%2Fvolume-1/snapshots' data = {'name': self.TEST_SNAPSHOT['name']} self.nef_mock.post.assert_called_with(url, data) def test_delete_snapshot(self): self._create_volume_db_entry() self.drv.delete_snapshot(self.TEST_SNAPSHOT) url = ('storage/pools/pool/filesystems/share%2Fvolume-1/' 'snapshots/snapshot1') self.drv.delete_snapshot(self.TEST_SNAPSHOT) self.nef_mock.delete.assert_called_with(url) @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 'NexentaNfsDriver._share_folder') def test_create_volume_from_snapshot(self, share): self._create_volume_db_entry() url = ('storage/filesystems/pool%2Fshare%2Fvolume2/promote') self.drv.create_volume_from_snapshot( self.TEST_VOLUME2, self.TEST_SNAPSHOT) self.nef_mock.post.assert_called_with(url) def test_get_capacity_info(self): self.nef_mock.get.return_value = { 'bytesAvailable': 1000, 'bytesUsed': 100} self.assertEqual( (1000, 900, 100), self.drv._get_capacity_info('pool/share')) cinder-8.0.0/cinder/tests/unit/test_smbfs.py0000664000567000056710000010140412701406250022231 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
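# The share-eligibility tests in this module exercise two limits the SMBFS
# driver derives from its configuration: ``smbfs_used_ratio`` (how full a
# share may become, counting real bytes) and ``smbfs_oversub_ratio`` (how
# far apparent/allocated size may exceed physical capacity).  A rough
# sketch of the arithmetic the tests assume, with ``(total, free,
# allocated)`` being the capacity tuple (in bytes, built via ``x << 30``
# below) — illustrative pseudocode, not the driver source:
#
#     used_ok = (total - free + requested) / total <= used_ratio
#     oversub_ok = (allocated + requested) / total <= oversub_ratio
#     eligible = used_ok and oversub_ok
#
# e.g. with the ratios mocked in setUp() (0.5 and 2), a share reporting
# (4, 1, 1) GiB cannot take a 2 GiB volume: (4 - 1 + 2) / 4 = 1.25 is
# well over the 0.5 used-ratio cap.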
import copy
import functools
import os

import ddt
import mock
from oslo_utils import fileutils

from cinder import context
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_volume
from cinder.volume.drivers import remotefs
from cinder.volume.drivers import smbfs


def requires_allocation_data_update(expected_size):
    def wrapper(func):
        @functools.wraps(func)
        def inner(inst, *args, **kwargs):
            with mock.patch.object(
                    inst._smbfs_driver,
                    'update_disk_allocation_data') as fake_update:
                func(inst, *args, **kwargs)
                fake_update.assert_called_once_with(inst._FAKE_VOLUME,
                                                    expected_size)
        return inner
    return wrapper


@ddt.ddt
class SmbFsTestCase(test.TestCase):

    _FAKE_SHARE = '//1.2.3.4/share1'
    _FAKE_SHARE_HASH = 'db0bf952c1734092b83e8990bd321131'
    _FAKE_MNT_BASE = '/mnt'
    _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
    _FAKE_TOTAL_SIZE = '2048'
    _FAKE_TOTAL_AVAILABLE = '1024'
    _FAKE_TOTAL_ALLOCATED = 1024
    _FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
                    'size': 1,
                    'provider_location': _FAKE_SHARE,
                    'name': _FAKE_VOLUME_NAME,
                    'status': 'available'}
    _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, _FAKE_SHARE_HASH)
    _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME)
    _FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba'
    _FAKE_SNAPSHOT = {'id': _FAKE_SNAPSHOT_ID,
                      'volume': _FAKE_VOLUME,
                      'status': 'available',
                      'volume_size': 1}
    _FAKE_SNAPSHOT_PATH = (
        _FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID)
    _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
    _FAKE_OPTIONS_DICT = {'username': 'Administrator',
                          'password': '12345'}
    _FAKE_ALLOCATION_DATA_PATH = os.path.join('fake_dir',
                                              'fake_allocation_data')

    def setUp(self):
        super(SmbFsTestCase, self).setUp()
        self._FAKE_SMBFS_CONFIG = mock.MagicMock(
            smbfs_oversub_ratio=2,
            smbfs_used_ratio=0.5,
            smbfs_shares_config='/fake/config/path',
            smbfs_default_volume_format='raw',
            smbfs_sparsed_volumes=False)
        self._smbfs_driver = smbfs.SmbfsDriver(configuration=mock.Mock())
        self._smbfs_driver._remotefsclient = mock.Mock()
        self._smbfs_driver._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        self._smbfs_driver._execute = mock.Mock()
        self._smbfs_driver.base = self._FAKE_MNT_BASE
        self._smbfs_driver._alloc_info_file_path = (
            self._FAKE_ALLOCATION_DATA_PATH)

    def _get_fake_allocation_data(self):
        return {self._FAKE_SHARE_HASH: {
            'total_allocated': self._FAKE_TOTAL_ALLOCATED}}

    @mock.patch.object(smbfs, 'open', create=True)
    @mock.patch('os.path.exists')
    @mock.patch.object(fileutils, 'ensure_tree')
    @mock.patch('json.load')
    def _test_setup_allocation_data(self, mock_json_load, mock_ensure_tree,
                                    mock_exists, mock_open,
                                    allocation_data_exists=False):
        mock_exists.return_value = allocation_data_exists
        self._smbfs_driver._update_allocation_data_file = mock.Mock()

        self._smbfs_driver._setup_allocation_data()

        if allocation_data_exists:
            fd = mock_open.return_value.__enter__.return_value
            mock_json_load.assert_called_once_with(fd)
            self.assertEqual(mock_json_load.return_value,
                             self._smbfs_driver._allocation_data)
        else:
            mock_ensure_tree.assert_called_once_with(
                os.path.dirname(self._FAKE_ALLOCATION_DATA_PATH))
            update_func = self._smbfs_driver._update_allocation_data_file
            update_func.assert_called_once_with()

    def test_setup_allocation_data_file_unexisting(self):
        self._test_setup_allocation_data()

    def test_setup_allocation_data_file_existing(self):
        self._test_setup_allocation_data(allocation_data_exists=True)

    def _test_update_allocation_data(self, virtual_size_gb=None,
                                     volume_exists=True):
        self._smbfs_driver._update_allocation_data_file = mock.Mock()
        update_func = self._smbfs_driver._update_allocation_data_file

        fake_alloc_data = self._get_fake_allocation_data()
        if volume_exists:
            fake_alloc_data[self._FAKE_SHARE_HASH][
                self._FAKE_VOLUME_NAME] = self._FAKE_VOLUME['size']
        self._smbfs_driver._allocation_data = fake_alloc_data

        self._smbfs_driver.update_disk_allocation_data(self._FAKE_VOLUME,
                                                       virtual_size_gb)

        vol_allocated_size = fake_alloc_data[self._FAKE_SHARE_HASH].get(
            self._FAKE_VOLUME_NAME, None)
        if not virtual_size_gb:
            expected_total_allocated = (self._FAKE_TOTAL_ALLOCATED -
                                        self._FAKE_VOLUME['size'])
            self.assertIsNone(vol_allocated_size)
        else:
            expected_total_allocated = (self._FAKE_TOTAL_ALLOCATED +
                                        virtual_size_gb -
                                        self._FAKE_VOLUME['size'])
            self.assertEqual(virtual_size_gb, vol_allocated_size)

        update_func.assert_called_once_with()
        self.assertEqual(
            expected_total_allocated,
            fake_alloc_data[self._FAKE_SHARE_HASH]['total_allocated'])

    def test_update_allocation_data_volume_deleted(self):
        self._test_update_allocation_data()

    def test_update_allocation_data_volume_extended(self):
        self._test_update_allocation_data(
            virtual_size_gb=self._FAKE_VOLUME['size'] + 1)

    def test_update_allocation_data_volume_created(self):
        self._test_update_allocation_data(
            virtual_size_gb=self._FAKE_VOLUME['size'])

    @requires_allocation_data_update(expected_size=None)
    def test_delete_volume(self):
        drv = self._smbfs_driver
        fake_vol_info = self._FAKE_VOLUME_PATH + '.info'

        drv._ensure_share_mounted = mock.MagicMock()
        fake_ensure_mounted = drv._ensure_share_mounted
        drv._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        drv.get_active_image_from_info = mock.Mock(
            return_value=self._FAKE_VOLUME_NAME)
        drv._delete = mock.Mock()
        drv._local_path_volume_info = mock.Mock(
            return_value=fake_vol_info)

        with mock.patch('os.path.exists', lambda x: True):
            drv.delete_volume(self._FAKE_VOLUME)

            fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
            drv._delete.assert_any_call(
                self._FAKE_VOLUME_PATH)
            drv._delete.assert_any_call(fake_vol_info)

    @mock.patch('os.path.exists')
    @mock.patch.object(image_utils, 'check_qemu_img_version')
    def _test_setup(self, mock_check_qemu_img_version,
                    mock_exists, config,
                    share_config_exists=True):
        mock_exists.return_value = share_config_exists
        fake_ensure_mounted = mock.MagicMock()
        self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
        self._smbfs_driver.configuration = config

        if not (config.smbfs_shares_config and share_config_exists and
                config.smbfs_oversub_ratio > 0 and
                0 <= config.smbfs_used_ratio <= 1):
            self.assertRaises(exception.SmbfsException,
                              self._smbfs_driver.do_setup,
                              None)
        else:
            self._smbfs_driver.do_setup(mock.sentinel.context)
            mock_check_qemu_img_version.assert_called_once_with()
            self.assertEqual({}, self._smbfs_driver.shares)
            fake_ensure_mounted.assert_called_once_with()

    def test_setup_missing_shares_config_option(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_shares_config = None
        self._test_setup(config=fake_config,
                         share_config_exists=False)

    def test_setup_missing_shares_config_file(self):
        self._test_setup(config=self._FAKE_SMBFS_CONFIG,
                         share_config_exists=False)

    def test_setup_invalid_oversub_ratio(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_oversub_ratio = -1
        self._test_setup(config=fake_config)

    def test_setup_invalid_used_ratio(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_used_ratio = -1
        self._test_setup(config=fake_config)

    def test_setup_invalid_used_ratio2(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_used_ratio = 1.1
        self._test_setup(config=fake_config)

    def _test_create_volume(self, volume_exists=False, volume_format=None):
        fake_method = mock.MagicMock()
        self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
        self._smbfs_driver._set_rw_permissions_for_all = mock.MagicMock()
        fake_set_permissions = self._smbfs_driver._set_rw_permissions_for_all
        self._smbfs_driver.get_volume_format = mock.MagicMock()

        windows_image_format = False
        fake_vol_path = self._FAKE_VOLUME_PATH
        self._smbfs_driver.get_volume_format.return_value = volume_format

        if volume_format:
            if volume_format in ('vhd', 'vhdx'):
                windows_image_format = volume_format
                if volume_format == 'vhd':
                    windows_image_format = 'vpc'
                method = '_create_windows_image'
                fake_vol_path += '.' + volume_format
            else:
                method = '_create_%s_file' % volume_format
                if volume_format == 'sparsed':
                    self._smbfs_driver.configuration.smbfs_sparsed_volumes = (
                        True)
        else:
            method = '_create_regular_file'

        setattr(self._smbfs_driver, method, fake_method)

        with mock.patch('os.path.exists', new=lambda x: volume_exists):
            if volume_exists:
                self.assertRaises(exception.InvalidVolume,
                                  self._smbfs_driver._do_create_volume,
                                  self._FAKE_VOLUME)
                return

            self._smbfs_driver._do_create_volume(self._FAKE_VOLUME)
            if windows_image_format:
                fake_method.assert_called_once_with(
                    fake_vol_path,
                    self._FAKE_VOLUME['size'],
                    windows_image_format)
            else:
                fake_method.assert_called_once_with(
                    fake_vol_path,
                    self._FAKE_VOLUME['size'])
            fake_set_permissions.assert_called_once_with(fake_vol_path)

    def test_create_existing_volume(self):
        self._test_create_volume(volume_exists=True)

    def test_create_vhdx(self):
        self._test_create_volume(volume_format='vhdx')

    def test_create_qcow2(self):
        self._test_create_volume(volume_format='qcow2')

    def test_create_sparsed(self):
        self._test_create_volume(volume_format='sparsed')

    def test_create_regular(self):
        self._test_create_volume()

    def _test_find_share(self, existing_mounted_shares=True,
                         eligible_shares=True):
        if existing_mounted_shares:
            mounted_shares = ('fake_share1', 'fake_share2', 'fake_share3')
        else:
            mounted_shares = None

        self._smbfs_driver._mounted_shares = mounted_shares
        self._smbfs_driver._is_share_eligible = mock.Mock(
            return_value=eligible_shares)
        self._smbfs_driver._get_total_allocated = mock.Mock(
            side_effect=[3, 2, 1])

        if not mounted_shares:
            self.assertRaises(exception.SmbfsNoSharesMounted,
                              self._smbfs_driver._find_share,
                              self._FAKE_VOLUME['size'])
        elif not eligible_shares:
            self.assertRaises(exception.SmbfsNoSuitableShareFound,
                              self._smbfs_driver._find_share,
                              self._FAKE_VOLUME['size'])
        else:
            ret_value = self._smbfs_driver._find_share(
                self._FAKE_VOLUME['size'])
            # The eligible share with the minimum allocated space
            # will be selected
            self.assertEqual('fake_share3', ret_value)

    def test_find_share(self):
        self._test_find_share()

    def test_find_share_missing_mounted_shares(self):
        self._test_find_share(existing_mounted_shares=False)

    def test_find_share_missing_eligible_shares(self):
        self._test_find_share(eligible_shares=False)

    def _test_is_share_eligible(self, capacity_info, volume_size):
        self._smbfs_driver._get_capacity_info = mock.Mock(
            return_value=[float(x << 30) for x in capacity_info])
        self._smbfs_driver.configuration = self._FAKE_SMBFS_CONFIG
        return self._smbfs_driver._is_share_eligible(self._FAKE_SHARE,
                                                     volume_size)
    def test_share_volume_above_used_ratio(self):
        fake_capacity_info = (4, 1, 1)
        fake_volume_size = 2
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)

    def test_eligible_share(self):
        fake_capacity_info = (4, 4, 0)
        fake_volume_size = 1
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertTrue(ret_value)

    def test_share_volume_above_oversub_ratio(self):
        fake_capacity_info = (4, 4, 7)
        fake_volume_size = 2
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)

    def test_share_reserved_above_oversub_ratio(self):
        fake_capacity_info = (4, 4, 10)
        fake_volume_size = 1
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)

    def test_parse_options(self):
        (opt_list, opt_dict) = self._smbfs_driver.parse_options(
            self._FAKE_SHARE_OPTS)
        expected_ret = ([], self._FAKE_OPTIONS_DICT)
        self.assertEqual(expected_ret, (opt_list, opt_dict))

    def test_parse_credentials(self):
        fake_smb_options = r'-o user=MyDomain\Administrator,noperm'
        expected_flags = '-o username=Administrator,noperm'
        flags = self._smbfs_driver.parse_credentials(fake_smb_options)
        self.assertEqual(expected_flags, flags)

    @mock.patch.object(smbfs.SmbfsDriver, '_get_local_volume_path_template')
    @mock.patch.object(smbfs.SmbfsDriver, '_lookup_local_volume_path')
    @mock.patch.object(smbfs.SmbfsDriver, 'get_volume_format')
    def _test_get_volume_path(self, mock_get_volume_format,
                              mock_lookup_volume, mock_get_path_template,
                              volume_exists=True, volume_format='raw'):
        drv = self._smbfs_driver
        mock_get_path_template.return_value = self._FAKE_VOLUME_PATH
        expected_vol_path = self._FAKE_VOLUME_PATH
        if volume_format in (drv._DISK_FORMAT_VHD, drv._DISK_FORMAT_VHDX):
            expected_vol_path += '.' + volume_format

        mock_lookup_volume.return_value = (
            expected_vol_path if volume_exists else None)
        mock_get_volume_format.return_value = volume_format

        ret_val = drv.local_path(self._FAKE_VOLUME)

        if volume_exists:
            self.assertFalse(mock_get_volume_format.called)
        else:
            mock_get_volume_format.assert_called_once_with(self._FAKE_VOLUME)
        self.assertEqual(expected_vol_path, ret_val)

    def test_get_existing_volume_path(self):
        self._test_get_volume_path()

    def test_get_new_raw_volume_path(self):
        self._test_get_volume_path(volume_exists=False)

    def test_get_new_vhd_volume_path(self):
        self._test_get_volume_path(volume_exists=False, volume_format='vhd')

    @mock.patch.object(smbfs.SmbfsDriver, '_local_volume_dir')
    def test_get_local_volume_path_template(self, mock_get_local_dir):
        mock_get_local_dir.return_value = self._FAKE_MNT_POINT
        ret_val = self._smbfs_driver._get_local_volume_path_template(
            self._FAKE_VOLUME)
        self.assertEqual(self._FAKE_VOLUME_PATH, ret_val)

    @mock.patch('os.path.exists')
    def test_lookup_local_volume_path(self, mock_exists):
        expected_path = self._FAKE_VOLUME_PATH + '.vhdx'
        mock_exists.side_effect = lambda x: x == expected_path

        ret_val = self._smbfs_driver._lookup_local_volume_path(
            self._FAKE_VOLUME_PATH)

        possible_paths = [self._FAKE_VOLUME_PATH + ext
                          for ext in ('', '.vhd', '.vhdx')]
        mock_exists.assert_has_calls(
            [mock.call(path) for path in possible_paths])
        self.assertEqual(expected_path, ret_val)

    @mock.patch.object(smbfs.SmbfsDriver, '_get_local_volume_path_template')
    @mock.patch.object(smbfs.SmbfsDriver, '_lookup_local_volume_path')
    @mock.patch.object(smbfs.SmbfsDriver, '_qemu_img_info')
    @mock.patch.object(smbfs.SmbfsDriver, '_get_volume_format_spec')
    def _mock_get_volume_format(self, mock_get_format_spec,
                                mock_qemu_img_info, mock_lookup_volume,
                                mock_get_path_template,
                                qemu_format=False, volume_format='raw',
                                volume_exists=True):
        mock_get_path_template.return_value = self._FAKE_VOLUME_PATH
        mock_lookup_volume.return_value = (
            self._FAKE_VOLUME_PATH if volume_exists else None)

        mock_qemu_img_info.return_value.file_format = volume_format
        mock_get_format_spec.return_value = volume_format

        ret_val = self._smbfs_driver.get_volume_format(self._FAKE_VOLUME,
                                                       qemu_format)

        if volume_exists:
            mock_qemu_img_info.assert_called_once_with(self._FAKE_VOLUME_PATH,
                                                       self._FAKE_VOLUME_NAME)
            self.assertFalse(mock_get_format_spec.called)
        else:
            mock_get_format_spec.assert_called_once_with(self._FAKE_VOLUME)
            self.assertFalse(mock_qemu_img_info.called)

        return ret_val

    def test_get_existing_raw_volume_format(self):
        fmt = self._mock_get_volume_format()
        self.assertEqual('raw', fmt)

    def test_get_new_vhd_volume_format(self):
        expected_fmt = 'vhd'
        fmt = self._mock_get_volume_format(volume_format=expected_fmt,
                                           volume_exists=False)
        self.assertEqual(expected_fmt, fmt)

    def test_get_new_vhd_legacy_volume_format(self):
        img_fmt = 'vhd'
        expected_fmt = 'vpc'
        ret_val = self._mock_get_volume_format(volume_format=img_fmt,
                                               volume_exists=False,
                                               qemu_format=True)
        self.assertEqual(expected_fmt, ret_val)

    def test_initialize_connection(self):
        self._smbfs_driver.get_active_image_from_info = mock.Mock(
            return_value=self._FAKE_VOLUME_NAME)
        self._smbfs_driver._get_mount_point_base = mock.Mock(
            return_value=self._FAKE_MNT_BASE)
        self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
        self._smbfs_driver._qemu_img_info = mock.Mock(
            return_value=mock.Mock(file_format='raw'))

        fake_data = {'export': self._FAKE_SHARE,
                     'format': 'raw',
                     'name': self._FAKE_VOLUME_NAME,
                     'options': self._FAKE_SHARE_OPTS}
        expected = {
            'driver_volume_type': 'smbfs',
            'data': fake_data,
            'mount_point_base': self._FAKE_MNT_BASE}
        ret_val = self._smbfs_driver.initialize_connection(
            self._FAKE_VOLUME, None)

        self.assertEqual(expected, ret_val)

    def _test_extend_volume(self, extend_failed=False, image_format='raw'):
        drv = self._smbfs_driver

        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)
        drv._check_extend_volume_support = mock.Mock(
            return_value=True)
        drv._is_file_size_equal = mock.Mock(
            return_value=not extend_failed)
        drv._qemu_img_info = mock.Mock(
            return_value=mock.Mock(file_format=image_format))
        drv._delete = mock.Mock()

        with mock.patch.object(image_utils, 'resize_image') as fake_resize, \
                mock.patch.object(image_utils, 'convert_image') as \
                fake_convert:
            if extend_failed:
                self.assertRaises(exception.ExtendVolumeError,
                                  drv.extend_volume,
                                  self._FAKE_VOLUME, mock.sentinel.new_size)
            else:
                drv.extend_volume(self._FAKE_VOLUME, mock.sentinel.new_size)
                if image_format in (drv._DISK_FORMAT_VHDX,
                                    drv._DISK_FORMAT_VHD_LEGACY):
                    fake_tmp_path = self._FAKE_VOLUME_PATH + '.tmp'
                    fake_convert.assert_any_call(self._FAKE_VOLUME_PATH,
                                                 fake_tmp_path, 'raw')
                    fake_resize.assert_called_once_with(
                        fake_tmp_path, mock.sentinel.new_size)
                    fake_convert.assert_any_call(fake_tmp_path,
                                                 self._FAKE_VOLUME_PATH,
                                                 image_format)
                else:
                    fake_resize.assert_called_once_with(
                        self._FAKE_VOLUME_PATH, mock.sentinel.new_size)

    @requires_allocation_data_update(expected_size=mock.sentinel.new_size)
    def test_extend_volume(self):
        self._test_extend_volume()

    def test_extend_volume_failed(self):
        self._test_extend_volume(extend_failed=True)

    @requires_allocation_data_update(expected_size=mock.sentinel.new_size)
    def test_extend_vhd_volume(self):
        self._test_extend_volume(image_format='vpc')

    def _test_check_extend_support(self, has_snapshots=False,
                                   is_eligible=True):
        self._smbfs_driver.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)

        if has_snapshots:
            active_file_path = self._FAKE_SNAPSHOT_PATH
        else:
            active_file_path = self._FAKE_VOLUME_PATH

        self._smbfs_driver.get_active_image_from_info = mock.Mock(
            return_value=active_file_path)
        self._smbfs_driver._is_share_eligible = mock.Mock(
            return_value=is_eligible)

        if has_snapshots:
            self.assertRaises(exception.InvalidVolume,
                              self._smbfs_driver._check_extend_volume_support,
                              self._FAKE_VOLUME, 2)
        elif not is_eligible:
            self.assertRaises(exception.ExtendVolumeError,
                              self._smbfs_driver._check_extend_volume_support,
                              self._FAKE_VOLUME, 2)
        else:
            self._smbfs_driver._check_extend_volume_support(
                self._FAKE_VOLUME, 2)
            self._smbfs_driver._is_share_eligible.assert_called_once_with(
                self._FAKE_SHARE, 1)

    def test_check_extend_support(self):
        self._test_check_extend_support()

    def test_check_extend_volume_with_snapshots(self):
        self._test_check_extend_support(has_snapshots=True)

    def test_check_extend_volume_uneligible_share(self):
        self._test_check_extend_support(is_eligible=False)

    @requires_allocation_data_update(expected_size=_FAKE_VOLUME['size'])
    @mock.patch.object(remotefs.RemoteFSSnapDriver, 'create_volume')
    def test_create_volume_base(self, mock_create_volume):
        self._smbfs_driver.create_volume(self._FAKE_VOLUME)
        mock_create_volume.assert_called_once_with(self._FAKE_VOLUME)

    @requires_allocation_data_update(expected_size=_FAKE_VOLUME['size'])
    @mock.patch.object(smbfs.SmbfsDriver, '_create_volume_from_snapshot')
    def test_create_volume_from_snapshot(self, mock_create_volume):
        self._smbfs_driver.create_volume_from_snapshot(self._FAKE_VOLUME,
                                                       self._FAKE_SNAPSHOT)
        mock_create_volume.assert_called_once_with(self._FAKE_VOLUME,
                                                   self._FAKE_SNAPSHOT)

    @requires_allocation_data_update(expected_size=_FAKE_VOLUME['size'])
    @mock.patch.object(smbfs.SmbfsDriver, '_create_cloned_volume')
    def test_create_cloned_volume(self, mock_create_volume):
        self._smbfs_driver.create_cloned_volume(self._FAKE_VOLUME,
                                                mock.sentinel.src_vol)
        mock_create_volume.assert_called_once_with(self._FAKE_VOLUME,
                                                   mock.sentinel.src_vol)

    def test_create_volume_from_in_use_snapshot(self):
        fake_snapshot = {'status': 'in-use'}
        self.assertRaises(
            exception.InvalidSnapshot,
            self._smbfs_driver.create_volume_from_snapshot,
            self._FAKE_VOLUME, fake_snapshot)

    def test_copy_volume_from_snapshot(self):
        drv = self._smbfs_driver

        fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
        fake_img_info = mock.MagicMock()
        fake_img_info.backing_file = self._FAKE_VOLUME_NAME

        drv.get_volume_format = mock.Mock(
            return_value='raw')
        drv._local_path_volume_info = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH + '.info')
        drv._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        drv._read_info_file = mock.Mock(
            return_value=fake_volume_info)
        drv._qemu_img_info = mock.Mock(
            return_value=fake_img_info)
        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH[:-1])
        drv._extend_volume = mock.Mock()
        drv._set_rw_permissions_for_all = mock.Mock()

        with mock.patch.object(image_utils, 'convert_image') as (
                fake_convert_image):
            drv._copy_volume_from_snapshot(
                self._FAKE_SNAPSHOT, self._FAKE_VOLUME,
                self._FAKE_VOLUME['size'])
            drv._extend_volume.assert_called_once_with(
                self._FAKE_VOLUME, self._FAKE_VOLUME['size'])
            fake_convert_image.assert_called_once_with(
                self._FAKE_VOLUME_PATH,
                self._FAKE_VOLUME_PATH[:-1], 'raw')

    def test_ensure_mounted(self):
        self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}

        self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
        self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
            self._FAKE_SHARE, self._FAKE_SHARE_OPTS.split())

    def _test_copy_image_to_volume(self, wrong_size_after_fetch=False):
        drv = self._smbfs_driver

        vol_size_bytes = self._FAKE_VOLUME['size'] << 30

        fake_img_info = mock.MagicMock()

        if wrong_size_after_fetch:
            fake_img_info.virtual_size = 2 * vol_size_bytes
        else:
            fake_img_info.virtual_size = vol_size_bytes

        drv.get_volume_format = mock.Mock(
            return_value=drv._DISK_FORMAT_VHDX)
        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)
        drv._do_extend_volume = mock.Mock()
        drv.configuration = mock.MagicMock()
        drv.configuration.volume_dd_blocksize = (
            mock.sentinel.block_size)

        with mock.patch.object(image_utils, 'fetch_to_volume_format') as \
                fake_fetch, mock.patch.object(image_utils,
                                              'qemu_img_info') as \
                fake_qemu_img_info:
            fake_qemu_img_info.return_value = fake_img_info

            if wrong_size_after_fetch:
                self.assertRaises(
                    exception.ImageUnacceptable,
                    drv.copy_image_to_volume,
                    mock.sentinel.context, self._FAKE_VOLUME,
                    mock.sentinel.image_service,
                    mock.sentinel.image_id)
            else:
                drv.copy_image_to_volume(
                    mock.sentinel.context, self._FAKE_VOLUME,
                    mock.sentinel.image_service,
                    mock.sentinel.image_id)
                fake_fetch.assert_called_once_with(
                    mock.sentinel.context, mock.sentinel.image_service,
                    mock.sentinel.image_id, self._FAKE_VOLUME_PATH,
                    drv._DISK_FORMAT_VHDX,
                    mock.sentinel.block_size)
                drv._do_extend_volume.assert_called_once_with(
                    self._FAKE_VOLUME_PATH,
                    self._FAKE_VOLUME['size'],
                    self._FAKE_VOLUME['name'])

    def test_copy_image_to_volume(self):
        self._test_copy_image_to_volume()

    def test_copy_image_to_volume_wrong_size_after_fetch(self):
        self._test_copy_image_to_volume(wrong_size_after_fetch=True)

    def test_get_capacity_info(self):
        fake_block_size = 4096.0
        fake_total_blocks = 1024
        fake_avail_blocks = 512
        fake_df = ('%s %s %s' % (fake_block_size, fake_total_blocks,
                                 fake_avail_blocks), None)

        self._smbfs_driver._get_mount_point_for_share = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        self._smbfs_driver._get_total_allocated = mock.Mock(
            return_value=self._FAKE_TOTAL_ALLOCATED)
        self._smbfs_driver._execute.return_value = fake_df

        ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
        expected = (fake_block_size * fake_total_blocks,
                    fake_block_size * fake_avail_blocks,
                    self._FAKE_TOTAL_ALLOCATED)
        self.assertEqual(expected, ret_val)

    @ddt.data([True, False, False],
              [False, False, False],
              [True, True, True],
              [False, True, True],
              [False, False, True],
              [True, False, True])
    @ddt.unpack
    def test_get_volume_format_spec(self,
                                    volume_versioned_object,
                                    volume_meta_contains_fmt,
                                    volume_type_contains_fmt):
        fake_vol_meta_fmt = 'vhd'
        fake_vol_type_fmt = 'vhdx'

        volume_metadata = {}
        volume_type_extra_specs = {}

        fake_vol_dict = fake_volume.fake_db_volume()
        del fake_vol_dict['name']

        if volume_meta_contains_fmt:
            volume_metadata['volume_format'] = fake_vol_meta_fmt
        elif volume_type_contains_fmt:
            volume_type_extra_specs['smbfs:volume_format'] = fake_vol_type_fmt

        ctxt = context.get_admin_context()
        volume_type = db.volume_type_create(
            ctxt, {'extra_specs': volume_type_extra_specs,
                   'name': 'fake_vol_type'})
        fake_vol_dict.update(metadata=volume_metadata,
                             volume_type_id=volume_type.id)
        # We want to get a 'real' SqlA model object, not just a dict.
        volume = db.volume_create(ctxt, fake_vol_dict)
        volume = db.volume_get(ctxt, volume.id)

        if volume_versioned_object:
            volume = objects.Volume._from_db_object(ctxt, objects.Volume(),
                                                    volume)

        resulted_fmt = self._smbfs_driver._get_volume_format_spec(volume)

        if volume_meta_contains_fmt:
            expected_fmt = fake_vol_meta_fmt
        elif volume_type_contains_fmt:
            expected_fmt = fake_vol_type_fmt
        else:
            expected_fmt = None
        self.assertEqual(expected_fmt, resulted_fmt)
cinder-8.0.0/cinder/tests/unit/test_huawei_drivers_compatibility.py0000664000567000056710000000450612701406250027075 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_utils import importutils

from cinder import context
from cinder import test


CONF = cfg.CONF

HUAWEI_ISCSI_MODULE = ("cinder.volume.drivers.huawei.huawei_driver."
                       "HuaweiISCSIDriver")
"HuaweiFCDriver") class VolumeDriverCompatibility(test.TestCase): """Test backwards compatibility for volume drivers.""" def fake_update_cluster_status(self): return def setUp(self): super(VolumeDriverCompatibility, self).setUp() self.manager = importutils.import_object(CONF.volume_manager) self.context = context.get_admin_context() def _load_driver(self, driver): self.manager.__init__(volume_driver=driver) def _driver_module_name(self): return "%s.%s" % (self.manager.driver.__class__.__module__, self.manager.driver.__class__.__name__) def test_huawei_driver_iscsi_old(self): self._load_driver( 'cinder.volume.drivers.huawei.huawei_driver.' 'Huawei18000ISCSIDriver') self.assertEqual(self._driver_module_name(), HUAWEI_ISCSI_MODULE) def test_huawei_driver_iscsi_new(self): self._load_driver(HUAWEI_ISCSI_MODULE) self.assertEqual(self._driver_module_name(), HUAWEI_ISCSI_MODULE) def test_huawei_driver_fc_old(self): self._load_driver( 'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver') self.assertEqual(self._driver_module_name(), HUAWEI_FC_MODULE) def test_huawei_driver_fc_new(self): self._load_driver(HUAWEI_FC_MODULE) self.assertEqual(self._driver_module_name(), HUAWEI_FC_MODULE) cinder-8.0.0/cinder/tests/unit/db/0000775000567000056710000000000012701406543020100 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/db/test_volume_type.py0000664000567000056710000000304512701406250024056 0ustar jenkinsjenkins00000000000000# Copyright 2016 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for volume type.""" from cinder import context from cinder import db from cinder import test from cinder.volume import volume_types class VolumeTypeTestCase(test.TestCase): """Test cases for volume type.""" def setUp(self): super(VolumeTypeTestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin = True) def test_volume_type_update(self): vol_type_ref = volume_types.create(self.ctxt, 'fake volume type') updates = dict(name = 'test_volume_type_update', description = None, is_public = None) updated_vol_type = db.volume_type_update( self.ctxt, vol_type_ref.id, updates) self.assertEqual('test_volume_type_update', updated_vol_type.name) volume_types.destroy(self.ctxt, vol_type_ref.id) cinder-8.0.0/cinder/tests/unit/db/test_transfers.py0000664000567000056710000001206012701406250023512 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for transfers table.""" from cinder import context from cinder import db from cinder import exception from cinder import test from cinder.tests.unit import utils class TransfersTableTestCase(test.TestCase): """Test case for transfers model.""" def setUp(self): super(TransfersTableTestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id') def _create_transfer(self, volume_id=None): """Create a transfer object.""" transfer = {'display_name': 'display_name', 'salt': 'salt', 'crypt_hash': 'crypt_hash'} if volume_id is not None: transfer['volume_id'] = volume_id return db.transfer_create(self.ctxt, transfer)['id'] def test_transfer_create(self): # If the volume_id is Null a KeyError exception will be raised. self.assertRaises(KeyError, self._create_transfer) volume_id = utils.create_volume(self.ctxt)['id'] self._create_transfer(volume_id) def test_transfer_create_not_available(self): volume_id = utils.create_volume(self.ctxt, size=1, status='notavailable')['id'] self.assertRaises(exception.InvalidVolume, self._create_transfer, volume_id) def test_transfer_get(self): volume_id1 = utils.create_volume(self.ctxt)['id'] xfer_id1 = self._create_transfer(volume_id1) xfer = db.transfer_get(self.ctxt, xfer_id1) self.assertEqual(volume_id1, xfer.volume_id, "Unexpected volume_id") nctxt = context.RequestContext(user_id='new_user_id', project_id='new_project_id') self.assertRaises(exception.TransferNotFound, db.transfer_get, nctxt, xfer_id1) xfer = db.transfer_get(nctxt.elevated(), xfer_id1) self.assertEqual(volume_id1, xfer.volume_id, "Unexpected volume_id") def test_transfer_get_all(self): volume_id1 = utils.create_volume(self.ctxt)['id'] volume_id2 = utils.create_volume(self.ctxt)['id'] self._create_transfer(volume_id1) self._create_transfer(volume_id2) self.assertRaises(exception.NotAuthorized, db.transfer_get_all, self.ctxt) xfer = db.transfer_get_all(context.get_admin_context()) self.assertEqual(2, len(xfer), "Unexpected number of transfer records") xfer = db.transfer_get_all_by_project(self.ctxt, self.ctxt.project_id) self.assertEqual(2, len(xfer), "Unexpected number of transfer records") nctxt = context.RequestContext(user_id='new_user_id', project_id='new_project_id') self.assertRaises(exception.NotAuthorized, db.transfer_get_all_by_project, nctxt, self.ctxt.project_id) xfer = db.transfer_get_all_by_project(nctxt.elevated(), self.ctxt.project_id) self.assertEqual(2, len(xfer), "Unexpected number of transfer records") def test_transfer_destroy(self): volume_id = utils.create_volume(self.ctxt)['id'] volume_id2 = utils.create_volume(self.ctxt)['id'] xfer_id1 = self._create_transfer(volume_id) xfer_id2 = self._create_transfer(volume_id2) xfer = db.transfer_get_all(context.get_admin_context()) self.assertEqual(2, len(xfer), "Unexpected number of transfer records") self.assertFalse(xfer[0]['deleted'], "Deleted flag is set") db.transfer_destroy(self.ctxt, xfer_id1) xfer = db.transfer_get_all(context.get_admin_context()) self.assertEqual(1, len(xfer), "Unexpected number of transfer records") self.assertEqual(xfer[0]['id'], xfer_id2, "Unexpected value for Transfer id") nctxt = context.RequestContext(user_id='new_user_id', project_id='new_project_id') self.assertRaises(exception.TransferNotFound, db.transfer_destroy, nctxt, xfer_id2) db.transfer_destroy(nctxt.elevated(), xfer_id2) xfer = db.transfer_get_all(context.get_admin_context()) self.assertEqual(0, len(xfer), "Unexpected number of transfer records") 
cinder-8.0.0/cinder/tests/unit/db/__init__.py0000664000567000056710000000126012701406250022203 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`db` -- Stubs for DB API ============================= """ cinder-8.0.0/cinder/tests/unit/db/test_name_id.py0000664000567000056710000000434412701406250023105 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for volume name_id.""" from oslo_config import cfg from cinder import context from cinder import db from cinder import test from cinder.tests.unit import utils as testutils CONF = cfg.CONF class NameIDsTestCase(test.TestCase): """Test cases for naming volumes with name_id.""" def setUp(self): super(NameIDsTestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id') def test_name_id_same(self): """New volume should have same 'id' and 'name_id'.""" vol_ref = testutils.create_volume(self.ctxt, size=1) self.assertEqual(vol_ref['name_id'], vol_ref['id']) expected_name = CONF.volume_name_template % vol_ref['id'] self.assertEqual(expected_name, vol_ref['name']) def test_name_id_diff(self): """Change name ID to mimic volume after migration.""" vol_ref = testutils.create_volume(self.ctxt, size=1) db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'}) vol_ref = db.volume_get(self.ctxt, vol_ref['id']) expected_name = CONF.volume_name_template % 'fake' self.assertEqual(expected_name, vol_ref['name']) def test_name_id_snapshot_volume_name(self): """Make sure snapshot['volume_name'] is updated.""" vol_ref = testutils.create_volume(self.ctxt, size=1) db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'}) snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id']) expected_name = CONF.volume_name_template % 'fake' self.assertEqual(expected_name, snap_ref['volume_name']) cinder-8.0.0/cinder/tests/unit/db/test_qos_specs.py0000664000567000056710000002172012701406250023505 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eBay Inc. # Copyright (C) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for quality_of_service_specs table.""" import time from cinder import context from cinder import db from cinder import exception from cinder import test from cinder.volume import volume_types def fake_qos_specs_get_by_name(context, name, session=None, inactive=False): pass class QualityOfServiceSpecsTableTestCase(test.TestCase): """Test case for QualityOfServiceSpecs model.""" def setUp(self): super(QualityOfServiceSpecsTableTestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin=True) def _create_qos_specs(self, name, values=None): """Create a transfer object.""" if values: specs = dict(name=name, qos_specs=values) else: specs = {'name': name, 'qos_specs': { 'consumer': 'back-end', 'key1': 'value1', 'key2': 'value2'}} return db.qos_specs_create(self.ctxt, specs)['id'] def test_qos_specs_create(self): # If there is qos specs with the same name exists, # a QoSSpecsExists exception will be raised. name = 'QoSSpecsCreationTest' self._create_qos_specs(name) self.assertRaises(exception.QoSSpecsExists, db.qos_specs_create, self.ctxt, dict(name=name)) specs_id = self._create_qos_specs('NewName') query_id = db.qos_specs_get_by_name( self.ctxt, 'NewName')['id'] self.assertEqual(specs_id, query_id) def test_qos_specs_get(self): value = dict(consumer='front-end', key1='foo', key2='bar') specs_id = self._create_qos_specs('Name1', value) fake_id = 'fake-UUID' self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_get, self.ctxt, fake_id) specs = db.qos_specs_get(self.ctxt, specs_id) expected = dict(name='Name1', id=specs_id, consumer='front-end') del value['consumer'] expected.update(dict(specs=value)) self.assertDictMatch(expected, specs) def test_qos_specs_get_all(self): value1 = dict(consumer='front-end', key1='v1', key2='v2') value2 = dict(consumer='back-end', key3='v3', key4='v4') value3 = dict(consumer='back-end', key5='v5', key6='v6') spec_id1 = self._create_qos_specs('Name1', value1) spec_id2 = self._create_qos_specs('Name2', value2) spec_id3 = self._create_qos_specs('Name3', value3) specs = db.qos_specs_get_all(self.ctxt) self.assertEqual(3, len(specs), "Unexpected number of qos specs records") expected1 = dict(name='Name1', id=spec_id1, consumer='front-end') expected2 = dict(name='Name2', id=spec_id2, consumer='back-end') expected3 = dict(name='Name3', id=spec_id3, consumer='back-end') del value1['consumer'] del value2['consumer'] del value3['consumer'] expected1.update(dict(specs=value1)) expected2.update(dict(specs=value2)) expected3.update(dict(specs=value3)) self.assertIn(expected1, specs) self.assertIn(expected2, specs) self.assertIn(expected3, specs) def test_qos_specs_get_by_name(self): name = str(int(time.time())) value = dict(consumer='front-end', foo='Foo', bar='Bar') specs_id = self._create_qos_specs(name, value) specs = db.qos_specs_get_by_name(self.ctxt, name) del value['consumer'] expected = {'name': name, 'id': specs_id, 'consumer': 'front-end', 'specs': value} self.assertDictMatch(expected, specs) def test_qos_specs_delete(self): name = str(int(time.time())) specs_id = self._create_qos_specs(name) 
db.qos_specs_delete(self.ctxt, specs_id) self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_get, self.ctxt, specs_id) def test_qos_specs_item_delete(self): name = str(int(time.time())) value = dict(consumer='front-end', foo='Foo', bar='Bar') specs_id = self._create_qos_specs(name, value) del value['consumer'] del value['foo'] expected = {'name': name, 'id': specs_id, 'consumer': 'front-end', 'specs': value} db.qos_specs_item_delete(self.ctxt, specs_id, 'foo') specs = db.qos_specs_get_by_name(self.ctxt, name) self.assertDictMatch(expected, specs) def test_associate_type_with_qos(self): self.assertRaises(exception.VolumeTypeNotFound, db.volume_type_qos_associate, self.ctxt, 'Fake-VOLID', 'Fake-QOSID') type_id = volume_types.create(self.ctxt, 'TypeName')['id'] specs_id = self._create_qos_specs('FakeQos') db.volume_type_qos_associate(self.ctxt, type_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(1, len(res)) self.assertEqual(type_id, res[0]['id']) self.assertEqual(specs_id, res[0]['qos_specs_id']) def test_qos_associations_get(self): self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_associations_get, self.ctxt, 'Fake-UUID') type_id = volume_types.create(self.ctxt, 'TypeName')['id'] specs_id = self._create_qos_specs('FakeQos') res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(0, len(res)) db.volume_type_qos_associate(self.ctxt, type_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(1, len(res)) self.assertEqual(type_id, res[0]['id']) self.assertEqual(specs_id, res[0]['qos_specs_id']) type0_id = volume_types.create(self.ctxt, 'Type0Name')['id'] db.volume_type_qos_associate(self.ctxt, type0_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(2, len(res)) self.assertEqual(specs_id, res[0]['qos_specs_id']) self.assertEqual(specs_id, res[1]['qos_specs_id']) def test_qos_specs_disassociate(self): type_id = volume_types.create(self.ctxt, 'TypeName')['id'] specs_id = self._create_qos_specs('FakeQos') db.volume_type_qos_associate(self.ctxt, type_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(type_id, res[0]['id']) self.assertEqual(specs_id, res[0]['qos_specs_id']) db.qos_specs_disassociate(self.ctxt, specs_id, type_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(0, len(res)) res = db.volume_type_get(self.ctxt, type_id) self.assertIsNone(res['qos_specs_id']) def test_qos_specs_disassociate_all(self): specs_id = self._create_qos_specs('FakeQos') type1_id = volume_types.create(self.ctxt, 'Type1Name')['id'] type2_id = volume_types.create(self.ctxt, 'Type2Name')['id'] type3_id = volume_types.create(self.ctxt, 'Type3Name')['id'] db.volume_type_qos_associate(self.ctxt, type1_id, specs_id) db.volume_type_qos_associate(self.ctxt, type2_id, specs_id) db.volume_type_qos_associate(self.ctxt, type3_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(3, len(res)) db.qos_specs_disassociate_all(self.ctxt, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(0, len(res)) def test_qos_specs_update(self): name = 'FakeName' specs_id = self._create_qos_specs(name) value = dict(key2='new_value2', key3='value3') self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_update, self.ctxt, 'Fake-UUID', value) db.qos_specs_update(self.ctxt, specs_id, value) specs = db.qos_specs_get(self.ctxt, specs_id) self.assertEqual('new_value2', 
cinder-8.0.0/cinder/tests/unit/db/test_purge.py0000664000567000056710000001003612701406250022626 0ustar jenkinsjenkins00000000000000
# Copyright (C) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for db purge."""

import datetime
import uuid

from oslo_utils import timeutils

from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as db_api
from cinder import exception
from cinder import test

from oslo_db.sqlalchemy import utils as sqlalchemyutils


class PurgeDeletedTest(test.TestCase):

    def setUp(self):
        super(PurgeDeletedTest, self).setUp()
        self.context = context.get_admin_context()
        self.engine = db_api.get_engine()
        self.session = db_api.get_session()
        self.conn = self.engine.connect()
        self.volumes = sqlalchemyutils.get_table(
            self.engine, "volumes")
        # The volume_metadata table has a FK of volume_id
        self.vm = sqlalchemyutils.get_table(
            self.engine, "volume_metadata")
        self.uuidstrs = []
        for unused in range(6):
            self.uuidstrs.append(uuid.uuid4().hex)
        # Add 6 rows to table
        for uuidstr in self.uuidstrs:
            ins_stmt = self.volumes.insert().values(id=uuidstr)
            self.conn.execute(ins_stmt)
            ins_stmt = self.vm.insert().values(volume_id=uuidstr)
            self.conn.execute(ins_stmt)
        # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago
        old = timeutils.utcnow() - datetime.timedelta(days=20)
        older = timeutils.utcnow() - datetime.timedelta(days=60)
        make_old = self.volumes.update().\
            where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\
            .values(deleted_at=old)
        make_older = self.volumes.update().\
            where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\
            .values(deleted_at=older)
        make_meta_old = self.vm.update().\
            where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\
            .values(deleted_at=old)
        make_meta_older = self.vm.update().\
            where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\
            .values(deleted_at=older)
        self.conn.execute(make_old)
        self.conn.execute(make_older)
        self.conn.execute(make_meta_old)
        self.conn.execute(make_meta_older)

    def test_purge_deleted_rows_old(self):
        # Purge at 30 days old, should only delete 2 rows
        db.purge_deleted_rows(self.context, age_in_days=30)
        rows = self.session.query(self.volumes).count()
        meta_rows = self.session.query(self.vm).count()
        # Verify that we only deleted 2
        self.assertEqual(4, rows)
        self.assertEqual(4, meta_rows)

    def test_purge_deleted_rows_older(self):
        # Purge at 10 days old now, should delete 2 more rows
        db.purge_deleted_rows(self.context, age_in_days=10)
        rows = self.session.query(self.volumes).count()
        meta_rows = self.session.query(self.vm).count()
        # Verify that we only have 2 rows now
        self.assertEqual(2, rows)
        self.assertEqual(2, meta_rows)

    def test_purge_deleted_rows_bad_args(self):
        # Test with no age argument
        self.assertRaises(TypeError, db.purge_deleted_rows, self.context)
        # Test purge with non-integer
        self.assertRaises(exception.InvalidParameterValue,
                          db.purge_deleted_rows, self.context,
                          age_in_days='ten')
        # Test with negative value
        self.assertRaises(exception.InvalidParameterValue,
                          db.purge_deleted_rows, self.context,
                          age_in_days=-1)
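

# For reference, a minimal sketch of the age cutoff purge_deleted_rows()
# applies (the real implementation lives in cinder.db.sqlalchemy.api and
# deletes the matching soft-deleted rows; this helper is illustrative only
# and is not used by the tests above).
def _past_purge_cutoff(deleted_at, age_in_days):
    """Return True if a row soft-deleted at `deleted_at` would be purged."""
    cutoff = timeutils.utcnow() - datetime.timedelta(days=age_in_days)
    return deleted_at is not None and deleted_at < cutoff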
cinder-8.0.0/cinder/tests/unit/db/fakes.py0000664000567000056710000000255512701406250021545 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Stubouts, mocks and fixtures for the test suite."""

from cinder import db


class FakeModel(object):
    """Stubs out for model."""
    def __init__(self, values):
        self.values = values

    def __getattr__(self, name):
        return self.values[name]

    def __getitem__(self, key):
        if key in self.values:
            return self.values[key]
        else:
            raise NotImplementedError()

    def __repr__(self):
        return '<FakeModel: %s>' % self.values


def stub_out(stubs, funcs):
    """Set the stubs in mapping in the db api."""
    for func in funcs:
        func_name = '_'.join(func.__name__.split('_')[1:])
        stubs.Set(db, func_name, func)
cinder-8.0.0/cinder/tests/unit/test_infortrend_cli.py0000664000567000056710000021423612701406250024130 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
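
# The InfortrendCLITestData class below pairs canned raw CLI transcripts
# (the get_fake_* methods) with their expected parsed forms (the get_test_*
# methods); each show-command test feeds the former through the CLI layer
# and asserts it yields the latter.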
import mock from cinder import test from cinder.volume.drivers.infortrend.eonstor_ds_cli import cli_factory as cli class InfortrendCLITestData(object): """CLI Test Data.""" # Infortrend entry fake_lv_id = ['5DE94FF775D81C30', '1234567890'] fake_partition_id = ['6A41315B0EDC8EB7', '51B4283E4E159173', '987654321', '123456789', '2667FE351FC505AE', '53F3E98141A2E871'] fake_pair_id = ['55D790F8350B036B', '095A184B0ED2DB10'] fake_snapshot_id = ['2C7A8D211F3B1E36', '60135EE53C14D5EB'] fake_data_port_ip = ['172.27.0.1', '172.27.0.2', '172.27.0.3', '172.27.0.4', '172.27.0.5', '172.27.0.6'] fake_model = ['DS S12F-G2852-6'] fake_manage_port_ip = ['172.27.0.10'] fake_system_id = ['DEEC'] fake_host_ip = ['172.27.0.2'] fake_target_wwnns = ['100123D02300DEEC', '100123D02310DEEC'] fake_target_wwpns = ['110123D02300DEEC', '120123D02300DEEC', '110123D02310DEEC', '120123D02310DEEC'] fake_initiator_wwnns = ['2234567890123456', '2234567890543216'] fake_initiator_wwpns = ['1234567890123456', '1234567890543216'] fake_initiator_iqn = ['iqn.1991-05.com.infortrend:pc123', 'iqn.1991-05.com.infortrend:pc456'] fake_lun_map = [0, 1, 2] # cinder entry test_provider_location = [( 'system_id^%s@partition_id^%s') % ( int(fake_system_id[0], 16), fake_partition_id[0]), ] test_volume = { 'id': '5aa119a8-d25b-45a7-8d1b-88e127885635', 'size': 1, 'name': 'Part-1', 'host': 'infortrend-server1@backend_1#LV-1', 'name_id': '5aa119a8-d25b-45a7-8d1b-88e127885635', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'Part-1', 'volume_type_id': None, 'provider_location': test_provider_location[0], 'volume_attachment': [], } test_dst_volume = { 'id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'size': 1, 'name': 'Part-1-Copy', 'host': 'infortrend-server1@backend_1', 'name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'provider_auth': None, 'project_id': 'project', 'display_name': None, '_name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'display_description': 'Part-1-Copy', 'volume_type_id': None, 'provider_location': '', 'volume_attachment': [], } test_ref_volume = { 'source-id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'size': 1, } test_ref_volume_with_import = { 'source-name': 'import_into_openstack', 'size': 1, } test_snapshot = { 'id': 'ffa9bc5e-1172-4021-acaf-cdcd78a9584d', 'volume_id': test_volume['id'], 'size': 2, 'volume_name': test_volume['name'], 'volume_size': 2, 'project_id': 'project', 'display_name': None, 'display_description': 'SI-1', 'volume_type_id': None, 'provider_location': fake_snapshot_id[0], } test_iqn = [( 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( int(fake_system_id[0], 16), 1, 0, 1), ( 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( int(fake_system_id[0], 16), 1, 0, 1), ] test_iscsi_properties = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_portal': '%s:3260' % fake_data_port_ip[2], 'target_iqn': test_iqn[0], 'target_lun': fake_lun_map[0], 'volume_id': test_volume['id'], }, } test_iscsi_properties_with_mcs = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_portal': '%s:3260' % fake_data_port_ip[0], 'target_iqn': test_iqn[1], 'target_lun': fake_lun_map[2], 'volume_id': test_volume['id'], }, } test_iqn_empty_map = [( 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( int(fake_system_id[0], 16), 0, 0, 1), ] test_iscsi_properties_empty_map = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_portal': '%s:3260' % fake_data_port_ip[0], 'target_iqn': test_iqn_empty_map[0], 
'target_lun': fake_lun_map[0], 'volume_id': test_volume['id'], }, } test_initiator_target_map = { fake_initiator_wwpns[0]: fake_target_wwpns[0:2], fake_initiator_wwpns[1]: fake_target_wwpns[0:2], } test_fc_properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': fake_target_wwpns[0:2], 'initiator_target_map': test_initiator_target_map, }, } test_initiator_target_map_specific_channel = { fake_initiator_wwpns[0]: [fake_target_wwpns[1]], fake_initiator_wwpns[1]: [fake_target_wwpns[1]], } test_fc_properties_with_specific_channel = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': [fake_target_wwpns[1]], 'initiator_target_map': test_initiator_target_map_specific_channel, }, } test_target_wwpns_map_multipath_r_model = [ fake_target_wwpns[0], fake_target_wwpns[2], fake_target_wwpns[1], fake_target_wwpns[3], ] test_initiator_target_map_multipath_r_model = { fake_initiator_wwpns[0]: test_target_wwpns_map_multipath_r_model[:], fake_initiator_wwpns[1]: test_target_wwpns_map_multipath_r_model[:], } test_fc_properties_multipath_r_model = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': test_target_wwpns_map_multipath_r_model[:], 'initiator_target_map': test_initiator_target_map_multipath_r_model, }, } test_initiator_target_map_zoning = { fake_initiator_wwpns[0].lower(): [x.lower() for x in fake_target_wwpns[0:2]], fake_initiator_wwpns[1].lower(): [x.lower() for x in fake_target_wwpns[0:2]], } test_fc_properties_zoning = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': [x.lower() for x in fake_target_wwpns[0:2]], 'initiator_target_map': test_initiator_target_map_zoning, }, } test_initiator_target_map_zoning_r_model = { fake_initiator_wwpns[0].lower(): [x.lower() for x in fake_target_wwpns[1:3]], fake_initiator_wwpns[1].lower(): [x.lower() for x in fake_target_wwpns[1:3]], } test_fc_properties_zoning_r_model = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': [x.lower() for x in fake_target_wwpns[1:3]], 'initiator_target_map': test_initiator_target_map_zoning_r_model, }, } test_fc_terminate_conn_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': test_initiator_target_map_zoning, }, } test_connector_iscsi = { 'ip': fake_host_ip[0], 'initiator': fake_initiator_iqn[0], 'host': 'infortrend-server1@backend_1', } test_connector_fc = { 'wwpns': fake_initiator_wwpns, 'wwnns': fake_initiator_wwnns, 'host': 'infortrend-server1@backend_1', } fake_pool = { 'pool_name': 'LV-2', 'pool_id': fake_lv_id[1], 'total_capacity_gb': 1000, 'free_capacity_gb': 1000, 'reserved_percentage': 0, 'QoS_support': False, 'thin_provisioning_support': False, } test_pools = [{ 'pool_name': 'LV-1', 'pool_id': fake_lv_id[0], 'total_capacity_gb': round(857982.0 / 1024, 2), 'free_capacity_gb': round(841978.0 / 1024, 2), 'reserved_percentage': 0, 'QoS_support': False, 'max_over_subscription_ratio': 20.0, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'provisioned_capacity_gb': round((400) / 1024, 2), 'infortrend_provisioning': 'full', }] test_volume_states = { 'volume_backend_name': 'infortrend_backend_1', 'vendor_name': 'Infortrend', 'driver_version': '99.99', 'storage_protocol': 'iSCSI', 'pools': test_pools, } test_host 
= { 'host': 'infortrend-server1@backend_1', 'capabilities': test_volume_states, } test_migrate_volume_states = { 'volume_backend_name': 'infortrend_backend_1', 'vendor_name': 'Infortrend', 'driver_version': '99.99', 'storage_protocol': 'iSCSI', 'pool_name': 'LV-1', 'pool_id': fake_lv_id[1], 'total_capacity_gb': round(857982.0 / 1024, 2), 'free_capacity_gb': round(841978.0 / 1024, 2), 'reserved_percentage': 0, 'QoS_support': False, 'infortrend_provisioning': 'full', } test_migrate_host = { 'host': 'infortrend-server1@backend_1#LV-2', 'capabilities': test_migrate_volume_states, } test_migrate_volume_states_2 = { 'volume_backend_name': 'infortrend_backend_1', 'vendor_name': 'Infortrend', 'driver_version': '99.99', 'storage_protocol': 'iSCSI', 'pool_name': 'LV-1', 'pool_id': fake_lv_id[1], 'total_capacity_gb': round(857982.0 / 1024, 2), 'free_capacity_gb': round(841978.0 / 1024, 2), 'reserved_percentage': 0, 'QoS_support': False, 'infortrend_provisioning': 'full', } test_migrate_host_2 = { 'host': 'infortrend-server1@backend_1#LV-1', 'capabilities': test_migrate_volume_states_2, } fake_host = { 'host': 'infortrend-server1@backend_1', 'capabilities': {}, } fake_volume_id = [test_volume['id'], test_dst_volume['id']] fake_lookup_map = { '12345678': { 'initiator_port_wwn_list': [x.lower() for x in fake_initiator_wwpns], 'target_port_wwn_list': [x.lower() for x in fake_target_wwpns[0:2]], }, } fake_lookup_map_r_model = { '12345678': { 'initiator_port_wwn_list': [x.lower() for x in fake_initiator_wwpns[:]], 'target_port_wwn_list': [x.lower() for x in fake_target_wwpns[1:3]], }, } test_new_type = { 'name': 'type0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'infortrend_provisioning': 'thin'}, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = {'extra_specs': {'infortrend_provisioning': ('full', 'thin')}} def get_fake_cli_failed(self): return """ CLI: Failed Return: 0x0001 CLI: No selected device Return: 0x000c """ def get_fake_cli_failed_with_network(self): return """ CLI: Failed Return: 0x0001 CLI: No network Return: 0x000b """ def get_fake_cli_succeed(self): return """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. Return: 0x0000 CLI: Successful: 0 mapping(s) shown Return: 0x0000 """ def get_test_show_empty_list(self): return (0, []) def get_test_show_snapshot(self, partition_id=None, snapshot_id=None): if partition_id and snapshot_id: return (0, [{ 'Map': 'No', 'Partition-ID': partition_id, 'SI-ID': snapshot_id, 'Name': '---', 'Activated-time': 'Thu, Jan 09 01:33:11 2020', 'Index': '1', }]) else: return (0, [{ 'Map': 'No', 'Partition-ID': self.fake_partition_id[0], 'SI-ID': self.fake_snapshot_id[0], 'Name': '---', 'Activated-time': 'Thu, Jan 09 01:33:11 2020', 'Index': '1', }, { 'Map': 'No', 'Partition-ID': self.fake_partition_id[0], 'SI-ID': self.fake_snapshot_id[1], 'Name': '---', 'Activated-time': 'Thu, Jan 09 01:35:50 2020', 'Index': '2', }]) def get_fake_show_snapshot(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
Return: 0x0000 \/\/\/- \ / - \ / - \/-\/- Index SI-ID Name Partition-ID Map Activated-time --------------------------------------------------------------------------------- 1 %s --- %s No Thu, Jan 09 01:33:11 2020 2 %s --- %s No Thu, Jan 09 01:35:50 2020 CLI: Successful: 2 snapshot image(s) shown Return: 0x0000 """ return msg % (self.fake_snapshot_id[0], self.fake_partition_id[0], self.fake_snapshot_id[1], self.fake_partition_id[0]) def get_test_show_snapshot_detail_filled_block(self): return (0, [{ 'Mapped': 'Yes', 'Created-time': 'Wed, Jun 10 10:57:16 2015', 'ID': self.fake_snapshot_id[0], 'Last-modification-time': 'Wed, Jun 10 10:57:16 2015', 'Description': '---', 'Total-filled-block': '1', 'LV-ID': self.fake_lv_id[0], 'Activation-schedule-time': 'Not Actived', 'Mapping': 'CH:0/ID:0/LUN:1', 'Index': '1', 'Used': '0', 'Name': '---', 'Valid-filled-block': '0', 'Partition-ID': self.fake_partition_id[0], }]) def get_test_show_snapshot_detail(self): return (0, [{ 'Mapped': 'Yes', 'Created-time': 'Wed, Jun 10 10:57:16 2015', 'ID': self.fake_snapshot_id[0], 'Last-modification-time': 'Wed, Jun 10 10:57:16 2015', 'Description': '---', 'Total-filled-block': '0', 'LV-ID': self.fake_lv_id[0], 'Activation-schedule-time': 'Not Actived', 'Mapping': 'CH:0/ID:0/LUN:1', 'Index': '1', 'Used': '0', 'Name': '---', 'Valid-filled-block': '0', 'Partition-ID': self.fake_partition_id[0], }]) def get_fake_show_snapshot_detail(self): msg = """ CLI: Successful: Device(UID:25090, Name:, Model:DS 1016RE) selected. Return: 0x0000 ID: %s Index: 1 Name: --- Partition-ID: %s LV-ID: %s Created-time: Wed, Jun 10 10:57:16 2015 Last-modification-time: Wed, Jun 10 10:57:16 2015 Activation-schedule-time: Not Actived Used: 0 Valid-filled-block: 0 Total-filled-block: 0 Description: --- Mapped: Yes Mapping: CH:0/ID:0/LUN:1 CLI: Successful: 1 snapshot image(s) shown Return: 0x0000 """ return msg % (self.fake_snapshot_id[0], self.fake_partition_id[0], self.fake_lv_id[0]) def get_test_show_net(self): return (0, [{ 'Slot': 'slotA', 'MAC': '10D02380DEEC', 'ID': '1', 'IPv4': self.fake_data_port_ip[0], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotB', 'MAC': '10D02390DEEC', 'ID': '1', 'IPv4': self.fake_data_port_ip[1], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotA', 'MAC': '10D02340DEEC', 'ID': '2', 'IPv4': self.fake_data_port_ip[2], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotB', 'MAC': '10D02350DEEC', 'ID': '2', 'IPv4': self.fake_data_port_ip[3], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotA', 'MAC': '10D02310DEEC', 'ID': '4', 'IPv4': self.fake_data_port_ip[4], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotB', 'MAC': '10D02320DEEC', 'ID': '4', 'IPv4': self.fake_data_port_ip[5], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': '---', 'MAC': '10D023077124', 'ID': '32', 'IPv4': '172.27.1.1', 'Mode': 'Disabled', 'IPv6': '---', }]) def get_fake_show_net(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
Return: 0x0000 ID MAC Mode IPv4 Mode IPv6 Slot --------------------------------------------------------------- 1 10D02380DEEC DHCP %s Disabled --- slotA 1 10D02390DEEC DHCP %s Disabled --- slotB 2 10D02340DEEC DHCP %s Disabled --- slotA 2 10D02350DEEC DHCP %s Disabled --- slotB 4 10D02310DEEC DHCP %s Disabled --- slotA 4 10D02320DEEC DHCP %s Disabled --- slotB 32 10D023077124 DHCP 172.27.1.1 Disabled --- --- CLI: Successful: 2 record(s) found Return: 0x0000 """ return msg % (self.fake_data_port_ip[0], self.fake_data_port_ip[1], self.fake_data_port_ip[2], self.fake_data_port_ip[3], self.fake_data_port_ip[4], self.fake_data_port_ip[5]) def get_test_show_net_detail(self): return (0, [{ 'Slot': 'slotA', 'IPv4-mode': 'DHCP', 'ID': '1', 'IPv6-address': '---', 'Net-mask': '---', 'IPv4-address': '---', 'Route': '---', 'Gateway': '---', 'IPv6-mode': 'Disabled', 'MAC': '00D023877124', 'Prefix-length': '---', }, { 'Slot': '---', 'IPv4-mode': 'DHCP', 'ID': '32', 'IPv6-address': '---', 'Net-mask': '255.255.240.0', 'IPv4-address': '172.27.112.245', 'Route': '---', 'Gateway': '172.27.127.254', 'IPv6-mode': 'Disabled', 'MAC': '00D023077124', 'Prefix-length': '---', }]) def get_fake_show_net_detail(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. Return: 0x0000 ID: 1 MAC: 00D023877124 IPv4-mode: DHCP IPv4-address: --- Net-mask: --- Gateway: --- IPv6-mode: Disabled IPv6-address: --- Prefix-length: --- Route: --- Slot: slotA ID: 32 MAC: 00D023077124 IPv4-mode: DHCP IPv4-address: 172.27.112.245 Net-mask: 255.255.240.0 Gateway: 172.27.127.254 IPv6-mode: Disabled IPv6-address: --- Prefix-length: --- Route: --- Slot: --- CLI: Successful: 3 record(s) found Return: 0x0000 """ return msg def get_test_show_partition(self, volume_id=None, pool_id=None): result = [{ 'ID': self.fake_partition_id[0], 'Used': '200', 'Name': self.fake_volume_id[0].replace('-', ''), 'Size': '200', 'Min-reserve': '200', 'LV-ID': self.fake_lv_id[0], }, { 'ID': self.fake_partition_id[1], 'Used': '200', 'Name': self.fake_volume_id[1].replace('-', ''), 'Size': '200', 'Min-reserve': '200', 'LV-ID': self.fake_lv_id[0], }] if volume_id and pool_id: result.append({ 'ID': self.fake_partition_id[2], 'Used': '200', 'Name': volume_id, 'Size': '200', 'Min-reserve': '200', 'LV-ID': pool_id, }) return (0, result) def get_fake_show_partition(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
Return: 0x0000 ID Name LV-ID Size Used Min-reserve --------------------------------------------------- %s %s %s 200 200 200 %s %s %s 200 200 200 CLI: Successful: 3 partition(s) shown Return: 0x0000 """ return msg % (self.fake_partition_id[0], self.fake_volume_id[0].replace('-', ''), self.fake_lv_id[0], self.fake_partition_id[1], self.fake_volume_id[1].replace('-', ''), self.fake_lv_id[0]) def get_test_show_partition_detail_for_map( self, partition_id, mapped='true'): result = [{ 'LV-ID': self.fake_lv_id[0], 'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1', 'Used': '200', 'Size': '200', 'ID': partition_id, 'Progress': '---', 'Min-reserve': '200', 'Last-modification-time': 'Wed, Jan 08 20:23:23 2020', 'Valid-filled-block': '100', 'Name': self.fake_volume_id[0].replace('-', ''), 'Mapped': mapped, 'Total-filled-block': '100', 'Creation-time': 'Wed, Jan 08 20:23:23 2020', }] return (0, result) def get_test_show_partition_detail(self, volume_id=None, pool_id=None): result = [{ 'LV-ID': self.fake_lv_id[0], 'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0', 'Used': '200', 'Size': '200', 'ID': self.fake_partition_id[0], 'Progress': '---', 'Min-reserve': '200', 'Last-modification-time': 'Wed, Jan 08 20:23:23 2020', 'Valid-filled-block': '100', 'Name': self.fake_volume_id[0].replace('-', ''), 'Mapped': 'true', 'Total-filled-block': '100', 'Creation-time': 'Wed, Jan 08 20:23:23 2020', }, { 'LV-ID': self.fake_lv_id[0], 'Mapping': '---', 'Used': '200', 'Size': '200', 'ID': self.fake_partition_id[1], 'Progress': '---', 'Min-reserve': '200', 'Last-modification-time': 'Sat, Jan 11 22:18:40 2020', 'Valid-filled-block': '100', 'Name': self.fake_volume_id[1].replace('-', ''), 'Mapped': 'false', 'Total-filled-block': '100', 'Creation-time': 'Sat, Jan 11 22:18:40 2020', }] if volume_id and pool_id: result.extend([{ 'LV-ID': pool_id, 'Mapping': '---', 'Used': '200', 'Size': '200', 'ID': self.fake_partition_id[2], 'Progress': '---', 'Min-reserve': '200', 'Last-modification-time': 'Sat, Jan 15 22:18:40 2020', 'Valid-filled-block': '100', 'Name': volume_id, 'Mapped': 'false', 'Total-filled-block': '100', 'Creation-time': 'Sat, Jan 15 22:18:40 2020', }, { 'LV-ID': '987654321', 'Mapping': '---', 'Used': '200', 'Size': '200', 'ID': '123123123123', 'Progress': '---', 'Min-reserve': '200', 'Last-modification-time': 'Sat, Jan 12 22:18:40 2020', 'Valid-filled-block': '100', 'Name': volume_id, 'Mapped': 'false', 'Total-filled-block': '100', 'Creation-time': 'Sat, Jan 15 22:18:40 2020', }]) return (0, result) def get_fake_show_partition_detail(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
Return: 0x0000 ID: %s Name: %s LV-ID: %s Size: 200 Used: 200 Min-reserve: 200 Creation-time: Wed, Jan 08 20:23:23 2020 Last-modification-time: Wed, Jan 08 20:23:23 2020 Valid-filled-block: 100 Total-filled-block: 100 Progress: --- Mapped: true Mapping: CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0 ID: %s Name: %s LV-ID: %s Size: 200 Used: 200 Min-reserve: 200 Creation-time: Sat, Jan 11 22:18:40 2020 Last-modification-time: Sat, Jan 11 22:18:40 2020 Valid-filled-block: 100 Total-filled-block: 100 Progress: --- Mapped: false Mapping: --- CLI: Successful: 3 partition(s) shown Return: 0x0000 """ return msg % (self.fake_partition_id[0], self.fake_volume_id[0].replace('-', ''), self.fake_lv_id[0], self.fake_partition_id[1], self.fake_volume_id[1].replace('-', ''), self.fake_lv_id[0]) def get_test_show_replica_detail_for_migrate( self, src_part_id, dst_part_id, volume_id, status='Completed'): result = [{ 'Pair-ID': self.fake_pair_id[0], 'Name': 'Cinder-Snapshot', 'Source-Device': 'DEEC', 'Source': src_part_id, 'Source-Type': 'LV-Partition', 'Source-Name': volume_id, 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'Yes', 'Target-Device': 'DEEC', 'Target': dst_part_id, 'Target-Type': 'LV-Partition', 'Target-Name': volume_id, 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '033EA1FA4EA193EB', 'Target-Mapped': 'No', 'Type': 'Copy', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': status, 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }] return (0, result) def get_test_show_replica_detail_for_si_sync_pair(self): result = [{ 'Pair-ID': self.fake_pair_id[0], 'Name': 'Cinder-Snapshot', 'Source-Device': 'DEEC', 'Source': self.fake_snapshot_id[0], 'Source-Type': 'LV-Partition', 'Source-Name': '', 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'Yes', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[1], 'Target-Type': 'LV-Partition', 'Target-Name': '', 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '033EA1FA4EA193EB', 'Target-Mapped': 'No', 'Type': 'Copy', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Copy', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }] return (0, result) def get_test_show_replica_detail_for_sync_pair(self): result = [{ 'Pair-ID': self.fake_pair_id[0], 'Name': 'Cinder-Snapshot', 'Source-Device': 'DEEC', 'Source': self.fake_partition_id[0], 'Source-Type': 'LV-Partition', 'Source-Name': self.fake_volume_id[0].replace('-', ''), 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'Yes', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[1], 'Target-Type': 'LV-Partition', 'Target-Name': self.fake_volume_id[1].replace('-', ''), 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '033EA1FA4EA193EB', 'Target-Mapped': 'No', 'Type': 'Copy', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Copy', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }] return (0, result) def 
get_test_show_replica_detail(self): result = [{ 'Pair-ID': '4BF246E26966F015', 'Name': 'Cinder-Snapshot', 'Source-Device': 'DEEC', 'Source': self.fake_partition_id[2], 'Source-Type': 'LV-Partition', 'Source-Name': 'Part-2', 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'No', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[3], 'Target-Type': 'LV-Partition', 'Target-Name': 'Part-1-Copy', 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '714B80F0335F6E52', 'Target-Mapped': 'No', 'Type': 'Copy', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Completed', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }, { 'Pair-ID': self.fake_pair_id[0], 'Name': 'Cinder-Migrate', 'Source-Device': 'DEEC', 'Source': self.fake_partition_id[0], 'Source-Type': 'LV-Partition', 'Source-Name': self.fake_volume_id[0].replace('-', ''), 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'Yes', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[1], 'Target-Type': 'LV-Partition', 'Target-Name': self.fake_volume_id[1].replace('-', ''), 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '033EA1FA4EA193EB', 'Target-Mapped': 'No', 'Type': 'Mirror', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Mirror', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }, { 'Pair-ID': self.fake_pair_id[1], 'Name': 'Cinder-Migrate', 'Source-Device': 'DEEC', 'Source': self.fake_partition_id[4], 'Source-Type': 'LV-Partition', 'Source-Name': self.fake_volume_id[0].replace('-', ''), 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'No', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[5], 'Target-Type': 'LV-Partition', 'Target-Name': self.fake_volume_id[1].replace('-', ''), 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '714B80F0335F6E52', 'Target-Mapped': 'Yes', 'Type': 'Mirror', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Mirror', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }] return (0, result) def get_fake_show_replica_detail(self): msg = """ CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. 
Return: 0x0000 Pair-ID: 4BF246E26966F015 Name: Cinder-Snapshot Source-Device: DEEC Source: %s Source-Type: LV-Partition Source-Name: Part-2 Source-LV: 5DE94FF775D81C30 Source-VS: 2C482316298F7A4E Source-Mapped: No Target-Device: DEEC Target: %s Target-Type: LV-Partition Target-Name: Part-1-Copy Target-LV: 5DE94FF775D81C30 Target-VS: 714B80F0335F6E52 Target-Mapped: No Type: Copy Priority: Normal Timeout: --- Incremental: --- Compression: --- Status: Completed Progress: --- Created-time: 01/11/2020 22:20 PM Sync-commence-time: 01/11/2020 22:20 PM Split-time: 01/11/2020 22:20 PM Completed-time: 01/11/2020 22:21 PM Description: --- Pair-ID: %s Name: Cinder-Migrate Source-Device: DEEC Source: %s Source-Type: LV-Partition Source-Name: %s Source-LV: 5DE94FF775D81C30 Source-VS: 2C482316298F7A4E Source-Mapped: Yes Target-Device: DEEC Target: %s Target-Type: LV-Partition Target-Name: %s Target-LV: 5DE94FF775D81C30 Target-VS: 033EA1FA4EA193EB Target-Mapped: No Type: Mirror Priority: Normal Timeout: --- Incremental: --- Compression: --- Status: Mirror Progress: --- Created-time: 01/11/2020 22:20 PM Sync-commence-time: 01/11/2020 22:20 PM Split-time: 01/11/2020 22:20 PM Completed-time: 01/11/2020 22:21 PM Description: --- Pair-ID: %s Name: Cinder-Migrate Source-Device: DEEC Source: %s Source-Type: LV-Partition Source-Name: %s Source-LV: 5DE94FF775D81C30 Source-VS: 2C482316298F7A4E Source-Mapped: No Target-Device: DEEC Target: %s Target-Type: LV-Partition Target-Name: %s Target-LV: 5DE94FF775D81C30 Target-VS: 714B80F0335F6E52 Target-Mapped: Yes Type: Mirror Priority: Normal Timeout: --- Incremental: --- Compression: --- Status: Mirror Progress: --- Created-time: 01/11/2020 22:20 PM Sync-commence-time: 01/11/2020 22:20 PM Split-time: 01/11/2020 22:20 PM Completed-time: 01/11/2020 22:21 PM Description: --- CLI: Successful: 3 replication job(s) shown Return: 0x0000 """ return msg % (self.fake_partition_id[2], self.fake_partition_id[3], self.fake_pair_id[0], self.fake_partition_id[0], self.fake_volume_id[0].replace('-', ''), self.fake_partition_id[1], self.fake_volume_id[1].replace('-', ''), self.fake_pair_id[1], self.fake_partition_id[4], self.fake_volume_id[0].replace('-', ''), self.fake_partition_id[5], self.fake_volume_id[1].replace('-', '')) def get_test_show_lv(self): return (0, [{ 'Name': 'LV-1', 'LD-amount': '1', 'Available': '841978 MB', 'ID': self.fake_lv_id[0], 'Progress': '---', 'Size': '857982 MB', 'Status': 'On-line', }]) def get_fake_show_lv(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. Return: 0x0000 ID Name LD-amount Size Available Progress Status -------------------------------------------------------------- %s LV-1 1 857982 MB 841978 MB --- On-line CLI: Successful: 1 Logical Volumes(s) shown Return: 0x0000 """ return msg % self.fake_lv_id[0] def get_test_show_lv_detail(self): return (0, [{ 'Policy': 'Default', 'Status': 'On-line', 'ID': self.fake_lv_id[0], 'Available': '841978 MB', 'Expandable-size': '0 MB', 'Name': 'LV-1', 'Size': '857982 MB', 'LD-amount': '1', 'Progress': '---', }]) def get_fake_show_lv_detail(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
Return: 0x0000 ID: %s Name: LV-1 LD-amount: 1 Size: 857982 MB Available: 841978 MB Expandable-size: 0 MB Policy: Default Progress: --- Status: On-line CLI: Successful: 1 Logical Volumes(s) shown Return: 0x0000 """ return msg % self.fake_lv_id[0] def get_test_show_lv_tier_for_migration(self): return (0, [{ 'LV-Name': 'TierLV', 'LV-ID': self.fake_lv_id[1], 'Tier': '0', 'Size': '418.93 GB', 'Used': '10 GB(2.4%)', 'Data Service': '0 MB(0.0%)', 'Reserved Ratio': '10.0%', }, { 'LV-Name': 'TierLV', 'LV-ID': self.fake_lv_id[1], 'Tier': '3', 'Size': '931.02 GB', 'Used': '0 MB(0.0%)', 'Data Service': '0 MB(0.0%)', 'Reserved Ratio': '0.0%', }]) def get_test_show_lv_tier(self): return (0, [{ 'LV-Name': 'TierLV', 'LV-ID': self.fake_lv_id[0], 'Tier': '0', 'Size': '418.93 GB', 'Used': '10 GB(2.4%)', 'Data Service': '0 MB(0.0%)', 'Reserved Ratio': '10.0%', }, { 'LV-Name': 'TierLV', 'LV-ID': self.fake_lv_id[0], 'Tier': '3', 'Size': '931.02 GB', 'Used': '0 MB(0.0%)', 'Data Service': '0 MB(0.0%)', 'Reserved Ratio': '0.0%', }]) def get_fake_show_lv_tier(self): msg = """ CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. Return: 0x0000 LV-Name LV-ID Tier Size Used Data Service Reserved Ratio ------------------------------------------------------------------------------ TierLV %s 0 418.93 GB 10 GB(2.4%%) 0 MB(0.0%%) 10.0%% TierLV %s 3 931.02 GB 0 MB(0.0%%) 0 MB(0.0%%) 0.0%% CLI: Successful: 2 lv tiering(s) shown Return: 0x0000 """ return msg % (self.fake_lv_id[0], self.fake_lv_id[0]) def get_test_show_device(self): return (0, [{ 'ID': self.fake_system_id[0], 'Connected-IP': self.fake_manage_port_ip[0], 'Name': '---', 'Index': '0*', 'JBOD-ID': 'N/A', 'Capacity': '1.22 TB', 'Model': self.fake_model[0], 'Service-ID': '8445676', }]) def get_fake_show_device(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
Return: 0x0000 Index ID Model Name Connected-IP JBOD-ID Capacity Service-ID ------------------------------------------------------------------------ 0* %s %s --- %s N/A 1.22 TB 8445676 CLI: Successful: 1 device(s) found Return: 0x0000 """ return msg % (self.fake_system_id[0], self.fake_model[0], self.fake_manage_port_ip[0]) def get_test_show_channel_single(self): return (0, [{ 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'MCS': 'N/A', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'MCS': '0', 'curClock': '---', }]) def get_test_show_channel_with_mcs(self): return (0, [{ 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'MCS': 'N/A', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'MCS': '1', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '2', 'MCS': '1', 'curClock': '---', }, { 'ID': '---', 'defClock': '6.0 Gbps', 'Type': 'SAS', 'Mode': 'Drive', 'Width': 'SAS', 'Ch': '3', 'MCS': 'N/A', 'curClock': '6.0 Gbps', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '4', 'MCS': '2', 'curClock': '---', }, { 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '5', 'MCS': 'N/A', 'curClock': '---', }]) def get_test_show_channel_without_mcs(self): return (0, [{ 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '2', 'curClock': '---', }, { 'ID': '---', 'defClock': '6.0 Gbps', 'Type': 'SAS', 'Mode': 'Drive', 'Width': 'SAS', 'Ch': '3', 'curClock': '6.0 Gbps', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '4', 'curClock': '---', }, { 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '5', 'curClock': '---', }]) def get_test_show_channel_with_diff_target_id(self): return (0, [{ 'ID': '32', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'MCS': 'N/A', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'MCS': '0', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '2', 'MCS': '1', 'curClock': '---', }, { 'ID': '---', 'defClock': '6.0 Gbps', 'Type': 'SAS', 'Mode': 'Drive', 'Width': 'SAS', 'Ch': '3', 'MCS': 'N/A', 'curClock': '6.0 Gbps', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '4', 'MCS': '2', 'curClock': '---', }, { 'ID': '48', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '5', 'MCS': 'N/A', 'curClock': '---', }]) def get_test_show_channel(self): return (0, [{ 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'MCS': 'N/A', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'MCS': '0', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '2', 'MCS': '1', 'curClock': 
'---', }, { 'ID': '---', 'defClock': '6.0 Gbps', 'Type': 'SAS', 'Mode': 'Drive', 'Width': 'SAS', 'Ch': '3', 'MCS': 'N/A', 'curClock': '6.0 Gbps', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '4', 'MCS': '2', 'curClock': '---', }, { 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '5', 'MCS': 'N/A', 'curClock': '---', }]) def get_fake_show_channel(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. Return: 0x0000 Ch Mode Type defClock curClock Width ID MCS --------------------------------------------------------- 0 Host FIBRE Auto --- --- 112 N/A 1 Host NETWORK Auto --- iSCSI 0 0 2 Host NETWORK Auto --- iSCSI 0 1 3 Drive SAS 6.0 Gbps 6.0 Gbps SAS --- N/A 4 Host NETWORK Auto --- iSCSI 0 2 5 Host FIBRE Auto --- --- 112 N/A CLI: Successful: : 6 channel(s) shown Return: 0x0000 """ return msg def get_test_show_channel_r_model_diff_target_id(self): return (0, [{ 'Mode': 'Host', 'AID': '32', 'defClock': 'Auto', 'MCS': 'N/A', 'Ch': '0', 'BID': '33', 'curClock': '---', 'Width': '---', 'Type': 'FIBRE', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '0', 'Ch': '1', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '1', 'Ch': '2', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Drive', 'AID': '---', 'defClock': '6.0 Gbps', 'MCS': 'N/A', 'Ch': '3', 'BID': '---', 'curClock': '6.0 Gbps', 'Width': 'SAS', 'Type': 'SAS', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '2', 'Ch': '4', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Host', 'AID': '48', 'defClock': 'Auto', 'MCS': 'N/A', 'Ch': '5', 'BID': '49', 'curClock': '---', 'Width': '---', 'Type': 'FIBRE', }]) def get_test_show_channel_r_model(self): return (0, [{ 'Mode': 'Host', 'AID': '112', 'defClock': 'Auto', 'MCS': 'N/A', 'Ch': '0', 'BID': '113', 'curClock': '---', 'Width': '---', 'Type': 'FIBRE', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '0', 'Ch': '1', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '1', 'Ch': '2', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Drive', 'AID': '---', 'defClock': '6.0 Gbps', 'MCS': 'N/A', 'Ch': '3', 'BID': '---', 'curClock': '6.0 Gbps', 'Width': 'SAS', 'Type': 'SAS', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '2', 'Ch': '4', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Host', 'AID': '112', 'defClock': 'Auto', 'MCS': 'N/A', 'Ch': '5', 'BID': '113', 'curClock': '---', 'Width': '---', 'Type': 'FIBRE', }]) def get_fake_show_channel_r_model(self): msg = """ CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. 
Return: 0x0000 Ch Mode Type defClock curClock Width AID BID MCS ---------------------------------------------------------------- 0 Host FIBRE Auto --- --- 112 113 N/A 1 Host NETWORK Auto --- iSCSI 0 1 0 2 Host NETWORK Auto --- iSCSI 0 1 1 3 Drive SAS 6.0 Gbps 6.0 Gbps SAS --- --- N/A 4 Host NETWORK Auto --- iSCSI 0 1 2 5 Host FIBRE Auto --- --- 112 113 N/A CLI: Successful: : 9 channel(s) shown Return: 0x0000 """ return msg def get_show_map_with_lun_map_on_zoning(self): return (0, [{ 'Ch': '0', 'LUN': '0', 'Media': 'PART', 'Host-ID': self.fake_initiator_wwpns[0], 'Target': '112', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }]) def get_test_show_map(self, partition_id=None, channel_id=None): if partition_id and channel_id: return (0, [{ 'Ch': channel_id, 'LUN': '0', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': partition_id, }, { 'Ch': channel_id, 'LUN': '1', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': partition_id, }]) else: return (0, [{ 'Ch': '1', 'LUN': '0', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '1', 'LUN': '1', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '4', 'LUN': '0', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }]) def get_test_show_map_multimap(self): return (0, [{ 'Ch': '1', 'LUN': '0', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '1', 'LUN': '1', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '4', 'LUN': '0', 'Media': 'PART', 'Host-ID': '210000E08B0AADE1', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '4', 'LUN': '0', 'Media': 'PART', 'Host-ID': '210000E08B0AADE2', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }]) def get_fake_show_map(self): msg = """ CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. Return: 0x0000 Ch Target LUN Media Name ID Host-ID ----------------------------------------------------------- 1 0 0 PART Part-1 %s --- 1 0 1 PART Part-1 %s --- 4 0 0 PART Part-1 %s --- CLI: Successful: 3 mapping(s) shown Return: 0x0000 """ return msg % (self.fake_partition_id[0], self.fake_partition_id[0], self.fake_partition_id[0]) def get_test_show_license(self): return (0, { 'Local Volume Copy': { 'Support': False, 'Amount': '8/256', }, 'Synchronous Remote Mirror': { 'Support': False, 'Amount': '8/256', }, 'Snapshot': { 'Support': False, 'Amount': '1024/16384', }, 'Self-Encryption Drives': { 'Support': False, 'Amount': '---', }, 'Compression': { 'Support': False, 'Amount': '---', }, 'Local volume Mirror': { 'Support': False, 'Amount': '8/256', }, 'Storage Tiering': { 'Support': False, 'Amount': '---', }, 'Asynchronous Remote Mirror': { 'Support': False, 'Amount': '8/256', }, 'Scale-out': { 'Support': False, 'Amount': 'Not Support', }, 'Thin Provisioning': { 'Support': False, 'Amount': '---', }, 'Max JBOD': { 'Support': False, 'Amount': '15', }, 'EonPath': { 'Support': False, 'Amount': '---', } }) def get_fake_show_license(self): msg = """ CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. 
Return: 0x0000 License Amount(Partition/Subsystem) Expired ------------------------------------------------------------------ EonPath --- Expired Scale-out Not Support --- Snapshot 1024/16384 Expired Local Volume Copy 8/256 Expired Local volume Mirror 8/256 Expired Synchronous Remote Mirror 8/256 Expired Asynchronous Remote Mirror 8/256 Expired Compression --- Expired Thin Provisioning --- Expired Storage Tiering --- Expired Max JBOD 15 Expired Self-Encryption Drives --- Expired CLI: Successful Return: 0x0000 """ return msg def get_test_show_wwn_with_g_model(self): return (0, [{ 'ID': 'ID:112', 'WWPN': self.fake_target_wwpns[0], 'CH': '0', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'ID:112', 'WWPN': self.fake_target_wwpns[1], 'CH': '5', 'WWNN': self.fake_target_wwnns[0], }]) def get_test_show_wwn_with_diff_target_id(self): return (0, [{ 'ID': 'AID:32', 'WWPN': self.fake_target_wwpns[0], 'CH': '0', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'BID:33', 'WWPN': self.fake_target_wwpns[2], 'CH': '0', 'WWNN': self.fake_target_wwnns[1], }, { 'ID': 'AID:48', 'WWPN': self.fake_target_wwpns[1], 'CH': '5', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'BID:49', 'WWPN': self.fake_target_wwpns[3], 'CH': '5', 'WWNN': self.fake_target_wwnns[1], }]) def get_test_show_wwn(self): return (0, [{ 'ID': 'AID:112', 'WWPN': self.fake_target_wwpns[0], 'CH': '0', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'BID:113', 'WWPN': self.fake_target_wwpns[2], 'CH': '0', 'WWNN': self.fake_target_wwnns[1], }, { 'ID': 'AID:112', 'WWPN': self.fake_target_wwpns[1], 'CH': '5', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'BID:113', 'WWPN': self.fake_target_wwpns[3], 'CH': '5', 'WWNN': self.fake_target_wwnns[1], }]) def get_fake_show_wwn(self): msg = """ CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. Return: 0x0000 WWN entries in controller for host channels: CH ID WWNN WWPN ------------------------------------------------- 0 AID:112 %s %s 0 BID:113 %s %s 5 AID:112 %s %s 5 BID:113 %s %s CLI: Successful Return: 0x0000 """ return msg % (self.fake_target_wwnns[0], self.fake_target_wwpns[0], self.fake_target_wwnns[1], self.fake_target_wwpns[2], self.fake_target_wwnns[0], self.fake_target_wwpns[1], self.fake_target_wwnns[1], self.fake_target_wwpns[3]) def get_test_show_iqn(self): return (0, [{ 'Name': self.fake_initiator_iqn[0][-16:], 'IQN': self.fake_initiator_iqn[0], 'User': '---', 'Password': '---', 'Target': '---', 'Target-Password': '---', 'IP': '0.0.0.0', 'Mask': '0.0.0.0', }]) def get_fake_show_iqn(self): msg = """ CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. 
Return: 0x0000 Detected host IQN: IQN ---------------------------------------- %s List of initiator IQN(s): -------------------------- Name: %s IQN: %s User: --- Password: --- Target: --- Target-Password: --- IP: 0.0.0.0 Mask: 0.0.0.0 CLI: Successful: 1 initiator iqn(s) shown Return: 0x0000 """ return msg % (self.fake_initiator_iqn[0], self.fake_initiator_iqn[0][-16:], self.fake_initiator_iqn[0]) def get_fake_discovery(self, target_iqns, target_portals): template = '%s,1 %s' if len(target_iqns) == 1: result = template % (target_portals[0], target_iqns[0]) return (0, result) result = [] for i in range(len(target_iqns)): result.append(template % ( target_portals[i], target_iqns[i])) return (0, '\n'.join(result)) class InfortrendCLITestCase(test.TestCase): CommandList = ['CreateLD', 'CreateLV', 'CreatePartition', 'DeletePartition', 'SetPartition', 'CreateMap', 'DeleteMap', 'CreateSnapshot', 'DeleteSnapshot', 'CreateReplica', 'DeleteReplica', 'CreateIQN', 'DeleteIQN', 'ShowLD', 'ShowLV', 'ShowPartition', 'ShowSnapshot', 'ShowDevice', 'ShowChannel', 'ShowDisk', 'ShowMap', 'ShowNet', 'ShowLicense', 'ShowWWN', 'ShowReplica', 'ShowIQN'] def __init__(self, *args, **kwargs): super(InfortrendCLITestCase, self).__init__(*args, **kwargs) self.cli_data = InfortrendCLITestData() def setUp(self): super(InfortrendCLITestCase, self).setUp() def _cli_set(self, cli, fake_result): cli_conf = { 'path': '', 'password': '', 'ip': '', 'cli_retry_time': 1, } cli = cli(cli_conf) cli._execute = mock.Mock(return_value=fake_result) return cli def _cli_multi_set(self, cli, fake_result_list): cli_conf = { 'path': '', 'password': '', 'ip': '', 'cli_retry_time': 5, } cli = cli(cli_conf) cli._execute = mock.Mock(side_effect=fake_result_list) return cli def _test_command_succeed(self, command): fake_cli_succeed = self.cli_data.get_fake_cli_succeed() test_command = self._cli_set(command, fake_cli_succeed) rc, out = test_command.execute() self.assertEqual(0, rc) def _test_command_failed(self, command): fake_cli_failed = self.cli_data.get_fake_cli_failed() test_command = self._cli_set(command, fake_cli_failed) rc, out = test_command.execute() self.assertEqual(int('0x000c', 16), rc) def _test_command_failed_retry_succeed(self, log_error, command): log_error.reset_mock() LOG_ERROR_STR = ( 'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s' ) fake_result_list = [ self.cli_data.get_fake_cli_failed(), self.cli_data.get_fake_cli_failed_with_network(), self.cli_data.get_fake_cli_succeed(), ] test_command = self._cli_multi_set(command, fake_result_list) rc, out = test_command.execute() self.assertEqual(0, rc) expect_log_error = [ mock.call(LOG_ERROR_STR, { 'retry': 1, 'method': test_command.__class__.__name__, 'rc': int('0x000c', 16), 'reason': 'No selected device', }), mock.call(LOG_ERROR_STR, { 'retry': 2, 'method': test_command.__class__.__name__, 'rc': int('0x000b', 16), 'reason': 'No network', }) ] log_error.assert_has_calls(expect_log_error) def _test_command_failed_retry_timeout(self, log_error, command): log_error.reset_mock() LOG_ERROR_STR = ( 'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s' ) fake_result_list = [ self.cli_data.get_fake_cli_failed(), self.cli_data.get_fake_cli_failed_with_network(), self.cli_data.get_fake_cli_failed_with_network(), self.cli_data.get_fake_cli_failed(), self.cli_data.get_fake_cli_failed_with_network(), ] test_command = self._cli_multi_set(command, fake_result_list) rc, out = test_command.execute() self.assertEqual(int('0x000b', 16), rc) self.assertEqual('No network', out) 
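
        # All five attempts (cli_retry_time=5 in _cli_multi_set) failed, so
        # five LOG.error calls are expected, in order, each echoing that
        # attempt's return code and reason.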
        expect_log_error = [
            mock.call(LOG_ERROR_STR, {
                'retry': 1,
                'method': test_command.__class__.__name__,
                'rc': int('0x000c', 16),
                'reason': 'No selected device',
            }),
            mock.call(LOG_ERROR_STR, {
                'retry': 2,
                'method': test_command.__class__.__name__,
                'rc': int('0x000b', 16),
                'reason': 'No network',
            }),
            mock.call(LOG_ERROR_STR, {
                'retry': 3,
                'method': test_command.__class__.__name__,
                'rc': int('0x000b', 16),
                'reason': 'No network',
            }),
            mock.call(LOG_ERROR_STR, {
                'retry': 4,
                'method': test_command.__class__.__name__,
                'rc': int('0x000c', 16),
                'reason': 'No selected device',
            }),
            mock.call(LOG_ERROR_STR, {
                'retry': 5,
                'method': test_command.__class__.__name__,
                'rc': int('0x000b', 16),
                'reason': 'No network',
            })
        ]
        log_error.assert_has_calls(expect_log_error)

    def _test_show_command(self, fake_data, test_data, command, *params):
        test_command = self._cli_set(command, fake_data)
        rc, out = test_command.execute(*params)
        self.assertEqual(test_data[0], rc)
        if isinstance(out, list):
            for i in range(len(test_data[1])):
                self.assertDictMatch(test_data[1][i], out[i])
        else:
            self.assertDictMatch(test_data[1], out)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_cli_all_command_execute(self):
        for command in self.CommandList:
            self._test_command_succeed(getattr(cli, command))
            self._test_command_failed(getattr(cli, command))

    @mock.patch.object(cli.LOG, 'error')
    def test_cli_all_command_execute_retry_succeed(self, log_error):
        for command in self.CommandList:
            self._test_command_failed_retry_succeed(
                log_error, getattr(cli, command))

    @mock.patch.object(cli.LOG, 'error')
    def test_cli_all_command_execute_retry_timeout(self, log_error):
        for command in self.CommandList:
            self._test_command_failed_retry_timeout(
                log_error, getattr(cli, command))

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_snapshot(self):
        self._test_show_command(
            self.cli_data.get_fake_show_snapshot(),
            self.cli_data.get_test_show_snapshot(),
            cli.ShowSnapshot)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_snapshot_detail(self):
        self._test_show_command(
            self.cli_data.get_fake_show_snapshot_detail(),
            self.cli_data.get_test_show_snapshot_detail(),
            cli.ShowSnapshot, '-l')

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_net(self):
        self._test_show_command(
            self.cli_data.get_fake_show_net(),
            self.cli_data.get_test_show_net(),
            cli.ShowNet)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_detail_net(self):
        self._test_show_command(
            self.cli_data.get_fake_show_net_detail(),
            self.cli_data.get_test_show_net_detail(),
            cli.ShowNet, '-l')

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_partition(self):
        self._test_show_command(
            self.cli_data.get_fake_show_partition(),
            self.cli_data.get_test_show_partition(),
            cli.ShowPartition)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_partition_detail(self):
        self._test_show_command(
            self.cli_data.get_fake_show_partition_detail(),
            self.cli_data.get_test_show_partition_detail(),
            cli.ShowPartition, '-l')

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_lv(self):
        self._test_show_command(
            self.cli_data.get_fake_show_lv(),
            self.cli_data.get_test_show_lv(),
            cli.ShowLV)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_lv_detail(self):
        self._test_show_command(
            self.cli_data.get_fake_show_lv_detail(),
            self.cli_data.get_test_show_lv_detail(),
            cli.ShowLV, '-l')
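
    # The show-command cases above and below share one pattern: a canned
    # raw transcript (get_fake_*) goes through the real parser and must
    # yield the pre-parsed dicts (get_test_*); a trailing positional
    # argument such as '-l' or 'tier' selects the command's detail or
    # variant form.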
    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_lv_tier(self):
        self._test_show_command(
            self.cli_data.get_fake_show_lv_tier(),
            self.cli_data.get_test_show_lv_tier(),
            cli.ShowLV, 'tier')

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_device(self):
        self._test_show_command(
            self.cli_data.get_fake_show_device(),
            self.cli_data.get_test_show_device(),
            cli.ShowDevice)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_channel(self):
        self._test_show_command(
            self.cli_data.get_fake_show_channel(),
            self.cli_data.get_test_show_channel(),
            cli.ShowChannel)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_channel_with_r_model(self):
        self._test_show_command(
            self.cli_data.get_fake_show_channel_r_model(),
            self.cli_data.get_test_show_channel_r_model(),
            cli.ShowChannel)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_map(self):
        self._test_show_command(
            self.cli_data.get_fake_show_map(),
            self.cli_data.get_test_show_map(),
            cli.ShowMap)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_license(self):
        self._test_show_command(
            self.cli_data.get_fake_show_license(),
            self.cli_data.get_test_show_license(),
            cli.ShowLicense)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_replica_detail(self):
        self._test_show_command(
            self.cli_data.get_fake_show_replica_detail(),
            self.cli_data.get_test_show_replica_detail(),
            cli.ShowReplica, '-l')

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_wwn(self):
        self._test_show_command(
            self.cli_data.get_fake_show_wwn(),
            self.cli_data.get_test_show_wwn(),
            cli.ShowWWN)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_iqn(self):
        self._test_show_command(
            self.cli_data.get_fake_show_iqn(),
            self.cli_data.get_test_show_iqn(),
            cli.ShowIQN)
cinder-8.0.0/cinder/tests/unit/test_hitachi_hbsd_snm2_fc.py0000664000567000056710000007226412701406257025151 0ustar jenkinsjenkins00000000000000
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
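
# The test case below stubs out SNM2 CLI execution: _exec_hsnm() and its
# *_err variants look up the exact CLI argument tuple in a dict of canned
# results and return a [return-code, stdout, stderr] triple, so no real
# SNM2 command is ever run.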
# """ Self test for Hitachi Block Storage Driver """ import mock from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.hitachi import hbsd_basiclib from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_fc from cinder.volume.drivers.hitachi import hbsd_snm2 def _exec_hsnm(*args, **kargs): return HBSDSNM2FCDriverTest.hsnm_vals.get(args) def _exec_hsnm_get_lu_ret_err(*args, **kargs): return HBSDSNM2FCDriverTest.hsnm_get_lu_ret_err.get(args) def _exec_hsnm_get_lu_vol_type_err(*args, **kargs): return HBSDSNM2FCDriverTest.hsnm_get_lu_vol_type_err.get(args) def _exec_hsnm_get_lu_dppool_err(*args, **kargs): return HBSDSNM2FCDriverTest.hsnm_get_lu_dppool_err.get(args) def _exec_hsnm_get_lu_size_err(*args, **kargs): return HBSDSNM2FCDriverTest.hsnm_get_lu_size_err.get(args) def _exec_hsnm_get_lu_num_port_err(*args, **kargs): return HBSDSNM2FCDriverTest.hsnm_get_lu_num_port_err.get(args) class HBSDSNM2FCDriverTest(test.TestCase): """Test HBSDSNM2FCDriver.""" audppool_result = " DP RAID \ Current Utilization Current Over Replication\ Available Current Replication Rotational \ \ Stripe \ Needing Preparation\n\ Pool Tier Mode Level Total Capacity Consumed Capacity \ Percent Provisioning Percent Capacity \ Utilization Percent Type Speed Encryption Status \ \ Reconstruction Progress Size Capacity\n\ 30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \ 1% 24835% 532.0 GB \ 1% SAS 10000rpm N/A Normal \ N/A \ 256KB 0.0 GB" aureplicationlocal_result = "Pair Name LUN Pair \ LUN Status Copy Type Group \ Point-in-Time MU Number\n\ 0 10 0 Split( 99%) \ ShadowImage ---:Ungrouped N/A\ " auluref_result = " Stripe RAID DP Tier \ RAID Rotational Number\n\ LU Capacity Size Group Pool Mode Level Type\ Speed of Paths Status\n\ 0 2097152 blocks 256KB 0 0 Enable 0 Normal" auluref_result1 = " Stripe RAID DP Tier \ RAID Rotational Number\n\ LU Capacity Size Group Pool Mode Level Type\ Speed of Paths Status\n\ 0 2097152 blocks 256KB 0 0 Enable 0 DUMMY" auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \ Name Port Name Host Group\n\ HBSD-00 10000000C97BCE7A 001:HBSD-01\n\ Assigned WWN\n Name Port Name \ Host Group\n abcdefg 10000000C97BCE7A \ 001:HBSD-01" aufibre1_result = "Port Information\n\ Port Address\n CTL Port\ Node Name Port Name Setting Current\n 0 0 \ 50060E801053C2E0 50060E801053C2E0 0000EF 272700" auhgmap_result = "Mapping Mode = ON\nPort Group \ H-LUN LUN\n 00 001:HBSD-00 0 1000" hsnm_vals = { ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""], ('aureplicationlocal', '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'): [0, "", ""], ('aureplicationlocal', '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'): [1, "", ""], ('aureplicationlocal', '-unit None -refer -pvol 1'): [0, "%s" % aureplicationlocal_result, ""], ('aureplicationlocal', '-unit None -refer -pvol 3'): [1, "", "DMEC002015"], ('aureplicationlocal', '-unit None -refer -svol 3'): [1, "", "DMEC002015"], ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'): [0, "", ""], ('auluchgsize', '-unit None -lu 1 -size 256g'): [0, "", ""], ('auludel', '-unit None -lu 1 -f'): [0, 0, ""], ('auludel', '-unit None -lu 3 -f'): [1, 0, ""], ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, 0, ""], ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""], ('auluref', '-unit None'): [0, "%s" % auluref_result, ""], ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, 
""], ('auhgmap', '-unit None -add 0 0 1 1 1'): [0, 0, ""], ('auhgwwn', '-unit None -refer'): [0, "%s" % auhgwwn_result, ""], ('aufibre1', '-unit None -refer'): [0, "%s" % aufibre1_result, ""], ('auhgmap', '-unit None -refer'): [0, "%s" % auhgmap_result, ""]} auluref_ret_err = "Stripe RAID DP Tier \ RAID Rotational Number\n\ LU Capacity Size Group Pool Mode Level Type\ Speed of Paths Status\n\ 0 2097152 blocks 256KB 0 0 Enable 0 Normal" hsnm_get_lu_ret_err = { ('auluref', '-unit None -lu 0'): [1, "%s" % auluref_ret_err, ""], } auluref_vol_type_err = "Stripe RAID DP Tier \ RAID Rotational Number\n\ LU Capacity Size Group Pool Mode Level Type\ Speed of Paths Status\n\ 0 2097152 blocks 256KB 0 0 Enable 0 DUMMY" hsnm_get_lu_vol_type_err = { ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_vol_type_err, ""], } auluref_dppool_err = "Stripe RAID DP Tier \ RAID Rotational Number\n\ LU Capacity Size Group Pool Mode Level Type\ Speed of Paths Status\n\ 0 2097152 blocks 256KB 0 N/A Enable 0 Normal" hsnm_get_lu_dppool_err = { ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_dppool_err, ""], } auluref_size_err = "Stripe RAID DP Tier \ RAID Rotational Number\n\ LU Capacity Size Group Pool Mode Level Type\ Speed of Paths Status\n\ 0 2097151 blocks 256KB N/A 0 Enable 0 Normal" hsnm_get_lu_size_err = { ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_size_err, ""], } auluref_num_port_err = "Stripe RAID DP Tier \ RAID Rotational Number\n\ LU Capacity Size Group Pool Mode Level Type\ Speed of Paths Status\n\ 0 2097152 blocks 256KB 0 0 Enable 1 Normal" hsnm_get_lu_num_port_err = { ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_num_port_err, ""], } # The following information is passed on to tests, when creating a volume _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0', 'provider_location': '1', 'name': 'test', 'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'} test_volume = {'name': 'test_volume', 'size': 128, 'id': 'test-volume-0', 'provider_location': '1', 'status': 'available'} test_volume_error = {'name': 'test_volume_error', 'size': 256, 'id': 'test-volume-error', 'provider_location': '3', 'status': 'available'} test_volume_error1 = {'name': 'test_volume_error', 'size': 128, 'id': 'test-volume-error', 'provider_location': None, 'status': 'available'} test_volume_error2 = {'name': 'test_volume_error', 'size': 256, 'id': 'test-volume-error', 'provider_location': '1', 'status': 'available'} test_volume_error3 = {'name': 'test_volume3', 'size': 128, 'id': 'test-volume3', 'volume_metadata': [{'key': 'type', 'value': 'V-VOL'}], 'provider_location': '1', 'status': 'available'} test_volume_error4 = {'name': 'test_volume4', 'size': 128, 'id': 'test-volume2', 'provider_location': '3', 'status': 'available'} test_snapshot = {'volume_name': 'test', 'size': 128, 'volume_size': 128, 'name': 'test-snap', 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, 'provider_location': '1', 'status': 'available'} test_snapshot_error2 = {'volume_name': 'test', 'size': 128, 'volume_size': 128, 'name': 'test-snap', 'volume_id': 0, 'id': 'test-snap-0', 'volume': test_volume_error, 'provider_location': None, 'status': 'available'} UNIT_NAME = 'HUS110_91122819' test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME} test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME} test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME} test_existing_no_ldev_ref = {'unit_name': UNIT_NAME} test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None} 
test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'} test_existing_no_unit_ref = {'ldev': '0'} def __init__(self, *args, **kwargs): super(HBSDSNM2FCDriverTest, self).__init__(*args, **kwargs) def setUp(self): super(HBSDSNM2FCDriverTest, self).setUp() self._setup_config() self._setup_driver() def _setup_config(self): self.configuration = mock.Mock(conf.Configuration) self.configuration.hitachi_pool_id = 30 self.configuration.hitachi_target_ports = "00" self.configuration.hitachi_debug_level = 0 self.configuration.hitachi_serial_number = "None" self.configuration.hitachi_unit_name = "None" self.configuration.hitachi_group_request = False self.configuration.hitachi_zoning_request = False self.configuration.config_group = "None" self.configuration.hitachi_ldev_range = [0, 100] self.configuration.hitachi_default_copy_method = 'SI' self.configuration.hitachi_copy_check_interval = 1 self.configuration.hitachi_copy_speed = 3 def _setup_driver(self): self.driver = hbsd_fc.HBSDFCDriver( configuration=self.configuration) context = None db = None self.driver.common = hbsd_common.HBSDCommon( self.configuration, self.driver, context, db) self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration) self.driver.common.pair_flock = \ self.driver.common.command.set_pair_flock() self.driver.common.horcmgr_flock = \ self.driver.common.command.set_horcmgr_flock() self.driver.do_setup_status.set() # API test cases @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_create_volume(self, arg1, arg2, arg3): """test create_volume.""" ret = self.driver.create_volume(self._VOLUME) vol = self._VOLUME.copy() vol['provider_location'] = ret['provider_location'] self.assertEqual('1', vol['provider_location']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_create_volume_error(self, arg1, arg2, arg3): """test create_volume.""" self.assertRaises(exception.HBSDCmdError, self.driver.create_volume, self.test_volume_error) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_get_volume_stats(self, arg1, arg2): """test get_volume_stats.""" stats = self.driver.get_volume_stats(True) self.assertEqual('Hitachi', stats['vendor_name']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_get_volume_stats_error(self, arg1, arg2): """test get_volume_stats.""" self.configuration.hitachi_pool_id = 29 stats = self.driver.get_volume_stats(True) self.assertEqual({}, stats) self.configuration.hitachi_pool_id = 30 @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_extend_volume(self, arg1, arg2): """test extend_volume.""" self.driver.extend_volume(self._VOLUME, 256) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_extend_volume_error(self, arg1, arg2): """test extend_volume.""" self.assertRaises(exception.HBSDError, self.driver.extend_volume, self.test_volume_error3, 256) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 
'exec_hsnm', side_effect=_exec_hsnm) def test_delete_volume(self, arg1, arg2): """test delete_volume.""" self.driver.delete_volume(self._VOLUME) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_delete_volume_error(self, arg1, arg2): """test delete_volume.""" self.assertRaises(exception.HBSDCmdError, self.driver.delete_volume, self.test_volume_error4) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', return_value={'dummy_snapshot_meta': 'snapshot_meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', return_value=_VOLUME) @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5): """test create_snapshot.""" ret = self.driver.create_volume(self._VOLUME) ret = self.driver.create_snapshot(self.test_snapshot) self.assertEqual('1', ret['provider_location']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', return_value={'dummy_snapshot_meta': 'snapshot_meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', return_value=test_volume_error) @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5): """test create_snapshot.""" self.assertRaises(exception.HBSDCmdError, self.driver.create_snapshot, self.test_snapshot_error2) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_delete_snapshot(self, arg1, arg2): """test delete_snapshot.""" self.driver.delete_snapshot(self.test_snapshot) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_delete_snapshot_error(self, arg1, arg2): """test delete_snapshot.""" self.driver.delete_snapshot(self.test_snapshot_error2) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_create_volume_from_snapshot(self, arg1, arg2, arg3): """test create_volume_from_snapshot.""" vol = self.driver.create_volume_from_snapshot(self._VOLUME, self.test_snapshot) self.assertIsNotNone(vol) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3): """test create_volume_from_snapshot.""" self.assertRaises(exception.HBSDError, self.driver.create_volume_from_snapshot, self.test_volume_error2, self.test_snapshot) return @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', return_value=_VOLUME) @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_basiclib, 'get_process_lock') def 
test_create_cloned_volume(self, arg1, arg2, arg3, arg4): """test create_cloned_volume.""" vol = self.driver.create_cloned_volume(self._VOLUME, self.test_volume) self.assertIsNotNone(vol) return @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', return_value={'dummy_volume_meta': 'meta'}) @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', return_value=test_volume_error1) @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_basiclib, 'get_process_lock') def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4): """test create_cloned_volume.""" self.assertRaises(exception.HBSDError, self.driver.create_cloned_volume, self._VOLUME, self.test_volume_error1) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_initialize_connection(self, arg1, arg2): """test initialize connection.""" connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'} rc = self.driver.initialize_connection(self._VOLUME, connector) self.assertEqual('fibre_channel', rc['driver_volume_type']) self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) self.assertEqual(1, rc['data']['target_lun']) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_initialize_connection_error(self, arg1, arg2): """test initialize connection.""" connector = {'wwpns': 'x', 'ip': '0xc0a80100'} self.assertRaises(exception.HBSDError, self.driver.initialize_connection, self._VOLUME, connector) return @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_terminate_connection(self, arg1): """test terminate connection.""" connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'} rc = self.driver.terminate_connection(self._VOLUME, connector) self.assertEqual('fibre_channel', rc['driver_volume_type']) self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) return @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_terminate_connection_error(self, arg1): """test terminate connection.""" connector = {'ip': '0xc0a80100'} self.assertRaises(exception.HBSDError, self.driver.terminate_connection, self._VOLUME, connector) return @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_manage_existing(self, arg1, arg2): rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref) self.assertEqual(0, rc['provider_location']) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME size = self.driver.manage_existing_get_size(self._VOLUME, self.test_existing_ref) self.assertEqual(1, size) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_none_ldev_ref) @mock.patch.object(hbsd_basiclib, 
'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_invalid_ldev_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_no_ldev_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_none_unit_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_invalid_unit_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_no_unit_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm_get_lu_ret_err) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_size_ret_err(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm_get_lu_vol_type_err) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_lu_vol_type_err(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm_get_lu_dppool_err) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_lu_dppool_err(self, 
arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm_get_lu_size_err) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_lu_size_err(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm_get_lu_num_port_err) @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') def test_manage_existing_get_lu_num_port_err(self, arg1, arg2, arg3): self.configuration.hitachi_unit_name = self.UNIT_NAME self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self._VOLUME, self.test_existing_ref) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_unmanage(self, arg1, arg2): self.driver.unmanage(self._VOLUME) @mock.patch.object(hbsd_basiclib, 'get_process_lock') @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) def test_unmanage_busy(self, arg1, arg2): self.assertRaises(exception.HBSDVolumeIsBusy, self.driver.unmanage, self.test_volume_error3) cinder-8.0.0/cinder/tests/unit/api/0000775000567000056710000000000012701406543020264 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/api/test_common.py0000664000567000056710000005712712701406250023174 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suites for 'common' code used throughout the OpenStack HTTP API. """ import mock from testtools import matchers import webob import webob.exc from oslo_config import cfg from cinder.api import common from cinder import test NS = "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" CONF = cfg.CONF class LimiterTest(test.TestCase): """Unit tests for the `cinder.api.common.limited` method. This method takes in a list of items and, depending on the 'offset' and 'limit' GET params, returns a subset or complete set of the given items. 
""" def setUp(self): """Run before each test.""" super(LimiterTest, self).setUp() self.tiny = list(range(1)) self.small = list(range(10)) self.medium = list(range(1000)) self.large = list(range(10000)) def test_limiter_offset_zero(self): """Test offset key works with 0.""" req = webob.Request.blank('/?offset=0') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium, common.limited(self.medium, req)) self.assertEqual(self.large[:1000], common.limited(self.large, req)) def test_limiter_offset_medium(self): """Test offset key works with a medium sized number.""" req = webob.Request.blank('/?offset=10') self.assertEqual([], common.limited(self.tiny, req)) self.assertEqual(self.small[10:], common.limited(self.small, req)) self.assertEqual(self.medium[10:], common.limited(self.medium, req)) self.assertEqual(self.large[10:1010], common.limited(self.large, req)) def test_limiter_offset_over_max(self): """Test offset key works with a number over 1000 (max_limit).""" req = webob.Request.blank('/?offset=1001') self.assertEqual([], common.limited(self.tiny, req)) self.assertEqual([], common.limited(self.small, req)) self.assertEqual([], common.limited(self.medium, req)) self.assertEqual( self.large[1001:2001], common.limited(self.large, req)) def test_limiter_offset_blank(self): """Test offset key works with a blank offset.""" req = webob.Request.blank('/?offset=') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_offset_out_of_range(self): """Test offset key works with a offset out of range.""" req = webob.Request.blank('/?offset=123456789012346456') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_offset_bad(self): """Test offset key works with a BAD offset.""" req = webob.Request.blank(u'/?offset=\u0020aa') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_nothing(self): """Test request with no offset or limit.""" req = webob.Request.blank('/') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium, common.limited(self.medium, req)) self.assertEqual(self.large[:1000], common.limited(self.large, req)) def test_limiter_limit_zero(self): """Test limit of zero.""" req = webob.Request.blank('/?limit=0') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium, common.limited(self.medium, req)) self.assertEqual(self.large[:1000], common.limited(self.large, req)) def test_limiter_limit_bad(self): """Test with a bad limit.""" req = webob.Request.blank(u'/?limit=hello') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_limit_medium(self): """Test limit of 10.""" req = webob.Request.blank('/?limit=10') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium[:10], common.limited(self.medium, req)) self.assertEqual(self.large[:10], common.limited(self.large, req)) def test_limiter_limit_over_max(self): """Test limit of 3000.""" req = webob.Request.blank('/?limit=3000') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium, common.limited(self.medium, req)) 
self.assertEqual(self.large[:1000], common.limited(self.large, req)) def test_limiter_limit_and_offset(self): """Test request with both limit and offset.""" items = list(range(2000)) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual(items[1:4], common.limited(items, req)) req = webob.Request.blank('/?offset=3&limit=0') self.assertEqual(items[3:1003], common.limited(items, req)) req = webob.Request.blank('/?offset=3&limit=1500') self.assertEqual(items[3:1003], common.limited(items, req)) req = webob.Request.blank('/?offset=3000&limit=10') self.assertEqual([], common.limited(items, req)) req = webob.Request.blank('/?offset=30034522235674530&limit=10') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, items, req) def test_limiter_custom_max_limit(self): """Test a max_limit other than 1000.""" items = list(range(2000)) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual( items[1:4], common.limited(items, req, max_limit=2000)) req = webob.Request.blank('/?offset=3&limit=0') self.assertEqual( items[3:], common.limited(items, req, max_limit=2000)) req = webob.Request.blank('/?offset=3&limit=2500') self.assertEqual( items[3:], common.limited(items, req, max_limit=2000)) req = webob.Request.blank('/?offset=3000&limit=10') self.assertEqual([], common.limited(items, req, max_limit=2000)) def test_limiter_negative_limit(self): """Test a negative limit.""" req = webob.Request.blank('/?limit=-3000') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_negative_offset(self): """Test a negative offset.""" req = webob.Request.blank('/?offset=-30') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) class PaginationParamsTest(test.TestCase): """Unit tests for `cinder.api.common.get_pagination_params` method. This method takes in a request object and returns 'marker' and 'limit' GET params. 
""" def test_nonnumerical_limit(self): """Test nonnumerical limit param.""" req = webob.Request.blank('/?limit=hello') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req.GET.copy()) @mock.patch.object(common, 'CONF') def test_no_params(self, mock_cfg): """Test no params.""" mock_cfg.osapi_max_limit = 100 req = webob.Request.blank('/') expected = (None, 100, 0) self.assertEqual(expected, common.get_pagination_params(req.GET.copy())) def test_valid_marker(self): """Test valid marker param.""" marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' req = webob.Request.blank('/?marker=' + marker) expected = (marker, CONF.osapi_max_limit, 0) self.assertEqual(expected, common.get_pagination_params(req.GET.copy())) def test_valid_limit(self): """Test valid limit param.""" req = webob.Request.blank('/?limit=10') expected = (None, 10, 0) self.assertEqual(expected, common.get_pagination_params(req.GET.copy())) def test_invalid_limit(self): """Test invalid limit param.""" req = webob.Request.blank('/?limit=-2') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req.GET.copy()) def test_valid_limit_and_marker(self): """Test valid limit and marker parameters.""" marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' req = webob.Request.blank('/?limit=20&marker=%s' % marker) expected = (marker, 20, 0) self.assertEqual(expected, common.get_pagination_params(req.GET.copy())) class SortParamUtilsTest(test.TestCase): def test_get_sort_params_defaults(self): """Verifies the default sort key and direction.""" sort_keys, sort_dirs = common.get_sort_params({}) self.assertEqual(['created_at'], sort_keys) self.assertEqual(['desc'], sort_dirs) def test_get_sort_params_override_defaults(self): """Verifies that the defaults can be overriden.""" sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1', default_dir='dir1') self.assertEqual(['key1'], sort_keys) self.assertEqual(['dir1'], sort_dirs) def test_get_sort_params_single_value_sort_param(self): """Verifies a single sort key and direction.""" params = {'sort': 'key1:dir1'} sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1'], sort_keys) self.assertEqual(['dir1'], sort_dirs) def test_get_sort_params_single_value_old_params(self): """Verifies a single sort key and direction.""" params = {'sort_key': 'key1', 'sort_dir': 'dir1'} sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1'], sort_keys) self.assertEqual(['dir1'], sort_dirs) def test_get_sort_params_single_with_default_sort_param(self): """Verifies a single sort value with a default direction.""" params = {'sort': 'key1'} sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1'], sort_keys) # Direction should be defaulted self.assertEqual(['desc'], sort_dirs) def test_get_sort_params_single_with_default_old_params(self): """Verifies a single sort value with a default direction.""" params = {'sort_key': 'key1'} sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1'], sort_keys) # Direction should be defaulted self.assertEqual(['desc'], sort_dirs) def test_get_sort_params_multiple_values(self): """Verifies multiple sort parameter values.""" params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'} sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1', 'key2', 'key3'], sort_keys) self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs) def test_get_sort_params_multiple_not_all_dirs(self): """Verifies multiple sort keys without all directions.""" params 
= {'sort': 'key1:dir1,key2,key3:dir3'} sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1', 'key2', 'key3'], sort_keys) # Second key is missing the direction, should be defaulted self.assertEqual(['dir1', 'desc', 'dir3'], sort_dirs) def test_get_sort_params_multiple_override_default_dir(self): """Verifies multiple sort keys and overriding default direction.""" params = {'sort': 'key1:dir1,key2,key3'} sort_keys, sort_dirs = common.get_sort_params(params, default_dir='foo') self.assertEqual(['key1', 'key2', 'key3'], sort_keys) self.assertEqual(['dir1', 'foo', 'foo'], sort_dirs) def test_get_sort_params_params_modified(self): """Verifies that the input sort parameters are modified.""" params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'} common.get_sort_params(params) self.assertEqual({}, params) params = {'sort_key': 'key1', 'sort_dir': 'dir1'} common.get_sort_params(params) self.assertEqual({}, params) def test_get_sort_params_random_spaces(self): """Verifies that leading and trailing spaces are removed.""" params = {'sort': ' key1 : dir1,key2: dir2 , key3 '} sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1', 'key2', 'key3'], sort_keys) self.assertEqual(['dir1', 'dir2', 'desc'], sort_dirs) def test_get_params_mix_sort_and_old_params(self): """An exception is raised if both types of sorting params are given.""" for params in ({'sort': 'k1', 'sort_key': 'k1'}, {'sort': 'k1', 'sort_dir': 'd1'}, {'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}): self.assertRaises(webob.exc.HTTPBadRequest, common.get_sort_params, params) class MiscFunctionsTest(test.TestCase): def test_remove_major_version_from_href(self): fixture = 'http://www.testsite.com/v1/images' expected = 'http://www.testsite.com/images' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href(self): fixture = 'http://www.testsite.com/v1.1/images' expected = 'http://www.testsite.com/images' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href_2(self): fixture = 'http://www.testsite.com/v1.1/' expected = 'http://www.testsite.com/' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href_3(self): fixture = 'http://www.testsite.com/v10.10' expected = 'http://www.testsite.com' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href_4(self): fixture = 'http://www.testsite.com/v1.1/images/v10.5' expected = 'http://www.testsite.com/images/v10.5' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href_bad_request(self): fixture = 'http://www.testsite.com/1.1/images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) def test_remove_version_from_href_bad_request_2(self): fixture = 'http://www.testsite.com/v/images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) def test_remove_version_from_href_bad_request_3(self): fixture = 'http://www.testsite.com/v1.1images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) class TestCollectionLinks(test.TestCase): """Tests the _get_collection_links method.""" def _validate_next_link(self, item_count, osapi_max_limit, limit, should_link_exist): req = webob.Request.blank('/?limit=%s' % limit if limit else '/') link_return = [{"rel": "next", "href": "fake_link"}]
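# Pin osapi_max_limit for this case first; the wrapper functions
# defined below are created after self.flags() runs, so their
# max_limit defaults bind the freshly flagged CONF value rather than
# the import-time default.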
self.flags(osapi_max_limit=osapi_max_limit) if limit is None: limited_list_size = min(item_count, osapi_max_limit) else: limited_list_size = min(item_count, osapi_max_limit, limit) limited_list = [{"uuid": str(i)} for i in range(limited_list_size)] builder = common.ViewBuilder() def get_pagination_params(params, max_limit=CONF.osapi_max_limit, original_call=common.get_pagination_params): return original_call(params, max_limit) def _get_limit_param(params, max_limit=CONF.osapi_max_limit, original_call=common._get_limit_param): return original_call(params, max_limit) with mock.patch.object(common, 'get_pagination_params', get_pagination_params), \ mock.patch.object(common, '_get_limit_param', _get_limit_param), \ mock.patch.object(common.ViewBuilder, '_generate_next_link', return_value=link_return) as href_link_mock: results = builder._get_collection_links(req, limited_list, mock.sentinel.coll_key, item_count, "uuid") if should_link_exist: href_link_mock.assert_called_once_with(limited_list, "uuid", req, mock.sentinel.coll_key) self.assertThat(results, matchers.HasLength(1)) else: self.assertFalse(href_link_mock.called) self.assertThat(results, matchers.HasLength(0)) def test_items_equals_osapi_max_no_limit(self): item_count = 5 osapi_max_limit = 5 limit = None should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_equals_osapi_max_greater_than_limit(self): item_count = 5 osapi_max_limit = 5 limit = 4 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_equals_osapi_max_equals_limit(self): item_count = 5 osapi_max_limit = 5 limit = 5 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_equals_osapi_max_less_than_limit(self): item_count = 5 osapi_max_limit = 5 limit = 6 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_less_than_osapi_max_no_limit(self): item_count = 5 osapi_max_limit = 7 limit = None should_link_exist = False self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_limit_less_than_items_less_than_osapi_max(self): item_count = 5 osapi_max_limit = 7 limit = 4 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_limit_equals_items_less_than_osapi_max(self): item_count = 5 osapi_max_limit = 7 limit = 5 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_less_than_limit_less_than_osapi_max(self): item_count = 5 osapi_max_limit = 7 limit = 6 should_link_exist = False self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_less_than_osapi_max_equals_limit(self): item_count = 5 osapi_max_limit = 7 limit = 7 should_link_exist = False self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_less_than_osapi_max_less_than_limit(self): item_count = 5 osapi_max_limit = 7 limit = 8 should_link_exist = False self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_greater_than_osapi_max_no_limit(self): item_count = 5 osapi_max_limit = 3 limit = None should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_limit_less_than_items_greater_than_osapi_max(self): item_count = 5 osapi_max_limit = 3 limit = 2 
should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_greater_than_osapi_max_equals_limit(self): item_count = 5 osapi_max_limit = 3 limit = 3 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_greater_than_limit_greater_than_osapi_max(self): item_count = 5 osapi_max_limit = 3 limit = 4 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_items_equals_limit_greater_than_osapi_max(self): item_count = 5 osapi_max_limit = 3 limit = 5 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) def test_limit_greater_than_items_greater_than_osapi_max(self): item_count = 5 osapi_max_limit = 3 limit = 6 should_link_exist = True self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) class LinkPrefixTest(test.TestCase): def test_update_link_prefix(self): vb = common.ViewBuilder() result = vb._update_link_prefix("http://192.168.0.243:24/", "http://127.0.0.1/volume") self.assertEqual("http://127.0.0.1/volume", result) result = vb._update_link_prefix("http://foo.x.com/v1", "http://new.prefix.com") self.assertEqual("http://new.prefix.com/v1", result) result = vb._update_link_prefix( "http://foo.x.com/v1", "http://new.prefix.com:20455/new_extra_prefix") self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1", result) class RequestUrlTest(test.TestCase): def test_get_request_url_no_forward(self): app_url = 'http://127.0.0.1/v2;param?key=value#frag' request = type('', (), { 'application_url': app_url, 'headers': {} }) result = common.get_request_url(request) self.assertEqual(app_url, result) def test_get_request_url_forward(self): request = type('', (), { 'application_url': 'http://127.0.0.1/v2;param?key=value#frag', 'headers': {'X-Forwarded-Host': '192.168.0.243:24'} }) result = common.get_request_url(request) self.assertEqual('http://192.168.0.243:24/v2;param?key=value#frag', result) cinder-8.0.0/cinder/tests/unit/api/__init__.py0000664000567000056710000000000012701406250022356 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/api/common.py0000664000567000056710000000226412701406250022125 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
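# The helpers below compare a parsed XML element tree against a list of
# expected attribute dicts, one named attribute at a time. A usage
# sketch under the assumption of an lxml input (the XML literal here is
# illustrative, not taken from any test):
#
#     from lxml import etree
#     actual = etree.fromstring(
#         '<links><link rel="self" href="http://x" type="t"/></links>')
#     expected = [{'rel': 'self', 'href': 'http://x', 'type': 't'}]
#     assert compare_links(actual, expected)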
def compare_links(actual, expected): """Compare xml atom links.""" return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type')) def compare_media_types(actual, expected): """Compare xml media types.""" return compare_tree_to_dict(actual, expected, ('base', 'type')) def compare_tree_to_dict(actual, expected, keys): """Compare parts of lxml.etree objects to dicts.""" for elem, data in zip(actual, expected): for key in keys: if elem.get(key) != data.get(key): return False return True cinder-8.0.0/cinder/tests/unit/api/openstack/0000775000567000056710000000000012701406543022253 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/api/openstack/test_api_version_request.py0000664000567000056710000001216112701406250027746 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import six from cinder.api.openstack import api_version_request from cinder import exception from cinder import test @ddt.ddt class APIVersionRequestTests(test.TestCase): def test_init(self): result = api_version_request.APIVersionRequest() self.assertIsNone(result._ver_major) self.assertIsNone(result._ver_minor) def test_min_version(self): self.assertEqual( api_version_request.APIVersionRequest( api_version_request._MIN_API_VERSION), api_version_request.min_api_version()) def test_max_api_version(self): self.assertEqual( api_version_request.APIVersionRequest( api_version_request._MAX_API_VERSION), api_version_request.max_api_version()) @ddt.data( ('1.1', 1, 1), ('2.10', 2, 10), ('5.234', 5, 234), ('12.5', 12, 5), ('2.0', 2, 0), ('2.200', 2, 200) ) @ddt.unpack def test_valid_version_strings(self, version_string, major, minor): request = api_version_request.APIVersionRequest(version_string) self.assertEqual(major, request._ver_major) self.assertEqual(minor, request._ver_minor) def test_null_version(self): v = api_version_request.APIVersionRequest() self.assertTrue(v.is_null()) @ddt.data('2', '200', '2.1.4', '200.23.66.3', '5 .3', '5. 
3', '5.03', '02.1', '2.001', '', ' 2.1', '2.1 ') def test_invalid_version_strings(self, version_string): self.assertRaises(exception.InvalidAPIVersionString, api_version_request.APIVersionRequest, version_string) def test_cmpkey(self): request = api_version_request.APIVersionRequest('1.2') self.assertEqual((1, 2), request._cmpkey()) def test_version_comparisons(self): v1 = api_version_request.APIVersionRequest('2.0') v2 = api_version_request.APIVersionRequest('2.5') v3 = api_version_request.APIVersionRequest('5.23') v4 = api_version_request.APIVersionRequest('2.0') v_null = api_version_request.APIVersionRequest() self.assertTrue(v1 < v2) self.assertTrue(v1 <= v2) self.assertTrue(v3 > v2) self.assertTrue(v3 >= v2) self.assertTrue(v1 != v2) self.assertTrue(v1 == v4) self.assertTrue(v1 != v_null) self.assertTrue(v_null == v_null) self.assertFalse(v1 == '2.0') def test_version_matches(self): v1 = api_version_request.APIVersionRequest('2.0') v2 = api_version_request.APIVersionRequest('2.5') v3 = api_version_request.APIVersionRequest('2.45') v4 = api_version_request.APIVersionRequest('3.3') v5 = api_version_request.APIVersionRequest('3.23') v6 = api_version_request.APIVersionRequest('2.0') v7 = api_version_request.APIVersionRequest('3.3') v8 = api_version_request.APIVersionRequest('4.0') v_null = api_version_request.APIVersionRequest() self.assertTrue(v2.matches(v1, v3)) self.assertTrue(v2.matches(v1, v_null)) self.assertTrue(v1.matches(v6, v2)) self.assertTrue(v4.matches(v2, v7)) self.assertTrue(v4.matches(v_null, v7)) self.assertTrue(v4.matches(v_null, v8)) self.assertFalse(v1.matches(v2, v3)) self.assertFalse(v5.matches(v2, v4)) self.assertFalse(v2.matches(v3, v1)) self.assertTrue(v1.matches(v_null, v_null)) self.assertRaises(ValueError, v_null.matches, v1, v3) def test_matches_versioned_method(self): request = api_version_request.APIVersionRequest('2.0') self.assertRaises(exception.InvalidParameterValue, request.matches_versioned_method, 'fake_method') def test_get_string(self): v1_string = '3.23' v1 = api_version_request.APIVersionRequest(v1_string) self.assertEqual(v1_string, v1.get_string()) self.assertRaises(ValueError, api_version_request.APIVersionRequest().get_string) @ddt.data(('1', '0'), ('1', '1')) @ddt.unpack def test_str(self, major, minor): request_input = '%s.%s' % (major, minor) request = api_version_request.APIVersionRequest(request_input) request_string = six.text_type(request) self.assertEqual('API Version Request ' 'Major: %s, Minor: %s' % (major, minor), request_string) cinder-8.0.0/cinder/tests/unit/api/openstack/test_versioned_method.py0000664000567000056710000000244612701406250027223 0ustar jenkinsjenkins00000000000000# Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
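# VersionedMethod objects order themselves by start_version through
# _cmpkey(), which is what the WSGI dispatch layer relies on when
# scanning registered methods for the one whose version window covers a
# request. A minimal sketch of that selection idea (illustrative, not
# the cinder implementation):
#
#     for method in sorted(versioned_methods, reverse=True):
#         if request_version.matches(method.start_version,
#                                    method.end_version):
#             return method.func
#     # otherwise: no registered method covers the requested version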
import six from cinder.api.openstack import versioned_method from cinder import test class VersionedMethodTestCase(test.TestCase): def test_str(self): args = ('fake_name', 'fake_min', 'fake_max') method = versioned_method.VersionedMethod(*(args + (False, None))) method_string = six.text_type(method) self.assertEqual('Version Method %s: min: %s, max: %s' % args, method_string) def test_cmpkey(self): method = versioned_method.VersionedMethod( 'fake_name', 'fake_start_version', 'fake_end_version', False, 'fake_func') self.assertEqual('fake_start_version', method._cmpkey()) cinder-8.0.0/cinder/tests/unit/api/openstack/__init__.py0000664000567000056710000000000012701406250024345 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/api/openstack/test_wsgi.py0000664000567000056710000011212412701406250024631 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import webob from cinder.api.openstack import wsgi from cinder import exception from cinder import test from cinder.tests.unit.api import fakes class RequestTest(test.TestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = b"" self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = b"asdf
" self.assertRaises(exception.InvalidContentType, request.get_content_type) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept(self): for content_type in ('application/xml', 'application/vnd.openstack.volume+xml', 'application/json', 'application/vnd.openstack.volume+json'): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = content_type result = request.best_match_content_type() self.assertEqual(content_type, result) def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual("application/xml", result) def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.xml') result = request.best_match_content_type() self.assertEqual("application/xml", result) request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_and_query_extension(self): request = wsgi.Request.blank('/tests/123.xml') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual("application/xml", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_best_match_language(self): # Test that we are actually invoking language negotiation by webob request = wsgi.Request.blank('/') accepted = 'unknown-lang' request.headers = {'Accept-Language': accepted} def fake_best_match(self, offers, default_match=None): # Match would return None, if requested lang is not found return None self.stubs.SmartSet(request.accept_language, 'best_match', fake_best_match) self.assertIsNone(request.best_match_language()) # If accept-language is not included or empty, match should be None request.headers = {'Accept-Language': ''} self.assertIsNone(request.best_match_language()) request.headers.pop('Accept-Language') self.assertIsNone(request.best_match_language()) def test_cache_and_retrieve_resources(self): request = wsgi.Request.blank('/foo') # Test that trying to retrieve a cached object on # an empty cache fails gracefully self.assertIsNone(request.cached_resource()) self.assertIsNone(request.cached_resource_by_id('r-0')) resources = [] for x in range(3): resources.append({'id': 'r-%s' % x}) # Cache an empty list of resources using the default name request.cache_resource([]) self.assertEqual({}, request.cached_resource()) self.assertIsNone(request.cached_resource('r-0')) # Cache some resources request.cache_resource(resources[:2]) # Cache one resource request.cache_resource(resources[2]) # Cache a different resource name other_resource = {'id': 'o-0'} request.cache_resource(other_resource, name='other-resource') 
self.assertEqual(resources[0], request.cached_resource_by_id('r-0')) self.assertEqual(resources[1], request.cached_resource_by_id('r-1')) self.assertEqual(resources[2], request.cached_resource_by_id('r-2')) self.assertIsNone(request.cached_resource_by_id('r-3')) self.assertEqual({'r-0': resources[0], 'r-1': resources[1], 'r-2': resources[2]}, request.cached_resource()) self.assertEqual(other_resource, request.cached_resource_by_id('o-0', name='other-resource')) def test_cache_and_retrieve_volumes(self): self._test_cache_and_retrieve_resources('volume') def test_cache_and_retrieve_volume_types(self): self._test_cache_and_retrieve_resources('volume_type') def test_cache_and_retrieve_snapshots(self): self._test_cache_and_retrieve_resources('snapshot') def test_cache_and_retrieve_backups(self): self._test_cache_and_retrieve_resources('backup') def _test_cache_and_retrieve_resources(self, resource_name): """Generic helper for cache tests.""" cache_all_func = 'cache_db_%ss' % resource_name cache_one_func = 'cache_db_%s' % resource_name get_db_all_func = 'get_db_%ss' % resource_name get_db_one_func = 'get_db_%s' % resource_name r = wsgi.Request.blank('/foo') resources = [] for x in range(3): resources.append({'id': 'id%s' % x}) # Store 2 getattr(r, cache_all_func)(resources[:2]) # Store 1 getattr(r, cache_one_func)(resources[2]) self.assertEqual(resources[0], getattr(r, get_db_one_func)('id0')) self.assertEqual(resources[1], getattr(r, get_db_one_func)('id1')) self.assertEqual(resources[2], getattr(r, get_db_one_func)('id2')) self.assertIsNone(getattr(r, get_db_one_func)('id3')) self.assertEqual({'id0': resources[0], 'id1': resources[1], 'id2': resources[2]}, getattr(r, get_db_all_func)()) class ActionDispatcherTest(test.TestCase): def test_dispatch(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' self.assertEqual('pants', serializer.dispatch({}, action='create')) def test_dispatch_action_None(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action=None)) def test_dispatch_default(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action='update')) class DictSerializerTest(test.TestCase): def test_dispatch_default(self): serializer = wsgi.DictSerializer() self.assertEqual('', serializer.serialize({}, 'update')) class XMLDictSerializerTest(test.TestCase): def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = b'(2,3)' serializer = wsgi.XMLDictSerializer(xmlns="asdf") result = serializer.serialize(input_dict) result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_xml, result) class JSONDictSerializerTest(test.TestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = b'{"servers":{"a":[2,3]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) class TextDeserializerTest(test.TestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual({}, deserializer.deserialize({}, 'update')) class JSONDeserializerTest(test.TestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': 


class XMLDeserializerTest(test.TestCase):
    def test_xml(self):
        xml = """
            <a a1="1" a2="2">
              <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
              <d><e>1</e></d>
              <f>1</f>
            </a>
            """.strip()
        as_dict = {
            'body': {
                'a': {
                    'a1': '1',
                    'a2': '2',
                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
                    'd': {'e': '1'},
                    'f': '1',
                },
            },
        }
        metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
        deserializer = wsgi.XMLDeserializer(metadata=metadata)
        self.assertEqual(as_dict, deserializer.deserialize(xml))

    def test_xml_empty(self):
        xml = """<a></a>"""
        as_dict = {"body": {"a": {}}}
        deserializer = wsgi.XMLDeserializer()
        self.assertEqual(as_dict, deserializer.deserialize(xml))


class MetadataXMLDeserializerTest(test.TestCase):
    def test_xml_meta_parsing_special_character(self):
        """Test XML meta parsing with special characters.

        Test that when a SaxParser splits a string containing special
        characters into multiple childNodes there are no issues extracting
        the text.
        """
        meta_xml_str = """
            <metadata>
                <meta key="key3">value&amp;3</meta>
                <meta key="key2">value2</meta>
                <meta key="key1">value1</meta>
            </metadata>
            """.strip()
        meta_expected = {'key1': 'value1',
                         'key2': 'value2',
                         'key3': 'value&3'}
        meta_deserializer = wsgi.MetadataXMLDeserializer()
        document = wsgi.utils.safe_minidom_parse_string(meta_xml_str)
        root_node = document.childNodes[0]
        meta_extracted = meta_deserializer.extract_metadata(root_node)
        self.assertEqual(meta_expected, meta_extracted)
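

# Illustrative sketch, not part of the original suite: a bare minidom parse
# mirrors what MetadataXMLDeserializer.extract_metadata (tested above) has to
# cope with -- entities such as &amp; can split one logical string across
# several text childNodes. _demo_extract_meta is a hypothetical helper name.
def _demo_extract_meta():
    from xml.dom import minidom

    doc = minidom.parseString(
        '<metadata><meta key="k">value&amp;1</meta></metadata>')
    meta = {}
    for node in doc.documentElement.getElementsByTagName('meta'):
        # Join every text child so a split string is still read whole.
        text = ''.join(child.nodeValue for child in node.childNodes)
        meta[node.getAttribute('key')] = text
    return meta  # {'k': 'value&1'}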


class ResourceTest(test.TestCase):
    def test_resource_call(self):
        class Controller(object):
            def index(self, req):
                return 'off'

        req = webob.Request.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        self.assertEqual(b'off', response.body)
        self.assertEqual(200, response.status_int)

    def test_resource_not_authorized(self):
        class Controller(object):
            def index(self, req):
                raise exception.NotAuthorized()

        req = webob.Request.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        self.assertEqual(403, response.status_int)

    def test_dispatch(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants

        controller = Controller()
        resource = wsgi.Resource(controller)
        method, _extensions = resource.get_method(None, 'index', None, '')
        actual = resource.dispatch(method, None, {'pants': 'off'})
        expected = 'off'
        self.assertEqual(expected, actual)

    def test_get_method_undefined_controller_action(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants

        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(AttributeError, resource.get_method,
                          None, 'create', None, '')

    def test_get_method_action_json(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body

        controller = Controller()
        resource = wsgi.Resource(controller)
        method, _extensions = resource.get_method(None, 'action',
                                                  'application/json',
                                                  '{"fooAction": true}')
        self.assertEqual(controller._action_foo, method)

    def test_get_method_action_xml(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body

        controller = Controller()
        resource = wsgi.Resource(controller)
        method, _extensions = resource.get_method(
            None, 'action', 'application/xml',
            '<fooAction>true</fooAction>')
        self.assertEqual(controller._action_foo, method)

    def test_get_method_action_bad_body(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body

        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(exception.MalformedRequestBody, resource.get_method,
                          None, 'action', 'application/json', '{}')

    def test_get_method_unknown_controller_action(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body

        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(KeyError, resource.get_method,
                          None, 'action', 'application/json',
                          '{"barAction": true}')

    def test_get_method_action_method(self):
        class Controller(object):
            def action(self, req, pants=None):
                return pants

        controller = Controller()
        resource = wsgi.Resource(controller)
        method, _extensions = resource.get_method(None, 'action',
                                                  'application/xml',
                                                  '<fooAction>true</fooAction>')
        self.assertEqual(controller.action, method)

        if isinstance(body, six.text_type):
            body = body.encode('utf-8')
        req = webob.Request.blank('/v2/fake/backups')
        req.body = body
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())

        self.assertEqual(202, res.status_int)
        dom = minidom.parseString(res.body)
        backup = dom.getElementsByTagName('backup')
        self.assertTrue(backup.item(0).hasAttribute('id'))
        self.assertTrue(_mock_service_get_all_by_topic.called)
        self.assertTrue(mock_validate.called)

        db.volume_destroy(context.get_admin_context(), volume_id)

    def test_create_backup_with_invalid_snapshot(self):
        volume_id = utils.create_volume(self.context, size=5,
                                        status='available')['id']
        snapshot_id = utils.create_snapshot(self.context, volume_id,
                                            status='error')['id']
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "snapshot_id": snapshot_id,
                           "volume_id": volume_id,
                           }
                }
        self.addCleanup(db.volume_destroy, self.context.elevated(),
                        volume_id)
        self.addCleanup(db.snapshot_destroy, self.context.elevated(),
                        snapshot_id)
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(400, res.status_int)
        self.assertEqual(400, res_dict['badRequest']['code'])
        self.assertIsNotNone(res_dict['badRequest']['message'])

    def test_create_backup_with_non_existent_snapshot(self):
        volume_id = utils.create_volume(self.context, size=5,
                                        status='restoring')['id']
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "snapshot_id": fake_constants.snapshot_id,
                           "volume_id": volume_id,
                           }
                }
        self.addCleanup(db.volume_destroy, self.context.elevated(),
                        volume_id)
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(404, res.status_int)
        self.assertEqual(404, res_dict['itemNotFound']['code'])
        self.assertIsNotNone(res_dict['itemNotFound']['message'])
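
    # Illustrative sketch, not part of the original suite: every
    # backup-create test in this class builds its POST the same way, so the
    # pattern can be captured in one place. _demo_backup_create_request is a
    # hypothetical helper; webob, jsonutils and fakes are the objects this
    # test file already imports.
    def _demo_backup_create_request(self, volume_id):
        body = {"backup": {"display_name": "nightly001",
                           "volume_id": volume_id}}
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app())
        # Return the status code and decoded JSON body for assertions.
        return res.status_int, jsonutils.loads(res.body)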

    @mock.patch('cinder.db.service_get_all_by_topic')
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    @ddt.data(False, True)
    def test_create_backup_delta(self, backup_from_snapshot,
                                 mock_validate,
                                 _mock_service_get_all_by_topic):
        _mock_service_get_all_by_topic.return_value = [
            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]
        volume_id = utils.create_volume(self.context, size=5)['id']
        snapshot = None
        snapshot_id = None
        if backup_from_snapshot:
            snapshot = utils.create_snapshot(self.context,
                                             volume_id,
                                             status='available')
            snapshot_id = snapshot.id
        backup_id = self._create_backup(volume_id,
                                        status=fields.BackupStatus.AVAILABLE)
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "volume_id": volume_id,
                           "container": "nightlybackups",
                           "incremental": True,
                           "snapshot_id": snapshot_id,
                           }
                }
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(202, res.status_int)
        self.assertIn('id', res_dict['backup'])
        self.assertTrue(_mock_service_get_all_by_topic.called)
        self.assertTrue(mock_validate.called)

        db.backup_destroy(context.get_admin_context(), backup_id)
        if snapshot:
            snapshot.destroy()
        db.volume_destroy(context.get_admin_context(), volume_id)

    @mock.patch('cinder.db.service_get_all_by_topic')
    def test_create_incremental_backup_invalid_status(
            self, _mock_service_get_all_by_topic):
        _mock_service_get_all_by_topic.return_value = [
            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]
        volume_id = utils.create_volume(self.context, size=5)['id']

        backup_id = self._create_backup(volume_id)
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "volume_id": volume_id,
                           "container": "nightlybackups",
                           "incremental": True,
                           }
                }
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(400, res_dict['badRequest']['code'])
        self.assertEqual('Invalid backup: The parent backup must be '
                         'available for incremental backup.',
                         res_dict['badRequest']['message'])

        db.backup_destroy(context.get_admin_context(), backup_id)
        db.volume_destroy(context.get_admin_context(), volume_id)

    def test_create_backup_with_no_body(self):
        # omit body from the request
        req = webob.Request.blank('/v2/fake/backups')
        req.body = jsonutils.dump_as_bytes(None)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(400, res.status_int)
        self.assertEqual(400, res_dict['badRequest']['code'])
        self.assertEqual("Missing required element 'backup' in request body.",
                         res_dict['badRequest']['message'])

    def test_create_backup_with_body_KeyError(self):
        # omit volume_id from body
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "container": "nightlybackups",
                           }
                }
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(400, res.status_int)
        self.assertEqual(400, res_dict['badRequest']['code'])
        self.assertEqual('Incorrect request body format',
                         res_dict['badRequest']['message'])

    def test_create_backup_with_VolumeNotFound(self):
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "volume_id": 9999,
                           "container": "nightlybackups",
                           }
                }
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body =
jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Volume 9999 could not be found.', res_dict['itemNotFound']['message']) def test_create_backup_with_InvalidVolume(self): # need to create the volume referenced below first volume_id = utils.create_volume(self.context, size=5, status='restoring')['id'] body = {"backup": {"display_name": "nightly001", "display_description": "Nightly Backup 03-Sep-2012", "volume_id": volume_id, "container": "nightlybackups", } } req = webob.Request.blank('/v2/fake/backups') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) @mock.patch('cinder.db.service_get_all_by_topic') def test_create_backup_WithOUT_enabled_backup_service( self, _mock_service_get_all_by_topic): # need an enabled backup service available _mock_service_get_all_by_topic.return_value = [] volume_id = utils.create_volume(self.context, size=2)['id'] req = webob.Request.blank('/v2/fake/backups') body = {"backup": {"display_name": "nightly001", "display_description": "Nightly Backup 03-Sep-2012", "volume_id": volume_id, "container": "nightlybackups", } } req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(500, res.status_int) self.assertEqual(500, res_dict['computeFault']['code']) self.assertEqual('Service cinder-backup could not be found.', res_dict['computeFault']['message']) volume = self.volume_api.get(context.get_admin_context(), volume_id) self.assertEqual('available', volume['status']) @mock.patch('cinder.db.service_get_all_by_topic') def test_create_incremental_backup_invalid_no_full( self, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] volume_id = utils.create_volume(self.context, size=5, status='available')['id'] body = {"backup": {"display_name": "nightly001", "display_description": "Nightly Backup 03-Sep-2012", "volume_id": volume_id, "container": "nightlybackups", "incremental": True, } } req = webob.Request.blank('/v2/fake/backups') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: No backups available to do ' 'an incremental backup.', res_dict['badRequest']['message']) db.volume_destroy(context.get_admin_context(), volume_id) @mock.patch('cinder.db.service_get_all_by_topic') def test_is_backup_service_enabled(self, _mock_service_get_all_by_topic): testhost = 'test_host' alt_host = 'strange_host' empty_service = [] # service host not match with volume's host host_not_match = [{'availability_zone': 'fake_az', 'host': alt_host, 'disabled': 0, 'updated_at': timeutils.utcnow()}] # service az not match with volume's az az_not_match = [{'availability_zone': 'strange_az', 'host': testhost, 'disabled': 0, 'updated_at': timeutils.utcnow()}] # service 
disabled disabled_service = [] # dead service that last reported at 20th century dead_service = [{'availability_zone': 'fake_az', 'host': alt_host, 'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}] # first service's host not match but second one works. multi_services = [{'availability_zone': 'fake_az', 'host': alt_host, 'disabled': 0, 'updated_at': timeutils.utcnow()}, {'availability_zone': 'fake_az', 'host': testhost, 'disabled': 0, 'updated_at': timeutils.utcnow()}] # Setup mock to run through the following service cases _mock_service_get_all_by_topic.side_effect = [empty_service, host_not_match, az_not_match, disabled_service, dead_service, multi_services] volume_id = utils.create_volume(self.context, size=2, host=testhost)['id'] volume = self.volume_api.get(context.get_admin_context(), volume_id) # test empty service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume['availability_zone'], testhost)) # test host not match service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume['availability_zone'], testhost)) # test az not match service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume['availability_zone'], testhost)) # test disabled service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume['availability_zone'], testhost)) # test dead service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume['availability_zone'], testhost)) # test multi services and the last service matches self.assertTrue(self.backup_api._is_backup_service_enabled( volume['availability_zone'], testhost)) @mock.patch('cinder.db.service_get_all_by_topic') def test_get_available_backup_service(self, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'az1', 'host': 'testhost1', 'disabled': 0, 'updated_at': timeutils.utcnow()}, {'availability_zone': 'az2', 'host': 'testhost2', 'disabled': 0, 'updated_at': timeutils.utcnow()}, {'availability_zone': 'az2', 'host': 'testhost3', 'disabled': 0, 'updated_at': timeutils.utcnow()}, ] actual_host = self.backup_api._get_available_backup_service_host( None, 'az1') self.assertEqual('testhost1', actual_host) actual_host = self.backup_api._get_available_backup_service_host( 'testhost2', 'az2') self.assertIn(actual_host, ['testhost2', 'testhost3']) actual_host = self.backup_api._get_available_backup_service_host( 'testhost4', 'az1') self.assertEqual('testhost1', actual_host) @mock.patch('cinder.db.service_get_all_by_topic') def test_get_available_backup_service_with_same_host( self, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'az1', 'host': 'testhost1', 'disabled': 0, 'updated_at': timeutils.utcnow()}, {'availability_zone': 'az2', 'host': 'testhost2', 'disabled': 0, 'updated_at': timeutils.utcnow()}, ] self.override_config('backup_use_same_host', True) actual_host = self.backup_api._get_available_backup_service_host( None, 'az1') self.assertEqual('testhost1', actual_host) actual_host = self.backup_api._get_available_backup_service_host( 'testhost2', 'az2') self.assertEqual('testhost2', actual_host) self.assertRaises(exception.ServiceNotFound, self.backup_api._get_available_backup_service_host, 'testhost4', 'az1') @mock.patch('cinder.db.service_get_all_by_topic') def test_delete_backup_available( self, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 
'updated_at': timeutils.utcnow()}] backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) self.assertEqual(fields.BackupStatus.DELETING, self._get_backup_attrib(backup_id, 'status')) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.db.service_get_all_by_topic') def test_delete_delta_backup(self, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) delta_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, incremental=True) req = webob.Request.blank('/v2/fake/backups/%s' % delta_id) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) self.assertEqual(fields.BackupStatus.DELETING, self._get_backup_attrib(delta_id, 'status')) db.backup_destroy(context.get_admin_context(), delta_id) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.db.service_get_all_by_topic') def test_delete_backup_error(self, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] backup_id = self._create_backup(status=fields.BackupStatus.ERROR) req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) self.assertEqual(fields.BackupStatus.DELETING, self._get_backup_attrib(backup_id, 'status')) db.backup_destroy(context.get_admin_context(), backup_id) def test_delete_backup_with_backup_NotFound(self): req = webob.Request.blank('/v2/fake/backups/9999') req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Backup 9999 could not be found.', res_dict['itemNotFound']['message']) def test_delete_backup_with_InvalidBackup(self): backup_id = self._create_backup() req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Backup status must be ' 'available or error', res_dict['badRequest']['message']) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.db.service_get_all_by_topic') def test_delete_backup_with_InvalidBackup2(self, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] volume_id = utils.create_volume(self.context, size=5)['id'] backup_id = self._create_backup(volume_id, status=fields.BackupStatus.AVAILABLE) delta_backup_id = self._create_backup( status=fields.BackupStatus.AVAILABLE, incremental=True, parent_id=backup_id) req = 
webob.Request.blank('/v2/fake/backups/%s' % backup_id)
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(400, res.status_int)
        self.assertEqual(400, res_dict['badRequest']['code'])
        self.assertEqual('Invalid backup: Incremental backups '
                         'exist for this backup.',
                         res_dict['badRequest']['message'])

        db.backup_destroy(context.get_admin_context(), delta_backup_id)
        db.backup_destroy(context.get_admin_context(), backup_id)

    @mock.patch('cinder.db.service_get_all_by_topic')
    def test_delete_backup_service_down(self,
                                        _mock_service_get_all_by_topic):
        _mock_service_get_all_by_topic.return_value = [
            {'availability_zone': 'az1', 'host': 'testhost',
             'disabled': 0, 'updated_at': '1775-04-19 05:00:00'}]
        backup_id = self._create_backup(status='available')
        req = webob.Request.blank('/v2/fake/backups/%s' % backup_id)
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())

        self.assertEqual(404, res.status_int)
        db.backup_destroy(context.get_admin_context(), backup_id)

    @mock.patch('cinder.backup.api.API._get_available_backup_service_host')
    def test_restore_backup_volume_id_specified_json(
            self, _mock_get_backup_host):
        _mock_get_backup_host.return_value = 'testhost'
        backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE)
        # need to create the volume referenced below first
        volume_name = 'test1'
        volume_id = utils.create_volume(self.context, size=5,
                                        display_name=volume_name)['id']

        body = {"restore": {"volume_id": volume_id, }}
        req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(202, res.status_int)
        self.assertEqual(backup_id, res_dict['restore']['backup_id'])
        self.assertEqual(volume_id, res_dict['restore']['volume_id'])
        self.assertEqual(volume_name, res_dict['restore']['volume_name'])

    @mock.patch('cinder.backup.api.API._get_available_backup_service_host')
    def test_restore_backup_volume_id_specified_xml(
            self, _mock_get_backup_host):
        _mock_get_backup_host.return_value = 'testhost'
        volume_name = 'test1'
        backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE)
        volume_id = utils.create_volume(self.context,
                                        size=2,
                                        display_name=volume_name)['id']
        body = '<restore volume_id="%s"/>' % volume_id
        if isinstance(body, six.text_type):
            body = body.encode('utf-8')
        req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
        req.body = body
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())

        self.assertEqual(202, res.status_int)
        dom = minidom.parseString(res.body)
        restore = dom.getElementsByTagName('restore')
        self.assertEqual(backup_id,
                         restore.item(0).getAttribute('backup_id'))
        self.assertEqual(volume_id, restore.item(0).getAttribute('volume_id'))

        db.backup_destroy(context.get_admin_context(), backup_id)
        db.volume_destroy(context.get_admin_context(), volume_id)

    def test_restore_backup_with_no_body(self):
        # omit body from the request
        backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE)

        req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
        req.body = jsonutils.dump_as_bytes(None)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        res =
req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual("Missing required element 'restore' in request body.", res_dict['badRequest']['message']) db.backup_destroy(context.get_admin_context(), backup_id) def test_restore_backup_with_body_KeyError(self): # omit restore from body backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) body = {"": {}} req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual("Missing required element 'restore' in request body.", res_dict['badRequest']['message']) @mock.patch('cinder.db.service_get_all_by_topic') @mock.patch('cinder.volume.api.API.create') def test_restore_backup_volume_id_unspecified( self, _mock_volume_api_create, _mock_service_get_all_by_topic): # intercept volume creation to ensure created volume # has status of available def fake_volume_api_create(context, size, name, description): volume_id = utils.create_volume(self.context, size=size)['id'] return db.volume_get(context, volume_id) _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] _mock_volume_api_create.side_effect = fake_volume_api_create backup_id = self._create_backup(size=5, status=fields.BackupStatus.AVAILABLE) body = {"restore": {}} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(202, res.status_int) self.assertEqual(backup_id, res_dict['restore']['backup_id']) @mock.patch('cinder.db.service_get_all_by_topic') @mock.patch('cinder.volume.api.API.create') def test_restore_backup_name_specified(self, _mock_volume_api_create, _mock_service_get_all_by_topic): # Intercept volume creation to ensure created volume # has status of available def fake_volume_api_create(context, size, name, description): volume_id = utils.create_volume(self.context, size=size, display_name=name)['id'] return db.volume_get(context, volume_id) _mock_volume_api_create.side_effect = fake_volume_api_create _mock_service_get_all_by_topic.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] backup_id = self._create_backup(size=5, status=fields.BackupStatus.AVAILABLE) body = {"restore": {'name': 'vol-01'}} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) description = 'auto-created_from_restore_from_backup' # Assert that we have indeed passed on the name parameter _mock_volume_api_create.assert_called_once_with( mock.ANY, 5, body['restore']['name'], description) self.assertEqual(202, res.status_int) self.assertEqual(backup_id, res_dict['restore']['backup_id']) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') def 
test_restore_backup_name_volume_id_specified( self, _mock_get_backup_host): _mock_get_backup_host.return_value = 'testhost' backup_id = self._create_backup(size=5, status=fields.BackupStatus.AVAILABLE) orig_vol_name = "vol-00" volume_id = utils.create_volume(self.context, size=5, display_name=orig_vol_name)['id'] body = {"restore": {'name': 'vol-01', 'volume_id': volume_id}} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(202, res.status_int) self.assertEqual(backup_id, res_dict['restore']['backup_id']) self.assertEqual(volume_id, res_dict['restore']['volume_id']) restored_vol = db.volume_get(self.context, res_dict['restore']['volume_id']) # Ensure that the original volume name wasn't overridden self.assertEqual(orig_vol_name, restored_vol['display_name']) @mock.patch('cinder.backup.API.restore') def test_restore_backup_with_InvalidInput(self, _mock_volume_api_restore): msg = _("Invalid input") _mock_volume_api_restore.side_effect = \ exception.InvalidInput(reason=msg) backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) # need to create the volume referenced below first volume_id = utils.create_volume(self.context, size=0)['id'] body = {"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid input received: Invalid input', res_dict['badRequest']['message']) def test_restore_backup_with_InvalidVolume(self): backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) # need to create the volume referenced below first volume_id = utils.create_volume(self.context, size=5, status='attaching')['id'] body = {"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid volume: Volume to be restored to must ' 'be available', res_dict['badRequest']['message']) db.volume_destroy(context.get_admin_context(), volume_id) db.backup_destroy(context.get_admin_context(), backup_id) def test_restore_backup_with_InvalidBackup(self): backup_id = self._create_backup(status=fields.BackupStatus.RESTORING) # need to create the volume referenced below first volume_id = utils.create_volume(self.context, size=5)['id'] body = {"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Backup status must be available', res_dict['badRequest']['message']) db.volume_destroy(context.get_admin_context(), volume_id) 
db.backup_destroy(context.get_admin_context(), backup_id) def test_restore_backup_with_BackupNotFound(self): # need to create the volume referenced below first volume_id = utils.create_volume(self.context, size=5)['id'] body = {"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/9999/restore') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Backup 9999 could not be found.', res_dict['itemNotFound']['message']) db.volume_destroy(context.get_admin_context(), volume_id) def test_restore_backup_with_VolumeNotFound(self): backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) body = {"restore": {"volume_id": "9999", }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Volume 9999 could not be found.', res_dict['itemNotFound']['message']) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.backup.API.restore') def test_restore_backup_with_VolumeSizeExceedsAvailableQuota( self, _mock_backup_restore): _mock_backup_restore.side_effect = \ exception.VolumeSizeExceedsAvailableQuota(requested='2', consumed='2', quota='3') backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) # need to create the volume referenced below first volume_id = utils.create_volume(self.context, size=5)['id'] body = {"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(413, res.status_int) self.assertEqual(413, res_dict['overLimit']['code']) self.assertEqual('Requested volume or snapshot exceeds allowed ' 'gigabytes quota. 
Requested 2G, quota is 3G and ' '2G has been consumed.', res_dict['overLimit']['message']) @mock.patch('cinder.backup.API.restore') def test_restore_backup_with_VolumeLimitExceeded(self, _mock_backup_restore): _mock_backup_restore.side_effect = \ exception.VolumeLimitExceeded(allowed=1) backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) # need to create the volume referenced below first volume_id = utils.create_volume(self.context, size=5)['id'] body = {"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(413, res.status_int) self.assertEqual(413, res_dict['overLimit']['code']) self.assertEqual("Maximum number of volumes allowed (1) exceeded for" " quota 'volumes'.", res_dict['overLimit']['message']) def test_restore_backup_to_undersized_volume(self): backup_size = 10 backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, size=backup_size) # need to create the volume referenced below first volume_size = 5 volume_id = utils.create_volume(self.context, size=volume_size)['id'] body = {"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid volume: volume size %d is too ' 'small to restore backup of size %d.' % (volume_size, backup_size), res_dict['badRequest']['message']) db.volume_destroy(context.get_admin_context(), volume_id) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') def test_restore_backup_to_oversized_volume(self, _mock_get_backup_host): backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, size=10) _mock_get_backup_host.return_value = 'testhost' # need to create the volume referenced below first volume_name = 'test1' volume_id = utils.create_volume(self.context, size=15, display_name = volume_name)['id'] body = {"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(202, res.status_int) self.assertEqual(backup_id, res_dict['restore']['backup_id']) self.assertEqual(volume_id, res_dict['restore']['volume_id']) self.assertEqual(volume_name, res_dict['restore']['volume_name']) db.volume_destroy(context.get_admin_context(), volume_id) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') @mock.patch('cinder.backup.api.API._get_available_backup_service_host') def test_restore_backup_with_different_host(self, _mock_get_backup_host, mock_restore_backup): volume_name = 'test1' backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, size=10, host='HostA') volume_id = utils.create_volume(self.context, size=10, host='HostB@BackendB#PoolB', display_name=volume_name)['id'] _mock_get_backup_host.return_value = 'testhost' body = 
{"restore": {"volume_id": volume_id, }} req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(202, res.status_int) self.assertEqual(backup_id, res_dict['restore']['backup_id']) self.assertEqual(volume_id, res_dict['restore']['volume_id']) self.assertEqual(volume_name, res_dict['restore']['volume_name']) mock_restore_backup.assert_called_once_with(mock.ANY, u'testhost', mock.ANY, volume_id) # Manually check if restore_backup was called with appropriate backup. self.assertEqual(backup_id, mock_restore_backup.call_args[0][2].id) db.volume_destroy(context.get_admin_context(), volume_id) db.backup_destroy(context.get_admin_context(), backup_id) def test_export_record_as_non_admin(self): backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, size=10) req = webob.Request.blank('/v2/fake/backups/%s/export_record' % backup_id) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) # request is not authorized self.assertEqual(403, res.status_int) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record') def test_export_backup_record_id_specified_json(self, _mock_export_record_rpc, _mock_get_backup_host): backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, size=10) ctx = context.RequestContext('admin', 'fake', is_admin=True) backup_service = 'fake' backup_url = 'fake' _mock_export_record_rpc.return_value = \ {'backup_service': backup_service, 'backup_url': backup_url} _mock_get_backup_host.return_value = 'testhost' req = webob.Request.blank('/v2/fake/backups/%s/export_record' % backup_id) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) # verify that request is successful self.assertEqual(200, res.status_int) self.assertEqual(backup_service, res_dict['backup-record']['backup_service']) self.assertEqual(backup_url, res_dict['backup-record']['backup_url']) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record') def test_export_record_backup_id_specified_xml(self, _mock_export_record_rpc, _mock_get_backup_host): backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, size=10) ctx = context.RequestContext('admin', 'fake', is_admin=True) backup_service = 'fake' backup_url = 'fake' _mock_export_record_rpc.return_value = \ {'backup_service': backup_service, 'backup_url': backup_url} _mock_get_backup_host.return_value = 'testhost' req = webob.Request.blank('/v2/fake/backups/%s/export_record' % backup_id) req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) export = dom.getElementsByTagName('backup-record') self.assertEqual(backup_service, export.item(0).getAttribute('backup_service')) self.assertEqual(backup_url, export.item(0).getAttribute('backup_url')) # db.backup_destroy(context.get_admin_context(), backup_id) def test_export_record_with_bad_backup_id(self): ctx = 
context.RequestContext('admin', 'fake', is_admin=True) backup_id = 'bad_id' req = webob.Request.blank('/v2/fake/backups/%s/export_record' % backup_id) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Backup %s could not be found.' % backup_id, res_dict['itemNotFound']['message']) def test_export_record_for_unavailable_backup(self): backup_id = self._create_backup(status=fields.BackupStatus.RESTORING) ctx = context.RequestContext('admin', 'fake', is_admin=True) req = webob.Request.blank('/v2/fake/backups/%s/export_record' % backup_id) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Backup status must be available ' 'and not restoring.', res_dict['badRequest']['message']) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record') def test_export_record_with_unavailable_service(self, _mock_export_record_rpc, _mock_get_backup_host): msg = 'fake unavailable service' _mock_export_record_rpc.side_effect = \ exception.InvalidBackup(reason=msg) _mock_get_backup_host.return_value = 'testhost' backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) ctx = context.RequestContext('admin', 'fake', is_admin=True) req = webob.Request.blank('/v2/fake/backups/%s/export_record' % backup_id) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: %s' % msg, res_dict['badRequest']['message']) db.backup_destroy(context.get_admin_context(), backup_id) def test_import_record_as_non_admin(self): backup_service = 'fake' backup_url = 'fake' req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) # request is not authorized self.assertEqual(403, res.status_int) @mock.patch('cinder.backup.api.API._list_backup_hosts') @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') def test_import_record_volume_id_specified_json(self, _mock_import_record_rpc, _mock_list_services): utils.replace_obj_loader(self, objects.Backup) project_id = 'fake' backup_service = 'fake' ctx = context.RequestContext('admin', project_id, is_admin=True) backup = objects.Backup(ctx, id='id', user_id='user_id', project_id=project_id, status=fields.BackupStatus.AVAILABLE) backup_url = backup.encode_record() _mock_import_record_rpc.return_value = None _mock_list_services.return_value = [backup_service] req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = 
req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
        res_dict = jsonutils.loads(res.body)
        # verify that request is successful
        self.assertEqual(201, res.status_int)
        self.assertIn('id', res_dict['backup'])
        self.assertEqual('id', res_dict['backup']['id'])

        # Verify that entry in DB is as expected
        db_backup = objects.Backup.get_by_id(ctx, 'id')
        self.assertEqual(ctx.project_id, db_backup.project_id)
        self.assertEqual(ctx.user_id, db_backup.user_id)
        self.assertEqual('0000-0000-0000-0000', db_backup.volume_id)
        self.assertEqual(fields.BackupStatus.CREATING, db_backup.status)

    @mock.patch('cinder.backup.api.API._list_backup_hosts')
    @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
    def test_import_record_volume_id_exists_deleted(self,
                                                    _mock_import_record_rpc,
                                                    _mock_list_services):
        ctx = context.RequestContext('admin', 'fake', is_admin=True)
        utils.replace_obj_loader(self, objects.Backup)

        # Original backup belonged to a different user_id and project_id
        backup = objects.Backup(ctx, id='id', user_id='original_user_id',
                                project_id='original_project_id',
                                status=fields.BackupStatus.AVAILABLE)
        backup_url = backup.encode_record()

        # Deleted DB entry has project_id and user_id set to fake
        backup_id = self._create_backup('id',
                                        status=fields.BackupStatus.DELETED)
        backup_service = 'fake'
        _mock_import_record_rpc.return_value = None
        _mock_list_services.return_value = [backup_service]

        req = webob.Request.blank('/v2/fake/backups/import_record')
        body = {'backup-record': {'backup_service': backup_service,
                                  'backup_url': backup_url}}
        req.body = jsonutils.dump_as_bytes(body)
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
        res_dict = jsonutils.loads(res.body)

        # verify that request is successful
        self.assertEqual(201, res.status_int)
        self.assertIn('id', res_dict['backup'])
        self.assertEqual('id', res_dict['backup']['id'])

        # Verify that entry in DB is as expected, with new project and user_id
        db_backup = objects.Backup.get_by_id(ctx, 'id')
        self.assertEqual(ctx.project_id, db_backup.project_id)
        self.assertEqual(ctx.user_id, db_backup.user_id)
        self.assertEqual('0000-0000-0000-0000', db_backup.volume_id)
        self.assertEqual(fields.BackupStatus.CREATING, db_backup.status)

        db.backup_destroy(context.get_admin_context(), backup_id)

    @mock.patch('cinder.backup.api.API._list_backup_hosts')
    @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
    def test_import_record_volume_id_specified_xml(self,
                                                   _mock_import_record_rpc,
                                                   _mock_list_services):
        utils.replace_obj_loader(self, objects.Backup)
        project_id = 'fake'
        backup_service = 'fake'
        ctx = context.RequestContext('admin', project_id, is_admin=True)
        backup = objects.Backup(ctx, id='id', user_id='user_id',
                                project_id=project_id,
                                status=fields.BackupStatus.AVAILABLE)
        backup_url = backup.encode_record()
        _mock_import_record_rpc.return_value = None
        _mock_list_services.return_value = [backup_service]

        if six.PY2:
            backup_url = backup_url.encode('utf-8')
        body = ('<backup-record backup_url="%(backup_url)s" '
                'backup_service="%(backup_service)s"/>'
                % {'backup_url': backup_url,
                   'backup_service': backup_service})
        if isinstance(body, six.text_type):
            body = body.encode('utf-8')
        req = webob.Request.blank('/v2/fake/backups/import_record')
        req.body = body
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))

        # verify that request is successful
        self.assertEqual(201, res.status_int)

        # Verify that entry in DB is as expected
        db_backup = objects.Backup.get_by_id(ctx, 'id')
self.assertEqual(ctx.project_id, db_backup.project_id) self.assertEqual(ctx.user_id, db_backup.user_id) self.assertEqual('0000-0000-0000-0000', db_backup.volume_id) self.assertEqual(fields.BackupStatus.CREATING, db_backup.status) # Verify the response dom = minidom.parseString(res.body) back = dom.getElementsByTagName('backup') self.assertEqual(backup.id, back.item(0).attributes['id'].value) @mock.patch('cinder.backup.api.API._list_backup_hosts') def test_import_record_with_no_backup_services(self, _mock_list_services): ctx = context.RequestContext('admin', 'fake', is_admin=True) backup_service = 'fake' backup_url = 'fake' _mock_list_services.return_value = [] req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(500, res.status_int) self.assertEqual(500, res_dict['computeFault']['code']) self.assertEqual('Service %s could not be found.' % backup_service, res_dict['computeFault']['message']) @mock.patch('cinder.backup.api.API._list_backup_hosts') def test_import_backup_with_wrong_backup_url(self, _mock_list_services): ctx = context.RequestContext('admin', 'fake', is_admin=True) backup_service = 'fake' backup_url = 'fake' _mock_list_services.return_value = ['no-match1', 'no-match2'] req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual("Invalid input received: Can't parse backup record.", res_dict['badRequest']['message']) @mock.patch('cinder.backup.api.API._list_backup_hosts') def test_import_backup_with_existing_backup_record(self, _mock_list_services): ctx = context.RequestContext('admin', 'fake', is_admin=True) backup_id = self._create_backup('1') backup_service = 'fake' backup = objects.Backup.get_by_id(ctx, backup_id) backup_url = backup.encode_record() _mock_list_services.return_value = ['no-match1', 'no-match2'] req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Backup already exists in database.', res_dict['badRequest']['message']) db.backup_destroy(context.get_admin_context(), backup_id) @mock.patch('cinder.backup.api.API._list_backup_hosts') @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') def test_import_backup_with_missing_backup_services(self, _mock_import_record, _mock_list_services): ctx = context.RequestContext('admin', 'fake', is_admin=True) backup_id = self._create_backup('1', status=fields.BackupStatus.DELETED) backup_service = 'fake' backup = objects.Backup.get_by_id(ctx, backup_id) backup_url = backup.encode_record() 
_mock_list_services.return_value = ['no-match1', 'no-match2'] _mock_import_record.side_effect = \ exception.ServiceNotFound(service_id='fake') req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(500, res.status_int) self.assertEqual(500, res_dict['computeFault']['code']) self.assertEqual('Service %s could not be found.' % backup_service, res_dict['computeFault']['message']) db.backup_destroy(context.get_admin_context(), backup_id) def test_import_record_with_missing_body_elements(self): ctx = context.RequestContext('admin', 'fake', is_admin=True) backup_service = 'fake' backup_url = 'fake' # test with no backup_service req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Incorrect request body format.', res_dict['badRequest']['message']) # test with no backup_url req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Incorrect request body format.', res_dict['badRequest']['message']) # test with no backup_url and backup_url req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Incorrect request body format.', res_dict['badRequest']['message']) def test_import_record_with_no_body(self): ctx = context.RequestContext('admin', 'fake', is_admin=True) req = webob.Request.blank('/v2/fake/backups/import_record') req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) # verify that request is successful self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual("Missing required element 'backup-record' in " "request body.", res_dict['badRequest']['message']) @mock.patch('cinder.backup.rpcapi.BackupAPI.check_support_to_force_delete', return_value=False) def test_force_delete_with_not_supported_operation(self, mock_check_support): backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) backup = self.backup_api.get(self.context, backup_id) self.assertRaises(exception.NotSupportedOperation, self.backup_api.delete, self.context, backup, True) @ddt.data(False, True) def 
test_show_incremental_backup(self, backup_from_snapshot): volume_id = utils.create_volume(self.context, size=5)['id'] parent_backup_id = self._create_backup( volume_id, status=fields.BackupStatus.AVAILABLE, num_dependent_backups=1) backup_id = self._create_backup(volume_id, status=fields.BackupStatus.AVAILABLE, incremental=True, parent_id=parent_backup_id, num_dependent_backups=1) snapshot = None snapshot_id = None if backup_from_snapshot: snapshot = utils.create_snapshot(self.context, volume_id) snapshot_id = snapshot.id child_backup_id = self._create_backup( volume_id, status=fields.BackupStatus.AVAILABLE, incremental=True, parent_id=backup_id, snapshot_id=snapshot_id) req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertTrue(res_dict['backup']['is_incremental']) self.assertTrue(res_dict['backup']['has_dependent_backups']) self.assertIsNone(res_dict['backup']['snapshot_id']) req = webob.Request.blank('/v2/fake/backups/%s' % parent_backup_id) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertFalse(res_dict['backup']['is_incremental']) self.assertTrue(res_dict['backup']['has_dependent_backups']) self.assertIsNone(res_dict['backup']['snapshot_id']) req = webob.Request.blank('/v2/fake/backups/%s' % child_backup_id) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertTrue(res_dict['backup']['is_incremental']) self.assertFalse(res_dict['backup']['has_dependent_backups']) self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id']) db.backup_destroy(context.get_admin_context(), child_backup_id) db.backup_destroy(context.get_admin_context(), backup_id) db.backup_destroy(context.get_admin_context(), parent_backup_id) if snapshot: snapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) cinder-8.0.0/cinder/tests/unit/api/contrib/test_volume_type_encryption.py0000664000567000056710000006331012701406250030155 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_serialization import jsonutils import webob from cinder import context from cinder import db from cinder import test from cinder.tests.unit.api import fakes from cinder import utils def return_volume_type_encryption(context, volume_type_id): return stub_volume_type_encryption() def stub_volume_type_encryption(): values = { 'cipher': 'fake_cipher', 'control_location': 'front-end', 'key_size': 256, 'provider': 'fake_provider', 'volume_type_id': 'fake_type_id', } return values class VolumeTypeEncryptionTest(test.TestCase): _default_volume_type = { 'id': 'fake_type_id', 'name': 'fake_type', } def setUp(self): super(VolumeTypeEncryptionTest, self).setUp() self.flags(host='fake') self.api_path = '/v2/fake/os-volume-types/1/encryption' """to reset notifier drivers left over from other api/contrib tests""" def _get_response(self, volume_type, admin=True, url='/v2/fake/types/%s/encryption', req_method='GET', req_body=None, req_headers=None): ctxt = context.RequestContext('fake', 'fake', is_admin=admin) req = webob.Request.blank(url % volume_type['id']) req.method = req_method req.body = req_body if req_headers: req.headers['Content-Type'] = req_headers return req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) def _create_type_and_encryption(self, volume_type, body=None): if body is None: body = {"encryption": stub_volume_type_encryption()} db.volume_type_create(context.get_admin_context(), volume_type) return self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') def test_index(self): self.stubs.Set(db, 'volume_type_encryption_get', return_volume_type_encryption) volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) res = self._get_response(volume_type) self.assertEqual(200, res.status_code) res_dict = jsonutils.loads(res.body) expected = stub_volume_type_encryption() self.assertEqual(expected, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_index_invalid_type(self): volume_type = self._default_volume_type res = self._get_response(volume_type) self.assertEqual(404, res.status_code) res_dict = jsonutils.loads(res.body) expected = { 'itemNotFound': { 'code': 404, 'message': ('Volume type %s could not be found.' 
% volume_type['id']) } } self.assertEqual(expected, res_dict) def test_show_key_size(self): volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) res = self._get_response(volume_type, url='/v2/fake/types/%s/encryption/key_size') res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_code) self.assertEqual(256, res_dict['key_size']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_show_provider(self): volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) res = self._get_response(volume_type, url='/v2/fake/types/%s/encryption/provider') res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_code) self.assertEqual('fake_provider', res_dict['provider']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_show_item_not_found(self): volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) res = self._get_response(volume_type, url='/v2/fake/types/%s/encryption/fake') res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_code) expected = { 'itemNotFound': { 'code': 404, 'message': ('The resource could not be found.') } } self.assertEqual(expected, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def _create(self, cipher, control_location, key_size, provider): volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) body = {"encryption": {'cipher': cipher, 'control_location': control_location, 'key_size': key_size, 'provider': provider, 'volume_type_id': volume_type['id']}} self.assertEqual(0, len(self.notifier.notifications)) res = self._get_response(volume_type) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_code) # Confirm that volume type has no encryption information # before create. self.assertEqual(b'{}', res.body) # Create encryption specs for the volume type # with the defined body. 
res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) self.assertEqual(1, len(self.notifier.notifications)) # check response self.assertIn('encryption', res_dict) self.assertEqual(cipher, res_dict['encryption']['cipher']) self.assertEqual(control_location, res_dict['encryption']['control_location']) self.assertEqual(key_size, res_dict['encryption']['key_size']) self.assertEqual(provider, res_dict['encryption']['provider']) self.assertEqual(volume_type['id'], res_dict['encryption']['volume_type_id']) # check database encryption = db.volume_type_encryption_get(context.get_admin_context(), volume_type['id']) self.assertIsNotNone(encryption) self.assertEqual(cipher, encryption['cipher']) self.assertEqual(key_size, encryption['key_size']) self.assertEqual(provider, encryption['provider']) self.assertEqual(volume_type['id'], encryption['volume_type_id']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_create_json(self): with mock.patch.object(utils, 'validate_integer') as mock_validate_integer: mock_validate_integer.return_value = 128 self._create('fake_cipher', 'front-end', 128, 'fake_encryptor') self.assertTrue(mock_validate_integer.called) def test_create_xml(self): volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) ctxt = context.RequestContext('fake', 'fake', is_admin=True) req = webob.Request.blank('/v2/fake/types/%s/encryption' % volume_type['id']) req.method = 'POST' req.body = (b'') req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) self.assertEqual(200, res.status_int) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_create_invalid_volume_type(self): volume_type = self._default_volume_type body = {"encryption": stub_volume_type_encryption()} # Attempt to create encryption without first creating type res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) self.assertEqual(0, len(self.notifier.notifications)) self.assertEqual(404, res.status_code) expected = { 'itemNotFound': { 'code': 404, 'message': ('Volume type %s could not be found.' % volume_type['id']) } } self.assertEqual(expected, res_dict) def test_create_encryption_type_exists(self): volume_type = self._default_volume_type body = {"encryption": stub_volume_type_encryption()} self._create_type_and_encryption(volume_type, body) # Try to create encryption specs for a volume type # that already has them. res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': 400, 'message': ('Volume type encryption for type ' 'fake_type_id already exists.') } } self.assertEqual(expected, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_create_volume_exists(self): # Create the volume type and a volume with the volume type. 
volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) db.volume_create(context.get_admin_context(), {'id': 'fake_id', 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy', 'volume_type_id': volume_type['id']}) body = {"encryption": {'cipher': 'cipher', 'key_size': 128, 'control_location': 'front-end', 'provider': 'fake_provider', 'volume_type_id': volume_type['id']}} # Try to create encryption specs for a volume type # with a volume. res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': 400, 'message': ('Cannot create encryption specs. ' 'Volume type in use.') } } self.assertEqual(expected, res_dict) db.volume_destroy(context.get_admin_context(), 'fake_id') db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def _encryption_create_bad_body(self, body, msg='Create body is not valid.'): volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': 400, 'message': (msg) } } self.assertEqual(expected, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_create_no_body(self): msg = "Missing required element 'encryption' in request body." self._encryption_create_bad_body(body=None, msg=msg) def test_create_malformed_entity(self): body = {'encryption': 'string'} msg = "Missing required element 'encryption' in request body." 
self._encryption_create_bad_body(body=body, msg=msg) def test_create_negative_key_size(self): body = {"encryption": {'cipher': 'cipher', 'key_size': -128, 'provider': 'fake_provider', 'volume_type_id': 'volume_type'}} msg = 'key_size must be >= 0' self._encryption_create_bad_body(body=body, msg=msg) def test_create_none_key_size(self): self._create('fake_cipher', 'front-end', None, 'fake_encryptor') def test_create_invalid_control_location(self): body = {"encryption": {'cipher': 'cipher', 'control_location': 'fake_control', 'provider': 'fake_provider', 'volume_type_id': 'volume_type'}} msg = ("Invalid input received: Valid control location are: " "['front-end', 'back-end']") self._encryption_create_bad_body(body=body, msg=msg) def test_create_no_provider(self): body = {"encryption": {'cipher': 'cipher', 'volume_type_id': 'volume_type'}} msg = ("Invalid input received: provider must be defined") self._encryption_create_bad_body(body=body, msg=msg) def test_delete(self): volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) # Test that before create, there's nothing with a get res = self._get_response(volume_type) self.assertEqual(200, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual({}, res_dict) body = {"encryption": {'cipher': 'cipher', 'key_size': 128, 'control_location': 'front-end', 'provider': 'fake_provider', 'volume_type_id': volume_type['id']}} # Create, and test that get returns something res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) res = self._get_response(volume_type, req_method='GET', req_headers='application/json', url='/v2/fake/types/%s/encryption') self.assertEqual(200, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual(volume_type['id'], res_dict['volume_type_id']) # Delete, and test that get returns nothing res = self._get_response(volume_type, req_method='DELETE', req_headers='application/json', url='/v2/fake/types/%s/encryption/provider') self.assertEqual(202, res.status_code) self.assertEqual(0, len(res.body)) res = self._get_response(volume_type, req_method='GET', req_headers='application/json', url='/v2/fake/types/%s/encryption') self.assertEqual(200, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual({}, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_delete_with_volume_in_use(self): # Create the volume type volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) body = {"encryption": {'cipher': 'cipher', 'key_size': 128, 'control_location': 'front-end', 'provider': 'fake_provider', 'volume_type_id': volume_type['id']}} # Create encryption with volume type, and test with GET res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res = self._get_response(volume_type, req_method='GET', req_headers='application/json', url='/v2/fake/types/%s/encryption') self.assertEqual(200, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual(volume_type['id'], res_dict['volume_type_id']) # Create volumes with the volume type db.volume_create(context.get_admin_context(), {'id': 'fake_id', 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy', 'volume_type_id': volume_type['id']}) db.volume_create(context.get_admin_context(), 
{'id': 'fake_id2', 'display_description': 'Test Desc2', 'size': 2, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy', 'volume_type_id': volume_type['id']}) # Delete, and test that there is an error since volumes exist res = self._get_response(volume_type, req_method='DELETE', req_headers='application/json', url='/v2/fake/types/%s/encryption/provider') self.assertEqual(400, res.status_code) res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': 400, 'message': 'Cannot delete encryption specs. ' 'Volume type in use.' } } self.assertEqual(expected, res_dict) # Delete the volumes db.volume_destroy(context.get_admin_context(), 'fake_id') db.volume_destroy(context.get_admin_context(), 'fake_id2') # Delete, and test that get returns nothing res = self._get_response(volume_type, req_method='DELETE', req_headers='application/json', url='/v2/fake/types/%s/encryption/provider') self.assertEqual(202, res.status_code) self.assertEqual(0, len(res.body)) res = self._get_response(volume_type, req_method='GET', req_headers='application/json', url='/v2/fake/types/%s/encryption') self.assertEqual(200, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual({}, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_delete_with_no_encryption(self): volume_type = self._default_volume_type # create a volume type db.volume_type_create(context.get_admin_context(), volume_type) # without creating encryption type, try to delete # and check if 404 is raised. res = self._get_response(volume_type, req_method='DELETE', req_headers='application/json', url='/v2/fake/types/%s/encryption/provider') self.assertEqual(404, res.status_code) expected = { "itemNotFound": { "message": "Volume type encryption for type " "fake_type_id does not exist.", "code": 404 } } self.assertEqual(expected, jsonutils.loads(res.body)) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) @mock.patch('cinder.utils.validate_integer') def test_update_item(self, mock_validate_integer): mock_validate_integer.return_value = 512 volume_type = self._default_volume_type # Create Encryption Specs create_body = {"encryption": {'cipher': 'cipher', 'control_location': 'front-end', 'key_size': 128, 'provider': 'fake_provider', 'volume_type_id': volume_type['id']}} self._create_type_and_encryption(volume_type, create_body) # Update Encryption Specs update_body = {"encryption": {'key_size': 512, 'provider': 'fake_provider2'}} res = self.\ _get_response(volume_type, req_method='PUT', req_body=jsonutils.dump_as_bytes(update_body), req_headers='application/json', url='/v2/fake/types/%s/encryption/fake_type_id') res_dict = jsonutils.loads(res.body) self.assertEqual(512, res_dict['encryption']['key_size']) self.assertEqual('fake_provider2', res_dict['encryption']['provider']) # Get Encryption Specs res = self._get_response(volume_type) res_dict = jsonutils.loads(res.body) # Confirm Encryption Specs self.assertEqual(512, res_dict['key_size']) self.assertEqual('fake_provider2', res_dict['provider']) self.assertTrue(mock_validate_integer.called) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def _encryption_update_bad_body(self, update_body, msg): # Create Volume Type and Encryption volume_type = self._default_volume_type res = self._create_type_and_encryption(volume_type) # Update Encryption res = self.\ _get_response(volume_type, req_method='PUT', req_body=jsonutils.dump_as_bytes(update_body), req_headers='application/json', 
url='/v2/fake/types/%s/encryption/fake_type_id') res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': 400, 'message': (msg) } } # Confirm Failure self.assertEqual(expected, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_update_too_many_items(self): update_body = {"encryption": {'key_size': 512}, "encryption2": {'key_size': 256}} msg = 'Request body contains too many items.' self._encryption_update_bad_body(update_body, msg) def test_update_key_size_non_integer(self): update_body = {"encryption": {'key_size': 'abc'}} msg = 'key_size must be an integer.' self._encryption_update_bad_body(update_body, msg) def test_update_item_invalid_body(self): update_body = {"key_size": "value1"} msg = "Missing required element 'encryption' in request body." self._encryption_update_bad_body(update_body, msg) def _encryption_empty_update(self, update_body): msg = "Missing required element 'encryption' in request body." self._encryption_update_bad_body(update_body, msg) def test_update_no_body(self): self._encryption_empty_update(update_body=None) def test_update_empty_body(self): self._encryption_empty_update(update_body={}) def test_update_with_volume_in_use(self): # Create the volume type and encryption volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) # Create a volume with the volume type db.volume_create(context.get_admin_context(), {'id': 'fake_id', 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy', 'volume_type_id': volume_type['id']}) # Get the Encryption res = self._get_response(volume_type) self.assertEqual(200, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual(volume_type['id'], res_dict['volume_type_id']) # Update, and test that there is an error since volumes exist update_body = {"encryption": {'key_size': 512}} res = self.\ _get_response(volume_type, req_method='PUT', req_body=jsonutils.dump_as_bytes(update_body), req_headers='application/json', url='/v2/fake/types/%s/encryption/fake_type_id') self.assertEqual(400, res.status_code) res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': 400, 'message': 'Cannot update encryption specs. ' 'Volume type in use.' } } self.assertEqual(expected, res_dict) cinder-8.0.0/cinder/tests/unit/api/contrib/test_volume_transfer.py0000664000567000056710000006440212701406250026551 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for volume transfer code. 
""" import mock from xml.dom import minidom from oslo_serialization import jsonutils import six import webob from cinder.api.contrib import volume_transfer from cinder import context from cinder import db from cinder import exception from cinder import test from cinder.tests.unit.api import fakes import cinder.transfer class VolumeTransferAPITestCase(test.TestCase): """Test Case for transfers API.""" def setUp(self): super(VolumeTransferAPITestCase, self).setUp() self.volume_transfer_api = cinder.transfer.API() self.controller = volume_transfer.VolumeTransferController() def _create_transfer(self, volume_id=1, display_name='test_transfer'): """Create a transfer object.""" return self.volume_transfer_api.create(context.get_admin_context(), volume_id, display_name) @staticmethod def _create_volume(display_name='test_volume', display_description='this is a test volume', status='available', size=1, project_id='fake'): """Create a volume object.""" vol = {} vol['host'] = 'fake_host' vol['size'] = size vol['user_id'] = 'fake' vol['project_id'] = project_id vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = status vol['availability_zone'] = 'fake_zone' return db.volume_create(context.get_admin_context(), vol)['id'] def test_show_transfer(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' % transfer['id']) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual('test_transfer', res_dict['transfer']['name']) self.assertEqual(transfer['id'], res_dict['transfer']['id']) self.assertEqual(volume_id, res_dict['transfer']['volume_id']) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_show_transfer_xml_content_type(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' % transfer['id']) req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) transfer_xml = dom.getElementsByTagName('transfer') name = transfer_xml.item(0).getAttribute('name') self.assertEqual('test_transfer', name.strip()) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_show_transfer_with_transfer_NotFound(self): req = webob.Request.blank('/v2/fake/os-volume-transfer/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Transfer 1234 could not be found.', res_dict['itemNotFound']['message']) def test_list_transfers_json(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v2/fake/os-volume-transfer') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = 
jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual(4, len(res_dict['transfers'][0])) self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) self.assertEqual(4, len(res_dict['transfers'][1])) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) db.transfer_destroy(context.get_admin_context(), transfer2['id']) db.transfer_destroy(context.get_admin_context(), transfer1['id']) db.volume_destroy(context.get_admin_context(), volume_id_1) db.volume_destroy(context.get_admin_context(), volume_id_2) def test_list_transfers_xml(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v2/fake/os-volume-transfer') req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) transfer_list = dom.getElementsByTagName('transfer') self.assertEqual(3, transfer_list.item(0).attributes.length) self.assertEqual(transfer1['id'], transfer_list.item(0).getAttribute('id')) self.assertEqual(3, transfer_list.item(1).attributes.length) self.assertEqual(transfer2['id'], transfer_list.item(1).getAttribute('id')) db.transfer_destroy(context.get_admin_context(), transfer2['id']) db.transfer_destroy(context.get_admin_context(), transfer1['id']) db.volume_destroy(context.get_admin_context(), volume_id_2) db.volume_destroy(context.get_admin_context(), volume_id_1) def test_list_transfers_detail_json(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v2/fake/os-volume-transfer/detail') req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual(5, len(res_dict['transfers'][0])) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) self.assertEqual(volume_id_1, res_dict['transfers'][0]['volume_id']) self.assertEqual(5, len(res_dict['transfers'][1])) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) self.assertEqual(transfer2['id'], res_dict['transfers'][1]['id']) self.assertEqual(volume_id_2, res_dict['transfers'][1]['volume_id']) db.transfer_destroy(context.get_admin_context(), transfer2['id']) db.transfer_destroy(context.get_admin_context(), transfer1['id']) db.volume_destroy(context.get_admin_context(), volume_id_2) db.volume_destroy(context.get_admin_context(), volume_id_1) def test_list_transfers_detail_xml(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v2/fake/os-volume-transfer/detail') req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) transfer_detail = dom.getElementsByTagName('transfer') self.assertEqual(4, 
transfer_detail.item(0).attributes.length) self.assertEqual( 'test_transfer', transfer_detail.item(0).getAttribute('name')) self.assertEqual( transfer1['id'], transfer_detail.item(0).getAttribute('id')) self.assertEqual(volume_id_1, transfer_detail.item(0).getAttribute('volume_id')) self.assertEqual(4, transfer_detail.item(1).attributes.length) self.assertEqual( 'test_transfer', transfer_detail.item(1).getAttribute('name')) self.assertEqual( transfer2['id'], transfer_detail.item(1).getAttribute('id')) self.assertEqual( volume_id_2, transfer_detail.item(1).getAttribute('volume_id')) db.transfer_destroy(context.get_admin_context(), transfer2['id']) db.transfer_destroy(context.get_admin_context(), transfer1['id']) db.volume_destroy(context.get_admin_context(), volume_id_2) db.volume_destroy(context.get_admin_context(), volume_id_1) def test_list_transfers_with_all_tenants(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5, project_id='fake1') transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = fakes.HTTPRequest.blank('/v2/fake/os-volume-transfer?' 'all_tenants=1', use_admin_context=True) res_dict = self.controller.index(req) expected = [(transfer1['id'], 'test_transfer'), (transfer2['id'], 'test_transfer')] ret = [] for item in res_dict['transfers']: ret.append((item['id'], item['name'])) self.assertEqual(set(expected), set(ret)) db.transfer_destroy(context.get_admin_context(), transfer2['id']) db.transfer_destroy(context.get_admin_context(), transfer1['id']) db.volume_destroy(context.get_admin_context(), volume_id_1) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_string_length') def test_create_transfer_json(self, mock_validate): volume_id = self._create_volume(status='available', size=5) body = {"transfer": {"name": "transfer1", "volume_id": volume_id}} req = webob.Request.blank('/v2/fake/os-volume-transfer') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(202, res.status_int) self.assertIn('id', res_dict['transfer']) self.assertIn('auth_key', res_dict['transfer']) self.assertIn('created_at', res_dict['transfer']) self.assertIn('name', res_dict['transfer']) self.assertIn('volume_id', res_dict['transfer']) self.assertTrue(mock_validate.called) db.volume_destroy(context.get_admin_context(), volume_id) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_string_length') def test_create_transfer_xml(self, mock_validate): volume_size = 2 volume_id = self._create_volume(status='available', size=volume_size) body = '' % volume_id if isinstance(body, six.text_type): body = body.encode('utf-8') req = webob.Request.blank('/v2/fake/os-volume-transfer') req.body = body req.method = 'POST' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) dom = minidom.parseString(res.body) transfer = dom.getElementsByTagName('transfer') self.assertTrue(transfer.item(0).hasAttribute('id')) self.assertTrue(transfer.item(0).hasAttribute('auth_key')) self.assertTrue(transfer.item(0).hasAttribute('created_at')) self.assertEqual('transfer-001', transfer.item(0).getAttribute('name')) self.assertTrue(transfer.item(0).hasAttribute('volume_id')) self.assertTrue(mock_validate.called) db.volume_destroy(context.get_admin_context(), volume_id) def 
test_create_transfer_with_no_body(self): req = webob.Request.blank('/v2/fake/os-volume-transfer') req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual("Missing required element 'transfer' in " "request body.", res_dict['badRequest']['message']) def test_create_transfer_with_body_KeyError(self): body = {"transfer": {"name": "transfer1"}} req = webob.Request.blank('/v2/fake/os-volume-transfer') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Incorrect request body format', res_dict['badRequest']['message']) def test_create_transfer_with_VolumeNotFound(self): body = {"transfer": {"name": "transfer1", "volume_id": 1234}} req = webob.Request.blank('/v2/fake/os-volume-transfer') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Volume 1234 could not be found.', res_dict['itemNotFound']['message']) def test_create_transfer_with_InvalidVolume(self): volume_id = self._create_volume(status='attached') body = {"transfer": {"name": "transfer1", "volume_id": volume_id}} req = webob.Request.blank('/v2/fake/os-volume-transfer') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid volume: status must be available', res_dict['badRequest']['message']) db.volume_destroy(context.get_admin_context(), volume_id) def test_delete_transfer_awaiting_transfer(self): volume_id = self._create_volume() transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' % transfer['id']) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) # verify transfer has been deleted req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' % transfer['id']) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Transfer %s could not be found.' 
% transfer['id'], res_dict['itemNotFound']['message']) self.assertEqual(db.volume_get(context.get_admin_context(), volume_id)['status'], 'available') db.volume_destroy(context.get_admin_context(), volume_id) def test_delete_transfer_with_transfer_NotFound(self): req = webob.Request.blank('/v2/fake/os-volume-transfer/9999') req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('Transfer 9999 could not be found.', res_dict['itemNotFound']['message']) def test_accept_transfer_volume_id_specified_json(self): volume_id = self._create_volume() transfer = self._create_transfer(volume_id) svc = self.start_service('volume', host='fake_host') body = {"accept": {"id": transfer['id'], "auth_key": transfer['auth_key']}} req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % transfer['id']) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(202, res.status_int) self.assertEqual(transfer['id'], res_dict['transfer']['id']) self.assertEqual(volume_id, res_dict['transfer']['volume_id']) # cleanup svc.stop() def test_accept_transfer_volume_id_specified_xml(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) svc = self.start_service('volume', host='fake_host') body = '' % transfer['auth_key'] if isinstance(body, six.text_type): body = body.encode('utf-8') req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % transfer['id']) req.body = body req.method = 'POST' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) dom = minidom.parseString(res.body) accept = dom.getElementsByTagName('transfer') self.assertEqual(transfer['id'], accept.item(0).getAttribute('id')) self.assertEqual(volume_id, accept.item(0).getAttribute('volume_id')) db.volume_destroy(context.get_admin_context(), volume_id) # cleanup svc.stop() def test_accept_transfer_with_no_body(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % transfer['id']) req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual("Missing required element 'accept' in request body.", res_dict['badRequest']['message']) db.volume_destroy(context.get_admin_context(), volume_id) def test_accept_transfer_with_body_KeyError(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % transfer['id']) body = {"": {}} req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual("Missing required 
element 'accept' in request body.", res_dict['badRequest']['message']) def test_accept_transfer_invalid_id_auth_key(self): volume_id = self._create_volume() transfer = self._create_transfer(volume_id) body = {"accept": {"id": transfer['id'], "auth_key": 1}} req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % transfer['id']) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual(res_dict['badRequest']['message'], 'Invalid auth key: Attempt to transfer %s with ' 'invalid auth key.' % transfer['id']) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_accept_transfer_with_invalid_transfer(self): volume_id = self._create_volume() transfer = self._create_transfer(volume_id) body = {"accept": {"id": transfer['id'], "auth_key": 1}} req = webob.Request.blank('/v2/fake/os-volume-transfer/1/accept') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('TransferNotFound: Transfer 1 could not be found.', res_dict['itemNotFound']['message']) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_accept_transfer_with_VolumeSizeExceedsAvailableQuota(self): def fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota( cls, context, transfer, volume_id): raise exception.VolumeSizeExceedsAvailableQuota(requested='2', consumed='2', quota='3') self.stubs.Set( cinder.transfer.API, 'accept', fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota) volume_id = self._create_volume() transfer = self._create_transfer(volume_id) body = {"accept": {"id": transfer['id'], "auth_key": transfer['auth_key']}} req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % transfer['id']) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(413, res.status_int) self.assertEqual(413, res_dict['overLimit']['code']) self.assertEqual('Requested volume or snapshot exceeds allowed ' 'gigabytes quota. 
Requested 2G, quota is 3G and ' '2G has been consumed.', res_dict['overLimit']['message']) def test_accept_transfer_with_VolumeLimitExceeded(self): def fake_transfer_api_accept_throwing_VolumeLimitExceeded(cls, context, transfer, volume_id): raise exception.VolumeLimitExceeded(allowed=1) self.stubs.Set(cinder.transfer.API, 'accept', fake_transfer_api_accept_throwing_VolumeLimitExceeded) volume_id = self._create_volume() transfer = self._create_transfer(volume_id) body = {"accept": {"id": transfer['id'], "auth_key": transfer['auth_key']}} req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % transfer['id']) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(413, res.status_int) self.assertEqual(413, res_dict['overLimit']['code']) self.assertEqual("VolumeLimitExceeded: Maximum number of volumes " "allowed (1) exceeded for quota 'volumes'.", res_dict['overLimit']['message']) cinder-8.0.0/cinder/tests/unit/api/contrib/test_quotas_classes.py0000664000567000056710000001500012701406250026355 0ustar jenkinsjenkins00000000000000# Copyright 2013 Huawei Technologies Co., Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests for cinder.api.contrib.quota_classes.py """ import mock from lxml import etree import webob.exc from cinder.api.contrib import quota_classes from cinder import context from cinder import quota from cinder import test from cinder.volume import volume_types QUOTAS = quota.QUOTAS def make_body(root=True, gigabytes=1000, snapshots=10, volumes=10, backups=10, backup_gigabytes=1000, per_volume_gigabytes=-1, volume_types_faked=None, tenant_id='foo'): resources = {'gigabytes': gigabytes, 'snapshots': snapshots, 'volumes': volumes, 'backups': backups, 'per_volume_gigabytes': per_volume_gigabytes, 'backup_gigabytes': backup_gigabytes} if not volume_types_faked: volume_types_faked = {'fake_type': None} for volume_type in volume_types_faked: resources['gigabytes_' + volume_type] = -1 resources['snapshots_' + volume_type] = -1 resources['volumes_' + volume_type] = -1 if tenant_id: resources['id'] = tenant_id if root: result = {'quota_class_set': resources} else: result = resources return result def make_response_body(root=True, ctxt=None, quota_class='foo', request_body=None, tenant_id='foo'): resources = {} if not ctxt: ctxt = context.get_admin_context() resources.update(QUOTAS.get_class_quotas(ctxt, quota_class)) if not request_body and not request_body['quota_class_set']: resources.update(request_body['quota_class_set']) if tenant_id: resources['id'] = tenant_id if root: result = {'quota_class_set': resources} else: result = resources return result class QuotaClassSetsControllerTest(test.TestCase): def setUp(self): super(QuotaClassSetsControllerTest, self).setUp() self.controller = quota_classes.QuotaClassSetsController() self.ctxt = context.get_admin_context() self.req = mock.Mock() self.req.environ = {'cinder.context': self.ctxt} self.req.environ['cinder.context'].is_admin = True def test_show(self): volume_types.create(self.ctxt, 'fake_type') result = self.controller.show(self.req, 'foo') self.assertDictMatch(make_body(), result) def test_show_not_authorized(self): self.req.environ['cinder.context'].is_admin = False self.req.environ['cinder.context'].user_id = 'bad_user' self.req.environ['cinder.context'].project_id = 'bad_project' self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, self.req, 'foo') def test_update(self): volume_types.create(self.ctxt, 'fake_type') body = make_body(gigabytes=2000, snapshots=15, volumes=5, tenant_id=None) result = self.controller.update(self.req, 'foo', body) self.assertDictMatch(body, result) @mock.patch('cinder.api.openstack.wsgi.Controller.validate_string_length') @mock.patch('cinder.utils.validate_integer') def test_update_limit(self, mock_validate_integer, mock_validate): mock_validate_integer.return_value = 5 volume_types.create(self.ctxt, 'fake_type') body = make_body(volumes=5) result = self.controller.update(self.req, 'foo', body) self.assertEqual(5, result['quota_class_set']['volumes']) self.assertTrue(mock_validate.called) self.assertTrue(mock_validate_integer.called) def test_update_wrong_key(self): volume_types.create(self.ctxt, 'fake_type') body = {'quota_class_set': {'bad': 'bad'}} result = self.controller.update(self.req, 'foo', body) self.assertDictMatch(make_body(tenant_id=None), result) def test_update_invalid_key_value(self): body = {'quota_class_set': {'gigabytes': "should_be_int"}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, 'foo', body) def test_update_bad_quota_limit(self): body = {'quota_class_set': {'gigabytes': -1000}} self.assertRaises(webob.exc.HTTPBadRequest, 
self.controller.update, self.req, 'foo', body) def test_update_no_admin(self): self.req.environ['cinder.context'].is_admin = False self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, self.req, 'foo', make_body(tenant_id=None)) def test_update_with_more_volume_types(self): volume_types.create(self.ctxt, 'fake_type_1') volume_types.create(self.ctxt, 'fake_type_2') body = {'quota_class_set': {'gigabytes_fake_type_1': 1111, 'volumes_fake_type_2': 2222}} result = self.controller.update(self.req, 'foo', body) self.assertDictMatch(make_response_body(ctxt=self.ctxt, quota_class='foo', request_body=body, tenant_id=None), result) class QuotaClassesSerializerTest(test.TestCase): def setUp(self): super(QuotaClassesSerializerTest, self).setUp() self.req = mock.Mock() self.req.environ = {'cinder.context': context.get_admin_context()} def test_update_serializer(self): serializer = quota_classes.QuotaClassTemplate() quota_class_set = make_body(root=False) text = serializer.serialize({'quota_class_set': quota_class_set}) tree = etree.fromstring(text) self.assertEqual('quota_class_set', tree.tag) self.assertEqual(tree.get('id'), quota_class_set['id']) body = make_body(root=False, tenant_id=None) for node in tree: self.assertIn(node.tag, body) self.assertEqual(str(body[node.tag]), node.text) cinder-8.0.0/cinder/tests/unit/api/contrib/test_scheduler_stats.py0000664000567000056710000000750212701406250026530 0ustar jenkinsjenkins00000000000000# Copyright 2013 eBay Inc. # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from cinder.api.contrib import scheduler_stats from cinder import context from cinder import test from cinder.tests.unit.api import fakes def schedule_rpcapi_get_pools(self, context, filters=None): all_pools = [] pool1 = dict(name='pool1', capabilities=dict( total_capacity=1024, free_capacity=100, volume_backend_name='pool1', reserved_percentage=0, driver_version='1.0.0', storage_protocol='iSCSI', QoS_support='False', updated=None)) all_pools.append(pool1) pool2 = dict(name='pool2', capabilities=dict( total_capacity=512, free_capacity=200, volume_backend_name='pool2', reserved_percentage=0, driver_version='1.0.1', storage_protocol='iSER', QoS_support='True', updated=None)) all_pools.append(pool2) return all_pools @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools', schedule_rpcapi_get_pools) class SchedulerStatsAPITest(test.TestCase): def setUp(self): super(SchedulerStatsAPITest, self).setUp() self.flags(host='fake') self.controller = scheduler_stats.SchedulerStatsController() self.ctxt = context.RequestContext('admin', 'fake', True) def test_get_pools_summery(self): req = fakes.HTTPRequest.blank('/v2/fake/scheduler_stats') req.environ['cinder.context'] = self.ctxt res = self.controller.get_pools(req) self.assertEqual(2, len(res['pools'])) expected = { 'pools': [ { 'name': 'pool1', }, { 'name': 'pool2', } ] } self.assertDictMatch(expected, res) def test_get_pools_detail(self): req = fakes.HTTPRequest.blank('/v2/fake/scheduler_stats?detail=True') req.environ['cinder.context'] = self.ctxt res = self.controller.get_pools(req) self.assertEqual(2, len(res['pools'])) expected = { 'pools': [ { 'name': 'pool1', 'capabilities': { 'updated': None, 'total_capacity': 1024, 'free_capacity': 100, 'volume_backend_name': 'pool1', 'reserved_percentage': 0, 'driver_version': '1.0.0', 'storage_protocol': 'iSCSI', 'QoS_support': 'False', } }, { 'name': 'pool2', 'capabilities': { 'updated': None, 'total_capacity': 512, 'free_capacity': 200, 'volume_backend_name': 'pool2', 'reserved_percentage': 0, 'driver_version': '1.0.1', 'storage_protocol': 'iSER', 'QoS_support': 'True', } } ] } self.assertDictMatch(expected, res) cinder-8.0.0/cinder/tests/unit/api/contrib/test_volume_manage.py0000664000567000056710000002140112701406250026145 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils import webob from cinder import context from cinder import exception from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_volume def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v2'] = api return mapper def db_service_get_by_host_and_topic(context, host, topic): """Replacement for db.service_get_by_host_and_topic. We stub the db.service_get_by_host_and_topic method to return something for a specific host, and raise an exception for anything else. 
We don't use the returned data (the code under test just use the call to check for existence of a host, so the content returned doesn't matter. """ if host == 'host_ok': return {} raise exception.ServiceNotFound(service_id=host) # Some of the tests check that volume types are correctly validated during a # volume manage operation. This data structure represents an existing volume # type. fake_vt = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'name': 'good_fakevt'} def vt_get_volume_type_by_name(context, name): """Replacement for cinder.volume.volume_types.get_volume_type_by_name. Overrides cinder.volume.volume_types.get_volume_type_by_name to return the volume type based on inspection of our fake structure, rather than going to the Cinder DB. """ if name == fake_vt['name']: return fake_vt raise exception.VolumeTypeNotFoundByName(volume_type_name=name) def vt_get_volume_type(context, vt_id): """Replacement for cinder.volume.volume_types.get_volume_type. Overrides cinder.volume.volume_types.get_volume_type to return the volume type based on inspection of our fake structure, rather than going to the Cinder DB. """ if vt_id == fake_vt['id']: return fake_vt raise exception.VolumeTypeNotFound(volume_type_id=vt_id) def api_manage(*args, **kwargs): """Replacement for cinder.volume.api.API.manage_existing. Overrides cinder.volume.api.API.manage_existing to return some fake volume data structure, rather than initiating a real volume managing. Note that we don't try to replicate any passed-in information (e.g. name, volume type) in the returned structure. """ ctx = context.RequestContext('admin', 'fake', True) vol = { 'status': 'creating', 'display_name': 'fake_name', 'availability_zone': 'nova', 'tenant_id': 'fake', 'id': 'ffffffff-0000-ffff-0000-ffffffffffff', 'volume_type': None, 'snapshot_id': None, 'user_id': 'fake', 'size': 0, 'attach_status': 'detached', 'volume_type_id': None} return fake_volume.fake_volume_obj(ctx, **vol) @mock.patch('cinder.db.service_get_by_host_and_topic', db_service_get_by_host_and_topic) @mock.patch('cinder.volume.volume_types.get_volume_type_by_name', vt_get_volume_type_by_name) @mock.patch('cinder.volume.volume_types.get_volume_type', vt_get_volume_type) class VolumeManageTest(test.TestCase): """Test cases for cinder/api/contrib/volume_manage.py The API extension adds a POST /os-volume-manage API that is passed a cinder host name, and a driver-specific reference parameter. If everything is passed correctly, then the cinder.volume.api.API.manage_existing method is invoked to manage an existing storage object on the host. In this set of test cases, we are ensuring that the code correctly parses the request structure and raises the correct exceptions when things are not right, and calls down into cinder.volume.api.API.manage_existing with the correct arguments. """ def setUp(self): super(VolumeManageTest, self).setUp() def _get_resp(self, body): """Helper to execute an os-volume-manage API call.""" req = webob.Request.blank('/v2/fake/os-volume-manage') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = context.RequestContext('admin', 'fake', True) req.body = jsonutils.dump_as_bytes(body) res = req.get_response(app()) return res @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_manage_volume_ok(self, mock_validate, mock_api_manage): """Test successful manage volume execution. 
Tests for correct operation when valid arguments are passed in the request body. We ensure that cinder.volume.api.API.manage_existing got called with the correct arguments, and that we return the correct HTTP code to the caller. """ body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} res = self._get_resp(body) self.assertEqual(202, res.status_int, res) # Check that the manage API was called with the correct arguments. self.assertEqual(1, mock_api_manage.call_count) args = mock_api_manage.call_args[0] self.assertEqual(args[1], body['volume']['host']) self.assertEqual(args[2], body['volume']['ref']) self.assertTrue(mock_validate.called) def test_manage_volume_missing_host(self): """Test correct failure when host is not specified.""" body = {'volume': {'ref': 'fake_ref'}} res = self._get_resp(body) self.assertEqual(400, res.status_int) def test_manage_volume_missing_ref(self): """Test correct failure when the ref is not specified.""" body = {'volume': {'host': 'host_ok'}} res = self._get_resp(body) self.assertEqual(400, res.status_int) pass @mock.patch('cinder.volume.api.API.manage_existing', api_manage) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_manage_volume_volume_type_by_uuid(self, mock_validate): """Tests for correct operation when a volume type is specified by ID. We wrap cinder.volume.api.API.manage_existing so that managing is not actually attempted. """ body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}} res = self._get_resp(body) self.assertEqual(202, res.status_int, res) self.assertTrue(mock_validate.called) pass @mock.patch('cinder.volume.api.API.manage_existing', api_manage) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_manage_volume_volume_type_by_name(self, mock_validate): """Tests for correct operation when a volume type is specified by name. We wrap cinder.volume.api.API.manage_existing so that managing is not actually attempted. """ body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'good_fakevt'}} res = self._get_resp(body) self.assertEqual(202, res.status_int, res) self.assertTrue(mock_validate.called) pass def test_manage_volume_bad_volume_type_by_uuid(self): """Test failure on nonexistent volume type specified by ID.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'}} res = self._get_resp(body) self.assertEqual(404, res.status_int, res) pass def test_manage_volume_bad_volume_type_by_name(self): """Test failure on nonexistent volume type specified by name.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'bad_fakevt'}} res = self._get_resp(body) self.assertEqual(404, res.status_int, res) pass cinder-8.0.0/cinder/tests/unit/api/contrib/test_types_extra_specs.py0000664000567000056710000002641112701406250027100 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree import mock import webob from cinder.api.contrib import types_extra_specs from cinder import exception from cinder import test from cinder.tests.unit.api import fakes import cinder.wsgi def return_create_volume_type_extra_specs(context, volume_type_id, extra_specs): return stub_volume_type_extra_specs() def return_volume_type_extra_specs(context, volume_type_id): return stub_volume_type_extra_specs() def return_empty_volume_type_extra_specs(context, volume_type_id): return {} def delete_volume_type_extra_specs(context, volume_type_id, key): pass def delete_volume_type_extra_specs_not_found(context, volume_type_id, key): raise exception.VolumeTypeExtraSpecsNotFound("Not Found") def stub_volume_type_extra_specs(): specs = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} return specs def volume_type_get(context, id, inactive=False, expected_fields=None): pass class VolumeTypesExtraSpecsTest(test.TestCase): def setUp(self): super(VolumeTypesExtraSpecsTest, self).setUp() self.flags(host='fake') self.stubs.Set(cinder.db, 'volume_type_get', volume_type_get) self.api_path = '/v2/fake/os-volume-types/1/extra_specs' self.controller = types_extra_specs.VolumeTypeExtraSpecsController() # Reset notifier drivers left over from other api/contrib tests. def test_index(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', return_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.index(req, 1) self.assertEqual('value1', res_dict['extra_specs']['key1']) def test_index_no_data(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', return_empty_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.index(req, 1) self.assertEqual(0, len(res_dict['extra_specs'])) def test_show(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', return_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key5') res_dict = self.controller.show(req, 1, 'key5') self.assertEqual('value5', res_dict['key5']) def test_show_spec_not_found(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', return_empty_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 1, 'key6') def test_delete(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete', delete_volume_type_extra_specs) self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path + '/key5') self.controller.delete(req, 1, 'key5') self.assertEqual(1, len(self.notifier.notifications)) def test_delete_not_found(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete', delete_volume_type_extra_specs_not_found) req = fakes.HTTPRequest.blank(self.api_path + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1, 'key6') @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_string_length') def test_create(self, mock_validate): self.stubs.Set(cinder.db, 'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs) body = {"extra_specs": {"key1": "value1"}} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) self.assertEqual('value1', res_dict['extra_specs']['key1']) @mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create') @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_string_length') def test_create_key_allowed_chars( self, mock_validate, volume_type_extra_specs_update_or_create): mock_return_value = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} volume_type_extra_specs_update_or_create.\ return_value = mock_return_value body = {"extra_specs": {"other_alphanum.-_:": "value1"}} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) self.assertEqual('value1', res_dict['extra_specs']['other_alphanum.-_:']) @mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create') @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_string_length') def test_create_too_many_keys_allowed_chars( self, mock_validate, volume_type_extra_specs_update_or_create): mock_return_value = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} volume_type_extra_specs_update_or_create.\ return_value = mock_return_value body = {"extra_specs": {"other_alphanum.-_:": "value1", "other2_alphanum.-_:": "value2", "other3_alphanum.-_:": "value3"}} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) self.assertEqual('value1', res_dict['extra_specs']['other_alphanum.-_:']) self.assertEqual('value2', res_dict['extra_specs']['other2_alphanum.-_:']) self.assertEqual('value3', res_dict['extra_specs']['other3_alphanum.-_:']) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_string_length') def test_update_item(self, mock_validate): self.stubs.Set(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1"} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path + '/key1') res_dict = self.controller.update(req, 1, 'key1', body) self.assertEqual(1, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) self.assertEqual('value1', res_dict['key1']) def test_update_item_too_many_keys(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1", "key2": "value2"} req = fakes.HTTPRequest.blank(self.api_path + '/key1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'key1', body) def test_update_item_body_uri_mismatch(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1"} req = fakes.HTTPRequest.blank(self.api_path + '/bad') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'bad', body) def _extra_specs_empty_update(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') req.method 
= 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '1', body) def test_update_no_body(self): self._extra_specs_empty_update(body=None) def test_update_empty_body(self): self._extra_specs_empty_update(body={}) def _extra_specs_create_bad_body(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, '1', body) def test_create_no_body(self): self._extra_specs_create_bad_body(body=None) def test_create_missing_volume(self): body = {'foo': {'a': 'b'}} self._extra_specs_create_bad_body(body=body) def test_create_malformed_entity(self): body = {'extra_specs': 'string'} self._extra_specs_create_bad_body(body=body) def test_create_invalid_key(self): body = {"extra_specs": {"ke/y1": "value1"}} self._extra_specs_create_bad_body(body=body) def test_create_invalid_too_many_key(self): body = {"key1": "value1", "ke/y2": "value2", "key3": "value3"} self._extra_specs_create_bad_body(body=body) class VolumeTypeExtraSpecsSerializerTest(test.TestCase): def test_index_create_serializer(self): serializer = types_extra_specs.VolumeTypeExtraSpecsTemplate() # Just getting some input data extra_specs = stub_volume_type_extra_specs() text = serializer.serialize(dict(extra_specs=extra_specs)) tree = etree.fromstring(text) self.assertEqual('extra_specs', tree.tag) self.assertEqual(len(extra_specs), len(tree)) seen = set(extra_specs.keys()) for child in tree: self.assertIn(child.tag, seen) self.assertEqual(extra_specs[child.tag], child.text) seen.remove(child.tag) self.assertEqual(0, len(seen)) def test_update_show_serializer(self): serializer = types_extra_specs.VolumeTypeExtraSpecTemplate() exemplar = dict(key1='value1') text = serializer.serialize(exemplar) tree = etree.fromstring(text) self.assertEqual('key1', tree.tag) self.assertEqual('value1', tree.text) self.assertEqual(0, len(tree)) cinder-8.0.0/cinder/tests/unit/api/contrib/test_extended_snapshot_attributes.py0000664000567000056710000001022312701406250031313 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
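# A minimal, hedged sketch (an illustrative addition, not part of the
# original tarball) of the flat XML layout the
# VolumeTypeExtraSpecsSerializerTest cases above assert on: one child
# element per extra-spec key, with the spec value as the element text.
# Only lxml.etree is assumed; the helper name and sample dict are
# placeholders, not cinder API.
from lxml import etree

def _example_serialize_extra_specs(extra_specs):
    # Builds <extra_specs><key1>value1</key1>...</extra_specs>
    root = etree.Element('extra_specs')
    for key, value in sorted(extra_specs.items()):
        etree.SubElement(root, key).text = value
    return etree.tostring(root)

# _example_serialize_extra_specs({'key1': 'value1'})
# -> b'<extra_specs><key1>value1</key1></extra_specs>'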
from lxml import etree import mock from oslo_serialization import jsonutils import webob from cinder.api.contrib import extended_snapshot_attributes from cinder import context from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume UUID1 = '00000000-0000-0000-0000-000000000001' UUID2 = '00000000-0000-0000-0000-000000000002' def _get_default_snapshot_param(): return {'id': UUID1, 'volume_id': 12, 'status': 'available', 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', 'project_id': 'fake', 'progress': '0%', 'expected_attrs': ['metadata']} def fake_snapshot_get(self, context, snapshot_id): param = _get_default_snapshot_param() return param def fake_snapshot_get_all(self, context, search_opts=None): param = _get_default_snapshot_param() return [param] class ExtendedSnapshotAttributesTest(test.TestCase): content_type = 'application/json' prefix = 'os-extended-snapshot-attributes:' def setUp(self): super(ExtendedSnapshotAttributesTest, self).setUp() def _make_request(self, url): req = webob.Request.blank(url) req.headers['Accept'] = self.content_type res = req.get_response(fakes.wsgi_app()) return res def _get_snapshot(self, body): return jsonutils.loads(body).get('snapshot') def _get_snapshots(self, body): return jsonutils.loads(body).get('snapshots') def assertSnapshotAttributes(self, snapshot, project_id, progress): self.assertEqual(project_id, snapshot.get('%sproject_id' % self.prefix)) self.assertEqual(progress, snapshot.get('%sprogress' % self.prefix)) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show(self, snapshot_get_by_id, volume_get_by_id, snapshot_metadata_get): ctx = context.RequestContext('fake', 'fake', auth_token=True) snapshot = _get_default_snapshot_param() snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj url = '/v2/fake/snapshots/%s' % UUID1 res = self._make_request(url) self.assertEqual(200, res.status_int) self.assertSnapshotAttributes(self._get_snapshot(res.body), project_id='fake', progress='0%') def test_detail(self): url = '/v2/fake/snapshots/detail' res = self._make_request(url) self.assertEqual(200, res.status_int) for snapshot in self._get_snapshots(res.body): self.assertSnapshotAttributes(snapshot, project_id='fake', progress='0%') class ExtendedSnapshotAttributesXmlTest(ExtendedSnapshotAttributesTest): content_type = 'application/xml' ext = extended_snapshot_attributes prefix = '{%s}' % ext.Extended_snapshot_attributes.namespace def _get_snapshot(self, body): return etree.XML(body) def _get_snapshots(self, body): return etree.XML(body).getchildren() cinder-8.0.0/cinder/tests/unit/api/contrib/test_volume_actions.py0000664000567000056710000012767012701406257026403 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import uuid import mock import oslo_messaging as messaging from oslo_serialization import jsonutils import webob from cinder.api.contrib import volume_actions from cinder import context from cinder import exception from cinder.image import glance from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import stubs from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder import volume from cinder.volume import api as volume_api from cinder.volume import rpcapi as volume_rpcapi class VolumeActionsTest(test.TestCase): _actions = ('os-reserve', 'os-unreserve') _methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume') def setUp(self): super(VolumeActionsTest, self).setUp() self.context = context.RequestContext('fake', 'fake', is_admin=False) self.UUID = uuid.uuid4() self.controller = volume_actions.VolumeActionsController() self.api_patchers = {} for _meth in self._methods: self.api_patchers[_meth] = mock.patch('cinder.volume.api.API.' + _meth) self.api_patchers[_meth].start() self.addCleanup(self.api_patchers[_meth].stop) self.api_patchers[_meth].return_value = True db_vol = {'id': 'fake', 'host': 'fake', 'status': 'available', 'size': 1, 'migration_status': None, 'volume_type_id': 'fake', 'project_id': 'project_id'} vol = fake_volume.fake_volume_obj(self.context, **db_vol) self.get_patcher = mock.patch('cinder.volume.api.API.get') self.mock_volume_get = self.get_patcher.start() self.addCleanup(self.get_patcher.stop) self.mock_volume_get.return_value = vol self.update_patcher = mock.patch('cinder.volume.api.API.update') self.mock_volume_update = self.update_patcher.start() self.addCleanup(self.update_patcher.stop) self.mock_volume_update.return_value = vol self.db_get_patcher = mock.patch('cinder.db.sqlalchemy.api.volume_get') self.mock_volume_db_get = self.db_get_patcher.start() self.addCleanup(self.db_get_patcher.stop) self.mock_volume_db_get.return_value = vol self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') def test_simple_api_actions(self): app = fakes.wsgi_app() for _action in self._actions: req = webob.Request.blank('/v2/fake/volumes/%s/action' % self.UUID) req.method = 'POST' req.body = jsonutils.dump_as_bytes({_action: None}) req.content_type = 'application/json' res = req.get_response(app) self.assertEqual(202, res.status_int) def test_initialize_connection(self): with mock.patch.object(volume_api.API, 'initialize_connection') as init_conn: init_conn.return_value = {} body = {'os-initialize_connection': {'connector': 'fake'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) def test_initialize_connection_without_connector(self): with mock.patch.object(volume_api.API, 'initialize_connection') as init_conn: init_conn.return_value = {} body = {'os-initialize_connection': {}} req = 
webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_initialize_connection_exception(self): with mock.patch.object(volume_api.API, 'initialize_connection') as init_conn: init_conn.side_effect = \ exception.VolumeBackendAPIException(data=None) body = {'os-initialize_connection': {'connector': 'fake'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(500, res.status_int) def test_terminate_connection(self): with mock.patch.object(volume_api.API, 'terminate_connection') as terminate_conn: terminate_conn.return_value = {} body = {'os-terminate_connection': {'connector': 'fake'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) def test_terminate_connection_without_connector(self): with mock.patch.object(volume_api.API, 'terminate_connection') as terminate_conn: terminate_conn.return_value = {} body = {'os-terminate_connection': {}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_terminate_connection_with_exception(self): with mock.patch.object(volume_api.API, 'terminate_connection') as terminate_conn: terminate_conn.side_effect = \ exception.VolumeBackendAPIException(data=None) body = {'os-terminate_connection': {'connector': 'fake'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(500, res.status_int) def test_attach_to_instance(self): body = {'os-attach': {'instance_uuid': 'fake', 'mountpoint': '/dev/vdc', 'mode': 'rw'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) body = {'os-attach': {'instance_uuid': 'fake', 'host_name': 'fake_host', 'mountpoint': '/dev/vdc'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.headers["content-type"] = "application/json" req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) def test_attach_to_host(self): # attach the volume using 'read-write' mode by default body = {'os-attach': {'host_name': 'fake_host', 'mountpoint': '/dev/vdc'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) def test_volume_attach_to_instance_raises_remote_error(self): volume_remote_error = \ messaging.RemoteError(exc_type='InvalidUUID') with mock.patch.object(volume_api.API, 'attach', side_effect=volume_remote_error): id = 1 vol = {"instance_uuid": self.UUID, "mountpoint": "/dev/vdc", "mode": "rw"} body = {"os-attach": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._attach, req, id, body) def test_volume_attach_to_instance_raises_db_error(self): # In case of a DB error, a 500 error code is returned to the user volume_remote_error = \ messaging.RemoteError(exc_type='DBError') with mock.patch.object(volume_api.API, 'attach', side_effect=volume_remote_error): id = 1 vol = {"instance_uuid": self.UUID, "mountpoint": "/dev/vdc", "mode": "rw"} body = {"os-attach": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(messaging.RemoteError, self.controller._attach, req, id, body) def test_detach(self): body = {'os-detach': {'attachment_id': 'fakeuuid'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) def test_volume_detach_raises_remote_error(self): volume_remote_error = \ messaging.RemoteError(exc_type='VolumeAttachmentNotFound') with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): id = 1 vol = {"attachment_id": self.UUID} body = {"os-detach": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._detach, req, id, body) def test_volume_detach_raises_db_error(self): # In case of a DB error, a 500 error code is returned to the user volume_remote_error = \ messaging.RemoteError(exc_type='DBError') with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): id = 1 vol = {"attachment_id": self.UUID} body = {"os-detach": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(messaging.RemoteError, self.controller._detach, req, id, body) def test_attach_with_invalid_arguments(self): # Invalid request to attach volume to an invalid target body = {'os-attach': {'mountpoint': '/dev/vdc'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.headers["content-type"] = "application/json" req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) # Invalid request to attach volume with an invalid mode body = {'os-attach': {'instance_uuid': 'fake', 'mountpoint': '/dev/vdc', 'mode': 'rr'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.headers["content-type"] = "application/json" req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) body = {'os-attach': {'host_name': 'fake_host', 'mountpoint': '/dev/vdc', 'mode': 'ww'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.headers["content-type"] = "application/json" req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_begin_detaching(self): def fake_begin_detaching(*args, **kwargs): return {} self.stubs.Set(volume.api.API, 'begin_detaching', fake_begin_detaching) body = {'os-begin_detaching': {'fake': 'fake'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) def test_roll_detaching(self): def
fake_roll_detaching(*args, **kwargs): return {} self.stubs.Set(volume.api.API, 'roll_detaching', fake_roll_detaching) body = {'os-roll_detaching': {'fake': 'fake'}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) def test_extend_volume(self): def fake_extend_volume(*args, **kwargs): return {} self.stubs.Set(volume.api.API, 'extend', fake_extend_volume) body = {'os-extend': {'new_size': 5}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(202, res.status_int) def test_extend_volume_invalid_status(self): def fake_extend_volume(*args, **kwargs): msg = "Volume status must be available" raise exception.InvalidVolume(reason=msg) self.stubs.Set(volume.api.API, 'extend', fake_extend_volume) body = {'os-extend': {'new_size': 5}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_update_readonly_flag(self): def fake_update_readonly_flag(*args, **kwargs): return {} self.stubs.Set(volume.api.API, 'update_readonly_flag', fake_update_readonly_flag) def make_update_readonly_flag_test(self, readonly, return_code): body = {"os-update_readonly_flag": {"readonly": readonly}} if readonly is None: body = {"os-update_readonly_flag": {}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(return_code, res.status_int) make_update_readonly_flag_test(self, True, 202) make_update_readonly_flag_test(self, False, 202) make_update_readonly_flag_test(self, '1', 202) make_update_readonly_flag_test(self, '0', 202) make_update_readonly_flag_test(self, 'true', 202) make_update_readonly_flag_test(self, 'false', 202) make_update_readonly_flag_test(self, 'tt', 400) make_update_readonly_flag_test(self, 11, 400) make_update_readonly_flag_test(self, None, 400) def test_set_bootable(self): def make_set_bootable_test(self, bootable, return_code): body = {"os-set_bootable": {"bootable": bootable}} if bootable is None: body = {"os-set_bootable": {}} req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(return_code, res.status_int) make_set_bootable_test(self, True, 200) make_set_bootable_test(self, False, 200) make_set_bootable_test(self, '1', 200) make_set_bootable_test(self, '0', 200) make_set_bootable_test(self, 'true', 200) make_set_bootable_test(self, 'false', 200) make_set_bootable_test(self, 'tt', 400) make_set_bootable_test(self, 11, 400) make_set_bootable_test(self, None, 400) class VolumeRetypeActionsTest(VolumeActionsTest): def setUp(self): def get_vol_type(*args, **kwargs): d1 = {'id': 'fake', 'qos_specs_id': 'fakeqid1', 'extra_specs': {}} d2 = {'id': 'foo', 'qos_specs_id': 'fakeqid2', 'extra_specs': {}} return d1 if d1['id'] == args[1] else d2 self.retype_patchers = {} self.retype_mocks = {} paths = 
['cinder.volume.volume_types.get_volume_type', 'cinder.volume.volume_types.get_volume_type_by_name', 'cinder.volume.qos_specs.get_qos_specs', 'cinder.quota.QUOTAS.add_volume_type_opts', 'cinder.quota.QUOTAS.reserve'] for path in paths: name = path.split('.')[-1] self.retype_patchers[name] = mock.patch(path) self.retype_mocks[name] = self.retype_patchers[name].start() self.addCleanup(self.retype_patchers[name].stop) self.retype_mocks['get_volume_type'].side_effect = get_vol_type self.retype_mocks['get_volume_type_by_name'].side_effect = get_vol_type self.retype_mocks['add_volume_type_opts'].return_value = None self.retype_mocks['reserve'].return_value = None super(VolumeRetypeActionsTest, self).setUp() def _retype_volume_exec(self, expected_status, new_type='foo'): req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = 'POST' req.headers['content-type'] = 'application/json' retype_body = {'new_type': new_type, 'migration_policy': 'never'} req.body = jsonutils.dump_as_bytes({'os-retype': retype_body}) res = req.get_response(fakes.wsgi_app()) self.assertEqual(expected_status, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs') def test_retype_volume_success(self, _mock_get_qspecs): # Test that the retype API works for both available and in-use self._retype_volume_exec(202) self.mock_volume_get.return_value['status'] = 'in-use' specs = {'id': 'fakeqid1', 'name': 'fake_name1', 'consumer': 'back-end', 'specs': {'key1': 'value1'}} _mock_get_qspecs.return_value = specs self._retype_volume_exec(202) def test_retype_volume_no_body(self): # Request with no body should fail req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-retype': None}) res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_retype_volume_bad_policy(self): # Request with invalid migration policy should fail req = webob.Request.blank('/v2/fake/volumes/1/action') req.method = 'POST' req.headers['content-type'] = 'application/json' retype_body = {'new_type': 'foo', 'migration_policy': 'invalid'} req.body = jsonutils.dump_as_bytes({'os-retype': retype_body}) res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_retype_volume_bad_status(self): # Should fail if volume does not have proper status self.mock_volume_get.return_value['status'] = 'error' self._retype_volume_exec(400) def test_retype_type_no_exist(self): # Should fail if new type does not exist exc = exception.VolumeTypeNotFound('exc') self.retype_mocks['get_volume_type'].side_effect = exc self._retype_volume_exec(404) def test_retype_same_type(self): # Should fail if new type and old type are the same self._retype_volume_exec(400, new_type='fake') def test_retype_over_quota(self): # Should fail if going over quota for new type exc = exception.OverQuota(overs=['gigabytes'], quotas={'gigabytes': 20}, usages={'gigabytes': {'reserved': 5, 'in_use': 15}}) self.retype_mocks['reserve'].side_effect = exc self._retype_volume_exec(413) @mock.patch('cinder.volume.qos_specs.get_qos_specs') def _retype_volume_diff_qos(self, vol_status, consumer, expected_status, _mock_get_qspecs): def fake_get_qos(ctxt, qos_id): d1 = {'id': 'fakeqid1', 'name': 'fake_name1', 'consumer': consumer, 'specs': {'key1': 'value1'}} d2 = {'id': 'fakeqid2', 'name': 'fake_name2', 'consumer': consumer, 'specs': {'key1': 'value1'}} return d1 if d1['id'] == qos_id else d2 self.mock_volume_get.return_value['status'] 
= vol_status _mock_get_qspecs.side_effect = fake_get_qos self._retype_volume_exec(expected_status) def test_retype_volume_diff_qos_fe_in_use(self): # should fail if changing qos enforced by front-end for in-use volumes self._retype_volume_diff_qos('in-use', 'front-end', 400) def test_retype_volume_diff_qos_fe_available(self): # should NOT fail if changing qos enforced by FE for available volumes self._retype_volume_diff_qos('available', 'front-end', 202) def test_retype_volume_diff_qos_be(self): # should NOT fail if changing qos enforced by back-end self._retype_volume_diff_qos('available', 'back-end', 202) self._retype_volume_diff_qos('in-use', 'back-end', 202) def stub_volume_get(self, context, volume_id): volume = stubs.stub_volume(volume_id) if volume_id == 5: volume['status'] = 'in-use' else: volume['status'] = 'available' return volume def stub_upload_volume_to_image_service(self, context, volume, metadata, force): ret = {"id": volume['id'], "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1), "status": 'uploading', "display_description": volume['display_description'], "size": volume['size'], "volume_type": volume['volume_type'], "image_id": 1, "container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name'} return ret class VolumeImageActionsTest(test.TestCase): def setUp(self): super(VolumeImageActionsTest, self).setUp() self.controller = volume_actions.VolumeActionsController() self.stubs.Set(volume_api.API, 'get', stub_volume_get) self.context = context.RequestContext('fake', 'fake', is_admin=False) def _get_os_volume_upload_image(self): vol = { "container_format": 'bare', "disk_format": 'raw', "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1), "image_name": 'image_name', "is_public": False, "force": True} body = {"os-volume_upload_image": vol} return body def fake_image_service_create(self, *args): ret = { 'status': u'queued', 'name': u'image_name', 'deleted': False, 'container_format': u'bare', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'disk_format': u'raw', 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'id': 1, 'min_ram': 0, 'checksum': None, 'min_disk': 0, 'is_public': False, 'deleted_at': None, 'properties': {u'x_billing_code_license': u'246254365'}, 'size': 0} return ret def fake_rpc_copy_volume_to_image(self, *args): pass def test_copy_volume_to_image(self): self.stubs.Set(volume_api.API, "copy_volume_to_image", stub_upload_volume_to_image_service) id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) res_dict = self.controller._volume_upload_image(req, id, body) expected = {'os-volume_upload_image': {'id': id, 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'status': 'uploading', 'display_description': 'displaydesc', 'size': 1, 'volume_type': fake_volume.fake_db_volume_type( name='vol_type_name'), 'image_id': 1, 'container_format': 'bare', 'disk_format': 'raw', 'image_name': 'image_name'}} self.assertDictMatch(expected, res_dict) def test_copy_volume_to_image_volumenotfound(self): def stub_volume_get_raise_exc(self, context, volume_id): raise exception.VolumeNotFound(volume_id=volume_id) self.stubs.Set(volume_api.API, 'get', stub_volume_get_raise_exc) id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = 
fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(webob.exc.HTTPNotFound, self.controller._volume_upload_image, req, id, body) def test_copy_volume_to_image_invalidvolume(self): def stub_upload_volume_to_image_service_raise(self, context, volume, metadata, force): raise exception.InvalidVolume(reason='blah') self.stubs.Set(volume_api.API, "copy_volume_to_image", stub_upload_volume_to_image_service_raise) id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body) def test_copy_volume_to_image_valueerror(self): def stub_upload_volume_to_image_service_raise(self, context, volume, metadata, force): raise ValueError self.stubs.Set(volume_api.API, "copy_volume_to_image", stub_upload_volume_to_image_service_raise) id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body) def test_copy_volume_to_image_remoteerror(self): def stub_upload_volume_to_image_service_raise(self, context, volume, metadata, force): raise messaging.RemoteError self.stubs.Set(volume_api.API, "copy_volume_to_image", stub_upload_volume_to_image_service_raise) id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body) def test_volume_upload_image_typeerror(self): id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' body = {"os-volume_upload_image_fake": "fake"} req = webob.Request.blank('/v2/tenant1/volumes/%s/action' % id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_volume_upload_image_without_type(self): id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": None, "force": True} body = {"": vol} req = webob.Request.blank('/v2/tenant1/volumes/%s/action' % id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_extend_volume_valueerror(self): id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' body = {'os-extend': {'new_size': 'fake'}} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._extend, req, id, body) def test_copy_volume_to_image_notimagename(self): id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": None, "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body) def 
test_copy_volume_to_image_with_protected_prop(self): """Test create image from volume with protected properties.""" id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' def fake_get_volume_image_metadata(*args): meta_dict = { "volume_id": id, "key": "x_billing_code_license", "value": "246254365"} return meta_dict # Need to mock get_volume_image_metadata, create, # update and copy_volume_to_image with mock.patch.object(volume_api.API, "get_volume_image_metadata") \ as mock_get_volume_image_metadata: mock_get_volume_image_metadata.side_effect = \ fake_get_volume_image_metadata with mock.patch.object(glance.GlanceImageService, "create") \ as mock_create: mock_create.side_effect = self.fake_image_service_create with mock.patch.object(volume_api.API, "update") \ as mock_update: mock_update.side_effect = stubs.stub_volume_update with mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") \ as mock_copy_volume_to_image: mock_copy_volume_to_image.side_effect = \ self.fake_rpc_copy_volume_to_image req = fakes.HTTPRequest.blank( '/v2/tenant1/volumes/%s/action' % id) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, id, body) expected_res = { 'os-volume_upload_image': { 'id': id, 'updated_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'status': 'uploading', 'display_description': 'displaydesc', 'size': 1, 'volume_type': fake_volume.fake_db_volume_type( name='vol_type_name'), 'image_id': 1, 'container_format': 'bare', 'disk_format': 'raw', 'image_name': 'image_name' } } self.assertDictMatch(expected_res, res_dict) def test_copy_volume_to_image_without_glance_metadata(self): """Test create image from volume if volume is created without image. In this case volume glance metadata will not be available for this volume. 
""" id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' def fake_get_volume_image_metadata_raise(*args): raise exception.GlanceMetadataNotFound(id=id) # Need to mock get_volume_image_metadata, create, # update and copy_volume_to_image with mock.patch.object(volume_api.API, "get_volume_image_metadata") \ as mock_get_volume_image_metadata: mock_get_volume_image_metadata.side_effect = \ fake_get_volume_image_metadata_raise with mock.patch.object(glance.GlanceImageService, "create") \ as mock_create: mock_create.side_effect = self.fake_image_service_create with mock.patch.object(volume_api.API, "update") \ as mock_update: mock_update.side_effect = stubs.stub_volume_update with mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") \ as mock_copy_volume_to_image: mock_copy_volume_to_image.side_effect = \ self.fake_rpc_copy_volume_to_image req = fakes.HTTPRequest.blank( '/v2/tenant1/volumes/%s/action' % id) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, id, body) expected_res = { 'os-volume_upload_image': { 'id': id, 'updated_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'status': 'uploading', 'display_description': 'displaydesc', 'size': 1, 'volume_type': fake_volume.fake_db_volume_type( name='vol_type_name'), 'image_id': 1, 'container_format': 'bare', 'disk_format': 'raw', 'image_name': 'image_name' } } self.assertDictMatch(expected_res, res_dict) def test_copy_volume_to_image_without_protected_prop(self): """Test protected property is not defined with the root image.""" id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' def fake_get_volume_image_metadata(*args): return [] # Need to mock get_volume_image_metadata, create, # update and copy_volume_to_image with mock.patch.object(volume_api.API, "get_volume_image_metadata") \ as mock_get_volume_image_metadata: mock_get_volume_image_metadata.side_effect = \ fake_get_volume_image_metadata with mock.patch.object(glance.GlanceImageService, "create") \ as mock_create: mock_create.side_effect = self.fake_image_service_create with mock.patch.object(volume_api.API, "update") \ as mock_update: mock_update.side_effect = stubs.stub_volume_update with mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") \ as mock_copy_volume_to_image: mock_copy_volume_to_image.side_effect = \ self.fake_rpc_copy_volume_to_image req = fakes.HTTPRequest.blank( '/v2/tenant1/volumes/%s/action' % id) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, id, body) expected_res = { 'os-volume_upload_image': { 'id': id, 'updated_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'status': 'uploading', 'display_description': 'displaydesc', 'size': 1, 'volume_type': fake_volume.fake_db_volume_type( name='vol_type_name'), 'image_id': 1, 'container_format': 'bare', 'disk_format': 'raw', 'image_name': 'image_name' } } self.assertDictMatch(expected_res, res_dict) def test_copy_volume_to_image_without_core_prop(self): """Test glance_core_properties defined in cinder.conf is empty.""" id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' # Need to mock create, update, copy_volume_to_image with mock.patch.object(glance.GlanceImageService, "create") \ as mock_create: mock_create.side_effect = self.fake_image_service_create with mock.patch.object(volume_api.API, "update") \ as mock_update: mock_update.side_effect = stubs.stub_volume_update with mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") \ as mock_copy_volume_to_image: 
mock_copy_volume_to_image.side_effect = \ self.fake_rpc_copy_volume_to_image self.override_config('glance_core_properties', []) req = fakes.HTTPRequest.blank( '/v2/tenant1/volumes/%s/action' % id) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, id, body) expected_res = { 'os-volume_upload_image': { 'id': id, 'updated_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'status': 'uploading', 'display_description': 'displaydesc', 'size': 1, 'volume_type': fake_volume.fake_db_volume_type( name='vol_type_name'), 'image_id': 1, 'container_format': 'bare', 'disk_format': 'raw', 'image_name': 'image_name' } } self.assertDictMatch(expected_res, res_dict) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_api.API, "get") @mock.patch.object(volume_api.API, "update") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_volume_type_none( self, mock_copy_volume_to_image, mock_update, mock_get, mock_create, mock_get_volume_image_metadata): """Test create image from a volume whose volume type is None.""" id = fake.volume_id db_volume = fake_volume.fake_db_volume() volume_obj = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) mock_create.side_effect = self.fake_image_service_create mock_get.return_value = volume_obj mock_copy_volume_to_image.side_effect = ( self.fake_rpc_copy_volume_to_image) req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, id, body) expected_res = { 'os-volume_upload_image': { 'id': fake.volume_id, 'updated_at': None, 'status': 'uploading', 'display_description': None, 'size': 1, 'volume_type': None, 'image_id': 1, 'container_format': u'bare', 'disk_format': u'raw', 'image_name': u'image_name' } } self.assertDictMatch(expected_res, res_dict) cinder-8.0.0/cinder/tests/unit/api/contrib/test_capabilities.py0000664000567000056710000001154212701406250025764 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
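# A hedged sketch (an illustrative addition, not from the original sources)
# of the action body the os-volume_upload_image tests above POST to
# /v2/<tenant>/volumes/<volume_id>/action. Field values are placeholders
# taken from the stubs, and the stdlib json module stands in for the
# oslo_serialization.jsonutils helper the tests actually use.
import json

_example_upload_image_body = json.dumps({
    'os-volume_upload_image': {
        'image_name': 'image_name',   # the tests above expect 400 when None
        'container_format': 'bare',
        'disk_format': 'raw',
        'force': True,                # force flag forwarded to the stubbed
                                      # copy_volume_to_image signature
    }
})
# A webob request would then carry it as:
# req.body = _example_upload_image_body.encode('utf-8')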
import mock import oslo_messaging from cinder.api.contrib import capabilities from cinder import context from cinder import exception from cinder import test from cinder.tests.unit.api import fakes def rpcapi_get_capabilities(self, context, host, discover): capabilities = dict( vendor_name='OpenStack', volume_backend_name='lvm', pool_name='pool', driver_version='2.0.0', storage_protocol='iSCSI', display_name='Capabilities of Cinder LVM driver', description='These are volume type options provided by ' 'Cinder LVM driver, blah, blah.', replication_targets=[], visibility='public', properties = dict( compression = dict( title='Compression', description='Enables compression.', type='boolean'), qos = dict( title='QoS', description='Enables QoS.', type='boolean'), replication = dict( title='Replication', description='Enables replication.', type='boolean'), thin_provisioning = dict( title='Thin Provisioning', description='Sets thin provisioning.', type='boolean'), ) ) return capabilities class CapabilitiesAPITest(test.TestCase): def setUp(self): super(CapabilitiesAPITest, self).setUp() self.flags(host='fake') self.controller = capabilities.CapabilitiesController() self.ctxt = context.RequestContext('admin', 'fake', True) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities', rpcapi_get_capabilities) def test_capabilities_summary(self, mock_services): mock_services.return_value = [{'name': 'fake'}] req = fakes.HTTPRequest.blank('/fake/capabilities/fake') req.environ['cinder.context'] = self.ctxt res = self.controller.show(req, 'fake') expected = { 'namespace': 'OS::Storage::Capabilities::fake', 'vendor_name': 'OpenStack', 'volume_backend_name': 'lvm', 'pool_name': 'pool', 'driver_version': '2.0.0', 'storage_protocol': 'iSCSI', 'display_name': 'Capabilities of Cinder LVM driver', 'description': 'These are volume type options provided by ' 'Cinder LVM driver, blah, blah.', 'visibility': 'public', 'replication_targets': [], 'properties': { 'compression': { 'title': 'Compression', 'description': 'Enables compression.', 'type': 'boolean'}, 'qos': { 'title': 'QoS', 'description': 'Enables QoS.', 'type': 'boolean'}, 'replication': { 'title': 'Replication', 'description': 'Enables replication.', 'type': 'boolean'}, 'thin_provisioning': { 'title': 'Thin Provisioning', 'description': 'Sets thin provisioning.', 'type': 'boolean'}, } } self.assertDictMatch(expected, res) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities') def test_get_capabilities_rpc_timeout(self, mock_rpc, mock_services): mock_rpc.side_effect = oslo_messaging.MessagingTimeout mock_services.return_value = [{'name': 'fake'}] req = fakes.HTTPRequest.blank('/fake/capabilities/fake') req.environ['cinder.context'] = self.ctxt self.assertRaises(exception.RPCTimeout, self.controller.show, req, 'fake') @mock.patch('cinder.db.service_get_all') def test_get_capabilities_service_not_found(self, mock_services): mock_services.return_value = [] req = fakes.HTTPRequest.blank('/fake/capabilities/fake') req.environ['cinder.context'] = self.ctxt self.assertRaises(exception.NotFound, self.controller.show, req, 'fake') cinder-8.0.0/cinder/tests/unit/api/contrib/test_snapshot_unmanage.py0000664000567000056710000001134512701406250027046 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils import webob from cinder import context from cinder import exception from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume # These fake snapshot IDs are used by our tests. snapshot_id = 'ffffffff-0000-ffff-0000-ffffffffffff' bad_snp_id = 'ffffffff-0000-ffff-0000-fffffffffffe' def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v2'] = api return mapper def api_snapshot_get(self, context, snp_id): """Replacement for cinder.volume.api.API.get_snapshot. We stub the cinder.volume.api.API.get_snapshot method to check for the existence of snapshot_id in our list of fake snapshots and raise an exception if the specified snapshot ID is not in our list. """ snapshot = {'id': 'ffffffff-0000-ffff-0000-ffffffffffff', 'progress': '100%', 'volume_id': 'fake_volume_id', 'project_id': 'fake_project', 'status': 'available'} if snp_id == snapshot_id: snapshot_obj = fake_snapshot.fake_snapshot_obj(context, **snapshot) return snapshot_obj else: raise exception.SnapshotNotFound(snapshot_id=snp_id) @mock.patch('cinder.volume.api.API.get_snapshot', api_snapshot_get) class SnapshotUnmanageTest(test.TestCase): """Test cases for cinder/api/contrib/snapshot_unmanage.py. The API extension adds an action to snapshots, "os-unmanage", which will effectively issue a delete operation on the snapshot, but with a flag set that means that a different method will be invoked on the driver, so that the snapshot is not actually deleted in the storage backend. In this set of test cases, we are ensuring that the code correctly parses the request structure and raises the correct exceptions when things are not right, and calls down into cinder.volume.api.API.delete_snapshot with the correct arguments.
""" def _get_resp(self, snapshot_id): """Helper to build an os-unmanage req for the specified snapshot_id.""" req = webob.Request.blank('/v2/fake/snapshots/%s/action' % snapshot_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = context.RequestContext('admin', 'fake', True) body = {'os-unmanage': ''} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(app()) return res @mock.patch('cinder.db.conditional_update', return_value=1) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_snapshot') def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_volume_get_by_id, mock_db_update, mock_conditional_update): """Return success for valid and unattached volume.""" ctxt = context.RequestContext('admin', 'fake', True) volume = fake_volume.fake_volume_obj(ctxt, id='fake_volume_id') mock_volume_get_by_id.return_value = volume res = self._get_resp(snapshot_id) self.assertEqual(1, mock_volume_get_by_id.call_count) self.assertEqual(2, len(mock_volume_get_by_id.call_args[0]), mock_volume_get_by_id.call_args) self.assertEqual('fake_volume_id', mock_volume_get_by_id.call_args[0][1]) self.assertEqual(1, mock_rpcapi.call_count) self.assertEqual(3, len(mock_rpcapi.call_args[0])) self.assertEqual(1, len(mock_rpcapi.call_args[1])) self.assertTrue(mock_rpcapi.call_args[1]['unmanage_only']) self.assertEqual(202, res.status_int, res) def test_unmanage_snapshot_bad_snapshot_id(self): """Return 404 if the volume does not exist.""" res = self._get_resp(bad_snp_id) self.assertEqual(404, res.status_int, res) cinder-8.0.0/cinder/tests/unit/api/contrib/test_qos_specs_manage.py0000664000567000056710000010575112701406257026657 0ustar jenkinsjenkins00000000000000# Copyright 2013 eBay Inc. # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from xml.dom import minidom from lxml import etree import mock import webob from cinder.api.contrib import qos_specs_manage from cinder.api import xmlutil from cinder import context from cinder import db from cinder import exception from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_notifier def stub_qos_specs(id): res = dict(name='qos_specs_' + str(id)) res.update(dict(consumer='back-end')) res.update(dict(id=str(id))) specs = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} res.update(dict(specs=specs)) return res def stub_qos_associates(id): return [{ 'association_type': 'volume_type', 'name': 'FakeVolTypeName', 'id': 'FakeVolTypeID'}] def return_qos_specs_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return [ stub_qos_specs(1), stub_qos_specs(2), stub_qos_specs(3), ] def return_qos_specs_get_qos_specs(context, id): if id == "777": raise exception.QoSSpecsNotFound(specs_id=id) return stub_qos_specs(int(id)) def return_qos_specs_delete(context, id, force): if id == "777": raise exception.QoSSpecsNotFound(specs_id=id) elif id == "666": raise exception.QoSSpecsInUse(specs_id=id) def return_qos_specs_delete_keys(context, id, keys): if id == "777": raise exception.QoSSpecsNotFound(specs_id=id) if 'foo' in keys: raise exception.QoSSpecsKeyNotFound(specs_id=id, specs_key='foo') def return_qos_specs_update(context, id, specs): if id == "777": raise exception.QoSSpecsNotFound(specs_id=id) elif id == "888": raise exception.InvalidQoSSpecs(reason=id) elif id == "999": raise exception.QoSSpecsUpdateFailed(specs_id=id, qos_specs=specs) def return_qos_specs_create(context, name, specs): if name == "666": raise exception.QoSSpecsExists(specs_id=name) elif name == "555": raise exception.QoSSpecsCreateFailed(name=name, qos_specs=specs) elif name == "444": raise exception.InvalidQoSSpecs(reason=name) def return_qos_specs_get_by_name(context, name): if name == "777": raise exception.QoSSpecsNotFound(specs_id=name) return stub_qos_specs(int(name.split("_")[2])) def return_get_qos_associations(context, id): if id == "111": raise exception.QoSSpecsNotFound(specs_id=id) elif id == "222": raise exception.CinderException() return stub_qos_associates(id) def return_associate_qos_specs(context, id, type_id): if id == "111": raise exception.QoSSpecsNotFound(specs_id=id) elif id == "222": raise exception.QoSSpecsAssociateFailed(specs_id=id, type_id=type_id) elif id == "333": raise exception.QoSSpecsDisassociateFailed(specs_id=id, type_id=type_id) if type_id == "1234": raise exception.VolumeTypeNotFound( volume_type_id=type_id) def return_disassociate_all(context, id): if id == "111": raise exception.QoSSpecsNotFound(specs_id=id) elif id == "222": raise exception.QoSSpecsDisassociateFailed(specs_id=id, type_id=None) class QoSSpecManageApiTest(test.TestCase): def _create_qos_specs(self, name, values=None): """Create a qos_specs entry.""" if values: specs = dict(name=name, qos_specs=values) else: specs = {'name': name, 'qos_specs': { 'consumer': 'back-end', 'key1': 'value1', 'key2': 'value2'}} return db.qos_specs_create(self.ctxt, specs)['id'] def setUp(self): super(QoSSpecManageApiTest, self).setUp() self.flags(host='fake') self.controller = qos_specs_manage.QoSSpecsController() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin=True) self.qos_id1 = self._create_qos_specs("Qos_test_1") self.qos_id2 =
self._create_qos_specs("Qos_test_2") self.qos_id3 = self._create_qos_specs("Qos_test_3") self.qos_id4 = self._create_qos_specs("Qos_test_4") @mock.patch('cinder.volume.qos_specs.get_all_specs', side_effect=return_qos_specs_get_all) def test_index(self, mock_get_all_specs): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) names = set() for item in res['qos_specs']: self.assertEqual('value1', item['specs']['key1']) names.add(item['name']) expected_names = ['qos_specs_1', 'qos_specs_2', 'qos_specs_3'] self.assertEqual(set(expected_names), names) @mock.patch('cinder.volume.qos_specs.get_all_specs', side_effect=return_qos_specs_get_all) def test_index_xml_response(self, mock_get_all_specs): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') res = self.controller.index(req) req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) qos_specs_response = dom.getElementsByTagName('qos_spec') names = set() for qos_spec in qos_specs_response: name = qos_spec.getAttribute('name') names.add(name) expected_names = ['qos_specs_1', 'qos_specs_2', 'qos_specs_3'] self.assertEqual(set(expected_names), names) def test_index_with_limit(self): url = '/v2/fake/qos-specs?limit=2' req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(2, len(res['qos_specs'])) self.assertEqual(self.qos_id4, res['qos_specs'][0]['id']) self.assertEqual(self.qos_id3, res['qos_specs'][1]['id']) expect_next_link = ('http://localhost/v2/fakeproject/qos-specs?limit' '=2&marker=%s') % res['qos_specs'][1]['id'] self.assertEqual(expect_next_link, res['qos_specs_links'][0]['href']) def test_index_with_offset(self): url = '/v2/fake/qos-specs?offset=1' req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) def test_index_with_offset_out_of_range(self): url = '/v2/fake/qos-specs?offset=356576877698707' req = fakes.HTTPRequest.blank(url, use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_index_with_limit_and_offset(self): url = '/v2/fake/qos-specs?limit=2&offset=1' req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(2, len(res['qos_specs'])) self.assertEqual(self.qos_id3, res['qos_specs'][0]['id']) self.assertEqual(self.qos_id2, res['qos_specs'][1]['id']) def test_index_with_marker(self): url = '/v2/fake/qos-specs?marker=%s' % self.qos_id4 req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) def test_index_with_filter(self): url = '/v2/fake/qos-specs?id=%s' % self.qos_id4 req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(1, len(res['qos_specs'])) self.assertEqual(self.qos_id4, res['qos_specs'][0]['id']) def test_index_with_sort_keys(self): url = '/v2/fake/qos-specs?sort=id' req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(4, len(res['qos_specs'])) expect_result = [self.qos_id1, self.qos_id2, self.qos_id3, self.qos_id4] expect_result.sort(reverse=True) self.assertEqual(expect_result[0], res['qos_specs'][0]['id']) self.assertEqual(expect_result[1], 
res['qos_specs'][1]['id']) self.assertEqual(expect_result[2], res['qos_specs'][2]['id']) self.assertEqual(expect_result[3], res['qos_specs'][3]['id']) def test_index_with_sort_keys_and_sort_dirs(self): url = '/v2/fake/qos-specs?sort=id:asc' req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(4, len(res['qos_specs'])) expect_result = [self.qos_id1, self.qos_id2, self.qos_id3, self.qos_id4] expect_result.sort() self.assertEqual(expect_result[0], res['qos_specs'][0]['id']) self.assertEqual(expect_result[1], res['qos_specs'][1]['id']) self.assertEqual(expect_result[2], res['qos_specs'][2]['id']) self.assertEqual(expect_result[3], res['qos_specs'][3]['id']) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.controller.delete(req, 1) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_not_found(self, mock_qos_delete, mock_qos_get_specs): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, '777') self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_inuse(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, '666') self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_inuse_force(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666?force=True') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.delete, req, '666') self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys(self, mock_qos_delete_keys): body = {"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.controller.delete_keys(req, '666', body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys_qos_notfound(self, 
mock_qos_specs_delete): body = {"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777/delete_keys') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_keys, req, '777', body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys_badkey(self, mock_qos_specs_delete): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys') body = {"keys": ['foo', 'zoo']} notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete_keys, req, '666', body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys_get_notifier(self, mock_qos_delete_keys): body = {"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier, autospec=True) as mock_get_notifier: self.controller.delete_keys(req, '666', body) mock_get_notifier.assert_called_once_with('QoSSpecs') @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) @mock.patch('cinder.volume.qos_specs.get_qos_specs_by_name', side_effect=return_qos_specs_get_by_name) @mock.patch('cinder.api.openstack.wsgi.Controller.validate_string_length') def test_create(self, mock_validate, mock_qos_get_specs, mock_qos_spec_create): body = {"qos_specs": {"name": "qos_specs_1", "key1": "value1"}} req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): res_dict = self.controller.create(req, body) self.assertEqual(1, notifier.get_notification_count()) self.assertEqual('qos_specs_1', res_dict['qos_specs']['name']) self.assertTrue(mock_validate.called) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) def test_create_invalid_input(self, mock_qos_get_specs): body = {"qos_specs": {"name": "444", "consumer": "invalid_consumer"}} req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) @mock.patch('cinder.volume.qos_specs.get_qos_specs_by_name', side_effect=return_qos_specs_get_by_name) def test_create_conflict(self, mock_qos_get_specs, mock_qos_spec_create): body = {"qos_specs": {"name": "666", "key1": "value1"}} req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) @mock.patch('cinder.volume.qos_specs.get_qos_specs_by_name', side_effect=return_qos_specs_get_by_name) def test_create_failed(self, mock_qos_get_specs, 
mock_qos_spec_create): body = {"qos_specs": {"name": "555", "key1": "value1"}} req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.create, req, body) self.assertEqual(1, notifier.get_notification_count()) def _create_qos_specs_bad_body(self, body): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_create_no_body(self): self._create_qos_specs_bad_body(body=None) def test_create_invalid_body(self): body = {'foo': {'a': 'b'}} self._create_qos_specs_bad_body(body=body) def test_create_missing_specs_name(self): body = {'qos_specs': {'a': 'b'}} self._create_qos_specs_bad_body(body=body) def test_create_malformed_entity(self): body = {'qos_specs': 'string'} self._create_qos_specs_bad_body(body=body) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update(self, mock_qos_update): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/555') body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} res = self.controller.update(req, '555', body) self.assertDictMatch(body, res) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update_not_found(self, mock_qos_update): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777') body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, '777', body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update_invalid_input(self, mock_qos_update): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/888') body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '888', body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update_failed(self, mock_qos_update): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/999') body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.update, req, '999', body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_show(self, mock_get_qos_specs): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1') res_dict = self.controller.show(req, '1') self.assertEqual('1', res_dict['qos_specs']['id']) self.assertEqual('qos_specs_1', res_dict['qos_specs']['name']) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_show_xml_response(self, mock_get_qos_specs): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1') res = 
self.controller.show(req, '1') req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) qos_spec_response = dom.getElementsByTagName('qos_spec') qos_spec = qos_spec_response.item(0) id = qos_spec.getAttribute('id') name = qos_spec.getAttribute('name') consumer = qos_spec.getAttribute('consumer') self.assertEqual(u'1', id) self.assertEqual('qos_specs_1', name) self.assertEqual('back-end', consumer) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) def test_get_associations(self, mock_get_assciations): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/1/associations') res = self.controller.associations(req, '1') self.assertEqual('FakeVolTypeName', res['qos_associations'][0]['name']) self.assertEqual('FakeVolTypeID', res['qos_associations'][0]['id']) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) def test_get_associations_xml_response(self, mock_get_assciations): req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1/associations') res = self.controller.associations(req, '1') req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) associations_response = dom.getElementsByTagName('associations') association = associations_response.item(0) id = association.getAttribute('id') name = association.getAttribute('name') association_type = association.getAttribute('association_type') self.assertEqual('FakeVolTypeID', id) self.assertEqual('FakeVolTypeName', name) self.assertEqual('volume_type', association_type) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) def test_get_associations_not_found(self, mock_get_assciations): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/111/associations') self.assertRaises(webob.exc.HTTPNotFound, self.controller.associations, req, '111') @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) def test_get_associations_failed(self, mock_get_associations): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/222/associations') self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.associations, req, '222') @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/1/associate?vol_type_id=111') res = self.controller.associate(req, '1') self.assertEqual(202, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate_no_type(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/1/associate') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.associate, req, '1') @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def 
test_associate_not_found(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/111/associate?vol_type_id=12') self.assertRaises(webob.exc.HTTPNotFound, self.controller.associate, req, '111') req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/1/associate?vol_type_id=1234') self.assertRaises(webob.exc.HTTPNotFound, self.controller.associate, req, '1') @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate_fail(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/222/associate?vol_type_id=1000') self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.associate, req, '222') @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/1/disassociate?vol_type_id=111') res = self.controller.disassociate(req, '1') self.assertEqual(202, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_no_type(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/1/disassociate') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.disassociate, req, '1') @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_not_found(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/111/disassociate?vol_type_id=12') self.assertRaises(webob.exc.HTTPNotFound, self.controller.disassociate, req, '111') req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/1/disassociate?vol_type_id=1234') self.assertRaises(webob.exc.HTTPNotFound, self.controller.disassociate, req, '1') @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_failed(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/333/disassociate?vol_type_id=1000') self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.disassociate, req, '333') @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_all', side_effect=return_disassociate_all) def test_disassociate_all(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/1/disassociate_all') res = self.controller.disassociate_all(req, '1') self.assertEqual(202, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_all', side_effect=return_disassociate_all) def test_disassociate_all_not_found(self, mock_disassociate, mock_get): req = fakes.HTTPRequest.blank( '/v2/fake/qos-specs/111/disassociate_all') self.assertRaises(webob.exc.HTTPNotFound, 
                          self.controller.disassociate_all, req, '111')

    @mock.patch('cinder.volume.qos_specs.get_qos_specs',
                side_effect=return_qos_specs_get_qos_specs)
    @mock.patch('cinder.volume.qos_specs.disassociate_all',
                side_effect=return_disassociate_all)
    def test_disassociate_all_failed(self, mock_disassociate, mock_get):
        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/222/disassociate_all')
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.disassociate_all, req, '222')


class TestQoSSpecsTemplate(test.TestCase):
    def setUp(self):
        super(TestQoSSpecsTemplate, self).setUp()
        self.serializer = qos_specs_manage.QoSSpecsTemplate()

    def test_qos_specs_serializer(self):
        fixture = {
            "qos_specs": [
                {
                    "specs": {
                        "key1": "v1",
                        "key2": "v2",
                    },
                    "consumer": "back-end",
                    "name": "qos-2",
                    "id": "61e7b72f-ef15-46d9-b00e-b80f699999d0"
                },
                {
                    "specs": {"total_iops_sec": "200"},
                    "consumer": "front-end",
                    "name": "qos-1",
                    "id": "e44bba5e-b629-4b96-9aa3-0404753a619b"
                }
            ]
        }
        output = self.serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'qos_specs')
        qos_elems = root.findall("qos_spec")
        self.assertEqual(2, len(qos_elems))
        for i, qos_elem in enumerate(qos_elems):
            qos_dict = fixture['qos_specs'][i]
            # check qos_spec attributes
            for key in ['name', 'id', 'consumer']:
                self.assertEqual(str(qos_dict[key]), qos_elem.get(key))
            # check specs
            specs = qos_elem.find("specs")
            new_dict = {}
            for element in specs.iter(tag=etree.Element):
                # skip root element for specs
                if element.tag == "specs":
                    continue
                new_dict.update({element.tag: element.text})
            self.assertDictMatch(qos_dict['specs'], new_dict)


class TestAssociationsTemplate(test.TestCase):
    def setUp(self):
        super(TestAssociationsTemplate, self).setUp()
        self.serializer = qos_specs_manage.AssociationsTemplate()

    def test_qos_associations_serializer(self):
        fixture = {
            "qos_associations": [
                {
                    "association_type": "volume_type",
                    "name": "type-4",
                    "id": "14d54d29-51a4-4046-9f6f-cf9800323563"
                },
                {
                    "association_type": "volume_type",
                    "name": "type-2",
                    "id": "3689ce83-308d-4ba1-8faf-7f1be04a282b"
                }
            ]
        }
        output = self.serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'qos_associations')
        association_elems = root.findall("associations")
        self.assertEqual(2, len(association_elems))
        for i, association_elem in enumerate(association_elems):
            association_dict = fixture['qos_associations'][i]
            # check qos_spec attributes
            for key in ['name', 'id', 'association_type']:
                self.assertEqual(str(association_dict[key]),
                                 association_elem.get(key))


class TestQoSSpecsKeyDeserializer(test.TestCase):
    def setUp(self):
        super(TestQoSSpecsKeyDeserializer, self).setUp()
        self.deserializer = qos_specs_manage.QoSSpecsKeyDeserializer()

    def test_keys(self):
        # XML payload: a <keys> element with one empty child per key name,
        # which the deserializer turns into the list asserted below.
        self_request = """
<keys><xyz /><abc /></keys>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "keys": ["xyz", "abc"]
        }
        self.assertEqual(expected, request['body'])

    def test_bad_format(self):
        # A payload whose root element is not <keys> must be rejected.
        self_request = """
<qos_specs><keys><xyz /><abc /></keys></qos_specs>"""
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.deserialize, self_request)

cinder-8.0.0/cinder/tests/unit/api/contrib/test_types_manage.py

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import six
import webob

from cinder.api.contrib import types_manage
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.volume import volume_types


def stub_volume_type(id):
    specs = {"key1": "value1",
             "key2": "value2",
             "key3": "value3",
             "key4": "value4",
             "key5": "value5"}
    return dict(id=id,
                name='vol_type_%s' % six.text_type(id),
                description='vol_type_desc_%s' % six.text_type(id),
                extra_specs=specs)


def stub_volume_type_updated(id, is_public=True):
    return dict(id=id,
                name='vol_type_%s_%s' % (six.text_type(id),
                                         six.text_type(id)),
                is_public=is_public,
                description='vol_type_desc_%s_%s' % (
                    six.text_type(id), six.text_type(id)))


def stub_volume_type_updated_desc_only(id):
    return dict(id=id,
                name='vol_type_%s' % six.text_type(id),
                description='vol_type_desc_%s_%s' % (
                    six.text_type(id), six.text_type(id)))


def return_volume_types_get_volume_type(context, id):
    if id == "777":
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    return stub_volume_type(int(id))


def return_volume_types_destroy(context, name):
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    pass


def return_volume_types_with_volumes_destroy(context, id):
    if id == "1":
        raise exception.VolumeTypeInUse(volume_type_id=id)
    pass


def return_volume_types_create(context, name, specs, is_public, description):
    pass


def return_volume_types_create_duplicate_type(context, name, specs, is_public,
                                              description):
    raise exception.VolumeTypeExists(id=name)


def stub_volume_type_updated_name_only(id):
    return dict(id=id,
                name='vol_type_%s_%s' % (six.text_type(id),
                                         six.text_type(id)),
                description='vol_type_desc_%s' % six.text_type(id))


def stub_volume_type_updated_name_after_delete(id):
    return dict(id=id,
                name='vol_type_%s' % six.text_type(id),
                description='vol_type_desc_%s' % six.text_type(id))


def return_volume_types_get_volume_type_updated(id, is_public=True):
    if id == "777":
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    if id == '888':
        return stub_volume_type_updated_desc_only(int(id))
    if id == '999':
        return stub_volume_type_updated_name_only(int(id))
    if id == '666':
        return stub_volume_type_updated_name_after_delete(int(id))
    # anything else
    return stub_volume_type_updated(int(id), is_public=is_public)


def return_volume_types_get_by_name(context, name):
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    return stub_volume_type(int(name.split("_")[2]))


def return_volume_types_get_default():
    return stub_volume_type(1)


def return_volume_types_get_default_not_found():
    return {}


class VolumeTypesManageApiTest(test.TestCase):
    def setUp(self):
        super(VolumeTypesManageApiTest, self).setUp()
        self.flags(host='fake')
        self.controller = types_manage.VolumeTypesManageController()

    # to reset notifier drivers left over from other api/contrib tests
    def tearDown(self):
        super(VolumeTypesManageApiTest, self).tearDown()

    def test_volume_types_delete(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        self.stubs.Set(volume_types, 'destroy',
                       return_volume_types_destroy)
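        # With volume_types.get_volume_type and destroy stubbed out above,
        # the delete below never touches the database; the controller is
        # still expected to emit exactly one notification.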
req = fakes.HTTPRequest.blank('/v2/fake/types/1') self.assertEqual(0, len(self.notifier.notifications)) self.controller._delete(req, 1) self.assertEqual(1, len(self.notifier.notifications)) def test_volume_types_delete_not_found(self): self.stubs.Set(volume_types, 'get_volume_type', return_volume_types_get_volume_type) self.stubs.Set(volume_types, 'destroy', return_volume_types_destroy) self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank('/v2/fake/types/777') self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete, req, '777') self.assertEqual(1, len(self.notifier.notifications)) def test_volume_types_with_volumes_destroy(self): self.stubs.Set(volume_types, 'get_volume_type', return_volume_types_get_volume_type) self.stubs.Set(volume_types, 'destroy', return_volume_types_with_volumes_destroy) req = fakes.HTTPRequest.blank('/v2/fake/types/1') self.assertEqual(0, len(self.notifier.notifications)) self.controller._delete(req, 1) self.assertEqual(1, len(self.notifier.notifications)) @mock.patch('cinder.volume.volume_types.destroy') @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.policy.enforce') def test_volume_types_delete_with_non_admin(self, mock_policy_enforce, mock_get, mock_destroy): # allow policy authorized user to delete type mock_policy_enforce.return_value = None mock_get.return_value = \ {'extra_specs': {"key1": "value1"}, 'id': 1, 'name': u'vol_type_1', 'description': u'vol_type_desc_1'} mock_destroy.side_effect = return_volume_types_destroy req = fakes.HTTPRequest.blank('/v2/fake/types/1', use_admin_context=False) self.assertEqual(0, len(self.notifier.notifications)) self.controller._delete(req, 1) self.assertEqual(1, len(self.notifier.notifications)) # non policy authorized user fails to delete type mock_policy_enforce.side_effect = ( exception.PolicyNotAuthorized(action='type_delete')) self.assertRaises(exception.PolicyNotAuthorized, self.controller._delete, req, 1) def test_create(self): self.stubs.Set(volume_types, 'create', return_volume_types_create) self.stubs.Set(volume_types, 'get_volume_type_by_name', return_volume_types_get_by_name) body = {"volume_type": {"name": "vol_type_1", "os-volume-type-access:is_public": True, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v2/fake/types') self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._create(req, body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, { 'expected_name': 'vol_type_1', 'expected_desc': 'vol_type_desc_1'}) @mock.patch('cinder.volume.volume_types.create') @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') def test_create_with_description_of_zero_length( self, mock_get_volume_type_by_name, mock_create_type): mock_get_volume_type_by_name.return_value = \ {'extra_specs': {"key1": "value1"}, 'id': 1, 'name': u'vol_type_1', 'description': u''} type_description = "" body = {"volume_type": {"name": "vol_type_1", "description": type_description, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v2/fake/types') res_dict = self.controller._create(req, body) self._check_test_results(res_dict, { 'expected_name': 'vol_type_1', 'expected_desc': ''}) def test_create_type_with_name_too_long(self): type_name = 'a' * 256 body = {"volume_type": {"name": type_name, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v2/fake/types') self.assertRaises(exception.InvalidInput, self.controller._create, req, body) def 
test_create_type_with_description_too_long(self): type_description = 'a' * 256 body = {"volume_type": {"name": "vol_type_1", "description": type_description, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v2/fake/types') self.assertRaises(exception.InvalidInput, self.controller._create, req, body) def test_create_duplicate_type_fail(self): self.stubs.Set(volume_types, 'create', return_volume_types_create_duplicate_type) self.stubs.Set(volume_types, 'get_volume_type_by_name', return_volume_types_get_by_name) body = {"volume_type": {"name": "vol_type_1", "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v2/fake/types') self.assertRaises(webob.exc.HTTPConflict, self.controller._create, req, body) def test_create_type_with_invalid_is_public(self): body = {"volume_type": {"name": "vol_type_1", "os-volume-type-access:is_public": "fake", "description": "test description", "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v2/fake/types') self.assertRaises(webob.exc.HTTPBadRequest, self.controller._create, req, body) def _create_volume_type_bad_body(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types') req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller._create, req, body) def test_create_no_body(self): self._create_volume_type_bad_body(body=None) def test_create_missing_volume(self): body = {'foo': {'a': 'b'}} self._create_volume_type_bad_body(body=body) def test_create_malformed_entity(self): body = {'volume_type': 'string'} self._create_volume_type_bad_body(body=body) @mock.patch('cinder.volume.volume_types.create') @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') @mock.patch('cinder.policy.enforce') def test_create_with_none_admin(self, mock_policy_enforce, mock_get_volume_type_by_name, mock_create_type): # allow policy authorized user to create type mock_policy_enforce.return_value = None mock_get_volume_type_by_name.return_value = \ {'extra_specs': {"key1": "value1"}, 'id': 1, 'name': u'vol_type_1', 'description': u'vol_type_desc_1'} body = {"volume_type": {"name": "vol_type_1", "os-volume-type-access:is_public": True, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v2/fake/types', use_admin_context=False) self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._create(req, body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, { 'expected_name': 'vol_type_1', 'expected_desc': 'vol_type_desc_1'}) # non policy authorized user fails to create type mock_policy_enforce.side_effect = ( exception.PolicyNotAuthorized(action='type_create')) self.assertRaises(exception.PolicyNotAuthorized, self.controller._create, req, body) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_update(self, mock_get, mock_update): mock_get.return_value = return_volume_types_get_volume_type_updated( '1', is_public=False) body = {"volume_type": {"name": "vol_type_1_1", "description": "vol_type_desc_1_1", "is_public": False}} req = fakes.HTTPRequest.blank('/v2/fake/types/1') req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, '1', body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, {'expected_desc': 'vol_type_desc_1_1', 'expected_name': 'vol_type_1_1', 'is_public': False}) @mock.patch('cinder.volume.volume_types.update') 
@mock.patch('cinder.volume.volume_types.get_volume_type') def test_update_type_with_description_having_length_zero( self, mock_get_volume_type, mock_type_update): mock_get_volume_type.return_value = \ {'id': 1, 'name': u'vol_type_1', 'description': u''} type_description = "" body = {"volume_type": {"description": type_description}} req = fakes.HTTPRequest.blank('/v2/fake/types/1') req.method = 'PUT' resp = self.controller._update(req, '1', body) self._check_test_results(resp, {'expected_desc': '', 'expected_name': 'vol_type_1'}) def test_update_type_with_name_too_long(self): type_name = 'a' * 256 body = {"volume_type": {"name": type_name, "description": ""}} req = fakes.HTTPRequest.blank('/v2/fake/types/1') req.method = 'PUT' self.assertRaises(exception.InvalidInput, self.controller._update, req, '1', body) def test_update_type_with_description_too_long(self): type_description = 'a' * 256 body = {"volume_type": {"description": type_description}} req = fakes.HTTPRequest.blank('/v2/fake/types/1') req.method = 'PUT' self.assertRaises(exception.InvalidInput, self.controller._update, req, '1', body) @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.volume.volume_types.update') def test_update_non_exist(self, mock_update, mock_get_volume_type): mock_get_volume_type.side_effect = exception.VolumeTypeNotFound( volume_type_id=777) body = {"volume_type": {"name": "vol_type_1_1", "description": "vol_type_desc_1_1"}} req = fakes.HTTPRequest.blank('/v2/fake/types/777') req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) self.assertRaises(webob.exc.HTTPNotFound, self.controller._update, req, '777', body) self.assertEqual(1, len(self.notifier.notifications)) @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.volume.volume_types.update') def test_update_db_fail(self, mock_update, mock_get_volume_type): mock_update.side_effect = exception.VolumeTypeUpdateFailed(id='1') mock_get_volume_type.return_value = stub_volume_type(1) body = {"volume_type": {"name": "vol_type_1_1", "description": "vol_type_desc_1_1"}} req = fakes.HTTPRequest.blank('/v2/fake/types/1') req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller._update, req, '1', body) self.assertEqual(1, len(self.notifier.notifications)) def test_update_no_name_no_description(self): body = {"volume_type": {}} req = fakes.HTTPRequest.blank('/v2/fake/types/1') req.method = 'PUT' self.assertRaises(webob.exc.HTTPBadRequest, self.controller._update, req, '1', body) def test_update_empty_name(self): body = {"volume_type": {"name": " ", "description": "something"}} req = fakes.HTTPRequest.blank('/v2/fake/types/1') req.method = 'PUT' self.assertRaises(webob.exc.HTTPBadRequest, self.controller._update, req, '1', body) @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.db.volume_type_update') @mock.patch('cinder.quota.VolumeTypeQuotaEngine.' 
'update_quota_resource') def test_update_only_name(self, mock_update_quota, mock_update, mock_get): mock_get.return_value = return_volume_types_get_volume_type_updated( '999') ctxt = context.RequestContext('admin', 'fake', True) body = {"volume_type": {"name": "vol_type_999"}} req = fakes.HTTPRequest.blank('/v2/fake/types/999') req.method = 'PUT' req.environ['cinder.context'] = ctxt self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, '999', body) self.assertEqual(1, len(self.notifier.notifications)) mock_update_quota.assert_called_once_with(ctxt, 'vol_type_999_999', 'vol_type_999') self._check_test_results(res_dict, {'expected_name': 'vol_type_999_999', 'expected_desc': 'vol_type_desc_999'}) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_update_only_description(self, mock_get, mock_update): mock_get.return_value = return_volume_types_get_volume_type_updated( '888') body = {"volume_type": {"description": "vol_type_desc_888_888"}} req = fakes.HTTPRequest.blank('/v2/fake/types/888') req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, '888', body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, {'expected_name': 'vol_type_888', 'expected_desc': 'vol_type_desc_888_888'}) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_update_only_is_public(self, mock_get, mock_update): is_public = False mock_get.return_value = return_volume_types_get_volume_type_updated( '123', is_public=is_public) body = {"volume_type": {"is_public": is_public}} req = fakes.HTTPRequest.blank('/v2/fake/types/123') req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, '123', body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, {'expected_name': 'vol_type_123_123', 'expected_desc': 'vol_type_desc_123_123', 'is_public': False}) def test_update_invalid_is_public(self): body = {"volume_type": {"name": "test", "description": "something", "is_public": "fake"}} req = fakes.HTTPRequest.blank('/v2/fake/types/1') req.method = 'PUT' self.assertRaises(webob.exc.HTTPBadRequest, self.controller._update, req, '1', body) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_rename_existing_name(self, mock_get, mock_update): mock_update.side_effect = exception.VolumeTypeExists( id="666", name="vol_type_666") mock_get.return_value = return_volume_types_get_volume_type_updated( '666') # first attempt fail body = {"volume_type": {"name": "vol_type_666"}} req = fakes.HTTPRequest.blank('/v2/fake/types/666') req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) self.assertRaises(webob.exc.HTTPConflict, self.controller._update, req, '666', body) self.assertEqual(1, len(self.notifier.notifications)) # delete self.notifier.reset() self.stubs.Set(volume_types, 'destroy', return_volume_types_destroy) req = fakes.HTTPRequest.blank('/v2/fake/types/1') self.assertEqual(0, len(self.notifier.notifications)) self.controller._delete(req, '1') self.assertEqual(1, len(self.notifier.notifications)) # update again mock_update.side_effect = mock.MagicMock() body = {"volume_type": {"name": "vol_type_666_666"}} req = fakes.HTTPRequest.blank('/v2/fake/types/666') req.method = 'PUT' self.notifier.reset() 
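        # reset() clears the notifications recorded by the failed update and
        # the delete above, so the retry below can assert on a clean count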
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller._update(req, '666', body)
        self._check_test_results(res_dict,
                                 {'expected_name': 'vol_type_666',
                                  'expected_desc': 'vol_type_desc_666'})
        self.assertEqual(1, len(self.notifier.notifications))

    @mock.patch('cinder.volume.volume_types.update')
    @mock.patch('cinder.volume.volume_types.get_volume_type')
    @mock.patch('cinder.policy.enforce')
    def test_update_with_non_admin(self, mock_policy_enforce, mock_get,
                                   mock_update):
        # allow policy authorized user to update type
        mock_policy_enforce.return_value = None
        mock_get.return_value = return_volume_types_get_volume_type_updated(
            '1', is_public=False)
        body = {"volume_type": {"name": "vol_type_1_1",
                                "description": "vol_type_desc_1_1",
                                "is_public": False}}
        req = fakes.HTTPRequest.blank('/v2/fake/types/1',
                                      use_admin_context=False)
        req.method = 'PUT'
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller._update(req, '1', body)
        self.assertEqual(1, len(self.notifier.notifications))
        self._check_test_results(res_dict,
                                 {'expected_desc': 'vol_type_desc_1_1',
                                  'expected_name': 'vol_type_1_1',
                                  'is_public': False})

        # non policy authorized user fails to update type
        mock_policy_enforce.side_effect = (
            exception.PolicyNotAuthorized(action='type_update'))
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller._update,
                          req, '1', body)

    def _check_test_results(self, results, expected_results):
        self.assertEqual(1, len(results))
        self.assertEqual(expected_results['expected_desc'],
                         results['volume_type']['description'])
        if expected_results.get('expected_name'):
            self.assertEqual(expected_results['expected_name'],
                             results['volume_type']['name'])
        if expected_results.get('is_public') is not None:
            self.assertEqual(expected_results['is_public'],
                             results['volume_type']['is_public'])

cinder-8.0.0/cinder/tests/unit/api/contrib/test_volume_migration_status_attribute.py

# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
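# The tests in this module check that the migration attributes are shown only
# to admin contexts. They inject a RequestContext directly into the WSGI
# environ instead of going through auth middleware; a minimal sketch of that
# request pattern, purely illustrative, using the app() helper defined below:
#
#     ctx = context.RequestContext('admin', 'fake', True)   # is_admin=True
#     req = webob.Request.blank('/v2/fake/volumes/detail')
#     req.method = 'GET'
#     req.environ['cinder.context'] = ctx
#     res = req.get_response(app())   # admin sees os-vol-mig-status-attr:*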

import uuid

from lxml import etree
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import webob

from cinder import context
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_volume
from cinder import volume


def fake_db_volume_get(*args, **kwargs):
    return {'id': 'fake',
            'host': 'host001',
            'status': 'available',
            'size': 5,
            'availability_zone': 'somewhere',
            'created_at': timeutils.utcnow(),
            'attach_status': 'detached',
            'display_name': 'anothervolume',
            'display_description': 'Just another volume!',
            'volume_type_id': None,
            'snapshot_id': None,
            'project_id': 'fake',
            'migration_status': 'migrating',
            '_name_id': 'fake2',
            }


def fake_volume_api_get(*args, **kwargs):
    ctx = context.RequestContext('admin', 'fake', True)
    db_volume = fake_db_volume_get()
    return fake_volume.fake_volume_obj(ctx, **db_volume)


def fake_volume_get_all(*args, **kwargs):
    return objects.VolumeList(objects=[fake_volume_api_get()])


def app():
    # no auth, just let environ['cinder.context'] pass through
    api = fakes.router.APIRouter()
    mapper = fakes.urlmap.URLMap()
    mapper['/v2'] = api
    return mapper


class VolumeMigStatusAttributeTest(test.TestCase):

    def setUp(self):
        super(VolumeMigStatusAttributeTest, self).setUp()
        self.stubs.Set(volume.api.API, 'get', fake_volume_api_get)
        self.stubs.Set(volume.api.API, 'get_all', fake_volume_get_all)
        self.UUID = uuid.uuid4()

    def test_get_volume_allowed(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = jsonutils.loads(res.body)['volume']
        self.assertEqual('migrating', vol['os-vol-mig-status-attr:migstat'])
        self.assertEqual('fake2', vol['os-vol-mig-status-attr:name_id'])

    def test_get_volume_unallowed(self):
        ctx = context.RequestContext('non-admin', 'fake', False)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = jsonutils.loads(res.body)['volume']
        self.assertNotIn('os-vol-mig-status-attr:migstat', vol)
        self.assertNotIn('os-vol-mig-status-attr:name_id', vol)

    def test_list_detail_volumes_allowed(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = jsonutils.loads(res.body)['volumes']
        self.assertEqual('migrating', vol[0]['os-vol-mig-status-attr:migstat'])
        self.assertEqual('fake2', vol[0]['os-vol-mig-status-attr:name_id'])

    def test_list_detail_volumes_unallowed(self):
        ctx = context.RequestContext('non-admin', 'fake', False)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = jsonutils.loads(res.body)['volumes']
        self.assertNotIn('os-vol-mig-status-attr:migstat', vol[0])
        self.assertNotIn('os-vol-mig-status-attr:name_id', vol[0])

    def test_list_simple_volumes_no_migration_status(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = jsonutils.loads(res.body)['volumes']
        self.assertNotIn('os-vol-mig-status-attr:migstat', vol[0])
        self.assertNotIn('os-vol-mig-status-attr:name_id', vol[0])

    def test_get_volume_xml(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = etree.XML(res.body)
        mig_key = ('{http://docs.openstack.org/volume/ext/'
                   'volume_mig_status_attribute/api/v1}migstat')
        self.assertEqual('migrating', vol.get(mig_key))
        mig_key = ('{http://docs.openstack.org/volume/ext/'
                   'volume_mig_status_attribute/api/v1}name_id')
        self.assertEqual('fake2', vol.get(mig_key))

    def test_list_volumes_detail_xml(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = list(etree.XML(res.body))[0]
        mig_key = ('{http://docs.openstack.org/volume/ext/'
                   'volume_mig_status_attribute/api/v1}migstat')
        self.assertEqual('migrating', vol.get(mig_key))
        mig_key = ('{http://docs.openstack.org/volume/ext/'
                   'volume_mig_status_attribute/api/v1}name_id')
        self.assertEqual('fake2', vol.get(mig_key))

cinder-8.0.0/cinder/tests/unit/api/contrib/test_admin_actions.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import webob
from webob import exc

from cinder.api.contrib import admin_actions
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api.contrib import test_backups
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import cast_as_call
from cinder.tests.unit import fake_snapshot
from cinder.volume import api as volume_api

CONF = cfg.CONF


def app():
    # no auth, just let environ['cinder.context'] pass through
    api = fakes.router.APIRouter()
    mapper = fakes.urlmap.URLMap()
    mapper['/v2'] = api
    return mapper


class BaseAdminTest(test.TestCase):
    def setUp(self):
        super(BaseAdminTest, self).setUp()
        self.volume_api = volume_api.API()
        # admin context
        self.ctx = context.RequestContext('admin', 'fake', True)

    def _create_volume(self, context, updates=None):
        db_volume = {'status': 'available',
                     'host': 'test',
                     'availability_zone': 'fake_zone',
                     'attach_status': 'detached'}
        if updates:
            db_volume.update(updates)

        volume = objects.Volume(context=context, **db_volume)
        volume.create()
        return volume


class AdminActionsTest(BaseAdminTest):
    def setUp(self):
        super(AdminActionsTest, self).setUp()

        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=self.tempdir,
                            group='oslo_concurrency')
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
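        # The fake RPC backend and the cast_as_call helpers configured below
        # turn asynchronous RPC casts into synchronous calls, so each test
        # can assert on results immediately after issuing a request.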
self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') cast_as_call.mock_cast_as_call(self.volume_api.volume_rpcapi.client) cast_as_call.mock_cast_as_call(self.volume_api.scheduler_rpcapi.client) def _issue_volume_reset(self, ctx, volume, updated_status): req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-reset_status': updated_status}) req.environ['cinder.context'] = ctx resp = req.get_response(app()) return resp def _issue_snapshot_reset(self, ctx, snapshot, updated_status): req = webob.Request.blank('/v2/fake/snapshots/%s/action' % snapshot.id) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-reset_status': updated_status}) req.environ['cinder.context'] = ctx resp = req.get_response(app()) return resp def _issue_backup_reset(self, ctx, backup, updated_status): req = webob.Request.blank('/v2/fake/backups/%s/action' % backup['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-reset_status': updated_status}) req.environ['cinder.context'] = ctx with mock.patch('cinder.backup.api.API._get_available_backup_service_host') \ as mock_get_backup_host: mock_get_backup_host.return_value = 'testhost' resp = req.get_response(app()) return resp def test_valid_updates(self): vac = admin_actions.VolumeAdminController() vac.validate_update({'status': 'creating'}) vac.validate_update({'status': 'available'}) vac.validate_update({'status': 'deleting'}) vac.validate_update({'status': 'error'}) vac.validate_update({'status': 'error_deleting'}) vac.validate_update({'attach_status': 'detached'}) vac.validate_update({'attach_status': 'attached'}) vac.validate_update({'migration_status': 'migrating'}) vac.validate_update({'migration_status': 'error'}) vac.validate_update({'migration_status': 'completing'}) vac.validate_update({'migration_status': 'none'}) vac.validate_update({'migration_status': 'starting'}) def test_reset_attach_status(self): volume = db.volume_create(self.ctx, {'attach_status': 'detached'}) resp = self._issue_volume_reset(self.ctx, volume, {'attach_status': 'attached'}) self.assertEqual(202, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('attached', volume['attach_status']) def test_reset_attach_invalid_status(self): volume = db.volume_create(self.ctx, {'attach_status': 'detached'}) resp = self._issue_volume_reset(self.ctx, volume, {'attach_status': 'bogus-status'}) self.assertEqual(400, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('detached', volume['attach_status']) def test_reset_migration_invalid_status(self): volume = db.volume_create(self.ctx, {'migration_status': None}) resp = self._issue_volume_reset(self.ctx, volume, {'migration_status': 'bogus-status'}) self.assertEqual(400, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertIsNone(volume['migration_status']) def test_reset_migration_status(self): volume = db.volume_create(self.ctx, {'migration_status': None}) resp = self._issue_volume_reset(self.ctx, volume, {'migration_status': 'migrating'}) self.assertEqual(202, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('migrating', volume['migration_status']) def test_reset_status_as_admin(self): volume = db.volume_create(self.ctx, {'status': 'available'}) resp = self._issue_volume_reset(self.ctx, volume, 
{'status': 'error'}) self.assertEqual(202, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('error', volume['status']) def test_reset_status_as_non_admin(self): ctx = context.RequestContext('fake', 'fake') volume = db.volume_create(self.ctx, {'status': 'error', 'size': 1}) resp = self._issue_volume_reset(ctx, volume, {'status': 'error'}) # request is not authorized self.assertEqual(403, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) # status is still 'error' self.assertEqual('error', volume['status']) def test_backup_reset_status_as_admin(self): volume = db.volume_create(self.ctx, {'status': 'available', 'user_id': 'user', 'project_id': 'project'}) backup = db.backup_create(self.ctx, {'status': fields.BackupStatus.AVAILABLE, 'size': 1, 'volume_id': volume['id'], 'user_id': 'user', 'project_id': 'project', 'host': 'test'}) resp = self._issue_backup_reset(self.ctx, backup, {'status': fields.BackupStatus.ERROR}) self.assertEqual(202, resp.status_int) def test_backup_reset_status_as_non_admin(self): ctx = context.RequestContext('fake', 'fake') backup = db.backup_create(ctx, {'status': 'available', 'size': 1, 'volume_id': "fakeid", 'host': 'test'}) resp = self._issue_backup_reset(ctx, backup, {'status': fields.BackupStatus.ERROR}) # request is not authorized self.assertEqual(403, resp.status_int) def test_backup_reset_status(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = db.backup_create(self.ctx, {'status': fields.BackupStatus.AVAILABLE, 'volume_id': volume['id'], 'user_id': 'user', 'project_id': 'project', 'host': 'test'}) resp = self._issue_backup_reset(self.ctx, backup, {'status': fields.BackupStatus.ERROR}) self.assertEqual(202, resp.status_int) def test_invalid_status_for_backup(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = db.backup_create(self.ctx, {'status': 'available', 'volume_id': volume['id']}) resp = self._issue_backup_reset(self.ctx, backup, {'status': 'restoring'}) self.assertEqual(400, resp.status_int) def test_backup_reset_status_with_invalid_backup(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = db.backup_create(self.ctx, {'status': fields.BackupStatus.AVAILABLE, 'volume_id': volume['id'], 'user_id': 'user', 'project_id': 'project'}) backup['id'] = 'fake_id' resp = self._issue_backup_reset(self.ctx, backup, {'status': fields.BackupStatus.ERROR}) # Should raise 404 if backup doesn't exist. 
self.assertEqual(404, resp.status_int) def test_malformed_reset_status_body(self): volume = db.volume_create(self.ctx, {'status': 'available', 'size': 1}) resp = self._issue_volume_reset(self.ctx, volume, {'x-status': 'bad'}) self.assertEqual(400, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('available', volume['status']) def test_invalid_status_for_volume(self): volume = db.volume_create(self.ctx, {'status': 'available', 'size': 1}) resp = self._issue_volume_reset(self.ctx, volume, {'status': 'invalid'}) self.assertEqual(400, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('available', volume['status']) def test_reset_status_for_missing_volume(self): req = webob.Request.blank('/v2/fake/volumes/%s/action' % 'missing-volume-id') req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-reset_status': {'status': 'available'}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) self.assertEqual(404, resp.status_int) self.assertRaises(exception.NotFound, db.volume_get, self.ctx, 'missing-volume-id') def test_reset_attached_status(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'attach_status': 'attached'}) resp = self._issue_volume_reset(self.ctx, volume, {'status': 'available', 'attach_status': 'detached'}) self.assertEqual(202, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('detached', volume['attach_status']) self.assertEqual('available', volume['status']) def test_invalid_reset_attached_status(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'attach_status': 'detached'}) resp = self._issue_volume_reset(self.ctx, volume, {'status': 'available', 'attach_status': 'invalid'}) self.assertEqual(400, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('available', volume['status']) self.assertEqual('detached', volume['attach_status']) def test_snapshot_reset_status(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'availability_zone': 'test', 'attach_status': 'detached'}) kwargs = { 'volume_id': volume['id'], 'cgsnapshot_id': None, 'user_id': self.ctx.user_id, 'project_id': self.ctx.project_id, 'status': 'error_deleting', 'progress': '0%', 'volume_size': volume['size'], 'metadata': {} } snapshot = objects.Snapshot(context=self.ctx, **kwargs) snapshot.create() self.addCleanup(snapshot.destroy) resp = self._issue_snapshot_reset(self.ctx, snapshot, {'status': 'error'}) self.assertEqual(202, resp.status_int) snapshot = objects.Snapshot.get_by_id(self.ctx, snapshot['id']) self.assertEqual('error', snapshot.status) def test_invalid_status_for_snapshot(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) snapshot = objects.Snapshot(self.ctx, status='available', volume_id=volume['id']) snapshot.create() self.addCleanup(snapshot.destroy) resp = self._issue_snapshot_reset(self.ctx, snapshot, {'status': 'attaching'}) self.assertEqual(400, resp.status_int) self.assertEqual('available', snapshot.status) def test_force_delete(self): # current status is creating volume = self._create_volume(self.ctx, {'size': 1, 'host': None}) req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' 
req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) # attach admin context to request req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) # request is accepted self.assertEqual(202, resp.status_int) # volume is deleted self.assertRaises(exception.NotFound, objects.Volume.get_by_id, self.ctx, volume.id) @mock.patch.object(volume_api.API, 'delete_snapshot', return_value=True) @mock.patch('cinder.objects.Snapshot.get_by_id') @mock.patch.object(db, 'snapshot_get') @mock.patch.object(db, 'volume_get') def test_force_delete_snapshot(self, volume_get, snapshot_get, get_by_id, delete_snapshot): volume = stubs.stub_volume(1) snapshot = stubs.stub_snapshot(1) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) volume_get.return_value = volume snapshot_get.return_value = snapshot get_by_id.return_value = snapshot_obj path = '/v2/fake/snapshots/%s/action' % snapshot['id'] req = webob.Request.blank(path) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) # attach admin context to request req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) self.assertEqual(202, resp.status_int) def _migrate_volume_prep(self): # create volume's current host and the destination host db.service_create(self.ctx, {'host': 'test', 'topic': CONF.volume_topic, 'created_at': timeutils.utcnow()}) db.service_create(self.ctx, {'host': 'test2', 'topic': CONF.volume_topic, 'created_at': timeutils.utcnow()}) # current status is available volume = self._create_volume(self.ctx) return volume def _migrate_volume_exec(self, ctx, volume, host, expected_status, force_host_copy=False): # build request to migrate to host req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-migrate_volume': {'host': host, 'force_host_copy': force_host_copy}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = ctx resp = req.get_response(app()) # verify status self.assertEqual(expected_status, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) return volume def test_migrate_volume_success(self): expected_status = 202 host = 'test2' volume = self._migrate_volume_prep() volume = self._migrate_volume_exec(self.ctx, volume, host, expected_status) self.assertEqual('starting', volume['migration_status']) def test_migrate_volume_fail_replication(self): expected_status = 400 host = 'test2' volume = self._migrate_volume_prep() # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'attach_status': '', 'replication_status': 'active'}) volume = self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_as_non_admin(self): expected_status = 403 host = 'test2' ctx = context.RequestContext('fake', 'fake') volume = self._migrate_volume_prep() self._migrate_volume_exec(ctx, volume, host, expected_status) def test_migrate_volume_without_host_parameter(self): expected_status = 400 host = 'test3' volume = self._migrate_volume_prep() # build request to migrate without host req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-migrate_volume': {'host': host, 'force_host_copy': False}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) 
# verify status self.assertEqual(expected_status, resp.status_int) def test_migrate_volume_host_no_exist(self): expected_status = 400 host = 'test3' volume = self._migrate_volume_prep() self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_same_host(self): expected_status = 400 host = 'test' volume = self._migrate_volume_prep() self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_migrating(self): expected_status = 400 host = 'test2' volume = self._migrate_volume_prep() model_update = {'migration_status': 'migrating'} volume = db.volume_update(self.ctx, volume['id'], model_update) self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_with_snap(self): expected_status = 400 host = 'test2' volume = self._migrate_volume_prep() snap = objects.Snapshot(self.ctx, volume_id=volume['id']) snap.create() self.addCleanup(snap.destroy) self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_bad_force_host_copy(self): expected_status = 400 host = 'test2' volume = self._migrate_volume_prep() self._migrate_volume_exec(self.ctx, volume, host, expected_status, force_host_copy='foo') def _migrate_volume_comp_exec(self, ctx, volume, new_volume, error, expected_status, expected_id, no_body=False): req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'new_volume': new_volume['id'], 'error': error} if no_body: body = {'': body} else: body = {'os-migrate_volume_completion': body} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = ctx resp = req.get_response(app()) resp_dict = resp.json # verify status self.assertEqual(expected_status, resp.status_int) if expected_id: self.assertEqual(expected_id, resp_dict['save_volume_id']) else: self.assertNotIn('save_volume_id', resp_dict) def test_migrate_volume_comp_as_non_admin(self): volume = db.volume_create(self.ctx, {'id': 'fake1'}) new_volume = db.volume_create(self.ctx, {'id': 'fake2'}) expected_status = 403 expected_id = None ctx = context.RequestContext('fake', 'fake') self._migrate_volume_comp_exec(ctx, volume, new_volume, False, expected_status, expected_id) def test_migrate_volume_comp_no_mig_status(self): volume1 = self._create_volume(self.ctx, {'migration_status': 'foo'}) volume2 = self._create_volume(self.ctx, {'migration_status': None}) expected_status = 400 expected_id = None self._migrate_volume_comp_exec(self.ctx, volume1, volume2, False, expected_status, expected_id) self._migrate_volume_comp_exec(self.ctx, volume2, volume1, False, expected_status, expected_id) def test_migrate_volume_comp_bad_mig_status(self): volume1 = self._create_volume(self.ctx, {'migration_status': 'migrating'}) volume2 = self._create_volume(self.ctx, {'migration_status': 'target:foo'}) expected_status = 400 expected_id = None self._migrate_volume_comp_exec(self.ctx, volume1, volume2, False, expected_status, expected_id) def test_migrate_volume_comp_no_action(self): volume = db.volume_create(self.ctx, {'id': 'fake1'}) new_volume = db.volume_create(self.ctx, {'id': 'fake2'}) expected_status = 400 expected_id = None ctx = context.RequestContext('fake', 'fake') self._migrate_volume_comp_exec(ctx, volume, new_volume, False, expected_status, expected_id, True) def test_migrate_volume_comp_from_nova(self): volume = self._create_volume(self.ctx, {'status': 'in-use', 'migration_status': None, 'attach_status': 'attached'}) new_volume 
= self._create_volume(self.ctx, {'migration_status': None, 'attach_status': 'detached'}) expected_status = 200 expected_id = new_volume.id self._migrate_volume_comp_exec(self.ctx, volume, new_volume, False, expected_status, expected_id) def test_backup_reset_valid_updates(self): vac = admin_actions.BackupAdminController() vac.validate_update({'status': 'available'}) vac.validate_update({'status': 'error'}) self.assertRaises(exc.HTTPBadRequest, vac.validate_update, {'status': 'restoring'}) self.assertRaises(exc.HTTPBadRequest, vac.validate_update, {'status': 'creating'}) @mock.patch('cinder.db.service_get_all_by_topic') @mock.patch('cinder.backup.api.API._check_support_to_force_delete') def _force_delete_backup_util(self, test_status, mock_check_support, _mock_service_get_all_by_topic): _mock_service_get_all_by_topic.return_value = [ {'availability_zone': "az1", 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow()}] # admin context mock_check_support.return_value = True # current status is dependent on argument: test_status. id = test_backups.BackupsAPITestCase._create_backup(status=test_status) req = webob.Request.blank('/v2/fake/backups/%s/action' % id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) req.environ['cinder.context'] = self.ctx res = req.get_response(app()) self.assertEqual(202, res.status_int) self.assertEqual('deleting', test_backups.BackupsAPITestCase. _get_backup_attrib(id, 'status')) db.backup_destroy(self.ctx, id) def test_delete_backup_force_when_creating(self): self._force_delete_backup_util('creating') def test_delete_backup_force_when_deleting(self): self._force_delete_backup_util('deleting') def test_delete_backup_force_when_restoring(self): self._force_delete_backup_util('restoring') def test_delete_backup_force_when_available(self): self._force_delete_backup_util('available') def test_delete_backup_force_when_error(self): self._force_delete_backup_util('error') def test_delete_backup_force_when_error_deleting(self): self._force_delete_backup_util('error_deleting') @mock.patch('cinder.backup.rpcapi.BackupAPI.check_support_to_force_delete', return_value=False) def test_delete_backup_force_when_not_supported(self, mock_check_support): # admin context self.override_config('backup_driver', 'cinder.backup.drivers.ceph') id = test_backups.BackupsAPITestCase._create_backup() req = webob.Request.blank('/v2/fake/backups/%s/action' % id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) req.environ['cinder.context'] = self.ctx res = req.get_response(app()) self.assertEqual(405, res.status_int) class AdminActionsAttachDetachTest(BaseAdminTest): def setUp(self): super(AdminActionsAttachDetachTest, self).setUp() # start service to handle rpc messages for attach requests self.svc = self.start_service('volume', host='test') def tearDown(self): self.svc.stop() super(AdminActionsAttachDetachTest, self).tearDown() def test_force_detach_instance_attached_volume(self): # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, stubs.FAKE_UUID, None, mountpoint, 'rw') # volume is attached volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('in-use', volume['status']) 
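        # attach() above was handled by the real volume service started in
        # setUp(), so the attachment record and admin metadata asserted on
        # below come from the actual manager code path, not from stubs.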
self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) admin_metadata = volume['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) self.assertEqual('attached_mode', admin_metadata[1]['key']) self.assertEqual('rw', admin_metadata[1]['value']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) # build request to force detach req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' # request status of 'error' body = {'os-force_detach': {'attachment_id': attachment['id'], 'connector': connector}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request resp = req.get_response(app()) # request is accepted self.assertEqual(202, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctx, attachment['id']) # status changed to 'available' self.assertEqual('available', volume['status']) admin_metadata = volume['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key'], 'readonly') self.assertEqual('False', admin_metadata[0]['value']) def test_force_detach_host_attached_volume(self): # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.initialize_connection(self.ctx, volume, connector) mountpoint = '/dev/vbd' host_name = 'fake-host' attachment = self.volume_api.attach(self.ctx, volume, None, host_name, mountpoint, 'ro') # volume is attached volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('in-use', volume['status']) self.assertIsNone(attachment['instance_uuid']) self.assertEqual(host_name, attachment['attached_host']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) admin_metadata = volume['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) self.assertEqual('attached_mode', admin_metadata[1]['key']) self.assertEqual('ro', admin_metadata[1]['value']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) # build request to force detach req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' # request status of 'error' body = {'os-force_detach': {'attachment_id': attachment['id'], 'connector': connector}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request resp = req.get_response(app()) # request is accepted self.assertEqual(202, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctx, attachment['id']) # status changed to 'available' self.assertEqual('available', volume['status']) admin_metadata = 
volume['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) def test_volume_force_detach_raises_remote_error(self): # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, stubs.FAKE_UUID, None, mountpoint, 'rw') # volume is attached volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('in-use', volume['status']) self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) admin_metadata = volume['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) self.assertEqual('attached_mode', admin_metadata[1]['key']) self.assertEqual('rw', admin_metadata[1]['value']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) # build request to force detach volume_remote_error = \ messaging.RemoteError(exc_type='VolumeAttachmentNotFound') with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': 'fake'}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request resp = req.get_response(app()) self.assertEqual(400, resp.status_int) # test for KeyError when missing connector volume_remote_error = ( messaging.RemoteError(exc_type='KeyError')) with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': 'fake'}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request self.assertRaises(messaging.RemoteError, req.get_response, app()) # test for VolumeBackendAPIException volume_remote_error = ( messaging.RemoteError(exc_type='VolumeBackendAPIException')) with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': 'fake', 'connector': connector}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request self.assertRaises(messaging.RemoteError, req.get_response, app()) def test_volume_force_detach_raises_db_error(self): # In case of DB error 500 error code is returned to user # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, stubs.FAKE_UUID, None, mountpoint, 'rw') # volume is 
attached volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('in-use', volume['status']) self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) admin_metadata = volume['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) self.assertEqual('attached_mode', admin_metadata[1]['key']) self.assertEqual('rw', admin_metadata[1]['value']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) # build request to force detach volume_remote_error = messaging.RemoteError(exc_type='DBError') with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': 'fake', 'connector': connector}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request self.assertRaises(messaging.RemoteError, req.get_response, app()) def test_attach_in_used_volume_by_instance(self): """Test that attaching to an in-use volume fails.""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.volume_api.attach(self.ctx, volume, fakes.get_fake_uuid(), None, '/dev/vbd0', 'rw') self.assertEqual('rw', conn_info['data']['access_mode']) self.assertRaises(exception.InvalidVolume, self.volume_api.attach, self.ctx, volume, fakes.get_fake_uuid(), None, '/dev/vdb1', 'ro') def test_attach_in_used_volume_by_host(self): """Test that attaching to an in-use volume fails.""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) self.volume_api.initialize_connection(self.ctx, volume, connector) self.volume_api.attach(self.ctx, volume, None, 'fake_host1', '/dev/vbd0', 'rw') conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) conn_info['data']['access_mode'] = 'rw' self.assertRaises(exception.InvalidVolume, self.volume_api.attach, self.ctx, volume, None, 'fake_host2', '/dev/vbd1', 'ro') def test_invalid_iscsi_connector(self): """Test connector without the initiator (required by iscsi driver).""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {} self.assertRaises(exception.InvalidInput, self.volume_api.initialize_connection, self.ctx, volume, connector) def test_attach_attaching_volume_with_different_instance(self): """Test that attaching volume reserved for another instance fails.""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) self.volume_api.reserve_volume(self.ctx, volume) values = {'volume_id': volume['id'], 'attach_status': 'attaching', 'attach_time': timeutils.utcnow(), 'instance_uuid': 'abc123', } db.volume_attach(self.ctx, values) db.volume_admin_metadata_update(self.ctx, volume['id'], 
{"attached_mode": 'rw'}, False) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, stubs.FAKE_UUID, None, mountpoint, 'rw') self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid']) self.assertEqual(volume['id'], attachment['volume_id'], volume['id']) self.assertEqual('attached', attachment['attach_status']) def test_attach_attaching_volume_with_different_mode(self): """Test that attaching volume reserved for another mode fails.""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) values = {'status': 'attaching', 'instance_uuid': fakes.get_fake_uuid()} db.volume_update(self.ctx, volume['id'], values) db.volume_admin_metadata_update(self.ctx, volume['id'], {"attached_mode": 'rw'}, False) mountpoint = '/dev/vbd' self.assertRaises(exception.InvalidVolume, self.volume_api.attach, self.ctx, volume, values['instance_uuid'], None, mountpoint, 'ro') cinder-8.0.0/cinder/tests/unit/api/contrib/test_cgsnapshots.py0000664000567000056710000005061312701406250025671 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for cgsnapshot code. """ from xml.dom import minidom import mock from oslo_serialization import jsonutils import webob from cinder.consistencygroup import api as consistencygroupAPI from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import utils import cinder.volume class CgsnapshotsAPITestCase(test.TestCase): """Test Case for cgsnapshots API.""" def setUp(self): super(CgsnapshotsAPITestCase, self).setUp() self.volume_api = cinder.volume.API() self.context = context.get_admin_context() self.context.project_id = 'fake' self.context.user_id = 'fake' def test_show_cgsnapshot(self): consistencygroup = utils.create_consistencygroup(self.context) volume_id = utils.create_volume(self.context, consistencygroup_id= consistencygroup.id)['id'] cgsnapshot = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot.id) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual('this is a test cgsnapshot', res_dict['cgsnapshot']['description']) self.assertEqual('test_cgsnapshot', res_dict['cgsnapshot']['name']) self.assertEqual('creating', res_dict['cgsnapshot']['status']) cgsnapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_show_cgsnapshot_xml_content_type(self): consistencygroup = utils.create_consistencygroup(self.context) volume_id = utils.create_volume(self.context, consistencygroup_id= consistencygroup.id)['id'] cgsnapshot = utils.create_cgsnapshot( self.context, 
consistencygroup_id=consistencygroup.id) req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot.id) req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) cgsnapshots = dom.getElementsByTagName('cgsnapshot') name = cgsnapshots.item(0).getAttribute('name') self.assertEqual("test_cgsnapshot", name.strip()) cgsnapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_show_cgsnapshot_with_cgsnapshot_NotFound(self): req = webob.Request.blank('/v2/fake/cgsnapshots/9999') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('CgSnapshot 9999 could not be found.', res_dict['itemNotFound']['message']) def test_list_cgsnapshots_json(self): consistencygroup = utils.create_consistencygroup(self.context) volume_id = utils.create_volume(self.context, consistencygroup_id= consistencygroup.id)['id'] cgsnapshot1 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) cgsnapshot2 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) cgsnapshot3 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) req = webob.Request.blank('/v2/fake/cgsnapshots') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual(cgsnapshot1.id, res_dict['cgsnapshots'][0]['id']) self.assertEqual('test_cgsnapshot', res_dict['cgsnapshots'][0]['name']) self.assertEqual(cgsnapshot2.id, res_dict['cgsnapshots'][1]['id']) self.assertEqual('test_cgsnapshot', res_dict['cgsnapshots'][1]['name']) self.assertEqual(cgsnapshot3.id, res_dict['cgsnapshots'][2]['id']) self.assertEqual('test_cgsnapshot', res_dict['cgsnapshots'][2]['name']) cgsnapshot3.destroy() cgsnapshot2.destroy() cgsnapshot1.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_list_cgsnapshots_xml(self): consistencygroup = utils.create_consistencygroup(self.context) volume_id = utils.create_volume(self.context, consistencygroup_id= consistencygroup.id)['id'] cgsnapshot1 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) cgsnapshot2 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) cgsnapshot3 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) req = webob.Request.blank('/v2/fake/cgsnapshots') req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) cgsnapshot_list = dom.getElementsByTagName('cgsnapshot') self.assertEqual(cgsnapshot1.id, cgsnapshot_list.item(0).getAttribute('id')) self.assertEqual(cgsnapshot2.id, cgsnapshot_list.item(1).getAttribute('id')) self.assertEqual(cgsnapshot3.id, cgsnapshot_list.item(2).getAttribute('id')) cgsnapshot3.destroy() cgsnapshot2.destroy() cgsnapshot1.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_list_cgsnapshots_detail_json(self): 
consistencygroup = utils.create_consistencygroup(self.context) volume_id = utils.create_volume(self.context, consistencygroup_id= consistencygroup.id)['id'] cgsnapshot1 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) cgsnapshot2 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) cgsnapshot3 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) req = webob.Request.blank('/v2/fake/cgsnapshots/detail') req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual('this is a test cgsnapshot', res_dict['cgsnapshots'][0]['description']) self.assertEqual('test_cgsnapshot', res_dict['cgsnapshots'][0]['name']) self.assertEqual(cgsnapshot1.id, res_dict['cgsnapshots'][0]['id']) self.assertEqual('creating', res_dict['cgsnapshots'][0]['status']) self.assertEqual('this is a test cgsnapshot', res_dict['cgsnapshots'][1]['description']) self.assertEqual('test_cgsnapshot', res_dict['cgsnapshots'][1]['name']) self.assertEqual(cgsnapshot2.id, res_dict['cgsnapshots'][1]['id']) self.assertEqual('creating', res_dict['cgsnapshots'][1]['status']) self.assertEqual('this is a test cgsnapshot', res_dict['cgsnapshots'][2]['description']) self.assertEqual('test_cgsnapshot', res_dict['cgsnapshots'][2]['name']) self.assertEqual(cgsnapshot3.id, res_dict['cgsnapshots'][2]['id']) self.assertEqual('creating', res_dict['cgsnapshots'][2]['status']) cgsnapshot3.destroy() cgsnapshot2.destroy() cgsnapshot1.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_list_cgsnapshots_detail_xml(self): consistencygroup = utils.create_consistencygroup(self.context) volume_id = utils.create_volume(self.context, consistencygroup_id= consistencygroup.id)['id'] cgsnapshot1 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) cgsnapshot2 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) cgsnapshot3 = utils.create_cgsnapshot( self.context, consistencygroup_id=consistencygroup.id) req = webob.Request.blank('/v2/fake/cgsnapshots/detail') req.method = 'GET' req.headers['Content-Type'] = 'application/xml' req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) dom = minidom.parseString(res.body) cgsnapshot_detail = dom.getElementsByTagName('cgsnapshot') self.assertEqual( 'this is a test cgsnapshot', cgsnapshot_detail.item(0).getAttribute('description')) self.assertEqual( 'test_cgsnapshot', cgsnapshot_detail.item(0).getAttribute('name')) self.assertEqual( cgsnapshot1.id, cgsnapshot_detail.item(0).getAttribute('id')) self.assertEqual( 'creating', cgsnapshot_detail.item(0).getAttribute('status')) self.assertEqual( 'this is a test cgsnapshot', cgsnapshot_detail.item(1).getAttribute('description')) self.assertEqual( 'test_cgsnapshot', cgsnapshot_detail.item(1).getAttribute('name')) self.assertEqual( cgsnapshot2.id, cgsnapshot_detail.item(1).getAttribute('id')) self.assertEqual( 'creating', cgsnapshot_detail.item(1).getAttribute('status')) self.assertEqual( 'this is a test cgsnapshot', cgsnapshot_detail.item(2).getAttribute('description')) self.assertEqual( 'test_cgsnapshot', cgsnapshot_detail.item(2).getAttribute('name')) self.assertEqual( cgsnapshot3.id, cgsnapshot_detail.item(2).getAttribute('id')) 
self.assertEqual( 'creating', cgsnapshot_detail.item(2).getAttribute('status')) cgsnapshot3.destroy() cgsnapshot2.destroy() cgsnapshot1.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_create_cgsnapshot_json(self, mock_validate): consistencygroup = utils.create_consistencygroup(self.context) utils.create_volume( self.context, consistencygroup_id=consistencygroup.id) body = {"cgsnapshot": {"name": "cg1", "description": "CG Snapshot 1", "consistencygroup_id": consistencygroup.id}} req = webob.Request.blank('/v2/fake/cgsnapshots') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(202, res.status_int) self.assertIn('id', res_dict['cgsnapshot']) self.assertTrue(mock_validate.called) consistencygroup.destroy() cgsnapshot = objects.CGSnapshot.get_by_id( context.get_admin_context(), res_dict['cgsnapshot']['id']) cgsnapshot.destroy() def test_create_cgsnapshot_with_no_body(self): # omit body from the request req = webob.Request.blank('/v2/fake/cgsnapshots') req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual("Missing required element 'cgsnapshot' in " "request body.", res_dict['badRequest']['message']) @mock.patch.object(consistencygroupAPI.API, 'create_cgsnapshot', side_effect=exception.InvalidCgSnapshot( reason='invalid cgsnapshot')) def test_create_with_invalid_cgsnapshot(self, mock_create_cgsnapshot): consistencygroup = utils.create_consistencygroup(self.context) utils.create_volume( self.context, consistencygroup_id=consistencygroup.id) body = {"cgsnapshot": {"name": "cg1", "description": "CG Snapshot 1", "consistencygroup_id": consistencygroup.id}} req = webob.Request.blank('/v2/fake/cgsnapshots') req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(400, res.status_int) self.assertEqual(400, res_dict['badRequest']['code']) self.assertEqual('Invalid CgSnapshot: invalid cgsnapshot', res_dict['badRequest']['message']) consistencygroup.destroy() @mock.patch.object(consistencygroupAPI.API, 'create_cgsnapshot', side_effect=exception.CgSnapshotNotFound( cgsnapshot_id='invalid_id')) def test_create_with_cgsnapshot_not_found(self, mock_create_cgsnapshot): consistencygroup = utils.create_consistencygroup(self.context) utils.create_volume( self.context, consistencygroup_id=consistencygroup.id) body = {"cgsnapshot": {"name": "cg1", "description": "CG Snapshot 1", "consistencygroup_id": consistencygroup.id}} req = webob.Request.blank('/v2/fake/cgsnapshots') req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) res_dict = jsonutils.loads(res.body) self.assertEqual(404, res.status_int) self.assertEqual(404, res_dict['itemNotFound']['code']) self.assertEqual('CgSnapshot invalid_id could not be found.', res_dict['itemNotFound']['message']) consistencygroup.destroy() 
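    # NOTE: the helper below is an editor's sketch, not part of the original
    # suite; the name "_post_cgsnapshot" is an assumption. Every creation
    # test in this class repeats the same JSON POST boilerplate, which a
    # helper like this could centralize:
    def _post_cgsnapshot(self, body):
        # Build and dispatch a JSON POST against the cgsnapshots resource
        # and hand the raw webob response back for the caller to assert on.
        req = webob.Request.blank('/v2/fake/cgsnapshots')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        return req.get_response(fakes.wsgi_app())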
    @mock.patch.object(objects.CGSnapshot, 'create')
    def test_create_cgsnapshot_from_empty_consistencygroup(
            self, mock_cgsnapshot_create):
        consistencygroup = utils.create_consistencygroup(self.context)

        body = {"cgsnapshot": {"name": "cg1",
                               "description": "CG Snapshot 1",
                               "consistencygroup_id": consistencygroup.id}}
        req = webob.Request.blank('/v2/fake/cgsnapshots')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(400, res.status_int)
        self.assertEqual(400, res_dict['badRequest']['code'])
        self.assertEqual('Invalid ConsistencyGroup: Consistency group is '
                         'empty. No cgsnapshot will be created.',
                         res_dict['badRequest']['message'])

        # If failed to create cgsnapshot, its DB object should not be created
        self.assertFalse(mock_cgsnapshot_create.called)

        consistencygroup.destroy()

    def test_delete_cgsnapshot_available(self):
        consistencygroup = utils.create_consistencygroup(self.context)
        volume_id = utils.create_volume(
            self.context, consistencygroup_id=consistencygroup.id)['id']
        cgsnapshot = utils.create_cgsnapshot(
            self.context, consistencygroup_id=consistencygroup.id,
            status='available')
        req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot.id)
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())

        cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
        self.assertEqual(202, res.status_int)
        self.assertEqual('deleting', cgsnapshot.status)

        cgsnapshot.destroy()
        db.volume_destroy(context.get_admin_context(), volume_id)
        consistencygroup.destroy()

    def test_delete_cgsnapshot_with_cgsnapshot_NotFound(self):
        req = webob.Request.blank('/v2/fake/cgsnapshots/9999')
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(404, res.status_int)
        self.assertEqual(404, res_dict['itemNotFound']['code'])
        self.assertEqual('CgSnapshot 9999 could not be found.',
                         res_dict['itemNotFound']['message'])

    def test_delete_cgsnapshot_with_Invalidcgsnapshot(self):
        consistencygroup = utils.create_consistencygroup(self.context)
        volume_id = utils.create_volume(
            self.context, consistencygroup_id=consistencygroup.id)['id']
        cgsnapshot = utils.create_cgsnapshot(
            self.context, consistencygroup_id=consistencygroup.id,
            status='invalid')
        req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot.id)
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(400, res.status_int)
        self.assertEqual(400, res_dict['badRequest']['code'])
        self.assertEqual('Invalid cgsnapshot',
                         res_dict['badRequest']['message'])

        cgsnapshot.destroy()
        db.volume_destroy(context.get_admin_context(), volume_id)
        consistencygroup.destroy()

cinder-8.0.0/cinder/tests/unit/api/contrib/test_hosts.py

# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from iso8601 import iso8601 from lxml import etree from oslo_utils import timeutils import webob.exc from cinder.api.contrib import hosts as os_hosts from cinder import context from cinder import db from cinder import test created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) curr_time = datetime.datetime(2013, 7, 3, 0, 0, 1) SERVICE_LIST = [ {'created_at': created_time, 'updated_at': curr_time, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder'}, {'created_at': created_time, 'updated_at': curr_time, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder'}, {'created_at': created_time, 'updated_at': curr_time, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder'}, {'created_at': created_time, 'updated_at': curr_time, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder'}, {'created_at': created_time, 'updated_at': None, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder'}, ] LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': curr_time}, {'service-status': 'available', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': curr_time}, {'service-status': 'available', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': curr_time}, {'service-status': 'available', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': curr_time}, {'service-status': 'unavailable', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': None}, ] def stub_utcnow(with_timezone=False): tzinfo = iso8601.Utc() if with_timezone else None return datetime.datetime(2013, 7, 3, 0, 0, 2, tzinfo=tzinfo) def stub_service_get_all(context, filters=None): return SERVICE_LIST class FakeRequest(object): environ = {'cinder.context': context.get_admin_context()} GET = {} class FakeRequestWithcinderZone(object): environ = {'cinder.context': context.get_admin_context()} GET = {'zone': 'cinder'} class HostTestCase(test.TestCase): """Test Case for hosts.""" def setUp(self): super(HostTestCase, self).setUp() self.controller = os_hosts.HostController() self.req = FakeRequest() self.stubs.Set(db, 'service_get_all', stub_service_get_all) self.stubs.Set(timeutils, 'utcnow', stub_utcnow) def _test_host_update(self, host, key, val, expected_value): body = {key: val} result = self.controller.update(self.req, host, body=body) self.assertEqual(expected_value, result[key]) def test_list_hosts(self): """Verify that the volume hosts are returned.""" hosts = os_hosts._list_hosts(self.req) self.assertEqual(LIST_RESPONSE, hosts) cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume') expected = [host for host in LIST_RESPONSE if host['service'] == 
                    'cinder-volume']
        self.assertEqual(expected, cinder_hosts)

    def test_list_hosts_with_zone(self):
        req = FakeRequestWithcinderZone()
        hosts = os_hosts._list_hosts(req)
        self.assertEqual(LIST_RESPONSE, hosts)

    def test_bad_status_value(self):
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, 'test.host.1', body={'status': 'bad'})
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, 'test.host.1',
                          body={'status': 'disablabc'})

    def test_bad_update_key(self):
        bad_body = {'crazy': 'bad'}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, 'test.host.1', body=bad_body)

    def test_bad_update_key_and_correct_update_key(self):
        bad_body = {'status': 'disable', 'crazy': 'bad'}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, 'test.host.1', body=bad_body)

    def test_good_update_keys(self):
        body = {'status': 'disable'}
        self.assertRaises(NotImplementedError, self.controller.update,
                          self.req, 'test.host.1', body=body)

    def test_bad_host(self):
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          self.req, 'bogus_host_name', body={'disabled': 0})

    def test_show_forbidden(self):
        self.req.environ['cinder.context'].is_admin = False
        dest = 'dummydest'
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.show,
                          self.req, dest)
        self.req.environ['cinder.context'].is_admin = True

    def test_show_host_not_exist(self):
        """A host given as an argument does not exist."""
        self.req.environ['cinder.context'].is_admin = True
        dest = 'dummydest'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show,
                          self.req, dest)


class HostSerializerTest(test.TestCase):
    def setUp(self):
        super(HostSerializerTest, self).setUp()
        self.deserializer = os_hosts.HostDeserializer()

    def test_index_serializer(self):
        serializer = os_hosts.HostIndexTemplate()
        text = serializer.serialize({"hosts": LIST_RESPONSE})
        tree = etree.fromstring(text)

        self.assertEqual('hosts', tree.tag)
        self.assertEqual(len(LIST_RESPONSE), len(tree))
        for i in range(len(LIST_RESPONSE)):
            self.assertEqual('host', tree[i].tag)
            self.assertEqual(LIST_RESPONSE[i]['service-status'],
                             tree[i].get('service-status'))
            self.assertEqual(LIST_RESPONSE[i]['service'],
                             tree[i].get('service'))
            self.assertEqual(LIST_RESPONSE[i]['zone'],
                             tree[i].get('zone'))
            self.assertEqual(LIST_RESPONSE[i]['service-state'],
                             tree[i].get('service-state'))
            self.assertEqual(LIST_RESPONSE[i]['host_name'],
                             tree[i].get('host_name'))
            self.assertEqual(str(LIST_RESPONSE[i]['last-update']),
                             tree[i].get('last-update'))

    def test_update_serializer_with_status(self):
        exemplar = dict(host='test.host.1', status='enabled')
        serializer = os_hosts.HostUpdateTemplate()
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)

        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))

    def test_update_deserializer(self):
        exemplar = dict(status='enabled', foo='bar')
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<updates><status>enabled</status><foo>bar</foo></updates>')
        result = self.deserializer.deserialize(intext)
        self.assertEqual(dict(body=exemplar), result)

cinder-8.0.0/cinder/tests/unit/api/contrib/test_volume_host_attribute.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from lxml import etree from oslo_serialization import jsonutils from oslo_utils import timeutils import webob from cinder import context from cinder import db from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_volume from cinder import volume def fake_db_volume_get(*args, **kwargs): return { 'id': 'fake', 'host': 'host001', 'status': 'available', 'size': 5, 'availability_zone': 'somewhere', 'created_at': timeutils.utcnow(), 'display_name': 'anothervolume', 'display_description': 'Just another volume!', 'volume_type_id': None, 'snapshot_id': None, 'project_id': 'fake', 'migration_status': None, '_name_id': 'fake2', 'attach_status': 'detached', } def fake_volume_api_get(*args, **kwargs): ctx = context.RequestContext('admin', 'fake', True) db_volume = fake_db_volume_get() return fake_volume.fake_volume_obj(ctx, **db_volume) def fake_volume_get_all(*args, **kwargs): return objects.VolumeList(objects=[fake_volume_api_get()]) def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v2'] = api return mapper class VolumeHostAttributeTest(test.TestCase): def setUp(self): super(VolumeHostAttributeTest, self).setUp() self.stubs.Set(volume.api.API, 'get', fake_volume_api_get) self.stubs.Set(volume.api.API, 'get_all', fake_volume_get_all) self.stubs.Set(db, 'volume_get', fake_db_volume_get) self.UUID = uuid.uuid4() def test_get_volume_allowed(self): ctx = context.RequestContext('admin', 'fake', True) req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volume'] self.assertEqual('host001', vol['os-vol-host-attr:host']) def test_get_volume_unallowed(self): ctx = context.RequestContext('non-admin', 'fake', False) req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volume'] self.assertNotIn('os-vol-host-attr:host', vol) def test_list_detail_volumes_allowed(self): ctx = context.RequestContext('admin', 'fake', True) req = webob.Request.blank('/v2/fake/volumes/detail') req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertEqual('host001', vol[0]['os-vol-host-attr:host']) def test_list_detail_volumes_unallowed(self): ctx = context.RequestContext('non-admin', 'fake', False) req = webob.Request.blank('/v2/fake/volumes/detail') req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertNotIn('os-vol-host-attr:host', vol[0]) def test_list_simple_volumes_no_host(self): ctx = context.RequestContext('admin', 'fake', True) req = webob.Request.blank('/v2/fake/volumes') req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertNotIn('os-vol-host-attr:host', 
                         vol[0])

    def test_get_volume_xml(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = etree.XML(res.body)
        host_key = ('{http://docs.openstack.org/volume/ext/'
                    'volume_host_attribute/api/v2}host')
        self.assertEqual('host001', vol.get(host_key))

    def test_list_volumes_detail_xml(self):
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = list(etree.XML(res.body))[0]
        host_key = ('{http://docs.openstack.org/volume/ext/'
                    'volume_host_attribute/api/v2}host')
        self.assertEqual('host001', vol.get(host_key))

cinder-8.0.0/cinder/tests/unit/api/test_xmlutil.py

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from lxml import etree

from cinder.api import xmlutil
from cinder import test


class SelectorTest(test.TestCase):
    obj_for_test = {'test': {'name': 'test',
                             'values': [1, 2, 3],
                             'attrs': {'foo': 1,
                                       'bar': 2,
                                       'baz': 3, }, }, }

    def test_empty_selector(self):
        sel = xmlutil.Selector()
        self.assertEqual(0, len(sel.chain))
        self.assertEqual(self.obj_for_test, sel(self.obj_for_test))

    def test_dict_selector(self):
        sel = xmlutil.Selector('test')
        self.assertEqual(1, len(sel.chain))
        self.assertEqual('test', sel.chain[0])
        self.assertEqual(self.obj_for_test['test'],
                         sel(self.obj_for_test))

    def test_datum_selector(self):
        sel = xmlutil.Selector('test', 'name')
        self.assertEqual(2, len(sel.chain))
        self.assertEqual('test', sel.chain[0])
        self.assertEqual('name', sel.chain[1])
        self.assertEqual('test', sel(self.obj_for_test))

    def test_list_selector(self):
        sel = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(3, len(sel.chain))
        self.assertEqual('test', sel.chain[0])
        self.assertEqual('values', sel.chain[1])
        self.assertEqual(0, sel.chain[2])
        self.assertEqual(1, sel(self.obj_for_test))

    def test_items_selector(self):
        sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(3, len(sel.chain))
        self.assertEqual(xmlutil.get_items, sel.chain[2])
        for key, val in sel(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], val)

    def test_missing_key_selector(self):
        sel = xmlutil.Selector('test2', 'attrs')
        self.assertIsNone(sel(self.obj_for_test))
        self.assertRaises(KeyError, sel, self.obj_for_test, True)

    def test_constant_selector(self):
        sel = xmlutil.ConstantSelector('Foobar')
        self.assertEqual('Foobar', sel.value)
        self.assertEqual('Foobar', sel(self.obj_for_test))


class TemplateElementTest(test.TestCase):
    def test_element_initial_attributes(self):
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)

        # Verify all the attributes are as
class TemplateElementTest(test.TestCase):
    def test_element_initial_attributes(self):
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)

        # Verify all the attributes are as expected
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(v, elem.attrib[k].chain[0])

    def test_element_get_attributes(self):
        expected = dict(a=1, b=2, c=3)

        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)

        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(v, elem.get(k).chain[0])

    def test_element_set_attributes(self):
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))

        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')

        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)

        # Now verify what got set
        self.assertEqual(1, len(elem.attrib['a'].chain))
        self.assertEqual('a', elem.attrib['a'].chain[0])
        self.assertEqual(1, len(elem.attrib['b'].chain))
        self.assertEqual('foo', elem.attrib['b'].chain[0])
        self.assertEqual(attrs['c'], elem.attrib['c'])

    def test_element_attribute_keys(self):
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())

        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)

        # Now verify keys
        self.assertEqual(expected, set(elem.keys()))

    def test_element_attribute_items(self):
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())

        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)

        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)

        # Did we visit all keys?
        self.assertEqual(0, len(keys))

    def test_element_selector_none(self):
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(0, len(elem.selector.chain))

    def test_element_selector_string(self):
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(1, len(elem.selector.chain))
        self.assertEqual('test', elem.selector.chain[0])

    def test_element_selector(self):
        sel = xmlutil.Selector('a', 'b')

        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(sel, elem.selector)

    def test_element_subselector_none(self):
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertIsNone(elem.subselector)

    def test_element_subselector_string(self):
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(1, len(elem.subselector.chain))
        self.assertEqual('test', elem.subselector.chain[0])

    def test_element_subselector(self):
        sel = xmlutil.Selector('a', 'b')

        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(sel, elem.subselector)

    def test_element_append_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')

        # Make sure the element starts off empty
        self.assertEqual(0, len(elem))

        # Create a child element
        child = xmlutil.TemplateElement('child')

        # Append the child to the parent
        elem.append(child)

        # Verify that the child was added
        self.assertEqual(1, len(elem))
        self.assertEqual(child, elem[0])
        self.assertIn('child', elem)
        self.assertEqual(child, elem['child'])

        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)

    def test_element_extend_children(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')

        # Make sure the element starts off empty
        self.assertEqual(0, len(elem))

        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'),
                    ]

        # Extend the parent by those children
        elem.extend(children)

        # Verify that the children were added
        self.assertEqual(3, len(elem))
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(children[idx], elem[children[idx].tag])

        # Ensure that multiple children of the same name are rejected
        children2 = [xmlutil.TemplateElement('child4'),
                     xmlutil.TemplateElement('child1'),
                     ]
        self.assertRaises(KeyError, elem.extend, children2)

        # Also ensure that child4 was not added
        self.assertEqual(3, len(elem))
        self.assertEqual('child3', elem[-1].tag)

    def test_element_insert_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')

        # Make sure the element starts off empty
        self.assertEqual(0, len(elem))

        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'),
                    ]

        # Extend the parent by those children
        elem.extend(children)

        # Create a child to insert
        child = xmlutil.TemplateElement('child4')

        # Insert it
        elem.insert(1, child)

        # Ensure the child was inserted in the right place
        self.assertEqual(4, len(elem))
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(children[idx], elem[children[idx].tag])

        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)

    def test_element_remove_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')

        # Make sure the element starts off empty
        self.assertEqual(0, len(elem))

        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'),
                    ]

        # Extend the parent by those children
        elem.extend(children)

        # Create a test child to remove
        child = xmlutil.TemplateElement('child2')

        # Try to remove it
        self.assertRaises(ValueError, elem.remove, child)

        # Ensure that no child was removed
        self.assertEqual(3, len(elem))

        # Now remove a legitimate child
        elem.remove(children[1])

        # Ensure that the child was removed
        self.assertEqual(2, len(elem))
        self.assertEqual(children[0], elem[0])
        self.assertEqual(children[2], elem[1])
        self.assertNotIn('child2', elem)

        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')

    def test_element_text(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')

        # Ensure that it has no text
        self.assertIsNone(elem.text)

        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertTrue(hasattr(elem.text, 'chain'))
        self.assertEqual(1, len(elem.text.chain))
        self.assertEqual('test', elem.text.chain[0])

        # Try resetting the text to None
        elem.text = None
        self.assertIsNone(elem.text)

        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(sel, elem.text)

        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertIsNone(elem.text)

    def test_apply_attrs(self):
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)

        # Create an etree element
        elem = etree.Element('test')

        # Apply the template to the element
        tmpl_elem.apply(elem, None)

        # Now, verify the correct attributes were set
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)

    def test_apply_text(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)

        # Create an etree element
        elem = etree.Element('test')

        # Apply the template to the element
        tmpl_elem.apply(elem, None)

        # Now, verify the text was set
        self.assertEqual(str(tmpl_elem.text.value), elem.text)

    def test__render(self):
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))

        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])

        # Create a couple of slave template element
        slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
                       xmlutil.TemplateElement('test', attr3=attrs['attr3']),
                       ]

        # Try the render
        elem = master_elem._render(None, None, slave_elems, None)

        # Verify the particulars of the render
        self.assertEqual('test', elem.tag)
        self.assertEqual(0, len(elem.nsmap))
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)

        # Create a parent for the element to be rendered
        parent = etree.Element('parent')

        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))

        # Verify the particulars of the render
        self.assertEqual(1, len(parent))
        self.assertEqual(parent[0], elem)
        self.assertEqual(1, len(elem.nsmap))
        self.assertEqual('foo', elem.nsmap['a'])

    def test_render(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()

        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']

        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(0, len(elems))

        # Try a render with one object
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(1, len(elems))
        self.assertEqual('foo', elems[0][0].text)
        self.assertEqual('foo', elems[0][1])

        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(4, len(elems))

        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(obj[idx], elems[idx][0].text)
            self.assertEqual(obj[idx], elems[idx][1])

    def test_subelement(self):
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual('parent', parent.tag)
        self.assertEqual(0, len(parent))

        # Now try it with a parent element
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual('child', child.tag)
        self.assertEqual(1, len(parent))
        self.assertEqual(parent[0], child)

    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem, elem.unwrap())
        self.assertEqual(elem, elem.wrap().root)

    def test_dyntag(self):
        obj = ['a', 'b', 'c']

        # Create a template element with a dynamic tag
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())

        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)

        # Verify the particulars of the render
        self.assertEqual(len(obj), len(elems))
        for idx in range(len(obj)):
            self.assertEqual(obj[idx], elems[idx][0].tag)


class TemplateTest(test.TestCase):
    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertEqual(elem, tmpl.unwrap())
        self.assertEqual(tmpl, tmpl.wrap())

    def test__siblings(self):
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)

        # Check that we get the right siblings
        siblings = tmpl._siblings()
        self.assertEqual(1, len(siblings))
        self.assertEqual(elem, siblings[0])

    def test__splitTagName(self):
        test_cases = [
            ('a', ['a']),
            ('a:b', ['a', 'b']),
            ('{http://test.com}a:b', ['{http://test.com}a', 'b']),
            ('a:b{http://test.com}:c', ['a', 'b{http://test.com}', 'c']),
        ]

        for test_case, expected in test_cases:
            result = xmlutil.TemplateElement._splitTagName(test_case)
            self.assertEqual(expected, result)

    def test__nsmap(self):
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))

        # Check out that we get the right namespace dictionary
        nsmap = tmpl._nsmap()
        self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
        self.assertEqual(1, len(nsmap))
        self.assertEqual('foo', nsmap['a'])

    def test_master_attach(self):
        # Set up a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1)

        # Make sure it has a root but no slaves
        self.assertEqual(elem, tmpl.root)
        self.assertEqual(0, len(tmpl.slaves))

        # Try to attach an invalid slave
        bad_elem = xmlutil.TemplateElement('test2')
        self.assertRaises(ValueError, tmpl.attach, bad_elem)
        self.assertEqual(0, len(tmpl.slaves))

        # Try to attach an invalid and a valid slave
        good_elem = xmlutil.TemplateElement('test')
        self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
        self.assertEqual(0, len(tmpl.slaves))

        # Try to attach an inapplicable template
        class InapplicableTemplate(xmlutil.Template):
            def apply(self, master):
                return False
        inapp_tmpl = InapplicableTemplate(good_elem)
        tmpl.attach(inapp_tmpl)
        self.assertEqual(0, len(tmpl.slaves))

        # Now try attaching an applicable template
        tmpl.attach(good_elem)
        self.assertEqual(1, len(tmpl.slaves))
        self.assertEqual(good_elem, tmpl.slaves[0].root)

    def test_master_copy(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))

        # Give it a slave
        slave = xmlutil.TemplateElement('test')
        tmpl.attach(slave)

        # Construct a copy
        copy = tmpl.copy()

        # Check to see if we actually managed a copy
        self.assertNotEqual(tmpl, copy)
        self.assertEqual(tmpl.root, copy.root)
        self.assertEqual(tmpl.version, copy.version)
        self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
        self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
        self.assertEqual(len(tmpl.slaves), len(copy.slaves))
        self.assertEqual(tmpl.slaves[0], copy.slaves[0])

    def test_slave_apply(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        master = xmlutil.MasterTemplate(elem, 3)

        # Construct a slave template with applicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 2)
        self.assertTrue(slave.apply(master))

        # Construct a slave template with equal minimum version
        slave = xmlutil.SlaveTemplate(elem, 3)
        self.assertTrue(slave.apply(master))

        # Construct a slave template with inapplicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 4)
        self.assertFalse(slave.apply(master))

        # Construct a slave template with applicable version range
        slave = xmlutil.SlaveTemplate(elem, 2, 4)
        self.assertTrue(slave.apply(master))

        # Construct a slave template with low version range
        slave = xmlutil.SlaveTemplate(elem, 1, 2)
        self.assertFalse(slave.apply(master))

        # Construct a slave template with high version range
        slave = xmlutil.SlaveTemplate(elem, 4, 5)
        self.assertFalse(slave.apply(master))

        # Construct a slave template with matching version range
        slave = xmlutil.SlaveTemplate(elem, 3, 3)
        self.assertTrue(slave.apply(master))

    def test__serialize(self):
        # Our test object to serialize
        obj = {'test': {'name': 'foobar',
                        'values': [1, 2, 3, 4],
                        'attrs': {'a': 1,
                                  'b': 2,
                                  'c': 3,
                                  'd': 4, },
                        'image': {'name': 'image_foobar', 'id': 42, }, }, }

        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        value = xmlutil.SubTemplateElement(root, 'value', selector='values')
        value.text = xmlutil.Selector()
        attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
        xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
                                   key=0, value=1)
        master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))

        # Set up our slave template
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image', id='id')
        image.text = xmlutil.Selector('name')
        slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))

        # Attach the slave to the master...
        master.attach(slave)

        # Try serializing our object
        siblings = master._siblings()
        nsmap = master._nsmap()
        result = master._serialize(None, obj, siblings, nsmap)

        # Now we get to manually walk the element tree...
        self.assertEqual('test', result.tag)
        self.assertEqual(2, len(result.nsmap))
        self.assertEqual('foo', result.nsmap['f'])
        self.assertEqual('bar', result.nsmap['b'])
        self.assertEqual(result.get('name'), obj['test']['name'])
        for idx, val in enumerate(obj['test']['values']):
            self.assertEqual('value', result[idx].tag)
            self.assertEqual(str(val), result[idx].text)

        idx += 1
        self.assertEqual('attrs', result[idx].tag)
        for attr in result[idx]:
            self.assertEqual('attr', attr.tag)
            self.assertEqual(str(obj['test']['attrs'][attr.get('key')]),
                             attr.get('value'))

        idx += 1
        self.assertEqual('image', result[idx].tag)
        self.assertEqual(str(obj['test']['image']['id']),
                         result[idx].get('id'))
        self.assertEqual(obj['test']['image']['name'], result[idx].text)

    def test_serialize_with_delimiter(self):
        # Our test object to serialize
        obj = {'test': {'scope0:key1': 'Value1',
                        'scope0:scope1:key2': 'Value2',
                        'scope0:scope1:scope2:key3': 'Value3'
                        }}

        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test')
        key1 = xmlutil.SubTemplateElement(root, 'scope0:key1',
                                          selector='scope0:key1')
        key1.text = xmlutil.Selector()
        key2 = xmlutil.SubTemplateElement(root, 'scope0:scope1:key2',
                                          selector='scope0:scope1:key2')
        key2.text = xmlutil.Selector()
        key3 = xmlutil.SubTemplateElement(root, 'scope0:scope1:scope2:key3',
                                          selector='scope0:scope1:scope2:key3')
        key3.text = xmlutil.Selector()
        serializer = xmlutil.MasterTemplate(root, 1)
        expected_xml = (b"<?xmlversion='1.0'encoding='UTF-8'?><test>"
                        b"<scope0><key1>Value1</key1><scope1><key2>"
                        b"Value2</key2><scope2><key3>Value3"
                        b"</key3></scope2></scope1></scope0></test>")
        result = serializer.serialize(obj)
        result = result.replace(b'\n', b'').replace(b' ', b'')
        self.assertEqual(expected_xml, result)


class MasterTemplateBuilder(xmlutil.TemplateBuilder):
    def construct(self):
        elem = xmlutil.TemplateElement('test')
        return xmlutil.MasterTemplate(elem, 1)


class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
    def construct(self):
        elem = xmlutil.TemplateElement('test')
        return xmlutil.SlaveTemplate(elem, 1)


class TemplateBuilderTest(test.TestCase):
    def test_master_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertIsNone(MasterTemplateBuilder._tmpl)

        # Now, construct the template
        tmpl1 = MasterTemplateBuilder()

        # Make sure that there is a template cached...
        self.assertIsNotNone(MasterTemplateBuilder._tmpl)

        # Make sure it wasn't what was returned...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)

        # Make sure it doesn't get rebuilt
        cached = MasterTemplateBuilder._tmpl
        tmpl2 = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, cached)

        # Make sure we're always getting fresh copies
        self.assertNotEqual(tmpl1, tmpl2)

        # Make sure we can override the copying behavior
        tmpl3 = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)

    def test_slave_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertIsNone(SlaveTemplateBuilder._tmpl)

        # Now, construct the template
        tmpl1 = SlaveTemplateBuilder()

        # Make sure there is a template cached...
        self.assertIsNotNone(SlaveTemplateBuilder._tmpl)

        # Make sure it was what was returned...
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)

        # Make sure it doesn't get rebuilt
        tmpl2 = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)

        # Make sure we're always getting the cached copy
        self.assertEqual(tmpl1, tmpl2)


class MiscellaneousXMLUtilTests(test.TestCase):
    def test_make_flat_dict(self):
        expected_xml = (b"<?xml version='1.0' encoding='UTF-8'?>\n"
                        b'<wrapper><a>foo</a></wrapper>')
        root = xmlutil.make_flat_dict('wrapper')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo')))
        self.assertEqual(expected_xml, result)
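# Editorial usage sketch, not part of the original module: the smallest
# end-to-end use of the template API exercised above. The element and field
# names are invented for illustration; the call pattern (TemplateElement ->
# MasterTemplate -> serialize) is the one the tests rely on.
def _template_usage_sketch():
    """Serialize a nested dict with a two-level master template."""
    root = xmlutil.TemplateElement('server', selector='server', name='name')
    addr = xmlutil.SubTemplateElement(root, 'address', selector='addresses')
    addr.text = xmlutil.Selector()
    tmpl = xmlutil.MasterTemplate(root, 1)
    # Renders <server name="vm-1"><address>10.0.0.5</address></server>
    return tmpl.serialize({'server': {'name': 'vm-1',
                                      'addresses': ['10.0.0.5']}})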
cinder-8.0.0/cinder/tests/unit/api/views/
cinder-8.0.0/cinder/tests/unit/api/views/__init__.py
cinder-8.0.0/cinder/tests/unit/api/views/test_versions.py

# Copyright 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

import ddt
import mock

from cinder.api.views import versions
from cinder import test


class FakeRequest(object):
    def __init__(self, application_url):
        self.application_url = application_url


URL_BASE = 'http://localhost/'
FAKE_HREF = URL_BASE + 'v1/'

FAKE_VERSIONS = {
    "v1.0": {
        "id": "v1.0",
        "status": "CURRENT",
        "version": "1.1",
        "min_version": "1.0",
        "updated": "2015-07-30T11:33:21Z",
        "links": [
            {
                "rel": "describedby",
                "type": "text/html",
                "href": 'http://docs.openstack.org/',
            },
        ],
        "media-types": [
            {
                "base": "application/json",
                "type": "application/vnd.openstack.share+json;version=1",
            },
            {
                "base": "application/xml",
                "type": "application/vnd.openstack.share+xml;version=1",
            }
        ],
    },
}

FAKE_LINKS = [
    {
        "rel": "describedby",
        "type": "text/html",
        "href": 'http://docs.openstack.org/',
    },
    {
        'rel': 'self',
        'href': FAKE_HREF
    },
]


@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
    def _get_builder(self):
        request = FakeRequest('fake')
        return versions.get_view_builder(request)

    def test_build_versions(self):
        self.mock_object(versions.ViewBuilder, '_build_links',
                         mock.Mock(return_value=FAKE_LINKS))

        result = self._get_builder().build_versions(FAKE_VERSIONS)

        expected = {'versions': list(FAKE_VERSIONS.values())}
        expected['versions'][0]['links'] = FAKE_LINKS

        self.assertEqual(expected, result)

    def test_build_version(self):
        self.mock_object(versions.ViewBuilder, '_build_links',
                         mock.Mock(return_value=FAKE_LINKS))

        result = self._get_builder()._build_version(FAKE_VERSIONS['v1.0'])

        expected = copy.deepcopy(FAKE_VERSIONS['v1.0'])
        expected['links'] = FAKE_LINKS

        self.assertEqual(expected, result)

    def test_build_links(self):
        self.mock_object(versions.ViewBuilder, '_generate_href',
                         mock.Mock(return_value=FAKE_HREF))

        result = self._get_builder()._build_links(FAKE_VERSIONS['v1.0'])

        self.assertEqual(FAKE_LINKS, result)

    def test_generate_href_defaults(self):
        self.mock_object(versions.ViewBuilder,
                         '_get_base_url_without_version',
                         mock.Mock(return_value=URL_BASE))

        result = self._get_builder()._generate_href()

        self.assertEqual('http://localhost/v3/', result)

    @ddt.data(
        ('v2', None, URL_BASE + 'v2/'),
        ('/v2/', None, URL_BASE + 'v2/'),
        ('/v2/', 'fake_path', URL_BASE + 'v2/fake_path'),
        ('/v2/', '/fake_path/', URL_BASE + 'v2/fake_path/'),
    )
    @ddt.unpack
    def test_generate_href_no_path(self, version, path, expected):
        self.mock_object(versions.ViewBuilder,
                         '_get_base_url_without_version',
                         mock.Mock(return_value=URL_BASE))

        result = self._get_builder()._generate_href(version=version,
                                                    path=path)

        self.assertEqual(expected, result)

    @ddt.data(
        ('http://1.1.1.1/', 'http://1.1.1.1/'),
        ('http://localhost/', 'http://localhost/'),
        ('http://1.1.1.1/v1/', 'http://1.1.1.1/'),
        ('http://1.1.1.1/v1', 'http://1.1.1.1/'),
        ('http://1.1.1.1/v11', 'http://1.1.1.1/'),
    )
    @ddt.unpack
    def test_get_base_url_without_version(self, base_url,
                                          base_url_no_version):
        request = FakeRequest(base_url)
        builder = versions.get_view_builder(request)
        result = builder._get_base_url_without_version()
        self.assertEqual(base_url_no_version, result)
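# Editorial usage sketch, not part of the original module: how the view
# builder tested above is reached in practice. FakeRequest stands in for a
# webob request; the private helper is the one the ddt cases exercise.
def _view_builder_sketch():
    builder = versions.get_view_builder(FakeRequest('http://localhost/v3/'))
    # Per the ddt cases above this yields 'http://localhost/v2/fake_path'.
    return builder._generate_href(version='v2', path='fake_path')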
cinder-8.0.0/cinder/tests/unit/api/test_versions.py

# Copyright 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ddt
import mock
from oslo_serialization import jsonutils
from oslo_utils import encodeutils

from cinder.api.openstack import api_version_request
from cinder.api.openstack import wsgi
from cinder.api.v1 import router
from cinder.api import versions
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes


VERSION_HEADER_NAME = 'OpenStack-API-Version'
VOLUME_SERVICE = 'volume '


@ddt.ddt
class VersionsControllerTestCase(test.TestCase):
    def setUp(self):
        super(VersionsControllerTestCase, self).setUp()
        self.wsgi_apps = (versions.Versions(), router.APIRouter())

    def build_request(self, base_dir=None, base_url='http://localhost/v3',
                      header_version=None):
        if base_dir:
            req = fakes.HTTPRequest.blank(base_dir, base_url=base_url)
        else:
            req = fakes.HTTPRequest.blank('/', base_url=base_url)
        req.method = 'GET'
        req.content_type = 'application/json'
        if header_version:
            req.headers = {VERSION_HEADER_NAME:
                           VOLUME_SERVICE + header_version}
        return req

    def check_response(self, response, version):
        self.assertEqual(VOLUME_SERVICE + version,
                         response.headers[VERSION_HEADER_NAME])
        self.assertEqual(VERSION_HEADER_NAME, response.headers['Vary'])

    @ddt.data('1.0', '2.0', '3.0')
    def test_versions_root(self, version):
        req = self.build_request(base_url='http://localhost')

        response = req.get_response(versions.Versions())
        self.assertEqual(300, response.status_int)
        body = jsonutils.loads(response.body)
        version_list = body['versions']

        ids = [v['id'] for v in version_list]
        self.assertEqual({'v1.0', 'v2.0', 'v3.0'}, set(ids))

        v1 = [v for v in version_list if v['id'] == 'v1.0'][0]
        self.assertEqual('', v1.get('min_version'))
        self.assertEqual('', v1.get('version'))

        v2 = [v for v in version_list if v['id'] == 'v2.0'][0]
        self.assertEqual('', v2.get('min_version'))
        self.assertEqual('', v2.get('version'))

        v3 = [v for v in version_list if v['id'] == 'v3.0'][0]
        self.assertEqual(api_version_request._MAX_API_VERSION,
                         v3.get('version'))
        self.assertEqual(api_version_request._MIN_API_VERSION,
                         v3.get('min_version'))

    def test_versions_v1_no_header(self):
        req = self.build_request(base_url='http://localhost/v1')

        response = req.get_response(router.APIRouter())
        self.assertEqual(200, response.status_int)

    def test_versions_v2_no_header(self):
        req = self.build_request(base_url='http://localhost/v2')

        response = req.get_response(router.APIRouter())
        self.assertEqual(200, response.status_int)

    @ddt.data('1.0')
    def test_versions_v1(self, version):
        req = self.build_request(base_url='http://localhost/v1',
                                 header_version=version)
        if version is not None:
            req.headers = {VERSION_HEADER_NAME: VOLUME_SERVICE + version}

        response = req.get_response(router.APIRouter())
        self.assertEqual(200, response.status_int)
        body = jsonutils.loads(response.body)
        version_list = body['versions']

        ids = [v['id'] for v in version_list]
        self.assertEqual({'v1.0'}, set(ids))
        self.assertEqual('', version_list[0].get('min_version'))
        self.assertEqual('', version_list[0].get('version'))

    @ddt.data('2.0')
    def test_versions_v2(self, version):
        req = self.build_request(base_url='http://localhost/v2',
                                 header_version=version)

        response = req.get_response(router.APIRouter())
        self.assertEqual(200, response.status_int)
        body = jsonutils.loads(response.body)
        version_list = body['versions']

        ids = [v['id'] for v in version_list]
        self.assertEqual({'v2.0'}, set(ids))
        self.assertEqual('', version_list[0].get('min_version'))
        self.assertEqual('', version_list[0].get('version'))

    @ddt.data('3.0', 'latest')
    def test_versions_v3_0_and_latest(self, version):
        req = self.build_request(header_version=version)

        response = req.get_response(router.APIRouter())
        self.assertEqual(200, response.status_int)
        body = jsonutils.loads(response.body)
        version_list = body['versions']

        ids = [v['id'] for v in version_list]
        self.assertEqual({'v3.0'}, set(ids))
        self.check_response(response, '3.0')

        self.assertEqual(api_version_request._MAX_API_VERSION,
                         version_list[0].get('version'))
        self.assertEqual(api_version_request._MIN_API_VERSION,
                         version_list[0].get('min_version'))

    def test_versions_version_latest(self):
        req = self.build_request(header_version='latest')

        response = req.get_response(router.APIRouter())
        self.assertEqual(200, response.status_int)
        self.check_response(response, api_version_request._MAX_API_VERSION)

    def test_versions_version_invalid(self):
        req = self.build_request(header_version='2.0.1')

        for app in self.wsgi_apps:
            response = req.get_response(app)
            self.assertEqual(400, response.status_int)

    def test_versions_version_not_found(self):
        api_version_request_4_0 = api_version_request.APIVersionRequest('4.0')
        self.mock_object(api_version_request,
                         'max_api_version',
                         mock.Mock(return_value=api_version_request_4_0))

        class Controller(wsgi.Controller):

            @wsgi.Controller.api_version('3.0', '3.0')
            def index(self, req):
                return 'off'

        req = self.build_request(header_version='3.5')
        app = fakes.TestRouter(Controller())

        response = req.get_response(app)

        self.assertEqual(404, response.status_int)

    def test_versions_version_not_acceptable(self):
        req = self.build_request(header_version='4.0')

        response = req.get_response(router.APIRouter())

        self.assertEqual(406, response.status_int)

    @ddt.data(['volume 3.0, compute 2.22', True],
              ['volume 3.0, compute 2.22, identity 2.3', True],
              ['compute 2.22, identity 2.3', False])
    @ddt.unpack
    def test_versions_multiple_services_header(
            self, service_list, should_pass):
        req = self.build_request()
        req.headers = {VERSION_HEADER_NAME: service_list}

        try:
            response = req.get_response(router.APIRouter())
        except exception.VersionNotFoundForAPIMethod:
            if should_pass:
                raise
            elif not should_pass:
                return

        self.assertEqual(200, response.status_int)
        body = jsonutils.loads(response.body)
        version_list = body['versions']

        ids = [v['id'] for v in version_list]
        self.assertEqual({'v3.0'}, set(ids))
        self.check_response(response, '3.0')

        self.assertEqual(api_version_request._MAX_API_VERSION,
                         version_list[0].get('version'))
        self.assertEqual(api_version_request._MIN_API_VERSION,
                         version_list[0].get('min_version'))

    @ddt.data(['3.5', 200], ['3.55', 404])
    @ddt.unpack
    def test_req_version_matches(self, version, HTTP_ret):
        version_request = api_version_request.APIVersionRequest(version)
        self.mock_object(api_version_request,
                         'max_api_version',
                         mock.Mock(return_value=version_request))

        class Controller(wsgi.Controller):

            @wsgi.Controller.api_version('3.0', '3.6')
            def index(self, req):
                return 'off'

        req = self.build_request(base_dir='/tests', header_version=version)
        app = fakes.TestRouter(Controller())

        response = req.get_response(app)
        resp = encodeutils.safe_decode(response.body, incoming='utf-8')

        if HTTP_ret == 200:
            self.assertEqual('off', resp)
        elif HTTP_ret == 404:
            self.assertNotEqual('off', resp)
        self.assertEqual(HTTP_ret, response.status_int)

    @ddt.data(['3.5', 'older'], ['3.37', 'newer'])
    @ddt.unpack
    def test_req_version_matches_with_if(self, version, ret_val):
        version_request = api_version_request.APIVersionRequest(version)
        self.mock_object(api_version_request,
                         'max_api_version',
                         mock.Mock(return_value=version_request))

        class Controller(wsgi.Controller):

            def index(self, req):
                req_version = req.api_version_request
                if req_version.matches('3.1', '3.8'):
                    return 'older'
                if req_version.matches('3.9', '8.8'):
                    return 'newer'

        req = self.build_request(base_dir='/tests', header_version=version)
        app = fakes.TestRouter(Controller())

        response = req.get_response(app)

        resp = encodeutils.safe_decode(response.body, incoming='utf-8')
        self.assertEqual(ret_val, resp)
        self.assertEqual(200, response.status_int)

    @ddt.data(['3.5', 'older'], ['3.37', 'newer'])
    @ddt.unpack
    def test_req_version_matches_with_None(self, version, ret_val):
        version_request = api_version_request.APIVersionRequest(version)
        self.mock_object(api_version_request,
                         'max_api_version',
                         mock.Mock(return_value=version_request))

        class Controller(wsgi.Controller):

            def index(self, req):
                req_version = req.api_version_request
                if req_version.matches(None, '3.8'):
                    return 'older'
                if req_version.matches('3.9', None):
                    return 'newer'

        req = self.build_request(base_dir='/tests', header_version=version)
        app = fakes.TestRouter(Controller())

        response = req.get_response(app)

        resp = encodeutils.safe_decode(response.body, incoming='utf-8')
        self.assertEqual(ret_val, resp)
        self.assertEqual(200, response.status_int)

    def test_req_version_matches_with_None_None(self):
        version_request = api_version_request.APIVersionRequest('3.39')
        self.mock_object(api_version_request,
                         'max_api_version',
                         mock.Mock(return_value=version_request))

        class Controller(wsgi.Controller):

            def index(self, req):
                req_version = req.api_version_request
                # This case is artificial, and will return True
                if req_version.matches(None, None):
                    return "Pass"

        req = self.build_request(base_dir='/tests', header_version='3.39')
        app = fakes.TestRouter(Controller())

        response = req.get_response(app)

        resp = encodeutils.safe_decode(response.body, incoming='utf-8')
        self.assertEqual("Pass", resp)
        self.assertEqual(200, response.status_int)
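# Editorial sketch, not part of the original module: the microversioning
# pattern these tests drive. A controller method is bound to a version range
# with the decorator, and a request carrying the
# 'OpenStack-API-Version: volume X.Y' header selects it. The class name here
# is invented for illustration.
class _SketchController(wsgi.Controller):
    @wsgi.Controller.api_version('3.0', '3.6')
    def index(self, req):
        # Served only when the request negotiates a version in [3.0, 3.6];
        # outside that range the dispatcher reports the method as not found.
        return 'versioned response'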
cinder-8.0.0/cinder/tests/unit/api/middleware/
cinder-8.0.0/cinder/tests/unit/api/middleware/__init__.py
cinder-8.0.0/cinder/tests/unit/api/middleware/test_faults.py

# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from xml.dom import minidom

import mock
from oslo_i18n import fixture as i18n_fixture
from oslo_serialization import jsonutils
import six
import webob.dec

from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.i18n import _
from cinder import test


class TestCase(test.TestCase):
    def _prepare_xml(self, xml_string):
        """Remove characters from string which hinder XML equality testing."""
        if six.PY3 and isinstance(xml_string, bytes):
            xml_string = xml_string.decode('utf-8')
        xml_string = xml_string.replace("  ", "")
        xml_string = xml_string.replace("\n", "")
        xml_string = xml_string.replace("\t", "")
        return xml_string


class TestFaults(TestCase):
    """Tests covering `cinder.api.openstack.faults:Fault` class."""

    def setUp(self):
        super(TestFaults, self).setUp()
        self.useFixture(i18n_fixture.ToggleLazy(True))

    def test_400_fault_json(self):
        """Test fault serialized to JSON via file-extension and/or header."""
        requests = [
            webob.Request.blank('/.json'),
            webob.Request.blank('/', headers={"Accept": "application/json"}),
        ]

        for request in requests:
            fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
            response = request.get_response(fault)

            expected = {
                "badRequest": {
                    "message": "scram",
                    "code": 400,
                },
            }
            actual = jsonutils.loads(response.body)

            self.assertEqual("application/json", response.content_type)
            self.assertEqual(expected, actual)

    def test_413_fault_json(self):
        """Test fault serialized to JSON via file-extension and/or header."""
        requests = [
            webob.Request.blank('/.json'),
            webob.Request.blank('/', headers={"Accept": "application/json"}),
        ]

        for request in requests:
            exc = webob.exc.HTTPRequestEntityTooLarge
            fault = wsgi.Fault(exc(explanation='sorry',
                                   headers={'Retry-After': '4'}))
            response = request.get_response(fault)

            expected = {
                "overLimit": {
                    "message": "sorry",
                    "code": 413,
                    "retryAfter": "4",
                },
            }
            actual = jsonutils.loads(response.body)

            self.assertEqual("application/json", response.content_type)
            self.assertEqual(expected, actual)

    def test_raise(self):
        """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
        @webob.dec.wsgify
        def raiser(req):
            raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))

        req = webob.Request.blank('/.xml')
        resp = req.get_response(raiser)
        self.assertEqual("application/xml", resp.content_type)
        self.assertEqual(404, resp.status_int)
        self.assertIn(b'whut?', resp.body)

    def test_raise_403(self):
        """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
        @webob.dec.wsgify
        def raiser(req):
            raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))

        req = webob.Request.blank('/.xml')
        resp = req.get_response(raiser)
        self.assertEqual("application/xml", resp.content_type)
        self.assertEqual(403, resp.status_int)
        self.assertNotIn('resizeNotAllowed', resp.body)
        self.assertIn(b'forbidden', resp.body)

    @mock.patch('cinder.api.openstack.wsgi.i18n.translate')
    def test_raise_http_with_localized_explanation(self, mock_translate):
        params = ('blah', )
        expl = _("String with params: %s") % params

        def _mock_translation(msg, locale):
            return "Mensaje traducido"

        mock_translate.side_effect = _mock_translation

        @webob.dec.wsgify
        def raiser(req):
            raise wsgi.Fault(webob.exc.HTTPNotFound(explanation=expl))

        req = webob.Request.blank('/.xml')
        resp = req.get_response(raiser)
        self.assertEqual("application/xml", resp.content_type)
        self.assertEqual(404, resp.status_int)
        self.assertIn(b"Mensaje traducido", resp.body)
        self.stubs.UnsetAll()

    def test_fault_has_status_int(self):
        """Ensure the status_int is set correctly on faults."""
        fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
        self.assertEqual(400, fault.status_int)

    def test_xml_serializer(self):
        """Ensure that a v2 request responds with a v2 xmlns."""
        request = webob.Request.blank('/v2',
                                      headers={"Accept": "application/xml"})
        fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
        response = request.get_response(fault)

        self.assertIn(common.XML_NS_V2, response.body.decode())
        self.assertEqual("application/xml", response.content_type)
        self.assertEqual(400, response.status_int)
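# Editorial sketch, not part of the original module: the Fault wrapping
# pattern the tests above exercise -- any webob HTTP exception raised from a
# WSGI-ified callable comes back as a serialized API fault response.
@webob.dec.wsgify
def _fault_sketch_app(req):
    raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='no such resource'))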
class FaultsXMLSerializationTestV11(TestCase):
    """Tests covering `cinder.api.openstack.faults:Fault` class."""

    def test_400_fault(self):
        metadata = {'attributes': {"badRequest": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V1)

        fixture = {
            "badRequest": {
                "message": "scram",
                "code": 400,
            },
        }

        output = serializer.serialize(fixture)
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <badRequest code="400" xmlns="%s">
                    <message>scram</message>
                </badRequest>
            """) % common.XML_NS_V1)

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_413_fault(self):
        metadata = {'attributes': {"overLimit": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V1)

        fixture = {
            "overLimit": {
                "message": "sorry",
                "code": 413,
                "retryAfter": 4,
            },
        }

        output = serializer.serialize(fixture)
        if six.PY3:
            output = output.decode('utf-8')
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <overLimit code="413" xmlns="%s">
                    <message>sorry</message>
                    <retryAfter>4</retryAfter>
                </overLimit>
            """) % common.XML_NS_V1)

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_404_fault(self):
        metadata = {'attributes': {"itemNotFound": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V1)

        fixture = {
            "itemNotFound": {
                "message": "sorry",
                "code": 404,
            },
        }

        output = serializer.serialize(fixture)
        if six.PY3:
            output = output.decode('utf-8')
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <itemNotFound code="404" xmlns="%s">
                    <message>sorry</message>
                </itemNotFound>
            """) % common.XML_NS_V1)

        self.assertEqual(expected.toxml(), actual.toxml())


class FaultsXMLSerializationTestV2(TestCase):
    """Tests covering `cinder.api.openstack.faults:Fault` class."""

    def test_400_fault(self):
        metadata = {'attributes': {"badRequest": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V2)

        fixture = {
            "badRequest": {
                "message": "scram",
                "code": 400,
            },
        }

        output = serializer.serialize(fixture)
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <badRequest code="400" xmlns="%s">
                    <message>scram</message>
                </badRequest>
            """) % common.XML_NS_V2)

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_413_fault(self):
        metadata = {'attributes': {"overLimit": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V2)

        fixture = {
            "overLimit": {
                "message": "sorry",
                "code": 413,
                "retryAfter": 4,
            },
        }

        output = serializer.serialize(fixture)
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <overLimit code="413" xmlns="%s">
                    <message>sorry</message>
                    <retryAfter>4</retryAfter>
                </overLimit>
            """) % common.XML_NS_V2)

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_404_fault(self):
        metadata = {'attributes': {"itemNotFound": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V2)

        fixture = {
            "itemNotFound": {
                "message": "sorry",
                "code": 404,
            },
        }

        output = serializer.serialize(fixture)
        actual = minidom.parseString(self._prepare_xml(output))

        expected = minidom.parseString(self._prepare_xml("""
                <itemNotFound code="404" xmlns="%s">
                    <message>sorry</message>
                </itemNotFound>
            """) % common.XML_NS_V2)

        self.assertEqual(expected.toxml(), actual.toxml())

cinder-8.0.0/cinder/tests/unit/api/middleware/test_auth.py

# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_middleware import request_id
import webob

import cinder.api.middleware.auth
from cinder import test


class TestCinderKeystoneContextMiddleware(test.TestCase):

    def setUp(self):
        super(TestCinderKeystoneContextMiddleware, self).setUp()

        @webob.dec.wsgify()
        def fake_app(req):
            self.context = req.environ['cinder.context']
            return webob.Response()

        self.context = None
        self.middleware = (cinder.api.middleware.auth
                           .CinderKeystoneContext(fake_app))
        self.request = webob.Request.blank('/')
        self.request.headers['X_TENANT_ID'] = 'testtenantid'
        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'

    def test_no_user_or_user_id(self):
        response = self.request.get_response(self.middleware)
        self.assertEqual('401 Unauthorized', response.status)

    def test_user_only(self):
        self.request.headers['X_USER'] = 'testuser'
        response = self.request.get_response(self.middleware)
        self.assertEqual('200 OK', response.status)
        self.assertEqual('testuser', self.context.user_id)

    def test_user_id_only(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        response = self.request.get_response(self.middleware)
        self.assertEqual('200 OK', response.status)
        self.assertEqual('testuserid', self.context.user_id)

    def test_user_id_trumps_user(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_USER'] = 'testuser'
        response = self.request.get_response(self.middleware)
        self.assertEqual('200 OK', response.status)
        self.assertEqual('testuserid', self.context.user_id)

    def test_tenant_id_name(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_TENANT_NAME'] = 'testtenantname'
        response = self.request.get_response(self.middleware)
        self.assertEqual('200 OK', response.status)
        self.assertEqual('testtenantid', self.context.project_id)
        self.assertEqual('testtenantname', self.context.project_name)

    def test_request_id_extracted_from_env(self):
        req_id = 'dummy-request-id'
        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.environ[request_id.ENV_REQUEST_ID] = req_id
        self.request.get_response(self.middleware)
        self.assertEqual(req_id, self.context.request_id)
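# Editorial sketch, not part of the original module: the request/response
# round trip the middleware tests above perform, condensed into one helper.
# The header names are the ones the tests set; `app` is any WSGI app that
# reads req.environ['cinder.context'].
def _keystone_context_sketch(app):
    middleware = cinder.api.middleware.auth.CinderKeystoneContext(app)
    req = webob.Request.blank('/')
    req.headers['X_USER_ID'] = 'testuserid'
    req.headers['X_TENANT_ID'] = 'testtenantid'
    req.headers['X_AUTH_TOKEN'] = 'testauthtoken'
    return req.get_response(middleware)  # 200 OK once a user is present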
cinder-8.0.0/cinder/tests/unit/api/fakes.py

# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from oslo_service import wsgi
from oslo_utils import timeutils
import routes
import webob
import webob.dec
import webob.request

from cinder.api.middleware import auth
from cinder.api.middleware import fault
from cinder.api.openstack import api_version_request as api_version
from cinder.api.openstack import wsgi as os_wsgi
from cinder.api import urlmap
from cinder.api.v2 import limits
from cinder.api.v2 import router
from cinder.api import versions
from cinder import context


FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}


class Context(object):
    pass


class FakeRouter(wsgi.Router):
    def __init__(self, ext_mgr=None):
        pass

    @webob.dec.wsgify
    def __call__(self, req):
        res = webob.Response()
        res.status = '200'
        res.headers['X-Test-Success'] = 'True'
        return res


@webob.dec.wsgify
def fake_wsgi(self, req):
    return self.application


def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None,
             use_no_auth=False, ext_mgr=None):
    if not inner_app_v2:
        inner_app_v2 = router.APIRouter(ext_mgr)

    if fake_auth:
        if fake_auth_context is not None:
            ctxt = fake_auth_context
        else:
            ctxt = context.RequestContext('fake', 'fake', auth_token=True)
        api_v2 = fault.FaultWrapper(auth.InjectContext(ctxt, inner_app_v2))
    elif use_no_auth:
        api_v2 = fault.FaultWrapper(auth.NoAuthMiddleware(
            limits.RateLimitingMiddleware(inner_app_v2)))
    else:
        api_v2 = fault.FaultWrapper(auth.AuthMiddleware(
            limits.RateLimitingMiddleware(inner_app_v2)))
    mapper = urlmap.URLMap()
    mapper['/v2'] = api_v2
    mapper['/'] = fault.FaultWrapper(versions.VersionsController())
    return mapper


class FakeToken(object):
    id_count = 0

    def __getitem__(self, key):
        return getattr(self, key)

    def __init__(self, **kwargs):
        FakeToken.id_count += 1
        self.id = FakeToken.id_count
        for k, v in kwargs.items():
            setattr(self, k, v)


class FakeRequestContext(context.RequestContext):
    def __init__(self, *args, **kwargs):
        kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
        super(FakeRequestContext, self).__init__(*args, **kwargs)


class HTTPRequest(webob.Request):

    @classmethod
    def blank(cls, *args, **kwargs):
        if args is not None:
            if 'v1' in args[0]:
                kwargs['base_url'] = 'http://localhost/v1'
            if 'v2' in args[0]:
                kwargs['base_url'] = 'http://localhost/v2'
            if 'v3' in args[0]:
                kwargs['base_url'] = 'http://localhost/v3'
        use_admin_context = kwargs.pop('use_admin_context', False)
        version = kwargs.pop('version', api_version._MIN_API_VERSION)
        out = os_wsgi.Request.blank(*args, **kwargs)
        out.environ['cinder.context'] = FakeRequestContext(
            'fake_user',
            'fakeproject',
            is_admin=use_admin_context)
        out.api_version_request = api_version.APIVersionRequest(version)
        return out


class TestRouter(wsgi.Router):
    def __init__(self, controller):
        mapper = routes.Mapper()
        mapper.resource("test", "tests",
                        controller=os_wsgi.Resource(controller))
        super(TestRouter, self).__init__(mapper)


class FakeAuthDatabase(object):
    data = {}

    @staticmethod
    def auth_token_get(context, token_hash):
        return FakeAuthDatabase.data.get(token_hash, None)

    @staticmethod
    def auth_token_create(context, token):
        fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
        FakeAuthDatabase.data[fake_token.token_hash] = fake_token
        FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
        return fake_token

    @staticmethod
    def auth_token_destroy(context, token_id):
        token = FakeAuthDatabase.data.get('id_%i' % token_id)
        if token and token.token_hash in FakeAuthDatabase.data:
            del FakeAuthDatabase.data[token.token_hash]
            del FakeAuthDatabase.data['id_%i' % token_id]


class FakeRateLimiter(object):
    def __init__(self, application):
        self.application = application

    @webob.dec.wsgify
    def __call__(self, req):
        return self.application


def get_fake_uuid(token=0):
    if token not in FAKE_UUIDS:
        FAKE_UUIDS[token] = str(uuid.uuid4())
    return FAKE_UUIDS[token]
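# Editorial usage sketch, not part of the original module: how test code
# typically consumes the helpers above -- build a versioned, admin-context
# fake request and read back the injected context.
def _fake_request_sketch():
    req = HTTPRequest.blank('/v2/fake/volumes', use_admin_context=True)
    ctxt = req.environ['cinder.context']  # a FakeRequestContext
    return ctxt.is_admin                  # True, per use_admin_context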
cinder-8.0.0/cinder/tests/unit/api/v2/
cinder-8.0.0/cinder/tests/unit/api/v2/test_limits.py

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Tests dealing with HTTP rate-limiting.
"""

from xml.dom import minidom

from lxml import etree
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from six.moves import range
import webob

from cinder.api.v2 import limits
from cinder.api import views
from cinder.api import xmlutil
import cinder.context
from cinder import test


TEST_LIMITS = [
    limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
    limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
    limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE),
    limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
    limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE),
]
NS = {
    'atom': 'http://www.w3.org/2005/Atom',
    'ns': 'http://docs.openstack.org/common/api/v1.0',
}


class BaseLimitTestSuite(test.TestCase):
    """Base test suite which provides relevant stubs and time abstraction."""

    def setUp(self):
        super(BaseLimitTestSuite, self).setUp()
        self.time = 0.0
        self.stubs.Set(limits.Limit, "_get_time", self._get_time)
        self.absolute_limits = {}

        def stub_get_project_quotas(context, project_id, usages=True):
            return {k: dict(limit=v)
                    for k, v in self.absolute_limits.items()}

        self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas",
                       stub_get_project_quotas)

    def _get_time(self):
        """Return the "time" according to this test suite."""
        return self.time


class LimitsControllerTest(BaseLimitTestSuite):
    """Tests for `limits.LimitsController` class."""

    def setUp(self):
        """Run before each test."""
        super(LimitsControllerTest, self).setUp()
        self.controller = limits.create_resource()

    def _get_index_request(self, accept_header="application/json"):
        """Helper to set routing arguments."""
        request = webob.Request.blank("/")
        request.accept = accept_header
        request.environ["wsgiorg.routing_args"] = (None, {
            "action": "index",
            "controller": "",
        })
        context = cinder.context.RequestContext('testuser', 'testproject')
        request.environ["cinder.context"] = context
        return request

    def _populate_limits(self, request):
        """Put limit info into a request."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
            limits.Limit("GET", "changes-since*", "changes-since",
                         5, 60).display(),
        ]
        request.environ["cinder.limits"] = _limits
        return request

    def test_empty_index_json(self):
        """Test getting empty limit details in JSON."""
        request = self._get_index_request()
        response = request.get_response(self.controller)
        expected = {
            "limits": {
                "rate": [],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def test_index_json(self):
        """Test getting limit details in JSON."""
        request = self._get_index_request()
        request = self._populate_limits(request)
        self.absolute_limits = {
            'gigabytes': 512,
            'volumes': 5,
        }
        response = request.get_response(self.controller)
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                            {
                                "verb": "POST",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "HOUR",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                    {
                        "regex": "changes-since",
                        "uri": "changes-since*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "MINUTE",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                ],
                "absolute": {"maxTotalVolumeGigabytes": 512,
                             "maxTotalVolumes": 5, },
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def _populate_limits_diff_regex(self, request):
        """Put limit info into a request."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("GET", "*", "*.*", 10, 60).display(),
        ]
        request.environ["cinder.limits"] = _limits
        return request

    def test_index_diff_regex(self):
        """Test getting limit details in JSON."""
        request = self._get_index_request()
        request = self._populate_limits_diff_regex(request)
        response = request.get_response(self.controller)
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                    {
                        "regex": "*.*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                ],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def _test_index_absolute_limits_json(self, expected):
        request = self._get_index_request()
        response = request.get_response(self.controller)
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body['limits']['absolute'])

    def test_index_ignores_extra_absolute_limits_json(self):
        self.absolute_limits = {'unknown_limit': 9001}
        self._test_index_absolute_limits_json({})


class TestLimiter(limits.Limiter):
    pass


class LimitMiddlewareTest(BaseLimitTestSuite):
    """Tests for the `limits.RateLimitingMiddleware` class."""

    @webob.dec.wsgify
    def _empty_app(self, request):
        """Do-nothing WSGI app."""
        pass

    def setUp(self):
        """Prepare middleware for use through fake WSGI app."""
        super(LimitMiddlewareTest, self).setUp()
        _limits = '(GET, *, .*, 1, MINUTE)'
        self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
                                                 "%s.TestLimiter" %
                                                 self.__class__.__module__)

    def test_limit_class(self):
        """Test that middleware selected correct limiter class."""
        assert isinstance(self.app._limiter, TestLimiter)

    def test_good_request(self):
        """Test successful GET request through middleware."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

    def test_limited_request_json(self):
        """Test a rate-limited (413) GET request through middleware."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(413, response.status_int)

        self.assertIn('Retry-After', response.headers)
        retry_after = int(response.headers['Retry-After'])
        self.assertAlmostEqual(retry_after, 60, 1)

        body = jsonutils.loads(response.body)
        expected = "Only 1 GET request(s) can be made to * every minute."
        value = body["overLimitFault"]["details"].strip()
        self.assertEqual(expected, value)

    def test_limited_request_xml(self):
        """Test a rate-limited (413) response as XML."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

        request = webob.Request.blank("/")
        request.accept = "application/xml"
        response = request.get_response(self.app)
        self.assertEqual(413, response.status_int)

        root = minidom.parseString(response.body).childNodes[0]
        expected = "Only 1 GET request(s) can be made to * every minute."

        details = root.getElementsByTagName("details")
        self.assertEqual(1, details.length)

        value = details.item(0).firstChild.data.strip()
        self.assertEqual(expected, value)


class LimitTest(BaseLimitTestSuite):
    """Tests for the `limits.Limit` class."""

    def test_GET_no_delay(self):
        """Test a limit handles 1 GET per second."""
        limit = limits.Limit("GET", "*", ".*", 1, 1)
        delay = limit("GET", "/anything")
        self.assertIsNone(delay)
        self.assertEqual(0, limit.next_request)
        self.assertEqual(0, limit.last_request)

    def test_GET_delay(self):
        """Test two calls to 1 GET per second limit."""
        limit = limits.Limit("GET", "*", ".*", 1, 1)
        delay = limit("GET", "/anything")
        self.assertIsNone(delay)

        delay = limit("GET", "/anything")
        self.assertEqual(1, delay)
        self.assertEqual(1, limit.next_request)
        self.assertEqual(0, limit.last_request)

        self.time += 4

        delay = limit("GET", "/anything")
        self.assertIsNone(delay)
        self.assertEqual(4, limit.next_request)
        self.assertEqual(4, limit.last_request)
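# Editorial sketch, not part of the original module: the core Limit behavior
# the tests above pin down, shown linearly. With a "1 per second" limit the
# first call is admitted and the second reports the wait in seconds (the
# tests stub Limit._get_time; real wall-clock time behaves equivalently for
# two back-to-back calls).
def _limit_sketch():
    limit = limits.Limit("GET", "*", ".*", 1, 1)
    assert limit("GET", "/anything") is None  # first request is admitted
    assert limit("GET", "/anything") == 1     # second must wait one second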
verbs...
        expected = ['GET', 'PUT', 'POST', 'SAY']
        self.assertEqual(expected, [t.verb for t in l])

        # ...the URIs...
        expected = ['*', '/foo*', '/bar*', '/derp*']
        self.assertEqual(expected, [t.uri for t in l])

        # ...the regexes...
        expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
        self.assertEqual(expected, [t.regex for t in l])

        # ...the values...
        expected = [20, 10, 5, 1]
        self.assertEqual(expected, [t.value for t in l])

        # ...and the units...
        expected = [limits.PER_MINUTE, limits.PER_HOUR,
                    limits.PER_SECOND, limits.PER_DAY]
        self.assertEqual(expected, [t.unit for t in l])


class LimiterTest(BaseLimitTestSuite):
    """Tests for the in-memory `limits.Limiter` class."""

    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        userlimits = {'limits.user3': '',
                      'limits.user0': '(get, *, .*, 4, minute);'
                                      '(put, *, .*, 2, minute)'}
        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)

    def _check(self, num, verb, url, username=None):
        """Check and yield results from checks."""
        for x in range(num):
            yield self.limiter.check_for_delay(verb, url, username)[0]

    def _check_sum(self, num, verb, url, username=None):
        """Check and sum results from checks."""
        results = self._check(num, verb, url, username)
        return sum(item for item in results if item)

    def test_no_delay_GET(self):
        """Ensure no delay on a single call for a limit verb we didn't set."""
        delay = self.limiter.check_for_delay("GET", "/anything")
        self.assertEqual((None, None), delay)

    def test_no_delay_PUT(self):
        """Ensure no delay on a single call for a known limit."""
        delay = self.limiter.check_for_delay("PUT", "/anything")
        self.assertEqual((None, None), delay)

    def test_delay_PUT(self):
        """Test delay on 11th PUT request.

        Ensure the 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_POST(self):
        """Test delay on 8th POST request.

        Ensure the 8th POST will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 7
        results = list(self._check(7, "POST", "/anything"))
        self.assertEqual(expected, results)

        expected = 60.0 / 7.0
        results = self._check_sum(1, "POST", "/anything")
        self.assertAlmostEqual(expected, results, 8)

    def test_delay_GET(self):
        """Ensure the 11th GET will result in NO delay."""
        expected = [None] * 11
        results = list(self._check(11, "GET", "/anything"))
        self.assertEqual(expected, results)

        expected = [None] * 4 + [15.0]
        results = list(self._check(5, "GET", "/foo", "user0"))
        self.assertEqual(expected, results)

    def test_delay_PUT_volumes(self):
        """Test delay on /volumes.

        Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is
        still OK after 5 requests...but then after 11 total requests, PUT
        limiting kicks in.
        """
        # First 6 requests on PUT /volumes
        expected = [None] * 5 + [12.0]
        results = list(self._check(6, "PUT", "/volumes"))
        self.assertEqual(expected, results)

        # Next 5 requests on PUT /anything
        expected = [None] * 4 + [6.0]
        results = list(self._check(5, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_PUT_wait(self):
        """Test limit is lifted again.

        Ensure after hitting the limit and then waiting for the correct
        amount of time, the limit will be lifted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

        # Advance time
        self.time += 6.0

        expected = [None, 6.0]
        results = list(self._check(2, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_multiple_delays(self):
        """Ensure multiple requests still get a delay."""
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything"))
        self.assertEqual(expected, results)

        self.time += 1.0

        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything"))
        self.assertEqual(expected, results)

        expected = [None] * 2 + [30.0] * 8
        results = list(self._check(10, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

    def test_user_limit(self):
        """Test user-specific limits."""
        self.assertEqual([], self.limiter.levels['user3'])
        self.assertEqual(2, len(self.limiter.levels['user0']))

    def test_multiple_users(self):
        """Tests involving multiple users."""
        # User0
        expected = [None] * 2 + [30.0] * 8
        results = list(self._check(10, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

        # User1
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)

        # User2
        expected = [None] * 10 + [6.0] * 5
        results = list(self._check(15, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)

        # User3
        expected = [None] * 20
        results = list(self._check(20, "PUT", "/anything", "user3"))
        self.assertEqual(expected, results)

        self.time += 1.0

        # User1 again
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)

        self.time += 1.0

        # User2 again
        expected = [4.0] * 5
        results = list(self._check(5, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)

        # User0 again
        expected = [28.0]
        results = list(self._check(1, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

        self.time += 28.0

        expected = [None, 30.0]
        results = list(self._check(2, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)


class WsgiLimiterTest(BaseLimitTestSuite):
    """Tests for `limits.WsgiLimiter` class."""

    def setUp(self):
        """Run before each test."""
        super(WsgiLimiterTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)

    def _request_data(self, verb, path):
        """Get data describing a limit request verb/path."""
        return jsonutils.dump_as_bytes({"verb": verb, "path": path})

    def _request(self, verb, url, username=None):
        """POST request to given url by given username.

        Make sure that POSTing to the given url causes the given username
        to perform the given action.  Make the internal rate limiter return
        a delay and make sure that the WSGI app returns the correct
        response.
""" if username: request = webob.Request.blank("/%s" % username) else: request = webob.Request.blank("/") request.method = "POST" request.body = self._request_data(verb, url) response = request.get_response(self.app) if "X-Wait-Seconds" in response.headers: self.assertEqual(403, response.status_int) return response.headers["X-Wait-Seconds"] self.assertEqual(204, response.status_int) def test_invalid_methods(self): """Only POSTs should work.""" for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: request = webob.Request.blank("/", method=method) response = request.get_response(self.app) self.assertEqual(405, response.status_int) def test_good_url(self): delay = self._request("GET", "/something") self.assertIsNone(delay) def test_escaping(self): delay = self._request("GET", "/something/jump%20up") self.assertIsNone(delay) def test_response_to_delays(self): delay = self._request("GET", "/delayed") self.assertIsNone(delay) delay = self._request("GET", "/delayed") self.assertEqual('60.00', delay) def test_response_to_delays_usernames(self): delay = self._request("GET", "/delayed", "user1") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user2") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user1") self.assertEqual('60.00', delay) delay = self._request("GET", "/delayed", "user2") self.assertEqual('60.00', delay) class FakeHttplibSocket(object): """Fake `http_client.HTTPResponse` replacement.""" def __init__(self, response_string): """Initialize new `FakeHttplibSocket`.""" if isinstance(response_string, six.text_type): response_string = response_string.encode('utf-8') self._buffer = six.BytesIO(response_string) def makefile(self, mode, *args): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """Fake `http_client.HTTPConnection`.""" def __init__(self, app, host): """Initialize `FakeHttplibConnection`.""" self.app = app self.host = host def request(self, method, path, body="", headers=None): """Fake request handler. Requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into an `http_client.HTTPResponse`. """ if not headers: headers = {} req = webob.Request.blank(path) req.method = method req.headers = headers req.host = self.host req.body = body resp = str(req.get_response(self.app)) resp = "HTTP/1.0 %s" % resp sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(sock) self.http_response.begin() def getresponse(self): """Return our generated response from the request.""" return self.http_response def wire_HTTPConnection_to_WSGI(host, app): """Monkeypatches HTTPConnection. Monkeypatches HTTPConnection so that if you try to connect to host, you are instead routed straight to the given WSGI app. After calling this method, when any code calls http_client.HTTPConnection(host) the connection object will be a fake. Its requests will be sent directly to the given WSGI app rather than through a socket. Code connecting to hosts other than host will not be affected. This method may be called multiple times to map different hosts to different apps. This method returns the original HTTPConnection object, so that the caller can restore the default HTTPConnection interface (for all hosts). """ class HTTPConnectionDecorator(object): """Decorator to mock the HTTPConecction class. Wraps the real HTTPConnection class so that when you instantiate the class you might instead get a fake instance. 
""" def __init__(self, wrapped): self.wrapped = wrapped def __call__(self, connection_host, *args, **kwargs): if connection_host == host: return FakeHttplibConnection(app, host) else: return self.wrapped(connection_host, *args, **kwargs) oldHTTPConnection = http_client.HTTPConnection new_http_connection = HTTPConnectionDecorator(http_client.HTTPConnection) http_client.HTTPConnection = new_http_connection return oldHTTPConnection class WsgiLimiterProxyTest(BaseLimitTestSuite): """Tests for the `limits.WsgiLimiterProxy` class.""" def setUp(self): """setUp() for WsgiLimiterProxyTest. Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `http_client` library. """ super(WsgiLimiterProxyTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) oldHTTPConnection = ( wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") self.addCleanup(self._restore, oldHTTPConnection) def _restore(self, oldHTTPConnection): # restore original HTTPConnection object http_client.HTTPConnection = oldHTTPConnection def test_200(self): """Successful request test.""" delay = self.proxy.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_403(self): """Forbidden request test.""" delay = self.proxy.check_for_delay("GET", "/delayed") self.assertEqual((None, None), delay) delay, error = self.proxy.check_for_delay("GET", "/delayed") error = error.strip() expected = ("60.00", b"403 Forbidden\n\nOnly 1 GET request(s) can be " b"made to /delayed every minute.") self.assertEqual(expected, (delay, error)) class LimitsViewBuilderTest(test.TestCase): def setUp(self): super(LimitsViewBuilderTest, self).setUp() self.view_builder = views.limits.ViewBuilder() self.rate_limits = [{"URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226}, {"URI": "*/volumes", "regex": "^/volumes", "value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "resetTime": 1311272226}] self.absolute_limits = {"metadata_items": 1, "injected_files": 5, "injected_file_content_bytes": 5} def test_build_limits(self): tdate = "2011-07-21T18:17:06" expected_limits = { "limits": {"rate": [{"uri": "*", "regex": ".*", "limit": [{"value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": tdate}]}, {"uri": "*/volumes", "regex": "^/volumes", "limit": [{"value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": tdate}]}], "absolute": {"maxServerMeta": 1, "maxImageMeta": 1, "maxPersonality": 5, "maxPersonalitySize": 5}}} output = self.view_builder.build(self.rate_limits, self.absolute_limits) self.assertDictMatch(expected_limits, output) def test_build_limits_empty_limits(self): expected_limits = {"limits": {"rate": [], "absolute": {}}} abs_limits = {} rate_limits = [] output = self.view_builder.build(rate_limits, abs_limits) self.assertDictMatch(expected_limits, output) class LimitsXMLSerializationTest(test.TestCase): def test_xml_declaration(self): serializer = limits.LimitsTemplate() fixture = {"limits": { "rate": [], "absolute": {}}} output = serializer.serialize(fixture) has_dec = output.startswith(b"") self.assertTrue(has_dec) def test_index(self): tdate = "2011-12-15T22:42:45Z" serializer = limits.LimitsTemplate() fixture = {"limits": {"rate": [{"uri": "*", "regex": ".*", "limit": [{"value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": tdate}]}, {"uri": "*/servers", "regex": "^/servers", "limit": [{"value": 
50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": tdate}]}], "absolute": {"maxServerMeta": 1, "maxImageMeta": 1, "maxPersonality": 5, "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture) root = etree.XML(output) xmlutil.validate_schema(root, 'limits') # verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(4, len(absolutes)) for limit in absolutes: name = limit.get('name') value = limit.get('value') self.assertEqual(str(fixture['limits']['absolute'][name]), value) # verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(2, len(rates)) for i, rate in enumerate(rates): for key in ['uri', 'regex']: self.assertEqual(str(fixture['limits']['rate'][i][key]), rate.get(key)) rate_limits = rate.xpath('ns:limit', namespaces=NS) self.assertEqual(1, len(rate_limits)) for j, limit in enumerate(rate_limits): for key in ['verb', 'value', 'remaining', 'unit', 'next-available']: self.assertEqual( str(fixture['limits']['rate'][i]['limit'][j][key]), limit.get(key)) def test_index_no_limits(self): serializer = limits.LimitsTemplate() fixture = {"limits": { "rate": [], "absolute": {}}} output = serializer.serialize(fixture) root = etree.XML(output) xmlutil.validate_schema(root, 'limits') # verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(0, len(absolutes)) # verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(0, len(rates)) cinder-8.0.0/cinder/tests/unit/api/v2/__init__.py0000664000567000056710000000000012701406250022705 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/api/v2/stubs.py0000664000567000056710000002151212701406257022330 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 from cinder import exception as exc from cinder import objects from cinder.tests.unit import fake_volume FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_UUIDS = {} TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001' DEFAULT_VOL_NAME = "displayname" DEFAULT_VOL_DESCRIPTION = "displaydesc" DEFAULT_VOL_SIZE = 1 DEFAULT_VOL_TYPE = "vol_type_name" DEFAULT_VOL_STATUS = "fakestatus" DEFAULT_VOL_ID = '1' # TODO(vbala): api.v1 tests use hard-coded "fakeaz" for verifying # post-conditions. Update value to "zone1:host1" once we remove # api.v1 tests and use it in api.v2 tests. 
DEFAULT_AZ = "fakeaz" def stub_volume(id, **kwargs): volume = { 'id': id, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'host': 'fakehost', 'size': DEFAULT_VOL_SIZE, 'availability_zone': DEFAULT_AZ, 'status': DEFAULT_VOL_STATUS, 'migration_status': None, 'attach_status': 'attached', 'name': 'vol name', 'display_name': DEFAULT_VOL_NAME, 'display_description': DEFAULT_VOL_DESCRIPTION, 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'snapshot_id': None, 'source_volid': None, 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', 'encryption_key_id': None, 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'}, {'key': 'readonly', 'value': 'False'}], 'bootable': False, 'launched_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'volume_type': fake_volume.fake_db_volume_type(name=DEFAULT_VOL_TYPE), 'replication_status': 'disabled', 'replication_extended_status': None, 'replication_driver_data': None, 'volume_attachment': [], 'multiattach': False, } volume.update(kwargs) if kwargs.get('volume_glance_metadata', None): volume['bootable'] = True if kwargs.get('attach_status') == 'detached': del volume['volume_admin_metadata'][0] return volume def stub_volume_create(self, context, size, name, description, snapshot=None, **param): vol = stub_volume(DEFAULT_VOL_ID) vol['size'] = size vol['display_name'] = name vol['display_description'] = description source_volume = param.get('source_volume') or {} vol['source_volid'] = source_volume.get('id') vol['bootable'] = False vol['volume_attachment'] = [] try: vol['snapshot_id'] = snapshot['id'] except (KeyError, TypeError): vol['snapshot_id'] = None vol['availability_zone'] = param.get('availability_zone', 'fakeaz') return vol def stub_volume_api_create(self, context, *args, **kwargs): vol = stub_volume_create(self, context, *args, **kwargs) return fake_volume.fake_volume_obj(context, **vol) def stub_image_service_detail(self, context, **kwargs): filters = kwargs.get('filters', {'name': ''}) if filters['name'] == "Fedora-x86_64-20-20140618-sda": return [{'id': "c905cedb-7281-47e4-8a62-f26bc5fc4c77"}] elif filters['name'] == "multi": return [{'id': "c905cedb-7281-47e4-8a62-f26bc5fc4c77"}, {'id': "c905cedb-abcd-47e4-8a62-f26bc5fc4c77"}] return [] def stub_volume_create_from_image(self, context, size, name, description, snapshot, volume_type, metadata, availability_zone): vol = stub_volume('1') vol['status'] = 'creating' vol['size'] = size vol['display_name'] = name vol['display_description'] = description vol['availability_zone'] = 'cinder' vol['bootable'] = False return vol def stub_volume_update(self, context, *args, **param): pass def stub_volume_delete(self, context, *args, **param): pass def stub_volume_get(self, context, volume_id, viewable_admin_meta=False): if viewable_admin_meta: return stub_volume(volume_id) else: volume = stub_volume(volume_id) del volume['volume_admin_metadata'] return volume def stub_volume_get_notfound(self, context, volume_id, viewable_admin_meta=False): raise exc.VolumeNotFound(volume_id) def stub_volume_get_db(context, volume_id): if context.is_admin: return stub_volume(volume_id) else: volume = stub_volume(volume_id) del volume['volume_admin_metadata'] return volume def stub_volume_api_get(self, context, volume_id, viewable_admin_meta=False): vol = stub_volume(volume_id) return fake_volume.fake_volume_obj(context, **vol) def stub_volume_get_all(context, 
search_opts=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=None): return [stub_volume(100, project_id='fake'), stub_volume(101, project_id='superfake'), stub_volume(102, project_id='superduperfake')] def stub_volume_get_all_by_project(self, context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=None): filters = filters or {} return [stub_volume_get(self, context, '1', viewable_admin_meta=True)] def stub_volume_api_get_all_by_project(self, context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=None): filters = filters or {} vol = stub_volume_get(self, context, '1', viewable_admin_meta=viewable_admin_meta) vol_obj = fake_volume.fake_volume_obj(context, **vol) return objects.VolumeList(objects=[vol_obj]) def stub_snapshot(id, **kwargs): snapshot = {'id': id, 'volume_id': 12, 'status': 'available', 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', 'project_id': 'fake', 'snapshot_metadata': []} snapshot.update(kwargs) return snapshot def stub_snapshot_get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): return [stub_snapshot(100, project_id='fake'), stub_snapshot(101, project_id='superfake'), stub_snapshot(102, project_id='superduperfake')] def stub_snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): return [stub_snapshot(1)] def stub_snapshot_update(self, context, *args, **param): pass def stub_service_get_all_by_topic(context, topic, disabled=None): return [{'availability_zone': "zone1:host1", "disabled": 0}] def stub_snapshot_get(self, context, snapshot_id): if snapshot_id != TEST_SNAPSHOT_UUID: raise exc.SnapshotNotFound(snapshot_id=snapshot_id) return stub_snapshot(snapshot_id) def stub_consistencygroup_get_notfound(self, context, cg_id): raise exc.ConsistencyGroupNotFound(consistencygroup_id=cg_id) def stub_volume_type_get(context, id, *args, **kwargs): return {'id': id, 'name': 'vol_type_name', 'description': 'A fake volume type', 'is_public': True, 'projects': [], 'extra_specs': {}, 'created_at': None, 'deleted_at': None, 'updated_at': None, 'deleted': False} def stub_volume_admin_metadata_get(context, volume_id, **kwargs): admin_meta = {'attached_mode': 'rw', 'readonly': 'False'} if kwargs.get('attach_status') == 'detached': del admin_meta['attached_mode'] return admin_meta cinder-8.0.0/cinder/tests/unit/api/v2/test_snapshots.py0000664000567000056710000006236012701406257024257 0ustar jenkinsjenkins00000000000000# Copyright 2011 Denali Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
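# For orientation: the tests below exercise the v2 /snapshots controller.
# A typical create request body handled by these tests looks like the
# following minimal sketch (field values are illustrative only):
#
#     {"snapshot": {"volume_id": "<volume uuid>",
#                   "force": False,
#                   "name": "Snapshot Test Name",
#                   "description": "Snapshot Test Desc"}}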
from lxml import etree import mock from oslo_config import cfg from oslo_utils import timeutils from six.moves.urllib import parse as urllib import webob from cinder.api import common from cinder.api.v2 import snapshots from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import stubs from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import utils from cinder import volume CONF = cfg.CONF UUID = '00000000-0000-0000-0000-000000000001' INVALID_UUID = '00000000-0000-0000-0000-000000000002' def _get_default_snapshot_param(): return { 'id': UUID, 'volume_id': 12, 'status': 'available', 'volume_size': 100, 'created_at': None, 'updated_at': None, 'user_id': 'bcb7746c7a41472d88a1ffac89ba6a9b', 'project_id': '7ffe17a15c724e2aa79fc839540aec15', 'display_name': 'Default name', 'display_description': 'Default description', 'deleted': None, 'volume': {'availability_zone': 'test_zone'} } def stub_snapshot_delete(self, context, snapshot): if snapshot['id'] != UUID: raise exception.SnapshotNotFound(snapshot['id']) def stub_snapshot_get(self, context, snapshot_id): if snapshot_id != UUID: raise exception.SnapshotNotFound(snapshot_id) param = _get_default_snapshot_param() return param def stub_snapshot_get_all(self, context, search_opts=None): param = _get_default_snapshot_param() return [param] class SnapshotApiTest(test.TestCase): def setUp(self): super(SnapshotApiTest, self).setUp() self.controller = snapshots.SnapshotsController() self.stubs.Set(db, 'snapshot_get_all_by_project', stubs.stub_snapshot_get_all_by_project) self.stubs.Set(db, 'snapshot_get_all', stubs.stub_snapshot_get_all) self.ctx = context.RequestContext('admin', 'fakeproject', True) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_snapshot_create(self, mock_validate): volume = utils.create_volume(self.ctx) snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' snapshot = { "volume_id": volume.id, "force": False, "name": snapshot_name, "description": snapshot_description } body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v2/snapshots') resp_dict = self.controller.create(req, body) self.assertIn('snapshot', resp_dict) self.assertEqual(snapshot_name, resp_dict['snapshot']['name']) self.assertEqual(snapshot_description, resp_dict['snapshot']['description']) self.assertTrue(mock_validate.called) self.assertIn('updated_at', resp_dict['snapshot']) db.volume_destroy(self.ctx, volume.id) def test_snapshot_create_force(self): volume = utils.create_volume(self.ctx, status='in-use') snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' snapshot = { "volume_id": volume.id, "force": True, "name": snapshot_name, "description": snapshot_description } body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v2/snapshots') resp_dict = self.controller.create(req, body) self.assertIn('snapshot', resp_dict) self.assertEqual(snapshot_name, resp_dict['snapshot']['name']) self.assertEqual(snapshot_description, resp_dict['snapshot']['description']) self.assertIn('updated_at', resp_dict['snapshot']) snapshot = { "volume_id": volume.id, "force": "**&&^^%%$$##@@", "name": "Snapshot Test Name", "description": "Snapshot Test Desc" } body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v2/snapshots') 
self.assertRaises(exception.InvalidParameterValue, self.controller.create, req, body) db.volume_destroy(self.ctx, volume.id) def test_snapshot_create_without_volume_id(self): snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' body = { "snapshot": { "force": True, "name": snapshot_name, "description": snapshot_description } } req = fakes.HTTPRequest.blank('/v2/snapshots') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) @mock.patch.object(volume.api.API, "update_snapshot", side_effect=stubs.stub_snapshot_update) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_snapshot_update( self, mock_validate, snapshot_get_by_id, volume_get_by_id, snapshot_metadata_get, update_snapshot): snapshot = { 'id': UUID, 'volume_id': 1, 'status': 'available', 'volume_size': 100, 'display_name': 'Default name', 'display_description': 'Default description', 'expected_attrs': ['metadata'], } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj updates = { "name": "Updated Test Name", } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) res_dict = self.controller.update(req, UUID, body) expected = { 'snapshot': { 'id': UUID, 'volume_id': '1', 'status': u'available', 'size': 100, 'created_at': None, 'updated_at': None, 'name': u'Updated Test Name', 'description': u'Default description', 'metadata': {}, } } self.assertEqual(expected, res_dict) self.assertTrue(mock_validate.called) self.assertEqual(2, len(self.notifier.notifications)) def test_snapshot_update_missing_body(self): body = {} req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, UUID, body) def test_snapshot_update_invalid_body(self): body = {'name': 'missing top level snapshot key'} req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, UUID, body) def test_snapshot_update_not_found(self): self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) updates = { "name": "Updated Test Name", } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v2/snapshots/not-the-uuid') self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, 'not-the-uuid', body) @mock.patch.object(volume.api.API, "delete_snapshot", side_effect=stubs.stub_snapshot_update) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_snapshot_delete(self, snapshot_get_by_id, volume_get_by_id, snapshot_metadata_get, delete_snapshot): snapshot = { 'id': UUID, 'volume_id': 1, 'status': 'available', 'volume_size': 100, 'display_name': 'Default name', 'display_description': 'Default description', 'expected_attrs': ['metadata'], } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj snapshot_id = UUID 
        req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
        resp = self.controller.delete(req, snapshot_id)
        self.assertEqual(202, resp.status_int)

    def test_snapshot_delete_invalid_id(self):
        self.stubs.Set(volume.api.API, "delete_snapshot",
                       stub_snapshot_delete)
        snapshot_id = INVALID_UUID
        req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, snapshot_id)

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id,
                           snapshot_metadata_get):
        snapshot = {
            'id': UUID,
            'volume_id': 1,
            'status': 'available',
            'volume_size': 100,
            'display_name': 'Default name',
            'display_description': 'Default description',
            'expected_attrs': ['metadata'],
        }
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        fake_volume_obj = fake_volume.fake_volume_obj(ctx)
        snapshot_get_by_id.return_value = snapshot_obj
        volume_get_by_id.return_value = fake_volume_obj
        req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
        resp_dict = self.controller.show(req, UUID)

        self.assertIn('snapshot', resp_dict)
        self.assertEqual(UUID, resp_dict['snapshot']['id'])
        self.assertIn('updated_at', resp_dict['snapshot'])

    def test_snapshot_show_invalid_id(self):
        snapshot_id = INVALID_UUID
        req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, snapshot_id)

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    @mock.patch('cinder.volume.api.API.get_all_snapshots')
    def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id,
                             volume_get_by_id, snapshot_metadata_get):
        snapshot = {
            'id': UUID,
            'volume_id': 1,
            'status': 'available',
            'volume_size': 100,
            'display_name': 'Default name',
            'display_description': 'Default description',
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        fake_volume_obj = fake_volume.fake_volume_obj(ctx)
        snapshot_get_by_id.return_value = snapshot_obj
        volume_get_by_id.return_value = fake_volume_obj
        snapshots = objects.SnapshotList(objects=[snapshot_obj])
        get_all_snapshots.return_value = snapshots

        req = fakes.HTTPRequest.blank('/v2/snapshots/detail')
        resp_dict = self.controller.detail(req)

        self.assertIn('snapshots', resp_dict)
        resp_snapshots = resp_dict['snapshots']
        self.assertEqual(1, len(resp_snapshots))
        self.assertIn('updated_at', resp_snapshots[0])

        resp_snapshot = resp_snapshots.pop()
        self.assertEqual(UUID, resp_snapshot['id'])

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_admin_list_snapshots_limited_to_project(self,
                                                     snapshot_metadata_get):
        req = fakes.HTTPRequest.blank('/v2/fake/snapshots',
                                      use_admin_context=True)
        res = self.controller.index(req)

        self.assertIn('snapshots', res)
        self.assertEqual(1, len(res['snapshots']))

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_list_snapshots_with_limit_and_offset(self,
                                                  snapshot_metadata_get):
        def list_snapshots_with_limit_and_offset(snaps, is_admin):
            req = fakes.HTTPRequest.blank('/v2/fake/snapshots?limit=1'
                                          '&offset=1',
                                          use_admin_context=is_admin)
            res = self.controller.index(req)

            self.assertIn('snapshots', res)
            self.assertEqual(1, len(res['snapshots']))
            self.assertEqual(snaps[1].id, res['snapshots'][0]['id'])
            self.assertIn('updated_at', res['snapshots'][0])

            # Test that we get an empty list with an offset greater than the
            # number of items
            req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=3')
            self.assertEqual({'snapshots': []}, self.controller.index(req))

        self.stubs.UnsetAll()
        volume, snaps = self._create_db_snapshots(3)
        # admin case
        list_snapshots_with_limit_and_offset(snaps, is_admin=True)
        # non-admin case
        list_snapshots_with_limit_and_offset(snaps, is_admin=False)

    @mock.patch.object(db, 'snapshot_get_all_by_project')
    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_list_snapshots_with_wrong_limit_and_offset(
            self, mock_metadata_get, mock_snapshot_get_all):
        """Test list with negative and non-numeric limit and offset."""
        mock_snapshot_get_all.return_value = []

        # Negative limit
        req = fakes.HTTPRequest.blank('/v2/snapshots?limit=-1&offset=1')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.index, req)

        # Non-numeric limit
        req = fakes.HTTPRequest.blank('/v2/snapshots?limit=a&offset=1')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.index, req)

        # Negative offset
        req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=-1')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.index, req)

        # Non-numeric offset
        req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=a')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.index, req)

        # Test that we get an exception HTTPBadRequest(400) with an offset
        # greater than the maximum offset value.
        url = '/v2/snapshots?limit=1&offset=323245324356534235'
        req = fakes.HTTPRequest.blank(url)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.index, req)

    def _assert_list_next(self, expected_query=None, project='fakeproject',
                          **kwargs):
        """Check a page of snapshots list."""
        # Since we are accessing v2 api directly we don't need to specify
        # v2 in the request path; if we did, we'd get /v2/v2 links back
        request_path = '/v2/%s/snapshots' % project
        expected_path = request_path

        # Construct the query if there are kwargs
        if kwargs:
            request_str = request_path + '?' + urllib.urlencode(kwargs)
        else:
            request_str = request_path

        # Make the request
        req = fakes.HTTPRequest.blank(request_str)
        res = self.controller.index(req)

        # We only expect to have a next link if there is an actual expected
        # query.
        if expected_query:
            # We must have the links
            self.assertIn('snapshots_links', res)
            links = res['snapshots_links']

            # Must be a list of links, even if we only get 1 back
            self.assertIsInstance(links, list)
            next_link = links[0]

            # rel entry must be next
            self.assertIn('rel', next_link)
            self.assertIn('next', next_link['rel'])

            # href entry must have the right path
            self.assertIn('href', next_link)
            href_parts = urllib.urlparse(next_link['href'])
            self.assertEqual(expected_path, href_parts.path)

            # And the query from the next link must match what we were
            # expecting
            params = urllib.parse_qs(href_parts.query)
            self.assertDictEqual(expected_query, params)

        # Make sure we don't have links if we were not expecting them
        else:
            self.assertNotIn('snapshots_links', res)

    def _create_db_snapshots(self, num_snaps):
        volume = utils.create_volume(self.ctx)
        snaps = [utils.create_snapshot(self.ctx,
                                       volume.id,
                                       display_name='snap' + str(i))
                 for i in range(num_snaps)]
        self.addCleanup(db.volume_destroy, self.ctx, volume.id)
        for snap in snaps:
            self.addCleanup(db.snapshot_destroy, self.ctx, snap.id)
        snaps.reverse()
        return volume, snaps

    def test_list_snapshots_next_link_default_limit(self):
        """Test that snapshot list pagination is limited by osapi_max_limit."""
        self.stubs.UnsetAll()
        volume, snaps = self._create_db_snapshots(3)
        # NOTE(geguileo): Since cinder.api.common.limited has already been
        # imported, its argument max_limit already has a default value of
        # 1000, so it doesn't matter that we change it to 2.  That's why we
        # need to mock it and send it the current value.  We still need to
        # set the default value because other sections of the code use it,
        # for example _get_collection_links
expected = {'limit': ['1'], 'marker': [snaps[1].id]} self._assert_list_next(expected, limit=1, marker=snaps[0].id) # When we go beyond the number of elements there should be no more # next links self._assert_list_next(limit=1, marker=snaps[1].id) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get): req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1', use_admin_context=True) res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(3, len(res['snapshots'])) @mock.patch.object(db, 'snapshot_get_all') @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_admin_list_snapshots_by_tenant_id(self, snapshot_metadata_get, snapshot_get_all): def get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): if 'project_id' in filters and 'tenant1' in filters['project_id']: return [stubs.stub_snapshot(1, tenant_id='tenant1')] else: return [] snapshot_get_all.side_effect = get_all req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1' '&project_id=tenant1', use_admin_context=True) res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_all_tenants_non_admin_gets_all_tenants(self, snapshot_metadata_get): req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1') res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_non_admin_get_by_project(self, snapshot_metadata_get): req = fakes.HTTPRequest.blank('/v2/fake/snapshots') res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) def _create_snapshot_bad_body(self, body): req = fakes.HTTPRequest.blank('/v2/fake/snapshots') req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_create_no_body(self): self._create_snapshot_bad_body(body=None) def test_create_missing_snapshot(self): body = {'foo': {'a': 'b'}} self._create_snapshot_bad_body(body=body) def test_create_malformed_entity(self): body = {'snapshot': 'string'} self._create_snapshot_bad_body(body=body) class SnapshotSerializerTest(test.TestCase): def _verify_snapshot(self, snap, tree): self.assertEqual('snapshot', tree.tag) for attr in ('id', 'status', 'size', 'created_at', 'name', 'description', 'volume_id'): self.assertEqual(str(snap[attr]), tree.get(attr)) def test_snapshot_show_create_serializer(self): serializer = snapshots.SnapshotTemplate() raw_snapshot = dict( id='snap_id', status='snap_status', size=1024, created_at=timeutils.utcnow(), name='snap_name', description='snap_desc', display_description='snap_desc', volume_id='vol_id', ) text = serializer.serialize(dict(snapshot=raw_snapshot)) tree = etree.fromstring(text) self._verify_snapshot(raw_snapshot, tree) def test_snapshot_index_detail_serializer(self): serializer = snapshots.SnapshotsTemplate() raw_snapshots = [ dict( id='snap1_id', status='snap1_status', size=1024, created_at=timeutils.utcnow(), name='snap1_name', description='snap1_desc', volume_id='vol1_id', ), dict( id='snap2_id', status='snap2_status', size=1024, created_at=timeutils.utcnow(), name='snap2_name', description='snap2_desc', volume_id='vol2_id', ) ] text = serializer.serialize(dict(snapshots=raw_snapshots)) tree = 
etree.fromstring(text)

        self.assertEqual('snapshots', tree.tag)
        self.assertEqual(len(raw_snapshots), len(tree))
        for idx, child in enumerate(tree):
            self._verify_snapshot(raw_snapshots[idx], child)
cinder-8.0.0/cinder/tests/unit/api/v2/test_types.py0000664000567000056710000004755012701406257023375 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from lxml import etree
import mock
from oslo_utils import timeutils
import six
import webob

import cinder.api.common as common
from cinder.api.v2 import types
from cinder.api.v2.views import types as views_types
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.volume import volume_types


def stub_volume_type(id):
    specs = {
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
        "key4": "value4",
        "key5": "value5"
    }
    return dict(
        id=id,
        name='vol_type_%s' % six.text_type(id),
        description='vol_type_desc_%s' % six.text_type(id),
        extra_specs=specs,
    )


def return_volume_types_get_all_types(context, filters=None, marker=None,
                                      limit=None, sort_keys=None,
                                      sort_dirs=None, offset=None,
                                      list_result=False):
    result = dict(vol_type_1=stub_volume_type(1),
                  vol_type_2=stub_volume_type(2),
                  vol_type_3=stub_volume_type(3)
                  )
    if list_result:
        return list(result.values())
    return result


def return_empty_volume_types_get_all_types(context, filters=None,
                                            marker=None, limit=None,
                                            sort_keys=None, sort_dirs=None,
                                            offset=None, list_result=False):
    if list_result:
        return []
    return {}


def return_volume_types_get_volume_type(context, id):
    if id == "777":
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    return stub_volume_type(id)


def return_volume_types_get_by_name(context, name):
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    return stub_volume_type(int(name.split("_")[2]))


def return_volume_types_get_default():
    return stub_volume_type(1)


def return_volume_types_get_default_not_found():
    return {}


class VolumeTypesApiTest(test.TestCase):
    def _create_volume_type(self, volume_type_name, extra_specs=None,
                            is_public=True, projects=None):
        return volume_types.create(self.ctxt, volume_type_name, extra_specs,
                                   is_public, projects).get('id')

    def setUp(self):
        super(VolumeTypesApiTest, self).setUp()
        self.controller = types.VolumeTypesController()
        self.ctxt = context.RequestContext(user_id='fake',
                                           project_id='fake',
                                           is_admin=True)
        self.type_id1 = self._create_volume_type('volume_type1',
                                                 {'key1': 'value1'})
        self.type_id2 = self._create_volume_type('volume_type2',
                                                 {'key2': 'value2'})
        self.type_id3 = self._create_volume_type('volume_type3',
                                                 {'key3': 'value3'},
                                                 False, ['fake'])

    def test_volume_types_index(self):
        self.stubs.Set(volume_types, 'get_all_types',
                       return_volume_types_get_all_types)

        req = fakes.HTTPRequest.blank('/v2/fake/types',
                                      use_admin_context=True)
        res_dict = self.controller.index(req)

        self.assertEqual(3, len(res_dict['volume_types']))

        expected_names = ['vol_type_1', 'vol_type_2',
'vol_type_3'] actual_names = map(lambda e: e['name'], res_dict['volume_types']) self.assertEqual(set(expected_names), set(actual_names)) for entry in res_dict['volume_types']: self.assertEqual('value1', entry['extra_specs']['key1']) def test_volume_types_index_no_data(self): self.stubs.Set(volume_types, 'get_all_types', return_empty_volume_types_get_all_types) req = fakes.HTTPRequest.blank('/v2/fake/types') res_dict = self.controller.index(req) self.assertEqual(0, len(res_dict['volume_types'])) def test_volume_types_index_with_limit(self): req = fakes.HTTPRequest.blank('/v2/fake/types?limit=1') req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(1, len(res['volume_types'])) self.assertEqual(self.type_id3, res['volume_types'][0]['id']) expect_next_link = ('http://localhost/v2/fake/types?limit=1' '&marker=%s') % res['volume_types'][0]['id'] self.assertEqual(expect_next_link, res['volume_type_links'][0]['href']) def test_volume_types_index_with_offset(self): req = fakes.HTTPRequest.blank('/v2/fake/types?offset=1') req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(2, len(res['volume_types'])) def test_volume_types_index_with_offset_out_of_range(self): url = '/v2/fake/types?offset=424366766556787' req = fakes.HTTPRequest.blank(url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_types_index_with_limit_and_offset(self): req = fakes.HTTPRequest.blank('/v2/fake/types?limit=2&offset=1') req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(2, len(res['volume_types'])) self.assertEqual(self.type_id2, res['volume_types'][0]['id']) self.assertEqual(self.type_id1, res['volume_types'][1]['id']) def test_volume_types_index_with_limit_and_marker(self): req = fakes.HTTPRequest.blank(('/v2/fake/types?limit=1' '&marker=%s') % self.type_id2) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(1, len(res['volume_types'])) self.assertEqual(self.type_id1, res['volume_types'][0]['id']) def test_volume_types_index_with_valid_filter(self): req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=True') req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(3, len(res['volume_types'])) self.assertEqual(self.type_id3, res['volume_types'][0]['id']) self.assertEqual(self.type_id2, res['volume_types'][1]['id']) self.assertEqual(self.type_id1, res['volume_types'][2]['id']) def test_volume_types_index_with_invalid_filter(self): req = fakes.HTTPRequest.blank(('/v2/fake/types?id=%s') % self.type_id1) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(3, len(res['volume_types'])) def test_volume_types_index_with_sort_keys(self): req = fakes.HTTPRequest.blank('/v2/fake/types?sort=id') req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.type_id1, self.type_id2, self.type_id3] expect_result.sort(reverse=True) self.assertEqual(3, len(res['volume_types'])) self.assertEqual(expect_result[0], res['volume_types'][0]['id']) self.assertEqual(expect_result[1], res['volume_types'][1]['id']) self.assertEqual(expect_result[2], res['volume_types'][2]['id']) def test_volume_types_index_with_sort_and_limit(self): req = fakes.HTTPRequest.blank('/v2/fake/types?sort=id&limit=2') req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.type_id1, self.type_id2, self.type_id3] 
expect_result.sort(reverse=True) self.assertEqual(2, len(res['volume_types'])) self.assertEqual(expect_result[0], res['volume_types'][0]['id']) self.assertEqual(expect_result[1], res['volume_types'][1]['id']) def test_volume_types_index_with_sort_keys_and_sort_dirs(self): req = fakes.HTTPRequest.blank('/v2/fake/types?sort=id:asc') req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.type_id1, self.type_id2, self.type_id3] expect_result.sort() self.assertEqual(3, len(res['volume_types'])) self.assertEqual(expect_result[0], res['volume_types'][0]['id']) self.assertEqual(expect_result[1], res['volume_types'][1]['id']) self.assertEqual(expect_result[2], res['volume_types'][2]['id']) def test_volume_types_show(self): self.stubs.Set(volume_types, 'get_volume_type', return_volume_types_get_volume_type) type_id = str(uuid.uuid4()) req = fakes.HTTPRequest.blank('/v2/fake/types/' + type_id) res_dict = self.controller.show(req, type_id) self.assertEqual(1, len(res_dict)) self.assertEqual(type_id, res_dict['volume_type']['id']) type_name = 'vol_type_' + type_id self.assertEqual(type_name, res_dict['volume_type']['name']) def test_volume_types_show_not_found(self): self.stubs.Set(volume_types, 'get_volume_type', return_volume_types_get_volume_type) req = fakes.HTTPRequest.blank('/v2/fake/types/777') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '777') def test_get_default(self): self.stubs.Set(volume_types, 'get_default_volume_type', return_volume_types_get_default) req = fakes.HTTPRequest.blank('/v2/fake/types/default') req.method = 'GET' res_dict = self.controller.show(req, 'default') self.assertEqual(1, len(res_dict)) self.assertEqual('vol_type_1', res_dict['volume_type']['name']) self.assertEqual('vol_type_desc_1', res_dict['volume_type']['description']) def test_get_default_not_found(self): self.stubs.Set(volume_types, 'get_default_volume_type', return_volume_types_get_default_not_found) req = fakes.HTTPRequest.blank('/v2/fake/types/default') req.method = 'GET' self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 'default') def test_view_builder_show(self): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v2") output = view_builder.show(request, raw_volume_type) self.assertIn('volume_type', output) expected_volume_type = dict( name='new_type', description='new_type_desc', is_public=True, id=42, ) self.assertDictMatch(expected_volume_type, output['volume_type']) def test_view_builder_show_admin(self): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v2", use_admin_context=True) output = view_builder.show(request, raw_volume_type) self.assertIn('volume_type', output) expected_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, extra_specs={}, id=42, ) self.assertDictMatch(expected_volume_type, output['volume_type']) def test_view_builder_show_qos_specs_id_policy(self): with mock.patch.object(common, 'validate_policy', side_effect=[False, True]): 
view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v2") output = view_builder.show(request, raw_volume_type) self.assertIn('volume_type', output) expected_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, id=42, ) self.assertDictMatch(expected_volume_type, output['volume_type']) def test_view_builder_show_extra_specs_policy(self): with mock.patch.object(common, 'validate_policy', side_effect=[True, False]): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v2") output = view_builder.show(request, raw_volume_type) self.assertIn('volume_type', output) expected_volume_type = dict( name='new_type', description='new_type_desc', extra_specs={}, is_public=True, id=42, ) self.assertDictMatch(expected_volume_type, output['volume_type']) def test_view_builder_show_pass_all_policy(self): with mock.patch.object(common, 'validate_policy', side_effect=[True, True]): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v2") output = view_builder.show(request, raw_volume_type) self.assertIn('volume_type', output) expected_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', extra_specs={}, is_public=True, id=42, ) self.assertDictMatch(expected_volume_type, output['volume_type']) def test_view_builder_list(self): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_volume_types = [] for i in range(0, 10): raw_volume_types.append( dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={}, deleted_at=None, id=42 + i ) ) request = fakes.HTTPRequest.blank("/v2") output = view_builder.index(request, raw_volume_types) self.assertIn('volume_types', output) for i in range(0, 10): expected_volume_type = dict( name='new_type', description='new_type_desc', is_public=True, id=42 + i ) self.assertDictMatch(expected_volume_type, output['volume_types'][i]) def test_view_builder_list_admin(self): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_volume_types = [] for i in range(0, 10): raw_volume_types.append( dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={}, deleted_at=None, id=42 + i ) ) request = fakes.HTTPRequest.blank("/v2", use_admin_context=True) output = view_builder.index(request, raw_volume_types) self.assertIn('volume_types', output) for i in range(0, 10): expected_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, extra_specs={}, id=42 + i ) self.assertDictMatch(expected_volume_type, output['volume_types'][i]) class 
VolumeTypesSerializerTest(test.TestCase): def _verify_volume_type(self, vtype, tree): self.assertEqual('volume_type', tree.tag) self.assertEqual(vtype['name'], tree.get('name')) self.assertEqual(vtype['description'], tree.get('description')) self.assertEqual(str(vtype['id']), tree.get('id')) self.assertEqual(1, len(tree)) extra_specs = tree[0] self.assertEqual('extra_specs', extra_specs.tag) seen = set(vtype['extra_specs'].keys()) for child in extra_specs: self.assertIn(child.tag, seen) self.assertEqual(vtype['extra_specs'][child.tag], child.text) seen.remove(child.tag) self.assertEqual(0, len(seen)) def test_index_serializer(self): serializer = types.VolumeTypesTemplate() # Just getting some input data vtypes = return_volume_types_get_all_types(None) text = serializer.serialize({'volume_types': list(vtypes.values())}) tree = etree.fromstring(text) self.assertEqual('volume_types', tree.tag) self.assertEqual(len(vtypes), len(tree)) for child in tree: name = child.get('name') self.assertIn(name, vtypes) self._verify_volume_type(vtypes[name], child) def test_voltype_serializer(self): serializer = types.VolumeTypeTemplate() vtype = stub_volume_type(1) text = serializer.serialize(dict(volume_type=vtype)) tree = etree.fromstring(text) self._verify_volume_type(vtype, tree) cinder-8.0.0/cinder/tests/unit/api/v2/test_volumes.py0000664000567000056710000024145112701406257023727 0ustar jenkinsjenkins00000000000000# Copyright 2013 Josh Durgin # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
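# For orientation: the tests below exercise the v2 /volumes controller.
# A typical create request handled by these tests looks like the following
# minimal sketch (values are illustrative only; see _vol_in_request_body
# below for the full set of accepted fields):
#
#     POST /v2/{project_id}/volumes
#     {"volume": {"size": 1,
#                 "name": "displayname",
#                 "description": "displaydesc",
#                 "availability_zone": "zone1:host1"}}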
import datetime import iso8601 from lxml import etree import mock from oslo_config import cfg from oslo_utils import timeutils import six from six.moves import range from six.moves import urllib import webob from cinder.api import common from cinder.api import extensions from cinder.api.v2 import volumes from cinder import consistencygroup as consistencygroupAPI from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import stubs from cinder.tests.unit import fake_notifier from cinder.tests.unit import fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import utils from cinder.volume import api as volume_api CONF = cfg.CONF NS = '{http://docs.openstack.org/api/openstack-block-storage/2.0/content}' DEFAULT_AZ = "zone1:host1" class VolumeApiTest(test.TestCase): def setUp(self): super(VolumeApiTest, self).setUp() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} fake_image.stub_out_image_service(self.stubs) self.controller = volumes.VolumeController(self.ext_mgr) self.flags(host='fake', notification_driver=[fake_notifier.__name__]) self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all) self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete) self.stubs.Set(db, 'service_get_all_by_topic', stubs.stub_service_get_all_by_topic) self.maxDiff = None self.ctxt = context.RequestContext('admin', 'fakeproject', True) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create(self, mock_validate): self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) vol = self._vol_in_request_body() body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') res_dict = self.controller.create(req, body) ex = self._expected_vol_from_controller() self.assertEqual(ex, res_dict) self.assertTrue(mock_validate.called) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_type(self, mock_validate): vol_type = db.volume_type_create( context.get_admin_context(), dict(name=CONF.default_volume_type, extra_specs={}) ) db_vol_type = db.volume_type_get(context.get_admin_context(), vol_type.id) vol = self._vol_in_request_body(volume_type="FakeTypeName") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when type name isn't valid self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) # Use correct volume type name vol.update(dict(volume_type=CONF.default_volume_type)) body.update(dict(volume=vol)) res_dict = self.controller.create(req, body) volume_id = res_dict['volume']['id'] self.assertEqual(1, len(res_dict)) # Use correct volume type id vol.update(dict(volume_type=db_vol_type['id'])) body.update(dict(volume=vol)) res_dict = self.controller.create(req, body) volume_id = res_dict['volume']['id'] self.assertEqual(1, len(res_dict)) vol_db = stubs.stub_volume(volume_id, volume_type={'name': vol_type}) vol_obj = fake_volume.fake_volume_obj(context.get_admin_context(), **vol_db) self.stubs.Set(volume_api.API, 'get_all', lambda *args, **kwargs: objects.VolumeList(objects=[vol_obj])) # NOTE(geguileo): This is required because common get_by_id method in # cinder.db.sqlalchemy.api caches the real 
get method. db.sqlalchemy.api._GET_METHODS = {} self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/detail') res_dict = self.controller.detail(req) self.assertTrue(mock_validate.called) def _vol_in_request_body(self, size=stubs.DEFAULT_VOL_SIZE, name=stubs.DEFAULT_VOL_NAME, description=stubs.DEFAULT_VOL_DESCRIPTION, availability_zone=DEFAULT_AZ, snapshot_id=None, source_volid=None, source_replica=None, consistencygroup_id=None, volume_type=None, image_ref=None, image_id=None): vol = {"size": size, "name": name, "description": description, "availability_zone": availability_zone, "snapshot_id": snapshot_id, "source_volid": source_volid, "source_replica": source_replica, "consistencygroup_id": consistencygroup_id, "volume_type": volume_type, } if image_id is not None: vol['image_id'] = image_id elif image_ref is not None: vol['imageRef'] = image_ref return vol def _expected_vol_from_controller( self, size=stubs.DEFAULT_VOL_SIZE, availability_zone=DEFAULT_AZ, description=stubs.DEFAULT_VOL_DESCRIPTION, name=stubs.DEFAULT_VOL_NAME, consistencygroup_id=None, source_volid=None, snapshot_id=None, metadata=None, attachments=None, volume_type=stubs.DEFAULT_VOL_TYPE, status=stubs.DEFAULT_VOL_STATUS, with_migration_status=False): metadata = metadata or {} attachments = attachments or [] volume = {'volume': {'attachments': attachments, 'availability_zone': availability_zone, 'bootable': 'false', 'consistencygroup_id': consistencygroup_id, 'created_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'updated_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), 'description': description, 'id': stubs.DEFAULT_VOL_ID, 'links': [{'href': 'http://localhost/v2/fakeproject/volumes/1', 'rel': 'self'}, {'href': 'http://localhost/fakeproject/volumes/1', 'rel': 'bookmark'}], 'metadata': metadata, 'name': name, 'replication_status': 'disabled', 'multiattach': False, 'size': size, 'snapshot_id': snapshot_id, 'source_volid': source_volid, 'status': status, 'user_id': 'fakeuser', 'volume_type': volume_type, 'encrypted': False}} if with_migration_status: volume['volume']['migration_status'] = None return volume def _expected_volume_api_create_kwargs(self, snapshot=None, availability_zone=DEFAULT_AZ, source_volume=None): return {'metadata': None, 'snapshot': snapshot, 'source_volume': source_volume, 'source_replica': None, 'consistencygroup': None, 'availability_zone': availability_zone, 'scheduler_hints': None, 'multiattach': False, } @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', autospec=True) @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) @mock.patch.object(volume_api.API, 'create', autospec=True) def test_volume_creation_from_snapshot(self, create, get_snapshot, volume_type_get): create.side_effect = stubs.stub_volume_api_create get_snapshot.side_effect = stubs.stub_snapshot_get volume_type_get.side_effect = stubs.stub_volume_type_get snapshot_id = stubs.TEST_SNAPSHOT_UUID vol = self._vol_in_request_body(snapshot_id=stubs.TEST_SNAPSHOT_UUID) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') res_dict = self.controller.create(req, body) ex = self._expected_vol_from_controller(snapshot_id=snapshot_id) self.assertEqual(ex, res_dict) context = req.environ['cinder.context'] get_snapshot.assert_called_once_with(self.controller.volume_api, context, snapshot_id) kwargs = self._expected_volume_api_create_kwargs( stubs.stub_snapshot(snapshot_id)) 
create.assert_called_once_with(self.controller.volume_api, context, vol['size'], stubs.DEFAULT_VOL_NAME, stubs.DEFAULT_VOL_DESCRIPTION, **kwargs) @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) def test_volume_creation_fails_with_invalid_snapshot(self, get_snapshot): get_snapshot.side_effect = stubs.stub_snapshot_get snapshot_id = "fake_id" vol = self._vol_in_request_body(snapshot_id=snapshot_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when snapshot cannot be found. self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) context = req.environ['cinder.context'] get_snapshot.assert_called_once_with(self.controller.volume_api, context, snapshot_id) @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', autospec=True) @mock.patch.object(volume_api.API, 'get_volume', autospec=True) @mock.patch.object(volume_api.API, 'create', autospec=True) def test_volume_creation_from_source_volume(self, create, get_volume, volume_type_get): get_volume.side_effect = stubs.stub_volume_api_get create.side_effect = stubs.stub_volume_api_create volume_type_get.side_effect = stubs.stub_volume_type_get source_volid = '2f49aa3a-6aae-488d-8b99-a43271605af6' vol = self._vol_in_request_body(source_volid=source_volid) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') res_dict = self.controller.create(req, body) ex = self._expected_vol_from_controller(source_volid=source_volid) self.assertEqual(ex, res_dict) context = req.environ['cinder.context'] get_volume.assert_called_once_with(self.controller.volume_api, context, source_volid) db_vol = stubs.stub_volume(source_volid) vol_obj = fake_volume.fake_volume_obj(context, **db_vol) kwargs = self._expected_volume_api_create_kwargs( source_volume=vol_obj) create.assert_called_once_with(self.controller.volume_api, context, vol['size'], stubs.DEFAULT_VOL_NAME, stubs.DEFAULT_VOL_DESCRIPTION, **kwargs) @mock.patch.object(volume_api.API, 'get_volume', autospec=True) def test_volume_creation_fails_with_invalid_source_volume(self, get_volume): get_volume.side_effect = stubs.stub_volume_get_notfound source_volid = "fake_id" vol = self._vol_in_request_body(source_volid=source_volid) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when source volume cannot be found. self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) context = req.environ['cinder.context'] get_volume.assert_called_once_with(self.controller.volume_api, context, source_volid) @mock.patch.object(volume_api.API, 'get_volume', autospec=True) def test_volume_creation_fails_with_invalid_source_replica(self, get_volume): get_volume.side_effect = stubs.stub_volume_get_notfound source_replica = "fake_id" vol = self._vol_in_request_body(source_replica=source_replica) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when source replica cannot be found. 
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) context = req.environ['cinder.context'] get_volume.assert_called_once_with(self.controller.volume_api, context, source_replica) @mock.patch.object(volume_api.API, 'get_volume', autospec=True) def test_volume_creation_fails_with_invalid_source_replication_status( self, get_volume): get_volume.side_effect = stubs.stub_volume_get source_replica = '2f49aa3a-6aae-488d-8b99-a43271605af6' vol = self._vol_in_request_body(source_replica=source_replica) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 400 when replication status is disabled. self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) context = req.environ['cinder.context'] get_volume.assert_called_once_with(self.controller.volume_api, context, source_replica) @mock.patch.object(consistencygroupAPI.API, 'get', autospec=True) def test_volume_creation_fails_with_invalid_consistency_group(self, get_cg): get_cg.side_effect = stubs.stub_consistencygroup_get_notfound consistencygroup_id = '4f49aa3a-6aae-488d-8b99-a43271605af6' vol = self._vol_in_request_body( consistencygroup_id=consistencygroup_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 404 when consistency group is not found. self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) context = req.environ['cinder.context'] get_cg.assert_called_once_with(self.controller.consistencygroup_api, context, consistencygroup_id) def test_volume_creation_fails_with_bad_size(self): vol = self._vol_in_request_body(size="") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(exception.InvalidInput, self.controller.create, req, body) def test_volume_creation_fails_with_bad_availability_zone(self): vol = self._vol_in_request_body(availability_zone="zonen:hostn") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(exception.InvalidInput, self.controller.create, req, body) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_image_ref(self, mock_validate): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body( availability_zone="nova", image_ref="c905cedb-7281-47e4-8a62-f26bc5fc4c77") ex = self._expected_vol_from_controller(availability_zone="nova") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') res_dict = self.controller.create(req, body) self.assertEqual(ex, res_dict) self.assertTrue(mock_validate.called) def test_volume_create_with_image_ref_is_integer(self): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="cinder", image_ref=1234) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_volume_create_with_image_ref_not_uuid_format(self): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="cinder", image_ref="12345") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) 
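    # NOTE: illustrative sketch, not part of the original test module, and
    # the helper name below is hypothetical rather than cinder API. The
    # image_ref/image_id cases around here expect HTTP 400 whenever the
    # reference is neither a UUID nor a resolvable image name; the
    # UUID-format half of that check reduces to:
    @staticmethod
    def _looks_like_uuid(value):
        import uuid
        try:
            # Canonical-form comparison rejects ints, short strings, empty
            # strings and non-canonical spellings such as braced UUIDs.
            return str(uuid.UUID(value)) == str(value).lower()
        except (TypeError, ValueError, AttributeError):
            return False
    # _looks_like_uuid('c905cedb-7281-47e4-8a62-f26bc5fc4c77') is True;
    # _looks_like_uuid('12345') and _looks_like_uuid(1234) are False, which
    # the controller surfaces as webob.exc.HTTPBadRequest.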
def test_volume_create_with_image_ref_with_empty_string(self): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="cinder", image_ref="") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_image_id(self, mock_validate): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body( availability_zone="nova", image_id="c905cedb-7281-47e4-8a62-f26bc5fc4c77") ex = self._expected_vol_from_controller(availability_zone="nova") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') res_dict = self.controller.create(req, body) self.assertEqual(ex, res_dict) self.assertTrue(mock_validate.called) def test_volume_create_with_image_id_is_integer(self): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="cinder", image_id=1234) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_volume_create_with_image_id_not_uuid_format(self): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="cinder", image_id="12345") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_volume_create_with_image_id_with_empty_string(self): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="cinder", image_id="") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_image_name(self, mock_validate): self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) self.stubs.Set(fake_image._FakeImageService, "detail", stubs.stub_image_service_detail) test_id = "Fedora-x86_64-20-20140618-sda" self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="nova", image_ref=test_id) ex = self._expected_vol_from_controller(availability_zone="nova") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') res_dict = self.controller.create(req, body) self.assertEqual(ex, res_dict) self.assertTrue(mock_validate.called) def test_volume_create_with_image_name_has_multiple(self): self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) self.stubs.Set(fake_image._FakeImageService, "detail", stubs.stub_image_service_detail) test_id = "multi" self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="nova", image_ref=test_id) 
body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_volume_create_with_image_name_no_match(self): self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) self.stubs.Set(fake_image._FakeImageService, "detail", stubs.stub_image_service_detail) test_id = "MissingName" self.ext_mgr.extensions = {'os-image-create': 'fake'} vol = self._vol_in_request_body(availability_zone="nova", image_ref=test_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update(self, mock_validate): self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) updates = { "name": "Updated Test Name", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller.update(req, '1', body) expected = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, name="Updated Test Name", metadata={'attached_mode': 'rw', 'readonly': 'False'}) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_deprecation(self, mock_validate): self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) updates = { "display_name": "Updated Test Name", "display_description": "Updated Test Description", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller.update(req, '1', body) expected = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, name="Updated Test Name", description="Updated Test Description", metadata={'attached_mode': 'rw', 'readonly': 'False'}) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_deprecation_key_priority(self, mock_validate): """Test current update keys have priority over deprecated keys.""" self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) updates = { "name": "New Name", "description": "New Description", "display_name": "Not Shown Name", "display_description": "Not Shown Description", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller.update(req, '1', body) expected = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, name="New Name", description="New Description", metadata={'attached_mode': 'rw', 'readonly': 'False'}) self.assertEqual(expected, res_dict) 
self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_metadata(self, mock_validate): self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) updates = { "metadata": {"qos_max_iops": 2000} } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller.update(req, '1', body) expected = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, metadata={'attached_mode': 'rw', 'readonly': 'False', 'qos_max_iops': '2000'}) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_with_admin_metadata(self, mock_validate): self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) volume = stubs.stub_volume("1") del volume['name'] del volume['volume_type'] del volume['volume_type_id'] volume['metadata'] = {'key': 'value'} db.volume_create(context.get_admin_context(), volume) db.volume_admin_metadata_update(context.get_admin_context(), "1", {"readonly": "True", "invisible_key": "invisible_value"}, False) values = {'volume_id': '1', } attachment = db.volume_attach(context.get_admin_context(), values) db.volume_attached(context.get_admin_context(), attachment['id'], stubs.FAKE_UUID, None, '/') attach_tmp = db.volume_attachment_get(context.get_admin_context(), attachment['id']) volume_tmp = db.volume_get(context.get_admin_context(), '1') updates = { "name": "Updated Test Name", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertEqual(0, len(self.notifier.notifications)) admin_ctx = context.RequestContext('admin', 'fakeproject', True) req.environ['cinder.context'] = admin_ctx res_dict = self.controller.update(req, '1', body) expected = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, volume_type=None, status='in-use', name='Updated Test Name', attachments=[{'id': '1', 'attachment_id': attachment['id'], 'volume_id': stubs.DEFAULT_VOL_ID, 'server_id': stubs.FAKE_UUID, 'host_name': None, 'device': '/', 'attached_at': attach_tmp['attach_time'].replace( tzinfo=iso8601.iso8601.Utc()), }], metadata={'key': 'value', 'readonly': 'True'}, with_migration_status=True) expected['volume']['updated_at'] = volume_tmp['updated_at'].replace( tzinfo=iso8601.iso8601.Utc()) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) def test_update_empty_body(self): body = {} req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '1', body) def test_update_invalid_body(self): body = { 'name': 'missing top level volume key' } req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '1', body) def test_update_not_found(self): self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) updates = { "name": "Updated Test Name", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, '1', 
body) def test_volume_list_summary(self): self.stubs.Set(volume_api.API, 'get_all', stubs.stub_volume_api_get_all_by_project) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes') res_dict = self.controller.index(req) expected = { 'volumes': [ { 'name': stubs.DEFAULT_VOL_NAME, 'id': '1', 'links': [ { 'href': 'http://localhost/v2/fakeproject/volumes/' '1', 'rel': 'self' }, { 'href': 'http://localhost/fakeproject/volumes/1', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) # Finally test that we cached the returned volumes self.assertEqual(1, len(req.cached_resource())) def test_volume_list_detail(self): self.stubs.Set(volume_api.API, 'get_all', stubs.stub_volume_api_get_all_by_project) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/detail') res_dict = self.controller.detail(req) exp_vol = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, metadata={'attached_mode': 'rw', 'readonly': 'False'}) expected = {'volumes': [exp_vol['volume']]} self.assertEqual(expected, res_dict) # Finally test that we cached the returned volumes self.assertEqual(1, len(req.cached_resource())) def test_volume_list_detail_with_admin_metadata(self): volume = stubs.stub_volume("1") del volume['name'] del volume['volume_type'] del volume['volume_type_id'] volume['metadata'] = {'key': 'value'} db.volume_create(context.get_admin_context(), volume) db.volume_admin_metadata_update(context.get_admin_context(), "1", {"readonly": "True", "invisible_key": "invisible_value"}, False) values = {'volume_id': '1', } attachment = db.volume_attach(context.get_admin_context(), values) db.volume_attached(context.get_admin_context(), attachment['id'], stubs.FAKE_UUID, None, '/') attach_tmp = db.volume_attachment_get(context.get_admin_context(), attachment['id']) volume_tmp = db.volume_get(context.get_admin_context(), '1') req = fakes.HTTPRequest.blank('/v2/volumes/detail') admin_ctx = context.RequestContext('admin', 'fakeproject', True) req.environ['cinder.context'] = admin_ctx res_dict = self.controller.detail(req) exp_vol = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, status="in-use", volume_type=None, attachments=[{'attachment_id': attachment['id'], 'device': '/', 'server_id': stubs.FAKE_UUID, 'host_name': None, 'id': '1', 'volume_id': stubs.DEFAULT_VOL_ID, 'attached_at': attach_tmp['attach_time'].replace( tzinfo=iso8601.iso8601.Utc()), }], metadata={'key': 'value', 'readonly': 'True'}, with_migration_status=True) exp_vol['volume']['updated_at'] = volume_tmp['updated_at'].replace( tzinfo=iso8601.iso8601.Utc()) expected = {'volumes': [exp_vol['volume']]} self.assertEqual(expected, res_dict) def test_volume_index_with_marker(self): def stub_volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [ stubs.stub_volume(1, display_name='vol1'), stubs.stub_volume(2, display_name='vol2'), ] self.stubs.Set(db, 'volume_get_all_by_project', stub_volume_get_all_by_project) self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) req = fakes.HTTPRequest.blank('/v2/volumes?marker=1') res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(2, len(volumes)) self.assertEqual('1', volumes[0]['id']) self.assertEqual('2', volumes[1]['id']) def test_volume_index_limit(self): self.stubs.Set(db, 
'volume_get_all_by_project', stubs.stub_volume_get_all_by_project) self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) req = fakes.HTTPRequest.blank('/v2/volumes' '?limit=1&name=foo' '&sort=id1:asc') res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) # Ensure that the next link is correctly formatted, it should # contain the same limit, filter, and sort information as the # original request as well as a marker; this ensures that the # caller can simply use the "next" link and that they do not # need to manually insert the limit and sort information. links = res_dict['volumes_links'] self.assertEqual('next', links[0]['rel']) href_parts = urllib.parse.urlparse(links[0]['href']) self.assertEqual('/v2/fakeproject/volumes', href_parts.path) params = urllib.parse.parse_qs(href_parts.query) self.assertEqual(str(volumes[0]['id']), params['marker'][0]) self.assertEqual('1', params['limit'][0]) self.assertEqual('foo', params['name'][0]) self.assertEqual('id1:asc', params['sort'][0]) def test_volume_index_limit_negative(self): req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_index_limit_non_int(self): req = fakes.HTTPRequest.blank('/v2/volumes?limit=a') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_index_limit_marker(self): self.stubs.Set(db, 'volume_get_all_by_project', stubs.stub_volume_get_all_by_project) self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) req = fakes.HTTPRequest.blank('/v2/volumes?marker=1&limit=1') res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual('1', volumes[0]['id']) def _create_db_volumes(self, num_volumes): volumes = [utils.create_volume(self.ctxt, display_name='vol%s' % i) for i in range(num_volumes)] for vol in volumes: self.addCleanup(db.volume_destroy, self.ctxt, vol.id) volumes.reverse() return volumes def test_volume_index_limit_offset(self): created_volumes = self._create_db_volumes(2) req = fakes.HTTPRequest.blank('/v2/volumes?limit=2&offset=1') res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(created_volumes[1].id, volumes[0]['id']) req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) req = fakes.HTTPRequest.blank('/v2/volumes?limit=a&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) # Test that we get an exception HTTPBadRequest(400) with an offset # greater than the maximum offset value. 
url = '/v2/volumes?limit=2&offset=43543564546567575' req = fakes.HTTPRequest.blank(url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_detail_with_marker(self): def stub_volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [ stubs.stub_volume(1, display_name='vol1'), stubs.stub_volume(2, display_name='vol2'), ] self.stubs.Set(db, 'volume_get_all_by_project', stub_volume_get_all_by_project) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1') res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(2, len(volumes)) self.assertEqual('1', volumes[0]['id']) self.assertEqual('2', volumes[1]['id']) def test_volume_detail_limit(self): self.stubs.Set(db, 'volume_get_all_by_project', stubs.stub_volume_get_all_by_project) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=1') res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) # Ensure that the next link is correctly formatted links = res_dict['volumes_links'] self.assertEqual('next', links[0]['rel']) href_parts = urllib.parse.urlparse(links[0]['href']) self.assertEqual('/v2/fakeproject/volumes/detail', href_parts.path) params = urllib.parse.parse_qs(href_parts.query) self.assertTrue('marker' in params) self.assertEqual('1', params['limit'][0]) def test_volume_detail_limit_negative(self): req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) def test_volume_detail_limit_non_int(self): req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) def test_volume_detail_limit_marker(self): self.stubs.Set(db, 'volume_get_all_by_project', stubs.stub_volume_get_all_by_project) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1&limit=1') res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual('1', volumes[0]['id']) def test_volume_detail_limit_offset(self): created_volumes = self._create_db_volumes(2) req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1') res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(created_volumes[1].id, volumes[0]['id']) req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1', use_admin_context=True) res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(created_volumes[1].id, volumes[0]['id']) req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) url = '/v2/volumes/detail?limit=2&offset=4536546546546467' req = fakes.HTTPRequest.blank(url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) def test_volume_with_limit_zero(self): def stub_volume_get_all(context, marker, limit, **kwargs): return [] self.stubs.Set(db, 
                       'volume_get_all', stub_volume_get_all)
        req = fakes.HTTPRequest.blank('/v2/volumes?limit=0')
        res_dict = self.controller.index(req)
        expected = {'volumes': []}
        self.assertEqual(expected, res_dict)

    def _validate_next_link(self, detailed, item_count, osapi_max_limit,
                            limit, should_link_exist):
        keys_fns = (('volumes', self.controller.index),
                    ('volumes/detail', self.controller.detail))
        key, fn = keys_fns[detailed]
        req_string = '/v2/%s?all_tenants=1' % key
        if limit:
            req_string += '&limit=%s' % limit
        req = fakes.HTTPRequest.blank(req_string, use_admin_context=True)
        link_return = [{"rel": "next", "href": "fake_link"}]
        self.flags(osapi_max_limit=osapi_max_limit)

        def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
                                  original_call=common.get_pagination_params):
            return original_call(params, max_limit)

        def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
                             original_call=common._get_limit_param):
            return original_call(params, max_limit)

        with mock.patch.object(common, 'get_pagination_params',
                               get_pagination_params), \
                mock.patch.object(common, '_get_limit_param',
                                  _get_limit_param), \
                mock.patch.object(common.ViewBuilder, '_generate_next_link',
                                  return_value=link_return):
            res_dict = fn(req)
            self.assertEqual(item_count, len(res_dict['volumes']))
            self.assertEqual(should_link_exist, 'volumes_links' in res_dict)

    def test_volume_default_limit(self):
        self.stubs.UnsetAll()
        self._create_db_volumes(3)

        # Verify both the index and detail queries
        for detailed in (True, False):
            # Number of volumes less than max, do not include next link
            self._validate_next_link(detailed, item_count=3,
                                     osapi_max_limit=4,
                                     limit=None, should_link_exist=False)

            # Number of volumes equals the max, next link will be included
            self._validate_next_link(detailed, item_count=3,
                                     osapi_max_limit=3,
                                     limit=None, should_link_exist=True)

            # Number of volumes more than the max, include next link
            self._validate_next_link(detailed, item_count=2,
                                     osapi_max_limit=2,
                                     limit=None, should_link_exist=True)

            # Limit lower than max but doesn't limit, no next link
            self._validate_next_link(detailed, item_count=3,
                                     osapi_max_limit=5,
                                     limit=4, should_link_exist=False)

            # Limit lower than max and limits, we have next link
            self._validate_next_link(detailed, item_count=2,
                                     osapi_max_limit=4,
                                     limit=2, should_link_exist=True)

            # Limit higher than max and max limits, we have next link
            self._validate_next_link(detailed, item_count=2,
                                     osapi_max_limit=2,
                                     limit=4, should_link_exist=True)

            # Limit higher than max but none of them limiting, no next link
            self._validate_next_link(detailed, item_count=3,
                                     osapi_max_limit=4,
                                     limit=5, should_link_exist=False)

    def test_volume_list_default_filters(self):
        """Tests that the default filters from volume.api.API.get_all are set.

        1. 'no_migration_targets'=True for non-admins and get_all_by_project
        is invoked.
        2. 'no_migration_targets' is not included for admins.
        3. When 'all_tenants' is not specified, then it is removed and
        get_all_by_project is invoked for admins.
        4. When 'all_tenants' is specified, then it is removed and get_all
        is invoked for admins.
""" # Non-admin, project function should be called with no_migration_status def stub_volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): self.assertTrue(filters['no_migration_targets']) self.assertFalse('all_tenants' in filters) return [stubs.stub_volume(1, display_name='vol1')] def stub_volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [] self.stubs.Set(db, 'volume_get_all_by_project', stub_volume_get_all_by_project) self.stubs.Set(db, 'volume_get_all', stub_volume_get_all) # all_tenants does not matter for non-admin for params in ['', '?all_tenants=1']: req = fakes.HTTPRequest.blank('/v2/volumes%s' % params) resp = self.controller.index(req) self.assertEqual(1, len(resp['volumes'])) self.assertEqual('vol1', resp['volumes'][0]['name']) # Admin, all_tenants is not set, project function should be called # without no_migration_status def stub_volume_get_all_by_project2(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): self.assertFalse('no_migration_targets' in filters) return [stubs.stub_volume(1, display_name='vol2')] def stub_volume_get_all2(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [] self.stubs.Set(db, 'volume_get_all_by_project', stub_volume_get_all_by_project2) self.stubs.Set(db, 'volume_get_all', stub_volume_get_all2) req = fakes.HTTPRequest.blank('/v2/volumes', use_admin_context=True) resp = self.controller.index(req) self.assertEqual(1, len(resp['volumes'])) self.assertEqual('vol2', resp['volumes'][0]['name']) # Admin, all_tenants is set, get_all function should be called # without no_migration_status def stub_volume_get_all_by_project3(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [] def stub_volume_get_all3(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): self.assertFalse('no_migration_targets' in filters) self.assertFalse('all_tenants' in filters) return [stubs.stub_volume(1, display_name='vol3')] self.stubs.Set(db, 'volume_get_all_by_project', stub_volume_get_all_by_project3) self.stubs.Set(db, 'volume_get_all', stub_volume_get_all3) req = fakes.HTTPRequest.blank('/v2/volumes?all_tenants=1', use_admin_context=True) resp = self.controller.index(req) self.assertEqual(1, len(resp['volumes'])) self.assertEqual('vol3', resp['volumes'][0]['name']) def test_volume_show(self): self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/1') res_dict = self.controller.show(req, '1') expected = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, metadata={'attached_mode': 'rw', 'readonly': 'False'}) self.assertEqual(expected, res_dict) # Finally test that we cached the returned volume self.assertIsNotNone(req.cached_resource_by_id('1')) def test_volume_show_no_attachments(self): def stub_volume_get(self, context, volume_id, **kwargs): vol = stubs.stub_volume(volume_id, attach_status='detached') return fake_volume.fake_volume_obj(context, **vol) def stub_volume_admin_metadata_get(context, volume_id, **kwargs): return stubs.stub_volume_admin_metadata_get( context, volume_id, 
attach_status='detached') self.stubs.Set(volume_api.API, 'get', stub_volume_get) self.stubs.Set(db, 'volume_admin_metadata_get', stub_volume_admin_metadata_get) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/1') res_dict = self.controller.show(req, '1') expected = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, metadata={'readonly': 'False'}) self.assertEqual(expected, res_dict) def test_volume_show_no_volume(self): self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 1) # Finally test that nothing was cached self.assertIsNone(req.cached_resource_by_id('1')) def test_volume_show_with_admin_metadata(self): volume = stubs.stub_volume("1") del volume['name'] del volume['volume_type'] del volume['volume_type_id'] volume['metadata'] = {'key': 'value'} db.volume_create(context.get_admin_context(), volume) db.volume_admin_metadata_update(context.get_admin_context(), "1", {"readonly": "True", "invisible_key": "invisible_value"}, False) values = {'volume_id': '1', } attachment = db.volume_attach(context.get_admin_context(), values) db.volume_attached(context.get_admin_context(), attachment['id'], stubs.FAKE_UUID, None, '/') attach_tmp = db.volume_attachment_get(context.get_admin_context(), attachment['id']) volume_tmp = db.volume_get(context.get_admin_context(), '1') req = fakes.HTTPRequest.blank('/v2/volumes/1') admin_ctx = context.RequestContext('admin', 'fakeproject', True) req.environ['cinder.context'] = admin_ctx res_dict = self.controller.show(req, '1') expected = self._expected_vol_from_controller( availability_zone=stubs.DEFAULT_AZ, volume_type=None, status='in-use', attachments=[{'id': '1', 'attachment_id': attachment['id'], 'volume_id': stubs.DEFAULT_VOL_ID, 'server_id': stubs.FAKE_UUID, 'host_name': None, 'device': '/', 'attached_at': attach_tmp['attach_time'].replace( tzinfo=iso8601.iso8601.Utc()), }], metadata={'key': 'value', 'readonly': 'True'}, with_migration_status=True) expected['volume']['updated_at'] = volume_tmp['updated_at'].replace( tzinfo=iso8601.iso8601.Utc()) self.assertEqual(expected, res_dict) def test_volume_show_with_encrypted_volume(self): def stub_volume_get(self, context, volume_id, **kwargs): vol = stubs.stub_volume(volume_id, encryption_key_id='fake_id') return fake_volume.fake_volume_obj(context, **vol) self.stubs.Set(volume_api.API, 'get', stub_volume_get) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/1') res_dict = self.controller.show(req, 1) self.assertTrue(res_dict['volume']['encrypted']) def test_volume_show_with_unencrypted_volume(self): self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get) self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full', stubs.stub_volume_type_get) req = fakes.HTTPRequest.blank('/v2/volumes/1') res_dict = self.controller.show(req, 1) self.assertEqual(False, res_dict['volume']['encrypted']) def test_volume_delete(self): self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) req = fakes.HTTPRequest.blank('/v2/volumes/1') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_volume_delete_attached(self): def stub_volume_attached(self, context, volume, force=False, cascade=False): raise exception.VolumeAttached(volume_id=volume['id']) self.stubs.Set(volume_api.API, 
"delete", stub_volume_attached) self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) req = fakes.HTTPRequest.blank('/v2/volumes/1') exp = self.assertRaises(exception.VolumeAttached, self.controller.delete, req, 1) expect_msg = "Volume 1 is still attached, detach volume first." self.assertEqual(expect_msg, six.text_type(exp)) def test_volume_delete_no_volume(self): self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) req = fakes.HTTPRequest.blank('/v2/volumes/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) def test_admin_list_volumes_limited_to_project(self): self.stubs.Set(db, 'volume_get_all_by_project', stubs.stub_volume_get_all_by_project) req = fakes.HTTPRequest.blank('/v2/fake/volumes', use_admin_context=True) res = self.controller.index(req) self.assertIn('volumes', res) self.assertEqual(1, len(res['volumes'])) def test_admin_list_volumes_all_tenants(self): self.stubs.Set(db, 'volume_get_all_by_project', stubs.stub_volume_get_all_by_project) req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1', use_admin_context=True) res = self.controller.index(req) self.assertIn('volumes', res) self.assertEqual(3, len(res['volumes'])) def test_all_tenants_non_admin_gets_all_tenants(self): self.stubs.Set(db, 'volume_get_all_by_project', stubs.stub_volume_get_all_by_project) self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1') res = self.controller.index(req) self.assertIn('volumes', res) self.assertEqual(1, len(res['volumes'])) def test_non_admin_get_by_project(self): self.stubs.Set(db, 'volume_get_all_by_project', stubs.stub_volume_get_all_by_project) self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) req = fakes.HTTPRequest.blank('/v2/fake/volumes') res = self.controller.index(req) self.assertIn('volumes', res) self.assertEqual(1, len(res['volumes'])) def _create_volume_bad_request(self, body): req = fakes.HTTPRequest.blank('/v2/fake/volumes') req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_create_no_body(self): self._create_volume_bad_request(body=None) def test_create_missing_volume(self): body = {'foo': {'a': 'b'}} self._create_volume_bad_request(body=body) def test_create_malformed_entity(self): body = {'volume': 'string'} self._create_volume_bad_request(body=body) def _test_get_volumes_by_name(self, get_all, display_name): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'display_name': display_name} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'display_name': display_name}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_string(self, get_all): """Test to get a volume with an alpha-numeric display name.""" self._test_get_volumes_by_name(get_all, 'Volume-573108026') @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_double_quoted_string(self, get_all): """Test to get a volume with a double-quoted display name.""" self._test_get_volumes_by_name(get_all, '"Volume-573108026"') @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_single_quoted_string(self, get_all): """Test to get a volume with a single-quoted display name.""" 
self._test_get_volumes_by_name(get_all, "'Volume-573108026'") @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_quote_in_between_string(self, get_all): """Test to get a volume with a quote in between the display name.""" self._test_get_volumes_by_name(get_all, 'Volu"me-573108026') @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_mixed_quoted_string(self, get_all): """Test to get a volume with a mix of single and double quotes. """ # The display name starts with a single quote and ends with a # double quote self._test_get_volumes_by_name(get_all, '\'Volume-573108026"') @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_true(self, get_all): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'display_name': 'Volume-573108026', 'bootable': 1} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'display_name': 'Volume-573108026', 'bootable': True}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_false(self, get_all): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'display_name': 'Volume-573108026', 'bootable': 0} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'display_name': 'Volume-573108026', 'bootable': False}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_list(self, get_all): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'id': "['1', '2', '3']"} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'id': ['1', '2', '3']}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_expression(self, get_all): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'name': "d-"} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'display_name': 'd-'}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_status(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext('fake', 'fake', auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'status': 'available'} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'status': 'available'}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_metadata(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext('fake', 'fake', auth_token=True) req.environ = {'cinder.context': ctxt} req.params = 
{'metadata': "{'fake_key': 'fake_value'}"} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'metadata': {'fake_key': 'fake_value'}}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_availability_zone(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext('fake', 'fake', auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'availability_zone': 'nova'} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'availability_zone': 'nova'}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_bootable(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext('fake', 'fake', auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'bootable': 1} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'bootable': True}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_invalid_filter(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext('fake', 'fake', auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'invalid_filter': 'invalid', 'availability_zone': 'nova'} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'availability_zone': 'nova'}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_sort_by_name(self, get_all): """Name in client means display_name in database.""" req = mock.MagicMock() ctxt = context.RequestContext('fake', 'fake', auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'sort': 'name'} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_dirs=['desc'], viewable_admin_meta=True, sort_keys=['display_name'], filters={}, offset=0) def test_get_volume_filter_options_using_config(self): filter_list = ['name', 'status', 'metadata', 'bootable', 'availability_zone'] self.override_config('query_volume_filters', filter_list) self.assertEqual(filter_list, self.controller._get_volume_filter_options()) class VolumeSerializerTest(test.TestCase): def _verify_volume_attachment(self, attach, tree): for attr in ('id', 'volume_id', 'server_id', 'device'): self.assertEqual(str(attach[attr]), tree.get(attr)) def _verify_volume(self, vol, tree): self.assertEqual(NS + 'volume', tree.tag) for attr in ('id', 'status', 'size', 'availability_zone', 'created_at', 'name', 'description', 'volume_type', 'bootable', 'snapshot_id', 'source_volid'): self.assertEqual(str(vol[attr]), tree.get(attr)) for child in tree: self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata')) if child.tag == 'attachments': self.assertEqual(1, len(child)) self.assertEqual('attachment', child[0].tag) 
self._verify_volume_attachment(vol['attachments'][0], child[0]) elif child.tag == 'metadata': not_seen = set(vol['metadata'].keys()) for gr_child in child: self.assertIn(gr_child.get("key"), not_seen) self.assertEqual(str(vol['metadata'][gr_child.get("key")]), gr_child.text) not_seen.remove(gr_child.get('key')) self.assertEqual(0, len(not_seen)) def test_volume_show_create_serializer(self): serializer = volumes.VolumeTemplate() raw_volume = dict( id='vol_id', status='vol_status', size=1024, availability_zone='vol_availability', bootable=False, created_at=timeutils.utcnow(), attachments=[ dict( id='vol_id', volume_id='vol_id', server_id='instance_uuid', device='/foo' ) ], name='vol_name', description='vol_desc', volume_type='vol_type', snapshot_id='snap_id', source_volid='source_volid', metadata=dict( foo='bar', baz='quux', ), ) text = serializer.serialize(dict(volume=raw_volume)) tree = etree.fromstring(text) self._verify_volume(raw_volume, tree) def test_volume_index_detail_serializer(self): serializer = volumes.VolumesTemplate() raw_volumes = [ dict( id='vol1_id', status='vol1_status', size=1024, availability_zone='vol1_availability', bootable=True, created_at=timeutils.utcnow(), attachments=[ dict( id='vol1_id', volume_id='vol1_id', server_id='instance_uuid', device='/foo1' ) ], name='vol1_name', description='vol1_desc', volume_type='vol1_type', snapshot_id='snap1_id', source_volid=None, metadata=dict(foo='vol1_foo', bar='vol1_bar', ), ), dict( id='vol2_id', status='vol2_status', size=1024, availability_zone='vol2_availability', bootable=False, created_at=timeutils.utcnow(), attachments=[dict(id='vol2_id', volume_id='vol2_id', server_id='instance_uuid', device='/foo2')], name='vol2_name', description='vol2_desc', volume_type='vol2_type', snapshot_id='snap2_id', source_volid=None, metadata=dict(foo='vol2_foo', bar='vol2_bar', ), )] text = serializer.serialize(dict(volumes=raw_volumes)) tree = etree.fromstring(text) self.assertEqual(NS + 'volumes', tree.tag) self.assertEqual(len(raw_volumes), len(tree)) for idx, child in enumerate(tree): self._verify_volume(raw_volumes[idx], child) class TestVolumeCreateRequestXMLDeserializer(test.TestCase): def setUp(self): super(TestVolumeCreateRequestXMLDeserializer, self).setUp() self.deserializer = volumes.CreateDeserializer() def test_minimal_volume(self): self_request = """ """ request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", }, } self.assertEqual(expected, request['body']) def test_name(self): self_request = """ """ request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "name": "Volume-xml", }, } self.assertEqual(expected, request['body']) def test_description(self): self_request = """ """ request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "name": "Volume-xml", "description": "description", }, } self.assertEqual(expected, request['body']) def test_volume_type(self): self_request = """ """ request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "name": "Volume-xml", "description": "description", "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", }, } self.assertEqual(expected, request['body']) def test_availability_zone(self): self_request = """ """ request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "name": "Volume-xml", "description": "description", "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", "availability_zone": "us-east1", }, } 
self.assertEqual(expected, request['body']) def test_metadata(self): self_request = """ work""" request = self.deserializer.deserialize(self_request) expected = { "volume": { "name": "Volume-xml", "size": "1", "metadata": { "Type": "work", }, }, } self.assertEqual(expected, request['body']) def test_full_volume(self): self_request = """ work""" request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "name": "Volume-xml", "description": "description", "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", "availability_zone": "us-east1", "metadata": { "Type": "work", }, }, } self.assertEqual(expected, request['body']) def test_imageref(self): self_request = """ """ request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "name": "Volume-xml", "description": "description", "imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737", }, } self.assertEqual(expected, request['body']) def test_snapshot_id(self): self_request = """ """ request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "name": "Volume-xml", "description": "description", "snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737", }, } self.assertEqual(expected, request['body']) def test_source_volid(self): self_request = """ """ request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "name": "Volume-xml", "description": "description", "source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737", }, } self.assertEqual(expected, request['body']) cinder-8.0.0/cinder/tests/unit/api/v2/test_snapshot_metadata.py0000664000567000056710000006342012701406257025732 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
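# NOTE: illustrative sketch, not part of either original test module. The
# XML bodies fed to CreateDeserializer in the tests above are attribute-only
# <volume> documents whose shape is mirrored by each test's `expected` dict;
# the namespace below is an assumption made for illustration, not taken from
# the original payloads. A request body matching test_minimal_volume's
# expectation could look like this:
from lxml import etree as _xml_demo_etree

_xml_demo_body = (b'<volume xmlns="http://docs.openstack.org/api/'
                  b'openstack-block-storage/2.0/content" size="1"/>')
_xml_demo_root = _xml_demo_etree.fromstring(_xml_demo_body)
# Attribute extraction mirrors the {"volume": {"size": "1"}} result the
# deserializer tests assert on.
assert {'volume': {'size': _xml_demo_root.get('size')}} == \
    {'volume': {'size': '1'}}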
import uuid import mock from oslo_serialization import jsonutils import webob from cinder.api import extensions from cinder.api.v2 import snapshot_metadata from cinder.api.v2 import snapshots from cinder import context import cinder.db from cinder import exception from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder import volume def return_create_snapshot_metadata(context, snapshot_id, metadata, delete): return stub_snapshot_metadata() def return_create_snapshot_metadata_insensitive(context, snapshot_id, metadata, delete): return stub_snapshot_metadata_insensitive() def return_new_snapshot_metadata(context, snapshot_id, metadata, delete): return stub_new_snapshot_metadata() def return_empty_container_metadata(context, snapshot_id, metadata, delete): return {} def stub_snapshot_metadata(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } return metadata def stub_snapshot_metadata_insensitive(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4", } return metadata def stub_new_snapshot_metadata(): metadata = { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', } return metadata def return_snapshot(context, snapshot_id): return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', 'name': 'fake', 'status': 'available', 'metadata': {}} def stub_get(context, *args, **kwargs): vol = {'id': 'fake-vol-id', 'size': 100, 'name': 'fake', 'host': 'fake-host', 'status': 'available', 'encryption_key_id': None, 'volume_type_id': None, 'migration_status': None, 'availability_zone': 'fake-zone', 'attach_status': 'detached', 'metadata': {}} return fake_volume.fake_volume_obj(context, **vol) def return_snapshot_nonexistent(context, snapshot_id): raise exception.SnapshotNotFound(snapshot_id=snapshot_id) def fake_update_snapshot_metadata(self, context, snapshot, diff): pass class SnapshotMetaDataTest(test.TestCase): def setUp(self): super(SnapshotMetaDataTest, self).setUp() self.volume_api = cinder.volume.api.API() self.stubs.Set(volume.api.API, 'get', stub_get) self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot) self.stubs.Set(self.volume_api, 'update_snapshot_metadata', fake_update_snapshot_metadata) self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr) self.controller = snapshot_metadata.Controller() self.req_id = str(uuid.uuid4()) self.url = '/v2/fake/snapshots/%s/metadata' % self.req_id snap = {"volume_size": 100, "volume_id": "fake-vol-id", "display_name": "Volume Test Name", "display_description": "Volume Test Desc", "availability_zone": "zone1:host1", "host": "fake-host", "metadata": {}} body = {"snapshot": snap} req = fakes.HTTPRequest.blank('/v2/snapshots') self.snapshot_controller.create(req, body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_index(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_obj['metadata'] = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) 
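    @staticmethod
    def _mock_patch_demo():
        # Editor's illustration, not part of the original suite: the pattern
        # the tests in this class lean on -- patch a lookup, return a canned
        # object, then assert on what the caller sees -- shown with the
        # standard library alone ('json.loads' is just a convenient
        # stand-in target, not a cinder symbol).
        from unittest import mock
        import json
        with mock.patch('json.loads') as fake_loads:
            fake_loads.return_value = {'metadata': {'key1': 'value1'}}
            assert json.loads('anything') == {'metadata': {'key1': 'value1'}}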
@mock.patch('cinder.objects.Snapshot.get_by_id') def test_index_nonexistent_snapshot(self, snapshot_get_by_id): snapshot_get_by_id.side_effect = \ exception.SnapshotNotFound(snapshot_id=self.req_id) req = fakes.HTTPRequest.blank(self.url) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, req, self.url) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_index_no_data(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_obj['metadata'] = {'key2': 'value2'} snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key2') res_dict = self.controller.show(req, self.req_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show_nonexistent_snapshot(self, snapshot_get_by_id): snapshot_get_by_id.side_effect = \ exception.SnapshotNotFound(snapshot_id=self.req_id) req = fakes.HTTPRequest.blank(self.url + '/key2') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key2') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show_meta_not_found(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key6') @mock.patch('cinder.db.snapshot_metadata_delete') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_delete(self, snapshot_get_by_id, snapshot_metadata_delete): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_obj['metadata'] = {'key2': 'value2'} snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' res = self.controller.delete(req, self.req_id, 'key2') self.assertEqual(200, res.status_int) def test_delete_nonexistent_snapshot(self): self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key1') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_delete_meta_not_found(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key6') 
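    @staticmethod
    def _assert_raises_demo():
        # Editor's illustration, not part of the original suite: the negative
        # tests here use the callable form of assertRaises -- the exception
        # class first, then the function and its arguments unevaluated -- so
        # the call happens inside the assertion. Stdlib-only equivalent:
        import unittest

        class _Demo(unittest.TestCase):
            def runTest(self):
                # {}.__getitem__('missing') raises KeyError only when
                # invoked by assertRaises, mirroring how
                # self.controller.delete is driven in the tests above.
                self.assertRaises(KeyError, {}.__getitem__, 'missing')

        _Demo().runTest()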
    @mock.patch('cinder.db.snapshot_update')
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_create(self, snapshot_get_by_id, volume_get_by_id,
                    snapshot_update):
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        fake_volume_obj = fake_volume.fake_volume_obj(ctx)
        snapshot_get_by_id.return_value = snapshot_obj
        volume_get_by_id.return_value = fake_volume_obj
        self.stubs.Set(cinder.db, 'snapshot_metadata_update',
                       return_create_snapshot_metadata)
        req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
        req.method = 'POST'
        req.content_type = "application/json"
        body = {"metadata": {"key1": "value1",
                             "key2": "value2",
                             "key3": "value3"}}
        req.body = jsonutils.dump_as_bytes(body)
        res_dict = self.controller.create(req, self.req_id, body)
        self.assertEqual(body, res_dict)

    @mock.patch('cinder.db.snapshot_update')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_create_with_keys_in_uppercase_and_lowercase(
            self, snapshot_get_by_id, snapshot_update):
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_get_by_id.return_value = snapshot_obj
        # If the same key is sent in both upper and lower case, the
        # response should contain only the variants the server stored.
        self.stubs.Set(cinder.db, 'snapshot_metadata_update',
                       return_create_snapshot_metadata_insensitive)
        req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
        req.method = 'POST'
        req.content_type = "application/json"
        body = {"metadata": {"key1": "value1",
                             "KEY1": "value1",
                             "key2": "value2",
                             "KEY2": "value2",
                             "key3": "value3",
                             "KEY4": "value4"}}
        expected = {"metadata": {"key1": "value1",
                                 "key2": "value2",
                                 "key3": "value3",
                                 "KEY4": "value4"}}
        req.body = jsonutils.dump_as_bytes(body)
        res_dict = self.controller.create(req, self.req_id, body)
        self.assertEqual(expected, res_dict)

    def test_create_empty_body(self):
        self.stubs.Set(cinder.db, 'snapshot_metadata_update',
                       return_create_snapshot_metadata)
        req = fakes.HTTPRequest.blank(self.url)
        req.method = 'POST'
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, self.req_id, None)

    def test_create_item_empty_key(self):
        self.stubs.Set(cinder.db, 'snapshot_metadata_update',
                       return_create_snapshot_metadata)
        req = fakes.HTTPRequest.blank(self.url + '/key1')
        req.method = 'PUT'
        body = {"meta": {"": "value1"}}
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, self.req_id, body)

    def test_create_item_key_too_long(self):
        self.stubs.Set(cinder.db, 'snapshot_metadata_update',
                       return_create_snapshot_metadata)
        req = fakes.HTTPRequest.blank(self.url + '/key1')
        req.method = 'PUT'
        body = {"meta": {("a" * 260): "value1"}}
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req, self.req_id, body)

    def test_create_nonexistent_snapshot(self):
        self.stubs.Set(cinder.db, 'snapshot_get',
                       return_snapshot_nonexistent)
        self.stubs.Set(cinder.db, 'snapshot_metadata_update',
                       return_create_snapshot_metadata)
        req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
        req.method = 'POST'
        req.content_type = "application/json"
        body = {"metadata": {"key9": "value9"}}
        req.body =
jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, self.req_id, body) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all(self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': [] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_new_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) @mock.patch('cinder.db.snapshot_update', return_value={'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20'}) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all_with_keys_in_uppercase_and_lowercase( self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_new_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = { 'metadata': { 'key10': 'value10', 'KEY10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) res_dict = self.controller.update_all(req, self.req_id, body) self.assertEqual(expected, res_dict) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all_empty_container(self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': [] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_empty_container_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = jsonutils.dump_as_bytes(expected) res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) def test_update_all_malformed_container(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_malformed_data(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': ['asdf']} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_nonexistent_snapshot(self): 
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update_all, req, '100', body) @mock.patch('cinder.db.snapshot_metadata_update', return_value=dict()) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item(self, snapshot_get_by_id, snapshot_update, snapshot_metadata_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res_dict = self.controller.update(req, self.req_id, 'key1', body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) def test_update_item_nonexistent_snapshot(self): self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/asdf/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_empty_body(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', None) @mock.patch('cinder.db.sqlalchemy.api._snapshot_get') @mock.patch('cinder.db.snapshot_metadata_update', autospec=True) def test_update_item_empty_key(self, metadata_update, snapshot_get): snapshot_get.return_value = stub_get req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, '', body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item_key_too_long(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, ("a" * 260), body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item_value_too_long(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 
'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": ("a" * 260)}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, "key1", body) def test_update_item_too_many_keys(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1", "key2": "value2"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_body_uri_mismatch(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'bad', body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_invalid_metadata_items_on_create(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dump_as_bytes(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dump_as_bytes(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) # test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dump_as_bytes(data) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, data) cinder-8.0.0/cinder/tests/unit/api/v2/test_volume_metadata.py0000664000567000056710000007367512701406257025417 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
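# Editor's note, not part of the original file: every request body below is
# serialized with jsonutils.dump_as_bytes(). For the plain dicts used in
# these tests it behaves like the stdlib sketch here; this assumes rough
# equivalence only for such simple payloads (the oslo helper also handles
# types that json.dumps alone cannot).


def _dump_as_bytes_sketch(obj):
    import json
    return json.dumps(obj).encode('utf-8')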
import uuid import mock from oslo_config import cfg from oslo_serialization import jsonutils import webob from cinder.api import extensions from cinder.api.v2 import volume_metadata from cinder.api.v2 import volumes from cinder import db from cinder import exception from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import stubs from cinder.tests.unit import fake_volume from cinder import volume from cinder.volume import api as volume_api CONF = cfg.CONF def return_create_volume_metadata_max(context, volume_id, metadata, delete): return stub_max_volume_metadata() def return_create_volume_metadata(context, volume_id, metadata, delete, meta_type): return stub_volume_metadata() def return_new_volume_metadata(context, volume_id, metadata, delete, meta_type): return stub_new_volume_metadata() def return_create_volume_metadata_insensitive(context, snapshot_id, metadata, delete, meta_type): return stub_volume_metadata_insensitive() def return_volume_metadata(context, volume_id): return stub_volume_metadata() def return_empty_volume_metadata(context, volume_id): return {} def return_empty_container_metadata(context, volume_id, metadata, delete, meta_type): return {} def delete_volume_metadata(context, volume_id, key, meta_type): pass def stub_volume_metadata(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } return metadata def stub_new_volume_metadata(): metadata = { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', } return metadata def stub_volume_metadata_insensitive(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4", } return metadata def stub_max_volume_metadata(): metadata = {"metadata": {}} for num in range(CONF.quota_metadata_items): metadata['metadata']['key%i' % num] = "blah" return metadata def get_volume(*args, **kwargs): vol = {'name': 'fake', 'metadata': {}} return fake_volume.fake_volume_obj(args[0], **vol) def return_volume_nonexistent(*args, **kwargs): raise exception.VolumeNotFound('bogus test message') def fake_update_volume_metadata(self, context, volume, diff): pass class volumeMetaDataTest(test.TestCase): def setUp(self): super(volumeMetaDataTest, self).setUp() self.volume_api = volume_api.API() self.stubs.Set(volume.api.API, 'get', get_volume) self.stubs.Set(db, 'volume_metadata_get', return_volume_metadata) self.stubs.Set(db, 'service_get_all_by_topic', stubs.stub_service_get_all_by_topic) self.stubs.Set(self.volume_api, 'update_volume_metadata', fake_update_volume_metadata) self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.volume_controller = volumes.VolumeController(self.ext_mgr) self.controller = volume_metadata.Controller() self.req_id = str(uuid.uuid4()) self.url = '/v2/fake/volumes/%s/metadata' % self.req_id vol = {"size": 100, "display_name": "Volume Test Name", "display_description": "Volume Test Desc", "availability_zone": "zone1:host1", "metadata": {}} body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') self.volume_controller.create(req, body) def test_index(self): req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) def test_index_nonexistent_volume(self): self.stubs.Set(db, 'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, req, self.url) def 
test_index_no_data(self): self.stubs.Set(db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) def test_show(self): req = fakes.HTTPRequest.blank(self.url + '/key2') res_dict = self.controller.show(req, self.req_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) def test_show_nonexistent_volume(self): self.stubs.Set(db, 'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key2') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key2') def test_show_meta_not_found(self): self.stubs.Set(db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key6') @mock.patch.object(db, 'volume_metadata_delete') @mock.patch.object(db, 'volume_metadata_get') def test_delete(self, metadata_get, metadata_delete): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_volume_metadata metadata_delete.side_effect = delete_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res = self.controller.delete(req, self.req_id, 'key2') self.assertEqual(200, res.status_int) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_delete') @mock.patch.object(db, 'volume_metadata_get') def test_delete_volume_maintenance(self, metadata_get, metadata_delete): fake_volume = {'id': self.req_id, 'status': 'maintenance'} fake_context = mock.Mock() metadata_get.side_effect = return_volume_metadata metadata_delete.side_effect = delete_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.InvalidVolume, self.controller.delete, req, self.req_id, 'key2') get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_delete') @mock.patch.object(db, 'volume_metadata_get') def test_delete_nonexistent_volume(self, metadata_get, metadata_delete): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_volume_metadata metadata_delete.side_effect = return_volume_nonexistent req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key1') get_volume.assert_called_once_with(fake_context, self.req_id) def test_delete_meta_not_found(self): self.stubs.Set(db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key6') @mock.patch.object(db, 'volume_metadata_update') @mock.patch.object(db, 'volume_metadata_get') def test_create(self, 
metadata_get, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_empty_volume_metadata metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", }} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(body, res_dict) @mock.patch.object(db, 'volume_metadata_update') @mock.patch.object(db, 'volume_metadata_get') def test_create_volume_maintenance(self, metadata_get, metadata_update): fake_volume = {'id': self.req_id, 'status': 'maintenance'} fake_context = mock.Mock() metadata_get.side_effect = return_empty_volume_metadata metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", }} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.InvalidVolume, self.controller.create, req, self.req_id, body) @mock.patch.object(db, 'volume_metadata_update') @mock.patch.object(db, 'volume_metadata_get') def test_create_with_keys_in_uppercase_and_lowercase(self, metadata_get, metadata_update): # if the keys in uppercase_and_lowercase, should return the one # which server added fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_empty_volume_metadata metadata_update.side_effect = return_create_volume_metadata_insensitive req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "KEY1": "value1", "key2": "value2", "KEY2": "value2", "key3": "value3", "KEY4": "value4"}} expected = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4"}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(expected, res_dict) def test_create_empty_body(self): self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, None) def test_create_item_empty_key(self): self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_item_key_too_long(self): self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') 
req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_nonexistent_volume(self): self.stubs.Set(volume.api.API, 'get', return_volume_nonexistent) self.stubs.Set(db, 'volume_metadata_get', return_volume_metadata) self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, self.req_id, body) @mock.patch.object(db, 'volume_metadata_update') def test_update_all(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') def test_update_all_volume_maintenance(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'maintenance'} fake_context = mock.Mock() metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.InvalidVolume, self.controller.update_all, req, self.req_id, expected) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') @mock.patch.object(db, 'volume_metadata_get') def test_update_all_with_keys_in_uppercase_and_lowercase(self, metadata_get, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_create_volume_metadata metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = { 'metadata': { 'key10': 'value10', 'KEY10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, body) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') def test_update_all_empty_container(self, 
metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_empty_container_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_all_malformed_container(self): self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_malformed_data(self): self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': ['asdf']} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_nonexistent_volume(self): self.stubs.Set(db, 'volume_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update_all, req, '100', body) @mock.patch.object(db, 'volume_metadata_update') def test_update_item(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update(req, self.req_id, 'key1', body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') def test_update_item_volume_maintenance(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'maintenance'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.InvalidVolume, self.controller.update, req, self.req_id, 'key1', body) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_item_nonexistent_volume(self): self.stubs.Set(db, 'volume_get', return_volume_nonexistent) req = 
fakes.HTTPRequest.blank('/v2/fake/volumes/asdf/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_empty_body(self): self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', None) @mock.patch.object(db, 'volume_metadata_update') def test_update_item_empty_key(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, '', body) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') def test_update_item_key_too_long(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, ("a" * 260), body) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') def test_update_item_value_too_long(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": ("a" * 260)}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, "key1", body) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_item_too_many_keys(self): self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1", "key2": "value2"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', body) def 
test_update_item_body_uri_mismatch(self): self.stubs.Set(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'bad', body) @mock.patch.object(db, 'volume_metadata_update') def test_invalid_metadata_items_on_create(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) # test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, data) cinder-8.0.0/cinder/tests/unit/api/v1/0000775000567000056710000000000012701406543020612 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/api/v1/test_limits.py0000664000567000056710000010015212701406257023525 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests dealing with HTTP rate-limiting. 
""" from xml.dom import minidom from lxml import etree from oslo_serialization import jsonutils import six from six.moves import http_client from six.moves import range import webob from cinder.api.v1 import limits from cinder.api import views from cinder.api import xmlutil import cinder.context from cinder import test TEST_LIMITS = [ limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE), limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE), ] NS = { 'atom': 'http://www.w3.org/2005/Atom', 'ns': 'http://docs.openstack.org/common/api/v1.0' } class BaseLimitTestSuite(test.TestCase): """Base test suite which provides relevant stubs and time abstraction.""" def setUp(self): super(BaseLimitTestSuite, self).setUp() self.time = 0.0 self.stubs.Set(limits.Limit, "_get_time", self._get_time) self.absolute_limits = {} def stub_get_project_quotas(context, project_id, usages=True): return {k: dict(limit=v) for k, v in self.absolute_limits.items()} self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas", stub_get_project_quotas) def _get_time(self): """Return the "time" according to this test suite.""" return self.time class LimitsControllerTest(BaseLimitTestSuite): """Tests for `limits.LimitsController` class.""" def setUp(self): """Run before each test.""" super(LimitsControllerTest, self).setUp() self.controller = limits.create_resource() def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" request = webob.Request.blank("/") request.accept = accept_header request.environ["wsgiorg.routing_args"] = (None, { "action": "index", "controller": "", }) context = cinder.context.RequestContext('testuser', 'testproject') request.environ["cinder.context"] = context return request def _populate_limits(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), limits.Limit("GET", "changes-since*", "changes-since", 5, 60).display(), ] request.environ["cinder.limits"] = _limits return request def test_empty_index_json(self): """Test getting empty limit details in JSON.""" request = self._get_index_request() response = request.get_response(self.controller) expected = { "limits": { "rate": [], "absolute": {}, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def test_index_json(self): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits(request) self.absolute_limits = { 'gigabytes': 512, 'volumes': 5, } response = request.get_response(self.controller) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00", "unit": "MINUTE", "value": 10, "remaining": 10, }, { "verb": "POST", "next-available": "1970-01-01T00:00:00", "unit": "HOUR", "value": 5, "remaining": 5, }, ], }, { "regex": "changes-since", "uri": "changes-since*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00", "unit": "MINUTE", "value": 5, "remaining": 5, }, ], }, ], "absolute": {"maxTotalVolumeGigabytes": 512, "maxTotalVolumes": 5, }, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def _populate_limits_diff_regex(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", 
"*", ".*", 10, 60).display(), limits.Limit("GET", "*", "*.*", 10, 60).display(), ] request.environ["cinder.limits"] = _limits return request def test_index_diff_regex(self): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits_diff_regex(request) response = request.get_response(self.controller) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, { "regex": "*.*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, ], "absolute": {}, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def _test_index_absolute_limits_json(self, expected): request = self._get_index_request() response = request.get_response(self.controller) body = jsonutils.loads(response.body) self.assertEqual(expected, body['limits']['absolute']) def test_index_ignores_extra_absolute_limits_json(self): self.absolute_limits = {'unknown_limit': 9001} self._test_index_absolute_limits_json({}) class TestLimiter(limits.Limiter): pass class LimitMiddlewareTest(BaseLimitTestSuite): """Tests for the `limits.RateLimitingMiddleware` class.""" @webob.dec.wsgify def _empty_app(self, request): """Do-nothing WSGI app.""" pass def setUp(self): """Prepare middleware for use through fake WSGI app.""" super(LimitMiddlewareTest, self).setUp() _limits = '(GET, *, .*, 1, MINUTE)' self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, "%s.TestLimiter" % self.__class__.__module__) def test_limit_class(self): """Test that middleware selected correct limiter class.""" assert isinstance(self.app._limiter, TestLimiter) def test_good_request(self): """Test successful GET request through middleware.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) def test_limited_request_json(self): """Test a rate-limited (413) GET request through middleware.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(413, response.status_int) self.assertIn('Retry-After', response.headers) retry_after = int(response.headers['Retry-After']) self.assertAlmostEqual(retry_after, 60, 1) body = jsonutils.loads(response.body) expected = "Only 1 GET request(s) can be made to * every minute." value = body["overLimitFault"]["details"].strip() self.assertEqual(expected, value) def test_limited_request_xml(self): """Test a rate-limited (413) response as XML.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) request = webob.Request.blank("/") request.accept = "application/xml" response = request.get_response(self.app) self.assertEqual(413, response.status_int) root = minidom.parseString(response.body).childNodes[0] expected = "Only 1 GET request(s) can be made to * every minute." 
details = root.getElementsByTagName("details") self.assertEqual(1, details.length) value = details.item(0).firstChild.data.strip() self.assertEqual(expected, value) class LimitTest(BaseLimitTestSuite): """Tests for the `limits.Limit` class.""" def test_GET_no_delay(self): """Test a limit handles 1 GET per second.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(0, limit.next_request) self.assertEqual(0, limit.last_request) def test_GET_delay(self): """Test two calls to 1 GET per second limit.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) delay = limit("GET", "/anything") self.assertEqual(1, delay) self.assertEqual(1, limit.next_request) self.assertEqual(0, limit.last_request) self.time += 4 delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(4, limit.next_request) self.assertEqual(4, limit.last_request) class ParseLimitsTest(BaseLimitTestSuite): """Tests for the default limits parser in the `limits.Limiter` class.""" def test_invalid(self): """Test that parse_limits() handles invalid input correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, ';;;;;') def test_bad_rule(self): """Test that parse_limits() handles bad rules correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, 'GET, *, .*, 20, minute') def test_missing_arg(self): """Test that parse_limits() handles missing args correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20)') def test_bad_value(self): """Test that parse_limits() handles bad values correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, foo, minute)') def test_bad_unit(self): """Test that parse_limits() handles bad units correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20, lightyears)') def test_multiple_rules(self): """Test that parse_limits() handles multiple rules correctly.""" try: l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);' '(PUT, /foo*, /foo.*, 10, hour);' '(POST, /bar*, /bar.*, 5, second);' '(Say, /derp*, /derp.*, 1, day)') except ValueError as e: assert False, e # Make sure the number of returned limits are correct self.assertEqual(4, len(l)) # Check all the verbs... expected = ['GET', 'PUT', 'POST', 'SAY'] self.assertEqual(expected, [t.verb for t in l]) # ...the URIs... expected = ['*', '/foo*', '/bar*', '/derp*'] self.assertEqual(expected, [t.uri for t in l]) # ...the regexes... expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] self.assertEqual(expected, [t.regex for t in l]) # ...the values... expected = [20, 10, 5, 1] self.assertEqual(expected, [t.value for t in l]) # ...and the units... 
        expected = [limits.PER_MINUTE, limits.PER_HOUR, limits.PER_SECOND,
                    limits.PER_DAY]
        self.assertEqual(expected, [t.unit for t in l])


class LimiterTest(BaseLimitTestSuite):
    """Tests for the in-memory `limits.Limiter` class."""

    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        userlimits = {'limits.user3': '',
                      'limits.user0': '(get, *, .*, 4, minute);'
                                      '(put, *, .*, 2, minute)'}
        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)

    def _check(self, num, verb, url, username=None):
        """Check and yield results from checks."""
        for x in range(num):
            yield self.limiter.check_for_delay(verb, url, username)[0]

    def _check_sum(self, num, verb, url, username=None):
        """Check and sum results from checks."""
        results = self._check(num, verb, url, username)
        return sum(item for item in results if item)

    def test_no_delay_GET(self):
        """No delay on a single call for a limit verb we didn't set."""
        delay = self.limiter.check_for_delay("GET", "/anything")
        self.assertEqual((None, None), delay)

    def test_no_delay_PUT(self):
        """No delay on a single call for a known limit."""
        delay = self.limiter.check_for_delay("PUT", "/anything")
        self.assertEqual((None, None), delay)

    def test_delay_PUT(self):
        """Test delay on the 11th PUT request.

        The 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_POST(self):
        """Test delay on the 8th POST request.

        Ensure that the 8th POST will result in a delay of 60/7
        (about 8.57) seconds until the next request will be granted.
        """
        expected = [None] * 7
        results = list(self._check(7, "POST", "/anything"))
        self.assertEqual(expected, results)
        expected = 60.0 / 7.0
        results = self._check_sum(1, "POST", "/anything")
        self.assertAlmostEqual(expected, results, 8)

    def test_delay_GET(self):
        """Ensure the 11th GET will result in NO delay."""
        expected = [None] * 11
        results = list(self._check(11, "GET", "/anything"))
        self.assertEqual(expected, results)
        expected = [None] * 4 + [15.0]
        results = list(self._check(5, "GET", "/foo", "user0"))
        self.assertEqual(expected, results)

    def test_delay_PUT_volumes(self):
        """Test limit of PUT on /volumes.

        Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere
        is still OK after 5 requests... but then after 11 total requests,
        PUT limiting kicks in.
        """
        # First 6 requests on PUT /volumes
        expected = [None] * 5 + [12.0]
        results = list(self._check(6, "PUT", "/volumes"))
        self.assertEqual(expected, results)
        # Next 5 requests on PUT /anything
        expected = [None] * 4 + [6.0]
        results = list(self._check(5, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_PUT_wait(self):
        """Test limit on PUT is lifted.

        Ensure after hitting the limit and then waiting for the correct
        amount of time, the limit will be lifted.
""" expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) # Advance time self.time += 6.0 expected = [None, 6.0] results = list(self._check(2, "PUT", "/anything")) self.assertEqual(expected, results) def test_multiple_delays(self): """Ensure multiple requests still get a delay.""" expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything")) self.assertEqual(expected, results) self.time += 1.0 expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything")) self.assertEqual(expected, results) expected = [None] * 2 + [30.0] * 8 results = list(self._check(10, "PUT", "/anything", "user0")) self.assertEqual(expected, results) def test_user_limit(self): """Test user-specific limits.""" self.assertEqual([], self.limiter.levels['user3']) self.assertEqual(2, len(self.limiter.levels['user0'])) def test_multiple_users(self): """Tests involving multiple users.""" # User0 expected = [None] * 2 + [30.0] * 8 results = list(self._check(10, "PUT", "/anything", "user0")) self.assertEqual(expected, results) # User1 expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything", "user1")) self.assertEqual(expected, results) # User2 expected = [None] * 10 + [6.0] * 5 results = list(self._check(15, "PUT", "/anything", "user2")) self.assertEqual(expected, results) # User3 expected = [None] * 20 results = list(self._check(20, "PUT", "/anything", "user3")) self.assertEqual(expected, results) self.time += 1.0 # User1 again expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything", "user1")) self.assertEqual(expected, results) self.time += 1.0 # User1 again expected = [4.0] * 5 results = list(self._check(5, "PUT", "/anything", "user2")) self.assertEqual(expected, results) # User0 again expected = [28.0] results = list(self._check(1, "PUT", "/anything", "user0")) self.assertEqual(expected, results) self.time += 28.0 expected = [None, 30.0] results = list(self._check(2, "PUT", "/anything", "user0")) self.assertEqual(expected, results) class WsgiLimiterTest(BaseLimitTestSuite): """Tests for `limits.WsgiLimiter` class.""" def setUp(self): """Run before each test.""" super(WsgiLimiterTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) def _request_data(self, verb, path): """Get data describing a limit request verb/path.""" return jsonutils.dump_as_bytes({"verb": verb, "path": path}) def _request(self, verb, url, username=None): """Assert that POSTing to given url triggers given action. Ensure POSTing to the given url causes the given username to perform the given action. Make the internal rate limiter return delay and make sure that the WSGI app returns the correct response. 
""" if username: request = webob.Request.blank("/%s" % username) else: request = webob.Request.blank("/") request.method = "POST" request.body = self._request_data(verb, url) response = request.get_response(self.app) if "X-Wait-Seconds" in response.headers: self.assertEqual(403, response.status_int) return response.headers["X-Wait-Seconds"] self.assertEqual(204, response.status_int) def test_invalid_methods(self): """Only POSTs should work.""" for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: request = webob.Request.blank("/", method=method) response = request.get_response(self.app) self.assertEqual(405, response.status_int) def test_good_url(self): delay = self._request("GET", "/something") self.assertIsNone(delay) def test_escaping(self): delay = self._request("GET", "/something/jump%20up") self.assertIsNone(delay) def test_response_to_delays(self): delay = self._request("GET", "/delayed") self.assertIsNone(delay) delay = self._request("GET", "/delayed") self.assertEqual('60.00', delay) def test_response_to_delays_usernames(self): delay = self._request("GET", "/delayed", "user1") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user2") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user1") self.assertEqual('60.00', delay) delay = self._request("GET", "/delayed", "user2") self.assertEqual('60.00', delay) class FakeHttplibSocket(object): """Fake `http_client.HTTPResponse` replacement.""" def __init__(self, response_string): """Initialize new `FakeHttplibSocket`.""" if isinstance(response_string, six.text_type): response_string = response_string.encode('utf-8') self._buffer = six.BytesIO(response_string) def makefile(self, mode, *args): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """Fake `http_client.HTTPConnection`.""" def __init__(self, app, host): """Initialize `FakeHttplibConnection`.""" self.app = app self.host = host def request(self, method, path, body="", headers=None): """Fake method for request. Requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into an `http_client.HTTPResponse`. """ if not headers: headers = {} req = webob.Request.blank(path) req.method = method req.headers = headers req.host = self.host req.body = body resp = str(req.get_response(self.app)) resp = "HTTP/1.0 %s" % resp sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(sock) self.http_response.begin() def getresponse(self): """Return our generated response from the request.""" return self.http_response def wire_HTTPConnection_to_WSGI(host, app): """Monkeypatches HTTPConnection. Monkeypatches HTTPConnection so that if you try to connect to host, you are instead routed straight to the given WSGI app. After calling this method, when any code calls http_client.HTTPConnection(host) the connection object will be a fake. Its requests will be sent directly to the given WSGI app rather than through a socket. Code connecting to hosts other than host will not be affected. This method may be called multiple times to map different hosts to different apps. This method returns the original HTTPConnection object, so that the caller can restore the default HTTPConnection interface (for all hosts). """ class HTTPConnectionDecorator(object): """Decorator to mock the HTTPConnection class. Wraps the real HTTPConnection class so that when you instantiate the class you might instead get a fake instance. 
""" def __init__(self, wrapped): self.wrapped = wrapped def __call__(self, connection_host, *args, **kwargs): if connection_host == host: return FakeHttplibConnection(app, host) else: return self.wrapped(connection_host, *args, **kwargs) oldHTTPConnection = http_client.HTTPConnection new_http_connection = HTTPConnectionDecorator(http_client.HTTPConnection) http_client.HTTPConnection = new_http_connection return oldHTTPConnection class WsgiLimiterProxyTest(BaseLimitTestSuite): """Tests for the `limits.WsgiLimiterProxy` class.""" def setUp(self): """setUp for test suite. Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `http_client` library. """ super(WsgiLimiterProxyTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) self.oldHTTPConnection = ( wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") self.addCleanup(self._restore, self.oldHTTPConnection) def _restore(self, oldHTTPConnection): # restore original HTTPConnection object http_client.HTTPConnection = oldHTTPConnection def test_200(self): """Successful request test.""" delay = self.proxy.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_403(self): """Forbidden request test.""" delay = self.proxy.check_for_delay("GET", "/delayed") self.assertEqual((None, None), delay) delay, error = self.proxy.check_for_delay("GET", "/delayed") error = error.strip() expected = ("60.00", b"403 Forbidden\n\nOnly 1 GET request(s) can be " b"made to /delayed every minute.") self.assertEqual(expected, (delay, error)) class LimitsViewBuilderTest(test.TestCase): def setUp(self): super(LimitsViewBuilderTest, self).setUp() self.view_builder = views.limits.ViewBuilder() self.rate_limits = [{"URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226}, {"URI": "*/volumes", "regex": "^/volumes", "value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "resetTime": 1311272226}] self.absolute_limits = {"metadata_items": 1, "injected_files": 5, "injected_file_content_bytes": 5} def test_build_limits(self): tdate = "2011-07-21T18:17:06" expected_limits = \ {"limits": {"rate": [{"uri": "*", "regex": ".*", "limit": [{"value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": tdate}]}, {"uri": "*/volumes", "regex": "^/volumes", "limit": [{"value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": tdate}]}], "absolute": {"maxServerMeta": 1, "maxImageMeta": 1, "maxPersonality": 5, "maxPersonalitySize": 5}}} output = self.view_builder.build(self.rate_limits, self.absolute_limits) self.assertDictMatch(expected_limits, output) def test_build_limits_empty_limits(self): expected_limits = {"limits": {"rate": [], "absolute": {}}} abs_limits = {} rate_limits = [] output = self.view_builder.build(rate_limits, abs_limits) self.assertDictMatch(expected_limits, output) class LimitsXMLSerializationTest(test.TestCase): def test_xml_declaration(self): serializer = limits.LimitsTemplate() fixture = {"limits": { "rate": [], "absolute": {}}} output = serializer.serialize(fixture) has_dec = output.startswith(b"") self.assertTrue(has_dec) def test_index(self): serializer = limits.LimitsTemplate() fixture = { "limits": { "rate": [{ "uri": "*", "regex": ".*", "limit": [{ "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z"}]}, {"uri": "*/servers", "regex": "^/servers", "limit": [{ "value": 50, 
"verb": "POST", "remaining": 10, "unit": "DAY", "next-available": "2011-12-15T22:42:45Z"}]}], "absolute": {"maxServerMeta": 1, "maxImageMeta": 1, "maxPersonality": 5, "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture) root = etree.XML(output) xmlutil.validate_schema(root, 'limits') # verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(4, len(absolutes)) for limit in absolutes: name = limit.get('name') value = limit.get('value') self.assertEqual(str(fixture['limits']['absolute'][name]), value) # verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(2, len(rates)) for i, rate in enumerate(rates): for key in ['uri', 'regex']: self.assertEqual(str(fixture['limits']['rate'][i][key]), rate.get(key)) rate_limits = rate.xpath('ns:limit', namespaces=NS) self.assertEqual(1, len(rate_limits)) for j, limit in enumerate(rate_limits): for key in ['verb', 'value', 'remaining', 'unit', 'next-available']: self.assertEqual( str(fixture['limits']['rate'][i]['limit'][j][key]), limit.get(key)) def test_index_no_limits(self): serializer = limits.LimitsTemplate() fixture = {"limits": { "rate": [], "absolute": {}}} output = serializer.serialize(fixture) root = etree.XML(output) xmlutil.validate_schema(root, 'limits') # verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(0, len(absolutes)) # verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(0, len(rates)) cinder-8.0.0/cinder/tests/unit/api/v1/__init__.py0000664000567000056710000000000012701406250022704 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/api/v1/stubs.py0000664000567000056710000001043212701406257022326 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime

from cinder import exception as exc


FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}


def stub_volume(id, **kwargs):
    volume = {
        'id': id,
        'user_id': 'fakeuser',
        'project_id': 'fakeproject',
        'host': 'fakehost',
        'size': 1,
        'availability_zone': 'fakeaz',
        'attached_mode': 'rw',
        'status': 'fakestatus',
        'migration_status': None,
        'attach_status': 'attached',
        'bootable': 'false',
        'name': 'vol name',
        'display_name': 'displayname',
        'display_description': 'displaydesc',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'snapshot_id': None,
        'source_volid': None,
        'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
        'volume_metadata': [],
        'volume_type': {'name': 'vol_type_name'},
        'volume_attachment': [],
        'multiattach': False,
        'readonly': 'False'}

    volume.update(kwargs)
    return volume


def stub_volume_create(self, context, size, name, description, snapshot,
                       **param):
    vol = stub_volume('1')
    vol['size'] = size
    vol['display_name'] = name
    vol['display_description'] = description
    vol['source_volid'] = None
    try:
        vol['snapshot_id'] = snapshot['id']
    except (KeyError, TypeError):
        vol['snapshot_id'] = None
    vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
    return vol


def stub_volume_create_from_image(self, context, size, name, description,
                                  snapshot, volume_type, metadata,
                                  availability_zone):
    vol = stub_volume('1')
    vol['status'] = 'creating'
    vol['size'] = size
    vol['display_name'] = name
    vol['display_description'] = description
    vol['availability_zone'] = 'cinder'
    return vol


def stub_volume_update(self, context, *args, **param):
    pass


def stub_volume_delete(self, context, *args, **param):
    pass


def stub_volume_get(self, context, volume_id):
    return stub_volume(volume_id)


def stub_volume_get_notfound(self, context, volume_id):
    raise exc.NotFound


def stub_volume_get_all(context, search_opts=None):
    return [stub_volume(100, project_id='fake'),
            stub_volume(101, project_id='superfake'),
            stub_volume(102, project_id='superduperfake')]


def stub_volume_get_all_by_project(self, context, search_opts=None):
    return [stub_volume_get(self, context, '1')]


def stub_snapshot(id, **kwargs):
    snapshot = {'id': id,
                'volume_id': 12,
                'status': 'available',
                'volume_size': 100,
                'created_at': None,
                'display_name': 'Default name',
                'display_description': 'Default description',
                'project_id': 'fake',
                'snapshot_metadata': []}

    snapshot.update(kwargs)
    return snapshot


def stub_snapshot_get_all(context, filters=None, marker=None, limit=None,
                          sort_keys=None, sort_dirs=None, offset=None):
    return [stub_snapshot(100, project_id='fake'),
            stub_snapshot(101, project_id='superfake'),
            stub_snapshot(102, project_id='superduperfake')]


def stub_snapshot_get_all_by_project(context, project_id, filters=None,
                                     marker=None, limit=None, sort_keys=None,
                                     sort_dirs=None, offset=None):
    return [stub_snapshot(1)]


def stub_snapshot_update(self, context, *args, **param):
    pass


def stub_service_get_all_by_topic(context, topic, disabled=None):
    return [{'availability_zone': "zone1:host1", "disabled": 0}]
cinder-8.0.0/cinder/tests/unit/api/v1/test_snapshots.py0000664000567000056710000004416712701406257024263 0ustar jenkinsjenkins00000000000000
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from lxml import etree
import mock
from oslo_utils import timeutils
import webob

from cinder.api.v1 import snapshots
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v1 import stubs
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import volume


UUID = '00000000-0000-0000-0000-000000000001'
INVALID_UUID = '00000000-0000-0000-0000-000000000002'


def _get_default_snapshot_param():
    return {'id': UUID,
            'volume_id': 12,
            'status': 'available',
            'volume_size': 100,
            'created_at': None,
            'display_name': 'Default name',
            'display_description': 'Default description', }


def stub_snapshot_create(self, context, volume_id, name, description,
                         metadata):
    snapshot = _get_default_snapshot_param()
    snapshot['volume_id'] = volume_id
    snapshot['display_name'] = name
    snapshot['display_description'] = description
    snapshot['metadata'] = metadata
    return snapshot


def stub_snapshot_delete(self, context, snapshot):
    if snapshot['id'] != UUID:
        raise exception.NotFound


def stub_snapshot_get(self, context, snapshot_id):
    if snapshot_id != UUID:
        raise exception.NotFound

    param = _get_default_snapshot_param()
    return param


def stub_snapshot_get_all(self, context, search_opts=None):
    param = _get_default_snapshot_param()
    return [param]


class SnapshotApiTest(test.TestCase):
    def setUp(self):
        super(SnapshotApiTest, self).setUp()
        self.controller = snapshots.SnapshotsController()

        self.stubs.Set(db, 'snapshot_get_all_by_project',
                       stubs.stub_snapshot_get_all_by_project)
        self.stubs.Set(db, 'snapshot_get_all',
                       stubs.stub_snapshot_get_all)

    def test_snapshot_create(self):
        self.stubs.Set(volume.api.API, "create_snapshot",
                       stub_snapshot_create)
        self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get)
        snapshot = {"volume_id": '12',
                    "force": False,
                    "display_name": "Snapshot Test Name",
                    "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        resp_dict = self.controller.create(req, body)

        self.assertIn('snapshot', resp_dict)
        self.assertEqual(snapshot['display_name'],
                         resp_dict['snapshot']['display_name'])
        self.assertEqual(snapshot['display_description'],
                         resp_dict['snapshot']['display_description'])

    def test_snapshot_create_force(self):
        self.stubs.Set(volume.api.API, "create_snapshot_force",
                       stub_snapshot_create)
        self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get)
        snapshot = {"volume_id": '12',
                    "force": True,
                    "display_name": "Snapshot Test Name",
                    "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        resp_dict = self.controller.create(req, body)

        self.assertIn('snapshot', resp_dict)
        self.assertEqual(snapshot['display_name'],
                         resp_dict['snapshot']['display_name'])
        self.assertEqual(snapshot['display_description'],
                         resp_dict['snapshot']['display_description'])

        snapshot = {"volume_id": "12",
                    "force": "**&&^^%%$$##@@",
                    "display_name": "Snapshot Test Name",
                    "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        self.assertRaises(exception.InvalidParameterValue,
                          self.controller.create,
                          req,
                          body)

    def test_snapshot_create_without_volume_id(self):
        snapshot_name = 'Snapshot Test Name'
        snapshot_description = 'Snapshot Test Desc'
        body = {
            "snapshot": {
                "force": True,
                "name": snapshot_name,
                "description": snapshot_description
            }
        }
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, body)

    @mock.patch.object(volume.api.API, "update_snapshot",
                       side_effect=stubs.stub_snapshot_update)
    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_snapshot_update(self, snapshot_get_by_id, volume_get_by_id,
                             snapshot_metadata_get, update_snapshot):
        snapshot = {
            'id': UUID,
            'volume_id': 1,
            'status': 'available',
            'volume_size': 100,
            'display_name': 'Default name',
            'display_description': 'Default description',
            'expected_attrs': ['metadata'],
        }
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        fake_volume_obj = fake_volume.fake_volume_obj(ctx)
        snapshot_get_by_id.return_value = snapshot_obj
        volume_get_by_id.return_value = fake_volume_obj

        updates = {"display_name": "Updated Test Name", }
        body = {"snapshot": updates}
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
        res_dict = self.controller.update(req, UUID, body)
        expected = {'snapshot': {
            'id': UUID,
            'volume_id': '1',
            'status': u'available',
            'size': 100,
            'created_at': None,
            'display_name': u'Updated Test Name',
            'display_description': u'Default description',
            'metadata': {},
        }}
        self.assertEqual(expected, res_dict)

    def test_snapshot_update_missing_body(self):
        body = {}
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.update, req, UUID, body)

    def test_snapshot_update_invalid_body(self):
        body = {'display_name': 'missing top level snapshot key'}
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.update, req, UUID, body)

    def test_snapshot_update_not_found(self):
        self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"snapshot": updates}
        req = fakes.HTTPRequest.blank('/v1/snapshots/not-the-uuid')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req,
                          'not-the-uuid', body)

    @mock.patch.object(volume.api.API, "delete_snapshot",
                       side_effect=stubs.stub_snapshot_update)
    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_snapshot_delete(self, snapshot_get_by_id, volume_get_by_id,
                             snapshot_metadata_get, delete_snapshot):
        snapshot = {
            'id': UUID,
            'volume_id': 1,
            'status': 'available',
            'volume_size': 100,
            'display_name': 'Default name',
            'display_description': 'Default description',
            'expected_attrs': ['metadata'],
        }
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        fake_volume_obj = fake_volume.fake_volume_obj(ctx)
        snapshot_get_by_id.return_value = snapshot_obj
        volume_get_by_id.return_value = fake_volume_obj

        snapshot_id = UUID
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
        resp = self.controller.delete(req, snapshot_id)
        self.assertEqual(202, resp.status_int)

    def test_snapshot_delete_invalid_id(self):
        self.stubs.Set(volume.api.API, "delete_snapshot",
                       stub_snapshot_delete)
        snapshot_id = INVALID_UUID
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, snapshot_id)

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id,
                           snapshot_metadata_get):
        snapshot = {
            'id': UUID,
            'volume_id': 1,
            'status': 'available',
            'volume_size': 100,
            'display_name': 'Default name',
            'display_description': 'Default description',
            'expected_attrs': ['metadata'],
        }
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        fake_volume_obj = fake_volume.fake_volume_obj(ctx)
        snapshot_get_by_id.return_value = snapshot_obj
        volume_get_by_id.return_value = fake_volume_obj
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
        resp_dict = self.controller.show(req, UUID)

        self.assertIn('snapshot', resp_dict)
        self.assertEqual(UUID, resp_dict['snapshot']['id'])

    def test_snapshot_show_invalid_id(self):
        snapshot_id = INVALID_UUID
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, snapshot_id)

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    @mock.patch('cinder.volume.api.API.get_all_snapshots')
    def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id,
                             volume_get_by_id, snapshot_metadata_get):
        snapshot = {
            'id': UUID,
            'volume_id': 1,
            'status': 'available',
            'volume_size': 100,
            'display_name': 'Default name',
            'display_description': 'Default description',
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        fake_volume_obj = fake_volume.fake_volume_obj(ctx)
        snapshot_get_by_id.return_value = snapshot_obj
        volume_get_by_id.return_value = fake_volume_obj
        snapshots = objects.SnapshotList(objects=[snapshot_obj])
        get_all_snapshots.return_value = snapshots

        req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
        resp_dict = self.controller.detail(req)

        self.assertIn('snapshots', resp_dict)
        resp_snapshots = resp_dict['snapshots']
        self.assertEqual(1, len(resp_snapshots))

        resp_snapshot = resp_snapshots.pop()
        self.assertEqual(UUID, resp_snapshot['id'])

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_admin_list_snapshots_limited_to_project(self,
                                                     snapshot_metadata_get):
        req = fakes.HTTPRequest.blank('/v1/fake/snapshots',
                                      use_admin_context=True)
        res = self.controller.index(req)

        self.assertIn('snapshots', res)
        self.assertEqual(1, len(res['snapshots']))

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_list_snapshots_with_limit_and_offset(self,
                                                  snapshot_metadata_get):
        def list_snapshots_with_limit_and_offset(is_admin):
            def stub_snapshot_get_all_by_project(context, project_id,
                                                 filters=None, marker=None,
                                                 limit=None, sort_keys=None,
                                                 sort_dirs=None, offset=None):
                return [
                    stubs.stub_snapshot(1, display_name='backup1'),
                    stubs.stub_snapshot(2, display_name='backup2'),
                    stubs.stub_snapshot(3, display_name='backup3'),
                ]

            self.stubs.Set(db, 'snapshot_get_all_by_project',
                           stub_snapshot_get_all_by_project)

            req = fakes.HTTPRequest.blank('/v1/fake/snapshots?limit=1'
                                          '&offset=1',
                                          use_admin_context=is_admin)
            res = self.controller.index(req)
            self.assertIn('snapshots', res)
            self.assertEqual(1, len(res['snapshots']))
            self.assertEqual('2', res['snapshots'][0]['id'])

        # admin case
        list_snapshots_with_limit_and_offset(is_admin=True)
        # non_admin case
        list_snapshots_with_limit_and_offset(is_admin=False)

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get):
        req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1',
                                      use_admin_context=True)
        res = self.controller.index(req)
        self.assertIn('snapshots', res)
        self.assertEqual(3, len(res['snapshots']))

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_all_tenants_non_admin_gets_all_tenants(self,
                                                    snapshot_metadata_get):
        req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1')
        res = self.controller.index(req)
        self.assertIn('snapshots', res)
        self.assertEqual(1, len(res['snapshots']))

    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
    def test_non_admin_get_by_project(self, snapshot_metadata_get):
        req = fakes.HTTPRequest.blank('/v1/fake/snapshots')
        res = self.controller.index(req)
        self.assertIn('snapshots', res)
        self.assertEqual(1, len(res['snapshots']))


class SnapshotSerializerTest(test.TestCase):
    def _verify_snapshot(self, snap, tree):
        self.assertEqual('snapshot', tree.tag)

        for attr in ('id', 'status', 'size', 'created_at',
                     'display_name', 'display_description', 'volume_id'):
            self.assertEqual(str(snap[attr]), tree.get(attr))

    def test_snapshot_show_create_serializer(self):
        serializer = snapshots.SnapshotTemplate()
        raw_snapshot = dict(
            id='snap_id',
            status='snap_status',
            size=1024,
            created_at=timeutils.utcnow(),
            display_name='snap_name',
            display_description='snap_desc',
            volume_id='vol_id',
        )
        text = serializer.serialize(dict(snapshot=raw_snapshot))

        tree = etree.fromstring(text)

        self._verify_snapshot(raw_snapshot, tree)

    def test_snapshot_index_detail_serializer(self):
        serializer = snapshots.SnapshotsTemplate()
        raw_snapshots = [dict(id='snap1_id',
                              status='snap1_status',
                              size=1024,
                              created_at=timeutils.utcnow(),
                              display_name='snap1_name',
                              display_description='snap1_desc',
                              volume_id='vol1_id',
                              ),
                         dict(id='snap2_id',
                              status='snap2_status',
                              size=1024,
                              created_at=timeutils.utcnow(),
                              display_name='snap2_name',
                              display_description='snap2_desc',
                              volume_id='vol2_id',
                              )]
        text = serializer.serialize(dict(snapshots=raw_snapshots))

        tree = etree.fromstring(text)

        self.assertEqual('snapshots', tree.tag)
        self.assertEqual(len(raw_snapshots), len(tree))
        for idx, child in enumerate(tree):
            self._verify_snapshot(raw_snapshots[idx], child)


class SnapshotsUnprocessableEntityTestCase(test.TestCase):
    """Tests of places we throw 422 Unprocessable Entity."""

    def setUp(self):
        super(SnapshotsUnprocessableEntityTestCase, self).setUp()
        self.controller = snapshots.SnapshotsController()

    def _unprocessable_snapshot_create(self, body):
        req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
        req.method = 'POST'

        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, body)

    def test_create_no_body(self):
        self._unprocessable_snapshot_create(body=None)

    def test_create_missing_snapshot(self):
        body = {'foo': {'a': 'b'}}
        self._unprocessable_snapshot_create(body=body)

    def test_create_malformed_entity(self):
        body = {'snapshot': 'string'}
        self._unprocessable_snapshot_create(body=body)
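
# For contrast with the malformed bodies rejected above, a well-formed create
# request wraps its fields in a single top-level 'snapshot' key; a minimal
# sketch with illustrative values only (``controller`` stands in for the
# SnapshotsController instance used in these tests):
#
#     body = {"snapshot": {"volume_id": '12',
#                          "force": False,
#                          "display_name": "Snapshot Test Name",
#                          "display_description": "Snapshot Test Desc"}}
#     req = fakes.HTTPRequest.blank('/v1/snapshots')
#     resp_dict = controller.create(req, body)
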
cinder-8.0.0/cinder/tests/unit/api/v1/test_types.py0000664000567000056710000001665212701406257023373 0ustar jenkinsjenkins00000000000000
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from lxml import etree
from oslo_utils import timeutils
import webob

from cinder.api.v1 import types
from cinder.api.views import types as views_types
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.volume import volume_types


def stub_volume_type(id):
    specs = {"key1": "value1",
             "key2": "value2",
             "key3": "value3",
             "key4": "value4",
             "key5": "value5"}
    return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)


def return_volume_types_get_all_types(context, search_opts=None):
    return dict(vol_type_1=stub_volume_type(1),
                vol_type_2=stub_volume_type(2),
                vol_type_3=stub_volume_type(3))


def return_empty_volume_types_get_all_types(context, search_opts=None):
    return {}


def return_volume_types_get_volume_type(context, id):
    if id == "777":
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    return stub_volume_type(id)


def return_volume_types_get_by_name(context, name):
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    return stub_volume_type(int(name.split("_")[2]))


class VolumeTypesApiTest(test.TestCase):
    def setUp(self):
        super(VolumeTypesApiTest, self).setUp()
        self.controller = types.VolumeTypesController()

    def test_volume_types_index(self):
        self.stubs.Set(volume_types, 'get_all_types',
                       return_volume_types_get_all_types)

        req = fakes.HTTPRequest.blank('/v1/fake/types')
        res_dict = self.controller.index(req)

        self.assertEqual(3, len(res_dict['volume_types']))

        expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
        actual_names = map(lambda e: e['name'], res_dict['volume_types'])
        self.assertEqual(set(expected_names), set(actual_names))
        for entry in res_dict['volume_types']:
            self.assertEqual('value1', entry['extra_specs']['key1'])

    def test_volume_types_index_no_data(self):
        self.stubs.Set(volume_types, 'get_all_types',
                       return_empty_volume_types_get_all_types)

        req = fakes.HTTPRequest.blank('/v1/fake/types')
        res_dict = self.controller.index(req)

        self.assertEqual(0, len(res_dict['volume_types']))

    def test_volume_types_show(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)

        type_id = str(uuid.uuid4())
        req = fakes.HTTPRequest.blank('/v1/fake/types/' + type_id)
        res_dict = self.controller.show(req, type_id)

        self.assertEqual(1, len(res_dict))
        self.assertEqual(type_id, res_dict['volume_type']['id'])
        vol_type_name = 'vol_type_' + type_id
        self.assertEqual(vol_type_name, res_dict['volume_type']['name'])

    def test_volume_types_show_not_found(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)

        req = fakes.HTTPRequest.blank('/v1/fake/types/777')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, '777')

    def test_view_builder_show(self):
        view_builder = views_types.ViewBuilder()

        now = timeutils.utcnow().isoformat()
        raw_volume_type = dict(name='new_type',
                               deleted=False,
                               created_at=now,
                               updated_at=now,
                               extra_specs={},
                               deleted_at=None,
                               description=None,
                               id=42)

        request = fakes.HTTPRequest.blank("/v1")
        output = view_builder.show(request, raw_volume_type)

        self.assertIn('volume_type', output)
        expected_volume_type = dict(name='new_type',
                                    extra_specs={},
                                    description=None,
                                    is_public=None,
                                    id=42)
        self.assertDictMatch(expected_volume_type, output['volume_type'])

    def test_view_builder_list(self):
        view_builder = views_types.ViewBuilder()

        now = timeutils.utcnow().isoformat()
        raw_volume_types = []
        for i in range(0, 10):
            raw_volume_types.append(dict(name='new_type',
                                         deleted=False,
                                         created_at=now,
                                         updated_at=now,
                                         extra_specs={},
                                         deleted_at=None,
                                         description=None,
                                         id=42 + i))

        request = fakes.HTTPRequest.blank("/v1")
        output = view_builder.index(request, raw_volume_types)

        self.assertIn('volume_types', output)
        for i in range(0, 10):
            expected_volume_type = dict(name='new_type',
                                        extra_specs={},
                                        id=42 + i,
                                        is_public=None,
                                        description=None)
            self.assertDictMatch(expected_volume_type,
                                 output['volume_types'][i])


class VolumeTypesSerializerTest(test.TestCase):
    def _verify_volume_type(self, vtype, tree):
        self.assertEqual('volume_type', tree.tag)
        self.assertEqual(vtype['name'], tree.get('name'))
        self.assertEqual(str(vtype['id']), tree.get('id'))
        self.assertEqual(1, len(tree))
        extra_specs = tree[0]
        self.assertEqual('extra_specs', extra_specs.tag)
        seen = set(vtype['extra_specs'].keys())
        for child in extra_specs:
            self.assertIn(child.tag, seen)
            self.assertEqual(vtype['extra_specs'][child.tag], child.text)
            seen.remove(child.tag)
        self.assertEqual(0, len(seen))

    def test_index_serializer(self):
        serializer = types.VolumeTypesTemplate()

        # Just getting some input data
        vtypes = return_volume_types_get_all_types(None)
        text = serializer.serialize({'volume_types': list(vtypes.values())})

        tree = etree.fromstring(text)

        self.assertEqual('volume_types', tree.tag)
        self.assertEqual(len(vtypes), len(tree))
        for child in tree:
            name = child.get('name')
            self.assertIn(name, vtypes)
            self._verify_volume_type(vtypes[name], child)

    def test_voltype_serializer(self):
        serializer = types.VolumeTypeTemplate()

        vtype = stub_volume_type(1)
        text = serializer.serialize(dict(volume_type=vtype))

        tree = etree.fromstring(text)
        self._verify_volume_type(vtype, tree)
cinder-8.0.0/cinder/tests/unit/api/v1/test_volumes.py0000664000567000056710000014712612701406257023726 0ustar jenkinsjenkins00000000000000
# Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
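
# The tests below exercise the v1 VolumeController directly through fake
# WSGI requests.  The round trip they all share, sketched with illustrative
# values (see the individual tests for the stubs each case installs first;
# ``controller`` stands in for the VolumeController instance):
#
#     body = {"volume": {"size": 100, "display_name": "Volume Test Name"}}
#     req = fakes.HTTPRequest.blank('/v1/volumes')
#     res_dict = controller.create(req, body)
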
import datetime

import iso8601
from lxml import etree
import mock
from oslo_config import cfg
from oslo_utils import timeutils
import webob

from cinder.api import extensions
from cinder.api.v1 import volumes
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import fake_notifier
from cinder.tests.unit import fake_volume
from cinder.tests.unit.image import fake as fake_image
from cinder.volume import api as volume_api


NS = '{http://docs.openstack.org/api/openstack-block-storage/1.0/content}'

TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001'

CONF = cfg.CONF


def stub_snapshot_get(self, context, snapshot_id):
    if snapshot_id != TEST_SNAPSHOT_UUID:
        raise exception.NotFound

    return {'id': snapshot_id,
            'volume_id': 12,
            'status': 'available',
            'volume_size': 100,
            'created_at': None,
            'display_name': 'Default name',
            'display_description': 'Default description', }


class VolumeApiTest(test.TestCase):
    def setUp(self):
        super(VolumeApiTest, self).setUp()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        fake_image.stub_out_image_service(self.stubs)
        self.controller = volumes.VolumeController(self.ext_mgr)

        self.flags(host='fake',
                   notification_driver=[fake_notifier.__name__])
        self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
        self.stubs.Set(db, 'service_get_all_by_topic',
                       stubs.stub_service_get_all_by_topic)
        self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)

    def test_volume_create(self):
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        res_dict = self.controller.create(req, body)
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'Volume Test Desc',
                               'availability_zone': 'zone1:host1',
                               'display_name': 'Volume Test Name',
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {},
                               'id': '1',
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 100,
                               'encrypted': False}}
        self.assertEqual(expected, res_dict)

    def test_volume_create_with_type(self):
        vol_type = CONF.default_volume_type
        db.volume_type_create(context.get_admin_context(),
                              dict(name=vol_type, extra_specs={}))

        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 vol_type)

        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1",
               "volume_type": "FakeTypeName"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        # Raise 404 when type name isn't valid
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, body)

        # Use correct volume type name
        vol.update(dict(volume_type=CONF.default_volume_type))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        self.assertIn('id', res_dict['volume'])
        self.assertEqual(1, len(res_dict))
        self.assertEqual(db_vol_type['name'],
                         res_dict['volume']['volume_type'])

        # Use correct volume type id
        vol.update(dict(volume_type=db_vol_type['id']))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        self.assertIn('id', res_dict['volume'])
        self.assertEqual(1, len(res_dict))
        self.assertEqual(db_vol_type['name'],
                         res_dict['volume']['volume_type'])

    def test_volume_creation_fails_with_bad_size(self):
        vol = {"size": '',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(exception.InvalidInput,
                          self.controller.create,
                          req,
                          body)

    def test_volume_creation_fails_with_bad_availability_zone(self):
        vol = {"size": '1',
               "name": "Volume Test Name",
               "description": "Volume Test Desc",
               "availability_zone": "zonen:hostn"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(exception.InvalidInput,
                          self.controller.create,
                          req, body)

    def test_volume_create_with_image_id(self):
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
        vol = {"size": '1',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "nova",
               "imageRef": test_id}
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'Volume Test Desc',
                               'availability_zone': 'nova',
                               'display_name': 'Volume Test Name',
                               'encrypted': False,
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'image_id': test_id,
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {},
                               'id': '1',
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        res_dict = self.controller.create(req, body)
        self.assertEqual(expected, res_dict)

    def test_volume_create_with_image_id_is_integer(self):
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": '1',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "cinder",
               "imageRef": 1234}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_volume_create_with_image_id_not_uuid_format(self):
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": '1',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "cinder",
               "imageRef": '12345'}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_volume_create_with_image_id_with_empty_string(self):
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": 1,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "cinder",
               "imageRef": ''}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    @mock.patch.object(db, 'volume_admin_metadata_get',
                       return_value={'attached_mode': 'rw',
                                     'readonly': 'False'})
    @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
                       side_effect=stubs.stub_volume_type_get)
    @mock.patch.object(volume_api.API, 'get',
                       side_effect=stubs.stub_volume_api_get, autospec=True)
    @mock.patch.object(volume_api.API, 'update',
                       side_effect=stubs.stub_volume_update, autospec=True)
    def test_volume_update(self, *args):
        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller.update(req, '1', body)
        expected = {'volume': {
            'status': 'fakestatus',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'Updated Test Name',
            'encrypted': False,
            'attachments': [],
            'multiattach': 'false',
            'bootable': 'false',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'attached_mode': 'rw',
                         'readonly': 'False'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
                                            tzinfo=iso8601.iso8601.Utc()),
            'size': 1}}
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))

    @mock.patch.object(db, 'volume_admin_metadata_get',
                       return_value={"qos_max_iops": 2000,
                                     "readonly": "False",
                                     "attached_mode": "rw"})
    @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
                       side_effect=stubs.stub_volume_type_get)
    @mock.patch.object(volume_api.API, 'get',
                       side_effect=stubs.stub_volume_api_get, autospec=True)
    @mock.patch.object(volume_api.API, 'update',
                       side_effect=stubs.stub_volume_update, autospec=True)
    def test_volume_update_metadata(self, *args):
        updates = {
            "metadata": {"qos_max_iops": 2000}
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller.update(req, '1', body)
        expected = {'volume': {
            'status': 'fakestatus',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'displayname',
            'encrypted': False,
            'attachments': [],
            'multiattach': 'false',
            'bootable': 'false',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {"qos_max_iops": '2000',
                         "readonly": "False",
                         "attached_mode": "rw"},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
                                            tzinfo=iso8601.iso8601.Utc()),
            'size': 1
        }}
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))

    def test_volume_update_with_admin_metadata(self):
        def stubs_volume_admin_metadata_get(context, volume_id):
            return {'key': 'value',
                    'readonly': 'True'}
        self.stubs.Set(db, 'volume_admin_metadata_get',
                       stubs_volume_admin_metadata_get)
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)

        volume = stubs.stub_volume("1")
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': '1', }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], stubs.FAKE_UUID, None, '/')

        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertEqual(0, len(self.notifier.notifications))
        admin_ctx = context.RequestContext('admin', 'fakeproject', True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.update(req, '1', body)
        expected = {'volume': {
            'status': 'in-use',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'Updated Test Name',
            'encrypted': False,
            'attachments': [{
                'attachment_id': attachment['id'],
                'id': '1',
                'volume_id': '1',
                'server_id': stubs.FAKE_UUID,
                'host_name': None,
                'device': '/'
            }],
            'multiattach': 'false',
            'bootable': 'false',
            'volume_type': None,
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'key': 'value',
                         'readonly': 'True'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
                                            tzinfo=iso8601.iso8601.Utc()),
            'size': 1}}
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))

    def test_update_empty_body(self):
        body = {}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.update,
                          req, '1', body)

    def test_update_invalid_body(self):
        body = {'display_name': 'missing top level volume key'}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.update,
                          req, '1', body)

    def test_update_not_found(self):
        self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req, '1', body)

    def test_volume_list(self):
        def stubs_volume_admin_metadata_get(context, volume_id):
            return {'attached_mode': 'rw',
                    'readonly': 'False'}
        self.stubs.Set(db, 'volume_admin_metadata_get',
                       stubs_volume_admin_metadata_get)
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(volume_api.API, 'get_all',
                       stubs.stub_volume_api_get_all_by_project)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes')
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'fakestatus',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [],
                                 'multiattach': 'false',
                                 'bootable': 'false',
                                 'volume_type': 'vol_type_name',
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'attached_mode': 'rw',
                                              'readonly': 'False'},
                                 'id': '1',
                                 'created_at': datetime.datetime(
                                     1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.iso8601.Utc()),
                                 'size': 1}]}
        self.assertEqual(expected, res_dict)
        # Finally test that we cached the returned volumes
        self.assertEqual(1, len(req.cached_resource()))

    def test_volume_list_with_admin_metadata(self):
        volume = stubs.stub_volume("1")
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': '1', }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], stubs.FAKE_UUID, None, '/')

        req = fakes.HTTPRequest.blank('/v1/volumes')
        admin_ctx = context.RequestContext('admin', 'fakeproject', True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'in-use',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [
                                     {'attachment_id': attachment['id'],
                                      'device': '/',
                                      'server_id': stubs.FAKE_UUID,
                                      'host_name': None,
                                      'id': '1',
                                      'volume_id': '1'}],
                                 'multiattach': 'false',
                                 'bootable': 'false',
                                 'volume_type': None,
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'key': 'value',
                                              'readonly': 'True'},
                                 'id': '1',
                                 'created_at': datetime.datetime(
                                     1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.iso8601.Utc()),
                                 'size': 1}]}
        self.assertEqual(expected, res_dict)

    @mock.patch.object(db, 'volume_admin_metadata_get',
                       return_value={'attached_mode': 'rw',
                                     'readonly': 'False'})
    def test_volume_list_detail(self, *args):
        self.stubs.Set(volume_api.API, 'get_all',
                       stubs.stub_volume_api_get_all_by_project)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/detail')
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'fakestatus',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [],
                                 'multiattach': 'false',
                                 'bootable': 'false',
                                 'volume_type': 'vol_type_name',
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'attached_mode': 'rw',
                                              'readonly': 'False'},
                                 'id': '1',
                                 'created_at': datetime.datetime(
                                     1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.iso8601.Utc()),
                                 'size': 1}]}
        self.assertEqual(expected, res_dict)
        # Finally test that we cached the returned volumes
        self.assertEqual(1, len(req.cached_resource()))

    def test_volume_list_detail_with_admin_metadata(self):
        volume = stubs.stub_volume("1")
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': '1', }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], stubs.FAKE_UUID, None, '/')

        req = fakes.HTTPRequest.blank('/v1/volumes/detail')
        admin_ctx = context.RequestContext('admin', 'fakeproject', True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'in-use',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [
                                     {'attachment_id': attachment['id'],
                                      'device': '/',
                                      'server_id': stubs.FAKE_UUID,
                                      'host_name': None,
                                      'id': '1',
                                      'volume_id': '1'}],
                                 'multiattach': 'false',
                                 'bootable': 'false',
                                 'volume_type': None,
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'key': 'value',
                                              'readonly': 'True'},
                                 'id': '1',
                                 'created_at': datetime.datetime(
                                     1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.iso8601.Utc()),
                                 'size': 1}]}
        self.assertEqual(expected, res_dict)

    @mock.patch.object(db, 'volume_admin_metadata_get',
                       return_value={'attached_mode': 'rw',
                                     'readonly': 'False'})
    @mock.patch.object(volume_api.API, 'get',
                       side_effect=stubs.stub_volume_api_get, autospec=True)
    @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
                       side_effect=stubs.stub_volume_type_get, autospec=True)
    def test_volume_show(self, *args):
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        res_dict = self.controller.show(req, '1')
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'attached_mode': 'rw',
                                            'readonly': 'False'},
                               'id': '1',
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}
        self.assertEqual(expected, res_dict)
        # Finally test that we cached the returned volume
        self.assertIsNotNone(req.cached_resource_by_id('1'))

    def test_volume_show_no_attachments(self):
        def stub_volume_get(self, context, volume_id, **kwargs):
            vol = stubs.stub_volume(volume_id, attach_status='detached')
            return fake_volume.fake_volume_obj(context, **vol)

        self.stubs.Set(volume_api.API, 'get', stub_volume_get)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        res_dict = self.controller.show(req, '1')
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'readonly': 'False'},
                               'id': '1',
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}
        self.assertEqual(expected, res_dict)

    def test_volume_show_bootable(self):
        def stub_volume_get(self, context, volume_id, **kwargs):
            vol = (stubs.stub_volume(volume_id,
                   volume_glance_metadata=dict(foo='bar')))
            return fake_volume.fake_volume_obj(context, **vol)

        self.stubs.Set(volume_api.API, 'get', stub_volume_get)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        res_dict = self.controller.show(req, '1')
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'true',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'attached_mode': 'rw',
                                            'readonly': 'False'},
                               'id': '1',
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}
        self.assertEqual(expected, res_dict)

    def test_volume_show_no_volume(self):
        self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)

        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show,
                          req, 1)
        # Finally test that we did not cache anything
        self.assertIsNone(req.cached_resource_by_id('1'))

    def test_volume_detail_limit_offset(self):
        def volume_detail_limit_offset(is_admin):
            def stub_volume_get_all_by_project(context, project_id, marker,
                                               limit, sort_keys=None,
                                               sort_dirs=None, filters=None,
                                               viewable_admin_meta=False,
                                               offset=None):
                return [
                    stubs.stub_volume(1, display_name='vol1'),
                    stubs.stub_volume(2, display_name='vol2'),
                ]

            self.stubs.Set(db, 'volume_get_all_by_project',
                           stub_volume_get_all_by_project)
            self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
            self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                           stubs.stub_volume_type_get)

            req = fakes.HTTPRequest.blank('/v1/volumes/detail?limit=2'
                                          '&offset=1',
                                          use_admin_context=is_admin)
            res_dict = self.controller.index(req)
            volumes = res_dict['volumes']
            self.assertEqual(1, len(volumes))
            self.assertEqual('2', volumes[0]['id'])

        # admin case
        volume_detail_limit_offset(is_admin=True)
        # non_admin case
        volume_detail_limit_offset(is_admin=False)

    def test_volume_show_with_admin_metadata(self):
        volume = stubs.stub_volume("1")
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': '1', }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], stubs.FAKE_UUID, None, '/')

        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        admin_ctx = context.RequestContext('admin', 'fakeproject', True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.show(req, '1')
        expected = {'volume': {'status': 'in-use',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [
                                   {'attachment_id': attachment['id'],
                                    'device': '/',
                                    'server_id': stubs.FAKE_UUID,
                                    'host_name': None,
                                    'id': '1',
                                    'volume_id': '1'}],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': None,
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'key': 'value',
                                            'readonly': 'True'},
                               'id': '1',
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}
        self.assertEqual(expected, res_dict)

    def test_volume_show_with_encrypted_volume(self):
        def stub_volume_get(self, context, volume_id, **kwargs):
            vol = stubs.stub_volume(volume_id, encryption_key_id='fake_id')
            return fake_volume.fake_volume_obj(context, **vol)

        self.stubs.Set(volume_api.API, 'get', stub_volume_get)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        res_dict = self.controller.show(req, 1)
        self.assertTrue(res_dict['volume']['encrypted'])

    def test_volume_show_with_unencrypted_volume(self):
        self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        res_dict = self.controller.show(req, 1)
        self.assertEqual(False, res_dict['volume']['encrypted'])

    def test_volume_delete(self):
        self.stubs.Set(db.sqlalchemy.api, 'volume_get',
                       stubs.stub_volume_get_db)

        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        resp = self.controller.delete(req, 1)
        self.assertEqual(202, resp.status_int)

    def test_volume_delete_no_volume(self):
        self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)

        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete,
                          req, 1)

    def test_admin_list_volumes_limited_to_project(self):
        self.stubs.Set(db, 'volume_get_all_by_project',
                       stubs.stub_volume_get_all_by_project)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/fake/volumes',
                                      use_admin_context=True)
        res = self.controller.index(req)

        self.assertIn('volumes', res)
        self.assertEqual(1, len(res['volumes']))

    def test_admin_list_volumes_all_tenants(self):
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1',
                                      use_admin_context=True)
        res = self.controller.index(req)
        self.assertIn('volumes', res)
        self.assertEqual(3, len(res['volumes']))

    def test_all_tenants_non_admin_gets_all_tenants(self):
        self.stubs.Set(db, 'volume_get_all_by_project',
                       stubs.stub_volume_get_all_by_project)
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1')
        res = self.controller.index(req)
        self.assertIn('volumes', res)
        self.assertEqual(1, len(res['volumes']))

    def test_non_admin_get_by_project(self):
        self.stubs.Set(db, 'volume_get_all_by_project',
                       stubs.stub_volume_get_all_by_project)
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/fake/volumes')
        res = self.controller.index(req)
        self.assertIn('volumes', res)
        self.assertEqual(1, len(res['volumes']))

    @mock.patch('cinder.volume.api.API.get_all')
    def test_get_volumes_filter_with_string(self, get_all):
        req = mock.MagicMock()
        req.GET.copy.return_value = {'display_name': 'Volume-573108026'}
        context = mock.Mock()
        req.environ = {'cinder.context': context}
        self.controller._items(req, mock.Mock)
        get_all.assert_called_once_with(
            context, sort_dirs=['desc'], viewable_admin_meta=True,
            sort_keys=['created_at'], limit=None,
            filters={'display_name': 'Volume-573108026'}, marker=None)

    @mock.patch('cinder.volume.api.API.get_all')
    def test_get_volumes_filter_with_list(self, get_all):
        req = mock.MagicMock()
        req.GET.copy.return_value = {'id': "['1', '2', '3']"}
        context = mock.Mock()
        req.environ = {'cinder.context': context}
        self.controller._items(req, mock.Mock)
        get_all.assert_called_once_with(
            context, sort_dirs=['desc'], viewable_admin_meta=True,
            sort_keys=['created_at'], limit=None,
            filters={'id': ['1', '2', '3']}, marker=None)

    @mock.patch('cinder.volume.api.API.get_all')
    def test_get_volumes_filter_with_expression(self, get_all):
        req = mock.MagicMock()
        req.GET.copy.return_value = {'id': "d+"}
        context = mock.Mock()
        req.environ = {'cinder.context': context}
        self.controller._items(req, mock.Mock)
        get_all.assert_called_once_with(
            context, sort_dirs=['desc'], viewable_admin_meta=True,
            sort_keys=['created_at'], limit=None,
            filters={'id': 'd+'}, marker=None)


class VolumeSerializerTest(test.TestCase):
    def _verify_volume_attachment(self, attach, tree):
        for attr in ('id', 'volume_id', 'server_id', 'device'):
            self.assertEqual(str(attach[attr]), tree.get(attr))

    def _verify_volume(self, vol, tree):
        self.assertEqual(NS + 'volume', tree.tag)

        for attr in ('id', 'status', 'size', 'availability_zone',
                     'created_at', 'display_name', 'display_description',
                     'volume_type', 'bootable', 'snapshot_id'):
            self.assertEqual(str(vol[attr]), tree.get(attr))

        for child in tree:
            self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata'))
            if child.tag == 'attachments':
                self.assertEqual(1, len(child))
                self.assertEqual('attachment', child[0].tag)
                self._verify_volume_attachment(vol['attachments'][0],
                                               child[0])
            elif child.tag == 'metadata':
                not_seen = set(vol['metadata'].keys())
                for gr_child in child:
                    self.assertIn(gr_child.get("key"), not_seen)
                    self.assertEqual(
                        str(vol['metadata'][gr_child.get("key")]),
                        gr_child.text)
                    not_seen.remove(gr_child.get('key'))
                self.assertEqual(0, len(not_seen))

    def test_volume_show_create_serializer(self):
        serializer = volumes.VolumeTemplate()
        raw_volume = dict(
            id='vol_id',
            status='vol_status',
            size=1024,
            availability_zone='vol_availability',
            bootable='false',
            created_at=timeutils.utcnow(),
            attachments=[dict(id='vol_id',
                              volume_id='vol_id',
                              server_id='instance_uuid',
                              device='/foo')],
            display_name='vol_name',
            display_description='vol_desc',
            volume_type='vol_type',
            snapshot_id='snap_id',
            source_volid='source_volid',
            metadata=dict(foo='bar',
                          baz='quux', ),
        )
        text = serializer.serialize(dict(volume=raw_volume))

        tree = 
etree.fromstring(text)
        self._verify_volume(raw_volume, tree)

    def test_volume_index_detail_serializer(self):
        serializer = volumes.VolumesTemplate()
        raw_volumes = [dict(id='vol1_id',
                            status='vol1_status',
                            size=1024,
                            availability_zone='vol1_availability',
                            bootable='true',
                            created_at=timeutils.utcnow(),
                            attachments=[dict(id='vol1_id',
                                              volume_id='vol1_id',
                                              server_id='instance_uuid',
                                              device='/foo1')],
                            display_name='vol1_name',
                            display_description='vol1_desc',
                            volume_type='vol1_type',
                            snapshot_id='snap1_id',
                            source_volid=None,
                            metadata=dict(foo='vol1_foo',
                                          bar='vol1_bar', ), ),
                       dict(id='vol2_id',
                            status='vol2_status',
                            size=1024,
                            availability_zone='vol2_availability',
                            bootable='true',
                            created_at=timeutils.utcnow(),
                            attachments=[dict(id='vol2_id',
                                              volume_id='vol2_id',
                                              server_id='instance_uuid',
                                              device='/foo2')],
                            display_name='vol2_name',
                            display_description='vol2_desc',
                            volume_type='vol2_type',
                            snapshot_id='snap2_id',
                            source_volid=None,
                            metadata=dict(foo='vol2_foo',
                                          bar='vol2_bar', ), )]
        text = serializer.serialize(dict(volumes=raw_volumes))
        tree = etree.fromstring(text)
        self.assertEqual(NS + 'volumes', tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_volume(raw_volumes[idx], child)


class TestVolumeCreateRequestXMLDeserializer(test.TestCase):

    def setUp(self):
        super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
        self.deserializer = volumes.CreateDeserializer()

    def test_minimal_volume(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {"volume": {"size": "1", }, }
        self.assertEqual(expected, request['body'])

    def test_display_name(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_display_description(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_volume_type(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_availability_zone(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
        availability_zone="us-east1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_metadata(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        display_name="Volume-xml"
        size="1">
    <metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "display_name": "Volume-xml",
                "size": "1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.assertEqual(expected, request['body'])

    def test_full_volume(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
        availability_zone="us-east1">
    <metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.assertEqual(expected, request['body'])

    def test_imageref(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        imageRef="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_snapshot_id(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        snapshot_id="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_source_volid(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        source_volid="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])


class VolumesUnprocessableEntityTestCase(test.TestCase):

    """Tests of places we throw 422 Unprocessable Entity from."""

    def setUp(self):
        super(VolumesUnprocessableEntityTestCase, self).setUp()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = volumes.VolumeController(self.ext_mgr)

    def _unprocessable_volume_create(self, body):
        req = fakes.HTTPRequest.blank('/v2/fake/volumes')
        req.method = 'POST'
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, body)

    def test_create_no_body(self):
        self._unprocessable_volume_create(body=None)

    def test_create_missing_volume(self):
        body = {'foo': {'a': 'b'}}
        self._unprocessable_volume_create(body=body)

    def test_create_malformed_entity(self):
        body = {'volume': 'string'}
        self._unprocessable_volume_create(body=body)
cinder-8.0.0/cinder/tests/unit/api/v1/test_snapshot_metadata.py0000664000567000056710000006352212701406257025734 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid import mock from oslo_serialization import jsonutils import webob from cinder.api import extensions from cinder.api.v1 import snapshot_metadata from cinder.api.v1 import snapshots from cinder import context import cinder.db from cinder import exception from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder import volume def return_create_snapshot_metadata(context, snapshot_id, metadata, delete): return stub_snapshot_metadata() def return_create_snapshot_metadata_insensitive(context, snapshot_id, metadata, delete): return stub_snapshot_metadata_insensitive() def return_new_snapshot_metadata(context, snapshot_id, metadata, delete): return stub_new_snapshot_metadata() def return_empty_container_metadata(context, snapshot_id, metadata, delete): return {} def stub_snapshot_metadata(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } return metadata def stub_snapshot_metadata_insensitive(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4", } return metadata def stub_new_snapshot_metadata(): metadata = { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', } return metadata def return_snapshot(context, snapshot_id): return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', 'name': 'fake', 'status': 'available', 'metadata': {}} def stub_get(context, volume_id, *args, **kwargs): vol = {'id': volume_id, 'size': 100, 'name': 'fake', 'host': 'fake-host', 'status': 'available', 'encryption_key_id': None, 'volume_type_id': None, 'migration_status': None, 'availability_zone': 'zone1:host1', 'attach_status': 'detached'} return fake_volume.fake_volume_obj(context, **vol) def return_snapshot_nonexistent(context, snapshot_id): raise exception.SnapshotNotFound('bogus test message') def fake_update_snapshot_metadata(self, context, snapshot, diff): pass class SnapshotMetaDataTest(test.TestCase): def setUp(self): super(SnapshotMetaDataTest, self).setUp() self.volume_api = cinder.volume.api.API() self.stubs.Set(volume.api.API, 'get', stub_get) self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot) self.stubs.Set(self.volume_api, 'update_snapshot_metadata', fake_update_snapshot_metadata) self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr) self.controller = snapshot_metadata.Controller() self.req_id = str(uuid.uuid4()) self.url = '/v1/fake/snapshots/%s/metadata' % self.req_id snap = {"volume_size": 100, "volume_id": "fake-vol-id", "display_name": "Volume Test Name", "display_description": "Volume Test Desc", "availability_zone": "zone1:host1", "host": "fake-host", "metadata": {}} body = {"snapshot": snap} req = fakes.HTTPRequest.blank('/v1/snapshots') self.snapshot_controller.create(req, body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_index(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_obj['metadata'] = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) @mock.patch('cinder.objects.Snapshot.get_by_id') 
def test_index_nonexistent_snapshot(self, snapshot_get_by_id): snapshot_get_by_id.side_effect = \ exception.SnapshotNotFound(snapshot_id=self.req_id) req = fakes.HTTPRequest.blank(self.url) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, req, self.url) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_index_no_data(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_obj['metadata'] = {'key2': 'value2'} snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key2') res_dict = self.controller.show(req, self.req_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show_nonexistent_snapshot(self, snapshot_get_by_id): snapshot_get_by_id.side_effect = \ exception.SnapshotNotFound(snapshot_id=self.req_id) req = fakes.HTTPRequest.blank(self.url + '/key2') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key2') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show_meta_not_found(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key6') @mock.patch('cinder.db.snapshot_metadata_delete') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_delete(self, snapshot_get_by_id, snapshot_metadata_delete): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_obj['metadata'] = {'key2': 'value2'} snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' res = self.controller.delete(req, self.req_id, 'key2') self.assertEqual(200, res.status_int) def test_delete_nonexistent_snapshot(self): self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key1') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_delete_meta_not_found(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key6') @mock.patch('cinder.db.snapshot_update') 
@mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_create(self, snapshot_get_by_id, volume_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank('/v1/snapshot_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3"}} req.body = jsonutils.dump_as_bytes(body) res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(body, res_dict) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_create_with_keys_in_uppercase_and_lowercase( self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj # if the keys in uppercase_and_lowercase, should return the one # which server added self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata_insensitive) req = fakes.HTTPRequest.blank('/v1/snapshot_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "KEY1": "value1", "key2": "value2", "KEY2": "value2", "key3": "value3", "KEY4": "value4"}} expected = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4"}} req.body = jsonutils.dump_as_bytes(body) res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(expected, res_dict) def test_create_empty_body(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, None) def test_create_item_empty_key(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_item_key_too_long(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_nonexistent_snapshot(self): self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank('/v1/snapshot_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) 
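# Aside: test_create_with_keys_in_uppercase_and_lowercase above pins down the
# case rule for duplicate metadata keys: the first casing seen is kept, and a
# differently-cased twin of an already-kept key is dropped. A minimal sketch
# of that rule with a hypothetical helper, not cinder's implementation:
def dedupe_keys_case_insensitive(requested):
    result, seen = {}, set()
    for key, value in requested.items():
        if key.lower() in seen:
            continue  # a differently-cased twin was already kept
        result[key] = value
        seen.add(key.lower())
    return result

# Matches the test's expectation: 'KEY1' is dropped, 'KEY4' survives.
assert dedupe_keys_case_insensitive(
    {'key1': 'value1', 'KEY1': 'value1', 'KEY4': 'value4'}
) == {'key1': 'value1', 'KEY4': 'value4'}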
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, self.req_id, body) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all(self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': [] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_new_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) @mock.patch('cinder.db.snapshot_update', return_value={'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20'}) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all_with_keys_in_uppercase_and_lowercase( self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_new_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = { 'metadata': { 'key10': 'value10', 'KEY10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) res_dict = self.controller.update_all(req, self.req_id, body) self.assertEqual(expected, res_dict) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all_empty_container(self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': [] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_empty_container_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = jsonutils.dump_as_bytes(expected) res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) def test_update_all_malformed_container(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) @mock.patch('cinder.db.sqlalchemy.api._snapshot_get') @mock.patch('cinder.db.snapshot_metadata_update', autospec=True) def test_update_all_malformed_data(self, metadata_update, snapshot_get): snapshot_get.return_value = stub_get req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': ['asdf']} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, 
expected) def test_update_all_nonexistent_snapshot(self): self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update_all, req, '100', body) @mock.patch('cinder.db.snapshot_metadata_update', return_value=dict()) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item(self, snapshot_get_by_id, snapshot_update, snapshot_metadata_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res_dict = self.controller.update(req, self.req_id, 'key1', body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) def test_update_item_nonexistent_snapshot(self): self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank( '/v1.1/fake/snapshots/asdf/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_empty_body(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', None) @mock.patch('cinder.db.sqlalchemy.api._snapshot_get') @mock.patch('cinder.db.snapshot_metadata_update', autospec=True) def test_update_item_empty_key(self, metadata_update, snapshot_get): snapshot_get.return_value = stub_get req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, '', body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item_key_too_long(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, ("a" * 260), body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item_value_too_long(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) 
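# Aside: the length tests in this module reject 260-character keys and values
# with HTTP 413 (HTTPRequestEntityTooLarge) and empty keys with HTTP 400. The
# exact ceiling is not shown in this file; the 255 below is an assumption
# consistent with 260 being too long, sketched as a standalone check:
import webob.exc

ASSUMED_MAX_META_LEN = 255  # assumed limit; the tests only prove 260 fails

def check_meta_item(key, value):
    if not key:
        raise webob.exc.HTTPBadRequest(explanation='empty metadata key')
    if len(key) > ASSUMED_MAX_META_LEN or len(value) > ASSUMED_MAX_META_LEN:
        raise webob.exc.HTTPRequestEntityTooLarge(
            explanation='metadata key or value too long')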
snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": ("a" * 260)}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, "key1", body) def test_update_item_too_many_keys(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1", "key2": "value2"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_body_uri_mismatch(self): self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'bad', body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_invalid_metadata_items_on_create(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } ctx = context.RequestContext('admin', 'fake', True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.stubs.Set(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dump_as_bytes(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dump_as_bytes(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) # test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dump_as_bytes(data) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, data) cinder-8.0.0/cinder/tests/unit/api/v1/test_volume_metadata.py0000664000567000056710000006514212701406257025404 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
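# The volume metadata tests below mirror the snapshot metadata tests, but
# drive the DB layer through mock.patch.object with side_effect functions so
# each mocked call is answered by a plain stub. A self-contained illustration
# of that technique (toy class and names, not cinder's):
import mock

class FakeDb(object):
    def metadata_get(self, ctx, volume_id):
        raise RuntimeError('the real DB is never reached in these tests')

def stub_metadata_get(ctx, volume_id):
    return {'key1': 'value1'}

fake_db = FakeDb()
with mock.patch.object(fake_db, 'metadata_get',
                       side_effect=stub_metadata_get):
    # inside the context, calls are routed to the stub
    assert fake_db.metadata_get(None, 'vol-1') == {'key1': 'value1'}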
import uuid import mock from oslo_config import cfg from oslo_serialization import jsonutils import webob from cinder.api import extensions from cinder.api.v1 import volume_metadata from cinder.api.v1 import volumes import cinder.db from cinder import exception from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v1 import stubs from cinder.tests.unit import fake_volume from cinder import volume CONF = cfg.CONF def return_create_volume_metadata_max(context, volume_id, metadata, delete): return stub_max_volume_metadata() def return_create_volume_metadata(context, volume_id, metadata, delete, meta_type): return stub_volume_metadata() def return_new_volume_metadata(context, volume_id, metadata, delete, meta_type): return stub_new_volume_metadata() def return_create_volume_metadata_insensitive(context, snapshot_id, metadata, delete, meta_type): return stub_volume_metadata_insensitive() def return_volume_metadata(context, volume_id): return stub_volume_metadata() def return_empty_volume_metadata(context, volume_id): return {} def return_empty_container_metadata(context, volume_id, metadata, delete, meta_type): return {} def delete_volume_metadata(context, volume_id, key, meta_type): pass def stub_volume_metadata(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } return metadata def stub_new_volume_metadata(): metadata = { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', } return metadata def stub_volume_metadata_insensitive(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4", } return metadata def stub_max_volume_metadata(): metadata = {"metadata": {}} for num in range(CONF.quota_metadata_items): metadata['metadata']['key%i' % num] = "blah" return metadata def get_volume(*args, **kwargs): vol = {'id': args[1], 'size': 100, 'name': 'fake', 'host': 'fake-host', 'status': 'available', 'encryption_key_id': None, 'volume_type_id': None, 'migration_status': None, 'availability_zone': 'zone1:host1', 'attach_status': 'detached'} return fake_volume.fake_volume_obj(args[0], **vol) def return_volume_nonexistent(*args, **kwargs): raise exception.VolumeNotFound('bogus test message') def fake_update_volume_metadata(self, context, volume, diff): pass class volumeMetaDataTest(test.TestCase): def setUp(self): super(volumeMetaDataTest, self).setUp() self.volume_api = cinder.volume.api.API() self.stubs.Set(volume.api.API, 'get', get_volume) self.stubs.Set(cinder.db, 'volume_metadata_get', return_volume_metadata) self.stubs.Set(cinder.db, 'service_get_all_by_topic', stubs.stub_service_get_all_by_topic) self.stubs.Set(self.volume_api, 'update_volume_metadata', fake_update_volume_metadata) self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.volume_controller = volumes.VolumeController(self.ext_mgr) self.controller = volume_metadata.Controller() self.req_id = str(uuid.uuid4()) self.url = '/v1/fake/volumes/%s/metadata' % self.req_id vol = {"size": 100, "display_name": "Volume Test Name", "display_description": "Volume Test Desc", "availability_zone": "zone1:host1", "metadata": {}} body = {"volume": vol} req = fakes.HTTPRequest.blank('/v1/volumes') self.volume_controller.create(req, body) def test_index(self): req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) def test_index_nonexistent_volume(self): self.stubs.Set(cinder.db, 
'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, req, self.url) def test_index_no_data(self): self.stubs.Set(cinder.db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) def test_show(self): req = fakes.HTTPRequest.blank(self.url + '/key2') res_dict = self.controller.show(req, self.req_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) def test_show_nonexistent_volume(self): self.stubs.Set(cinder.db, 'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key2') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key2') def test_show_meta_not_found(self): self.stubs.Set(cinder.db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key6') @mock.patch.object(cinder.db, 'volume_metadata_delete') @mock.patch.object(cinder.db, 'volume_metadata_get') def test_delete(self, metadata_get, metadata_delete): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_volume_metadata metadata_delete.side_effect = delete_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res = self.controller.delete(req, self.req_id, 'key2') self.assertEqual(200, res.status_int) get_volume.assert_called_with(fake_context, self.req_id) @mock.patch.object(cinder.db, 'volume_metadata_delete') @mock.patch.object(cinder.db, 'volume_metadata_get') def test_delete_nonexistent_volume(self, metadata_get, metadata_delete): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_volume_metadata metadata_delete.side_effect = return_volume_nonexistent req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key1') get_volume.assert_called_with(fake_context, self.req_id) def test_delete_meta_not_found(self): self.stubs.Set(cinder.db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key6') @mock.patch.object(cinder.db, 'volume_metadata_update') @mock.patch.object(cinder.db, 'volume_metadata_get') def test_create(self, metadata_get, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_empty_volume_metadata metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", }} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = fake_context with 
mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(body, res_dict) @mock.patch.object(cinder.db, 'volume_metadata_update') @mock.patch.object(cinder.db, 'volume_metadata_get') def test_create_with_keys_in_uppercase_and_lowercase(self, metadata_get, metadata_update): # if the keys in uppercase_and_lowercase, should return the one # which server added fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_empty_volume_metadata metadata_update.side_effect = return_create_volume_metadata_insensitive req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "KEY1": "value1", "key2": "value2", "KEY2": "value2", "key3": "value3", "KEY4": "value4"}} expected = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4"}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(expected, res_dict) def test_create_empty_body(self): self.stubs.Set(cinder.db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, None) def test_create_item_empty_key(self): self.stubs.Set(cinder.db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_item_key_too_long(self): self.stubs.Set(cinder.db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_nonexistent_volume(self): self.stubs.Set(volume.api.API, 'get', return_volume_nonexistent) self.stubs.Set(cinder.db, 'volume_metadata_get', return_volume_metadata) self.stubs.Set(cinder.db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank('/v1/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, self.req_id, body) @mock.patch.object(cinder.db, 'volume_metadata_update') def test_update_all(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with 
mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(cinder.db, 'volume_metadata_update') @mock.patch.object(cinder.db, 'volume_metadata_get') def test_update_all_with_keys_in_uppercase_and_lowercase(self, metadata_get, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_create_volume_metadata metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = { 'metadata': { 'key10': 'value10', 'KEY10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, body) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(cinder.db, 'volume_metadata_update') def test_update_all_empty_container(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_empty_container_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(cinder.db, 'volume_metadata_update') def test_update_item_value_too_long(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": ("a" * 260)}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, "key1", body) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_all_malformed_container(self): self.stubs.Set(cinder.db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) @mock.patch.object(cinder.db, 'volume_metadata_update') def test_update_all_malformed_data(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = 
return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': ['asdf']} req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_nonexistent_volume(self): self.stubs.Set(cinder.db, 'volume_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update_all, req, '100', body) @mock.patch.object(cinder.db, 'volume_metadata_update') def test_update_item(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update(req, self.req_id, 'key1', body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_item_nonexistent_volume(self): self.stubs.Set(cinder.db, 'volume_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank('/v1.1/fake/volumes/asdf/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_empty_body(self): self.stubs.Set(cinder.db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', None) @mock.patch.object(cinder.db, 'volume_metadata_update') def test_update_item_empty_key(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, '', body) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(cinder.db, 'volume_metadata_update') def test_update_item_key_too_long(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = 
{"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, ("a" * 260), body) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_item_too_many_keys(self): self.stubs.Set(cinder.db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1", "key2": "value2"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_body_uri_mismatch(self): self.stubs.Set(cinder.db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'bad', body) @mock.patch.object(cinder.db, 'volume_metadata_update') def test_invalid_metadata_items_on_create(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) # test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, data) cinder-8.0.0/cinder/tests/unit/test_gpfs.py0000664000567000056710000031151212701406250022061 0ustar jenkinsjenkins00000000000000 # Copyright IBM Corp. 2013 All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import tempfile import mock from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder import test from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import gpfs from cinder.volume import volume_types CONF = cfg.CONF class FakeQemuImgInfo(object): def __init__(self): self.file_format = None self.backing_file = None class GPFSDriverTestCase(test.TestCase): driver_name = "cinder.volume.drivers.gpfs.GPFSDriver" context = context.get_admin_context() def _execute_wrapper(self, cmd, *args, **kwargs): try: kwargs.pop('run_as_root') except KeyError: pass return utils.execute(cmd, *args, **kwargs) def setUp(self): super(GPFSDriverTestCase, self).setUp() self.volumes_path = tempfile.mkdtemp(prefix="gpfs_") self.images_dir = '%s/images' % self.volumes_path self.addCleanup(self._cleanup, self.images_dir, self.volumes_path) if not os.path.exists(self.volumes_path): os.mkdir(self.volumes_path) if not os.path.exists(self.images_dir): os.mkdir(self.images_dir) self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b' self.driver = gpfs.GPFSDriver(configuration=conf.Configuration(None)) self.driver.gpfs_execute = self._execute_wrapper exec_patcher = mock.patch.object(self.driver, '_execute', self._execute_wrapper) exec_patcher.start() self.addCleanup(exec_patcher.stop) self.driver._cluster_id = '123456' self.driver._gpfs_device = '/dev/gpfs' self.driver._storage_pool = 'system' self.driver._encryption_state = 'yes' self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=self.volumes_path) self.context = context.get_admin_context() self.context.user_id = 'fake' self.context.project_id = 'fake' CONF.gpfs_images_dir = self.images_dir def _cleanup(self, images_dir, volumes_path): try: os.rmdir(images_dir) os.rmdir(volumes_path) except OSError: pass def test_different(self): self.assertTrue(gpfs._different((True, False))) self.assertFalse(gpfs._different((True, True))) self.assertFalse(gpfs._different(None)) def test_sizestr(self): self.assertEqual('10G', gpfs._sizestr('10')) @mock.patch('cinder.utils.execute') def test_gpfs_local_execute(self, mock_exec): mock_exec.return_value = 'test' self.driver._gpfs_local_execute('test') expected = [mock.call('test', run_as_root=True)] self.assertEqual(expected, mock_exec.mock_calls) @mock.patch('cinder.utils.execute') def test_get_gpfs_state_ok(self, mock_exec): mock_exec.return_value = ('mmgetstate::HEADER:version:reserved:' 'reserved:nodeName:nodeNumber:state:quorum:' 'nodesUp:totalNodes:remarks:cnfsState:\n' 'mmgetstate::0:1:::devstack:3:active:2:3:3:' 'quorum node:(undefined):', '') self.assertTrue(self.driver._get_gpfs_state().splitlines()[1]. 
                        startswith('mmgetstate::0:1:::devstack'))

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_state_fail_mmgetstate(self, mock_exec):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_gpfs_state)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state')
    def test_check_gpfs_state_ok(self, mock_get_gpfs_state):
        mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:'
                                            'reserved:reserved:nodeName:'
                                            'nodeNumber:state:quorum:nodesUp:'
                                            'totalNodes:remarks:cnfsState:\n'
                                            'mmgetstate::0:1:::devstack:3:'
                                            'active:2:3:3:'
                                            'quorum node:(undefined):')
        self.driver._check_gpfs_state()

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state')
    def test_check_gpfs_state_fail_not_active(self, mock_get_gpfs_state):
        mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:'
                                            'reserved:reserved:nodeName:'
                                            'nodeNumber:state:quorum:nodesUp:'
                                            'totalNodes:remarks:cnfsState:\n'
                                            'mmgetstate::0:1:::devstack:3:'
                                            'arbitrating:2:3:3:'
                                            'quorum node:(undefined):')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._check_gpfs_state)

    @mock.patch('cinder.utils.execute')
    def test_get_fs_from_path_ok(self, mock_exec):
        mock_exec.return_value = ('Filesystem 1K-blocks '
                                  'Used Available Use%% Mounted on\n'
                                  '%s 10485760 531968 9953792'
                                  ' 6%% /gpfs0' % self.driver._gpfs_device,
                                  '')
        self.assertEqual(self.driver._gpfs_device,
                         self.driver._get_filesystem_from_path('/gpfs0'))

    @mock.patch('cinder.utils.execute')
    def test_get_fs_from_path_fail_path(self, mock_exec):
        mock_exec.return_value = ('Filesystem 1K-blocks '
                                  'Used Available Use% Mounted on\n'
                                  'test 10485760 531968 '
                                  '9953792 6% /gpfs0', '')
        self.assertNotEqual(self.driver._gpfs_device,
                            self.driver._get_filesystem_from_path('/gpfs0'))

    @mock.patch('cinder.utils.execute')
    def test_get_fs_from_path_fail_raise(self, mock_exec):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_filesystem_from_path, '/gpfs0')

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_id_ok(self, mock_exec):
        mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:'
                                  'reserved:configParameter:value:nodeList:\n'
                                  'mmlsconfig::0:1:::clusterId:%s::' %
                                  self.driver._cluster_id, '')
        self.assertEqual(self.driver._cluster_id,
                         self.driver._get_gpfs_cluster_id())

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_id_fail_id(self, mock_exec):
        mock_exec.return_value = ('mmlsconfig::HEADER.:version:reserved:'
                                  'reserved:configParameter:value:nodeList:\n'
                                  'mmlsconfig::0:1:::clusterId:test::', '')
        self.assertNotEqual(self.driver._cluster_id,
                            self.driver._get_gpfs_cluster_id())

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_id_fail_raise(self, mock_exec):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_gpfs_cluster_id)

    @mock.patch('cinder.utils.execute')
    def test_get_fileset_from_path_ok(self, mock_exec):
        mock_exec.return_value = ('file name: /gpfs0\n'
                                  'metadata replication: 1 max 2\n'
                                  'data replication: 1 max 2\n'
                                  'immutable: no\n'
                                  'appendOnly: no\n'
                                  'flags:\n'
                                  'storage pool name: system\n'
                                  'fileset name: root\n'
                                  'snapshot name:\n'
                                  'Windows attributes: DIRECTORY', '')
        self.driver._get_fileset_from_path('')

    @mock.patch('cinder.utils.execute')
    def test_get_fileset_from_path_fail_mmlsattr(self,
                                                 mock_exec):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_fileset_from_path, '')

    @mock.patch('cinder.utils.execute')
    def test_get_fileset_from_path_fail_find_fileset(self, mock_exec):
        mock_exec.return_value = ('file name: /gpfs0\n'
                                  'metadata replication: 1 max 2\n'
                                  'data replication: 1 max 2\n'
                                  'immutable: no\n'
                                  'appendOnly: no\n'
                                  'flags:\n'
                                  'storage pool name: system\n'
                                  '*** name: root\n'
                                  'snapshot name:\n'
                                  'Windows attributes: DIRECTORY', '')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_fileset_from_path, '')

    @mock.patch('cinder.utils.execute')
    def test_verify_gpfs_pool_ok(self, mock_exec):
        mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':'
                                  '\n'
                                  'Name Id BlkSize Data '
                                  'Meta '
                                  'Total Data in (KB) Free Data in (KB) '
                                  'Total Meta in (KB) Free Meta in (KB)\n'
                                  'system 0 256 KB yes '
                                  'yes '
                                  ' 10485760 9953792 ( 95%) '
                                  '10485760 9954560 ( 95%)', '')
        # The original assertion passed two arguments to assertTrue, which
        # silently treated the pool check as the failure message; assert on
        # the _verify_gpfs_pool result itself instead.
        self.assertTrue(self.driver._verify_gpfs_pool('/dev/gpfs'))

    @mock.patch('cinder.utils.execute')
    def test_verify_gpfs_pool_fail_pool(self, mock_exec):
        mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':'
                                  '\n'
                                  'Name Id BlkSize Data '
                                  'Meta '
                                  'Total Data in (KB) Free Data in (KB) '
                                  'Total Meta in (KB) Free Meta in (KB)\n'
                                  'test 0 256 KB yes '
                                  'yes'
                                  ' 10485760 9953792 ( 95%)'
                                  ' 10485760 9954560 ( 95%)', '')
        # The mmlspool command itself succeeds here, so _verify_gpfs_pool
        # still reports True even though the listed pool name differs.
        self.assertTrue(self.driver._verify_gpfs_pool('/dev/gpfs'))

    @mock.patch('cinder.utils.execute')
    def test_verify_gpfs_pool_fail_raise(self, mock_exec):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertFalse(self.driver._verify_gpfs_pool('/dev/gpfs'))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.utils.execute')
    def test_update_volume_storage_pool_ok(self, mock_exec, mock_verify_pool):
        mock_verify_pool.return_value = True
        self.assertTrue(self.driver._update_volume_storage_pool('', 'system'))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.utils.execute')
    def test_update_volume_storage_pool_ok_pool_none(self, mock_exec,
                                                     mock_verify_pool):
        mock_verify_pool.return_value = True
        self.assertTrue(self.driver._update_volume_storage_pool('', None))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.utils.execute')
    def test_update_volume_storage_pool_fail_pool(self, mock_exec,
                                                  mock_verify_pool):
        mock_verify_pool.return_value = False
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._update_volume_storage_pool,
                          '', 'system')

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.utils.execute')
    def test_update_volume_storage_pool_fail_mmchattr(self, mock_exec,
                                                      mock_verify_pool):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        mock_verify_pool.return_value = True
        self.assertFalse(self.driver._update_volume_storage_pool('',
                                                                 'system'))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_fs_release_level_ok(self, mock_exec,
                                          mock_fs_from_path):
        mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:'
                                  'deviceName:fieldName:data:remarks:\n'
                                  'mmlsfs::0:1:::gpfs:filesystemVersion:14.03 '
                                  '(4.1.0.0):\n'
                                  'mmlsfs::0:1:::gpfs:filesystemVersionLocal:'
                                  '14.03 (4.1.0.0):\n'
                                  'mmlsfs::0:1:::gpfs:filesystemVersionManager'
                                  ':14.03 (4.1.0.0):\n'
                                  'mmlsfs::0:1:::gpfs:filesystemVersion'
                                  'Original:14.03 (4.1.0.0):\n'
                                  'mmlsfs::0:1:::gpfs:filesystemHighest'
                                  'Supported:14.03 (4.1.0.0):', '')
        mock_fs_from_path.return_value = '/dev/gpfs'
        self.assertEqual(('/dev/gpfs', 1403),
                         self.driver._get_gpfs_fs_release_level(''))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_fs_release_level_fail_mmlsfs(self, mock_exec,
                                                   mock_fs_from_path):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        mock_fs_from_path.return_value = '/dev/gpfs'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_gpfs_fs_release_level, '')

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_release_level_ok(self, mock_exec):
        mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:'
                                  'reserved:configParameter:value:nodeList:\n'
                                  'mmlsconfig::0:1:::minReleaseLevel:1403::',
                                  '')
        self.assertEqual(1403, self.driver._get_gpfs_cluster_release_level())

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_release_level_fail_mmlsconfig(self, mock_exec):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_gpfs_cluster_release_level)

    @mock.patch('cinder.utils.execute')
    def test_is_gpfs_path_fail_mmlsattr(self, mock_exec):
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._is_gpfs_path, '/dummy/path')

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_fileset_from_path')
    @mock.patch('cinder.utils.execute')
    def test_is_same_fileset_ok(self, mock_exec,
                                mock_get_fileset_from_path):
        mock_get_fileset_from_path.return_value = True
        self.assertTrue(self.driver._is_same_fileset('', ''))
        mock_get_fileset_from_path.side_effect = [True, False]
        self.assertFalse(self.driver._is_same_fileset('', ''))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_available_capacity')
    @mock.patch('cinder.utils.execute')
    def test_same_cluster_ok(self, mock_exec, mock_avail_capacity):
        mock_avail_capacity.return_value = (10192683008, 10737418240)
        stats = self.driver.get_volume_stats()
        loc = stats['location_info']
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.assertTrue(self.driver._same_cluster(host))

        locinfo = stats['location_info'] + '_'
        loc = locinfo
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.assertFalse(self.driver._same_cluster(host))

    @mock.patch('cinder.utils.execute')
    def test_set_rw_permission(self, mock_exec):
        self.driver._set_rw_permission('')

    @mock.patch('cinder.utils.execute')
    def test_can_migrate_locally(self, mock_exec):
        host = {'host': 'foo', 'capabilities': ''}
        self.assertIsNone(self.driver._can_migrate_locally(host))

        loc = 'GPFSDriver:%s' % self.driver._cluster_id
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.assertIsNone(self.driver._can_migrate_locally(host))

        loc = 'GPFSDriver_:%s:testpath' % self.driver._cluster_id
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.assertIsNone(self.driver._can_migrate_locally(host))

        loc = 'GPFSDriver:%s:testpath' % (self.driver._cluster_id + '_')
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.assertIsNone(self.driver._can_migrate_locally(host))

        loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.assertEqual('testpath', self.driver._can_migrate_locally(host))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_encryption_status')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_release_level')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_ok(self, mock_exec, mock_get_gpfs_cluster_id,
                         mock_get_filesystem_from_path,
                         mock_verify_gpfs_pool,
                         mock_get_gpfs_fs_rel_lev,
                         mock_verify_encryption_state):
        ctxt = self.context
        mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id
        mock_get_filesystem_from_path.return_value = '/dev/gpfs'
        mock_verify_gpfs_pool.return_value = True
        mock_get_gpfs_fs_rel_lev.return_value = 1405
        mock_verify_encryption_state.return_value = 'Yes'
        self.driver.do_setup(ctxt)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_release_level')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_no_encryption(self, mock_exec,
                                    mock_get_gpfs_cluster_id,
                                    mock_get_filesystem_from_path,
                                    mock_verify_gpfs_pool,
                                    mock_get_gpfs_fs_rel_lev):
        ctxt = self.context
        mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id
        mock_get_filesystem_from_path.return_value = '/dev/gpfs'
        mock_verify_gpfs_pool.return_value = True
        mock_get_gpfs_fs_rel_lev.return_value = 1403
        self.driver.do_setup(ctxt)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_fail_get_cluster_id(self, mock_exec,
                                          mock_get_gpfs_cluster_id,
                                          mock_get_filesystem_from_path,
                                          mock_verify_gpfs_pool):
        ctxt = self.context
        mock_get_gpfs_cluster_id.side_effect = (
            processutils.ProcessExecutionError(stdout='test', stderr='test'))
        mock_get_filesystem_from_path.return_value = '/dev/gpfs'
        mock_verify_gpfs_pool.return_value = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.do_setup, ctxt)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_fail_get_fs_from_path(self, mock_exec,
                                            mock_get_gpfs_cluster_id,
                                            mock_get_fs_from_path,
                                            mock_verify_gpfs_pool):
        ctxt = self.context
        mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id
        mock_get_fs_from_path.side_effect = (
            processutils.ProcessExecutionError(stdout='test', stderr='test'))
        mock_verify_gpfs_pool.return_value = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.do_setup, ctxt)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_fail_volume(self, mock_exec,
                                  mock_get_gpfs_cluster_id,
                                  mock_get_filesystem_from_path,
                                  mock_verify_gpfs_pool):
        ctxt = self.context
        mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id
        mock_get_filesystem_from_path.return_value = '/dev/gpfs'
        mock_verify_gpfs_pool.return_value = False
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.do_setup, ctxt)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._check_gpfs_state')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_get_gpfs_fs_release_level') def test_check_for_setup_error_fail_conf(self, mock_get_gpfs_fs_rel_lev, mock_is_gpfs_path, mock_check_gpfs_state): fake_fs = '/dev/gpfs' fake_fs_release = 1400 fake_cluster_release = 1201 # fail configuration.gpfs_mount_point_base is None org_value = self.driver.configuration.gpfs_mount_point_base self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=None) mock_get_gpfs_fs_rel_lev.return_value = (fake_fs, fake_fs_release) self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=org_value) # fail configuration.gpfs_images_share_mode not in # ['copy_on_write', 'copy'] org_value = self.driver.configuration.gpfs_images_share_mode self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='copy_on_read') self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.flags(volume_driver=self.driver_name, gpfs_images_share_mode=org_value) # fail configuration.gpfs_images_share_mode and # configuration.gpfs_images_dir is None org_value_share_mode = self.driver.configuration.gpfs_images_share_mode self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='copy') org_value_dir = CONF.gpfs_images_dir CONF.gpfs_images_dir = None self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.flags(volume_driver=self.driver_name, gpfs_images_share_mode=org_value_share_mode) CONF.gpfs_images_dir = org_value_dir # fail configuration.gpfs_images_share_mode == 'copy_on_write' and not # _same_filesystem(configuration.gpfs_mount_point_base, # configuration.gpfs_images_dir) org_value = self.driver.configuration.gpfs_images_share_mode self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='copy_on_write') with mock.patch('cinder.volume.drivers.ibm.gpfs._same_filesystem', return_value=False): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.flags(volume_driver=self.driver_name, gpfs_images_share_mode=org_value) # fail self.configuration.gpfs_images_share_mode == 'copy_on_write' and # not self._is_same_fileset(self.configuration.gpfs_mount_point_base, # self.configuration.gpfs_images_dir) org_value = self.driver.configuration.gpfs_images_share_mode self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='copy_on_write') with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_is_same_fileset', return_value=False): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.flags(volume_driver=self.driver_name, gpfs_images_share_mode=org_value) # fail directory is None org_value_share_mode = self.driver.configuration.gpfs_images_share_mode self.flags(volume_driver=self.driver_name, gpfs_images_share_mode=None) org_value_dir = CONF.gpfs_images_dir CONF.gpfs_images_dir = None with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level', return_value=fake_cluster_release): self.driver.check_for_setup_error() self.flags(volume_driver=self.driver_name, gpfs_images_share_mode=org_value_share_mode) CONF.gpfs_images_dir = org_value_dir # fail directory.startswith('/') org_value_mount = self.driver.configuration.gpfs_mount_point_base self.flags(volume_driver=self.driver_name, gpfs_mount_point_base='_' + self.volumes_path) org_value_dir = CONF.gpfs_images_dir CONF.gpfs_images_dir = None with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_get_gpfs_cluster_release_level', return_value=fake_cluster_release): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=org_value_mount) CONF.gpfs_images_dir = org_value_dir # fail os.path.isdir(directory) org_value_mount = self.driver.configuration.gpfs_mount_point_base self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=self.volumes_path + '_') org_value_dir = CONF.gpfs_images_dir CONF.gpfs_images_dir = None with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level', return_value=fake_cluster_release): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=org_value_mount) CONF.gpfs_images_dir = org_value_dir # fail not cluster release level >= GPFS_CLONE_MIN_RELEASE org_fake_cluster_release = fake_cluster_release fake_cluster_release = 1105 org_value_mount = self.driver.configuration.gpfs_mount_point_base self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=self.volumes_path) org_value_dir = CONF.gpfs_images_dir CONF.gpfs_images_dir = None with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level', return_value=fake_cluster_release): with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_fs_release_level', return_value=(fake_fs, fake_fs_release)): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) fake_cluster_release = org_fake_cluster_release # fail not fs release level >= GPFS_CLONE_MIN_RELEASE org_fake_fs_release = fake_fs_release fake_fs_release = 1105 org_value_mount = self.driver.configuration.gpfs_mount_point_base self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=self.volumes_path) org_value_dir = CONF.gpfs_images_dir CONF.gpfs_images_dir = None with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level', return_value=fake_cluster_release): with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_fs_release_level', return_value=(fake_fs, fake_fs_release)): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=org_value_mount) CONF.gpfs_images_dir = org_value_dir fake_fs_release = org_fake_fs_release @mock.patch('cinder.utils.execute') def test_create_sparse_file(self, mock_exec): self.driver._create_sparse_file('', 100) @mock.patch('cinder.utils.execute') def test_allocate_file_blocks(self, mock_exec): self.driver._allocate_file_blocks(os.path.join(self.images_dir, 'test'), 1) @mock.patch('cinder.utils.execute') def test_gpfs_change_attributes(self, mock_exec): options = [] options.extend(['-T', 'test']) self.driver._gpfs_change_attributes(options, self.images_dir) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._mkfs') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_gpfs_change_attributes') def test_set_volume_attributes(self, mock_change_attributes, mock_mkfs): metadata = {'data_pool_name': 'test', 'replicas': 'test', 'dio': 'test', 'write_affinity_depth': 'test', 'block_group_factor': 'test', 'write_affinity_failure_group': 'test', 'fstype': 'test', 'fslabel': 'test', 'test': 'test'} self.driver._set_volume_attributes('', '', metadata) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_gpfs_change_attributes') def test_set_volume_attributes_no_attributes(self, mock_change_attributes): metadata = {} org_value = self.driver.configuration.gpfs_storage_pool self.flags(volume_driver=self.driver_name, gpfs_storage_pool='system') self.driver._set_volume_attributes('', '', metadata) self.flags(volume_driver=self.driver_name, gpfs_storage_pool=org_value) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_gpfs_change_attributes') def test_set_volume_attributes_no_options(self, mock_change_attributes): metadata = {} org_value = self.driver.configuration.gpfs_storage_pool self.flags(volume_driver=self.driver_name, gpfs_storage_pool='') self.driver._set_volume_attributes('', '', metadata) self.flags(volume_driver=self.driver_name, gpfs_storage_pool=org_value) def test_get_volume_metadata(self): volume = self._fake_volume() volume['volume_metadata'] = [{'key': 'fake_key', 'value': 'fake_value'}] expected_metadata = {'fake_key': 'fake_value'} v_metadata = self.driver._get_volume_metadata(volume) self.assertEqual(expected_metadata, v_metadata) volume.pop('volume_metadata') volume['metadata'] = {'key': 'value'} expected_metadata = {'key': 'value'} v_metadata = self.driver._get_volume_metadata(volume) self.assertEqual(expected_metadata, v_metadata) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_allocate_file_blocks') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_sparse_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_create_volume(self, mock_gpfs_path_state, mock_local_path, mock_sparse_file, mock_rw_permission, mock_set_volume_attributes, mock_allocate_file_blocks, mock_exec): mock_local_path.return_value = 'test' volume = self._fake_volume() value = {} value['value'] = 'test' org_value = self.driver.configuration.gpfs_sparse_volumes self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=False) self.driver.create_volume(volume) self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=org_value) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_allocate_file_blocks') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_sparse_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_create_volume_no_sparse_volume(self, mock_gpfs_path_state, mock_local_path, mock_sparse_file, mock_rw_permission, mock_set_volume_attributes, mock_allocate_file_blocks, mock_exec): mock_local_path.return_value = 'test' volume = self._fake_volume() value = {} value['value'] = 'test' org_value = self.driver.configuration.gpfs_sparse_volumes self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=True) self.driver.create_volume(volume) self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=org_value) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_allocate_file_blocks') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_sparse_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_create_volume_with_metadata(self, mock_gpfs_path_state, mock_local_path, mock_sparse_file, mock_rw_permission, mock_set_volume_attributes, mock_allocate_file_blocks, mock_exec): mock_local_path.return_value = 'test' volume = self._fake_volume() value = {} value['value'] = 'test' mock_set_volume_attributes.return_value = True metadata = {'fake_key': 'fake_value'} org_value = self.driver.configuration.gpfs_sparse_volumes self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=False) self.driver.create_volume(volume) self.assertTrue(self.driver._set_volume_attributes(volume, 'test', metadata)) self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=org_value) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.' 'GPFSDriver._get_snapshot_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_create_volume_from_snapshot(self, mock_local_path, mock_snapshot_path, mock_gpfs_full_copy, mock_create_gpfs_copy, mock_rw_permission, mock_gpfs_redirect, mock_set_volume_attributes, mock_resize_volume_file): mock_resize_volume_file.return_value = 5 * units.Gi volume = self._fake_volume() volume['consistencygroup_id'] = None self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume snapshot = self._fake_snapshot() mock_snapshot_path.return_value = "/tmp/fakepath" self.assertEqual({'size': 5.0}, self.driver.create_volume_from_snapshot(volume, snapshot)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_get_snapshot_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_create_volume_from_snapshot_metadata(self, mock_local_path, mock_snapshot_path, mock_gpfs_full_copy, mock_create_gpfs_copy, mock_rw_permission, mock_gpfs_redirect, mock_set_volume_attributes, mock_resize_volume_file): mock_resize_volume_file.return_value = 5 * units.Gi volume = self._fake_volume() volume['consistencygroup_id'] = None self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume snapshot = self._fake_snapshot() mock_snapshot_path.return_value = "/tmp/fakepath" mock_set_volume_attributes.return_value = True metadata = {'fake_key': 'fake_value'} self.assertTrue(self.driver._set_volume_attributes(volume, 'test', metadata)) self.assertEqual({'size': 5.0}, self.driver.create_volume_from_snapshot(volume, snapshot)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_gpfs_clone') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_create_cloned_volume(self, mock_local_path, mock_gpfs_full_copy, mock_create_gpfs_clone, mock_rw_permission, mock_set_volume_attributes, mock_resize_volume_file): mock_resize_volume_file.return_value = 5 * units.Gi volume = self._fake_volume() src_volume = self._fake_volume() self.assertEqual({'size': 5.0}, self.driver.create_cloned_volume(volume, src_volume)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_create_gpfs_clone') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_create_cloned_volume_with_metadata(self, mock_local_path, mock_gpfs_full_copy, mock_create_gpfs_clone, mock_rw_permission, mock_set_volume_attributes, mock_resize_volume_file): mock_resize_volume_file.return_value = 5 * units.Gi volume = self._fake_volume() src_volume = self._fake_volume() mock_set_volume_attributes.return_value = True metadata = {'fake_key': 'fake_value'} self.assertTrue(self.driver._set_volume_attributes(volume, 'test', metadata)) self.assertEqual({'size': 5.0}, self.driver.create_cloned_volume(volume, src_volume)) @mock.patch('cinder.utils.execute') def test_delete_gpfs_file_ok(self, mock_exec): mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) self.driver._delete_gpfs_file(self.images_dir + '_') mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' ' '/gpfs0/test.txt', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) @mock.patch('os.path.exists') @mock.patch('cinder.utils.execute') def test_delete_gpfs_file_ok_parent(self, mock_exec, mock_path_exists): mock_path_exists.side_effect = [True, False, False, True, False, False, True, False, False] mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('/gpfs0/test.snap\ntest', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('/gpfs0/test.ts\ntest', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('/gpfs0/test.txt\ntest', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_verify_gpfs_path_state') def test_delete_volume(self, mock_verify_gpfs_path_state, mock_local_path, mock_delete_gpfs_file): self.driver.delete_volume('') @mock.patch('cinder.utils.execute') def test_gpfs_redirect_ok(self, mock_exec): org_value = self.driver.configuration.gpfs_max_clone_depth self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=1) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('', '')] self.assertTrue(self.driver._gpfs_redirect('')) self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=1) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 1 148488 ' '/gpfs0/test.txt', ''), ('', '')] self.assertFalse(self.driver._gpfs_redirect('')) self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=org_value) @mock.patch('cinder.utils.execute') def test_gpfs_redirect_fail_depth(self, mock_exec): org_value = self.driver.configuration.gpfs_max_clone_depth self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=0) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('', '')] self.assertFalse(self.driver._gpfs_redirect('')) self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=org_value) @mock.patch('cinder.utils.execute') def test_gpfs_redirect_fail_match(self, mock_exec): org_value = self.driver.configuration.gpfs_max_clone_depth self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=1) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' 148488 ' '/gpfs0/test.txt', ''), ('', '')] self.assertFalse(self.driver._gpfs_redirect('')) self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=org_value) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.utils.execute') def test_create_gpfs_clone(self, mock_exec, mock_redirect, mock_cr_gpfs_cp, mock_cr_gpfs_snap): mock_redirect.return_value = True self.driver._create_gpfs_clone('', '') mock_redirect.side_effect = [True, False] self.driver._create_gpfs_clone('', '') @mock.patch('cinder.utils.execute') def test_create_gpfs_copy(self, mock_exec): self.driver._create_gpfs_copy('', '') @mock.patch('cinder.utils.execute') def test_create_gpfs_snap(self, mock_exec): self.driver._create_gpfs_snap('') self.driver._create_gpfs_snap('', '') @mock.patch('cinder.utils.execute') def test_is_gpfs_parent_file_ok(self, mock_exec): mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' yes 2 148488 ' '/gpfs0/test.txt', ''), ('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', '')] self.assertTrue(self.driver._is_gpfs_parent_file('')) self.assertFalse(self.driver._is_gpfs_parent_file('')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_get_snapshot_path') def test_create_snapshot(self, mock_get_snapshot_path, mock_local_path, mock_create_gpfs_snap, mock_set_rw_permission, mock_gpfs_redirect): org_value = self.driver.configuration.gpfs_mount_point_base mock_get_snapshot_path.return_value = "/tmp/fakepath" self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=self.volumes_path) snapshot = {} snapshot['volume_name'] = 'test' self.driver.create_snapshot(snapshot) self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=org_value) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_snapshot_path') def test_delete_snapshot(self, mock_snapshot_path, mock_exec): snapshot = self._fake_snapshot() snapshot_path = "/tmp/fakepath" mock_snapshot_path.return_value = snapshot_path snapshot_ts_path = '%s.ts' % snapshot_path self.driver.delete_snapshot(snapshot) mock_exec.assert_any_call('mv', snapshot_path, snapshot_ts_path) mock_exec.assert_any_call('rm', '-f', snapshot_ts_path, check_exit_code=False) def test_ensure_export(self): self.assertIsNone(self.driver.ensure_export('', '')) def test_create_export(self): self.assertIsNone(self.driver.create_export('', '', {})) def test_remove_export(self): self.assertIsNone(self.driver.remove_export('', '')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_initialize_connection(self, mock_local_path): volume = self._fake_volume() mock_local_path.return_value = "/tmp/fakepath" data = self.driver.initialize_connection(volume, '') self.assertEqual('test', data['data']['name']) self.assertEqual("/tmp/fakepath", data['data']['device_path']) self.assertEqual('gpfs', data['driver_volume_type']) def test_terminate_connection(self): self.assertIsNone(self.driver.terminate_connection('', '')) def test_get_volume_stats(self): fake_avail = 80 * units.Gi fake_size = 2 * fake_avail with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_available_capacity', return_value=(fake_avail, fake_size)): stats = self.driver.get_volume_stats() self.assertEqual('GPFS', stats['volume_backend_name']) self.assertEqual('file', stats['storage_protocol']) self.assertTrue(stats['gpfs_encryption_rest']) stats = self.driver.get_volume_stats(True) self.assertEqual('GPFS', stats['volume_backend_name']) self.assertEqual('file', stats['storage_protocol']) self.assertTrue(stats['gpfs_encryption_rest']) @mock.patch('cinder.utils.execute') def test_get_gpfs_encryption_status_true(self, mock_exec): mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:' 'deviceName:fieldName:data:remarks:\n' 'mmlsfs::0:1:::gpfs:encryption:Yes:', '') self.assertEqual('Yes', self.driver._get_gpfs_encryption_status()) @mock.patch('cinder.utils.execute') def test_get_gpfs_encryption_status_false(self, mock_exec): mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:' 'deviceName:fieldName:data:remarks:\n' 'mmlsfs::0:1:::gpfs:encryption:No:', '') self.assertEqual('No', self.driver._get_gpfs_encryption_status()) @mock.patch('cinder.utils.execute') def test_get_gpfs_encryption_status_fail(self, mock_exec): mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_gpfs_encryption_status) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_update_volume_stats') def test_get_volume_stats_none_stats(self, mock_upd_vol_stats): _stats_org = self.driver._stats self.driver._stats = mock.Mock() self.driver._stats.return_value = None self.driver.get_volume_stats() self.driver._stats = _stats_org @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._clone_image') def test_clone_image_pub(self, mock_exec): self.driver.clone_image('', '', '', {'id': 1}, '') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_is_cloneable_ok(self, mock_is_gpfs_path): self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='test') CONF.gpfs_images_dir = self.images_dir mock_is_gpfs_path.return_value = None self.assertEqual((True, None, os.path.join(CONF.gpfs_images_dir, '12345')), self.driver._is_cloneable('12345')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_is_cloneable_fail_config(self, mock_is_gpfs_path): self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='') CONF.gpfs_images_dir = '' mock_is_gpfs_path.return_value = None self.assertNotEqual((True, None, os.path.join(CONF.gpfs_images_dir, '12345')), self.driver._is_cloneable('12345')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_is_cloneable_fail_path(self, mock_is_gpfs_path): self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='test') CONF.gpfs_images_dir = self.images_dir mock_is_gpfs_path.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertNotEqual((True, None, os.path.join(CONF.gpfs_images_dir, '12345')), self.driver._is_cloneable('12345')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_is_gpfs_parent_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_clone_image_clonable(self, mock_verify_gpfs_path_state, mock_is_cloneable, mock_local_path, mock_is_gpfs_parent_file, mock_create_gpfs_snap, mock_qemu_img_info, mock_create_gpfs_copy, mock_conv_image, mock_set_rw_permission, mock_resize_volume_file): mock_is_cloneable.return_value = (True, 'test', self.images_dir) mock_is_gpfs_parent_file.return_value = False mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') volume = self._fake_volume() self.assertEqual(({'provider_location': None}, True), self.driver._clone_image(volume, '', 1)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver' '._verify_gpfs_path_state') def test_clone_image_not_cloneable(self, mock_verify_gpfs_path_state, mock_is_cloneable): mock_is_cloneable.return_value = (False, 'test', self.images_dir) volume = self._fake_volume() self.assertEqual((None, False), self.driver._clone_image(volume, '', 1)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_is_gpfs_parent_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_clone_image_format_raw_copy_on_write(self, mock_verify_gpfs_path_state, mock_is_cloneable, mock_local_path, mock_is_gpfs_parent_file, mock_create_gpfs_snap, mock_qemu_img_info, mock_create_gpfs_copy, mock_set_rw_permission, mock_resize_volume_file): mock_is_cloneable.return_value = (True, 'test', self.images_dir) mock_local_path.return_value = self.volumes_path mock_is_gpfs_parent_file.return_value = False mock_qemu_img_info.return_value = self._fake_qemu_raw_image_info('') volume = self._fake_volume() org_value = self.driver.configuration.gpfs_images_share_mode self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='copy_on_write') self.assertEqual(({'provider_location': None}, True), self.driver._clone_image(volume, '', 1)) mock_create_gpfs_snap.assert_called_once_with(self.images_dir) self.flags(volume_driver=self.driver_name, gpfs_images_share_mode=org_value) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('shutil.copyfile') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_is_gpfs_parent_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_clone_image_format_raw_copy(self, mock_verify_gpfs_path_state, mock_is_cloneable, mock_local_path, mock_is_gpfs_parent_file, mock_qemu_img_info, mock_copyfile, mock_set_rw_permission, mock_resize_volume_file): mock_is_cloneable.return_value = (True, 'test', self.images_dir) mock_local_path.return_value = self.volumes_path mock_qemu_img_info.return_value = self._fake_qemu_raw_image_info('') volume = self._fake_volume() org_value = self.driver.configuration.gpfs_images_share_mode self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='copy') self.assertEqual(({'provider_location': None}, True), self.driver._clone_image(volume, '', 1)) mock_copyfile.assert_called_once_with(self.images_dir, self.volumes_path) self.flags(volume_driver=self.driver_name, gpfs_images_share_mode=org_value) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_verify_gpfs_path_state') def test_clone_image_format_qcow2(self, mock_verify_gpfs_path_state, mock_is_cloneable, mock_local_path, mock_qemu_img_info, mock_conv_image, mock_set_rw_permission, mock_resize_volume_file): mock_is_cloneable.return_value = (True, 'test', self.images_dir) mock_local_path.return_value = self.volumes_path mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') volume = self._fake_volume() self.assertEqual(({'provider_location': None}, True), self.driver._clone_image(volume, '', 1)) mock_conv_image.assert_called_once_with(self.images_dir, self.volumes_path, 'raw') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.image.image_utils.fetch_to_raw') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_copy_image_to_volume(self, mock_verify_gpfs_path_state, mock_fetch_to_raw, mock_local_path, mock_resize_volume_file): volume = self._fake_volume() self.driver.copy_image_to_volume('', volume, '', 1) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.resize_image') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_resize_volume_file_ok(self, mock_local_path, mock_resize_image, mock_qemu_img_info): volume = self._fake_volume() mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') self.assertEqual(self._fake_qemu_qcow2_image_info('').virtual_size, self.driver._resize_volume_file(volume, 2000)) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.resize_image') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_resize_volume_file_fail(self, mock_local_path, mock_resize_image, mock_qemu_img_info): volume = self._fake_volume() mock_resize_image.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') self.assertRaises(exception.VolumeBackendAPIException, self.driver._resize_volume_file, volume, 2000) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') def test_extend_volume(self, mock_resize_volume_file): volume = self._fake_volume() self.driver.extend_volume(volume, 2000) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.image.image_utils.upload_volume') def test_copy_volume_to_image(self, mock_upload_volume, mock_local_path): volume = self._fake_volume() self.driver.copy_volume_to_image('', volume, '', '') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file') @mock.patch('six.moves.builtins.open') @mock.patch('cinder.utils.temporary_chown') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_create_gpfs_clone') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_backup_volume(self, mock_local_path, mock_create_gpfs_clone, mock_gpfs_redirect, mock_temp_chown, mock_file_open, mock_delete_gpfs_file): volume = self._fake_volume() self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume backup = {} backup['volume_id'] = 'test' backup['id'] = '123456' backup_service = mock.Mock() mock_local_path.return_value = self.volumes_path self.driver.backup_volume('', backup, backup_service) @mock.patch('six.moves.builtins.open') @mock.patch('cinder.utils.temporary_chown') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_restore_backup(self, mock_local_path, mock_temp_chown, mock_file_open): volume = self._fake_volume() backup = {} backup['id'] = '123456' backup_service = mock.Mock() mock_local_path.return_value = self.volumes_path self.driver.restore_backup('', backup, volume, backup_service) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_can_migrate_locally') def test_migrate_volume_ok(self, mock_local, mock_exec): volume = self._fake_volume() host = {} host = {'host': 'foo', 'capabilities': {}} mock_local.return_value = (self.driver.configuration. gpfs_mount_point_base + '_') self.assertEqual((True, None), self.driver._migrate_volume(volume, host)) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_can_migrate_locally') def test_migrate_volume_fail_dest_path(self, mock_local, mock_exec): volume = self._fake_volume() host = {} host = {'host': 'foo', 'capabilities': {}} mock_local.return_value = None self.assertEqual((False, None), self.driver._migrate_volume(volume, host)) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_can_migrate_locally') def test_migrate_volume_fail_mpb(self, mock_local, mock_exec): volume = self._fake_volume() host = {} host = {'host': 'foo', 'capabilities': {}} mock_local.return_value = (self.driver.configuration. gpfs_mount_point_base) mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertEqual((True, None), self.driver._migrate_volume(volume, host)) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_can_migrate_locally') def test_migrate_volume_fail_mv(self, mock_local, mock_exec): volume = self._fake_volume() host = {} host = {'host': 'foo', 'capabilities': {}} mock_local.return_value = ( self.driver.configuration.gpfs_mount_point_base + '_') mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertEqual((False, None), self.driver._migrate_volume(volume, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') def test_migrate_volume_ok_pub(self, mock_migrate_volume): self.driver.migrate_volume('', '', '') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_ok(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() self.driver.db = mock.Mock() mock_different.side_effect = [False, True, True] mock_strg_pool.return_value = True mock_migrate_vol.return_value = (True, True) self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_diff_backend(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() mock_different.side_effect = [True, True, True] self.assertFalse(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_diff_pools_migrated(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() self.driver.db = mock.Mock() mock_different.side_effect = [False, False, True] mock_strg_pool.return_value = True mock_migrate_vol.return_value = (True, True) self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_diff_pools(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() mock_different.side_effect = [False, False, True] mock_strg_pool.return_value = True mock_migrate_vol.return_value = (False, False) self.assertFalse(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_no_diff_hit(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() mock_different.side_effect = [False, False, False] self.assertFalse(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.utils.execute') def test_mkfs_ok(self, mock_exec): volume = self._fake_volume() self.driver._mkfs(volume, 'swap') self.driver._mkfs(volume, 'swap', 'test') self.driver._mkfs(volume, 'ext3', 'test') self.driver._mkfs(volume, 'vfat', 'test') @mock.patch('cinder.utils.execute') def test_mkfs_fail_mk(self, mock_exec): volume = self._fake_volume() mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver._mkfs, volume, 'swap', 'test') @mock.patch('cinder.utils.execute') def test_get_available_capacity_ok(self, mock_exec): mock_exec.return_value = ('Filesystem 1-blocks Used ' 'Available Capacity Mounted on\n' '/dev/gpfs 10737418240 544735232 ' '10192683008 6%% /gpfs0', '') self.assertEqual((10192683008, 10737418240), self.driver._get_available_capacity('/gpfs0')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') @mock.patch('cinder.utils.execute') def test_get_available_capacity_fail_mounted(self, mock_exec, mock_path_state): mock_path_state.side_effect = ( exception.VolumeBackendAPIException('test')) mock_exec.return_value = ('Filesystem 1-blocks Used ' 'Available Capacity Mounted on\n' '/dev/gpfs 10737418240 544735232 ' '10192683008 6%% /gpfs0', '') self.assertEqual((0, 0), self.driver._get_available_capacity('/gpfs0')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_verify_gpfs_path_state_ok(self, mock_is_gpfs_path): self.driver._verify_gpfs_path_state(self.images_dir) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_verify_gpfs_path_state_fail_path(self, mock_is_gpfs_path): mock_is_gpfs_path.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver._verify_gpfs_path_state, self.images_dir) @mock.patch('cinder.utils.execute') def test_create_consistencygroup(self, mock_exec): ctxt = self.context group = self._fake_group() self.driver.create_consistencygroup(ctxt, group) fsdev = self.driver._gpfs_device cgname = "consisgroup-%s" % group['id'] cgpath = os.path.join(self.driver.configuration.gpfs_mount_point_base, cgname) cmd = ['mmcrfileset', fsdev, cgname, '--inode-space', 'new'] mock_exec.assert_any_call(*cmd) cmd = ['mmlinkfileset', fsdev, cgname, '-J', cgpath] mock_exec.assert_any_call(*cmd) cmd = ['chmod', '770', cgpath] mock_exec.assert_any_call(*cmd) @mock.patch('cinder.utils.execute') def test_create_consistencygroup_fail(self, mock_exec): ctxt = self.context group = self._fake_group() mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_consistencygroup, ctxt, group) @mock.patch('cinder.utils.execute') def test_delete_consistencygroup(self, mock_exec): ctxt = self.context group = self._fake_group() group['status'] = fields.ConsistencyGroupStatus.AVAILABLE volume = self._fake_volume() volume['status'] = 'available' volumes = [] volumes.append(volume) self.driver.db = mock.Mock() 
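        # Stub the DB layer so delete_consistencygroup sees exactly one
        # available volume in the group being deleted.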
self.driver.db.volume_get_all_by_group = mock.Mock() self.driver.db.volume_get_all_by_group.return_value = volumes self.driver.delete_consistencygroup(ctxt, group, []) fsdev = self.driver._gpfs_device cgname = "consisgroup-%s" % group['id'] cmd = ['mmunlinkfileset', fsdev, cgname, '-f'] mock_exec.assert_any_call(*cmd) cmd = ['mmdelfileset', fsdev, cgname, '-f'] mock_exec.assert_any_call(*cmd) @mock.patch('cinder.utils.execute') def test_delete_consistencygroup_fail(self, mock_exec): ctxt = self.context group = self._fake_group() group['status'] = fields.ConsistencyGroupStatus.AVAILABLE self.driver.db = mock.Mock() self.driver.db.volume_get_all_by_group = mock.Mock() self.driver.db.volume_get_all_by_group.return_value = [] mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_consistencygroup, ctxt, group, []) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot') def test_create_cgsnapshot(self, mock_create_snap): ctxt = self.context cgsnap = self._fake_cgsnapshot() self.driver.db = mock.Mock() self.driver.db.snapshot_get_all_for_cgsnapshot = mock.Mock() snapshot1 = self._fake_snapshot() snapshots = [snapshot1] self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = snapshots model_update, snapshots = self.driver.create_cgsnapshot(ctxt, cgsnap, []) self.driver.create_snapshot.assert_called_once_with(snapshot1) self.assertEqual({'status': cgsnap['status']}, model_update) self.assertEqual('available', snapshot1['status']) self.driver.db.snapshot_get_all_for_cgsnapshot.\ assert_called_once_with(ctxt, cgsnap['id']) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot') def test_create_cgsnapshot_empty(self, mock_create_snap): ctxt = self.context cgsnap = self._fake_cgsnapshot() self.driver.db = mock.Mock() self.driver.db.snapshot_get_all_for_cgsnapshot = mock.Mock() snapshots = [] self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = snapshots model_update, snapshots = self.driver.create_cgsnapshot(ctxt, cgsnap, []) self.assertFalse(self.driver.create_snapshot.called) self.assertEqual({'status': cgsnap['status']}, model_update) self.driver.db.snapshot_get_all_for_cgsnapshot.\ assert_called_once_with(ctxt, cgsnap['id']) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot') def test_delete_cgsnapshot(self, mock_delete_snap): ctxt = self.context cgsnap = self._fake_cgsnapshot() self.driver.db = mock.Mock() self.driver.db.snapshot_get_all_for_cgsnapshot = mock.Mock() snapshot1 = self._fake_snapshot() snapshots = [snapshot1] self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = snapshots model_update, snapshots = self.driver.delete_cgsnapshot(ctxt, cgsnap, []) self.driver.delete_snapshot.assert_called_once_with(snapshot1) self.assertEqual({'status': cgsnap['status']}, model_update) self.assertEqual('deleted', snapshot1['status']) self.driver.db.snapshot_get_all_for_cgsnapshot.\ assert_called_once_with(ctxt, cgsnap['id']) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot') def test_delete_cgsnapshot_empty(self, mock_delete_snap): ctxt = self.context cgsnap = self._fake_cgsnapshot() self.driver.db = mock.Mock() self.driver.db.snapshot_get_all_for_cgsnapshot = mock.Mock() snapshots = [] self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = snapshots model_update, snapshots = self.driver.delete_cgsnapshot(ctxt, cgsnap, []) self.assertFalse(self.driver.delete_snapshot.called) 
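        # Editor's aside (sketch, not part of the upstream suite): the
        # retype tests above hand mock's side_effect a list, and each call
        # to the mocked _different() consumes the next element, so a
        # sequence like [False, False, True] simulates three successive
        # comparisons where only the last one differs.  In miniature:
        #
        #     import mock
        #
        #     checker = mock.Mock(side_effect=[False, False, True])
        #     assert checker() is False   # first call
        #     assert checker() is False   # second call
        #     assert checker() is True    # third call; a fourth would
        #                                 # raise StopIteration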
self.assertEqual({'status': cgsnap['status']}, model_update) self.driver.db.snapshot_get_all_for_cgsnapshot.\ assert_called_once_with(ctxt, cgsnap['id']) def test_local_path_volume_not_in_cg(self): volume = self._fake_volume() volume['consistencygroup_id'] = None volume_path = os.path.join( self.driver.configuration.gpfs_mount_point_base, volume['name'] ) ret = self.driver.local_path(volume) self.assertEqual(volume_path, ret) def test_local_path_volume_in_cg(self): volume = self._fake_volume() cgname = "consisgroup-%s" % volume['consistencygroup_id'] volume_path = os.path.join( self.driver.configuration.gpfs_mount_point_base, cgname, volume['name'] ) ret = self.driver.local_path(volume) self.assertEqual(volume_path, ret) @mock.patch('cinder.context.get_admin_context') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_get_snapshot_path(self, mock_local_path, mock_admin_context): volume = self._fake_volume() self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume volume_path = self.volumes_path mock_local_path.return_value = volume_path snapshot = self._fake_snapshot() ret = self.driver._get_snapshot_path(snapshot) self.assertEqual( os.path.join(os.path.dirname(volume_path), snapshot['name']), ret ) @mock.patch('cinder.utils.execute') def test_gpfs_full_copy(self, mock_exec): src = "/tmp/vol1" dest = "/tmp/vol2" self.driver._gpfs_full_copy(src, dest) mock_exec.assert_called_once_with('cp', src, dest, check_exit_code=True) def _fake_volume(self): volume = {} volume['id'] = '123456' volume['name'] = 'test' volume['size'] = 1000 volume['consistencygroup_id'] = 'cg-1234' return volume def _fake_snapshot(self): snapshot = {} snapshot['id'] = '12345' snapshot['name'] = 'test-snap' snapshot['size'] = 1000 snapshot['volume_id'] = '123456' snapshot['status'] = 'available' return snapshot def _fake_volume_in_cg(self): volume = {} volume['id'] = '123456' volume['name'] = 'test' volume['size'] = 1000 volume['consistencygroup_id'] = 'fakecg' return volume def _fake_group(self): group = {} group['name'] = 'test_group' group['id'] = '123456' return group def _fake_cgsnapshot(self): cgsnap = {} cgsnap['id'] = '123456' cgsnap['name'] = 'testsnap' cgsnap['consistencygroup_id'] = '123456' cgsnap['status'] = 'available' return cgsnap def _fake_qemu_qcow2_image_info(self, path): data = FakeQemuImgInfo() data.file_format = 'qcow2' data.backing_file = None data.virtual_size = 1 * units.Gi return data def _fake_qemu_raw_image_info(self, path): data = FakeQemuImgInfo() data.file_format = 'raw' data.backing_file = None data.virtual_size = 1 * units.Gi return data def _fake_retype_arguments(self): ctxt = self.context loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} key_specs_old = {'capabilities:storage_pool': 'bronze', 'volume_backend_name': 'backend1'} key_specs_new = {'capabilities:storage_pool': 'gold', 'volume_backend_name': 'backend1'} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) volume_types.get_volume_type(ctxt, old_type_ref['id']) new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = self._fake_volume() volume['host'] = host return (volume, new_type, diff, host) class GPFSNFSDriverTestCase(test.TestCase): driver_name = 
"cinder.volume.drivers.gpfs.GPFSNFSDriver" TEST_NFS_EXPORT = 'nfs-host1:/export' TEST_SIZE_IN_GB = 1 TEST_EXTEND_SIZE_IN_GB = 2 TEST_MNT_POINT = '/mnt/nfs' TEST_MNT_POINT_BASE = '/mnt' TEST_GPFS_MNT_POINT_BASE = '/export' TEST_LOCAL_PATH = '/mnt/nfs/volume-123' TEST_VOLUME_PATH = '/export/volume-123' TEST_SNAP_PATH = '/export/snapshot-123' def _execute_wrapper(self, cmd, *args, **kwargs): try: kwargs.pop('run_as_root') except KeyError: pass return utils.execute(cmd, *args, **kwargs) def _fake_volume(self): volume = {} volume['id'] = '123456' volume['name'] = 'test' volume['size'] = 1000 volume['consistencygroup_id'] = 'cg-1234' return volume def _fake_snapshot(self): snapshot = {} snapshot['id'] = '12345' snapshot['name'] = 'test-snap' snapshot['size'] = 1000 snapshot['volume_id'] = '123456' snapshot['status'] = 'available' return snapshot def setUp(self): super(GPFSNFSDriverTestCase, self).setUp() self.driver = gpfs.GPFSNFSDriver(configuration=conf. Configuration(None)) self.driver.gpfs_execute = self._execute_wrapper self.context = context.get_admin_context() self.context.user_id = 'fake' self.context.project_id = 'fake' @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_run_ssh') def test_gpfs_remote_execute(self, mock_run_ssh): mock_run_ssh.return_value = 'test' self.driver._gpfs_remote_execute('test', check_exit_code=True) expected = [mock.call(('test',), True)] self.assertEqual(expected, mock_run_ssh.mock_calls) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_ensure_shares_mounted') def test_update_volume_stats(self, mock_ensure): """Check update volume stats.""" mock_ensure.return_value = True fake_avail = 80 * units.Gi fake_size = 2 * fake_avail fake_used = 10 * units.Gi with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_get_capacity_info', return_value=(fake_avail, fake_size, fake_used)): stats = self.driver.get_volume_stats() self.assertEqual('GPFSNFS', stats['volume_backend_name']) self.assertEqual('file', stats['storage_protocol']) stats = self.driver.get_volume_stats(True) self.assertEqual('GPFSNFS', stats['volume_backend_name']) self.assertEqual('file', stats['storage_protocol']) def test_get_volume_path(self): self.driver.configuration.gpfs_mount_point_base = ( self.TEST_GPFS_MNT_POINT_BASE) volume = self._fake_volume() self.assertEqual('/export/consisgroup-cg-1234/test', self.driver._get_volume_path(volume)) volume['consistencygroup_id'] = None self.assertEqual('/export/test', self.driver._get_volume_path(volume)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_get_mount_point_for_share') def test_local_path(self, mock_mount_point): mock_mount_point.return_value = self.TEST_MNT_POINT volume = self._fake_volume() volume['provider_location'] = self.TEST_GPFS_MNT_POINT_BASE self.assertEqual('/mnt/nfs/consisgroup-cg-1234/test', self.driver.local_path(volume)) volume['consistencygroup_id'] = None self.assertEqual('/mnt/nfs/test', self.driver.local_path(volume)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_get_volume_path') def test_get_snapshot_path(self, mock_volume_path): volume = self._fake_volume() self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume mock_volume_path.return_value = os.path.join(self. TEST_GPFS_MNT_POINT_BASE, volume['name']) snapshot = self._fake_snapshot() self.assertEqual('/export/test-snap', self.driver._get_snapshot_path(snapshot)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' 
'_find_share') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 'create_volume') def test_create_volume(self, mock_create_volume, mock_find_share): volume = self._fake_volume() mock_find_share.return_value = self.TEST_VOLUME_PATH self.assertEqual({'provider_location': self.TEST_VOLUME_PATH}, self.driver.create_volume(volume)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_delete_gpfs_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' 'local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_get_volume_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_delete_volume(self, mock_verify_gpfs_path_state, mock_volume_path, mock_local_path, mock_delete_gpfs_file): self.driver.delete_volume('') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 'delete_snapshot') def test_delete_snapshot(self, mock_delete_snapshot): self.driver.delete_snapshot('') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_find_share') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_volume_from_snapshot') def test_create_volume_from_snapshot(self, mock_create_volume_from_snapshot, mock_find_share, mock_resize_volume_file): volume = self._fake_volume() snapshot = self._fake_snapshot() mock_find_share.return_value = self.TEST_VOLUME_PATH self.assertEqual({'provider_location': self.TEST_VOLUME_PATH}, self.driver.create_volume_from_snapshot(volume, snapshot)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_find_share') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_cloned_volume') def test_create_cloned_volume(self, mock_create_cloned_volume, mock_find_share, mock_resize_volume_file): volume = self._fake_volume() src_vref = self._fake_volume() mock_find_share.return_value = self.TEST_VOLUME_PATH self.assertEqual({'provider_location': self.TEST_VOLUME_PATH}, self.driver.create_cloned_volume(volume, src_vref)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_delete_gpfs_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_do_backup') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_backup_source') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' 'local_path') def test_backup_volume(self, mock_local_path, mock_create_backup_source, mock_do_backup, mock_delete_gpfs_file): volume = self._fake_volume() self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume backup = {} backup['volume_id'] = 'test' backup['id'] = '123456' backup_service = mock.Mock() mock_local_path.return_value = self.TEST_VOLUME_PATH self.driver.backup_volume('', backup, backup_service) cinder-8.0.0/cinder/tests/unit/test_eqlx.py0000664000567000056710000005170312701406250022076 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Dell Inc. # Copyright 2013 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import time from eventlet import greenthread import mock from oslo_concurrency import processutils import paramiko import six from cinder import context from cinder import exception from cinder import ssh_utils from cinder import test from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers import eqlx class DellEQLSanISCSIDriverTestCase(test.TestCase): def setUp(self): super(DellEQLSanISCSIDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.san_is_local = False self.configuration.san_ip = "10.0.0.1" self.configuration.san_login = "foo" self.configuration.san_password = "bar" self.configuration.san_ssh_port = 16022 self.configuration.san_thin_provision = True self.configuration.san_private_key = 'foo' self.configuration.ssh_min_pool_conn = 1 self.configuration.ssh_max_pool_conn = 5 self.configuration.ssh_conn_timeout = 30 self.configuration.eqlx_pool = 'non-default' self.configuration.eqlx_group_name = 'group-0' self.configuration.eqlx_cli_timeout = 30 self.configuration.eqlx_cli_max_retries = 5 self.configuration.eqlx_use_chap = False self.configuration.use_chap_auth = True self.configuration.chap_username = 'admin' self.configuration.chap_password = 'password' self.configuration.max_over_subscription_ratio = 1.0 self.driver_stats_output = ['TotalCapacity: 111GB', 'FreeSpace: 11GB', 'VolumeReserve: 80GB'] self.cmd = 'this is dummy command' self._context = context.get_admin_context() self.driver = eqlx.DellEQLSanISCSIDriver( configuration=self.configuration) self.volume_name = "fakevolume" self.volid = "fakeid" self.connector = { 'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:2227dab76162', 'host': 'fakehost'} self.access_record_output = [ "ID Initiator Ipaddress AuthMethod UserName Apply-To", "--- --------------- ------------- ---------- ---------- --------", "1 iqn.1993-08.org.debian:01:222 *.*.*.* none both", " 7dab76162"] self.fake_iqn = 'iqn.2003-10.com.equallogic:group01:25366:fakev' self.fake_iqn_return = ['iSCSI target name is %s.' 
% self.fake_iqn] self.driver._group_ip = '10.0.1.6' self.properties = { 'target_discovered': True, 'target_portal': '%s:3260' % self.driver._group_ip, 'target_iqn': self.fake_iqn, 'volume_id': 1} self._model_update = { 'provider_location': "%s:3260,1 %s 0" % (self.driver._group_ip, self.fake_iqn), 'provider_auth': 'CHAP %s %s' % ( self.configuration.chap_username, self.configuration.chap_password) } def _fake_get_iscsi_properties(self, volume): return self.properties def test_create_volume(self): volume = {'name': self.volume_name, 'size': 1} mock_attrs = {'args': ['volume', 'create', volume['name'], "%sG" % (volume['size']), 'pool', self.configuration.eqlx_pool, 'thin-provision']} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) mock_eql_execute.return_value = self.fake_iqn_return model_update = self.driver.create_volume(volume) self.assertEqual(self._model_update, model_update) def test_delete_volume(self): volume = {'name': self.volume_name, 'size': 1} show_attrs = {'args': ['volume', 'select', volume['name'], 'show']} off_attrs = {'args': ['volume', 'select', volume['name'], 'offline']} delete_attrs = {'args': ['volume', 'delete', volume['name']]} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**show_attrs) mock_eql_execute.configure_mock(**off_attrs) mock_eql_execute.configure_mock(**delete_attrs) self.driver.delete_volume(volume) def test_delete_absent_volume(self): volume = {'name': self.volume_name, 'size': 1, 'id': self.volid} mock_attrs = {'args': ['volume', 'select', volume['name'], 'show']} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) mock_eql_execute.side_effect = processutils.ProcessExecutionError( stdout='% Error ..... 
does not exist.\n') self.driver.delete_volume(volume) def test_ensure_export(self): volume = {'name': self.volume_name, 'size': 1} mock_attrs = {'args': ['volume', 'select', volume['name'], 'show']} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) self.driver.ensure_export({}, volume) def test_create_snapshot(self): snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} snap_name = 'fake_snap_name' with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.return_value = ['Snapshot name is %s' % snap_name] self.driver.create_snapshot(snapshot) def test_create_volume_from_snapshot(self): snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} volume = {'name': self.volume_name} mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'], 'snapshot', 'select', snapshot['name'], 'clone', volume['name']]} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) mock_eql_execute.return_value = self.fake_iqn_return model_update = self.driver.create_volume_from_snapshot(volume, snapshot) self.assertEqual(self._model_update, model_update) def test_create_cloned_volume(self): src_vref = {'name': 'fake_uuid'} volume = {'name': self.volume_name} mock_attrs = {'args': ['volume', 'select', volume['name'], 'multihost-access', 'enable']} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) mock_eql_execute.return_value = self.fake_iqn_return model_update = self.driver.create_cloned_volume(volume, src_vref) self.assertEqual(self._model_update, model_update) def test_delete_snapshot(self): snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'], 'snapshot', 'delete', snapshot['name']]} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) self.driver.delete_snapshot(snapshot) def test_extend_volume(self): new_size = '200' volume = {'name': self.volume_name, 'size': 100} mock_attrs = {'args': ['volume', 'select', volume['name'], 'size', "%sG" % new_size]} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) self.driver.extend_volume(volume, new_size) def test_initialize_connection(self): volume = {'name': self.volume_name} mock_attrs = {'args': ['volume', 'select', volume['name'], 'access', 'create', 'initiator', self.connector['initiator'], 'authmethod', 'chap', 'username', self.configuration.chap_username]} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: with mock.patch.object(self.driver, '_get_iscsi_properties') as mock_iscsi: mock_eql_execute.configure_mock(**mock_attrs) mock_iscsi.return_value = self.properties iscsi_properties = self.driver.initialize_connection( volume, self.connector) self.assertEqual(self._fake_get_iscsi_properties(volume), iscsi_properties['data']) def test_terminate_connection(self): def my_side_effect(*args, **kwargs): if args[4] == 'show': return self.access_record_output else: return '' volume = {'name': self.volume_name} with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.side_effect = my_side_effect self.driver.terminate_connection(volume, self.connector) def test_do_setup(self): fake_group_ip = '10.1.2.3' def my_side_effect(*args, **kwargs): if args[0] == 
'grpparams': return ['Group-Ipaddress: %s' % fake_group_ip] else: return '' with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.side_effect = my_side_effect self.driver.do_setup(self._context) self.assertEqual(fake_group_ip, self.driver._group_ip) def test_update_volume_stats_thin(self): mock_attrs = {'args': ['pool', 'select', self.configuration.eqlx_pool, 'show']} self.configuration.san_thin_provision = True with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) mock_eql_execute.return_value = self.driver_stats_output self.driver._update_volume_stats() self.assert_volume_stats(self.driver._stats) def test_update_volume_stats_thick(self): mock_attrs = {'args': ['pool', 'select', self.configuration.eqlx_pool, 'show']} self.configuration.san_thin_provision = False with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) mock_eql_execute.return_value = self.driver_stats_output self.driver._update_volume_stats() self.assert_volume_stats(self.driver._stats) def test_get_volume_stats_thin(self): mock_attrs = {'args': ['pool', 'select', self.configuration.eqlx_pool, 'show']} self.configuration.san_thin_provision = True with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) mock_eql_execute.return_value = self.driver_stats_output stats = self.driver.get_volume_stats(refresh=True) self.assert_volume_stats(stats) def test_get_volume_stats_thick(self): mock_attrs = {'args': ['pool', 'select', self.configuration.eqlx_pool, 'show']} self.configuration.san_thin_provision = False with mock.patch.object(self.driver, '_eql_execute') as mock_eql_execute: mock_eql_execute.configure_mock(**mock_attrs) mock_eql_execute.return_value = self.driver_stats_output stats = self.driver.get_volume_stats(refresh=True) self.assert_volume_stats(stats) def assert_volume_stats(self, stats): thin_enabled = self.configuration.san_thin_provision self.assertEqual(float('111.0'), stats['total_capacity_gb']) self.assertEqual(float('11.0'), stats['free_capacity_gb']) if thin_enabled: self.assertEqual(80.0, stats['provisioned_capacity_gb']) else: space = stats['total_capacity_gb'] - stats['free_capacity_gb'] self.assertEqual(space, stats['provisioned_capacity_gb']) self.assertEqual(thin_enabled, stats['thin_provisioning_support']) self.assertEqual(not thin_enabled, stats['thick_provisioning_support']) self.assertEqual('Dell', stats['vendor_name']) self.assertTrue(stats['multiattach']) def test_get_space_in_gb(self): self.assertEqual(123.0, self.driver._get_space_in_gb('123.0GB')) self.assertEqual(123.0 * 1024, self.driver._get_space_in_gb('123.0TB')) self.assertEqual(1.0, self.driver._get_space_in_gb('1024.0MB')) def test_get_output(self): def _fake_recv(ignore_arg): return '%s> ' % self.configuration.eqlx_group_name chan = mock.Mock(paramiko.Channel) mock_recv = self.mock_object(chan, 'recv') mock_recv.return_value = '%s> ' % self.configuration.eqlx_group_name self.assertEqual([_fake_recv(None)], self.driver._get_output(chan)) def test_get_prefixed_value(self): lines = ['Line1 passed', 'Line1 failed'] prefix = ['Line1', 'Line2'] expected_output = [' passed', None] self.assertEqual(expected_output[0], self.driver._get_prefixed_value(lines, prefix[0])) self.assertEqual(expected_output[1], self.driver._get_prefixed_value(lines, prefix[1])) def test_ssh_execute(self): ssh = mock.Mock(paramiko.SSHClient) chan = 
mock.Mock(paramiko.Channel) transport = mock.Mock(paramiko.Transport) mock_get_output = self.mock_object(self.driver, '_get_output') self.mock_object(chan, 'invoke_shell') expected_output = ['NoError: test run'] mock_get_output.return_value = expected_output ssh.get_transport.return_value = transport transport.open_session.return_value = chan chan.invoke_shell() chan.send('stty columns 255' + '\r') chan.send(self.cmd + '\r') chan.close() self.assertEqual(expected_output, self.driver._ssh_execute(ssh, self.cmd)) def test_ssh_execute_error(self): self.mock_object(self.driver, '_ssh_execute', mock.Mock(side_effect= processutils.ProcessExecutionError)) ssh = mock.Mock(paramiko.SSHClient) chan = mock.Mock(paramiko.Channel) transport = mock.Mock(paramiko.Transport) mock_get_output = self.mock_object(self.driver, '_get_output') self.mock_object(ssh, 'get_transport') self.mock_object(chan, 'invoke_shell') expected_output = ['Error: test run', '% Error'] mock_get_output.return_value = expected_output ssh.get_transport().return_value = transport transport.open_session.return_value = chan chan.invoke_shell() chan.send('stty columns 255' + '\r') chan.send(self.cmd + '\r') chan.close() self.assertRaises(processutils.ProcessExecutionError, self.driver._ssh_execute, ssh, self.cmd) @mock.patch.object(greenthread, 'sleep') def test_ensure_retries(self, _gt_sleep): num_attempts = 3 self.driver.configuration.eqlx_cli_max_retries = num_attempts self.mock_object(self.driver, '_ssh_execute', mock.Mock(side_effect=exception. VolumeBackendAPIException("some error"))) # mocks for calls in _run_ssh self.mock_object(utils, 'check_ssh_injection') self.mock_object(ssh_utils, 'SSHPool') sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) self.driver.sshpool = mock.Mock(return_value=sshpool) ssh = mock.Mock(paramiko.SSHClient) self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh) self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False) # now call the execute self.assertRaises(exception.VolumeBackendAPIException, self.driver._eql_execute, "fake command") self.assertEqual(num_attempts + 1, self.driver._ssh_execute.call_count) @mock.patch.object(greenthread, 'sleep') def test_ensure_connection_retries(self, _gt_sleep): num_attempts = 3 self.driver.configuration.eqlx_cli_max_retries = num_attempts self.mock_object(self.driver, '_ssh_execute', mock.Mock(side_effect= processutils.ProcessExecutionError (stdout='% Error ... 
some error.\n'))) # mocks for calls in _run_ssh self.mock_object(utils, 'check_ssh_injection') self.mock_object(ssh_utils, 'SSHPool') sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) self.driver.sshpool = mock.Mock(return_value=sshpool) ssh = mock.Mock(paramiko.SSHClient) self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh) self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False) # now call the execute self.assertRaises(exception.VolumeBackendAPIException, self.driver._eql_execute, "fake command") self.assertEqual(num_attempts + 1, self.driver._ssh_execute.call_count) @mock.patch.object(greenthread, 'sleep') def test_ensure_retries_on_channel_timeout(self, _gt_sleep): num_attempts = 3 self.driver.configuration.eqlx_cli_max_retries = num_attempts # mocks for calls and objects in _run_ssh self.mock_object(utils, 'check_ssh_injection') self.mock_object(ssh_utils, 'SSHPool') sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) self.driver.sshpool = mock.Mock(return_value=sshpool) ssh = mock.Mock(paramiko.SSHClient) self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh) self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False) # mocks for _ssh_execute and _get_output self.mock_object(self.driver, '_get_output', mock.Mock(side_effect=exception. VolumeBackendAPIException("some error"))) # now call the execute with mock.patch('sys.stderr', new=six.StringIO()): self.assertRaises(exception.VolumeBackendAPIException, self.driver._eql_execute, "fake command") self.assertEqual(num_attempts + 1, self.driver._get_output.call_count) def test_with_timeout(self): @eqlx.with_timeout def no_timeout(cmd, *args, **kwargs): return 'no timeout' @eqlx.with_timeout def w_timeout(cmd, *args, **kwargs): time.sleep(1) self.assertEqual('no timeout', no_timeout('fake cmd')) self.assertRaises(exception.VolumeBackendAPIException, w_timeout, 'fake cmd', timeout=0.1) def test_local_path(self): self.assertRaises(NotImplementedError, self.driver.local_path, '') cinder-8.0.0/cinder/tests/unit/test_cmd.py0000664000567000056710000023553212701406257021703 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
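# Editor's note (sketch, not upstream code): every TestCinder*Cmd class in
# this module follows the same recipe: point sys.argv at the console script
# under test, re-initialise oslo.config from it, then invoke the command's
# main() with its collaborators patched out.  The CONF call pattern in
# miniature:
#
#     import sys
#     from oslo_config import cfg
#
#     CONF = cfg.CONF
#     sys.argv = ['cinder-api']             # pretend argv of the binary
#     CONF(sys.argv[1:], project='cinder')  # parse everything after argv[0]
#     assert CONF.project == 'cinder'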
import datetime import six import sys from cinder import rpc try: from unittest import mock except ImportError: import mock from oslo_config import cfg try: import rtslib_fb except ImportError: import rtslib as rtslib_fb from cinder.cmd import all as cinder_all from cinder.cmd import api as cinder_api from cinder.cmd import backup as cinder_backup from cinder.cmd import manage as cinder_manage from cinder.cmd import rtstool as cinder_rtstool from cinder.cmd import scheduler as cinder_scheduler from cinder.cmd import volume as cinder_volume from cinder.cmd import volume_usage_audit from cinder import context from cinder import exception from cinder.objects import fields from cinder import test from cinder.tests.unit import fake_constants from cinder.tests.unit import fake_volume from cinder import version CONF = cfg.CONF class TestCinderApiCmd(test.TestCase): """Unit test cases for python modules under cinder/cmd.""" def setUp(self): super(TestCinderApiCmd, self).setUp() sys.argv = ['cinder-api'] CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderApiCmd, self).tearDown() @mock.patch('cinder.service.WSGIService') @mock.patch('cinder.service.process_launcher') @mock.patch('cinder.rpc.init') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, rpc_init, process_launcher, wsgi_service): launcher = process_launcher.return_value server = wsgi_service.return_value server.workers = mock.sentinel.worker_count cinder_api.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() rpc_init.assert_called_once_with(CONF) process_launcher.assert_called_once_with() wsgi_service.assert_called_once_with('osapi_volume') launcher.launch_service.assert_called_once_with(server, workers=server.workers) launcher.wait.assert_called_once_with() class TestCinderBackupCmd(test.TestCase): def setUp(self): super(TestCinderBackupCmd, self).setUp() sys.argv = ['cinder-backup'] CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderBackupCmd, self).tearDown() @mock.patch('cinder.service.wait') @mock.patch('cinder.service.serve') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, service_create, service_serve, service_wait): server = service_create.return_value cinder_backup.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() service_create.assert_called_once_with(binary='cinder-backup') service_serve.assert_called_once_with(server) service_wait.assert_called_once_with() class TestCinderAllCmd(test.TestCase): def setUp(self): super(TestCinderAllCmd, self).setUp() sys.argv = ['cinder-all'] CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderAllCmd, self).tearDown() @mock.patch('cinder.rpc.init') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.service.WSGIService') @mock.patch('cinder.service.process_launcher') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, get_logger, monkey_patch, process_launcher, 
wsgi_service, service_create, rpc_init): CONF.set_override('enabled_backends', None) launcher = process_launcher.return_value server = wsgi_service.return_value server.workers = mock.sentinel.worker_count service = service_create.return_value cinder_all.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder.all') monkey_patch.assert_called_once_with() rpc_init.assert_called_once_with(CONF) process_launcher.assert_called_once_with() wsgi_service.assert_called_once_with('osapi_volume') launcher.launch_service.assert_any_call(server, workers=server.workers) service_create.assert_has_calls([mock.call(binary='cinder-scheduler'), mock.call(binary='cinder-backup'), mock.call(binary='cinder-volume')]) self.assertEqual(3, service_create.call_count) launcher.launch_service.assert_has_calls([mock.call(service)] * 3) self.assertEqual(4, launcher.launch_service.call_count) launcher.wait.assert_called_once_with() @mock.patch('cinder.rpc.init') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.service.WSGIService') @mock.patch('cinder.service.process_launcher') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') def test_main_with_backend(self, log_setup, get_logger, monkey_patch, process_launcher, wsgi_service, service_create, rpc_init): CONF.set_override('enabled_backends', ['backend1']) CONF.set_override('host', 'host') launcher = process_launcher.return_value server = wsgi_service.return_value server.workers = mock.sentinel.worker_count service = service_create.return_value cinder_all.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder.all') monkey_patch.assert_called_once_with() rpc_init.assert_called_once_with(CONF) process_launcher.assert_called_once_with() wsgi_service.assert_called_once_with('osapi_volume') launcher.launch_service.assert_any_call(server, workers=server.workers) service_create.assert_has_calls([mock.call(binary='cinder-scheduler'), mock.call(binary='cinder-backup'), mock.call(binary='cinder-volume', host='host@backend1', service_name='backend1')]) self.assertEqual(3, service_create.call_count) launcher.launch_service.assert_has_calls([mock.call(service)] * 3) self.assertEqual(4, launcher.launch_service.call_count) launcher.wait.assert_called_once_with() @mock.patch('cinder.rpc.init') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.service.WSGIService') @mock.patch('cinder.service.process_launcher') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') def test_main_load_osapi_volume_exception(self, log_setup, get_logger, monkey_patch, process_launcher, wsgi_service, service_create, rpc_init): launcher = process_launcher.return_value server = wsgi_service.return_value server.workers = mock.sentinel.worker_count mock_log = get_logger.return_value for ex in (Exception(), SystemExit()): launcher.launch_service.side_effect = ex cinder_all.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder.all') monkey_patch.assert_called_once_with() process_launcher.assert_called_once_with() 
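        # Editor's aside (sketch, not upstream code): mock.sentinel, used
        # throughout these tests (e.g. mock.sentinel.worker_count), returns
        # memoized, unique placeholder objects, so an identity assertion
        # proves main() passed along exactly the object the launcher was
        # given:
        #
        #     import mock
        #
        #     assert mock.sentinel.worker_count is mock.sentinel.worker_count
        #     assert mock.sentinel.a is not mock.sentinel.b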
wsgi_service.assert_called_once_with('osapi_volume') rpc_init.assert_called_with(CONF) launcher.launch_service.assert_any_call(server, workers=server.workers) self.assertTrue(mock_log.exception.called) # Reset for the next exception log_setup.reset_mock() get_logger.reset_mock() monkey_patch.reset_mock() process_launcher.reset_mock() wsgi_service.reset_mock() mock_log.reset_mock() @mock.patch('cinder.rpc.init') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.service.WSGIService') @mock.patch('cinder.service.process_launcher') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') def test_main_load_binary_exception(self, log_setup, get_logger, monkey_patch, process_launcher, wsgi_service, service_create, rpc_init): CONF.set_override('enabled_backends', None) launcher = process_launcher.return_value server = wsgi_service.return_value server.workers = mock.sentinel.worker_count service = service_create.return_value mock_log = get_logger.return_value def launch_service(*args, **kwargs): if service in args: raise Exception() launcher.launch_service.side_effect = launch_service cinder_all.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder.all') monkey_patch.assert_called_once_with() process_launcher.assert_called_once_with() wsgi_service.assert_called_once_with('osapi_volume') launcher.launch_service.assert_any_call(server, workers=server.workers) for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']: service_create.assert_any_call(binary=binary) launcher.launch_service.assert_called_with(service) rpc_init.assert_called_once_with(CONF) self.assertTrue(mock_log.exception.called) class TestCinderSchedulerCmd(test.TestCase): def setUp(self): super(TestCinderSchedulerCmd, self).setUp() sys.argv = ['cinder-scheduler'] CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderSchedulerCmd, self).tearDown() @mock.patch('cinder.service.wait') @mock.patch('cinder.service.serve') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, service_create, service_serve, service_wait): server = service_create.return_value cinder_scheduler.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() service_create.assert_called_once_with(binary='cinder-scheduler') service_serve.assert_called_once_with(server) service_wait.assert_called_once_with() class TestCinderVolumeCmd(test.TestCase): def setUp(self): super(TestCinderVolumeCmd, self).setUp() sys.argv = ['cinder-volume'] CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderVolumeCmd, self).tearDown() @mock.patch('cinder.service.get_launcher') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, service_create, get_launcher): CONF.set_override('enabled_backends', None) launcher = get_launcher.return_value server = service_create.return_value cinder_volume.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) 
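        # Editor's aside (sketch, not upstream code): the stacked
        # @mock.patch decorators in tests like test_main above are applied
        # bottom-up, so the mock for the *bottom* decorator arrives as the
        # *first* test argument.  A compact, self-contained check:
        #
        #     import mock
        #
        #     @mock.patch('os.rmdir')    # outermost -> last argument
        #     @mock.patch('os.mkdir')    # innermost -> first argument
        #     def demo(mock_mkdir, mock_rmdir):
        #         import os
        #         os.mkdir('x')          # hits the mock, not the filesystem
        #         os.rmdir('x')
        #         mock_mkdir.assert_called_once_with('x')
        #         mock_rmdir.assert_called_once_with('x')
        #
        #     demo()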
log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() get_launcher.assert_called_once_with() service_create.assert_called_once_with(binary='cinder-volume') launcher.launch_service.assert_called_once_with(server) launcher.wait.assert_called_once_with() @mock.patch('cinder.service.get_launcher') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main_with_backends(self, log_setup, monkey_patch, service_create, get_launcher): backends = ['backend1', 'backend2'] CONF.set_override('enabled_backends', backends) launcher = get_launcher.return_value cinder_volume.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() get_launcher.assert_called_once_with() self.assertEqual(len(backends), service_create.call_count) self.assertEqual(len(backends), launcher.launch_service.call_count) launcher.wait.assert_called_once_with() class TestCinderManageCmd(test.TestCase): def setUp(self): super(TestCinderManageCmd, self).setUp() sys.argv = ['cinder-manage'] CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderManageCmd, self).tearDown() @mock.patch('cinder.db.migration.db_sync') def test_db_commands_sync(self, db_sync): version = mock.MagicMock() db_cmds = cinder_manage.DbCommands() db_cmds.sync(version=version) db_sync.assert_called_once_with(version) @mock.patch('oslo_db.sqlalchemy.migration.db_version') def test_db_commands_version(self, db_version): db_cmds = cinder_manage.DbCommands() with mock.patch('sys.stdout', new=six.StringIO()): db_cmds.version() self.assertEqual(1, db_version.call_count) @mock.patch('oslo_db.sqlalchemy.migration.db_version') def test_db_commands_downgrade_fails(self, db_version): db_version.return_value = 2 db_cmds = cinder_manage.DbCommands() with mock.patch('sys.stdout', new=six.StringIO()): self.assertRaises(exception.InvalidInput, db_cmds.sync, 1) @mock.patch('cinder.version.version_string') def test_versions_commands_list(self, version_string): version_cmds = cinder_manage.VersionCommands() with mock.patch('sys.stdout', new=six.StringIO()): version_cmds.list() version_string.assert_called_once_with() @mock.patch('cinder.version.version_string') def test_versions_commands_call(self, version_string): version_cmds = cinder_manage.VersionCommands() with mock.patch('sys.stdout', new=six.StringIO()): version_cmds.__call__() version_string.assert_called_once_with() @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.context.get_admin_context') def test_host_commands_list(self, get_admin_context, service_get_all): get_admin_context.return_value = mock.sentinel.ctxt service_get_all.return_value = [{'host': 'fake-host', 'availability_zone': 'fake-az'}] with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: expected_out = ("%(host)-25s\t%(zone)-15s\n" % {'host': 'host', 'zone': 'zone'}) expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" % {'host': 'fake-host', 'availability_zone': 'fake-az'}) host_cmds = cinder_manage.HostCommands() host_cmds.list() get_admin_context.assert_called_once_with() service_get_all.assert_called_once_with(mock.sentinel.ctxt, None) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.context.get_admin_context') def test_host_commands_list_with_zone(self, get_admin_context, 
service_get_all): get_admin_context.return_value = mock.sentinel.ctxt service_get_all.return_value = [{'host': 'fake-host', 'availability_zone': 'fake-az1'}, {'host': 'fake-host', 'availability_zone': 'fake-az2'}] with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: expected_out = ("%(host)-25s\t%(zone)-15s\n" % {'host': 'host', 'zone': 'zone'}) expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" % {'host': 'fake-host', 'availability_zone': 'fake-az1'}) host_cmds = cinder_manage.HostCommands() host_cmds.list(zone='fake-az1') get_admin_context.assert_called_once_with() service_get_all.assert_called_once_with(mock.sentinel.ctxt, None) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.objects.base.CinderObjectSerializer') @mock.patch('cinder.rpc.get_client') @mock.patch('cinder.rpc.init') @mock.patch('cinder.rpc.initialized', return_value=False) @mock.patch('oslo_messaging.Target') def test_volume_commands_init(self, messaging_target, rpc_initialized, rpc_init, get_client, object_serializer): CONF.set_override('volume_topic', 'fake-topic') mock_target = messaging_target.return_value mock_rpc_client = get_client.return_value volume_cmds = cinder_manage.VolumeCommands() rpc_client = volume_cmds._rpc_client() rpc_initialized.assert_called_once_with() rpc_init.assert_called_once_with(CONF) messaging_target.assert_called_once_with(topic='fake-topic') get_client.assert_called_once_with(mock_target, serializer=object_serializer()) self.assertEqual(mock_rpc_client, rpc_client) @mock.patch('cinder.db.sqlalchemy.api.volume_get') @mock.patch('cinder.context.get_admin_context') @mock.patch('cinder.rpc.get_client') @mock.patch('cinder.rpc.init') def test_volume_commands_delete(self, rpc_init, get_client, get_admin_context, volume_get): ctxt = context.RequestContext('admin', 'fake', True) get_admin_context.return_value = ctxt mock_client = mock.MagicMock() cctxt = mock.MagicMock() mock_client.prepare.return_value = cctxt get_client.return_value = mock_client host = 'fake@host' db_volume = {'host': host + '#pool1'} volume = fake_volume.fake_db_volume(**db_volume) volume_obj = fake_volume.fake_volume_obj(ctxt, **volume) volume_id = volume['id'] volume_get.return_value = volume volume_cmds = cinder_manage.VolumeCommands() volume_cmds._client = mock_client volume_cmds.delete(volume_id) volume_get.assert_called_once_with(ctxt, volume_id) mock_client.prepare.assert_called_once_with(server=host) cctxt.cast.assert_called_once_with(ctxt, 'delete_volume', volume_id=volume['id'], volume=volume_obj) @mock.patch('cinder.db.volume_destroy') @mock.patch('cinder.db.sqlalchemy.api.volume_get') @mock.patch('cinder.context.get_admin_context') @mock.patch('cinder.rpc.init') def test_volume_commands_delete_no_host(self, rpc_init, get_admin_context, volume_get, volume_destroy): ctxt = context.RequestContext('fake-user', 'fake-project', is_admin=True) get_admin_context.return_value = ctxt volume = fake_volume.fake_db_volume() volume_id = volume['id'] volume_get.return_value = volume with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: expected_out = ('Volume not yet assigned to host.\n' 'Deleting volume from database and skipping' ' rpc.\n') volume_cmds = cinder_manage.VolumeCommands() volume_cmds.delete(volume_id) get_admin_context.assert_called_once_with() volume_get.assert_called_once_with(ctxt, volume_id) self.assertTrue(volume_destroy.called) admin_context = volume_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertEqual(expected_out, 
fake_out.getvalue()) @mock.patch('cinder.db.volume_destroy') @mock.patch('cinder.db.sqlalchemy.api.volume_get') @mock.patch('cinder.context.get_admin_context') @mock.patch('cinder.rpc.init') def test_volume_commands_delete_volume_in_use(self, rpc_init, get_admin_context, volume_get, volume_destroy): ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt db_volume = {'status': 'in-use', 'host': 'fake-host'} volume = fake_volume.fake_db_volume(**db_volume) volume_id = volume['id'] volume_get.return_value = volume with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: expected_out = ('Volume is in-use.\n' 'Detach volume from instance and then try' ' again.\n') volume_cmds = cinder_manage.VolumeCommands() volume_cmds.delete(volume_id) volume_get.assert_called_once_with(ctxt, volume_id) self.assertEqual(expected_out, fake_out.getvalue()) def test_config_commands_list(self): with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: expected_out = '' for key, value in CONF.items(): expected_out += '%s = %s' % (key, value) + '\n' config_cmds = cinder_manage.ConfigCommands() config_cmds.list() self.assertEqual(expected_out, fake_out.getvalue()) def test_config_commands_list_param(self): with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: CONF.set_override('host', 'fake') expected_out = 'host = fake\n' config_cmds = cinder_manage.ConfigCommands() config_cmds.list(param='host') self.assertEqual(expected_out, fake_out.getvalue()) def test_get_log_commands_no_errors(self): with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: CONF.set_override('log_dir', None) expected_out = 'No errors in logfiles!\n' get_log_cmds = cinder_manage.GetLogCommands() get_log_cmds.errors() self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('six.moves.builtins.open') @mock.patch('os.listdir') def test_get_log_commands_errors(self, listdir, open): CONF.set_override('log_dir', 'fake-dir') listdir.return_value = ['fake-error.log'] with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: open.return_value = six.StringIO( '[ ERROR ] fake-error-message') expected_out = ('fake-dir/fake-error.log:-\n' 'Line 1 : [ ERROR ] fake-error-message\n') get_log_cmds = cinder_manage.GetLogCommands() get_log_cmds.errors() self.assertEqual(expected_out, fake_out.getvalue()) open.assert_called_once_with('fake-dir/fake-error.log', 'r') listdir.assert_called_once_with(CONF.log_dir) @mock.patch('six.moves.builtins.open') @mock.patch('os.path.exists') def test_get_log_commands_syslog_no_log_file(self, path_exists, open): path_exists.return_value = False get_log_cmds = cinder_manage.GetLogCommands() with mock.patch('sys.stdout', new=six.StringIO()): exit = self.assertRaises(SystemExit, get_log_cmds.syslog) self.assertEqual(1, exit.code) path_exists.assert_any_call('/var/log/syslog') path_exists.assert_any_call('/var/log/messages') @mock.patch('cinder.db.backup_get_all') @mock.patch('cinder.context.get_admin_context') def test_backup_commands_list(self, get_admin_context, backup_get_all): ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt backup = {'id': 1, 'user_id': 'fake-user-id', 'project_id': 'fake-project-id', 'host': 'fake-host', 'display_name': 'fake-display-name', 'container': 'fake-container', 'status': fields.BackupStatus.AVAILABLE, 'size': 123, 'object_count': 1, 'volume_id': 'fake-volume-id', } backup_get_all.return_value = [backup] with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: hdr = 
('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s' '\t%-12s') header = hdr % ('ID', 'User ID', 'Project ID', 'Host', 'Name', 'Container', 'Status', 'Size', 'Object Count') res = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d' '\t%-12s') resource = res % (backup['id'], backup['user_id'], backup['project_id'], backup['host'], backup['display_name'], backup['container'], backup['status'], backup['size'], 1) expected_out = header + '\n' + resource + '\n' backup_cmds = cinder_manage.BackupCommands() backup_cmds.list() get_admin_context.assert_called_once_with() backup_get_all.assert_called_once_with(ctxt, None, None, None, None, None, None) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.db.backup_update') @mock.patch('cinder.db.backup_get_all_by_host') @mock.patch('cinder.context.get_admin_context') def test_update_backup_host(self, get_admin_context, backup_get_by_host, backup_update): ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt backup = {'id': fake_constants.backup_id, 'user_id': 'fake-user-id', 'project_id': 'fake-project-id', 'host': 'fake-host', 'display_name': 'fake-display-name', 'container': 'fake-container', 'status': fields.BackupStatus.AVAILABLE, 'size': 123, 'object_count': 1, 'volume_id': 'fake-volume-id', } backup_get_by_host.return_value = [backup] backup_cmds = cinder_manage.BackupCommands() backup_cmds.update_backup_host('fake_host', 'fake_host2') get_admin_context.assert_called_once_with() backup_get_by_host.assert_called_once_with(ctxt, 'fake_host') backup_update.assert_called_once_with(ctxt, fake_constants.backup_id, {'host': 'fake_host2'}) @mock.patch('cinder.utils.service_is_up') @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.context.get_admin_context') def _test_service_commands_list(self, service, get_admin_context, service_get_all, service_is_up): ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt service_get_all.return_value = [service] service_is_up.return_value = True with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s" print_format = format % ('Binary', 'Host', 'Zone', 'Status', 'State', 'Updated At', 'RPC Version', 'Object Version') rpc_version = service['rpc_current_version'] if not rpc_version: rpc_version = rpc.LIBERTY_RPC_VERSIONS[service['binary']] object_version = service['object_current_version'] if not object_version: object_version = 'liberty' service_format = format % (service['binary'], service['host'].partition('.')[0], service['availability_zone'], 'enabled', ':-)', service['updated_at'], rpc_version, object_version) expected_out = print_format + '\n' + service_format + '\n' service_cmds = cinder_manage.ServiceCommands() service_cmds.list() self.assertEqual(expected_out, fake_out.getvalue()) get_admin_context.assert_called_with() service_get_all.assert_called_with(ctxt, None) def test_service_commands_list(self): service = {'binary': 'cinder-binary', 'host': 'fake-host.fake-domain', 'availability_zone': 'fake-zone', 'updated_at': '2014-06-30 11:22:33', 'disabled': False, 'rpc_current_version': '1.1', 'object_current_version': '1.1'} for binary in ('volume', 'scheduler', 'backup'): service['binary'] = 'cinder-%s' % binary self._test_service_commands_list(service) def test_service_commands_list_no_updated_at(self): service = {'binary': 'cinder-binary', 'host': 'fake-host.fake-domain', 'availability_zone': 'fake-zone', 'updated_at': 
None, 'disabled': False, 'rpc_current_version': None, 'object_current_version': None} for binary in ('volume', 'scheduler', 'backup'): service['binary'] = 'cinder-%s' % binary self._test_service_commands_list(service) def test_get_arg_string(self): args1 = "foobar" args2 = "-foo bar" args3 = "--foo bar" self.assertEqual("foobar", cinder_manage.get_arg_string(args1)) self.assertEqual("foo bar", cinder_manage.get_arg_string(args2)) self.assertEqual("foo bar", cinder_manage.get_arg_string(args3)) @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_argv_lt_2(self, register_cli_opt): script_name = 'cinder-manage' sys.argv = [script_name] CONF(sys.argv[1:], project='cinder', version=version.version_string()) with mock.patch('sys.stdout', new=six.StringIO()): exit = self.assertRaises(SystemExit, cinder_manage.main) self.assertTrue(register_cli_opt.called) self.assertEqual(2, exit.code) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_log.log.setup') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_sudo_failed(self, register_cli_opt, log_setup, config_opts_call): script_name = 'cinder-manage' sys.argv = [script_name, 'fake_category', 'fake_action'] config_opts_call.side_effect = cfg.ConfigFilesNotFoundError( mock.sentinel._namespace) with mock.patch('sys.stdout', new=six.StringIO()): exit = self.assertRaises(SystemExit, cinder_manage.main) self.assertTrue(register_cli_opt.called) config_opts_call.assert_called_once_with( sys.argv[1:], project='cinder', version=version.version_string()) self.assertFalse(log_setup.called) self.assertEqual(2, exit.code) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main(self, register_cli_opt, config_opts_call): script_name = 'cinder-manage' sys.argv = [script_name, 'config', 'list'] action_fn = mock.MagicMock() CONF.category = mock.MagicMock(action_fn=action_fn) cinder_manage.main() self.assertTrue(register_cli_opt.called) config_opts_call.assert_called_once_with( sys.argv[1:], project='cinder', version=version.version_string()) self.assertTrue(action_fn.called) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_log.log.setup') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_invalid_dir(self, register_cli_opt, log_setup, config_opts_call): script_name = 'cinder-manage' fake_dir = 'fake-dir' invalid_dir = 'Invalid directory:' sys.argv = [script_name, '--config-dir', fake_dir] config_opts_call.side_effect = cfg.ConfigDirNotFoundError(fake_dir) with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: exit = self.assertRaises(SystemExit, cinder_manage.main) self.assertTrue(register_cli_opt.called) config_opts_call.assert_called_once_with( sys.argv[1:], project='cinder', version=version.version_string()) self.assertIn(invalid_dir, fake_out.getvalue()) self.assertIn(fake_dir, fake_out.getvalue()) self.assertFalse(log_setup.called) self.assertEqual(2, exit.code) @mock.patch('cinder.db') def test_remove_service_failure(self, mock_db): mock_db.service_destroy.side_effect = SystemExit(1) service_commands = cinder_manage.ServiceCommands() exit = service_commands.remove('abinary', 'ahost') self.assertEqual(2, exit) @mock.patch('cinder.db.service_destroy') @mock.patch('cinder.db.service_get_by_args', return_value = {'id': '12'}) def test_remove_service_success(self, mock_get_by_args, mock_service_destroy): service_commands = cinder_manage.ServiceCommands() 
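        # Editor's aside (hypothetical sketch, not the upstream
        # implementation): test_get_arg_string above pins down the contract
        # that one or two leading dashes are stripped from an option name.
        # One function satisfying those three assertions:
        #
        #     def get_arg_string(args):
        #         if args[0] == '-':
        #             return args[2:] if args[1] == '-' else args[1:]
        #         return args
        #
        #     assert get_arg_string('foobar') == 'foobar'
        #     assert get_arg_string('-foo bar') == 'foo bar'
        #     assert get_arg_string('--foo bar') == 'foo bar'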
self.assertIsNone(service_commands.remove('abinary', 'ahost')) class TestCinderRtstoolCmd(test.TestCase): def setUp(self): super(TestCinderRtstoolCmd, self).setUp() sys.argv = ['cinder-rtstool'] CONF(sys.argv[1:], project='cinder', version=version.version_string()) self.INITIATOR_IQN = 'iqn.2015.12.com.example.openstack.i:UNIT1' self.TARGET_IQN = 'iqn.2015.12.com.example.openstack.i:TARGET1' def tearDown(self): super(TestCinderRtstoolCmd, self).tearDown() @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_create_rtslib_error(self, rtsroot): rtsroot.side_effect = rtslib_fb.utils.RTSLibError() with mock.patch('sys.stdout', new=six.StringIO()): self.assertRaises(rtslib_fb.utils.RTSLibError, cinder_rtstool.create, mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled) def _test_create_rtslib_error_network_portal(self, ip): with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \ mock.patch.object(rtslib_fb, 'LUN') as lun, \ mock.patch.object(rtslib_fb, 'TPG') as tpg, \ mock.patch.object(rtslib_fb, 'FabricModule') as fabric_module, \ mock.patch.object(rtslib_fb, 'Target') as target, \ mock.patch.object(rtslib_fb, 'BlockStorageObject') as \ block_storage_object, \ mock.patch.object(rtslib_fb.root, 'RTSRoot') as rts_root: root_new = mock.MagicMock(storage_objects=mock.MagicMock()) rts_root.return_value = root_new block_storage_object.return_value = mock.sentinel.so_new target.return_value = mock.sentinel.target_new fabric_module.return_value = mock.sentinel.fabric_new tpg_new = tpg.return_value lun.return_value = mock.sentinel.lun_new if ip == '0.0.0.0': network_portal.side_effect = rtslib_fb.utils.RTSLibError() self.assertRaises(rtslib_fb.utils.RTSLibError, cinder_rtstool.create, mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled) else: cinder_rtstool.create(mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled) rts_root.assert_called_once_with() block_storage_object.assert_called_once_with( name=mock.sentinel.name, dev=mock.sentinel.backing_device) target.assert_called_once_with(mock.sentinel.fabric_new, mock.sentinel.name, 'create') fabric_module.assert_called_once_with('iscsi') tpg.assert_called_once_with(mock.sentinel.target_new, mode='create') tpg_new.set_attribute.assert_called_once_with('authentication', '1') lun.assert_called_once_with(tpg_new, storage_object=mock.sentinel.so_new) self.assertEqual(1, tpg_new.enable) network_portal.assert_any_call(tpg_new, ip, 3260, mode='any') if ip == '::0': network_portal.assert_any_call(tpg_new, ip, 3260, mode='any') def test_create_rtslib_error_network_portal_ipv4(self): with mock.patch('sys.stdout', new=six.StringIO()): self._test_create_rtslib_error_network_portal('0.0.0.0') def test_create_rtslib_error_network_portal_ipv6(self): with mock.patch('sys.stdout', new=six.StringIO()): self._test_create_rtslib_error_network_portal('::0') def _test_create(self, ip): with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \ mock.patch.object(rtslib_fb, 'LUN') as lun, \ mock.patch.object(rtslib_fb, 'TPG') as tpg, \ mock.patch.object(rtslib_fb, 'FabricModule') as fabric_module, \ mock.patch.object(rtslib_fb, 'Target') as target, \ mock.patch.object(rtslib_fb, 'BlockStorageObject') as \ block_storage_object, \ mock.patch.object(rtslib_fb.root, 'RTSRoot') as rts_root: root_new = 
mock.MagicMock(storage_objects=mock.MagicMock()) rts_root.return_value = root_new block_storage_object.return_value = mock.sentinel.so_new target.return_value = mock.sentinel.target_new fabric_module.return_value = mock.sentinel.fabric_new tpg_new = tpg.return_value lun.return_value = mock.sentinel.lun_new def network_portal_exception(*args, **kwargs): if set([tpg_new, '::0', 3260]).issubset(list(args)): raise rtslib_fb.utils.RTSLibError() else: pass cinder_rtstool.create(mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled) rts_root.assert_called_once_with() block_storage_object.assert_called_once_with( name=mock.sentinel.name, dev=mock.sentinel.backing_device) target.assert_called_once_with(mock.sentinel.fabric_new, mock.sentinel.name, 'create') fabric_module.assert_called_once_with('iscsi') tpg.assert_called_once_with(mock.sentinel.target_new, mode='create') tpg_new.set_attribute.assert_called_once_with('authentication', '1') lun.assert_called_once_with(tpg_new, storage_object=mock.sentinel.so_new) self.assertEqual(1, tpg_new.enable) network_portal.assert_any_call(tpg_new, ip, 3260, mode='any') if ip == '::0': network_portal.assert_any_call(tpg_new, ip, 3260, mode='any') def test_create_ipv4(self): self._test_create('0.0.0.0') def test_create_ipv6(self): self._test_create('::0') @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) def test_create_ips_and_port(self, mock_rtslib): port = 3261 ips = ['ip1', 'ip2', 'ip3'] mock_rtslib.BlockStorageObject.return_value = mock.sentinel.bso mock_rtslib.Target.return_value = mock.sentinel.target_new mock_rtslib.FabricModule.return_value = mock.sentinel.iscsi_fabric tpg_new = mock_rtslib.TPG.return_value cinder_rtstool.create(mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, portals_ips=ips, portals_port=port) mock_rtslib.Target.assert_called_once_with(mock.sentinel.iscsi_fabric, mock.sentinel.name, 'create') mock_rtslib.TPG.assert_called_once_with(mock.sentinel.target_new, mode='create') mock_rtslib.LUN.assert_called_once_with( tpg_new, storage_object=mock.sentinel.bso) mock_rtslib.NetworkPortal.assert_has_calls( map(lambda ip: mock.call(tpg_new, ip, port, mode='any'), ips), any_order=True ) @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_rtslib_error(self, rtsroot): rtsroot.side_effect = rtslib_fb.utils.RTSLibError() with mock.patch('sys.stdout', new=six.StringIO()): self.assertRaises(rtslib_fb.utils.RTSLibError, cinder_rtstool.add_initiator, mock.sentinel.target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_rtstool_error(self, rtsroot): rtsroot.targets.return_value = {} self.assertRaises(cinder_rtstool.RtstoolError, cinder_rtstool.add_initiator, mock.sentinel.target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_acl_exists(self, rtsroot, node_acl, mapped_lun): target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN}] acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=self.TARGET_IQN) rtsroot.return_value = mock.MagicMock(targets=[target]) 
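# NOTE: the TPG built above already holds an ACL whose node_wwn matches
# INITIATOR_IQN, so add_initiator() should detect the existing ACL and
# create neither a new NodeACL nor a MappedLUN (asserted below).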
cinder_rtstool.add_initiator(self.TARGET_IQN, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) self.assertFalse(node_acl.called) self.assertFalse(mapped_lun.called) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_acl_exists_case_1(self, rtsroot, node_acl, mapped_lun): """Ensure initiator iqns are handled in a case-insensitive manner.""" target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN.lower()}] acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.add_initiator(target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) self.assertFalse(node_acl.called) self.assertFalse(mapped_lun.called) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_acl_exists_case_2(self, rtsroot, node_acl, mapped_lun): """Ensure initiator iqns are handled in a case-insensitive manner.""" iqn_lower = self.INITIATOR_IQN.lower() target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN}] acl = mock.MagicMock(node_wwn=iqn_lower) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.add_initiator(target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) self.assertFalse(node_acl.called) self.assertFalse(mapped_lun.called) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator(self, rtsroot, node_acl, mapped_lun): target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN}] tpg = mock.MagicMock() tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) acl_new = mock.MagicMock(chap_userid=mock.sentinel.userid, chap_password=mock.sentinel.password) node_acl.return_value = acl_new cinder_rtstool.add_initiator(target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) node_acl.assert_called_once_with(tpg, self.INITIATOR_IQN, mode='create') mapped_lun.assert_called_once_with(acl_new, 0, tpg_lun=0) @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_get_targets(self, rtsroot): target = mock.MagicMock() target.dump.return_value = {'wwn': 'fake-wwn'} rtsroot.return_value = mock.MagicMock(targets=[target]) with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: cinder_rtstool.get_targets() self.assertEqual(str(target.wwn), fake_out.getvalue().strip()) @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_delete(self, rtsroot): target = mock.MagicMock(wwn=mock.sentinel.iqn) storage_object = mock.MagicMock() name = mock.PropertyMock(return_value=mock.sentinel.iqn) type(storage_object).name = name rtsroot.return_value = mock.MagicMock( targets=[target], storage_objects=[storage_object]) cinder_rtstool.delete(mock.sentinel.iqn) target.delete.assert_called_once_with() storage_object.delete.assert_called_once_with() @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def 
test_delete_initiator(self, rtsroot, node_acl, mapped_lun): target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN}] acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.delete_initiator(target_iqn, self.INITIATOR_IQN) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_delete_initiator_case(self, rtsroot, node_acl, mapped_lun): """Ensure iqns are handled in a case-insensitive manner.""" initiator_iqn_lower = self.INITIATOR_IQN.lower() target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': initiator_iqn_lower}] acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.delete_initiator(target_iqn, self.INITIATOR_IQN) @mock.patch.object(cinder_rtstool, 'os', autospec=True) @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) def test_save_with_filename(self, mock_rtslib, mock_os): filename = mock.sentinel.filename cinder_rtstool.save_to_file(filename) rtsroot = mock_rtslib.root.RTSRoot rtsroot.assert_called_once_with() self.assertEqual(0, mock_os.path.dirname.call_count) self.assertEqual(0, mock_os.path.exists.call_count) self.assertEqual(0, mock_os.makedirs.call_count) rtsroot.return_value.save_to_file.assert_called_once_with(filename) @mock.patch.object(cinder_rtstool, 'os', **{'path.exists.return_value': True, 'path.dirname.return_value': mock.sentinel.dirname}) @mock.patch.object(cinder_rtstool, 'rtslib_fb', **{'root.default_save_file': mock.sentinel.filename}) def test_save(self, mock_rtslib, mock_os): """Test that we check path exists with default file.""" cinder_rtstool.save_to_file(None) rtsroot = mock_rtslib.root.RTSRoot rtsroot.assert_called_once_with() rtsroot.return_value.save_to_file.assert_called_once_with( mock.sentinel.filename) mock_os.path.dirname.assert_called_once_with(mock.sentinel.filename) mock_os.path.exists.assert_called_once_with(mock.sentinel.dirname) self.assertEqual(0, mock_os.makedirs.call_count) @mock.patch.object(cinder_rtstool, 'os', **{'path.exists.return_value': False, 'path.dirname.return_value': mock.sentinel.dirname}) @mock.patch.object(cinder_rtstool, 'rtslib_fb', **{'root.default_save_file': mock.sentinel.filename}) def test_save_no_targetcli(self, mock_rtslib, mock_os): """Test that we create path if it doesn't exist with default file.""" cinder_rtstool.save_to_file(None) rtsroot = mock_rtslib.root.RTSRoot rtsroot.assert_called_once_with() rtsroot.return_value.save_to_file.assert_called_once_with( mock.sentinel.filename) mock_os.path.dirname.assert_called_once_with(mock.sentinel.filename) mock_os.path.exists.assert_called_once_with(mock.sentinel.dirname) mock_os.makedirs.assert_called_once_with(mock.sentinel.dirname, 0o755) @mock.patch.object(cinder_rtstool, 'os', autospec=True) @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) def test_save_error_creating_dir(self, mock_rtslib, mock_os): mock_os.path.dirname.return_value = 'dirname' mock_os.path.exists.return_value = False mock_os.makedirs.side_effect = OSError('error') regexp = (u'targetcli not installed and could not create default ' 'directory \\(dirname\\): error$')
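# NOTE: assertRaisesRegexp interprets the expected message as a regular
# expression, hence the escaped parentheses around (dirname) in the
# pattern above.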
self.assertRaisesRegexp(cinder_rtstool.RtstoolError, regexp, cinder_rtstool.save_to_file, None) @mock.patch.object(cinder_rtstool, 'os', autospec=True) @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) def test_save_error_saving(self, mock_rtslib, mock_os): save = mock_rtslib.root.RTSRoot.return_value.save_to_file save.side_effect = OSError('error') regexp = u'Could not save configuration to myfile: error' self.assertRaisesRegexp(cinder_rtstool.RtstoolError, regexp, cinder_rtstool.save_to_file, 'myfile') @mock.patch.object(cinder_rtstool, 'rtslib_fb', **{'root.default_save_file': mock.sentinel.filename}) def test_restore(self, mock_rtslib): """Test that we restore target configuration with default file.""" cinder_rtstool.restore_from_file(None) rtsroot = mock_rtslib.root.RTSRoot rtsroot.assert_called_once_with() rtsroot.return_value.restore_from_file.assert_called_once_with( mock.sentinel.filename) @mock.patch.object(cinder_rtstool, 'rtslib_fb') def test_restore_with_file(self, mock_rtslib): """Test that we restore target configuration with specified file.""" cinder_rtstool.restore_from_file('saved_file') rtsroot = mock_rtslib.root.RTSRoot rtsroot.return_value.restore_from_file.assert_called_once_with( 'saved_file') @mock.patch('cinder.cmd.rtstool.restore_from_file') def test_restore_error(self, restore_from_file): """Test that we fail to restore target configuration.""" restore_from_file.side_effect = OSError self.assertRaises(OSError, cinder_rtstool.restore_from_file, mock.sentinel.filename) def test_usage(self): with mock.patch('sys.stdout', new=six.StringIO()): exit = self.assertRaises(SystemExit, cinder_rtstool.usage) self.assertEqual(1, exit.code) @mock.patch('cinder.cmd.rtstool.usage') def test_main_argc_lt_2(self, usage): usage.side_effect = SystemExit(1) sys.argv = ['cinder-rtstool'] exit = self.assertRaises(SystemExit, cinder_rtstool.usage) self.assertTrue(usage.called) self.assertEqual(1, exit.code) def test_main_create_argv_lt_6(self): sys.argv = ['cinder-rtstool', 'create'] self._test_main_check_argv() def test_main_create_argv_gt_7(self): sys.argv = ['cinder-rtstool', 'create', 'fake-arg1', 'fake-arg2', 'fake-arg3', 'fake-arg4', 'fake-arg5', 'fake-arg6'] self._test_main_check_argv() def test_main_add_initiator_argv_lt_6(self): sys.argv = ['cinder-rtstool', 'add-initiator'] self._test_main_check_argv() def test_main_delete_argv_lt_3(self): sys.argv = ['cinder-rtstool', 'delete'] self._test_main_check_argv() def test_main_no_action(self): sys.argv = ['cinder-rtstool'] self._test_main_check_argv() def _test_main_check_argv(self): with mock.patch('cinder.cmd.rtstool.usage') as usage: usage.side_effect = SystemExit(1) sys.argv = ['cinder-rtstool', 'create'] exit = self.assertRaises(SystemExit, cinder_rtstool.main) self.assertTrue(usage.called) self.assertEqual(1, exit.code) @mock.patch('cinder.cmd.rtstool.save_to_file') def test_main_save(self, mock_save): sys.argv = ['cinder-rtstool', 'save'] rc = cinder_rtstool.main() mock_save.assert_called_once_with(None) self.assertEqual(0, rc) @mock.patch('cinder.cmd.rtstool.save_to_file') def test_main_save_with_file(self, mock_save): sys.argv = ['cinder-rtstool', 'save', mock.sentinel.filename] rc = cinder_rtstool.main() mock_save.assert_called_once_with(mock.sentinel.filename) self.assertEqual(0, rc) def test_main_create(self): with mock.patch('cinder.cmd.rtstool.create') as create: sys.argv = ['cinder-rtstool', 'create', mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, 
mock.sentinel.iser_enabled, str(mock.sentinel.initiator_iqns)] rc = cinder_rtstool.main() create.assert_called_once_with( mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, initiator_iqns=str(mock.sentinel.initiator_iqns)) self.assertEqual(0, rc) @mock.patch('cinder.cmd.rtstool.create') def test_main_create_ips_and_port(self, mock_create): sys.argv = ['cinder-rtstool', 'create', mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, str(mock.sentinel.initiator_iqns), '-p3261', '-aip1,ip2,ip3'] rc = cinder_rtstool.main() mock_create.assert_called_once_with( mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, initiator_iqns=str(mock.sentinel.initiator_iqns), portals_ips=['ip1', 'ip2', 'ip3'], portals_port=3261) self.assertEqual(0, rc) def test_main_add_initiator(self): with mock.patch('cinder.cmd.rtstool.add_initiator') as add_initiator: sys.argv = ['cinder-rtstool', 'add-initiator', mock.sentinel.target_iqn, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.initiator_iqns] rc = cinder_rtstool.main() add_initiator.assert_called_once_with( mock.sentinel.target_iqn, mock.sentinel.initiator_iqns, mock.sentinel.userid, mock.sentinel.password) self.assertEqual(0, rc) def test_main_get_targets(self): with mock.patch('cinder.cmd.rtstool.get_targets') as get_targets: sys.argv = ['cinder-rtstool', 'get-targets'] rc = cinder_rtstool.main() get_targets.assert_called_once_with() self.assertEqual(0, rc) def test_main_delete(self): with mock.patch('cinder.cmd.rtstool.delete') as delete: sys.argv = ['cinder-rtstool', 'delete', mock.sentinel.iqn] rc = cinder_rtstool.main() delete.assert_called_once_with(mock.sentinel.iqn) self.assertEqual(0, rc) @mock.patch.object(cinder_rtstool, 'verify_rtslib') def test_main_verify(self, mock_verify_rtslib): sys.argv = ['cinder-rtstool', 'verify'] rc = cinder_rtstool.main() mock_verify_rtslib.assert_called_once_with() self.assertEqual(0, rc) class TestCinderVolumeUsageAuditCmd(test.TestCase): def setUp(self): super(TestCinderVolumeUsageAuditCmd, self).setUp() sys.argv = ['cinder-volume-usage-audit'] CONF(sys.argv[1:], project='cinder', version=version.version_string()) def tearDown(self): super(TestCinderVolumeUsageAuditCmd, self).tearDown() @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main_time_error(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period): CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2013-01-01 01:00:00') last_completed_audit_period.return_value = (mock.sentinel.begin, mock.sentinel.end) exit = self.assertRaises(SystemExit, volume_usage_audit.main) get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') self.assertEqual(-1, exit.code) rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() @mock.patch('cinder.volume.utils.notify_about_volume_usage') @mock.patch('cinder.db.volume_get_active_by_window') 
@mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main_send_create_volume_error(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period, volume_get_active_by_window, notify_about_volume_usage): CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0) end = datetime.datetime(2014, 2, 2, 2, 0) ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) volume1_created = datetime.datetime(2014, 1, 1, 2, 0) volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0) volume1 = mock.MagicMock(id='1', project_id='fake-project', created_at=volume1_created, deleted_at=volume1_deleted) volume_get_active_by_window.return_value = [volume1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } local_extra_info = { 'audit_period_beginning': str(volume1.created_at), 'audit_period_ending': str(volume1.created_at), } def _notify_about_volume_usage(*args, **kwargs): if 'create.end' in args: raise Exception() else: pass notify_about_volume_usage.side_effect = _notify_about_volume_usage volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_active_by_window.assert_called_once_with(ctxt, begin, end) notify_about_volume_usage.assert_any_call(ctxt, volume1, 'exists', extra_usage_info=extra_info) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'create.start', extra_usage_info=local_extra_info) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'create.end', extra_usage_info=local_extra_info) @mock.patch('cinder.volume.utils.notify_about_volume_usage') @mock.patch('cinder.db.volume_get_active_by_window') @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main_send_delete_volume_error(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period, volume_get_active_by_window, notify_about_volume_usage): CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0) end = datetime.datetime(2014, 2, 2, 2, 0) ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) volume1_created = datetime.datetime(2014, 1, 1, 2, 0) volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0) volume1 = mock.MagicMock(id='1', project_id='fake-project', created_at=volume1_created, deleted_at=volume1_deleted) volume_get_active_by_window.return_value = [volume1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } 
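# NOTE: the 'exists' notification spans the configured audit window,
# while the create.*/delete.* notifications below use the volume's own
# created_at/deleted_at timestamps as both period boundaries.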
local_extra_info_create = { 'audit_period_beginning': str(volume1.created_at), 'audit_period_ending': str(volume1.created_at), } local_extra_info_delete = { 'audit_period_beginning': str(volume1.deleted_at), 'audit_period_ending': str(volume1.deleted_at), } def _notify_about_volume_usage(*args, **kwargs): if 'delete.end' in args: raise Exception() else: pass notify_about_volume_usage.side_effect = _notify_about_volume_usage volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_active_by_window.assert_called_once_with(ctxt, begin, end) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'exists', extra_usage_info=extra_info) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'create.start', extra_usage_info=local_extra_info_create) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'create.end', extra_usage_info=local_extra_info_create) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'delete.start', extra_usage_info=local_extra_info_delete) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'delete.end', extra_usage_info=local_extra_info_delete) @mock.patch('cinder.volume.utils.notify_about_snapshot_usage') @mock.patch('cinder.objects.snapshot.SnapshotList.get_active_by_window') @mock.patch('cinder.volume.utils.notify_about_volume_usage') @mock.patch('cinder.db.volume_get_active_by_window') @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main_send_snapshot_error(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period, volume_get_active_by_window, notify_about_volume_usage, snapshot_get_active_by_window, notify_about_snapshot_usage): CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0) end = datetime.datetime(2014, 2, 2, 2, 0) ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0) snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0) snapshot1 = mock.MagicMock(id='1', project_id='fake-project', created_at=snapshot1_created, deleted_at=snapshot1_deleted) volume_get_active_by_window.return_value = [] snapshot_get_active_by_window.return_value = [snapshot1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } local_extra_info_create = { 'audit_period_beginning': str(snapshot1.created_at), 'audit_period_ending': str(snapshot1.created_at), } local_extra_info_delete = { 'audit_period_beginning': str(snapshot1.deleted_at), 'audit_period_ending': str(snapshot1.deleted_at), } def _notify_about_snapshot_usage(*args, **kwargs): # notify_about_snapshot_usage raises an exception, but does not # block raise Exception() notify_about_snapshot_usage.side_effect = _notify_about_snapshot_usage volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', 
CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_active_by_window.assert_called_once_with(ctxt, begin, end) self.assertFalse(notify_about_volume_usage.called) notify_about_snapshot_usage.assert_any_call(ctxt, snapshot1, 'exists', extra_info) notify_about_snapshot_usage.assert_any_call( ctxt, snapshot1, 'create.start', extra_usage_info=local_extra_info_create) notify_about_snapshot_usage.assert_any_call( ctxt, snapshot1, 'delete.start', extra_usage_info=local_extra_info_delete) @mock.patch('cinder.volume.utils.notify_about_snapshot_usage') @mock.patch('cinder.objects.snapshot.SnapshotList.get_active_by_window') @mock.patch('cinder.volume.utils.notify_about_volume_usage') @mock.patch('cinder.db.volume_get_active_by_window') @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period, volume_get_active_by_window, notify_about_volume_usage, snapshot_get_active_by_window, notify_about_snapshot_usage): CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0) end = datetime.datetime(2014, 2, 2, 2, 0) ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) volume1_created = datetime.datetime(2014, 1, 1, 2, 0) volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0) volume1 = mock.MagicMock(id='1', project_id='fake-project', created_at=volume1_created, deleted_at=volume1_deleted) volume_get_active_by_window.return_value = [volume1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } extra_info_volume_create = { 'audit_period_beginning': str(volume1.created_at), 'audit_period_ending': str(volume1.created_at), } extra_info_volume_delete = { 'audit_period_beginning': str(volume1.deleted_at), 'audit_period_ending': str(volume1.deleted_at), } snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0) snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0) snapshot1 = mock.MagicMock(id='1', project_id='fake-project', created_at=snapshot1_created, deleted_at=snapshot1_deleted) snapshot_get_active_by_window.return_value = [snapshot1] extra_info_snapshot_create = { 'audit_period_beginning': str(snapshot1.created_at), 'audit_period_ending': str(snapshot1.created_at), } extra_info_snapshot_delete = { 'audit_period_beginning': str(snapshot1.deleted_at), 'audit_period_ending': str(snapshot1.deleted_at), } volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_active_by_window.assert_called_once_with(ctxt, begin, end) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'exists', extra_usage_info=extra_info) 
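# NOTE: the expected per-resource notification sequence asserted below is:
#   exists                     -> (audit begin, audit end)
#   create.start / create.end  -> (created_at, created_at)
#   delete.start / delete.end  -> (deleted_at, deleted_at)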
notify_about_volume_usage.assert_any_call( ctxt, volume1, 'create.start', extra_usage_info=extra_info_volume_create) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'create.end', extra_usage_info=extra_info_volume_create) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'delete.start', extra_usage_info=extra_info_volume_delete) notify_about_volume_usage.assert_any_call( ctxt, volume1, 'delete.end', extra_usage_info=extra_info_volume_delete) notify_about_snapshot_usage.assert_any_call(ctxt, snapshot1, 'exists', extra_info) notify_about_snapshot_usage.assert_any_call( ctxt, snapshot1, 'create.start', extra_usage_info=extra_info_snapshot_create) notify_about_snapshot_usage.assert_any_call( ctxt, snapshot1, 'create.end', extra_usage_info=extra_info_snapshot_create) notify_about_snapshot_usage.assert_any_call( ctxt, snapshot1, 'delete.start', extra_usage_info=extra_info_snapshot_delete) notify_about_snapshot_usage.assert_any_call( ctxt, snapshot1, 'delete.end', extra_usage_info=extra_info_snapshot_delete) cinder-8.0.0/cinder/tests/unit/test_solidfire.py0000664000567000056710000023511312701406250023104 0ustar jenkinsjenkins00000000000000 # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from oslo_utils import timeutils from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers import solidfire from cinder.volume import qos_specs from cinder.volume import volume_types def create_configuration(): configuration = mock.Mock(conf.Configuration) configuration.san_is_local = False configuration.append_config_values(mock.IgnoreArg()) return configuration class SolidFireVolumeTestCase(test.TestCase): def setUp(self): self.ctxt = context.get_admin_context() self.configuration = mock.Mock(conf.Configuration) self.configuration.sf_allow_tenant_qos = True self.configuration.san_is_local = True self.configuration.sf_emulate_512 = True self.configuration.sf_account_prefix = 'cinder' self.configuration.reserved_percentage = 25 self.configuration.iscsi_helper = None self.configuration.sf_template_account_name = 'openstack-vtemplate' self.configuration.sf_allow_template_caching = False self.configuration.sf_svip = None self.configuration.sf_enable_volume_mapping = True self.configuration.sf_volume_prefix = 'UUID-' self.configuration.sf_enable_vag = False super(SolidFireVolumeTestCase, self).setUp() self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) self.stubs.Set(solidfire.SolidFireDriver, '_build_endpoint_info', self.fake_build_endpoint_info) self.stubs.Set(solidfire.SolidFireDriver, '_set_cluster_uuid', self.fake_set_cluster_uuid) self.expected_qos_results = {'minIOPS': 1000, 'maxIOPS': 10000, 'burstIOPS': 20000} self.mock_stats_data =\ {'result': {'clusterCapacity': {'maxProvisionedSpace': 107374182400, 'usedSpace': 
1073741824, 'compressionPercent': 100, 'deDuplicationPercent': 100, 'thinProvisioningPercent': 100}}} self.mock_volume = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': 'fast', 'created_at': timeutils.utcnow()} self.fake_image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', 'updated_at': datetime.datetime(2013, 9, 28, 15, 27, 36, 325355), 'is_public': True, 'owner': 'testprjid'} self.fake_image_service = 'null' def fake_build_endpoint_info(obj, **kwargs): endpoint = {} endpoint['mvip'] = '1.1.1.1' endpoint['login'] = 'admin' endpoint['passwd'] = 'admin' endpoint['port'] = '443' endpoint['url'] = '{scheme}://{mvip}'.format(mvip='%s:%s' % (endpoint['mvip'], endpoint['port']), scheme='https') return endpoint def fake_set_cluster_uuid(obj): return '95e46307-67d4-49b3-8857-6104a9c30e46' def fake_issue_api_request(obj, method, params, version='1.0'): if method == 'GetClusterCapacity' and version == '1.0': data = {'result': {'clusterCapacity': {'maxProvisionedSpace': 107374182400, 'usedSpace': 1073741824, 'compressionPercent': 100, 'deDuplicationPercent': 100, 'thinProvisioningPercent': 100}}} return data elif method == 'GetClusterInfo' and version == '1.0': results = {'result': {'clusterInfo': {'name': 'fake-cluster', 'mvip': '1.1.1.1', 'svip': '1.1.1.1', 'uniqueID': 'unqid', 'repCount': 2, 'attributes': {}}}} return results elif method == 'AddAccount' and version == '1.0': return {'result': {'accountID': 25}, 'id': 1} elif method == 'GetAccountByName' and version == '1.0': results = {'result': {'account': {'accountID': 25, 'username': params['username'], 'status': 'active', 'initiatorSecret': '123456789012', 'targetSecret': '123456789012', 'attributes': {}, 'volumes': [6, 7, 20]}}, "id": 1} return results elif method == 'CreateVolume' and version == '1.0': return {'result': {'volumeID': 5}, 'id': 1} elif method == 'CreateSnapshot' and version == '6.0': return {'result': {'snapshotID': 5}, 'id': 1} elif method == 'DeleteVolume' and version == '1.0': return {'result': {}, 'id': 1} elif method == 'ModifyVolume' and version == '5.0': return {'result': {}, 'id': 1} elif method == 'CloneVolume': return {'result': {'volumeID': 6}, 'id': 2} elif method == 'ModifyVolume': return elif method == 'ListVolumesForAccount' and version == '1.0': test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66' result = {'result': { 'volumes': [{'volumeID': 5, 'name': test_name, 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {}, 'qos': None, 'iqn': test_name}]}} return result elif method == 'ListActiveVolumes': test_name = "existing_volume" result = {'result': { 'volumes': [{'volumeID': 5, 'name': test_name, 'accountID': 8, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {}, 'qos': None, 'iqn': test_name}]}} return result elif method == 'DeleteSnapshot': return {'result': {}} else: # Unimplemented API call in this fake return None def fake_issue_api_request_fails(obj, method, params, version='1.0', endpoint=None): response = {'error': {'code': 000, 'name': 'DummyError', 'message': 'This is a fake error response'}, 'id': 1} msg = ('Error (%s) encountered during ' 'SolidFire API call.'
% response['error']['name']) raise exception.SolidFireAPIException(message=msg) def fake_set_qos_by_volume_type(self, type_id, ctxt): return {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000} def fake_volume_get(obj, key, default=None): return {'qos': 'fast'} def fake_update_cluster_status(self): return def fake_get_model_info(self, account, vid): return {'fake': 'fake-model'} @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') def test_create_volume_with_qos_type(self, _mock_create_template_account, _mock_issue_api_request): _mock_issue_api_request.return_value = self.mock_stats_data _mock_create_template_account.return_value = 1 testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': 'fast', 'created_at': timeutils.utcnow()} fake_sfaccounts = [{'accountID': 5, 'name': 'testprjid', 'targetSecret': 'shhhh', 'username': 'john-wayne'}] test_type = {'name': 'sf-1', 'qos_specs_id': 'fb0576d7-b4b5-4cad-85dc-ca92e6a497d1', 'deleted': False, 'created_at': '2014-02-06 04:58:11', 'updated_at': None, 'extra_specs': {}, 'deleted_at': None, 'id': 'e730e97b-bc7d-4af3-934a-32e59b218e81'} test_qos_spec = {'id': 'asdfafdasdf', 'specs': {'minIOPS': '1000', 'maxIOPS': '2000', 'burstIOPS': '3000'}} def _fake_get_volume_type(ctxt, type_id): return test_type def _fake_get_qos_spec(ctxt, spec_id): return test_qos_spec def _fake_do_volume_create(account, params): return params sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_account_create_availability', return_value=fake_sfaccounts[0]), \ mock.patch.object(sfv, '_do_volume_create', side_effect=_fake_do_volume_create), \ mock.patch.object(volume_types, 'get_volume_type', side_effect=_fake_get_volume_type), \ mock.patch.object(qos_specs, 'get_qos_specs', side_effect=_fake_get_qos_spec): self.assertEqual({'burstIOPS': 3000, 'minIOPS': 1000, 'maxIOPS': 2000}, sfv.create_volume(testvol)['qos']) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') def test_create_volume(self, _mock_create_template_account, _mock_issue_api_request): _mock_issue_api_request.return_value = self.mock_stats_data _mock_create_template_account.return_value = 1 testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} fake_sfaccounts = [{'accountID': 5, 'name': 'testprjid', 'targetSecret': 'shhhh', 'username': 'john-wayne'}] sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_account_create_availability', return_value=fake_sfaccounts[0]): model_update = sfv.create_volume(testvol) self.assertIsNotNone(model_update) self.assertIsNone(model_update.get('provider_geometry', None)) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') def test_create_volume_non_512e(self, _mock_create_template_account, 
_mock_issue_api_request): _mock_issue_api_request.return_value = self.mock_stats_data _mock_create_template_account.return_value = 1 testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} fake_sfaccounts = [{'accountID': 5, 'name': 'testprjid', 'targetSecret': 'shhhh', 'username': 'john-wayne'}] sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_account_create_availability', return_value=fake_sfaccounts[0]): self.configuration.sf_emulate_512 = False model_update = sfv.create_volume(testvol) self.configuration.sf_emulate_512 = True self.assertEqual('4096 4096', model_update.get('provider_geometry', None)) def test_create_delete_snapshot(self): testsnap = {'project_id': 'testprjid', 'name': 'testvol', 'volume_size': 1, 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 'volume_id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow(), 'provider_id': '8 99 None'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) fake_uuid = 'UUID-b831c4d1-d1f0-11e1-9b23-0800200c9a66' with mock.patch.object( solidfire.SolidFireDriver, '_get_sf_snapshots', return_value=[{'snapshotID': '5', 'name': fake_uuid, 'volumeID': 5}]), \ mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=[{'accountID': 5, 'name': 'testprjid'}]): sfv.create_snapshot(testsnap) sfv.delete_snapshot(testsnap) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') def test_create_clone(self, _mock_create_template_account, _mock_issue_api_request): _mock_issue_api_request.return_value = self.mock_stats_data _mock_create_template_account.return_value = 1 _fake_get_snaps = [{'snapshotID': 5, 'name': 'testvol'}] _fake_get_volume = ( {'volumeID': 99, 'name': 'UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'attributes': {}}) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} testvol_b = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sf_snapshots', return_value=_fake_get_snaps), \ mock.patch.object(sfv, '_get_sf_volume', return_value=_fake_get_volume), \ mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=[]), \ mock.patch.object(sfv, '_get_model_info', return_value={}): sfv.create_cloned_volume(testvol_b, testvol) def test_initialize_connector_with_blocksizes(self): connector = {'initiator': 'iqn.2012-07.org.fake:01'} testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 
'solidfire:87hg.uuid-2cc06226-cc' '74-4cb7-bd55-14aed659a0cc.4060 0', 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' 'c76370d66b 2FE0CQ8J196R', 'provider_geometry': '4096 4096', 'created_at': timeutils.utcnow(), } sfv = solidfire.SolidFireDriver(configuration=self.configuration) properties = sfv.initialize_connection(testvol, connector) self.assertEqual('4096', properties['data']['physical_block_size']) self.assertEqual('4096', properties['data']['logical_block_size']) self.assertTrue(properties['data']['discard']) def test_create_volume_fails(self): # NOTE(JDG) This test just fakes update_cluster_status # this is intentional for this test self.stubs.Set(solidfire.SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) try: sfv.create_volume(testvol) self.fail("Should have raised an exception") except Exception: pass def test_create_sfaccount(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) account = sfv._create_sfaccount('project-id') self.assertIsNotNone(account) def test_create_sfaccount_fails(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) self.assertRaises(exception.SolidFireAPIException, sfv._create_sfaccount, 'project-id') def test_get_sfaccount_by_name(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) account = sfv._get_sfaccount_by_name('some-name') self.assertIsNotNone(account) def test_get_sfaccount_by_name_fails(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) self.assertRaises(exception.SolidFireAPIException, sfv._get_sfaccount_by_name, 'some-name') def test_delete_volume(self): testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow(), 'provider_id': '1 5 None', 'multiattach': True } fake_sfaccounts = [{'accountID': 5, 'name': 'testprjid', 'targetSecret': 'shhhh', 'username': 'john-wayne'}] get_vol_result = [{'volumeID': 5, 'name': 'test_volume', 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {}, 'qos': None, 'iqn': 'super_fake_iqn'}] mod_conf = self.configuration mod_conf.sf_enable_vag = True sfv = solidfire.SolidFireDriver(configuration=mod_conf) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_volumes_for_account', return_value=get_vol_result), \ mock.patch.object(sfv, '_issue_api_request'), \ mock.patch.object(sfv, '_remove_volume_from_vags') as rem_vol: sfv.delete_volume(testvol) rem_vol.assert_called_with(get_vol_result[0]['volumeID']) def test_delete_volume_no_volume_on_backend(self): fake_sfaccounts = [{'accountID': 5, 'name': 'testprjid', 'targetSecret': 'shhhh', 'username': 'john-wayne'}] fake_no_volumes = [] testvol = 
{'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_volumes_for_account', return_value=fake_no_volumes): sfv.delete_volume(testvol) def test_delete_snapshot_no_snapshot_on_backend(self): fake_sfaccounts = [{'accountID': 5, 'name': 'testprjid', 'targetSecret': 'shhhh', 'username': 'john-wayne'}] fake_no_volumes = [] testsnap = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_volumes_for_account', return_value=fake_no_volumes): sfv.delete_snapshot(testsnap) def test_get_cluster_info(self): self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv._get_cluster_info() def test_get_cluster_info_fail(self): # NOTE(JDG) This test just fakes update_cluster_status # this is intentional for this test self.stubs.Set(solidfire.SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.SolidFireAPIException, sfv._get_cluster_info) def test_extend_volume(self): self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.extend_volume(testvol, 2) def test_extend_volume_fails_no_volume(self): self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'not-found'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.VolumeNotFound, sfv.extend_volume, testvol, 2) def test_extend_volume_fails_account_lookup(self): # NOTE(JDG) This test just fakes update_cluster_status # this is intentional for this test self.stubs.Set(solidfire.SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.SolidFireAPIException, sfv.extend_volume, testvol, 2) def test_set_by_qos_spec_with_scoping(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'qos:minIOPS': '1000', 'qos:maxIOPS': '10000', 'qos:burstIOPS': '20000'}) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "100", "qos:burstIOPS": "300", "qos:maxIOPS": "200"}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], 
type_ref['id']) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) self.assertEqual(self.expected_qos_results, qos) def test_set_by_qos_spec(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'minIOPS': '1000', 'maxIOPS': '10000', 'burstIOPS': '20000'}) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "100", "qos:burstIOPS": "300", "qos:maxIOPS": "200"}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) self.assertEqual(self.expected_qos_results, qos) def test_set_by_qos_by_type_only(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "100", "qos:burstIOPS": "300", "qos:maxIOPS": "200"}) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) self.assertEqual({'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 300}, qos) def test_accept_transfer(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} expected = {'provider_auth': 'CHAP cinder-new_project 123456789012'} self.assertEqual(expected, sfv.accept_transfer(self.ctxt, testvol, 'new_user', 'new_project')) def test_accept_transfer_volume_not_found_raises(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'created_at': timeutils.utcnow()} self.assertRaises(exception.VolumeNotFound, sfv.accept_transfer, self.ctxt, testvol, 'new_user', 'new_project') def test_retype(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "500", "qos:burstIOPS": "2000", "qos:maxIOPS": "1000"}) diff = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'qos:burstIOPS': ('10000', u'2000'), 'qos:minIOPS': ('1000', u'500'), 'qos:maxIOPS': ('10000', u'1000')}} host = None testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} self.assertTrue(sfv.retype(self.ctxt, testvol, type_ref, diff, host)) def test_retype_with_qos_spec(self): test_type = {'name': 'sf-1', 'qos_specs_id': 'fb0576d7-b4b5-4cad-85dc-ca92e6a497d1', 'deleted': False, 'created_at': '2014-02-06 04:58:11', 'updated_at': None, 'extra_specs': {}, 'deleted_at': None, 'id': 'e730e97b-bc7d-4af3-934a-32e59b218e81'} test_qos_spec = {'id': 'asdfafdasdf', 'specs': {'minIOPS': '1000', 'maxIOPS': '2000', 'burstIOPS': '3000'}} def _fake_get_volume_type(ctxt, type_id): return test_type def _fake_get_qos_spec(ctxt, spec_id): return test_qos_spec self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) self.stubs.Set(volume_types, 'get_volume_type', _fake_get_volume_type) self.stubs.Set(qos_specs, 'get_qos_specs', _fake_get_qos_spec) sfv = solidfire.SolidFireDriver(configuration=self.configuration) diff = {'encryption': {}, 'extra_specs': {}, 
'qos_specs': {'burstIOPS': ('10000', '2000'), 'minIOPS': ('1000', '500'), 'maxIOPS': ('10000', '1000')}} host = None testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertTrue(sfv.retype(self.ctxt, testvol, test_type, diff, host)) def test_update_cluster_status(self): self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv._update_cluster_status() self.assertEqual(99.0, sfv.cluster_stats['free_capacity_gb']) self.assertEqual(100.0, sfv.cluster_stats['total_capacity_gb']) def test_manage_existing_volume(self): external_ref = {'name': 'existing volume', 'source-id': 5} testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) sfv = solidfire.SolidFireDriver(configuration=self.configuration) model_update = sfv.manage_existing(testvol, external_ref) self.assertIsNotNone(model_update) self.assertIsNone(model_update.get('provider_geometry', None)) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') def test_create_volume_for_migration(self, _mock_create_template_account, _mock_issue_api_request): _mock_issue_api_request.return_value = self.mock_stats_data _mock_create_template_account.return_value = 1 testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'b830b3c0-d1f0-11e1-9b23-1900200c9a77', 'volume_type_id': None, 'created_at': timeutils.utcnow(), 'migration_status': 'target:' 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} fake_sfaccounts = [{'accountID': 5, 'name': 'testprjid', 'targetSecret': 'shhhh', 'username': 'john-wayne'}] def _fake_do_v_create(project_id, params): return project_id, params sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_account_create_availability', return_value=fake_sfaccounts[0]), \ mock.patch.object(sfv, '_do_volume_create', side_effect=_fake_do_v_create): proj_id, sf_vol_object = sfv.create_volume(testvol) self.assertEqual('a720b3c0-d1f0-11e1-9b23-0800200c9a66', sf_vol_object['attributes']['uuid']) self.assertEqual('b830b3c0-d1f0-11e1-9b23-1900200c9a77', sf_vol_object['attributes']['migration_uuid']) self.assertEqual('UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66', sf_vol_object['name']) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume') def test_verify_image_volume_out_of_date(self, _mock_create_image_volume, _mock_get_sf_volume, _mock_get_sfaccount, _mock_issue_api_request): fake_sf_vref = { 'status': 'active', 'volumeID': 1, 'attributes': { 'image_info': {'image_updated_at': '2014-12-17T00:16:23+00:00', 'image_id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', 'image_name': 'fake-image', 'image_created_at': '2014-12-17T00:16:23+00:00'}}} stats_data =\ {'result': 
{'clusterCapacity': {'maxProvisionedSpace': 107374182400, 'usedSpace': 1073741824, 'compressionPercent': 100, 'deDuplicationPercent': 100, 'thinProvisioningPercent': 100}}} _mock_issue_api_request.return_value = stats_data _mock_get_sfaccount.return_value = {'username': 'openstack-vtemplate', 'accountID': 7777} _mock_get_sf_volume.return_value = fake_sf_vref _mock_create_image_volume.return_value = fake_sf_vref image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', 'updated_at': datetime.datetime(2013, 9, 28, 15, 27, 36, 325355)} image_service = 'null' sfv = solidfire.SolidFireDriver(configuration=self.configuration) _mock_issue_api_request.return_value = {'result': 'ok'} sfv._verify_image_volume(self.ctxt, image_meta, image_service) self.assertTrue(_mock_create_image_volume.called) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume') def test_verify_image_volume_ok(self, _mock_create_image_volume, _mock_get_sf_volume, _mock_get_sfaccount, _mock_issue_api_request): _mock_issue_api_request.return_value = self.mock_stats_data _mock_get_sfaccount.return_value = {'username': 'openstack-vtemplate', 'accountID': 7777} _mock_get_sf_volume.return_value =\ {'status': 'active', 'volumeID': 1, 'attributes': { 'image_info': {'image_updated_at': '2013-09-28T15:27:36.325355', 'image_id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', 'image_name': 'fake-image', 'image_created_at': '2014-12-17T00:16:23+00:00'}}} _mock_create_image_volume.return_value = None image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', 'updated_at': datetime.datetime(2013, 9, 28, 15, 27, 36, 325355)} image_service = 'null' sfv = solidfire.SolidFireDriver(configuration=self.configuration) _mock_issue_api_request.return_value = {'result': 'ok'} sfv._verify_image_volume(self.ctxt, image_meta, image_service) self.assertFalse(_mock_create_image_volume.called) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_clone_image_not_configured(self, _mock_issue_api_request): _mock_issue_api_request.return_value = self.mock_stats_data sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertEqual((None, False), sfv.clone_image(self.ctxt, self.mock_volume, 'fake', self.fake_image_meta, 'fake')) @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') def test_clone_image_authorization(self, _mock_create_template_account): _mock_create_template_account.return_value = 1 self.configuration.sf_allow_template_caching = True sfv = solidfire.SolidFireDriver(configuration=self.configuration) # Make sure if it's NOT public and we're NOT the owner it # doesn't try and cache _fake_image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', 'updated_at': datetime.datetime(2013, 9, 28, 15, 27, 36, 325355), 'properties': {'virtual_size': 1}, 'is_public': False, 'owner': 'wrong-owner'} with mock.patch.object(sfv, '_do_clone_volume', return_value=('fe', 'fi', 'fo')): self.assertEqual((None, False), sfv.clone_image(self.ctxt, self.mock_volume, 'fake', _fake_image_meta, 'fake')) # And is_public False, but the correct owner does work _fake_image_meta['owner'] = 'testprjid' self.assertEqual(('fo', True), sfv.clone_image(self.ctxt, self.mock_volume, 'fake', _fake_image_meta, 'fake')) # And is_public True, even if not the correct owner _fake_image_meta['is_public'] = True 
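            # 'is_public' is the Glance v1 flag; the equivalent v2
            # 'visibility' tag is exercised further down in this test.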
            _fake_image_meta['owner'] = 'wrong-owner'
            self.assertEqual(('fo', True),
                             sfv.clone_image(self.ctxt,
                                             self.mock_volume,
                                             'fake',
                                             _fake_image_meta,
                                             'fake'))

            # And using the new V2 visibility tag
            _fake_image_meta['visibility'] = 'public'
            _fake_image_meta['owner'] = 'wrong-owner'
            self.assertEqual(('fo', True),
                             sfv.clone_image(self.ctxt,
                                             self.mock_volume,
                                             'fake',
                                             _fake_image_meta,
                                             'fake'))

    def test_create_template_no_account(self):
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)

        def _fake_issue_api_req(method, params, version=0):
            if 'GetAccountByName' in method:
                raise exception.SolidFireAPIException
            return {'result': {'accountID': 1}}

        with mock.patch.object(sfv,
                               '_issue_api_request',
                               side_effect=_fake_issue_api_req):
            self.assertEqual(1, sfv._create_template_account('foo'))

    def test_configured_svip(self):
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)

        def _fake_get_volumes(account_id):
            return [{'volumeID': 1, 'iqn': ''}]

        def _fake_get_cluster_info():
            return {'clusterInfo': {'svip': 1}}

        with mock.patch.object(sfv,
                               '_get_volumes_by_sfaccount',
                               side_effect=_fake_get_volumes),\
                mock.patch.object(sfv,
                                  '_issue_api_request',
                                  side_effect=self.fake_issue_api_request):
            sfaccount = {'targetSecret': 'yakitiyak',
                         'accountID': 5,
                         'username': 'bobthebuilder'}
            v = sfv._get_model_info(sfaccount, 1)
            self.assertEqual('1.1.1.1:3260 0', v['provider_location'])

            configured_svip = '9.9.9.9:6500'
            self.configuration.sf_svip = configured_svip
            v = sfv._get_model_info(sfaccount, 1)
            self.assertEqual('%s 0' % configured_svip,
                             v['provider_location'])

    def test_init_volume_mappings(self):
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)

        vid_1 = 'c9125d6d-22ff-4cc3-974d-d4e350df9c91'
        vid_2 = '79883868-6933-47a1-a362-edfbf8d55a18'
        sid_1 = 'e3caa4fa-485e-45ca-970e-1d3e693a2520'
        project_1 = 'e6fb073c-11f0-4f4c-897c-90e7c7c4bcf8'
        project_2 = '4ff32607-305c-4a6b-a51a-0dd33124eecf'

        vrefs = [{'id': vid_1,
                  'project_id': project_1,
                  'provider_id': None},
                 {'id': vid_2,
                  'project_id': project_2,
                  'provider_id': 22}]
        snaprefs = [{'id': sid_1,
                     'project_id': project_1,
                     'provider_id': None,
                     'volume_id': vid_1}]
        sf_vols = [{'volumeID': 99,
                    'name': 'UUID-' + vid_1,
                    'accountID': 100},
                   {'volumeID': 22,
                    'name': 'UUID-' + vid_2,
                    'accountID': 200}]
        sf_snaps = [{'snapshotID': 1,
                     'name': 'UUID-' + sid_1,
                     'volumeID': 99}]

        def _fake_issue_api_req(method, params, version=0):
            if 'ListActiveVolumes' in method:
                return {'result': {'volumes': sf_vols}}
            if 'ListSnapshots' in method:
                return {'result': {'snapshots': sf_snaps}}

        with mock.patch.object(
                sfv, '_issue_api_request', side_effect=_fake_issue_api_req):
            volume_updates, snapshot_updates = sfv.update_provider_info(
                vrefs, snaprefs)
            self.assertEqual('99 100 None', volume_updates[0]['provider_id'])
            self.assertEqual(1, len(volume_updates))
            self.assertEqual('1 99 None',
                             snapshot_updates[0]['provider_id'])
            self.assertEqual(1, len(snapshot_updates))

    def test_get_sf_volume_missing_attributes(self):
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        test_name = "existing_volume"
        fake_response = {'result': {
            'volumes': [{'volumeID': 5,
                         'name': test_name,
                         'accountID': 8,
                         'sliceCount': 1,
                         'totalSize': 1 * units.Gi,
                         'enable512e': True,
                         'access': "readWrite",
                         'status': "active",
                         'qos': None,
                         'iqn': test_name}]}}

        def _fake_issue_api_req(method, params, version=0):
            return fake_response

        with mock.patch.object(
                sfv, '_issue_api_request', side_effect=_fake_issue_api_req):
            self.assertEqual(5, sfv._get_sf_volume(test_name, 8)['volumeID'])
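    # NOTE: update_provider_info() above backfills provider_id strings of
    # the form '<volumeID> <accountID> <third field>' for volumes and
    # '<snapshotID> <volumeID> <third field>' for snapshots; the third
    # field is None here because nothing populates it in these tests.
    # (Field meanings are read off the fixtures: volume 99 on account 100
    # yields '99 100 None', snapshot 1 of volume 99 yields '1 99 None'.)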
    def test_sf_init_conn_with_vag(self):
        # Verify with the _enable_vag conf set that we correctly create a VAG.
        mod_conf = self.configuration
        mod_conf.sf_enable_vag = True
        sfv = solidfire.SolidFireDriver(configuration=mod_conf)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_type_id': None,
                   'provider_location': '10.10.7.1:3260 iqn.2010-01.com.'
                                        'solidfire:87hg.uuid-2cc06226-cc'
                                        '74-4cb7-bd55-14aed659a0cc.4060 0',
                   'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                                    'c76370d66b 2FE0CQ8J196R',
                   'provider_geometry': '4096 4096',
                   'created_at': timeutils.utcnow(),
                   'provider_id': "1 1 1"
                   }
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        provider_id = testvol['provider_id']
        vol_id = int(sfv._parse_provider_id_string(provider_id)[0])
        vag_id = 1

        with mock.patch.object(sfv,
                               '_safe_create_vag',
                               return_value=vag_id) as create_vag, \
            mock.patch.object(sfv,
                              '_add_volume_to_vag') as add_vol:
            sfv._sf_initialize_connection(testvol, connector)
            create_vag.assert_called_with(connector['initiator'], vol_id)
            add_vol.assert_called_with(vol_id,
                                       connector['initiator'],
                                       vag_id)

    def test_sf_term_conn_with_vag_rem_vag(self):
        # Verify we correctly remove an empty VAG on detach.
        mod_conf = self.configuration
        mod_conf.sf_enable_vag = True
        sfv = solidfire.SolidFireDriver(configuration=mod_conf)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_type_id': None,
                   'provider_location': '10.10.7.1:3260 iqn.2010-01.com.'
                                        'solidfire:87hg.uuid-2cc06226-cc'
                                        '74-4cb7-bd55-14aed659a0cc.4060 0',
                   'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                                    'c76370d66b 2FE0CQ8J196R',
                   'provider_geometry': '4096 4096',
                   'created_at': timeutils.utcnow(),
                   'provider_id': "1 1 1",
                   'multiattach': False
                   }
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        vag_id = 1
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': [connector['initiator']],
                 'name': 'fakeiqn',
                 'volumeAccessGroupID': vag_id,
                 'volumes': [1],
                 'virtualNetworkIDs': []}]

        with mock.patch.object(sfv,
                               '_get_vags_by_name',
                               return_value=vags), \
            mock.patch.object(sfv,
                              '_remove_vag') as rem_vag:
            sfv._sf_terminate_connection(testvol, connector, False)
            rem_vag.assert_called_with(vag_id)

    def test_sf_term_conn_with_vag_rem_vol(self):
        # Verify we correctly remove the volume from a non-empty VAG.
        mod_conf = self.configuration
        mod_conf.sf_enable_vag = True
        sfv = solidfire.SolidFireDriver(configuration=mod_conf)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_type_id': None,
                   'provider_location': '10.10.7.1:3260 iqn.2010-01.com.'
                                        'solidfire:87hg.uuid-2cc06226-cc'
                                        '74-4cb7-bd55-14aed659a0cc.4060 0',
                   'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                                    'c76370d66b 2FE0CQ8J196R',
                   'provider_geometry': '4096 4096',
                   'created_at': timeutils.utcnow(),
                   'provider_id': "1 1 1",
                   'multiattach': False
                   }
        provider_id = testvol['provider_id']
        vol_id = int(sfv._parse_provider_id_string(provider_id)[0])
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        vag_id = 1
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': [connector['initiator']],
                 'name': 'fakeiqn',
                 'volumeAccessGroupID': vag_id,
                 'volumes': [1, 2],
                 'virtualNetworkIDs': []}]

        with mock.patch.object(sfv,
                               '_get_vags_by_name',
                               return_value=vags), \
            mock.patch.object(sfv,
                              '_remove_volume_from_vag') as rem_vag:
            sfv._sf_terminate_connection(testvol, connector, False)
            rem_vag.assert_called_with(vol_id, vag_id)

    def test_safe_create_vag_simple(self):
        # Test the sunny day call straight into _create_vag.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'fake_iqn'
        vol_id = 1

        with mock.patch.object(sfv,
                               '_get_vags_by_name',
                               return_value=[]), \
            mock.patch.object(sfv,
                              '_create_vag') as mock_create_vag:
            sfv._safe_create_vag(iqn, vol_id)
            mock_create_vag.assert_called_with(iqn, vol_id)

    def test_safe_create_vag_matching_vag(self):
        # VAG exists, reuse.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': [iqn],
                 'name': iqn,
                 'volumeAccessGroupID': 1,
                 'volumes': [1, 2],
                 'virtualNetworkIDs': []}]

        with mock.patch.object(sfv,
                               '_get_vags_by_name',
                               return_value=vags), \
            mock.patch.object(sfv,
                              '_create_vag') as create_vag, \
            mock.patch.object(sfv,
                              '_add_initiator_to_vag') as add_iqn:
            vag_id = sfv._safe_create_vag(iqn, None)
            self.assertEqual(vag_id, vags[0]['volumeAccessGroupID'])
            create_vag.assert_not_called()
            add_iqn.assert_not_called()

    def test_safe_create_vag_reuse_vag(self):
        # Reuse a matching vag.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': [],
                 'name': iqn,
                 'volumeAccessGroupID': 1,
                 'volumes': [1, 2],
                 'virtualNetworkIDs': []}]
        vag_id = vags[0]['volumeAccessGroupID']

        with mock.patch.object(sfv,
                               '_get_vags_by_name',
                               return_value=vags), \
            mock.patch.object(sfv,
                              '_add_initiator_to_vag',
                              return_value=vag_id) as add_init:
            res_vag_id = sfv._safe_create_vag(iqn, None)
            self.assertEqual(res_vag_id, vag_id)
            add_init.assert_called_with(iqn, vag_id)

    def test_create_vag_iqn_fail(self):
        # Attempt to create a VAG with an already in-use initiator.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vag_id = 1
        vol_id = 42

        def throw_request(method, params, version):
            msg = 'xExceededLimit: {}'.format(params['initiators'][0])
            raise exception.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv,
                               '_issue_api_request',
                               side_effect=throw_request), \
            mock.patch.object(sfv,
                              '_safe_create_vag',
                              return_value=vag_id) as create_vag, \
            mock.patch.object(sfv,
                              '_purge_vags') as purge_vags:
            res_vag_id = sfv._create_vag(iqn, vol_id)
            self.assertEqual(res_vag_id, vag_id)
            create_vag.assert_called_with(iqn, vol_id)
            purge_vags.assert_not_called()

    def test_create_vag_limit_fail(self):
        # Attempt to create a VAG with VAG limit reached.
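        # Unlike the iqn failure above, a bare 'xExceededLimit' (no
        # initiator named in the message) is treated as the cluster-wide
        # VAG limit, so _create_vag purges stale OpenStack VAGs before
        # retrying via _safe_create_vag; see the purge_vags assertion below.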
sfv = solidfire.SolidFireDriver(configuration=self.configuration) iqn = 'TESTIQN' vag_id = 1 vol_id = 42 def throw_request(method, params, version): msg = 'xExceededLimit' raise exception.SolidFireAPIException(message=msg) with mock.patch.object(sfv, '_issue_api_request', side_effect=throw_request), \ mock.patch.object(sfv, '_safe_create_vag', return_value=vag_id) as create_vag, \ mock.patch.object(sfv, '_purge_vags') as purge_vags: res_vag_id = sfv._create_vag(iqn, vol_id) self.assertEqual(res_vag_id, vag_id) create_vag.assert_called_with(iqn, vol_id) purge_vags.assert_called_with() def test_add_initiator_duplicate(self): # Thrown exception should yield vag_id. sfv = solidfire.SolidFireDriver(configuration=self.configuration) iqn = 'TESTIQN' vag_id = 1 def throw_request(method, params, version): msg = 'xAlreadyInVolumeAccessGroup' raise exception.SolidFireAPIException(message=msg) with mock.patch.object(sfv, '_issue_api_request', side_effect=throw_request): res_vag_id = sfv._add_initiator_to_vag(iqn, vag_id) self.assertEqual(vag_id, res_vag_id) def test_add_initiator_missing_vag(self): # Thrown exception should result in create_vag call. sfv = solidfire.SolidFireDriver(configuration=self.configuration) iqn = 'TESTIQN' vag_id = 1 def throw_request(method, params, version): msg = 'xVolumeAccessGroupIDDoesNotExist' raise exception.SolidFireAPIException(message=msg) with mock.patch.object(sfv, '_issue_api_request', side_effect=throw_request), \ mock.patch.object(sfv, '_safe_create_vag', return_value=vag_id) as mock_create_vag: res_vag_id = sfv._add_initiator_to_vag(iqn, vag_id) self.assertEqual(vag_id, res_vag_id) mock_create_vag.assert_called_with(iqn) def test_add_volume_to_vag_duplicate(self): # Thrown exception should yield vag_id sfv = solidfire.SolidFireDriver(configuration=self.configuration) iqn = 'TESTIQN' vag_id = 1 vol_id = 42 def throw_request(method, params, version): msg = 'xAlreadyInVolumeAccessGroup' raise exception.SolidFireAPIException(message=msg) with mock.patch.object(sfv, '_issue_api_request', side_effect=throw_request): res_vag_id = sfv._add_volume_to_vag(vol_id, iqn, vag_id) self.assertEqual(res_vag_id, vag_id) def test_add_volume_to_vag_missing_vag(self): # Thrown exception should yield vag_id sfv = solidfire.SolidFireDriver(configuration=self.configuration) iqn = 'TESTIQN' vag_id = 1 vol_id = 42 def throw_request(method, params, version): msg = 'xVolumeAccessGroupIDDoesNotExist' raise exception.SolidFireAPIException(message=msg) with mock.patch.object(sfv, '_issue_api_request', side_effect=throw_request), \ mock.patch.object(sfv, '_safe_create_vag', return_value=vag_id) as mock_create_vag: res_vag_id = sfv._add_volume_to_vag(vol_id, iqn, vag_id) self.assertEqual(res_vag_id, vag_id) mock_create_vag.assert_called_with(iqn, vol_id) def test_remove_volume_from_vag_missing_volume(self): # Volume not in VAG, throws. sfv = solidfire.SolidFireDriver(configuration=self.configuration) vag_id = 1 vol_id = 42 def throw_request(method, params, version): msg = 'xNotInVolumeAccessGroup' raise exception.SolidFireAPIException(message=msg) with mock.patch.object(sfv, '_issue_api_request', side_effect=throw_request): sfv._remove_volume_from_vag(vol_id, vag_id) def test_remove_volume_from_vag_missing_vag(self): # Volume not in VAG, throws. 
sfv = solidfire.SolidFireDriver(configuration=self.configuration) vag_id = 1 vol_id = 42 def throw_request(method, params, version): msg = 'xVolumeAccessGroupIDDoesNotExist' raise exception.SolidFireAPIException(message=msg) with mock.patch.object(sfv, '_issue_api_request', side_effect=throw_request): sfv._remove_volume_from_vag(vol_id, vag_id) def test_remove_volume_from_vag_unknown_exception(self): # Volume not in VAG, throws. sfv = solidfire.SolidFireDriver(configuration=self.configuration) vag_id = 1 vol_id = 42 def throw_request(method, params, version): msg = 'xUnknownException' raise exception.SolidFireAPIException(message=msg) with mock.patch.object(sfv, '_issue_api_request', side_effect=throw_request): self.assertRaises(exception.SolidFireAPIException, sfv._remove_volume_from_vag, vol_id, vag_id) def test_remove_volume_from_vags(self): # Remove volume from several VAGs. sfv = solidfire.SolidFireDriver(configuration=self.configuration) vol_id = 42 vags = [{'volumeAccessGroupID': 1, 'volumes': [vol_id]}, {'volumeAccessGroupID': 2, 'volumes': [vol_id, 43]}] with mock.patch.object(sfv, '_base_get_vags', return_value=vags), \ mock.patch.object(sfv, '_remove_volume_from_vag') as rem_vol: sfv._remove_volume_from_vags(vol_id) self.assertEqual(len(vags), rem_vol.call_count) def test_purge_vags(self): # Remove subset of VAGs. sfv = solidfire.SolidFireDriver(configuration=self.configuration) vags = [{'initiators': [], 'volumeAccessGroupID': 1, 'deletedVolumes': [], 'volumes': [], 'attributes': {'openstack': True}}, {'initiators': [], 'volumeAccessGroupID': 2, 'deletedVolumes': [], 'volumes': [], 'attributes': {'openstack': False}}, {'initiators': [], 'volumeAccessGroupID': 3, 'deletedVolumes': [1], 'volumes': [], 'attributes': {'openstack': True}}, {'initiators': [], 'volumeAccessGroupID': 4, 'deletedVolumes': [], 'volumes': [1], 'attributes': {'openstack': True}}, {'initiators': ['fakeiqn'], 'volumeAccessGroupID': 5, 'deletedVolumes': [], 'volumes': [], 'attributes': {'openstack': True}}] with mock.patch.object(sfv, '_base_get_vags', return_value=vags), \ mock.patch.object(sfv, '_remove_vag') as rem_vag: sfv._purge_vags() # Of the vags provided there is only one that is valid for purge # based on the limits of no initiators, volumes, deleted volumes, # and features the openstack attribute. self.assertEqual(1, rem_vag.call_count) rem_vag.assert_called_with(1) def test_create_group_snapshot(self): # Sunny day group snapshot creation. 
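        # SolidFire group snapshots back Cinder's consistency-group
        # snapshots; the expected backend call is CreateGroupSnapshot at
        # API version 7.0, asserted below.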
sfv = solidfire.SolidFireDriver(configuration=self.configuration) name = 'great_gsnap_name' sf_volumes = [{'volumeID': 1}, {'volumeID': 42}] expected_params = {'name': name, 'volumes': [1, 42]} fake_result = {'result': 'contrived_test'} with mock.patch.object(sfv, '_issue_api_request', return_value=fake_result) as fake_api: res = sfv._create_group_snapshot(name, sf_volumes) self.assertEqual('contrived_test', res) fake_api.assert_called_with('CreateGroupSnapshot', expected_params, version='7.0') def test_group_snapshot_creator_sunny(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) gsnap_name = 'great_gsnap_name' prefix = sfv.configuration.sf_volume_prefix vol_uuids = ['one', 'two', 'three'] active_vols = [{'name': prefix + 'one'}, {'name': prefix + 'two'}, {'name': prefix + 'three'}] with mock.patch.object(sfv, '_get_all_active_volumes', return_value=active_vols),\ mock.patch.object(sfv, '_create_group_snapshot', return_value=None) as create: sfv._group_snapshot_creator(gsnap_name, vol_uuids) create.assert_called_with(gsnap_name, active_vols) def test_group_snapshot_creator_rainy(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) gsnap_name = 'great_gsnap_name' prefix = sfv.configuration.sf_volume_prefix vol_uuids = ['one', 'two', 'three'] active_vols = [{'name': prefix + 'one'}, {'name': prefix + 'two'}] with mock.patch.object(sfv, '_get_all_active_volumes', return_value=active_vols): self.assertRaises(exception.SolidFireDriverException, sfv._group_snapshot_creator, gsnap_name, vol_uuids) def test_create_temp_group_snapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) cg = {'id': 'great_gsnap_name'} prefix = sfv.configuration.sf_volume_prefix tmp_name = prefix + cg['id'] + '-tmp' vols = [{'id': 'one'}, {'id': 'two'}, {'id': 'three'}] with mock.patch.object(sfv, '_group_snapshot_creator', return_value=None) as create: sfv._create_temp_group_snapshot(cg, vols) create.assert_called_with(tmp_name, ['one', 'two', 'three']) def test_list_group_snapshots(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) res = {'result': {'groupSnapshots': 'a_thing'}} with mock.patch.object(sfv, '_issue_api_request', return_value=res): result = sfv._list_group_snapshots() self.assertEqual('a_thing', result) def test_get_group_snapshot_by_name(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) fake_snaps = [{'name': 'a_fantastic_name'}] with mock.patch.object(sfv, '_list_group_snapshots', return_value=fake_snaps): result = sfv._get_group_snapshot_by_name('a_fantastic_name') self.assertEqual(fake_snaps[0], result) def test_delete_group_snapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) gsnap_id = 1 with mock.patch.object(sfv, '_issue_api_request') as api_req: sfv._delete_group_snapshot(gsnap_id) api_req.assert_called_with('DeleteGroupSnapshot', {'groupSnapshotID': gsnap_id}, version='7.0') def test_delete_cgsnapshot_by_name(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) fake_gsnap = {'groupSnapshotID': 42} with mock.patch.object(sfv, '_get_group_snapshot_by_name', return_value=fake_gsnap),\ mock.patch.object(sfv, '_delete_group_snapshot') as del_stuff: sfv._delete_cgsnapshot_by_name('does not matter') del_stuff.assert_called_with(fake_gsnap['groupSnapshotID']) def test_delete_cgsnapshot_by_name_rainy(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_group_snapshot_by_name', 
return_value=None): self.assertRaises(exception.SolidFireDriverException, sfv._delete_cgsnapshot_by_name, 'does not matter') def test_find_linked_snapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_snap = {'members': [{'volumeID': 1}, {'volumeID': 2}]} source_vol = {'volumeID': 1} with mock.patch.object(sfv, '_get_sf_volume', return_value=source_vol) as get_vol: res = sfv._find_linked_snapshot('fake_uuid', group_snap) self.assertEqual(source_vol, res) get_vol.assert_called_with('fake_uuid') def test_create_consisgroup_from_src_cgsnapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) ctxt = None group = {} volumes = [{'id': 'one'}, {'id': 'two'}, {'id': 'three'}] cgsnapshot = {'id': 'great_uuid'} snapshots = [{'id': 'snap_id_1', 'volume_id': 'one'}, {'id': 'snap_id_2', 'volume_id': 'two'}, {'id': 'snap_id_3', 'volume_id': 'three'}] source_cg = None source_vols = None group_snap = {} name = sfv.configuration.sf_volume_prefix + cgsnapshot['id'] kek = (None, None, {}) with mock.patch.object(sfv, '_get_group_snapshot_by_name', return_value=group_snap) as get_snap,\ mock.patch.object(sfv, '_find_linked_snapshot'),\ mock.patch.object(sfv, '_do_clone_volume', return_value=kek): model, vol_models = sfv.create_consistencygroup_from_src( ctxt, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) get_snap.assert_called_with(name) self.assertEqual( {'status': fields.ConsistencyGroupStatus.AVAILABLE}, model) def test_create_consisgroup_from_src_source_cg(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) ctxt = None group = {} volumes = [{'id': 'one', 'source_volid': 'source_one'}, {'id': 'two', 'source_volid': 'source_two'}, {'id': 'three', 'source_volid': 'source_three'}] cgsnapshot = {'id': 'great_uuid'} snapshots = None source_cg = {'id': 'fantastic_cg'} source_vols = [1, 2, 3] source_snap = None group_snap = {} kek = (None, None, {}) with mock.patch.object(sfv, '_create_temp_group_snapshot', return_value=source_cg['id']),\ mock.patch.object(sfv, '_get_group_snapshot_by_name', return_value=group_snap) as get_snap,\ mock.patch.object(sfv, '_find_linked_snapshot', return_value=source_snap),\ mock.patch.object(sfv, '_do_clone_volume', return_value=kek),\ mock.patch.object(sfv, '_delete_cgsnapshot_by_name'): model, vol_models = sfv.create_consistencygroup_from_src( ctxt, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) get_snap.assert_called_with(source_cg['id']) self.assertEqual( {'status': fields.ConsistencyGroupStatus.AVAILABLE}, model) def test_create_cgsnapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) ctxt = None cgsnapshot = {'id': 'acceptable_cgsnap_id'} snapshots = [{'volume_id': 'one'}, {'volume_id': 'two'}] pfx = sfv.configuration.sf_volume_prefix active_vols = [{'name': pfx + 'one'}, {'name': pfx + 'two'}] with mock.patch.object(sfv, '_get_all_active_volumes', return_value=active_vols),\ mock.patch.object(sfv, '_create_group_snapshot') as create_gsnap: sfv.create_cgsnapshot(ctxt, cgsnapshot, snapshots) create_gsnap.assert_called_with(pfx + cgsnapshot['id'], active_vols) def test_create_cgsnapshot_rainy(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) ctxt = None cgsnapshot = {'id': 'acceptable_cgsnap_id'} snapshots = [{'volume_id': 'one'}, {'volume_id': 'two'}] pfx = sfv.configuration.sf_volume_prefix active_vols = [{'name': pfx + 'one'}] with mock.patch.object(sfv, '_get_all_active_volumes', return_value=active_vols),\ 
mock.patch.object(sfv, '_create_group_snapshot'): self.assertRaises(exception.SolidFireDriverException, sfv.create_cgsnapshot, ctxt, cgsnapshot, snapshots) cinder-8.0.0/cinder/tests/unit/fake_volume.py0000664000567000056710000000726712701406250022371 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import objects from cinder.tests.unit import fake_constants as fake def fake_db_volume(**updates): db_volume = { 'id': fake.volume_id, 'size': 1, 'name': 'volume-%s' % fake.volume_id, 'availability_zone': 'fake_availability_zone', 'status': 'available', 'attach_status': 'detached', 'previous_status': None, 'volume_attachment': [], 'volume_metadata': [], 'volume_admin_metadata': [], 'volume_glance_metadata': [], 'snapshots': [], } for name, field in objects.Volume.fields.items(): if name in db_volume: continue if field.nullable: db_volume[name] = None elif field.default != fields.UnspecifiedDefault: db_volume[name] = field.default else: raise Exception('fake_db_volume needs help with %s.' % name) if updates: db_volume.update(updates) return db_volume def fake_db_volume_type(**updates): db_volume_type = { 'id': fake.volume_type_id, 'name': 'type-1', 'description': 'A fake volume type', 'is_public': True, 'projects': [], 'extra_specs': {}, } for name, field in objects.VolumeType.fields.items(): if name in db_volume_type: continue if field.nullable: db_volume_type[name] = None elif field.default != fields.UnspecifiedDefault: db_volume_type[name] = field.default else: raise Exception('fake_db_volume_type needs help with %s.' % name) if updates: db_volume_type.update(updates) return db_volume_type def fake_db_volume_attachment(**updates): db_volume_attachment = { 'id': fake.attachment_id, 'volume_id': fake.volume_id, } for name, field in objects.VolumeAttachment.fields.items(): if name in db_volume_attachment: continue if field.nullable: db_volume_attachment[name] = None elif field.default != fields.UnspecifiedDefault: db_volume_attachment[name] = field.default else: raise Exception( 'fake_db_volume_attachment needs help with %s.' % name) if updates: db_volume_attachment.update(updates) return db_volume_attachment def fake_volume_obj(context, **updates): expected_attrs = updates.pop('expected_attrs', ['metadata', 'admin_metadata']) vol = objects.Volume._from_db_object(context, objects.Volume(), fake_db_volume(**updates), expected_attrs=expected_attrs) return vol def fake_volume_type_obj(context, **updates): return objects.VolumeType._from_db_object( context, objects.VolumeType(), fake_db_volume_type(**updates)) def fake_volume_attachment_obj(context, **updates): return objects.VolumeAttachment._from_db_object( context, objects.VolumeAttachment(), fake_db_volume_attachment(**updates)) cinder-8.0.0/cinder/tests/unit/test_v7000_fcp.py0000664000567000056710000005050712701406250022532 0ustar jenkinsjenkins00000000000000# Copyright 2015 Violin Memory, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Violin Memory 7000 Series All-Flash Array Fibrechannel Driver """ import mock from cinder import exception from cinder import test from cinder.tests.unit import fake_vmem_client as vmemclient from cinder.volume import configuration as conf from cinder.volume.drivers.violin import v7000_common from cinder.volume.drivers.violin import v7000_fcp VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba" VOLUME = { "name": "volume-" + VOLUME_ID, "id": VOLUME_ID, "display_name": "fake_volume", "size": 2, "host": "myhost", "volume_type": None, "volume_type_id": None, } SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb" SNAPSHOT = { "name": "snapshot-" + SNAPSHOT_ID, "id": SNAPSHOT_ID, "volume_id": VOLUME_ID, "volume_name": "volume-" + VOLUME_ID, "volume_size": 2, "display_name": "fake_snapshot", "volume": VOLUME, } SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" SRC_VOL = { "name": "volume-" + SRC_VOL_ID, "id": SRC_VOL_ID, "display_name": "fake_src_vol", "size": 2, "host": "myhost", "volume_type": None, "volume_type_id": None, } INITIATOR_IQN = "iqn.1111-22.org.debian:11:222" CONNECTOR = { "initiator": INITIATOR_IQN, "host": "irrelevant", 'wwpns': ['50014380186b3f65', '50014380186b3f67'], } FC_TARGET_WWPNS = [ '31000024ff45fb22', '21000024ff45fb23', '51000024ff45f1be', '41000024ff45f1bf' ] FC_INITIATOR_WWPNS = [ '50014380186b3f65', '50014380186b3f67' ] FC_FABRIC_MAP = { 'fabricA': {'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]], 'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]}, 'fabricB': {'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]], 'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]} } FC_INITIATOR_TARGET_MAP = { FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]], FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]] } PHY_DEVICES_RESPONSE = { 'data': {'physical_devices': [{'availsize': 1099504287744, 'availsize_mb': 524284, 'category': 'Virtual Device', 'connection_type': 'block', 'firmware': 'v1.0', 'guid': '3cc4d6dd-166d-77d2-4967-00005463f597', 'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN01 v1.0', 'is_foreign': True, 'name': 'BKSC:OTHDISK-MFCN01.000', 'object_id': '84b834fb-1f4d-5d3b-b7ae-5796f9868151', 'owner': 'example.com', 'pool': None, 'product': 'OTHDISK-MFCN01', 'scsi_address': {'adapter': '98', 'channel': '0', 'id': '0', 'lun': '0', 'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'}, 'size': 1099504287744, 'size_mb': 1048569, 'type': 'Direct-Access', 'usedsize': 0, 'usedsize_mb': 0, 'vendor': 'BKSC', 'wwid': 'BKSC OTHDISK-MFCN01 v1.0-0-0-00'}, {'availsize': 1099504287744, 'availsize_mb': 524284, 'category': 'Virtual Device', 'connection_type': 'block', 'firmware': 'v1.0', 'guid': '283b2694-192b-4745-6768-00005463f673', 'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN08 v1.0', 'is_foreign': False, 'name': 'BKSC:OTHDISK-MFCN08.000', 'object_id': '8555b888-bf43-5083-a433-f0c7b0282370', 'owner': 'example.com', 'pool': {'name': 'mga-pool', 'object_id': 
'0818d3de-4437-535f-9cac-cc100a2c9313'}, 'product': 'OTHDISK-MFCN08', 'scsi_address': {'adapter': '98', 'channel': '0', 'id': '11', 'lun': '0', 'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'}, 'size': 1099504287744, 'size_mb': 1048569, 'type': 'Direct-Access', 'usedsize': 0, 'usedsize_mb': 0, 'vendor': 'BKSC', 'wwid': 'BKSC OTHDISK-MFCN08 v1.0-0-0-00'}, {'availsize': 1099504287744, 'availsize_mb': 1048569, 'category': 'Virtual Device', 'connection_type': 'block', 'firmware': 'v1.0', 'guid': '7f47db19-019c-707d-0df1-00005463f949', 'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN09 v1.0', 'is_foreign': False, 'name': 'BKSC:OTHDISK-MFCN09.000', 'object_id': '62a98898-f8b8-5837-af2b-764f5a72e291', 'owner': 'a.b.c.d', 'pool': {'name': 'mga-pool', 'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'}, 'product': 'OTHDISK-MFCN09', 'scsi_address': {'adapter': '98', 'channel': '0', 'id': '12', 'lun': '0', 'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'}, 'size': 1099504287744, 'size_mb': 524284, 'type': 'Direct-Access', 'usedsize': 0, 'usedsize_mb': 0, 'vendor': 'BKSC', 'wwid': 'BKSC OTHDISK-MFCN09 v1.0-0-0-00'}], 'total_physical_devices': 3}, 'msg': 'Successful', 'success': True } # The FC_INFO dict returned by the backend is keyed on # object_id of the FC adapter and the values are the # wwmns FC_INFO = { '1a3cdb6a-383d-5ba6-a50b-4ba598074510': ['2100001b9745e25e'], '4a6bc10a-5547-5cc0-94f2-76222a8f8dff': ['2100001b9745e230'], 'b21bfff5-d89e-51ff-9920-d990a061d722': ['2100001b9745e25f'], 'b508cc6b-f78a-51f9-81cf-47c1aaf53dd1': ['2100001b9745e231'] } CLIENT_INFO = { 'FCPolicy': {'AS400enabled': False, 'VSAenabled': False, 'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66', '50-01-43-80-18-6b-3f-64']}, 'FibreChannelDevices': [{'access': 'ReadWrite', 'id': 'v0000004', 'initiatorWWPN': '*', 'lun': '8', 'name': 'abcdabcd-1234-abcd-1234-abcdeffedcba', 'sizeMB': 10240, 'targetWWPN': '*', 'type': 'SAN'}] } CLIENT_INFO1 = { 'FCPolicy': {'AS400enabled': False, 'VSAenabled': False, 'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66', '50-01-43-80-18-6b-3f-64']}, 'FibreChannelDevices': [] } class V7000FCPDriverTestCase(test.TestCase): """Test cases for VMEM FCP driver.""" def setUp(self): super(V7000FCPDriverTestCase, self).setUp() self.conf = self.setup_configuration() self.driver = v7000_fcp.V7000FCPDriver(configuration=self.conf) self.driver.common.container = 'myContainer' self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022' self.driver.gateway_fc_wwns = FC_TARGET_WWPNS self.stats = {} self.driver.set_initialized() def tearDown(self): super(V7000FCPDriverTestCase, self).tearDown() def setup_configuration(self): config = mock.Mock(spec=conf.Configuration) config.volume_backend_name = 'v7000_fcp' config.san_ip = '8.8.8.8' config.san_login = 'admin' config.san_password = '' config.san_thin_provision = False config.san_is_local = False config.request_timeout = 300 config.container = 'myContainer' return config def setup_mock_concerto(self, m_conf=None): """Create a fake Concerto communication object.""" _m_concerto = mock.Mock(name='Concerto', version='1.1.1', spec=vmemclient.mock_client_conf) if m_conf: _m_concerto.configure_mock(**m_conf) return _m_concerto @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error') def test_check_for_setup_error(self, m_setup_func): """No setup errors are found.""" result = self.driver.check_for_setup_error() m_setup_func.assert_called_with() self.assertIsNone(result) @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error') def 
test_check_for_setup_error_no_wwn_config(self, m_setup_func): """No wwns were found during setup.""" self.driver.gateway_fc_wwns = [] failure = exception.ViolinInvalidBackendConfig self.assertRaises(failure, self.driver.check_for_setup_error) def test_create_volume(self): """Volume created successfully.""" self.driver.common._create_lun = mock.Mock() result = self.driver.create_volume(VOLUME) self.driver.common._create_lun.assert_called_with(VOLUME) self.assertIsNone(result) def test_create_volume_from_snapshot(self): self.driver.common._create_volume_from_snapshot = mock.Mock() result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) self.driver.common._create_volume_from_snapshot.assert_called_with( SNAPSHOT, VOLUME) self.assertIsNone(result) def test_create_cloned_volume(self): self.driver.common._create_lun_from_lun = mock.Mock() result = self.driver.create_cloned_volume(VOLUME, SRC_VOL) self.driver.common._create_lun_from_lun.assert_called_with( SRC_VOL, VOLUME) self.assertIsNone(result) def test_delete_volume(self): """Volume deleted successfully.""" self.driver.common._delete_lun = mock.Mock() result = self.driver.delete_volume(VOLUME) self.driver.common._delete_lun.assert_called_with(VOLUME) self.assertIsNone(result) def test_extend_volume(self): """Volume extended successfully.""" new_size = 10 self.driver.common._extend_lun = mock.Mock() result = self.driver.extend_volume(VOLUME, new_size) self.driver.common._extend_lun.assert_called_with(VOLUME, new_size) self.assertIsNone(result) def test_create_snapshot(self): self.driver.common._create_lun_snapshot = mock.Mock() result = self.driver.create_snapshot(SNAPSHOT) self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT) self.assertIsNone(result) def test_delete_snapshot(self): self.driver.common._delete_lun_snapshot = mock.Mock() result = self.driver.delete_snapshot(SNAPSHOT) self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT) self.assertIsNone(result) def test_get_volume_stats(self): self.driver._update_volume_stats = mock.Mock() self.driver._update_volume_stats() result = self.driver.get_volume_stats(True) self.driver._update_volume_stats.assert_called_with() self.assertEqual(self.driver.stats, result) @mock.patch('socket.gethostbyaddr') def test_update_volume_stats(self, mock_gethost): """Test Update Volume Stats. Makes a mock query to the backend to collect stats on all physical devices. """ def gethostbyaddr(addr): if addr == '8.8.8.8' or addr == 'example.com': return ('example.com', [], ['8.8.8.8']) else: return ('a.b.c.d', [], addr) mock_gethost.side_effect = gethostbyaddr backend_name = self.conf.volume_backend_name vendor_name = "Violin Memory, Inc." tot_gb = 2046 free_gb = 1022 phy_devices = "/batch/physicalresource/physicaldevice" conf = { 'basic.get.side_effect': [PHY_DEVICES_RESPONSE, ], } self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) result = self.driver._update_volume_stats() calls = [mock.call(phy_devices)] self.driver.common.vmem_mg.basic.get.assert_has_calls(calls) self.assertEqual(tot_gb, self.driver.stats['total_capacity_gb']) self.assertEqual(free_gb, self.driver.stats['free_capacity_gb']) self.assertEqual(backend_name, self.driver.stats['volume_backend_name']) self.assertEqual(vendor_name, self.driver.stats['vendor_name']) self.assertIsNone(result) def test_get_active_fc_targets(self): """Test Get Active FC Targets. Makes a mock query to the backend to collect all the physical adapters and extract the WWNs. 
""" conf = { 'adapter.get_fc_info.return_value': FC_INFO, } self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) result = self.driver._get_active_fc_targets() self.assertEqual({'2100001b9745e230', '2100001b9745e25f', '2100001b9745e231', '2100001b9745e25e'}, set(result)) def test_initialize_connection(self): lun_id = 1 target_wwns = self.driver.gateway_fc_wwns init_targ_map = {} conf = { 'client.create_client.return_value': None, } self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) self.driver._export_lun = mock.Mock(return_value=lun_id) self.driver._build_initiator_target_map = mock.Mock( return_value=(target_wwns, init_targ_map)) props = self.driver.initialize_connection(VOLUME, CONNECTOR) self.driver.common.vmem_mg.client.create_client.assert_called_with( name=CONNECTOR['host'], proto='FC', fc_wwns=CONNECTOR['wwpns']) self.driver._export_lun.assert_called_with(VOLUME, CONNECTOR) self.driver._build_initiator_target_map.assert_called_with( CONNECTOR) self.assertEqual("fibre_channel", props['driver_volume_type']) self.assertTrue(props['data']['target_discovered']) self.assertEqual(self.driver.gateway_fc_wwns, props['data']['target_wwn']) self.assertEqual(lun_id, props['data']['target_lun']) def test_terminate_connection(self): target_wwns = self.driver.gateway_fc_wwns init_targ_map = {} self.driver.common.vmem_mg = self.setup_mock_concerto() self.driver._unexport_lun = mock.Mock() self.driver._is_initiator_connected_to_array = mock.Mock( return_value=False) self.driver._build_initiator_target_map = mock.Mock( return_value=(target_wwns, init_targ_map)) props = self.driver.terminate_connection(VOLUME, CONNECTOR) self.driver._unexport_lun.assert_called_with(VOLUME, CONNECTOR) self.driver._is_initiator_connected_to_array.assert_called_with( CONNECTOR) self.driver._build_initiator_target_map.assert_called_with( CONNECTOR) self.assertEqual("fibre_channel", props['driver_volume_type']) self.assertEqual(target_wwns, props['data']['target_wwn']) self.assertEqual(init_targ_map, props['data']['initiator_target_map']) def test_export_lun(self): lun_id = '1' response = {'success': True, 'msg': 'Assign SAN client successfully'} conf = { 'client.get_client_info.return_value': CLIENT_INFO, } self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) self.driver.common._send_cmd_and_verify = mock.Mock( return_value=response) self.driver._get_lun_id = mock.Mock(return_value=lun_id) result = self.driver._export_lun(VOLUME, CONNECTOR) self.driver.common._send_cmd_and_verify.assert_called_with( self.driver.common.vmem_mg.lun.assign_lun_to_client, self.driver._is_lun_id_ready, 'Assign SAN client successfully', [VOLUME['id'], CONNECTOR['host'], "ReadWrite"], [VOLUME['id'], CONNECTOR['host']]) self.driver._get_lun_id.assert_called_with( VOLUME['id'], CONNECTOR['host']) self.assertEqual(lun_id, result) def test_export_lun_fails_with_exception(self): lun_id = '1' response = {'status': False, 'msg': 'Generic error'} failure = exception.ViolinBackendErr self.driver.common.vmem_mg = self.setup_mock_concerto() self.driver.common._send_cmd_and_verify = mock.Mock( side_effect=exception.ViolinBackendErr(response['msg'])) self.driver._get_lun_id = mock.Mock(return_value=lun_id) self.assertRaises(failure, self.driver._export_lun, VOLUME, CONNECTOR) def test_unexport_lun(self): response = {'success': True, 'msg': 'Unassign SAN client successfully'} self.driver.common.vmem_mg = self.setup_mock_concerto() self.driver.common._send_cmd = mock.Mock( return_value=response) result = 
self.driver._unexport_lun(VOLUME, CONNECTOR) self.driver.common._send_cmd.assert_called_with( self.driver.common.vmem_mg.lun.unassign_client_lun, "Unassign SAN client successfully", VOLUME['id'], CONNECTOR['host'], True) self.assertIsNone(result) def test_get_lun_id(self): conf = { 'client.get_client_info.return_value': CLIENT_INFO, } self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host']) self.assertEqual(8, result) def test_is_lun_id_ready(self): lun_id = '1' self.driver.common.vmem_mg = self.setup_mock_concerto() self.driver._get_lun_id = mock.Mock(return_value=lun_id) result = self.driver._is_lun_id_ready( VOLUME['id'], CONNECTOR['host']) self.assertTrue(result) def test_build_initiator_target_map(self): """Successfully build a map when zoning is enabled.""" expected_targ_wwns = FC_TARGET_WWPNS self.driver.lookup_service = mock.Mock() (self.driver.lookup_service.get_device_mapping_from_network. return_value) = FC_FABRIC_MAP result = self.driver._build_initiator_target_map(CONNECTOR) (targ_wwns, init_targ_map) = result (self.driver.lookup_service.get_device_mapping_from_network. assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns)) self.assertEqual(set(expected_targ_wwns), set(targ_wwns)) i = FC_INITIATOR_WWPNS[0] self.assertIn(FC_TARGET_WWPNS[0], init_targ_map[i]) self.assertIn(FC_TARGET_WWPNS[1], init_targ_map[i]) self.assertEqual(2, len(init_targ_map[i])) i = FC_INITIATOR_WWPNS[1] self.assertIn(FC_TARGET_WWPNS[2], init_targ_map[i]) self.assertIn(FC_TARGET_WWPNS[3], init_targ_map[i]) self.assertEqual(2, len(init_targ_map[i])) self.assertEqual(2, len(init_targ_map)) def test_build_initiator_target_map_no_lookup_service(self): """Successfully build a map when zoning is disabled.""" expected_targ_wwns = FC_TARGET_WWPNS expected_init_targ_map = { CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS, CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS } self.driver.lookup_service = None targ_wwns, init_targ_map = self.driver._build_initiator_target_map( CONNECTOR) self.assertEqual(expected_targ_wwns, targ_wwns) self.assertEqual(expected_init_targ_map, init_targ_map) def test_is_initiator_connected_to_array(self): """Successfully finds an initiator with remaining active session.""" conf = { 'client.get_client_info.return_value': CLIENT_INFO, } self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) self.assertTrue(self.driver._is_initiator_connected_to_array( CONNECTOR)) self.driver.common.vmem_mg.client.get_client_info.assert_called_with( CONNECTOR['host']) def test_is_initiator_connected_to_array_empty_response(self): """Successfully finds no initiators with remaining active sessions.""" conf = { 'client.get_client_info.return_value': CLIENT_INFO1 } self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) self.assertFalse(self.driver._is_initiator_connected_to_array( CONNECTOR)) cinder-8.0.0/cinder/tests/unit/test_nexenta.py0000664000567000056710000005765012701406250022576 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for OpenStack Cinder volume driver """ import mock from oslo_utils import units from cinder import context from cinder import db from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta import iscsi from cinder.volume.drivers.nexenta import jsonrpc from cinder.volume.drivers.nexenta import nfs from cinder.volume.drivers.nexenta import utils class TestNexentaISCSIDriver(test.TestCase): TEST_VOLUME_NAME = 'volume1' TEST_VOLUME_NAME2 = 'volume2' TEST_SNAPSHOT_NAME = 'snapshot1' TEST_VOLUME_REF = { 'name': TEST_VOLUME_NAME, 'size': 1, 'id': '1', 'status': 'available' } TEST_VOLUME_REF2 = { 'name': TEST_VOLUME_NAME2, 'size': 1, 'id': '2', 'status': 'in-use' } TEST_SNAPSHOT_REF = { 'name': TEST_SNAPSHOT_NAME, 'volume_name': TEST_VOLUME_NAME, } def __init__(self, method): super(TestNexentaISCSIDriver, self).__init__(method) def setUp(self): super(TestNexentaISCSIDriver, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.ctxt = context.get_admin_context() self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_host = '1.1.1.1' self.cfg.nexenta_user = 'admin' self.cfg.nexenta_password = 'nexenta' self.cfg.nexenta_volume = 'cinder' self.cfg.nexenta_rest_port = 2000 self.cfg.nexenta_rest_protocol = 'http' self.cfg.nexenta_iscsi_target_portal_port = 3260 self.cfg.nexenta_target_prefix = 'iqn:' self.cfg.nexenta_target_group_prefix = 'cinder/' self.cfg.nexenta_blocksize = '8K' self.cfg.nexenta_sparse = True self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.nexenta_rrmgr_compression = 1 self.cfg.nexenta_rrmgr_tcp_buf_size = 1024 self.cfg.nexenta_rrmgr_connections = 2 self.cfg.reserved_percentage = 20 self.nms_mock = mock.Mock() for mod in ['volume', 'zvol', 'iscsitarget', 'appliance', 'stmf', 'scsidisk', 'snapshot']: setattr(self.nms_mock, mod, mock.Mock()) self.stubs.Set(jsonrpc, 'NexentaJSONProxy', lambda *_, **__: self.nms_mock) self.drv = iscsi.NexentaISCSIDriver( configuration=self.cfg) self.drv.db = db self.drv.do_setup(self.ctxt) def test_check_do_setup(self): self.assertEqual('http', self.drv.nms_protocol) def test_check_for_setup_error(self): self.nms_mock.volume.object_exists.return_value = False self.assertRaises(LookupError, self.drv.check_for_setup_error) def test_local_path(self): self.assertRaises(NotImplementedError, self.drv.local_path, '') def test_create_volume(self): self.drv.create_volume(self.TEST_VOLUME_REF) self.nms_mock.zvol.create.assert_called_with( 'cinder/%s' % self.TEST_VOLUME_REF['name'], '1G', self.cfg.nexenta_blocksize, self.cfg.nexenta_sparse) def test_delete_volume(self): self.nms_mock.zvol.get_child_props.return_value = ( {'origin': 'cinder/volume0@snapshot'}) self.drv.delete_volume(self.TEST_VOLUME_REF) self.nms_mock.zvol.get_child_props.assert_called_with( 'cinder/volume1', 'origin') self.nms_mock.zvol.destroy.assert_called_with( 'cinder/volume1', '') self.nms_mock.zvol.get_child_props.assert_called_with( 'cinder/volume1', 'origin') self.nms_mock.zvol.destroy.assert_called_with('cinder/volume1', '') self.drv.delete_volume(self.TEST_VOLUME_REF) self.nms_mock.zvol.get_child_props.assert_called_with( 'cinder/volume1', 'origin') self.nms_mock.zvol.get_child_props.return_value = ( {'origin': 'cinder/volume0@cinder-clone-snapshot-1'}) self.nms_mock.zvol.destroy.assert_called_with('cinder/volume1', '') 
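        # delete_volume resolves the zvol's 'origin' property; when the
        # origin is a cinder-clone-snapshot-* snapshot, the parent snapshot
        # must be destroyed along with the volume, as asserted next.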
self.drv.delete_volume(self.TEST_VOLUME_REF) self.nms_mock.snapshot.destroy.assert_called_with( 'cinder/volume0@cinder-clone-snapshot-1', '') self.nms_mock.volume.object_exists.assert_called_with('cinder/volume0') def test_create_cloned_volume(self): vol = self.TEST_VOLUME_REF2 src_vref = self.TEST_VOLUME_REF snapshot = { 'volume_name': src_vref['name'], 'name': 'cinder-clone-snapshot-%s' % vol['id'], } self.drv.create_cloned_volume(vol, src_vref) self.nms_mock.zvol.create_snapshot.assert_called_with( 'cinder/%s' % src_vref['name'], snapshot['name'], '') self.nms_mock.zvol.clone.assert_called_with( 'cinder/%s@%s' % (src_vref['name'], snapshot['name']), 'cinder/%s' % vol['name']) def test_migrate_volume(self): volume = self.TEST_VOLUME_REF host = { 'capabilities': { 'vendor_name': 'Nexenta', 'location_info': 'NexentaISCSIDriver:1.1.1.1:cinder', 'free_capacity_gb': 1, 'iscsi_target_portal_port': 3260, 'nms_url': 'http://admin:password@1.1.1.1:2000' } } snapshot = { 'volume_name': volume['name'], 'name': 'cinder-migrate-snapshot-%s' % volume['id'], } volume_name = 'cinder/%s' % volume['name'] self.nms_mock.appliance.ssh_list_bindings.return_value = ( {'0': [True, True, True, '1.1.1.1']}) self.nms_mock.zvol.get_child_props.return_value = None self.drv.migrate_volume(None, volume, host) self.nms_mock.zvol.create_snapshot.assert_called_with( 'cinder/%s' % volume['name'], snapshot['name'], '') src = '%(volume)s/%(zvol)s@%(snapshot)s' % { 'volume': 'cinder', 'zvol': volume['name'], 'snapshot': snapshot['name'] } dst = '1.1.1.1:cinder' cmd = ' '.join(['rrmgr -s zfs -c 1 -q -e -w 1024 -n 2', src, dst]) self.nms_mock.appliance.execute.assert_called_with(cmd) snapshot_name = 'cinder/%(volume)s@%(snapshot)s' % { 'volume': volume['name'], 'snapshot': snapshot['name'] } self.nms_mock.snapshot.destroy.assert_called_with(snapshot_name, '') self.nms_mock.zvol.destroy.assert_called_with(volume_name, '') self.nms_mock.snapshot.destroy.assert_called_with( 'cinder/%(volume)s@%(snapshot)s' % { 'volume': volume['name'], 'snapshot': snapshot['name'] }, '') self.nms_mock.volume.object_exists.assert_called_with(volume_name) self.mox.ReplayAll() def test_create_snapshot(self): self.drv.create_snapshot(self.TEST_SNAPSHOT_REF) self.nms_mock.zvol.create_snapshot.assert_called_with( 'cinder/volume1', 'snapshot1', '') def test_create_volume_from_snapshot(self): self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2, self.TEST_SNAPSHOT_REF) self.nms_mock.zvol.clone.assert_called_with( 'cinder/volume1@snapshot1', 'cinder/volume2') def test_delete_snapshot(self): self._create_volume_db_entry() self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) self.nms_mock.snapshot.destroy.assert_called_with( 'cinder/volume1@snapshot1', '') self.nms_mock.volume.object_exists.assert_called_with( 'cinder/volume1') # Check that exception not raised if snapshot does not exist self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) self.nms_mock.snapshot.destroy.side_effect = ( exception.NexentaException('does not exist')) self.nms_mock.snapshot.destroy.assert_called_with( 'cinder/volume1@snapshot1', '') self.nms_mock.volume.object_exists.assert_called_with( 'cinder/volume1') def _mock_all_export_methods(self, fail=False): self.assertTrue(self.nms_mock.stmf.list_targets.called) self.nms_mock.iscsitarget.create_target.assert_called_with( {'target_name': 'iqn:1.1.1.1-0'}) self.nms_mock.stmf.list_targetgroups() zvol_name = 'cinder/volume1' self.nms_mock.stmf.create_targetgroup.assert_called_with( 'cinder/1.1.1.1-0') 
self.nms_mock.stmf.list_targetgroup_members.assert_called_with( 'cinder/1.1.1.1-0') self.nms_mock.scsidisk.lu_exists.assert_called_with(zvol_name) self.nms_mock.scsidisk.create_lu.assert_called_with(zvol_name, {}) def _stub_all_export_methods(self): self.nms_mock.scsidisk.lu_exists.return_value = False self.nms_mock.scsidisk.lu_shared.side_effect = ( exception.NexentaException(['does not exist for zvol'])) self.nms_mock.scsidisk.create_lu.return_value = {'lun': 0} self.nms_mock.stmf.list_targets.return_value = [] self.nms_mock.stmf.list_targetgroups.return_value = [] self.nms_mock.stmf.list_targetgroup_members.return_value = [] self.nms_mock._get_target_name.return_value = ['iqn:1.1.1.1-0'] self.nms_mock.iscsitarget.create_targetgroup.return_value = ({ 'target_name': 'cinder/1.1.1.1-0'}) self.nms_mock.scsidisk.add_lun_mapping_entry.return_value = {'lun': 0} def test_create_export(self): self._stub_all_export_methods() retval = self.drv.create_export({}, self.TEST_VOLUME_REF, None) self._mock_all_export_methods() location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { 'host': self.cfg.nexenta_host, 'port': self.cfg.nexenta_iscsi_target_portal_port, 'name': 'iqn:1.1.1.1-0', 'lun': '0' } self.assertEqual({'provider_location': location}, retval) def test_ensure_export(self): self._stub_all_export_methods() self.drv.ensure_export({}, self.TEST_VOLUME_REF) self._mock_all_export_methods() def test_remove_export(self): self.nms_mock.stmf.list_targets.return_value = ['iqn:1.1.1.1-0'] self.nms_mock.stmf.list_targetgroups.return_value = ( ['cinder/1.1.1.1-0']) self.nms_mock.stmf.list_targetgroup_members.return_value = ( ['iqn:1.1.1.1-0']) self.drv.remove_export({}, self.TEST_VOLUME_REF) self.assertTrue(self.nms_mock.stmf.list_targets.called) self.assertTrue(self.nms_mock.stmf.list_targetgroups.called) self.nms_mock.scsidisk.delete_lu.assert_called_with('cinder/volume1') def test_get_volume_stats(self): stats = {'size': '5368709120G', 'used': '5368709120G', 'available': '5368709120G', 'health': 'ONLINE'} self.nms_mock.volume.get_child_props.return_value = stats stats = self.drv.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual(5368709120.0, stats['total_capacity_gb']) self.assertEqual(5368709120.0, stats['free_capacity_gb']) self.assertEqual(20, stats['reserved_percentage']) self.assertFalse(stats['QoS_support']) def _create_volume_db_entry(self): vol = { 'id': '1', 'size': 1, 'status': 'available', 'provider_location': self.TEST_VOLUME_NAME } return db.volume_create(self.ctxt, vol)['id'] class TestNexentaNfsDriver(test.TestCase): TEST_EXPORT1 = 'host1:/volumes/stack/share' TEST_NMS1 = 'http://admin:nexenta@host1:2000' TEST_EXPORT2 = 'host2:/volumes/stack/share' TEST_NMS2 = 'http://admin:nexenta@host2:2000' TEST_EXPORT2_OPTIONS = '-o intr' TEST_FILE_NAME = 'test.txt' TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf' TEST_SHARE_SVC = 'svc:/network/nfs/server:default' TEST_SHARE_OPTS = { 'read_only': '', 'read_write': '*', 'recursive': 'true', 'anonymous_rw': 'true', 'extra_options': 'anon=0', 'root': 'nobody' } def _create_volume_db_entry(self): vol = { 'id': '1', 'size': 1, 'status': 'available', 'provider_location': self.TEST_EXPORT1 } return db.volume_create(self.ctxt, vol)['id'] def setUp(self): super(TestNexentaNfsDriver, self).setUp() self.ctxt = context.get_admin_context() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_shares_config = None self.cfg.nexenta_mount_point_base = 
'$state_path/mnt'
        self.cfg.nexenta_sparsed_volumes = True
        self.cfg.nexenta_dataset_compression = 'on'
        self.cfg.nexenta_dataset_dedup = 'off'
        self.cfg.nexenta_rrmgr_compression = 1
        self.cfg.nexenta_rrmgr_tcp_buf_size = 1024
        self.cfg.nexenta_rrmgr_connections = 2
        self.cfg.nfs_mount_point_base = '/mnt/test'
        self.cfg.nfs_mount_options = None
        self.cfg.nas_mount_options = None
        self.cfg.nexenta_nms_cache_volroot = False
        self.cfg.nfs_mount_attempts = 3
        self.cfg.reserved_percentage = 20
        self.cfg.max_over_subscription_ratio = 20.0
        self.nms_mock = mock.Mock()
        for mod in ('appliance', 'folder', 'server', 'volume', 'netstorsvc',
                    'snapshot', 'netsvc'):
            setattr(self.nms_mock, mod, mock.Mock())
        self.nms_mock.__hash__ = lambda *_, **__: 1
        self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
                       lambda *_, **__: self.nms_mock)
        self.drv = nfs.NexentaNfsDriver(configuration=self.cfg)
        self.drv.shares = {}
        self.drv.share2nms = {}

    def test_check_for_setup_error(self):
        self.drv.share2nms = {
            'host1:/volumes/stack/share': self.nms_mock
        }
        self.nms_mock.server.get_prop.return_value = '/volumes'
        self.nms_mock.volume.object_exists.return_value = True
        self.nms_mock.folder.object_exists.return_value = True
        share_opts = {
            'read_write': '*',
            'read_only': '',
            'root': 'nobody',
            'extra_options': 'anon=0',
            'recursive': 'true',
            'anonymous_rw': 'true',
        }
        self.drv.check_for_setup_error()
        self.nms_mock.netstorsvc.share_folder.assert_called_with(
            'svc:/network/nfs/server:default', 'stack/share', share_opts)
        self.nms_mock.server.get_prop.return_value = '/volumes'
        self.nms_mock.volume.object_exists.return_value = False
        self.assertRaises(LookupError, self.drv.check_for_setup_error)
        self.nms_mock.server.get_prop.return_value = '/volumes'
        self.nms_mock.volume.object_exists.return_value = True
        self.nms_mock.folder.object_exists.return_value = False
        self.assertRaises(LookupError, self.drv.check_for_setup_error)

    def test_initialize_connection(self):
        self.drv.shares = {
            self.TEST_EXPORT1: None
        }
        volume = {
            'provider_location': self.TEST_EXPORT1,
            'name': 'volume'
        }
        result = self.drv.initialize_connection(volume, None)
        self.assertEqual('%s/volume' % self.TEST_EXPORT1,
                         result['data']['export'])

    def test_do_create_volume(self):
        volume = {
            'provider_location': self.TEST_EXPORT1,
            'size': 1,
            'name': 'volume-1'
        }
        self.drv.shares = {self.TEST_EXPORT1: None}
        self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
        compression = self.cfg.nexenta_dataset_compression
        self.nms_mock.server.get_prop.return_value = '/volumes'
        self.nms_mock.netsvc.get_confopts.return_value = {
            'nfs_server_versmax': {'current': 4}}
        self.nms_mock._ensure_share_mounted.return_value = True
        self.drv._do_create_volume(volume)
        self.nms_mock.folder.create_with_props.assert_called_with(
            'stack', 'share/volume-1', {'compression': compression})
        self.nms_mock.netstorsvc.share_folder.assert_called_with(
            self.TEST_SHARE_SVC, 'stack/share/volume-1',
            self.TEST_SHARE_OPTS)
        mock_chmod = self.nms_mock.appliance.execute
        mock_chmod.assert_called_with(
            'chmod ugo+rw /volumes/stack/share/volume-1/volume')
        mock_truncate = self.nms_mock.appliance.execute
        mock_truncate.side_effect = exception.NexentaException()
        self.nms_mock.server.get_prop.return_value = '/volumes'
        self.assertRaises(exception.NexentaException,
                          self.drv._do_create_volume, volume)

    def test_create_sparsed_file(self):
        self.drv._create_sparsed_file(self.nms_mock, '/tmp/path', 1)
self.nms_mock.appliance.execute.assert_called_with( 'truncate --size 1G /tmp/path') def test_create_regular_file(self): self.drv._create_regular_file(self.nms_mock, '/tmp/path', 1) self.nms_mock.appliance.execute.assert_called_with( 'dd if=/dev/zero of=/tmp/path bs=1M count=1024') def test_set_rw_permissions_for_all(self): path = '/tmp/path' self.drv._set_rw_permissions_for_all(self.nms_mock, path) self.nms_mock.appliance.execute.assert_called_with( 'chmod ugo+rw %s' % path) def test_local_path(self): volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} path = self.drv.local_path(volume) self.assertEqual( '$state_path/mnt/b3f660847a52b29ac330d8555e4ad669/volume-1/volume', path ) def test_remote_path(self): volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} path = self.drv.remote_path(volume) self.assertEqual('/volumes/stack/share/volume-1/volume', path) def test_share_folder(self): self.drv._share_folder(self.nms_mock, 'stack', 'share/folder') path = 'stack/share/folder' self.nms_mock.netstorsvc.share_folder.assert_called_with( self.TEST_SHARE_SVC, path, self.TEST_SHARE_OPTS) def test_load_shares_config(self): self.drv.configuration.nfs_shares_config = ( self.TEST_SHARES_CONFIG_FILE) config_data = [ '%s %s' % (self.TEST_EXPORT1, self.TEST_NMS1), '# %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2), '', '%s %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2, self.TEST_EXPORT2_OPTIONS) ] with mock.patch.object(self.drv, '_read_config_file') as \ mock_read_config_file: mock_read_config_file.return_value = config_data self.drv._load_shares_config( self.drv.configuration.nfs_shares_config) self.assertIn(self.TEST_EXPORT1, self.drv.shares) self.assertIn(self.TEST_EXPORT2, self.drv.shares) self.assertEqual(2, len(self.drv.shares)) self.assertIn(self.TEST_EXPORT1, self.drv.share2nms) self.assertIn(self.TEST_EXPORT2, self.drv.share2nms) self.assertEqual(2, len(self.drv.share2nms.keys())) self.assertEqual(self.TEST_EXPORT2_OPTIONS, self.drv.shares[self.TEST_EXPORT2]) def test_get_capacity_info(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.folder.get_child_props.return_value = { 'available': '1G', 'used': '2G' } total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1) self.assertEqual(3 * units.Gi, total) self.assertEqual(units.Gi, free) self.assertEqual(2 * units.Gi, allocated) def test_get_share_datasets(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self.nms_mock.server.get_prop.return_value = '/volumes' volume_name, folder_name = ( self.drv._get_share_datasets(self.TEST_EXPORT1)) self.assertEqual('stack', volume_name) self.assertEqual('share', folder_name) def test_delete_snapshot(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self._create_volume_db_entry() self.nms_mock.server.get_prop.return_value = '/volumes' self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'}) self.nms_mock.snapshot.destroy.assert_called_with( 'stack/share/volume-1@snapshot1', '') def test_delete_volume(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self._create_volume_db_entry() self.drv._ensure_share_mounted = lambda *_, **__: 0 self.drv._execute = lambda *_, **__: 0 self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.folder.get_child_props.return_value = None self.drv.delete_volume({ 'id': '1', 'name': 'volume-1', 'provider_location': self.TEST_EXPORT1 }) self.nms_mock.folder.destroy.assert_called_with( 
'stack/share/volume-1', '-r') # Check that exception not raised if folder does not exist on # NexentaStor appliance. mock = self.nms_mock.folder.destroy mock.side_effect = exception.NexentaException('Folder does not exist') self.drv.delete_volume({ 'id': '1', 'name': 'volume-1', 'provider_location': self.TEST_EXPORT1 }) class TestNexentaUtils(test.TestCase): def test_str2size(self): values_to_test = ( # Test empty value (None, 0), ('', 0), ('0', 0), ('12', 12), # Test int values (10, 10), # Test bytes string ('1b', 1), ('1B', 1), ('1023b', 1023), ('0B', 0), # Test other units ('1M', units.Mi), ('1.0M', units.Mi), ) for value, result in values_to_test: self.assertEqual(result, utils.str2size(value)) # Invalid format value self.assertRaises(ValueError, utils.str2size, 'A') def test_str2gib_size(self): self.assertEqual(1, utils.str2gib_size('1024M')) self.assertEqual(300 * units.Mi // units.Gi, utils.str2gib_size('300M')) self.assertEqual(1.2 * units.Ti // units.Gi, utils.str2gib_size('1.2T')) self.assertRaises(ValueError, utils.str2gib_size, 'A') def test_parse_nms_url(self): urls = ( ('http://192.168.1.1/', (False, 'http', 'admin', 'nexenta', '192.168.1.1', '2000', '/rest/nms/')), ('http://192.168.1.1:8080', (False, 'http', 'admin', 'nexenta', '192.168.1.1', '8080', '/rest/nms/')), ('https://root:password@192.168.1.1:8080', (False, 'https', 'root', 'password', '192.168.1.1', '8080', '/rest/nms/')), ) for url, result in urls: self.assertEqual(result, utils.parse_nms_url(url)) cinder-8.0.0/cinder/tests/unit/windows/0000775000567000056710000000000012701406543021205 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/windows/test_windows_remotefs.py0000664000567000056710000000642112701406250026212 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
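# --- Illustrative sketch (not part of the original test modules) ---
# TestNexentaUtils above pins down the contract of utils.str2size:
# bare numbers are bytes, a one-letter suffix scales by binary units,
# and anything unparseable raises ValueError. The function below is a
# minimal reference implementation consistent with those test cases;
# the real cinder.volume.drivers.nexenta.utils code may differ.
import re

_BINARY_UNITS = {'': 1, 'B': 1, 'K': 1024, 'M': 1024 ** 2,
                 'G': 1024 ** 3, 'T': 1024 ** 4}


def str2size_sketch(value):
    if not value:
        return 0
    if isinstance(value, int):
        return value
    match = re.match(r'^([0-9.]+)\s*([A-Za-z]?)$', str(value))
    if match is None:
        raise ValueError('Invalid value: %r' % value)
    number, suffix = match.groups()
    return int(float(number) * _BINARY_UNITS[suffix.upper()])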
import mock from cinder import exception from cinder import test from cinder.volume.drivers.windows import remotefs class WindowsRemoteFsTestCase(test.TestCase): def setUp(self): super(WindowsRemoteFsTestCase, self).setUp() with mock.patch.object(remotefs.WindowsRemoteFsClient, '__init__', lambda x: None): self._remotefs = remotefs.WindowsRemoteFsClient() self._remotefs._mount_base = mock.sentinel.mnt_base self._remotefs._smbutils = mock.Mock() self._remotefs._pathutils = mock.Mock() @mock.patch('os.path.isdir') @mock.patch('os.makedirs') @mock.patch('os.path.exists') @mock.patch('os.path.abspath') @mock.patch.object(remotefs.WindowsRemoteFsClient, 'get_mount_point') def _test_mount_share(self, mock_get_mnt_point, mock_abspath, mock_path_exists, mock_makedirs, mock_isdir, mnt_point_exists=False, is_mnt_point_slink=True): mount_options = dict(username=mock.sentinel.username, password=mock.sentinel.password) mock_isdir.return_value = False mock_get_mnt_point.return_value = mock.sentinel.mnt_point mock_abspath.return_value = mock.sentinel.norm_export_path mock_path_exists.return_value = mnt_point_exists self._remotefs._pathutils.is_symlink.return_value = is_mnt_point_slink self._remotefs._smbutils.check_smb_mapping.return_value = False if mnt_point_exists and not is_mnt_point_slink: self.assertRaises(exception.SmbfsException, self._remotefs.mount, mock.sentinel.export_path, mount_options) else: self._remotefs.mount(mock.sentinel.export_path, mount_options) mock_makedirs.assert_called_once_with(mock.sentinel.mnt_base) mock_get_mnt_point.assert_called_once_with(mock.sentinel.export_path) self._remotefs._smbutils.check_smb_mapping.assert_called_once_with( mock.sentinel.norm_export_path, remove_unavailable_mapping=True) self._remotefs._smbutils.mount_smb_share.assert_called_once_with( mock.sentinel.norm_export_path, **mount_options) if not mnt_point_exists: self._remotefs._pathutils.create_sym_link.assert_called_once_with( mock.sentinel.mnt_point, mock.sentinel.norm_export_path) def test_mount_share(self): self._test_mount_share() def test_mount_share_existing_mnt_point_not_symlink(self): self._test_mount_share(mnt_point_exists=True, is_mnt_point_slink=False) cinder-8.0.0/cinder/tests/unit/windows/test_windows.py0000664000567000056710000004352612701406250024315 0ustar jenkinsjenkins00000000000000# Copyright 2012 Pedro Navarro Perez # Copyright 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
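# --- Illustrative sketch (not part of the original test modules) ---
# WindowsRemoteFsTestCase above pins down the mount flow that
# _test_mount_share exercises: ensure the mount base directory exists,
# reuse or create the SMB mapping, and expose the share through a
# symlink. The helper below is a hypothetical restatement of that flow
# for readability only; the utility calls (check_smb_mapping,
# mount_smb_share, is_symlink, create_sym_link) are the names the test
# mocks, but the real WindowsRemoteFsClient code may differ.
import os


def mount_share_sketch(smbutils, pathutils, mnt_base, export_path,
                       username=None, password=None):
    if not os.path.isdir(mnt_base):
        os.makedirs(mnt_base)  # ensure the mount base exists
    norm_path = os.path.abspath(export_path)
    if not smbutils.check_smb_mapping(norm_path,
                                      remove_unavailable_mapping=True):
        smbutils.mount_smb_share(norm_path, username=username,
                                 password=password)
    # The mount point is a symlink named after the export path; the
    # name derivation here is a stand-in for get_mount_point().
    mnt_point = os.path.join(mnt_base, str(abs(hash(norm_path))))
    if os.path.exists(mnt_point):
        if not pathutils.is_symlink(mnt_point):
            # The driver raises exception.SmbfsException here.
            raise RuntimeError('Mount point exists and is not a symlink')
    else:
        pathutils.create_sym_link(mnt_point, norm_path)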
""" Unit tests for Windows Server 2012 OpenStack Cinder volume driver """ import mock import os from oslo_utils import fileutils from oslo_utils import units from cinder.image import image_utils from cinder import test from cinder.tests.unit.windows import db_fakes from cinder.volume import configuration as conf from cinder.volume.drivers.windows import windows class TestWindowsDriver(test.TestCase): @mock.patch.object(windows, 'utilsfactory') def setUp(self, mock_utilsfactory): super(TestWindowsDriver, self).setUp() configuration = conf.Configuration(None) configuration.append_config_values(windows.windows_opts) self.flags(windows_iscsi_lun_path=mock.sentinel.iscsi_lun_path) self.flags(image_conversion_dir=mock.sentinel.image_conversion_dir) self._driver = windows.WindowsDriver(configuration=configuration) @mock.patch.object(fileutils, 'ensure_tree') def test_do_setup(self, mock_ensure_tree): self._driver.do_setup(mock.sentinel.context) mock_ensure_tree.assert_has_calls( [mock.call(mock.sentinel.iscsi_lun_path), mock.call(mock.sentinel.image_conversion_dir)]) def test_check_for_setup_error(self): self._driver.check_for_setup_error() self._driver._tgt_utils.get_portal_locations.assert_called_once_with( available_only=True, fail_if_none_found=True) @mock.patch.object(windows.WindowsDriver, '_get_target_name') def test_get_host_information(self, mock_get_target_name): tgt_utils = self._driver._tgt_utils fake_auth_meth = 'CHAP' fake_chap_username = 'fake_chap_username' fake_chap_password = 'fake_chap_password' fake_host_info = {'fake_prop': 'fake_value'} fake_volume = db_fakes.get_fake_volume_info() fake_volume['provider_auth'] = "%s %s %s" % (fake_auth_meth, fake_chap_username, fake_chap_password) mock_get_target_name.return_value = mock.sentinel.target_name tgt_utils.get_portal_locations.return_value = [ mock.sentinel.portal_location] tgt_utils.get_target_information.return_value = fake_host_info expected_host_info = dict(fake_host_info, auth_method=fake_auth_meth, auth_username=fake_chap_username, auth_password=fake_chap_password, target_discovered=False, target_portal=mock.sentinel.portal_location, target_lun=0, volume_id=fake_volume['id']) host_info = self._driver._get_host_information(fake_volume) self.assertEqual(expected_host_info, host_info) mock_get_target_name.assert_called_once_with(fake_volume) tgt_utils.get_portal_locations.assert_called_once_with() tgt_utils.get_target_information.assert_called_once_with( mock.sentinel.target_name) @mock.patch.object(windows.WindowsDriver, '_get_host_information') def test_initialize_connection(self, mock_get_host_info): tgt_utils = self._driver._tgt_utils fake_volume = db_fakes.get_fake_volume_info() fake_initiator = db_fakes.get_fake_connector_info() fake_host_info = {'fake_host_prop': 'fake_value'} mock_get_host_info.return_value = fake_host_info expected_conn_info = {'driver_volume_type': 'iscsi', 'data': fake_host_info} conn_info = self._driver.initialize_connection(fake_volume, fake_initiator) self.assertEqual(expected_conn_info, conn_info) mock_associate = tgt_utils.associate_initiator_with_iscsi_target mock_associate.assert_called_once_with( fake_initiator['initiator'], fake_volume['provider_location']) def test_terminate_connection(self): fake_volume = db_fakes.get_fake_volume_info() fake_initiator = db_fakes.get_fake_connector_info() self._driver.terminate_connection(fake_volume, fake_initiator) self._driver._tgt_utils.deassociate_initiator.assert_called_once_with( fake_initiator['initiator'], fake_volume['provider_location']) 
@mock.patch.object(windows.WindowsDriver, 'local_path') def test_create_volume(self, mock_local_path): fake_volume = db_fakes.get_fake_volume_info() self._driver.create_volume(fake_volume) mock_local_path.assert_called_once_with(fake_volume) self._driver._tgt_utils.create_wt_disk.assert_called_once_with( mock_local_path.return_value, fake_volume['name'], size_mb=fake_volume['size'] * 1024) def test_local_path(self): fake_volume = db_fakes.get_fake_volume_info() fake_lun_path = 'fake_lun_path' self.flags(windows_iscsi_lun_path=fake_lun_path) disk_format = 'vhd' mock_get_fmt = self._driver._tgt_utils.get_supported_disk_format mock_get_fmt.return_value = disk_format disk_path = self._driver.local_path(fake_volume) expected_fname = "%s.%s" % (fake_volume['name'], disk_format) expected_disk_path = os.path.join(fake_lun_path, expected_fname) self.assertEqual(expected_disk_path, disk_path) mock_get_fmt.assert_called_once_with() @mock.patch.object(windows.WindowsDriver, 'local_path') @mock.patch.object(fileutils, 'delete_if_exists') def test_delete_volume(self, mock_delete_if_exists, mock_local_path): fake_volume = db_fakes.get_fake_volume_info() self._driver.delete_volume(fake_volume) mock_local_path.assert_called_once_with(fake_volume) self._driver._tgt_utils.remove_wt_disk.assert_called_once_with( fake_volume['name']) mock_delete_if_exists.assert_called_once_with( mock_local_path.return_value) def test_create_snapshot(self): fake_snapshot = db_fakes.get_fake_snapshot_info() self._driver.create_snapshot(fake_snapshot) self._driver._tgt_utils.create_snapshot.assert_called_once_with( fake_snapshot['volume_name'], fake_snapshot['name']) @mock.patch.object(windows.WindowsDriver, 'local_path') def test_create_volume_from_snapshot(self, mock_local_path): fake_volume = db_fakes.get_fake_volume_info() fake_snapshot = db_fakes.get_fake_snapshot_info() self._driver.create_volume_from_snapshot(fake_volume, fake_snapshot) self._driver._tgt_utils.export_snapshot.assert_called_once_with( fake_snapshot['name'], mock_local_path.return_value) self._driver._tgt_utils.import_wt_disk.assert_called_once_with( mock_local_path.return_value, fake_volume['name']) def test_delete_snapshot(self): fake_snapshot = db_fakes.get_fake_snapshot_info() self._driver.delete_snapshot(fake_snapshot) self._driver._tgt_utils.delete_snapshot.assert_called_once_with( fake_snapshot['name']) def test_get_target_name(self): fake_volume = db_fakes.get_fake_volume_info() expected_target_name = "%s%s" % ( self._driver.configuration.iscsi_target_prefix, fake_volume['name']) target_name = self._driver._get_target_name(fake_volume) self.assertEqual(expected_target_name, target_name) @mock.patch.object(windows.WindowsDriver, '_get_target_name') @mock.patch.object(windows.utils, 'generate_username') @mock.patch.object(windows.utils, 'generate_password') def test_create_export(self, mock_generate_password, mock_generate_username, mock_get_target_name): tgt_utils = self._driver._tgt_utils fake_volume = db_fakes.get_fake_volume_info() self._driver.configuration.chap_username = None self._driver.configuration.chap_password = None self._driver.configuration.use_chap_auth = True fake_chap_username = 'fake_chap_username' fake_chap_password = 'fake_chap_password' mock_get_target_name.return_value = mock.sentinel.target_name mock_generate_username.return_value = fake_chap_username mock_generate_password.return_value = fake_chap_password tgt_utils.iscsi_target_exists.return_value = False vol_updates = self._driver.create_export(mock.sentinel.context, 
fake_volume, mock.sentinel.connector) mock_get_target_name.assert_called_once_with(fake_volume) tgt_utils.iscsi_target_exists.assert_called_once_with( mock.sentinel.target_name) tgt_utils.set_chap_credentials.assert_called_once_with( mock.sentinel.target_name, fake_chap_username, fake_chap_password) tgt_utils.add_disk_to_target.assert_called_once_with( fake_volume['name'], mock.sentinel.target_name) expected_provider_auth = ' '.join(('CHAP', fake_chap_username, fake_chap_password)) expected_vol_updates = dict( provider_location=mock.sentinel.target_name, provider_auth=expected_provider_auth) self.assertEqual(expected_vol_updates, vol_updates) @mock.patch.object(windows.WindowsDriver, '_get_target_name') def test_remove_export(self, mock_get_target_name): fake_volume = db_fakes.get_fake_volume_info() self._driver.remove_export(mock.sentinel.context, fake_volume) mock_get_target_name.assert_called_once_with(fake_volume) self._driver._tgt_utils.delete_iscsi_target.assert_called_once_with( mock_get_target_name.return_value) @mock.patch.object(windows.WindowsDriver, 'local_path') @mock.patch.object(image_utils, 'temporary_file') @mock.patch.object(image_utils, 'fetch_to_vhd') @mock.patch('os.unlink') def test_copy_image_to_volume(self, mock_unlink, mock_fetch_to_vhd, mock_tmp_file, mock_local_path): tgt_utils = self._driver._tgt_utils fake_volume = db_fakes.get_fake_volume_info() mock_tmp_file.return_value.__enter__.return_value = ( mock.sentinel.tmp_vhd_path) mock_local_path.return_value = mock.sentinel.vol_vhd_path self._driver.copy_image_to_volume(mock.sentinel.context, fake_volume, mock.sentinel.image_service, mock.sentinel.image_id) mock_local_path.assert_called_once_with(fake_volume) mock_tmp_file.assert_called_once_with(suffix='.vhd') image_utils.fetch_to_vhd.assert_called_once_with( mock.sentinel.context, mock.sentinel.image_service, mock.sentinel.image_id, mock.sentinel.tmp_vhd_path, self._driver.configuration.volume_dd_blocksize) mock_unlink.assert_called_once_with(mock.sentinel.vol_vhd_path) self._driver._vhdutils.convert_vhd.assert_called_once_with( mock.sentinel.tmp_vhd_path, mock.sentinel.vol_vhd_path, tgt_utils.get_supported_vhd_type.return_value) self._driver._vhdutils.resize_vhd.assert_called_once_with( mock.sentinel.vol_vhd_path, fake_volume['size'] * units.Gi, is_file_max_size=False) tgt_utils.change_wt_disk_status.assert_has_calls( [mock.call(fake_volume['name'], enabled=False), mock.call(fake_volume['name'], enabled=True)]) @mock.patch.object(windows.uuidutils, 'generate_uuid') def test_temporary_snapshot(self, mock_generate_uuid): tgt_utils = self._driver._tgt_utils mock_generate_uuid.return_value = mock.sentinel.snap_uuid expected_snap_name = '%s-tmp-snapshot-%s' % ( mock.sentinel.volume_name, mock.sentinel.snap_uuid) with self._driver._temporary_snapshot( mock.sentinel.volume_name) as snap_name: self.assertEqual(expected_snap_name, snap_name) tgt_utils.create_snapshot.assert_called_once_with( mock.sentinel.volume_name, expected_snap_name) tgt_utils.delete_snapshot.assert_called_once_with( expected_snap_name) @mock.patch.object(windows.WindowsDriver, '_temporary_snapshot') @mock.patch.object(image_utils, 'upload_volume') @mock.patch.object(fileutils, 'delete_if_exists') def test_copy_volume_to_image(self, mock_delete_if_exists, mock_upload_volume, mock_tmp_snap): tgt_utils = self._driver._tgt_utils disk_format = 'vhd' fake_image_meta = db_fakes.get_fake_image_meta() fake_volume = db_fakes.get_fake_volume_info() fake_img_conv_dir = 'fake_img_conv_dir' 
self._driver.configuration.image_conversion_dir = fake_img_conv_dir tgt_utils.get_supported_disk_format.return_value = disk_format mock_tmp_snap.return_value.__enter__.return_value = ( mock.sentinel.tmp_snap_name) expected_tmp_vhd_path = os.path.join( fake_img_conv_dir, fake_image_meta['id'] + '.' + disk_format) self._driver.copy_volume_to_image( mock.sentinel.context, fake_volume, mock.sentinel.image_service, fake_image_meta) mock_tmp_snap.assert_called_once_with(fake_volume['name']) tgt_utils.export_snapshot.assert_called_once_with( mock.sentinel.tmp_snap_name, expected_tmp_vhd_path) mock_upload_volume.assert_called_once_with( mock.sentinel.context, mock.sentinel.image_service, fake_image_meta, expected_tmp_vhd_path, 'vhd') mock_delete_if_exists.assert_called_once_with( expected_tmp_vhd_path) @mock.patch.object(windows.WindowsDriver, '_temporary_snapshot') @mock.patch.object(windows.WindowsDriver, 'local_path') def test_create_cloned_volume(self, mock_local_path, mock_tmp_snap): tgt_utils = self._driver._tgt_utils fake_volume = db_fakes.get_fake_volume_info() fake_src_volume = db_fakes.get_fake_volume_info_cloned() mock_tmp_snap.return_value.__enter__.return_value = ( mock.sentinel.tmp_snap_name) mock_local_path.return_value = mock.sentinel.vol_vhd_path self._driver.create_cloned_volume(fake_volume, fake_src_volume) mock_tmp_snap.assert_called_once_with(fake_src_volume['name']) tgt_utils.export_snapshot.assert_called_once_with( mock.sentinel.tmp_snap_name, mock.sentinel.vol_vhd_path) self._driver._vhdutils.resize_vhd.assert_called_once_with( mock.sentinel.vol_vhd_path, fake_volume['size'] * units.Gi, is_file_max_size=False) tgt_utils.import_wt_disk.assert_called_once_with( mock.sentinel.vol_vhd_path, fake_volume['name']) @mock.patch('os.path.splitdrive') def test_get_capacity_info(self, mock_splitdrive): mock_splitdrive.return_value = (mock.sentinel.drive, mock.sentinel.path_tail) fake_size_gb = 2 fake_free_space_gb = 1 self._driver._hostutils.get_volume_info.return_value = ( fake_size_gb * units.Gi, fake_free_space_gb * units.Gi) total_gb, free_gb = self._driver._get_capacity_info() self.assertEqual(fake_size_gb, total_gb) self.assertEqual(fake_free_space_gb, free_gb) self._driver._hostutils.get_volume_info.assert_called_once_with( mock.sentinel.drive) mock_splitdrive.assert_called_once_with( mock.sentinel.iscsi_lun_path) @mock.patch.object(windows.WindowsDriver, '_get_capacity_info') def test_update_volume_stats(self, mock_get_capacity_info): mock_get_capacity_info.return_value = ( mock.sentinel.size_gb, mock.sentinel.free_space_gb) self.flags(volume_backend_name=mock.sentinel.backend_name) self.flags(reserved_percentage=mock.sentinel.reserved_percentage) expected_volume_stats = dict( volume_backend_name=mock.sentinel.backend_name, vendor_name='Microsoft', driver_version=self._driver.VERSION, storage_protocol='iSCSI', total_capacity_gb=mock.sentinel.size_gb, free_capacity_gb=mock.sentinel.free_space_gb, reserved_percentage=mock.sentinel.reserved_percentage, QoS_support=False) self._driver._update_volume_stats() self.assertEqual(expected_volume_stats, self._driver._stats) def test_extend_volume(self): fake_volume = db_fakes.get_fake_volume_info() new_size_gb = 2 expected_additional_sz_mb = 1024 self._driver.extend_volume(fake_volume, new_size_gb) self._driver._tgt_utils.extend_wt_disk.assert_called_once_with( fake_volume['name'], expected_additional_sz_mb) cinder-8.0.0/cinder/tests/unit/windows/db_fakes.py0000664000567000056710000000273312701406250023315 0ustar 
jenkinsjenkins00000000000000# Copyright 2012 Pedro Navarro Perez # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Stubouts, mocks and fixtures for windows volume test suite """ def get_fake_volume_info(): return {'name': 'volume_name', 'size': 1, 'provider_location': 'iqn.2010-10.org.openstack:' + 'volume_name', 'id': 1, 'provider_auth': None} def get_fake_volume_info_cloned(): return {'name': 'volume_name_cloned', 'size': 1, 'provider_location': 'iqn.2010-10.org.openstack:' + 'volume_name_cloned', 'id': 1, 'provider_auth': None} def get_fake_image_meta(): return {'id': '10958016-e196-42e3-9e7f-5d8927ae3099' } def get_fake_snapshot_info(): return {'name': 'snapshot_name', 'volume_name': 'volume_name', } def get_fake_connector_info(): return {'initiator': 'iqn.2010-10.org.openstack:' + 'volume_name', } cinder-8.0.0/cinder/tests/unit/windows/__init__.py0000664000567000056710000000000012701406250023277 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/unit/windows/test_smbfs.py0000664000567000056710000002501312701406250023724 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
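# --- Illustrative note (not part of the original modules) ---
# The db_fakes helpers above return plain dicts shaped like Cinder DB
# rows, so the Windows driver tests can run without a database or a
# real backend. A typical (hypothetical) usage pattern from the tests
# in this directory:
#
#     fake_volume = db_fakes.get_fake_volume_info()
#     driver.create_volume(fake_volume)
#     driver._tgt_utils.create_wt_disk.assert_called_once_with(
#         driver.local_path(fake_volume), fake_volume['name'],
#         size_mb=fake_volume['size'] * 1024)
#
# Keeping fixtures as plain dicts rather than ORM objects keeps them
# trivial to construct and compare in assertions.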
import os import mock from oslo_utils import units from cinder import exception from cinder.image import image_utils from cinder import test from cinder.volume.drivers.windows import smbfs class WindowsSmbFsTestCase(test.TestCase): _FAKE_SHARE = '//1.2.3.4/share1' _FAKE_MNT_BASE = 'c:\openstack\mnt' _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash') _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc' _FAKE_SNAPSHOT_NAME = _FAKE_VOLUME_NAME + '-snapshot.vhdx' _FAKE_SNAPSHOT_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_SNAPSHOT_NAME) _FAKE_TOTAL_SIZE = '2048' _FAKE_TOTAL_AVAILABLE = '1024' _FAKE_TOTAL_ALLOCATED = 1024 _FAKE_VOLUME = {'id': 'e8d76af4-cbb9-4b70-8e9e-5a133f1a1a66', 'size': 1, 'provider_location': _FAKE_SHARE} _FAKE_SNAPSHOT = {'id': '35a23942-7625-4683-ad84-144b76e87a80', 'volume': _FAKE_VOLUME, 'volume_size': _FAKE_VOLUME['size']} _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345' _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME + '.vhdx') @mock.patch.object(smbfs, 'utilsfactory') @mock.patch.object(smbfs, 'remotefs') def setUp(self, mock_remotefs, mock_utilsfactory): super(WindowsSmbFsTestCase, self).setUp() self._smbfs_driver = smbfs.WindowsSmbfsDriver( configuration=mock.Mock()) self._smbfs_driver._delete = mock.Mock() self._smbfs_driver.local_path = mock.Mock( return_value=self._FAKE_VOLUME_PATH) def _test_create_volume(self, volume_exists=False, volume_format='vhdx'): self._smbfs_driver.create_dynamic_vhd = mock.MagicMock() fake_create = self._smbfs_driver._vhdutils.create_dynamic_vhd self._smbfs_driver.get_volume_format = mock.Mock( return_value=volume_format) with mock.patch('os.path.exists', new=lambda x: volume_exists): if volume_exists or volume_format not in ('vhd', 'vhdx'): self.assertRaises(exception.InvalidVolume, self._smbfs_driver._do_create_volume, self._FAKE_VOLUME) else: fake_vol_path = self._FAKE_VOLUME_PATH self._smbfs_driver._do_create_volume(self._FAKE_VOLUME) fake_create.assert_called_once_with( fake_vol_path, self._FAKE_VOLUME['size'] << 30) def test_create_volume(self): self._test_create_volume() def test_create_existing_volume(self): self._test_create_volume(True) def test_create_volume_invalid_volume(self): self._test_create_volume(volume_format="qcow") def test_get_capacity_info(self): self._smbfs_driver._smbutils.get_share_capacity_info.return_value = ( self._FAKE_TOTAL_SIZE, self._FAKE_TOTAL_AVAILABLE) self._smbfs_driver._get_total_allocated = mock.Mock( return_value=self._FAKE_TOTAL_ALLOCATED) ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE) expected_ret_val = [int(x) for x in [self._FAKE_TOTAL_SIZE, self._FAKE_TOTAL_AVAILABLE, self._FAKE_TOTAL_ALLOCATED]] self.assertEqual(expected_ret_val, ret_val) def _test_get_img_info(self, backing_file=None): self._smbfs_driver._vhdutils.get_vhd_parent_path.return_value = ( backing_file) image_info = self._smbfs_driver._qemu_img_info(self._FAKE_VOLUME_PATH) self.assertEqual(self._FAKE_VOLUME_NAME + '.vhdx', image_info.image) backing_file_name = backing_file and os.path.basename(backing_file) self.assertEqual(backing_file_name, image_info.backing_file) def test_get_img_info_without_backing_file(self): self._test_get_img_info() def test_get_snapshot_info(self): self._test_get_img_info(self._FAKE_VOLUME_PATH) def test_create_snapshot(self): self._smbfs_driver._vhdutils.create_differencing_vhd = ( mock.Mock()) self._smbfs_driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) fake_create_diff = ( 
self._smbfs_driver._vhdutils.create_differencing_vhd) self._smbfs_driver._do_create_snapshot( self._FAKE_SNAPSHOT, os.path.basename(self._FAKE_VOLUME_PATH), self._FAKE_SNAPSHOT_PATH) fake_create_diff.assert_called_once_with(self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_PATH) def _test_copy_volume_to_image(self, has_parent=False, volume_format='vhd'): drv = self._smbfs_driver fake_image_meta = {'id': 'fake-image-id'} if has_parent: fake_volume_path = self._FAKE_SNAPSHOT_PATH fake_parent_path = self._FAKE_VOLUME_PATH else: fake_volume_path = self._FAKE_VOLUME_PATH fake_parent_path = None if volume_format == drv._DISK_FORMAT_VHD: fake_volume_path = fake_volume_path[:-1] fake_active_image = os.path.basename(fake_volume_path) drv.get_active_image_from_info = mock.Mock( return_value=fake_active_image) drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv.get_volume_format = mock.Mock( return_value=volume_format) drv._vhdutils.get_vhd_parent_path.return_value = ( fake_parent_path) with mock.patch.object(image_utils, 'upload_volume') as ( fake_upload_volume): drv.copy_volume_to_image( mock.sentinel.context, self._FAKE_VOLUME, mock.sentinel.image_service, fake_image_meta) expected_conversion = ( has_parent or volume_format == drv._DISK_FORMAT_VHDX) if expected_conversion: fake_temp_image_name = '%s.temp_image.%s.%s' % ( self._FAKE_VOLUME['id'], fake_image_meta['id'], drv._DISK_FORMAT_VHD) fake_temp_image_path = os.path.join( self._FAKE_MNT_POINT, fake_temp_image_name) fake_active_image_path = os.path.join( self._FAKE_MNT_POINT, fake_active_image) upload_path = fake_temp_image_path drv._vhdutils.convert_vhd.assert_called_once_with( fake_active_image_path, fake_temp_image_path) drv._delete.assert_called_once_with( fake_temp_image_path) else: upload_path = fake_volume_path fake_upload_volume.assert_called_once_with( mock.sentinel.context, mock.sentinel.image_service, fake_image_meta, upload_path, drv._DISK_FORMAT_VHD) def test_copy_volume_to_image_having_snapshot(self): self._test_copy_volume_to_image(has_parent=True) def test_copy_vhdx_volume_to_image(self): self._test_copy_volume_to_image(volume_format='vhdx') def test_copy_vhd_volume_to_image(self): self._test_copy_volume_to_image(volume_format='vhd') def test_copy_image_to_volume(self): drv = self._smbfs_driver drv.get_volume_format = mock.Mock( return_value=mock.sentinel.volume_format) drv.local_path = mock.Mock( return_value=self._FAKE_VOLUME_PATH) drv.configuration = mock.MagicMock() drv.configuration.volume_dd_blocksize = mock.sentinel.block_size with mock.patch.object(image_utils, 'fetch_to_volume_format') as fake_fetch: drv.copy_image_to_volume( mock.sentinel.context, self._FAKE_VOLUME, mock.sentinel.image_service, mock.sentinel.image_id) fake_fetch.assert_called_once_with( mock.sentinel.context, mock.sentinel.image_service, mock.sentinel.image_id, self._FAKE_VOLUME_PATH, mock.sentinel.volume_format, mock.sentinel.block_size) drv._vhdutils.resize_vhd.assert_called_once_with( self._FAKE_VOLUME_PATH, self._FAKE_VOLUME['size'] * units.Gi) def test_copy_volume_from_snapshot(self): drv = self._smbfs_driver fake_volume_info = { self._FAKE_SNAPSHOT['id']: 'fake_snapshot_file_name'} fake_img_info = mock.MagicMock() fake_img_info.backing_file = self._FAKE_VOLUME_NAME + '.vhdx' drv._local_path_volume_info = mock.Mock( return_value=self._FAKE_VOLUME_PATH + '.info') drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv._read_info_file = mock.Mock( return_value=fake_volume_info) drv._qemu_img_info = mock.Mock( 
return_value=fake_img_info) drv.local_path = mock.Mock( return_value=mock.sentinel.new_volume_path) drv._copy_volume_from_snapshot( self._FAKE_SNAPSHOT, self._FAKE_VOLUME, self._FAKE_VOLUME['size']) drv._delete.assert_called_once_with(mock.sentinel.new_volume_path) drv._vhdutils.convert_vhd.assert_called_once_with( self._FAKE_VOLUME_PATH, mock.sentinel.new_volume_path) drv._vhdutils.resize_vhd.assert_called_once_with( mock.sentinel.new_volume_path, self._FAKE_VOLUME['size'] * units.Gi) def test_rebase_img(self): drv = self._smbfs_driver drv._rebase_img( self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_NAME + '.vhdx', 'vhdx') drv._vhdutils.reconnect_parent_vhd.assert_called_once_with( self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_PATH) cinder-8.0.0/cinder/tests/unit/fake_constants.py0000664000567000056710000000425212701406257023074 0ustar jenkinsjenkins00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. attachment_id = '4dc3bb12-ad75-41b9-ab2c-7609e743e600' backup_id = '707844eb-6d8a-4ac1-8b98-618e1c0b3a3a' backup2_id = '40e8462a-c9d8-462f-a810-b732a1790535' backup3_id = '30ae7641-017e-4221-a642-855687c8bd71' cgsnapshot_id = '5e34cce3-bc97-46b7-a127-5cfb95ef445d' cgsnapshot2_id = '5c36d762-d6ba-4f04-bd07-88a298cc410a' cgsnapshot3_id = '5f392156-fc03-492a-9cb8-e46a7eedaf33' consistency_group_id = 'f18abf73-79ee-4f2b-8d4f-1c044148f117' image_id = 'e79161cd-5f9d-4007-8823-81a807a64332' object_id = 'd7c5b12f-d57d-4762-99ab-db5f62ae3569' object2_id = '51f5b8fa-c13c-48ba-8c9d-b470466cbc9c' object3_id = '7bf5ffa9-18a2-4b64-aab4-0798b53ee4e7' project_id = '89afd400-b646-4bbc-b12b-c0a4d63e5bd3' project2_id = '452ebfbc-55d9-402a-87af-65061916c24b' provider_id = '60087173-e899-470a-9e3a-ba4cffa3e3e3' snapshot_id = '253b2878-ec60-4793-ad19-e65496ec7aab' snapshot2_id = 'c02c44fa-5665-4a26-9e66-2ebaf25e5d2d' snapshot3_id = '454f9970-1e05-4193-a3ed-5c390c3faa18' user_id = 'c853ca26-e8ea-4797-8a52-ee124a013d0e' user2_id = '95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df' volume_id = '1e5177e7-95e5-4a0f-b170-e45f4b469f6a' volume2_id = '43a09914-e495-475f-b862-0bda3c8918e4' volume3_id = '1b1cf149-219c-44ac-aee3-13121a7f86a7' volume4_id = '904d4602-4301-4e9b-8df1-8133b51904e6' volume5_id = '17b0e01d-3d2d-4c31-a1aa-c962420bc3dc' volume_name_id = 'ee73d33c-52ed-4cb7-a8a9-2687c1205c22' volume2_name_id = '63fbdd21-03bc-4309-b867-2893848f86af' volume_type_id = '4e9e6d23-eed0-426d-b90a-28f87a94b6fe' volume_type2_id = '23defc6f-6d21-4fb5-8d36-b887cbd5a19c' will_not_be_found_id = 'ce816f65-c5aa-46d6-bd62-5272752d584a' cinder-8.0.0/cinder/tests/unit/test_tegile.py0000664000567000056710000004323612701406250022400 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 by Tegile Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver Test for Tegile storage. """ import mock from cinder import context from cinder.exception import TegileAPIException from cinder import test from cinder.volume.drivers import tegile BASE_DRIVER = tegile.TegileIntelliFlashVolumeDriver ISCSI_DRIVER = tegile.TegileISCSIDriver FC_DRIVER = tegile.TegileFCDriver test_config = mock.Mock() test_config.san_ip = 'some-ip' test_config.san_login = 'some-user' test_config.san_password = 'some-password' test_config.san_is_local = True test_config.tegile_default_pool = 'random-pool' test_config.tegile_default_project = 'random-project' test_config.volume_backend_name = "unittest" test_volume = {'host': 'node#testPool', 'name': 'testvol', 'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e', '_name_id': 'testvol', 'metadata': {'project': 'testProj'}, 'provider_location': None, 'size': 10} test_snapshot = {'name': 'testSnap', 'id': '07ae9978-5445-405e-8881-28f2adfee732', 'volume': {'host': 'node#testPool', 'size': 1, '_name_id': 'testvol' } } array_stats = {'total_capacity_gb': 4569.199686084874, 'free_capacity_gb': 4565.381390112452, 'pools': [{'total_capacity_gb': 913.5, 'QoS_support': False, 'free_capacity_gb': 911.812650680542, 'reserved_percentage': 0, 'pool_name': 'pyramid' }, {'total_capacity_gb': 2742.1996604874, 'QoS_support': False, 'free_capacity_gb': 2740.148867149747, 'reserved_percentage': 0, 'pool_name': 'cobalt' }, {'total_capacity_gb': 913.5, 'QoS_support': False, 'free_capacity_gb': 913.4198722839355, 'reserved_percentage': 0, 'pool_name': 'test' }] } class FakeTegileService(object): @staticmethod def send_api_request(method, params=None, request_type='post', api_service='v2', fine_logging=False): if method is 'createVolume': return '' elif method is 'deleteVolume': return '' elif method is 'createVolumeSnapshot': return '' elif method is 'deleteVolumeSnapshot': return '' elif method is 'cloneVolumeSnapshot': return '' elif method is 'listPools': return '' elif method is 'resizeVolume': return '' elif method is 'getVolumeSizeinGB': return 25 elif method is 'getISCSIMappingForVolume': return {'target_lun': '27', 'target_iqn': 'iqn.2012-02.com.tegile:openstack-cobalt', 'target_portal': '10.68.103.106:3260' } elif method is 'getFCPortsForVolume': return {'target_lun': '12', 'initiator_target_map': '{"21000024ff59bb6e":["21000024ff578701",],' '"21000024ff59bb6f":["21000024ff578700",],}', 'target_wwn': '["21000024ff578700","21000024ff578701",]'} elif method is 'getArrayStats': return array_stats fake_tegile_backend = FakeTegileService() class FakeTegileServiceFail(object): @staticmethod def send_api_request(method, params=None, request_type='post', api_service='v2', fine_logging=False): raise TegileAPIException fake_tegile_backend_fail = FakeTegileServiceFail() class TegileIntelliFlashVolumeDriverTestCase(test.TestCase): def setUp(self): self.ctxt = context.get_admin_context() self.configuration = test_config super(TegileIntelliFlashVolumeDriverTestCase, self).setUp() def test_create_volume(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): 
self.assertEqual({ 'metadata': {'pool': 'testPool', 'project': test_config.tegile_default_project } }, tegile_driver.create_volume(test_volume)) def test_create_volume_fail(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend_fail): self.assertRaises(TegileAPIException, tegile_driver.create_volume, test_volume) def test_delete_volume(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): tegile_driver.delete_volume(test_volume) def test_delete_volume_fail(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend_fail): self.assertRaises(TegileAPIException, tegile_driver.delete_volume, test_volume) def test_create_snapshot(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): tegile_driver.create_snapshot(test_snapshot) def test_create_snapshot_fail(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend_fail): self.assertRaises(TegileAPIException, tegile_driver.create_snapshot, test_snapshot) def test_delete_snapshot(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): tegile_driver.delete_snapshot(test_snapshot) def test_delete_snapshot_fail(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend_fail): self.assertRaises(TegileAPIException, tegile_driver.delete_snapshot, test_snapshot) def test_create_volume_from_snapshot(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): self.assertEqual({ 'metadata': {'pool': 'testPool', 'project': test_config.tegile_default_project } }, tegile_driver.create_volume_from_snapshot(test_volume, test_snapshot)) def test_create_volume_from_snapshot_fail(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend_fail): self.assertRaises(TegileAPIException, tegile_driver.create_volume_from_snapshot, test_volume, test_snapshot) def test_create_cloned_volume(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): self.assertEqual({'metadata': {'project': 'testProj', 'pool': 'testPool'}}, tegile_driver.create_cloned_volume(test_volume, test_volume)) def test_create_cloned_volume_fail(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend_fail): self.assertRaises(TegileAPIException, tegile_driver.create_cloned_volume, test_volume, test_volume) def test_get_volume_stats(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): self.assertEqual({'driver_version': '1.0.0', 'free_capacity_gb': 4565.381390112452, 'pools': [{'QoS_support': False, 'allocated_capacity_gb': 0.0, 'free_capacity_gb': 911.812650680542, 'pool_name': 'pyramid', 'reserved_percentage': 0, 'total_capacity_gb': 913.5}, {'QoS_support': False, 'allocated_capacity_gb': 0.0, 'free_capacity_gb': 2740.148867149747, 'pool_name': 'cobalt', 'reserved_percentage': 0, 
'total_capacity_gb': 2742.1996604874}, {'QoS_support': False, 'allocated_capacity_gb': 0.0, 'free_capacity_gb': 913.4198722839355, 'pool_name': 'test', 'reserved_percentage': 0, 'total_capacity_gb': 913.5}], 'storage_protocol': 'iSCSI', 'total_capacity_gb': 4569.199686084874, 'vendor_name': 'Tegile Systems Inc.', 'volume_backend_name': 'unittest'}, tegile_driver.get_volume_stats(True)) def test_get_pool(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): self.assertEqual('testPool', tegile_driver.get_pool(test_volume)) def test_extend_volume(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): tegile_driver.extend_volume(test_volume, 12) def test_extend_volume_fail(self): tegile_driver = self.get_object(self.configuration) with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend_fail): self.assertRaises(TegileAPIException, tegile_driver.extend_volume, test_volume, 30) def test_manage_existing(self): tegile_driver = self.get_object(self.configuration) existing_ref = {'name': 'existingvol'} with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): self.assertEqual({'metadata': {'pool': 'testPool', 'project': 'testProj' }, '_name_id': ('existingvol',) }, tegile_driver.manage_existing(test_volume, existing_ref)) def test_manage_existing_get_size(self): tegile_driver = self.get_object(self.configuration) existing_ref = {'name': 'existingvol'} with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): self.assertEqual(25, tegile_driver.manage_existing_get_size( test_volume, existing_ref)) def test_manage_existing_get_size_fail(self): tegile_driver = self.get_object(self.configuration) existing_ref = {'name': 'existingvol'} with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend_fail): self.assertRaises(TegileAPIException, tegile_driver.manage_existing_get_size, test_volume, existing_ref) def get_object(self, configuration): class TegileBaseDriver(BASE_DRIVER): def initialize_connection(self, volume, connector, **kwargs): pass def terminate_connection(self, volume, connector, force=False, **kwargs): pass return TegileBaseDriver(configuration=self.configuration) class TegileISCSIDriverTestCase(test.TestCase): def setUp(self): super(TegileISCSIDriverTestCase, self).setUp() self.ctxt = context.get_admin_context() self.configuration = test_config self.configuration.chap_username = 'fake' self.configuration.chap_password = "test" def test_initialize_connection(self): tegile_driver = self.get_object(self.configuration) connector = {'initiator': 'iqn.1993-08.org.debian:01:d0bb9a834f8'} with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): self.assertEqual( {'data': {'auth_method': 'CHAP', 'discard': False, 'target_discovered': (False,), 'auth_password': 'test', 'auth_username': 'fake', 'target_iqn': 'iqn.2012-02.' 
'com.tegile:openstack-cobalt', 'target_lun': '27', 'target_portal': '10.68.103.106:3260', 'volume_id': ( 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e',)}, 'driver_volume_type': 'iscsi'}, tegile_driver.initialize_connection(test_volume, connector)) def get_object(self, configuration): return ISCSI_DRIVER(configuration=configuration) class TegileFCDriverTestCase(test.TestCase): def setUp(self): super(TegileFCDriverTestCase, self).setUp() self.ctxt = context.get_admin_context() self.configuration = test_config def test_initialize_connection(self): tegile_driver = self.get_object(self.configuration) connector = {'wwpns': ['500110a0001a3990']} with mock.patch.object(tegile_driver, '_api_executor', fake_tegile_backend): self.assertEqual({'data': {'encrypted': False, 'initiator_target_map': { '21000024ff59bb6e': ['21000024ff578701'], '21000024ff59bb6f': ['21000024ff578700'] }, 'target_discovered': False, 'target_lun': '12', 'target_wwn': ['21000024ff578700', '21000024ff578701']}, 'driver_volume_type': 'fibre_channel'}, tegile_driver.initialize_connection( test_volume, connector)) def get_object(self, configuration): return FC_DRIVER(configuration=configuration) cinder-8.0.0/cinder/tests/unit/test_exception.py0000664000567000056710000001077312701406250023125 0ustar jenkinsjenkins00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
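# --- Illustrative sketch (not part of the original test modules) ---
# The fake Tegile backend above returns FC zoning data as a string of
# Python literals with trailing commas ('{"wwn":["wwn",],}'), which is
# not valid JSON. A hypothetical helper showing one way such a payload
# could be decoded into the initiator_target_map the FC test expects:
import ast


def parse_initiator_target_map_sketch(raw_map):
    # ast.literal_eval accepts the trailing commas that json.loads
    # rejects and returns a dict of initiator -> list of target WWNs.
    return ast.literal_eval(raw_map)

# Example, using the exact string from FakeTegileService:
#   parse_initiator_target_map_sketch(
#       '{"21000024ff59bb6e":["21000024ff578701",],'
#       '"21000024ff59bb6f":["21000024ff578700",],}')
#   => {'21000024ff59bb6e': ['21000024ff578701'],
#       '21000024ff59bb6f': ['21000024ff578700']}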
from cinder import exception from cinder import test import mock import six import webob.util class CinderExceptionTestCase(test.TestCase): def test_default_error_msg(self): class FakeCinderException(exception.CinderException): message = "default message" exc = FakeCinderException() self.assertEqual('default message', six.text_type(exc)) def test_error_msg(self): self.assertEqual('test', six.text_type(exception.CinderException('test'))) def test_default_error_msg_with_kwargs(self): class FakeCinderException(exception.CinderException): message = "default message: %(code)s" exc = FakeCinderException(code=500) self.assertEqual('default message: 500', six.text_type(exc)) def test_error_msg_exception_with_kwargs(self): # NOTE(dprince): disable format errors for this test self.flags(fatal_exception_format_errors=False) class FakeCinderException(exception.CinderException): message = "default message: %(misspelled_code)s" exc = FakeCinderException(code=500) self.assertEqual('default message: %(misspelled_code)s', six.text_type(exc)) def test_default_error_code(self): class FakeCinderException(exception.CinderException): code = 404 exc = FakeCinderException() self.assertEqual(404, exc.kwargs['code']) def test_error_code_from_kwarg(self): class FakeCinderException(exception.CinderException): code = 500 exc = FakeCinderException(code=404) self.assertEqual(404, exc.kwargs['code']) def test_error_msg_is_exception_to_string(self): msg = 'test message' exc1 = Exception(msg) exc2 = exception.CinderException(exc1) self.assertEqual(msg, exc2.msg) def test_exception_kwargs_to_string(self): msg = 'test message' exc1 = Exception(msg) exc2 = exception.CinderException(kwarg1=exc1) self.assertEqual(msg, exc2.kwargs['kwarg1']) def test_message_in_format_string(self): class FakeCinderException(exception.CinderException): message = 'FakeCinderException: %(message)s' exc = FakeCinderException(message='message') self.assertEqual('FakeCinderException: message', six.text_type(exc)) def test_message_and_kwarg_in_format_string(self): class FakeCinderException(exception.CinderException): message = 'Error %(code)d: %(message)s' exc = FakeCinderException(message='message', code=404) self.assertEqual('Error 404: message', six.text_type(exc)) def test_message_is_exception_in_format_string(self): class FakeCinderException(exception.CinderException): message = 'Exception: %(message)s' msg = 'test message' exc1 = Exception(msg) exc2 = FakeCinderException(message=exc1) self.assertEqual('Exception: test message', six.text_type(exc2)) class CinderConvertedExceptionTestCase(test.TestCase): def test_default_args(self): exc = exception.ConvertedException() self.assertNotEqual('', exc.title) self.assertEqual(500, exc.code) self.assertEqual('', exc.explanation) def test_standard_status_code(self): with mock.patch.dict(webob.util.status_reasons, {200: 'reason'}): exc = exception.ConvertedException(code=200) self.assertEqual('reason', exc.title) @mock.patch.dict(webob.util.status_reasons, {500: 'reason'}) def test_generic_status_code(self): with mock.patch.dict(webob.util.status_generic_reasons, {5: 'generic_reason'}): exc = exception.ConvertedException(code=599) self.assertEqual('generic_reason', exc.title) cinder-8.0.0/cinder/tests/unit/test_vzstorage.py0000664000567000056710000002722712701406250023155 0ustar jenkinsjenkins00000000000000# Copyright 2015 Odin # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import errno import os import mock from os_brick.remotefs import remotefs from oslo_utils import units from cinder import exception from cinder.image import image_utils from cinder import test from cinder.volume.drivers import vzstorage _orig_path_exists = os.path.exists class VZStorageTestCase(test.TestCase): _FAKE_SHARE = "10.0.0.1,10.0.0.2:/cluster123:123123" _FAKE_MNT_BASE = '/mnt' _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash') _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc' _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME) _FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc', 'size': 1, 'provider_location': _FAKE_SHARE, 'name': _FAKE_VOLUME_NAME, 'status': 'available'} _FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba' _FAKE_SNAPSHOT_PATH = ( _FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID) _FAKE_SNAPSHOT = {'id': _FAKE_SNAPSHOT_ID, 'volume': _FAKE_VOLUME, 'status': 'available', 'volume_size': 1} _FAKE_VZ_CONFIG = mock.MagicMock() _FAKE_VZ_CONFIG.vzstorage_shares_config = '/fake/config/path' _FAKE_VZ_CONFIG.vzstorage_sparsed_volumes = False _FAKE_VZ_CONFIG.vzstorage_used_ratio = 0.7 _FAKE_VZ_CONFIG.vzstorage_mount_point_base = _FAKE_MNT_BASE _FAKE_VZ_CONFIG.nas_secure_file_operations = 'auto' _FAKE_VZ_CONFIG.nas_secure_file_permissions = 'auto' def setUp(self): super(VZStorageTestCase, self).setUp() self._remotefsclient = mock.patch.object(remotefs, 'RemoteFsClient').start() get_mount_point = mock.Mock(return_value=self._FAKE_MNT_POINT) self._remotefsclient.get_mount_point = get_mount_point cfg = copy.copy(self._FAKE_VZ_CONFIG) self._vz_driver = vzstorage.VZStorageDriver(configuration=cfg) self._vz_driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) self._vz_driver._execute = mock.Mock() self._vz_driver.base = self._FAKE_MNT_BASE def _path_exists(self, path): if path.startswith(self._FAKE_VZ_CONFIG.vzstorage_shares_config): return True return _orig_path_exists(path) def _path_dont_exists(self, path): if path.startswith('/fake'): return False return _orig_path_exists(path) @mock.patch('os.path.exists') def test_setup_ok(self, mock_exists): mock_exists.side_effect = self._path_exists self._vz_driver.do_setup(mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_missing_shares_conf(self, mock_exists): mock_exists.side_effect = self._path_dont_exists self.assertRaises(exception.VzStorageException, self._vz_driver.do_setup, mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_invalid_usage_ratio(self, mock_exists): mock_exists.side_effect = self._path_exists self._vz_driver.configuration.vzstorage_used_ratio = 1.2 self.assertRaises(exception.VzStorageException, self._vz_driver.do_setup, mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_invalid_usage_ratio2(self, mock_exists): mock_exists.side_effect = self._path_exists self._vz_driver.configuration.vzstorage_used_ratio = 0 self.assertRaises(exception.VzStorageException, self._vz_driver.do_setup, mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_invalid_mount_point_base(self, 
mock_exists): mock_exists.side_effect = self._path_exists conf = copy.copy(self._FAKE_VZ_CONFIG) conf.vzstorage_mount_point_base = './tmp' vz_driver = vzstorage.VZStorageDriver(configuration=conf) self.assertRaises(exception.VzStorageException, vz_driver.do_setup, mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_no_vzstorage(self, mock_exists): mock_exists.side_effect = self._path_exists exc = OSError() exc.errno = errno.ENOENT self._vz_driver._execute.side_effect = exc self.assertRaises(exception.VzStorageException, self._vz_driver.do_setup, mock.sentinel.context) def test_initialize_connection(self): drv = self._vz_driver file_format = 'raw' info = mock.Mock() info.file_format = file_format with mock.patch.object(drv, '_qemu_img_info', return_value=info): ret = drv.initialize_connection(self._FAKE_VOLUME, None) name = drv.get_active_image_from_info(self._FAKE_VOLUME) expected = {'driver_volume_type': 'vzstorage', 'data': {'export': self._FAKE_SHARE, 'format': file_format, 'name': name}, 'mount_point_base': self._FAKE_MNT_BASE} self.assertEqual(expected, ret) def test_ensure_share_mounted_invalid_share(self): self.assertRaises(exception.VzStorageException, self._vz_driver._ensure_share_mounted, ':') def test_ensure_share_mounted(self): drv = self._vz_driver share = self._FAKE_SHARE drv.shares = {'1': '["1", "2", "3"]', share: '["some", "options"]'} drv._ensure_share_mounted(share) def test_find_share(self): drv = self._vz_driver drv._mounted_shares = [self._FAKE_SHARE] with mock.patch.object(drv, '_is_share_eligible', return_value=True): ret = drv._find_share(1) self.assertEqual(self._FAKE_SHARE, ret) def test_find_share_no_shares_mounted(self): drv = self._vz_driver with mock.patch.object(drv, '_is_share_eligible', return_value=True): self.assertRaises(exception.VzStorageNoSharesMounted, drv._find_share, 1) def test_find_share_no_shares_suitable(self): drv = self._vz_driver drv._mounted_shares = [self._FAKE_SHARE] with mock.patch.object(drv, '_is_share_eligible', return_value=False): self.assertRaises(exception.VzStorageNoSuitableShareFound, drv._find_share, 1) def test_is_share_eligible_false(self): drv = self._vz_driver cap_info = (100 * units.Gi, 40 * units.Gi, 60 * units.Gi) with mock.patch.object(drv, '_get_capacity_info', return_value=cap_info): ret = drv._is_share_eligible(self._FAKE_SHARE, 50) self.assertFalse(ret) def test_is_share_eligible_true(self): drv = self._vz_driver cap_info = (100 * units.Gi, 40 * units.Gi, 60 * units.Gi) with mock.patch.object(drv, '_get_capacity_info', return_value=cap_info): ret = drv._is_share_eligible(self._FAKE_SHARE, 30) self.assertTrue(ret) @mock.patch.object(image_utils, 'resize_image') def test_extend_volume(self, mock_resize_image): drv = self._vz_driver drv._check_extend_volume_support = mock.Mock(return_value=True) drv._is_file_size_equal = mock.Mock(return_value=True) with mock.patch.object(drv, 'local_path', return_value=self._FAKE_VOLUME_PATH): drv.extend_volume(self._FAKE_VOLUME, 10) mock_resize_image.assert_called_once_with(self._FAKE_VOLUME_PATH, 10) def _test_check_extend_support(self, has_snapshots=False, is_eligible=True): drv = self._vz_driver drv.local_path = mock.Mock(return_value=self._FAKE_VOLUME_PATH) drv._is_share_eligible = mock.Mock(return_value=is_eligible) if has_snapshots: active = self._FAKE_SNAPSHOT_PATH else: active = self._FAKE_VOLUME_PATH drv.get_active_image_from_info = mock.Mock(return_value=active) if has_snapshots: self.assertRaises(exception.InvalidVolume, drv._check_extend_volume_support, 
self._FAKE_VOLUME, 2) elif not is_eligible: self.assertRaises(exception.ExtendVolumeError, drv._check_extend_volume_support, self._FAKE_VOLUME, 2) else: drv._check_extend_volume_support(self._FAKE_VOLUME, 2) drv._is_share_eligible.assert_called_once_with(self._FAKE_SHARE, 1) def test_check_extend_support(self): self._test_check_extend_support() def test_check_extend_volume_with_snapshots(self): self._test_check_extend_support(has_snapshots=True) def test_check_extend_volume_uneligible_share(self): self._test_check_extend_support(is_eligible=False) @mock.patch.object(image_utils, 'convert_image') def test_copy_volume_from_snapshot(self, mock_convert_image): drv = self._vz_driver fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'} fake_img_info = mock.MagicMock() fake_img_info.backing_file = self._FAKE_VOLUME_NAME drv.get_volume_format = mock.Mock(return_value='raw') drv._local_path_volume_info = mock.Mock( return_value=self._FAKE_VOLUME_PATH + '.info') drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv._read_info_file = mock.Mock( return_value=fake_volume_info) drv._qemu_img_info = mock.Mock( return_value=fake_img_info) drv.local_path = mock.Mock( return_value=self._FAKE_VOLUME_PATH[:-1]) drv._extend_volume = mock.Mock() drv._copy_volume_from_snapshot( self._FAKE_SNAPSHOT, self._FAKE_VOLUME, self._FAKE_VOLUME['size']) drv._extend_volume.assert_called_once_with( self._FAKE_VOLUME, self._FAKE_VOLUME['size']) mock_convert_image.assert_called_once_with( self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw') def test_delete_volume(self): drv = self._vz_driver fake_vol_info = self._FAKE_VOLUME_PATH + '.info' drv._ensure_share_mounted = mock.MagicMock() fake_ensure_mounted = drv._ensure_share_mounted drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv.get_active_image_from_info = mock.Mock( return_value=self._FAKE_VOLUME_NAME) drv._delete = mock.Mock() drv._local_path_volume_info = mock.Mock( return_value=fake_vol_info) with mock.patch('os.path.exists', lambda x: True): drv.delete_volume(self._FAKE_VOLUME) fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE) drv._delete.assert_any_call( self._FAKE_VOLUME_PATH) drv._delete.assert_any_call(fake_vol_info) cinder-8.0.0/cinder/tests/unit/test_remotefs.py0000664000567000056710000003524612701406250022755 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
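# NOTE(editor): Illustrative sketch only; not part of the original cinder
# tree. The vzstorage tests above stub os.path.exists with a side_effect
# callable so that only the fake shares-config path is reported as present,
# while every other path falls through to the real check. A minimal
# standalone version of that pattern (the '/fake/config/path' literal is a
# placeholder):
#
#     import os.path
#
#     import mock
#
#     _real_exists = os.path.exists
#
#     def _fake_exists(path):
#         # Only the fake config path "exists"; defer everything else.
#         if path == '/fake/config/path':
#             return True
#         return _real_exists(path)
#
#     with mock.patch('os.path.exists', side_effect=_fake_exists):
#         assert os.path.exists('/fake/config/path')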
import copy import os import mock from cinder import exception from cinder import test from cinder import utils from cinder.volume.drivers import remotefs class RemoteFsSnapDriverTestCase(test.TestCase): _FAKE_CONTEXT = 'fake_context' _FAKE_VOLUME_ID = '4f711859-4928-4cb7-801a-a50c37ceaccc' _FAKE_VOLUME_NAME = 'volume-%s' % _FAKE_VOLUME_ID _FAKE_VOLUME = {'id': _FAKE_VOLUME_ID, 'size': 1, 'provider_location': 'fake_share', 'name': _FAKE_VOLUME_NAME, 'status': 'available'} _FAKE_MNT_POINT = '/mnt/fake_hash' _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME) _FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba' _FAKE_SNAPSHOT = {'context': _FAKE_CONTEXT, 'id': _FAKE_SNAPSHOT_ID, 'volume': _FAKE_VOLUME, 'volume_id': _FAKE_VOLUME_ID, 'status': 'available', 'volume_size': 1} _FAKE_SNAPSHOT_PATH = (_FAKE_VOLUME_PATH + '.' + _FAKE_SNAPSHOT_ID) def setUp(self): super(RemoteFsSnapDriverTestCase, self).setUp() self._driver = remotefs.RemoteFSSnapDriver() self._driver._remotefsclient = mock.Mock() self._driver._execute = mock.Mock() self._driver._delete = mock.Mock() def _test_delete_snapshot(self, volume_in_use=False, stale_snapshot=False, is_active_image=True): # If the snapshot is not the active image, it is guaranteed that # another snapshot exists having it as backing file. fake_snapshot_name = os.path.basename(self._FAKE_SNAPSHOT_PATH) fake_info = {'active': fake_snapshot_name, self._FAKE_SNAPSHOT['id']: fake_snapshot_name} fake_snap_img_info = mock.Mock() fake_base_img_info = mock.Mock() if stale_snapshot: fake_snap_img_info.backing_file = None else: fake_snap_img_info.backing_file = self._FAKE_VOLUME_NAME fake_snap_img_info.file_format = 'qcow2' fake_base_img_info.backing_file = None fake_base_img_info.file_format = 'raw' self._driver._local_path_volume_info = mock.Mock( return_value=mock.sentinel.fake_info_path) self._driver._qemu_img_info = mock.Mock( side_effect=[fake_snap_img_info, fake_base_img_info]) self._driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) self._driver._read_info_file = mock.Mock() self._driver._write_info_file = mock.Mock() self._driver._img_commit = mock.Mock() self._driver._rebase_img = mock.Mock() self._driver._ensure_share_writable = mock.Mock() self._driver._delete_stale_snapshot = mock.Mock() self._driver._delete_snapshot_online = mock.Mock() expected_info = { 'active': fake_snapshot_name, self._FAKE_SNAPSHOT_ID: fake_snapshot_name } if volume_in_use: fake_snapshot = copy.deepcopy(self._FAKE_SNAPSHOT) fake_snapshot['volume']['status'] = 'in-use' self._driver._read_info_file.return_value = fake_info self._driver._delete_snapshot(fake_snapshot) if stale_snapshot: self._driver._delete_stale_snapshot.assert_called_once_with( fake_snapshot) else: expected_online_delete_info = { 'active_file': fake_snapshot_name, 'snapshot_file': fake_snapshot_name, 'base_file': self._FAKE_VOLUME_NAME, 'base_id': None, 'new_base_file': None } self._driver._delete_snapshot_online.assert_called_once_with( self._FAKE_CONTEXT, fake_snapshot, expected_online_delete_info) elif is_active_image: self._driver._read_info_file.return_value = fake_info self._driver._delete_snapshot(self._FAKE_SNAPSHOT) self._driver._img_commit.assert_called_once_with( self._FAKE_SNAPSHOT_PATH) self.assertNotIn(self._FAKE_SNAPSHOT_ID, fake_info) self._driver._write_info_file.assert_called_once_with( mock.sentinel.fake_info_path, fake_info) else: fake_upper_snap_id = 'fake_upper_snap_id' fake_upper_snap_path = ( self._FAKE_VOLUME_PATH + '-snapshot' + fake_upper_snap_id) 
fake_upper_snap_name = os.path.basename(fake_upper_snap_path) fake_backing_chain = [ {'filename': fake_upper_snap_name, 'backing-filename': fake_snapshot_name}, {'filename': fake_snapshot_name, 'backing-filename': self._FAKE_VOLUME_NAME}, {'filename': self._FAKE_VOLUME_NAME, 'backing-filename': None}] fake_info[fake_upper_snap_id] = fake_upper_snap_name fake_info[self._FAKE_SNAPSHOT_ID] = fake_snapshot_name fake_info['active'] = fake_upper_snap_name expected_info = copy.deepcopy(fake_info) del expected_info[self._FAKE_SNAPSHOT_ID] self._driver._read_info_file.return_value = fake_info self._driver._get_backing_chain_for_path = mock.Mock( return_value=fake_backing_chain) self._driver._delete_snapshot(self._FAKE_SNAPSHOT) self._driver._img_commit.assert_called_once_with( self._FAKE_SNAPSHOT_PATH) self._driver._rebase_img.assert_called_once_with( fake_upper_snap_path, self._FAKE_VOLUME_NAME, fake_base_img_info.file_format) self._driver._write_info_file.assert_called_once_with( mock.sentinel.fake_info_path, expected_info) def test_delete_snapshot_when_active_file(self): self._test_delete_snapshot() def test_delete_snapshot_in_use(self): self._test_delete_snapshot(volume_in_use=True) def test_delete_snapshot_in_use_stale_snapshot(self): self._test_delete_snapshot(volume_in_use=True, stale_snapshot=True) def test_delete_snapshot_with_one_upper_file(self): self._test_delete_snapshot(is_active_image=False) def test_delete_stale_snapshot(self): fake_snapshot_name = os.path.basename(self._FAKE_SNAPSHOT_PATH) fake_snap_info = { 'active': self._FAKE_VOLUME_NAME, self._FAKE_SNAPSHOT_ID: fake_snapshot_name } expected_info = {'active': self._FAKE_VOLUME_NAME} self._driver._local_path_volume_info = mock.Mock( return_value=mock.sentinel.fake_info_path) self._driver._read_info_file = mock.Mock( return_value=fake_snap_info) self._driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) self._driver._write_info_file = mock.Mock() self._driver._delete_stale_snapshot(self._FAKE_SNAPSHOT) self._driver._delete.assert_called_once_with(self._FAKE_SNAPSHOT_PATH) self._driver._write_info_file.assert_called_once_with( mock.sentinel.fake_info_path, expected_info) def test_do_create_snapshot(self): self._driver._local_volume_dir = mock.Mock( return_value=self._FAKE_VOLUME_PATH) fake_backing_path = os.path.join( self._driver._local_volume_dir(), self._FAKE_VOLUME_NAME) self._driver._execute = mock.Mock() self._driver._set_rw_permissions = mock.Mock() self._driver._qemu_img_info = mock.Mock( return_value=mock.Mock(file_format=mock.sentinel.backing_fmt)) self._driver._do_create_snapshot(self._FAKE_SNAPSHOT, self._FAKE_VOLUME_NAME, self._FAKE_SNAPSHOT_PATH) command1 = ['qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s' % fake_backing_path, self._FAKE_SNAPSHOT_PATH] command2 = ['qemu-img', 'rebase', '-u', '-b', self._FAKE_VOLUME_NAME, '-F', mock.sentinel.backing_fmt, self._FAKE_SNAPSHOT_PATH] self._driver._execute.assert_any_call(*command1, run_as_root=True) self._driver._execute.assert_any_call(*command2, run_as_root=True) def _test_create_snapshot(self, volume_in_use=False): fake_snapshot = copy.deepcopy(self._FAKE_SNAPSHOT) fake_snapshot_info = {} fake_snapshot_file_name = os.path.basename(self._FAKE_SNAPSHOT_PATH) self._driver._local_path_volume_info = mock.Mock( return_value=mock.sentinel.fake_info_path) self._driver._read_info_file = mock.Mock( return_value=fake_snapshot_info) self._driver._do_create_snapshot = mock.Mock() self._driver._create_snapshot_online = mock.Mock() 
self._driver._write_info_file = mock.Mock() self._driver.get_active_image_from_info = mock.Mock( return_value=self._FAKE_VOLUME_NAME) self._driver._get_new_snap_path = mock.Mock( return_value=self._FAKE_SNAPSHOT_PATH) expected_snapshot_info = { 'active': fake_snapshot_file_name, self._FAKE_SNAPSHOT_ID: fake_snapshot_file_name } if volume_in_use: fake_snapshot['volume']['status'] = 'in-use' expected_method_called = '_create_snapshot_online' else: fake_snapshot['volume']['status'] = 'available' expected_method_called = '_do_create_snapshot' self._driver._create_snapshot(fake_snapshot) fake_method = getattr(self._driver, expected_method_called) fake_method.assert_called_with( fake_snapshot, self._FAKE_VOLUME_NAME, self._FAKE_SNAPSHOT_PATH) self._driver._write_info_file.assert_called_with( mock.sentinel.fake_info_path, expected_snapshot_info) def test_create_snapshot_volume_available(self): self._test_create_snapshot() def test_create_snapshot_volume_in_use(self): self._test_create_snapshot(volume_in_use=True) def test_create_snapshot_invalid_volume(self): fake_snapshot = copy.deepcopy(self._FAKE_SNAPSHOT) fake_snapshot['volume']['status'] = 'error' self.assertRaises(exception.InvalidVolume, self._driver._create_snapshot, fake_snapshot) @mock.patch('cinder.db.snapshot_get') @mock.patch('time.sleep') def test_create_snapshot_online_with_concurrent_delete( self, mock_sleep, mock_snapshot_get): self._driver._nova = mock.Mock() # Test what happens when progress is so slow that someone # decides to delete the snapshot while the last known status is # "creating". mock_snapshot_get.side_effect = [ {'status': 'creating', 'progress': '42%'}, {'status': 'creating', 'progress': '45%'}, {'status': 'deleting'}, ] with mock.patch.object(self._driver, '_do_create_snapshot') as \ mock_do_create_snapshot: self.assertRaises(exception.RemoteFSConcurrentRequest, self._driver._create_snapshot_online, self._FAKE_SNAPSHOT, self._FAKE_VOLUME_NAME, self._FAKE_SNAPSHOT_PATH) mock_do_create_snapshot.assert_called_once_with( self._FAKE_SNAPSHOT, self._FAKE_VOLUME_NAME, self._FAKE_SNAPSHOT_PATH) self.assertEqual([mock.call(1), mock.call(1)], mock_sleep.call_args_list) self.assertEqual(3, mock_snapshot_get.call_count) mock_snapshot_get.assert_called_with(self._FAKE_SNAPSHOT['context'], self._FAKE_SNAPSHOT['id']) @mock.patch.object(utils, 'synchronized') def _locked_volume_operation_test_helper(self, mock_synchronized, func, expected_exception=False, *args, **kwargs): def mock_decorator(*args, **kwargs): def mock_inner(f): return f return mock_inner mock_synchronized.side_effect = mock_decorator expected_lock = '%s-%s' % (self._driver.driver_prefix, self._FAKE_VOLUME_ID) if expected_exception: self.assertRaises(expected_exception, func, self._driver, *args, **kwargs) else: ret_val = func(self._driver, *args, **kwargs) mock_synchronized.assert_called_with(expected_lock, external=False) self.assertEqual(mock.sentinel.ret_val, ret_val) def test_locked_volume_id_operation(self): mock_volume = {'id': self._FAKE_VOLUME_ID} @remotefs.locked_volume_id_operation def synchronized_func(inst, volume): return mock.sentinel.ret_val self._locked_volume_operation_test_helper(func=synchronized_func, volume=mock_volume) def test_locked_volume_id_snapshot_operation(self): mock_snapshot = {'volume': {'id': self._FAKE_VOLUME_ID}} @remotefs.locked_volume_id_operation def synchronized_func(inst, snapshot): return mock.sentinel.ret_val self._locked_volume_operation_test_helper(func=synchronized_func, snapshot=mock_snapshot) def 
test_locked_volume_id_operation_exception(self): @remotefs.locked_volume_id_operation def synchronized_func(inst): return mock.sentinel.ret_val self._locked_volume_operation_test_helper( func=synchronized_func, expected_exception=exception.VolumeBackendAPIException) cinder-8.0.0/cinder/tests/functional/0000775000567000056710000000000012701406543020676 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/functional/__init__.py0000664000567000056710000000000012701406250022770 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/functional/functional_helpers.py0000664000567000056710000001063212701406250025131 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Provides common functionality for functional tests """ import os.path import random import string import uuid import fixtures import mock from oslo_config import cfg from cinder import service from cinder import test # For the flags from cinder.tests.functional.api import client CONF = cfg.CONF def generate_random_alphanumeric(length): """Creates a random alphanumeric string of specified length.""" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _x in range(length)) def generate_random_numeric(length): """Creates a random numeric string of specified length.""" return ''.join(random.choice(string.digits) for _x in range(length)) def generate_new_element(items, prefix, numeric=False): """Creates a random string with prefix, that is not in 'items' list.""" while True: if numeric: candidate = prefix + generate_random_numeric(8) else: candidate = prefix + generate_random_alphanumeric(8) if candidate not in items: return candidate class _FunctionalTestBase(test.TestCase): def setUp(self): super(_FunctionalTestBase, self).setUp() f = self._get_flags() self.flags(**f) self.flags(verbose=True) for var in ('http_proxy', 'HTTP_PROXY'): self.useFixture(fixtures.EnvironmentVariable(var)) # set up services self.volume = self.start_service('volume') # NOTE(dulek): Mocking eventlet.sleep so test won't time out on # scheduler service start. 
with mock.patch('eventlet.sleep'): self.scheduler = self.start_service('scheduler') self._start_api_service() self.addCleanup(self.osapi.stop) self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url) def _start_api_service(self): default_conf = os.path.abspath(os.path.join( os.path.dirname(__file__), '..', '..', '..', 'etc/cinder/api-paste.ini')) CONF.api_paste_config = default_conf self.osapi = service.WSGIService("osapi_volume") self.osapi.start() # FIXME(ja): this is not the auth url - this is the service url # FIXME(ja): this needs fixed in nova as well self.auth_url = 'http://%s:%s/v2' % (self.osapi.host, self.osapi.port) def _get_flags(self): """An opportunity to setup flags, before the services are started.""" f = {} # Ensure tests only listen on localhost f['osapi_volume_listen'] = '127.0.0.1' # Auto-assign ports to allow concurrent tests f['osapi_volume_listen_port'] = 0 # Use simple scheduler to avoid complications - we test schedulers # separately f['scheduler_driver'] = ('cinder.scheduler.filter_scheduler.FilterSche' 'duler') return f def get_unused_server_name(self): servers = self.api.get_servers() server_names = [server['name'] for server in servers] return generate_new_element(server_names, 'server') def get_invalid_image(self): return str(uuid.uuid4()) def _build_minimal_create_server_request(self): server = {} image = self.api.get_images()[0] if 'imageRef' in image: image_href = image['imageRef'] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId server['imageRef'] = image_href # Set a valid flavorId flavor = self.api.get_flavors()[0] server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] # Set a valid server name server_name = self.get_unused_server_name() server['name'] = server_name return server cinder-8.0.0/cinder/tests/functional/test_volumes.py0000664000567000056710000001511312701406257024004 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
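# NOTE(editor): Illustrative sketch only; not part of the original cinder
# tree. _FunctionalTestBase above sets osapi_volume_listen_port = 0 so each
# test's WSGI server binds to an OS-assigned free port, and the test then
# reads the real port back (self.osapi.port) when building auth_url. The
# same trick with a bare socket:
#
#     import socket
#
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     sock.bind(('127.0.0.1', 0))  # port 0: the kernel picks a free port
#     assigned_port = sock.getsockname()[1]
#     sock.close()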
import time

import testtools

from cinder import service
from cinder.tests.functional.api import client
from cinder.tests.functional import functional_helpers
from cinder.tests.unit import fake_driver


class VolumesTest(functional_helpers._FunctionalTestBase):
    def setUp(self):
        super(VolumesTest, self).setUp()
        fake_driver.LoggingVolumeDriver.clear_logs()

    def _start_api_service(self):
        self.osapi = service.WSGIService("osapi_volume")
        self.osapi.start()
        self.auth_url = 'http://%s:%s/v2' % (self.osapi.host,
                                             self.osapi.port)

    def _get_flags(self):
        f = super(VolumesTest, self)._get_flags()
        f['volume_driver'] = \
            'cinder.tests.unit.fake_driver.LoggingVolumeDriver'
        return f

    def test_get_volumes_summary(self):
        """Simple check that the summary volume listing works."""
        volumes = self.api.get_volumes(False)
        self.assertIsNotNone(volumes)

    def test_get_volumes(self):
        """Simple check that the detailed volume listing works."""
        volumes = self.api.get_volumes()
        self.assertIsNotNone(volumes)

    def _poll_while(self, volume_id, continue_states, max_retries=5):
        """Poll (briefly) while the volume status is in continue_states."""
        retries = 0
        while True:
            try:
                found_volume = self.api.get_volume(volume_id)
            except client.OpenStackApiNotFoundException:
                found_volume = None
                break
            self.assertEqual(volume_id, found_volume['id'])
            if found_volume['status'] not in continue_states:
                break
            time.sleep(1)
            retries = retries + 1
            if retries > max_retries:
                break
        return found_volume

    @testtools.skip('This test is failing: bug 1173266')
    def test_create_and_delete_volume(self):
        """Creates and deletes a volume."""

        # Create volume
        created_volume = self.api.post_volume({'volume': {'size': 1}})
        self.assertTrue(created_volume['id'])
        created_volume_id = created_volume['id']

        # Check it's there
        found_volume = self.api.get_volume(created_volume_id)
        self.assertEqual(created_volume_id, found_volume['id'])

        # It should also be in the all-volume list
        volumes = self.api.get_volumes()
        volume_names = [volume['id'] for volume in volumes]
        self.assertIn(created_volume_id, volume_names)

        # Wait (briefly) for creation. Delay is due to the 'message queue'
        found_volume = self._poll_while(created_volume_id, ['creating'])

        # It should be available...
        self.assertEqual('available', found_volume['status'])

        # Delete the volume
        self.api.delete_volume(created_volume_id)

        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_volume = self._poll_while(created_volume_id, ['deleting'])

        # Should be gone
        self.assertFalse(found_volume)

        create_actions = fake_driver.LoggingVolumeDriver.logs_like(
            'create_volume',
            id=created_volume_id)
        self.assertEqual(1, len(create_actions))
        create_action = create_actions[0]
        self.assertEqual(create_action['id'], created_volume_id)
        self.assertEqual('nova', create_action['availability_zone'])
        self.assertEqual(1, create_action['size'])

        export_actions = fake_driver.LoggingVolumeDriver.logs_like(
            'create_export',
            id=created_volume_id)
        self.assertEqual(1, len(export_actions))
        export_action = export_actions[0]
        self.assertEqual(export_action['id'], created_volume_id)
        self.assertEqual('nova', export_action['availability_zone'])

        delete_actions = fake_driver.LoggingVolumeDriver.logs_like(
            'delete_volume',
            id=created_volume_id)
        self.assertEqual(1, len(delete_actions))
        delete_action = delete_actions[0]
        self.assertEqual(delete_action['id'], created_volume_id)

    def test_create_volume_with_metadata(self):
        """Creates a volume with metadata."""

        # Create volume
        metadata = {'key1': 'value1',
                    'key2': 'value2'}
        created_volume = self.api.post_volume(
            {'volume': {'size': 1,
                        'metadata': metadata}})
        self.assertTrue(created_volume['id'])
        created_volume_id = created_volume['id']

        # Check it's there and metadata present
        found_volume = self.api.get_volume(created_volume_id)
        self.assertEqual(created_volume_id, found_volume['id'])
        self.assertEqual(metadata, found_volume['metadata'])

    def test_create_volume_in_availability_zone(self):
        """Creates a volume in availability_zone."""

        # Create volume
        availability_zone = 'nova'
        created_volume = self.api.post_volume(
            {'volume': {'size': 1,
                        'availability_zone': availability_zone}})
        self.assertTrue(created_volume['id'])
        created_volume_id = created_volume['id']

        # Check it's there and availability zone present
        found_volume = self.api.get_volume(created_volume_id)
        self.assertEqual(created_volume_id, found_volume['id'])
        self.assertEqual(availability_zone, found_volume['availability_zone'])

    def test_create_and_update_volume(self):
        # Create vol1
        created_volume = self.api.post_volume({'volume': {
            'size': 1, 'name': 'vol1'}})
        self.assertEqual('vol1', created_volume['name'])
        created_volume_id = created_volume['id']

        # update volume
        body = {'volume': {'name': 'vol-one'}}
        updated_volume = self.api.put_volume(created_volume_id, body)
        self.assertEqual('vol-one', updated_volume['name'])

        # check for update
        found_volume = self.api.get_volume(created_volume_id)
        self.assertEqual(created_volume_id, found_volume['id'])
        self.assertEqual('vol-one', found_volume['name'])
cinder-8.0.0/cinder/tests/functional/test_xml.py0000664000567000056710000000311412701406250023101 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
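# NOTE(editor): Illustrative sketch only; not part of the original cinder
# tree. The volume tests above go through _poll_while() because create and
# delete complete asynchronously via the message queue, so the API must be
# polled until the transient state clears. A generic standalone version of
# that wait loop (fetch is any callable returning the resource dict, or
# None once the resource is gone):
#
#     import time
#
#     def poll_while(fetch, continue_states, max_retries=5):
#         resource = fetch()
#         for _ in range(max_retries):
#             if resource is None:
#                 break  # resource disappeared (e.g. fully deleted)
#             if resource['status'] not in continue_states:
#                 break  # reached a steady state
#             time.sleep(1)
#             resource = fetch()
#         return resource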
from lxml import etree

from cinder.api import common
from cinder.tests.functional import functional_helpers


class XmlTests(functional_helpers._FunctionalTestBase):
    """Some basic XML sanity checks."""

    # FIXME(ja): does cinder need limits?
    # def test_namespace_limits(self):
    #     headers = {}
    #     headers['Accept'] = 'application/xml'
    #     response = self.api.api_request('/limits', headers=headers)
    #     data = response.read()
    #     LOG.debug("data: %s" % data)
    #     root = etree.XML(data)
    #     self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)

    def test_namespace_volumes(self):
        headers = {}
        headers['Accept'] = 'application/xml'
        response = self.api.api_request('/volumes', headers=headers,
                                        stream=True)
        data = response.raw
        root = etree.parse(data).getroot()
        self.assertEqual(common.XML_NS_V2, root.nsmap.get(None))
cinder-8.0.0/cinder/tests/functional/test_login.py0000664000567000056710000000164212701406250023415 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.tests.functional import functional_helpers


class LoginTest(functional_helpers._FunctionalTestBase):

    def test_login(self):
        """Simple check - we list volumes - so we know we're logged in."""
        volumes = self.api.get_volumes()
        self.assertIsNotNone(volumes)
cinder-8.0.0/cinder/tests/functional/api/0000775000567000056710000000000012701406543021447 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/tests/functional/api/__init__.py0000664000567000056710000000133512701406250023555 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`api` -- OpenStack API client, for testing rather than production
=======================================================================
"""
cinder-8.0.0/cinder/tests/functional/api/foxinsocks.py0000664000567000056710000000556312701406250024213 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
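# NOTE(editor): Illustrative sketch only; not part of the original cinder
# tree. test_namespace_volumes above verifies the *default* (unprefixed)
# XML namespace; lxml exposes the default namespace under the None key of
# nsmap. A self-contained example with a placeholder namespace URI:
#
#     from lxml import etree
#
#     root = etree.XML(b'<volumes xmlns="http://example.com/ns"/>')
#     assert root.nsmap.get(None) == 'http://example.com/ns'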
import webob.exc from cinder.api import extensions from cinder.api.openstack import wsgi class FoxInSocksController(object): def index(self, req): return "Try to say this Mr. Knox, sir..." class FoxInSocksServerControllerExtension(wsgi.Controller): @wsgi.action('add_tweedle') def _add_tweedle(self, req, id, body): return "Tweedle Beetle Added." @wsgi.action('delete_tweedle') def _delete_tweedle(self, req, id, body): return "Tweedle Beetle Deleted." @wsgi.action('fail') def _fail(self, req, id, body): raise webob.exc.HTTPBadRequest(explanation='Tweedle fail') class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['big_bands'] = 'Pig Bands!' class Foxinsocks(extensions.ExtensionDescriptor): """The Fox In Socks Extension.""" name = "Fox In Socks" alias = "FOXNSOX" namespace = "http://www.fox.in.socks/api/ext/pie/v1.0" updated = "2011-01-22T13:25:27-06:00" def __init__(self, ext_mgr): ext_mgr.register(self) def get_resources(self): resources = [] resource = extensions.ResourceExtension('foxnsocks', FoxInSocksController()) resources.append(resource) return resources def get_controller_extensions(self): extension_list = [] extension_set = [ (FoxInSocksServerControllerExtension, 'servers'), (FoxInSocksFlavorGooseControllerExtension, 'flavors'), (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ] for klass, collection in extension_set: controller = klass() ext = extensions.ControllerExtension(self, collection, controller) extension_list.append(ext) return extension_list cinder-8.0.0/cinder/tests/functional/api/client.py0000664000567000056710000001625112701406257023306 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
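# NOTE(editor): Illustrative sketch only; not part of the original cinder
# tree. The Foxinsocks extension above attaches @wsgi.action('add_tweedle')
# to the servers controller; by the usual OpenStack convention such actions
# are invoked by POSTing a one-key JSON body to the resource's action URL.
# Roughly, using the TestOpenStackClient defined below (the URL shape and
# server_id are assumptions):
#
#     client.api_post('/servers/%s/action' % server_id,
#                     {'add_tweedle': {}})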
from oslo_serialization import jsonutils from oslo_utils import netutils import requests from six.moves import urllib from cinder.i18n import _ class OpenStackApiException(Exception): def __init__(self, message=None, response=None): self.response = response if not message: message = 'Unspecified error' if response: message = _('%(message)s\nStatus Code: %(_status)s\n' 'Body: %(_body)s') % {'_status': response.status_code, '_body': response.text} super(OpenStackApiException, self).__init__(message) class OpenStackApiAuthenticationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = _("Authentication error") super(OpenStackApiAuthenticationException, self).__init__(message, response) class OpenStackApiAuthorizationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = _("Authorization error") super(OpenStackApiAuthorizationException, self).__init__(message, response) class OpenStackApiNotFoundException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = _("Item not found") super(OpenStackApiNotFoundException, self).__init__(message, response) class TestOpenStackClient(object): """Simple OpenStack API Client. This is a really basic OpenStack API client that is under our control, so we can make changes / insert hooks for testing """ def __init__(self, auth_user, auth_key, auth_uri): super(TestOpenStackClient, self).__init__() self.auth_result = None self.auth_user = auth_user self.auth_key = auth_key self.auth_uri = auth_uri # default project_id self.project_id = 'openstack' def request(self, url, method='GET', body=None, headers=None, ssl_verify=True, stream=False): _headers = {'Content-Type': 'application/json'} _headers.update(headers or {}) parsed_url = urllib.parse.urlparse(url) port = parsed_url.port hostname = parsed_url.hostname scheme = parsed_url.scheme if netutils.is_valid_ipv6(hostname): hostname = "[%s]" % hostname relative_url = parsed_url.path if parsed_url.query: relative_url = relative_url + "?" 
+ parsed_url.query if port: _url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url) else: _url = "%s://%s%s" % (scheme, hostname, relative_url) response = requests.request(method, _url, data=body, headers=_headers, verify=ssl_verify, stream=stream) return response def _authenticate(self): if self.auth_result: return self.auth_result auth_uri = self.auth_uri headers = {'X-Auth-User': self.auth_user, 'X-Auth-Key': self.auth_key, 'X-Auth-Project-Id': self.project_id} response = self.request(auth_uri, headers=headers) http_status = response.status_code if http_status == 401: raise OpenStackApiAuthenticationException(response=response) self.auth_result = response.headers return self.auth_result def api_request(self, relative_uri, check_response_status=None, **kwargs): auth_result = self._authenticate() # NOTE(justinsb): httplib 'helpfully' converts headers to lower case base_uri = auth_result['x-server-management-url'] full_uri = '%s/%s' % (base_uri, relative_uri) headers = kwargs.setdefault('headers', {}) headers['X-Auth-Token'] = auth_result['x-auth-token'] response = self.request(full_uri, **kwargs) http_status = response.status_code if check_response_status: if http_status not in check_response_status: if http_status == 404: raise OpenStackApiNotFoundException(response=response) elif http_status == 401: raise OpenStackApiAuthorizationException(response=response) else: raise OpenStackApiException( message=_("Unexpected status code"), response=response) return response def _decode_json(self, response): body = response.text if body: return jsonutils.loads(body) else: return "" def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_post(self, relative_uri, body, **kwargs): kwargs['method'] = 'POST' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_put(self, relative_uri, body, **kwargs): kwargs['method'] = 'PUT' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202, 204]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_delete(self, relative_uri, **kwargs): kwargs['method'] = 'DELETE' kwargs.setdefault('check_response_status', [200, 202, 204]) return self.api_request(relative_uri, **kwargs) def get_volume(self, volume_id): return self.api_get('/volumes/%s' % volume_id)['volume'] def get_volumes(self, detail=True): rel_url = '/volumes/detail' if detail else '/volumes' return self.api_get(rel_url)['volumes'] def post_volume(self, volume): return self.api_post('/volumes', volume)['volume'] def delete_volume(self, volume_id): return self.api_delete('/volumes/%s' % volume_id) def put_volume(self, volume_id, volume): return self.api_put('/volumes/%s' % volume_id, volume)['volume'] cinder-8.0.0/cinder/tests/functional/test_extensions.py0000664000567000056710000001744012701406250024507 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import iso8601 from lxml import etree from oslo_config import cfg from oslo_serialization import jsonutils import webob from cinder.api import extensions from cinder.api.v1 import router from cinder.api import xmlutil from cinder.tests.functional import functional_helpers NS = "{http://docs.openstack.org/common/api/v1.0}" CONF = cfg.CONF class ExtensionTestCase(functional_helpers._FunctionalTestBase): def _get_flags(self): f = super(ExtensionTestCase, self)._get_flags() f['osapi_volume_extension'] = CONF.osapi_volume_extension[:] f['osapi_volume_extension'].append( 'cinder.tests.functional.api.foxinsocks.Foxinsocks') return f class ExtensionsTest(ExtensionTestCase): def test_get_foxnsocks(self): """Simple check that fox-n-socks works.""" response = self.api.api_request('/foxnsocks') foxnsocks = response.text self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks) class ExtensionControllerTest(ExtensionTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.ext_list = ["TypesManage", "TypesExtraSpecs", ] self.ext_list.sort() def test_list_extensions_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions") response = request.get_response(app) self.assertEqual(200, response.status_int) # Make sure we have all the extensions, extra extensions being OK. data = jsonutils.loads(response.body) names = [str(x['name']) for x in data['extensions'] if str(x['name']) in self.ext_list] names.sort() self.assertEqual(self.ext_list, names) # Ensure all the timestamps are valid according to iso8601 for ext in data['extensions']: iso8601.parse_date(ext['updated']) # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual( {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', 'name': 'Fox In Socks', 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension.', 'alias': 'FOXNSOX', 'links': []}, fox_ext) for ext in data['extensions']: url = '/fake/extensions/%s' % ext['alias'] request = webob.Request.blank(url) response = request.get_response(app) output = jsonutils.loads(response.body) self.assertEqual(ext['alias'], output['extension']['alias']) def test_get_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") response = request.get_response(app) self.assertEqual(200, response.status_int) data = jsonutils.loads(response.body) self.assertEqual( {"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0", "name": "Fox In Socks", "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension.", "alias": "FOXNSOX", "links": []}, data['extension']) def test_get_non_existing_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/4") response = request.get_response(app) self.assertEqual(404, response.status_int) def test_list_extensions_xml(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions") request.accept = "application/xml" response = request.get_response(app) self.assertEqual(200, response.status_int) root = etree.XML(response.body) self.assertEqual(NS, root.tag.split('extensions')[0]) # Make sure we have all the extensions, extras extensions being OK. exts = root.findall('{0}extension'.format(NS)) self.assertGreaterEqual(len(exts), len(self.ext_list)) # Make sure that at least Fox in Sox is correct. (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] self.assertEqual('Fox In Socks', fox_ext.get('name')) self.assertEqual( 'http://www.fox.in.socks/api/ext/pie/v1.0', fox_ext.get('namespace')) self.assertEqual('2011-01-22T13:25:27-06:00', fox_ext.get('updated')) self.assertEqual( 'The Fox In Socks Extension.', fox_ext.findtext('{0}description'.format(NS))) xmlutil.validate_schema(root, 'extensions') def test_get_extension_xml(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") request.accept = "application/xml" response = request.get_response(app) self.assertEqual(200, response.status_int) xml = response.body root = etree.XML(xml) self.assertEqual(NS, root.tag.split('extension')[0]) self.assertEqual('FOXNSOX', root.get('alias')) self.assertEqual('Fox In Socks', root.get('name')) self.assertEqual( 'http://www.fox.in.socks/api/ext/pie/v1.0', root.get('namespace')) self.assertEqual('2011-01-22T13:25:27-06:00', root.get('updated')) self.assertEqual( 'The Fox In Socks Extension.', root.findtext('{0}description'.format(NS))) xmlutil.validate_schema(root, 'extension') class StubExtensionManager(object): """Provides access to Tweedle Beetles.""" name = "Tweedle Beetle Extension" alias = "TWDLBETL" def __init__(self, resource_ext=None, action_ext=None, request_ext=None, controller_ext=None): self.resource_ext = resource_ext self.controller_ext = controller_ext self.extra_resource_ext = None def get_resources(self): resource_exts = [] if self.resource_ext: resource_exts.append(self.resource_ext) if self.extra_resource_ext: resource_exts.append(self.extra_resource_ext) return resource_exts def get_controller_extensions(self): controller_extensions = [] if self.controller_ext: controller_extensions.append(self.controller_ext) 
return controller_extensions class ExtensionControllerIdFormatTest(ExtensionTestCase): def _bounce_id(self, test_id): class BounceController(object): def show(self, req, id): return id res_ext = extensions.ResourceExtension('bounce', BounceController()) manager = StubExtensionManager(res_ext) app = router.APIRouter(manager) request = webob.Request.blank("/fake/bounce/%s" % test_id) response = request.get_response(app) return response.body def test_id_with_xml_format(self): result = self._bounce_id('foo.xml') self.assertEqual(b'foo', result) def test_id_with_json_format(self): result = self._bounce_id('foo.json') self.assertEqual(b'foo', result) def test_id_with_bad_format(self): result = self._bounce_id('foo.bad') self.assertEqual(b'foo.bad', result) cinder-8.0.0/cinder/tests/fixtures.py0000664000567000056710000000664512701406250020765 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fixtures for Cinder tests.""" # NOTE(mriedem): This is needed for importing from fixtures. from __future__ import absolute_import import logging as std_logging import os import fixtures _TRUE_VALUES = ('True', 'true', '1', 'yes') class NullHandler(std_logging.Handler): """custom default NullHandler to attempt to format the record. Used in conjunction with log_fixture.get_logging_handle_error_fixture to detect formatting errors in debug level logs without saving the logs. """ def handle(self, record): self.format(record) def emit(self, record): pass def createLock(self): self.lock = None class StandardLogging(fixtures.Fixture): """Setup Logging redirection for tests. There are a number of things we want to handle with logging in tests: * Redirect the logging to somewhere that we can test or dump it later. * Ensure that as many DEBUG messages as possible are actually executed, to ensure they are actually syntactically valid (they often have not been). * Ensure that we create useful output for tests that doesn't overwhelm the testing system (which means we can't capture the 100 MB of debug logging on every run). To do this we create a logger fixture at the root level, which defaults to INFO and create a Null Logger at DEBUG which lets us execute log messages at DEBUG but not keep the output. To support local debugging OS_DEBUG=True can be set in the environment, which will print out the full debug logging. There are also a set of overrides for particularly verbose modules to be even less than INFO. """ def setUp(self): super(StandardLogging, self).setUp() # set root logger to debug root = std_logging.getLogger() root.setLevel(std_logging.DEBUG) # supports collecting debug level for local runs if os.environ.get('OS_DEBUG') in _TRUE_VALUES: level = std_logging.DEBUG else: level = std_logging.INFO # Collect logs fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s' self.logger = self.useFixture( fixtures.FakeLogger(format=fs, level=None)) # TODO(sdague): why can't we send level through the fake # logger? 
Tests prove that it breaks, but it's worth getting # to the bottom of. root.handlers[0].setLevel(level) if level > std_logging.DEBUG: # Just attempt to format debug level logs, but don't save them handler = NullHandler() self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False)) handler.setLevel(std_logging.DEBUG) # Don't log every single DB migration step std_logging.getLogger( 'migrate.versioning.api').setLevel(std_logging.WARNING) cinder-8.0.0/cinder/policy.py0000664000567000056710000000611712701406250017243 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Cinder""" from oslo_config import cfg from oslo_policy import opts as policy_opts from oslo_policy import policy from cinder import exception CONF = cfg.CONF policy_opts.set_defaults(cfg.CONF, 'policy.json') _ENFORCER = None def init(): global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF) def enforce_action(context, action): """Checks that the action can be done by the given context. Applies a check to ensure the context's project_id and user_id can be applied to the given action using the policy enforcement api. """ return enforce(context, action, {'project_id': context.project_id, 'user_id': context.user_id}) def enforce(context, action, target): """Verifies that the action is valid on the target in this context. :param context: cinder context :param action: string representing the action to be checked this should be colon separated for clarity. i.e. ``compute:create_instance``, ``compute:attach_volume``, ``volume:attach_volume`` :param object: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :raises PolicyNotAuthorized: if verification fails. """ init() return _ENFORCER.enforce(action, target, context.to_dict(), do_raise=True, exc=exception.PolicyNotAuthorized, action=action) def check_is_admin(roles, context=None): """Whether or not user is admin according to policy setting. Can use roles or user_id from context to determine if user is admin. In a multi-domain configuration, roles alone may not be sufficient. """ init() # include project_id on target to avoid KeyError if context_is_admin # policy definition is missing, and default admin_or_owner rule # attempts to apply. Since our credentials dict does not include a # project_id, this target can never match as a generic rule. target = {'project_id': ''} if context is None: credentials = {'roles': roles} else: credentials = {'roles': context.roles, 'user_id': context.user_id } return _ENFORCER.enforce('context_is_admin', target, credentials) cinder-8.0.0/cinder/opts.py0000664000567000056710000004616012701406250016733 0ustar jenkinsjenkins00000000000000 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from cinder.api import common as cinder_api_common from cinder.api.middleware import auth as cinder_api_middleware_auth from cinder.api.middleware import sizelimit as cinder_api_middleware_sizelimit from cinder.api.views import versions as cinder_api_views_versions from cinder.backup import api as cinder_backup_api from cinder.backup import chunkeddriver as cinder_backup_chunkeddriver from cinder.backup import driver as cinder_backup_driver from cinder.backup.drivers import ceph as cinder_backup_drivers_ceph from cinder.backup.drivers import glusterfs as cinder_backup_drivers_glusterfs from cinder.backup.drivers import google as cinder_backup_drivers_google from cinder.backup.drivers import nfs as cinder_backup_drivers_nfs from cinder.backup.drivers import posix as cinder_backup_drivers_posix from cinder.backup.drivers import swift as cinder_backup_drivers_swift from cinder.backup.drivers import tsm as cinder_backup_drivers_tsm from cinder.backup import manager as cinder_backup_manager from cinder.cmd import all as cinder_cmd_all from cinder.cmd import volume as cinder_cmd_volume from cinder.common import config as cinder_common_config import cinder.compute from cinder.compute import nova as cinder_compute_nova from cinder import context as cinder_context from cinder import coordination as cinder_coordination from cinder.db import api as cinder_db_api from cinder.db import base as cinder_db_base from cinder import exception as cinder_exception from cinder.image import glance as cinder_image_glance from cinder.image import image_utils as cinder_image_imageutils import cinder.keymgr from cinder.keymgr import conf_key_mgr as cinder_keymgr_confkeymgr from cinder.keymgr import key_mgr as cinder_keymgr_keymgr from cinder import quota as cinder_quota from cinder.scheduler import driver as cinder_scheduler_driver from cinder.scheduler import host_manager as cinder_scheduler_hostmanager from cinder.scheduler import manager as cinder_scheduler_manager from cinder.scheduler import scheduler_options as \ cinder_scheduler_scheduleroptions from cinder.scheduler.weights import capacity as \ cinder_scheduler_weights_capacity from cinder.scheduler.weights import volume_number as \ cinder_scheduler_weights_volumenumber from cinder import service as cinder_service from cinder import ssh_utils as cinder_sshutils from cinder.transfer import api as cinder_transfer_api from cinder.volume import api as cinder_volume_api from cinder.volume import driver as cinder_volume_driver from cinder.volume.drivers import block_device as \ cinder_volume_drivers_blockdevice from cinder.volume.drivers import blockbridge as \ cinder_volume_drivers_blockbridge from cinder.volume.drivers.cloudbyte import options as \ cinder_volume_drivers_cloudbyte_options from cinder.volume.drivers import coho as cinder_volume_drivers_coho from cinder.volume.drivers import datera as cinder_volume_drivers_datera from cinder.volume.drivers.dell import dell_storagecenter_common as \ cinder_volume_drivers_dell_dellstoragecentercommon from cinder.volume.drivers.disco import disco as \ cinder_volume_drivers_disco_disco from 
cinder.volume.drivers.dothill import dothill_common as \ cinder_volume_drivers_dothill_dothillcommon from cinder.volume.drivers import drbdmanagedrv as \ cinder_volume_drivers_drbdmanagedrv from cinder.volume.drivers.emc import emc_vmax_common as \ cinder_volume_drivers_emc_emcvmaxcommon from cinder.volume.drivers.emc import emc_vnx_cli as \ cinder_volume_drivers_emc_emcvnxcli from cinder.volume.drivers.emc import scaleio as \ cinder_volume_drivers_emc_scaleio from cinder.volume.drivers.emc import xtremio as \ cinder_volume_drivers_emc_xtremio from cinder.volume.drivers import eqlx as cinder_volume_drivers_eqlx from cinder.volume.drivers.fujitsu import eternus_dx_common as \ cinder_volume_drivers_fujitsu_eternusdxcommon from cinder.volume.drivers import glusterfs as cinder_volume_drivers_glusterfs from cinder.volume.drivers import hgst as cinder_volume_drivers_hgst from cinder.volume.drivers.hitachi import hbsd_common as \ cinder_volume_drivers_hitachi_hbsdcommon from cinder.volume.drivers.hitachi import hbsd_fc as \ cinder_volume_drivers_hitachi_hbsdfc from cinder.volume.drivers.hitachi import hbsd_horcm as \ cinder_volume_drivers_hitachi_hbsdhorcm from cinder.volume.drivers.hitachi import hbsd_iscsi as \ cinder_volume_drivers_hitachi_hbsdiscsi from cinder.volume.drivers.hitachi import hnas_iscsi as \ cinder_volume_drivers_hitachi_hnasiscsi from cinder.volume.drivers.hitachi import hnas_nfs as \ cinder_volume_drivers_hitachi_hnasnfs from cinder.volume.drivers.hpe import hpe_3par_common as \ cinder_volume_drivers_hpe_hpe3parcommon from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \ cinder_volume_drivers_hpe_hpelefthandiscsi from cinder.volume.drivers.hpe import hpe_xp_opts as \ cinder_volume_drivers_hpe_hpexpopts from cinder.volume.drivers.huawei import huawei_driver as \ cinder_volume_drivers_huawei_huaweidriver from cinder.volume.drivers.ibm import flashsystem_common as \ cinder_volume_drivers_ibm_flashsystemcommon from cinder.volume.drivers.ibm import flashsystem_fc as \ cinder_volume_drivers_ibm_flashsystemfc from cinder.volume.drivers.ibm import flashsystem_iscsi as \ cinder_volume_drivers_ibm_flashsystemiscsi from cinder.volume.drivers.ibm import gpfs as cinder_volume_drivers_ibm_gpfs from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi from cinder.volume.drivers.ibm import xiv_ds8k as \ cinder_volume_drivers_ibm_xivds8k from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli as \ cinder_volume_drivers_infortrend_eonstor_ds_cli_commoncli from cinder.volume.drivers.lenovo import lenovo_common as \ cinder_volume_drivers_lenovo_lenovocommon from cinder.volume.drivers import lvm as cinder_volume_drivers_lvm from cinder.volume.drivers.netapp import options as \ cinder_volume_drivers_netapp_options from cinder.volume.drivers.nexenta import options as \ cinder_volume_drivers_nexenta_options from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs from cinder.volume.drivers import nimble as cinder_volume_drivers_nimble from cinder.volume.drivers.prophetstor import options as \ cinder_volume_drivers_prophetstor_options from cinder.volume.drivers import pure as cinder_volume_drivers_pure from cinder.volume.drivers import quobyte 
as cinder_volume_drivers_quobyte from cinder.volume.drivers import rbd as cinder_volume_drivers_rbd from cinder.volume.drivers import remotefs as cinder_volume_drivers_remotefs from cinder.volume.drivers.san.hp import hpmsa_common as \ cinder_volume_drivers_san_hp_hpmsacommon from cinder.volume.drivers.san import san as cinder_volume_drivers_san_san from cinder.volume.drivers import scality as cinder_volume_drivers_scality from cinder.volume.drivers import sheepdog as cinder_volume_drivers_sheepdog from cinder.volume.drivers import smbfs as cinder_volume_drivers_smbfs from cinder.volume.drivers import solidfire as cinder_volume_drivers_solidfire from cinder.volume.drivers import tegile as cinder_volume_drivers_tegile from cinder.volume.drivers import tintri as cinder_volume_drivers_tintri from cinder.volume.drivers.violin import v7000_common as \ cinder_volume_drivers_violin_v7000common from cinder.volume.drivers.vmware import vmdk as \ cinder_volume_drivers_vmware_vmdk from cinder.volume.drivers import vzstorage as cinder_volume_drivers_vzstorage from cinder.volume.drivers.windows import windows as \ cinder_volume_drivers_windows_windows from cinder.volume.drivers import xio as cinder_volume_drivers_xio from cinder.volume.drivers.zfssa import zfssaiscsi as \ cinder_volume_drivers_zfssa_zfssaiscsi from cinder.volume.drivers.zfssa import zfssanfs as \ cinder_volume_drivers_zfssa_zfssanfs from cinder.volume import manager as cinder_volume_manager from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as \ cinder_zonemanager_drivers_brocade_brcdfabricopts from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as \ cinder_zonemanager_drivers_brocade_brcdfczonedriver from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as \ cinder_zonemanager_drivers_cisco_ciscofabricopts from cinder.zonemanager.drivers.cisco import cisco_fc_zone_driver as \ cinder_zonemanager_drivers_cisco_ciscofczonedriver from cinder.zonemanager import fc_zone_manager as \ cinder_zonemanager_fczonemanager def list_opts(): return [ ('FC-ZONE-MANAGER', itertools.chain( cinder_zonemanager_fczonemanager.zone_manager_opts, cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts, cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts, )), ('KEYMGR', itertools.chain( cinder_keymgr_keymgr.encryption_opts, cinder_keymgr.keymgr_opts, cinder_keymgr_confkeymgr.key_mgr_opts, )), ('DEFAULT', itertools.chain( cinder_backup_driver.service_opts, cinder_api_common.api_common_opts, cinder_backup_drivers_ceph.service_opts, cinder_volume_drivers_smbfs.volume_opts, cinder_backup_chunkeddriver.chunkedbackup_service_opts, cinder_volume_drivers_san_san.san_opts, cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS, cinder_wsgi_eventletserver.socket_opts, cinder_sshutils.ssh_opts, cinder_volume_drivers_netapp_options.netapp_proxy_opts, cinder_volume_drivers_netapp_options.netapp_connection_opts, cinder_volume_drivers_netapp_options.netapp_transport_opts, cinder_volume_drivers_netapp_options.netapp_basicauth_opts, cinder_volume_drivers_netapp_options.netapp_cluster_opts, cinder_volume_drivers_netapp_options.netapp_7mode_opts, cinder_volume_drivers_netapp_options.netapp_provisioning_opts, cinder_volume_drivers_netapp_options.netapp_img_cache_opts, cinder_volume_drivers_netapp_options.netapp_eseries_opts, cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts, cinder_volume_drivers_netapp_options.netapp_san_opts,
cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi. storwize_svc_iscsi_opts, cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts, cinder_backup_drivers_tsm.tsm_opts, cinder_volume_drivers_fujitsu_eternusdxcommon. FJ_ETERNUS_DX_OPT_opts, cinder_volume_drivers_ibm_gpfs.gpfs_opts, cinder_volume_drivers_violin_v7000common.violin_opts, cinder_volume_drivers_nexenta_options.NEXENTA_CONNECTION_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_ISCSI_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_DATASET_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_NFS_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_RRMGR_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_EDGE_OPTS, cinder_exception.exc_log_opts, cinder_common_config.global_opts, cinder_scheduler_weights_capacity.capacity_weight_opts, cinder_volume_drivers_sheepdog.sheepdog_opts, [cinder_api_middleware_sizelimit.max_request_body_size_opt], cinder_volume_drivers_solidfire.sf_opts, cinder_backup_drivers_swift.swiftbackup_service_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_add_qosgroup_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_create_volume_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_connection_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_update_volume_opts, cinder_service.service_opts, cinder_compute.compute_opts, cinder_volume_drivers_drbdmanagedrv.drbd_opts, cinder_volume_drivers_dothill_dothillcommon.common_opts, cinder_volume_drivers_dothill_dothillcommon.iscsi_opts, cinder_volume_drivers_glusterfs.volume_opts, cinder_volume_drivers_pure.PURE_OPTS, cinder_context.context_opts, cinder_scheduler_driver.scheduler_driver_opts, cinder_volume_drivers_scality.volume_opts, cinder_volume_drivers_emc_emcvnxcli.loc_opts, cinder_volume_drivers_vmware_vmdk.vmdk_opts, cinder_volume_drivers_lenovo_lenovocommon.common_opts, cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts, cinder_backup_drivers_posix.posixbackup_service_opts, cinder_volume_drivers_emc_scaleio.scaleio_opts, [cinder_db_base.db_driver_opt], cinder_volume_drivers_eqlx.eqlx_opts, cinder_transfer_api.volume_transfer_opts, cinder_db_api.db_opts, cinder_scheduler_weights_volumenumber. volume_number_weight_opts, cinder_volume_drivers_coho.coho_opts, cinder_volume_drivers_xio.XIO_OPTS, cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc. storwize_svc_fc_opts, cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS, cinder_volume_driver.volume_opts, cinder_volume_driver.iser_opts, cinder_api_views_versions.versions_opts, cinder_volume_drivers_nimble.nimble_opts, cinder_volume_drivers_windows_windows.windows_opts, cinder_volume_drivers_san_hp_hpmsacommon.common_opts, cinder_volume_drivers_san_hp_hpmsacommon.iscsi_opts, cinder_image_glance.glance_opts, cinder_image_glance.glance_core_properties_opts, cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts, cinder_volume_drivers_lvm.volume_opts, cinder_volume_drivers_emc_emcvmaxcommon.emc_opts, cinder_volume_drivers_remotefs.nas_opts, cinder_volume_drivers_remotefs.volume_opts, cinder_volume_drivers_emc_xtremio.XTREMIO_OPTS, cinder_backup_drivers_google.gcsbackup_service_opts, [cinder_api_middleware_auth.use_forwarded_for_opt], cinder_volume_drivers_hitachi_hbsdcommon.volume_opts, cinder_volume_drivers_infortrend_eonstor_ds_cli_commoncli. infortrend_esds_opts, cinder_volume_drivers_infortrend_eonstor_ds_cli_commoncli.
infortrend_esds_extra_opts, cinder_volume_drivers_hitachi_hnasiscsi.iSCSI_OPTS, cinder_volume_drivers_rbd.rbd_opts, cinder_volume_drivers_tintri.tintri_opts, cinder_backup_api.backup_api_opts, cinder_volume_drivers_hitachi_hbsdhorcm.volume_opts, cinder_backup_manager.backup_manager_opts, cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon. storwize_svc_opts, cinder_volume_drivers_hitachi_hbsdfc.volume_opts, cinder_quota.quota_opts, cinder_volume_drivers_huawei_huaweidriver.huawei_opts, cinder_volume_drivers_dell_dellstoragecentercommon. common_opts, cinder_scheduler_hostmanager.host_manager_opts, [cinder_scheduler_manager.scheduler_driver_opt], cinder_backup_drivers_nfs.nfsbackup_service_opts, cinder_volume_drivers_blockbridge.blockbridge_opts, [cinder_scheduler_scheduleroptions. scheduler_json_config_location_opt], cinder_volume_drivers_zfssa_zfssanfs.ZFSSA_OPTS, cinder_volume_drivers_disco_disco.disco_opts, cinder_volume_drivers_hgst.hgst_opts, cinder_image_imageutils.image_helper_opts, cinder_compute_nova.nova_opts, cinder_volume_drivers_ibm_flashsystemfc.flashsystem_fc_opts, cinder_volume_drivers_prophetstor_options.DPL_OPTS, cinder_volume_drivers_hpe_hpexpopts.FC_VOLUME_OPTS, cinder_volume_drivers_hpe_hpexpopts.COMMON_VOLUME_OPTS, cinder_volume_drivers_hpe_hpexpopts.HORCM_VOLUME_OPTS, cinder_volume_drivers_hitachi_hbsdiscsi.volume_opts, cinder_volume_manager.volume_manager_opts, cinder_volume_drivers_ibm_flashsystemiscsi. flashsystem_iscsi_opts, cinder_volume_drivers_tegile.tegile_opts, cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts, [cinder_volume_api.allow_force_upload_opt], [cinder_volume_api.volume_host_opt], [cinder_volume_api.volume_same_az_opt], [cinder_volume_api.az_cache_time_opt], cinder_volume_drivers_ibm_xivds8k.xiv_ds8k_opts, cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, cinder_volume_drivers_datera.d_opts, cinder_volume_drivers_blockdevice.volume_opts, cinder_volume_drivers_quobyte.volume_opts, cinder_volume_drivers_vzstorage.vzstorage_opts, cinder_volume_drivers_nfs.nfs_opts, )), ('CISCO_FABRIC_EXAMPLE', itertools.chain( cinder_zonemanager_drivers_cisco_ciscofabricopts. cisco_zone_opts, )), ('BRCD_FABRIC_EXAMPLE', itertools.chain( cinder_zonemanager_drivers_brocade_brcdfabricopts. brcd_zone_opts, )), ('COORDINATION', itertools.chain( cinder_coordination.coordination_opts, )), ('BACKEND', itertools.chain( [cinder_cmd_volume.host_opt], [cinder_cmd_all.volume_cmd.host_opt], )), ] cinder-8.0.0/cinder/volume/0000775000567000056710000000000012701406543016701 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/qos_specs.py0000664000567000056710000002233612701406250021253 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 eBay Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
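# (Editor's note on the preceding cinder/opts.py, illustrative only.)
# The list_opts() entry point above is what oslo-config-generator consumes
# to build a sample cinder.conf: it returns (section-name, options-iterable)
# pairs. A quick, hypothetical sanity check could look like:
#
#     from cinder import opts
#
#     for section, options in opts.list_opts():
#         print(section, len(list(options)))
#
# Note that each itertools.chain() is a one-shot iterator, which is fine
# for a single generator pass.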
"""The QoS Specs Implementation""" from oslo_db import exception as db_exc from oslo_log import log as logging from cinder import context from cinder import db from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.volume import volume_types LOG = logging.getLogger(__name__) CONTROL_LOCATION = ['front-end', 'back-end', 'both'] def _verify_prepare_qos_specs(specs, create=True): """Check if 'consumer' value in qos specs is valid. Verify 'consumer' value in qos_specs is valid, raise exception if not. Assign default value to 'consumer', which is 'back-end' if input is empty. :params create a flag indicate if specs being verified is for create. If it's false, that means specs is for update, so that there's no need to add 'consumer' if that wasn't in specs. """ # Check control location, if it's missing in input, assign default # control location: 'front-end' if not specs: specs = {} # remove 'name' since we will handle that elsewhere. if specs.get('name', None): del specs['name'] try: if specs['consumer'] not in CONTROL_LOCATION: msg = _("Valid consumer of QoS specs are: %s") % CONTROL_LOCATION raise exception.InvalidQoSSpecs(reason=msg) except KeyError: # Default consumer is back-end, i.e Cinder volume service if create: specs['consumer'] = 'back-end' return specs def create(context, name, specs=None): """Creates qos_specs. :param specs dictionary that contains specifications for QoS e.g. {'consumer': 'front-end', 'total_iops_sec': 1000, 'total_bytes_sec': 1024000} """ _verify_prepare_qos_specs(specs) values = dict(name=name, qos_specs=specs) LOG.debug("Dict for qos_specs: %s", values) try: qos_specs_ref = db.qos_specs_create(context, values) except db_exc.DBDataError: msg = _('Error writing field to database') LOG.exception(msg) raise exception.Invalid(msg) except db_exc.DBError: LOG.exception(_LE('DB error:')) raise exception.QoSSpecsCreateFailed(name=name, qos_specs=specs) return qos_specs_ref def update(context, qos_specs_id, specs): """Update qos specs. :param specs dictionary that contains key/value pairs for updating existing specs. e.g. {'consumer': 'front-end', 'total_iops_sec': 500, 'total_bytes_sec': 512000,} """ # need to verify specs in case 'consumer' is passed _verify_prepare_qos_specs(specs, create=False) LOG.debug('qos_specs.update(): specs %s' % specs) try: res = db.qos_specs_update(context, qos_specs_id, specs) except db_exc.DBError: LOG.exception(_LE('DB error:')) raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id, qos_specs=specs) return res def delete(context, qos_specs_id, force=False): """Marks qos specs as deleted. 'force' parameter is a flag to determine whether should destroy should continue when there were entities associated with the qos specs. force=True indicates caller would like to mark qos specs as deleted even if there was entities associate with target qos specs. Trying to delete a qos specs still associated with entities will cause QoSSpecsInUse exception if force=False (default). 
""" if qos_specs_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) # check if there is any entity associated with this qos specs res = db.qos_specs_associations_get(context, qos_specs_id) if res and not force: raise exception.QoSSpecsInUse(specs_id=qos_specs_id) elif res and force: # remove all association db.qos_specs_disassociate_all(context, qos_specs_id) db.qos_specs_delete(context, qos_specs_id) def delete_keys(context, qos_specs_id, keys): """Marks specified key of target qos specs as deleted.""" if qos_specs_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) # make sure qos_specs_id is valid get_qos_specs(context, qos_specs_id) for key in keys: db.qos_specs_item_delete(context, qos_specs_id, key) def get_associations(context, specs_id): """Get all associations of given qos specs.""" try: # query returns a list of volume types associated with qos specs associates = db.qos_specs_associations_get(context, specs_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) msg = _('Failed to get all associations of ' 'qos specs %s') % specs_id LOG.warning(msg) raise exception.CinderException(message=msg) result = [] for vol_type in associates: member = dict(association_type='volume_type') member.update(dict(name=vol_type['name'])) member.update(dict(id=vol_type['id'])) result.append(member) return result def associate_qos_with_type(context, specs_id, type_id): """Associate qos_specs with volume type. Associate target qos specs with specific volume type. Would raise following exceptions: VolumeTypeNotFound - if volume type doesn't exist; QoSSpecsNotFound - if qos specs doesn't exist; InvalidVolumeType - if volume type is already associated with qos specs other than given one. QoSSpecsAssociateFailed - if there was general DB error :param specs_id: qos specs ID to associate with :param type_id: volume type ID to associate with """ try: get_qos_specs(context, specs_id) res = volume_types.get_volume_type_qos_specs(type_id) if res.get('qos_specs', None): if res['qos_specs'].get('id') != specs_id: msg = (_("Type %(type_id)s is already associated with another " "qos specs: %(qos_specs_id)s") % {'type_id': type_id, 'qos_specs_id': res['qos_specs']['id']}) raise exception.InvalidVolumeType(reason=msg) else: db.qos_specs_associate(context, specs_id, type_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) LOG.warning(_LW('Failed to associate qos specs ' '%(id)s with type: %(vol_type_id)s'), dict(id=specs_id, vol_type_id=type_id)) raise exception.QoSSpecsAssociateFailed(specs_id=specs_id, type_id=type_id) def disassociate_qos_specs(context, specs_id, type_id): """Disassociate qos_specs from volume type.""" try: get_qos_specs(context, specs_id) db.qos_specs_disassociate(context, specs_id, type_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) LOG.warning(_LW('Failed to disassociate qos specs ' '%(id)s with type: %(vol_type_id)s'), dict(id=specs_id, vol_type_id=type_id)) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=type_id) def disassociate_all(context, specs_id): """Disassociate qos_specs from all entities.""" try: get_qos_specs(context, specs_id) db.qos_specs_disassociate_all(context, specs_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) LOG.warning(_LW('Failed to disassociate qos specs %s.'), specs_id) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=None) def get_all_specs(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, 
sort_dirs=None): """Get all non-deleted qos specs.""" qos_specs = db.qos_specs_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return qos_specs def get_qos_specs(ctxt, id): """Retrieves single qos specs by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) if ctxt is None: ctxt = context.get_admin_context() return db.qos_specs_get(ctxt, id) def get_qos_specs_by_name(context, name): """Retrieves single qos specs by name.""" if name is None: msg = _("name cannot be None") raise exception.InvalidQoSSpecs(reason=msg) return db.qos_specs_get_by_name(context, name) cinder-8.0.0/cinder/volume/targets/0000775000567000056710000000000012701406543020352 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/targets/iser.py0000664000567000056710000000341712701406250021666 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.i18n import _LW from cinder.volume.targets import tgt LOG = logging.getLogger(__name__) class ISERTgtAdm(tgt.TgtAdm): VERSION = '0.2' def __init__(self, *args, **kwargs): super(ISERTgtAdm, self).__init__(*args, **kwargs) LOG.warning(_LW('ISERTgtAdm is deprecated, you should ' 'now just use LVMVolumeDriver and specify ' 'iscsi_helper for the target driver you ' 'wish to use. In order to enable iser, please ' 'set iscsi_protocol=iser with lioadm or tgtadm ' 'target helpers.')) self.volumes_dir = self.configuration.safe_get('volumes_dir') self.iscsi_protocol = 'iser' self.protocol = 'iSER' # backwards compatibility mess self.configuration.num_volume_device_scan_tries = \ self.configuration.num_iser_scan_tries self.configuration.iscsi_target_prefix = \ self.configuration.iser_target_prefix self.configuration.iscsi_ip_address = \ self.configuration.iser_ip_address self.configuration.iscsi_port = self.configuration.iser_port cinder-8.0.0/cinder/volume/targets/cxt.py0000664000567000056710000002322712701406250021523 0ustar jenkinsjenkins00000000000000# Copyright 2015 Chelsio Communications Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
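# (Editor's aside on the ISERTgtAdm deprecation warning above; the backend
# section name below is hypothetical and this is illustrative only.) The
# suggested replacement is the LVM driver plus an iSER-capable target
# helper, which in cinder.conf would look roughly like:
#
#     [lvm-iser]
#     volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#     iscsi_helper = lioadm
#     iscsi_protocol = iser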
import os

from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import netutils

from cinder import exception
from cinder.i18n import _LI, _LW, _LE
from cinder import utils
from cinder.volume.targets import iscsi

LOG = logging.getLogger(__name__)


class CxtAdm(iscsi.ISCSITarget):
    """Chiscsi target configuration for block storage devices.

    This includes things like create targets, attach, detach etc.
    """

    TARGET_FMT = """
              target:
                      TargetName=%s
                      TargetDevice=%s
                      PortalGroup=1@%s
              """
    TARGET_FMT_WITH_CHAP = """
              target:
                      TargetName=%s
                      TargetDevice=%s
                      PortalGroup=1@%s
                      AuthMethod=CHAP
                      Auth_CHAP_Policy=Oneway
                      Auth_CHAP_Initiator=%s
              """

    cxt_subdir = 'cxt'

    def __init__(self, *args, **kwargs):
        super(CxtAdm, self).__init__(*args, **kwargs)
        self.volumes_dir = self.configuration.safe_get('volumes_dir')
        self.volumes_dir = os.path.join(self.volumes_dir, self.cxt_subdir)
        self.config = self.configuration.safe_get('chiscsi_conf')

    def _get_volumes_dir(self):
        return self.volumes_dir

    def _get_target(self, iqn):
        # We can use target=iqn here, but iscsictl has no --brief mode, and
        # this way we save on a lot of unnecessary parsing
        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                parsed = line.split()
                tid = parsed[2]
                return tid[3:].rstrip(',')

        return None

    def _get_iscsi_target(self, context, vol_id):
        return 0

    def _get_target_and_lun(self, context, volume):
        lun = 0  # For chiscsi dev starts at lun 0
        iscsi_target = 1
        return iscsi_target, lun

    @staticmethod
    def _get_portal(ip, port=None):
        # ipv6 addresses use [ip]:port format, ipv4 use ip:port
        portal_port = ':%d' % port if port else ''

        if netutils.is_valid_ipv4(ip):
            portal_ip = ip
        else:
            portal_ip = '[' + ip + ']'

        return portal_ip + portal_port

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)

        volumes_dir = self._get_volumes_dir()
        fileutils.ensure_tree(volumes_dir)

        vol_id = name.split(':')[1]

        cfg_port = kwargs.get('portals_port')
        cfg_ips = kwargs.get('portals_ips')

        portals = ','.join(map(lambda ip: self._get_portal(ip, cfg_port),
                               cfg_ips))

        if chap_auth is None:
            volume_conf = self.TARGET_FMT % (name, path, portals)
        else:
            volume_conf = self.TARGET_FMT_WITH_CHAP % (name,
                                                       path, portals,
                                                       '"%s":"%s"' % chap_auth)
        LOG.debug('Creating iscsi_target for: %s', vol_id)
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(_LW('Persistence file already exists for volume, '
                            'found file at: %s'), volume_path)
        utils.robust_file_write(volumes_dir, vol_id, volume_conf)
        LOG.debug('Created volume path %(vp)s,\n'
                  'content: %(vc)s',
                  {'vp': volume_path, 'vc': volume_conf})

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name:
            LOG.debug('Detected old persistence file for volume '
                      '%(vol)s at %(old_name)s',
                      {'vol': vol_id, 'old_name': old_name})
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
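            # (Editor's illustration, not part of the original driver.)
            # With the TARGET_FMT template above, the persistence file
            # handed to 'iscsictl -S ... -f <file>' ends up looking
            # roughly like this, with hypothetical values:
            #
            #     target:
            #             TargetName=iqn.2010-10.org.openstack:volume-<id>
            #             TargetDevice=/dev/<vg>/volume-<id>
            #             PortalGroup=1@10.0.0.2:3260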
(out, err) = utils.execute('iscsictl', '-S', 'target=%s' % name, '-f', volume_path, '-x', self.config, run_as_root=True) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to create iscsi target for volume " "id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) # Don't forget to remove the persistent file we created os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) finally: LOG.debug("StdOut from iscsictl -S: %s", out) LOG.debug("StdErr from iscsictl -S: %s", err) # Grab targets list for debug (out, err) = utils.execute('iscsictl', '-c', 'target=ALL', run_as_root=True) LOG.debug("Targets after update: %s", out) iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: LOG.error(_LE("Failed to create iscsi target for volume " "id:%(vol_id)s. Please verify your configuration " "in %(volumes_dir)s'"), { 'vol_id': vol_id, 'volumes_dir': volumes_dir, }) raise exception.NotFound() if old_persist_file is not None and os.path.exists(old_persist_file): os.unlink(old_persist_file) return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): LOG.info(_LI('Removing iscsi_target for: %s'), vol_id) vol_uuid_file = vol_name volume_path = os.path.join(self._get_volumes_dir(), vol_uuid_file) if not os.path.exists(volume_path): LOG.warning(_LW('Volume path %s does not exist, ' 'nothing to remove.'), volume_path) return if os.path.isfile(volume_path): iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file) else: raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) target_exists = False try: (out, err) = utils.execute('iscsictl', '-c', 'target=%s' % iqn, run_as_root=True) LOG.debug("StdOut from iscsictl -c: %s", out) LOG.debug("StdErr from iscsictl -c: %s", err) except putils.ProcessExecutionError as e: if "NOT found" in e.stdout: LOG.info(_LI("No iscsi target present for volume " "id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) return else: raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) else: target_exists = True try: utils.execute('iscsictl', '-s', 'target=%s' % iqn, run_as_root=True) except putils.ProcessExecutionError as e: # There exists a race condition where multiple calls to # remove_iscsi_target come in simultaneously. If we can poll # for a target successfully but it is gone before we can remove # it, fail silently if "is not found" in e.stderr and target_exists: LOG.info(_LI("No iscsi target present for volume " "id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) return else: LOG.error(_LE("Failed to remove iscsi target for volume " "id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # Carried over from tgt # NOTE(jdg): This *should* be there still but incase # it's not we don't care, so just ignore it if was # somehow deleted between entry of this method # and here if os.path.exists(volume_path): os.unlink(volume_path) else: LOG.debug('Volume path %s not found at end, ' 'of remove_iscsi_target.', volume_path) cinder-8.0.0/cinder/volume/targets/tgt.py0000664000567000056710000003153712701406250021526 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import textwrap
import time

from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import fileutils

from cinder import exception
from cinder.i18n import _LI, _LW, _LE
from cinder import utils
from cinder.volume.targets import iscsi

LOG = logging.getLogger(__name__)


class TgtAdm(iscsi.ISCSITarget):
    """Target object for block storage devices.

    Base class for target object, where target
    is data transport mechanism (target) specific calls.
    This includes things like create targets, attach, detach
    etc.
    """

    VOLUME_CONF = textwrap.dedent("""
                <target %(name)s>
                    backing-store %(path)s
                    driver %(driver)s
                    %(chap_auth)s
                    %(target_flags)s
                    write-cache %(write_cache)s
                </target>
                  """)

    def __init__(self, *args, **kwargs):
        super(TgtAdm, self).__init__(*args, **kwargs)

    def _get_target(self, iqn):
        (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                parsed = line.split()
                tid = parsed[1]
                return tid[:-1]

        return None

    def _verify_backing_lun(self, iqn, tid):
        backing_lun = True
        capture = False
        target_info = []

        (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')

        for line in lines:
            if iqn in line and "Target %s" % tid in line:
                capture = True
            if capture:
                target_info.append(line)
            if iqn not in line and 'Target ' in line:
                capture = False

        if ' LUN: 1' not in target_info:
            backing_lun = False

        return backing_lun

    def _recreate_backing_lun(self, iqn, tid, name, path):
        LOG.warning(_LW('Attempting recreate of backing lun...'))

        # Since we think the most common case of this is a dev busy
        # (create vol from snapshot) we're going to add a sleep here
        # this will hopefully give things enough time to stabilize
        # how long should we wait??  I have no idea, let's go big
        # and error on the side of caution
        time.sleep(10)

        (out, err) = (None, None)
        try:
            (out, err) = utils.execute('tgtadm', '--lld', 'iscsi',
                                       '--op', 'new', '--mode',
                                       'logicalunit', '--tid',
                                       tid, '--lun', '1', '-b',
                                       path, run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed recovery attempt to create "
                          "iscsi backing lun for Volume "
                          "ID:%(vol_id)s: %(e)s"),
                      {'vol_id': name, 'e': e})
        finally:
            LOG.debug('StdOut from recreate backing lun: %s', out)
            LOG.debug('StdErr from recreate backing lun: %s', err)

    def _get_iscsi_target(self, context, vol_id):
        return 0

    def _get_target_and_lun(self, context, volume):
        lun = 1  # For tgtadm the controller is lun 0, dev starts at lun 1
        iscsi_target = 0  # NOTE(jdg): Not used by tgtadm
        return iscsi_target, lun

    @utils.retry(putils.ProcessExecutionError)
    def _do_tgt_update(self, name):
        (out, err) = utils.execute('tgt-admin', '--update', name,
                                   run_as_root=True)
        LOG.debug("StdOut from tgt-admin --update: %s", out)
        LOG.debug("StdErr from tgt-admin --update: %s", err)

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078
        # for now, since we intermittently hit target already exists we're
        # adding some debug info to try and pinpoint what's going on
        (out, err) = utils.execute('tgtadm',
                                   '--lld', 'iscsi',
                                   '--op', 'show',
                                   '--mode', 'target',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)

        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = self.configuration.get('iscsi_write_cache', 'on')
        driver = self.iscsi_protocol
        chap_str = ''

        if chap_auth is not None:
            chap_str = 'incominguser %s %s' % chap_auth

        target_flags = self.configuration.get('iscsi_target_flags', '')
        if target_flags:
            target_flags = 'bsoflags ' + target_flags

        volume_conf = self.VOLUME_CONF % {
            'name': name, 'path': path, 'driver': driver,
            'chap_auth': chap_str, 'target_flags': target_flags,
            'write_cache': write_cache}

        LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(_LW('Persistence file already exists for volume, '
                            'found file at: %s'), volume_path)
        utils.robust_file_write(volumes_dir, vol_id, volume_conf)
        LOG.debug(('Created volume path %(vp)s,\n'
                   'content: %(vc)s'),
                  {'vp': volume_path, 'vc': volume_conf})

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            LOG.debug('Detected old persistence file for volume '
                      '%(vol)s at %(old_name)s',
                      {'vol': vol_id, 'old_name': old_name})
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            self._do_tgt_update(name)
        except putils.ProcessExecutionError as e:
            if "target already exists" in e.stderr:
                # Adding the additional Warning message below for a clear
                # ER marker (Ref bug: #1398078).
LOG.warning(_LW('Could not create target because ' 'it already exists for volume: %s'), vol_id) LOG.debug('Exception was: %s', e) else: LOG.error(_LE("Failed to create iscsi target for Volume " "ID: %(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) # Don't forget to remove the persistent file we created os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) # Grab targets list for debug # Consider adding a check for lun 0 and 1 for tgtadm # before considering this as valid (out, err) = utils.execute('tgtadm', '--lld', 'iscsi', '--op', 'show', '--mode', 'target', run_as_root=True) LOG.debug("Targets after update: %s", out) iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: LOG.error(_LE("Failed to create iscsi target for Volume " "ID: %(vol_id)s. Please ensure your tgtd config " "file contains 'include %(volumes_dir)s/*'"), { 'vol_id': vol_id, 'volumes_dir': volumes_dir, }) raise exception.NotFound() # NOTE(jdg): Sometimes we have some issues with the backing lun # not being created, believe this is due to a device busy # or something related, so we're going to add some code # here that verifies the backing lun (lun 1) was created # and we'll try and recreate it if it's not there if not self._verify_backing_lun(iqn, tid): try: self._recreate_backing_lun(iqn, tid, name, path) except putils.ProcessExecutionError: os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) # Finally check once more and if no go, fail and punt if not self._verify_backing_lun(iqn, tid): os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) if old_persist_file is not None and os.path.exists(old_persist_file): os.unlink(old_persist_file) return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): LOG.info(_LI('Removing iscsi_target for Volume ID: %s'), vol_id) vol_uuid_file = vol_name volume_path = os.path.join(self.volumes_dir, vol_uuid_file) if not os.path.exists(volume_path): LOG.warning(_LW('Volume path %s does not exist, ' 'nothing to remove.'), volume_path) return if os.path.isfile(volume_path): iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file) else: raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) try: # NOTE(vish): --force is a workaround for bug: # https://bugs.launchpad.net/cinder/+bug/1159948 utils.execute('tgt-admin', '--force', '--delete', iqn, run_as_root=True) except putils.ProcessExecutionError as e: non_fatal_errors = ("can't find the target", "access control rule does not exist") if any(error in e.stderr for error in non_fatal_errors): LOG.warning(_LW("Failed target removal because target or " "ACL's couldn't be found for iqn: %s."), iqn) else: LOG.error(_LE("Failed to remove iscsi target for Volume " "ID: %(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # NOTE(jdg): There's a bug in some versions of tgt that # will sometimes fail silently when using the force flag # https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343 # For now work-around by checking if the target was deleted, # if it wasn't, try again without the force. 
        # This will NOT do any good for the case of multiple sessions
        # which the force was added for, but it will however address
        # the cases pointed out in bug:
        # https://bugs.launchpad.net/cinder/+bug/1304122
        if self._get_target(iqn):
            try:
                LOG.warning(_LW('Silent failure of target removal '
                                'detected, retry....'))
                utils.execute('tgt-admin', '--delete', iqn,
                              run_as_root=True)
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to remove iscsi target for Volume "
                              "ID: %(vol_id)s: %(e)s"),
                          {'vol_id': vol_id, 'e': e})
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        # NOTE(jdg): This *should* be there still but in case
        # it's not we don't care, so just ignore it if it was
        # somehow deleted between entry of this method
        # and here
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        else:
            LOG.debug('Volume path %s not found at end of '
                      'remove_iscsi_target.', volume_path)
cinder-8.0.0/cinder/volume/targets/__init__.py0000664000567000056710000000000012701406250022444 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/targets/iscsi.py0000664000567000056710000003642212701406250022040 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from oslo_concurrency import processutils
from oslo_log import log as logging

from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder import utils
from cinder.volume.targets import driver
from cinder.volume import utils as vutils

LOG = logging.getLogger(__name__)


class ISCSITarget(driver.Target):
    """Target object for block storage devices.

    Base class for target object, where target
    is data transport mechanism (target) specific calls.
    This includes things like create targets, attach, detach
    etc.
    """

    def __init__(self, *args, **kwargs):
        super(ISCSITarget, self).__init__(*args, **kwargs)
        self.iscsi_target_prefix = \
            self.configuration.safe_get('iscsi_target_prefix')
        self.iscsi_protocol = \
            self.configuration.safe_get('iscsi_protocol')
        self.protocol = 'iSCSI'
        self.volumes_dir = self.configuration.safe_get('volumes_dir')

    def _get_iscsi_properties(self, volume, multipath=False):
        """Gets iscsi configuration

        We ideally get saved information in the volume entity, but fall
        back to discovery if need be. Discovery may be completely removed
        in the future.

        The properties are:

        :target_discovered: boolean indicating whether discovery was used

        :target_iqn: the IQN of the iSCSI target

        :target_portal: the portal of the iSCSI target

        :target_lun: the lun of the iSCSI target

        :volume_id: the uuid of the volume

        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is
            not present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.

        :discard: boolean indicating if discard is supported

        In some drivers that support multiple connections (for multipath
        and for single path with failover on connection failure), it
        returns :target_iqns, :target_portals, :target_luns, which contain
        lists of multiple values.
        The main portal information is also returned in :target_iqn,
        :target_portal, :target_lun for backward compatibility.

        Note that some drivers don't return :target_portals even if they
        support multipath. In that case the connector should use
        sendtargets discovery to find the other portals if it supports
        multipath.
        """

        properties = {}

        location = volume['provider_location']

        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)

            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                       (volume['name']))
                raise exception.InvalidVolume(reason=msg)

            LOG.debug("ISCSI Discovery: Found %s", location)
            properties['target_discovered'] = True

        results = location.split(" ")
        portals = results[0].split(",")[0].split(";")
        iqn = results[1]
        nr_portals = len(portals)

        try:
            lun = int(results[2])
        except (IndexError, ValueError):
            # NOTE(jdg): The following is carried over from the existing
            # code. The trick here is that different targets use different
            # default lun numbers, the base driver with tgtadm uses 1,
            # others like LIO use 0.
            if (self.configuration.volume_driver ==
                    'cinder.volume.drivers.lvm.ThinLVMVolumeDriver' and
                    self.configuration.iscsi_helper == 'tgtadm'):
                lun = 1
            else:
                lun = 0

        if nr_portals > 1 or multipath:
            properties['target_portals'] = portals
            properties['target_iqns'] = [iqn] * nr_portals
            properties['target_luns'] = [lun] * nr_portals

        properties['target_portal'] = portals[0]
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun

        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size

        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None

        return properties

    def _iscsi_authentication(self, chap, name, password):
        return "%s %s %s" % (chap, name, password)

    def _do_iscsi_discovery(self, volume):
        # TODO(justinsb): Deprecate discovery and use stored info
        # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
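        # (Editor's illustration, not part of the original code.) Both the
        # stored provider_location and a discovery line parsed by
        # _get_iscsi_properties() above have the shape
        # '<portal>[;<portal>...],<tpgt> <iqn> [<lun>]', e.g. the
        # hypothetical value:
        #
        #     10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-<id> 1
        #
        # _iscsi_location() below builds provider_location in the same
        # format.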
LOG.warning(_LW("ISCSI provider_location not stored, using discovery")) volume_id = volume['id'] try: # NOTE(griff) We're doing the split straight away which should be # safe since using '@' in hostname is considered invalid (out, _err) = utils.execute('iscsiadm', '-m', 'discovery', '-t', 'sendtargets', '-p', volume['host'].split('@')[0], run_as_root=True) except processutils.ProcessExecutionError as ex: LOG.error(_LE("ISCSI discovery attempt failed for:%s") % volume['host'].split('@')[0]) LOG.debug(("Error from iscsiadm -m discovery: %s") % ex.stderr) return None for target in out.splitlines(): if (self.configuration.safe_get('iscsi_ip_address') in target and volume_id in target): return target return None def _get_portals_config(self): # Prepare portals configuration portals_ips = ([self.configuration.iscsi_ip_address] + self.configuration.iscsi_secondary_ip_addresses or []) return {'portals_ips': portals_ips, 'portals_port': self.configuration.iscsi_port} def create_export(self, context, volume, volume_path): """Creates an export for a logical volume.""" # 'iscsi_name': 'iqn.2010-10.org.openstack:volume-00000001' iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, volume['name']) iscsi_target, lun = self._get_target_and_lun(context, volume) # Verify we haven't setup a CHAP creds file already # if DNE no big deal, we'll just create it chap_auth = self._get_target_chap_auth(context, iscsi_name) if not chap_auth: chap_auth = (vutils.generate_username(), vutils.generate_password()) # Get portals ips and port portals_config = self._get_portals_config() # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need # should clean this all up at some point in the future tid = self.create_iscsi_target(iscsi_name, iscsi_target, lun, volume_path, chap_auth, **portals_config) data = {} data['location'] = self._iscsi_location( self.configuration.iscsi_ip_address, tid, iscsi_name, lun, self.configuration.iscsi_secondary_ip_addresses) LOG.debug('Set provider_location to: %s', data['location']) data['auth'] = self._iscsi_authentication( 'CHAP', *chap_auth) return data def remove_export(self, context, volume): try: iscsi_target, lun = self._get_target_and_lun(context, volume) except exception.NotFound: LOG.info(_LI("Skipping remove_export. No iscsi_target " "provisioned for volume: %s"), volume['id']) return try: # NOTE: provider_location may be unset if the volume hasn't # been exported location = volume['provider_location'].split(' ') iqn = location[1] # ietadm show will exit with an error # this export has already been removed self.show_target(iscsi_target, iqn=iqn) except Exception: LOG.info(_LI("Skipping remove_export. No iscsi_target " "is presently exported for volume: %s"), volume['id']) return # NOTE: For TgtAdm case volume['id'] is the ONLY param we need self.remove_iscsi_target(iscsi_target, lun, volume['id'], volume['name']) def ensure_export(self, context, volume, volume_path): """Recreates an export for a logical volume.""" iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, volume['name']) # Verify we haven't setup a CHAP creds file already # if DNE no big deal, we'll just create it chap_auth = self._get_target_chap_auth(context, iscsi_name) if not chap_auth: LOG.info(_LI("Skipping ensure_export. 
No iscsi_target "
                         "provisioned for volume: %s"), volume['id'])

        # Get portals ips and port
        portals_config = self._get_portals_config()

        iscsi_target, lun = self._get_target_and_lun(context, volume)
        self.create_iscsi_target(
            iscsi_name, iscsi_target, lun, volume_path,
            chap_auth, check_exit_code=False,
            old_name=None, **portals_config)

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value::

            {
                'driver_volume_type': 'iscsi'
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.1:3260',
                    'volume_id': '9a0d35d0-175a-11e4-8c21-0800200c9a66',
                    'discard': False,
                }
            }
        """

        iscsi_properties = self._get_iscsi_properties(volume,
                                                      connector.get(
                                                          'multipath'))
        return {
            'driver_volume_type': self.iscsi_protocol,
            'data': iscsi_properties
        }

    def terminate_connection(self, volume, connector, **kwargs):
        pass

    def validate_connector(self, connector):
        # NOTE(jdg): api passes in connector which is initiator info
        if 'initiator' not in connector:
            err_msg = (_LE('The volume driver requires the iSCSI initiator '
                           'name in the connector.'))
            LOG.error(err_msg)
            raise exception.InvalidConnectorException(missing='initiator')
        return True

    def _iscsi_location(self, ip, target, iqn, lun=None, ip_secondary=None):
        ip_secondary = ip_secondary or []
        port = self.configuration.iscsi_port
        portals = map(lambda x: "%s:%s" % (x, port), [ip] + ip_secondary)
        return ("%(portals)s,%(target)s %(iqn)s %(lun)s"
                % ({'portals': ";".join(portals),
                    'target': target, 'iqn': iqn, 'lun': lun}))

    def show_target(self, iscsi_target, iqn, **kwargs):
        if iqn is None:
            raise exception.InvalidParameterValue(
                err=_('valid iqn needed for show_target'))

        tid = self._get_target(iqn)
        if tid is None:
            raise exception.NotFound()

    def _get_target_chap_auth(self, context, iscsi_name):
        """Get the current chap auth username and password."""
        try:
            # 'iscsi_name': 'iqn.2010-10.org.openstack:volume-00000001'
            vol_id = iscsi_name.split(':volume-')[1]
            volume_info = self.db.volume_get(context, vol_id)
            # 'provider_auth': 'CHAP user_id password'
            if volume_info['provider_auth']:
                return tuple(volume_info['provider_auth'].split(' ', 3)[1:])
        except exception.NotFound:
            LOG.debug('Failed to get CHAP auth from DB for %s.', vol_id)

    @abc.abstractmethod
    def _get_target_and_lun(self, context, volume):
        """Get iscsi target and lun."""
        pass

    @abc.abstractmethod
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth, **kwargs):
        pass

    @abc.abstractmethod
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        pass

    @abc.abstractmethod
    def _get_iscsi_target(self, context, vol_id):
        pass

    @abc.abstractmethod
    def _get_target(self, iqn):
        pass


class SanISCSITarget(ISCSITarget):
    """iSCSI target for san devices.

    San devices are slightly different, they don't need to implement
    all of the same things that we need to implement locally for LVM
    and local block devices when we create and manage our own targets.
""" def __init__(self, *args, **kwargs): super(SanISCSITarget, self).__init__(*args, **kwargs) @abc.abstractmethod def create_export(self, context, volume, volume_path): pass @abc.abstractmethod def remove_export(self, context, volume): pass @abc.abstractmethod def ensure_export(self, context, volume, volume_path): pass @abc.abstractmethod def terminate_connection(self, volume, connector, **kwargs): pass # NOTE(jdg): Items needed for local iSCSI target drivers, # but NOT sans Stub them out here to make abc happy # Use care when looking at these to make sure something # that's inheritted isn't dependent on one of # these. def _get_target_and_lun(self, context, volume): pass def _get_target_chap_auth(self, context, iscsi_name): pass def create_iscsi_target(self, name, tid, lun, path, chap_auth, **kwargs): pass def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): pass def _get_iscsi_target(self, context, vol_id): pass def _get_target(self, iqn): pass cinder-8.0.0/cinder/volume/targets/iet.py0000664000567000056710000002512112701406250021501 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import stat from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder import exception from cinder.i18n import _LI, _LE, _LW from cinder import utils from cinder.volume.targets import iscsi LOG = logging.getLogger(__name__) class IetAdm(iscsi.ISCSITarget): VERSION = '0.1' def __init__(self, *args, **kwargs): super(IetAdm, self).__init__(*args, **kwargs) self.iet_conf = self.configuration.safe_get('iet_conf') self.iscsi_iotype = self.configuration.safe_get('iscsi_iotype') self.auth_type = 'IncomingUser' self.iet_sessions = '/proc/net/iet/session' def _get_target(self, iqn): # Find existing iSCSI target session from /proc/net/iet/session # # tid:2 name:iqn.2010-10.org:volume-222 # sid:562950561399296 initiator:iqn.1994-05.com:5a6894679665 # cid:0 ip:192.168.122.1 state:active hd:none dd:none # tid:1 name:iqn.2010-10.org:volume-111 # sid:281475567911424 initiator:iqn.1994-05.com:5a6894679665 # cid:0 ip:192.168.122.1 state:active hd:none dd:none iscsi_target = 0 try: with open(self.iet_sessions, 'r') as f: sessions = f.read() except Exception: LOG.exception(_LE("Failed to open iet session list for %s"), iqn) raise session_list = re.split('^tid:(?m)', sessions)[1:] for ses in session_list: m = re.match('(\d+) name:(\S+)\s+', ses) if m and iqn in m.group(2): return m.group(1) return iscsi_target def _get_iscsi_target(self, context, vol_id): pass def _get_target_and_lun(self, context, volume): # For ietadm dev starts at lun 0 lun = 0 # Using 0, ietadm tries to search empty tid for creating # new iSCSI target iscsi_target = 0 # Find existing iSCSI target based on iqn iqn = '%svolume-%s' % (self.iscsi_target_prefix, volume['id']) iscsi_target = self._get_target(iqn) return iscsi_target, lun def create_iscsi_target(self, name, tid, lun, path, chap_auth=None, **kwargs): config_auth = None vol_id = name.split(':')[1] # Check the 
target is already existing. tmp_tid = self._get_target(name) # Create a new iSCSI target. If a target already exists, # the command returns 234, but we ignore it. try: self._new_target(name, tid) tid = self._get_target(name) self._new_logicalunit(tid, lun, path) if chap_auth is not None: (username, password) = chap_auth config_auth = ' '.join((self.auth_type,) + chap_auth) self._new_auth(tid, self.auth_type, username, password) except putils.ProcessExecutionError: LOG.exception(_LE("Failed to create iscsi target for volume " "id:%s"), vol_id) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) # Update config file only if new scsi target is created. if not tmp_tid: self.update_config_file(name, tid, path, config_auth) return tid def update_config_file(self, name, tid, path, config_auth): conf_file = self.iet_conf vol_id = name.split(':')[1] # If config file does not exist, create a blank conf file and # add configuration for the volume on the new file. if not os.path.exists(conf_file): try: utils.execute("truncate", conf_file, "--size=0", run_as_root=True) except putils.ProcessExecutionError: LOG.exception(_LE("Failed to create %(conf)s for volume " "id:%(vol_id)s"), {'conf': conf_file, 'vol_id': vol_id}) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) try: volume_conf = """ Target %s %s Lun 0 Path=%s,Type=%s """ % (name, config_auth, path, self._iotype(path)) with utils.temporary_chown(conf_file): with open(conf_file, 'a+') as f: f.write(volume_conf) except Exception: LOG.exception(_LE("Failed to update %(conf)s for volume " "id:%(vol_id)s"), {'conf': conf_file, 'vol_id': vol_id}) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): LOG.info(_LI("Removing iscsi_target for volume: %s"), vol_id) try: self._delete_logicalunit(tid, lun) session_info = self._find_sid_cid_for_target(tid, vol_name, vol_id) if session_info: sid, cid = session_info self._force_delete_target(tid, sid, cid) self._delete_target(tid) except putils.ProcessExecutionError: LOG.exception(_LE("Failed to remove iscsi target for volume " "id:%s"), vol_id) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) vol_uuid_file = vol_name conf_file = self.iet_conf if os.path.exists(conf_file): try: with utils.temporary_chown(conf_file): with open(conf_file, 'r+') as iet_conf_text: full_txt = iet_conf_text.readlines() new_iet_conf_txt = [] count = 0 for line in full_txt: if count > 0: count -= 1 continue elif vol_uuid_file in line: count = 2 continue else: new_iet_conf_txt.append(line) iet_conf_text.seek(0) iet_conf_text.truncate(0) iet_conf_text.writelines(new_iet_conf_txt) except Exception: LOG.exception(_LE("Failed to update %(conf)s for volume id " "%(vol_id)s after removing iscsi target"), {'conf': conf_file, 'vol_id': vol_id}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) else: LOG.warning(_LW("Failed to update %(conf)s for volume id " "%(vol_id)s after removing iscsi target. 
" "%(conf)s does not exist."), {'conf': conf_file, 'vol_id': vol_id}) def _find_sid_cid_for_target(self, tid, name, vol_id): """Find sid, cid for existing iscsi target""" try: with open(self.iet_sessions, 'r') as f: sessions = f.read() except Exception as e: LOG.info(_LI("Failed to open iet session list for " "%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) return None session_list = re.split('^tid:(?m)', sessions)[1:] for ses in session_list: m = re.match('(\d+) name:(\S+)\s+sid:(\d+).+\s+cid:(\d+)', ses) if m and tid in m.group(1) and name in m.group(2): return m.group(3), m.group(4) def _is_block(self, path): mode = os.stat(path).st_mode return stat.S_ISBLK(mode) def _iotype(self, path): if self.iscsi_iotype == 'auto': return 'blockio' if self._is_block(path) else 'fileio' else: return self.iscsi_iotype def _new_target(self, name, tid): """Create new scsi target using specified parameters. If the target already exists, ietadm returns 'Invalid argument' and error code '234'. This should be ignored for ensure export case. """ utils.execute('ietadm', '--op', 'new', '--tid=%s' % tid, '--params', 'Name=%s' % name, run_as_root=True, check_exit_code=[0, 234]) def _delete_target(self, tid): utils.execute('ietadm', '--op', 'delete', '--tid=%s' % tid, run_as_root=True) def _force_delete_target(self, tid, sid, cid): utils.execute('ietadm', '--op', 'delete', '--tid=%s' % tid, '--sid=%s' % sid, '--cid=%s' % cid, run_as_root=True) def show_target(self, tid, iqn=None): utils.execute('ietadm', '--op', 'show', '--tid=%s' % tid, run_as_root=True) def _new_logicalunit(self, tid, lun, path): """Attach a new volume to scsi target as a logical unit. If a logical unit exists on the specified target lun, ietadm returns 'File exists' and error code '239'. This should be ignored for ensure export case. """ utils.execute('ietadm', '--op', 'new', '--tid=%s' % tid, '--lun=%d' % lun, '--params', 'Path=%s,Type=%s' % (path, self._iotype(path)), run_as_root=True, check_exit_code=[0, 239]) def _delete_logicalunit(self, tid, lun): utils.execute('ietadm', '--op', 'delete', '--tid=%s' % tid, '--lun=%d' % lun, run_as_root=True) def _new_auth(self, tid, type, username, password): utils.execute('ietadm', '--op', 'new', '--tid=%s' % tid, '--user', '--params=%s=%s,Password=%s' % (type, username, password), run_as_root=True) cinder-8.0.0/cinder/volume/targets/scst.py0000664000567000056710000004272212701406250021702 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder import exception from cinder import utils from cinder.i18n import _, _LE from cinder.volume.targets import iscsi from cinder.volume import utils as vutils LOG = logging.getLogger(__name__) class SCSTAdm(iscsi.ISCSITarget): def __init__(self, *args, **kwargs): super(SCSTAdm, self).__init__(*args, **kwargs) self.volumes_dir = self.configuration.safe_get('volumes_dir') self.iscsi_target_prefix = self.configuration.safe_get( 'iscsi_target_prefix') self.target_name = self.configuration.safe_get('scst_target_iqn_name') self.target_driver = self.configuration.safe_get('scst_target_driver') self.chap_username = self.configuration.safe_get('chap_username') self.chap_password = self.configuration.safe_get('chap_password') self.initiator_iqn = None self.remove_initiator_iqn = None def scst_execute(self, *args): return utils.execute('scstadmin', *args, run_as_root=True) def validate_connector(self, connector): # iSCSI drivers require the initiator information if 'initiator' not in connector: err_msg = _('The volume driver requires the iSCSI initiator ' 'name in the connector.') LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) self.initiator_iqn = connector['initiator'] def terminate_connection(self, volume, connector, **kwargs): self.remove_initiator_iqn = connector['initiator'] def _get_target(self, iqn): (out, _err) = self.scst_execute('-list_target') if iqn in out: return self._target_attribute(iqn) return None def _target_attribute(self, iqn): (out, _err) = self.scst_execute('-list_tgt_attr', iqn, '-driver', self.target_driver) lines = out.split('\n') for line in lines: if "rel_tgt_id" in line: parsed = line.split() return parsed[1] def _get_group(self): scst_group = "%s%s" % (self.initiator_iqn, self.target_name) (out, _err) = self.scst_execute('-list_group') if scst_group in out: return out return None def _get_luns_info(self): scst_group = "%s%s" % (self.initiator_iqn, self.target_name) (out, _err) = self.scst_execute('-list_group', scst_group, '-driver', self.target_driver, '-target', self.target_name) first = "Assigned LUNs:" last = "Assigned Initiators:" start = out.index(first) + len(first) end = out.index(last, start) out = out[start:end] luns = [] for line in out.strip().split("\n")[2:]: luns.append(int(line.strip().split(" ")[0])) luns = sorted(set(luns)) return luns def _get_target_and_lun(self, context, volume): iscsi_target = 0 if not self.target_name or not self._get_group(): lun = 1 return iscsi_target, lun luns = self._get_luns_info() if (not luns) or (luns[0] != 1): lun = 1 return iscsi_target, lun else: for lun in luns: if (luns[-1] == lun) or (luns[lun - 1] + 1 != luns[lun]): return iscsi_target, (lun + 1) def create_iscsi_target(self, name, vol_id, tid, lun, path, chap_auth=None): scst_group = "%s%s" % (self.initiator_iqn, self.target_name) vol_name = path.split("/")[3] try: (out, _err) = self.scst_execute('-noprompt', '-set_drv_attr', self.target_driver, '-attributes', 'enabled=1') LOG.debug('StdOut from set driver attribute: %s', out) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to set attribute for enable target driver " "%s"), e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to enable SCST Target driver.") if self._get_target(name) is None: try: (out, _err) = self.scst_execute('-add_target', name, '-driver', self.target_driver) LOG.debug("StdOut from scstadmin create target: %s", out) except 
putils.ProcessExecutionError as e: LOG.error(_LE("Failed to create iscsi target for volume " "id:%(vol_id)s: %(e)s"), {'vol_id': name, 'e': e}) raise exception.ISCSITargetCreateFailed(volume_id=vol_name) try: (out, _err) = self.scst_execute('-enable_target', name, '-driver', self.target_driver) LOG.debug("StdOut from scstadmin enable target: %s", out) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to set 'enable' attribute for " "SCST target %s"), e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to enable SCST Target.") if chap_auth and self.target_name: try: chap_string = self._iscsi_authentication('IncomingUser=', *chap_auth) (out, _err) = self.scst_execute('-noprompt', '-set_tgt_attr', name, '-driver', self.target_driver, '-attributes', chap_string) LOG.debug("StdOut from scstadmin set target attribute:" " %s.", out) except putils.ProcessExecutionError: msg = _("Failed to set attribute 'Incoming user' for " "SCST target.") LOG.exception(msg) raise exception.ISCSITargetHelperCommandFailed( error_message=msg) if self.target_name: if self._get_group() is None: try: (out, _err) = self.scst_execute('-add_group', scst_group, '-driver', self.target_driver, '-target', name) LOG.debug("StdOut from scstadmin create group: %s", out) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to add group to SCST target " "%s"), e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add group to SCST target.") try: (out, _err) = self.scst_execute('-add_init', self.initiator_iqn, '-driver', self.target_driver, '-target', name, '-group', scst_group) LOG.debug("StdOut from scstadmin add initiator: %s", out) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to add initiator to group " "for SCST target %s"), e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add initiator to group for " "SCST target.") tid = self._get_target(name) if self.target_name is None: disk_id = "disk%s" % tid else: disk_id = "%s%s" % (lun, vol_id.split('-')[-1]) try: self.scst_execute('-open_dev', disk_id, '-handler', 'vdisk_fileio', '-attributes', 'filename=%s' % path) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to add device to handler %s"), e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add device to SCST handler.") try: if self.target_name: self.scst_execute('-add_lun', lun, '-driver', self.target_driver, '-target', name, '-device', disk_id, '-group', scst_group) else: self.scst_execute('-add_lun', lun, '-driver', self.target_driver, '-target', name, '-device', disk_id) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to add lun to SCST target " "id:%(vol_id)s: %(e)s"), {'vol_id': name, 'e': e}) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add LUN to SCST Target for " "volume " + vol_name) # SCST uses /etc/scst.conf as the default configuration when it # starts try: self.scst_execute('-write_config', '/etc/scst.conf') except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to write to /etc/scst.conf.")) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to write to /etc/scst.conf.") return tid def _iscsi_location(self, ip, target, iqn, lun=None): return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port, target, iqn, lun) def _get_iscsi_target(self, context, vol_id): # FIXME(jdg): Need to implement abc method pass def _get_target_chap_auth(self, context, iscsi_name): # FIXME(jdg): Need to
implement abc method if self._get_target(iscsi_name) is None: return None (out, _err) = self.scst_execute('-list_tgt_attr', iscsi_name, '-driver', self.target_driver) first = "KEY" last = "Dynamic attributes" start = out.index(first) + len(first) end = out.index(last, start) out = out[start:end] out = out.split("\n")[2] if "IncomingUser" in out: out = out.split(" ") out = list(filter(lambda a: a != "", out)) return (out[1], out[2]) else: return None def ensure_export(self, context, volume, volume_path): iscsi_target, lun = self._get_target_and_lun(context, volume) if self.target_name is None: iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, volume['name']) else: iscsi_name = self.target_name if self.chap_username and self.chap_password: chap_auth = (self.chap_username, self.chap_password) else: chap_auth = self._get_target_chap_auth(context, iscsi_name) self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target, lun, volume_path, chap_auth) def create_export(self, context, volume, volume_path): """Creates an export for a logical volume.""" iscsi_target, lun = self._get_target_and_lun(context, volume) if self.target_name is None: iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, volume['name']) else: iscsi_name = self.target_name if self.chap_username and self.chap_password: chap_auth = (self.chap_username, self.chap_password) else: chap_auth = self._get_target_chap_auth(context, iscsi_name) if not chap_auth: chap_auth = (vutils.generate_username(), vutils.generate_password()) tid = self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target, lun, volume_path, chap_auth) data = {} data['location'] = self._iscsi_location( self.configuration.iscsi_ip_address, tid, iscsi_name, lun) LOG.debug('Set provider_location to: %s', data['location']) data['auth'] = self._iscsi_authentication( 'CHAP', *chap_auth) return data def remove_export(self, context, volume): try: location = volume['provider_location'].split(' ') iqn = location[1] iscsi_target = self._get_target(iqn) self.show_target(iscsi_target, iqn) except Exception: LOG.error(_LE("Skipping remove_export.
No iscsi_target is " "presently exported for volume: %s"), volume['id']) return vol = self.db.volume_get(context, volume['id']) lun = "".join(vol['provider_location'].split(" ")[-1:]) self.remove_iscsi_target(iscsi_target, lun, volume['id'], volume['name']) def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): disk_id = "%s%s" % (lun, vol_id.split('-')[-1]) vol_uuid_file = vol_name if self.target_name is None: iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file) else: iqn = self.target_name if self.target_name is None: try: self.scst_execute('-noprompt', '-rem_target', iqn, '-driver', 'iscsi') except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to remove iscsi target for volume " "id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) try: self.scst_execute('-noprompt', '-close_dev', "disk%s" % tid, '-handler', 'vdisk_fileio') except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to close disk device %s"), e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to close disk device for " "SCST handler.") if self._get_target(iqn): try: self.scst_execute('-noprompt', '-rem_target', iqn, '-driver', self.target_driver) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to remove iscsi target for " "volume id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) else: if int(lun) not in self._get_luns_info(): raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) try: scst_group = "%s%s" % (self.remove_initiator_iqn, self.target_name) self.scst_execute('-noprompt', '-rem_lun', lun, '-driver', self.target_driver, '-target', iqn, '-group', scst_group) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to remove LUN %s"), e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to remove LUN for SCST Target.") try: self.scst_execute('-noprompt', '-close_dev', disk_id, '-handler', 'vdisk_fileio') except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to close disk device %s"), e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to close disk device for " "SCST handler.") self.scst_execute('-write_config', '/etc/scst.conf') def show_target(self, tid, iqn): if iqn is None: raise exception.InvalidParameterValue( err=_('valid iqn needed for show_target')) tid = self._get_target(iqn) if tid is None: raise exception.ISCSITargetHelperCommandFailed( error_message="Target not found") cinder-8.0.0/cinder/volume/targets/driver.py0000664000567000056710000000426012701406250022214 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg import six CONF = cfg.CONF @six.add_metaclass(abc.ABCMeta) class Target(object): """Target object for block storage devices. Base class for target objects, which implement the data transport mechanism (target) specific calls. This includes things like creating targets, attaching, detaching, etc.
Base class here does nothing more than set an executor and db as well as force implementation of required methods. """ def __init__(self, *args, **kwargs): self.db = kwargs.get('db') self.configuration = kwargs.get('configuration') self._root_helper = kwargs.get('root_helper', 'sudo cinder-rootwrap %s' % CONF.rootwrap_config) @abc.abstractmethod def ensure_export(self, context, volume, volume_path): """Synchronously recreates an export for a volume.""" pass @abc.abstractmethod def create_export(self, context, volume, volume_path): """Exports a Target/Volume. Can optionally return a Dict of changes to the volume object to be persisted. """ pass @abc.abstractmethod def remove_export(self, context, volume): """Removes an export for a Target/Volume.""" pass @abc.abstractmethod def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" pass @abc.abstractmethod def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" pass cinder-8.0.0/cinder/volume/targets/lio.py0000664000567000056710000001745512701406250021516 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder import exception from cinder.i18n import _LE, _LI, _LW from cinder import utils from cinder.volume.targets import iscsi LOG = logging.getLogger(__name__) class LioAdm(iscsi.ISCSITarget): """iSCSI target administration for LIO using python-rtslib.""" def __init__(self, *args, **kwargs): super(LioAdm, self).__init__(*args, **kwargs) # FIXME(jdg): modify executor to use the cinder-rtstool self.iscsi_target_prefix =\ self.configuration.safe_get('iscsi_target_prefix') self._verify_rtstool() def _verify_rtstool(self): try: # This call doesn't need locking utils.execute('cinder-rtstool', 'verify') except (OSError, putils.ProcessExecutionError): LOG.error(_LE('cinder-rtstool is not installed correctly')) raise @staticmethod @utils.synchronized('lioadm', external=True) def _execute(*args, **kwargs): """Locked execution to prevent racing issues. Racing issues are derived from a bug in RTSLib: https://github.com/agrover/rtslib-fb/issues/36 """ return utils.execute(*args, **kwargs) def _get_target(self, iqn): (out, err) = self._execute('cinder-rtstool', 'get-targets', run_as_root=True) lines = out.split('\n') for line in lines: if iqn in line: return line return None def _get_targets(self): (out, err) = self._execute('cinder-rtstool', 'get-targets', run_as_root=True) return out def _get_iscsi_target(self, context, vol_id): return 0 def _get_target_and_lun(self, context, volume): lun = 0 # For lio, the lun starts at lun 0. iscsi_target = 0 # NOTE: Not used by lio. return iscsi_target, lun def _persist_configuration(self, vol_id): try: self._execute('cinder-rtstool', 'save', run_as_root=True) # On persistence failure we don't raise an exception, as target has # been successfully created. 
except putils.ProcessExecutionError: LOG.warning(_LW("Failed to save iscsi LIO configuration when " "modifying volume id: %(vol_id)s."), {'vol_id': vol_id}) def _restore_configuration(self): try: self._execute('cinder-rtstool', 'restore', run_as_root=True) # On persistence failure we don't raise an exception, as target has # been successfully created. except putils.ProcessExecutionError: LOG.warning(_LW("Failed to restore iscsi LIO configuration.")) def create_iscsi_target(self, name, tid, lun, path, chap_auth=None, **kwargs): # tid and lun are not used vol_id = name.split(':')[1] LOG.info(_LI('Creating iscsi_target for volume: %s'), vol_id) chap_auth_userid = "" chap_auth_password = "" if chap_auth is not None: (chap_auth_userid, chap_auth_password) = chap_auth optional_args = [] if 'portals_port' in kwargs: optional_args.append('-p%s' % kwargs['portals_port']) if 'portals_ips' in kwargs: optional_args.append('-a' + ','.join(kwargs['portals_ips'])) try: command_args = ['cinder-rtstool', 'create', path, name, chap_auth_userid, chap_auth_password, self.iscsi_protocol == 'iser'] + optional_args self._execute(*command_args, run_as_root=True) except putils.ProcessExecutionError: LOG.exception(_LE("Failed to create iscsi target for volume " "id:%s."), vol_id) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: LOG.error(_LE("Failed to create iscsi target for volume " "id:%s."), vol_id) raise exception.NotFound() # We make changes persistent self._persist_configuration(vol_id) return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): LOG.info(_LI('Removing iscsi_target: %s'), vol_id) vol_uuid_name = vol_name iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_name) try: self._execute('cinder-rtstool', 'delete', iqn, run_as_root=True) except putils.ProcessExecutionError: LOG.exception(_LE("Failed to remove iscsi target for volume " "id:%s."), vol_id) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # We make changes persistent self._persist_configuration(vol_id) def initialize_connection(self, volume, connector): volume_iqn = volume['provider_location'].split(' ')[1] (auth_method, auth_user, auth_pass) = \ volume['provider_auth'].split(' ', 3) # Add initiator iqns to target ACL try: self._execute('cinder-rtstool', 'add-initiator', volume_iqn, auth_user, auth_pass, connector['initiator'], run_as_root=True) except putils.ProcessExecutionError: LOG.exception(_LE("Failed to add initiator iqn %s to target"), connector['initiator']) raise exception.ISCSITargetAttachFailed( volume_id=volume['id']) # We make changes persistent self._persist_configuration(volume['id']) return super(LioAdm, self).initialize_connection(volume, connector) def terminate_connection(self, volume, connector, **kwargs): volume_iqn = volume['provider_location'].split(' ')[1] # Delete initiator iqns from target ACL try: self._execute('cinder-rtstool', 'delete-initiator', volume_iqn, connector['initiator'], run_as_root=True) except putils.ProcessExecutionError: LOG.exception( _LE("Failed to delete initiator iqn %s from target."), connector['initiator']) raise exception.ISCSITargetDetachFailed(volume_id=volume['id']) # We make changes persistent self._persist_configuration(volume['id']) def ensure_export(self, context, volume, volume_path): """Recreate exports for logical volumes.""" # Restore saved configuration file if no target exists. 
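# The configuration restored here is the one written out by
# _persist_configuration() ('cinder-rtstool save') after each target
# change; when at least one target is already present, the kernel state
# is treated as current and the restore is skipped.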
if not self._get_targets(): LOG.info(_LI('Restoring iSCSI target from configuration file')) self._restore_configuration() return LOG.info(_LI("Skipping ensure_export. Found existing iSCSI target.")) cinder-8.0.0/cinder/volume/targets/fake.py0000664000567000056710000000221312701406250021623 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.volume.targets import iscsi class FakeTarget(iscsi.ISCSITarget): VERSION = '0.1' def __init__(self, *args, **kwargs): super(FakeTarget, self).__init__(*args, **kwargs) def _get_target_and_lun(self, context, volume): return(0, 0) def create_iscsi_target(self, name, tid, lun, path, chap_auth, **kwargs): pass def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): pass def _get_iscsi_target(self, context, vol_id): pass def _get_target(self, iqn): pass cinder-8.0.0/cinder/volume/utils.py0000664000567000056710000006222112701406250020411 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
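# NOTE: illustrative sketch, not part of the original tree. Tying the
# cinder.volume.targets classes above together: a volume driver builds a
# target helper with the keyword arguments consumed by Target.__init__()
# (db, configuration, root_helper) and delegates export calls to it. A
# minimal hypothetical wiring, using the no-op FakeTarget:
#
#     from cinder.volume.targets import fake
#
#     target_helper = fake.FakeTarget(db=db_api, configuration=backend_conf)
#     target_helper.create_export(context, volume, volume_path)
#     target_helper.remove_export(context, volume)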
"""Volume-related Utilities and helpers.""" import ast import math import re import time import uuid from Crypto.Random import random import eventlet from eventlet import tpool from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import units import six from six.moves import range from cinder.brick.local_dev import lvm as brick_lvm from cinder import context from cinder import db from cinder import exception from cinder.i18n import _, _LI, _LW, _LE from cinder import rpc from cinder import utils from cinder.volume import throttling CONF = cfg.CONF LOG = logging.getLogger(__name__) def null_safe_str(s): return str(s) if s else '' def _usage_from_volume(context, volume_ref, **kw): now = timeutils.utcnow() launched_at = volume_ref['launched_at'] or now created_at = volume_ref['created_at'] or now usage_info = dict( tenant_id=volume_ref['project_id'], host=volume_ref['host'], user_id=volume_ref['user_id'], availability_zone=volume_ref['availability_zone'], volume_id=volume_ref['id'], volume_type=volume_ref['volume_type_id'], display_name=volume_ref['display_name'], launched_at=launched_at.isoformat(), created_at=created_at.isoformat(), status=volume_ref['status'], snapshot_id=volume_ref['snapshot_id'], size=volume_ref['size'], replication_status=volume_ref['replication_status'], replication_extended_status=volume_ref['replication_extended_status'], replication_driver_data=volume_ref['replication_driver_data'], metadata=volume_ref.get('volume_metadata'),) usage_info.update(kw) try: attachments = db.volume_attachment_get_used_by_volume_id( context, volume_ref['id']) usage_info['volume_attachment'] = attachments glance_meta = db.volume_glance_metadata_get(context, volume_ref['id']) if glance_meta: usage_info['glance_metadata'] = glance_meta except exception.GlanceMetadataNotFound: pass except exception.VolumeNotFound: LOG.debug("Can not find volume %s at notify usage", volume_ref['id']) return usage_info def _usage_from_backup(backup_ref, **kw): num_dependent_backups = backup_ref['num_dependent_backups'] usage_info = dict(tenant_id=backup_ref['project_id'], user_id=backup_ref['user_id'], availability_zone=backup_ref['availability_zone'], backup_id=backup_ref['id'], host=backup_ref['host'], display_name=backup_ref['display_name'], created_at=str(backup_ref['created_at']), status=backup_ref['status'], volume_id=backup_ref['volume_id'], size=backup_ref['size'], service_metadata=backup_ref['service_metadata'], service=backup_ref['service'], fail_reason=backup_ref['fail_reason'], parent_id=backup_ref['parent_id'], num_dependent_backups=num_dependent_backups, snapshot_id=backup_ref['snapshot_id'], ) usage_info.update(kw) return usage_info def notify_about_volume_usage(context, volume, event_suffix, extra_usage_info=None, host=None): if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_volume(context, volume, **extra_usage_info) rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix, usage_info) def notify_about_backup_usage(context, backup, event_suffix, extra_usage_info=None, host=None): if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_backup(backup, **extra_usage_info) rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix, usage_info) def _usage_from_snapshot(snapshot, **extra_usage_info): usage_info = { 'tenant_id': 
snapshot.project_id, 'user_id': snapshot.user_id, 'availability_zone': snapshot.volume['availability_zone'], 'volume_id': snapshot.volume_id, 'volume_size': snapshot.volume_size, 'snapshot_id': snapshot.id, 'display_name': snapshot.display_name, 'created_at': str(snapshot.created_at), 'status': snapshot.status, 'deleted': null_safe_str(snapshot.deleted), 'metadata': null_safe_str(snapshot.metadata), } usage_info.update(extra_usage_info) return usage_info def notify_about_snapshot_usage(context, snapshot, event_suffix, extra_usage_info=None, host=None): if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_snapshot(snapshot, **extra_usage_info) rpc.get_notifier('snapshot', host).info(context, 'snapshot.%s' % event_suffix, usage_info) def notify_about_replication_usage(context, volume, suffix, extra_usage_info=None, host=None): if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_volume(context, volume, **extra_usage_info) rpc.get_notifier('replication', host).info(context, 'replication.%s' % suffix, usage_info) def notify_about_replication_error(context, volume, suffix, extra_error_info=None, host=None): if not host: host = CONF.host if not extra_error_info: extra_error_info = {} usage_info = _usage_from_volume(context, volume, **extra_error_info) rpc.get_notifier('replication', host).error(context, 'replication.%s' % suffix, usage_info) def _usage_from_consistencygroup(group_ref, **kw): usage_info = dict(tenant_id=group_ref.project_id, user_id=group_ref.user_id, availability_zone=group_ref.availability_zone, consistencygroup_id=group_ref.id, name=group_ref.name, created_at=group_ref.created_at.isoformat(), status=group_ref.status) usage_info.update(kw) return usage_info def notify_about_consistencygroup_usage(context, group, event_suffix, extra_usage_info=None, host=None): if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_consistencygroup(group, **extra_usage_info) rpc.get_notifier("consistencygroup", host).info( context, 'consistencygroup.%s' % event_suffix, usage_info) def _usage_from_cgsnapshot(cgsnapshot, **kw): usage_info = dict( tenant_id=cgsnapshot.project_id, user_id=cgsnapshot.user_id, cgsnapshot_id=cgsnapshot.id, name=cgsnapshot.name, consistencygroup_id=cgsnapshot.consistencygroup_id, created_at=cgsnapshot.created_at.isoformat(), status=cgsnapshot.status) usage_info.update(kw) return usage_info def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix, extra_usage_info=None, host=None): if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_cgsnapshot(cgsnapshot, **extra_usage_info) rpc.get_notifier("cgsnapshot", host).info( context, 'cgsnapshot.%s' % event_suffix, usage_info) def _calculate_count(size_in_m, blocksize): # Check if volume_dd_blocksize is valid try: # Rule out zero-sized/negative/float dd blocksize which # cannot be caught by strutils if blocksize.startswith(('-', '0')) or '.' in blocksize: raise ValueError bs = strutils.string_to_bytes('%sB' % blocksize) except ValueError: LOG.warning(_LW("Incorrect value error: %(blocksize)s, " "it may indicate that \'volume_dd_blocksize\' " "was configured incorrectly. 
Fall back to default."), {'blocksize': blocksize}) # Fall back to default blocksize CONF.clear_override('volume_dd_blocksize') blocksize = CONF.volume_dd_blocksize bs = strutils.string_to_bytes('%sB' % blocksize) count = math.ceil(size_in_m * units.Mi / bs) return blocksize, int(count) def check_for_odirect_support(src, dest, flag='oflag=direct'): # Check whether O_DIRECT is supported try: # iflag=direct and if=/dev/zero combination does not work # error: dd: failed to open '/dev/zero': Invalid argument if (src == '/dev/zero' and flag == 'iflag=direct'): return False else: utils.execute('dd', 'count=0', 'if=%s' % src, 'of=%s' % dest, flag, run_as_root=True) return True except processutils.ProcessExecutionError: return False def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize, sync=False, execute=utils.execute, ionice=None, sparse=False): # Use O_DIRECT to avoid thrashing the system buffer cache extra_flags = [] if check_for_odirect_support(srcstr, deststr, 'iflag=direct'): extra_flags.append('iflag=direct') if check_for_odirect_support(srcstr, deststr, 'oflag=direct'): extra_flags.append('oflag=direct') # If the volume is being unprovisioned then # request the data is persisted before returning, # so that it's not discarded from the cache. conv = [] if sync and not extra_flags: conv.append('fdatasync') if sparse: conv.append('sparse') if conv: conv_options = 'conv=' + ",".join(conv) extra_flags.append(conv_options) blocksize, count = _calculate_count(size_in_m, blocksize) cmd = ['dd', 'if=%s' % srcstr, 'of=%s' % deststr, 'count=%d' % count, 'bs=%s' % blocksize] cmd.extend(extra_flags) if ionice is not None: cmd = ['ionice', ionice] + cmd cmd = prefix + cmd # Perform the copy start_time = timeutils.utcnow() execute(*cmd, run_as_root=True) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) # NOTE(jdg): use a default of 1, mostly for unit test, but in # some incredible event this is 0 (cirros image?) don't barf if duration < 1: duration = 1 mbps = (size_in_m / duration) LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, " "size %(sz).2f MB, duration %(duration).2f sec", {"src": srcstr, "dest": deststr, "sz": size_in_m, "duration": duration}) LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"), {'size_in_m': size_in_m, 'mbps': mbps}) def _open_volume_with_path(path, mode): try: with utils.temporary_chown(path): handle = open(path, mode) return handle except Exception: LOG.error(_LE("Failed to open volume from %(path)s."), {'path': path}) def _transfer_data(src, dest, length, chunk_size): """Transfer data between files (Python IO objects).""" chunks = int(math.ceil(length / chunk_size)) remaining_length = length LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.", {'chunks': chunks, 'bytes': chunk_size}) for chunk in range(0, chunks): before = time.time() data = tpool.execute(src.read, min(chunk_size, remaining_length)) # If we have reached end of source, discard any extraneous bytes from # destination volume if trim is enabled and stop writing. 
if data == b'': break tpool.execute(dest.write, data) remaining_length -= len(data) delta = (time.time() - before) rate = (chunk_size / delta) / units.Ki LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).", {'chunk': chunk + 1, 'chunks': chunks, 'rate': rate}) # yield to any other pending operations eventlet.sleep(0) tpool.execute(dest.flush) def _copy_volume_with_file(src, dest, size_in_m): src_handle = src if isinstance(src, six.string_types): src_handle = _open_volume_with_path(src, 'rb') dest_handle = dest if isinstance(dest, six.string_types): dest_handle = _open_volume_with_path(dest, 'wb') if not src_handle: raise exception.DeviceUnavailable( _("Failed to copy volume, source device unavailable.")) if not dest_handle: raise exception.DeviceUnavailable( _("Failed to copy volume, destination device unavailable.")) start_time = timeutils.utcnow() _transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4) duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow())) if isinstance(src, six.string_types): src_handle.close() if isinstance(dest, six.string_types): dest_handle.close() mbps = (size_in_m / duration) LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at " "%(mbps).2f MB/s)."), {'size_in_m': size_in_m, 'mbps': mbps}) def copy_volume(src, dest, size_in_m, blocksize, sync=False, execute=utils.execute, ionice=None, throttle=None, sparse=False): """Copy data from the source volume to the destination volume. The parameters 'src' and 'dest' are both typically of type str, which represents the path to each volume on the filesystem. Connectors can optionally return a volume handle of type RawIOBase for volumes that are not available on the local filesystem for open/close operations. If either 'src' or 'dest' are not of type str, then they are assumed to be of type RawIOBase or any derivative that supports file operations such as read and write. In this case, the handles are treated as file handles instead of file paths and, at present moment, throttling is unavailable. """ if (isinstance(src, six.string_types) and isinstance(dest, six.string_types)): if not throttle: throttle = throttling.Throttle.get_default() with throttle.subcommand(src, dest) as throttle_cmd: _copy_volume_with_path(throttle_cmd['prefix'], src, dest, size_in_m, blocksize, sync=sync, execute=execute, ionice=ionice, sparse=sparse) else: _copy_volume_with_file(src, dest, size_in_m) def clear_volume(volume_size, volume_path, volume_clear=None, volume_clear_size=None, volume_clear_ionice=None, throttle=None): """Unprovision old volumes to prevent data leaking between users.""" if volume_clear is None: volume_clear = CONF.volume_clear if volume_clear_size is None: volume_clear_size = CONF.volume_clear_size if volume_clear_size == 0: volume_clear_size = volume_size if volume_clear_ionice is None: volume_clear_ionice = CONF.volume_clear_ionice LOG.info(_LI("Performing secure delete on volume: %s"), volume_path) # We pass sparse=False explicitly here so that zero blocks are not # skipped in order to clear the volume. 
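# NOTE: worked example with hypothetical values, not part of the original
# file. With volume_clear='zero', a 100 MiB volume and the default
# volume_dd_blocksize of 1M, the copy below ends up running approximately:
#
#     dd if=/dev/zero of=<volume_path> count=100 bs=1M conv=fdatasync
#
# (iflag=direct is never used with /dev/zero; when oflag=direct is
# supported it replaces conv=fdatasync, and an ionice prefix is added
# when volume_clear_ionice is set.)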
if volume_clear == 'zero': return copy_volume('/dev/zero', volume_path, volume_clear_size, CONF.volume_dd_blocksize, sync=True, execute=utils.execute, ionice=volume_clear_ionice, throttle=throttle, sparse=False) elif volume_clear == 'shred': clear_cmd = ['shred', '-n3'] if volume_clear_size: clear_cmd.append('-s%dMiB' % volume_clear_size) else: raise exception.InvalidConfigurationValue( option='volume_clear', value=volume_clear) clear_cmd.append(volume_path) start_time = timeutils.utcnow() utils.execute(*clear_cmd, run_as_root=True) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) # NOTE(jdg): use a default of 1, mostly for unit test, but in # some incredible event this is 0 (cirros image?) don't barf if duration < 1: duration = 1 LOG.info(_LI('Elapsed time for clear volume: %.2f sec'), duration) def supports_thin_provisioning(): return brick_lvm.LVM.supports_thin_provisioning( utils.get_root_helper()) def get_all_physical_volumes(vg_name=None): return brick_lvm.LVM.get_all_physical_volumes( utils.get_root_helper(), vg_name) def get_all_volume_groups(vg_name=None): return brick_lvm.LVM.get_all_volume_groups( utils.get_root_helper(), vg_name) # Default symbols to use for passwords. Avoids visually confusing characters. # ~6 bits per symbol DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O 'abcdefghijkmnopqrstuvwxyz') # Removed: l def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): """Generate a random password from the supplied symbol groups. At least one symbol from each group will be included. Unpredictable results if length is less than the number of symbol groups. Believed to be reasonably secure (with a reasonable password length!) """ # NOTE(jerdfelt): Some password policies require at least one character # from each group of symbols, so start off with one random character # from each symbol group password = [random.choice(s) for s in symbolgroups] # If length < len(symbolgroups), the leading characters will only # be from the first length groups. Try our best to not be predictable # by shuffling and then truncating. random.shuffle(password) password = password[:length] length -= len(password) # then fill with random characters from all symbol groups symbols = ''.join(symbolgroups) password.extend([random.choice(symbols) for _i in range(length)]) # finally shuffle to ensure first x characters aren't from a # predictable group random.shuffle(password) return ''.join(password) def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): # Use the same implementation as the password generation. return generate_password(length, symbolgroups) DEFAULT_POOL_NAME = '_pool0' def extract_host(host, level='backend', default_pool_name=False): """Extract Host, Backend or Pool information from host string. :param host: String for host, which could include host@backend#pool info :param level: Indicate which level of information should be extracted from host string. Level can be 'host', 'backend' or 'pool', default value is 'backend' :param default_pool_name: this flag specify what to do if level == 'pool' and there is no 'pool' info encoded in host string. default_pool_name=True will return DEFAULT_POOL_NAME, otherwise we return None. Default value of this parameter is False. 
:return: expected level of information For example: host = 'HostA@BackendB#PoolC' ret = extract_host(host, 'host') # ret is 'HostA' ret = extract_host(host, 'backend') # ret is 'HostA@BackendB' ret = extract_host(host, 'pool') # ret is 'PoolC' host = 'HostX@BackendY' ret = extract_host(host, 'pool') # ret is None ret = extract_host(host, 'pool', True) # ret is '_pool0' """ if level == 'host': # make sure pool is not included hst = host.split('#')[0] return hst.split('@')[0] elif level == 'backend': return host.split('#')[0] elif level == 'pool': lst = host.split('#') if len(lst) == 2: return lst[1] elif default_pool_name is True: return DEFAULT_POOL_NAME else: return None def get_volume_rpc_host(host): if CONF.rpc_backend and CONF.rpc_backend == "zmq": # ZeroMQ RPC driver requires only the hostname. # So, return just that. return extract_host(host, 'host') return extract_host(host) def append_host(host, pool): """Encode pool into host info.""" if not host or not pool: return host new_host = "#".join([host, pool]) return new_host def matching_backend_name(src_volume_type, volume_type): if src_volume_type.get('volume_backend_name') and \ volume_type.get('volume_backend_name'): return src_volume_type.get('volume_backend_name') == \ volume_type.get('volume_backend_name') else: return False def hosts_are_equivalent(host_1, host_2): return extract_host(host_1) == extract_host(host_2) def read_proc_mounts(): """Read the /proc/mounts file. It's a dummy function but it eases the writing of unit tests as mocking __builtin__open() for a specific file only is not trivial. """ with open('/proc/mounts') as mounts: return mounts.readlines() def _extract_id(vol_name): regex = re.compile( CONF.volume_name_template.replace('%s', '(?P.+)')) match = regex.match(vol_name) return match.group('uuid') if match else None def check_already_managed_volume(db, vol_name): """Check cinder db for already managed volume. :param db: database api parameter :param vol_name: volume name parameter :returns: bool -- return True, if db entry with specified volume name exist, otherwise return False """ vol_id = _extract_id(vol_name) try: if vol_id and uuid.UUID(vol_id, version=4): db.volume_get(context.get_admin_context(), vol_id) return True except (exception.VolumeNotFound, ValueError): return False return False def convert_config_string_to_dict(config_string): """Convert config file replication string to a dict. The only supported form is as follows: "{'key-1'='val-1' 'key-2'='val-2'...}" :param config_string: Properly formatted string to convert to dict. 
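For example, "{'key-1'='val-1' 'key-2'='val-2'}" is converted to {'key-1': 'val-1', 'key-2': 'val-2'}.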
:response: dict of string values """ resultant_dict = {} try: st = config_string.replace("=", ":") st = st.replace(" ", ", ") resultant_dict = ast.literal_eval(st) except Exception: LOG.warning(_LW("Error encountered translating config_string: " "%(config_string)s to dict"), {'config_string': config_string}) return resultant_dict def process_reserve_over_quota(context, overs, usages, quotas, size): def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) for over in overs: if 'gigabytes' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG snapshot (%(d_consumed)dG of " "%(d_quota)dG already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 's_size': size, 'd_consumed': _consumed(over), 'd_quota': quotas[over]}) raise exception.VolumeSizeExceedsAvailableQuota( requested=size, consumed=_consumed('gigabytes'), quota=quotas['gigabytes']) elif 'snapshots' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "snapshot (%(d_consumed)d snapshots " "already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over)}) raise exception.SnapshotLimitExceeded(allowed=quotas[over]) cinder-8.0.0/cinder/volume/__init__.py0000664000567000056710000000211112701406250021000 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.volume import ' elsewhere. from oslo_utils import importutils from cinder.common import config CONF = config.CONF def API(*args, **kwargs): class_name = CONF.volume_api_class return importutils.import_object(class_name, *args, **kwargs) cinder-8.0.0/cinder/volume/drivers/0000775000567000056710000000000012701406543020357 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/san/0000775000567000056710000000000012701406543021140 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/san/__init__.py0000664000567000056710000000172512701406250023251 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`cinder.volume.san` -- Cinder San Drivers ===================================================== .. automodule:: cinder.volume.san :platform: Unix :synopsis: Module containing all the Cinder San drivers. 
""" # Adding imports for backwards compatibility in loading volume_driver. from cinder.volume.drivers.san.san import SanISCSIDriver # noqa cinder-8.0.0/cinder/volume/drivers/san/san.py0000664000567000056710000001535012701406250022272 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Default Driver for san-stored volumes. The unique thing about a SAN is that we don't expect that we can run the volume controller on the SAN hardware. We expect to access it over SSH or some API. """ import random from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _, _LE from cinder import ssh_utils from cinder import utils from cinder.volume import driver LOG = logging.getLogger(__name__) san_opts = [ cfg.BoolOpt('san_thin_provision', default=True, help='Use thin provisioning for SAN volumes?'), cfg.StrOpt('san_ip', default='', help='IP address of SAN controller'), cfg.StrOpt('san_login', default='admin', help='Username for SAN controller'), cfg.StrOpt('san_password', default='', help='Password for SAN controller', secret=True), cfg.StrOpt('san_private_key', default='', help='Filename of private key to use for SSH authentication'), cfg.StrOpt('san_clustername', default='', help='Cluster name to use for creating volumes'), cfg.PortOpt('san_ssh_port', default=22, help='SSH port to use with SAN'), cfg.BoolOpt('san_is_local', default=False, help='Execute commands locally instead of over SSH; ' 'use if the volume service is running on the SAN device'), cfg.IntOpt('ssh_conn_timeout', default=30, help="SSH connection timeout in seconds"), cfg.IntOpt('ssh_min_pool_conn', default=1, help='Minimum ssh connections in the pool'), cfg.IntOpt('ssh_max_pool_conn', default=5, help='Maximum ssh connections in the pool'), ] CONF = cfg.CONF CONF.register_opts(san_opts) class SanDriver(driver.BaseVD): """Base class for SAN-style storage volumes A SAN-style storage value is 'different' because the volume controller probably won't run on it, so we need to access is over SSH or another remote protocol. """ def __init__(self, *args, **kwargs): execute = kwargs.pop('execute', self.san_execute) super(SanDriver, self).__init__(execute=execute, *args, **kwargs) self.configuration.append_config_values(san_opts) self.run_local = self.configuration.san_is_local self.sshpool = None def san_execute(self, *cmd, **kwargs): if self.run_local: return utils.execute(*cmd, **kwargs) else: check_exit_code = kwargs.pop('check_exit_code', None) return self._run_ssh(cmd, check_exit_code) def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd_list) command = ' '. 
join(cmd_list) if not self.sshpool: password = self.configuration.san_password privatekey = self.configuration.san_private_key min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn self.sshpool = ssh_utils.SSHPool( self.configuration.san_ip, self.configuration.san_ssh_port, self.configuration.ssh_conn_timeout, self.configuration.san_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.error(e) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error running SSH command: %s"), command) def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" pass def create_export(self, context, volume, connector): """Exports the volume.""" pass def remove_export(self, context, volume): """Removes an export for a logical volume.""" pass def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" if not self.run_local: if not (self.configuration.san_password or self.configuration.san_private_key): raise exception.InvalidInput( reason=_('Specify san_password or san_private_key')) # The san_ip must always be set, because we use it for the target if not self.configuration.san_ip: raise exception.InvalidInput(reason=_("san_ip must be set")) class SanISCSIDriver(SanDriver, driver.ISCSIDriver): def __init__(self, *args, **kwargs): super(SanISCSIDriver, self).__init__(*args, **kwargs) def _build_iscsi_target_name(self, volume): return "%s%s" % (self.configuration.iscsi_target_prefix, volume['name']) cinder-8.0.0/cinder/volume/drivers/san/hp/0000775000567000056710000000000012701406543021547 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/san/hp/hpmsa_fc.py0000664000567000056710000000233512701406250023677 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder.volume.drivers.dothill import dothill_fc from cinder.volume.drivers.san.hp import hpmsa_common class HPMSAFCDriver(dothill_fc.DotHillFCDriver): """OpenStack Fibre Channel cinder drivers for HPMSA arrays. Version history: 1.0 - Inheriting from DotHill cinder drivers. 
""" VERSION = "1.0" def __init__(self, *args, **kwargs): super(HPMSAFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(hpmsa_common.common_opts) def _init_common(self): return hpmsa_common.HPMSACommon(self.configuration) cinder-8.0.0/cinder/volume/drivers/san/hp/__init__.py0000664000567000056710000000000012701406250023641 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/san/hp/hpmsa_common.py0000664000567000056710000000514412701406250024600 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from cinder.volume.drivers.dothill import dothill_common from cinder.volume.drivers.san.hp import hpmsa_client common_opts = [ cfg.StrOpt('hpmsa_backend_name', default='A', help="Pool or Vdisk name to use for volume creation."), cfg.StrOpt('hpmsa_backend_type', choices=['linear', 'virtual'], default='virtual', help="linear (for Vdisk) or virtual (for Pool)."), cfg.StrOpt('hpmsa_api_protocol', choices=['http', 'https'], default='https', help="HPMSA API interface protocol."), cfg.BoolOpt('hpmsa_verify_certificate', default=False, help="Whether to verify HPMSA array SSL certificate."), cfg.StrOpt('hpmsa_verify_certificate_path', help="HPMSA array SSL certificate path."), ] iscsi_opts = [ cfg.ListOpt('hpmsa_iscsi_ips', default=[], help="List of comma-separated target iSCSI IP addresses."), ] CONF = cfg.CONF CONF.register_opts(common_opts) CONF.register_opts(iscsi_opts) class HPMSACommon(dothill_common.DotHillCommon): VERSION = "1.0" def __init__(self, config): self.config = config self.vendor_name = "HPMSA" self.backend_name = self.config.hpmsa_backend_name self.backend_type = self.config.hpmsa_backend_type self.api_protocol = self.config.hpmsa_api_protocol ssl_verify = False if (self.api_protocol == 'https' and self.config.hpmsa_verify_certificate): ssl_verify = self.config.hpmsa_verify_certificate_path or True self.client = hpmsa_client.HPMSAClient(self.config.san_ip, self.config.san_login, self.config.san_password, self.api_protocol, ssl_verify) cinder-8.0.0/cinder/volume/drivers/san/hp/hpmsa_client.py0000664000567000056710000000167312701406250024571 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from cinder.volume.drivers.dothill import dothill_client class HPMSAClient(dothill_client.DotHillClient): def __init__(self, host, login, password, protocol, ssl_verify): super(HPMSAClient, self).__init__(host, login, password, protocol, ssl_verify) cinder-8.0.0/cinder/volume/drivers/san/hp/hpmsa_iscsi.py0000664000567000056710000000255112701406250024421 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder.volume.drivers.dothill import dothill_iscsi from cinder.volume.drivers.san.hp import hpmsa_common class HPMSAISCSIDriver(dothill_iscsi.DotHillISCSIDriver): """OpenStack iSCSI cinder drivers for HPMSA arrays. Version history: 1.0 - Inheriting from DotHill cinder drivers. """ VERSION = "1.0" def __init__(self, *args, **kwargs): super(HPMSAISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(hpmsa_common.common_opts) self.configuration.append_config_values(hpmsa_common.iscsi_opts) self.iscsi_ips = self.configuration.hpmsa_iscsi_ips def _init_common(self): return hpmsa_common.HPMSACommon(self.configuration) cinder-8.0.0/cinder/volume/drivers/zfssa/0000775000567000056710000000000012701406543021505 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/zfssa/__init__.py0000664000567000056710000000000012701406250023577 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/zfssa/zfssaiscsi.py0000664000567000056710000014544312701406250024246 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
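# NOTE: illustrative sketch, not part of the original file. The
# zfssa_initiator_config option parsed with ast.literal_eval() in
# do_setup() below is expected to hold a dict mapping initiator group
# names to lists of initiator entries, e.g. (hypothetical values):
#
#     zfssa_initiator_config = {
#         'test-group': [
#             {'iqn': 'iqn.1986-03.com.example:host-1',
#              'user': 'chapuser',
#              'password': 'chapsecret'},
#         ],
#     }
#
# Initiators in groups other than 'default' are additionally added to
# that initiator group on the appliance.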
""" ZFS Storage Appliance Cinder Volume Driver """ import ast import math from oslo_config import cfg from oslo_log import log from oslo_serialization import base64 from oslo_utils import excutils from oslo_utils import units import six from cinder import exception from cinder import utils from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume.drivers.zfssa import zfssarest from cinder.volume import volume_types import taskflow.engines from taskflow.patterns import linear_flow as lf from taskflow import task CONF = cfg.CONF LOG = log.getLogger(__name__) ZFSSA_OPTS = [ cfg.StrOpt('zfssa_pool', help='Storage pool name.'), cfg.StrOpt('zfssa_project', help='Project name.'), cfg.StrOpt('zfssa_lun_volblocksize', default='8k', choices=['512', '1k', '2k', '4k', '8k', '16k', '32k', '64k', '128k'], help='Block size.'), cfg.BoolOpt('zfssa_lun_sparse', default=False, help='Flag to enable sparse (thin-provisioned): True, False.'), cfg.StrOpt('zfssa_lun_compression', default='off', choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'], help='Data compression.'), cfg.StrOpt('zfssa_lun_logbias', default='latency', choices=['latency', 'throughput'], help='Synchronous write bias.'), cfg.StrOpt('zfssa_initiator_group', default='', help='iSCSI initiator group.'), cfg.StrOpt('zfssa_initiator', default='', help='iSCSI initiator IQNs. (comma separated)'), cfg.StrOpt('zfssa_initiator_user', default='', help='iSCSI initiator CHAP user (name).'), cfg.StrOpt('zfssa_initiator_password', default='', help='Secret of the iSCSI initiator CHAP user.', secret=True), cfg.StrOpt('zfssa_initiator_config', default='', help='iSCSI initiators configuration.'), cfg.StrOpt('zfssa_target_group', default='tgt-grp', help='iSCSI target group name.'), cfg.StrOpt('zfssa_target_user', default='', help='iSCSI target CHAP user (name).'), cfg.StrOpt('zfssa_target_password', default='', secret=True, help='Secret of the iSCSI target CHAP user.'), cfg.StrOpt('zfssa_target_portal', help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'), cfg.StrOpt('zfssa_target_interfaces', help='Network interfaces of iSCSI targets. (comma separated)'), cfg.IntOpt('zfssa_rest_timeout', help='REST connection timeout. (seconds)'), cfg.StrOpt('zfssa_replication_ip', default='', help='IP address used for replication data. (maybe the same as ' 'data ip)'), cfg.BoolOpt('zfssa_enable_local_cache', default=True, help='Flag to enable local caching: True, False.'), cfg.StrOpt('zfssa_cache_project', default='os-cinder-cache', help='Name of ZFSSA project where cache volumes are stored.'), cfg.StrOpt('zfssa_manage_policy', default='loose', choices=['loose', 'strict'], help='Driver policy for volume manage.') ] CONF.register_opts(ZFSSA_OPTS) ZFSSA_LUN_SPECS = { 'zfssa:volblocksize', 'zfssa:sparse', 'zfssa:compression', 'zfssa:logbias', } def factory_zfssa(): return zfssarest.ZFSSAApi() class ZFSSAISCSIDriver(driver.ISCSIDriver): """ZFSSA Cinder iSCSI volume driver. Version history: 1.0.1: Backend enabled volume migration. Local cache feature. 1.0.2: Volume manage/unmanage support. 
""" VERSION = '1.0.2' protocol = 'iSCSI' def __init__(self, *args, **kwargs): super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(ZFSSA_OPTS) self.configuration.append_config_values(san.san_opts) self.zfssa = None self.tgt_zfssa = None self._stats = None self.tgtiqn = None def _get_target_alias(self): """return target alias.""" return self.configuration.zfssa_target_group def do_setup(self, context): """Setup - create multiple elements. Project, initiators, initiatorgroup, target and targetgroup. """ lcfg = self.configuration LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip) self.zfssa = factory_zfssa() self.tgt_zfssa = factory_zfssa() self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout) auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password) auth_str = base64.encode_as_text(auth_str) self.zfssa.login(auth_str) self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project, compression=lcfg.zfssa_lun_compression, logbias=lcfg.zfssa_lun_logbias) schemas = [ {'property': 'cinder_managed', 'description': 'Managed by Cinder', 'type': 'Boolean'}] if lcfg.zfssa_enable_local_cache: self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_cache_project, compression=lcfg.zfssa_lun_compression, logbias=lcfg.zfssa_lun_logbias) schemas.extend([ {'property': 'image_id', 'description': 'OpenStack image ID', 'type': 'String'}, {'property': 'updated_at', 'description': 'Most recent updated time of image', 'type': 'String'}]) self.zfssa.create_schemas(schemas) if (lcfg.zfssa_initiator_config != ''): initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config) for initiator_group in initiator_config: zfssa_initiator_group = initiator_group for zfssa_initiator in initiator_config[zfssa_initiator_group]: self.zfssa.create_initiator(zfssa_initiator['iqn'], zfssa_initiator_group + '-' + zfssa_initiator['iqn'], chapuser= zfssa_initiator['user'], chapsecret= zfssa_initiator['password']) if (zfssa_initiator_group != 'default'): self.zfssa.add_to_initiatorgroup( zfssa_initiator['iqn'], zfssa_initiator_group) else: LOG.warning(_LW('zfssa_initiator_config not found. 
' 'Using deprecated configuration options.')) if (not lcfg.zfssa_initiator and (not lcfg.zfssa_initiator_group and lcfg.zfssa_initiator_group != 'default')): LOG.error(_LE('zfssa_initiator cannot be empty when ' 'creating a zfssa_initiator_group.')) raise exception.InvalidConfigurationValue( value='', option='zfssa_initiator') if (lcfg.zfssa_initiator != '' and (lcfg.zfssa_initiator_group == '' or lcfg.zfssa_initiator_group == 'default')): LOG.warning(_LW('zfssa_initiator: %(ini)s' ' wont be used on ' 'zfssa_initiator_group= %(inigrp)s.'), {'ini': lcfg.zfssa_initiator, 'inigrp': lcfg.zfssa_initiator_group}) # Setup initiator and initiator group if (lcfg.zfssa_initiator != '' and lcfg.zfssa_initiator_group != '' and lcfg.zfssa_initiator_group != 'default'): for initiator in lcfg.zfssa_initiator.split(','): initiator = initiator.strip() self.zfssa.create_initiator( initiator, lcfg.zfssa_initiator_group + '-' + initiator, chapuser=lcfg.zfssa_initiator_user, chapsecret=lcfg.zfssa_initiator_password) self.zfssa.add_to_initiatorgroup( initiator, lcfg.zfssa_initiator_group) # Parse interfaces interfaces = [] for interface in lcfg.zfssa_target_interfaces.split(','): if interface == '': continue interfaces.append(interface) # Setup target and target group iqn = self.zfssa.create_target( self._get_target_alias(), interfaces, tchapuser=lcfg.zfssa_target_user, tchapsecret=lcfg.zfssa_target_password) self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group) if lcfg.zfssa_manage_policy not in ("loose", "strict"): err_msg = (_("zfssa_manage_policy property needs to be set to" " 'strict' or 'loose'. Current value is: %s.") % lcfg.zfssa_manage_policy) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) def check_for_setup_error(self): """Check that driver can login. Check also pool, project, initiators, initiatorgroup, target and targetgroup. 
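When ``zfssa_initiator_config`` is set, both do_setup and this check
        parse it with ``ast.literal_eval``; it must evaluate to a dict
        mapping initiator group names to lists of initiator entries. A
        sketch with placeholder group name, IQN and CHAP credentials::

            {'igroup-1': [{'iqn': 'iqn.1993-08.org.debian:01:abcdef',
                           'user': 'chapuser',
                           'password': 'chapsecret'}]}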
""" lcfg = self.configuration self.zfssa.verify_pool(lcfg.zfssa_pool) self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project) if (lcfg.zfssa_initiator_config != ''): initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config) for initiator_group in initiator_config: zfssa_initiator_group = initiator_group for zfssa_initiator in initiator_config[zfssa_initiator_group]: self.zfssa.verify_initiator(zfssa_initiator['iqn']) else: if (lcfg.zfssa_initiator != '' and lcfg.zfssa_initiator_group != '' and lcfg.zfssa_initiator_group != 'default'): for initiator in lcfg.zfssa_initiator.split(','): self.zfssa.verify_initiator(initiator) self.zfssa.verify_target(self._get_target_alias()) def _get_provider_info(self, volume, lun=None): """Return provider information.""" lcfg = self.configuration project = lcfg.zfssa_project if ((lcfg.zfssa_enable_local_cache is True) and (volume['name'].startswith('os-cache-vol-'))): project = lcfg.zfssa_cache_project if lun is None: lun = self.zfssa.get_lun(lcfg.zfssa_pool, project, volume['name']) if isinstance(lun['number'], list): lun['number'] = lun['number'][0] if self.tgtiqn is None: self.tgtiqn = self.zfssa.get_target(self._get_target_alias()) loc = "%s %s %s" % (lcfg.zfssa_target_portal, self.tgtiqn, lun['number']) LOG.debug('_get_provider_info: provider_location: %s', loc) provider = {'provider_location': loc} if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '': provider['provider_auth'] = ('CHAP %s %s' % (lcfg.zfssa_target_user, lcfg.zfssa_target_password)) return provider def create_volume(self, volume): """Create a volume on ZFSSA.""" LOG.debug('zfssa.create_volume: volume=' + volume['name']) lcfg = self.configuration volsize = str(volume['size']) + 'g' specs = self._get_voltype_specs(volume) specs.update({'custom:cinder_managed': True}) self.zfssa.create_lun(lcfg.zfssa_pool, lcfg.zfssa_project, volume['name'], volsize, lcfg.zfssa_target_group, specs) def delete_volume(self, volume): """Deletes a volume with the given volume['name'].""" LOG.debug('zfssa.delete_volume: name=%s', volume['name']) lcfg = self.configuration try: lun2del = self.zfssa.get_lun(lcfg.zfssa_pool, lcfg.zfssa_project, volume['name']) except exception.VolumeBackendAPIException as ex: # NOTE(jdg): This will log an error and continue # if for some reason the volume no longer exists # on the backend if 'Error Getting Volume' in ex.message: LOG.error(_LE("Volume ID %s was not found on " "the zfssa device while attempting " "delete_volume operation."), volume['id']) return # Delete clone temp snapshot. see create_cloned_volume() if 'origin' in lun2del and 'id' in volume: if lun2del['nodestroy']: self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project, volume['name'], nodestroy=False) tmpsnap = 'tmp-snapshot-%s' % volume['id'] if lun2del['origin']['snapshot'] == tmpsnap: self.zfssa.delete_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, lun2del['origin']['share'], lun2del['origin']['snapshot']) return self.zfssa.delete_lun(pool=lcfg.zfssa_pool, project=lcfg.zfssa_project, lun=volume['name']) if ('origin' in lun2del and lun2del['origin']['project'] == lcfg.zfssa_cache_project): self._check_origin(lun2del, volume['name']) def create_snapshot(self, snapshot): """Creates a snapshot of a volume. 
Snapshot name: snapshot['name'] Volume name: snapshot['volume_name'] """ LOG.debug('zfssa.create_snapshot: snapshot=%s', snapshot['name']) lcfg = self.configuration self.zfssa.create_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['volume_name'], snapshot['name']) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.debug('zfssa.delete_snapshot: snapshot=%s', snapshot['name']) lcfg = self.configuration numclones = self.zfssa.num_clones(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['volume_name'], snapshot['name']) if numclones > 0: LOG.error(_LE('Snapshot %s: has clones'), snapshot['name']) raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) self.zfssa.delete_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['volume_name'], snapshot['name']) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot - clone a snapshot.""" LOG.debug('zfssa.create_volume_from_snapshot: volume=%s', volume['name']) LOG.debug('zfssa.create_volume_from_snapshot: snapshot=%s', snapshot['name']) if not self._verify_clone_size(snapshot, volume['size'] * units.Gi): exception_msg = (_('Error verifying clone size on ' 'Volume clone: %(clone)s ' 'Size: %(size)d on' 'Snapshot: %(snapshot)s') % {'clone': volume['name'], 'size': volume['size'], 'snapshot': snapshot['name']}) LOG.error(exception_msg) raise exception.InvalidInput(reason=exception_msg) lcfg = self.configuration self.zfssa.clone_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['volume_name'], snapshot['name'], lcfg.zfssa_project, volume['name']) def _update_volume_status(self): """Retrieve status info from volume group.""" LOG.debug("Updating volume status") self._stats = None data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or self.__class__.__name__ data["vendor_name"] = 'Oracle' data["driver_version"] = self.VERSION data["storage_protocol"] = self.protocol lcfg = self.configuration (avail, total) = self.zfssa.get_project_stats(lcfg.zfssa_pool, lcfg.zfssa_project) if avail is None or total is None: return host = lcfg.san_ip pool = lcfg.zfssa_pool project = lcfg.zfssa_project auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password) auth_str = base64.encode_as_text(auth_str) zfssa_tgt_group = lcfg.zfssa_target_group repl_ip = lcfg.zfssa_replication_ip data['location_info'] = "%s:%s:%s:%s:%s:%s" % (host, auth_str, pool, project, zfssa_tgt_group, repl_ip) data['total_capacity_gb'] = int(total) / units.Gi data['free_capacity_gb'] = int(avail) / units.Gi data['reserved_percentage'] = 0 data['QoS_support'] = False pool_details = self.zfssa.get_pool_details(lcfg.zfssa_pool) data['zfssa_poolprofile'] = pool_details['profile'] data['zfssa_volblocksize'] = lcfg.zfssa_lun_volblocksize data['zfssa_sparse'] = six.text_type(lcfg.zfssa_lun_sparse) data['zfssa_compression'] = lcfg.zfssa_lun_compression data['zfssa_logbias'] = lcfg.zfssa_lun_logbias self._stats = data def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update the stats first. 
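The stats reported here include a ``location_info`` string of the form
        ``host:auth_str:pool:project:target_group:replication_ip``
        (assembled in _update_volume_status); migrate_volume on a
        destination backend splits that value to decide whether
        backend-assisted migration is possible.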
""" if refresh: self._update_volume_status() return self._stats def create_export(self, context, volume, connector): pass def remove_export(self, context, volume): pass def ensure_export(self, context, volume): pass def extend_volume(self, volume, new_size): """Driver entry point to extent volume size.""" LOG.debug('extend_volume: volume name: %s', volume['name']) lcfg = self.configuration self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project, volume['name'], volsize=new_size * units.Gi) def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" zfssa_snapshot = {'volume_name': src_vref['name'], 'name': 'tmp-snapshot-%s' % volume['id']} self.create_snapshot(zfssa_snapshot) try: self.create_volume_from_snapshot(volume, zfssa_snapshot) except exception.VolumeBackendAPIException: LOG.error(_LE('Clone Volume:' '%(volume)s failed from source volume:' '%(src_vref)s'), {'volume': volume['name'], 'src_vref': src_vref['name']}) # Cleanup snapshot self.delete_snapshot(zfssa_snapshot) @utils.synchronized('zfssaiscsi', external=True) def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image. Verify the image ID being used: (1) If there is no existing cache volume, create one and transfer image data to it. Take a snapshot. (2) If a cache volume already exists, verify if it is either alternated or updated. If so try to remove it, raise exception if removal fails. Create a new cache volume as in (1). Clone a volume from the cache volume and returns it to Cinder. A file lock is placed on this method to prevent: (a) a race condition when a cache volume has been verified, but then gets deleted before it is cloned. (b) failure of subsequent clone_image requests if the first request is still pending. """ LOG.debug('Cloning image %(image)s to volume %(volume)s', {'image': image_meta['id'], 'volume': volume['name']}) lcfg = self.configuration cachevol_size = 0 if not lcfg.zfssa_enable_local_cache: return None, False with image_utils.TemporaryImages.fetch(image_service, context, image_meta['id']) as tmp_image: info = image_utils.qemu_img_info(tmp_image) cachevol_size = int(math.ceil(float(info.virtual_size) / units.Gi)) if cachevol_size > volume['size']: exception_msg = (_LE('Image size %(img_size)dGB is larger ' 'than volume size %(vol_size)dGB.'), {'img_size': cachevol_size, 'vol_size': volume['size']}) LOG.error(exception_msg) return None, False specs = self._get_voltype_specs(volume) cachevol_props = {'size': cachevol_size} try: cache_vol, cache_snap = self._verify_cache_volume(context, image_meta, image_service, specs, cachevol_props) # A cache volume and a snapshot should be ready by now # Create a clone from the cache volume self.zfssa.clone_snapshot(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache_vol, cache_snap, lcfg.zfssa_project, volume['name']) if cachevol_size < volume['size']: self.extend_volume(volume, volume['size']) except exception.VolumeBackendAPIException as exc: exception_msg = (_LE('Cannot clone image %(image)s to ' 'volume %(volume)s. Error: %(error)s.'), {'volume': volume['name'], 'image': image_meta['id'], 'error': exc.message}) LOG.error(exception_msg) return None, False return None, True def _verify_cache_volume(self, context, img_meta, img_service, specs, cachevol_props): """Verify if we have a cache volume that we want. If we don't, create one. If we do, check if it's been updated: * If so, delete it and recreate a new volume * If not, we are good. 
If it's out of date, delete it and create a new one. After the function returns, there should be a cache volume available, ready for cloning. """ lcfg = self.configuration cachevol_name = 'os-cache-vol-%s' % img_meta['id'] cachesnap_name = 'image-%s' % img_meta['id'] cachevol_meta = { 'cache_name': cachevol_name, 'snap_name': cachesnap_name, } cachevol_props.update(cachevol_meta) cache_vol, cache_snap = None, None updated_at = six.text_type(img_meta['updated_at'].isoformat()) LOG.debug('Verifying cache volume %s:', cachevol_name) try: cache_vol = self.zfssa.get_lun(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cachevol_name) if (not cache_vol.get('updated_at', None) or not cache_vol.get('image_id', None)): exc_msg = (_('Cache volume %s does not have required ' 'properties') % cachevol_name) LOG.error(exc_msg) raise exception.VolumeBackendAPIException(data=exc_msg) cache_snap = self.zfssa.get_lun_snapshot(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cachevol_name, cachesnap_name) except exception.VolumeNotFound: # There is no existing cache volume, create one: return self._create_cache_volume(context, img_meta, img_service, specs, cachevol_props) except exception.SnapshotNotFound: exception_msg = (_('Cache volume %(cache_vol)s' 'does not have snapshot %(cache_snap)s.'), {'cache_vol': cachevol_name, 'cache_snap': cachesnap_name}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) # A cache volume does exist, check if it's updated: if ((cache_vol['updated_at'] != updated_at) or (cache_vol['image_id'] != img_meta['id'])): # The cache volume is updated, but has clones: if cache_snap['numclones'] > 0: exception_msg = (_('Cannot delete ' 'cache volume: %(cachevol_name)s. ' 'It was updated at %(updated_at)s ' 'and currently has %(numclones)s ' 'volume instances.'), {'cachevol_name': cachevol_name, 'updated_at': updated_at, 'numclones': cache_snap['numclones']}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) # The cache volume is updated, but has no clone, so we delete it # and re-create a new one: self.zfssa.delete_lun(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cachevol_name) return self._create_cache_volume(context, img_meta, img_service, specs, cachevol_props) return cachevol_name, cachesnap_name def _create_cache_volume(self, context, img_meta, img_service, specs, cachevol_props): """Create a cache volume from an image. Returns names of the cache volume and its snapshot. """ lcfg = self.configuration cachevol_size = int(cachevol_props['size']) lunsize = "%sg" % six.text_type(cachevol_size) lun_props = { 'custom:image_id': img_meta['id'], 'custom:updated_at': ( six.text_type(img_meta['updated_at'].isoformat())), } lun_props.update(specs) cache_vol = { 'name': cachevol_props['cache_name'], 'id': img_meta['id'], 'size': cachevol_size, } LOG.debug('Creating cache volume %s.', cache_vol['name']) try: self.zfssa.create_lun(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache_vol['name'], lunsize, lcfg.zfssa_target_group, lun_props) super(ZFSSAISCSIDriver, self).copy_image_to_volume(context, cache_vol, img_service, img_meta['id']) self.zfssa.create_snapshot(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache_vol['name'], cachevol_props['snap_name']) except Exception as exc: exc_msg = (_('Fail to create cache volume %(volume)s. 
' 'Error: %(err)s'), {'volume': cache_vol['name'], 'err': six.text_type(exc)}) LOG.error(exc_msg) self.zfssa.delete_lun(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache_vol['name']) raise exception.VolumeBackendAPIException(data=exc_msg) return cachevol_props['cache_name'], cachevol_props['snap_name'] def local_path(self, volume): """Not implemented.""" pass def backup_volume(self, context, backup, backup_service): """Not implemented.""" pass def restore_backup(self, context, backup, volume, backup_service): """Not implemented.""" pass def _verify_clone_size(self, snapshot, size): """Check whether the clone size is the same as the parent volume.""" lcfg = self.configuration lun = self.zfssa.get_lun(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['volume_name']) return lun['size'] == size def initialize_connection(self, volume, connector): lcfg = self.configuration init_groups = self.zfssa.get_initiator_initiatorgroup( connector['initiator']) if not init_groups: if lcfg.zfssa_initiator_group == 'default': init_groups.append('default') else: exception_msg = (_('Failed to find iSCSI initiator group ' 'containing %(initiator)s.') % {'initiator': connector['initiator']}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) if ((lcfg.zfssa_enable_local_cache is True) and (volume['name'].startswith('os-cache-vol-'))): project = lcfg.zfssa_cache_project else: project = lcfg.zfssa_project for initiator_group in init_groups: self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool, project, volume['name'], initiator_group) iscsi_properties = {} provider = self._get_provider_info(volume) (target_portal, iqn, lun) = provider['provider_location'].split() iscsi_properties['target_discovered'] = False iscsi_properties['target_portal'] = target_portal iscsi_properties['target_iqn'] = iqn iscsi_properties['target_lun'] = int(lun) iscsi_properties['volume_id'] = volume['id'] if 'provider_auth' in provider: (auth_method, auth_username, auth_password) = provider[ 'provider_auth'].split() iscsi_properties['auth_method'] = auth_method iscsi_properties['auth_username'] = auth_username iscsi_properties['auth_password'] = auth_password return { 'driver_volume_type': 'iscsi', 'data': iscsi_properties } def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to terminate a connection for a volume.""" LOG.debug('terminate_connection: volume name: %s.', volume['name']) lcfg = self.configuration project = lcfg.zfssa_project if ((lcfg.zfssa_enable_local_cache is True) and (volume['name'].startswith('os-cache-vol-'))): project = lcfg.zfssa_cache_project self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool, project, volume['name'], '') def _get_voltype_specs(self, volume): """Get specs suitable for volume creation.""" vtype = volume.get('volume_type_id', None) extra_specs = None if vtype: extra_specs = volume_types.get_volume_type_extra_specs(vtype) return self._get_specs(extra_specs) def _get_specs(self, xspecs): """Return a dict with extra specs and/or config values.""" result = {} for spc in ZFSSA_LUN_SPECS: val = None prop = spc.split(':')[1] cfg = 'zfssa_lun_' + prop if xspecs: val = xspecs.pop(spc, None) if val is None: val = self.configuration.safe_get(cfg) if val is not None and val != '': result.update({prop: val}) return result def migrate_volume(self, ctxt, volume, host): LOG.debug('Attempting ZFSSA enabled volume migration. 
volume: %(id)s, ' 'host: %(host)s, status=%(status)s.', {'id': volume['id'], 'host': host, 'status': volume['status']}) lcfg = self.configuration default_ret = (False, None) if volume['status'] != "available": LOG.debug('Only available volumes can be migrated using backend ' 'assisted migration. Defaulting to generic migration.') return default_ret if (host['capabilities']['vendor_name'] != 'Oracle' or host['capabilities']['storage_protocol'] != self.protocol): LOG.debug('Source and destination drivers need to be Oracle iSCSI ' 'to use backend assisted migration. Defaulting to ' 'generic migration.') return default_ret if 'location_info' not in host['capabilities']: LOG.debug('Could not find location_info in capabilities reported ' 'by the destination driver. Defaulting to generic ' 'migration.') return default_ret loc_info = host['capabilities']['location_info'] try: (tgt_host, auth_str, tgt_pool, tgt_project, tgt_tgtgroup, tgt_repl_ip) = loc_info.split(':') except ValueError: LOG.error(_LE("Location info needed for backend enabled volume " "migration not in correct format: %s. Continuing " "with generic volume migration."), loc_info) return default_ret if tgt_repl_ip == '': msg = _LE("zfssa_replication_ip not set in cinder.conf. " "zfssa_replication_ip is needed for backend enabled " "volume migration. Continuing with generic volume " "migration.") LOG.error(msg) return default_ret src_pool = lcfg.zfssa_pool src_project = lcfg.zfssa_project try: LOG.info(_LI('Connecting to target host: %s for backend enabled ' 'migration.'), tgt_host) self.tgt_zfssa.set_host(tgt_host) self.tgt_zfssa.login(auth_str) # Verify that the replication service is online try: self.zfssa.verify_service('replication') self.tgt_zfssa.verify_service('replication') except exception.VolumeBackendAPIException: return default_ret # ensure that a target group by the same name exists on the target # system also, if not, use default migration. lun = self.zfssa.get_lun(src_pool, src_project, volume['name']) if lun['targetgroup'] != tgt_tgtgroup: return default_ret tgt_asn = self.tgt_zfssa.get_asn() src_asn = self.zfssa.get_asn() # verify on the source system that the destination has been # registered as a replication target tgts = self.zfssa.get_replication_targets() targets = [] for target in tgts['targets']: if target['asn'] == tgt_asn: targets.append(target) if targets == []: LOG.debug('Target host: %(host)s for volume migration ' 'not configured as a replication target ' 'for volume: %(vol)s.', {'host': tgt_repl_ip, 'vol': volume['name']}) return default_ret # Multiple ips from the same appliance may be configured # as different targets for target in targets: if target['address'] == tgt_repl_ip + ':216': break if target['address'] != tgt_repl_ip + ':216': LOG.debug('Target with replication ip: %s not configured on ' 'the source appliance for backend enabled volume ' 'migration. 
Proceeding with default migration.', tgt_repl_ip) return default_ret flow = lf.Flow('zfssa_volume_migration').add( MigrateVolumeInit(), MigrateVolumeCreateAction(provides='action_id'), MigrateVolumeSendReplUpdate(), MigrateVolumeSeverRepl(), MigrateVolumeMoveVol(), MigrateVolumeCleanUp() ) taskflow.engines.run(flow, store={'driver': self, 'tgt_zfssa': self.tgt_zfssa, 'tgt_pool': tgt_pool, 'tgt_project': tgt_project, 'volume': volume, 'tgt_asn': tgt_asn, 'src_zfssa': self.zfssa, 'src_asn': src_asn, 'src_pool': src_pool, 'src_project': src_project, 'target': target}) return(True, None) except Exception: LOG.error(_LE("Error migrating volume: %s"), volume['name']) raise def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ lcfg = self.configuration original_name = CONF.volume_name_template % volume['id'] current_name = CONF.volume_name_template % new_volume['id'] LOG.debug('Renaming migrated volume: %(cur)s to %(org)s', {'cur': current_name, 'org': original_name}) self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project, current_name, name=original_name) return {'_name_id': None} @utils.synchronized('zfssaiscsi', external=True) def _check_origin(self, lun, volname): """Verify the cache volume of a bootable volume. If the cache no longer has clone, it will be deleted. There is a small lag between the time a clone is deleted and the number of clones being updated accordingly. There is also a race condition when multiple volumes (clones of a cache volume) are deleted at once, leading to the number of clones reported incorrectly. The file lock is here to avoid such issues. """ lcfg = self.configuration cache = lun['origin'] numclones = -1 if (cache['snapshot'].startswith('image-') and cache['share'].startswith('os-cache-vol')): try: numclones = self.zfssa.num_clones(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache['share'], cache['snapshot']) except Exception: LOG.debug('Cache volume is already deleted.') return LOG.debug('Checking cache volume %(name)s, numclones = %(clones)d', {'name': cache['share'], 'clones': numclones}) # Sometimes numclones still hold old values even when all clones # have been deleted. So we handle this situation separately here: if numclones == 1: try: self.zfssa.get_lun(lcfg.zfssa_pool, lcfg.zfssa_project, volname) # The volume does exist, so return return except exception.VolumeNotFound: # The volume is already deleted numclones = 0 if numclones == 0: try: self.zfssa.delete_lun(lcfg.zfssa_pool, lcfg.zfssa_cache_project, cache['share']) except exception.VolumeBackendAPIException: LOG.warning(_LW("Volume %s exists but can't be deleted"), cache['share']) def manage_existing(self, volume, existing_ref): """Manage an existing volume in the ZFSSA backend. :param volume: Reference to the new volume. :param existing_ref: Reference to the existing volume to be managed. 
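Only the ``source-name`` key of ``existing_ref`` is honored here (see
        _get_existing_vol). An illustrative reference, with a placeholder
        LUN name::

            {'source-name': 'lun-to-manage'}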
""" lcfg = self.configuration existing_vol = self._get_existing_vol(existing_ref) self._verify_volume_to_manage(existing_vol) new_vol_name = volume['name'] try: self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project, existing_vol['name'], name=new_vol_name, schema={"custom:cinder_managed": True}) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to rename volume %(existing)s to " "%(new)s. Volume manage failed."), {'existing': existing_vol['name'], 'new': new_vol_name}) return None def manage_existing_get_size(self, volume, existing_ref): """Return size of the volume to be managed by manage_existing.""" existing_vol = self._get_existing_vol(existing_ref) size = existing_vol['size'] return int(math.ceil(float(size) / units.Gi)) def unmanage(self, volume): """Remove an existing volume from cinder management. :param volume: Reference to the volume to be unmanaged. """ lcfg = self.configuration new_name = 'unmanaged-' + volume['name'] try: self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project, volume['name'], name=new_name, schema={"custom:cinder_managed": False}) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to rename volume %(existing)s to" " %(new)s. Volume unmanage failed."), {'existing': volume['name'], 'new': new_name}) return None def _verify_volume_to_manage(self, volume): lcfg = self.configuration if lcfg.zfssa_manage_policy == 'loose': return vol_name = volume['name'] if 'cinder_managed' not in volume: err_msg = (_("Unknown if the volume: %s to be managed is " "already being managed by Cinder. Aborting manage " "volume. Please add 'cinder_managed' custom schema " "property to the volume and set its value to False." 
" Alternatively, set the value of cinder config " "policy 'zfssa_manage_policy' to 'loose' to " "remove this restriction.") % vol_name) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) if volume['cinder_managed'] is True: msg = (_("Volume: %s is already being managed by Cinder.") % vol_name) LOG.error(msg) raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name) def _get_existing_vol(self, existing_ref): lcfg = self.configuration if 'source-name' not in existing_ref: msg = (_("Reference to volume: %s to be managed must contain " "source-name.") % existing_ref) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) try: existing_vol = self.zfssa.get_lun(lcfg.zfssa_pool, lcfg.zfssa_project, existing_ref['source-name']) except exception.VolumeNotFound: err_msg = (_("Volume %s doesn't exist on the ZFSSA " "backend.") % existing_vol['name']) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) return existing_vol class MigrateVolumeInit(task.Task): def execute(self, src_zfssa, volume, src_pool, src_project): LOG.debug('Setting inherit flag on source backend to False.') src_zfssa.edit_inherit_replication_flag(src_pool, src_project, volume['name'], set=False) def revert(self, src_zfssa, volume, src_pool, src_project, **kwargs): LOG.debug('Rollback: Setting inherit flag on source appliance to ' 'True.') src_zfssa.edit_inherit_replication_flag(src_pool, src_project, volume['name'], set=True) class MigrateVolumeCreateAction(task.Task): def execute(self, src_zfssa, volume, src_pool, src_project, target, tgt_pool): LOG.debug('Creating replication action on source appliance.') action_id = src_zfssa.create_replication_action(src_pool, src_project, target['label'], tgt_pool, volume['name']) self._action_id = action_id return action_id def revert(self, src_zfssa, **kwargs): if hasattr(self, '_action_id'): LOG.debug('Rollback: deleting replication action on source ' 'appliance.') src_zfssa.delete_replication_action(self._action_id) class MigrateVolumeSendReplUpdate(task.Task): def execute(self, src_zfssa, action_id): LOG.debug('Sending replication update from source appliance.') src_zfssa.send_repl_update(action_id) LOG.debug('Deleting replication action on source appliance.') src_zfssa.delete_replication_action(action_id) self._action_deleted = True class MigrateVolumeSeverRepl(task.Task): def execute(self, tgt_zfssa, src_asn, action_id, driver): source = tgt_zfssa.get_replication_source(src_asn) if not source: err = (_('Source with host ip/name: %s not found on the ' 'target appliance for backend enabled volume ' 'migration, procedding with default migration.'), driver.configuration.san_ip) LOG.error(err) raise exception.VolumeBackendAPIException(data=err) LOG.debug('Severing replication package on destination appliance.') tgt_zfssa.sever_replication(action_id, source['name'], project=action_id) class MigrateVolumeMoveVol(task.Task): def execute(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume): LOG.debug('Moving LUN to destination project on destination ' 'appliance.') tgt_zfssa.move_volume(tgt_pool, action_id, volume['name'], tgt_project) LOG.debug('Deleting temporary project on destination appliance.') tgt_zfssa.delete_project(tgt_pool, action_id) self._project_deleted = True def revert(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume, **kwargs): if not hasattr(self, '_project_deleted'): LOG.debug('Rollback: deleting temporary project on destination ' 'appliance.') tgt_zfssa.delete_project(tgt_pool, action_id) class 
MigrateVolumeCleanUp(task.Task): def execute(self, driver, volume, tgt_zfssa): LOG.debug('Finally, delete source volume on source appliance.') driver.delete_volume(volume) tgt_zfssa.logout() cinder-8.0.0/cinder/volume/drivers/zfssa/zfssanfs.py0000664000567000056710000007626512701406250023727 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ZFS Storage Appliance NFS Cinder Volume Driver """ import datetime as dt import errno import math import os from oslo_config import cfg from oslo_log import log from oslo_serialization import base64 from oslo_utils import excutils from oslo_utils import units import six from cinder import exception from cinder import utils from cinder.i18n import _, _LE, _LI from cinder.image import image_utils from cinder.volume.drivers import nfs from cinder.volume.drivers.san import san from cinder.volume.drivers.zfssa import zfssarest ZFSSA_OPTS = [ cfg.StrOpt('zfssa_data_ip', help='Data path IP address'), cfg.StrOpt('zfssa_https_port', default='443', help='HTTPS port number'), cfg.StrOpt('zfssa_nfs_mount_options', default='', help='Options to be passed while mounting share over nfs'), cfg.StrOpt('zfssa_nfs_pool', default='', help='Storage pool name.'), cfg.StrOpt('zfssa_nfs_project', default='NFSProject', help='Project name.'), cfg.StrOpt('zfssa_nfs_share', default='nfs_share', help='Share name.'), cfg.StrOpt('zfssa_nfs_share_compression', default='off', choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'], help='Data compression.'), cfg.StrOpt('zfssa_nfs_share_logbias', default='latency', choices=['latency', 'throughput'], help='Synchronous write bias-latency, throughput.'), cfg.IntOpt('zfssa_rest_timeout', help='REST connection timeout. (seconds)'), cfg.BoolOpt('zfssa_enable_local_cache', default=True, help='Flag to enable local caching: True, False.'), cfg.StrOpt('zfssa_cache_directory', default='os-cinder-cache', help='Name of directory inside zfssa_nfs_share where cache ' 'volumes are stored.'), cfg.StrOpt('zfssa_manage_policy', default='loose', choices=['loose', 'strict'], help='Driver policy for volume manage.') ] LOG = log.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(ZFSSA_OPTS) def factory_zfssa(): return zfssarest.ZFSSANfsApi() class ZFSSANFSDriver(nfs.NfsDriver): """ZFSSA Cinder NFS volume driver. Version history: 1.0.1: Backend enabled volume migration. Local cache feature. 1.0.2: Volume manage/unmanage support. """ VERSION = '1.0.2' volume_backend_name = 'ZFSSA_NFS' protocol = driver_prefix = driver_volume_type = 'nfs' def __init__(self, *args, **kwargs): super(ZFSSANFSDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(ZFSSA_OPTS) self.configuration.append_config_values(san.san_opts) self.zfssa = None self._stats = None def do_setup(self, context): if not self.configuration.max_over_subscription_ratio > 0: msg = _("Config 'max_over_subscription_ratio' invalid. 
Must be > " "0: %s") % self.configuration.max_over_subscription_ratio LOG.error(msg) raise exception.NfsException(msg) package = 'mount.nfs' try: self._execute(package, check_exit_code=False, run_as_root=True) except OSError as exc: if exc.errno == errno.ENOENT: msg = _('%s is not installed') % package raise exception.NfsException(msg) else: raise lcfg = self.configuration LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip) host = lcfg.san_ip user = lcfg.san_login password = lcfg.san_password https_port = lcfg.zfssa_https_port credentials = ['san_ip', 'san_login', 'san_password', 'zfssa_data_ip'] for cred in credentials: if not getattr(lcfg, cred, None): exception_msg = _('%s not set in cinder.conf') % cred LOG.error(exception_msg) raise exception.CinderException(exception_msg) self.zfssa = factory_zfssa() self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout) auth_str = base64.encode_as_text('%s:%s' % (user, password)) self.zfssa.login(auth_str) self.zfssa.create_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, compression=lcfg.zfssa_nfs_share_compression, logbias=lcfg.zfssa_nfs_share_logbias) share_args = { 'sharedav': 'rw', 'sharenfs': 'rw', 'root_permissions': '777', 'compression': lcfg.zfssa_nfs_share_compression, 'logbias': lcfg.zfssa_nfs_share_logbias } self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share, share_args) share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share) mountpoint = share_details['mountpoint'] self.mount_path = lcfg.zfssa_data_ip + ':' + mountpoint https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \ '/shares' + mountpoint LOG.debug('NFS mount path: %s', self.mount_path) LOG.debug('WebDAV path to the share: %s', https_path) self.shares = {} mnt_opts = self.configuration.zfssa_nfs_mount_options self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None # Initialize the WebDAV client self.zfssa.set_webdav(https_path, auth_str) # Edit http service so that WebDAV requests are always authenticated args = {'https_port': https_port, 'require_login': True} self.zfssa.modify_service('http', args) self.zfssa.enable_service('http') if lcfg.zfssa_enable_local_cache: LOG.debug('Creating local cache directory %s.', lcfg.zfssa_cache_directory) self.zfssa.create_directory(lcfg.zfssa_cache_directory) def _ensure_shares_mounted(self): try: self._ensure_share_mounted(self.mount_path) except Exception as exc: LOG.error(_LE('Exception during mounting %s.'), exc) self._mounted_shares = [self.mount_path] LOG.debug('Available shares %s', self._mounted_shares) def check_for_setup_error(self): """Check that driver can login. 
Check also for properly configured pool, project and share Check that the http and nfs services are enabled """ lcfg = self.configuration self.zfssa.verify_pool(lcfg.zfssa_nfs_pool) self.zfssa.verify_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project) self.zfssa.verify_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share) self.zfssa.verify_service('http') self.zfssa.verify_service('nfs') def create_volume(self, volume): super(ZFSSANFSDriver, self).create_volume(volume) self.zfssa.set_file_props(volume['name'], {'cinder_managed': 'True'}) def create_snapshot(self, snapshot): """Creates a snapshot of a volume.""" LOG.info(_LI('Creating snapshot: %s'), snapshot['name']) lcfg = self.configuration snap_name = self._create_snapshot_name() self.zfssa.create_snapshot(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share, snap_name) src_file = snap_name + '/' + snapshot['volume_name'] try: self.zfssa.create_snapshot_of_volume_file(src_file=src_file, dst_file= snapshot['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.debug('Error thrown during snapshot: %s creation', snapshot['name']) finally: self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share, snap_name) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.info(_LI('Deleting snapshot: %s'), snapshot['name']) self.zfssa.delete_snapshot_of_volume_file(src_file=snapshot['name']) def create_volume_from_snapshot(self, volume, snapshot, method='COPY'): LOG.info(_LI('Creatng volume from snapshot. volume: %s'), volume['name']) LOG.info(_LI('Source Snapshot: %s'), snapshot['name']) self._ensure_shares_mounted() self.zfssa.create_volume_from_snapshot_file(src_file=snapshot['name'], dst_file=volume['name'], method=method) volume['provider_location'] = self.mount_path if volume['size'] != snapshot['volume_size']: try: self.extend_volume(volume, volume['size']) except Exception: vol_path = self.local_path(volume) with excutils.save_and_reraise_exception(): LOG.error(_LE('Error in extending volume size: Volume: ' '%(volume)s Vol_Size: %(vol_size)d with ' 'Snapshot: %(snapshot)s Snap_Size: ' '%(snap_size)d'), {'volume': volume['name'], 'vol_size': volume['size'], 'snapshot': snapshot['name'], 'snap_size': snapshot['volume_size']}) self._execute('rm', '-f', vol_path, run_as_root=True) volume_origin = {'origin': snapshot['volume_name']} self.zfssa.set_file_props(volume['name'], volume_origin) return {'provider_location': volume['provider_location']} def create_cloned_volume(self, volume, src_vref): """Creates a snapshot and then clones the snapshot into a volume.""" LOG.info(_LI('new cloned volume: %s'), volume['name']) LOG.info(_LI('source volume for cloning: %s'), src_vref['name']) snapshot = {'volume_name': src_vref['name'], 'volume_id': src_vref['id'], 'volume_size': src_vref['size'], 'name': self._create_snapshot_name()} self.create_snapshot(snapshot) return self.create_volume_from_snapshot(volume, snapshot, method='MOVE') def delete_volume(self, volume): LOG.debug('Deleting volume %s.', volume['name']) lcfg = self.configuration try: vol_props = self.zfssa.get_volume(volume['name']) except exception.VolumeNotFound: return super(ZFSSANFSDriver, self).delete_volume(volume) if vol_props['origin'].startswith(lcfg.zfssa_cache_directory): LOG.info(_LI('Checking origin %(origin)s of volume %(volume)s.'), {'origin': vol_props['origin'], 'volume': volume['name']}) self._check_origin(vol_props['origin']) @utils.synchronized('zfssanfs', external=True) def 
clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image. Verify the image ID being used: (1) If there is no existing cache volume, create one and transfer image data to it. Take a snapshot. (2) If a cache volume already exists, verify if it is either alternated or updated. If so try to remove it, raise exception if removal fails. Create a new cache volume as in (1). Clone a volume from the cache volume and returns it to Cinder. A file lock is placed on this method to prevent: (a) a race condition when a cache volume has been verified, but then gets deleted before it is cloned. (b) failure of subsequent clone_image requests if the first request is still pending. """ LOG.debug('Cloning image %(image)s to volume %(volume)s', {'image': image_meta['id'], 'volume': volume['name']}) lcfg = self.configuration cachevol_size = 0 if not lcfg.zfssa_enable_local_cache: return None, False with image_utils.TemporaryImages.fetch( image_service, context, image_meta['id']) as tmp_image: info = image_utils.qemu_img_info(tmp_image) cachevol_size = int(math.ceil(float(info.virtual_size) / units.Gi)) if cachevol_size > volume['size']: exception_msg = (_LE('Image size %(img_size)dGB is larger ' 'than volume size %(vol_size)dGB.'), {'img_size': cachevol_size, 'vol_size': volume['size']}) LOG.error(exception_msg) return None, False cache_dir = '%s/' % lcfg.zfssa_cache_directory updated_at = six.text_type(image_meta['updated_at'].isoformat()) cachevol_props = { 'name': '%sos-cache-vol-%s' % (cache_dir, image_meta['id']), 'size': cachevol_size, 'updated_at': updated_at, 'image_id': image_meta['id'], } try: cachevol_name = self._verify_cache_volume(context, image_meta, image_service, cachevol_props) # A cache volume should be ready by now # Create a clone from the cache volume cache_vol = { 'name': cachevol_name, 'size': cachevol_size, 'id': image_meta['id'], } clone_vol = self.create_cloned_volume(volume, cache_vol) self._update_origin(volume['name'], cachevol_name) except exception.VolumeBackendAPIException as exc: exception_msg = (_LE('Cannot clone image %(image)s to ' 'volume %(volume)s. Error: %(error)s.'), {'volume': volume['name'], 'image': image_meta['id'], 'error': exc.message}) LOG.error(exception_msg) return None, False return clone_vol, True def _verify_cache_volume(self, context, img_meta, img_service, cachevol_props): """Verify if we have a cache volume that we want. If we don't, create one. If we do, check if it's been updated: * If so, delete it and recreate a new volume * If not, we are good. If it's out of date, delete it and create a new one. After the function returns, there should be a cache volume available, ready for cloning. """ cachevol_name = cachevol_props['name'] cache_vol = None LOG.debug('Verifying cache volume %s:', cachevol_name) try: cache_vol = self.zfssa.get_volume(cachevol_name) except exception.VolumeNotFound: # There is no existing cache volume, create one: LOG.debug('Cache volume not found. Creating one...') return self._create_cache_volume(context, img_meta, img_service, cachevol_props) # A cache volume does exist, check if it's updated: if ((cache_vol['updated_at'] != cachevol_props['updated_at']) or (cache_vol['image_id'] != cachevol_props['image_id'])): if cache_vol['numclones'] > 0: # The cache volume is updated, but has clones exception_msg = (_('Cannot delete ' 'cache volume: %(cachevol_name)s. 
' 'It was updated at %(updated_at)s ' 'and currently has %(numclones)d ' 'volume instances.'), {'cachevol_name': cachevol_name, 'updated_at': cachevol_props['updated_at'], 'numclones': cache_vol['numclones']}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) # The cache volume is updated, but has no clone, so we delete it # and re-create a new one: cache_vol = { 'provider_location': self.mount_path, 'name': cachevol_name, } self.delete_volume(cache_vol) return self._create_cache_volume(context, img_meta, img_service, cachevol_props) return cachevol_name def _create_cache_volume(self, context, img_meta, img_service, cachevol_props): """Create a cache volume from an image. Returns name of the cache volume. """ cache_vol = { 'provider_location': self.mount_path, 'size': cachevol_props['size'], 'name': cachevol_props['name'], } LOG.debug('Creating cache volume %s', cache_vol['name']) try: super(ZFSSANFSDriver, self).create_volume(cache_vol) LOG.debug('Copying image data:') super(ZFSSANFSDriver, self).copy_image_to_volume(context, cache_vol, img_service, img_meta['id']) except Exception as exc: exc_msg = (_('Fail to create cache volume %(volume)s. ' 'Error: %(err)s'), {'volume': cache_vol['name'], 'err': six.text_type(exc)}) LOG.error(exc_msg) self.zfssa.delete_file(cache_vol['name']) raise exception.VolumeBackendAPIException(data=exc_msg) cachevol_meta = { 'updated_at': cachevol_props['updated_at'], 'image_id': cachevol_props['image_id'], } cachevol_meta.update({'numclones': '0'}) self.zfssa.set_file_props(cache_vol['name'], cachevol_meta) return cache_vol['name'] def _create_snapshot_name(self): """Creates a snapshot name from the date and time.""" return ('cinder-zfssa-nfs-snapshot-%s' % dt.datetime.utcnow().isoformat()) def _get_share_capacity_info(self): """Get available and used capacity info for the NFS share.""" lcfg = self.configuration share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share) free = share_details['space_available'] used = share_details['space_total'] return free, used @utils.synchronized('zfssanfs', external=True) def _check_origin(self, origin): """Verify the cache volume of a bootable volume. If the cache no longer has clone, it will be deleted. """ try: cachevol_props = self.zfssa.get_volume(origin) except exception.VolumeNotFound: LOG.debug('Origin %s does not exist', origin) return numclones = cachevol_props['numclones'] LOG.debug('Number of clones: %d', numclones) if numclones <= 1: # This cache vol does not have any other clone self.zfssa.delete_file(origin) else: cachevol_props = {'numclones': six.text_type(numclones - 1)} self.zfssa.set_file_props(origin, cachevol_props) def _update_origin(self, vol_name, cachevol_name): """Update WebDAV property of a volume. WebDAV properties are used to keep track of: (1) The number of clones of a cache volume. (2) The cache volume name (origin) of a bootable volume. To avoid race conditions when multiple volumes are created and needed to be updated, a file lock is used to ensure that the properties are updated properly. 
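As a sketch (the image ID is a placeholder and the default
        ``zfssa_cache_directory`` is assumed): the bootable volume's
        ``origin`` property becomes
        ``os-cinder-cache/os-cache-vol-<image_id>``, and the cache
        volume's ``numclones`` counter is incremented by one.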
""" volume_origin = {'origin': cachevol_name} self.zfssa.set_file_props(vol_name, volume_origin) cache_props = self.zfssa.get_volume(cachevol_name) cache_props.update({'numclones': six.text_type(cache_props['numclones'] + 1)}) self.zfssa.set_file_props(cachevol_name, cache_props) def _update_volume_stats(self): """Get volume stats from zfssa""" self._ensure_shares_mounted() data = {} lcfg = self.configuration backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.__class__.__name__ data['vendor_name'] = 'Oracle' data['driver_version'] = self.VERSION data['storage_protocol'] = self.protocol asn = self.zfssa.get_asn() data['location_info'] = '%s:%s' % (asn, lcfg.zfssa_nfs_share) free, used = self._get_share_capacity_info() capacity = float(free) + float(used) ratio_used = used / capacity data['QoS_support'] = False data['reserved_percentage'] = 0 used_percentage_limit = 100 - self.configuration.reserved_percentage used_ratio_limit = used_percentage_limit / 100.0 if (ratio_used > used_ratio_limit or ratio_used >= self.configuration.max_over_subscription_ratio): data['reserved_percentage'] = 100 data['total_capacity_gb'] = float(capacity) / units.Gi data['free_capacity_gb'] = float(free) / units.Gi share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, lcfg.zfssa_nfs_share) pool_details = self.zfssa.get_pool_details(lcfg.zfssa_nfs_pool) data['zfssa_compression'] = share_details['compression'] data['zfssa_encryption'] = share_details['encryption'] data['zfssa_logbias'] = share_details['logbias'] data['zfssa_poolprofile'] = pool_details['profile'] data['zfssa_sparse'] = six.text_type(lcfg.nfs_sparsed_volumes) self._stats = data def migrate_volume(self, ctxt, volume, host): LOG.debug('Attempting ZFSSA enabled volume migration. volume: %(id)s, ' 'host: %(host)s, status=%(status)s', {'id': volume['id'], 'host': host, 'status': volume['status']}) lcfg = self.configuration default_ret = (False, None) if volume['status'] != "available": LOG.debug('Only available volumes can be migrated using backend ' 'assisted migration. Defaulting to generic migration.') return default_ret if (host['capabilities']['vendor_name'] != 'Oracle' or host['capabilities']['storage_protocol'] != self.protocol): LOG.debug('Source and destination drivers need to be Oracle iSCSI ' 'to use backend assisted migration. Defaulting to ' 'generic migration.') return default_ret if 'location_info' not in host['capabilities']: LOG.debug('Could not find location_info in capabilities reported ' 'by the destination driver. Defaulting to generic ' 'migration.') return default_ret loc_info = host['capabilities']['location_info'] try: (tgt_asn, tgt_share) = loc_info.split(':') except ValueError: LOG.error(_LE("Location info needed for backend enabled volume " "migration not in correct format: %s. Continuing " "with generic volume migration."), loc_info) return default_ret src_asn = self.zfssa.get_asn() if tgt_asn == src_asn and lcfg.zfssa_nfs_share == tgt_share: LOG.info(_LI('Source and destination ZFSSA shares are the same. ' 'Do nothing. volume: %s'), volume['name']) return (True, None) return (False, None) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. 
:param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ original_name = CONF.volume_name_template % volume['id'] current_name = CONF.volume_name_template % new_volume['id'] LOG.debug('Renaming migrated volume: %(cur)s to %(org)s.', {'cur': current_name, 'org': original_name}) self.zfssa.create_volume_from_snapshot_file(src_file=current_name, dst_file=original_name, method='MOVE') provider_location = new_volume['provider_location'] return {'_name_id': None, 'provider_location': provider_location} def manage_existing(self, volume, existing_ref): """Manage an existing volume in the ZFSSA backend. :param volume: Reference to the new volume. :param existing_ref: Reference to the existing volume to be managed. """ existing_vol_name = self._get_existing_vol_name(existing_ref) try: vol_props = self.zfssa.get_volume(existing_vol_name) except exception.VolumeNotFound: err_msg = (_("Volume %s doesn't exist on the ZFSSA backend.") % existing_vol_name) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) self._verify_volume_to_manage(existing_vol_name, vol_props) try: self.zfssa.rename_volume(existing_vol_name, volume['name']) except Exception: LOG.error(_LE("Failed to rename volume %(existing)s to %(new)s. " "Volume manage failed."), {'existing': existing_vol_name, 'new': volume['name']}) raise try: self.zfssa.set_file_props(volume['name'], {'cinder_managed': 'True'}) except Exception: self.zfssa.rename_volume(volume['name'], existing_vol_name) LOG.error(_LE("Failed to set properties for volume %(existing)s. " "Volume manage failed."), {'existing': volume['name']}) raise return {'provider_location': self.mount_path} def manage_existing_get_size(self, volume, existing_ref): """Return size of the volume to be managed by manage_existing.""" existing_vol_name = self._get_existing_vol_name(existing_ref) # The ZFSSA NFS driver only has one mounted share. local_share_mount = self._get_mount_point_for_share( self._mounted_shares[0]) local_vol_path = os.path.join(local_share_mount, existing_vol_name) try: if os.path.isfile(local_vol_path): size = int(math.ceil(float( utils.get_file_size(local_vol_path)) / units.Gi)) except (OSError, ValueError): err_msg = (_("Failed to get size of existing volume: %(vol). " "Volume Manage failed."), {'vol': existing_vol_name}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) LOG.debug("Size volume: %(vol)s to be migrated is: %(size)s.", {'vol': existing_vol_name, 'size': size}) return size def _verify_volume_to_manage(self, name, vol_props): lcfg = self.configuration if lcfg.zfssa_manage_policy != 'strict': return if vol_props['cinder_managed'] == "": err_msg = (_("Unknown if the volume: %s to be managed is " "already being managed by Cinder. Aborting manage " "volume. Please add 'cinder_managed' custom schema " "property to the volume and set its value to False. 
" "Alternatively, Set the value of cinder config " "policy 'zfssa_manage_policy' to 'loose' to " "remove this restriction.") % name) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) if vol_props['cinder_managed'] == 'True': msg = (_("Volume: %s is already being managed by Cinder.") % name) LOG.error(msg) raise exception.ManageExistingAlreadyManaged(volume_ref=name) def unmanage(self, volume): """Remove an existing volume from cinder management. :param volume: Reference to the volume to be unmanaged. """ new_name = 'unmanaged-' + volume['name'] try: self.zfssa.rename_volume(volume['name'], new_name) except Exception: LOG.error(_LE("Failed to rename volume %(existing)s to %(new)s. " "Volume unmanage failed."), {'existing': volume['name'], 'new': new_name}) raise try: self.zfssa.set_file_props(new_name, {'cinder_managed': 'False'}) except Exception: self.zfssa.rename_volume(new_name, volume['name']) LOG.error(_LE("Failed to set properties for volume %(existing)s. " "Volume unmanage failed."), {'existing': volume['name']}) raise def _get_existing_vol_name(self, existing_ref): if 'source-name' not in existing_ref: msg = _("Reference to volume to be managed must contain " "source-name.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) return existing_ref['source-name'] cinder-8.0.0/cinder/volume/drivers/zfssa/restclient.py0000664000567000056710000003030512701406250024227 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ZFS Storage Appliance REST API Client Programmatic Interface """ import json import ssl import time from oslo_log import log import six from six.moves import http_client from six.moves import urllib from cinder.i18n import _LE, _LI LOG = log.getLogger(__name__) class Status(object): """Result HTTP Status""" def __init__(self): pass #: Request return OK OK = http_client.OK #: New resource created successfully CREATED = http_client.CREATED #: Command accepted ACCEPTED = http_client.ACCEPTED #: Command returned OK but no data will be returned NO_CONTENT = http_client.NO_CONTENT #: Bad Request BAD_REQUEST = http_client.BAD_REQUEST #: User is not authorized UNAUTHORIZED = http_client.UNAUTHORIZED #: The request is not allowed FORBIDDEN = http_client.FORBIDDEN #: The requested resource was not found NOT_FOUND = http_client.NOT_FOUND #: The request is not allowed NOT_ALLOWED = http_client.METHOD_NOT_ALLOWED #: Request timed out TIMEOUT = http_client.REQUEST_TIMEOUT #: Invalid request CONFLICT = http_client.CONFLICT #: Service Unavailable BUSY = http_client.SERVICE_UNAVAILABLE class RestResult(object): """Result from a REST API operation""" def __init__(self, response=None, err=None): """Initialize a RestResult containing the results from a REST call. 
        :param response: HTTP response
        """
        self.response = response
        self.error = err
        self.data = ""
        self.status = 0
        if self.response:
            self.status = self.response.getcode()
            result = self.response.read()
            while result:
                self.data += result
                result = self.response.read()

        if self.error:
            self.status = self.error.code
            self.data = http_client.responses[self.status]

        LOG.debug('Response code: %s', self.status)
        LOG.debug('Response data: %s', self.data)

    def get_header(self, name):
        """Get an HTTP header with the given name from the results

        :param name: HTTP header name
        :return: The header value or None if no value is found
        """
        if self.response is None:
            return None
        info = self.response.info()
        return info.getheader(name)


class RestClientError(Exception):
    """Exception for ZFS REST API client errors"""
    def __init__(self, status, name="ERR_INTERNAL", message=None):
        """Create a REST Response exception

        :param status: HTTP response status
        :param name: The name of the REST API error type
        :param message: Descriptive error message returned from REST call
        """
        super(RestClientError, self).__init__(message)
        self.code = status
        self.name = name
        self.msg = message
        if status in http_client.responses:
            self.msg = http_client.responses[status]

    def __str__(self):
        return "%d %s %s" % (self.code, self.name, self.msg)


class RestClientURL(object):
    """ZFSSA urllib client"""
    def __init__(self, url, **kwargs):
        """Initialize a REST client.

        :param url: The ZFSSA REST API URL
        :key session: HTTP Cookie value of x-auth-session obtained from a
                      normal BUI login.
        :key timeout: Time in seconds to wait for command to complete.
            (Default is 60 seconds)
        """
        self.url = url
        self.local = kwargs.get("local", False)
        self.base_path = kwargs.get("base_path", "/api")
        self.timeout = kwargs.get("timeout", 60)
        # Initialize the header dict before optionally adding the session
        # cookie; assigning into it while it is still None would raise a
        # TypeError, and re-assigning it afterwards would drop the cookie.
        self.headers = {"content-type": "application/json"}
        if kwargs.get('session'):
            self.headers['x-auth-session'] = kwargs.get('session')
        self.do_logout = False
        self.auth_str = None

    def _path(self, path, base_path=None):
        """build rest url path"""
        if path.startswith("http://") or path.startswith("https://"):
            return path
        if base_path is None:
            base_path = self.base_path
        if not path.startswith(base_path) and not (
                self.local and ("/api" + path).startswith(base_path)):
            path = "%s%s" % (base_path, path)
        if self.local and path.startswith("/api"):
            path = path[4:]
        return self.url + path

    def _authorize(self):
        """Performs authorization setting x-auth-session"""
        self.headers['authorization'] = 'Basic %s' % self.auth_str
        if 'x-auth-session' in self.headers:
            del self.headers['x-auth-session']
        try:
            result = self.post("/access/v1")
            del self.headers['authorization']
            if result.status == http_client.CREATED:
                self.headers['x-auth-session'] = \
                    result.get_header('x-auth-session')
                self.do_logout = True
                LOG.info(_LI('ZFSSA version: %s'),
                         result.get_header('x-zfssa-version'))
            elif result.status == http_client.NOT_FOUND:
                raise RestClientError(result.status, name="ERR_RESTError",
                                      message="REST Not Available: "
                                              "Please Upgrade")
        except RestClientError:
            # 'authorization' may already have been removed above.
            self.headers.pop('authorization', None)
            raise

    def login(self, auth_str):
        """Login to an appliance using a user name and password.

        Start a session like what is done logging into the BUI.
        This is not a requirement to run REST commands, since the protocol
        is stateless.  What it does is set up a cookie session so that
        some server side caching can be done.  If login is used remember
        to call logout when finished.
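        A sketch of a typical session (illustrative only; ``user`` and
        ``password`` are assumed credentials, and ``base64`` is encoded by
        the caller, as the ZFSSA drivers do)::

            auth_str = base64.encodestring('%s:%s'
                                           % (user, password))[:-1]
            client.login(auth_str)
            try:
                ret = client.get('/api/system/v1/version')
            finally:
                client.logout()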
:param auth_str: Authorization string (base64) """ self.auth_str = auth_str self._authorize() def logout(self): """Logout of an appliance""" result = None try: result = self.delete("/access/v1", base_path="/api") except RestClientError: pass self.headers.clear() self.do_logout = False return result def islogin(self): """return if client is login""" return self.do_logout @staticmethod def mkpath(*args, **kwargs): """Make a path?query string for making a REST request :cmd_params args: The path part :cmd_params kwargs: The query part """ buf = six.StringIO() query = "?" for arg in args: buf.write("/") buf.write(arg) for k in kwargs: buf.write(query) if query == "?": query = "&" buf.write(k) buf.write("=") buf.write(kwargs[k]) return buf.getvalue() def request(self, path, request, body=None, **kwargs): """Make an HTTP request and return the results :param path: Path used with the initialized URL to make a request :param request: HTTP request type (GET, POST, PUT, DELETE) :param body: HTTP body of request :key accept: Set HTTP 'Accept' header with this value :key base_path: Override the base_path for this request :key content: Set HTTP 'Content-Type' header with this value """ out_hdrs = dict.copy(self.headers) if kwargs.get("accept"): out_hdrs['accept'] = kwargs.get("accept") if body: if isinstance(body, dict): body = str(json.dumps(body)) if body and len(body): out_hdrs['content-length'] = len(body) zfssaurl = self._path(path, kwargs.get("base_path")) req = urllib.request.Request(zfssaurl, body, out_hdrs) req.get_method = lambda: request maxreqretries = kwargs.get("maxreqretries", 10) retry = 0 response = None LOG.debug('Request: %(request)s %(url)s', {'request': request, 'url': zfssaurl}) LOG.debug('Out headers: %s', out_hdrs) if body and body != '': LOG.debug('Body: %s', body) context = None if hasattr(ssl, '_create_unverified_context'): context = ssl._create_unverified_context() else: context = None while retry < maxreqretries: try: if context: response = urllib.request.urlopen(req, timeout=self.timeout, context=context) else: response = urllib.request.urlopen(req, timeout=self.timeout) except urllib.error.HTTPError as err: if err.code == http_client.NOT_FOUND: LOG.debug('REST Not Found: %s', err.code) else: LOG.error(_LE('REST Not Available: %s'), err.code) if err.code == http_client.SERVICE_UNAVAILABLE and \ retry < maxreqretries: retry += 1 time.sleep(1) LOG.error(_LE('Server Busy retry request: %s'), retry) continue if (err.code == http_client.UNAUTHORIZED or err.code == http_client.INTERNAL_SERVER_ERROR) and \ '/access/v1' not in zfssaurl: try: LOG.error(_LE('Authorizing request: %(zfssaurl)s ' 'retry: %(retry)d .'), {'zfssaurl': zfssaurl, 'retry': retry}) self._authorize() req.add_header('x-auth-session', self.headers['x-auth-session']) except RestClientError: pass retry += 1 time.sleep(1) continue return RestResult(err=err) except urllib.error.URLError as err: LOG.error(_LE('URLError: %s'), err.reason) raise RestClientError(-1, name="ERR_URLError", message=err.reason) break if (response and (response.getcode() == http_client.SERVICE_UNAVAILABLE and retry >= maxreqretries)): raise RestClientError(response.getcode(), name="ERR_HTTPError", message="REST Not Available: Disabled") return RestResult(response=response) def get(self, path, **kwargs): """Make an HTTP GET request :param path: Path to resource. """ return self.request(path, "GET", **kwargs) def post(self, path, body="", **kwargs): """Make an HTTP POST request :param path: Path to resource. 
        :param body: Post data content
        """
        return self.request(path, "POST", body, **kwargs)

    def put(self, path, body="", **kwargs):
        """Make an HTTP PUT request

        :param path: Path to resource.
        :param body: Put data content
        """
        return self.request(path, "PUT", body, **kwargs)

    def delete(self, path, **kwargs):
        """Make an HTTP DELETE request

        :param path: Path to resource that will be deleted.
        """
        return self.request(path, "DELETE", **kwargs)

    def head(self, path, **kwargs):
        """Make an HTTP HEAD request

        :param path: Path to resource.
        """
        return self.request(path, "HEAD", **kwargs)
cinder-8.0.0/cinder/volume/drivers/zfssa/webdavclient.py0000664000567000056710000001334012701406250024522 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance WebDAV Client
"""

import time

from oslo_log import log

from six.moves import http_client
from six.moves import urllib

from cinder import exception
from cinder.i18n import _, _LE

LOG = log.getLogger(__name__)

bad_gateway_err = _('Check the state of the http service. Also ensure that '
                    'the https port number is the same as the one specified '
                    'in cinder.conf.')

WebDAVHTTPErrors = {
    http_client.UNAUTHORIZED: _('User not authorized to perform WebDAV '
                                'operations.'),
    http_client.BAD_GATEWAY: bad_gateway_err,
    http_client.FORBIDDEN: _('Check access permissions for the ZFS share '
                             'assigned to this driver.'),
    http_client.NOT_FOUND: _('The source volume for this WebDAV operation '
                             'was not found.'),
    http_client.INSUFFICIENT_STORAGE: _('Not enough storage space in the ZFS '
                                        'share to perform this operation.')
}

WebDAVErrors = {
    'BadStatusLine': _('http service may have been abruptly disabled or put '
                       'to maintenance state in the middle of this '
                       'operation.'),
    'Bad_Gateway': bad_gateway_err
}

# XML body for a WebDAV PROPPATCH request; build_data() substitutes the
# 'prop_name' and 'prop_val' placeholders.
propertyupdate_data = """<?xml version="1.0"?>
<D:propertyupdate xmlns:D="DAV:">
<D:set>
<D:prop>
<prop_name>prop_val</prop_name>
</D:prop>
</D:set>
</D:propertyupdate>"""


class ZFSSAWebDAVClient(object):
    def __init__(self, url, auth_str, **kwargs):
        """Initialize WebDAV Client"""
        self.https_path = url
        self.auth_str = auth_str

    def _lookup_error(self, error):
        msg = ''
        if error in http_client.responses:
            msg = http_client.responses[error]
        if error in WebDAVHTTPErrors:
            msg = WebDAVHTTPErrors[error]
        elif error in WebDAVErrors:
            msg = WebDAVErrors[error]
        return msg

    def build_data(self, data, propname, value):
        res = data.replace('prop_name', propname)
        res = res.replace('prop_val', value)
        return res

    def set_file_prop(self, filename, propname, propval):
        data = self.build_data(propertyupdate_data, propname, propval)
        return self.request(src_file=filename, data=data, method='PROPPATCH')

    def request(self, src_file="", dst_file="", method="", maxretries=10,
                data=""):
        retry = 0
        src_url = self.https_path + "/" + src_file
        dst_url = self.https_path + "/" + dst_file
        request = urllib.request.Request(url=src_url, data=data)

        if dst_file != "":
            request.add_header('Destination', dst_url)
        if method == "PROPPATCH":
            request.add_header('Translate', 'F')

        request.add_header("Authorization", "Basic %s" % self.auth_str)

        request.get_method = 
lambda: method LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s', {'method': method, 'src': src_url, 'des': dst_url}) while retry < maxretries: try: response = urllib.request.urlopen(request, timeout=None) except urllib.error.HTTPError as err: LOG.error(_LE('WebDAV returned with %(code)s error during ' '%(method)s call.'), {'code': err.code, 'method': method}) if err.code == http_client.INTERNAL_SERVER_ERROR: LOG.error(_LE('WebDAV operation failed with error code: ' '%(code)s reason: %(reason)s Retry attempt ' '%(retry)s in progress.'), {'code': err.code, 'reason': err.reason, 'retry': retry}) if retry < maxretries: retry += 1 time.sleep(1) continue msg = self._lookup_error(err.code) raise exception.WebDAVClientError(msg=msg, code=err.code, src=src_file, dst=dst_file, method=method) except http_client.BadStatusLine as err: msg = self._lookup_error('BadStatusLine') code = 'http_client.BadStatusLine' raise exception.WebDAVClientError(msg=msg, code=code, src=src_file, dst=dst_file, method=method) except urllib.error.URLError as err: reason = '' if getattr(err, 'reason'): reason = err.reason msg = self._lookup_error('Bad_Gateway') raise exception.WebDAVClientError(msg=msg, code=reason, src=src_file, dst=dst_file, method=method) break return response cinder-8.0.0/cinder/volume/drivers/zfssa/zfssarest.py0000664000567000056710000016054012701406250024104 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ZFS Storage Appliance Proxy """ import json from oslo_log import log from oslo_service import loopingcall from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.volume.drivers.zfssa import restclient from cinder.volume.drivers.zfssa import webdavclient LOG = log.getLogger(__name__) def factory_restclient(url, **kwargs): return restclient.RestClientURL(url, **kwargs) class ZFSSAApi(object): """ZFSSA API proxy class""" def __init__(self): self.host = None self.url = None self.rclient = None def __del__(self): if self.rclient and self.rclient.islogin(): self.rclient.logout() def _is_pool_owned(self, pdata): """Returns True if the pool's owner is the same as the host.""" svc = '/api/system/v1/version' ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting version: ' 'svc: %(svc)s.' 
'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'svc': svc, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) vdata = json.loads(ret.data) return vdata['version']['asn'] == pdata['pool']['asn'] and \ vdata['version']['nodename'] == pdata['pool']['owner'] def get_pool_details(self, pool): """Get properties of a pool.""" svc = '/api/storage/v1/pools/%s' % pool ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error Getting Pool Stats: ' 'Pool: %(pool)s ' 'Return code: %(status)d ' 'Message: %(data)s.') % {'pool': pool, 'status': ret.status, 'data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) if not self._is_pool_owned(val): exception_msg = (_('Error Pool ownership: ' 'Pool %(pool)s is not owned ' 'by %(host)s.') % {'pool': pool, 'host': self.host}) LOG.error(exception_msg) raise exception.InvalidInput(reason=exception_msg) return val['pool'] def set_host(self, host, timeout=None): self.host = host self.url = "https://" + self.host + ":215" self.rclient = factory_restclient(self.url, timeout=timeout) def login(self, auth_str): """Login to the appliance""" if self.rclient and not self.rclient.islogin(): self.rclient.login(auth_str) def logout(self): self.rclient.logout() def verify_service(self, service, status='online'): """Checks whether a service is online or not""" svc = '/api/service/v1/services/' + service ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error Verifying ' 'Service: %(service)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'service': service, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) data = json.loads(ret.data)['service'] if data[''] != status: exception_msg = (_('%(service)s Service is not %(status)s ' 'on storage appliance: %(host)s') % {'service': service, 'status': status, 'host': self.host}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def get_asn(self): """Returns appliance asn.""" svc = '/api/system/v1/version' ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting appliance version details. ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) return val['version']['asn'] def get_replication_targets(self): """Returns all replication targets configured on the appliance.""" svc = '/api/storage/v1/replication/targets' ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting replication target details. 
' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) return val def edit_inherit_replication_flag(self, pool, project, volume, set=True): """Edit the inherit replication flag for volume.""" svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s' '/filesystems/%(volume)s/replication' % {'pool': pool, 'project': project, 'volume': volume}) arg = {'inherited': set} ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error setting replication inheritance ' 'to %(set)s ' 'for volume: %(vol)s ' 'project %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'set': set, 'project': project, 'vol': volume, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def create_replication_action(self, host_pool, host_project, tgt_name, tgt_pool, volume): """Create a replication action.""" arg = {'pool': host_pool, 'project': host_project, 'target_pool': tgt_pool, 'target': tgt_name} if volume is not None: arg.update({'share': volume}) svc = '/api/storage/v1/replication/actions' ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating replication action on: ' 'pool: %(pool)s ' 'Project: %(proj)s ' 'volume: %(vol)s ' 'for target: %(tgt)s and pool: %(tgt_pool)s' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'pool': host_pool, 'proj': host_project, 'vol': volume, 'tgt': tgt_name, 'tgt_pool': tgt_pool, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) return val['action']['id'] def delete_replication_action(self, action_id): """Delete a replication action.""" svc = '/api/storage/v1/replication/actions/%s' % action_id ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = (_('Error Deleting ' 'replication action: %(id)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'id': action_id, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def send_repl_update(self, action_id): """Send replication update Send replication update to the target appliance and then wait for it to complete. """ svc = '/api/storage/v1/replication/actions/%s/sendupdate' % action_id ret = self.rclient.put(svc) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error sending replication update ' 'for action id: %(id)s . ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'id': action_id, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def _loop_func(): svc = '/api/storage/v1/replication/actions/%s' % action_id ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting replication action: %(id)s. 
' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'id': action_id, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) if val['action']['last_result'] == 'success': raise loopingcall.LoopingCallDone() elif (val['action']['last_result'] == '' and val['action']['state'] == 'sending'): pass else: exception_msg = (_('Error sending replication update. ' 'Returned error: %(err)s. ' 'Action: %(id)s.') % {'err': val['action']['last_result'], 'id': action_id}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) timer = loopingcall.FixedIntervalLoopingCall(_loop_func) timer.start(interval=5).wait() def get_replication_source(self, asn): """Return the replication source json which has a matching asn.""" svc = "/api/storage/v1/replication/sources" ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting replication source details. ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) for source in val['sources']: if source['asn'] == asn: return source return None def sever_replication(self, package, src_name, project=None): """Sever Replication at the destination. This method will sever the package and move the volume to a project, if project name is not passed in then the package name is selected as the project name """ svc = ('/api/storage/v1/replication/sources/%(src)s/packages/%(pkg)s' '/sever' % {'src': src_name, 'pkg': package}) if not project: project = package arg = {'projname': project} ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error severing the package: %(package)s ' 'from source: %(src)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'package': package, 'src': src_name, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def move_volume(self, pool, project, volume, tgt_project): """Move a LUN from one project to another within the same pool.""" svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s' '/filesystems/%(volume)s' % {'pool': pool, 'project': project, 'volume': volume}) arg = {'project': tgt_project} ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error moving volume: %(vol)s ' 'from source project: %(src)s ' 'to target project: %(tgt)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'vol': volume, 'src': project, 'tgt': tgt_project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def delete_project(self, pool, project): """Delete a project.""" svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s' % {'pool': pool, 'project': project}) ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = (_('Error Deleting ' 'project: %(project)s ' 'on pool: %(pool)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'project': project, 'pool': pool, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def get_project_stats(self, pool, project): """Get project stats. 
Get available space and total space of a project returns (avail, total). """ svc = '/api/storage/v1/pools/%s/projects/%s' % (pool, project) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error Getting Project Stats: ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) avail = val['project']['space_available'] total = avail + val['project']['space_total'] return avail, total def create_project(self, pool, project, compression=None, logbias=None): """Create a project on a pool. Check first whether the pool exists. """ self.verify_pool(pool) svc = '/api/storage/v1/pools/' + pool + '/projects/' + project ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = '/api/storage/v1/pools/' + pool + '/projects' arg = { 'name': project } if compression and compression != '': arg.update({'compression': compression}) if logbias and logbias != '': arg.update({'logbias': logbias}) ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating Project: ' '%(project)s on ' 'Pool: %(pool)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'project': project, 'pool': pool, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def create_initiator(self, initiator, alias, chapuser=None, chapsecret=None): """Create an iSCSI initiator.""" svc = '/api/san/v1/iscsi/initiators/alias=' + alias ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = '/api/san/v1/iscsi/initiators' arg = { 'initiator': initiator, 'alias': alias } if chapuser and chapuser != '' and chapsecret and chapsecret != '': arg.update({'chapuser': chapuser, 'chapsecret': chapsecret}) ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating Initiator: ' '%(initiator)s on ' 'Alias: %(alias)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'initiator': initiator, 'alias': alias, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def add_to_initiatorgroup(self, initiator, initiatorgroup): """Add an iSCSI initiator to initiatorgroup""" svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = '/api/san/v1/iscsi/initiator-groups' arg = { 'name': initiatorgroup, 'initiators': [initiator] } ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Adding Initiator: ' '%(initiator)s on group' 'InitiatorGroup: %(initiatorgroup)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'initiator': initiator, 'initiatorgroup': initiatorgroup, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) else: val = json.loads(ret.data) inits = val['group']['initiators'] if inits is None: exception_msg = (_('Error Getting Initiators: ' 'InitiatorGroup: %(initiatorgroup)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'initiatorgroup': initiatorgroup, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise 
exception.VolumeBackendAPIException(data=exception_msg) if initiator in inits: return inits.append(initiator) svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup arg = { 'initiators': inits } ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error Adding Initiator: ' '%(initiator)s on group' 'InitiatorGroup: %(initiatorgroup)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'initiator': initiator, 'initiatorgroup': initiatorgroup, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def create_target(self, alias, interfaces=None, tchapuser=None, tchapsecret=None): """Create an iSCSI target. :param interfaces: an array with network interfaces :param tchapuser, tchapsecret: target's chapuser and chapsecret :returns: target iqn """ svc = '/api/san/v1/iscsi/targets/alias=' + alias ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = '/api/san/v1/iscsi/targets' arg = { 'alias': alias } if tchapuser and tchapuser != '' and tchapsecret and \ tchapsecret != '': arg.update({'targetchapuser': tchapuser, 'targetchapsecret': tchapsecret, 'auth': 'chap'}) if interfaces is not None and len(interfaces) > 0: arg.update({'interfaces': interfaces}) ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating Target: ' '%(alias)s' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'alias': alias, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) return val['target']['iqn'] def get_target(self, alias): """Get an iSCSI target iqn.""" svc = '/api/san/v1/iscsi/targets/alias=' + alias ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error Getting Target: ' '%(alias)s' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'alias': alias, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) return val['target']['iqn'] def add_to_targetgroup(self, iqn, targetgroup): """Add an iSCSI target to targetgroup.""" svc = '/api/san/v1/iscsi/target-groups/' + targetgroup ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svccrt = '/api/san/v1/iscsi/target-groups' arg = { 'name': targetgroup, 'targets': [iqn] } ret = self.rclient.post(svccrt, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating TargetGroup: ' '%(targetgroup)s with' 'IQN: %(iqn)s' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'targetgroup': targetgroup, 'iqn': iqn, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) return arg = { 'targets': [iqn] } ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error Adding to TargetGroup: ' '%(targetgroup)s with' 'IQN: %(iqn)s' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'targetgroup': targetgroup, 'iqn': iqn, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def verify_pool(self, pool): """Checks whether pool exists.""" svc = '/api/storage/v1/pools/' + pool ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg 
= (_('Error Verifying Pool: '
                               '%(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def verify_project(self, pool, project):
        """Checks whether project exists."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying '
                               'Project: %(project)s on '
                               'Pool: %(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'project': project,
                                'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def verify_initiator(self, iqn):
        """Check whether initiator iqn exists."""
        svc = '/api/san/v1/iscsi/initiators/' + iqn
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying '
                               'Initiator: %(iqn)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'iqn': iqn,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def verify_target(self, alias):
        """Check whether target alias exists."""
        svc = '/api/san/v1/iscsi/targets/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying '
                               'Target: %(alias)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'alias': alias,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def create_lun(self, pool, project, lun, volsize, targetgroup, specs):
        """Create a LUN.

        specs - contains volume properties (e.g. blocksize, compression).
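
        An illustrative specs dict (keys are ZFSSA LUN properties as used
        by this driver's extra specs; values shown are examples, not
        defaults)::

            specs = {'volblocksize': '8k',
                     'sparse': True,
                     'compression': 'lzjb',
                     'logbias': 'latency'}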
""" svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + '/luns' arg = { 'name': lun, 'volsize': volsize, 'targetgroup': targetgroup, 'initiatorgroup': 'com.sun.ms.vss.hg.maskAll' } if specs: arg.update(specs) ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating ' 'Volume: %(lun)s ' 'Size: %(size)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'lun': lun, 'size': volsize, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) return val def get_lun(self, pool, project, lun): """return iscsi lun properties.""" svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + "/luns/" + lun ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error Getting ' 'Volume: %(lun)s on ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'lun': lun, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeNotFound(volume_id=lun) val = json.loads(ret.data) ret = { 'name': val['lun']['name'], 'guid': val['lun']['lunguid'], 'number': val['lun']['assignednumber'], 'initiatorgroup': val['lun']['initiatorgroup'], 'size': val['lun']['volsize'], 'nodestroy': val['lun']['nodestroy'], 'targetgroup': val['lun']['targetgroup'] } if 'origin' in val['lun']: ret.update({'origin': val['lun']['origin']}) if 'custom:image_id' in val['lun']: ret.update({'image_id': val['lun']['custom:image_id']}) ret.update({'updated_at': val['lun']['custom:updated_at']}) if 'custom:cinder_managed' in val['lun']: ret.update({'cinder_managed': val['lun']['custom:cinder_managed']}) return ret def get_lun_snapshot(self, pool, project, lun, snapshot): """Return iscsi lun snapshot properties.""" svc = ('/api/storage/v1/pools/' + pool + '/projects/' + project + '/luns/' + lun + '/snapshots/' + snapshot) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_LE('Error Getting ' 'Snapshot: %(snapshot)s of ' 'Volume: %(lun)s in ' 'Pool: %(pool)s, ' 'Project: %(project)s ' 'Return code: %(ret.status)d, ' 'Message: %(ret.data)s.'), {'snapshot': snapshot, 'lun': lun, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.SnapshotNotFound(snapshot_id=snapshot) val = json.loads(ret.data)['snapshot'] ret = { 'name': val['name'], 'numclones': val['numclones'], } return ret def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup): """Set the initiatorgroup property of a LUN.""" if initiatorgroup == '': initiatorgroup = 'com.sun.ms.vss.hg.maskAll' svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + '/luns/' + lun arg = { 'initiatorgroup': initiatorgroup } ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: ' '%(initiatorgroup)s Pool: %(pool)s Project: ' '%(project)s Return code: %(ret.status)d Message: ' '%(ret.data)s.'), {'lun': lun, 'initiatorgroup': initiatorgroup, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) def delete_lun(self, pool, project, lun): """delete iscsi lun.""" svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + '/luns/' + lun ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = 
(_('Error Deleting Volume: %(lun)s from ' 'Pool: %(pool)s, Project: %(project)s. ' 'Return code: %(ret.status)d, ' 'Message: %(ret.data)s.'), {'lun': lun, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) if ret.status == restclient.Status.FORBIDDEN: # This means that the lun exists but it can't be deleted: raise exception.VolumeBackendAPIException(data=exception_msg) def create_snapshot(self, pool, project, lun, snapshot): """create snapshot.""" svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + '/luns/' + lun + '/snapshots' arg = { 'name': snapshot } ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating ' 'Snapshot: %(snapshot)s on' 'Volume: %(lun)s to ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.'), {'snapshot': snapshot, 'lun': lun, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def delete_snapshot(self, pool, project, lun, snapshot): """delete snapshot.""" svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + '/luns/' + lun + '/snapshots/' + snapshot ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = (_('Error Deleting ' 'Snapshot: %(snapshot)s on ' 'Volume: %(lun)s to ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'snapshot': snapshot, 'lun': lun, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def clone_snapshot(self, pool, project, lun, snapshot, clone_proj, clone): """clone 'snapshot' to a lun named 'clone' in project 'clone_proj'.""" svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone' arg = { 'project': clone_proj, 'share': clone, 'nodestroy': True } ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Cloning ' 'Snapshot: %(snapshot)s on ' 'Volume: %(lun)s of ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Clone project: %(clone_proj)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'snapshot': snapshot, 'lun': lun, 'pool': pool, 'project': project, 'clone_proj': clone_proj, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def set_lun_props(self, pool, project, lun, **kargs): """set lun properties.""" svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + '/luns/' + lun if kargs is None: return if 'schema' in kargs: kargs.update(kargs.pop('schema')) ret = self.rclient.put(svc, kargs) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error Setting props ' 'Props: %(props)s on ' 'Volume: %(lun)s of ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'props': kargs, 'lun': lun, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def num_clones(self, pool, project, lun, snapshot): """Checks whether snapshot has clones or not.""" svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ project + '/luns/' + lun + '/snapshots/' + snapshot ret = 
self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error Getting ' 'Snapshot: %(snapshot)s on ' 'Volume: %(lun)s to ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'snapshot': snapshot, 'lun': lun, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) return val['snapshot']['numclones'] def get_initiator_initiatorgroup(self, initiator): """Returns the initiator group of the initiator.""" groups = [] svc = "/api/san/v1/iscsi/initiator-groups" ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: msg = _('Error getting initiator groups.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) val = json.loads(ret.data) for initiator_group in val['groups']: if initiator in initiator_group['initiators']: groups.append(initiator_group["name"]) return groups def create_schema(self, schema): """Create a custom ZFSSA schema.""" base = '/api/storage/v1/schema' svc = "%(base)s/%(prop)s" % {'base': base, 'prop': schema['property']} ret = self.rclient.get(svc) if ret.status == restclient.Status.OK: LOG.warning(_LW('Property %s already exists.'), schema['property']) return ret = self.rclient.post(base, schema) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating ' 'Property: %(property)s ' 'Type: %(type)s ' 'Description: %(description)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'property': schema['property'], 'type': schema['type'], 'description': schema['description'], 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def create_schemas(self, schemas): """Create multiple custom ZFSSA schemas.""" ret = [] for schema in schemas: res = self.create_schema(schema) ret.append(res) return ret class ZFSSANfsApi(ZFSSAApi): """ZFSSA API proxy class for NFS driver""" projects_path = '/api/storage/v1/pools/%s/projects' project_path = projects_path + '/%s' shares_path = project_path + '/filesystems' share_path = shares_path + '/%s' share_snapshots_path = share_path + '/snapshots' share_snapshot_path = share_snapshots_path + '/%s' services_path = '/api/service/v1/services/' def __init__(self, *args, **kwargs): super(ZFSSANfsApi, self).__init__(*args, **kwargs) self.webdavclient = None def set_webdav(self, https_path, auth_str): self.webdavclient = webdavclient.ZFSSAWebDAVClient(https_path, auth_str) def verify_share(self, pool, project, share): """Checks whether the share exists""" svc = self.share_path % (pool, project, share) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error Verifying ' 'share: %(share)s on ' 'Project: %(project)s and ' 'Pool: %(pool)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'share': share, 'project': project, 'pool': pool, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def create_snapshot(self, pool, project, share, snapshot): """create snapshot of a share""" svc = self.share_snapshots_path % (pool, project, share) arg = { 'name': snapshot } ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating ' 'Snapshot: %(snapshot)s on' 'share: %(share)s to ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: 
%(ret.status)d ' 'Message: %(ret.data)s.') % {'snapshot': snapshot, 'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def delete_snapshot(self, pool, project, share, snapshot): """delete snapshot of a share""" svc = self.share_snapshot_path % (pool, project, share, snapshot) ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = (_('Error Deleting ' 'Snapshot: %(snapshot)s on ' 'Share: %(share)s to ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'snapshot': snapshot, 'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def create_snapshot_of_volume_file(self, src_file="", dst_file=""): src_file = '.zfs/snapshot/' + src_file return self.webdavclient.request(src_file=src_file, dst_file=dst_file, method='COPY') def delete_snapshot_of_volume_file(self, src_file=""): return self.webdavclient.request(src_file=src_file, method='DELETE') def create_volume_from_snapshot_file(self, src_file="", dst_file="", method='COPY'): return self.webdavclient.request(src_file=src_file, dst_file=dst_file, method=method) def _change_service_state(self, service, state=''): svc = self.services_path + service + '/' + state ret = self.rclient.put(svc) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error Verifying ' 'Service: %(service)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'service': service, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) data = json.loads(ret.data)['service'] LOG.debug('%(service)s service state: %(data)s', {'service': service, 'data': data}) status = 'online' if state == 'enable' else 'disabled' if data[''] != status: exception_msg = (_('%(service)s Service is not %(status)s ' 'on storage appliance: %(host)s') % {'service': service, 'status': status, 'host': self.host}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def enable_service(self, service): self._change_service_state(service, state='enable') self.verify_service(service) def disable_service(self, service): self._change_service_state(service, state='disable') self.verify_service(service, status='offline') def modify_service(self, service, edit_args=None): """Edit service properties""" if edit_args is None: edit_args = {} svc = self.services_path + service ret = self.rclient.put(svc, edit_args) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error modifying ' 'Service: %(service)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'service': service, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) data = json.loads(ret.data)['service'] LOG.debug('Modify %(service)s service ' 'return data: %(data)s', {'service': service, 'data': data}) def create_share(self, pool, project, share, args): """Create a share in the specified pool and project""" svc = self.share_path % (pool, project, share) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = self.shares_path % (pool, project) args.update({'name': share}) ret = self.rclient.post(svc, args) if ret.status != restclient.Status.CREATED: 
exception_msg = (_('Error Creating ' 'Share: %(name)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'name': share, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) else: LOG.debug('Editing properties of a pre-existing share') ret = self.rclient.put(svc, args) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error editing share: ' '%(share)s on ' 'Pool: %(pool)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s .') % {'share': share, 'pool': pool, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def get_share(self, pool, project, share): """return share properties""" svc = self.share_path % (pool, project, share) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error Getting ' 'Share: %(share)s on ' 'Pool: %(pool)s ' 'Project: %(project)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) val = json.loads(ret.data) return val['filesystem'] def get_volume(self, volume): LOG.debug('Getting volume %s.', volume) try: resp = self.webdavclient.request(src_file=volume, method='PROPFIND') except Exception: raise exception.VolumeNotFound(volume_id=volume) resp = resp.read() numclones = self._parse_prop(resp, 'numclones') result = { 'numclones': int(numclones) if numclones != '' else 0, 'updated_at': self._parse_prop(resp, 'updated_at'), 'image_id': self._parse_prop(resp, 'image_id'), 'origin': self._parse_prop(resp, 'origin'), 'cinder_managed': self._parse_prop(resp, 'cinder_managed'), } return result def delete_file(self, filename): try: self.webdavclient.request(src_file=filename, method='DELETE') except Exception: exception_msg = (_LE('Cannot delete file %s.'), filename) LOG.error(exception_msg) def set_file_props(self, file, specs): """Set custom properties to a file.""" for key in specs: self.webdavclient.set_file_prop(file, key, specs[key]) def _parse_prop(self, response, prop): """Parse a property value from the WebDAV response.""" propval = "" for line in response.split("\n"): if prop in line: try: propval = line[(line.index('>') + 1):line.index('. ' 'The default is to use a prefix of \'UUID-\'.'), cfg.StrOpt('sf_template_account_name', default='openstack-vtemplate', help='Account name on the SolidFire Cluster to use as owner of ' 'template/cache volumes (created if does not exist).'), cfg.BoolOpt('sf_allow_template_caching', default=True, help='Create an internal cache of copy of images when ' 'a bootable volume is created to eliminate fetch from ' 'glance and qemu-conversion on subsequent calls.'), cfg.StrOpt('sf_svip', help='Overrides default cluster SVIP with the one specified. ' 'This is required or deployments that have implemented ' 'the use of VLANs for iSCSI networks in their cloud.'), cfg.BoolOpt('sf_enable_volume_mapping', default=True, help='Create an internal mapping of volume IDs and account. ' 'Optimizes lookups and performance at the expense of ' 'memory, very large deployments may want to consider ' 'setting to False.'), cfg.PortOpt('sf_api_port', default=443, help='SolidFire API port. 
Useful if the device api is behind ' 'a proxy on a different port.'), cfg.BoolOpt('sf_enable_vag', default=False, help='Utilize volume access groups on a per-tenant basis.')] CONF = cfg.CONF CONF.register_opts(sf_opts) # SolidFire API Error Constants xExceededLimit = 'xExceededLimit' xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup' xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist' xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup' def retry(exc_tuple, tries=5, delay=1, backoff=2): def retry_dec(f): @six.wraps(f) def func_retry(*args, **kwargs): _tries, _delay = tries, delay while _tries > 1: try: return f(*args, **kwargs) except exc_tuple: time.sleep(_delay) _tries -= 1 _delay *= backoff LOG.debug('Retrying %(args)s, %(tries)s attempts ' 'remaining...', {'args': args, 'tries': _tries}) # NOTE(jdg): Don't log the params passed here # some cmds like createAccount will have sensitive # info in the params, grab only the second tuple # which should be the Method msg = (_('Retry count exceeded for command: %s') % (args[1],)) LOG.error(msg) raise exception.SolidFireAPIException(message=msg) return func_retry return retry_dec class SolidFireDriver(san.SanISCSIDriver): """OpenStack driver to enable SolidFire cluster. Version history: 1.0 - Initial driver 1.1 - Refactor, clone support, qos by type and minor bug fixes 1.2 - Add xfr and retype support 1.2.1 - Add export/import support 1.2.2 - Catch VolumeNotFound on accept xfr 2.0.0 - Move from httplib to requests 2.0.1 - Implement SolidFire Snapshots 2.0.2 - Implement secondary account """ VERSION = '2.0.2' sf_qos_dict = {'slow': {'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 200}, 'medium': {'minIOPS': 200, 'maxIOPS': 400, 'burstIOPS': 400}, 'fast': {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}, 'performant': {'minIOPS': 2000, 'maxIOPS': 4000, 'burstIOPS': 4000}, 'off': None} sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] cluster_stats = {} retry_exc_tuple = (exception.SolidFireRetryableException, requests.exceptions.ConnectionError) retryable_errors = ['xDBVersionMismatch', 'xMaxSnapshotsPerVolumeExceeded', 'xMaxClonesPerVolumeExceeded', 'xMaxSnapshotsPerNodeExceeded', 'xMaxClonesPerNodeExceeded', 'xNotReadyForIO'] def __init__(self, *args, **kwargs): super(SolidFireDriver, self).__init__(*args, **kwargs) self.cluster_uuid = None self.configuration.append_config_values(sf_opts) self._endpoint = self._build_endpoint_info() self.template_account_id = None self.max_volumes_per_account = 1990 self.volume_map = {} try: self._update_cluster_status() except exception.SolidFireAPIException: pass if self.configuration.sf_allow_template_caching: account = self.configuration.sf_template_account_name self.template_account_id = self._create_template_account(account) self.target_driver = SolidFireISCSI(solidfire_driver=self, configuration=self.configuration) self._set_cluster_uuid() def __getattr__(self, attr): return getattr(self.target_driver, attr) def _set_cluster_uuid(self): self.cluster_uuid = ( self._get_cluster_info()['clusterInfo']['uuid']) def _parse_provider_id_string(self, id_string): return tuple(id_string.split()) def _create_provider_id_string(self, resource_id, account_or_vol_id, cluster_uuid=None): # NOTE(jdg): We use the same format, but in the case # of snapshots, we don't have an account id, we instead # swap that with the parent volume id cluster_id = self.cluster_uuid # We allow specifying a remote cluster if cluster_uuid: cluster_id = cluster_uuid return "%s %s %s" % (resource_id, account_or_vol_id, 
cluster_id)

    def _init_snapshot_mappings(self, srefs):
        updates = []
        sf_snaps = self._issue_api_request(
            'ListSnapshots', {}, version='6.0')['result']['snapshots']
        for s in srefs:
            seek_name = '%s%s' % (self.configuration.sf_volume_prefix,
                                  s['id'])
            sfsnap = next(
                (ss for ss in sf_snaps if ss['name'] == seek_name), None)
            if sfsnap:
                id_string = self._create_provider_id_string(
                    sfsnap['snapshotID'], sfsnap['volumeID'])
                if s.get('provider_id') != id_string:
                    updates.append(
                        {'id': s['id'],
                         'provider_id': id_string})
        return updates

    def _init_volume_mappings(self, vrefs):
        updates = []
        sf_vols = self._issue_api_request('ListActiveVolumes',
                                          {})['result']['volumes']
        self.volume_map = {}
        for v in vrefs:
            seek_name = '%s%s' % (self.configuration.sf_volume_prefix,
                                  v['id'])
            sfvol = next(
                (sv for sv in sf_vols if sv['name'] == seek_name), None)
            if sfvol:
                # Mirror _init_snapshot_mappings: compare against the full
                # provider-id string we would store, and queue an update
                # only when it differs.
                id_string = self._create_provider_id_string(
                    sfvol['volumeID'], sfvol['accountID'])
                if v.get('provider_id') != id_string:
                    updates.append(
                        {'id': v['id'],
                         'provider_id': id_string})
        return updates

    def update_provider_info(self, vrefs, snaprefs):
        volume_updates = self._init_volume_mappings(vrefs)
        snapshot_updates = self._init_snapshot_mappings(snaprefs)
        return (volume_updates, snapshot_updates)

    def _create_template_account(self, account_name):
        # We raise an API exception if the account doesn't exist
        # We need to take account_prefix settings into consideration
        # This just uses the same method to do template account create
        # as we use for any other OpenStack account
        account_name = self._get_sf_account_name(account_name)
        try:
            id = self._issue_api_request(
                'GetAccountByName',
                {'username': account_name})['result']['account']['accountID']
        except exception.SolidFireAPIException:
            chap_secret = self._generate_random_string(12)
            params = {'username': account_name,
                      'initiatorSecret': chap_secret,
                      'targetSecret': chap_secret,
                      'attributes': {}}
            id = self._issue_api_request('AddAccount',
                                         params)['result']['accountID']
        return id

    def _build_endpoint_info(self, **kwargs):
        endpoint = {}

        endpoint['mvip'] = (
            kwargs.get('mvip', self.configuration.san_ip))
        endpoint['login'] = (
            kwargs.get('login', self.configuration.san_login))
        endpoint['passwd'] = (
            kwargs.get('passwd', self.configuration.san_password))
        endpoint['port'] = (
            kwargs.get('port', self.configuration.sf_api_port))
        endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'],
                                             endpoint['port'])

        # TODO(jdg): consider a call to GetAPI and setting version
        return endpoint

    @retry(retry_exc_tuple, tries=6)
    def _issue_api_request(self, method, params, version='1.0',
                           endpoint=None):
        if params is None:
            params = {}

        if endpoint is None:
            endpoint = self._endpoint

        payload = {'method': method, 'params': params}
        url = '%s/json-rpc/%s/' % (endpoint['url'], version)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
            req = requests.post(url,
                                data=json.dumps(payload),
                                auth=(endpoint['login'], endpoint['passwd']),
                                verify=False,
                                timeout=30)
        response = req.json()
        req.close()
        if (('error' in response) and
                (response['error']['name'] in self.retryable_errors)):
            msg = ('Retryable error (%s) encountered during '
                   'SolidFire API call.'
% response['error']['name']) LOG.debug(msg) raise exception.SolidFireRetryableException(message=msg) if 'error' in response: msg = _('API response: %s') % response raise exception.SolidFireAPIException(msg) return response def _get_volumes_by_sfaccount(self, account_id): """Get all volumes on cluster for specified account.""" params = {'accountID': account_id} return self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] def _get_sfaccount_by_name(self, sf_account_name): """Get SolidFire account object by name.""" sfaccount = None params = {'username': sf_account_name} try: data = self._issue_api_request('GetAccountByName', params) if 'result' in data and 'account' in data['result']: LOG.debug('Found solidfire account: %s', sf_account_name) sfaccount = data['result']['account'] except exception.SolidFireAPIException as ex: if 'xUnknownAccount' in ex.msg: return sfaccount else: raise return sfaccount def _get_sf_account_name(self, project_id): """Build the SolidFire account name to use.""" prefix = self.configuration.sf_account_prefix or '' if prefix == 'hostname': prefix = socket.gethostname() return '%s%s%s' % (prefix, '-' if prefix else '', project_id) def _get_sfaccount(self, project_id): sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: raise exception.SolidFireAccountNotFound( account_name=sf_account_name) return sfaccount def _create_sfaccount(self, project_id): """Create account on SolidFire device if it doesn't already exist. We're first going to check if the account already exists, if it does just return it. If not, then create it. """ sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: LOG.debug('solidfire account: %s does not exist, create it...', sf_account_name) chap_secret = self._generate_random_string(12) params = {'username': sf_account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} self._issue_api_request('AddAccount', params) sfaccount = self._get_sfaccount_by_name(sf_account_name) return sfaccount def _get_cluster_info(self): """Query the SolidFire cluster for some property info.""" params = {} return self._issue_api_request('GetClusterInfo', params)['result'] def _generate_random_string(self, length): """Generates random_string to use for CHAP password.""" char_set = string.ascii_uppercase + string.digits return ''.join(random.sample(char_set, length)) def _get_model_info(self, sfaccount, sf_volume_id): """Gets the connection info for specified account and volume.""" cluster_info = self._get_cluster_info() if self.configuration.sf_svip is None: iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' else: iscsi_portal = self.configuration.sf_svip chap_secret = sfaccount['targetSecret'] found_volume = False iteration_count = 0 while not found_volume and iteration_count < 600: volume_list = self._get_volumes_by_sfaccount( sfaccount['accountID']) iqn = None for v in volume_list: if v['volumeID'] == sf_volume_id: iqn = v['iqn'] found_volume = True break if not found_volume: time.sleep(2) iteration_count += 1 if not found_volume: LOG.error(_LE('Failed to retrieve volume SolidFire-' 'ID: %s in get_by_account!'), sf_volume_id) raise exception.VolumeNotFound(volume_id=sf_volume_id) model_update = {} # NOTE(john-griffith): SF volumes are always at lun 0 model_update['provider_location'] = ('%s %s %s' % (iscsi_portal, iqn, 0)) 
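# NOTE(editor): purely for illustration, with made-up values (an SVIP of
# 172.17.1.5, the default iSCSI port and a SolidFire-style IQN), the two
# fields built here and just below look like:
#   provider_location = '172.17.1.5:3260 iqn.2010-01.com.solidfire:abcd.uuid-1234.42 0'
#   provider_auth = 'CHAP <account username> <chap secret>'
# i.e. '<portal> <iqn> <lun>' and 'CHAP <user> <secret>' respectively.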
model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], chap_secret)) if not self.configuration.sf_emulate_512: model_update['provider_geometry'] = ('%s %s' % (4096, 4096)) model_update['provider_id'] = ( self._create_provider_id_string(sf_volume_id, sfaccount['accountID'], self.cluster_uuid)) return model_update def _snapshot_discovery(self, src_uuid, params, vref): # NOTE(jdg): First check the SF snapshots # if we don't find a snap by the given name, just move on to check # volumes. This may be a running system that was updated from # before we did snapshots, so need to check both is_clone = False sf_vol = None snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid) snaps = self._get_sf_snapshots() snap = next((s for s in snaps if s["name"] == snap_name), None) if snap: params['snapshotID'] = int(snap['snapshotID']) params['volumeID'] = int(snap['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) else: sf_vol = self._get_sf_volume(src_uuid) if sf_vol is None: raise exception.VolumeNotFound(volume_id=src_uuid) params['volumeID'] = int(sf_vol['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) is_clone = True return params, is_clone, sf_vol def _do_clone_volume(self, src_uuid, vref, sf_src_snap=None): """Create a clone of an existing volume or snapshot.""" attributes = {} sf_account = self._get_create_account(vref['project_id']) params = {'name': '%(prefix)s%(id)s' % {'prefix': self.configuration.sf_volume_prefix, 'id': vref['id']}, 'newAccountID': sf_account['accountID']} is_clone = False sf_vol = None if sf_src_snap: # In some scenarios we are passed the snapshot information that we # are supposed to clone. params['snapshotID'] = sf_src_snap['snapshotID'] params['volumeID'] = sf_src_snap['volumeID'] params['newSize'] = int(vref['size'] * units.Gi) else: params, is_clone, sf_vol = self._snapshot_discovery(src_uuid, params, vref) data = self._issue_api_request('CloneVolume', params, version='6.0') if (('result' not in data) or ('volumeID' not in data['result'])): msg = _("API response: %s") % data raise exception.SolidFireAPIException(msg) sf_volume_id = data['result']['volumeID'] # NOTE(jdg): all attributes are copied via clone, need to do an update # to set any that were provided params = {'volumeID': sf_volume_id} create_time = vref['created_at'].isoformat() attributes = {'uuid': vref['id'], 'is_clone': 'True', 'src_uuid': src_uuid, 'created_at': create_time} params['attributes'] = attributes data = self._issue_api_request('ModifyVolume', params) model_update = self._get_model_info(sf_account, sf_volume_id) if model_update is None: mesg = _('Failed to get model update from clone') raise exception.SolidFireAPIException(mesg) # Increment the usage count, just for data collection # We're only doing this for clones, not create_from snaps if is_clone: data = self._update_attributes(sf_vol) return (data, sf_account, model_update) def _update_attributes(self, sf_vol): cloned_count = sf_vol['attributes'].get('cloned_count', 0) cloned_count += 1 attributes = sf_vol['attributes'] attributes['cloned_count'] = cloned_count params = {'volumeID': int(sf_vol['volumeID'])} params['attributes'] = attributes return self._issue_api_request('ModifyVolume', params) def _do_volume_create(self, sf_account, params): sf_volid = self._issue_api_request( 'CreateVolume', params)['result']['volumeID'] return self._get_model_info(sf_account, sf_volid) def _do_snapshot_create(self, params): model_update = {} snapshot_id = self._issue_api_request( 'CreateSnapshot', 
params, version='6.0')['result']['snapshotID'] snaps = self._get_sf_snapshots() snap = ( next((s for s in snaps if int(s["snapshotID"]) == int(snapshot_id)), None)) model_update['provider_id'] = ( self._create_provider_id_string(snap['snapshotID'], snap['volumeID'], self.cluster_uuid)) return model_update def _set_qos_presets(self, volume): qos = {} valid_presets = self.sf_qos_dict.keys() # First look to see if they included a preset presets = [i.value for i in volume.get('volume_metadata') if i.key == 'sf-qos' and i.value in valid_presets] if len(presets) > 0: if len(presets) > 1: LOG.warning(_LW('More than one valid preset was ' 'detected, using %s'), presets[0]) qos = self.sf_qos_dict[presets[0]] else: # look for explicit settings for i in volume.get('volume_metadata'): if i.key in self.sf_qos_keys: qos[i.key] = int(i.value) return qos def _set_qos_by_volume_type(self, ctxt, type_id): qos = {} volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # NOTE(jdg): We prefer the qos_specs association # and over-ride any existing # extra-specs settings if present if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key in self.sf_qos_keys: qos[key] = int(value) return qos def _get_sf_volume(self, uuid, params=None): if params: vols = self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] else: vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] found_count = 0 sf_volref = None for v in vols: # NOTE(jdg): In the case of "name" we can't # update that on manage/import, so we use # the uuid attribute meta = v.get('attributes') alt_id = '' if meta: alt_id = meta.get('uuid', '') if uuid in v['name'] or uuid in alt_id: found_count += 1 sf_volref = v LOG.debug("Mapped SolidFire volumeID %(volume_id)s " "to cinder ID %(uuid)s.", {'volume_id': v['volumeID'], 'uuid': uuid}) if found_count == 0: # NOTE(jdg): Previously we would raise here, but there are cases # where this might be a cleanup for a failed delete. 
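# (A concrete example, hypothetical but representative: delete_volume
# re-run after a partially failed delete, where the SolidFire volume is
# already gone but the Cinder record is not.)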
# Until we get better states we'll just log an error LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid) if found_count > 1: LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."), {'count': found_count, 'uuid': uuid}) raise exception.DuplicateSfVolumeNames(vol_name=uuid) return sf_volref def _get_sf_snapshots(self, sf_volid=None): params = {} if sf_volid: params = {'volumeID': sf_volid} return self._issue_api_request( 'ListSnapshots', params, version='6.0')['result']['snapshots'] def _create_image_volume(self, context, image_meta, image_service, image_id): with image_utils.TemporaryImages.fetch(image_service, context, image_id) as tmp_image: data = image_utils.qemu_img_info(tmp_image) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file, }) virtual_size = int(math.ceil(float(data.virtual_size) / units.Gi)) attributes = {} attributes['image_info'] = {} attributes['image_info']['image_updated_at'] = ( image_meta['updated_at'].isoformat()) attributes['image_info']['image_name'] = ( image_meta['name']) attributes['image_info']['image_created_at'] = ( image_meta['created_at'].isoformat()) attributes['image_info']['image_id'] = image_meta['id'] params = {'name': 'OpenStackIMG-%s' % image_id, 'accountID': self.template_account_id, 'sliceCount': 1, 'totalSize': int(virtual_size * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': {}} sf_account = self._issue_api_request( 'GetAccountByID', {'accountID': self.template_account_id})['result']['account'] template_vol = self._do_volume_create(sf_account, params) tvol = {} tvol['id'] = image_id tvol['provider_location'] = template_vol['provider_location'] tvol['provider_auth'] = template_vol['provider_auth'] connector = {'multipath': False} conn = self.initialize_connection(tvol, connector) attach_info = super(SolidFireDriver, self)._connect_device(conn) properties = 'na' try: image_utils.convert_image(tmp_image, attach_info['device']['path'], 'raw', run_as_root=True) data = image_utils.qemu_img_info(attach_info['device']['path'], run_as_root=True) if data.file_format != 'raw': raise exception.ImageUnacceptable( image_id=image_id, reason=_("Converted to %(vol_format)s, but format is " "now %(file_format)s") % {'vol_format': 'raw', 'file_format': data. 
file_format}) except Exception as exc: vol = self._get_sf_volume(image_id) LOG.error(_LE('Failed image conversion during ' 'cache creation: %s'), exc) LOG.debug('Removing SolidFire Cache Volume (SF ID): %s', vol['volumeID']) self._detach_volume(context, attach_info, tvol, properties) self._issue_api_request('DeleteVolume', {'volumeID': vol['volumeID']}) return self._detach_volume(context, attach_info, tvol, properties) sf_vol = self._get_sf_volume(image_id, params) LOG.debug('Successfully created SolidFire Image Template ' 'for image-id: %s', image_id) return sf_vol def _verify_image_volume(self, context, image_meta, image_service): # This method just verifies that IF we have a cache volume that # it's still up to date and current WRT the image in Glance # ie an image-update hasn't occurred since we grabbed it # If it's out of date, just delete it and we'll create a new one # Any other case we don't care and just return without doing anything params = {'accountID': self.template_account_id} sf_vol = self._get_sf_volume(image_meta['id'], params) if sf_vol is None: return # Check updated_at field, delete copy and update if needed if sf_vol['attributes']['image_info']['image_updated_at'] == ( image_meta['updated_at'].isoformat()): return else: # Bummer, it's been updated, delete it params = {'accountID': self.template_account_id} params['volumeID'] = sf_vol['volumeID'] self._issue_api_request('DeleteVolume', params) if not self._create_image_volume(context, image_meta, image_service, image_meta['id']): msg = _("Failed to create SolidFire Image-Volume") raise exception.SolidFireAPIException(msg) def _get_sfaccounts_for_tenant(self, cinder_project_id): accounts = self._issue_api_request( 'ListAccounts', {})['result']['accounts'] # Note(jdg): On SF we map account-name to OpenStack's tenant ID # we use tenantID in here to get secondaries that might exist # Also: we expect this to be sorted, so we get the primary first # in the list return sorted([acc for acc in accounts if cinder_project_id in acc['username']]) def _get_all_active_volumes(self, cinder_uuid=None): params = {} volumes = self._issue_api_request('ListActiveVolumes', params)['result']['volumes'] if cinder_uuid: vols = ([v for v in volumes if cinder_uuid in v['name']]) else: vols = [v for v in volumes] return vols def _get_all_deleted_volumes(self, cinder_uuid=None): params = {} vols = self._issue_api_request('ListDeletedVolumes', params)['result']['volumes'] if cinder_uuid: deleted_vols = ([v for v in vols if cinder_uuid in v['name']]) else: deleted_vols = [v for v in vols] return deleted_vols def _get_account_create_availability(self, accounts): # we'll check both the primary and the secondary # if it exists and return whichever one has count # available. for acc in accounts: if len(self._get_volumes_for_account( acc['accountID'])) < self.max_volumes_per_account: return acc if len(accounts) == 1: sfaccount = self._create_sfaccount(accounts[0]['username'] + '_') return sfaccount return None def _get_create_account(self, proj_id): # Retrieve SolidFire accountID to be used for creating volumes. sf_accounts = self._get_sfaccounts_for_tenant(proj_id) if not sf_accounts: sf_account = self._create_sfaccount(proj_id) else: # Check availability for creates sf_account = self._get_account_create_availability(sf_accounts) if not sf_account: # TODO(jdg): We're not doing tertiaries, so fail.
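# NOTE(editor): to illustrate the sharding scheme above with a
# hypothetical project ID 'd9af...' and no sf_account_prefix set: the
# primary SF account is named 'd9af...'; once it reaches
# max_volumes_per_account, a secondary named 'd9af..._' is created, and
# with both full there is no third tier, so we give up here.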
msg = _('Volumes/account exceeded on both primary and ' 'secondary SolidFire accounts.') raise exception.SolidFireDriverException(msg) return sf_account def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None): # ListVolumesForAccount gives both Active and Deleted # we require the solidfire accountID, uuid of volume # is optional params = {'accountID': sf_account_id} vols = self._issue_api_request('ListVolumesForAccount', params)['result']['volumes'] if cinder_uuid: vlist = [v for v in vols if cinder_uuid in v['name']] else: vlist = [v for v in vols] vlist = sorted(vlist, key=lambda k: k['volumeID']) return vlist def _create_vag(self, iqn, vol_id=None): """Create a volume access group(vag). Returns the vag_id. """ vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) params = {'name': vag_name, 'initiators': [iqn], 'volumes': [vol_id], 'attributes': {'openstack': True}} try: result = self._issue_api_request('CreateVolumeAccessGroup', params, version='7.0') return result['result']['volumeAccessGroupID'] except exception.SolidFireAPIException as error: if xExceededLimit in error.msg: if iqn in error.msg: # Initiator double registered. return self._safe_create_vag(iqn, vol_id) else: # VAG limit reached. Purge and start over. self._purge_vags() return self._safe_create_vag(iqn, vol_id) else: raise def _safe_create_vag(self, iqn, vol_id=None): # Potential race condition with simultaneous volume attaches to the # same host. To help avoid this, VAG creation makes a best attempt at # finding and using an existing VAG. vags = self._get_vags_by_name(iqn) if vags: # Filter through the vags and find the one with matching initiator vag = next((v for v in vags if iqn in v['initiators']), None) if vag: return vag['volumeAccessGroupID'] else: # No matches, use the first result, add initiator IQN. vag_id = vags[0]['volumeAccessGroupID'] return self._add_initiator_to_vag(iqn, vag_id) return self._create_vag(iqn, vol_id) def _base_get_vags(self): params = {} vags = self._issue_api_request( 'ListVolumeAccessGroups', params, version='7.0')['result']['volumeAccessGroups'] return vags def _get_vags_by_name(self, iqn): """Retrieve SolidFire volume access group objects by name. Returns an array of vags with a matching name value. Returns an empty array if there are no matches. """ vags = self._base_get_vags() vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) matching_vags = [vag for vag in vags if vag['name'] == vag_name] return matching_vags def _add_initiator_to_vag(self, iqn, vag_id): # Added a vag_id return as there is a chance that we might have to # create a new VAG if our target VAG is deleted underneath us. params = {"initiators": [iqn], "volumeAccessGroupID": vag_id} try: self._issue_api_request('AddInitiatorsToVolumeAccessGroup', params, version='7.0') return vag_id except exception.SolidFireAPIException as error: if xAlreadyInVolumeAccessGroup in error.msg: return vag_id elif xVolumeAccessGroupIDDoesNotExist in error.msg: # No locking means sometimes a VAG can be removed by a parallel # volume detach against the same host. return self._safe_create_vag(iqn) else: raise def _add_volume_to_vag(self, vol_id, iqn, vag_id): # Added a vag_id return to be consistent with add_initiator_to_vag. It # isn't necessary but may be helpful in the future. 
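# NOTE(editor): mirroring _add_initiator_to_vag above, the handler
# below treats xAlreadyInVolumeAccessGroup as success and falls back to
# _safe_create_vag() if the VAG was deleted underneath us by a
# concurrent detach on the same host.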
params = {"volumeAccessGroupID": vag_id, "volumes": [vol_id]} try: self._issue_api_request('AddVolumesToVolumeAccessGroup', params, version='7.0') return vag_id except exception.SolidFireAPIException as error: if xAlreadyInVolumeAccessGroup in error.msg: return vag_id elif xVolumeAccessGroupIDDoesNotExist in error.msg: return self._safe_create_vag(iqn, vol_id) else: raise def _remove_volume_from_vag(self, vol_id, vag_id): params = {"volumeAccessGroupID": vag_id, "volumes": [vol_id]} try: self._issue_api_request('RemoveVolumesFromVolumeAccessGroup', params, version='7.0') except exception.SolidFireAPIException as error: if xNotInVolumeAccessGroup in error.msg: pass elif xVolumeAccessGroupIDDoesNotExist in error.msg: pass else: raise def _remove_volume_from_vags(self, vol_id): # Due to all sorts of uncertainty around multiattach, on volume # deletion we make a best attempt at removing the vol_id from VAGs. vags = self._base_get_vags() targets = [v for v in vags if vol_id in v['volumes']] for vag in targets: self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID']) def _remove_vag(self, vag_id): params = {"volumeAccessGroupID": vag_id} try: self._issue_api_request('DeleteVolumeAccessGroup', params, version='7.0') except exception.SolidFireAPIException as error: if xVolumeAccessGroupIDDoesNotExist not in error.msg: raise def _purge_vags(self, limit=10): # Purge up to limit number of VAGs that have no active volumes, # initiators, and an OpenStack attribute. Purge oldest VAGs first. vags = self._base_get_vags() targets = [v for v in vags if v['volumes'] == [] and v['initiators'] == [] and v['deletedVolumes'] == [] and v['attributes'].get('openstack')] sorted_targets = sorted(targets, key=lambda k: k['volumeAccessGroupID']) for vag in sorted_targets[:limit]: self._remove_vag(vag['volumeAccessGroupID']) def clone_image(self, context, volume, image_location, image_meta, image_service): public = False # Check out pre-requisites: # Is template caching enabled? if not self.configuration.sf_allow_template_caching: return None, False # NOTE(jdg): Glance V2 moved from is_public to visibility # so we check both, as we don't necessarily know or want # to care which we're using. 
Will need to look at # future handling of things like shared and community # but for now, it's owner or public and that's it visibility = image_meta.get('visibility', None) if visibility and visibility == 'public': public = True elif image_meta.get('is_public', False): public = True else: if image_meta['owner'] == volume['project_id']: public = True if not public: LOG.warning(_LW("Requested image is not " "accessible by current Tenant.")) return None, False try: self._verify_image_volume(context, image_meta, image_service) except exception.SolidFireAPIException: return None, False try: (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], volume) except exception.VolumeNotFound: if self._create_image_volume(context, image_meta, image_service, image_meta['id']) is None: # We failed, dump out return None, False # Ok, should be good to go now, try it again (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], volume) return model, True def _retrieve_qos_setting(self, volume): qos = {} if (self.configuration.sf_allow_tenant_qos and volume.get('volume_metadata')is not None): qos = self._set_qos_presets(volume) ctxt = context.get_admin_context() type_id = volume.get('volume_type_id', None) if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id) return qos def create_volume(self, volume): """Create volume on SolidFire device. The account is where CHAP settings are derived from, volume is created and exported. Note that the new volume is immediately ready for use. One caveat here is that an existing user account must be specified in the API call to create a new volume. We use a set algorithm to determine account info based on passed in cinder volume object. First we check to see if the account already exists (and use it), or if it does not already exist, we'll go ahead and create it. """ slice_count = 1 attributes = {} sf_account = self._get_create_account(volume['project_id']) qos = self._retrieve_qos_setting(volume) create_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'created_at': create_time} vname = '%s%s' % (self.configuration.sf_volume_prefix, volume['id']) params = {'name': vname, 'accountID': sf_account['accountID'], 'sliceCount': slice_count, 'totalSize': int(volume['size'] * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} # NOTE(jdg): Check if we're a migration tgt, if so # use the old volume-id here for the SF Name migration_status = volume.get('migration_status', None) if migration_status and 'target' in migration_status: k, v = migration_status.split(':') vname = '%s%s' % (self.configuration.sf_volume_prefix, v) params['name'] = vname params['attributes']['migration_uuid'] = volume['id'] params['attributes']['uuid'] = v return self._do_volume_create(sf_account, params) def create_cloned_volume(self, volume, src_vref): """Create a clone of an existing volume.""" (_data, _sfaccount, model) = self._do_clone_volume( src_vref['id'], volume) return model def delete_volume(self, volume): """Delete SolidFire Volume from device. SolidFire allows multiple volumes with same name, volumeID is what's guaranteed unique. 
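Because of that, we resolve the Cinder UUID to a volumeID by searching every SolidFire account owned by this tenant (primary and any secondary) before issuing the delete.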
""" sf_vol = None accounts = self._get_sfaccounts_for_tenant(volume['project_id']) if accounts is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!"), volume['id']) LOG.error(_LE("This usually means the volume was never " "successfully created.")) return for acc in accounts: vols = self._get_volumes_for_account(acc['accountID'], volume['id']) if vols: sf_vol = vols[0] break if sf_vol is not None: params = {'volumeID': sf_vol['volumeID']} self._issue_api_request('DeleteVolume', params) if volume.get('multiattach'): self._remove_volume_from_vags(sf_vol['volumeID']) else: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!"), volume['id']) def delete_snapshot(self, snapshot): """Delete the specified snapshot from the SolidFire cluster.""" sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix, snapshot['id']) accounts = self._get_sfaccounts_for_tenant(snapshot['project_id']) snap = None for acct in accounts: params = {'accountID': acct['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol: sf_snaps = self._get_sf_snapshots(sf_vol['volumeID']) snap = next((s for s in sf_snaps if s["name"] == sf_snap_name), None) if snap: params = {'snapshotID': snap['snapshotID']} self._issue_api_request('DeleteSnapshot', params, version='6.0') return # Make sure it's not "old style" using clones as snaps LOG.debug("Snapshot not found, checking old style clones.") self.delete_volume(snapshot) def create_snapshot(self, snapshot): sfaccount = self._get_sfaccount(snapshot['project_id']) if sfaccount is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "create_snapshot operation!"), snapshot['volume_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=snapshot['volume_id']) params = {'volumeID': sf_vol['volumeID'], 'name': '%s%s' % (self.configuration.sf_volume_prefix, snapshot['id'])} return self._do_snapshot_create(params) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from the specified snapshot.""" (_data, _sfaccount, model) = self._do_clone_volume( snapshot['id'], volume) return model # Consistency group helpers def _create_group_snapshot(self, name, sf_volumes): # Group snapshot is our version of a consistency group snapshot. vol_ids = [vol['volumeID'] for vol in sf_volumes] params = {'name': name, 'volumes': vol_ids} snapshot_id = self._issue_api_request('CreateGroupSnapshot', params, version='7.0') return snapshot_id['result'] def _group_snapshot_creator(self, gsnap_name, src_vol_ids): # Common helper that takes in an array of OpenStack Volume UUIDs and # creates a SolidFire group snapshot with them. vol_names = [self.configuration.sf_volume_prefix + vol_id for vol_id in src_vol_ids] active_sf_vols = self._get_all_active_volumes() target_vols = [vol for vol in active_sf_vols if vol['name'] in vol_names] if len(src_vol_ids) != len(target_vols): msg = (_("Retrieved a different amount of SolidFire volumes for " "the provided Cinder volumes. 
Retrieved: %(ret)s " "Desired: %(des)s") % {"ret": len(target_vols), "des": len(src_vol_ids)}) raise exception.SolidFireDriverException(msg) result = self._create_group_snapshot(gsnap_name, target_vols) return result def _create_temp_group_snapshot(self, source_cg, source_vols): # Take a temporary snapshot to create the volumes for a new # consistency group. gsnap_name = ("%(prefix)s%(id)s-tmp" % {"prefix": self.configuration.sf_volume_prefix, "id": source_cg['id']}) vol_ids = [vol['id'] for vol in source_vols] self._group_snapshot_creator(gsnap_name, vol_ids) return gsnap_name def _list_group_snapshots(self): result = self._issue_api_request('ListGroupSnapshots', {}, version='7.0') return result['result']['groupSnapshots'] def _get_group_snapshot_by_name(self, name): target_snaps = self._list_group_snapshots() target = next((snap for snap in target_snaps if snap['name'] == name), None) return target def _delete_group_snapshot(self, gsnapid): params = {'groupSnapshotID': gsnapid} self._issue_api_request('DeleteGroupSnapshot', params, version='7.0') def _delete_cgsnapshot_by_name(self, snap_name): # Common function used to find and delete a snapshot. target = self._get_group_snapshot_by_name(snap_name) if not target: msg = _("Failed to find group snapshot named: %s") % snap_name raise exception.SolidFireDriverException(msg) self._delete_group_snapshot(target['groupSnapshotID']) def _find_linked_snapshot(self, target_uuid, group_snap): # Because group snapshots name each individual snapshot the group # snapshot name, we have to trawl through the SolidFire snapshots to # find the SolidFire snapshot from the group that is linked with the # SolidFire volumeID that is linked to the Cinder snapshot source # volume. source_vol = self._get_sf_volume(target_uuid) target_snap = next((sn for sn in group_snap['members'] if sn['volumeID'] == source_vol['volumeID']), None) return target_snap def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid, sf_group_snap, vol): # Find the correct SolidFire backing snapshot. sf_src_snap = self._find_linked_snapshot(target_uuid, sf_group_snap) _data, _sfaccount, model = self._do_clone_volume(src_uuid, vol, sf_src_snap) model['id'] = vol['id'] model['status'] = 'available' return model # Required consistency group functions def create_consistencygroup(self, ctxt, group): # SolidFire does not have a viable means for storing consistency group # volume associations. So, we're just going to play along with the # consistency group song and dance. There will be a lot of no-ops # because of this. return {'status': fields.ConsistencyGroupStatus.AVAILABLE} def create_consistencygroup_from_src(self, ctxt, group, volumes, cgsnapshot, snapshots, source_cg, source_vols): if cgsnapshot and snapshots: sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] sf_group_snap = self._get_group_snapshot_by_name(sf_name) # Go about creating volumes from provided snaps. vol_models = [] for vol, snap in zip(volumes, snapshots): vol_models.append(self._create_clone_from_sf_snapshot( snap['volume_id'], snap['id'], sf_group_snap, vol)) return ({'status': fields.ConsistencyGroupStatus.AVAILABLE}, vol_models) elif source_cg and source_vols: # Create temporary group snapshot. gsnap_name = self._create_temp_group_snapshot(source_cg, source_vols) try: sf_group_snap = self._get_group_snapshot_by_name(gsnap_name) # For each temporary snapshot clone the volume. 
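# NOTE(editor): overall flow for cloning a consistency group from a
# source group: take a throwaway group snapshot of the source volumes
# (done just above), clone a new volume from each member snapshot, then
# drop the temporary group snapshot in the finally block.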
vol_models = [] for vol in volumes: vol_models.append(self._create_clone_from_sf_snapshot( vol['source_volid'], vol['source_volid'], sf_group_snap, vol)) finally: self._delete_cgsnapshot_by_name(gsnap_name) return {'status': 'available'}, vol_models def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots): vol_ids = [snapshot['volume_id'] for snapshot in snapshots] vol_names = [self.configuration.sf_volume_prefix + vol_id for vol_id in vol_ids] active_sf_vols = self._get_all_active_volumes() target_vols = [vol for vol in active_sf_vols if vol['name'] in vol_names] if len(snapshots) != len(target_vols): msg = (_("Retrieved a different amount of SolidFire volumes for " "the provided Cinder snapshots. Retrieved: %(ret)s " "Desired: %(des)s") % {"ret": len(target_vols), "des": len(snapshots)}) raise exception.SolidFireDriverException(msg) snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] self._create_group_snapshot(snap_name, target_vols) return None, None def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): # Similar to create_consistencygroup, SolidFire's lack of a consistency # group object means there is nothing to update on the cluster. return None, None, None def delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots): snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] self._delete_cgsnapshot_by_name(snap_name) return None, None def delete_consistencygroup(self, ctxt, group, volumes): for vol in volumes: self.delete_volume(vol) return None, None def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update first. The name is a bit misleading as the majority of the data here is cluster data """ if refresh: try: self._update_cluster_status() except exception.SolidFireAPIException: pass return self.cluster_stats def extend_volume(self, volume, new_size): """Extend an existing volume.""" sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "extend_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) params = { 'volumeID': sf_vol['volumeID'], 'totalSize': int(new_size * units.Gi) } self._issue_api_request('ModifyVolume', params, version='5.0') def _update_cluster_status(self): """Retrieve status info for the Cluster.""" params = {} # NOTE(jdg): The SF api provides an UNBELIEVABLE amount # of stats data, this is just one of the calls results = self._issue_api_request('GetClusterCapacity', params) results = results['result']['clusterCapacity'] free_capacity = ( results['maxProvisionedSpace'] - results['usedSpace']) data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or self.__class__.__name__ data["vendor_name"] = 'SolidFire Inc' data["driver_version"] = self.VERSION data["storage_protocol"] = 'iSCSI' data['consistencygroup_support'] = True data['replication_enabled'] = True data['total_capacity_gb'] = ( float(results['maxProvisionedSpace'] / units.Gi)) data['free_capacity_gb'] = float(free_capacity / units.Gi) data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = True data['compression_percent'] = ( results['compressionPercent']) data['deduplicaton_percent'] = ( results['deDuplicationPercent']) data['thin_provision_percent'] = ( 
results['thinProvisioningPercent']) self.cluster_stats = data def initialize_connection(self, volume, connector, initiator_data=None): """Initialize the connection and return connection info. Optionally checks and utilizes volume access groups. """ properties = self._sf_initialize_connection(volume, connector, initiator_data) properties['data']['discard'] = True return properties def attach_volume(self, context, volume, instance_uuid, host_name, mountpoint): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "attach_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['attach_time'] = volume.get('attach_time', None) attributes['attached_to'] = instance_uuid params = { 'volumeID': sf_vol['volumeID'], 'attributes': attributes } self._issue_api_request('ModifyVolume', params) def terminate_connection(self, volume, properties, force): return self._sf_terminate_connection(volume, properties, force) def detach_volume(self, context, volume, attachment=None): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "detach_volume operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['attach_time'] = None attributes['attached_to'] = None params = { 'volumeID': sf_vol['volumeID'], 'attributes': attributes } self._issue_api_request('ModifyVolume', params) def accept_transfer(self, context, volume, new_user, new_project): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error(_LE("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "accept_transfer operation!"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) if new_project != volume['project_id']: # do a create_sfaccount here as this tenant # may not exist on the cluster yet sfaccount = self._create_sfaccount(new_project) params = { 'volumeID': sf_vol['volumeID'], 'accountID': sfaccount['accountID'] } self._issue_api_request('ModifyVolume', params, version='5.0') volume['project_id'] = new_project volume['user_id'] = new_user return self.target_driver.ensure_export(context, volume, None) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). 
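Note that on SolidFire a retype is metadata-only: the volume is stamped with a 'retyped_at' attribute and its QoS values are modified in place, so no data is moved.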
""" qos = {} attributes = {} sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] attributes['retyped_at'] = timeutils.utcnow().isoformat() params = {'volumeID': sf_vol['volumeID']} qos = self._set_qos_by_volume_type(ctxt, new_type['id']) if qos: params['qos'] = qos self._issue_api_request('ModifyVolume', params) return True def manage_existing(self, volume, external_ref): """Manages an existing SolidFire Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. """ sfid = external_ref.get('source-id', None) sfname = external_ref.get('name', None) if sfid is None: raise exception.SolidFireAPIException(_("Manage existing volume " "requires 'source-id'.")) # First get the volume on the SF cluster (MUST be active) params = {'startVolumeID': sfid, 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] sf_ref = vols[0] sfaccount = self._create_sfaccount(volume['project_id']) attributes = {} qos = self._retrieve_qos_setting(volume) import_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'os_imported_at': import_time, 'old_name': sfname} params = {'name': volume['name'], 'volumeID': sf_ref['volumeID'], 'accountID': sfaccount['accountID'], 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} self._issue_api_request('ModifyVolume', params, version='5.0') return self._get_model_info(sfaccount, sf_ref['volumeID']) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing LV for manage_existing. 
existing_ref is a dictionary of the form: {'name': <name of existing volume>} """ sfid = external_ref.get('source-id', None) if sfid is None: raise exception.SolidFireAPIException(_("Manage existing get size " "requires 'source-id'.")) params = {'startVolumeID': int(sfid), 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] return int(vols[0]['totalSize']) / int(units.Gi) def unmanage(self, volume): """Mark SolidFire Volume as unmanaged (export from Cinder).""" sfaccount = self._get_sfaccount(volume['project_id']) if sfaccount is None: LOG.error(_LE("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "unmanage operation!"), volume['id']) raise exception.SolidFireAPIException(_("Failed to find account " "for volume.")) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) export_time = timeutils.utcnow().isoformat() attributes = sf_vol['attributes'] attributes['os_exported_at'] = export_time params = {'volumeID': int(sf_vol['volumeID']), 'attributes': attributes} self._issue_api_request('ModifyVolume', params, version='5.0') class SolidFireISCSI(iscsi_driver.SanISCSITarget): def __init__(self, *args, **kwargs): super(SolidFireISCSI, self).__init__(*args, **kwargs) self.sf_driver = kwargs.get('solidfire_driver') def __getattr__(self, attr): return getattr(self.sf_driver, attr) def _do_iscsi_export(self, volume): sfaccount = self._get_sfaccount(volume['project_id']) model_update = {} model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], sfaccount['targetSecret'])) return model_update def create_export(self, context, volume, volume_path): return self._do_iscsi_export(volume) def ensure_export(self, context, volume, volume_path): try: return self._do_iscsi_export(volume) except exception.SolidFireAPIException: return None # Following are abc's that we make sure are caught and # paid attention to. In our case we don't use them # so just stub them out here. def remove_export(self, context, volume): pass def terminate_connection(self, volume, connector, **kwargs): pass def _sf_initialize_connection(self, volume, connector, initiator_data=None): """Initialize the connection and return connection info. Optionally checks and utilizes volume access groups. """ if self.configuration.sf_enable_vag: iqn = connector['initiator'] provider_id = volume['provider_id'] vol_id = int(self._parse_provider_id_string(provider_id)[0]) # safe_create_vag may opt to reuse vs create a vag, so we need to # add our vol_id. vag_id = self._safe_create_vag(iqn, vol_id) self._add_volume_to_vag(vol_id, iqn, vag_id) # Continue along with default behavior return super(SolidFireISCSI, self).initialize_connection(volume, connector) def _sf_terminate_connection(self, volume, properties, force): """Terminate the volume connection. Optionally remove volume from volume access group. If the VAG is empty then the VAG is also removed. """ if self.configuration.sf_enable_vag: iqn = properties['initiator'] vag = self._get_vags_by_name(iqn) provider_id = volume['provider_id'] vol_id = int(self._parse_provider_id_string(provider_id)[0]) if vag and not volume['multiattach']: # Multiattach causes problems with removing volumes from VAGs. # Compromise solution for now is to remove multiattach volumes # from VAGs during volume deletion.
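# NOTE(editor): teardown summary for the non-multiattach path below:
# if this volume is the only member of the VAG, the whole VAG is
# deleted; if it is one of several, only this volume is removed from
# the VAG; if it is not in the VAG at all, nothing is done.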
vag = vag[0] vag_id = vag['volumeAccessGroupID'] if [vol_id] == vag['volumes']: self._remove_vag(vag_id) elif vol_id in vag['volumes']: self._remove_volume_from_vag(vol_id, vag_id) return super(SolidFireISCSI, self).terminate_connection(volume, properties, force=force) cinder-8.0.0/cinder/volume/drivers/nexenta/0000775000567000056710000000000012701406543022021 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/nexenta/nexentaedge/0000775000567000056710000000000012701406543024310 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/nexenta/nexentaedge/__init__.py0000664000567000056710000000000012701406250026402 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/nexenta/nexentaedge/iscsi.py0000664000567000056710000002564612701406257026013 0ustar jenkinsjenkins00000000000000# Copyright 2015 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import exception from cinder.i18n import _, _LE from cinder.volume import driver from cinder.volume.drivers.nexenta.nexentaedge import jsonrpc from cinder.volume.drivers.nexenta import options LOG = logging.getLogger(__name__) class NexentaEdgeISCSIDriver(driver.ISCSIDriver): """Executes volume driver commands on NexentaEdge cluster. Version history: 1.0.0 - Initial driver version. 1.0.1 - Moved opts to options.py. """ VERSION = '1.0.1' def __init__(self, *args, **kwargs): super(NexentaEdgeISCSIDriver, self).__init__(*args, **kwargs) if self.configuration: self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_ISCSI_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.configuration.append_config_values( options.NEXENTA_EDGE_OPTS) self.restapi_protocol = self.configuration.nexenta_rest_protocol self.restapi_host = self.configuration.nexenta_rest_address self.restapi_port = self.configuration.nexenta_rest_port self.restapi_user = self.configuration.nexenta_rest_user self.restapi_password = self.configuration.nexenta_rest_password self.iscsi_service = self.configuration.nexenta_iscsi_service self.bucket_path = self.configuration.nexenta_lun_container self.blocksize = self.configuration.nexenta_blocksize self.chunksize = self.configuration.nexenta_chunksize self.cluster, self.tenant, self.bucket = self.bucket_path.split('/') self.bucket_url = ('clusters/' + self.cluster + '/tenants/' + self.tenant + '/buckets/' + self.bucket) self.iscsi_target_port = (self.configuration. 
nexenta_iscsi_target_portal_port) self.target_vip = None @property def backend_name(self): backend_name = None if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ return backend_name def do_setup(self, context): if self.restapi_protocol == 'auto': protocol, auto = 'http', True else: protocol, auto = self.restapi_protocol, False try: self.restapi = jsonrpc.NexentaEdgeJSONProxy( protocol, self.restapi_host, self.restapi_port, '/', self.restapi_user, self.restapi_password, auto=auto) rsp = self.restapi.get('service/' + self.iscsi_service + '/iscsi/status') data_keys = rsp['data'][list(rsp['data'].keys())[0]] self.target_name = data_keys.split('\n', 1)[0].split(' ')[2] rsp = self.restapi.get('service/' + self.iscsi_service) if 'X-VIPS' in rsp['data']: vips = json.loads(rsp['data']['X-VIPS']) if len(vips[0]) == 1: self.target_vip = vips[0][0]['ip'].split('/', 1)[0] else: self.target_vip = vips[0][1]['ip'].split('/', 1)[0] else: self.target_vip = self.configuration.safe_get( 'nexenta_client_address') if not self.target_vip: LOG.error(_LE('No VIP configured for service %s'), self.iscsi_service) raise exception.VolumeBackendAPIException( _('No service VIP configured and ' 'no nexenta_client_address')) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error verifying iSCSI service %(serv)s on ' 'host %(hst)s'), {'serv': self.iscsi_service, 'hst': self.restapi_host}) def check_for_setup_error(self): try: self.restapi.get(self.bucket_url + '/objects/') except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error verifying LUN container %(bkt)s'), {'bkt': self.bucket_path}) def _get_lun_number(self, volname): try: rsp = self.restapi.put( 'service/' + self.iscsi_service + '/iscsi/number', { 'objectPath': self.bucket_path + '/' + volname }) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error retrieving LUN %(vol)s number'), {'vol': volname}) return rsp['data'] def _get_target_address(self, volname): return self.target_vip def _get_provider_location(self, volume): return '%(host)s:%(port)s,1 %(name)s %(number)s' % { 'host': self._get_target_address(volume['name']), 'port': self.iscsi_target_port, 'name': self.target_name, 'number': self._get_lun_number(volume['name']) } def create_volume(self, volume): try: self.restapi.post('service/' + self.iscsi_service + '/iscsi', { 'objectPath': self.bucket_path + '/' + volume['name'], 'volSizeMB': int(volume['size']) * units.Ki, 'blockSize': self.blocksize, 'chunkSize': self.chunksize }) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error creating volume')) def delete_volume(self, volume): try: self.restapi.delete('service/' + self.iscsi_service + '/iscsi', {'objectPath': self.bucket_path + '/' + volume['name']}) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error deleting volume')) def extend_volume(self, volume, new_size): try: self.restapi.put('service/' + self.iscsi_service + '/iscsi/resize', {'objectPath': self.bucket_path + '/' + volume['name'], 'newSizeMB': new_size * units.Ki}) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error extending volume')) def create_volume_from_snapshot(self, volume, snapshot): try: 
self.restapi.put( 'service/' + self.iscsi_service + '/iscsi/snapshot/clone', { 'objectPath': self.bucket_path + '/' + snapshot['volume_name'], 'clonePath': self.bucket_path + '/' + volume['name'], 'snapName': snapshot['name'] }) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error cloning volume')) def create_snapshot(self, snapshot): try: self.restapi.post( 'service/' + self.iscsi_service + '/iscsi/snapshot', { 'objectPath': self.bucket_path + '/' + snapshot['volume_name'], 'snapName': snapshot['name'] }) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error creating snapshot')) def delete_snapshot(self, snapshot): try: self.restapi.delete( 'service/' + self.iscsi_service + '/iscsi/snapshot', { 'objectPath': self.bucket_path + '/' + snapshot['volume_name'], 'snapName': snapshot['name'] }) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error deleting snapshot')) def create_cloned_volume(self, volume, src_vref): vol_url = (self.bucket_url + '/objects/' + src_vref['name'] + '/clone') clone_body = { 'tenant_name': self.tenant, 'bucket_name': self.bucket, 'object_name': volume['name'] } try: self.restapi.post(vol_url, clone_body) self.restapi.post('service/' + self.iscsi_service + '/iscsi', { 'objectPath': self.bucket_path + '/' + volume['name'], 'volSizeMB': int(src_vref['size']) * units.Ki, 'blockSize': self.blocksize, 'chunkSize': self.chunksize }) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Error creating cloned volume')) def create_export(self, context, volume, connector=None): return {'provider_location': self._get_provider_location(volume)} def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def local_path(self, volume): raise NotImplementedError def get_volume_stats(self, refresh=False): location_info = '%(driver)s:%(host)s:%(bucket)s' % { 'driver': self.__class__.__name__, 'host': self._get_target_address(None), 'bucket': self.bucket_path } return { 'vendor_name': 'Nexenta', 'driver_version': self.VERSION, 'storage_protocol': 'iSCSI', 'reserved_percentage': 0, 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'QoS_support': False, 'volume_backend_name': self.backend_name, 'location_info': location_info, 'iscsi_target_portal_port': self.iscsi_target_port, 'restapi_url': self.restapi.url } cinder-8.0.0/cinder/volume/drivers/nexenta/nexentaedge/jsonrpc.py0000664000567000056710000000607012701406250026336 0ustar jenkinsjenkins00000000000000# Copyright 2015 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
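# NOTE(editor): a minimal usage sketch of the proxy class defined below;
# the host, service name, bucket path and credentials are hypothetical.
# Attribute access selects the HTTP verb, the first positional argument
# is appended to the URL path, and an optional second argument is sent
# as the JSON body:
#
#     restapi = NexentaEdgeJSONProxy('http', '10.3.199.7', 8080, '/',
#                                    'admin', 'nexenta')
#     status = restapi.get('service/iscsi-svc/iscsi/status')
#     restapi.post('service/iscsi-svc/iscsi',
#                  {'objectPath': 'clstr/tenant/bucket/vol-1',
#                   'volSizeMB': 1024})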
import json import requests import socket from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.utils import retry LOG = logging.getLogger(__name__) socket.setdefaulttimeout(100) class NexentaEdgeJSONProxy(object): retry_exc_tuple = ( requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout ) def __init__(self, protocol, host, port, path, user, password, auto=False, method=None): self.protocol = protocol.lower() self.host = host self.port = port self.path = path self.user = user self.password = password self.auto = auto self.method = method @property def url(self): return '%s://%s:%s%s' % (self.protocol, self.host, self.port, self.path) def __getattr__(self, name): if not self.method: method = name else: raise exception.VolumeDriverException( _("Wrong resource call syntax")) return NexentaEdgeJSONProxy( self.protocol, self.host, self.port, self.path, self.user, self.password, self.auto, method) def __hash__(self): return self.url.__hash__() def __repr__(self): return 'HTTP JSON proxy: %s' % self.url @retry(retry_exc_tuple, interval=1, retries=6) def __call__(self, *args): self.path += args[0] data = None if len(args) > 1: data = json.dumps(args[1]) auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1] headers = { 'Content-Type': 'application/json', 'Authorization': 'Basic %s' % auth } LOG.debug('Sending JSON request: %s', self.url) if self.method == 'get': req = requests.get(self.url, headers=headers) if self.method == 'post': req = requests.post(self.url, data=data, headers=headers) if self.method == 'put': req = requests.put(self.url, data=data, headers=headers) if self.method == 'delete': req = requests.delete(self.url, data=data, headers=headers) rsp = req.json() req.close() LOG.debug('Got response: %s', rsp) if rsp.get('response') is None: raise exception.VolumeBackendAPIException( _('Error response: %s') % rsp) return rsp.get('response') cinder-8.0.0/cinder/volume/drivers/nexenta/options.py0000664000567000056710000001357112701406250024070 0ustar jenkinsjenkins00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nexenta.options` -- Contains configuration options for Nexenta drivers. ============================================================================= ..
automodule:: nexenta.options """ from oslo_config import cfg NEXENTA_EDGE_OPTS = [ cfg.StrOpt('nexenta_rest_address', default='', help='IP address of NexentaEdge management REST API endpoint'), cfg.StrOpt('nexenta_rest_user', default='admin', help='User name to connect to NexentaEdge'), cfg.StrOpt('nexenta_rest_password', default='nexenta', help='Password to connect to NexentaEdge', secret=True), cfg.StrOpt('nexenta_lun_container', default='', help='NexentaEdge logical path of bucket for LUNs'), cfg.StrOpt('nexenta_iscsi_service', default='', help='NexentaEdge iSCSI service name'), cfg.StrOpt('nexenta_client_address', default='', help='NexentaEdge iSCSI Gateway client ' 'address for non-VIP service'), cfg.IntOpt('nexenta_chunksize', default=16384, help='NexentaEdge iSCSI LUN object chunk size') ] NEXENTA_CONNECTION_OPTS = [ cfg.StrOpt('nexenta_host', default='', help='IP address of Nexenta SA'), cfg.IntOpt('nexenta_rest_port', default=8080, help='HTTP port to connect to Nexenta REST API server'), cfg.StrOpt('nexenta_rest_protocol', default='auto', choices=['http', 'https', 'auto'], help='Use http or https for REST connection (default auto)'), cfg.StrOpt('nexenta_user', default='admin', help='User name to connect to Nexenta SA'), cfg.StrOpt('nexenta_password', default='nexenta', help='Password to connect to Nexenta SA', secret=True), ] NEXENTA_ISCSI_OPTS = [ cfg.IntOpt('nexenta_iscsi_target_portal_port', default=3260, help='Nexenta target portal port'), cfg.StrOpt('nexenta_volume', default='cinder', help='SA Pool that holds all volumes'), cfg.StrOpt('nexenta_target_prefix', default='iqn.1986-03.com.sun:02:cinder-', help='IQN prefix for iSCSI targets'), cfg.StrOpt('nexenta_target_group_prefix', default='cinder/', help='Prefix for iSCSI target groups on SA'), cfg.StrOpt('nexenta_volume_group', default='iscsi', help='Volume group for ns5'), ] NEXENTA_NFS_OPTS = [ cfg.StrOpt('nexenta_shares_config', default='/etc/cinder/nfs_shares', help='File with the list of available nfs shares'), cfg.StrOpt('nexenta_mount_point_base', default='$state_path/mnt', help='Base directory that contains NFS share mount points'), cfg.BoolOpt('nexenta_sparsed_volumes', default=True, help='Enables or disables the creation of volumes as ' 'sparsed files that take no space. If disabled ' '(False), volume is created as a regular file, ' 'which takes a long time.'), cfg.BoolOpt('nexenta_nms_cache_volroot', default=True, help=('If set True cache NexentaStor appliance volroot option ' 'value.')) ] NEXENTA_DATASET_OPTS = [ cfg.StrOpt('nexenta_dataset_compression', default='on', choices=['on', 'off', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lzjb', 'zle', 'lz4'], help='Compression value for new ZFS folders.'), cfg.StrOpt('nexenta_dataset_dedup', default='off', choices=['on', 'off', 'sha256', 'verify', 'sha256, verify'], help='Deduplication value for new ZFS folders.'), cfg.StrOpt('nexenta_dataset_description', default='', help='Human-readable description for the folder.'), cfg.IntOpt('nexenta_blocksize', default=4096, help='Block size for datasets'), cfg.IntOpt('nexenta_ns5_blocksize', default=32, help='Block size for datasets'), cfg.BoolOpt('nexenta_sparse', default=False, help='Enables or disables the creation of sparse datasets'), ] NEXENTA_RRMGR_OPTS = [ cfg.IntOpt('nexenta_rrmgr_compression', default=0, help=('Enable stream compression, level 1..9. 
1 - gives best ' 'speed; 9 - gives best compression.')), cfg.IntOpt('nexenta_rrmgr_tcp_buf_size', default=4096, help='TCP Buffer size in KiloBytes.'), cfg.IntOpt('nexenta_rrmgr_connections', default=2, help='Number of TCP connections.'), ] CONF = cfg.CONF CONF.register_opts(NEXENTA_CONNECTION_OPTS) CONF.register_opts(NEXENTA_ISCSI_OPTS) CONF.register_opts(NEXENTA_DATASET_OPTS) CONF.register_opts(NEXENTA_NFS_OPTS) CONF.register_opts(NEXENTA_RRMGR_OPTS) CONF.register_opts(NEXENTA_EDGE_OPTS) cinder-8.0.0/cinder/volume/drivers/nexenta/utils.py0000664000567000056710000001176512701406250023540 0ustar jenkinsjenkins00000000000000# Copyright 2013 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nexenta.utils` -- Nexenta-specific utils functions. ========================================================= .. automodule:: nexenta.utils """ import re import six from oslo_utils import units import six.moves.urllib.parse as urlparse from cinder.i18n import _ def str2size(s, scale=1024): """Convert size-string. String format: [:space:] to bytes. :param s: size-string :param scale: base size """ if not s: return 0 if isinstance(s, int): return s match = re.match(r'^([\.\d]+)\s*([BbKkMmGgTtPpEeZzYy]?)', s) if match is None: raise ValueError(_('Invalid value: "%s"') % s) groups = match.groups() value = float(groups[0]) suffix = len(groups) > 1 and groups[1].upper() or 'B' types = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') for i, t in enumerate(types): if suffix == t: return int(value * pow(scale, i)) def str2gib_size(s): """Covert size-string to size in gigabytes.""" size_in_bytes = str2size(s) return size_in_bytes // units.Gi def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None, connections=None): """Returns rrmgr command for source and destination.""" cmd = ['rrmgr', '-s', 'zfs'] if compression: cmd.extend(['-c', '%s' % compression]) cmd.append('-q') cmd.append('-e') if tcp_buf_size: cmd.extend(['-w', six.text_type(tcp_buf_size)]) if connections: cmd.extend(['-n', six.text_type(connections)]) cmd.extend([src, dst]) return ' '.join(cmd) def parse_nms_url(url): """Parse NMS url into normalized parts like scheme, user, host and others. Example NMS URL: auto://admin:nexenta@192.168.1.1:2000/ NMS URL parts: auto True if url starts with auto://, protocol will be automatically switched to https if http not supported; scheme (auto) connection protocol (http or https); user (admin) NMS user; password (nexenta) NMS password; host (192.168.1.1) NMS host; port (2000) NMS port. 
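
    Example (illustrative; parsing the URL shown above)::

        >>> parse_nms_url('auto://admin:nexenta@192.168.1.1:2000/')
        (True, 'http', 'admin', 'nexenta', '192.168.1.1', '2000', '/rest/nms/')
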
:param url: url string :return: tuple (auto, scheme, user, password, host, port, path) """ pr = urlparse.urlparse(url) scheme = pr.scheme auto = scheme == 'auto' if auto: scheme = 'http' user = 'admin' password = 'nexenta' if '@' not in pr.netloc: host_and_port = pr.netloc else: user_and_password, host_and_port = pr.netloc.split('@', 1) if ':' in user_and_password: user, password = user_and_password.split(':') else: user = user_and_password if ':' in host_and_port: host, port = host_and_port.split(':', 1) else: host, port = host_and_port, '2000' return auto, scheme, user, password, host, port, '/rest/nms/' def parse_nef_url(url): """Parse NMS url into normalized parts like scheme, user, host and others. Example NMS URL: auto://admin:nexenta@192.168.1.1:8080/ NMS URL parts: auto True if url starts with auto://, protocol will be automatically switched to https if http not supported; scheme (auto) connection protocol (http or https); user (admin) NMS user; password (nexenta) NMS password; host (192.168.1.1) NMS host; port (8080) NMS port. :param url: url string :return: tuple (auto, scheme, user, password, host, port) """ pr = urlparse.urlparse(url) scheme = pr.scheme auto = scheme == 'auto' if auto: scheme = 'http' user = 'admin' password = 'nexenta' if '@' not in pr.netloc: host_and_port = pr.netloc else: user_and_password, host_and_port = pr.netloc.split('@', 1) if ':' in user_and_password: user, password = user_and_password.split(':') else: user = user_and_password if ':' in host_and_port: host, port = host_and_port.split(':', 1) else: host, port = host_and_port, '8080' return auto, scheme, user, password, host, port def get_migrate_snapshot_name(volume): """Return name for snapshot that will be used to migrate the volume.""" return 'cinder-migrate-snapshot-%(id)s' % volume cinder-8.0.0/cinder/volume/drivers/nexenta/__init__.py0000664000567000056710000000000012701406250024113 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/nexenta/ns5/0000775000567000056710000000000012701406543022526 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/nexenta/ns5/__init__.py0000664000567000056710000000000012701406250024620 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/nexenta/ns5/iscsi.py0000664000567000056710000004762012701406257024225 0ustar jenkinsjenkins00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nexenta.iscsi` -- Driver to store volumes on Nexenta Appliance ===================================================================== .. 
automodule:: nexenta.volume """ from oslo_log import log as logging from oslo_utils import units from cinder import context from cinder import db from cinder import exception from cinder.i18n import _, _LI, _LE, _LW from cinder.volume import driver from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta import options from cinder.volume.drivers.nexenta import utils VERSION = '1.0.0' LOG = logging.getLogger(__name__) class NexentaISCSIDriver(driver.ISCSIDriver): # pylint: disable=R0921 """Executes volume driver commands on Nexenta Appliance. Version history: 1.0.0 - Initial driver version. """ VERSION = VERSION def __init__(self, *args, **kwargs): super(NexentaISCSIDriver, self).__init__(*args, **kwargs) self.nef = None self.targets = {} self.targetgroups = {} if self.configuration: self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_ISCSI_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.configuration.append_config_values( options.NEXENTA_RRMGR_OPTS) self.nef_protocol = self.configuration.nexenta_rest_protocol self.nef_host = self.configuration.nexenta_host self.nef_port = self.configuration.nexenta_rest_port self.nef_user = self.configuration.nexenta_user self.nef_password = self.configuration.nexenta_password self.storage_pool = self.configuration.nexenta_volume self.volume_group = self.configuration.nexenta_volume_group self.dataset_compression = ( self.configuration.nexenta_dataset_compression) self.dataset_deduplication = self.configuration.nexenta_dataset_dedup self.dataset_description = ( self.configuration.nexenta_dataset_description) self.iscsi_target_portal_port = ( self.configuration.nexenta_iscsi_target_portal_port) @property def backend_name(self): backend_name = None if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ return backend_name def do_setup(self, context): if self.nef_protocol == 'auto': protocol, auto = 'http', True else: protocol, auto = self.nef_protocol, False self.nef = jsonrpc.NexentaJSONProxy( protocol, self.nef_host, self.nef_port, self.nef_user, self.nef_password, auto=auto) url = 'storage/pools/%s/volumeGroups' % self.storage_pool data = { 'name': self.volume_group, 'volumeBlockSize': ( self.configuration.nexenta_ns5_blocksize * units.Ki) } try: self.nef.post(url, data) except exception.NexentaException as e: if 'EEXIST' in e.args[0]: LOG.debug('volumeGroup already exists, skipping') else: raise def check_for_setup_error(self): """Verify that the zfs volumes exist. 
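
        The check is a plain GET against the NEF REST API; an
        illustrative request, assuming the default pool ``cinder`` and
        volume group ``iscsi``::

            GET storage/pools/cinder/volumeGroups/iscsi
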
:raise: :py:exc:`LookupError` """ url = 'storage/pools/%(pool)s/volumeGroups/%(group)s' % { 'pool': self.storage_pool, 'group': self.volume_group, } try: self.nef.get(url) except exception.NexentaException: raise LookupError(_( "Dataset group %s not found at Nexenta SA"), '/'.join( [self.storage_pool, self.volume_group])) services = self.nef.get('services') for service in services['data']: if service['name'] == 'iscsit': if service['state'] != 'online': raise exception.NexentaException( 'iSCSI service is not running on NS appliance') break def _get_volume_path(self, volume): """Return zfs volume name that corresponds given volume name.""" return '%s/%s/%s' % (self.storage_pool, self.volume_group, volume['name']) def _create_target(self, target_idx): target_alias = '%s-%i' % ( self.nef_host, target_idx ) target = self._get_target_by_alias(target_alias) if not target: url = 'san/iscsi/targets' data = {'alias': target_alias} self.nef.post(url, data) target = self._get_target_by_alias(target_alias) if not self._target_group_exists(target_alias): url = 'san/targetgroups' data = {'name': target_alias, 'targets': [target['name']]} self.nef.post(url, data) self.targetgroups[target['name']] = target_alias self.targets[target['name']] = [] return target['name'] def _get_target_name(self, volume): """Return iSCSI target name with least LUs.""" provider_location = volume.get('provider_location') target_names = list(self.targets) if provider_location: target_name = provider_location.split(',1 ')[1].split(' ')[0] if not self.targets.get(target_name): self.targets[target_name] = [] if not(volume['name'] in self.targets[target_name]): self.targets[target_name].append(volume['name']) if not self.targetgroups.get(target_name): url = 'san/iscsi/targets' data = self.nef.get(url).get('data') target_alias = data[0]['alias'] self.targetgroups[target_name] = target_alias elif not target_names: # create first target and target group target_name = self._create_target(0) self.targets[target_name].append(volume['name']) else: target_name = target_names[0] for target in target_names: # find target with minimum number of volumes if len(self.targets[target]) < len(self.targets[target_name]): target_name = target if len(self.targets[target_name]) >= 20: # create new target and target group target_name = self._create_target(len(target_names)) if not(volume['name'] in self.targets[target_name]): self.targets[target_name].append(volume['name']) return target_name def _get_targetgroup_name(self, volume): target_name = self._get_target_name(volume) return self.targetgroups[target_name] @staticmethod def _get_clone_snapshot_name(volume): """Return name for snapshot that will be used to clone the volume.""" return 'cinder-clone-snapshot-%(id)s' % volume def create_volume(self, volume): """Create a zfs volume on appliance. :param volume: volume reference :return: model update dict for volume reference """ url = 'storage/pools/%(pool)s/volumeGroups/%(group)s/volumes' % { 'pool': self.storage_pool, 'group': self.volume_group, } data = { 'name': volume['name'], 'volumeSize': volume['size'] * units.Gi, 'volumeBlockSize': ( self.configuration.nexenta_ns5_blocksize * units.Ki), 'sparseVolume': self.configuration.nexenta_sparse } self.nef.post(url, data) def delete_volume(self, volume): """Destroy a zfs volume on appliance. 
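
        Maps to a single NEF call; illustrative request, assuming the
        default pool/group names::

            DELETE storage/pools/cinder/volumeGroups/iscsi/volumes/volume-<id>
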
:param volume: volume reference """ pool, group, name = self._get_volume_path(volume).split('/') url = ('storage/pools/%(pool)s/volumeGroups/%(group)s' '/volumes/%(name)s') % { 'pool': pool, 'group': group, 'name': name } try: self.nef.delete(url) except exception.NexentaException as exc: # We assume that volume is gone LOG.warning(_LW('Got error trying to delete volume %(volume)s,' ' assuming it is already gone: %(exc)s'), {'volume': volume, 'exc': exc}) def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: volume reference :param new_size: volume new size in GB """ LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'), {'id': volume['id'], 'size': new_size}) pool, group, name = self._get_volume_path(volume).split('/') url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' 'volumes/%(name)s') % { 'pool': pool, 'group': group, 'name': name } self.nef.put(url, {'volumeSize': new_size * units.Gi}) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: snapshot reference """ snapshot_vol = self._get_snapshot_volume(snapshot) LOG.info(_LI('Creating snapshot %(snap)s of volume %(vol)s'), { 'snap': snapshot['name'], 'vol': snapshot_vol['name'] }) volume_path = self._get_volume_path(snapshot_vol) pool, group, volume = volume_path.split('/') url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' 'volumes/%(volume)s/snapshots') % { 'pool': pool, 'group': group, 'volume': snapshot_vol['name'] } self.nef.post(url, {'name': snapshot['name']}) def delete_snapshot(self, snapshot): """Delete volume's snapshot on appliance. :param snapshot: snapshot reference """ LOG.info(_LI('Deleting snapshot: %s'), snapshot['name']) snapshot_vol = self._get_snapshot_volume(snapshot) volume_path = self._get_volume_path(snapshot_vol) pool, group, volume = volume_path.split('/') url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' 'volumes/%(volume)s/snapshots/%(snapshot)s') % { 'pool': pool, 'group': group, 'volume': volume, 'snapshot': snapshot['name'] } try: self.nef.delete(url) except exception.NexentaException as exc: if 'EBUSY' in exc.args[0]: LOG.warning(_LW( 'Could not delete snapshot %s - it has dependencies'), snapshot['name']) else: LOG.warning(exc) def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ LOG.info(_LI('Creating volume from snapshot: %s'), snapshot['name']) snapshot_vol = self._get_snapshot_volume(snapshot) volume_path = self._get_volume_path(snapshot_vol) pool, group, snapshot_vol = volume_path.split('/') url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' 'volumes/%(volume)s/snapshots/%(snapshot)s/clone') % { 'pool': pool, 'group': group, 'volume': snapshot_vol, 'snapshot': snapshot['name'] } targetPath = self._get_volume_path(volume) self.nef.post(url, {'targetPath': targetPath}) url = ('storage/pools/%(pool)s/volumeGroups/' '%(group)s/volumes/%(name)s/promote') % { 'pool': pool, 'group': group, 'name': volume['name'], } self.nef.post(url) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. 
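
        Cloning is snapshot based; an illustrative (abbreviated) NEF
        call sequence, assuming the default pool/group names::

            POST .../volumes/<src>/snapshots  {'name': 'cinder-clone-snapshot-<id>'}
            POST .../snapshots/<snap>/clone   {'targetPath': 'cinder/iscsi/<new>'}
            POST .../volumes/<new>/promote
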
:param volume: new volume reference :param src_vref: source volume reference """ snapshot = {'volume_name': src_vref['name'], 'volume_id': src_vref['id'], 'name': self._get_clone_snapshot_name(volume)} LOG.debug('Creating temp snapshot of the original volume: ' '%s@%s', snapshot['volume_name'], snapshot['name']) # We don't delete this snapshot, because this snapshot will be origin # of new volume. This snapshot will be automatically promoted by NEF # when user will delete origin volume. But when cloned volume deleted # we check its origin property and delete source snapshot if needed. self.create_snapshot(snapshot) try: self.create_volume_from_snapshot(volume, snapshot) except exception.NexentaException: LOG.error(_LE('Volume creation failed, deleting created snapshot ' '%s'), '@'.join( [snapshot['volume_name'], snapshot['name']])) try: self.delete_snapshot(snapshot) except (exception.NexentaException, exception.SnapshotIsBusy): LOG.warning(_LW('Failed to delete zfs snapshot ' '%s'), '@'.join( [snapshot['volume_name'], snapshot['name']])) raise def _get_snapshot_volume(self, snapshot): ctxt = context.get_admin_context() return db.volume_get(ctxt, snapshot['volume_id']) def _get_target_by_alias(self, alias): """Get an iSCSI target by it's alias. :param alias: target alias :return: First found target, else None """ url = 'san/iscsi/targets?alias=%s' % alias targets = self.nef.get(url).get('data') if not targets: return None return targets[0] def _target_group_exists(self, target_group): """Check if target group exist. :param target_group: target group :return: True if target group exist, else False """ url = 'san/targetgroups?name=%s' % target_group return bool(self.nef.get(url).get('data')) def _lu_exists(self, volume): """Check if LU exists on appliance. :param volume: cinder volume :return: True if LU exists, else False """ try: self._get_lun_id(volume) except LookupError: return False return True def _get_lun_id(self, volume): """Get lun id for zfs volume. :param volume: cinder volume :raises: LookupError if zfs volume does not exist or not mapped to LU :return: LUN """ volume_path = self._get_volume_path(volume) targetgroup_name = self._get_targetgroup_name(volume) url = 'san/targetgroups/%s/luns?volume=%s' % ( targetgroup_name, volume_path.replace('/', '%2F')) data = self.nef.get(url).get('data') if not data: raise LookupError(_("LU does not exist for volume: %s"), volume['name']) else: return data[0]['guid'] def _get_lun(self, volume): try: lun_id = self._get_lun_id(volume) except LookupError: return None targetgroup_name = self._get_targetgroup_name(volume) url = 'san/targetgroups/%s/luns/%s/views' % ( targetgroup_name, lun_id) data = self.nef.get(url).get('data') if not data: raise LookupError(_("No views found for LUN: %s"), lun_id) return data[0]['lunNumber'] def _do_export(self, _ctx, volume): """Do all steps to get zfs volume exported at separate target. 
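
        On success the returned model update carries an
        iscsiadm-formatted provider location; an illustrative value
        (address, IQN and LUN number are examples only)::

            10.3.0.1:3260,1 iqn.2005-07.com.nexenta:02:target-0 0
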
:param volume: reference of volume to be exported """ volume_path = self._get_volume_path(volume) target_name = self._get_target_name(volume) targetgroup_name = self._get_targetgroup_name(volume) entry = {} if not self._lu_exists(volume): url = 'san/targetgroups/%s/luns' % targetgroup_name data = {'volume': volume_path} self.nef.post(url, data) entry['lun'] = self._get_lun(volume) model_update = {} if entry.get('lun') is not None: provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { 'host': self.nef_host, 'port': self.configuration.nexenta_iscsi_target_portal_port, 'name': target_name, 'lun': entry['lun'], } model_update = {'provider_location': provider_location} return model_update def create_export(self, _ctx, volume, connector): """Create new export for zfs volume. :param volume: reference of volume to be exported :return: iscsiadm-formatted provider location string """ model_update = self._do_export(_ctx, volume) return model_update def ensure_export(self, _ctx, volume): """Recreate parts of export if necessary. :param volume: reference of volume to be exported """ self._do_export(_ctx, volume) def remove_export(self, _ctx, volume): """Destroy all resources created to export zfs volume. :param volume: reference of volume to be unexported """ try: lun_id = self._get_lun_id(volume) except LookupError: return targetgroup_name = self._get_targetgroup_name(volume) url = 'san/targetgroups/%s/luns/%s' % ( targetgroup_name, lun_id) try: self.nef.delete(url) except exception.NexentaException as exc: if 'No such logical unit in target group' in exc.args[0]: LOG.debug('LU already deleted from appliance') else: raise def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. """ if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info for NexentaStor appliance.""" LOG.debug('Updating volume stats') url = 'storage/pools/%(pool)s/volumeGroups/%(group)s' % { 'pool': self.storage_pool, 'group': self.volume_group, } stats = self.nef.get(url) total_amount = utils.str2gib_size(stats['bytesAvailable']) free_amount = utils.str2gib_size( stats['bytesAvailable'] - stats['bytesUsed']) location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % { 'driver': self.__class__.__name__, 'host': self.nef_host, 'pool': self.storage_pool, 'group': self.volume_group, } self._stats = { 'vendor_name': 'Nexenta', 'dedup': self.dataset_deduplication, 'compression': self.dataset_compression, 'description': self.dataset_description, 'driver_version': self.VERSION, 'storage_protocol': 'iSCSI', 'total_capacity_gb': total_amount, 'free_capacity_gb': free_amount, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'volume_backend_name': self.backend_name, 'location_info': location_info, 'iscsi_target_portal_port': self.iscsi_target_portal_port, 'nef_url': self.nef.url } cinder-8.0.0/cinder/volume/drivers/nexenta/ns5/jsonrpc.py0000664000567000056710000000630312701406250024553 0ustar jenkinsjenkins00000000000000# Copyright 2011 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client
=====================================================================

.. automodule:: nexenta.jsonrpc
"""

import base64
import json
import time

from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests

from cinder import exception

LOG = logging.getLogger(__name__)


class NexentaJSONProxy(object):

    def __init__(self, scheme, host, port, user, password,
                 auto=False, method=None):
        self.scheme = scheme
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.auto = auto
        self.method = method

    @property
    def url(self):
        return '%s://%s:%s/' % (self.scheme, self.host, self.port)

    def __getattr__(self, name):
        # Chain the HTTP verb onto the proxy: nef.get, nef.post, etc.
        return NexentaJSONProxy(
            self.scheme, self.host, self.port, self.user,
            self.password, self.auto, name)

    def __hash__(self):
        return self.url.__hash__()

    def __repr__(self):
        return 'NEF proxy: %s' % self.url

    def __call__(self, path, data=None):
        # base64.b64encode emits no trailing newline, so the encoded
        # credentials are used as-is.
        auth = base64.b64encode(
            ('%s:%s' % (self.user, self.password)).encode('utf-8'))
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Basic %s' % auth
        }
        url = self.url + path
        if data:
            data = jsonutils.dumps(data)

        LOG.debug('Sending JSON to url: %s, data: %s, method: %s',
                  path, data, self.method)
        resp = getattr(requests, self.method)(url, data=data, headers=headers)
        if resp.status_code == 201 or (
                resp.status_code == 200 and not resp.content):
            LOG.debug('Got response: Success')
            return 'Success'
        response = json.loads(resp.content)
        resp.close()
        if response and resp.status_code == 202:
            # 202 means the job is asynchronous: poll the link returned
            # by NEF until the job reports completion.
            url = self.url + response['links'][0]['href']
            while resp.status_code == 202:
                time.sleep(1)
                resp = requests.get(url)
                if resp.status_code == 201 or (
                        resp.status_code == 200 and not resp.content):
                    LOG.debug('Got response: Success')
                    return 'Success'
                else:
                    response = json.loads(resp.content)
                    resp.close()
        if response.get('code'):
            raise exception.NexentaException(response)
        LOG.debug('Got response: %s', response)
        return response
cinder-8.0.0/cinder/volume/drivers/nexenta/ns5/nfs.py0000664000567000056710000004227512701406250023673 0ustar jenkinsjenkins00000000000000# Copyright 2016 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.nfs` -- Driver to store volumes on NexentaStor Appliance.
=======================================================================

..
automodule:: nexenta.nfs """ import hashlib import os from oslo_log import log as logging from cinder import context from cinder import db from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta import options from cinder.volume.drivers.nexenta import utils from cinder.volume.drivers import nfs VERSION = '1.0.0' LOG = logging.getLogger(__name__) class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 """Executes volume driver commands on Nexenta Appliance. Version history: 1.0.0 - Initial driver version. """ driver_prefix = 'nexenta' volume_backend_name = 'NexentaNfsDriver' VERSION = VERSION def __init__(self, *args, **kwargs): super(NexentaNfsDriver, self).__init__(*args, **kwargs) if self.configuration: self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_NFS_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base self.dataset_compression = ( self.configuration.nexenta_dataset_compression) self.dataset_deduplication = self.configuration.nexenta_dataset_dedup self.dataset_description = ( self.configuration.nexenta_dataset_description) self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes self.nef = None self.nef_protocol = self.configuration.nexenta_rest_protocol self.nef_host = self.configuration.nas_ip self.share = self.configuration.nas_share_path self.nef_port = self.configuration.nexenta_rest_port self.nef_user = self.configuration.nexenta_user self.nef_password = self.configuration.nexenta_password @property def backend_name(self): backend_name = None if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ return backend_name def do_setup(self, context): if self.nef_protocol == 'auto': protocol, auto = 'http', True else: protocol, auto = self.nef_protocol, False self.nef = jsonrpc.NexentaJSONProxy( protocol, self.nef_host, self.nef_port, self.nef_user, self.nef_password, auto=auto) def check_for_setup_error(self): """Verify that the volume for our folder exists. :raise: :py:exc:`LookupError` """ pool_name, fs = self._get_share_datasets(self.share) url = 'storage/pools/%s' % (pool_name) if not self.nef.get(url): raise LookupError(_("Pool %s does not exist in Nexenta " "Store appliance") % pool_name) url = 'storage/pools/%s/filesystems/%s' % ( pool_name, fs) if not self.nef.get(url): raise LookupError(_("filesystem %s does not exist in " "Nexenta Store appliance") % fs) path = '/'.join([pool_name, fs]) shared = False response = self.nef.get('nas/nfs') for share in response['data']: if share.get('filesystem') == path: shared = True break if not shared: raise LookupError(_("Dataset %s is not shared in Nexenta " "Store appliance") % path) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. :param volume: volume reference :param connector: connector reference """ data = {'export': volume['provider_location'], 'name': 'volume'} if volume['provider_location'] in self.shares: data['options'] = self.shares[volume['provider_location']] return { 'driver_volume_type': self.driver_volume_type, 'data': data } def create_volume(self, volume): """Creates a volume. 
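
        The volume is backed by a filesystem created under the
        configured share; the resulting provider_location is an NFS
        export path, e.g. (illustrative, assuming share ``pool/nfs``)::

            10.3.0.1:/pool/nfs/volume-<id>
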
        :param volume: volume reference
        :returns: provider_location update dict for database
        """
        self._do_create_volume(volume)
        return {'provider_location': volume['provider_location']}

    def _do_create_volume(self, volume):
        pool, fs = self._get_share_datasets(self.share)
        filesystem = '%s/%s/%s' % (pool, fs, volume['name'])
        LOG.debug('Creating filesystem on NexentaStor %s', filesystem)
        url = 'storage/pools/%s/filesystems' % pool
        data = {
            'name': '/'.join([fs, volume['name']]),
            'compressionMode': self.dataset_compression,
            'dedupMode': self.dataset_deduplication,
        }
        self.nef.post(url, data)
        volume['provider_location'] = '%s:/%s/%s' % (
            self.nef_host, self.share, volume['name'])
        try:
            self._share_folder(fs, volume['name'])
            self._ensure_share_mounted('%s:/%s/%s' % (
                self.nef_host, self.share, volume['name']))

            volume_size = volume['size']
            if getattr(self.configuration,
                       self.driver_prefix + '_sparsed_volumes'):
                self._create_sparsed_file(self.local_path(volume),
                                          volume_size)
            else:
                url = 'storage/pools/%s/filesystems/%s' % (
                    pool, '%2F'.join([fs, volume['name']]))
                compression = self.nef.get(url).get('compressionMode')
                if compression != 'off':
                    # Disable compression, otherwise the regular file
                    # would not consume space on disk.
                    self.nef.put(url, {'compressionMode': 'off'})
                try:
                    self._create_regular_file(
                        self.local_path(volume), volume_size)
                finally:
                    if compression != 'off':
                        # Restore the original compression value if it
                        # was changed.
                        self.nef.put(url, {'compressionMode': compression})
        except exception.NexentaException:
            try:
                url = 'storage/pools/%s/filesystems/%s' % (
                    pool, '%2F'.join([fs, volume['name']]))
                self.nef.delete(url)
            except exception.NexentaException:
                LOG.warning(_LW("Cannot destroy created folder: "
                                "%(vol)s/%(folder)s"),
                            {'vol': pool, 'folder': '/'.join(
                                [fs, volume['name']])})
            raise

    def delete_volume(self, volume):
        """Deletes a logical volume.

        :param volume: volume reference
        """
        pool, fs = self._get_share_datasets(self.share)
        url = ('storage/pools/%(pool)s/filesystems/%(fs)s') % {
            'pool': pool,
            'fs': '%2F'.join([fs, volume['name']])
        }
        origin = self.nef.get(url).get('originalSnapshot')
        url = ('storage/pools/%(pool)s/filesystems/'
               '%(fs)s?snapshots=true') % {
            'pool': pool,
            'fs': '%2F'.join([fs, volume['name']])
        }
        try:
            self.nef.delete(url)
        except exception.NexentaException as exc:
            if 'Failed to destroy snapshot' in exc.args[0]:
                LOG.debug('Snapshot has dependent clones, skipping')
            else:
                raise
        try:
            if origin and self._is_clone_snapshot_name(origin):
                path, snap = origin.split('@')
                pool, fs = path.split('/', 1)
                snap_url = ('storage/pools/%(pool)s/'
                            'filesystems/%(fs)s/snapshots/%(snap)s') % {
                    'pool': pool,
                    'fs': fs,
                    'snap': snap
                }
                self.nef.delete(snap_url)
        except exception.NexentaException as exc:
            if 'does not exist' in exc.args[0]:
                LOG.debug(
                    'Volume %s does not exist on appliance', '/'.join(
                        [pool, fs]))

    def create_snapshot(self, snapshot):
        """Creates a snapshot.

        :param snapshot: snapshot reference
        """
        volume = self._get_snapshot_volume(snapshot)
        pool, fs = self._get_share_datasets(self.share)
        url = 'storage/pools/%(pool)s/filesystems/%(fs)s/snapshots' % {
            'pool': pool,
            'fs': '%2F'.join([fs, volume['name']]),
        }
        data = {'name': snapshot['name']}
        self.nef.post(url, data)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot.
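
        Illustrative NEF call, assuming share ``pool/nfs`` (the
        filesystem path component is URL-encoded with ``%2F``)::

            DELETE storage/pools/pool/filesystems/nfs%2Fvolume-<id>/snapshots/<snap>
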
        :param snapshot: snapshot reference
        """
        volume = self._get_snapshot_volume(snapshot)
        pool, fs = self._get_share_datasets(self.share)
        url = ('storage/pools/%(pool)s/'
               'filesystems/%(fs)s/snapshots/%(snap)s') % {
            'pool': pool,
            'fs': '%2F'.join([fs, volume['name']]),
            'snap': snapshot['name']
        }
        try:
            self.nef.delete(url)
        except exception.NexentaException as exc:
            if 'EBUSY' in exc.args[0]:
                LOG.warning(_LW(
                    'Could not delete snapshot %s - it has dependencies'),
                    snapshot['name'])

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create new volume from other's snapshot on appliance.

        :param volume: reference of volume to be created
        :param snapshot: reference of source snapshot
        """
        snapshot_vol = self._get_snapshot_volume(snapshot)
        volume['provider_location'] = snapshot_vol['provider_location']

        pool, fs = self._get_share_datasets(self.share)
        dataset_path = '%s/%s' % (pool, fs)
        url = ('storage/pools/%(pool)s/'
               'filesystems/%(fs)s/snapshots/%(snap)s/clone') % {
            'pool': pool,
            'fs': '%2F'.join([fs, snapshot_vol['name']]),
            'snap': snapshot['name']
        }
        path = '/'.join([pool, fs, volume['name']])
        data = {'targetPath': path}
        self.nef.post(url, data)
        path = '%2F'.join([pool, fs, volume['name']])
        url = 'storage/filesystems/%s/promote' % path
        self.nef.post(url)

        try:
            self._share_folder(fs, volume['name'])
        except exception.NexentaException:
            try:
                url = ('storage/pools/%(pool)s/'
                       'filesystems/%(fs)s') % {
                    'pool': pool,
                    'fs': volume['name']
                }
                self.nef.delete(url)
            except exception.NexentaException:
                LOG.warning(_LW("Cannot destroy cloned filesystem: "
                                "%(vol)s/%(filesystem)s"),
                            {'vol': dataset_path,
                             'filesystem': volume['name']})
            raise
        return {'provider_location': volume['provider_location']}

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.

        :param volume: new volume reference
        :param src_vref: source volume reference
        """
        LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
        snapshot = {'volume_name': src_vref['name'],
                    'volume_id': src_vref['id'],
                    'name': self._get_clone_snapshot_name(volume)}
        self.create_snapshot(snapshot)
        try:
            return self.create_volume_from_snapshot(volume, snapshot)
        except exception.NexentaException:
            LOG.error(_LE('Volume creation failed, deleting created snapshot '
                          '%(volume_name)s@%(name)s'), snapshot)
            try:
                self.delete_snapshot(snapshot)
            except (exception.NexentaException, exception.SnapshotIsBusy):
                LOG.warning(_LW('Failed to delete zfs snapshot '
                                '%(volume_name)s@%(name)s'), snapshot)
            raise
        self.delete_snapshot(snapshot)

    def local_path(self, volume):
        """Get volume path (mounted locally fs path) for given volume.

        :param volume: volume reference
        """
        nfs_share = volume['provider_location']
        return os.path.join(self._get_mount_point_for_share(nfs_share),
                            'volume')

    def _get_mount_point_for_share(self, nfs_share):
        """Returns path to mount point NFS share.

        :param nfs_share: example 172.18.194.100:/var/nfs
        """
        nfs_share = nfs_share.encode('utf-8')
        return os.path.join(self.configuration.nexenta_mount_point_base,
                            hashlib.md5(nfs_share).hexdigest())

    def _share_folder(self, path, filesystem):
        """Share NFS filesystem on NexentaStor Appliance.
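
        Sharing is granted by posting an ``everyone@`` allow ACL to the
        filesystem; illustrative request, assuming share ``pool/nfs``::

            POST storage/pools/pool/filesystems/nfs%2Fvolume-<id>/acl
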
:param nef: nef object :param path: path to parent filesystem :param filesystem: filesystem that needs to be shared """ pool = self.share.split('/')[0] LOG.debug( 'Creating ACL for filesystem %s on Nexenta Store', filesystem) url = 'storage/pools/%s/filesystems/%s/acl' % ( pool, '%2F'.join([path.replace('/', '%2F'), filesystem])) data = { "type": "allow", "principal": "everyone@", "permissions": [ "list_directory", "read_data", "add_file", "write_data", "add_subdirectory", "append_data", "read_xattr", "write_xattr", "execute", "delete_child", "read_attributes", "write_attributes", "delete", "read_acl", "write_acl", "write_owner", "synchronize" ], "flags": [ "file_inherit", "dir_inherit" ] } self.nef.post(url, data) LOG.debug( 'Successfully shared filesystem %s', '/'.join( [path, filesystem])) def _get_capacity_info(self, path): """Calculate available space on the NFS share. :param path: example pool/nfs """ pool, fs = self._get_share_datasets(path) url = 'storage/pools/%s/filesystems/%s' % ( pool, fs) data = self.nef.get(url) total = utils.str2size(data['bytesAvailable']) allocated = utils.str2size(data['bytesUsed']) free = total - allocated return total, free, allocated def _get_snapshot_volume(self, snapshot): ctxt = context.get_admin_context() return db.volume_get(ctxt, snapshot['volume_id']) def _get_share_datasets(self, nfs_share): pool_name, fs = nfs_share.split('/', 1) return pool_name, fs def _get_clone_snapshot_name(self, volume): """Return name for snapshot that will be used to clone the volume.""" return 'cinder-clone-snapshot-%(id)s' % volume def _is_clone_snapshot_name(self, snapshot): """Check if snapshot is created for cloning.""" name = snapshot.split('@')[-1] return name.startswith('cinder-clone-snapshot-') def _update_volume_stats(self): """Retrieve stats info for NexentaStor appliance.""" LOG.debug('Updating volume stats') share = ':/'.join([self.nef_host, self.share]) total, free, allocated = self._get_capacity_info(self.share) total_space = utils.str2gib_size(total) free_space = utils.str2gib_size(free) location_info = '%(driver)s:%(share)s' % { 'driver': self.__class__.__name__, 'share': share } self._stats = { 'vendor_name': 'Nexenta', 'dedup': self.dataset_deduplication, 'compression': self.dataset_compression, 'description': self.dataset_description, 'nef_url': self.nef_host, 'driver_version': self.VERSION, 'storage_protocol': 'NFS', 'total_capacity_gb': total_space, 'free_capacity_gb': free_space, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'location_info': location_info, 'volume_backend_name': self.backend_name, 'nfs_mount_point_base': self.nfs_mount_point_base } cinder-8.0.0/cinder/volume/drivers/nexenta/iscsi.py0000664000567000056710000006617212701406257023523 0ustar jenkinsjenkins00000000000000# Copyright 2016 Nexenta Systems, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" :mod:`nexenta.iscsi` -- Driver to store volumes on Nexenta Appliance ===================================================================== .. automodule:: nexenta.iscsi """ import six from oslo_log import log as logging from oslo_utils import excutils from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume import driver from cinder.volume.drivers.nexenta import jsonrpc from cinder.volume.drivers.nexenta import options from cinder.volume.drivers.nexenta import utils VERSION = '1.3.0.1' LOG = logging.getLogger(__name__) class NexentaISCSIDriver(driver.ISCSIDriver): """Executes volume driver commands on Nexenta Appliance. Version history: 1.0.0 - Initial driver version. 1.0.1 - Fixed bug #1236626: catch "does not exist" exception of lu_exists. 1.1.0 - Changed class name to NexentaISCSIDriver. 1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy. 1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs clone. 1.1.3 - Extended volume stats provided by _update_volume_stats method. 1.2.0 - Added volume migration with storage assist method. 1.2.1 - Fixed bug #1263258: now migrate_volume update provider_location of migrated volume; after migrating volume migrate_volume destroy snapshot on migration destination. 1.3.0 - Added retype method. 1.3.0.1 - Target creation refactor. """ VERSION = VERSION def __init__(self, *args, **kwargs): super(NexentaISCSIDriver, self).__init__(*args, **kwargs) self.nms = None self.targets = {} if self.configuration: self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_ISCSI_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.configuration.append_config_values( options.NEXENTA_RRMGR_OPTS) self.nms_protocol = self.configuration.nexenta_rest_protocol self.nms_host = self.configuration.nexenta_host self.nms_port = self.configuration.nexenta_rest_port self.nms_user = self.configuration.nexenta_user self.nms_password = self.configuration.nexenta_password self.volume = self.configuration.nexenta_volume self.volume_compression = ( self.configuration.nexenta_dataset_compression) self.volume_deduplication = self.configuration.nexenta_dataset_dedup self.volume_description = ( self.configuration.nexenta_dataset_description) self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections self.iscsi_target_portal_port = ( self.configuration.nexenta_iscsi_target_portal_port) @property def backend_name(self): backend_name = None if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ return backend_name def do_setup(self, context): if self.nms_protocol == 'auto': protocol, auto = 'http', True else: protocol, auto = self.nms_protocol, False self.nms = jsonrpc.NexentaJSONProxy( protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user, self.nms_password, auto=auto) def check_for_setup_error(self): """Verify that the volume for our zvols exists. 
:raise: :py:exc:`LookupError` """ if not self.nms.volume.object_exists(self.volume): raise LookupError(_("Volume %s does not exist in Nexenta SA") % self.volume) def _get_zvol_name(self, volume_name): """Return zvol name that corresponds given volume name.""" return '%s/%s' % (self.volume, volume_name) def _create_target(self, target_idx): target_name = '%s%s-%i' % ( self.configuration.nexenta_target_prefix, self.nms_host, target_idx ) target_group_name = self._get_target_group_name(target_name) if not self._target_exists(target_name): try: self.nms.iscsitarget.create_target({ 'target_name': target_name}) except exception.NexentaException as exc: if 'already' in exc.args[0]: LOG.info(_LI('Ignored target creation error "%s" while ' 'ensuring export.'), exc) else: raise if not self._target_group_exists(target_group_name): try: self.nms.stmf.create_targetgroup(target_group_name) except exception.NexentaException as exc: if ('already' in exc.args[0]): LOG.info(_LI('Ignored target group creation error "%s" ' 'while ensuring export.'), exc) else: raise if not self._target_member_in_target_group(target_group_name, target_name): try: self.nms.stmf.add_targetgroup_member(target_group_name, target_name) except exception.NexentaException as exc: if ('already' in exc.args[0]): LOG.info(_LI('Ignored target group member addition error ' '"%s" while ensuring export.'), exc) else: raise self.targets[target_name] = [] return target_name def _get_target_name(self, volume): """Return iSCSI target name with least LUs.""" provider_location = volume.get('provider_location') target_names = self.targets.keys() if provider_location: target_name = provider_location.split(',1 ')[1].split(' ')[0] if not(self.targets.get(target_name)): self.targets[target_name] = [] if not(volume['name'] in self.targets[target_name]): self.targets[target_name].append(volume['name']) elif not(target_names): # create first target and target group target_name = self._create_target(0) self.targets[target_name].append(volume['name']) else: target_name = target_names[0] for target in target_names: if len(self.targets[target]) < len(self.targets[target_name]): target_name = target if len(self.targets[target_name]) >= 20: # create new target and target group target_name = self._create_target(len(target_names)) if not(volume['name'] in self.targets[target_name]): self.targets[target_name].append(volume['name']) return target_name def _get_target_group_name(self, target_name): """Return Nexenta iSCSI target group name for volume.""" return target_name.replace( self.configuration.nexenta_target_prefix, self.configuration.nexenta_target_group_prefix ) @staticmethod def _get_clone_snapshot_name(volume): """Return name for snapshot that will be used to clone the volume.""" return 'cinder-clone-snapshot-%(id)s' % volume @staticmethod def _is_clone_snapshot_name(snapshot): """Check if snapshot is created for cloning.""" name = snapshot.split('@')[-1] return name.startswith('cinder-clone-snapshot-') def create_volume(self, volume): """Create a zvol on appliance. :param volume: volume reference :return: model update dict for volume reference """ self.nms.zvol.create( self._get_zvol_name(volume['name']), '%sG' % (volume['size'],), six.text_type(self.configuration.nexenta_blocksize), self.configuration.nexenta_sparse) def extend_volume(self, volume, new_size): """Extend an existing volume. 
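
        Maps to a single zvol property update; illustrative call for a
        resize to 2 GB, assuming the default volume name::

            self.nms.zvol.set_child_prop('cinder/volume-<id>', 'volsize', '2G')
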
:param volume: volume reference :param new_size: volume new size in GB """ LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'), {'id': volume['id'], 'size': new_size}) self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']), 'volsize', '%sG' % new_size) def delete_volume(self, volume): """Destroy a zvol on appliance. :param volume: volume reference """ volume_name = self._get_zvol_name(volume['name']) try: props = self.nms.zvol.get_child_props(volume_name, 'origin') or {} self.nms.zvol.destroy(volume_name, '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info(_LI('Volume %s does not exist, it ' 'seems it was already deleted.'), volume_name) return if 'zvol has children' in exc.args[0]: LOG.info(_LI('Volume %s will be deleted later.'), volume_name) return raise origin = props.get('origin') if origin and self._is_clone_snapshot_name(origin): volume, snapshot = origin.split('@') volume = volume.lstrip('%s/' % self.configuration.nexenta_volume) try: self.delete_snapshot({'volume_name': volume, 'name': snapshot}) except exception.NexentaException as exc: LOG.warning(_LW('Cannot delete snapshot %(origin)s: %(exc)s'), {'origin': origin, 'exc': exc}) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. :param volume: new volume reference :param src_vref: source volume reference """ snapshot = {'volume_name': src_vref['name'], 'name': self._get_clone_snapshot_name(volume)} LOG.debug('Creating temp snapshot of the original volume: ' '%(volume_name)s@%(name)s', snapshot) # We don't delete this snapshot, because this snapshot will be origin # of new volume. This snapshot will be automatically promoted by NMS # when user will delete origin volume. But when cloned volume deleted # we check its origin property and delete source snapshot if needed. self.create_snapshot(snapshot) try: self.create_volume_from_snapshot(volume, snapshot) except exception.NexentaException: with excutils.save_and_reraise_exception(): LOG.exception(_LE( 'Volume creation failed, deleting created snapshot ' '%(volume_name)s@%(name)s'), snapshot) try: self.delete_snapshot(snapshot) except (exception.NexentaException, exception.SnapshotIsBusy): LOG.warning(_LW('Failed to delete zfs snapshot ' '%(volume_name)s@%(name)s'), snapshot) raise def _get_zfs_send_recv_cmd(self, src, dst): """Returns rrmgr command for source and destination.""" return utils.get_rrmgr_cmd(src, dst, compression=self.rrmgr_compression, tcp_buf_size=self.rrmgr_tcp_buf_size, connections=self.rrmgr_connections) @staticmethod def get_nms_for_url(url): """Returns initialized nms object for url.""" auto, scheme, user, password, host, port, path = ( utils.parse_nms_url(url)) return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user, password, auto=auto) def migrate_volume(self, ctxt, volume, host): """Migrate if volume and host are managed by Nexenta appliance. 
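
        The data transfer is a remote replication job run on the source
        appliance; an illustrative command as built by
        utils.get_rrmgr_cmd (compression level 9, 4096 KB TCP buffer,
        2 connections)::

            rrmgr -s zfs -c 9 -q -e -w 4096 -n 2 cinder/volume-<id>@<snap> <dst_host>:cinder
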
:param ctxt: context :param volume: a dictionary describing the volume to migrate :param host: a dictionary describing the host to migrate to """ LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s', {'id': volume['id'], 'host': host}) false_ret = (False, None) if volume['status'] not in ('available', 'retyping'): return false_ret if 'capabilities' not in host: return false_ret capabilities = host['capabilities'] if ('location_info' not in capabilities or 'iscsi_target_portal_port' not in capabilities or 'nms_url' not in capabilities): return false_ret nms_url = capabilities['nms_url'] dst_parts = capabilities['location_info'].split(':') if (capabilities.get('vendor_name') != 'Nexenta' or dst_parts[0] != self.__class__.__name__ or capabilities['free_capacity_gb'] < volume['size']): return false_ret dst_host, dst_volume = dst_parts[1:] ssh_bound = False ssh_bindings = self.nms.appliance.ssh_list_bindings() for bind in ssh_bindings: if dst_host.startswith(ssh_bindings[bind][3]): ssh_bound = True break if not ssh_bound: LOG.warning(_LW("Remote NexentaStor appliance at %s should be " "SSH-bound."), dst_host) # Create temporary snapshot of volume on NexentaStor Appliance. snapshot = { 'volume_name': volume['name'], 'name': utils.get_migrate_snapshot_name(volume) } self.create_snapshot(snapshot) src = '%(volume)s/%(zvol)s@%(snapshot)s' % { 'volume': self.volume, 'zvol': volume['name'], 'snapshot': snapshot['name'] } dst = ':'.join([dst_host, dst_volume]) try: self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst)) except exception.NexentaException as exc: LOG.warning(_LW("Cannot send source snapshot %(src)s to " "destination %(dst)s. Reason: %(exc)s"), {'src': src, 'dst': dst, 'exc': exc}) return false_ret finally: try: self.delete_snapshot(snapshot) except exception.NexentaException as exc: LOG.warning(_LW("Cannot delete temporary source snapshot " "%(src)s on NexentaStor Appliance: %(exc)s"), {'src': src, 'exc': exc}) try: self.delete_volume(volume) except exception.NexentaException as exc: LOG.warning(_LW("Cannot delete source volume %(volume)s on " "NexentaStor Appliance: %(exc)s"), {'volume': volume['name'], 'exc': exc}) dst_nms = self.get_nms_for_url(nms_url) dst_snapshot = '%s/%s@%s' % (dst_volume, volume['name'], snapshot['name']) try: dst_nms.snapshot.destroy(dst_snapshot, '') except exception.NexentaException as exc: LOG.warning(_LW("Cannot delete temporary destination snapshot " "%(dst)s on NexentaStor Appliance: %(exc)s"), {'dst': dst_snapshot, 'exc': exc}) return True, None def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
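
        Only the compression, dedup and description extra specs are
        applied in place (as the zfs properties ``compression``,
        ``dedup`` and ``nms:description``); an illustrative diff entry
        that triggers an in-place retype::

            diff = {'extra_specs': {'compression': ('on', 'lz4')}}
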
""" LOG.debug('Retype volume request %(vol)s to be %(type)s ' '(host: %(host)s), diff %(diff)s.', {'vol': volume['name'], 'type': new_type, 'host': host, 'diff': diff}) options = dict( compression='compression', dedup='dedup', description='nms:description' ) retyped = False migrated = False capabilities = host['capabilities'] src_backend = self.__class__.__name__ dst_backend = capabilities['location_info'].split(':')[0] if src_backend != dst_backend: LOG.warning(_LW('Cannot retype from %(src_backend)s to ' '%(dst_backend)s.'), { 'src_backend': src_backend, 'dst_backend': dst_backend, }) return False hosts = (volume['host'], host['host']) old, new = hosts if old != new: migrated, provider_location = self.migrate_volume( context, volume, host) if not migrated: nms = self.nms else: nms_url = capabilities['nms_url'] nms = self.get_nms_for_url(nms_url) zvol = '%s/%s' % ( capabilities['location_info'].split(':')[-1], volume['name']) for opt in options: old, new = diff.get('extra_specs').get(opt, (False, False)) if old != new: LOG.debug('Changing %(opt)s from %(old)s to %(new)s.', {'opt': opt, 'old': old, 'new': new}) try: nms.zvol.set_child_prop( zvol, options[opt], new) retyped = True except exception.NexentaException: LOG.error(_LE('Error trying to change %(opt)s' ' from %(old)s to %(new)s'), {'opt': opt, 'old': old, 'new': new}) return False, None return retyped or migrated, None def create_snapshot(self, snapshot): """Create snapshot of existing zvol on appliance. :param snapshot: snapshot reference """ self.nms.zvol.create_snapshot( self._get_zvol_name(snapshot['volume_name']), snapshot['name'], '') def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ self.nms.zvol.clone( '%s@%s' % (self._get_zvol_name(snapshot['volume_name']), snapshot['name']), self._get_zvol_name(volume['name'])) def delete_snapshot(self, snapshot): """Delete volume's snapshot on appliance. :param snapshot: snapshot reference """ volume_name = self._get_zvol_name(snapshot['volume_name']) snapshot_name = '%s@%s' % (volume_name, snapshot['name']) try: self.nms.snapshot.destroy(snapshot_name, '') except exception.NexentaException as exc: if "does not exist" in exc.args[0]: LOG.info(_LI('Snapshot %s does not exist, it seems it was ' 'already deleted.'), snapshot_name) elif "snapshot has dependent clones" in exc.args[0]: LOG.info(_LI('Snapshot %s has dependent clones, will be ' 'deleted later.'), snapshot_name) else: raise ctxt = context.get_admin_context() try: self.db.volume_get(ctxt, snapshot['volume_name']) except exception.VolumeNotFound: LOG.info(_LI('Origin volume %s appears to be removed, try to ' 'remove it from backend if it is there.')) if self.nms.volume.object_exists(volume_name): self.nms.zvol.destroy(volume_name, '') def local_path(self, volume): """Return local path to existing local volume. We never have local volumes, so it raises NotImplementedError. :raise: :py:exc:`NotImplementedError` """ raise NotImplementedError def _target_exists(self, target): """Check if iSCSI target exist. :param target: target name :return: True if target exist, else False """ targets = self.nms.stmf.list_targets() if not targets: return False return (target in self.nms.stmf.list_targets()) def _target_group_exists(self, target_group): """Check if target group exist. 
:param target_group: target group :return: True if target group exist, else False """ groups = self.nms.stmf.list_targetgroups() if not groups: return False return target_group in groups def _target_member_in_target_group(self, target_group, target_member): """Check if target member in target group. :param target_group: target group :param target_member: target member :return: True if target member in target group, else False :raises: NexentaException if target group doesn't exist """ members = self.nms.stmf.list_targetgroup_members(target_group) if not members: return False return target_member in members def _lu_exists(self, zvol_name): """Check if LU exists on appliance. :param zvol_name: Zvol name :raises: NexentaException if zvol not exists :return: True if LU exists, else False """ try: return bool(self.nms.scsidisk.lu_exists(zvol_name)) except exception.NexentaException as exc: if 'does not exist' not in exc.args[0]: raise return False def _is_lu_shared(self, zvol_name): """Check if LU exists on appliance and shared. :param zvol_name: Zvol name :raises: NexentaException if Zvol not exist :return: True if LU exists and shared, else False """ try: shared = self.nms.scsidisk.lu_shared(zvol_name) > 0 except exception.NexentaException as exc: if 'does not exist for zvol' not in exc.args[0]: raise # Zvol does not exists shared = False # LU does not exist return shared def create_export(self, _ctx, volume, connector): """Create new export for zvol. :param volume: reference of volume to be exported :return: iscsiadm-formatted provider location string """ model_update = self._do_export(_ctx, volume) return model_update def ensure_export(self, _ctx, volume): self._do_export(_ctx, volume) def _do_export(self, _ctx, volume): """Recreate parts of export if necessary. :param volume: reference of volume to be exported """ zvol_name = self._get_zvol_name(volume['name']) target_name = self._get_target_name(volume) target_group_name = self._get_target_group_name(target_name) entry = None if not self._lu_exists(zvol_name): try: entry = self.nms.scsidisk.create_lu(zvol_name, {}) except exception.NexentaException as exc: if 'in use' not in exc.args[0]: raise LOG.info(_LI('Ignored LU creation error "%s" while ensuring ' 'export.'), exc) if not self._is_lu_shared(zvol_name): try: entry = self.nms.scsidisk.add_lun_mapping_entry(zvol_name, { 'target_group': target_group_name}) except exception.NexentaException as exc: if 'view entry exists' not in exc.args[0]: raise LOG.info(_LI('Ignored LUN mapping entry addition error "%s" ' 'while ensuring export.'), exc) model_update = {} if entry: provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { 'host': self.nms_host, 'port': self.configuration.nexenta_iscsi_target_portal_port, 'name': target_name, 'lun': entry['lun'], } model_update = {'provider_location': provider_location} return model_update def remove_export(self, _ctx, volume): """Destroy all resources created to export zvol. :param volume: reference of volume to be unexported """ target_name = self._get_target_name(volume) self.targets[target_name].remove(volume['name']) zvol_name = self._get_zvol_name(volume['name']) self.nms.scsidisk.delete_lu(zvol_name) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. 
""" if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info for NexentaStor appliance.""" LOG.debug('Updating volume stats') stats = self.nms.volume.get_child_props( self.configuration.nexenta_volume, 'health|size|used|available') total_amount = utils.str2gib_size(stats['size']) free_amount = utils.str2gib_size(stats['available']) location_info = '%(driver)s:%(host)s:%(volume)s' % { 'driver': self.__class__.__name__, 'host': self.nms_host, 'volume': self.volume } self._stats = { 'vendor_name': 'Nexenta', 'dedup': self.volume_deduplication, 'compression': self.volume_compression, 'description': self.volume_description, 'driver_version': self.VERSION, 'storage_protocol': 'iSCSI', 'total_capacity_gb': total_amount, 'free_capacity_gb': free_amount, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'volume_backend_name': self.backend_name, 'location_info': location_info, 'iscsi_target_portal_port': self.iscsi_target_portal_port, 'nms_url': self.nms.url } cinder-8.0.0/cinder/volume/drivers/nexenta/jsonrpc.py0000664000567000056710000000567312701406250024057 0ustar jenkinsjenkins00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client ===================================================================== .. 
automodule:: nexenta.jsonrpc """ import socket from oslo_log import log as logging from oslo_serialization import jsonutils import requests from cinder import exception from cinder.utils import retry LOG = logging.getLogger(__name__) socket.setdefaulttimeout(100) class NexentaJSONProxy(object): retry_exc_tuple = (requests.exceptions.ConnectionError,) def __init__(self, scheme, host, port, path, user, password, auto=False, obj=None, method=None): self.scheme = scheme.lower() self.host = host self.port = port self.path = path self.user = user self.password = password self.auto = auto self.obj = obj self.method = method def __getattr__(self, name): if not self.obj: obj, method = name, None elif not self.method: obj, method = self.obj, name else: obj, method = '%s.%s' % (self.obj, self.method), name return NexentaJSONProxy(self.scheme, self.host, self.port, self.path, self.user, self.password, self.auto, obj, method) @property def url(self): return '%s://%s:%s%s' % (self.scheme, self.host, self.port, self.path) def __hash__(self): return self.url.__hash__() def __repr__(self): return 'NMS proxy: %s' % self.url @retry(retry_exc_tuple, retries=6) def __call__(self, *args): data = jsonutils.dumps({ 'object': self.obj, 'method': self.method, 'params': args }) auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1] headers = { 'Content-Type': 'application/json', 'Authorization': 'Basic %s' % auth } LOG.debug('Sending JSON data: %s', data) req = requests.post(self.url, data=data, headers=headers) response = req.json() req.close() LOG.debug('Got response: %s', response) if response.get('error') is not None: message = response['error'].get('message', '') raise exception.NexentaException(message) return response.get('result') cinder-8.0.0/cinder/volume/drivers/nexenta/nfs.py0000664000567000056710000010200012701406250023145 0ustar jenkinsjenkins00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nexenta.nfs` -- Driver to store volumes on NexentaStor Appliance. ======================================================================= .. automodule:: nexenta.nfs """ import hashlib import os import re import six from eventlet import greenthread from oslo_log import log as logging from oslo_utils import units from cinder import context from cinder import db from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume.drivers.nexenta import jsonrpc from cinder.volume.drivers.nexenta import options from cinder.volume.drivers.nexenta import utils from cinder.volume.drivers import nfs VERSION = '1.3.0' LOG = logging.getLogger(__name__) class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 """Executes volume driver commands on Nexenta Appliance. Version history: 1.0.0 - Initial driver version. 1.1.0 - Auto sharing for enclosing folder. 1.1.1 - Added caching for NexentaStor appliance 'volroot' value. 1.1.2 - Ignore "folder does not exist" error in delete_volume and delete_snapshot method. 
1.1.3 - Redefined volume_backend_name attribute inherited from RemoteFsDriver. 1.2.0 - Added migrate and retype methods. 1.3.0 - Extend volume method. """ driver_prefix = 'nexenta' volume_backend_name = 'NexentaNfsDriver' VERSION = VERSION VOLUME_FILE_NAME = 'volume' def __init__(self, *args, **kwargs): super(NexentaNfsDriver, self).__init__(*args, **kwargs) if self.configuration: self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_NFS_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.configuration.append_config_values( options.NEXENTA_RRMGR_OPTS) self.nms_cache_volroot = self.configuration.nexenta_nms_cache_volroot self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base self.volume_compression = ( self.configuration.nexenta_dataset_compression) self.volume_deduplication = self.configuration.nexenta_dataset_dedup self.volume_description = ( self.configuration.nexenta_dataset_description) self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes self._nms2volroot = {} self.share2nms = {} self.nfs_versions = {} @property def backend_name(self): backend_name = None if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ return backend_name def do_setup(self, context): shares_config = getattr(self.configuration, self.driver_prefix + '_shares_config') if shares_config: self.configuration.nfs_shares_config = shares_config super(NexentaNfsDriver, self).do_setup(context) self._load_shares_config(shares_config) self._mount_subfolders() def check_for_setup_error(self): """Verify that the volume for our folder exists. :raise: :py:exc:`LookupError` """ if self.share2nms: for nfs_share in self.share2nms: nms = self.share2nms[nfs_share] volume_name, dataset = self._get_share_datasets(nfs_share) if not nms.volume.object_exists(volume_name): raise LookupError(_("Volume %s does not exist in Nexenta " "Store appliance"), volume_name) folder = '%s/%s' % (volume_name, dataset) if not nms.folder.object_exists(folder): raise LookupError(_("Folder %s does not exist in Nexenta " "Store appliance"), folder) self._share_folder(nms, volume_name, dataset) def migrate_volume(self, ctxt, volume, host): """Migrate if volume and host are managed by Nexenta appliance. :param ctxt: context :param volume: a dictionary describing the volume to migrate :param host: a dictionary describing the host to migrate to """ LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s', {'id': volume['id'], 'host': host}) false_ret = (False, None) if volume['status'] not in ('available', 'retyping'): LOG.warning(_LW("Volume status must be 'available' or 'retyping'." " Current volume status: %s"), volume['status']) return false_ret if 'capabilities' not in host: LOG.warning(_LW("Unsupported host. 
No capabilities found")) return false_ret capabilities = host['capabilities'] ns_shares = capabilities['ns_shares'] dst_parts = capabilities['location_info'].split(':') dst_host, dst_volume = dst_parts[1:] if (capabilities.get('vendor_name') != 'Nexenta' or dst_parts[0] != self.__class__.__name__ or capabilities['free_capacity_gb'] < volume['size']): return false_ret nms = self.share2nms[volume['provider_location']] ssh_bindings = nms.appliance.ssh_list_bindings() shares = [] for bind in ssh_bindings: for share in ns_shares: if (share.startswith(ssh_bindings[bind][3]) and ns_shares[share] >= volume['size']): shares.append(share) if len(shares) == 0: LOG.warning(_LW("Remote NexentaStor appliance at %s should be " "SSH-bound."), share) return false_ret share = sorted(shares, key=ns_shares.get, reverse=True)[0] snapshot = { 'volume_name': volume['name'], 'volume_id': volume['id'], 'name': utils.get_migrate_snapshot_name(volume) } self.create_snapshot(snapshot) location = volume['provider_location'] src = '%(share)s/%(volume)s@%(snapshot)s' % { 'share': location.split(':')[1].split('volumes/')[1], 'volume': volume['name'], 'snapshot': snapshot['name'] } dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]]) try: nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst)) except exception.NexentaException as exc: LOG.warning(_LW("Cannot send source snapshot %(src)s to " "destination %(dst)s. Reason: %(exc)s"), {'src': src, 'dst': dst, 'exc': exc}) return false_ret finally: try: self.delete_snapshot(snapshot) except exception.NexentaException as exc: LOG.warning(_LW("Cannot delete temporary source snapshot " "%(src)s on NexentaStor Appliance: %(exc)s"), {'src': src, 'exc': exc}) try: self.delete_volume(volume) except exception.NexentaException as exc: LOG.warning(_LW("Cannot delete source volume %(volume)s on " "NexentaStor Appliance: %(exc)s"), {'volume': volume['name'], 'exc': exc}) dst_nms = self._get_nms_for_url(capabilities['nms_url']) dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1], volume['name'], snapshot['name']) try: dst_nms.snapshot.destroy(dst_snapshot, '') except exception.NexentaException as exc: LOG.warning(_LW("Cannot delete temporary destination snapshot " "%(dst)s on NexentaStor Appliance: %(exc)s"), {'dst': dst_snapshot, 'exc': exc}) return True, {'provider_location': share} def _get_zfs_send_recv_cmd(self, src, dst): """Returns rrmgr command for source and destination.""" return utils.get_rrmgr_cmd(src, dst, compression=self.rrmgr_compression, tcp_buf_size=self.rrmgr_tcp_buf_size, connections=self.rrmgr_connections) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. :param volume: volume reference :param connector: connector reference """ export = '%s/%s' % (volume['provider_location'], volume['name']) data = {'export': export, 'name': 'volume'} if volume['provider_location'] in self.shares: data['options'] = self.shares[volume['provider_location']] return { 'driver_volume_type': self.driver_volume_type, 'data': data } def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type. 
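        Only the compression, dedup and description extra specs are
        acted on here; each entry of diff['extra_specs'] is an
        (old, new) pair, so a request such as (illustrative values):

            diff = {'extra_specs': {'compression': ('on', 'off')}}

        results in nms.folder.set_child_prop(folder, 'compression',
        'off') on the owning appliance.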
:param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug('Retype volume request %(vol)s to be %(type)s ' '(host: %(host)s), diff %(diff)s.', {'vol': volume['name'], 'type': new_type, 'host': host, 'diff': diff}) options = dict( compression='compression', dedup='dedup', description='nms:description' ) retyped = False migrated = False model_update = None src_backend = self.__class__.__name__ dst_backend = host['capabilities']['location_info'].split(':')[0] if src_backend != dst_backend: LOG.warning(_LW('Cannot retype from %(src_backend)s to ' '%(dst_backend)s.'), { 'src_backend': src_backend, 'dst_backend': dst_backend }) return False hosts = (volume['host'], host['host']) old, new = hosts if old != new: migrated, provider_location = self.migrate_volume( context, volume, host) if not migrated: provider_location = volume['provider_location'] nms = self.share2nms[provider_location] else: nms_url = host['capabilities']['nms_url'] nms = self._get_nms_for_url(nms_url) model_update = provider_location provider_location = provider_location['provider_location'] share = provider_location.split(':')[1].split('volumes/')[1] folder = '%(share)s/%(volume)s' % { 'share': share, 'volume': volume['name'] } for opt in options: old, new = diff.get('extra_specs').get(opt, (False, False)) if old != new: LOG.debug('Changing %(opt)s from %(old)s to %(new)s.', {'opt': opt, 'old': old, 'new': new}) try: nms.folder.set_child_prop( folder, options[opt], new) retyped = True except exception.NexentaException: LOG.error(_LE('Error trying to change %(opt)s' ' from %(old)s to %(new)s'), {'opt': opt, 'old': old, 'new': new}) return False, None return retyped or migrated, model_update def _do_create_volume(self, volume): nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) folder = '%s/%s' % (dataset, volume['name']) LOG.debug('Creating folder on Nexenta Store %s', folder) nms.folder.create_with_props( vol, folder, {'compression': self.configuration.nexenta_dataset_compression} ) volume_path = self.remote_path(volume) volume_size = volume['size'] try: self._share_folder(nms, vol, folder) if getattr(self.configuration, self.driver_prefix + '_sparsed_volumes'): self._create_sparsed_file(nms, volume_path, volume_size) else: folder_path = '%s/%s' % (vol, folder) compression = nms.folder.get_child_prop( folder_path, 'compression') if compression != 'off': # Disable compression, because otherwise will not use space # on disk. nms.folder.set_child_prop( folder_path, 'compression', 'off') try: self._create_regular_file(nms, volume_path, volume_size) finally: if compression != 'off': # Backup default compression value if it was changed. 
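# (Compression is switched off above because zero-filled data would
# otherwise compress to almost nothing and the file would not reserve
# its nominal size on disk; the value saved earlier is restored here
# once the regular file has been written.)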
nms.folder.set_child_prop( folder_path, 'compression', compression) self._set_rw_permissions_for_all(nms, volume_path) if self._get_nfs_server_version(nfs_share) < 4: sub_share, mnt_path = self._get_subshare_mount_point(nfs_share, volume) self._ensure_share_mounted(sub_share, mnt_path) except exception.NexentaException: try: nms.folder.destroy('%s/%s' % (vol, folder)) except exception.NexentaException: LOG.warning(_LW("Cannot destroy created folder: " "%(vol)s/%(folder)s"), {'vol': vol, 'folder': folder}) raise def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ self._ensure_shares_mounted() snapshot_vol = self._get_snapshot_volume(snapshot) nfs_share = snapshot_vol['provider_location'] volume['provider_location'] = nfs_share nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'], snapshot['name']) folder = '%s/%s' % (dataset, volume['name']) nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder)) try: self._share_folder(nms, vol, folder) except exception.NexentaException: try: nms.folder.destroy('%s/%s' % (vol, folder), '') except exception.NexentaException: LOG.warning(_LW("Cannot destroy cloned folder: " "%(vol)s/%(folder)s"), {'vol': vol, 'folder': folder}) raise if self._get_nfs_server_version(nfs_share) < 4: sub_share, mnt_path = self._get_subshare_mount_point(nfs_share, volume) self._ensure_share_mounted(sub_share, mnt_path) return {'provider_location': volume['provider_location']} def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. :param volume: new volume reference :param src_vref: source volume reference """ LOG.info(_LI('Creating clone of volume: %s'), src_vref['id']) snapshot = {'volume_name': src_vref['name'], 'volume_id': src_vref['id'], 'name': self._get_clone_snapshot_name(volume)} # We don't delete this snapshot, because this snapshot will be origin # of new volume. This snapshot will be automatically promoted by NMS # when user will delete its origin. self.create_snapshot(snapshot) try: return self.create_volume_from_snapshot(volume, snapshot) except exception.NexentaException: LOG.error(_LE('Volume creation failed, deleting created snapshot ' '%(volume_name)s@%(name)s'), snapshot) try: self.delete_snapshot(snapshot) except (exception.NexentaException, exception.SnapshotIsBusy): LOG.warning(_LW('Failed to delete zfs snapshot ' '%(volume_name)s@%(name)s'), snapshot) raise def delete_volume(self, volume): """Deletes a logical volume. 
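        The backing folder is unmounted if necessary and destroyed
        recursively; if the volume originated from a clone, its
        cinder-clone-snapshot-* origin snapshot is destroyed afterwards
        as well. The destructive call boils down to (illustrative
        names):

            nms.folder.destroy('vol/cinder/volume-1234', '-r')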
:param volume: volume reference """ nfs_share = volume.get('provider_location') if nfs_share: nms = self.share2nms[nfs_share] vol, parent_folder = self._get_share_datasets(nfs_share) folder = '%s/%s/%s' % (vol, parent_folder, volume['name']) mount_path = self.remote_path(volume).strip( '/%s' % self.VOLUME_FILE_NAME) if mount_path in self._remotefsclient._read_mounts(): self._execute('umount', mount_path, run_as_root=True) try: props = nms.folder.get_child_props(folder, 'origin') or {} nms.folder.destroy(folder, '-r') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info(_LI('Folder %s does not exist, it was ' 'already deleted.'), folder) return raise origin = props.get('origin') if origin and self._is_clone_snapshot_name(origin): try: nms.snapshot.destroy(origin, '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info(_LI('Snapshot %s does not exist, it was ' 'already deleted.'), origin) return raise def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: volume reference :param new_size: volume new size in GB """ LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'), {'id': volume['id'], 'size': new_size}) nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] volume_path = self.remote_path(volume) if getattr(self.configuration, self.driver_prefix + '_sparsed_volumes'): self._create_sparsed_file(nms, volume_path, new_size) else: block_size_mb = 1 block_count = ((new_size - volume['size']) * units.Gi / (block_size_mb * units.Mi)) nms.appliance.execute( 'dd if=/dev/zero seek=%(seek)d of=%(path)s' ' bs=%(bs)dM count=%(count)d' % { 'seek': volume['size'] * units.Gi / block_size_mb, 'path': volume_path, 'bs': block_size_mb, 'count': block_count } ) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: snapshot reference """ volume = self._get_snapshot_volume(snapshot) nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) folder = '%s/%s/%s' % (vol, dataset, volume['name']) nms.folder.create_snapshot(folder, snapshot['name'], '-r') def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: snapshot reference """ volume = self._get_snapshot_volume(snapshot) nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) folder = '%s/%s/%s' % (vol, dataset, volume['name']) try: nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not ' 'exist, it was already deleted.'), { 'folder': folder, 'snapshot': snapshot, }) return elif 'has dependent clones' in exc.args[0]: LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s has dependent ' 'clones, it will be deleted later.'), { 'folder': folder, 'snapshot': snapshot, }) return def _create_sparsed_file(self, nms, path, size): """Creates file with 0 disk usage. :param nms: nms object :param path: path to new file :param size: size of file """ nms.appliance.execute( 'truncate --size %(size)dG %(path)s' % { 'path': path, 'size': size } ) def _create_regular_file(self, nms, path, size): """Creates regular file of given size. Takes a lot of time for large files. 
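        With the 1 MiB block size used below, the file is written by a
        single dd invocation on the appliance; e.g. a 2 GiB volume gives
        block_count = 2 * units.Gi / units.Mi = 2048, i.e. (illustrative
        path):

            dd if=/dev/zero of=/volumes/vol/cinder/volume-1234/volume bs=1M count=2048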
:param nms: nms object :param path: path to new file :param size: size of file """ block_size_mb = 1 block_count = size * units.Gi / (block_size_mb * units.Mi) LOG.info(_LI('Creating regular file: %s.' 'This may take some time.'), path) nms.appliance.execute( 'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % { 'path': path, 'bs': block_size_mb, 'count': block_count } ) LOG.info(_LI('Regular file: %s created.'), path) def _set_rw_permissions_for_all(self, nms, path): """Sets 666 permissions for the path. :param nms: nms object :param path: path to file """ nms.appliance.execute('chmod ugo+rw %s' % path) def local_path(self, volume): """Get volume path (mounted locally fs path) for given volume. :param volume: volume reference """ nfs_share = volume['provider_location'] return os.path.join(self._get_mount_point_for_share(nfs_share), volume['name'], 'volume') def _get_mount_point_for_share(self, nfs_share): """Returns path to mount point NFS share. :param nfs_share: example 172.18.194.100:/var/nfs """ nfs_share = nfs_share.encode('utf-8') return os.path.join(self.configuration.nexenta_mount_point_base, hashlib.md5(nfs_share).hexdigest()) def remote_path(self, volume): """Get volume path (mounted remotely fs path) for given volume. :param volume: volume reference """ nfs_share = volume['provider_location'] share = nfs_share.split(':')[1].rstrip('/') return '%s/%s/volume' % (share, volume['name']) def _share_folder(self, nms, volume, folder): """Share NFS folder on NexentaStor Appliance. :param nms: nms object :param volume: volume name :param folder: folder name """ path = '%s/%s' % (volume, folder.lstrip('/')) share_opts = { 'read_write': '*', 'read_only': '', 'root': 'nobody', 'extra_options': 'anon=0', 'recursive': 'true', 'anonymous_rw': 'true', } LOG.debug('Sharing folder %s on Nexenta Store', folder) nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path, share_opts) def _load_shares_config(self, share_file): self.shares = {} self.share2nms = {} for share in self._read_config_file(share_file): # A configuration line may be either: # host:/share_name http://user:pass@host:[port]/ # or # host:/share_name http://user:pass@host:[port]/ # -o options=123,rw --other if not share.strip(): continue if share.startswith('#'): continue share_info = re.split(r'\s+', share, 2) share_address = share_info[0].strip() nms_url = share_info[1].strip() share_opts = share_info[2].strip() if len(share_info) > 2 else None if not re.match(r'.+:/.+', share_address): LOG.warning(_LW("Share %s ignored due to invalid format. " "Must be of form address:/export."), share_address) continue self.shares[share_address] = share_opts self.share2nms[share_address] = self._get_nms_for_url(nms_url) LOG.debug('Shares loaded: %s', self.shares) def _get_subshare_mount_point(self, nfs_share, volume): mnt_path = '%s/%s' % ( self._get_mount_point_for_share(nfs_share), volume['name']) sub_share = '%s/%s' % (nfs_share, volume['name']) return sub_share, mnt_path def _ensure_share_mounted(self, nfs_share, mount_path=None): """Ensure that NFS share is mounted on the host. Unlike the parent method this one accepts mount_path as an optional parameter and uses it as a mount point if provided. 
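        The optional mount point is what lets NFSv3 backends mount each
        volume's subfolder separately (see _mount_subfolders and
        _get_subshare_mount_point); e.g. (illustrative values):

            sub_share = '172.18.194.100:/volumes/vol/share/volume-1234'
            mnt_path = '<nexenta_mount_point_base>/<md5 of share>/volume-1234'
            self._ensure_share_mounted(sub_share, mnt_path)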
        :param nfs_share: NFS share name
        :param mount_path: mount path on the host
        """
        mnt_flags = []
        if self.shares.get(nfs_share) is not None:
            mnt_flags = self.shares[nfs_share].split()

        num_attempts = max(1, self.configuration.nfs_mount_attempts)
        for attempt in range(num_attempts):
            try:
                if mount_path is None:
                    self._remotefsclient.mount(nfs_share, mnt_flags)
                else:
                    if mount_path in self._remotefsclient._read_mounts():
                        LOG.info(_LI('Already mounted: %s'), mount_path)
                        return
                    self._execute('mkdir', '-p', mount_path,
                                  check_exit_code=False)
                    self._remotefsclient._mount_nfs(nfs_share, mount_path,
                                                    mnt_flags)
                return
            except Exception as e:
                if attempt == (num_attempts - 1):
                    LOG.error(_LE('Mount failure for %(share)s after '
                                  '%(count)d attempts.'),
                              {'share': nfs_share, 'count': num_attempts})
                    raise exception.NfsException(six.text_type(e))
                LOG.warning(
                    _LW('Mount attempt %(attempt)d failed: %(error)s. '
                        'Retrying mount ...'),
                    {'attempt': attempt, 'error': e})
                greenthread.sleep(1)

    def _mount_subfolders(self):
        ctxt = context.get_admin_context()
        vol_entries = self.db.volume_get_all_by_host(ctxt, self.host)
        for vol in vol_entries:
            nfs_share = vol['provider_location']
            if ((nfs_share in self.shares) and
                    (self._get_nfs_server_version(nfs_share) < 4)):
                sub_share, mnt_path = self._get_subshare_mount_point(
                    nfs_share, vol)
                self._ensure_share_mounted(sub_share, mnt_path)

    def _get_nfs_server_version(self, share):
        if not self.nfs_versions.get(share):
            nms = self.share2nms[share]
            nfs_opts = nms.netsvc.get_confopts(
                'svc:/network/nfs/server:default', 'configure')
            try:
                self.nfs_versions[share] = int(
                    nfs_opts['nfs_server_versmax']['current'])
            except KeyError:
                self.nfs_versions[share] = int(
                    nfs_opts['server_versmax']['current'])
        return self.nfs_versions[share]

    def _get_capacity_info(self, nfs_share):
        """Calculate available space on the NFS share.
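        The result is a (total, free, allocated) tuple built from the
        folder's 'used' and 'available' NMS properties, with total
        computed as free + allocated; e.g. available='10G' and used='2G'
        yield roughly (12 GiB, 10 GiB, 2 GiB), assuming utils.str2size
        converts the NMS size strings to bytes.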
:param nfs_share: example 172.18.194.100:/var/nfs """ nms = self.share2nms[nfs_share] ns_volume, ns_folder = self._get_share_datasets(nfs_share) folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume, ns_folder), 'used|available') free = utils.str2size(folder_props['available']) allocated = utils.str2size(folder_props['used']) return free + allocated, free, allocated def _get_nms_for_url(self, url): """Returns initialized nms object for url.""" auto, scheme, user, password, host, port, path = ( utils.parse_nms_url(url)) return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user, password, auto=auto) def _get_snapshot_volume(self, snapshot): ctxt = context.get_admin_context() return db.volume_get(ctxt, snapshot['volume_id']) def _get_volroot(self, nms): """Returns volroot property value from NexentaStor appliance.""" if not self.nms_cache_volroot: return nms.server.get_prop('volroot') if nms not in self._nms2volroot: self._nms2volroot[nms] = nms.server.get_prop('volroot') return self._nms2volroot[nms] def _get_share_datasets(self, nfs_share): nms = self.share2nms[nfs_share] volroot = self._get_volroot(nms) path = nfs_share.split(':')[1][len(volroot):].strip('/') volume_name = path.split('/')[0] folder_name = '/'.join(path.split('/')[1:]) return volume_name, folder_name def _get_clone_snapshot_name(self, volume): """Return name for snapshot that will be used to clone the volume.""" return 'cinder-clone-snapshot-%(id)s' % volume def _is_clone_snapshot_name(self, snapshot): """Check if snapshot is created for cloning.""" name = snapshot.split('@')[-1] return name.startswith('cinder-clone-snapshot-') def _update_volume_stats(self): """Retrieve stats info for NexentaStor appliance.""" LOG.debug('Updating volume stats') total_space = 0 free_space = 0 shares_with_capacities = {} for mounted_share in self._mounted_shares: total, free, allocated = self._get_capacity_info(mounted_share) shares_with_capacities[mounted_share] = utils.str2gib_size(total) if total_space < utils.str2gib_size(total): total_space = utils.str2gib_size(total) if free_space < utils.str2gib_size(free): free_space = utils.str2gib_size(free) share = mounted_share location_info = '%(driver)s:%(share)s' % { 'driver': self.__class__.__name__, 'share': share } nms_url = self.share2nms[share].url self._stats = { 'vendor_name': 'Nexenta', 'dedup': self.volume_deduplication, 'compression': self.volume_compression, 'description': self.volume_description, 'nms_url': nms_url, 'ns_shares': shares_with_capacities, 'driver_version': self.VERSION, 'storage_protocol': 'NFS', 'total_capacity_gb': total_space, 'free_capacity_gb': free_space, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'location_info': location_info, 'volume_backend_name': self.backend_name, 'nfs_mount_point_base': self.nfs_mount_point_base } cinder-8.0.0/cinder/volume/drivers/remotefs.py0000664000567000056710000015726012701406250022563 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. # Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import hashlib import inspect import json import os import re import tempfile import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from cinder import compute from cinder import db from cinder import exception from cinder import utils from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder.volume import driver LOG = logging.getLogger(__name__) nas_opts = [ # TODO(eharney): deprecate nas_ip and change this to nas_host cfg.StrOpt('nas_ip', default='', help='IP address or Hostname of NAS system.'), cfg.StrOpt('nas_login', default='admin', help='User name to connect to NAS system.'), cfg.StrOpt('nas_password', default='', help='Password to connect to NAS system.', secret=True), cfg.PortOpt('nas_ssh_port', default=22, help='SSH port to use to connect to NAS system.'), cfg.StrOpt('nas_private_key', default='', help='Filename of private key to use for SSH authentication.'), cfg.StrOpt('nas_secure_file_operations', default='auto', help=('Allow network-attached storage systems to operate in a ' 'secure environment where root level access is not ' 'permitted. If set to False, access is as the root user ' 'and insecure. If set to True, access is not as root. ' 'If set to auto, a check is done to determine if this is ' 'a new installation: True is used if so, otherwise ' 'False. Default is auto.')), cfg.StrOpt('nas_secure_file_permissions', default='auto', help=('Set more secure file permissions on network-attached ' 'storage volume files to restrict broad other/world ' 'access. If set to False, volumes are created with open ' 'permissions. If set to True, volumes are created with ' 'permissions for the cinder user and group (660). If ' 'set to auto, a check is done to determine if ' 'this is a new installation: True is used if so, ' 'otherwise False. Default is auto.')), cfg.StrOpt('nas_share_path', default='', help=('Path to the share to use for storing Cinder volumes. ' 'For example: "/srv/export1" for an NFS server export ' 'available at 10.0.5.10:/srv/export1 .')), cfg.StrOpt('nas_mount_options', help=('Options used to mount the storage backend file system ' 'where Cinder volumes are stored.')), ] old_vol_type_opts = [cfg.DeprecatedOpt('glusterfs_sparsed_volumes'), cfg.DeprecatedOpt('glusterfs_qcow2_volumes')] volume_opts = [ cfg.StrOpt('nas_volume_prov_type', default='thin', choices=['thin', 'thick'], deprecated_opts=old_vol_type_opts, help=('Provisioning type that will be used when ' 'creating volumes.')), ] CONF = cfg.CONF CONF.register_opts(nas_opts) CONF.register_opts(volume_opts) def locked_volume_id_operation(f, external=False): """Lock decorator for volume operations. Takes a named lock prior to executing the operation. The lock is named with the id of the volume. This lock can be used by driver methods to prevent conflicts with other operations modifying the same volume. May be applied to methods that take a 'volume' or 'snapshot' argument. 
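    A typical (illustrative) application on a driver method:

        @locked_volume_id_operation
        def delete_volume(self, volume):
            ...

    which serializes all such calls for a given volume id under the
    '<driver_prefix>-<volume_id>' lock.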
""" def lvo_inner1(inst, *args, **kwargs): lock_tag = inst.driver_prefix call_args = inspect.getcallargs(f, inst, *args, **kwargs) if call_args.get('volume'): volume_id = call_args['volume']['id'] elif call_args.get('snapshot'): volume_id = call_args['snapshot']['volume']['id'] else: err_msg = _('The decorated method must accept either a volume or ' 'a snapshot object') raise exception.VolumeBackendAPIException(data=err_msg) @utils.synchronized('%s-%s' % (lock_tag, volume_id), external=external) def lvo_inner2(): return f(inst, *args, **kwargs) return lvo_inner2() return lvo_inner1 class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD): """Common base for drivers that work like NFS.""" driver_volume_type = None driver_prefix = 'remotefs' volume_backend_name = None SHARE_FORMAT_REGEX = r'.+:/.+' def __init__(self, *args, **kwargs): super(RemoteFSDriver, self).__init__(*args, **kwargs) self.shares = {} self._mounted_shares = [] self._execute_as_root = True self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None) if self.configuration: self.configuration.append_config_values(nas_opts) self.configuration.append_config_values(volume_opts) def check_for_setup_error(self): """Just to override parent behavior.""" pass def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. :param volume: volume reference :param connector: connector reference """ data = {'export': volume['provider_location'], 'name': volume['name']} if volume['provider_location'] in self.shares: data['options'] = self.shares[volume['provider_location']] return { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self._get_mount_point_base() } def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(RemoteFSDriver, self).do_setup(context) # Validate the settings for our secure file options. self.configuration.nas_secure_file_permissions = \ self.configuration.nas_secure_file_permissions.lower() self.configuration.nas_secure_file_operations = \ self.configuration.nas_secure_file_operations.lower() valid_secure_opts = ['auto', 'true', 'false'] secure_options = {'nas_secure_file_permissions': self.configuration.nas_secure_file_permissions, 'nas_secure_file_operations': self.configuration.nas_secure_file_operations} for opt_name, opt_value in secure_options.items(): if opt_value not in valid_secure_opts: err_parms = {'name': opt_name, 'value': opt_value} msg = _("NAS config '%(name)s=%(value)s' invalid. Must be " "'auto', 'true', or 'false'") % err_parms LOG.error(msg) raise exception.InvalidConfigurationValue(msg) def _get_provisioned_capacity(self): """Returns the provisioned capacity. Get the sum of sizes of volumes, snapshots and any other files on the mountpoint. """ provisioned_size = 0.0 for share in self.shares.keys(): mount_path = self._get_mount_point_for_share(share) out, _ = self._execute('du', '--bytes', mount_path, run_as_root=True) provisioned_size += int(out.split()[0]) return round(provisioned_size / units.Gi, 2) def _get_mount_point_base(self): """Returns the mount point base for the remote fs. This method facilitates returning mount point base for the specific remote fs. Override this method in the respective driver to return the entry to be used while attach/detach using brick in cinder. If not overridden then it returns None without raising exception to continue working for cases when not used with brick. 
""" LOG.debug("Driver specific implementation needs to return" " mount_point_base.") return None def create_volume(self, volume): """Creates a volume. :param volume: volume reference :returns: provider_location update dict for database """ self._ensure_shares_mounted() volume['provider_location'] = self._find_share(volume['size']) LOG.info(_LI('casted to %s'), volume['provider_location']) self._do_create_volume(volume) return {'provider_location': volume['provider_location']} def _do_create_volume(self, volume): """Create a volume on given remote share. :param volume: volume reference """ volume_path = self.local_path(volume) volume_size = volume['size'] if getattr(self.configuration, self.driver_prefix + '_sparsed_volumes'): self._create_sparsed_file(volume_path, volume_size) else: self._create_regular_file(volume_path, volume_size) self._set_rw_permissions(volume_path) def _ensure_shares_mounted(self): """Look for remote shares in the flags and mount them locally.""" mounted_shares = [] self._load_shares_config(getattr(self.configuration, self.driver_prefix + '_shares_config')) for share in self.shares.keys(): try: self._ensure_share_mounted(share) mounted_shares.append(share) except Exception as exc: LOG.error(_LE('Exception during mounting %s'), exc) self._mounted_shares = mounted_shares LOG.debug('Available shares %s', self._mounted_shares) def delete_volume(self, volume): """Deletes a logical volume. :param volume: volume reference """ if not volume['provider_location']: LOG.warning(_LW('Volume %s does not have ' 'provider_location specified, ' 'skipping'), volume['name']) return self._ensure_share_mounted(volume['provider_location']) mounted_path = self.local_path(volume) self._delete(mounted_path) def ensure_export(self, ctx, volume): """Synchronously recreates an export for a logical volume.""" self._ensure_share_mounted(volume['provider_location']) def create_export(self, ctx, volume, connector): """Exports the volume. Can optionally return a dictionary of changes to the volume object to be persisted. """ pass def remove_export(self, ctx, volume): """Removes an export for a logical volume.""" pass def delete_snapshot(self, snapshot): """Delete snapshot. Do nothing for this driver, but allow manager to handle deletion of snapshot in error state. """ pass def _delete(self, path): # Note(lpetrut): this method is needed in order to provide # interoperability with Windows as it will be overridden. self._execute('rm', '-f', path, run_as_root=self._execute_as_root) def _create_sparsed_file(self, path, size): """Creates a sparse file of a given size in GiB.""" self._execute('truncate', '-s', '%sG' % size, path, run_as_root=self._execute_as_root) def _create_regular_file(self, path, size): """Creates a regular file of given size in GiB.""" block_size_mb = 1 block_count = size * units.Gi / (block_size_mb * units.Mi) self._execute('dd', 'if=/dev/zero', 'of=%s' % path, 'bs=%dM' % block_size_mb, 'count=%d' % block_count, run_as_root=self._execute_as_root) def _fallocate(self, path, size): """Creates a raw file of given size in GiB using fallocate.""" self._execute('fallocate', '--length=%sG' % size, path, run_as_root=True) def _create_qcow2_file(self, path, size_gb): """Creates a QCOW2 file of a given size in GiB.""" self._execute('qemu-img', 'create', '-f', 'qcow2', '-o', 'preallocation=metadata', path, str(size_gb * units.Gi), run_as_root=self._execute_as_root) def _set_rw_permissions(self, path): """Sets access permissions for given NFS path. 
Volume file permissions are set based upon the value of secure_file_permissions: 'true' sets secure access permissions and 'false' sets more open (insecure) access permissions. :param path: the volume file path. """ if self.configuration.nas_secure_file_permissions == 'true': permissions = '660' LOG.debug('File path %(path)s is being set with permissions: ' '%(permissions)s', {'path': path, 'permissions': permissions}) else: permissions = 'ugo+rw' LOG.warning(_LW('%(path)s is being set with open permissions: ' '%(perm)s'), {'path': path, 'perm': permissions}) self._execute('chmod', permissions, path, run_as_root=self._execute_as_root) def _set_rw_permissions_for_all(self, path): """Sets 666 permissions for the path.""" self._execute('chmod', 'ugo+rw', path, run_as_root=self._execute_as_root) def _set_rw_permissions_for_owner(self, path): """Sets read-write permissions to the owner for the path.""" self._execute('chmod', 'u+rw', path, run_as_root=self._execute_as_root) def local_path(self, volume): """Get volume path (mounted locally fs path) for given volume. :param volume: volume reference """ remotefs_share = volume['provider_location'] return os.path.join(self._get_mount_point_for_share(remotefs_share), volume['name']) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" run_as_root = self._execute_as_root image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume['size'], run_as_root=run_as_root) # NOTE (leseb): Set the virtual size of the image # the raw conversion overwrote the destination file # (which had the correct size) # with the fetched glance image size, # thus the initial 'size' parameter is not honored # this sets the size to the one asked in the first place by the user # and then verify the final virtual size image_utils.resize_image(self.local_path(volume), volume['size'], run_as_root=run_as_root) data = image_utils.qemu_img_info(self.local_path(volume), run_as_root=run_as_root) virt_size = data.virtual_size / units.Gi if virt_size != volume['size']: raise exception.ImageUnacceptable( image_id=image_id, reason=(_("Expected volume size was %d") % volume['size']) + (_(" but size is now %d") % virt_size)) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" image_utils.upload_volume(context, image_service, image_meta, self.local_path(volume)) def _read_config_file(self, config_file): # Returns list of lines in file with open(config_file) as f: return f.readlines() def _load_shares_config(self, share_file=None): self.shares = {} if all((self.configuration.nas_ip, self.configuration.nas_share_path)): LOG.debug('Using nas_ip and nas_share_path configuration.') nas_ip = self.configuration.nas_ip nas_share_path = self.configuration.nas_share_path share_address = '%s:%s' % (nas_ip, nas_share_path) if not re.match(self.SHARE_FORMAT_REGEX, share_address): msg = (_("Share %s ignored due to invalid format. Must " "be of form address:/export. 
Please check the " "nas_ip and nas_share_path settings."), share_address) raise exception.InvalidConfigurationValue(msg) self.shares[share_address] = self.configuration.nas_mount_options elif share_file is not None: LOG.debug('Loading shares from %s.', share_file) for share in self._read_config_file(share_file): # A configuration line may be either: # host:/vol_name # or # host:/vol_name -o options=123,rw --other if not share.strip(): # Skip blank or whitespace-only lines continue if share.startswith('#'): continue share_info = share.split(' ', 1) # results in share_info = # [ 'address:/vol', '-o options=123,rw --other' ] share_address = share_info[0].strip() # Replace \040 with a space, to support paths with spaces share_address = share_address.replace("\\040", " ") share_opts = None if len(share_info) > 1: share_opts = share_info[1].strip() if not re.match(self.SHARE_FORMAT_REGEX, share_address): LOG.error(_LE("Share %s ignored due to invalid format. " "Must be of form address:/export."), share_address) continue self.shares[share_address] = share_opts LOG.debug("shares loaded: %s", self.shares) def _get_mount_point_for_share(self, path): raise NotImplementedError() def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" pass def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, update the stats first. """ if refresh or not self._stats: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.volume_backend_name data['vendor_name'] = 'Open Source' data['driver_version'] = self.get_version() data['storage_protocol'] = self.driver_volume_type self._ensure_shares_mounted() global_capacity = 0 global_free = 0 for share in self._mounted_shares: capacity, free, used = self._get_capacity_info(share) global_capacity += capacity global_free += free data['total_capacity_gb'] = global_capacity / float(units.Gi) data['free_capacity_gb'] = global_free / float(units.Gi) data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = False self._stats = data def _get_capacity_info(self, share): raise NotImplementedError() def _find_share(self, volume_size_in_gib): raise NotImplementedError() def _ensure_share_mounted(self, share): raise NotImplementedError() def secure_file_operations_enabled(self): """Determine if driver is operating in Secure File Operations mode. The Cinder Volume driver needs to query if this driver is operating in a secure file mode; check our nas_secure_file_operations flag. """ if self.configuration.nas_secure_file_operations == 'true': return True return False def set_nas_security_options(self, is_new_cinder_install): """Determine the setting to use for Secure NAS options. This method must be overridden by child wishing to use secure NAS file operations. This base method will set the NAS security options to false. """ doc_html = "http://docs.openstack.org/admin-guide-cloud" \ "/blockstorage_nfs_backend.html" self.configuration.nas_secure_file_operations = 'false' LOG.warning(_LW("The NAS file operations will be run as root: " "allowing root level access at the storage backend. " "This is considered an insecure NAS environment. 
" "Please see %s for information on a secure NAS " "configuration."), doc_html) self.configuration.nas_secure_file_permissions = 'false' LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing " "other/world read & write access). This is considered " "an insecure NAS environment. Please see %s for " "information on a secure NFS configuration."), doc_html) def _determine_nas_security_option_setting(self, nas_option, mount_point, is_new_cinder_install): """Determine NAS security option setting when 'auto' is assigned. This method determines the final 'true'/'false' setting of an NAS security option when the default value of 'auto' has been detected. If the nas option isn't 'auto' then its current value is used. :param nas_option: The NAS security option value loaded from config. :param mount_point: Mount where indicator file is written. :param is_new_cinder_install: boolean for new Cinder installation. :return string: 'true' or 'false' for new option setting. """ if nas_option == 'auto': # For auto detection, we first check to see if we have been # through this process before by checking for the existence of # the Cinder secure environment indicator file. file_name = '.cinderSecureEnvIndicator' file_path = os.path.join(mount_point, file_name) if os.path.isfile(file_path): nas_option = 'true' LOG.info(_LI('Cinder secure environment ' 'indicator file exists.')) else: # The indicator file does not exist. If it is a new # installation, set to 'true' and create the indicator file. if is_new_cinder_install: nas_option = 'true' try: with open(file_path, 'w') as fh: fh.write('Detector file for Cinder secure ' 'environment usage.\n') fh.write('Do not delete this file.\n') # Set the permissions on our special marker file to # protect from accidental removal (owner write only). self._execute('chmod', '640', file_path, run_as_root=False) LOG.info(_LI('New Cinder secure environment indicator' ' file created at path %s.'), file_path) except IOError as err: LOG.error(_LE('Failed to created Cinder secure ' 'environment indicator file: %s'), err) else: # For existing installs, we default to 'false'. The # admin can always set the option at the driver config. nas_option = 'false' return nas_option class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD): """Base class for remotefs drivers implementing qcow2 snapshots. Driver must implement: _local_volume_dir(self, volume) """ def __init__(self, *args, **kwargs): self._remotefsclient = None self.base = None self._nova = None super(RemoteFSSnapDriver, self).__init__(*args, **kwargs) def do_setup(self, context): super(RemoteFSSnapDriver, self).do_setup(context) self._nova = compute.API() def _local_volume_dir(self, volume): share = volume['provider_location'] local_dir = self._get_mount_point_for_share(share) return local_dir def _local_path_volume(self, volume): path_to_disk = os.path.join( self._local_volume_dir(volume), volume['name']) return path_to_disk def _get_new_snap_path(self, snapshot): vol_path = self.local_path(snapshot['volume']) snap_path = '%s.%s' % (vol_path, snapshot['id']) return snap_path def _local_path_volume_info(self, volume): return '%s%s' % (self.local_path(volume), '.info') def _read_file(self, filename): """This method is to make it easier to stub out code for testing. Returns a string representing the contents of the file. 
""" with open(filename, 'r') as f: return f.read() def _write_info_file(self, info_path, snap_info): if 'active' not in snap_info.keys(): msg = _("'active' must be present when writing snap_info.") raise exception.RemoteFSException(msg) with open(info_path, 'w') as f: json.dump(snap_info, f, indent=1, sort_keys=True) def _qemu_img_info_base(self, path, volume_name, basedir): """Sanitize image_utils' qemu_img_info. This code expects to deal only with relative filenames. """ info = image_utils.qemu_img_info(path) if info.image: info.image = os.path.basename(info.image) if info.backing_file: backing_file_template = \ "(%(basedir)s/[0-9a-f]+/)?%" \ "(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$" % { 'basedir': basedir, 'volname': volume_name } if not re.match(backing_file_template, info.backing_file): msg = _("File %(path)s has invalid backing file " "%(bfile)s, aborting.") % {'path': path, 'bfile': info.backing_file} raise exception.RemoteFSException(msg) info.backing_file = os.path.basename(info.backing_file) return info def _qemu_img_info(self, path, volume_name): raise NotImplementedError() def _img_commit(self, path): self._execute('qemu-img', 'commit', path, run_as_root=self._execute_as_root) self._delete(path) def _rebase_img(self, image, backing_file, volume_format): self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image, '-F', volume_format, run_as_root=self._execute_as_root) def _read_info_file(self, info_path, empty_if_missing=False): """Return dict of snapshot information. :param: info_path: path to file :param: empty_if_missing: True=return empty dict if no file """ if not os.path.exists(info_path): if empty_if_missing is True: return {} return json.loads(self._read_file(info_path)) def _get_backing_chain_for_path(self, volume, path): """Returns list of dicts containing backing-chain information. Includes 'filename', and 'backing-filename' for each applicable entry. Consider converting this to use --backing-chain and --output=json when environment supports qemu-img 1.5.0. :param volume: volume reference :param path: path to image file at top of chain """ output = [] info = self._qemu_img_info(path, volume['name']) new_info = {} new_info['filename'] = os.path.basename(path) new_info['backing-filename'] = info.backing_file output.append(new_info) while new_info['backing-filename']: filename = new_info['backing-filename'] path = os.path.join(self._local_volume_dir(volume), filename) info = self._qemu_img_info(path, volume['name']) backing_filename = info.backing_file new_info = {} new_info['filename'] = filename new_info['backing-filename'] = backing_filename output.append(new_info) return output def _get_hash_str(self, base_str): """Return a string that represents hash of base_str. Returns string in a hex format. """ if isinstance(base_str, six.text_type): base_str = base_str.encode('utf-8') return hashlib.md5(base_str).hexdigest() def _get_mount_point_for_share(self, share): """Return mount point for share. :param share: example 172.18.194.100:/var/fs """ return self._remotefsclient.get_mount_point(share) def _get_available_capacity(self, share): """Calculate available space on the share. 
:param share: example 172.18.194.100:/var/fs """ mount_point = self._get_mount_point_for_share(share) out, _ = self._execute('df', '--portability', '--block-size', '1', mount_point, run_as_root=self._execute_as_root) out = out.splitlines()[1] size = int(out.split()[1]) available = int(out.split()[3]) return available, size def _get_capacity_info(self, remotefs_share): available, size = self._get_available_capacity(remotefs_share) return size, available, size - available def _get_mount_point_base(self): return self.base def _ensure_share_writable(self, path): """Ensure that the Cinder user can write to the share. If not, raise an exception. :param path: path to test :raises: RemoteFSException :returns: None """ prefix = '.cinder-write-test-' + str(os.getpid()) + '-' try: tempfile.NamedTemporaryFile(prefix=prefix, dir=path) except OSError: msg = _('Share at %(dir)s is not writable by the ' 'Cinder volume service. Snapshot operations will not be ' 'supported.') % {'dir': path} raise exception.RemoteFSException(msg) def _copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" # If snapshots exist, flatten to a temporary image, and upload it active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) info = self._qemu_img_info(active_file_path, volume['name']) backing_file = info.backing_file root_file_fmt = info.file_format tmp_params = { 'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']), 'suffix': '.img' } with image_utils.temporary_file(**tmp_params) as temp_path: if backing_file or (root_file_fmt != 'raw'): # Convert due to snapshots # or volume data not being stored in raw format # (upload_volume assumes raw format input) image_utils.convert_image(active_file_path, temp_path, 'raw') upload_path = temp_path else: upload_path = active_file_path image_utils.upload_volume(context, image_service, image_meta, upload_path) def get_active_image_from_info(self, volume): """Returns filename of the active image from the info file.""" info_file = self._local_path_volume_info(volume) snap_info = self._read_info_file(info_file, empty_if_missing=True) if not snap_info: # No info file = no snapshots exist vol_path = os.path.basename(self.local_path(volume)) return vol_path return snap_info['active'] def _create_cloned_volume(self, volume, src_vref): LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'), {'src': src_vref['id'], 'dst': volume['id']}) if src_vref['status'] != 'available': msg = _("Volume status must be 'available'.") raise exception.InvalidVolume(msg) volume_name = CONF.volume_name_template % volume['id'] volume_info = {'provider_location': src_vref['provider_location'], 'size': src_vref['size'], 'id': volume['id'], 'name': volume_name, 'status': src_vref['status']} temp_snapshot = {'volume_name': volume_name, 'size': src_vref['size'], 'volume_size': src_vref['size'], 'name': 'clone-snap-%s' % src_vref['id'], 'volume_id': src_vref['id'], 'id': 'tmp-snap-%s' % src_vref['id'], 'volume': src_vref} self._create_snapshot(temp_snapshot) try: self._copy_volume_from_snapshot(temp_snapshot, volume_info, volume['size']) finally: self._delete_snapshot(temp_snapshot) return {'provider_location': src_vref['provider_location']} def _delete_stale_snapshot(self, snapshot): info_path = self._local_path_volume_info(snapshot['volume']) snap_info = self._read_info_file(info_path) snapshot_file = snap_info[snapshot['id']] active_file = 
self.get_active_image_from_info(snapshot['volume']) snapshot_path = os.path.join( self._local_volume_dir(snapshot['volume']), snapshot_file) if (snapshot_file == active_file): return LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id']) self._delete(snapshot_path) del(snap_info[snapshot['id']]) self._write_info_file(info_path, snap_info) def _delete_snapshot(self, snapshot): """Delete a snapshot. If volume status is 'available', delete snapshot here in Cinder using qemu-img. If volume status is 'in-use', calculate what qcow2 files need to merge, and call to Nova to perform this operation. :raises: InvalidVolume if status not acceptable :raises: RemoteFSException(msg) if operation fails :returns: None """ LOG.debug('Deleting snapshot %s:', snapshot['id']) volume_status = snapshot['volume']['status'] if volume_status not in ['available', 'in-use']: msg = _('Volume status must be "available" or "in-use".') raise exception.InvalidVolume(msg) vol_path = self._local_volume_dir(snapshot['volume']) self._ensure_share_writable(vol_path) # Determine the true snapshot file for this snapshot # based on the .info file info_path = self._local_path_volume_info(snapshot['volume']) snap_info = self._read_info_file(info_path, empty_if_missing=True) if snapshot['id'] not in snap_info: # If snapshot info file is present, but snapshot record does not # exist, do not attempt to delete. # (This happens, for example, if snapshot_create failed due to lack # of permission to write to the share.) LOG.info(_LI('Snapshot record for %s is not present, allowing ' 'snapshot_delete to proceed.'), snapshot['id']) return snapshot_file = snap_info[snapshot['id']] LOG.debug('snapshot_file for this snap is: %s', snapshot_file) snapshot_path = os.path.join( self._local_volume_dir(snapshot['volume']), snapshot_file) snapshot_path_img_info = self._qemu_img_info( snapshot_path, snapshot['volume']['name']) base_file = snapshot_path_img_info.backing_file if base_file is None: # There should always be at least the original volume # file as base. 
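# A missing backing file therefore indicates a stale overlay: the qcow2
# was created but never completed its snapshot bookkeeping (for example
# a failed snapshot_create), so _delete_stale_snapshot() above simply
# removes the file and drops its entry from the .info mapping.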
LOG.warning(_LW('No backing file found for %s, allowing ' 'snapshot to be deleted.'), snapshot_path) # Snapshot may be stale, so just delete it and update the # info file instead of blocking return self._delete_stale_snapshot(snapshot) base_path = os.path.join(vol_path, base_file) base_file_img_info = self._qemu_img_info(base_path, snapshot['volume']['name']) # Find what file has this as its backing file active_file = self.get_active_image_from_info(snapshot['volume']) active_file_path = os.path.join(vol_path, active_file) if volume_status == 'in-use': # Online delete context = snapshot['context'] new_base_file = base_file_img_info.backing_file base_id = None for key, value in snap_info.items(): if value == base_file and key != 'active': base_id = key break if base_id is None: # This means we are deleting the oldest snapshot LOG.debug('No %(base_id)s found for %(file)s', {'base_id': 'base_id', 'file': snapshot_file}) online_delete_info = { 'active_file': active_file, 'snapshot_file': snapshot_file, 'base_file': base_file, 'base_id': base_id, 'new_base_file': new_base_file } return self._delete_snapshot_online(context, snapshot, online_delete_info) if snapshot_file == active_file: # There is no top file # T0 | T1 | # base | snapshot_file | None # (guaranteed to| (being deleted, | # exist) | committed down) | self._img_commit(snapshot_path) # Active file has changed snap_info['active'] = base_file else: # T0 | T1 | T2 | T3 # base | snapshot_file | higher_file | highest_file # (guaranteed to | (being deleted, | (guaranteed to | (may exist) # exist, not | committed down) | exist, needs | # used here) | | ptr update) | backing_chain = self._get_backing_chain_for_path( snapshot['volume'], active_file_path) # This file is guaranteed to exist since we aren't operating on # the active file. higher_file = next((os.path.basename(f['filename']) for f in backing_chain if f.get('backing-filename', '') == snapshot_file), None) if higher_file is None: msg = _('No file found with %s as backing file.') %\ snapshot_file raise exception.RemoteFSException(msg) higher_id = next((i for i in snap_info if snap_info[i] == higher_file and i != 'active'), None) if higher_id is None: msg = _('No snap found with %s as backing file.') %\ higher_file raise exception.RemoteFSException(msg) self._img_commit(snapshot_path) higher_file_path = os.path.join(vol_path, higher_file) base_file_fmt = base_file_img_info.file_format self._rebase_img(higher_file_path, base_file, base_file_fmt) # Remove snapshot_file from info del(snap_info[snapshot['id']]) self._write_info_file(info_path, snap_info) def _create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. Snapshot must not be the active snapshot. (offline) """ if snapshot['status'] != 'available': msg = _('Snapshot status must be "available" to clone.') raise exception.InvalidSnapshot(msg) self._ensure_shares_mounted() volume['provider_location'] = self._find_share(volume['size']) self._do_create_volume(volume) self._copy_volume_from_snapshot(snapshot, volume, volume['size']) return {'provider_location': volume['provider_location']} def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): raise NotImplementedError() def _do_create_snapshot(self, snapshot, backing_filename, new_snap_path): """Create a QCOW2 file backed by another file. 
:param snapshot: snapshot reference :param backing_filename: filename of file that will back the new qcow2 file :param new_snap_path: filename of new qcow2 file """ backing_path_full_path = os.path.join( self._local_volume_dir(snapshot['volume']), backing_filename) command = ['qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s' % backing_path_full_path, new_snap_path] self._execute(*command, run_as_root=self._execute_as_root) info = self._qemu_img_info(backing_path_full_path, snapshot['volume']['name']) backing_fmt = info.file_format command = ['qemu-img', 'rebase', '-u', '-b', backing_filename, '-F', backing_fmt, new_snap_path] self._execute(*command, run_as_root=self._execute_as_root) self._set_rw_permissions(new_snap_path) def _create_snapshot(self, snapshot): """Create a snapshot. If volume is attached, call to Nova to create snapshot, providing a qcow2 file. Cinder creates and deletes qcow2 files, but Nova is responsible for transitioning the VM between them and handling live transfers of data between files as required. If volume is detached, create locally with qemu-img. Cinder handles manipulation of qcow2 files. A file named volume-<uuid>.info is stored with the volume data and is a JSON table which contains a mapping between Cinder snapshot UUIDs and filenames, as these associations will change as snapshots are deleted. Basic snapshot operation: 1. Initial volume file: volume-1234 2. Snapshot created: volume-1234 <- volume-1234.aaaa volume-1234.aaaa becomes the new "active" disk image. If the volume is not attached, this filename will be used to attach the volume to a VM at volume-attach time. If the volume is attached, the VM will switch to this file as part of the snapshot process. Note that volume-1234.aaaa represents changes after snapshot 'aaaa' was created. So the data for snapshot 'aaaa' is actually in the backing file(s) of volume-1234.aaaa. This file has a qcow2 header recording the fact that volume-1234 is its backing file. Delta changes since the snapshot was created are stored in this file, and the backing file (volume-1234) does not change. info file: { 'active': 'volume-1234.aaaa', 'aaaa': 'volume-1234.aaaa' } 3. Second snapshot created: volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb volume-1234.bbbb now becomes the "active" disk image, recording changes made to the volume. info file: { 'active': 'volume-1234.bbbb', (* changed!) 'aaaa': 'volume-1234.aaaa', 'bbbb': 'volume-1234.bbbb' } (* added!) 4. Snapshot deletion when volume is attached ('in-use' state): * When first snapshot is deleted, Cinder calls Nova for online snapshot deletion. Nova deletes snapshot with id "aaaa" and makes snapshot with id "bbbb" point to the base image. Snapshot with id "bbbb" is the active image. volume-1234 <- volume-1234.bbbb info file: { 'active': 'volume-1234.bbbb', 'bbbb': 'volume-1234.bbbb' } * When second snapshot is deleted, Cinder calls Nova for online snapshot deletion. Nova deletes snapshot with id "bbbb" by pulling volume-1234's data into volume-1234.bbbb. This (logically) removes snapshot with id "bbbb" and the active file remains the same. volume-1234.bbbb info file: { 'active': 'volume-1234.bbbb' } TODO (deepakcs): Change this once Nova supports blockCommit for in-use volumes. 5. Snapshot deletion when volume is detached ('available' state): * When first snapshot is deleted, Cinder does the snapshot deletion. volume-1234.aaaa is removed from the snapshot chain. The data from it is merged into its parent.
volume-1234.bbbb is rebased, having volume-1234 as its new parent. volume-1234 <- volume-1234.bbbb info file: { 'active': 'volume-1234.bbbb', 'bbbb': 'volume-1234.bbbb' } * When second snapshot is deleted, Cinder does the snapshot deletion. volume-1234.bbbb is removed from the snapshot chain. The base image, volume-1234, becomes the active image for this volume again. volume-1234 info file: { 'active': 'volume-1234' } (* changed!) """ status = snapshot['volume']['status'] if status not in ['available', 'in-use']: msg = _('Volume status must be "available" or "in-use"' ' for snapshot. (is %s)') % status raise exception.InvalidVolume(msg) info_path = self._local_path_volume_info(snapshot['volume']) snap_info = self._read_info_file(info_path, empty_if_missing=True) backing_filename = self.get_active_image_from_info( snapshot['volume']) new_snap_path = self._get_new_snap_path(snapshot) if status == 'in-use': self._create_snapshot_online(snapshot, backing_filename, new_snap_path) else: self._do_create_snapshot(snapshot, backing_filename, new_snap_path) snap_info['active'] = os.path.basename(new_snap_path) snap_info[snapshot['id']] = os.path.basename(new_snap_path) self._write_info_file(info_path, snap_info) def _create_snapshot_online(self, snapshot, backing_filename, new_snap_path): # Perform online snapshot via Nova context = snapshot['context'] self._do_create_snapshot(snapshot, backing_filename, new_snap_path) connection_info = { 'type': 'qcow2', 'new_file': os.path.basename(new_snap_path), 'snapshot_id': snapshot['id'] } try: result = self._nova.create_volume_snapshot( context, snapshot['volume_id'], connection_info) LOG.debug('nova call result: %s', result) except Exception: LOG.exception(_LE('Call to Nova to create snapshot failed')) raise # Loop and wait for result # Nova will call Cinderclient to update the status in the database # An update of progress = '90%' means that Nova is done seconds_elapsed = 0 increment = 1 timeout = 600 while True: s = db.snapshot_get(context, snapshot['id']) LOG.debug('Status of snapshot %(id)s is now %(status)s', {'id': snapshot['id'], 'status': s['status']}) if s['status'] == 'creating': if s['progress'] == '90%': # Nova tasks completed successfully break time.sleep(increment) seconds_elapsed += increment elif s['status'] == 'error': msg = _('Nova returned "error" status ' 'while creating snapshot.') raise exception.RemoteFSException(msg) elif s['status'] == 'deleting' or s['status'] == 'error_deleting': msg = _('Snapshot %(id)s has been asked to be deleted while ' 'waiting for it to become available.
Perhaps a ' 'concurrent request was made.') % {'id': snapshot['id']} raise exception.RemoteFSConcurrentRequest(msg) if 10 < seconds_elapsed <= 20: increment = 2 elif 20 < seconds_elapsed <= 60: increment = 5 elif 60 < seconds_elapsed: increment = 10 if seconds_elapsed > timeout: msg = _('Timed out while waiting for Nova update ' 'for creation of snapshot %s.') % snapshot['id'] raise exception.RemoteFSException(msg) def _delete_snapshot_online(self, context, snapshot, info): # Update info over the course of this method # active file never changes info_path = self._local_path_volume(snapshot['volume']) + '.info' snap_info = self._read_info_file(info_path) if info['active_file'] == info['snapshot_file']: # blockRebase/Pull base into active # info['base'] => snapshot_file file_to_delete = info['base_file'] if info['base_id'] is None: # Passing base=none to blockRebase ensures that # libvirt blanks out the qcow2 backing file pointer new_base = None else: new_base = info['new_base_file'] snap_info[info['base_id']] = info['snapshot_file'] delete_info = {'file_to_merge': new_base, 'merge_target_file': None, # current 'type': 'qcow2', 'volume_id': snapshot['volume']['id']} del(snap_info[snapshot['id']]) else: # blockCommit snapshot into base # info['base'] <= snapshot_file # delete record of snapshot file_to_delete = info['snapshot_file'] delete_info = {'file_to_merge': info['snapshot_file'], 'merge_target_file': info['base_file'], 'type': 'qcow2', 'volume_id': snapshot['volume']['id']} del(snap_info[snapshot['id']]) try: self._nova.delete_volume_snapshot( context, snapshot['id'], delete_info) except Exception: LOG.exception(_LE('Call to Nova delete snapshot failed')) raise # Loop and wait for result # Nova will call Cinderclient to update the status in the database # An update of progress = '90%' means that Nova is done seconds_elapsed = 0 increment = 1 timeout = 7200 while True: s = db.snapshot_get(context, snapshot['id']) if s['status'] == 'deleting': if s['progress'] == '90%': # Nova tasks completed successfully break else: LOG.debug('status of snapshot %s is still "deleting"... 
' 'waiting', snapshot['id']) time.sleep(increment) seconds_elapsed += increment else: msg = _('Unable to delete snapshot %(id)s, ' 'status: %(status)s.') % {'id': snapshot['id'], 'status': s['status']} raise exception.RemoteFSException(msg) if 10 < seconds_elapsed <= 20: increment = 2 elif 20 < seconds_elapsed <= 60: increment = 5 elif 60 < seconds_elapsed: increment = 10 if seconds_elapsed > timeout: msg = _('Timed out while waiting for Nova update ' 'for deletion of snapshot %(id)s.') %\ {'id': snapshot['id']} raise exception.RemoteFSException(msg) # Write info file updated above self._write_info_file(info_path, snap_info) # Delete stale file path_to_delete = os.path.join( self._local_volume_dir(snapshot['volume']), file_to_delete) self._execute('rm', '-f', path_to_delete, run_as_root=True) @locked_volume_id_operation def create_snapshot(self, snapshot): """Apply locking to the create snapshot operation.""" return self._create_snapshot(snapshot) @locked_volume_id_operation def delete_snapshot(self, snapshot): """Apply locking to the delete snapshot operation.""" return self._delete_snapshot(snapshot) @locked_volume_id_operation def create_volume_from_snapshot(self, volume, snapshot): return self._create_volume_from_snapshot(volume, snapshot) @locked_volume_id_operation def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" return self._create_cloned_volume(volume, src_vref) @locked_volume_id_operation def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" return self._copy_volume_to_image(context, volume, image_service, image_meta) cinder-8.0.0/cinder/volume/drivers/block_device.py0000664000567000056710000002711612701406250023344 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
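# The RemoteFS snapshot code above polls the snapshot record in the Cinder
# DB while Nova performs the online snapshot work, stretching the sleep
# interval as time passes. A minimal standalone sketch of that backoff
# pattern follows; the helper name and the check_done callable are
# illustrative, not part of Cinder.
import time


def poll_with_backoff(check_done, timeout=600):
    """Poll check_done() until it returns True, with a growing interval."""
    seconds_elapsed = 0
    increment = 1
    while not check_done():
        time.sleep(increment)
        seconds_elapsed += increment
        # Same schedule as _create_snapshot_online: 1s at first,
        # then 2s, 5s and finally 10s between polls.
        if 10 < seconds_elapsed <= 20:
            increment = 2
        elif 20 < seconds_elapsed <= 60:
            increment = 5
        elif 60 < seconds_elapsed:
            increment = 10
        if seconds_elapsed > timeout:
            raise RuntimeError('Timed out waiting for Nova update')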
import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _, _LI, _LW from cinder.image import image_utils from cinder import objects from cinder import utils from cinder.volume import driver from cinder.volume import utils as volutils LOG = logging.getLogger(__name__) volume_opts = [ cfg.ListOpt('available_devices', default=[], help='List of all available devices'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class BlockDeviceDriver(driver.BaseVD, driver.LocalVD, driver.CloneableImageVD, driver.TransferVD): VERSION = '2.2.0' def __init__(self, *args, **kwargs): super(BlockDeviceDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) self.backend_name = \ self.configuration.safe_get('volume_backend_name') or "BlockDev" target_driver =\ self.target_mapping[self.configuration.safe_get('iscsi_helper')] self.target_driver = importutils.import_object( target_driver, configuration=self.configuration, db=self.db, executor=self._execute) def check_for_setup_error(self): pass def _update_provider_location(self, object, device): # We update provider_location and host to mark device as used to # avoid race with other threads. # TODO(ynesenenko): need to remove DB access from driver object.update({'provider_location': device, 'host': self.host}) object.save() @utils.synchronized('block_device', external=True) def create_volume(self, volume): device = self.find_appropriate_size_device(volume.size) LOG.info(_LI("Creating %(volume)s on %(device)s"), {"volume": volume.name, "device": device}) self._update_provider_location(volume, device) def delete_volume(self, volume): """Deletes a logical volume.""" self._clear_block_device(volume) def _clear_block_device(self, device): """Deletes a block device.""" dev_path = self.local_path(device) if not dev_path or dev_path not in \ self.configuration.available_devices: return if os.path.exists(dev_path) and \ self.configuration.volume_clear != 'none': dev_size = self._get_devices_sizes([dev_path]) volutils.clear_volume( dev_size[dev_path], dev_path, volume_clear=self.configuration.volume_clear, volume_clear_size=self.configuration.volume_clear_size) else: LOG.warning(_LW("The device %s won't be cleared."), device) if device.status == "error_deleting": msg = _("Failed to delete device.") LOG.error(msg, resource=device) raise exception.VolumeDriverException(msg) def local_path(self, device): if device.provider_location: path = device.provider_location.rsplit(" ", 1) return path[-1] else: return None def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume.size) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" image_utils.upload_volume(context, image_service, image_meta, self.local_path(volume)) @utils.synchronized('block_device', external=True) def create_cloned_volume(self, volume, src_vref): LOG.info(_LI('Creating clone of volume: %s.'), src_vref.id) device = self.find_appropriate_size_device(src_vref.size) dev_size = self._get_devices_sizes([device]) volutils.copy_volume( self.local_path(src_vref), device, dev_size[device], self.configuration.volume_dd_blocksize, 
execute=self._execute) self._update_provider_location(volume, device) def get_volume_stats(self, refresh=False): if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" dict_of_devices_sizes = self._devices_sizes() used_devices = self._get_used_devices() total_size = 0 free_size = 0 for device, size in dict_of_devices_sizes.items(): if device not in used_devices: free_size += size total_size += size LOG.debug("Updating volume stats.") backend_name = self.configuration.safe_get('volume_backend_name') data = {'total_capacity_gb': total_size / units.Ki, 'free_capacity_gb': free_size / units.Ki, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'volume_backend_name': backend_name or self.__class__.__name__, 'vendor_name': "Open Source", 'driver_version': self.VERSION, 'storage_protocol': 'unknown'} self._stats = data def _get_used_paths(self, lst): used_dev = set() for item in lst: local_path = self.local_path(item) if local_path: used_dev.add(local_path) return used_dev def _get_used_devices(self): lst = objects.VolumeList.get_all_by_host(context.get_admin_context(), self.host) used_devices = self._get_used_paths(lst) snp_lst = objects.SnapshotList.get_by_host(context.get_admin_context(), self.host) return used_devices.union(self._get_used_paths(snp_lst)) def _get_devices_sizes(self, dev_paths): """Return devices' sizes in Mb""" out, _err = self._execute('blockdev', '--getsize64', *dev_paths, run_as_root=True) dev_sizes = {} out = out.split('\n') # blockdev returns devices' sizes in order that # they have been passed to it. for n, size in enumerate(out[:-1]): dev_sizes[dev_paths[n]] = int(size) / units.Mi return dev_sizes def _devices_sizes(self): available_devices = self.configuration.available_devices return self._get_devices_sizes(available_devices) def find_appropriate_size_device(self, size): dict_of_devices_sizes = self._devices_sizes() free_devices = (set(self.configuration.available_devices) - self._get_used_devices()) if not free_devices: raise exception.CinderException(_("No free disk")) possible_device = None possible_device_size = None for device in free_devices: dev_size = dict_of_devices_sizes[device] if (size * units.Ki <= dev_size and (possible_device is None or dev_size < possible_device_size)): possible_device = device possible_device_size = dev_size if possible_device: return possible_device else: raise exception.CinderException(_("No big enough free disk")) def extend_volume(self, volume, new_size): dev_path = self.local_path(volume) total_size = self._get_devices_sizes([dev_path]) # Convert from Megabytes to Gigabytes size = total_size[dev_path] / units.Ki if size < new_size: msg = _("Insufficient free space available to extend volume.") LOG.error(msg, resource=volume) raise exception.CinderException(msg) @utils.synchronized('block_device', external=True) def create_snapshot(self, snapshot): volume = snapshot.volume if volume.status != 'available': msg = _("Volume is not available.") LOG.error(msg, resource=volume) raise exception.CinderException(msg) LOG.info(_LI('Creating volume snapshot: %s.'), snapshot.id) device = self.find_appropriate_size_device(snapshot.volume_size) dev_size = self._get_devices_sizes([device]) volutils.copy_volume( self.local_path(volume), device, dev_size[device], self.configuration.volume_dd_blocksize, execute=self._execute) self._update_provider_location(snapshot, device) def delete_snapshot(self, snapshot): 
self._clear_block_device(snapshot) @utils.synchronized('block_device', external=True) def create_volume_from_snapshot(self, volume, snapshot): LOG.info(_LI('Creating volume %s from snapshot.'), volume.id) device = self.find_appropriate_size_device(snapshot.volume_size) dev_size = self._get_devices_sizes([device]) volutils.copy_volume( self.local_path(snapshot), device, dev_size[device], self.configuration.volume_dd_blocksize, execute=self._execute) self._update_provider_location(volume, device) # ####### Interface methods for DataPath (Target Driver) ######## def ensure_export(self, context, volume): volume_path = self.local_path(volume) model_update = \ self.target_driver.ensure_export( context, volume, volume_path) return model_update def create_export(self, context, volume, connector): volume_path = self.local_path(volume) export_info = self.target_driver.create_export(context, volume, volume_path) return { 'provider_location': export_info['location'] + ' ' + volume_path, 'provider_auth': export_info['auth'], } def remove_export(self, context, volume): self.target_driver.remove_export(context, volume) def initialize_connection(self, volume, connector): if connector['host'] != volutils.extract_host(volume.host, 'host'): return self.target_driver.initialize_connection(volume, connector) else: return { 'driver_volume_type': 'local', 'data': {'device_path': self.local_path(volume)}, } def validate_connector(self, connector): return self.target_driver.validate_connector(connector) def terminate_connection(self, volume, connector, **kwargs): pass cinder-8.0.0/cinder/volume/drivers/disco/0000775000567000056710000000000012701406543021460 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/disco/__init__.py0000664000567000056710000000000012701406250023552 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/disco/disco.py0000664000567000056710000005326412701406250023140 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Industrial Technology Research Institute. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
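# BlockDeviceDriver.find_appropriate_size_device above applies a best-fit
# policy: among the free devices that are large enough for the request, it
# picks the smallest one. A condensed sketch of that selection (the
# function and argument names are illustrative only; device sizes are in
# MB and the requested size in GB, as in the driver):
def best_fit_device(free_devices, sizes_mb, size_gb):
    """Return the smallest free device that can hold size_gb."""
    candidates = [dev for dev in free_devices
                  if size_gb * 1024 <= sizes_mb[dev]]
    if not candidates:
        # Mirrors the "No big enough free disk" error raised by the driver.
        raise ValueError('No big enough free disk')
    return min(candidates, key=lambda dev: sizes_mb[dev])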
"""DISCO Block device Driver.""" import os import time from os_brick.initiator import connector from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units import six from suds import client from cinder import context from cinder.db.sqlalchemy import api from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import utils from cinder.volume import driver LOG = logging.getLogger(__name__) disco_opts = [ cfg.IPOpt('disco_client', default='127.0.0.1', help='The IP of DMS client socket server'), cfg.PortOpt('disco_client_port', default='9898', help='The port to connect DMS client socket server'), cfg.StrOpt('disco_wsdl_path', default='/etc/cinder/DISCOService.wsdl', help='Path to the wsdl file ' 'to communicate with DISCO request manager'), cfg.StrOpt('volume_name_prefix', default='openstack-', help='Prefix before volume name to differenciate ' 'DISCO volume created through openstack ' 'and the other ones'), cfg.IntOpt('snapshot_check_timeout', default=3600, help='How long we check whether a snapshot ' 'is finished before we give up'), cfg.IntOpt('restore_check_timeout', default=3600, help='How long we check whether a restore ' 'is finished before we give up'), cfg.IntOpt('clone_check_timeout', default=3600, help='How long we check whether a clone ' 'is finished before we give up'), cfg.IntOpt('retry_interval', default=1, help='How long we wait before retrying to ' 'get an item detail') ] DISCO_CODE_MAPPING = { 'request.success': 1, 'request.ongoing': 2, 'request.failure': 3, } CONF = cfg.CONF CONF.register_opts(disco_opts) # Driver to communicate with DISCO storage solution class DiscoDriver(driver.VolumeDriver): """Execute commands related to DISCO Volumes.""" VERSION = "1.0" def __init__(self, *args, **kwargs): """Init Disco driver : get configuration, create client.""" super(DiscoDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(disco_opts) self.ctxt = context.get_admin_context() self.connector = connector.InitiatorConnector.factory( self._get_connector_identifier(), utils.get_root_helper(), device_scan_attempts=( self.configuration.num_volume_device_scan_tries) ) self.connection_conf = {} self.connection_conf['server_ip'] = self.configuration.disco_client self.connection_conf['server_port'] = ( self.configuration.disco_client_port) self.connection_properties = {} self.connection_properties['name'] = None self.connection_properties['disco_id'] = None self.connection_properties['conf'] = self.connection_conf def do_setup(self, context): """Create client for DISCO request manager.""" LOG.debug("Enter in DiscoDriver do_setup.") path = ''.join(['file:', self.configuration.disco_wsdl_path]) self.client = client.Client(path, cache=None) def check_for_setup_error(self): """Make sure we have the pre-requisites.""" LOG.debug("Enter in DiscoDriver check_for_setup_error.") path = self.configuration.disco_wsdl_path if not os.path.exists(path): msg = _("Could not find DISCO wsdl file.") raise exception.VolumeBackendAPIException(data=msg) def _get_connector_identifier(self): """Return connector identifier, put here to mock it in unit tests.""" return connector.DISCO def create_volume(self, volume): """Create a disco volume.""" name = self.configuration.volume_name_prefix, volume["id"] vol_name = ''.join(name) vol_size = volume['size'] * units.Ki LOG.debug("Create volume : [name] %(vname)s - [size] %(vsize)s.", {'vname': vol_name, 'vsize': 
six.text_type(vol_size)}) reply = self.client.service.volumeCreate(vol_name, vol_size) status = reply['status'] result = reply['result'] LOG.debug("Create volume : [status] %(stat)s - [result] %(res)s.", {'stat': six.text_type(status), 'res': result}) if status != 0: msg = (_("Error while creating volume " "[status] %(stat)s - [result] %(res)s.") % {'stat': six.text_type(status), 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Volume %s created.", volume["name"]) return {'provider_location': result} def delete_volume(self, volume): """Delete a logical volume.""" disco_vol_id = volume['provider_location'] LOG.debug("Delete disco volume : %s.", disco_vol_id) reply = self.client.service.volumeDelete(disco_vol_id) status = reply['status'] result = reply['result'] LOG.debug("Delete volume [status] %(stat)s - [result] %(res)s.", {'stat': six.text_type(status), 'res': result}) if status != 0: msg = (_("Error while deleting volume " "[status] %(stat)s - [result] %(res)s.") % {'stat': six.text_type(status), 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Volume %s deleted.", volume['name']) def create_snapshot(self, snapshot): """Create a disco snapshot.""" volume = api.volume_get(self.ctxt, snapshot['volume_id']) description = snapshot['display_description'] vol_id = volume['provider_location'] LOG.debug("Create snapshot of volume : %(id)s, " "description : %(desc)s.", {'id': vol_id, 'desc': description}) # Trigger an asynchronous local snapshot reply = self.client.service.snapshotCreate(vol_id, -1, -1, description) status = reply['status'] result = reply['result'] LOG.debug("Create snapshot : [status] %(stat)s - [result] %(res)s.", {'stat': six.text_type(status), 'res': result}) if status != 0: msg = (_("Error while creating snapshot " "[status] %(stat)s - [result] %(res)s.") % {'stat': six.text_type(status), 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Monitor the status until it becomes either success or fail params = {'snapshot_id': int(result)} start_time = int(time.time()) timer = loopingcall.FixedIntervalLoopingCall( self._retry_get_detail, start_time, self.configuration.snapshot_check_timeout, 'snapshot_detail', params) reply = timer.start(interval=self.configuration.retry_interval).wait() snapshot['provider_location'] = result LOG.debug("snapshot taken successfully on volume : %(volume)s.", {'volume': volume['name']}) return {'provider_location': result} def delete_snapshot(self, snapshot): """Delete a disco snapshot.""" LOG.debug("Enter in delete a disco snapshot.") snap_id = snapshot['provider_location'] LOG.debug("[start] Delete snapshot : %s.", snap_id) reply = self.client.service.snapshotDelete(snap_id) status = reply['status'] result = reply['result'] LOG.debug("[End] Delete snapshot : " "[status] %(stat)s - [result] %(res)s.", {'stat': six.text_type(status), 'res': result}) if status != 0: msg = (_("Error while deleting snapshot " "[status] %(stat)s - [result] %(res)s") % {'stat': six.text_type(status), 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" name = self.configuration.volume_name_prefix, volume['id'] snap_id = snapshot['provider_location'] vol_name = ''.join(name) # Trigger an asynchronous restore operation LOG.debug("[start] Create volume from snapshot : " "%(snap_id)s - name : %(vol_name)s.", {'snap_id': snap_id, 
'vol_name': vol_name}) reply = self.client.service.restoreFromSnapshot(snap_id, vol_name) status = reply['status'] result = reply['result'] LOG.debug("Restore volume from snapshot " "[status] %(stat)s - [result] %(res)s.", {'stat': six.text_type(status), 'res': result}) if status != 0: msg = (_("Error[%(stat)s - %(res)s] while restoring snapshot " "[%(snap_id)s] into volume [%(vol)s].") % {'stat': six.text_type(status), 'res': result, 'snap_id': snap_id, 'vol': vol_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Monitor the status until it becomes # either success, fail or timeout params = {'restore_id': int(result)} start_time = int(time.time()) timer = loopingcall.FixedIntervalLoopingCall( self._retry_get_detail, start_time, self.configuration.restore_check_timeout, 'restore_detail', params) reply = timer.start(interval=self.configuration.retry_interval).wait() reply = self.client.service.volumeDetailByName(vol_name) status = reply['status'] new_vol_id = reply['volumeInfoResult']['volumeId'] if status != 0: msg = (_("Error [status] %(stat)s - [result] %(res)s " "while getting volume id.") % {'stat': six.text_type(status), 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Restore done [status] %(stat)s - " "[volume id] %(vol_id)s.", {'stat': status, 'vol_id': six.text_type(new_vol_id)}) return {'provider_location': new_vol_id} def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" LOG.debug("Creating clone of volume: %s.", src_vref['id']) name = self.configuration.volume_name_prefix, volume['id'] vol_name = ''.join(name) vol_size = volume['size'] * units.Ki src_vol_id = src_vref['provider_location'] LOG.debug("Clone volume : " "[name] %(name)s - [source] %(source)s - [size] %(size)s.", {'name': vol_name, 'source': src_vol_id, 'size': six.text_type(vol_size)}) reply = self.client.service.volumeClone(src_vol_id, vol_name) status = reply['status'] result = reply['result'] LOG.debug("Clone volume : [status] %(stat)s - [result] %(res)s.", {'stat': six.text_type(status), 'res': result}) if status != 0: msg = (_("Error while creating volume " "[status] %(stat)s - [result] %(res)s.") % {'stat': six.text_type(status), 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Monitor the status until it becomes # either success, fail or timeout params = {'clone_id': int(result), 'vol_name': vol_name} start_time = int(time.time()) timer = loopingcall.FixedIntervalLoopingCall( self._retry_get_detail, start_time, self.configuration.clone_check_timeout, 'clone_detail', params) reply = timer.start(interval=self.configuration.retry_interval).wait() reply = self.client.service.volumeDetailByName(vol_name) status = reply['status'] new_vol_id = reply['volumeInfoResult']['volumeId'] if status != 0: msg = (_("Error[%(stat)s - %(res)s] " "while getting volume id.") % {'stat': six.text_type(status), 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("clone done : " "[status] %(stat)s - [volume id] %(vol_id)s.", {'stat': status, 'vol_id': six.text_type(new_vol_id)}) return {'provider_location': new_vol_id} def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" LOG.debug("Enter in copy image to volume for disco.") try: device_info = self._attach_volume(volume) image_utils.fetch_to_raw(context, image_service, image_id, device_info['path'],
self.configuration.volume_dd_blocksize, size=volume['size']) finally: self._detach_volume(volume) def _attach_volume(self, volume): """Call the connector.connect_volume().""" connection_properties = self._get_connection_properties(volume) device_info = self.connector.connect_volume(connection_properties) return device_info def _detach_volume(self, volume): """Call the connector.disconnect_volume().""" connection_properties = self._get_connection_properties(volume) self.connector.disconnect_volume(connection_properties, volume) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy a volume to a new image.""" LOG.debug("Enter in copy volume to image for disco.") try: device_info = self._attach_volume(volume) image_utils.upload_volume(context, image_service, image_meta, device_info['path']) finally: self._detach_volume(volume) def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" vol_id = volume['provider_location'] LOG.debug("Extends volume : %(id)s, new size : %(size)s.", {'id': vol_id, 'size': new_size}) new_size_mb = new_size * units.Ki reply = self.client.service.volumeExtend(vol_id, new_size_mb) status = reply['status'] result = reply['result'] if status != 0: msg = (_("Error while extending volume " "[status] %(stat)s - [result] %(res)s.") % {'stat': six.text_type(status), 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Volume extended : [id] %(vid)s - " "[status] %(stat)s - [result] %(res)s.", {'vid': vol_id, 'stat': six.text_type(status), 'res': result}) def initialize_connection(self, volume, connector): """Function called before attaching a volume.""" LOG.debug("Enter in initialize connection with disco, " "connector is %s.", connector) data = { 'driver_volume_type': 'disco', 'data': self._get_connection_properties(volume) } LOG.debug("Initialize connection [data]: %s.", data) return data def _get_connection_properties(self, volume): """Return a dictionary with the connection properties.""" connection_properties = dict(self.connection_properties) connection_properties['name'] = volume['name'] connection_properties['disco_id'] = volume['provider_location'] return connection_properties def terminate_connection(self, volume, connector, **kwargs): """Function called when detaching a volume.""" LOG.debug("Enter in terminate connection with disco.") def _update_volume_stats(self): LOG.debug("Enter in update volume stats.") stats = {} backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or 'disco' stats['storage_protocol'] = 'disco' stats['driver_version'] = self.VERSION stats['reserved_percentage'] = 0 stats['vendor_name'] = 'ITRI' stats['QoS_support'] = False try: reply = self.client.service.systemInformationList() status = reply['status'] if status != 0: msg = (_("Error while getting " "disco information [%s].") % six.text_type(status)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) info_list = reply['propertyListResult']['PropertyInfoList'] for info in info_list: if info['name'] == 'freeCapacityGB': stats['free_capacity_gb'] = float(info['value']) elif info['name'] == 'totalCapacityGB': stats['total_capacity_gb'] = float(info['value']) except Exception: stats['total_capacity_gb'] = 'unknown' stats['free_capacity_gb'] = 'unknown' self._stats = stats def get_volume_stats(self, refresh=False): """Get backend information.""" if refresh: self._update_volume_stats() return self._stats def local_path(self,
volume): """Return the path to the DISCO volume.""" return "/dev/dms%s" % volume['name'] def ensure_export(self, context, volume): """Ensure an export.""" pass def create_export(self, context, volume, connector): """Export the volume.""" pass def remove_export(self, context, volume): """Remove an export for a logical volume.""" pass def is_timeout(self, start_time, timeout): """Check whether we reach the timeout.""" current_time = int(time.time()) if current_time - start_time > timeout: return True else: return False def _retry_get_detail(self, start_time, timeout, operation, params): """Keep trying to query an item detail unless we reach the timeout.""" reply = self._call_api(operation, params) status = reply['status'] msg = (_("Error while getting %(op)s details, " "returned code: %(status)s.") % {'op': operation, 'status': six.text_type(status)}) if status != 0: LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) item_status = self._get_item_status(operation, reply) if item_status == DISCO_CODE_MAPPING['request.failure']: LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) elif item_status == DISCO_CODE_MAPPING['request.success']: raise loopingcall.LoopingCallDone(retvalue=reply) elif self.is_timeout(start_time, timeout): msg = (_("Timeout while calling %s.") % operation) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _call_api(self, operation, params): """Make the call to the SOAP api.""" if operation == 'snapshot_detail': return self.client.service.snapshotDetail(params['snapshot_id']) if operation == 'restore_detail': return self.client.service.restoreDetail(params['restore_id']) if operation == 'clone_detail': return self.client.service.cloneDetail(params['clone_id'], params['vol_name']) else: msg = _("Unknown operation %s.") % operation LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _get_item_status(self, operation, reply): """Extract the item status from a SOAP API reply.""" if reply is None: msg = _("Call returned a None object.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) elif operation == 'snapshot_detail': return reply['snapshotInfoResult']['status'] elif operation == 'restore_detail': return reply['restoreInfoResult']['status'] elif operation == 'clone_detail': return int(reply['result']) else: msg = _("Unknown operation " "%s.") % operation LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) cinder-8.0.0/cinder/volume/drivers/eqlx.py0000664000567000056710000005743412701406250021702 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Dell Inc. # Copyright 2013 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
"""Volume driver for Dell EqualLogic Storage.""" import functools import random import eventlet from eventlet import greenthread import greenlet from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import excutils from six.moves import range from cinder import exception from cinder.i18n import _, _LE, _LW, _LI from cinder import ssh_utils from cinder import utils from cinder.volume.drivers import san LOG = logging.getLogger(__name__) eqlx_opts = [ cfg.StrOpt('eqlx_group_name', default='group-0', help='Group name to use for creating volumes. Defaults to ' '"group-0".'), cfg.IntOpt('eqlx_cli_timeout', default=30, help='Timeout for the Group Manager cli command execution. ' 'Default is 30. Note that this option is deprecated ' 'in favour of "ssh_conn_timeout" as ' 'specified in cinder/volume/drivers/san/san.py ' 'and will be removed in M release.'), cfg.IntOpt('eqlx_cli_max_retries', default=5, help='Maximum retry count for reconnection. Default is 5.'), cfg.BoolOpt('eqlx_use_chap', default=False, help='Use CHAP authentication for targets. Note that this ' 'option is deprecated in favour of "use_chap_auth" as ' 'specified in cinder/volume/driver.py and will be ' 'removed in next release.'), cfg.StrOpt('eqlx_chap_login', default='admin', help='Existing CHAP account name. Note that this ' 'option is deprecated in favour of "chap_username" as ' 'specified in cinder/volume/driver.py and will be ' 'removed in next release.'), cfg.StrOpt('eqlx_chap_password', default='password', help='Password for specified CHAP account name. Note that this ' 'option is deprecated in favour of "chap_password" as ' 'specified in cinder/volume/driver.py and will be ' 'removed in the next release', secret=True), cfg.StrOpt('eqlx_pool', default='default', help='Pool in which volumes will be created. Defaults ' 'to "default".') ] CONF = cfg.CONF CONF.register_opts(eqlx_opts) def with_timeout(f): @functools.wraps(f) def __inner(self, *args, **kwargs): timeout = kwargs.pop('timeout', None) gt = eventlet.spawn(f, self, *args, **kwargs) if timeout is None: return gt.wait() else: kill_thread = eventlet.spawn_after(timeout, gt.kill) try: res = gt.wait() except greenlet.GreenletExit: raise exception.VolumeBackendAPIException( data="Command timed out") else: kill_thread.cancel() return res return __inner class DellEQLSanISCSIDriver(san.SanISCSIDriver): """Implements commands for Dell EqualLogic SAN ISCSI management. To enable the driver add the following line to the cinder configuration: volume_driver=cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver Driver's prerequisites are: - a separate volume group set up and running on the SAN - SSH access to the SAN - a special user must be created which must be able to - create/delete volumes and snapshots; - clone snapshots into volumes; - modify volume access records; The access credentials to the SAN are provided by means of the following flags san_ip= san_login= san_password= san_private_key= Thin provision of volumes is enabled by default, to disable it use: san_thin_provision=false In order to use target CHAP authentication (which is disabled by default) SAN administrator must create a local CHAP user and specify the following flags for the driver: use_chap_auth=True chap_login= chap_password= eqlx_group_name parameter actually represents the CLI prompt message without '>' ending. E.g. 
if prompt looks like 'group-0>', then the parameter must be set to 'group-0' Version history: 1.0 - Initial driver 1.1.0 - Misc fixes 1.2.0 - Deprecated eqlx_cli_timeout in favor of ssh_conn_timeout """ VERSION = "1.2.0" def __init__(self, *args, **kwargs): super(DellEQLSanISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(eqlx_opts) self._group_ip = None self.sshpool = None if self.configuration.eqlx_use_chap is True: LOG.warning(_LW( 'Configuration options eqlx_use_chap, ' 'eqlx_chap_login and eqlx_chap_password are deprecated. Use ' 'use_chap_auth, chap_username and chap_password ' 'respectively for the same.')) self.configuration.use_chap_auth = ( self.configuration.eqlx_use_chap) self.configuration.chap_username = ( self.configuration.eqlx_chap_login) self.configuration.chap_password = ( self.configuration.eqlx_chap_password) if self.configuration.eqlx_cli_timeout: msg = _LW('Configuration option eqlx_cli_timeout ' 'is deprecated and will be removed in M release. ' 'Use ssh_conn_timeout instead.') self.configuration.ssh_conn_timeout = ( self.configuration.eqlx_cli_timeout) versionutils.report_deprecated_feature(LOG, msg) def _get_output(self, chan): out = '' ending = '%s> ' % self.configuration.eqlx_group_name while out.find(ending) == -1: ret = chan.recv(102400) if len(ret) == 0: # According to paramiko.channel.Channel documentation, which # says "If a string of length zero is returned, the channel # stream has closed". So we can confirm that the EQL server # has closed the connection. msg = _("The EQL array has closed the connection.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) out += ret LOG.debug("CLI output\n%s", out) return out.splitlines() def _get_prefixed_value(self, lines, prefix): for line in lines: if line.startswith(prefix): return line[len(prefix):] return @with_timeout def _ssh_execute(self, ssh, command, *arg, **kwargs): transport = ssh.get_transport() chan = transport.open_session() completed = False try: chan.invoke_shell() LOG.debug("Reading CLI MOTD") self._get_output(chan) cmd = 'stty columns 255' LOG.debug("Setting CLI terminal width: '%s'", cmd) chan.send(cmd + '\r') out = self._get_output(chan) LOG.debug("Sending CLI command: '%s'", command) chan.send(command + '\r') out = self._get_output(chan) completed = True if any(ln.startswith(('% Error', 'Error:')) for ln in out): desc = _("Error executing EQL command") cmdout = '\n'.join(out) LOG.error(_LE("%s"), cmdout) raise processutils.ProcessExecutionError( stdout=cmdout, cmd=command, description=desc) return out finally: if not completed: LOG.debug("Timed out executing command: '%s'", command) chan.close() def _run_ssh(self, cmd_list, attempts=1): utils.check_ssh_injection(cmd_list) command = ' '.
join(cmd_list) if not self.sshpool: password = self.configuration.san_password privatekey = self.configuration.san_private_key min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn self.sshpool = ssh_utils.SSHPool( self.configuration.san_ip, self.configuration.san_ssh_port, self.configuration.ssh_conn_timeout, self.configuration.san_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) try: total_attempts = attempts with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: LOG.info(_LI('EQL-driver: executing "%s".'), command) return self._ssh_execute( ssh, command, timeout=self.configuration.ssh_conn_timeout) except Exception: LOG.exception(_LE('Error running command.')) greenthread.sleep(random.randint(20, 500) / 100.0) msg = (_("SSH Command failed after '%(total_attempts)r' " "attempts : '%(command)s'") % {'total_attempts': total_attempts - attempts, 'command': command}) raise exception.VolumeBackendAPIException(data=msg) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Error running SSH command: "%s".'), command) def check_for_setup_error(self): super(DellEQLSanISCSIDriver, self).check_for_setup_error() if self.configuration.eqlx_cli_max_retries < 0: raise exception.InvalidInput( reason=_("eqlx_cli_max_retries must be greater than or " "equal to 0")) def _eql_execute(self, *args, **kwargs): return self._run_ssh( args, attempts=self.configuration.eqlx_cli_max_retries + 1) def _get_volume_data(self, lines): prefix = 'iSCSI target name is ' target_name = self._get_prefixed_value(lines, prefix)[:-1] lun_id = "%s:%s,1 %s 0" % (self._group_ip, '3260', target_name) model_update = {} model_update['provider_location'] = lun_id if self.configuration.use_chap_auth: model_update['provider_auth'] = 'CHAP %s %s' % \ (self.configuration.chap_username, self.configuration.chap_password) return model_update def _get_space_in_gb(self, val): scale = 1.0 part = 'GB' if val.endswith('MB'): scale = 1.0 / 1024 part = 'MB' elif val.endswith('TB'): scale = 1.0 * 1024 part = 'TB' return scale * float(val.partition(part)[0]) def _update_volume_stats(self): """Retrieve stats info from eqlx group.""" LOG.debug('Updating volume stats.') data = {} backend_name = "eqlx" if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'eqlx' data["vendor_name"] = 'Dell' data["driver_version"] = self.VERSION data["storage_protocol"] = 'iSCSI' data['reserved_percentage'] = 0 data['QoS_support'] = False data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 data['multiattach'] = True provisioned_capacity = 0 for line in self._eql_execute('pool', 'select', self.configuration.eqlx_pool, 'show'): if line.startswith('TotalCapacity:'): out_tup = line.rstrip().partition(' ') data['total_capacity_gb'] = self._get_space_in_gb(out_tup[-1]) if line.startswith('FreeSpace:'): out_tup = line.rstrip().partition(' ') data['free_capacity_gb'] = self._get_space_in_gb(out_tup[-1]) if line.startswith('VolumeReserve:'): out_tup = line.rstrip().partition(' ') provisioned_capacity = self._get_space_in_gb(out_tup[-1]) global_capacity = data['total_capacity_gb'] global_free = data['free_capacity_gb'] thin_enabled = self.configuration.san_thin_provision if not thin_enabled: provisioned_capacity = round(global_capacity - global_free, 2) data['provisioned_capacity_gb'] = provisioned_capacity data['max_over_subscription_ratio'] = ( 
self.configuration.max_over_subscription_ratio) data['thin_provisioning_support'] = thin_enabled data['thick_provisioning_support'] = not thin_enabled self._stats = data def _check_volume(self, volume): """Check if the volume exists on the Array.""" command = ['volume', 'select', volume['name'], 'show'] try: self._eql_execute(*command) except processutils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(): if err.stdout.find('does not exist.\n') > -1: LOG.debug('Volume %s does not exist, ' 'it may have already been deleted', volume['name']) raise exception.VolumeNotFound(volume_id=volume['id']) def _parse_connection(self, connector, out): """Returns the correct connection id for the initiator. This parses the cli output from the command 'volume select access show' and returns the correct connection id. """ lines = [line for line in out if line != ''] # Every record has 2 lines for i in range(0, len(lines), 2): try: int(lines[i][0]) # sanity check if len(lines[i + 1].split()) == 1: check = lines[i].split()[1] + lines[i + 1].strip() if connector['initiator'] == check: return lines[i].split()[0] except (IndexError, ValueError): pass # skip the line that is not a valid access record return None def do_setup(self, context): """Disable cli confirmation and tune output format.""" try: disabled_cli_features = ('confirmation', 'paging', 'events', 'formatoutput') for feature in disabled_cli_features: self._eql_execute('cli-settings', feature, 'off') for line in self._eql_execute('grpparams', 'show'): if line.startswith('Group-Ipaddress:'): out_tup = line.rstrip().partition(' ') self._group_ip = out_tup[-1] LOG.info(_LI('EQL-driver: Setup is complete, group IP is "%s".'), self._group_ip) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to setup the Dell EqualLogic driver.')) def create_volume(self, volume): """Create a volume.""" try: cmd = ['volume', 'create', volume['name'], "%sG" % (volume['size'])] if self.configuration.eqlx_pool != 'default': cmd.append('pool') cmd.append(self.configuration.eqlx_pool) if self.configuration.san_thin_provision: cmd.append('thin-provision') out = self._eql_execute(*cmd) self.add_multihost_access(volume) return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create volume "%s".'), volume['name']) def add_multihost_access(self, volume): """Add multihost-access to a volume. 
Needed for live migration.""" try: cmd = ['volume', 'select', volume['name'], 'multihost-access', 'enable'] self._eql_execute(*cmd) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to add multihost-access ' 'for volume "%s".'), volume['name']) def delete_volume(self, volume): """Delete a volume.""" try: self._check_volume(volume) self._eql_execute('volume', 'select', volume['name'], 'offline') self._eql_execute('volume', 'delete', volume['name']) except exception.VolumeNotFound: LOG.warning(_LW('Volume %s was not found while trying to delete ' 'it.'), volume['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to delete ' 'volume "%s".'), volume['name']) def create_snapshot(self, snapshot): """Create snapshot of existing volume on appliance.""" try: out = self._eql_execute('volume', 'select', snapshot['volume_name'], 'snapshot', 'create-now') prefix = 'Snapshot name is ' snap_name = self._get_prefixed_value(out, prefix) self._eql_execute('volume', 'select', snapshot['volume_name'], 'snapshot', 'rename', snap_name, snapshot['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create snapshot of volume "%s".'), snapshot['volume_name']) def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other volume's snapshot on appliance.""" try: out = self._eql_execute('volume', 'select', snapshot['volume_name'], 'snapshot', 'select', snapshot['name'], 'clone', volume['name']) self.add_multihost_access(volume) return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create volume from snapshot "%s".'), snapshot['name']) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" try: src_volume_name = src_vref['name'] out = self._eql_execute('volume', 'select', src_volume_name, 'clone', volume['name']) self.add_multihost_access(volume) return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create clone of volume "%s".'), volume['name']) def delete_snapshot(self, snapshot): """Delete volume's snapshot.""" try: self._eql_execute('volume', 'select', snapshot['volume_name'], 'snapshot', 'delete', snapshot['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to delete snapshot %(snap)s of ' 'volume %(vol)s.'), {'snap': snapshot['name'], 'vol': snapshot['volume_name']}) def initialize_connection(self, volume, connector): """Restrict access to a volume.""" try: cmd = ['volume', 'select', volume['name'], 'access', 'create', 'initiator', connector['initiator']] if self.configuration.use_chap_auth: cmd.extend(['authmethod', 'chap', 'username', self.configuration.chap_username]) self._eql_execute(*cmd) iscsi_properties = self._get_iscsi_properties(volume) return { 'driver_volume_type': 'iscsi', 'data': iscsi_properties } except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to initialize connection ' 'to volume "%s".'), volume['name']) def terminate_connection(self, volume, connector, force=False, **kwargs): """Remove access restrictions from a volume.""" try: out = self._eql_execute('volume', 'select', volume['name'], 'access', 'show') connection_id = self._parse_connection(connector, out) if connection_id is not None: self._eql_execute('volume', 'select', volume['name'], 'access', 'delete', connection_id) except Exception: with 
excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to terminate connection ' 'to volume "%s".'), volume['name']) def create_export(self, context, volume, connector): """Create an export of a volume. Driver has nothing to do here because the volume has already been exported by the SAN, right after its creation. """ pass def ensure_export(self, context, volume): """Ensure an export of a volume. Driver has nothing to do here because the volume has already been exported by the SAN, right after its creation. We will just make sure that the volume exists on the array and issue a warning. """ try: self._check_volume(volume) except exception.VolumeNotFound: LOG.warning(_LW('Volume %s was not found; it may have been ' 'deleted.'), volume['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to ensure export of volume "%s".'), volume['name']) def remove_export(self, context, volume): """Remove an export of a volume. Driver has nothing to do here because the volume has already been exported by the SAN, right after its creation. Nothing to remove since there's nothing exported. """ pass def extend_volume(self, volume, new_size): """Extend the size of the volume.""" try: self._eql_execute('volume', 'select', volume['name'], 'size', "%sG" % new_size) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to extend_volume %(name)s from ' '%(current_size)sGB to %(new_size)sGB.'), {'name': volume['name'], 'current_size': volume['size'], 'new_size': new_size}) def local_path(self, volume): raise NotImplementedError() cinder-8.0.0/cinder/volume/drivers/nimble.py0000664000567000056710000011462712701406250022205 0ustar jenkinsjenkins00000000000000# Nimble Storage, Inc. (c) 2013-2014 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for Nimble Storage. This driver supports Nimble Storage controller CS-Series.
""" import functools import random import re import six import string import sys from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from six.moves import urllib from suds import client from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.objects import volume from cinder.volume.drivers.san import san from cinder.volume import volume_types DRIVER_VERSION = '2.0.2' AES_256_XTS_CIPHER = 2 DEFAULT_CIPHER = 3 EXTRA_SPEC_ENCRYPTION = 'nimble:encryption' EXTRA_SPEC_PERF_POLICY = 'nimble:perfpol-name' EXTRA_SPEC_MULTI_INITIATOR = 'nimble:multi-initiator' DEFAULT_PERF_POLICY_SETTING = 'default' DEFAULT_ENCRYPTION_SETTING = 'no' DEFAULT_MULTI_INITIATOR_SETTING = 'false' DEFAULT_SNAP_QUOTA = sys.maxsize VOL_EDIT_MASK = 4 + 16 + 32 + 64 + 256 + 512 MANAGE_EDIT_MASK = 1 + 262144 UNMANAGE_EDIT_MASK = 262144 AGENT_TYPE_OPENSTACK = 5 AGENT_TYPE_NONE = 1 SOAP_PORT = 5391 SM_ACL_APPLY_TO_BOTH = 3 SM_ACL_CHAP_USER_ANY = '*' SM_SUBNET_DATA = 3 SM_SUBNET_MGMT_PLUS_DATA = 4 LUN_ID = '0' WARN_LEVEL = 0.8 LOG = logging.getLogger(__name__) nimble_opts = [ cfg.StrOpt('nimble_pool_name', default='default', help='Nimble Controller pool name'), cfg.StrOpt('nimble_subnet_label', default='*', help='Nimble Subnet Label'), ] CONF = cfg.CONF CONF.register_opts(nimble_opts) class NimbleDriverException(exception.VolumeDriverException): message = _("Nimble Cinder Driver exception") class NimbleAPIException(exception.VolumeBackendAPIException): message = _("Unexpected response from Nimble API") class NimbleISCSIDriver(san.SanISCSIDriver): """OpenStack driver to enable Nimble Controller. Version history: 1.0 - Initial driver 1.1.1 - Updated VERSION to Nimble driver version 1.1.2 - Update snap-quota to unlimited 2.0.0 - Added Extra Spec Capability Correct capacity reporting Added Manage/Unmanage volume support 2.0.1 - Added multi-initiator support through extra-specs 2.0.2 - Fixed supporting extra specs while cloning vols """ VERSION = DRIVER_VERSION def __init__(self, *args, **kwargs): super(NimbleISCSIDriver, self).__init__(*args, **kwargs) self.APIExecutor = None self.group_stats = {} self.configuration.append_config_values(nimble_opts) def _check_config(self): """Ensure that the flags we care about are set.""" required_config = ['san_ip', 'san_login', 'san_password'] for attr in required_config: if not getattr(self.configuration, attr, None): raise exception.InvalidInput(reason=_('%s is not set.') % attr) def _get_discovery_ip(self, netconfig): """Get discovery ip.""" subnet_label = self.configuration.nimble_subnet_label LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s', {'netlabel': subnet_label, 'netconf': netconfig}) ret_discovery_ip = '' for subnet in netconfig['subnet-list']: LOG.info(_LI('Exploring array subnet label %s'), subnet['label']) if subnet_label == '*': # Use the first data subnet, save mgmt+data for later if subnet['subnet-id']['type'] == SM_SUBNET_DATA: LOG.info(_LI('Discovery ip %(disc_ip)s is used ' 'on data subnet %(net_label)s'), {'disc_ip': subnet['discovery-ip'], 'net_label': subnet['label']}) return subnet['discovery-ip'] elif (subnet['subnet-id']['type'] == SM_SUBNET_MGMT_PLUS_DATA): LOG.info(_LI('Discovery ip %(disc_ip)s is found' ' on mgmt+data subnet %(net_label)s'), {'disc_ip': subnet['discovery-ip'], 'net_label': subnet['label']}) ret_discovery_ip = subnet['discovery-ip'] # If subnet is specified and found, use the subnet elif subnet_label == subnet['label']: LOG.info(_LI('Discovery ip %(disc_ip)s is used' ' 
on subnet %(net_label)s'), {'disc_ip': subnet['discovery-ip'], 'net_label': subnet['label']}) return subnet['discovery-ip'] if ret_discovery_ip: LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet'), ret_discovery_ip) return ret_discovery_ip else: raise NimbleDriverException(_('No suitable discovery ip found')) def _update_existing_vols_agent_type(self, context): LOG.debug("Updating existing volumes to have " "agent_type = 'OPENSTACK'") backend_name = self.configuration.safe_get('volume_backend_name') all_vols = volume.VolumeList.get_all( context, None, None, None, None, {'status': 'available'}) for vol in all_vols: if backend_name in vol.host: try: self.APIExecutor.edit_vol( vol.name, UNMANAGE_EDIT_MASK, {'agent-type': AGENT_TYPE_OPENSTACK}) except NimbleAPIException: LOG.warning(_LW('Error updating agent-type for ' 'volume %s.'), vol.name) def do_setup(self, context): """Setup the Nimble Cinder volume driver.""" self._check_config() # Setup API Executor try: self.APIExecutor = NimbleAPIExecutor( username=self.configuration.san_login, password=self.configuration.san_password, ip=self.configuration.san_ip) except Exception: LOG.error(_LE('Failed to create SOAP client.' 'Check san_ip, username, password' ' and make sure the array version is compatible')) raise self._update_existing_vols_agent_type(context) def _get_provider_location(self, volume_name): """Get volume iqn for initiator access.""" vol_info = self.APIExecutor.get_vol_info(volume_name) iqn = vol_info['target-name'] netconfig = self.APIExecutor.get_netconfig('active') target_ipaddr = self._get_discovery_ip(netconfig) iscsi_portal = target_ipaddr + ':3260' provider_location = '%s %s %s' % (iscsi_portal, iqn, LUN_ID) LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s'), {'name': volume_name, 'loc': provider_location}) return provider_location def _get_model_info(self, volume_name): """Get model info for the volume.""" return ( {'provider_location': self._get_provider_location(volume_name), 'provider_auth': None}) def create_volume(self, volume): """Create a new volume.""" reserve = not self.configuration.san_thin_provision self.APIExecutor.create_vol( volume, self.configuration.nimble_pool_name, reserve) return self._get_model_info(volume['name']) def delete_volume(self, volume): """Delete the specified volume.""" self.APIExecutor.online_vol(volume['name'], False, ignore_list=['SM-enoent']) self.APIExecutor.dissociate_volcoll(volume['name'], ignore_list=['SM-enoent']) self.APIExecutor.delete_vol(volume['name'], ignore_list=['SM-enoent']) def _generate_random_string(self, length): """Generates random_string.""" char_set = string.ascii_lowercase return ''.join(random.sample(char_set, length)) def _clone_volume_from_snapshot(self, volume, snapshot): """Clone volume from snapshot. Extend the volume if the size of the volume is more than the snapshot. 
""" reserve = not self.configuration.san_thin_provision self.APIExecutor.clone_vol(volume, snapshot, reserve) if(volume['size'] > snapshot['volume_size']): vol_size = volume['size'] * units.Gi reserve_size = vol_size if reserve else 0 self.APIExecutor.edit_vol( volume['name'], VOL_EDIT_MASK, # mask for vol attributes {'size': vol_size, 'reserve': reserve_size, 'warn-level': int(vol_size * WARN_LEVEL), 'quota': vol_size, 'snap-quota': DEFAULT_SNAP_QUOTA}) return self._get_model_info(volume['name']) def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" snapshot_name = ('openstack-clone-' + volume['name'] + '-' + self._generate_random_string(12)) snapshot = {'volume_name': src_vref['name'], 'name': snapshot_name, 'volume_size': src_vref['size'], 'display_name': '', 'display_description': ''} self.APIExecutor.snap_vol(snapshot) self._clone_volume_from_snapshot(volume, snapshot) return self._get_model_info(volume['name']) def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" return self._get_model_info(volume['name']) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" return self._get_model_info(volume['name']) def create_snapshot(self, snapshot): """Create a snapshot.""" self.APIExecutor.snap_vol(snapshot) def delete_snapshot(self, snapshot): """Delete a snapshot.""" self.APIExecutor.online_snap( snapshot['volume_name'], False, snapshot['name'], ignore_list=['SM-ealready', 'SM-enoent']) self.APIExecutor.delete_snap(snapshot['volume_name'], snapshot['name'], ignore_list=['SM-enoent']) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" self._clone_volume_from_snapshot(volume, snapshot) return self._get_model_info(volume['name']) def get_volume_stats(self, refresh=False): """Get volume stats. 
        These are really group-level stats for the backend array rather
        than per-volume stats.
        """
        if refresh:
            group_info = self.APIExecutor.get_group_config()
            if not group_info['spaceInfoValid']:
                raise NimbleDriverException(_('SpaceInfo returned by '
                                              'array is invalid'))
            total_capacity = (group_info['usableCapacity'] /
                              float(units.Gi))
            used_space = ((group_info['volUsageCompressed'] +
                           group_info['snapUsageCompressed'] +
                           group_info['unusedReserve']) /
                          float(units.Gi))
            free_space = total_capacity - used_space
            LOG.debug('total_capacity=%(capacity)f '
                      'used_space=%(used)f free_space=%(free)f',
                      {'capacity': total_capacity,
                       'used': used_space,
                       'free': free_space})
            backend_name = self.configuration.safe_get(
                'volume_backend_name') or self.__class__.__name__
            self.group_stats = {'volume_backend_name': backend_name,
                                'vendor_name': 'Nimble',
                                'driver_version': DRIVER_VERSION,
                                'storage_protocol': 'iSCSI'}
            # Just use a single pool for now, FIXME to support multiple
            # pools
            single_pool = dict(
                pool_name=backend_name,
                total_capacity_gb=total_capacity,
                free_capacity_gb=free_space,
                reserved_percentage=0,
                QoS_support=False)
            self.group_stats['pools'] = [single_pool]
        return self.group_stats

    def extend_volume(self, volume, new_size):
        """Extend an existing volume."""
        volume_name = volume['name']
        LOG.info(_LI('Entering extend_volume volume=%(vol)s '
                     'new_size=%(size)s'),
                 {'vol': volume_name, 'size': new_size})
        vol_size = int(new_size) * units.Gi
        reserve = not self.configuration.san_thin_provision
        reserve_size = vol_size if reserve else 0
        self.APIExecutor.edit_vol(
            volume_name,
            VOL_EDIT_MASK,  # mask for vol attributes
            {'size': vol_size,
             'reserve': reserve_size,
             'warn-level': int(vol_size * WARN_LEVEL),
             'quota': vol_size,
             'snap-quota': DEFAULT_SNAP_QUOTA})
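    # NOTE(editor): illustrative sketch only, not part of the upstream
    # driver.  The editVol masks defined at module level appear to be
    # plain bit fields (an assumption based on how they are used here):
    # each bit enables one group of editable attributes, so
    # VOL_EDIT_MASK (4 + 16 + 32 + 64 + 256 + 512) covers the size/quota
    # fields passed by extend_volume() above, while MANAGE_EDIT_MASK
    # (1 + 262144) covers only name and agent-type.  A quick way to list
    # the flags set in a mask:
    @staticmethod
    def _example_mask_bits(mask):
        """Hypothetical helper: individual flag values set in a mask."""
        return [1 << bit for bit in range(mask.bit_length())
                if mask & (1 << bit)]
    # _example_mask_bits(VOL_EDIT_MASK) == [4, 16, 32, 64, 256, 512]
    # _example_mask_bits(MANAGE_EDIT_MASK) == [1, 262144]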
    def _get_existing_volume_ref_name(self, existing_ref):
        """Return the volume name of an existing reference."""
        vol_name = None
        if 'source-name' in existing_ref:
            vol_name = existing_ref['source-name']
        else:
            reason = _("Reference must contain source-name.")
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref,
                reason=reason)
        return vol_name

    def manage_existing(self, volume, external_ref):
        """Manage an existing Nimble volume (import it into Cinder)."""
        # Get the volume name from the external reference
        target_vol_name = self._get_existing_volume_ref_name(external_ref)
        LOG.debug('Entering manage_existing. '
                  'Target_volume_name = %s', target_vol_name)
        # Get vol info from the volume name obtained from the reference
        vol_info = self.APIExecutor.get_vol_info(target_vol_name)
        # Check if volume is already managed by OpenStack
        if vol_info['agent-type'] == AGENT_TYPE_OPENSTACK:
            msg = (_('Volume %s is already managed by OpenStack.')
                   % target_vol_name)
            LOG.error(msg)
            raise exception.ManageExistingAlreadyManaged(
                volume_ref=volume['id'])
        # If agent-type is not None then raise exception
        if vol_info['agent-type'] != AGENT_TYPE_NONE:
            msg = (_('Volume should have agent-type set as None.'))
            raise exception.InvalidVolume(reason=msg)
        new_vol_name = volume['name']
        if vol_info['online']:
            msg = (_('Volume %s is online. Set it to offline before '
                     'managing it with OpenStack.') % target_vol_name)
            raise exception.InvalidVolume(reason=msg)
        # edit the volume
        self.APIExecutor.edit_vol(target_vol_name,
                                  MANAGE_EDIT_MASK,
                                  {'name': new_vol_name,
                                   'agent-type': AGENT_TYPE_OPENSTACK})
        # make the volume online after rename
        self.APIExecutor.online_vol(new_vol_name, True,
                                    ignore_list=['SM-enoent'])
        return self._get_model_info(new_vol_name)

    def manage_existing_get_size(self, volume, external_ref):
        """Return the size of an existing volume."""
        LOG.debug('Volume name : %(name)s  External ref : %(ref)s',
                  {'name': volume['name'], 'ref': external_ref})
        target_vol_name = self._get_existing_volume_ref_name(external_ref)
        # get vol info
        vol_info = self.APIExecutor.get_vol_info(target_vol_name)
        LOG.debug('Volume size : %(size)s  Volume-name : %(name)s',
                  {'size': vol_info['size'], 'name': vol_info['name']})
        return int(vol_info['size'] / units.Gi)

    def unmanage(self, volume):
        """Remove the specified volume from Cinder management."""
        vol_name = volume['name']
        LOG.info(_LI("Entering unmanage_volume volume = %s"), vol_name)
        # check agent type
        vol_info = self.APIExecutor.get_vol_info(vol_name)
        if vol_info['agent-type'] != AGENT_TYPE_OPENSTACK:
            msg = (_('Only volumes managed by OpenStack can be unmanaged.'))
            raise exception.InvalidVolume(reason=msg)
        # update the agent-type to None
        self.APIExecutor.edit_vol(vol_name,
                                  UNMANAGE_EDIT_MASK,
                                  {'agent-type': AGENT_TYPE_NONE})
        # offline the volume
        self.APIExecutor.online_vol(vol_name, False,
                                    ignore_list=['SM-enoent'])

    def _create_igroup_for_initiator(self, initiator_name):
        """Create an igroup for an initiator and return the igroup name."""
        igrp_name = 'openstack-' + self._generate_random_string(12)
        LOG.info(_LI('Creating initiator group %(grp)s '
                     'with initiator %(iname)s'),
                 {'grp': igrp_name, 'iname': initiator_name})
        self.APIExecutor.create_initiator_group(igrp_name, initiator_name)
        return igrp_name

    def _get_igroupname_for_initiator(self, initiator_name):
        initiator_groups = self.APIExecutor.get_initiator_grp_list()
        for initiator_group in initiator_groups:
            if 'initiator-list' in initiator_group:
                if (len(initiator_group['initiator-list']) == 1 and
                        initiator_group['initiator-list'][0]['name'] ==
                        initiator_name):
                    LOG.info(_LI('igroup %(grp)s found for '
                                 'initiator %(iname)s'),
                             {'grp': initiator_group['name'],
                              'iname': initiator_name})
                    return initiator_group['name']
        LOG.info(_LI('No igroup found for initiator %s'), initiator_name)
        return ''

    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance."""
        LOG.info(_LI('Entering initialize_connection volume=%(vol)s'
                     ' connector=%(conn)s location=%(loc)s'),
                 {'vol': volume,
                  'conn': connector,
                  'loc': volume['provider_location']})
        initiator_name = connector['initiator']
        initiator_group_name = self._get_igroupname_for_initiator(
            initiator_name)
        if not initiator_group_name:
            initiator_group_name = self._create_igroup_for_initiator(
                initiator_name)
        LOG.info(_LI('Initiator group name is %(grp)s for initiator '
                     '%(iname)s'),
                 {'grp': initiator_group_name, 'iname': initiator_name})
        self.APIExecutor.add_acl(volume, initiator_group_name)
        (iscsi_portal, iqn, lun_num) = volume['provider_location'].split()
        properties = {}
        properties['target_discovered'] = False  # whether discovery was used
        properties['target_portal'] = iscsi_portal
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun_num
        properties['volume_id'] = volume['id']  # used by xen currently
        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }
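    # NOTE(editor): illustrative sketch only, not part of the upstream
    # driver.  initialize_connection() above unpacks the space-separated
    # provider_location string that _get_provider_location() builds as
    # '<portal-ip:port> <target-iqn> <lun-id>'.  For reference:
    @staticmethod
    def _example_unpack_provider_location(provider_location):
        """Hypothetical helper mirroring the unpacking done above."""
        iscsi_portal, iqn, lun_num = provider_location.split()
        return iscsi_portal, iqn, lun_num
    # e.g. _example_unpack_provider_location(
    #          '10.0.0.2:3260 iqn.2007-11.com.nimblestorage:vol1-v1 0')
    #      == ('10.0.0.2:3260', 'iqn.2007-11.com.nimblestorage:vol1-v1', '0')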
    def terminate_connection(self, volume, connector, **kwargs):
        """Driver entry point to detach a volume from an instance."""
        LOG.info(_LI('Entering terminate_connection volume=%(vol)s'
                     ' connector=%(conn)s location=%(loc)s.'),
                 {'vol': volume,
                  'conn': connector,
                  'loc': volume['provider_location']})
        initiator_name = connector['initiator']
        initiator_group_name = self._get_igroupname_for_initiator(
            initiator_name)
        if not initiator_group_name:
            raise NimbleDriverException(
                _('No initiator group found for initiator %s') %
                initiator_name)
        self.APIExecutor.remove_acl(volume, initiator_group_name)


def _response_checker(func):
    """Decorator function to check if the response of an API is positive."""
    @functools.wraps(func)
    def inner_response_checker(self, *args, **kwargs):
        response = func(self, *args, **kwargs)
        ignore_list = (kwargs['ignore_list']
                       if 'ignore_list' in kwargs else [])
        for err in response['err-list']['err-list']:
            err_str = self._get_err_str(err['code'])
            if err_str != 'SM-ok' and err_str not in ignore_list:
                msg = (_('API %(name)s failed with error string %(err)s')
                       % {'name': func.__name__, 'err': err_str})
                LOG.error(msg)
                raise NimbleAPIException(msg)
        return response
    return inner_response_checker


def _connection_checker(func):
    """Decorator to re-establish the session and re-run the API if the
    session has expired.
    """
    @functools.wraps(func)
    def inner_connection_checker(self, *args, **kwargs):
        for attempts in range(2):
            try:
                return func(self, *args, **kwargs)
            except NimbleAPIException as e:
                if attempts < 1 and (re.search('SM-eaccess',
                                               six.text_type(e))):
                    LOG.info(_LI('Session might have expired.'
                                 ' Trying to relogin'))
                    self.login()
                    continue
                else:
                    LOG.error(_LE('Re-throwing Exception %s'), e)
                    raise
    return inner_connection_checker


class NimbleAPIExecutor(object):
    """Makes Nimble API calls."""

    def __init__(self, *args, **kwargs):
        self.sid = None
        self.username = kwargs['username']
        self.password = kwargs['password']
        wsdl_url = 'https://%s/wsdl/NsGroupManagement.wsdl' % (kwargs['ip'])
        LOG.debug('Using Nimble wsdl_url: %s', wsdl_url)
        self.err_string_dict = self._create_err_code_to_str_mapper(wsdl_url)
        self.client = client.Client(wsdl_url,
                                    username=self.username,
                                    password=self.password)
        soap_url = ('https://%(ip)s:%(port)s/soap' % {'ip': kwargs['ip'],
                                                      'port': SOAP_PORT})
        LOG.debug('Using Nimble soap_url: %s', soap_url)
        self.client.set_options(location=soap_url)
        self.login()

    def _create_err_code_to_str_mapper(self, wsdl_url):
        f = urllib.request.urlopen(wsdl_url)
        wsdl_file = f.read()
        # Pull the SmErrorType enumeration block out of the WSDL.
        err_enums = re.findall(
            r'<simpleType name="SmErrorType">(.*?)</simpleType>',
            wsdl_file,
            re.DOTALL)
        err_enums = ''.join(err_enums).split('\n')
        ret_dict = {}
        for enum in err_enums:
            m = re.search(r'"(.*?)"(.*?)= (\d+) ', enum)
            if m:
                ret_dict[int(m.group(3))] = m.group(1)
        return ret_dict

    def _get_err_str(self, code):
        if code in self.err_string_dict:
            return self.err_string_dict[code]
        else:
            return 'Unknown error Code: %s' % code

    @_response_checker
    def _execute_login(self):
        return self.client.service.login(req={
            'username': self.username,
            'password': self.password
        })

    def login(self):
        """Execute Https Login API."""
        response = self._execute_login()
        LOG.info(_LI('Successful login by user %s'), self.username)
        self.sid = response['authInfo']['sid']

    @_connection_checker
    @_response_checker
    def _execute_get_netconfig(self, name):
        return self.client.service.getNetConfig(request={'sid': self.sid,
                                                         'name': name})

    def get_netconfig(self, name):
        """Execute getNetConfig API."""
        response = self._execute_get_netconfig(name)
        return response['config']
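    # NOTE(editor): illustrative sketch only, not part of the upstream
    # driver.  Every SOAP call wrapped by @_response_checker returns a
    # payload whose 'err-list' entries carry numeric codes that
    # _get_err_str() translates through the mapping scraped from the
    # WSDL.  The acceptance rule is simply "every code must map to
    # 'SM-ok' or appear in ignore_list":
    @staticmethod
    def _example_response_ok(err_strings, ignore_list=()):
        """Hypothetical helper mirroring _response_checker's rule."""
        return all(err == 'SM-ok' or err in ignore_list
                   for err in err_strings)
    # _example_response_ok(['SM-ok']) is True
    # _example_response_ok(['SM-enoent'], ignore_list=['SM-enoent']) is True
    # _example_response_ok(['SM-eaccess']) is False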
    def _get_volumetype_extraspecs(self, volume):
        specs = {}
        type_id = volume['volume_type_id']
        if type_id is not None:
            specs = volume_types.get_volume_type_extra_specs(type_id)
        return specs

    def _get_extra_spec_values(self, extra_specs):
        """Nimble specific extra specs."""
        perf_policy_name = extra_specs.get(EXTRA_SPEC_PERF_POLICY,
                                           DEFAULT_PERF_POLICY_SETTING)
        encryption = extra_specs.get(EXTRA_SPEC_ENCRYPTION,
                                     DEFAULT_ENCRYPTION_SETTING)
        multi_initiator = extra_specs.get(EXTRA_SPEC_MULTI_INITIATOR,
                                          DEFAULT_MULTI_INITIATOR_SETTING)
        extra_specs_map = {}
        extra_specs_map[EXTRA_SPEC_PERF_POLICY] = perf_policy_name
        extra_specs_map[EXTRA_SPEC_ENCRYPTION] = encryption
        extra_specs_map[EXTRA_SPEC_MULTI_INITIATOR] = multi_initiator
        return extra_specs_map

    @_connection_checker
    @_response_checker
    def _execute_create_vol(self, volume, pool_name, reserve):
        # Set volume size, display name and description
        volume_size = volume['size'] * units.Gi
        reserve_size = volume_size if reserve else 0
        # Set volume description
        display_list = [getattr(volume, 'display_name', ''),
                        getattr(volume, 'display_description', '')]
        description = ':'.join(filter(None, display_list))
        # Limit description size to 254 characters
        description = description[:254]
        specs = self._get_volumetype_extraspecs(volume)
        extra_specs_map = self._get_extra_spec_values(specs)
        perf_policy_name = extra_specs_map[EXTRA_SPEC_PERF_POLICY]
        encrypt = extra_specs_map[EXTRA_SPEC_ENCRYPTION]
        multi_initiator = extra_specs_map[EXTRA_SPEC_MULTI_INITIATOR]
        # default value of cipher for encryption
        cipher = DEFAULT_CIPHER
        if encrypt.lower() == 'yes':
            cipher = AES_256_XTS_CIPHER
        LOG.debug('Creating a new volume=%(vol)s size=%(size)s'
                  ' reserve=%(reserve)s in pool=%(pool)s'
                  ' description=%(description)s with Extra Specs'
                  ' perfpol-name=%(perfpol-name)s'
                  ' encryption=%(encryption)s cipher=%(cipher)s'
                  ' agent-type=%(agent-type)s'
                  ' multi-initiator=%(multi-initiator)s',
                  {'vol': volume['name'],
                   'size': volume_size,
                   'reserve': reserve,
                   'pool': pool_name,
                   'description': description,
                   'perfpol-name': perf_policy_name,
                   'encryption': encrypt,
                   'cipher': cipher,
                   'agent-type': AGENT_TYPE_OPENSTACK,
                   'multi-initiator': multi_initiator})
        return self.client.service.createVol(
            request={'sid': self.sid,
                     'attr': {'name': volume['name'],
                              'description': description,
                              'size': volume_size,
                              'reserve': reserve_size,
                              'warn-level': int(volume_size * WARN_LEVEL),
                              'quota': volume_size,
                              'snap-quota': DEFAULT_SNAP_QUOTA,
                              'online': True,
                              'pool-name': pool_name,
                              'agent-type': AGENT_TYPE_OPENSTACK,
                              'perfpol-name': perf_policy_name,
                              'encryptionAttr': {'cipher': cipher},
                              'multi-initiator': multi_initiator}})

    def create_vol(self, volume, pool_name, reserve):
        """Execute createVol API."""
        response = self._execute_create_vol(volume, pool_name, reserve)
        LOG.info(_LI('Successfully created volume %s'), response['name'])
        return response['name']

    @_connection_checker
    @_response_checker
    def _execute_get_group_config(self):
        LOG.debug('Getting group config information')
        return self.client.service.getGroupConfig(request={'sid': self.sid})

    def get_group_config(self):
        """Execute getGroupConfig API."""
        response = self._execute_get_group_config()
        LOG.debug('Successfully retrieved group config information')
        return response['info']

    @_connection_checker
    @_response_checker
    def add_acl(self, volume, initiator_group_name):
        """Execute addAcl API."""
        LOG.info(_LI('Adding ACL to volume=%(vol)s with'
                     ' initiator group name %(igrp)s'),
                 {'vol': volume['name'],
                  'igrp': initiator_group_name})
        return self.client.service.addVolAcl(
            request={'sid': self.sid,
                     'volname': volume['name'],
                     'apply-to':
SM_ACL_APPLY_TO_BOTH, 'chapuser': SM_ACL_CHAP_USER_ANY, 'initiatorgrp': initiator_group_name}) @_connection_checker @_response_checker def remove_acl(self, volume, initiator_group_name): """Execute removeVolAcl API.""" LOG.info(_LI('Removing ACL from volume=%(vol)s' ' for initiator group %(igrp)s'), {'vol': volume['name'], 'igrp': initiator_group_name}) return self.client.service.removeVolAcl( request={'sid': self.sid, 'volname': volume['name'], 'apply-to': SM_ACL_APPLY_TO_BOTH, 'chapuser': SM_ACL_CHAP_USER_ANY, 'initiatorgrp': initiator_group_name}) @_connection_checker @_response_checker def _execute_get_vol_info(self, vol_name): LOG.info(_LI('Getting volume information ' 'for vol_name=%s'), vol_name) return self.client.service.getVolInfo(request={'sid': self.sid, 'name': vol_name}) def get_vol_info(self, vol_name): """Execute getVolInfo API.""" response = self._execute_get_vol_info(vol_name) LOG.info(_LI('Successfully got volume information for volume %s'), vol_name) return response['vol'] @_connection_checker @_response_checker def online_vol(self, vol_name, online_flag, *args, **kwargs): """Execute onlineVol API.""" LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s'), {'vol': vol_name, 'flag': online_flag}) return self.client.service.onlineVol(request={'sid': self.sid, 'name': vol_name, 'online': online_flag}) @_connection_checker @_response_checker def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs): """Execute onlineSnap API.""" LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s'), {'snap': snap_name, 'flag': online_flag}) return self.client.service.onlineSnap(request={'sid': self.sid, 'vol': vol_name, 'name': snap_name, 'online': online_flag}) @_connection_checker @_response_checker def dissociate_volcoll(self, vol_name, *args, **kwargs): """Execute dissocProtPol API.""" LOG.info(_LI('Dissociating volume %s '), vol_name) return self.client.service.dissocProtPol( request={'sid': self.sid, 'vol-name': vol_name}) @_connection_checker @_response_checker def delete_vol(self, vol_name, *args, **kwargs): """Execute deleteVol API.""" LOG.info(_LI('Deleting volume %s '), vol_name) return self.client.service.deleteVol(request={'sid': self.sid, 'name': vol_name}) @_connection_checker @_response_checker def snap_vol(self, snapshot): """Execute snapVol API.""" volume_name = snapshot['volume_name'] snap_name = snapshot['name'] # Set snapshot description display_list = [getattr(snapshot, 'display_name', ''), getattr(snapshot, 'display_description', '')] snap_description = ':'.join(filter(None, display_list)) # Limit to 254 characters snap_description = snap_description[:254] LOG.info(_LI('Creating snapshot for volume_name=%(vol)s' ' snap_name=%(name)s snap_description=%(desc)s'), {'vol': volume_name, 'name': snap_name, 'desc': snap_description}) return self.client.service.snapVol( request={'sid': self.sid, 'vol': volume_name, 'snapAttr': {'name': snap_name, 'description': snap_description}}) @_connection_checker @_response_checker def delete_snap(self, vol_name, snap_name, *args, **kwargs): """Execute deleteSnap API.""" LOG.info(_LI('Deleting snapshot %s '), snap_name) return self.client.service.deleteSnap(request={'sid': self.sid, 'vol': vol_name, 'name': snap_name}) @_connection_checker @_response_checker def clone_vol(self, volume, snapshot, reserve): """Execute cloneVol API.""" volume_name = snapshot['volume_name'] snap_name = snapshot['name'] clone_name = volume['name'] snap_size = snapshot['volume_size'] reserve_size = snap_size * units.Gi if 
reserve else 0 specs = self._get_volumetype_extraspecs(volume) extra_specs_map = self._get_extra_spec_values(specs) perf_policy_name = extra_specs_map.get(EXTRA_SPEC_PERF_POLICY) encrypt = extra_specs_map.get(EXTRA_SPEC_ENCRYPTION) multi_initiator = extra_specs_map.get(EXTRA_SPEC_MULTI_INITIATOR) # default value of cipher for encryption cipher = DEFAULT_CIPHER if encrypt.lower() == 'yes': cipher = AES_256_XTS_CIPHER LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s ' 'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s ' 'reserve=%(reserve)s' 'agent-type=%(agent-type)s ' 'perfpol-name=%(perfpol-name)s ' 'encryption=%(encryption)s cipher=%(cipher)s ' 'multi-initiator=%(multi-initiator)s'), {'vol': volume_name, 'snap': snap_name, 'clone': clone_name, 'size': snap_size, 'reserve': reserve, 'agent-type': AGENT_TYPE_OPENSTACK, 'perfpol-name': perf_policy_name, 'encryption': encrypt, 'cipher': cipher, 'multi-initiator': multi_initiator}) clone_size = snap_size * units.Gi return self.client.service.cloneVol( request={'sid': self.sid, 'name': volume_name, 'attr': {'name': clone_name, 'reserve': reserve_size, 'warn-level': int(clone_size * WARN_LEVEL), 'quota': clone_size, 'snap-quota': DEFAULT_SNAP_QUOTA, 'online': True, 'agent-type': AGENT_TYPE_OPENSTACK, 'perfpol-name': perf_policy_name, 'encryptionAttr': {'cipher': cipher}, 'multi-initiator': multi_initiator}, 'snap-name': snap_name}) @_connection_checker @_response_checker def edit_vol(self, vol_name, mask, attr): """Execute editVol API.""" LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s'), {'vol': vol_name, 'mask': str(mask)}) return self.client.service.editVol(request={'sid': self.sid, 'name': vol_name, 'mask': mask, 'attr': attr}) @_connection_checker @_response_checker def _execute_get_initiator_grp_list(self): LOG.info(_LI('Getting getInitiatorGrpList')) return (self.client.service.getInitiatorGrpList( request={'sid': self.sid})) def get_initiator_grp_list(self): """Execute getInitiatorGrpList API.""" response = self._execute_get_initiator_grp_list() LOG.info(_LI('Successfully retrieved InitiatorGrpList')) return (response['initiatorgrp-list'] if 'initiatorgrp-list' in response else []) @_connection_checker @_response_checker def create_initiator_group(self, initiator_group_name, initiator_name): """Execute createInitiatorGrp API.""" LOG.info(_LI('Creating initiator group %(igrp)s' ' with one initiator %(iname)s'), {'igrp': initiator_group_name, 'iname': initiator_name}) return self.client.service.createInitiatorGrp( request={'sid': self.sid, 'attr': {'name': initiator_group_name, 'initiator-list': [{'label': initiator_name, 'name': initiator_name}]}}) @_connection_checker @_response_checker def delete_initiator_group(self, initiator_group_name, *args, **kwargs): """Execute deleteInitiatorGrp API.""" LOG.info(_LI('Deleting deleteInitiatorGrp %s '), initiator_group_name) return self.client.service.deleteInitiatorGrp( request={'sid': self.sid, 'name': initiator_group_name}) cinder-8.0.0/cinder/volume/drivers/hitachi/0000775000567000056710000000000012701406543021770 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/hitachi/hnas_backend.py0000664000567000056710000010442112701406250024737 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Hitachi Unified Storage (HUS-HNAS) platform. Backend operations.
"""

import re

from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import units
import six

from cinder.i18n import _, _LW, _LI, _LE
from cinder import exception
from cinder import ssh_utils
from cinder import utils

LOG = logging.getLogger("cinder.volume.driver")
HNAS_SSC_RETRIES = 5


class HnasBackend(object):
    """Back end. Talks to HUS-HNAS."""

    def __init__(self, drv_configs):
        self.drv_configs = drv_configs
        self.sshpool = None

    @utils.retry(exceptions=exception.HNASConnError,
                 retries=HNAS_SSC_RETRIES,
                 wait_random=True)
    def run_cmd(self, cmd, ip0, user, pw, *args, **kwargs):
        """Run a command on the SMU or via SSH.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :returns: (stdout, stderr) of the executed command
        """
        LOG.debug('Enable ssh: %s',
                  six.text_type(self.drv_configs['ssh_enabled']))
        if self.drv_configs['ssh_enabled'] != 'True':
            # Direct connection via ssc
            args = (cmd, '--user', user, '--password', pw, ip0) + args
            try:
                out, err = utils.execute(*args, **kwargs)
                LOG.debug("command %(cmd)s result: out = %(out)s - err = "
                          "%(err)s", {'cmd': cmd, 'out': out, 'err': err})
                return out, err
            except putils.ProcessExecutionError as e:
                if 'Failed to establish SSC connection' in e.stderr:
                    LOG.debug("SSC connection error!")
                    msg = _("Failed to establish SSC connection.")
                    raise exception.HNASConnError(msg)
                elif 'Connection reset' in e.stderr:
                    LOG.debug("HNAS connection reset!")
                    msg = _("HNAS has disconnected SSC")
                    raise exception.HNASConnError(msg)
                else:
                    raise
        else:
            if self.drv_configs['cluster_admin_ip0'] is None:
                # Connect to SMU through SSH and run ssc locally
                args = (cmd, 'localhost') + args
            else:
                args = (cmd, '--smuauth',
                        self.drv_configs['cluster_admin_ip0']) + args
            utils.check_ssh_injection(args)
            command = ' '.join(args)
            command = command.replace('"', '\\"')
            if not self.sshpool:
                server = self.drv_configs['mgmt_ip0']
                port = int(self.drv_configs['ssh_port'])
                username = self.drv_configs['username']
                # We only accept private/public key auth
                password = ""
                privatekey = self.drv_configs['ssh_private_key']
                self.sshpool = ssh_utils.SSHPool(server,
                                                 port,
                                                 None,
                                                 username,
                                                 password=password,
                                                 privatekey=privatekey)
            with self.sshpool.item() as ssh:
                try:
                    out, err = putils.ssh_execute(ssh, command,
                                                  check_exit_code=True)
                    LOG.debug("command %(cmd)s result: out = "
                              "%(out)s - err = %(err)s",
                              {'cmd': cmd, 'out': out, 'err': err})
                    return out, err
                except putils.ProcessExecutionError as e:
                    if 'Failed to establish SSC connection' in e.stderr:
                        LOG.debug("SSC connection error!")
                        msg = _("Failed to establish SSC connection.")
                        raise exception.HNASConnError(msg)
                    else:
                        # re-raise the original error, preserving the
                        # traceback and error details
                        raise

    def get_version(self, cmd, ver, ip0, user, pw):
        """Get version information from the storage unit.

        :param cmd: ssc command name
        :param ver: string driver version
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password
authentication for array :returns: formatted string with version information """ out, err = self.run_cmd(cmd, ip0, user, pw, "cluster-getmac", check_exit_code=True) hardware = out.split()[2] out, err = self.run_cmd(cmd, ip0, user, pw, "ver", check_exit_code=True) lines = out.split('\n') model = "" for line in lines: if 'Model:' in line: model = line.split()[1] if 'Software:' in line: ver = line.split()[1] # If not using SSH, the local utility version can be different from the # one used in HNAS if self.drv_configs['ssh_enabled'] != 'True': out, err = utils.execute(cmd, "-version", check_exit_code=True) util = out.split()[1] out = ("Array_ID: %(arr)s (%(mod)s) version: %(ver)s LU: 256 " "RG: 0 RG_LU: 0 Utility_version: %(util)s" % {'arr': hardware, 'mod': model, 'ver': ver, 'util': util}) else: out = ("Array_ID: %(arr)s (%(mod)s) version: %(ver)s LU: 256 " "RG: 0 RG_LU: 0" % {'arr': hardware, 'mod': model, 'ver': ver}) LOG.debug('get_version: %(out)s -- %(err)s', {'out': out, 'err': err}) return out def get_iscsi_info(self, cmd, ip0, user, pw): """Gets IP addresses for EVSs, use EVSID as controller. :param cmd: ssc command name :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :returns: formated string with iSCSI information """ out, err = self.run_cmd(cmd, ip0, user, pw, 'evsipaddr', '-l', check_exit_code=True) lines = out.split('\n') newout = "" for line in lines: if 'evs' in line and 'admin' not in line: inf = line.split() (evsnum, ip) = (inf[1], inf[3]) newout += "CTL: %s Port: 0 IP: %s Port: 3260 Link: Up\n" \ % (evsnum, ip) LOG.debug('get_iscsi_info: %(out)s -- %(err)s', {'out': out, 'err': err}) return newout def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None): """Gets the list of filesystems and fsids. :param cmd: ssc command name :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :param fslabel: filesystem label we want to get info :returns: formated string with filesystems and fsids """ if fslabel is None: out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-a', check_exit_code=True) else: out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-f', fslabel, check_exit_code=True) lines = out.split('\n') single_evs = True LOG.debug("Parsing output: %s", lines) newout = "" for line in lines: if 'Not mounted' in line or 'Not determined' in line: continue if 'not' not in line and 'EVS' in line: single_evs = False if 'GB' in line or 'TB' in line: LOG.debug("Parsing output: %s", line) inf = line.split() if not single_evs: (fsid, fslabel, capacity) = (inf[0], inf[1], inf[3]) (used, perstr) = (inf[5], inf[7]) (availunit, usedunit) = (inf[4], inf[6]) else: (fsid, fslabel, capacity) = (inf[0], inf[1], inf[2]) (used, perstr) = (inf[4], inf[6]) (availunit, usedunit) = (inf[3], inf[5]) if usedunit == 'GB': usedmultiplier = units.Ki else: usedmultiplier = units.Mi if availunit == 'GB': availmultiplier = units.Ki else: availmultiplier = units.Mi m = re.match("\((\d+)\%\)", perstr) if m: percent = m.group(1) else: percent = 0 newout += "HDP: %s %d MB %d MB %d %% LUs: 256 Normal %s\n" \ % (fsid, int(float(capacity) * availmultiplier), int(float(used) * usedmultiplier), int(percent), fslabel) LOG.debug('get_hdp_info: %(out)s -- %(err)s', {'out': newout, 'err': err}) return newout def get_evs(self, cmd, ip0, user, pw, fsid): """Gets the EVSID for the named filesystem. 
:param cmd: ssc command name :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :returns: EVS id of the file system """ out, err = self.run_cmd(cmd, ip0, user, pw, "evsfs", "list", check_exit_code=True) LOG.debug('get_evs: out %s.', out) lines = out.split('\n') for line in lines: inf = line.split() if fsid in line and (fsid == inf[0] or fsid == inf[1]): return inf[3] LOG.warning(_LW('get_evs: %(out)s -- No find for %(fsid)s'), {'out': out, 'fsid': fsid}) return 0 def _get_evsips(self, cmd, ip0, user, pw, evsid): """Gets the EVS IPs for the named filesystem.""" out, err = self.run_cmd(cmd, ip0, user, pw, 'evsipaddr', '-e', evsid, check_exit_code=True) iplist = "" lines = out.split('\n') for line in lines: inf = line.split() if 'evs' in line: iplist += inf[3] + ' ' LOG.debug('get_evsips: %s', iplist) return iplist def _get_fsid(self, cmd, ip0, user, pw, fslabel): """Gets the FSID for the named filesystem.""" out, err = self.run_cmd(cmd, ip0, user, pw, 'evsfs', 'list', check_exit_code=True) LOG.debug('get_fsid: out %s', out) lines = out.split('\n') for line in lines: inf = line.split() if fslabel in line and fslabel == inf[1]: LOG.debug('get_fsid: %s', line) return inf[0] LOG.warning(_LW('get_fsid: %(out)s -- No info for %(fslabel)s'), {'out': out, 'fslabel': fslabel}) return 0 def _get_targets(self, cmd, ip0, user, pw, evsid, tgtalias=None): """Get the target list of an EVS. Get the target list of an EVS. Optionally can return the target list of a specific target. """ LOG.debug("Getting target list for evs %s, tgtalias: %s.", evsid, tgtalias) try: out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", evsid, 'iscsi-target', 'list', check_exit_code=True) except putils.ProcessExecutionError as e: LOG.error(_LE('Error getting iSCSI target info ' 'from EVS %(evs)s.'), {'evs': evsid}) LOG.debug("_get_targets out: %(out)s, err: %(err)s.", {'out': e.stdout, 'err': e.stderr}) return [] tgt_list = [] if 'No targets' in out: LOG.debug("No targets found in EVS %(evsid)s.", {'evsid': evsid}) return tgt_list tgt_raw_list = out.split('Alias')[1:] for tgt_raw_info in tgt_raw_list: tgt = {} tgt['alias'] = tgt_raw_info.split('\n')[0].split(' ').pop() tgt['iqn'] = tgt_raw_info.split('\n')[1].split(' ').pop() tgt['secret'] = tgt_raw_info.split('\n')[3].split(' ').pop() tgt['auth'] = tgt_raw_info.split('\n')[4].split(' ').pop() luns = [] tgt_raw_info = tgt_raw_info.split('\n\n')[1] tgt_raw_list = tgt_raw_info.split('\n')[2:] for lun_raw_line in tgt_raw_list: lun_raw_line = lun_raw_line.strip() lun_raw_line = lun_raw_line.split(' ') lun = {} lun['id'] = lun_raw_line[0] lun['name'] = lun_raw_line.pop() luns.append(lun) tgt['luns'] = luns if tgtalias == tgt['alias']: return [tgt] tgt_list.append(tgt) if tgtalias is not None: # We tried to find 'tgtalias' but didn't find. Return an empty # list. LOG.debug("There's no target %(alias)s in EVS %(evsid)s.", {'alias': tgtalias, 'evsid': evsid}) return [] LOG.debug("Targets in EVS %(evs)s: %(tgtl)s.", {'evs': evsid, 'tgtl': tgt_list}) return tgt_list def _get_unused_lunid(self, cmd, ip0, user, pw, tgt_info): if len(tgt_info['luns']) == 0: return 0 free_lun = 0 for lun in tgt_info['luns']: if int(lun['id']) == free_lun: free_lun += 1 if int(lun['id']) > free_lun: # Found a free LUN number break return free_lun def get_nfs_info(self, cmd, ip0, user, pw): """Gets information on each NFS export. 
        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :returns: formatted string
        """
        out, err = self.run_cmd(cmd, ip0, user, pw,
                                'for-each-evs', '-q', 'nfs-export', 'list',
                                check_exit_code=True)
        lines = out.split('\n')
        newout = ""
        export = ""
        path = ""
        for line in lines:
            inf = line.split()
            if 'Export name' in line:
                export = inf[2]
            if 'Export path' in line:
                path = inf[2]
            if 'File system info' in line:
                fs = ""
            if 'File system label' in line:
                fs = inf[3]
            if 'Transfer setting' in line and fs != "":
                fsid = self._get_fsid(cmd, ip0, user, pw, fs)
                evsid = self.get_evs(cmd, ip0, user, pw, fsid)
                ips = self._get_evsips(cmd, ip0, user, pw, evsid)
                newout += ("Export: %s Path: %s HDP: %s FSID: %s "
                           "EVS: %s IPS: %s\n"
                           % (export, path, fs, fsid, evsid, ips))
                fs = ""
        LOG.debug('get_nfs_info: %(out)s -- %(err)s',
                  {'out': newout, 'err': err})
        return newout

    def create_lu(self, cmd, ip0, user, pw, hdp, size, name):
        """Create a new Logical Unit.

        If the operation cannot be performed for some reason,
        utils.execute() throws an error and aborts the operation. Used
        for iSCSI only.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param hdp: data pool the logical unit will be created on
        :param size: size (MB) of the new logical unit
        :param name: name of the logical unit
        :returns: formatted string with 'LUN %d HDP: %d size: %s MB, is
                  successfully created'
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid, 'iscsi-lu', 'add', "-e",
                                name, hdp, '/.cinder/' + name + '.iscsi',
                                size + 'M',
                                check_exit_code=True)
        out = ("LUN %s HDP: %s size: %s MB, is successfully created"
               % (name, hdp, size))
        LOG.debug('create_lu: %s.', out)
        return out

    def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
        """Delete a logical unit. Used for iSCSI only.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param hdp: data pool of the logical unit
        :param lun: id of the logical unit being deleted
        :returns: formatted string 'Logical unit deleted successfully.'
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid, 'iscsi-lu', 'del', '-d',
                                '-f', lun,
                                check_exit_code=True)
        LOG.debug('delete_lu: %(out)s -- %(err)s.', {'out': out, 'err': err})
        return out
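    # NOTE(editor): illustrative sketch only, not part of the upstream
    # driver.  All LU operations in this class funnel through run_cmd()
    # with the same shape of argument list:
    # 'console-context --evs <EVSID> iscsi-lu <verb> ...'.  For example,
    # create_lu() above effectively passes run_cmd() the tuple built by:
    @staticmethod
    def _example_create_lu_args(evsid, name, hdp, size_mb):
        """Hypothetical helper: create_lu()'s ssc arguments, for reference."""
        return ('console-context', '--evs', evsid, 'iscsi-lu', 'add',
                '-e', name, hdp, '/.cinder/' + name + '.iscsi',
                str(size_mb) + 'M')
    # e.g. _example_create_lu_args('1', 'volume-abc', 'fs01', 1024)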
    def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
        """Clone a volume.

        Clone primitive used to support all iSCSI snapshot/cloning
        functions. Used for iSCSI only.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param src_lun: id of the logical unit being cloned
        :param hdp: data pool of the logical unit
        :param size: size of the LU being cloned; only for logging purposes
        :param name: name of the new cloned logical unit
        :returns: formatted string
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid, 'iscsi-lu', 'clone', '-e',
                                src_lun, name,
                                '/.cinder/' + name + '.iscsi',
                                check_exit_code=True)
        out = ("LUN %s HDP: %s size: %s MB, is successfully created"
               % (name, hdp, size))
        LOG.debug('create_dup: %(out)s -- %(err)s.', {'out': out,
                                                      'err': err})
        return out

    def file_clone(self, cmd, ip0, user, pw, fslabel, src, name):
        """Clone an NFS file to a new one named 'name'.

        Clone primitive used to support all NFS snapshot/cloning
        functions.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param fslabel: file system label of the new file
        :param src: source file
        :param name: target path of the newly created file
        :returns: formatted string
        """
        _fsid = self._get_fsid(cmd, ip0, user, pw, fslabel)
        _evsid = self.get_evs(cmd, ip0, user, pw, _fsid)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid, 'file-clone-create', '-f',
                                fslabel, src, name,
                                check_exit_code=True)
        out = "LUN %s HDP: %s Clone: %s -> %s" % (name, _fsid, src, name)
        LOG.debug('file_clone: %(out)s -- %(err)s.', {'out': out,
                                                      'err': err})
        return out

    def extend_vol(self, cmd, ip0, user, pw, hdp, lun, new_size, name):
        """Extend an iSCSI volume.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param hdp: data pool of the logical unit
        :param lun: id of the logical unit being extended
        :param new_size: new size of the LU
        :param name: name of the logical unit
        :returns: formatted string
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid, 'iscsi-lu', 'expand',
                                name, new_size + 'M',
                                check_exit_code=True)
        out = ("LUN: %s successfully extended to %s MB" % (name, new_size))
        LOG.debug('extend_vol: %s.', out)
        return out
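    # NOTE(editor): illustrative sketch only, not part of the upstream
    # driver.  add_iscsi_conn() below picks a LUN number through
    # _get_unused_lunid() (defined earlier in this class), which scans a
    # target's LUN ids for the first unused non-negative integer.
    # Equivalent standalone logic, assuming the ids arrive sorted in
    # ascending order as they do in 'iscsi-target list' output:
    @staticmethod
    def _example_first_free_lunid(sorted_lun_ids):
        """Hypothetical helper mirroring _get_unused_lunid's scan."""
        free = 0
        for lun_id in sorted_lun_ids:
            if lun_id == free:
                free += 1
            elif lun_id > free:
                break  # found a hole in the sequence
        return free
    # _example_first_free_lunid([]) == 0
    # _example_first_free_lunid([0, 1, 3]) == 2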
    @utils.retry(putils.ProcessExecutionError, retries=HNAS_SSC_RETRIES,
                 wait_random=True)
    def add_iscsi_conn(self, cmd, ip0, user, pw, lun_name, hdp, port,
                       tgtalias, initiator):
        """Set up the LUN on the specified target port.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param lun_name: name of the logical unit being mapped
        :param hdp: data pool of the logical unit
        :param port: iSCSI port
        :param tgtalias: alias of the target
        :param initiator: initiator address
        """
        LOG.debug('Adding LUN %(lun)s to target %(tgt)s.',
                  {'lun': lun_name, 'tgt': tgtalias})
        found, lunid, tgt = self.check_lu(cmd, ip0, user, pw, lun_name, hdp)
        evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        if found:
            conn = (int(lunid), lun_name, initiator, int(lunid), tgt['iqn'],
                    int(lunid), hdp, port)
            out = ("H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s "
                   "@ index: %d, and Target: %s @ index %d is "
                   "successfully paired @ CTL: %s, Port: %s.") % conn
        else:
            tgt = self._get_targets(cmd, ip0, user, pw, evsid, tgtalias)
            lunid = self._get_unused_lunid(cmd, ip0, user, pw, tgt[0])
            out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                    "--evs", evsid, 'iscsi-target', 'addlu',
                                    tgtalias, lun_name,
                                    six.text_type(lunid),
                                    check_exit_code=True)
            conn = (int(lunid), lun_name, initiator, int(lunid),
                    tgt[0]['iqn'], int(lunid), hdp, port)
            out = ("H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s "
                   "@ index: %d, and Target: %s @ index %d is "
                   "successfully paired @ CTL: %s, Port: %s.") % conn
        LOG.debug('add_iscsi_conn: returns %s.', out)
        return out

    def del_iscsi_conn(self, cmd, ip0, user, pw, evsid, iqn, hlun):
        """Remove the LUN from the specified target port.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param evsid: EVSID for the file system
        :param iqn: iSCSI qualified name
        :param hlun: logical unit id
        :returns: formatted string
        """
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", evsid, 'iscsi-target', 'list',
                                iqn,
                                check_exit_code=True)
        lines = out.split('\n')
        out = ("H-LUN: %d already deleted from target %s"
               % (int(hlun), iqn))
        # see if lun is already detached
        for line in lines:
            if line.startswith(' '):
                lunline = line.split()[0]
                if lunline[0].isdigit() and lunline == hlun:
                    out = ""
                    break
        if out != "":
            # hlun wasn't found
            LOG.info(_LI('del_iscsi_conn: hlun not found %s.'), out)
            return out
        # remove the LU from the target
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", evsid, 'iscsi-target', 'dellu',
                                '-f', iqn, hlun,
                                check_exit_code=True)
        out = ("H-LUN: %d successfully deleted from target %s"
               % (int(hlun), iqn))
        LOG.debug('del_iscsi_conn: %s.', out)
        return out

    def get_targetiqn(self, cmd, ip0, user, pw, targetalias, hdp, secret):
        """Obtain the target's full IQN.

        Returns the target's full IQN rather than its alias.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param targetalias: alias of the target
        :param hdp: data pool of the logical unit
        :param secret: CHAP secret of the target
        :returns: string with the full IQN
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid, 'iscsi-target', 'list',
                                targetalias,
                                check_exit_code=True)
        if "does not exist" in out:
            if secret == "":
                secret = '""'
                out, err = self.run_cmd(cmd, ip0, user, pw,
                                        "console-context", "--evs", _evsid,
                                        'iscsi-target', 'add', targetalias,
                                        secret,
                                        check_exit_code=True)
            else:
                out, err = self.run_cmd(cmd, ip0, user, pw,
                                        "console-context", "--evs", _evsid,
                                        'iscsi-target', 'add', targetalias,
                                        secret,
                                        check_exit_code=True)
        if "success" in out:
            return targetalias
        lines = out.split('\n')
        # returns the first iqn
        for line in lines:
            if 'Alias' in line:
                fulliqn = line.split()[2]
                return fulliqn

    def set_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp,
                         secret):
        """Set the CHAP secret for the specified target.
:param cmd: ssc command name :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :param targetalias: alias of the target :param hdp: data pool of the logical unit :param secret: CHAP secret of the target """ _evsid = self.get_evs(cmd, ip0, user, pw, hdp) out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", _evsid, 'iscsi-target', 'list', targetalias, check_exit_code=False) if "does not exist" in out: out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", _evsid, 'iscsi-target', 'add', targetalias, secret, check_exit_code=True) else: LOG.info(_LI('targetlist: %s'), targetalias) out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", _evsid, 'iscsi-target', 'mod', '-s', secret, '-a', 'enable', targetalias, check_exit_code=True) def get_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp): """Returns the chap secret for the specified target. :param cmd: ssc command name :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :param targetalias: alias of the target :param hdp: data pool of the logical unit :return secret: CHAP secret of the target """ _evsid = self.get_evs(cmd, ip0, user, pw, hdp) out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", _evsid, 'iscsi-target', 'list', targetalias, check_exit_code=True) enabled = "" secret = "" lines = out.split('\n') for line in lines: if 'Secret' in line: if len(line.split()) > 2: secret = line.split()[2] if 'Authentication' in line: enabled = line.split()[2] if enabled == 'Enabled': return secret else: return "" def check_target(self, cmd, ip0, user, pw, hdp, target_alias): """Checks if a given target exists and gets its info :param cmd: ssc command name :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :param hdp: pool name used :param target_alias: alias of the target :returns: True if target exists :returns: list with the target info """ LOG.debug("Checking if target %(tgt)s exists.", {'tgt': target_alias}) evsid = self.get_evs(cmd, ip0, user, pw, hdp) tgt_list = self._get_targets(cmd, ip0, user, pw, evsid) for tgt in tgt_list: if tgt['alias'] == target_alias: attached_luns = len(tgt['luns']) LOG.debug("Target %(tgt)s has %(lun)s volumes.", {'tgt': target_alias, 'lun': attached_luns}) return True, tgt LOG.debug("Target %(tgt)s does not exist.", {'tgt': target_alias}) return False, None def check_lu(self, cmd, ip0, user, pw, volume_name, hdp): """Checks if a given LUN is already mapped :param cmd: ssc command name :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :param volume_name: number of the LUN :param hdp: storage pool of the LUN :returns: True if the lun is attached :returns: the LUN id :returns: Info related to the target """ LOG.debug("Checking if vol %s (hdp: %s) is attached.", volume_name, hdp) evsid = self.get_evs(cmd, ip0, user, pw, hdp) tgt_list = self._get_targets(cmd, ip0, user, pw, evsid) for tgt in tgt_list: if len(tgt['luns']) == 0: continue for lun in tgt['luns']: lunid = lun['id'] lunname = lun['name'] if lunname[:29] == volume_name[:29]: LOG.debug("LUN %(lun)s attached on %(lunid)s, " "target: %(tgt)s.", {'lun': volume_name, 'lunid': lunid, 'tgt': tgt}) return True, lunid, tgt 
LOG.debug("LUN %(lun)s not attached.", {'lun': volume_name}) return False, 0, None def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun): """Returns the information for the specified Logical Unit. Returns the information of an existing Logical Unit on HNAS, according to the name provided. :param cmd: the command that will be run on SMU :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :param fslabel: label of the file system :param lun: label of the logical unit """ evs = self.get_evs(cmd, ip0, user, pw, fslabel) out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", evs, 'iscsi-lu', 'list', lun) return out def rename_existing_lu(self, cmd, ip0, user, pw, fslabel, new_name, vol_name): """Renames the specified Logical Unit. Renames an existing Logical Unit on HNAS according to the new name provided. :param cmd: command that will be run on SMU :param ip0: string IP address of controller :param user: string user authentication for array :param pw: string password authentication for array :param fslabel: label of the file system :param new_name: new name to the existing volume :param vol_name: current name of the existing volume """ evs = self.get_evs(cmd, ip0, user, pw, fslabel) out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs", evs, "iscsi-lu", "mod", "-n", new_name, vol_name) return out cinder-8.0.0/cinder/volume/drivers/hitachi/__init__.py0000664000567000056710000000000012701406250024062 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/hitachi/hbsd_horcm.py0000664000567000056710000016136012701406250024454 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, 2015, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import os import re import shlex import threading import time from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units import six from cinder import exception from cinder.i18n import _LE, _LI, _LW from cinder import utils from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib GETSTORAGEARRAY_ONCE = 100 MAX_SNAPSHOT_COUNT = 1021 SNAP_LAST_PATH_SSB = '0xB958,0x020A' HOST_IO_SSB = '0xB958,0x0233' INVALID_LUN_SSB = '0x2E20,0x0000' INTERCEPT_LDEV_SSB = '0x2E22,0x0001' HOSTGROUP_INSTALLED = '0xB956,0x3173' RESOURCE_LOCKED = 'SSB=0x2E11,0x2205' LDEV_STATUS_WAITTIME = 120 LUN_DELETE_WAITTIME = basic_lib.DEFAULT_PROCESS_WAITTIME LUN_DELETE_INTERVAL = 3 EXEC_MAX_WAITTIME = 30 EXEC_RETRY_INTERVAL = 5 HORCM_WAITTIME = 1 PAIR_TYPE = ('HORC', 'MRCF', 'QS') PERMITTED_TYPE = ('CVS', 'HDP', 'HDT') RAIDCOM_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_' HORCMGR_LOCK_FILE = basic_lib.LOCK_DIR + 'horcmgr_' RESOURCE_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_resource_' STATUS_TABLE = { 'SMPL': basic_lib.SMPL, 'COPY': basic_lib.COPY, 'RCPY': basic_lib.COPY, 'PAIR': basic_lib.PAIR, 'PFUL': basic_lib.PAIR, 'PSUS': basic_lib.PSUS, 'PFUS': basic_lib.PSUS, 'SSUS': basic_lib.PSUS, 'PSUE': basic_lib.PSUE, } NOT_SET = '-' HORCM_RUNNING = 1 COPY_GROUP = basic_lib.NAME_PREFIX + '%s%s%03X%d' SNAP_NAME = basic_lib.NAME_PREFIX + 'snap' LDEV_NAME = basic_lib.NAME_PREFIX + 'ldev-%d-%d' MAX_MUNS = 3 EX_ENAUTH = 202 EX_ENOOBJ = 205 EX_CMDRJE = 221 EX_CMDIOE = 237 EX_INVCMD = 240 EX_INVMOD = 241 EX_ENODEV = 246 EX_ENOENT = 247 EX_OPTINV = 248 EX_ATTDBG = 250 EX_ATTHOR = 251 EX_COMERR = 255 EX_UNKOWN = -1 NO_SUCH_DEVICE = (EX_ENODEV, EX_ENOENT) COMMAND_IO_TO_RAID = (EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV) HORCM_ERROR = (EX_ATTDBG, EX_ATTHOR, EX_COMERR) MAX_HOSTGROUPS = 254 MAX_HLUN = 2047 DEFAULT_PORT_BASE = 31000 LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('hitachi_horcm_numbers', default='200,201', help='Instance numbers for HORCM'), cfg.StrOpt('hitachi_horcm_user', help='Username of storage system for HORCM'), cfg.StrOpt('hitachi_horcm_password', help='Password of storage system for HORCM', secret=True), cfg.BoolOpt('hitachi_horcm_add_conf', default=True, help='Add to HORCM configuration'), cfg.IntOpt('hitachi_horcm_resource_lock_timeout', default=600, help='Timeout until a resource lock is released, in seconds. 
' 'The value must be between 0 and 7200.'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) def horcm_synchronized(function): @functools.wraps(function) def wrapper(*args, **kargs): if len(args) == 1: inst = args[0].conf.hitachi_horcm_numbers[0] raidcom_obj_lock = args[0].raidcom_lock else: inst = args[1] raidcom_obj_lock = args[0].raidcom_pair_lock raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) lock = basic_lib.get_process_lock(raidcom_lock_file) with raidcom_obj_lock, lock: return function(*args, **kargs) return wrapper def storage_synchronized(function): @functools.wraps(function) def wrapper(*args, **kargs): serial = args[0].conf.hitachi_serial_number resource_lock = args[0].resource_lock resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial) lock = basic_lib.get_process_lock(resource_lock_file) with resource_lock, lock: return function(*args, **kargs) return wrapper class HBSDHORCM(basic_lib.HBSDBasicLib): def __init__(self, conf): super(HBSDHORCM, self).__init__(conf=conf) self.copy_groups = [None] * MAX_MUNS self.raidcom_lock = threading.Lock() self.raidcom_pair_lock = threading.Lock() self.horcmgr_lock = threading.Lock() self.horcmgr_flock = None self.resource_lock = threading.Lock() def check_param(self): numbers = self.conf.hitachi_horcm_numbers.split(',') if len(numbers) != 2: msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') raise exception.HBSDError(message=msg) for i in numbers: if not i.isdigit(): msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') raise exception.HBSDError(message=msg) self.conf.hitachi_horcm_numbers = [int(num) for num in numbers] inst = self.conf.hitachi_horcm_numbers[0] pair_inst = self.conf.hitachi_horcm_numbers[1] if inst == pair_inst: msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') raise exception.HBSDError(message=msg) for param in ('hitachi_horcm_user', 'hitachi_horcm_password'): if not getattr(self.conf, param): msg = basic_lib.output_err(601, param=param) raise exception.HBSDError(message=msg) if self.conf.hitachi_thin_pool_id == self.conf.hitachi_pool_id: msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') raise exception.HBSDError(message=msg) resource_lock_timeout = self.conf.hitachi_horcm_resource_lock_timeout if not ((resource_lock_timeout >= 0) and (resource_lock_timeout <= 7200)): msg = basic_lib.output_err( 601, param='hitachi_horcm_resource_lock_timeout') raise exception.HBSDError(message=msg) for opt in volume_opts: getattr(self.conf, opt.name) def set_copy_groups(self, host_ip): serial = self.conf.hitachi_serial_number inst = self.conf.hitachi_horcm_numbers[1] for mun in range(MAX_MUNS): copy_group = COPY_GROUP % (host_ip, serial, inst, mun) self.copy_groups[mun] = copy_group def set_pair_flock(self): inst = self.conf.hitachi_horcm_numbers[1] name = '%s%d' % (HORCMGR_LOCK_FILE, inst) self.horcmgr_flock = basic_lib.FileLock(name, self.horcmgr_lock) return self.horcmgr_flock def check_horcm(self, inst): args = 'HORCMINST=%d horcmgr -check' % inst ret, _stdout, _stderr = self.exec_command('env', args=args, printflag=False) return ret def shutdown_horcm(self, inst): ret, stdout, stderr = self.exec_command( 'horcmshutdown.sh', args=six.text_type(inst), printflag=False) return ret def start_horcm(self, inst): return self.exec_command('horcmstart.sh', args=six.text_type(inst), printflag=False) def _wait_for_horcm_shutdown(self, inst): if self.check_horcm(inst) != HORCM_RUNNING: raise loopingcall.LoopingCallDone() if self.shutdown_horcm(inst): LOG.error(_LE("Failed to shutdown 
horcm.")) raise loopingcall.LoopingCallDone() @horcm_synchronized def restart_horcm(self, inst=None): if inst is None: inst = self.conf.hitachi_horcm_numbers[0] loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_horcm_shutdown, inst) loop.start(interval=HORCM_WAITTIME).wait() ret, stdout, stderr = self.start_horcm(inst) if ret: msg = basic_lib.output_err( 600, cmd='horcmstart.sh', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def restart_pair_horcm(self): inst = self.conf.hitachi_horcm_numbers[1] self.restart_horcm(inst=inst) def setup_horcmgr(self, host_ip): pair_inst = self.conf.hitachi_horcm_numbers[1] self.set_copy_groups(host_ip) if self.conf.hitachi_horcm_add_conf: self.create_horcmconf() self.create_horcmconf(inst=pair_inst) self.restart_horcm() with self.horcmgr_flock: self.restart_pair_horcm() ret, stdout, stderr = self.comm_login() if ret: msg = basic_lib.output_err( 600, cmd='raidcom -login', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def _wait_for_exec_horcm(self, cmd, args, printflag, start): if cmd == 'raidcom': serial = self.conf.hitachi_serial_number inst = self.conf.hitachi_horcm_numbers[0] raidcom_obj_lock = self.raidcom_lock args = '%s -s %s -I%d' % (args, serial, inst) else: inst = self.conf.hitachi_horcm_numbers[1] raidcom_obj_lock = self.raidcom_pair_lock args = '%s -ISI%d' % (args, inst) user = self.conf.hitachi_horcm_user passwd = self.conf.hitachi_horcm_password raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) lock = basic_lib.get_process_lock(raidcom_lock_file) with raidcom_obj_lock, lock: ret, stdout, stderr = self.exec_command(cmd, args=args, printflag=printflag) # The resource group may be locked by other software. # Therefore, wait until the lock is released. 
if (RESOURCE_LOCKED in stderr and (time.time() - start < self.conf.hitachi_horcm_resource_lock_timeout)): return if not ret or ret <= 127: raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if time.time() - start >= EXEC_MAX_WAITTIME: LOG.error(_LE("horcm command timeout.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if (ret == EX_ENAUTH and not re.search("-login %s %s" % (user, passwd), args)): _ret, _stdout, _stderr = self.comm_login() if _ret: LOG.error(_LE("Failed to authenticate user.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) elif ret in HORCM_ERROR: _ret = 0 with raidcom_obj_lock, lock: if self.check_horcm(inst) != HORCM_RUNNING: _ret, _stdout, _stderr = self.start_horcm(inst) if _ret and _ret != HORCM_RUNNING: LOG.error(_LE("Failed to start horcm.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) elif ret not in COMMAND_IO_TO_RAID: LOG.error(_LE("Unexpected error occurs in horcm.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) def exec_raidcom(self, cmd, args, printflag=True): loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_exec_horcm, cmd, args, printflag, time.time()) return loop.start(interval=EXEC_RETRY_INTERVAL).wait() def comm_login(self): rmi_user = self.conf.hitachi_horcm_user rmi_pass = self.conf.hitachi_horcm_password args = '-login %s %s' % (rmi_user, rmi_pass) return self.exec_raidcom('raidcom', args, printflag=False) def comm_reset_status(self): self.exec_raidcom('raidcom', 'reset command_status') def comm_get_status(self): return self.exec_raidcom('raidcom', 'get command_status') def get_command_error(self, stdout): lines = stdout.splitlines() line = shlex.split(lines[1]) return int(line[3]) def comm_get_ldev(self, ldev): opt = 'get ldev -ldev_id %s' % ldev ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return stdout def add_used_hlun(self, port, gid, used_list): opt = 'get lun -port %s-%d' % (port, gid) ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[1:]: lun = int(shlex.split(line)[3]) if lun not in used_list: used_list.append(lun) def get_unused_ldev(self, ldev_range): start = ldev_range[0] end = ldev_range[1] while start < end: if end - start + 1 > GETSTORAGEARRAY_ONCE: cnt = GETSTORAGEARRAY_ONCE else: cnt = end - start + 1 opt = 'get ldev -ldev_id %d -cnt %d' % (start, cnt) ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() ldev_num = None for line in lines: if re.match("LDEV :", line): ldev_num = int(shlex.split(line)[2]) continue if re.match("VOL_TYPE : NOT DEFINED", line): return ldev_num start += GETSTORAGEARRAY_ONCE else: msg = basic_lib.output_err(648, resource='LDEV') raise exception.HBSDError(message=msg) def get_hgname_gid(self, port, host_grp_name): opt = 'get host_grp -port %s -key host_grp' % port ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, 
cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) if line[2] == host_grp_name: return int(line[1]) return None def get_unused_gid(self, range, port): _min = range[0] _max = range[1] opt = 'get host_grp -port %s -key host_grp' % port ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() free_gid = None for line in lines[_min + 1:]: line = shlex.split(line) if int(line[1]) > _max: break if line[2] == '-': free_gid = int(line[1]) break if free_gid is None: msg = basic_lib.output_err(648, resource='GID') raise exception.HBSDError(message=msg) return free_gid def comm_set_target_wwns(self, target_ports): opt = 'get port' ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) target_wwns = {} lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) port = line[0][:5] if target_ports and port not in target_ports: continue target_wwns[port] = line[10] LOG.debug('target wwns: %s', target_wwns) return target_wwns def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected): opt = 'get host_grp -port %s' % port ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() found_wwns = 0 for line in lines[1:]: line = shlex.split(line) if not re.match(basic_lib.NAME_PREFIX, line[2]): continue gid = line[1] opt = 'get hba_wwn -port %s-%s' % (port, gid) ret, stdout, stderr = self.exec_raidcom( 'raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[1:]: hba_info = shlex.split(line) if hba_info[3] in wwns: hostgroups.append({'port': six.text_type(port), 'gid': int(hba_info[1]), 'initiator_wwn': hba_info[3], 'detected': is_detected}) found_wwns += 1 if len(wwns) == found_wwns: break if len(wwns) == found_wwns: break def comm_chk_login_wwn(self, wwns, port): opt = 'get port -port %s' % port ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[1:]: login_info = shlex.split(line) if login_info[1] in wwns: return True else: return False def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True): security_ports = [] hostgroups = [] opt = 'get port' ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) port = line[0][:5] if target_ports and port not in 
target_ports: continue security = True if line[7] == 'Y' else False is_detected = None if login: is_detected = self.comm_chk_login_wwn(wwns, port) if security: self.comm_get_hbawwn(hostgroups, wwns, port, is_detected) security_ports.append(port) for hostgroup in hostgroups: hgs.append(hostgroup) return security_ports def _get_lun(self, port, gid, ldev): lun = None opt = 'get lun -port %s-%d' % (port, gid) ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) if line[5] == six.text_type(ldev): lun = int(line[3]) break return lun def _wait_for_delete_lun(self, hostgroup, ldev, start): opt = 'delete lun -port %s-%d -ldev_id %d' % (hostgroup['port'], hostgroup['gid'], ldev) ret, stdout, stderr = self.exec_raidcom('raidcom', opt) if not ret: raise loopingcall.LoopingCallDone() if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and not self.comm_get_snapshot(ldev) or re.search('SSB=%s' % HOST_IO_SSB, stderr)): LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=stderr)) if time.time() - start >= LUN_DELETE_WAITTIME: msg = basic_lib.output_err( 637, method='_wait_for_delete_lun', timeout=LUN_DELETE_WAITTIME) raise exception.HBSDError(message=msg) else: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_delete_lun_core(self, hostgroup, ldev): loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_delete_lun, hostgroup, ldev, time.time()) loop.start(interval=LUN_DELETE_INTERVAL).wait() def comm_delete_lun(self, hostgroups, ldev): deleted_hostgroups = [] no_ldev_cnt = 0 for hostgroup in hostgroups: port = hostgroup['port'] gid = hostgroup['gid'] is_deleted = False for deleted in deleted_hostgroups: if port == deleted['port'] and gid == deleted['gid']: is_deleted = True if is_deleted: continue try: self.comm_delete_lun_core(hostgroup, ldev) except exception.HBSDCmdError as ex: no_ldev_cnt += 1 if ex.ret == EX_ENOOBJ: if no_ldev_cnt != len(hostgroups): continue raise exception.HBSDNotFound else: raise deleted_hostgroups.append({'port': port, 'gid': gid}) def _check_ldev_status(self, ldev, status): opt = ('get ldev -ldev_id %s -check_status %s -time %s' % (ldev, status, LDEV_STATUS_WAITTIME)) ret, _stdout, _stderr = self.exec_raidcom('raidcom', opt) return ret # Don't remove the storage_synchronized decorator. # It is needed to prevent comm_add_ldev() and comm_delete_ldev() from being # executed concurrently.
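# A minimal sketch of the pattern the storage_synchronized /
# horcm_synchronized decorators above implement (illustrative; the
# driver's basic_lib.get_process_lock is replaced here by
# oslo.concurrency, and the lock name is hypothetical): a per-process
# threading.Lock is held together with a cross-process external file
# lock, so only one thread in one cinder-volume process issues
# conflicting raidcom commands at a time.
#
#     import functools
#     import threading
#     from oslo_concurrency import lockutils
#
#     _obj_lock = threading.Lock()
#
#     def array_synchronized(fn):
#         @functools.wraps(fn)
#         def wrapper(*args, **kwargs):
#             with _obj_lock, lockutils.lock('hbsd-array', external=True):
#                 return fn(*args, **kwargs)
#         return wrapper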
@storage_synchronized def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol): emulation = 'OPEN-V' if is_vvol: opt = ('add ldev -pool snap -ldev_id %d ' '-capacity %dG -emulation %s' % (ldev, capacity, emulation)) else: opt = ('add ldev -pool %d -ldev_id %d ' '-capacity %dG -emulation %s' % (pool_id, ldev, capacity, emulation)) self.comm_reset_status() ret, stdout, stderr = self.exec_raidcom('raidcom', opt) if ret: if re.search('SSB=%s' % INTERCEPT_LDEV_SSB, stderr): raise exception.HBSDNotFound msg = basic_lib.output_err( 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) if self._check_ldev_status(ldev, "NML"): msg = basic_lib.output_err(653, ldev=ldev) raise exception.HBSDError(message=msg) def comm_add_hostgrp(self, port, gid, host_grp_name): opt = 'add host_grp -port %s-%d -host_grp_name %s' % (port, gid, host_grp_name) ret, stdout, stderr = self.exec_raidcom('raidcom', opt) if ret: if re.search('SSB=%s' % HOSTGROUP_INSTALLED, stderr): raise exception.HBSDNotFound msg = basic_lib.output_err( 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_del_hostgrp(self, port, gid, host_grp_name): opt = 'delete host_grp -port %s-%d %s' % (port, gid, host_grp_name) ret, stdout, stderr = self.exec_raidcom('raidcom', opt) if ret: msg = basic_lib.output_err( 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_hbawwn(self, port, gid, wwn): opt = 'add hba_wwn -port %s-%s -hba_wwn %s' % (port, gid, wwn) ret, stdout, stderr = self.exec_raidcom('raidcom', opt) if ret: msg = basic_lib.output_err( 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) @storage_synchronized def comm_add_lun(self, unused_command, hostgroups, ldev, is_once=False): tmp_hostgroups = hostgroups[:] is_ok = False used_list = [] lun = None old_lun = None for hostgroup in hostgroups: port = hostgroup['port'] gid = hostgroup['gid'] self.add_used_hlun(port, gid, used_list) lun = self._get_lun(port, gid, ldev) # When 'lun' or 'old_lun' is 0, it should be true. # So, it cannot remove 'is not None'. if lun is not None: if old_lun is not None and old_lun != lun: msg = basic_lib.output_err(648, resource='LUN (HLUN)') raise exception.HBSDError(message=msg) is_ok = True hostgroup['lun'] = lun tmp_hostgroups.remove(hostgroup) old_lun = lun if is_once: # When 'lun' is 0, it should be true. # So, it cannot remove 'is not None'. 
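# (Clarifying the two comments above: a valid HLUN can be 0, which is
# falsy in Python, so 'lun' and 'old_lun' must be compared against None;
# truth-testing would misread LUN 0 as "no mapping".
#     >>> lun = 0
#     >>> bool(lun), lun is not None
#     (False, True)
# )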
if lun is not None: return elif len(used_list) < MAX_HLUN + 1: break else: tmp_hostgroups.remove(hostgroup) if tmp_hostgroups: used_list = [] if not used_list: lun = 0 elif lun is None: for i in range(MAX_HLUN + 1): if i not in used_list: lun = i break else: raise exception.HBSDNotFound opt = None ret = 0 stdout = None stderr = None invalid_hgs_str = None for hostgroup in tmp_hostgroups: port = hostgroup['port'] gid = hostgroup['gid'] if not hostgroup['detected']: if invalid_hgs_str: invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str, port, gid) else: invalid_hgs_str = '%s:%d' % (port, gid) continue opt = 'add lun -port %s-%d -ldev_id %d -lun_id %d' % ( port, gid, ldev, lun) ret, stdout, stderr = self.exec_raidcom('raidcom', opt) if not ret: is_ok = True hostgroup['lun'] = lun if is_once: break else: LOG.warning(basic_lib.set_msg( 314, ldev=ldev, lun=lun, port=port, id=gid)) if not is_ok: if stderr: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) else: msg = basic_lib.output_err(659, gid=invalid_hgs_str) raise exception.HBSDError(message=msg) # Don't remove the storage_synchronized decorator. # It is needed to prevent comm_add_ldev() and comm_delete_ldev() from being # executed concurrently. @storage_synchronized def comm_delete_ldev(self, ldev, is_vvol): ret = -1 stdout = "" stderr = "" self.comm_reset_status() opt = 'delete ldev -ldev_id %d' % ldev ret, stdout, stderr = self.exec_raidcom('raidcom', opt) if ret: if re.search('SSB=%s' % INVALID_LUN_SSB, stderr): raise exception.HBSDNotFound msg = basic_lib.output_err( 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) ret, stdout, stderr = self.comm_get_status() if ret or self.get_command_error(stdout): opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_extend_ldev(self, ldev, old_size, new_size): extend_size = new_size - old_size opt = 'extend ldev -ldev_id %d -capacity %dG' % (ldev, extend_size) ret, stdout, stderr = self.exec_raidcom('raidcom', opt) if ret: msg = basic_lib.output_err( 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_get_dp_pool(self, pool_id): opt = 'get dp_pool' ret, stdout, stderr = self.exec_raidcom('raidcom', opt, printflag=False) if ret: opt = 'raidcom %s' % opt msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[1:]: if int(shlex.split(line)[0]) == pool_id: free_gb = int(shlex.split(line)[3]) / 1024 total_gb = int(shlex.split(line)[4]) / 1024 return total_gb, free_gb msg = basic_lib.output_err(640, pool_id=pool_id) raise exception.HBSDError(message=msg) def comm_modify_ldev(self, ldev): args = 'modify ldev -ldev_id %d -status discard_zero_page' % ldev ret, stdout, stderr = self.exec_raidcom('raidcom', args) if ret: LOG.warning(basic_lib.set_msg(315, ldev=ldev, reason=stderr)) def is_detected(self, port, wwn): return self.comm_chk_login_wwn([wwn], port) def discard_zero_page(self, ldev): try: self.comm_modify_ldev(ldev) except Exception as ex: LOG.warning(_LW('Failed to discard zero page: %s'), ex) def comm_add_snapshot(self, pvol, svol): pool = self.conf.hitachi_thin_pool_id copy_size =
self.conf.hitachi_copy_speed args = ('add snapshot -ldev_id %d %d -pool %d ' '-snapshot_name %s -copy_size %d' % (pvol, svol, pool, SNAP_NAME, copy_size)) ret, stdout, stderr = self.exec_raidcom('raidcom', args) if ret: msg = basic_lib.output_err( 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_delete_snapshot(self, ldev): args = 'delete snapshot -ldev_id %d' % ldev ret, stdout, stderr = self.exec_raidcom('raidcom', args) if ret: msg = basic_lib.output_err( 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_modify_snapshot(self, ldev, op): args = ('modify snapshot -ldev_id %d -snapshot_data %s' % (ldev, op)) ret, stdout, stderr = self.exec_raidcom('raidcom', args) if ret: msg = basic_lib.output_err( 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def _wait_for_snap_status(self, pvol, svol, status, timeout, start): if (self.get_snap_pvol_status(pvol, svol) in status and self.get_snap_svol_status(svol) in status): raise loopingcall.LoopingCallDone() if time.time() - start >= timeout: msg = basic_lib.output_err( 637, method='_wait_for_snap_status', timeout=timeout) raise exception.HBSDError(message=msg) def wait_snap(self, pvol, svol, status, timeout, interval): loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_snap_status, pvol, svol, status, timeout, time.time()) loop.start(interval=interval).wait() def comm_get_snapshot(self, ldev): args = 'get snapshot -ldev_id %d' % ldev ret, stdout, stderr = self.exec_raidcom('raidcom', args, printflag=False) if ret: opt = 'raidcom %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return stdout def check_snap_count(self, ldev): stdout = self.comm_get_snapshot(ldev) if not stdout: return lines = stdout.splitlines() if len(lines) >= MAX_SNAPSHOT_COUNT + 1: msg = basic_lib.output_err( 615, copy_method=basic_lib.THIN, pvol=ldev) raise exception.HBSDBusy(message=msg) def get_snap_pvol_status(self, pvol, svol): stdout = self.comm_get_snapshot(pvol) if not stdout: return basic_lib.SMPL lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) if int(line[6]) == svol: return STATUS_TABLE[line[2]] else: return basic_lib.SMPL def get_snap_svol_status(self, ldev): stdout = self.comm_get_snapshot(ldev) if not stdout: return basic_lib.SMPL lines = stdout.splitlines() line = shlex.split(lines[1]) return STATUS_TABLE[line[2]] @horcm_synchronized def create_horcmconf(self, inst=None): if inst is None: inst = self.conf.hitachi_horcm_numbers[0] serial = self.conf.hitachi_serial_number filename = '/etc/horcm%d.conf' % inst port = DEFAULT_PORT_BASE + inst found = False if not os.path.exists(filename): file_str = """ HORCM_MON #ip_address service poll(10ms) timeout(10ms) 127.0.0.1 %16d 6000 3000 HORCM_CMD """ % port else: file_str = utils.read_file_as_root(filename) lines = file_str.splitlines() for line in lines: if re.match(r'\\\\.\\CMD-%s:/dev/sd' % serial, line): found = True break if not found: insert_str = r'\\\\.\\CMD-%s:/dev/sd' % serial file_str = re.sub(r'(\n\bHORCM_CMD.*|^\bHORCM_CMD.*)', r'\1\n%s\n' % insert_str, file_str) try: utils.execute('tee', filename, process_input=file_str, run_as_root=True) except putils.ProcessExecutionError as ex: msg = basic_lib.output_err( 632, file=filename,
ret=ex.exit_code, err=ex.stderr) raise exception.HBSDError(message=msg) def comm_get_copy_grp(self): ret, stdout, stderr = self.exec_raidcom('raidcom', 'get copy_grp', printflag=False) if ret: opt = 'raidcom get copy_grp' msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return stdout def comm_add_copy_grp(self, copy_group, pvol_group, svol_group, mun): args = ('add copy_grp -copy_grp_name %s %s %s -mirror_id %d' % (copy_group, pvol_group, svol_group, mun)) ret, stdout, stderr = self.exec_raidcom('raidcom', args, printflag=False) if ret: opt = 'raidcom %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_delete_copy_grp(self, copy_group): args = 'delete copy_grp -copy_grp_name %s' % copy_group ret, stdout, stderr = self.exec_raidcom('raidcom', args, printflag=False) if ret: opt = 'raidcom %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_get_device_grp(self, group_name): args = 'get device_grp -device_grp_name %s' % group_name ret, stdout, stderr = self.exec_raidcom('raidcom', args, printflag=False) if ret: opt = 'raidcom %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return stdout def comm_add_device_grp(self, group_name, ldev_name, ldev): args = ('add device_grp -device_grp_name %s %s -ldev_id %d' % (group_name, ldev_name, ldev)) ret, stdout, stderr = self.exec_raidcom('raidcom', args, printflag=False) if ret: opt = 'raidcom %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_delete_device_grp(self, group_name, ldev): args = ('delete device_grp -device_grp_name %s -ldev_id %d' % (group_name, ldev)) ret, stdout, stderr = self.exec_raidcom('raidcom', args, printflag=False) if ret: opt = 'raidcom %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_paircreate(self, copy_group, ldev_name): args = ('-g %s -d %s -split -fq quick -c %d -vl' % (copy_group, ldev_name, self.conf.hitachi_copy_speed)) ret, stdout, stderr = self.exec_raidcom('paircreate', args) if ret: opt = 'paircreate %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_pairsplit(self, copy_group, ldev_name): args = '-g %s -d %s -S' % (copy_group, ldev_name) ret, stdout, stderr = self.exec_raidcom('pairsplit', args) if ret: opt = 'pairsplit %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_pairevtwait(self, copy_group, ldev_name, check_svol): if not check_svol: option = '-nowait' else: option = '-nowaits' args = '-g %s -d %s %s' % (copy_group, ldev_name, option) ret, stdout, stderr = self.exec_raidcom('pairevtwait', args, printflag=False) if ret > 127: opt = 'pairevtwait %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return ret def comm_pairdisplay(self, copy_group, ldev_name=None): if not ldev_name: args = '-g %s -CLI' 
% copy_group else: args = '-g %s -d %s -CLI' % (copy_group, ldev_name) ret, stdout, stderr = self.exec_raidcom('pairdisplay', args, printflag=False) if ret and ret not in NO_SUCH_DEVICE: opt = 'pairdisplay %s' % args msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return ret, stdout, stderr def check_copy_grp(self, copy_group): stdout = self.comm_get_copy_grp() lines = stdout.splitlines() count = 0 for line in lines[1:]: line = shlex.split(line) if line[0] == copy_group: count += 1 if count == 2: break return count def check_device_grp(self, group_name, ldev, ldev_name=None): stdout = self.comm_get_device_grp(group_name) lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) if int(line[2]) == ldev: if not ldev_name: return True else: return line[1] == ldev_name else: return False def is_smpl(self, copy_group, ldev_name): ret, stdout, stderr = self.comm_pairdisplay(copy_group, ldev_name=ldev_name) if not stdout: return True lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) if line[9] in [NOT_SET, 'SMPL']: return True else: return False def get_copy_groups(self): copy_groups = [] stdout = self.comm_get_copy_grp() lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) if line[0] in self.copy_groups and line[0] not in copy_groups: copy_groups.append(line[0]) return copy_groups def get_matched_copy_group(self, pvol, svol, ldev_name): for copy_group in self.get_copy_groups(): pvol_group = '%sP' % copy_group if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name): return copy_group else: return None def get_paired_info(self, ldev, only_flag=False): paired_info = {'pvol': None, 'svol': []} pvol = None is_svol = False stdout = self.comm_get_snapshot(ldev) if stdout: lines = stdout.splitlines() line = shlex.split(lines[1]) status = STATUS_TABLE.get(line[2], basic_lib.UNKN) if line[1] == 'P-VOL': pvol = ldev svol = int(line[6]) else: is_svol = True pvol = int(line[6]) svol = ldev if status == basic_lib.PSUS: status = self.get_snap_pvol_status(pvol, svol) svol_info = {'lun': svol, 'status': status, 'is_vvol': True} paired_info['svol'].append(svol_info) paired_info['pvol'] = pvol if only_flag or is_svol: return paired_info for copy_group in self.get_copy_groups(): ldev_name = None pvol_status = basic_lib.UNKN svol_status = basic_lib.UNKN ret, stdout, stderr = self.comm_pairdisplay(copy_group) if not stdout: continue lines = stdout.splitlines() for line in lines[1:]: line = shlex.split(line) if line[9] not in ['P-VOL', 'S-VOL']: continue ldev0 = int(line[8]) ldev1 = int(line[12]) if ldev not in [ldev0, ldev1]: continue ldev_name = line[1] if line[9] == 'P-VOL': pvol = ldev0 svol = ldev1 pvol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN) else: svol = ldev0 pvol = ldev1 svol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN) if svol == ldev: is_svol = True if not ldev_name: continue pvol_group = '%sP' % copy_group pvol_ok = self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name) svol_group = '%sS' % copy_group svol_ok = self.check_device_grp(svol_group, svol, ldev_name=ldev_name) if pvol_ok and svol_ok: if pvol_status == basic_lib.PSUS: status = svol_status else: status = pvol_status svol_info = {'lun': svol, 'status': status, 'is_vvol': False} paired_info['svol'].append(svol_info) if is_svol: break # When 'pvol' is 0, it should be true. # So, it cannot remove 'is not None'. 
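# (Same caveat as for 'lun' in comm_add_lun(): LDEV 0 is a valid id, so
# 'pvol' must be compared against None rather than truth-tested.)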
if pvol is not None and paired_info['pvol'] is None: paired_info['pvol'] = pvol return paired_info def add_pair_config(self, pvol, svol, copy_group, ldev_name, mun): pvol_group = '%sP' % copy_group svol_group = '%sS' % copy_group self.comm_add_device_grp(pvol_group, ldev_name, pvol) self.comm_add_device_grp(svol_group, ldev_name, svol) nr_copy_groups = self.check_copy_grp(copy_group) if nr_copy_groups == 1: self.comm_delete_copy_grp(copy_group) if nr_copy_groups != 2: self.comm_add_copy_grp(copy_group, pvol_group, svol_group, mun) def delete_pair_config(self, pvol, svol, copy_group, ldev_name): pvol_group = '%sP' % copy_group svol_group = '%sS' % copy_group if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name): self.comm_delete_device_grp(pvol_group, pvol) if self.check_device_grp(svol_group, svol, ldev_name=ldev_name): self.comm_delete_device_grp(svol_group, svol) def _wait_for_pair_status(self, copy_group, ldev_name, status, timeout, check_svol, start): if self.comm_pairevtwait(copy_group, ldev_name, check_svol) in status: raise loopingcall.LoopingCallDone() if time.time() - start >= timeout: msg = basic_lib.output_err( 637, method='_wait_for_pair_status', timeout=timeout) raise exception.HBSDError(message=msg) def wait_pair(self, copy_group, ldev_name, status, timeout, interval, check_svol=False): loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_pair_status, copy_group, ldev_name, status, timeout, check_svol, time.time()) loop.start(interval=interval).wait() def comm_create_pair(self, pvol, svol, is_vvol): timeout = basic_lib.DEFAULT_PROCESS_WAITTIME interval = self.conf.hitachi_copy_check_interval if not is_vvol: restart = False create = False ldev_name = LDEV_NAME % (pvol, svol) mun = 0 for mun in range(MAX_MUNS): copy_group = self.copy_groups[mun] pvol_group = '%sP' % copy_group if not self.check_device_grp(pvol_group, pvol): break else: msg = basic_lib.output_err( 615, copy_method=basic_lib.FULL, pvol=pvol) raise exception.HBSDBusy(message=msg) try: self.add_pair_config(pvol, svol, copy_group, ldev_name, mun) self.restart_pair_horcm() restart = True self.comm_paircreate(copy_group, ldev_name) create = True self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS], timeout, interval) self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS, basic_lib.COPY], timeout, interval, check_svol=True) except Exception: with excutils.save_and_reraise_exception(): if create: try: self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS], timeout, interval) self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS], timeout, interval, check_svol=True) except Exception as ex: LOG.warning(_LW('Failed to create pair: %s'), ex) try: self.comm_pairsplit(copy_group, ldev_name) self.wait_pair( copy_group, ldev_name, [basic_lib.SMPL], timeout, self.conf.hitachi_async_copy_check_interval) except Exception as ex: LOG.warning(_LW('Failed to create pair: %s'), ex) if self.is_smpl(copy_group, ldev_name): try: self.delete_pair_config(pvol, svol, copy_group, ldev_name) except Exception as ex: LOG.warning(_LW('Failed to create pair: %s'), ex) if restart: try: self.restart_pair_horcm() except Exception as ex: LOG.warning(_LW('Failed to restart horcm: %s'), ex) else: self.check_snap_count(pvol) self.comm_add_snapshot(pvol, svol) try: self.wait_snap(pvol, svol, [basic_lib.PAIR], timeout, interval) self.comm_modify_snapshot(svol, 'create') self.wait_snap(pvol, svol, [basic_lib.PSUS], timeout, interval) except Exception: with excutils.save_and_reraise_exception(): try: self.comm_delete_snapshot(svol)
self.wait_snap( pvol, svol, [basic_lib.SMPL], timeout, self.conf.hitachi_async_copy_check_interval) except Exception as ex: LOG.warning(_LW('Failed to create pair: %s'), ex) def delete_pair(self, pvol, svol, is_vvol): timeout = basic_lib.DEFAULT_PROCESS_WAITTIME interval = self.conf.hitachi_async_copy_check_interval if not is_vvol: ldev_name = LDEV_NAME % (pvol, svol) copy_group = self.get_matched_copy_group(pvol, svol, ldev_name) if not copy_group: return try: self.comm_pairsplit(copy_group, ldev_name) self.wait_pair(copy_group, ldev_name, [basic_lib.SMPL], timeout, interval) finally: if self.is_smpl(copy_group, ldev_name): self.delete_pair_config(pvol, svol, copy_group, ldev_name) else: self.comm_delete_snapshot(svol) self.wait_snap(pvol, svol, [basic_lib.SMPL], timeout, interval) def comm_raidqry(self): ret, stdout, stderr = self.exec_command('raidqry', '-h') if ret: opt = 'raidqry -h' msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return stdout def get_comm_version(self): stdout = self.comm_raidqry() lines = stdout.splitlines() return shlex.split(lines[1])[1] def output_param_to_log(self, conf): for opt in volume_opts: if not opt.secret: value = getattr(conf, opt.name) LOG.info(_LI('\t%(name)-35s : %(value)s'), {'name': opt.name, 'value': value}) def create_lock_file(self): inst = self.conf.hitachi_horcm_numbers[0] pair_inst = self.conf.hitachi_horcm_numbers[1] serial = self.conf.hitachi_serial_number raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) raidcom_pair_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, pair_inst) horcmgr_lock_file = '%s%d' % (HORCMGR_LOCK_FILE, pair_inst) resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial) basic_lib.create_empty_file(raidcom_lock_file) basic_lib.create_empty_file(raidcom_pair_lock_file) basic_lib.create_empty_file(horcmgr_lock_file) basic_lib.create_empty_file(resource_lock_file) def connect_storage(self): properties = utils.brick_get_connector_properties() self.setup_horcmgr(properties['ip']) def get_max_hostgroups(self): """return the maximum value of hostgroup id.""" return MAX_HOSTGROUPS def get_hostgroup_luns(self, port, gid): list = [] self.add_used_hlun(port, gid, list) return list def get_ldev_size_in_gigabyte(self, ldev, existing_ref): param = 'serial_number' if param not in existing_ref: msg = basic_lib.output_err(700, param=param) raise exception.HBSDError(data=msg) storage = existing_ref.get(param) if storage != self.conf.hitachi_serial_number: msg = basic_lib.output_err(648, resource=param) raise exception.HBSDError(data=msg) stdout = self.comm_get_ldev(ldev) if not stdout: msg = basic_lib.output_err(648, resource='LDEV') raise exception.HBSDError(data=msg) sts_line = vol_type = "" vol_attrs = [] size = num_port = 1 lines = stdout.splitlines() for line in lines: if line.startswith("STS :"): sts_line = line elif line.startswith("VOL_TYPE :"): vol_type = shlex.split(line)[2] elif line.startswith("VOL_ATTR :"): vol_attrs = shlex.split(line)[2:] elif line.startswith("VOL_Capacity(BLK) :"): size = int(shlex.split(line)[2]) elif line.startswith("NUM_PORT :"): num_port = int(shlex.split(line)[2]) if 'NML' not in sts_line: msg = basic_lib.output_err(648, resource='LDEV') raise exception.HBSDError(data=msg) if 'OPEN-V' not in vol_type: msg = basic_lib.output_err(702, ldev=ldev) raise exception.HBSDError(data=msg) if 'HDP' not in vol_attrs: msg = basic_lib.output_err(702, ldev=ldev) raise exception.HBSDError(data=msg) for vol_attr in vol_attrs: if 
vol_attr == ':': continue if vol_attr in PAIR_TYPE: msg = basic_lib.output_err(705, ldev=ldev) raise exception.HBSDError(data=msg) if vol_attr not in PERMITTED_TYPE: msg = basic_lib.output_err(702, ldev=ldev) raise exception.HBSDError(data=msg) # Hitachi storage calculates volume sizes in a block unit, 512 bytes. # So, units.Gi is divided by 512. if size % (units.Gi / 512): msg = basic_lib.output_err(703, ldev=ldev) raise exception.HBSDError(data=msg) if num_port: msg = basic_lib.output_err(704, ldev=ldev) raise exception.HBSDError(data=msg) return size / (units.Gi / 512) cinder-8.0.0/cinder/volume/drivers/hitachi/hnas_nfs.py0000664000567000056710000007250612701406257024155 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for HDS HNAS NFS storage. """ import math import os import re import six import socket import time from xml.etree import ElementTree as ETree from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.image import image_utils from cinder import utils as cutils from cinder.volume.drivers.hitachi import hnas_backend from cinder.volume.drivers import nfs from cinder.volume import utils from cinder.volume import volume_types HDS_HNAS_NFS_VERSION = '4.1.0' LOG = logging.getLogger(__name__) NFS_OPTS = [ cfg.StrOpt('hds_hnas_nfs_config_file', default='/opt/hds/hnas/cinder_nfs_conf.xml', help='Configuration file for HDS NFS cinder plugin'), ] CONF = cfg.CONF CONF.register_opts(NFS_OPTS) HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'ssh_port': '22'} def _xml_read(root, element, check=None): """Read an xml element.""" val = root.findtext(element) # mandatory parameter not found if val is None and check: raise exception.ParameterNotFound(param=element) # tag not found if val is None: return None svc_tag_pattern = re.compile("svc_.$") # tag found but empty parameter. if not val.strip(): if svc_tag_pattern.search(element): return "" raise exception.ParameterNotFound(param=element) LOG.debug(_LI("%(element)s: %(val)s"), {'element': element, 'val': val if element != 'password' else '***'}) return val.strip() def _read_config(xml_config_file): """Read hds driver specific xml config file. 
:param xml_config_file: string filename containing XML configuration """ if not os.access(xml_config_file, os.R_OK): msg = (_("Can't open config file: %s") % xml_config_file) raise exception.NotFound(message=msg) try: root = ETree.parse(xml_config_file).getroot() except Exception: msg = (_("Error parsing config file: %s") % xml_config_file) raise exception.ConfigNotFound(message=msg) # mandatory parameters config = {} arg_prereqs = ['mgmt_ip0', 'username'] for req in arg_prereqs: config[req] = _xml_read(root, req, True) # optional parameters opt_parameters = ['hnas_cmd', 'ssh_enabled', 'cluster_admin_ip0'] for req in opt_parameters: config[req] = _xml_read(root, req) if config['ssh_enabled'] == 'True': config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', True) config['password'] = _xml_read(root, 'password') config['ssh_port'] = _xml_read(root, 'ssh_port') if config['ssh_port'] is None: config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port'] else: # password is mandatory when not using SSH config['password'] = _xml_read(root, 'password', True) if config['hnas_cmd'] is None: config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd'] config['hdp'] = {} config['services'] = {} # at least one service entry is needed for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: if _xml_read(root, svc) is None: continue service = {'label': svc} # neither parameter is optional for arg in ['volume_type', 'hdp']: service[arg] = _xml_read(root, svc + '/' + arg, True) config['services'][service['volume_type']] = service config['hdp'][service['hdp']] = service['hdp'] # at least one service required! if not config['services']: raise exception.ParameterNotFound(param="No service found") return config def factory_bend(drv_config): """Factory over-ride in self-tests.""" return hnas_backend.HnasBackend(drv_config) class HDSNFSDriver(nfs.NfsDriver): """Base class for Hitachi NFS driver. Executes commands relating to Volumes. Version 1.0.0: Initial driver version Version 2.2.0: Added support for SSH authentication Version 3.0.0: Added pool aware scheduling Version 4.0.0: Added manage/unmanage features Version 4.1.0: Fixed XML parser checks on blank options """ def __init__(self, *args, **kwargs): # NOTE(vish): db is set by Manager self._execute = None self.context = None self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(NFS_OPTS) self.config = _read_config( self.configuration.hds_hnas_nfs_config_file) super(HDSNFSDriver, self).__init__(*args, **kwargs) self.bend = factory_bend(self.config) def _array_info_get(self): """Get array parameters.""" out = self.bend.get_version(self.config['hnas_cmd'], HDS_HNAS_NFS_VERSION, self.config['mgmt_ip0'], self.config['username'], self.config['password']) inf = out.split() return inf[1], 'nfs_' + inf[1], inf[6] def _id_to_vol(self, volume_id): """Given the volume id, retrieve the volume object from database. :param volume_id: string volume id """ vol = self.db.volume_get(self.context, volume_id) return vol def _get_service(self, volume): """Get service parameters. Get the available service parameters for a given volume using its type.
:param volume: dictionary volume reference """ LOG.debug("_get_service: volume: %s", volume) label = utils.extract_host(volume['host'], level='pool') if label in self.config['services'].keys(): svc = self.config['services'][label] LOG.info(_LI("Get service: %(lbl)s->%(svc)s"), {'lbl': label, 'svc': svc['fslabel']}) service = (svc['hdp'], svc['path'], svc['fslabel']) else: LOG.info(_LI("Available services: %s"), self.config['services'].keys()) LOG.error(_LE("No configuration found for service: %s"), label) raise exception.ParameterNotFound(param=label) return service def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: dictionary volume reference :param new_size: int size in GB to extend """ nfs_mount = self._get_provider_location(volume['id']) path = self._get_volume_path(nfs_mount, volume['name']) # Resize the image file on share to new size. LOG.debug("Checking file for resize") if self._is_file_size_equal(path, new_size): return else: LOG.info(_LI("Resizing file to %sG"), new_size) image_utils.resize_image(path, new_size) if self._is_file_size_equal(path, new_size): LOG.info(_LI("LUN %(id)s extended to %(size)s GB."), {'id': volume['id'], 'size': new_size}) return else: raise exception.InvalidResults( _("Resizing image file failed.")) def _is_file_size_equal(self, path, size): """Checks if file size at path is equal to size.""" data = image_utils.qemu_img_info(path) virt_size = data.virtual_size / units.Gi if virt_size == size: return True else: return False def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug("create_volume_from %s", volume) vol_size = volume['size'] snap_size = snapshot['volume_size'] if vol_size != snap_size: msg = _("Cannot create volume of size %(vol_size)s from " "snapshot of size %(snap_size)s") msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size} raise exception.CinderException(msg % msg_fmt) self._clone_volume(snapshot['name'], volume['name'], snapshot['volume_id']) share = self._get_volume_location(snapshot['volume_id']) return {'provider_location': share} def create_snapshot(self, snapshot): """Create a snapshot. :param snapshot: dictionary snapshot reference """ self._clone_volume(snapshot['volume_name'], snapshot['name'], snapshot['volume_id']) share = self._get_volume_location(snapshot['volume_id']) LOG.debug('Share: %s', share) # returns the mount point (not path) return {'provider_location': share} def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: dictionary snapshot reference """ nfs_mount = self._get_provider_location(snapshot['volume_id']) if self._volume_not_present(nfs_mount, snapshot['name']): return True self._execute('rm', self._get_volume_path(nfs_mount, snapshot['name']), run_as_root=True) def _get_volume_location(self, volume_id): """Returns NFS mount address as :. :param volume_id: string volume id """ nfs_server_ip = self._get_host_ip(volume_id) export_path = self._get_export_path(volume_id) return nfs_server_ip + ':' + export_path def _get_provider_location(self, volume_id): """Returns provider location for given volume. :param volume_id: string volume id """ volume = self.db.volume_get(self.context, volume_id) # same format as _get_volume_location return volume.provider_location def _get_host_ip(self, volume_id): """Returns IP address for the given volume. 
:param volume_id: string volume id """ return self._get_provider_location(volume_id).split(':')[0] def _get_export_path(self, volume_id): """Returns NFS export path for the given volume. :param volume_id: string volume id """ return self._get_provider_location(volume_id).split(':')[1] def _volume_not_present(self, nfs_mount, volume_name): """Check if volume exists. :param volume_name: string volume name """ try: self._try_execute('ls', self._get_volume_path(nfs_mount, volume_name)) except processutils.ProcessExecutionError: # If the volume isn't present return True return False def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. tries = 0 while True: try: self._execute(*command, **kwargs) return True except processutils.ProcessExecutionError: tries += 1 if tries >= self.configuration.num_shell_tries: raise LOG.exception(_LE("Recovering from a failed execute. " "Try number %s"), tries) time.sleep(tries ** 2) def _get_volume_path(self, nfs_share, volume_name): """Get volume path (local fs path) for given name on given nfs share. :param nfs_share string, example 172.18.194.100:/var/nfs :param volume_name string, example volume-91ee65ec-c473-4391-8c09-162b00c68a8c """ return os.path.join(self._get_mount_point_for_share(nfs_share), volume_name) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. :param volume: dictionary volume reference :param src_vref: dictionary src_vref reference """ vol_size = volume['size'] src_vol_size = src_vref['size'] if vol_size != src_vol_size: msg = _("Cannot create clone of size %(vol_size)s from " "volume of size %(src_vol_size)s") msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size} raise exception.CinderException(msg % msg_fmt) self._clone_volume(src_vref['name'], volume['name'], src_vref['id']) share = self._get_volume_location(src_vref['id']) return {'provider_location': share} def get_volume_stats(self, refresh=False): """Get volume stats. if 'refresh' is True, update the stats first. 
""" _stats = super(HDSNFSDriver, self).get_volume_stats(refresh) _stats["vendor_name"] = 'HDS' _stats["driver_version"] = HDS_HNAS_NFS_VERSION _stats["storage_protocol"] = 'NFS' for pool in self.pools: capacity, free, used = self._get_capacity_info(pool['hdp']) pool['total_capacity_gb'] = capacity / float(units.Gi) pool['free_capacity_gb'] = free / float(units.Gi) pool['allocated_capacity_gb'] = used / float(units.Gi) pool['QoS_support'] = 'False' pool['reserved_percentage'] = 0 _stats['pools'] = self.pools LOG.info(_LI('Driver stats: %s'), _stats) return _stats def _get_nfs_info(self): out = self.bend.get_nfs_info(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password']) lines = out.split('\n') # dict based on NFS exports addresses conf = {} for line in lines: if 'Export' in line: inf = line.split() (export, path, fslabel, hdp, ip1) = \ inf[1], inf[3], inf[5], inf[7], inf[11] # 9, 10, etc are IP addrs key = ip1 + ':' + export conf[key] = {} conf[key]['path'] = path conf[key]['hdp'] = hdp conf[key]['fslabel'] = fslabel LOG.info(_LI("nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s " "FSID: %(hdp)s"), {'key': key, 'path': path, 'fslabel': fslabel, 'hdp': hdp}) return conf def do_setup(self, context): """Perform internal driver setup.""" self.context = context self._load_shares_config(getattr(self.configuration, self.driver_prefix + '_shares_config')) LOG.info(_LI("Review shares: %s"), self.shares) nfs_info = self._get_nfs_info() LOG.debug("nfs_info: %s", nfs_info) for share in self.shares: if share in nfs_info.keys(): LOG.info(_LI("share: %(share)s -> %(info)s"), {'share': share, 'info': nfs_info[share]['path']}) for svc in self.config['services'].keys(): if share == self.config['services'][svc]['hdp']: self.config['services'][svc]['path'] = \ nfs_info[share]['path'] # don't overwrite HDP value self.config['services'][svc]['fsid'] = \ nfs_info[share]['hdp'] self.config['services'][svc]['fslabel'] = \ nfs_info[share]['fslabel'] LOG.info(_LI("Save service info for" " %(svc)s -> %(hdp)s, %(path)s"), {'svc': svc, 'hdp': nfs_info[share]['hdp'], 'path': nfs_info[share]['path']}) break if share != self.config['services'][svc]['hdp']: LOG.error(_LE("NFS share %(share)s has no service entry:" " %(svc)s -> %(hdp)s"), {'share': share, 'svc': svc, 'hdp': self.config['services'][svc]['hdp']}) raise exception.ParameterNotFound(param=svc) else: LOG.info(_LI("share: %s incorrect entry"), share) LOG.debug("self.config['services'] = %s", self.config['services']) service_list = self.config['services'].keys() for svc in service_list: svc = self.config['services'][svc] pool = {} pool['pool_name'] = svc['volume_type'] pool['service_label'] = svc['volume_type'] pool['hdp'] = svc['hdp'] self.pools.append(pool) LOG.info(_LI("Configured pools: %s"), self.pools) def _clone_volume(self, volume_name, clone_name, volume_id): """Clones mounted volume using the HNAS file_clone. 
:param volume_name: string volume name :param clone_name: string clone name (or snapshot) :param volume_id: string volume id """ export_path = self._get_export_path(volume_id) # volume-ID snapshot-ID, /cinder LOG.info(_LI("Cloning with volume_name %(vname)s clone_name %(cname)s" " export_path %(epath)s"), {'vname': volume_name, 'cname': clone_name, 'epath': export_path}) source_vol = self._id_to_vol(volume_id) # sps; added target (_hdp, _path, _fslabel) = self._get_service(source_vol) target_path = '%s/%s' % (_path, clone_name) source_path = '%s/%s' % (_path, volume_name) out = self.bend.file_clone(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], _fslabel, source_path, target_path) return out def get_pool(self, volume): if not volume['volume_type']: return 'default' else: metadata = {} type_id = volume['volume_type_id'] if type_id is not None: metadata = volume_types.get_volume_type_extra_specs(type_id) if not metadata.get('service_label'): return 'default' else: if metadata['service_label'] not in \ self.config['services'].keys(): return 'default' else: return metadata['service_label'] def create_volume(self, volume): """Creates a volume. :param volume: volume reference """ self._ensure_shares_mounted() (_hdp, _path, _fslabel) = self._get_service(volume) volume['provider_location'] = _hdp LOG.info(_LI("Volume service: %(label)s. Casted to: %(loc)s"), {'label': _fslabel, 'loc': volume['provider_location']}) self._do_create_volume(volume) return {'provider_location': volume['provider_location']} def _convert_vol_ref_share_name_to_share_ip(self, vol_ref): """Converts the share point name to an IP address. The volume reference may have a DNS name portion in the share name. Convert that to an IP address and then restore the entire path. :param vol_ref: driver-specific information used to identify a volume :returns: a volume reference where share is in IP format """ # First strip out share and convert to IP format. share_split = vol_ref.split(':') try: vol_ref_share_ip = cutils.resolve_hostname(share_split[0]) except socket.gaierror as e: LOG.error(_LE('Invalid hostname %(host)s'), {'host': share_split[0]}) LOG.debug('error: %s', e.strerror) raise # Now place back into volume reference. vol_ref_share = vol_ref_share_ip + ':' + share_split[1] return vol_ref_share def _get_share_mount_and_vol_from_vol_ref(self, vol_ref): """Get the NFS share, the NFS mount, and the volume from reference. Determine the NFS share point, the NFS mount point, and the volume (with possible path) from the given volume reference. Raise exception if unsuccessful. :param vol_ref: driver-specific information used to identify a volume :returns: NFS Share, NFS mount, volume path or raise error """ # Check that the reference is valid. if 'source-name' not in vol_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=reason) vol_ref_name = vol_ref['source-name'] self._ensure_shares_mounted() # If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config # file, but the admin tries to manage the file located at # 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below # when searching self._mounted_shares to see if we have an existing # mount that would work to access the volume-to-be-managed (a string # comparison is done instead of IP comparison). 
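# Sketch of the normalization described in the comment above
# (illustrative only; the driver uses cutils.resolve_hostname, for which
# socket.gethostbyname stands in here): both the configured share and
# the user-supplied reference are reduced to <ip>:<export> before being
# compared, so 'my.hostname.com:/a/b/c' can match an IP-based
# nfs_shares_config entry when the name resolves to that address.
#
#     import socket
#
#     def _normalize_share(share):
#         host, _, export = share.partition(':')
#         return socket.gethostbyname(host) + ':' + export
#
#     # _normalize_share('localhost:/openstack') == '127.0.0.1:/openstack'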
vol_ref_share = self._convert_vol_ref_share_name_to_share_ip( vol_ref_name) for nfs_share in self._mounted_shares: cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share) (orig_share, work_share, file_path) = vol_ref_share.partition(cfg_share) if work_share == cfg_share: file_path = file_path[1:] # strip off leading path divider LOG.debug("Found possible share %s; checking mount.", work_share) nfs_mount = self._get_mount_point_for_share(nfs_share) vol_full_path = os.path.join(nfs_mount, file_path) if os.path.isfile(vol_full_path): LOG.debug("Found share %(share)s and vol %(path)s on " "mount %(mnt)s.", {'share': nfs_share, 'path': file_path, 'mnt': nfs_mount}) return nfs_share, nfs_mount, file_path else: LOG.debug("vol_ref %(ref)s not on share %(share)s.", {'ref': vol_ref_share, 'share': nfs_share}) raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=_('Volume not found on configured storage backend.')) def manage_existing(self, volume, existing_vol_ref): """Manages an existing volume. The specified Cinder volume is to be taken into Cinder management. The driver will verify its existence and then rename it to the new Cinder volume name. It is expected that the existing volume reference is an NFS share point and some [/path]/volume; e.g., 10.10.32.1:/openstack/vol_to_manage or 10.10.32.1:/openstack/some_directory/vol_to_manage :param volume: cinder volume to manage :param existing_vol_ref: driver-specific information used to identify a volume """ # Attempt to find NFS share, NFS mount, and volume path from vol_ref. (nfs_share, nfs_mount, vol_path ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s.", {'vol': volume['id'], 'ref': existing_vol_ref['source-name']}) self._check_pool_and_share(volume, nfs_share) if vol_path == volume['name']: LOG.debug("New Cinder volume %s name matches reference name: " "no need to rename.", volume['name']) else: src_vol = os.path.join(nfs_mount, vol_path) dst_vol = os.path.join(nfs_mount, volume['name']) try: self._execute("mv", src_vol, dst_vol, run_as_root=False, check_exit_code=True) LOG.debug("Setting newly managed Cinder volume name to %s.", volume['name']) self._set_rw_permissions_for_all(dst_vol) except (OSError, processutils.ProcessExecutionError) as err: exception_msg = (_("Failed to manage existing volume " "%(name)s, because rename operation " "failed: Error msg: %(msg)s."), {'name': existing_vol_ref['source-name'], 'msg': six.text_type(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) return {'provider_location': nfs_share} def _check_pool_and_share(self, volume, nfs_share): """Validates the pool and the NFS share. Checks if the NFS share for the volume-type chosen matches the one passed in the volume reference. Also, checks if the pool for the volume type matches the pool for the host passed. 
:param volume: cinder volume reference :param nfs_share: NFS share passed to manage """ pool_from_vol_type = self.get_pool(volume) pool_from_host = utils.extract_host(volume['host'], level='pool') if self.config['services'][pool_from_vol_type]['hdp'] != nfs_share: msg = (_("Failed to manage existing volume because the pool of " "the volume type chosen does not match the NFS share " "passed in the volume reference."), {'Share passed': nfs_share, 'Share for volume type': self.config['services'][pool_from_vol_type]['hdp']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if pool_from_host != pool_from_vol_type: msg = (_("Failed to manage existing volume because the pool of " "the volume type chosen does not match the pool of " "the host."), {'Pool of the volume type': pool_from_vol_type, 'Pool of the host': pool_from_host}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) def manage_existing_get_size(self, volume, existing_vol_ref): """Returns the size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: cinder volume to manage :param existing_vol_ref: existing volume to take under management """ # Attempt to find NFS share, NFS mount, and volume path from vol_ref. (nfs_share, nfs_mount, vol_path ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) try: LOG.debug("Asked to get size of NFS vol_ref %s.", existing_vol_ref['source-name']) file_path = os.path.join(nfs_mount, vol_path) file_size = float(cutils.get_file_size(file_path)) / units.Gi vol_size = int(math.ceil(file_size)) except (OSError, ValueError): exception_message = (_("Failed to manage existing volume " "%(name)s, because of error in getting " "volume size."), {'name': existing_vol_ref['source-name']}) raise exception.VolumeBackendAPIException(data=exception_message) LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.", {'ref': existing_vol_ref['source-name'], 'size': vol_size}) return vol_size def unmanage(self, volume): """Removes the specified volume from Cinder management. It does not delete the underlying backend storage object. A log entry will be made to notify the Admin that the volume is no longer being managed. :param volume: cinder volume to unmanage """ vol_str = CONF.volume_name_template % volume['id'] path = self._get_mount_point_for_share(volume['provider_location']) new_str = "unmanage-" + vol_str vol_path = os.path.join(path, vol_str) new_path = os.path.join(path, new_str) try: self._execute("mv", vol_path, new_path, run_as_root=False, check_exit_code=True) LOG.info(_LI("Cinder NFS volume with current path %(cr)s is " "no longer being managed."), {'cr': new_path}) except (OSError, ValueError): LOG.error(_LE("The NFS Volume %(cr)s does not exist."), {'cr': new_path}) cinder-8.0.0/cinder/volume/drivers/hitachi/hbsd_snm2.py0000664000567000056710000012523412701406250024223 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
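# Note: the HBSDSNM2 class below drives the Hitachi SNM2 command-line
# utilities (auluadd, auluref, auhgdef, auhgwwn, autargetdef, ...) by
# prefixing every invocation with the SNM2_ENV environment settings.
# As a sketch (the unit name is a placeholder), a call such as
#
#     self.exec_hsnm('auluref', '-unit unit1')
#
# executes roughly:
#
#     env LANG=C STONAVM_HOME=/usr/stonavm \
#         LD_LIBRARY_PATH=/usr/stonavm/lib \
#         STONAVM_RSP_PASS=on STONAVM_ACT=on auluref -unit unit1
#
# and is retried every EXEC_INTERVAL second(s) until it succeeds,
# returns a known-fatal DMEC/DMED/DMER error code, or EXEC_TIMEOUT
# expires.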
import re import shlex import threading import time from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units import six from cinder import exception from cinder.i18n import _LE, _LW from cinder import utils from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib LOG = logging.getLogger(__name__) SNM2_ENV = ('LANG=C STONAVM_HOME=/usr/stonavm ' 'LD_LIBRARY_PATH=/usr/stonavm/lib ' 'STONAVM_RSP_PASS=on STONAVM_ACT=on') MAX_HOSTGROUPS = 127 MAX_HOSTGROUPS_ISCSI = 254 MAX_HLUN = 2047 EXEC_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'hsnm_' EXEC_TIMEOUT = 10 EXEC_INTERVAL = 1 CHAP_TIMEOUT = 5 PAIRED = 12 DUMMY_LU = -1 class HBSDSNM2(basic_lib.HBSDBasicLib): def __init__(self, conf): super(HBSDSNM2, self).__init__(conf=conf) self.unit_name = conf.hitachi_unit_name self.hsnm_lock = threading.Lock() self.hsnm_lock_file = ('%s%s' % (EXEC_LOCK_PATH_BASE, self.unit_name)) copy_speed = conf.hitachi_copy_speed if copy_speed <= 2: self.pace = 'slow' elif copy_speed == 3: self.pace = 'normal' else: self.pace = 'prior' def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start): lock = basic_lib.get_process_lock(self.hsnm_lock_file) with self.hsnm_lock, lock: ret, stdout, stderr = self.exec_command('env', args=args, printflag=printflag) if not ret or noretry: raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if time.time() - start >= timeout: LOG.error(_LE("snm2 command timeout.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if (re.search('DMEC002047', stderr) or re.search('DMEC002048', stderr) or re.search('DMED09000A', stderr) or re.search('DMED090026', stderr) or re.search('DMED0E002B', stderr) or re.search('DMER03006A', stderr) or re.search('DMER030080', stderr) or re.search('DMER0300B8', stderr) or re.search('DMER0800CF', stderr) or re.search('DMER0800D[0-6D]', stderr) or re.search('DMES052602', stderr)): LOG.error(_LE("Unexpected error occurs in snm2.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) def exec_hsnm(self, command, args, printflag=True, noretry=False, timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL): args = '%s %s %s' % (SNM2_ENV, command, args) loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_exec_hsnm, args, printflag, noretry, timeout, time.time()) return loop.start(interval=interval).wait() def _execute_with_exception(self, cmd, args, **kwargs): ret, stdout, stderr = self.exec_hsnm(cmd, args, **kwargs) if ret: cmds = '%(cmd)s %(args)s' % {'cmd': cmd, 'args': args} msg = basic_lib.output_err( 600, cmd=cmds, ret=ret, out=stdout, err=stderr) raise exception.HBSDError(data=msg) return ret, stdout, stderr def _execute_and_return_stdout(self, cmd, args, **kwargs): result = self._execute_with_exception(cmd, args, **kwargs) return result[1] def get_comm_version(self): ret, stdout, stderr = self.exec_hsnm('auman', '-help') m = re.search('Version (\d+).(\d+)', stdout) if not m: msg = basic_lib.output_err( 600, cmd='auman', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return '%s.%s' % (m.group(1), m.group(2)) def add_used_hlun(self, command, port, gid, used_list, ldev): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm(command, '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[2:]: line = shlex.split(line) if not line: continue if 
line[0] == port and int(line[1][0:3]) == gid: if int(line[2]) not in used_list: used_list.append(int(line[2])) if int(line[3]) == ldev: hlu = int(line[2]) LOG.warning(_LW('ldev(%(ldev)d) is already mapped ' '(hlun: %(hlu)d)'), {'ldev': ldev, 'hlu': hlu}) return hlu return None def _get_lu(self, lu=None): # When 'lu' is 0, it should be true. So, it cannot remove 'is None'. if lu is None: args = '-unit %s' % self.unit_name else: args = '-unit %s -lu %s' % (self.unit_name, lu) return self._execute_and_return_stdout('auluref', args) def get_unused_ldev(self, ldev_range): start = ldev_range[0] end = ldev_range[1] unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auluref', '-unit %s' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auluref', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) free_ldev = start lines = stdout.splitlines() found = False for line in lines[2:]: line = shlex.split(line) if not line: continue ldev_num = int(line[0]) if free_ldev > ldev_num: continue if free_ldev == ldev_num: free_ldev += 1 else: found = True break if free_ldev > end: break else: found = True if not found: msg = basic_lib.output_err(648, resource='LDEV') raise exception.HBSDError(message=msg) return free_ldev def get_hgname_gid(self, port, host_grp_name): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auhgdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() is_target_port = False for line in lines: line = shlex.split(line) if not line: continue if line[0] == 'Port' and line[1] == port: is_target_port = True continue if is_target_port: if line[0] == 'Port': break if not line[0].isdigit(): continue gid = int(line[0]) if line[1] == host_grp_name: return gid return None def get_unused_gid(self, group_range, port): start = group_range[0] end = group_range[1] unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auhgdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() is_target_port = False free_gid = start found = False for line in lines: line = shlex.split(line) if not line: continue if line[0] == 'Port' and line[1] == port: is_target_port = True continue if is_target_port: if line[0] == 'Port': found = True break if not line[0].isdigit(): continue gid = int(line[0]) if free_gid > gid: continue if free_gid == gid: free_gid += 1 else: found = True break if free_gid > end or free_gid > MAX_HOSTGROUPS: break else: found = True if not found: msg = basic_lib.output_err(648, resource='GID') raise exception.HBSDError(message=msg) return free_gid def comm_set_target_wwns(self, target_ports): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('aufibre1', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='aufibre1', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() target_wwns = {} for line in lines[3:]: if re.match('Transfer', line): break line = shlex.split(line) if len(line) < 4: continue port = '%s%s' % (line[0], line[1]) if target_ports: if port in target_ports: target_wwns[port] = line[3] else: target_wwns[port] = line[3] LOG.debug('target wwns: %s', target_wwns) return target_wwns def get_hostgroup_from_wwns(self, 
hostgroups, port, wwns, buf, login): for pt in wwns: for line in buf[port]['assigned']: hgname = shlex.split(line[38:])[1][4:] if not re.match(basic_lib.NAME_PREFIX, hgname): continue if pt.search(line[38:54]): wwn = line[38:54] gid = int(shlex.split(line[38:])[1][0:3]) is_detected = None if login: for line in buf[port]['detected']: if pt.search(line[38:54]): is_detected = True break else: is_detected = False hostgroups.append({'port': six.text_type(port), 'gid': gid, 'initiator_wwn': wwn, 'detected': is_detected}) def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auhgwwn', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) security_ports = [] patterns = [] for wwn in wwns: pt = re.compile(wwn, re.IGNORECASE) patterns.append(pt) lines = stdout.splitlines() buf = {} _buffer = [] port = None security = None for line in lines: if re.match('Port', line): port = shlex.split(line)[1] if target_ports and port not in target_ports: port = None else: security = True if shlex.split(line)[5] == 'ON' else False buf[port] = {'detected': [], 'assigned': [], 'assignable': []} if security: security_ports.append(port) continue if port and security: if re.search('Detected WWN', line): _buffer = buf[port]['detected'] continue elif re.search('Assigned WWN', line): _buffer = buf[port]['assigned'] continue elif re.search('Assignable WWN', line): _buffer = buf[port]['assignable'] continue _buffer.append(line) hostgroups = [] for port in buf.keys(): self.get_hostgroup_from_wwns( hostgroups, port, patterns, buf, login) for hostgroup in hostgroups: hgs.append(hostgroup) return security_ports def comm_delete_lun_core(self, command, hostgroups, lun): unit = self.unit_name no_lun_cnt = 0 deleted_hostgroups = [] for hostgroup in hostgroups: LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup) port = hostgroup['port'] gid = hostgroup['gid'] ctl_no = port[0] port_no = port[1] is_deleted = False for deleted in deleted_hostgroups: if port == deleted['port'] and gid == deleted['gid']: is_deleted = True if is_deleted: continue ret, stdout, stderr = self.exec_hsnm(command, '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[2:]: line = shlex.split(line) if not line: continue if (line[0] == port and int(line[1][0:3]) == gid and int(line[3]) == lun): hlu = int(line[2]) break else: no_lun_cnt += 1 if no_lun_cnt == len(hostgroups): raise exception.HBSDNotFound else: continue opt = '-unit %s -rm %s %s %d %d %d' % (unit, ctl_no, port_no, gid, hlu, lun) ret, stdout, stderr = self.exec_hsnm(command, opt) if ret: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) deleted_hostgroups.append({'port': port, 'gid': gid}) LOG.debug('comm_delete_lun is over (%d)', lun) def comm_delete_lun(self, hostgroups, ldev): self.comm_delete_lun_core('auhgmap', hostgroups, ldev) def comm_delete_lun_iscsi(self, hostgroups, ldev): self.comm_delete_lun_core('autargetmap', hostgroups, ldev) def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol): unit = self.unit_name if is_vvol: command = 'aureplicationvvol' opt = ('-unit %s -add -lu %d -size %dg' % (unit, ldev, capacity)) else: command = 
'auluadd' opt = ('-unit %s -lu %d -dppoolno %d -size %dg' % (unit, ldev, pool_id, capacity)) ret, stdout, stderr = self.exec_hsnm(command, opt) if ret: if (re.search('DMEC002047', stderr) or re.search('DMES052602', stderr) or re.search('DMED09000A', stderr)): raise exception.HBSDNotFound else: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_hostgrp(self, port, gid, host_grp_name): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -add %s %s -gno %d -gname %s' % (unit, ctl_no, port_no, gid, host_grp_name) ret, stdout, stderr = self.exec_hsnm('auhgdef', opt) if ret: raise exception.HBSDNotFound def comm_del_hostgrp(self, port, gid, host_grp_name): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -rm %s %s -gname %s' % (unit, ctl_no, port_no, host_grp_name) ret, stdout, stderr = self.exec_hsnm('auhgdef', opt) if ret: msg = basic_lib.output_err( 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_hbawwn(self, port, gid, wwn): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -set -permhg %s %s %s -gno %d' % (unit, ctl_no, port_no, wwn, gid) ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt) if ret: opt = '-unit %s -assign -permhg %s %s %s -gno %d' % (unit, ctl_no, port_no, wwn, gid) ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt) if ret: msg = basic_lib.output_err( 600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_lun(self, command, hostgroups, ldev, is_once=False): unit = self.unit_name tmp_hostgroups = hostgroups[:] used_list = [] is_ok = False hlu = None old_hlu = None for hostgroup in hostgroups: port = hostgroup['port'] gid = hostgroup['gid'] hlu = self.add_used_hlun(command, port, gid, used_list, ldev) # When 'hlu' or 'old_hlu' is 0, it should be true. # So, it cannot remove 'is not None'. 
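            # Illustrative case: on a host group with no mappings,
            # add_used_hlun() returns None and used_list stays empty,
            # so hlu later falls through to 0 -- a perfectly valid
            # HLUN. That is why this code compares against None
            # instead of relying on truthiness.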
if hlu is not None: if old_hlu is not None and old_hlu != hlu: msg = basic_lib.output_err(648, resource='LUN (HLUN)') raise exception.HBSDError(message=msg) is_ok = True hostgroup['lun'] = hlu tmp_hostgroups.remove(hostgroup) old_hlu = hlu else: hlu = old_hlu if not used_list: hlu = 0 elif hlu is None: for i in range(MAX_HLUN + 1): if i not in used_list: hlu = i break else: raise exception.HBSDNotFound ret = 0 stdout = None stderr = None invalid_hgs_str = None for hostgroup in tmp_hostgroups: port = hostgroup['port'] gid = hostgroup['gid'] ctl_no = port[0] port_no = port[1] if not hostgroup['detected']: if invalid_hgs_str: invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str, port, gid) else: invalid_hgs_str = '%s:%d' % (port, gid) continue opt = '-unit %s -add %s %s %d %d %d' % (unit, ctl_no, port_no, gid, hlu, ldev) ret, stdout, stderr = self.exec_hsnm(command, opt) if ret == 0: is_ok = True hostgroup['lun'] = hlu if is_once: break else: LOG.warning(basic_lib.set_msg( 314, ldev=ldev, lun=hlu, port=port, id=gid)) if not is_ok: if stderr: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) else: msg = basic_lib.output_err(659, gid=invalid_hgs_str) raise exception.HBSDError(message=msg) def comm_delete_ldev(self, ldev, is_vvol): unit = self.unit_name if is_vvol: command = 'aureplicationvvol' opt = '-unit %s -rm -lu %d' % (unit, ldev) else: command = 'auludel' opt = '-unit %s -lu %d -f' % (unit, ldev) ret, stdout, stderr = self.exec_hsnm(command, opt, timeout=30, interval=3) if ret: if (re.search('DMEC002048', stderr) or re.search('DMED090026', stderr)): raise exception.HBSDNotFound msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return ret def comm_extend_ldev(self, ldev, old_size, new_size): unit = self.unit_name command = 'auluchgsize' options = '-unit %s -lu %d -size %dg' % (unit, ldev, new_size) ret, stdout, stderr = self.exec_hsnm(command, options) if ret: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def delete_chap_user(self, port): unit = self.unit_name ctl_no = port[0] port_no = port[1] auth_username = self.conf.hitachi_auth_user opt = '-unit %s -rm %s %s -user %s' % (unit, ctl_no, port_no, auth_username) return self.exec_hsnm('auchapuser', opt) def _wait_for_add_chap_user(self, cmd, auth_username, auth_password, start): # Don't move 'import pexpect' to the beginning of the file so that # a tempest can work. 
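        # Interaction sketch (prompts taken from the expect() calls
        # below; root helper, unit, port and user values are
        # placeholders):
        #
        #     $ sudo env <SNM2_ENV> auchapuser -unit unit1 -add 0 A \
        #           -tno 5 -user cinder
        #     Secret: <auth_password>
        #     Re-enter Secret: <auth_password>
        #     The CHAP user information has been added successfully.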
import pexpect lock = basic_lib.get_process_lock(self.hsnm_lock_file) with self.hsnm_lock, lock: try: child = pexpect.spawn(cmd) child.expect('Secret: ', timeout=CHAP_TIMEOUT) child.sendline(auth_password) child.expect('Re-enter Secret: ', timeout=CHAP_TIMEOUT) child.sendline(auth_password) child.expect('The CHAP user information has ' 'been added successfully.', timeout=CHAP_TIMEOUT) except Exception: if time.time() - start >= EXEC_TIMEOUT: msg = basic_lib.output_err(642, user=auth_username) raise exception.HBSDError(message=msg) else: raise loopingcall.LoopingCallDone(True) def set_chap_authention(self, port, gid): ctl_no = port[0] port_no = port[1] unit = self.unit_name auth_username = self.conf.hitachi_auth_user auth_password = self.conf.hitachi_auth_password add_chap_user = self.conf.hitachi_add_chap_user assign_flag = True added_flag = False opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no, auth_username) ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True) if ret: if not add_chap_user: msg = basic_lib.output_err(643, user=auth_username) raise exception.HBSDError(message=msg) root_helper = utils.get_root_helper() cmd = ('%s env %s auchapuser -unit %s -add %s %s ' '-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no, port_no, gid, auth_username)) LOG.debug('Add CHAP user') loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_add_chap_user, cmd, auth_username, auth_password, time.time()) added_flag = loop.start(interval=EXEC_INTERVAL).wait() else: lines = stdout.splitlines()[4:] for line in lines: if int(shlex.split(line)[0][0:3]) == gid: assign_flag = False break if assign_flag: opt = '-unit %s -assign %s %s -tno %d -user %s' % (unit, ctl_no, port_no, gid, auth_username) ret, stdout, stderr = self.exec_hsnm('auchapuser', opt) if ret: if added_flag: _ret, _stdout, _stderr = self.delete_chap_user(port) if _ret: LOG.warning(basic_lib.set_msg( 303, user=auth_username)) msg = basic_lib.output_err( 600, cmd='auchapuser', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return added_flag def comm_add_hostgrp_iscsi(self, port, gid, target_alias, target_iqn): auth_method = self.conf.hitachi_auth_method unit = self.unit_name ctl_no = port[0] port_no = port[1] if auth_method: auth_arg = '-authmethod %s -mutual disable' % auth_method else: auth_arg = '-authmethod None' opt = '-unit %s -add %s %s -tno %d' % (unit, ctl_no, port_no, gid) opt = '%s -talias %s -iname %s %s' % (opt, target_alias, target_iqn, auth_arg) ret, stdout, stderr = self.exec_hsnm('autargetdef', opt) if ret: raise exception.HBSDNotFound def delete_iscsi_target(self, port, _target_no, target_alias): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -rm %s %s -talias %s' % (unit, ctl_no, port_no, target_alias) return self.exec_hsnm('autargetdef', opt) def comm_set_hostgrp_reportportal(self, port, target_alias): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -set %s %s -talias %s' % (unit, ctl_no, port_no, target_alias) opt = '%s -ReportFullPortalList enable' % opt ret, stdout, stderr = self.exec_hsnm('autargetopt', opt) if ret: msg = basic_lib.output_err( 600, cmd='autargetopt', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_initiator(self, port, gid, host_iqn): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -add %s %s -tno %d -iname %s' % (unit, ctl_no, port_no, gid, host_iqn) ret, stdout, stderr = 
self.exec_hsnm('autargetini', opt) if ret: msg = basic_lib.output_err( 600, cmd='autargetini', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_get_hostgroup_info_iscsi(self, hgs, host_iqn, target_ports): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('autargetini', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='autargetini', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) security_ports = [] lines = stdout.splitlines() hostgroups = [] security = True for line in lines: if not shlex.split(line): continue if re.match('Port', line): line = shlex.split(line) port = line[1] security = True if line[4] == 'ON' else False continue if target_ports and port not in target_ports: continue if security: if (host_iqn in shlex.split(line[72:]) and re.match(basic_lib.NAME_PREFIX, shlex.split(line)[0][4:])): gid = int(shlex.split(line)[0][0:3]) hostgroups.append( {'port': port, 'gid': gid, 'detected': True}) LOG.debug('Find port=%(port)s gid=%(gid)d', {'port': port, 'gid': gid}) if port not in security_ports: security_ports.append(port) for hostgroup in hostgroups: hgs.append(hostgroup) return security_ports def comm_get_iscsi_ip(self, port): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auiscsi', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auiscsi', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() is_target_port = False for line in lines: line_array = shlex.split(line) if not line_array: continue if line_array[0] == 'Port' and line_array[1] != 'Number': if line_array[1] == port: is_target_port = True else: is_target_port = False continue if is_target_port and re.search('IPv4 Address', line): ip_addr = shlex.split(line)[3] break if is_target_port and re.search('Port Number', line): ip_port = shlex.split(line)[3] else: msg = basic_lib.output_err(651) raise exception.HBSDError(message=msg) return ip_addr, ip_port def comm_get_target_iqn(self, port, gid): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('autargetdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) is_target_host = False tmp_port = None lines = stdout.splitlines() for line in lines: line = shlex.split(line) if not line: continue if line[0] == "Port": tmp_port = line[1] continue if port != tmp_port: continue gid_tmp = line[0][0:3] if gid_tmp.isdigit() and int(gid_tmp) == gid: is_target_host = True continue if is_target_host and line[0] == "iSCSI": target_iqn = line[3] break else: msg = basic_lib.output_err(650, resource='IQN') raise exception.HBSDError(message=msg) return target_iqn def get_unused_gid_iscsi(self, group_range, port): start = group_range[0] end = min(group_range[1], MAX_HOSTGROUPS_ISCSI) unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('autargetdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) used_list = [] tmp_port = None lines = stdout.splitlines() for line in lines: line = shlex.split(line) if not line: continue if line[0] == "Port": tmp_port = line[1] continue if port != tmp_port: continue if line[0][0:3].isdigit(): gid = int(line[0][0:3]) if start <= gid <= end: used_list.append(gid) 
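        # Example (hypothetical): with group_range=(0, 200) and targets
        # 000, 001 and 003 defined on this port, used_list is [0, 1, 3]
        # and the scan below returns gid=2, the first free ID in range.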
if not used_list: return start for gid in range(start, end + 1): if gid not in used_list: break else: msg = basic_lib.output_err(648, resource='GID') raise exception.HBSDError(message=msg) return gid def get_gid_from_targetiqn(self, target_iqn, target_alias, port): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('autargetdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) gid = None tmp_port = None found_alias_full = False found_alias_part = False lines = stdout.splitlines() for line in lines: line = shlex.split(line) if not line: continue if line[0] == "Port": tmp_port = line[1] continue if port != tmp_port: continue if line[0][0:3].isdigit(): tmp_gid = int(line[0][0:3]) if re.match(basic_lib.NAME_PREFIX, line[0][4:]): found_alias_part = True if line[0][4:] == target_alias: found_alias_full = True continue if line[0] == "iSCSI": if line[3] == target_iqn: gid = tmp_gid break else: found_alias_part = False if found_alias_full and gid is None: msg = basic_lib.output_err(641) raise exception.HBSDError(message=msg) # When 'gid' is 0, it should be true. # So, it cannot remove 'is not None'. if not found_alias_part and gid is not None: msg = basic_lib.output_err(641) raise exception.HBSDError(message=msg) return gid def comm_get_dp_pool(self, pool_id): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('audppool', '-unit %s -refer -g' % unit, printflag=False) if ret: msg = basic_lib.output_err( 600, cmd='audppool', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[2:]: tc_cc = re.search('\s(\d+\.\d) GB\s+(\d+\.\d) GB\s', line) pool_tmp = re.match('\s*\d+', line) if (pool_tmp and tc_cc and int(pool_tmp.group(0)) == pool_id): total_gb = int(float(tc_cc.group(1))) free_gb = total_gb - int(float(tc_cc.group(2))) return total_gb, free_gb msg = basic_lib.output_err(640, pool_id=pool_id) raise exception.HBSDError(message=msg) def is_detected(self, port, wwn): hgs = [] self.comm_get_hostgroup_info(hgs, [wwn], [port], login=True) return hgs[0]['detected'] def pairoperate(self, opr, pvol, svol, is_vvol, args=None): unit = self.unit_name method = '-ss' if is_vvol else '-si' opt = '-unit %s -%s %s -pvol %d -svol %d' % (unit, opr, method, pvol, svol) if args: opt = '%s %s' % (opt, args) ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt) if ret: opt = '%s %s' % ('aureplicationlocal', opt) msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_create_pair(self, pvol, svol, is_vvol): if not is_vvol: args = '-compsplit -pace %s' % self.pace method = basic_lib.FULL else: pool = self.conf.hitachi_thin_pool_id args = ('-localrepdppoolno %d -localmngdppoolno %d ' '-compsplit -pace %s' % (pool, pool, self.pace)) method = basic_lib.THIN try: self.pairoperate('create', pvol, svol, is_vvol, args=args) except exception.HBSDCmdError as ex: if (re.search('DMER0300B8', ex.stderr) or re.search('DMER0800CF', ex.stderr) or re.search('DMER0800D[0-6D]', ex.stderr) or re.search('DMER03006A', ex.stderr) or re.search('DMER030080', ex.stderr)): msg = basic_lib.output_err(615, copy_method=method, pvol=pvol) raise exception.HBSDBusy(message=msg) else: raise def _comm_pairevtwait(self, pvol, svol, is_vvol): unit = self.unit_name if not is_vvol: pairname = 'SI_LU%04d_LU%04d' % (pvol, svol) 
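            # e.g. pvol=4, svol=9 yields pairname 'SI_LU0004_LU0009'
            # on this full-copy ('-si') path; the vvol branch below
            # builds 'SS_LU0004_LU0009' for the snapshot ('-ss') path.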
method = '-si' else: pairname = 'SS_LU%04d_LU%04d' % (pvol, svol) method = '-ss' opt = ('-unit %s -evwait %s -pairname %s -gname Ungrouped -nowait' % (unit, method, pairname)) ret, stdout, stderr = self.exec_hsnm('aureplicationmon', opt, noretry=True) return ret def _wait_for_pair_status(self, pvol, svol, is_vvol, status, timeout, start): if self._comm_pairevtwait(pvol, svol, is_vvol) in status: raise loopingcall.LoopingCallDone() if time.time() - start >= timeout: msg = basic_lib.output_err( 637, method='_wait_for_pair_status', timeout=timeout) raise exception.HBSDError(message=msg) def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval): loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_pair_status, pvol, svol, is_vvol, status, timeout, time.time()) loop.start(interval=interval).wait() def delete_pair(self, pvol, svol, is_vvol): self.pairoperate('simplex', pvol, svol, is_vvol) def trans_status_hsnm2raid(self, str): status = None obj = re.search('Split\((.*)%\)', str) if obj: status = basic_lib.PSUS obj = re.search('Paired\((.*)%\)', str) if obj: status = basic_lib.PAIR return status def get_paired_info(self, ldev, only_flag=False): opt_base = '-unit %s -refer' % self.unit_name if only_flag: opt_base = '%s -ss' % opt_base opt = '%s -pvol %d' % (opt_base, ldev) ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt, noretry=True) if ret == 0: lines = stdout.splitlines() pair_info = {'pvol': ldev, 'svol': []} for line in lines[1:]: status = self.trans_status_hsnm2raid(line) if re.search('SnapShot', line[100:]): is_vvol = True else: is_vvol = False line = shlex.split(line) if not line: break svol = int(line[2]) pair_info['svol'].append({'lun': svol, 'status': status, 'is_vvol': is_vvol}) return pair_info opt = '%s -svol %d' % (opt_base, ldev) ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt, noretry=True) if ret == 1: return {'pvol': None, 'svol': []} lines = stdout.splitlines() status = self.trans_status_hsnm2raid(lines[1]) if re.search('SnapShot', lines[1][100:]): is_vvol = True else: is_vvol = False line = shlex.split(lines[1]) pvol = int(line[1]) return {'pvol': pvol, 'svol': [{'lun': ldev, 'status': status, 'is_vvol': is_vvol}]} def create_lock_file(self): basic_lib.create_empty_file(self.hsnm_lock_file) def get_hostgroup_luns(self, port, gid): list = [] self.add_used_hlun('auhgmap', port, gid, list, DUMMY_LU) return list def get_ldev_size_in_gigabyte(self, ldev, existing_ref): param = 'unit_name' if param not in existing_ref: msg = basic_lib.output_err(700, param=param) raise exception.HBSDError(data=msg) storage = existing_ref.get(param) if storage != self.conf.hitachi_unit_name: msg = basic_lib.output_err(648, resource=param) raise exception.HBSDError(data=msg) try: stdout = self._get_lu(ldev) except exception.HBSDError: with excutils.save_and_reraise_exception(): basic_lib.output_err(648, resource='LDEV') lines = stdout.splitlines() line = lines[2] splits = shlex.split(line) vol_type = splits[len(splits) - 1] if basic_lib.NORMAL_VOLUME_TYPE != vol_type: msg = basic_lib.output_err(702, ldev=ldev) raise exception.HBSDError(data=msg) dppool = splits[5] if 'N/A' == dppool: msg = basic_lib.output_err(702, ldev=ldev) raise exception.HBSDError(data=msg) # Hitachi storage calculates volume sizes in a block unit, 512 bytes. # So, units.Gi is divided by 512. 
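        # Worked example: units.Gi / 512 == 2097152 blocks per GiB, so
        # a 10 GiB LDEV reports size == 20971520 blocks; the modulo
        # check below passes and the method returns
        # 20971520 / 2097152 == 10.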
size = int(splits[1]) if size % (units.Gi / 512): msg = basic_lib.output_err(703, ldev=ldev) raise exception.HBSDError(data=msg) num_port = int(splits[len(splits) - 2]) if num_port: msg = basic_lib.output_err(704, ldev=ldev) raise exception.HBSDError(data=msg) return size / (units.Gi / 512) cinder-8.0.0/cinder/volume/drivers/hitachi/hbsd_basiclib.py0000664000567000056710000002326412701406250025114 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import os import shlex from oslo_concurrency import lockutils from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE from cinder import utils SMPL = 1 COPY = 2 PAIR = 3 PSUS = 4 PSUE = 5 UNKN = 0xff FULL = 'Full copy' THIN = 'Thin copy' DEFAULT_TRY_RANGE = range(3) MAX_PROCESS_WAITTIME = 86400 DEFAULT_PROCESS_WAITTIME = 900 GETSTORAGEARRAY_ONCE = 100 WARNING_ID = 300 DEFAULT_GROUP_RANGE = [0, 65535] NAME_PREFIX = 'HBSD-' NORMAL_VOLUME_TYPE = 'Normal' LOCK_DIR = '/var/lock/hbsd/' LOG = logging.getLogger(__name__) HBSD_INFO_MSG = { 1: _('The parameter of the storage backend. ' '(config_group: %(config_group)s)'), 3: _('The storage backend can be used. (config_group: %(config_group)s)'), 4: _('The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)'), 5: _('The volume %(volume_id)s is unmanaged successfully. ' '(LDEV: %(ldev)s)'), } HBSD_WARN_MSG = { 301: _('A LUN (HLUN) was not found. (LDEV: %(ldev)s)'), 302: _('Failed to specify a logical device for the volume ' '%(volume_id)s to be unmapped.'), 303: _('An iSCSI CHAP user could not be deleted. (username: %(user)s)'), 304: _('Failed to specify a logical device to be deleted. ' '(method: %(method)s, id: %(id)s)'), 305: _('The logical device for specified %(type)s %(id)s ' 'was already deleted.'), 306: _('A host group could not be deleted. (port: %(port)s, ' 'gid: %(gid)s, name: %(name)s)'), 307: _('An iSCSI target could not be deleted. (port: %(port)s, ' 'tno: %(tno)s, alias: %(alias)s)'), 308: _('A host group could not be added. (port: %(port)s, ' 'name: %(name)s)'), 309: _('An iSCSI target could not be added. ' '(port: %(port)s, alias: %(alias)s, reason: %(reason)s)'), 310: _('Failed to unmap a logical device. (LDEV: %(ldev)s, ' 'reason: %(reason)s)'), 311: _('A free LUN (HLUN) was not found. Add a different host' ' group. (LDEV: %(ldev)s)'), 312: _('Failed to get a storage resource. The system will attempt ' 'to get the storage resource again. (resource: %(resource)s)'), 313: _('Failed to delete a logical device. (LDEV: %(ldev)s, ' 'reason: %(reason)s)'), 314: _('Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, ' 'port: %(port)s, id: %(id)s)'), 315: _('Failed to perform a zero-page reclamation. ' '(LDEV: %(ldev)s, reason: %(reason)s)'), 316: _('Failed to assign the iSCSI initiator IQN. 
(port: %(port)s, ' 'reason: %(reason)s)'), } HBSD_ERR_MSG = { 600: _('The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, ' 'stderr: %(err)s)'), 601: _('A parameter is invalid. (%(param)s)'), 602: _('A parameter value is invalid. (%(meta)s)'), 603: _('Failed to acquire a resource lock. (serial: %(serial)s, ' 'inst: %(inst)s, ret: %(ret)s, stderr: %(err)s)'), 604: _('Cannot set both hitachi_serial_number and hitachi_unit_name.'), 605: _('Either hitachi_serial_number or hitachi_unit_name is required.'), 615: _('A pair could not be created. The maximum number of pair is ' 'exceeded. (copy method: %(copy_method)s, P-VOL: %(pvol)s)'), 616: _('A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)'), 617: _('The specified operation is not supported. The volume size ' 'must be the same as the source %(type)s. (volume: %(volume_id)s)'), 618: _('The volume %(volume_id)s could not be extended. ' 'The volume type must be Normal.'), 619: _('The volume %(volume_id)s to be mapped was not found.'), 624: _('The %(type)s %(id)s source to be replicated was not found.'), 631: _('Failed to create a file. (file: %(file)s, ret: %(ret)s, ' 'stderr: %(err)s)'), 632: _('Failed to open a file. (file: %(file)s, ret: %(ret)s, ' 'stderr: %(err)s)'), 633: _('%(file)s: Permission denied.'), 636: _('Failed to add the logical device.'), 637: _('The method %(method)s is timed out. (timeout value: %(timeout)s)'), 640: _('A pool could not be found. (pool id: %(pool_id)s)'), 641: _('The host group or iSCSI target could not be added.'), 642: _('An iSCSI CHAP user could not be added. (username: %(user)s)'), 643: _('The iSCSI CHAP user %(user)s does not exist.'), 648: _('There are no resources available for use. ' '(resource: %(resource)s)'), 649: _('The host group or iSCSI target was not found.'), 650: _('The resource %(resource)s was not found.'), 651: _('The IP Address was not found.'), 653: _('The creation of a logical device could not be ' 'completed. (LDEV: %(ldev)s)'), 654: _('A volume status is invalid. (status: %(status)s)'), 655: _('A snapshot status is invalid. (status: %(status)s)'), 659: _('A host group is invalid. (host group: %(gid)s)'), 660: _('The specified %(desc)s is busy.'), 700: _('There is no designation of the %(param)s. ' 'The specified storage is essential to manage the volume.'), 701: _('There is no designation of the ldev. ' 'The specified ldev is essential to manage the volume.'), 702: _('The specified ldev %(ldev)s could not be managed. ' 'The volume type must be DP-VOL.'), 703: _('The specified ldev %(ldev)s could not be managed. ' 'The ldev size must be in multiples of gigabyte.'), 704: _('The specified ldev %(ldev)s could not be managed. ' 'The ldev must not be mapping.'), 705: _('The specified ldev %(ldev)s could not be managed. ' 'The ldev must not be paired.'), 706: _('The volume %(volume_id)s could not be unmanaged. 
' 'The volume type must be %(volume_type)s.'), } def set_msg(msg_id, **kwargs): if msg_id < WARNING_ID: msg_header = 'MSGID%04d-I:' % msg_id msg_body = HBSD_INFO_MSG.get(msg_id) else: msg_header = 'MSGID%04d-W:' % msg_id msg_body = HBSD_WARN_MSG.get(msg_id) return '%(header)s %(body)s' % {'header': msg_header, 'body': msg_body % kwargs} def output_err(msg_id, **kwargs): msg = HBSD_ERR_MSG.get(msg_id) % kwargs LOG.error(_LE("MSGID%(id)04d-E: %(msg)s"), {'id': msg_id, 'msg': msg}) return msg def get_process_lock(file): if not os.access(file, os.W_OK): msg = output_err(633, file=file) raise exception.HBSDError(message=msg) return lockutils.InterProcessLock(file) def create_empty_file(filename): if not os.path.exists(filename): try: utils.execute('touch', filename) except putils.ProcessExecutionError as ex: msg = output_err( 631, file=filename, ret=ex.exit_code, err=ex.stderr) raise exception.HBSDError(message=msg) class FileLock(lockutils.InterProcessLock): def __init__(self, name, lock_object): self.lock_object = lock_object super(FileLock, self).__init__(name) def __enter__(self): self.lock_object.acquire() try: ret = super(FileLock, self).__enter__() except Exception: with excutils.save_and_reraise_exception(): self.lock_object.release() return ret def __exit__(self, exc_type, exc_val, exc_tb): try: super(FileLock, self).__exit__(exc_type, exc_val, exc_tb) finally: self.lock_object.release() class NopLock(object): def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass class HBSDBasicLib(object): def __init__(self, conf=None): self.conf = conf def exec_command(self, cmd, args=None, printflag=True): if printflag: if args: LOG.debug('cmd: %(cmd)s, args: %(args)s', {'cmd': cmd, 'args': args}) else: LOG.debug('cmd: %s', cmd) cmd = [cmd] if args: if six.PY2 and isinstance(args, six.text_type): cmd += shlex.split(args.encode()) else: cmd += shlex.split(args) try: stdout, stderr = utils.execute(*cmd, run_as_root=True) ret = 0 except putils.ProcessExecutionError as e: ret = e.exit_code stdout = e.stdout stderr = e.stderr LOG.debug('cmd: %s', cmd) LOG.debug('from: %s', inspect.stack()[2]) LOG.debug('ret: %d', ret) LOG.debug('stdout: %s', stdout.replace(os.linesep, ' ')) LOG.debug('stderr: %s', stderr.replace(os.linesep, ' ')) return ret, stdout, stderr def set_pair_flock(self): return NopLock() def set_horcmgr_flock(self): return NopLock() def discard_zero_page(self, ldev): pass def output_param_to_log(self, conf): pass def connect_storage(self): pass def get_max_hostgroups(self): pass def restart_pair_horcm(self): pass cinder-8.0.0/cinder/volume/drivers/hitachi/hnas_iscsi.py0000664000567000056710000011411112701406257024466 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS-HNAS) platform. 
""" import os import re import six from xml.etree import ElementTree as ETree from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils as cinder_utils from cinder.volume import driver from cinder.volume.drivers.hitachi import hnas_backend from cinder.volume import utils from cinder.volume import volume_types HDS_HNAS_ISCSI_VERSION = '4.3.0' LOG = logging.getLogger(__name__) iSCSI_OPTS = [ cfg.StrOpt('hds_hnas_iscsi_config_file', default='/opt/hds/hnas/cinder_iscsi_conf.xml', help='Configuration file for HDS iSCSI cinder plugin')] CONF = cfg.CONF CONF.register_opts(iSCSI_OPTS) HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'chap_enabled': 'True', 'ssh_port': '22'} MAX_HNAS_ISCSI_TARGETS = 32 def factory_bend(drv_configs): return hnas_backend.HnasBackend(drv_configs) def _loc_info(loc): """Parse info from location string.""" LOG.info(_LI("Parse_loc: %s"), loc) info = {} tup = loc.split(',') if len(tup) < 5: info['id_lu'] = tup[0].split('.') return info info['id_lu'] = tup[2].split('.') info['tgt'] = tup return info def _xml_read(root, element, check=None): """Read an xml element.""" val = root.findtext(element) # mandatory parameter not found if val is None and check: raise exception.ParameterNotFound(param=element) # tag not found if val is None: return None svc_tag_pattern = re.compile("svc_[0-3]$") # tag found but empty parameter. if not val.strip(): # Service tags are empty if svc_tag_pattern.search(element): return "" else: raise exception.ParameterNotFound(param=element) LOG.debug(_LI("%(element)s: %(val)s"), {'element': element, 'val': val if element != 'password' else '***'}) return val.strip() def _read_config(xml_config_file): """Read hds driver specific xml config file.""" if not os.access(xml_config_file, os.R_OK): msg = (_("Can't open config file: %s") % xml_config_file) raise exception.NotFound(message=msg) try: root = ETree.parse(xml_config_file).getroot() except Exception: msg = (_("Error parsing config file: %s") % xml_config_file) raise exception.ConfigNotFound(message=msg) # mandatory parameters config = {} arg_prereqs = ['mgmt_ip0', 'username'] for req in arg_prereqs: config[req] = _xml_read(root, req, True) # optional parameters opt_parameters = ['hnas_cmd', 'ssh_enabled', 'chap_enabled', 'cluster_admin_ip0'] for req in opt_parameters: config[req] = _xml_read(root, req) if config['chap_enabled'] is None: config['chap_enabled'] = HNAS_DEFAULT_CONFIG['chap_enabled'] if config['ssh_enabled'] == 'True': config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', True) config['ssh_port'] = _xml_read(root, 'ssh_port') config['password'] = _xml_read(root, 'password') if config['ssh_port'] is None: config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port'] else: # password is mandatory when not using SSH config['password'] = _xml_read(root, 'password', True) if config['hnas_cmd'] is None: config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd'] config['hdp'] = {} config['services'] = {} # min one needed for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: if _xml_read(root, svc) is None: continue service = {'label': svc} # none optional for arg in ['volume_type', 'hdp', 'iscsi_ip']: service[arg] = _xml_read(root, svc + '/' + arg, True) config['services'][service['volume_type']] = service config['hdp'][service['hdp']] = service['hdp'] # at least one service required! 
    if not config['services']:
        raise exception.ParameterNotFound(param="No service found")

    return config


class HDSISCSIDriver(driver.ISCSIDriver):
    """HDS HNAS volume driver.

    Version 1.0.0: Initial driver version
    Version 2.2.0: Added support to SSH authentication
    Version 3.2.0: Added pool aware scheduling
                   Fixed concurrency errors
    Version 3.3.0: Fixed iSCSI target limitation error
    Version 4.0.0: Added manage/unmanage features
    Version 4.1.0: Fixed XML parser checks on blank options
    Version 4.2.0: Fixed SSH and cluster_admin_ip0 verification
    Version 4.3.0: Fixed attachment with os-brick 1.0.0
    """

    def __init__(self, *args, **kwargs):
        """Initialize, read different config parameters."""
        super(HDSISCSIDriver, self).__init__(*args, **kwargs)
        self.driver_stats = {}
        self.context = {}
        self.configuration.append_config_values(iSCSI_OPTS)
        self.config = _read_config(
            self.configuration.hds_hnas_iscsi_config_file)
        self.type = 'HNAS'
        self.platform = self.type.lower()
        LOG.info(_LI("Backend type: %s"), self.type)
        self.bend = factory_bend(self.config)

    def _array_info_get(self):
        """Get array parameters."""
        out = self.bend.get_version(self.config['hnas_cmd'],
                                    HDS_HNAS_ISCSI_VERSION,
                                    self.config['mgmt_ip0'],
                                    self.config['username'],
                                    self.config['password'])
        inf = out.split()
        return inf[1], 'hnas_' + inf[1], inf[6]

    def _get_iscsi_info(self):
        """Validate array iSCSI parameters."""
        out = self.bend.get_iscsi_info(self.config['hnas_cmd'],
                                       self.config['mgmt_ip0'],
                                       self.config['username'],
                                       self.config['password'])
        lines = out.split('\n')

        # dict based on iSCSI portal ip addresses
        conf = {}
        for line in lines:
            # only record up links
            if 'CTL' in line and 'Up' in line:
                inf = line.split()
                (ctl, port, ip, ipp) = (inf[1], inf[3], inf[5], inf[7])
                conf[ip] = {}
                conf[ip]['ctl'] = ctl
                conf[ip]['port'] = port
                conf[ip]['iscsi_port'] = ipp
                LOG.debug("portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, "
                          "port: %(pt)s",
                          {'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
        return conf

    def _get_service(self, volume):
        """Get the available service parameters.

        Get the available service parameters for a given volume using
        its type.

        :param volume: dictionary volume reference
        :returns: HDP related to the service
        """
        label = utils.extract_host(volume['host'], level='pool')
        LOG.info(_LI("Using service label: %s"), label)

        if label in self.config['services'].keys():
            svc = self.config['services'][label]
            return svc['hdp']
        else:
            LOG.info(_LI("Available services: %s."),
                     self.config['services'].keys())
            LOG.error(_LE("No configuration found for service: %s."), label)
            raise exception.ParameterNotFound(param=label)

    def _get_service_target(self, volume):
        """Get the available service parameters.

        Get the available service parameters for a given volume using
        its type.

        :param volume: dictionary volume reference
        """
        hdp = self._get_service(volume)
        info = _loc_info(volume['provider_location'])
        (arid, lun_name) = info['id_lu']

        evsid = self.bend.get_evs(self.config['hnas_cmd'],
                                  self.config['mgmt_ip0'],
                                  self.config['username'],
                                  self.config['password'],
                                  hdp)
        svc_label = utils.extract_host(volume['host'], level='pool')
        svc = self.config['services'][svc_label]

        LOG.info(_LI("_get_service_target hdp: %s."), hdp)
        LOG.info(_LI("config[services]: %s."), self.config['services'])

        mapped, lunid, tgt = self.bend.check_lu(self.config['hnas_cmd'],
                                                self.config['mgmt_ip0'],
                                                self.config['username'],
                                                self.config['password'],
                                                lun_name, hdp)

        LOG.info(_LI("Target is %(map)s! Target list = %(tgtl)s."),
                 {'map': "mapped" if mapped else "not mapped", 'tgtl': tgt})
        # The volume is already mapped to a LUN, so no need to create
        # any targets
        if mapped:
            service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
                       svc['port'], hdp, tgt['alias'], tgt['secret'])
            return service

        # Each EVS can have up to 32 targets. Each target can have up
        # to 32 LUNs attached and has the name format 'evs<id>-tgt<0-N>'.
        # We run from the first 'evs1-tgt0' until we find a target that
        # either does not exist yet in the BE, or exists but still has
        # slots to place new LUNs.
        found_tgt = False
        for i in range(0, MAX_HNAS_ISCSI_TARGETS):
            tgt_alias = 'evs' + evsid + '-tgt' + six.text_type(i)
            # TODO(erlon): we need to go to the BE 32 times here
            tgt_exist, tgt = self.bend.check_target(self.config['hnas_cmd'],
                                                    self.config['mgmt_ip0'],
                                                    self.config['username'],
                                                    self.config['password'],
                                                    hdp, tgt_alias)
            if (tgt_exist and len(tgt['luns']) < 32) or not tgt_exist:
                # Target exists and has free space, or the target does
                # not exist yet. Proceed and use the target or create a
                # target using this name.
                found_tgt = True
                break

        # If we got here and found_tgt is not True, we ran out of
        # targets: raise and go away.
        if not found_tgt:
            LOG.error(_LE("No more targets available."))
            raise exception.NoMoreTargets(param=tgt_alias)

        LOG.info(_LI("Using target label: %s."), tgt_alias)

        # Check if we have a secret stored for this target so we don't
        # have to go to the BE on every query.
        if 'targets' not in self.config.keys():
            self.config['targets'] = {}

        if tgt_alias not in self.config['targets'].keys():
            self.config['targets'][tgt_alias] = {}

        tgt_info = self.config['targets'][tgt_alias]

        # HNAS - one time lookup
        # see if the client supports CHAP authentication and if
        # iscsi_secret has already been set, retrieve the secret if
        # available, otherwise generate and store
        if self.config['chap_enabled'] == 'True':
            # It may not exist, create and set secret.
            if 'iscsi_secret' not in tgt_info.keys():
                LOG.info(_LI("Retrieving secret for service: %s."),
                         tgt_alias)

                out = self.bend.get_targetsecret(self.config['hnas_cmd'],
                                                 self.config['mgmt_ip0'],
                                                 self.config['username'],
                                                 self.config['password'],
                                                 tgt_alias, hdp)
                tgt_info['iscsi_secret'] = out
                if tgt_info['iscsi_secret'] == "":
                    random_secret = utils.generate_password()[0:15]
                    tgt_info['iscsi_secret'] = random_secret

                    self.bend.set_targetsecret(self.config['hnas_cmd'],
                                               self.config['mgmt_ip0'],
                                               self.config['username'],
                                               self.config['password'],
                                               tgt_alias, hdp,
                                               tgt_info['iscsi_secret'])

                    LOG.info(_LI("Set tgt CHAP secret for service: %s."),
                             tgt_alias)
        else:
            # We set a blank password when the client does not support
            # CHAP. Later on, if the client tries to create a new
            # target that does not exist in the backend, we check for
            # this value and use a temporary dummy password.
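            # Caching note: self.config['targets'][tgt_alias] lives for
            # the lifetime of the driver instance, so the secret and
            # IQN lookups around this point only hit the backend on the
            # first attach per target.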
if 'iscsi_secret' not in tgt_info.keys(): # Warns in the first time LOG.info(_LI("CHAP authentication disabled.")) tgt_info['iscsi_secret'] = "" if 'tgt_iqn' not in tgt_info: LOG.info(_LI("Retrieving target for service: %s."), tgt_alias) out = self.bend.get_targetiqn(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], tgt_alias, hdp, tgt_info['iscsi_secret']) tgt_info['tgt_iqn'] = out self.config['targets'][tgt_alias] = tgt_info service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'], svc['port'], hdp, tgt_alias, tgt_info['iscsi_secret']) return service def _get_stats(self): """Get HDP stats from HNAS.""" hnas_stat = {} be_name = self.configuration.safe_get('volume_backend_name') hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver' hnas_stat["vendor_name"] = 'HDS' hnas_stat["driver_version"] = HDS_HNAS_ISCSI_VERSION hnas_stat["storage_protocol"] = 'iSCSI' hnas_stat['reserved_percentage'] = 0 for pool in self.pools: out = self.bend.get_hdp_info(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], pool['hdp']) LOG.debug('Query for pool %(pool)s: %(out)s.', {'pool': pool['pool_name'], 'out': out}) (hdp, size, _ign, used) = out.split()[1:5] # in MB pool['total_capacity_gb'] = int(size) / units.Ki pool['free_capacity_gb'] = (int(size) - int(used)) / units.Ki pool['allocated_capacity_gb'] = int(used) / units.Ki pool['QoS_support'] = 'False' pool['reserved_percentage'] = 0 hnas_stat['pools'] = self.pools LOG.info(_LI("stats: stats: %s."), hnas_stat) return hnas_stat def _get_hdp_list(self): """Get HDPs from HNAS.""" out = self.bend.get_hdp_info(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password']) hdp_list = [] for line in out.split('\n'): if 'HDP' in line: inf = line.split() if int(inf[1]) >= units.Ki: # HDP fsids start at units.Ki (1024) hdp_list.append(inf[11]) else: # HDP pools are 2-digits max hdp_list.extend(inf[1:2]) # returns a list of HDP IDs LOG.info(_LI("HDP list: %s"), hdp_list) return hdp_list def _check_hdp_list(self): """Verify HDPs in HNAS array. Verify that all HDPs specified in the configuration files actually exists on the storage. """ hdpl = self._get_hdp_list() lst = self.config['hdp'].keys() for hdp in lst: if hdp not in hdpl: LOG.error(_LE("HDP not found: %s"), hdp) err = "HDP not found: " + hdp raise exception.ParameterNotFound(param=err) # status, verify corresponding status is Normal def _id_to_vol(self, volume_id): """Given the volume id, retrieve the volume object from database. :param volume_id: volume id string """ vol = self.db.volume_get(self.context, volume_id) return vol def _update_vol_location(self, volume_id, loc): """Update the provider location. 
:param volume_id: volume id string :param loc: string provider location value """ update = {'provider_location': loc} self.db.volume_update(self.context, volume_id, update) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" pass def do_setup(self, context): """Set up and verify the HDS HNAS storage connection.""" self.context = context (self.arid, self.hnas_name, self.lumax) = self._array_info_get() self._check_hdp_list() service_list = self.config['services'].keys() for svc in service_list: svc = self.config['services'][svc] pool = {} pool['pool_name'] = svc['volume_type'] pool['service_label'] = svc['volume_type'] pool['hdp'] = svc['hdp'] self.pools.append(pool) LOG.info(_LI("Configured pools: %s"), self.pools) iscsi_info = self._get_iscsi_info() LOG.info(_LI("do_setup: %s"), iscsi_info) for svc in self.config['services'].keys(): svc_ip = self.config['services'][svc]['iscsi_ip'] if svc_ip in iscsi_info.keys(): LOG.info(_LI("iSCSI portal found for service: %s"), svc_ip) self.config['services'][svc]['port'] = \ iscsi_info[svc_ip]['port'] self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl'] self.config['services'][svc]['iscsi_port'] = \ iscsi_info[svc_ip]['iscsi_port'] else: # configured iSCSI address not found on the device! LOG.error(_LE("iSCSI portal not found " "for service: %s"), svc_ip) raise exception.ParameterNotFound(param=svc_ip) def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector): """Create an export. Moved to initialize_connection. :param context: context :param volume: volume reference """ name = volume['name'] LOG.debug("create_export %s", name) pass def remove_export(self, context, volume): """Disconnect a volume from an attached instance. :param context: context :param volume: dictionary volume reference """ provider = volume['provider_location'] name = volume['name'] LOG.debug("remove_export provider %(provider)s on %(name)s", {'provider': provider, 'name': name}) pass def create_volume(self, volume): """Create a LU on HNAS. :param volume: dictionary volume reference """ hdp = self._get_service(volume) out = self.bend.create_lu(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], hdp, '%s' % (int(volume['size']) * units.Ki), volume['name']) LOG.info(_LI("create_volume: create_lu returns %s"), out) lun = self.arid + '.' + out.split()[1] sz = int(out.split()[5]) # Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd LOG.info(_LI("LUN %(lun)s of size %(sz)s MB is created."), {'lun': lun, 'sz': sz}) return {'provider_location': lun} def create_cloned_volume(self, dst, src): """Create a clone of a volume. :param dst: dictionary destination volume reference :param src: dictionary source volume reference """ if src['size'] != dst['size']: msg = 'clone volume size mismatch' raise exception.VolumeBackendAPIException(data=msg) hdp = self._get_service(dst) size = int(src['size']) * units.Ki source_vol = self._id_to_vol(src['id']) (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu'] out = self.bend.create_dup(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], slun, hdp, '%s' % size, dst['name']) lun = self.arid + '.' + out.split()[1] size = int(out.split()[5]) LOG.debug("LUN %(lun)s of size %(size)s MB is cloned.", {'lun': lun, 'size': size}) return {'provider_location': lun} def extend_volume(self, volume, new_size): """Extend an existing volume.
:param volume: dictionary volume reference :param new_size: int size in GB to extend """ hdp = self._get_service(volume) (arid, lun) = _loc_info(volume['provider_location'])['id_lu'] self.bend.extend_vol(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], hdp, lun, '%s' % (new_size * units.Ki), volume['name']) LOG.info(_LI("LUN %(lun)s extended to %(size)s GB."), {'lun': lun, 'size': new_size}) def delete_volume(self, volume): """Delete an LU on HNAS. :param volume: dictionary volume reference """ prov_loc = volume['provider_location'] if prov_loc is None: LOG.error(_LE("delete_vol: provider location empty.")) return info = _loc_info(prov_loc) (arid, lun) = info['id_lu'] if 'tgt' in info.keys(): # connected? LOG.info(_LI("delete lun loc %s"), info['tgt']) # loc = id.lun (_portal, iqn, loc, ctl, port, hlun) = info['tgt'] self.bend.del_iscsi_conn(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], ctl, iqn, hlun) name = self.hnas_name LOG.debug("delete lun %(lun)s on %(name)s", {'lun': lun, 'name': name}) hdp = self._get_service(volume) self.bend.delete_lu(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], hdp, lun) @cinder_utils.synchronized('volume_mapping') def initialize_connection(self, volume, connector): """Map the created volume to connector['initiator']. :param volume: dictionary volume reference :param connector: dictionary connector reference """ LOG.info(_LI("initialize volume %(vol)s connector %(conn)s"), {'vol': volume, 'conn': connector}) # connector keys: ip, host, wwnns, initiator, wwpns service_info = self._get_service_target(volume) (ip, ipp, ctl, port, _hdp, tgtalias, secret) = service_info info = _loc_info(volume['provider_location']) if 'tgt' in info.keys(): # spurious repeat connection LOG.debug("initiate_conn: tgt already set %s", info['tgt']) (arid, lun_name) = info['id_lu'] loc = arid + '.' + lun_name # sps, use target if provided try: out = self.bend.add_iscsi_conn(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], lun_name, _hdp, port, tgtalias, connector['initiator']) except processutils.ProcessExecutionError: msg = _("Error attaching volume %s. " "Target limit might be reached!") % volume['id'] raise exception.ISCSITargetAttachFailed(message=msg) hnas_portal = ip + ':' + ipp # sps need hlun, fulliqn hlun = out.split()[1] fulliqn = out.split()[13] tgt = hnas_portal + ',' + tgtalias + ',' + loc + ',' + ctl + ',' tgt += port + ',' + hlun LOG.info(_LI("initiate: connection %s"), tgt) properties = {} properties['provider_location'] = tgt self._update_vol_location(volume['id'], tgt) properties['target_discovered'] = False properties['target_portal'] = hnas_portal properties['target_iqn'] = fulliqn properties['target_lun'] = int(hlun) properties['volume_id'] = volume['id'] properties['auth_username'] = connector['initiator'] if self.config['chap_enabled'] == 'True': properties['auth_method'] = 'CHAP' properties['auth_password'] = secret conn_info = {'driver_volume_type': 'iscsi', 'data': properties} LOG.debug("initialize_connection: conn_info: %s.", conn_info) return conn_info @cinder_utils.synchronized('volume_mapping') def terminate_connection(self, volume, connector, **kwargs): """Terminate a connection to a volume.
:param volume: dictionary volume reference :param connector: dictionary connector reference """ info = _loc_info(volume['provider_location']) if 'tgt' not in info.keys(): # spurious disconnection LOG.warning(_LW("terminate_conn: provider location empty.")) return (arid, lun) = info['id_lu'] (_portal, tgtalias, loc, ctl, port, hlun) = info['tgt'] LOG.info(_LI("terminate: connection %s"), volume['provider_location']) self.bend.del_iscsi_conn(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], ctl, tgtalias, hlun) self._update_vol_location(volume['id'], loc) return {'provider_location': loc} def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. :param volume: dictionary volume reference :param snapshot: dictionary snapshot reference """ size = int(snapshot['volume_size']) * units.Ki (arid, slun) = _loc_info(snapshot['provider_location'])['id_lu'] hdp = self._get_service(volume) out = self.bend.create_dup(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], slun, hdp, '%s' % (size), volume['name']) lun = self.arid + '.' + out.split()[1] sz = int(out.split()[5]) LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot.", {'lun': lun, 'sz': sz}) return {'provider_location': lun} def create_snapshot(self, snapshot): """Create a snapshot. :param snapshot: dictionary snapshot reference """ source_vol = self._id_to_vol(snapshot['volume_id']) hdp = self._get_service(source_vol) size = int(snapshot['volume_size']) * units.Ki (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu'] out = self.bend.create_dup(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], slun, hdp, '%s' % (size), snapshot['name']) lun = self.arid + '.' + out.split()[1] size = int(out.split()[5]) LOG.debug("LUN %(lun)s of size %(size)s MB is created.", {'lun': lun, 'size': size}) return {'provider_location': lun} def delete_snapshot(self, snapshot): """Delete a snapshot. :param snapshot: dictionary snapshot reference """ loc = snapshot['provider_location'] # to take care of spurious input if loc is None: # which could cause an exception. return (arid, lun) = loc.split('.') source_vol = self._id_to_vol(snapshot['volume_id']) hdp = self._get_service(source_vol) myid = self.arid if arid != myid: LOG.error(_LE("Array mismatch %(myid)s vs %(arid)s"), {'myid': myid, 'arid': arid}) msg = 'Array id mismatch in delete snapshot' raise exception.VolumeBackendAPIException(data=msg) self.bend.delete_lu(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], hdp, lun) LOG.debug("LUN %s is deleted.", lun) return def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, update the stats first.""" if refresh: self.driver_stats = self._get_stats() return self.driver_stats def get_pool(self, volume): if not volume['volume_type']: return 'default' else: metadata = {} type_id = volume['volume_type_id'] if type_id is not None: metadata = volume_types.get_volume_type_extra_specs(type_id) if not metadata.get('service_label'): return 'default' else: if metadata['service_label'] not in \ self.config['services'].keys(): return 'default' return metadata['service_label'] def _check_pool_and_fs(self, volume, fs_label): """Validation of the pool and filesystem. Checks if the file system for the volume-type chosen matches the one passed in the volume reference.
Also, checks if the pool for the volume type matches the pool for the host passed. :param volume: Reference to the volume. :param fs_label: Label of the file system. """ pool_from_vol_type = self.get_pool(volume) pool_from_host = utils.extract_host(volume['host'], level='pool') if self.config['services'][pool_from_vol_type]['hdp'] != fs_label: msg = (_("Failed to manage existing volume because the pool of " "the volume type chosen does not match the file system " "passed in the volume reference."), {'File System passed': fs_label, 'File System for volume type': self.config['services'][pool_from_vol_type]['hdp']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if pool_from_host != pool_from_vol_type: msg = (_("Failed to manage existing volume because the pool of " "the volume type chosen does not match the pool of " "the host."), {'Pool of the volume type': pool_from_vol_type, 'Pool of the host': pool_from_host}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) def _get_info_from_vol_ref(self, vol_ref): """Gets information from the volume reference. Returns the information (File system and volume name) taken from the volume reference. :param vol_ref: existing volume to take under management """ vol_info = vol_ref.strip().split('/') if len(vol_info) == 2 and '' not in vol_info: fs_label = vol_info[0] vol_name = vol_info[1] return fs_label, vol_name else: msg = (_("The reference to the volume in the backend should have " "the format file_system/volume_name (volume_name cannot " "contain '/')")) raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=msg) def manage_existing_get_size(self, volume, existing_vol_ref): """Gets the size to manage_existing. Returns the size of volume to be managed by manage_existing. :param volume: cinder volume to manage :param existing_vol_ref: existing volume to take under management """ # Check that the reference is valid. if 'source-name' not in existing_vol_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_vol_ref, reason=reason) ref_name = existing_vol_ref['source-name'] fs_label, vol_name = self._get_info_from_vol_ref(ref_name) LOG.debug("File System: %(fs_label)s " "Volume name: %(vol_name)s.", {'fs_label': fs_label, 'vol_name': vol_name}) vol_name = "'{}'".format(vol_name) lu_info = self.bend.get_existing_lu_info(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], fs_label, vol_name) if fs_label in lu_info: aux = lu_info.split('\n')[3] size = aux.split(':')[1] size_unit = size.split(' ')[2] if size_unit == 'TB': return int(size.split(' ')[1]) * units.k else: return int(size.split(' ')[1]) else: raise exception.ManageExistingInvalidReference( existing_ref=existing_vol_ref, reason=_('Volume not found on configured storage backend. ' 'If your volume name contains "/", please rename it ' 'and try to manage again.')) def manage_existing(self, volume, existing_vol_ref): """Manages an existing volume. The specified Cinder volume is to be taken into Cinder management. The driver will verify its existence and then rename it to the new Cinder volume name. 
It is expected that the existing volume reference is a File System and some volume_name; e.g., openstack/vol_to_manage :param volume: cinder volume to manage :param existing_vol_ref: driver-specific information used to identify a volume """ ref_name = existing_vol_ref['source-name'] fs_label, vol_name = self._get_info_from_vol_ref(ref_name) LOG.debug("Asked to manage ISCSI volume %(vol)s, with vol " "ref %(ref)s.", {'vol': volume['id'], 'ref': existing_vol_ref['source-name']}) self._check_pool_and_fs(volume, fs_label) vol_name = "'{}'".format(vol_name) self.bend.rename_existing_lu(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], fs_label, volume['name'], vol_name) LOG.info(_LI("Set newly managed Cinder volume name to %(name)s."), {'name': volume['name']}) lun = self.arid + '.' + volume['name'] return {'provider_location': lun} def unmanage(self, volume): """Unmanages a volume from cinder. Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. A log entry will be made to notify the Admin that the volume is no longer being managed. :param volume: cinder volume to unmanage """ svc = self._get_service(volume) new_name = 'unmanage-' + volume['name'] vol_path = svc + '/' + volume['name'] self.bend.rename_existing_lu(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], svc, new_name, volume['name']) LOG.info(_LI("Cinder ISCSI volume with current path %(path)s is " "no longer being managed. The new name is %(unm)s."), {'path': vol_path, 'unm': new_name}) cinder-8.0.0/cinder/volume/drivers/hitachi/hbsd_fc.py0000664000567000056710000004756712701406250023750 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fibre channel Cinder volume driver for Hitachi storage. 
""" import os import threading from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _LI, _LW from cinder import utils import cinder.volume.driver from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib from cinder.volume.drivers.hitachi import hbsd_common as common from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) volume_opts = [ cfg.BoolOpt('hitachi_zoning_request', default=False, help='Request for FC Zone creating HostGroup'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver): VERSION = common.VERSION def __init__(self, *args, **kwargs): os.environ['LANG'] = 'C' super(HBSDFCDriver, self).__init__(*args, **kwargs) self.db = kwargs.get('db') self.common = None self.configuration.append_config_values(common.volume_opts) self._stats = {} self.context = None self.max_hostgroups = None self.pair_hostgroups = [] self.pair_hostnum = 0 self.do_setup_status = threading.Event() def _check_param(self): self.configuration.append_config_values(volume_opts) for opt in volume_opts: getattr(self.configuration, opt.name) def check_param(self): try: self.common.check_param() self._check_param() except exception.HBSDError: raise except Exception as ex: msg = basic_lib.output_err(601, param=six.text_type(ex)) raise exception.HBSDError(message=msg) def output_param_to_log(self): lock = basic_lib.get_process_lock(self.common.system_lock_file) with lock: self.common.output_param_to_log('FC') for opt in volume_opts: if not opt.secret: value = getattr(self.configuration, opt.name) LOG.info(_LI('\t%(name)-35s : %(value)s'), {'name': opt.name, 'value': value}) self.common.command.output_param_to_log(self.configuration) def _add_wwn(self, hgs, port, gid, wwns): for wwn in wwns: wwn = six.text_type(wwn) self.common.command.comm_add_hbawwn(port, gid, wwn) detected = self.common.command.is_detected(port, wwn) hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn, 'detected': detected}) LOG.debug('Create host group for %s', hgs) def _add_lun(self, hostgroups, ldev): if hostgroups is self.pair_hostgroups: is_once = True else: is_once = False self.common.add_lun('auhgmap', hostgroups, ldev, is_once) def _delete_lun(self, hostgroups, ldev): try: self.common.command.comm_delete_lun(hostgroups, ldev) except exception.HBSDNotFound: LOG.warning(basic_lib.set_msg(301, ldev=ldev)) def _get_hgname_gid(self, port, host_grp_name): return self.common.command.get_hgname_gid(port, host_grp_name) def _get_unused_gid(self, port): group_range = self.configuration.hitachi_group_range if not group_range: group_range = basic_lib.DEFAULT_GROUP_RANGE return self.common.command.get_unused_gid(group_range, port) def _get_hostgroup_info(self, hgs, wwns, login=True): target_ports = self.configuration.hitachi_target_ports return self.common.command.comm_get_hostgroup_info( hgs, wwns, target_ports, login=login) def _fill_group(self, hgs, port, host_grp_name, wwns): added_hostgroup = False LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s ' 'name: %(name)s wwns: %(wwns)s)', {'hgs': hgs, 'port': port, 'name': host_grp_name, 'wwns': wwns}) gid = self._get_hgname_gid(port, host_grp_name) if gid is None: for retry_cnt in basic_lib.DEFAULT_TRY_RANGE: try: gid = self._get_unused_gid(port) self._add_hostgroup(port, gid, host_grp_name) added_hostgroup = True except exception.HBSDNotFound: gid = None LOG.warning(basic_lib.set_msg(312, 
resource='GID')) continue else: LOG.debug('Finished adding host target ' '(port: %(port)s gid: %(gid)d)', {'port': port, 'gid': gid}) break else: msg = basic_lib.output_err(641) raise exception.HBSDError(message=msg) try: if wwns: self._add_wwn(hgs, port, gid, wwns) else: hgs.append({'port': port, 'gid': gid, 'initiator_wwn': None, 'detected': True}) except Exception: with excutils.save_and_reraise_exception(): if added_hostgroup: self._delete_hostgroup(port, gid, host_grp_name) def add_hostgroup_master(self, hgs, master_wwns, host_ip, security_ports): target_ports = self.configuration.hitachi_target_ports group_request = self.configuration.hitachi_group_request wwns = [] for wwn in master_wwns: wwns.append(wwn.lower()) if target_ports and group_request: host_grp_name = '%s%s' % (basic_lib.NAME_PREFIX, host_ip) for port in security_ports: wwns_copy = wwns[:] for hostgroup in hgs: if (hostgroup['port'] == port and hostgroup['initiator_wwn'].lower() in wwns_copy): wwns_copy.remove(hostgroup['initiator_wwn'].lower()) if wwns_copy: try: self._fill_group(hgs, port, host_grp_name, wwns_copy) except Exception as ex: LOG.warning(_LW('Failed to add host group: %s'), ex) LOG.warning(basic_lib.set_msg( 308, port=port, name=host_grp_name)) if not hgs: raise exception.HBSDError(message=basic_lib.output_err(649)) def add_hostgroup_pair(self, pair_hostgroups): if self.configuration.hitachi_unit_name: return properties = utils.brick_get_connector_properties() if 'wwpns' not in properties: msg = basic_lib.output_err(650, resource='HBA') raise exception.HBSDError(message=msg) hostgroups = [] self._get_hostgroup_info(hostgroups, properties['wwpns'], login=False) host_grp_name = '%spair%02x' % (basic_lib.NAME_PREFIX, self.pair_hostnum) for hostgroup in hostgroups: gid = self._get_hgname_gid(hostgroup['port'], host_grp_name) # A 'gid' of 0 is valid and must be treated as found, # so the 'is not None' check below cannot be reduced to a truthiness test.
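# (Illustrative sketch, not driver code: why the explicit None check
# matters. GID 0 is falsy but perfectly valid, so a plain truthiness
# test would silently skip it:
#     gid = 0
#     if gid:              # evaluates False -- GID 0 wrongly skipped
#         use_group(gid)   # use_group() is a hypothetical helper
#     if gid is not None:  # evaluates True -- GID 0 handled correctly
#         use_group(gid)
# )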
if gid is not None: pair_hostgroups.append({'port': hostgroup['port'], 'gid': gid, 'initiator_wwn': None, 'detected': True}) break if not pair_hostgroups: for hostgroup in hostgroups: pair_port = hostgroup['port'] try: self._fill_group(pair_hostgroups, pair_port, host_grp_name, None) except Exception: if hostgroup is hostgroups[-1]: raise else: break def add_hostgroup(self): properties = utils.brick_get_connector_properties() if 'wwpns' not in properties: msg = basic_lib.output_err(650, resource='HBA') raise exception.HBSDError(message=msg) LOG.debug("wwpns: %s", properties['wwpns']) hostgroups = [] security_ports = self._get_hostgroup_info( hostgroups, properties['wwpns'], login=False) self.add_hostgroup_master(hostgroups, properties['wwpns'], properties['ip'], security_ports) self.add_hostgroup_pair(self.pair_hostgroups) def _get_target_wwn(self, port): target_wwns = self.common.command.comm_set_target_wwns( self.configuration.hitachi_target_ports) return target_wwns[port] def _add_hostgroup(self, port, gid, host_grp_name): self.common.command.comm_add_hostgrp(port, gid, host_grp_name) def _delete_hostgroup(self, port, gid, host_grp_name): try: self.common.command.comm_del_hostgrp(port, gid, host_grp_name) except Exception: with excutils.save_and_reraise_exception(): LOG.warning(basic_lib.set_msg( 306, port=port, gid=gid, name=host_grp_name)) def _check_volume_mapping(self, hostgroup): port = hostgroup['port'] gid = hostgroup['gid'] if self.common.command.get_hostgroup_luns(port, gid): return True else: return False def _build_initiator_target_map(self, hostgroups, terminate=False): target_wwns = [] init_targ_map = {} target_ports = self.configuration.hitachi_target_ports zoning_request = self.configuration.hitachi_zoning_request for hostgroup in hostgroups: target_wwn = self._get_target_wwn(hostgroup['port']) if target_wwn not in target_wwns: target_wwns.append(target_wwn) if target_ports and zoning_request: if terminate and self._check_volume_mapping(hostgroup): continue initiator_wwn = hostgroup['initiator_wwn'] if initiator_wwn not in init_targ_map: init_targ_map[initiator_wwn] = [] init_targ_map[initiator_wwn].append(target_wwn) return target_wwns, init_targ_map def _get_properties(self, volume, hostgroups, terminate=False): properties = {} target_wwns, init_targ_map = self._build_initiator_target_map( hostgroups, terminate) properties['target_wwn'] = target_wwns if init_targ_map: properties['initiator_target_map'] = init_targ_map if not terminate: properties['target_lun'] = hostgroups[0]['lun'] return properties def do_setup(self, context): self.context = context self.common = common.HBSDCommon(self.configuration, self, context, self.db) self.check_param() self.common.create_lock_file() self.common.command.connect_storage() self.max_hostgroups = self.common.command.get_max_hostgroups() lock = basic_lib.get_process_lock(self.common.service_lock_file) with lock: self.add_hostgroup() self.output_param_to_log() self.do_setup_status.set() def check_for_setup_error(self): pass def extend_volume(self, volume, new_size): self.do_setup_status.wait() self.common.extend_volume(volume, new_size) def get_volume_stats(self, refresh=False): if refresh: if self.do_setup_status.isSet(): self.common.output_backend_available_once() _stats = self.common.update_volume_stats("FC") if _stats: self._stats = _stats return self._stats def create_volume(self, volume): self.do_setup_status.wait() metadata = self.common.create_volume(volume) return metadata def delete_volume(self, volume): 
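# (Pattern note, illustrative only: like the other public entry points
# in this driver, delete_volume first blocks on do_setup_status, a
# threading.Event that do_setup() sets once initialization finishes.
# A minimal sketch of the same gating, with hypothetical names:
#     ready = threading.Event()
#     def do_setup():   # runs once at service start
#         ...           # connect to storage, build host groups
#         ready.set()
#     def api_call():   # any volume operation
#         ready.wait()  # block until setup has completed
# )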
self.do_setup_status.wait() self.common.delete_volume(volume) def create_snapshot(self, snapshot): self.do_setup_status.wait() metadata = self.common.create_snapshot(snapshot) return metadata def delete_snapshot(self, snapshot): self.do_setup_status.wait() self.common.delete_snapshot(snapshot) def create_cloned_volume(self, volume, src_vref): self.do_setup_status.wait() metadata = self.common.create_cloned_volume(volume, src_vref) return metadata def create_volume_from_snapshot(self, volume, snapshot): self.do_setup_status.wait() metadata = self.common.create_volume_from_snapshot(volume, snapshot) return metadata def _initialize_connection(self, ldev, connector, src_hgs=None): LOG.debug("Call _initialize_connection " "(config_group: %(group)s ldev: %(ldev)d)", {'group': self.configuration.config_group, 'ldev': ldev}) if src_hgs is self.pair_hostgroups: hostgroups = src_hgs else: hostgroups = [] security_ports = self._get_hostgroup_info( hostgroups, connector['wwpns'], login=True) self.add_hostgroup_master(hostgroups, connector['wwpns'], connector['ip'], security_ports) if src_hgs is self.pair_hostgroups: try: self._add_lun(hostgroups, ldev) except exception.HBSDNotFound: LOG.warning(basic_lib.set_msg(311, ldev=ldev)) for i in range(self.max_hostgroups + 1): self.pair_hostnum += 1 pair_hostgroups = [] try: self.add_hostgroup_pair(pair_hostgroups) self.pair_hostgroups.extend(pair_hostgroups) except exception.HBSDNotFound: if i >= self.max_hostgroups: msg = basic_lib.output_err(648, resource='GID') raise exception.HBSDError(message=msg) else: break self.pair_initialize_connection(ldev) else: self._add_lun(hostgroups, ldev) return hostgroups @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): self.do_setup_status.wait() ldev = self.common.get_ldev(volume) if ldev is None: msg = basic_lib.output_err(619, volume_id=volume['id']) raise exception.HBSDError(message=msg) self.common.add_volinfo(ldev, volume['id']) with self.common.volume_info[ldev]['lock'],\ self.common.volume_info[ldev]['in_use']: hostgroups = self._initialize_connection(ldev, connector) properties = self._get_properties(volume, hostgroups) LOG.debug('Initialize volume_info: %s', self.common.volume_info) LOG.debug('HFCDrv: properties=%s', properties) return { 'driver_volume_type': 'fibre_channel', 'data': properties } def _terminate_connection(self, ldev, connector, src_hgs): LOG.debug("Call _terminate_connection(config_group: %s)", self.configuration.config_group) hostgroups = src_hgs[:] self._delete_lun(hostgroups, ldev) LOG.debug("*** _terminate_ ***") @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): self.do_setup_status.wait() ldev = self.common.get_ldev(volume) if ldev is None: LOG.warning(basic_lib.set_msg(302, volume_id=volume['id'])) return if 'wwpns' not in connector: msg = basic_lib.output_err(650, resource='HBA') raise exception.HBSDError(message=msg) hostgroups = [] self._get_hostgroup_info(hostgroups, connector['wwpns'], login=False) if not hostgroups: msg = basic_lib.output_err(649) raise exception.HBSDError(message=msg) self.common.add_volinfo(ldev, volume['id']) with self.common.volume_info[ldev]['lock'],\ self.common.volume_info[ldev]['in_use']: self._terminate_connection(ldev, connector, hostgroups) properties = self._get_properties(volume, hostgroups, terminate=True) LOG.debug('Terminate volume_info: %s', self.common.volume_info) return { 'driver_volume_type': 'fibre_channel', 'data': properties } def pair_initialize_connection(self, ldev): if 
self.configuration.hitachi_unit_name: return self._initialize_connection(ldev, None, self.pair_hostgroups) def pair_terminate_connection(self, ldev): if self.configuration.hitachi_unit_name: return self._terminate_connection(ldev, None, self.pair_hostgroups) def discard_zero_page(self, volume): self.common.command.discard_zero_page(self.common.get_ldev(volume)) def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def copy_image_to_volume(self, context, volume, image_service, image_id): self.do_setup_status.wait() super(HBSDFCDriver, self).copy_image_to_volume(context, volume, image_service, image_id) self.discard_zero_page(volume) def copy_volume_to_image(self, context, volume, image_service, image_meta): self.do_setup_status.wait() if volume['volume_attachment']: desc = 'volume %s' % volume['id'] msg = basic_lib.output_err(660, desc=desc) raise exception.HBSDError(message=msg) super(HBSDFCDriver, self).copy_volume_to_image(context, volume, image_service, image_meta) def before_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions before copy volume data. This method will be called before _copy_volume_data during volume migration. """ self.do_setup_status.wait() def after_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions after copy volume data. This method will be called after _copy_volume_data during volume migration. """ self.discard_zero_page(dest_vol) def restore_backup(self, context, backup, volume, backup_service): self.do_setup_status.wait() super(HBSDFCDriver, self).restore_backup(context, backup, volume, backup_service) self.discard_zero_page(volume) def manage_existing(self, volume, existing_ref): return self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): self.do_setup_status.wait() return self.common.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): self.do_setup_status.wait() self.common.unmanage(volume) cinder-8.0.0/cinder/volume/drivers/hitachi/hbsd_iscsi.py0000664000567000056710000004003612701406250024452 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ iSCSI Cinder volume driver for Hitachi storage.
""" import os import threading from oslo_config import cfg from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _LE, _LI from cinder import utils import cinder.volume.driver from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib from cinder.volume.drivers.hitachi import hbsd_common as common LOG = logging.getLogger(__name__) CHAP_METHOD = ('None', 'CHAP None', 'CHAP') volume_opts = [ cfg.BoolOpt('hitachi_add_chap_user', default=False, help='Add CHAP user'), cfg.StrOpt('hitachi_auth_method', help='iSCSI authentication method'), cfg.StrOpt('hitachi_auth_user', default='%sCHAP-user' % basic_lib.NAME_PREFIX, help='iSCSI authentication username'), cfg.StrOpt('hitachi_auth_password', default='%sCHAP-password' % basic_lib.NAME_PREFIX, help='iSCSI authentication password', secret=True), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver): VERSION = common.VERSION def __init__(self, *args, **kwargs): os.environ['LANG'] = 'C' super(HBSDISCSIDriver, self).__init__(*args, **kwargs) self.db = kwargs.get('db') self.common = None self.configuration.append_config_values(common.volume_opts) self._stats = {} self.context = None self.do_setup_status = threading.Event() def _check_param(self): self.configuration.append_config_values(volume_opts) if (self.configuration.hitachi_auth_method and self.configuration.hitachi_auth_method not in CHAP_METHOD): raise exception.HBSDError( message=basic_lib.output_err(601, param='hitachi_auth_method')) if self.configuration.hitachi_auth_method == 'None': self.configuration.hitachi_auth_method = None for opt in volume_opts: getattr(self.configuration, opt.name) def check_param(self): try: self.common.check_param() self._check_param() except exception.HBSDError: raise except Exception as ex: raise exception.HBSDError( message=basic_lib.output_err(601, param=six.text_type(ex))) def output_param_to_log(self): lock = basic_lib.get_process_lock(self.common.system_lock_file) with lock: self.common.output_param_to_log('iSCSI') for opt in volume_opts: if not opt.secret: value = getattr(self.configuration, opt.name) LOG.info(_LI('\t%(name)-35s : %(value)s'), {'name': opt.name, 'value': value}) def _delete_lun_iscsi(self, hostgroups, ldev): try: self.common.command.comm_delete_lun_iscsi(hostgroups, ldev) except exception.HBSDNotFound: LOG.warning(basic_lib.set_msg(301, ldev=ldev)) def _add_target(self, hostgroups, ldev): self.common.add_lun('autargetmap', hostgroups, ldev) def _add_initiator(self, hgs, port, gid, host_iqn): self.common.command.comm_add_initiator(port, gid, host_iqn) hgs.append({'port': port, 'gid': int(gid), 'detected': True}) LOG.debug("Create iSCSI target for %s", hgs) def _get_unused_gid_iscsi(self, port): group_range = self.configuration.hitachi_group_range if not group_range: group_range = basic_lib.DEFAULT_GROUP_RANGE return self.common.command.get_unused_gid_iscsi(group_range, port) def _delete_iscsi_target(self, port, target_no, target_alias): ret, _stdout, _stderr = self.common.command.delete_iscsi_target( port, target_no, target_alias) if ret: LOG.warning(basic_lib.set_msg( 307, port=port, tno=target_no, alias=target_alias)) def _delete_chap_user(self, port): ret, _stdout, _stderr = self.common.command.delete_chap_user(port) if ret: LOG.warning(basic_lib.set_msg( 303, user=self.configuration.hitachi_auth_user)) def _get_hostgroup_info_iscsi(self, hgs, host_iqn): return self.common.command.comm_get_hostgroup_info_iscsi( hgs, host_iqn, 
self.configuration.hitachi_target_ports) def _discovery_iscsi_target(self, hostgroups): for hostgroup in hostgroups: ip_addr, ip_port = self.common.command.comm_get_iscsi_ip( hostgroup['port']) target_iqn = self.common.command.comm_get_target_iqn( hostgroup['port'], hostgroup['gid']) hostgroup['ip_addr'] = ip_addr hostgroup['ip_port'] = ip_port hostgroup['target_iqn'] = target_iqn LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s", {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn}) def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn): for port in ports: added_hostgroup = False added_user = False LOG.debug('Create target (hgs: %(hgs)s port: %(port)s ' 'target_iqn: %(tiqn)s target_alias: %(alias)s ' 'add_iqn: %(aiqn)s)', {'hgs': hgs, 'port': port, 'tiqn': target_iqn, 'alias': target_alias, 'aiqn': add_iqn}) gid = self.common.command.get_gid_from_targetiqn( target_iqn, target_alias, port) if gid is None: for retry_cnt in basic_lib.DEFAULT_TRY_RANGE: gid = None try: gid = self._get_unused_gid_iscsi(port) self.common.command.comm_add_hostgrp_iscsi( port, gid, target_alias, target_iqn) added_hostgroup = True except exception.HBSDNotFound: LOG.warning(basic_lib.set_msg(312, resource='GID')) continue except Exception as ex: LOG.warning(basic_lib.set_msg( 309, port=port, alias=target_alias, reason=ex)) break else: LOG.debug('Finished adding target ' '(port: %(port)s gid: %(gid)d)', {'port': port, 'gid': gid}) break if gid is None: LOG.error(_LE('Failed to add target (port: %s)'), port) continue try: if added_hostgroup: if self.configuration.hitachi_auth_method: added_user = self.common.command.set_chap_authention( port, gid) self.common.command.comm_set_hostgrp_reportportal( port, target_alias) self._add_initiator(hgs, port, gid, add_iqn) except Exception as ex: LOG.warning(basic_lib.set_msg( 316, port=port, reason=ex)) if added_hostgroup: if added_user: self._delete_chap_user(port) self._delete_iscsi_target(port, gid, target_alias) def add_hostgroup_core(self, hgs, ports, target_iqn, target_alias, add_iqn): if ports: self._fill_groups(hgs, ports, target_iqn, target_alias, add_iqn) def add_hostgroup_master(self, hgs, master_iqn, host_ip, security_ports): target_ports = self.configuration.hitachi_target_ports group_request = self.configuration.hitachi_group_request target_alias = '%s%s' % (basic_lib.NAME_PREFIX, host_ip) if target_ports and group_request: target_iqn = '%s.target' % master_iqn diff_ports = [] for port in security_ports: for hostgroup in hgs: if hostgroup['port'] == port: break else: diff_ports.append(port) self.add_hostgroup_core(hgs, diff_ports, target_iqn, target_alias, master_iqn) if not hgs: raise exception.HBSDError(message=basic_lib.output_err(649)) def add_hostgroup(self): properties = utils.brick_get_connector_properties() if 'initiator' not in properties: raise exception.HBSDError( message=basic_lib.output_err(650, resource='HBA')) LOG.debug("initiator: %s", properties['initiator']) hostgroups = [] security_ports = self._get_hostgroup_info_iscsi( hostgroups, properties['initiator']) self.add_hostgroup_master(hostgroups, properties['initiator'], properties['ip'], security_ports) def _get_properties(self, volume, hostgroups): conf = self.configuration properties = {} self._discovery_iscsi_target(hostgroups) hostgroup = hostgroups[0] properties['target_discovered'] = True properties['target_portal'] = "%s:%s" % (hostgroup['ip_addr'], hostgroup['ip_port']) properties['target_iqn'] = hostgroup['target_iqn'] properties['target_lun'] =
hostgroup['lun'] if conf.hitachi_auth_method: properties['auth_method'] = 'CHAP' properties['auth_username'] = conf.hitachi_auth_user properties['auth_password'] = conf.hitachi_auth_password return properties def do_setup(self, context): self.context = context self.common = common.HBSDCommon(self.configuration, self, context, self.db) self.check_param() self.common.create_lock_file() self.common.command.connect_storage() lock = basic_lib.get_process_lock(self.common.service_lock_file) with lock: self.add_hostgroup() self.output_param_to_log() self.do_setup_status.set() def check_for_setup_error(self): pass def extend_volume(self, volume, new_size): self.do_setup_status.wait() self.common.extend_volume(volume, new_size) def get_volume_stats(self, refresh=False): if refresh: if self.do_setup_status.isSet(): self.common.output_backend_available_once() _stats = self.common.update_volume_stats("iSCSI") if _stats: self._stats = _stats return self._stats def create_volume(self, volume): self.do_setup_status.wait() metadata = self.common.create_volume(volume) return metadata def delete_volume(self, volume): self.do_setup_status.wait() self.common.delete_volume(volume) def create_snapshot(self, snapshot): self.do_setup_status.wait() metadata = self.common.create_snapshot(snapshot) return metadata def delete_snapshot(self, snapshot): self.do_setup_status.wait() self.common.delete_snapshot(snapshot) def create_cloned_volume(self, volume, src_vref): self.do_setup_status.wait() metadata = self.common.create_cloned_volume(volume, src_vref) return metadata def create_volume_from_snapshot(self, volume, snapshot): self.do_setup_status.wait() metadata = self.common.create_volume_from_snapshot(volume, snapshot) return metadata def _initialize_connection(self, ldev, connector, src_hgs=None): LOG.debug("Call _initialize_connection " "(config_group: %(group)s ldev: %(ldev)d)", {'group': self.configuration.config_group, 'ldev': ldev}) if src_hgs: hostgroups = src_hgs[:] else: hostgroups = [] security_ports = self._get_hostgroup_info_iscsi( hostgroups, connector['initiator']) self.add_hostgroup_master(hostgroups, connector['initiator'], connector['ip'], security_ports) self._add_target(hostgroups, ldev) return hostgroups def initialize_connection(self, volume, connector): self.do_setup_status.wait() ldev = self.common.get_ldev(volume) if ldev is None: raise exception.HBSDError( message=basic_lib.output_err(619, volume_id=volume['id'])) self.common.add_volinfo(ldev, volume['id']) with self.common.volume_info[ldev]['lock'],\ self.common.volume_info[ldev]['in_use']: hostgroups = self._initialize_connection(ldev, connector) protocol = 'iscsi' properties = self._get_properties(volume, hostgroups) LOG.debug('Initialize volume_info: %s', self.common.volume_info) LOG.debug('HFCDrv: properties=%s', properties) return { 'driver_volume_type': protocol, 'data': properties } def _terminate_connection(self, ldev, connector, src_hgs): LOG.debug("Call _terminate_connection(config_group: %s)", self.configuration.config_group) hostgroups = src_hgs[:] self._delete_lun_iscsi(hostgroups, ldev) LOG.debug("*** _terminate_ ***") def terminate_connection(self, volume, connector, **kwargs): self.do_setup_status.wait() ldev = self.common.get_ldev(volume) if ldev is None: LOG.warning(basic_lib.set_msg(302, volume_id=volume['id'])) return if 'initiator' not in connector: raise exception.HBSDError( message=basic_lib.output_err(650, resource='HBA')) hostgroups = [] self._get_hostgroup_info_iscsi(hostgroups, connector['initiator']) if not 
hostgroups: raise exception.HBSDError(message=basic_lib.output_err(649)) self.common.add_volinfo(ldev, volume['id']) with self.common.volume_info[ldev]['lock'],\ self.common.volume_info[ldev]['in_use']: self._terminate_connection(ldev, connector, hostgroups) def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def pair_initialize_connection(self, unused_ldev): pass def pair_terminate_connection(self, unused_ldev): pass def copy_volume_to_image(self, context, volume, image_service, image_meta): self.do_setup_status.wait() if volume['volume_attachment']: desc = 'volume %s' % volume['id'] raise exception.HBSDError( message=basic_lib.output_err(660, desc=desc)) super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume, image_service, image_meta) def manage_existing(self, volume, existing_ref): return self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): self.do_setup_status.wait() return self.common.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): self.do_setup_status.wait() self.common.unmanage(volume) cinder-8.0.0/cinder/volume/drivers/hitachi/hbsd_common.py0000664000567000056710000007647212701406257024654 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common class for Hitachi storage drivers. 
""" import re import threading from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _LE, _LI, _LW from cinder import utils from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib from cinder.volume.drivers.hitachi import hbsd_horcm as horcm from cinder.volume.drivers.hitachi import hbsd_snm2 as snm2 from cinder.volume import utils as volume_utils """ Version history: 1.0.0 - Initial driver 1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods """ VERSION = '1.1.0' PARAM_RANGE = { 'hitachi_copy_check_interval': {'min': 1, 'max': 600}, 'hitachi_async_copy_check_interval': {'min': 1, 'max': 600}, 'hitachi_copy_speed': {'min': 1, 'max': 15}, } DEFAULT_LDEV_RANGE = [0, 65535] COPY_METHOD = ('FULL', 'THIN') VALID_DP_VOLUME_STATUS = ['available', 'in-use'] VALID_V_VOLUME_STATUS = ['available'] SYSTEM_LOCK_FILE = basic_lib.LOCK_DIR + 'system' SERVICE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'service_' STORAGE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'storage_' LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('hitachi_serial_number', help='Serial number of storage system'), cfg.StrOpt('hitachi_unit_name', help='Name of an array unit'), cfg.IntOpt('hitachi_pool_id', help='Pool ID of storage system'), cfg.IntOpt('hitachi_thin_pool_id', help='Thin pool ID of storage system'), cfg.StrOpt('hitachi_ldev_range', help='Range of logical device of storage system'), cfg.StrOpt('hitachi_default_copy_method', default='FULL', help='Default copy method of storage system'), cfg.IntOpt('hitachi_copy_speed', default=3, help='Copy speed of storage system'), cfg.IntOpt('hitachi_copy_check_interval', default=3, help='Interval to check copy'), cfg.IntOpt('hitachi_async_copy_check_interval', default=10, help='Interval to check copy asynchronously'), cfg.StrOpt('hitachi_target_ports', help='Control port names for HostGroup or iSCSI Target'), cfg.StrOpt('hitachi_group_range', help='Range of group number'), cfg.BoolOpt('hitachi_group_request', default=False, secret=True, help='Request for creating HostGroup or iSCSI Target'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class TryLock(object): def __init__(self): self.lock = threading.RLock() self.desc = None def set_desc(self, description): self.desc = description def __enter__(self): if not self.lock.acquire(False): msg = basic_lib.output_err(660, desc=self.desc) raise exception.HBSDError(message=msg) return self def __exit__(self, exc_type, exc_val, exc_tb): self.lock.release() class HBSDCommon(object): def __init__(self, conf, parent, context, db): self.configuration = conf self.generated_from = parent self.context = context self.db = db self.system_lock_file = SYSTEM_LOCK_FILE self.service_lock_file = '%s%s' % (SERVICE_LOCK_PATH_BASE, conf.config_group) if conf.hitachi_serial_number: self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE, six.text_type( conf.hitachi_serial_number)) elif conf.hitachi_unit_name: self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE, six.text_type( conf.hitachi_unit_name)) self.storage_obj_lock = threading.Lock() self.volinfo_lock = threading.Lock() self.volume_info = {} self.output_first = True def get_volume(self, volume_id): return self.db.volume_get(self.context, volume_id) def get_volume_metadata(self, volume_id): return self.db.volume_metadata_get(self.context, volume_id) def get_snapshot_metadata(self, snapshot_id): return self.db.snapshot_metadata_get(self.context, snapshot_id) 
def _update_volume_metadata(self, volume_id, volume_metadata): self.db.volume_metadata_update(self.context, volume_id, volume_metadata, False) def get_ldev(self, obj): if not obj: return None ldev = obj.get('provider_location') if not ldev or not ldev.isdigit(): return None else: return int(ldev) def get_value(self, obj, name, key): if not obj: return None if obj.get(name): if isinstance(obj[name], dict): return obj[name].get(key) else: for i in obj[name]: if i['key'] == key: return i['value'] return None def get_is_vvol(self, obj, name): return self.get_value(obj, name, 'type') == 'V-VOL' def get_volume_is_vvol(self, volume): return self.get_is_vvol(volume, 'volume_metadata') def get_snapshot_is_vvol(self, snapshot): return self.get_is_vvol(snapshot, 'metadata') def get_copy_method(self, volume): method = self.get_value(volume, 'volume_metadata', 'copy_method') if method: if method not in COPY_METHOD: msg = basic_lib.output_err(602, meta='copy_method') raise exception.HBSDError(message=msg) elif (method == 'THIN' and self.configuration.hitachi_thin_pool_id is None): msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') raise exception.HBSDError(message=msg) else: method = self.configuration.hitachi_default_copy_method return method def _string2int(self, num): if not num: return None if num.isdigit(): return int(num, 10) if not re.match(r'\w\w:\w\w:\w\w', num): return None try: num = int(num.replace(':', ''), 16) except ValueError: return None return num def _range2list(self, conf, param): str = getattr(conf, param) lists = str.split('-') if len(lists) != 2: msg = basic_lib.output_err(601, param=param) raise exception.HBSDError(message=msg) first_type = None for i in range(len(lists)): if lists[i].isdigit(): lists[i] = int(lists[i], 10) if first_type == 'hex': msg = basic_lib.output_err(601, param=param) raise exception.HBSDError(message=msg) first_type = 'dig' else: if (first_type == 'dig' or not re.match('\w\w:\w\w:\w\w', lists[i])): msg = basic_lib.output_err(601, param=param) raise exception.HBSDError(message=msg) try: lists[i] = int(lists[i].replace(':', ''), 16) first_type = 'hex' except Exception: msg = basic_lib.output_err(601, param=param) raise exception.HBSDError(message=msg) if lists[0] > lists[1]: msg = basic_lib.output_err(601, param=param) raise exception.HBSDError(message=msg) return lists def output_param_to_log(self, storage_protocol): essential_inherited_param = ['volume_backend_name', 'volume_driver'] conf = self.configuration LOG.info(basic_lib.set_msg(1, config_group=conf.config_group)) version = self.command.get_comm_version() if conf.hitachi_unit_name: prefix = 'HSNM2 version' else: prefix = 'RAID Manager version' LOG.info(_LI('\t%(prefix)-35s : %(version)s'), {'prefix': prefix, 'version': version}) for param in essential_inherited_param: value = conf.safe_get(param) LOG.info(_LI('\t%(param)-35s : %(value)s'), {'param': param, 'value': value}) for opt in volume_opts: if not opt.secret: value = getattr(conf, opt.name) LOG.info(_LI('\t%(name)-35s : %(value)s'), {'name': opt.name, 'value': value}) if storage_protocol == 'iSCSI': value = getattr(conf, 'hitachi_group_request') LOG.info(_LI('\t%(request)-35s : %(value)s'), {'request': 'hitachi_group_request', 'value': value}) def check_param(self): conf = self.configuration if conf.hitachi_unit_name and conf.hitachi_serial_number: msg = basic_lib.output_err(604) raise exception.HBSDError(message=msg) if not conf.hitachi_unit_name and not conf.hitachi_serial_number: msg = basic_lib.output_err(605) raise 
exception.HBSDError(message=msg) if conf.hitachi_pool_id is None: msg = basic_lib.output_err(601, param='hitachi_pool_id') raise exception.HBSDError(message=msg) for param in PARAM_RANGE.keys(): _value = getattr(conf, param) if (_value and (not PARAM_RANGE[param]['min'] <= _value <= PARAM_RANGE[param]['max'])): msg = basic_lib.output_err(601, param=param) raise exception.HBSDError(message=msg) if conf.hitachi_default_copy_method not in COPY_METHOD: msg = basic_lib.output_err(601, param='hitachi_default_copy_method') raise exception.HBSDError(message=msg) if (conf.hitachi_default_copy_method == 'THIN' and conf.hitachi_thin_pool_id is None): msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') raise exception.HBSDError(message=msg) for param in ('hitachi_ldev_range', 'hitachi_group_range'): if not getattr(conf, param): continue else: _value = self._range2list(conf, param) setattr(conf, param, _value) if conf.hitachi_target_ports: conf.hitachi_target_ports = conf.hitachi_target_ports.split(',') for opt in volume_opts: getattr(conf, opt.name) if conf.hitachi_unit_name: self.command = snm2.HBSDSNM2(conf) else: conf.append_config_values(horcm.volume_opts) self.command = horcm.HBSDHORCM(conf) self.command.check_param() self.pair_flock = self.command.set_pair_flock() self.horcmgr_flock = self.command.set_horcmgr_flock() def create_lock_file(self): basic_lib.create_empty_file(self.system_lock_file) basic_lib.create_empty_file(self.service_lock_file) basic_lib.create_empty_file(self.storage_lock_file) self.command.create_lock_file() def _add_ldev(self, volume_num, capacity, pool_id, is_vvol): self.command.comm_add_ldev(pool_id, volume_num, capacity, is_vvol) def _get_unused_volume_num(self, ldev_range): return self.command.get_unused_ldev(ldev_range) def add_volinfo(self, ldev, id=None, type='volume'): with self.volinfo_lock: if ldev not in self.volume_info: self.init_volinfo(self.volume_info, ldev) if id: desc = '%s %s' % (type, id) self.volume_info[ldev]['in_use'].set_desc(desc) def delete_pair(self, ldev, all_split=True, is_vvol=None): paired_info = self.command.get_paired_info(ldev) LOG.debug('paired_info: %s', paired_info) pvol = paired_info['pvol'] svols = paired_info['svol'] driver = self.generated_from restart = False svol_list = [] try: if pvol is None: return elif pvol == ldev: for svol in svols[:]: if svol['is_vvol'] or svol['status'] != basic_lib.PSUS: continue self.command.delete_pair(pvol, svol['lun'], False) restart = True driver.pair_terminate_connection(svol['lun']) svols.remove(svol) if all_split and svols: svol_list.append(six.text_type(svols[0]['lun'])) for svol in svols[1:]: svol_list.append(', %d' % svol['lun']) msg = basic_lib.output_err(616, pvol=pvol, svol=''.join(svol_list)) raise exception.HBSDBusy(message=msg) if not svols: driver.pair_terminate_connection(pvol) else: self.add_volinfo(pvol) if not self.volume_info[pvol]['in_use'].lock.acquire(False): desc = self.volume_info[pvol]['in_use'].desc msg = basic_lib.output_err(660, desc=desc) raise exception.HBSDBusy(message=msg) try: paired_info = self.command.get_paired_info(ldev) if paired_info['pvol'] is None: return svol = paired_info['svol'][0] if svol['status'] != basic_lib.PSUS: msg = basic_lib.output_err(616, pvol=pvol, svol=ldev) raise exception.HBSDBusy(message=msg) self.command.delete_pair(pvol, ldev, svol['is_vvol']) if not svol['is_vvol']: restart = True driver.pair_terminate_connection(ldev) paired_info = self.command.get_paired_info(pvol) if paired_info['pvol'] is None: 
driver.pair_terminate_connection(pvol) finally: self.volume_info[pvol]['in_use'].lock.release() except Exception: with excutils.save_and_reraise_exception(): if restart: try: self.command.restart_pair_horcm() except Exception as e: LOG.warning(_LW('Failed to restart horcm: %s'), e) else: if (all_split or is_vvol) and restart: try: self.command.restart_pair_horcm() except Exception as e: LOG.warning(_LW('Failed to restart horcm: %s'), e) def copy_async_data(self, pvol, svol, is_vvol): path_list = [] driver = self.generated_from try: with self.pair_flock: self.delete_pair(pvol, all_split=False, is_vvol=is_vvol) paired_info = self.command.get_paired_info(pvol) if paired_info['pvol'] is None: driver.pair_initialize_connection(pvol) path_list.append(pvol) driver.pair_initialize_connection(svol) path_list.append(svol) self.command.comm_create_pair(pvol, svol, is_vvol) except Exception: with excutils.save_and_reraise_exception(): for ldev in path_list: try: driver.pair_terminate_connection(ldev) except Exception as ex: LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=ex)) def copy_sync_data(self, src_ldev, dest_ldev, size): src_vol = {'provider_location': six.text_type(src_ldev), 'id': 'src_vol'} dest_vol = {'provider_location': six.text_type(dest_ldev), 'id': 'dest_vol'} properties = utils.brick_get_connector_properties() driver = self.generated_from src_info = None dest_info = None try: dest_info = driver._attach_volume(self.context, dest_vol, properties) src_info = driver._attach_volume(self.context, src_vol, properties) volume_utils.copy_volume(src_info['device']['path'], dest_info['device']['path'], size * 1024, self.configuration.volume_dd_blocksize) finally: if dest_info: driver._detach_volume(self.context, dest_info, dest_vol, properties) if src_info: driver._detach_volume(self.context, src_info, src_vol, properties) self.command.discard_zero_page(dest_ldev) def copy_data(self, pvol, size, p_is_vvol, method): type = 'Normal' is_vvol = method == 'THIN' svol = self._create_volume(size, is_vvol=is_vvol) try: if p_is_vvol: self.copy_sync_data(pvol, svol, size) else: if is_vvol: type = 'V-VOL' self.copy_async_data(pvol, svol, is_vvol) except Exception: with excutils.save_and_reraise_exception(): try: self.delete_ldev(svol, is_vvol) except Exception as ex: LOG.warning(basic_lib.set_msg(313, ldev=svol, reason=ex)) return six.text_type(svol), type def add_lun(self, command, hostgroups, ldev, is_once=False): lock = basic_lib.get_process_lock(self.storage_lock_file) with lock: self.command.comm_add_lun(command, hostgroups, ldev, is_once) def create_ldev(self, size, ldev_range, pool_id, is_vvol): LOG.debug('create start (normal)') for i in basic_lib.DEFAULT_TRY_RANGE: LOG.debug('Try number: %(tries)s / %(max_tries)s', {'tries': i + 1, 'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)}) new_ldev = self._get_unused_volume_num(ldev_range) try: self._add_ldev(new_ldev, size, pool_id, is_vvol) except exception.HBSDNotFound: LOG.warning(basic_lib.set_msg(312, resource='LDEV')) continue else: break else: msg = basic_lib.output_err(636) raise exception.HBSDError(message=msg) LOG.debug('create end (normal: %s)', new_ldev) self.init_volinfo(self.volume_info, new_ldev) return new_ldev def _create_volume(self, size, is_vvol=False): ldev_range = self.configuration.hitachi_ldev_range if not ldev_range: ldev_range = DEFAULT_LDEV_RANGE pool_id = self.configuration.hitachi_pool_id lock = basic_lib.get_process_lock(self.storage_lock_file) with self.storage_obj_lock, lock: ldev = self.create_ldev(size, ldev_range, 
pool_id, is_vvol) return ldev def create_volume(self, volume): volume_metadata = self.get_volume_metadata(volume['id']) volume_metadata['type'] = 'Normal' size = volume['size'] ldev = self._create_volume(size) volume_metadata['ldev'] = six.text_type(ldev) return {'provider_location': six.text_type(ldev), 'metadata': volume_metadata} def delete_ldev(self, ldev, is_vvol): LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)', {'ldev': ldev, 'vvol': is_vvol}) with self.pair_flock: self.delete_pair(ldev) self.command.comm_delete_ldev(ldev, is_vvol) with self.volinfo_lock: if ldev in self.volume_info: self.volume_info.pop(ldev) LOG.debug('delete_ldev is finished ' '(LDEV: %(ldev)d, is_vvol: %(vvol)s)', {'ldev': ldev, 'vvol': is_vvol}) def delete_volume(self, volume): ldev = self.get_ldev(volume) if ldev is None: LOG.warning(basic_lib.set_msg(304, method='delete_volume', id=volume['id'])) return self.add_volinfo(ldev, volume['id']) if not self.volume_info[ldev]['in_use'].lock.acquire(False): desc = self.volume_info[ldev]['in_use'].desc basic_lib.output_err(660, desc=desc) raise exception.VolumeIsBusy(volume_name=volume['name']) try: is_vvol = self.get_volume_is_vvol(volume) try: self.delete_ldev(ldev, is_vvol) except exception.HBSDNotFound: with self.volinfo_lock: if ldev in self.volume_info: self.volume_info.pop(ldev) LOG.warning(basic_lib.set_msg( 305, type='volume', id=volume['id'])) except exception.HBSDBusy: raise exception.VolumeIsBusy(volume_name=volume['name']) finally: if ldev in self.volume_info: self.volume_info[ldev]['in_use'].lock.release() def check_volume_status(self, volume, is_vvol): if not is_vvol: status = VALID_DP_VOLUME_STATUS else: status = VALID_V_VOLUME_STATUS if volume['status'] not in status: msg = basic_lib.output_err(654, status=volume['status']) raise exception.HBSDError(message=msg) def create_snapshot(self, snapshot): src_ref = self.get_volume(snapshot['volume_id']) pvol = self.get_ldev(src_ref) if pvol is None: msg = basic_lib.output_err(624, type='volume', id=src_ref['id']) raise exception.HBSDError(message=msg) self.add_volinfo(pvol, src_ref['id']) with self.volume_info[pvol]['in_use']: is_vvol = self.get_volume_is_vvol(src_ref) self.check_volume_status(src_ref, is_vvol) size = snapshot['volume_size'] snap_metadata = snapshot.get('metadata') method = None if is_vvol else self.get_copy_method(src_ref) svol, type = self.copy_data(pvol, size, is_vvol, method) if type == 'V-VOL': snap_metadata['type'] = type snap_metadata['ldev'] = svol return {'provider_location': svol, 'metadata': snap_metadata} def delete_snapshot(self, snapshot): ldev = self.get_ldev(snapshot) if ldev is None: LOG.warning(basic_lib.set_msg( 304, method='delete_snapshot', id=snapshot['id'])) return self.add_volinfo(ldev, id=snapshot['id'], type='snapshot') if not self.volume_info[ldev]['in_use'].lock.acquire(False): desc = self.volume_info[ldev]['in_use'].desc basic_lib.output_err(660, desc=desc) raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) try: is_vvol = self.get_snapshot_is_vvol(snapshot) try: self.delete_ldev(ldev, is_vvol) except exception.HBSDNotFound: with self.volinfo_lock: if ldev in self.volume_info: self.volume_info.pop(ldev) LOG.warning(basic_lib.set_msg( 305, type='snapshot', id=snapshot['id'])) except exception.HBSDBusy: raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) finally: if ldev in self.volume_info: self.volume_info[ldev]['in_use'].lock.release() def create_cloned_volume(self, volume, src_vref): pvol = self.get_ldev(src_vref) if pvol 
is None: msg = basic_lib.output_err(624, type='volume', id=src_vref['id']) raise exception.HBSDError(message=msg) self.add_volinfo(pvol, src_vref['id']) with self.volume_info[pvol]['in_use']: is_vvol = self.get_volume_is_vvol(src_vref) self.check_volume_status(self.get_volume(src_vref['id']), is_vvol) size = volume['size'] src_size = src_vref['size'] if size != src_size: msg = basic_lib.output_err(617, type='volume', volume_id=volume['id']) raise exception.HBSDError(message=msg) metadata = self.get_volume_metadata(volume['id']) method = None if is_vvol else self.get_copy_method(volume) svol, type = self.copy_data(pvol, size, is_vvol, method) metadata['type'] = type metadata['volume'] = src_vref['id'] metadata['ldev'] = svol return {'provider_location': svol, 'metadata': metadata} def create_volume_from_snapshot(self, volume, snapshot): pvol = self.get_ldev(snapshot) if pvol is None: msg = basic_lib.output_err(624, type='snapshot', id=snapshot['id']) raise exception.HBSDError(message=msg) self.add_volinfo(pvol, id=snapshot['id'], type='snapshot') with self.volume_info[pvol]['in_use']: is_vvol = self.get_snapshot_is_vvol(snapshot) if snapshot['status'] != 'available': msg = basic_lib.output_err(655, status=snapshot['status']) raise exception.HBSDError(message=msg) size = volume['size'] src_size = snapshot['volume_size'] if size != src_size: msg = basic_lib.output_err(617, type='snapshot', volume_id=volume['id']) raise exception.HBSDError(message=msg) metadata = self.get_volume_metadata(volume['id']) method = None if is_vvol else self.get_copy_method(volume) svol, type = self.copy_data(pvol, size, is_vvol, method) metadata['type'] = type metadata['snapshot'] = snapshot['id'] metadata['ldev'] = svol return {'provider_location': svol, 'metadata': metadata} def _extend_volume(self, ldev, old_size, new_size): with self.pair_flock: self.delete_pair(ldev) self.command.comm_extend_ldev(ldev, old_size, new_size) def extend_volume(self, volume, new_size): pvol = self.get_ldev(volume) self.add_volinfo(pvol, volume['id']) with self.volume_info[pvol]['in_use']: if self.get_volume_is_vvol(volume): msg = basic_lib.output_err(618, volume_id=volume['id']) raise exception.HBSDError(message=msg) self._extend_volume(pvol, volume['size'], new_size) def output_backend_available_once(self): if self.output_first: self.output_first = False LOG.warning(basic_lib.set_msg( 3, config_group=self.configuration.config_group)) def update_volume_stats(self, storage_protocol): data = {} total_gb = None free_gb = None data['volume_backend_name'] = self.configuration.safe_get( 'volume_backend_name') or 'HBSD%s' % storage_protocol data['vendor_name'] = 'Hitachi' data['driver_version'] = VERSION data['storage_protocol'] = storage_protocol try: total_gb, free_gb = self.command.comm_get_dp_pool( self.configuration.hitachi_pool_id) except Exception as ex: LOG.error(_LE('Failed to update volume status: %s'), ex) return None data['total_capacity_gb'] = total_gb data['free_capacity_gb'] = free_gb data['reserved_percentage'] = self.configuration.safe_get( 'reserved_percentage') data['QoS_support'] = False LOG.debug('Updating volume status (%s)', data) return data def init_volinfo(self, vol_info, ldev): vol_info[ldev] = {'in_use': TryLock(), 'lock': threading.Lock()} def manage_existing(self, volume, existing_ref): """Manage an existing Hitachi storage volume. 
existing_ref is a dictionary of the form: For HUS 100 Family, {'ldev': <logical device number>, 'unit_name': <unit name of the storage system>} For VSP G1000/VSP/HUS VM, {'ldev': <logical device number>, 'serial_number': <serial number of the storage system>} """ ldev = self._string2int(existing_ref.get('ldev')) LOG.info(basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev)) return {'provider_location': ldev} def _manage_existing_get_size(self, volume, existing_ref): """Return size of volume for manage_existing.""" ldev = self._string2int(existing_ref.get('ldev')) if ldev is None: msg = basic_lib.output_err(701) raise exception.HBSDError(data=msg) size = self.command.get_ldev_size_in_gigabyte(ldev, existing_ref) metadata = {'type': basic_lib.NORMAL_VOLUME_TYPE, 'ldev': ldev} self._update_volume_metadata(volume['id'], metadata) return size def manage_existing_get_size(self, volume, existing_ref): try: return self._manage_existing_get_size(volume, existing_ref) except exception.HBSDError as ex: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=six.text_type(ex)) def _unmanage(self, volume, ldev): with self.horcmgr_flock: self.delete_pair(ldev) with self.volinfo_lock: if ldev in self.volume_info: self.volume_info.pop(ldev) def unmanage(self, volume): """Remove the specified volume from Cinder management.""" ldev = self.get_ldev(volume) if ldev is None: return self.add_volinfo(ldev, volume['id']) if not self.volume_info[ldev]['in_use'].lock.acquire(False): desc = self.volume_info[ldev]['in_use'].desc basic_lib.output_err(660, desc=desc) raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) is_vvol = self.get_volume_is_vvol(volume) if is_vvol: basic_lib.output_err(706, volume_id=volume['id'], volume_type=basic_lib.NORMAL_VOLUME_TYPE) raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) try: self._unmanage(volume, ldev) except exception.HBSDBusy: raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) else: LOG.info(basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev)) finally: if ldev in self.volume_info: self.volume_info[ldev]['in_use'].lock.release() cinder-8.0.0/cinder/volume/drivers/__init__.py0000664000567000056710000000150512701406250022464 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`cinder.volume.driver` -- Cinder Drivers ===================================================== .. automodule:: cinder.volume.driver :platform: Unix :synopsis: Module containing all the Cinder drivers. """ cinder-8.0.0/cinder/volume/drivers/hpe/0000775000567000056710000000000012701406543021133 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/hpe/__init__.py0000664000567000056710000000000012701406250023225 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/hpe/hpe_3par_fc.py0000664000567000056710000005604212701406250023660 0ustar jenkinsjenkins00000000000000# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved.
# # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for HPE 3PAR Storage array. This driver requires 3.1.3 firmware on the 3PAR array, using the 4.x version of the hpe3parclient. You will need to install the python hpe3parclient. sudo pip install --upgrade "hpe3parclient>=4.0" Set the following in the cinder.conf file to enable the 3PAR Fibre Channel Driver along with the required flags: volume_driver=cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver """ try: from hpe3parclient import exceptions as hpeexceptions except ImportError: hpeexceptions = None from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LI, _LW from cinder.volume import driver from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon from cinder.volume.drivers.san import san from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class HPE3PARFCDriver(driver.TransferVD, driver.ManageableVD, driver.ExtendVD, driver.SnapshotVD, driver.ManageableSnapshotsVD, driver.MigrateVD, driver.ConsistencyGroupVD, driver.BaseVD): """OpenStack Fibre Channel driver to enable 3PAR storage array. Version history: 1.0 - Initial driver 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, session changes, faster clone, requires 3.1.2 MU2 firmware, copy volume <--> Image. 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored the drivers to use the new APIs. 1.2.1 - Synchronized extend_volume method. 1.2.2 - Added try/finally around client login/logout. 1.2.3 - Added ability to add WWNs to host. 1.2.4 - Added metadata during attach/detach bug #1258033. 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 2.0.0 - Update hp3parclient API uses 3.0.x 2.0.2 - Add back-end assisted volume migrate 2.0.3 - Added initiator-target map for FC Zone Manager 2.0.4 - Added support for managing/unmanaging of volumes 2.0.5 - Only remove FC Zone on last volume detach 2.0.6 - Added support for volume retype 2.0.7 - Only one FC port is used when a single FC path is present. bug #1360001 2.0.8 - Fixing missing login/logout around attach/detach bug #1367429 2.0.9 - Add support for pools with model update 2.0.10 - Migrate without losing type settings bug #1356608 2.0.11 - Removing locks bug #1381190 2.0.12 - Fix queryHost call to specify wwns bug #1398206 2.0.13 - Fix missing host name during attach bug #1398206 2.0.14 - Removed usage of host name cache #1398914 2.0.15 - Added support for updated detach_volume attachment. 2.0.16 - Added encrypted property to initialize_connection #1439917 2.0.17 - Improved VLUN creation and deletion logic. #1469816 2.0.18 - Changed initialize_connection to use getHostVLUNs. #1475064 2.0.19 - Adds consistency group support 2.0.20 - Update driver to use ABC metaclasses 2.0.21 - Added update_migrated_volume. bug # 1492023 3.0.0 - Rebranded HP to HPE. 
3.0.1 - Remove db access for consistency groups 3.0.2 - Adds v2 managed replication support 3.0.3 - Adds v2 unmanaged replication support 3.0.4 - Adding manage/unmanage snapshot support 3.0.5 - Optimize array ID retrieval 3.0.6 - Update replication to version 2.1 """ VERSION = "3.0.6" def __init__(self, *args, **kwargs): super(HPE3PARFCDriver, self).__init__(*args, **kwargs) self._active_backend_id = kwargs.get('active_backend_id', None) self.configuration.append_config_values(hpecommon.hpe3par_opts) self.configuration.append_config_values(san.san_opts) self.lookup_service = fczm_utils.create_lookup_service() def _init_common(self): return hpecommon.HPE3PARCommon(self.configuration, self._active_backend_id) def _login(self, timeout=None): common = self._init_common() # If replication is enabled and we cannot login, we do not want to # raise an exception so a failover can still be executed. try: common.do_setup(None, timeout=timeout, stats=self._stats) common.client_login() except Exception: if common._replication_enabled: LOG.warning(_LW("The primary array is not reachable at this " "time. Since replication is enabled, " "listing replication targets and failing over " "a volume can still be performed.")) pass else: raise return common def _logout(self, common): # If replication is enabled and we do not have a client ID, we did not # login, but can still failover. There is no need to logout. if common.client is None and common._replication_enabled: return common.client_logout() def _check_flags(self, common): """Sanity check to ensure we have required options set.""" required_flags = ['hpe3par_api_url', 'hpe3par_username', 'hpe3par_password', 'san_ip', 'san_login', 'san_password'] common.check_flags(self.configuration, required_flags) def get_volume_stats(self, refresh=False): common = self._login() try: self._stats = common.get_volume_stats( refresh, self.get_filter_function(), self.get_goodness_function()) self._stats['storage_protocol'] = 'FC' self._stats['driver_version'] = self.VERSION backend_name = self.configuration.safe_get('volume_backend_name') self._stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return self._stats finally: self._logout(common) def do_setup(self, context): common = self._init_common() common.do_setup(context) self._check_flags(common) common.check_for_setup_error() def check_for_setup_error(self): """Setup errors are already checked for in do_setup, so this is a no-op.""" pass def create_volume(self, volume): common = self._login() try: return common.create_volume(volume) finally: self._logout(common) def create_cloned_volume(self, volume, src_vref): common = self._login() try: return common.create_cloned_volume(volume, src_vref) finally: self._logout(common) def delete_volume(self, volume): common = self._login() try: common.delete_volume(volume) finally: self._logout(common) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. TODO: support using the size from the user. """ common = self._login() try: return common.create_volume_from_snapshot(volume, snapshot) finally: self._logout(common) def create_snapshot(self, snapshot): common = self._login() try: common.create_snapshot(snapshot) finally: self._logout(common) def delete_snapshot(self, snapshot): common = self._login() try: common.delete_snapshot(snapshot) finally: self._logout(common) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Assigns the volume to a server.
Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values: { 'driver_volume_type': 'fibre_channel' 'data': { 'encrypted': False, 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'encrypted': False, 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], } } Steps to export a volume on 3PAR * Create a host on the 3par with the target wwn * Create a VLUN for that HOST with the volume we want to export. """ common = self._login() try: # we have to make sure we have a host host = self._create_host(common, volume, connector) target_wwns, init_targ_map, numPaths = \ self._build_initiator_target_map(common, connector) # check if a VLUN already exists for this host existing_vlun = common.find_existing_vlun(volume, host) vlun = None if existing_vlun is None: # now that we have a host, create the VLUN if self.lookup_service is not None and numPaths == 1: nsp = None active_fc_port_list = common.get_active_fc_target_ports() for port in active_fc_port_list: if port['portWWN'].lower() == target_wwns[0].lower(): nsp = port['nsp'] break vlun = common.create_vlun(volume, host, nsp) else: vlun = common.create_vlun(volume, host) else: vlun = existing_vlun info = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': vlun['lun'], 'target_discovered': True, 'target_wwn': target_wwns, 'initiator_target_map': init_targ_map}} encryption_key_id = volume.get('encryption_key_id', None) info['data']['encrypted'] = encryption_key_id is not None return info finally: self._logout(common) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" common = self._login() try: hostname = common._safe_hostname(connector['host']) common.terminate_connection(volume, hostname, wwn=connector['wwpns']) info = {'driver_volume_type': 'fibre_channel', 'data': {}} try: common.client.getHostVLUNs(hostname) except hpeexceptions.HTTPNotFound: # No more exports for this host. LOG.info(_LI("Need to remove FC Zone, building initiator " "target map")) target_wwns, init_targ_map, _numPaths = \ self._build_initiator_target_map(common, connector) info['data'] = {'target_wwn': target_wwns, 'initiator_target_map': init_targ_map} return info finally: self._logout(common) def _build_initiator_target_map(self, common, connector): """Build the target_wwns and the initiator target map.""" fc_ports = common.get_active_fc_target_ports() all_target_wwns = [] target_wwns = [] init_targ_map = {} numPaths = 0 for port in fc_ports: all_target_wwns.append(port['portWWN']) if self.lookup_service is not None: # use FC san lookup to determine which NSPs to use # for the new VLUN. 
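# get_device_mapping_from_network() returns a dict keyed by fabric
# name; each entry is expected to provide 'target_port_wwn_list' and
# 'initiator_port_wwn_list', which the loop below folds into the
# initiator-target map while counting the usable paths.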
dev_map = self.lookup_service.get_device_mapping_from_network( connector['wwpns'], all_target_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) for _target in init_targ_map[initiator]: numPaths += 1 target_wwns = list(set(target_wwns)) else: initiator_wwns = connector['wwpns'] target_wwns = all_target_wwns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map, numPaths def _create_3par_fibrechan_host(self, common, hostname, wwns, domain, persona_id): """Create a 3PAR host. Create a 3PAR host, if there is already a host on the 3par using the same wwn but with a different hostname, return the hostname used by 3PAR. """ # first search for an existing host host_found = None hosts = common.client.queryHost(wwns=wwns) if hosts and hosts['members'] and 'name' in hosts['members'][0]: host_found = hosts['members'][0]['name'] if host_found is not None: return host_found else: persona_id = int(persona_id) common.client.createHost(hostname, FCWwns=wwns, optional={'domain': domain, 'persona': persona_id}) return hostname def _modify_3par_fibrechan_host(self, common, hostname, wwn): mod_request = {'pathOperation': common.client.HOST_EDIT_ADD, 'FCWWNs': wwn} common.client.modifyHost(hostname, mod_request) def _create_host(self, common, volume, connector): """Creates or modifies existing 3PAR host.""" host = None hostname = common._safe_hostname(connector['host']) cpg = common.get_cpg(volume, allowSnap=True) domain = common.get_domain(cpg) try: host = common._get_3par_host(hostname) except hpeexceptions.HTTPNotFound: # get persona from the volume type extra specs persona_id = common.get_persona_type(volume) # host doesn't exist, we have to create it hostname = self._create_3par_fibrechan_host(common, hostname, connector['wwpns'], domain, persona_id) host = common._get_3par_host(hostname) return self._add_new_wwn_to_host(common, host, connector['wwpns']) def _add_new_wwn_to_host(self, common, host, wwns): """Add wwns to a host if one or more don't exist. Identify if argument wwns contains any world wide names not configured in the 3PAR host path. If any are found, add them to the 3PAR host. 
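For example (WWNs illustrative): if the 3PAR host already has a path
for '10:00:00:00:00:00:00:01' and wwns contains that value plus
'10:00:00:00:00:00:00:02', only the second WWN is added to the host.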
""" # get the currently configured wwns # from the host's FC paths host_wwns = [] if 'FCPaths' in host: for path in host['FCPaths']: wwn = path.get('wwn', None) if wwn is not None: host_wwns.append(wwn.lower()) # lower case all wwns in the compare list compare_wwns = [x.lower() for x in wwns] # calculate wwns in compare list, but not in host_wwns list new_wwns = list(set(compare_wwns).difference(host_wwns)) # if any wwns found that were not in host list, # add them to the host if (len(new_wwns) > 0): self._modify_3par_fibrechan_host(common, host['name'], new_wwns) host = common._get_3par_host(host['name']) return host def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def extend_volume(self, volume, new_size): common = self._login() try: common.extend_volume(volume, new_size) finally: self._logout(common) def create_consistencygroup(self, context, group): common = self._login() try: return common.create_consistencygroup(context, group) finally: self._logout(common) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): common = self._login() try: return common.create_consistencygroup_from_src( context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) finally: self._logout(common) def delete_consistencygroup(self, context, group, volumes): common = self._login() try: return common.delete_consistencygroup(context, group, volumes) finally: self._logout(common) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): common = self._login() try: return common.update_consistencygroup(context, group, add_volumes, remove_volumes) finally: self._logout(common) def create_cgsnapshot(self, context, cgsnapshot, snapshots): common = self._login() try: return common.create_cgsnapshot(context, cgsnapshot, snapshots) finally: self._logout(common) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): common = self._login() try: return common.delete_cgsnapshot(context, cgsnapshot, snapshots) finally: self._logout(common) def manage_existing(self, volume, existing_ref): common = self._login() try: return common.manage_existing(volume, existing_ref) finally: self._logout(common) def manage_existing_snapshot(self, snapshot, existing_ref): common = self._login() try: return common.manage_existing_snapshot(snapshot, existing_ref) finally: self._logout(common) def manage_existing_get_size(self, volume, existing_ref): common = self._login() try: return common.manage_existing_get_size(volume, existing_ref) finally: self._logout(common) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): common = self._login() try: return common.manage_existing_snapshot_get_size(snapshot, existing_ref) finally: self._logout(common) def unmanage(self, volume): common = self._login() try: common.unmanage(volume) finally: self._logout(common) def unmanage_snapshot(self, snapshot): common = self._login() try: common.unmanage_snapshot(snapshot) finally: self._logout(common) def attach_volume(self, context, volume, instance_uuid, host_name, mountpoint): common = self._login() try: common.attach_volume(volume, instance_uuid) finally: self._logout(common) def detach_volume(self, context, volume, attachment=None): common = self._login() try: common.detach_volume(volume, attachment) finally: self._logout(common) def retype(self, context, volume, new_type, diff, host): """Convert the volume to 
be of the new type.""" common = self._login() try: return common.retype(volume, new_type, diff, host) finally: self._logout(common) def migrate_volume(self, context, volume, host): if volume['status'] == 'in-use': protocol = host['capabilities']['storage_protocol'] if protocol != 'FC': LOG.debug("3PAR FC driver cannot migrate in-use volume " "to a host with storage_protocol=%s.", protocol) return False, None common = self._login() try: return common.migrate_volume(volume, host) finally: self._logout(common) def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Update the name of the migrated volume to its new ID.""" common = self._login() try: return common.update_migrated_volume(context, volume, new_volume, original_volume_status) finally: self._logout(common) def get_pool(self, volume): common = self._login() try: return common.get_cpg(volume) except hpeexceptions.HTTPNotFound: reason = (_("Volume %s doesn't exist on array.") % volume) LOG.error(reason) raise exception.InvalidVolume(reason) finally: self._logout(common) def failover_host(self, context, volumes, secondary_backend_id): """Force failover to a secondary replication target.""" common = self._login(timeout=30) try: # Update the active_backend_id in the driver and return it. active_backend_id, volume_updates = common.failover_host( context, volumes, secondary_backend_id) self._active_backend_id = active_backend_id return active_backend_id, volume_updates finally: self._logout(common) cinder-8.0.0/cinder/volume/drivers/hpe/hpe_xp_opts.py0000664000567000056710000000764512701406250024034 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HPE XP driver options.""" from oslo_config import cfg FC_VOLUME_OPTS = [ cfg.BoolOpt( 'hpexp_zoning_request', default=False, help='Request for FC Zone creating host group', deprecated_name='hpxp_zoning_request'), ] COMMON_VOLUME_OPTS = [ cfg.StrOpt( 'hpexp_storage_cli', help='Type of storage command line interface', deprecated_name='hpxp_storage_cli'), cfg.StrOpt( 'hpexp_storage_id', help='ID of storage system', deprecated_name='hpxp_storage_id'), cfg.StrOpt( 'hpexp_pool', help='Pool of storage system', deprecated_name='hpxp_pool'), cfg.StrOpt( 'hpexp_thin_pool', help='Thin pool of storage system', deprecated_name='hpxp_thin_pool'), cfg.StrOpt( 'hpexp_ldev_range', help='Logical device range of storage system', deprecated_name='hpxp_ldev_range'), cfg.StrOpt( 'hpexp_default_copy_method', default='FULL', help='Default copy method of storage system. ' 'There are two valid values: "FULL" specifies a full copy; ' '"THIN" specifies a thin copy.
Default value is "FULL"', deprecated_name='hpxp_default_copy_method'), cfg.IntOpt( 'hpexp_copy_speed', default=3, help='Copy speed of storage system', deprecated_name='hpxp_copy_speed'), cfg.IntOpt( 'hpexp_copy_check_interval', default=3, help='Interval to check copy', deprecated_name='hpxp_copy_check_interval'), cfg.IntOpt( 'hpexp_async_copy_check_interval', default=10, help='Interval to check copy asynchronously', deprecated_name='hpxp_async_copy_check_interval'), cfg.ListOpt( 'hpexp_target_ports', help='Target port names for host group or iSCSI target', deprecated_name='hpxp_target_ports'), cfg.ListOpt( 'hpexp_compute_target_ports', help=( 'Target port names of compute node ' 'for host group or iSCSI target'), deprecated_name='hpxp_compute_target_ports'), cfg.BoolOpt( 'hpexp_group_request', default=False, help='Request for creating host group or iSCSI target', deprecated_name='hpxp_group_request'), ] HORCM_VOLUME_OPTS = [ cfg.ListOpt( 'hpexp_horcm_numbers', default=["200", "201"], help='Instance numbers for HORCM', deprecated_name='hpxp_horcm_numbers'), cfg.StrOpt( 'hpexp_horcm_user', help='Username of storage system for HORCM', deprecated_name='hpxp_horcm_user'), cfg.BoolOpt( 'hpexp_horcm_add_conf', default=True, help='Add to HORCM configuration', deprecated_name='hpxp_horcm_add_conf'), cfg.StrOpt( 'hpexp_horcm_resource_name', default='meta_resource', help='Resource group name of storage system for HORCM', deprecated_name='hpxp_horcm_resource_name'), cfg.BoolOpt( 'hpexp_horcm_name_only_discovery', default=False, help='Only discover a specific name of host group or iSCSI target', deprecated_name='hpxp_horcm_name_only_discovery'), ] CONF = cfg.CONF CONF.register_opts(FC_VOLUME_OPTS) CONF.register_opts(COMMON_VOLUME_OPTS) CONF.register_opts(HORCM_VOLUME_OPTS) cinder-8.0.0/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py0000664000567000056710000024120212701406250025314 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014-2016 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """HPE LeftHand SAN ISCSI REST Proxy. Volume driver for HPE LeftHand Storage array. This driver requires 11.5 or greater firmware on the LeftHand array, using the 2.0 or greater version of the hpelefthandclient. You will need to install the python hpelefthandclient module. sudo pip install python-lefthandclient Set the following in the cinder.conf file to enable the LeftHand iSCSI REST Driver along with the required flags: volume_driver=cinder.volume.drivers.hpe.hpe_lefthand_iscsi. HPELeftHandISCSIDriver It also requires the setting of hpelefthand_api_url, hpelefthand_username, hpelefthand_password for credentials to talk to the REST service on the LeftHand array. 
""" from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.objects import fields from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import utils from cinder.volume import volume_types import math import re import six LOG = logging.getLogger(__name__) hpelefthandclient = importutils.try_import("hpelefthandclient") if hpelefthandclient: from hpelefthandclient import client as hpe_lh_client from hpelefthandclient import exceptions as hpeexceptions hpelefthand_opts = [ cfg.StrOpt('hpelefthand_api_url', default=None, help="HPE LeftHand WSAPI Server Url like " "https://:8081/lhos", deprecated_name='hplefthand_api_url'), cfg.StrOpt('hpelefthand_username', default=None, help="HPE LeftHand Super user username", deprecated_name='hplefthand_username'), cfg.StrOpt('hpelefthand_password', default=None, help="HPE LeftHand Super user password", secret=True, deprecated_name='hplefthand_password'), cfg.StrOpt('hpelefthand_clustername', default=None, help="HPE LeftHand cluster name", deprecated_name='hplefthand_clustername'), cfg.BoolOpt('hpelefthand_iscsi_chap_enabled', default=False, help='Configure CHAP authentication for iSCSI connections ' '(Default: Disabled)', deprecated_name='hplefthand_iscsi_chap_enabled'), cfg.BoolOpt('hpelefthand_debug', default=False, help="Enable HTTP debugging to LeftHand", deprecated_name='hplefthand_debug'), cfg.PortOpt('hpelefthand_ssh_port', default=16022, help="Port number of SSH service."), ] CONF = cfg.CONF CONF.register_opts(hpelefthand_opts) MIN_API_VERSION = "1.1" MIN_CLIENT_VERSION = '2.1.0' # map the extra spec key to the REST client option key extra_specs_key_map = { 'hpelh:provisioning': 'isThinProvisioned', 'hpelh:ao': 'isAdaptiveOptimizationEnabled', 'hpelh:data_pl': 'dataProtectionLevel', 'hplh:provisioning': 'isThinProvisioned', 'hplh:ao': 'isAdaptiveOptimizationEnabled', 'hplh:data_pl': 'dataProtectionLevel', } # map the extra spec value to the REST client option value extra_specs_value_map = { 'isThinProvisioned': {'thin': True, 'full': False}, 'isAdaptiveOptimizationEnabled': {'true': True, 'false': False}, 'dataProtectionLevel': { 'r-0': 0, 'r-5': 1, 'r-10-2': 2, 'r-10-3': 3, 'r-10-4': 4, 'r-6': 5} } class HPELeftHandISCSIDriver(driver.ISCSIDriver): """Executes REST commands relating to HPE/LeftHand SAN ISCSI volumes. Version history: 1.0.0 - Initial REST iSCSI proxy 1.0.1 - Added support for retype 1.0.2 - Added support for volume migrate 1.0.3 - Fixed bug #1285829, HP LeftHand backend assisted migration should check for snapshots 1.0.4 - Fixed bug #1285925, LeftHand AO volume create performance improvement 1.0.5 - Fixed bug #1311350, Live-migration of an instance when attached to a volume was causing an error. 1.0.6 - Removing locks bug #1395953 1.0.7 - Fixed bug #1353137, Server was not removed from the HP Lefthand backend after the last volume was detached. 1.0.8 - Fixed bug #1418201, A cloned volume fails to attach. 1.0.9 - Adding support for manage/unmanage. 
1.0.10 - Add stats for goodness_function and filter_function 1.0.11 - Add over subscription support 1.0.12 - Adds consistency group support 1.0.13 - Added update_migrated_volume #1493546 1.0.14 - Removed the old CLIQ based driver 2.0.0 - Rebranded HP to HPE 2.0.1 - Remove db access for consistency groups 2.0.2 - Adds v2 managed replication support 2.0.3 - Adds v2 unmanaged replication support 2.0.4 - Add manage/unmanage snapshot support 2.0.5 - Changed minimum client version to be 2.1.0 2.0.6 - Update replication to version 2.1 2.0.7 - Fixed bug #1554746, Create clone volume with new size. 2.0.8 - Add defaults for creating a replication client, bug #1556331 """ VERSION = "2.0.8" device_stats = {} # v2 replication constants EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period" EXTRA_SPEC_REP_RETENTION_COUNT = "replication:retention_count" EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT = ( "replication:remote_retention_count") MIN_REP_SYNC_PERIOD = 1800 DEFAULT_RETENTION_COUNT = 5 MAX_RETENTION_COUNT = 50 DEFAULT_REMOTE_RETENTION_COUNT = 5 MAX_REMOTE_RETENTION_COUNT = 50 REP_SNAPSHOT_SUFFIX = "_SS" REP_SCHEDULE_SUFFIX = "_SCHED" FAILBACK_VALUE = 'default' def __init__(self, *args, **kwargs): super(HPELeftHandISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(hpelefthand_opts) self.configuration.append_config_values(san.san_opts) if not self.configuration.hpelefthand_api_url: raise exception.NotFound(_("HPELeftHand url not found")) # blank is the only invalid character for cluster names # so we need to use it as a separator self.DRIVER_LOCATION = self.__class__.__name__ + ' %(cluster)s %(vip)s' self._client_conf = {} self._replication_targets = [] self._replication_enabled = False self._active_backend_id = kwargs.get('active_backend_id', None) def _login(self, timeout=None): conf = self._get_lefthand_config() if conf: self._client_conf['hpelefthand_username'] = ( conf['hpelefthand_username']) self._client_conf['hpelefthand_password'] = ( conf['hpelefthand_password']) self._client_conf['hpelefthand_clustername'] = ( conf['hpelefthand_clustername']) self._client_conf['hpelefthand_api_url'] = ( conf['hpelefthand_api_url']) self._client_conf['hpelefthand_ssh_port'] = ( conf['hpelefthand_ssh_port']) self._client_conf['hpelefthand_iscsi_chap_enabled'] = ( conf['hpelefthand_iscsi_chap_enabled']) self._client_conf['ssh_conn_timeout'] = conf['ssh_conn_timeout'] self._client_conf['san_private_key'] = conf['san_private_key'] else: self._client_conf['hpelefthand_username'] = ( self.configuration.hpelefthand_username) self._client_conf['hpelefthand_password'] = ( self.configuration.hpelefthand_password) self._client_conf['hpelefthand_clustername'] = ( self.configuration.hpelefthand_clustername) self._client_conf['hpelefthand_api_url'] = ( self.configuration.hpelefthand_api_url) self._client_conf['hpelefthand_ssh_port'] = ( self.configuration.hpelefthand_ssh_port) self._client_conf['hpelefthand_iscsi_chap_enabled'] = ( self.configuration.hpelefthand_iscsi_chap_enabled) self._client_conf['ssh_conn_timeout'] = ( self.configuration.ssh_conn_timeout) self._client_conf['san_private_key'] = ( self.configuration.san_private_key) client = self._create_client(timeout=timeout) try: if self.configuration.hpelefthand_debug: client.debug_rest(True) client.login( self._client_conf['hpelefthand_username'], self._client_conf['hpelefthand_password']) cluster_info = client.getClusterByName( self._client_conf['hpelefthand_clustername']) self.cluster_id = cluster_info['id'] virtual_ips = 
cluster_info['virtualIPAddresses'] self.cluster_vip = virtual_ips[0]['ipV4Address'] # Extract IP address from API URL ssh_ip = self._extract_ip_from_url( self._client_conf['hpelefthand_api_url']) known_hosts_file = CONF.ssh_hosts_key_file policy = "AutoAddPolicy" if CONF.strict_ssh_host_key_policy: policy = "RejectPolicy" client.setSSHOptions( ssh_ip, self._client_conf['hpelefthand_username'], self._client_conf['hpelefthand_password'], port=self._client_conf['hpelefthand_ssh_port'], conn_timeout=self._client_conf['ssh_conn_timeout'], privatekey=self._client_conf['san_private_key'], missing_key_policy=policy, known_hosts_file=known_hosts_file) return client except hpeexceptions.HTTPNotFound: raise exception.DriverNotInitialized( _('LeftHand cluster not found')) except Exception as ex: raise exception.DriverNotInitialized(ex) def _logout(self, client): if client is not None: client.logout() def _create_client(self, timeout=None): # Timeout is only supported in version 2.0.1 and greater of the # python-lefthandclient. hpelefthand_api_url = self._client_conf['hpelefthand_api_url'] client = hpe_lh_client.HPELeftHandClient( hpelefthand_api_url, timeout=timeout) return client def _create_replication_client(self, remote_array): cl = hpe_lh_client.HPELeftHandClient( remote_array['hpelefthand_api_url']) try: cl.login( remote_array['hpelefthand_username'], remote_array['hpelefthand_password']) ssh_conn_timeout = remote_array.get('ssh_conn_timeout', 30) san_private_key = remote_array.get('san_private_key', '') # Extract IP address from API URL ssh_ip = self._extract_ip_from_url( remote_array['hpelefthand_api_url']) known_hosts_file = CONF.ssh_hosts_key_file policy = "AutoAddPolicy" if CONF.strict_ssh_host_key_policy: policy = "RejectPolicy" cl.setSSHOptions( ssh_ip, remote_array['hpelefthand_username'], remote_array['hpelefthand_password'], port=remote_array['hpelefthand_ssh_port'], conn_timeout=ssh_conn_timeout, privatekey=san_private_key, missing_key_policy=policy, known_hosts_file=known_hosts_file) return cl except hpeexceptions.HTTPNotFound: raise exception.DriverNotInitialized( _('LeftHand cluster not found')) except Exception as ex: raise exception.DriverNotInitialized(ex) def _destroy_replication_client(self, client): if client is not None: client.logout() def _extract_ip_from_url(self, url): result = re.search("://(.*):", url) ip = result.group(1) return ip def do_setup(self, context): """Set up LeftHand client.""" if hpelefthandclient.version < MIN_CLIENT_VERSION: ex_msg = (_("Invalid hpelefthandclient version found (" "%(found)s). Version %(minimum)s or greater " "required. Run 'pip install --upgrade " "python-lefthandclient' to upgrade the " "hpelefthandclient.") % {'found': hpelefthandclient.version, 'minimum': MIN_CLIENT_VERSION}) LOG.error(ex_msg) raise exception.InvalidInput(reason=ex_msg) self._do_replication_setup() def check_for_setup_error(self): """Checks for incorrect LeftHand API being used on backend.""" client = self._login() try: self.api_version = client.getApiVersion() LOG.info(_LI("HPELeftHand API version %s"), self.api_version) if self.api_version < MIN_API_VERSION: LOG.warning(_LW("HPELeftHand API is version %(current)s. 
" "A minimum version of %(min)s is needed for " "manage/unmanage support."), {'current': self.api_version, 'min': MIN_API_VERSION}) finally: self._logout(client) def check_replication_flags(self, options, required_flags): for flag in required_flags: if not options.get(flag, None): msg = _('%s is not set and is required for the replication ' 'device to be valid.') % flag LOG.error(msg) raise exception.InvalidInput(reason=msg) def get_version_string(self): return (_('REST %(proxy_ver)s hpelefthandclient %(rest_ver)s') % { 'proxy_ver': self.VERSION, 'rest_ver': hpelefthandclient.get_version_string()}) def create_volume(self, volume): """Creates a volume.""" client = self._login() try: # get the extra specs of interest from this volume's volume type volume_extra_specs = self._get_volume_extra_specs(volume) extra_specs = self._get_lh_extra_specs( volume_extra_specs, extra_specs_key_map.keys()) # map the extra specs key/value pairs to key/value pairs # used as optional configuration values by the LeftHand backend optional = self._map_extra_specs(extra_specs) # if provisioning is not set, default to thin if 'isThinProvisioned' not in optional: optional['isThinProvisioned'] = True # AdaptiveOptimization defaults to 'true' if you don't specify the # value on a create, and that is the most efficient way to create # a volume. If you pass in 'false' or 'true' for AO, it will result # in an update operation following the create operation to set this # value, so it is best to not specify the value and let it default # to 'true'. if optional.get('isAdaptiveOptimizationEnabled'): del optional['isAdaptiveOptimizationEnabled'] clusterName = self._client_conf['hpelefthand_clustername'] optional['clusterName'] = clusterName volume_info = client.createVolume( volume['name'], self.cluster_id, volume['size'] * units.Gi, optional) model_update = self._update_provider(volume_info) # v2 replication check if self._volume_of_replicated_type(volume) and ( self._do_volume_replication_setup(volume, client, optional)): model_update['replication_status'] = 'enabled' model_update['replication_driver_data'] = (json.dumps( {'location': self._client_conf['hpelefthand_api_url']})) return model_update except Exception as ex: raise exception.VolumeBackendAPIException(data=ex) finally: self._logout(client) def delete_volume(self, volume): """Deletes a volume.""" client = self._login() # v2 replication check # If the volume type is replication enabled, we want to call our own # method of deconstructing the volume and its dependencies if self._volume_of_replicated_type(volume): self._do_volume_replication_destroy(volume, client) return try: volume_info = client.getVolumeByName(volume['name']) client.deleteVolume(volume_info['id']) except hpeexceptions.HTTPNotFound: LOG.error(_LE("Volume did not exist. 
It will not be deleted")) except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: self._logout(client) def extend_volume(self, volume, new_size): """Extend the size of an existing volume.""" client = self._login() try: volume_info = client.getVolumeByName(volume['name']) # convert GB to bytes options = {'size': int(new_size) * units.Gi} client.modifyVolume(volume_info['id'], options) except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: self._logout(client) def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} return model_update def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistency group from a source""" msg = _("Creating a consistency group from a source is not " "currently supported.") LOG.error(msg) raise NotImplementedError(msg) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" volume_model_updates = [] for volume in volumes: volume_update = {'id': volume.id} try: self.delete_volume(volume) volume_update['status'] = 'deleted' except Exception as ex: LOG.error(_LE("There was an error deleting volume %(id)s: " "%(error)s."), {'id': volume.id, 'error': six.text_type(ex)}) volume_update['status'] = 'error' volume_model_updates.append(volume_update) model_update = {'status': group.status} return model_update, volume_model_updates def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. Because the backend has no concept of volume grouping, cinder will maintain all volume/consistency group relationships. Because of this functionality, there is no need to make any client calls; instead simply returning out of this function allows cinder to properly add/remove volumes from the consistency group. """ return None, None, None def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a consistency group snapshot.""" client = self._login() try: snap_set = [] snapshot_base_name = "snapshot-" + cgsnapshot.id snapshot_model_updates = [] for i, snapshot in enumerate(snapshots): volume = snapshot.volume volume_name = volume['name'] try: volume_info = client.getVolumeByName(volume_name) except Exception as ex: error = six.text_type(ex) LOG.error(_LE("Could not find volume with name %(name)s. " "Error: %(error)s"), {'name': volume_name, 'error': error}) raise exception.VolumeBackendAPIException(data=error) volume_id = volume_info['id'] snapshot_name = snapshot_base_name + "-" + six.text_type(i) snap_set_member = {'volumeName': volume_name, 'volumeId': volume_id, 'snapshotName': snapshot_name} snap_set.append(snap_set_member) snapshot_update = {'id': snapshot['id'], 'status': 'available'} snapshot_model_updates.append(snapshot_update) source_volume_id = snap_set[0]['volumeId'] optional = {'inheritAccess': True} description = cgsnapshot.description if description: optional['description'] = description try: client.createSnapshotSet(source_volume_id, snap_set, optional) except Exception as ex: error = six.text_type(ex) LOG.error(_LE("Could not create snapshot set. 
Error: '%s'"), error) raise exception.VolumeBackendAPIException( data=error) except Exception as ex: raise exception.VolumeBackendAPIException(data=six.text_type(ex)) finally: self._logout(client) model_update = {'status': 'available'} return model_update, snapshot_model_updates def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a consistency group snapshot.""" client = self._login() snap_name_base = "snapshot-" + cgsnapshot.id snapshot_model_updates = [] for i, snapshot in enumerate(snapshots): snapshot_update = {'id': snapshot['id']} try: snap_name = snap_name_base + "-" + six.text_type(i) snap_info = client.getSnapshotByName(snap_name) client.deleteSnapshot(snap_info['id']) snapshot_update['status'] = 'deleted' except hpeexceptions.HTTPServerError as ex: in_use_msg = ('cannot be deleted because it is a clone ' 'point') if in_use_msg in ex.get_description(): LOG.error(_LE("The snapshot cannot be deleted because " "it is a clone point.")) snapshot_update['status'] = 'error' except Exception as ex: LOG.error(_LE("There was an error deleting snapshot %(id)s: " "%(error)."), {'id': snapshot['id'], 'error': six.text_type(ex)}) snapshot_update['status'] = 'error' snapshot_model_updates.append(snapshot_update) self._logout(client) model_update = {'status': cgsnapshot.status} return model_update, snapshot_model_updates def create_snapshot(self, snapshot): """Creates a snapshot.""" client = self._login() try: volume_info = client.getVolumeByName(snapshot['volume_name']) option = {'inheritAccess': True} client.createSnapshot(snapshot['name'], volume_info['id'], option) except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: self._logout(client) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" client = self._login() try: snap_info = client.getSnapshotByName(snapshot['name']) client.deleteSnapshot(snap_info['id']) except hpeexceptions.HTTPNotFound: LOG.error(_LE("Snapshot did not exist. 
It will not be deleted")) except hpeexceptions.HTTPServerError as ex: in_use_msg = 'cannot be deleted because it is a clone point' if in_use_msg in ex.get_description(): raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) raise exception.VolumeBackendAPIException(ex) except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: self._logout(client) def get_volume_stats(self, refresh=False): """Gets volume stats.""" client = self._login() try: if refresh: self._update_backend_status(client) return self.device_stats finally: self._logout(client) def _update_backend_status(self, client): data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['driver_version'] = self.VERSION data['volume_backend_name'] = backend_name or self.__class__.__name__ data['reserved_percentage'] = ( self.configuration.safe_get('reserved_percentage')) data['storage_protocol'] = 'iSCSI' data['vendor_name'] = 'Hewlett Packard Enterprise' data['location_info'] = (self.DRIVER_LOCATION % { 'cluster': self._client_conf['hpelefthand_clustername'], 'vip': self.cluster_vip}) data['thin_provisioning_support'] = True data['thick_provisioning_support'] = True data['max_over_subscription_ratio'] = ( self.configuration.safe_get('max_over_subscription_ratio')) cluster_info = client.getCluster(self.cluster_id) total_capacity = cluster_info['spaceTotal'] free_capacity = cluster_info['spaceAvailable'] # convert to GB data['total_capacity_gb'] = int(total_capacity) / units.Gi data['free_capacity_gb'] = int(free_capacity) / units.Gi # Collect some stats capacity_utilization = ( (float(total_capacity - free_capacity) / float(total_capacity)) * 100) # Don't have a better way to get the total number volumes # so try to limit the size of data for now. Once new lefthand API is # available, replace this call. total_volumes = 0 provisioned_size = 0 volumes = client.getVolumes( cluster=self._client_conf['hpelefthand_clustername'], fields=['members[id]', 'members[clusterName]', 'members[size]']) if volumes: total_volumes = volumes['total'] provisioned_size = sum( members['size'] for members in volumes['members']) data['provisioned_capacity_gb'] = int(provisioned_size) / units.Gi data['capacity_utilization'] = capacity_utilization data['total_volumes'] = total_volumes data['filter_function'] = self.get_filter_function() data['goodness_function'] = self.get_goodness_function() data['consistencygroup_support'] = True data['replication_enabled'] = self._replication_enabled data['replication_type'] = ['periodic'] data['replication_count'] = len(self._replication_targets) data['replication_targets'] = self._get_replication_targets() self.device_stats = data def initialize_connection(self, volume, connector): """Assigns the volume to a server. Assign any created volume to a compute node/host so that it can be used from that host. HPE VSA requires a volume to be assigned to a server. """ client = self._login() try: server_info = self._create_server(connector, client) volume_info = client.getVolumeByName(volume['name']) access_already_enabled = False if volume_info['iscsiSessions'] is not None: # Extract the server id for each session to check if the # new server already has access permissions enabled. 
for session in volume_info['iscsiSessions']: server_id = int(session['server']['uri'].split('/')[3]) if server_id == server_info['id']: access_already_enabled = True break if not access_already_enabled: client.addServerAccess( volume_info['id'], server_info['id']) iscsi_properties = self._get_iscsi_properties(volume) if ('chapAuthenticationRequired' in server_info and server_info['chapAuthenticationRequired']): iscsi_properties['auth_method'] = 'CHAP' iscsi_properties['auth_username'] = connector['initiator'] iscsi_properties['auth_password'] = ( server_info['chapTargetSecret']) return {'driver_volume_type': 'iscsi', 'data': iscsi_properties} except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: self._logout(client) def terminate_connection(self, volume, connector, **kwargs): """Unassign the volume from the host.""" client = self._login() try: volume_info = client.getVolumeByName(volume['name']) server_info = client.getServerByName(connector['host']) volume_list = client.findServerVolumes(server_info['name']) removeServer = True for entry in volume_list: if entry['id'] != volume_info['id']: removeServer = False break client.removeServerAccess( volume_info['id'], server_info['id']) if removeServer: client.deleteServer(server_info['id']) except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: self._logout(client) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" client = self._login() try: snap_info = client.getSnapshotByName(snapshot['name']) volume_info = client.cloneSnapshot( volume['name'], snap_info['id']) model_update = self._update_provider(volume_info) # v2 replication check if self._volume_of_replicated_type(volume) and ( self._do_volume_replication_setup(volume, client)): model_update['replication_status'] = 'enabled' model_update['replication_driver_data'] = (json.dumps( {'location': self._client_conf['hpelefthand_api_url']})) return model_update except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: self._logout(client) def create_cloned_volume(self, volume, src_vref): client = self._login() try: volume_info = client.getVolumeByName(src_vref['name']) clone_info = client.cloneVolume(volume['name'], volume_info['id']) # Extend volume if volume['size'] > src_vref['size']: LOG.debug("Resize the new volume to %s.", volume['size']) self.extend_volume(volume, volume['size']) model_update = self._update_provider(clone_info) # v2 replication check if self._volume_of_replicated_type(volume) and ( self._do_volume_replication_setup(volume, client)): model_update['replication_status'] = 'enabled' model_update['replication_driver_data'] = (json.dumps( {'location': self._client_conf['hpelefthand_api_url']})) return model_update except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: self._logout(client) def _get_volume_extra_specs(self, volume): """Get extra specs from a volume.""" extra_specs = {} type_id = volume.get('volume_type_id', None) if type_id is not None: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) extra_specs = volume_type.get('extra_specs') return extra_specs def _get_lh_extra_specs(self, extra_specs, valid_keys): """Get LeftHand extra_specs (valid_keys only).""" extra_specs_of_interest = {} for key, value in extra_specs.items(): if key in valid_keys: prefix = key.split(":") if prefix[0] == "hplh": LOG.warning(_LW("The 'hplh' prefix is deprecated. 
Use " "'hpelh' instead.")) extra_specs_of_interest[key] = value return extra_specs_of_interest def _map_extra_specs(self, extra_specs): """Map the extra spec key/values to LeftHand key/values.""" client_options = {} for key, value in extra_specs.items(): # map extra spec key to lh client option key client_key = extra_specs_key_map[key] # map extra spect value to lh client option value try: value_map = extra_specs_value_map[client_key] # an invalid value will throw KeyError client_value = value_map[value] client_options[client_key] = client_value except KeyError: LOG.error(_LE("'%(value)s' is an invalid value " "for extra spec '%(key)s'"), {'value': value, 'key': key}) return client_options def _update_provider(self, volume_info, cluster_vip=None): if not cluster_vip: cluster_vip = self.cluster_vip # TODO(justinsb): Is this always 1? Does it matter? cluster_interface = '1' iscsi_portal = cluster_vip + ":3260," + cluster_interface return {'provider_location': ( "%s %s %s" % (iscsi_portal, volume_info['iscsiIqn'], 0))} def _create_server(self, connector, client): server_info = None chap_enabled = self._client_conf['hpelefthand_iscsi_chap_enabled'] try: server_info = client.getServerByName(connector['host']) chap_secret = server_info['chapTargetSecret'] if not chap_enabled and chap_secret: LOG.warning(_LW('CHAP secret exists for host %s but CHAP is ' 'disabled'), connector['host']) if chap_enabled and chap_secret is None: LOG.warning(_LW('CHAP is enabled, but server secret not ' 'configured on server %s'), connector['host']) return server_info except hpeexceptions.HTTPNotFound: # server does not exist, so create one pass optional = None if chap_enabled: chap_secret = utils.generate_password() optional = {'chapName': connector['initiator'], 'chapTargetSecret': chap_secret, 'chapAuthenticationRequired': True } server_info = client.createServer(connector['host'], connector['initiator'], optional) return server_info def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to retype :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
""" LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,' 'diff=%(diff)s, host=%(host)s', {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host}) client = self._login() try: volume_info = client.getVolumeByName(volume['name']) # pick out the LH extra specs new_extra_specs = dict(new_type).get('extra_specs') lh_extra_specs = self._get_lh_extra_specs( new_extra_specs, extra_specs_key_map.keys()) LOG.debug('LH specs=%(specs)s', {'specs': lh_extra_specs}) # only set the ones that have changed changed_extra_specs = {} for key, value in lh_extra_specs.items(): (old, new) = diff['extra_specs'][key] if old != new: changed_extra_specs[key] = value # map extra specs to LeftHand options options = self._map_extra_specs(changed_extra_specs) if len(options) > 0: client.modifyVolume(volume_info['id'], options) return True except hpeexceptions.HTTPNotFound: raise exception.VolumeNotFound(volume_id=volume['id']) except Exception as ex: LOG.warning(_LW("%s"), ex) finally: self._logout(client) return False def migrate_volume(self, ctxt, volume, host): """Migrate the volume to the specified host. Backend assisted volume migration will occur if and only if; 1. Same LeftHand backend 2. Volume cannot be attached 3. Volumes with snapshots cannot be migrated 4. Source and Destination clusters must be in the same management group Volume re-type is not supported. Returns a boolean indicating whether the migration occurred, as well as model_update. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ false_ret = (False, None) if 'location_info' not in host['capabilities']: return false_ret host_location = host['capabilities']['location_info'] (driver, cluster, vip) = host_location.split(' ') client = self._login() LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, ' 'cluster=%(cluster)s', { 'id': volume['id'], 'host': host, 'cluster': self._client_conf['hpelefthand_clustername']}) try: # get the cluster info, if it exists and compare cluster_info = client.getClusterByName(cluster) LOG.debug('Cluster info: %s', cluster_info) virtual_ips = cluster_info['virtualIPAddresses'] if driver != self.__class__.__name__: LOG.info(_LI("Cannot provide backend assisted migration for " "volume: %s because volume is from a different " "backend."), volume['name']) return false_ret if vip != virtual_ips[0]['ipV4Address']: LOG.info(_LI("Cannot provide backend assisted migration for " "volume: %s because cluster exists in different " "management group."), volume['name']) return false_ret except hpeexceptions.HTTPNotFound: LOG.info(_LI("Cannot provide backend assisted migration for " "volume: %s because cluster exists in different " "management group."), volume['name']) return false_ret finally: self._logout(client) client = self._login() try: volume_info = client.getVolumeByName(volume['name']) LOG.debug('Volume info: %s', volume_info) # can't migrate if server is attached if volume_info['iscsiSessions'] is not None: LOG.info(_LI("Cannot provide backend assisted migration " "for volume: %s because the volume has been " "exported."), volume['name']) return false_ret # can't migrate if volume has snapshots snap_info = client.getVolume( volume_info['id'], 'fields=snapshots,snapshots[resource[members[name]]]') LOG.debug('Snapshot info: %s', snap_info) if snap_info['snapshots']['resource'] is not None: 
LOG.info(_LI("Cannot provide backend assisted migration " "for volume: %s because the volume has " "snapshots."), volume['name']) return false_ret options = {'clusterName': cluster} client.modifyVolume(volume_info['id'], options) except hpeexceptions.HTTPNotFound: LOG.info(_LI("Cannot provide backend assisted migration for " "volume: %s because volume does not exist in this " "management group."), volume['name']) return false_ret except hpeexceptions.HTTPServerError as ex: LOG.error(_LE("Exception: %s"), ex) return false_ret finally: self._logout(client) return (True, None) def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Rename the new (temp) volume to it's original name. This method tries to rename the new volume to it's original name after the migration has completed. """ LOG.debug("Update volume name for %(id)s.", {'id': new_volume['id']}) name_id = None provider_location = None if original_volume_status == 'available': # volume isn't attached and can be updated original_name = CONF.volume_name_template % volume['id'] current_name = CONF.volume_name_template % new_volume['id'] client = self._login() try: volume_info = client.getVolumeByName(current_name) volumeMods = {'name': original_name} client.modifyVolume(volume_info['id'], volumeMods) LOG.info(_LI("Volume name changed from %(tmp)s to %(orig)s."), {'tmp': current_name, 'orig': original_name}) except Exception as e: LOG.error(_LE("Changing the volume name from %(tmp)s to " "%(orig)s failed because %(reason)s."), {'tmp': current_name, 'orig': original_name, 'reason': e}) name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] finally: self._logout(client) else: # the backend can't change the name. name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': provider_location} def manage_existing(self, volume, existing_ref): """Manage an existing LeftHand volume. existing_ref is a dictionary of the form: {'source-name': } """ # Check API Version self._check_api_version() target_vol_name = self._get_existing_volume_ref_name(existing_ref) # Check for the existence of the virtual volume. client = self._login() try: volume_info = client.getVolumeByName(target_vol_name) except hpeexceptions.HTTPNotFound: err = (_("Virtual volume '%s' doesn't exist on array.") % target_vol_name) LOG.error(err) raise exception.InvalidInput(reason=err) finally: self._logout(client) # Generate the new volume information based on the new ID. new_vol_name = 'volume-' + volume['id'] volume_type = None if volume['volume_type_id']: try: volume_type = self._get_volume_type(volume['volume_type_id']) except Exception: reason = (_("Volume type ID '%s' is invalid.") % volume['volume_type_id']) raise exception.ManageExistingVolumeTypeMismatch(reason=reason) new_vals = {"name": new_vol_name} client = self._login() try: # Update the existing volume with the new name. 
client.modifyVolume(volume_info['id'], new_vals) finally: self._logout(client) LOG.info(_LI("Virtual volume '%(ref)s' renamed to '%(new)s'."), {'ref': existing_ref['source-name'], 'new': new_vol_name}) display_name = None if volume['display_name']: display_name = volume['display_name'] if volume_type: LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " "being retyped."), {'disp': display_name, 'new': new_vol_name}) try: self.retype(None, volume, volume_type, volume_type['extra_specs'], volume['host']) LOG.info(_LI("Virtual volume %(disp)s successfully retyped to " "%(new_type)s."), {'disp': display_name, 'new_type': volume_type.get('name')}) except Exception: with excutils.save_and_reraise_exception(): LOG.warning(_LW("Failed to manage virtual volume %(disp)s " "due to error during retype."), {'disp': display_name}) # Try to undo the rename and clear the new comment. client = self._login() try: client.modifyVolume( volume_info['id'], {'name': target_vol_name}) finally: self._logout(client) updates = {'display_name': display_name} LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " "now being managed."), {'disp': display_name, 'new': new_vol_name}) # Return display name to update the name displayed in the GUI and # any model updates from retype. return updates def manage_existing_snapshot(self, snapshot, existing_ref): """Manage an existing LeftHand snapshot. existing_ref is a dictionary of the form: {'source-name': } """ # Check API Version self._check_api_version() # Potential parent volume for the snapshot volume = snapshot['volume'] if volume.get('replication_status') == 'failed-over': err = (_("Managing of snapshots to failed-over volumes is " "not allowed.")) raise exception.InvalidInput(reason=err) target_snap_name = self._get_existing_volume_ref_name(existing_ref) # Check for the existence of the virtual volume. client = self._login() try: updates = self._manage_snapshot(client, volume, snapshot, target_snap_name, existing_ref) finally: self._logout(client) # Return display name to update the name displayed in the GUI and # any model updates from retype. return updates def _manage_snapshot(self, client, volume, snapshot, target_snap_name, existing_ref): # Check for the existence of the virtual volume. try: snapshot_info = client.getSnapshotByName(target_snap_name) except hpeexceptions.HTTPNotFound: err = (_("Snapshot '%s' doesn't exist on array.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) # Make sure the snapshot is being associated with the correct volume. try: parent_vol = client.getSnapshotParentVolume(target_snap_name) except hpeexceptions.HTTPNotFound: err = (_("Could not find the parent volume for Snapshot '%s' on " "array.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) parent_vol_name = 'volume-' + snapshot['volume_id'] if parent_vol_name != parent_vol['name']: err = (_("The provided snapshot '%s' is not a snapshot of " "the provided volume.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) # Generate the new snapshot information based on the new ID. new_snap_name = 'snapshot-' + snapshot['id'] new_vals = {"name": new_snap_name} try: # Update the existing snapshot with the new name. 
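# Illustrative sketch (hypothetical ids, not part of the driver): the
# parent check above ties the snapshot to its expected volume before the
# rename is attempted:
#
#     snapshot = {'volume_id': 'abc', 'id': 'def'}
#     parent_vol = {'name': 'volume-abc'}     # as reported by the array
#     assert 'volume-' + snapshot['volume_id'] == parent_vol['name']
#     new_snap_name = 'snapshot-' + snapshot['id']    # -> 'snapshot-def'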
client.modifySnapshot(snapshot_info['id'], new_vals) except hpeexceptions.HTTPServerError: err = (_("An error occurred while attempting to modify " "Snapshot '%s'.") % snapshot_info['id']) LOG.error(err) LOG.info(_LI("Snapshot '%(ref)s' renamed to '%(new)s'."), {'ref': existing_ref['source-name'], 'new': new_snap_name}) display_name = None if snapshot['display_name']: display_name = snapshot['display_name'] updates = {'display_name': display_name} LOG.info(_LI("Snapshot %(disp)s '%(new)s' is " "now being managed."), {'disp': display_name, 'new': new_snap_name}) return updates def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. existing_ref is a dictionary of the form: {'source-name': <vol-name>} """ # Check API version. self._check_api_version() target_vol_name = self._get_existing_volume_ref_name(existing_ref) # Make sure the reference is not in use. if re.match('volume-.*|snapshot-.*', target_vol_name): reason = _("Reference must be the volume name of an unmanaged " "virtual volume.") raise exception.ManageExistingInvalidReference( existing_ref=target_vol_name, reason=reason) # Check for the existence of the virtual volume. client = self._login() try: volume_info = client.getVolumeByName(target_vol_name) except hpeexceptions.HTTPNotFound: err = (_("Virtual volume '%s' doesn't exist on array.") % target_vol_name) LOG.error(err) raise exception.InvalidInput(reason=err) finally: self._logout(client) return int(math.ceil(float(volume_info['size']) / units.Gi)) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing_snapshot. existing_ref is a dictionary of the form: {'source-name': <snapshot-name>} """ # Check API version. self._check_api_version() target_snap_name = self._get_existing_volume_ref_name(existing_ref) # Make sure the reference is not in use. if re.match('volume-.*|snapshot-.*|unm-.*', target_snap_name): reason = _("Reference must be the name of an unmanaged " "snapshot.") raise exception.ManageExistingInvalidReference( existing_ref=target_snap_name, reason=reason) # Check for the existence of the virtual volume. client = self._login() try: snapshot_info = client.getSnapshotByName(target_snap_name) except hpeexceptions.HTTPNotFound: err = (_("Snapshot '%s' doesn't exist on array.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) finally: self._logout(client) return int(math.ceil(float(snapshot_info['size']) / units.Gi)) def unmanage(self, volume): """Removes the specified volume from Cinder management.""" # Check API version. self._check_api_version() # Rename the volume to the unm-* format so that it can be # easily found later. client = self._login() try: volume_info = client.getVolumeByName(volume['name']) new_vol_name = 'unm-' + six.text_type(volume['id']) options = {'name': new_vol_name} client.modifyVolume(volume_info['id'], options) finally: self._logout(client) LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no longer managed. " "Volume renamed to '%(new)s'."), {'disp': volume['display_name'], 'vol': volume['name'], 'new': new_vol_name}) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management.""" # Check API version. 
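# Illustrative sketch (not part of the driver): the get-size methods above
# round the array-reported byte count up to whole GiB, since Cinder sizes
# are integral gigabytes:
#
#     import math
#     from oslo_utils import units
#     size_bytes = 5 * units.Gi + 1                  # just over 5 GiB
#     int(math.ceil(float(size_bytes) / units.Gi))   # -> 6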
self._check_api_version() # Potential parent volume for the snapshot volume = snapshot['volume'] if volume.get('replication_status') == 'failed-over': err = (_("Unmanaging of snapshots from 'failed-over' volumes is " "not allowed.")) LOG.error(err) # TODO(leeantho) Change this exception to Invalid when the volume # manager supports handling that. raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) # Rename the snapshot to the ums-* format so that it can be # easily found later. client = self._login() try: snapshot_info = client.getSnapshotByName(snapshot['name']) new_snap_name = 'ums-' + six.text_type(snapshot['id']) options = {'name': new_snap_name} client.modifySnapshot(snapshot_info['id'], options) LOG.info(_LI("Snapshot %(disp)s '%(vol)s' is no longer managed. " "Snapshot renamed to '%(new)s'."), {'disp': snapshot['display_name'], 'vol': snapshot['name'], 'new': new_snap_name}) finally: self._logout(client) def _get_existing_volume_ref_name(self, existing_ref): """Returns the volume name of an existing reference. Checks if an existing volume reference has a source-name element. If source-name is not present an error will be thrown. """ if 'source-name' not in existing_ref: reason = _("Reference must contain source-name.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return existing_ref['source-name'] def _check_api_version(self): """Checks that the API version is correct.""" if (self.api_version < MIN_API_VERSION): ex_msg = (_('Invalid HPELeftHand API version found: %(found)s. ' 'Version %(minimum)s or greater required for ' 'manage/unmanage support.') % {'found': self.api_version, 'minimum': MIN_API_VERSION}) LOG.error(ex_msg) raise exception.InvalidInput(reason=ex_msg) def _get_volume_type(self, type_id): ctxt = context.get_admin_context() return volume_types.get_volume_type(ctxt, type_id) # v2 replication methods def failover_host(self, context, volumes, secondary_backend_id): """Force failover to a secondary replication target.""" if secondary_backend_id == self.FAILBACK_VALUE: volume_update_list = self._replication_failback(volumes) target_id = None else: failover_target = None for target in self._replication_targets: if target['backend_id'] == secondary_backend_id: failover_target = target break if not failover_target: msg = _("A valid secondary target MUST be specified in order " "to fail over.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) target_id = failover_target['backend_id'] volume_update_list = [] for volume in volumes: if self._volume_of_replicated_type(volume): # Try to stop the remote snapshot schedule. If the primary # array is down, we will continue with the failover. client = None try: client = self._login(timeout=30) name = volume['name'] + self.REP_SCHEDULE_SUFFIX + ( "_Pri") client.stopRemoteSnapshotSchedule(name) except Exception: LOG.warning(_LW("The primary array is currently " "offline, remote copy has been " "automatically paused.")) finally: self._logout(client) # Update provider location to the new array. cl = None try: cl = self._create_replication_client(failover_target) # Stop snapshot schedule try: name = volume['name'] + ( self.REP_SCHEDULE_SUFFIX + "_Rmt") cl.stopRemoteSnapshotSchedule(name) except Exception: pass # Make the volume primary so it can be attached after a # fail-over. cl.makeVolumePrimary(volume['name']) # Update the provider info for a proper fail-over. 
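# Illustrative sketch (hypothetical VIP and IQN; format taken from
# _update_provider): the provider_location recorded after a fail-over has
# the shape '<vip>:3260,<interface> <target-iqn> <lun>', e.g.
#
#     '10.0.0.2:3260,1 iqn.2003-10.com.lefthandnetworks:mg:71:volume-x 0'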
volume_info = cl.getVolumeByName(volume['name']) prov_location = self._update_provider( volume_info, cluster_vip=failover_target['cluster_vip']) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'failed-over', 'provider_location': prov_location['provider_location']}}) except Exception as ex: msg = (_LE("There was a problem with the failover " "(%(error)s) and it was unsuccessful. " "Volume '%(volume)s will not be available " "on the failed over target."), {'error': six.text_type(ex), 'volume': volume['id']}) LOG.error(msg) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'error'}}) finally: self._destroy_replication_client(cl) else: # If the volume is not of replicated type, we need to # force the status into error state so a user knows they # do not have access to the volume. volume_update_list.append( {'volume_id': volume['id'], 'updates': {'status': 'error'}}) self._active_backend_id = target_id return target_id, volume_update_list def _do_replication_setup(self): default_san_ssh_port = self.configuration.hpelefthand_ssh_port default_ssh_conn_timeout = self.configuration.ssh_conn_timeout default_san_private_key = self.configuration.san_private_key replication_targets = [] replication_devices = self.configuration.replication_device if replication_devices: # We do not want to fail if we cannot log into the client here # as a failover can still occur, so we need out replication # devices to exist. for dev in replication_devices: remote_array = dict(dev.items()) # Override and set defaults for certain entries remote_array['managed_backend_name'] = ( dev.get('managed_backend_name')) remote_array['hpelefthand_ssh_port'] = ( dev.get('hpelefthand_ssh_port', default_san_ssh_port)) remote_array['ssh_conn_timeout'] = ( dev.get('ssh_conn_timeout', default_ssh_conn_timeout)) remote_array['san_private_key'] = ( dev.get('san_private_key', default_san_private_key)) # Format hpe3par_iscsi_chap_enabled as a bool remote_array['hpelefthand_iscsi_chap_enabled'] = ( dev.get('hpelefthand_iscsi_chap_enabled') == 'True') remote_array['cluster_id'] = None remote_array['cluster_vip'] = None array_name = remote_array['backend_id'] # Make sure we can log into the array, that it has been # correctly configured, and its API version meets the # minimum requirement. cl = None try: cl = self._create_replication_client(remote_array) api_version = cl.getApiVersion() cluster_info = cl.getClusterByName( remote_array['hpelefthand_clustername']) remote_array['cluster_id'] = cluster_info['id'] virtual_ips = cluster_info['virtualIPAddresses'] remote_array['cluster_vip'] = virtual_ips[0]['ipV4Address'] if api_version < MIN_API_VERSION: msg = (_LW("The secondary array must have an API " "version of %(min_ver)s or higher. " "Array '%(target)s' is on %(target_ver)s, " "therefore it will not be added as a valid " "replication target.") % {'min_ver': MIN_API_VERSION, 'target': array_name, 'target_ver': api_version}) LOG.warning(msg) elif not self._is_valid_replication_array(remote_array): msg = (_LW("'%s' is not a valid replication array. " "In order to be valid, backend_id, " "hpelefthand_api_url, " "hpelefthand_username, " "hpelefthand_password, and " "hpelefthand_clustername, " "must be specified. 
If the target is " "managed, managed_backend_name must be set " "as well.") % array_name) LOG.warning(msg) else: replication_targets.append(remote_array) except Exception: msg = (_LE("Could not log in to LeftHand array (%s) with " "the provided credentials.") % array_name) LOG.error(msg) finally: self._destroy_replication_client(cl) self._replication_targets = replication_targets if self._is_replication_configured_correct(): self._replication_enabled = True def _replication_failback(self, volumes): array_config = {'hpelefthand_api_url': self.configuration.hpelefthand_api_url, 'hpelefthand_username': self.configuration.hpelefthand_username, 'hpelefthand_password': self.configuration.hpelefthand_password, 'hpelefthand_ssh_port': self.configuration.hpelefthand_ssh_port} # Make sure the proper steps on the backend have been completed before # we allow a failback. if not self._is_host_ready_for_failback(volumes, array_config): msg = _("The host is not ready to be failed back. Please " "resynchronize the volumes and resume replication on the " "LeftHand backends.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) cl = None volume_update_list = [] for volume in volumes: if self._volume_of_replicated_type(volume): try: cl = self._create_replication_client(array_config) # Update the provider info for a proper fail-back. volume_info = cl.getVolumeByName(volume['name']) cluster_info = cl.getClusterByName( self.configuration.hpelefthand_clustername) virtual_ips = cluster_info['virtualIPAddresses'] cluster_vip = virtual_ips[0]['ipV4Address'] provider_location = self._update_provider( volume_info, cluster_vip=cluster_vip) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'available', 'provider_location': provider_location['provider_location']}}) except Exception as ex: # The secondary array was not able to execute the fail-back # properly. The replication status is now in an unknown # state, so we will treat it as an error. msg = (_LE("There was a problem with the failover " "(%(error)s) and it was unsuccessful. " "Volume '%(volume)s will not be available " "on the failed over target."), {'error': six.text_type(ex), 'volume': volume['id']}) LOG.error(msg) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'error'}}) finally: self._destroy_replication_client(cl) else: # Upon failing back, we can move the non-replicated volumes # back into available state. volume_update_list.append( {'volume_id': volume['id'], 'updates': {'status': 'available'}}) return volume_update_list def _is_host_ready_for_failback(self, volumes, array_config): """Checks to make sure the volumes have been synchronized This entails ensuring the remote snapshot schedule has been resumed on the backends and the secondary volume's data has been copied back to the primary. """ is_ready = True cl = None try: for volume in volumes: if self._volume_of_replicated_type(volume): schedule_name = volume['name'] + ( self.REP_SCHEDULE_SUFFIX + "_Pri") cl = self._create_replication_client(array_config) schedule = cl.getRemoteSnapshotSchedule(schedule_name) schedule = ''.join(schedule) # We need to check the status of the schedule to make sure # it is not paused. 
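# Illustrative sketch (the full schedule text is an assumption; only the
# 'paused <true|false>' field matters to the regex that follows):
#
#     import re
#     schedule = "... volume-x_SCHED_Pri ... paused false ..."  # hypothetical
#     m = re.search(r".*paused\s+(\w+)", schedule)
#     m.group(1)   # -> 'false', i.e. the schedule is still active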
result = re.search(r".*paused\s+(\w+)", schedule) is_schedule_active = result.group(1) == 'false' volume_info = cl.getVolumeByName(volume['name']) if not volume_info['isPrimary'] or not is_schedule_active: is_ready = False break except Exception as ex: LOG.error(_LE("There was a problem when trying to determine if " "the volume can be failed-back: %s"), six.text_type(ex)) is_ready = False finally: self._destroy_replication_client(cl) return is_ready def _get_replication_targets(self): replication_targets = [] for target in self._replication_targets: replication_targets.append(target['backend_id']) return replication_targets def _is_valid_replication_array(self, target): required_flags = ['hpelefthand_api_url', 'hpelefthand_username', 'hpelefthand_password', 'backend_id', 'hpelefthand_clustername'] try: self.check_replication_flags(target, required_flags) return True except Exception: return False def _is_replication_configured_correct(self): rep_flag = True # Make sure there is at least one replication target. if len(self._replication_targets) < 1: LOG.error(_LE("There must be at least one valid replication " "device configured.")) rep_flag = False return rep_flag def _volume_of_replicated_type(self, volume): replicated_type = False volume_type_id = volume.get('volume_type_id') if volume_type_id: volume_type = self._get_volume_type(volume_type_id) extra_specs = volume_type.get('extra_specs') if extra_specs and 'replication_enabled' in extra_specs: rep_val = extra_specs['replication_enabled'] replicated_type = (rep_val == "<is> True") return replicated_type def _does_snapshot_schedule_exist(self, schedule_name, client): try: exists = client.doesRemoteSnapshotScheduleExist(schedule_name) except Exception: exists = False return exists def _get_lefthand_config(self): conf = None for target in self._replication_targets: if target['backend_id'] == self._active_backend_id: conf = target break return conf def _do_volume_replication_setup(self, volume, client, optional=None): """This function will do or ensure the following: -Create volume on main array (already done in create_volume) -Create volume on secondary array -Make volume remote on secondary array -Create the snapshot schedule If anything here fails, we will need to clean everything up in reverse order, including the original volume. """ schedule_name = volume['name'] + self.REP_SCHEDULE_SUFFIX # If there is already a snapshot schedule, the volume is set up # for replication on the backend. Start the schedule and return # success. if self._does_snapshot_schedule_exist(schedule_name + "_Pri", client): try: client.startRemoteSnapshotSchedule(schedule_name + "_Pri") except Exception: pass return True # Grab the extra_spec entries for replication and make sure they # are set correctly. volume_type = self._get_volume_type(volume["volume_type_id"]) extra_specs = volume_type.get("extra_specs") # Get and check replication sync period replication_sync_period = extra_specs.get( self.EXTRA_SPEC_REP_SYNC_PERIOD) if replication_sync_period: replication_sync_period = int(replication_sync_period) if replication_sync_period < self.MIN_REP_SYNC_PERIOD: msg = (_("The replication sync period must be at least %s " "seconds.") % self.MIN_REP_SYNC_PERIOD) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: # If there is no extra_spec value for replication sync period, we # will default it to the required minimum and log a warning. 
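# Illustrative sketch (CLI usage; the literal spec-key strings are an
# assumption -- the EXTRA_SPEC_REP_* constants on this class are the
# authoritative names). A replicated volume type would be set up roughly
# like this:
#
#     cinder type-create replicated-lh
#     cinder type-key replicated-lh set \
#         replication_enabled='<is> True' \
#         replication:sync_period='1800'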
replication_sync_period = self.MIN_REP_SYNC_PERIOD LOG.warning(_LW("There was no extra_spec value for %(spec_name)s, " "so the default value of %(def_val)s will be " "used. To override this, set this value in the " "volume type extra_specs."), {'spec_name': self.EXTRA_SPEC_REP_SYNC_PERIOD, 'def_val': self.MIN_REP_SYNC_PERIOD}) # Get and check retention count retention_count = extra_specs.get( self.EXTRA_SPEC_REP_RETENTION_COUNT) if retention_count: retention_count = int(retention_count) if retention_count > self.MAX_RETENTION_COUNT: msg = (_("The retention count must be %s or less.") % self.MAX_RETENTION_COUNT) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: # If there is no extra_spec value for retention count, we # will default it and log a warning. retention_count = self.DEFAULT_RETENTION_COUNT LOG.warning(_LW("There was no extra_spec value for %(spec_name)s, " "so the default value of %(def_val)s will be " "used. To override this, set this value in the " "volume type extra_specs."), {'spec_name': self.EXTRA_SPEC_REP_RETENTION_COUNT, 'def_val': self.DEFAULT_RETENTION_COUNT}) # Get and check remote retention count remote_retention_count = extra_specs.get( self.EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT) if remote_retention_count: remote_retention_count = int(remote_retention_count) if remote_retention_count > self.MAX_REMOTE_RETENTION_COUNT: msg = (_("The remote retention count must be %s or less.") % self.MAX_REMOTE_RETENTION_COUNT) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: # If there is no extra_spec value for remote retention count, we # will default it and log a warning. remote_retention_count = self.DEFAULT_REMOTE_RETENTION_COUNT spec_name = self.EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT LOG.warning(_LW("There was no extra_spec value for %(spec_name)s, " "so the default value of %(def_val)s will be " "used. To override this, set this value in the " "volume type extra_specs."), {'spec_name': spec_name, 'def_val': self.DEFAULT_REMOTE_RETENTION_COUNT}) cl = None try: # Create volume on secondary system for remote_target in self._replication_targets: cl = self._create_replication_client(remote_target) if optional: optional['clusterName'] = ( remote_target['hpelefthand_clustername']) cl.createVolume(volume['name'], remote_target['cluster_id'], volume['size'] * units.Gi, optional) # Make secondary volume a remote volume # NOTE: The snapshot created when making a volume remote is # not managed by cinder. This snapshot will be removed when # _do_volume_replication_destroy is called. snap_name = volume['name'] + self.REP_SNAPSHOT_SUFFIX cl.makeVolumeRemote(volume['name'], snap_name) # A remote IP address is needed from the cluster in order to # create the snapshot schedule. remote_ip = cl.getIPFromCluster( remote_target['hpelefthand_clustername']) # Destroy remote client self._destroy_replication_client(cl) # Create remote snapshot schedule on the primary system. # We want to start the remote snapshot schedule instantly; a # date in the past will do that. We will use the Linux epoch # date formatted to ISO 8601 (YYYY-MM-DDTHH:MM:SSZ). 
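# Illustrative sketch (not part of the driver): the constant below is the
# Unix epoch rendered in the ISO 8601 shape described above:
#
#     from datetime import datetime
#     datetime.utcfromtimestamp(0).strftime('%Y-%m-%dT%H:%M:%SZ')
#     # -> '1970-01-01T00:00:00Z'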
start_date = "1970-01-01T00:00:00Z" remote_vol_name = volume['name'] client.createRemoteSnapshotSchedule( volume['name'], schedule_name, replication_sync_period, start_date, retention_count, remote_target['hpelefthand_clustername'], remote_retention_count, remote_vol_name, remote_ip, remote_target['hpelefthand_username'], remote_target['hpelefthand_password']) return True except Exception as ex: # Destroy the replication client that was created self._destroy_replication_client(cl) # Deconstruct what we tried to create self._do_volume_replication_destroy(volume, client) msg = (_("There was an error setting up a remote schedule " "on the LeftHand arrays: ('%s'). The volume will not be " "recognized as replication type.") % six.text_type(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _do_volume_replication_destroy(self, volume, client): """This will remove all dependencies of a replicated volume It should be used when deleting a replication enabled volume or if setting up a remote copy group fails. It will try and do the following: -Delete the snapshot schedule -Delete volume and snapshots on secondary array -Delete volume and snapshots on primary array """ # Delete snapshot schedule try: schedule_name = volume['name'] + self.REP_SCHEDULE_SUFFIX client.deleteRemoteSnapshotSchedule(schedule_name) except Exception: pass # Delete volume on secondary array(s) remote_vol_name = volume['name'] for remote_target in self._replication_targets: try: cl = self._create_replication_client(remote_target) volume_info = cl.getVolumeByName(remote_vol_name) cl.deleteVolume(volume_info['id']) except Exception: pass finally: # Destroy the replication client that was created self._destroy_replication_client(cl) # Delete volume on primary array try: volume_info = client.getVolumeByName(volume['name']) client.deleteVolume(volume_info['id']) except Exception: pass cinder-8.0.0/cinder/volume/drivers/hpe/hpe_xp_fc.py0000664000567000056710000001245112701406250023436 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014-2015, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fibre channel Cinder volume driver for Hewlett Packard Enterprise storage. 
""" from oslo_utils import importutils from cinder.volume import driver from cinder.volume.drivers.hpe import hpe_xp_opts as opts from cinder.zonemanager import utils as fczm_utils _DRIVER_DIR = 'cinder.volume.drivers.hpe' _DRIVER_CLASS = 'hpe_xp_horcm_fc.HPEXPHORCMFC' class HPEXPFCDriver(driver.FibreChannelDriver): """OpenStack Fibre Channel driver to enable HPE XP storage.""" def __init__(self, *args, **kwargs): """Initialize the driver.""" super(HPEXPFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(opts.FC_VOLUME_OPTS) self.configuration.append_config_values(opts.COMMON_VOLUME_OPTS) self.common = importutils.import_object( '.'.join([_DRIVER_DIR, _DRIVER_CLASS]), self.configuration, 'FC', **kwargs) def check_for_setup_error(self): """Setup errors are already checked for in do_setup so return pass.""" pass def create_volume(self, volume): """Create a volume.""" return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" return self.common.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" return self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): """Delete a volume.""" self.common.delete_volume(volume) def create_snapshot(self, snapshot): """Create a snapshot.""" return self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Delete a snapshot.""" self.common.delete_snapshot(snapshot) def local_path(self, volume): pass def get_volume_stats(self, refresh=False): """Get volume stats.""" return self.common.get_volume_stats(refresh) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume. Call copy_image_to_volume() of super class and carry out original postprocessing. """ super(HPEXPFCDriver, self).copy_image_to_volume( context, volume, image_service, image_id) self.common.copy_image_to_volume( context, volume, image_service, image_id) def after_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions after copyvolume data. This method will be called after _copy_volume_data during volume migration """ self.common.copy_volume_data(context, src_vol, dest_vol, remote) def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume. Call restore_backup() of super class and carry out original postprocessing. """ super(HPEXPFCDriver, self).restore_backup( context, backup, volume, backup_service) self.common.restore_backup(context, backup, volume, backup_service) def extend_volume(self, volume, new_size): """Extend a volume.""" self.common.extend_volume(volume, new_size) def manage_existing(self, volume, existing_ref): """Manage an existing HPE XP storage volume. 
existing_ref is a dictionary of the form: {'ldev': , 'storage_id': } """ return self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume for manage_existing.""" return self.common.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): """Remove the specified volume from Cinder management.""" self.common.unmanage(volume) def do_setup(self, context): """Setup and verify HPE XP storage connection.""" self.common.do_setup(context) def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector): pass def remove_export(self, context, volume): pass @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Attach the volume to an instance.""" return self.common.initialize_connection(volume, connector) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Detach a volume from an instance.""" return self.common.terminate_connection(volume, connector, **kwargs) cinder-8.0.0/cinder/volume/drivers/hpe/hpe_3par_iscsi.py0000664000567000056710000011075512701406250024404 0ustar jenkinsjenkins00000000000000# (c) Copyright 2012-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for HPE 3PAR Storage array. This driver requires 3.1.3 firmware on the 3PAR array, using the 4.x version of the hpe3parclient. You will need to install the python hpe3parclient. sudo pip install --upgrade "hpe3parclient>=4.0" Set the following in the cinder.conf file to enable the 3PAR iSCSI Driver along with the required flags: volume_driver=cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver """ import re import sys try: from hpe3parclient import exceptions as hpeexceptions except ImportError: hpeexceptions = None from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.volume import driver from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon from cinder.volume.drivers.san import san from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) DEFAULT_ISCSI_PORT = 3260 CHAP_USER_KEY = "HPQ-cinder-CHAP-name" CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret" class HPE3PARISCSIDriver(driver.TransferVD, driver.ManageableVD, driver.ExtendVD, driver.SnapshotVD, driver.ManageableSnapshotsVD, driver.MigrateVD, driver.ConsistencyGroupVD, driver.BaseVD): """OpenStack iSCSI driver to enable 3PAR storage array. Version history: 1.0 - Initial driver 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, session changes, faster clone, requires 3.1.2 MU2 firmware. 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored the drivers to use the new APIs. 1.2.1 - Synchronized extend_volume method. 1.2.2 - Added try/finally around client login/logout. 
1.2.3 - log exceptions before raising 1.2.4 - Fixed iSCSI active path bug #1224594 1.2.5 - Added metadata during attach/detach bug #1258033 1.2.6 - Use least-used iscsi n:s:p for iscsi volume attach bug #1269515 This update now requires 3.1.2 MU3 firmware 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 2.0.0 - Update hp3parclient API uses 3.0.x 2.0.2 - Add back-end assisted volume migrate 2.0.3 - Added support for managing/unmanaging of volumes 2.0.4 - Added support for volume retype 2.0.5 - Added CHAP support, requires 3.1.3 MU1 firmware and hp3parclient 3.1.0. 2.0.6 - Fixing missing login/logout around attach/detach bug #1367429 2.0.7 - Add support for pools with model update 2.0.8 - Migrate without losing type settings bug #1356608 2.0.9 - Removing locks bug #1381190 2.0.10 - Add call to queryHost instead SSH based findHost #1398206 2.0.11 - Added missing host name during attach fix #1398206 2.0.12 - Removed usage of host name cache #1398914 2.0.13 - Update LOG usage to fix translations. bug #1384312 2.0.14 - Do not allow a different iSCSI IP (hp3par_iscsi_ips) to be used during live-migration. bug #1423958 2.0.15 - Added support for updated detach_volume attachment. 2.0.16 - Added encrypted property to initialize_connection #1439917 2.0.17 - Python 3 fixes 2.0.18 - Improved VLUN creation and deletion logic. #1469816 2.0.19 - Changed initialize_connection to use getHostVLUNs. #1475064 2.0.20 - Adding changes to support 3PAR iSCSI multipath. 2.0.21 - Adds consistency group support 2.0.22 - Update driver to use ABC metaclasses 2.0.23 - Added update_migrated_volume. bug # 1492023 3.0.0 - Rebranded HP to HPE. 3.0.1 - Python 3 support 3.0.2 - Remove db access for consistency groups 3.0.3 - Fix multipath dictionary key error. bug #1522062 3.0.4 - Adds v2 managed replication support 3.0.5 - Adds v2 unmanaged replication support 3.0.6 - Adding manage/unmanage snapshot support 3.0.7 - Optimize array ID retrieval 3.0.8 - Update replication to version 2.1 3.0.9 - Use same LUN ID for each VLUN path #1551994 """ VERSION = "3.0.9" def __init__(self, *args, **kwargs): super(HPE3PARISCSIDriver, self).__init__(*args, **kwargs) self._active_backend_id = kwargs.get('active_backend_id', None) self.configuration.append_config_values(hpecommon.hpe3par_opts) self.configuration.append_config_values(san.san_opts) def _init_common(self): return hpecommon.HPE3PARCommon(self.configuration, self._active_backend_id) def _login(self, timeout=None): common = self._init_common() # If replication is enabled and we cannot login, we do not want to # raise an exception so a failover can still be executed. try: common.do_setup(None, timeout=timeout, stats=self._stats) common.client_login() except Exception: if common._replication_enabled: LOG.warning(_LW("The primary array is not reachable at this " "time. Since replication is enabled, " "listing replication targets and failing over " "a volume can still be performed.")) pass else: raise return common def _logout(self, common): # If replication is enabled and we do not have a client ID, we did not # login, but can still failover. There is no need to logout. 
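# Illustrative sketch (not part of the driver): every public entry point
# in this class pairs _login with _logout in try/finally so the array
# session is released even when the wrapped call raises:
#
#     common = self._login()
#     try:
#         return common.create_volume(volume)   # any common.* operation
#     finally:
#         self._logout(common)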
if common.client is None and common._replication_enabled: return common.client_logout() def _check_flags(self, common): """Sanity check to ensure we have required options set.""" required_flags = ['hpe3par_api_url', 'hpe3par_username', 'hpe3par_password', 'san_ip', 'san_login', 'san_password'] common.check_flags(self.configuration, required_flags) def get_volume_stats(self, refresh=False): common = self._login() try: self._stats = common.get_volume_stats( refresh, self.get_filter_function(), self.get_goodness_function()) self._stats['storage_protocol'] = 'iSCSI' self._stats['driver_version'] = self.VERSION backend_name = self.configuration.safe_get('volume_backend_name') self._stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return self._stats finally: self._logout(common) def do_setup(self, context): common = self._init_common() common.do_setup(context) self._check_flags(common) common.check_for_setup_error() self.iscsi_ips = {} common.client_login() try: self.initialize_iscsi_ports(common) finally: self._logout(common) def initialize_iscsi_ports(self, common): # map iscsi_ip-> ip_port # -> iqn # -> nsp iscsi_ip_list = {} temp_iscsi_ip = {} # use the 3PAR ip_addr list for iSCSI configuration if len(common._client_conf['hpe3par_iscsi_ips']) > 0: # add port values to ip_addr, if necessary for ip_addr in common._client_conf['hpe3par_iscsi_ips']: ip = ip_addr.split(':') if len(ip) == 1: temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT} elif len(ip) == 2: temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]} else: LOG.warning(_LW("Invalid IP address format '%s'"), ip_addr) # add the single value iscsi_ip_address option to the IP dictionary. # This way we can see if it's a valid iSCSI IP. If it's not valid, # we won't use it and won't bother to report it, see below if (common._client_conf['iscsi_ip_address'] not in temp_iscsi_ip): ip = common._client_conf['iscsi_ip_address'] ip_port = common._client_conf['iscsi_port'] temp_iscsi_ip[ip] = {'ip_port': ip_port} # get all the valid iSCSI ports from 3PAR # when found, add the valid iSCSI ip, ip port, iqn and nsp # to the iSCSI IP dictionary iscsi_ports = common.get_active_iscsi_target_ports() for port in iscsi_ports: ip = port['IPAddr'] if ip in temp_iscsi_ip: ip_port = temp_iscsi_ip[ip]['ip_port'] iscsi_ip_list[ip] = {'ip_port': ip_port, 'nsp': port['nsp'], 'iqn': port['iSCSIName']} del temp_iscsi_ip[ip] # if the single value iscsi_ip_address option is still in the # temp dictionary it's because it defaults to $my_ip which doesn't # make sense in this context. So, if present, remove it and move on. 
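# Illustrative sketch (hypothetical addresses): entries in
# hpe3par_iscsi_ips may carry an optional port and default to
# DEFAULT_ISCSI_PORT (3260) when none is given, mirroring the parse above:
#
#     for ip_addr in ['10.0.0.11', '10.0.0.12:3262']:
#         ip = ip_addr.split(':')
#         # '10.0.0.11'      -> {'ip_port': 3260}    (one element)
#         # '10.0.0.12:3262' -> {'ip_port': '3262'}  (ip, port pair)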
if common._client_conf['iscsi_ip_address'] in temp_iscsi_ip: del temp_iscsi_ip[common._client_conf['iscsi_ip_address']] # lets see if there are invalid iSCSI IPs left in the temp dict if len(temp_iscsi_ip) > 0: LOG.warning(_LW("Found invalid iSCSI IP address(s) in " "configuration option(s) hpe3par_iscsi_ips or " "iscsi_ip_address '%s.'"), (", ".join(temp_iscsi_ip))) if not len(iscsi_ip_list) > 0: msg = _('At least one valid iSCSI IP address must be set.') LOG.error(msg) raise exception.InvalidInput(reason=msg) self.iscsi_ips[common._client_conf['hpe3par_api_url']] = iscsi_ip_list def check_for_setup_error(self): """Setup errors are already checked for in do_setup so return pass.""" pass def create_volume(self, volume): common = self._login() try: return common.create_volume(volume) finally: self._logout(common) def create_cloned_volume(self, volume, src_vref): """Clone an existing volume.""" common = self._login() try: return common.create_cloned_volume(volume, src_vref) finally: self._logout(common) def delete_volume(self, volume): common = self._login() try: common.delete_volume(volume) finally: self._logout(common) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. TODO: support using the size from the user. """ common = self._login() try: return common.create_volume_from_snapshot(volume, snapshot) finally: self._logout(common) def create_snapshot(self, snapshot): common = self._login() try: common.create_snapshot(snapshot) finally: self._logout(common) def delete_snapshot(self, snapshot): common = self._login() try: common.delete_snapshot(snapshot) finally: self._logout(common) def initialize_connection(self, volume, connector): """Assigns the volume to a server. Assign any created volume to a compute node/host so that it can be used from that host. This driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined in _get_iscsi_properties. Example return value: { 'driver_volume_type': 'iscsi' 'data': { 'encrypted': False, 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_protal': '127.0.0.1:3260', 'volume_id': 1, } } Steps to export a volume on 3PAR * Get the 3PAR iSCSI iqn * Create a host on the 3par * create vlun on the 3par """ common = self._login() try: # If the volume has been failed over, we need to reinitialize # iSCSI ports so they represent the new array. if volume.get('replication_status') == 'failed-over' and ( common._client_conf['hpe3par_api_url'] not in self.iscsi_ips): self.initialize_iscsi_ports(common) # Grab the correct iSCSI ports iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] # we have to make sure we have a host host, username, password = self._create_host( common, volume, connector) if connector.get('multipath'): ready_ports = common.client.getiSCSIPorts( state=common.client.PORT_STATE_READY) target_portals = [] target_iqns = [] target_luns = [] # Target portal ips are defined in cinder.conf. target_portal_ips = iscsi_ips.keys() # Collect all existing VLUNs for this volume/host combination. existing_vluns = common.find_existing_vluns(volume, host) # Cycle through each ready iSCSI port and determine if a new # VLUN should be created or an existing one used. lun_id = None for port in ready_ports: iscsi_ip = port['IPAddr'] if iscsi_ip in target_portal_ips: vlun = None # check for an already existing VLUN matching the # nsp for this iSCSI IP. If one is found, use it # instead of creating a new VLUN. 
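# Illustrative sketch (not part of the driver): an 'nsp' names a 3PAR port
# as node:slot:port, and build_portPos/build_nsp convert between that
# string and the WSAPI portPos dict, roughly:
#
#     nsp = '1:8:1'                                   # hypothetical port
#     node, slot, port = (int(x) for x in nsp.split(':'))
#     portPos = {'node': node, 'slot': slot, 'cardPort': port}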
for v in existing_vluns: portPos = common.build_portPos( iscsi_ips[iscsi_ip]['nsp']) if v['portPos'] == portPos: vlun = v break else: vlun = common.create_vlun( volume, host, iscsi_ips[iscsi_ip]['nsp'], lun_id=lun_id) # We want to use the same LUN ID for every port if lun_id is None: lun_id = vlun['lun'] iscsi_ip_port = "%s:%s" % ( iscsi_ip, iscsi_ips[iscsi_ip]['ip_port']) target_portals.append(iscsi_ip_port) target_iqns.append(port['iSCSIName']) target_luns.append(vlun['lun']) else: LOG.warning(_LW("iSCSI IP: '%s' was not found in " "hpe3par_iscsi_ips list defined in " "cinder.conf."), iscsi_ip) info = {'driver_volume_type': 'iscsi', 'data': {'target_portals': target_portals, 'target_iqns': target_iqns, 'target_luns': target_luns, 'target_discovered': True } } else: least_used_nsp = None # check if a VLUN already exists for this host existing_vlun = common.find_existing_vlun(volume, host) if existing_vlun: # We override the nsp here on purpose to force the # volume to be exported out the same IP as it already is. # This happens during nova live-migration, we want to # disable the picking of a different IP that we export # the volume to, or nova complains. least_used_nsp = common.build_nsp(existing_vlun['portPos']) if not least_used_nsp: least_used_nsp = self._get_least_used_nsp_for_host( common, host['name']) vlun = None if existing_vlun is None: # now that we have a host, create the VLUN vlun = common.create_vlun(volume, host, least_used_nsp) else: vlun = existing_vlun if least_used_nsp is None: LOG.warning(_LW("Least busy iSCSI port not found, " "using first iSCSI port in list.")) iscsi_ip = iscsi_ips.keys()[0] else: iscsi_ip = self._get_ip_using_nsp(least_used_nsp, common) iscsi_ip_port = iscsi_ips[iscsi_ip]['ip_port'] iscsi_target_iqn = iscsi_ips[iscsi_ip]['iqn'] info = {'driver_volume_type': 'iscsi', 'data': {'target_portal': "%s:%s" % (iscsi_ip, iscsi_ip_port), 'target_iqn': iscsi_target_iqn, 'target_lun': vlun['lun'], 'target_discovered': True } } if common._client_conf['hpe3par_iscsi_chap_enabled']: info['data']['auth_method'] = 'CHAP' info['data']['auth_username'] = username info['data']['auth_password'] = password encryption_key_id = volume.get('encryption_key_id', None) info['data']['encrypted'] = encryption_key_id is not None return info finally: self._logout(common) def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" common = self._login() try: hostname = common._safe_hostname(connector['host']) common.terminate_connection( volume, hostname, iqn=connector['initiator']) self._clear_chap_3par(common, volume) finally: self._logout(common) def _clear_chap_3par(self, common, volume): """Clears CHAP credentials on a 3par volume. Ignore exceptions caused by the keys not being present on a volume. """ vol_name = common._get_3par_vol_name(volume['id']) try: common.client.removeVolumeMetaData(vol_name, CHAP_USER_KEY) except hpeexceptions.HTTPNotFound: pass except Exception: raise try: common.client.removeVolumeMetaData(vol_name, CHAP_PASS_KEY) except hpeexceptions.HTTPNotFound: pass except Exception: raise def _create_3par_iscsi_host(self, common, hostname, iscsi_iqn, domain, persona_id): """Create a 3PAR host. Create a 3PAR host, if there is already a host on the 3par using the same iqn but with a different hostname, return the hostname used by 3PAR. 
""" # first search for an existing host host_found = None hosts = common.client.queryHost(iqns=[iscsi_iqn]) if hosts and hosts['members'] and 'name' in hosts['members'][0]: host_found = hosts['members'][0]['name'] if host_found is not None: return host_found else: if isinstance(iscsi_iqn, six.string_types): iqn = [iscsi_iqn] else: iqn = iscsi_iqn persona_id = int(persona_id) common.client.createHost(hostname, iscsiNames=iqn, optional={'domain': domain, 'persona': persona_id}) return hostname def _modify_3par_iscsi_host(self, common, hostname, iscsi_iqn): mod_request = {'pathOperation': common.client.HOST_EDIT_ADD, 'iSCSINames': [iscsi_iqn]} common.client.modifyHost(hostname, mod_request) def _set_3par_chaps(self, common, hostname, volume, username, password): """Sets a 3PAR host's CHAP credentials.""" if not common._client_conf['hpe3par_iscsi_chap_enabled']: return mod_request = {'chapOperation': common.client.HOST_EDIT_ADD, 'chapOperationMode': common.client.CHAP_INITIATOR, 'chapName': username, 'chapSecret': password} common.client.modifyHost(hostname, mod_request) def _create_host(self, common, volume, connector): """Creates or modifies existing 3PAR host.""" # make sure we don't have the host already host = None username = None password = None hostname = common._safe_hostname(connector['host']) cpg = common.get_cpg(volume, allowSnap=True) domain = common.get_domain(cpg) # Get the CHAP secret if CHAP is enabled if common._client_conf['hpe3par_iscsi_chap_enabled']: vol_name = common._get_3par_vol_name(volume['id']) username = common.client.getVolumeMetaData( vol_name, CHAP_USER_KEY)['value'] password = common.client.getVolumeMetaData( vol_name, CHAP_PASS_KEY)['value'] try: host = common._get_3par_host(hostname) except hpeexceptions.HTTPNotFound: # get persona from the volume type extra specs persona_id = common.get_persona_type(volume) # host doesn't exist, we have to create it hostname = self._create_3par_iscsi_host(common, hostname, connector['initiator'], domain, persona_id) self._set_3par_chaps(common, hostname, volume, username, password) host = common._get_3par_host(hostname) else: if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1: self._modify_3par_iscsi_host( common, hostname, connector['initiator']) self._set_3par_chaps( common, hostname, volume, username, password) host = common._get_3par_host(hostname) elif (not host['initiatorChapEnabled'] and common._client_conf['hpe3par_iscsi_chap_enabled']): LOG.warning(_LW("Host exists without CHAP credentials set and " "has iSCSI attachments but CHAP is enabled. " "Updating host with new CHAP credentials.")) self._set_3par_chaps( common, hostname, volume, username, password) return host, username, password def _do_export(self, common, volume): """Gets the associated account, generates CHAP info and updates.""" model_update = {} if not common._client_conf['hpe3par_iscsi_chap_enabled']: model_update['provider_auth'] = None return model_update # CHAP username will be the hostname chap_username = volume['host'].split('@')[0] chap_password = None try: # Get all active VLUNs for the host vluns = common.client.getHostVLUNs(chap_username) # Host has active VLUNs... is CHAP enabled on host? host_info = common.client.getHost(chap_username) if not host_info['initiatorChapEnabled']: LOG.warning(_LW("Host has no CHAP key, but CHAP is enabled.")) except hpeexceptions.HTTPNotFound: chap_password = volume_utils.generate_password(16) LOG.warning(_LW("No host or VLUNs exist. 
Generating new " "CHAP key.")) else: # Get a list of all iSCSI VLUNs and see if there is already a CHAP # key assigned to one of them. Use that CHAP key if present, # otherwise create a new one. Skip any VLUNs that are missing # CHAP credentials in metadata. chap_exists = False active_vluns = 0 for vlun in vluns: if not vlun['active']: continue active_vluns += 1 # iSCSI connections start with 'iqn'. if ('remoteName' in vlun and re.match('iqn.*', vlun['remoteName'])): try: chap_password = common.client.getVolumeMetaData( vlun['volumeName'], CHAP_PASS_KEY)['value'] chap_exists = True break except hpeexceptions.HTTPNotFound: LOG.debug("The VLUN %s is missing CHAP credentials " "but CHAP is enabled. Skipping.", vlun['remoteName']) else: LOG.warning(_LW("Non-iSCSI VLUN detected.")) if not chap_exists: chap_password = volume_utils.generate_password(16) LOG.warning(_LW("No VLUN contained CHAP credentials. " "Generating new CHAP key.")) # Add CHAP credentials to the volume metadata vol_name = common._get_3par_vol_name(volume['id']) common.client.setVolumeMetaData( vol_name, CHAP_USER_KEY, chap_username) common.client.setVolumeMetaData( vol_name, CHAP_PASS_KEY, chap_password) model_update['provider_auth'] = ('CHAP %s %s' % (chap_username, chap_password)) return model_update def create_export(self, context, volume, connector): common = self._login() try: return self._do_export(common, volume) finally: self._logout(common) def ensure_export(self, context, volume): """Ensure the volume still exists on the 3PAR. Also retrieves CHAP credentials, if present on the volume """ common = self._login() try: vol_name = common._get_3par_vol_name(volume['id']) common.client.getVolume(vol_name) except hpeexceptions.HTTPNotFound: LOG.error(_LE("Volume %s doesn't exist on array."), vol_name) else: metadata = common.client.getAllVolumeMetaData(vol_name) username = None password = None model_update = {} model_update['provider_auth'] = None for member in metadata['members']: if member['key'] == CHAP_USER_KEY: username = member['value'] elif member['key'] == CHAP_PASS_KEY: password = member['value'] if username and password: model_update['provider_auth'] = ('CHAP %s %s' % (username, password)) return model_update finally: self._logout(common) def remove_export(self, context, volume): pass def _get_least_used_nsp_for_host(self, common, hostname): """Get the least used NSP for the current host. Steps to determine which NSP to use. 
* If only one iSCSI NSP, return it * If there is already an active vlun to this host, return its NSP * Return NSP with fewest active vluns """ iscsi_nsps = self._get_iscsi_nsps(common) # If there's only one path, use it if len(iscsi_nsps) == 1: return iscsi_nsps[0] # Try to reuse an existing iscsi path to the host vluns = common.client.getVLUNs() for vlun in vluns['members']: if vlun['active']: if vlun['hostname'] == hostname: temp_nsp = common.build_nsp(vlun['portPos']) if temp_nsp in iscsi_nsps: # this host already has an iscsi path, so use it return temp_nsp # Calculate the least used iscsi nsp least_used_nsp = self._get_least_used_nsp(common, vluns['members'], self._get_iscsi_nsps(common)) return least_used_nsp def _get_iscsi_nsps(self, common): """Return the list of candidate nsps.""" nsps = [] iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] for value in iscsi_ips.values(): nsps.append(value['nsp']) return nsps def _get_ip_using_nsp(self, nsp, common): """Return IP associated with given nsp.""" iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] for (key, value) in iscsi_ips.items(): if value['nsp'] == nsp: return key def _get_least_used_nsp(self, common, vluns, nspss): """Return the nsp that has the fewest active vluns.""" # return only the nsp (node:server:port) # count the number of nsps nsp_counts = {} for nsp in nspss: # initialize counts to zero nsp_counts[nsp] = 0 current_least_used_nsp = None for vlun in vluns: if vlun['active']: nsp = common.build_nsp(vlun['portPos']) if nsp in nsp_counts: nsp_counts[nsp] = nsp_counts[nsp] + 1 # identify key (nsp) of least used nsp current_smallest_count = sys.maxsize for (nsp, count) in nsp_counts.items(): if count < current_smallest_count: current_least_used_nsp = nsp current_smallest_count = count return current_least_used_nsp def extend_volume(self, volume, new_size): common = self._login() try: common.extend_volume(volume, new_size) finally: self._logout(common) def create_consistencygroup(self, context, group): common = self._login() try: common.create_consistencygroup(context, group) finally: self._logout(common) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): common = self._login() try: return common.create_consistencygroup_from_src( context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) finally: self._logout(common) def delete_consistencygroup(self, context, group, volumes): common = self._login() try: return common.delete_consistencygroup(context, group, volumes) finally: self._logout(common) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): common = self._login() try: return common.update_consistencygroup(context, group, add_volumes, remove_volumes) finally: self._logout(common) def create_cgsnapshot(self, context, cgsnapshot, snapshots): common = self._login() try: return common.create_cgsnapshot(context, cgsnapshot, snapshots) finally: self._logout(common) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): common = self._login() try: return common.delete_cgsnapshot(context, cgsnapshot, snapshots) finally: self._logout(common) def manage_existing(self, volume, existing_ref): common = self._login() try: return common.manage_existing(volume, existing_ref) finally: self._logout(common) def manage_existing_snapshot(self, snapshot, existing_ref): common = self._login() try: return common.manage_existing_snapshot(snapshot, existing_ref) finally: 
self._logout(common) def manage_existing_get_size(self, volume, existing_ref): common = self._login() try: return common.manage_existing_get_size(volume, existing_ref) finally: self._logout(common) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): common = self._login() try: return common.manage_existing_snapshot_get_size(snapshot, existing_ref) finally: self._logout(common) def unmanage(self, volume): common = self._login() try: common.unmanage(volume) finally: self._logout(common) def unmanage_snapshot(self, snapshot): common = self._login() try: common.unmanage_snapshot(snapshot) finally: self._logout(common) def attach_volume(self, context, volume, instance_uuid, host_name, mountpoint): common = self._login() try: common.attach_volume(volume, instance_uuid) finally: self._logout(common) def detach_volume(self, context, volume, attachment=None): common = self._login() try: common.detach_volume(volume, attachment) finally: self._logout(common) def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type.""" common = self._login() try: return common.retype(volume, new_type, diff, host) finally: self._logout(common) def migrate_volume(self, context, volume, host): if volume['status'] == 'in-use': protocol = host['capabilities']['storage_protocol'] if protocol != 'iSCSI': LOG.debug("3PAR ISCSI driver cannot migrate in-use volume " "to a host with storage_protocol=%s.", protocol) return False, None common = self._login() try: return common.migrate_volume(volume, host) finally: self._logout(common) def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Update the name of the migrated volume to it's new ID.""" common = self._login() try: return common.update_migrated_volume(context, volume, new_volume, original_volume_status) finally: self._logout(common) def get_pool(self, volume): common = self._login() try: return common.get_cpg(volume) except hpeexceptions.HTTPNotFound: reason = (_("Volume %s doesn't exist on array.") % volume) LOG.error(reason) raise exception.InvalidVolume(reason) finally: self._logout(common) def failover_host(self, context, volumes, secondary_backend_id): """Force failover to a secondary replication target.""" common = self._login(timeout=30) try: # Update the active_backend_id in the driver and return it. active_backend_id, volume_updates = common.failover_host( context, volumes, secondary_backend_id) self._active_backend_id = active_backend_id return active_backend_id, volume_updates finally: self._logout(common) cinder-8.0.0/cinder/volume/drivers/hpe/hpe_3par_common.py0000664000567000056710000047476012701406250024573 0ustar jenkinsjenkins00000000000000# (c) Copyright 2012-2016 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver common utilities for HPE 3PAR Storage array The 3PAR drivers requires 3.1.3 firmware on the 3PAR array. You will need to install the python hpe3parclient module. 
sudo pip install python-3parclient The drivers uses both the REST service and the SSH command line to correctly operate. Since the ssh credentials and the REST credentials can be different we need to have settings for both. The drivers requires the use of the san_ip, san_login, san_password settings for ssh connections into the 3PAR array. It also requires the setting of hpe3par_api_url, hpe3par_username, hpe3par_password for credentials to talk to the REST service on the 3PAR array. """ import ast import json import math import pprint import re import six import uuid from oslo_serialization import base64 from oslo_utils import importutils hpe3parclient = importutils.try_import("hpe3parclient") if hpe3parclient: from hpe3parclient import client from hpe3parclient import exceptions as hpeexceptions from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units from cinder import context from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE, _LI, _LW from cinder.objects import fields from cinder.volume import qos_specs from cinder.volume import utils as volume_utils from cinder.volume import volume_types import taskflow.engines from taskflow.patterns import linear_flow LOG = logging.getLogger(__name__) MIN_CLIENT_VERSION = '4.2.0' DEDUP_API_VERSION = 30201120 FLASH_CACHE_API_VERSION = 30201200 SRSTATLD_API_VERSION = 30201200 REMOTE_COPY_API_VERSION = 30202290 hpe3par_opts = [ cfg.StrOpt('hpe3par_api_url', default='', help="3PAR WSAPI Server Url like " "https://<3par ip>:8080/api/v1", deprecated_name='hp3par_api_url'), cfg.StrOpt('hpe3par_username', default='', help="3PAR username with the 'edit' role", deprecated_name='hp3par_username'), cfg.StrOpt('hpe3par_password', default='', help="3PAR password for the user specified in hpe3par_username", secret=True, deprecated_name='hp3par_password'), cfg.ListOpt('hpe3par_cpg', default=["OpenStack"], help="List of the CPG(s) to use for volume creation", deprecated_name='hp3par_cpg'), cfg.StrOpt('hpe3par_cpg_snap', default="", help="The CPG to use for Snapshots for volumes. " "If empty the userCPG will be used.", deprecated_name='hp3par_cpg_snap'), cfg.StrOpt('hpe3par_snapshot_retention', default="", help="The time in hours to retain a snapshot. " "You can't delete it before this expires.", deprecated_name='hp3par_snapshot_retention'), cfg.StrOpt('hpe3par_snapshot_expiration', default="", help="The time in hours when a snapshot expires " " and is deleted. This must be larger than expiration", deprecated_name='hp3par_snapshot_expiration'), cfg.BoolOpt('hpe3par_debug', default=False, help="Enable HTTP debugging to 3PAR", deprecated_name='hp3par_debug'), cfg.ListOpt('hpe3par_iscsi_ips', default=[], help="List of target iSCSI addresses to use.", deprecated_name='hp3par_iscsi_ips'), cfg.BoolOpt('hpe3par_iscsi_chap_enabled', default=False, help="Enable CHAP authentication for iSCSI connections.", deprecated_name='hp3par_iscsi_chap_enabled'), ] CONF = cfg.CONF CONF.register_opts(hpe3par_opts) # Input/output (total read/write) operations per second. THROUGHPUT = 'throughput' # Data processed (total read/write) per unit time: kilobytes per second. BANDWIDTH = 'bandwidth' # Response time (total read/write): microseconds. LATENCY = 'latency' # IO size (total read/write): kilobytes. 
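# ---------------------------------------------------------------------------
# Illustrative aside (not part of the shipped file): the hpe3par_opts
# registered above translate into a cinder.conf backend stanza. A minimal
# sketch; every address, credential, and section name below is a placeholder
# assumption, not a tested configuration:
#
#     [3par-iscsi]
#     volume_driver = cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver
#     hpe3par_api_url = https://10.0.0.5:8080/api/v1
#     hpe3par_username = 3paradm
#     hpe3par_password = 3parpass
#     hpe3par_cpg = OpenStack
#     hpe3par_iscsi_ips = 10.0.0.11,10.0.0.12
#     hpe3par_iscsi_chap_enabled = true
#     san_ip = 10.0.0.5
#     san_login = 3paradm
#     san_password = 3parpass
# ---------------------------------------------------------------------------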
IO_SIZE = 'io_size' # Queue length for processing IO requests QUEUE_LENGTH = 'queue_length' # Average busy percentage AVG_BUSY_PERC = 'avg_busy_perc' class HPE3PARCommon(object): """Class that contains common code for the 3PAR drivers. Version history: 1.2.0 - Updated hp3parclient API use to 2.0.x 1.2.1 - Check that the VVS exists 1.2.2 - log prior to raising exceptions 1.2.3 - Methods to update key/value pair bug #1258033 1.2.4 - Remove deprecated config option hp3par_domain 1.2.5 - Raise Ex when deleting snapshot with dependencies bug #1250249 1.2.6 - Allow optional specifying n:s:p for vlun creation bug #1269515 This update now requires 3.1.2 MU3 firmware 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 2.0.0 - Update hp3parclient API uses 3.0.x 2.0.1 - Updated to use qos_specs, added new qos settings and personas 2.0.2 - Add back-end assisted volume migrate 2.0.3 - Allow deleting missing snapshots bug #1283233 2.0.4 - Allow volumes created from snapshots to be larger bug #1279478 2.0.5 - Fix extend volume units bug #1284368 2.0.6 - use loopingcall.wait instead of time.sleep 2.0.7 - Allow extend volume based on snapshot bug #1285906 2.0.8 - Fix detach issue for multiple hosts bug #1288927 2.0.9 - Remove unused 3PAR driver method bug #1310807 2.0.10 - Fixed an issue with 3PAR vlun location bug #1315542 2.0.11 - Remove hp3parclient requirement from unit tests #1315195 2.0.12 - Volume detach hangs when host is in a host set bug #1317134 2.0.13 - Added support for managing/unmanaging of volumes 2.0.14 - Modified manage volume to use standard 'source-name' element. 2.0.15 - Added support for volume retype 2.0.16 - Add a better log during delete_volume time. Bug #1349636 2.0.17 - Added iSCSI CHAP support This update now requires 3.1.3 MU1 firmware and hp3parclient 3.1.0 2.0.18 - HP 3PAR manage_existing with volume-type support 2.0.19 - Update default persona from Generic to Generic-ALUA 2.0.20 - Configurable SSH missing key policy and known hosts file 2.0.21 - Remove bogus invalid snapCPG=None exception 2.0.22 - HP 3PAR drivers should not claim to have 'infinite' space 2.0.23 - Increase the hostname size from 23 to 31 Bug #1371242 2.0.24 - Add pools (hp3par_cpg now accepts a list of CPGs) 2.0.25 - Migrate without losing type settings bug #1356608 2.0.26 - Don't ignore extra-specs snap_cpg when missing cpg #1368972 2.0.27 - Fixing manage source-id error bug #1357075 2.0.28 - Removing locks bug #1381190 2.0.29 - Report a limitless cpg's stats better bug #1398651 2.0.30 - Update the minimum hp3parclient version bug #1402115 2.0.31 - Removed usage of host name cache #1398914 2.0.32 - Update LOG usage to fix translations. bug #1384312 2.0.33 - Fix host persona to match WSAPI mapping bug #1403997 2.0.34 - Fix log messages to match guidelines. bug #1411370 2.0.35 - Fix default snapCPG for manage_existing bug #1393609 2.0.36 - Added support for dedup provisioning 2.0.37 - Added support for enabling Flash Cache 2.0.38 - Add stats for hp3par goodness_function and filter_function 2.0.39 - Added support for updated detach_volume attachment. 2.0.40 - Make the 3PAR drivers honor the pool in create bug #1432876 2.0.41 - Only log versions at startup. bug #1447697 2.0.42 - Fix type for snapshot config settings. bug #1461640 2.0.43 - Report the capability of supporting multiattach 2.0.44 - Update help strings to reduce the 3PAR user role requirements 2.0.45 - Python 3 fixes 2.0.46 - Improved VLUN creation and deletion logic. #1469816 2.0.47 - Changed initialize_connection to use getHostVLUNs. 
#1475064 2.0.48 - Adding changes to support 3PAR iSCSI multipath. 2.0.49 - Added client CPG stats to driver volume stats. bug #1482741 2.0.50 - Add over subscription support 2.0.51 - Adds consistency group support 2.0.52 - Added update_migrated_volume. bug #1492023 2.0.53 - Fix volume size conversion. bug #1513158 3.0.0 - Rebranded HP to HPE. 3.0.1 - Fixed find_existing_vluns bug #1515033 3.0.2 - Python 3 support 3.0.3 - Remove db access for consistency groups 3.0.4 - Adds v2 managed replication support 3.0.5 - Adds v2 unmanaged replication support 3.0.6 - Adding manage/unmanage snapshot support 3.0.7 - Enable standard capabilities based on 3PAR licenses 3.0.8 - Optimize array ID retrieval 3.0.9 - Bump minimum API version for volume replication 3.0.10 - Added additional volumes checks to the manage snapshot API 3.0.11 - Fix the image cache capability bug #1491088 3.0.12 - Remove client version checks for replication 3.0.13 - Support creating a cg from a source cg 3.0.14 - Comparison of WWNs now handles case difference. bug #1546453 3.0.15 - Update replication to version 2.1 3.0.16 - Use same LUN ID for each VLUN path #1551994 3.0.17 - Don't fail on clearing 3PAR object volume key. bug #1546392 3.0.18 - create_cloned_volume account for larger size. bug #1554740 """ VERSION = "3.0.18" stats = {} # TODO(Ramy): move these to the 3PAR Client VLUN_TYPE_EMPTY = 1 VLUN_TYPE_PORT = 2 VLUN_TYPE_HOST = 3 VLUN_TYPE_MATCHED_SET = 4 VLUN_TYPE_HOST_SET = 5 THIN = 2 DEDUP = 6 CONVERT_TO_THIN = 1 CONVERT_TO_FULL = 2 CONVERT_TO_DEDUP = 3 # v2 replication constants SYNC = 1 PERIODIC = 2 EXTRA_SPEC_REP_MODE = "replication:mode" EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period" RC_ACTION_CHANGE_TO_PRIMARY = 7 DEFAULT_REP_MODE = 'periodic' DEFAULT_SYNC_PERIOD = 900 RC_GROUP_STARTED = 3 SYNC_STATUS_COMPLETED = 3 FAILBACK_VALUE = 'default' # License values for reported capabilities PRIORITY_OPT_LIC = "Priority Optimization" THIN_PROV_LIC = "Thin Provisioning" REMOTE_COPY_LIC = "Remote Copy" # Valid values for volume type extra specs # The first value in the list is the default value valid_prov_values = ['thin', 'full', 'dedup'] valid_persona_values = ['2 - Generic-ALUA', '1 - Generic', '3 - Generic-legacy', '4 - HPUX-legacy', '5 - AIX-legacy', '6 - EGENERA', '7 - ONTAP-legacy', '8 - VMware', '9 - OpenVMS', '10 - HPUX', '11 - WindowsServer'] hpe_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency', 'priority'] qos_priority_level = {'low': 1, 'normal': 2, 'high': 3} hpe3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs', 'flash_cache'] def __init__(self, config, active_backend_id=None): self.config = config self.client = None self.uuid = uuid.uuid4() self._client_conf = {} self._replication_targets = [] self._replication_enabled = False self._active_backend_id = active_backend_id def get_version(self): return self.VERSION def check_flags(self, options, required_flags): for flag in required_flags: if not getattr(options, flag, None): msg = _('%s is not set') % flag LOG.error(msg) raise exception.InvalidInput(reason=msg) def check_replication_flags(self, options, required_flags): for flag in required_flags: if not options.get(flag, None): msg = (_('%s is not set and is required for the replication ' 'device to be valid.') % flag) LOG.error(msg) raise exception.InvalidInput(reason=msg) def _create_client(self, timeout=None): hpe3par_api_url = self._client_conf['hpe3par_api_url'] cl = client.HPE3ParClient(hpe3par_api_url, timeout=timeout) client_version = hpe3parclient.version if 
client_version < MIN_CLIENT_VERSION: ex_msg = (_('Invalid hpe3parclient version found (%(found)s). ' 'Version %(minimum)s or greater required. Run "pip' ' install --upgrade python-3parclient" to upgrade' ' the hpe3parclient.') % {'found': client_version, 'minimum': MIN_CLIENT_VERSION}) LOG.error(ex_msg) raise exception.InvalidInput(reason=ex_msg) return cl def client_login(self): try: LOG.debug("Connecting to 3PAR") self.client.login(self._client_conf['hpe3par_username'], self._client_conf['hpe3par_password']) except hpeexceptions.HTTPUnauthorized as ex: msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") % {'url': self._client_conf['hpe3par_api_url'], 'err': ex}) LOG.error(msg) raise exception.InvalidInput(reason=msg) known_hosts_file = CONF.ssh_hosts_key_file policy = "AutoAddPolicy" if CONF.strict_ssh_host_key_policy: policy = "RejectPolicy" self.client.setSSHOptions( self._client_conf['san_ip'], self._client_conf['san_login'], self._client_conf['san_password'], port=self._client_conf['san_ssh_port'], conn_timeout=self._client_conf['ssh_conn_timeout'], privatekey=self._client_conf['san_private_key'], missing_key_policy=policy, known_hosts_file=known_hosts_file) def client_logout(self): LOG.debug("Disconnect from 3PAR REST and SSH %s", self.uuid) self.client.logout() def _create_replication_client(self, remote_array): try: cl = client.HPE3ParClient(remote_array['hpe3par_api_url']) cl.login(remote_array['hpe3par_username'], remote_array['hpe3par_password']) except hpeexceptions.HTTPUnauthorized as ex: msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") % {'url': remote_array['hpe3par_api_url'], 'err': ex}) LOG.error(msg) raise exception.InvalidInput(reason=msg) known_hosts_file = CONF.ssh_hosts_key_file policy = "AutoAddPolicy" if CONF.strict_ssh_host_key_policy: policy = "RejectPolicy" cl.setSSHOptions( remote_array['san_ip'], remote_array['san_login'], remote_array['san_password'], port=remote_array['san_ssh_port'], conn_timeout=remote_array['ssh_conn_timeout'], privatekey=remote_array['san_private_key'], missing_key_policy=policy, known_hosts_file=known_hosts_file) return cl def _destroy_replication_client(self, client): if client is not None: client.logout() def do_setup(self, context, timeout=None, stats=None): if hpe3parclient is None: msg = _('You must install hpe3parclient before using 3PAR' ' drivers. Run "pip install python-3parclient" to' ' install the hpe3parclient.') raise exception.VolumeBackendAPIException(data=msg) try: # This will set self._client_conf with the proper credentials # to communicate with the 3PAR array. It will contain either # the values for the primary array or secondary array in the # case of a fail-over. self._get_3par_config() self.client = self._create_client(timeout=timeout) wsapi_version = self.client.getWsApiVersion() self.API_VERSION = wsapi_version['build'] # If replication is properly configured, the primary array's # API version must meet the minimum requirements. if self._replication_enabled and ( self.API_VERSION < REMOTE_COPY_API_VERSION): self._replication_enabled = False msg = (_LE("The primary array must have an API version of " "%(min_ver)s or higher, but is only on " "%(current_ver)s, therefore replication is not " "supported.") % {'min_ver': REMOTE_COPY_API_VERSION, 'current_ver': self.API_VERSION}) LOG.error(msg) except hpeexceptions.UnsupportedVersion as ex: # In the event we cannot contact the configured primary array, # we want to allow a failover if replication is enabled. 
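# Illustrative aside (standalone sketch, not driver code): the
# minimum-client-version check above compares version strings lexically,
# which only stays correct while every component is a single digit
# ('10.0.0' < '4.2.0' as strings). A minimal numeric comparison:

def _version_tuple(version_string):
    """Turn '4.2.0' into (4, 2, 0) so comparison is numeric."""
    return tuple(int(part) for part in version_string.split('.'))

def meets_minimum(found, minimum):
    """True when the found client version is at least the minimum."""
    return _version_tuple(found) >= _version_tuple(minimum)

# Example: meets_minimum('10.0.0', '4.2.0') is True, while the plain string
# comparison '10.0.0' >= '4.2.0' evaluates False.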
self._do_replication_setup() if self._replication_enabled: self.client = None raise exception.InvalidInput(ex) if context: # The context is None except at driver startup. LOG.info(_LI("HPE3PARCommon %(common_ver)s," "hpe3parclient %(rest_ver)s"), {"common_ver": self.VERSION, "rest_ver": hpe3parclient.get_version_string()}) if self.config.hpe3par_debug: self.client.debug_rest(True) if self.API_VERSION < SRSTATLD_API_VERSION: # Firmware version not compatible with srstatld LOG.warning(_LW("srstatld requires " "WSAPI version '%(srstatld_version)s' " "version '%(version)s' is installed.") % {'srstatld_version': SRSTATLD_API_VERSION, 'version': self.API_VERSION}) # Get the client ID for provider_location. We only need to retrieve # the ID directly from the array if the driver stats are not provided. if not stats: try: self.client_login() info = self.client.getStorageSystemInfo() self.client.id = six.text_type(info['id']) except Exception: self.client.id = 0 finally: self.client_logout() else: self.client.id = stats['array_id'] def check_for_setup_error(self): if self.client: self.client_login() try: cpg_names = self._client_conf['hpe3par_cpg'] for cpg_name in cpg_names: self.validate_cpg(cpg_name) finally: self.client_logout() def validate_cpg(self, cpg_name): try: self.client.getCPG(cpg_name) except hpeexceptions.HTTPNotFound: err = (_("CPG (%s) doesn't exist on array") % cpg_name) LOG.error(err) raise exception.InvalidInput(reason=err) def get_domain(self, cpg_name): try: cpg = self.client.getCPG(cpg_name) except hpeexceptions.HTTPNotFound: err = (_("Failed to get domain because CPG (%s) doesn't " "exist on array.") % cpg_name) LOG.error(err) raise exception.InvalidInput(reason=err) if 'domain' in cpg: return cpg['domain'] return None def extend_volume(self, volume, new_size): volume_name = self._get_3par_vol_name(volume['id']) old_size = volume['size'] growth_size = int(new_size) - old_size LOG.debug("Extending Volume %(vol)s from %(old)s to %(new)s, " " by %(diff)s GB.", {'vol': volume_name, 'old': old_size, 'new': new_size, 'diff': growth_size}) growth_size_mib = growth_size * units.Ki self._extend_volume(volume, volume_name, growth_size_mib) def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" pool = volume_utils.extract_host(group.host, level='pool') domain = self.get_domain(pool) cg_name = self._get_3par_vvs_name(group.id) extra = {'consistency_group_id': group.id} extra['description'] = group.description extra['display_name'] = group.name if group.cgsnapshot_id: extra['cgsnapshot_id'] = group.cgsnapshot_id self.client.createVolumeSet(cg_name, domain=domain, comment=six.text_type(extra)) model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} return model_update def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): self.create_consistencygroup(context, group) vvs_name = self._get_3par_vvs_name(group.id) if cgsnapshot and snapshots: cgsnap_name = self._get_3par_snap_name(cgsnapshot.id) snap_base = cgsnap_name elif source_cg and source_vols: cg_id = source_cg.id # Create a brand new uuid for the temp snap. snap_uuid = uuid.uuid4().hex # Create a temporary snapshot of the volume set in order to # perform an online copy. These temp snapshots will be deleted # when the source consistency group is deleted. 
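# Illustrative aside: per the comments above, a single snapshot request is
# issued for the whole volume set with a "-@count@" suffix, which the array
# apparently expands per member volume; the loop that follows reconstructs
# each member's snapshot name as snap_base + "-" + index. A standalone
# sketch of that naming (names are made up):

def member_snap_names(snap_base, volume_count):
    """Expanded per-member snapshot names for a volume-set snapshot."""
    return [snap_base + "-" + str(i) for i in range(volume_count)]

# Example: member_snap_names('tss-ABC', 3)
# -> ['tss-ABC-0', 'tss-ABC-1', 'tss-ABC-2']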
temp_snap = self._get_3par_snap_name(snap_uuid, temp_snap=True) snap_shot_name = temp_snap + "-@count@" copy_of_name = self._get_3par_vvs_name(cg_id) optional = {'expirationHours': 1} self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name, optional=optional) snap_base = temp_snap for i, volume in enumerate(volumes): snap_name = snap_base + "-" + six.text_type(i) volume_name = self._get_3par_vol_name(volume['id']) type_info = self.get_volume_settings_from_type(volume) cpg = type_info['cpg'] optional = {'online': True, 'snapCPG': cpg} self.client.copyVolume(snap_name, volume_name, cpg, optional) self.client.addVolumeToVolumeSet(vvs_name, volume_name) return None, None def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" try: cg_name = self._get_3par_vvs_name(group.id) self.client.deleteVolumeSet(cg_name) except hpeexceptions.HTTPNotFound: err = (_LW("Virtual Volume Set '%s' doesn't exist on array.") % cg_name) LOG.warning(err) except hpeexceptions.HTTPConflict as e: err = (_LE("Conflict detected in Virtual Volume Set" " %(volume_set)s: %(error)s")) LOG.error(err, {"volume_set": cg_name, "error": e}) volume_model_updates = [] for volume in volumes: volume_update = {'id': volume.id} try: self.delete_volume(volume) volume_update['status'] = 'deleted' except Exception as ex: LOG.error(_LE("There was an error deleting volume %(id)s: " "%(error)s."), {'id': volume.id, 'error': six.text_type(ex)}) volume_update['status'] = 'error' volume_model_updates.append(volume_update) model_update = {'status': group.status} return model_update, volume_model_updates def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): volume_set_name = self._get_3par_vvs_name(group.id) for volume in add_volumes: volume_name = self._get_3par_vol_name(volume['id']) try: self.client.addVolumeToVolumeSet(volume_set_name, volume_name) except hpeexceptions.HTTPNotFound: msg = (_LE('Virtual Volume Set %s does not exist.') % volume_set_name) LOG.error(msg) raise exception.InvalidInput(reason=msg) for volume in remove_volumes: volume_name = self._get_3par_vol_name(volume['id']) try: self.client.removeVolumeFromVolumeSet( volume_set_name, volume_name) except hpeexceptions.HTTPNotFound: msg = (_LE('Virtual Volume Set %s does not exist.') % volume_set_name) LOG.error(msg) raise exception.InvalidInput(reason=msg) return None, None, None def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" cg_id = cgsnapshot.consistencygroup_id snap_shot_name = self._get_3par_snap_name(cgsnapshot.id) + ( "-@count@") copy_of_name = self._get_3par_vvs_name(cg_id) extra = {'cgsnapshot_id': cgsnapshot.id} extra['consistency_group_id'] = cg_id extra['description'] = cgsnapshot.description optional = {'comment': json.dumps(extra), 'readOnly': False} if self.config.hpe3par_snapshot_expiration: optional['expirationHours'] = ( int(self.config.hpe3par_snapshot_expiration)) if self.config.hpe3par_snapshot_retention: optional['retentionHours'] = ( int(self.config.hpe3par_snapshot_retention)) try: self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name, optional=optional) except Exception as ex: msg = (_('There was an error creating the cgsnapshot: %s') % six.text_type(ex)) LOG.error(msg) raise exception.InvalidInput(reason=msg) snapshot_model_updates = [] for snapshot in snapshots: snapshot_update = {'id': snapshot['id'], 'status': 'available'} snapshot_model_updates.append(snapshot_update) model_update = {'status': 'available'} return
model_update, snapshot_model_updates def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" cgsnap_name = self._get_3par_snap_name(cgsnapshot.id) snapshot_model_updates = [] for i, snapshot in enumerate(snapshots): snapshot_update = {'id': snapshot['id']} try: snap_name = cgsnap_name + "-" + six.text_type(i) self.client.deleteVolume(snap_name) snapshot_update['status'] = 'deleted' except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. LOG.warning(_LW("Delete Snapshot id not found. Removing from " "cinder: %(id)s Ex: %(msg)s"), {'id': snapshot['id'], 'msg': ex}) snapshot_update['status'] = 'error' except Exception as ex: LOG.error(_LE("There was an error deleting snapshot %(id)s: " "%(error)s."), {'id': snapshot['id'], 'error': six.text_type(ex)}) snapshot_update['status'] = 'error' snapshot_model_updates.append(snapshot_update) model_update = {'status': cgsnapshot.status} return model_update, snapshot_model_updates def manage_existing(self, volume, existing_ref): """Manage an existing 3PAR volume. existing_ref is a dictionary of the form: {'source-name': } """ target_vol_name = self._get_existing_volume_ref_name(existing_ref) # Check for the existence of the virtual volume. old_comment_str = "" try: vol = self.client.getVolume(target_vol_name) if 'comment' in vol: old_comment_str = vol['comment'] except hpeexceptions.HTTPNotFound: err = (_("Virtual volume '%s' doesn't exist on array.") % target_vol_name) LOG.error(err) raise exception.InvalidInput(reason=err) new_comment = {} # Use the display name from the existing volume if no new name # was chosen by the user. if volume['display_name']: display_name = volume['display_name'] new_comment['display_name'] = volume['display_name'] elif 'comment' in vol: display_name = self._get_3par_vol_comment_value(vol['comment'], 'display_name') if display_name: new_comment['display_name'] = display_name else: display_name = None # Generate the new volume information based on the new ID. new_vol_name = self._get_3par_vol_name(volume['id']) name = 'volume-' + volume['id'] new_comment['volume_id'] = volume['id'] new_comment['name'] = name new_comment['type'] = 'OpenStack' volume_type = None if volume['volume_type_id']: try: volume_type = self._get_volume_type(volume['volume_type_id']) except Exception: reason = (_("Volume type ID '%s' is invalid.") % volume['volume_type_id']) raise exception.ManageExistingVolumeTypeMismatch(reason=reason) new_vals = {'newName': new_vol_name, 'comment': json.dumps(new_comment)} # Ensure that snapCPG is set if 'snapCPG' not in vol: new_vals['snapCPG'] = vol['userCPG'] LOG.info(_LI("Virtual volume %(disp)s '%(new)s' snapCPG " "is empty so it will be set to: %(cpg)s"), {'disp': display_name, 'new': new_vol_name, 'cpg': new_vals['snapCPG']}) # Update the existing volume with the new name and comments.
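# Illustrative aside (standalone sketch): manage_existing above derives the
# display name with a fallback chain -- the user-supplied name first, then
# the display_name stored in the old 3PAR comment, else None. Assuming the
# comment is the JSON this driver writes via json.dumps:

import json as _json

def pick_display_name(requested_name, old_comment_json):
    """Prefer the user's name; fall back to the old comment's display_name."""
    if requested_name:
        return requested_name
    try:
        return _json.loads(old_comment_json or '{}').get('display_name')
    except ValueError:
        return None

# Example: pick_display_name(None, '{"display_name": "legacy-vol"}')
# returns 'legacy-vol'.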
self.client.modifyVolume(target_vol_name, new_vals) LOG.info(_LI("Virtual volume '%(ref)s' renamed to '%(new)s'."), {'ref': existing_ref['source-name'], 'new': new_vol_name}) retyped = False model_update = None if volume_type: LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " "being retyped."), {'disp': display_name, 'new': new_vol_name}) try: retyped, model_update = self._retype_from_no_type(volume, volume_type) LOG.info(_LI("Virtual volume %(disp)s successfully retyped to " "%(new_type)s."), {'disp': display_name, 'new_type': volume_type.get('name')}) except Exception: with excutils.save_and_reraise_exception(): LOG.warning(_LW("Failed to manage virtual volume %(disp)s " "due to error during retype."), {'disp': display_name}) # Try to undo the rename and clear the new comment. self.client.modifyVolume( new_vol_name, {'newName': target_vol_name, 'comment': old_comment_str}) updates = {'display_name': display_name} if retyped and model_update: updates.update(model_update) LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " "now being managed."), {'disp': display_name, 'new': new_vol_name}) # Return display name to update the name displayed in the GUI and # any model updates from retype. return updates def manage_existing_snapshot(self, snapshot, existing_ref): """Manage an existing 3PAR snapshot. existing_ref is a dictionary of the form: {'source-name': } """ # Potential parent volume for the snapshot volume = snapshot['volume'] # Do not allow for managing of snapshots for 'failed-over' volumes. if volume.get('replication_status') == 'failed-over': err = (_("Managing of snapshots to failed-over volumes is " "not allowed.")) raise exception.InvalidInput(reason=err) target_snap_name = self._get_existing_volume_ref_name(existing_ref, is_snapshot=True) # Check for the existence of the snapshot. try: snap = self.client.getVolume(target_snap_name) except hpeexceptions.HTTPNotFound: err = (_("Snapshot '%s' doesn't exist on array.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) # Make sure the snapshot is being associated with the correct volume. parent_vol_name = self._get_3par_vol_name(volume['id']) if parent_vol_name != snap['copyOf']: err = (_("The provided snapshot '%s' is not a snapshot of " "the provided volume.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) new_comment = {} # Use the display name from the existing snapshot if no new name # was chosen by the user. if snapshot['display_name']: display_name = snapshot['display_name'] new_comment['display_name'] = snapshot['display_name'] elif 'comment' in snap: display_name = self._get_3par_vol_comment_value(snap['comment'], 'display_name') if display_name: new_comment['display_name'] = display_name else: display_name = None # Generate the new snapshot information based on the new ID. new_snap_name = self._get_3par_snap_name(snapshot['id']) new_comment['volume_id'] = volume['id'] new_comment['volume_name'] = 'volume-' + volume['id'] if snapshot.get('display_description', None): new_comment['description'] = snapshot['display_description'] else: new_comment['description'] = "" new_vals = {'newName': new_snap_name, 'comment': json.dumps(new_comment)} # Update the existing snapshot with the new name and comments. 
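# Illustrative aside (standalone sketch): the manage flow above renames the
# volume first and retypes second, undoing the rename when the retype raises.
# The compensate-on-failure shape, with rename/undo_rename/retype as stand-in
# callables:

def manage_with_undo(rename, undo_rename, retype):
    """Run retype after rename; roll the rename back if retype fails."""
    rename()
    try:
        retype()
    except Exception:
        undo_rename()  # best-effort compensation before re-raising
        raise

# The driver achieves the same with excutils.save_and_reraise_exception(),
# which re-raises once the cleanup inside the with-block completes.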
self.client.modifyVolume(target_snap_name, new_vals) LOG.info(_LI("Snapshot '%(ref)s' renamed to '%(new)s'."), {'ref': existing_ref['source-name'], 'new': new_snap_name}) updates = {'display_name': display_name} LOG.info(_LI("Snapshot %(disp)s '%(new)s' is now being managed."), {'disp': display_name, 'new': new_snap_name}) # Return display name to update the name displayed in the GUI. return updates def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. existing_ref is a dictionary of the form: {'source-name': } """ target_vol_name = self._get_existing_volume_ref_name(existing_ref) # Make sure the reference is not in use. if re.match('osv-*|oss-*|vvs-*', target_vol_name): reason = _("Reference must be for an unmanaged virtual volume.") raise exception.ManageExistingInvalidReference( existing_ref=target_vol_name, reason=reason) # Check for the existence of the virtual volume. try: vol = self.client.getVolume(target_vol_name) except hpeexceptions.HTTPNotFound: err = (_("Virtual volume '%s' doesn't exist on array.") % target_vol_name) LOG.error(err) raise exception.InvalidInput(reason=err) return int(math.ceil(float(vol['sizeMiB']) / units.Ki)) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing_snapshot. existing_ref is a dictionary of the form: {'source-name': } """ target_snap_name = self._get_existing_volume_ref_name(existing_ref, is_snapshot=True) # Make sure the reference is not in use. if re.match('osv-*|oss-*|vvs-*|unm-*', target_snap_name): reason = _("Reference must be for an unmanaged snapshot.") raise exception.ManageExistingInvalidReference( existing_ref=target_snap_name, reason=reason) # Check for the existence of the snapshot. try: snap = self.client.getVolume(target_snap_name) except hpeexceptions.HTTPNotFound: err = (_("Snapshot '%s' doesn't exist on array.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) return int(math.ceil(float(snap['sizeMiB']) / units.Ki)) def unmanage(self, volume): """Removes the specified volume from Cinder management.""" # Rename the volume's name to unm-* format so that it can be # easily found later. vol_name = self._get_3par_vol_name(volume['id']) new_vol_name = self._get_3par_unm_name(volume['id']) self.client.modifyVolume(vol_name, {'newName': new_vol_name}) LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no longer managed. " "Volume renamed to '%(new)s'."), {'disp': volume['display_name'], 'vol': vol_name, 'new': new_vol_name}) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management.""" # Parent volume for the snapshot volume = snapshot['volume'] # Do not allow unmanaging of snapshots from 'failed-over' volumes. if volume.get('replication_status') == 'failed-over': err = (_("Unmanaging of snapshots from failed-over volumes is " "not allowed.")) LOG.error(err) # TODO(leeantho) Change this exception to Invalid when the volume # manager supports handling that. raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) # Rename the snapshots's name to ums-* format so that it can be # easily found later. snap_name = self._get_3par_snap_name(snapshot['id']) new_snap_name = self._get_3par_ums_name(snapshot['id']) self.client.modifyVolume(snap_name, {'newName': new_snap_name}) LOG.info(_LI("Snapshot %(disp)s '%(vol)s' is no longer managed. 
" "Snapshot renamed to '%(new)s'."), {'disp': snapshot['display_name'], 'vol': snap_name, 'new': new_snap_name}) def _get_existing_volume_ref_name(self, existing_ref, is_snapshot=False): """Returns the volume name of an existing reference. Checks if an existing volume reference has a source-name or source-id element. If source-name or source-id is not present an error will be thrown. """ vol_name = None if 'source-name' in existing_ref: vol_name = existing_ref['source-name'] elif 'source-id' in existing_ref: if is_snapshot: vol_name = self._get_3par_ums_name(existing_ref['source-id']) else: vol_name = self._get_3par_unm_name(existing_ref['source-id']) else: reason = _("Reference must contain source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return vol_name def _extend_volume(self, volume, volume_name, growth_size_mib, _convert_to_base=False): model_update = None rcg_name = self._get_3par_rcg_name(volume['id']) is_volume_replicated = self._volume_of_replicated_type(volume) try: if _convert_to_base: LOG.debug("Converting to base volume prior to growing.") model_update = self._convert_to_base_volume(volume) # If the volume is replicated and we are not failed over, # remote copy has to be stopped before the volume can be extended. failed_over = volume.get("replication_status", None) is_failed_over = failed_over == "failed-over" if is_volume_replicated and not is_failed_over: self.client.stopRemoteCopy(rcg_name) self.client.growVolume(volume_name, growth_size_mib) if is_volume_replicated and not is_failed_over: self.client.startRemoteCopy(rcg_name) except Exception as ex: # If the extend fails, we must restart remote copy. if is_volume_replicated: self.client.startRemoteCopy(rcg_name) with excutils.save_and_reraise_exception() as ex_ctxt: if (not _convert_to_base and isinstance(ex, hpeexceptions.HTTPForbidden) and ex.get_code() == 150): # Error code 150 means 'invalid operation: Cannot grow # this type of volume'. # Suppress raising this exception because we can # resolve it by converting it into a base volume. # Afterwards, extending the volume should succeed, or # fail with a different exception/error code. ex_ctxt.reraise = False model_update = self._extend_volume( volume, volume_name, growth_size_mib, _convert_to_base=True) else: LOG.error(_LE("Error extending volume: %(vol)s. " "Exception: %(ex)s"), {'vol': volume_name, 'ex': ex}) return model_update def _get_3par_vol_name(self, volume_id): """Get converted 3PAR volume name. Converts the openstack volume id from ecffc30f-98cb-4cf5-85ee-d7309cc17cd2 to osv-7P.DD5jLTPWF7tcwnMF80g We convert the 128 bits of the uuid into a 24character long base64 encoded string to ensure we don't exceed the maximum allowed 31 character name limit on 3Par We strip the padding '=' and replace + with . 
and / with - """ volume_name = self._encode_name(volume_id) return "osv-%s" % volume_name def _get_3par_snap_name(self, snapshot_id, temp_snap=False): snapshot_name = self._encode_name(snapshot_id) if temp_snap: # is this a temporary snapshot # this is done during cloning prefix = "tss-%s" else: prefix = "oss-%s" return prefix % snapshot_name def _get_3par_ums_name(self, snapshot_id): ums_name = self._encode_name(snapshot_id) return "ums-%s" % ums_name def _get_3par_vvs_name(self, volume_id): vvs_name = self._encode_name(volume_id) return "vvs-%s" % vvs_name def _get_3par_unm_name(self, volume_id): unm_name = self._encode_name(volume_id) return "unm-%s" % unm_name # v2 replication conversion def _get_3par_rcg_name(self, volume_id): rcg_name = self._encode_name(volume_id) rcg = "rcg-%s" % rcg_name return rcg[:22] def _get_3par_remote_rcg_name(self, volume_id, provider_location): return self._get_3par_rcg_name(volume_id) + ".r" + ( six.text_type(provider_location)) def _encode_name(self, name): uuid_str = name.replace("-", "") vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) vol_encoded = base64.encode_as_text(vol_uuid.bytes) # 3par doesn't allow +, nor / vol_encoded = vol_encoded.replace('+', '.') vol_encoded = vol_encoded.replace('/', '-') # strip off the == as 3par doesn't like those. vol_encoded = vol_encoded.replace('=', '') return vol_encoded def _capacity_from_size(self, vol_size): # because 3PAR volume sizes are in Mebibytes. if int(vol_size) == 0: capacity = units.Gi # default: 1GiB else: capacity = vol_size * units.Gi capacity = int(math.ceil(capacity / units.Mi)) return capacity def _delete_3par_host(self, hostname): self.client.deleteHost(hostname) def _create_3par_vlun(self, volume, hostname, nsp, lun_id=None): try: location = None auto = True if lun_id: auto = False if nsp is None: location = self.client.createVLUN(volume, hostname=hostname, auto=auto, lun=lun_id) else: port = self.build_portPos(nsp) location = self.client.createVLUN(volume, hostname=hostname, auto=auto, portPos=port, lun=lun_id) vlun_info = None if location: # The LUN id is returned as part of the location URI vlun = location.split(',') vlun_info = {'volume_name': vlun[0], 'lun_id': int(vlun[1]), 'host_name': vlun[2], } if len(vlun) > 3: vlun_info['nsp'] = vlun[3] return vlun_info except hpeexceptions.HTTPBadRequest as e: if 'must be in the same domain' in e.get_description(): LOG.error(e.get_description()) raise exception.Invalid3PARDomain(err=e.get_description()) else: raise exception.VolumeBackendAPIException( data=e.get_description()) def _safe_hostname(self, hostname): """We have to use a safe hostname length for 3PAR host names.""" try: index = hostname.index('.') except ValueError: # couldn't find it index = len(hostname) # we'll just chop this off for now. 
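# Illustrative aside: a standalone rendition of the _encode_name() scheme
# documented above, using only the standard library (the driver itself goes
# through oslo_serialization.base64.encode_as_text):

import base64 as _base64
import uuid as _uuid

def encode_volume_id(volume_id):
    """UUID -> 22-char 3PAR-safe name body ('+'->'.', '/'->'-', no '=')."""
    raw = _uuid.UUID('urn:uuid:%s' % volume_id.replace('-', '')).bytes
    text = _base64.b64encode(raw).decode('ascii')
    return text.replace('+', '.').replace('/', '-').replace('=', '')

# A 16-byte UUID always base64-encodes to 24 characters ending in '==', so
# the stripped result is 22 characters; with the 'osv-' prefix that is 26,
# safely under the 31-character 3PAR name limit mentioned above.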
if index > 31: index = 31 return hostname[:index] def _get_3par_host(self, hostname): return self.client.getHost(hostname) def get_ports(self): return self.client.getPorts() def get_active_target_ports(self): ports = self.get_ports() target_ports = [] for port in ports['members']: if ( port['mode'] == self.client.PORT_MODE_TARGET and port['linkState'] == self.client.PORT_STATE_READY ): port['nsp'] = self.build_nsp(port['portPos']) target_ports.append(port) return target_ports def get_active_fc_target_ports(self): ports = self.get_active_target_ports() fc_ports = [] for port in ports: if port['protocol'] == self.client.PORT_PROTO_FC: fc_ports.append(port) return fc_ports def get_active_iscsi_target_ports(self): ports = self.get_active_target_ports() iscsi_ports = [] for port in ports: if port['protocol'] == self.client.PORT_PROTO_ISCSI: iscsi_ports.append(port) return iscsi_ports def get_volume_stats(self, refresh, filter_function=None, goodness_function=None): if refresh: self._update_volume_stats( filter_function=filter_function, goodness_function=goodness_function) return self.stats def _update_volume_stats(self, filter_function=None, goodness_function=None): # const to convert MiB to GB const = 0.0009765625 # storage_protocol and volume_backend_name are # set in the child classes pools = [] info = self.client.getStorageSystemInfo() qos_support = True thin_support = True remotecopy_support = True if 'licenseInfo' in info: if 'licenses' in info['licenseInfo']: valid_licenses = info['licenseInfo']['licenses'] qos_support = self._check_license_enabled( valid_licenses, self.PRIORITY_OPT_LIC, "QoS_support") thin_support = self._check_license_enabled( valid_licenses, self.THIN_PROV_LIC, "Thin_provisioning_support") remotecopy_support = self._check_license_enabled( valid_licenses, self.REMOTE_COPY_LIC, "Replication") for cpg_name in self._client_conf['hpe3par_cpg']: try: cpg = self.client.getCPG(cpg_name) if (self.API_VERSION >= SRSTATLD_API_VERSION): interval = 'daily' history = '7d' stat_capabilities = self.client.getCPGStatData(cpg_name, interval, history) else: stat_capabilities = { THROUGHPUT: None, BANDWIDTH: None, LATENCY: None, IO_SIZE: None, QUEUE_LENGTH: None, AVG_BUSY_PERC: None } if 'numTDVVs' in cpg: total_volumes = int( cpg['numFPVVs'] + cpg['numTPVVs'] + cpg['numTDVVs'] ) else: total_volumes = int( cpg['numFPVVs'] + cpg['numTPVVs'] ) if 'limitMiB' not in cpg['SDGrowth']: # cpg usable free space cpg_avail_space = ( self.client.getCPGAvailableSpace(cpg_name)) free_capacity = int( cpg_avail_space['usableFreeMiB'] * const) # total_capacity is the best we can do for a limitless cpg total_capacity = int( (cpg['SDUsage']['usedMiB'] + cpg['UsrUsage']['usedMiB'] + cpg_avail_space['usableFreeMiB']) * const) else: total_capacity = int(cpg['SDGrowth']['limitMiB'] * const) free_capacity = int((cpg['SDGrowth']['limitMiB'] - (cpg['UsrUsage']['usedMiB'] + cpg['SDUsage']['usedMiB'])) * const) capacity_utilization = ( (float(total_capacity - free_capacity) / float(total_capacity)) * 100) provisioned_capacity = int((cpg['UsrUsage']['totalMiB'] + cpg['SAUsage']['totalMiB'] + cpg['SDUsage']['totalMiB']) * const) except hpeexceptions.HTTPNotFound: err = (_("CPG (%s) doesn't exist on array") % cpg_name) LOG.error(err) raise exception.InvalidInput(reason=err) pool = {'pool_name': cpg_name, 'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'provisioned_capacity_gb': provisioned_capacity, 'QoS_support': qos_support, 'thin_provisioning_support': thin_support, 
'thick_provisioning_support': True, 'max_over_subscription_ratio': ( self.config.safe_get('max_over_subscription_ratio')), 'reserved_percentage': ( self.config.safe_get('reserved_percentage')), 'location_info': ('HPE3PARDriver:%(sys_id)s:%(dest_cpg)s' % {'sys_id': info['serialNumber'], 'dest_cpg': cpg_name}), 'total_volumes': total_volumes, 'capacity_utilization': capacity_utilization, THROUGHPUT: stat_capabilities[THROUGHPUT], BANDWIDTH: stat_capabilities[BANDWIDTH], LATENCY: stat_capabilities[LATENCY], IO_SIZE: stat_capabilities[IO_SIZE], QUEUE_LENGTH: stat_capabilities[QUEUE_LENGTH], AVG_BUSY_PERC: stat_capabilities[AVG_BUSY_PERC], 'filter_function': filter_function, 'goodness_function': goodness_function, 'multiattach': True, 'consistencygroup_support': True, } if remotecopy_support: pool['replication_enabled'] = self._replication_enabled pool['replication_type'] = ['sync', 'periodic'] pool['replication_count'] = len(self._replication_targets) pools.append(pool) self.stats = {'driver_version': '3.0', 'storage_protocol': None, 'vendor_name': 'Hewlett Packard Enterprise', 'volume_backend_name': None, 'array_id': info['id'], 'replication_enabled': self._replication_enabled, 'replication_targets': self._get_replication_targets(), 'pools': pools} def _check_license_enabled(self, valid_licenses, license_to_check, capability): """Check a license against valid licenses on the array.""" if valid_licenses: for license in valid_licenses: if license_to_check in license.get('name'): return True LOG.debug(("'%(capability)s' requires a '%(license)s' " "license which is not installed.") % {'capability': capability, 'license': license_to_check}) return False def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None): """find a VLUN on a 3PAR host.""" vluns = self.client.getHostVLUNs(hostname) found_vlun = None for vlun in vluns: if volume_name in vlun['volumeName']: if lun_id is not None: if vlun['lun'] == lun_id: if nsp: port = self.build_portPos(nsp) if vlun['portPos'] == port: found_vlun = vlun break else: found_vlun = vlun break else: found_vlun = vlun break if found_vlun is None: LOG.info(_LI("3PAR vlun %(name)s not found on host %(host)s"), {'name': volume_name, 'host': hostname}) return found_vlun def create_vlun(self, volume, host, nsp=None, lun_id=None): """Create a VLUN. In order to export a volume on a 3PAR box, we have to create a VLUN. """ volume_name = self._get_3par_vol_name(volume['id']) vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp, lun_id=lun_id) return self._get_vlun(volume_name, host['name'], vlun_info['lun_id'], nsp) def delete_vlun(self, volume, hostname): volume_name = self._get_3par_vol_name(volume['id']) vluns = self.client.getHostVLUNs(hostname) # Find all the VLUNs associated with the volume. The VLUNs will then # be split into groups based on the active status of the VLUN. If there # are active VLUNs detected a delete will be attempted on them. If # there are no active VLUNs but there are inactive VLUNs, then the # inactive VLUNs will be deleted. The inactive VLUNs are the templates # on the 3PAR backend. 
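# Illustrative aside: the comment block above explains that deletion prefers
# the volume's active VLUNs and only falls back to its inactive (template)
# VLUNs when no active ones exist. A standalone sketch of that selection,
# with VLUNs as plain dicts:

def vluns_to_delete(vluns, volume_name):
    """Active VLUNs for the volume if any exist, else its inactive ones."""
    mine = [v for v in vluns if volume_name in v['volumeName']]
    active = [v for v in mine if v['active']]
    return active if active else mine

# Example: given one active and one inactive VLUN for the volume, only the
# active one is returned; with no active VLUNs, the inactive template is.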
active_volume_vluns = [] inactive_volume_vluns = [] volume_vluns = [] for vlun in vluns: if volume_name in vlun['volumeName']: if vlun['active']: active_volume_vluns.append(vlun) else: inactive_volume_vluns.append(vlun) if active_volume_vluns: volume_vluns = active_volume_vluns elif inactive_volume_vluns: volume_vluns = inactive_volume_vluns if not volume_vluns: msg = ( _LW("3PAR vlun for volume %(name)s not found on " "host %(host)s"), {'name': volume_name, 'host': hostname}) LOG.warning(msg) return # VLUN Type of MATCHED_SET 4 requires the port to be provided removed_luns = [] for vlun in volume_vluns: if self.VLUN_TYPE_MATCHED_SET == vlun['type']: self.client.deleteVLUN(volume_name, vlun['lun'], hostname, vlun['portPos']) else: # This is HOST_SEES or a type that is not MATCHED_SET. # By deleting one VLUN, all the others should be deleted, too. if vlun['lun'] not in removed_luns: self.client.deleteVLUN(volume_name, vlun['lun'], hostname) removed_luns.append(vlun['lun']) # Determine if there are other volumes attached to the host. # This will determine whether we should try removing host from host set # and deleting the host. vluns = [] try: vluns = self.client.getHostVLUNs(hostname) except hpeexceptions.HTTPNotFound: LOG.debug("All VLUNs removed from host %s", hostname) pass for vlun in vluns: if volume_name not in vlun['volumeName']: # Found another volume break else: # We deleted the last vlun, so try to delete the host too. # This check avoids the old unnecessary try/fail when vluns exist # but adds a minor race condition if a vlun is manually deleted # externally at precisely the wrong time. Worst case is leftover # host, so it is worth the unlikely risk. try: self._delete_3par_host(hostname) except Exception as ex: # Any exception down here is only logged. The vlun is deleted. # If the host is in a host set, the delete host will fail and # the host will remain in the host set. This is desired # because cinder was not responsible for the host set # assignment. The host set could be used outside of cinder # for future needs (e.g. export volume to host set). # The log info explains why the host was left alone. LOG.info(_LI("3PAR vlun for volume '%(name)s' was deleted, " "but the host '%(host)s' was not deleted " "because: %(reason)s"), {'name': volume_name, 'host': hostname, 'reason': ex.get_description()}) def _get_volume_type(self, type_id): ctxt = context.get_admin_context() return volume_types.get_volume_type(ctxt, type_id) def _get_key_value(self, hpe3par_keys, key, default=None): if hpe3par_keys is not None and key in hpe3par_keys: return hpe3par_keys[key] else: return default def _get_qos_value(self, qos, key, default=None): if key in qos: return qos[key] else: return default def _get_qos_by_volume_type(self, volume_type): qos = {} qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # NOTE(kmartin): We prefer the qos_specs association # and override any existing extra-specs settings # if present. 
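# Illustrative aside: per the NOTE above, QoS settings come either from the
# associated qos-specs object or from 'qos:'-prefixed extra specs, with the
# qos-specs association winning. A standalone sketch of the key filtering
# (the key list mirrors hpe_qos_keys defined earlier in this class):

HPE_QOS_KEYS = ('minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency',
                'priority')

def extract_qos(kvs):
    """Keep only recognized QoS keys, stripping any 'scope:' prefix."""
    qos = {}
    for key, value in kvs.items():
        if ':' in key:
            key = key.split(':')[1]
        if key in HPE_QOS_KEYS:
            qos[key] = value
    return qos

# Example: extract_qos({'qos:maxIOPS': '5000', 'other': 'x'})
# -> {'maxIOPS': '5000'}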
if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(context.get_admin_context(), qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if 'qos:' in key: fields = key.split(':') key = fields[1] if key in self.hpe_qos_keys: qos[key] = value return qos def _get_keys_by_volume_type(self, volume_type): hpe3par_keys = {} specs = volume_type.get('extra_specs') for key, value in specs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key in self.hpe3par_valid_keys: hpe3par_keys[key] = value return hpe3par_keys def _set_qos_rule(self, qos, vvs_name): min_io = self._get_qos_value(qos, 'minIOPS') max_io = self._get_qos_value(qos, 'maxIOPS') min_bw = self._get_qos_value(qos, 'minBWS') max_bw = self._get_qos_value(qos, 'maxBWS') latency = self._get_qos_value(qos, 'latency') priority = self._get_qos_value(qos, 'priority', 'normal') qosRule = {} if min_io: qosRule['ioMinGoal'] = int(min_io) if max_io is None: qosRule['ioMaxLimit'] = int(min_io) if max_io: qosRule['ioMaxLimit'] = int(max_io) if min_io is None: qosRule['ioMinGoal'] = int(max_io) if min_bw: qosRule['bwMinGoalKB'] = int(min_bw) * units.Ki if max_bw is None: qosRule['bwMaxLimitKB'] = int(min_bw) * units.Ki if max_bw: qosRule['bwMaxLimitKB'] = int(max_bw) * units.Ki if min_bw is None: qosRule['bwMinGoalKB'] = int(max_bw) * units.Ki if latency: qosRule['latencyGoal'] = int(latency) if priority: qosRule['priority'] = self.qos_priority_level.get(priority.lower()) try: self.client.createQoSRules(vvs_name, qosRule) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error creating QOS rule %s"), qosRule) def get_flash_cache_policy(self, hpe3par_keys): if hpe3par_keys is not None: # First check list of extra spec keys val = self._get_key_value(hpe3par_keys, 'flash_cache', None) if val is not None: # If requested, see if supported on back end if self.API_VERSION < FLASH_CACHE_API_VERSION: err = (_("Flash Cache Policy requires " "WSAPI version '%(fcache_version)s' " "version '%(version)s' is installed.") % {'fcache_version': FLASH_CACHE_API_VERSION, 'version': self.API_VERSION}) LOG.error(err) raise exception.InvalidInput(reason=err) else: if val.lower() == 'true': return self.client.FLASH_CACHE_ENABLED else: return self.client.FLASH_CACHE_DISABLED return None def _set_flash_cache_policy_in_vvs(self, flash_cache, vvs_name): # Update virtual volume set if flash_cache: try: self.client.modifyVolumeSet(vvs_name, flashCachePolicy=flash_cache) LOG.info(_LI("Flash Cache policy set to %s"), flash_cache) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error setting Flash Cache policy " "to %s - exception"), flash_cache) def _add_volume_to_volume_set(self, volume, volume_name, cpg, vvs_name, qos, flash_cache): if vvs_name is not None: # Admin has set a volume set name to add the volume to try: self.client.addVolumeToVolumeSet(vvs_name, volume_name) except hpeexceptions.HTTPNotFound: msg = _('VV Set %s does not exist.') % vvs_name LOG.error(msg) raise exception.InvalidInput(reason=msg) else: vvs_name = self._get_3par_vvs_name(volume['id']) domain = self.get_domain(cpg) self.client.createVolumeSet(vvs_name, domain) try: self._set_qos_rule(qos, vvs_name) self._set_flash_cache_policy_in_vvs(flash_cache, vvs_name) self.client.addVolumeToVolumeSet(vvs_name, volume_name) except Exception as ex: # Cleanup the volume set if unable to create the qos rule # or flash cache policy or add the volume to the volume set self.client.deleteVolumeSet(vvs_name) raise 
exception.CinderException(ex)

    def get_cpg(self, volume, allowSnap=False):
        volume_name = self._get_3par_vol_name(volume['id'])
        vol = self.client.getVolume(volume_name)
        if 'userCPG' in vol:
            return vol['userCPG']
        elif allowSnap:
            return vol['snapCPG']
        return None

    def _get_3par_vol_comment(self, volume_name):
        vol = self.client.getVolume(volume_name)
        if 'comment' in vol:
            return vol['comment']
        return None

    def validate_persona(self, persona_value):
        """Validate persona value.

        If the passed in persona_value is not valid, raise InvalidInput,
        otherwise return the persona ID.

        :param persona_value: persona value from the volume type extra
                              specs, e.g. '1 - Generic'
        :raises: exception.InvalidInput
        :returns: persona ID
        """
        if persona_value not in self.valid_persona_values:
            err = (_("Must specify a valid persona %(valid)s, "
                     "value '%(persona)s' is invalid.") %
                   {'valid': self.valid_persona_values,
                    'persona': persona_value})
            LOG.error(err)
            raise exception.InvalidInput(reason=err)
        # persona is set by the id so remove the text and return the id
        # i.e. for persona '1 - Generic' returns 1
        persona_id = persona_value.split(' ')
        return persona_id[0]

    def get_persona_type(self, volume, hpe3par_keys=None):
        default_persona = self.valid_persona_values[0]
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            volume_type = self._get_volume_type(type_id)
            if hpe3par_keys is None:
                hpe3par_keys = self._get_keys_by_volume_type(volume_type)
        persona_value = self._get_key_value(hpe3par_keys, 'persona',
                                            default_persona)
        return self.validate_persona(persona_value)

    def get_type_info(self, type_id):
        """Get 3PAR type info for the given type_id.

        Reconciles VV Set, old-style extra-specs, and QOS specs
        and returns commonly used info about the type.

        :returns: hpe3par_keys, qos, volume_type, vvs_name
        """
        volume_type = None
        vvs_name = None
        hpe3par_keys = {}
        qos = {}
        if type_id is not None:
            volume_type = self._get_volume_type(type_id)
            hpe3par_keys = self._get_keys_by_volume_type(volume_type)
            vvs_name = self._get_key_value(hpe3par_keys, 'vvs')
            if vvs_name is None:
                qos = self._get_qos_by_volume_type(volume_type)
        return hpe3par_keys, qos, volume_type, vvs_name

    def get_volume_settings_from_type_id(self, type_id, pool):
        """Get 3PAR volume settings given a type_id.

        Combines type info and config settings to return a dictionary
        describing the 3PAR volume settings. Does some validation (CPG).
        Uses pool as the default cpg (when not specified in volume type
        specs).

        :param type_id: id of type to get settings for
        :param pool: CPG to use if type does not have one set
        :returns: dict
        """
        hpe3par_keys, qos, volume_type, vvs_name = self.get_type_info(
            type_id)

        # Default to pool extracted from host.
        # If that doesn't work use the 1st CPG in the config as the default.
        default_cpg = pool or self._client_conf['hpe3par_cpg'][0]

        cpg = self._get_key_value(hpe3par_keys, 'cpg', default_cpg)
        # Identity check on purpose: _get_key_value() returns the
        # default_cpg object itself only when no extra spec was given, so
        # this detects that the deprecated extra spec was used at all.
        if cpg is not default_cpg:
            # The cpg was specified in a volume type extra spec so it
            # needs to be validated that it's in the correct domain.
            # log warning here
            msg = _LW("'hpe3par:cpg' is not supported as an extra spec "
                      "in a volume type. CPGs are chosen by "
                      "the cinder scheduler, as a pool, from the "
                      "cinder.conf entry 'hpe3par_cpg', which can "
                      "be a list of CPGs.")
            versionutils.report_deprecated_feature(LOG, msg)
            LOG.info(_LI("Using pool %(pool)s instead of %(cpg)s"),
                     {'pool': pool, 'cpg': cpg})
            cpg = pool
        self.validate_cpg(cpg)
        # Look to see if the snap_cpg was specified in volume type
        # extra spec, if not use hpe3par_cpg_snap from config as the
        # default.
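
# A minimal sketch of the snap CPG fallback chain described above, assuming
# the same precedence as the code that follows: volume-type extra spec,
# then the hpe3par_cpg_snap config option, then the user CPG itself.
def _resolve_snap_cpg(hpe3par_keys, config_snap_cpg, cpg):
    snap_cpg = hpe3par_keys.get('snap_cpg', config_snap_cpg)
    # An unset or empty snap CPG falls through to the user CPG.
    return snap_cpg or cpg

assert _resolve_snap_cpg({'snap_cpg': 'SNAP1'}, None, 'CPG1') == 'SNAP1'
assert _resolve_snap_cpg({}, 'CFG_SNAP', 'CPG1') == 'CFG_SNAP'
assert _resolve_snap_cpg({}, None, 'CPG1') == 'CPG1'
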
        snap_cpg = self.config.hpe3par_cpg_snap
        snap_cpg = self._get_key_value(hpe3par_keys, 'snap_cpg', snap_cpg)
        # If it's still not set or empty then set it to the cpg.
        if not snap_cpg:
            snap_cpg = cpg

        # if provisioning is not set use thin
        default_prov = self.valid_prov_values[0]
        prov_value = self._get_key_value(hpe3par_keys, 'provisioning',
                                         default_prov)
        # check for valid provisioning type
        if prov_value not in self.valid_prov_values:
            err = (_("Must specify a valid provisioning type %(valid)s, "
                     "value '%(prov)s' is invalid.") %
                   {'valid': self.valid_prov_values,
                    'prov': prov_value})
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        tpvv = True
        tdvv = False
        if prov_value == "full":
            tpvv = False
        elif prov_value == "dedup":
            tpvv = False
            tdvv = True

        if tdvv and (self.API_VERSION < DEDUP_API_VERSION):
            err = (_("Dedup is a valid provisioning type, "
                     "but requires WSAPI version '%(dedup_version)s'; "
                     "version '%(version)s' is installed.") %
                   {'dedup_version': DEDUP_API_VERSION,
                    'version': self.API_VERSION})
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        return {'hpe3par_keys': hpe3par_keys,
                'cpg': cpg, 'snap_cpg': snap_cpg,
                'vvs_name': vvs_name, 'qos': qos,
                'tpvv': tpvv, 'tdvv': tdvv,
                'volume_type': volume_type}

    def get_volume_settings_from_type(self, volume, host=None):
        """Get 3PAR volume settings given a volume.

        Combines type info and config settings to return a dictionary
        describing the 3PAR volume settings. Does some validation (CPG
        and persona).

        :param volume: A dictionary describing the volume.
        :param host: Optional host to use for default pool.
        :returns: dict
        """
        type_id = volume.get('volume_type_id', None)

        pool = None
        if host:
            pool = volume_utils.extract_host(host['host'], 'pool')
        else:
            pool = volume_utils.extract_host(volume['host'], 'pool')

        volume_settings = self.get_volume_settings_from_type_id(type_id,
                                                                pool)

        # check for a valid persona even if we don't use it until
        # attach time; this will give the end user notice that the
        # persona type is invalid at volume creation time
        self.get_persona_type(volume, volume_settings['hpe3par_keys'])

        return volume_settings

    def create_volume(self, volume):
        LOG.debug('CREATE VOLUME (%(disp_name)s: %(vol_name)s %(id)s on '
                  '%(host)s)',
                  {'disp_name': volume['display_name'],
                   'vol_name': volume['name'],
                   'id': self._get_3par_vol_name(volume['id']),
                   'host': volume['host']})
        try:
            comments = {'volume_id': volume['id'],
                        'name': volume['name'],
                        'type': 'OpenStack'}

            name = volume.get('display_name', None)
            if name:
                comments['display_name'] = name

            # get the options supported by volume types
            type_info = self.get_volume_settings_from_type(volume)
            volume_type = type_info['volume_type']
            vvs_name = type_info['vvs_name']
            qos = type_info['qos']
            cpg = type_info['cpg']
            snap_cpg = type_info['snap_cpg']
            tpvv = type_info['tpvv']
            tdvv = type_info['tdvv']
            flash_cache = self.get_flash_cache_policy(
                type_info['hpe3par_keys'])

            cg_id = volume.get('consistencygroup_id', None)
            if cg_id:
                vvs_name = self._get_3par_vvs_name(cg_id)

            type_id = volume.get('volume_type_id', None)
            if type_id is not None:
                comments['volume_type_name'] = volume_type.get('name')
                comments['volume_type_id'] = type_id
                if vvs_name is not None:
                    comments['vvs'] = vvs_name
                else:
                    comments['qos'] = qos

            extras = {'comment': json.dumps(comments),
                      'snapCPG': snap_cpg,
                      'tpvv': tpvv}

            # Only set the dedup option if the backend supports it.
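
# Hedged sketch of the provisioning mapping above: 'thin' (the default),
# 'full', and 'dedup' reduce to the (tpvv, tdvv) flag pairs used in the
# createVolume extras. The version number below is illustrative only.
PROV_FLAGS = {
    'thin': {'tpvv': True, 'tdvv': False},
    'full': {'tpvv': False, 'tdvv': False},
    'dedup': {'tpvv': False, 'tdvv': True},
}

def _prov_extras(prov_value, api_version, dedup_api_version):
    flags = PROV_FLAGS[prov_value]
    extras = {'tpvv': flags['tpvv']}
    # Only include the dedup flag when the WSAPI is new enough, as below.
    if api_version >= dedup_api_version:
        extras['tdvv'] = flags['tdvv']
    return extras

assert _prov_extras('dedup', 30201120, 30201120) == {'tpvv': False,
                                                     'tdvv': True}
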
if self.API_VERSION >= DEDUP_API_VERSION: extras['tdvv'] = tdvv capacity = self._capacity_from_size(volume['size']) volume_name = self._get_3par_vol_name(volume['id']) self.client.createVolume(volume_name, cpg, capacity, extras) if qos or vvs_name or flash_cache is not None: try: self._add_volume_to_volume_set(volume, volume_name, cpg, vvs_name, qos, flash_cache) except exception.InvalidInput as ex: # Delete the volume if unable to add it to the volume set self.client.deleteVolume(volume_name) LOG.error(_LE("Exception: %s"), ex) raise exception.CinderException(ex) # v2 replication check replication_flag = False if self._volume_of_replicated_type(volume) and ( self._do_volume_replication_setup(volume)): replication_flag = True except hpeexceptions.HTTPConflict: msg = _("Volume (%s) already exists on array") % volume_name LOG.error(msg) raise exception.Duplicate(msg) except hpeexceptions.HTTPBadRequest as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.Invalid(ex.get_description()) except exception.InvalidInput as ex: LOG.error(_LE("Exception: %s"), ex) raise except exception.CinderException as ex: LOG.error(_LE("Exception: %s"), ex) raise except Exception as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.CinderException(ex) return self._get_model_update(volume['host'], cpg, replication=replication_flag, provider_location=self.client.id) def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None, tpvv=True, tdvv=False): # Virtual volume sets are not supported with the -online option LOG.debug('Creating clone of a volume %(src)s to %(dest)s.', {'src': src_name, 'dest': dest_name}) optional = {'tpvv': tpvv, 'online': True} if snap_cpg is not None: optional['snapCPG'] = snap_cpg if self.API_VERSION >= DEDUP_API_VERSION: optional['tdvv'] = tdvv body = self.client.copyVolume(src_name, dest_name, cpg, optional) return body['taskid'] def get_next_word(self, s, search_string): """Return the next word. Search 's' for 'search_string', if found return the word preceding 'search_string' from 's'. """ word = re.search(search_string.strip(' ') + ' ([^ ]*)', s) return word.groups()[0].strip(' ') def _get_3par_vol_comment_value(self, vol_comment, key): comment_dict = dict(ast.literal_eval(vol_comment)) if key in comment_dict: return comment_dict[key] return None def _get_model_update(self, volume_host, cpg, replication=False, provider_location=None): """Get model_update dict to use when we select a pool. The pools implementation uses a volume['host'] suffix of :poolname. When the volume comes in with this selected pool, we sometimes use a different pool (e.g. because the type says to use a different pool). So in the several places that we do this, we need to return a model update so that the volume will have the actual pool name in the host suffix after the operation. Given a volume_host, which should (might) have the pool suffix, and given the CPG we actually chose to use, return a dict to use for a model update iff an update is needed. :param volume_host: The volume's host string. :param cpg: The actual pool (cpg) used, for example from the type. :returns: dict Model update if we need to update volume host, else None """ model_update = {} host = volume_utils.extract_host(volume_host, 'backend') host_and_pool = volume_utils.append_host(host, cpg) if volume_host != host_and_pool: # Since we selected a pool based on type, update the model. 
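
# Illustrative aside on the pool-suffix convention used by the model update
# above: a volume host string looks like 'node@backend#pool'. The helpers
# here are simplified stand-ins for cinder's volume_utils functions, shown
# only to make the rewrite visible.
def _extract_backend(volume_host):
    return volume_host.split('#')[0]

def _append_pool(host, pool):
    return '%s#%s' % (host, pool)

# The type said to use CPG_B, so the model update rewrites the suffix:
assert _append_pool(_extract_backend('node@3par#CPG_A'), 'CPG_B') == \
    'node@3par#CPG_B'
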
model_update['host'] = host_and_pool if replication: model_update['replication_status'] = 'enabled' if replication and provider_location: model_update['provider_location'] = provider_location if not model_update: model_update = None return model_update def _create_temp_snapshot(self, volume): """This creates a temporary snapshot of a volume. This is used by cloning a volume so that we can then issue extend volume against the original volume. """ vol_name = self._get_3par_vol_name(volume['id']) # create a brand new uuid for the temp snap snap_uuid = uuid.uuid4().hex # this will be named tss-%s snap_name = self._get_3par_snap_name(snap_uuid, temp_snap=True) extra = {'volume_name': volume['name'], 'volume_id': volume['id']} optional = {'comment': json.dumps(extra)} # let the snapshot die in an hour optional['expirationHours'] = 1 LOG.info(_LI("Creating temp snapshot %(snap)s from volume %(vol)s"), {'snap': snap_name, 'vol': vol_name}) self.client.createSnapshot(snap_name, vol_name, optional) return self.client.getVolume(snap_name) def create_cloned_volume(self, volume, src_vref): try: vol_name = self._get_3par_vol_name(volume['id']) src_vol_name = self._get_3par_vol_name(src_vref['id']) # if the sizes of the 2 volumes are the same # we can do an online copy, which is a background process # on the 3PAR that makes the volume instantly available. # We can't resize a volume, while it's being copied. if volume['size'] == src_vref['size']: LOG.debug("Creating a clone of same size, using online copy.") # create a temporary snapshot snapshot = self._create_temp_snapshot(src_vref) type_info = self.get_volume_settings_from_type(volume) cpg = type_info['cpg'] # make the 3PAR copy the contents. # can't delete the original until the copy is done. self._copy_volume(snapshot['name'], vol_name, cpg=cpg, snap_cpg=type_info['snap_cpg'], tpvv=type_info['tpvv'], tdvv=type_info['tdvv']) # v2 replication check replication_flag = False if self._volume_of_replicated_type(volume) and ( self._do_volume_replication_setup(volume)): replication_flag = True return self._get_model_update(volume['host'], cpg, replication=replication_flag, provider_location=self.client.id) else: # The size of the new volume is different, so we have to # copy the volume and wait. Do the resize after the copy # is complete. LOG.debug("Clone a volume with a different target size. 
" "Using non-online copy.") # we first have to create the destination volume model_update = self.create_volume(volume) optional = {'priority': 1} body = self.client.copyVolume(src_vol_name, vol_name, None, optional=optional) task_id = body['taskid'] task_status = self._wait_for_task_completion(task_id) if task_status['status'] is not self.client.TASK_DONE: dbg = {'status': task_status, 'id': volume['id']} msg = _('Copy volume task failed: create_cloned_volume ' 'id=%(id)s, status=%(status)s.') % dbg raise exception.CinderException(msg) else: LOG.debug('Copy volume completed: create_cloned_volume: ' 'id=%s.', volume['id']) return model_update except hpeexceptions.HTTPForbidden: raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound: raise exception.NotFound() except Exception as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.CinderException(ex) def delete_volume(self, volume): # v2 replication check # If the volume type is replication enabled, we want to call our own # method of deconstructing the volume and its dependencies if self._volume_of_replicated_type(volume): replication_status = volume.get('replication_status', None) if replication_status and replication_status == "failed-over": self._delete_replicated_failed_over_volume(volume) else: self._do_volume_replication_destroy(volume) return try: volume_name = self._get_3par_vol_name(volume['id']) # Try and delete the volume, it might fail here because # the volume is part of a volume set which will have the # volume set name in the error. try: self.client.deleteVolume(volume_name) except hpeexceptions.HTTPBadRequest as ex: if ex.get_code() == 29: if self.client.isOnlinePhysicalCopy(volume_name): LOG.debug("Found an online copy for %(volume)s", {'volume': volume_name}) # the volume is in process of being cloned. # stopOnlinePhysicalCopy will also delete # the volume once it stops the copy. self.client.stopOnlinePhysicalCopy(volume_name) else: LOG.error(_LE("Exception: %s"), ex) raise else: LOG.error(_LE("Exception: %s"), ex) raise except hpeexceptions.HTTPConflict as ex: if ex.get_code() == 34: # This is a special case which means the # volume is part of a volume set. vvset_name = self.client.findVolumeSet(volume_name) LOG.debug("Returned vvset_name = %s", vvset_name) if vvset_name is not None and \ vvset_name.startswith('vvs-'): # We have a single volume per volume set, so # remove the volume set. self.client.deleteVolumeSet( self._get_3par_vvs_name(volume['id'])) elif vvset_name is not None: # We have a pre-defined volume set just remove the # volume and leave the volume set. self.client.removeVolumeFromVolumeSet(vvset_name, volume_name) self.client.deleteVolume(volume_name) elif (ex.get_code() == 151): # the volume is being operated on in a background # task on the 3PAR. # TODO(walter-boring) do a retry a few times. # for now lets log a better message msg = _("The volume is currently busy on the 3PAR" " and cannot be deleted at this time. " "You can try again later.") LOG.error(msg) raise exception.VolumeIsBusy(message=msg) elif (ex.get_code() == 32): # Error 32 means that the volume has children # see if we have any temp snapshots snaps = self.client.getVolumeSnapshots(volume_name) for snap in snaps: if snap.startswith('tss-'): # looks like we found a temp snapshot. 
LOG.info( _LI("Found a temporary snapshot %(name)s"), {'name': snap}) try: self.client.deleteVolume(snap) except hpeexceptions.HTTPNotFound: # if the volume is gone, it's as good as a # successful delete pass except Exception: msg = _("Volume has a temporary snapshot that " "can't be deleted at this time.") raise exception.VolumeIsBusy(message=msg) try: self.delete_volume(volume) except Exception: msg = _("Volume has children and cannot be deleted!") raise exception.VolumeIsBusy(message=msg) else: LOG.error(_LE("Exception: %s"), ex) raise exception.VolumeIsBusy(message=ex.get_description()) except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. LOG.warning(_LW("Delete volume id not found. Removing from " "cinder: %(id)s Ex: %(msg)s"), {'id': volume['id'], 'msg': ex}) except hpeexceptions.HTTPForbidden as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.NotAuthorized(ex.get_description()) except hpeexceptions.HTTPConflict as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.VolumeIsBusy(message=ex.get_description()) except Exception as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.CinderException(ex) def create_volume_from_snapshot(self, volume, snapshot, snap_name=None, vvs_name=None): """Creates a volume from a snapshot.""" LOG.debug("Create Volume from Snapshot\n%(vol_name)s\n%(ss_name)s", {'vol_name': pprint.pformat(volume['display_name']), 'ss_name': pprint.pformat(snapshot['display_name'])}) model_update = {} if volume['size'] < snapshot['volume_size']: err = ("You cannot reduce size of the volume. It must " "be greater than or equal to the snapshot.") LOG.error(err) raise exception.InvalidInput(reason=err) try: if not snap_name: snap_name = self._get_3par_snap_name(snapshot['id']) volume_name = self._get_3par_vol_name(volume['id']) extra = {'volume_id': volume['id'], 'snapshot_id': snapshot['id']} type_id = volume.get('volume_type_id', None) hpe3par_keys, qos, _volume_type, vvs = self.get_type_info( type_id) if vvs: vvs_name = vvs name = volume.get('display_name', None) if name: extra['display_name'] = name description = volume.get('display_description', None) if description: extra['description'] = description optional = {'comment': json.dumps(extra), 'readOnly': False} self.client.createSnapshot(volume_name, snap_name, optional) # Grow the snapshot if needed growth_size = volume['size'] - snapshot['volume_size'] if growth_size > 0: try: LOG.debug('Converting to base volume type: %s.', volume['id']) model_update = self._convert_to_base_volume(volume) growth_size_mib = growth_size * units.Gi / units.Mi LOG.debug('Growing volume: %(id)s by %(size)s GiB.', {'id': volume['id'], 'size': growth_size}) self.client.growVolume(volume_name, growth_size_mib) except Exception as ex: LOG.error(_LE("Error extending volume %(id)s. 
" "Ex: %(ex)s"), {'id': volume['id'], 'ex': ex}) # Delete the volume if unable to grow it self.client.deleteVolume(volume_name) raise exception.CinderException(ex) # Check for flash cache setting in extra specs flash_cache = self.get_flash_cache_policy(hpe3par_keys) if qos or vvs_name or flash_cache is not None: cpg_names = self._get_key_value( hpe3par_keys, 'cpg', self._client_conf['hpe3par_cpg']) try: self._add_volume_to_volume_set(volume, volume_name, cpg_names[0], vvs_name, qos, flash_cache) except Exception as ex: # Delete the volume if unable to add it to the volume set self.client.deleteVolume(volume_name) LOG.error(_LE("Exception: %s"), ex) raise exception.CinderException(ex) # v2 replication check if self._volume_of_replicated_type(volume) and ( self._do_volume_replication_setup(volume)): model_update['replication_status'] = 'enabled' model_update['provider_location'] = self.client.id except hpeexceptions.HTTPForbidden as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.NotFound() except Exception as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.CinderException(ex) return model_update def create_snapshot(self, snapshot): LOG.debug("Create Snapshot\n%s", pprint.pformat(snapshot)) try: snap_name = self._get_3par_snap_name(snapshot['id']) vol_name = self._get_3par_vol_name(snapshot['volume_id']) extra = {'volume_name': snapshot['volume_name']} vol_id = snapshot.get('volume_id', None) if vol_id: extra['volume_id'] = vol_id try: extra['display_name'] = snapshot['display_name'] except AttributeError: pass try: extra['description'] = snapshot['display_description'] except AttributeError: pass optional = {'comment': json.dumps(extra), 'readOnly': True} if self.config.hpe3par_snapshot_expiration: optional['expirationHours'] = ( int(self.config.hpe3par_snapshot_expiration)) if self.config.hpe3par_snapshot_retention: optional['retentionHours'] = ( int(self.config.hpe3par_snapshot_retention)) self.client.createSnapshot(snap_name, vol_name, optional) except hpeexceptions.HTTPForbidden as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.NotFound() def update_volume_key_value_pair(self, volume, key, value): """Updates key,value pair as metadata onto virtual volume. If key already exists, the value will be replaced. 
""" LOG.debug("VOLUME (%(disp_name)s : %(vol_name)s %(id)s) " "Updating KEY-VALUE pair: (%(key)s : %(val)s)", {'disp_name': volume['display_name'], 'vol_name': volume['name'], 'id': self._get_3par_vol_name(volume['id']), 'key': key, 'val': value}) try: volume_name = self._get_3par_vol_name(volume['id']) if value is None: value = '' self.client.setVolumeMetaData(volume_name, key, value) except Exception as ex: msg = _('Failure in update_volume_key_value_pair:%s') % ex LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def clear_volume_key_value_pair(self, volume, key): """Clears key,value pairs metadata from virtual volume.""" LOG.debug("VOLUME (%(disp_name)s : %(vol_name)s %(id)s) " "Clearing Key : %(key)s)", {'disp_name': volume['display_name'], 'vol_name': volume['name'], 'id': self._get_3par_vol_name(volume['id']), 'key': key}) try: volume_name = self._get_3par_vol_name(volume['id']) self.client.removeVolumeMetaData(volume_name, key) except Exception as ex: LOG.warning(_LW('Issue occurred in clear_volume_key_value_pair: ' '%s'), six.text_type(ex)) def attach_volume(self, volume, instance_uuid): """Save the instance UUID in the volume. TODO: add support for multi-attach """ LOG.debug("Attach Volume\n%s", pprint.pformat(volume)) try: self.update_volume_key_value_pair(volume, 'HPQ-CS-instance_uuid', instance_uuid) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error attaching volume %s"), volume) def detach_volume(self, volume, attachment=None): """Remove the instance uuid from the volume. TODO: add support for multi-attach. """ LOG.debug("Detach Volume\n%s", pprint.pformat(volume)) try: self.clear_volume_key_value_pair(volume, 'HPQ-CS-instance_uuid') except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error detaching volume %s"), volume) def migrate_volume(self, volume, host): """Migrate directly if source and dest are managed by same storage. :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. :returns: (False, None) if the driver does not support migration, (True, model_update) if successful """ dbg = {'id': volume['id'], 'host': host['host'], 'status': volume['status']} LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, ' 'status=%(status)s.', dbg) ret = False, None if volume['status'] in ['available', 'in-use']: volume_type = None if volume['volume_type_id']: volume_type = self._get_volume_type(volume['volume_type_id']) try: ret = self.retype(volume, volume_type, None, host) except Exception as e: LOG.info(_LI('3PAR driver cannot perform migration. ' 'Retype exception: %s'), e) LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s, ' 'status=%(status)s.', dbg) dbg_ret = {'supported': ret[0], 'model_update': ret[1]} LOG.debug('migrate_volume result: %(supported)s, %(model_update)s', dbg_ret) return ret def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Rename the new (temp) volume to it's original name. This method tries to rename the new volume to it's original name after the migration has completed. 
""" LOG.debug("Update volume name for %(id)s", {'id': new_volume['id']}) name_id = None provider_location = None if original_volume_status == 'available': # volume isn't attached and can be updated original_name = self._get_3par_vol_name(volume['id']) current_name = self._get_3par_vol_name(new_volume['id']) try: volumeMods = {'newName': original_name} self.client.modifyVolume(current_name, volumeMods) LOG.info(_LI("Volume name changed from %(tmp)s to %(orig)s"), {'tmp': current_name, 'orig': original_name}) except Exception as e: LOG.error(_LE("Changing the volume name from %(tmp)s to " "%(orig)s failed because %(reason)s"), {'tmp': current_name, 'orig': original_name, 'reason': e}) name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] else: # the backend can't change the name. name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': provider_location} def _wait_for_task_completion(self, task_id): """This waits for a 3PAR background task complete or fail. This looks for a task to get out of the 'active' state. """ # Wait for the physical copy task to complete def _wait_for_task(task_id): status = self.client.getTask(task_id) LOG.debug("3PAR Task id %(id)s status = %(status)s", {'id': task_id, 'status': status['status']}) if status['status'] is not self.client.TASK_ACTIVE: self._task_status = status raise loopingcall.LoopingCallDone() self._task_status = None timer = loopingcall.FixedIntervalLoopingCall( _wait_for_task, task_id) timer.start(interval=1).wait() return self._task_status def _convert_to_base_volume(self, volume, new_cpg=None): try: type_info = self.get_volume_settings_from_type(volume) if new_cpg: cpg = new_cpg else: cpg = type_info['cpg'] # Change the name such that it is unique since 3PAR # names must be unique across all CPGs volume_name = self._get_3par_vol_name(volume['id']) temp_vol_name = volume_name.replace("osv-", "omv-") # Create a physical copy of the volume task_id = self._copy_volume(volume_name, temp_vol_name, cpg, cpg, type_info['tpvv'], type_info['tdvv']) LOG.debug('Copy volume scheduled: convert_to_base_volume: ' 'id=%s.', volume['id']) task_status = self._wait_for_task_completion(task_id) if task_status['status'] is not self.client.TASK_DONE: dbg = {'status': task_status, 'id': volume['id']} msg = _('Copy volume task failed: convert_to_base_volume: ' 'id=%(id)s, status=%(status)s.') % dbg raise exception.CinderException(msg) else: LOG.debug('Copy volume completed: convert_to_base_volume: ' 'id=%s.', volume['id']) comment = self._get_3par_vol_comment(volume_name) if comment: self.client.modifyVolume(temp_vol_name, {'comment': comment}) LOG.debug('Volume rename completed: convert_to_base_volume: ' 'id=%s.', volume['id']) # Delete source volume after the copy is complete self.client.deleteVolume(volume_name) LOG.debug('Delete src volume completed: convert_to_base_volume: ' 'id=%s.', volume['id']) # Rename the new volume to the original name self.client.modifyVolume(temp_vol_name, {'newName': volume_name}) LOG.info(_LI('Completed: convert_to_base_volume: ' 'id=%s.'), volume['id']) except hpeexceptions.HTTPConflict: msg = _("Volume (%s) already exists on array.") % volume_name LOG.error(msg) raise exception.Duplicate(msg) except hpeexceptions.HTTPBadRequest as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.Invalid(ex.get_description()) except exception.InvalidInput as ex: LOG.error(_LE("Exception: %s"), ex) 
raise except exception.CinderException as ex: LOG.error(_LE("Exception: %s"), ex) raise except Exception as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.CinderException(ex) return self._get_model_update(volume['host'], cpg) def delete_snapshot(self, snapshot): LOG.debug("Delete Snapshot id %(id)s %(name)s", {'id': snapshot['id'], 'name': pprint.pformat(snapshot)}) try: snap_name = self._get_3par_snap_name(snapshot['id']) self.client.deleteVolume(snap_name) except hpeexceptions.HTTPForbidden as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. LOG.warning(_LW("Delete Snapshot id not found. Removing from " "cinder: %(id)s Ex: %(msg)s"), {'id': snapshot['id'], 'msg': ex}) except hpeexceptions.HTTPConflict as ex: LOG.error(_LE("Exception: %s"), ex) raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns): if wwns is not None and not isinstance(wwns, list): wwns = [wwns] if iqns is not None and not isinstance(iqns, list): iqns = [iqns] out = self.client.getHosts() hosts = out['members'] for host in hosts: if 'iSCSIPaths' in host and iqns is not None: iscsi_paths = host['iSCSIPaths'] for iscsi in iscsi_paths: for iqn in iqns: if iqn == iscsi['name']: return host['name'] if 'FCPaths' in host and wwns is not None: fc_paths = host['FCPaths'] for fc in fc_paths: for wwn in wwns: if wwn.upper() == fc['wwn'].upper(): return host['name'] def terminate_connection(self, volume, hostname, wwn=None, iqn=None): """Driver entry point to unattach a volume from an instance.""" # does 3par know this host by a different name? hosts = None if wwn: hosts = self.client.queryHost(wwns=wwn) elif iqn: hosts = self.client.queryHost(iqns=[iqn]) if hosts and hosts['members'] and 'name' in hosts['members'][0]: hostname = hosts['members'][0]['name'] try: self.delete_vlun(volume, hostname) return except hpeexceptions.HTTPNotFound as e: if 'host does not exist' in e.get_description(): # use the wwn to see if we can find the hostname hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn) # no 3par host, re-throw if hostname is None: LOG.error(_LE("Exception: %s"), e) raise else: # not a 'host does not exist' HTTPNotFound exception, re-throw LOG.error(_LE("Exception: %s"), e) raise # try again with name retrieved from 3par self.delete_vlun(volume, hostname) def build_nsp(self, portPos): return '%s:%s:%s' % (portPos['node'], portPos['slot'], portPos['cardPort']) def build_portPos(self, nsp): split = nsp.split(":") portPos = {} portPos['node'] = int(split[0]) portPos['slot'] = int(split[1]) portPos['cardPort'] = int(split[2]) return portPos def tune_vv(self, old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_cpg, new_cpg, volume_name): """Tune the volume to change the userCPG and/or provisioningType. The volume will be modified/tuned/converted to the new userCPG and provisioningType, as needed. TaskWaiter is used to make this function wait until the 3PAR task is no longer active. When the task is no longer active, then it must either be done or it is in a state that we need to treat as an error. 
""" if old_tpvv == new_tpvv and old_tdvv == new_tdvv: if new_cpg != old_cpg: LOG.info(_LI("Modifying %(volume_name)s userCPG " "from %(old_cpg)s" " to %(new_cpg)s"), {'volume_name': volume_name, 'old_cpg': old_cpg, 'new_cpg': new_cpg}) _response, body = self.client.modifyVolume( volume_name, {'action': 6, 'tuneOperation': 1, 'userCPG': new_cpg}) task_id = body['taskid'] status = self.TaskWaiter(self.client, task_id).wait_for_task() if status['status'] is not self.client.TASK_DONE: msg = (_('Tune volume task stopped before it was done: ' 'volume_name=%(volume_name)s, ' 'task-status=%(status)s.') % {'status': status, 'volume_name': volume_name}) raise exception.VolumeBackendAPIException(msg) else: if new_tpvv: cop = self.CONVERT_TO_THIN LOG.info(_LI("Converting %(volume_name)s to thin provisioning " "with userCPG=%(new_cpg)s"), {'volume_name': volume_name, 'new_cpg': new_cpg}) elif new_tdvv: cop = self.CONVERT_TO_DEDUP LOG.info(_LI("Converting %(volume_name)s to thin dedup " "provisioning with userCPG=%(new_cpg)s"), {'volume_name': volume_name, 'new_cpg': new_cpg}) else: cop = self.CONVERT_TO_FULL LOG.info(_LI("Converting %(volume_name)s to full provisioning " "with userCPG=%(new_cpg)s"), {'volume_name': volume_name, 'new_cpg': new_cpg}) try: response, body = self.client.modifyVolume( volume_name, {'action': 6, 'tuneOperation': 1, 'userCPG': new_cpg, 'conversionOperation': cop}) except hpeexceptions.HTTPBadRequest as ex: if ex.get_code() == 40 and "keepVV" in six.text_type(ex): # Cannot retype with snapshots because we don't want to # use keepVV and have straggling volumes. Log additional # info and then raise. LOG.info(_LI("tunevv failed because the volume '%s' " "has snapshots."), volume_name) raise task_id = body['taskid'] status = self.TaskWaiter(self.client, task_id).wait_for_task() if status['status'] is not self.client.TASK_DONE: msg = (_('Tune volume task stopped before it was done: ' 'volume_name=%(volume_name)s, ' 'task-status=%(status)s.') % {'status': status, 'volume_name': volume_name}) raise exception.VolumeBackendAPIException(msg) def _retype_pre_checks(self, volume, host, new_persona, old_cpg, new_cpg, new_snap_cpg): """Test retype parameters before making retype changes. Do pre-retype parameter validation. These checks will raise an exception if we should not attempt this retype. """ if new_persona: self.validate_persona(new_persona) if host is not None: (host_type, host_id, _host_cpg) = ( host['capabilities']['location_info']).split(':') if not (host_type == 'HPE3PARDriver'): reason = (_("Cannot retype from HPE3PARDriver to %s.") % host_type) raise exception.InvalidHost(reason) sys_info = self.client.getStorageSystemInfo() if not (host_id == sys_info['serialNumber']): reason = (_("Cannot retype from one 3PAR array to another.")) raise exception.InvalidHost(reason) # Validate new_snap_cpg. A white-space snapCPG will fail eventually, # but we'd prefer to fail fast -- if this ever happens. if not new_snap_cpg or new_snap_cpg.isspace(): reason = (_("Invalid new snapCPG name for retype. 
" "new_snap_cpg='%s'.") % new_snap_cpg) raise exception.InvalidInput(reason) # Check to make sure CPGs are in the same domain domain = self.get_domain(old_cpg) if domain != self.get_domain(new_cpg): reason = (_('Cannot retype to a CPG in a different domain.')) raise exception.Invalid3PARDomain(reason) if domain != self.get_domain(new_snap_cpg): reason = (_('Cannot retype to a snap CPG in a different domain.')) raise exception.Invalid3PARDomain(reason) def _retype(self, volume, volume_name, new_type_name, new_type_id, host, new_persona, old_cpg, new_cpg, old_snap_cpg, new_snap_cpg, old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_vvs, new_vvs, old_qos, new_qos, old_flash_cache, new_flash_cache, old_comment): action = "volume:retype" self._retype_pre_checks(volume, host, new_persona, old_cpg, new_cpg, new_snap_cpg) flow_name = action.replace(":", "_") + "_api" retype_flow = linear_flow.Flow(flow_name) # Keep this linear and do the big tunevv last. Everything leading # up to that is reversible, but we'd let the 3PAR deal with tunevv # errors on its own. retype_flow.add( ModifyVolumeTask(action), ModifySpecsTask(action), TuneVolumeTask(action)) taskflow.engines.run( retype_flow, store={'common': self, 'volume_name': volume_name, 'volume': volume, 'old_tpvv': old_tpvv, 'new_tpvv': new_tpvv, 'old_tdvv': old_tdvv, 'new_tdvv': new_tdvv, 'old_cpg': old_cpg, 'new_cpg': new_cpg, 'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg, 'old_vvs': old_vvs, 'new_vvs': new_vvs, 'old_qos': old_qos, 'new_qos': new_qos, 'old_flash_cache': old_flash_cache, 'new_flash_cache': new_flash_cache, 'new_type_name': new_type_name, 'new_type_id': new_type_id, 'old_comment': old_comment }) def _retype_from_old_to_new(self, volume, new_type, old_volume_settings, host): """Convert the volume to be of the new type. Given old type settings. Returns True if the retype was successful. Uses taskflow to revert changes if errors occur. :param volume: A dictionary describing the volume to retype :param new_type: A dictionary describing the volume type to convert to :param old_volume_settings: Volume settings describing the old type. :param host: A dictionary describing the host, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. Host validation is just skipped if host is None. 
""" volume_id = volume['id'] volume_name = self._get_3par_vol_name(volume_id) new_type_name = None new_type_id = None if new_type: new_type_name = new_type['name'] new_type_id = new_type['id'] pool = None if host: pool = volume_utils.extract_host(host['host'], 'pool') else: pool = volume_utils.extract_host(volume['host'], 'pool') new_volume_settings = self.get_volume_settings_from_type_id( new_type_id, pool) new_cpg = new_volume_settings['cpg'] new_snap_cpg = new_volume_settings['snap_cpg'] new_tpvv = new_volume_settings['tpvv'] new_tdvv = new_volume_settings['tdvv'] new_qos = new_volume_settings['qos'] new_vvs = new_volume_settings['vvs_name'] new_persona = None new_hpe3par_keys = new_volume_settings['hpe3par_keys'] if 'persona' in new_hpe3par_keys: new_persona = new_hpe3par_keys['persona'] new_flash_cache = self.get_flash_cache_policy(new_hpe3par_keys) old_qos = old_volume_settings['qos'] old_vvs = old_volume_settings['vvs_name'] old_hpe3par_keys = old_volume_settings['hpe3par_keys'] old_flash_cache = self.get_flash_cache_policy(old_hpe3par_keys) # Get the current volume info because we can get in a bad state # if we trust that all the volume type settings are still the # same settings that were used with this volume. old_volume_info = self.client.getVolume(volume_name) old_tpvv = old_volume_info['provisioningType'] == self.THIN old_tdvv = old_volume_info['provisioningType'] == self.DEDUP old_cpg = old_volume_info['userCPG'] old_comment = old_volume_info['comment'] old_snap_cpg = None if 'snapCPG' in old_volume_info: old_snap_cpg = old_volume_info['snapCPG'] LOG.debug("retype old_volume_info=%s", old_volume_info) LOG.debug("retype old_volume_settings=%s", old_volume_settings) LOG.debug("retype new_volume_settings=%s", new_volume_settings) self._retype(volume, volume_name, new_type_name, new_type_id, host, new_persona, old_cpg, new_cpg, old_snap_cpg, new_snap_cpg, old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_vvs, new_vvs, old_qos, new_qos, old_flash_cache, new_flash_cache, old_comment) if host: return True, self._get_model_update(host['host'], new_cpg) else: return True, self._get_model_update(volume['host'], new_cpg) def _retype_from_no_type(self, volume, new_type): """Convert the volume to be of the new type. Starting from no type. Returns True if the retype was successful. Uses taskflow to revert changes if errors occur. :param volume: A dictionary describing the volume to retype. Except the volume-type is not used here. This method uses None. :param new_type: A dictionary describing the volume type to convert to """ pool = volume_utils.extract_host(volume['host'], 'pool') none_type_settings = self.get_volume_settings_from_type_id(None, pool) return self._retype_from_old_to_new(volume, new_type, none_type_settings, None) def retype(self, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns True if the retype was successful. Uses taskflow to revert changes if errors occur. :param volume: A dictionary describing the volume to retype :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. Host validation is just skipped if host is None. 
""" LOG.debug(("enter: retype: id=%(id)s, new_type=%(new_type)s," "diff=%(diff)s, host=%(host)s"), {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host}) old_volume_settings = self.get_volume_settings_from_type(volume, host) return self._retype_from_old_to_new(volume, new_type, old_volume_settings, host) def find_existing_vlun(self, volume, host): """Finds an existing VLUN for a volume on a host. Returns an existing VLUN's information. If no existing VLUN is found, None is returned. :param volume: A dictionary describing a volume. :param host: A dictionary describing a host. """ existing_vlun = None try: vol_name = self._get_3par_vol_name(volume['id']) host_vluns = self.client.getHostVLUNs(host['name']) # The first existing VLUN found will be returned. for vlun in host_vluns: if vlun['volumeName'] == vol_name: existing_vlun = vlun break except hpeexceptions.HTTPNotFound: # ignore, no existing VLUNs were found LOG.debug("No existing VLUNs were found for host/volume " "combination: %(host)s, %(vol)s", {'host': host['name'], 'vol': vol_name}) pass return existing_vlun def find_existing_vluns(self, volume, host): existing_vluns = [] try: vol_name = self._get_3par_vol_name(volume['id']) host_vluns = self.client.getHostVLUNs(host['name']) for vlun in host_vluns: if vlun['volumeName'] == vol_name: existing_vluns.append(vlun) except hpeexceptions.HTTPNotFound: # ignore, no existing VLUNs were found LOG.debug("No existing VLUNs were found for host/volume " "combination: %(host)s, %(vol)s", {'host': host['name'], 'vol': vol_name}) pass return existing_vluns # v2 replication methods def failover_host(self, context, volumes, secondary_backend_id): """Force failover to a secondary replication target.""" # Ensure replication is enabled before we try and failover. if not self._replication_enabled: msg = _LE("Issuing a fail-over failed because replication is " "not properly configured.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Check to see if the user requested to failback. if secondary_backend_id == self.FAILBACK_VALUE: volume_update_list = self._replication_failback(volumes) target_id = None else: # Find the failover target. failover_target = None for target in self._replication_targets: if target['backend_id'] == secondary_backend_id: failover_target = target break if not failover_target: msg = _("A valid secondary target MUST be specified in order " "to failover.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) target_id = failover_target['backend_id'] # For each volume, if it is replicated, we want to fail it over. volume_update_list = [] for volume in volumes: if self._volume_of_replicated_type(volume): try: # Try and stop remote-copy on main array. We eat the # exception here because when an array goes down, the # groups will stop automatically. rcg_name = self._get_3par_rcg_name(volume['id']) self.client.stopRemoteCopy(rcg_name) except Exception: pass try: # Failover to secondary array. remote_rcg_name = self._get_3par_remote_rcg_name( volume['id'], volume['provider_location']) cl = self._create_replication_client(failover_target) cl.recoverRemoteCopyGroupFromDisaster( remote_rcg_name, self.RC_ACTION_CHANGE_TO_PRIMARY) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'failed-over'}}) except Exception as ex: msg = (_LE("There was a problem with the failover " "(%(error)s) and it was unsuccessful. 
" "Volume '%(volume)s will not be available " "on the failed over target."), {'error': six.text_type(ex), 'volume': volume['id']}) LOG.error(msg) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'error'}}) finally: self._destroy_replication_client(cl) else: # If the volume is not of replicated type, we need to # force the status into error state so a user knows they # do not have access to the volume. volume_update_list.append( {'volume_id': volume['id'], 'updates': {'status': 'error'}}) return target_id, volume_update_list def _replication_failback(self, volumes): # Make sure the proper steps on the backend have been completed before # we allow a fail-over. if not self._is_host_ready_for_failback(volumes): msg = _("The host is not ready to be failed back. Please " "resynchronize the volumes and resume replication on the " "3PAR backends.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) # Update the volumes status to available. volume_update_list = [] for volume in volumes: if self._volume_of_replicated_type(volume): volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'available'}}) else: # Upon failing back, we can move the non-replicated volumes # back into available state. volume_update_list.append( {'volume_id': volume['id'], 'updates': {'status': 'available'}}) return volume_update_list def _is_host_ready_for_failback(self, volumes): """Checks to make sure the volume has been synchronized This ensures that all the remote copy targets have been restored to their natural direction, and all of the volumes have been fully synchronized. """ try: for volume in volumes: if self._volume_of_replicated_type(volume): location = volume.get('provider_location') remote_rcg_name = self._get_3par_remote_rcg_name( volume['id'], location) rcg = self.client.getRemoteCopyGroup(remote_rcg_name) # Make sure all targets are in their natural direction. targets = rcg['targets'] for target in targets: if target['roleReversed'] or ( target['state'] != self.RC_GROUP_STARTED): return False # Make sure all volumes are fully synced. volumes = rcg['volumes'] for volume in volumes: remote_volumes = volume['remoteVolumes'] for remote_volume in remote_volumes: if remote_volume['syncStatus'] != ( self.SYNC_STATUS_COMPLETED): return False except Exception: # If there was a problem, we will return false so we can # log an error in the parent function. 
return False return True def _do_replication_setup(self): replication_targets = [] replication_devices = self.config.replication_device if replication_devices: for dev in replication_devices: remote_array = dict(dev.items()) # Override and set defaults for certain entries remote_array['managed_backend_name'] = ( dev.get('managed_backend_name')) remote_array['replication_mode'] = ( self._get_remote_copy_mode_num( dev.get('replication_mode'))) remote_array['san_ssh_port'] = ( dev.get('san_ssh_port', self.config.san_ssh_port)) remote_array['ssh_conn_timeout'] = ( dev.get('ssh_conn_timeout', self.config.ssh_conn_timeout)) remote_array['san_private_key'] = ( dev.get('san_private_key', self.config.san_private_key)) # Format iscsi IPs correctly iscsi_ips = dev.get('hpe3par_iscsi_ips') if iscsi_ips: remote_array['hpe3par_iscsi_ips'] = iscsi_ips.split(' ') # Format hpe3par_iscsi_chap_enabled as a bool remote_array['hpe3par_iscsi_chap_enabled'] = ( dev.get('hpe3par_iscsi_chap_enabled') == 'True') array_name = remote_array['backend_id'] # Make sure we can log into the array, that it has been # correctly configured, and its API version meets the # minimum requirement. cl = None try: cl = self._create_replication_client(remote_array) array_id = six.text_type(cl.getStorageSystemInfo()['id']) remote_array['id'] = array_id wsapi_version = cl.getWsApiVersion()['build'] if wsapi_version < REMOTE_COPY_API_VERSION: msg = (_LW("The secondary array must have an API " "version of %(min_ver)s or higher. Array " "'%(target)s' is on %(target_ver)s, " "therefore it will not be added as a valid " "replication target.") % {'target': array_name, 'min_ver': REMOTE_COPY_API_VERSION, 'target_ver': wsapi_version}) LOG.warning(msg) elif not self._is_valid_replication_array(remote_array): msg = (_LW("'%s' is not a valid replication array. " "In order to be valid, backend_id, " "replication_mode, " "hpe3par_api_url, hpe3par_username, " "hpe3par_password, cpg_map, san_ip, " "san_login, and san_password " "must be specified. If the target is " "managed, managed_backend_name must be set " "as well.") % array_name) LOG.warning(msg) else: replication_targets.append(remote_array) except Exception: msg = (_LE("Could not log in to 3PAR array (%s) with the " "provided credentials.") % array_name) LOG.error(msg) finally: self._destroy_replication_client(cl) self._replication_targets = replication_targets if self._is_replication_configured_correct(): self._replication_enabled = True def _is_valid_replication_array(self, target): required_flags = ['hpe3par_api_url', 'hpe3par_username', 'hpe3par_password', 'san_ip', 'san_login', 'san_password', 'backend_id', 'replication_mode', 'cpg_map'] try: self.check_replication_flags(target, required_flags) return True except Exception: return False def _is_replication_configured_correct(self): rep_flag = True # Make sure there is at least one replication target. if len(self._replication_targets) < 1: LOG.error(_LE("There must be at least one valid replication " "device configured.")) rep_flag = False return rep_flag def _is_replication_mode_correct(self, mode, sync_num): rep_flag = True # Make sure replication_mode is set to either sync|periodic. mode = self._get_remote_copy_mode_num(mode) if not mode: LOG.error(_LE("Extra spec replication:mode must be set and must " "be either 'sync' or 'periodic'.")) rep_flag = False else: # If replication:mode is periodic, replication_sync_period must be # set between 300 - 31622400 seconds. 
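
# The sync-period bounds above as a one-liner, for clarity (300 s is
# 5 minutes; 31622400 s is 366 days):
def _valid_sync_period(seconds):
    return 300 <= seconds <= 31622400

assert _valid_sync_period(900)
assert not _valid_sync_period(60)
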
if mode == self.PERIODIC and ( sync_num < 300 or sync_num > 31622400): LOG.error(_LE("Extra spec replication:sync_period must be " "greater than 299 and less than 31622401 " "seconds.")) rep_flag = False return rep_flag def _volume_of_replicated_type(self, volume): replicated_type = False volume_type_id = volume.get('volume_type_id') if volume_type_id: volume_type = self._get_volume_type(volume_type_id) extra_specs = volume_type.get('extra_specs') if extra_specs and 'replication_enabled' in extra_specs: rep_val = extra_specs['replication_enabled'] replicated_type = (rep_val == " True") return replicated_type def _is_volume_in_remote_copy_group(self, volume): rcg_name = self._get_3par_rcg_name(volume['id']) try: self.client.getRemoteCopyGroup(rcg_name) return True except hpeexceptions.HTTPNotFound: return False def _get_remote_copy_mode_num(self, mode): ret_mode = None if mode == "sync": ret_mode = self.SYNC if mode == "periodic": ret_mode = self.PERIODIC return ret_mode def _get_3par_config(self): self._do_replication_setup() conf = None if self._replication_enabled: for target in self._replication_targets: if target['backend_id'] == self._active_backend_id: conf = target break self._build_3par_config(conf) def _build_3par_config(self, conf=None): """Build 3PAR client config dictionary. self._client_conf will contain values from self.config if the volume is located on the primary array in order to properly contact it. If the volume has been failed over and therefore on a secondary array, self._client_conf will contain values on how to contact that array. The only time we will return with entries from a secondary array is with unmanaged replication. """ if conf: self._client_conf['hpe3par_cpg'] = self._generate_hpe3par_cpgs( conf.get('cpg_map')) self._client_conf['hpe3par_username'] = ( conf.get('hpe3par_username')) self._client_conf['hpe3par_password'] = ( conf.get('hpe3par_password')) self._client_conf['san_ip'] = conf.get('san_ip') self._client_conf['san_login'] = conf.get('san_login') self._client_conf['san_password'] = conf.get('san_password') self._client_conf['san_ssh_port'] = conf.get('san_ssh_port') self._client_conf['ssh_conn_timeout'] = ( conf.get('ssh_conn_timeout')) self._client_conf['san_private_key'] = conf.get('san_private_key') self._client_conf['hpe3par_api_url'] = conf.get('hpe3par_api_url') self._client_conf['hpe3par_iscsi_ips'] = ( conf.get('hpe3par_iscsi_ips')) self._client_conf['hpe3par_iscsi_chap_enabled'] = ( conf.get('hpe3par_iscsi_chap_enabled')) self._client_conf['iscsi_ip_address'] = ( conf.get('iscsi_ip_address')) self._client_conf['iscsi_port'] = conf.get('iscsi_port') else: self._client_conf['hpe3par_cpg'] = ( self.config.hpe3par_cpg) self._client_conf['hpe3par_username'] = ( self.config.hpe3par_username) self._client_conf['hpe3par_password'] = ( self.config.hpe3par_password) self._client_conf['san_ip'] = self.config.san_ip self._client_conf['san_login'] = self.config.san_login self._client_conf['san_password'] = self.config.san_password self._client_conf['san_ssh_port'] = self.config.san_ssh_port self._client_conf['ssh_conn_timeout'] = ( self.config.ssh_conn_timeout) self._client_conf['san_private_key'] = self.config.san_private_key self._client_conf['hpe3par_api_url'] = self.config.hpe3par_api_url self._client_conf['hpe3par_iscsi_ips'] = ( self.config.hpe3par_iscsi_ips) self._client_conf['hpe3par_iscsi_chap_enabled'] = ( self.config.hpe3par_iscsi_chap_enabled) self._client_conf['iscsi_ip_address'] = ( self.config.iscsi_ip_address) 
self._client_conf['iscsi_port'] = self.config.iscsi_port def _get_cpg_from_cpg_map(self, cpg_map, target_cpg): ret_target_cpg = None cpg_pairs = cpg_map.split(' ') for cpg_pair in cpg_pairs: cpgs = cpg_pair.split(':') cpg = cpgs[0] dest_cpg = cpgs[1] if cpg == target_cpg: ret_target_cpg = dest_cpg return ret_target_cpg def _generate_hpe3par_cpgs(self, cpg_map): hpe3par_cpgs = [] cpg_pairs = cpg_map.split(' ') for cpg_pair in cpg_pairs: cpgs = cpg_pair.split(':') hpe3par_cpgs.append(cpgs[1]) return hpe3par_cpgs def _get_replication_targets(self): replication_targets = [] for target in self._replication_targets: replication_targets.append(target['backend_id']) return replication_targets def _do_volume_replication_setup(self, volume): """This function will do or ensure the following: -Create volume on main array (already done in create_volume) -Create Remote Copy Group on main array -Add volume to Remote Copy Group on main array -Start remote copy If anything here fails, we will need to clean everything up in reverse order, including the original volume. """ rcg_name = self._get_3par_rcg_name(volume['id']) # If the volume is already in a remote copy group, return True # after starting remote copy. If remote copy is already started, # issuing this command again will be fine. if self._is_volume_in_remote_copy_group(volume): try: self.client.startRemoteCopy(rcg_name) except Exception: pass return True try: # Grab the extra_spec entries for replication and make sure they # are set correctly. volume_type = self._get_volume_type(volume["volume_type_id"]) extra_specs = volume_type.get("extra_specs") replication_mode = extra_specs.get( self.EXTRA_SPEC_REP_MODE, self.DEFAULT_REP_MODE) replication_mode_num = self._get_remote_copy_mode_num( replication_mode) replication_sync_period = extra_specs.get( self.EXTRA_SPEC_REP_SYNC_PERIOD, self.DEFAULT_SYNC_PERIOD) if replication_sync_period: replication_sync_period = int(replication_sync_period) if not self._is_replication_mode_correct(replication_mode, replication_sync_period): msg = _("The replication mode was not configured correctly " "in the volume type extra_specs. If replication:mode " "is periodic, replication:sync_period must also be " "specified and be between 300 and 31622400 seconds.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) vol_settings = self.get_volume_settings_from_type(volume) local_cpg = vol_settings['cpg'] vol_name = self._get_3par_vol_name(volume['id']) # Create remote copy group on main array. rcg_targets = [] sync_targets = [] for target in self._replication_targets: # Only add targets that match the volumes replication mode. if target['replication_mode'] == replication_mode_num: cpg = self._get_cpg_from_cpg_map(target['cpg_map'], local_cpg) rcg_target = {'targetName': target['backend_id'], 'mode': replication_mode_num, 'snapCPG': cpg, 'userCPG': cpg} rcg_targets.append(rcg_target) sync_target = {'targetName': target['backend_id'], 'syncPeriod': replication_sync_period} sync_targets.append(sync_target) optional = {'localSnapCPG': vol_settings['snap_cpg'], 'localUserCPG': local_cpg} pool = volume_utils.extract_host(volume['host'], level='pool') domain = self.get_domain(pool) if domain: optional["domain"] = domain try: self.client.createRemoteCopyGroup(rcg_name, rcg_targets, optional) except Exception as ex: msg = (_("There was an error creating the remote copy " "group: %s.") % six.text_type(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Add volume to remote copy group. 
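
# Aside on the cpg_map format consumed by _get_cpg_from_cpg_map() and
# _generate_hpe3par_cpgs() above: a space-separated list of
# 'source:destination' CPG pairs. A dict-based equivalent, for illustration
# (the CPG names are hypothetical):
def _parse_cpg_map(cpg_map):
    return dict(pair.split(':') for pair in cpg_map.split(' '))

cpg_map = 'CPG_A:CPG_B CPG_C:CPG_D'
assert _parse_cpg_map(cpg_map)['CPG_A'] == 'CPG_B'
# The destination CPGs double as the hpe3par_cpg list after a failover:
assert list(_parse_cpg_map(cpg_map).values()) == ['CPG_B', 'CPG_D']
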
rcg_targets = [] for target in self._replication_targets: # Only add targets that match the volumes replication mode. if target['replication_mode'] == replication_mode_num: rcg_target = {'targetName': target['backend_id'], 'secVolumeName': vol_name} rcg_targets.append(rcg_target) optional = {'volumeAutoCreation': True} try: self.client.addVolumeToRemoteCopyGroup(rcg_name, vol_name, rcg_targets, optional=optional) except Exception as ex: msg = (_("There was an error adding the volume to the remote " "copy group: %s.") % six.text_type(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Check and see if we are in periodic mode. If we are, update # Remote Copy Group to have a sync period. if replication_sync_period and ( replication_mode_num == self.PERIODIC): opt = {'targets': sync_targets} try: self.client.modifyRemoteCopyGroup(rcg_name, opt) except Exception as ex: msg = (_("There was an error setting the sync period for " "the remote copy group: %s.") % six.text_type(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Start the remote copy. try: self.client.startRemoteCopy(rcg_name) except Exception as ex: msg = (_("There was an error starting remote copy: %s.") % six.text_type(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return True except Exception as ex: self._do_volume_replication_destroy(volume) msg = (_("There was an error setting up a remote copy group " "on the 3PAR arrays: ('%s'). The volume will not be " "recognized as replication type.") % six.text_type(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _do_volume_replication_destroy(self, volume, rcg_name=None): """This will completely remove all traces of a remote copy group. It should be used when deleting a replication enabled volume or if setting up a remote copy group fails. It will try and do the following: -Stop remote copy -Remove volume from Remote Copy Group on main array -Delete Remote Copy Group from main array -Delete volume from main array """ if not rcg_name: rcg_name = self._get_3par_rcg_name(volume['id']) vol_name = self._get_3par_vol_name(volume['id']) # Stop remote copy. try: self.client.stopRemoteCopy(rcg_name) except Exception: pass # Delete volume from remote copy group on main array. try: self.client.removeVolumeFromRemoteCopyGroup( rcg_name, vol_name, removeFromTarget=True) except Exception: pass # Delete remote copy group on main array. try: self.client.removeRemoteCopyGroup(rcg_name) except Exception: pass # Delete volume on the main array. 
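
# The destroy path above is deliberately best-effort: each step is wrapped
# in its own try/except so later cleanup still runs even if an earlier step
# fails. A compact sketch of that pattern (the steps are hypothetical
# callables, not driver code):
import contextlib

def _best_effort_teardown(steps):
    for step in steps:
        with contextlib.suppress(Exception):
            step()

def _failing_step():
    raise RuntimeError('simulated WSAPI error')

# The failing step is swallowed and the remaining steps still execute.
_best_effort_teardown([
    lambda: None,     # stop remote copy
    _failing_step,    # remove volume from the group (fails, ignored)
    lambda: None,     # delete the remote copy group
    lambda: None,     # delete the volume
])
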
try: self.client.deleteVolume(vol_name) except Exception: pass def _delete_replicated_failed_over_volume(self, volume): location = volume.get('provider_location') rcg_name = self._get_3par_remote_rcg_name(volume['id'], location) targets = self.client.getRemoteCopyGroup(rcg_name)['targets'] # When failed over, we want to temporarily disable config mirroring # in order to be allowed to delete the volume and remote copy group for target in targets: target_name = target['targetName'] self.client.toggleRemoteCopyConfigMirror(target_name, mirror_config=False) # Do the regular volume replication destroy now that config mirroring is off try: self._do_volume_replication_destroy(volume, rcg_name) except Exception as ex: msg = (_("The failed-over volume could not be deleted: %s") % six.text_type(ex)) LOG.error(msg) raise exception.VolumeIsBusy(message=msg) finally: # Turn config mirroring back on for target in targets: target_name = target['targetName'] self.client.toggleRemoteCopyConfigMirror(target_name, mirror_config=True) class TaskWaiter(object): """TaskWaiter waits until a task is no longer active and returns its status.""" def __init__(self, client, task_id, interval=1, initial_delay=0): self.client = client self.task_id = task_id self.interval = interval self.initial_delay = initial_delay def _wait_for_task(self): status = self.client.getTask(self.task_id) LOG.debug("3PAR Task id %(id)s status = %(status)s", {'id': self.task_id, 'status': status['status']}) if status['status'] != self.client.TASK_ACTIVE: raise loopingcall.LoopingCallDone(status) def wait_for_task(self): timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_task) return timer.start(interval=self.interval, initial_delay=self.initial_delay).wait() class ModifyVolumeTask(flow_utils.CinderTask): """Task to change a volume's snapCPG and comment. This is a task for changing the snapCPG and comment. It is intended for use during retype(). These changes are done together with a single modify request which should be fast and easy to revert. Because we do not support retype with existing snapshots, we can change the snapCPG without using a keepVV. If snapshots exist, then this will fail, as desired. This task does not change the userCPG or provisioningType. Those changes may require tunevv, so they are done by the TuneVolumeTask. The new comment will contain the new type, VVS and QOS information along with whatever else was in the old comment dict. The old comment and snapCPG are restored if revert is called. """ def __init__(self, action): self.needs_revert = False super(ModifyVolumeTask, self).__init__(addons=[action]) def _get_new_comment(self, old_comment, new_vvs, new_qos, new_type_name, new_type_id): # Modify the comment during ModifyVolume comment_dict = dict(ast.literal_eval(old_comment)) if 'vvs' in comment_dict: del comment_dict['vvs'] if 'qos' in comment_dict: del comment_dict['qos'] if new_vvs: comment_dict['vvs'] = new_vvs elif new_qos: comment_dict['qos'] = new_qos else: comment_dict['qos'] = {} if new_type_name: comment_dict['volume_type_name'] = new_type_name else: comment_dict.pop('volume_type_name', None) if new_type_id: comment_dict['volume_type_id'] = new_type_id else: comment_dict.pop('volume_type_id', None) return comment_dict def execute(self, common, volume_name, old_snap_cpg, new_snap_cpg, old_comment, new_vvs, new_qos, new_type_name, new_type_id): comment_dict = self._get_new_comment( old_comment, new_vvs, new_qos, new_type_name, new_type_id) if new_snap_cpg != old_snap_cpg: # Modify the snap_cpg.
# This will fail with snapshots. LOG.info(_LI("Modifying %(volume_name)s snap_cpg from " "%(old_snap_cpg)s to %(new_snap_cpg)s."), {'volume_name': volume_name, 'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg}) common.client.modifyVolume( volume_name, {'snapCPG': new_snap_cpg, 'comment': json.dumps(comment_dict)}) self.needs_revert = True else: LOG.info(_LI("Modifying %s comments."), volume_name) common.client.modifyVolume( volume_name, {'comment': json.dumps(comment_dict)}) self.needs_revert = True def revert(self, common, volume_name, old_snap_cpg, new_snap_cpg, old_comment, **kwargs): if self.needs_revert: LOG.info(_LI("Retype revert %(volume_name)s snap_cpg from " "%(new_snap_cpg)s back to %(old_snap_cpg)s."), {'volume_name': volume_name, 'new_snap_cpg': new_snap_cpg, 'old_snap_cpg': old_snap_cpg}) try: common.client.modifyVolume( volume_name, {'snapCPG': old_snap_cpg, 'comment': old_comment}) except Exception as ex: LOG.error(_LE("Exception during snapCPG revert: %s"), ex) class TuneVolumeTask(flow_utils.CinderTask): """Task to change a volume's CPG and/or provisioning type. This is a task for changing the CPG and/or provisioning type. It is intended for use during retype(). This task has no revert. The current design is to do this task last and do revert-able tasks first. Un-doing a tunevv can be expensive and should be avoided. """ def __init__(self, action, **kwargs): super(TuneVolumeTask, self).__init__(addons=[action]) def execute(self, common, old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_cpg, new_cpg, volume_name): common.tune_vv(old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_cpg, new_cpg, volume_name) class ModifySpecsTask(flow_utils.CinderTask): """Set/unset the QOS settings and/or VV set for the volume's new type. This is a task for changing the QOS settings and/or VV set. It is intended for use during retype(). If changes are made during execute(), then they need to be undone if revert() is called (i.e., if a later task fails). For 3PAR, we ignore QOS settings if a VVS is explicitly set, otherwise we create a VV set and use that for QOS settings. That is why they are lumped together here. Most of the decision-making about VVS vs. QOS settings vs. old-style scoped extra-specs is handled in existing reusable code. Here we mainly need to know what old stuff to remove before calling the function that knows how to set the new stuff. Basic task flow is as follows: Remove the volume from the old externally created VVS (when appropriate), delete the old cinder-created VVS, call the function that knows how to set a new VVS or QOS settings. If any changes are made during execute, then revert needs to reverse them. """ def __init__(self, action): self.needs_revert = False super(ModifySpecsTask, self).__init__(addons=[action]) def execute(self, common, volume_name, volume, old_cpg, new_cpg, old_vvs, new_vvs, old_qos, new_qos, old_flash_cache, new_flash_cache): if (old_vvs != new_vvs or old_qos != new_qos or old_flash_cache != new_flash_cache): # Remove VV from old VV Set. if old_vvs is not None and old_vvs != new_vvs: common.client.removeVolumeFromVolumeSet(old_vvs, volume_name) self.needs_revert = True # If any extra or qos specs changed then remove the old # special VV set that we create. We'll recreate it # as needed. vvs_name = common._get_3par_vvs_name(volume['id']) try: common.client.deleteVolumeSet(vvs_name) self.needs_revert = True except hpeexceptions.HTTPNotFound as ex: # HTTPNotFound(code=102) is OK. Set does not exist.
if ex.get_code() != 102: LOG.error(_LE("Unexpected error when retype() tried to " "deleteVolumeSet(%s)"), vvs_name) raise if new_vvs or new_qos or new_flash_cache: common._add_volume_to_volume_set( volume, volume_name, new_cpg, new_vvs, new_qos, new_flash_cache) self.needs_revert = True def revert(self, common, volume_name, volume, old_vvs, new_vvs, old_qos, old_cpg, **kwargs): if self.needs_revert: # If any extra or qos specs changed then remove the old # special VV set that we create and recreate it per # the old type specs. vvs_name = common._get_3par_vvs_name(volume['id']) try: common.client.deleteVolumeSet(vvs_name) except hpeexceptions.HTTPNotFound as ex: # HTTPNotFound(code=102) is OK. Set does not exist. if ex.get_code() != 102: LOG.error(_LE("Unexpected error when retype() revert " "tried to deleteVolumeSet(%s)"), vvs_name) except Exception: LOG.error(_LE("Unexpected error when retype() revert " "tried to deleteVolumeSet(%s)"), vvs_name) if old_vvs is not None or old_qos is not None: try: common._add_volume_to_volume_set( volume, volume_name, old_cpg, old_vvs, old_qos) except Exception as ex: LOG.error(_LE("%(exception)s: Exception during revert of " "retype for volume %(volume_name)s. " "Original volume set/QOS settings may not " "have been fully restored."), {'exception': ex, 'volume_name': volume_name}) if new_vvs is not None and old_vvs != new_vvs: try: common.client.removeVolumeFromVolumeSet( new_vvs, volume_name) except Exception as ex: LOG.error(_LE("%(exception)s: Exception during revert of " "retype for volume %(volume_name)s. " "Failed to remove from new volume set " "%(new_vvs)s."), {'exception': ex, 'volume_name': volume_name, 'new_vvs': new_vvs}) cinder-8.0.0/cinder/volume/drivers/vzstorage.py0000664000567000056710000003123612701406250022755 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Parallels IP Holdings GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
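# Example contents for the shares config file this driver consumes (a hedged
# sketch: the share syntax is inferred from SHARE_FORMAT_REGEX and the error
# message in _ensure_share_mounted() below, the optional trailing JSON list
# of mount flags from the json.loads() there, and all cluster/MDS names and
# the password are hypothetical):
#
#     cluster1
#     mds1,mds2:/cluster2:passwd ["-u", "cinder", "-g", "root", "-m", "0770"]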
import errno import json import os import re from os_brick.remotefs import remotefs from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder import exception from cinder.i18n import _, _LI from cinder.image import image_utils from cinder import utils from cinder.volume.drivers import remotefs as remotefs_drv VERSION = '1.0' LOG = logging.getLogger(__name__) vzstorage_opts = [ cfg.StrOpt('vzstorage_shares_config', default='/etc/cinder/vzstorage_shares', help='File with the list of available vzstorage shares.'), cfg.BoolOpt('vzstorage_sparsed_volumes', default=True, help=('Create volumes as sparsed files which take no space ' 'rather than regular files when using raw format, ' 'in which case volume creation takes a lot of time.')), cfg.FloatOpt('vzstorage_used_ratio', default=0.95, help=('Fraction of ACTUAL usage of the underlying volume ' 'before no new volumes can be allocated to the volume ' 'destination.')), cfg.StrOpt('vzstorage_mount_point_base', default='$state_path/mnt', help=('Base dir containing mount points for ' 'vzstorage shares.')), cfg.ListOpt('vzstorage_mount_options', help=('Mount options passed to the vzstorage client. ' 'See the pstorage-mount man page ' 'for details.')), ] CONF = cfg.CONF CONF.register_opts(vzstorage_opts) class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): """Cinder driver for Virtuozzo Storage. Creates volumes as files on the mounted vzstorage cluster. Version history: 1.0 - Initial driver. """ driver_volume_type = 'vzstorage' driver_prefix = 'vzstorage' volume_backend_name = 'Virtuozzo_Storage' VERSION = VERSION SHARE_FORMAT_REGEX = r'(?:(\S+):\/)?([a-zA-Z0-9_-]+)(?::(\S+))?' def __init__(self, execute=putils.execute, *args, **kwargs): self._remotefsclient = None super(VZStorageDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(vzstorage_opts) self._execute_as_root = False root_helper = utils.get_root_helper() # base bound to instance is used in RemoteFsConnector. self.base = getattr(self.configuration, 'vzstorage_mount_point_base', CONF.vzstorage_mount_point_base) opts = getattr(self.configuration, 'vzstorage_mount_options', CONF.vzstorage_mount_options) self._remotefsclient = remotefs.RemoteFsClient( 'vzstorage', root_helper, execute=execute, vzstorage_mount_point_base=self.base, vzstorage_mount_options=opts) def _qemu_img_info(self, path, volume_name): return super(VZStorageDriver, self)._qemu_img_info_base( path, volume_name, self.configuration.vzstorage_mount_point_base) @remotefs_drv.locked_volume_id_operation def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.
:param volume: volume reference :param connector: connector reference """ # Find active image active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) info = self._qemu_img_info(active_file_path, volume['name']) fmt = info.file_format data = {'export': volume['provider_location'], 'format': fmt, 'name': active_file, } return { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self._get_mount_point_base(), } def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(VZStorageDriver, self).do_setup(context) config = self.configuration.vzstorage_shares_config if not os.path.exists(config): msg = (_("VzStorage config file at %(config)s doesn't exist.") % {'config': config}) LOG.error(msg) raise exception.VzStorageException(msg) if not os.path.isabs(self.base): msg = _("Invalid mount point base: %s.") % self.base LOG.error(msg) raise exception.VzStorageException(msg) used_ratio = self.configuration.vzstorage_used_ratio if not ((used_ratio > 0) and (used_ratio <= 1)): msg = _("VzStorage config 'vzstorage_used_ratio' invalid. " "Must be > 0 and <= 1.0: %s.") % used_ratio LOG.error(msg) raise exception.VzStorageException(msg) self.shares = {} # Check if mount.fuse.pstorage is installed on this system; # note that we don't need to be root to see if the package # is installed. package = 'mount.fuse.pstorage' try: self._execute(package, check_exit_code=False, run_as_root=False) except OSError as exc: if exc.errno == errno.ENOENT: msg = _('%s is not installed.') % package raise exception.VzStorageException(msg) else: raise self.configuration.nas_secure_file_operations = 'true' self.configuration.nas_secure_file_permissions = 'true' def _ensure_share_mounted(self, share): m = re.search(self.SHARE_FORMAT_REGEX, share) if not m: msg = (_("Invalid Virtuozzo Storage share specification: %r. " "Must be: [MDS1[,MDS2],...:/][:PASSWORD].") % share) raise exception.VzStorageException(msg) cluster_name = m.group(2) # set up logging to non-default path, so that it will # be possible to mount the same cluster to another mount # point by hand with default options. mnt_flags = ['-l', '/var/log/pstorage/%s-cinder.log.gz' % cluster_name] if self.shares.get(share) is not None: extra_flags = json.loads(self.shares[share]) mnt_flags.extend(extra_flags) self._remotefsclient.mount(share, mnt_flags) def _find_share(self, volume_size_in_gib): """Choose VzStorage share among available ones for given volume size. For instances with more than one share that meets the criteria, the first suitable share will be selected. :param volume_size_in_gib: int size in GB """ if not self._mounted_shares: raise exception.VzStorageNoSharesMounted() for share in self._mounted_shares: if self._is_share_eligible(share, volume_size_in_gib): break else: raise exception.VzStorageNoSuitableShareFound( volume_size=volume_size_in_gib) LOG.debug('Selected %s as target VzStorage share.', share) return share def _is_share_eligible(self, vz_share, volume_size_in_gib): """Verifies VzStorage share is eligible to host volume with given size. 
:param vz_share: vzstorage share :param volume_size_in_gib: int size in GB """ used_ratio = self.configuration.vzstorage_used_ratio volume_size = volume_size_in_gib * units.Gi total_size, available, allocated = self._get_capacity_info(vz_share) # Use true division; floor division would make the ratio check meaningless. if (allocated + volume_size) / float(total_size) > used_ratio: LOG.debug('_is_share_eligible: %s is above ' 'vzstorage_used_ratio.', vz_share) return False return True @remotefs_drv.locked_volume_id_operation def extend_volume(self, volume, size_gb): LOG.info(_LI('Extending volume %s.'), volume['id']) self._extend_volume(volume, size_gb) def _extend_volume(self, volume, size_gb): volume_path = self.local_path(volume) self._check_extend_volume_support(volume, size_gb) LOG.info(_LI('Resizing file to %sG...'), size_gb) self._do_extend_volume(volume_path, size_gb) def _do_extend_volume(self, volume_path, size_gb): image_utils.resize_image(volume_path, size_gb) if not self._is_file_size_equal(volume_path, size_gb): raise exception.ExtendVolumeError( reason='Resizing image file failed.') def _check_extend_volume_support(self, volume, size_gb): volume_path = self.local_path(volume) active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) if active_file_path != volume_path: msg = _('Extend volume is only supported for this ' 'driver when no snapshots exist.') raise exception.InvalidVolume(msg) extend_by = int(size_gb) - volume['size'] if not self._is_share_eligible(volume['provider_location'], extend_by): raise exception.ExtendVolumeError(reason='Insufficient space to ' 'extend volume %s to %sG.' % (volume['id'], size_gb)) def _is_file_size_equal(self, path, size): """Checks if file size at path is equal to size.""" data = image_utils.qemu_img_info(path) virt_size = data.virtual_size / units.Gi return virt_size == size def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume. This is done with a qemu-img convert to raw/qcow2 from the snapshot qcow2. """ LOG.debug("_copy_volume_from_snapshot: snapshot: %(snap)s, " "volume: %(vol)s, volume_size: %(size)s.", {'snap': snapshot['id'], 'vol': volume['id'], 'size': volume_size, }) info_path = self._local_path_volume_info(snapshot['volume']) snap_info = self._read_info_file(info_path) vol_dir = self._local_volume_dir(snapshot['volume']) out_format = "raw" forward_file = snap_info[snapshot['id']] forward_path = os.path.join(vol_dir, forward_file) # Find the file which backs this file, which represents the point # when this snapshot was created.
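# (A qcow2 overlay records its parent in the image header's backing-file
# field, so "qemu-img info" on the forward file identifies the image that
# held the data when the snapshot was taken. Hypothetical chain for
# illustration:
#
#     volume-1234.snap-abcd  --backing_file-->  volume-1234
#
# _qemu_img_info() below surfaces that backing_file attribute.)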
img_info = self._qemu_img_info(forward_path, snapshot['volume']['name']) path_to_snap_img = os.path.join(vol_dir, img_info.backing_file) LOG.debug("_copy_volume_from_snapshot: will copy " "from snapshot at %s.", path_to_snap_img) image_utils.convert_image(path_to_snap_img, self.local_path(volume), out_format) self._extend_volume(volume, volume_size) @remotefs_drv.locked_volume_id_operation def delete_volume(self, volume): """Deletes a logical volume.""" if not volume['provider_location']: msg = (_('Volume %s does not have provider_location ' 'specified, skipping.') % volume['name']) LOG.error(msg) raise exception.VzStorageException(msg) self._ensure_share_mounted(volume['provider_location']) volume_dir = self._local_volume_dir(volume) mounted_path = os.path.join(volume_dir, self.get_active_image_from_info(volume)) if os.path.exists(mounted_path): self._delete(mounted_path) else: LOG.info(_LI("Skipping deletion of volume %s " "as it does not exist."), mounted_path) info_path = self._local_path_volume_info(volume) self._delete(info_path) cinder-8.0.0/cinder/volume/drivers/fujitsu/0000775000567000056710000000000012701406543022050 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/fujitsu/eternus_dx_common.py0000664000567000056710000024713012701406257026163 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 FUJITSU LIMITED # Copyright (c) 2012 EMC Corporation. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Cinder Volume driver for Fujitsu ETERNUS DX S3 series. """ import ast import base64 import hashlib import six import time from xml.etree.ElementTree import parse from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units LOG = logging.getLogger(__name__) CONF = cfg.CONF try: import pywbem except ImportError: msg = _LE('import pywbem failed!! ' 'pywbem is necessary for this volume driver.') LOG.error(msg) VOL_PREFIX = "FJosv_" RAIDGROUP = 2 TPPOOL = 5 SNAPOPC = 4 OPC = 5 RETURN_TO_RESOURCEPOOL = 19 DETACH = 8 INITIALIZED = 2 UNSYNCHRONIZED = 3 BROKEN = 5 PREPARED = 11 REPL = "FUJITSU_ReplicationService" STOR_CONF = "FUJITSU_StorageConfigurationService" CTRL_CONF = "FUJITSU_ControllerConfigurationService" STOR_HWID = "FUJITSU_StorageHardwareIDManagementService" UNDEF_MSG = 'Undefined Error!!' JOB_RETRIES = 60 JOB_INTERVAL_SEC = 10 # Error code keyword. VOLUMENAME_IN_USE = 32788 COPYSESSION_NOT_EXIST = 32793 LUNAME_IN_USE = 4102 LUNAME_NOT_EXIST = 4097 # Only for InvokeMethod(HidePaths). 
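# EC/REC (Fujitsu Equivalent Copy / Remote Equivalent Copy) copy-session
# type; together with SNAPOPC and OPC above it is used as a CopyType key in
# OPERATION_dic below to pick the stop operation for a session.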
EC_REC = 3 FJ_ETERNUS_DX_OPT_opts = [ cfg.StrOpt('cinder_eternus_config_file', default='/etc/cinder/cinder_fujitsu_eternus_dx.xml', help='config file for cinder eternus_dx volume driver'), ] POOL_TYPE_dic = { RAIDGROUP: 'RAID_GROUP', TPPOOL: 'Thinporvisioning_POOL', } OPERATION_dic = { SNAPOPC: RETURN_TO_RESOURCEPOOL, OPC: DETACH, EC_REC: DETACH, } RETCODE_dic = { '0': 'Success', '1': 'Method Not Supported', '4': 'Failed', '5': 'Invalid Parameter', '4096': 'Method Parameters Checked - Job Started', '4097': 'Size Not Supported', '4101': 'Target/initiator combination already exposed', '4102': 'Requested logical unit number in use', '32769': 'Maximum number of Logical Volume in a RAID group ' 'has been reached', '32770': 'Maximum number of Logical Volume in the storage device ' 'has been reached', '32771': 'Maximum number of registered Host WWN ' 'has been reached', '32772': 'Maximum number of affinity group has been reached', '32773': 'Maximum number of host affinity has been reached', '32785': 'The RAID group is in busy state', '32786': 'The Logical Volume is in busy state', '32787': 'The device is in busy state', '32788': 'Element Name is in use', '32792': 'No Copy License', '32793': 'Session is not exist', '32796': 'Quick Format Error', '32801': 'The CA port is in invalid setting', '32802': 'The Logical Volume is Mainframe volume', '32803': 'The RAID group is not operative', '32804': 'The Logical Volume is not operative', '32808': 'No Thin Provisioning License', '32809': 'The Logical Element is ODX volume', '32811': 'This operation cannot be performed to the NAS resources', '32812': 'This operation cannot be performed to the Storage Cluster ' 'resources', '32816': 'Fatal error generic', '35302': 'Invalid LogicalElement', '35304': 'LogicalElement state error', '35316': 'Multi-hop error', '35318': 'Maximum number of multi-hop has been reached', '35324': 'RAID is broken', '35331': 'Maximum number of session has been reached(per device)', '35333': 'Maximum number of session has been reached(per SourceElement)', '35334': 'Maximum number of session has been reached(per TargetElement)', '35335': 'Maximum number of Snapshot generation has been reached ' '(per SourceElement)', '35346': 'Copy table size is not setup', '35347': 'Copy table size is not enough', } CONF.register_opts(FJ_ETERNUS_DX_OPT_opts) class FJDXCommon(object): """Common code that does not depend on protocol.""" VERSION = "1.3.0" stats = { 'driver_version': VERSION, 'free_capacity_gb': 0, 'reserved_percentage': 0, 'storage_protocol': None, 'total_capacity_gb': 0, 'vendor_name': 'FUJITSU', 'QoS_support': False, 'volume_backend_name': None, } def __init__(self, prtcl, configuration=None): self.protocol = prtcl self.configuration = configuration self.configuration.append_config_values(FJ_ETERNUS_DX_OPT_opts) if prtcl == 'iSCSI': # Get iSCSI ipaddress from driver configuration file. 
self.configuration.iscsi_ip_address = ( self._get_drvcfg('EternusISCSIIP')) def create_volume(self, volume): """Create volume on ETERNUS.""" LOG.debug('create_volume, ' 'volume id: %(vid)s, volume size: %(vsize)s.', {'vid': volume['id'], 'vsize': volume['size']}) self.conn = self._get_eternus_connection() volumesize = int(volume['size']) * units.Gi volumename = self._create_volume_name(volume['id']) LOG.debug('create_volume, volumename: %(volumename)s, ' 'volumesize: %(volumesize)u.', {'volumename': volumename, 'volumesize': volumesize}) # get poolname from driver configuration file eternus_pool = self._get_drvcfg('EternusPool') # Existence check the pool pool = self._find_pool(eternus_pool) if 'RSP' in pool['InstanceID']: pooltype = RAIDGROUP else: pooltype = TPPOOL configservice = self._find_eternus_service(STOR_CONF) if configservice is None: msg = (_('create_volume, volume: %(volume)s, ' 'volumename: %(volumename)s, ' 'eternus_pool: %(eternus_pool)s, ' 'Storage Configuration Service not found.') % {'volume': volume, 'volumename': volumename, 'eternus_pool': eternus_pool}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('create_volume, ' 'CreateOrModifyElementFromStoragePool, ' 'ConfigService: %(service)s, ' 'ElementName: %(volumename)s, ' 'InPool: %(eternus_pool)s, ' 'ElementType: %(pooltype)u, ' 'Size: %(volumesize)u.', {'service': configservice, 'volumename': volumename, 'eternus_pool': eternus_pool, 'pooltype': pooltype, 'volumesize': volumesize}) # Invoke method for create volume rc, errordesc, job = self._exec_eternus_service( 'CreateOrModifyElementFromStoragePool', configservice, ElementName=volumename, InPool=pool, ElementType=self._pywbem_uint(pooltype, '16'), Size=self._pywbem_uint(volumesize, '64')) if rc == VOLUMENAME_IN_USE: # Element Name is in use LOG.warning(_LW('create_volume, ' 'volumename: %(volumename)s, ' 'Element Name is in use.'), {'volumename': volumename}) vol_instance = self._find_lun(volume) element = vol_instance elif rc != 0: msg = (_('create_volume, ' 'volumename: %(volumename)s, ' 'poolname: %(eternus_pool)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': volumename, 'eternus_pool': eternus_pool, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: element = job['TheElement'] # Get eternus model name try: systemnamelist = ( self._enum_eternus_instances('FUJITSU_StorageProduct')) except Exception: msg = (_('create_volume, ' 'volume: %(volume)s, ' 'EnumerateInstances, ' 'cannot connect to ETERNUS.') % {'volume': volume}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('create_volume, ' 'volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Backend: %(backend)s, ' 'Pool Name: %(eternus_pool)s, ' 'Pool Type: %(pooltype)s.', {'volumename': volumename, 'rc': rc, 'errordesc': errordesc, 'backend': systemnamelist[0]['IdentifyingNumber'], 'eternus_pool': eternus_pool, 'pooltype': POOL_TYPE_dic[pooltype]}) # Create return value. 
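# Shape of the return value, for illustration only (all field values here
# are hypothetical; the keybindings mirror the CIM instance-path fields
# copied below, and FJ_Volume_No is the hex LUN taken from DeviceID[24:28]):
#
#     element_path = {'classname': 'FUJITSU_StorageVolume',
#                     'keybindings': {... CIM instance-path keys ...}}
#     metadata = {'FJ_Backend': '<IdentifyingNumber>',
#                 'FJ_Volume_Name': 'FJosv_<hash>',
#                 'FJ_Volume_No': '0x0019',
#                 'FJ_Pool_Name': '<EternusPool>',
#                 'FJ_Pool_Type': 'RAID_GROUP' or 'Thinporvisioning_POOL'}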
element_path = { 'classname': element.classname, 'keybindings': { 'CreationClassName': element['CreationClassName'], 'SystemName': element['SystemName'], 'DeviceID': element['DeviceID'], 'SystemCreationClassName': element['SystemCreationClassName'] } } volume_no = "0x" + element['DeviceID'][24:28] metadata = {'FJ_Backend': systemnamelist[0]['IdentifyingNumber'], 'FJ_Volume_Name': volumename, 'FJ_Volume_No': volume_no, 'FJ_Pool_Name': eternus_pool, 'FJ_Pool_Type': POOL_TYPE_dic[pooltype]} return (element_path, metadata) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug('create_volume_from_snapshot, ' 'volume id: %(vid)s, volume size: %(vsize)s, ' 'snapshot id: %(sid)s.', {'vid': volume['id'], 'vsize': volume['size'], 'sid': snapshot['id']}) self.conn = self._get_eternus_connection() source_volume_instance = self._find_lun(snapshot) # Check the existence of source volume. if source_volume_instance is None: msg = _('create_volume_from_snapshot, ' 'Source Volume does not exist in ETERNUS.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Create volume for the target volume. (element_path, metadata) = self.create_volume(volume) target_volume_instancename = self._create_eternus_instance_name( element_path['classname'], element_path['keybindings']) try: target_volume_instance = ( self._get_eternus_instance(target_volume_instancename)) except Exception: msg = (_('create_volume_from_snapshot, ' 'target volume instancename: %(volume_instancename)s, ' 'Get Instance Failed.') % {'volume_instancename': target_volume_instancename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._create_local_cloned_volume(target_volume_instance, source_volume_instance) return (element_path, metadata) def create_cloned_volume(self, volume, src_vref): """Create clone of the specified volume.""" LOG.debug('create_cloned_volume, ' 'tgt: (%(tid)s, %(tsize)s), src: (%(sid)s, %(ssize)s).', {'tid': volume['id'], 'tsize': volume['size'], 'sid': src_vref['id'], 'ssize': src_vref['size']}) self.conn = self._get_eternus_connection() source_volume_instance = self._find_lun(src_vref) if source_volume_instance is None: msg = _('create_cloned_volume, ' 'Source Volume does not exist in ETERNUS.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) (element_path, metadata) = self.create_volume(volume) target_volume_instancename = self._create_eternus_instance_name( element_path['classname'], element_path['keybindings']) try: target_volume_instance = ( self._get_eternus_instance(target_volume_instancename)) except Exception: msg = (_('create_cloned_volume, ' 'target volume instancename: %(volume_instancename)s, ' 'Get Instance Failed.') % {'volume_instancename': target_volume_instancename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._create_local_cloned_volume(target_volume_instance, source_volume_instance) return (element_path, metadata) @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def _create_local_cloned_volume(self, tgt_vol_instance, src_vol_instance): """Create local clone of the specified volume.""" s_volumename = src_vol_instance['ElementName'] t_volumename = tgt_vol_instance['ElementName'] LOG.debug('_create_local_cloned_volume, ' 'tgt volume name: %(t_volumename)s, ' 'src volume name: %(s_volumename)s, ', {'t_volumename': t_volumename, 's_volumename': s_volumename}) # Get replicationservice for CreateElementReplica. 
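# CreateElementReplica is invoked below with SyncType=8, the value for a
# full clone; the snapshot path in create_snapshot() uses SyncType=7
# (snapshot). Noted here for readability; the values follow the DMTF SMI-S
# ValueMap for CreateElementReplica.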
repservice = self._find_eternus_service(REPL) if repservice is None: msg = _('_create_local_cloned_volume, ' 'Replication Service not found.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Invoke method for create cloned volume from volume. rc, errordesc, job = self._exec_eternus_service( 'CreateElementReplica', repservice, SyncType=self._pywbem_uint(8, '16'), SourceElement=src_vol_instance.path, TargetElement=tgt_vol_instance.path) if rc != 0: msg = (_('_create_local_cloned_volume, ' 'volumename: %(volumename)s, ' 'sourcevolumename: %(sourcevolumename)s, ' 'source volume instance: %(source_volume)s, ' 'target volume instance: %(target_volume)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': t_volumename, 'sourcevolumename': s_volumename, 'source_volume': src_vol_instance.path, 'target_volume': tgt_vol_instance.path, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_create_local_cloned_volume, out: %(rc)s, %(job)s.', {'rc': rc, 'job': job}) def delete_volume(self, volume): """Delete volume on ETERNUS.""" LOG.debug('delete_volume, volume id: %s.', volume['id']) self.conn = self._get_eternus_connection() vol_exist = self._delete_volume_setting(volume) if not vol_exist: LOG.debug('delete_volume, volume not found in 1st check.') return False # Check volume existence on ETERNUS again # because volume is deleted when SnapOPC copysession is deleted. vol_instance = self._find_lun(volume) if vol_instance is None: LOG.debug('delete_volume, volume not found in 2nd check, ' 'but no problem.') return True self._delete_volume(vol_instance) return True @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def _delete_volume_setting(self, volume): """Delete volume setting (HostAffinity, CopySession) on ETERNUS.""" LOG.debug('_delete_volume_setting, volume id: %s.', volume['id']) # Check the existence of volume. volumename = self._create_volume_name(volume['id']) vol_instance = self._find_lun(volume) if vol_instance is None: LOG.info(_LI('_delete_volume_setting, volumename:%(volumename)s, ' 'volume not found on ETERNUS. '), {'volumename': volumename}) return False # Delete host-affinity setting remained by unexpected error. self._unmap_lun(volume, None, force=True) # Check copy session relating to target volume. 
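# In a FUJITSU_StorageSynchronized association, SystemElement is the copy
# source and SyncedElement is the copy target. The loop below uses that to
# split sessions into ones to wait on (this volume is the source, so the
# copy must finish first) and ones to delete (this volume is the target).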
cpsessionlist = self._find_copysession(vol_instance) delete_copysession_list = [] wait_copysession_list = [] for cpsession in cpsessionlist: LOG.debug('_delete_volume_setting, ' 'volumename: %(volumename)s, ' 'cpsession: %(cpsession)s.', {'volumename': volumename, 'cpsession': cpsession}) if cpsession['SyncedElement'] == vol_instance.path: # Copy target : other_volume --(copy)--> vol_instance delete_copysession_list.append(cpsession) elif cpsession['SystemElement'] == vol_instance.path: # Copy source : vol_instance --(copy)--> other volume wait_copysession_list.append(cpsession) LOG.debug('_delete_volume_setting, ' 'wait_cpsession: %(wait_cpsession)s, ' 'delete_cpsession: %(delete_cpsession)s.', {'wait_cpsession': wait_copysession_list, 'delete_cpsession': delete_copysession_list}) for cpsession in wait_copysession_list: self._wait_for_copy_complete(cpsession) for cpsession in delete_copysession_list: self._delete_copysession(cpsession) LOG.debug('_delete_volume_setting, ' 'wait_cpsession: %(wait_cpsession)s, ' 'delete_cpsession: %(delete_cpsession)s, complete.', {'wait_cpsession': wait_copysession_list, 'delete_cpsession': delete_copysession_list}) return True @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def _delete_volume(self, vol_instance): """Delete volume on ETERNUS.""" LOG.debug('_delete_volume, volume name: %s.', vol_instance['ElementName']) volumename = vol_instance['ElementName'] configservice = self._find_eternus_service(STOR_CONF) if configservice is None: msg = (_('_delete_volume, volumename: %(volumename)s, ' 'Storage Configuration Service not found.') % {'volumename': volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_delete_volume, volumename: %(volumename)s, ' 'vol_instance: %(vol_instance)s, ' 'Method: ReturnToStoragePool.', {'volumename': volumename, 'vol_instance': vol_instance.path}) # Invoke method for delete volume rc, errordesc, job = self._exec_eternus_service( 'ReturnToStoragePool', configservice, TheElement=vol_instance.path) if rc != 0: msg = (_('_delete_volume, volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': volumename, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_delete_volume, volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.', {'volumename': volumename, 'rc': rc, 'errordesc': errordesc}) @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def create_snapshot(self, snapshot): """Create snapshot using SnapOPC.""" LOG.debug('create_snapshot, ' 'snapshot id: %(sid)s, volume id: %(vid)s.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) self.conn = self._get_eternus_connection() snapshotname = snapshot['name'] volumename = snapshot['volume_name'] vol_id = snapshot['volume_id'] volume = snapshot['volume'] d_volumename = self._create_volume_name(snapshot['id']) s_volumename = self._create_volume_name(vol_id) vol_instance = self._find_lun(volume) repservice = self._find_eternus_service(REPL) # Check the existence of volume. if vol_instance is None: # Volume not found on ETERNUS. 
msg = (_('create_snapshot, ' 'volumename: %(s_volumename)s, ' 'source volume not found on ETERNUS.') % {'s_volumename': s_volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if repservice is None: msg = (_('create_snapshot, ' 'volumename: %(volumename)s, ' 'Replication Service not found.') % {'volumename': volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Get poolname from driver configuration file. eternus_pool = self._get_drvcfg('EternusSnapPool') # Check the existence of pool pool = self._find_pool(eternus_pool) if pool is None: msg = (_('create_snapshot, ' 'eternus_pool: %(eternus_pool)s, ' 'pool not found.') % {'eternus_pool': eternus_pool}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('create_snapshot, ' 'snapshotname: %(snapshotname)s, ' 'source volume name: %(volumename)s, ' 'vol_instance.path: %(vol_instance)s, ' 'dest_volumename: %(d_volumename)s, ' 'pool: %(pool)s, ' 'Invoke CreateElementReplica.', {'snapshotname': snapshotname, 'volumename': volumename, 'vol_instance': vol_instance.path, 'd_volumename': d_volumename, 'pool': pool}) # Invoke method for create snapshot rc, errordesc, job = self._exec_eternus_service( 'CreateElementReplica', repservice, ElementName=d_volumename, TargetPool=pool, SyncType=self._pywbem_uint(7, '16'), SourceElement=vol_instance.path) if rc != 0: msg = (_('create_snapshot, ' 'snapshotname: %(snapshotname)s, ' 'source volume name: %(volumename)s, ' 'vol_instance.path: %(vol_instance)s, ' 'dest volume name: %(d_volumename)s, ' 'pool: %(pool)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'snapshotname': snapshotname, 'volumename': volumename, 'vol_instance': vol_instance.path, 'd_volumename': d_volumename, 'pool': pool, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: element = job['TargetElement'] LOG.debug('create_snapshot, ' 'volumename:%(volumename)s, ' 'Return code:%(rc)lu, ' 'Error:%(errordesc)s.', {'volumename': volumename, 'rc': rc, 'errordesc': errordesc}) # Create return value. 
element_path = { 'classname': element.classname, 'keybindings': { 'CreationClassName': element['CreationClassName'], 'SystemName': element['SystemName'], 'DeviceID': element['DeviceID'], 'SystemCreationClassName': element['SystemCreationClassName'] } } sdv_no = "0x" + element['DeviceID'][24:28] metadata = {'FJ_SDV_Name': d_volumename, 'FJ_SDV_No': sdv_no, 'FJ_Pool_Name': eternus_pool} return (element_path, metadata) def delete_snapshot(self, snapshot): """Delete snapshot.""" LOG.debug('delete_snapshot, ' 'snapshot id: %(sid)s, volume id: %(vid)s.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) vol_exist = self.delete_volume(snapshot) LOG.debug('delete_snapshot, vol_exist: %s.', vol_exist) return vol_exist def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" LOG.debug('initialize_connection, ' 'volume id: %(vid)s, protocol: %(prtcl)s.', {'vid': volume['id'], 'prtcl': self.protocol}) self.conn = self._get_eternus_connection() vol_instance = self._find_lun(volume) # Check the existence of volume if vol_instance is None: # Volume not found msg = (_('initialize_connection, ' 'volume: %(volume)s, ' 'Volume not found.') % {'volume': volume['name']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) target_portlist = self._get_target_port() mapdata = self._get_mapdata(vol_instance, connector, target_portlist) if mapdata: # volume is already mapped target_lun = mapdata.get('target_lun', None) target_luns = mapdata.get('target_luns', None) LOG.info(_LI('initialize_connection, ' 'volume: %(volume)s, ' 'target_lun: %(target_lun)s, ' 'target_luns: %(target_luns)s, ' 'Volume is already mapped.'), {'volume': volume['name'], 'target_lun': target_lun, 'target_luns': target_luns}) else: self._map_lun(vol_instance, connector, target_portlist) mapdata = self._get_mapdata(vol_instance, connector, target_portlist) mapdata['target_discovered'] = True mapdata['volume_id'] = volume['id'] if self.protocol == 'fc': device_info = {'driver_volume_type': 'fibre_channel', 'data': mapdata} elif self.protocol == 'iSCSI': device_info = {'driver_volume_type': 'iscsi', 'data': mapdata} LOG.debug('initialize_connection, ' 'device_info:%(info)s.', {'info': device_info}) return device_info def terminate_connection(self, volume, connector, force=False, **kwargs): """Disallow connection from connector.""" LOG.debug('terminate_connection, ' 'volume id: %(vid)s, protocol: %(prtcl)s, force: %(frc)s.', {'vid': volume['id'], 'prtcl': self.protocol, 'frc': force}) self.conn = self._get_eternus_connection() map_exist = self._unmap_lun(volume, connector) LOG.debug('terminate_connection, map_exist: %s.', map_exist) return map_exist def build_fc_init_tgt_map(self, connector, target_wwn=None): """Build parameter for Zone Manager""" LOG.debug('build_fc_init_tgt_map, target_wwn: %s.', target_wwn) initiatorlist = self._find_initiator_names(connector) if target_wwn is None: target_wwn = [] target_portlist = self._get_target_port() for target_port in target_portlist: target_wwn.append(target_port['Name']) init_tgt_map = {initiator: target_wwn for initiator in initiatorlist} LOG.debug('build_fc_init_tgt_map, ' 'initiator target mapping: %s.', init_tgt_map) return init_tgt_map def check_attached_volume_in_zone(self, connector): """Check Attached Volume in Same FC Zone or not""" LOG.debug('check_attached_volume_in_zone, connector: %s.', connector) aglist = self._find_affinity_group(connector) if not aglist: attached = False else: attached = True 
LOG.debug('check_attached_volume_in_zone, attached: %s.', attached) return attached @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def extend_volume(self, volume, new_size): """Extend volume on ETERNUS.""" LOG.debug('extend_volume, volume id: %(vid)s, ' 'size: %(size)s, new_size: %(nsize)s.', {'vid': volume['id'], 'size': volume['size'], 'nsize': new_size}) self.conn = self._get_eternus_connection() volumesize = new_size * units.Gi volumename = self._create_volume_name(volume['id']) # Get source volume instance. vol_instance = self._find_lun(volume) if vol_instance is None: msg = (_('extend_volume, ' 'volumename: %(volumename)s, ' 'volume not found.') % {'volumename': volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('extend_volume, volumename: %(volumename)s, ' 'volumesize: %(volumesize)u, ' 'volume instance: %(vol_instance)s.', {'volumename': volumename, 'volumesize': volumesize, 'vol_instance': vol_instance.path}) # Get poolname from driver configuration file. eternus_pool = self._get_drvcfg('EternusPool') # Check the existence of volume. pool = self._find_pool(eternus_pool) if pool is None: msg = (_('extend_volume, ' 'eternus_pool: %(eternus_pool)s, ' 'pool not found.') % {'eternus_pool': eternus_pool}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Set pooltype. if 'RSP' in pool['InstanceID']: pooltype = RAIDGROUP else: pooltype = TPPOOL configservice = self._find_eternus_service(STOR_CONF) if configservice is None: msg = (_('extend_volume, volume: %(volume)s, ' 'volumename: %(volumename)s, ' 'eternus_pool: %(eternus_pool)s, ' 'Storage Configuration Service not found.') % {'volume': volume, 'volumename': volumename, 'eternus_pool': eternus_pool}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('extend_volume, ' 'CreateOrModifyElementFromStoragePool, ' 'ConfigService: %(service)s, ' 'ElementName: %(volumename)s, ' 'InPool: %(eternus_pool)s, ' 'ElementType: %(pooltype)u, ' 'Size: %(volumesize)u, ' 'TheElement: %(vol_instance)s.', {'service': configservice, 'volumename': volumename, 'eternus_pool': eternus_pool, 'pooltype': pooltype, 'volumesize': volumesize, 'vol_instance': vol_instance.path}) # Invoke method for extend volume rc, errordesc, job = self._exec_eternus_service( 'CreateOrModifyElementFromStoragePool', configservice, ElementName=volumename, InPool=pool, ElementType=self._pywbem_uint(pooltype, '16'), Size=self._pywbem_uint(volumesize, '64'), TheElement=vol_instance.path) if rc != 0: msg = (_('extend_volume, ' 'volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'PoolType: %(pooltype)s.') % {'volumename': volumename, 'rc': rc, 'errordesc': errordesc, 'pooltype': POOL_TYPE_dic[pooltype]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('extend_volume, ' 'volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Pool Name: %(eternus_pool)s, ' 'Pool Type: %(pooltype)s.', {'volumename': volumename, 'rc': rc, 'errordesc': errordesc, 'eternus_pool': eternus_pool, 'pooltype': POOL_TYPE_dic[pooltype]}) return eternus_pool @lockutils.synchronized('ETERNUS-update', 'cinder-', True) def update_volume_stats(self): """get pool capacity.""" self.conn = self._get_eternus_connection() eternus_pool = self._get_drvcfg('EternusPool') LOG.debug('update_volume_stats, pool name: %s.', eternus_pool) pool = self._find_pool(eternus_pool, True) if pool: # pool is found self.stats['total_capacity_gb'] = ( 
pool['TotalManagedSpace'] / units.Gi) self.stats['free_capacity_gb'] = ( pool['RemainingManagedSpace'] / units.Gi) else: # if pool information is unknown, set 0 GB to capacity information LOG.warning(_LW('update_volume_stats, ' 'eternus_pool:%(eternus_pool)s, ' 'specified pool is not found.'), {'eternus_pool': eternus_pool}) self.stats['total_capacity_gb'] = 0 self.stats['free_capacity_gb'] = 0 self.stats['multiattach'] = True LOG.debug('update_volume_stats, ' 'eternus_pool:%(eternus_pool)s, ' 'total capacity[%(total)s], ' 'free capacity[%(free)s].', {'eternus_pool': eternus_pool, 'total': self.stats['total_capacity_gb'], 'free': self.stats['free_capacity_gb']}) return (self.stats, eternus_pool) def _get_mapdata(self, vol_instance, connector, target_portlist): """return mapping information.""" mapdata = None multipath = connector.get('multipath', False) LOG.debug('_get_mapdata, volume name: %(vname)s, ' 'protocol: %(prtcl)s, multipath: %(mpath)s.', {'vname': vol_instance['ElementName'], 'prtcl': self.protocol, 'mpath': multipath}) # find affinity group # attach the connector and include the volume aglist = self._find_affinity_group(connector, vol_instance) if not aglist: LOG.debug('_get_mapdata, ag_list:%s.', aglist) else: if self.protocol == 'fc': mapdata = self._get_mapdata_fc(aglist, vol_instance, target_portlist) elif self.protocol == 'iSCSI': mapdata = self._get_mapdata_iscsi(aglist, vol_instance, multipath) LOG.debug('_get_mapdata, mapdata: %s.', mapdata) return mapdata def _get_mapdata_fc(self, aglist, vol_instance, target_portlist): """_get_mapdata for FibreChannel.""" target_wwn = [] try: ag_volmaplist = self._reference_eternus_names( aglist[0], ResultClass='CIM_ProtocolControllerForUnit') vo_volmaplist = self._reference_eternus_names( vol_instance.path, ResultClass='CIM_ProtocolControllerForUnit') except pywbem.CIM_Error: msg = (_('_get_mapdata_fc, ' 'getting host-affinity from aglist/vol_instance failed, ' 'affinitygroup: %(ag)s, ' 'ReferenceNames, ' 'cannot connect to ETERNUS.') % {'ag': aglist[0]}) LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) volmap = None for vo_volmap in vo_volmaplist: if vo_volmap in ag_volmaplist: volmap = vo_volmap break try: volmapinstance = self._get_eternus_instance( volmap, LocalOnly=False) except pywbem.CIM_Error: msg = (_('_get_mapdata_fc, ' 'getting host-affinity instance failed, ' 'volmap: %(volmap)s, ' 'GetInstance, ' 'cannot connect to ETERNUS.') % {'volmap': volmap}) LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) target_lun = int(volmapinstance['DeviceNumber'], 16) for target_port in target_portlist: target_wwn.append(target_port['Name']) mapdata = {'target_wwn': target_wwn, 'target_lun': target_lun} LOG.debug('_get_mapdata_fc, mapdata: %s.', mapdata) return mapdata def _get_mapdata_iscsi(self, aglist, vol_instance, multipath): """_get_mapdata for iSCSI.""" target_portals = [] target_iqns = [] target_luns = [] try: vo_volmaplist = self._reference_eternus_names( vol_instance.path, ResultClass='CIM_ProtocolControllerForUnit') except Exception: msg = (_('_get_mapdata_iscsi, ' 'vol_instance: %(vol_instance)s, ' 'ReferenceNames: CIM_ProtocolControllerForUnit, ' 'cannot connect to ETERNUS.') % {'vol_instance': vol_instance}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) target_properties_list = self._get_eternus_iscsi_properties() target_list = [prop[0] for prop in target_properties_list] properties_list = ( [(prop[1], prop[2]) for prop in target_properties_list]) for ag in 
aglist: try: iscsi_endpointlist = ( self._assoc_eternus_names( ag, AssocClass='FUJITSU_SAPAvailableForElement', ResultClass='FUJITSU_iSCSIProtocolEndpoint')) except Exception: msg = (_('_get_mapdata_iscsi, ' 'Associators: FUJITSU_SAPAvailableForElement, ' 'cannot connect to ETERNUS.')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) iscsi_endpoint = iscsi_endpointlist[0] if iscsi_endpoint not in target_list: continue idx = target_list.index(iscsi_endpoint) target_portal, target_iqn = properties_list[idx] try: ag_volmaplist = self._reference_eternus_names( ag, ResultClass='CIM_ProtocolControllerForUnit') except Exception: msg = (_('_get_mapdata_iscsi, ' 'affinitygroup: %(ag)s, ' 'ReferenceNames, ' 'cannot connect to ETERNUS.') % {'ag': ag}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volmap = None for vo_volmap in vo_volmaplist: if vo_volmap in ag_volmaplist: volmap = vo_volmap break if volmap is None: continue try: volmapinstance = self._get_eternus_instance( volmap, LocalOnly=False) except Exception: msg = (_('_get_mapdata_iscsi, ' 'volmap: %(volmap)s, ' 'GetInstance, ' 'cannot connect to ETERNUS.') % {'volmap': volmap}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) target_lun = int(volmapinstance['DeviceNumber'], 16) target_portals.append(target_portal) target_iqns.append(target_iqn) target_luns.append(target_lun) if multipath: mapdata = {'target_portals': target_portals, 'target_iqns': target_iqns, 'target_luns': target_luns} else: mapdata = {'target_portal': target_portals[0], 'target_iqn': target_iqns[0], 'target_lun': target_luns[0]} LOG.debug('_get_mapdata_iscsi, mapdata: %s.', mapdata) return mapdata def _get_drvcfg(self, tagname, filename=None, multiple=False): """read from driver configuration file.""" if filename is None: # set default configuration file name filename = self.configuration.cinder_eternus_config_file LOG.debug("_get_drvcfg, input[%(filename)s][%(tagname)s].", {'filename': filename, 'tagname': tagname}) tree = parse(filename) elem = tree.getroot() ret = None if not multiple: ret = elem.findtext(".//" + tagname) else: ret = [] for e in elem.findall(".//" + tagname): if (e.text is not None) and (e.text not in ret): ret.append(e.text) if not ret: msg = (_('_get_drvcfg, ' 'filename: %(filename)s, ' 'tagname: %(tagname)s, ' 'data is None!! 
' 'Please edit the driver configuration file and correct it.') % {'filename': filename, 'tagname': tagname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return ret def _get_eternus_connection(self, filename=None): """return WBEM connection.""" LOG.debug('_get_eternus_connection, filename: %s.', filename) ip = self._get_drvcfg('EternusIP', filename) port = self._get_drvcfg('EternusPort', filename) user = self._get_drvcfg('EternusUser', filename) passwd = self._get_drvcfg('EternusPassword', filename) url = 'http://' + ip + ':' + port conn = pywbem.WBEMConnection(url, (user, passwd), default_namespace='root/eternus') if conn is None: msg = (_('_get_eternus_connection, ' 'filename: %(filename)s, ' 'ip: %(ip)s, ' 'port: %(port)s, ' 'user: %(user)s, ' 'passwd: ****, ' 'url: %(url)s, ' 'FAILED!!.') % {'filename': filename, 'ip': ip, 'port': port, 'user': user, 'url': url}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_get_eternus_connection, conn: %s.', conn) return conn def _create_volume_name(self, id_code): """create volume_name on ETERNUS from id on OpenStack.""" LOG.debug('_create_volume_name, id_code: %s.', id_code) if id_code is None: msg = _('_create_volume_name, id_code is None.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) m = hashlib.md5() m.update(id_code.encode('utf-8')) # pylint: disable=E1121 volumename = base64.urlsafe_b64encode(m.digest()).decode() ret = VOL_PREFIX + six.text_type(volumename) LOG.debug('_create_volume_name, ret: %s', ret) return ret def _find_pool(self, eternus_pool, detail=False): """find Instance or InstanceName of pool by pool name on ETERNUS.""" LOG.debug('_find_pool, pool name: %s.', eternus_pool) tppoollist = [] rgpoollist = [] # Get pool info from CIM instances (includes info about the instance path). try: tppoollist = self._enum_eternus_instances( 'FUJITSU_ThinProvisioningPool') rgpoollist = self._enum_eternus_instances( 'FUJITSU_RAIDStoragePool') except Exception: msg = (_('_find_pool, ' 'eternus_pool:%(eternus_pool)s, ' 'EnumerateInstances, ' 'cannot connect to ETERNUS.') % {'eternus_pool': eternus_pool}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Make total pools list. poollist = tppoollist + rgpoollist # Pool names are unique within one ETERNUS backend, # so looking up by pool name is enough to find the target pool.
for pool in poollist: if pool['ElementName'] == eternus_pool: poolinstance = pool break else: poolinstance = None if poolinstance is None: ret = None elif detail is True: ret = poolinstance else: ret = poolinstance.path LOG.debug('_find_pool, pool: %s.', ret) return ret def _find_eternus_service(self, classname): """find CIM instance about service information.""" LOG.debug('_find_eternus_service, ' 'classname: %s.', classname) try: services = self._enum_eternus_instance_names( six.text_type(classname)) except Exception: msg = (_('_find_eternus_service, ' 'classname: %(classname)s, ' 'EnumerateInstanceNames, ' 'cannot connect to ETERNUS.') % {'classname': classname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ret = services[0] LOG.debug('_find_eternus_service, ' 'classname: %(classname)s, ' 'ret: %(ret)s.', {'classname': classname, 'ret': ret}) return ret @lockutils.synchronized('ETERNUS-SMIS-exec', 'cinder-', True) def _exec_eternus_service(self, classname, instanceNameList, **param_dict): """Execute SMI-S Method.""" LOG.debug('_exec_eternus_service, ' 'classname: %(a)s, ' 'instanceNameList: %(b)s, ' 'parameters: %(c)s.', {'a': classname, 'b': instanceNameList, 'c': param_dict}) # Use InvokeMethod; rc is pre-set so the except clause below can test it safely. rc = None try: rc, retdata = self.conn.InvokeMethod( classname, instanceNameList, **param_dict) except Exception: if rc is None: msg = (_('_exec_eternus_service, ' 'classname: %(classname)s, ' 'InvokeMethod, ' 'cannot connect to ETERNUS.') % {'classname': classname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # If the result has job information, wait for job complete if "Job" in retdata: rc = self._wait_for_job_complete(self.conn, retdata) errordesc = RETCODE_dic.get(six.text_type(rc), UNDEF_MSG) ret = (rc, errordesc, retdata) LOG.debug('_exec_eternus_service, ' 'classname: %(a)s, ' 'instanceNameList: %(b)s, ' 'parameters: %(c)s, ' 'Return code: %(rc)s, ' 'Error: %(errordesc)s, ' 'Return data: %(retdata)s.', {'a': classname, 'b': instanceNameList, 'c': param_dict, 'rc': rc, 'errordesc': errordesc, 'retdata': retdata}) return ret @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) def _enum_eternus_instances(self, classname): """Enumerate Instances.""" LOG.debug('_enum_eternus_instances, classname: %s.', classname) ret = self.conn.EnumerateInstances(classname) LOG.debug('_enum_eternus_instances, enum %d instances.', len(ret)) return ret @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) def _enum_eternus_instance_names(self, classname): """Enumerate Instance Names.""" LOG.debug('_enum_eternus_instance_names, classname: %s.', classname) ret = self.conn.EnumerateInstanceNames(classname) LOG.debug('_enum_eternus_instance_names, enum %d names.', len(ret)) return ret @lockutils.synchronized('ETERNUS-SMIS-getinstance', 'cinder-', True) def _get_eternus_instance(self, classname, **param_dict): """Get Instance.""" LOG.debug('_get_eternus_instance, ' 'classname: %(cls)s, param: %(param)s.', {'cls': classname, 'param': param_dict}) ret = self.conn.GetInstance(classname, **param_dict) LOG.debug('_get_eternus_instance, ret: %s.', ret) return ret @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) def _assoc_eternus(self, classname, **param_dict): """Associator.""" LOG.debug('_assoc_eternus, ' 'classname: %(cls)s, param: %(param)s.', {'cls': classname, 'param': param_dict}) ret = self.conn.Associators(classname, **param_dict) LOG.debug('_assoc_eternus, enum %d instances.', len(ret)) return ret @lockutils.synchronized('ETERNUS-SMIS-other',
    @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True)
    def _assoc_eternus_names(self, classname, **param_dict):
        """Associator Names."""
        LOG.debug('_assoc_eternus_names, '
                  'classname: %(cls)s, param: %(param)s.',
                  {'cls': classname, 'param': param_dict})

        ret = self.conn.AssociatorNames(classname, **param_dict)

        LOG.debug('_assoc_eternus_names, enum %d names.', len(ret))
        return ret

    @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True)
    def _reference_eternus_names(self, classname, **param_dict):
        """Reference Names."""
        LOG.debug('_reference_eternus_names, '
                  'classname: %(cls)s, param: %(param)s.',
                  {'cls': classname, 'param': param_dict})

        ret = self.conn.ReferenceNames(classname, **param_dict)

        LOG.debug('_reference_eternus_names, enum %d names.', len(ret))
        return ret

    def _create_eternus_instance_name(self, classname, bindings):
        """create CIM InstanceName from classname and bindings."""
        LOG.debug('_create_eternus_instance_name, '
                  'classname: %(cls)s, bindings: %(bind)s.',
                  {'cls': classname, 'bind': bindings})

        instancename = None
        try:
            instancename = pywbem.CIMInstanceName(
                classname,
                namespace='root/eternus',
                keybindings=bindings)
        except NameError:
            instancename = None

        LOG.debug('_create_eternus_instance_name, ret: %s.', instancename)
        return instancename

    def _find_lun(self, volume):
        """find lun instance from volume class or volumename on ETERNUS."""
        LOG.debug('_find_lun, volume id: %s.', volume['id'])
        volumeinstance = None
        volumename = self._create_volume_name(volume['id'])

        try:
            location = ast.literal_eval(volume['provider_location'])
            classname = location['classname']
            bindings = location['keybindings']

            if classname and bindings:
                LOG.debug('_find_lun, '
                          'classname: %(classname)s, '
                          'bindings: %(bindings)s.',
                          {'classname': classname,
                           'bindings': bindings})
                volume_instance_name = (
                    self._create_eternus_instance_name(classname, bindings))

                LOG.debug('_find_lun, '
                          'volume_instance_name: %(volume_instance_name)s.',
                          {'volume_instance_name': volume_instance_name})

                vol_instance = (
                    self._get_eternus_instance(volume_instance_name))

                if vol_instance['ElementName'] == volumename:
                    volumeinstance = vol_instance
        except Exception:
            volumeinstance = None
            LOG.debug('_find_lun, '
                      'Cannot get volume instance from provider location, '
                      'Search all volumes using EnumerateInstanceNames.')

        if volumeinstance is None:
            # For old volumes created by a previous driver version.
            LOG.debug('_find_lun, '
                      'volumename: %(volumename)s.',
                      {'volumename': volumename})

            # Get the volume instance from volumename on ETERNUS.
            try:
                namelist = self._enum_eternus_instance_names(
                    'FUJITSU_StorageVolume')
            except Exception:
                msg = (_('_find_lun, '
                         'volumename: %(volumename)s, '
                         'EnumerateInstanceNames, '
                         'cannot connect to ETERNUS.')
                       % {'volumename': volumename})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            for name in namelist:
                try:
                    vol_instance = self._get_eternus_instance(name)

                    if vol_instance['ElementName'] == volumename:
                        volumeinstance = vol_instance
                        path = volumeinstance.path

                        LOG.debug('_find_lun, '
                                  'volumename: %(volumename)s, '
                                  'vol_instance: %(vol_instance)s.',
                                  {'volumename': volumename,
                                   'vol_instance': path})
                        break
                except Exception:
                    continue
            else:
                LOG.debug('_find_lun, '
                          'volumename: %(volumename)s, '
                          'volume not found on ETERNUS.',
                          {'volumename': volumename})

        LOG.debug('_find_lun, ret: %s.', volumeinstance)
        return volumeinstance
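
    # Illustration (with made-up key bindings) of the provider_location
    # string that _find_lun() above parses with ast.literal_eval().  It is
    # the repr() of a dict naming the CIM class plus the key bindings
    # needed to rebuild a pywbem.CIMInstanceName:
    #
    #     import ast
    #
    #     loc = ("{'classname': 'FUJITSU_StorageVolume', "
    #            "'keybindings': {'CreationClassName': "
    #            "'FUJITSU_StorageVolume', 'DeviceID': '0x0001'}}")
    #     location = ast.literal_eval(loc)
    #     assert location['classname'] == 'FUJITSU_StorageVolume'
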
    def _find_copysession(self, vol_instance):
        """find copy sessions for a volume instance on ETERNUS."""
        LOG.debug('_find_copysession, volume name: %s.',
                  vol_instance['ElementName'])

        try:
            cpsessionlist = self.conn.ReferenceNames(
                vol_instance.path,
                ResultClass='FUJITSU_StorageSynchronized')
        except Exception:
            msg = (_('_find_copysession, '
                     'ReferenceNames, '
                     'vol_instance: %(vol_instance_path)s, '
                     'Cannot connect to ETERNUS.')
                   % {'vol_instance_path': vol_instance.path})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        LOG.debug('_find_copysession, '
                  'cpsessionlist: %(cpsessionlist)s.',
                  {'cpsessionlist': cpsessionlist})

        LOG.debug('_find_copysession, ret: %s.', cpsessionlist)
        return cpsessionlist

    def _wait_for_copy_complete(self, cpsession):
        """Wait for the completion of copy."""
        LOG.debug('_wait_for_copy_complete, cpsession: %s.', cpsession)

        cpsession_instance = None

        while True:
            try:
                cpsession_instance = self.conn.GetInstance(
                    cpsession,
                    LocalOnly=False)
            except Exception:
                cpsession_instance = None

            # If the copy session instance is None, the copy session has
            # finished; break and return.
            if cpsession_instance is None:
                break

            LOG.debug('_wait_for_copy_complete, '
                      'found target copysession, '
                      'wait for end of copysession.')

            if cpsession_instance['CopyState'] == BROKEN:
                msg = (_('_wait_for_copy_complete, '
                         'cpsession: %(cpsession)s, '
                         'copysession state is BROKEN.')
                       % {'cpsession': cpsession})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            time.sleep(10)
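
    # A sketch (illustration only) of the copy-type to operation-code
    # mapping that OPERATION_dic, defined near the top of this module, is
    # assumed to provide for _delete_copysession() below.  The key names
    # are hypothetical placeholders; only the operation codes 19 (Return
    # To ResourcePool, for SnapOPC) and 8 (Detach, for OPC/EC/REC) come
    # from the comments in that method:
    #
    #     OPERATION_dic = {
    #         SNAPOPC: 19,  # SnapOPC -> Return To ResourcePool
    #         OPC: 8,       # OPC     -> Detach
    #         EC: 8,        # EC/REC  -> Detach
    #         REC: 8,
    #     }
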
    def _delete_copysession(self, cpsession):
        """delete copysession."""
        LOG.debug('_delete_copysession, cpsession: %s.', cpsession)

        try:
            cpsession_instance = self._get_eternus_instance(
                cpsession, LocalOnly=False)
        except Exception:
            LOG.info(_LI('_delete_copysession, '
                         'The copysession was already completed.'))
            return

        copytype = cpsession_instance['CopyType']

        # Set the operation code.
        # SnapOPC: 19 (Return To ResourcePool)
        # OPC: 8 (Detach)
        # EC/REC: 8 (Detach)
        operation = OPERATION_dic.get(copytype, None)
        if operation is None:
            msg = (_('_delete_copysession, '
                     'copy session type is undefined! '
                     'copy session: %(cpsession)s, '
                     'copy type: %(copytype)s.')
                   % {'cpsession': cpsession,
                      'copytype': copytype})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        repservice = self._find_eternus_service(REPL)
        if repservice is None:
            msg = (_('_delete_copysession, '
                     'Cannot find Replication Service.'))
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Invoke the method to delete the copysession.
        rc, errordesc, job = self._exec_eternus_service(
            'ModifyReplicaSynchronization',
            repservice,
            Operation=self._pywbem_uint(operation, '16'),
            Synchronization=cpsession,
            Force=True,
            WaitForCopyState=self._pywbem_uint(15, '16'))

        LOG.debug('_delete_copysession, '
                  'copysession: %(cpsession)s, '
                  'operation: %(operation)s, '
                  'Return code: %(rc)lu, '
                  'Error: %(errordesc)s.',
                  {'cpsession': cpsession,
                   'operation': operation,
                   'rc': rc,
                   'errordesc': errordesc})

        if rc == COPYSESSION_NOT_EXIST:
            LOG.debug('_delete_copysession, '
                      'cpsession: %(cpsession)s, '
                      'copysession does not exist.',
                      {'cpsession': cpsession})
        elif rc != 0:
            msg = (_('_delete_copysession, '
                     'copysession: %(cpsession)s, '
                     'operation: %(operation)s, '
                     'Return code: %(rc)lu, '
                     'Error: %(errordesc)s.')
                   % {'cpsession': cpsession,
                      'operation': operation,
                      'rc': rc,
                      'errordesc': errordesc})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def _get_target_port(self):
        """return target portid."""
        LOG.debug('_get_target_port, protocol: %s.', self.protocol)

        target_portlist = []
        if self.protocol == 'fc':
            prtcl_endpoint = 'FUJITSU_SCSIProtocolEndpoint'
            connection_type = 2
        elif self.protocol == 'iSCSI':
            prtcl_endpoint = 'FUJITSU_iSCSIProtocolEndpoint'
            connection_type = 7

        try:
            tgtportlist = self._enum_eternus_instances(prtcl_endpoint)
        except Exception:
            msg = (_('_get_target_port, '
                     'EnumerateInstances, '
                     'cannot connect to ETERNUS.'))
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        for tgtport in tgtportlist:
            # Check the protocol of the target port.
            if tgtport['ConnectionType'] != connection_type:
                continue

            # Skip ports used for remote copy.
            if (tgtport['RAMode'] & 0x7B) != 0x00:
                continue

            # Skip ports used for Storage Cluster.
            if 'SCGroupNo' in tgtport:
                continue

            target_portlist.append(tgtport)

            LOG.debug('_get_target_port, '
                      'connection type: %(cont)s, '
                      'ramode: %(ramode)s.',
                      {'cont': tgtport['ConnectionType'],
                       'ramode': tgtport['RAMode']})

        LOG.debug('_get_target_port, '
                  'target port: %(target_portid)s.',
                  {'target_portid': target_portlist})

        if len(target_portlist) == 0:
            msg = (_('_get_target_port, '
                     'protocol: %(protocol)s, '
                     'target_port not found.')
                   % {'protocol': self.protocol})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        LOG.debug('_get_target_port, ret: %s.', target_portlist)
        return target_portlist

    @lockutils.synchronized('ETERNUS-connect', 'cinder-', True)
    def _map_lun(self, vol_instance, connector, targetlist=None):
        """map volume to host."""
        volumename = vol_instance['ElementName']
        LOG.debug('_map_lun, '
                  'volume name: %(vname)s, connector: %(connector)s.',
                  {'vname': volumename, 'connector': connector})

        volume_uid = vol_instance['Name']
        initiatorlist = self._find_initiator_names(connector)
        aglist = self._find_affinity_group(connector)
        configservice = self._find_eternus_service(CTRL_CONF)

        if targetlist is None:
            targetlist = self._get_target_port()

        if configservice is None:
            msg = (_('_map_lun, '
                     'vol_instance.path: %(vol)s, '
                     'volumename: %(volumename)s, '
                     'volume_uid: %(uid)s, '
                     'initiator: %(initiator)s, '
                     'target: %(tgt)s, '
                     'aglist: %(aglist)s, '
                     'Storage
Configuration Service not found.') % {'vol': vol_instance.path, 'volumename': volumename, 'uid': volume_uid, 'initiator': initiatorlist, 'tgt': targetlist, 'aglist': aglist}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_map_lun, ' 'vol_instance.path: %(vol_instance)s, ' 'volumename:%(volumename)s, ' 'initiator:%(initiator)s, ' 'target:%(tgt)s.', {'vol_instance': vol_instance.path, 'volumename': [volumename], 'initiator': initiatorlist, 'tgt': targetlist}) if not aglist: # Create affinity group and set host-affinity. for target in targetlist: LOG.debug('_map_lun, ' 'lun_name: %(volume_uid)s, ' 'Initiator: %(initiator)s, ' 'target: %(target)s.', {'volume_uid': [volume_uid], 'initiator': initiatorlist, 'target': target['Name']}) rc, errordesc, job = self._exec_eternus_service( 'ExposePaths', configservice, LUNames=[volume_uid], InitiatorPortIDs=initiatorlist, TargetPortIDs=[target['Name']], DeviceAccesses=[self._pywbem_uint(2, '16')]) LOG.debug('_map_lun, ' 'Error: %(errordesc)s, ' 'Return code: %(rc)lu, ' 'Create affinitygroup and set host-affinity.', {'errordesc': errordesc, 'rc': rc}) if rc != 0 and rc != LUNAME_IN_USE: LOG.warning(_LW('_map_lun, ' 'lun_name: %(volume_uid)s, ' 'Initiator: %(initiator)s, ' 'target: %(target)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.'), {'volume_uid': [volume_uid], 'initiator': initiatorlist, 'target': target['Name'], 'rc': rc, 'errordesc': errordesc}) else: # Add lun to affinity group for ag in aglist: LOG.debug('_map_lun, ' 'ag: %(ag)s, lun_name: %(volume_uid)s.', {'ag': ag, 'volume_uid': volume_uid}) rc, errordesc, job = self._exec_eternus_service( 'ExposePaths', configservice, LUNames=[volume_uid], DeviceAccesses=[self._pywbem_uint(2, '16')], ProtocolControllers=[ag]) LOG.debug('_map_lun, ' 'Error: %(errordesc)s, ' 'Return code: %(rc)lu, ' 'Add lun to affinity group.', {'errordesc': errordesc, 'rc': rc}) if rc != 0 and rc != LUNAME_IN_USE: LOG.warning(_LW('_map_lun, ' 'lun_name: %(volume_uid)s, ' 'Initiator: %(initiator)s, ' 'ag: %(ag)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.'), {'volume_uid': [volume_uid], 'initiator': initiatorlist, 'ag': ag, 'rc': rc, 'errordesc': errordesc}) def _find_initiator_names(self, connector): """return initiator names.""" initiatornamelist = [] if self.protocol == 'fc' and connector['wwpns']: LOG.debug('_find_initiator_names, wwpns: %s.', connector['wwpns']) initiatornamelist = connector['wwpns'] elif self.protocol == 'iSCSI' and connector['initiator']: LOG.debug('_find_initiator_names, initiator: %s.', connector['initiator']) initiatornamelist.append(connector['initiator']) if not initiatornamelist: msg = (_('_find_initiator_names, ' 'connector: %(connector)s, ' 'initiator not found.') % {'connector': connector}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_find_initiator_names, ' 'initiator list: %(initiator)s.', {'initiator': initiatornamelist}) return initiatornamelist def _find_affinity_group(self, connector, vol_instance=None): """find affinity group from connector.""" LOG.debug('_find_affinity_group, vol_instance: %s.', vol_instance) affinity_grouplist = [] initiatorlist = self._find_initiator_names(connector) if vol_instance is None: try: aglist = self._enum_eternus_instance_names( 'FUJITSU_AffinityGroupController') except Exception: msg = (_('_find_affinity_group, ' 'connector: %(connector)s, ' 'EnumerateInstanceNames, ' 'cannot connect to ETERNUS.') % {'connector': connector}) LOG.error(msg) raise 
                exception.VolumeBackendAPIException(data=msg)

            LOG.debug('_find_affinity_group, '
                      'affinity_groups: %s.', aglist)
        else:
            try:
                aglist = self._assoc_eternus_names(
                    vol_instance.path,
                    AssocClass='FUJITSU_ProtocolControllerForUnit',
                    ResultClass='FUJITSU_AffinityGroupController')
            except Exception:
                msg = (_('_find_affinity_group, '
                         'connector: %(connector)s, '
                         'AssocNames: FUJITSU_ProtocolControllerForUnit, '
                         'cannot connect to ETERNUS.')
                       % {'connector': connector})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            LOG.debug('_find_affinity_group, '
                      'vol_instance.path: %(volume)s, '
                      'affinity_groups: %(aglist)s.',
                      {'volume': vol_instance.path,
                       'aglist': aglist})

        for ag in aglist:
            try:
                hostaglist = self._assoc_eternus(
                    ag,
                    AssocClass='FUJITSU_AuthorizedTarget',
                    ResultClass='FUJITSU_AuthorizedPrivilege')
            except Exception:
                msg = (_('_find_affinity_group, '
                         'connector: %(connector)s, '
                         'Associators: FUJITSU_AuthorizedTarget, '
                         'cannot connect to ETERNUS.')
                       % {'connector': connector})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            for hostag in hostaglist:
                for initiator in initiatorlist:
                    if initiator.lower() not in hostag['InstanceID'].lower():
                        continue

                    LOG.debug('_find_affinity_group, '
                              'AffinityGroup: %(ag)s.', {'ag': ag})
                    affinity_grouplist.append(ag)
                    break
                break

        LOG.debug('_find_affinity_group, '
                  'initiators: %(initiator)s, '
                  'affinity_group: %(affinity_group)s.',
                  {'initiator': initiatorlist,
                   'affinity_group': affinity_grouplist})
        return affinity_grouplist

    @lockutils.synchronized('ETERNUS-connect', 'cinder-', True)
    def _unmap_lun(self, volume, connector, force=False):
        """unmap volume from host."""
        LOG.debug('_unmap_lun, volume id: %(vid)s, '
                  'connector: %(connector)s, force: %(frc)s.',
                  {'vid': volume['id'],
                   'connector': connector, 'frc': force})

        volumename = self._create_volume_name(volume['id'])
        vol_instance = self._find_lun(volume)
        if vol_instance is None:
            LOG.info(_LI('_unmap_lun, '
                         'volumename: %(volumename)s, '
                         'volume not found.'),
                     {'volumename': volumename})
            return False

        volume_uid = vol_instance['Name']

        if not force:
            aglist = self._find_affinity_group(connector, vol_instance)
            if not aglist:
                LOG.info(_LI('_unmap_lun, '
                             'volumename: %(volumename)s, '
                             'volume is not mapped.'),
                         {'volumename': volumename})
                return False
        else:
            try:
                aglist = self._assoc_eternus_names(
                    vol_instance.path,
                    AssocClass='CIM_ProtocolControllerForUnit',
                    ResultClass='FUJITSU_AffinityGroupController')
            except Exception:
                msg = (_('_unmap_lun, '
                         'vol_instance.path: %(volume)s, '
                         'AssociatorNames: CIM_ProtocolControllerForUnit, '
                         'cannot connect to ETERNUS.')
                       % {'volume': vol_instance.path})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            LOG.debug('_unmap_lun, '
                      'vol_instance.path: %(volume)s, '
                      'affinity_groups: %(aglist)s.',
                      {'volume': vol_instance.path,
                       'aglist': aglist})

        configservice = self._find_eternus_service(CTRL_CONF)
        if configservice is None:
            msg = (_('_unmap_lun, '
                     'vol_instance.path: %(volume)s, '
                     'volumename: %(volumename)s, '
                     'volume_uid: %(uid)s, '
                     'aglist: %(aglist)s, '
                     'Controller Configuration Service not found.')
                   % {'volume': vol_instance.path,
                      'volumename': volumename,
                      'uid': volume_uid,
                      'aglist': aglist})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        for ag in aglist:
            LOG.debug('_unmap_lun, '
                      'volumename: %(volumename)s, '
                      'volume_uid: %(volume_uid)s, '
                      'AffinityGroup: %(ag)s.',
                      {'volumename': volumename,
                       'volume_uid': volume_uid,
                       'ag': ag})

            rc, errordesc, job = self._exec_eternus_service(
                'HidePaths', configservice,
LUNames=[volume_uid], ProtocolControllers=[ag]) LOG.debug('_unmap_lun, ' 'Error: %(errordesc)s, ' 'Return code: %(rc)lu.', {'errordesc': errordesc, 'rc': rc}) if rc == LUNAME_NOT_EXIST: LOG.debug('_unmap_lun, ' 'volumename: %(volumename)s, ' 'Invalid LUNames.', {'volumename': volumename}) elif rc != 0: msg = (_('_unmap_lun, ' 'volumename: %(volumename)s, ' 'volume_uid: %(volume_uid)s, ' 'AffinityGroup: %(ag)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': volumename, 'volume_uid': volume_uid, 'ag': ag, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_unmap_lun, ' 'volumename: %(volumename)s.', {'volumename': volumename}) return True def _get_eternus_iscsi_properties(self): """get target port iqns and target_portals.""" iscsi_properties_list = [] iscsiip_list = self._get_drvcfg('EternusISCSIIP', multiple=True) iscsi_port = self.configuration.iscsi_port LOG.debug('_get_eternus_iscsi_properties, iplist: %s.', iscsiip_list) try: ip_endpointlist = self._enum_eternus_instance_names( 'FUJITSU_IPProtocolEndpoint') except Exception: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip: %(iscsiip)s, ' 'EnumerateInstanceNames, ' 'cannot connect to ETERNUS.') % {'iscsiip': iscsiip_list}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for ip_endpoint in ip_endpointlist: try: ip_endpoint_instance = self._get_eternus_instance( ip_endpoint) ip_address = ip_endpoint_instance['IPv4Address'] LOG.debug('_get_eternus_iscsi_properties, ' 'instanceip: %(ip)s, ' 'iscsiip: %(iscsiip)s.', {'ip': ip_address, 'iscsiip': iscsiip_list}) except Exception: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip: %(iscsiip)s, ' 'GetInstance, ' 'cannot connect to ETERNUS.') % {'iscsiip': iscsiip_list}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if ip_address not in iscsiip_list: continue LOG.debug('_get_eternus_iscsi_properties, ' 'find iscsiip: %(ip)s.', {'ip': ip_address}) try: tcp_endpointlist = self._assoc_eternus_names( ip_endpoint, AssocClass='CIM_BindsTo', ResultClass='FUJITSU_TCPProtocolEndpoint') except Exception: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip: %(iscsiip)s, ' 'AssociatorNames: CIM_BindsTo, ' 'cannot connect to ETERNUS.') % {'iscsiip': ip_address}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for tcp_endpoint in tcp_endpointlist: try: iscsi_endpointlist = ( self._assoc_eternus(tcp_endpoint, AssocClass='CIM_BindsTo', ResultClass='FUJITSU_iSCSI' 'ProtocolEndpoint')) except Exception: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip: %(iscsiip)s, ' 'AssociatorNames: CIM_BindsTo, ' 'cannot connect to ETERNUS.') % {'iscsiip': ip_address}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for iscsi_endpoint in iscsi_endpointlist: target_portal = "%s:%s" % (ip_address, iscsi_port) iqn = iscsi_endpoint['Name'].split(',')[0] iscsi_properties_list.append((iscsi_endpoint.path, target_portal, iqn)) LOG.debug('_get_eternus_iscsi_properties, ' 'target_portal: %(target_portal)s, ' 'iqn: %(iqn)s.', {'target_portal': target_portal, 'iqn': iqn}) if len(iscsi_properties_list) == 0: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip list: %(iscsiip_list)s, ' 'iqn not found.') % {'iscsiip_list': iscsiip_list}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_get_eternus_iscsi_properties, ' 'iscsi_properties_list: %(iscsi_properties_list)s.', {'iscsi_properties_list': iscsi_properties_list}) return 
iscsi_properties_list def _wait_for_job_complete(self, conn, job): """Given the job wait for it to complete.""" self.retries = 0 self.wait_for_job_called = False def _wait_for_job_complete(): """Called at an interval until the job is finished.""" if self._is_job_finished(conn, job): raise loopingcall.LoopingCallDone() if self.retries > JOB_RETRIES: LOG.error(_LE("_wait_for_job_complete, " "failed after %(retries)d tries."), {'retries': self.retries}) raise loopingcall.LoopingCallDone() try: self.retries += 1 if not self.wait_for_job_called: if self._is_job_finished(conn, job): self.wait_for_job_called = True except Exception as e: LOG.error(_LE("Exception: %s"), e) exceptionMessage = _("Issue encountered waiting for job.") LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(exceptionMessage) self.wait_for_job_called = False timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete) timer.start(interval=JOB_INTERVAL_SEC).wait() jobInstanceName = job['Job'] jobinstance = conn.GetInstance(jobInstanceName, LocalOnly=False) rc = jobinstance['ErrorCode'] LOG.debug('_wait_for_job_complete, rc: %s.', rc) return rc def _is_job_finished(self, conn, job): """Check if the job is finished.""" jobInstanceName = job['Job'] jobinstance = conn.GetInstance(jobInstanceName, LocalOnly=False) jobstate = jobinstance['JobState'] LOG.debug('_is_job_finished,' 'state: %(state)s', {'state': jobstate}) # From ValueMap of JobState in CIM_ConcreteJob # 2=New, 3=Starting, 4=Running, 32767=Queue Pending # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767, # 32768..65535"), # Values("New, Starting, Running, Suspended, Shutting Down, # Completed, Terminated, Killed, Exception, Service, # Query Pending, DMTF Reserved, Vendor Reserved")] # NOTE(deva): string matching based on # http://ipmitool.cvs.sourceforge.net/ # viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c if jobstate in [2, 3, 4]: job_finished = False else: job_finished = True LOG.debug('_is_job_finished, finish: %s.', job_finished) return job_finished def _pywbem_uint(self, num, datatype): try: result = { '8': pywbem.Uint8(num), '16': pywbem.Uint16(num), '32': pywbem.Uint32(num), '64': pywbem.Uint64(num) } result = result.get(datatype, num) except NameError: result = num return result cinder-8.0.0/cinder/volume/drivers/fujitsu/eternus_dx_fc.py0000664000567000056710000001731512701406250025254 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 FUJITSU LIMITED # Copyright (c) 2012 EMC Corporation. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ FibreChannel Cinder Volume driver for Fujitsu ETERNUS DX S3 series. 
""" from oslo_log import log as logging import six from cinder.volume import driver from cinder.volume.drivers.fujitsu import eternus_dx_common from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class FJDXFCDriver(driver.FibreChannelDriver): """FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.""" def __init__(self, *args, **kwargs): super(FJDXFCDriver, self).__init__(*args, **kwargs) self.common = eternus_dx_common.FJDXCommon( 'fc', configuration=self.configuration) self.VERSION = self.common.VERSION def check_for_setup_error(self): pass def create_volume(self, volume): """Create volume.""" LOG.debug('create_volume, ' 'volume id: %s, enter method.', volume['id']) location, metadata = self.common.create_volume(volume) v_metadata = self._get_metadata(volume) metadata.update(v_metadata) LOG.debug('create_volume, info: %s, exit method.', metadata) return {'provider_location': six.text_type(location), 'metadata': metadata} def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug('create_volume_from_snapshot, ' 'volume id: %(vid)s, snap id: %(sid)s, enter method.', {'vid': volume['id'], 'sid': snapshot['id']}) location, metadata = ( self.common.create_volume_from_snapshot(volume, snapshot)) v_metadata = self._get_metadata(volume) metadata.update(v_metadata) LOG.debug('create_volume_from_snapshot, ' 'info: %s, exit method.', metadata) return {'provider_location': six.text_type(location), 'metadata': metadata} def create_cloned_volume(self, volume, src_vref): """Create cloned volume.""" LOG.debug('create_cloned_volume, ' 'target volume id: %(tid)s, ' 'source volume id: %(sid)s, enter method.', {'tid': volume['id'], 'sid': src_vref['id']}) location, metadata = ( self.common.create_cloned_volume(volume, src_vref)) v_metadata = self._get_metadata(volume) metadata.update(v_metadata) LOG.debug('create_cloned_volume, ' 'info: %s, exit method.', metadata) return {'provider_location': six.text_type(location), 'metadata': metadata} def delete_volume(self, volume): """Delete volume on ETERNUS.""" LOG.debug('delete_volume, ' 'volume id: %s, enter method.', volume['id']) vol_exist = self.common.delete_volume(volume) LOG.debug('delete_volume, ' 'delete: %s, exit method.', vol_exist) def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.debug('create_snapshot, ' 'snap id: %(sid)s, volume id: %(vid)s, enter method.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) location, metadata = self.common.create_snapshot(snapshot) LOG.debug('create_snapshot, info: %s, exit method.', metadata) return {'provider_location': six.text_type(location)} def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.debug('delete_snapshot, ' 'snap id: %(sid)s, volume id: %(vid)s, enter method.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) vol_exist = self.common.delete_snapshot(snapshot) LOG.debug('delete_snapshot, ' 'delete: %s, exit method.', vol_exist) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" return def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" return def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" return @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" LOG.debug('initialize_connection, volume id: %(vid)s, ' 'wwpns: 
%(wwpns)s, enter method.', {'vid': volume['id'], 'wwpns': connector['wwpns']}) info = self.common.initialize_connection(volume, connector) data = info['data'] init_tgt_map = ( self.common.build_fc_init_tgt_map(connector, data['target_wwn'])) data['initiator_target_map'] = init_tgt_map info['data'] = data LOG.debug('initialize_connection, ' 'info: %s, exit method.', info) return info @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" LOG.debug('terminate_connection, volume id: %(vid)s, ' 'wwpns: %(wwpns)s, enter method.', {'vid': volume['id'], 'wwpns': connector['wwpns']}) map_exist = self.common.terminate_connection(volume, connector) attached = self.common.check_attached_volume_in_zone(connector) info = {'driver_volume_type': 'fibre_channel', 'data': {}} if not attached: # No more volumes attached to the host init_tgt_map = self.common.build_fc_init_tgt_map(connector) info['data'] = {'initiator_target_map': init_tgt_map} LOG.debug('terminate_connection, unmap: %(unmap)s, ' 'connection info: %(info)s, exit method', {'unmap': map_exist, 'info': info}) return info def get_volume_stats(self, refresh=False): """Get volume stats.""" LOG.debug('get_volume_stats, refresh: %s, enter method.', refresh) pool_name = None if refresh is True: data, pool_name = self.common.update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'FJDXFCDriver' data['storage_protocol'] = 'FC' self._stats = data LOG.debug('get_volume_stats, ' 'pool name: %s, exit method.', pool_name) return self._stats def extend_volume(self, volume, new_size): """Extend volume.""" LOG.debug('extend_volume, ' 'volume id: %s, enter method.', volume['id']) used_pool_name = self.common.extend_volume(volume, new_size) LOG.debug('extend_volume, ' 'used pool name: %s, exit method.', used_pool_name) def _get_metadata(self, volume): v_metadata = volume.get('volume_metadata') if v_metadata: ret = {data['key']: data['value'] for data in v_metadata} else: ret = volume.get('metadata', {}) return ret cinder-8.0.0/cinder/volume/drivers/fujitsu/__init__.py0000664000567000056710000000000012701406250024142 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py0000664000567000056710000001675112701406250026001 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 FUJITSU LIMITED # Copyright (c) 2012 EMC Corporation. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ iSCSI Cinder Volume driver for Fujitsu ETERNUS DX S3 series. 
""" import six from cinder.i18n import _LI from cinder.volume import driver from cinder.volume.drivers.fujitsu import eternus_dx_common from oslo_log import log as logging LOG = logging.getLogger(__name__) class FJDXISCSIDriver(driver.ISCSIDriver): """iSCSI Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.""" def __init__(self, *args, **kwargs): super(FJDXISCSIDriver, self).__init__(*args, **kwargs) self.common = eternus_dx_common.FJDXCommon( 'iSCSI', configuration=self.configuration) self.VERSION = self.common.VERSION def check_for_setup_error(self): return def create_volume(self, volume): """Create volume.""" LOG.info(_LI('create_volume, ' 'volume id: %s, Enter method.'), volume['id']) element_path, metadata = self.common.create_volume(volume) v_metadata = volume.get('volume_metadata') if v_metadata: for data in v_metadata: metadata[data['key']] = data['value'] else: v_metadata = volume.get('metadata', {}) metadata.update(v_metadata) LOG.info(_LI('create_volume, info: %s, Exit method.'), metadata) return {'provider_location': six.text_type(element_path), 'metadata': metadata} def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.info(_LI('create_volume_from_snapshot, ' 'volume id: %(vid)s, snap id: %(sid)s, Enter method.'), {'vid': volume['id'], 'sid': snapshot['id']}) element_path, metadata = ( self.common.create_volume_from_snapshot(volume, snapshot)) v_metadata = volume.get('volume_metadata') if v_metadata: for data in v_metadata: metadata[data['key']] = data['value'] else: v_metadata = volume.get('metadata', {}) metadata.update(v_metadata) LOG.info(_LI('create_volume_from_snapshot, ' 'info: %s, Exit method.'), metadata) return {'provider_location': six.text_type(element_path), 'metadata': metadata} def create_cloned_volume(self, volume, src_vref): """Create cloned volume.""" LOG.info(_LI('create_cloned_volume, ' 'target volume id: %(tid)s, ' 'source volume id: %(sid)s, Enter method.'), {'tid': volume['id'], 'sid': src_vref['id']}) element_path, metadata = ( self.common.create_cloned_volume(volume, src_vref)) v_metadata = volume.get('volume_metadata') if v_metadata: for data in v_metadata: metadata[data['key']] = data['value'] else: v_metadata = volume.get('metadata', {}) metadata.update(v_metadata) LOG.info(_LI('create_cloned_volume, ' 'info: %s, Exit method.'), metadata) return {'provider_location': six.text_type(element_path), 'metadata': metadata} def delete_volume(self, volume): """Delete volume on ETERNUS.""" LOG.info(_LI('delete_volume, ' 'volume id: %s, Enter method.'), volume['id']) vol_exist = self.common.delete_volume(volume) LOG.info(_LI('delete_volume, ' 'delete: %s, Exit method.'), vol_exist) return def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.info(_LI('create_snapshot, ' 'snap id: %(sid)s, volume id: %(vid)s, Enter method.'), {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) element_path, metadata = self.common.create_snapshot(snapshot) LOG.info(_LI('create_snapshot, info: %s, Exit method.'), metadata) return {'provider_location': six.text_type(element_path)} def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.info(_LI('delete_snapshot, ' 'snap id: %(sid)s, volume id: %(vid)s, Enter method.'), {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) vol_exist = self.common.delete_snapshot(snapshot) LOG.info(_LI('delete_snapshot, ' 'delete: %s, Exit method.'), vol_exist) return def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing 
volume.""" return def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" return def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" return def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" LOG.info(_LI('initialize_connection, volume id: %(vid)s, ' 'initiator: %(initiator)s, Enter method.'), {'vid': volume['id'], 'initiator': connector['initiator']}) info = self.common.initialize_connection(volume, connector) LOG.info(_LI('initialize_connection, ' 'info: %s, Exit method.'), info) return info def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" LOG.info(_LI('terminate_connection, volume id: %(vid)s, ' 'initiator: %(initiator)s, Enter method.'), {'vid': volume['id'], 'initiator': connector['initiator']}) map_exist = self.common.terminate_connection(volume, connector) LOG.info(_LI('terminate_connection, ' 'unmap: %s, Exit method.'), map_exist) return def get_volume_stats(self, refresh=False): """Get volume stats.""" LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh) pool_name = None if refresh is True: data, pool_name = self.common.update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'FJDXISCSIDriver' data['storage_protocol'] = 'iSCSI' self._stats = data LOG.debug('get_volume_stats, ' 'pool name: %s, Exit method.', pool_name) return self._stats def extend_volume(self, volume, new_size): """Extend volume.""" LOG.info(_LI('extend_volume, ' 'volume id: %s, Enter method.'), volume['id']) used_pool_name = self.common.extend_volume(volume, new_size) LOG.info(_LI('extend_volume, ' 'used pool name: %s, Exit method.'), used_pool_name) cinder-8.0.0/cinder/volume/drivers/coho.py0000664000567000056710000003155312701406250021663 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Coho Data, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
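
# Background sketch (illustration only, not used by this module): the
# Client class below speaks ONC RPC (RFC 5531) over TCP, which requires
# "record marking": every record is prefixed with a 4-byte big-endian
# word whose top bit flags the last fragment and whose low 31 bits carry
# the fragment length.  This is what _sendfrag()/_recvfrag() implement
# by hand:
#
#     import struct
#
#     def frame(payload, last=True):
#         word = len(payload) | (0x80000000 if last else 0)
#         return struct.pack('>I', word) + payload
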
import os import six import socket import xdrlib from oslo_config import cfg from oslo_log import log as logging from random import randint from cinder import exception from cinder.i18n import _ from cinder.volume.drivers import nfs # # RPC Definition # RPCVERSION = 2 CALL = 0 REPLY = 1 AUTH_NULL = 0 MSG_ACCEPTED = 0 MSG_DENIED = 1 SUCCESS = 0 PROG_UNAVAIL = 1 PROG_MISMATCH = 2 PROC_UNAVAIL = 3 GARBAGE_ARGS = 4 RPC_MISMATCH = 0 AUTH_ERROR = 1 COHO_PROGRAM = 400115 COHO_V1 = 1 COHO1_CREATE_SNAPSHOT = 1 COHO1_DELETE_SNAPSHOT = 2 COHO1_CREATE_VOLUME_FROM_SNAPSHOT = 3 # # Simple RPC Client # def make_auth_null(): return six.b('') class Client(object): def __init__(self, address, prog, vers, port): self.packer = xdrlib.Packer() self.unpacker = xdrlib.Unpacker('') self.address = address self.prog = prog self.vers = vers self.port = port self.cred = None self.verf = None self.init_socket() self.init_xid() def init_socket(self): try: self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.bind(('', 0)) self.sock.connect((self.address, self.port)) except socket.error: msg = _('Failed to establish connection with Coho cluster') raise exception.CohoException(msg) def init_xid(self): self.xid = randint(0, 4096) def make_xid(self): self.xid += 1 def make_cred(self): if self.cred is None: self.cred = (AUTH_NULL, make_auth_null()) return self.cred def make_verf(self): if self.verf is None: self.verf = (AUTH_NULL, make_auth_null()) return self.verf def pack_auth(self, auth): flavor, stuff = auth self.packer.pack_enum(flavor) self.packer.pack_opaque(stuff) def pack_callheader(self, xid, prog, vers, proc, cred, verf): self.packer.pack_uint(xid) self.packer.pack_enum(CALL) self.packer.pack_uint(RPCVERSION) self.packer.pack_uint(prog) self.packer.pack_uint(vers) self.packer.pack_uint(proc) self.pack_auth(cred) self.pack_auth(verf) def unpack_auth(self): flavor = self.unpacker.unpack_enum() stuff = self.unpacker.unpack_opaque() return (flavor, stuff) def unpack_replyheader(self): xid = self.unpacker.unpack_uint() mtype = self.unpacker.unpack_enum() if mtype != REPLY: raise exception.CohoException( _('no REPLY but %r') % (mtype,)) stat = self.unpacker.unpack_enum() if stat == MSG_DENIED: stat = self.unpacker.unpack_enum() if stat == RPC_MISMATCH: low = self.unpacker.unpack_uint() high = self.unpacker.unpack_uint() raise exception.CohoException( _('MSG_DENIED: RPC_MISMATCH: %r') % ((low, high),)) if stat == AUTH_ERROR: stat = self.unpacker.unpack_uint() raise exception.CohoException( _('MSG_DENIED: AUTH_ERROR: %r') % (stat,)) raise exception.CohoException(_('MSG_DENIED: %r') % (stat,)) if stat != MSG_ACCEPTED: raise exception.CohoException( _('Neither MSG_DENIED nor MSG_ACCEPTED: %r') % (stat,)) verf = self.unpack_auth() stat = self.unpacker.unpack_enum() if stat == PROG_UNAVAIL: raise exception.CohoException(_('call failed: PROG_UNAVAIL')) if stat == PROG_MISMATCH: low = self.unpacker.unpack_uint() high = self.unpacker.unpack_uint() raise exception.CohoException( _('call failed: PROG_MISMATCH: %r') % ((low, high),)) if stat == PROC_UNAVAIL: raise exception.CohoException(_('call failed: PROC_UNAVAIL')) if stat == GARBAGE_ARGS: raise exception.CohoException(_('call failed: GARBAGE_ARGS')) if stat != SUCCESS: raise exception.CohoException(_('call failed: %r') % (stat,)) return xid, verf def init_call(self, proc, args): self.make_xid() self.packer.reset() cred = self.make_cred() verf = self.make_verf() self.pack_callheader(self.xid, self.prog, self.vers, proc, cred, verf) for arg, func in args: 
            func(arg)

        return self.xid, self.packer.get_buf()

    def _sendfrag(self, last, frag):
        x = len(frag)
        if last:
            x = x | 0x80000000
        header = (six.int2byte(int(x >> 24 & 0xff)) +
                  six.int2byte(int(x >> 16 & 0xff)) +
                  six.int2byte(int(x >> 8 & 0xff)) +
                  six.int2byte(int(x & 0xff)))
        self.sock.send(header + frag)

    def _sendrecord(self, record):
        self._sendfrag(1, record)

    def _recvfrag(self):
        header = self.sock.recv(4)
        if len(header) < 4:
            raise exception.CohoException(
                _('Invalid response header from RPC server'))
        x = (six.indexbytes(header, 0) << 24 |
             six.indexbytes(header, 1) << 16 |
             six.indexbytes(header, 2) << 8 |
             six.indexbytes(header, 3))
        last = ((x & 0x80000000) != 0)
        n = int(x & 0x7fffffff)
        frag = six.b('')
        while n > 0:
            buf = self.sock.recv(n)
            if not buf:
                raise exception.CohoException(
                    _('RPC server response is incomplete'))
            n = n - len(buf)
            frag = frag + buf
        return last, frag

    def _recvrecord(self):
        record = six.b('')
        last = 0
        while not last:
            last, frag = self._recvfrag()
            record = record + frag
        return record

    def _make_call(self, proc, args):
        self.packer.reset()
        xid, call = self.init_call(proc, args)
        self._sendrecord(call)
        reply = self._recvrecord()
        self.unpacker.reset(reply)
        xid, verf = self.unpack_replyheader()

    def _call(self, proc, args):
        self._make_call(proc, args)
        res = self.unpacker.unpack_uint()
        if res != SUCCESS:
            raise exception.CohoException(os.strerror(res))


class CohoRPCClient(Client):
    def __init__(self, address, port):
        Client.__init__(self, address, COHO_PROGRAM, COHO_V1, port)

    def create_snapshot(self, src, dst, flags):
        self._call(COHO1_CREATE_SNAPSHOT,
                   [(six.b(src), self.packer.pack_string),
                    (six.b(dst), self.packer.pack_string),
                    (flags, self.packer.pack_uint)])

    def delete_snapshot(self, name):
        self._call(COHO1_DELETE_SNAPSHOT,
                   [(six.b(name), self.packer.pack_string)])

    def create_volume_from_snapshot(self, src, dst):
        self._call(COHO1_CREATE_VOLUME_FROM_SNAPSHOT,
                   [(six.b(src), self.packer.pack_string),
                    (six.b(dst), self.packer.pack_string)])


#
# Coho Data Volume Driver
#
VERSION = '1.0.0'

LOG = logging.getLogger(__name__)

coho_opts = [
    cfg.IntOpt('coho_rpc_port',
               default=2049,
               help='RPC port to connect to Coho Data MicroArray')
]

CONF = cfg.CONF
CONF.register_opts(coho_opts)


class CohoDriver(nfs.NfsDriver):
    """Coho Data NFS based cinder driver.

    Creates files on an NFS share for use as block devices on the
    hypervisor.

    Version history:
        1.0.0 - Initial driver
    """

    # We have to overload this attribute of RemoteFSDriver because
    # unfortunately the base method doesn't accept exports of the form:
    # <address>
:/ # It expects a non blank export name following the /. # We are more permissive. SHARE_FORMAT_REGEX = r'.+:/.*' def __init__(self, *args, **kwargs): super(CohoDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(coho_opts) self._rpcclients = dict() self._backend_name = (self.configuration.volume_backend_name or self.__class__.__name__) def _init_rpcclient(self, addr, port): client = CohoRPCClient(addr, port) self._rpcclients[(addr, port)] = client return client def _get_rpcclient(self, addr, port): if (addr, port) in self._rpcclients: return self._rpcclients[(addr, port)] return self._init_rpcclient(addr, port) def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(CohoDriver, self).do_setup(context) self._execute_as_root = True self._context = context config = self.configuration.coho_rpc_port if not config: msg = _("Coho rpc port is not configured") LOG.warning(msg) raise exception.CohoException(msg) if config < 1 or config > 65535: msg = (_("Invalid port number %(config)s for Coho rpc port") % {'config': config}) LOG.warning(msg) raise exception.CohoException(msg) def _do_clone_volume(self, volume, src): """Clone volume to source. Create a volume on given remote share with the same contents as the specified source. """ volume_path = self.local_path(volume) source_path = self.local_path(src) self._execute('cp', source_path, volume_path, run_as_root=self._execute_as_root) def _get_volume_location(self, volume_id): """Returns provider location for given volume.""" # The driver should not directly access db, but since volume is not # passed in create_snapshot and delete_snapshot we are forced to read # the volume info from the database volume = self.db.volume_get(self._context, volume_id) addr, path = volume.provider_location.split(":") return addr, path def create_snapshot(self, snapshot): """Create a volume snapshot.""" addr, path = self._get_volume_location(snapshot['volume_id']) volume_path = os.path.join(path, snapshot['volume_name']) snapshot_name = snapshot['name'] flags = 0 # unused at this time client = self._get_rpcclient(addr, self.configuration.coho_rpc_port) client.create_snapshot(volume_path, snapshot_name, flags) def delete_snapshot(self, snapshot): """Delete a volume snapshot.""" addr, path = self._get_volume_location(snapshot['volume_id']) snapshot_name = snapshot['name'] client = self._get_rpcclient(addr, self.configuration.coho_rpc_port) client.delete_snapshot(snapshot_name) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" volume['provider_location'] = self._find_share(volume['size']) addr, path = volume['provider_location'].split(":") volume_path = os.path.join(path, volume['name']) snapshot_name = snapshot['name'] client = self._get_rpcclient(addr, self.configuration.coho_rpc_port) client.create_volume_from_snapshot(snapshot_name, volume_path) return {'provider_location': volume['provider_location']} def _extend_file_sparse(self, path, size): """Extend the size of a file (with no additional disk usage).""" self._execute('truncate', '-s', '%sG' % size, path, run_as_root=self._execute_as_root) def create_cloned_volume(self, volume, src_vref): volume['provider_location'] = self._find_share(volume['size']) self._do_clone_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend the specified file to the new_size (sparsely).""" volume_path = self.local_path(volume) self._extend_file_sparse(volume_path, new_size) def get_volume_stats(self, 
refresh): """Pass in Coho Data information in volume stats.""" _stats = super(CohoDriver, self).get_volume_stats(refresh) _stats["vendor_name"] = 'Coho Data' _stats["driver_version"] = VERSION _stats["storage_protocol"] = 'NFS' _stats["volume_backend_name"] = self._backend_name _stats["total_capacity_gb"] = 'unknown' _stats["free_capacity_gb"] = 'unknown' _stats["export_paths"] = self._mounted_shares return _stats cinder-8.0.0/cinder/volume/drivers/emc/0000775000567000056710000000000012701406543021123 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_provision.py0000664000567000056710000012543612701406250025412 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.emc import emc_vmax_utils LOG = logging.getLogger(__name__) STORAGEGROUPTYPE = 4 POSTGROUPTYPE = 3 EMC_ROOT = 'root/emc' THINPROVISIONINGCOMPOSITE = 32768 THINPROVISIONING = 5 SYNC_CLONE_LOCAL = 10 COPY_ON_WRITE = 6 TF_CLONE = 8 class EMCVMAXProvision(object): """Provisioning Class for SMI-S based EMC volume drivers. This Provisioning class is for EMC volume drivers based on SMI-S. It supports VMAX arrays. """ def __init__(self, prtcl): self.protocol = prtcl self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl) def delete_volume_from_pool( self, conn, storageConfigservice, volumeInstanceName, volumeName, extraSpecs): """Given the volume instance remove it from the pool. :param conn: connection the the ecom server :param storageConfigservice: volume created from job :param volumeInstanceName: the volume instance name :param volumeName: the volume name (String) :param extraSpecs: additional info :returns: int -- return code :raises: VolumeBackendAPIException """ startTime = time.time() if isinstance(volumeInstanceName, list): theElements = volumeInstanceName volumeName = 'Bulk Delete' else: theElements = [volumeInstanceName] rc, job = conn.InvokeMethod( 'ReturnElementsToStoragePool', storageConfigservice, TheElements=theElements) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Delete Volume: %(volumeName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'volumeName': volumeName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod EMCReturnToStoragePool took: " "%(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc def create_volume_from_pool( self, conn, storageConfigService, volumeName, poolInstanceName, volumeSize, extraSpecs): """Create the volume in the specified pool. 
:param conn: the connection information to the ecom server :param storageConfigService: the storage configuration service :param volumeName: the volume name (String) :param poolInstanceName: the pool instance name to create the dummy volume in :param volumeSize: volume size (String) :param extraSpecs: additional info :returns: dict -- the volume dict :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateOrModifyElementFromStoragePool', storageConfigService, ElementName=volumeName, InPool=poolInstanceName, ElementType=self.utils.get_num(THINPROVISIONING, '16'), Size=self.utils.get_num(volumeSize, '64'), EMCBindElements=False) LOG.debug("Create Volume: %(volumename)s Return code: %(rc)lu.", {'volumename': volumeName, 'rc': rc}) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Volume: %(volumeName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'volumeName': volumeName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) # Find the newly created volume. volumeDict = self.get_volume_dict_from_job(conn, job['Job']) return volumeDict, rc def create_and_get_storage_group(self, conn, controllerConfigService, storageGroupName, volumeInstanceName, extraSpecs): """Create a storage group and return it. :param conn: the connection information to the ecom server :param controllerConfigService: the controller configuration service :param storageGroupName: the storage group name (String :param volumeInstanceName: the volume instance name :param extraSpecs: additional info :returns: foundStorageGroupInstanceName - instance name of the default storage group :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateGroup', controllerConfigService, GroupName=storageGroupName, Type=self.utils.get_num(STORAGEGROUPTYPE, '16'), Members=[volumeInstanceName]) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Group: %(groupName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': storageGroupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateGroup " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) foundStorageGroupInstanceName = self._find_new_storage_group( conn, job, storageGroupName) return foundStorageGroupInstanceName def create_storage_group_no_members( self, conn, controllerConfigService, groupName, extraSpecs): """Create a new storage group that has no members. :param conn: connection the ecom server :param controllerConfigService: the controller configuration service :param groupName: the proposed group name :param extraSpecs: additional info :returns: foundStorageGroupInstanceName :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateGroup', controllerConfigService, GroupName=groupName, Type=self.utils.get_num(STORAGEGROUPTYPE, '16'), DeleteWhenBecomesUnassociated=False) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Group: %(groupName)s. 
" "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': groupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateGroup " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) foundStorageGroupInstanceName = self._find_new_storage_group( conn, job, groupName) return foundStorageGroupInstanceName def _find_new_storage_group( self, conn, maskingGroupDict, storageGroupName): """After creating a new storage group find it and return it. :param conn: connection the ecom server :param maskingGroupDict: the maskingGroupDict dict :param storageGroupName: storage group name (String) :returns: maskingGroupDict['MaskingGroup'] """ foundStorageGroupInstanceName = None if 'MaskingGroup' in maskingGroupDict: foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup'] return foundStorageGroupInstanceName def get_volume_dict_from_job(self, conn, jobInstance): """Given the jobInstance determine the volume Instance. :param conn: the ecom connection :param jobInstance: the instance of a job :returns: dict -- volumeDict - an instance of a volume """ associators = conn.Associators( jobInstance, ResultClass='EMC_StorageVolume') volpath = associators[0].path volumeDict = {} volumeDict['classname'] = volpath.classname keys = {} keys['CreationClassName'] = volpath['CreationClassName'] keys['SystemName'] = volpath['SystemName'] keys['DeviceID'] = volpath['DeviceID'] keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] volumeDict['keybindings'] = keys return volumeDict def remove_device_from_storage_group( self, conn, controllerConfigService, storageGroupInstanceName, volumeInstanceName, volumeName, extraSpecs): """Remove a volume from a storage group. :param conn: the connection to the ecom server :param controllerConfigService: the controller configuration service :param storageGroupInstanceName: the instance name of the storage group :param volumeInstanceName: the instance name of the volume :param volumeName: the volume name (String) :param extraSpecs: additional info :returns: int -- the return code of the job :raises: VolumeBackendAPIException """ startTime = time.time() rc, jobDict = conn.InvokeMethod('RemoveMembers', controllerConfigService, MaskingGroup=storageGroupInstanceName, Members=[volumeInstanceName]) if rc != 0: rc, errorDesc = self.utils.wait_for_job_complete(conn, jobDict, extraSpecs) if rc != 0: exceptionMessage = (_( "Error removing volume %(vol)s. %(error)s.") % {'vol': volumeName, 'error': errorDesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod RemoveMembers " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc def add_members_to_masking_group( self, conn, controllerConfigService, storageGroupInstanceName, volumeInstanceName, volumeName, extraSpecs): """Add a member to a masking group group. 
:param conn: the connection to the ecom server :param controllerConfigService: the controller configuration service :param storageGroupInstanceName: the instance name of the storage group :param volumeInstanceName: the instance name of the volume :param volumeName: the volume name (String) :param extraSpecs: additional info :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'AddMembers', controllerConfigService, MaskingGroup=storageGroupInstanceName, Members=[volumeInstanceName]) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error mapping volume %(vol)s. %(error)s.") % {'vol': volumeName, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod AddMembers " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) def unbind_volume_from_storage_pool( self, conn, storageConfigService, volumeInstanceName, volumeName, extraSpecs): """Unbind a volume from a pool and return the unbound volume. :param conn: the connection information to the ecom server :param storageConfigService: the storage configuration service instance name :param volumeInstanceName: the volume instance name :param volumeName: the volume name :param extraSpecs: additional info :returns: int -- return code :returns: the job object :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'EMCUnBindElement', storageConfigService, TheElement=volumeInstanceName) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error unbinding volume %(vol)s from pool. %(error)s.") % {'vol': volumeName, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod EMCUnBindElement " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def modify_composite_volume( self, conn, elementCompositionService, theVolumeInstanceName, inVolumeInstanceName, extraSpecs): """Given a composite volume add a storage volume to it. :param conn: the connection to the ecom :param elementCompositionService: the element composition service :param theVolumeInstanceName: the existing composite volume :param inVolumeInstanceName: the volume you wish to add to the composite volume :param extraSpecs: additional info :returns: int -- rc - return code :returns: the job object :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateOrModifyCompositeElement', elementCompositionService, TheElement=theVolumeInstanceName, InElements=[inVolumeInstanceName]) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error adding volume to composite volume. " "Error is: %(error)s.") % {'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateOrModifyCompositeElement " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def create_composite_volume( self, conn, elementCompositionService, volumeSize, volumeName, poolInstanceName, compositeType, numMembers, extraSpecs): """Create a new volume using the auto meta feature. 
:param conn: the connection to the ecom server :param elementCompositionService: the element composition service :param volumeSize: the size of the volume :param volumeName: user friendly name :param poolInstanceName: the pool to bind the composite volume to :param compositeType: the proposed composite type of the volume e.g. striped/concatenated :param numMembers: the number of meta members to make up the composite. If it is 1 then a non composite is created :param extraSpecs: additional info :returns: dict -- volumeDict :returns: int -- return code :raises: VolumeBackendAPIException """ startTime = time.time() newMembers = 2 LOG.debug( "Parameters for CreateOrModifyCompositeElement: " "elementCompositionService: %(elementCompositionService)s " "provisioning: %(provisioning)lu " "volumeSize: %(volumeSize)s " "newMembers: %(newMembers)lu " "poolInstanceName: %(poolInstanceName)s " "compositeType: %(compositeType)lu " "numMembers: %(numMembers)s.", {'elementCompositionService': elementCompositionService, 'provisioning': THINPROVISIONINGCOMPOSITE, 'volumeSize': volumeSize, 'newMembers': newMembers, 'poolInstanceName': poolInstanceName, 'compositeType': compositeType, 'numMembers': numMembers}) rc, job = conn.InvokeMethod( 'CreateOrModifyCompositeElement', elementCompositionService, ElementName=volumeName, ElementType=self.utils.get_num(THINPROVISIONINGCOMPOSITE, '16'), Size=self.utils.get_num(volumeSize, '64'), ElementSource=self.utils.get_num(newMembers, '16'), EMCInPools=[poolInstanceName], CompositeType=self.utils.get_num(compositeType, '16'), EMCNumberOfMembers=self.utils.get_num(numMembers, '32')) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Volume: %(volumename)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'volumename': volumeName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateOrModifyCompositeElement " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) # Find the newly created volume. volumeDict = self.get_volume_dict_from_job(conn, job['Job']) return volumeDict, rc def create_new_composite_volume( self, conn, elementCompositionService, compositeHeadInstanceName, compositeMemberInstanceName, compositeType, extraSpecs): """Creates a new composite volume. Given a bound composite head and an unbound composite member create a new composite volume. :param conn: the connection to the ecom server :param elementCompositionService: the element composition service :param compositeHeadInstanceName: the composite head. This can be bound :param compositeMemberInstanceName: the composite member. This must be unbound :param compositeType: the composite type e.g. striped or concatenated :param extraSpecs: additional info :returns: int -- return code :returns: the job object :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateOrModifyCompositeElement', elementCompositionService, ElementType=self.utils.get_num('2', '16'), InElements=( [compositeHeadInstanceName, compositeMemberInstanceName]), CompositeType=self.utils.get_num(compositeType, '16')) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Creating new composite Volume. Return code: " "%(rc)lu.
Error: %(error)s.") % {'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateOrModifyCompositeElement " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def _migrate_volume( self, conn, storageRelocationServiceInstanceName, volumeInstanceName, targetPoolInstanceName, extraSpecs): """Migrate a volume to another pool. :param conn: the connection to the ecom server :param storageRelocationServiceInstanceName: the storage relocation service :param volumeInstanceName: the volume to be migrated :param targetPoolInstanceName: the target pool to migrate the volume to :param extraSpecs: additional info :returns: int -- return code :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'RelocateStorageVolumesToStoragePool', storageRelocationServiceInstanceName, TheElements=[volumeInstanceName], TargetPool=targetPoolInstanceName) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Migrating volume from one pool to another. " "Return code: %(rc)lu. Error: %(error)s.") % {'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod RelocateStorageVolumesToStoragePool " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc def migrate_volume_to_storage_pool( self, conn, storageRelocationServiceInstanceName, volumeInstanceName, targetPoolInstanceName, extraSpecs): """Given the storage system name, get the storage relocation service. :param conn: the connection to the ecom server :param storageRelocationServiceInstanceName: the storage relocation service :param volumeInstanceName: the volume to be migrated :param targetPoolInstanceName: the target pool to migrate the volume to. :param extraSpecs: additional info :returns: int -- rc, return code :raises: VolumeBackendAPIException """ LOG.debug( "Volume instance name is %(volumeInstanceName)s. " "Pool instance name is : %(targetPoolInstanceName)s. ", {'volumeInstanceName': volumeInstanceName, 'targetPoolInstanceName': targetPoolInstanceName}) rc = -1 try: rc = self._migrate_volume( conn, storageRelocationServiceInstanceName, volumeInstanceName, targetPoolInstanceName, extraSpecs) except Exception as ex: if 'source of a migration session' in six.text_type(ex): try: rc = self._terminate_migrate_session( conn, volumeInstanceName, extraSpecs) except Exception: exceptionMessage = (_( "Failed to terminate migrate session.")) LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) try: rc = self._migrate_volume( conn, storageRelocationServiceInstanceName, volumeInstanceName, targetPoolInstanceName, extraSpecs) except Exception: exceptionMessage = (_( "Failed to migrate volume for the second time.")) LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) else: exceptionMessage = (_( "Failed to migrate volume for the first time.")) LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) return rc def _terminate_migrate_session(self, conn, volumeInstanceName, extraSpecs): """Given the volume instance terminate a migrate session. 
:param conn: the connection to the ecom server :param volumeInstanceName: the volume to be migrated :param extraSpecs: additional info :returns: int -- return code :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'RequestStateChange', volumeInstanceName, RequestedState=self.utils.get_num(32769, '16')) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Terminating migrate session. " "Return code: %(rc)lu. Error: %(error)s.") % {'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod RequestStateChange " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc def create_element_replica( self, conn, repServiceInstanceName, cloneName, sourceName, sourceInstance, targetInstance, extraSpecs, copyOnWrite=False): """Make SMI-S call to create replica for source element. :param conn: the connection to the ecom server :param repServiceInstanceName: replication service :param cloneName: replica name :param sourceName: source volume name :param sourceInstance: source volume instance :param targetInstance: the target instance :param extraSpecs: additional info :param copyOnWrite: optional :returns: int -- return code :returns: job object of the replica creation operation :raises: VolumeBackendAPIException """ if copyOnWrite: startTime = time.time() # ReplicationType 10 - Synchronous Clone Local. # Set DesiredCopyMethodology to Copy-On-Write (6). rsdInstance = self.utils.set_copy_methodology_in_rsd( conn, repServiceInstanceName, SYNC_CLONE_LOCAL, COPY_ON_WRITE, extraSpecs) # SyncType 8 - Clone. # ReplicationSettingData.DesiredCopyMethodology Copy-On-Write (6). rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, ElementName=cloneName, SyncType=self.utils.get_num(TF_CLONE, '16'), ReplicationSettingData=rsdInstance, SourceElement=sourceInstance.path) else: startTime = time.time() if targetInstance is None: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, ElementName=cloneName, SyncType=self.utils.get_num(TF_CLONE, '16'), SourceElement=sourceInstance.path) else: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, ElementName=cloneName, SyncType=self.utils.get_num(TF_CLONE, '16'), SourceElement=sourceInstance.path, TargetElement=targetInstance.path) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Cloned Volume: " "Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. " "Error: %(error)s.") % {'cloneName': cloneName, 'sourceName': sourceName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateElementReplica " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def delete_clone_relationship( self, conn, repServiceInstanceName, syncInstanceName, extraSpecs, force=False): """Deletes the relationship between the clone and source volume. Makes an SMI-S call to break clone relationship between the clone volume and the source. 8/Detach - Delete the synchronization between two storage objects. Treat the objects as independent after the synchronization is deleted. 
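
        A sketch of typical usage once the clone has fully copied,
        assuming the sync instance was looked up from the source/target
        pair elsewhere in the driver (names are illustrative)::

            rc, job = provision.delete_clone_relationship(
                conn, repServiceInstanceName, syncInstanceName,
                extraSpecs, force=True)
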
:param conn: the connection to the ecom server :param repServiceInstanceName: instance name of the replication service :param syncInstanceName: instance name of the SE_StorageSynchronized_SV_SV object :param extraSpecs: additional info :param force: optional param :returns: int -- return code :returns: job object of the replica creation operation :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'ModifyReplicaSynchronization', repServiceInstanceName, Operation=self.utils.get_num(8, '16'), Synchronization=syncInstanceName, Force=force) LOG.debug("Delete clone relationship: Sync Name: %(syncName)s " "Return code: %(rc)lu.", {'syncName': syncInstanceName, 'rc': rc}) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error break clone relationship: " "Sync Name: %(syncName)s " "Return code: %(rc)lu. Error: %(error)s.") % {'syncName': syncInstanceName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod ModifyReplicaSynchronization " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def create_consistency_group( self, conn, replicationService, consistencyGroupName, extraSpecs): """Create a new consistency group. :param conn: the connection to the ecom server :param replicationService: the replication Service :param consistencyGroupName: the CG group name :param extraSpecs: additional info :returns: int -- return code :returns: job object :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateGroup', replicationService, GroupName=consistencyGroupName) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Failed to create consistency group: " "%(consistencyGroupName)s " "Return code: %(rc)lu. Error: %(error)s.") % {'consistencyGroupName': consistencyGroupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateGroup " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def delete_consistency_group( self, conn, replicationService, cgInstanceName, consistencyGroupName, extraSpecs): """Delete a consistency group. :param conn: the connection to the ecom server :param replicationService: the replication Service :param cgInstanceName: the CG instance name :param consistencyGroupName: the CG group name :param extraSpecs: additional info :returns: int -- return code :returns: job object :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'DeleteGroup', replicationService, ReplicationGroup=cgInstanceName, RemoveElements=True) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Failed to delete consistency group: " "%(consistencyGroupName)s " "Return code: %(rc)lu. 
Error: %(error)s.") % {'consistencyGroupName': consistencyGroupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod DeleteGroup " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def add_volume_to_cg( self, conn, replicationService, cgInstanceName, volumeInstanceName, cgName, volumeName, extraSpecs): """Add a volume to a consistency group. :param conn: the connection to the ecom server :param replicationService: the replication Service :param cgInstanceName: the CG instance name :param volumeInstanceName: the volume instance name :param cgName: the CG group name :param volumeName: the volume name :param extraSpecs: additional info :returns: int -- return code :returns: job object :raises: VolumeBackendAPIException """ startTime = time.time() if isinstance(volumeInstanceName, list): theElements = volumeInstanceName volumeName = 'Bulk Add' else: theElements = [volumeInstanceName] rc, job = conn.InvokeMethod( 'AddMembers', replicationService, Members=theElements, ReplicationGroup=cgInstanceName) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Failed to add volume %(volumeName)s " "to consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'volumeName': volumeName, 'cgName': cgName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod AddMembers " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def remove_volume_from_cg( self, conn, replicationService, cgInstanceName, volumeInstanceName, cgName, volumeName, extraSpecs): """Remove a volume from a consistency group. :param conn: the connection to the ecom server :param replicationService: the replication Service :param cgInstanceName: the CG instance name :param volumeInstanceName: the volume instance name :param cgName: the CG group name :param volumeName: the volume name :param extraSpecs: additional info :returns: int -- return code :returns: job object :raises: VolumeBackendAPIException """ startTime = time.time() if isinstance(volumeInstanceName, list): theElements = volumeInstanceName volumeName = 'Bulk Remove' else: theElements = [volumeInstanceName] rc, job = conn.InvokeMethod( 'RemoveMembers', replicationService, Members=theElements, ReplicationGroup=cgInstanceName) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Failed to remove volume %(volumeName)s " "from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'volumeName': volumeName, 'cgName': cgName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod RemoveMembers " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def create_group_replica( self, conn, replicationService, srcGroupInstanceName, tgtGroupInstanceName, relationName, extraSpecs): """Make SMI-S call to create replica for source group. 
:param conn: the connection to the ecom server :param replicationService: replication service :param srcGroupInstanceName: source group instance name :param tgtGroupInstanceName: target group instance name :param relationName: relation name :param extraSpecs: additional info :returns: int -- return code :returns: job object of the replica creation operation :raises: VolumeBackendAPIException """ LOG.debug( "Parameters for CreateGroupReplica: " "replicationService: %(replicationService)s " "RelationName: %(relationName)s " "sourceGroup: %(srcGroup)s " "targetGroup: %(tgtGroup)s.", {'replicationService': replicationService, 'relationName': relationName, 'srcGroup': srcGroupInstanceName, 'tgtGroup': tgtGroupInstanceName}) # 8 for clone. rc, job = conn.InvokeMethod( 'CreateGroupReplica', replicationService, RelationshipName=relationName, SourceGroup=srcGroupInstanceName, TargetGroup=tgtGroupInstanceName, SyncType=self.utils.get_num(8, '16'), WaitForCopyState=self.utils.get_num(4, '16')) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMsg = (_("Error CreateGroupReplica: " "source: %(source)s target: %(target)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'source': srcGroupInstanceName, 'target': tgtGroupInstanceName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMsg) raise exception.VolumeBackendAPIException(data=exceptionMsg) return rc, job cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_provision_v3.py0000664000567000056710000010251212701406250026010 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.volume.drivers.emc import emc_vmax_utils LOG = logging.getLogger(__name__) STORAGEGROUPTYPE = 4 POSTGROUPTYPE = 3 EMC_ROOT = 'root/emc' THINPROVISIONINGCOMPOSITE = 32768 THINPROVISIONING = 5 INFO_SRC_V3 = 3 ACTIVATESNAPVX = 4 DEACTIVATESNAPVX = 19 SNAPSYNCTYPE = 7 class EMCVMAXProvisionV3(object): """Provisioning Class for SMI-S based EMC volume drivers. This Provisioning class is for EMC volume drivers based on SMI-S. It supports VMAX arrays. """ def __init__(self, prtcl): self.protocol = prtcl self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl) def delete_volume_from_pool( self, conn, storageConfigservice, volumeInstanceName, volumeName, extraSpecs): """Given the volume instance remove it from the pool. 
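
        ``volumeInstanceName`` may be a single instance name or a list of
        them; a list triggers a bulk delete. Sketch (handles below are
        placeholders)::

            rc = provisionv3.delete_volume_from_pool(
                conn, storageConfigService, [volInstName1, volInstName2],
                'volume-001', extraSpecs)
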
:param conn: connection to the ecom server :param storageConfigservice: the storage configuration service :param volumeInstanceName: the volume instance name :param volumeName: the volume name (String) :param extraSpecs: additional info :returns: int -- return code :raises: VolumeBackendAPIException """ startTime = time.time() if isinstance(volumeInstanceName, list): theElements = volumeInstanceName volumeName = 'Bulk Delete' else: theElements = [volumeInstanceName] rc, job = conn.InvokeMethod( 'ReturnElementsToStoragePool', storageConfigservice, TheElements=theElements) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Delete Volume: %(volumeName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'volumeName': volumeName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod ReturnElementsToStoragePool took: " "%(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc def create_volume_from_sg( self, conn, storageConfigService, volumeName, sgInstanceName, volumeSize, extraSpecs): """Create the volume and associate it with a storage group. We use EMCCollections parameter to supply a Device Masking Group to contain a newly created storage volume. :param conn: the connection information to the ecom server :param storageConfigService: the storage configuration service :param volumeName: the volume name (String) :param sgInstanceName: the storage group instance name associated with an SLO :param volumeSize: volume size (String) :param extraSpecs: additional info :returns: dict -- volumeDict - the volume dict :returns: int -- return code :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateOrModifyElementFromStoragePool', storageConfigService, ElementName=volumeName, EMCCollections=[sgInstanceName], ElementType=self.utils.get_num(THINPROVISIONING, '16'), Size=self.utils.get_num(volumeSize, '64')) LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.", {'volumename': volumeName, 'rc': rc}) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Volume: %(volumeName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'volumeName': volumeName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) # Find the newly created volume. volumeDict = self.get_volume_dict_from_job(conn, job['Job']) return volumeDict, rc def _find_new_storage_group( self, conn, maskingGroupDict, storageGroupName): """After creating a new storage group find it and return it. :param conn: connection to the ecom server :param maskingGroupDict: the maskingGroupDict dict :param storageGroupName: storage group name (String) :returns: maskingGroupDict['MaskingGroup'] or None """ foundStorageGroupInstanceName = None if 'MaskingGroup' in maskingGroupDict: foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup'] return foundStorageGroupInstanceName def get_volume_dict_from_job(self, conn, jobInstance): """Given the jobInstance determine the volume Instance.
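
        The returned dict carries just enough to rebuild the CIM path
        later, for example (all values below are illustrative)::

            {'classname': 'Symm_StorageVolume',
             'keybindings': {
                 'CreationClassName': 'Symm_StorageVolume',
                 'SystemName': 'SYMMETRIX+000000000001',
                 'DeviceID': '00123',
                 'SystemCreationClassName': 'Symm_StorageSystem'}}
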
:param conn: the ecom connection :param jobInstance: the instance of a job :returns: dict -- volumeDict - an instance of a volume """ associators = conn.Associators( jobInstance, ResultClass='EMC_StorageVolume') if len(associators) > 0: return self.create_volume_dict(associators[0].path) else: exceptionMessage = (_( "Unable to get storage volume from job.")) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) def get_volume_from_job(self, conn, jobInstance): """Given the jobInstance determine the volume Instance. :param conn: the ecom connection :param jobInstance: the instance of a job :returns: dict -- volumeDict - an instance of a volume """ associators = conn.Associators( jobInstance, ResultClass='EMC_StorageVolume') if len(associators) > 0: return associators[0] else: exceptionMessage = (_( "Unable to get storage volume from job.")) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) def create_volume_dict(self, volumeInstanceName): """Create volume dictionary. :param volumeInstanceName: the volume instance name (CIMInstanceName) :returns: dict -- volumeDict - an instance of a volume """ volpath = volumeInstanceName volumeDict = {} volumeDict['classname'] = volpath.classname keys = {} keys['CreationClassName'] = volpath['CreationClassName'] keys['SystemName'] = volpath['SystemName'] keys['DeviceID'] = volpath['DeviceID'] keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] volumeDict['keybindings'] = keys return volumeDict def create_element_replica( self, conn, repServiceInstanceName, cloneName, syncType, sourceInstance, extraSpecs, targetInstance=None, rsdInstance=None): """Make SMI-S call to create replica for source element. :param conn: the connection to the ecom server :param repServiceInstanceName: replication service :param cloneName: clone volume name :param syncType: 7=snapshot, 8=clone :param sourceInstance: source volume instance :param extraSpecs: additional info :param targetInstance: Target volume instance. Default None :param rsdInstance: replication setting data instance. Default None :returns: int -- rc - return code :returns: job - job object of the replica creation operation :raises: VolumeBackendAPIException """ startTime = time.time() LOG.debug("Create replica: %(clone)s " "syncType: %(syncType)s Source: %(source)s.", {'clone': cloneName, 'syncType': syncType, 'source': sourceInstance.path}) storageSystemName = sourceInstance['SystemName'] __, __, sgInstanceName = ( self.utils.get_v3_default_sg_instance_name( conn, extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], extraSpecs[self.utils.WORKLOAD], storageSystemName)) if targetInstance is None and rsdInstance is None: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, ElementName=cloneName, SyncType=self.utils.get_num(syncType, '16'), SourceElement=sourceInstance.path, Collections=[sgInstanceName]) else: rc, job = self._create_element_replica_extra_params( conn, repServiceInstanceName, cloneName, syncType, sourceInstance, targetInstance, rsdInstance, sgInstanceName) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Cloned Volume: %(cloneName)s " "Return code: %(rc)lu.
Error: %(error)s.") % {'cloneName': cloneName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateElementReplica " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def _create_element_replica_extra_params( self, conn, repServiceInstanceName, cloneName, syncType, sourceInstance, targetInstance, rsdInstance, sgInstanceName): """CreateElementReplica using extra parameters. :param conn: the connection to the ecom server :param repServiceInstanceName: replication service :param cloneName: clone volume name :param syncType: 7=snapshot, 8=clone :param sourceInstance: source volume instance :param targetInstance: Target volume instance. Default None :param rsdInstance: replication settingdata instance. Default None :param sgInstanceName: pool instance name :returns: int -- rc - return code :returns: job - job object of the replica creation operation """ syncType = self.utils.get_num(syncType, '16') if targetInstance and rsdInstance: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, ElementName=cloneName, SyncType=syncType, SourceElement=sourceInstance.path, TargetElement=targetInstance.path, ReplicationSettingData=rsdInstance) elif targetInstance: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, ElementName=cloneName, SyncType=syncType, SourceElement=sourceInstance.path, TargetElement=targetInstance.path) elif rsdInstance: rc, job = conn.InvokeMethod( 'CreateElementReplica', repServiceInstanceName, ElementName=cloneName, SyncType=syncType, SourceElement=sourceInstance.path, ReplicationSettingData=rsdInstance, Collections=[sgInstanceName]) return rc, job def break_replication_relationship( self, conn, repServiceInstanceName, syncInstanceName, operation, extraSpecs, force=False): """Deletes the relationship between the clone/snap and source volume. Makes an SMI-S call to break clone relationship between the clone volume and the source. :param conn: the connection to the ecom server :param repServiceInstanceName: instance name of the replication service :param syncInstanceName: instance name of the SE_StorageSynchronized_SV_SV object :param operation: operation code :param extraSpecs: additional info :param force: force to break replication relationship if True :returns: rc - return code :returns: job - job object of the replica creation operation """ LOG.debug("Break replication relationship: %(sv)s " "operation: %(operation)s.", {'sv': syncInstanceName, 'operation': operation}) return self._modify_replica_synchronization( conn, repServiceInstanceName, syncInstanceName, operation, extraSpecs, force) def create_storage_group_v3(self, conn, controllerConfigService, groupName, srp, slo, workload, extraSpecs): """Create the volume in the specified pool. 
:param conn: the connection information to the ecom server :param controllerConfigService: the controller configuration service :param groupName: the group name (String) :param srp: the SRP (String) :param slo: the SLO (String) :param workload: the workload (String) :param extraSpecs: additional info :returns: storageGroupInstanceName - storage group instance name """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateGroup', controllerConfigService, GroupName=groupName, Type=self.utils.get_num(4, '16'), EMCSRP=srp, EMCSLO=slo, EMCWorkload=workload) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Group: %(groupName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': groupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateGroup " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) foundStorageGroupInstanceName = self._find_new_storage_group( conn, job, groupName) return foundStorageGroupInstanceName def get_storage_pool_capability(self, conn, poolInstanceName): """Get the pool capability. :param conn: the connection information to the ecom server :param poolInstanceName: the pool instance :returns: the storage pool capability instance. None if not found """ storagePoolCapability = None associators = ( conn.AssociatorNames(poolInstanceName, ResultClass='Symm_StoragePoolCapabilities')) if len(associators) > 0: storagePoolCapability = associators[0] return storagePoolCapability def get_storage_pool_setting( self, conn, storagePoolCapability, slo, workload): """Get the pool setting for pool capability. :param conn: the connection information to the ecom server :param storagePoolCapability: the storage pool capability instance :param slo: the slo string e.g. Bronze :param workload: the workload string e.g. DSS_REP :returns: the storage pool setting instance """ foundStoragePoolSetting = None storagePoolSettings = ( conn.AssociatorNames(storagePoolCapability, ResultClass='CIM_storageSetting')) for storagePoolSetting in storagePoolSettings: settingInstanceID = storagePoolSetting['InstanceID'] matchString = ("%(slo)s:%(workload)s" % {'slo': slo, 'workload': workload}) if matchString in settingInstanceID: foundStoragePoolSetting = storagePoolSetting break if foundStoragePoolSetting is None: exceptionMessage = (_( "The array does not support the storage pool setting " "for SLO %(slo)s and workload %(workload)s. Please " "check the array for valid SLOs and workloads.") % {'slo': slo, 'workload': workload}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) return foundStoragePoolSetting def _get_supported_size_range_for_SLO( self, conn, storageConfigService, srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs): """Gets available performance capacity per SLO.
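
        The result is the raw ``GetSupportedSizeRange`` output dict; with
        a WLP-enabled provider it carries the keys consumed later by
        ``_get_remaining_slo_capacity_wlp``, for example::

            sizeDict = self._get_supported_size_range_for_SLO(
                conn, storageConfigService, srpPoolInstanceName,
                storagePoolSettingInstanceName, extraSpecs)
            if sizeDict.get('EMCInformationSource') == INFO_SRC_V3:
                remaining = sizeDict['EMCRemainingSLOCapacity']
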
:param conn: the connection information to the ecom server :param storageConfigService: the storage configuration service instance :param srpPoolInstanceName: the SRP storage pool instance :param storagePoolSettingInstanceName: the SLO type, e.g. Bronze :param extraSpecs: additional info :returns: dict -- supportedSizeDict - the supported size dict :raises: VolumeBackendAPIException """ startTime = time.time() rc, supportedSizeDict = conn.InvokeMethod( 'GetSupportedSizeRange', srpPoolInstanceName, ElementType=self.utils.get_num(3, '16'), Goal=storagePoolSettingInstanceName) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete( conn, supportedSizeDict, extraSpecs) if rc != 0: exceptionMessage = (_( "Cannot get supported size range for %(sps)s " "Return code: %(rc)lu. Error: %(error)s.") % {'sps': storagePoolSettingInstanceName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod GetSupportedSizeRange " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return supportedSizeDict def get_volume_range( self, conn, storageConfigService, poolInstanceName, slo, workload, extraSpecs): """Get upper and lower range for volume for slo/workload combination. :param conn: the connection information to the ecom server :param storageConfigService: the storage config service :param poolInstanceName: the pool instance :param slo: slo string e.g. Bronze :param workload: workload string e.g. DSS :param extraSpecs: additional info :returns: supportedSizeDict """ supportedSizeDict = {} storagePoolCapabilityInstanceName = self.get_storage_pool_capability( conn, poolInstanceName) if storagePoolCapabilityInstanceName: storagePoolSettingInstanceName = self.get_storage_pool_setting( conn, storagePoolCapabilityInstanceName, slo, workload) supportedSizeDict = self._get_supported_size_range_for_SLO( conn, storageConfigService, poolInstanceName, storagePoolSettingInstanceName, extraSpecs) return supportedSizeDict def activate_snap_relationship( self, conn, repServiceInstanceName, syncInstanceName, extraSpecs): """Activate snap relationship and start copy operation. :param conn: the connection to the ecom server :param repServiceInstanceName: instance name of the replication service :param syncInstanceName: instance name of the SE_StorageSynchronized_SV_SV object :param extraSpecs: additional info :returns: int -- return code :returns: job object of the replica creation operation """ # Operation 4: activate the snapVx. operation = ACTIVATESNAPVX LOG.debug("Activate snap: %(sv)s operation: %(operation)s.", {'sv': syncInstanceName, 'operation': operation}) return self._modify_replica_synchronization( conn, repServiceInstanceName, syncInstanceName, operation, extraSpecs) def return_to_resource_pool(self, conn, repServiceInstanceName, syncInstanceName, extraSpecs): """Return the snap target resources back to the pool. :param conn: the connection to the ecom server :param repServiceInstanceName: instance name of the replication service :param syncInstanceName: instance name of the SE_StorageSynchronized_SV_SV object :param extraSpecs: additional info :returns: rc - return code :returns: job object of the replica creation operation """ # Operation 19: deactivate the snapVx.
operation = DEACTIVATESNAPVX LOG.debug("Return snap resource back to pool: " "%(sv)s operation: %(operation)s.", {'sv': syncInstanceName, 'operation': operation}) return self._modify_replica_synchronization( conn, repServiceInstanceName, syncInstanceName, operation, extraSpecs) def _modify_replica_synchronization( self, conn, repServiceInstanceName, syncInstanceName, operation, extraSpecs, force=False): """Modify the relationship between the clone/snap and source volume. Helper function that makes an SMI-S call to break clone relationship between the clone volume and the source. :param conn: the connection to the ecom server :param repServiceInstanceName: instance name of the replication service :param syncInstanceName: instance name of the SE_StorageSynchronized_SV_SV object :param operation: operation code :param extraSpecs: additional info :param force: force to modify replication synchronization if True :returns: int -- return code :returns: job object of the replica creation operation :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'ModifyReplicaSynchronization', repServiceInstanceName, Operation=self.utils.get_num(operation, '16'), Synchronization=syncInstanceName, Force=force) LOG.debug("_modify_replica_synchronization: %(sv)s " "operation: %(operation)s Return code: %(rc)lu.", {'sv': syncInstanceName, 'operation': operation, 'rc': rc}) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error modify replica synchronization: %(sv)s " "operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'sv': syncInstanceName, 'operation': operation, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod ModifyReplicaSynchronization " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) return rc, job def create_group_replica( self, conn, replicationService, srcGroupInstanceName, tgtGroupInstanceName, relationName, extraSpecs): """Make SMI-S call to create replica for source group. :param conn: the connection to the ecom server :param replicationService: replication service :param srcGroupInstanceName: source group instance name :param tgtGroupInstanceName: target group instance name :param relationName: replica relationship name :param extraSpecs: additional info :returns: int -- return code :returns: job object of the replica creation operation :raises: VolumeBackendAPIException """ LOG.debug( "Creating CreateGroupReplica V3: " "replicationService: %(replicationService)s " "RelationName: %(relationName)s " "sourceGroup: %(srcGroup)s " "targetGroup: %(tgtGroup)s.", {'replicationService': replicationService, 'relationName': relationName, 'srcGroup': srcGroupInstanceName, 'tgtGroup': tgtGroupInstanceName}) rc, job = conn.InvokeMethod( 'CreateGroupReplica', replicationService, RelationshipName=relationName, SourceGroup=srcGroupInstanceName, TargetGroup=tgtGroupInstanceName, SyncType=self.utils.get_num(SNAPSYNCTYPE, '16')) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMsg = (_("Error CreateGroupReplica: " "source: %(source)s target: %(target)s. " "Return code: %(rc)lu. 
Error: %(error)s.") % {'source': srcGroupInstanceName, 'target': tgtGroupInstanceName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMsg) raise exception.VolumeBackendAPIException(data=exceptionMsg) return rc, job def get_srp_pool_stats(self, conn, arrayInfo): """Get the totalManagedSpace, remainingManagedSpace. :param conn: the connection to the ecom server :param arrayInfo: the array dict :returns: totalCapacityGb :returns: remainingCapacityGb """ totalCapacityGb = -1 remainingCapacityGb = -1 storageSystemInstanceName = self.utils.find_storageSystem( conn, arrayInfo['SerialNumber']) srpPoolInstanceNames = conn.AssociatorNames( storageSystemInstanceName, ResultClass='Symm_SRPStoragePool') for srpPoolInstanceName in srpPoolInstanceNames: poolnameStr = self.utils.get_pool_name(conn, srpPoolInstanceName) if six.text_type(arrayInfo['PoolName']) == ( six.text_type(poolnameStr)): try: # Check that pool hasn't suddently been deleted. srpPoolInstance = conn.GetInstance(srpPoolInstanceName) propertiesList = srpPoolInstance.properties.items() for properties in propertiesList: if properties[0] == 'TotalManagedSpace': cimProperties = properties[1] totalManagedSpace = cimProperties.value totalCapacityGb = self.utils.convert_bits_to_gbs( totalManagedSpace) elif properties[0] == 'RemainingManagedSpace': cimProperties = properties[1] remainingManagedSpace = cimProperties.value remainingCapacityGb = ( self.utils.convert_bits_to_gbs( remainingManagedSpace)) except Exception: pass remainingSLOCapacityGb = ( self._get_remaining_slo_capacity_wlp( conn, srpPoolInstanceName, arrayInfo, storageSystemInstanceName['Name'])) if remainingSLOCapacityGb != -1: remainingCapacityGb = remainingSLOCapacityGb else: LOG.warning(_LW( "Remaining capacity %(remainingCapacityGb)s " "GBs is determined from SRP pool capacity " "and not the SLO capacity. Performance may " "not be what you expect."), {'remainingCapacityGb': remainingCapacityGb}) return totalCapacityGb, remainingCapacityGb def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName, arrayInfo, systemName): """Get the remaining SLO capacity. This is derived from the WLP portion of Unisphere. Please see the SMIProvider doc and the readme doc for details. :param conn: the connection to the ecom server :param srpPoolInstanceName: SRP instance name :param arrayInfo: the array dict :param systemName: the system name :returns: remainingCapacityGb """ remainingCapacityGb = -1 storageConfigService = ( self.utils.find_storage_configuration_service( conn, systemName)) supportedSizeDict = ( self.get_volume_range( conn, storageConfigService, srpPoolInstanceName, arrayInfo['SLO'], arrayInfo['Workload'], None)) try: # Information source is V3. if supportedSizeDict['EMCInformationSource'] == INFO_SRC_V3: remainingCapacityGb = self.utils.convert_bits_to_gbs( supportedSizeDict['EMCRemainingSLOCapacity']) LOG.debug("Received remaining SLO Capacity " "%(remainingCapacityGb)s GBs for SLO " "%(SLO)s and workload %(workload)s.", {'remainingCapacityGb': remainingCapacityGb, 'SLO': arrayInfo['SLO'], 'workload': arrayInfo['Workload']}) except KeyError: pass return remainingCapacityGb def extend_volume_in_SG( self, conn, storageConfigService, volumeInstanceName, volumeName, volumeSize, extraSpecs): """Extend a volume instance. 
:param conn: connection to the ecom server :param storageConfigService: the storage configuration service :param volumeInstanceName: the volume instance name :param volumeName: the volume name (String) :param volumeSize: the volume size :param extraSpecs: additional info :returns: volumeDict :returns: int -- return code :raises: VolumeBackendAPIException """ startTime = time.time() rc, job = conn.InvokeMethod( 'CreateOrModifyElementFromStoragePool', storageConfigService, TheElement=volumeInstanceName, Size=self.utils.get_num(volumeSize, '64')) LOG.debug("Extend Volume: %(volumename)s. Return code: %(rc)lu.", {'volumename': volumeName, 'rc': rc}) if rc != 0: rc, error_desc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Extend Volume: %(volumeName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'volumeName': volumeName, 'rc': rc, 'error': error_desc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(startTime, time.time())}) # Find the newly extended volume. volumeDict = self.get_volume_dict_from_job(conn, job['Job']) return volumeDict, rc cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_masking.py0000664000567000056710000035474212701406250025017 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import lockutils from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume.drivers.emc import emc_vmax_fast from cinder.volume.drivers.emc import emc_vmax_provision from cinder.volume.drivers.emc import emc_vmax_provision_v3 from cinder.volume.drivers.emc import emc_vmax_utils LOG = logging.getLogger(__name__) STORAGEGROUPTYPE = 4 POSTGROUPTYPE = 3 INITIATORGROUPTYPE = 2 ISCSI = 'iscsi' FC = 'fc' EMC_ROOT = 'root/emc' FASTPOLICY = 'storagetype:fastpolicy' ISV3 = 'isV3' class EMCVMAXMasking(object): """Masking class for SMI-S based EMC volume drivers. Masking code to dynamically create a masking view. This masking class is for EMC volume drivers based on SMI-S. It supports VMAX arrays.
""" def __init__(self, prtcl): self.protocol = prtcl self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl) self.fast = emc_vmax_fast.EMCVMAXFast(prtcl) self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl) self.provisionv3 = emc_vmax_provision_v3.EMCVMAXProvisionV3(prtcl) def setup_masking_view(self, conn, maskingViewDict, extraSpecs): @lockutils.synchronized(maskingViewDict['maskingViewName'], "emc-mv-", True) def do_get_or_create_masking_view_and_map_lun(): return self.get_or_create_masking_view_and_map_lun(conn, maskingViewDict, extraSpecs) return do_get_or_create_masking_view_and_map_lun() def get_or_create_masking_view_and_map_lun(self, conn, maskingViewDict, extraSpecs): """Get or Create a masking view and add a volume to the storage group. Given a masking view tuple either get or create a masking view and add the volume to the associated storage group. If it is a live migration operation then we do not need to remove the volume from any storage group (default or otherwise). :param conn: the connection to ecom :param maskingViewDict: the masking view dict :param extraSpecs: additional info :returns: dict -- rollbackDict :raises: VolumeBackendAPIException """ rollbackDict = {} controllerConfigService = maskingViewDict['controllerConfigService'] volumeInstance = maskingViewDict['volumeInstance'] maskingViewName = maskingViewDict['maskingViewName'] volumeName = maskingViewDict['volumeName'] isV3 = maskingViewDict['isV3'] isLiveMigration = maskingViewDict['isLiveMigration'] maskingViewDict['extraSpecs'] = extraSpecs defaultStorageGroupInstanceName = None fastPolicyName = None assocStorageGroupName = None storageGroupInstanceName = None if isLiveMigration is False: if isV3: defaultStorageGroupInstanceName = ( self._get_v3_default_storagegroup_instancename( conn, volumeInstance, maskingViewDict, controllerConfigService, volumeName)) else: fastPolicyName = maskingViewDict['fastPolicy'] # If FAST is enabled remove the volume from the default SG. if fastPolicyName is not None: defaultStorageGroupInstanceName = ( self._get_and_remove_from_storage_group_v2( conn, controllerConfigService, volumeInstance.path, volumeName, fastPolicyName, extraSpecs)) # If anything has gone wrong with the masking view we rollback try: maskingViewInstanceName, storageGroupInstanceName, errorMessage = ( self._validate_masking_view(conn, maskingViewDict, defaultStorageGroupInstanceName, extraSpecs)) LOG.debug( "The masking view in the attach operation is " "%(maskingViewInstanceName)s. The storage group " "in the masking view is %(storageGroupInstanceName)s.", {'maskingViewInstanceName': maskingViewInstanceName, 'storageGroupInstanceName': storageGroupInstanceName}) except Exception as e: LOG.exception(_LE( "Masking View creation or retrieval was not successful " "for masking view %(maskingViewName)s. " "Attempting rollback."), {'maskingViewName': maskingViewDict['maskingViewName']}) errorMessage = e rollbackDict['pgGroupName'], errorMessage = ( self._get_port_group_name_from_mv( conn, maskingViewDict['maskingViewName'], maskingViewDict['storageSystemName'])) if not errorMessage: # Only after the masking view has been validated, add the # volume to the storage group and recheck that it has been # successfully added. 
errorMessage = self._check_adding_volume_to_storage_group( conn, maskingViewDict, storageGroupInstanceName) rollbackDict['controllerConfigService'] = controllerConfigService rollbackDict['defaultStorageGroupInstanceName'] = ( defaultStorageGroupInstanceName) rollbackDict['volumeInstance'] = volumeInstance rollbackDict['volumeName'] = volumeName rollbackDict['fastPolicyName'] = fastPolicyName rollbackDict['isV3'] = isV3 rollbackDict['extraSpecs'] = extraSpecs rollbackDict['sgName'] = maskingViewDict['sgGroupName'] if errorMessage: # Rollback code if we cannot complete any of the steps above # successfully then we must roll back by adding the volume back to # the default storage group for that fast policy. if (fastPolicyName is not None): # If the errorMessage was returned before the volume # was removed from the default storage group no action. self._check_if_rollback_action_for_masking_required( conn, rollbackDict) if isV3: rollbackDict['sgGroupName'] = assocStorageGroupName rollbackDict['storageSystemName'] = ( maskingViewDict['storageSystemName']) self._check_if_rollback_action_for_masking_required( conn, rollbackDict) exceptionMessage = (_( "Failed to get, create or add volume %(volumeName)s " "to masking view %(maskingViewName)s. " "The error message received was %(errorMessage)s.") % {'maskingViewName': maskingViewName, 'volumeName': volumeName, 'errorMessage': errorMessage}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return rollbackDict def _get_v3_default_storagegroup_instancename(self, conn, volumeinstance, maskingviewdict, controllerConfigService, volumeName): defaultStorageGroupInstanceName = None defaultSgGroupName = self.utils.get_v3_storage_group_name( maskingviewdict['pool'], maskingviewdict['slo'], maskingviewdict['workload']) assocStorageGroupInstanceNames = ( self.utils.get_storage_groups_from_volume( conn, volumeinstance.path)) for assocStorageGroupInstanceName in ( assocStorageGroupInstanceNames): instance = conn.GetInstance( assocStorageGroupInstanceName, LocalOnly=False) assocStorageGroupName = instance['ElementName'] if assocStorageGroupName == defaultSgGroupName: defaultStorageGroupInstanceName = ( assocStorageGroupInstanceName) break if defaultStorageGroupInstanceName: self._get_and_remove_from_storage_group_v3( conn, controllerConfigService, volumeinstance.path, volumeName, maskingviewdict, defaultStorageGroupInstanceName) else: LOG.warning(_LW( "Volume: %(volumeName)s does not belong " "to storage group %(defaultSgGroupName)s."), {'volumeName': volumeName, 'defaultSgGroupName': defaultSgGroupName}) return defaultStorageGroupInstanceName def _validate_masking_view(self, conn, maskingViewDict, defaultStorageGroupInstanceName, extraSpecs): """Validate all the individual pieces of the masking view. 
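
        The contract is a three-tuple; callers treat any non-empty error
        message as failure and roll back, as sketched here::

            mvInstanceName, sgInstanceName, errorMessage = (
                self._validate_masking_view(
                    conn, maskingViewDict,
                    defaultStorageGroupInstanceName, extraSpecs))
            if errorMessage:
                # Roll back and raise VolumeBackendAPIException.
                ...
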
:param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param defaultStorageGroupInstanceName: the default SG :param extraSpecs: extra specifications :returns: maskingViewInstanceName :returns: storageGroupInstanceName, :returns: string -- errorMessage """ storageSystemName = maskingViewDict['storageSystemName'] maskingViewName = maskingViewDict['maskingViewName'] maskingViewInstanceName = self._find_masking_view( conn, maskingViewName, storageSystemName) if maskingViewInstanceName is None: maskingViewInstanceName, storageGroupInstanceName, errorMessage = ( self._validate_new_masking_view( conn, maskingViewDict, defaultStorageGroupInstanceName, extraSpecs)) else: storageGroupInstanceName, errorMessage = ( self._validate_existing_masking_view( conn, maskingViewDict, maskingViewInstanceName, extraSpecs)) return maskingViewInstanceName, storageGroupInstanceName, errorMessage def _validate_new_masking_view(self, conn, maskingViewDict, defaultStorageGroupInstanceName, extraSpecs): """Validate the creation of a new masking view. :param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param defaultStorageGroupInstanceName: the default SG :param extraSpecs: extra specifications :returns: maskingViewInstanceName :returns: storageGroupInstanceName, :returns: string -- errorMessage """ controllerConfigService = maskingViewDict['controllerConfigService'] igGroupName = maskingViewDict['igGroupName'] connector = maskingViewDict['connector'] storageSystemName = maskingViewDict['storageSystemName'] maskingViewName = maskingViewDict['maskingViewName'] pgGroupName = maskingViewDict['pgGroupName'] storageGroupInstanceName, errorMessage = ( self._check_storage_group( conn, maskingViewDict, defaultStorageGroupInstanceName)) if errorMessage: return None, storageGroupInstanceName, errorMessage portGroupInstanceName, errorMessage = ( self._check_port_group(conn, controllerConfigService, pgGroupName)) if errorMessage: return None, storageGroupInstanceName, errorMessage initiatorGroupInstanceName, errorMessage = ( self._check_initiator_group(conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs)) if errorMessage: return None, storageGroupInstanceName, errorMessage # Only after the components of the MV have been validated, # add the volume to the storage group and recheck that it # has been successfully added. This is necessary before # creating a new masking view. errorMessage = self._check_adding_volume_to_storage_group( conn, maskingViewDict, storageGroupInstanceName) if errorMessage: return None, storageGroupInstanceName, errorMessage maskingViewInstanceName, errorMessage = ( self._check_masking_view( conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs)) return maskingViewInstanceName, storageGroupInstanceName, errorMessage def _validate_existing_masking_view(self, conn, maskingViewDict, maskingViewInstanceName, extraSpecs): """Validate the components of an existing masking view. 
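
        Returns a two-tuple; the storage group instance name stays None
        when the initiator group check fails, e.g.::

            sgInstanceName, errorMessage = (
                self._validate_existing_masking_view(
                    conn, maskingViewDict, maskingViewInstanceName,
                    extraSpecs))
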
:param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param maskingViewInstanceName: the masking view instance name :param extraSpecs: extra specification :returns: storageGroupInstanceName :returns: string -- errorMessage """ storageGroupInstanceName = None controllerConfigService = maskingViewDict['controllerConfigService'] sgGroupName = maskingViewDict['sgGroupName'] igGroupName = maskingViewDict['igGroupName'] connector = maskingViewDict['connector'] storageSystemName = maskingViewDict['storageSystemName'] maskingViewName = maskingViewDict['maskingViewName'] # First verify that the initiator group matches the initiators. errorMessage = self._check_existing_initiator_group( conn, controllerConfigService, maskingViewName, connector, storageSystemName, igGroupName, extraSpecs) if errorMessage: return storageGroupInstanceName, errorMessage storageGroupInstanceName, errorMessage = ( self._check_existing_storage_group( conn, controllerConfigService, sgGroupName, maskingViewInstanceName)) return storageGroupInstanceName, errorMessage def _check_storage_group(self, conn, maskingViewDict, storageGroupInstanceName): """Get the storage group and return it. :param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param storageGroupInstanceName: default storage group instance name :returns: storageGroupInstanceName :returns: string -- msg, the error message """ msg = None storageGroupInstanceName = ( self._get_storage_group_instance_name( conn, maskingViewDict, storageGroupInstanceName)) if storageGroupInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot get or create a storage group: %(sgGroupName)s" " for volume %(volumeName)s ") % {'sgGroupName': maskingViewDict['sgGroupName'], 'volumeName': maskingViewDict['volumeName']}) LOG.error(msg) return storageGroupInstanceName, msg def _check_existing_storage_group( self, conn, controllerConfigService, sgGroupName, maskingViewInstanceName): """Check that we can get the existing storage group. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param sgGroupName: the storage group name :param maskingViewInstanceName: the masking view instance name :returns: storageGroupInstanceName :returns: string -- msg, the error message """ msg = None sgFromMvInstanceName = ( self._get_storage_group_from_masking_view_instance( conn, maskingViewInstanceName)) if sgFromMvInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot get storage group: %(sgGroupName)s " "from masking view %(maskingViewInstanceName)s. ") % {'sgGroupName': sgGroupName, 'maskingViewInstanceName': maskingViewInstanceName}) LOG.error(msg) return sgFromMvInstanceName, msg def _check_port_group(self, conn, controllerConfigService, pgGroupName): """Check that you can either get or create a port group. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param pgGroupName: the port group Name :returns: portGroupInstanceName :returns: string -- msg, the error message """ msg = None portGroupInstanceName = self._get_port_group_instance_name( conn, controllerConfigService, pgGroupName) if portGroupInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot get port group: %(pgGroupName)s. 
") % {'pgGroupName': pgGroupName}) LOG.error(msg) return portGroupInstanceName, msg def _check_initiator_group( self, conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs): """Check that initiator group can be either retrieved or created. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param igGroupName: the initiator group Name :param connector: the connector object :param storageSystemName: the storage system name :param extraSpecs: extra specifications :returns: initiatorGroupInstanceName :returns: string -- the error message """ msg = None initiatorGroupInstanceName = ( self._get_initiator_group_instance_name( conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs)) if initiatorGroupInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot get or create initiator group: " "%(igGroupName)s. ") % {'igGroupName': igGroupName}) LOG.error(msg) return initiatorGroupInstanceName, msg def _check_existing_initiator_group( self, conn, controllerConfigService, maskingViewName, connector, storageSystemName, igGroupName, extraSpecs): """Check that existing initiator group in the masking view. Check if the initiators in the initiator group match those in the system. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param maskingViewName: the masking view name :param connector: the connector object :param storageSystemName: the storage system name :param igGroupName: the initiator group name :param extraSpecs: extra specification :returns: string -- msg, the error message """ msg = None if not self._verify_initiator_group_from_masking_view( conn, controllerConfigService, maskingViewName, connector, storageSystemName, igGroupName, extraSpecs): # This may be used in exception hence _ instead of _LE. msg = (_( "Unable to verify initiator group: %(igGroupName)s " "in masking view %(maskingViewName)s. ") % {'igGroupName': igGroupName, 'maskingViewName': maskingViewName}) LOG.error(msg) return msg def _check_masking_view( self, conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs): """Check that masking view can be either got or created. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param maskingViewName: the masking view name :param storageGroupInstanceName: storage group instance name :param portGroupInstanceName: port group instance name :param initiatorGroupInstanceName: the initiator group instance name :param extraSpecs: extra specifications :returns: maskingViewInstanceName :returns: string -- msg, the error message """ msg = None maskingViewInstanceName = ( self._get_masking_view_instance_name( conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs)) if maskingViewInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot create masking view: %(maskingViewName)s. ") % {'maskingViewName': maskingViewName}) LOG.error(msg) return maskingViewInstanceName, msg def _check_adding_volume_to_storage_group( self, conn, maskingViewDict, storageGroupInstanceName): """Add the volume to the storage group and double check it is there. 
:param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param storageGroupInstanceName: storage group instance name :returns: string -- the error message """ controllerConfigService = maskingViewDict['controllerConfigService'] sgGroupName = maskingViewDict['sgGroupName'] volumeInstance = maskingViewDict['volumeInstance'] volumeName = maskingViewDict['volumeName'] msg = None if self._is_volume_in_storage_group( conn, storageGroupInstanceName, volumeInstance, sgGroupName): LOG.warning(_LW( "Volume: %(volumeName)s is already part " "of storage group %(sgGroupName)s."), {'volumeName': volumeName, 'sgGroupName': sgGroupName}) else: msg = self._add_volume_to_sg_and_verify( conn, controllerConfigService, storageGroupInstanceName, volumeInstance, volumeName, sgGroupName, maskingViewDict['extraSpecs']) return msg def _add_volume_to_sg_and_verify( self, conn, controllerConfigService, storageGroupInstanceName, volumeInstance, volumeName, sgGroupName, extraSpecs): """Add the volume to the storage group and double check it is there. :param conn: the ecom connection :param controllerConfigService: the controller configuration service :param storageGroupInstanceName: storage group instance name :param volumeInstance: the volume instance :param volumeName: the volume name :param sgGroupName: the storage group name :param extraSpecs: the extra specifications :returns: string -- the error message """ msg = None self.add_volume_to_storage_group( conn, controllerConfigService, storageGroupInstanceName, volumeInstance, volumeName, sgGroupName, extraSpecs) if not self._is_volume_in_storage_group( conn, storageGroupInstanceName, volumeInstance, sgGroupName): # This may be used in an exception, hence _ instead of _LE. msg = (_( "Volume: %(volumeName)s was not added " "to storage group %(sgGroupName)s.") % {'volumeName': volumeName, 'sgGroupName': sgGroupName}) LOG.error(msg) else: LOG.info(_LI("Successfully added %(volumeName)s to " "%(sgGroupName)s."), {'volumeName': volumeName, 'sgGroupName': sgGroupName}) return msg def _get_and_remove_from_storage_group_v2( self, conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName, extraSpecs): """Get the storage group and remove volume from it. 
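An illustrative call on the V2/FAST path (a sketch; the method raises VolumeBackendAPIException when the default storage group cannot be found or the volume cannot be removed from it)::

    defaultStorageGroupInstanceName = (
        self._get_and_remove_from_storage_group_v2(
            conn, controllerConfigService, volumeInstanceName,
            volumeName, fastPolicyName, extraSpecs))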
:param conn: the ecom connection :param controllerConfigService: controller configuration service :param volumeInstanceName: volume instance name :param volumeName: volume name :param fastPolicyName: the FAST policy name :param extraSpecs: additional info :returns: defaultStorageGroupInstanceName :raises: VolumeBackendAPIException """ defaultStorageGroupInstanceName = ( self.fast.get_and_verify_default_storage_group( conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName)) if defaultStorageGroupInstanceName is None: exceptionMessage = (_( "Cannot get the default storage group for FAST policy: " "%(fastPolicyName)s.") % {'fastPolicyName': fastPolicyName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) retStorageGroupInstanceName = ( self.remove_device_from_default_storage_group( conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName, extraSpecs)) if retStorageGroupInstanceName is None: exceptionMessage = (_( "Failed to remove volume %(volumeName)s from default SG.") % {'volumeName': volumeName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) return defaultStorageGroupInstanceName def _get_and_remove_from_storage_group_v3( self, conn, controllerConfigService, volumeInstanceName, volumeName, maskingViewDict, storageGroupInstanceName): """Get the storage group and remove volume from it. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param volumeInstanceName: volume instance name :param volumeName: volume name :param maskingViewDict: the masking view dictionary :param storageGroupInstanceName: storage group instance name :raises: VolumeBackendAPIException """ assocVolumeInstanceNames = self.get_devices_from_storage_group( conn, storageGroupInstanceName) LOG.debug( "There are %(length)lu volumes associated with the default storage " "group before removing volume %(volumeName)s.", {'length': len(assocVolumeInstanceNames), 'volumeName': volumeName}) self.provision.remove_device_from_storage_group( conn, controllerConfigService, storageGroupInstanceName, volumeInstanceName, volumeName, maskingViewDict['extraSpecs']) assocVolumeInstanceNames = self.get_devices_from_storage_group( conn, storageGroupInstanceName) LOG.debug( "There are %(length)lu volumes associated with the default storage " "group after removing volume %(volumeName)s.", {'length': len(assocVolumeInstanceNames), 'volumeName': volumeName}) # Required for unit tests. emptyStorageGroupInstanceName = ( self._wrap_get_storage_group_from_volume( conn, volumeInstanceName, maskingViewDict['sgGroupName'])) if emptyStorageGroupInstanceName is not None: exceptionMessage = (_( "Failed to remove volume %(volumeName)s from default SG.") % {'volumeName': volumeName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) def _is_volume_in_storage_group( self, conn, storageGroupInstanceName, volumeInstance, sgName): """Check if the volume is already part of the storage group. Check if the volume is already part of the storage group; if it is, there is no need to re-add it. 
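An illustrative guard, as used in _check_adding_volume_to_storage_group (a sketch)::

    if self._is_volume_in_storage_group(
            conn, storageGroupInstanceName, volumeInstance, sgName):
        # Already a member; skip the add.
        pass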
:param conn: the connection to ecom :param storageGroupInstanceName: the storage group instance name :param volumeInstance: the volume instance :param sgName: the storage group name :returns: boolean """ foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( conn, volumeInstance.path, sgName)) if foundStorageGroupInstanceName is not None: storageGroupInstance = conn.GetInstance( storageGroupInstanceName, LocalOnly=False) LOG.debug( "The existing storage group instance element name is: " "%(existingElement)s.", {'existingElement': storageGroupInstance['ElementName']}) foundStorageGroupInstance = conn.GetInstance( foundStorageGroupInstanceName, LocalOnly=False) LOG.debug( "The found storage group instance element name is: " "%(foundElement)s.", {'foundElement': foundStorageGroupInstance['ElementName']}) if (foundStorageGroupInstance['ElementName'] == ( storageGroupInstance['ElementName'])): return True return False def _find_masking_view(self, conn, maskingViewName, storageSystemName): """Given the masking view name, get the masking view instance. :param conn: connection to the ecom server :param maskingViewName: the masking view name :param storageSystemName: the storage system name (String) :returns: dict -- foundMaskingViewInstanceName """ foundMaskingViewInstanceName = None storageSystemInstanceName = self.utils.find_storageSystem( conn, storageSystemName) maskingViewInstances = conn.Associators( storageSystemInstanceName, ResultClass='EMC_LunMaskingSCSIProtocolController') for maskingViewInstance in maskingViewInstances: if maskingViewName == maskingViewInstance['ElementName']: foundMaskingViewInstanceName = maskingViewInstance.path break if foundMaskingViewInstanceName is not None: # Now check that it has not been deleted. instance = self.utils.get_existing_instance( conn, foundMaskingViewInstanceName) if instance is None: foundMaskingViewInstanceName = None LOG.error(_LE( "Looks like masking view: %(maskingViewName)s " "has recently been deleted."), {'maskingViewName': maskingViewName}) else: LOG.info(_LI( "Found existing masking view: %(maskingViewName)s."), {'maskingViewName': maskingViewName}) return foundMaskingViewInstanceName def _create_storage_group( self, conn, maskingViewDict, defaultStorageGroupInstanceName): """Create a new storage group that doesn't already exist. If fastPolicyName is not None, we attempt to remove it from the default storage group of that policy and associate it with the new storage group that will be part of the masking view. Exceptions are not handled in this method; they are handled up the stack. 
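For V3, the pool/SLO/workload triple is read from the maskingViewDict; the values below are illustrative examples only::

    maskingViewDict['pool']      # e.g. 'SRP_1'
    maskingViewDict['slo']       # e.g. 'Bronze'
    maskingViewDict['workload']  # e.g. 'DSS'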
:param conn: connection to the ecom server :param maskingViewDict: the masking view dictionary :param defaultStorageGroupInstanceName: the default storage group instance name (Can be None) :returns: foundStorageGroupInstanceName -- the instance name of the storage group """ failedRet = None controllerConfigService = maskingViewDict['controllerConfigService'] storageGroupName = maskingViewDict['sgGroupName'] isV3 = maskingViewDict['isV3'] if isV3: workload = maskingViewDict['workload'] pool = maskingViewDict['pool'] slo = maskingViewDict['slo'] foundStorageGroupInstanceName = ( self.provisionv3.create_storage_group_v3( conn, controllerConfigService, storageGroupName, pool, slo, workload, maskingViewDict['extraSpecs'])) else: fastPolicyName = maskingViewDict['fastPolicy'] volumeInstance = maskingViewDict['volumeInstance'] foundStorageGroupInstanceName = ( self.provision.create_and_get_storage_group( conn, controllerConfigService, storageGroupName, volumeInstance.path, maskingViewDict['extraSpecs'])) if (fastPolicyName is not None and defaultStorageGroupInstanceName is not None): assocTierPolicyInstanceName = ( self.fast.add_storage_group_and_verify_tier_policy_assoc( conn, controllerConfigService, foundStorageGroupInstanceName, storageGroupName, fastPolicyName, maskingViewDict['extraSpecs'])) if assocTierPolicyInstanceName is None: LOG.error(_LE( "Cannot add and verify tier policy association for " "storage group: %(storageGroupName)s to " "FAST policy: %(fastPolicyName)s."), {'storageGroupName': storageGroupName, 'fastPolicyName': fastPolicyName}) return failedRet if foundStorageGroupInstanceName is None: LOG.error(_LE( "Cannot get storage group from job: %(storageGroupName)s."), {'storageGroupName': storageGroupName}) return failedRet else: LOG.info(_LI( "Created new storage group: %(storageGroupName)s."), {'storageGroupName': storageGroupName}) return foundStorageGroupInstanceName def find_port_group(self, conn, controllerConfigService, portGroupName): """Given the port group name, get the port group instance name. :param conn: connection to the ecom server :param controllerConfigService: the controller configuration service :param portGroupName: the name of the port group you are getting :returns: foundPortGroupInstanceName """ foundPortGroupInstanceName = None portMaskingGroupInstances = conn.Associators( controllerConfigService, ResultClass='CIM_TargetMaskingGroup') for portMaskingGroupInstance in portMaskingGroupInstances: if portGroupName == portMaskingGroupInstance['ElementName']: # Check to see if it has been recently deleted. instance = self.utils.get_existing_instance( conn, portMaskingGroupInstance.path) if instance is None: foundPortGroupInstanceName = None else: foundPortGroupInstanceName = instance.path break if foundPortGroupInstanceName is None: LOG.error(_LE( "Could not find port group: %(portGroupName)s. Check that " "the EMC configuration file has the correct port group name."), {'portGroupName': portGroupName}) return foundPortGroupInstanceName def _create_or_get_initiator_group( self, conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs): """Attempt to create an initiatorGroup. If one already exists with the same initiator/wwns then get it. Check to see if an initiatorGroup already exists that matches the connector information. NOTE: An initiator/wwn can only belong to one initiatorGroup. If we were to attempt to create one with an initiator/wwn that already belongs to another initiatorGroup, it would fail. 
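A usage sketch from the masking flow (igGroupName is the driver-proposed name; an existing group that already contains the host's initiators takes precedence, per the NOTE above)::

    initiatorGroupInstanceName = self._create_or_get_initiator_group(
        conn, controllerConfigService, igGroupName, connector,
        storageSystemName, extraSpecs)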
:param conn: connection to the ecom server :param controllerConfigService: the controller config service :param igGroupName: the proposed name of the initiator group :param connector: the connector information to the host :param storageSystemName: the storage system name (String) :param extraSpecs: extra specifications :returns: foundInitiatorGroupInstanceName """ initiatorNames = self._find_initiator_names(conn, connector) LOG.debug("The initiator name(s) are: %(initiatorNames)s.", {'initiatorNames': initiatorNames}) foundInitiatorGroupInstanceName = self._find_initiator_masking_group( conn, controllerConfigService, initiatorNames) # If you cannot find an initiatorGroup that matches the connector # info, create a new initiatorGroup. if foundInitiatorGroupInstanceName is None: # Check that our connector information matches the # hardwareId(s) on the VMAX. storageHardwareIDInstanceNames = ( self._get_storage_hardware_id_instance_names( conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: LOG.info(_LI( "Initiator Name(s) %(initiatorNames)s are not on array " "%(storageSystemName)s."), {'initiatorNames': initiatorNames, 'storageSystemName': storageSystemName}) storageHardwareIDInstanceNames = ( self._create_hardware_ids(conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: msg = (_("Failed to create hardware id(s) on " "%(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) foundInitiatorGroupInstanceName = self._create_initiator_Group( conn, controllerConfigService, igGroupName, storageHardwareIDInstanceNames, extraSpecs) LOG.info(_LI( "Created new initiator group name: %(igGroupName)s."), {'igGroupName': igGroupName}) else: LOG.info(_LI( "Using existing initiator group name: %(igGroupName)s."), {'igGroupName': igGroupName}) return foundInitiatorGroupInstanceName def _find_initiator_names(self, conn, connector): """Check the connector object for initiators (iSCSI) or wwpns (FC). :param conn: the connection to the ecom :param connector: the connector object :returns: list -- list of found initiator names :raises: VolumeBackendAPIException """ foundinitiatornames = [] name = 'initiator name' if (self.protocol.lower() == ISCSI and connector['initiator']): foundinitiatornames.append(connector['initiator']) elif self.protocol.lower() == FC: if ('wwpns' in connector and connector['wwpns']): for wwn in connector['wwpns']: foundinitiatornames.append(wwn) name = 'world wide port names' else: msg = (_("FC is the protocol but wwpns are " "not supplied by OpenStack.")) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not foundinitiatornames: msg = (_("Error finding %(name)s.") % {'name': name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Found %(name)s: %(initiator)s.", {'name': name, 'initiator': foundinitiatornames}) return foundinitiatornames def _find_initiator_masking_group( self, conn, controllerConfigService, initiatorNames): """Check to see if an initiatorGroup already exists. NOTE: An initiator/wwn can only belong to one initiatorGroup. If we were to attempt to create one with an initiator/wwn that already belongs to another initiatorGroup, it would fail. 
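A lookup sketch (the WWN shown is a made-up example; None is returned when no existing group contains any of the initiators)::

    igInstanceName = self._find_initiator_masking_group(
        conn, controllerConfigService, ['10000000c9991234'])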
:param conn: the connection to the ecom server :param controllerConfigService: the controller configuration service :param initiatorNames: the list of initiator names :returns: foundInitiatorMaskingGroupInstanceName """ foundInitiatorMaskingGroupInstanceName = None initiatorMaskingGroupInstanceNames = ( conn.AssociatorNames(controllerConfigService, ResultClass='CIM_InitiatorMaskingGroup')) for initiatorMaskingGroupInstanceName in ( initiatorMaskingGroupInstanceNames): # Check that it hasn't been deleted. If it has, break out # of the for loop. instance = self.utils.get_existing_instance( conn, initiatorMaskingGroupInstanceName) if instance is None: # MaskingGroup doesn't exist any more. break storageHardwareIdInstances = ( conn.Associators(initiatorMaskingGroupInstanceName, ResultClass='EMC_StorageHardwareID')) for storageHardwareIdInstance in storageHardwareIdInstances: # If EMC_StorageHardwareID matches the initiator, # we found the existing CIM_InitiatorMaskingGroup. hardwareid = storageHardwareIdInstance['StorageID'] for initiator in initiatorNames: if six.text_type(hardwareid).lower() == ( six.text_type(initiator).lower()): foundInitiatorMaskingGroupInstanceName = ( initiatorMaskingGroupInstanceName) break if foundInitiatorMaskingGroupInstanceName is not None: break if foundInitiatorMaskingGroupInstanceName is not None: break return foundInitiatorMaskingGroupInstanceName def _get_storage_hardware_id_instance_names( self, conn, initiatorNames, storageSystemName): """Given a list of initiator names, find the CIM_StorageHardwareID instances. :param conn: the connection to the ecom server :param initiatorNames: the list of initiator names :param storageSystemName: the storage system name :returns: list -- foundHardwareIDsInstanceNames """ foundHardwareIDsInstanceNames = [] hardwareIdManagementService = ( self.utils.find_storage_hardwareid_service( conn, storageSystemName)) hardwareIdInstances = ( self.utils.get_hardware_id_instances_from_array( conn, hardwareIdManagementService)) for hardwareIdInstance in hardwareIdInstances: storageId = hardwareIdInstance['StorageID'] for initiatorName in initiatorNames: if storageId.lower() == initiatorName.lower(): # Check whether the found hardwareId has been deleted. # If it has, we don't want to add it to the list. instance = self.utils.get_existing_instance( conn, hardwareIdInstance.path) if instance is None: # HardwareId doesn't exist. Skip it. break foundHardwareIDsInstanceNames.append( hardwareIdInstance.path) break LOG.debug( "The found hardware IDs are: %(foundHardwareIDsInstanceNames)s.", {'foundHardwareIDsInstanceNames': foundHardwareIDsInstanceNames}) return foundHardwareIDsInstanceNames def _get_initiator_group_from_job(self, conn, job): """After creating a new initiator group, find it and return it. 
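The returned dict carries the classname and keybindings of the group's CIM path, along these lines (shape per the body below; values illustrative)::

    {'classname': 'SE_InitiatorMaskingGroup',
     'keybindings': {'CreationClassName': ...,
                     'SystemName': ...,
                     'DeviceID': ...,
                     'SystemCreationClassName': ...}}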
:param conn: the connection to the ecom server :param job: the create initiator group job :returns: dict -- initiatorDict """ associators = conn.Associators( job['Job'], ResultClass='CIM_InitiatorMaskingGroup') volpath = associators[0].path initiatorDict = {} initiatorDict['classname'] = volpath.classname keys = {} keys['CreationClassName'] = volpath['CreationClassName'] keys['SystemName'] = volpath['SystemName'] keys['DeviceID'] = volpath['DeviceID'] keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] initiatorDict['keybindings'] = keys return initiatorDict def _create_masking_view( self, conn, configService, maskingViewName, deviceMaskingGroup, targetMaskingGroup, initiatorMaskingGroup, extraSpecs): """Create a new masking view. :param conn: the connection to the ecom server :param configService: the controller configuration service :param maskingViewName: the masking view name string :param deviceMaskingGroup: device(storage) masking group (instanceName) :param targetMaskingGroup: target(port) masking group (instanceName) :param initiatorMaskingGroup: initiator masking group (instanceName) :param extraSpecs: extra specifications :returns: int -- return code :returns: dict -- job :raises: VolumeBackendAPIException """ rc, job = conn.InvokeMethod( 'CreateMaskingView', configService, ElementName=maskingViewName, InitiatorMaskingGroup=initiatorMaskingGroup, DeviceMaskingGroup=deviceMaskingGroup, TargetMaskingGroup=targetMaskingGroup) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Creating Masking View: %(groupName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': maskingViewName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.info(_LI( "Created new masking view: %(maskingViewName)s."), {'maskingViewName': maskingViewName}) return rc, job def find_new_masking_view(self, conn, jobDict): """Find the newly created masking view. :param conn: the connection to the ecom server :param jobDict: the job dictionary :returns: dict -- maskingViewInstance """ associators = conn.Associators( jobDict['Job'], ResultClass='Symm_LunMaskingView') mvpath = associators[0].path maskingViewInstance = {} maskingViewInstance['classname'] = mvpath.classname keys = {} keys['CreationClassName'] = mvpath['CreationClassName'] keys['SystemName'] = mvpath['SystemName'] keys['DeviceID'] = mvpath['DeviceID'] keys['SystemCreationClassName'] = mvpath['SystemCreationClassName'] maskingViewInstance['keybindings'] = keys return maskingViewInstance def _get_storage_group_from_masking_view( self, conn, maskingViewName, storageSystemName): """Gets the Device Masking Group from the masking view. 
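An illustrative lookup (the view name is an example only; None is returned and a warning logged when the view cannot be found)::

    sgInstanceName = self._get_storage_group_from_masking_view(
        conn, 'OS-host1-MV', storageSystemName)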
:param conn: the connection to the ecom server :param maskingViewName: the masking view name (String) :param storageSystemName: storage system name (String) :returns: instance name foundStorageGroupInstanceName """ foundStorageGroupInstanceName = None foundView = self._find_masking_view( conn, maskingViewName, storageSystemName) if foundView is not None: foundStorageGroupInstanceName = ( self._get_storage_group_from_masking_view_instance( conn, foundView)) LOG.debug( "Masking view: %(view)s DeviceMaskingGroup: %(masking)s.", {'view': maskingViewName, 'masking': foundStorageGroupInstanceName}) else: LOG.warning(_LW("Unable to find Masking view: %(view)s."), {'view': maskingViewName}) return foundStorageGroupInstanceName def _get_storage_group_from_masking_view_instance( self, conn, maskingViewInstance): """Gets the Device Masking Group from the masking view instance. :param conn: the connection to the ecom server :param maskingViewInstance: the masking view instance :returns: instance name foundStorageGroupInstanceName """ foundStorageGroupInstanceName = None groups = conn.AssociatorNames( maskingViewInstance, ResultClass='CIM_DeviceMaskingGroup') if groups: foundStorageGroupInstanceName = groups[0] return foundStorageGroupInstanceName def _get_storage_group_instance_name( self, conn, maskingViewDict, defaultStorageGroupInstanceName): """Gets the storage group instance name. If the fastPolicy name is None, then non-FAST is assumed. If it is a valid fastPolicy name, then associate the new storage group with the fast policy. If we are using an existing storage group, then we must check that it is associated with the correct fast policy. :param conn: the connection to the ecom server :param maskingViewDict: the masking view dictionary :param defaultStorageGroupInstanceName: default storage group instance name (can be None for Non FAST) :returns: instance name storageGroupInstanceName :raises: VolumeBackendAPIException """ storageGroupInstanceName = self.utils.find_storage_masking_group( conn, maskingViewDict['controllerConfigService'], maskingViewDict['sgGroupName']) if storageGroupInstanceName is None: storageGroupInstanceName = self._create_storage_group( conn, maskingViewDict, defaultStorageGroupInstanceName) if storageGroupInstanceName is None: errorMessage = (_( "Cannot create or find a storage group with name " "%(sgGroupName)s.") % {'sgGroupName': maskingViewDict['sgGroupName']}) LOG.error(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) return storageGroupInstanceName def _get_port_group_instance_name( self, conn, controllerConfigService, pgGroupName): """Gets the port group instance name. The portGroup name has been defined in the EMC Config file; if it does not exist, the operation should fail. :param conn: the connection to the ecom server :param controllerConfigService: the controller configuration service :param pgGroupName: the port group name :returns: instance name foundPortGroupInstanceName """ foundPortGroupInstanceName = self.find_port_group( conn, controllerConfigService, pgGroupName) if foundPortGroupInstanceName is None: LOG.error(_LE( "Cannot find a portGroup with name %(pgGroupName)s. 
" "The port group for a masking view must be pre-defined."), {'pgGroupName': pgGroupName}) return foundPortGroupInstanceName LOG.info(_LI( "Port group instance name is %(foundPortGroupInstanceName)s."), {'foundPortGroupInstanceName': foundPortGroupInstanceName}) return foundPortGroupInstanceName def _get_initiator_group_instance_name( self, conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs): """Gets the initiator group instance name. :param conn: the connection to the ecom server :param controllerConfigService: the controller configuration server :param igGroupName: the port group name :param connector: the connector object :param storageSystemName: the storage system name :param extraSpecs: extra specifications :returns: foundInitiatorGroupInstanceName """ foundInitiatorGroupInstanceName = (self._create_or_get_initiator_group( conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs)) if foundInitiatorGroupInstanceName is None: LOG.error(_LE( "Cannot create or find an initiator group with " "name %(igGroupName)s."), {'igGroupName': igGroupName}) return foundInitiatorGroupInstanceName def _get_masking_view_instance_name( self, conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs): """Gets the masking view instance name. :param conn: the connection to the ecom server :param controllerConfigService: the controller configuration server :param maskingViewName: the masking view name (String) :param storageGroupInstanceName: the storage group instance name :param portGroupInstanceName: the port group instance name :param initiatorGroupInstanceName: the initiator group instance name :param extraSpecs: extra specifications :returns: instance name foundMaskingViewInstanceName """ _rc, job = ( self._create_masking_view( conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs)) foundMaskingViewInstanceName = self.find_new_masking_view(conn, job) if foundMaskingViewInstanceName is None: LOG.error(_LE( "Cannot find the new masking view just created with name " "%(maskingViewName)s."), {'maskingViewName': maskingViewName}) return foundMaskingViewInstanceName def _check_if_rollback_action_for_masking_required( self, conn, rollbackDict): """This is a rollback action for FAST. We need to be able to return the volume to the default storage group if anything has gone wrong. The volume can also potentially belong to a storage group that is not the default depending on where the exception occurred. :param conn: the connection to the ecom server :param rollbackDict: the rollback dictionary :returns: message :raises: VolumeBackendAPIException """ message = None try: if rollbackDict['isV3']: errorMessage = self._check_adding_volume_to_storage_group( conn, rollbackDict, rollbackDict['defaultStorageGroupInstanceName']) if errorMessage: LOG.error(errorMessage) message = (_("V3 rollback")) else: foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( conn, rollbackDict['volumeInstance'].path, rollbackDict['sgName'])) # Volume is not associated with any storage group so add # it back to the default. if not foundStorageGroupInstanceName: LOG.warning(_LW( "No storage group found. 
" "Performing rollback on Volume: %(volumeName)s " "To return it to the default storage group for FAST " "policy %(fastPolicyName)s."), {'volumeName': rollbackDict['volumeName'], 'fastPolicyName': rollbackDict['fastPolicyName']}) assocDefaultStorageGroupName = ( self.fast .add_volume_to_default_storage_group_for_fast_policy( conn, rollbackDict['controllerConfigService'], rollbackDict['volumeInstance'], rollbackDict['volumeName'], rollbackDict['fastPolicyName'], rollbackDict['extraSpecs'])) if assocDefaultStorageGroupName is None: LOG.error(_LE( "Failed to Roll back to re-add volume " "%(volumeName)s " "to default storage group for fast policy " "%(fastPolicyName)s: Please contact your sys " "admin to get the volume re-added manually."), {'volumeName': rollbackDict['volumeName'], 'fastPolicyName': rollbackDict['fastPolicyName']}) message = (_("V2 rollback, volume is not in any storage " "group.")) else: LOG.info(_LI( "The storage group found is " "%(foundStorageGroupInstanceName)s."), {'foundStorageGroupInstanceName': foundStorageGroupInstanceName}) # Check the name, see is it the default storage group # or another. if (foundStorageGroupInstanceName != rollbackDict['defaultStorageGroupInstanceName']): # Remove it from its current masking view and return it # to its default masking view if fast is enabled. self.remove_and_reset_members( conn, rollbackDict['controllerConfigService'], rollbackDict['volumeInstance'], rollbackDict['volumeName'], rollbackDict['extraSpecs']) message = (_("V2 rollback - Volume in another storage " "group besides default storage group.")) except Exception: errorMessage = (_( "Rollback for Volume: %(volumeName)s has failed. " "Please contact your system administrator to manually return " "your volume to the default storage group for fast policy " "%(fastPolicyName)s failed.") % {'volumeName': rollbackDict['volumeName'], 'fastPolicyName': rollbackDict['fastPolicyName']}) LOG.exception(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) return message def _find_new_initiator_group(self, conn, maskingGroupDict): """After creating an new initiator group find it and return it. :param conn: connection the ecom server :param maskingGroupDict: the maskingGroupDict dict :returns: instance name foundInitiatorGroupInstanceName """ foundInitiatorGroupInstanceName = None if 'MaskingGroup' in maskingGroupDict: foundInitiatorGroupInstanceName = maskingGroupDict['MaskingGroup'] return foundInitiatorGroupInstanceName def _get_initiator_group_from_masking_view( self, conn, maskingViewName, storageSystemName): """Given the masking view name get the initiator group from it. 
:param conn: connection to the ecom server :param maskingViewName: the name of the masking view :param storageSystemName: the storage system name :returns: instance name foundInitiatorMaskingGroupInstanceName """ foundInitiatorMaskingGroupInstanceName = None foundView = self._find_masking_view( conn, maskingViewName, storageSystemName) if foundView is not None: groups = conn.AssociatorNames( foundView, ResultClass='CIM_InitiatorMaskingGroup') if groups: foundInitiatorMaskingGroupInstanceName = groups[0] LOG.debug( "Masking view: %(view)s InitiatorMaskingGroup: %(masking)s.", {'view': maskingViewName, 'masking': foundInitiatorMaskingGroupInstanceName}) else: LOG.warning(_LW("Unable to find Masking view: %(view)s."), {'view': maskingViewName}) return foundInitiatorMaskingGroupInstanceName def _verify_initiator_group_from_masking_view( self, conn, controllerConfigService, maskingViewName, connector, storageSystemName, igGroupName, extraSpecs): """Check that the initiator group contains the correct initiators. If using an existing masking view, check that the initiator group contains the correct initiators. If it does not contain the correct initiators, then we delete the initiator group from the masking view, re-create it with the correct initiators and add it to the masking view. NOTE: EMC does not support ModifyMaskingView, so we must first delete the masking view and recreate it. :param conn: connection to the ecom server :param controllerConfigService: the controller configuration service :param maskingViewName: masking view name (String) :param connector: the connector dict :param storageSystemName: the storage system name (String) :param igGroupName: the initiator group name (String) :param extraSpecs: extra specifications :returns: boolean """ initiatorNames = self._find_initiator_names(conn, connector) foundInitiatorGroupFromConnector = self._find_initiator_masking_group( conn, controllerConfigService, initiatorNames) foundInitiatorGroupFromMaskingView = ( self._get_initiator_group_from_masking_view( conn, maskingViewName, storageSystemName)) if (foundInitiatorGroupFromConnector != foundInitiatorGroupFromMaskingView): if foundInitiatorGroupFromMaskingView is not None: maskingViewInstanceName = self._find_masking_view( conn, maskingViewName, storageSystemName) if foundInitiatorGroupFromConnector is None: storageHardwareIDInstanceNames = ( self._get_storage_hardware_id_instance_names( conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: LOG.info(_LI( "Initiator Name(s) %(initiatorNames)s are not on " "array %(storageSystemName)s.
"), {'initiatorNames': initiatorNames, 'storageSystemName': storageSystemName}) storageHardwareIDInstanceNames = ( self._create_hardware_ids(conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: LOG.error(_LE( "Failed to create hardware id(s) on " "%(storageSystemName)s."), {'storageSystemName': storageSystemName}) return False foundInitiatorGroupFromConnector = ( self._create_initiator_Group( conn, controllerConfigService, igGroupName, storageHardwareIDInstanceNames, extraSpecs)) storageGroupInstanceName = ( self._get_storage_group_from_masking_view( conn, maskingViewName, storageSystemName)) portGroupInstanceName = self._get_port_group_from_masking_view( conn, maskingViewName, storageSystemName) if (foundInitiatorGroupFromConnector is not None and storageGroupInstanceName is not None and portGroupInstanceName is not None): self._delete_masking_view( conn, controllerConfigService, maskingViewName, maskingViewInstanceName, extraSpecs) newMaskingViewInstanceName = ( self._get_masking_view_instance_name( conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, foundInitiatorGroupFromConnector, extraSpecs)) if newMaskingViewInstanceName is not None: LOG.debug( "The old masking view has been replaced: " "%(maskingViewName)s.", {'maskingViewName': maskingViewName}) else: LOG.error(_LE( "One of the components of the original masking view " "%(maskingViewName)s cannot be retrieved so " "please contact your system administrator to check " "that the correct initiator(s) are part of masking."), {'maskingViewName': maskingViewName}) return False return True def _create_initiator_Group( self, conn, controllerConfigService, igGroupName, hardwareIdinstanceNames, extraSpecs): """Create a new initiator group. Given a list of hardwareId Instance name create a new initiator group. :param conn: connection the ecom server :param controllerConfigService: the controller configuration service :param igGroupName: the initiator group name (String) :param hardwareIdinstanceNames: one or more hardware id instance names :param extraSpecs: extra specifications :returns: foundInitiatorGroupInstanceName :raises: VolumeBackendAPIException """ rc, job = conn.InvokeMethod( 'CreateGroup', controllerConfigService, GroupName=igGroupName, Type=self.utils.get_num(INITIATORGROUPTYPE, '16'), Members=[hardwareIdinstanceNames[0]]) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Group: %(groupName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': igGroupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) foundInitiatorGroupInstanceName = self._find_new_initiator_group( conn, job) numHardwareIDInstanceNames = len(hardwareIdinstanceNames) if numHardwareIDInstanceNames > 1: for j in range(1, numHardwareIDInstanceNames): rc, job = conn.InvokeMethod( 'AddMembers', controllerConfigService, MaskingGroup=foundInitiatorGroupInstanceName, Members=[hardwareIdinstanceNames[j]]) if rc != 0: rc, errordesc = ( self.utils.wait_for_job_complete(conn, job, extraSpecs)) if rc != 0: exceptionMessage = (_( "Error adding initiator to group : %(groupName)s. " "Return code: %(rc)lu. 
Error: %(error)s.") % {'groupName': igGroupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) j = j + 1 return foundInitiatorGroupInstanceName def _get_port_group_from_masking_view( self, conn, maskingViewName, storageSystemName): """Given the masking view name get the port group from it. :param conn: connection the the ecom server :param maskingViewName: the name of the masking view :param storageSystemName: the storage system name :returns: instance name foundPortMaskingGroupInstanceName """ foundPortMaskingGroupInstanceName = None foundView = self._find_masking_view( conn, maskingViewName, storageSystemName) if foundView: groups = conn.AssociatorNames( foundView, ResultClass='CIM_TargetMaskingGroup') if len(groups) > 0: foundPortMaskingGroupInstanceName = groups[0] LOG.debug( "Masking view: %(view)s InitiatorMaskingGroup: %(masking)s.", {'view': maskingViewName, 'masking': foundPortMaskingGroupInstanceName}) return foundPortMaskingGroupInstanceName def _delete_masking_view( self, conn, controllerConfigService, maskingViewName, maskingViewInstanceName, extraSpecs): """Delete a masking view. :param conn: connection the ecom server :param controllerConfigService: the controller configuration service :param maskingViewName: maskingview name (String) :param maskingViewInstanceName: the masking view instance name :param extraSpecs: extra specifications :raises: VolumeBackendAPIException """ rc, job = conn.InvokeMethod('DeleteMaskingView', controllerConfigService, ProtocolController=maskingViewInstanceName) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Modifying masking view : %(groupName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': maskingViewName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) def get_masking_view_from_storage_group( self, conn, storageGroupInstanceName): """Get the associated maskingview instance name. Given storage group instance name, get the associated masking view instance name. :param conn: connection the ecom server :param storageGroupInstanceName: the storage group instance name :returns: instance name foundMaskingViewInstanceName """ foundMaskingViewInstanceName = None maskingViews = conn.AssociatorNames( storageGroupInstanceName, ResultClass='Symm_LunMaskingView') if len(maskingViews) > 0: foundMaskingViewInstanceName = maskingViews[0] return foundMaskingViewInstanceName def add_volume_to_storage_group( self, conn, controllerConfigService, storageGroupInstanceName, volumeInstance, volumeName, sgGroupName, extraSpecs): """Add a volume to an existing storage group. 
:param conn: connection to the ecom server :param controllerConfigService: the controller configuration service :param storageGroupInstanceName: storage group instance name :param volumeInstance: the volume instance :param volumeName: the name of the volume (String) :param sgGroupName: the name of the storage group (String) :param extraSpecs: additional info """ self.provision.add_members_to_masking_group( conn, controllerConfigService, storageGroupInstanceName, volumeInstance.path, volumeName, extraSpecs) LOG.info(_LI( "Added volume: %(volumeName)s to existing storage group " "%(sgGroupName)s."), {'volumeName': volumeName, 'sgGroupName': sgGroupName}) def remove_device_from_default_storage_group( self, conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName, extraSpecs): """Remove the volume from the default storage group. Remove the volume from the default storage group for the FAST policy and return the default storage group instance name. :param conn: the connection to the ecom server :param controllerConfigService: the controller config service :param volumeInstanceName: the volume instance name :param volumeName: the volume name (String) :param fastPolicyName: the fast policy name (String) :param extraSpecs: additional info :returns: instance name defaultStorageGroupInstanceName """ failedRet = None defaultStorageGroupInstanceName, defaultSgName = ( self.fast.get_and_verify_default_storage_group( conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName)) if defaultStorageGroupInstanceName is None: LOG.warning(_LW( "Volume %(volumeName)s was not initially part of the default " "storage group for the FAST Policy."), {'volumeName': volumeName}) return failedRet assocVolumeInstanceNames = self.get_devices_from_storage_group( conn, defaultStorageGroupInstanceName) LOG.debug( "There are %(length)lu volumes associated with the default storage " "group for FAST before removing volume %(volumeName)s.", {'length': len(assocVolumeInstanceNames), 'volumeName': volumeName}) self.provision.remove_device_from_storage_group( conn, controllerConfigService, defaultStorageGroupInstanceName, volumeInstanceName, volumeName, extraSpecs) assocVolumeInstanceNames = self.get_devices_from_storage_group( conn, defaultStorageGroupInstanceName) LOG.debug( "There are %(length)lu volumes associated with the default storage " "group for FAST after removing volume %(volumeName)s.", {'length': len(assocVolumeInstanceNames), 'volumeName': volumeName}) # Required for unit tests. emptyStorageGroupInstanceName = ( self._wrap_get_storage_group_from_volume(conn, volumeInstanceName, defaultSgName)) if emptyStorageGroupInstanceName is not None: LOG.error(_LE( "Failed to remove %(volumeName)s from the default storage " "group for the FAST Policy."), {'volumeName': volumeName}) return failedRet return defaultStorageGroupInstanceName def _wrap_get_storage_group_from_volume(self, conn, volumeInstanceName, defaultSgName): """Wrapper for get_storage_group_from_volume. Needed for override in tests. :param conn: the connection to the ecom server :param volumeInstanceName: the volume instance name :param defaultSgName: the default storage group name :returns: emptyStorageGroupInstanceName """ return self.utils.get_storage_group_from_volume( conn, volumeInstanceName, defaultSgName) def get_devices_from_storage_group( self, conn, storageGroupInstanceName): """Get the associated volume instance names. 
Given the storage group instance name, get the associated volume instance names. :param conn: connection to the ecom server :param storageGroupInstanceName: the storage group instance name :returns: list -- volumeInstanceNames list of volume instance names """ volumeInstanceNames = conn.AssociatorNames( storageGroupInstanceName, ResultClass='EMC_StorageVolume') return volumeInstanceNames def get_associated_masking_groups_from_device( self, conn, volumeInstanceName): """Get the associated storage groups from the volume instance name. Given the volume instance name, get the associated storage group instance names. :param conn: connection to the ecom server :param volumeInstanceName: the volume instance name :returns: list -- list of storage group instance names """ maskingGroupInstanceNames = conn.AssociatorNames( volumeInstanceName, ResultClass='CIM_DeviceMaskingGroup', AssocClass='CIM_OrderedMemberOfCollection') if maskingGroupInstanceNames: return maskingGroupInstanceNames else: LOG.info(_LI("Volume %(volumeName)s not in any storage group."), {'volumeName': volumeInstanceName}) return None def remove_and_reset_members( self, conn, controllerConfigService, volumeInstance, volumeName, extraSpecs, connector=None, reset=True): """This is called on a delete, unmap device, or rollback. If the connector is not None, get the associated SG and remove the volume from the storage group; otherwise it is a VMAX3 deletion. :param conn: connection to the ecom server :param controllerConfigService: the controller configuration service :param volumeInstance: the volume instance :param volumeName: the volume name :param extraSpecs: additional info :param connector: the connector object (optional) :param reset: reset, return to original SG (optional) :returns: storageGroupInstanceName """ storageGroupInstanceName = None if connector is not None: storageGroupInstanceName = self._get_sg_associated_with_connector( conn, controllerConfigService, volumeInstance.path, volumeName, connector) if storageGroupInstanceName: self._remove_volume_from_sg( conn, controllerConfigService, storageGroupInstanceName, volumeInstance, extraSpecs) else: # Connector is None in V3 volume deletion case. 
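# A V3 delete has no attach context, so _cleanup_deletion_v3 walks # every storage group the device still belongs to.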
self._cleanup_deletion_v3( conn, controllerConfigService, volumeInstance, extraSpecs) if reset: self._return_back_to_default_sg( conn, controllerConfigService, volumeInstance, volumeName, extraSpecs) return storageGroupInstanceName def _cleanup_deletion_v3( self, conn, controllerConfigService, volumeInstance, extraSpecs): """Pre-cleanup before a VMAX3 deletion operation. :param conn: the ecom connection :param controllerConfigService: the controller configuration service :param volumeInstance: the volume instance :param extraSpecs: the extra specifications """ storageGroupInstanceNames = ( self.get_associated_masking_groups_from_device( conn, volumeInstance.path)) if storageGroupInstanceNames: sgNum = len(storageGroupInstanceNames) if sgNum > 1: LOG.warning(_LW("Volume %(volumeName)s belongs to " "%(sgNum)s storage groups."), {'volumeName': volumeInstance['ElementName'], 'sgNum': sgNum}) for storageGroupInstanceName in storageGroupInstanceNames: self._remove_volume_from_sg( conn, controllerConfigService, storageGroupInstanceName, volumeInstance, extraSpecs) def _remove_volume_from_sg( self, conn, controllerConfigService, storageGroupInstanceName, volumeInstance, extraSpecs): """Remove the volume from the storage group. :param conn: the ecom connection :param controllerConfigService: the controller configuration service :param storageGroupInstanceName: the SG instance name :param volumeInstance: the volume instance :param extraSpecs: the extra specifications """ instance = conn.GetInstance(storageGroupInstanceName, LocalOnly=False) storageGroupName = instance['ElementName'] volumeInstanceNames = self.get_devices_from_storage_group( conn, storageGroupInstanceName) numVolInStorageGroup = len(volumeInstanceNames) LOG.debug( "There are %(numVol)d volumes in the storage group " "%(maskingGroup)s.", {'numVol': numVolInStorageGroup, 'maskingGroup': storageGroupInstanceName}) if numVolInStorageGroup == 1: # Last volume in the storage group. self._last_vol_in_SG( conn, controllerConfigService, storageGroupInstanceName, storageGroupName, volumeInstance, volumeInstance['ElementName'], extraSpecs) else: # Not the last volume, so remove it from the storage group. self._multiple_vols_in_SG( conn, controllerConfigService, storageGroupInstanceName, volumeInstance, volumeInstance['ElementName'], numVolInStorageGroup, extraSpecs) def _last_vol_in_SG( self, conn, controllerConfigService, storageGroupInstanceName, storageGroupName, volumeInstance, volumeName, extraSpecs): """Steps if the volume is the last in a storage group. 1. Check if the volume is in a masking view. 2. If it is in a masking view, delete the masking view, remove the initiators from the initiator group and delete the initiator group if there are no other masking views associated with it, then remove the volume from the storage group and delete the storage group. 3. If it is not in a masking view, remove the volume from the storage group and delete the storage group. :param conn: the ecom connection :param controllerConfigService: the controller configuration service :param storageGroupInstanceName: the SG instance name :param storageGroupName: the storage group name (String) :param volumeInstance: the volume instance :param volumeName: the volume name :param extraSpecs: the extra specifications """ status = False LOG.debug("Only one volume remains in storage group " "%(sgname)s. 
Driver will attempt cleanup.", {'sgname': storageGroupName}) mvInstanceName = self.get_masking_view_from_storage_group( conn, storageGroupInstanceName) if mvInstanceName is None: LOG.debug("Unable to get masking view %(maskingView)s " "from storage group.", {'maskingView': mvInstanceName}) else: maskingViewInstance = conn.GetInstance( mvInstanceName, LocalOnly=False) maskingViewName = maskingViewInstance['ElementName'] if mvInstanceName: maskingViewInstance = conn.GetInstance( mvInstanceName, LocalOnly=False) maskingViewName = maskingViewInstance['ElementName'] @lockutils.synchronized(maskingViewName, "emc-mv-", True) def do_delete_mv_ig_and_sg(): return self._delete_mv_ig_and_sg( conn, controllerConfigService, mvInstanceName, maskingViewName, storageGroupInstanceName, storageGroupName, volumeInstance, volumeName, extraSpecs) do_delete_mv_ig_and_sg() status = True else: # Remove the volume from the storage group and delete the SG. self._remove_last_vol_and_delete_sg( conn, controllerConfigService, storageGroupInstanceName, storageGroupName, volumeInstance.path, volumeName, extraSpecs) status = True return status def _multiple_vols_in_SG( self, conn, controllerConfigService, storageGroupInstanceName, volumeInstance, volumeName, numVolsInSG, extraSpecs): """If the volume is not the last in the storage group Remove the volume from the SG. :param conn: the ecom connection :param controllerConfigService: storage system instance name :param storageGroupInstanceName: the SG instance name :param volumeInstance: the volume instance :param volumeName: the volume name :param numVolsInSG: the number of volumes in the SG :param extraSpecs: the extra specifications """ LOG.debug("Start: number of volumes in masking storage group: " "%(numVol)d", {'numVol': numVolsInSG}) self.provision.remove_device_from_storage_group( conn, controllerConfigService, storageGroupInstanceName, volumeInstance.path, volumeName, extraSpecs) LOG.debug( "RemoveMembers for volume %(volumeName)s completed " "successfully.", {'volumeName': volumeName}) volumeInstanceNames = self.get_devices_from_storage_group( conn, storageGroupInstanceName) LOG.debug( "End: number of volumes in masking storage group: %(numVol)d.", {'numVol': len(volumeInstanceNames)}) def _delete_mv_ig_and_sg( self, conn, controllerConfigService, mvInstanceName, maskingViewName, storageGroupInstanceName, storageGroupName, volumeInstance, volumeName, extraSpecs): """Delete the Masking view, the storage Group and the initiator group. 
:param conn: connection to the ecom server :param controllerConfigService: the controller configuration service :param mvInstanceName: masking view instance name :param maskingViewName: masking view name :param storageGroupInstanceName: storage group instance name :param storageGroupName: storage group name :param volumeInstance: the volume instance :param volumeName: the volume name :param extraSpecs: extra specs """ isV3 = extraSpecs[ISV3] fastPolicyName = extraSpecs.get(FASTPOLICY, None) storageSystemInstanceName = self.utils.find_storage_system( conn, controllerConfigService) initiatorGroupInstanceName = ( self.get_initiator_group_from_masking_view(conn, mvInstanceName)) self._last_volume_delete_masking_view( conn, controllerConfigService, mvInstanceName, maskingViewName, extraSpecs) self._last_volume_delete_initiator_group( conn, controllerConfigService, initiatorGroupInstanceName, extraSpecs) if not isV3: isTieringPolicySupported, tierPolicyServiceInstanceName = ( self._get_tiering_info(conn, storageSystemInstanceName, fastPolicyName)) self._get_and_remove_rule_association( conn, fastPolicyName, isTieringPolicySupported, tierPolicyServiceInstanceName, storageSystemInstanceName['Name'], storageGroupInstanceName, extraSpecs) self._remove_last_vol_and_delete_sg( conn, controllerConfigService, storageGroupInstanceName, storageGroupName, volumeInstance.path, volumeName, extraSpecs) LOG.debug( "Volume %(volumeName)s successfully removed from SG and " "storage group %(storageGroupName)s successfully deleted.", {'volumeName': volumeName, 'storageGroupName': storageGroupName}) def _return_back_to_default_sg( self, conn, controllerConfigService, volumeInstance, volumeName, extraSpecs): """Return volume to the default storage group. Moves the volume to the default SG for VMAX3 and FAST for VMAX2. :param conn: connection to the ecom server :param controllerConfigService: the controller configuration service :param volumeInstance: the volume instance :param volumeName: the volume name :param extraSpecs: extra specs """ # Add it back to the default storage group. if extraSpecs[ISV3]: self.return_volume_to_default_storage_group_v3( conn, controllerConfigService, volumeInstance, volumeName, extraSpecs) else: # V2: if FAST policy is enabled, move the volume to the default # SG. fastPolicyName = extraSpecs.get(FASTPOLICY, None) storageSystemInstanceName = self.utils.find_storage_system( conn, controllerConfigService) isTieringPolicySupported, __ = ( self._get_tiering_info(conn, storageSystemInstanceName, fastPolicyName)) if fastPolicyName is not None and isTieringPolicySupported: self._cleanup_tiering( conn, controllerConfigService, fastPolicyName, volumeInstance, volumeName, extraSpecs) def _get_sg_associated_with_connector( self, conn, controllerConfigService, volumeInstanceName, volumeName, connector): """Get storage group associated with connector. If the connector is passed in, extra logic is required to get the storage group. :param conn: the ecom connection :param controllerConfigService: the controller configuration service :param volumeInstanceName: the volume instance name :param volumeName: the volume name (String) :param connector: the connector object :returns: storageGroupInstanceName (can be None) """ return self._get_sg_or_mv_associated_with_initiator( conn, controllerConfigService, volumeInstanceName, volumeName, connector, True) def _get_tiering_info( self, conn, storageSystemInstanceName, fastPolicyName): """Get tiering specifics. 
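An illustrative call (V2 path; when fastPolicyName is None the method simply returns (False, None), per the body below)::

    isTieringPolicySupported, tierPolicyServiceInstanceName = (
        self._get_tiering_info(conn, storageSystemInstanceName,
                               fastPolicyName))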
:param conn: the ecom connection :param storageSystemInstanceName: storage system instance name :param fastPolicyName: the FAST policy name (can be None) :returns: boolean -- isTieringPolicySupported :returns: tierPolicyServiceInstanceName """ isTieringPolicySupported = False tierPolicyServiceInstanceName = None if fastPolicyName is not None: tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( conn, storageSystemInstanceName) isTieringPolicySupported = self.fast.is_tiering_policy_enabled( conn, tierPolicyServiceInstanceName) LOG.debug( "FAST policy enabled on %(storageSystem)s: %(isSupported)s.", {'storageSystem': storageSystemInstanceName, 'isSupported': isTieringPolicySupported}) return isTieringPolicySupported, tierPolicyServiceInstanceName def _last_volume_delete_masking_view( self, conn, controllerConfigService, mvInstanceName, maskingViewName, extraSpecs): """Delete the masking view. Delete the masking view if the volume is the last one in the storage group. :param conn: the ecom connection :param controllerConfigService: controller config service :param mvInstanceName: masking view instance name :param maskingViewName: masking view name :param extraSpecs: extra specifications """ LOG.debug( "Last volume in the storage group, deleting masking view " "%(maskingViewName)s.", {'maskingViewName': maskingViewName}) self._delete_masking_view( conn, controllerConfigService, maskingViewName, mvInstanceName, extraSpecs) mvInstance = self.utils.get_existing_instance( conn, mvInstanceName) if mvInstance: exceptionMessage = (_( "Masking view %(maskingViewName)s " "was not deleted successfully.") % {'maskingViewName': maskingViewName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) else: LOG.info(_LI( "Masking view %(maskingViewName)s successfully deleted."), {'maskingViewName': maskingViewName}) def _get_and_remove_rule_association( self, conn, fastPolicyName, isTieringPolicySupported, tierPolicyServiceInstanceName, storageSystemName, storageGroupInstanceName, extraSpecs): """Remove the storage group from the policy rule. :param conn: the ecom connection :param fastPolicyName: the fast policy name :param isTieringPolicySupported: boolean :param tierPolicyServiceInstanceName: the tier policy service instance name :param storageSystemName: storage system name :param storageGroupInstanceName: the storage group instance name :param extraSpecs: additional info """ # Disassociate storage group from FAST policy. if fastPolicyName is not None and isTieringPolicySupported: tierPolicyInstanceName = self.fast.get_tier_policy_by_name( conn, storageSystemName, fastPolicyName) LOG.debug( "Policy: %(policy)s, policy service: %(service)s, " "masking group: %(maskingGroup)s.", {'policy': tierPolicyInstanceName, 'service': tierPolicyServiceInstanceName, 'maskingGroup': storageGroupInstanceName}) self.fast.delete_storage_group_from_tier_policy_rule( conn, tierPolicyServiceInstanceName, storageGroupInstanceName, tierPolicyInstanceName, extraSpecs) def return_volume_to_default_storage_group_v3( self, conn, controllerConfigurationService, volumeInstance, volumeName, extraSpecs): """Return volume to the default storage group in v3. 
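The default V3 SG name is derived from the pool/SLO/workload extra specs, using the helper shown in the body below::

    storageGroupName = self.utils.get_v3_storage_group_name(
        extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO],
        extraSpecs[self.utils.WORKLOAD])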
:param conn: the ecom connection :param controllerConfigService: controller config service :param volumeInstance: volumeInstance :param volumeName: the volume name :param extraSpecs: additional info :raises: VolumeBackendAPIException """ storageGroupName = self.utils.get_v3_storage_group_name( extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], extraSpecs[self.utils.WORKLOAD]) storageGroupInstanceName = self.utils.find_storage_masking_group( conn, controllerConfigurationService, storageGroupName) if not storageGroupInstanceName: storageGroupInstanceName = ( self.provisionv3.create_storage_group_v3( conn, controllerConfigurationService, storageGroupName, extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], extraSpecs[self.utils.WORKLOAD], extraSpecs)) if not storageGroupInstanceName: errorMessage = (_("Failed to create storage group " "%(storageGroupName)s.") % {'storageGroupName': storageGroupName}) LOG.error(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) self._add_volume_to_sg_and_verify( conn, controllerConfigurationService, storageGroupInstanceName, volumeInstance, volumeName, storageGroupName, extraSpecs) def _cleanup_tiering( self, conn, controllerConfigService, fastPolicyName, volumeInstance, volumeName, extraSpecs): """Clean up tiering. :param conn: the ecom connection :param controllerConfigService: the controller configuration service :param fastPolicyName: the fast policy name :param volumeInstance: volume instance :param volumeName: the volume name :param extraSpecs: additional info """ defaultStorageGroupInstanceName = ( self.fast.get_policy_default_storage_group( conn, controllerConfigService, fastPolicyName)) volumeInstanceNames = self.get_devices_from_storage_group( conn, defaultStorageGroupInstanceName) LOG.debug( "Start: number of volumes in default storage group: %(numVol)d.", {'numVol': len(volumeInstanceNames)}) defaultStorageGroupInstanceName = ( self.fast.add_volume_to_default_storage_group_for_fast_policy( conn, controllerConfigService, volumeInstance, volumeName, fastPolicyName, extraSpecs)) # Check default storage group number of volumes. volumeInstanceNames = self.get_devices_from_storage_group( conn, defaultStorageGroupInstanceName) LOG.debug( "End: number of volumes in default storage group: %(numVol)d.", {'numVol': len(volumeInstanceNames)}) def get_target_wwns(self, conn, mvInstanceName): """Get the DA ports wwns. :param conn: the ecom connection :param mvInstanceName: masking view instance name :returns: list -- the list of target wwns for the masking view """ targetWwns = [] targetPortInstanceNames = conn.AssociatorNames( mvInstanceName, ResultClass='Symm_FCSCSIProtocolEndpoint') numberOfPorts = len(targetPortInstanceNames) if numberOfPorts <= 0: LOG.warning(_LW("No target ports found in " "masking view %(maskingView)s."), {'numPorts': len(targetPortInstanceNames), 'maskingView': mvInstanceName}) for targetPortInstanceName in targetPortInstanceNames: targetWwns.append(targetPortInstanceName['Name']) return targetWwns def get_masking_view_by_volume(self, conn, volumeInstance, connector): """Given volume, retrieve the masking view instance name. 
        :param conn: the ecom connection
        :param volumeInstance: the volume instance
        :param connector: the connector object
        :returns: masking view instance name
        """
        storageSystemName = volumeInstance['SystemName']
        controllerConfigService = (
            self.utils.find_controller_configuration_service(
                conn, storageSystemName))
        volumeName = volumeInstance['ElementName']
        mvInstanceName = (
            self._get_sg_or_mv_associated_with_initiator(
                conn, controllerConfigService, volumeInstance.path,
                volumeName, connector, False))
        return mvInstanceName

    def get_masking_views_by_port_group(self, conn, portGroupInstanceName):
        """Given a port group, retrieve the masking view instance names.

        :param conn: the ecom connection
        :param portGroupInstanceName: the instance name of the port group
        :returns: masking view instance names
        """
        mvInstanceNames = conn.AssociatorNames(
            portGroupInstanceName, ResultClass='Symm_LunMaskingView')
        return mvInstanceNames

    def get_masking_views_by_initiator_group(
            self, conn, initiatorGroupInstanceName):
        """Given an initiator group, retrieve the masking view instances.

        Retrieve the list of masking view instances associated with the
        initiator group instance name.

        :param conn: the ecom connection
        :param initiatorGroupInstanceName: the instance name of the
            initiator group
        :returns: list of masking view instance names
        """
        mvInstanceNames = conn.AssociatorNames(
            initiatorGroupInstanceName, ResultClass='Symm_LunMaskingView')
        return mvInstanceNames

    def get_port_group_from_masking_view(self, conn, maskingViewInstanceName):
        """Get the port group in a masking view.

        :param conn: the ecom connection
        :param maskingViewInstanceName: masking view instance name
        :returns: portGroupInstanceName or None if it is not found
        """
        portGroupInstanceNames = conn.AssociatorNames(
            maskingViewInstanceName, ResultClass='SE_TargetMaskingGroup')
        if len(portGroupInstanceNames) > 0:
            LOG.debug("Found port group %(pg)s in masking view %(mv)s.",
                      {'pg': portGroupInstanceNames[0],
                       'mv': maskingViewInstanceName})
            return portGroupInstanceNames[0]
        else:
            LOG.warning(_LW("No port group found in masking view %(mv)s."),
                        {'mv': maskingViewInstanceName})

    def get_initiator_group_from_masking_view(
            self, conn, maskingViewInstanceName):
        """Get the initiator group in a masking view.

        :param conn: the ecom connection
        :param maskingViewInstanceName: masking view instance name
        :returns: initiatorGroupInstanceName or None if it is not found
        """
        initiatorGroupInstanceNames = conn.AssociatorNames(
            maskingViewInstanceName, ResultClass='SE_InitiatorMaskingGroup')
        if len(initiatorGroupInstanceNames) > 0:
            LOG.debug("Found initiator group %(ig)s in masking view %(mv)s.",
                      {'ig': initiatorGroupInstanceNames[0],
                       'mv': maskingViewInstanceName})
            return initiatorGroupInstanceNames[0]
        else:
            LOG.warning(_LW("No initiator group found in masking view "
                            "%(mv)s."),
                        {'mv': maskingViewInstanceName})

    def _get_sg_or_mv_associated_with_initiator(
            self, conn, controllerConfigService, volumeInstanceName,
            volumeName, connector, getSG=True):
        """Get the storage group or masking view for a connector.

        If the connector is passed in, extra logic is required to get
        the storage group.
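
        Roughly, the matching works as follows (pseudocode sketch; the
        helper names here are illustrative, not real methods)::

            for sg in storage_groups_of(volume):
                mv = masking_view_of(sg)
                if initiator_group_of(mv) == initiator_group_of(connector):
                    return sg if getSG else mv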
        :param conn: the ecom connection
        :param controllerConfigService: the controller configuration service
        :param volumeInstanceName: volume instance name
        :param volumeName: volume element name
        :param connector: the connector object
        :param getSG: True to get the storage group; otherwise get the
            masking view
        :returns: foundInstanceName (can be None)
        """
        foundInstanceName = None
        initiatorNames = self._find_initiator_names(conn, connector)
        igInstanceNameFromConnector = self._find_initiator_masking_group(
            conn, controllerConfigService, initiatorNames)
        # Device can be shared by multi-SGs in a multi-host attach case.
        storageGroupInstanceNames = (
            self.get_associated_masking_groups_from_device(
                conn, volumeInstanceName))
        LOG.debug("Found the storage groups that volume "
                  "%(volumeName)s is in: %(storageGroups)s",
                  {'volumeName': volumeName,
                   'storageGroups': storageGroupInstanceNames})
        if storageGroupInstanceNames:  # Not empty.
            # Get the SG by IGs.
            for sgInstanceName in storageGroupInstanceNames:
                # Get masking view from storage group.
                mvInstanceName = self.get_masking_view_from_storage_group(
                    conn, sgInstanceName)
                LOG.debug("Found masking view associated with SG "
                          "%(storageGroup)s: %(maskingview)s",
                          {'maskingview': mvInstanceName,
                           'storageGroup': sgInstanceName})
                # Get initiator group from masking view.
                igInstanceName = (
                    self.get_initiator_group_from_masking_view(
                        conn, mvInstanceName))
                LOG.debug("Initiator group in masking view: %(ig)s. "
                          "IG associated with connector: "
                          "%(igFromConnector)s",
                          {'ig': igInstanceName,
                           'igFromConnector': igInstanceNameFromConnector})
                if igInstanceName == igInstanceNameFromConnector:
                    if getSG is True:
                        foundInstanceName = sgInstanceName
                        LOG.debug("Found the storage group associated with "
                                  "initiator %(initiator)s: "
                                  "%(storageGroup)s",
                                  {'initiator': initiatorNames,
                                   'storageGroup': foundInstanceName})
                    else:
                        foundInstanceName = mvInstanceName
                        LOG.debug("Found the masking view associated with "
                                  "initiator %(initiator)s: "
                                  "%(maskingview)s.",
                                  {'initiator': initiatorNames,
                                   'maskingview': foundInstanceName})
                    break
        return foundInstanceName

    def _remove_last_vol_and_delete_sg(self, conn, controllerConfigService,
                                       storageGroupInstanceName,
                                       storageGroupName, volumeInstanceName,
                                       volumeName, extraSpecs):
        """Remove the last volume and delete the storage group.

        :param conn: the ecom connection
        :param controllerConfigService: controller config service
        :param storageGroupInstanceName: storage group instance name
        :param storageGroupName: storage group name
        :param volumeInstanceName: volume instance name
        :param volumeName: volume name
        :param extraSpecs: additional info
        """
        self.provision.remove_device_from_storage_group(
            conn, controllerConfigService, storageGroupInstanceName,
            volumeInstanceName, volumeName, extraSpecs)
        LOG.debug(
            "Removal of the last volume %(volumeName)s completed "
            "successfully.",
            {'volumeName': volumeName})
        # Delete storage group.
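        # The delete below is verified by re-reading the instance:
        # utils.get_existing_instance() returns None once the CIM
        # object is gone, and any surviving instance is treated as a
        # failed delete.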
        self._delete_storage_group(conn, controllerConfigService,
                                   storageGroupInstanceName,
                                   storageGroupName, extraSpecs)
        storageGroupInstance = self.utils.get_existing_instance(
            conn, storageGroupInstanceName)
        if storageGroupInstance:
            exceptionMessage = (_(
                "Storage group %(storageGroupName)s "
                "was not deleted successfully")
                % {'storageGroupName': storageGroupName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)
        else:
            LOG.info(_LI(
                "Storage Group %(storageGroupName)s successfully deleted."),
                {'storageGroupName': storageGroupName})

    def _delete_storage_group(self, conn, controllerConfigService,
                              storageGroupInstanceName, storageGroupName,
                              extraSpecs):
        """Delete an empty storage group.

        :param conn: the ecom connection
        :param controllerConfigService: controller config service
        :param storageGroupInstanceName: storage group instance name
        :param storageGroupName: storage group name
        :param extraSpecs: extra specifications
        """
        rc, job = conn.InvokeMethod(
            'DeleteGroup',
            controllerConfigService,
            MaskingGroup=storageGroupInstanceName,
            Force=True)
        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Deleting Group: %(storageGroupName)s. "
                    "Return code: %(rc)lu. Error: %(error)s")
                    % {'storageGroupName': storageGroupName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

    def _delete_initiator_group(self, conn, controllerConfigService,
                                initiatorGroupInstanceName,
                                initiatorGroupName, extraSpecs):
        """Delete an initiator group.

        :param conn: connection to the ecom server
        :param controllerConfigService: controller config service
        :param initiatorGroupInstanceName: the initiator group instance name
        :param initiatorGroupName: initiator group name
        :param extraSpecs: extra specifications
        """
        rc, job = conn.InvokeMethod(
            'DeleteGroup',
            controllerConfigService,
            MaskingGroup=initiatorGroupInstanceName,
            Force=True)
        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Deleting Initiator Group: %(initiatorGroupName)s. "
                    "Return code: %(rc)lu. Error: %(error)s")
                    % {'initiatorGroupName': initiatorGroupName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        # Reaching this point means the group was deleted, either
        # immediately or once the asynchronous job completed.
        LOG.debug("Initiator group %(initiatorGroupName)s "
                  "was deleted successfully.",
                  {'initiatorGroupName': initiatorGroupName})

    def _delete_storage_hardware_id(self, conn,
                                    hardwareIdManagementService,
                                    hardwareIdPath):
        """Delete the given initiator path.

        Delete the initiator. Do not raise an exception or failure if
        the deletion fails for any reason.
        :param conn: connection to the ecom server
        :param hardwareIdManagementService: hardware id management service
        :param hardwareIdPath: the path of the initiator object
        """
        ret = conn.InvokeMethod('DeleteStorageHardwareID',
                                hardwareIdManagementService,
                                HardwareID=hardwareIdPath)
        if ret == 0:
            LOG.debug("Deletion of initiator path %(hardwareIdPath)s "
                      "succeeded.",
                      {'hardwareIdPath': hardwareIdPath})
        else:
            LOG.warning(_LW("Deletion of initiator path %(hardwareIdPath)s "
                            "failed."),
                        {'hardwareIdPath': hardwareIdPath})

    def _delete_initiators_from_initiator_group(self, conn,
                                                controllerConfigService,
                                                initiatorGroupInstanceName,
                                                initiatorGroupName):
        """Delete initiators.

        Delete all initiators associated with the initiator group
        instance. Clean up whatever is possible. This will not return a
        failure or raise an exception if a deletion fails for any
        reason.

        :param conn: connection to the ecom server
        :param controllerConfigService: controller config service
        :param initiatorGroupInstanceName: the initiator group instance name
        :param initiatorGroupName: the initiator group name
        """
        storageHardwareIdInstanceNames = (
            conn.AssociatorNames(initiatorGroupInstanceName,
                                 ResultClass='SE_StorageHardwareID'))
        if len(storageHardwareIdInstanceNames) == 0:
            LOG.debug("No initiators found in initiator group "
                      "%(initiatorGroupName)s.",
                      {'initiatorGroupName': initiatorGroupName})
            return
        storageSystemName = controllerConfigService['SystemName']
        hardwareIdManagementService = (
            self.utils.find_storage_hardwareid_service(conn,
                                                       storageSystemName))
        for storageHardwareIdInstanceName in storageHardwareIdInstanceNames:
            initiatorName = storageHardwareIdInstanceName['InstanceID']
            hardwareIdPath = storageHardwareIdInstanceName
            LOG.debug("Initiator %(initiatorName)s "
                      "will be deleted from the initiator group "
                      "%(initiatorGroupName)s. HardwareIdPath is "
                      "%(hardwareIdPath)s.",
                      {'initiatorName': initiatorName,
                       'initiatorGroupName': initiatorGroupName,
                       'hardwareIdPath': hardwareIdPath})
            self._delete_storage_hardware_id(conn,
                                             hardwareIdManagementService,
                                             hardwareIdPath)

    def _last_volume_delete_initiator_group(
            self, conn, controllerConfigService,
            initiatorGroupInstanceName, extraSpecs):
        """Delete the initiator group.

        Delete the initiator group if there are no masking views
        associated with it.

        :param conn: the ecom connection
        :param controllerConfigService: controller config service
        :param initiatorGroupInstanceName: initiator group instance name
        :param extraSpecs: extra specifications
        """
        maskingViewInstanceNames = self.get_masking_views_by_initiator_group(
            conn, initiatorGroupInstanceName)
        initiatorGroupInstance = conn.GetInstance(initiatorGroupInstanceName)
        initiatorGroupName = initiatorGroupInstance['ElementName']
        if len(maskingViewInstanceNames) == 0:
            LOG.debug(
                "Last volume is associated with the initiator group, "
                "deleting the associated initiator group "
                "%(initiatorGroupName)s.",
                {'initiatorGroupName': initiatorGroupName})
            self._delete_initiators_from_initiator_group(
                conn, controllerConfigService, initiatorGroupInstanceName,
                initiatorGroupName)
            self._delete_initiator_group(conn, controllerConfigService,
                                         initiatorGroupInstanceName,
                                         initiatorGroupName, extraSpecs)
        else:
            LOG.warning(_LW("Initiator group %(initiatorGroupName)s is "
                            "associated with masking views and can't be "
                            "deleted. Number of associated masking views "
                            "is: %(nmv)d."),
                        {'initiatorGroupName': initiatorGroupName,
                         'nmv': len(maskingViewInstanceNames)})

    def _create_hardware_ids(
            self, conn, initiatorNames, storageSystemName):
        """Create hardwareIds for initiator(s).
:param conn: the connection to the ecom server :param initiatorNames: the list of initiator names :param storageSystemName: the storage system name :returns: list -- foundHardwareIDsInstanceNames """ foundHardwareIDsInstanceNames = [] hardwareIdManagementService = ( self.utils.find_storage_hardwareid_service( conn, storageSystemName)) for initiatorName in initiatorNames: hardwareIdInstanceName = ( self.utils.create_storage_hardwareId_instance_name( conn, hardwareIdManagementService, initiatorName)) LOG.debug( "Created hardwareId Instance: %(hardwareIdInstanceName)s.", {'hardwareIdInstanceName': hardwareIdInstanceName}) foundHardwareIDsInstanceNames.append(hardwareIdInstanceName) return foundHardwareIDsInstanceNames def _get_port_group_name_from_mv(self, conn, maskingViewName, storageSystemName): """Get the port group name from the masking view. :param conn: the connection to the ecom server :param maskingViewName: the masking view name :param storageSystemName: the storage system name :returns: String - port group name String - error message """ errorMessage = None portGroupName = None portGroupInstanceName = ( self._get_port_group_from_masking_view( conn, maskingViewName, storageSystemName)) if portGroupInstanceName is None: LOG.error(_LE( "Cannot get port group from masking view: " "%(maskingViewName)s. "), {'maskingViewName': maskingViewName}) else: try: portGroupInstance = ( conn.GetInstance(portGroupInstanceName)) portGroupName = ( portGroupInstance['ElementName']) except Exception: LOG.error(_LE( "Cannot get port group name.")) return portGroupName, errorMessage cinder-8.0.0/cinder/volume/drivers/emc/__init__.py0000664000567000056710000000000012701406250023215 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/emc/scaleio.py0000664000567000056710000015042612701406250023117 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for EMC ScaleIO based on ScaleIO remote CLI. 
""" import base64 import binascii import json from os_brick.initiator import connector from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import requests import six from six.moves import urllib from cinder import context from cinder import exception from cinder.i18n import _, _LI, _LW, _LE from cinder.image import image_utils from cinder import utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import volume_types CONF = cfg.CONF LOG = logging.getLogger(__name__) scaleio_opts = [ cfg.StrOpt('sio_rest_server_port', default='443', help='REST server port.'), cfg.BoolOpt('sio_verify_server_certificate', default=False, help='Verify server certificate.'), cfg.StrOpt('sio_server_certificate_path', help='Server certificate path.'), cfg.BoolOpt('sio_round_volume_capacity', default=True, help='Round up volume capacity.'), cfg.BoolOpt('sio_unmap_volume_before_deletion', default=False, help='Unmap volume before deletion.'), cfg.StrOpt('sio_protection_domain_id', help='Protection Domain ID.'), cfg.StrOpt('sio_protection_domain_name', help='Protection Domain name.'), cfg.StrOpt('sio_storage_pools', help='Storage Pools.'), cfg.StrOpt('sio_storage_pool_name', help='Storage Pool name.'), cfg.StrOpt('sio_storage_pool_id', help='Storage Pool ID.') ] CONF.register_opts(scaleio_opts) STORAGE_POOL_NAME = 'sio:sp_name' STORAGE_POOL_ID = 'sio:sp_id' PROTECTION_DOMAIN_NAME = 'sio:pd_name' PROTECTION_DOMAIN_ID = 'sio:pd_id' PROVISIONING_KEY = 'sio:provisioning_type' IOPS_LIMIT_KEY = 'sio:iops_limit' BANDWIDTH_LIMIT = 'sio:bandwidth_limit' QOS_IOPS_LIMIT_KEY = 'maxIOPS' QOS_BANDWIDTH_LIMIT = 'maxBWS' BLOCK_SIZE = 8 OK_STATUS_CODE = 200 VOLUME_NOT_FOUND_ERROR = 79 VOLUME_NOT_MAPPED_ERROR = 84 VOLUME_ALREADY_MAPPED_ERROR = 81 class ScaleIODriver(driver.VolumeDriver): """EMC ScaleIO Driver.""" VERSION = "2.0" scaleio_qos_keys = (QOS_IOPS_LIMIT_KEY, QOS_BANDWIDTH_LIMIT) def __init__(self, *args, **kwargs): super(ScaleIODriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(scaleio_opts) self.server_ip = self.configuration.san_ip self.server_port = self.configuration.sio_rest_server_port self.server_username = self.configuration.san_login self.server_password = self.configuration.san_password self.server_token = None self.verify_server_certificate = ( self.configuration.sio_verify_server_certificate) self.server_certificate_path = None if self.verify_server_certificate: self.server_certificate_path = ( self.configuration.sio_server_certificate_path) LOG.info(_LI( "REST server IP: %(ip)s, port: %(port)s, username: %(" "user)s. 
" "Verify server's certificate: %(verify_cert)s."), {'ip': self.server_ip, 'port': self.server_port, 'user': self.server_username, 'verify_cert': self.verify_server_certificate}) self.storage_pools = None if self.configuration.sio_storage_pools: self.storage_pools = [ e.strip() for e in self.configuration.sio_storage_pools.split(',')] self.storage_pool_name = self.configuration.sio_storage_pool_name self.storage_pool_id = self.configuration.sio_storage_pool_id if self.storage_pool_name is None and self.storage_pool_id is None: LOG.warning(_LW("No storage pool name or id was found.")) else: LOG.info(_LI( "Storage pools names: %(pools)s, " "storage pool name: %(pool)s, pool id: %(pool_id)s."), {'pools': self.storage_pools, 'pool': self.storage_pool_name, 'pool_id': self.storage_pool_id}) self.protection_domain_name = ( self.configuration.sio_protection_domain_name) LOG.info(_LI( "Protection domain name: %(domain_name)s."), {'domain_name': self.protection_domain_name}) self.protection_domain_id = self.configuration.sio_protection_domain_id LOG.info(_LI( "Protection domain id: %(domain_id)s."), {'domain_id': self.protection_domain_id}) self.connector = connector.InitiatorConnector.factory( connector.SCALEIO, utils.get_root_helper(), device_scan_attempts= self.configuration.num_volume_device_scan_tries ) self.connection_properties = {} self.connection_properties['scaleIO_volname'] = None self.connection_properties['hostIP'] = None self.connection_properties['serverIP'] = self.server_ip self.connection_properties['serverPort'] = self.server_port self.connection_properties['serverUsername'] = self.server_username self.connection_properties['serverPassword'] = self.server_password self.connection_properties['serverToken'] = self.server_token self.connection_properties['iopsLimit'] = None self.connection_properties['bandwidthLimit'] = None def check_for_setup_error(self): if (not self.protection_domain_name and not self.protection_domain_id): LOG.warning(_LW("No protection domain name or id " "was specified in configuration.")) if self.protection_domain_name and self.protection_domain_id: msg = _("Cannot specify both protection domain name " "and protection domain id.") raise exception.InvalidInput(reason=msg) if not self.server_ip: msg = _("REST server IP must by specified.") raise exception.InvalidInput(reason=msg) if not self.server_username: msg = _("REST server username must by specified.") raise exception.InvalidInput(reason=msg) if not self.server_password: msg = _("REST server password must by specified.") raise exception.InvalidInput(reason=msg) if not self.verify_server_certificate: LOG.warning(_LW("Verify certificate is not set, using default of " "False.")) if self.verify_server_certificate and not self.server_certificate_path: msg = _("Path to REST server's certificate must be specified.") raise exception.InvalidInput(reason=msg) if self.storage_pool_name and self.storage_pool_id: msg = _("Cannot specify both storage pool name and storage " "pool id.") raise exception.InvalidInput(reason=msg) if not self.storage_pool_name and not self.storage_pool_id: msg = _("Must specify storage pool name or id.") raise exception.InvalidInput(reason=msg) if not self.storage_pools: msg = (_("Must specify storage pools. Option: " "sio_storage_pools.")) raise exception.InvalidInput(reason=msg) def _find_storage_pool_id_from_storage_type(self, storage_type): # Default to what was configured in configuration file if not defined. 
return storage_type.get(STORAGE_POOL_ID, self.storage_pool_id) def _find_storage_pool_name_from_storage_type(self, storage_type): return storage_type.get(STORAGE_POOL_NAME, self.storage_pool_name) def _find_protection_domain_id_from_storage_type(self, storage_type): # Default to what was configured in configuration file if not defined. return storage_type.get(PROTECTION_DOMAIN_ID, self.protection_domain_id) def _find_protection_domain_name_from_storage_type(self, storage_type): # Default to what was configured in configuration file if not defined. return storage_type.get(PROTECTION_DOMAIN_NAME, self.protection_domain_name) def _find_provisioning_type(self, storage_type): return storage_type.get(PROVISIONING_KEY) def _find_limit(self, storage_type, qos_key, extraspecs_key): qos_limit = storage_type.get(qos_key) extraspecs_limit = storage_type.get(extraspecs_key) if extraspecs_limit is not None: if qos_limit is not None: LOG.warning(_LW("QoS specs are overriding extra_specs.")) else: LOG.info(_LI("Using extra_specs for defining QoS specs " "will be deprecated in the N release " "of OpenStack. Please use QoS specs.")) return qos_limit if qos_limit is not None else extraspecs_limit def _id_to_base64(self, id): # Base64 encode the id to get a volume name less than 32 characters due # to ScaleIO limitation. name = six.text_type(id).replace("-", "") try: name = base64.b16decode(name.upper()) except (TypeError, binascii.Error): pass encoded_name = name if isinstance(encoded_name, six.text_type): encoded_name = encoded_name.encode('utf-8') encoded_name = base64.b64encode(encoded_name) if six.PY3: encoded_name = encoded_name.decode('ascii') LOG.debug("Converted id %(id)s to scaleio name %(name)s.", {'id': id, 'name': encoded_name}) return encoded_name def create_volume(self, volume): """Creates a scaleIO volume.""" self._check_volume_size(volume.size) volname = self._id_to_base64(volume.id) storage_type = self._get_volumetype_extraspecs(volume) storage_pool_name = self._find_storage_pool_name_from_storage_type( storage_type) storage_pool_id = self._find_storage_pool_id_from_storage_type( storage_type) protection_domain_id = ( self._find_protection_domain_id_from_storage_type(storage_type)) protection_domain_name = ( self._find_protection_domain_name_from_storage_type(storage_type)) provisioning_type = self._find_provisioning_type(storage_type) LOG.info(_LI( "Volume type: %(volume_type)s, " "storage pool name: %(pool_name)s, " "storage pool id: %(pool_id)s, protection domain id: " "%(domain_id)s, protection domain name: %(domain_name)s."), {'volume_type': storage_type, 'pool_name': storage_pool_name, 'pool_id': storage_pool_id, 'domain_id': protection_domain_id, 'domain_name': protection_domain_name}) verify_cert = self._get_verify_cert() if storage_pool_name: self.storage_pool_name = storage_pool_name self.storage_pool_id = None if storage_pool_id: self.storage_pool_id = storage_pool_id self.storage_pool_name = None if protection_domain_name: self.protection_domain_name = protection_domain_name self.protection_domain_id = None if protection_domain_id: self.protection_domain_id = protection_domain_id self.protection_domain_name = None domain_id = self.protection_domain_id if not domain_id: if not self.protection_domain_name: msg = _("Must specify protection domain name or" " protection domain id.") raise exception.VolumeBackendAPIException(data=msg) domain_name = self.protection_domain_name encoded_domain_name = urllib.parse.quote(domain_name, '') req_vars = {'server_ip': self.server_ip, 
'server_port': self.server_port, 'encoded_domain_name': encoded_domain_name} request = ("https://%(server_ip)s:%(server_port)s" "/api/types/Domain/instances/getByName::" "%(encoded_domain_name)s") % req_vars LOG.info(_LI("ScaleIO get domain id by name request: %s."), request) r = requests.get( request, auth=( self.server_username, self.server_token), verify=verify_cert) r = self._check_response(r, request) domain_id = r.json() if not domain_id: msg = (_("Domain with name %s wasn't found.") % self.protection_domain_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if r.status_code != OK_STATUS_CODE and "errorCode" in domain_id: msg = (_("Error getting domain id from name %(name)s: %(id)s.") % {'name': self.protection_domain_name, 'id': domain_id['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("Domain id is %s."), domain_id) pool_name = self.storage_pool_name pool_id = self.storage_pool_id if pool_name: encoded_domain_name = urllib.parse.quote(pool_name, '') req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port, 'domain_id': domain_id, 'encoded_domain_name': encoded_domain_name} request = ("https://%(server_ip)s:%(server_port)s" "/api/types/Pool/instances/getByName::" "%(domain_id)s,%(encoded_domain_name)s") % req_vars LOG.info(_LI("ScaleIO get pool id by name request: %s."), request) r = requests.get( request, auth=( self.server_username, self.server_token), verify=verify_cert) pool_id = r.json() if not pool_id: msg = (_("Pool with name %(pool_name)s wasn't found in " "domain %(domain_id)s.") % {'pool_name': pool_name, 'domain_id': domain_id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if r.status_code != OK_STATUS_CODE and "errorCode" in pool_id: msg = (_("Error getting pool id from name %(pool_name)s: " "%(err_msg)s.") % {'pool_name': pool_name, 'err_msg': pool_id['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("Pool id is %s."), pool_id) if provisioning_type == 'thin': provisioning = "ThinProvisioned" # Default volume type is thick. 
else: provisioning = "ThickProvisioned" # units.Mi = 1024 ** 2 volume_size_kb = volume.size * units.Mi params = {'protectionDomainId': domain_id, 'volumeSizeInKb': six.text_type(volume_size_kb), 'name': volname, 'volumeType': provisioning, 'storagePoolId': pool_id} LOG.info(_LI("Params for add volume request: %s."), params) r = requests.post( "https://" + self.server_ip + ":" + self.server_port + "/api/types/Volume/instances", data=json.dumps(params), headers=self._get_headers(), auth=( self.server_username, self.server_token), verify=verify_cert) response = r.json() LOG.info(_LI("Add volume response: %s"), response) if r.status_code != OK_STATUS_CODE and "errorCode" in response: msg = (_("Error creating volume: %s.") % response['message']) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."), {'volname': volname, 'volid': volume.id}) return {'provider_id': response['id']} def _check_volume_size(self, size): if size % 8 != 0: round_volume_capacity = ( self.configuration.sio_round_volume_capacity) if not round_volume_capacity: exception_msg = (_( "Cannot create volume of size %s: " "not multiple of 8GB.") % size) LOG.error(exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg) def create_snapshot(self, snapshot): """Creates a scaleio snapshot.""" volume_id = snapshot.volume.provider_id snapname = self._id_to_base64(snapshot.id) return self._snapshot_volume(volume_id, snapname) def _snapshot_volume(self, vol_id, snapname): LOG.info(_LI("Snapshot volume %(vol)s into snapshot %(id)s.") % {'vol': vol_id, 'id': snapname}) params = { 'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]} req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port} request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/System/action/snapshotVolumes") % req_vars r, response = self._execute_scaleio_post_request(params, request) LOG.info(_LI("Snapshot volume response: %s."), response) if r.status_code != OK_STATUS_CODE and "errorCode" in response: msg = (_("Failed creating snapshot for volume %(volname)s: " "%(response)s.") % {'volname': vol_id, 'response': response['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return {'provider_id': response['volumeIdList'][0]} def _execute_scaleio_post_request(self, params, request): r = requests.post( request, data=json.dumps(params), headers=self._get_headers(), auth=( self.server_username, self.server_token), verify=self._get_verify_cert()) r = self._check_response(r, request, False, params) response = r.json() return r, response def _check_response(self, response, request, is_get_request=True, params=None): if response.status_code == 401 or response.status_code == 403: LOG.info(_LI("Token is invalid, going to re-login and get " "a new one.")) login_request = ( "https://" + self.server_ip + ":" + self.server_port + "/api/login") verify_cert = self._get_verify_cert() r = requests.get( login_request, auth=( self.server_username, self.server_password), verify=verify_cert) token = r.json() self.server_token = token # Repeat request with valid token. 
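            # Note: the ScaleIO gateway expects the session token in
            # place of the password for HTTP basic auth, which is why
            # the retry below authenticates with
            # (server_username, server_token).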
LOG.info(_LI( "Going to perform request again %s with valid token."), request) if is_get_request: res = requests.get(request, auth=(self.server_username, self.server_token), verify=verify_cert) else: res = requests.post(request, data=json.dumps(params), headers=self._get_headers(), auth=(self.server_username, self.server_token), verify=verify_cert) return res return response def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" # We interchange 'volume' and 'snapshot' because in ScaleIO # snapshot is a volume: once a snapshot is generated it # becomes a new unmapped volume in the system and the user # may manipulate it in the same manner as any other volume # exposed by the system volume_id = snapshot.provider_id snapname = self._id_to_base64(volume.id) LOG.info(_LI( "ScaleIO create volume from snapshot: snapshot %(snapname)s " "to volume %(volname)s."), {'volname': volume_id, 'snapname': snapname}) return self._snapshot_volume(volume_id, snapname) def _get_headers(self): return {'content-type': 'application/json'} def _get_verify_cert(self): verify_cert = False if self.verify_server_certificate: verify_cert = self.server_certificate_path return verify_cert def extend_volume(self, volume, new_size): """Extends the size of an existing available ScaleIO volume. This action will round up the volume to the nearest size that is a granularity of 8 GBs. """ return self._extend_volume(volume['provider_id'], volume.size, new_size) def _extend_volume(self, volume_id, old_size, new_size): vol_id = volume_id LOG.info(_LI( "ScaleIO extend volume: volume %(volname)s to size %(new_size)s."), {'volname': vol_id, 'new_size': new_size}) req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port, 'vol_id': vol_id} request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(vol_id)s" "/action/setVolumeSize") % req_vars LOG.info(_LI("Change volume capacity request: %s."), request) # Round up the volume size so that it is a granularity of 8 GBs # because ScaleIO only supports volumes with a granularity of 8 GBs. volume_new_size = self._round_to_8_gran(new_size) volume_real_old_size = self._round_to_8_gran(old_size) if volume_real_old_size == volume_new_size: return round_volume_capacity = self.configuration.sio_round_volume_capacity if (not round_volume_capacity and not new_size % 8 == 0): LOG.warning(_LW("ScaleIO only supports volumes with a granularity " "of 8 GBs. 
The new volume size is: %d."), volume_new_size) params = {'sizeInGB': six.text_type(volume_new_size)} r, response = self._execute_scaleio_post_request(params, request) if r.status_code != OK_STATUS_CODE: response = r.json() msg = (_("Error extending volume %(vol)s: %(err)s.") % {'vol': vol_id, 'err': response['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _round_to_8_gran(self, size): if size % 8 == 0: return size return size + 8 - (size % 8) def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume.""" volume_id = src_vref['provider_id'] snapname = self._id_to_base64(volume.id) LOG.info(_LI( "ScaleIO create cloned volume: source volume %(src)s to " "target volume %(tgt)s."), {'src': volume_id, 'tgt': snapname}) ret = self._snapshot_volume(volume_id, snapname) if volume.size > src_vref.size: self._extend_volume(ret['provider_id'], src_vref.size, volume.size) return ret def delete_volume(self, volume): """Deletes a self.logical volume""" volume_id = volume['provider_id'] self._delete_volume(volume_id) def _delete_volume(self, vol_id): verify_cert = self._get_verify_cert() req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port, 'vol_id': six.text_type(vol_id)} unmap_before_delete = ( self.configuration.sio_unmap_volume_before_deletion) # Ensure that the volume is not mapped to any SDC before deletion in # case unmap_before_deletion is enabled. if unmap_before_delete: params = {'allSdcs': ''} request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(vol_id)s" "/action/removeMappedSdc") % req_vars LOG.info(_LI( "Trying to unmap volume from all sdcs" " before deletion: %s."), request) r = requests.post( request, data=json.dumps(params), headers=self._get_headers(), auth=( self.server_username, self.server_token), verify=verify_cert ) r = self._check_response(r, request, False, params) LOG.debug("Unmap volume response: %s.", r.text) params = {'removeMode': 'ONLY_ME'} request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(vol_id)s" "/action/removeVolume") % req_vars r = requests.post( request, data=json.dumps(params), headers=self._get_headers(), auth=(self.server_username, self.server_token), verify=verify_cert ) r = self._check_response(r, request, False, params) if r.status_code != OK_STATUS_CODE: response = r.json() error_code = response['errorCode'] if error_code == VOLUME_NOT_FOUND_ERROR: LOG.warning(_LW( "Ignoring error in delete volume %s:" " Volume not found."), vol_id) elif vol_id is None: LOG.warning(_LW( "Volume does not have provider_id thus does not " "map to a ScaleIO volume. " "Allowing deletion to proceed.")) else: msg = (_("Error deleting volume %(vol)s: %(err)s.") % {'vol': vol_id, 'err': response['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def delete_snapshot(self, snapshot): """Deletes a ScaleIO snapshot.""" snap_id = snapshot.provider_id LOG.info(_LI("ScaleIO delete snapshot.")) return self._delete_volume(snap_id) def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The scaleio driver returns a driver_volume_type of 'scaleio'. 
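
        Example return value (illustrative values; the keys mirror
        self.connection_properties)::

            {
                'driver_volume_type': 'scaleio',
                'data': {
                    'scaleIO_volname': 'ZmQ0NmIyNTI=',
                    'hostIP': None,
                    'serverIP': '10.0.0.1',
                    'serverPort': '443',
                    'serverUsername': 'admin',
                    'serverPassword': 'xxxxxxxx',
                    'serverToken': 'yyyyyyyy',
                    'iopsLimit': None,
                    'bandwidthLimit': None
                }
            }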
""" LOG.debug("Connector is %s.", connector) connection_properties = dict(self.connection_properties) volname = self._id_to_base64(volume.id) connection_properties['scaleIO_volname'] = volname extra_specs = self._get_volumetype_extraspecs(volume) qos_specs = self._get_volumetype_qos(volume) storage_type = extra_specs.copy() storage_type.update(qos_specs) LOG.info(_LI("Volume type is %s."), storage_type) iops_limit = self._find_limit(storage_type, QOS_IOPS_LIMIT_KEY, IOPS_LIMIT_KEY) LOG.info(_LI("iops limit is: %s."), iops_limit) bandwidth_limit = self._find_limit(storage_type, QOS_BANDWIDTH_LIMIT, BANDWIDTH_LIMIT) LOG.info(_LI("Bandwidth limit is: %s."), bandwidth_limit) connection_properties['iopsLimit'] = iops_limit connection_properties['bandwidthLimit'] = bandwidth_limit return {'driver_volume_type': 'scaleio', 'data': connection_properties} def terminate_connection(self, volume, connector, **kwargs): LOG.debug("scaleio driver terminate connection.") def _update_volume_stats(self): stats = {} backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or 'scaleio' stats['vendor_name'] = 'EMC' stats['driver_version'] = self.VERSION stats['storage_protocol'] = 'scaleio' stats['total_capacity_gb'] = 'unknown' stats['free_capacity_gb'] = 'unknown' stats['reserved_percentage'] = 0 stats['QoS_support'] = True stats['consistencygroup_support'] = True pools = [] verify_cert = self._get_verify_cert() max_free_capacity = 0 total_capacity = 0 for sp_name in self.storage_pools: splitted_name = sp_name.split(':') domain_name = splitted_name[0] pool_name = splitted_name[1] LOG.debug("domain name is %(domain)s, pool name is %(pool)s.", {'domain': domain_name, 'pool': pool_name}) # Get domain id from name. encoded_domain_name = urllib.parse.quote(domain_name, '') req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port, 'encoded_domain_name': encoded_domain_name} request = ("https://%(server_ip)s:%(server_port)s" "/api/types/Domain/instances/getByName::" "%(encoded_domain_name)s") % req_vars LOG.info(_LI("ScaleIO get domain id by name request: %s."), request) LOG.info(_LI("username: %(username)s, verify_cert: %(verify)s."), {'username': self.server_username, 'verify': verify_cert}) r = requests.get( request, auth=( self.server_username, self.server_token), verify=verify_cert) r = self._check_response(r, request) LOG.info(_LI("Get domain by name response: %s"), r.text) domain_id = r.json() if not domain_id: msg = (_("Domain with name %s wasn't found.") % self.protection_domain_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if r.status_code != OK_STATUS_CODE and "errorCode" in domain_id: msg = (_("Error getting domain id from name %(name)s: " "%(err)s.") % {'name': self.protection_domain_name, 'err': domain_id['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("Domain id is %s."), domain_id) # Get pool id from name. 
encoded_pool_name = urllib.parse.quote(pool_name, '') req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port, 'domain_id': domain_id, 'encoded_pool_name': encoded_pool_name} request = ("https://%(server_ip)s:%(server_port)s" "/api/types/Pool/instances/getByName::" "%(domain_id)s,%(encoded_pool_name)s") % req_vars LOG.info(_LI("ScaleIO get pool id by name request: %s."), request) r = requests.get( request, auth=( self.server_username, self.server_token), verify=verify_cert) pool_id = r.json() if not pool_id: msg = (_("Pool with name %(pool)s wasn't found in domain " "%(domain)s.") % {'pool': pool_name, 'domain': domain_id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if r.status_code != OK_STATUS_CODE and "errorCode" in pool_id: msg = (_("Error getting pool id from name %(pool)s: " "%(err)s.") % {'pool': pool_name, 'err': pool_id['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("Pool id is %s."), pool_id) req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port} request = ("https://%(server_ip)s:%(server_port)s" "/api/types/StoragePool/instances/action/" "querySelectedStatistics") % req_vars params = {'ids': [pool_id], 'properties': [ "capacityInUseInKb", "capacityLimitInKb"]} r = requests.post( request, data=json.dumps(params), headers=self._get_headers(), auth=( self.server_username, self.server_token), verify=verify_cert) response = r.json() LOG.info(_LI("Query capacity stats response: %s."), response) for res in response.values(): capacityInUse = res['capacityInUseInKb'] capacityLimit = res['capacityLimitInKb'] total_capacity_gb = capacityLimit / units.Mi used_capacity_gb = capacityInUse / units.Mi free_capacity_gb = total_capacity_gb - used_capacity_gb LOG.info(_LI( "free capacity of pool %(pool)s is: %(free)s, " "total capacity: %(total)s."), {'pool': pool_name, 'free': free_capacity_gb, 'total': total_capacity_gb}) pool = {'pool_name': sp_name, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'QoS_support': True, 'consistencygroup_support': True, 'reserved_percentage': 0 } pools.append(pool) if free_capacity_gb > max_free_capacity: max_free_capacity = free_capacity_gb total_capacity = total_capacity + total_capacity_gb # Use zero capacities here so we always use a pool. stats['total_capacity_gb'] = total_capacity stats['free_capacity_gb'] = max_free_capacity LOG.info(_LI( "Free capacity for backend is: %(free)s, total capacity: " "%(total)s."), {'free': max_free_capacity, 'total': total_capacity}) stats['pools'] = pools LOG.info(_LI("Backend name is %s."), stats["volume_backend_name"]) self._stats = stats def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. 
""" if refresh: self._update_volume_stats() return self._stats def _get_volumetype_extraspecs(self, volume): specs = {} ctxt = context.get_admin_context() type_id = volume['volume_type_id'] if type_id: volume_type = volume_types.get_volume_type(ctxt, type_id) specs = volume_type.get('extra_specs') for key, value in specs.items(): specs[key] = value return specs def _get_volumetype_qos(self, volume): qos = {} ctxt = context.get_admin_context() type_id = volume['volume_type_id'] if type_id: volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is not None: specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] else: specs = {} for key, value in specs.items(): if key in self.scaleio_qos_keys: qos[key] = value return qos def _sio_attach_volume(self, volume): """Call connector.connect_volume() and return the path. """ LOG.debug("Calling os-brick to attach ScaleIO volume.") connection_properties = dict(self.connection_properties) connection_properties['scaleIO_volname'] = self._id_to_base64( volume.id) device_info = self.connector.connect_volume(connection_properties) return device_info['path'] def _sio_detach_volume(self, volume): """Call the connector.disconnect() """ LOG.info(_LI("Calling os-brick to detach ScaleIO volume.")) connection_properties = dict(self.connection_properties) connection_properties['scaleIO_volname'] = self._id_to_base64( volume.id) self.connector.disconnect_volume(connection_properties, volume) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" LOG.info(_LI( "ScaleIO copy_image_to_volume volume: %(vol)s image service: " "%(service)s image id: %(id)s."), {'vol': volume, 'service': six.text_type(image_service), 'id': six.text_type(image_id)}) try: image_utils.fetch_to_raw(context, image_service, image_id, self._sio_attach_volume(volume), BLOCK_SIZE, size=volume['size']) finally: self._sio_detach_volume(volume) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" LOG.info(_LI( "ScaleIO copy_volume_to_image volume: %(vol)s image service: " "%(service)s image meta: %(meta)s."), {'vol': volume, 'service': six.text_type(image_service), 'meta': six.text_type(image_meta)}) try: image_utils.upload_volume(context, image_service, image_meta, self._sio_attach_volume(volume)) finally: self._sio_detach_volume(volume) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return the update from ScaleIO migrated volume. This method updates the volume name of the new ScaleIO volume to match the updated volume ID. The original volume is renamed first since ScaleIO does not allow multiple volumes to have the same name. """ name_id = None location = None if original_volume_status == 'available': # During migration, a new volume is created and will replace # the original volume at the end of the migration. We need to # rename the new volume. The current_name of the new volume, # which is the id of the new volume, will be changed to the # new_name, which is the id of the original volume. 
current_name = new_volume['id'] new_name = volume['id'] vol_id = new_volume['provider_id'] LOG.info(_LI("Renaming %(id)s from %(current_name)s to " "%(new_name)s."), {'id': vol_id, 'current_name': current_name, 'new_name': new_name}) # Original volume needs to be renamed first self._rename_volume(volume, "ff" + new_name) self._rename_volume(new_volume, new_name) else: # The back-end will not be renamed. name_id = new_volume['_name_id'] or new_volume['id'] location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': location} def _rename_volume(self, volume, new_id): new_name = self._id_to_base64(new_id) vol_id = volume['provider_id'] req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port, 'id': vol_id} request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(id)s/action/setVolumeName" % req_vars) LOG.info(_LI("ScaleIO rename volume request: %s."), request) params = {'newName': new_name} r = requests.post( request, data=json.dumps(params), headers=self._get_headers(), auth=(self.server_username, self.server_token), verify=self._get_verify_cert() ) r = self._check_response(r, request, False, params) if r.status_code != OK_STATUS_CODE: response = r.json() msg = (_("Error renaming volume %(vol)s: %(err)s.") % {'vol': vol_id, 'err': response['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: LOG.info(_LI("ScaleIO volume %(vol)s was renamed to " "%(new_name)s."), {'vol': vol_id, 'new_name': new_name}) def manage_existing(self, volume, existing_ref): """Manage an existing ScaleIO volume. existing_ref is a dictionary of the form: {'source-id': } """ request = self._create_scaleio_get_volume_request(volume, existing_ref) r, response = self._execute_scaleio_get_request(request) LOG.info(_LI("Get Volume response: %s"), response) self._manage_existing_check_legal_response(r, existing_ref) if response['mappedSdcInfo'] is not None: reason = _("manage_existing cannot manage a volume " "connected to hosts. Please disconnect this volume " "from existing hosts before importing") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason ) return {'provider_id': response['id']} def manage_existing_get_size(self, volume, existing_ref): request = self._create_scaleio_get_volume_request(volume, existing_ref) r, response = self._execute_scaleio_get_request(request) LOG.info(_LI("Get Volume response: %s"), response) self._manage_existing_check_legal_response(r, existing_ref) return int(response['sizeInKb'] / units.Mi) def _execute_scaleio_get_request(self, request): r = requests.get( request, auth=( self.server_username, self.server_token), verify=self._get_verify_cert()) r = self._check_response(r, request) response = r.json() return r, response def _create_scaleio_get_volume_request(self, volume, existing_ref): """Throws an exception if the input is invalid for manage existing. if the input is valid - return a request. 
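
        For example (hypothetical ScaleIO volume id)::

            {'source-id': '659c3b1700000002'}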
""" type_id = volume.get('volume_type_id') if 'source-id' not in existing_ref: reason = _("Reference must contain source-id.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason ) if type_id is None: reason = _("Volume must have a volume type") raise exception.ManageExistingVolumeTypeMismatch( existing_ref=existing_ref, reason=reason ) vol_id = existing_ref['source-id'] req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port, 'id': vol_id} request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(id)s" % req_vars) LOG.info(_LI("ScaleIO get volume by id request: %s."), request) return request def _manage_existing_check_legal_response(self, response, existing_ref): if response.status_code != OK_STATUS_CODE: reason = (_("Error managing volume: %s.") % response.json()[ 'message']) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason ) def create_consistencygroup(self, context, group): """Creates a consistency group. ScaleIO won't create CG until cg-snapshot creation, db will maintain the volumes and CG relationship. """ LOG.info(_LI("Creating Consistency Group")) model_update = {'status': 'available'} return model_update def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group. ScaleIO will delete the volumes of the CG. """ LOG.info(_LI("Deleting Consistency Group")) model_update = {'status': 'deleted'} error_statuses = ['error', 'error_deleting'] volumes_model_update = [] for volume in volumes: try: self._delete_volume(volume['provider_id']) update_item = {'id': volume['id'], 'status': 'deleted'} volumes_model_update.append(update_item) except exception.VolumeBackendAPIException as err: update_item = {'id': volume['id'], 'status': 'error_deleting'} volumes_model_update.append(update_item) if model_update['status'] not in error_statuses: model_update['status'] = 'error_deleting' LOG.error(_LE("Failed to delete the volume %(vol)s of CG. 
" "Exception: %(exception)s."), {'vol': volume['name'], 'exception': err}) return model_update, volumes_model_update def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" get_scaleio_snapshot_params = lambda snapshot: { 'volumeId': snapshot.volume['provider_id'], 'snapshotName': self._id_to_base64(snapshot['id'])} snapshotDefs = list(map(get_scaleio_snapshot_params, snapshots)) r, response = self._snapshot_volume_group(snapshotDefs) LOG.info(_LI("Snapshot volume response: %s."), response) if r.status_code != OK_STATUS_CODE and "errorCode" in response: msg = (_("Failed creating snapshot for group: " "%(response)s.") % {'response': response['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) snapshot_model_update = [] for snapshot, scaleio_id in zip(snapshots, response['volumeIdList']): update_item = {'id': snapshot['id'], 'status': 'available', 'provider_id': scaleio_id} snapshot_model_update.append(update_item) model_update = {'status': 'available'} return model_update, snapshot_model_update def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" error_statuses = ['error', 'error_deleting'] model_update = {'status': cgsnapshot['status']} snapshot_model_update = [] for snapshot in snapshots: try: self._delete_volume(snapshot.provider_id) update_item = {'id': snapshot['id'], 'status': 'deleted'} snapshot_model_update.append(update_item) except exception.VolumeBackendAPIException as err: update_item = {'id': snapshot['id'], 'status': 'error_deleting'} snapshot_model_update.append(update_item) if model_update['status'] not in error_statuses: model_update['status'] = 'error_deleting' LOG.error(_LE("Failed to delete the snapshot %(snap)s " "of cgsnapshot: %(cgsnapshot_id)s. " "Exception: %(exception)s."), {'snap': snapshot['name'], 'exception': err, 'cgsnapshot_id': cgsnapshot.id}) model_update['status'] = 'deleted' return model_update, snapshot_model_update def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistency group from a source.""" get_scaleio_snapshot_params = lambda src_volume, trg_volume: { 'volumeId': src_volume['provider_id'], 'snapshotName': self._id_to_base64(trg_volume['id'])} if cgsnapshot and snapshots: snapshotDefs = map(get_scaleio_snapshot_params, snapshots, volumes) else: snapshotDefs = map(get_scaleio_snapshot_params, source_vols, volumes) r, response = self._snapshot_volume_group(list(snapshotDefs)) LOG.info(_LI("Snapshot volume response: %s."), response) if r.status_code != OK_STATUS_CODE and "errorCode" in response: msg = (_("Failed creating snapshot for group: " "%(response)s.") % {'response': response['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volumes_model_update = [] for volume, scaleio_id in zip(volumes, response['volumeIdList']): update_item = {'id': volume['id'], 'status': 'available', 'provider_id': scaleio_id} volumes_model_update.append(update_item) model_update = {'status': 'available'} return model_update, volumes_model_update def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Update a consistency group. ScaleIO does not handle volume grouping. Cinder maintains volumes and CG relationship. 
""" return None, None, None def _snapshot_volume_group(self, snapshotDefs): LOG.info(_LI("ScaleIO snapshot group of volumes")) params = {'snapshotDefs': snapshotDefs} req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port} request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/System/action/snapshotVolumes") % req_vars return self._execute_scaleio_post_request(params, request) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" pass def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_iscsi.py0000664000567000056710000004013312701406250024462 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ISCSI Drivers for EMC VMAX arrays based on SMI-S. """ import os from oslo_log import log as logging import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.volume import driver from cinder.volume.drivers.emc import emc_vmax_common LOG = logging.getLogger(__name__) CINDER_CONF = '/etc/cinder/cinder.conf' class EMCVMAXISCSIDriver(driver.ISCSIDriver): """EMC ISCSI Drivers for VMAX using SMI-S. Version history: 1.0.0 - Initial driver 1.1.0 - Multiple pools and thick/thin provisioning, performance enhancement. 2.0.0 - Add driver requirement functions 2.1.0 - Add consistency group functions 2.1.1 - Fixed issue with mismatched config (bug #1442376) 2.1.2 - Clean up failed clones (bug #1440154) 2.1.3 - Fixed a problem with FAST support (bug #1435069) 2.2.0 - Add manage/unmanage 2.2.1 - Support for SE 8.0.3 2.2.2 - Update Consistency Group 2.2.3 - Pool aware scheduler(multi-pool) support 2.2.4 - Create CG from CG snapshot 2.3.0 - Name change for MV and SG for FAST (bug #1515181) - Fix for randomly choosing port group. 
(bug #1501919) - get_short_host_name needs to be called in find_device_number (bug #1520635) - Proper error handling for invalid SLOs (bug #1512795) - Extend Volume for VMAX3, SE8.1.0.3 https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume - Incorrect SG selected on an attach (#1515176) - Cleanup Zoning (bug #1501938) NOTE: FC only - Last volume in SG fix - _remove_last_vol_and_delete_sg is not being called for VMAX3 (bug #1520549) - necessary updates for CG changes (#1534616) - Changing PercentSynced to CopyState (bug #1517103) - Getting iscsi ip from port in existing masking view - Replacement of EMCGetTargetEndpoints api (bug #1512791) - VMAX3 snapvx improvements (bug #1522821) """ VERSION = "2.3.0" def __init__(self, *args, **kwargs): super(EMCVMAXISCSIDriver, self).__init__(*args, **kwargs) self.common = ( emc_vmax_common.EMCVMAXCommon('iSCSI', self.VERSION, configuration=self.configuration)) self.iscsi_ip_addresses = [] def check_for_setup_error(self): pass def create_volume(self, volume): """Creates a VMAX volume.""" volpath = self.common.create_volume(volume) model_update = {} volume['provider_location'] = six.text_type(volpath) model_update['provider_location'] = volume['provider_location'] return model_update def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" volpath = self.common.create_volume_from_snapshot(volume, snapshot) model_update = {} volume['provider_location'] = six.text_type(volpath) model_update['provider_location'] = volume['provider_location'] return model_update def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume.""" volpath = self.common.create_cloned_volume(volume, src_vref) model_update = {} volume['provider_location'] = six.text_type(volpath) model_update['provider_location'] = volume['provider_location'] return model_update def delete_volume(self, volume): """Deletes an EMC volume.""" self.common.delete_volume(volume) def create_snapshot(self, snapshot): """Creates a snapshot.""" ctxt = context.get_admin_context() volumename = snapshot['volume_name'] index = volumename.index('-') volumeid = volumename[index + 1:] volume = self.db.volume_get(ctxt, volumeid) volpath = self.common.create_snapshot(snapshot, volume) model_update = {} snapshot['provider_location'] = six.text_type(volpath) model_update['provider_location'] = snapshot['provider_location'] return model_update def delete_snapshot(self, snapshot): """Deletes a snapshot.""" ctxt = context.get_admin_context() volumename = snapshot['volume_name'] index = volumename.index('-') volumeid = volumename[index + 1:] volume = self.db.volume_get(ctxt, volumeid) self.common.delete_snapshot(snapshot, volume) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" pass def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. the format of the driver data is defined in smis_get_iscsi_properties. 
Example return value::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.1:3260',
                    'volume_id': '12345678-1234-4321-1234-123456789012',
                }
            }
        """
        self.iscsi_ip_addresses = self.common.initialize_connection(
            volume, connector)
        iscsi_properties = self.smis_get_iscsi_properties(
            volume, connector)
        LOG.info(_LI("Leaving initialize_connection: %s"), iscsi_properties)
        return {
            'driver_volume_type': 'iscsi',
            'data': iscsi_properties
        }

    def _call_iscsiadm(self, iscsi_ip_address):
        """Calls iscsiadm with the given iSCSI IP address."""
        try:
            (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                        '-t', 'sendtargets', '-p',
                                        iscsi_ip_address,
                                        run_as_root=True)
            return out, _err, False, None
        except Exception as ex:
            return None, None, True, ex

    def smis_do_iscsi_discovery(self, volume):
        """Calls iscsiadm with each iSCSI IP address in the list."""
        LOG.info(_LI("ISCSI provider_location not stored, using discovery."))
        targets = []
        if len(self.iscsi_ip_addresses) == 0:
            LOG.error(_LE("The list of iscsi_ip_addresses is empty"))
            return targets
        for iscsi_ip_address in self.iscsi_ip_addresses:
            out, _err, go_again, ex = self._call_iscsiadm(iscsi_ip_address)
            if not go_again:
                break
        if not out:
            if ex:
                exception_message = (_("Unsuccessful iscsiadm. "
                                       "Exception is %(ex)s. ")
                                     % {'ex': ex})
            else:
                exception_message = (_("iscsiadm execution failed. "))
            raise exception.VolumeBackendAPIException(data=exception_message)
        LOG.info(_LI(
            "smis_do_iscsi_discovery is: %(out)s."), {'out': out})
        for target in out.splitlines():
            targets.append(target)
        return targets

    def smis_get_iscsi_properties(self, volume, connector):
        """Gets iSCSI configuration.

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the
        future.

        The properties are:

        :target_discovered: boolean indicating whether discovery was used
        :target_iqn: the IQN of the iSCSI target
        :target_portal: the portal of the iSCSI target
        :target_lun: the lun of the iSCSI target
        :volume_id: the UUID of the volume
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
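        An illustrative return value (all values below are hypothetical and
        shown only to document the shape of the dict)::

            {
                'target_discovered': True,
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                'target_portal': '10.0.0.5:3260',
                'target_lun': 1,
                'volume_id': '12345678-1234-4321-1234-123456789012'
            }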
""" properties = {} location = self.smis_do_iscsi_discovery(volume) if not location: raise exception.InvalidVolume(_("Could not find iSCSI export " " for volume %(volumeName)s.") % {'volumeName': volume['name']}) LOG.debug("ISCSI Discovery: Found %s", location) properties['target_discovered'] = True device_info = self.common.find_device_number( volume, connector['host']) if device_info is None or device_info['hostlunid'] is None: exception_message = (_("Cannot find device number for volume " "%(volumeName)s.") % {'volumeName': volume['name']}) raise exception.VolumeBackendAPIException(data=exception_message) device_number = device_info['hostlunid'] LOG.info(_LI( "location is: %(location)s"), {'location': location}) for loc in location: results = loc.split(" ") properties['target_portal'] = results[0].split(",")[0] properties['target_iqn'] = results[1] properties['target_lun'] = device_number properties['volume_id'] = volume['id'] LOG.info(_LI( "ISCSI properties: %(properties)s"), {'properties': properties}) LOG.info(_LI( "ISCSI volume is: %(volume)s"), {'volume': volume}) if 'provider_auth' in volume: auth = volume['provider_auth'] LOG.info(_LI( "AUTH properties: %(authProps)s"), {'authProps': auth}) if auth is not None: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret LOG.info(_LI("AUTH properties: %s."), properties) return properties def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" self.common.terminate_connection(volume, connector) def extend_volume(self, volume, new_size): """Extend an existing volume.""" self.common.extend_volume(volume, new_size) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. """ if refresh: self.update_volume_stats() return self._stats def update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") data = self.common.update_volume_stats() data['storage_protocol'] = 'iSCSI' data['driver_version'] = self.VERSION self._stats = data def migrate_volume(self, ctxt, volume, host): """Migrate a volume from one Volume Backend to another. :param ctxt: context :param volume: the volume object including the volume_type_id :param host: the host dict holding the relevant target information :returns: boolean -- Always returns True :returns: dict -- Empty dict {} """ return self.common.migrate_volume(ctxt, volume, host) def retype(self, ctxt, volume, new_type, diff, host): """Migrate volume to another host using retype. :param ctxt: context :param volume: the volume object including the volume_type_id :param new_type: the new volume type. 
:param diff: Unused parameter in common.retype :param host: the host dict holding the relevant target information :returns: boolean -- True if retype succeeded, False if error """ return self.common.retype(ctxt, volume, new_type, diff, host) def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" self.common.create_consistencygroup(context, group) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" return self.common.delete_consistencygroup( context, group, volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" return self.common.create_cgsnapshot(context, cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" return self.common.delete_cgsnapshot(context, cgsnapshot, snapshots) def _check_for_iscsi_ip_address(self): """Check to see if iscsi_ip_address is set in cinder.conf :returns: boolean -- True if iscsi_ip_address id defined in config. """ bExists = os.path.exists(CINDER_CONF) if bExists: if 'iscsi_ip_address' in open(CINDER_CONF).read(): return True return False def manage_existing(self, volume, external_ref): """Manages an existing VMAX Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. """ return self.common.manage_existing(volume, external_ref) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing VMAX volume to manage_existing. :param self: reference to class :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: size of the volume in GB """ return self.common.manage_existing_get_size(volume, external_ref) def unmanage(self, volume): """Export VMAX volume and leave volume intact on the backend array.""" return self.common.unmanage(volume) def update_consistencygroup(self, context, group, add_volumes, remove_volumes): """Updates LUNs in consistency group.""" return self.common.update_consistencygroup(group, add_volumes, remove_volumes) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates the consistency group from source. Currently the source can only be a cgsnapshot. :param context: the context :param group: the consistency group object to be created :param volumes: volumes in the consistency group :param cgsnapshot: the source consistency group snapshot :param snapshots: snapshots of the source volumes :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. """ return self.common.create_consistencygroup_from_src( context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) cinder-8.0.0/cinder/volume/drivers/emc/emc_vnx_cli.py0000664000567000056710000063476012701406257024005 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ VNX CLI """ import copy import math import os import random import re import time import types import eventlet from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import timeutils import six from six.moves import range import taskflow.engines from taskflow.patterns import linear_flow from taskflow import task from taskflow.types import failure from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.objects import fields from cinder import utils from cinder.volume import configuration as config from cinder.volume.drivers.san import san from cinder.volume import manager from cinder.volume import utils as vol_utils from cinder.volume import volume_types CONF = cfg.CONF LOG = logging.getLogger(__name__) INTERVAL_5_SEC = 5 INTERVAL_20_SEC = 20 INTERVAL_30_SEC = 30 INTERVAL_60_SEC = 60 ENABLE_TRACE = False loc_opts = [ cfg.StrOpt('storage_vnx_authentication_type', default='global', help='VNX authentication scope type.'), cfg.StrOpt('storage_vnx_security_file_dir', help='Directory path that contains the VNX security file. ' 'Make sure the security file is generated first.'), cfg.StrOpt('naviseccli_path', default='', help='Naviseccli Path.'), cfg.StrOpt('storage_vnx_pool_names', deprecated_name='storage_vnx_pool_name', help='Comma-separated list of storage pool names to be used.'), cfg.StrOpt('san_secondary_ip', help='VNX secondary SP IP Address.'), cfg.IntOpt('default_timeout', default=60 * 24 * 365, help='Default timeout for CLI operations in minutes. ' 'For example, LUN migration is a typical long ' 'running operation, which depends on the LUN size and ' 'the load of the array. ' 'An upper bound in the specific deployment can be set to ' 'avoid unnecessary long wait. ' 'By default, it is 365 days long.'), cfg.IntOpt('max_luns_per_storage_group', default=255, help='Default max number of LUNs in a storage group.' ' By default, the value is 255.'), cfg.BoolOpt('destroy_empty_storage_group', default=False, help='To destroy storage group ' 'when the last LUN is removed from it. ' 'By default, the value is False.'), cfg.StrOpt('iscsi_initiators', default='', help='Mapping between hostname and ' 'its iSCSI initiator IP addresses.'), cfg.StrOpt('io_port_list', default='*', help='Comma separated iSCSI or FC ports ' 'to be used in Nova or Cinder.'), cfg.BoolOpt('initiator_auto_registration', default=False, help='Automatically register initiators. ' 'By default, the value is False.'), cfg.BoolOpt('initiator_auto_deregistration', default=False, help='Automatically deregister initiators after the related ' 'storage group is destroyed. ' 'By default, the value is False.'), cfg.BoolOpt('check_max_pool_luns_threshold', default=False, help='Report free_capacity_gb as 0 when the limit to ' 'maximum number of pool LUNs is reached. ' 'By default, the value is False.'), cfg.BoolOpt('force_delete_lun_in_storagegroup', default=False, help='Delete a LUN even if it is in Storage Groups.'), cfg.BoolOpt('ignore_pool_full_threshold', default=False, help='Force LUN creation even if ' 'the full threshold of pool is reached.') ] CONF.register_opts(loc_opts) def decorate_all_methods(method_decorator): """Applies decorator on the methods of a class. 
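Tracing is gated by the module-level ENABLE_TRACE flag; when it is False, the class is returned unchanged.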
This is a class decorator, which will apply method decorator referred by method_decorator to all the public methods (without underscore as the prefix) in a class. """ if not ENABLE_TRACE: return lambda cls: cls def _decorate_all_methods(cls): for attr_name, attr_val in cls.__dict__.items(): if (isinstance(attr_val, types.FunctionType) and not attr_name.startswith("_")): setattr(cls, attr_name, method_decorator(attr_val)) return cls return _decorate_all_methods def log_enter_exit(func): if not CONF.debug: return func def inner(self, *args, **kwargs): LOG.debug("Entering %(cls)s.%(method)s", {'cls': self.__class__.__name__, 'method': func.__name__}) start = timeutils.utcnow() ret = func(self, *args, **kwargs) end = timeutils.utcnow() LOG.debug("Exiting %(cls)s.%(method)s. " "Spent %(duration)s sec. " "Return %(return)s", {'cls': self.__class__.__name__, 'duration': timeutils.delta_seconds(start, end), 'method': func.__name__, 'return': ret}) return ret return inner class PropertyDescriptor(object): def __init__(self, option, label, key=None, converter=None): self.option = option self._label = None self._key = key self.converter = converter self.label = label @property def label(self): return self._label @label.setter def label(self, value): value = value.strip() if value[-1] == ':': value = value[:-1] self._label = value @property def key(self): if self._key is None: self._key = '_'.join(self.label.lower().split()) return self._key class _Enum(object): @classmethod def get_all(cls): return [getattr(cls, member) for member in dir(cls) if cls._is_enum(member)] @classmethod def _is_enum(cls, name): return (isinstance(name, str) and hasattr(cls, name) and name.isupper()) @classmethod def get_opt(cls, tier): option_map = getattr(cls, '_map', None) if option_map is None: raise NotImplementedError( _('Option map (cls._map) is not defined.')) ret = option_map.get(tier) if ret is None: raise ValueError(_("{} is not a valid option.").format(tier)) return ret class VNXError(_Enum): GENERAL_NOT_FOUND = 'cannot find|may not exist|does not exist' SG_NAME_IN_USE = 'Storage Group name already in use' LUN_ALREADY_EXPANDED = 0x712d8e04 LUN_EXISTED = 0x712d8d04 LUN_IS_PREPARING = 0x712d8e0e LUN_IN_SG = 'contained in a Storage Group|LUN mapping still exists' LUN_NOT_MIGRATING = ('The specified source LUN is ' 'not currently migrating') LUN_IS_NOT_SMP = 'it is not a snapshot mount point' CG_IS_DELETING = 0x712d8801 CG_EXISTED = 0x716d8021 CG_SNAP_NAME_EXISTED = 0x716d8005 SNAP_NAME_EXISTED = 0x716d8005 SNAP_NAME_IN_USE = 0x716d8003 SNAP_ALREADY_MOUNTED = 0x716d8055 SNAP_NOT_ATTACHED = ('The specified Snapshot mount point ' 'is not currently attached.') MIRROR_NOT_FOUND = 'Mirror not found' MIRROR_IN_USE = 'Mirror name already in use' @staticmethod def _match(output, error_code): is_match = False if VNXError._is_enum(error_code): error_code = getattr(VNXError, error_code) if isinstance(error_code, int): error_code = hex(error_code) if isinstance(error_code, str): error_code = error_code.strip() found = re.findall(error_code, output, flags=re.IGNORECASE) is_match = len(found) > 0 return is_match @classmethod def has_error(cls, output, *error_codes): if error_codes is None or len(error_codes) == 0: error_codes = VNXError.get_all() return any([cls._match(output, error_code) for error_code in error_codes]) class VNXMigrationRate(_Enum): LOW = 'low' MEDIUM = 'medium' HIGH = 'high' ASAP = 'asap' class VNXProvisionEnum(_Enum): THIN = 'thin' THICK = 'thick' COMPRESSED = 'compressed' DEDUPED = 'deduplicated' _map = { 
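        # Options appended to the 'lun -create' naviseccli command for each
        # provisioning type. Note that 'compressed' creates a thin LUN first;
        # compression itself is switched on afterwards by
        # create_lun_with_advance_feature().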
THIN: ['-type', 'Thin'], THICK: ['-type', 'NonThin'], COMPRESSED: ['-type', 'Thin'], DEDUPED: ['-type', 'Thin', '-deduplication', 'on']} class VNXTieringEnum(_Enum): NONE = 'none' HIGH_AUTO = 'starthighthenauto' AUTO = 'auto' HIGH = 'highestavailable' LOW = 'lowestavailable' NO_MOVE = 'nomovement' _map = { NONE: ['', ''], HIGH_AUTO: [ '-initialTier', 'highestAvailable', '-tieringPolicy', 'autoTier'], AUTO: [ '-initialTier', 'optimizePool', '-tieringPolicy', 'autoTier'], HIGH: [ '-initialTier', 'highestAvailable', '-tieringPolicy', 'highestAvailable'], LOW: [ '-initialTier', 'lowestAvailable', '-tieringPolicy', 'lowestAvailable'], NO_MOVE: [ '-initialTier', 'optimizePool', '-tieringPolicy', 'noMovement'] } @classmethod def get_tier(cls, initial, policy): ret = None for k, v in cls._map.items(): if len(v) >= 4: v_initial, v_policy = v[1], v[3] if (cls.match_option(initial, v_initial) and cls.match_option(policy, v_policy)): ret = k break elif cls.match_option(policy, 'noMovement'): # no movement could have different initial tier ret = cls.NO_MOVE break if ret is None: raise ValueError(_('Initial tier: {}, policy: {} is not valid.') .format(initial, policy)) return ret @staticmethod def match_option(output, option): return output.replace(' ', '').lower() == option.lower() class VNXLun(object): DEFAULT_TIER = VNXTieringEnum.HIGH_AUTO DEFAULT_PROVISION = VNXProvisionEnum.THICK def __init__(self): self._lun_id = -1 self._capacity = 0.0 self._pool_name = '' self._tier = self.DEFAULT_TIER self._provision = self.DEFAULT_PROVISION self._const = VNXLunProperties @property def lun_id(self): return self._lun_id @lun_id.setter def lun_id(self, data): if isinstance(data, dict): self._lun_id = self._get(data, self._const.LUN_ID) elif isinstance(data, int): self._lun_id = data elif isinstance(data, str): try: self._lun_id = int(data) except ValueError: raise ValueError( _('LUN number ({}) is not an integer.').format(data)) else: self._raise_type_error(data) if self.lun_id < 0: raise ValueError(_('LUN id({}) is not valid.') .format(self.lun_id)) @property def pool_name(self): return self._pool_name @pool_name.setter def pool_name(self, data): if isinstance(data, dict): self._pool_name = self._get(data, self._const.LUN_POOL) elif isinstance(data, str): self._pool_name = data else: self._raise_type_error(data) @property def capacity(self): return self._capacity @capacity.setter def capacity(self, data): if isinstance(data, dict): self._capacity = self._get(data, self._const.LUN_CAPACITY) elif isinstance(data, float): self._capacity = data elif isinstance(data, int): self._capacity = float(data) else: self._raise_type_error(data) @property def tier(self): return self._tier @tier.setter def tier(self, data): if isinstance(data, dict): initial = self._get(data, self._const.LUN_INITIAL_TIER) policy = self._get(data, self._const.LUN_TIERING_POLICY) self._tier = VNXTieringEnum.get_tier(initial, policy) elif isinstance(data, str) and data in VNXTieringEnum.get_all(): self._tier = data else: self._raise_type_error(data) @property def provision(self): return self._provision @provision.setter def provision(self, data): self._provision = VNXProvisionEnum.THICK if isinstance(data, dict): is_thin = self._get(data, self._const.LUN_IS_THIN_LUN) is_compressed = self._get(data, self._const.LUN_IS_COMPRESSED) is_dedup = self._get(data, self._const.LUN_DEDUP_STATE) if is_compressed: self._provision = VNXProvisionEnum.COMPRESSED elif is_dedup: self._provision = VNXProvisionEnum.DEDUPED elif is_thin: self._provision = 
VNXProvisionEnum.THIN elif isinstance(data, str) and data in VNXProvisionEnum.get_all(): self._provision = data else: self._raise_type_error(data) @staticmethod def _raise_type_error(data): raise ValueError(_('Input type {} is not supported.') .format(type(data))) def update(self, data): self.lun_id = data self.pool_name = data self.capacity = data self.provision = data self.tier = data @staticmethod def get_lun_by_id(client, lun_id): lun = VNXLun() lun.lun_id = lun_id lun.update(client) return lun @staticmethod def _get(data, key): if isinstance(key, PropertyDescriptor): key = key.key return data.get(key) def __repr__(self): return ('VNXLun [' 'lun_id: {}, ' 'pool_name: {}, ' 'capacity: {}, ' 'provision: {}, ' 'tier: {}]' .format(self.lun_id, self.pool_name, self.capacity, self.provision, self.tier)) class Converter(object): @staticmethod def str_to_boolean(str_input): ret = False if str_input.strip().lower() in ('yes', 'true', 'enabled', 'on'): ret = True return ret class Dict(dict): def __getattr__(self, item): try: ret = super(Dict, self).__getattr__(item) except AttributeError: if item in self: value = self.get(item) else: raise AttributeError( _("'{}' object has no attribute '{}'") .format(__name__, item)) ret = value return ret class VNXCliParser(_Enum): @classmethod def get_all_property_descriptor(cls): return (p for p in cls.get_all() if isinstance(p, PropertyDescriptor)) @classmethod def get_property_options(cls): properties = cls.get_all_property_descriptor() return [p.option for p in properties if p.option is not None] @classmethod def parse(cls, output, properties=None): ret = Dict() output = output.strip() if properties is None: properties = cls.get_all_property_descriptor() for p in properties: pattern = re.compile( '^\s*{}\s*[:]?\s*(?P.*)\s*$'.format( re.escape(p.label)), re.MULTILINE | re.IGNORECASE) matched = re.search(pattern, output) if matched is not None: value = matched.group('value') if p.converter is not None and callable(p.converter): value = p.converter(value) ret[p.key] = value else: ret[p.key] = None return ret class VNXLunProperties(VNXCliParser): LUN_STATE = PropertyDescriptor( '-state', 'Current State', 'state') LUN_STATUS = PropertyDescriptor( '-status', 'Status') LUN_OPERATION = PropertyDescriptor( '-opDetails', 'Current Operation', 'operation') LUN_CAPACITY = PropertyDescriptor( '-userCap', 'User Capacity (GBs)', 'total_capacity_gb', float) LUN_OWNER = PropertyDescriptor( '-owner', 'Current Owner', 'owner') LUN_ATTACHEDSNAP = PropertyDescriptor( '-attachedSnapshot', 'Attached Snapshot') LUN_NAME = PropertyDescriptor( None, 'Name', 'lun_name') LUN_ID = PropertyDescriptor( None, 'LOGICAL UNIT NUMBER', 'lun_id', int) LUN_POOL = PropertyDescriptor( '-poolName', 'Pool Name', 'pool') LUN_IS_THIN_LUN = PropertyDescriptor( '-isThinLUN', 'Is Thin LUN', converter=Converter.str_to_boolean) LUN_IS_COMPRESSED = PropertyDescriptor( '-isCompressed', 'Is Compressed', converter=Converter.str_to_boolean) LUN_DEDUP_STATE = PropertyDescriptor( '-dedupState', 'Deduplication State', 'dedup_state', Converter.str_to_boolean) LUN_INITIAL_TIER = PropertyDescriptor( '-initialTier', 'Initial Tier') LUN_TIERING_POLICY = PropertyDescriptor( '-tieringPolicy', 'Tiering Policy') lun_all = [LUN_STATE, LUN_STATUS, LUN_OPERATION, LUN_CAPACITY, LUN_OWNER, LUN_ATTACHEDSNAP] lun_with_pool = [LUN_STATE, LUN_CAPACITY, LUN_OWNER, LUN_ATTACHEDSNAP, LUN_POOL] class VNXPoolProperties(VNXCliParser): POOL_ID = PropertyDescriptor( None, 'Pool ID', 'pool_id', int) POOL_STATE = PropertyDescriptor( 
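        # Positional args are (option, label); when no explicit key is given,
        # the parser key defaults to the lower-cased, underscore-joined label
        # (e.g. 'State' -> 'state').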
'-state', 'State') POOL_TOTAL_CAPACITY = PropertyDescriptor( '-userCap', 'User Capacity (GBs)', 'total_capacity_gb', float) POOL_FREE_CAPACITY = PropertyDescriptor( '-availableCap', 'Available Capacity (GBs)', 'free_capacity_gb', float) POOL_FAST_CACHE = PropertyDescriptor( '-fastcache', 'FAST Cache', 'fast_cache_enabled', Converter.str_to_boolean) POOL_NAME = PropertyDescriptor( None, 'Pool Name') POOL_SUBSCRIBED_CAPACITY = PropertyDescriptor( '-subscribedCap', 'Total Subscribed Capacity (GBs)', 'provisioned_capacity_gb', float) POOL_FULL_THRESHOLD = PropertyDescriptor( '-prcntFullThreshold', 'Percent Full Threshold', 'pool_full_threshold', int) pool_all = [POOL_TOTAL_CAPACITY, POOL_FREE_CAPACITY, POOL_STATE, POOL_FULL_THRESHOLD] class VNXPoolFeatureProperties(VNXCliParser): MAX_POOL_LUNS = PropertyDescriptor( '-maxPoolLUNs', 'Max. Pool LUNs', 'max_pool_luns', int) TOTAL_POOL_LUNS = PropertyDescriptor( '-numPoolLUNs', 'Total Number of Pool LUNs', 'total_pool_luns', int) default = [MAX_POOL_LUNS, TOTAL_POOL_LUNS] @decorate_all_methods(log_enter_exit) class CommandLineHelper(object): # extra spec constants tiering_spec = 'storagetype:tiering' provisioning_specs = [ 'provisioning:type', 'storagetype:provisioning'] copytype_spec = 'copytype:snap' def __init__(self, configuration): configuration.append_config_values(san.san_opts) self.timeout = configuration.default_timeout * INTERVAL_60_SEC self.max_luns = configuration.max_luns_per_storage_group # Checking for existence of naviseccli tool navisecclipath = configuration.naviseccli_path if not os.path.exists(navisecclipath): err_msg = _('naviseccli_path: Could not find ' 'NAVISECCLI tool %(path)s.') % {'path': navisecclipath} LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) self.command = (navisecclipath, '-address') self.active_storage_ip = configuration.san_ip self.primary_storage_ip = self.active_storage_ip self.secondary_storage_ip = configuration.san_secondary_ip if self.secondary_storage_ip == self.primary_storage_ip: LOG.warning(_LW("san_secondary_ip is configured as " "the same value as san_ip.")) self.secondary_storage_ip = None if not configuration.san_ip: err_msg = _('san_ip: Mandatory field configuration. 
' 'san_ip is not set.') LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) # Lock file name for this specific back-end self.toggle_lock_name = configuration.config_group self.credentials = () storage_username = configuration.san_login storage_password = configuration.san_password storage_auth_type = configuration.storage_vnx_authentication_type storage_vnx_security_file = configuration.storage_vnx_security_file_dir if storage_auth_type is None: storage_auth_type = 'global' elif storage_auth_type.lower() not in ('ldap', 'local', 'global'): err_msg = (_('Invalid VNX authentication type: %s') % storage_auth_type) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) # if there is security file path provided, use this security file if storage_vnx_security_file: self.credentials = ('-secfilepath', storage_vnx_security_file) LOG.info(_LI("Using security file in %s for authentication"), storage_vnx_security_file) # if there is a username/password provided, use those in the cmd line elif storage_username is not None and len(storage_username) > 0 and\ storage_password is not None and len(storage_password) > 0: self.credentials = ('-user', storage_username, '-password', storage_password, '-scope', storage_auth_type) LOG.info(_LI("Plain text credentials are being used for " "authentication")) else: LOG.info(_LI("Neither security file nor plain " "text credentials are specified. Security file under " "home directory will be used for authentication " "if present.")) self.iscsi_initiator_map = None if configuration.iscsi_initiators: self.iscsi_initiator_map = \ json.loads(configuration.iscsi_initiators) LOG.info(_LI("iscsi_initiators: %s"), self.iscsi_initiator_map) def _raise_cli_error(self, cmd=None, rc=None, out='', **kwargs): raise exception.EMCVnxCLICmdError(cmd=cmd, rc=rc, out=out.split('\n'), **kwargs) def create_lun_with_advance_feature(self, pool, name, size, provisioning, tiering, consistencygroup_id=None, ignore_thresholds=False, poll=True): command_create_lun = ['lun', '-create', '-capacity', int(size), '-sq', 'gb', '-poolName', pool, '-name', name] if not poll: command_create_lun = ['-np'] + command_create_lun # provisioning if provisioning: command_create_lun.extend(VNXProvisionEnum.get_opt(provisioning)) # tiering if tiering and tiering != 'none': command_create_lun.extend(VNXTieringEnum.get_opt(tiering)) if ignore_thresholds: command_create_lun.append('-ignoreThresholds') # create lun data = self.create_lun_by_cmd(command_create_lun, name) # handle compression try: if provisioning == 'compressed': self.enable_or_disable_compression_on_lun( name, 'on') except exception.EMCVnxCLICmdError as ex: with excutils.save_and_reraise_exception(): self.delete_lun(name) LOG.error(_LE("Error on enable compression on lun %s."), ex) # handle consistency group try: if consistencygroup_id: self.add_lun_to_consistency_group( consistencygroup_id, data['lun_id']) except exception.EMCVnxCLICmdError as ex: with excutils.save_and_reraise_exception(): self.delete_lun(name) LOG.error(_LE("Error on adding lun to consistency" " group. %s"), ex) return data def create_lun_by_cmd(self, cmd, name): out, rc = self.command_execute(*cmd) if rc != 0: # Ignore the error that due to retry if VNXError.has_error(out, VNXError.LUN_EXISTED): LOG.warning(_LW('LUN already exists, LUN name %(name)s. 
' 'Message: %(msg)s'), {'name': name, 'msg': out}) else: self._raise_cli_error(cmd, rc, out) def _lun_state_validation(lun_data): lun_state = lun_data[VNXLunProperties.LUN_STATE.key] if lun_state == 'Initializing': return False # Lun in Ready or Faulted state is eligible for IO access, # so if no lun operation, return success. elif lun_state in ['Ready', 'Faulted']: return lun_data[VNXLunProperties.LUN_OPERATION.key] == 'None' # Raise exception if lun state is Offline, Invalid, Destroying # or other unexpected states. else: msg = (_("Volume %(name)s was created in VNX, " "but in %(state)s state.") % {'name': lun_data[VNXLunProperties.LUN_NAME.key], 'state': lun_state}) raise exception.VolumeBackendAPIException(data=msg) def lun_is_ready(): try: data = self.get_lun_by_name(name, VNXLunProperties.lun_all, False) except exception.EMCVnxCLICmdError as ex: orig_out = "\n".join(ex.kwargs["out"]) if VNXError.has_error(orig_out, VNXError.GENERAL_NOT_FOUND): return False else: raise return _lun_state_validation(data) self._wait_for_a_condition(lun_is_ready, interval=INTERVAL_5_SEC, ignorable_exception_arbiter=lambda ex: isinstance(ex, exception.EMCVnxCLICmdError)) lun = self.get_lun_by_name(name, VNXLunProperties.lun_all, False) return lun def delete_lun(self, name): """Deletes a LUN or mount point.""" command_delete_lun = ['lun', '-destroy', '-name', name, '-forceDetach', '-o'] # executing cli command to delete volume out, rc = self.command_execute(*command_delete_lun) if rc != 0 or out.strip(): # Ignore the error that due to retry if VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND): LOG.warning(_LW("LUN is already deleted, LUN name %(name)s. " "Message: %(msg)s"), {'name': name, 'msg': out}) else: self._raise_cli_error(command_delete_lun, rc, out) def get_hlus(self, lun_id, poll=True): hlus = list() command_storage_group_list = ('storagegroup', '-list') out, rc = self.command_execute(*command_storage_group_list, poll=poll) if rc != 0: self._raise_cli_error(command_storage_group_list, rc, out) sg_name_p = re.compile(r'^\s*(?P[^\n\r]+)') hlu_alu_p = re.compile(r'HLU/ALU Pairs:' r'\s*HLU Number\s*ALU Number' r'\s*[-\s]*' r'(\d|\s)*' r'\s+(?P\d+)( |\t)+%s' % lun_id) for sg_info in out.split('Storage Group Name:'): hlu_alu_m = hlu_alu_p.search(sg_info) if hlu_alu_m is None: continue sg_name_m = sg_name_p.search(sg_info) if sg_name_m: hlus.append((hlu_alu_m.group('hlu'), sg_name_m.group('sg_name'))) return hlus def _wait_for_a_condition(self, testmethod, timeout=None, interval=INTERVAL_5_SEC, ignorable_exception_arbiter=lambda ex: True, *args, **kwargs): start_time = time.time() if timeout is None: timeout = self.timeout def _inner(): try: test_value = testmethod(*args, **kwargs) except Exception as ex: test_value = False with excutils.save_and_reraise_exception( reraise=not ignorable_exception_arbiter(ex)): LOG.debug('CommandLineHelper.' 
'_wait_for_a_condition: %(method_name)s ' 'execution failed for %(exception)s', {'method_name': testmethod.__name__, 'exception': ex}) if test_value: raise loopingcall.LoopingCallDone() if int(time.time()) - start_time > timeout: msg = (_('CommandLineHelper._wait_for_a_condition: %s timeout') % testmethod.__name__) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=interval).wait() def expand_lun(self, name, new_size, poll=True): command_expand_lun = ('lun', '-expand', '-name', name, '-capacity', new_size, '-sq', 'gb', '-o', '-ignoreThresholds') out, rc = self.command_execute(*command_expand_lun, poll=poll) if rc != 0: # Ignore the error that due to retry if VNXError.has_error(out, VNXError.LUN_ALREADY_EXPANDED): LOG.warning(_LW("LUN %(name)s is already expanded. " "Message: %(msg)s"), {'name': name, 'msg': out}) else: self._raise_cli_error(command_expand_lun, rc, out) def expand_lun_and_wait(self, name, new_size): self.expand_lun(name, new_size, poll=False) def lun_is_extented(): data = self.get_lun_by_name(name, poll=False) return new_size == data[VNXLunProperties.LUN_CAPACITY.key] self._wait_for_a_condition(lun_is_extented) def rename_lun(self, lun_id, new_name, poll=False): command_lun_rename = ('lun', '-modify', '-l', lun_id, '-newName', new_name, '-o') out, rc = self.command_execute(*command_lun_rename, poll=poll) if rc != 0: self._raise_cli_error(command_lun_rename, rc, out) def modify_lun_tiering_by_id(self, lun_id, tiering): """Modify the tiering policy of the LUN.""" command_modify_lun = ['lun', '-modify', '-l', lun_id, '-o'] self._modify_lun_tiering(command_modify_lun, tiering) def modify_lun_tiering_by_name(self, name, tiering): """This function used to modify a lun's tiering policy.""" command_modify_lun = ['lun', '-modify', '-name', name, '-o'] self._modify_lun_tiering(command_modify_lun, tiering) def _modify_lun_tiering(self, command_modify_lun, tiering): if tiering and tiering != 'none': command_modify_lun.extend(VNXTieringEnum.get_opt(tiering)) out, rc = self.command_execute(*command_modify_lun) if rc != 0: self._raise_cli_error(command_modify_lun, rc, out) def create_consistencygroup(self, cg_name, members=None, poll=False): """create the consistency group.""" command_create_cg = ('snap', '-group', '-create', '-name', cg_name, '-allowSnapAutoDelete', 'no') if members: command_create_cg += ('-res', ','.join(map(six.text_type, members))) out, rc = self.command_execute(*command_create_cg, poll=poll) if rc != 0: # Ignore the error if consistency group already exists if VNXError.has_error(out, VNXError.CG_EXISTED): LOG.warning(_LW('Consistency group %(name)s already ' 'exists. 
Message: %(msg)s'), {'name': cg_name, 'msg': out}) else: self._raise_cli_error(command_create_cg, rc, out) self._wait_for_a_condition(self.get_consistency_group_by_name, cg_name=cg_name, interval=INTERVAL_5_SEC, ignorable_exception_arbiter=lambda ex: isinstance(ex, exception.EMCVnxCLICmdError)) def get_consistency_group_by_name(self, cg_name): cmd = ('snap', '-group', '-list', '-id', cg_name) data = { 'Name': None, 'Luns': None, 'State': None } out, rc = self.command_execute(*cmd) if rc == 0: cg_pat = r"Name:(.*)\n"\ r"Description:(.*)\n"\ r"Allow auto delete:(.*)\n"\ r"Member LUN ID\(s\):(.*)\n"\ r"State:(.*)\n" for m in re.finditer(cg_pat, out): data['Name'] = m.groups()[0].strip() data['State'] = m.groups()[4].strip() # Handle case when no lun in cg Member LUN ID(s): None luns_of_cg = m.groups()[3].replace('None', '').strip() data['Luns'] = ([lun.strip() for lun in luns_of_cg.split(',')] if luns_of_cg else []) LOG.debug("Found consistent group %s.", data['Name']) else: self._raise_cli_error(cmd, rc, out) return data def add_lun_to_consistency_group(self, cg_name, lun_id, poll=False): add_lun_to_cg_cmd = ('snap', '-group', '-addmember', '-id', cg_name, '-res', lun_id) out, rc = self.command_execute(*add_lun_to_cg_cmd, poll=poll) if rc != 0: LOG.error(_LE("Can not add the lun %(lun)s to consistency " "group %(cg_name)s."), {'lun': lun_id, 'cg_name': cg_name}) self._raise_cli_error(add_lun_to_cg_cmd, rc, out) def remove_luns_from_consistencygroup(self, cg_name, remove_ids, poll=False): """Removes LUN(s) from cg""" remove_luns_cmd = ('snap', '-group', '-rmmember', '-id', cg_name, '-res', ','.join(remove_ids)) out, rc = self.command_execute(*remove_luns_cmd, poll=poll) if rc != 0: LOG.error(_LE("Can not remove LUNs %(luns)s in consistency " "group %(cg_name)s."), {'luns': remove_ids, 'cg_name': cg_name}) self._raise_cli_error(remove_luns_cmd, rc, out) def replace_luns_in_consistencygroup(self, cg_name, new_ids, poll=False): """Replaces LUN(s) with new_ids for cg""" replace_luns_cmd = ('snap', '-group', '-replmember', '-id', cg_name, '-res', ','.join(new_ids)) out, rc = self.command_execute(*replace_luns_cmd, poll=poll) if rc != 0: LOG.error(_LE("Can not place new LUNs %(luns)s in consistency " "group %(cg_name)s."), {'luns': new_ids, 'cg_name': cg_name}) self._raise_cli_error(replace_luns_cmd, rc, out) def delete_consistencygroup(self, cg_name): delete_cg_cmd = ('-np', 'snap', '-group', '-destroy', '-id', cg_name) out, rc = self.command_execute(*delete_cg_cmd) if rc != 0: # Ignore the error if CG doesn't exist if VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND): LOG.warning(_LW("CG %(cg_name)s does not exist. " "Message: %(msg)s"), {'cg_name': cg_name, 'msg': out}) elif VNXError.has_error(out, VNXError.CG_IS_DELETING): LOG.warning(_LW("CG %(cg_name)s is deleting. " "Message: %(msg)s"), {'cg_name': cg_name, 'msg': out}) else: self._raise_cli_error(delete_cg_cmd, rc, out) else: LOG.info(_LI('Consistency group %s was deleted ' 'successfully.'), cg_name) def create_cgsnapshot(self, cg_name, snap_name): """Create a cgsnapshot (snap group).""" create_cg_snap_cmd = ('-np', 'snap', '-create', '-res', cg_name, '-resType', 'CG', '-name', snap_name, '-allowReadWrite', 'yes', '-allowAutoDelete', 'no') out, rc = self.command_execute(*create_cg_snap_cmd) if rc != 0: # Ignore the error if cgsnapshot already exists if VNXError.has_error(out, VNXError.CG_SNAP_NAME_EXISTED): LOG.warning(_LW('Cgsnapshot name %(name)s already ' 'exists. 
Message: %(msg)s'), {'name': snap_name, 'msg': out}) else: self._raise_cli_error(create_cg_snap_cmd, rc, out) self._wait_for_a_condition(self.check_snapshot, snap_name=snap_name, interval=INTERVAL_30_SEC, ignorable_exception_arbiter=lambda ex: isinstance(ex, exception.EMCVnxCLICmdError)) def check_snapshot(self, snap_name, poll=True): """check if a snapshot/cgsnapshot is existed.""" cmd_get = ('snap', '-list', '-id', snap_name) out, rc = self.command_execute(*cmd_get) if rc == 0: return True else: self._raise_cli_error(cmd_get, rc, out) def delete_cgsnapshot(self, snap_name): """Delete a cgsnapshot (snap group).""" delete_cg_snap_cmd = ('-np', 'snap', '-destroy', '-id', snap_name, '-o') out, rc = self.command_execute(*delete_cg_snap_cmd) if rc != 0: # Ignore the error if cgsnapshot does not exist. if VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND): LOG.warning(_LW('Snapshot %(name)s for consistency group ' 'does not exist. Message: %(msg)s'), {'name': snap_name, 'msg': out}) else: self._raise_cli_error(delete_cg_snap_cmd, rc, out) def create_snapshot(self, lun_id, name): if lun_id is not None: command_create_snapshot = ('snap', '-create', '-res', lun_id, '-name', name, '-allowReadWrite', 'yes', '-allowAutoDelete', 'no') out, rc = self.command_execute(*command_create_snapshot, poll=False) if rc != 0: # Ignore the error that due to retry if VNXError.has_error(out, VNXError.SNAP_NAME_EXISTED): LOG.warning(_LW('Snapshot %(name)s already exists. ' 'Message: %(msg)s'), {'name': name, 'msg': out}) else: self._raise_cli_error(command_create_snapshot, rc, out) else: msg = _('Failed to create snapshot as no LUN ID is specified') raise exception.VolumeBackendAPIException(data=msg) def copy_snapshot(self, src_snap_name, new_name): command_copy_snapshot = ('snap', '-copy', '-id', src_snap_name, '-name', new_name, '-ignoreMigrationCheck', '-ignoreDeduplicationCheck') out, rc = self.command_execute(*command_copy_snapshot) if rc != 0: # Ignore the error if the snap already exists if VNXError.has_error(out, VNXError.SNAP_NAME_EXISTED): LOG.warning(_LW('Snapshot %(name)s already exists. ' 'Message: %(msg)s'), {'name': new_name, 'msg': out}) else: self._raise_cli_error(command_copy_snapshot, rc, out) def delete_snapshot(self, name): def delete_snapshot_success(): command_delete_snapshot = ('snap', '-destroy', '-id', name, '-o') out, rc = self.command_execute(*command_delete_snapshot, poll=True) if rc != 0: # Ignore the error that due to retry if rc == 5 and out.find("not exist") >= 0: LOG.warning(_LW("Snapshot %(name)s may deleted already. " "Message: %(msg)s"), {'name': name, 'msg': out}) return True # The snapshot cannot be destroyed because it is # attached to a snapshot mount point. Wait elif VNXError.has_error(out, VNXError.SNAP_NAME_IN_USE): LOG.warning(_LW("Snapshot %(name)s is in use, retry. " "Message: %(msg)s"), {'name': name, 'msg': out}) return False else: self._raise_cli_error(command_delete_snapshot, rc, out) else: LOG.info(_LI('Snapshot %s was deleted successfully.'), name) return True self._wait_for_a_condition(delete_snapshot_success, interval=INTERVAL_30_SEC, timeout=INTERVAL_30_SEC * 3) def create_mount_point(self, primary_lun_name, name): command_create_mount_point = ('lun', '-create', '-type', 'snap', '-primaryLunName', primary_lun_name, '-name', name) out, rc = self.command_execute(*command_create_mount_point, poll=False) if rc != 0: # Ignore the error that due to retry if VNXError.has_error(out, VNXError.LUN_EXISTED): LOG.warning(_LW("Mount point %(name)s already exists. 
" "Message: %(msg)s"), {'name': name, 'msg': out}) else: self._raise_cli_error(command_create_mount_point, rc, out) return rc def allow_snapshot_readwrite_and_autodelete(self, snap_name): modify_cmd = ('snap', '-modify', '-id', snap_name, '-allowReadWrite', 'yes', '-allowAutoDelete', 'yes') out, rc = self.command_execute(*modify_cmd) if rc != 0: self._raise_cli_error(modify_cmd, rc, out) def attach_mount_point(self, name, snapshot_name): command_attach_mount_point = ('lun', '-attach', '-name', name, '-snapName', snapshot_name) out, rc = self.command_execute(*command_attach_mount_point) if rc != 0: # Ignore the error that due to retry if VNXError.has_error(out, VNXError.SNAP_ALREADY_MOUNTED): LOG.warning(_LW("Snapshot %(snapname)s is attached to " "snapshot mount point %(mpname)s already. " "Message: %(msg)s"), {'snapname': snapshot_name, 'mpname': name, 'msg': out}) else: self._raise_cli_error(command_attach_mount_point, rc, out) return rc def detach_mount_point(self, smp_name): command_detach_mount_point = ('lun', '-detach', '-name', smp_name, '-o') out, rc = self.command_execute(*command_detach_mount_point) if rc != 0: # Ignore the error that due to retry if VNXError.has_error(out, VNXError.SNAP_NOT_ATTACHED): LOG.warning(_LW("The specified Snapshot mount point %s is not " "currently attached."), smp_name) else: self._raise_cli_error(command_detach_mount_point, rc, out) return rc def migrate_lun(self, src_id, dst_id, rate=VNXMigrationRate.HIGH): command_migrate_lun = ('migrate', '-start', '-source', src_id, '-dest', dst_id, '-rate', rate, '-o') # SP HA is not supported by LUN migration out, rc = self.command_execute(*command_migrate_lun, retry_disable=True, poll=True) if 0 != rc: self._raise_cli_error(command_migrate_lun, rc, out) return rc def migrate_lun_without_verification(self, src_id, dst_id, dst_name=None, rate=VNXMigrationRate.HIGH): try: self.migrate_lun(src_id, dst_id, rate) return True except exception.EMCVnxCLICmdError as ex: migration_succeed = False orig_out = "\n".join(ex.kwargs["out"]) if self._is_sp_unavailable_error(orig_out): LOG.warning(_LW("Migration command may get network timeout. " "Double check whether migration in fact " "started successfully. Message: %(msg)s"), {'msg': ex.kwargs["out"]}) command_migrate_list = ('migrate', '-list', '-source', src_id) rc = self.command_execute(*command_migrate_list, poll=True)[1] if rc == 0: migration_succeed = True if not migration_succeed: LOG.warning(_LW("Start migration failed. Message: %s"), ex.kwargs["out"]) if dst_name is not None: LOG.warning(_LW("Delete temp LUN after migration " "start failed. 
LUN: %s"), dst_name) self.delete_lun(dst_name) return False else: return True def verify_lun_migration(self, src_id): # Set the proper interval to verify the migration status def migration_is_ready(poll=False): mig_ready = False cmd_migrate_list = ('migrate', '-list', '-source', src_id) out, rc = self.command_execute(*cmd_migrate_list, poll=poll) LOG.debug("Migration output: %s", out) if rc == 0: # parse the percentage state = re.search(r'Current State:\s*([^\n]+)', out) percentage = re.search(r'Percent Complete:\s*([^\n]+)', out) percentage_complete = 'N/A' current_state = 'N/A' if state is not None: current_state = state.group(1) percentage_complete = percentage.group(1) else: self._raise_cli_error(cmd_migrate_list, rc, out) if ("FAULTED" in current_state or "STOPPED" in current_state): reason = _("Migration of LUN %s has been stopped or" " faulted.") % src_id raise exception.VolumeBackendAPIException(data=reason) if ("TRANSITIONING" in current_state or "MIGRATING" in current_state): LOG.debug("Migration of LUN %(src_id)s in process " "%(percentage)s %%.", {"src_id": src_id, "percentage": percentage_complete}) else: if VNXError.has_error(out, VNXError.LUN_NOT_MIGRATING): LOG.debug("Migration of LUN %s is finished.", src_id) mig_ready = True else: self._raise_cli_error(cmd_migrate_list, rc, out) return mig_ready def migration_disappeared(poll=False): cmd_migrate_list = ('migrate', '-list', '-source', src_id) out, rc = self.command_execute(*cmd_migrate_list, poll=poll) if rc != 0: if VNXError.has_error(out, VNXError.LUN_NOT_MIGRATING): LOG.debug("Migration of LUN %s is finished.", src_id) return True else: LOG.error(_LE("Failed to query migration status of LUN."), src_id) self._raise_cli_error(cmd_migrate_list, rc, out) return False try: if migration_is_ready(True): return True self._wait_for_a_condition( migration_is_ready, interval=INTERVAL_30_SEC, ignorable_exception_arbiter=lambda ex: type(ex) is not exception.VolumeBackendAPIException) # Migration cancellation for clean up except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Migration of LUN %s failed to complete."), src_id) self.migration_cancel(src_id) self._wait_for_a_condition(migration_disappeared, interval=INTERVAL_30_SEC) return True # Cancel migration in case where status is faulted or stopped def migration_cancel(self, src_id): LOG.info(_LI("Cancelling Migration from LUN %s."), src_id) cmd_migrate_cancel = ('migrate', '-cancel', '-source', src_id, '-o') out, rc = self.command_execute(*cmd_migrate_cancel) if rc != 0: self._raise_cli_error(cmd_migrate_cancel, rc, out) def migrate_lun_with_verification(self, src_id, dst_id, dst_name=None, rate=VNXMigrationRate.HIGH): migration_started = self.migrate_lun_without_verification( src_id, dst_id, dst_name, rate) if not migration_started: return False eventlet.sleep(INTERVAL_30_SEC) return self.verify_lun_migration(src_id) def get_storage_group(self, name, poll=True): # ALU/HLU as key/value map lun_map = {} data = {'storage_group_name': name, 'storage_group_uid': None, 'lunmap': lun_map, 'raw_output': ''} command_get_storage_group = ('storagegroup', '-list', '-gname', name, '-host', '-iscsiAttributes') out, rc = self.command_execute(*command_get_storage_group, poll=poll) if rc != 0: self._raise_cli_error(command_get_storage_group, rc, out) data['raw_output'] = out re_stroage_group_id = 'Storage Group UID:\s*(.*)\s*' m = re.search(re_stroage_group_id, out) if m is not None: data['storage_group_uid'] = m.group(1) re_HLU_ALU_pair = 
'HLU\/ALU Pairs:\s*HLU Number' \ '\s*ALU Number\s*[-\s]*(?P(\d+\s*)+)' m = re.search(re_HLU_ALU_pair, out) if m is not None: lun_details = m.group('lun_details').strip() values = re.split('\s*', lun_details) while (len(values) >= 2): key = values.pop() value = values.pop() lun_map[int(key)] = int(value) return data def create_storage_group(self, name): command_create_storage_group = ('storagegroup', '-create', '-gname', name) out, rc = self.command_execute(*command_create_storage_group) if rc != 0: # Ignore the error that due to retry if VNXError.has_error(out, VNXError.SG_NAME_IN_USE): LOG.warning(_LW('Storage group %(name)s already exists. ' 'Message: %(msg)s'), {'name': name, 'msg': out}) else: self._raise_cli_error(command_create_storage_group, rc, out) def delete_storage_group(self, name): command_delete_storage_group = ('storagegroup', '-destroy', '-gname', name, '-o') out, rc = self.command_execute(*command_delete_storage_group) if rc != 0: # Ignore the error that due to retry if rc == 83 and out.find("group name or UID does not " "match any storage groups") >= 0: LOG.warning(_LW("Storage group %(name)s doesn't exist, " "may have already been deleted. " "Message: %(msg)s"), {'name': name, 'msg': out}) else: self._raise_cli_error(command_delete_storage_group, rc, out) def connect_host_to_storage_group(self, hostname, sg_name): command_host_connect = ('storagegroup', '-connecthost', '-host', hostname, '-gname', sg_name, '-o') out, rc = self.command_execute(*command_host_connect) if rc != 0: self._raise_cli_error(command_host_connect, rc, out) def disconnect_host_from_storage_group(self, hostname, sg_name): command_host_disconnect = ('storagegroup', '-disconnecthost', '-host', hostname, '-gname', sg_name, '-o') out, rc = self.command_execute(*command_host_disconnect) if rc != 0: # Ignore the error that due to retry if rc == 116 and \ re.search("host is not.*connected to.*storage group", out) is not None: LOG.warning(_LW("Host %(host)s has already disconnected from " "storage group %(sgname)s. Message: %(msg)s"), {'host': hostname, 'sgname': sg_name, 'msg': out}) else: self._raise_cli_error(command_host_disconnect, rc, out) def add_hlu_to_storage_group(self, hlu, alu, sg_name): """Adds a lun into storage group as specified hlu number. Return True if the hlu is as specified, otherwise False. """ command_add_hlu = ('storagegroup', '-addhlu', '-hlu', hlu, '-alu', alu, '-gname', sg_name, '-o') out, rc = self.command_execute(*command_add_hlu, poll=False) if rc != 0: # Do not need to consider the retry for add hlu # Retry is handled in the caller self._raise_cli_error(command_add_hlu, rc, out) return True def remove_hlu_from_storagegroup(self, hlu, sg_name, poll=False): command_remove_hlu = ('storagegroup', '-removehlu', '-hlu', hlu, '-gname', sg_name, '-o') out, rc = self.command_execute(*command_remove_hlu, poll=poll) if rc != 0: # Ignore the error that due to retry if rc == 66 and\ out.find("No such Host LUN in this Storage Group") >= 0: LOG.warning(_LW("HLU %(hlu)s has already been removed from " "%(sgname)s. 
Message: %(msg)s"), {'hlu': hlu, 'sgname': sg_name, 'msg': out}) else: self._raise_cli_error(command_remove_hlu, rc, out) def get_lun_by_name(self, name, properties=VNXLunProperties.lun_all, poll=True): data = self.get_lun_properties(('-name', name), properties, poll=poll) return data def get_lun_by_id(self, lunid, properties=VNXLunProperties.lun_all, poll=True): data = self.get_lun_properties(('-l', lunid), properties, poll=poll) return data def get_lun_current_ops_state(self, name, poll=False): data = self.get_lun_by_name(name, poll=poll) return data[VNXLunProperties.LUN_OPERATION.key] def wait_until_lun_ready_for_ops(self, name): def is_lun_ready_for_ops(): data = self.get_lun_current_ops_state(name, False) return data == 'None' # Get the volume's latest operation state by polling. # Otherwise, the operation state may be out of date. ops = self.get_lun_current_ops_state(name, True) if ops != 'None': self._wait_for_a_condition(is_lun_ready_for_ops) def get_pool(self, name, properties=VNXPoolProperties.pool_all, poll=True): data = self.get_pool_properties(('-name', name), properties=properties, poll=poll) return data def get_pool_properties(self, filter_option, properties=VNXPoolProperties.pool_all, poll=True): module_list = ('storagepool', '-list') data = self._get_obj_properties( module_list, filter_option, base_properties=(VNXPoolProperties.POOL_NAME,), adv_properties=tuple(properties), poll=poll) return data def get_lun_properties(self, filter_option, properties=VNXLunProperties.lun_all, poll=True): module_list = ('lun', '-list') data = self._get_obj_properties( module_list, filter_option, base_properties=(VNXLunProperties.LUN_NAME, VNXLunProperties.LUN_ID), adv_properties=tuple(properties), poll=poll) return data def get_pool_feature_properties( self, properties=VNXPoolFeatureProperties.default, poll=True): module_list = ("storagepool", '-feature', '-info') data = self._get_obj_properties( module_list, tuple(), base_properties=(), adv_properties=tuple(properties), poll=poll) return data def _get_obj_properties(self, module_list, filter_option, base_properties=tuple(), adv_properties=tuple(), poll=True): # to do instance check command_get = module_list + filter_option options = [] for prop in adv_properties: option = prop.option if option and option not in options: options.append(option) command_get += tuple(options) out, rc = self.command_execute(*command_get, poll=poll) if rc != 0: self._raise_cli_error(command_get, rc, out) data = VNXCliParser.parse(out, list(base_properties) + list(adv_properties)) LOG.debug('Return Object properties. 
Data: %s', data) return data def _get_property_value(self, out, propertyDescriptor): label = propertyDescriptor.label m = re.search(label, out) if m: if (propertyDescriptor.converter is not None): try: converter = propertyDescriptor.converter if isinstance(converter, staticmethod): converter = converter.__func__ return converter(m.group(1)) except ValueError: LOG.error(_LE("Invalid value for %(key)s, " "value is %(value)s."), {'key': propertyDescriptor.key, 'value': m.group(1)}) return None else: return m.group(1) else: LOG.debug('%s value is not found in the output.', propertyDescriptor.label) return None def check_lun_has_snap(self, lun_id): cmd = ('snap', '-list', '-res', lun_id) rc = self.command_execute(*cmd, poll=False)[1] if rc == 0: LOG.debug("Found snapshots for %s.", lun_id) return True else: return False def get_pool_list(self, properties=VNXPoolProperties.pool_all, poll=True): temp_cache = [] list_cmd = ('storagepool', '-list') for prop in properties: list_cmd += (prop.option,) output_properties = [VNXPoolProperties.POOL_NAME] + properties out, rc = self.command_execute(*list_cmd, poll=poll) if rc != 0: self._raise_cli_error(list_cmd, rc, out) try: for pool in out.strip().split('\n\n'): pool_data = VNXPoolProperties.parse( pool, output_properties) temp_cache.append(pool_data) except Exception as ex: LOG.error(_LE("Error happened during storage pool querying, %s."), ex) # NOTE: Do not want to continue raise the exception # as the pools may be temporarily unavailable pass return temp_cache def get_array_serial(self, poll=False): """return array Serial No for pool backend.""" data = {'array_serial': 'unknown'} command_get_array_serial = ('getagent', '-serial') # Set the property timeout to get array serial out, rc = self.command_execute(*command_get_array_serial, poll=poll) if 0 == rc: m = re.search(r'Serial No:\s+(\w+)', out) if m: data['array_serial'] = m.group(1) else: LOG.warning(_LW("No array serial number returned, " "set as unknown.")) else: self._raise_cli_error(command_get_array_serial, rc, out) return data def get_status_up_ports(self, storage_group_name, io_ports=None, poll=True): """Function to get ports whose status are up.""" cmd_get_hba = ('storagegroup', '-list', '-gname', storage_group_name) out, rc = self.command_execute(*cmd_get_hba, poll=poll) wwns = [] if 0 == rc: _re_hba_sp_pair = re.compile('((\w\w:){15}(\w\w)\s*' + '(SP\s[A-B]){1}\s*(\d*)\s*\n)') _all_hba_sp_pairs = re.findall(_re_hba_sp_pair, out) sps = [each[3] for each in _all_hba_sp_pairs] portid = [each[4] for each in _all_hba_sp_pairs] cmd_get_port = ('port', '-list', '-sp') out, rc = self.command_execute(*cmd_get_port) if 0 != rc: self._raise_cli_error(cmd_get_port, rc, out) for i, sp in enumerate(sps): if io_ports: # Skip ports which are not in io_ports if (sp.split()[1], int(portid[i])) not in io_ports: continue wwn = self.get_port_wwn(sp, portid[i], out) if (wwn is not None) and (wwn not in wwns): LOG.debug('Add wwn:%(wwn)s for sg:%(sg)s.', {'wwn': wwn, 'sg': storage_group_name}) wwns.append(wwn) elif 83 == rc: LOG.warning(_LW("Storage Group %s is not found."), storage_group_name) else: self._raise_cli_error(cmd_get_hba, rc, out) return wwns def get_login_ports(self, storage_group_name, connector_wwpns, io_ports=None): cmd_list_hba = ('port', '-list', '-gname', storage_group_name) out, rc = self.command_execute(*cmd_list_hba) ports = [] wwns = [] connector_hba_list = [] if 0 == rc and out.find('Information about each HBA:') != -1: hba_list = out.split('Information about each SPPORT:')[0].split( 
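                # Everything before the 'Information about each SPPORT:'
                # marker holds the per-HBA blocks; splitting that slice on the
                # HBA header and taking [1:] drops the preamble ahead of the
                # first HBA.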
'Information about each HBA:')[1:] allports = out.split('Information about each SPPORT:')[1] hba_uid_pat = re.compile('HBA\sUID:\s*((\w\w:){15}(\w\w))') for each in hba_list: obj_search = re.search(hba_uid_pat, each) if obj_search and obj_search.group(1). \ replace(':', '')[16:].lower() in connector_wwpns: connector_hba_list.append(each) port_pat = re.compile('SP Name:\s*(SP\s\w)\n\s*' + 'SP Port ID:\s*(\w*)\n\s*' + 'HBA Devicename:.*\n\s*' + 'Trusted:.*\n\s*' + 'Logged In:\s*YES\n') for each in connector_hba_list: ports.extend(re.findall(port_pat, each)) ports = list(set(ports)) if io_ports: ports = filter(lambda po: (po[0].split()[1], int(po[1])) in io_ports, ports) for each in ports: wwn = self.get_port_wwn(each[0], each[1], allports) if wwn: wwns.append(wwn) else: self._raise_cli_error(cmd_list_hba, rc, out) return wwns def get_port_wwn(self, sp, port_id, allports=None): """Returns wwn via sp and port_id :param sp: should be in this format 'SP A' :param port_id: '0' or 0 """ wwn = None if allports is None: allports, rc = self.get_port_output() _re_port_wwn = re.compile('SP Name:\s*' + sp + '\nSP Port ID:\s*' + str(port_id) + '\nSP UID:\s*((\w\w:){15}(\w\w))' + '\nLink Status: Up' + '\nPort Status: Online') _obj_search = re.search(_re_port_wwn, allports) if _obj_search is not None: wwn = _obj_search.group(1).replace(':', '')[16:] return wwn def get_fc_targets(self): out, rc = self.get_port_output() fc_target_dict = {'A': [], 'B': []} _fcport_pat = (r'SP Name: SP\s(\w)\s*' r'SP Port ID:\s*(\w*)\n' r'SP UID:\s*((\w\w:){15}(\w\w))\s*' r'Link Status: Up\n' r'Port Status: Online\n') for m in re.finditer(_fcport_pat, out): sp = m.groups()[0] sp_port_id = m.groups()[1] fc_target_dict[sp].append({'SP': sp, 'Port ID': sp_port_id}) return fc_target_dict def get_port_output(self): cmd_get_port = ('port', '-list', '-sp') out, rc = self.command_execute(*cmd_get_port) if 0 != rc: self._raise_cli_error(cmd_get_port, rc, out) return out, rc def get_connection_getport_output(self): connection_getport_cmd = ('connection', '-getport', '-vlanid') out, rc = self.command_execute(*connection_getport_cmd) if 0 != rc: self._raise_cli_error(connection_getport_cmd, rc, out) return out, rc def _filter_iscsi_ports(self, all_ports, io_ports): """Filter ports in white list from all iSCSI ports.""" new_iscsi_ports = {'A': [], 'B': []} valid_ports = [] for sp in all_ports: for port in all_ports[sp]: port_tuple = (port['SP'], port['Port ID'], port['Virtual Port ID']) if port_tuple in io_ports: new_iscsi_ports[sp].append(port) valid_ports.append(port_tuple) if len(io_ports) != len(valid_ports): invalid_port_set = set(io_ports) - set(valid_ports) for invalid in invalid_port_set: LOG.warning(_LW('Invalid iSCSI port %(sp)s-%(port)s-%(vlan)s ' 'found in io_port_list, will be ignored.'), {'sp': invalid[0], 'port': invalid[1], 'vlan': invalid[2]}) return new_iscsi_ports def get_iscsi_targets(self, poll=False, io_ports=None): cmd_getport = ('connection', '-getport', '-address', '-vlanid') out, rc = self.command_execute(*cmd_getport, poll=poll) if rc != 0: self._raise_cli_error(cmd_getport, rc, out) iscsi_target_dict = {'A': [], 'B': []} iscsi_spport_pat = r'(A|B)\s*' + \ r'Port ID:\s+(\d+)\s*' + \ r'Port WWN:\s+(iqn\S+)' iscsi_vport_pat = r'Virtual Port ID:\s+(\d+)\s*' + \ r'VLAN ID:\s*\S*\s*' + \ r'IP Address:\s+(\S+)' for spport_content in re.split(r'^SP:\s+|\nSP:\s*', out): m_spport = re.match(iscsi_spport_pat, spport_content, flags=re.IGNORECASE) if not m_spport: continue sp = m_spport.group(1) port_id = 
int(m_spport.group(2)) iqn = m_spport.group(3) for m_vport in re.finditer(iscsi_vport_pat, spport_content): vport_id = int(m_vport.group(1)) ip_addr = m_vport.group(2) if ip_addr.find('N/A') != -1: LOG.debug("Skip port without IP Address: %s", m_spport.group(0) + m_vport.group(0)) continue iscsi_target_dict[sp].append({'SP': sp, 'Port ID': port_id, 'Port WWN': iqn, 'Virtual Port ID': vport_id, 'IP Address': ip_addr}) if io_ports: return self._filter_iscsi_ports(iscsi_target_dict, io_ports) return iscsi_target_dict def get_registered_spport_set(self, initiator_iqn, sgname, sg_raw_out): spport_set = set() for m_spport in re.finditer( r'\n\s+%s\s+SP\s.*\n.*\n\s*SPPort:\s+(A|B)-(\d+)v(\d+)\s*\n' % initiator_iqn, sg_raw_out, flags=re.IGNORECASE): spport_set.add((m_spport.group(1), int(m_spport.group(2)), int(m_spport.group(3)))) LOG.debug('See path %(path)s in %(sg)s.', {'path': spport_set, 'sg': sgname}) return spport_set def ping_node(self, target_portal, initiator_ip): connection_pingnode = ('connection', '-pingnode', '-sp', target_portal['SP'], '-portid', target_portal['Port ID'], '-vportid', target_portal['Virtual Port ID'], '-address', initiator_ip, '-count', '1') out, rc = self.command_execute(*connection_pingnode) if rc == 0: ping_ok = re.compile(r'Reply from %s' % initiator_ip) if re.match(ping_ok, out) is not None: LOG.debug("See available iSCSI target: %s", connection_pingnode) return True LOG.warning(_LW("See unavailable iSCSI target: %s"), connection_pingnode) return False def find_available_iscsi_targets(self, hostname, registered_spport_set, all_iscsi_targets): """Finds available iscsi targets for a host. When the iscsi_initiator_map is configured, the driver will find an accessible portal and put it as the first portal in the portal list to ensure the accessible portal will be used when multipath is not used. All the registered portals will be returned for Nova to clean up all the unused devices related to this LUN created by logging into these portals during attaching other LUNs on VNX. """ if self.iscsi_initiator_map and hostname in self.iscsi_initiator_map: iscsi_initiator_ips = list(self.iscsi_initiator_map[hostname]) random.shuffle(iscsi_initiator_ips) else: iscsi_initiator_ips = None target_portals = [] all_portals = all_iscsi_targets['A'] + all_iscsi_targets['B'] random.shuffle(all_portals) for portal in all_portals: spport = (portal['SP'], portal['Port ID'], portal['Virtual Port ID']) if spport not in registered_spport_set: LOG.debug( "Skip SP Port %(port)s since " "no path from %(host)s is through it.", {'port': spport, 'host': hostname}) continue target_portals.append(portal) main_portal_index = None if iscsi_initiator_ips: for i, portal in enumerate(target_portals): for initiator_ip in iscsi_initiator_ips: if self.ping_node(portal, initiator_ip): main_portal_index = i break else: # Else for the for loop. If there is no main portal found, # continue to try next initiator IP. 
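                    # (A clarifying note on Python's for/else: the 'else'
                    # suite runs only when the inner loop finishes without
                    # hitting 'break', i.e. when none of the initiator IPs
                    # could reach this portal, so 'continue' moves on to the
                    # next portal and the outer 'break' below only runs
                    # after a successful ping.)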
                    continue
                break
        if main_portal_index is not None:
            target_portals.insert(0,
                                  target_portals.pop(main_portal_index))
        return target_portals

    def _is_sp_unavailable_error(self, out):
        error_pattern = ('(^Error.*Message.*End of data stream.*)|'
                         '(.*Message.*connection refused.*)|'
                         '(^Error.*Message.*Service Unavailable.*)|'
                         '(^A network error occurred while trying to'
                         ' connect.* )|'
                         '(^Exception: Error occurred because of time out\s*)')
        pattern = re.compile(error_pattern)
        return pattern.match(out)

    @utils.retry(exception.EMCSPUnavailableException, retries=5,
                 interval=30, backoff_rate=1)
    def command_execute(self, *command, **kwargs):
        """Executes a command against the VNX array.

        When the named parameter poll=False is passed, the command is sent
        along with the -np option.
        """
        # NOTE: retry_disable needs to be removed from kwargs before it is
        # passed to utils.execute, otherwise an exception will be thrown.
        retry_disable = kwargs.pop('retry_disable', False)
        # Get the active IP before executing the command.
        current_ip = self.active_storage_ip
        out, rc = self._command_execute_on_active_ip(*command, **kwargs)
        if not retry_disable and self._is_sp_unavailable_error(out):
            # When the active SP is unavailable, switch to the other SP,
            # set it to active and force a poll.
            if self._toggle_sp(current_ip):
                LOG.debug('EMC: Command Exception: %(rc)s %(result)s. '
                          'Retry on another SP.', {'rc': rc, 'result': out})
                # Raise the exception to trigger a retry.
                raise exception.EMCSPUnavailableException(
                    cmd=command, rc=rc, out=out.split('\n'))
        return out, rc

    def _command_execute_on_active_ip(self, *command, **kwargs):
        if "check_exit_code" not in kwargs:
            kwargs["check_exit_code"] = True
        rc = 0
        out = ""
        need_poll = kwargs.pop('poll', True)
        if "-np" not in command and not need_poll:
            command = ("-np",) + command
        try:
            active_ip = (self.active_storage_ip,)
            out, err = utils.execute(
                *(self.command + active_ip + self.credentials + command),
                **kwargs)
        except processutils.ProcessExecutionError as pe:
            rc = pe.exit_code
            out = pe.stdout
            out = out.replace('\n', '\\n')
        LOG.debug('EMC: Command: %(command)s. Result: %(result)s.',
                  {'command': self.command + active_ip + command,
                   'result': out.replace('\n', '\\n')})
        return out, rc

    def _toggle_sp(self, current_ip):
        """Toggle the storage IP.
:param current_ip: active ip before toggle :returns True or False: if toggle happens, return True, otherwise False """ @lockutils.synchronized( 'vnx-toggle-' + self.toggle_lock_name, 'vnx-toggle-', True) def inner(): if self.secondary_storage_ip is None: return False self.active_storage_ip = ( self.secondary_storage_ip if current_ip == self.primary_storage_ip else self.primary_storage_ip) LOG.info(_LI('Toggle san_ip from %(current)s to ' '%(new)s.'), {'current': current_ip, 'new': self.active_storage_ip}) return True return inner() def get_enablers_on_array(self, poll=False): """The function would get all the enablers installed on array.""" enablers = [] cmd_list = ('ndu', '-list') out, rc = self.command_execute(*cmd_list, poll=poll) if rc != 0: self._raise_cli_error(cmd_list, rc, out) else: enabler_pat = r'Name of the software package:\s*(\S+)\s*' for m in re.finditer(enabler_pat, out): enablers.append(m.groups()[0]) LOG.debug('Enablers on array %s.', enablers) return enablers def enable_or_disable_compression_on_lun(self, volumename, compression): """The function will enable or disable the compression on lun.""" lun_data = self.get_lun_by_name(volumename) command_compression_cmd = ('compression', '-' + compression, '-l', lun_data['lun_id'], '-ignoreThresholds', '-o') out, rc = self.command_execute(*command_compression_cmd) if 0 != rc: self._raise_cli_error(command_compression_cmd, rc, out) return rc, out def deregister_initiator(self, initiator_uid): """This function tries to deregister initiators on VNX.""" command_deregister = ('port', '-removeHBA', '-hbauid', initiator_uid, '-o') out, rc = self.command_execute(*command_deregister) return rc, out @decorate_all_methods(log_enter_exit) class EMCVnxCliBase(object): """This class defines the functions to use the native CLI functionality.""" VERSION = '07.00.00' stats = {'driver_version': VERSION, 'storage_protocol': None, 'vendor_name': 'EMC', 'volume_backend_name': None, 'compression_support': 'False', 'fast_support': 'False', 'deduplication_support': 'False', 'thin_provisioning_support': False, 'thick_provisioning_support': True} REPLICATION_KEYS = ['san_ip', 'san_login', 'san_password', 'san_secondary_ip', 'storage_vnx_authentication_type', 'storage_vnx_security_file_dir'] enablers = [] tmp_snap_prefix = 'tmp-snap-' tmp_smp_for_backup_prefix = 'tmp-smp-' snap_as_vol_prefix = 'snap-as-vol-' def __init__(self, prtcl, configuration=None, active_backend_id=None): self.protocol = prtcl self.configuration = configuration self.max_luns_per_sg = self.configuration.max_luns_per_storage_group self.destroy_empty_sg = self.configuration.destroy_empty_storage_group self.itor_auto_reg = self.configuration.initiator_auto_registration self.itor_auto_dereg = self.configuration.initiator_auto_deregistration self.check_max_pool_luns_threshold = ( self.configuration.check_max_pool_luns_threshold) # if zoning_mode is fabric, use lookup service to build itor_tgt_map self.zonemanager_lookup_service = None zm_conf = config.Configuration(manager.volume_manager_opts) if (zm_conf.safe_get('zoning_mode') == 'fabric' or self.configuration.safe_get('zoning_mode') == 'fabric'): from cinder.zonemanager import fc_san_lookup_service as fc_service self.zonemanager_lookup_service = \ fc_service.FCSanLookupService(configuration=configuration) self.max_retries = 5 if self.destroy_empty_sg: LOG.warning(_LW("destroy_empty_storage_group: True. 
" "Empty storage group will be deleted " "after volume is detached.")) if not self.itor_auto_reg: LOG.info(_LI("initiator_auto_registration: False. " "Initiator auto registration is not enabled. " "Please register initiator manually.")) self.hlu_set = set(range(1, self.max_luns_per_sg + 1)) self._client = self._build_client(active_backend_id) self._active_backend_id = active_backend_id # Create connection to the secondary storage device self._mirror = self._build_mirror_view() self.update_enabler_in_volume_stats() # Fail the driver if configuration is not correct if self._mirror: if '-MirrorView/S' not in self.enablers: no_enabler_err = _('MirrorView/S enabler is not installed.') raise exception.VolumeBackendAPIException(data=no_enabler_err) else: self._mirror = None conf_pools = self.configuration.safe_get("storage_vnx_pool_names") self.storage_pools = self._get_managed_storage_pools(conf_pools) self.array_serial = None self.io_ports = self._parse_ports(self.configuration.io_port_list, self.protocol) if self.protocol == 'iSCSI': self.iscsi_targets = self._client.get_iscsi_targets( poll=True, io_ports=self.io_ports) self.hlu_cache = {} self.force_delete_lun_in_sg = ( self.configuration.force_delete_lun_in_storagegroup) if self.force_delete_lun_in_sg: LOG.warning(_LW("force_delete_lun_in_storagegroup=True")) self.max_over_subscription_ratio = ( self.configuration.max_over_subscription_ratio) self.ignore_pool_full_threshold = ( self.configuration.ignore_pool_full_threshold) if self.ignore_pool_full_threshold: LOG.warning(_LW("ignore_pool_full_threshold: True. " "LUN creation will still be forced " "even if the pool full threshold is exceeded.")) self.reserved_percentage = self.configuration.reserved_percentage def _get_managed_storage_pools(self, pools): storage_pools = set() if pools: storage_pools = set([po.strip() for po in pools.split(",")]) array_pools = self._client.get_pool_list( [VNXPoolProperties.POOL_STATE], False) array_pools = set([po['pool_name'] for po in array_pools]) un_exist_pools = storage_pools.difference(array_pools) storage_pools.difference_update(un_exist_pools) if not storage_pools: msg = _("All the specified storage pools to be managed " "do not exist. Please check your configuration. " "Non-existent pools: %s") % ",".join(un_exist_pools) raise exception.VolumeBackendAPIException(data=msg) if un_exist_pools: LOG.warning(_LW("The following specified storage pools " "do not exist: %(unexist)s. " "This host will only manage the storage " "pools: %(exist)s"), {'unexist': ",".join(un_exist_pools), 'exist': ",".join(storage_pools)}) else: LOG.debug("This host will manage the storage pools: %s.", ",".join(storage_pools)) else: LOG.debug("No storage pool is configured. 
This host will " "manage all the pools on the VNX system.") return storage_pools def _parse_ports(self, io_port_list, protocol): """Validates IO port format, supported format is a-1, b-3, a-3-0.""" if not io_port_list or io_port_list == '*': return None ports = re.split('\s*,\s*', io_port_list) valid_ports = [] invalid_ports = [] if 'iSCSI' == protocol: out, rc = self._client.get_connection_getport_output() for port in ports: port_tuple = port.split('-') if (re.match('[abAB]-\d+-\d+$', port) and self._validate_iscsi_port( port_tuple[0], port_tuple[1], port_tuple[2], out)): valid_ports.append( (port_tuple[0].upper(), int(port_tuple[1]), int(port_tuple[2]))) else: invalid_ports.append(port) elif 'FC' == protocol: out, rc = self._client.get_port_output() for port in ports: port_tuple = port.split('-') if re.match('[abAB]-\d+$', port) and self._validate_fc_port( port_tuple[0], port_tuple[1], out): valid_ports.append( (port_tuple[0].upper(), int(port_tuple[1]))) else: invalid_ports.append(port) if len(invalid_ports) > 0: msg = _('Invalid %(protocol)s ports %(port)s specified ' 'for io_port_list.') % {'protocol': self.protocol, 'port': ','.join(invalid_ports)} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return valid_ports def _validate_iscsi_port(self, sp, port_id, vport_id, cmd_output): """Validates whether the iSCSI port is existed on VNX.""" sp_port_pattern = (r'SP:\s+%(sp)s\nPort ID:\s+%(port_id)s\n' % {'sp': sp.upper(), 'port_id': port_id}) sp_port_fields = re.split(sp_port_pattern, cmd_output) if len(sp_port_fields) < 2: return False sp_port_info = re.split('SP:\s+(A|B)', sp_port_fields[1])[0] vport_pattern = '\nVirtual Port ID:\s+%s\nVLAN ID:' % vport_id return re.search(vport_pattern, sp_port_info) is not None def _validate_fc_port(self, sp, port_id, cmd_output): """Validates whether the FC port is existed on VNX""" fc_pattern = ('SP Name:\s*SP\s*' + sp.upper() + '\nSP Port ID:\s*' + str(port_id) + '\nSP UID:\s*((\w\w:){15}(\w\w))') return re.search(fc_pattern, cmd_output) def get_array_serial(self): if not self.array_serial: self.array_serial = self._client.get_array_serial() return self.array_serial['array_serial'] def _construct_store_spec(self, volume, snapshot): if snapshot['cgsnapshot_id']: # Snapshot is part of cg snapshot snapshot_name = snapshot['cgsnapshot_id'] else: snapshot_name = snapshot['name'] new_snap_name = snapshot_name if self._is_snapcopy_enabled(volume): new_snap_name = self._construct_snap_name(volume) pool_name = self.get_target_storagepool(volume, snapshot['volume']) volume_name = volume['name'] volume_size = snapshot['volume_size'] dest_volume_name = volume_name + '_dest' specs = self.get_volumetype_extraspecs(volume) provisioning, tiering = self._get_extra_spec_value(specs) store_spec = { 'volume': volume, 'src_snap_name': snapshot_name, 'new_snap_name': new_snap_name, 'dest_vol_name': dest_volume_name, 'pool_name': pool_name, 'provisioning': provisioning, 'tiering': tiering, 'volume_size': volume_size, 'client': self._client, 'ignore_pool_full_threshold': self.ignore_pool_full_threshold } return store_spec def _construct_snap_name(self, volume): """Returns snapshot or cgsnapshot name.""" if self._is_snapcopy_enabled(volume): return self.snap_as_vol_prefix + six.text_type(volume['name_id']) else: return self.tmp_snap_prefix + six.text_type(volume['name_id']) def _construct_tmp_smp_name(self, snapshot): return self.tmp_smp_for_backup_prefix + snapshot.id def _construct_mirror_name(self, volume): return 'mirror_' + volume.id def 
create_volume(self, volume):
        """Creates an EMC volume."""
        volume_size = volume['size']
        volume_name = volume['name']
        self._volume_creation_check(volume)
        volume_metadata = self._get_volume_metadata(volume)
        # Collect the inputs for the CLI command.
        specs = self.get_volumetype_extraspecs(volume)
        pool = self.get_target_storagepool(volume)
        provisioning, tiering = self._get_extra_spec_value(specs)
        if 'snapcopy' in volume_metadata:
            # Ignore snapcopy metadata when creating a volume.
            LOG.warning(_LW('snapcopy metadata is ignored when'
                            ' creating volume.'))
            volume_metadata['snapcopy'] = 'False'
        LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
                     'pool: %(pool)s '
                     'provisioning: %(provisioning)s '
                     'tiering: %(tiering)s '),
                 {'volume': volume_name,
                  'size': volume_size,
                  'pool': pool,
                  'provisioning': provisioning,
                  'tiering': tiering})
        data = self._client.create_lun_with_advance_feature(
            pool, volume_name, volume_size,
            provisioning, tiering, volume['consistencygroup_id'],
            ignore_thresholds=self.ignore_pool_full_threshold,
            poll=False)
        pl = self._build_provider_location(lun_id=data['lun_id'],
                                           base_lun_name=volume['name'])
        # Set up LUN replication/MirrorView between the devices; the
        # secondary LUN will inherit properties from the primary LUN.
        rep_update, metadata_update = self.setup_lun_replication(
            volume, data['lun_id'], provisioning, tiering)
        volume_metadata.update(metadata_update)
        model_update = {'provider_location': pl,
                        'metadata': volume_metadata}
        model_update.update(rep_update)
        return model_update

    def _volume_creation_check(self, volume):
        """Checks extra specs before the volume can be created."""
        specs = self.get_volumetype_extraspecs(volume)
        self._get_and_validate_extra_specs(specs)

    def _get_migration_rate(self, volume):
        metadata = self._get_volume_metadata(volume)
        rate = metadata.get('migrate_rate', VNXMigrationRate.HIGH)
        if rate:
            if rate.lower() in VNXMigrationRate.get_all():
                return rate.lower()
            else:
                LOG.warning(_LW('Unknown migration rate specified, '
                                'using [high] as migration rate.'))
        return VNXMigrationRate.HIGH

    def _get_and_validate_extra_specs(self, specs):
        """Checks extra spec combinations."""
        if "storagetype:pool" in specs:
            LOG.warning(_LW("Extra spec key 'storagetype:pool' has been "
                            "obsolete since driver version 5.1.0. This key "
                            "will be ignored."))
        provisioning, tiering = self._get_extra_spec_value(specs)
        # Step 1: check each extra spec value.
        if provisioning:
            self._check_extra_spec_value(
                provisioning,
                VNXProvisionEnum.get_all())
        if tiering:
            self._check_extra_spec_value(
                tiering,
                VNXTieringEnum.get_all())
        # Step 2: check the extra spec combination.
        self._check_extra_spec_combination([provisioning, tiering])
        return provisioning, tiering

    def _check_extra_spec_value(self, extra_spec, valid_values):
        """Checks whether an extra spec's value is valid."""
        if not extra_spec or not valid_values:
            LOG.error(_LE('The given extra_spec or valid_values is None.'))
        elif extra_spec not in valid_values:
            msg = _("The extra_spec: %s is invalid.") % extra_spec
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return

    def _get_extra_spec_value(self, extra_specs):
        """Gets EMC extra spec values."""
        provisioning = 'thick'
        if self._client.provisioning_specs[0] in extra_specs:
            provisioning = (
                extra_specs[self._client.provisioning_specs[0]].lower())
            if self._client.provisioning_specs[1] in extra_specs:
                LOG.warning(_LW("Both 'storagetype:provisioning' and "
                                "'provisioning:type' are set in the "
                                "extra specs, the value of "
                                "'provisioning:type' will be used. The "
                                "key 'storagetype:provisioning' may be "
                                "deprecated in the next release."))
        elif self._client.provisioning_specs[1] in extra_specs:
            provisioning = (
                extra_specs[self._client.provisioning_specs[1]].lower())
            LOG.warning(_LW("Extra spec key 'storagetype:provisioning' may "
                            "be deprecated in the next release. It is "
                            "recommended to use extra spec key "
                            "'provisioning:type' instead."))
        tiering = extra_specs.get(
            self._client.tiering_spec, 'None').lower()
        return provisioning, tiering

    def _check_extra_spec_combination(self, spec_values):
        """Checks whether the extra spec combination is valid."""
        enablers = self.enablers
        # Check provisioning and tiering: deduplicated provisioning and
        # auto tiering cannot both be enabled.
        provisioning, tiering = spec_values
        if provisioning == 'deduplicated' and tiering != 'none':
            msg = _("Deduplicated provisioning and auto tiering cannot "
                    "both be enabled.")
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        elif provisioning == 'compressed' and '-Compression' not in enablers:
            msg = _("Compression Enabler is not installed. "
                    "Cannot create compressed volume.")
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        elif provisioning == 'deduplicated' and \
                '-Deduplication' not in enablers:
            msg = _("Deduplication Enabler is not installed. "
                    "Cannot create deduplicated volume.")
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        elif provisioning in ['thin', 'deduplicated', 'compressed'] and \
                '-ThinProvisioning' not in enablers:
            msg = _("ThinProvisioning Enabler is not installed. "
                    "Cannot create thin volume.")
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        elif tiering != 'none' and '-FAST' not in enablers:
            msg = _("FAST VP Enabler is not installed. "
                    "Cannot set tiering policy for the volume.")
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return

    def delete_volume(self, volume, force_delete=False):
        """Deletes an EMC volume."""
        if self._is_replication_enabled(volume):
            self.cleanup_lun_replication(volume)
        try:
            self._client.delete_lun(volume['name'])
        except exception.EMCVnxCLICmdError as ex:
            orig_out = "\n".join(ex.kwargs["out"])
            if ((force_delete or self.force_delete_lun_in_sg) and
                    VNXError.has_error(orig_out, VNXError.LUN_IN_SG)):
                LOG.warning(_LW('LUN corresponding to %s is still '
                                'in some Storage Groups. '
                                'Try to bring the LUN out of Storage Groups '
                                'and retry the deletion.'),
                            volume['name'])
                lun_id = self.get_lun_id(volume)
                for hlu, sg in self._client.get_hlus(lun_id):
                    self._client.remove_hlu_from_storagegroup(hlu, sg)
                self._client.delete_lun(volume['name'])
            else:
                with excutils.save_and_reraise_exception():
                    # Reraise the original exception.
                    pass
        if volume['provider_location']:
            lun_type = self._extract_provider_location(
                volume['provider_location'], 'type')
            if lun_type == 'smp':
                self._client.delete_snapshot(
                    self._construct_snap_name(volume))

    def extend_volume(self, volume, new_size):
        """Extends an EMC volume."""
        try:
            self._client.expand_lun_and_wait(volume['name'], new_size)
        except exception.EMCVnxCLICmdError as ex:
            with excutils.save_and_reraise_exception(ex) as ctxt:
                out = "\n".join(ex.kwargs["out"])
                if VNXError.has_error(out, VNXError.LUN_IS_PREPARING):
                    # The error means the operation cannot be performed
                    # because the LUN is 'Preparing'. Wait for a while
                    # so that the LUN may get out of the transitioning
                    # state.
LOG.warning(_LW("LUN %(name)s is not ready for extension: " "%(out)s"), {'name': volume['name'], 'out': out}) self._client.wait_until_lun_ready_for_ops(volume['name']) self._client.expand_lun_and_wait(volume['name'], new_size) ctxt.reraise = False else: ctxt.reraise = True def _get_original_status(self, volume): if not volume['volume_attachment']: return 'available' else: return 'in-use' def _is_valid_for_storage_assisted_migration( self, volume, host, new_type=None): """Check the src and dest volume to decide the migration type.""" false_ret = (False, None) if 'location_info' not in host['capabilities']: LOG.warning(_LW("Failed to get target_pool_name and " "target_array_serial. 'location_info' " "is not in host['capabilities'].")) return false_ret # mandatory info should be ok info = host['capabilities']['location_info'] LOG.debug("Host for migration is %s.", info) try: info_detail = info.split('|') target_pool_name = info_detail[0] target_array_serial = info_detail[1] except AttributeError: LOG.warning(_LW("Error on parsing target_pool_name/" "target_array_serial.")) return false_ret # source and destination should be on same array array_serial = self.get_array_serial() if target_array_serial != array_serial: LOG.debug('Skip storage-assisted migration because ' 'target and source backend are not managing' 'the same array.') return false_ret if len(target_pool_name) == 0: # Destination host is using a legacy driver LOG.warning(_LW("Didn't get the pool information of the " "host %s. Storage assisted Migration is not " "supported. The host may be using a legacy " "driver."), host['name']) return false_ret # Same protocol should be used if volume is in-use if host['capabilities']['storage_protocol'] != self.protocol \ and self._get_original_status(volume) == 'in-use': LOG.debug('Skip storage-assisted migration because ' 'in-use volume can not be ' 'migrate between different protocols.') return false_ret return (True, target_pool_name) def migrate_volume(self, ctxt, volume, host, new_type=None): """Leverage the VNX on-array migration functionality. This method is invoked at the source backend. 
""" false_ret = (False, None) is_valid, target_pool_name = \ self._is_valid_for_storage_assisted_migration( volume, host, new_type) if not is_valid: return false_ret specs = None if new_type is not None: specs = new_type.get('extra_specs') return self._migrate_volume(volume, target_pool_name, specs) def _migrate_volume(self, volume, target_pool_name, type_specs=None, src_id=None): LOG.debug("Starting real storage-assisted migration...") # first create a new volume with same name and size of source volume volume_name = volume['name'] new_volume_name = "%(src)s-%(ts)s" % {'src': volume_name, 'ts': int(time.time())} if src_id is None: src_id = self.get_lun_id(volume) if type_specs is not None: provisioning, tiering = self._get_extra_spec_value( type_specs) else: provisioning, tiering = self._get_extra_spec_value( self.get_volumetype_extraspecs(volume)) data = self._client.create_lun_with_advance_feature( target_pool_name, new_volume_name, volume['size'], provisioning, tiering, ignore_thresholds=self.ignore_pool_full_threshold) dst_id = data['lun_id'] moved = self._client.migrate_lun_with_verification( src_id, dst_id, new_volume_name, rate=self._get_migration_rate(volume)) lun_type = self._extract_provider_location( volume['provider_location'], 'type') # A smp will become a LUN after migration if lun_type == 'smp': self._client.delete_snapshot( self._construct_snap_name(volume)) pl = self._build_provider_location(src_id, 'lun', base_lun_name=volume['name']) volume_metadata = self._get_volume_metadata(volume) volume_metadata['snapcopy'] = 'False' model_update = {'provider_location': pl, 'metadata': volume_metadata} return moved, model_update def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Updates metadata after host-assisted migration.""" lun_type = self._extract_provider_location( new_volume['provider_location'], 'type') volume_metadata = self._get_volume_metadata(volume) model_update = {'provider_location': new_volume['provider_location']} if lun_type: volume_metadata['snapcopy'] = ( 'True' if lun_type == 'smp' else 'False') model_update['metadata'] = volume_metadata return model_update def retype(self, ctxt, volume, new_type, diff, host): new_specs = new_type['extra_specs'] new_provisioning, new_tiering = ( self._get_and_validate_extra_specs(new_specs)) # Check what changes are needed changes = self.determine_changes_when_retype(volume, new_type, host) if self._client.check_lun_has_snap(self.get_lun_id(volume)): # Reject if volume has snapshot when migration is needed if changes['migration']: LOG.debug('Driver is not able to do retype because the volume ' '%s has a snapshot which is forbidden to migrate.', volume['id']) return False # Reject if volume has snapshot when trying to # turn on compression if changes['compression_on']: LOG.debug('Driver is not able to do retype because the volume ' '%s has a snapshot which is forbidden to turn on ' 'compression.', volume['id']) return False if changes['migration']: # Check whether the migration is valid is_valid, target_pool_name = ( self._is_valid_for_storage_assisted_migration( volume, host, new_type)) if is_valid: specs = None if new_type is not None: specs = new_type.get('extra_specs') moved, model_update = self._migrate_volume( volume, target_pool_name, specs) if moved: return moved, model_update else: LOG.warning(_LW('Storage-assisted migration failed during ' 'retype.')) return False else: # Migration is invalid LOG.debug('Driver is not able to do retype due to ' 'storage-assisted migration is not 
valid ' 'in this situation.') return False if changes['compression_on']: # Turn on compression feature on the volume self._client.enable_or_disable_compression_on_lun( volume['name'], 'on') if changes['tiering']: # Modify lun to change tiering policy self._client.modify_lun_tiering_by_name(volume['name'], new_tiering) return True def determine_changes_when_retype(self, volume, new_type, host): changes = { 'migration': False, 'tiering': False, 'compression_on': False } old_specs = self.get_volumetype_extraspecs(volume) old_provisioning, old_tiering = ( self._get_extra_spec_value(old_specs)) new_specs = new_type['extra_specs'] new_provisioning, new_tiering = ( self._get_extra_spec_value(new_specs)) lun_type = self._extract_provider_location( volume['provider_location'], 'type') if volume['host'] != host['host']: changes['migration'] = True elif old_provisioning != new_provisioning: if (old_provisioning in ['thin', 'thick'] and new_provisioning == 'compressed'): changes['compression_on'] = True else: changes['migration'] = True if lun_type == 'smp': changes['migration'] = True if new_tiering != old_tiering: changes['tiering'] = True return changes def determine_all_enablers_exist(self, enablers): """Determine all wanted enablers whether exist.""" wanted = ['-ThinProvisioning', '-Deduplication', '-FAST', '-Compression'] for each in wanted: if each not in enablers: return False return True def _build_pool_stats(self, pool, pool_feature=None): pool_stats = { 'pool_name': pool['pool_name'], 'total_capacity_gb': pool['total_capacity_gb'], 'provisioned_capacity_gb': (pool['provisioned_capacity_gb']) } # Handle pool state Initializing, Ready, Faulted, Offline or Deleting. if pool['state'] in ('Initializing', 'Offline', 'Deleting'): pool_stats['free_capacity_gb'] = 0 LOG.warning(_LW("Storage Pool '%(pool)s' is '%(state)s'."), {'pool': pool_stats['pool_name'], 'state': pool['state']}) else: pool_stats['free_capacity_gb'] = pool['free_capacity_gb'] if self.check_max_pool_luns_threshold: pool_feature = self._client.get_pool_feature_properties( poll=False) if not pool_feature else pool_feature if (pool_feature['max_pool_luns'] <= pool_feature['total_pool_luns']): LOG.warning(_LW("Maximum number of Pool LUNs, %s, " "have been created. " "No more LUN creation can be done."), pool_feature['max_pool_luns']) pool_stats['free_capacity_gb'] = 0 if not self.reserved_percentage: # Since the admin is not sure of what value is proper, # the driver will calculate the recommended value. # Some extra capacity will be used by meta data of pool LUNs. # The overhead is about LUN_Capacity * 0.02 + 3 GB # reserved_percentage will be used to make sure the scheduler # takes the overhead into consideration. 
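            # Worked example (hypothetical numbers) for the estimate below:
            # with free_capacity_gb = 100 and total_capacity_gb = 200,
            #   reserved = ((0.02 * 100 + 3) / (1.02 * 200)) * 100 ~= 2.45
            # and, if pool_full_threshold = 70 is not ignored, the final
            # value becomes int(math.ceil(2.45 + (100 - 70))) = 33.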
# Assume that all the remaining capacity is to be used to create # a thick LUN, reserved_percentage is estimated as follows: reserved = (((0.02 * pool['free_capacity_gb'] + 3) / (1.02 * pool['total_capacity_gb'])) * 100) # Take pool full threshold into consideration if not self.ignore_pool_full_threshold: reserved += 100 - pool['pool_full_threshold'] pool_stats['reserved_percentage'] = int(math.ceil(min(reserved, 100))) else: pool_stats['reserved_percentage'] = self.reserved_percentage array_serial = self.get_array_serial() pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' % {'pool_name': pool['pool_name'], 'array_serial': array_serial}) # Check if this pool's fast_cache is enabled if 'fast_cache_enabled' not in pool: pool_stats['fast_cache_enabled'] = 'False' else: pool_stats['fast_cache_enabled'] = pool['fast_cache_enabled'] # Copy advanced feature stats from backend stats pool_stats['compression_support'] = self.stats['compression_support'] pool_stats['fast_support'] = self.stats['fast_support'] pool_stats['deduplication_support'] = ( self.stats['deduplication_support']) # Thin provisioning is supported on VNX pools only when # ThinProvisioning Enabler software is installed on VNX, # and thick provisioning is always supported on VNX pools. pool_stats['thin_provisioning_support'] = ( self.stats['thin_provisioning_support']) pool_stats['thick_provisioning_support'] = True pool_stats['consistencygroup_support'] = ( self.stats['consistencygroup_support']) pool_stats['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) # Add replication V2 support targets = [] if self._mirror: pool_stats['replication_enabled'] = True pool_stats['replication_count'] = 1 pool_stats['replication_type'] = ['sync'] for device in self.configuration.replication_device: targets.append(device['backend_id']) else: pool_stats['replication_enabled'] = False pool_stats['replication_targets'] = targets return pool_stats def update_enabler_in_volume_stats(self): """Updates the enabler information in stats.""" if not self.determine_all_enablers_exist(self.enablers): self.enablers = self._client.get_enablers_on_array() self.stats['compression_support'] = ( 'True' if '-Compression' in self.enablers else 'False') self.stats['fast_support'] = ( 'True' if '-FAST' in self.enablers else 'False') self.stats['deduplication_support'] = ( 'True' if '-Deduplication' in self.enablers else 'False') self.stats['thin_provisioning_support'] = ( True if '-ThinProvisioning' in self.enablers else False) self.stats['consistencygroup_support'] = ( 'True' if '-VNXSnapshots' in self.enablers else 'False') return self.stats def create_snapshot(self, snapshot): """Creates a snapshot.""" snapshot_name = snapshot['name'] volume_name = snapshot['volume_name'] volume = snapshot['volume'] LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s'), {'snapshot': snapshot_name, 'volume': volume_name}) lun_id = self.get_lun_id(volume) try: self._client.create_snapshot(lun_id, snapshot_name) except exception.EMCVnxCLICmdError as ex: with excutils.save_and_reraise_exception(ex) as ctxt: out = "\n".join(ex.kwargs["out"]) if VNXError.has_error(out, VNXError.LUN_IS_PREPARING): # The error means the operation cannot be performed # because the LUN is 'Preparing'. Wait for a while # so that the LUN may get out of the transitioning # state. 
LOG.warning(_LW("LUN %(name)s is not ready for snapshot: " "%(out)s"), {'name': volume_name, 'out': out}) self._client.wait_until_lun_ready_for_ops(volume['name']) self._client.create_snapshot(lun_id, snapshot_name) ctxt.reraise = False else: ctxt.reraise = True def delete_snapshot(self, snapshot): """Deletes a snapshot.""" snapshot_name = snapshot['name'] LOG.info(_LI('Delete Snapshot: %(snapshot)s'), {'snapshot': snapshot_name}) self._client.delete_snapshot(snapshot_name) def create_volume_from_snapshot(self, volume, snapshot): """Constructs a work flow to create a volume from snapshot. This flow will do the following: 1. Create a snap mount point (SMP) for the snapshot. 2. Attach the snapshot to the SMP created in the first step. 3. Create a temporary lun prepare for migration. (Skipped if snapcopy='true') 4. Start a migration between the SMP and the temp lun. (Skipped if snapcopy='true') """ self._volume_creation_check(volume) flow_name = 'create_volume_from_snapshot' base_lun_name = self._get_base_lun_name(snapshot.volume) work_flow = linear_flow.Flow(flow_name) store_spec = self._construct_store_spec(volume, snapshot) store_spec.update({'base_lun_name': base_lun_name}) volume_metadata = self._get_volume_metadata(volume) rep_update = {} if self._is_snapcopy_enabled(volume): if self._is_replication_enabled(volume): err_msg = _("Unable to enable replication " "and snapcopy at the same time.") raise exception.VolumeBackendAPIException(data=err_msg) work_flow.add(CopySnapshotTask(), AllowReadWriteOnSnapshotTask(), CreateSMPTask(), AttachSnapTask()) flow_engine = taskflow.engines.load(work_flow, store=store_spec) flow_engine.run() new_lun_id = flow_engine.storage.fetch('new_smp_id') pl = self._build_provider_location( new_lun_id, 'smp', base_lun_name) volume_metadata['snapcopy'] = 'True' else: store_spec.update({'rate': self._get_migration_rate(volume)}) work_flow.add(CreateSMPTask(), AttachSnapTask(), CreateDestLunTask(), MigrateLunTask()) flow_engine = taskflow.engines.load(work_flow, store=store_spec) flow_engine.run() new_lun_id = flow_engine.storage.fetch('new_lun_id') pl = self._build_provider_location( new_lun_id, 'lun', volume['name']) volume_metadata['snapcopy'] = 'False' # Setup LUN Replication/MirrorView between devices, # secondary LUN will inherit properties from primary LUN. 
rep_update, metadata_update = self.setup_lun_replication( volume, new_lun_id, store_spec['provisioning'], store_spec['tiering']) volume_metadata.update(metadata_update) model_update = {'provider_location': pl, 'metadata': volume_metadata} model_update.update(rep_update) volume_host = volume['host'] host = vol_utils.extract_host(volume_host, 'backend') host_and_pool = vol_utils.append_host(host, store_spec['pool_name']) if volume_host != host_and_pool: model_update['host'] = host_and_pool return model_update def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" self._volume_creation_check(volume) base_lun_name = self._get_base_lun_name(src_vref) source_lun_id = self.get_lun_id(src_vref) volume_size = volume['size'] source_volume_name = src_vref['name'] consistencygroup_id = src_vref['consistencygroup_id'] cgsnapshot_name = None if consistencygroup_id: cgsnapshot_name = self._construct_snap_name(volume) snapshot_name = self._construct_snap_name(volume) snapshot = { 'name': snapshot_name, 'volume_name': source_volume_name, 'volume_size': volume_size, 'volume': src_vref, 'cgsnapshot_id': cgsnapshot_name, 'consistencygroup_id': consistencygroup_id, 'id': cgsnapshot_name } flow_name = 'create_cloned_volume' store_spec = self._construct_store_spec(volume, snapshot) work_flow = linear_flow.Flow(flow_name) store_spec.update({'snapshot': snapshot}) store_spec.update({'source_lun_id': source_lun_id}) store_spec.update({'base_lun_name': base_lun_name}) volume_metadata = self._get_volume_metadata(volume) rep_update = {} if self._is_snapcopy_enabled(volume): # snapcopy feature enabled if self._is_replication_enabled(volume): err_msg = _("Unable to enable replication " "and snapcopy at the same time.") raise exception.VolumeBackendAPIException(data=err_msg) work_flow.add(CreateSnapshotTask(), CreateSMPTask(), AttachSnapTask()) flow_engine = taskflow.engines.load(work_flow, store=store_spec) flow_engine.run() new_lun_id = flow_engine.storage.fetch('new_smp_id') pl = self._build_provider_location( new_lun_id, 'smp', base_lun_name) else: # snapcopy feature disabled, need to migrate store_spec.update({'rate': self._get_migration_rate(volume)}) work_flow.add(CreateSnapshotTask(), CreateSMPTask(), AttachSnapTask(), CreateDestLunTask(), MigrateLunTask()) flow_engine = taskflow.engines.load(work_flow, store=store_spec) flow_engine.run() new_lun_id = flow_engine.storage.fetch('new_lun_id') # Delete temp Snapshot if consistencygroup_id: self._client.delete_cgsnapshot(snapshot['id']) else: self.delete_snapshot(snapshot) # After migration, volume's base lun is itself pl = self._build_provider_location( new_lun_id, 'lun', volume['name']) volume_metadata['snapcopy'] = 'False' # Setup LUN Replication/MirrorView between devices, # secondary LUN will inherit properties from primary LUN. rep_update, metadata_update = self.setup_lun_replication( volume, new_lun_id, store_spec['provisioning'], store_spec['tiering']) volume_metadata.update(metadata_update) model_update = {'provider_location': pl, 'metadata': volume_metadata} model_update.update(rep_update) volume_host = volume['host'] host = vol_utils.extract_host(volume_host, 'backend') host_and_pool = vol_utils.append_host(host, store_spec['pool_name']) if volume_host != host_and_pool: model_update['host'] = host_and_pool return model_update def _get_volume_metadata(self, volume): # Since versionedobjects is partially merged, metadata # may come from 'volume_metadata' or 'metadata', here # we need to take care both of them. 
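        # For illustration, the two shapes handled here are, e.g.:
        #   {'volume_metadata': [{'key': 'snapcopy', 'value': 'True'}, ...]}
        #   {'metadata': {'snapcopy': 'True'}}
        # and both normalize to a plain dict such as {'snapcopy': 'True'}.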
volume_metadata = {} if 'volume_metadata' in volume: for metadata in volume['volume_metadata']: volume_metadata[metadata['key']] = metadata['value'] return volume_metadata return volume['metadata'] if 'metadata' in volume else {} def _is_snapcopy_enabled(self, volume): meta = self._get_volume_metadata(volume) return 'snapcopy' in meta and meta['snapcopy'].lower() == 'true' def _get_base_lun_name(self, volume): """Returns base LUN name for SMP or LUN.""" base_name = self._extract_provider_location( volume['provider_location'], 'base_lun_name') if base_name is None or base_name == 'None': return volume['name'] return base_name def dumps_provider_location(self, pl_dict): return '|'.join([k + '^' + pl_dict[k] for k in pl_dict]) def _build_provider_location(self, lun_id, type='lun', base_lun_name=None): """Builds provider_location for volume or snapshot. :param lun_id: LUN ID in VNX :param type: 'lun' or 'smp' :param base_lun_name: primary LUN name, it will be used when creating snap lun """ pl_dict = {'system': self.get_array_serial(), 'type': type, 'id': six.text_type(lun_id), 'base_lun_name': six.text_type(base_lun_name), 'version': self.VERSION} return self.dumps_provider_location(pl_dict) def _update_provider_location(self, provider_location, key=None, value=None): pl_dict = {tp.split('^')[0]: tp.split('^')[1] for tp in provider_location.split('|')} pl_dict[key] = value return self.dumps_provider_location(pl_dict) def _extract_provider_location(self, provider_location, key='id'): """Extracts value of the specified field from provider_location string. :param provider_location: provider_location string :param key: field name of the value that to be extracted :return: value of the specified field if it exists, otherwise, None is returned """ kvps = provider_location.split('|') for kvp in kvps: fields = kvp.split('^') if len(fields) == 2 and fields[0] == key: return fields[1] def _consistencygroup_creation_check(self, group): """Check extra spec for consistency group.""" if group.get('volume_type_id') is not None: for id in group['volume_type_id'].split(","): if id: provisioning, tiering = ( self._get_extra_spec_value( volume_types.get_volume_type_extra_specs(id))) if provisioning == 'compressed': msg = _("Failed to create consistency group %s " "because VNX consistency group cannot " "accept compressed LUNs as members." 
) % group['id'] raise exception.VolumeBackendAPIException(data=msg) def create_consistencygroup(self, context, group): """Creates a consistency group.""" LOG.info(_LI('Start to create consistency group: %(group_name)s ' 'id: %(id)s'), {'group_name': group['name'], 'id': group['id']}) self._consistencygroup_creation_check(group) model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} try: self._client.create_consistencygroup(group['id']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Create consistency group %s failed.'), group['id']) return model_update def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" cg_name = group['id'] model_update = {} volumes_model_update = [] model_update['status'] = group['status'] LOG.info(_LI('Start to delete consistency group: %(cg_name)s'), {'cg_name': cg_name}) try: self._client.delete_consistencygroup(cg_name) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Delete consistency group %s failed.'), cg_name) for volume_ref in volumes: try: self._client.delete_lun(volume_ref['name']) volumes_model_update.append( {'id': volume_ref['id'], 'status': 'deleted'}) except Exception: volumes_model_update.append( {'id': volume_ref['id'], 'status': 'error_deleting'}) return model_update, volumes_model_update def update_consistencygroup(self, context, group, add_volumes, remove_volumes): """Adds or removes LUN(s) to/from an existing consistency group""" model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} cg_name = group['id'] add_ids = [six.text_type(self.get_lun_id(vol)) for vol in add_volumes] if add_volumes else [] remove_ids = [six.text_type(self.get_lun_id(vol)) for vol in remove_volumes] if remove_volumes else [] data = self._client.get_consistency_group_by_name(cg_name) ids_curr = data['Luns'] ids_later = [] if ids_curr: ids_later.extend(ids_curr) ids_later.extend(add_ids) for remove_id in remove_ids: if remove_id in ids_later: ids_later.remove(remove_id) else: LOG.warning(_LW("LUN with id %(remove_id)s is not present " "in cg %(cg_name)s, skip it."), {'remove_id': remove_id, 'cg_name': cg_name}) # Remove all from cg if not ids_later: self._client.remove_luns_from_consistencygroup(cg_name, ids_curr) else: self._client.replace_luns_in_consistencygroup(cg_name, ids_later) return model_update, None, None def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot (snap group).""" cgsnapshot_id = cgsnapshot['id'] model_update = {} snapshots_model_update = [] LOG.info(_LI('Start to create cgsnapshot for consistency group' ': %(group_name)s'), {'group_name': cgsnapshot['consistencygroup_id']}) try: self._client.create_cgsnapshot(cgsnapshot['consistencygroup_id'], cgsnapshot['id']) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot['id'], 'status': 'available'}) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Create cg snapshot %s failed.'), cgsnapshot_id) model_update['status'] = 'available' return model_update, snapshots_model_update def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot (snap group).""" cgsnapshot_id = cgsnapshot['id'] model_update = {} snapshots_model_update = [] model_update['status'] = cgsnapshot['status'] LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: ' '%(group_name)s'), {'snap_name': cgsnapshot['id'], 'group_name': cgsnapshot['consistencygroup_id']}) try: 
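            # The member snapshots are deleted together with the cgsnapshot
            # on the array, so each one is simply marked 'deleted' below
            # without a separate per-snapshot CLI call.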
self._client.delete_cgsnapshot(cgsnapshot['id']) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot['id'], 'status': 'deleted'}) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Delete cgsnapshot %s failed.'), cgsnapshot_id) return model_update, snapshots_model_update def get_lun_id(self, volume): lun_id = None try: provider_location = volume.get('provider_location') if provider_location: lun_id = self._extract_provider_location( provider_location, 'id') if lun_id: lun_id = int(lun_id) else: LOG.debug('Lun id is not stored in provider location, ' 'query it.') lun_id = self._client.get_lun_by_name(volume['name'])['lun_id'] except Exception as ex: LOG.debug('Exception when getting lun id: %s.', six.text_type(ex)) lun_id = self._client.get_lun_by_name(volume['name'])['lun_id'] LOG.debug('Get lun_id: %s.', lun_id) return lun_id def get_lun_map(self, storage_group): data = self._client.get_storage_group(storage_group) return data['lunmap'] def assure_storage_group(self, storage_group): self._client.create_storage_group(storage_group) def assure_host_in_storage_group(self, hostname, storage_group): try: self._client.connect_host_to_storage_group(hostname, storage_group) except exception.EMCVnxCLICmdError as ex: if ex.kwargs["rc"] == 83: # SG was not created or was destroyed by another concurrent # operation before connected. # Create SG and try to connect again LOG.warning(_LW('Storage Group %s is not found. Create it.'), storage_group) self.assure_storage_group(storage_group) self._client.connect_host_to_storage_group( hostname, storage_group) else: raise return hostname def get_lun_owner(self, volume): """Returns SP owner of the volume.""" data = self._client.get_lun_by_name(volume['name'], poll=False) owner_sp = data['owner'] LOG.debug('Owner SP : %s', owner_sp) return owner_sp def filter_available_hlu_set(self, used_hlus): used_hlu_set = set(used_hlus) return self.hlu_set - used_hlu_set def _extract_iscsi_uids(self, connector): if 'initiator' not in connector: if self.protocol == 'iSCSI': msg = (_('Host %s has no iSCSI initiator') % connector['host']) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: return () return [connector['initiator']] def _extract_fc_uids(self, connector): if 'wwnns' not in connector or 'wwpns' not in connector: if self.protocol == 'FC': msg = _('Host %s has no FC initiators') % connector['host'] LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: return () wwnns = connector['wwnns'] wwpns = connector['wwpns'] wwns = [(node + port).upper() for node, port in zip(wwnns, wwpns)] return [re.sub(r'\S\S', lambda m: m.group(0) + ':', wwn, len(wwn) // 2 - 1) for wwn in wwns] def _exec_command_setpath(self, initiator_uid, sp, port_id, ip, host, vport_id=None): gname = host if vport_id is not None: cmd_iscsi_setpath = ('storagegroup', '-setpath', '-gname', gname, '-hbauid', initiator_uid, '-sp', sp, '-spport', port_id, '-spvport', vport_id, '-ip', ip, '-host', host, '-o') out, rc = self._client.command_execute(*cmd_iscsi_setpath) else: cmd_fc_setpath = ('storagegroup', '-setpath', '-gname', gname, '-hbauid', initiator_uid, '-sp', sp, '-spport', port_id, '-ip', ip, '-host', host, '-o') out, rc = self._client.command_execute(*cmd_fc_setpath) if rc != 0: LOG.warning(_LW("Failed to register %(itor)s to SP%(sp)s " "port %(portid)s because: %(msg)s."), {'itor': initiator_uid, 'sp': sp, 'portid': port_id, 'msg': out}) def auto_register_with_io_port_filter(self, connector, sgdata, 
io_port_filter): """Automatically register specific IO ports to storage group.""" initiator = connector['initiator'] ip = connector['ip'] host = connector['host'] new_white = {'A': [], 'B': []} if self.protocol == 'iSCSI': if sgdata: sp_ports = self._client.get_registered_spport_set( initiator, host, sgdata['raw_output']) # Normalize io_ports for sp in ('A', 'B'): new_ports = filter( lambda pt: (pt['SP'], pt['Port ID'], pt['Virtual Port ID']) not in sp_ports, self.iscsi_targets[sp]) new_white[sp] = map(lambda white: {'SP': white['SP'], 'Port ID': white['Port ID'], 'Virtual Port ID': white['Virtual Port ID']}, new_ports) else: new_white = self.iscsi_targets self._register_iscsi_initiator(ip, host, [initiator], new_white) elif self.protocol == 'FC': wwns = self._extract_fc_uids(connector) ports_list = [] if sgdata: for wwn in wwns: for port in io_port_filter: if ((port not in ports_list) and (not re.search(wwn + '\s+SP\s+' + port[0] + '\s+' + str(port[1]), sgdata['raw_output'], re.IGNORECASE))): # Record ports to be added ports_list.append(port) new_white[port[0]].append({ 'SP': port[0], 'Port ID': port[1]}) else: # Need to translate to dict format for fc_port in io_port_filter: new_white[fc_port[0]].append({'SP': fc_port[0], 'Port ID': fc_port[1]}) self._register_fc_initiator(ip, host, wwns, new_white) return new_white['A'] or new_white['B'] def _register_iscsi_initiator(self, ip, host, initiator_uids, port_to_register=None): iscsi_targets = (port_to_register if port_to_register else self.iscsi_targets) for initiator_uid in initiator_uids: LOG.info(_LI('Get ISCSI targets %(tg)s to register ' 'initiator %(in)s.'), {'tg': iscsi_targets, 'in': initiator_uid}) target_portals_SPA = list(iscsi_targets['A']) target_portals_SPB = list(iscsi_targets['B']) for pa in target_portals_SPA: sp = 'A' port_id = pa['Port ID'] vport_id = pa['Virtual Port ID'] self._exec_command_setpath(initiator_uid, sp, port_id, ip, host, vport_id) for pb in target_portals_SPB: sp = 'B' port_id = pb['Port ID'] vport_id = pb['Virtual Port ID'] self._exec_command_setpath(initiator_uid, sp, port_id, ip, host, vport_id) def _register_fc_initiator(self, ip, host, initiator_uids, ports_to_register=None): fc_targets = (ports_to_register if ports_to_register else self._client.get_fc_targets()) for initiator_uid in initiator_uids: LOG.info(_LI('Get FC targets %(tg)s to register ' 'initiator %(in)s.'), {'tg': fc_targets, 'in': initiator_uid}) target_portals_SPA = list(fc_targets['A']) target_portals_SPB = list(fc_targets['B']) for pa in target_portals_SPA: sp = 'A' port_id = pa['Port ID'] self._exec_command_setpath(initiator_uid, sp, port_id, ip, host) for pb in target_portals_SPB: sp = 'B' port_id = pb['Port ID'] self._exec_command_setpath(initiator_uid, sp, port_id, ip, host) def _deregister_initiators(self, connector): initiator_uids = [] try: if self.protocol == 'iSCSI': initiator_uids = self._extract_iscsi_uids(connector) elif self.protocol == 'FC': initiator_uids = self._extract_fc_uids(connector) except exception.VolumeBackendAPIException: LOG.warning(_LW("Failed to extract initiators of %s, so ignore " "deregistration operation."), connector['host']) if initiator_uids: for initiator_uid in initiator_uids: rc, out = self._client.deregister_initiator(initiator_uid) if rc != 0: LOG.warning(_LW("Failed to deregister %(itor)s " "because: %(msg)s."), {'itor': initiator_uid, 'msg': out}) def _filter_unregistered_initiators(self, initiator_uids, sgdata): unregistered_initiators = [] if not initiator_uids: return 
unregistered_initiators out = sgdata['raw_output'] for initiator_uid in initiator_uids: m = re.search(initiator_uid, out) if m is None: unregistered_initiators.append(initiator_uid) return unregistered_initiators def auto_register_initiator_to_all(self, connector, sgdata): """Automatically registers available initiators. Returns True if has registered initiator otherwise returns False. """ initiator_uids = [] ip = connector['ip'] host = connector['host'] if self.protocol == 'iSCSI': initiator_uids = self._extract_iscsi_uids(connector) if sgdata is not None: itors_toReg = self._filter_unregistered_initiators( initiator_uids, sgdata) else: itors_toReg = initiator_uids if len(itors_toReg) == 0: return False LOG.info(_LI('iSCSI Initiators %(in)s of %(ins)s ' 'need registration.'), {'in': itors_toReg, 'ins': initiator_uids}) self._register_iscsi_initiator(ip, host, itors_toReg) return True elif self.protocol == 'FC': initiator_uids = self._extract_fc_uids(connector) if sgdata is not None: itors_toReg = self._filter_unregistered_initiators( initiator_uids, sgdata) else: itors_toReg = initiator_uids if len(itors_toReg) == 0: return False LOG.info(_LI('FC Initiators %(in)s of %(ins)s need registration'), {'in': itors_toReg, 'ins': initiator_uids}) self._register_fc_initiator(ip, host, itors_toReg) return True def auto_register_initiator(self, connector, sgdata, io_ports_filter=None): """Automatically register available initiators. :returns: True if has registered initiator otherwise return False """ if io_ports_filter: return self.auto_register_with_io_port_filter(connector, sgdata, io_ports_filter) else: return self.auto_register_initiator_to_all(connector, sgdata) def assure_host_access(self, volume, connector): hostname = connector['host'] volumename = volume['name'] auto_registration_done = False try: sgdata = self._client.get_storage_group(hostname, poll=False) except exception.EMCVnxCLICmdError as ex: if ex.kwargs["rc"] != 83: raise # Storage Group has not existed yet self.assure_storage_group(hostname) if self.itor_auto_reg: self.auto_register_initiator(connector, None, self.io_ports) auto_registration_done = True else: self._client.connect_host_to_storage_group(hostname, hostname) sgdata = self._client.get_storage_group(hostname, poll=True) if self.itor_auto_reg and not auto_registration_done: new_registerred = self.auto_register_initiator(connector, sgdata, self.io_ports) if new_registerred: sgdata = self._client.get_storage_group(hostname, poll=True) lun_id = self.get_lun_id(volume) tried = 0 while tried < self.max_retries: tried += 1 lun_map = sgdata['lunmap'] used_hlus = lun_map.values() candidate_hlus = self.filter_available_hlu_set(used_hlus) candidate_hlus = list(candidate_hlus) if len(candidate_hlus) != 0: hlu = candidate_hlus[random.randint(0, len(candidate_hlus) - 1)] try: self._client.add_hlu_to_storage_group( hlu, lun_id, hostname) if hostname not in self.hlu_cache: self.hlu_cache[hostname] = {} self.hlu_cache[hostname][lun_id] = hlu return hlu, sgdata except exception.EMCVnxCLICmdError as ex: LOG.debug("Add HLU to storagegroup failed, retry %s", tried) elif tried == 1: # The first try didn't get the in time data, # so we need a retry LOG.debug("Did not find candidate HLUs, retry %s", tried) else: msg = (_('Reach limitation set by configuration ' 'option max_luns_per_storage_group. 
'Operation to add %(vol)s into ' 'Storage Group %(sg)s is rejected.') % {'vol': volumename, 'sg': hostname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Need a full poll to get the real in-time data # Query storage group with poll for retry sgdata = self._client.get_storage_group(hostname, poll=True) self.hlu_cache[hostname] = sgdata['lunmap'] if lun_id in sgdata['lunmap']: hlu = sgdata['lunmap'][lun_id] return hlu, sgdata msg = _("Failed to add %(vol)s into %(sg)s " "after %(retries)s tries.") % \ {'vol': volumename, 'sg': hostname, 'retries': tried} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def vnx_get_iscsi_properties(self, volume, connector, hlu, sg_raw_output): storage_group = connector['host'] registered_spports = self._client.get_registered_spport_set( connector['initiator'], storage_group, sg_raw_output) targets = self._client.find_available_iscsi_targets( storage_group, registered_spports, self.iscsi_targets) properties = {'target_discovered': False, 'target_iqn': 'unknown', 'target_iqns': None, 'target_portal': 'unknown', 'target_portals': None, 'target_lun': 'unknown', 'target_luns': None, 'volume_id': volume['id']} if targets: properties['target_discovered'] = True properties['target_iqns'] = [t['Port WWN'] for t in targets] properties['target_iqn'] = properties['target_iqns'][0] properties['target_portals'] = [ "%s:3260" % t['IP Address'] for t in targets] properties['target_portal'] = properties['target_portals'][0] properties['target_luns'] = [hlu] * len(targets) properties['target_lun'] = hlu else: LOG.error(_LE('Failed to find available iSCSI targets for %s.'), storage_group) LOG.debug('The iSCSI properties for %(host)s are %(properties)s.', {'host': storage_group, 'properties': properties}) return properties def vnx_get_fc_properties(self, connector, device_number): fc_properties = {'target_lun': device_number, 'target_discovered': True, 'target_wwn': None} if self.zonemanager_lookup_service is None: fc_properties['target_wwn'] = self.get_login_ports(connector, self.io_ports) else: target_wwns, itor_tgt_map = self.get_initiator_target_map( connector['wwpns'], self.get_status_up_ports(connector, self.io_ports)) fc_properties['target_wwn'] = target_wwns fc_properties['initiator_target_map'] = itor_tgt_map return fc_properties def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info.""" @lockutils.synchronized('emc-connection-' + connector['host'], "emc-connection-", True) def do_initialize_connection(): return self.assure_host_access( volume, connector) data = {} if self.protocol == 'iSCSI': (device_number, sg_data) = do_initialize_connection() iscsi_properties = self.vnx_get_iscsi_properties( volume, connector, device_number, sg_data['raw_output'] ) data = {'driver_volume_type': 'iscsi', 'data': iscsi_properties} elif self.protocol == 'FC': (device_number, sg_data) = do_initialize_connection() fc_properties = self.vnx_get_fc_properties(connector, device_number) fc_properties['volume_id'] = volume['id'] data = {'driver_volume_type': 'fibre_channel', 'data': fc_properties} return data def terminate_connection(self, volume, connector): """Disallow connection from connector.""" @lockutils.synchronized('emc-connection-' + connector['host'], "emc-connection-", True) def do_terminate_connection(): hostname = connector['host'] volume_name = volume['name'] lun_id = self.get_lun_id(volume) lun_map = None conn_info = None if (hostname in self.hlu_cache and lun_id in
self.hlu_cache[hostname] and not self.destroy_empty_sg and not self.zonemanager_lookup_service): hlu = self.hlu_cache[hostname][lun_id] self._client.remove_hlu_from_storagegroup(hlu, hostname, poll=True) self.hlu_cache[hostname].pop(lun_id) else: try: lun_map = self.get_lun_map(hostname) self.hlu_cache[hostname] = lun_map except exception.EMCVnxCLICmdError as ex: if ex.kwargs["rc"] == 83: LOG.warning(_LW("Storage Group %s is not found. " "terminate_connection() is " "unnecessary."), hostname) if lun_id in lun_map: self._client.remove_hlu_from_storagegroup( lun_map[lun_id], hostname) lun_map.pop(lun_id) else: LOG.warning(_LW("Volume %(vol)s was not in Storage Group" " %(sg)s."), {'vol': volume_name, 'sg': hostname}) if self.protocol == 'FC': conn_info = {'driver_volume_type': 'fibre_channel', 'data': {}} if self.zonemanager_lookup_service and not lun_map: target_wwns, itor_tgt_map = self.get_initiator_target_map( connector['wwpns'], self.get_status_up_ports(connector)) conn_info['data']['initiator_target_map'] = itor_tgt_map if self.destroy_empty_sg and not lun_map: try: LOG.info(_LI("Storage Group %s was empty."), hostname) self._client.disconnect_host_from_storage_group( hostname, hostname) self._client.delete_storage_group(hostname) if self.itor_auto_dereg: self._deregister_initiators(connector) except Exception: LOG.warning(_LW("Failed to destroy Storage Group %s."), hostname) try: self._client.connect_host_to_storage_group( hostname, hostname) except Exception: LOG.warning(_LW("Fail to connect host %(host)s " "back to storage group %(sg)s."), {'host': hostname, 'sg': hostname}) return conn_info return do_terminate_connection() def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Initializes connection for mount point.""" smp_name = self._construct_tmp_smp_name(snapshot) self._client.attach_mount_point(smp_name, snapshot.name) volume = {'name': smp_name, 'id': snapshot.id} return self.initialize_connection(volume, connector) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallows connection for mount point.""" smp_name = self._construct_tmp_smp_name(snapshot) volume = {'name': smp_name} conn_info = self.terminate_connection(volume, connector) self._client.detach_mount_point(smp_name) return conn_info def create_export_snapshot(self, context, snapshot, connector): """Creates mount point for a snapshot.""" smp_name = self._construct_tmp_smp_name(snapshot) primary_lun_name = snapshot.volume_name self._client.create_mount_point(primary_lun_name, smp_name) return None def remove_export_snapshot(self, context, snapshot): """Removes mount point for a snapshot.""" smp_name = self._construct_tmp_smp_name(snapshot) volume = {'name': smp_name, 'provider_location': None} self.delete_volume(volume, True) def manage_existing_get_size(self, volume, existing_ref): """Returns size of volume to be managed by manage_existing.""" if 'source-id' in existing_ref: data = self._client.get_lun_by_id( existing_ref['source-id'], properties=VNXLunProperties.lun_with_pool) elif 'source-name' in existing_ref: data = self._client.get_lun_by_name( existing_ref['source-name'], properties=VNXLunProperties.lun_with_pool) else: reason = _('Reference must contain source-id or source-name key.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) target_pool = self.get_target_storagepool(volume) if target_pool and data['pool'] != target_pool: reason = (_('The imported lun %(lun_id)s is in pool %(lun_pool)s ' 'which is not managed by 
the host %(host)s.') % {'lun_id': data['lun_id'], 'lun_pool': data['pool'], 'host': volume['host']}) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return data['total_capacity_gb'] def manage_existing(self, volume, manage_existing_ref): """Imports the existing backend storage object as a volume. manage_existing_ref:{ 'source-id': } or manage_existing_ref:{ 'source-name': } """ client = self._client lun_id = self._get_lun_id(manage_existing_ref) specs = self.get_volumetype_extraspecs(volume) LOG.debug('Specs of the volume is: %s.', specs) host = volume['host'] LOG.debug('Host of the volume is: %s.', host) tar_pool = vol_utils.extract_host(volume['host'], 'pool') LOG.debug("Target pool of LUN to manage is: %s.", tar_pool) tar_type, tar_tier = self._get_extra_spec_value(specs) vnx_lun = self._get_lun_pool_and_type(lun_id) LOG.debug("LUN to manage: %s.", vnx_lun) LOG.debug("Target info: pool: %(pool)s, type: %(type)s, " "tier: %(tier)s.", {'pool': tar_pool, 'type': tar_type, 'tier': tar_tier}) do_migration = (tar_type is not None and tar_type != vnx_lun.provision or tar_pool != vnx_lun.pool_name) change_tier = (tar_tier is not None and not do_migration and tar_tier != vnx_lun.tier) reason = None if do_migration: LOG.debug("Need migration during manage.") if client.check_lun_has_snap(lun_id): reason = _('Driver is not able to do retype because' ' the volume (LUN {}) has snapshot which is ' 'forbidden to migrate.').format(lun_id) else: volume['size'] = vnx_lun.capacity moved, empty = self._migrate_volume(volume, tar_pool, specs, src_id=lun_id) if not moved: reason = _('Storage-assisted migration failed during ' 'manage volume.') if reason is None and change_tier: LOG.debug('Change LUN tier policy to: %s.', tar_tier) client.modify_lun_tiering_by_id(lun_id, tar_tier) if reason is not None: raise exception.ManageExistingVolumeTypeMismatch(reason=reason) else: client.rename_lun(lun_id, volume['name']) location = self._build_provider_location(lun_id, 'lun', volume['name']) return {'provider_location': location} def _get_lun_pool_and_type(self, lun_id): client = self._client data = client.get_lun_by_id(lun_id, VNXLunProperties.get_all(), poll=False) lun = VNXLun() lun.update(data) return lun def _get_lun_id(self, manage_existing_ref): if 'source-id' in manage_existing_ref: lun_id = manage_existing_ref['source-id'] elif 'source-name' in manage_existing_ref: lun_id = self._client.get_lun_by_name( manage_existing_ref['source-name'], poll=False)['lun_id'] else: reason = _('Reference must contain source-id or source-name key.') raise exception.ManageExistingInvalidReference( existing_ref=manage_existing_ref, reason=reason) return lun_id def get_login_ports(self, connector, io_ports=None): return self._client.get_login_ports(connector['host'], connector['wwpns'], io_ports) def get_status_up_ports(self, connector, io_ports=None): return self._client.get_status_up_ports(connector['host'], io_ports=io_ports) def get_initiator_target_map(self, fc_initiators, fc_targets): target_wwns = [] itor_tgt_map = {} if self.zonemanager_lookup_service: mapping = \ self.zonemanager_lookup_service. 
\ get_device_mapping_from_network(fc_initiators, fc_targets) for each in mapping: map_d = mapping[each] target_wwns.extend(map_d['target_port_wwn_list']) for initiator in map_d['initiator_port_wwn_list']: itor_tgt_map[initiator] = map_d['target_port_wwn_list'] return list(set(target_wwns)), itor_tgt_map def get_volumetype_extraspecs(self, volume): specs = {} type_id = volume['volume_type_id'] if type_id is not None: specs = volume_types.get_volume_type_extra_specs(type_id) return specs def failover_host(self, context, volumes, secondary_backend_id): """Fails over the volume back and forth. Driver needs to update the following info for this volume: 1. replication_driver_data: update the is_primary flag 2. provider_location: update serial number and lun id """ volume_update_list = [] if secondary_backend_id != 'default': rep_status = 'failed-over' backend_id = ( self.configuration.replication_device[0]['backend_id']) if secondary_backend_id != backend_id: msg = (_('Invalid secondary_backend_id specified. ' 'Valid backend id is %s.') % backend_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: rep_status = 'enabled' def failover_one(volume, new_status): rep_data = json.loads(volume['replication_driver_data']) is_primary = rep_data['is_primary'] mirror_name = self._construct_mirror_name(volume) mirror_view = self._get_mirror_view(volume) remote_client = mirror_view._secondary_client provider_location = volume['provider_location'] try: mirror_view.promote_image(mirror_name) except exception.EMCVnxCLICmdError as ex: msg = _LE( 'Failed to fail over volume %(volume_id)s ' 'to %(target)s: %(error)s.') LOG.error(msg, {'volume_id': volume.id, 'target': secondary_backend_id, 'error': ex},) new_status = 'error' else: rep_data.update({'is_primary': not is_primary}) # Transfer ownership to secondary_backend_id and # update provider_location field provider_location = self._update_provider_location( provider_location, 'system', remote_client.get_array_serial()['array_serial']) provider_location = self._update_provider_location( provider_location, 'id', six.text_type( remote_client.get_lun_by_name(volume.name)['lun_id']) ) model_update = {'volume_id': volume.id, 'updates': {'replication_driver_data': json.dumps(rep_data), 'replication_status': new_status, 'provider_location': provider_location}} volume_update_list.append(model_update) for volume in volumes: if self._is_replication_enabled(volume): failover_one(volume, rep_status) else: volume_update_list.append({ 'volume_id': volume.id, 'updates': {'status': 'error'}}) return secondary_backend_id, volume_update_list def _is_replication_enabled(self, volume): """Return True if the replication extra spec is specified. Returns True only when the extra spec replication_enabled exists and its value is '<is> True'. Otherwise, return False.
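A minimal illustration (the extra-spec value follows the common Cinder boolean syntax; the surrounding volume type is assumed to exist)::

    # extra specs as returned by get_volumetype_extraspecs()
    specs = {'replication_enabled': '<is> True'}
    bool(specs and specs.get('replication_enabled') == '<is> True')
    # -> True; any other value, or a missing key, yields False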
""" specs = self.get_volumetype_extraspecs(volume) return specs and specs.get('replication_enabled') == ' True' def setup_lun_replication(self, volume, primary_lun_id, provisioning, tiering): """Setup replication for LUN, this only happens in primary system.""" rep_update = {'replication_driver_data': None, 'replication_status': 'disabled'} metadata_update = {} if self._is_replication_enabled(volume): LOG.debug('Starting setup replication ' 'for volume: %s.', volume.id) lun_size = volume['size'] mirror_name = self._construct_mirror_name(volume) pool_name = vol_utils.extract_host(volume.host, 'pool') self._mirror.create_mirror_workflow( mirror_name, primary_lun_id, pool_name, volume.name, lun_size, provisioning, tiering) LOG.info(_LI('Successfully setup replication for %s.'), volume.id) rep_update.update({'replication_driver_data': self.__class__._build_replication_driver_data( self.configuration), 'replication_status': 'enabled'}) metadata_update = { 'system': self.get_array_serial()} return rep_update, metadata_update def cleanup_lun_replication(self, volume): if self._is_replication_enabled(volume): LOG.debug('Starting cleanup replication form volume: ' '%s.', volume.id) mirror_name = self._construct_mirror_name(volume) mirror_view = self._get_mirror_view(volume) mv = mirror_view.get_image(mirror_name) if mv: mirror_view.destroy_mirror_view(mirror_name, volume.name, mv) def _get_mirror_view(self, volume): """Determines where to build a Mirror View operator.""" if volume['replication_driver_data']: rep_data = json.loads(volume['replication_driver_data']) is_primary = rep_data['is_primary'] else: is_primary = True if is_primary: # if on primary, promote to configured array in conf mirror_view = self._mirror else: # else promote to array according to volume data mirror_view = self._build_mirror_view(volume) return mirror_view @staticmethod def _build_replication_driver_data(configuration): """Builds driver specific data for replication. This data will be used by secondary backend to connect primary device. 
""" driver_data = dict() driver_data['san_ip'] = configuration.san_ip driver_data['san_login'] = configuration.san_login driver_data['san_password'] = configuration.san_password driver_data['san_secondary_ip'] = configuration.san_secondary_ip driver_data['storage_vnx_authentication_type'] = ( configuration.storage_vnx_authentication_type) driver_data['storage_vnx_security_file_dir'] = ( configuration.storage_vnx_security_file_dir) driver_data['is_primary'] = True return json.dumps(driver_data) def _build_client(self, active_backend_id=None): """Builds a client pointing to the right VNX.""" if not active_backend_id: return CommandLineHelper(self.configuration) else: configuration = self.configuration if not configuration.replication_device: err_msg = ( _('replication_device should be configured ' 'on backend: %s.') % configuration.config_group) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) current_target = None for target in configuration.replication_device: if target['backend_id'] == active_backend_id: current_target = target break if not current_target: err_msg = ( _('replication_device with backend_id [%s] is missing.') % active_backend_id) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) target_conf = copy.copy(configuration) for key in self.REPLICATION_KEYS: if key in current_target: setattr(target_conf, key, current_target[key]) return CommandLineHelper(target_conf) def _build_mirror_view(self, volume=None): """Builds a client for remote storage device. Currently, only support one remote, managed device. :param volume: if volume is not None, then build a remote client from volume's replication_driver_data. """ configuration = self.configuration remote_info = None if volume: if volume['replication_driver_data']: remote_info = json.loads(volume['replication_driver_data']) else: LOG.warning( _LW('No replication info from this volume: %s.'), volume.id) return None else: if not configuration.replication_device: LOG.info(_LI('Replication is not configured on backend: %s.'), configuration.config_group) return None remote_info = configuration.replication_device[0] # Copy info to replica configuration for remote client replica_conf = copy.copy(configuration) for key in self.REPLICATION_KEYS: if key in remote_info: setattr(replica_conf, key, remote_info[key]) _remote_client = CommandLineHelper(replica_conf) _mirror = MirrorView(self._client, _remote_client) return _mirror def get_pool(self, volume): """Returns the pool name of a volume.""" data = self._client.get_lun_by_name(volume['name'], [VNXLunProperties.LUN_POOL], poll=False) return data.get(VNXLunProperties.LUN_POOL.key) def unmanage(self, volume): """Unmanages a volume""" pass def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistency group from cgsnapshot.""" if cgsnapshot and snapshots and not source_cg: return self._create_consisgroup_from_cgsnapshot( group, volumes, cgsnapshot, snapshots) elif source_cg and source_vols and not cgsnapshot: return self._clone_consisgroup( group, volumes, source_cg, source_vols) else: msg = _("create_consistencygroup_from_src supports a " "cgsnapshot source or a consistency group source. 
" "Multiple sources cannot be used.") raise exception.InvalidInput(reason=msg) def _clone_consisgroup(self, group, volumes, source_cg, source_vols): temp_cgsnapshot_name = 'temp_snapshot_for_{}'.format(group.id) store_spec = { 'group': group, 'snapshot': {'id': temp_cgsnapshot_name, 'consistencygroup_id': source_cg.id}, 'new_snap_name': temp_cgsnapshot_name, 'source_lun_id': None, 'client': self._client } flow_name = 'clone_consisgroup' snap_build_tasks = [CreateSnapshotTask()] volume_model_updates = self._create_cg_from_cgsnap_use_workflow( flow_name, snap_build_tasks, store_spec, volumes, source_vols) self._delete_temp_cgsnap(temp_cgsnapshot_name) LOG.info(_LI('Consistency group %(cg)s is created successfully.'), {'cg': group.id}) return None, volume_model_updates def _create_consisgroup_from_cgsnapshot(self, group, volumes, cgsnapshot, snapshots): flow_name = 'create_consisgroup_from_cgsnapshot' copied_snapshot_name = 'temp_snapshot_for_%s' % group.id store_spec = { 'group': group, 'src_snap_name': cgsnapshot['id'], 'new_snap_name': copied_snapshot_name, 'client': self._client } snap_build_tasks = [CopySnapshotTask(), AllowReadWriteOnSnapshotTask()] src_vols = map(lambda snap: snap.volume, snapshots) volume_model_updates = self._create_cg_from_cgsnap_use_workflow( flow_name, snap_build_tasks, store_spec, volumes, src_vols) self._delete_temp_cgsnap(copied_snapshot_name) LOG.info(_LI('Consistency group %(cg)s is created successfully.'), {'cg': group.id}) return None, volume_model_updates def _delete_temp_cgsnap(self, snap): try: self._client.delete_cgsnapshot(snap) except exception.EMCVnxCLICmdError as ex: LOG.warning(_LW('Delete the temporary cgsnapshot %(name)s failed. ' 'This temporary cgsnapshot can be deleted ' 'manually. ' 'Message: %(msg)s'), {'name': snap, 'msg': ex.kwargs['out']}) def _create_cg_from_cgsnap_use_workflow(self, flow_name, snap_build_tasks, store_spec, volumes, source_vols): work_flow = linear_flow.Flow(flow_name) work_flow.add(*snap_build_tasks) # Add tasks for each volumes in the consistency group lun_id_key_template = 'new_lun_id_%s' lun_data_key_template = 'vol_%s' volume_model_updates = [] for i, (volume, src_volume) in enumerate(zip(volumes, source_vols)): specs = self.get_volumetype_extraspecs(volume) provisioning, tiering = ( self._get_and_validate_extra_specs(specs)) pool_name = self.get_target_storagepool(volume, src_volume) base_lun_name = self._get_base_lun_name(src_volume) sub_store_spec = { 'volume': volume, 'base_lun_name': base_lun_name, 'pool_name': pool_name, 'dest_vol_name': volume['name'] + '_dest', 'volume_size': volume['size'], 'provisioning': provisioning, 'tiering': tiering, 'ignore_pool_full_threshold': self.ignore_pool_full_threshold, } work_flow.add( CreateSMPTask(name="CreateSMPTask%s" % i, inject=sub_store_spec), AttachSnapTask(name="AttachSnapTask%s" % i, inject=sub_store_spec), CreateDestLunTask(name="CreateDestLunTask%s" % i, provides=lun_data_key_template % i, inject=sub_store_spec), MigrateLunTask(name="MigrateLunTask%s" % i, provides=lun_id_key_template % i, inject=sub_store_spec, rebind={'lun_data': lun_data_key_template % i}, wait_for_completion=False)) volume_model_updates.append({'id': volume['id']}) volume_host = volume['host'] host = vol_utils.extract_host(volume_host, 'backend') host_and_pool = vol_utils.append_host(host, pool_name) if volume_host != host_and_pool: volume_model_updates[i]['host'] = host_and_pool work_flow.add(WaitMigrationsCompleteTask(lun_id_key_template, len(volumes)), 
CreateConsistencyGroupTask(lun_id_key_template, len(volumes))) flow_engine = taskflow.engines.load(work_flow, store=store_spec) flow_engine.run() for i, update in enumerate(volume_model_updates): new_lun_id = flow_engine.storage.fetch(lun_id_key_template % i) update['provider_location'] = ( self._build_provider_location(new_lun_id)) return volume_model_updates def get_target_storagepool(self, volume, source_volume=None): pool = vol_utils.extract_host(volume['host'], 'pool') # For new created volume that is not from snapshot or cloned # or the pool is the managed pool, # just use the pool selected by scheduler if not source_volume or pool in self.storage_pools: return pool # For volume created from snapshot or cloned from volume, the pool to # use depends on the source volume version. If the source volume is # created by older version of driver which doesn't support pool # scheduler, use the pool where the source volume locates. Otherwise, # use the pool selected by scheduler provider_location = source_volume.get('provider_location') if (not provider_location or not self._extract_provider_location(provider_location, 'version')): LOG.warning(_LW("The source volume is a legacy volume. " "Create volume in the pool where the source " "volume %s is created."), source_volume['name']) data = self._client.get_lun_by_name(source_volume['name'], [VNXLunProperties.LUN_POOL], poll=False) if data is None: msg = (_("Failed to find storage pool for source volume %s.") % source_volume['name']) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) pool = data[VNXLunProperties.LUN_POOL.key] if self.storage_pools and pool not in self.storage_pools: msg = (_("The source volume %s is not in the pool which " "is managed by the current host.") % source_volume['name']) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return pool def update_volume_stats(self): """Retrieves stats info.""" self.update_enabler_in_volume_stats() if self.protocol == 'iSCSI': self.iscsi_targets = self._client.get_iscsi_targets( poll=False, io_ports=self.io_ports) properties = [VNXPoolProperties.POOL_FREE_CAPACITY, VNXPoolProperties.POOL_TOTAL_CAPACITY, VNXPoolProperties.POOL_STATE, VNXPoolProperties.POOL_SUBSCRIBED_CAPACITY, VNXPoolProperties.POOL_FULL_THRESHOLD] if '-FASTCache' in self.enablers: properties.append(VNXPoolProperties.POOL_FAST_CACHE) pool_list = self._client.get_pool_list(properties, False) if self.storage_pools: pool_list = filter(lambda a: a['pool_name'] in self.storage_pools, pool_list) pool_feature = (self._client.get_pool_feature_properties(poll=False) if self.check_max_pool_luns_threshold else None) self.stats['pools'] = [self._build_pool_stats(pool, pool_feature) for pool in pool_list] return self.stats def getEMCVnxCli(prtcl, configuration=None, active_backend_id=None): configuration.append_config_values(loc_opts) return EMCVnxCliBase(prtcl, configuration=configuration, active_backend_id=active_backend_id) class CreateSMPTask(task.Task): """Creates a snap mount point (SMP) for the source snapshot. Reversion strategy: Delete the SMP. 
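A sketch of how this task is typically wired into a flow (store keys follow _create_cg_from_cgsnap_use_workflow; ``client`` is assumed to be a connected CommandLineHelper and ``volume``/``base_lun_name`` valid inputs)::

    from taskflow.patterns import linear_flow
    import taskflow.engines

    flow = linear_flow.Flow('create_smp_example')
    flow.add(CreateSMPTask(name='CreateSMPTask0',
                           inject={'volume': volume,
                                   'base_lun_name': base_lun_name}))
    engine = taskflow.engines.load(flow, store={'client': client})
    engine.run()
    new_smp_id = engine.storage.fetch('new_smp_id')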
""" def __init__(self, name=None, inject=None): super(CreateSMPTask, self).__init__(name=name, provides='new_smp_id', inject=inject) def execute(self, client, volume, base_lun_name, *args, **kwargs): LOG.debug('CreateSMPTask.execute') client.create_mount_point(base_lun_name, volume['name']) return client.get_lun_by_name(volume['name'])['lun_id'] def revert(self, result, client, volume, *args, **kwargs): LOG.debug('CreateSMPTask.revert') if isinstance(result, failure.Failure): return else: LOG.warning(_LW('CreateSMPTask.revert: delete mount point %s'), volume['name']) client.delete_lun(volume['name']) class AttachSnapTask(task.Task): """Attaches the snapshot to the SMP created before. Reversion strategy: Detach the SMP. """ def execute(self, client, volume, new_snap_name, *args, **kwargs): LOG.debug('AttachSnapTask.execute') client.attach_mount_point(volume['name'], new_snap_name) def revert(self, result, client, volume, *args, **kwargs): LOG.debug('AttachSnapTask.revert') if isinstance(result, failure.Failure): return else: LOG.warning(_LW('AttachSnapTask.revert: detach mount point %s'), volume['name']) try: client.detach_mount_point(volume['name']) except exception.EMCVnxCLICmdError as ex: with excutils.save_and_reraise_exception() as ctxt: is_not_smp_err = ( ex.kwargs["rc"] == 163 and VNXError.has_error("".join(ex.kwargs["out"]), VNXError.LUN_IS_NOT_SMP)) ctxt.reraise = not is_not_smp_err class CreateDestLunTask(task.Task): """Creates a destination lun for migration. Reversion strategy: Delete the temp destination lun. """ def __init__(self, name=None, provides='lun_data', inject=None): super(CreateDestLunTask, self).__init__(name=name, provides=provides, inject=inject) def execute(self, client, pool_name, dest_vol_name, volume_size, provisioning, tiering, ignore_pool_full_threshold, *args, **kwargs): LOG.debug('CreateDestLunTask.execute') data = client.create_lun_with_advance_feature( pool_name, dest_vol_name, volume_size, provisioning, tiering, ignore_thresholds=ignore_pool_full_threshold) return data def revert(self, result, client, dest_vol_name, *args, **kwargs): LOG.debug('CreateDestLunTask.revert') if isinstance(result, failure.Failure): return else: LOG.warning(_LW('CreateDestLunTask.revert: delete temp lun %s'), dest_vol_name) client.delete_lun(dest_vol_name) class MigrateLunTask(task.Task): """Starts a migration between the SMP and the temp lun. Reversion strategy: None """ def __init__(self, name=None, provides='new_lun_id', inject=None, rebind=None, wait_for_completion=True): super(MigrateLunTask, self).__init__(name=name, provides=provides, inject=inject, rebind=rebind) self.wait_for_completion = wait_for_completion def execute(self, client, new_smp_id, lun_data, rate=VNXMigrationRate.HIGH, *args, **kwargs): LOG.debug('MigrateLunTask.execute') dest_vol_lun_id = lun_data['lun_id'] LOG.debug('Migrating Mount Point Volume ID: %s', new_smp_id) if self.wait_for_completion: migrated = client.migrate_lun_with_verification(new_smp_id, dest_vol_lun_id, None, rate) else: migrated = client.migrate_lun_without_verification( new_smp_id, dest_vol_lun_id, None) if not migrated: msg = (_("Migrate volume failed between source vol %(src)s" " and dest vol %(dst)s.") % {'src': new_smp_id, 'dst': dest_vol_lun_id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return new_smp_id def revert(self, *args, **kwargs): pass class CreateSnapshotTask(task.Task): """Creates a snapshot/cgsnapshot of a volume. Reversion Strategy: Delete the created snapshot/cgsnapshot. 
""" def execute(self, client, snapshot, source_lun_id, *args, **kwargs): LOG.debug('CreateSnapshotTask.execute') # Create temp Snapshot if snapshot['consistencygroup_id']: client.create_cgsnapshot(snapshot['consistencygroup_id'], snapshot['id']) else: snapshot_name = snapshot['name'] volume_name = snapshot['volume_name'] LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s'), {'snapshot': snapshot_name, 'volume': volume_name}) client.create_snapshot(source_lun_id, snapshot_name) def revert(self, result, client, snapshot, *args, **kwargs): LOG.debug('CreateSnapshotTask.revert') if isinstance(result, failure.Failure): return else: if snapshot['consistencygroup_id']: LOG.warning(_LW('CreateSnapshotTask.revert: ' 'delete temp cgsnapshot %s'), snapshot['consistencygroup_id']) client.delete_cgsnapshot(snapshot['id']) else: LOG.warning(_LW('CreateSnapshotTask.revert: ' 'delete temp snapshot %s'), snapshot['name']) client.delete_snapshot(snapshot['name']) class CopySnapshotTask(task.Task): """Task to copy a volume snapshot/consistency group snapshot. Reversion Strategy: Delete the copied snapshot/cgsnapshot """ def execute(self, client, src_snap_name, new_snap_name, *args, **kwargs): LOG.debug('CopySnapshotTask.execute') client.copy_snapshot(src_snap_name, new_snap_name) def revert(self, result, client, src_snap_name, new_snap_name, *args, **kwargs): LOG.debug('CopySnapshotTask.revert') if isinstance(result, failure.Failure): return else: LOG.warning(_LW('CopySnapshotTask.revert: delete the ' 'copied snapshot %(new_name)s of ' '%(source_name)s.'), {'new_name': new_snap_name, 'source_name': src_snap_name}) client.delete_snapshot(new_snap_name) class AllowReadWriteOnSnapshotTask(task.Task): """Task to modify a Snapshot to allow ReadWrite on it.""" def execute(self, client, new_snap_name, *args, **kwargs): LOG.debug('AllowReadWriteOnSnapshotTask.execute') client.allow_snapshot_readwrite_and_autodelete(new_snap_name) class CreateConsistencyGroupTask(task.Task): """Task to create a consistency group.""" def __init__(self, lun_id_key_template, num_of_members): self.lun_id_keys = sorted(set( [lun_id_key_template % i for i in range(num_of_members)])) super(CreateConsistencyGroupTask, self).__init__( requires=self.lun_id_keys) def execute(self, client, group, *args, **kwargs): LOG.debug('CreateConsistencyGroupTask.execute') lun_ids = [kwargs[key] for key in self.lun_id_keys] client.create_consistencygroup(group['id'], lun_ids, poll=True) class WaitMigrationsCompleteTask(task.Task): """Task to wait migrations to be completed.""" def __init__(self, lun_id_key_template, num_of_members): self.lun_id_keys = sorted(set( [lun_id_key_template % i for i in range(num_of_members)])) super(WaitMigrationsCompleteTask, self).__init__( requires=self.lun_id_keys) def execute(self, client, *args, **kwargs): LOG.debug('WaitMigrationsCompleteTask.execute') lun_ids = [kwargs[key] for key in self.lun_id_keys] for lun_id in lun_ids: migrated = client.verify_lun_migration(lun_id) if not migrated: msg = _("Migrate volume %(src)s failed.") % {'src': lun_id} raise exception.VolumeBackendAPIException(data=msg) class MirrorView(object): """MirrorView synchronous/asynchronous operations. This class is to support operations for volume replication. Each operation should ensure commands are sent to correct targeting device. NOTE: currently, only synchronous is supported. 
""" SYNCHRONIZE_MODE = ['sync'] SYNCHRONIZED_STATE = 'Synchronized' CONSISTENT_STATE = 'Consistent' def __init__(self, client, secondary_client, mode='sync'): """Caller needs to initialize MirrorView via this method. :param client: client connecting to primary system :param secondary_client: client connecting to secondary system :param mode: only 'sync' is allowed """ self._client = client self._secondary_client = secondary_client if mode not in self.SYNCHRONIZE_MODE: msg = _('Invalid synchronize mode specified, allowed ' 'mode is %s.') % self.SYNCHRONIZE_MODE raise exception.VolumeBackendAPIException( data=msg) self.mode = '-sync' def create_mirror_workflow(self, mirror_name, lun_id, pool_name, lun_name, lun_size, provisioning, tiering): """Creates mirror view for LUN.""" store_spec = {'mirror': self} work_flow = self._get_create_mirror_flow( mirror_name, lun_id, pool_name, lun_name, lun_size, provisioning, tiering) flow_engine = taskflow.engines.load(work_flow, store=store_spec) flow_engine.run() def destroy_mirror_view(self, mirror_name, lun_name, mv=None): self.fracture_image(mirror_name, mv) self.remove_image(mirror_name, mv) self.destroy_mirror(mirror_name) self.delete_secondary_lun(lun_name) def _get_create_mirror_flow(self, mirror_name, lun_id, pool_name, lun_name, lun_size, provisioning, tiering): """Gets mirror create flow.""" flow_name = 'create_mirror_view' work_flow = linear_flow.Flow(flow_name) work_flow.add(MirrorCreateTask(mirror_name, lun_id), MirrorSecLunCreateTask(pool_name, lun_name, lun_size, provisioning, tiering), MirrorAddImageTask(mirror_name)) return work_flow def create_mirror(self, name, primary_lun_id, poll=False): command_create = ('mirror', '-sync', '-create', '-name', name, '-lun', primary_lun_id, '-usewriteintentlog', '-o') out, rc = self._client.command_execute(*command_create, poll=poll) if rc != 0 or out.strip(): if VNXError.has_error(out, VNXError.MIRROR_IN_USE): LOG.warning(_LW('MirrorView already created, mirror name ' '%(name)s. Message: %(msg)s'), {'name': name, 'msg': out}) else: self._client._raise_cli_error(cmd=command_create, rc=rc, out=out) return rc def create_secondary_lun(self, pool_name, lun_name, lun_size, provisioning, tiering, poll=False): """Creates secondary LUN in remote device.""" data = self._secondary_client.create_lun_with_advance_feature( pool=pool_name, name=lun_name, size=lun_size, provisioning=provisioning, tiering=tiering, consistencygroup_id=None, ignore_thresholds=False, poll=poll) return data['lun_id'] def delete_secondary_lun(self, lun_name): """Deletes secondary LUN in remote device.""" self._secondary_client.delete_lun(lun_name) def destroy_mirror(self, name, poll=False): command_destroy = ('mirror', '-sync', '-destroy', '-name', name, '-force', '-o') out, rc = self._client.command_execute(*command_destroy, poll=poll) if rc != 0 or out.strip(): if VNXError.has_error(out, VNXError.MIRROR_NOT_FOUND): LOG.warning(_LW('MirrorView %(name)s was already deleted. 
' 'Message: %(msg)s'), {'name': name, 'msg': out}) else: self._client._raise_cli_error(cmd=command_destroy, rc=rc, out=out) return out, rc def add_image(self, name, secondary_lun_id, poll=False): """Adds secondary image to mirror.""" secondary_array_ip = self._secondary_client.active_storage_ip command_add = ('mirror', '-sync', '-addimage', '-name', name, '-arrayhost', secondary_array_ip, '-lun', secondary_lun_id, '-recoverypolicy', 'auto', '-syncrate', 'high') out, rc = self._client.command_execute(*command_add, poll=poll) if rc != 0: self._client._raise_cli_error(cmd=command_add, rc=rc, out=out) return out, rc def remove_image(self, name, mirror_view=None, poll=False): """Removes secondary image(s) from mirror.""" if not mirror_view: mirror_view = self.get_image(name, poll=True) image_uid = self._extract_image_uid(mirror_view, 'secondary') command_remove = ('mirror', '-sync', '-removeimage', '-name', name, '-imageuid', image_uid, '-o') out, rc = self._client.command_execute(*command_remove, poll=poll) if rc != 0: self._client._raise_cli_error(cmd=command_remove, rc=rc, out=out) return out, rc def get_image(self, name, use_secondary=False, poll=False): """Returns mirror view properties. :param name: mirror view name :param use_secondary: get image info from secondary or not :return: dict of mirror view properties as below: { 'MirrorView Name': 'mirror name', 'MirrorView Description': 'some description here', ..., 'images': [ { 'Image UID': '50:06:01:60:88:60:08:0F', 'Is Image Primary': 'YES', ... 'Preferred SP': 'A' }, { 'Image UID': '50:06:01:60:88:60:03:BA', 'Is Image Primary': 'NO', ..., 'Synchronizing Progress(%)': 100 } ] } """ if use_secondary: client = self._secondary_client else: client = self._client command_get = ('mirror', '-sync', '-list', '-name', name) out, rc = client.command_execute( *command_get, poll=poll) if rc != 0: if VNXError.has_error(out, VNXError.MIRROR_NOT_FOUND): LOG.warning(_LW('Getting MirrorView %(name)s failed.'
' Message: %(msg)s.'), {'name': name, 'msg': out}) return None else: client._raise_cli_error(cmd=command_get, rc=rc, out=out) mvs = {} mvs_info, images_info = re.split('Images:\s*', out) for line in mvs_info.strip('\n').split('\n'): k, v = re.split(':\s*', line, 1) mvs[k] = v if not v or re.search('\D', v) else int(v) mvs['images'] = [] for image_raw in re.split('\n\n+', images_info.strip('\n')): image = {} for line in image_raw.split('\n'): k, v = re.split(':\s*', line, 1) image[k] = v if not v or re.search('\D', v) else int(v) mvs['images'].append(image) return mvs def fracture_image(self, name, mirror_view=None, poll=False): """Stops the synchronization between LUNs.""" if not mirror_view: mirror_view = self.get_image(name, poll=True) image_uid = self._extract_image_uid(mirror_view, 'secondary') command_fracture = ('mirror', '-sync', '-fractureimage', '-name', name, '-imageuid', image_uid, '-o') out, rc = self._client.command_execute(*command_fracture, poll=poll) if rc != 0: self._client._raise_cli_error(cmd=command_fracture, rc=rc, out=out) return out, rc def sync_image(self, name, mirror_view=None, poll=False): """Synchronizes the secondary image and waits for completion.""" if not mirror_view: mirror_view = self.get_image(name, poll=True) image_state = mirror_view['images'][1]['Image State'] if image_state == self.SYNCHRONIZED_STATE: LOG.debug('replication %(name)s is already in %(state)s.', {'name': name, 'state': image_state}) return "", 0 image_uid = self._extract_image_uid(mirror_view, 'secondary') command_sync = ('mirror', '-sync', '-syncimage', '-name', name, '-imageuid', image_uid, '-o') out, rc = self._client.command_execute(*command_sync, poll=poll) if rc != 0: self._client._raise_cli_error(cmd=command_sync, rc=rc, out=out) def inner(): tmp_mirror = self.get_image(name) for image in tmp_mirror['images']: if 'Image State' in image: # Only secondary image contains this field return (image['Image State'] == self.SYNCHRONIZED_STATE and image['Synchronizing Progress(%)'] == 100) self._client._wait_for_a_condition(inner) return out, rc def promote_image(self, name, mirror_view=None, poll=False): """Promotes the secondary image on the secondary system. NOTE: the image can be promoted only when its "Image State" is 'Consistent' or 'Synchronized'. """ if not mirror_view: mirror_view = self.get_image(name, use_secondary=True, poll=True) image_uid = self._extract_image_uid(mirror_view, 'secondary') command_promote = ('mirror', '-sync', '-promoteimage', '-name', name, '-imageuid', image_uid, '-o') out, rc = self._secondary_client.command_execute( *command_promote, poll=poll) if rc != 0: self._client._raise_cli_error(cmd=command_promote, rc=rc, out=out) return out, rc def _extract_image_uid(self, mirror_view, image_type='primary'): """Returns primary or secondary image uid from mirror objects. :param mirror_view: parsed mirror view. :param image_type: 'primary' or 'secondary'. """ images = mirror_view['images'] for image in images: is_primary = image['Is Image Primary'] if image_type == 'primary' and is_primary == 'YES': image_uid = image['Image UID'] break if image_type == 'secondary' and is_primary == 'NO': image_uid = image['Image UID'] break return image_uid class MirrorCreateTask(task.Task): """Creates a MirrorView with primary lun for replication. Reversion strategy: Destroy the created MirrorView.
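This task is the first step of the flow assembled by _get_create_mirror_flow; a condensed sketch (names and values are illustrative, ``mirror`` is a MirrorView instance)::

    flow = linear_flow.Flow('create_mirror_view')
    flow.add(MirrorCreateTask('mirror_vol-01', primary_lun_id),
             MirrorSecLunCreateTask('Pool_0', 'vol-01', 1, 'thick', None),
             MirrorAddImageTask('mirror_vol-01'))
    taskflow.engines.load(flow, store={'mirror': mirror}).run()

If a later task fails, this task's revert destroys the mirror it created.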
""" def __init__(self, mirror_name, primary_lun_id, **kwargs): super(MirrorCreateTask, self).__init__() self.mirror_name = mirror_name self.primary_lun_id = primary_lun_id def execute(self, mirror, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) mirror.create_mirror(self.mirror_name, self.primary_lun_id, poll=True) def revert(self, result, mirror, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.debug(method_name) if isinstance(result, failure.Failure): return else: LOG.warning(_LW('%(method)s: destroying mirror ' 'view %(name)s.'), {'method': method_name, 'name': self.mirror_name}) mirror.destroy_mirror(self.mirror_name, poll=True) class MirrorSecLunCreateTask(task.Task): """Creates a secondary LUN on secondary system. Reversion strategy: Delete secondary LUN. """ def __init__(self, pool_name, lun_name, lun_size, provisioning, tiering): super(MirrorSecLunCreateTask, self).__init__(provides='sec_lun_id') self.pool_name = pool_name self.lun_name = lun_name self.lun_size = lun_size self.provisioning = provisioning self.tiering = tiering def execute(self, mirror, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) sec_lun_id = mirror.create_secondary_lun( self.pool_name, self.lun_name, self.lun_size, self.provisioning, self.tiering) return sec_lun_id def revert(self, result, mirror, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.debug(method_name) if isinstance(result, failure.Failure): return else: LOG.warning(_LW('%(method)s: destroying secondary LUN ' '%(name)s.'), {'method': method_name, 'name': self.lun_name}) mirror.delete_secondary_lun(self.lun_name) class MirrorAddImageTask(task.Task): """Add the secondary image to MirrorView. Reversion strategy: Remove the secondary image. """ def __init__(self, mirror_name): super(MirrorAddImageTask, self).__init__() self.mirror_name = mirror_name def execute(self, mirror, sec_lun_id, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) mirror.add_image(self.mirror_name, sec_lun_id, poll=True) def revert(self, result, mirror, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.debug(method_name) if isinstance(result, failure.Failure): return else: LOG.warning(_LW('%(method)s: removing secondary image ' 'from %(name)s.'), {'method': method_name, 'name': self.mirror_name}) mirror.remove_image(self.mirror_name, poll=True) cinder-8.0.0/cinder/volume/drivers/emc/emc_cli_fc.py0000664000567000056710000002776112701406250023550 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fibre Channel Driver for EMC VNX array based on CLI.""" from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.emc import emc_vnx_cli from cinder.zonemanager import utils as zm_utils LOG = logging.getLogger(__name__) class EMCCLIFCDriver(driver.FibreChannelDriver): """EMC FC Driver for VNX using CLI. 
Version history: 1.0.0 - Initial driver 2.0.0 - Thick/thin provisioning, robust enhancement 3.0.0 - Array-based Backend Support, FC Basic Support, Target Port Selection for MPIO, Initiator Auto Registration, Storage Group Auto Deletion, Multiple Authentication Type Support, Storage-Assisted Volume Migration, SP Toggle for HA 3.0.1 - Security File Support 4.0.0 - Advance LUN Features (Compression Support, Deduplication Support, FAST VP Support, FAST Cache Support), Storage-assisted Retype, External Volume Management, Read-only Volume, FC Auto Zoning 4.1.0 - Consistency group support 5.0.0 - Performance enhancement, LUN Number Threshold Support, Initiator Auto Deregistration, Force Deleting LUN in Storage Groups, robust enhancement 5.1.0 - iSCSI multipath enhancement 5.2.0 - Pool-aware scheduler support 5.3.0 - Consistency group modification support 6.0.0 - Over subscription support Create consistency group from cgsnapshot support Multiple pools support enhancement Manage/unmanage volume revise White list target ports support Snap copy support Support efficient non-disruptive backup 7.0.0 - Clone consistency group support Replication v2 support(managed) Configurable migration rate support """ def __init__(self, *args, **kwargs): super(EMCCLIFCDriver, self).__init__(*args, **kwargs) self.cli = emc_vnx_cli.getEMCVnxCli( 'FC', configuration=self.configuration, active_backend_id=kwargs.get('active_backend_id')) self.VERSION = self.cli.VERSION def check_for_setup_error(self): pass def create_volume(self, volume): """Creates a volume.""" return self.cli.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" return self.cli.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume.""" return self.cli.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend a volume.""" self.cli.extend_volume(volume, new_size) def delete_volume(self, volume): """Deletes a volume.""" self.cli.delete_volume(volume) def migrate_volume(self, ctxt, volume, host): """Migrate volume via EMC migration functionality.""" return self.cli.migrate_volume(ctxt, volume, host) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" return self.cli.retype(ctxt, volume, new_type, diff, host) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.cli.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.cli.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" pass def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass @zm_utils.AddFCZone def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. 
The initiator_target_map is a map that represents the remote wwn(s) and a list of wwns which are visible to the remote wwn(s). Example return values: { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', 'initiator_target_map': { '1122334455667788': ['1234567890123'] } } } or { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'initiator_target_map': { '1122334455667788': ['1234567890123', '0987654321321'] } } } """ conn_info = self.cli.initialize_connection(volume, connector) LOG.debug("Exit initialize_connection" " - Returning FC connection info: %(conn_info)s.", {'conn_info': conn_info}) return conn_info @zm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" conn_info = self.cli.terminate_connection(volume, connector) LOG.debug("Exit terminate_connection" " - Returning FC connection info: %(conn_info)s.", {'conn_info': conn_info}) return conn_info def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, update the stats first. """ if refresh: self.update_volume_stats() return self._stats def update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") data = self.cli.update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'EMCCLIFCDriver' data['storage_protocol'] = 'FC' self._stats = data def manage_existing(self, volume, existing_ref): """Manage an existing lun in the array. The lun should be in a manageable pool backend, otherwise an error is returned. Rename the backend storage object so that it matches volume['name'], which is how drivers traditionally map between a cinder volume and the associated backend storage object.
manage_existing_ref:{ 'source-id': } or manage_existing_ref:{ 'source-name': } """ return self.cli.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing.""" return self.cli.manage_existing_get_size(volume, existing_ref) def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" return self.cli.create_consistencygroup(context, group) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" return self.cli.delete_consistencygroup( context, group, volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" return self.cli.create_cgsnapshot( context, cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" return self.cli.delete_cgsnapshot( context, cgsnapshot, snapshots) def get_pool(self, volume): """Returns the pool name of a volume.""" return self.cli.get_pool(volume) def update_consistencygroup(self, context, group, add_volumes, remove_volumes): """Updates LUNs in consistency group.""" return self.cli.update_consistencygroup(context, group, add_volumes, remove_volumes) def unmanage(self, volume): """Unmanages a volume.""" return self.cli.unmanage(volume) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistency group from source.""" return self.cli.create_consistencygroup_from_src(context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) def update_migrated_volume(self, context, volume, new_volume, original_volume_status=None): """Returns model update for migrated volume.""" return self.cli.update_migrated_volume(context, volume, new_volume, original_volume_status) def create_export_snapshot(self, context, snapshot, connector): """Creates a snapshot mount point for snapshot.""" return self.cli.create_export_snapshot(context, snapshot, connector) def remove_export_snapshot(self, context, snapshot): """Removes snapshot mount point for snapshot.""" return self.cli.remove_export_snapshot(context, snapshot) def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Allows connection to snapshot.""" return self.cli.initialize_connection_snapshot(snapshot, connector, **kwargs) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallows connection to snapshot.""" return self.cli.terminate_connection_snapshot(snapshot, connector, **kwargs) def backup_use_temp_snapshot(self): return True def failover_host(self, context, volumes, secondary_backend_id): """Failovers volume from primary device to secondary.""" return self.cli.failover_host(context, volumes, secondary_backend_id) cinder-8.0.0/cinder/volume/drivers/emc/xtremio.py0000664000567000056710000012172312701406250023165 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Driver for EMC XtremIO Storage. supported XtremIO version 2.4 and up 1.0.0 - initial release 1.0.1 - enable volume extend 1.0.2 - added FC support, improved error handling 1.0.3 - update logging level, add translation 1.0.4 - support for FC zones 1.0.5 - add support for XtremIO 4.0 1.0.6 - add support for iSCSI multipath, CA validation, consistency groups, R/O snapshots, CHAP discovery authentication 1.0.7 - cache glance images on the array """ import json import math import random import requests import string from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF DEFAULT_PROVISIONING_FACTOR = 20.0 XTREMIO_OPTS = [ cfg.StrOpt('xtremio_cluster_name', default='', help='XMS cluster id in multi-cluster environment'), cfg.IntOpt('xtremio_array_busy_retry_count', default=5, help='Number of retries in case array is busy'), cfg.IntOpt('xtremio_array_busy_retry_interval', default=5, help='Interval between retries in case array is busy'), cfg.IntOpt('xtremio_volumes_per_glance_cache', default=100, help='Number of volumes created from each cached glance image')] CONF.register_opts(XTREMIO_OPTS) RANDOM = random.Random() OBJ_NOT_FOUND_ERR = 'obj_not_found' VOL_NOT_UNIQUE_ERR = 'vol_obj_name_not_unique' VOL_OBJ_NOT_FOUND_ERR = 'vol_obj_not_found' ALREADY_MAPPED_ERR = 'already_mapped' SYSTEM_BUSY = 'system_is_busy' TOO_MANY_OBJECTS = 'too_many_objs' TOO_MANY_SNAPSHOTS_PER_VOL = 'too_many_snapshots_per_vol' XTREMIO_OID_NAME = 1 XTREMIO_OID_INDEX = 2 class XtremIOClient(object): def __init__(self, configuration, cluster_id): self.configuration = configuration self.cluster_id = cluster_id self.verify = (self.configuration. safe_get('driver_ssl_cert_verify') or False) if self.verify: verify_path = (self.configuration. 
safe_get('driver_ssl_cert_path') or None) if verify_path: self.verify = verify_path def get_base_url(self, ver): if ver == 'v1': return 'https://%s/api/json/types' % self.configuration.san_ip elif ver == 'v2': return 'https://%s/api/json/v2/types' % self.configuration.san_ip @utils.retry(exception.XtremIOArrayBusy, CONF.xtremio_array_busy_retry_count, CONF.xtremio_array_busy_retry_interval, 1) def req(self, object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v1'): if not data: data = {} if name and idx: msg = _("can't handle both name and index in req") LOG.error(msg) raise exception.VolumeDriverException(message=msg) url = '%s/%s' % (self.get_base_url(ver), object_type) params = {} key = None if name: params['name'] = name key = name elif idx: url = '%s/%d' % (url, idx) key = str(idx) if method in ('GET', 'DELETE'): params.update(data) self.update_url(params, self.cluster_id) if method != 'GET': self.update_data(data, self.cluster_id) LOG.debug('data: %s', data) LOG.debug('%(type)s %(url)s', {'type': method, 'url': url}) try: response = requests.request(method, url, params=params, data=json.dumps(data), verify=self.verify, auth=(self.configuration.san_login, self.configuration.san_password)) except requests.exceptions.RequestException as exc: msg = (_('Exception: %s') % six.text_type(exc)) raise exception.VolumeDriverException(message=msg) if 200 <= response.status_code < 300: if method in ('GET', 'POST'): return response.json() else: return '' self.handle_errors(response, key, object_type) def handle_errors(self, response, key, object_type): if response.status_code == 400: error = response.json() err_msg = error.get('message') if err_msg.endswith(OBJ_NOT_FOUND_ERR): LOG.warning(_LW("object %(key)s of " "type %(typ)s not found, %(err_msg)s"), {'key': key, 'typ': object_type, 'err_msg': err_msg, }) raise exception.NotFound() elif err_msg == VOL_NOT_UNIQUE_ERR: LOG.error(_LE("can't create 2 volumes with the same name, %s"), err_msg) msg = (_('Volume by this name already exists')) raise exception.VolumeBackendAPIException(data=msg) elif err_msg == VOL_OBJ_NOT_FOUND_ERR: LOG.error(_LE("Can't find volume to map %(key)s, %(msg)s"), {'key': key, 'msg': err_msg, }) raise exception.VolumeNotFound(volume_id=key) elif ALREADY_MAPPED_ERR in err_msg: raise exception.XtremIOAlreadyMappedError() elif err_msg == SYSTEM_BUSY: raise exception.XtremIOArrayBusy() elif err_msg in (TOO_MANY_OBJECTS, TOO_MANY_SNAPSHOTS_PER_VOL): raise exception.XtremIOSnapshotsLimitExceeded() msg = _('Bad response from XMS, %s') % response.text LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) def update_url(self, data, cluster_id): return def update_data(self, data, cluster_id): return def get_cluster(self): return self.req('clusters', idx=1)['content'] def create_snapshot(self, src, dest, ro=False): """Create a snapshot of a volume on the array. XtreamIO array snapshots are also volumes. :src: name of the source volume to be cloned :dest: name for the new snapshot :ro: new snapshot type ro/regular. 
only applicable to Client4 """ raise NotImplementedError() def get_extra_capabilities(self): return {} def get_initiator(self, port_address): raise NotImplementedError() def add_vol_to_cg(self, vol_id, cg_id): pass class XtremIOClient3(XtremIOClient): def __init__(self, configuration, cluster_id): super(XtremIOClient3, self).__init__(configuration, cluster_id) self._portals = [] def find_lunmap(self, ig_name, vol_name): try: lun_mappings = self.req('lun-maps')['lun-maps'] except exception.NotFound: raise (exception.VolumeDriverException (_("can't find lun-map, ig:%(ig)s vol:%(vol)s") % {'ig': ig_name, 'vol': vol_name})) for lm_link in lun_mappings: idx = lm_link['href'].split('/')[-1] # NOTE(geguileo): There can be races so mapped elements retrieved # in the listing may no longer exist. try: lm = self.req('lun-maps', idx=int(idx))['content'] except exception.NotFound: continue if lm['ig-name'] == ig_name and lm['vol-name'] == vol_name: return lm return None def num_of_mapped_volumes(self, initiator): cnt = 0 for lm_link in self.req('lun-maps')['lun-maps']: idx = lm_link['href'].split('/')[-1] # NOTE(geguileo): There can be races so mapped elements retrieved # in the listing may no longer exist. try: lm = self.req('lun-maps', idx=int(idx))['content'] except exception.NotFound: continue if lm['ig-name'] == initiator: cnt += 1 return cnt def get_iscsi_portals(self): if self._portals: return self._portals iscsi_portals = [t['name'] for t in self.req('iscsi-portals') ['iscsi-portals']] for portal_name in iscsi_portals: try: self._portals.append(self.req('iscsi-portals', name=portal_name)['content']) except exception.NotFound: raise (exception.VolumeBackendAPIException (data=_("iscsi portal, %s, not found") % portal_name)) return self._portals def create_snapshot(self, src, dest, ro=False): data = {'snap-vol-name': dest, 'ancestor-vol-id': src} self.req('snapshots', 'POST', data) def get_initiator(self, port_address): try: return self.req('initiators', 'GET', name=port_address)['content'] except exception.NotFound: pass class XtremIOClient4(XtremIOClient): def __init__(self, configuration, cluster_id): super(XtremIOClient4, self).__init__(configuration, cluster_id) self._cluster_name = None def req(self, object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v2'): return super(XtremIOClient4, self).req(object_type, method, data, name, idx, ver) def get_extra_capabilities(self): return {'consistencygroup_support': True} def find_lunmap(self, ig_name, vol_name): try: return (self.req('lun-maps', data={'full': 1, 'filter': ['vol-name:eq:%s' % vol_name, 'ig-name:eq:%s' % ig_name]}) ['lun-maps'][0]) except (KeyError, IndexError): raise exception.VolumeNotFound(volume_id=vol_name) def num_of_mapped_volumes(self, initiator): return len(self.req('lun-maps', data={'filter': 'ig-name:eq:%s' % initiator}) ['lun-maps']) def update_url(self, data, cluster_id): if cluster_id: data['cluster-name'] = cluster_id def update_data(self, data, cluster_id): if cluster_id: data['cluster-id'] = cluster_id def get_iscsi_portals(self): return self.req('iscsi-portals', data={'full': 1})['iscsi-portals'] def get_cluster(self): if not self.cluster_id: self.cluster_id = self.req('clusters')['clusters'][0]['name'] return self.req('clusters', name=self.cluster_id)['content'] def create_snapshot(self, src, dest, ro=False): data = {'snapshot-set-name': dest, 'snap-suffix': dest, 'volume-list': [src], 'snapshot-type': 'readonly' if ro else 'regular'} res = self.req('snapshots', 'POST', data, ver='v2') typ, idx 
= res['links'][0]['href'].split('/')[-2:] # rename the snapshot data = {'name': dest} try: self.req(typ, 'PUT', data, idx=int(idx)) except exception.VolumeBackendAPIException: # reverting msg = _LE('Failed to rename the created snapshot, reverting.') LOG.error(msg) self.req(typ, 'DELETE', idx=int(idx)) raise def add_vol_to_cg(self, vol_id, cg_id): add_data = {'vol-id': vol_id, 'cg-id': cg_id} self.req('consistency-group-volumes', 'POST', add_data, ver='v2') def get_initiator(self, port_address): inits = self.req('initiators', data={'filter': 'port-address:eq:' + port_address, 'full': 1})['initiators'] if len(inits) == 1: return inits[0] return None class XtremIOVolumeDriver(san.SanDriver): """Executes commands relating to Volumes.""" VERSION = '1.0.7' driver_name = 'XtremIO' MIN_XMS_VERSION = [3, 0, 0] def __init__(self, *args, **kwargs): super(XtremIOVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(XTREMIO_OPTS) self.protocol = None self.backend_name = (self.configuration.safe_get('volume_backend_name') or self.driver_name) self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name') or '') self.provisioning_factor = (self.configuration. safe_get('max_over_subscription_ratio') or DEFAULT_PROVISIONING_FACTOR) self._stats = {} self.client = XtremIOClient3(self.configuration, self.cluster_id) def _obj_from_result(self, res): typ, idx = res['links'][0]['href'].split('/')[-2:] return self.client.req(typ, idx=int(idx))['content'] def check_for_setup_error(self): try: name = self.client.req('clusters')['clusters'][0]['name'] cluster = self.client.req('clusters', name=name)['content'] version_text = cluster['sys-sw-version'] except exception.NotFound: msg = _("XtremIO not initialized correctly, no clusters found") raise (exception.VolumeBackendAPIException (data=msg)) ver = [int(n) for n in version_text.split('-')[0].split('.')] if ver < self.MIN_XMS_VERSION: msg = (_('Invalid XtremIO version %(cur)s,' ' version %(min)s or up is required') % {'min': self.MIN_XMS_VERSION, 'cur': ver}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: LOG.info(_LI('XtremIO SW version %s'), version_text) if ver[0] >= 4: self.client = XtremIOClient4(self.configuration, self.cluster_id) def create_volume(self, volume): """Creates a volume.""" data = {'vol-name': volume['id'], 'vol-size': str(volume['size']) + 'g' } self.client.req('volumes', 'POST', data) if volume.get('consistencygroup_id'): self.client.add_vol_to_cg(volume['id'], volume['consistencygroup_id']) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" if snapshot.get('cgsnapshot_id'): # get array snapshot id from CG snapshot snap_by_anc = self._get_snapset_ancestors(snapshot.cgsnapshot) snapshot_id = snap_by_anc[snapshot['volume_id']] else: snapshot_id = snapshot['id'] self.client.create_snapshot(snapshot_id, volume['id']) # add new volume to consistency group if (volume.get('consistencygroup_id') and isinstance(self.client, XtremIOClient4)): self.client.add_vol_to_cg(volume['id'], volume['consistencygroup_id']) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" vol = self.client.req('volumes', name=src_vref['id'])['content'] ctxt = context.get_admin_context() cache = self.db.image_volume_cache_get_by_volume_id(ctxt, src_vref['id']) limit = self.configuration.safe_get('xtremio_volumes_per_glance_cache') if cache and limit and limit > 0 and limit <= vol['num-of-dest-snaps']: raise
exception.CinderException('Exceeded the configured limit of ' '%d snapshots per volume' % limit) try: self.client.create_snapshot(src_vref['id'], volume['id']) except exception.XtremIOSnapshotsLimitExceeded as e: raise exception.CinderException(e.message) if (volume.get('consistencygroup_id') and isinstance(self.client, XtremIOClient4)): self.client.add_vol_to_cg(volume['id'], volume['consistencygroup_id']) def delete_volume(self, volume): """Deletes a volume.""" try: self.client.req('volumes', 'DELETE', name=volume['id']) except exception.NotFound: LOG.info(_LI("volume %s doesn't exist"), volume['id']) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.client.create_snapshot(snapshot.volume_id, snapshot.id, True) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" try: self.client.req('volumes', 'DELETE', name=snapshot.id) except exception.NotFound: LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id) def _update_volume_stats(self): sys = self.client.get_cluster() physical_space = int(sys["ud-ssd-space"]) / units.Mi used_physical_space = int(sys["ud-ssd-space-in-use"]) / units.Mi free_physical = physical_space - used_physical_space actual_prov = int(sys["vol-size"]) / units.Mi self._stats = {'volume_backend_name': self.backend_name, 'vendor_name': 'EMC', 'driver_version': self.VERSION, 'storage_protocol': self.protocol, 'total_capacity_gb': physical_space, 'free_capacity_gb': (free_physical * self.provisioning_factor), 'provisioned_capacity_gb': actual_prov, 'max_over_subscription_ratio': self.provisioning_factor, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'multiattach': True, } self._stats.update(self.client.get_extra_capabilities()) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, update the stats first. """ if refresh: self._update_volume_stats() return self._stats def manage_existing(self, volume, existing_ref): """Manages an existing LV.""" lv_name = existing_ref['source-name'] # Attempt to locate the volume. try: vol_obj = self.client.req('volumes', name=lv_name)['content'] except exception.NotFound: kwargs = {'existing_ref': lv_name, 'reason': 'Specified logical volume does not exist.'} raise exception.ManageExistingInvalidReference(**kwargs) # Attempt to rename the LV to match the OpenStack internal name. self.client.req('volumes', 'PUT', data={'vol-name': volume['id']}, idx=vol_obj['index']) def manage_existing_get_size(self, volume, existing_ref): """Return size of an existing LV for manage_existing.""" # Check that the reference is valid if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) lv_name = existing_ref['source-name'] # Attempt to locate the volume. try: vol_obj = self.client.req('volumes', name=lv_name)['content'] except exception.NotFound: kwargs = {'existing_ref': lv_name, 'reason': 'Specified logical volume does not exist.'} raise exception.ManageExistingInvalidReference(**kwargs) # LV size is returned in gigabytes. Attempt to parse size as a float # and round up to the next integer.
lv_size = int(math.ceil(int(vol_obj['vol-size']) / units.Mi)) return lv_size def unmanage(self, volume): """Removes the specified volume from Cinder management.""" # trying to rename the volume to [cinder name]-unmanged try: self.client.req('volumes', 'PUT', name=volume['id'], data={'vol-name': volume['name'] + '-unmanged'}) except exception.NotFound: LOG.info(_LI("Volume with the name %s wasn't found," " can't unmanage"), volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" data = {'vol-size': six.text_type(new_size) + 'g'} try: self.client.req('volumes', 'PUT', data, name=volume['id']) except exception.NotFound: msg = _("can't find the volume to extend") raise exception.VolumeDriverException(message=msg) def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector""" tg = self.client.req('target-groups', name='Default')['content'] vol = self.client.req('volumes', name=volume['id'])['content'] for ig_idx in self._get_ig_indexes_from_initiators(connector): lm_name = '%s_%s_%s' % (six.text_type(vol['index']), six.text_type(ig_idx), six.text_type(tg['index'])) LOG.debug('Removing lun map %s.', lm_name) try: self.client.req('lun-maps', 'DELETE', name=lm_name) except exception.NotFound: LOG.warning(_LW("terminate_connection: lun map not found")) def _get_password(self): return ''.join(RANDOM.choice (string.ascii_uppercase + string.digits) for _ in range(12)) def create_lun_map(self, volume, ig, lun_num=None): try: data = {'ig-id': ig, 'vol-id': volume['id']} if lun_num: data['lun'] = lun_num res = self.client.req('lun-maps', 'POST', data) lunmap = self._obj_from_result(res) LOG.info(_LI('Created lun-map:\n%s'), lunmap) except exception.XtremIOAlreadyMappedError: LOG.info(_LI('Volume already mapped, retrieving %(ig)s, %(vol)s'), {'ig': ig, 'vol': volume['id']}) lunmap = self.client.find_lunmap(ig, volume['id']) return lunmap def _get_ig_name(self, connector): raise NotImplementedError() def _get_ig_indexes_from_initiators(self, connector): initiator_names = self._get_initiator_names(connector) ig_indexes = set() for initiator_name in initiator_names: initiator = self.client.get_initiator(initiator_name) ig_indexes.add(initiator['ig-id'][XTREMIO_OID_INDEX]) return list(ig_indexes) def _get_initiator_names(self, connector): raise NotImplementedError() def create_consistencygroup(self, context, group): """Creates a consistency group. 
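# NOTE(editor): worked example of the conversion in
# manage_existing_get_size() above. The division by units.Mi (1024 ** 2)
# implies the XMS reports 'vol-size' in kilobytes; the result is whole
# gigabytes, rounded up. The helper name is hypothetical, and float() is
# used to make the rounding explicit under Python 2 integer division.
import math
from oslo_utils import units

def kb_to_gb_rounded_up(vol_size_kb):
    return int(math.ceil(float(vol_size_kb) / units.Mi))

assert kb_to_gb_rounded_up(5242880) == 5   # exactly 5 GiB
assert kb_to_gb_rounded_up(5242881) == 6   # 1 KB over -> rounds up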
:param context: the context :param group: the group object to be created :returns: dict -- modelUpdate = {'status': 'available'} :raises: VolumeBackendAPIException """ create_data = {'consistency-group-name': group['id']} self.client.req('consistency-groups', 'POST', data=create_data, ver='v2') return {'status': fields.ConsistencyGroupStatus.AVAILABLE} def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" self.client.req('consistency-groups', 'DELETE', name=group['id'], ver='v2') volumes = self.db.volume_get_all_by_group(context, group['id']) for volume in volumes: self.delete_volume(volume) volume.status = 'deleted' model_update = {'status': group['status']} return model_update, volumes def _get_snapset_ancestors(self, snapset_name): snapset = self.client.req('snapshot-sets', name=snapset_name)['content'] volume_ids = [s[XTREMIO_OID_INDEX] for s in snapset['vol-list']] return {v['ancestor-vol-id'][XTREMIO_OID_NAME]: v['name'] for v in self.client.req('volumes', data={'full': 1, 'props': 'ancestor-vol-id'})['volumes'] if v['index'] in volume_ids} def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistencygroup from source. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :param volumes: a list of volume dictionaries in the group. :param cgsnapshot: the dictionary of the cgsnapshot as source. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. :returns model_update, volumes_model_update """ if not (cgsnapshot and snapshots and not source_cg or source_cg and source_vols and not cgsnapshot): msg = _("create_consistencygroup_from_src only supports a " "cgsnapshot source or a consistency group source. " "Multiple sources cannot be used.") raise exception.InvalidInput(msg) if cgsnapshot: snap_name = self._get_cgsnap_name(cgsnapshot) snap_by_anc = self._get_snapset_ancestors(snap_name) for volume, snapshot in zip(volumes, snapshots): real_snap = snap_by_anc[snapshot['volume_id']] self.create_volume_from_snapshot(volume, {'id': real_snap}) elif source_cg: data = {'consistency-group-id': source_cg['id'], 'snapshot-set-name': group['id']} self.client.req('snapshots', 'POST', data, ver='v2') snap_by_anc = self._get_snapset_ancestors(group['id']) for volume, src_vol in zip(volumes, source_vols): snap_vol_name = snap_by_anc[src_vol['id']] self.client.req('volumes', 'PUT', {'name': volume['id']}, name=snap_vol_name) create_data = {'consistency-group-name': group['id'], 'vol-list': [v['id'] for v in volumes]} self.client.req('consistency-groups', 'POST', data=create_data, ver='v2') return None, None def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. :param remove_volumes: a list of volume dictionaries to be removed. 
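# NOTE(editor): sketch of how the _get_snapset_ancestors() mapping above is
# consumed: it maps each source (ancestor) volume to the name of its
# snapshot inside the snapshot set, so the CG code can pair Cinder volumes
# with their array-side snapshots. The sample data is invented.
snap_by_anc = {'vol-uuid-1': 'snapset1.vol-uuid-1',
               'vol-uuid-2': 'snapset1.vol-uuid-2'}
paired = [(vol_id, snap_by_anc[vol_id]) for vol_id in sorted(snap_by_anc)]
assert paired[0] == ('vol-uuid-1', 'snapset1.vol-uuid-1')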
:returns: model_update, add_volumes_update, remove_volumes_update """ add_volumes = add_volumes if add_volumes else [] remove_volumes = remove_volumes if remove_volumes else [] for vol in add_volumes: add_data = {'vol-id': vol['id'], 'cg-id': group['id']} self.client.req('consistency-group-volumes', 'POST', add_data, ver='v2') for vol in remove_volumes: remove_data = {'vol-id': vol['id'], 'cg-id': group['id']} self.client.req('consistency-group-volumes', 'DELETE', remove_data, name=group['id'], ver='v2') return None, None, None def _get_cgsnap_name(self, cgsnapshot): return '%(cg)s%(snap)s' % {'cg': cgsnapshot['consistencygroup_id'] .replace('-', ''), 'snap': cgsnapshot['id'].replace('-', '')} def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" data = {'consistency-group-id': cgsnapshot['consistencygroup_id'], 'snapshot-set-name': self._get_cgsnap_name(cgsnapshot)} self.client.req('snapshots', 'POST', data, ver='v2') snapshots = objects.SnapshotList().get_all_for_cgsnapshot( context, cgsnapshot['id']) for snapshot in snapshots: snapshot.status = 'available' model_update = {'status': 'available'} return model_update, snapshots def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" self.client.req('snapshot-sets', 'DELETE', name=self._get_cgsnap_name(cgsnapshot), ver='v2') snapshots = objects.SnapshotList().get_all_for_cgsnapshot( context, cgsnapshot['id']) for snapshot in snapshots: snapshot.status = 'deleted' model_update = {'status': cgsnapshot.status} return model_update, snapshots def _get_ig(self, name): try: return self.client.req('initiator-groups', 'GET', name=name)['content'] except exception.NotFound: pass def _create_ig(self, name): # create an initiator group to hold the initiator data = {'ig-name': name} self.client.req('initiator-groups', 'POST', data) try: return self.client.req('initiator-groups', name=name)['content'] except exception.NotFound: raise (exception.VolumeBackendAPIException (data=_("Failed to create IG, %s") % name)) class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver): """Executes commands relating to ISCSI volumes. We make use of model provider properties as follows: ``provider_location`` if present, contains the iSCSI target information in the same format as an ietadm discovery i.e. ':, ' ``provider_auth`` if present, contains a space-separated triple: ' '. `CHAP` is the only auth_method in use at the moment. 
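# NOTE(editor): worked example of the naming scheme in _get_cgsnap_name()
# above: both UUIDs are stripped of dashes and concatenated, yielding a
# deterministic 64-character snapshot-set name. The UUIDs are invented.
cg_id = 'd1b5e5f0-1b2c-4d3e-9f4a-5b6c7d8e9f0a'
snap_id = 'a0f9e8d7-c6b5-4a3f-8e2d-1c0b9a8f7e6d'
cgsnap_name = '%(cg)s%(snap)s' % {'cg': cg_id.replace('-', ''),
                                  'snap': snap_id.replace('-', '')}
assert len(cgsnap_name) == 64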
""" driver_name = 'XtremIO_ISCSI' def __init__(self, *args, **kwargs): super(XtremIOISCSIDriver, self).__init__(*args, **kwargs) self.protocol = 'iSCSI' def _add_auth(self, data, login_chap, discovery_chap): login_passwd, discovery_passwd = None, None if login_chap: data['initiator-authentication-user-name'] = 'chap_user' login_passwd = self._get_password() data['initiator-authentication-password'] = login_passwd if discovery_chap: data['initiator-discovery-user-name'] = 'chap_user' discovery_passwd = self._get_password() data['initiator-discovery-password'] = discovery_passwd return login_passwd, discovery_passwd def _create_initiator(self, connector, login_chap, discovery_chap): initiator = self._get_initiator_names(connector)[0] # create an initiator data = {'initiator-name': initiator, 'ig-id': initiator, 'port-address': initiator} l, d = self._add_auth(data, login_chap, discovery_chap) self.client.req('initiators', 'POST', data) return l, d def initialize_connection(self, volume, connector): try: sys = self.client.get_cluster() except exception.NotFound: msg = _("XtremIO not initialized correctly, no clusters found") raise exception.VolumeBackendAPIException(data=msg) login_chap = (sys.get('chap-authentication-mode', 'disabled') != 'disabled') discovery_chap = (sys.get('chap-discovery-mode', 'disabled') != 'disabled') initiator_name = self._get_initiator_names(connector)[0] initiator = self.client.get_initiator(initiator_name) if initiator: login_passwd = initiator['chap-authentication-initiator-password'] discovery_passwd = initiator['chap-discovery-initiator-password'] ig = self._get_ig(initiator['ig-id'][XTREMIO_OID_NAME]) else: ig = self._get_ig(self._get_ig_name(connector)) if not ig: ig = self._create_ig(self._get_ig_name(connector)) (login_passwd, discovery_passwd) = self._create_initiator(connector, login_chap, discovery_chap) # if CHAP was enabled after the the initiator was created if login_chap and not login_passwd: LOG.info(_LI('initiator has no password while using chap,' 'adding it')) data = {} (login_passwd, d_passwd) = self._add_auth(data, login_chap, discovery_chap and not discovery_passwd) discovery_passwd = (discovery_passwd if discovery_passwd else d_passwd) self.client.req('initiators', 'PUT', data, idx=initiator['index']) # lun mappping lunmap = self.create_lun_map(volume, ig['ig-id'][XTREMIO_OID_NAME]) properties = self._get_iscsi_properties(lunmap) if login_chap: properties['auth_method'] = 'CHAP' properties['auth_username'] = 'chap_user' properties['auth_password'] = login_passwd if discovery_chap: properties['discovery_auth_method'] = 'CHAP' properties['discovery_auth_username'] = 'chap_user' properties['discovery_auth_password'] = discovery_passwd LOG.debug('init conn params:\n%s', properties) return { 'driver_volume_type': 'iscsi', 'data': properties } def _get_iscsi_properties(self, lunmap): """Gets iscsi configuration. :target_discovered: boolean indicating whether discovery was used :target_iqn: the IQN of the iSCSI target :target_portal: the portal of the iSCSI target :target_lun: the lun of the iSCSI target :volume_id: the id of the volume (currently used by xen) :auth_method:, :auth_username:, :auth_password: the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. multiple connection return :target_iqns, :target_portals, :target_luns, which contain lists of multiple values. 
The main portal information is also returned in :target_iqn, :target_portal, :target_lun for backward compatibility. """ portals = self.client.get_iscsi_portals() if not portals: msg = _("XtremIO not configured correctly, no iscsi portals found") LOG.error(msg) raise exception.VolumeDriverException(message=msg) portal = RANDOM.choice(portals) portal_addr = ('%(ip)s:%(port)d' % {'ip': portal['ip-addr'].split('/')[0], 'port': portal['ip-port']}) tg_portals = ['%(ip)s:%(port)d' % {'ip': p['ip-addr'].split('/')[0], 'port': p['ip-port']} for p in portals] properties = {'target_discovered': False, 'target_iqn': portal['port-address'], 'target_lun': lunmap['lun'], 'target_portal': portal_addr, 'target_iqns': [p['port-address'] for p in portals], 'target_portals': tg_portals, 'target_luns': [lunmap['lun']] * len(portals)} return properties def _get_initiator_names(self, connector): return [connector['initiator']] def _get_ig_name(self, connector): return connector['initiator'] class XtremIOFibreChannelDriver(XtremIOVolumeDriver, driver.FibreChannelDriver): def __init__(self, *args, **kwargs): super(XtremIOFibreChannelDriver, self).__init__(*args, **kwargs) self.protocol = 'FC' self._targets = None def get_targets(self): if not self._targets: try: target_list = self.client.req('targets')["targets"] targets = [self.client.req('targets', name=target['name'])['content'] for target in target_list if '-fc' in target['name']] self._targets = [target['port-address'].replace(':', '') for target in targets if target['port-state'] == 'up'] except exception.NotFound: raise (exception.VolumeBackendAPIException (data=_("Failed to get targets"))) return self._targets def _get_free_lun(self, igs): luns = [] for ig in igs: luns.extend(lm['lun'] for lm in self.client.req('lun-maps', data={'full': 1, 'prop': 'lun', 'filter': 'ig-name:eq:%s' % ig}) ['lun-maps']) uniq_luns = set(luns + [0]) seq = range(len(uniq_luns) + 1) return min(set(seq) - uniq_luns) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): wwpns = self._get_initiator_names(connector) ig_name = self._get_ig_name(connector) i_t_map = {} found = [] new = [] for wwpn in wwpns: init = self.client.get_initiator(wwpn) if init: found.append(init) else: new.append(wwpn) i_t_map[wwpn.replace(':', '')] = self.get_targets() # get or create initiator group if new: ig = self._get_ig(ig_name) if not ig: ig = self._create_ig(ig_name) for wwpn in new: data = {'initiator-name': wwpn, 'ig-id': ig_name, 'port-address': wwpn} self.client.req('initiators', 'POST', data) igs = list(set([i['ig-id'][XTREMIO_OID_NAME] for i in found])) if new and ig['ig-id'][XTREMIO_OID_NAME] not in igs: igs.append(ig['ig-id'][XTREMIO_OID_NAME]) if len(igs) > 1: lun_num = self._get_free_lun(igs) else: lun_num = None for ig in igs: lunmap = self.create_lun_map(volume, ig, lun_num) lun_num = lunmap['lun'] return {'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': False, 'target_lun': lun_num, 'target_wwn': self.get_targets(), 'initiator_target_map': i_t_map}} @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): (super(XtremIOFibreChannelDriver, self) .terminate_connection(volume, connector, **kwargs)) num_vols = (self.client .num_of_mapped_volumes(self._get_ig_name(connector))) if num_vols > 0: data = {} else: i_t_map = {} for initiator in self._get_initiator_names(connector): i_t_map[initiator.replace(':', '')] = self.get_targets() data = {'target_wwn': self.get_targets(), 'initiator_target_map': i_t_map} return 
{'driver_volume_type': 'fibre_channel', 'data': data} def _get_initiator_names(self, connector): return [wwpn if ':' in wwpn else ':'.join(wwpn[i:i + 2] for i in range(0, len(wwpn), 2)) for wwpn in connector['wwpns']] def _get_ig_name(self, connector): return connector['host'] cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_utils.py0000664000567000056710000030621712701406250024520 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import hashlib import random import re from xml.dom import minidom from oslo_log import log as logging from oslo_service import loopingcall import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume import volume_types LOG = logging.getLogger(__name__) try: import pywbem pywbemAvailable = True except ImportError: pywbemAvailable = False STORAGEGROUPTYPE = 4 POSTGROUPTYPE = 3 CLONE_REPLICATION_TYPE = 10 MAX_POOL_LENGTH = 16 MAX_FASTPOLICY_LENGTH = 14 EMC_ROOT = 'root/emc' CONCATENATED = 'concatenated' CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_' CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml' ISCSI = 'iscsi' FC = 'fc' JOB_RETRIES = 60 INTERVAL_10_SEC = 10 INTERVAL = 'storagetype:interval' RETRIES = 'storagetype:retries' CIM_ERR_NOT_FOUND = 6 VOLUME_ELEMENT_NAME_PREFIX = 'OS-' SYNCHRONIZED = 4 class EMCVMAXUtils(object): """Utility class for SMI-S based EMC volume drivers. This Utility class is for EMC volume drivers based on SMI-S. It supports VMAX arrays. """ SLO = 'storagetype:slo' WORKLOAD = 'storagetype:workload' POOL = 'storagetype:pool' def __init__(self, prtcl): if not pywbemAvailable: LOG.info(_LI( "Module PyWBEM not installed. " "Install PyWBEM using the python-pywbem package.")) self.protocol = prtcl def find_storage_configuration_service(self, conn, storageSystemName): """Get storage configuration service with given storage system name. :param conn: connection to the ecom server :param storageSystemName: the storage system name :returns: foundConfigService :raises: VolumeBackendAPIException """ foundConfigService = None configservices = conn.EnumerateInstanceNames( 'EMC_StorageConfigurationService') for configservice in configservices: if storageSystemName == configservice['SystemName']: foundConfigService = configservice LOG.debug("Found Storage Configuration Service: " "%(configservice)s.", {'configservice': configservice}) break if foundConfigService is None: exceptionMessage = (_("Storage Configuration Service not found " "on %(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundConfigService def find_controller_configuration_service(self, conn, storageSystemName): """Get the controller config by using the storage service name. Given the storage system name, get the controller configuration service. 
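# NOTE(editor): the find_*_service() helpers in this class all repeat the
# same pattern: enumerate a CIM service class and keep the instance whose
# SystemName matches the target array. A generic sketch of that pattern
# (`_find_service_by_system` is hypothetical, not part of the driver):
def _find_service_by_system(conn, cim_class, storage_system_name):
    for service in conn.EnumerateInstanceNames(cim_class):
        if service['SystemName'] == storage_system_name:
            return service
    return None

# e.g. _find_service_by_system(conn, 'EMC_ReplicationService',
#                              'SYMMETRIX+000195900551')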
:param conn: connection to the ecom server :param storageSystemName: the storage system name :returns: foundConfigService :raises: VolumeBackendAPIException """ foundConfigService = None configservices = conn.EnumerateInstanceNames( 'EMC_ControllerConfigurationService') for configservice in configservices: if storageSystemName == configservice['SystemName']: foundConfigService = configservice LOG.debug("Found Controller Configuration Service: " "%(configservice)s.", {'configservice': configservice}) break if foundConfigService is None: exceptionMessage = (_("Controller Configuration Service not found " "on %(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundConfigService def find_element_composition_service(self, conn, storageSystemName): """Given the storage system name, get the element composition service. :param conn: the connection to the ecom server :param storageSystemName: the storage system name :returns: foundElementCompositionService :raises: VolumeBackendAPIException """ foundElementCompositionService = None elementCompositionServices = conn.EnumerateInstanceNames( 'Symm_ElementCompositionService') for elementCompositionService in elementCompositionServices: if storageSystemName == elementCompositionService['SystemName']: foundElementCompositionService = elementCompositionService LOG.debug( "Found Element Composition Service: " "%(elementCompositionService)s.", { 'elementCompositionService': elementCompositionService}) break if foundElementCompositionService is None: exceptionMessage = (_("Element Composition Service not found " "on %(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundElementCompositionService def find_storage_relocation_service(self, conn, storageSystemName): """Given the storage system name, get the storage relocation service. :param conn: the connection to the ecom server :param storageSystemName: the storage system name :returns: foundStorageRelocationService :raises: VolumeBackendAPIException """ foundStorageRelocationService = None storageRelocationServices = conn.EnumerateInstanceNames( 'Symm_StorageRelocationService') for storageRelocationService in storageRelocationServices: if storageSystemName == storageRelocationService['SystemName']: foundStorageRelocationService = storageRelocationService LOG.debug( "Found Storage Relocation Service: " "%(storageRelocationService)s.", {'storageRelocationService': storageRelocationService}) break if foundStorageRelocationService is None: exceptionMessage = (_("Storage Relocation Service not found " "on %(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundStorageRelocationService def find_storage_hardwareid_service(self, conn, storageSystemName): """Given the storage system name, get the storage hardware service.
:param conn: the connection to the ecom server :param storageSystemName: the storage system name :returns: foundStorageRelocationService :raises: VolumeBackendAPIException """ foundHardwareService = None storageHardwareservices = conn.EnumerateInstanceNames( 'EMC_StorageHardwareIDManagementService') for storageHardwareservice in storageHardwareservices: if storageSystemName == storageHardwareservice['SystemName']: foundHardwareService = storageHardwareservice LOG.debug("Found Storage Hardware ID Management Service:" "%(storageHardwareservice)s.", {'storageHardwareservice': storageHardwareservice}) break if foundHardwareService is None: exceptionMessage = (_("Storage HardwareId mgmt Service not found " "on %(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundHardwareService def find_replication_service(self, conn, storageSystemName): """Given the storage system name, get the replication service. :param conn: the connection to the ecom server :param storageSystemName: the storage system name :returns: foundRepService :raises: VolumeBackendAPIException """ foundRepService = None repservices = conn.EnumerateInstanceNames( 'EMC_ReplicationService') for repservice in repservices: if storageSystemName == repservice['SystemName']: foundRepService = repservice LOG.debug("Found Replication Service:" "%(repservice)s", {'repservice': repservice}) break if foundRepService is None: exceptionMessage = (_("Replication Service not found " "on %(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundRepService def get_tier_policy_service(self, conn, storageSystemInstanceName): """Gets the tier policy service for a given storage system instance. Given the storage system instance name, get the existing tier policy service. :param conn: the connection information to the ecom server :param storageSystemInstanceName: the storageSystem instance Name :returns: foundTierPolicyService - the tier policy service instance name :raises: VolumeBackendAPIException """ foundTierPolicyService = None groups = conn.AssociatorNames( storageSystemInstanceName, ResultClass='Symm_TierPolicyService', AssocClass='CIM_HostedService') if len(groups) > 0: foundTierPolicyService = groups[0] if foundTierPolicyService is None: exceptionMessage = (_( "Tier Policy Service not found " "for %(storageSystemName)s.") % {'storageSystemName': storageSystemInstanceName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundTierPolicyService def wait_for_job_complete(self, conn, job, extraSpecs=None): """Given the job wait for it to complete. :param conn: connection to the ecom server :param job: the job dict :param extraSpecs: the extraSpecs dict. Defaults to None :returns: int -- the return code :returns: errorDesc - the error description string """ jobInstanceName = job['Job'] if extraSpecs and (INTERVAL in extraSpecs or RETRIES in extraSpecs): self._wait_for_job_complete(conn, job, extraSpecs) else: self._wait_for_job_complete(conn, job) jobinstance = conn.GetInstance(jobInstanceName, LocalOnly=False) rc = jobinstance['ErrorCode'] errorDesc = jobinstance['ErrorDescription'] LOG.debug("Return code is: %(rc)lu. 
" "Error Description is: %(errorDesc)s.", {'rc': rc, 'errorDesc': errorDesc}) return rc, errorDesc def _wait_for_job_complete(self, conn, job, extraSpecs=None): """Given the job wait for it to complete. :param conn: connection to the ecom server :param job: the job dict :param extraSpecs: the extraSpecs dict. Defaults to None :raises: loopingcall.LoopingCallDone :raises: VolumeBackendAPIException """ def _wait_for_job_complete(): # Called at an interval until the job is finished. maxJobRetries = self._get_max_job_retries(extraSpecs) retries = kwargs['retries'] wait_for_job_called = kwargs['wait_for_job_called'] if self._is_job_finished(conn, job): raise loopingcall.LoopingCallDone() if retries > maxJobRetries: LOG.error(_LE("_wait_for_job_complete " "failed after %(retries)d " "tries."), {'retries': retries}) raise loopingcall.LoopingCallDone() try: kwargs['retries'] = retries + 1 if not wait_for_job_called: if self._is_job_finished(conn, job): kwargs['wait_for_job_called'] = True except Exception: exceptionMessage = (_("Issue encountered waiting for job.")) LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException(exceptionMessage) kwargs = {'retries': 0, 'wait_for_job_called': False} intervalInSecs = self._get_interval_in_secs(extraSpecs) timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete) timer.start(interval=intervalInSecs).wait() def _get_max_job_retries(self, extraSpecs): """Get max job retries either default or user defined :param extraSpecs: extraSpecs dict :returns: JOB_RETRIES or user defined """ if extraSpecs and RETRIES in extraSpecs: jobRetries = extraSpecs[RETRIES] else: jobRetries = JOB_RETRIES return int(jobRetries) def _get_interval_in_secs(self, extraSpecs): """Get interval in secs, either default or user defined :param extraSpecs: extraSpecs dict :returns: INTERVAL_10_SEC or user defined """ if extraSpecs and INTERVAL in extraSpecs: intervalInSecs = extraSpecs[INTERVAL] else: intervalInSecs = INTERVAL_10_SEC return int(intervalInSecs) def _is_job_finished(self, conn, job): """Check if the job is finished. :param conn: connection to the ecom server :param job: the job dict :returns: boolean -- True if finished; False if not finished; """ jobInstanceName = job['Job'] jobinstance = conn.GetInstance(jobInstanceName, LocalOnly=False) jobstate = jobinstance['JobState'] # From ValueMap of JobState in CIM_ConcreteJob # 2=New, 3=Starting, 4=Running, 32767=Queue Pending # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767, # 32768..65535"), # Values("New, Starting, Running, Suspended, Shutting Down, # Completed, Terminated, Killed, Exception, Service, # Query Pending, DMTF Reserved, Vendor Reserved")] if jobstate in [2, 3, 4, 32767]: return False else: return True def wait_for_sync(self, conn, syncName, extraSpecs=None): """Given the sync name wait for it to fully synchronize. :param conn: connection to the ecom server :param syncName: the syncName :param extraSpecs: extra specifications :raises: loopingcall.LoopingCallDone :raises: VolumeBackendAPIException """ def _wait_for_sync(): """Called at an interval until the synchronization is finished. 
:raises: loopingcall.LoopingCallDone :raises: VolumeBackendAPIException """ retries = kwargs['retries'] try: kwargs['retries'] = retries + 1 if not kwargs['wait_for_sync_called']: if self._is_sync_complete(conn, syncName): kwargs['wait_for_sync_called'] = True except Exception: exceptionMessage = (_("Issue encountered waiting for " "synchronization.")) LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException(exceptionMessage) if kwargs['retries'] > maxJobRetries: LOG.error(_LE("_wait_for_sync failed after %(retries)d " "tries."), {'retries': retries}) raise loopingcall.LoopingCallDone(retvalue=maxJobRetries) if kwargs['wait_for_sync_called']: raise loopingcall.LoopingCallDone() maxJobRetries = self._get_max_job_retries(extraSpecs) kwargs = {'retries': 0, 'wait_for_sync_called': False} intervalInSecs = self._get_interval_in_secs(extraSpecs) timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync) rc = timer.start(interval=intervalInSecs).wait() return rc def _is_sync_complete(self, conn, syncName): """Check if the job is finished. :param conn: connection to the ecom server :param syncName: the sync name :returns: True if fully synchronized; False if not; """ syncInstance = conn.GetInstance(syncName, LocalOnly=False) copyState = syncInstance['CopyState'] LOG.debug("CopyState is %(copyState)lu.", {'copyState': copyState}) return copyState == SYNCHRONIZED def get_num(self, numStr, datatype): """Get the ecom int from the number. :param numStr: the number in string format :param datatype: the type to convert it to :returns: result """ try: result = { '8': pywbem.Uint8(numStr), '16': pywbem.Uint16(numStr), '32': pywbem.Uint32(numStr), '64': pywbem.Uint64(numStr) } result = result.get(datatype, numStr) except NameError: result = numStr return result def find_storage_system(self, conn, configService): """Finds the storage system for a particular config service. Given the storage configuration service get the CIM_StorageSystem from it. :param conn: the connection to the ecom server :param configService: the storage configuration service :returns: int -- rc - the return code of the job :returns: dict -- jobDict - the job dict """ foundStorageSystemInstanceName = None groups = conn.AssociatorNames( configService, AssocClass='CIM_HostedService') if len(groups) > 0: foundStorageSystemInstanceName = groups[0] else: LOG.error(_LE("Cannot get storage system.")) raise return foundStorageSystemInstanceName def get_storage_group_from_volume(self, conn, volumeInstanceName, sgName): """Returns the storage group for a particular volume. Given the volume instance name get the associated storage group if it is belong to one. :param conn: connection to the ecom server :param volumeInstanceName: the volume instance name :param sgName: the storage group name :returns: foundStorageGroupInstanceName """ foundStorageGroupInstanceName = None storageGroupInstanceNames = conn.AssociatorNames( volumeInstanceName, ResultClass='CIM_DeviceMaskingGroup') if len(storageGroupInstanceNames) > 1: LOG.info(_LI( "The volume belongs to more than one storage group. 
" "Returning storage group %(sgName)s."), {'sgName': sgName}) for storageGroupInstanceName in storageGroupInstanceNames: instance = self.get_existing_instance( conn, storageGroupInstanceName) if instance and sgName == instance['ElementName']: foundStorageGroupInstanceName = storageGroupInstanceName break return foundStorageGroupInstanceName def get_storage_groups_from_volume(self, conn, volumeInstanceName): """Returns all the storage group for a particular volume. Given the volume instance name get all the associated storage groups. :param conn: connection to the ecom server :param volumeInstanceName: the volume instance name :returns: foundStorageGroupInstanceName """ storageGroupInstanceNames = conn.AssociatorNames( volumeInstanceName, ResultClass='CIM_DeviceMaskingGroup') if storageGroupInstanceNames: LOG.debug("There are %(len)d storage groups associated " "with volume %(volumeInstanceName)s.", {'len': len(storageGroupInstanceNames), 'volumeInstanceName': volumeInstanceName}) else: LOG.debug("There are no storage groups associated " "with volume %(volumeInstanceName)s.", {'volumeInstanceName': volumeInstanceName}) return storageGroupInstanceNames def wrap_get_storage_group_from_volume(self, conn, volumeInstanceName, sgName): """Unit test aid""" return self.get_storage_group_from_volume(conn, volumeInstanceName, sgName) def find_storage_masking_group(self, conn, controllerConfigService, storageGroupName): """Given the storage group name get the storage group. :param conn: connection to the ecom server :param controllerConfigService: the controllerConfigService :param storageGroupName: the name of the storage group you are getting :returns: foundStorageMaskingGroupInstanceName """ foundStorageMaskingGroupInstanceName = None storageMaskingGroupInstances = ( conn.Associators(controllerConfigService, ResultClass='CIM_DeviceMaskingGroup')) for storageMaskingGroupInstance in storageMaskingGroupInstances: if storageGroupName == storageMaskingGroupInstance['ElementName']: # Check that it has not been deleted recently. instance = self.get_existing_instance( conn, storageMaskingGroupInstance.path) if instance is None: # Storage group not found. foundStorageMaskingGroupInstanceName = None else: foundStorageMaskingGroupInstanceName = ( storageMaskingGroupInstance.path) break return foundStorageMaskingGroupInstanceName def find_storage_system_name_from_service(self, configService): """Given any service get the storage system name from it. :param configService: the configuration service :returns: string -- configService['SystemName'] - storage system name """ return configService['SystemName'] def find_volume_instance(self, conn, volumeDict, volumeName): """Given the volumeDict get the instance from it. :param conn: connection to the ecom server :param volumeDict: the volume Dict :param volumeName: the user friendly name of the volume :returns: foundVolumeInstance - the found volume instance """ volumeInstanceName = self.get_instance_name(volumeDict['classname'], volumeDict['keybindings']) foundVolumeInstance = conn.GetInstance(volumeInstanceName) if foundVolumeInstance is None: LOG.debug("Volume %(volumeName)s not found on the array.", {'volumeName': volumeName}) else: LOG.debug("Volume name: %(volumeName)s Volume instance: " "%(vol_instance)s.", {'volumeName': volumeName, 'vol_instance': foundVolumeInstance.path}) return foundVolumeInstance def get_host_short_name(self, hostName): """Returns the short name for a given qualified host name. 
Checks the host name to see if it is the fully qualified host name and returns part before the dot. If there is no dot in the hostName the full hostName is returned. :param hostName: the fully qualified host name () :returns: string -- the short hostName """ shortHostName = None hostArray = hostName.split('.') if len(hostArray) > 1: shortHostName = hostArray[0] else: shortHostName = hostName return self.generate_unique_trunc_host(shortHostName) def get_instance_name(self, classname, bindings): """Get the instance from the classname and bindings. :param classname: class name for the volume instance :param bindings: volume created from job :returns: pywbem.CIMInstanceName -- instanceName """ instanceName = None try: instanceName = pywbem.CIMInstanceName( classname, namespace=EMC_ROOT, keybindings=bindings) except NameError: instanceName = None return instanceName def parse_pool_instance_id(self, poolInstanceId): """Given the instance Id parse the pool name and system name from it. Example of pool InstanceId: Symmetrix+0001233455555+U+Pool 0 :param poolInstanceId: the path and name of the file :returns: string -- poolName - the pool name :returns: string -- systemName - the system name """ poolName = None systemName = None endp = poolInstanceId.rfind('+') if endp > -1: poolName = poolInstanceId[endp + 1:] idarray = poolInstanceId.split('+') if len(idarray) > 2: systemName = self._format_system_name(idarray[0], idarray[1], '+') LOG.debug("Pool name: %(poolName)s System name: %(systemName)s.", {'poolName': poolName, 'systemName': systemName}) return poolName, systemName def _format_system_name(self, part1, part2, sep): """Join to make up system name :param part1: the prefix :param sep: the separator :param part2: the postfix :returns: systemName """ return ("%(part1)s%(sep)s%(part2)s" % {'part1': part1, 'sep': sep, 'part2': part2}) def parse_pool_instance_id_v3(self, poolInstanceId): """Given the instance Id parse the pool name and system name from it. Example of pool InstanceId: Symmetrix+0001233455555+U+Pool 0 :param poolInstanceId: the path and name of the file :returns: poolName - the pool name :returns: systemName - the system name """ poolName = None systemName = None endp = poolInstanceId.rfind('-+-') if endp > -1: poolName = poolInstanceId[endp + 3:] idarray = poolInstanceId.split('-+-') if len(idarray) > 2: systemName = ( self._format_system_name(idarray[0], idarray[1], '-+-')) LOG.debug("Pool name: %(poolName)s System name: %(systemName)s.", {'poolName': poolName, 'systemName': systemName}) return poolName, systemName def convert_gb_to_bits(self, strGbSize): """Convert GB(string) to bytes(string). :param strGB: string -- The size in GB :returns: string -- The size in bytes """ strBitsSize = six.text_type(int(strGbSize) * 1024 * 1024 * 1024) LOG.debug("Converted %(strGbSize)s GBs to %(strBitsSize)s Bits.", {'strGbSize': strGbSize, 'strBitsSize': strBitsSize}) return strBitsSize def check_if_volume_is_composite(self, conn, volumeInstance): """Check if the volume is composite. 
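# NOTE(editor): worked example of the InstanceID parsing in
# parse_pool_instance_id() above, using the sample ID from its docstring:
# the pool name is everything after the last '+', and the system name is
# the first two '+'-joined fields.
pool_instance_id = 'Symmetrix+0001233455555+U+Pool 0'
pool_name = pool_instance_id[pool_instance_id.rfind('+') + 1:]
idarray = pool_instance_id.split('+')
system_name = '%s+%s' % (idarray[0], idarray[1])
assert (pool_name, system_name) == ('Pool 0', 'Symmetrix+0001233455555')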
:param conn: the connection information to the ecom server :param volumeInstance: the volume Instance :returns: string -- 'True', 'False' or 'Undetermined' """ propertiesList = volumeInstance.properties.items() for properties in propertiesList: if properties[0] == 'IsComposite': cimProperties = properties[1] if 'True' in six.text_type(cimProperties.value): return 'True' elif 'False' in six.text_type(cimProperties.value): return 'False' else: return 'Undetermined' return 'Undetermined' def get_assoc_pool_from_volume(self, conn, volumeInstanceName): """Give the volume instance get the associated pool instance :param conn: connection to the ecom server :param volumeInstanceName: the volume instance name :returns: foundPoolInstanceName """ foundPoolInstanceName = None foundPoolInstanceNames = ( conn.AssociatorNames(volumeInstanceName, ResultClass='EMC_VirtualProvisioningPool')) if len(foundPoolInstanceNames) > 0: foundPoolInstanceName = foundPoolInstanceNames[0] return foundPoolInstanceName def check_if_volume_is_extendable(self, conn, volumeInstance): """Checks if a volume is extendable or not. Check underlying CIM_StorageExtent to see if the volume is concatenated or not. If isConcatenated is true then it is a concatenated and extendable. If isConcatenated is False and isVolumeComposite is True then it is striped and not extendable. If isConcatenated is False and isVolumeComposite is False then it has one member only but is still extendable. :param conn: the connection information to the ecom server :param volumeInstance: the volume instance :returns: string -- 'True', 'False' or 'Undetermined' """ isConcatenated = None isVolumeComposite = self.check_if_volume_is_composite( conn, volumeInstance) storageExtentInstances = conn.Associators( volumeInstance.path, ResultClass='CIM_StorageExtent') if len(storageExtentInstances) > 0: storageExtentInstance = storageExtentInstances[0] propertiesList = storageExtentInstance.properties.items() for properties in propertiesList: if properties[0] == 'IsConcatenated': cimProperties = properties[1] isConcatenated = six.text_type(cimProperties.value) if isConcatenated is not None: break if 'True' in isConcatenated: return 'True' elif 'False' in isConcatenated and 'True' in isVolumeComposite: return 'False' elif 'False' in isConcatenated and 'False' in isVolumeComposite: return 'True' else: return 'Undetermined' def get_composite_type(self, compositeTypeStr): """Get the int value of composite type. The default is '2' concatenated. :param compositeTypeStr: 'concatenated' or 'striped'. Cannot be None :returns: int -- compositeType = 2 for concatenated, or 3 for striped """ compositeType = 2 stripedStr = 'striped' try: if compositeTypeStr.lower() == stripedStr.lower(): compositeType = 3 except KeyError: # Default to concatenated if not defined. pass return compositeType def is_volume_bound_to_pool(self, conn, volumeInstance): """Check if volume is bound to a pool. :param conn: the connection information to the ecom server :param volumeInstance: the volume instance :returns: string -- 'True' 'False' or 'Undetermined' """ propertiesList = volumeInstance.properties.items() for properties in propertiesList: if properties[0] == 'EMCIsBound': cimProperties = properties[1] if 'True' in six.text_type(cimProperties.value): return 'True' elif 'False' in six.text_type(cimProperties.value): return 'False' else: return 'Undetermined' return 'Undetermined' def get_space_consumed(self, conn, volumeInstance): """Check the space consumed of a volume. 
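# NOTE(editor): the three-way decision in check_if_volume_is_extendable()
# above, condensed into a table (editor's summary of the code paths):
#
#   IsConcatenated   IsComposite   -> extendable?
#   'True'           (any)         -> 'True'  (concatenated metas can grow)
#   'False'          'True'        -> 'False' (striped meta, fixed layout)
#   'False'          'False'       -> 'True'  (single member, still growable)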
:param conn: the connection information to the ecom server :param volumeInstance: the volume Instance :returns: spaceConsumed """ foundSpaceConsumed = None unitnames = conn.References( volumeInstance, ResultClass='CIM_AllocatedFromStoragePool', Role='Dependent') for unitname in unitnames: propertiesList = unitname.properties.items() for properties in propertiesList: if properties[0] == 'SpaceConsumed': cimProperties = properties[1] foundSpaceConsumed = cimProperties.value break if foundSpaceConsumed is not None: break return foundSpaceConsumed def get_volume_size(self, conn, volumeInstance): """Get the volume size which is ConsumableBlocks * BlockSize. :param conn: the connection information to the ecom server :param volumeInstance: the volume Instance :returns: string -- volumeSizeOut """ volumeSizeOut = 'Undetermined' numBlocks = 0 blockSize = 0 propertiesList = volumeInstance.properties.items() for properties in propertiesList: if properties[0] == 'ConsumableBlocks': cimProperties = properties[1] numBlocks = int(cimProperties.value) if properties[0] == 'BlockSize': cimProperties = properties[1] blockSize = int(cimProperties.value) if blockSize > 0 and numBlocks > 0: break if blockSize > 0 and numBlocks > 0: volumeSizeOut = six.text_type(numBlocks * blockSize) return volumeSizeOut def determine_member_count(self, sizeStr, memberCount, compositeType): """Determines how many members a volume should contain. Based on the size of the proposed volume, the compositeType and the memberCount, determine (or validate) how many meta members there should be in a volume. :param sizeStr: the size in GBs of the proposed volume :param memberCount: the initial member count :param compositeType: the composite type :returns: string -- memberCount :returns: string -- errorDesc - the error description """ errorDesc = None if compositeType in 'concatenated' and int(sizeStr) > 240: newMemberCount = int(sizeStr) // 240 modular = int(sizeStr) % 240 if modular > 0: newMemberCount += 1 memberCount = six.text_type(newMemberCount) if compositeType in 'striped': metaSize = int(sizeStr) / int(memberCount) modular = int(sizeStr) % int(memberCount) metaSize = metaSize + modular if metaSize > 240: errorDesc = ('Meta Size is greater than maximum allowed meta ' 'size') return memberCount, errorDesc def get_extra_specs_by_volume_type_name(self, volumeTypeName): """Gets the extra specs associated with a volume type. Given the string value of the volume type name, get the extra specs object associated with the volume type. :param volumeTypeName: string value of the volume type name :returns: extra_specs - extra specs object """ ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type_by_name( ctxt, volumeTypeName) extra_specs = volume_type['extra_specs'] return extra_specs def get_pool_capacities(self, conn, poolName, storageSystemName): """Get the total and remaining capacity in GB for a storage pool. Given the storage pool name, get the total capacity and remaining capacity in GB. 
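# NOTE(editor): worked example of the meta-member arithmetic in
# determine_member_count() above for a concatenated volume: each meta
# member holds at most 240 GB, so a 500 GB request needs
# ceil(500 / 240) = 3 members.
size_gb = 500
members = size_gb // 240 + (1 if size_gb % 240 else 0)
assert members == 3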
:param conn: connection to the ecom server :param poolName: string value of the storage pool name :param storageSystemName: the storage system name :returns: tuple -- (total_capacity_gb, free_capacity_gb) """ LOG.debug( "Retrieving capacity for pool %(poolName)s on array %(array)s.", {'poolName': poolName, 'array': storageSystemName}) poolInstanceName = self.get_pool_by_name( conn, poolName, storageSystemName) if poolInstanceName is None: LOG.error(_LE( "Unable to retrieve pool instance of %(poolName)s on " "array %(array)s."), {'poolName': poolName, 'array': storageSystemName}) return (0, 0) storagePoolInstance = conn.GetInstance( poolInstanceName, LocalOnly=False) total_capacity_gb = self.convert_bits_to_gbs( storagePoolInstance['TotalManagedSpace']) allocated_capacity_gb = self.convert_bits_to_gbs( storagePoolInstance['EMCSubscribedCapacity']) free_capacity_gb = total_capacity_gb - allocated_capacity_gb return (total_capacity_gb, free_capacity_gb) def get_pool_by_name(self, conn, storagePoolName, storageSystemName): """Returns the instance name associated with a storage pool name. :param conn: connection to the ecom server :param storagePoolName: string value of the storage pool name :param storageSystemName: string value of array :returns: foundPoolInstanceName - instance name of storage pool """ foundPoolInstanceName = None LOG.debug( "storagePoolName: %(poolName)s, storageSystemName: %(array)s.", {'poolName': storagePoolName, 'array': storageSystemName}) storageSystemInstanceName = self.find_storageSystem(conn, storageSystemName) poolInstanceNames = conn.AssociatorNames( storageSystemInstanceName, ResultClass='EMC_VirtualProvisioningPool') for poolInstanceName in poolInstanceNames: poolName = self.get_pool_name(conn, poolInstanceName) if (poolName == storagePoolName): # Check that the pool hasn't been recently deleted. instance = self.get_existing_instance(conn, poolInstanceName) if instance is None: foundPoolInstanceName = None else: foundPoolInstanceName = poolInstanceName break return foundPoolInstanceName def convert_bits_to_gbs(self, strBitSize): """Convert bytes(string) to GB(string). :param strBitSize: string -- The size in bytes :returns: int -- The size in GB """ gbSize = int(strBitSize) // 1024 // 1024 // 1024 return gbSize def compare_size(self, size1Str, size2Str): """Compare the bit sizes to an approximate. :param size1Str: the first bit size (String) :param size2Str: the second bit size (String) :returns: int -- size1GBs - size2GBs """ size1GBs = self.convert_bits_to_gbs(size1Str) size2GBs = self.convert_bits_to_gbs(size2Str) return size1GBs - size2GBs def get_volumetype_extraspecs(self, volume, volumeTypeId=None): """Compare the bit sizes to an approximate. :param volume: the volume dictionary :param volumeTypeId: Optional override for volume['volume_type_id'] :returns: dict -- extraSpecs - the extra specs """ extraSpecs = {} try: if volumeTypeId: type_id = volumeTypeId else: type_id = volume['volume_type_id'] if type_id is not None: extraSpecs = volume_types.get_volume_type_extra_specs(type_id) except Exception: pass return extraSpecs def get_volume_type_name(self, volume): """Get the volume type name. 
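# NOTE(editor): worked example of the capacity conversions above. Despite
# the *_bits_* naming, the values handled by convert_bits_to_gbs() are byte
# counts, floor-divided down to whole GB; compare_size() then diffs two
# such strings at GB granularity.
assert int('107374182400') // 1024 // 1024 // 1024 == 100   # 100 GiB
assert int('53687091200') // 1024 // 1024 // 1024 == 50     # 50 GiB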
:param volume: the volume dictionary :returns: string -- volumeTypeName - the volume type name """ volumeTypeName = None ctxt = context.get_admin_context() typeId = volume['volume_type_id'] if typeId is not None: volumeType = volume_types.get_volume_type(ctxt, typeId) volumeTypeName = volumeType['name'] return volumeTypeName def parse_volume_type_from_filename(self, emcConfigFile): """Parse the volume type from the file (if it exists). :param emcConfigFile: the EMC configuration file :returns: volumeTypeName - the volume type name """ volumeTypeName = None m = re.search('/etc/cinder/cinder_emc_config_(.+?).xml', emcConfigFile) if m: volumeTypeName = m.group(1) return volumeTypeName def get_volumes_from_pool(self, conn, poolInstanceName): """Get the volumes in a storage pool. :param conn: the connection information to the ecom server :param poolInstanceName: the pool instance name :returns: the volumes in the pool """ return conn.AssociatorNames( poolInstanceName, AssocClass='CIM_AllocatedFromStoragePool', ResultClass='CIM_StorageVolume') def check_is_volume_bound_to_pool(self, conn, volumeInstance): """Check whether a volume is bound to a thin storage pool. :param conn: the connection information to the ecom server :param volumeInstance: the volume Instance :returns: string -- 'True', 'False' or 'Undetermined' """ foundSpaceConsumed = None unitnames = conn.References( volumeInstance, ResultClass='CIM_AllocatedFromStoragePool', Role='Dependent') for unitname in unitnames: propertiesList = unitname.properties.items() for properties in propertiesList: if properties[0] == 'EMCBoundToThinStoragePool': cimProperties = properties[1] foundSpaceConsumed = cimProperties.value break if foundSpaceConsumed is not None: break if 'True' in six.text_type(foundSpaceConsumed): return 'True' elif 'False' in six.text_type(foundSpaceConsumed): return 'False' else: return 'Undetermined' def get_short_protocol_type(self, protocol): """Given the protocol type, return I for iscsi and F for fc. :param protocol: iscsi or fc :returns: string -- 'I' for iscsi or 'F' for fc """ if protocol.lower() == ISCSI.lower(): return 'I' elif protocol.lower() == FC.lower(): return 'F' else: return protocol def get_hardware_id_instances_from_array( self, conn, hardwareIdManagementService): """Get all the hardware ids from an array. :param conn: connection to the ecom server :param hardwareIdManagementService: the hardware id management service :returns: hardwareIdInstances - the list of hardware id instances """ hardwareIdInstances = ( conn.Associators(hardwareIdManagementService, ResultClass='EMC_StorageHardwareID')) return hardwareIdInstances def truncate_string(self, strToTruncate, maxNum): """Truncate a string by taking first and last characters. :param strToTruncate: the string to be truncated :param maxNum: the maximum number of characters :returns: string -- truncated string or original string """ if len(strToTruncate) > maxNum: newNum = len(strToTruncate) - maxNum // 2 firstChars = strToTruncate[:maxNum // 2] lastChars = strToTruncate[newNum:] strToTruncate = firstChars + lastChars return strToTruncate def get_array(self, host): """Extract the array from the host capabilities.
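# --- Illustrative aside (not part of the driver) ----------------------------
# truncate_string() above keeps the first and last maxNum // 2 characters, so
# the result length is 2 * (maxNum // 2); a sketch of the same slicing:
def _example_truncate(s, max_num):
    if len(s) <= max_num:
        return s
    half = max_num // 2
    return s[:half] + s[len(s) - half:]

assert _example_truncate('abcdefghij', 6) == 'abchij'
assert _example_truncate('short', 6) == 'short'
# -----------------------------------------------------------------------------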
:param host: the host object :returns: storageSystem - storage system represents the array """ storageSystem = None try: if '@' in host: infoDetail = host.split('@') storageSystem = 'SYMMETRIX+' + infoDetail[0] except Exception: LOG.error(_LE("Error parsing array from host capabilities.")) return storageSystem def get_time_delta(self, startTime, endTime): """Get the delta between start and end time. :param startTime: the start time :param endTime: the end time :returns: string -- delta in string H:MM:SS """ delta = endTime - startTime return six.text_type(datetime.timedelta(seconds=int(delta))) def find_sync_sv_by_target( self, conn, storageSystem, target, extraSpecs, waitforsync=True): """Find the storage synchronized name by target device ID. :param conn: connection to the ecom server :param storageSystem: the storage system name :param target: target volume object :param extraSpecs: the extraSpecs dict :param waitforsync: wait for the synchronization to complete if True :returns: foundSyncInstanceName """ foundSyncInstanceName = None syncInstanceNames = conn.EnumerateInstanceNames( 'SE_StorageSynchronized_SV_SV') for syncInstanceName in syncInstanceNames: syncSvTarget = syncInstanceName['SyncedElement'] if storageSystem != syncSvTarget['SystemName']: continue if syncSvTarget['DeviceID'] == target['DeviceID']: # Check that it hasn't recently been deleted. try: conn.GetInstance(syncInstanceName) foundSyncInstanceName = syncInstanceName LOG.debug("Found sync Name: " "%(syncName)s.", {'syncName': foundSyncInstanceName}) except Exception: foundSyncInstanceName = None break if foundSyncInstanceName is None: LOG.warning(_LW( "Storage sync name not found for target %(target)s " "on %(storageSystem)s."), {'target': target['DeviceID'], 'storageSystem': storageSystem}) else: # Wait for SE_StorageSynchronized_SV_SV to be fully synced. if waitforsync: self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs) return foundSyncInstanceName def find_group_sync_rg_by_target( self, conn, storageSystem, targetRgInstanceName, extraSpecs, waitforsync=True): """Find the SE_GroupSynchronized_RG_RG instance name by target group. :param conn: connection to the ecom server :param storageSystem: the storage system name :param targetRgInstanceName: target group instance name :param extraSpecs: the extraSpecs dict :param waitforsync: wait for synchronization to complete :returns: foundSyncInstanceName """ foundSyncInstanceName = None groupSyncRgInstanceNames = conn.EnumerateInstanceNames( 'SE_GroupSynchronized_RG_RG') for rgInstanceName in groupSyncRgInstanceNames: rgTarget = rgInstanceName['SyncedElement'] if targetRgInstanceName['InstanceID'] == rgTarget['InstanceID']: # Check that it has not recently been deleted. try: conn.GetInstance(rgInstanceName) foundSyncInstanceName = rgInstanceName LOG.debug("Found group sync name: " "%(syncName)s.", {'syncName': foundSyncInstanceName}) except Exception: foundSyncInstanceName = None break if foundSyncInstanceName is None: LOG.warning(_LW( "Group sync name not found for target group %(target)s " "on %(storageSystem)s."), {'target': targetRgInstanceName['InstanceID'], 'storageSystem': storageSystem}) else: # Wait for SE_GroupSynchronized_RG_RG to be fully synced. if waitforsync: self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs) return foundSyncInstanceName def get_firmware_version(self, conn, arrayName): """Get the firmware version of the array.
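# --- Illustrative aside (not part of the driver) ----------------------------
# get_time_delta() above relies on datetime.timedelta's str() form, which is
# already H:MM:SS for deltas under a day:
import datetime

def _example_delta(start_seconds, end_seconds):
    return str(datetime.timedelta(seconds=int(end_seconds - start_seconds)))

assert _example_delta(0, 3725) == '1:02:05'
# -----------------------------------------------------------------------------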
:param conn: the connection to the ecom server :param arrayName: the array name :returns: string -- firmwareVersion """ firmwareVersion = None softwareIdentities = conn.EnumerateInstanceNames( 'symm_storageSystemsoftwareidentity') for softwareIdentity in softwareIdentities: if arrayName in softwareIdentity['InstanceID']: softwareIdentityInstance = conn.GetInstance(softwareIdentity) propertiesList = softwareIdentityInstance.properties.items() for properties in propertiesList: if properties[0] == 'VersionString': cimProperties = properties[1] firmwareVersion = cimProperties.value break return firmwareVersion def get_srp_pool_stats(self, conn, arrayName, poolName): """Get the totalManagedSpace, remainingManagedSpace. :param conn: the connection to the ecom server :param arrayName: the array name :param poolName: the pool name :returns: totalCapacityGb :returns: remainingCapacityGb """ totalCapacityGb = -1 remainingCapacityGb = -1 storageSystemInstanceName = self.find_storageSystem(conn, arrayName) srpPoolInstanceNames = conn.AssociatorNames( storageSystemInstanceName, ResultClass='Symm_SRPStoragePool') for srpPoolInstanceName in srpPoolInstanceNames: poolInstanceID = srpPoolInstanceName['InstanceID'] poolnameStr, _systemName = ( self.parse_pool_instance_id_v3(poolInstanceID)) if six.text_type(poolName) == six.text_type(poolnameStr): try: # Check that pool hasn't suddenly been deleted. srpPoolInstance = conn.GetInstance(srpPoolInstanceName) propertiesList = srpPoolInstance.properties.items() for properties in propertiesList: if properties[0] == 'TotalManagedSpace': cimProperties = properties[1] totalManagedSpace = cimProperties.value totalCapacityGb = self.convert_bits_to_gbs( totalManagedSpace) elif properties[0] == 'RemainingManagedSpace': cimProperties = properties[1] remainingManagedSpace = cimProperties.value remainingCapacityGb = self.convert_bits_to_gbs( remainingManagedSpace) except Exception: pass return totalCapacityGb, remainingCapacityGb def isArrayV3(self, conn, arrayName): """Check if the array is V2 or V3. :param conn: the connection to the ecom server :param arrayName: the array name :returns: boolean """ firmwareVersion = self.get_firmware_version(conn, arrayName) m = re.search(r'^(\d+)', firmwareVersion) majorVersion = m.group(0) if int(majorVersion) >= 5900: return True else: return False def get_pool_and_system_name_v2( self, conn, storageSystemInstanceName, poolNameInStr): """Get pool instance and system name string for V2. :param conn: the connection to the ecom server :param storageSystemInstanceName: the storage system instance name :param poolNameInStr: the pool name :returns: foundPoolInstanceName :returns: string -- systemNameStr """ vpoolInstanceNames = conn.AssociatorNames( storageSystemInstanceName, ResultClass='EMC_VirtualProvisioningPool') return self._get_pool_instance_and_system_name( conn, vpoolInstanceNames, storageSystemInstanceName, poolNameInStr) def get_pool_and_system_name_v3( self, conn, storageSystemInstanceName, poolNameInStr): """Get pool instance and system name string for V3.
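# --- Illustrative aside (not part of the driver) ----------------------------
# isArrayV3() above keys off the leading digits of the firmware string; the
# sample version strings below are illustrative values, not read from an array.
import re

def _example_is_v3(firmware_version):
    m = re.search(r'^(\d+)', firmware_version)
    return bool(m) and int(m.group(0)) >= 5900

assert _example_is_v3('5977.250.189') is True   # VMAX3-era major version
assert _example_is_v3('5876.82.57') is False    # VMAX2-era major version
# -----------------------------------------------------------------------------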
:param conn: the connection to the ecom server :param storageSystemInstanceName: the storage system instance name :param poolNameInStr: the pool name :returns: foundPoolInstanceName :returns: string -- systemNameStr """ srpPoolInstanceNames = conn.AssociatorNames( storageSystemInstanceName, ResultClass='Symm_SRPStoragePool') return self._get_pool_instance_and_system_name( conn, srpPoolInstanceNames, storageSystemInstanceName, poolNameInStr) def _get_pool_instance_and_system_name( self, conn, poolInstanceNames, storageSystemInstanceName, poolname): """Get the pool instance and the system name. :param conn: the ecom connection :param poolInstanceNames: list of pool instances :param storageSystemInstanceName: the storage system instance name :param poolname: pool name (string) :returns: foundPoolInstanceName, systemname """ foundPoolInstanceName = None poolnameStr = None systemNameStr = storageSystemInstanceName['Name'] for poolInstanceName in poolInstanceNames: # Example: SYMMETRIX-+-000196700535-+-SR-+-SRP_1 # Example: SYMMETRIX+000195900551+TP+Sol_Innov poolnameStr = self.get_pool_name(conn, poolInstanceName) if poolnameStr is not None: if six.text_type(poolname) == six.text_type(poolnameStr): try: conn.GetInstance(poolInstanceName) foundPoolInstanceName = poolInstanceName except Exception: foundPoolInstanceName = None break return foundPoolInstanceName, systemNameStr def get_pool_name(self, conn, poolInstanceName): """Get the pool name from the instance. :param conn: the ecom connection :param poolInstanceName: the pool instance :returns: poolnameStr """ poolnameStr = None try: poolInstance = conn.GetInstance(poolInstanceName) poolnameStr = poolInstance['ElementName'] except Exception: pass return poolnameStr def find_storageSystem(self, conn, arrayStr): """Find an array instance name by the array name. :param conn: the ecom connection :param arrayStr: the array Serial number (string) :returns: foundStorageSystemInstanceName, the CIM instance name of the storage system :raises: VolumeBackendAPIException """ foundStorageSystemInstanceName = None storageSystemInstanceNames = conn.EnumerateInstanceNames( 'EMC_StorageSystem') for storageSystemInstanceName in storageSystemInstanceNames: arrayName = storageSystemInstanceName['Name'] index = arrayName.find(arrayStr) if index > -1: foundStorageSystemInstanceName = storageSystemInstanceName if foundStorageSystemInstanceName is None: exceptionMessage = (_("StorageSystem %(array)s is not found.") % {'array': arrayStr}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) LOG.debug("Array Found: %(array)s.", {'array': arrayStr}) return foundStorageSystemInstanceName def is_in_range(self, volumeSize, maximumVolumeSize, minimumVolumeSize): """Check that volumeSize is in range. :param volumeSize: volume size :param maximumVolumeSize: the max volume size :param minimumVolumeSize: the min volume size :returns: boolean """ if (long(volumeSize) < long(maximumVolumeSize)) and ( long(volumeSize) >= long(minimumVolumeSize)): return True else: return False def verify_slo_workload(self, slo, workload): """Check if SLO and workload values are valid.
:param slo: Service Level Object e.g bronze :param workload: workload e.g DSS :returns: boolean """ isValidSLO = False isValidWorkload = False validSLOs = ['Bronze', 'Silver', 'Gold', 'Platinum', 'Diamond', 'Optimized', 'NONE'] validWorkloads = ['DSS_REP', 'DSS', 'OLTP', 'OLTP_REP', 'NONE'] for validSLO in validSLOs: if slo == validSLO: isValidSLO = True break for validWorkload in validWorkloads: if workload == validWorkload: isValidWorkload = True break if not isValidSLO: LOG.error(_LE( "SLO: %(slo)s is not valid. Valid values are Bronze, Silver, " "Gold, Platinum, Diamond, Optimized, NONE."), {'slo': slo}) if not isValidWorkload: LOG.error(_LE( "Workload: %(workload)s is not valid. Valid values are " "DSS_REP, DSS, OLTP, OLTP_REP, NONE."), {'workload': workload}) return isValidSLO, isValidWorkload def get_v3_storage_group_name(self, poolName, slo, workload): """Determine default v3 storage group from extraSpecs. :param poolName: the poolName :param slo: the SLO string e.g Bronze :param workload: the workload string e.g DSS :returns: storageGroupName """ storageGroupName = ("OS-%(poolName)s-%(slo)s-%(workload)s-SG" % {'poolName': poolName, 'slo': slo, 'workload': workload}) return storageGroupName def _get_fast_settings_from_storage_group(self, storageGroupInstance): """Get the emc FAST setting from the storage group. :param storageGroupInstance: the storage group instance :returns: emcFastSetting """ emcFastSetting = None propertiesList = storageGroupInstance.properties.items() for properties in propertiesList: if properties[0] == 'EMCFastSetting': cimProperties = properties[1] emcFastSetting = cimProperties.value break return emcFastSetting def get_volume_meta_head(self, conn, volumeInstanceName): """Get the head of a meta volume. :param conn: the ecom connection :param volumeInstanceName: the composite volume instance name :returns: the instance name of the meta volume head """ metaHeadInstanceName = None metaHeads = conn.AssociatorNames( volumeInstanceName, ResultClass='EMC_Meta') if len(metaHeads) > 0: metaHeadInstanceName = metaHeads[0] if metaHeadInstanceName is None: LOG.info(_LI( "Volume %(volume)s does not have meta device members."), {'volume': volumeInstanceName}) return metaHeadInstanceName def get_meta_members_of_composite_volume( self, conn, metaHeadInstanceName): """Get the member volumes of a composite volume. :param conn: the ecom connection :param metaHeadInstanceName: head of the composite volume :returns: an array containing instance names of member volumes """ metaMembers = conn.AssociatorNames( metaHeadInstanceName, AssocClass='CIM_BasedOn', ResultClass='EMC_PartialAllocOfConcreteExtent') LOG.debug("metaMembers: %(members)s.", {'members': metaMembers}) return metaMembers def get_meta_members_capacity_in_byte(self, conn, volumeInstanceNames): """Get the capacity in byte of all meta device member volumes. 
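# --- Illustrative aside (not part of the driver) ----------------------------
# get_v3_storage_group_name() above composes the default V3 storage group
# name purely from pool, SLO and workload:
def _example_sg_name(pool_name, slo, workload):
    return 'OS-%s-%s-%s-SG' % (pool_name, slo, workload)

assert _example_sg_name('SRP_1', 'Bronze', 'DSS') == 'OS-SRP_1-Bronze-DSS-SG'
# -----------------------------------------------------------------------------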
:param conn: the ecom connection :param volumeInstanceNames: array contains meta device member volumes :returns: array contains capacities of each member device in bytes """ capacitiesInByte = [] headVolume = conn.GetInstance(volumeInstanceNames[0]) totalSizeInByte = ( headVolume['ConsumableBlocks'] * headVolume['BlockSize']) volumeInstanceNames.pop(0) for volumeInstanceName in volumeInstanceNames: volumeInstance = conn.GetInstance(volumeInstanceName) numOfBlocks = volumeInstance['ConsumableBlocks'] blockSize = volumeInstance['BlockSize'] volumeSizeInByte = numOfBlocks * blockSize capacitiesInByte.append(volumeSizeInByte) totalSizeInByte = totalSizeInByte - volumeSizeInByte capacitiesInByte.insert(0, totalSizeInByte) return capacitiesInByte def get_existing_instance(self, conn, instanceName): """Check that the instance name still exists and return the instance. :param conn: the connection to the ecom server :param instanceName: the instanceName to be checked :returns: instance or None """ instance = None try: instance = conn.GetInstance(instanceName, LocalOnly=False) except pywbem.cim_operations.CIMError as arg: instance = self.process_exception_args(arg, instanceName) return instance def process_exception_args(self, arg, instanceName): """Process exception arguments. :param arg: the arg list :param instanceName: the instance name :returns: None :raises: VolumeBackendAPIException """ instance = None code, desc = arg[0], arg[1] if code == CIM_ERR_NOT_FOUND: # Object doesn't exist any more. instance = None else: # Something else that we cannot recover from has happened. LOG.error(_LE("Exception: %s"), six.text_type(desc)) exceptionMessage = (_( "Cannot verify the existence of object: " "%(instanceName)s.") % {'instanceName': instanceName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) return instance def find_replication_service_capabilities(self, conn, storageSystemName): """Find the replication service capabilities instance name. :param conn: the connection to the ecom server :param storageSystemName: the storage system name :returns: foundRepServCapability """ foundRepServCapability = None repservices = conn.EnumerateInstanceNames( 'CIM_ReplicationServiceCapabilities') for repservCap in repservices: if storageSystemName in repservCap['InstanceID']: foundRepServCapability = repservCap LOG.debug("Found Replication Service Capabilities: " "%(repservCap)s", {'repservCap': repservCap}) break if foundRepServCapability is None: exceptionMessage = (_("Replication Service Capability not found " "on %(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundRepServCapability def is_clone_licensed(self, conn, capabilityInstanceName): """Check if the clone feature is licensed and enabled. :param conn: the connection to the ecom server :param capabilityInstanceName: the replication service capabilities instance name :returns: True if licensed and enabled; False otherwise. """ capabilityInstance = conn.GetInstance(capabilityInstanceName) propertiesList = capabilityInstance.properties.items() for properties in propertiesList: if properties[0] == 'SupportedReplicationTypes': cimProperties = properties[1] repTypes = cimProperties.value LOG.debug("Found supported replication types: " "%(repTypes)s", {'repTypes': repTypes}) if CLONE_REPLICATION_TYPE in repTypes: # Clone is a supported replication type.
LOG.debug("Clone is licensed and enabled.") return True return False def create_storage_hardwareId_instance_name( self, conn, hardwareIdManagementService, initiator): """Create storage hardware ID instance name based on the WWPN/IQN. :param conn: connection to the ecom server :param hardwareIdManagementService: the hardware ID management service :param initiator: initiator(IQN or WWPN) to create the hardware ID instance :returns: hardwareIdList """ hardwareIdList = None hardwareIdType = self._get_hardware_type(initiator) rc, ret = conn.InvokeMethod( 'CreateStorageHardwareID', hardwareIdManagementService, StorageID=initiator, IDType=self.get_num(hardwareIdType, '16')) if 'HardwareID' in ret: LOG.debug("Created hardware ID instance for initiator:" "%(initiator)s rc=%(rc)d, ret=%(ret)s", {'initiator': initiator, 'rc': rc, 'ret': ret}) hardwareIdList = ret['HardwareID'] else: LOG.warning(_LW("CreateStorageHardwareID failed. initiator: " "%(initiator)s, rc=%(rc)d, ret=%(ret)s."), {'initiator': initiator, 'rc': rc, 'ret': ret}) return hardwareIdList def _get_hardware_type( self, initiator): """Determine the hardware type based on the initiator. :param initiator: initiator(IQN or WWPN) :returns: hardwareTypeId """ hardwareTypeId = 0 try: int(initiator, 16) hardwareTypeId = 2 except Exception: if 'iqn' in initiator.lower(): hardwareTypeId = 5 if hardwareTypeId == 0: LOG.warning(_LW("Cannot determine the hardware type.")) return hardwareTypeId def _process_tag(self, element, tagName): """Process the tag to get the value. :param element: the parent element :param tagName: the tag name :returns: nodeValue(can be None) """ nodeValue = None try: processedElement = element.getElementsByTagName(tagName)[0] nodeValue = processedElement.childNodes[0].nodeValue if nodeValue: nodeValue = nodeValue.strip() except IndexError: pass return nodeValue def _get_connection_info(self, ecomElement): """Given the filename get the ecomUser and ecomPasswd. :param ecomElement: the ecom element :returns: dict -- connargs - the connection info dictionary :raises: VolumeBackendAPIException """ connargs = {} connargs['EcomServerIp'] = ( self._process_tag(ecomElement, 'EcomServerIp')) connargs['EcomServerPort'] = ( self._process_tag(ecomElement, 'EcomServerPort')) connargs['EcomUserName'] = ( self._process_tag(ecomElement, 'EcomUserName')) connargs['EcomPassword'] = ( self._process_tag(ecomElement, 'EcomPassword')) for k, __ in connargs.items(): if connargs[k] is None: exceptionMessage = (_( "EcomServerIp, EcomServerPort, EcomUserName, " "EcomPassword must have valid values.")) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) # These can be None connargs['EcomUseSSL'] = self._process_tag(ecomElement, 'EcomUseSSL') connargs['EcomCACert'] = self._process_tag(ecomElement, 'EcomCACert') connargs['EcomNoVerification'] = ( self._process_tag(ecomElement, 'EcomNoVerification')) if connargs['EcomUseSSL'] and connargs['EcomUseSSL'] == 'True': connargs['EcomUseSSL'] = True if connargs['EcomNoVerification'] and ( connargs['EcomNoVerification'] == 'True'): connargs['EcomNoVerification'] = True else: connargs['EcomUseSSL'] = False connargs['EcomNoVerification'] = False return connargs def _fill_record(self, connargs, serialNumber, poolName, portGroup, element): """Fill a single record. 
:param connargs: the connection info :param serialNumber: the serial number of array :param poolName: the poolname :param portGroup: the portGroup :param element: the parent element :returns: dict -- kwargs """ kwargs = {} kwargs['EcomServerIp'] = connargs['EcomServerIp'] kwargs['EcomServerPort'] = connargs['EcomServerPort'] kwargs['EcomUserName'] = connargs['EcomUserName'] kwargs['EcomPassword'] = connargs['EcomPassword'] kwargs['EcomUseSSL'] = connargs['EcomUseSSL'] kwargs['EcomCACert'] = connargs['EcomCACert'] kwargs['EcomNoVerification'] = connargs['EcomNoVerification'] slo = self._process_tag(element, 'SLO') if slo is None: slo = 'NONE' kwargs['SLO'] = slo workload = self._process_tag(element, 'Workload') if workload is None: workload = 'NONE' kwargs['Workload'] = workload fastPolicy = self._process_tag(element, 'FastPolicy') kwargs['FastPolicy'] = fastPolicy kwargs['SerialNumber'] = serialNumber kwargs['PoolName'] = poolName kwargs['PortGroup'] = portGroup return kwargs def _multi_pool_support(self, fileName): """Multi pool support. 10.108.246.202 ... 000198700439 ... FC_SLVR1 ... :param fileName: the configuration file :returns: list """ myList = [] connargs = {} myFile = open(fileName, 'r') data = myFile.read() myFile.close() dom = minidom.parseString(data) interval = self._process_tag(dom, 'Interval') retries = self._process_tag(dom, 'Retries') try: ecomElements = dom.getElementsByTagName('EcomServer') if ecomElements and len(ecomElements) > 0: for ecomElement in ecomElements: connargs = self._get_connection_info(ecomElement) arrayElements = ecomElement.getElementsByTagName('Array') if arrayElements and len(arrayElements) > 0: for arrayElement in arrayElements: myList = self._get_pool_info(arrayElement, fileName, connargs, interval, retries, myList) else: LOG.error(_LE( "Please check your xml for format or syntax " "errors. Please see documentation for more " "details.")) except IndexError: pass return myList def _single_pool_support(self, fileName): """Single pool support. 10.108.246.202 5988 admin #1Password OS-PORTGROUP1-PG 000198700439 FC_SLVR1 :param fileName: the configuration file :returns: list """ myList = [] kwargs = {} connargs = {} myFile = open(fileName, 'r') data = myFile.read() myFile.close() dom = minidom.parseString(data) try: connargs = self._get_connection_info(dom) interval = self._process_tag(dom, 'Interval') retries = self._process_tag(dom, 'Retries') portGroup = self._get_random_portgroup(dom) serialNumber = self._process_tag(dom, 'Array') if serialNumber is None: LOG.error(_LE( "Array Serial Number must be in the file " "%(fileName)s."), {'fileName': fileName}) poolName = self._process_tag(dom, 'Pool') if poolName is None: LOG.error(_LE( "PoolName must be in the file " "%(fileName)s."), {'fileName': fileName}) kwargs = self._fill_record( connargs, serialNumber, poolName, portGroup, dom) if interval: kwargs['Interval'] = interval if retries: kwargs['Retries'] = retries myList.append(kwargs) except IndexError: pass return myList def parse_file_to_get_array_map(self, fileName): """Parses a file and gets array map. Given a file, parse it to get array and any pool(s) or fast policy(s), SLOs, Workloads that might exist. :param fileName: the path and name of the file :returns: list """ # Multi-pool support. myList = self._multi_pool_support(fileName) if len(myList) == 0: myList = self._single_pool_support(fileName) return myList def extract_record(self, arrayInfo, pool): """Given pool string determine the correct record. 
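# NOTE: the XML samples embedded in the _multi_pool_support() and
# _single_pool_support() docstrings above lost their markup in extraction;
# only the text values survive. A hedged reconstruction of the single-pool
# sample, with tag names taken from the _process_tag()/getElementsByTagName()
# lookups in this file (the <EMC> root and <PortGroups> wrapper are
# assumptions, not read from the code):
#
#   <EMC>
#     <EcomServerIp>10.108.246.202</EcomServerIp>
#     <EcomServerPort>5988</EcomServerPort>
#     <EcomUserName>admin</EcomUserName>
#     <EcomPassword>#1Password</EcomPassword>
#     <PortGroups>
#       <PortGroup>OS-PORTGROUP1-PG</PortGroup>
#     </PortGroups>
#     <Array>000198700439</Array>
#     <Pool>FC_SLVR1</Pool>
#   </EMC>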
The poolName and the serialNumber will determine the correct record to return in VMAX2. The poolName, SLO and the serialNumber will determine the correct record to return in VMAX3. :param arrayInfo: list of records :param pool: e.g 'SATA_BRONZE1+000198700439' 'SRP_1+Bronze+000198700555' :returns: single record """ foundArrayInfoRec = {} if pool: for arrayInfoRec in arrayInfo: if pool.count('+') == 2: compString = ("%(slo)s+%(poolName)s+%(array)s" % {'slo': arrayInfoRec['SLO'], 'poolName': arrayInfoRec['PoolName'], 'array': arrayInfoRec['SerialNumber']}) else: compString = ("%(poolName)s+%(array)s" % {'poolName': arrayInfoRec['PoolName'], 'array': arrayInfoRec['SerialNumber']}) if compString == pool: LOG.info(_LI( "The pool_name from extraSpecs is %(pool)s."), {'pool': pool}) foundArrayInfoRec = arrayInfoRec break else: foundArrayInfoRec = self._get_serial_number(arrayInfo) return foundArrayInfoRec def _get_random_portgroup(self, element): """Get a portgroup from list of portgroup. Parse all available port groups under a particular array and choose one. :param element: the parent element :returns: the randomly chosen port group :raises: VolumeBackendAPIException """ portGroupElements = element.getElementsByTagName('PortGroup') if portGroupElements and len(portGroupElements) > 0: portGroupNames = [] for portGroupElement in portGroupElements: if portGroupElement.childNodes: portGroupName = portGroupElement.childNodes[0].nodeValue if portGroupName: portGroupNames.append(portGroupName.strip()) portGroupNames = EMCVMAXUtils._filter_list(portGroupNames) if len(portGroupNames) > 0: return EMCVMAXUtils._get_random_pg_from_list(portGroupNames) exception_message = (_("No Port Group elements found in config file.")) LOG.error(exception_message) raise exception.VolumeBackendAPIException(data=exception_message) @staticmethod def _get_random_pg_from_list(portgroupnames): """From list of portgroup, choose one randomly :param portGroupNames: list of available portgroups :returns: portGroupName - the random portgroup """ portgroupname = ( portgroupnames[random.randint(0, len(portgroupnames) - 1)]) LOG.info(_LI("Returning random Port Group: " "%(portGroupName)s."), {'portGroupName': portgroupname}) return portgroupname @staticmethod def _filter_list(portgroupnames): """Clean up the port group list :param portgroupnames: list of available portgroups :returns: portgroupnames - cleaned up list """ portgroupnames = filter(None, portgroupnames) # Convert list to set to remove duplicate portgroups portgroupnames = list(set(portgroupnames)) return portgroupnames def _get_serial_number(self, arrayInfo): """If we don't have a pool then we just get the serial number. If there is more then one serial number we must return an error and a recommendation to edit the EMC conf file. :param arrayInfo: list of records :returns: any record where serial number exists :raises: VolumeBackendAPIException """ serialNumberList = [] foundRecord = {} for arrayInfoRec in arrayInfo: serialNumberList.append(arrayInfoRec['SerialNumber']) foundRecord = arrayInfoRec if len(set(serialNumberList)) > 1: # We have more than one serial number in the dict. exception_message = (_("Multiple SerialNumbers found, when only " "one was expected for this operation. " "Please change your EMC config file.")) raise exception.VolumeBackendAPIException(data=exception_message) return foundRecord def _get_pool_info(self, arrayElement, fileName, connargs, interval, retries, myList): """Get pool information from element. 
:param arrayElement: arrayElement :param fileName: configuration file :param connargs: connection arguments :param interval: interval, can be None :param retries: retries, can be None :param myList: list (input) :returns: list (output) :raises: VolumeBackendAPIException """ kwargs = {} portGroup = self._get_random_portgroup(arrayElement) serialNumber = self._process_tag( arrayElement, 'SerialNumber') if serialNumber is None: exceptionMessage = (_( "SerialNumber must be in the file " "%(fileName)s."), {'fileName': fileName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) poolElements = arrayElement.getElementsByTagName('Pool') if poolElements and len(poolElements) > 0: for poolElement in poolElements: poolName = self._process_tag(poolElement, 'PoolName') if poolName is None: exceptionMessage = (_( "PoolName must be in the file " "%(fileName)s."), {'fileName': fileName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) kwargs = self._fill_record(connargs, serialNumber, poolName, portGroup, poolElement) if interval: kwargs['Interval'] = interval if retries: kwargs['Retries'] = retries myList.append(kwargs) return myList def find_volume_by_device_id_on_array(self, conn, storageSystem, deviceID): """Find the volume by device ID on a specific array. :param conn: connection to the ecom server :param storageSystem: the storage system name :param deviceID: string value of the volume device ID :returns: foundVolumeInstanceName """ foundVolumeInstanceName = None volumeInstanceNames = conn.EnumerateInstanceNames( 'CIM_StorageVolume') for volumeInstanceName in volumeInstanceNames: if storageSystem not in volumeInstanceName['SystemName']: continue if deviceID == volumeInstanceName['DeviceID']: foundVolumeInstanceName = volumeInstanceName LOG.debug("Found volume: %(vol)s", {'vol': foundVolumeInstanceName}) break if foundVolumeInstanceName is None: exceptionMessage = (_("Volume %(deviceID)s not found.") % {'deviceID': deviceID}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return foundVolumeInstanceName def get_volume_element_name(self, volumeId): """Get volume element name follows naming convention, i.e. 'OS-UUID'. :param volumeId: volume id containing uuid :returns: volume element name in format of OS-UUID """ elementName = volumeId uuid_regex = (re.compile( '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', re.I)) match = uuid_regex.search(volumeId) if match: volumeUUID = match.group() elementName = ("%(prefix)s%(volumeUUID)s" % {'prefix': VOLUME_ELEMENT_NAME_PREFIX, 'volumeUUID': volumeUUID}) LOG.debug( "get_volume_element_name elementName: %(elementName)s.", {'elementName': elementName}) return elementName def rename_volume(self, conn, volume, newName): """Change the volume ElementName to specified new name. :param conn: connection to the ecom server :param volume: the volume instance name or volume instance :param newName: new ElementName of the volume :returns: volumeInstance after rename """ if type(volume) is pywbem.cim_obj.CIMInstance: volumeInstance = volume else: volumeInstance = conn.GetInstance(volume) volumeInstance['ElementName'] = newName LOG.debug("Rename volume to new ElementName %(newName)s.", {'newName': newName}) conn.ModifyInstance(volumeInstance, PropertyList=['ElementName']) return volumeInstance def get_array_and_device_id(self, volume, external_ref): """Helper function for manage volume to get array name and device ID. 
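# --- Illustrative aside (not part of the driver) ----------------------------
# get_volume_element_name() above extracts the UUID from the OpenStack id and
# prefixes it; the 'OS-' prefix below is an assumption standing in for
# VOLUME_ELEMENT_NAME_PREFIX, which is defined elsewhere in this module.
import re

def _example_element_name(volume_id, prefix='OS-'):
    uuid_re = re.compile('[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
                         '[a-f0-9]{4}-[a-f0-9]{12}', re.I)
    m = uuid_re.search(volume_id)
    return prefix + m.group() if m else volume_id

assert (_example_element_name('volume-ab3cd6f2-0001-4a2b-8c9d-112233445566')
        == 'OS-ab3cd6f2-0001-4a2b-8c9d-112233445566')
# -----------------------------------------------------------------------------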
:param volume: volume object from API :param external_ref: the existing volume object to be manged :returns: string value of the array name and device ID """ deviceId = external_ref.get(u'source-name', None) arrayName = '' for metadata in volume['volume_metadata']: if metadata['key'].lower() == 'array': arrayName = metadata['value'] break if deviceId: LOG.debug("Get device ID of existing volume - device ID: " "%(deviceId)s, Array: %(arrayName)s.", {'deviceId': deviceId, 'arrayName': arrayName}) else: exception_message = (_("Source volume device ID is required.")) raise exception.VolumeBackendAPIException( data=exception_message) return (arrayName, deviceId) def get_associated_replication_from_source_volume( self, conn, storageSystem, sourceDeviceId): """Get associated replication from source volume. Given the source volume device ID, find associated replication storage synchronized instance names. :param conn: connection to the ecom server :param storageSystem: the storage system name :param source: target volume object :returns: foundSyncName (String) """ foundSyncInstanceName = None syncInstanceNames = conn.EnumerateInstanceNames( 'SE_StorageSynchronized_SV_SV') for syncInstanceName in syncInstanceNames: sourceVolume = syncInstanceName['SystemElement'] if storageSystem != sourceVolume['SystemName']: continue if sourceVolume['DeviceID'] == sourceDeviceId: # Check that it hasn't recently been deleted. try: conn.GetInstance(syncInstanceName) foundSyncInstanceName = syncInstanceName LOG.debug("Found sync Name: " "%(syncName)s.", {'syncName': foundSyncInstanceName}) except Exception: foundSyncInstanceName = None break if foundSyncInstanceName is None: LOG.info(_LI( "No replication synchronization session found associated " "with source volume %(source)s on %(storageSystem)s."), {'source': sourceDeviceId, 'storageSystem': storageSystem}) return foundSyncInstanceName def get_volume_model_updates( self, context, volumes, cgId, status='available'): """Update the volume model's status and return it. :param context: the context :param volumes: volumes object api :param cgId: cg id :param status: string value reflects the status of the member volume :returns: volume_model_updates - updated volumes """ volume_model_updates = [] LOG.info(_LI( "Updating status for CG: %(id)s."), {'id': cgId}) if volumes: for volume in volumes: volume_model_updates.append({'id': volume['id'], 'status': status}) else: LOG.info(_LI("No volume found for CG: %(cg)s."), {'cg': cgId}) return volume_model_updates def get_smi_version(self, conn): """Get the SMI_S version. :param conn: the connection to the ecom server :returns: string -- version """ intVersion = 0 swIndentityInstances = conn.EnumerateInstances( 'SE_ManagementServerSoftwareIdentity') if swIndentityInstances: swIndentityInstance = swIndentityInstances[0] majorVersion = swIndentityInstance['MajorVersion'] minorVersion = swIndentityInstance['MinorVersion'] revisionNumber = swIndentityInstance['RevisionNumber'] intVersion = int(six.text_type(majorVersion) + six.text_type(minorVersion) + six.text_type(revisionNumber)) LOG.debug("Major version: %(majV)lu, Minor version: %(minV)lu, " "Revision number: %(revNum)lu, Version: %(intV)lu.", {'majV': majorVersion, 'minV': minorVersion, 'revNum': revisionNumber, 'intV': intVersion}) return intVersion def get_composite_elements( self, conn, volumeInstance): """Get the meta members of a composite volume. 
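# --- Illustrative aside (not part of the driver) ----------------------------
# get_smi_version() above builds the version integer by string concatenation,
# so major 8, minor 1, revision 2 becomes 812 (not a positional encoding):
def _example_smi_version(major, minor, revision):
    return int('%d%d%d' % (major, minor, revision))

assert _example_smi_version(8, 1, 2) == 812
# -----------------------------------------------------------------------------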
:param conn: ECOM connection :param volumeInstance: the volume instance :returns: memberVolumes -- a list of meta members """ memberVolumes = None storageSystemName = volumeInstance['SystemName'] elementCompositionService = self.find_element_composition_service( conn, storageSystemName) rc, ret = conn.InvokeMethod( 'GetCompositeElements', elementCompositionService, TheElement=volumeInstance.path) if 'OutElements' in ret: LOG.debug("Get composite elements of volume " "%(volume)s rc=%(rc)d, ret=%(ret)s", {'volume': volumeInstance.path, 'rc': rc, 'ret': ret}) memberVolumes = ret['OutElements'] return memberVolumes def generate_unique_trunc_host(self, hostName): """Create a unique short host name under 40 chars. :param hostName: the long host name :returns: truncated host name """ if hostName and len(hostName) > 38: hostName = hostName.lower() m = hashlib.md5() m.update(hostName.encode('utf-8')) uuid = m.hexdigest() return( ("%(host)s%(uuid)s" % {'host': hostName[-6:], 'uuid': uuid})) else: return hostName def generate_unique_trunc_pool(self, poolName): """Create a unique pool name under 16 chars. :param poolName: long pool name :returns: truncated pool name """ if poolName and len(poolName) > MAX_POOL_LENGTH: return ( ("%(first)s_%(last)s" % {'first': poolName[:8], 'last': poolName[-7:]})) else: return poolName def generate_unique_trunc_fastpolicy(self, fastPolicyName): """Create a unique fast policy name under 14 chars. :param fastPolicyName: long fast policy name :returns: truncated fast policy name """ if fastPolicyName and len(fastPolicyName) > MAX_FASTPOLICY_LENGTH: return ( ("%(first)s_%(last)s" % {'first': fastPolicyName[:7], 'last': fastPolicyName[-6:]})) else: return fastPolicyName def get_iscsi_protocol_endpoints(self, conn, portgroupinstancename): """Get the iscsi protocol endpoints of a port group. :param conn: the ecom connection :param portgroupinstancename: the portgroup instance name :returns: iscsiendpoints """ iscsiendpoints = conn.AssociatorNames( portgroupinstancename, AssocClass='CIM_MemberOfCollection') return iscsiendpoints def get_tcp_protocol_endpoints(self, conn, iscsiendpointinstancename): """Get the tcp protocol endpoints associated with an iscsi endpoint. :param conn: the ecom connection :param iscsiendpointinstancename: the iscsi endpoint instance name :returns: tcpendpoints """ tcpendpoints = conn.AssociatorNames( iscsiendpointinstancename, AssocClass='CIM_BindsTo') return tcpendpoints def get_ip_protocol_endpoints(self, conn, tcpendpointinstancename): """Get the ip protocol endpoints associated with a tcp endpoint. :param conn: the ecom connection :param tcpendpointinstancename: the tcp endpoint instance name :returns: ipendpoints """ ipendpoints = conn.AssociatorNames( tcpendpointinstancename, AssocClass='CIM_BindsTo') return ipendpoints def get_iscsi_ip_address(self, conn, ipendpointinstancename): """Get the IPv4Address from the ip endpoint instance name. :param conn: the ecom connection :param ipendpointinstancename: the ip endpoint instance name :returns: foundIpAddress """ foundIpAddress = None ipendpointinstance = conn.GetInstance(ipendpointinstancename) propertiesList = ipendpointinstance.properties.items() for properties in propertiesList: if properties[0] == 'IPv4Address': cimProperties = properties[1] foundIpAddress = cimProperties.value return foundIpAddress def get_target_endpoints(self, conn, hardwareId): """Given the hardwareId get the target endpoints.
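# --- Illustrative aside (not part of the driver) ----------------------------
# generate_unique_trunc_host() above keeps the last six characters of a long
# host name and appends an md5 hex digest, giving a stable 38-character name:
import hashlib

def _example_trunc_host(host_name):
    if host_name and len(host_name) > 38:
        host_name = host_name.lower()
        return host_name[-6:] + hashlib.md5(host_name.encode('utf-8')).hexdigest()
    return host_name

assert len(_example_trunc_host('a' * 50)) == 38  # 6 chars + 32-char digest
# -----------------------------------------------------------------------------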
:param conn: the connection to the ecom server :param hardwareId: the hardware Id :returns: targetEndpoints :raises: VolumeBackendAPIException """ protocolControllerInstanceName = self.get_protocol_controller( conn, hardwareId) targetEndpoints = conn.AssociatorNames( protocolControllerInstanceName, ResultClass='EMC_FCSCSIProtocolEndpoint') return targetEndpoints def get_protocol_controller(self, conn, hardwareinstancename): """Get the front end protocol endpoints of a hardware instance :param conn: the ecom connection :param hardwareinstancename: the hardware instance name :returns: protocolControllerInstanceName :raises: VolumeBackendAPIException """ protocolControllerInstanceName = None protocol_controllers = conn.AssociatorNames( hardwareinstancename, ResultClass='EMC_FrontEndSCSIProtocolController') if len(protocol_controllers) > 0: protocolControllerInstanceName = protocol_controllers[0] if protocolControllerInstanceName is None: exceptionMessage = (_( "Unable to get target endpoints for hardwareId " "%(hardwareIdInstance)s.") % {'hardwareIdInstance': hardwareinstancename}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return protocolControllerInstanceName def get_replication_setting_data(self, conn, repServiceInstanceName, replication_type, extraSpecs): """Get the replication setting data :param conn: connection the ecom server :param repServiceInstanceName: the storage group instance name :param replication_type: the replication type :param copy_methodology: the copy methodology :returns: instance rsdInstance """ repServiceCapabilityInstanceNames = conn.AssociatorNames( repServiceInstanceName, ResultClass='CIM_ReplicationServiceCapabilities', AssocClass='CIM_ElementCapabilities') repServiceCapabilityInstanceName = ( repServiceCapabilityInstanceNames[0]) rc, rsd = conn.InvokeMethod( 'GetDefaultReplicationSettingData', repServiceCapabilityInstanceName, ReplicationType=self.get_num(replication_type, '16')) if rc != 0: rc, errordesc = self.wait_for_job_complete(conn, rsd, extraSpecs) if rc != 0: exceptionMessage = (_( "Error getting ReplicationSettingData. " "Return code: %(rc)lu. 
" "Error: %(error)s.") % {'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) return rsd def set_copy_methodology_in_rsd(self, conn, repServiceInstanceName, replication_type, copy_methodology, extraSpecs): """Get the replication setting data :param conn: connection the ecom server :param repServiceInstanceName: the storage group instance name :param replication_type: the replication type :param copy_methodology: the copy methodology :returns: instance rsdInstance """ rsd = self.get_replication_setting_data( conn, repServiceInstanceName, replication_type, extraSpecs) rsdInstance = rsd['DefaultInstance'] rsdInstance['DesiredCopyMethodology'] = ( self.get_num(copy_methodology, '16')) return rsdInstance def set_target_element_supplier_in_rsd( self, conn, repServiceInstanceName, replication_type, target_type, extraSpecs): """Get the replication setting data :param conn: connection the ecom server :param repServiceInstanceName: the storage group instance name :param replication_type: the replication type :param target_type: Use existing, Create new, Use and create :returns: instance rsdInstance """ rsd = self.get_replication_setting_data( conn, repServiceInstanceName, replication_type, extraSpecs) rsdInstance = rsd['DefaultInstance'] rsdInstance['TargetElementSupplier'] = ( self.get_num(target_type, '16')) return rsdInstance def get_v3_default_sg_instance_name( self, conn, poolName, slo, workload, storageSystemName): """Get the V3 default instance name :param conn: the connection to the ecom server :param poolName: the pool name :param slo: the SLO :param workload: the workload :param storageSystemName: the storage system name :returns: the storage group instance name """ storageGroupName = self.get_v3_storage_group_name( poolName, slo, workload) controllerConfigService = ( self.find_controller_configuration_service( conn, storageSystemName)) sgInstanceName = self.find_storage_masking_group( conn, controllerConfigService, storageGroupName) return storageGroupName, controllerConfigService, sgInstanceName cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_https.py0000664000567000056710000003020612701406250024512 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import os import socket import ssl import string import struct from eventlet import patcher try: import OpenSSL except ImportError: OpenSSL = None from oslo_log import log as logging import six from six.moves import http_client from six.moves import urllib from cinder.i18n import _, _LI # Handle case where we are running in a monkey patched environment if OpenSSL and patcher.is_monkey_patched('socket'): from eventlet.green.OpenSSL import SSL try: import pywbem pywbemAvailable = True except ImportError: pywbemAvailable = False LOG = logging.getLogger(__name__) def to_bytes(s): if isinstance(s, six.string_types): return six.b(s) else: return s def get_default_ca_certs(): """Gets the default CA certificates if found, otherwise None. Try to find out system path with ca certificates. This path is cached and returned. If no path is found out, None is returned. """ if not hasattr(get_default_ca_certs, '_path'): for path in ( '/etc/pki/ca-trust/extracted/openssl/ca-bundle.trust.crt', '/etc/ssl/certs', '/etc/ssl/certificates'): if os.path.exists(path): get_default_ca_certs._path = path break else: get_default_ca_certs._path = None return get_default_ca_certs._path class OpenSSLConnectionDelegator(object): """An OpenSSL.SSL.Connection delegator. Supplies an additional 'makefile' method which http_client requires and is not present in OpenSSL.SSL.Connection. Note: Since it is not possible to inherit from OpenSSL.SSL.Connection a delegator must be used. """ def __init__(self, *args, **kwargs): self.connection = SSL.GreenConnection(*args, **kwargs) def __getattr__(self, name): return getattr(self.connection, name) def makefile(self, *args, **kwargs): return socket._fileobject(self.connection, *args, **kwargs) class HTTPSConnection(http_client.HTTPSConnection): def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, ca_certs=None, no_verification=False): if not pywbemAvailable: LOG.info(_LI( 'Module PyWBEM not installed. ' 'Install PyWBEM using the python-pywbem package.')) if six.PY3: excp_lst = (TypeError, ssl.SSLError) else: excp_lst = () try: http_client.HTTPSConnection.__init__(self, host, port, key_file=key_file, cert_file=cert_file) self.key_file = None if key_file is None else key_file self.cert_file = None if cert_file is None else cert_file self.insecure = no_verification self.ca_certs = ( None if ca_certs is None else six.text_type(ca_certs)) self.set_context() # ssl exceptions are reported in various form in Python 3 # so to be compatible, we report the same kind as under # Python2 except excp_lst as e: raise pywbem.cim_http.Error(six.text_type(e)) @staticmethod def host_matches_cert(host, x509): """Verify that the certificate matches host. Verify that the x509 certificate we have received from 'host' correctly identifies the server we are connecting to, ie that the certificate's Common Name or a Subject Alternative Name matches 'host'. """ def check_match(name): # Directly match the name. if name == host: return True # Support single wildcard matching. if name.startswith('*.') and host.find('.') > 0: if name[2:] == host.split('.', 1)[1]: return True common_name = x509.get_subject().commonName # First see if we can match the CN. if check_match(common_name): return True # Also try Subject Alternative Names for a match. 
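# --- Illustrative aside (not part of the driver) ----------------------------
# check_match() inside host_matches_cert() above accepts an exact name or a
# single left-most wildcard; a standalone sketch of the same rule:
def _example_cert_match(name, host):
    if name == host:
        return True
    return (name.startswith('*.') and host.find('.') > 0
            and name[2:] == host.split('.', 1)[1])

assert _example_cert_match('*.example.com', 'ecom.example.com') is True
assert _example_cert_match('*.example.com', 'example.com') is False
# -----------------------------------------------------------------------------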
san_list = None for i in range(x509.get_extension_count()): ext = x509.get_extension(i) if ext.get_short_name() == b'subjectAltName': san_list = six.text_type(ext) for san in ''.join(san_list.split()).split(','): if san.startswith('DNS:'): if check_match(san.split(':', 1)[1]): return True # Server certificate does not match host. msg = (_("Host %(host)s does not match x509 certificate contents: " "CommonName %(commonName)s.") % {'host': host, 'commonName': common_name}) if san_list is not None: msg = (_("%(message)s, subjectAltName: %(sanList)s.") % {'message': msg, 'sanList': san_list}) raise pywbem.cim_http.AuthError(msg) def verify_callback(self, connection, x509, errnum, depth, preverify_ok): if x509.has_expired(): msg = (_("SSL Certificate expired on %s.") % x509.get_notAfter()) raise pywbem.cim_http.AuthError(msg) if depth == 0 and preverify_ok: # We verify that the host matches against the last # certificate in the chain. return self.host_matches_cert(self.host, x509) else: # Pass through OpenSSL's default result. return preverify_ok def set_context(self): """Set up the OpenSSL context.""" self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) if self.insecure is not True: self.context.set_verify(OpenSSL.SSL.VERIFY_PEER, self.verify_callback) else: self.context.set_verify(OpenSSL.SSL.VERIFY_NONE, lambda *args: True) if self.cert_file: try: self.context.use_certificate_file(self.cert_file) except Exception as e: msg = (_("Unable to load cert from %(cert)s %(e)s.") % {'cert': self.cert_file, 'e': e}) raise pywbem.cim_http.AuthError(msg) if self.key_file is None: # We support having key and cert in same file. try: self.context.use_privatekey_file(self.cert_file) except Exception as e: msg = (_("No key file specified and unable to load key " "from %(cert)s %(e)s.") % {'cert': self.cert_file, 'e': e}) raise pywbem.cim_http.AuthError(msg) if self.key_file: try: self.context.use_privatekey_file(self.key_file) except Exception as e: msg = (_("Unable to load key from %(cert)s %(e)s.") % {'cert': self.cert_file, 'e': e}) raise pywbem.cim_http.AuthError(msg) if self.ca_certs: try: self.context.load_verify_locations(to_bytes(self.ca_certs)) except Exception as e: msg = (_("Unable to load CA from %(cert)s %(e)s.") % {'cert': self.cert_file, 'e': e}) raise pywbem.cim_http.AuthError(msg) else: self.context.set_default_verify_paths() def connect(self): result = socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM) if result: socket_family = result[0][0] if socket_family == socket.AF_INET6: sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) else: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) else: # If for some reason the address lookup fails, we still # connect to an IPv4 socket. This retains the older behavior. sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if self.timeout is not None: # '0' microseconds sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, struct.pack('LL', 0, 0)) self.sock = OpenSSLConnectionDelegator(self.context, sock) self.sock.connect((self.host, self.port)) def wbem_request(url, data, creds, headers=None, debug=0, x509=None, verify_callback=None, ca_certs=None, no_verification=False): """Send request over HTTP. Send XML data over HTTP to the specified url. Return the response in XML. Uses Python's built-in http_client. x509 may be a dictionary containing the location of the SSL certificate and key files.
""" if headers is None: headers = [] host, port, use_ssl = pywbem.cim_http.parse_url(url) key_file = None cert_file = None if use_ssl and x509 is not None: cert_file = x509.get('cert_file') key_file = x509.get('key_file') numTries = 0 localAuthHeader = None tryLimit = 5 if isinstance(data, six.text_type): data = data.encode('utf-8') data = '\n' + data if not no_verification and ca_certs is None: ca_certs = get_default_ca_certs() elif no_verification: ca_certs = None if use_ssl: h = HTTPSConnection( host, port=port, key_file=key_file, cert_file=cert_file, ca_certs=ca_certs, no_verification=no_verification) locallogin = None while numTries < tryLimit: numTries = numTries + 1 h.putrequest('POST', '/cimom') h.putheader('Content-type', 'application/xml; charset="utf-8"') h.putheader('Content-length', len(data)) if localAuthHeader is not None: h.putheader(*localAuthHeader) elif creds is not None: h.putheader('Authorization', 'Basic %s' % base64.encodestring('%s:%s' % (creds[0], creds[1])) .replace('\n', '')) elif locallogin is not None: h.putheader('PegasusAuthorization', 'Local "%s"' % locallogin) for hdr in headers: if isinstance(hdr, six.text_type): hdr = hdr.encode('utf-8') s = map(lambda x: string.strip(x), string.split(hdr, ":", 1)) h.putheader(urllib.parse.quote(s[0]), urllib.parse.quote(s[1])) try: h.endheaders() try: h.send(data) except socket.error as arg: if arg[0] != 104 and arg[0] != 32: raise response = h.getresponse() body = response.read() if response.status != 200: raise pywbem.cim_http.Error('HTTP error') except http_client.BadStatusLine as arg: msg = (_("Bad Status line returned: %(arg)s.") % {'arg': arg}) raise pywbem.cim_http.Error(msg) except socket.sslerror as arg: msg = (_("SSL error: %(arg)s.") % {'arg': arg}) raise pywbem.cim_http.Error(msg) except socket.error as arg: msg = (_("Socket error: %(arg)s.") % {'arg': arg}) raise pywbem.cim_http.Error(msg) break return body cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_fc.py0000664000567000056710000004055512701406250023750 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast from oslo_log import log as logging import six from cinder import context from cinder.i18n import _LW from cinder.volume import driver from cinder.volume.drivers.emc import emc_vmax_common from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class EMCVMAXFCDriver(driver.FibreChannelDriver): """EMC FC Drivers for VMAX using SMI-S. Version history: 1.0.0 - Initial driver 1.1.0 - Multiple pools and thick/thin provisioning, performance enhancement. 
2.0.0 - Add driver requirement functions 2.1.0 - Add consistency group functions 2.1.1 - Fixed issue with mismatched config (bug #1442376) 2.1.2 - Clean up failed clones (bug #1440154) 2.1.3 - Fixed a problem with FAST support (bug #1435069) 2.2.0 - Add manage/unmanage 2.2.1 - Support for SE 8.0.3 2.2.2 - Update Consistency Group 2.2.3 - Pool aware scheduler(multi-pool) support 2.2.4 - Create CG from CG snapshot 2.3.0 - Name change for MV and SG for FAST (bug #1515181) - Fix for randomly choosing port group. (bug #1501919) - get_short_host_name needs to be called in find_device_number (bug #1520635) - Proper error handling for invalid SLOs (bug #1512795) - Extend Volume for VMAX3, SE8.1.0.3 https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume - Incorrect SG selected on an attach (#1515176) - Cleanup Zoning (bug #1501938) NOTE: FC only - Last volume in SG fix - _remove_last_vol_and_delete_sg is not being called for VMAX3 (bug #1520549) - necessary updates for CG changes (#1534616) - Changing PercentSynced to CopyState (bug #1517103) - Getting iscsi ip from port in existing masking view - Replacement of EMCGetTargetEndpoints api (bug #1512791) - VMAX3 snapvx improvements (bug #1522821) """ VERSION = "2.3.0" def __init__(self, *args, **kwargs): super(EMCVMAXFCDriver, self).__init__(*args, **kwargs) self.common = emc_vmax_common.EMCVMAXCommon( 'FC', self.VERSION, configuration=self.configuration) self.zonemanager_lookup_service = fczm_utils.create_lookup_service() def check_for_setup_error(self): pass def create_volume(self, volume): """Creates a EMC(VMAX/VNX) volume.""" volpath = self.common.create_volume(volume) model_update = {} volume['provider_location'] = six.text_type(volpath) model_update['provider_location'] = volume['provider_location'] return model_update def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" volpath = self.common.create_volume_from_snapshot(volume, snapshot) model_update = {} volume['provider_location'] = six.text_type(volpath) model_update['provider_location'] = volume['provider_location'] return model_update def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume.""" volpath = self.common.create_cloned_volume(volume, src_vref) model_update = {} volume['provider_location'] = six.text_type(volpath) model_update['provider_location'] = volume['provider_location'] return model_update def delete_volume(self, volume): """Deletes an EMC volume.""" self.common.delete_volume(volume) def create_snapshot(self, snapshot): """Creates a snapshot.""" ctxt = context.get_admin_context() volumename = snapshot['volume_name'] index = volumename.index('-') volumeid = volumename[index + 1:] volume = self.db.volume_get(ctxt, volumeid) volpath = self.common.create_snapshot(snapshot, volume) model_update = {} snapshot['provider_location'] = six.text_type(volpath) model_update['provider_location'] = snapshot['provider_location'] return model_update def delete_snapshot(self, snapshot): """Deletes a snapshot.""" ctxt = context.get_admin_context() volumename = snapshot['volume_name'] index = volumename.index('-') volumeid = volumename[index + 1:] volume = self.db.volume_get(ctxt, volumeid) self.common.delete_snapshot(snapshot, volume) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" pass def remove_export(self, context, volume): 
"""Driver entry point to remove an export for a volume.""" pass def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values: { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], } } """ device_info = self.common.initialize_connection( volume, connector) device_number = device_info['hostlunid'] storage_system = device_info['storagesystem'] target_wwns, init_targ_map = self._build_initiator_target_map( storage_system, volume, connector) data = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': device_number, 'target_discovered': True, 'target_wwn': target_wwns, 'initiator_target_map': init_targ_map}} LOG.debug("Return FC data for zone addition: %(data)s.", {'data': data}) return data @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector. Return empty data if other volumes are in the same zone. The FibreChannel ZoneManager doesn't remove zones if there isn't an initiator_target_map in the return of terminate_connection. :param volume: the volume object :param connector: the connector object :returns: dict -- the target_wwns and initiator_target_map if the zone is to be removed, otherwise empty """ data = {'driver_volume_type': 'fibre_channel', 'data': {}} loc = volume['provider_location'] name = ast.literal_eval(loc) storage_system = name['keybindings']['SystemName'] LOG.debug("Start FC detach process for volume: %(volume)s.", {'volume': volume['name']}) mvInstanceName = self.common.get_masking_view_by_volume( volume, connector) if mvInstanceName is not None: portGroupInstanceName = ( self.common.get_port_group_from_masking_view( mvInstanceName)) initiatorGroupInstanceName = ( self.common.get_initiator_group_from_masking_view( mvInstanceName)) LOG.debug("Found port group: %(portGroup)s " "in masking view %(maskingView)s.", {'portGroup': portGroupInstanceName, 'maskingView': mvInstanceName}) # Map must be populated before the terminate_connection target_wwns, init_targ_map = self._build_initiator_target_map( storage_system, volume, connector) self.common.terminate_connection(volume, connector) LOG.debug("Looking for masking views still associated with " "Port Group %s.", portGroupInstanceName) mvInstances = self._get_common_masking_views( portGroupInstanceName, initiatorGroupInstanceName) if len(mvInstances) > 0: LOG.debug("Found %(numViews)lu MaskingViews.", {'numViews': len(mvInstances)}) else: # No views found. LOG.debug("No MaskingViews were found. 
Deleting zone.") data = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': target_wwns, 'initiator_target_map': init_targ_map}} LOG.debug("Return FC data for zone removal: %(data)s.", {'data': data}) else: LOG.warning(_LW("Volume %(volume)s is not in any masking view."), {'volume': volume['name']}) return data def _get_common_masking_views( self, portGroupInstanceName, initiatorGroupInstanceName): """Check to see the existence of mv in list""" mvInstances = [] mvInstancesByPG = self.common.get_masking_views_by_port_group( portGroupInstanceName) mvInstancesByIG = self.common.get_masking_views_by_initiator_group( initiatorGroupInstanceName) for mvInstanceByPG in mvInstancesByPG: if mvInstanceByPG in mvInstancesByIG: mvInstances.append(mvInstanceByPG) return mvInstances def _build_initiator_target_map(self, storage_system, volume, connector): """Build the target_wwns and the initiator target map.""" target_wwns = [] init_targ_map = {} initiator_wwns = connector['wwpns'] if self.zonemanager_lookup_service: fc_targets = self.common.get_target_wwns_from_masking_view( storage_system, volume, connector) mapping = ( self.zonemanager_lookup_service. get_device_mapping_from_network(initiator_wwns, fc_targets)) for entry in mapping: map_d = mapping[entry] target_wwns.extend(map_d['target_port_wwn_list']) for initiator in map_d['initiator_port_wwn_list']: init_targ_map[initiator] = map_d['target_port_wwn_list'] else: # No lookup service, pre-zoned case. target_wwns = self.common.get_target_wwns(storage_system, connector) for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return list(set(target_wwns)), init_targ_map def extend_volume(self, volume, new_size): """Extend an existing volume.""" self.common.extend_volume(volume, new_size) def get_volume_stats(self, refresh=False): """Get volume stats. :param refresh: boolean -- If True, run update the stats first. :returns: dict -- the stats dict """ if refresh: self.update_volume_stats() return self._stats def update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") data = self.common.update_volume_stats() data['storage_protocol'] = 'FC' data['driver_version'] = self.VERSION self._stats = data def migrate_volume(self, ctxt, volume, host): """Migrate a volume from one Volume Backend to another. :param ctxt: context :param volume: the volume object including the volume_type_id :param host: the host dict holding the relevant target(destination) information :returns: boolean -- Always returns True :returns: dict -- Empty dict {} """ return self.common.migrate_volume(ctxt, volume, host) def retype(self, ctxt, volume, new_type, diff, host): """Migrate volume to another host using retype. :param ctxt: context :param volume: the volume object including the volume_type_id :param new_type: the new volume type. :param diff: Unused parameter. 
:param host: the host dict holding the relevant target(destination) information :returns: boolean -- True if retype succeeded, False if error """ return self.common.retype(ctxt, volume, new_type, diff, host) def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" self.common.create_consistencygroup(context, group) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" return self.common.delete_consistencygroup( context, group, volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" return self.common.create_cgsnapshot(context, cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" return self.common.delete_cgsnapshot(context, cgsnapshot, snapshots) def manage_existing(self, volume, external_ref): """Manages an existing VMAX Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. """ return self.common.manage_existing(volume, external_ref) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing VMAX volume to manage_existing. :param self: reference to class :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: size of the volume in GB """ return self.common.manage_existing_get_size(volume, external_ref) def unmanage(self, volume): """Export VMAX volume from Cinder. Leave the volume intact on the backend array. """ return self.common.unmanage(volume) def update_consistencygroup(self, context, group, add_volumes, remove_volumes): """Updates LUNs in consistency group.""" return self.common.update_consistencygroup(group, add_volumes, remove_volumes) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates the consistency group from source. Currently the source can only be a cgsnapshot. :param context: the context :param group: the consistency group object to be created :param volumes: volumes in the consistency group :param cgsnapshot: the source consistency group snapshot :param snapshots: snapshots of the source volumes :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. """ return self.common.create_consistencygroup_from_src( context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_common.py0000664000567000056710000057661312701406250024661 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
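
# A minimal, illustrative sketch (placeholder values) of the
# cinder_emc_config XML file that this module reads -- see the
# CINDER_EMC_CONFIG_FILE constants below and the call to
# utils.parse_file_to_get_array_map() in _gather_info(). For V3 arrays the
# file also carries <SLO> and <Workload> tags, while for V2 arrays a
# <FastPolicy> tag may appear instead; the authoritative tag set is defined
# by the parser, not by this sketch.
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <EMC>
#     <EcomServerIp>10.10.10.10</EcomServerIp>
#     <EcomServerPort>5988</EcomServerPort>
#     <EcomUserName>admin</EcomUserName>
#     <EcomPassword>password</EcomPassword>
#     <PortGroups>
#       <PortGroup>OS-PORTGROUP1-PG</PortGroup>
#     </PortGroups>
#     <Array>000000000001</Array>
#     <Pool>FC_POOL</Pool>
#   </EMC>
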
import ast import os.path from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.objects import fields from cinder.volume.drivers.emc import emc_vmax_fast from cinder.volume.drivers.emc import emc_vmax_https from cinder.volume.drivers.emc import emc_vmax_masking from cinder.volume.drivers.emc import emc_vmax_provision from cinder.volume.drivers.emc import emc_vmax_provision_v3 from cinder.volume.drivers.emc import emc_vmax_utils from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF try: import pywbem pywbemAvailable = True except ImportError: pywbemAvailable = False CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml' CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_' CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml' BACKENDNAME = 'volume_backend_name' PREFIXBACKENDNAME = 'capabilities:volume_backend_name' PORTGROUPNAME = 'portgroupname' EMC_ROOT = 'root/emc' POOL = 'storagetype:pool' ARRAY = 'storagetype:array' FASTPOLICY = 'storagetype:fastpolicy' BACKENDNAME = 'volume_backend_name' COMPOSITETYPE = 'storagetype:compositetype' STRIPECOUNT = 'storagetype:stripecount' MEMBERCOUNT = 'storagetype:membercount' STRIPED = 'striped' CONCATENATED = 'concatenated' SMI_VERSION_8 = 800 # V3 SLO = 'storagetype:slo' WORKLOAD = 'storagetype:workload' INTERVAL = 'storagetype:interval' RETRIES = 'storagetype:retries' ISV3 = 'isV3' TRUNCATE_5 = 5 TRUNCATE_8 = 8 SNAPVX = 7 DISSOLVE_SNAPVX = 9 CREATE_NEW_TARGET = 2 SNAPVX_REPLICATION_TYPE = 6 emc_opts = [ cfg.StrOpt('cinder_emc_config_file', default=CINDER_EMC_CONFIG_FILE, help='use this file for cinder emc plugin ' 'config data'), ] CONF.register_opts(emc_opts) class EMCVMAXCommon(object): """Common class for SMI-S based EMC volume drivers. This common class is for EMC volume drivers based on SMI-S. It supports VNX and VMAX arrays. """ VERSION = "2.0.0" stats = {'driver_version': '1.0', 'free_capacity_gb': 0, 'reserved_percentage': 0, 'storage_protocol': None, 'total_capacity_gb': 0, 'vendor_name': 'EMC', 'volume_backend_name': None} pool_info = {'backend_name': None, 'config_file': None, 'arrays_info': {}} def __init__(self, prtcl, version, configuration=None): if not pywbemAvailable: LOG.info(_LI( "Module PyWBEM not installed. 
" "Install PyWBEM using the python-pywbem package.")) self.protocol = prtcl self.configuration = configuration self.configuration.append_config_values(emc_opts) self.conn = None self.url = None self.user = None self.passwd = None self.masking = emc_vmax_masking.EMCVMAXMasking(prtcl) self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl) self.fast = emc_vmax_fast.EMCVMAXFast(prtcl) self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl) self.provisionv3 = emc_vmax_provision_v3.EMCVMAXProvisionV3(prtcl) self.version = version self._gather_info() def _gather_info(self): """Gather the relevant information for update_volume_stats.""" if hasattr(self.configuration, 'cinder_emc_config_file'): self.pool_info['config_file'] = ( self.configuration.cinder_emc_config_file) else: self.pool_info['config_file'] = ( self.configuration.safe_get('cinder_emc_config_file')) self.pool_info['backend_name'] = ( self.configuration.safe_get('volume_backend_name')) LOG.debug( "Updating volume stats on file %(emcConfigFileName)s on " "backend %(backendName)s.", {'emcConfigFileName': self.pool_info['config_file'], 'backendName': self.pool_info['backend_name']}) self.pool_info['arrays_info'] = ( self.utils.parse_file_to_get_array_map( self.pool_info['config_file'])) def create_volume(self, volume): """Creates a EMC(VMAX) volume from a pre-existing storage pool. For a concatenated compositeType: If the volume size is over 240GB then a composite is created EMCNumberOfMembers > 1, otherwise it defaults to a non composite For a striped compositeType: The user must supply an extra spec to determine how many metas will make up the striped volume. If the meta size is greater than 240GB an error is returned to the user. Otherwise the EMCNumberOfMembers is what the user specifies. :param volume: volume Object :returns: dict -- volumeDict - the volume dictionary """ volumeSize = int(self.utils.convert_gb_to_bits(volume['size'])) volumeName = volume['id'] extraSpecs = self._initial_setup(volume) self.conn = self._get_ecom_connection() if extraSpecs[ISV3]: rc, volumeDict, storageSystemName = ( self._create_v3_volume(volume, volumeName, volumeSize, extraSpecs)) else: rc, volumeDict, storageSystemName = ( self._create_composite_volume(volume, volumeName, volumeSize, extraSpecs)) # If volume is created as part of a consistency group. if 'consistencygroup_id' in volume and volume['consistencygroup_id']: cgName = self.utils.truncate_string( volume['consistencygroup_id'], 8) volumeInstance = self.utils.find_volume_instance( self.conn, volumeDict, volumeName) replicationService = ( self.utils.find_replication_service(self.conn, storageSystemName)) cgInstanceName = ( self._find_consistency_group(replicationService, cgName)) self.provision.add_volume_to_cg(self.conn, replicationService, cgInstanceName, volumeInstance.path, cgName, volumeName, extraSpecs) LOG.info(_LI("Leaving create_volume: %(volumeName)s " "Return code: %(rc)lu " "volume dict: %(name)s."), {'volumeName': volumeName, 'rc': rc, 'name': volumeDict}) # Adding version information volumeDict['version'] = self.version return volumeDict def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. For VMAX, replace snapshot with clone. 
:param volume: volume Object :param snapshot: snapshot object :returns: dict -- the cloned volume dictionary :raises: VolumeBackendAPIException """ LOG.debug("Entering create_volume_from_snapshot.") snapshot['host'] = volume['host'] extraSpecs = self._initial_setup(snapshot) self.conn = self._get_ecom_connection() snapshotInstance = self._find_lun(snapshot) storageSystem = snapshotInstance['SystemName'] syncName = self.utils.find_sync_sv_by_target( self.conn, storageSystem, snapshotInstance, extraSpecs, True) if syncName is not None: repservice = self.utils.find_replication_service(self.conn, storageSystem) if repservice is None: exception_message = (_("Cannot find Replication Service to " "create volume for snapshot %s.") % snapshotInstance) raise exception.VolumeBackendAPIException( data=exception_message) self.provision.delete_clone_relationship( self.conn, repservice, syncName, extraSpecs) snapshot['host'] = volume['host'] return self._create_cloned_volume(volume, snapshot, extraSpecs, False) def create_cloned_volume(self, cloneVolume, sourceVolume): """Creates a clone of the specified volume. :param cloneVolume: clone volume Object :param sourceVolume: volume object :returns: cloneVolumeDict -- the cloned volume dictionary """ extraSpecs = self._initial_setup(sourceVolume) return self._create_cloned_volume(cloneVolume, sourceVolume, extraSpecs, False) def delete_volume(self, volume): """Deletes a EMC(VMAX) volume. :param volume: volume Object """ LOG.info(_LI("Deleting Volume: %(volume)s"), {'volume': volume['name']}) rc, volumeName = self._delete_volume(volume) LOG.info(_LI("Leaving delete_volume: %(volumename)s Return code: " "%(rc)lu."), {'volumename': volumeName, 'rc': rc}) def create_snapshot(self, snapshot, volume): """Creates a snapshot. For VMAX, replace snapshot with clone. :param snapshot: snapshot object :param volume: volume Object to create snapshot from :returns: dict -- the cloned volume dictionary """ extraSpecs = self._initial_setup(volume) return self._create_cloned_volume(snapshot, volume, extraSpecs, True) def delete_snapshot(self, snapshot, volume): """Deletes a snapshot. :param snapshot: snapshot object :param volume: volume Object to create snapshot from """ LOG.info(_LI("Delete Snapshot: %(snapshotName)s."), {'snapshotName': snapshot['name']}) snapshot['host'] = volume['host'] self._delete_snapshot(snapshot) def _remove_members(self, controllerConfigService, volumeInstance, connector, extraSpecs): """This method unmaps a volume from a host. Removes volume from the Device Masking Group that belongs to a Masking View. Check if fast policy is in the extra specs. If it isn't we do not need to do any thing for FAST. Assume that isTieringPolicySupported is False unless the FAST policy is in the extra specs and tiering is enabled on the array. :param controllerConfigService: instance name of ControllerConfigurationService :param volumeInstance: volume Object :param connector: the connector object :param extraSpecs: extra specifications :returns: storageGroupInstanceName """ volumeName = volumeInstance['ElementName'] LOG.debug("Detaching volume %s.", volumeName) return self.masking.remove_and_reset_members( self.conn, controllerConfigService, volumeInstance, volumeName, extraSpecs, connector) def _unmap_lun(self, volume, connector): """Unmaps a volume from the host. 
:param volume: the volume Object :param connector: the connector Object :raises: VolumeBackendAPIException """ extraSpecs = self._initial_setup(volume) volumename = volume['name'] LOG.info(_LI("Unmap volume: %(volume)s."), {'volume': volumename}) device_info = self.find_device_number(volume, connector['host']) if 'hostlunid' not in device_info: LOG.info(_LI("Volume %s is not mapped. No volume to unmap."), volumename) return vol_instance = self._find_lun(volume) storage_system = vol_instance['SystemName'] configservice = self.utils.find_controller_configuration_service( self.conn, storage_system) if configservice is None: exception_message = (_("Cannot find Controller Configuration " "Service for storage system " "%(storage_system)s.") % {'storage_system': storage_system}) raise exception.VolumeBackendAPIException(data=exception_message) self._remove_members(configservice, vol_instance, connector, extraSpecs) def initialize_connection(self, volume, connector): """Initializes the connection and returns device and connection info. The volume may be already mapped, if this is so the deviceInfo tuple is returned. If the volume is not already mapped then we need to gather information to either 1. Create an new masking view or 2. Add the volume to an existing storage group within an already existing maskingview. The naming convention is the following: initiatorGroupName = OS---IG e.g OS-myShortHost-I-IG storageGroupName = OS----SG e.g OS-myShortHost-SATA_BRONZ1-I-SG portGroupName = OS--PG The portGroupName will come from the EMC configuration xml file. These are precreated. If the portGroup does not exist then an error will be returned to the user maskingView = OS----MV e.g OS-myShortHost-SATA_BRONZ1-I-MV :param volume: volume Object :param connector: the connector Object :returns: dict -- deviceInfoDict - device information dict :raises: VolumeBackendAPIException """ portGroupName = None extraSpecs = self._initial_setup(volume) volumeName = volume['name'] LOG.info(_LI("Initialize connection: %(volume)s."), {'volume': volumeName}) self.conn = self._get_ecom_connection() deviceInfoDict = self._wrap_find_device_number( volume, connector['host']) maskingViewDict = self._populate_masking_dict( volume, connector, extraSpecs) if ('hostlunid' in deviceInfoDict and deviceInfoDict['hostlunid'] is not None): isSameHost = self._is_same_host(connector, deviceInfoDict) if isSameHost: # Device is already mapped to same host so we will leave # the state as is. deviceNumber = deviceInfoDict['hostlunid'] LOG.info(_LI("Volume %(volume)s is already mapped. " "The device number is %(deviceNumber)s."), {'volume': volumeName, 'deviceNumber': deviceNumber}) # Special case, we still need to get the iscsi ip address. portGroupName = ( self._get_correct_port_group( deviceInfoDict, maskingViewDict['storageSystemName'])) else: deviceInfoDict, portGroupName = self._attach_volume( volume, connector, extraSpecs, maskingViewDict, True) else: deviceInfoDict, portGroupName = ( self._attach_volume( volume, connector, extraSpecs, maskingViewDict)) if self.protocol.lower() == 'iscsi': return self._find_ip_protocol_endpoints( self.conn, deviceInfoDict['storagesystem'], portGroupName) else: return deviceInfoDict def _attach_volume(self, volume, connector, extraSpecs, maskingViewDict, isLiveMigration=False): """Attach a volume to a host. If live migration is being undertaken then the volume remains attached to the source host. 
        :param volume: the volume object
        :param connector: the connector object
        :param extraSpecs: extra specifications
        :param maskingViewDict: masking view information
        :param isLiveMigration: boolean, can be None
        :returns: dict -- deviceInfoDict
        :returns: string -- port group name
        :raises: VolumeBackendAPIException
        """
        volumeName = volume['name']
        maskingViewDict = self._populate_masking_dict(
            volume, connector, extraSpecs)
        if isLiveMigration:
            maskingViewDict['isLiveMigration'] = True
        else:
            maskingViewDict['isLiveMigration'] = False

        rollbackDict = self.masking.setup_masking_view(
            self.conn, maskingViewDict, extraSpecs)

        # Find host lun id again after the volume is exported to the host.
        deviceInfoDict = self.find_device_number(volume, connector['host'])
        if 'hostlunid' not in deviceInfoDict:
            # Did not successfully attach to host,
            # so a rollback for FAST is required.
            LOG.error(_LE("Error Attaching volume %(vol)s."),
                      {'vol': volumeName})
            if ((rollbackDict['fastPolicyName'] is not None) or
                    (rollbackDict['isV3'] is not None)):
                (self.masking
                    ._check_if_rollback_action_for_masking_required(
                        self.conn, rollbackDict))
            exception_message = (_("Error Attaching volume %(vol)s.")
                                 % {'vol': volumeName})
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        return deviceInfoDict, rollbackDict['pgGroupName']

    def _is_same_host(self, connector, deviceInfoDict):
        """Check if the host is the same.

        Check if the host to attach to is the same host
        that is already attached. This is necessary for
        live migration.

        :param connector: the connector object
        :param deviceInfoDict: the device information dictionary
        :returns: boolean -- True if the host is the same, False otherwise.
        """
        if 'host' in connector:
            currentHost = connector['host']
            if ('maskingview' in deviceInfoDict and
                    deviceInfoDict['maskingview'] is not None):
                if currentHost in deviceInfoDict['maskingview']:
                    return True
        return False

    def _get_correct_port_group(self, deviceInfoDict, storageSystemName):
        """Get the portgroup name from the existing masking view.

        :param deviceInfoDict: the device info dictionary
        :param storageSystemName: storage system name
        :returns: string -- port group name
        """
        if ('controller' in deviceInfoDict and
                deviceInfoDict['controller'] is not None):
            maskingViewInstanceName = deviceInfoDict['controller']
            try:
                maskingViewInstance = (
                    self.conn.GetInstance(maskingViewInstanceName))
            except Exception:
                exception_message = (_("Unable to get the name of "
                                       "the masking view."))
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

            # Get the portgroup from masking view
            portGroupInstanceName = (
                self.masking._get_port_group_from_masking_view(
                    self.conn,
                    maskingViewInstance['ElementName'],
                    storageSystemName))
            try:
                portGroupInstance = (
                    self.conn.GetInstance(portGroupInstanceName))
                portGroupName = (
                    portGroupInstance['ElementName'])
            except Exception:
                exception_message = (_("Unable to get the name of "
                                       "the portgroup."))
                raise exception.VolumeBackendAPIException(
                    data=exception_message)
        else:
            exception_message = (_("Cannot get the portgroup from "
                                   "the masking view."))
            raise exception.VolumeBackendAPIException(
                data=exception_message)
        return portGroupName

    def terminate_connection(self, volume, connector):
        """Disallow connection from connector.

        :param volume: the volume Object
        :param connector: the connector Object
        """
        volumename = volume['name']
        LOG.info(_LI("Terminate connection: %(volume)s."),
                 {'volume': volumename})

        self._unmap_lun(volume, connector)

    def extend_volume(self, volume, newSize):
        """Extends an existing volume.

        Prerequisites:
        1.
The volume must be composite e.g StorageVolume.EMCIsComposite=True 2. The volume can only be concatenated e.g StorageExtent.IsConcatenated=True :params volume: the volume Object :params newSize: the new size to increase the volume to :returns: dict -- modifiedVolumeDict - the extended volume Object :raises: VolumeBackendAPIException """ originalVolumeSize = volume['size'] volumeName = volume['name'] extraSpecs = self._initial_setup(volume) self.conn = self._get_ecom_connection() volumeInstance = self._find_lun(volume) if volumeInstance is None: exceptionMessage = (_("Cannot find Volume: %(volumename)s. " "Extend operation. Exiting....") % {'volumename': volumeName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) if int(originalVolumeSize) > int(newSize): exceptionMessage = (_( "Your original size: %(originalVolumeSize)s GB is greater " "than: %(newSize)s GB. Only Extend is supported. Exiting...") % {'originalVolumeSize': originalVolumeSize, 'newSize': newSize}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) additionalVolumeSize = six.text_type( int(newSize) - int(originalVolumeSize)) additionalVolumeSize = self.utils.convert_gb_to_bits( additionalVolumeSize) if extraSpecs[ISV3]: rc, modifiedVolumeDict = self._extend_v3_volume( volumeInstance, volumeName, newSize, extraSpecs) else: # This is V2. rc, modifiedVolumeDict = self._extend_composite_volume( volumeInstance, volumeName, newSize, additionalVolumeSize, extraSpecs) # Check the occupied space of the new extended volume. extendedVolumeInstance = self.utils.find_volume_instance( self.conn, modifiedVolumeDict, volumeName) extendedVolumeSize = self.utils.get_volume_size( self.conn, extendedVolumeInstance) LOG.debug( "The actual volume size of the extended volume: %(volumeName)s " "is %(volumeSize)s.", {'volumeName': volumeName, 'volumeSize': extendedVolumeSize}) # If the requested size and the actual size don't # tally throw an exception. newSizeBits = self.utils.convert_gb_to_bits(newSize) diffVolumeSize = self.utils.compare_size( newSizeBits, extendedVolumeSize) if diffVolumeSize != 0: exceptionMessage = (_( "The requested size : %(requestedSize)s is not the same as " "resulting size: %(resultSize)s.") % {'requestedSize': newSizeBits, 'resultSize': extendedVolumeSize}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) LOG.debug( "Leaving extend_volume: %(volumeName)s. 
" "Return code: %(rc)lu, " "volume dict: %(name)s.", {'volumeName': volumeName, 'rc': rc, 'name': modifiedVolumeDict}) return modifiedVolumeDict def update_volume_stats(self): """Retrieve stats info.""" pools = [] backendName = self.pool_info['backend_name'] for arrayInfo in self.pool_info['arrays_info']: self._set_ecom_credentials(arrayInfo) # Check what type of array it is isV3 = self.utils.isArrayV3(self.conn, arrayInfo['SerialNumber']) if isV3: location_info, total_capacity_gb, free_capacity_gb = ( self._update_srp_stats(arrayInfo)) poolName = ("%(slo)s+%(poolName)s+%(array)s" % {'slo': arrayInfo['SLO'], 'poolName': arrayInfo['PoolName'], 'array': arrayInfo['SerialNumber']}) else: # This is V2 location_info, total_capacity_gb, free_capacity_gb = ( self._update_pool_stats(backendName, arrayInfo)) poolName = ("%(poolName)s+%(array)s" % {'poolName': arrayInfo['PoolName'], 'array': arrayInfo['SerialNumber']}) pool = {'pool_name': poolName, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'reserved_percentage': 0, 'QoS_support': False, 'location_info': location_info, 'consistencygroup_support': True} pools.append(pool) data = {'vendor_name': "EMC", 'driver_version': self.version, 'storage_protocol': 'unknown', 'volume_backend_name': self.pool_info['backend_name'] or self.__class__.__name__, # Use zero capacities here so we always use a pool. 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'reserved_percentage': 0, 'pools': pools} return data def _update_srp_stats(self, arrayInfo): """Update SRP stats. :param arrayInfo: array information :returns: location_info :returns: totalManagedSpaceGbs :returns: remainingManagedSpaceGbs """ totalManagedSpaceGbs, remainingManagedSpaceGbs = ( self.provisionv3.get_srp_pool_stats(self.conn, arrayInfo)) LOG.info(_LI( "Capacity stats for SRP pool %(poolName)s on array " "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " "free_capacity_gb=%(free_capacity_gb)lu"), {'poolName': arrayInfo['PoolName'], 'arrayName': arrayInfo['SerialNumber'], 'total_capacity_gb': totalManagedSpaceGbs, 'free_capacity_gb': remainingManagedSpaceGbs}) location_info = ("%(arrayName)s#%(poolName)s#%(slo)s#%(workload)s" % {'arrayName': arrayInfo['SerialNumber'], 'poolName': arrayInfo['PoolName'], 'slo': arrayInfo['SLO'], 'workload': arrayInfo['Workload']}) return location_info, totalManagedSpaceGbs, remainingManagedSpaceGbs def retype(self, ctxt, volume, new_type, diff, host): """Migrate volume to another host using retype. :param ctxt: context :param volume: the volume object including the volume_type_id :param new_type: the new volume type. :param diff: Unused parameter. :param host: The host dict holding the relevant target(destination) information :returns: boolean -- True if retype succeeded, False if error """ volumeName = volume['name'] volumeStatus = volume['status'] LOG.info(_LI("Migrating using retype Volume: %(volume)s."), {'volume': volumeName}) extraSpecs = self._initial_setup(volume) self.conn = self._get_ecom_connection() volumeInstance = self._find_lun(volume) if volumeInstance is None: LOG.error(_LE("Volume %(name)s not found on the array. 
" "No volume to migrate using retype."), {'name': volumeName}) return False if extraSpecs[ISV3]: return self._slo_workload_migration(volumeInstance, volume, host, volumeName, volumeStatus, new_type, extraSpecs) else: return self._pool_migration(volumeInstance, volume, host, volumeName, volumeStatus, extraSpecs[FASTPOLICY], new_type, extraSpecs) def migrate_volume(self, ctxt, volume, host, new_type=None): """Migrate volume to another host. :param ctxt: context :param volume: the volume object including the volume_type_id :param host: the host dict holding the relevant target(destination) information :param new_type: None :returns: boolean -- Always returns True :returns: dict -- Empty dict {} """ LOG.warning(_LW("The VMAX plugin only supports Retype. " "If a pool based migration is necessary " "this will happen on a Retype " "From the command line: " "cinder --os-volume-api-version 2 retype " " --migration-policy on-demand")) return True, {} def _migrate_volume( self, volume, volumeInstance, targetPoolName, targetFastPolicyName, sourceFastPolicyName, extraSpecs, new_type=None): """Migrate volume to another host. :param volume: the volume object including the volume_type_id :param volumeInstance: the volume instance :param targetPoolName: the target poolName :param targetFastPolicyName: the target FAST policy name, can be None :param sourceFastPolicyName: the source FAST policy name, can be None :param extraSpecs: extra specifications :param new_type: None :returns: boolean -- True/False :returns: list -- empty list """ volumeName = volume['name'] storageSystemName = volumeInstance['SystemName'] sourcePoolInstanceName = self.utils.get_assoc_pool_from_volume( self.conn, volumeInstance.path) moved, rc = self._migrate_volume_from( volume, volumeInstance, targetPoolName, sourceFastPolicyName, extraSpecs) if moved is False and sourceFastPolicyName is not None: # Return the volume to the default source fast policy storage # group because the migrate was unsuccessful. LOG.warning(_LW( "Failed to migrate: %(volumeName)s from " "default source storage group " "for FAST policy: %(sourceFastPolicyName)s. " "Attempting cleanup... "), {'volumeName': volumeName, 'sourceFastPolicyName': sourceFastPolicyName}) if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume( self.conn, volumeInstance.path): self._migrate_cleanup(self.conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, extraSpecs) else: # Migrate was successful but still issues. self._migrate_rollback( self.conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, sourcePoolInstanceName, extraSpecs) return moved if targetFastPolicyName == 'None': targetFastPolicyName = None if moved is True and targetFastPolicyName is not None: if not self._migrate_volume_fast_target( volumeInstance, storageSystemName, targetFastPolicyName, volumeName, extraSpecs): LOG.warning(_LW( "Attempting a rollback of: %(volumeName)s to " "original pool %(sourcePoolInstanceName)s."), {'volumeName': volumeName, 'sourcePoolInstanceName': sourcePoolInstanceName}) self._migrate_rollback( self.conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, sourcePoolInstanceName, extraSpecs) if rc == 0: moved = True return moved def _migrate_rollback(self, conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, sourcePoolInstanceName, extraSpecs): """Full rollback. Failed on final step on adding migrated volume to new target default storage group for the target FAST policy. 
:param conn: connection info to ECOM :param volumeInstance: the volume instance :param storageSystemName: the storage system name :param sourceFastPolicyName: the source FAST policy name :param volumeName: the volume Name :param sourcePoolInstanceName: the instance name of the source pool :param extraSpecs: extra specifications """ LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."), {'volumeName': volumeName}) storageRelocationService = self.utils.find_storage_relocation_service( conn, storageSystemName) try: self.provision.migrate_volume_to_storage_pool( conn, storageRelocationService, volumeInstance.path, sourcePoolInstanceName, extraSpecs) except Exception: LOG.error(_LE( "Failed to return volume %(volumeName)s to " "original storage pool. Please contact your system " "administrator to return it to the correct location."), {'volumeName': volumeName}) if sourceFastPolicyName is not None: self.add_to_default_SG( conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, extraSpecs) def _migrate_cleanup(self, conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, extraSpecs): """If the migrate fails, put volume back to source FAST SG. :param conn: connection info to ECOM :param volumeInstance: the volume instance :param storageSystemName: the storage system name :param sourceFastPolicyName: the source FAST policy name :param volumeName: the volume Name :param extraSpecs: extra specifications :returns: boolean -- True/False """ LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."), {'volumeName': volumeName}) return_to_default = True controllerConfigurationService = ( self.utils.find_controller_configuration_service( conn, storageSystemName)) # Check to see what SG it is in. assocStorageGroupInstanceNames = ( self.utils.get_storage_groups_from_volume(conn, volumeInstance.path)) # This is the SG it should be in. defaultStorageGroupInstanceName = ( self.fast.get_policy_default_storage_group( conn, controllerConfigurationService, sourceFastPolicyName)) for assocStorageGroupInstanceName in assocStorageGroupInstanceNames: # It is in the incorrect storage group. if (assocStorageGroupInstanceName != defaultStorageGroupInstanceName): self.provision.remove_device_from_storage_group( conn, controllerConfigurationService, assocStorageGroupInstanceName, volumeInstance.path, volumeName, extraSpecs) else: # The volume is already in the default. return_to_default = False if return_to_default: self.add_to_default_SG( conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, extraSpecs) return return_to_default def _migrate_volume_fast_target( self, volumeInstance, storageSystemName, targetFastPolicyName, volumeName, extraSpecs): """If the target host is FAST enabled. If the target host is FAST enabled then we need to add it to the default storage group for that policy. 
:param volumeInstance: the volume instance :param storageSystemName: the storage system name :param targetFastPolicyName: the target fast policy name :param volumeName: the volume name :param extraSpecs: extra specifications :returns: boolean -- True/False """ falseRet = False LOG.info(_LI( "Adding volume: %(volumeName)s to default storage group " "for FAST policy: %(fastPolicyName)s."), {'volumeName': volumeName, 'fastPolicyName': targetFastPolicyName}) controllerConfigurationService = ( self.utils.find_controller_configuration_service( self.conn, storageSystemName)) defaultStorageGroupInstanceName = ( self.fast.get_or_create_default_storage_group( self.conn, controllerConfigurationService, targetFastPolicyName, volumeInstance, extraSpecs)) if defaultStorageGroupInstanceName is None: LOG.error(_LE( "Unable to create or get default storage group for FAST policy" ": %(fastPolicyName)s."), {'fastPolicyName': targetFastPolicyName}) return falseRet defaultStorageGroupInstanceName = ( self.fast.add_volume_to_default_storage_group_for_fast_policy( self.conn, controllerConfigurationService, volumeInstance, volumeName, targetFastPolicyName, extraSpecs)) if defaultStorageGroupInstanceName is None: LOG.error(_LE( "Failed to verify that volume was added to storage group for " "FAST policy: %(fastPolicyName)s."), {'fastPolicyName': targetFastPolicyName}) return falseRet return True def _migrate_volume_from(self, volume, volumeInstance, targetPoolName, sourceFastPolicyName, extraSpecs): """Check FAST policies and migrate from source pool. :param volume: the volume object including the volume_type_id :param volumeInstance: the volume instance :param targetPoolName: the target poolName :param sourceFastPolicyName: the source FAST policy name, can be None :param extraSpecs: extra specifications :returns: boolean -- True/False :returns: int -- the return code from migrate operation """ falseRet = (False, -1) volumeName = volume['name'] storageSystemName = volumeInstance['SystemName'] LOG.debug("sourceFastPolicyName is : %(sourceFastPolicyName)s.", {'sourceFastPolicyName': sourceFastPolicyName}) # If the source volume is FAST enabled it must first be removed # from the default storage group for that policy. if sourceFastPolicyName is not None: self.remove_from_default_SG( self.conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, extraSpecs) # Migrate from one pool to another. storageRelocationService = self.utils.find_storage_relocation_service( self.conn, storageSystemName) targetPoolInstanceName = self.utils.get_pool_by_name( self.conn, targetPoolName, storageSystemName) if targetPoolInstanceName is None: LOG.error(_LE( "Error finding target pool instance name for pool: " "%(targetPoolName)s."), {'targetPoolName': targetPoolName}) return falseRet try: rc = self.provision.migrate_volume_to_storage_pool( self.conn, storageRelocationService, volumeInstance.path, targetPoolInstanceName, extraSpecs) except Exception: # Rollback by deleting the volume if adding the volume to the # default storage group were to fail. LOG.exception(_LE( "Error migrating volume: %(volumename)s. " "to target pool %(targetPoolName)s."), {'volumename': volumeName, 'targetPoolName': targetPoolName}) return falseRet # Check that the volume is now migrated to the correct storage pool, # if it is terminate the migrate session. 
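        # The relocation job has returned; the code below re-reads the
        # volume's parent pool via get_assoc_pool_from_volume() and compares
        # CIM InstanceIDs (unique per pool instance) rather than display
        # names. Only when the volume has actually landed in the target pool
        # is the migrate session terminated; success additionally requires
        # rc == 0 from the relocation call above.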
foundPoolInstanceName = self.utils.get_assoc_pool_from_volume( self.conn, volumeInstance.path) if (foundPoolInstanceName is None or (foundPoolInstanceName['InstanceID'] != targetPoolInstanceName['InstanceID'])): LOG.error(_LE( "Volume : %(volumeName)s. was not successfully migrated to " "target pool %(targetPoolName)s."), {'volumeName': volumeName, 'targetPoolName': targetPoolName}) return falseRet else: LOG.debug("Terminating migration session on: %(volumeName)s.", {'volumeName': volumeName}) self.provision._terminate_migrate_session( self.conn, volumeInstance.path, extraSpecs) if rc == 0: moved = True return moved, rc def remove_from_default_SG( self, conn, volumeInstance, storageSystemName, sourceFastPolicyName, volumeName, extraSpecs): """For FAST, remove volume from default storage group. :param conn: connection info to ECOM :param volumeInstance: the volume instance :param storageSystemName: the storage system name :param sourceFastPolicyName: the source FAST policy name :param volumeName: the volume Name :param extraSpecs: extra specifications :raises: VolumeBackendAPIException """ controllerConfigurationService = ( self.utils.find_controller_configuration_service( conn, storageSystemName)) try: defaultStorageGroupInstanceName = ( self.masking.remove_device_from_default_storage_group( conn, controllerConfigurationService, volumeInstance.path, volumeName, sourceFastPolicyName, extraSpecs)) except Exception: exceptionMessage = (_( "Failed to remove: %(volumename)s. " "from the default storage group for " "FAST policy %(fastPolicyName)s.") % {'volumename': volumeName, 'fastPolicyName': sourceFastPolicyName}) LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) if defaultStorageGroupInstanceName is None: LOG.warning(_LW( "The volume: %(volumename)s " "was not first part of the default storage " "group for FAST policy %(fastPolicyName)s."), {'volumename': volumeName, 'fastPolicyName': sourceFastPolicyName}) def add_to_default_SG( self, conn, volumeInstance, storageSystemName, targetFastPolicyName, volumeName, extraSpecs): """For FAST, add volume to default storage group. :param conn: connection info to ECOM :param volumeInstance: the volume instance :param storageSystemName: the storage system name :param targetFastPolicyName: the target FAST policy name :param volumeName: the volume Name :param extraSpecs: extra specifications """ controllerConfigurationService = ( self.utils.find_controller_configuration_service( conn, storageSystemName)) assocDefaultStorageGroupName = ( self.fast .add_volume_to_default_storage_group_for_fast_policy( conn, controllerConfigurationService, volumeInstance, volumeName, targetFastPolicyName, extraSpecs)) if assocDefaultStorageGroupName is None: LOG.error(_LE( "Failed to add %(volumeName)s " "to default storage group for fast policy " "%(fastPolicyName)s."), {'volumeName': volumeName, 'fastPolicyName': targetFastPolicyName}) def _is_valid_for_storage_assisted_migration_v3( self, volumeInstanceName, host, sourceArraySerialNumber, sourcePoolName, volumeName, volumeStatus, sgName): """Check if volume is suitable for storage assisted (pool) migration. 
        :param volumeInstanceName: the volume instance id
        :param host: the host object
        :param sourceArraySerialNumber: the array serial number of
            the original volume
        :param sourcePoolName: the pool name of the original volume
        :param volumeName: the name of the volume to be migrated
        :param volumeStatus: the status of the volume
        :param sgName: storage group name
        :returns: boolean -- True/False
        :returns: string -- targetSlo
        :returns: string -- targetWorkload
        """
        falseRet = (False, None, None)
        if 'location_info' not in host['capabilities']:
            LOG.error(_LE('Error getting array, pool, SLO and workload.'))
            return falseRet
        info = host['capabilities']['location_info']

        LOG.debug("Location info is: %(info)s.",
                  {'info': info})

        try:
            infoDetail = info.split('#')
            targetArraySerialNumber = infoDetail[0]
            targetPoolName = infoDetail[1]
            targetSlo = infoDetail[2]
            targetWorkload = infoDetail[3]
        except IndexError:
            # location_info did not contain all four '#'-separated fields.
            LOG.error(_LE("Error parsing array, pool, SLO and workload."))
            return falseRet

        if targetArraySerialNumber not in sourceArraySerialNumber:
            LOG.error(_LE(
                "The source array: %(sourceArraySerialNumber)s does not "
                "match the target array: %(targetArraySerialNumber)s, "
                "skipping storage-assisted migration."),
                {'sourceArraySerialNumber': sourceArraySerialNumber,
                 'targetArraySerialNumber': targetArraySerialNumber})
            return falseRet

        if targetPoolName not in sourcePoolName:
            LOG.error(_LE(
                "Only SLO/workload migration within the same SRP Pool "
                "is supported in this version. "
                "The source pool: %(sourcePoolName)s does not "
                "match the target pool: %(targetPoolName)s. "
                "Skipping storage-assisted migration."),
                {'sourcePoolName': sourcePoolName,
                 'targetPoolName': targetPoolName})
            return falseRet

        foundStorageGroupInstanceName = (
            self.utils.get_storage_group_from_volume(
                self.conn, volumeInstanceName, sgName))
        if foundStorageGroupInstanceName is None:
            LOG.warning(_LW(
                "Volume: %(volumeName)s does not currently "
                "belong to any storage group."),
                {'volumeName': volumeName})
        else:
            storageGroupInstance = self.conn.GetInstance(
                foundStorageGroupInstanceName)
            emcFastSetting = self.utils._get_fast_settings_from_storage_group(
                storageGroupInstance)
            targetCombination = ("%(targetSlo)s+%(targetWorkload)s"
                                 % {'targetSlo': targetSlo,
                                    'targetWorkload': targetWorkload})
            if targetCombination in emcFastSetting:
                LOG.error(_LE(
                    "No action required. Volume: %(volumeName)s is "
                    "already part of slo/workload combination: "
                    "%(targetCombination)s."),
                    {'volumeName': volumeName,
                     'targetCombination': targetCombination})
                return falseRet

        return (True, targetSlo, targetWorkload)

    def _is_valid_for_storage_assisted_migration(
            self, volumeInstanceName, host, sourceArraySerialNumber,
            volumeName, volumeStatus):
        """Check if volume is suitable for storage assisted (pool) migration.
        :param volumeInstanceName: the volume instance id
        :param host: the host object
        :param sourceArraySerialNumber: the array serial number of
            the original volume
        :param volumeName: the name of the volume to be migrated
        :param volumeStatus: the status of the volume, e.g. 'available'
        :returns: boolean -- True/False
        :returns: string -- targetPool
        :returns: string -- targetFastPolicy
        """
        falseRet = (False, None, None)
        if 'location_info' not in host['capabilities']:
            LOG.error(_LE("Error getting target pool name and array."))
            return falseRet
        info = host['capabilities']['location_info']

        LOG.debug("Location info is: %(info)s.",
                  {'info': info})

        try:
            infoDetail = info.split('#')
            targetArraySerialNumber = infoDetail[0]
            targetPoolName = infoDetail[1]
            targetFastPolicy = infoDetail[2]
        except IndexError:
            # location_info did not contain all three '#'-separated fields.
            LOG.error(_LE(
                "Error parsing target pool name, array, and fast policy."))
            return falseRet

        if targetArraySerialNumber not in sourceArraySerialNumber:
            LOG.error(_LE(
                "The source array: %(sourceArraySerialNumber)s does not "
                "match the target array: %(targetArraySerialNumber)s, "
                "skipping storage-assisted migration."),
                {'sourceArraySerialNumber': sourceArraySerialNumber,
                 'targetArraySerialNumber': targetArraySerialNumber})
            return falseRet

        # Get the pool from the source array and check that it is different
        # from the pool in the target array.
        assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
            self.conn, volumeInstanceName)
        assocPoolInstance = self.conn.GetInstance(
            assocPoolInstanceName)
        if assocPoolInstance['ElementName'] == targetPoolName:
            LOG.error(_LE(
                "No action required. Volume: %(volumeName)s is "
                "already part of pool: %(pool)s."),
                {'volumeName': volumeName,
                 'pool': targetPoolName})
            return falseRet

        LOG.info(_LI("Volume status is: %s."), volumeStatus)
        if (host['capabilities']['storage_protocol'] != self.protocol and
                (volumeStatus != 'available' and volumeStatus != 'retyping')):
            LOG.error(_LE(
                "Only available volumes can be migrated between "
                "different protocols."))
            return falseRet

        return (True, targetPoolName, targetFastPolicy)

    def _set_config_file_and_get_extra_specs(self, volume, volumeTypeId=None):
        """Given the volume object get the associated volumetype.

        Given the volume object get the associated volumetype and the
        extra specs associated with it.
        Based on the name of the config group, register the config file.

        :param volume: the volume object including the volume_type_id
        :param volumeTypeId: Optional override of volume['volume_type_id']
        :returns: dict -- the extra specs dict
        :returns: string -- configuration file
        """
        extraSpecs = self.utils.get_volumetype_extraspecs(volume, volumeTypeId)
        configGroup = None

        # If there are no extra specs then the default case is assumed.
        if extraSpecs:
            configGroup = self.configuration.config_group
        configurationFile = self._register_config_file_from_config_group(
            configGroup)

        return extraSpecs, configurationFile

    def _get_ecom_connection(self):
        """Get the ecom connection.
:returns: pywbem.WBEMConnection -- conn, the ecom connection :raises: VolumeBackendAPIException """ ecomx509 = None if self.ecomUseSSL: if (self.configuration.safe_get('driver_client_cert_key') and self.configuration.safe_get('driver_client_cert')): ecomx509 = {"key_file": self.configuration.safe_get( 'driver_client_cert_key'), "cert_file": self.configuration.safe_get( 'driver_client_cert')} pywbem.cim_http.wbem_request = emc_vmax_https.wbem_request conn = pywbem.WBEMConnection( self.url, (self.user, self.passwd), default_namespace='root/emc', x509=ecomx509, ca_certs=self.configuration.safe_get('driver_ssl_cert_path'), no_verification=not self.configuration.safe_get( 'driver_ssl_cert_verify')) else: conn = pywbem.WBEMConnection( self.url, (self.user, self.passwd), default_namespace='root/emc') if conn is None: exception_message = (_("Cannot connect to ECOM server.")) raise exception.VolumeBackendAPIException(data=exception_message) return conn def _find_pool_in_array(self, arrayStr, poolNameInStr, isV3): """Find a pool based on the pool name on a given array. :param arrayStr: the array Serial number (String) :param poolNameInStr: the name of the poolname (String) :param isv3: True/False :returns: foundPoolInstanceName - the CIM Instance Name of the Pool :returns: string -- systemNameStr :raises: VolumeBackendAPIException """ foundPoolInstanceName = None systemNameStr = None storageSystemInstanceName = self.utils.find_storageSystem( self.conn, arrayStr) if isV3: foundPoolInstanceName, systemNameStr = ( self.utils.get_pool_and_system_name_v3( self.conn, storageSystemInstanceName, poolNameInStr)) else: foundPoolInstanceName, systemNameStr = ( self.utils.get_pool_and_system_name_v2( self.conn, storageSystemInstanceName, poolNameInStr)) if foundPoolInstanceName is None: exceptionMessage = (_("Pool %(poolNameInStr)s is not found.") % {'poolNameInStr': poolNameInStr}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) if systemNameStr is None: exception_message = (_("Storage system not found for pool " "%(poolNameInStr)s.") % {'poolNameInStr': poolNameInStr}) LOG.error(exception_message) raise exception.VolumeBackendAPIException(data=exception_message) LOG.debug("Pool: %(pool)s SystemName: %(systemname)s.", {'pool': foundPoolInstanceName, 'systemname': systemNameStr}) return foundPoolInstanceName, systemNameStr def _find_lun(self, volume): """Given the volume get the instance from it. :param volume: volume object :returns: foundVolumeinstance """ foundVolumeinstance = None volumename = volume['name'] loc = volume['provider_location'] if self.conn is None: self.conn = self._get_ecom_connection() if isinstance(loc, six.string_types): name = ast.literal_eval(loc) keys = name['keybindings'] systemName = keys['SystemName'] prefix1 = 'SYMMETRIX+' prefix2 = 'SYMMETRIX-+-' smiversion = self.utils.get_smi_version(self.conn) if smiversion > SMI_VERSION_8 and prefix1 in systemName: keys['SystemName'] = systemName.replace(prefix1, prefix2) name['keybindings'] = keys instancename = self.utils.get_instance_name( name['classname'], name['keybindings']) # Allow for an external app to delete the volume. 
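            # An illustrative provider_location value (placeholder IDs) that
            # the ast.literal_eval() call above turns back into a dict -- the
            # CIM class name plus the key bindings identifying the volume:
            #
            #   "{'classname': 'Symm_StorageVolume',
            #     'keybindings': {'CreationClassName': 'Symm_StorageVolume',
            #                     'SystemName': 'SYMMETRIX+000000000001',
            #                     'DeviceID': '00001',
            #                     'SystemCreationClassName':
            #                         'Symm_StorageSystem'}}"
            #
            # On newer SMI-S providers (get_smi_version() > SMI_VERSION_8)
            # the 'SYMMETRIX+' prefix in SystemName is first rewritten to
            # 'SYMMETRIX-+-', as handled above.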
LOG.debug("Volume instance name: %(in)s", {'in': instancename}) try: foundVolumeinstance = self.conn.GetInstance(instancename) except Exception: foundVolumeinstance = None if foundVolumeinstance is None: LOG.debug("Volume %(volumename)s not found on the array.", {'volumename': volumename}) else: LOG.debug("Volume name: %(volumename)s Volume instance: " "%(foundVolumeinstance)s.", {'volumename': volumename, 'foundVolumeinstance': foundVolumeinstance}) return foundVolumeinstance def _find_storage_sync_sv_sv(self, snapshot, volume, extraSpecs, waitforsync=True): """Find the storage synchronized name. :param snapshot: snapshot object :param volume: volume object :param extraSpecs: extra specifications :param waitforsync: boolean -- Wait for Solutions Enabler sync. :returns: string -- foundsyncname :returns: string -- storage_system """ snapshotname = snapshot['name'] volumename = volume['name'] LOG.debug("Source: %(volumename)s Target: %(snapshotname)s.", {'volumename': volumename, 'snapshotname': snapshotname}) snapshot_instance = self._find_lun(snapshot) volume_instance = self._find_lun(volume) storage_system = volume_instance['SystemName'] classname = 'SE_StorageSynchronized_SV_SV' bindings = {'SyncedElement': snapshot_instance.path, 'SystemElement': volume_instance.path} foundsyncname = self.utils.get_instance_name(classname, bindings) if foundsyncname is None: LOG.debug( "Source: %(volumename)s Target: %(snapshotname)s. " "Storage Synchronized not found.", {'volumename': volumename, 'snapshotname': snapshotname}) else: LOG.debug("Storage system: %(storage_system)s. " "Storage Synchronized instance: %(sync)s.", {'storage_system': storage_system, 'sync': foundsyncname}) # Wait for SE_StorageSynchronized_SV_SV to be fully synced. if waitforsync: self.utils.wait_for_sync(self.conn, foundsyncname, extraSpecs) return foundsyncname, storage_system def _find_initiator_names(self, connector): foundinitiatornames = [] iscsi = 'iscsi' fc = 'fc' name = 'initiator name' if self.protocol.lower() == iscsi and connector['initiator']: foundinitiatornames.append(connector['initiator']) elif self.protocol.lower() == fc and connector['wwpns']: for wwn in connector['wwpns']: foundinitiatornames.append(wwn) name = 'world wide port names' if foundinitiatornames is None or len(foundinitiatornames) == 0: msg = (_("Error finding %s.") % name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Found %(name)s: %(initiator)s.", {'name': name, 'initiator': foundinitiatornames}) return foundinitiatornames def _wrap_find_device_number(self, volume, host): return self.find_device_number(volume, host) def find_device_number(self, volume, host): """Given the volume dict find a device number. Find a device number that a host can see for a volume. 
:param volume: the volume dict :param host: host from connector :returns: dict -- the data dict """ maskedvols = [] data = {} foundController = None foundNumDeviceNumber = None foundMaskingViewName = None volumeName = volume['name'] volumeInstance = self._find_lun(volume) storageSystemName = volumeInstance['SystemName'] unitnames = self.conn.ReferenceNames( volumeInstance.path, ResultClass='CIM_ProtocolControllerForUnit') for unitname in unitnames: controller = unitname['Antecedent'] classname = controller['CreationClassName'] index = classname.find('Symm_LunMaskingView') if index > -1: unitinstance = self.conn.GetInstance(unitname, LocalOnly=False) numDeviceNumber = int(unitinstance['DeviceNumber'], 16) foundNumDeviceNumber = numDeviceNumber foundController = controller controllerInstance = self.conn.GetInstance(controller, LocalOnly=False) propertiesList = controllerInstance.properties.items() for properties in propertiesList: if properties[0] == 'ElementName': cimProperties = properties[1] foundMaskingViewName = cimProperties.value devicedict = {'hostlunid': foundNumDeviceNumber, 'storagesystem': storageSystemName, 'maskingview': foundMaskingViewName, 'controller': foundController} maskedvols.append(devicedict) if not maskedvols: LOG.debug( "Device number not found for volume " "%(volumeName)s %(volumeInstance)s.", {'volumeName': volumeName, 'volumeInstance': volumeInstance.path}) else: host = self.utils.get_host_short_name(host) hoststr = ("-%(host)s-" % {'host': host}) for maskedvol in maskedvols: if hoststr.lower() in maskedvol['maskingview'].lower(): data = maskedvol break if not data: LOG.warning(_LW( "Volume is masked but not to host %(host)s as " "expected. Returning empty dictionary."), {'host': hoststr}) LOG.debug("Device info: %(data)s.", {'data': data}) return data def get_target_wwns(self, storageSystem, connector): """Find target WWNs. :param storageSystem: the storage system name :param connector: the connector dict :returns: list -- targetWwns, the target WWN list :raises: VolumeBackendAPIException """ targetWwns = [] storageHardwareService = self.utils.find_storage_hardwareid_service( self.conn, storageSystem) hardwareIdInstances = self._find_storage_hardwareids( connector, storageHardwareService) LOG.debug( "EMCGetTargetEndpoints: Service: %(service)s, " "Storage HardwareIDs: %(hardwareIds)s.", {'service': storageHardwareService, 'hardwareIds': hardwareIdInstances}) for hardwareIdInstance in hardwareIdInstances: LOG.debug("HardwareID instance is: %(hardwareIdInstance)s.", {'hardwareIdInstance': hardwareIdInstance}) try: targetEndpoints = ( self.utils.get_target_endpoints( self.conn, hardwareIdInstance)) except Exception: errorMessage = (_( "Unable to get target endpoints for hardwareId " "%(hardwareIdInstance)s.") % {'hardwareIdInstance': hardwareIdInstance}) LOG.exception(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) if targetEndpoints: LOG.debug("There are %(len)lu endpoints.", {'len': len(targetEndpoints)}) for targetendpoint in targetEndpoints: wwn = targetendpoint['Name'] # Add target wwn to the list if it is not already there. 
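                        # (Equivalent to 'wwn not in targetWwns'; appending
                        # only unseen WWNs de-duplicates while preserving
                        # first-seen order, which building a set would not.)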
    def get_target_wwns(self, storageSystem, connector):
        """Find target WWNs.

        :param storageSystem: the storage system name
        :param connector: the connector dict
        :returns: list -- targetWwns, the target WWN list
        :raises: VolumeBackendAPIException
        """
        targetWwns = []

        storageHardwareService = self.utils.find_storage_hardwareid_service(
            self.conn, storageSystem)

        hardwareIdInstances = self._find_storage_hardwareids(
            connector, storageHardwareService)

        LOG.debug(
            "EMCGetTargetEndpoints: Service: %(service)s, "
            "Storage HardwareIDs: %(hardwareIds)s.",
            {'service': storageHardwareService,
             'hardwareIds': hardwareIdInstances})

        for hardwareIdInstance in hardwareIdInstances:
            LOG.debug("HardwareID instance is: %(hardwareIdInstance)s.",
                      {'hardwareIdInstance': hardwareIdInstance})
            try:
                targetEndpoints = (
                    self.utils.get_target_endpoints(
                        self.conn, hardwareIdInstance))
            except Exception:
                errorMessage = (_(
                    "Unable to get target endpoints for hardwareId "
                    "%(hardwareIdInstance)s.")
                    % {'hardwareIdInstance': hardwareIdInstance})
                LOG.exception(errorMessage)
                raise exception.VolumeBackendAPIException(data=errorMessage)

            if targetEndpoints:
                LOG.debug("There are %(len)lu endpoints.",
                          {'len': len(targetEndpoints)})
                for targetendpoint in targetEndpoints:
                    wwn = targetendpoint['Name']
                    # Add target wwn to the list if it is not already there.
                    if not any(d == wwn for d in targetWwns):
                        targetWwns.append(wwn)
            else:
                LOG.error(_LE(
                    "Target end points do not exist for hardware Id: "
                    "%(hardwareIdInstance)s."),
                    {'hardwareIdInstance': hardwareIdInstance})

        LOG.debug("Target WWNs: %(targetWwns)s.",
                  {'targetWwns': targetWwns})

        return targetWwns

    def _find_storage_hardwareids(
            self, connector, hardwareIdManagementService):
        """Find the storage hardware ID instances.

        :param connector: the connector dict
        :param hardwareIdManagementService: the storage Hardware
            management service
        :returns: list -- the list of storage hardware ID instances
        """
        foundHardwareIdList = []
        wwpns = self._find_initiator_names(connector)

        hardwareIdInstances = (
            self.utils.get_hardware_id_instances_from_array(
                self.conn, hardwareIdManagementService))
        for hardwareIdInstance in hardwareIdInstances:
            storageId = hardwareIdInstance['StorageID']
            for wwpn in wwpns:
                if wwpn.lower() == storageId.lower():
                    # Check that the found hardwareId has not been
                    # deleted. If it has, we don't want to add it to the
                    # list.
                    instance = self.utils.get_existing_instance(
                        self.conn, hardwareIdInstance.path)
                    if instance is None:
                        # HardwareId doesn't exist any more. Skip it.
                        break
                    foundHardwareIdList.append(hardwareIdInstance.path)
                    break

        LOG.debug("Storage Hardware IDs for %(wwpns)s are "
                  "%(foundInstances)s.",
                  {'wwpns': wwpns,
                   'foundInstances': foundHardwareIdList})

        return foundHardwareIdList

    def _register_config_file_from_config_group(self, configGroupName):
        """Given the config group name register the file.

        :param configGroupName: the config group name
        :returns: string -- configurationFile - name of the configuration
            file
        """
        if configGroupName is None:
            return CINDER_EMC_CONFIG_FILE
        if hasattr(self.configuration, 'cinder_emc_config_file'):
            configurationFile = self.configuration.cinder_emc_config_file
        else:
            configurationFile = (
                ("%(prefix)s%(configGroupName)s%(postfix)s"
                 % {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX,
                    'configGroupName': configGroupName,
                    'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX}))

        # The file saved in self.configuration may not be the correct one,
        # double check.
        if configGroupName not in configurationFile:
            configurationFile = (
                ("%(prefix)s%(configGroupName)s%(postfix)s"
                 % {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX,
                    'configGroupName': configGroupName,
                    'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX}))

        if os.path.isfile(configurationFile):
            LOG.debug("Configuration file: %(configurationFile)s exists.",
                      {'configurationFile': configurationFile})
        else:
            exceptionMessage = (_(
                "Configuration file %(configurationFile)s does not exist.")
                % {'configurationFile': configurationFile})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return configurationFile
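    # Illustrative sketch: assuming the module-level defaults are
    # CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_' and
    # CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml' (they are defined elsewhere
    # in this file), a backend config group named CONF_GROUP_ISCSI resolves
    # to:
    #
    #     /etc/cinder/cinder_emc_config_CONF_GROUP_ISCSI.xml
    #
    # and a backend with no config group falls back to
    # CINDER_EMC_CONFIG_FILE.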
    def _set_ecom_credentials(self, arrayInfo):
        """Given the array record set the ecom credentials.

        :param arrayInfo: record
        :raises: VolumeBackendAPIException
        """
        ip = arrayInfo['EcomServerIp']
        port = arrayInfo['EcomServerPort']
        self.user = arrayInfo['EcomUserName']
        self.passwd = arrayInfo['EcomPassword']
        self.ecomUseSSL = self.configuration.safe_get('driver_use_ssl')
        ip_port = ("%(ip)s:%(port)s"
                   % {'ip': ip,
                      'port': port})
        if self.ecomUseSSL:
            self.url = ("https://%(ip_port)s"
                        % {'ip_port': ip_port})
        else:
            self.url = ("http://%(ip_port)s"
                        % {'ip_port': ip_port})
        self.conn = self._get_ecom_connection()

    def _initial_setup(self, volume, volumeTypeId=None):
        """Necessary setup to accumulate the relevant information.

        The volume object has a host in which we can parse the config
        group name. The config group name is the key to our EMC
        configuration file. The emc configuration file contains pool name
        and array name which are mandatory fields. FastPolicy is optional.
        StripedMetaCount is an extra spec that determines whether the
        composite volume should be concatenated or striped.

        :param volume: the volume Object
        :param volumeTypeId: Optional override of volume['volume_type_id']
        :returns: dict -- extra spec dict
        :raises: VolumeBackendAPIException
        """
        try:
            extraSpecs, configurationFile = (
                self._set_config_file_and_get_extra_specs(
                    volume, volumeTypeId))

            pool = self._validate_pool(volume)
            LOG.debug("Pool returned is %(pool)s.",
                      {'pool': pool})
            arrayInfo = self.utils.parse_file_to_get_array_map(
                configurationFile)
            poolRecord = self.utils.extract_record(arrayInfo, pool)

            if not poolRecord:
                exceptionMessage = (_(
                    "Unable to get corresponding record for pool."))
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

            self._set_ecom_credentials(poolRecord)
            isV3 = self.utils.isArrayV3(
                self.conn, poolRecord['SerialNumber'])

            if isV3:
                extraSpecs = self._set_v3_extra_specs(extraSpecs,
                                                      poolRecord)
            else:
                # V2 extra specs
                extraSpecs = self._set_v2_extra_specs(extraSpecs,
                                                      poolRecord)
        except Exception:
            import sys
            exceptionMessage = (_(
                "Unable to get configuration information necessary to "
                "create a volume: %(errorMessage)s.")
                % {'errorMessage': sys.exc_info()[1]})
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return extraSpecs
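    # Illustrative sketch: for a V3 (VMAX3) pool record, _initial_setup()
    # returns an extra-specs dict keyed by the module-level constants
    # (POOL, ARRAY, SLO, WORKLOAD, ISV3, PORTGROUPNAME, ...); the values
    # below are hypothetical:
    #
    #     {POOL: 'SRP_1', ARRAY: '000196700531', SLO: 'Bronze',
    #      WORKLOAD: 'DSS', ISV3: True, PORTGROUPNAME: 'OS-PG'}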
    def _get_pool_and_storage_system(self, extraSpecs):
        """Given the extra specs get the pool and storage system name.

        :param extraSpecs: extra specifications
        :returns: poolInstanceName The pool instance name
        :returns: string -- the storage system name
        :raises: VolumeBackendAPIException
        """
        try:
            array = extraSpecs[ARRAY]
            poolInstanceName, storageSystemStr = self._find_pool_in_array(
                array, extraSpecs[POOL], extraSpecs[ISV3])
        except Exception:
            exceptionMessage = (_(
                "You must supply an array in your EMC configuration "
                "file."))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        if poolInstanceName is None or storageSystemStr is None:
            exceptionMessage = (_(
                "Cannot get necessary pool or storage system "
                "information."))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return poolInstanceName, storageSystemStr

    def _populate_masking_dict(self, volume, connector, extraSpecs):
        """Get all the names of the maskingView and subComponents.

        :param volume: the volume object
        :param connector: the connector object
        :param extraSpecs: extra specifications
        :returns: dict -- a dictionary with masking view information
        """
        maskingViewDict = {}
        hostName = connector['host']
        uniqueName = self.utils.generate_unique_trunc_pool(extraSpecs[POOL])
        isV3 = extraSpecs[ISV3]
        maskingViewDict['isV3'] = isV3
        protocol = self.utils.get_short_protocol_type(self.protocol)
        shortHostName = self.utils.get_host_short_name(hostName)
        if isV3:
            slo = extraSpecs[SLO]
            workload = extraSpecs[WORKLOAD]
            maskingViewDict['slo'] = slo
            maskingViewDict['workload'] = workload
            maskingViewDict['pool'] = uniqueName
            prefix = (
                ("OS-%(shortHostName)s-%(poolName)s-%(slo)s-%(workload)s"
                 % {'shortHostName': shortHostName,
                    'poolName': uniqueName,
                    'slo': slo,
                    'workload': workload}))
        else:
            maskingViewDict['fastPolicy'] = extraSpecs[FASTPOLICY]
            if maskingViewDict['fastPolicy']:
                uniqueName = self.utils.generate_unique_trunc_fastpolicy(
                    maskingViewDict['fastPolicy']) + '-FP'
            prefix = (
                ("OS-%(shortHostName)s-%(poolName)s-%(protocol)s"
                 % {'shortHostName': shortHostName,
                    'poolName': uniqueName,
                    'protocol': protocol}))

        maskingViewDict['sgGroupName'] = ("%(prefix)s-SG"
                                          % {'prefix': prefix})
        maskingViewDict['maskingViewName'] = ("%(prefix)s-MV"
                                              % {'prefix': prefix})
        volumeName = volume['name']
        volumeInstance = self._find_lun(volume)
        storageSystemName = volumeInstance['SystemName']

        maskingViewDict['controllerConfigService'] = (
            self.utils.find_controller_configuration_service(
                self.conn, storageSystemName))
        # The portGroup is taken from the EMC XML config file.
        maskingViewDict['pgGroupName'] = extraSpecs[PORTGROUPNAME]
        maskingViewDict['igGroupName'] = (
            ("OS-%(shortHostName)s-%(protocol)s-IG"
             % {'shortHostName': shortHostName,
                'protocol': protocol}))
        maskingViewDict['connector'] = connector
        maskingViewDict['volumeInstance'] = volumeInstance
        maskingViewDict['volumeName'] = volumeName
        maskingViewDict['storageSystemName'] = storageSystemName

        return maskingViewDict
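    # Illustrative sketch: for a V3 volume on host "myhost" with pool
    # SRP_1, SLO Bronze and workload DSS over iSCSI (and assuming
    # get_short_protocol_type() maps iSCSI to 'I'), the patterns above
    # yield names like:
    #
    #     prefix          = 'OS-myhost-SRP_1-Bronze-DSS'
    #     sgGroupName     = 'OS-myhost-SRP_1-Bronze-DSS-SG'
    #     maskingViewName = 'OS-myhost-SRP_1-Bronze-DSS-MV'
    #     igGroupName     = 'OS-myhost-I-IG'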
    def _add_volume_to_default_storage_group_on_create(
            self, volumeDict, volumeName, storageConfigService,
            storageSystemName, fastPolicyName, extraSpecs):
        """Add the volume to the default storage group for that policy.

        On a create, when a FAST policy is enabled, add the volume to the
        default storage group for that policy. If it fails do the
        necessary rollback.

        :param volumeDict: the volume dictionary
        :param volumeName: the volume name (String)
        :param storageConfigService: the storage configuration service
        :param storageSystemName: the storage system name (String)
        :param fastPolicyName: the fast policy name (String)
        :param extraSpecs: extra specifications
        :raises: VolumeBackendAPIException
        """
        try:
            volumeInstance = self.utils.find_volume_instance(
                self.conn, volumeDict, volumeName)
            controllerConfigurationService = (
                self.utils.find_controller_configuration_service(
                    self.conn, storageSystemName))
            defaultSgName = self.fast.format_default_sg_string(
                fastPolicyName)

            self.fast.add_volume_to_default_storage_group_for_fast_policy(
                self.conn, controllerConfigurationService, volumeInstance,
                volumeName, fastPolicyName, extraSpecs)
            foundStorageGroupInstanceName = (
                self.utils.get_storage_group_from_volume(
                    self.conn, volumeInstance.path, defaultSgName))

            if foundStorageGroupInstanceName is None:
                exceptionMessage = (_(
                    "Error adding Volume: %(volumeName)s "
                    "with instance path: %(volumeInstancePath)s.")
                    % {'volumeName': volumeName,
                       'volumeInstancePath': volumeInstance.path})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        except Exception:
            # Roll back by deleting the volume if adding it to the
            # default storage group fails.
            errorMessage = (_(
                "Rolling back %(volumeName)s by deleting it.")
                % {'volumeName': volumeName})
            LOG.exception(errorMessage)
            self.provision.delete_volume_from_pool(
                self.conn, storageConfigService, volumeInstance.path,
                volumeName, extraSpecs)
            raise exception.VolumeBackendAPIException(data=errorMessage)

    def _create_and_get_unbound_volume(
            self, conn, storageConfigService, compositeVolumeInstanceName,
            additionalSize, extraSpecs):
        """Create an unbound volume.

        Create an unbound volume so it is in the correct state to add to
        a composite volume.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage config service instance
            name
        :param compositeVolumeInstanceName: the composite volume instance
            name
        :param additionalSize: the size you want to increase the volume by
        :param extraSpecs: extra specifications
        :returns: volume instance modifiedCompositeVolumeInstance
        """
        assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
            conn, compositeVolumeInstanceName)
        appendVolumeInstance = self._create_and_get_volume_instance(
            conn, storageConfigService, assocPoolInstanceName,
            'appendVolume', additionalSize, extraSpecs)
        isVolumeBound = self.utils.is_volume_bound_to_pool(
            conn, appendVolumeInstance)

        if 'True' in isVolumeBound:
            appendVolumeInstance = (
                self._unbind_and_get_volume_from_storage_pool(
                    conn, storageConfigService,
                    appendVolumeInstance.path, 'appendVolume',
                    extraSpecs))

        return appendVolumeInstance
    def _create_and_get_volume_instance(
            self, conn, storageConfigService, poolInstanceName,
            volumeName, volumeSize, extraSpecs):
        """Create and get a new volume.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage config service instance
            name
        :param poolInstanceName: the pool instance name
        :param volumeName: the volume name
        :param volumeSize: the size to create the volume
        :param extraSpecs: extra specifications
        :returns: volumeInstance -- the volume instance
        """
        volumeDict, _rc = (
            self.provision.create_volume_from_pool(
                self.conn, storageConfigService, volumeName,
                poolInstanceName, volumeSize, extraSpecs))
        volumeInstance = self.utils.find_volume_instance(
            self.conn, volumeDict, volumeName)
        return volumeInstance

    def _unbind_and_get_volume_from_storage_pool(
            self, conn, storageConfigService,
            volumeInstanceName, volumeName, extraSpecs):
        """Unbind a volume from a pool and return the unbound volume.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage config service instance
            name
        :param volumeInstanceName: the volume instance name
        :param volumeName: string the volumeName
        :param extraSpecs: extra specifications
        :returns: unboundVolumeInstance -- the unbound volume instance
        """
        _rc, _job = (
            self.provision.unbind_volume_from_storage_pool(
                conn, storageConfigService, volumeInstanceName,
                volumeName, extraSpecs))
        # Check that the volume is unbound.
        volumeInstance = conn.GetInstance(volumeInstanceName)
        isVolumeBound = self.utils.is_volume_bound_to_pool(
            conn, volumeInstance)
        if 'False' not in isVolumeBound:
            exceptionMessage = (_(
                "Failed to unbind volume %(volume)s")
                % {'volume': volumeInstanceName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return volumeInstance

    def _modify_and_get_composite_volume_instance(
            self, conn, elementCompositionServiceInstanceName,
            volumeInstance, appendVolumeInstanceName, volumeName,
            compositeType, extraSpecs):
        """Given an existing composite volume add a new composite volume
        to it.

        :param conn: the connection information to the ecom server
        :param elementCompositionServiceInstanceName: the storage element
            composition service instance name
        :param volumeInstance: the volume instance
        :param appendVolumeInstanceName: the appended volume instance name
        :param volumeName: the volume name
        :param compositeType: concatenated
        :param extraSpecs: extra specifications
        :returns: int -- the return code
        :returns: dict -- modifiedVolumeDict - the modified volume dict
        """
        isComposite = self.utils.check_if_volume_is_composite(
            self.conn, volumeInstance)
        if 'True' in isComposite:
            rc, job = self.provision.modify_composite_volume(
                conn, elementCompositionServiceInstanceName,
                volumeInstance.path, appendVolumeInstanceName, extraSpecs)
        elif 'False' in isComposite:
            rc, job = self.provision.create_new_composite_volume(
                conn, elementCompositionServiceInstanceName,
                volumeInstance.path, appendVolumeInstanceName,
                compositeType, extraSpecs)
        else:
            exceptionMessage = (_(
                "Unable to determine whether %(volumeName)s is "
                "composite or not.")
                % {'volumeName': volumeName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

        modifiedVolumeDict = self.provision.get_volume_dict_from_job(
            conn, job['Job'])

        return rc, modifiedVolumeDict
    def _get_or_create_default_storage_group(
            self, conn, storageSystemName, volumeDict, volumeName,
            fastPolicyName, extraSpecs):
        """Get or create a default storage group for a fast policy.

        :param conn: the connection information to the ecom server
        :param storageSystemName: the storage system name
        :param volumeDict: the volume dictionary
        :param volumeName: the volume name
        :param fastPolicyName: the fast policy name
        :param extraSpecs: extra specifications
        :returns: defaultStorageGroupInstanceName
        """
        controllerConfigService = (
            self.utils.find_controller_configuration_service(
                self.conn, storageSystemName))

        volumeInstance = self.utils.find_volume_instance(
            self.conn, volumeDict, volumeName)
        defaultStorageGroupInstanceName = (
            self.fast.get_or_create_default_storage_group(
                self.conn, controllerConfigService, fastPolicyName,
                volumeInstance, extraSpecs))
        return defaultStorageGroupInstanceName

    def _create_cloned_volume(
            self, cloneVolume, sourceVolume, extraSpecs, isSnapshot=False):
        """Create a clone volume from the source volume.

        :param cloneVolume: clone volume
        :param sourceVolume: source of the clone volume
        :param extraSpecs: extra specs
        :param isSnapshot: boolean -- Defaults to False
        :returns: dict -- cloneDict the cloned volume dictionary
        :raises: VolumeBackendAPIException
        """
        sourceName = sourceVolume['name']
        cloneName = cloneVolume['name']

        LOG.info(_LI(
            "Create a replica from Volume: Clone Volume: %(cloneName)s "
            "Source Volume: %(sourceName)s."),
            {'cloneName': cloneName,
             'sourceName': sourceName})

        self.conn = self._get_ecom_connection()
        sourceInstance = self._find_lun(sourceVolume)
        storageSystem = sourceInstance['SystemName']
        repServCapabilityInstanceName = (
            self.utils.find_replication_service_capabilities(
                self.conn, storageSystem))
        is_clone_license = self.utils.is_clone_licensed(
            self.conn, repServCapabilityInstanceName)

        if is_clone_license is False:
            exceptionMessage = (_(
                "Clone feature is not licensed on %(storageSystem)s.")
                % {'storageSystem': storageSystem})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        repServiceInstanceName = self.utils.find_replication_service(
            self.conn, storageSystem)

        LOG.debug("Create volume replica: Volume: %(cloneName)s "
                  "Source Volume: %(sourceName)s "
                  "Method: CreateElementReplica "
                  "ReplicationService: %(service)s ElementName: "
                  "%(elementname)s SyncType: 8 SourceElement: "
                  "%(sourceelement)s.",
                  {'cloneName': cloneName,
                   'sourceName': sourceName,
                   'service': repServiceInstanceName,
                   'elementname': cloneName,
                   'sourceelement': sourceInstance.path})

        if extraSpecs[ISV3]:
            rc, cloneDict = self._create_replica_v3(
                repServiceInstanceName, cloneVolume, sourceVolume,
                sourceInstance, isSnapshot, extraSpecs)
        else:
            rc, cloneDict = self._create_clone_v2(
                repServiceInstanceName, cloneVolume, sourceVolume,
                sourceInstance, isSnapshot, extraSpecs)

        LOG.debug("Leaving _create_cloned_volume: Volume: "
                  "%(cloneName)s Source Volume: %(sourceName)s "
                  "Return code: %(rc)lu.",
                  {'cloneName': cloneName,
                   'sourceName': sourceName,
                   'rc': rc})
        # Adding version information.
        cloneDict['version'] = self.version

        return cloneDict
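    # Note on the SyncType value logged above: in the DMTF/SMI-S
    # replication model SyncType 7 denotes a snapshot and SyncType 8 a
    # full clone, which is why CreateElementReplica is invoked with
    # SyncType: 8 for clones here (background only; the numeric mapping is
    # not defined in this file).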
    def _add_clone_to_default_storage_group(
            self, fastPolicyName, storageSystemName, cloneDict, cloneName,
            extraSpecs):
        """Helper function to add clone to the default storage group.

        :param fastPolicyName: the fast policy name
        :param storageSystemName: the storage system name
        :param cloneDict: clone dictionary
        :param cloneName: clone name
        :param extraSpecs: extra specifications
        :raises: VolumeBackendAPIException
        """
        # Check if the clone/snapshot volume is already part of the
        # default sg.
        cloneInstance = self.utils.find_volume_instance(
            self.conn, cloneDict, cloneName)
        if self.fast.is_volume_in_default_SG(self.conn,
                                             cloneInstance.path):
            return

        # If FAST enabled place clone volume or volume from snapshot to
        # default storage group.
        LOG.debug("Adding volume: %(cloneName)s to default storage group "
                  "for FAST policy: %(fastPolicyName)s.",
                  {'cloneName': cloneName,
                   'fastPolicyName': fastPolicyName})

        storageConfigService = (
            self.utils.find_storage_configuration_service(
                self.conn, storageSystemName))

        defaultStorageGroupInstanceName = (
            self._get_or_create_default_storage_group(
                self.conn, storageSystemName, cloneDict, cloneName,
                fastPolicyName, extraSpecs))
        if defaultStorageGroupInstanceName is None:
            exceptionMessage = (_(
                "Unable to create or get default storage group for FAST "
                "policy: %(fastPolicyName)s.")
                % {'fastPolicyName': fastPolicyName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

        self._add_volume_to_default_storage_group_on_create(
            cloneDict, cloneName, storageConfigService, storageSystemName,
            fastPolicyName, extraSpecs)

    def _delete_volume(self, volume):
        """Helper function to delete the specified volume.

        :param volume: volume object to be deleted
        :returns: tuple -- rc (int return code), volumeName (string vol
            name)
        """
        volumeName = volume['name']
        rc = -1
        errorRet = (rc, volumeName)

        extraSpecs = self._initial_setup(volume)
        self.conn = self._get_ecom_connection()

        volumeInstance = self._find_lun(volume)
        if volumeInstance is None:
            LOG.error(_LE(
                "Volume %(name)s not found on the array. "
                "No volume to delete."),
                {'name': volumeName})
            return errorRet

        storageConfigService = (
            self.utils.find_storage_configuration_service(
                self.conn, volumeInstance['SystemName']))

        deviceId = volumeInstance['DeviceID']

        if extraSpecs[ISV3]:
            rc = self._delete_from_pool_v3(
                storageConfigService, volumeInstance, volumeName,
                deviceId, extraSpecs)
        else:
            rc = self._delete_from_pool(
                storageConfigService, volumeInstance, volumeName,
                deviceId, extraSpecs[FASTPOLICY], extraSpecs)
        return (rc, volumeName)

    def _remove_device_from_storage_group(
            self, controllerConfigurationService, volumeInstanceName,
            volumeName, extraSpecs):
        """Check if volume is part of a storage group prior to delete.

        Log a warning if volume is part of storage group.

        :param controllerConfigurationService: controller configuration
            service
        :param volumeInstanceName: volume instance name
        :param volumeName: volume name (string)
        :param extraSpecs: extra specifications
        """
        storageGroupInstanceNames = (
            self.masking.get_associated_masking_groups_from_device(
                self.conn, volumeInstanceName))
        if storageGroupInstanceNames:
            LOG.warning(_LW(
                "Pre check for deletion. "
                "Volume: %(volumeName)s is part of a storage group. "
                "Attempting removal from %(storageGroupInstanceNames)s."),
                {'volumeName': volumeName,
                 'storageGroupInstanceNames': storageGroupInstanceNames})
            for storageGroupInstanceName in storageGroupInstanceNames:
                self.provision.remove_device_from_storage_group(
                    self.conn, controllerConfigurationService,
                    storageGroupInstanceName, volumeInstanceName,
                    volumeName, extraSpecs)
    def _find_lunmasking_scsi_protocol_controller(self, storageSystemName,
                                                  connector):
        """Find LunMaskingSCSIProtocolController for the local host.

        Find out how many volumes are mapped to a host associated to the
        LunMaskingSCSIProtocolController.

        :param storageSystemName: the storage system name
        :param connector: the connector dict
        :returns: foundControllerInstanceName
        """
        foundControllerInstanceName = None
        initiators = self._find_initiator_names(connector)

        storageSystemInstanceName = self.utils.find_storageSystem(
            self.conn, storageSystemName)
        controllerInstanceNames = self.conn.AssociatorNames(
            storageSystemInstanceName,
            ResultClass='EMC_LunMaskingSCSIProtocolController')

        for controllerInstanceName in controllerInstanceNames:
            try:
                # This is a check to see if the controller has
                # been deleted.
                self.conn.GetInstance(controllerInstanceName)
                storageHardwareIdInstances = self.conn.Associators(
                    controllerInstanceName,
                    ResultClass='EMC_StorageHardwareID')
                for storageHardwareIdInstance in (
                        storageHardwareIdInstances):
                    # If EMC_StorageHardwareID matches the initiator, we
                    # found the existing
                    # EMC_LunMaskingSCSIProtocolController.
                    hardwareid = storageHardwareIdInstance['StorageID']
                    for initiator in initiators:
                        if hardwareid.lower() == initiator.lower():
                            # This is a check to see if the controller
                            # has been deleted.
                            instance = self.utils.get_existing_instance(
                                self.conn, controllerInstanceName)
                            if instance is None:
                                # Skip this controller as it doesn't
                                # exist any more.
                                pass
                            else:
                                foundControllerInstanceName = (
                                    controllerInstanceName)
                            break

                    if foundControllerInstanceName is not None:
                        break
            except pywbem.cim_operations.CIMError as arg:
                instance = self.utils.process_exception_args(
                    arg, controllerInstanceName)
                if instance is None:
                    # Skip this controller as it doesn't exist any more.
                    pass

            if foundControllerInstanceName is not None:
                break

        LOG.debug("LunMaskingSCSIProtocolController for storage system "
                  "%(storage_system)s and initiator %(initiator)s is "
                  "%(ctrl)s.",
                  {'storage_system': storageSystemName,
                   'initiator': initiators,
                   'ctrl': foundControllerInstanceName})
        return foundControllerInstanceName

    def get_num_volumes_mapped(self, volume, connector):
        """Returns how many volumes are in the same zone as the connector.

        Find out how many volumes are mapped to a host associated to the
        LunMaskingSCSIProtocolController.

        :param volume: the volume object
        :param connector: the connector dict
        :returns: int -- numVolumesMapped
        :raises: VolumeBackendAPIException
        """
        volumename = volume['name']
        vol_instance = self._find_lun(volume)
        if vol_instance is None:
            msg = (_("Volume %(name)s not found on the array. "
                     "Cannot determine if there are volumes mapped.")
                   % {'name': volumename})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        storage_system = vol_instance['SystemName']

        ctrl = self._find_lunmasking_scsi_protocol_controller(
            storage_system, connector)

        LOG.debug("LunMaskingSCSIProtocolController for storage system "
                  "%(storage)s and %(connector)s is %(ctrl)s.",
                  {'storage': storage_system,
                   'connector': connector,
                   'ctrl': ctrl})

        # Return 0 if masking view does not exist.
        if ctrl is None:
            return 0

        associators = self.conn.Associators(
            ctrl, ResultClass='EMC_StorageVolume')

        numVolumesMapped = len(associators)

        LOG.debug("Found %(numVolumesMapped)d volumes on storage system "
                  "%(storage)s mapped to %(connector)s.",
                  {'numVolumesMapped': numVolumesMapped,
                   'storage': storage_system,
                   'connector': connector})

        return numVolumesMapped
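    # Illustrative sketch: counting mapped volumes walks the CIM
    # association graph rather than the Cinder DB. Roughly (instance names
    # hypothetical):
    #
    #     ctrl = <EMC_LunMaskingSCSIProtocolController for the host>
    #     self.conn.Associators(ctrl, ResultClass='EMC_StorageVolume')
    #     # -> one EMC_StorageVolume per LUN masked to that host, so
    #     # len(...) is the number of volumes the host can currently see.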
    def _delete_snapshot(self, snapshot):
        """Helper function to delete the specified snapshot.

        :param snapshot: snapshot object to be deleted
        :raises: VolumeBackendAPIException
        """
        LOG.debug("Entering delete_snapshot.")

        snapshotname = snapshot['name']
        LOG.info(_LI("Delete Snapshot: %(snapshot)s."),
                 {'snapshot': snapshotname})

        extraSpecs = self._initial_setup(snapshot)
        self.conn = self._get_ecom_connection()

        if not extraSpecs[ISV3]:
            snapshotInstance = self._find_lun(snapshot)
            if snapshotInstance is None:
                LOG.error(_LE(
                    "Snapshot %(snapshotname)s not found on the array. "
                    "No volume to delete."),
                    {'snapshotname': snapshotname})
                return (-1, snapshotname)
            storageSystem = snapshotInstance['SystemName']

            # Wait for it to fully sync in case there is an ongoing
            # create volume from snapshot request.
            syncName = self.utils.find_sync_sv_by_target(
                self.conn, storageSystem, snapshotInstance, extraSpecs,
                True)

            if syncName is None:
                LOG.info(_LI(
                    "Snapshot: %(snapshot)s: not found on the array."),
                    {'snapshot': snapshotname})
            else:
                repservice = self.utils.find_replication_service(
                    self.conn, storageSystem)
                if repservice is None:
                    exception_message = _(
                        "Cannot find Replication Service to"
                        " delete snapshot %s.") % snapshotname
                    raise exception.VolumeBackendAPIException(
                        data=exception_message)
                # Break the replication relationship
                LOG.debug("Deleting snap relationship: Target: "
                          "%(snapshot)s "
                          "Method: ModifyReplicaSynchronization "
                          "Replication Service: %(service)s "
                          "Operation: 8 "
                          "Synchronization: %(syncName)s.",
                          {'snapshot': snapshotname,
                           'service': repservice,
                           'syncName': syncName})

                self.provision.delete_clone_relationship(
                    self.conn, repservice, syncName, extraSpecs, True)

        # Delete the target device.
        self._delete_volume(snapshot)

    def create_consistencygroup(self, context, group):
        """Creates a consistency group.

        :param context: the context
        :param group: the group object to be created
        :returns: dict -- modelUpdate = {'status': 'available'}
        :raises: VolumeBackendAPIException
        """
        LOG.info(_LI("Create Consistency Group: %(group)s."),
                 {'group': group['id']})

        modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
        volumeTypeId = group['volume_type_id'].replace(",", "")

        cgName = self.utils.truncate_string(group['id'], 8)

        extraSpecs = self._initial_setup(None, volumeTypeId)

        _poolInstanceName, storageSystem = (
            self._get_pool_and_storage_system(extraSpecs))

        self.conn = self._get_ecom_connection()

        # Find storage system.
        try:
            replicationService = self.utils.find_replication_service(
                self.conn, storageSystem)
            self.provision.create_consistency_group(
                self.conn, replicationService, cgName, extraSpecs)
        except Exception:
            exceptionMessage = (_("Failed to create consistency group:"
                                  " %(cgName)s.")
                                % {'cgName': cgName})
            LOG.exception(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return modelUpdate
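    # Illustrative sketch: the array-side CG name is a truncated form of
    # the Cinder group UUID, so a hypothetical group id
    # 'f9f6a043-5f5c-4d6a-9c28-6b2b2a3b8f3d' becomes something like
    # 'f9f6a043' (8 characters, per truncate_string above); the exact
    # truncation rule lives in utils.truncate_string.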
    def delete_consistencygroup(self, context, group, volumes):
        """Deletes a consistency group.

        :param context: the context
        :param group: the group object to be deleted
        :param volumes: the list of volumes in the consistency group to
            be deleted
        :returns: dict -- modelUpdate
        :returns: list -- list of volume objects
        :raises: VolumeBackendAPIException
        """
        LOG.info(_LI("Delete Consistency Group: %(group)s."),
                 {'group': group['id']})

        cgName = self.utils.truncate_string(group['id'], 8)

        modelUpdate = {}
        modelUpdate['status'] = group['status']
        volumeTypeId = group['volume_type_id'].replace(",", "")

        extraSpecs = self._initial_setup(None, volumeTypeId)

        _poolInstanceName, storageSystem = (
            self._get_pool_and_storage_system(extraSpecs))

        try:
            replicationService = self.utils.find_replication_service(
                self.conn, storageSystem)

            storageConfigservice = (
                self.utils.find_storage_configuration_service(
                    self.conn, storageSystem))
            cgInstanceName = self._find_consistency_group(
                replicationService, cgName)

            if cgInstanceName is None:
                exception_message = (_("Cannot find CG group %s.") %
                                     cgName)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

            memberInstanceNames = self._get_members_of_replication_group(
                cgInstanceName)

            self.provision.delete_consistency_group(
                self.conn, replicationService, cgInstanceName, cgName,
                extraSpecs)

            # Do a bulk delete, a lot faster than single deletes.
            if memberInstanceNames:
                volumes, modelUpdate = self._do_bulk_delete(
                    storageSystem, memberInstanceNames,
                    storageConfigservice, volumes, modelUpdate,
                    extraSpecs[ISV3], extraSpecs)

        except Exception:
            exceptionMessage = (_(
                "Failed to delete consistency group: %(cgName)s.")
                % {'cgName': cgName})
            LOG.exception(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return modelUpdate, volumes

    def _do_bulk_delete(self, storageSystem, memberInstanceNames,
                        storageConfigservice, volumes, modelUpdate, isV3,
                        extraSpecs):
        """Do a bulk delete.

        :param storageSystem: storage system name
        :param memberInstanceNames: volume Instance names
        :param storageConfigservice: storage config service
        :param volumes: volume objects
        :param modelUpdate: dict
        :param isV3: boolean
        :param extraSpecs: extra specifications
        :returns: list -- list of volume objects
        :returns: dict -- modelUpdate
        """
        try:
            controllerConfigurationService = (
                self.utils.find_controller_configuration_service(
                    self.conn, storageSystem))
            for memberInstanceName in memberInstanceNames:
                self._remove_device_from_storage_group(
                    controllerConfigurationService, memberInstanceName,
                    'Member Volume', extraSpecs)
            if isV3:
                self.provisionv3.delete_volume_from_pool(
                    self.conn, storageConfigservice,
                    memberInstanceNames, None, extraSpecs)
            else:
                self.provision.delete_volume_from_pool(
                    self.conn, storageConfigservice,
                    memberInstanceNames, None, extraSpecs)
            for volumeRef in volumes:
                volumeRef['status'] = 'deleted'
        except Exception:
            for volumeRef in volumes:
                volumeRef['status'] = 'error_deleting'
            modelUpdate['status'] = 'error_deleting'
        return volumes, modelUpdate
    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Creates a cgsnapshot.

        :param context: the context
        :param cgsnapshot: the consistency group snapshot to be created
        :param snapshots: snapshots
        :returns: dict -- modelUpdate
        :returns: list -- list of snapshots
        :raises: VolumeBackendAPIException
        """
        consistencyGroup = cgsnapshot.get('consistencygroup')

        snapshots_model_update = []

        LOG.info(_LI(
            "Create snapshot for Consistency Group %(cgId)s "
            "cgsnapshotID: %(cgsnapshot)s."),
            {'cgsnapshot': cgsnapshot['id'],
             'cgId': cgsnapshot['consistencygroup_id']})

        cgName = self.utils.truncate_string(
            cgsnapshot['consistencygroup_id'], 8)

        volumeTypeId = consistencyGroup['volume_type_id'].replace(",", "")
        extraSpecs = self._initial_setup(None, volumeTypeId)
        self.conn = self._get_ecom_connection()

        _poolInstanceName, storageSystem = (
            self._get_pool_and_storage_system(extraSpecs))

        try:
            replicationService = self.utils.find_replication_service(
                self.conn, storageSystem)

            cgInstanceName = (
                self._find_consistency_group(replicationService, cgName))
            if cgInstanceName is None:
                exception_message = (_("Cannot find CG group %s.") %
                                     cgName)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

            memberInstanceNames = self._get_members_of_replication_group(
                cgInstanceName)

            # Create the target consistency group.
            targetCgName = self.utils.truncate_string(cgsnapshot['id'], 8)
            self.provision.create_consistency_group(
                self.conn, replicationService, targetCgName, extraSpecs)
            targetCgInstanceName = self._find_consistency_group(
                replicationService, targetCgName)
            LOG.info(_LI("Create target consistency group %(targetCg)s."),
                     {'targetCg': targetCgInstanceName})

            for memberInstanceName in memberInstanceNames:
                volInstance = self.conn.GetInstance(
                    memberInstanceName, LocalOnly=False)
                numOfBlocks = volInstance['NumberOfBlocks']
                blockSize = volInstance['BlockSize']
                volumeSizeInbits = numOfBlocks * blockSize

                targetVolumeName = 'targetVol'
                volume = {'size': int(self.utils.convert_bits_to_gbs(
                    volumeSizeInbits))}
                if extraSpecs[ISV3]:
                    _rc, volumeDict, _storageSystemName = (
                        self._create_v3_volume(
                            volume, targetVolumeName, volumeSizeInbits,
                            extraSpecs))
                else:
                    _rc, volumeDict, _storageSystemName = (
                        self._create_composite_volume(
                            volume, targetVolumeName, volumeSizeInbits,
                            extraSpecs))
                targetVolumeInstance = self.utils.find_volume_instance(
                    self.conn, volumeDict, targetVolumeName)
                LOG.debug("Create target volume for member volume "
                          "Source volume: %(memberVol)s "
                          "Target volume %(targetVol)s.",
                          {'memberVol': memberInstanceName,
                           'targetVol': targetVolumeInstance.path})
                self.provision.add_volume_to_cg(
                    self.conn, replicationService, targetCgInstanceName,
                    targetVolumeInstance.path, targetCgName,
                    targetVolumeName, extraSpecs)

            # The relationship name must be less than 5 characters.
            relationName = self.utils.truncate_string(cgsnapshot['id'], 5)
            if extraSpecs[ISV3]:
                self.provisionv3.create_group_replica(
                    self.conn, replicationService, cgInstanceName,
                    targetCgInstanceName, relationName, extraSpecs)
            else:
                self.provision.create_group_replica(
                    self.conn, replicationService, cgInstanceName,
                    targetCgInstanceName, relationName, extraSpecs)
            # Break the replica group relationship.
            rgSyncInstanceName = self.utils.find_group_sync_rg_by_target(
                self.conn, storageSystem, targetCgInstanceName,
                extraSpecs, True)
            if rgSyncInstanceName is not None:
                repservice = self.utils.find_replication_service(
                    self.conn, storageSystem)
                if repservice is None:
                    exception_message = (_(
                        "Cannot find Replication service on system %s.") %
                        storageSystem)
                    raise exception.VolumeBackendAPIException(
                        data=exception_message)
                if extraSpecs[ISV3]:
                    # Operation 9: dissolve for snapVx.
                    operation = self.utils.get_num(9, '16')
                    self.provisionv3.break_replication_relationship(
                        self.conn, repservice, rgSyncInstanceName,
                        operation, extraSpecs)
                else:
                    self.provision.delete_clone_relationship(
                        self.conn, repservice, rgSyncInstanceName,
                        extraSpecs)

        except Exception:
            exceptionMessage = (_("Failed to create snapshot for cg:"
                                  " %(cgName)s.")
                                % {'cgName': cgName})
            LOG.exception(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        for snapshot in snapshots:
            snapshots_model_update.append(
                {'id': snapshot['id'],
                 'status': 'available'})
        modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}

        return modelUpdate, snapshots_model_update

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Delete a cgsnapshot.

        :param context: the context
        :param cgsnapshot: the consistency group snapshot to be deleted
        :param snapshots: snapshots
        :returns: dict -- modelUpdate
        :returns: list -- list of snapshots
        :raises: VolumeBackendAPIException
        """
        consistencyGroup = cgsnapshot.get('consistencygroup')
        model_update = {}
        snapshots_model_update = []
        LOG.info(_LI(
            "Delete snapshot for source CG %(cgId)s "
            "cgsnapshotID: %(cgsnapshot)s."),
            {'cgsnapshot': cgsnapshot['id'],
             'cgId': cgsnapshot['consistencygroup_id']})

        model_update['status'] = cgsnapshot['status']
        volumeTypeId = consistencyGroup['volume_type_id'].replace(",", "")
        extraSpecs = self._initial_setup(None, volumeTypeId)
        self.conn = self._get_ecom_connection()

        _poolInstanceName, storageSystem = (
            self._get_pool_and_storage_system(extraSpecs))

        try:
            targetCgName = self.utils.truncate_string(cgsnapshot['id'], 8)
            model_update, snapshots = self._delete_cg_and_members(
                storageSystem, targetCgName, model_update,
                snapshots, extraSpecs)
            for snapshot in snapshots:
                snapshots_model_update.append(
                    {'id': snapshot['id'],
                     'status': 'deleted'})
        except Exception:
            exceptionMessage = (_("Failed to delete snapshot for cg: "
                                  "%(cgId)s.")
                                % {'cgId':
                                   cgsnapshot['consistencygroup_id']})
            LOG.exception(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return model_update, snapshots_model_update

    def _find_consistency_group(self, replicationService, cgName):
        """Finds a CG given its name.

        :param replicationService: the replication service
        :param cgName: the consistency group name
        :returns: foundCgInstanceName
        """
        foundCgInstanceName = None
        cgInstanceNames = (
            self.conn.AssociatorNames(
                replicationService,
                ResultClass='CIM_ReplicationGroup'))

        for cgInstanceName in cgInstanceNames:
            instance = self.conn.GetInstance(cgInstanceName,
                                             LocalOnly=False)
            if cgName == instance['ElementName']:
                foundCgInstanceName = cgInstanceName
                break

        return foundCgInstanceName

    def _get_members_of_replication_group(self, cgInstanceName):
        """Get the members of the consistency group.
        :param cgInstanceName: the CG instance name
        :returns: list -- memberInstanceNames
        """
        memberInstanceNames = self.conn.AssociatorNames(
            cgInstanceName,
            AssocClass='CIM_OrderedMemberOfCollection')

        return memberInstanceNames

    def _create_composite_volume(
            self, volume, volumeName, volumeSize, extraSpecs,
            memberCount=None):
        """Create a composite volume (V2).

        :param volume: the volume object
        :param volumeName: the name of the volume
        :param volumeSize: the size of the volume
        :param extraSpecs: extra specifications
        :param memberCount: the number of meta members in a composite
            volume
        :returns: int -- return code
        :returns: dict -- volumeDict
        :returns: string -- storageSystemName
        :raises: VolumeBackendAPIException
        """
        if not memberCount:
            memberCount, errorDesc = self.utils.determine_member_count(
                volume['size'], extraSpecs[MEMBERCOUNT],
                extraSpecs[COMPOSITETYPE])
            if errorDesc is not None:
                exceptionMessage = (_("The striped meta count of "
                                      "%(memberCount)s is too small for "
                                      "volume: %(volumeName)s, "
                                      "with size %(volumeSize)s.")
                                    % {'memberCount': memberCount,
                                       'volumeName': volumeName,
                                       'volumeSize': volume['size']})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        poolInstanceName, storageSystemName = (
            self._get_pool_and_storage_system(extraSpecs))

        LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
                  "Storage System: %(storageSystem)s "
                  "Size: %(size)lu MemberCount: %(memberCount)s.",
                  {'volume': volumeName,
                   'pool': poolInstanceName,
                   'storageSystem': storageSystemName,
                   'size': volumeSize,
                   'memberCount': memberCount})

        elementCompositionService = (
            self.utils.find_element_composition_service(
                self.conn, storageSystemName))

        storageConfigService = (
            self.utils.find_storage_configuration_service(
                self.conn, storageSystemName))

        # If FAST is intended to be used we must first check that the pool
        # is associated with the correct storage tier.
        if extraSpecs[FASTPOLICY] is not None:
            foundPoolInstanceName = (
                self.fast.get_pool_associated_to_policy(
                    self.conn, extraSpecs[FASTPOLICY], extraSpecs[ARRAY],
                    storageConfigService, poolInstanceName))
            if foundPoolInstanceName is None:
                exceptionMessage = (_("Pool: %(poolName)s "
                                      "is not associated to storage tier "
                                      "for fast policy %(fastPolicy)s.")
                                    % {'poolName': extraSpecs[POOL],
                                       'fastPolicy':
                                           extraSpecs[FASTPOLICY]})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        compositeType = self.utils.get_composite_type(
            extraSpecs[COMPOSITETYPE])

        volumeDict, rc = self.provision.create_composite_volume(
            self.conn, elementCompositionService, volumeSize, volumeName,
            poolInstanceName, compositeType, memberCount, extraSpecs)

        # Now that we have already checked that the pool is associated
        # with the correct storage tier and the volume was successfully
        # created, add the volume to the default storage group created
        # for volumes in pools associated with this fast policy.
        if extraSpecs[FASTPOLICY]:
            LOG.info(_LI(
                "Adding volume: %(volumeName)s to default storage group"
                " for FAST policy: %(fastPolicyName)s."),
                {'volumeName': volumeName,
                 'fastPolicyName': extraSpecs[FASTPOLICY]})

            defaultStorageGroupInstanceName = (
                self._get_or_create_default_storage_group(
                    self.conn, storageSystemName, volumeDict,
                    volumeName, extraSpecs[FASTPOLICY], extraSpecs))
            if not defaultStorageGroupInstanceName:
                exceptionMessage = (_(
                    "Unable to create or get default storage group for "
                    "FAST policy: %(fastPolicyName)s.")
                    % {'fastPolicyName': extraSpecs[FASTPOLICY]})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

            self._add_volume_to_default_storage_group_on_create(
                volumeDict, volumeName, storageConfigService,
                storageSystemName, extraSpecs[FASTPOLICY], extraSpecs)
        return rc, volumeDict, storageSystemName

    def _create_v3_volume(
            self, volume, volumeName, volumeSize, extraSpecs):
        """Create a volume (V3).

        :param volume: the volume object
        :param volumeName: the volume name
        :param volumeSize: the volume size
        :param extraSpecs: extra specifications
        :returns: int -- return code
        :returns: dict -- volumeDict
        :returns: string -- storageSystemName
        :raises: VolumeBackendAPIException
        """
        isValidSLO, isValidWorkload = self.utils.verify_slo_workload(
            extraSpecs[SLO], extraSpecs[WORKLOAD])

        if not isValidSLO or not isValidWorkload:
            exceptionMessage = (_(
                "Either SLO: %(slo)s or workload %(workload)s is "
                "invalid. Examine previous error statement for valid "
                "values.")
                % {'slo': extraSpecs[SLO],
                   'workload': extraSpecs[WORKLOAD]})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        poolInstanceName, storageSystemName = (
            self._get_pool_and_storage_system(extraSpecs))

        # Check to see if SLO and Workload are configured on the array.
        storagePoolCapability = (
            self.provisionv3.get_storage_pool_capability(
                self.conn, poolInstanceName))
        if storagePoolCapability:
            self.provisionv3.get_storage_pool_setting(
                self.conn, storagePoolCapability, extraSpecs[SLO],
                extraSpecs[WORKLOAD])
        else:
            exceptionMessage = (_(
                "Cannot determine storage pool settings."))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
                  "Storage System: %(storageSystem)s "
                  "Size: %(size)lu.",
                  {'volume': volumeName,
                   'pool': poolInstanceName,
                   'storageSystem': storageSystemName,
                   'size': volumeSize})

        storageConfigService = (
            self.utils.find_storage_configuration_service(
                self.conn, storageSystemName))

        # A volume created without specifying a storage group during
        # creation time is allocated from the default SRP pool and
        # assigned the optimized SLO.
        sgInstanceName = self._get_or_create_storage_group_v3(
            extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD],
            storageSystemName, extraSpecs)

        volumeDict, rc = self.provisionv3.create_volume_from_sg(
            self.conn, storageConfigService, volumeName,
            sgInstanceName, volumeSize, extraSpecs)

        return rc, volumeDict, storageSystemName
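    # Illustrative sketch: a V3 volume is always created through a storage
    # group that encodes its service level, so a request with pool SRP_1,
    # SLO Bronze and workload DSS lands in (or first creates) a default SG
    # named along the lines of 'OS-SRP_1-Bronze-DSS-SG' (hypothetical
    # name; the exact pattern comes from
    # utils.get_v3_default_sg_instance_name).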
    def _get_or_create_storage_group_v3(
            self, poolName, slo, workload, storageSystemName, extraSpecs):
        """Get or create storage group_v3 (V3).

        :param poolName: the SRP pool name
        :param slo: the SLO
        :param workload: the workload
        :param storageSystemName: storage system name
        :param extraSpecs: extra specifications
        :returns: sgInstanceName
        """
        storageGroupName, controllerConfigService, sgInstanceName = (
            self.utils.get_v3_default_sg_instance_name(
                self.conn, poolName, slo, workload, storageSystemName))
        if sgInstanceName is None:
            sgInstanceName = self.provisionv3.create_storage_group_v3(
                self.conn, controllerConfigService, storageGroupName,
                poolName, slo, workload, extraSpecs)

        return sgInstanceName

    def _extend_composite_volume(self, volumeInstance, volumeName,
                                 newSize, additionalVolumeSize,
                                 extraSpecs):
        """Extend a composite volume (V2).

        :param volumeInstance: the volume instance
        :param volumeName: the name of the volume
        :param newSize: in GBs
        :param additionalVolumeSize: additional volume size
        :param extraSpecs: extra specifications
        :returns: int -- return code
        :returns: dict -- modifiedVolumeDict
        :raises: VolumeBackendAPIException
        """
        # Is the volume extendable.
        isConcatenated = self.utils.check_if_volume_is_extendable(
            self.conn, volumeInstance)
        if 'True' not in isConcatenated:
            exceptionMessage = (_(
                "Volume: %(volumeName)s is not a concatenated volume. "
                "You can only perform extend on concatenated volume. "
                "Exiting...")
                % {'volumeName': volumeName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)
        else:
            compositeType = self.utils.get_composite_type(CONCATENATED)

        LOG.debug("Extend Volume: %(volume)s New size: %(newSize)s GBs.",
                  {'volume': volumeName,
                   'newSize': newSize})

        deviceId = volumeInstance['DeviceID']
        storageSystemName = volumeInstance['SystemName']
        LOG.debug(
            "Device ID: %(deviceid)s: Storage System: "
            "%(storagesystem)s.",
            {'deviceid': deviceId,
             'storagesystem': storageSystemName})

        storageConfigService = (
            self.utils.find_storage_configuration_service(
                self.conn, storageSystemName))

        elementCompositionService = (
            self.utils.find_element_composition_service(
                self.conn, storageSystemName))

        # Create a volume to the size of the
        # newSize - oldSize = additionalVolumeSize.
        unboundVolumeInstance = self._create_and_get_unbound_volume(
            self.conn, storageConfigService, volumeInstance.path,
            additionalVolumeSize, extraSpecs)
        if unboundVolumeInstance is None:
            exceptionMessage = (_(
                "Error Creating unbound volume on an Extend operation."))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        # Add the new unbound volume to the original composite volume.
        rc, modifiedVolumeDict = (
            self._modify_and_get_composite_volume_instance(
                self.conn, elementCompositionService, volumeInstance,
                unboundVolumeInstance.path, volumeName, compositeType,
                extraSpecs))
        if modifiedVolumeDict is None:
            exceptionMessage = (_(
                "On an Extend Operation, error adding volume to "
                "composite volume: %(volumename)s.")
                % {'volumename': volumeName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return rc, modifiedVolumeDict
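    # Worked example (hypothetical numbers): extending an 8 GB composite
    # volume to 10 GB creates a 2 GB unbound volume
    # (additionalVolumeSize = newSize - oldSize) and appends it to the
    # existing concatenated composite, so the data on the original
    # members stays in place.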
""" storageGroupName = self.utils.get_v3_storage_group_name( extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD]) volumeInstanceName = volumeInstance.path isValid, targetSlo, targetWorkload = ( self._is_valid_for_storage_assisted_migration_v3( volumeInstanceName, host, extraSpecs[ARRAY], extraSpecs[POOL], volumeName, volumeStatus, storageGroupName)) storageSystemName = volumeInstance['SystemName'] if not isValid: LOG.error(_LE( "Volume %(name)s is not suitable for storage " "assisted migration using retype."), {'name': volumeName}) return False if volume['host'] != host['host']: LOG.debug( "Retype Volume %(name)s from source host %(sourceHost)s " "to target host %(targetHost)s.", {'name': volumeName, 'sourceHost': volume['host'], 'targetHost': host['host']}) return self._migrate_volume_v3( volume, volumeInstance, extraSpecs[POOL], targetSlo, targetWorkload, storageSystemName, newType, extraSpecs) return False def _migrate_volume_v3( self, volume, volumeInstance, poolName, targetSlo, targetWorkload, storageSystemName, newType, extraSpecs): """Migrate from one slo/workload combination to another (V3). This requires moving the volume from its current SG to a new or existing SG that has the target attributes. :param volume: the volume object :param volumeInstance: the volume instance :param poolName: the SRP Pool Name :param targetSlo: the target SLO :param targetWorkload: the target workload :param storageSystemName: the storage system name :param newType: the type to migrate to :param extraSpecs: extra specifications :returns: boolean -- True if migration succeeded, False if error. """ volumeName = volume['name'] controllerConfigService = ( self.utils.find_controller_configuration_service( self.conn, storageSystemName)) defaultSgName = self.utils.get_v3_storage_group_name( extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD]) foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( self.conn, volumeInstance.path, defaultSgName)) if foundStorageGroupInstanceName is None: LOG.warning(_LW( "Volume : %(volumeName)s is not currently " "belonging to any storage group."), {'volumeName': volumeName}) else: self.provision.remove_device_from_storage_group( self.conn, controllerConfigService, foundStorageGroupInstanceName, volumeInstance.path, volumeName, extraSpecs) # Check that it has been removed. sgFromVolRemovedInstanceName = ( self.utils.wrap_get_storage_group_from_volume( self.conn, volumeInstance.path, defaultSgName)) if sgFromVolRemovedInstanceName is not None: LOG.error(_LE( "Volume : %(volumeName)s has not been " "removed from source storage group %(storageGroup)s."), {'volumeName': volumeName, 'storageGroup': sgFromVolRemovedInstanceName}) return False storageGroupName = self.utils.get_v3_storage_group_name( poolName, targetSlo, targetWorkload) targetSgInstanceName = self._get_or_create_storage_group_v3( poolName, targetSlo, targetWorkload, storageSystemName, extraSpecs) if targetSgInstanceName is None: LOG.error(_LE( "Failed to get or create storage group %(storageGroupName)s."), {'storageGroupName': storageGroupName}) return False self.masking.add_volume_to_storage_group( self.conn, controllerConfigService, targetSgInstanceName, volumeInstance, volumeName, storageGroupName, extraSpecs) # Check that it has been added. 
        sgFromVolAddedInstanceName = (
            self.utils.get_storage_group_from_volume(
                self.conn, volumeInstance.path, storageGroupName))
        if sgFromVolAddedInstanceName is None:
            LOG.error(_LE(
                "Volume: %(volumeName)s has not been "
                "added to target storage group %(storageGroup)s."),
                {'volumeName': volumeName,
                 'storageGroup': targetSgInstanceName})
            return False

        return True

    def _pool_migration(self, volumeInstance, volume, host,
                        volumeName, volumeStatus,
                        fastPolicyName, newType, extraSpecs):
        """Migrate from one pool to another (V2).

        :param volumeInstance: the volume instance
        :param volume: the volume object
        :param host: the host object
        :param volumeName: the name of the volume
        :param volumeStatus: the volume status
        :param fastPolicyName: the FAST policy Name
        :param newType: the type to migrate to
        :param extraSpecs: extra specifications
        :returns: boolean -- True if migration succeeded, False if error.
        """
        storageSystemName = volumeInstance['SystemName']
        isValid, targetPoolName, targetFastPolicyName = (
            self._is_valid_for_storage_assisted_migration(
                volumeInstance.path, host, storageSystemName,
                volumeName, volumeStatus))

        if not isValid:
            LOG.error(_LE(
                "Volume %(name)s is not suitable for storage "
                "assisted migration using retype."),
                {'name': volumeName})
            return False
        if volume['host'] != host['host']:
            LOG.debug(
                "Retype Volume %(name)s from source host %(sourceHost)s "
                "to target host %(targetHost)s.",
                {'name': volumeName,
                 'sourceHost': volume['host'],
                 'targetHost': host['host']})
            return self._migrate_volume(
                volume, volumeInstance, targetPoolName,
                targetFastPolicyName, fastPolicyName, extraSpecs,
                newType)

        return False

    def _update_pool_stats(
            self, backendName, arrayInfo):
        """Update pool statistics (V2).

        :param backendName: the backend name
        :param arrayInfo: the arrayInfo
        :returns: location_info, total_capacity_gb, free_capacity_gb
        """
        if arrayInfo['FastPolicy']:
            LOG.debug(
                "Fast policy %(fastPolicyName)s is enabled on "
                "%(arrayName)s.",
                {'fastPolicyName': arrayInfo['FastPolicy'],
                 'arrayName': arrayInfo['SerialNumber']})
        else:
            LOG.debug(
                "No Fast policy for Array:%(arrayName)s "
                "backend:%(backendName)s.",
                {'arrayName': arrayInfo['SerialNumber'],
                 'backendName': backendName})

        storageSystemInstanceName = self.utils.find_storageSystem(
            self.conn, arrayInfo['SerialNumber'])
        isTieringPolicySupported = (
            self.fast.is_tiering_policy_enabled_on_storage_system(
                self.conn, storageSystemInstanceName))

        if (arrayInfo['FastPolicy'] is not None and
                isTieringPolicySupported is True):
            # FAST enabled
            total_capacity_gb, free_capacity_gb = (
                self.fast.get_capacities_associated_to_policy(
                    self.conn, arrayInfo['SerialNumber'],
                    arrayInfo['FastPolicy']))
            LOG.info(_LI(
                "FAST: capacity stats for policy %(fastPolicyName)s on "
                "array %(arrayName)s. "
                "total_capacity_gb=%(total_capacity_gb)lu, "
                "free_capacity_gb=%(free_capacity_gb)lu."),
                {'fastPolicyName': arrayInfo['FastPolicy'],
                 'arrayName': arrayInfo['SerialNumber'],
                 'total_capacity_gb': total_capacity_gb,
                 'free_capacity_gb': free_capacity_gb})
        else:
            # NON-FAST
            total_capacity_gb, free_capacity_gb = (
                self.utils.get_pool_capacities(
                    self.conn, arrayInfo['PoolName'],
                    arrayInfo['SerialNumber']))
            LOG.info(_LI(
                "NON-FAST: capacity stats for pool %(poolName)s on array "
                "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
                "free_capacity_gb=%(free_capacity_gb)lu."),
                {'poolName': arrayInfo['PoolName'],
                 'arrayName': arrayInfo['SerialNumber'],
                 'total_capacity_gb': total_capacity_gb,
                 'free_capacity_gb': free_capacity_gb})

        location_info = ("%(arrayName)s#%(poolName)s#%(policyName)s"
                         % {'arrayName': arrayInfo['SerialNumber'],
                            'poolName': arrayInfo['PoolName'],
                            'policyName': arrayInfo['FastPolicy']})

        return location_info, total_capacity_gb, free_capacity_gb

    def _set_v2_extra_specs(self, extraSpecs, poolRecord):
        """Set the VMAX V2 extra specs.

        :param extraSpecs: extra specifications
        :param poolRecord: pool record
        :returns: dict -- the extraSpecs
        :raises: VolumeBackendAPIException
        """
        try:
            stripedMetaCount = extraSpecs[STRIPECOUNT]
            extraSpecs[MEMBERCOUNT] = stripedMetaCount
            extraSpecs[COMPOSITETYPE] = STRIPED

            LOG.debug(
                "There are: %(stripedMetaCount)s striped metas in "
                "the extra specs.",
                {'stripedMetaCount': stripedMetaCount})
        except KeyError:
            memberCount = '1'
            extraSpecs[MEMBERCOUNT] = memberCount
            extraSpecs[COMPOSITETYPE] = CONCATENATED
            LOG.debug("StripedMetaCount is not in the extra specs.")

        # Get the FAST policy from the file. This value can be None if the
        # user doesn't want to associate with any FAST policy.
        if poolRecord['FastPolicy']:
            LOG.debug("The fast policy name is: %(fastPolicyName)s.",
                      {'fastPolicyName': poolRecord['FastPolicy']})
        extraSpecs[FASTPOLICY] = poolRecord['FastPolicy']
        extraSpecs[ISV3] = False
        extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord)

        LOG.debug("Pool is: %(pool)s "
                  "Array is: %(array)s "
                  "FastPolicy is: %(fastPolicy)s "
                  "CompositeType is: %(compositeType)s "
                  "MemberCount is: %(memberCount)s.",
                  {'pool': extraSpecs[POOL],
                   'array': extraSpecs[ARRAY],
                   'fastPolicy': extraSpecs[FASTPOLICY],
                   'compositeType': extraSpecs[COMPOSITETYPE],
                   'memberCount': extraSpecs[MEMBERCOUNT]})
        return extraSpecs

    def _set_v3_extra_specs(self, extraSpecs, poolRecord):
        """Set the VMAX V3 extra specs.

        If SLO or workload are not specified then the default values are
        NONE and the Optimized SLO will be assigned to the volume.

        :param extraSpecs: extra specifications
        :param poolRecord: pool record
        :returns: dict -- the extra specifications dictionary
        """
        extraSpecs[SLO] = poolRecord['SLO']
        extraSpecs[WORKLOAD] = poolRecord['Workload']
        extraSpecs[ISV3] = True
        extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord)

        LOG.debug("Pool is: %(pool)s "
                  "Array is: %(array)s "
                  "SLO is: %(slo)s "
                  "Workload is: %(workload)s.",
                  {'pool': extraSpecs[POOL],
                   'array': extraSpecs[ARRAY],
                   'slo': extraSpecs[SLO],
                   'workload': extraSpecs[WORKLOAD]})
        return extraSpecs
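    # Illustrative sketch: for a V2 backend, the resulting extra specs
    # distinguish striped from concatenated composites (symbolic keys,
    # hypothetical values):
    #
    #     {POOL: 'gold_pool', ARRAY: '000195900551',
    #      FASTPOLICY: 'GOLD1', COMPOSITETYPE: STRIPED,
    #      MEMBERCOUNT: '4', ISV3: False}
    #
    # whereas _set_v3_extra_specs records an SLO and workload instead of a
    # FAST policy and meta-member count.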
    def _set_common_extraSpecs(self, extraSpecs, poolRecord):
        """Set common extra specs.

        The extraSpecs are common to v2 and v3.

        :param extraSpecs: extra specifications
        :param poolRecord: pool record
        :returns: dict -- the extra specifications dictionary
        """
        extraSpecs[POOL] = poolRecord['PoolName']
        extraSpecs[ARRAY] = poolRecord['SerialNumber']
        extraSpecs[PORTGROUPNAME] = poolRecord['PortGroup']
        if 'Interval' in poolRecord and poolRecord['Interval']:
            extraSpecs[INTERVAL] = poolRecord['Interval']
            LOG.debug("The user defined interval is: %(intervalInSecs)s.",
                      {'intervalInSecs': poolRecord['Interval']})
        else:
            LOG.debug("Interval not overridden, default of 10 assumed.")
        if 'Retries' in poolRecord and poolRecord['Retries']:
            extraSpecs[RETRIES] = poolRecord['Retries']
            LOG.debug("The user defined retries is: %(retries)s.",
                      {'retries': poolRecord['Retries']})
        else:
            LOG.debug("Retries not overridden, default of 60 assumed.")
        return extraSpecs

    def _delete_from_pool(self, storageConfigService, volumeInstance,
                          volumeName, deviceId, fastPolicyName,
                          extraSpecs):
        """Delete from pool (v2).

        :param storageConfigService: the storage config service
        :param volumeInstance: the volume instance
        :param volumeName: the volume Name
        :param deviceId: the device ID of the volume
        :param fastPolicyName: the FAST policy name (if it exists)
        :param extraSpecs: extra specifications
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        storageSystemName = volumeInstance['SystemName']
        controllerConfigurationService = (
            self.utils.find_controller_configuration_service(
                self.conn, storageSystemName))
        if fastPolicyName is not None:
            defaultStorageGroupInstanceName = (
                self.masking.remove_device_from_default_storage_group(
                    self.conn, controllerConfigurationService,
                    volumeInstance.path, volumeName, fastPolicyName,
                    extraSpecs))
            if defaultStorageGroupInstanceName is None:
                LOG.warning(_LW(
                    "The volume: %(volumename)s was not first part of "
                    "the default storage group for FAST policy "
                    "%(fastPolicyName)s."),
                    {'volumename': volumeName,
                     'fastPolicyName': fastPolicyName})
                # Check if it is part of another storage group.
                self._remove_device_from_storage_group(
                    controllerConfigurationService,
                    volumeInstance.path, volumeName, extraSpecs)
        else:
            # Check if volume is part of a storage group.
            self._remove_device_from_storage_group(
                controllerConfigurationService,
                volumeInstance.path, volumeName, extraSpecs)

        LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool "
                  "ConfigService: %(service)s TheElement: "
                  "%(vol_instance)s DeviceId: %(deviceId)s.",
                  {'service': storageConfigService,
                   'name': volumeName,
                   'vol_instance': volumeInstance.path,
                   'deviceId': deviceId})
        try:
            rc = self.provision.delete_volume_from_pool(
                self.conn, storageConfigService, volumeInstance.path,
                volumeName, extraSpecs)
        except Exception:
            # If we cannot successfully delete the volume then we want to
            # return the volume to the default storage group.
            if (fastPolicyName is not None and
                    defaultStorageGroupInstanceName is not None and
                    storageSystemName is not None):
                assocDefaultStorageGroupName = (
                    self.fast
                    .add_volume_to_default_storage_group_for_fast_policy(
                        self.conn, controllerConfigurationService,
                        volumeInstance, volumeName, fastPolicyName,
                        extraSpecs))
                if assocDefaultStorageGroupName is None:
                    LOG.error(_LE(
                        "Failed to Roll back to re-add volume "
                        "%(volumeName)s to default storage group for "
                        "fast policy %(fastPolicyName)s. "
Please contact your sysadmin to " "get the volume returned to the default " "storage group."), {'volumeName': volumeName, 'fastPolicyName': fastPolicyName}) errorMessage = (_("Failed to delete volume %(volumeName)s.") % {'volumeName': volumeName}) LOG.exception(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) return rc def _delete_from_pool_v3(self, storageConfigService, volumeInstance, volumeName, deviceId, extraSpecs): """Delete from pool (v3). :param storageConfigService: the storage config service :param volumeInstance: the volume instance :param volumeName: the volume name :param deviceId: the device ID of the volume :param extraSpecs: extra specifications :returns: int -- return code :raises: VolumeBackendAPIException """ storageSystemName = volumeInstance['SystemName'] controllerConfigurationService = ( self.utils.find_controller_configuration_service( self.conn, storageSystemName)) # Check if it is part of a storage group and delete it, with # extra logic for the case when the volume is the last member. self.masking.remove_and_reset_members( self.conn, controllerConfigurationService, volumeInstance, volumeName, extraSpecs, None, False) LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool " "ConfigService: %(service)s TheElement: %(vol_instance)s " "DeviceId: %(deviceId)s.", {'service': storageConfigService, 'name': volumeName, 'vol_instance': volumeInstance.path, 'deviceId': deviceId}) try: rc = self.provisionv3.delete_volume_from_pool( self.conn, storageConfigService, volumeInstance.path, volumeName, extraSpecs) except Exception: # If we cannot successfully delete the volume, then we want to # return the volume to the default storage group, # which should be the SG it previously belonged to. self.masking.return_volume_to_default_storage_group_v3( self.conn, controllerConfigurationService, volumeInstance, volumeName, extraSpecs) errorMessage = (_("Failed to delete volume %(volumeName)s.") % {'volumeName': volumeName}) LOG.exception(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) return rc def _create_clone_v2(self, repServiceInstanceName, cloneVolume, sourceVolume, sourceInstance, isSnapshot, extraSpecs): """Create a clone (v2). :param repServiceInstanceName: the replication service :param cloneVolume: the clone volume object :param sourceVolume: the source volume object :param sourceInstance: the source volume instance :param isSnapshot: check to see if it is a snapshot :param extraSpecs: extra specifications :returns: int -- return code :raises: VolumeBackendAPIException """ # Check if the source volume contains any meta devices. metaHeadInstanceName = self.utils.get_volume_meta_head( self.conn, sourceInstance.path) if metaHeadInstanceName is None: # Simple volume. return self._create_v2_replica_and_delete_clone_relationship( repServiceInstanceName, cloneVolume, sourceVolume, sourceInstance, None, extraSpecs, isSnapshot) else: # Composite volume with meta device members. # Check the meta members' capacities.
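# Illustrative sketch of the size check that follows (hypothetical
# byte counts): get_meta_members_capacity_in_byte() returning
# [2147483648, 2147483648] collapses to a single set entry, so a
# straight replica suffices, whereas [8589934592, 8589934592,
# 4294967296] does not, and the target is then rebuilt below as an
# 8GB base volume with 8GB and 4GB members appended before the
# replica is created.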
metaMemberInstanceNames = ( self.utils.get_composite_elements( self.conn, sourceInstance)) volumeCapacities = self.utils.get_meta_members_capacity_in_byte( self.conn, metaMemberInstanceNames) LOG.debug("Volume capacities: %(metasizes)s.", {'metasizes': volumeCapacities}) if len(set(volumeCapacities)) == 1: LOG.debug("Meta volumes are all of the same size.") return self._create_v2_replica_and_delete_clone_relationship( repServiceInstanceName, cloneVolume, sourceVolume, sourceInstance, None, extraSpecs, isSnapshot) LOG.debug("Meta volumes are of different sizes: " "%d distinct sizes.", len(set(volumeCapacities))) baseTargetVolumeInstance = None for volumeSizeInbits in volumeCapacities: if baseTargetVolumeInstance is None: # Create the base volume. baseVolumeName = "TargetBaseVol" volume = {'size': int(self.utils.convert_bits_to_gbs( volumeSizeInbits))} _rc, baseVolumeDict, storageSystemName = ( self._create_composite_volume( volume, baseVolumeName, volumeSizeInbits, extraSpecs, 1)) baseTargetVolumeInstance = self.utils.find_volume_instance( self.conn, baseVolumeDict, baseVolumeName) LOG.debug("Base target volume %(targetVol)s created. " "Capacity in bits: %(capInBits)lu.", {'capInBits': volumeSizeInbits, 'targetVol': baseTargetVolumeInstance.path}) else: # Create an append volume. targetVolumeName = "MetaVol" volume = {'size': int(self.utils.convert_bits_to_gbs( volumeSizeInbits))} storageConfigService = ( self.utils.find_storage_configuration_service( self.conn, storageSystemName)) unboundVolumeInstance = ( self._create_and_get_unbound_volume( self.conn, storageConfigService, baseTargetVolumeInstance.path, volumeSizeInbits, extraSpecs)) if unboundVolumeInstance is None: exceptionMessage = (_( "Error creating unbound volume.")) LOG.error(exceptionMessage) # Remove the target volume. self._delete_target_volume_v2(storageConfigService, baseTargetVolumeInstance, extraSpecs) raise exception.VolumeBackendAPIException( data=exceptionMessage) # Append the new unbound volume to the # base target composite volume.
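# (The append itself goes through the ElementCompositionService with
# the composite type taken from the extra specs -- concatenated or
# striped -- so each new member extends the target the same way the
# source composite was built.)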
baseTargetVolumeInstance = self.utils.find_volume_instance( self.conn, baseVolumeDict, baseVolumeName) try: elementCompositionService = ( self.utils.find_element_composition_service( self.conn, storageSystemName)) compositeType = self.utils.get_composite_type( extraSpecs[COMPOSITETYPE]) _rc, modifiedVolumeDict = ( self._modify_and_get_composite_volume_instance( self.conn, elementCompositionService, baseTargetVolumeInstance, unboundVolumeInstance.path, targetVolumeName, compositeType, extraSpecs)) if modifiedVolumeDict is None: exceptionMessage = (_( "Error appending volume %(volumename)s to " "target base volume.") % {'volumename': targetVolumeName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) except Exception: exceptionMessage = (_( "Exception appending meta volume to target volume " "%(volumename)s.") % {'volumename': baseVolumeName}) LOG.error(exceptionMessage) # Remove append volume and target base volume self._delete_target_volume_v2( storageConfigService, unboundVolumeInstance, extraSpecs) self._delete_target_volume_v2( storageConfigService, baseTargetVolumeInstance, extraSpecs) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.debug("Create V2 replica for meta members of different sizes.") return self._create_v2_replica_and_delete_clone_relationship( repServiceInstanceName, cloneVolume, sourceVolume, sourceInstance, baseTargetVolumeInstance, extraSpecs, isSnapshot) def _create_v2_replica_and_delete_clone_relationship( self, repServiceInstanceName, cloneVolume, sourceVolume, sourceInstance, targetInstance, extraSpecs, isSnapshot=False): """Create a replica and delete the clone relationship. :param repServiceInstanceName: the replication service :param cloneVolume: the clone volume object :param sourceVolume: the source volume object :param sourceInstance: the source volume instance :param targetInstance: the target volume instance :param extraSpecs: extra specifications :param isSnapshot: check to see if it is a snapshot :returns: int -- return code :returns: dict -- cloneDict """ sourceName = sourceVolume['name'] cloneName = cloneVolume['name'] try: rc, job = self.provision.create_element_replica( self.conn, repServiceInstanceName, cloneName, sourceName, sourceInstance, targetInstance, extraSpecs) except Exception: exceptionMessage = (_( "Exception during create element replica. " "Clone name: %(cloneName)s " "Source name: %(sourceName)s " "Extra specs: %(extraSpecs)s ") % {'cloneName': cloneName, 'sourceName': sourceName, 'extraSpecs': extraSpecs}) LOG.error(exceptionMessage) if targetInstance is not None: # Check if the copy session exists. storageSystem = targetInstance['SystemName'] syncInstanceName = self.utils.find_sync_sv_by_target( self.conn, storageSystem, targetInstance, False) if syncInstanceName is not None: # Remove the Clone relationship. 
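# A failed CreateElementReplica can still leave a live copy session
# behind; it has to be detached here before the half-created target
# volume can be returned to the pool.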
rc, job = self.provision.delete_clone_relationship( self.conn, repServiceInstanceName, syncInstanceName, extraSpecs, True) storageConfigService = ( self.utils.find_storage_configuration_service( self.conn, storageSystem)) self._delete_target_volume_v2( storageConfigService, targetInstance, extraSpecs) raise exception.VolumeBackendAPIException( data=exceptionMessage) cloneDict = self.provision.get_volume_dict_from_job( self.conn, job['Job']) fastPolicyName = extraSpecs[FASTPOLICY] if isSnapshot: if fastPolicyName is not None: storageSystemName = sourceInstance['SystemName'] self._add_clone_to_default_storage_group( fastPolicyName, storageSystemName, cloneDict, cloneName, extraSpecs) LOG.info(_LI("Snapshot creation %(cloneName)s completed. " "Source Volume: %(sourceName)s."), {'cloneName': cloneName, 'sourceName': sourceName}) return rc, cloneDict cloneVolume['provider_location'] = six.text_type(cloneDict) syncInstanceName, storageSystemName = ( self._find_storage_sync_sv_sv(cloneVolume, sourceVolume, extraSpecs)) # Remove the Clone relationship so it can be used as a regular lun. # 8 - Detach operation. rc, job = self.provision.delete_clone_relationship( self.conn, repServiceInstanceName, syncInstanceName, extraSpecs) if fastPolicyName is not None: self._add_clone_to_default_storage_group( fastPolicyName, storageSystemName, cloneDict, cloneName, extraSpecs) return rc, cloneDict def get_target_wwns_from_masking_view( self, storageSystem, volume, connector): """Find target WWNs via the masking view. :param storageSystem: the storage system name :param volume: volume to be attached :param connector: the connector dict :returns: list -- the target WWN list """ targetWwns = [] mvInstanceName = self.get_masking_view_by_volume(volume, connector) if mvInstanceName is not None: targetWwns = self.masking.get_target_wwns( self.conn, mvInstanceName) LOG.info(_LI("Target wwns in masking view %(maskingView)s: " "%(targetWwns)s."), {'maskingView': mvInstanceName, 'targetWwns': six.text_type(targetWwns)}) return targetWwns def get_port_group_from_masking_view(self, maskingViewInstanceName): """Get the port groups in a masking view. :param maskingViewInstanceName: masking view instance name :returns: portGroupInstanceName """ return self.masking.get_port_group_from_masking_view( self.conn, maskingViewInstanceName) def get_initiator_group_from_masking_view(self, maskingViewInstanceName): """Get the initiator group in a masking view. :param maskingViewInstanceName: masking view instance name :returns: initiatorGroupInstanceName """ return self.masking.get_initiator_group_from_masking_view( self.conn, maskingViewInstanceName) def get_masking_view_by_volume(self, volume, connector): """Given volume, retrieve the masking view instance name. :param volume: the volume :param connector: the connector object :returns: maskingviewInstanceName """ LOG.debug("Finding Masking View for volume %(volume)s.", {'volume': volume}) volumeInstance = self._find_lun(volume) return self.masking.get_masking_view_by_volume( self.conn, volumeInstance, connector) def get_masking_views_by_port_group(self, portGroupInstanceName): """Given port group, retrieve the masking view instance name. 
:param portGroupInstanceName: port group instance name :returns: list -- maskingViewInstanceNames """ LOG.debug("Finding Masking Views for port group %(pg)s.", {'pg': portGroupInstanceName}) return self.masking.get_masking_views_by_port_group( self.conn, portGroupInstanceName) def get_masking_views_by_initiator_group( self, initiatorGroupInstanceName): """Given initiator group, retrieve the masking view instance name. :param initiatorGroupInstanceName: initiator group instance name :returns: list -- maskingViewInstanceNames """ LOG.debug("Finding Masking Views for initiator group %(ig)s.", {'ig': initiatorGroupInstanceName}) return self.masking.get_masking_views_by_initiator_group( self.conn, initiatorGroupInstanceName) def _create_replica_v3( self, repServiceInstanceName, cloneVolume, sourceVolume, sourceInstance, isSnapshot, extraSpecs): """Create a replica. V3-specific function that creates a replica of the source volume, either a clone or a snapshot. :param repServiceInstanceName: the replication service :param cloneVolume: the clone volume object :param sourceVolume: the source volume object :param sourceInstance: the source volume instance :param isSnapshot: boolean -- check to see if it is a snapshot :param extraSpecs: extra specifications :returns: int -- return code :returns: dict -- cloneDict """ cloneName = cloneVolume['name'] # SyncType 7: snap; the VMAX3 default snapshot is snapVx. syncType = self.utils.get_num(SNAPVX, '16') # Operation 9: Dissolve for snapVx. operation = self.utils.get_num(DISSOLVE_SNAPVX, '16') rsdInstance = None targetInstance = None if isSnapshot: rsdInstance = self.utils.set_target_element_supplier_in_rsd( self.conn, repServiceInstanceName, SNAPVX_REPLICATION_TYPE, CREATE_NEW_TARGET, extraSpecs) else: targetInstance = self._create_duplicate_volume( sourceInstance, cloneName, extraSpecs) try: _rc, job = ( self.provisionv3.create_element_replica( self.conn, repServiceInstanceName, cloneName, syncType, sourceInstance, extraSpecs, targetInstance, rsdInstance)) except Exception: LOG.warning(_LW( "Clone failed on V3. Cleaning up the target volume. " "Clone name: %(cloneName)s."), {'cloneName': cloneName}) # Check if the copy session exists. if targetInstance: self._cleanup_target( repServiceInstanceName, targetInstance, extraSpecs) # Re-throw the exception. raise cloneDict = self.provisionv3.get_volume_dict_from_job( self.conn, job['Job']) targetVolumeInstance = ( self.provisionv3.get_volume_from_job(self.conn, job['Job'])) LOG.info(_LI("The target instance device id is: %(deviceid)s."), {'deviceid': targetVolumeInstance['DeviceID']}) cloneVolume['provider_location'] = six.text_type(cloneDict) syncInstanceName, _storageSystem = ( self._find_storage_sync_sv_sv(cloneVolume, sourceVolume, extraSpecs, True)) rc, job = self.provisionv3.break_replication_relationship( self.conn, repServiceInstanceName, syncInstanceName, operation, extraSpecs) return rc, cloneDict def _cleanup_target( self, repServiceInstanceName, targetInstance, extraSpecs): """Clean up the target after an exception. :param repServiceInstanceName: the replication service :param targetInstance: the target instance :param extraSpecs: extra specifications """ storageSystem = targetInstance['SystemName'] syncInstanceName = self.utils.find_sync_sv_by_target( self.conn, storageSystem, targetInstance, False) if syncInstanceName is not None: # Break the clone relationship.
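# As in _create_replica_v3() above, the snapVx session is dissolved
# (DISSOLVE_SNAPVX) so that the partially created target can then be
# deleted from the pool via _delete_from_pool_v3().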
self.provisionv3.break_replication_relationship( self.conn, repServiceInstanceName, syncInstanceName, DISSOLVE_SNAPVX, extraSpecs, True) storageConfigService = ( self.utils.find_storage_configuration_service( self.conn, storageSystem)) deviceId = targetInstance['DeviceID'] volumeName = targetInstance['Name'] self._delete_from_pool_v3( storageConfigService, targetInstance, volumeName, deviceId, extraSpecs) def _delete_cg_and_members( self, storageSystem, cgName, modelUpdate, volumes, extraSpecs): """Helper function to delete a consistencygroup and its member volumes. :param storageSystem: storage system :param cgName: consistency group name :param modelUpdate: dict -- the model update dict :param volumes: the list of member volumes :param extraSpecs: extra specifications :returns: dict -- modelUpdate :returns: list -- the updated list of member volumes :raises: VolumeBackendAPIException """ replicationService = self.utils.find_replication_service( self.conn, storageSystem) storageConfigservice = ( self.utils.find_storage_configuration_service( self.conn, storageSystem)) cgInstanceName = self._find_consistency_group( replicationService, cgName) if cgInstanceName is None: exception_message = (_("Cannot find CG %s.") % cgName) raise exception.VolumeBackendAPIException( data=exception_message) memberInstanceNames = self._get_members_of_replication_group( cgInstanceName) self.provision.delete_consistency_group( self.conn, replicationService, cgInstanceName, cgName, extraSpecs) if memberInstanceNames: try: controllerConfigurationService = ( self.utils.find_controller_configuration_service( self.conn, storageSystem)) for memberInstanceName in memberInstanceNames: self._remove_device_from_storage_group( controllerConfigurationService, memberInstanceName, 'Member Volume', extraSpecs) LOG.debug("Deleting CG members. CG: %(cg)s " "%(numVols)lu member volumes: %(memVols)s.", {'cg': cgInstanceName, 'numVols': len(memberInstanceNames), 'memVols': memberInstanceNames}) if extraSpecs[ISV3]: self.provisionv3.delete_volume_from_pool( self.conn, storageConfigservice, memberInstanceNames, None, extraSpecs) else: self.provision.delete_volume_from_pool( self.conn, storageConfigservice, memberInstanceNames, None, extraSpecs) for volumeRef in volumes: volumeRef['status'] = 'deleted' except Exception: for volumeRef in volumes: volumeRef['status'] = 'error_deleting' modelUpdate['status'] = 'error_deleting' return modelUpdate, volumes def _delete_target_volume_v2( self, storageConfigService, targetVolumeInstance, extraSpecs): """Helper function to delete the clone target volume instance. :param storageConfigService: storage configuration service instance :param targetVolumeInstance: clone target volume instance :param extraSpecs: extra specifications """ deviceId = targetVolumeInstance['DeviceID'] volumeName = targetVolumeInstance['Name'] rc = self._delete_from_pool(storageConfigService, targetVolumeInstance, volumeName, deviceId, extraSpecs[FASTPOLICY], extraSpecs) return rc def _validate_pool(self, volume): """Get the pool from volume['host']. There may be backward compatibility concerns, so a check is put in place to see whether a version has been added to provider_location. If it has, we know we are at the current version; if not, we assume the volume was created before the 'Pool Aware Scheduler' feature. :param volume: the volume Object :returns: string -- pool :raises: VolumeBackendAPIException """ pool = None # Volume is None in CG ops. if volume is None: return pool # This check is for all operations except a create.
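# Illustrative provider_location contents (hypothetical values): a
# volume created after the 'Pool Aware Scheduler' change stores a
# string such as "{'classname': u'Symm_StorageVolume', 'version':
# '2.2.0', 'keybindings': {...}}", from which
# _get_version_from_provider_location() recovers '2.2.0'; a legacy
# volume has no 'version' key, so the pool lookup is skipped.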
# On a create, provider_location is None. try: if volume['provider_location']: version = self._get_version_from_provider_location( volume['provider_location']) if not version: return pool except KeyError: return pool try: pool = volume_utils.extract_host(volume['host'], 'pool') if pool: LOG.debug("Pool from volume['host'] is %(pool)s.", {'pool': pool}) else: exceptionMessage = (_( "Pool from volume['host'] %(host)s not found.") % {'host': volume['host']}) raise exception.VolumeBackendAPIException( data=exceptionMessage) except Exception as ex: exceptionMessage = (_( "Pool from volume['host'] failed with: %(ex)s.") % {'ex': ex}) raise exception.VolumeBackendAPIException( data=exceptionMessage) return pool def _get_version_from_provider_location(self, loc): """Get the version from the provider location. :param loc: the provider_location string :returns: version or None """ version = None try: if isinstance(loc, six.string_types): name = ast.literal_eval(loc) version = name['version'] except KeyError: pass return version def manage_existing(self, volume, external_ref): """Manages an existing VMAX Volume (import to Cinder). Renames the existing volume to match the expected name for the volume. Also needs to consider things like QoS, Emulation, account/tenant. :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: dict -- model_update :raises: VolumeBackendAPIException """ extraSpecs = self._initial_setup(volume) self.conn = self._get_ecom_connection() arrayName, deviceId = self.utils.get_array_and_device_id(volume, external_ref) # Managing an existing volume is not supported if FAST is enabled. if extraSpecs[FASTPOLICY]: LOG.warning(_LW( "FAST is enabled. Policy: %(fastPolicyName)s."), {'fastPolicyName': extraSpecs[FASTPOLICY]}) exceptionMessage = (_( "Manage volume is not supported if FAST is enabled. " "FAST policy: %(fastPolicyName)s.") % {'fastPolicyName': extraSpecs[FASTPOLICY]}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) # Check if the volume is attached by checking whether it is in any # masking view. volumeInstanceName = ( self.utils.find_volume_by_device_id_on_array(self.conn, arrayName, deviceId)) sgInstanceNames = ( self.utils.get_storage_groups_from_volume( self.conn, volumeInstanceName)) for sgInstanceName in sgInstanceNames: mvInstanceName = self.masking.get_masking_view_from_storage_group( self.conn, sgInstanceName) if mvInstanceName: exceptionMessage = (_( "Unable to import volume %(deviceId)s to cinder. " "Volume is in masking view %(mv)s.") % {'deviceId': deviceId, 'mv': mvInstanceName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) # Check if there are any snapshots associated with the volume. cinderPoolInstanceName, storageSystemName = ( self._get_pool_and_storage_system(extraSpecs)) repSessionInstanceName = ( self.utils.get_associated_replication_from_source_volume( self.conn, storageSystemName, deviceId)) if repSessionInstanceName: exceptionMessage = (_( "Unable to import volume %(deviceId)s to cinder. " "It is the source volume of replication session %(sync)s.") % {'deviceId': deviceId, 'sync': repSessionInstanceName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) # Make sure the existing external volume is in the same storage pool.
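# The comparison below is on the raw CIM InstanceID of each pool,
# which typically encodes the array serial number and the pool
# identifier, so any mismatch between the external volume's pool and
# the pool this cinder backend manages aborts the import.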
volumePoolInstanceName = ( self.utils.get_assoc_pool_from_volume(self.conn, volumeInstanceName)) volumePoolName = volumePoolInstanceName['InstanceID'] cinderPoolName = cinderPoolInstanceName['InstanceID'] LOG.debug("Storage pool of existing volume: %(volPool)s, " "Storage pool currently managed by cinder: %(cinderPool)s.", {'volPool': volumePoolName, 'cinderPool': cinderPoolName}) if volumePoolName != cinderPoolName: exceptionMessage = (_( "Unable to import volume %(deviceId)s to cinder. The external " "volume is not in the pool managed by current cinder host.") % {'deviceId': deviceId}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) # Rename the volume. volumeId = volume['name'] volumeElementName = self.utils.get_volume_element_name(volumeId) LOG.debug("Rename volume %(vol)s to %(elementName)s.", {'vol': volumeInstanceName, 'elementName': volumeElementName}) volumeInstance = self.utils.rename_volume(self.conn, volumeInstanceName, volumeElementName) keys = {} volpath = volumeInstance.path keys['CreationClassName'] = volpath['CreationClassName'] keys['SystemName'] = volpath['SystemName'] keys['DeviceID'] = volpath['DeviceID'] keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] model_update = {} provider_location = {} provider_location['classname'] = volpath['CreationClassName'] provider_location['keybindings'] = keys model_update.update({'display_name': volumeElementName}) volume['provider_location'] = six.text_type(provider_location) model_update.update({'provider_location': volume['provider_location']}) return model_update def manage_existing_get_size(self, volume, external_ref): """Return size of an existing VMAX volume to manage_existing. :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: size of the volume in GB """ LOG.debug("Volume in manage_existing_get_size: %(volume)s.", {'volume': volume}) arrayName, deviceId = self.utils.get_array_and_device_id(volume, external_ref) volumeInstanceName = ( self.utils.find_volume_by_device_id_on_array(self.conn, arrayName, deviceId)) volumeInstance = self.conn.GetInstance(volumeInstanceName) byteSize = self.utils.get_volume_size(self.conn, volumeInstance) gbSize = int(byteSize) / units.Gi LOG.debug( "Size of volume %(deviceID)s is %(volumeSize)s GB.", {'deviceID': deviceId, 'volumeSize': gbSize}) return gbSize def unmanage(self, volume): """Export VMAX volume from Cinder. Leave the volume intact on the backend array. :param volume: the volume object :raises: VolumeBackendAPIException """ volumeName = volume['name'] volumeId = volume['id'] LOG.debug("Unmanage volume %(name)s, id=%(id)s.", {'name': volumeName, 'id': volumeId}) self._initial_setup(volume) self.conn = self._get_ecom_connection() volumeInstance = self._find_lun(volume) if volumeInstance is None: exceptionMessage = (_("Cannot find volume %(id)s for the " "unmanage operation. Exiting...") % {'id': volumeId}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) # Rename the volume to volumeId, thus removing the 'OS-' prefix. volumeInstance = self.utils.rename_volume(self.conn, volumeInstance, volumeId) def update_consistencygroup(self, group, add_volumes, remove_volumes): """Updates LUNs in consistency group.
:param group: the consistency group object :param add_volumes: the uuids of the volumes you want to add to the CG :param remove_volumes: the uuids of the volumes you want to remove from the CG """ LOG.info(_LI("Update Consistency Group: %(group)s. " "This adds and/or removes volumes from a CG."), {'group': group['id']}) modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE} volumeTypeId = group['volume_type_id'].replace(",", "") cg_name = self.utils.truncate_string(group['id'], 8) extraSpecs = self._initial_setup(None, volumeTypeId) _poolInstanceName, storageSystem = ( self._get_pool_and_storage_system(extraSpecs)) add_vols = [vol for vol in add_volumes] if add_volumes else [] add_instance_names = self._get_volume_instance_names(add_vols) remove_vols = [vol for vol in remove_volumes] if remove_volumes else [] remove_instance_names = self._get_volume_instance_names(remove_vols) self.conn = self._get_ecom_connection() try: replicationService = self.utils.find_replication_service( self.conn, storageSystem) cgInstanceName = ( self._find_consistency_group(replicationService, cg_name)) if cgInstanceName is None: raise exception.ConsistencyGroupNotFound( consistencygroup_id=cg_name) # Add volume(s) to the consistency group. if add_instance_names: self.provision.add_volume_to_cg( self.conn, replicationService, cgInstanceName, add_instance_names, cg_name, None, extraSpecs) # Remove volume(s) from the consistency group. if remove_instance_names: self.provision.remove_volume_from_cg( self.conn, replicationService, cgInstanceName, remove_instance_names, cg_name, None, extraSpecs) except exception.ConsistencyGroupNotFound: raise except Exception as ex: LOG.error(_LE("Exception: %(ex)s"), {'ex': ex}) exceptionMessage = (_("Failed to update consistency group:" " %(cgName)s.") % {'cgName': cg_name}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return modelUpdate, None, None def _get_volume_instance_names(self, volumes): """Get volume instance names from the volume objects. :param volumes: volume objects :returns: volume instance names """ volumeInstanceNames = [] for volume in volumes: volumeInstance = self._find_lun(volume) if volumeInstance is None: LOG.error(_LE("Volume %(name)s not found on the array."), {'name': volume['name']}) else: volumeInstanceNames.append(volumeInstance.path) return volumeInstanceNames def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols): """Creates the consistency group from source. Currently the source can only be a cgsnapshot. :param context: the context :param group: the consistency group object to be created :param volumes: volumes in the consistency group :param cgsnapshot: the source consistency group snapshot :param snapshots: snapshots of the source volumes :param source_cg: the source consistency group :param source_vols: the source vols :returns: model_update, volumes_model_update model_update is a dictionary of cg status volumes_model_update is a list of dictionaries of volume update """ LOG.debug("Enter EMCVMAXCommon::create_consistencygroup_from_src.
" "Group to be created: %(cgId)s, " "Source snapshot: %(cgSnapshot)s.", {'cgId': group['id'], 'cgSnapshot': cgsnapshot['consistencygroup_id']}) volumeTypeId = group['volume_type_id'].replace(",", "") extraSpecs = self._initial_setup(None, volumeTypeId) self.create_consistencygroup(context, group) targetCgName = self.utils.truncate_string(group['id'], TRUNCATE_8) if not snapshots: exceptionMessage = (_("No source snapshots provided to create " "consistency group %s.") % targetCgName) raise exception.VolumeBackendAPIException( data=exceptionMessage) modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE} _poolInstanceName, storageSystem = ( self._get_pool_and_storage_system(extraSpecs)) try: replicationService = self.utils.find_replication_service( self.conn, storageSystem) if replicationService is None: exceptionMessage = (_( "Cannot find replication service on system %s.") % storageSystem) raise exception.VolumeBackendAPIException( data=exceptionMessage) targetCgInstanceName = self._find_consistency_group( replicationService, targetCgName) LOG.debug("Create CG %(targetCg)s from snapshot.", {'targetCg': targetCgInstanceName}) for volume, snapshot in zip(volumes, snapshots): volumeSizeInbits = int(self.utils.convert_gb_to_bits( snapshot['volume_size'])) targetVolumeName = 'targetVol' volume = {'size': int(self.utils.convert_bits_to_gbs( volumeSizeInbits))} if extraSpecs[ISV3]: _rc, volumeDict, _storageSystemName = ( self._create_v3_volume( volume, targetVolumeName, volumeSizeInbits, extraSpecs)) else: _rc, volumeDict, _storageSystemName = ( self._create_composite_volume( volume, targetVolumeName, volumeSizeInbits, extraSpecs)) targetVolumeInstance = self.utils.find_volume_instance( self.conn, volumeDict, targetVolumeName) LOG.debug("Create target volume for member snapshot. " "Source snapshot: %(snapshot)s, " "Target volume: %(targetVol)s.", {'snapshot': snapshot['id'], 'targetVol': targetVolumeInstance.path}) self.provision.add_volume_to_cg(self.conn, replicationService, targetCgInstanceName, targetVolumeInstance.path, targetCgName, targetVolumeName, extraSpecs) sourceCgName = self.utils.truncate_string(cgsnapshot['id'], TRUNCATE_8) sourceCgInstanceName = self._find_consistency_group( replicationService, sourceCgName) if sourceCgInstanceName is None: exceptionMessage = (_("Cannot find source CG instance. " "consistencygroup_id: %s.") % cgsnapshot['consistencygroup_id']) raise exception.VolumeBackendAPIException( data=exceptionMessage) relationName = self.utils.truncate_string(group['id'], TRUNCATE_5) if extraSpecs[ISV3]: self.provisionv3.create_group_replica( self.conn, replicationService, sourceCgInstanceName, targetCgInstanceName, relationName, extraSpecs) else: self.provision.create_group_replica( self.conn, replicationService, sourceCgInstanceName, targetCgInstanceName, relationName, extraSpecs) # Break the replica group relationship. 
rgSyncInstanceName = self.utils.find_group_sync_rg_by_target( self.conn, storageSystem, targetCgInstanceName, extraSpecs, True) if rgSyncInstanceName is not None: if extraSpecs[ISV3]: # Operation 9: dissolve for snapVx operation = self.utils.get_num(9, '16') self.provisionv3.break_replication_relationship( self.conn, replicationService, rgSyncInstanceName, operation, extraSpecs) else: self.provision.delete_clone_relationship( self.conn, replicationService, rgSyncInstanceName, extraSpecs) except Exception: cgSnapshotId = cgsnapshot['consistencygroup_id'] exceptionMessage = (_("Failed to create CG %(cgName)s " "from snapshot %(cgSnapshot)s.") % {'cgName': targetCgName, 'cgSnapshot': cgSnapshotId}) LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) volumes_model_update = self.utils.get_volume_model_updates( context, volumes, group['id'], modelUpdate['status']) return modelUpdate, volumes_model_update def _find_ip_protocol_endpoints(self, conn, storageSystemName, portgroupname): """Find the IP protocol endpoint for ISCSI. :param storageSystemName: the system name :param portgroupname: the portgroup name :returns: foundIpAddresses """ LOG.debug("The portgroup name for iscsiadm is %(pg)s", {'pg': portgroupname}) foundipaddresses = [] configservice = ( self.utils.find_controller_configuration_service( conn, storageSystemName)) portgroupinstancename = ( self.masking.find_port_group(conn, configservice, portgroupname)) iscsiendpointinstancenames = ( self.utils.get_iscsi_protocol_endpoints( conn, portgroupinstancename)) for iscsiendpointinstancename in iscsiendpointinstancenames: tcpendpointinstancenames = ( self.utils.get_tcp_protocol_endpoints( conn, iscsiendpointinstancename)) for tcpendpointinstancename in tcpendpointinstancenames: ipendpointinstancenames = ( self.utils.get_ip_protocol_endpoints( conn, tcpendpointinstancename)) for ipendpointinstancename in ipendpointinstancenames: ipaddress = ( self.utils.get_iscsi_ip_address( conn, ipendpointinstancename)) foundipaddresses.append(ipaddress) return foundipaddresses def _extend_v3_volume(self, volumeInstance, volumeName, newSize, extraSpecs): """Extends a VMAX3 volume. :param volumeInstance: volume instance :param volumeName: volume name :param newSize: new size the volume will be increased to :param extraSpecs: extra specifications :returns: int -- return code :returns: volumeDict """ new_size_in_bits = int(self.utils.convert_gb_to_bits(newSize)) storageConfigService = self.utils.find_storage_configuration_service( self.conn, volumeInstance['SystemName']) volumeDict, rc = self.provisionv3.extend_volume_in_SG( self.conn, storageConfigService, volumeInstance.path, volumeName, new_size_in_bits, extraSpecs) return rc, volumeDict def _create_duplicate_volume( self, sourceInstance, cloneName, extraSpecs): """Create a volume in the same dimensions of the source volume. 
:param sourceInstance: the source volume instance :param cloneName: the user supplied snap name :param extraSpecs: additional info :returns: targetInstance """ numOfBlocks = sourceInstance['NumberOfBlocks'] blockSize = sourceInstance['BlockSize'] volumeSizeInbits = numOfBlocks * blockSize volume = {'size': int(self.utils.convert_bits_to_gbs(volumeSizeInbits))} _rc, volumeDict, _storageSystemName = ( self._create_v3_volume( volume, cloneName, volumeSizeInbits, extraSpecs)) targetInstance = self.utils.find_volume_instance( self.conn, volumeDict, cloneName) LOG.debug("Create replica target volume " "Source Volume: %(sourceVol)s, " "Target Volume: %(targetVol)s.", {'sourceVol': sourceInstance.path, 'targetVol': targetInstance.path}) return targetInstance cinder-8.0.0/cinder/volume/drivers/emc/emc_cli_iscsi.py0000664000567000056710000002620712701406250024264 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """iSCSI Drivers for EMC VNX array based on CLI.""" from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.emc import emc_vnx_cli LOG = logging.getLogger(__name__) class EMCCLIISCSIDriver(driver.ISCSIDriver): """EMC ISCSI Drivers for VNX using CLI. 
Version history: 1.0.0 - Initial driver 2.0.0 - Thick/thin provisioning, robust enhancement 3.0.0 - Array-based Backend Support, FC Basic Support, Target Port Selection for MPIO, Initiator Auto Registration, Storage Group Auto Deletion, Multiple Authentication Type Support, Storage-Assisted Volume Migration, SP Toggle for HA 3.0.1 - Security File Support 4.0.0 - Advance LUN Features (Compression Support, Deduplication Support, FAST VP Support, FAST Cache Support), Storage-assisted Retype, External Volume Management, Read-only Volume, FC Auto Zoning 4.1.0 - Consistency group support 5.0.0 - Performance enhancement, LUN Number Threshold Support, Initiator Auto Deregistration, Force Deleting LUN in Storage Groups, robust enhancement 5.1.0 - iSCSI multipath enhancement 5.2.0 - Pool-aware scheduler support 5.3.0 - Consistency group modification support 6.0.0 - Over subscription support Create consistency group from cgsnapshot support Multiple pools support enhancement Manage/unmanage volume revise White list target ports support Snap copy support Support efficient non-disruptive backup 7.0.0 - Clone consistency group support Replication v2 support(managed) Configurable migration rate support """ def __init__(self, *args, **kwargs): super(EMCCLIISCSIDriver, self).__init__(*args, **kwargs) self.cli = emc_vnx_cli.getEMCVnxCli( 'iSCSI', configuration=self.configuration, active_backend_id=kwargs.get('active_backend_id')) self.VERSION = self.cli.VERSION def check_for_setup_error(self): pass def create_volume(self, volume): """Creates a VNX volume.""" return self.cli.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" return self.cli.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume.""" return self.cli.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend a volume.""" self.cli.extend_volume(volume, new_size) def delete_volume(self, volume): """Deletes a VNX volume.""" self.cli.delete_volume(volume) def migrate_volume(self, ctxt, volume, host): return self.cli.migrate_volume(ctxt, volume, host) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" return self.cli.retype(ctxt, volume, new_type, diff, host) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.cli.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.cli.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" pass def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. the format of the driver data is defined in vnx_get_iscsi_properties. 
Example return value (multipath is not enabled):: { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.1:3260', 'target_lun': 1, } } Example return value (multipath is enabled):: { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_iqns': ['iqn.2010-10.org.openstack:volume-00001', 'iqn.2010-10.org.openstack:volume-00002'], 'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'], 'target_luns': [1, 1], } } """ return self.cli.initialize_connection(volume, connector) def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" self.cli.terminate_connection(volume, connector) def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, update the stats first. """ if refresh: self.update_volume_stats() return self._stats def update_volume_stats(self): """Retrieve status info from volume group.""" LOG.debug("Updating volume status.") # Retrieve the volume stats update from the VNX. data = self.cli.update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'EMCCLIISCSIDriver' data['storage_protocol'] = 'iSCSI' self._stats = data def manage_existing(self, volume, existing_ref): """Manage an existing LUN in the array. The LUN should be in a manageable pool backend, otherwise an error is returned. Renames the backend storage object so that it matches volume['name'], which is how drivers traditionally map between a cinder volume and the associated backend storage object. manage_existing_ref:{ 'source-id':<lun id> } or manage_existing_ref:{ 'source-name':<lun name> } """ return self.cli.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing.""" return self.cli.manage_existing_get_size(volume, existing_ref) def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" return self.cli.create_consistencygroup(context, group) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" return self.cli.delete_consistencygroup( context, group, volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" return self.cli.create_cgsnapshot( context, cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" return self.cli.delete_cgsnapshot( context, cgsnapshot, snapshots) def get_pool(self, volume): """Returns the pool name of a volume.""" return self.cli.get_pool(volume) def update_consistencygroup(self, context, group, add_volumes, remove_volumes): """Updates LUNs in consistency group.""" return self.cli.update_consistencygroup(context, group, add_volumes, remove_volumes) def unmanage(self, volume): """Unmanages a volume.""" self.cli.unmanage(volume) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistency group from source.""" return self.cli.create_consistencygroup_from_src(context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) def update_migrated_volume(self, context, volume, new_volume, original_volume_status=None): """Returns model update for migrated volume.""" return self.cli.update_migrated_volume(context, volume, new_volume, original_volume_status) def create_export_snapshot(self, context,
snapshot, connector): """Creates a snapshot mount point for snapshot.""" return self.cli.create_export_snapshot(context, snapshot, connector) def remove_export_snapshot(self, context, snapshot): """Removes snapshot mount point for snapshot.""" return self.cli.remove_export_snapshot(context, snapshot) def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Allows connection to snapshot.""" return self.cli.initialize_connection_snapshot(snapshot, connector, **kwargs) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallows connection to snapshot.""" return self.cli.terminate_connection_snapshot(snapshot, connector, **kwargs) def backup_use_temp_snapshot(self): return True def failover_host(self, context, volumes, secondary_backend_id): """Failovers volume from primary device to secondary.""" return self.cli.failover_host(context, volumes, secondary_backend_id) cinder-8.0.0/cinder/volume/drivers/emc/emc_vmax_fast.py0000664000567000056710000010754512701406250024320 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume.drivers.emc import emc_vmax_provision from cinder.volume.drivers.emc import emc_vmax_utils LOG = logging.getLogger(__name__) DEFAULT_SG_PREFIX = 'OS_default_' DEFAULT_SG_POSTFIX = '_SG' class EMCVMAXFast(object): """FAST Class for SMI-S based EMC volume drivers. This FAST class is for EMC volume drivers based on SMI-S. It supports VMAX arrays. """ def __init__(self, prtcl): self.protocol = prtcl self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl) self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl) def _check_if_fast_supported(self, conn, storageSystemInstanceName): """Check to see if fast is supported on the array. :param conn: the ecom connection :param storageSystemInstanceName: the storage system Instance name :returns: boolean -- isTieringPolicySupported """ tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( conn, storageSystemInstanceName) isTieringPolicySupported = self.is_tiering_policy_enabled( conn, tierPolicyServiceInstanceName) if isTieringPolicySupported is None: LOG.error(_LE("Cannot determine whether " "Tiering Policy is supported on this array.")) if isTieringPolicySupported is False: LOG.error(_LE("Tiering Policy is not " "supported on this array.")) return isTieringPolicySupported def is_tiering_policy_enabled(self, conn, tierPolicyServiceInstanceName): """Checks to see if tiering policy is supported. We will only check if there is a fast policy specified in the config file. 
:param conn: the connection information to the ecom server :param tierPolicyServiceInstanceName: the tier policy service instance name :returns: boolean -- foundIsSupportsTieringPolicies """ foundIsSupportsTieringPolicies = None tierPolicyCapabilityInstanceNames = conn.AssociatorNames( tierPolicyServiceInstanceName, ResultClass='CIM_TierPolicyServiceCapabilities', AssocClass='CIM_ElementCapabilities') tierPolicyCapabilityInstanceName = tierPolicyCapabilityInstanceNames[0] tierPolicyCapabilityInstance = conn.GetInstance( tierPolicyCapabilityInstanceName, LocalOnly=False) propertiesList = (tierPolicyCapabilityInstance .properties.items()) for properties in propertiesList: if properties[0] == 'SupportsTieringPolicies': cimProperties = properties[1] foundIsSupportsTieringPolicies = cimProperties.value break if foundIsSupportsTieringPolicies is None: LOG.error(_LE("Cannot determine if Tiering Policies " "are supported.")) return foundIsSupportsTieringPolicies def get_and_verify_default_storage_group( self, conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName): """Retrieves and verifies the default storage group for a volume. Given the volumeInstanceName get any associated storage group and check that it is the default storage group. The default storage group should have been already created; if it is not found, an error is logged and an exception is raised. :param conn: the connection to the ecom server :param controllerConfigService: the controller config service :param volumeInstanceName: the volume instance name :param volumeName: the volume name (String) :param fastPolicyName: the fast policy name (String) :returns: foundDefaultStorageGroupInstanceName, defaultSgName """ foundDefaultStorageGroupInstanceName = None storageSystemInstanceName = self.utils.find_storage_system( conn, controllerConfigService) if not self._check_if_fast_supported(conn, storageSystemInstanceName): errorMessage = (_( "FAST is not supported on this array.")) LOG.error(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) defaultSgName = self.format_default_sg_string(fastPolicyName) assocStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume(conn, volumeInstanceName, defaultSgName)) defaultStorageGroupInstanceName = ( self.utils.find_storage_masking_group(conn, controllerConfigService, defaultSgName)) if defaultStorageGroupInstanceName is None: errorMessage = (_( "Unable to find default storage group " "for FAST policy: %(fastPolicyName)s.") % {'fastPolicyName': fastPolicyName}) LOG.error(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) if assocStorageGroupInstanceName == defaultStorageGroupInstanceName: foundDefaultStorageGroupInstanceName = ( assocStorageGroupInstanceName) else: LOG.warning(_LW( "Volume: %(volumeName)s does not belong " "to storage group %(defaultSgName)s."), {'volumeName': volumeName, 'defaultSgName': defaultSgName}) return foundDefaultStorageGroupInstanceName, defaultSgName def format_default_sg_string(self, fastPolicyName): """Format the default storage group name. :param fastPolicyName: the fast policy name :returns: defaultSgName """ return ("%(prefix)s%(fastPolicyName)s%(postfix)s" % {'prefix': DEFAULT_SG_PREFIX, 'fastPolicyName': fastPolicyName, 'postfix': DEFAULT_SG_POSTFIX}) def add_volume_to_default_storage_group_for_fast_policy( self, conn, controllerConfigService, volumeInstance, volumeName, fastPolicyName, extraSpecs): """Add a volume to the default storage group for FAST policy. The storage group must pre-exist. Once added to the storage group, check the association to make sure it has been successfully added.
:param conn: the ecom connection :param controllerConfigService: the controller configuration service :param volumeInstance: the volume instance :param volumeName: the volume name (String) :param fastPolicyName: the fast policy name (String) :param extraSpecs: additional info :returns: assocStorageGroupInstanceName - the storage group associated with the volume """ failedRet = None defaultSgName = self.format_default_sg_string(fastPolicyName) storageGroupInstanceName = self.utils.find_storage_masking_group( conn, controllerConfigService, defaultSgName) if storageGroupInstanceName is None: LOG.error(_LE( "Unable to get default storage group %(defaultSgName)s."), {'defaultSgName': defaultSgName}) return failedRet self.provision.add_members_to_masking_group( conn, controllerConfigService, storageGroupInstanceName, volumeInstance.path, volumeName, extraSpecs) # Check to see if the volume is in the storage group. assocStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume(conn, volumeInstance.path, defaultSgName)) return assocStorageGroupInstanceName def _create_default_storage_group(self, conn, controllerConfigService, fastPolicyName, storageGroupName, volumeInstance, extraSpecs): """Create a first volume for the storage group. This is necessary because you cannot remove a volume if it is the last in the group. Create the default storage group for the FAST policy and associate the storage group with the tier policy rule. :param conn: the connection information to the ecom server :param controllerConfigService: the controller configuration service :param fastPolicyName: the fast policy name (String) :param storageGroupName: the storage group name (String) :param volumeInstance: the volume instance :param extraSpecs: additional info :returns: defaultStorageGroupInstanceName - instance name of the default storage group """ failedRet = None firstVolumeInstance = self._create_volume_for_default_volume_group( conn, controllerConfigService, volumeInstance.path, extraSpecs) if firstVolumeInstance is None: LOG.error(_LE( "Failed to create a first volume for storage " "group: %(storageGroupName)s."), {'storageGroupName': storageGroupName}) return failedRet defaultStorageGroupInstanceName = ( self.provision.create_and_get_storage_group( conn, controllerConfigService, storageGroupName, firstVolumeInstance.path, extraSpecs)) if defaultStorageGroupInstanceName is None: LOG.error(_LE( "Failed to create default storage group for " "FAST policy: %(fastPolicyName)s."), {'fastPolicyName': fastPolicyName}) return failedRet storageSystemInstanceName = ( self.utils.find_storage_system(conn, controllerConfigService)) tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( conn, storageSystemInstanceName) # Get the fast policy instance name. tierPolicyRuleInstanceName = self._get_service_level_tier_policy( conn, tierPolicyServiceInstanceName, fastPolicyName) if tierPolicyRuleInstanceName is None: LOG.error(_LE( "Unable to get policy rule for fast policy: " "%(fastPolicyName)s."), {'fastPolicyName': fastPolicyName}) return failedRet # Now associate it with a FAST policy. self.add_storage_group_to_tier_policy_rule( conn, tierPolicyServiceInstanceName, defaultStorageGroupInstanceName, tierPolicyRuleInstanceName, storageGroupName, fastPolicyName, extraSpecs) return defaultStorageGroupInstanceName def _create_volume_for_default_volume_group( self, conn, controllerConfigService, volumeInstanceName, extraSpecs): """Creates a volume for the default storage group for a fast policy.
Creates a small first volume for the default storage group for a fast policy. This is necessary because you cannot remove the last volume from a storage group and this scenario is likely. :param conn: the connection information to the ecom server :param controllerConfigService: the controller configuration service :param volumeInstanceName: the volume instance name :param extraSpecs: additional info :returns: firstVolumeInstanceName - instance name of the first volume in the storage group """ failedRet = None storageSystemName = self.utils.find_storage_system_name_from_service( controllerConfigService) storageConfigurationInstanceName = ( self.utils.find_storage_configuration_service( conn, storageSystemName)) poolInstanceName = self.utils.get_assoc_pool_from_volume( conn, volumeInstanceName) if poolInstanceName is None: LOG.error(_LE("Unable to get associated pool of volume.")) return failedRet volumeName = 'vol1' volumeSize = '1' volumeDict, _rc = ( self.provision.create_volume_from_pool( conn, storageConfigurationInstanceName, volumeName, poolInstanceName, volumeSize, extraSpecs)) firstVolumeInstanceName = self.utils.find_volume_instance( conn, volumeDict, volumeName) return firstVolumeInstanceName def add_storage_group_to_tier_policy_rule( self, conn, tierPolicyServiceInstanceName, storageGroupInstanceName, tierPolicyRuleInstanceName, storageGroupName, fastPolicyName, extraSpecs): """Add the storage group to the tier policy rule. :param conn: the connection information to the ecom server :param tierPolicyServiceInstanceName: tier policy service :param storageGroupInstanceName: storage group instance name :param tierPolicyRuleInstanceName: tier policy instance name :param storageGroupName: the storage group name (String) :param fastPolicyName: the fast policy name (String) :param extraSpecs: additional info :returns: int -- return code :raises: VolumeBackendAPIException """ # 5 is ("Add InElements to Policy"). modificationType = '5' rc, job = conn.InvokeMethod( 'ModifyStorageTierPolicyRule', tierPolicyServiceInstanceName, PolicyRule=tierPolicyRuleInstanceName, Operation=self.utils.get_num(modificationType, '16'), InElements=[storageGroupInstanceName]) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error associating storage group %(storageGroupName)s " "with FAST policy %(fastPolicyName)s. Error " "description: %(errordesc)s.") % {'storageGroupName': storageGroupName, 'fastPolicyName': fastPolicyName, 'errordesc': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) return rc def _get_service_level_tier_policy( self, conn, tierPolicyServiceInstanceName, fastPolicyName): """Returns the tier policy rule matching the FAST policy name. Given the tier policy service, look through the existing tier policies on the array for the rule whose name matches the given FAST policy name.
:param conn: the connection information to the ecom server :param tierPolicyServiceInstanceName: the policy service :param fastPolicyName: the fast policy name, e.g. BRONZE1 :returns: foundTierPolicyRuleInstanceName - the short name, everything after the : """ foundTierPolicyRuleInstanceName = None tierPolicyRuleInstanceNames = self._get_existing_tier_policies( conn, tierPolicyServiceInstanceName) for tierPolicyRuleInstanceName in tierPolicyRuleInstanceNames: policyRuleName = tierPolicyRuleInstanceName['PolicyRuleName'] if fastPolicyName == policyRuleName: foundTierPolicyRuleInstanceName = tierPolicyRuleInstanceName break return foundTierPolicyRuleInstanceName def _get_existing_tier_policies(self, conn, tierPolicyServiceInstanceName): """Given the tier policy service, get the existing tier policies. :param conn: the connection information to the ecom server :param tierPolicyServiceInstanceName: the tier policy service instance Name :returns: list -- the tier policy rule instance names """ tierPolicyRuleInstanceNames = conn.AssociatorNames( tierPolicyServiceInstanceName, ResultClass='Symm_TierPolicyRule') return tierPolicyRuleInstanceNames def get_associated_tier_policy_from_storage_group( self, conn, storageGroupInstanceName): """Given the storage group instance name, get the associated tier policy. :param conn: the connection information to the ecom server :param storageGroupInstanceName: the storage group instance name :returns: the tier policy rule instance name, or None """ tierPolicyInstanceName = None tierPolicyInstanceNames = conn.AssociatorNames( storageGroupInstanceName, AssocClass='CIM_TierPolicySetAppliesToElement', ResultClass='CIM_TierPolicyRule') if (len(tierPolicyInstanceNames) > 0 and len(tierPolicyInstanceNames) < 2): tierPolicyInstanceName = tierPolicyInstanceNames[0] return tierPolicyInstanceName def get_associated_tier_from_tier_policy( self, conn, tierPolicyRuleInstanceName): """Given the tierPolicyInstanceName get the associated tiers. :param conn: the connection information to the ecom server :param tierPolicyRuleInstanceName: the tier policy rule instance name :returns: list -- a list of storage tier instance names """ storageTierInstanceNames = conn.AssociatorNames( tierPolicyRuleInstanceName, AssocClass='CIM_AssociatedTierPolicy') if len(storageTierInstanceNames) == 0: storageTierInstanceNames = None LOG.warning(_LW( "Unable to get storage tiers from tier policy rule.")) return storageTierInstanceNames def get_policy_default_storage_group( self, conn, controllerConfigService, policyName): """Returns the default storage group for a tier policy. Given the tier policy instance name get the associated default storage group. :param conn: the connection information to the ecom server :param controllerConfigService: ControllerConfigurationService instance name :param policyName: string value :returns: storageGroupInstanceName - instance name of the default storage group """ foundStorageMaskingGroupInstanceName = None storageMaskingGroupInstances = conn.Associators( controllerConfigService, ResultClass='CIM_DeviceMaskingGroup') for storageMaskingGroupInstance in storageMaskingGroupInstances: if ('_default_' in storageMaskingGroupInstance['ElementName'] and policyName in storageMaskingGroupInstance['ElementName']): # Check that it has not been recently deleted. instance = self.utils.get_existing_instance( conn, storageMaskingGroupInstance.path) if instance is None: # Storage Group doesn't exist any more.
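# (For example, a group named OS_default_GOLD1_SG -- GOLD1 being a
# hypothetical policy name wrapped in DEFAULT_SG_PREFIX and
# DEFAULT_SG_POSTFIX -- that was deleted out of band.)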
foundStorageMaskingGroupInstanceName = None else: foundStorageMaskingGroupInstanceName = ( storageMaskingGroupInstance.path) return foundStorageMaskingGroupInstanceName def _get_associated_storage_groups_from_tier_policy( self, conn, tierPolicyInstanceName): """Given the tier policy instance name get the storage groups. :param conn: the connection information to the ecom server :param tierPolicyInstanceName: tier policy instance name :returns: list -- the list of storage group instance names """ managedElementInstanceNames = conn.AssociatorNames( tierPolicyInstanceName, AssocClass='CIM_TierPolicySetAppliesToElement', ResultClass='CIM_DeviceMaskingGroup') return managedElementInstanceNames def get_associated_pools_from_tier( self, conn, storageTierInstanceName): """Given the storage tier instance name get the storage pools. :param conn: the connection information to the ecom server :param storageTierInstanceName: the storage tier instance name :returns: list -- a list of storage pool instance names """ storagePoolInstanceNames = conn.AssociatorNames( storageTierInstanceName, AssocClass='CIM_MemberOfCollection', ResultClass='CIM_StoragePool') return storagePoolInstanceNames def add_storage_group_and_verify_tier_policy_assoc( self, conn, controllerConfigService, storageGroupInstanceName, storageGroupName, fastPolicyName, extraSpecs): """Adds a storage group to a tier policy and verifies success. Add a storage group to a tier policy rule and verify that it was successful by getting the association. :param conn: the connection to the ecom server :param controllerConfigService: the controller config service :param storageGroupInstanceName: the storage group instance name :param storageGroupName: the storage group name (String) :param fastPolicyName: the fast policy name (String) :param extraSpecs: additional info :returns: assocTierPolicyInstanceName """ failedRet = None assocTierPolicyInstanceName = None storageSystemInstanceName = self.utils.find_storage_system( conn, controllerConfigService) tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( conn, storageSystemInstanceName) # Get the fast policy instance name. tierPolicyRuleInstanceName = self._get_service_level_tier_policy( conn, tierPolicyServiceInstanceName, fastPolicyName) if tierPolicyRuleInstanceName is None: LOG.error(_LE( "Cannot find the fast policy %(fastPolicyName)s."), {'fastPolicyName': fastPolicyName}) return failedRet else: LOG.debug( "Adding storage group %(storageGroupInstanceName)s to " "tier policy rule %(tierPolicyRuleInstanceName)s.", {'storageGroupInstanceName': storageGroupInstanceName, 'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName}) # Associate the new storage group with the existing fast policy. try: self.add_storage_group_to_tier_policy_rule( conn, tierPolicyServiceInstanceName, storageGroupInstanceName, tierPolicyRuleInstanceName, storageGroupName, fastPolicyName, extraSpecs) except Exception: LOG.exception(_LE( "Failed to add storage group %(storageGroupInstanceName)s " "to tier policy rule %(tierPolicyRuleInstanceName)s."), {'storageGroupInstanceName': storageGroupInstanceName, 'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName}) return failedRet # Check that the storage group has been associated with the # tier policy rule. 
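# --- Illustrative sketch (added for clarity; not part of the original
# driver). get_policy_default_storage_group above recognizes the default
# storage group purely by naming convention: its ElementName must contain
# both '_default_' and the policy name. The predicate in isolation (the
# sample names below are made up):

def is_default_sg_for_policy(element_name, policy_name):
    """True if an SG ElementName follows the default-SG convention."""
    return '_default_' in element_name and policy_name in element_name

# is_default_sg_for_policy('OS_default_GOLD1_SG', 'GOLD1') -> True
# is_default_sg_for_policy('OS-myhost-GOLD1-SG', 'GOLD1')  -> False
# --- End sketch ---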
assocTierPolicyInstanceName = ( self.get_associated_tier_policy_from_storage_group( conn, storageGroupInstanceName)) LOG.debug( "AssocTierPolicyInstanceName is " "%(assocTierPolicyInstanceName)s.", {'assocTierPolicyInstanceName': assocTierPolicyInstanceName}) return assocTierPolicyInstanceName def get_associated_policy_from_storage_group( self, conn, storageGroupInstanceName): """Get the tier policy instance name for a storage group instance name. :param conn: the connection information to the ecom server :param storageGroupInstanceName: storage group instance name :returns: foundTierPolicyInstanceName - instance name of the tier policy object """ foundTierPolicyInstanceName = None tierPolicyInstanceNames = conn.AssociatorNames( storageGroupInstanceName, ResultClass='Symm_TierPolicyRule', AssocClass='Symm_TierPolicySetAppliesToElement') if len(tierPolicyInstanceNames) > 0: foundTierPolicyInstanceName = tierPolicyInstanceNames[0] return foundTierPolicyInstanceName def delete_storage_group_from_tier_policy_rule( self, conn, tierPolicyServiceInstanceName, storageGroupInstanceName, tierPolicyRuleInstanceName, extraSpecs): """Disassociate the storage group from its tier policy rule. :param conn: connection to the ecom server :param tierPolicyServiceInstanceName: instance name of the tier policy service :param storageGroupInstanceName: instance name of the storage group :param tierPolicyRuleInstanceName: instance name of the tier policy associated with the storage group :param extraSpecs: additional information """ modificationType = '6' LOG.debug("Invoking ModifyStorageTierPolicyRule %s.", tierPolicyRuleInstanceName) try: rc, job = conn.InvokeMethod( 'ModifyStorageTierPolicyRule', tierPolicyServiceInstanceName, PolicyRule=tierPolicyRuleInstanceName, Operation=self.utils.get_num(modificationType, '16'), InElements=[storageGroupInstanceName]) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: LOG.error(_LE("Error disassociating storage group from " "policy: %s."), errordesc) else: LOG.debug("Disassociated storage group from policy.") else: LOG.debug("ModifyStorageTierPolicyRule completed.") except Exception as e: LOG.info(_LI("Storage group not associated with the " "policy. Exception is %s."), e) def get_pool_associated_to_policy( self, conn, fastPolicyName, arraySN, storageConfigService, poolInstanceName): """Given a FAST policy check that the pool is linked to the policy. If it's associated return the pool instance, if not return None. First check if FAST is enabled on the array. :param conn: the ecom connection :param fastPolicyName: the fast policy name (String) :param arraySN: the array serial number (String) :param storageConfigService: the storage Config Service :param poolInstanceName: the pool instance we want to check for association with the fast storage tier :returns: foundPoolInstanceName """ storageSystemInstanceName = self.utils.find_storage_system( conn, storageConfigService) if not self._check_if_fast_supported(conn, storageSystemInstanceName): errorMessage = (_( "FAST is not supported on this array.")) LOG.error(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( conn, storageSystemInstanceName) tierPolicyRuleInstanceName = self._get_service_level_tier_policy( conn, tierPolicyServiceInstanceName, fastPolicyName) # Get the associated storage tiers from the tier policy rule. 
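# --- Illustrative sketch (added for clarity; not part of the original
# driver). Association and disassociation differ only in the magic
# Operation value passed to ModifyStorageTierPolicyRule: '5' adds the
# InElements, '6' removes them (see the two methods above). Naming the
# values and sharing one wrapper makes that explicit; get_num is assumed,
# as above, to coerce the string to a 16-bit CIM integer:

ADD_IN_ELEMENTS = '5'     # "Add InElements to Policy"
REMOVE_IN_ELEMENTS = '6'  # "Remove InElements from Policy"

def modify_policy_rule(conn, utils, service, rule, storage_group, op):
    """Add or remove one storage group on a tier policy rule."""
    return conn.InvokeMethod(
        'ModifyStorageTierPolicyRule', service,
        PolicyRule=rule,
        Operation=utils.get_num(op, '16'),
        InElements=[storage_group])
# --- End sketch ---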
storageTierInstanceNames = self.get_associated_tier_from_tier_policy( conn, tierPolicyRuleInstanceName) # For each storage tier get the associated pools. foundPoolInstanceName = None for storageTierInstanceName in storageTierInstanceNames: assocStoragePoolInstanceNames = ( self.get_associated_pools_from_tier(conn, storageTierInstanceName)) for assocStoragePoolInstanceName in assocStoragePoolInstanceNames: if poolInstanceName == assocStoragePoolInstanceName: foundPoolInstanceName = poolInstanceName break if foundPoolInstanceName is not None: break return foundPoolInstanceName def is_tiering_policy_enabled_on_storage_system( self, conn, storageSystemInstanceName): """Checks if tiering policy is enabled on a storage system. True if FAST policy is enabled on the given storage system; False otherwise. :param conn: the ecom connection :param storageSystemInstanceName: a storage system instance name :returns: boolean -- isTieringPolicySupported """ try: tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( conn, storageSystemInstanceName) isTieringPolicySupported = self.is_tiering_policy_enabled( conn, tierPolicyServiceInstanceName) except Exception as e: LOG.error(_LE("Exception: %s."), e) return False return isTieringPolicySupported def get_tier_policy_by_name( self, conn, arrayName, policyName): """Given the name of the policy, get the TierPolicyRule instance name. :param conn: the ecom connection :param arrayName: the array :param policyName: string -- the name of policy rule :returns: tier policy instance name. None if not found """ tierPolicyInstanceNames = conn.EnumerateInstanceNames( 'Symm_TierPolicyRule') for policy in tierPolicyInstanceNames: if (policyName == policy['PolicyRuleName'] and arrayName in policy['SystemName']): return policy return None def get_capacities_associated_to_policy(self, conn, arrayName, policyName): """Gets the total and unused capacities for all pools in a policy. Given the name of the policy, get the total capacity and unused capacity in GB of all the storage pools associated with the policy. :param conn: the ecom connection :param arrayName: the array :param policyName: the name of policy rule, a string value :returns: int -- total capacity in GB of all pools associated with the policy :returns: int -- (total capacity-EMCSubscribedCapacity) in GB of all pools associated with the policy """ policyInstanceName = self.get_tier_policy_by_name( conn, arrayName, policyName) total_capacity_gb = 0 allocated_capacity_gb = 0 tierInstanceNames = self.get_associated_tier_from_tier_policy( conn, policyInstanceName) for tierInstanceName in tierInstanceNames: # Check that tier hasn't suddenly been deleted. instance = self.utils.get_existing_instance(conn, tierInstanceName) if instance is None: # Tier doesn't exist any more. break poolInstanceNames = self.get_associated_pools_from_tier( conn, tierInstanceName) for poolInstanceName in poolInstanceNames: # Check that pool hasn't suddenly been deleted. storagePoolInstance = self.utils.get_existing_instance( conn, poolInstanceName) if storagePoolInstance is None: # Pool doesn't exist any more. 
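# --- Illustrative sketch (added for clarity; not part of the original
# driver). The double loop with two break statements above answers a
# single question: is this pool reachable from any tier of the policy?
# The same membership test over plain iterables:

def pool_in_policy(pool, tiers, pools_of_tier):
    """True if `pool` appears under any of the policy's tiers.

    `pools_of_tier` abstracts get_associated_pools_from_tier().
    """
    return any(pool in pools_of_tier(tier) for tier in tiers)

# e.g. pool_in_policy('Pool_FC', ['TIER_FC', 'TIER_SATA'],
#                     lambda t: {'TIER_FC': ['Pool_FC'],
#                                'TIER_SATA': ['Pool_SATA']}[t]) -> True
# --- End sketch ---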
break total_capacity_gb += self.utils.convert_bits_to_gbs( storagePoolInstance['TotalManagedSpace']) allocated_capacity_gb += self.utils.convert_bits_to_gbs( storagePoolInstance['EMCSubscribedCapacity']) LOG.debug( "PolicyName:%(policyName)s, pool: %(poolInstanceName)s, " "allocated_capacity_gb = %(allocated_capacity_gb)lu.", {'policyName': policyName, 'poolInstanceName': poolInstanceName, 'allocated_capacity_gb': allocated_capacity_gb}) free_capacity_gb = total_capacity_gb - allocated_capacity_gb return (total_capacity_gb, free_capacity_gb) def get_or_create_default_storage_group( self, conn, controllerConfigService, fastPolicyName, volumeInstance, extraSpecs): """Create or get a default storage group for FAST policy. :param conn: the ecom connection :param controllerConfigService: the controller configuration service :param fastPolicyName: the fast policy name (String) :param volumeInstance: the volume instance :param extraSpecs: additional info :returns: defaultStorageGroupInstanceName - the default storage group instance name """ defaultSgName = self.format_default_sg_string(fastPolicyName) defaultStorageGroupInstanceName = ( self.utils.find_storage_masking_group(conn, controllerConfigService, defaultSgName)) if defaultStorageGroupInstanceName is None: # Create it and associate it with the FAST policy in question. defaultStorageGroupInstanceName = ( self._create_default_storage_group(conn, controllerConfigService, fastPolicyName, defaultSgName, volumeInstance, extraSpecs)) return defaultStorageGroupInstanceName def _get_associated_tier_policy_from_pool(self, conn, poolInstanceName): """Given the pool instance name get the associated FAST tier policy. :param conn: the connection information to the ecom server :param poolInstanceName: the pool instance name :returns: the FAST Policy name (if it exists) """ fastPolicyName = None storageTierInstanceNames = conn.AssociatorNames( poolInstanceName, AssocClass='CIM_MemberOfCollection', ResultClass='CIM_StorageTier') if len(storageTierInstanceNames) > 0: tierPolicyInstanceNames = conn.AssociatorNames( storageTierInstanceNames[0], AssocClass='CIM_AssociatedTierPolicy') if len(tierPolicyInstanceNames) > 0: tierPolicyInstanceName = tierPolicyInstanceNames[0] fastPolicyName = tierPolicyInstanceName['PolicyRuleName'] return fastPolicyName def is_volume_in_default_SG(self, conn, volumeInstanceName): """Check if the volume is already part of the default storage group. :param conn: the ecom connection :param volumeInstanceName: the volume instance :returns: boolean -- True if the volume is already in default storage group. False otherwise """ sgInstanceNames = conn.AssociatorNames( volumeInstanceName, ResultClass='CIM_DeviceMaskingGroup') if len(sgInstanceNames) == 0: LOG.debug("volume %(vol)s is not in default sg.", {'vol': volumeInstanceName}) return False else: for sgInstance in sgInstanceNames: if DEFAULT_SG_PREFIX in sgInstance['InstanceID']: LOG.debug("volume %(vol)s already in default sg.", {'vol': volumeInstanceName}) return True return False cinder-8.0.0/cinder/volume/drivers/lenovo/0000775000567000056710000000000012701406543021661 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/lenovo/lenovo_common.py0000664000567000056710000000517412701406250025107 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from cinder.volume.drivers.dothill import dothill_common from cinder.volume.drivers.lenovo import lenovo_client common_opts = [ cfg.StrOpt('lenovo_backend_name', default='A', help="Pool or Vdisk name to use for volume creation."), cfg.StrOpt('lenovo_backend_type', choices=['linear', 'virtual'], default='virtual', help="linear (for VDisk) or virtual (for Pool)."), cfg.StrOpt('lenovo_api_protocol', choices=['http', 'https'], default='https', help="Lenovo api interface protocol."), cfg.BoolOpt('lenovo_verify_certificate', default=False, help="Whether to verify Lenovo array SSL certificate."), cfg.StrOpt('lenovo_verify_certificate_path', help="Lenovo array SSL certificate path.") ] iscsi_opts = [ cfg.ListOpt('lenovo_iscsi_ips', default=[], help="List of comma-separated target iSCSI IP addresses."), ] CONF = cfg.CONF CONF.register_opts(common_opts) CONF.register_opts(iscsi_opts) class LenovoCommon(dothill_common.DotHillCommon): VERSION = "1.0" def __init__(self, config): self.config = config self.vendor_name = "Lenovo" self.backend_name = self.config.lenovo_backend_name self.backend_type = self.config.lenovo_backend_type self.api_protocol = self.config.lenovo_api_protocol ssl_verify = False if (self.api_protocol == 'https' and self.config.lenovo_verify_certificate): ssl_verify = self.config.lenovo_verify_certificate_path or True self.client = lenovo_client.LenovoClient(self.config.san_ip, self.config.san_login, self.config.san_password, self.api_protocol, ssl_verify) cinder-8.0.0/cinder/volume/drivers/lenovo/__init__.py0000664000567000056710000000000012701406250023753 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/lenovo/lenovo_client.py0000664000567000056710000000167612701406250025100 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder.volume.drivers.dothill import dothill_client class LenovoClient(dothill_client.DotHillClient): def __init__(self, host, login, password, protocol, ssl_verify): super(LenovoClient, self).__init__(host, login, password, protocol, ssl_verify) cinder-8.0.0/cinder/volume/drivers/lenovo/lenovo_iscsi.py0000664000567000056710000000257212701406250024730 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder.volume.drivers.dothill import dothill_iscsi from cinder.volume.drivers.lenovo import lenovo_common class LenovoISCSIDriver(dothill_iscsi.DotHillISCSIDriver): """OpenStack iSCSI cinder drivers for Lenovo Storage arrays. Version history: 1.0 - Inheriting from DotHill cinder drivers. """ VERSION = "1.0" def __init__(self, *args, **kwargs): super(LenovoISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(lenovo_common.common_opts) self.configuration.append_config_values(lenovo_common.iscsi_opts) self.iscsi_ips = self.configuration.lenovo_iscsi_ips def _init_common(self): return lenovo_common.LenovoCommon(self.configuration) cinder-8.0.0/cinder/volume/drivers/lenovo/lenovo_fc.py0000664000567000056710000000235412701406250024204 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder.volume.drivers.dothill import dothill_fc from cinder.volume.drivers.lenovo import lenovo_common class LenovoFCDriver(dothill_fc.DotHillFCDriver): """OpenStack Fibre Channel cinder drivers for Lenovo Storage arrays. Version history: 1.0 - Inheriting from DotHill cinder drivers. """ VERSION = "1.0" def __init__(self, *args, **kwargs): super(LenovoFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(lenovo_common.common_opts) def _init_common(self): return lenovo_common.LenovoCommon(self.configuration) cinder-8.0.0/cinder/volume/drivers/lvm.py0000664000567000056710000010106312701406250021523 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for Linux servers running LVM. 
""" import math import os import socket from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import units import six from cinder.brick.local_dev import lvm as lvm from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder import objects from cinder import utils from cinder.volume import driver from cinder.volume import utils as volutils LOG = logging.getLogger(__name__) # FIXME(jdg): We'll put the lvm_ prefix back on these when we # move over to using this as the real LVM driver, for now we'll # rename them so that the config generation utility doesn't barf # on duplicate entries. volume_opts = [ cfg.StrOpt('volume_group', default='cinder-volumes', help='Name for the VG that will contain exported volumes'), cfg.IntOpt('lvm_mirrors', default=0, help='If >0, create LVs with multiple mirrors. Note that ' 'this requires lvm_mirrors + 2 PVs with available space'), cfg.StrOpt('lvm_type', default='default', choices=['default', 'thin', 'auto'], help='Type of LVM volumes to deploy; (default, thin, or auto). ' 'Auto defaults to thin if thin is supported.'), cfg.StrOpt('lvm_conf_file', default='/etc/cinder/lvm.conf', help='LVM conf file to use for the LVM driver in Cinder; ' 'this setting is ignored if the specified file does ' 'not exist (You can also specify \'None\' to not use ' 'a conf file even if one exists).'), cfg.FloatOpt('lvm_max_over_subscription_ratio', # This option exists to provide a default value for the # LVM driver which is different than the global default. default=1.0, help='max_over_subscription_ratio setting for the LVM ' 'driver. If set, this takes precedence over the ' 'general max_over_subscription_ratio option. 
If ' 'None, the general option is used.') ] CONF = cfg.CONF CONF.register_opts(volume_opts) class LVMVolumeDriver(driver.VolumeDriver): """Executes commands relating to Volumes.""" VERSION = '3.0.0' def __init__(self, vg_obj=None, *args, **kwargs): # Parent sets db, host, _execute and base config super(LVMVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) self.hostname = socket.gethostname() self.vg = vg_obj self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'LVM' # Target Driver is what handles data-transport # Transport specific code should NOT be in # the driver (control path), this way # different target drivers can be added (iscsi, FC etc) target_driver = \ self.target_mapping[self.configuration.safe_get('iscsi_helper')] LOG.debug('Attempting to initialize LVM driver with the ' 'following target_driver: %s', target_driver) self.target_driver = importutils.import_object( target_driver, configuration=self.configuration, db=self.db, executor=self._execute) self.protocol = self.target_driver.protocol self._sparse_copy_volume = False if self.configuration.lvm_max_over_subscription_ratio is not None: self.configuration.max_over_subscription_ratio = \ self.configuration.lvm_max_over_subscription_ratio def _sizestr(self, size_in_g): return '%sg' % size_in_g def _volume_not_present(self, volume_name): return self.vg.get_volume(volume_name) is None def _delete_volume(self, volume, is_snapshot=False): """Deletes a logical volume.""" if self.configuration.volume_clear != 'none' and \ self.configuration.lvm_type != 'thin': self._clear_volume(volume, is_snapshot) name = volume['name'] if is_snapshot: name = self._escape_snapshot(volume['name']) self.vg.delete(name) def _clear_volume(self, volume, is_snapshot=False): # zero out old volumes to prevent data leaking between users # TODO(ja): reclaiming space should be done lazy and low priority if is_snapshot: # if the volume to be cleared is a snapshot of another volume # we need to clear out the volume using the -cow instead of the # directly volume path. We need to skip this if we are using # thin provisioned LVs. # bug# lp1191812 dev_path = self.local_path(volume) + "-cow" else: dev_path = self.local_path(volume) # TODO(jdg): Maybe we could optimize this for snaps by looking at # the cow table and only overwriting what's necessary? # for now we're still skipping on snaps due to hang issue if not os.path.exists(dev_path): msg = (_('Volume device file path %s does not exist.') % dev_path) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) size_in_g = (volume.get('volume_size') if is_snapshot else volume.get('size')) if size_in_g is None: msg = (_("Size for volume: %s not found, cannot secure delete.") % volume['id']) LOG.error(msg) raise exception.InvalidParameterValue(msg) # clear_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in vol_sz_in_meg = size_in_g * units.Ki volutils.clear_volume( vol_sz_in_meg, dev_path, volume_clear=self.configuration.volume_clear, volume_clear_size=self.configuration.volume_clear_size) def _escape_snapshot(self, snapshot_name): # Linux LVM reserves name that starts with snapshot, so that # such volume name can't be created. Mangle it. 
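# --- Illustrative sketch (added for clarity; not part of the original
# driver). _clear_volume above encodes two small rules: Cinder stores
# sizes in integer GiB while volutils.clear_volume works in MiB, and for
# snapshots the wipe must target the copy-on-write device rather than the
# snapshot LV itself. Extracted:
from oslo_utils import units  # units.Ki == 1024

def clear_target(dev_path, size_in_g, is_snapshot=False):
    """Return the (path, size_in_mb) a secure wipe should operate on."""
    path = dev_path + '-cow' if is_snapshot else dev_path
    return path, size_in_g * units.Ki

# clear_target('/dev/mapper/vg-snap', 2, is_snapshot=True)
#   -> ('/dev/mapper/vg-snap-cow', 2048)
# --- End sketch ---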
if not snapshot_name.startswith('snapshot'): return snapshot_name return '_' + snapshot_name def _create_volume(self, name, size, lvm_type, mirror_count, vg=None): vg_ref = self.vg if vg is not None: vg_ref = vg vg_ref.create_volume(name, size, lvm_type, mirror_count) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") if self.vg is None: LOG.warning(_LW('Unable to update stats on non-initialized ' 'Volume Group: %s'), self.configuration.volume_group) return self.vg.update_volume_group_info() data = {} # Note(zhiteng): These information are driver/backend specific, # each driver may define these values in its own config options # or fetch from driver specific configuration file. data["volume_backend_name"] = self.backend_name data["vendor_name"] = 'Open Source' data["driver_version"] = self.VERSION data["storage_protocol"] = self.protocol data["pools"] = [] total_capacity = 0 free_capacity = 0 if self.configuration.lvm_mirrors > 0: total_capacity =\ self.vg.vg_mirror_size(self.configuration.lvm_mirrors) free_capacity =\ self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors) provisioned_capacity = round( float(total_capacity) - float(free_capacity), 2) elif self.configuration.lvm_type == 'thin': total_capacity = self.vg.vg_thin_pool_size free_capacity = self.vg.vg_thin_pool_free_space provisioned_capacity = self.vg.vg_provisioned_capacity else: total_capacity = self.vg.vg_size free_capacity = self.vg.vg_free_space provisioned_capacity = round( float(total_capacity) - float(free_capacity), 2) location_info = \ ('LVMVolumeDriver:%(hostname)s:%(vg)s' ':%(lvm_type)s:%(lvm_mirrors)s' % {'hostname': self.hostname, 'vg': self.configuration.volume_group, 'lvm_type': self.configuration.lvm_type, 'lvm_mirrors': self.configuration.lvm_mirrors}) thin_enabled = self.configuration.lvm_type == 'thin' # Calculate the total volumes used by the VG group. # This includes volumes and snapshots. total_volumes = len(self.vg.get_volumes()) # Skip enabled_pools setting, treat the whole backend as one pool # XXX FIXME if multipool support is added to LVM driver. single_pool = {} single_pool.update(dict( pool_name=data["volume_backend_name"], total_capacity_gb=total_capacity, free_capacity_gb=free_capacity, reserved_percentage=self.configuration.reserved_percentage, location_info=location_info, QoS_support=False, provisioned_capacity_gb=provisioned_capacity, max_over_subscription_ratio=( self.configuration.max_over_subscription_ratio), thin_provisioning_support=thin_enabled, thick_provisioning_support=not thin_enabled, total_volumes=total_volumes, filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function(), multiattach=True )) data["pools"].append(single_pool) # Check availability of sparse volume copy. 
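# --- Illustrative sketch (added for clarity; not part of the original
# driver). The stats code above picks its capacity numbers three ways
# depending on configuration. Condensed into one function over a
# duck-typed vg object exposing the same attributes used above:

def capacity_view(vg, lvm_type, lvm_mirrors):
    """Return (total, free, provisioned) as _update_volume_stats does."""
    if lvm_mirrors > 0:
        total = vg.vg_mirror_size(lvm_mirrors)
        free = vg.vg_mirror_free_space(lvm_mirrors)
        provisioned = round(float(total) - float(free), 2)
    elif lvm_type == 'thin':
        total = vg.vg_thin_pool_size
        free = vg.vg_thin_pool_free_space
        provisioned = vg.vg_provisioned_capacity
    else:
        total = vg.vg_size
        free = vg.vg_free_space
        provisioned = round(float(total) - float(free), 2)
    return total, free, provisioned
# --- End sketch ---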
data['sparse_copy_volume'] = self._sparse_copy_volume self._stats = data def check_for_setup_error(self): """Verify that requirements are in place to use LVM driver.""" if self.vg is None: root_helper = utils.get_root_helper() lvm_conf_file = self.configuration.lvm_conf_file if lvm_conf_file.lower() == 'none': lvm_conf_file = None try: self.vg = lvm.LVM(self.configuration.volume_group, root_helper, lvm_type=self.configuration.lvm_type, executor=self._execute, lvm_conf=lvm_conf_file) except exception.VolumeGroupNotFound: message = (_("Volume Group %s does not exist") % self.configuration.volume_group) raise exception.VolumeBackendAPIException(data=message) vg_list = volutils.get_all_volume_groups( self.configuration.volume_group) vg_dict = \ next(vg for vg in vg_list if vg['name'] == self.vg.vg_name) if vg_dict is None: message = (_("Volume Group %s does not exist") % self.configuration.volume_group) raise exception.VolumeBackendAPIException(data=message) pool_name = "%s-pool" % self.configuration.volume_group if self.configuration.lvm_type == 'auto': # Default to thin provisioning if it is supported and # the volume group is empty, or contains a thin pool # for us to use. self.vg.update_volume_group_info() self.configuration.lvm_type = 'default' if volutils.supports_thin_provisioning(): if self.vg.get_volume(pool_name) is not None: LOG.info(_LI('Enabling LVM thin provisioning by default ' 'because a thin pool exists.')) self.configuration.lvm_type = 'thin' elif len(self.vg.get_volumes()) == 0: LOG.info(_LI('Enabling LVM thin provisioning by default ' 'because no LVs exist.')) self.configuration.lvm_type = 'thin' if self.configuration.lvm_type == 'thin': # Specific checks for using Thin provisioned LV's if not volutils.supports_thin_provisioning(): message = _("Thin provisioning not supported " "on this version of LVM.") raise exception.VolumeBackendAPIException(data=message) if self.vg.get_volume(pool_name) is None: try: self.vg.create_thin_pool(pool_name) except processutils.ProcessExecutionError as exc: exception_message = (_("Failed to create thin pool, " "error message was: %s") % six.text_type(exc.stderr)) raise exception.VolumeBackendAPIException( data=exception_message) # Enable sparse copy since lvm_type is 'thin' self._sparse_copy_volume = True def create_volume(self, volume): """Creates a logical volume.""" mirror_count = 0 if self.configuration.lvm_mirrors: mirror_count = self.configuration.lvm_mirrors self._create_volume(volume['name'], self._sizestr(volume['size']), self.configuration.lvm_type, mirror_count) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update from LVM for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. 
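# --- Illustrative sketch (added for clarity; not part of the original
# driver). The lvm_type='auto' resolution in check_for_setup_error above
# reduces to a small decision: thin if the platform supports it AND (a
# thin pool already exists OR the VG holds no LVs yet), else 'default':

def resolve_lvm_type(requested, supports_thin, pool_exists, lv_count):
    """Mirror the 'auto' branch of check_for_setup_error."""
    if requested != 'auto':
        return requested
    if supports_thin and (pool_exists or lv_count == 0):
        return 'thin'
    return 'default'

# resolve_lvm_type('auto', True, False, 0) -> 'thin'
# resolve_lvm_type('auto', True, False, 7) -> 'default'
# --- End sketch ---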
:param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ name_id = None provider_location = None if original_volume_status == 'available': current_name = CONF.volume_name_template % new_volume['id'] original_volume_name = CONF.volume_name_template % volume['id'] try: self.vg.rename_volume(current_name, original_volume_name) except processutils.ProcessExecutionError: LOG.error(_LE('Unable to rename the logical volume ' 'for volume: %s'), volume['id']) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] else: # The back-end will not be renamed. name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': provider_location} def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" self._create_volume(volume['name'], self._sizestr(volume['size']), self.configuration.lvm_type, self.configuration.lvm_mirrors) # Some configurations of LVM do not automatically activate # ThinLVM snapshot LVs. self.vg.activate_lv(snapshot['name'], is_snapshot=True) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in volutils.copy_volume(self.local_path(snapshot), self.local_path(volume), snapshot['volume_size'] * units.Ki, self.configuration.volume_dd_blocksize, execute=self._execute, sparse=self._sparse_copy_volume) def delete_volume(self, volume): """Deletes a logical volume.""" # NOTE(jdg): We don't need to explicitly call # remove export here because we already did it # in the manager before we got here. if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True if self.vg.lv_has_snapshot(volume['name']): LOG.error(_LE('Unable to delete due to existing snapshot ' 'for volume: %s'), volume['name']) raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume) LOG.info(_LI('Successfully deleted volume: %s'), volume['id']) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']), snapshot['volume_name'], self.configuration.lvm_type) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" if self._volume_not_present(self._escape_snapshot(snapshot['name'])): # If the snapshot isn't present, then don't attempt to delete LOG.warning(_LW("snapshot: %s not found, " "skipping delete operations"), snapshot['name']) LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id']) return True # TODO(yamahata): zeroing out the whole snapshot triggers COW. # it's quite slow. 
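# --- Illustrative sketch (added for clarity; not part of the original
# driver). The snapshot paths above funnel every snapshot name through
# _escape_snapshot because LVM reserves names beginning with 'snapshot'.
# The mangle rule in isolation:

def escape_snapshot(name):
    """Prefix reserved LVM snapshot names, as _escape_snapshot does."""
    return '_' + name if name.startswith('snapshot') else name

# escape_snapshot('snapshot-1234') -> '_snapshot-1234'
# escape_snapshot('volume-1234')   -> 'volume-1234'
# --- End sketch ---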
self._delete_volume(snapshot, is_snapshot=True) def local_path(self, volume, vg=None): if vg is None: vg = self.configuration.volume_group # NOTE(vish): stops deprecation warning escaped_group = vg.replace('-', '--') escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume['size']) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" image_utils.upload_volume(context, image_service, image_meta, self.local_path(volume)) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" if self.configuration.lvm_type == 'thin': self.vg.create_lv_snapshot(volume['name'], src_vref['name'], self.configuration.lvm_type) if volume['size'] > src_vref['size']: LOG.debug("Resize the new volume to %s.", volume['size']) self.extend_volume(volume, volume['size']) self.vg.activate_lv(volume['name'], is_snapshot=True, permanent=True) return mirror_count = 0 if self.configuration.lvm_mirrors: mirror_count = self.configuration.lvm_mirrors LOG.info(_LI('Creating clone of volume: %s'), src_vref['id']) volume_name = src_vref['name'] temp_id = 'tmp-snap-%s' % volume['id'] temp_snapshot = {'volume_name': volume_name, 'size': src_vref['size'], 'volume_size': src_vref['size'], 'name': 'clone-snap-%s' % volume['id'], 'id': temp_id} self.create_snapshot(temp_snapshot) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in try: self._create_volume(volume['name'], self._sizestr(volume['size']), self.configuration.lvm_type, mirror_count) self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True) volutils.copy_volume( self.local_path(temp_snapshot), self.local_path(volume), src_vref['size'] * units.Ki, self.configuration.volume_dd_blocksize, execute=self._execute, sparse=self._sparse_copy_volume) finally: self.delete_snapshot(temp_snapshot) def clone_image(self, context, volume, image_location, image_meta, image_service): return None, False def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup.volume_id) snapshot = None if backup.snapshot_id: snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) temp_snapshot = None # NOTE(xyang): If it is to backup from snapshot, back it up # directly. No need to clean it up. if snapshot: volume_path = self.local_path(snapshot) else: # NOTE(xyang): If it is not to backup from snapshot, check volume # status. If the volume status is 'in-use', create a temp snapshot # from the source volume, backup the temp snapshot, and then clean # up the temp snapshot; if the volume status is 'available', just # backup the volume. 
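# --- Illustrative sketch (added for clarity; not part of the original
# driver). The thick-clone path in create_cloned_volume above is
# snapshot -> copy -> delete-snapshot, with cleanup guaranteed by
# try/finally. The control flow, with the three steps abstracted as
# callables:

def clone_with_temp_snapshot(create_snap, copy_data, delete_snap):
    """The temporary snapshot never outlives the clone, even on failure."""
    snapshot = create_snap()   # point-in-time view of the source LV
    try:
        copy_data(snapshot)    # volutils.copy_volume() in the driver
    finally:
        delete_snap(snapshot)
# --- End sketch ---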
previous_status = volume.get('previous_status', None) if previous_status == "in-use": temp_snapshot = self._create_temp_snapshot(context, volume) backup.temp_snapshot_id = temp_snapshot.id backup.save() volume_path = self.local_path(temp_snapshot) else: volume_path = self.local_path(volume) try: with utils.temporary_chown(volume_path): with open(volume_path) as volume_file: backup_service.backup(backup, volume_file) finally: if temp_snapshot: self._delete_temp_snapshot(context, temp_snapshot) backup.temp_snapshot_id = None backup.save() def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume.""" volume_path = self.local_path(volume) with utils.temporary_chown(volume_path): with open(volume_path, 'wb') as volume_file: backup_service.restore(backup, volume['id'], volume_file) def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update the stats first. """ if refresh: self._update_volume_stats() return self._stats def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" self.vg.extend_volume(volume['name'], self._sizestr(new_size)) def manage_existing(self, volume, existing_ref): """Manages an existing LV. Renames the LV to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated. """ lv_name = existing_ref['source-name'] self.vg.get_volume(lv_name) if volutils.check_already_managed_volume(self.db, lv_name): raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name) # Attempt to rename the LV to match the OpenStack internal name. try: self.vg.rename_volume(lv_name, volume['name']) except processutils.ProcessExecutionError as exc: exception_message = (_("Failed to rename logical volume %(name)s, " "error message was: %(err_msg)s") % {'name': lv_name, 'err_msg': exc.stderr}) raise exception.VolumeBackendAPIException( data=exception_message) def manage_existing_object_get_size(self, existing_object, existing_ref, object_type): """Return size of an existing LV for manage existing volume/snapshot. existing_ref is a dictionary of the form: {'source-name': } """ # Check that the reference is valid if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) lv_name = existing_ref['source-name'] lv = self.vg.get_volume(lv_name) # Raise an exception if we didn't find a suitable LV. if not lv: kwargs = {'existing_ref': lv_name, 'reason': 'Specified logical volume does not exist.'} raise exception.ManageExistingInvalidReference(**kwargs) # LV size is returned in gigabytes. Attempt to parse size as a float # and round up to the next integer. 
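# --- Illustrative sketch (added for clarity; not part of the original
# driver). backup_volume above chooses its source in three steps; as a
# pure function over the two inputs involved:

def backup_source(has_snapshot, previous_status):
    """Which device should be handed to the backup service?"""
    if has_snapshot:
        return 'snapshot'       # back the given snapshot up directly
    if previous_status == 'in-use':
        return 'temp-snapshot'  # attached volume: snapshot it first
    return 'volume'             # detached volume: back it up as-is
# --- End sketch ---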
try: lv_size = int(math.ceil(float(lv['size']))) except ValueError: exception_message = (_("Failed to manage existing %(type)s " "%(name)s, because reported size %(size)s " "was not a floating-point number.") % {'type': object_type, 'name': lv_name, 'size': lv['size']}) raise exception.VolumeBackendAPIException( data=exception_message) return lv_size def manage_existing_get_size(self, volume, existing_ref): return self.manage_existing_object_get_size(volume, existing_ref, "volume") def manage_existing_snapshot_get_size(self, snapshot, existing_ref): if not isinstance(existing_ref, dict): existing_ref = {"source-name": existing_ref} return self.manage_existing_object_get_size(snapshot, existing_ref, "snapshot") def manage_existing_snapshot(self, snapshot, existing_ref): dest_name = self._escape_snapshot(snapshot['name']) snapshot_temp = {"name": dest_name} if not isinstance(existing_ref, dict): existing_ref = {"source-name": existing_ref} return self.manage_existing(snapshot_temp, existing_ref) def retype(self, context, volume, new_type, diff, host): """Retypes a volume, allow QoS and extra_specs change.""" LOG.debug('LVM retype called for volume %s. No action ' 'required for LVM volumes.', volume['id']) return True def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0): """Optimize the migration if the destination is on the same server. If the specified host is another back-end on the same server, and the volume is not attached, we can do the migration locally without going through iSCSI. """ false_ret = (False, None) if volume['status'] != 'available': return false_ret if 'location_info' not in host['capabilities']: return false_ret info = host['capabilities']['location_info'] try: (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\ info.split(':') lvm_mirrors = int(lvm_mirrors) except ValueError: return false_ret if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname): return false_ret if dest_vg != self.vg.vg_name: vg_list = volutils.get_all_volume_groups() try: next(vg for vg in vg_list if vg['name'] == dest_vg) except StopIteration: LOG.error(_LE("Destination Volume Group %s does not exist"), dest_vg) return false_ret helper = utils.get_root_helper() lvm_conf_file = self.configuration.lvm_conf_file if lvm_conf_file.lower() == 'none': lvm_conf_file = None dest_vg_ref = lvm.LVM(dest_vg, helper, lvm_type=lvm_type, executor=self._execute, lvm_conf=lvm_conf_file) self._create_volume(volume['name'], self._sizestr(volume['size']), lvm_type, lvm_mirrors, dest_vg_ref) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in size_in_mb = int(volume['size']) * units.Ki try: volutils.copy_volume(self.local_path(volume), self.local_path(volume, vg=dest_vg), size_in_mb, self.configuration.volume_dd_blocksize, execute=self._execute, sparse=self._sparse_copy_volume) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Volume migration failed due to " "exception: %(reason)s."), {'reason': six.text_type(e)}, resource=volume) dest_vg_ref.delete(volume) self._delete_volume(volume) return (True, None) else: message = (_("Refusing to migrate volume ID: %(id)s. 
Please " "check your configuration because source and " "destination are the same Volume Group: %(name)s.") % {'id': volume['id'], 'name': self.vg.vg_name}) LOG.error(message) raise exception.VolumeBackendAPIException(data=message) def get_pool(self, volume): return self.backend_name # ####### Interface methods for DataPath (Target Driver) ######## def ensure_export(self, context, volume): volume_path = "/dev/%s/%s" % (self.configuration.volume_group, volume['name']) model_update = \ self.target_driver.ensure_export(context, volume, volume_path) return model_update def create_export(self, context, volume, connector, vg=None): if vg is None: vg = self.configuration.volume_group volume_path = "/dev/%s/%s" % (vg, volume['name']) export_info = self.target_driver.create_export( context, volume, volume_path) return {'provider_location': export_info['location'], 'provider_auth': export_info['auth'], } def remove_export(self, context, volume): self.target_driver.remove_export(context, volume) def initialize_connection(self, volume, connector): return self.target_driver.initialize_connection(volume, connector) def validate_connector(self, connector): return self.target_driver.validate_connector(connector) def terminate_connection(self, volume, connector, **kwargs): return self.target_driver.terminate_connection(volume, connector, **kwargs) cinder-8.0.0/cinder/volume/drivers/quobyte.py0000664000567000056710000004511712701406250022424 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Quobyte Inc. # Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import os from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from cinder import compute from cinder import exception from cinder.i18n import _, _LI, _LW from cinder.image import image_utils from cinder import utils from cinder.volume.drivers import remotefs as remotefs_drv VERSION = '1.1' LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('quobyte_volume_url', help=('URL to the Quobyte volume e.g.,' ' quobyte:///')), cfg.StrOpt('quobyte_client_cfg', help=('Path to a Quobyte Client configuration file.')), cfg.BoolOpt('quobyte_sparsed_volumes', default=True, help=('Create volumes as sparse files which take no space.' ' If set to False, volume is created as regular file.' 'In such case volume creation takes a lot of time.')), cfg.BoolOpt('quobyte_qcow2_volumes', default=True, help=('Create volumes as QCOW2 files rather than raw files.')), cfg.StrOpt('quobyte_mount_point_base', default='$state_path/mnt', help=('Base dir containing the mount point' ' for the Quobyte volume.')), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver): """Cinder driver for Quobyte USP. Volumes are stored as files on the mounted Quobyte volume. The hypervisor will expose them as block devices. 
Unlike other similar drivers, this driver uses exactly one Quobyte volume because Quobyte USP is a distributed storage system. To add or remove capacity, administrators can add or remove storage servers to/from the volume. For different types of volumes e.g., SSD vs. rotating disks, use multiple backends in Cinder. Note: To be compliant with the inherited RemoteFSSnapDriver, Quobyte volumes are also referred to as shares. Version history: 1.0 - Initial driver. 1.1 - Adds optional insecure NAS settings """ driver_volume_type = 'quobyte' driver_prefix = 'quobyte' volume_backend_name = 'Quobyte' VERSION = VERSION def __init__(self, execute=processutils.execute, *args, **kwargs): super(QuobyteDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) # Used to manage snapshots which are currently attached to a VM. self._nova = None def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(QuobyteDriver, self).do_setup(context) self.set_nas_security_options(is_new_cinder_install=False) self.shares = {} # address : options self._nova = compute.API() def check_for_setup_error(self): if not self.configuration.quobyte_volume_url: msg = (_("There's no Quobyte volume configured (%s). Example:" " quobyte:///") % 'quobyte_volume_url') LOG.warning(msg) raise exception.VolumeDriverException(msg) # Check if mount.quobyte is installed try: self._execute('mount.quobyte', check_exit_code=False, run_as_root=False) except OSError as exc: if exc.errno == errno.ENOENT: raise exception.VolumeDriverException( 'mount.quobyte is not installed') else: raise def set_nas_security_options(self, is_new_cinder_install): self._execute_as_root = False LOG.debug("nas_secure_file_* settings are %(ops)s and %(perm)s", {'ops': self.configuration.nas_secure_file_operations, 'perm': self.configuration.nas_secure_file_permissions} ) if self.configuration.nas_secure_file_operations == 'auto': """Note (kaisers): All previous Quobyte driver versions ran with secure settings hardcoded to 'True'. Therefore the default 'auto' setting can safely be mapped to the same, secure, setting. """ LOG.debug("Mapping 'auto' value to 'true' for" " nas_secure_file_operations.") self.configuration.nas_secure_file_operations = 'true' if self.configuration.nas_secure_file_permissions == 'auto': """Note (kaisers): All previous Quobyte driver versions ran with secure settings hardcoded to 'True'. Therefore the default 'auto' setting can safely be mapped to the same, secure, setting. """ LOG.debug("Mapping 'auto' value to 'true' for" " nas_secure_file_permissions.") self.configuration.nas_secure_file_permissions = 'true' if self.configuration.nas_secure_file_operations == 'false': LOG.warning(_LW("The NAS file operations will be run as " "root, allowing root level access at the storage " "backend.")) self._execute_as_root = True else: LOG.info(_LI("The NAS file operations will be run as" " non privileged user in secure mode. 
Please" " ensure your libvirtd settings have been configured" " accordingly (see section 'OpenStack' in the Quobyte" " Manual.")) if self.configuration.nas_secure_file_permissions == 'false': LOG.warning(_LW("The NAS file permissions mode will be 666 " "(allowing other/world read & write access).")) def _qemu_img_info(self, path, volume_name): return super(QuobyteDriver, self)._qemu_img_info_base( path, volume_name, self.configuration.quobyte_mount_point_base) @utils.synchronized('quobyte', external=False) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" self._create_cloned_volume(volume, src_vref) @utils.synchronized('quobyte', external=False) def create_volume(self, volume): return super(QuobyteDriver, self).create_volume(volume) @utils.synchronized('quobyte', external=False) def create_volume_from_snapshot(self, volume, snapshot): return self._create_volume_from_snapshot(volume, snapshot) def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume. This is done with a qemu-img convert to raw/qcow2 from the snapshot qcow2. """ LOG.debug("snapshot: %(snap)s, volume: %(vol)s, ", {'snap': snapshot['id'], 'vol': volume['id'], 'size': volume_size}) info_path = self._local_path_volume_info(snapshot['volume']) snap_info = self._read_info_file(info_path) vol_path = self._local_volume_dir(snapshot['volume']) forward_file = snap_info[snapshot['id']] forward_path = os.path.join(vol_path, forward_file) # Find the file which backs this file, which represents the point # when this snapshot was created. img_info = self._qemu_img_info(forward_path, snapshot['volume']['name']) path_to_snap_img = os.path.join(vol_path, img_info.backing_file) path_to_new_vol = self._local_path_volume(volume) LOG.debug("will copy from snapshot at %s", path_to_snap_img) if self.configuration.quobyte_qcow2_volumes: out_format = 'qcow2' else: out_format = 'raw' image_utils.convert_image(path_to_snap_img, path_to_new_vol, out_format, run_as_root=self._execute_as_root) self._set_rw_permissions_for_all(path_to_new_vol) @utils.synchronized('quobyte', external=False) def delete_volume(self, volume): """Deletes a logical volume.""" if not volume['provider_location']: LOG.warning(_LW('Volume %s does not have provider_location ' 'specified, skipping'), volume['name']) return self._ensure_share_mounted(volume['provider_location']) volume_dir = self._local_volume_dir(volume) mounted_path = os.path.join(volume_dir, self.get_active_image_from_info(volume)) self._execute('rm', '-f', mounted_path, run_as_root=self._execute_as_root) # If an exception (e.g. 
timeout) occurred during delete_snapshot, the # base volume may linger around, so just delete it if it exists base_volume_path = self._local_path_volume(volume) fileutils.delete_if_exists(base_volume_path) info_path = self._local_path_volume_info(volume) fileutils.delete_if_exists(info_path) @utils.synchronized('quobyte', external=False) def create_snapshot(self, snapshot): """Apply locking to the create snapshot operation.""" return self._create_snapshot(snapshot) @utils.synchronized('quobyte', external=False) def delete_snapshot(self, snapshot): """Apply locking to the delete snapshot operation.""" self._delete_snapshot(snapshot) @utils.synchronized('quobyte', external=False) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" # Find active qcow2 file active_file = self.get_active_image_from_info(volume) path = '%s/%s/%s' % (self.configuration.quobyte_mount_point_base, self._get_hash_str(volume['provider_location']), active_file) data = {'export': volume['provider_location'], 'name': active_file} if volume['provider_location'] in self.shares: data['options'] = self.shares[volume['provider_location']] # Test file for raw vs. qcow2 format info = self._qemu_img_info(path, volume['name']) data['format'] = info.file_format if data['format'] not in ['raw', 'qcow2']: msg = _('%s must be a valid raw or qcow2 image.') % path raise exception.InvalidVolume(msg) return { 'driver_volume_type': 'quobyte', 'data': data, 'mount_point_base': self.configuration.quobyte_mount_point_base } @utils.synchronized('quobyte', external=False) def copy_volume_to_image(self, context, volume, image_service, image_meta): self._copy_volume_to_image(context, volume, image_service, image_meta) @utils.synchronized('quobyte', external=False) def extend_volume(self, volume, size_gb): volume_path = self.local_path(volume) volume_filename = os.path.basename(volume_path) # Ensure no snapshots exist for the volume active_image = self.get_active_image_from_info(volume) if volume_filename != active_image: msg = _('Extend volume is only supported for this' ' driver when no snapshots exist.') raise exception.InvalidVolume(msg) info = self._qemu_img_info(volume_path, volume['name']) backing_fmt = info.file_format if backing_fmt not in ['raw', 'qcow2']: msg = _('Unrecognized backing format: %s') raise exception.InvalidVolume(msg % backing_fmt) # qemu-img can resize both raw and qcow2 files image_utils.resize_image(volume_path, size_gb) def _do_create_volume(self, volume): """Create a volume on given Quobyte volume. :param volume: volume reference """ volume_path = self.local_path(volume) volume_size = volume['size'] if self.configuration.quobyte_qcow2_volumes: self._create_qcow2_file(volume_path, volume_size) else: if self.configuration.quobyte_sparsed_volumes: self._create_sparsed_file(volume_path, volume_size) else: self._create_regular_file(volume_path, volume_size) self._set_rw_permissions_for_all(volume_path) def _load_shares_config(self, share_file=None): """Put 'quobyte_volume_url' into the 'shares' list. :param share_file: string, Not used because the user has to specify the the Quobyte volume directly. """ self.shares = {} url = self.configuration.quobyte_volume_url # Strip quobyte:// from the URL protocol = self.driver_volume_type + "://" if url.startswith(protocol): url = url[len(protocol):] self.shares[url] = None # None = No extra mount options. 
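# --- Illustrative sketch (added for clarity; not part of the original
# driver). _copy_volume_from_snapshot above ends in a qemu-img conversion
# whose output format follows the quobyte_qcow2_volumes flag;
# image_utils.convert_image() wraps roughly the command below (the paths
# in the usage note are placeholders):
import subprocess

def convert_snapshot_image(src_path, dst_path, qcow2_volumes):
    """Convert a snapshot backing image to the configured volume format."""
    out_format = 'qcow2' if qcow2_volumes else 'raw'
    subprocess.check_call(
        ['qemu-img', 'convert', '-O', out_format, src_path, dst_path])

# convert_snapshot_image('/mnt/quobyte/snap.qcow2',
#                        '/mnt/quobyte/volume-1234', qcow2_volumes=False)
# --- End sketch ---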
LOG.debug("Quobyte Volume URL set to: %s", self.shares) def _ensure_share_mounted(self, quobyte_volume): """Mount Quobyte volume. :param quobyte_volume: string """ mount_path = self._get_mount_point_for_share(quobyte_volume) self._mount_quobyte(quobyte_volume, mount_path, ensure=True) @utils.synchronized('quobyte_ensure', external=False) def _ensure_shares_mounted(self): """Mount the Quobyte volume. Used for example by RemoteFsDriver._update_volume_stats """ self._mounted_shares = [] self._load_shares_config() for share in self.shares.keys(): try: self._ensure_share_mounted(share) self._mounted_shares.append(share) except Exception as exc: LOG.warning(_LW('Exception during mounting %s'), exc) LOG.debug('Available shares %s', self._mounted_shares) def _find_share(self, volume_size_in_gib): """Returns the mounted Quobyte volume. Multiple shares are not supported because the virtualization of multiple storage devices is taken care of at the level of Quobyte USP. For different types of volumes e.g., SSD vs. rotating disks, use multiple backends in Cinder. :param volume_size_in_gib: int size in GB. Ignored by this driver. """ if not self._mounted_shares: raise exception.NotFound() assert len(self._mounted_shares) == 1, 'There must be exactly' \ ' one Quobyte volume.' target_volume = self._mounted_shares[0] LOG.debug('Selected %s as target Quobyte volume.', target_volume) return target_volume def _get_mount_point_for_share(self, quobyte_volume): """Return mount point for Quobyte volume. :param quobyte_volume: Example: storage-host/openstack-volumes """ return os.path.join(self.configuration.quobyte_mount_point_base, self._get_hash_str(quobyte_volume)) # open() wrapper to mock reading from /proc/mount. @staticmethod def read_proc_mount(): # pragma: no cover return open('/proc/mounts') def _mount_quobyte(self, quobyte_volume, mount_path, ensure=False): """Mount Quobyte volume to mount path.""" mounted = False for l in QuobyteDriver.read_proc_mount(): if l.split()[1] == mount_path: mounted = True break if mounted: try: os.stat(mount_path) except OSError as exc: if exc.errno == errno.ENOTCONN: mounted = False try: LOG.info(_LI('Fixing previous mount %s which was not' ' unmounted correctly.'), mount_path) self._execute('umount.quobyte', mount_path, run_as_root=self._execute_as_root) except processutils.ProcessExecutionError as exc: LOG.warning(_LW("Failed to unmount previous mount: " "%s"), exc) else: # TODO(quobyte): Extend exc analysis in here? 
LOG.warning(_LW("Unknown error occurred while checking " "mount point: %s Trying to continue."), exc) if not mounted: if not os.path.isdir(mount_path): self._execute('mkdir', '-p', mount_path) command = ['mount.quobyte', quobyte_volume, mount_path] if self.configuration.quobyte_client_cfg: command.extend(['-c', self.configuration.quobyte_client_cfg]) try: LOG.info(_LI('Mounting volume: %s ...'), quobyte_volume) self._execute(*command, run_as_root=self._execute_as_root) LOG.info(_LI('Mounting volume: %s succeeded'), quobyte_volume) mounted = True except processutils.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.stderr: LOG.warning(_LW("%s is already mounted"), quobyte_volume) else: raise if mounted: self._validate_volume(mount_path) def _validate_volume(self, mount_path): """Wraps execute calls for checking validity of a Quobyte volume""" command = ['getfattr', "-n", "quobyte.info", mount_path] try: self._execute(*command, run_as_root=self._execute_as_root) except processutils.ProcessExecutionError as exc: msg = (_("The mount %(mount_path)s is not a valid" " Quobyte USP volume. Error: %(exc)s") % {'mount_path': mount_path, 'exc': exc}) raise exception.VolumeDriverException(msg) if not os.access(mount_path, os.W_OK | os.X_OK): LOG.warning(_LW("Volume is not writable. Please broaden the file" " permissions. Mount: %s"), mount_path) cinder-8.0.0/cinder/volume/drivers/sheepdog.py0000664000567000056710000006647112701406257022547 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright (c) 2013 Zelin.io # Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SheepDog Volume Driver. 
""" import errno import eventlet import io import re from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.image import image_utils from cinder import utils from cinder.volume import driver LOG = logging.getLogger(__name__) sheepdog_opts = [ cfg.StrOpt('sheepdog_store_address', default='127.0.0.1', help=('IP address of sheep daemon.')), cfg.PortOpt('sheepdog_store_port', default=7000, help=('Port of sheep daemon.')) ] CONF = cfg.CONF CONF.import_opt("image_conversion_dir", "cinder.image.image_utils") CONF.register_opts(sheepdog_opts) class SheepdogClient(object): """Sheepdog command executor.""" QEMU_SHEEPDOG_PREFIX = 'sheepdog:' DOG_RESP_CONNECTION_ERROR = 'failed to connect to' DOG_RESP_CLUSTER_RUNNING = 'Cluster status: running' DOG_RESP_CLUSTER_NOT_FORMATTED = ('Cluster status: ' 'Waiting for cluster to be formatted') DOG_RESP_CLUSTER_WAITING = ('Cluster status: ' 'Waiting for other nodes to join cluster') DOG_RESP_VDI_ALREADY_EXISTS = ': VDI exists already' DOG_RESP_VDI_NOT_FOUND = ': No VDI found' DOG_RESP_VDI_SHRINK_NOT_SUPPORT = 'Shrinking VDIs is not implemented' DOG_RESP_VDI_SIZE_TOO_LARGE = 'New VDI size is too large' DOG_RESP_SNAPSHOT_VDI_NOT_FOUND = ': No VDI found' DOG_RESP_SNAPSHOT_NOT_FOUND = ': Failed to find requested tag' DOG_RESP_SNAPSHOT_EXISTED = 'tag (%(snapname)s) is existed' QEMU_IMG_RESP_CONNECTION_ERROR = ('Failed to connect socket: ' 'Connection refused') QEMU_IMG_RESP_ALREADY_EXISTS = ': VDI exists already' QEMU_IMG_RESP_SNAPSHOT_NOT_FOUND = 'Failed to find the requested tag' QEMU_IMG_RESP_VDI_NOT_FOUND = 'No vdi found' QEMU_IMG_RESP_SIZE_TOO_LARGE = 'An image is too large.' def __init__(self, addr, port): self.addr = addr self.port = port def _run_dog(self, command, subcommand, *params): cmd = ('env', 'LC_ALL=C', 'LANG=C', 'dog', command, subcommand, '-a', self.addr, '-p', self.port) + params try: return utils.execute(*cmd) except OSError as e: with excutils.save_and_reraise_exception(): if e.errno == errno.ENOENT: msg = _LE('Sheepdog is not installed. ' 'OSError: command is %s.') else: msg = _LE('OSError: command is %s.') LOG.error(msg, cmd) except processutils.ProcessExecutionError as e: _stderr = e.stderr if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR): reason = (_('Failed to connect to sheep daemon. ' 'addr: %(addr)s, port: %(port)s'), {'addr': self.addr, 'port': self.port}) raise exception.SheepdogError(reason=reason) raise exception.SheepdogCmdError( cmd=e.cmd, exit_code=e.exit_code, stdout=e.stdout.replace('\n', '\\n'), stderr=e.stderr.replace('\n', '\\n')) def _run_qemu_img(self, command, *params): """Executes qemu-img command wrapper""" cmd = ['env', 'LC_ALL=C', 'LANG=C', 'qemu-img', command] for param in params: if param.startswith(self.QEMU_SHEEPDOG_PREFIX): # replace 'sheepdog:vdiname[:snapshotname]' to # 'sheepdog:addr:port:vdiname[:snapshotname]' param = param.replace(self.QEMU_SHEEPDOG_PREFIX, '%(prefix)s%(addr)s:%(port)s:' % {'prefix': self.QEMU_SHEEPDOG_PREFIX, 'addr': self.addr, 'port': self.port}, 1) cmd.append(param) try: return utils.execute(*cmd) except OSError as e: with excutils.save_and_reraise_exception(): if e.errno == errno.ENOENT: msg = _LE('Qemu-img is not installed. 
' 'OSError: command is %(cmd)s.') else: msg = _LE('OSError: command is %(cmd)s.') LOG.error(msg, {'cmd': tuple(cmd)}) except processutils.ProcessExecutionError as e: raise exception.SheepdogCmdError( cmd=e.cmd, exit_code=e.exit_code, stdout=e.stdout.replace('\n', '\\n'), stderr=e.stderr.replace('\n', '\\n')) def check_cluster_status(self): try: (_stdout, _stderr) = self._run_dog('cluster', 'info') except exception.SheepdogCmdError as e: cmd = e.kwargs['cmd'] with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to check cluster status.' '(command: %s)'), cmd) if _stdout.startswith(self.DOG_RESP_CLUSTER_RUNNING): LOG.debug('Sheepdog cluster is running.') return reason = _('Invalid sheepdog cluster status.') if _stdout.startswith(self.DOG_RESP_CLUSTER_NOT_FORMATTED): reason = _('Cluster is not formatted. ' 'You should probably perform "dog cluster format".') elif _stdout.startswith(self.DOG_RESP_CLUSTER_WAITING): reason = _('Waiting for all nodes to join cluster. ' 'Ensure all sheep daemons are running.') raise exception.SheepdogError(reason=reason) def create(self, vdiname, size): try: self._run_dog('vdi', 'create', vdiname, '%sG' % size) except exception.SheepdogCmdError as e: _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): if _stderr.rstrip('\\n').endswith( self.DOG_RESP_VDI_ALREADY_EXISTS): LOG.error(_LE('Volume already exists. %s'), vdiname) else: LOG.error(_LE('Failed to create volume. %s'), vdiname) def delete(self, vdiname): try: (_stdout, _stderr) = self._run_dog('vdi', 'delete', vdiname) if _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND): LOG.warning(_LW('Volume not found. %s'), vdiname) elif _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR): # NOTE(tishizaki) # Dog command does not return error_code although # dog command cannot connect to sheep process. # That is a Sheepdog's bug. # To avoid a Sheepdog's bug, now we need to check stderr. # If Sheepdog has been fixed, this check logic is needed # by old Sheepdog users. reason = (_('Failed to connect to sheep daemon. ' 'addr: %(addr)s, port: %(port)s'), {'addr': self.addr, 'port': self.port}) raise exception.SheepdogError(reason=reason) except exception.SheepdogCmdError as e: _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to delete volume. %s'), vdiname) def create_snapshot(self, vdiname, snapname): try: self._run_dog('vdi', 'snapshot', '-s', snapname, vdiname) except exception.SheepdogCmdError as e: cmd = e.kwargs['cmd'] _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): if _stderr.rstrip('\\n').endswith( self.DOG_RESP_SNAPSHOT_VDI_NOT_FOUND): LOG.error(_LE('Volume "%s" not found. Please check the ' 'results of "dog vdi list".'), vdiname) elif _stderr.rstrip('\\n').endswith( self.DOG_RESP_SNAPSHOT_EXISTED % {'snapname': snapname}): LOG.error(_LE('Snapshot "%s" already exists.'), snapname) else: LOG.error(_LE('Failed to create snapshot. (command: %s)'), cmd) def delete_snapshot(self, vdiname, snapname): try: (_stdout, _stderr) = self._run_dog('vdi', 'delete', '-s', snapname, vdiname) if _stderr.rstrip().endswith(self.DOG_RESP_SNAPSHOT_NOT_FOUND): LOG.warning(_LW('Snapshot "%s" not found.'), snapname) elif _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND): LOG.warning(_LW('Volume "%s" not found.'), vdiname) elif _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR): # NOTE(tishizaki) # Dog command does not return error_code although # dog command cannot connect to sheep process. # That is a Sheepdog's bug. 
# To avoid a Sheepdog's bug, now we need to check stderr. # If Sheepdog has been fixed, this check logic is needed # by old Sheepdog users. reason = (_('Failed to connect to sheep daemon. ' 'addr: %(addr)s, port: %(port)s'), {'addr': self.addr, 'port': self.port}) raise exception.SheepdogError(reason=reason) except exception.SheepdogCmdError as e: cmd = e.kwargs['cmd'] _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to delete snapshot. (command: %s)'), cmd) def clone(self, src_vdiname, src_snapname, dst_vdiname, size): try: self._run_qemu_img('create', '-b', 'sheepdog:%(src_vdiname)s:%(src_snapname)s' % {'src_vdiname': src_vdiname, 'src_snapname': src_snapname}, 'sheepdog:%s' % dst_vdiname, '%sG' % size) except exception.SheepdogCmdError as e: cmd = e.kwargs['cmd'] _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): if self.QEMU_IMG_RESP_CONNECTION_ERROR in _stderr: LOG.error(_LE('Failed to connect to sheep daemon. ' 'addr: %(addr)s, port: %(port)s'), {'addr': self.addr, 'port': self.port}) elif self.QEMU_IMG_RESP_ALREADY_EXISTS in _stderr: LOG.error(_LE('Clone volume "%s" already exists. ' 'Please check the results of "dog vdi list".'), dst_vdiname) elif self.QEMU_IMG_RESP_VDI_NOT_FOUND in _stderr: LOG.error(_LE('Src Volume "%s" not found. ' 'Please check the results of "dog vdi list".'), src_vdiname) elif self.QEMU_IMG_RESP_SNAPSHOT_NOT_FOUND in _stderr: LOG.error(_LE('Snapshot "%s" not found. ' 'Please check the results of "dog vdi list".'), src_snapname) elif self.QEMU_IMG_RESP_SIZE_TOO_LARGE in _stderr: LOG.error(_LE('Volume size "%sG" is too large.'), size) else: LOG.error(_LE('Failed to clone volume.(command: %s)'), cmd) def resize(self, vdiname, size): size = int(size) * units.Gi try: (_stdout, _stderr) = self._run_dog('vdi', 'resize', vdiname, size) except exception.SheepdogCmdError as e: _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): if _stderr.rstrip('\\n').endswith( self.DOG_RESP_VDI_NOT_FOUND): LOG.error(_LE('Failed to resize vdi. vdi not found. %s'), vdiname) elif _stderr.startswith(self.DOG_RESP_VDI_SHRINK_NOT_SUPPORT): LOG.error(_LE('Failed to resize vdi. ' 'Shrinking vdi not supported. ' 'vdi: %(vdiname)s new size: %(size)s'), {'vdiname': vdiname, 'size': size}) elif _stderr.startswith(self.DOG_RESP_VDI_SIZE_TOO_LARGE): LOG.error(_LE('Failed to resize vdi. ' 'Too large volume size. ' 'vdi: %(vdiname)s new size: %(size)s'), {'vdiname': vdiname, 'size': size}) else: LOG.error(_LE('Failed to resize vdi. ' 'vdi: %(vdiname)s new size: %(size)s'), {'vdiname': vdiname, 'size': size}) def get_volume_stats(self): try: (_stdout, _stderr) = self._run_dog('node', 'info', '-r') except exception.SheepdogCmdError as e: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to get volume status. %s'), e) return _stdout class SheepdogIOWrapper(io.RawIOBase): """File-like object with Sheepdog backend.""" def __init__(self, addr, port, volume, snapshot_name=None): self._addr = addr self._port = port self._vdiname = volume['name'] self._snapshot_name = snapshot_name self._offset = 0 # SheepdogIOWrapper instance becomes invalid if a write error occurs. self._valid = True def _execute(self, cmd, data=None): try: # NOTE(yamada-h): processutils.execute causes busy waiting # under eventlet. # To avoid wasting CPU resources, it should not be used for # the command which takes long time to execute. 
# As a workaround, we replace a subprocess module with # the original one while only executing a read/write command. _processutils_subprocess = processutils.subprocess processutils.subprocess = eventlet.patcher.original('subprocess') return processutils.execute(*cmd, process_input=data)[0] except (processutils.ProcessExecutionError, OSError): self._valid = False msg = _('Sheepdog I/O Error, command was: "%s".') % ' '.join(cmd) raise exception.VolumeDriverException(message=msg) finally: processutils.subprocess = _processutils_subprocess def read(self, length=None): if not self._valid: msg = _('An error occurred while reading volume "%s".' ) % self._vdiname raise exception.VolumeDriverException(message=msg) cmd = ['dog', 'vdi', 'read', '-a', self._addr, '-p', self._port] if self._snapshot_name: cmd.extend(('-s', self._snapshot_name)) cmd.extend((self._vdiname, self._offset)) if length: cmd.append(length) data = self._execute(cmd) self._offset += len(data) return data def write(self, data): if not self._valid: msg = _('An error occurred while writing to volume "%s".' ) % self._vdiname raise exception.VolumeDriverException(message=msg) length = len(data) cmd = ('dog', 'vdi', 'write', '-a', self._addr, '-p', self._port, self._vdiname, self._offset, length) self._execute(cmd, data) self._offset += length return length def seek(self, offset, whence=0): if not self._valid: msg = _('An error occurred while seeking for volume "%s".' ) % self._vdiname raise exception.VolumeDriverException(message=msg) if whence == 0: # SEEK_SET or 0 - start of the stream (the default); # offset should be zero or positive new_offset = offset elif whence == 1: # SEEK_CUR or 1 - current stream position; offset may be negative new_offset = self._offset + offset else: # SEEK_END or 2 - end of the stream; offset is usually negative # TODO(yamada-h): Support SEEK_END raise IOError(_("Invalid argument - whence=%s not supported.") % whence) if new_offset < 0: raise IOError(_("Invalid argument - negative seek offset.")) self._offset = new_offset def tell(self): return self._offset def flush(self): pass def fileno(self): """Sheepdog does not have support for fileno so we raise IOError.
Raising IOError is the recommended way to notify the caller that the interface is not supported - see http://docs.python.org/2/library/io.html#io.IOBase """ raise IOError(_("fileno is not supported by SheepdogIOWrapper")) class SheepdogDriver(driver.VolumeDriver): """Executes commands relating to Sheepdog Volumes.""" VERSION = "1.0.0" def __init__(self, *args, **kwargs): super(SheepdogDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(sheepdog_opts) self.addr = self.configuration.sheepdog_store_address self.port = self.configuration.sheepdog_store_port self.client = SheepdogClient(self.addr, self.port) self.stats_pattern = re.compile(r'[\w\s%]*Total\s(\d+)\s(\d+)*') self._stats = {} def check_for_setup_error(self): self.client.check_cluster_status() def _is_cloneable(self, image_location, image_meta): """Check whether the image can be cloned or not.""" if image_location is None: return False prefix = 'sheepdog://' if not image_location.startswith(prefix): LOG.debug("Image is not stored in sheepdog.") return False if image_meta['disk_format'] != 'raw': LOG.debug("Image clone requires image format to be " "'raw' but image %s(%s) is '%s'.", image_location, image_meta['id'], image_meta['disk_format']) return False cloneable = False # check whether volume is stored in sheepdog try: # The image location would be like # "sheepdog://192.168.10.2:7000:Alice" (ip, port, name) = image_location[len(prefix):].split(":", 2) self._try_execute('collie', 'vdi', 'list', '--address', ip, '--port', port, name) cloneable = True except processutils.ProcessExecutionError as e: LOG.debug("Cannot find vdi %(image)s: %(err)s", {'image': name, 'err': e}) return cloneable def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image.""" image_location = image_location[0] if image_location else None if not self._is_cloneable(image_location, image_meta): return {}, False volume_ref = {'name': image_meta['id'], 'size': image_meta['size']} self.create_cloned_volume(volume, volume_ref) self.client.resize(volume.name, volume.size) vol_path = self.local_path(volume) return {'provider_location': vol_path}, True def create_cloned_volume(self, volume, src_vref): """Clone a sheepdog volume from another volume.""" snapshot_name = src_vref['name'] + '-temp-snapshot' snapshot = { 'name': snapshot_name, 'volume_name': src_vref['name'], 'volume_size': src_vref['size'], } self.client.create_snapshot(snapshot['volume_name'], snapshot_name) try: self.client.clone(snapshot['volume_name'], snapshot_name, volume.name, volume.size) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create cloned volume %s.'), volume.name) finally: # Delete temp snapshot self.client.delete_snapshot(snapshot['volume_name'], snapshot_name) def create_volume(self, volume): """Create a sheepdog volume.""" self.client.create(volume.name, volume.size) def create_volume_from_snapshot(self, volume, snapshot): """Create a sheepdog volume from a snapshot.""" self.client.clone(snapshot.volume_name, snapshot.name, volume.name, volume.size) def delete_volume(self, volume): """Delete a logical volume.""" self.client.delete(volume.name) def copy_image_to_volume(self, context, volume, image_service, image_id): with image_utils.temporary_file() as tmp: # (wenhao): we don't need to convert to raw for sheepdog. image_utils.fetch_verify_image(context, image_service, image_id, tmp) # remove the image created by import before this function.
# see volume/drivers/manager.py:_create_volume self.client.delete(volume.name) # convert and store into sheepdog image_utils.convert_image(tmp, self.local_path(volume), 'raw') self.client.resize(volume.name, volume.size) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" image_id = image_meta['id'] with image_utils.temporary_file() as tmp: # image_utils.convert_image doesn't support "sheepdog:" source, # so we use the qemu-img directly. # Sheepdog volume is always raw-formatted. cmd = ('qemu-img', 'convert', '-f', 'raw', '-t', 'none', '-O', 'raw', self.local_path(volume), tmp) self._try_execute(*cmd) with open(tmp, 'rb') as image_file: image_service.update(context, image_id, {}, image_file) def create_snapshot(self, snapshot): """Create a sheepdog snapshot.""" self.client.create_snapshot(snapshot.volume_name, snapshot.name) def delete_snapshot(self, snapshot): """Delete a sheepdog snapshot.""" self.client.delete_snapshot(snapshot.volume_name, snapshot.name) def local_path(self, volume): return "sheepdog:%(addr)s:%(port)s:%(name)s" % { 'addr': self.addr, 'port': self.port, 'name': volume['name']} def ensure_export(self, context, volume): """Safely and synchronously recreate an export for a logical volume.""" pass def create_export(self, context, volume, connector): """Export a volume.""" pass def remove_export(self, context, volume): """Remove an export for a logical volume.""" pass def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'sheepdog', 'data': { 'name': volume['name'], 'hosts': [self.addr], 'ports': ["%d" % self.port], } } def terminate_connection(self, volume, connector, **kwargs): pass def _update_volume_stats(self): stats = {} backend_name = "sheepdog" if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') stats["volume_backend_name"] = backend_name or 'sheepdog' stats['vendor_name'] = 'Open Source' stats['driver_version'] = self.VERSION stats['storage_protocol'] = 'sheepdog' stats['total_capacity_gb'] = 'unknown' stats['free_capacity_gb'] = 'unknown' stats['reserved_percentage'] = 0 stats['QoS_support'] = False stdout = self.client.get_volume_stats() m = self.stats_pattern.match(stdout) total = float(m.group(1)) used = float(m.group(2)) stats['total_capacity_gb'] = total / units.Gi stats['free_capacity_gb'] = (total - used) / units.Gi self._stats = stats def get_volume_stats(self, refresh=False): if refresh: self._update_volume_stats() return self._stats def extend_volume(self, volume, new_size): """Extend an Existing Volume.""" self.client.resize(volume.name, new_size) LOG.debug('Extend volume from %(old_size)s GB to %(new_size)s GB.', {'old_size': volume.size, 'new_size': new_size}) def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" src_volume = self.db.volume_get(context, backup.volume_id) temp_snapshot_name = 'tmp-snap-%s' % src_volume.name # NOTE(tishizaki): If previous backup_volume operation has failed, # a temporary snapshot for previous operation may exist. # So, the old snapshot must be deleted before backup_volume. # Sheepdog 0.9 or later 'delete_snapshot' operation # is done successfully, although target snapshot does not exist. # However, sheepdog 0.8 or before 'delete_snapshot' operation # is failed, and raise ProcessExecutionError when target snapshot # does not exist. 
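# NOTE(editor): illustrative sketch, not part of the original driver. The
# compatibility note above amounts to making the cleanup idempotent: swallow
# the error that sheepdog 0.8 and earlier raise for an absent snapshot, as
# the try/except that follows does. Generic shape (names are hypothetical):
def delete_snapshot_quietly(client, vdiname, snapname):
    """Best-effort snapshot delete that tolerates 'already gone'."""
    try:
        client.delete_snapshot(vdiname, snapname)
    except Exception:
        # the driver narrows this to exception.SheepdogCmdError
        pass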
try: self.client.delete_snapshot(src_volume.name, temp_snapshot_name) except (exception.SheepdogCmdError): pass try: self.client.create_snapshot(src_volume.name, temp_snapshot_name) except (exception.SheepdogCmdError, OSError): msg = (_('Failed to create a temporary snapshot for volume %s.') % src_volume.id) LOG.exception(msg) raise exception.SheepdogError(reason=msg) try: sheepdog_fd = SheepdogIOWrapper(self.addr, self.port, src_volume, temp_snapshot_name) backup_service.backup(backup, sheepdog_fd) finally: self.client.delete_snapshot(src_volume.name, temp_snapshot_name) def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume.""" sheepdog_fd = SheepdogIOWrapper(self.addr, self.port, volume) backup_service.restore(backup, volume['id'], sheepdog_fd) cinder-8.0.0/cinder/volume/drivers/rbd.py0000664000567000056710000012722112701406250021500 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """RADOS Block Device Driver""" from __future__ import absolute_import import io import json import math import os import tempfile from eventlet import tpool from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units from six.moves import urllib from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder import utils from cinder.volume import driver try: import rados import rbd except ImportError: rados = None rbd = None LOG = logging.getLogger(__name__) rbd_opts = [ cfg.StrOpt('rbd_cluster_name', default='ceph', help='The name of ceph cluster'), cfg.StrOpt('rbd_pool', default='rbd', help='The RADOS pool where rbd volumes are stored'), cfg.StrOpt('rbd_user', help='The RADOS client name for accessing rbd volumes ' '- only set when using cephx authentication'), cfg.StrOpt('rbd_ceph_conf', default='', # default determined by librados help='Path to the ceph configuration file'), cfg.BoolOpt('rbd_flatten_volume_from_snapshot', default=False, help='Flatten volumes created from snapshots to remove ' 'dependency from volume to snapshot'), cfg.StrOpt('rbd_secret_uuid', help='The libvirt uuid of the secret for the rbd_user ' 'volumes'), cfg.StrOpt('volume_tmp_dir', help='Directory where temporary image files are stored ' 'when the volume driver does not write them directly ' 'to the volume. Warning: this option is now deprecated, ' 'please use image_conversion_dir instead.'), cfg.IntOpt('rbd_max_clone_depth', default=5, help='Maximum number of nested volume clones that are ' 'taken before a flatten occurs. Set to 0 to disable ' 'cloning.'), cfg.IntOpt('rbd_store_chunk_size', default=4, help=_('Volumes will be chunked into objects of this size ' '(in megabytes).')), cfg.IntOpt('rados_connect_timeout', default=-1, help=_('Timeout value (in seconds) used when connecting to ' 'ceph cluster. 
If value < 0, no timeout is set and ' 'default librados value is used.')), cfg.IntOpt('rados_connection_retries', default=3, help=_('Number of retries if connection to ceph cluster ' 'failed.')), cfg.IntOpt('rados_connection_interval', default=5, help=_('Interval value (in seconds) between connection ' 'retries to ceph cluster.')) ] CONF = cfg.CONF CONF.register_opts(rbd_opts) class RBDImageMetadata(object): """RBD image metadata to be used with RBDImageIOWrapper.""" def __init__(self, image, pool, user, conf): self.image = image self.pool = utils.convert_str(pool) self.user = utils.convert_str(user) self.conf = utils.convert_str(conf) class RBDImageIOWrapper(io.RawIOBase): """Enables LibRBD.Image objects to be treated as Python IO objects. Calling unimplemented interfaces will raise IOError. """ def __init__(self, rbd_meta): super(RBDImageIOWrapper, self).__init__() self._rbd_meta = rbd_meta self._offset = 0 def _inc_offset(self, length): self._offset += length @property def rbd_image(self): return self._rbd_meta.image @property def rbd_user(self): return self._rbd_meta.user @property def rbd_pool(self): return self._rbd_meta.pool @property def rbd_conf(self): return self._rbd_meta.conf def read(self, length=None): offset = self._offset total = self._rbd_meta.image.size() # NOTE(dosaboy): posix files do not barf if you read beyond their # length (they just return nothing), but rbd images do, so we need to # return an empty string if we have reached the end of the image. if (offset >= total): return b'' if length is None: length = total if (offset + length) > total: length = total - offset self._inc_offset(length) return self._rbd_meta.image.read(int(offset), int(length)) def write(self, data): self._rbd_meta.image.write(data, self._offset) self._inc_offset(len(data)) def seekable(self): return True def seek(self, offset, whence=0): if whence == 0: new_offset = offset elif whence == 1: new_offset = self._offset + offset elif whence == 2: new_offset = self._rbd_meta.image.size() new_offset += offset else: raise IOError(_("Invalid argument - whence=%s not supported") % (whence)) if (new_offset < 0): raise IOError(_("Invalid argument")) self._offset = new_offset def tell(self): return self._offset def flush(self): try: self._rbd_meta.image.flush() except AttributeError: LOG.warning(_LW("flush() not supported in " "this version of librbd")) def fileno(self): """RBD does not have support for fileno() so we raise IOError. Raising IOError is the recommended way to notify the caller that the interface is not supported - see http://docs.python.org/2/library/io.html#io.IOBase """ raise IOError(_("fileno() not supported by RBD()")) # NOTE(dosaboy): if the IO object is not closed explicitly, Python auto # closes it which, if not overridden, calls flush() prior to close; this # is unwanted here since the rbd image may have been closed prior to the # autoclean, currently triggering a segfault in librbd. def close(self): pass class RBDVolumeProxy(object): """Context manager for dealing with an existing rbd volume. This handles connecting to rados and opening an ioctx automatically, and otherwise acts like a librbd Image object. The underlying librados client and ioctx can be accessed as the attributes 'client' and 'ioctx'.
""" def __init__(self, driver, name, pool=None, snapshot=None, read_only=False): client, ioctx = driver._connect_to_rados(pool) if snapshot is not None: snapshot = utils.convert_str(snapshot) try: self.volume = driver.rbd.Image(ioctx, utils.convert_str(name), snapshot=snapshot, read_only=read_only) except driver.rbd.Error: LOG.exception(_LE("error opening rbd image %s"), name) driver._disconnect_from_rados(client, ioctx) raise self.driver = driver self.client = client self.ioctx = ioctx def __enter__(self): return self def __exit__(self, type_, value, traceback): try: self.volume.close() finally: self.driver._disconnect_from_rados(self.client, self.ioctx) def __getattr__(self, attrib): return getattr(self.volume, attrib) class RADOSClient(object): """Context manager to simplify error handling for connecting to ceph.""" def __init__(self, driver, pool=None): self.driver = driver self.cluster, self.ioctx = driver._connect_to_rados(pool) def __enter__(self): return self def __exit__(self, type_, value, traceback): self.driver._disconnect_from_rados(self.cluster, self.ioctx) @property def features(self): features = self.cluster.conf_get('rbd_default_features') if ((features is None) or (int(features) == 0)): features = self.driver.rbd.RBD_FEATURE_LAYERING return int(features) class RBDDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableImageVD, driver.SnapshotVD, driver.MigrateVD, driver.BaseVD): """Implements RADOS block device (RBD) volume commands.""" VERSION = '1.2.0' def __init__(self, *args, **kwargs): super(RBDDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(rbd_opts) self._stats = {} # allow overrides for testing self.rados = kwargs.get('rados', rados) self.rbd = kwargs.get('rbd', rbd) # All string args used with librbd must be None or utf-8 otherwise # librbd will break. for attr in ['rbd_cluster_name', 'rbd_user', 'rbd_ceph_conf', 'rbd_pool']: val = getattr(self.configuration, attr) if val is not None: setattr(self.configuration, attr, utils.convert_str(val)) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" if rados is None: msg = _('rados and rbd python libraries not found') raise exception.VolumeBackendAPIException(data=msg) for attr in ['rbd_cluster_name', 'rbd_pool']: val = getattr(self.configuration, attr) if not val: raise exception.InvalidConfigurationValue(option=attr, value=val) # NOTE: Checking connection to ceph # RADOSClient __init__ method invokes _connect_to_rados # so no need to check for self.rados.Error here. 
with RADOSClient(self): pass def RBDProxy(self): return tpool.Proxy(self.rbd.RBD()) def _ceph_args(self): args = [] if self.configuration.rbd_user: args.extend(['--id', self.configuration.rbd_user]) if self.configuration.rbd_ceph_conf: args.extend(['--conf', self.configuration.rbd_ceph_conf]) if self.configuration.rbd_cluster_name: args.extend(['--cluster', self.configuration.rbd_cluster_name]) return args @utils.retry(exception.VolumeBackendAPIException, CONF.rados_connection_interval, CONF.rados_connection_retries) def _connect_to_rados(self, pool=None): LOG.debug("opening connection to ceph cluster (timeout=%s).", self.configuration.rados_connect_timeout) client = self.rados.Rados( rados_id=self.configuration.rbd_user, clustername=self.configuration.rbd_cluster_name, conffile=self.configuration.rbd_ceph_conf) if pool is not None: pool = utils.convert_str(pool) else: pool = self.configuration.rbd_pool try: if self.configuration.rados_connect_timeout >= 0: client.connect(timeout= self.configuration.rados_connect_timeout) else: client.connect() ioctx = client.open_ioctx(pool) return client, ioctx except self.rados.Error: msg = _("Error connecting to ceph cluster.") LOG.exception(msg) client.shutdown() raise exception.VolumeBackendAPIException(data=msg) def _disconnect_from_rados(self, client, ioctx): # closing an ioctx cannot raise an exception ioctx.close() client.shutdown() def _get_backup_snaps(self, rbd_image): """Get list of any backup snapshots that exist on this volume. There should only ever be one but accept all since they need to be deleted before the volume can be. """ # NOTE(dosaboy): we do the import here otherwise we get import conflict # issues between the rbd driver and the ceph backup driver. These # issues only seem to occur when NOT using them together and are # triggered when the ceph backup driver imports the rbd volume driver. 
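# NOTE(editor): illustrative sketch, not part of the original driver. The
# deferred import just below is the standard way to break an import cycle:
# the module is resolved on first call, after both modules have finished
# loading. Generic shape (package and names are invented):
def get_backup_helper():
    # deferred import: avoids a circular a <-> b dependency at load time
    from mypkg import backup_module
    return backup_module.BackupHelper()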
from cinder.backup.drivers import ceph return ceph.CephBackupDriver.get_backup_snaps(rbd_image) def _get_mon_addrs(self): args = ['ceph', 'mon', 'dump', '--format=json'] args.extend(self._ceph_args()) out, _ = self._execute(*args) lines = out.split('\n') if lines[0].startswith('dumped monmap epoch'): lines = lines[1:] monmap = json.loads('\n'.join(lines)) addrs = [mon['addr'] for mon in monmap['mons']] hosts = [] ports = [] for addr in addrs: host_port = addr[:addr.rindex('/')] host, port = host_port.rsplit(':', 1) hosts.append(host.strip('[]')) ports.append(port) return hosts, ports def _update_volume_stats(self): stats = { 'vendor_name': 'Open Source', 'driver_version': self.VERSION, 'storage_protocol': 'ceph', 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'reserved_percentage': 0, 'multiattach': False, } backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or 'RBD' try: with RADOSClient(self) as client: ret, outbuf, _outs = client.cluster.mon_command( '{"prefix":"df", "format":"json"}', '') if ret != 0: LOG.warning(_LW('Unable to get rados pool stats.')) else: outbuf = json.loads(outbuf) pool_stats = [pool for pool in outbuf['pools'] if pool['name'] == self.configuration.rbd_pool][0]['stats'] stats['free_capacity_gb'] = round((float( pool_stats['max_avail']) / units.Gi), 2) used_capacity_gb = float( pool_stats['bytes_used']) / units.Gi stats['total_capacity_gb'] = round( (stats['free_capacity_gb'] + used_capacity_gb), 2) except self.rados.Error: # just log and return unknown capacities LOG.exception(_LE('error refreshing volume stats')) self._stats = stats def get_volume_stats(self, refresh=False): """Return the current state of the volume service. If 'refresh' is True, run the update first. """ if refresh: self._update_volume_stats() return self._stats def _get_clone_depth(self, client, volume_name, depth=0): """Returns the number of ancestral clones of the given volume.""" parent_volume = self.rbd.Image(client.ioctx, volume_name) try: _pool, parent, _snap = self._get_clone_info(parent_volume, volume_name) finally: parent_volume.close() if not parent: return depth # If clone depth was reached, flatten should have occurred so if it has # been exceeded then something has gone wrong. if depth > self.configuration.rbd_max_clone_depth: raise Exception(_("clone depth exceeds limit of %s") % (self.configuration.rbd_max_clone_depth)) return self._get_clone_depth(client, parent, depth + 1) def create_cloned_volume(self, volume, src_vref): """Create a cloned volume from another volume. Since we are cloning from a volume and not a snapshot, we must first create a snapshot of the source volume. The user has the option to limit how long a volume's clone chain can be by setting rbd_max_clone_depth. If a clone is made of another clone and that clone has rbd_max_clone_depth clones behind it, the source volume will be flattened. """ src_name = utils.convert_str(src_vref.name) dest_name = utils.convert_str(volume.name) flatten_parent = False # Do full copy if requested if self.configuration.rbd_max_clone_depth <= 0: with RBDVolumeProxy(self, src_name, read_only=True) as vol: vol.copy(vol.ioctx, dest_name) return # Otherwise do COW clone. with RADOSClient(self) as client: depth = self._get_clone_depth(client, src_name) # If source volume is a clone and rbd_max_clone_depth reached, # flatten the source before cloning. Zero rbd_max_clone_depth means # infinite is allowed. 
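# NOTE(editor): illustrative sketch, not part of the original driver. The
# depth bookkeeping above in miniature: follow parents until a volume has
# none, counting hops; flattening triggers when the source already sits at
# the configured maximum depth. Names below are hypothetical.
def clone_depth(get_parent, name, depth=0):
    """get_parent(name) -> parent name or None; stands in for
    _get_clone_info()."""
    parent = get_parent(name)
    if parent is None:
        return depth
    return clone_depth(get_parent, parent, depth + 1)

chain = {'vol-c': 'vol-b', 'vol-b': 'vol-a'}  # vol-a is the base volume
assert clone_depth(chain.get, 'vol-c') == 2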
if depth == self.configuration.rbd_max_clone_depth: LOG.debug("maximum clone depth (%d) has been reached - " "flattening source volume", self.configuration.rbd_max_clone_depth) flatten_parent = True src_volume = self.rbd.Image(client.ioctx, src_name) try: # First flatten source volume if required. if flatten_parent: _pool, parent, snap = self._get_clone_info(src_volume, src_name) # Flatten source volume LOG.debug("flattening source volume %s", src_name) src_volume.flatten() # Delete parent clone snap parent_volume = self.rbd.Image(client.ioctx, parent) try: parent_volume.unprotect_snap(snap) parent_volume.remove_snap(snap) finally: parent_volume.close() # Create new snapshot of source volume clone_snap = "%s.clone_snap" % dest_name LOG.debug("creating snapshot='%s'", clone_snap) src_volume.create_snap(clone_snap) src_volume.protect_snap(clone_snap) except Exception: # Only close if exception since we still need it. src_volume.close() raise # Now clone source volume snapshot try: LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to " "'%(dest)s'", {'src_vol': src_name, 'src_snap': clone_snap, 'dest': dest_name}) self.RBDProxy().clone(client.ioctx, src_name, clone_snap, client.ioctx, dest_name, features=client.features) except Exception: src_volume.unprotect_snap(clone_snap) src_volume.remove_snap(clone_snap) raise finally: src_volume.close() if volume.size != src_vref.size: LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to " "%(dst_size)d", {'dst_vol': volume.name, 'src_size': src_vref.size, 'dst_size': volume.size}) self._resize(volume) LOG.debug("clone created successfully") def create_volume(self, volume): """Creates a logical volume.""" size = int(volume.size) * units.Gi LOG.debug("creating volume '%s'", volume.name) chunk_size = self.configuration.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) with RADOSClient(self) as client: self.RBDProxy().create(client.ioctx, utils.convert_str(volume.name), size, order, old_format=False, features=client.features) def _flatten(self, pool, volume_name): LOG.debug('flattening %(pool)s/%(img)s', dict(pool=pool, img=volume_name)) with RBDVolumeProxy(self, volume_name, pool) as vol: vol.flatten() def _clone(self, volume, src_pool, src_image, src_snap): LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s', dict(pool=src_pool, img=src_image, snap=src_snap, dst=volume.name)) chunk_size = self.configuration.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) with RADOSClient(self, src_pool) as src_client: with RADOSClient(self) as dest_client: self.RBDProxy().clone(src_client.ioctx, utils.convert_str(src_image), utils.convert_str(src_snap), dest_client.ioctx, utils.convert_str(volume.name), features=src_client.features, order=order) def _resize(self, volume, **kwargs): size = kwargs.get('size', None) if not size: size = int(volume.size) * units.Gi with RBDVolumeProxy(self, volume.name) as vol: vol.resize(size) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" self._clone(volume, self.configuration.rbd_pool, snapshot.volume_name, snapshot.name) if self.configuration.rbd_flatten_volume_from_snapshot: self._flatten(self.configuration.rbd_pool, volume.name) if int(volume.size): self._resize(volume) def _delete_backup_snaps(self, rbd_image): backup_snaps = self._get_backup_snaps(rbd_image) if backup_snaps: for snap in backup_snaps: rbd_image.remove_snap(snap['name']) else: LOG.debug("volume has no backup snaps") def _get_clone_info(self, volume, volume_name, 
snap=None): """If volume is a clone, return its parent info. Returns a tuple of (pool, parent, snap). A snapshot may optionally be provided for the case where a cloned volume has been flattened but its snapshot still depends on the parent. """ try: if snap: volume.set_snap(snap) pool, parent, parent_snap = tuple(volume.parent_info()) if snap: volume.set_snap(None) # Strip the tag off the end of the volume name since it will not be # in the snap name. if volume_name.endswith('.deleted'): volume_name = volume_name[:-len('.deleted')] # Now check the snap name matches. if parent_snap == "%s.clone_snap" % volume_name: return pool, parent, parent_snap except self.rbd.ImageNotFound: LOG.debug("Volume %s is not a clone.", volume_name) volume.set_snap(None) return (None, None, None) def _get_children_info(self, volume, snap): """List children for the given snapshot of a volume (image). Returns a list of (pool, image). """ children_list = [] if snap: volume.set_snap(snap) children_list = volume.list_children() volume.set_snap(None) return children_list def _delete_clone_parent_refs(self, client, parent_name, parent_snap): """Walk back up the clone chain and delete references. Deletes references, i.e. deleted parent volumes and snapshots. """ parent_rbd = self.rbd.Image(client.ioctx, parent_name) parent_has_snaps = False try: # Check for grandparent _pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd, parent_name, parent_snap) LOG.debug("deleting parent snapshot %s", parent_snap) parent_rbd.unprotect_snap(parent_snap) parent_rbd.remove_snap(parent_snap) parent_has_snaps = bool(list(parent_rbd.list_snaps())) finally: parent_rbd.close() # If parent has been deleted in Cinder, delete the silent reference and # keep walking up the chain if it is itself a clone. if (not parent_has_snaps) and parent_name.endswith('.deleted'): LOG.debug("deleting parent %s", parent_name) self.RBDProxy().remove(client.ioctx, parent_name) # Now move up to grandparent if there is one if g_parent: self._delete_clone_parent_refs(client, g_parent, g_parent_snap) def delete_volume(self, volume): """Deletes a logical volume.""" # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are # utf-8 otherwise librbd will barf. volume_name = utils.convert_str(volume.name) with RADOSClient(self) as client: try: rbd_image = self.rbd.Image(client.ioctx, volume_name) except self.rbd.ImageNotFound: LOG.info(_LI("volume %s no longer exists in backend"), volume_name) return clone_snap = None parent = None # Ensure any backup snapshots are deleted self._delete_backup_snaps(rbd_image) # If the volume has non-clone snapshots this delete is expected to # raise VolumeIsBusy so do so straight away. try: snaps = rbd_image.list_snaps() for snap in snaps: if snap.name.endswith('.clone_snap'): LOG.debug("volume has clone snapshot(s)") # We grab one of these and use it when fetching parent # info in case the volume has been flattened.
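# NOTE(editor): illustrative sketch, not part of the original driver. The
# parent check in _get_clone_info() above reduces to a naming convention:
# a volume's clone snapshot is named '<volume_name>.clone_snap', and a
# '.deleted' suffix on the volume name is ignored when matching.
def is_own_clone_snap(volume_name, parent_snap):
    """Pure-function restatement of the snap-name match."""
    if volume_name.endswith('.deleted'):
        volume_name = volume_name[:-len('.deleted')]
    return parent_snap == '%s.clone_snap' % volume_name

assert is_own_clone_snap('vol-1.deleted', 'vol-1.clone_snap')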
clone_snap = snap.name break raise exception.VolumeIsBusy(volume_name=volume_name) # Determine if this volume is itself a clone _pool, parent, parent_snap = self._get_clone_info(rbd_image, volume_name, clone_snap) finally: rbd_image.close() @utils.retry(self.rbd.ImageBusy, self.configuration.rados_connection_interval, self.configuration.rados_connection_retries) def _try_remove_volume(client, volume_name): self.RBDProxy().remove(client.ioctx, volume_name) if clone_snap is None: LOG.debug("deleting rbd volume %s", volume_name) try: _try_remove_volume(client, volume_name) except self.rbd.ImageBusy: msg = (_("ImageBusy error raised while deleting rbd " "volume. This may have been caused by a " "connection from a client that has crashed and, " "if so, may be resolved by retrying the delete " "after 30 seconds has elapsed.")) LOG.warning(msg) # Now raise this so that the volume stays available and the # delete can be retried. raise exception.VolumeIsBusy(msg, volume_name=volume_name) except self.rbd.ImageNotFound: LOG.info(_LI("RBD volume %s not found, allowing delete " "operation to proceed."), volume_name) return # If it is a clone, walk back up the parent chain deleting # references. if parent: LOG.debug("volume is a clone so cleaning references") self._delete_clone_parent_refs(client, parent, parent_snap) else: # If the volume has copy-on-write clones we will not be able to # delete it. Instead we will keep it as a silent volume which # will be deleted when its snapshot and clones are deleted. new_name = "%s.deleted" % (volume_name) self.RBDProxy().rename(client.ioctx, volume_name, new_name) def create_snapshot(self, snapshot): """Creates an rbd snapshot.""" with RBDVolumeProxy(self, snapshot.volume_name) as volume: snap = utils.convert_str(snapshot.name) volume.create_snap(snap) volume.protect_snap(snap) def delete_snapshot(self, snapshot): """Deletes an rbd snapshot.""" # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are # utf-8 otherwise librbd will barf. volume_name = utils.convert_str(snapshot.volume_name) snap_name = utils.convert_str(snapshot.name) with RBDVolumeProxy(self, volume_name) as volume: try: volume.unprotect_snap(snap_name) except self.rbd.InvalidArgument: LOG.info(_LI("Unable to unprotect snapshot %s."), snap_name) except self.rbd.ImageNotFound: LOG.info(_LI("Snapshot %s does not exist in backend."), snap_name) except self.rbd.ImageBusy: children_list = self._get_children_info(volume, snap_name) if children_list: for (pool, image) in children_list: LOG.info(_LI('Image %(pool)s/%(image)s is dependent ' 'on the snapshot %(snap)s.'), {'pool': pool, 'image': image, 'snap': snap_name}) raise exception.SnapshotIsBusy(snapshot_name=snap_name) volume.remove_snap(snap_name) def retype(self, context, volume, new_type, diff, host): """Retypes a volume, allow Qos and extra_specs change.""" # No need to check encryption, extra_specs and Qos here as: # encryptions have been checked as same. # extra_specs are not used in the driver. # Qos settings are not used in the driver. LOG.debug('RBD retype called for volume %s.
No action ' 'required for RBD volumes.', volume.id) return True def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" pass def create_export(self, context, volume, connector): """Exports the volume.""" pass def remove_export(self, context, volume): """Removes an export for a logical volume.""" pass def initialize_connection(self, volume, connector): hosts, ports = self._get_mon_addrs() data = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.configuration.rbd_pool, volume.name), 'hosts': hosts, 'ports': ports, 'auth_enabled': (self.configuration.rbd_user is not None), 'auth_username': self.configuration.rbd_user, 'secret_type': 'ceph', 'secret_uuid': self.configuration.rbd_secret_uuid, 'volume_id': volume.id, } } LOG.debug('connection data: %s', data) return data def terminate_connection(self, volume, connector, **kwargs): pass def _parse_location(self, location): prefix = 'rbd://' if not location.startswith(prefix): reason = _('Not stored in rbd') raise exception.ImageUnacceptable(image_id=location, reason=reason) pieces = [urllib.parse.unquote(loc) for loc in location[len(prefix):].split('/')] if any(map(lambda p: p == '', pieces)): reason = _('Blank components') raise exception.ImageUnacceptable(image_id=location, reason=reason) if len(pieces) != 4: reason = _('Not an rbd snapshot') raise exception.ImageUnacceptable(image_id=location, reason=reason) return pieces def _get_fsid(self): with RADOSClient(self) as client: return client.cluster.get_fsid() def _is_cloneable(self, image_location, image_meta): try: fsid, pool, image, snapshot = self._parse_location(image_location) except exception.ImageUnacceptable as e: LOG.debug('not cloneable: %s.', e) return False if self._get_fsid() != fsid: LOG.debug('%s is in a different ceph cluster.', image_location) return False if image_meta['disk_format'] != 'raw': LOG.debug("rbd image clone requires image format to be " "'raw' but image %(image)s is '%(format)s'", {"image": image_location, "format": image_meta['disk_format']}) return False # check that we can read the image try: with RBDVolumeProxy(self, image, pool=pool, snapshot=snapshot, read_only=True): return True except self.rbd.Error as e: LOG.debug('Unable to open image %(loc)s: %(err)s.', dict(loc=image_location, err=e)) return False def clone_image(self, context, volume, image_location, image_meta, image_service): if image_location: # Note: image_location[0] is glance image direct_url. # image_location[1] contains the list of all locations (including # direct_url) or None if show_multiple_locations is False in # glance configuration. if image_location[1]: url_locations = [location['url'] for location in image_location[1]] else: url_locations = [image_location[0]] # iterate all locations to look for a cloneable one. 
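# NOTE(editor): illustrative sketch, not part of the original driver. The
# branch above normalizes glance's two shapes of image_location into a
# single list of candidate URLs; a direct translation:
def candidate_urls(image_location):
    """image_location is (direct_url, all_locations_or_None)."""
    direct_url, all_locations = image_location
    if all_locations:
        return [loc['url'] for loc in all_locations]
    return [direct_url]

assert candidate_urls(('rbd://fsid/pool/img/snap', None)) == \
    ['rbd://fsid/pool/img/snap']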
for url_location in url_locations: if url_location and self._is_cloneable( url_location, image_meta): _prefix, pool, image, snapshot = \ self._parse_location(url_location) self._clone(volume, pool, image, snapshot) self._resize(volume) return {'provider_location': None}, True return ({}, False) def _image_conversion_dir(self): tmpdir = (self.configuration.volume_tmp_dir or CONF.image_conversion_dir or tempfile.gettempdir()) if tmpdir == self.configuration.volume_tmp_dir: LOG.warning(_LW('volume_tmp_dir is now deprecated, please use ' 'image_conversion_dir.')) # ensure temporary directory exists if not os.path.exists(tmpdir): os.makedirs(tmpdir) return tmpdir def copy_image_to_volume(self, context, volume, image_service, image_id): tmp_dir = self._image_conversion_dir() with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp: image_utils.fetch_to_raw(context, image_service, image_id, tmp.name, self.configuration.volume_dd_blocksize, size=volume.size) self.delete_volume(volume) chunk_size = self.configuration.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) # keep using the command line import instead of librbd since it # detects zeroes to preserve sparseness in the image args = ['rbd', 'import', '--pool', self.configuration.rbd_pool, '--order', order, tmp.name, volume.name, '--new-format'] args.extend(self._ceph_args()) self._try_execute(*args) self._resize(volume) def copy_volume_to_image(self, context, volume, image_service, image_meta): tmp_dir = self._image_conversion_dir() tmp_file = os.path.join(tmp_dir, volume.name + '-' + image_meta['id']) with fileutils.remove_path_on_error(tmp_file): args = ['rbd', 'export', '--pool', self.configuration.rbd_pool, volume.name, tmp_file] args.extend(self._ceph_args()) self._try_execute(*args) image_utils.upload_volume(context, image_service, image_meta, tmp_file) os.unlink(tmp_file) def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup.volume_id) with RBDVolumeProxy(self, volume.name, self.configuration.rbd_pool) as rbd_image: rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool, self.configuration.rbd_user, self.configuration.rbd_ceph_conf) rbd_fd = RBDImageIOWrapper(rbd_meta) backup_service.backup(backup, rbd_fd) LOG.debug("volume backup complete.") def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume.""" with RBDVolumeProxy(self, volume.name, self.configuration.rbd_pool) as rbd_image: rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool, self.configuration.rbd_user, self.configuration.rbd_ceph_conf) rbd_fd = RBDImageIOWrapper(rbd_meta) backup_service.restore(backup, volume.id, rbd_fd) LOG.debug("volume restore complete.") def extend_volume(self, volume, new_size): """Extend an existing volume.""" old_size = volume.size try: size = int(new_size) * units.Gi self._resize(volume, size=size) except Exception: msg = _('Failed to Extend Volume ' '%(volname)s') % {'volname': volume.name} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.", {'old_size': old_size, 'new_size': new_size}) def manage_existing(self, volume, existing_ref): """Manages an existing image. Renames the image name to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated. 
:param volume: volume ref info to be set :param existing_ref: existing_ref is a dictionary of the form: {'source-name': } """ # Raise an exception if we didn't find a suitable rbd image. with RADOSClient(self) as client: rbd_name = existing_ref['source-name'] self.RBDProxy().rename(client.ioctx, utils.convert_str(rbd_name), utils.convert_str(volume.name)) def manage_existing_get_size(self, volume, existing_ref): """Return size of an existing image for manage_existing. :param volume: volume ref info to be set :param existing_ref: existing_ref is a dictionary of the form: {'source-name': } """ # Check that the reference is valid if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) rbd_name = utils.convert_str(existing_ref['source-name']) with RADOSClient(self) as client: # Raise an exception if we didn't find a suitable rbd image. try: rbd_image = self.rbd.Image(client.ioctx, rbd_name) image_size = rbd_image.size() except self.rbd.ImageNotFound: kwargs = {'existing_ref': rbd_name, 'reason': 'Specified rbd image does not exist.'} raise exception.ManageExistingInvalidReference(**kwargs) finally: rbd_image.close() # RBD image size is returned in bytes. Attempt to parse # size as a float and round up to the next integer. try: convert_size = int(math.ceil(int(image_size))) / units.Gi return convert_size except ValueError: exception_message = (_("Failed to manage existing volume " "%(name)s, because reported size " "%(size)s was not a floating-point" " number.") % {'name': rbd_name, 'size': image_size}) raise exception.VolumeBackendAPIException( data=exception_message) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update from RBD for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. :param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ name_id = None provider_location = None existing_name = CONF.volume_name_template % new_volume.id wanted_name = CONF.volume_name_template % volume.id with RADOSClient(self) as client: try: self.RBDProxy().rename(client.ioctx, utils.convert_str(existing_name), utils.convert_str(wanted_name)) except self.rbd.ImageNotFound: LOG.error(_LE('Unable to rename the logical volume ' 'for volume %s.'), volume.id) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. name_id = new_volume._name_id or new_volume.id provider_location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': provider_location} def migrate_volume(self, context, volume, host): return (False, None) cinder-8.0.0/cinder/volume/drivers/dell/0000775000567000056710000000000012701406543021277 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/dell/dell_storagecenter_fc.py0000664000567000056710000001633012701406250026164 0ustar jenkinsjenkins00000000000000# Copyright 2015 Dell Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Dell Storage Center.""" from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _, _LE from cinder.volume import driver from cinder.volume.drivers.dell import dell_storagecenter_common from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver, driver.FibreChannelDriver): """Implements commands for Dell Storage Center FC management. To enable the driver add the following line to the cinder configuration: volume_driver=cinder.volume.drivers.dell.DellStorageCenterFCDriver Version history: 1.0.0 - Initial driver 1.1.0 - Added extra spec support for Storage Profile selection 1.2.0 - Added consistency group support. 2.0.0 - Switched to inheriting functional objects rather than volume driver. 2.1.0 - Added support for ManageableVD. 2.2.0 - Driver retype support for switching volume's Storage Profile 2.3.0 - Added Legacy Port Mode Support 2.3.1 - Updated error handling. 2.4.0 - Added Replication V2 support. 2.4.1 - Updated Replication support to V2.1. 2.5.0 - ManageableSnapshotsVD implemented. """ VERSION = '2.5.0' def __init__(self, *args, **kwargs): super(DellStorageCenterFCDriver, self).__init__(*args, **kwargs) self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'Dell-FC' @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. """ # We use the volume id as the volume name as it is a # known unique name. volume_name = volume.get('id') LOG.debug('Initialize connection: %s', volume_name) with self._client.open_connection() as api: try: # Find our server. wwpns = connector.get('wwpns') for wwn in wwpns: scserver = api.find_server(wwn) if scserver is not None: break # No? Create it. if scserver is None: scserver = api.create_server_multiple_hbas(wwpns) # Find the volume on the storage center. scvolume = api.find_volume(volume_name) if scserver is not None and scvolume is not None: mapping = api.map_volume(scvolume, scserver) if mapping is not None: # Since we just mapped our volume we had best update # our sc volume object.
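# NOTE(editor): illustrative sketch, not part of the original driver. The
# happy path below ends in a 'fibre_channel' properties dict; its shape,
# with invented example values:
example_fc_info = {
    'driver_volume_type': 'fibre_channel',
    'data': {
        'target_lun': 1,
        'target_discovered': True,
        'target_wwn': ['5000d31000abcd01', '5000d31000abcd02'],
        # initiator WWPN -> target WWPNs it may be zoned to
        'initiator_target_map': {
            '21000024ff409b00': ['5000d31000abcd01', '5000d31000abcd02'],
        },
        'discard': True,
    },
}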
scvolume = api.find_volume(volume_name) lun, targets, init_targ_map = api.find_wwns(scvolume, scserver) if lun is not None and len(targets) > 0: data = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': lun, 'target_discovered': True, 'target_wwn': targets, 'initiator_target_map': init_targ_map, 'discard': True}} LOG.debug('Return FC data: %s', data) return data LOG.error(_LE('Lun mapping returned null!')) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to initialize connection.')) # We get here because our mapping is none so blow up. raise exception.VolumeBackendAPIException(_('Unable to map volume.')) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, force=False, **kwargs): # Get our volume name volume_name = volume.get('id') LOG.debug('Terminate connection: %s', volume_name) with self._client.open_connection() as api: try: wwpns = connector.get('wwpns') for wwn in wwpns: scserver = api.find_server(wwn) if scserver is not None: break # Find the volume on the storage center. scvolume = api.find_volume(volume_name) # Get our target map so we can return it to free up a zone. lun, targets, init_targ_map = api.find_wwns(scvolume, scserver) # If we have a server and a volume lets unmap them. if (scserver is not None and scvolume is not None and api.unmap_volume(scvolume, scserver) is True): LOG.debug('Connection terminated') else: raise exception.VolumeBackendAPIException( _('Terminate connection failed')) # basic return info... info = {'driver_volume_type': 'fibre_channel', 'data': {}} # If no volumes are left mapped to this server we return the # target map so that the zone can be freed up. if api.get_volume_count(scserver) == 0: info['data'] = {'target_wwn': targets, 'initiator_target_map': init_targ_map} return info except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to terminate connection')) raise exception.VolumeBackendAPIException( _('Terminate connection unable to connect to backend.')) def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, update the stats first. """ if refresh: self._update_volume_stats() # Update our protocol to the correct one. self._stats['storage_protocol'] = 'FC' return self._stats cinder-8.0.0/cinder/volume/drivers/dell/__init__.py0000664000567000056710000000000012701406250023371 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py0000664000567000056710000001747112701406250026715 0ustar jenkinsjenkins00000000000000# Copyright 2015 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
"""Volume driver for Dell Storage Center.""" from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.volume import driver from cinder.volume.drivers.dell import dell_storagecenter_common LOG = logging.getLogger(__name__) class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver, driver.ISCSIDriver): """Implements commands for Dell StorageCenter ISCSI management. To enable the driver add the following line to the cinder configuration: volume_driver=cinder.volume.drivers.dell.DellStorageCenterISCSIDriver Version history: 1.0.0 - Initial driver 1.1.0 - Added extra spec support for Storage Profile selection 1.2.0 - Added consistency group support. 2.0.0 - Switched to inheriting functional objects rather than volume driver. 2.1.0 - Added support for ManageableVD. 2.2.0 - Driver retype support for switching volume's Storage Profile. Added API 2.2 support. 2.3.0 - Added Legacy Port Mode Support 2.3.1 - Updated error handling. 2.4.0 - Added Replication V2 support. 2.4.1 - Updated Replication support to V2.1. 2.5.0 - ManageableSnapshotsVD implemented. """ VERSION = '2.5.0' def __init__(self, *args, **kwargs): super(DellStorageCenterISCSIDriver, self).__init__(*args, **kwargs) self.backend_name = ( self.configuration.safe_get('volume_backend_name') or 'Dell-iSCSI') def initialize_connection(self, volume, connector): # Initialize_connection will find or create a server identified by the # connector on the Dell backend. It will then map the volume to it # and return the properties as follows.. # {'driver_volume_type': 'iscsi', # data = {'target_discovered': False, # 'target_iqn': preferred iqn, # 'target_iqns': all iqns, # 'target_portal': preferred portal, # 'target_portals': all portals, # 'target_lun': preferred lun, # 'target_luns': all luns, # } # We use id to name the volume name as it is a # known unique name. volume_name = volume.get('id') initiator_name = connector.get('initiator') multipath = connector.get('multipath', False) LOG.info(_LI('initialize_ connection: %(vol)s:%(initiator)s'), {'vol': volume_name, 'initiator': initiator_name}) with self._client.open_connection() as api: try: # Find our server. server = api.find_server(initiator_name) # No? Create it. if server is None: server = api.create_server(initiator_name) # Find the volume on the storage center. scvolume = api.find_volume(volume_name) # if we have a server and a volume lets bring them together. if server is not None and scvolume is not None: mapping = api.map_volume(scvolume, server) if mapping is not None: # Since we just mapped our volume we had best update # our sc volume object. scvolume = api.find_volume(volume_name) # Our return. iscsiprops = {} ip = None port = None if not multipath: # We want to make sure we point to the specified # ip address for our target_portal return. This # isn't an issue with multipath since it should # try all the alternate portal. ip = self.configuration.iscsi_ip_address port = self.configuration.iscsi_port # Three cases that should all be satisfied with the # same return of Target_Portal and Target_Portals. # 1. Nova is calling us so we need to return the # Target_Portal stuff. It should ignore the # Target_Portals stuff. # 2. OS brick is calling us in multipath mode so we # want to return Target_Portals. It will ignore # the Target_Portal stuff. # 3. OS brick is calling us in single path mode so # we want to return Target_Portal and # Target_Portals as alternates. 
iscsiprops = (api.find_iscsi_properties(scvolume, ip, port)) # Return our iscsi properties. iscsiprops['discard'] = True return {'driver_volume_type': 'iscsi', 'data': iscsiprops} # Re-raise any backend exception. except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to initialize connection')) # If there is a data structure issue then detail the exception # and bail with a Backend Exception. except Exception as error: LOG.error(error) raise exception.VolumeBackendAPIException(error) # We get here because our mapping is none or we have no valid iqn to # return so blow up. raise exception.VolumeBackendAPIException( _('Unable to map volume')) def terminate_connection(self, volume, connector, force=False, **kwargs): # Grab some initial info. initiator_name = connector.get('initiator') volume_name = volume.get('id') LOG.debug('Terminate connection: %(vol)s:%(initiator)s', {'vol': volume_name, 'initiator': initiator_name}) with self._client.open_connection() as api: try: scserver = api.find_server(initiator_name) # Find the volume on the storage center. scvolume = api.find_volume(volume_name) # If we have a server and a volume lets pull them apart. if (scserver is not None and scvolume is not None and api.unmap_volume(scvolume, scserver) is True): LOG.debug('Connection terminated') return except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to terminate connection ' '%(initiator)s %(vol)s'), {'initiator': initiator_name, 'vol': volume_name}) raise exception.VolumeBackendAPIException( _('Terminate connection failed')) cinder-8.0.0/cinder/volume/drivers/dell/dell_storagecenter_api.py0000664000567000056710000033370712701406257026366 0ustar jenkinsjenkins00000000000000# Copyright 2015 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Interface for interacting with the Dell Storage Center array.""" import json import os.path from oslo_log import log as logging import requests from simplejson import scanner import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils LOG = logging.getLogger(__name__) class PayloadFilter(object): """PayloadFilter Simple class for creating filters for interacting with the Dell Storage API 15.3 and later. """ def __init__(self, filtertype='AND'): self.payload = {} self.payload['filter'] = {'filterType': filtertype, 'filters': []} def append(self, name, val, filtertype='Equals'): if val is not None: apifilter = {} apifilter['attributeName'] = name apifilter['attributeValue'] = val apifilter['filterType'] = filtertype self.payload['filter']['filters'].append(apifilter) class LegacyPayloadFilter(object): """LegacyPayloadFilter Simple class for creating filters for interacting with the Dell Storage API 15.1 and 15.2. 
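For example, after a single append('scSerialNumber', 64702) call the payload built by this class looks like the following (the serial number is hypothetical): {'filterType': 'AND', 'filters': [{'attributeName': 'scSerialNumber', 'attributeValue': 64702, 'filterType': 'Equals'}]}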
""" def __init__(self, filter_type='AND'): self.payload = {'filterType': filter_type, 'filters': []} def append(self, name, val, filtertype='Equals'): if val is not None: apifilter = {} apifilter['attributeName'] = name apifilter['attributeValue'] = val apifilter['filterType'] = filtertype self.payload['filters'].append(apifilter) class HttpClient(object): """HttpClient Helper for making the REST calls. """ def __init__(self, host, port, user, password, verify, apiversion): """HttpClient handles the REST requests. :param host: IP address of the Dell Data Collector. :param port: Port the Data Collector is listening on. :param user: User account to login with. :param password: Password. :param verify: Boolean indicating whether certificate verification should be turned on or not. :param apiversion: Dell API version. """ self.baseUrl = 'https://%s:%s/api/rest/' % (host, port) self.session = requests.Session() self.session.auth = (user, password) self.header = {} self.header['Content-Type'] = 'application/json; charset=utf-8' self.header['Accept'] = 'application/json' self.header['x-dell-api-version'] = apiversion self.verify = verify # Verify is a configurable option. So if this is false do not # spam the c-vol log. if not verify: requests.packages.urllib3.disable_warnings() def __enter__(self): return self def __exit__(self, type, value, traceback): self.session.close() def __formatUrl(self, url): return '%s%s' % (self.baseUrl, url if url[0] != '/' else url[1:]) @utils.retry(exceptions=(requests.ConnectionError,)) def get(self, url): LOG.debug('get: %(url)s', {'url': url}) return self.session.get( self.__formatUrl(url), headers=self.header, verify=self.verify) @utils.retry(exceptions=(requests.ConnectionError,)) def post(self, url, payload): LOG.debug('post: %(url)s data: %(payload)s', {'url': url, 'payload': payload}) return self.session.post( self.__formatUrl(url), data=json.dumps(payload, ensure_ascii=False).encode('utf-8'), headers=self.header, verify=self.verify) @utils.retry(exceptions=(requests.ConnectionError,)) def put(self, url, payload): LOG.debug('put: %(url)s data: %(payload)s', {'url': url, 'payload': payload}) return self.session.put( self.__formatUrl(url), data=json.dumps(payload, ensure_ascii=False).encode('utf-8'), headers=self.header, verify=self.verify) @utils.retry(exceptions=(requests.ConnectionError,)) def delete(self, url): LOG.debug('delete: %(url)s', {'url': url}) return self.session.delete( self.__formatUrl(url), headers=self.header, verify=self.verify) class StorageCenterApiHelper(object): """StorageCenterApiHelper Helper class for API access. Handles opening and closing the connection to the Dell REST API. """ def __init__(self, config, active_backend_id): self.config = config # Now that active_backend_id is set on failover. # Use that if set. Mark the backend as failed over. self.active_backend_id = active_backend_id self.ssn = self.config.dell_sc_ssn self.apiversion = '2.0' def open_connection(self): """Creates the StorageCenterApi object. :return: StorageCenterApi object. :raises: VolumeBackendAPIException """ connection = None LOG.info(_LI('open_connection to %(ssn)s at %(ip)s'), {'ssn': self.ssn, 'ip': self.config.san_ip}) if self.ssn: """Open connection to REST API.""" connection = StorageCenterApi(self.config.san_ip, self.config.dell_sc_api_port, self.config.san_login, self.config.san_password, self.config.dell_sc_verify_cert, self.apiversion) # This instance is for a single backend. 
That backend has a # few items of information we should save rather than passing them # about. connection.vfname = self.config.dell_sc_volume_folder connection.sfname = self.config.dell_sc_server_folder # Set appropriate ssn and failover state. if self.active_backend_id: # active_backend_id is a string. Convert to int. connection.ssn = int(self.active_backend_id) connection.failed_over = True else: connection.ssn = self.ssn connection.failed_over = False # Open connection. connection.open_connection() # Save our api version for next time. if self.apiversion != connection.apiversion: LOG.info(_LI('open_connection: Updating API version to %s'), connection.apiversion) self.apiversion = connection.apiversion else: raise exception.VolumeBackendAPIException( data=_('Configuration error: dell_sc_ssn not set.')) return connection class StorageCenterApi(object): """StorageCenterApi Handles calls to Dell SC and EM via the REST API interface. Version history: 1.0.0 - Initial driver 1.1.0 - Added extra spec support for Storage Profile selection 1.2.0 - Added consistency group support. 2.0.0 - Switched to inheriting functional objects rather than volume driver. 2.1.0 - Added support for ManageableVD. 2.2.0 - Added API 2.2 support. 2.3.0 - Added Legacy Port Mode Support 2.3.1 - Updated error handling. 2.4.0 - Added Replication V2 support. 2.4.1 - Updated Replication support to V2.1. 2.5.0 - ManageableSnapshotsVD implemented. """ APIDRIVERVERSION = '2.5.0' def __init__(self, host, port, user, password, verify, apiversion): """This creates a connection to Dell SC or EM. :param host: IP address of the REST interface. :param port: Port the REST interface is listening on. :param user: User account to login with. :param password: Password. :param verify: Boolean indicating whether certificate verification should be turned on or not. :param apiversion: Version used on login. """ self.notes = 'Created by Dell Cinder Driver' self.repl_prefix = 'Cinder repl of ' self.ssn = None self.failed_over = False self.vfname = 'openstack' self.sfname = 'openstack' self.legacypayloadfilters = False self.consisgroups = True self.apiversion = apiversion # Nothing other than Replication should care if we are direct connect # or not. self.is_direct_connect = False self.client = HttpClient(host, port, user, password, verify, apiversion) def __enter__(self): return self def __exit__(self, type, value, traceback): self.close_connection() @staticmethod def _check_result(rest_response): """Checks and logs API responses. :param rest_response: The result from a REST API call. :returns: ``True`` if success, ``False`` otherwise. """ if 200 <= rest_response.status_code < 300: # API call was a normal success return True LOG.debug('REST call result:\n' '\tUrl: %(url)s\n' '\tCode: %(code)d\n' '\tReason: %(reason)s\n' '\tText: %(text)s', {'url': rest_response.url, 'code': rest_response.status_code, 'reason': rest_response.reason, 'text': rest_response.text}) return False @staticmethod def _path_to_array(path): """Breaks a path into a string array. :param path: Path to a folder on the Storage Center. :return: An array of each path element, in order.
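For example (an illustrative path, not a real default), _path_to_array('dell/volumes/cinder') returns ['dell', 'volumes', 'cinder'].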
""" array = [] while True: (path, tail) = os.path.split(path) if tail == '': array.reverse() return array array.append(tail) @staticmethod def _lower_first(s): return s[:1].lower() + s[1:] if s else '' def _lower_key(self, in_dict): if type(in_dict) is dict: out_dict = {} for key, item in in_dict.items(): out_dict[self._lower_first(key)] = self._lower_key(item) return out_dict elif type(in_dict) is list: return [self._lower_key(obj) for obj in in_dict] else: return in_dict def _first_result(self, blob): """Get the first result from the JSON return value. :param blob: Full return from a REST call. :return: The JSON encoded dict or the first item in a JSON encoded list. """ return self._get_result(blob, None, None) def _get_result(self, blob, attribute, value): """Find the result specified by attribute and value. If the JSON blob is a list then it will be searched for the attribute and value combination. If attribute and value are not specified then the the first item is returned. If the JSON blob is a dict then it will be returned so long as the dict matches the attribute and value combination or attribute is None. :param blob: The REST call's JSON response. Can be a list or dict. :param attribute: The attribute we are looking for. If it is None the first item in the list, or the dict, is returned. :param value: The attribute value we are looking for. If the attribute is None this value is ignored. :returns: The JSON content in blob, the dict specified by matching the attribute and value or None. """ rsp = None content = self._get_json(blob) if content is not None: # We can get a list or a dict or nothing if isinstance(content, list): for r in content: if attribute is None or r.get(attribute) == value: rsp = r break elif isinstance(content, dict): if attribute is None or content.get(attribute) == value: rsp = content elif attribute is None: rsp = content if rsp is None: LOG.debug('Unable to find result where %(attr)s is %(val)s', {'attr': attribute, 'val': value}) LOG.debug('Blob was %(blob)s', {'blob': blob.text}) return rsp def _get_json(self, blob): """Returns a dict from the JSON of a REST response. :param blob: The response from a REST call. :returns: JSON or None on error. """ try: return self._lower_key(blob.json()) except AttributeError: LOG.error(_LE('Error invalid json: %s'), blob) except TypeError as ex: LOG.error(_LE('Error TypeError. %s'), ex) except scanner.JSONDecodeError as ex: LOG.error(_LE('Error JSONDecodeError. %s'), ex) # We are here so this went poorly. Log our blob. LOG.debug('_get_json blob %s', blob) return None def _get_id(self, blob): """Returns the instanceId from a Dell REST object. :param blob: A Dell SC REST call's response. :returns: The instanceId from the Dell SC object or None on error. """ try: if isinstance(blob, dict): return blob.get('instanceId') except AttributeError: LOG.error(_LE('Invalid API object: %s'), blob) except TypeError as ex: LOG.error(_LE('Error TypeError. %s'), ex) except scanner.JSONDecodeError as ex: LOG.error(_LE('Error JSONDecodeError. %s'), ex) LOG.debug('_get_json blob %s', blob) return None def _get_payload_filter(self, filterType='AND'): # 2.1 or earlier and we are talking LegacyPayloadFilters. if self.legacypayloadfilters: return LegacyPayloadFilter(filterType) return PayloadFilter(filterType) def _check_version_fail(self, payload, response): try: # Is it even our error? if response.text.startswith('Invalid API version specified, ' 'the version must be in the range ['): # We're looking for something very specific. 
The except # will catch any errors. # Update our version and update our header. self.apiversion = response.text.split('[')[1].split(',')[0] self.client.header['x-dell-api-version'] = self.apiversion LOG.debug('API version updated to %s', self.apiversion) # Give login another go. r = self.client.post('ApiConnection/Login', payload) return r except Exception: # We don't care what failed. The clues are already in the logs. # Just log a parsing error and move on. LOG.error(_LE('_check_version_fail: Parsing error.')) # Just eat this if it isn't a version error. return response def open_connection(self): """Authenticate with Dell REST interface. :raises: VolumeBackendAPIException. """ # Login payload = {} payload['Application'] = 'Cinder REST Driver' payload['ApplicationVersion'] = self.APIDRIVERVERSION LOG.debug('open_connection %s', self.client.header['x-dell-api-version']) r = self.client.post('ApiConnection/Login', payload) if not self._check_result(r): # SC requires a specific version. See if we can get it. r = self._check_version_fail(payload, r) # Either we tried to login and have a new result or we are # just checking the same result. Either way raise on fail. if not self._check_result(r): raise exception.VolumeBackendAPIException( data=_('Failed to connect to Dell REST API')) # We should be logged in. Try to grab the api version out of the # response. try: apidict = self._get_json(r) version = apidict['apiVersion'] self.is_direct_connect = apidict['provider'] == 'StorageCenter' splitver = version.split('.') if splitver[0] == '2': if splitver[1] == '0': self.consisgroups = False self.legacypayloadfilters = True elif splitver[1] == '1': self.legacypayloadfilters = True return except Exception: # Good return but not the login response we were expecting. # Log it and error out. LOG.error(_LE('Unrecognized Login Response: %s'), r) def close_connection(self): """Logout of Dell REST API.""" r = self.client.post('ApiConnection/Logout', {}) # 204 expected. self._check_result(r) self.client = None def find_sc(self, ssn=-1): """Check that the SC is there and being managed by EM. :returns: The SC SSN. :raises: VolumeBackendAPIException """ # We might be looking for another ssn. If not then # look for our default. if ssn == -1: ssn = self.ssn r = self.client.get('StorageCenter/StorageCenter') result = self._get_result(r, 'scSerialNumber', ssn) if result is None: LOG.error(_LE('Failed to find %(s)s. Result %(r)s'), {'s': ssn, 'r': r}) raise exception.VolumeBackendAPIException( data=_('Failed to find Storage Center')) return self._get_id(result) # Folder functions def _create_folder(self, url, parent, folder): """Creates folder under parent. This can create both server and volume folders. The REST url sent in defines the folder type being created on the Dell Storage Center backend. :param url: This is the Dell SC rest url for creating the specific (server or volume) folder type. :param parent: The instance ID of this folder's parent folder. :param folder: The folder name to be created. This is one level deep. :returns: The REST folder object. """ scfolder = None payload = {} payload['Name'] = folder payload['StorageCenter'] = self.ssn if parent != '': payload['Parent'] = parent payload['Notes'] = self.notes r = self.client.post(url, payload) # 201 expected. if self._check_result(r): scfolder = self._first_result(r) return scfolder def _create_folder_path(self, url, foldername): """Creates a folder path from a fully qualified name.
The REST url sent in defines the folder type being created on the Dell Storage Center backend. Thus this is generic to server and volume folders. :param url: This is the Dell SC REST url for creating the specific (server or volume) folder type. :param foldername: The full folder name with path. :returns: The REST folder object. """ path = self._path_to_array(foldername) folderpath = '' instanceId = '' # Technically the first folder is the root so that is already created. found = True scfolder = None for folder in path: folderpath = folderpath + folder # If the last was found see if this part of the path exists too if found: listurl = url + '/GetList' scfolder = self._find_folder(listurl, folderpath) if scfolder is None: found = False # We didn't find it so create it if found is False: scfolder = self._create_folder(url, instanceId, folder) # If we haven't found a folder or created it then leave if scfolder is None: LOG.error(_LE('Unable to create folder path %s'), folderpath) break # Next part of the path will need this instanceId = self._get_id(scfolder) folderpath = folderpath + '/' return scfolder def _find_folder(self, url, foldername): """Find a folder on the SC using the specified url. Most of the time the folder will already have been created so we look for the end folder and check that the rest of the path is right. The REST url sent in defines the folder type being created on the Dell Storage Center backend. Thus this is generic to server and volume folders. :param url: The portion of the url after the base url (see http class) to use for this operation. (Can be for Server or Volume folders.) :param foldername: Full path to the folder we are looking for. :returns: Dell folder object. """ pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) basename = os.path.basename(foldername) pf.append('Name', basename) # If we have any kind of path we throw it into the filters. folderpath = os.path.dirname(foldername) if folderpath != '': # SC convention is to end with a '/' so make sure we do. folderpath += '/' pf.append('folderPath', folderpath) folder = None r = self.client.post(url, pf.payload) # 200 expected. if self._check_result(r): folder = self._get_result(r, 'folderPath', folderpath) return folder def _find_volume_folder(self, create=False): """Looks for the volume folder where backend volumes will be created. Volume folder is specified in the cinder.conf. See __init. :param create: If True will create the folder if not found. :returns: Folder object. """ folder = self._find_folder('StorageCenter/ScVolumeFolder/GetList', self.vfname) # Doesn't exist? make it if folder is None and create is True: folder = self._create_folder_path('StorageCenter/ScVolumeFolder', self.vfname) return folder def _init_volume(self, scvolume): """Initializes the volume. Maps the volume to a random server and immediately unmaps it. This initializes the volume. Don't wig out if this fails. :param scvolume: Dell Volume object. """ pf = self._get_payload_filter() pf.append('scSerialNumber', scvolume.get('scSerialNumber'), 'Equals') r = self.client.post('StorageCenter/ScServer/GetList', pf.payload) # 200 expected. if self._check_result(r): scservers = self._get_json(r) # Sort through the servers looking for one with connectivity. for scserver in scservers: # TODO(tom_swanson): Add check for server type. # This needs to be either a physical or virtual server. # Outside of tempest tests this should not matter as we only # "init" a volume to allow snapshotting of an empty volume.
if scserver.get('status', '').lower() != 'down': # Map to actually create the volume self.map_volume(scvolume, scserver) # We have changed the volume so grab a new copy of it. scvolume = self.find_volume(scvolume.get('name')) self.unmap_volume(scvolume, scserver) return # We didn't map/unmap the volume. So no initialization done. # Warn the user before we leave. Note that this is almost certainly # a tempest test failure we are trying to catch here. A snapshot # has likely been attempted before the volume has been instantiated # on the Storage Center. In the real world no one will snapshot # a volume without first putting some data in that volume. LOG.warning(_LW('Volume initialization failure. (%s)'), self._get_id(scvolume)) def _find_storage_profile(self, storage_profile): """Looks for a Storage Profile on the array. Storage Profiles determine tiering settings. If not specified a volume will use the Default storage profile. :param storage_profile: The Storage Profile name to find with any spaces stripped. :returns: The Storage Profile object or None. """ if not storage_profile: return None # Since we are stripping out spaces for convenience we are not # able to just filter on name. Need to get all Storage Profiles # and look through for the one we want. Never many profiles, so # this doesn't cause as much overhead as it might seem. storage_profile = storage_profile.replace(' ', '').lower() pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn, 'Equals') r = self.client.post( 'StorageCenter/ScStorageProfile/GetList', pf.payload) # 200 expected. if self._check_result(r): profiles = self._get_json(r) for profile in profiles: # Look for the stripped, case insensitive match name = profile.get('name', '').replace(' ', '').lower() if name == storage_profile: return profile return None def _find_user_replay_profiles(self): """Find user default profiles. Note that this only deals with standard and not cg profiles. :return: List of replay profiles. """ user_prefs = self._get_user_preferences() if user_prefs: profileids = [profile['instanceId'] for profile in user_prefs['replayProfileList']] return profileids return [] def _find_daily_replay_profile(self): """Find the system replay profile named "Daily". :return: Profile instanceId or None. """ pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) pf.append('instanceName', 'Daily') r = self.client.post('StorageCenter/ScReplayProfile/GetList', pf.payload) if self._check_result(r): profiles = self._get_json(r) if profiles: return profiles[0]['instanceId'] return None def _find_replay_profiles(self, replay_profile_string): """Find our replay profiles. Note that if called on volume creation the removeids list can be safely ignored. :param replay_profile_string: Comma separated list of profile names. :return: List replication profiles to use, List to remove. :raises VolumeBackendAPIException: If we can't find our profiles. """ addids = [] removeids = [] replay_profiles = [] if replay_profile_string: replay_profiles = replay_profile_string.split(',') # Most of the time they will not specify this so don't call anything. if replay_profiles: pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) pf.append('type', 'Standard') r = self.client.post('StorageCenter/ScReplayProfile/GetList', pf.payload) if self._check_result(r): profiles = self._get_json(r) for profile in profiles: if replay_profiles.count(profile['name']) > 0: addids.append(profile['instanceId']) else: # in the volume. 
removeids.append(profile['instanceId']) # Check that we've found what we are looking for if anything if len(addids) != len(replay_profiles): msg = (_('Unable to locate specified replay profiles %s ') % replay_profile_string) raise exception.VolumeBackendAPIException(data=msg) return addids, removeids def update_replay_profiles(self, scvolume, replay_profile_string): """Update our replay profiles. If the replay_profile_string is empty we look for the user's default profiles. If those aren't found we look for the Daily profile. Note that this is in addition to the CG profiles which we do not touch. :param scvolume: SC Volume object. :param replay_profile_string: Comma separated string of replay profile names. :return: True/False. """ # Find our replay_profiles. addids, removeids = self._find_replay_profiles(replay_profile_string) # We either found what we were looking for. # If we are clearing out our ids then find a default. if not addids: # if no replay profiles specified we must be clearing out. addids = self._find_user_replay_profiles() if not addids: addids = [self._find_daily_replay_profile()] # Do any removals first. for id in removeids: # We might have added to the addids list after creating removeids. # User preferences or the daily profile could have been added. # If our id is in both lists just skip it and remove it from # The add list. if addids.count(id): addids.remove(id) elif not self._update_volume_profiles( scvolume, addid=None, removeid=id): return False # Add anything new. for id in addids: if not self._update_volume_profiles( scvolume, addid=id, removeid=None): return False return True def create_volume(self, name, size, storage_profile=None, replay_profile_string=None): """Creates a new volume on the Storage Center. It will create it in a folder called self.vfname. If self.vfname does not exist it will create it. If it cannot create it the volume will be created in the root. :param name: Name of the volume to be created on the Dell SC backend. This is the cinder volume ID. :param size: The size of the volume to be created in GB. :param storage_profile: Optional storage profile to set for the volume. :param replay_profile_string: Optional replay profile to set for the volume. :returns: Dell Volume object or None. """ LOG.debug('Create Volume %(name)s %(ssn)s %(folder)s %(profile)s', {'name': name, 'ssn': self.ssn, 'folder': self.vfname, 'profile': storage_profile, 'replay': replay_profile_string }) # Find our folder folder = self._find_volume_folder(True) # If we actually have a place to put our volume create it if folder is None: LOG.warning(_LW('Unable to create folder %s'), self.vfname) # See if we need a storage profile profile = self._find_storage_profile(storage_profile) if storage_profile and profile is None: msg = _('Storage Profile %s not found.') % storage_profile raise exception.VolumeBackendAPIException(data=msg) # Find our replay_profiles. addids, removeids = self._find_replay_profiles(replay_profile_string) # Init our return. scvolume = None # Create the volume payload = {} payload['Name'] = name payload['Notes'] = self.notes payload['Size'] = '%d GB' % size payload['StorageCenter'] = self.ssn if folder is not None: payload['VolumeFolder'] = self._get_id(folder) if profile: payload['StorageProfile'] = self._get_id(profile) # This is a new volume so there is nothing to remove. if addids: payload['ReplayProfileList'] = addids r = self.client.post('StorageCenter/ScVolume', payload) # 201 expected. 
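# For reference, the payload POSTed above looks roughly like this
# (an illustrative sketch; the name, size and instance ids are
# hypothetical, sketched from the assignments above):
#
#     {'Name': 'volume-00000000-0000-0000-0000-000000000000',
#      'Notes': 'Created by Dell Cinder Driver',
#      'Size': '10 GB',
#      'StorageCenter': 64702,
#      'VolumeFolder': 'folder-instance-id',
#      'StorageProfile': 'profile-instance-id',
#      'ReplayProfileList': ['replay-profile-instance-id']}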
if self._check_result(r): scvolume = self._get_json(r) if scvolume: LOG.info(_LI('Created volume %(instanceId)s: %(name)s'), {'instanceId': scvolume['instanceId'], 'name': scvolume['name']}) else: LOG.error(_LE('ScVolume returned success with empty payload.' ' Attempting to locate volume')) # In theory it is there since success was returned. # Try one last time to find it before returning. scvolume = self.find_volume(name) else: LOG.error(_LE('Unable to create volume on SC: %s'), name) return scvolume def _get_volume_list(self, name, deviceid, filterbyvfname=True): """Return the specified list of volumes. :param name: Volume name. :param deviceid: Volume device ID on the SC backend. :param filterbyvfname: If set to true then this filters by the preset folder name. :return: Returns the scvolume list or None. """ result = None # We need a name or a device ID to find a volume. if name or deviceid: pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) if name is not None: pf.append('Name', name) if deviceid is not None: pf.append('DeviceId', deviceid) # set folderPath if filterbyvfname: vfname = (self.vfname if self.vfname.endswith('/') else self.vfname + '/') pf.append('volumeFolderPath', vfname) r = self.client.post('StorageCenter/ScVolume/GetList', pf.payload) # 200 expected. if self._check_result(r): result = self._get_json(r) # We return None if there was an error and a list if the command # succeeded. It might be an empty list. return result def find_volume(self, name): """Search self.ssn for a volume of the given name. This searches the folder self.vfname (specified in the cinder.conf) for the volume first. If not found it searches the entire array for the volume. Remember that in the case of a failover we have already been switched to our new SSN. So the initial searches are valid. :param name: Name of the volume to search for. This is the cinder volume ID. :returns: Dell Volume object or None if not found. :raises VolumeBackendAPIException: If multiple copies are found. """ LOG.debug('Searching %(sn)s for %(name)s', {'sn': self.ssn, 'name': name}) # Cannot find a volume without the name if name is None: return None # Look for our volume in our folder. vollist = self._get_volume_list(name, None, True) # If an empty list was returned they probably moved the volumes or # changed the folder name so try again without the folder. if not vollist: LOG.debug('Cannot find volume %(n)s in %(v)s. Searching SC.', {'n': name, 'v': self.vfname}) vollist = self._get_volume_list(name, None, False) # If we found nothing and are failed over then we might not have # completed our replication failover. Look for the replication # volume. We are already pointing at that SC. if not vollist and self.failed_over: LOG.debug('Unable to locate volume. Checking for failover.') # Get our replication volume name. fn = self._repl_name(name) vollist = self._get_volume_list(fn, None, False) # Same deal as the rest of these. If 0 not found. If greater than # one we have multiple copies and cannot return a valid result. if len(vollist) == 1: LOG.info(_LI('Found failover volume. Completing failover.')) # Import our found volume. This completes our failover. scvolume = self._import_one(vollist[0], name) if scvolume: LOG.info(_LI('Imported %(fail)s to %(guid)s.'), {'fail': fn, 'guid': name}) return scvolume msg = _('Unable to complete failover of %s.') % fn raise exception.VolumeBackendAPIException(data=msg) # If multiple volumes of the same name are found we need to error.
if len(vollist) > 1: # blow up msg = _('Multiple copies of volume %s found.') % name raise exception.VolumeBackendAPIException(data=msg) # We made it and should have a valid volume. return None if not vollist else vollist[0] def delete_volume(self, name): """Deletes the volume from the SC backend array. If the volume cannot be found we claim success. :param name: Name of the volume to search for. This is the cinder volume ID. :returns: Boolean indicating success or failure. """ vol = self.find_volume(name) if vol is not None: r = self.client.delete('StorageCenter/ScVolume/%s' % self._get_id(vol)) # 200 expected if not self._check_result(r): msg = _('Error deleting volume %(ssn)s: %(volume)s') % { 'ssn': self.ssn, 'volume': name} raise exception.VolumeBackendAPIException(data=msg) # json return should be true or false return self._get_json(r) # If we can't find the volume then it is effectively gone. LOG.warning(_LW('delete_volume: unable to find volume %s'), name) return True def _find_server_folder(self, create=False): """Looks for the server folder on the Dell Storage Center. This is the folder where a server objects for mapping volumes will be created. Server folder is specified in cinder.conf. See __init. :param create: If True will create the folder if not found. :return: Folder object. """ folder = self._find_folder('StorageCenter/ScServerFolder/GetList', self.sfname) if folder is None and create is True: folder = self._create_folder_path('StorageCenter/ScServerFolder', self.sfname) return folder def _add_hba(self, scserver, wwnoriscsiname, isfc=False): """This adds a server HBA to the Dell server object. The HBA is taken from the connector provided in initialize_connection. The Dell server object is largely a container object for the list of HBAs associated with a single server (or vm or cluster) for the purposes of mapping volumes. :param scserver: Dell server object. :param wwnoriscsiname: The WWN or IQN to add to this server. :param isfc: Boolean indicating whether this is an FC HBA or not. :returns: Boolean indicating success or failure. """ payload = {} if isfc is True: payload['HbaPortType'] = 'FibreChannel' else: payload['HbaPortType'] = 'Iscsi' payload['WwnOrIscsiName'] = wwnoriscsiname payload['AllowManual'] = True r = self.client.post('StorageCenter/ScPhysicalServer/%s/AddHba' % self._get_id(scserver), payload) # 200 expected. if not self._check_result(r): LOG.error(_LE('AddHba error: %(wwn)s to %(srvname)s'), {'wwn': wwnoriscsiname, 'srvname': scserver['name']}) return False return True def _find_serveros(self, osname='Red Hat Linux 6.x'): """Returns the serveros instance id of the specified osname. Required to create a Dell server object. We do not know that we are Red Hat Linux 6.x but that works best for Red Hat and Ubuntu. So we use that. :param osname: The name of the OS to look for. :returns: InstanceId of the ScServerOperatingSystem object. """ pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) r = self.client.post('StorageCenter/ScServerOperatingSystem/GetList', pf.payload) # 200 expected. if self._check_result(r): oslist = self._get_json(r) for srvos in oslist: name = srvos.get('name', 'nope') if name.lower() == osname.lower(): # Found it return the id return self._get_id(srvos) LOG.warning(_LW('Unable to find appropriate OS %s'), osname) return None def create_server_multiple_hbas(self, wwns): """Creates a server with multiple WWNS associated with it. Same as create_server except it can take a list of HBAs. 
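For example (hypothetical WWNs), create_server_multiple_hbas(['21000024ff30441c', '21000024ff30441d']) creates one server object from the first WWN and then attaches the second as an additional HBA.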
:param wwns: A list of FC WWNs or iSCSI IQNs associated with this server. :returns: Dell server object. """ scserver = None # Our instance names for wwn in wwns: if scserver is None: # Use the first wwn to create the server. scserver = self.create_server(wwn, True) else: # Add the wwn to our server self._add_hba(scserver, wwn, True) return scserver def create_server(self, wwnoriscsiname, isfc=False): """Creates a Dell server object on the Storage Center. Adds the first HBA identified by wwnoriscsiname to it. :param wwnoriscsiname: A FC WWN or iSCSI IQN associated with this Dell server object. :param isfc: Boolean indicating whether this is an FC HBA or not. :returns: Dell server object. """ LOG.info(_LI('Creating server %s'), wwnoriscsiname) scserver = None payload = {} payload['Name'] = 'Server_' + wwnoriscsiname payload['StorageCenter'] = self.ssn payload['Notes'] = self.notes # We pick Red Hat Linux 6.x because it supports multipath and # will attach luns to paths as they are found. scserveros = self._find_serveros('Red Hat Linux 6.x') if scserveros is not None: payload['OperatingSystem'] = scserveros # Find our folder or make it folder = self._find_server_folder(True) # At this point it doesn't matter if the folder was created or not. # We just attempt to create the server. Let it be in the root if # the folder creation fails. if folder is not None: payload['ServerFolder'] = self._get_id(folder) # create our server r = self.client.post('StorageCenter/ScPhysicalServer', payload) # 201 expected. if self._check_result(r): # Server was created scserver = self._first_result(r) LOG.info(_LI('SC server created %s'), scserver) # Add hba to our server if scserver is not None: if not self._add_hba(scserver, wwnoriscsiname, isfc): LOG.error(_LE('Error adding HBA to server')) # Can't have a server without an HBA self._delete_server(scserver) scserver = None # Success or failure is determined by the caller return scserver def find_server(self, instance_name): """Hunts for a server on the Dell backend by instance_name. The instance_name is the same as the server's HBA. This is the IQN or WWN listed in the connector. If found, the server the HBA is attached to, if any, is returned. :param instance_name: instance_name is a FC WWN or iSCSI IQN from the connector. In cinder a server is identified by its HBA. :returns: Dell server object or None. """ scserver = None # We search for our server by first finding our HBA hba = self._find_serverhba(instance_name) # Once created hbas stay in the system. So it isn't enough # that we found one; it actually has to be attached to a # server. if hba is not None and hba.get('server') is not None: pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) pf.append('instanceId', self._get_id(hba['server'])) r = self.client.post('StorageCenter/ScServer/GetList', pf.payload) # 200 expected. if self._check_result(r): scserver = self._first_result(r) if scserver is None: LOG.debug('Server (%s) not found.', instance_name) return scserver def _find_serverhba(self, instance_name): """Hunts for a server HBA on the Dell backend by instance_name. Instance_name is the same as the IQN or WWN specified in the connector. :param instance_name: Instance_name is a FC WWN or iSCSI IQN from the connector. :returns: Dell server HBA object.
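For example, instance_name might be a hypothetical 'iqn.1993-08.org.debian:01:3069aaf55465' for iSCSI or '21000024ff30441c' for FC.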
""" scserverhba = None # We search for our server by first finding our HBA pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) pf.append('instanceName', instance_name) r = self.client.post('StorageCenter/ScServerHba/GetList', pf.payload) # 200 expected. if self._check_result(r): scserverhba = self._first_result(r) return scserverhba def _find_domains(self, cportid): """Find the list of Dell domain objects associated with the cportid. :param cportid: The Instance ID of the Dell controller port. :returns: List of fault domains associated with this controller port. """ r = self.client.get('StorageCenter/ScControllerPort/%s/FaultDomainList' % cportid) # 200 expected. if self._check_result(r): domains = self._get_json(r) return domains LOG.error(_LE('Error getting FaultDomainList for %s'), cportid) return None def _find_fc_initiators(self, scserver): """Returns a list of FC WWNs associated with the specified Dell server. :param scserver: The Dell backend server object. :returns: A list of FC WWNs associated with this server. """ initiators = [] r = self.client.get('StorageCenter/ScServer/%s/HbaList' % self._get_id(scserver)) # 200 expected. if self._check_result(r): hbas = self._get_json(r) for hba in hbas: wwn = hba.get('instanceName') if (hba.get('portType') == 'FibreChannel' and wwn is not None): initiators.append(wwn) else: LOG.error(_LE('Unable to find FC initiators')) LOG.debug('fc_initiators: %s', initiators) return initiators def get_volume_count(self, scserver): """Returns the number of volumes attached to specified Dell server. :param scserver: The Dell backend server object. :returns: Mapping count. -1 if there was an error. """ r = self.client.get('StorageCenter/ScServer/%s/MappingList' % self._get_id(scserver)) # 200 expected. if self._check_result(r): mappings = self._get_json(r) return len(mappings) # Panic mildly but do not return 0. return -1 def _find_mappings(self, scvolume): """Find the Dell volume object mappings. :param scvolume: Dell volume object. :returns: A list of Dell mappings objects. """ mappings = [] if scvolume.get('active', False): r = self.client.get('StorageCenter/ScVolume/%s/MappingList' % self._get_id(scvolume)) # 200 expected. if self._check_result(r): mappings = self._get_json(r) else: LOG.error(_LE('_find_mappings: volume is not active')) LOG.info(_LI('Volume mappings for %(name)s: %(mappings)s'), {'name': scvolume.get('name'), 'mappings': mappings}) return mappings def _find_mapping_profiles(self, scvolume): """Find the Dell volume object mapping profiles. :param scvolume: Dell volume object. :returns: A list of Dell mapping profile objects. """ mapping_profiles = [] r = self.client.get('StorageCenter/ScVolume/%s/MappingProfileList' % self._get_id(scvolume)) # 200 expected. if self._check_result(r): mapping_profiles = self._get_json(r) else: LOG.error(_LE('Unable to find mapping profiles: %s'), scvolume.get('name')) LOG.debug(mapping_profiles) return mapping_profiles def _find_controller_port(self, cportid): """Finds the SC controller port object for the specified cportid. :param cportid: The instanceID of the Dell backend controller port. :returns: The controller port object. """ controllerport = None r = self.client.get('StorageCenter/ScControllerPort/%s' % cportid) # 200 expected. 
if self._check_result(r): controllerport = self._first_result(r) else: LOG.error(_LE('Unable to find controller port: %s'), cportid) LOG.debug(controllerport) return controllerport def find_wwns(self, scvolume, scserver): """Finds the lun and wwns of the mapped volume. :param scvolume: Storage Center volume object. :param scserver: Storage Center server object. :returns: Lun, wwns, initiator target map """ lun = None # our lun. We return the first lun. wwns = [] # list of targets itmap = {} # dict of initiators and the associated targets # Make sure we know our server's initiators. Only return # mappings that contain an HBA for this server. initiators = self._find_fc_initiators(scserver) # Get our volume mappings mappings = self._find_mappings(scvolume) if len(mappings) > 0: # We check each of our mappings. We want to return # the mapping we have been configured to use. for mapping in mappings: # Find the controller port for this mapping cport = mapping.get('controllerPort') controllerport = self._find_controller_port( self._get_id(cport)) if controllerport is not None: # This changed case at one point or another. # Look for both keys. wwn = controllerport.get('wwn', controllerport.get('WWN')) if wwn: serverhba = mapping.get('serverHba') if serverhba: hbaname = serverhba.get('instanceName') if hbaname in initiators: if itmap.get(hbaname) is None: itmap[hbaname] = [] itmap[hbaname].append(wwn) wwns.append(wwn) mappinglun = mapping.get('lun') if lun is None: lun = mappinglun elif lun != mappinglun: LOG.warning(_LW('Inconsistent Luns.')) else: LOG.debug('%s not found in initiator list', hbaname) else: LOG.debug('serverhba is None.') else: LOG.debug('Unable to find port wwn.') else: LOG.debug('controllerport is None.') else: LOG.error(_LE('Volume appears unmapped')) LOG.debug(lun) LOG.debug(wwns) LOG.debug(itmap) # TODO(tom_swanson): if we have nothing to return raise an exception # here. We can't do anything with an unmapped volume. We shouldn't # pretend we succeeded. return lun, wwns, itmap def _find_active_controller(self, scvolume): """Finds the controller on which the Dell volume is active. There can be more than one Dell backend controller per Storage Center but a given volume can only be active on one of them at a time. :param scvolume: Dell backend volume object. :returns: Active controller ID. """ actvctrl = None # TODO(Swanson): We have a function that gets this. Call that. r = self.client.get('StorageCenter/ScVolume/%s/VolumeConfiguration' % self._get_id(scvolume)) # 200 expected. if self._check_result(r): volconfig = self._first_result(r) controller = volconfig.get('controller') actvctrl = self._get_id(controller) else: LOG.error(_LE('Unable to retrieve VolumeConfiguration: %s'), self._get_id(scvolume)) LOG.debug('activecontroller %s', actvctrl) return actvctrl def _get_controller_id(self, mapping): # The mapping lists the associated controller. return self._get_id(mapping.get('controller')) def _get_domains(self, mapping): # Return a list of domains associated with this controller port. return self._find_domains(self._get_id(mapping.get('controllerPort'))) def _get_iqn(self, mapping): # Get our iqn from the controller port listed in our mapping.
iqn = None cportid = self._get_id(mapping.get('controllerPort')) controllerport = self._find_controller_port(cportid) LOG.debug('controllerport: %s', controllerport) if controllerport: iqn = controllerport.get('iscsiName') return iqn def _is_virtualport_mode(self): isvpmode = False r = self.client.get('StorageCenter/ScConfiguration/%s' % self.ssn) # 200 expected. if self._check_result(r): scconfig = self._get_json(r) if scconfig: isvpmode = (scconfig['iscsiTransportMode'] == 'VirtualPort') return isvpmode def _find_controller_port_iscsi_config(self, cportid): """Finds the SC controller port iSCSI configuration object for the specified cportid. :param cportid: The instanceID of the Dell backend controller port. :returns: The controller port iSCSI configuration object. """ controllerport = None r = self.client.get('StorageCenter/' 'ScControllerPortIscsiConfiguration/%s' % cportid) # 200 expected. if self._check_result(r): controllerport = self._first_result(r) else: LOG.error(_LE('Unable to find controller ' 'port iscsi configuration: %s'), cportid) return controllerport def find_iscsi_properties(self, scvolume, ip=None, port=None): """Finds target information for a given Dell scvolume object mapping. The data coming back is both the preferred path and all the paths. :param scvolume: The dell sc volume object. :param ip: The preferred target portal ip. :param port: The preferred target portal port. :returns: iSCSI property dictionary. :raises: VolumeBackendAPIException """ LOG.debug('enter find_iscsi_properties') LOG.debug('scvolume: %s', scvolume) # Our mutable process object. pdata = {'active': -1, 'up': -1, 'ip': ip, 'port': port} # Our output lists. portals = [] luns = [] iqns = [] # Process just looks for the best port to return. def process(lun, iqn, address, port, status, active): """Process this mapping information. :param lun: SCSI Lun. :param iqn: iSCSI IQN address. :param address: IP address. :param port: IP port number. :param status: String indicating mapping status. (Up is what we are looking for.) :param active: Boolean indicating whether this is on the active controller or not. :return: Nothing """ portals.append(address + ':' + six.text_type(port)) iqns.append(iqn) luns.append(lun) # We have all the information. We need to find # the best single portal to return. So check # this one if it is on the right IP, port and # if the access and status are correct. if ((pdata['ip'] is None or pdata['ip'] == address) and (pdata['port'] is None or pdata['port'] == port)): # We need to point to the best link. # So state active and status up is preferred # but we don't actually need the state to be # up at this point. if pdata['up'] == -1: if active: pdata['active'] = len(iqns) - 1 if status == 'Up': pdata['up'] = pdata['active'] # Start by getting our mappings. mappings = self._find_mappings(scvolume) # We should have mappings at the time of this call but do check. if len(mappings) > 0: # In multipath (per Liberty) we will return all paths. But # if multipath is not set (ip and port are None) then we need # to return a mapping from the controller on which the volume # is active. So find that controller. actvctrl = self._find_active_controller(scvolume) # Two different methods are used to find our luns and portals # depending on whether we are in virtual or legacy port mode. isvpmode = self._is_virtualport_mode() # Trundle through our mappings. for mapping in mappings: # The lun, ro mode and status are in the mapping.
LOG.debug('mapping: %s', mapping) lun = mapping.get('lun') status = mapping.get('status') # Get our IQN from our mapping. iqn = self._get_iqn(mapping) # Check if our controller ID matches our active controller ID. isactive = (self._get_controller_id(mapping) == actvctrl) # If we have an IQN and are in virtual port mode. if isvpmode and iqn: domains = self._get_domains(mapping) if domains: for dom in domains: LOG.debug('domain: %s', dom) ipaddress = dom.get('targetIpv4Address', dom.get('wellKnownIpAddress')) portnumber = dom.get('portNumber') # We have all our information. Process this portal. process(lun, iqn, ipaddress, portnumber, status, isactive) # Else we are in legacy mode. elif iqn: # Need to get individual ports cportid = self._get_id(mapping.get('controllerPort')) # Legacy mode stuff is in the ISCSI configuration object. cpconfig = self._find_controller_port_iscsi_config(cportid) # This should really never fail. Things happen so if it # does just keep moving. Return what we can. if cpconfig: ipaddress = cpconfig.get('ipAddress') portnumber = cpconfig.get('portNumber') # We have all our information. Process this portal. process(lun, iqn, ipaddress, portnumber, status, isactive) # We've gone through all our mappings. # Make sure we found something to return. if len(luns) == 0: # Since we just mapped this and can't find that mapping the world # is wrong so we raise exception. raise exception.VolumeBackendAPIException( data=_('Unable to find iSCSI mappings.')) # Make sure we point to the best portal we can. This means it is # on the active controller and, preferably, up. If it isn't return # what we have. if pdata['up'] != -1: # We found a connection that is already up. Return that. pdata['active'] = pdata['up'] elif pdata['active'] == -1: # This shouldn't be able to happen. Maybe a controller went # down in the middle of this so just return the first one and # hope the ports are up by the time the connection is attempted. LOG.debug('Volume is not yet active on any controller.') pdata['active'] = 0 data = {'target_discovered': False, 'target_iqn': iqns[pdata['active']], 'target_iqns': iqns, 'target_portal': portals[pdata['active']], 'target_portals': portals, 'target_lun': luns[pdata['active']], 'target_luns': luns, } LOG.debug('find_iscsi_properties return: %s', data) return data def map_volume(self, scvolume, scserver): """Maps the Dell backend volume object to the Dell server object. The check for the Dell server object's existence is done elsewhere; this does not create the Dell server object. :param scvolume: Storage Center volume object. :param scserver: Storage Center server object. :returns: SC mapping profile or None """ # Make sure we have what we think we have serverid = self._get_id(scserver) volumeid = self._get_id(scvolume) if serverid is not None and volumeid is not None: # If we have a mapping to our server return it here. mprofiles = self._find_mapping_profiles(scvolume) for mprofile in mprofiles: if self._get_id(mprofile.get('server')) == serverid: return mprofile # No? Then map it up. payload = {} payload['server'] = serverid advanced = {} advanced['MapToDownServerHbas'] = True payload['Advanced'] = advanced r = self.client.post('StorageCenter/ScVolume/%s/MapToServer' % volumeid, payload) # 200 expected.
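# For reference, the JSON body sent by the MapToServer call above is
# simply the following (the server instance id is hypothetical):
#
#     {'server': '64702.47', 'Advanced': {'MapToDownServerHbas': True}}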
if self._check_result(r): # We just return our mapping. return self._first_result(r) # Error out. LOG.error(_LE('Unable to map %(vol)s to %(srv)s'), {'vol': scvolume['name'], 'srv': scserver['name']}) return None def unmap_volume(self, scvolume, scserver): """Unmaps the Dell volume object from the Dell server object. Deletes all mappings to a Dell server object, not just the ones on the path defined in cinder.conf. :param scvolume: Storage Center volume object. :param scserver: Storage Center server object. :returns: True or False. """ rtn = True serverid = self._get_id(scserver) volumeid = self._get_id(scvolume) if serverid is not None and volumeid is not None: profiles = self._find_mapping_profiles(scvolume) for profile in profiles: prosrv = profile.get('server') if prosrv is not None and self._get_id(prosrv) == serverid: r = self.client.delete('StorageCenter/ScMappingProfile/%s' % self._get_id(profile)) # 200 expected. if self._check_result(r): # Check our result in the json. result = self._get_json(r) # EM 15.1 and 15.2 return a boolean directly. # 15.3 on up return it in a dict under 'result'. if result is True or (isinstance(result, dict) and result.get('result')): LOG.debug('Volume %(vol)s unmapped from %(srv)s', {'vol': volumeid, 'srv': serverid}) continue LOG.error(_LE('Unable to unmap Volume %s'), volumeid) # 1 failed unmap is as good as 100. # Fail it and leave. rtn = False break # Return True/False. return rtn def get_storage_usage(self): """Gets the storage usage object from the Dell backend. This contains capacity and usage information for the SC. :returns: The SC storageusage object. """ storageusage = None if self.ssn is not None: r = self.client.get('StorageCenter/StorageCenter/%s/StorageUsage' % self.ssn) # 200 expected. if self._check_result(r): storageusage = self._get_json(r) return storageusage def create_replay(self, scvolume, replayid, expire): """Takes a snapshot of a volume. One could snap a volume before it has been activated, so we activate it by mapping and unmapping it to a random server and letting that go. This should fail, but the Tempest tests require it to succeed. :param scvolume: Volume to snapshot. :param replayid: Name to use for the snapshot. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :param expire: Time in minutes before the replay expires. For most snapshots this will be 0 (never expire) but if we are cloning a volume we will snap it right before creating the clone. :returns: The Dell replay object or None. """ replay = None if scvolume is not None: if (scvolume.get('active') is not True or scvolume.get('replayAllowed') is not True): self._init_volume(scvolume) payload = {} payload['description'] = replayid payload['expireTime'] = expire r = self.client.post('StorageCenter/ScVolume/%s/CreateReplay' % self._get_id(scvolume), payload) # 200 expected. if self._check_result(r): replay = self._first_result(r) # Quick double check. if replay is None: LOG.warning(_LW('Unable to create snapshot %s'), replayid) # Return replay or None. return replay def find_replay(self, scvolume, replayid): """Searches for the replay by replayid. replayid is stored in the replay's description attribute. :param scvolume: Dell volume object. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :returns: Dell replay object or None.
""" r = self.client.get('StorageCenter/ScVolume/%s/ReplayList' % self._get_id(scvolume)) try: replays = self._get_json(r) # This will be a list. If it isn't bail if isinstance(replays, list): for replay in replays: # The only place to save our information with the public # api is the description field which isn't quite long # enough. So we check that our description is pretty much # the max length and we compare that to the start of # the snapshot id. description = replay.get('description') if (len(description) >= 30 and replayid.startswith(description) is True and replay.get('markedForExpiration') is not True): # We found our replay so return it. return replay except Exception: LOG.error(_LE('Invalid ReplayList return: %s'), r) # If we are here then we didn't find the replay so warn and leave. LOG.warning(_LW('Unable to find snapshot %s'), replayid) return None def manage_replay(self, screplay, replayid): """Basically renames the screplay and sets it to never expire. :param screplay: DellSC object. :param replayid: New name for replay. :return: True on success. False on fail. """ if screplay and replayid: payload = {} payload['description'] = replayid payload['expireTime'] = 0 r = self.client.put('StorageCenter/ScReplay/%s' % self._get_id(screplay), payload) if self._check_result(r): return True LOG.error(_LE('Error managing replay %s'), screplay.get('description')) return False def unmanage_replay(self, screplay): """Basically sets the expireTime :param screplay: DellSC object. :return: True on success. False on fail. """ if screplay: payload = {} payload['expireTime'] = 1440 r = self.client.put('StorageCenter/ScReplay/%s' % self._get_id(screplay), payload) if self._check_result(r): return True LOG.error(_LE('Error unmanaging replay %s'), screplay.get('description')) return False def delete_replay(self, scvolume, replayid): """Finds a Dell replay by replayid string and expires it. Once marked for expiration we do not return the replay as a snapshot even though it might still exist. (Backend requirements.) :param scvolume: Dell volume object. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :returns: Boolean for success or failure. """ LOG.debug('Expiring replay %s', replayid) replay = self.find_replay(scvolume, replayid) if replay is not None: r = self.client.post('StorageCenter/ScReplay/%s/Expire' % self._get_id(replay), {}) # 204 expected. if not self._check_result(r): return False # We either couldn't find it or expired it. return True def create_view_volume(self, volname, screplay, replay_profile_string): """Creates a new volume named volname from the screplay. :param volname: Name of new volume. This is the cinder volume ID. :param screplay: Dell replay object from which to make a new volume. :param replay_profile_string: Profiles to be applied to the volume :returns: Dell volume object or None. """ folder = self._find_volume_folder(True) # Find our replay_profiles. addids, removeids = self._find_replay_profiles(replay_profile_string) # payload is just the volume name and folder if we have one. payload = {} payload['Name'] = volname payload['Notes'] = self.notes if folder is not None: payload['VolumeFolder'] = self._get_id(folder) if addids: payload['ReplayProfileList'] = addids r = self.client.post('StorageCenter/ScReplay/%s/CreateView' % self._get_id(screplay), payload) volume = None # 200 expected. 
if self._check_result(r): volume = self._first_result(r) if volume is None: LOG.error(_LE('Unable to create volume %s from replay'), volname) return volume def create_cloned_volume(self, volumename, scvolume, replay_profile_list): """Creates a volume named volumename from a copy of scvolume. This is done by creating a replay and then a view volume from that replay. The replay is set to expire after an hour. It is only needed long enough to create the volume. (1 minute should be enough but we set an hour in case the world has gone mad.) :param volumename: Name of new volume. This is the cinder volume ID. :param scvolume: Dell volume object. :param replay_profile_list: List of snapshot profiles. :returns: The new volume's Dell volume object. """ clone = None replay = self.create_replay(scvolume, 'Cinder Clone Replay', 60) if replay is not None: clone = self.create_view_volume(volumename, replay, replay_profile_list) else: LOG.error(_LE('Error: unable to snap replay')) return clone def expand_volume(self, scvolume, newsize): """Expands scvolume to newsize GBs. :param scvolume: Dell volume object to be expanded. :param newsize: The new size of the volume object. :returns: The updated Dell volume object on success or None on failure. """ payload = {} payload['NewSize'] = '%d GB' % newsize r = self.client.post('StorageCenter/ScVolume/%s/ExpandToSize' % self._get_id(scvolume), payload) vol = None # 200 expected. if self._check_result(r): vol = self._get_json(r) # More info might be good. if vol is not None: LOG.debug('Volume expanded: %(name)s %(size)s', {'name': vol['name'], 'size': vol['configuredSize']}) else: LOG.error(_LE('Error expanding volume %s.'), scvolume['name']) return vol def rename_volume(self, scvolume, name): """Rename scvolume to name. This is mostly used by update_migrated_volume. :param scvolume: The Dell volume object to be renamed. :param name: The new volume name. :returns: Boolean indicating success or failure. """ payload = {} payload['Name'] = name r = self.client.post('StorageCenter/ScVolume/%s/Modify' % self._get_id(scvolume), payload) # 200 expected. if not self._check_result(r): LOG.error(_LE('Error renaming volume %(original)s to %(name)s'), {'original': scvolume['name'], 'name': name}) return False return True def update_storage_profile(self, scvolume, storage_profile): """Update a volume's Storage Profile. Changes the volume setting to use a different Storage Profile. If storage_profile is None, will reset to the default profile for the cinder user account. :param scvolume: The Storage Center volume to be updated. :param storage_profile: The requested Storage Profile name. :returns: True if successful, False otherwise. 
""" prefs = self._get_user_preferences() if not prefs: return False if not prefs.get('allowStorageProfileSelection'): LOG.error(_LE('User does not have permission to change ' 'Storage Profile selection.')) return False profile = self._find_storage_profile(storage_profile) if storage_profile: if not profile: LOG.error(_LE('Storage Profile %s was not found.'), storage_profile) return False else: # Going from specific profile to the user default profile = prefs.get('storageProfile') if not profile: LOG.error(_LE('Default Storage Profile was not found.')) return False LOG.info(_LI('Switching volume %(vol)s to profile %(prof)s.'), {'vol': scvolume['name'], 'prof': profile.get('name')}) payload = {} payload['StorageProfile'] = self._get_id(profile) r = self.client.post('StorageCenter/ScVolumeConfiguration' '/%s/Modify' % self._get_id(scvolume), payload) # 200 expected. if not self._check_result(r): LOG.error(_LE('Error changing Storage Profile for volume ' '%(original)s to %(name)s'), {'original': scvolume['name'], 'name': storage_profile}) return False return True def _get_user_preferences(self): """Gets the preferences and defaults for this user. There are a set of preferences and defaults for each user on the Storage Center. This retrieves all settings for the current account used by Cinder. """ r = self.client.get('StorageCenter/StorageCenter/%s/UserPreferences' % self.ssn) # 200 expected. if not self._check_result(r): return {} return self._get_json(r) def _delete_server(self, scserver): """Deletes scserver from the backend. Just give it a shot. If it fails it doesn't matter to cinder. This is generally used when a create_server call fails in the middle of creation. Cinder knows nothing of the servers objects on Dell backends so success or failure is purely an internal thing. Note that we do not delete a server object in normal operation. :param scserver: Dell server object to delete. :returns: Nothing. Only logs messages. """ LOG.debug('ScServer delete %s', self._get_id(scserver)) if scserver.get('deleteAllowed') is True: r = self.client.delete('StorageCenter/ScServer/%s' % self._get_id(scserver)) if self._check_result(r): LOG.debug('ScServer deleted.') else: LOG.debug('_delete_server: deleteAllowed is False.') def find_replay_profile(self, name): """Finds the Dell SC replay profile object name. :param name: Name of the replay profile object. This is the consistency group id. :return: Dell SC replay profile or None. :raises: VolumeBackendAPIException """ self.cg_except_on_no_support() pf = self._get_payload_filter() pf.append('ScSerialNumber', self.ssn) pf.append('Name', name) r = self.client.post('StorageCenter/ScReplayProfile/GetList', pf.payload) # 200 expected. if self._check_result(r): profilelist = self._get_json(r) if profilelist: if len(profilelist) > 1: LOG.error(_LE('Multiple replay profiles under name %s'), name) raise exception.VolumeBackendAPIException( data=_('Multiple profiles found.')) return profilelist[0] return None def create_replay_profile(self, name): """Creates a replay profile on the Dell SC. :param name: The ID of the consistency group. This will be matched to the name on the Dell SC. :return: SC profile or None. """ self.cg_except_on_no_support() profile = self.find_replay_profile(name) if not profile: payload = {} payload['StorageCenter'] = self.ssn payload['Name'] = name payload['Type'] = 'Consistent' payload['Notes'] = self.notes r = self.client.post('StorageCenter/ScReplayProfile', payload) # 201 expected. 
if self._check_result(r): profile = self._first_result(r) return profile def delete_replay_profile(self, profile): """Delete the replay profile from the Dell SC. :param profile: SC replay profile. :return: Nothing. :raises: VolumeBackendAPIException """ self.cg_except_on_no_support() r = self.client.delete('StorageCenter/ScReplayProfile/%s' % self._get_id(profile)) # 200 is a good return. Log and leave. if self._check_result(r): LOG.info(_LI('Profile %s has been deleted.'), profile.get('name')) else: # We failed due to a failure to delete an existing profile. # This is reason to raise an exception. LOG.error(_LE('Unable to delete profile %s.'), profile.get('name')) raise exception.VolumeBackendAPIException( data=_('Error deleting replay profile.')) def _get_volume_configuration(self, scvolume): """Get the ScVolumeConfiguration object. :param scvolume: The Dell SC volume object. :return: The SCVolumeConfiguration object or None. """ r = self.client.get('StorageCenter/ScVolume/%s/VolumeConfiguration' % self._get_id(scvolume)) # 200 expected. if self._check_result(r): return self._first_result(r) return None def _update_volume_profiles(self, scvolume, addid=None, removeid=None): """Either Adds or removes the listed profile from the SC volume. :param scvolume: Dell SC volume object. :param addid: Profile ID to be added to the SC volume configuration. :param removeid: ID to be removed to the SC volume configuration. :return: True/False on success/failure. """ if scvolume: scvolumecfg = self._get_volume_configuration(scvolume) if scvolumecfg: profilelist = scvolumecfg.get('replayProfileList', []) newprofilelist = [] # Do we have one to add? Start the list with it. if addid: newprofilelist = [addid] # Re-add our existing profiles. for profile in profilelist: profileid = self._get_id(profile) # Make sure it isn't one we want removed and that we # haven't already added it. (IE it isn't the addid.) if (profileid != removeid and newprofilelist.count(profileid) == 0): newprofilelist.append(profileid) # Update our volume configuration. payload = {} payload['ReplayProfileList'] = newprofilelist r = self.client.put('StorageCenter/ScVolumeConfiguration/%s' % self._get_id(scvolumecfg), payload) # check result LOG.debug('_update_volume_profiles %s : %s : %s', self._get_id(scvolume), profilelist, r) # 200 expected. if self._check_result(r): return True return False def _add_cg_volumes(self, profileid, add_volumes): """Trundles through add_volumes and adds the replay profile to them. :param profileid: The ID of the replay profile. :param add_volumes: List of Dell SC volume objects that are getting added to the consistency group. :return: True/False on success/failure. """ for vol in add_volumes: if (self._update_volume_profiles(self.find_volume(vol['id']), addid=profileid, removeid=None)): LOG.info(_LI('Added %s to cg.'), vol['id']) else: LOG.error(_LE('Failed to add %s to cg.'), vol['id']) return False return True def _remove_cg_volumes(self, profileid, remove_volumes): """Removes the replay profile from the remove_volumes list of vols. :param profileid: The ID of the replay profile. :param remove_volumes: List of Dell SC volume objects that are getting removed from the consistency group. :return: True/False on success/failure. 
""" for vol in remove_volumes: if (self._update_volume_profiles(self.find_volume(vol['id']), addid=None, removeid=profileid)): LOG.info(_LI('Removed %s from cg.'), vol['id']) else: LOG.error(_LE('Failed to remove %s from cg.'), vol['id']) return False return True def update_cg_volumes(self, profile, add_volumes=None, remove_volumes=None): """Adds or removes the profile from the specified volumes :param profile: Dell SC replay profile object. :param add_volumes: List of volumes we are adding to the consistency group. (Which is to say we are adding the profile to this list of volumes.) :param remove_volumes: List of volumes we are removing from the consistency group. (Which is to say we are removing the profile from this list of volumes.) :return: True/False on success/failure. """ self.cg_except_on_no_support() ret = True profileid = self._get_id(profile) if add_volumes: LOG.info(_LI('Adding volumes to cg %s.'), profile['name']) ret = self._add_cg_volumes(profileid, add_volumes) if ret and remove_volumes: LOG.info(_LI('Removing volumes from cg %s.'), profile['name']) ret = self._remove_cg_volumes(profileid, remove_volumes) return ret def _init_cg_volumes(self, profileid): """Gets the cg volume list and maps/unmaps the non active volumes. :param profileid: Replay profile identifier. :return: Nothing """ r = self.client.get('StorageCenter/ScReplayProfile/%s/VolumeList' % profileid) # 200 expected. if self._check_result(r): vols = self._get_json(r) for vol in vols: if (vol.get('active') is not True or vol.get('replayAllowed') is not True): self._init_volume(vol) def snap_cg_replay(self, profile, replayid, expire): """Snaps a replay of a consistency group. :param profile: The name of the consistency group profile. :param replayid: The name of the replay. :param expire: Time in mintues before a replay expires. 0 means no expiration. :returns: Dell SC replay object. """ self.cg_except_on_no_support() if profile: # We have to make sure these are snappable. self._init_cg_volumes(self._get_id(profile)) # Succeed or fail we soldier on. payload = {} payload['description'] = replayid payload['expireTime'] = expire r = self.client.post('StorageCenter/ScReplayProfile/%s/' 'CreateReplay' % self._get_id(profile), payload) # 204 expected. if self._check_result(r): LOG.info(_LI('CreateReplay success %s'), replayid) return True return False def _find_sc_cg(self, profile, replayid): """Finds the sc consistency group that matches replayid :param profile: Dell profile object. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :return: Consistency group object or None. """ self.cg_except_on_no_support() r = self.client.get( 'StorageCenter/ScReplayProfile/%s/ConsistencyGroupList' % self._get_id(profile)) # 200 expected. if self._check_result(r): cglist = self._get_json(r) if cglist and isinstance(cglist, list): for cg in cglist: desc = cg.get('description') if (len(desc) >= 30 and replayid.startswith(desc) is True): # We found our cg so return it. return cg return None def _find_cg_replays(self, profile, replayid): """Searches for the replays that match replayid for a given profile. replayid is stored in the replay's description attribute. :param profile: Dell profile object. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :returns: Dell replay object array. 
""" self.cg_except_on_no_support() replays = [] sccg = self._find_sc_cg(profile, replayid) if sccg: r = self.client.get( 'StorageCenter/ScReplayConsistencyGroup/%s/ReplayList' % self._get_id(sccg)) replays = self._get_json(r) else: LOG.error(_LE('Unable to locate snapshot %s'), replayid) return replays def delete_cg_replay(self, profile, replayid): """Finds a Dell cg replay by replayid string and expires it. Once marked for expiration we do not return the replay as a snapshot even though it might still exist. (Backend requirements.) :param cg_name: Consistency Group name. This is the ReplayProfileName. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :returns: Boolean for success or failure. """ self.cg_except_on_no_support() LOG.debug('Expiring consistency group replay %s', replayid) replays = self._find_cg_replays(profile, replayid) for replay in replays: instanceid = self._get_id(replay) LOG.debug('Expiring replay %s', instanceid) r = self.client.post('StorageCenter/ScReplay/%s/Expire' % instanceid, {}) # 204 expected. if not self._check_result(r): return False # We either couldn't find it or expired it. return True def cg_except_on_no_support(self): if not self.consisgroups: msg = _('Dell API 2.1 or later required' ' for Consistency Group support') raise NotImplementedError(data=msg) @staticmethod def size_to_gb(spacestring): """Splits a SC size string into GB and a remainder. Space is returned in a string like ... 7.38197504E8 Bytes Need to split that apart and convert to GB. :param spacestring: SC size string. :return: Size in GB and remainder in byte. """ try: n = spacestring.split(' ', 1) fgb = int(float(n[0]) // 1073741824) frem = int(float(n[0]) % 1073741824) return fgb, frem except Exception: # We received an invalid size string. Blow up. raise exception.VolumeBackendAPIException( data=_('Error retrieving volume size')) def _import_one(self, scvolume, newname): # Find our folder folder = self._find_volume_folder(True) # If we actually have a place to put our volume create it if folder is None: LOG.warning(_LW('Unable to create folder %s'), self.vfname) # Rename and move our volume. payload = {} payload['Name'] = newname if folder: payload['VolumeFolder'] = self._get_id(folder) r = self.client.put('StorageCenter/ScVolume/%s' % self._get_id(scvolume), payload) # 200 expected. if self._check_result(r): return self._get_json(r) return None def manage_existing(self, newname, existing): """Finds the volume named existing and renames it. This checks a few things. The volume has to exist. There can only be one volume by that name. Since cinder manages volumes by the GB it has to be defined on a GB boundry. This renames existing to newname. newname is the guid from the cinder volume['id']. The volume is moved to the defined cinder volume folder. :param newname: Name to rename the volume to. :param existing: The existing volume dict.. :return: Nothing. :raises: VolumeBackendAPIException, ManageExistingInvalidReference """ vollist = self._get_volume_list(existing.get('source-name'), existing.get('source-id'), False) count = len(vollist) # If we found one volume with that name we can work with it. if count == 1: # First thing to check is if the size is something we can # work with. sz, rem = self.size_to_gb(vollist[0]['configuredSize']) if rem > 0: raise exception.VolumeBackendAPIException( data=_('Volume size must multiple of 1 GB.')) # We only want to grab detached volumes. 
mappings = self._find_mappings(vollist[0]) if len(mappings) > 0: msg = _('Volume is attached to a server. (%s)') % existing raise exception.VolumeBackendAPIException(data=msg) scvolume = self._import_one(vollist[0], newname) if not scvolume: msg = _('Unable to manage volume %s') % existing raise exception.VolumeBackendAPIException(data=msg) elif count > 1: raise exception.ManageExistingInvalidReference( existing_ref=existing, reason=_('Volume not unique.')) else: raise exception.ManageExistingInvalidReference( existing_ref=existing, reason=_('Volume not found.')) def get_unmanaged_volume_size(self, existing): """Looks up the volume named existing and returns its size string. :param existing: Existing volume dict. :return: The SC configuredSize string. :raises: ManageExistingInvalidReference """ vollist = self._get_volume_list(existing.get('source-name'), existing.get('source-id'), False) count = len(vollist) # If we found one volume with that name we can work with it. if count == 1: sz, rem = self.size_to_gb(vollist[0]['configuredSize']) if rem > 0: raise exception.VolumeBackendAPIException( data=_('Volume size must be a multiple of 1 GB.')) return sz elif count > 1: raise exception.ManageExistingInvalidReference( existing_ref=existing, reason=_('Volume not unique.')) else: raise exception.ManageExistingInvalidReference( existing_ref=existing, reason=_('Volume not found.')) def unmanage(self, scvolume): """Unmanage our volume. We simply rename it with a prefix of 'Unmanaged_'. That's it. :param scvolume: The Dell SC volume object. :return: Nothing. :raises: VolumeBackendAPIException """ newname = 'Unmanaged_' + scvolume['name'] payload = {} payload['Name'] = newname r = self.client.put('StorageCenter/ScVolume/%s' % self._get_id(scvolume), payload) # 200 expected. if self._check_result(r): LOG.info(_LI('Volume %s unmanaged.'), scvolume['name']) else: msg = _('Unable to rename volume %(existing)s to %(newname)s') % { 'existing': scvolume['name'], 'newname': newname} raise exception.VolumeBackendAPIException(data=msg) def _find_qos(self, qosnode): """Find the Dell SC QoS Node entry for replication. :param qosnode: Name of qosnode. :return: scqos node object. """ pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) pf.append('name', qosnode) r = self.client.post('StorageCenter/ScReplicationQosNode/GetList', pf.payload) # 200 expected. if self._check_result(r): nodes = self._get_json(r) if len(nodes) > 0: return nodes[0] else: payload = {} payload['LinkSpeed'] = '1 Gbps' payload['Name'] = qosnode payload['StorageCenter'] = self.ssn payload['BandwidthLimited'] = False r = self.client.post('StorageCenter/ScReplicationQosNode', payload) # 201 expected. if self._check_result(r): return self._get_json(r) LOG.error(_LE('Unable to find or create QoS Node named %s'), qosnode) raise exception.VolumeBackendAPIException( data=_('Failed to find QoSnode')) def update_replicate_active_replay(self, scvolume, replactive): """Enables or disables replicating the active replay for a given volume. :param scvolume: SC Volume object. :param replactive: True or False :return: True or False """ r = self.client.get('StorageCenter/ScVolume/%s/ReplicationSourceList' % self._get_id(scvolume)) # 200 expected.
if self._check_result(r): replications = self._get_json(r) for replication in replications: if replication['replicateActiveReplay'] != replactive: payload = {'ReplicateActiveReplay': replactive} r = self.client.put('StorageCenter/ScReplication/%s' % replication['instanceId'], payload) if not self._check_result(r): return False return True def get_screplication(self, scvolume, destssn): """Find the screplication object for the volume on the dest backend. :param scvolume: Dell SC volume object. :param destssn: SSN of the destination Storage Center. :return: Dell SC replication object or None. """ LOG.debug('get_screplication') r = self.client.get('StorageCenter/ScVolume/%s/ReplicationSourceList' % self._get_id(scvolume)) # 200 expected. if self._check_result(r): replications = self._get_json(r) for replication in replications: # So we need to find the replication we are looking for. LOG.debug(replication) LOG.debug('looking for %s', destssn) if replication.get('destinationScSerialNumber') == destssn: return replication # Unable to locate replication. LOG.warning(_LW('Unable to locate replication %(vol)s to %(ssn)s'), {'vol': scvolume.get('name'), 'ssn': destssn}) return None def delete_replication(self, scvolume, destssn): """Deletes the SC replication object from scvolume to the destssn. :param scvolume: Dell SC Volume object. :param destssn: SSN of the SC the replication is replicating to. :return: True on success. False on fail. """ replication = self.get_screplication(scvolume, destssn) if replication: # TODO(tswanson): Sort out why we cannot send down attributes. r = self.client.delete('StorageCenter/ScReplication/%s' % self._get_id(replication)) if self._check_result(r): # Log our success. LOG.info(_LI('Replication %(vol)s to %(dest)s deleted.'), {'vol': scvolume.get('name'), 'dest': destssn}) return True else: LOG.error(_LE('Unable to delete replication for ' '%(vol)s to %(dest)s.'), {'vol': scvolume.get('name'), 'dest': destssn}) return False def _repl_name(self, name): return self.repl_prefix + name def _get_disk_folder(self, ssn, foldername): # TODO(tswanson): Harden this. diskfolder = None # If no folder name we just pass through this. if foldername: pf = self._get_payload_filter() pf.append('scSerialNumber', ssn) pf.append('name', foldername) r = self.client.post('StorageCenter/ScDiskFolder/GetList', pf.payload) if self._check_result(r): try: # Go for broke. diskfolder = self._get_json(r)[0] except Exception: # We just log this as an error and return nothing. LOG.error(_LE('Unable to find ' 'disk folder %(name)s on %(ssn)s'), {'name': foldername, 'ssn': ssn}) return diskfolder def create_replication(self, scvolume, destssn, qosnode, synchronous, diskfolder, replicate_active): """Create repl from scvol to destssn. :param scvolume: Dell SC volume object. :param destssn: Destination SSN string. :param qosnode: Name of Dell SC QoS Node for this replication. :param synchronous: Boolean. :param diskfolder: Optional disk folder name. :param replicate_active: Replicate the active replay. :return: Dell SC replication object. """ screpl = None ssn = self.find_sc(int(destssn)) payload = {} payload['DestinationStorageCenter'] = ssn payload['QosNode'] = self._get_id(self._find_qos(qosnode)) payload['SourceVolume'] = self._get_id(scvolume) payload['StorageCenter'] = self.find_sc() # Have to replicate the active replay.
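# Once the remaining fields are filled in below, an illustrative
# ScReplication payload looks roughly like this (the IDs, serial numbers
# and the repl_prefix-derived name are all made up):
#
#     {'DestinationStorageCenter': 64703,
#      'QosNode': '64702.2',
#      'SourceVolume': '64702.487',
#      'StorageCenter': 64702,
#      'ReplicateActiveReplay': False,
#      'Type': 'Asynchronous',
#      'DestinationVolumeAttributes':
#          {'CreateSourceVolumeFolderPath': True,
#           'Notes': 'Created by Dell Cinder Driver',
#           'Name': 'Cinder repl of 4d6e2b8c-...'}}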
payload['ReplicateActiveReplay'] = replicate_active or synchronous payload['Type'] = 'Synchronous' if synchronous else 'Asynchronous' destinationvolumeattributes = {} destinationvolumeattributes['CreateSourceVolumeFolderPath'] = True destinationvolumeattributes['Notes'] = self.notes destinationvolumeattributes['Name'] = self._repl_name(scvolume['name']) # Find our disk folder. If they haven't specified one this will just # drop through. If they have specified one and it can't be found the # error will be logged but this will keep going. df = self._get_disk_folder(destssn, diskfolder) if df: destinationvolumeattributes['DiskFolder'] = self._get_id(df) payload['DestinationVolumeAttributes'] = destinationvolumeattributes r = self.client.post('StorageCenter/ScReplication', payload) # 201 expected. if self._check_result(r): LOG.info(_LI('Replication created for %(volname)s to %(destsc)s'), {'volname': scvolume.get('name'), 'destsc': destssn}) screpl = self._get_json(r) # Check we did something. if not screpl: # Failed to launch. Inform user. Throw. LOG.error(_LE('Unable to replicate %(volname)s to %(destsc)s'), {'volname': scvolume.get('name'), 'destsc': destssn}) return screpl def find_repl_volume(self, guid, destssn, instance_id=None): """Find our replay destination volume on the destssn. :param guid: Volume ID. :param destssn: Where to look for the volume. :param instance_id: If we know our exact volume ID use that. :return: SC Volume object or None """ # Do a normal volume search. pf = self._get_payload_filter() pf.append('scSerialNumber', destssn) pf.append('ReplicationDestination', True) # There is a chance we know the exact volume. If so then use that. if instance_id: pf.append('instanceId', instance_id) else: # Try the name. pf.append('Name', self._repl_name(guid)) r = self.client.post('StorageCenter/ScVolume/GetList', pf.payload) if self._check_result(r): volumes = self._get_json(r) if len(volumes) == 1: return volumes[0] return None def remove_mappings(self, scvol): """Peels all the mappings off of scvol. :param scvol: :return: """ if scvol: r = self.client.post('StorageCenter/ScVolume/%s/Unmap' % self._get_id(scvol), {}) return self._check_result(r) return False def break_replication(self, volumename, destssn): """This just breaks the replication. If we find the source we just delete the replication. If the source is down then we find the destination and unmap it. Fail pretty much every time this goes south. :param volumename: :return: """ ret = False replid = None scvolume = self.find_volume(volumename) screplication = self.get_screplication(scvolume, destssn) # if we got our replication volume we can do this nicely. if screplication: replid = screplication['destinationVolume']['instanceId'] screplvol = self.find_repl_volume(volumename, destssn, replid) # delete_replication fails to delete replication without also # stuffing it into the recycle bin. # Instead we try to unmap the destination volume which will break # the replication but leave the replication object on the SC. ret = self.remove_mappings(screplvol) # If the volume is free of replication. if ret: # Try to kill mappings on the source. # We don't care that this succeeded or failed. Just move on. self.remove_mappings(scvolume) return ret cinder-8.0.0/cinder/volume/drivers/dell/dell_storagecenter_common.py0000664000567000056710000015427612701406257027107 0ustar jenkinsjenkins00000000000000# Copyright 2015 Dell Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume import driver from cinder.volume.drivers.dell import dell_storagecenter_api from cinder.volume.drivers.san.san import san_opts from cinder.volume import volume_types common_opts = [ cfg.IntOpt('dell_sc_ssn', default=64702, help='Storage Center System Serial Number'), cfg.PortOpt('dell_sc_api_port', default=3033, help='Dell API port'), cfg.StrOpt('dell_sc_server_folder', default='openstack', help='Name of the server folder to use on the Storage Center'), cfg.StrOpt('dell_sc_volume_folder', default='openstack', help='Name of the volume folder to use on the Storage Center'), cfg.BoolOpt('dell_sc_verify_cert', default=False, help='Enable HTTPS SC certificate verification.') ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(common_opts) class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD, driver.ExtendVD, driver.ManageableSnapshotsVD, driver.SnapshotVD, driver.BaseVD): def __init__(self, *args, **kwargs): super(DellCommonDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(common_opts) self.configuration.append_config_values(san_opts) self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'Dell' self.backends = self.configuration.safe_get('replication_device') self.replication_enabled = True if self.backends else False self.is_direct_connect = False self.active_backend_id = kwargs.get('active_backend_id', None) self.failed_over = (self.active_backend_id is not None) def _bytes_to_gb(self, spacestring): """Space is returned in a string like ... 7.38197504E8 Bytes Need to split that apart and convert to GB. :returns: gbs in int form """ try: n = spacestring.split(' ', 1) fgbs = float(n[0]) / 1073741824.0 igbs = int(fgbs) return igbs except Exception: # If any of that blew up it isn't in the format we # thought so eat our error and return None return None def do_setup(self, context): """One time driver setup. Called once by the manager after the driver is loaded. Sets up clients, check licenses, sets up protocol specific helpers. """ self._client = dell_storagecenter_api.StorageCenterApiHelper( self.configuration, self.active_backend_id) def check_for_setup_error(self): """Validates the configuration information.""" with self._client.open_connection() as api: api.find_sc() self.is_direct_connect = api.is_direct_connect if self.is_direct_connect and self.replication_enabled: msg = _('Dell Cinder driver configuration error replication ' 'not supported with direct connect.') raise exception.InvalidHost(reason=msg) # If we are a healthy replicated system make sure our backend # is alive. if self.replication_enabled and not self.failed_over: # Check that our replication destinations are available. for backend in self.backends: replssn = backend['target_device_id'] try: # Just do a find_sc on it. 
If it raises we catch # that and raise with a correct exception. api.find_sc(int(replssn)) except exception.VolumeBackendAPIException: msg = _('Dell Cinder driver configuration error ' 'replication_device %s not found') % replssn raise exception.InvalidHost(reason=msg) def _get_volume_extra_specs(self, volume): """Gets extra specs for the given volume.""" type_id = volume.get('volume_type_id') if type_id: return volume_types.get_volume_type_extra_specs(type_id) return {} def _add_volume_to_consistency_group(self, api, scvolume, volume): """Just a helper to add a volume to a consistency group. :param api: Dell SC API object. :param scvolume: Dell SC Volume object. :param volume: Cinder Volume object. :returns: Nothing. """ if scvolume and volume.get('consistencygroup_id'): profile = api.find_replay_profile( volume.get('consistencygroup_id')) if profile: api.update_cg_volumes(profile, [volume]) def _do_repl(self, api, volume): """Checks if we can do replication. Need the extra spec set and we have to be talking to EM. :param api: Dell REST API object. :param volume: Cinder Volume object. :return: Boolean (True if replication enabled), Boolean (True if replication type is sync). """ do_repl = False sync = False # Repl does not work with direct connect. if not self.failed_over and not self.is_direct_connect: specs = self._get_volume_extra_specs(volume) do_repl = specs.get('replication_enabled') == '<is> True' sync = specs.get('replication_type') == '<is> sync' return do_repl, sync def _create_replications(self, api, volume, scvolume): """Creates any appropriate replications for a given volume. :param api: Dell REST API object. :param volume: Cinder volume object. :param scvolume: Dell Storage Center Volume object. :return: model_update """ # Replication V2 # For now we assume we have an array named backends. replication_driver_data = None # Replicate if we are supposed to. do_repl, sync = self._do_repl(api, volume) if do_repl: for backend in self.backends: # Check if we are to replicate the active replay or not. specs = self._get_volume_extra_specs(volume) replact = specs.get('replication:activereplay') == '<is> True' if not api.create_replication(scvolume, backend['target_device_id'], backend.get('qosnode', 'cinderqos'), sync, backend.get('diskfolder', None), replact): # Create replication will have printed a better error. msg = _('Replication %(name)s to %(ssn)s failed.') % { 'name': volume['id'], 'ssn': backend['target_device_id']} raise exception.VolumeBackendAPIException(data=msg) if not replication_driver_data: replication_driver_data = backend['target_device_id'] else: replication_driver_data += ',' replication_driver_data += backend['target_device_id'] # If we did something return model update. model_update = {} if replication_driver_data: model_update = {'replication_status': 'enabled', 'replication_driver_data': replication_driver_data} return model_update @staticmethod def _cleanup_failed_create_volume(api, volumename): try: api.delete_volume(volumename) except exception.VolumeBackendAPIException as ex: LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg) def create_volume(self, volume): """Create a volume.""" model_update = {} # We use id as our name as it is unique. volume_name = volume.get('id') # Look for our volume volume_size = volume.get('size') # See if we have any extra specs.
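# Replication behavior is driven entirely by volume type extra specs, as
# _do_repl() above shows. An illustrative set, using the scoped-key
# convention the driver compares against:
#
#     specs = {'replication_enabled': '<is> True',
#              'replication_type': '<is> sync',
#              'replication:activereplay': '<is> True'}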
specs = self._get_volume_extra_specs(volume) storage_profile = specs.get('storagetype:storageprofile') replay_profile_string = specs.get('storagetype:replayprofiles') LOG.debug('Creating volume %(name)s of size %(size)s', {'name': volume_name, 'size': volume_size}) scvolume = None with self._client.open_connection() as api: try: if api.find_sc(): scvolume = api.create_volume(volume_name, volume_size, storage_profile, replay_profile_string) if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume %s') % volume_name) # Update Consistency Group self._add_volume_to_consistency_group(api, scvolume, volume) # Create replications. (Or not. It checks.) model_update = self._create_replications(api, volume, scvolume) except Exception: # if we actually created a volume but failed elsewhere # clean up the volume now. self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create volume %s'), volume_name) if scvolume is None: raise exception.VolumeBackendAPIException( data=_('Unable to create volume. Backend down.')) return model_update def _split_driver_data(self, replication_driver_data): """Splits the replication_driver_data into an array of ssn strings. :param replication_driver_data: A string of comma separated SSNs. :returns: SSNs in an array of strings. """ ssnstrings = [] # We have any replication_driver_data. if replication_driver_data: # Split the array and wiffle through the entries. for str in replication_driver_data.split(','): # Strip any junk from the string. ssnstring = str.strip() # Anything left? if ssnstring: # Add it to our array. ssnstrings.append(ssnstring) return ssnstrings def _delete_replications(self, api, volume): """Delete replications associated with a given volume. We should be able to roll through the replication_driver_data list of SSNs and delete replication objects between them and the source volume. :param api: Dell REST API object. :param volume: Cinder Volume object :return: """ do_repl, sync = self._do_repl(api, volume) if do_repl: volume_name = volume.get('id') scvol = api.find_volume(volume_name) replication_driver_data = volume.get('replication_driver_data') # This is just a string of ssns separated by commas. ssnstrings = self._split_driver_data(replication_driver_data) # Trundle through these and delete them all. for ssnstring in ssnstrings: ssn = int(ssnstring) if not api.delete_replication(scvol, ssn): LOG.warning(_LW('Unable to delete replication of ' 'Volume %(vname)s to Storage Center ' '%(sc)s.'), {'vname': volume_name, 'sc': ssnstring}) # If none of that worked or there was nothing to do doesn't matter. # Just move on. def delete_volume(self, volume): deleted = False # We use id as our name as it is unique. volume_name = volume.get('id') LOG.debug('Deleting volume %s', volume_name) with self._client.open_connection() as api: try: if api.find_sc(): self._delete_replications(api, volume) deleted = api.delete_volume(volume_name) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to delete volume %s'), volume_name) # if there was an error we will have raised an # exception. If it failed to delete it is because # the conditions to delete a volume were not met. 
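# Doctest-style sketch of _split_driver_data() above (values are
# illustrative SSN strings; stray whitespace and empty entries are
# dropped):
#
#     >>> self._split_driver_data('64702, 64703,')
#     ['64702', '64703']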
if deleted is False: raise exception.VolumeIsBusy(volume_name=volume_name) def create_snapshot(self, snapshot): """Create snapshot""" # our volume name is the volume id volume_name = snapshot.get('volume_id') snapshot_id = snapshot.get('id') LOG.debug('Creating snapshot %(snap)s on volume %(vol)s', {'snap': snapshot_id, 'vol': volume_name}) with self._client.open_connection() as api: if api.find_sc(): scvolume = api.find_volume(volume_name) if scvolume is not None: if api.create_replay(scvolume, snapshot_id, 0) is not None: snapshot['status'] = 'available' return else: LOG.warning(_LW('Unable to locate volume:%s'), volume_name) snapshot['status'] = 'error_creating' msg = _('Failed to create snapshot %s') % snapshot_id raise exception.VolumeBackendAPIException(data=msg) def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other volume's snapshot on appliance.""" model_update = {} scvolume = None src_volume_name = snapshot.get('volume_id') # This snapshot could have been created on its own or as part of a # cgsnapshot. If it was a cgsnapshot it will be identified on the Dell # backend under cgsnapshot_id. Given the volume ID and the # cgsnapshot_id we can find the appropriate snapshot. # So first we look for cgsnapshot_id. If that is blank then it must # have been a normal snapshot which will be found under snapshot_id. snapshot_id = snapshot.get('cgsnapshot_id') if not snapshot_id: snapshot_id = snapshot.get('id') volume_name = volume.get('id') LOG.debug( 'Creating new volume %(vol)s from snapshot %(snap)s ' 'from vol %(src)s', {'vol': volume_name, 'snap': snapshot_id, 'src': src_volume_name}) with self._client.open_connection() as api: try: if api.find_sc(): srcvol = api.find_volume(src_volume_name) if srcvol is not None: replay = api.find_replay(srcvol, snapshot_id) if replay is not None: volume_name = volume.get('id') # See if we have any extra specs. specs = self._get_volume_extra_specs(volume) replay_profile_string = specs.get( 'storagetype:replayprofiles') scvolume = api.create_view_volume( volume_name, replay, replay_profile_string) if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume ' '%(name)s from %(snap)s.') % {'name': volume_name, 'snap': snapshot_id}) # Update Consistency Group self._add_volume_to_consistency_group(api, scvolume, volume) # Replicate if we are supposed to. model_update = self._create_replications(api, volume, scvolume) except Exception: # Clean up after ourselves. self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create volume %s'), volume_name) if scvolume is not None: LOG.debug('Volume %(vol)s created from %(snap)s', {'vol': volume_name, 'snap': snapshot_id}) else: msg = _('Failed to create volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) return model_update def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" model_update = {} scvolume = None src_volume_name = src_vref.get('id') volume_name = volume.get('id') LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s', {'clone': volume_name, 'vol': src_volume_name}) with self._client.open_connection() as api: try: if api.find_sc(): srcvol = api.find_volume(src_volume_name) if srcvol is not None: # See if we have any extra specs. 
specs = self._get_volume_extra_specs(volume) replay_profile_string = specs.get( 'storagetype:replayprofiles') # Create our volume scvolume = api.create_cloned_volume( volume_name, srcvol, replay_profile_string) # Extend Volume if scvolume and volume['size'] > src_vref['size']: LOG.debug("Resize the new volume to %s.", volume['size']) scvolume = api.expand_volume(scvolume, volume['size']) # If either of those didn't work we bail. if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume ' '%(name)s from %(vol)s.') % {'name': volume_name, 'vol': src_volume_name}) # Update Consistency Group self._add_volume_to_consistency_group(api, scvolume, volume) # Replicate if we are supposed to. model_update = self._create_replications(api, volume, scvolume) except Exception: # Clean up after ourselves. self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create volume %s'), volume_name) if scvolume is not None: LOG.debug('Volume %(vol)s cloned from %(src)s', {'vol': volume_name, 'src': src_volume_name}) else: msg = _('Failed to create volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) return model_update def delete_snapshot(self, snapshot): """delete_snapshot""" volume_name = snapshot.get('volume_id') snapshot_id = snapshot.get('id') LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s', {'snap': snapshot_id, 'vol': volume_name}) with self._client.open_connection() as api: if api.find_sc(): scvolume = api.find_volume(volume_name) if scvolume is not None: if api.delete_replay(scvolume, snapshot_id): return # if we are here things went poorly. snapshot['status'] = 'error_deleting' msg = _('Failed to delete snapshot %s') % snapshot_id raise exception.VolumeBackendAPIException(data=msg) def create_export(self, context, volume, connector): """Create an export of a volume. The volume exists on creation and will be visible on initialize connection. So nothing to do here. """ # TODO(tswanson): Move mapping code here. pass def ensure_export(self, context, volume): """Ensure an export of a volume. Per the eqlx driver we just make sure that the volume actually exists where we think it does. """ scvolume = None volume_name = volume.get('id') LOG.debug('Checking existence of volume %s', volume_name) with self._client.open_connection() as api: try: if api.find_sc(): scvolume = api.find_volume(volume_name) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to ensure export of volume %s'), volume_name) if scvolume is None: msg = _('Unable to find volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) def remove_export(self, context, volume): """Remove an export of a volume. We do nothing here to match the nothing we do in create export. Again we do everything in initialize and terminate connection. """ pass def extend_volume(self, volume, new_size): """Extend the size of the volume.""" volume_name = volume.get('id') LOG.debug('Extending volume %(vol)s to %(size)s', {'vol': volume_name, 'size': new_size}) if volume is not None: with self._client.open_connection() as api: if api.find_sc(): scvolume = api.find_volume(volume_name) if api.expand_volume(scvolume, new_size) is not None: return # If we are here nothing good happened. msg = _('Unable to extend volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) def get_volume_stats(self, refresh=False): """Get volume status. 
If 'refresh' is True, run update the stats first. """ if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" with self._client.open_connection() as api: storageusage = api.get_storage_usage() if api.find_sc() else None # all of this is basically static for now data = {} data['volume_backend_name'] = self.backend_name data['vendor_name'] = 'Dell' data['driver_version'] = self.VERSION data['storage_protocol'] = 'iSCSI' data['reserved_percentage'] = 0 data['free_capacity_gb'] = 'unavailable' data['total_capacity_gb'] = 'unavailable' data['consistencygroup_support'] = True # In theory if storageusage is None then we should have # blown up getting it. If not just report unavailable. if storageusage is not None: totalcapacity = storageusage.get('availableSpace') totalcapacitygb = self._bytes_to_gb(totalcapacity) data['total_capacity_gb'] = totalcapacitygb freespace = storageusage.get('freeSpace') freespacegb = self._bytes_to_gb(freespace) data['free_capacity_gb'] = freespacegb data['QoS_support'] = False data['replication_enabled'] = self.replication_enabled if self.replication_enabled: data['replication_type'] = ['async', 'sync'] data['replication_count'] = len(self.backends) replication_targets = [] # Trundle through our backends. for backend in self.backends: target_device_id = backend.get('target_device_id') if target_device_id: replication_targets.append(target_device_id) data['replication_targets'] = replication_targets self._stats = data LOG.debug('Total cap %(total)s Free cap %(free)s', {'total': data['total_capacity_gb'], 'free': data['free_capacity_gb']}) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ # We use id as our volume name so we need to rename the backend # volume to the original volume name. original_volume_name = volume.get('id') current_name = new_volume.get('id') LOG.debug('update_migrated_volume: %(current)s to %(original)s', {'current': current_name, 'original': original_volume_name}) if original_volume_name: with self._client.open_connection() as api: if api.find_sc(): scvolume = api.find_volume(current_name) if (scvolume and api.rename_volume(scvolume, original_volume_name)): # Replicate if we are supposed to. model_update = self._create_replications(api, new_volume, scvolume) model_update['_name_id'] = None return model_update # The world was horrible to us so we should error and leave. LOG.error(_LE('Unable to rename the logical volume for volume: %s'), original_volume_name) return {'_name_id': new_volume['_name_id'] or new_volume['id']} def create_consistencygroup(self, context, group): """This creates a replay profile on the storage backend. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :returns: Nothing on success. 
:raises: VolumeBackendAPIException """ gid = group['id'] with self._client.open_connection() as api: cgroup = api.create_replay_profile(gid) if cgroup: LOG.info(_LI('Created Consistency Group %s'), gid) return msg = _('Unable to create consistency group %s') % gid raise exception.VolumeBackendAPIException(data=msg) def delete_consistencygroup(self, context, group, volumes): """Delete the Dell SC profile associated with this consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be deleted. :returns: Updated model_update, volumes. """ gid = group['id'] with self._client.open_connection() as api: profile = api.find_replay_profile(gid) if profile: api.delete_replay_profile(profile) # If we are here because we found no profile that should be fine # as we are trying to delete it anyway. # Trundle through the list deleting the volumes. for volume in volumes: self.delete_volume(volume) volume['status'] = 'deleted' model_update = {'status': group['status']} return model_update, volumes def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. :param remove_volumes: a list of volume dictionaries to be removed. :returns: model_update, add_volumes_update, remove_volumes_update model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'. add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires a {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will remain its original status. Also note that you cannot directly assign add_volumes to add_volumes_update as add_volumes is a list of cinder.db.sqlalchemy.models.Volume objects and cannot be used for db update directly. Same with remove_volumes. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'. """ gid = group['id'] with self._client.open_connection() as api: profile = api.find_replay_profile(gid) if not profile: LOG.error(_LE('Cannot find Consistency Group %s'), gid) elif api.update_cg_volumes(profile, add_volumes, remove_volumes): LOG.info(_LI('Updated Consistency Group %s'), gid) # We need nothing updated above us so just return None. return None, None, None # Things did not go well so throw. msg = _('Unable to update consistency group %s') % gid raise exception.VolumeBackendAPIException(data=msg) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Takes a snapshot of the consistency group. :param context: the context of the caller. :param cgsnapshot: Information about the snapshot to take. :param snapshots: List of snapshots for this cgsnapshot. :returns: Updated model_update, snapshots. :raises: VolumeBackendAPIException.
""" cgid = cgsnapshot['consistencygroup_id'] snapshotid = cgsnapshot['id'] with self._client.open_connection() as api: profile = api.find_replay_profile(cgid) if profile: LOG.debug('profile %s replayid %s', profile, snapshotid) if api.snap_cg_replay(profile, snapshotid, 0): snapshot_updates = [] for snapshot in snapshots: snapshot_updates.append({ 'id': snapshot.id, 'status': 'available' }) model_update = {'status': 'available'} return model_update, snapshot_updates # That didn't go well. Tell them why. Then bomb out. LOG.error(_LE('Failed to snap Consistency Group %s'), cgid) else: LOG.error(_LE('Cannot find Consistency Group %s'), cgid) msg = _('Unable to snap Consistency Group %s') % cgid raise exception.VolumeBackendAPIException(data=msg) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot. If profile isn't found return success. If failed to delete the replay (the snapshot) then raise an exception. :param context: the context of the caller. :param cgsnapshot: Information about the snapshot to delete. :returns: Updated model_update, snapshots. :raises: VolumeBackendAPIException. """ cgid = cgsnapshot['consistencygroup_id'] snapshotid = cgsnapshot['id'] with self._client.open_connection() as api: profile = api.find_replay_profile(cgid) if profile: LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'), {'ss': snapshotid, 'pro': profile}) if not api.delete_cg_replay(profile, snapshotid): msg = (_('Unable to delete Consistency Group snapshot %s') % snapshotid) raise exception.VolumeBackendAPIException(data=msg) for snapshot in snapshots: snapshot.status = 'deleted' model_update = {'status': 'deleted'} return model_update, snapshots def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ if existing_ref.get('source-name') or existing_ref.get('source-id'): with self._client.open_connection() as api: api.manage_existing(volume['id'], existing_ref) # Replicate if we are supposed to. 
scvolume = api.find_volume(volume['id']) model_update = self._create_replications(api, volume, scvolume) if model_update: return model_update else: msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Only return a model_update if we have replication info to add. return None def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ if existing_ref.get('source-name') or existing_ref.get('source-id'): with self._client.open_connection() as api: return api.get_unmanaged_volume_size(existing_ref) else: msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param volume: Cinder volume to unmanage """ with self._client.open_connection() as api: scvolume = api.find_volume(volume['id']) if scvolume: api.unmanage(scvolume) def _get_retype_spec(self, diff, volume_name, specname, spectype): """Helper function to get current and requested spec. :param diff: A difference dictionary. :param volume_name: The volume name we are working with. :param specname: The pretty name of the parameter. :param spectype: The actual spec string. :return: current, requested spec. :raises: VolumeBackendAPIException """ spec = (diff['extra_specs'].get(spectype)) if spec: if len(spec) != 2: msg = _('Unable to retype %(specname)s, expected to receive ' 'current and requested %(spectype)s values. Value ' 'received: %(spec)s') % {'specname': specname, 'spectype': spectype, 'spec': spec} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) current = spec[0] requested = spec[1] if current != requested: LOG.debug('Retyping volume %(vol)s to use %(specname)s ' '%(spec)s.', {'vol': volume_name, 'specname': specname, 'spec': requested}) return current, requested else: LOG.info(_LI('Retype was to same Storage Profile.')) return None, None def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). """ model_update = None # Any spec changes? if diff['extra_specs']: volume_name = volume.get('id') with self._client.open_connection() as api: try: # Get our volume scvolume = api.find_volume(volume_name) if scvolume is None: LOG.error(_LE('Retype unable to find volume %s.'), volume_name) return False # Check our specs. # Storage profiles. 
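                    # _get_retype_spec() expects each changed key in
                    # diff['extra_specs'] to map to a [current, requested]
                    # pair. A hypothetical diff for a storage profile
                    # change could look like:
                    #
                    #     {'extra_specs': {'storagetype:storageprofile':
                    #                      ['Recommended', 'High Priority']}}
                    #
                    # spec[0] is what the volume uses now, spec[1] is the
                    # requested target value.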
                    current, requested = (
                        self._get_retype_spec(diff, volume_name,
                                              'Storage Profile',
                                              'storagetype:storageprofile'))
                    # if there is a change and it didn't work, fail fast.
                    if (current != requested and not
                            api.update_storage_profile(scvolume, requested)):
                        LOG.error(_LE('Failed to update storage profile'))
                        return False

                    # Replay profiles.
                    current, requested = (
                        self._get_retype_spec(diff, volume_name,
                                              'Replay Profiles',
                                              'storagetype:replayprofiles'))
                    # if there is a change and it didn't work, fail fast.
                    if requested and not api.update_replay_profiles(
                            scvolume, requested):
                        LOG.error(_LE('Failed to update replay profiles'))
                        return False

                    # Replication_enabled.
                    current, requested = (
                        self._get_retype_spec(diff, volume_name,
                                              'replication_enabled',
                                              'replication_enabled'))
                    # if there is a change and it didn't work, fail fast.
                    if current != requested:
                        if requested:
                            model_update = self._create_replications(api,
                                                                     volume,
                                                                     scvolume)
                        else:
                            self._delete_replications(api, volume)
                            model_update = {'replication_status': 'disabled',
                                            'replication_driver_data': ''}

                    # Active Replay
                    current, requested = (
                        self._get_retype_spec(diff, volume_name,
                                              'Replicate Active Replay',
                                              'replication:activereplay'))
                    if current != requested and not (
                            api.update_replicate_active_replay(
                                scvolume, requested == '<is> True')):
                        LOG.error(_LE('Failed to apply '
                                      'replication:activereplay setting'))
                        return False

                    # TODO(tswanson): replaytype once it actually works.
                except exception.VolumeBackendAPIException:
                    # We do nothing with this. We simply return failure.
                    return False
        # If we have something to send down...
        if model_update:
            return model_update
        return True

    def _parse_secondary(self, api, secondary):
        """Find the replication destination associated with secondary.

        :param api: Dell StorageCenterApi
        :param secondary: String indicating the secondary to failover to.
        :return: Destination SSN for the given secondary.
        """
        LOG.debug('_parse_secondary. Looking for %s.', secondary)
        destssn = None
        # Trundle through these looking for our secondary.
        for backend in self.backends:
            ssnstring = backend['target_device_id']
            # If they list a secondary it has to match.
            # If they do not list a secondary we return the first
            # replication on a working system.
            if not secondary or secondary == ssnstring:
                # Is a string. Need an int.
                ssn = int(ssnstring)
                # Without the source being up we have no good
                # way to pick a destination to failover to. So just
                # look for one that is just up.
                try:
                    # If the SC ssn exists use it.
                    if api.find_sc(ssn):
                        destssn = ssn
                        break
                except exception.VolumeBackendAPIException:
                    LOG.warning(_LW('SSN %s appears to be down.'), ssn)
        LOG.info(_LI('replication failover secondary is %(ssn)s'),
                 {'ssn': destssn})
        return destssn

    def _update_backend(self, active_backend_id):
        # Update our backend id. On the next open_connection it will use this.
        self.active_backend_id = str(active_backend_id)
        self._client.active_backend_id = self.active_backend_id

    def failover_host(self, context, volumes, secondary_id=None):
        """Failover to secondary.

        :param context: security context
        :param secondary_id: Specifies rep target to fail over to
        :param volumes: List of volumes serviced by this backend.
        :returns: destssn, volume_updates data structure

        Example volume_updates data structure:

        [{'volume_id': <cinder-uuid>,
          'updates': {'provider_id': 8,
                      'replication_status': 'failed-over',
                      'replication_extended_status': 'whatever',...}},]
        """
        # We do not allow failback. Dragons be there.
        if self.failed_over:
            raise exception.VolumeBackendAPIException(message=_(
                'Backend has already been failed over. 
Unable to fail back.')) LOG.info(_LI('Failing backend to %s'), secondary_id) # basic check if self.replication_enabled: with self._client.open_connection() as api: # Look for the specified secondary. destssn = self._parse_secondary(api, secondary_id) if destssn: # We roll through trying to break replications. # Is failing here a complete failure of failover? volume_updates = [] for volume in volumes: model_update = {} if volume.get('replication_driver_data'): ret = api.break_replication(volume['id'], destssn) LOG.info(_LI('Failing over volume %(id)s ' 'replication: %(res)s.'), {'id': volume['id'], 'res': ('FAILED', 'SUCCESS')[ret]}) # We should note that we are now failed over. model_update = { 'replication_status': 'failed-over'} else: # Not a replicated volume. Try to unmap it. scvolume = api.find_volume(volume['id']) api.remove_mappings(scvolume) model_update = {'status': 'error'} # Either we are failed over or our status is now error. volume_updates.append({'volume_id': volume['id'], 'updates': model_update}) # this is it. self._update_backend(destssn) return destssn, volume_updates else: raise exception.InvalidInput(message=( _('replication_failover failed. %s not found.') % secondary_id)) # I don't think we should ever get here. raise exception.VolumeBackendAPIException(message=( _('replication_failover failed. ' 'Backend not configured for failover'))) def _get_unmanaged_replay(self, api, volume_name, existing_ref): replay_name = None if existing_ref: replay_name = existing_ref.get('source-name') if not replay_name: msg = _('_get_unmanaged_replay: Must specify source-name.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Find our volume. scvolume = api.find_volume(volume_name) if not scvolume: # Didn't find it. msg = (_('_get_unmanaged_replay: Cannot find volume id %s') % volume_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Find our replay. screplay = api.find_replay(scvolume, replay_name) if not screplay: # Didn't find it. Reference must be invalid. msg = (_('_get_unmanaged_replay: Cannot ' 'find snapshot named %s') % replay_name) LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) return screplay def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. """ volume_name = snapshot.get('volume_id') snapshot_id = snapshot.get('id') with self._client.open_connection() as api: # Find our unmanaged snapshot. This will raise on error. screplay = self._get_unmanaged_replay(api, volume_name, existing_ref) # Manage means update description and update expiration. 
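            # The API request that got us here supplied a reference such
            # as the following (hypothetical name):
            #
            #     existing_ref = {'source-name': 'daily-replay-03-01'}
            #
            # _get_unmanaged_replay() above has already resolved that
            # name to a replay object; manage_replay() now retitles it
            # with the cinder snapshot id and updates its expiration.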
if not api.manage_replay(screplay, snapshot_id): # That didn't work. Error. msg = (_('manage_existing_snapshot: Error managing ' 'existing replay %(ss)s on volume %(vol)s') % {'ss': screplay.get('description'), 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Life is good. Let the world know what we've done. LOG.info(_LI('manage_existing_snapshot: snapshot %(exist)s on ' 'volume %(volume)s has been renamed to %(id)s and is ' 'now managed by Cinder.'), {'exist': screplay.get('description'), 'volume': volume_name, 'id': snapshot_id}) # NOTE: Can't use abstractmethod before all drivers implement it def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. """ volume_name = snapshot.get('volume_id') with self._client.open_connection() as api: screplay = self._get_unmanaged_replay(api, volume_name, existing_ref) sz, rem = dell_storagecenter_api.StorageCenterApi.size_to_gb( screplay['size']) if rem > 0: raise exception.VolumeBackendAPIException( data=_('Volume size must be a multiple of 1 GB.')) return sz # NOTE: Can't use abstractmethod before all drivers implement it def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. NOTE: We do set the expire countdown to 1 day. Once a snapshot is unmanaged it will expire 24 hours later. """ volume_name = snapshot.get('volume_id') snapshot_id = snapshot.get('id') with self._client.open_connection() as api: # Find our volume. scvolume = api.find_volume(volume_name) if not scvolume: # Didn't find it. msg = (_('unmanage_snapshot: Cannot find volume id %s') % volume_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Find our replay. screplay = api.find_replay(scvolume, snapshot_id) if not screplay: # Didn't find it. Reference must be invalid. msg = (_('unmanage_snapshot: Cannot find snapshot named %s') % snapshot_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Free our snapshot. api.unmanage_replay(screplay) # Do not check our result. cinder-8.0.0/cinder/volume/drivers/prophetstor/0000775000567000056710000000000012701406543022750 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/prophetstor/dpl_iscsi.py0000664000567000056710000001556012701406250025275 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 ProphetStor, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
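# A minimal cinder.conf stanza wiring up this driver might look like the
# following sketch (illustrative values; dpl_port and dpl_pool are defined
# in cinder/volume/drivers/prophetstor/options.py, the san_* options come
# from the generic SAN driver options):
#
#     [prophetstor_iscsi]
#     volume_driver = cinder.volume.drivers.prophetstor.dpl_iscsi.DPLISCSIDriver
#     san_ip = 192.0.2.10
#     san_login = admin
#     san_password = secret
#     dpl_port = 8357
#     dpl_pool = d3d6a6a5a1f64fa2a0a99fb08e3a3265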
import errno from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LI, _LW import cinder.volume.driver from cinder.volume.drivers.prophetstor import dplcommon LOG = logging.getLogger(__name__) class DPLISCSIDriver(dplcommon.DPLCOMMONDriver, cinder.volume.driver.ISCSIDriver): def __init__(self, *args, **kwargs): super(DPLISCSIDriver, self).__init__(*args, **kwargs) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" properties = {} properties['target_lun'] = None properties['target_discovered'] = True properties['target_portal'] = '' properties['target_iqn'] = None properties['volume_id'] = volume['id'] dpl_server = self.configuration.san_ip dpl_iscsi_port = self.configuration.iscsi_port ret, output = self.dpl.assign_vdev(self._conver_uuid2hex( volume['id']), connector['initiator'].lower(), volume['id'], '%s:%d' % (dpl_server, dpl_iscsi_port), 0) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if len(event_uuid): ret = 0 status = self._wait_event( self.dpl.get_vdev_status, self._conver_uuid2hex( volume['id']), event_uuid) if status['state'] == 'error': ret = errno.EFAULT msg = _('Flexvisor failed to assign volume %(id)s: ' '%(status)s.') % {'id': volume['id'], 'status': status} raise exception.VolumeBackendAPIException(data=msg) else: ret = errno.EFAULT msg = _('Flexvisor failed to assign volume %(id)s due to ' 'unable to query status by event ' 'id.') % {'id': volume['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor assign volume failed.:%(id)s:' '%(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) if ret == 0: ret, output = self.dpl.get_vdev( self._conver_uuid2hex(volume['id'])) if ret == 0: for tgInfo in output['exports']['Network/iSCSI']: if tgInfo['permissions'] and \ isinstance(tgInfo['permissions'][0], dict): for assign in tgInfo['permissions']: if connector['initiator'].lower() in assign.keys(): for tgportal in tgInfo.get('portals', {}): properties['target_portal'] = tgportal break properties['target_lun'] = \ int(assign[connector['initiator'].lower()]) break if properties['target_portal'] != '': properties['target_iqn'] = tgInfo['target_identifier'] break else: if connector['initiator'].lower() in tgInfo['permissions']: for tgportal in tgInfo.get('portals', {}): properties['target_portal'] = tgportal break if properties['target_portal'] != '': properties['target_lun'] = \ int(tgInfo['logical_unit_number']) properties['target_iqn'] = \ tgInfo['target_identifier'] break if not (ret == 0 or properties['target_portal']): msg = _('Flexvisor failed to assign volume %(volume)s ' 'iqn %(iqn)s.') % {'volume': volume['id'], 'iqn': connector['initiator']} raise exception.VolumeBackendAPIException(data=msg) return {'driver_volume_type': 'iscsi', 'data': properties} def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" ret, output = self.dpl.unassign_vdev( self._conver_uuid2hex(volume['id']), connector['initiator']) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event( self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] == 'error': ret = errno.EFAULT msg = _('Flexvisor failed to unassign volume %(id)s:' ' %(status)s.') % {'id': volume['id'], 'status': status} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to unassign 
volume (get event) ' '%(id)s.') % {'id': volume['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: LOG.info(_LI('Flexvisor already unassigned volume ' '%(id)s.'), {'id': volume['id']}) elif ret != 0: msg = _('Flexvisor failed to unassign volume:%(id)s:' '%(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) def get_volume_stats(self, refresh=False): if refresh: try: data = super(DPLISCSIDriver, self).get_volume_stats(refresh) if data: data['storage_protocol'] = 'iSCSI' backend_name = \ self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = \ (backend_name or 'DPLISCSIDriver') self._stats = data except Exception as exc: LOG.warning(_LW('Cannot get volume status ' '%(exc)s.'), {'exc': exc}) return self._stats cinder-8.0.0/cinder/volume/drivers/prophetstor/options.py0000664000567000056710000000167212701406250025016 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 ProphetStor, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg DPL_OPTS = [ cfg.StrOpt('dpl_pool', default='', help='DPL pool uuid in which DPL volumes are stored.'), cfg.PortOpt('dpl_port', default=8357, help='DPL port number.'), ] CONF = cfg.CONF CONF.register_opts(DPL_OPTS) cinder-8.0.0/cinder/volume/drivers/prophetstor/__init__.py0000664000567000056710000000000012701406250025042 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/prophetstor/dpl_fc.py0000664000567000056710000004116012701406250024546 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 ProphetStor, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.volume import driver from cinder.volume.drivers.prophetstor import dplcommon from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class DPLFCDriver(dplcommon.DPLCOMMONDriver, driver.FibreChannelDriver): def __init__(self, *args, **kwargs): super(DPLFCDriver, self).__init__(*args, **kwargs) def _get_fc_channel(self): """Get FibreChannel info. 
:returns: fcInfos[uuid] fcInfo[uuid]['display_name'] fcInfo[uuid]['display_description'] fcInfo[uuid]['hardware_address'] fcInfo[uuid]['type'] fcInfo[uuid]['speed'] fcInfo[uuid]['state'] """ output = None fcInfos = {} try: retCode, output = self.dpl.get_server_info() if retCode == 0 and output: fcUuids = output.get('metadata', {}).get('storage_adapter', {}).keys() for fcUuid in fcUuids: fcInfo = output.get('metadata', {}).get('storage_adapter', {}).get(fcUuid) if fcInfo['type'] == 'fc': fcInfos[fcUuid] = fcInfo except Exception as e: LOG.error(_LE("Failed to get fiber channel info from storage " "due to %(stat)s"), {'stat': e}) return fcInfos def _get_targets(self): """Get targets. :returns: targetInfos[uuid] = targetInfo targetInfo['targetUuid'] targetInfo['targetName'] targetInfo['targetAddr'] """ output = None targetInfos = {} try: retCode, output = self.dpl.get_target_list('target') if retCode == 0 and output: for targetInfo in output.get('children', []): targetI = {} targetI['targetUuid'] = targetInfo[0] targetI['targetName'] = targetInfo[1] targetI['targetAddr'] = targetInfo[2] targetInfos[str(targetInfo[0])] = targetI except Exception as e: targetInfos = {} LOG.error(_LE("Failed to get fiber channel target from " "storage server due to %(stat)s"), {'stat': e}) return targetInfos def _get_targetwpns(self, volumeid, initiatorWwpns): lstargetWwpns = [] try: ret, output = self.dpl.get_vdev(volumeid) if ret == 0 and output: exports = output.get('exports', {}) fc_infos = exports.get('Network/FC', {}) for fc_info in fc_infos: for p in fc_info.get('permissions', []): if p.get(initiatorWwpns, None): targetWwpns = fc_info.get('target_identifier', '') lstargetWwpns.append(targetWwpns) except Exception as e: LOG.error(_LE("Failed to get target wwpns from storage due " "to %(stat)s"), {'stat': e}) lstargetWwpns = [] return lstargetWwpns def _is_initiator_wwpn_active(self, targetWwpn, initiatorWwpn): fActive = False output = None try: retCode, output = self.dpl.get_sns_table(targetWwpn) if retCode == 0 and output: for fdwwpn, fcport in output.get('metadata', {}).get('sns_table', []): if fdwwpn == initiatorWwpn: fActive = True break except Exception: LOG.error(_LE('Failed to get sns table')) return fActive def _convertHex2String(self, wwpns): szwwpns = '' if len(str(wwpns)) == 16: szwwpns = '%2s:%2s:%2s:%2s:%2s:%2s:%2s:%2s' % ( str(wwpns)[0:2], str(wwpns)[2:4], str(wwpns)[4:6], str(wwpns)[6:8], str(wwpns)[8:10], str(wwpns)[10:12], str(wwpns)[12:14], str(wwpns)[14:16]) return szwwpns def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename): ret = 0 output = '' LOG.debug('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s', {'volume': volumeid, 'wwpns': targetwwpns, 'iqn': initiatorwwpns, 'volumename': volumename}) try: ret, output = self.dpl.assign_vdev_fc( self._conver_uuid2hex(volumeid), targetwwpns, initiatorwwpns, volumename) except Exception: LOG.error(_LE('Volume %(volumeid)s failed to send assign command, ' 'ret: %(status)s output: %(output)s'), {'volumeid': volumeid, 'status': ret, 'output': output}) ret = errno.EFAULT if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if len(event_uuid): ret = 0 status = self._wait_event( self.dpl.get_vdev_status, self._conver_uuid2hex(volumeid), event_uuid) if status['state'] == 'error': ret = errno.EFAULT msg = _('Flexvisor failed to assign volume %(id)s: ' '%(status)s.') % {'id': volumeid, 'status': status} raise exception.VolumeBackendAPIException(data=msg) else: ret = errno.EFAULT msg = _('Flexvisor failed to 
assign volume %(id)s due to ' 'unable to query status by event ' 'id.') % {'id': volumeid} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor assign volume failed:%(id)s:' '%(status)s.') % {'id': volumeid, 'status': ret} raise exception.VolumeBackendAPIException(data=msg) return ret def _delete_export_fc(self, volumeid, targetwwpns, initiatorwwpns): ret = 0 output = '' ret, output = self.dpl.unassign_vdev_fc( self._conver_uuid2hex(volumeid), targetwwpns, initiatorwwpns) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0 and len(event_uuid): status = self._wait_event( self.dpl.get_vdev_status, volumeid, event_uuid) if status['state'] == 'error': ret = errno.EFAULT msg = _('Flexvisor failed to unassign volume %(id)s:' ' %(status)s.') % {'id': volumeid, 'status': status} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to unassign volume (get event) ' '%(id)s.') % {'id': volumeid} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor unassign volume failed:%(id)s:' '%(status)s.') % {'id': volumeid, 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: LOG.info(_LI('Flexvisor succeeded to unassign volume %(id)s.'), {'id': volumeid}) return ret def _build_initiator_target_map(self, connector, tgtwwns): """Build the target_wwns and the initiator target map.""" init_targ_map = {} initiator_wwns = connector['wwpns'] for initiator in initiator_wwns: init_targ_map[initiator] = tgtwwns return init_targ_map @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" """ connector = {'ip': CONF.my_ip, 'host': CONF.host, 'initiator': self._initiator, 'wwnns': self._fc_wwnns, 'wwpns': self._fc_wwpns} """ dc_fc = {} dc_target = {} lsTargetWwpn = [] output = None properties = {} preferTargets = {} ret = 0 targetIdentifier = [] szwwpns = [] LOG.info(_LI('initialize_connection volume: %(volume)s, connector:' ' %(connector)s'), {"volume": volume, "connector": connector}) # Get Storage Fiber channel controller dc_fc = self._get_fc_channel() # Get existed FC target list to decide target wwpn dc_target = self._get_targets() if len(dc_target) == 0: msg = _('Backend storage did not configure fiber channel ' 'target.') raise exception.VolumeBackendAPIException(data=msg) for keyFc in dc_fc.keys(): for targetuuid in dc_target.keys(): if dc_fc[keyFc]['hardware_address'] == \ dc_target[targetuuid]['targetAddr']: preferTargets[targetuuid] = dc_target[targetuuid] break # Confirm client wwpn is existed in sns table # Covert wwwpns to 'xx:xx:xx:xx:xx:xx:xx:xx' format for dwwpn in connector['wwpns']: szwwpn = self._convertHex2String(dwwpn) if len(szwwpn) == 0: msg = _('Invalid wwpns format %(wwpns)s') % \ {'wwpns': connector['wwpns']} raise exception.VolumeBackendAPIException(data=msg) szwwpns.append(szwwpn) if len(szwwpns): for targetUuid in preferTargets.keys(): targetWwpn = '' targetWwpn = preferTargets.get(targetUuid, {}).get('targetAddr', '') lsTargetWwpn.append(targetWwpn) # Use wwpns to assign volume. LOG.info(_LI('Prefer use target wwpn %(wwpn)s'), {'wwpn': lsTargetWwpn}) # Start to create export in all FC target node. 
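        # By this point the initiator wwpns have been normalised by
        # _convertHex2String(), e.g. (hypothetical values):
        #
        #     connector['wwpns'] = ['2100001b328a9c01']
        #     szwwpns            = ['21:00:00:1b:32:8a:9c:01']
        #
        # and lsTargetWwpn lists the address of every candidate FC target
        # port; each one is tried in turn below.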
assignedTarget = [] for pTarget in lsTargetWwpn: try: ret = self._export_fc(volume['id'], str(pTarget), szwwpns, volume['name']) if ret: break else: assignedTarget.append(pTarget) except Exception as e: LOG.error(_LE('Failed to export fiber channel target ' 'due to %s'), e) ret = errno.EFAULT break if ret == 0: ret, output = self.dpl.get_vdev(self._conver_uuid2hex( volume['id'])) nLun = -1 if ret == 0: try: for p in output['exports']['Network/FC']: # check initiator wwpn existed in target initiator list for initI in p.get('permissions', []): for szwpn in szwwpns: if initI.get(szwpn, None): nLun = initI[szwpn] break if nLun != -1: break if nLun != -1: targetIdentifier.append( str(p['target_identifier']).replace(':', '')) except Exception: msg = _('Invalid connection initialization response of ' 'volume %(name)s: ' '%(output)s') % {'name': volume['name'], 'output': output} raise exception.VolumeBackendAPIException(data=msg) if nLun != -1: init_targ_map = self._build_initiator_target_map(connector, targetIdentifier) properties['target_discovered'] = True properties['target_wwn'] = targetIdentifier properties['target_lun'] = int(nLun) properties['volume_id'] = volume['id'] properties['initiator_target_map'] = init_targ_map LOG.info(_LI('%(volume)s assign type fibre_channel, properties ' '%(properties)s'), {'volume': volume['id'], 'properties': properties}) else: msg = _('Invalid connection initialization response of ' 'volume %(name)s') % {'name': volume['name']} raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI('Connect initialization info: ' '{driver_volume_type: fibre_channel, ' 'data: %(properties)s'), {'properties': properties}) return {'driver_volume_type': 'fibre_channel', 'data': properties} @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" """ connector = {'ip': CONF.my_ip, 'host': CONF.host, 'initiator': self._initiator, 'wwnns': self._fc_wwnns, 'wwpns': self._fc_wwpns} """ lstargetWwpns = [] lsTargets = [] szwwpns = [] ret = 0 info = {'driver_volume_type': 'fibre_channel', 'data': {}} LOG.info(_LI('terminate_connection volume: %(volume)s, ' 'connector: %(con)s'), {'volume': volume, 'con': connector}) # Query targetwwpns. # Get all target list of volume. 
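        # The info dict eventually handed back to the FC zone manager is
        # shaped roughly like this (hypothetical wwpns), so zoning can be
        # torn down for the initiator/target pairs involved:
        #
        #     {'driver_volume_type': 'fibre_channel',
        #      'data': {'target_wwn': ['2100001b328a9c01'],
        #               'initiator_target_map':
        #                   {'2100001b328a9c02': ['2100001b328a9c01']}}}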
        for dwwpn in connector['wwpns']:
            szwwpn = self._convertHex2String(dwwpn)
            if len(szwwpn) == 0:
                msg = _('Invalid wwpns format %(wwpns)s') % \
                    {'wwpns': connector['wwpns']}
                raise exception.VolumeBackendAPIException(data=msg)
            szwwpns.append(szwwpn)

        if len(szwwpns) == 0:
            ret = errno.EFAULT
            msg = _('Invalid wwpns format %(wwpns)s') % \
                {'wwpns': connector['wwpns']}
            raise exception.VolumeBackendAPIException(data=msg)
        else:
            for szwwpn in szwwpns:
                lstargetWwpns = self._get_targetwpns(
                    self._conver_uuid2hex(volume['id']), szwwpn)
                lsTargets = list(set(lsTargets + lstargetWwpns))

        # Remove all export target
        try:
            for ptarget in lsTargets:
                ret = self._delete_export_fc(volume['id'], ptarget, szwwpns)
                if ret:
                    break
        except Exception:
            ret = errno.EFAULT
        finally:
            if ret:
                msg = _('Failed to unassign %(volume)s') % (volume['id'])
                raise exception.VolumeBackendAPIException(data=msg)

        # Failed to delete export with fibre channel
        if ret:
            init_targ_map = self._build_initiator_target_map(connector,
                                                             lsTargets)
            info['data'] = {'target_wwn': lsTargets,
                            'initiator_target_map': init_targ_map}

        return info

    def get_volume_stats(self, refresh=False):
        if refresh:
            data = super(DPLFCDriver, self).get_volume_stats(refresh)
            if data:
                data['storage_protocol'] = 'FC'
                backend_name = \
                    self.configuration.safe_get('volume_backend_name')
                data['volume_backend_name'] = (backend_name or 'DPLFCDriver')
                self._stats = data
        return self._stats
cinder-8.0.0/cinder/volume/drivers/prophetstor/dplcommon.py0000664000567000056710000017220112701406250025310 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 ProphetStor, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Implementation of the class of ProphetStor DPL storage adapter of Federator.
# v2.0.1 Consistency group support # v2.0.2 Pool aware scheduler # v2.0.3 Consistency group modification support # v2.0.4 Port ProphetStor driver to use new driver model """ import base64 import errno import json import random import time from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units import six from six.moves import http_client from cinder import exception from cinder.i18n import _, _LI, _LW, _LE from cinder import objects from cinder.objects import fields from cinder.volume import driver from cinder.volume.drivers.prophetstor import options from cinder.volume.drivers.san import san from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) CONNECTION_RETRY = 10 MAXSNAPSHOTS = 1024 DISCOVER_SERVER_TYPE = 'dpl' DPL_BLOCKSTOR = '/dpl_blockstor' DPL_SYSTEM = '/dpl_system' DPL_VER_V1 = 'v1' DPL_OBJ_POOL = 'dpl_pool' DPL_OBJ_DISK = 'dpl_disk' DPL_OBJ_VOLUME = 'dpl_volume' DPL_OBJ_VOLUMEGROUP = 'dpl_volgroup' DPL_OBJ_SNAPSHOT = 'cdmi_snapshots' DPL_OBJ_EXPORT = 'dpl_export' DPL_OBJ_REPLICATION = 'cdmi_replication' DPL_OBJ_TARGET = 'dpl_target' DPL_OBJ_SYSTEM = 'dpl_system' DPL_OBJ_SNS = 'sns_table' class DPLCommand(object): """DPL command interface.""" def __init__(self, ip, port, username, password): self.ip = ip self.port = port self.username = username self.password = password def send_cmd(self, method, url, params, expected_status): """Send command to DPL.""" connection = None retcode = 0 response = {} data = {} header = {'Content-Type': 'application/cdmi-container', 'Accept': 'application/cdmi-container', 'x-cdmi-specification-version': '1.0.2'} # base64 encode the username and password auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '') header['Authorization'] = 'Basic %s' % auth if not params: payload = None else: try: payload = json.dumps(params, ensure_ascii=False) payload.encode('utf-8') except Exception as e: LOG.error(_LE('JSON encode params %(param)s error:' ' %(status)s.'), {'param': params, 'status': e}) retcode = errno.EINVAL for i in range(CONNECTION_RETRY): try: connection = http_client.HTTPSConnection(self.ip, self.port, timeout=60) if connection: retcode = 0 break except IOError as ioerr: LOG.error(_LE('Connect to Flexvisor error: %s.'), ioerr) retcode = errno.ENOTCONN except Exception as e: LOG.error(_LE('Connect to Flexvisor failed: %s.'), e) retcode = errno.EFAULT retry = CONNECTION_RETRY while (connection and retry): try: connection.request(method, url, payload, header) except http_client.CannotSendRequest as e: connection.close() time.sleep(1) connection = http_client.HTTPSConnection(self.ip, self.port, timeout=60) retry -= 1 if connection: if retry == 0: retcode = errno.ENOTCONN else: retcode = 0 else: retcode = errno.ENOTCONN continue except Exception as e: LOG.error(_LE('Failed to send request: %s.'), e) retcode = errno.EFAULT break if retcode == 0: try: response = connection.getresponse() if response.status == http_client.SERVICE_UNAVAILABLE: LOG.error(_LE('The Flexvisor service is unavailable.')) time.sleep(1) retry -= 1 retcode = errno.ENOPROTOOPT continue else: retcode = 0 break except http_client.ResponseNotReady as e: time.sleep(1) retry -= 1 retcode = errno.EFAULT continue except Exception as e: LOG.error(_LE('Failed to get response: %s.'), e) retcode = errno.EFAULT break if (retcode == 0 and response.status in expected_status and response.status == http_client.NOT_FOUND): retcode = errno.ENODATA elif retcode == 0 and response.status not in 
expected_status: LOG.error(_LE('%(method)s %(url)s unexpected response status: ' '%(response)s (expects: %(expects)s).'), {'method': method, 'url': url, 'response': http_client.responses[response.status], 'expects': expected_status}) if response.status == http_client.UNAUTHORIZED: raise exception.NotAuthorized else: retcode = errno.EIO elif retcode == 0 and response.status is http_client.NOT_FOUND: retcode = errno.ENODATA elif retcode == 0 and response.status is http_client.ACCEPTED: retcode = errno.EAGAIN try: data = response.read() data = json.loads(data) except (TypeError, ValueError) as e: LOG.error(_LE('Call to json.loads() raised an exception: %s.'), e) retcode = errno.ENOEXEC except Exception as e: LOG.error(_LE('Read response raised an exception: %s.'), e) retcode = errno.ENOEXEC elif (retcode == 0 and response.status in [http_client.OK, http_client.CREATED] and http_client.NO_CONTENT not in expected_status): try: data = response.read() data = json.loads(data) except (TypeError, ValueError) as e: LOG.error(_LE('Call to json.loads() raised an exception: %s.'), e) retcode = errno.ENOEXEC except Exception as e: LOG.error(_LE('Read response raised an exception: %s.'), e) retcode = errno.ENOEXEC if connection: connection.close() return retcode, data class DPLVolume(object): def __init__(self, dplServer, dplPort, dplUser, dplPassword): self.objCmd = DPLCommand(dplServer, dplPort, dplUser, dplPassword) def _execute(self, method, url, params, expected_status): if self.objCmd: return self.objCmd.send_cmd(method, url, params, expected_status) else: return -1, None def _gen_snapshot_url(self, vdevid, snapshotid): snapshot_url = '/%s/%s/%s' % (vdevid, DPL_OBJ_SNAPSHOT, snapshotid) return snapshot_url def get_server_info(self): method = 'GET' url = ('/%s/%s/' % (DPL_VER_V1, DPL_OBJ_SYSTEM)) return self._execute(method, url, None, [http_client.OK, http_client.ACCEPTED]) def create_vdev(self, volumeID, volumeName, volumeDesc, poolID, volumeSize, fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): method = 'PUT' metadata = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) if volumeName is None or volumeName == '': metadata['display_name'] = volumeID else: metadata['display_name'] = volumeName metadata['display_description'] = volumeDesc metadata['pool_uuid'] = poolID metadata['total_capacity'] = volumeSize metadata['maximum_snapshot'] = maximum_snapshot if snapshot_quota is not None: metadata['snapshot_quota'] = int(snapshot_quota) metadata['properties'] = dict(thin_provision=fthinprovision) params['metadata'] = metadata return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def extend_vdev(self, volumeID, volumeName, volumeDesc, volumeSize, maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): method = 'PUT' metadata = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) if volumeName is None or volumeName == '': metadata['display_name'] = volumeID else: metadata['display_name'] = volumeName metadata['display_description'] = volumeDesc metadata['total_capacity'] = int(volumeSize) metadata['maximum_snapshot'] = maximum_snapshot if snapshot_quota is not None: metadata['snapshot_quota'] = snapshot_quota params['metadata'] = metadata return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def delete_vdev(self, volumeID, force=True): method = 'DELETE' metadata = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) 
metadata['force'] = force params['metadata'] = metadata return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND, http_client.NO_CONTENT]) def create_vdev_from_snapshot(self, vdevID, vdevDisplayName, vdevDesc, snapshotID, poolID, fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): method = 'PUT' metadata = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevID) metadata['snapshot_operation'] = 'copy' if vdevDisplayName is None or vdevDisplayName == "": metadata['display_name'] = vdevID else: metadata['display_name'] = vdevDisplayName metadata['display_description'] = vdevDesc metadata['pool_uuid'] = poolID metadata['properties'] = {} metadata['maximum_snapshot'] = maximum_snapshot if snapshot_quota: metadata['snapshot_quota'] = snapshot_quota metadata['properties'] = dict(thin_provision=fthinprovision) params['metadata'] = metadata params['copy'] = self._gen_snapshot_url(vdevID, snapshotID) return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def spawn_vdev_from_snapshot(self, new_vol_id, src_vol_id, vol_display_name, description, snap_id): method = 'PUT' params = {} metadata = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, new_vol_id) metadata['snapshot_operation'] = 'spawn' if vol_display_name is None or vol_display_name == '': metadata['display_name'] = new_vol_id else: metadata['display_name'] = vol_display_name metadata['display_description'] = description params['metadata'] = metadata params['copy'] = self._gen_snapshot_url(src_vol_id, snap_id) return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def get_pools(self): method = 'GET' url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL) return self._execute(method, url, None, [http_client.OK]) def get_pool(self, poolid): method = 'GET' url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL, poolid) return self._execute(method, url, None, [http_client.OK, http_client.ACCEPTED]) def clone_vdev(self, SourceVolumeID, NewVolumeID, poolID, volumeName, volumeDesc, volumeSize, fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): method = 'PUT' params = {} metadata = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, NewVolumeID) metadata["snapshot_operation"] = "clone" if volumeName is None or volumeName == '': metadata["display_name"] = NewVolumeID else: metadata["display_name"] = volumeName metadata["display_description"] = volumeDesc metadata["pool_uuid"] = poolID metadata["total_capacity"] = volumeSize metadata["maximum_snapshot"] = maximum_snapshot if snapshot_quota: metadata["snapshot_quota"] = snapshot_quota metadata["properties"] = dict(thin_provision=fthinprovision) params["metadata"] = metadata params["copy"] = SourceVolumeID return self._execute(method, url, params, [http_client.OK, http_client.CREATED, http_client.ACCEPTED]) def create_vdev_snapshot(self, vdevid, snapshotid, snapshotname='', snapshotdes='', isgroup=False): method = 'PUT' metadata = {} params = {} if isgroup: url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid) else: url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) if not snapshotname: metadata['display_name'] = snapshotid else: metadata['display_name'] = snapshotname metadata['display_description'] = snapshotdes params['metadata'] = metadata params['snapshot'] = snapshotid return self._execute(method, url, params, [http_client.OK, http_client.CREATED, http_client.ACCEPTED]) def get_vdev(self, vdevid): 
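        # Resolves to GET /v1/dpl_volume/<vdevid>/; 404 is listed as an
        # expected status, which send_cmd() maps to errno.ENODATA so
        # callers can tell "volume is gone" apart from a transport error.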
method = 'GET' url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) return self._execute(method, url, None, [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND]) def get_vdev_status(self, vdevid, eventid): method = 'GET' url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, eventid)) return self._execute(method, url, None, [http_client.OK, http_client.NOT_FOUND]) def get_pool_status(self, poolid, eventid): method = 'GET' url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_POOL, poolid, eventid)) return self._execute(method, url, None, [http_client.OK, http_client.NOT_FOUND]) def assign_vdev(self, vdevid, iqn, lunname, portal, lunid=0): method = 'PUT' metadata = {} exports = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) metadata['export_operation'] = 'assign' exports['Network/iSCSI'] = {} target_info = {} target_info['logical_unit_number'] = 0 target_info['logical_unit_name'] = lunname permissions = [] portals = [] portals.append(portal) permissions.append(iqn) target_info['permissions'] = permissions target_info['portals'] = portals exports['Network/iSCSI'] = target_info params['metadata'] = metadata params['exports'] = exports return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def assign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpn, lunname, lunid=-1): method = 'PUT' metadata = {} exports = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) metadata['export_operation'] = 'assign' exports['Network/FC'] = {} target_info = {} target_info['target_identifier'] = targetwwpn target_info['logical_unit_number'] = lunid target_info['logical_unit_name'] = lunname target_info['permissions'] = initiatorwwpn exports['Network/FC'] = target_info params['metadata'] = metadata params['exports'] = exports return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def unassign_vdev(self, vdevid, initiatorIqn, targetIqn=''): method = 'PUT' metadata = {} exports = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) metadata['export_operation'] = 'unassign' params['metadata'] = metadata exports['Network/iSCSI'] = {} exports['Network/iSCSI']['target_identifier'] = targetIqn permissions = [] permissions.append(initiatorIqn) exports['Network/iSCSI']['permissions'] = permissions params['exports'] = exports return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT, http_client.NOT_FOUND]) def unassign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpns): method = 'PUT' metadata = {} exports = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) metadata['export_operation'] = 'unassign' params['metadata'] = metadata exports['Network/FC'] = {} exports['Network/FC']['target_identifier'] = targetwwpn permissions = initiatorwwpns exports['Network/FC']['permissions'] = permissions params['exports'] = exports return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT, http_client.NOT_FOUND]) def delete_vdev_snapshot(self, objID, snapshotID, isGroup=False): method = 'DELETE' if isGroup: url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, objID, DPL_OBJ_SNAPSHOT, snapshotID)) else: url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, objID, DPL_OBJ_SNAPSHOT, snapshotID)) return self._execute(method, url, None, [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT, http_client.NOT_FOUND]) def 
rollback_vdev(self, vdevid, snapshotid): method = 'PUT' params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) params['copy'] = self._gen_snapshot_url(vdevid, snapshotid) return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED]) def list_vdev_snapshots(self, vdevid, isGroup=False): method = 'GET' if isGroup: url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid, DPL_OBJ_SNAPSHOT)) else: url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, DPL_OBJ_SNAPSHOT)) return self._execute(method, url, None, [http_client.OK]) def query_vdev_snapshot(self, vdevid, snapshotID, isGroup=False): method = 'GET' if isGroup: url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid, DPL_OBJ_SNAPSHOT, snapshotID)) else: url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, DPL_OBJ_SNAPSHOT, snapshotID)) return self._execute(method, url, None, [http_client.OK]) def create_target(self, targetID, protocol, displayName, targetAddress, description=''): method = 'PUT' params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) params['metadata'] = {} metadata = params['metadata'] metadata['type'] = 'target' metadata['protocol'] = protocol if displayName is None or displayName == '': metadata['display_name'] = targetID else: metadata['display_name'] = displayName metadata['display_description'] = description metadata['address'] = targetAddress return self._execute(method, url, params, [http_client.OK]) def get_target(self, targetID): method = 'GET' url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) return self._execute(method, url, None, [http_client.OK]) def delete_target(self, targetID): method = 'DELETE' url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) return self._execute(method, url, None, [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND]) def get_target_list(self, type='target'): # type = target/initiator method = 'GET' if type is None: url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT) else: url = '/%s/%s/?type=%s' % (DPL_VER_V1, DPL_OBJ_EXPORT, type) return self._execute(method, url, None, [http_client.OK]) def get_sns_table(self, wwpn): method = 'PUT' params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, DPL_OBJ_SNS) params['metadata'] = {} params['metadata']['protocol'] = 'fc' params['metadata']['address'] = str(wwpn) return self._execute(method, url, params, [http_client.OK]) def create_vg(self, groupID, groupName, groupDesc='', listVolume=None, maxSnapshots=MAXSNAPSHOTS, rotationSnapshot=True): method = 'PUT' metadata = {} params = {} properties = {} url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) if listVolume: metadata['volume'] = listVolume else: metadata['volume'] = [] metadata['display_name'] = groupName metadata['display_description'] = groupDesc metadata['maximum_snapshot'] = maxSnapshots properties['snapshot_rotation'] = rotationSnapshot metadata['properties'] = properties params['metadata'] = metadata return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) def get_vg_list(self, vgtype=None): method = 'GET' if vgtype: url = '/%s/?volume_group_type=%s' % (DPL_OBJ_VOLUMEGROUP, vgtype) else: url = '/%s/' % (DPL_OBJ_VOLUMEGROUP) return self._execute(method, url, None, [http_client.OK]) def get_vg(self, groupID): method = 'GET' url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) return self._execute(method, url, None, [http_client.OK]) def delete_vg(self, groupID, force=True): method = 'DELETE' metadata = {} params = {} url = 
'/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) metadata['force'] = force params['metadata'] = metadata return self._execute(method, url, params, [http_client.NO_CONTENT, http_client.NOT_FOUND]) def join_vg(self, volumeID, groupID): method = 'PUT' metadata = {} params = {} url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) metadata['volume_group_operation'] = 'join' metadata['volume'] = [] metadata['volume'].append(volumeID) params['metadata'] = metadata return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED]) def leave_vg(self, volumeID, groupID): method = 'PUT' metadata = {} params = {} url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) metadata['volume_group_operation'] = 'leave' metadata['volume'] = [] metadata['volume'].append(volumeID) params['metadata'] = metadata return self._execute(method, url, params, [http_client.OK, http_client.ACCEPTED]) class DPLCOMMONDriver(driver.ConsistencyGroupVD, driver.ExtendVD, driver.CloneableImageVD, driver.SnapshotVD, driver.LocalVD, driver.BaseVD): """Class of dpl storage adapter.""" VERSION = '2.0.4' def __init__(self, *args, **kwargs): super(DPLCOMMONDriver, self).__init__(*args, **kwargs) if self.configuration: self.configuration.append_config_values(options.DPL_OPTS) self.configuration.append_config_values(san.san_opts) self.dpl = DPLVolume(self.configuration.san_ip, self.configuration.dpl_port, self.configuration.san_login, self.configuration.san_password) self._stats = {} def _convert_size_GB(self, size): s = round(float(size) / units.Gi, 2) if s > 0: return s else: return 0 def _conver_uuid2hex(self, strID): if strID: return strID.replace('-', '') else: return None def _get_event_uuid(self, output): ret = 0 event_uuid = "" if (type(output) is dict and output.get("metadata") and output["metadata"]): if (output["metadata"].get("event_uuid") and output["metadata"]["event_uuid"]): event_uuid = output["metadata"]["event_uuid"] else: ret = errno.EINVAL else: ret = errno.EINVAL return ret, event_uuid def _wait_event(self, callFun, objuuid, eventid=None): nRetry = 30 fExit = False status = {} status['state'] = 'error' status['output'] = {} while nRetry: try: if eventid: ret, output = callFun( self._conver_uuid2hex(objuuid), self._conver_uuid2hex(eventid)) else: ret, output = callFun(self._conver_uuid2hex(objuuid)) if ret == 0: if output['completionStatus'] == 'Complete': fExit = True status['state'] = 'available' status['output'] = output elif output['completionStatus'] == 'Error': fExit = True status['state'] = 'error' raise loopingcall.LoopingCallDone(retvalue=False) else: nsleep = random.randint(0, 10) value = round(float(nsleep) / 10, 2) time.sleep(value) elif ret == errno.ENODATA: status['state'] = 'deleted' fExit = True else: nRetry -= 1 time.sleep(3) continue except Exception as e: LOG.error(_LE('Flexvisor failed to get event %(volume)s ' '(%(status)s).'), {'volume': eventid, 'status': e}) raise loopingcall.LoopingCallDone(retvalue=False) if fExit is True: break return status def _join_volume_group(self, volume, cgId): # Join volume group if consistency group id not empty msg = '' try: ret, output = self.dpl.join_vg( self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(cgId)) except Exception as e: ret = errno.EFAULT msg = _('Fexvisor failed to add volume %(id)s ' 'due to %(reason)s.') % {"id": volume['id'], "reason": six.text_type(e)} if ret: if not msg: msg = _('Flexvisor failed to add volume %(id)s ' 'to group %(cgid)s.') % {'id': volume['id'], 'cgid': cgId} raise exception.VolumeBackendAPIException(data=msg) else: 
LOG.info(_LI('Flexvisor succeeded to add volume %(id)s to ' 'group %(cgid)s.'), {'id': volume['id'], 'cgid': cgId}) def _leave_volume_group(self, volume, cgId): # Leave volume group if consistency group id not empty msg = '' try: ret, output = self.dpl.leave_vg( self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(cgId)) except Exception as e: ret = errno.EFAULT msg = _('Fexvisor failed to remove volume %(id)s ' 'due to %(reason)s.') % {"id": volume['id'], "reason": six.text_type(e)} if ret: if not msg: msg = _('Flexvisor failed to remove volume %(id)s ' 'from group %(cgid)s.') % {'id': volume['id'], 'cgid': cgId} raise exception.VolumeBackendAPIException(data=msg) else: LOG.info(_LI('Flexvisor succeeded to remove volume %(id)s from ' 'group %(cgid)s.'), {'id': volume['id'], 'cgid': cgId}) def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID): snapshotID = None ret, out = self.dpl.query_vdev_snapshot(vgID, vgsnapshotID, True) if ret == 0: volumes = out.get('metadata', {}).get('member', {}) if volumes: snapshotID = volumes.get(volumeID, None) else: msg = _('Flexvisor failed to get snapshot id of volume ' '%(id)s from group %(vgid)s.') % {'id': volumeID, 'vgid': vgID} raise exception.VolumeBackendAPIException(data=msg) if not snapshotID: msg = _('Flexvisor could not find volume %(id)s snapshot in' ' the group %(vgid)s snapshot ' '%(vgsid)s.') % {'id': volumeID, 'vgid': vgID, 'vgsid': vgsnapshotID} raise exception.VolumeBackendAPIException(data=msg) return snapshotID def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" LOG.info(_LI('Start to create consistency group: %(group_name)s ' 'id: %(id)s'), {'group_name': group['name'], 'id': group['id']}) model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} try: ret, output = self.dpl.create_vg( self._conver_uuid2hex(group['id']), group['name'], group['description']) if ret: msg = _('Failed to create consistency group ' '%(id)s:%(ret)s.') % {'id': group['id'], 'ret': ret} raise exception.VolumeBackendAPIException(data=msg) else: return model_update except Exception as e: msg = _('Failed to create consistency group ' '%(id)s due to %(reason)s.') % {'id': group['id'], 'reason': six.text_type(e)} raise exception.VolumeBackendAPIException(data=msg) def delete_consistencygroup(self, context, group, volumes): """Delete a consistency group.""" ret = 0 volumes = self.db.volume_get_all_by_group( context, group['id']) model_update = {} model_update['status'] = group['status'] LOG.info(_LI('Start to delete consistency group: %(cg_name)s'), {'cg_name': group['id']}) try: self.dpl.delete_vg(self._conver_uuid2hex(group['id'])) except Exception as e: msg = _('Failed to delete consistency group %(id)s ' 'due to %(reason)s.') % {'id': group['id'], 'reason': six.text_type(e)} raise exception.VolumeBackendAPIException(data=msg) for volume_ref in volumes: try: self.dpl.delete_vdev(self._conver_uuid2hex(volume_ref['id'])) volume_ref['status'] = 'deleted' except Exception: ret = errno.EFAULT volume_ref['status'] = 'error_deleting' model_update['status'] = ( fields.ConsistencyGroupStatus.ERROR_DELETING) if ret == 0: model_update['status'] = fields.ConsistencyGroupStatus.DELETED return model_update, volumes def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" snapshots = objects.SnapshotList().get_all_for_cgsnapshot( 
context, cgsnapshot['id']) model_update = {} LOG.info(_LI('Start to create cgsnapshot for consistency group' ': %(group_name)s'), {'group_name': cgsnapshot['consistencygroup_id']}) try: self.dpl.create_vdev_snapshot( self._conver_uuid2hex(cgsnapshot['consistencygroup_id']), self._conver_uuid2hex(cgsnapshot['id']), cgsnapshot['name'], cgsnapshot.get('description', ''), True) for snapshot in snapshots: snapshot.status = 'available' except Exception as e: msg = _('Failed to create cg snapshot %(id)s ' 'due to %(reason)s.') % {'id': cgsnapshot['id'], 'reason': six.text_type(e)} raise exception.VolumeBackendAPIException(data=msg) model_update['status'] = 'available' return model_update, snapshots def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" snapshots = objects.SnapshotList().get_all_for_cgsnapshot( context, cgsnapshot['id']) model_update = {} model_update['status'] = cgsnapshot['status'] LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: ' '%(group_name)s'), {'snap_name': cgsnapshot['id'], 'group_name': cgsnapshot['consistencygroup_id']}) try: self.dpl.delete_vdev_snapshot( self._conver_uuid2hex(cgsnapshot['consistencygroup_id']), self._conver_uuid2hex(cgsnapshot['id']), True) for snapshot in snapshots: snapshot.status = 'deleted' except Exception as e: msg = _('Failed to delete cgsnapshot %(id)s due to ' '%(reason)s.') % {'id': cgsnapshot['id'], 'reason': six.text_type(e)} raise exception.VolumeBackendAPIException(data=msg) model_update['status'] = 'deleted' return model_update, snapshots def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): addvollist = [] removevollist = [] cgid = group['id'] vid = '' model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} # Get current group info in backend storage. ret, output = self.dpl.get_vg(self._conver_uuid2hex(cgid)) if ret == 0: group_members = output.get('children', []) if add_volumes: addvollist = add_volumes if remove_volumes: removevollist = remove_volumes # Process join volumes. try: for volume in addvollist: vid = volume['id'] # Verify the volume exists in the group or not. if self._conver_uuid2hex(vid) in group_members: continue self._join_volume_group(volume, cgid) except Exception as e: msg = _("Fexvisor failed to join the volume %(vol)s in the " "group %(group)s due to " "%(ret)s.") % {"vol": vid, "group": cgid, "ret": six.text_type(e)} raise exception.VolumeBackendAPIException(data=msg) # Process leave volumes. 
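        # leave_vg() mirrors join_vg(): both PUT to /dpl_volgroup/<groupID>/
        # with a payload along these lines (hypothetical volume id):
        #
        #     {'metadata': {'volume_group_operation': 'leave',
        #                   'volume': ['a1b2c3d4e5f60718293a4b5c6d7e8f90']}}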
try: for volume in removevollist: vid = volume['id'] if self._conver_uuid2hex(vid) in group_members: self._leave_volume_group(volume, cgid) except Exception as e: msg = _("Flexvisor failed to remove the volume %(vol)s from the " "group %(group)s due to " "%(ret)s.") % {"vol": vid, "group": cgid, "ret": six.text_type(e)} raise exception.VolumeBackendAPIException(data=msg) return model_update, None, None def create_volume(self, volume): """Create a volume.""" pool = volume_utils.extract_host(volume['host'], level='pool') if not pool: if not self.configuration.dpl_pool: msg = _("Pool is not available in the volume host fields.") raise exception.InvalidHost(reason=msg) else: pool = self.configuration.dpl_pool ret, output = self.dpl.create_vdev( self._conver_uuid2hex(volume['id']), volume.get('display_name', ''), volume.get('display_description', ''), pool, int(volume['size']) * units.Gi, self.configuration.san_thin_provision) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to create volume %(volume)s: ' '%(status)s.') % {'volume': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to create volume (failed to get ' 'event) %s.') % (volume['id']) raise exception.VolumeBackendAPIException( data=msg) elif ret != 0: msg = _('Flexvisor failed to create volume %(volumeid)s: ' '%(status)s.') % {'volumeid': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info(_LI('Flexvisor succeeded to create volume %(id)s.'), {'id': volume['id']}) if volume.get('consistencygroup_id', None): try: self._join_volume_group(volume, volume['consistencygroup_id']) except Exception: # Delete volume if volume failed to join group. self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) msg = _('Flexvisor failed to create volume %(id)s in the ' 'group %(vgid)s.') % { 'id': volume['id'], 'vgid': volume['consistencygroup_id']} raise exception.VolumeBackendAPIException(data=msg) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" src_volume = None vgID = None # Detect whether the source snapshot is a member of a group. snapshotID = snapshot['id'] # Try to get the cgid if the volume belongs to a group. src_volumeID = snapshot['volume_id'] cgsnapshotID = snapshot.get('cgsnapshot_id', None) if cgsnapshotID: try: src_volume = self.db.volume_get(src_volumeID) except Exception: msg = _("Flexvisor unable to find the source volume " "%(id)s info.") % {'id': src_volumeID} raise exception.VolumeBackendAPIException(data=msg) if src_volume: vgID = src_volume.get('consistencygroup_id', None) # Get the volume origin snapshot id if the source snapshot is a group # snapshot.
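# NOTE: for a cgsnapshot, snapshot['id'] identifies the group snapshot,
# not the per-volume snapshot actually taken on the backend; the
# per-volume snapshot id has to be looked up in the group snapshot's
# member metadata via _get_snapshotid_of_vgsnapshot() above.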
if vgID: snapshotID = self._get_snapshotid_of_vgsnapshot( self._conver_uuid2hex(vgID), self._conver_uuid2hex(cgsnapshotID), self._conver_uuid2hex(src_volumeID)) pool = volume_utils.extract_host(volume['host'], level='pool') if not pool: if not self.configuration.dpl_pool: msg = _("Pool is not available in the volume host fields.") raise exception.InvalidHost(reason=msg) else: pool = self.configuration.dpl_pool ret, output = self.dpl.create_vdev_from_snapshot( self._conver_uuid2hex(volume['id']), volume.get('display_name', ''), volume.get('display_description', ''), self._conver_uuid2hex(snapshotID), pool, self.configuration.san_thin_provision) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to create volume from ' 'snapshot %(id)s:' '%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: msg = _('Flexvisor failed to create volume from snapshot ' '(failed to get event) ' '%(id)s.') % {'id': snapshot['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor failed to create volume from snapshot ' '%(id)s: %(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info(_LI('Flexvisor succeeded to create volume %(id)s ' 'from snapshot.'), {'id': volume['id']}) if volume.get('consistencygroup_id', None): try: self._join_volume_group(volume, volume['consistencygroup_id']) except Exception: # Delete volume if volume failed to join group. self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) raise def spawn_volume_from_snapshot(self, volume, snapshot): """Spawn a REFERENCED volume from a snapshot.""" ret, output = self.dpl.spawn_vdev_from_snapshot( self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(snapshot['volume_id']), volume.get('display_name', ''), volume.get('display_description', ''), self._conver_uuid2hex(snapshot['id'])) if ret == errno.EAGAIN: # its an async process ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to spawn volume from snapshot ' '%(id)s:%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to spawn volume from snapshot ' '(failed to get event) ' '%(id)s.') % {'id': snapshot['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor failed to create volume from snapshot ' '%(id)s: %(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info(_LI('Flexvisor succeeded to create volume %(id)s ' 'from snapshot.'), {'id': volume['id']}) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" pool = volume_utils.extract_host(volume['host'], level='pool') if not pool: if not self.configuration.dpl_pool: msg = _("Pool is not available in the volume host fields.") raise exception.InvalidHost(reason=msg) else: pool = self.configuration.dpl_pool ret, output = self.dpl.clone_vdev( self._conver_uuid2hex(src_vref['id']), self._conver_uuid2hex(volume['id']), pool, volume.get('display_name', ''), volume.get('display_description', ''), int(volume['size']) * units.Gi, 
self.configuration.san_thin_provision) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to clone volume %(id)s: ' '%(status)s.') % {'id': src_vref['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to clone volume (failed to' ' get event) %(id)s.') % {'id': src_vref['id']} raise exception.VolumeBackendAPIException( data=msg) elif ret != 0: msg = _('Flexvisor failed to clone volume %(id)s: ' '%(status)s.') % {'id': src_vref['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info(_LI('Flexvisor succeeded to clone volume %(id)s.'), {'id': volume['id']}) if volume.get('consistencygroup_id', None): try: self._join_volume_group(volume, volume['consistencygroup_id']) except Exception: # Delete volume if volume failed to join group. self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) msg = _('Flexvisor volume %(id)s failed to join group ' '%(vgid)s.') % {'id': volume['id'], 'vgid': volume['consistencygroup_id']} raise exception.VolumeBackendAPIException(data=msg) def delete_volume(self, volume): """Deletes a volume.""" ret = 0 if volume.get('consistencygroup_id', None): msg = '' try: ret, out = self.dpl.leave_vg( self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(volume['consistencygroup_id'])) if ret: LOG.warning(_LW('Flexvisor failed to delete volume ' '%(id)s from the group %(vgid)s.'), {'id': volume['id'], 'vgid': volume['consistencygroup_id']}) except Exception as e: LOG.warning(_LW('Flexvisor failed to delete volume %(id)s ' 'from group %(vgid)s due to %(status)s.'), {'id': volume['id'], 'vgid': volume['consistencygroup_id'], 'status': e}) if ret: ret = 0 ret, output = self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) if ret == errno.EAGAIN: status = self._wait_event(self.dpl.get_vdev, volume['id']) if status['state'] == 'error': msg = _('Flexvisor failed deleting volume %(id)s: ' '%(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: ret = 0 LOG.info(_LI('Flexvisor volume %(id)s does not ' 'exist.'), {'id': volume['id']}) elif ret != 0: msg = _('Flexvisor failed to delete volume %(id)s: ' '%(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) def extend_volume(self, volume, new_size): ret, output = self.dpl.extend_vdev(self._conver_uuid2hex(volume['id']), volume.get('display_name', ''), volume.get('display_description', ''), new_size * units.Gi) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to extend volume ' '%(id)s:%(status)s.') % {'id': volume, 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: msg = _('Flexvisor failed to extend volume ' '(failed to get event) ' '%(id)s.') % {'id': volume['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor failed to extend volume ' '%(id)s: %(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info(_LI('Flexvisor succeeded to extend volume' ' %(id)s.'), {'id': volume['id']}) def create_snapshot(self, snapshot): """Creates a snapshot.""" ret, output = 
self.dpl.create_vdev_snapshot( self._conver_uuid2hex(snapshot['volume_id']), self._conver_uuid2hex(snapshot['id']), snapshot.get('display_name', ''), snapshot.get('display_description', '')) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, snapshot['volume_id'], event_uuid) if status['state'] != 'available': msg = (_('Flexvisor failed to create snapshot for volume ' '%(id)s: %(status)s.') % {'id': snapshot['volume_id'], 'status': ret}) raise exception.VolumeBackendAPIException(data=msg) else: msg = (_('Flexvisor failed to create snapshot for volume ' '(failed to get event) %(id)s.') % {'id': snapshot['volume_id']}) raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor failed to create snapshot for volume %(id)s: ' '%(status)s.') % {'id': snapshot['volume_id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" ret, output = self.dpl.delete_vdev_snapshot( self._conver_uuid2hex(snapshot['volume_id']), self._conver_uuid2hex(snapshot['id'])) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, snapshot['volume_id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to delete snapshot %(id)s: ' '%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to delete snapshot (failed to ' 'get event) %(id)s.') % {'id': snapshot['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: LOG.info(_LI('Flexvisor snapshot %(id)s does not exist.'), {'id': snapshot['id']}) elif ret != 0: msg = _('Flexvisor failed to delete snapshot %(id)s: ' '%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: LOG.info(_LI('Flexvisor succeeded to delete snapshot %(id)s.'), {'id': snapshot['id']}) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, update the stats first. """ if refresh: self._update_volume_stats() return self._stats def _get_pools(self): pools = [] qpools = [] # Pool to access is defined by the cinder configuration. defined_pool = self.configuration.dpl_pool if defined_pool: qpools.append(defined_pool) else: try: ret, output = self.dpl.get_pools() if ret == 0: for poolUuid, poolName in output.get('children', []): qpools.append(poolUuid) else: LOG.error(_LE("Flexvisor failed to get pool list. " "(Error: %d)"), ret) except Exception as e: LOG.error(_LE("Flexvisor failed to get pool list due to " "%s."), e) # Query pool detail information for poolid in qpools: ret, output = self._get_pool_info(poolid) if ret == 0: pool = {} pool['pool_name'] = output['metadata']['pool_uuid'] pool['total_capacity_gb'] = ( self._convert_size_GB( int(output['metadata']['total_capacity']))) pool['free_capacity_gb'] = ( self._convert_size_GB( int(output['metadata']['available_capacity']))) pool['allocated_capacity_gb'] = ( self._convert_size_GB( int(output['metadata']['used_capacity']))) pool['QoS_support'] = False pool['reserved_percentage'] = 0 pools.append(pool) else: LOG.warning(_LW("Failed to query pool %(id)s status " "%(ret)d."), {'id': poolid, 'ret': ret}) continue return pools def _update_volume_stats(self, refresh=False): """Return the current state of the volume service. If 'refresh' is True, run the update first.
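Illustrative (non-exhaustive) shape of the reported stats, using the
keys populated below; the values shown are examples only:

    {'volume_backend_name': <configured backend name>,
     'vendor_name': <from get_server_info()>,
     'driver_version': <from get_server_info()>,
     'storage_protocol': 'iSCSI',
     'location_info': '<driver class>:<san_ip>:<dpl_pool>',
     'consistencygroup_support': True,
     'pools': <per-pool capacity dicts from _get_pools()>}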
""" data = {} pools = self._get_pools() data['volume_backend_name'] = ( self.configuration.safe_get('volume_backend_name')) location_info = '%(driver)s:%(host)s:%(volume)s' % { 'driver': self.__class__.__name__, 'host': self.configuration.san_ip, 'volume': self.configuration.dpl_pool } try: ret, output = self.dpl.get_server_info() if ret == 0: data['vendor_name'] = output['metadata']['vendor'] data['driver_version'] = output['metadata']['version'] data['storage_protocol'] = 'iSCSI' data['location_info'] = location_info data['consistencygroup_support'] = True data['pools'] = pools self._stats = data except Exception as e: LOG.error(_LE('Failed to get server info due to ' '%(state)s.'), {'state': e}) return self._stats def do_setup(self, context): """Any initialization the volume driver does while starting.""" self.context = context LOG.info(_LI('Activate Flexvisor cinder volume driver.')) def check_for_setup_error(self): """Check DPL can connect properly.""" pass def _get_pool_info(self, poolid): """Query pool information.""" ret, output = self.dpl.get_pool(poolid) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_pool_status, poolid, event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to get pool info %(id)s: ' '%(status)s.') % {'id': poolid, 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: ret = 0 output = status.get('output', {}) else: LOG.error(_LE('Flexvisor failed to get pool %(id)s info.'), {'id': poolid}) raise exception.VolumeBackendAPIException( data="failed to get event") elif ret != 0: msg = _('Flexvisor failed to get pool info %(id)s: ' '%(status)s.') % {'id': poolid, 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: LOG.debug('Flexvisor succeeded to get pool info.') return ret, output cinder-8.0.0/cinder/volume/drivers/ibm/0000775000567000056710000000000012701406543021126 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/ibm/flashsystem_iscsi.py0000664000567000056710000003424012701406250025232 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for IBM FlashSystem storage systems with iSCSI protocol. Limitations: 1. Cinder driver only works when open_access_enabled=off. """ import random import threading from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE, _LW from cinder import utils import cinder.volume.driver from cinder.volume.drivers.ibm import flashsystem_common as fscommon from cinder.volume.drivers.san import san LOG = logging.getLogger(__name__) flashsystem_iscsi_opts = [ cfg.IntOpt('flashsystem_iscsi_portid', default=0, help='Default iSCSI Port ID of FlashSystem. 
' '(Default port is 0.)') ] CONF = cfg.CONF CONF.register_opts(flashsystem_iscsi_opts) class FlashSystemISCSIDriver(fscommon.FlashSystemDriver, cinder.volume.driver.ISCSIDriver): """IBM FlashSystem iSCSI volume driver. Version history: 1.0.0 - Initial driver 1.0.1 - Code clean up 1.0.2 - Add lock into vdisk map/unmap, connection initialize/terminate 1.0.3 - Initial driver for iSCSI 1.0.4 - Split Flashsystem driver into common and FC 1.0.5 - Report capability of volume multiattach 1.0.6 - Fix bug #1469581, add I/T mapping check in terminate_connection 1.0.7 - Fix bug #1505477, add host name check in _find_host_exhaustive for FC """ VERSION = "1.0.7" def __init__(self, *args, **kwargs): super(FlashSystemISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(fscommon.flashsystem_opts) self.configuration.append_config_values(flashsystem_iscsi_opts) self.configuration.append_config_values(san.san_opts) def _check_vdisk_params(self, params): # Check that the requested protocol is enabled if not params['protocol'] in self._protocol: msg = (_("'%(prot)s' is invalid for " "flashsystem_connection_protocol " "in config file. valid value(s) are " "%(enabled)s.") % {'prot': params['protocol'], 'enabled': self._protocol}) raise exception.InvalidInput(reason=msg) # Check if iscsi_ip is set when protocol is iSCSI if params['protocol'] == 'iSCSI' and params['iscsi_ip'] == 'None': msg = _("iscsi_ip_address must be set in config file when " "using protocol 'iSCSI'.") raise exception.InvalidInput(reason=msg) def _create_host(self, connector): """Create a new host on the storage system. We create a host and associate it with the given connection information. """ LOG.debug('enter: _create_host: host %s.', connector['host']) rand_id = six.text_type(random.randint(0, 99999999)).zfill(8) host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector), rand_id) ports = [] if 'iSCSI' == self._protocol and 'initiator' in connector: ports.append('-iscsiname %s' % connector['initiator']) self._driver_assert(ports, (_('_create_host: No connector ports.'))) port1 = ports.pop(0) arg_name, arg_val = port1.split() ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name', '"%s"' % host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return('successfully created' in out, '_create_host', ssh_cmd, out, err) for port in ports: arg_name, arg_val = port.split() ssh_cmd = ['svctask', 'addhostport', '-force', arg_name, arg_val, host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( (not out.strip()), '_create_host', ssh_cmd, out, err) LOG.debug( 'leave: _create_host: host %(host)s - %(host_name)s.', {'host': connector['host'], 'host_name': host_name}) return host_name def _find_host_exhaustive(self, connector, hosts): for host in hosts: ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( out.strip(), '_find_host_exhaustive', ssh_cmd, out, err) for attr_line in out.split('\n'): # If '!' 
not found, return the string and two empty strings attr_name, foo, attr_val = attr_line.partition('!') if (attr_name == 'iscsi_name' and 'initiator' in connector and attr_val == connector['initiator']): return host return None def _get_vdisk_map_properties( self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): """Get the map properties of vdisk.""" LOG.debug( 'enter: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) preferred_node = '0' IO_group = '0' # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] for k, node in self._storage_nodes.items(): if vdisk_params['protocol'] != node['protocol']: continue if node['id'] == preferred_node: preferred_node_entry = node if node['IO_group'] == IO_group: io_group_nodes.append(node) if not io_group_nodes: msg = (_('No node found in I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': vdisk_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning(_LW('_get_vdisk_map_properties: Did not find a ' 'preferred node for vdisk %s.'), vdisk_name) properties = { 'target_discovered': False, 'target_lun': lun_id, 'volume_id': vdisk_id, } type_str = 'iscsi' if preferred_node_entry['ipv4']: ipaddr = preferred_node_entry['ipv4'][0] else: ipaddr = preferred_node_entry['ipv6'][0] iscsi_port = self.configuration.iscsi_port properties['target_portal'] = '%s:%s' % (ipaddr, iscsi_port) properties['target_iqn'] = preferred_node_entry['iscsi_name'] LOG.debug( 'leave: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) return {'driver_volume_type': type_str, 'data': properties} @utils.synchronized('flashsystem-init-conn', external=True) def initialize_connection(self, volume, connector): """Perform work so that an iSCSI connection can be made. To be able to create an iSCSI connection from a given host to a volume, we must: 1. Translate the given iSCSI name to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug( 'enter: initialize_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) vdisk_name = volume['name'] vdisk_id = volume['id'] vdisk_params = self._get_vdisk_params(volume['volume_type_id']) self._wait_vdisk_copy_completed(vdisk_name) self._driver_assert( self._is_vdisk_defined(vdisk_name), (_('vdisk %s is not defined.') % vdisk_name)) lun_id = self._map_vdisk_to_host(vdisk_name, connector) properties = {} try: properties = self._get_vdisk_map_properties( connector, lun_id, vdisk_name, vdisk_id, vdisk_params) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) LOG.error(_LE('Failed to collect return properties for ' 'volume %(vol)s and connector %(conn)s.'), {'vol': volume, 'conn': connector}) LOG.debug( 'leave: initialize_connection:\n volume: %(vol)s\n connector ' '%(conn)s\n properties: %(prop)s.', {'vol': volume, 'conn': connector, 'prop': properties}) return properties @utils.synchronized('flashsystem-term-conn', external=True) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. 
Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug( 'enter: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) self._unmap_vdisk_from_host(vdisk_name, connector) LOG.debug( 'leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) return {'driver_volume_type': 'iscsi'} def _get_iscsi_ip_addrs(self): """get ip address of iSCSI interface.""" LOG.debug('enter: _get_iscsi_ip_addrs') cmd = ['svcinfo', 'lsportip'] generator = self._port_conf_generator(cmd) header = next(generator, None) if not header: return for key in self._storage_nodes: if self._storage_nodes[key]['config_node'] == 'yes': node = self._storage_nodes[key] break if node is None: msg = _('No config node found.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for port_data in generator: try: port_ipv4 = port_data['IP_address'] port_ipv6 = port_data['IP_address_6'] state = port_data['state'] speed = port_data['speed'] except KeyError: self._handle_keyerror('lsportip', header) if port_ipv4 == self.configuration.iscsi_ip_address and ( port_data['id'] == ( six.text_type( self.configuration.flashsystem_iscsi_portid))): if state not in ('configured', 'online'): msg = (_('State of node is wrong. Current state is %s.') % state) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if state in ('configured', 'online') and speed != 'NONE': if port_ipv4: node['ipv4'].append(port_ipv4) if port_ipv6: node['ipv6'].append(port_ipv6) break if not (len(node['ipv4']) or len(node['ipv6'])): msg = _('No ip address found.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('leave: _get_iscsi_ip_addrs') def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" LOG.debug('enter: do_setup') self._context = ctxt # Get data of configured node self._get_node_data() # Get the iSCSI IP addresses of the FlashSystem nodes self._get_iscsi_ip_addrs() for k, node in self._storage_nodes.items(): if self.configuration.flashsystem_connection_protocol == 'iSCSI': if (len(node['ipv4']) or len(node['ipv6']) and len(node['iscsi_name'])): node['protocol'] = 'iSCSI' self._protocol = 'iSCSI' # Set for vdisk synchronization self._vdisk_copy_in_progress = set() self._vdisk_copy_lock = threading.Lock() self._check_lock_interval = 5 LOG.debug('leave: do_setup') def _build_default_params(self): protocol = self.configuration.flashsystem_connection_protocol if protocol.lower() == 'iscsi': protocol = 'iSCSI' return { 'protocol': protocol, 'iscsi_ip': self.configuration.iscsi_ip_address, 'iscsi_port': self.configuration.iscsi_port, 'iscsi_ported': self.configuration.flashsystem_iscsi_portid, } def validate_connector(self, connector): """Check connector for enabled protocol.""" valid = False if 'iSCSI' == self._protocol and 'initiator' in connector: valid = True if not valid: msg = _LE('The connector does not contain the ' 'required information: initiator is missing') LOG.error(msg) raise exception.InvalidConnectorException(missing=( 'initiator')) cinder-8.0.0/cinder/volume/drivers/ibm/storwize_svc/0000775000567000056710000000000012701406543023667 5ustar 
jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py0000664000567000056710000041754112701406257030370 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import math import paramiko import random import re import string import time import unicodedata from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units import six from cinder import context from cinder import exception from cinder import ssh_utils from cinder import utils as cinder_utils from cinder.i18n import _, _LE, _LI, _LW from cinder.objects import fields from cinder.volume import driver from cinder.volume.drivers.ibm.storwize_svc import ( replication as storwize_rep) from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import utils from cinder.volume import volume_types INTERVAL_1_SEC = 1 DEFAULT_TIMEOUT = 15 LOG = logging.getLogger(__name__) storwize_svc_opts = [ cfg.ListOpt('storwize_svc_volpool_name', default=['volpool'], help='Comma separated list of storage system storage ' 'pools for volumes.'), cfg.IntOpt('storwize_svc_vol_rsize', default=2, min=-1, max=100, help='Storage system space-efficiency parameter for volumes ' '(percentage)'), cfg.IntOpt('storwize_svc_vol_warning', default=0, min=-1, max=100, help='Storage system threshold for volume capacity warnings ' '(percentage)'), cfg.BoolOpt('storwize_svc_vol_autoexpand', default=True, help='Storage system autoexpand parameter for volumes ' '(True/False)'), cfg.IntOpt('storwize_svc_vol_grainsize', default=256, help='Storage system grain size parameter for volumes ' '(32/64/128/256)'), cfg.BoolOpt('storwize_svc_vol_compression', default=False, help='Storage system compression option for volumes'), cfg.BoolOpt('storwize_svc_vol_easytier', default=True, help='Enable Easy Tier for volumes'), cfg.IntOpt('storwize_svc_vol_iogrp', default=0, help='The I/O group in which to allocate volumes'), cfg.IntOpt('storwize_svc_flashcopy_timeout', default=120, min=1, max=600, help='Maximum number of seconds to wait for FlashCopy to be ' 'prepared.'), cfg.BoolOpt('storwize_svc_multihostmap_enabled', default=True, help='This option no longer has any affect. It is deprecated ' 'and will be removed in the next release.', deprecated_for_removal=True), cfg.BoolOpt('storwize_svc_allow_tenant_qos', default=False, help='Allow tenants to specify QOS on create'), cfg.StrOpt('storwize_svc_stretched_cluster_partner', default=None, help='If operating in stretched cluster mode, specify the ' 'name of the pool in which mirrored copies are stored.' 
'Example: "pool2"'), cfg.StrOpt('storwize_san_secondary_ip', default=None, help='Specifies secondary management IP or hostname to be ' 'used if san_ip is invalid or becomes inaccessible.'), cfg.BoolOpt('storwize_svc_vol_nofmtdisk', default=False, help='Specifies that the volume not be formatted during ' 'creation.'), cfg.IntOpt('storwize_svc_flashcopy_rate', default=50, min=1, max=100, help='Specifies the Storwize FlashCopy copy rate to be used ' 'when creating a full volume copy. The default is rate ' 'is 50, and the valid rates are 1-100.'), ] CONF = cfg.CONF CONF.register_opts(storwize_svc_opts) class StorwizeSSH(object): """SSH interface to IBM Storwize family and SVC storage systems.""" def __init__(self, run_ssh): self._ssh = run_ssh def _run_ssh(self, ssh_cmd): try: return self._ssh(ssh_cmd) except processutils.ProcessExecutionError as e: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def run_ssh_info(self, ssh_cmd, delim='!', with_header=False): """Run an SSH command and return parsed output.""" raw = self._run_ssh(ssh_cmd) return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim, with_header=with_header) def run_ssh_assert_no_output(self, ssh_cmd): """Run an SSH command and assert no output returned.""" out, err = self._run_ssh(ssh_cmd) if len(out.strip()) != 0: msg = (_('Expected no output from CLI command %(cmd)s, ' 'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def run_ssh_check_created(self, ssh_cmd): """Run an SSH command and return the ID of the created object.""" out, err = self._run_ssh(ssh_cmd) try: match_obj = re.search(r'\[([0-9]+)\],? 
successfully created', out) return match_obj.group(1) except (AttributeError, IndexError): msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def lsnode(self, node_id=None): with_header = True ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!'] if node_id: with_header = False ssh_cmd.append(node_id) return self.run_ssh_info(ssh_cmd, with_header=with_header) def lslicense(self): ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!'] return self.run_ssh_info(ssh_cmd)[0] def lsguicapabilities(self): ssh_cmd = ['svcinfo', 'lsguicapabilities', '-delim', '!'] return self.run_ssh_info(ssh_cmd)[0] def lssystem(self): ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!'] return self.run_ssh_info(ssh_cmd)[0] def lsmdiskgrp(self, pool): ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', '"%s"' % pool] return self.run_ssh_info(ssh_cmd)[0] def lsiogrp(self): ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lsportip(self): ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) @staticmethod def _create_port_arg(port_type, port_name): if port_type == 'initiator': port = ['-iscsiname'] else: port = ['-hbawwpn'] port.append(port_name) return port def mkhost(self, host_name, port_type, port_name): port = self._create_port_arg(port_type, port_name) ssh_cmd = ['svctask', 'mkhost', '-force'] + port ssh_cmd += ['-name', '"%s"' % host_name] return self.run_ssh_check_created(ssh_cmd) def addhostport(self, host, port_type, port_name): port = self._create_port_arg(port_type, port_name) ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def lshost(self, host=None): with_header = True ssh_cmd = ['svcinfo', 'lshost', '-delim', '!'] if host: with_header = False ssh_cmd.append('"%s"' % host) return self.run_ssh_info(ssh_cmd, with_header=with_header) def add_chap_secret(self, secret, host): ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def lsiscsiauth(self): ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lsfabric(self, wwpn=None, host=None): ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!'] if wwpn: ssh_cmd.extend(['-wwpn', wwpn]) elif host: ssh_cmd.extend(['-host', '"%s"' % host]) else: msg = (_('Must pass wwpn or host to lsfabric.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) return self.run_ssh_info(ssh_cmd, with_header=True) def mkvdiskhostmap(self, host, vdisk, lun, multihostmap): """Map vdisk to host. If vdisk already mapped and multihostmap is True, use the force flag. 
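Illustrative resulting CLI (hypothetical host 'h1', LUN '0',
vdisk 'v1', with multihostmap=True):

    svctask mkvdiskhostmap -force -host "h1" -scsi 0 v1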
""" ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host, '-scsi', lun, vdisk] if multihostmap: ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force') try: self.run_ssh_check_created(ssh_cmd) except Exception as ex: if (not multihostmap and hasattr(ex, 'message') and 'CMMVC6071E' in ex.message): LOG.error(_LE('storwize_svc_multihostmap_enabled is set ' 'to False, not allowing multi host mapping.')) raise exception.VolumeDriverException( message=_('CMMVC6071E The VDisk-to-host mapping was not ' 'created because the VDisk is already mapped ' 'to a host.\n"')) with excutils.save_and_reraise_exception(): LOG.error(_LE('Error mapping VDisk-to-host')) def mkrcrelationship(self, master, aux, system, name, asyncmirror): ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master, '-aux', aux, '-cluster', system, '-name', name] if asyncmirror: ssh_cmd.append('-global') return self.run_ssh_check_created(ssh_cmd) def rmrcrelationship(self, relationship): ssh_cmd = ['svctask', 'rmrcrelationship', relationship] self.run_ssh_assert_no_output(ssh_cmd) def switchrelationship(self, relationship, aux=True): primary = 'aux' if aux else 'master' ssh_cmd = ['svctask', 'switchrcrelationship', '-primary', primary, relationship] self.run_ssh_assert_no_output(ssh_cmd) def startrcrelationship(self, rc_rel, primary=None): ssh_cmd = ['svctask', 'startrcrelationship', '-force'] if primary: ssh_cmd.extend(['-primary', primary]) ssh_cmd.append(rc_rel) self.run_ssh_assert_no_output(ssh_cmd) def stoprcrelationship(self, relationship, access=False): ssh_cmd = ['svctask', 'stoprcrelationship'] if access: ssh_cmd.append('-access') ssh_cmd.append(relationship) self.run_ssh_assert_no_output(ssh_cmd) def lsrcrelationship(self, volume_name): key_value = 'name=%s' % volume_name ssh_cmd = ['svcinfo', 'lsrcrelationship', '-filtervalue', key_value, '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lspartnership(self, system_name): key_value = 'name=%s' % system_name ssh_cmd = ['svcinfo', 'lspartnership', '-filtervalue', key_value, '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lspartnershipcandidate(self): ssh_cmd = ['svcinfo', 'lspartnershipcandidate', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def mkippartnership(self, ip_v4, bandwith): ssh_cmd = ['svctask', 'mkippartnership', '-type', 'ipv4', '-clusterip', ip_v4, '-linkbandwidthmbits', six.text_type(bandwith)] return self.run_ssh_assert_no_output(ssh_cmd) def mkfcpartnership(self, system_name, bandwith): ssh_cmd = ['svctask', 'mkfcpartnership', '-linkbandwidthmbits', six.text_type(bandwith), system_name] return self.run_ssh_assert_no_output(ssh_cmd) def startpartnership(self, partnership_id): ssh_cmd = ['svctask', 'chpartnership', '-start', partnership_id] return self.run_ssh_assert_no_output(ssh_cmd) def rmvdiskhostmap(self, host, vdisk): ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host, vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsvdiskhostmap(self, vdisk): ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk] return self.run_ssh_info(ssh_cmd, with_header=True) def lshostvdiskmap(self, host): ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host] return self.run_ssh_info(ssh_cmd, with_header=True) def rmhost(self, host): ssh_cmd = ['svctask', 'rmhost', '"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def mkvdisk(self, name, size, units, pool, opts, params): ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp', '"%s"' % pool, '-iogrp', 
six.text_type(opts['iogrp']), '-size', size, '-unit', units] + params return self.run_ssh_check_created(ssh_cmd) def rmvdisk(self, vdisk, force=True): ssh_cmd = ['svctask', 'rmvdisk'] if force: ssh_cmd += ['-force'] ssh_cmd += [vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsvdisk(self, vdisk): """Return vdisk attributes or None if it doesn't exist.""" ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk] out, err = self._ssh(ssh_cmd, check_exit_code=False) if not len(err): return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', with_header=False)[0] if err.startswith('CMMVC5754E'): return None msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def lsvdisks_from_filter(self, filter_name, value): """Performs an lsvdisk command, filtering the results as specified. Returns an iterable for all matching vdisks. """ ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', '-filtervalue', '%s=%s' % (filter_name, value)] return self.run_ssh_info(ssh_cmd, with_header=True) def chvdisk(self, vdisk, params): ssh_cmd = ['svctask', 'chvdisk'] + params + [vdisk] self.run_ssh_assert_no_output(ssh_cmd) def movevdisk(self, vdisk, iogrp): ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, vdisk] self.run_ssh_assert_no_output(ssh_cmd) def expandvdisksize(self, vdisk, amount): ssh_cmd = ( ['svctask', 'expandvdisksize', '-size', six.text_type(amount), '-unit', 'gb', vdisk]) self.run_ssh_assert_no_output(ssh_cmd) def mkfcmap(self, source, target, full_copy, copy_rate, consistgrp=None): ssh_cmd = ['svctask', 'mkfcmap', '-source', source, '-target', target, '-autodelete'] if not full_copy: ssh_cmd.extend(['-copyrate', '0']) else: ssh_cmd.extend(['-copyrate', six.text_type(copy_rate)]) if consistgrp: ssh_cmd.extend(['-consistgrp', consistgrp]) out, err = self._ssh(ssh_cmd, check_exit_code=False) if 'successfully created' not in out: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], ' 'successfully created', out) fc_map_id = match_obj.group(1) except (AttributeError, IndexError): msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return fc_map_id def prestartfcmap(self, fc_map_id): ssh_cmd = ['svctask', 'prestartfcmap', fc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def startfcmap(self, fc_map_id): ssh_cmd = ['svctask', 'startfcmap', fc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def prestartfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group] self.run_ssh_assert_no_output(ssh_cmd) def startfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group] self.run_ssh_assert_no_output(ssh_cmd) def stopfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group] self.run_ssh_assert_no_output(ssh_cmd) def chfcmap(self, fc_map_id, copyrate='50', autodel='on'): ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate, '-autodelete', autodel, fc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def stopfcmap(self, fc_map_id): ssh_cmd = ['svctask', 'stopfcmap', 
fc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def rmfcmap(self, fc_map_id): ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def lsvdiskfcmappings(self, vdisk): ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!', vdisk] return self.run_ssh_info(ssh_cmd, with_header=True) def lsfcmap(self, fc_map_id): ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue', 'id=%s' % fc_map_id, '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lsfcconsistgrp(self, fc_consistgrp): ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp] out, err = self._ssh(ssh_cmd) return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', with_header=False) def mkfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group] return self.run_ssh_check_created(ssh_cmd) def rmfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group] return self.run_ssh_assert_no_output(ssh_cmd) def addvdiskcopy(self, vdisk, dest_pool, params): ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp', '"%s"' % dest_pool, vdisk]) return self.run_ssh_check_created(ssh_cmd) def lsvdiskcopy(self, vdisk, copy_id=None): ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!'] with_header = True if copy_id: ssh_cmd += ['-copy', copy_id] with_header = False ssh_cmd += [vdisk] return self.run_ssh_info(ssh_cmd, with_header=with_header) def lsvdisksyncprogress(self, vdisk, copy_id): ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!', '-copy', copy_id, vdisk] return self.run_ssh_info(ssh_cmd, with_header=True)[0] def rmvdiskcopy(self, vdisk, copy_id): ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, vdisk] self.run_ssh_assert_no_output(ssh_cmd) def addvdiskaccess(self, vdisk, iogrp): ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp, vdisk] self.run_ssh_assert_no_output(ssh_cmd) def rmvdiskaccess(self, vdisk, iogrp): ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsportfc(self, node_id): ssh_cmd = ['svcinfo', 'lsportfc', '-delim', '!', '-filtervalue', 'node_id=%s' % node_id] return self.run_ssh_info(ssh_cmd, with_header=True) class StorwizeHelpers(object): # All the supported QoS key are saved in this dict. When a new # key is going to add, three values MUST be set: # 'default': to indicate the value, when the parameter is disabled. # 'param': to indicate the corresponding parameter in the command. # 'type': to indicate the type of this value. svc_qos_keys = {'IOThrottling': {'default': '0', 'param': 'rate', 'type': int}} def __init__(self, run_ssh): self.ssh = StorwizeSSH(run_ssh) self.check_fcmapping_interval = 3 @staticmethod def handle_keyerror(cmd, out): msg = (_('Could not find key in output of command %(cmd)s: %(out)s.') % {'out': out, 'cmd': cmd}) raise exception.VolumeBackendAPIException(data=msg) def compression_enabled(self): """Return whether or not compression is enabled for this system.""" resp = self.ssh.lslicense() keys = ['license_compression_enclosures', 'license_compression_capacity'] for key in keys: if resp.get(key, '0') != '0': return True # lslicense is not used for V9000 compression check # compression_enclosures and compression_capacity are # always 0. 
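# (Note: the lsguicapabilities call used for this V9000 check, a few
# lines below, is best-effort: a VolumeBackendAPIException is only
# logged as a warning, after which compression is reported as
# unavailable.)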
V9000 uses license_scheme 9846 as an # indicator and can always do compression try: resp = self.ssh.lsguicapabilities() if resp.get('license_scheme', '0') == '9846': return True except exception.VolumeBackendAPIException as war: LOG.warning(_LW("Failed to run lsguicapability. " "Exception: %s."), war) return False def get_system_info(self): """Return system's name, ID, and code level.""" resp = self.ssh.lssystem() level = resp['code_level'] match_obj = re.search('([0-9].){3}[0-9]', level) if match_obj is None: msg = _('Failed to get code level (%s).') % level raise exception.VolumeBackendAPIException(data=msg) code_level = match_obj.group().split('.') return {'code_level': tuple([int(x) for x in code_level]), 'system_name': resp['name'], 'system_id': resp['id']} def get_pool_attrs(self, pool): """Return attributes for the specified pool.""" return self.ssh.lsmdiskgrp(pool) def get_available_io_groups(self): """Return list of available IO groups.""" iogrps = [] resp = self.ssh.lsiogrp() for iogrp in resp: try: if int(iogrp['node_count']) > 0: iogrps.append(int(iogrp['id'])) except KeyError: self.handle_keyerror('lsiogrp', iogrp) except ValueError: msg = (_('Expected integer for node_count, ' 'svcinfo lsiogrp returned: %(node)s.') % {'node': iogrp['node_count']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return iogrps def get_volume_io_group(self, vol_name): vdisk = self.ssh.lsvdisk(vol_name) if vdisk: resp = self.ssh.lsiogrp() for iogrp in resp: if iogrp['name'] == vdisk['IO_group_name']: return int(iogrp['id']) return None def get_node_info(self): """Return dictionary containing information on system's nodes.""" nodes = {} resp = self.ssh.lsnode() for node_data in resp: try: if node_data['status'] != 'online': continue node = {} node['id'] = node_data['id'] node['name'] = node_data['name'] node['IO_group'] = node_data['IO_group_id'] node['iscsi_name'] = node_data['iscsi_name'] node['WWNN'] = node_data['WWNN'] node['status'] = node_data['status'] node['WWPN'] = [] node['ipv4'] = [] node['ipv6'] = [] node['enabled_protocols'] = [] nodes[node['id']] = node except KeyError: self.handle_keyerror('lsnode', node_data) return nodes def add_iscsi_ip_addrs(self, storage_nodes): """Add iSCSI IP addresses to system node information.""" resp = self.ssh.lsportip() for ip_data in resp: try: state = ip_data['state'] if ip_data['node_id'] in storage_nodes and ( state == 'configured' or state == 'online'): node = storage_nodes[ip_data['node_id']] if len(ip_data['IP_address']): node['ipv4'].append(ip_data['IP_address']) if len(ip_data['IP_address_6']): node['ipv6'].append(ip_data['IP_address_6']) except KeyError: self.handle_keyerror('lsportip', ip_data) def add_fc_wwpns(self, storage_nodes): """Add FC WWPNs to system node information.""" for key in storage_nodes: node = storage_nodes[key] wwpns = set(node['WWPN']) resp = self.ssh.lsportfc(node_id=node['id']) for port_info in resp: if (port_info['type'] == 'fc' and port_info['status'] == 'active'): wwpns.add(port_info['WWPN']) node['WWPN'] = list(wwpns) LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s.'), {'node': node['id'], 'wwpn': node['WWPN']}) def add_chap_secret_to_host(self, host_name): """Generate and store a randomly-generated CHAP secret for the host.""" chap_secret = utils.generate_password() self.ssh.add_chap_secret(chap_secret, host_name) return chap_secret def get_chap_secret_for_host(self, host_name): """Generate and store a randomly-generated CHAP secret for the host.""" resp = self.ssh.lsiscsiauth() host_found = False 
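# Scan the iSCSI auth table: return the CHAP secret if the host uses
# CHAP, raise if the host does not appear in the table at all, and
# fall through to return None for a host that exists without CHAP.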
for host_data in resp: try: if host_data['name'] == host_name: host_found = True if host_data['iscsi_auth_method'] == 'chap': return host_data['iscsi_chap_secret'] except KeyError: self.handle_keyerror('lsiscsiauth', host_data) if not host_found: msg = _('Failed to find host %s.') % host_name raise exception.VolumeBackendAPIException(data=msg) return None def get_conn_fc_wwpns(self, host): wwpns = set() resp = self.ssh.lsfabric(host=host) for wwpn in resp.select('local_wwpn'): if wwpn is not None: wwpns.add(wwpn) return list(wwpns) def get_host_from_connector(self, connector): """Return the Storwize host described by the connector.""" LOG.debug('Enter: get_host_from_connector: %s.', connector) # If we have FC information, we have a faster lookup option host_name = None if 'wwpns' in connector: for wwpn in connector['wwpns']: resp = self.ssh.lsfabric(wwpn=wwpn) for wwpn_info in resp: try: if (wwpn_info['remote_wwpn'] and wwpn_info['name'] and wwpn_info['remote_wwpn'].lower() == wwpn.lower()): host_name = wwpn_info['name'] except KeyError: self.handle_keyerror('lsfabric', wwpn_info) if host_name: LOG.debug('Leave: get_host_from_connector: host %s.', host_name) return host_name # That didn't work, so try exhaustive search hosts_info = self.ssh.lshost() found = False for name in hosts_info.select('name'): resp = self.ssh.lshost(host=name) if 'initiator' in connector: for iscsi in resp.select('iscsi_name'): if iscsi == connector['initiator']: host_name = name found = True break elif 'wwpns' in connector and len(connector['wwpns']): connector_wwpns = [str(x).lower() for x in connector['wwpns']] for wwpn in resp.select('WWPN'): if wwpn and wwpn.lower() in connector_wwpns: host_name = name found = True break if found: break LOG.debug('Leave: get_host_from_connector: host %s.', host_name) return host_name def create_host(self, connector): """Create a new host on the storage system. We create a host name and associate it with the given connection information. The host name will be a cleaned up version of the given host name (at most 55 characters), plus a random 8-character suffix to avoid collisions. The total length should be at most 63 characters. """ LOG.debug('Enter: create_host: host %s.', connector['host']) # Before we start, make sure host name is a string and that we have at # least one port. host_name = connector['host'] if not isinstance(host_name, six.string_types): msg = _('create_host: Host name is not unicode or string.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) ports = [] if 'initiator' in connector: ports.append(['initiator', '%s' % connector['initiator']]) if 'wwpns' in connector: for wwpn in connector['wwpns']: ports.append(['wwpn', '%s' % wwpn]) if not len(ports): msg = _('create_host: No initiators or wwpns supplied.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) # Build a host name for the Storwize host - first clean up the name if isinstance(host_name, six.text_type): host_name = unicodedata.normalize('NFKD', host_name).encode( 'ascii', 'replace').decode('ascii') for num in range(0, 128): ch = str(chr(num)) if not ch.isalnum() and ch not in [' ', '.', '-', '_']: host_name = host_name.replace(ch, '-') # Storwize doesn't like hostname that doesn't starts with letter or _. 
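# e.g. (hypothetical) connector host '9node!test' becomes '9node-test'
# after the character replacement above, is prefixed to '_9node-test'
# below since it does not start with a letter, and finally gets a
# random 8-digit suffix, e.g. '_9node-test-01234567'.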
if not re.match('^[A-Za-z]', host_name): host_name = '_' + host_name # Add a random 8-character suffix to avoid collisions rand_id = str(random.randint(0, 99999999)).zfill(8) host_name = '%s-%s' % (host_name[:55], rand_id) # Create a host with one port port = ports.pop(0) self.ssh.mkhost(host_name, port[0], port[1]) # Add any additional ports to the host for port in ports: self.ssh.addhostport(host_name, port[0], port[1]) LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.', {'host': connector['host'], 'host_name': host_name}) return host_name def delete_host(self, host_name): self.ssh.rmhost(host_name) def map_vol_to_host(self, volume_name, host_name, multihostmap): """Create a mapping between a volume to a host.""" LOG.debug('Enter: map_vol_to_host: volume %(volume_name)s to ' 'host %(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) # Check if this volume is already mapped to this host mapped = False luns_used = [] result_lun = '-1' resp = self.ssh.lshostvdiskmap(host_name) for mapping_info in resp: luns_used.append(int(mapping_info['SCSI_id'])) if mapping_info['vdisk_name'] == volume_name: mapped = True result_lun = mapping_info['SCSI_id'] if not mapped: # Find unused lun luns_used.sort() result_lun = str(len(luns_used)) for index, n in enumerate(luns_used): if n > index: result_lun = str(index) break self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun, multihostmap) LOG.debug('Leave: map_vol_to_host: LUN %(result_lun)s, volume ' '%(volume_name)s, host %(host_name)s.', {'result_lun': result_lun, 'volume_name': volume_name, 'host_name': host_name}) return int(result_lun) def unmap_vol_from_host(self, volume_name, host_name): """Unmap the volume and delete the host if it has no more mappings.""" LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from ' 'host %(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) # Check if the mapping exists resp = self.ssh.lsvdiskhostmap(volume_name) if not len(resp): LOG.warning(_LW('unmap_vol_from_host: No mapping of volume ' '%(vol_name)s to any host found.'), {'vol_name': volume_name}) return if host_name is None: if len(resp) > 1: LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of ' 'volume %(vol_name)s found, no host ' 'specified.'), {'vol_name': volume_name}) return else: host_name = resp[0]['host_name'] else: found = False for h in resp.select('host_name'): if h == host_name: found = True if not found: LOG.warning(_LW('unmap_vol_from_host: No mapping of volume ' '%(vol_name)s to host %(host)s found.'), {'vol_name': volume_name, 'host': host_name}) # We now know that the mapping exists self.ssh.rmvdiskhostmap(host_name, volume_name) LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from ' 'host %(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) return host_name def check_host_mapped_vols(self, host_name): return self.ssh.lshostvdiskmap(host_name) @staticmethod def build_default_opts(config): # Ignore capitalization cluster_partner = config.storwize_svc_stretched_cluster_partner opt = {'rsize': config.storwize_svc_vol_rsize, 'warning': config.storwize_svc_vol_warning, 'autoexpand': config.storwize_svc_vol_autoexpand, 'grainsize': config.storwize_svc_vol_grainsize, 'compression': config.storwize_svc_vol_compression, 'easytier': config.storwize_svc_vol_easytier, 'iogrp': config.storwize_svc_vol_iogrp, 'qos': None, 'stretched_cluster': cluster_partner, 'replication': False, 'nofmtdisk': config.storwize_svc_vol_nofmtdisk} return opt @staticmethod def 
check_vdisk_opts(state, opts): # Check that grainsize is 32/64/128/256 if opts['grainsize'] not in [32, 64, 128, 256]: raise exception.InvalidInput( reason=_('Illegal value specified for ' 'storwize_svc_vol_grainsize: set to either ' '32, 64, 128, or 256.')) # Check that compression is supported if opts['compression'] and not state['compression_enabled']: raise exception.InvalidInput( reason=_('System does not support compression.')) # Check that rsize is set if compression is set if opts['compression'] and opts['rsize'] == -1: raise exception.InvalidInput( reason=_('If compression is set to True, rsize must ' 'also be set (not equal to -1).')) if opts['iogrp'] not in state['available_iogrps']: avail_grps = ''.join(str(e) for e in state['available_iogrps']) raise exception.InvalidInput( reason=_('I/O group %(iogrp)d is not valid; available ' 'I/O groups are %(avail)s.') % {'iogrp': opts['iogrp'], 'avail': avail_grps}) if opts['nofmtdisk'] and opts['rsize'] != -1: raise exception.InvalidInput( reason=_('If nofmtdisk is set to True, rsize must ' 'also be set to -1.')) def _get_opts_from_specs(self, opts, specs): qos = {} for k, value in specs.items(): # Get the scope, if using scope format key_split = k.split(':') if len(key_split) == 1: scope = None key = key_split[0] else: scope = key_split[0] key = key_split[1] # We generally do not look at capabilities in the driver, but # replication is a special case where the user asks for # a volume to be replicated, and we want both the scheduler and # the driver to act on the value. if ((not scope or scope == 'capabilities') and key == 'replication'): scope = None key = 'replication' words = value.split() if not (words and len(words) == 2 and words[0] == ''): LOG.error(_LE('Replication must be specified as ' '\' True\' or \' False\'.')) del words[0] value = words[0] # Add the QoS. if scope and scope == 'qos': if key in self.svc_qos_keys.keys(): try: type_fn = self.svc_qos_keys[key]['type'] value = type_fn(value) qos[key] = value except ValueError: continue # Any keys that the driver should look at should have the # 'drivers' scope. if scope and scope != 'drivers': continue if key in opts: this_type = type(opts[key]).__name__ if this_type == 'int': value = int(value) elif this_type == 'bool': value = strutils.bool_from_string(value) opts[key] = value if len(qos) != 0: opts['qos'] = qos return opts def _get_qos_from_volume_metadata(self, volume_metadata): """Return the QoS information from the volume metadata.""" qos = {} for i in volume_metadata: k = i.get('key', None) value = i.get('value', None) key_split = k.split(':') if len(key_split) == 1: scope = None key = key_split[0] else: scope = key_split[0] key = key_split[1] # Add the QoS. if scope and scope == 'qos': if key in self.svc_qos_keys.keys(): try: type_fn = self.svc_qos_keys[key]['type'] value = type_fn(value) qos[key] = value except ValueError: continue return qos def _wait_for_a_condition(self, testmethod, timeout=None, interval=INTERVAL_1_SEC): start_time = time.time() if timeout is None: timeout = DEFAULT_TIMEOUT def _inner(): try: testValue = testmethod() except Exception as ex: testValue = False LOG.debug('Helper.' 
'_wait_for_a_condition: %(method_name)s ' 'execution failed for %(exception)s.', {'method_name': testmethod.__name__, 'exception': ex}) if testValue: raise loopingcall.LoopingCallDone() if int(time.time()) - start_time > timeout: msg = (_('Helper._wait_for_a_condition: %s timeout.') % testmethod.__name__) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=interval).wait() def get_vdisk_params(self, config, state, type_id, volume_type=None, volume_metadata=None): """Return the parameters for creating the vdisk. Takes volume type and defaults from config options into account. """ opts = self.build_default_opts(config) ctxt = context.get_admin_context() if volume_type is None and type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) if volume_type: qos_specs_id = volume_type.get('qos_specs_id') specs = dict(volume_type).get('extra_specs') # NOTE(vhou): We prefer the qos_specs association # and override any existing # extra-specs settings if present if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] # Merge the qos_specs into extra_specs; qos_specs has higher # priority than extra_specs if they have different values for # the same key. specs.update(kvs) opts = self._get_opts_from_specs(opts, specs) if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos and volume_metadata): qos = self._get_qos_from_volume_metadata(volume_metadata) if len(qos) != 0: opts['qos'] = qos self.check_vdisk_opts(state, opts) return opts @staticmethod def _get_vdisk_create_params(opts): easytier = 'on' if opts['easytier'] else 'off' if opts['rsize'] == -1: params = [] if opts['nofmtdisk']: params.append('-nofmtdisk') else: params = ['-rsize', '%s%%' % str(opts['rsize']), '-autoexpand', '-warning', '%s%%' % str(opts['warning'])] if not opts['autoexpand']: params.remove('-autoexpand') if opts['compression']: params.append('-compressed') else: params.extend(['-grainsize', str(opts['grainsize'])]) params.extend(['-easytier', easytier]) return params def create_vdisk(self, name, size, units, pool, opts): LOG.debug('Enter: create_vdisk: vdisk %s.', name) params = self._get_vdisk_create_params(opts) self.ssh.mkvdisk(name, size, units, pool, opts, params) LOG.debug('Leave: create_vdisk: vdisk %s.', name) def get_vdisk_attributes(self, vdisk): attrs = self.ssh.lsvdisk(vdisk) return attrs def is_vdisk_defined(self, vdisk_name): """Check if vdisk is defined.""" attrs = self.get_vdisk_attributes(vdisk_name) return attrs is not None def find_vdisk_copy_id(self, vdisk, pool): resp = self.ssh.lsvdiskcopy(vdisk) for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'): if mdisk_grp == pool: return copy_id msg = _('Failed to find a vdisk copy in the expected pool.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) def get_vdisk_copy_attrs(self, vdisk, copy_id): return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0] def get_vdisk_copies(self, vdisk): copies = {'primary': None, 'secondary': None} resp = self.ssh.lsvdiskcopy(vdisk) for copy_id, status, sync, primary, mdisk_grp in ( resp.select('copy_id', 'status', 'sync', 'primary', 'mdisk_grp_name')): copy = {'copy_id': copy_id, 'status': status, 'sync': sync, 'primary': primary, 'mdisk_grp_name': mdisk_grp, 'sync_progress': None} if copy['sync'] != 'yes': progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id) copy['sync_progress'] = progress_info['progress'] if
copy['primary'] == 'yes': copies['primary'] = copy else: copies['secondary'] = copy return copies def _prepare_fc_map(self, fc_map_id, timeout): self.ssh.prestartfcmap(fc_map_id) mapping_ready = False wait_time = 5 max_retries = (timeout // wait_time) + 1 for try_number in range(1, max_retries): mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id) if (mapping_attrs is None or 'status' not in mapping_attrs): break if mapping_attrs['status'] == 'prepared': mapping_ready = True break elif mapping_attrs['status'] == 'stopped': self.ssh.prestartfcmap(fc_map_id) elif mapping_attrs['status'] != 'preparing': msg = (_('Unexpected mapping status %(status)s for mapping ' '%(id)s. Attributes: %(attr)s.') % {'status': mapping_attrs['status'], 'id': fc_map_id, 'attr': mapping_attrs}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) greenthread.sleep(wait_time) if not mapping_ready: msg = (_('Mapping %(id)s prepare failed to complete within the ' 'allotted %(to)d seconds timeout. Terminating.') % {'id': fc_map_id, 'to': timeout}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def start_fc_consistgrp(self, fc_consistgrp): self.ssh.startfcconsistgrp(fc_consistgrp) def create_fc_consistgrp(self, fc_consistgrp): self.ssh.mkfcconsistgrp(fc_consistgrp) def delete_fc_consistgrp(self, fc_consistgrp): self.ssh.rmfcconsistgrp(fc_consistgrp) def stop_fc_consistgrp(self, fc_consistgrp): self.ssh.stopfcconsistgrp(fc_consistgrp) def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state, config, timeout): cgsnapshot = {'status': 'available'} try: for snapshot in snapshots: opts = self.get_vdisk_params(config, state, snapshot['volume_type_id']) self.create_flashcopy_to_consistgrp(snapshot['volume_name'], snapshot['name'], fc_consistgrp, config, opts) snapshot['status'] = 'available' self.prepare_fc_consistgrp(fc_consistgrp, timeout) self.start_fc_consistgrp(fc_consistgrp) # There is a CG limitation that no more than 128 CGs can exist. # After starting the CG, we delete it to avoid this limitation. # Cinder will maintain the CG and snapshot relationship. self.delete_fc_consistgrp(fc_consistgrp) except exception.VolumeBackendAPIException as err: for snapshot in snapshots: snapshot['status'] = 'error' cgsnapshot['status'] = 'error' # Release cg self.delete_fc_consistgrp(fc_consistgrp) LOG.error(_LE("Failed to create CGSnapshot. " "Exception: %s."), err) return cgsnapshot, snapshots def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots): """Delete flashcopy maps and consistency group.""" cgsnapshot = {'status': 'available'} try: for snapshot in snapshots: self.ssh.rmvdisk(snapshot['name'], True) snapshot['status'] = 'deleted' except exception.VolumeBackendAPIException as err: for snapshot in snapshots: snapshot['status'] = 'error_deleting' cgsnapshot['status'] = 'error_deleting' LOG.error(_LE("Failed to delete the snapshot %(snap)s of " "CGSnapshot.
Exception: %(exception)s."), {'snap': snapshot['name'], 'exception': err}) return cgsnapshot, snapshots def prepare_fc_consistgrp(self, fc_consistgrp, timeout): """Prepare FC Consistency Group.""" self.ssh.prestartfcconsistgrp(fc_consistgrp) def prepare_fc_consistgrp_success(): mapping_ready = False mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp) if (mapping_attrs is None or 'status' not in mapping_attrs): return mapping_ready if mapping_attrs['status'] == 'prepared': mapping_ready = True elif mapping_attrs['status'] == 'stopped': self.ssh.prestartfcconsistgrp(fc_consistgrp) elif mapping_attrs['status'] != 'preparing': msg = (_('Unexpected mapping status %(status)s for mapping ' '%(id)s. Attributes: %(attr)s.') % {'status': mapping_attrs['status'], 'id': fc_consistgrp, 'attr': mapping_attrs}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return mapping_ready self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout) def create_cg_from_source(self, group, fc_consistgrp, sources, targets, state, config, timeout): """Create consistency group from source.""" LOG.debug('Enter: create_cg_from_source: cg %(cg)s' ' source %(source)s, target %(target)s', {'cg': fc_consistgrp, 'source': sources, 'target': targets}) model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} ctxt = context.get_admin_context() try: for source, target in zip(sources, targets): opts = self.get_vdisk_params(config, state, source['volume_type_id']) pool = utils.extract_host(target['host'], 'pool') self.create_flashcopy_to_consistgrp(source['name'], target['name'], fc_consistgrp, config, opts, True, pool=pool) self.prepare_fc_consistgrp(fc_consistgrp, timeout) self.start_fc_consistgrp(fc_consistgrp) self.delete_fc_consistgrp(fc_consistgrp) volumes_model_update = self._get_volume_model_updates( ctxt, targets, group['id'], model_update['status']) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.ConsistencyGroupStatus.ERROR volumes_model_update = self._get_volume_model_updates( ctxt, targets, group['id'], model_update['status']) with excutils.save_and_reraise_exception(): # Release cg self.delete_fc_consistgrp(fc_consistgrp) LOG.error(_LE("Failed to create CG from CGsnapshot.
" "Exception: %s"), err) return model_update, volumes_model_update LOG.debug('Leave: create_cg_from_source.') return model_update, volumes_model_update def _get_volume_model_updates(self, ctxt, volumes, cgId, status='available'): """Update the volume model's status and return it.""" volume_model_updates = [] LOG.info(_LI( "Updating status for CG: %(id)s."), {'id': cgId}) if volumes: for volume in volumes: volume_model_updates.append({'id': volume['id'], 'status': status}) else: LOG.info(_LI("No volume found for CG: %(cg)s."), {'cg': cgId}) return volume_model_updates def run_flashcopy(self, source, target, timeout, copy_rate, full_copy=True): """Create a FlashCopy mapping from the source to the target.""" LOG.debug('Enter: run_flashcopy: execute FlashCopy from source ' '%(source)s to target %(target)s.', {'source': source, 'target': target}) fc_map_id = self.ssh.mkfcmap(source, target, full_copy, copy_rate) self._prepare_fc_map(fc_map_id, timeout) self.ssh.startfcmap(fc_map_id) LOG.debug('Leave: run_flashcopy: FlashCopy started from ' '%(source)s to %(target)s.', {'source': source, 'target': target}) def create_flashcopy_to_consistgrp(self, source, target, consistgrp, config, opts, full_copy=False, pool=None): """Create a FlashCopy mapping and add to consistent group.""" LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy' ' from source %(source)s to target %(target)s' 'Then add the flashcopy to %(cg)s.', {'source': source, 'target': target, 'cg': consistgrp}) src_attrs = self.get_vdisk_attributes(source) if src_attrs is None: msg = (_('create_copy: Source vdisk %(src)s ' 'does not exist.') % {'src': source}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) src_size = src_attrs['capacity'] # In case we need to use a specific pool if not pool: pool = src_attrs['mdisk_grp_name'] self.create_vdisk(target, src_size, 'b', pool, opts) self.ssh.mkfcmap(source, target, full_copy, config.storwize_svc_flashcopy_rate, consistgrp=consistgrp) LOG.debug('Leave: create_flashcopy_to_consistgrp: ' 'FlashCopy started from %(source)s to %(target)s.', {'source': source, 'target': target}) def _get_vdisk_fc_mappings(self, vdisk): """Return FlashCopy mappings that this vdisk is associated with.""" mapping_ids = [] resp = self.ssh.lsvdiskfcmappings(vdisk) for id in resp.select('id'): mapping_ids.append(id) return mapping_ids def _get_flashcopy_mapping_attributes(self, fc_map_id): resp = self.ssh.lsfcmap(fc_map_id) if not len(resp): return None return resp[0] def _get_flashcopy_consistgrp_attr(self, fc_map_id): resp = self.ssh.lsfcconsistgrp(fc_map_id) if not len(resp): return None return resp[0] def _check_vdisk_fc_mappings(self, name, allow_snaps=True): """FlashCopy mapping check helper.""" LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s.', name) mapping_ids = self._get_vdisk_fc_mappings(name) wait_for_copy = False for map_id in mapping_ids: attrs = self._get_flashcopy_mapping_attributes(map_id) if not attrs: continue source = attrs['source_vdisk_name'] target = attrs['target_vdisk_name'] copy_rate = attrs['copy_rate'] status = attrs['status'] if copy_rate == '0': if source == name: # Vdisk with snapshots. Return False if snapshot # not allowed. 
if not allow_snaps: raise loopingcall.LoopingCallDone(retvalue=False) self.ssh.chfcmap(map_id, copyrate='50', autodel='on') wait_for_copy = True else: # A snapshot if target != name: msg = (_('Vdisk %(name)s not involved in ' 'mapping %(src)s -> %(tgt)s.') % {'name': name, 'src': source, 'tgt': target}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if status in ['copying', 'prepared']: self.ssh.stopfcmap(map_id) # Need to wait for the fcmap to change to the # stopped state before removing the fcmap wait_for_copy = True elif status in ['stopping', 'preparing']: wait_for_copy = True else: self.ssh.rmfcmap(map_id) # Case 4: Copy in progress - wait and will autodelete else: if status == 'prepared': self.ssh.stopfcmap(map_id) self.ssh.rmfcmap(map_id) elif status == 'idle_or_copied': # Prepare failed self.ssh.rmfcmap(map_id) else: wait_for_copy = True if not wait_for_copy or not len(mapping_ids): raise loopingcall.LoopingCallDone(retvalue=True) def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True): """Ensure vdisk has no flashcopy mappings.""" timer = loopingcall.FixedIntervalLoopingCall( self._check_vdisk_fc_mappings, name, allow_snaps) # Create a timer greenthread. The default volume service heart # beat is every 10 seconds. The flashcopy usually takes hours # before it finishes. Don't set the sleep interval shorter # than the heartbeat. Otherwise volume service heartbeat # will not be serviced. LOG.debug('Calling ensure_vdisk_no_fc_mappings: vdisk %s.', name) ret = timer.start(interval=self.check_fcmapping_interval).wait() timer.stop() return ret def start_relationship(self, volume_name, primary=None): vol_attrs = self.get_vdisk_attributes(volume_name) if vol_attrs['RC_name']: self.ssh.startrcrelationship(vol_attrs['RC_name'], primary) def stop_relationship(self, volume_name): vol_attrs = self.get_vdisk_attributes(volume_name) if vol_attrs['RC_name']: self.ssh.stoprcrelationship(vol_attrs['RC_name'], access=True) def create_relationship(self, master, aux, system, asyncmirror): name = 'rcrel' + ''.join(random.sample(string.digits, 10)) rc_id = None try: rc_id = self.ssh.mkrcrelationship(master, aux, system, name, asyncmirror) except exception.VolumeBackendAPIException as e: # CMMVC5959E is the code in Storwize storage, meaning that # there is a relationship that already has this name on the # master cluster. if 'CMMVC5959E' not in six.text_type(e): # If there is no relation between the primary and the # secondary back-end storage, the exception is raised.
raise if rc_id: self.start_relationship(master) def delete_relationship(self, volume_name): vol_attrs = self.get_vdisk_attributes(volume_name) if vol_attrs['RC_name']: self.ssh.stoprcrelationship(vol_attrs['RC_name']) self.ssh.rmrcrelationship(vol_attrs['RC_name']) vol_attrs = self.get_vdisk_attributes(volume_name) def get_relationship_info(self, volume): vol_attrs = self.get_vdisk_attributes(volume['name']) if not vol_attrs or not vol_attrs['RC_name']: LOG.info(_LI("Unable to get remote copy information for " "volume %s"), volume['name']) return relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name']) return relationship[0] if len(relationship) > 0 else None def switch_relationship(self, relationship, aux=True): self.ssh.switchrelationship(relationship, aux) def get_partnership_info(self, system_name): partnership = self.ssh.lspartnership(system_name) return partnership[0] if len(partnership) > 0 else None def get_partnershipcandidate_info(self, system_name): candidates = self.ssh.lspartnershipcandidate() for candidate in candidates: if system_name == candidate['name']: return candidate return None def mkippartnership(self, ip_v4, bandwidth=1000): self.ssh.mkippartnership(ip_v4, bandwidth) def mkfcpartnership(self, system_name, bandwidth=1000): self.ssh.mkfcpartnership(system_name, bandwidth) def startpartnership(self, partnership_id): self.ssh.startpartnership(partnership_id) def delete_vdisk(self, vdisk, force): """Ensures that vdisk is not part of FC mapping and deletes it.""" LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk) if not self.is_vdisk_defined(vdisk): LOG.info(_LI('Tried to delete non-existent vdisk %s.'), vdisk) return self.ensure_vdisk_no_fc_mappings(vdisk) self.ssh.rmvdisk(vdisk, force=force) LOG.debug('Leave: delete_vdisk: vdisk %s.', vdisk) def create_copy(self, src, tgt, src_id, config, opts, full_copy, pool=None): """Create a new snapshot using FlashCopy.""" LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.', {'tgt': tgt, 'src': src}) src_attrs = self.get_vdisk_attributes(src) if src_attrs is None: msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) ' 'does not exist.') % {'src': src, 'src_id': src_id}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) src_size = src_attrs['capacity'] # In case we need to use a specific pool if not pool: pool = src_attrs['mdisk_grp_name'] self.create_vdisk(tgt, src_size, 'b', pool, opts) timeout = config.storwize_svc_flashcopy_timeout try: self.run_flashcopy(src, tgt, timeout, config.storwize_svc_flashcopy_rate, full_copy=full_copy) except Exception: with excutils.save_and_reraise_exception(): self.delete_vdisk(tgt, True) LOG.debug('Leave: create_copy: snapshot %(tgt)s from ' 'vdisk %(src)s.', {'tgt': tgt, 'src': src}) def extend_vdisk(self, vdisk, amount): self.ssh.expandvdisksize(vdisk, amount) def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config): """Add a vdisk copy in the given pool.""" resp = self.ssh.lsvdiskcopy(vdisk) if len(resp) > 1: msg = (_('add_vdisk_copy failed: A copy of volume %s exists.
' 'Adding another copy would exceed the limit of ' '2 copies.') % vdisk) raise exception.VolumeDriverException(message=msg) orig_copy_id = resp[0].get("copy_id", None) if orig_copy_id is None: msg = (_('add_vdisk_copy started without a vdisk copy in the ' 'expected pool.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if volume_type is None: opts = self.get_vdisk_params(config, state, None) else: opts = self.get_vdisk_params(config, state, volume_type['id'], volume_type=volume_type) params = self._get_vdisk_create_params(opts) new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params) return (orig_copy_id, new_copy_id) def is_vdisk_copy_synced(self, vdisk, copy_id): sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync'] if sync == 'yes': return True return False def rm_vdisk_copy(self, vdisk, copy_id): self.ssh.rmvdiskcopy(vdisk, copy_id) @staticmethod def can_migrate_to_host(host, state): if 'location_info' not in host['capabilities']: return None info = host['capabilities']['location_info'] try: (dest_type, dest_id, dest_pool) = info.split(':') except ValueError: return None if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']): return None return dest_pool def add_vdisk_qos(self, vdisk, qos): """Add the QoS configuration to the volume.""" for key, value in qos.items(): if key in self.svc_qos_keys.keys(): param = self.svc_qos_keys[key]['param'] self.ssh.chvdisk(vdisk, ['-' + param, str(value)]) def update_vdisk_qos(self, vdisk, qos): """Update all the QoS settings in terms of key and value. svc_qos_keys saves all the supported QoS parameters. Going through this dict, we set new values for all the parameters. If a parameter is present in the QoS configuration, the value is taken from it; if not, the value is reset to its default. """ for key, value in self.svc_qos_keys.items(): param = value['param'] if key in qos.keys(): # If the value is set in QoS, take the value from # the QoS configuration. v = qos[key] else: # If not, set the value to default. v = value['default'] self.ssh.chvdisk(vdisk, ['-' + param, str(v)]) def disable_vdisk_qos(self, vdisk, qos): """Disable the QoS.""" for key, value in qos.items(): if key in self.svc_qos_keys.keys(): param = self.svc_qos_keys[key]['param'] # Take the default value. value = self.svc_qos_keys[key]['default'] self.ssh.chvdisk(vdisk, ['-' + param, value]) def change_vdisk_options(self, vdisk, changes, opts, state): if 'warning' in opts: opts['warning'] = '%s%%' % str(opts['warning']) if 'easytier' in opts: opts['easytier'] = 'on' if opts['easytier'] else 'off' if 'autoexpand' in opts: opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off' for key in changes: self.ssh.chvdisk(vdisk, ['-' + key, opts[key]]) def change_vdisk_iogrp(self, vdisk, state, iogrp): if state['code_level'] < (6, 4, 0, 0): LOG.debug('Ignoring the I/O group change: the storage code level ' 'is %(code_level)s, below the required 6.4.0.0.', {'code_level': state['code_level']}) else: self.ssh.movevdisk(vdisk, str(iogrp[0])) self.ssh.addvdiskaccess(vdisk, str(iogrp[0])) self.ssh.rmvdiskaccess(vdisk, str(iogrp[1])) def vdisk_by_uid(self, vdisk_uid): """Returns the properties of the vdisk with the specified UID. Returns None if no such disk exists. """ vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid) if len(vdisks) == 0: return None if len(vdisks) != 1: msg = (_('Expected single vdisk returned from lsvdisk when ' 'filtering on vdisk_UID.
%(count)s were returned.') % {'count': len(vdisks)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) vdisk = vdisks.result[0] return self.ssh.lsvdisk(vdisk['name']) def is_vdisk_in_use(self, vdisk): """Returns True if the specified vdisk is mapped to at least 1 host.""" resp = self.ssh.lsvdiskhostmap(vdisk) return len(resp) != 0 def rename_vdisk(self, vdisk, new_name): self.ssh.chvdisk(vdisk, ['-name', new_name]) def change_vdisk_primary_copy(self, vdisk, copy_id): self.ssh.chvdisk(vdisk, ['-primary', copy_id]) class CLIResponse(object): """Parse SVC CLI output and generate iterable.""" def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True): super(CLIResponse, self).__init__() if ssh_cmd: self.ssh_cmd = ' '.join(ssh_cmd) else: self.ssh_cmd = 'None' self.raw = raw self.delim = delim self.with_header = with_header self.result = self._parse() def select(self, *keys): for a in self.result: vs = [] for k in keys: v = a.get(k, None) if isinstance(v, six.string_types) or v is None: v = [v] if isinstance(v, list): vs.append(v) for item in zip(*vs): if len(item) == 1: yield item[0] else: yield item def __getitem__(self, key): try: return self.result[key] except (KeyError, IndexError): msg = (_('Did not find the expected key %(key)s in %(fun)s: ' '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd, 'raw': self.raw}) raise exception.VolumeBackendAPIException(data=msg) def __iter__(self): for a in self.result: yield a def __len__(self): return len(self.result) def _parse(self): def get_reader(content, delim): for line in content.lstrip().splitlines(): line = line.strip() if line: yield line.split(delim) else: yield [] if isinstance(self.raw, six.string_types): stdout, stderr = self.raw, '' else: stdout, stderr = self.raw reader = get_reader(stdout, self.delim) result = [] if self.with_header: hds = tuple() for row in reader: hds = row break for row in reader: cur = dict() if len(hds) != len(row): msg = (_('Unexpected CLI response: header/row mismatch. ' 'header: %(header)s, row: %(row)s.') % {'header': hds, 'row': row}) raise exception.VolumeBackendAPIException(data=msg) for k, v in zip(hds, row): CLIResponse.append_dict(cur, k, v) result.append(cur) else: cur = dict() for row in reader: if row: CLIResponse.append_dict(cur, row[0], ' '.join(row[1:])) elif cur: # start new section result.append(cur) cur = dict() if cur: result.append(cur) return result @staticmethod def append_dict(dict_, key, value): key, value = key.strip(), value.strip() obj = dict_.get(key, None) if obj is None: dict_[key] = value elif isinstance(obj, list): obj.append(value) dict_[key] = obj else: dict_[key] = [obj, value] return dict_ class StorwizeSVCCommonDriver(san.SanDriver, driver.ManageableVD, driver.ExtendVD, driver.SnapshotVD, driver.MigrateVD, driver.ReplicaVD, driver.ConsistencyGroupVD, driver.CloneableImageVD, driver.TransferVD): """IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers.
Version history: 1.0 - Initial driver 1.1 - FC support, create_cloned_volume, volume type support, get_volume_stats, minor bug fixes 1.2.0 - Added retype 1.2.1 - Code refactor, improved exception handling 1.2.2 - Fix bug #1274123 (races in host-related functions) 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to lsfabric, clear unused data from connections, ensure matching WWPNs by comparing lower case) 1.2.4 - Fix bug #1278035 (async migration/retype) 1.2.5 - Added support for manage_existing (unmanage is inherited) 1.2.6 - Added QoS support in terms of I/O throttling rate 1.3.1 - Added support for volume replication 1.3.2 - Added support for consistency group 1.3.3 - Update driver to use ABC metaclasses 2.0 - Code refactor, split init file and placed shared methods for FC and iSCSI within the StorwizeSVCCommonDriver class 2.1 - Added replication V2 support to the global/metro mirror mode 2.1.1 - Update replication to version 2.1 """ VERSION = "2.1.1" VDISKCOPYOPS_INTERVAL = 600 GLOBAL = 'global' METRO = 'metro' VALID_REP_TYPES = (GLOBAL, METRO) FAILBACK_VALUE = 'default' def __init__(self, *args, **kwargs): super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(storwize_svc_opts) self._backend_name = self.configuration.safe_get('volume_backend_name') self._helpers = StorwizeHelpers(self._run_ssh) self._vdiskcopyops = {} self._vdiskcopyops_loop = None self.protocol = None self.replication = None self._state = {'storage_nodes': {}, 'enabled_protocols': set(), 'compression_enabled': False, 'available_iogrps': [], 'system_name': None, 'system_id': None, 'code_level': None, } self._active_backend_id = kwargs.get('active_backend_id') # Since Storwize supports multiple replication modes, this # dictionary is used to map the replication types to their # corresponding replications. self.replications = {} # One driver can be configured with multiple replication targets # to fail over to. self._replication_targets = [] # This boolean is used to indicate whether this driver is configured # with replication. self._replication_enabled = False # This list is used to save the supported replication modes. self._supported_replication_types = [] # Storwize has the limitation that it cannot accept more than 3 new # ssh connections within 1 second, so slow down the initialization. time.sleep(1) def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" LOG.debug('enter: do_setup') # Get storage system name, id, and code level self._state.update(self._helpers.get_system_info()) # Get the replication helpers self.replication = storwize_rep.StorwizeSVCReplication.factory(self) # Validate that the pool exists self._validate_pools_exist() # Check if compression is supported self._state['compression_enabled'] = (self._helpers. compression_enabled()) # Get the available I/O groups self._state['available_iogrps'] = (self._helpers. get_available_io_groups()) # Get the iSCSI and FC names of the Storwize/SVC nodes self._state['storage_nodes'] = self._helpers.get_node_info() # Add the iSCSI IP addresses and WWPNs to the storage node info self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes']) self._helpers.add_fc_wwpns(self._state['storage_nodes']) # For each node, check what connection modes it supports. Delete any # nodes that do not support any types (may be partially configured).
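# Illustrative sketch (all values hypothetical) of one storage_nodes entry as consumed by the protocol checks below; only these keys are read here:
#     node = {'ipv4': ['192.168.10.21'],
#             'ipv6': [],
#             'iscsi_name': 'iqn.1986-03.com.ibm:2145.cluster.node1',
#             'WWPN': ['500507680110F855'],
#             'enabled_protocols': []}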
to_delete = [] for k, node in self._state['storage_nodes'].items(): if ((len(node['ipv4']) or len(node['ipv6'])) and len(node['iscsi_name'])): node['enabled_protocols'].append('iSCSI') self._state['enabled_protocols'].add('iSCSI') if len(node['WWPN']): node['enabled_protocols'].append('FC') self._state['enabled_protocols'].add('FC') if not len(node['enabled_protocols']): to_delete.append(k) for delkey in to_delete: del self._state['storage_nodes'][delkey] # Build the list of in-progress vdisk copy operations if ctxt is None: admin_context = context.get_admin_context() else: admin_context = ctxt.elevated() volumes = self.db.volume_get_all_by_host(admin_context, self.host) for volume in volumes: metadata = self.db.volume_admin_metadata_get(admin_context, volume['id']) curr_ops = metadata.get('vdiskcopyops', None) if curr_ops: ops = [tuple(x.split(':')) for x in curr_ops.split(';')] self._vdiskcopyops[volume['id']] = ops # If vdisk copy operations exist in the database, start the looping # call if len(self._vdiskcopyops) >= 1: self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall( self._check_volume_copy_ops) self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL) LOG.debug('leave: do_setup') # v2 replication setup self._do_replication_setup() def _validate_pools_exist(self): # Validate that each configured pool exists pools = self.configuration.storwize_svc_volpool_name for pool in pools: try: self._helpers.get_pool_attrs(pool) except exception.VolumeBackendAPIException: msg = _('Failed getting details for pool %s.') % pool raise exception.InvalidInput(reason=msg) def check_for_setup_error(self): """Ensure that the flags are set properly.""" LOG.debug('enter: check_for_setup_error') # Check that we have the system ID information if self._state['system_name'] is None: exception_msg = (_('Unable to determine system name.')) raise exception.VolumeBackendAPIException(data=exception_msg) if self._state['system_id'] is None: exception_msg = (_('Unable to determine system id.')) raise exception.VolumeBackendAPIException(data=exception_msg) # Make sure we have at least one node configured if not len(self._state['storage_nodes']): msg = _('check_for_setup_error: No configured nodes.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) if self.protocol not in self._state['enabled_protocols']: # TODO(mc_nair): improve this error message by looking at # self._state['enabled_protocols'] to tell user what driver to use raise exception.InvalidInput( reason=_('The storage device does not support %(prot)s.
' 'Please configure the device to support %(prot)s or ' 'switch to a driver using a different protocol.') % {'prot': self.protocol}) required_flags = ['san_ip', 'san_ssh_port', 'san_login', 'storwize_svc_volpool_name'] for flag in required_flags: if not self.configuration.safe_get(flag): raise exception.InvalidInput(reason=_('%s is not set.') % flag) # Ensure that either password or keyfile was set if not (self.configuration.san_password or self.configuration.san_private_key): raise exception.InvalidInput( reason=_('Password or SSH private key is required for ' 'authentication: set either san_password or ' 'san_private_key option.')) opts = self._helpers.build_default_opts(self.configuration) self._helpers.check_vdisk_opts(self._state, opts) LOG.debug('leave: check_for_setup_error') def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): cinder_utils.check_ssh_injection(cmd_list) command = ' '.join(cmd_list) if not self.sshpool: try: self.sshpool = self._set_up_sshpool(self.configuration.san_ip) except paramiko.SSHException: LOG.warning(_LW('Unable to use san_ip to create SSHPool. Now ' 'attempting to use storwize_san_secondary_ip ' 'to create SSHPool.')) if self.configuration.storwize_san_secondary_ip is not None: self.sshpool = self._set_up_sshpool( self.configuration.storwize_san_secondary_ip) else: LOG.warning(_LW('Unable to create SSHPool using san_ip ' 'and not able to use ' 'storwize_san_secondary_ip since it is ' 'not configured.')) raise try: return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) except Exception: # Need to check whether we should create an SSHPool using # storwize_san_secondary_ip before raising an error. if self.configuration.storwize_san_secondary_ip is not None: if (self.sshpool.ip == self.configuration.storwize_san_secondary_ip): LOG.warning(_LW("Unable to execute SSH command with " "storwize_san_secondary_ip. " "Attempting to switch IP back " "to san_ip %s."), self.configuration.san_ip) self.sshpool = self._set_up_sshpool( self.configuration.san_ip) return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) else: LOG.warning(_LW("Unable to execute SSH command. " "Attempting to switch IP to %s."), self.configuration.storwize_san_secondary_ip) self.sshpool = self._set_up_sshpool( self.configuration.storwize_san_secondary_ip) return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) else: LOG.warning(_LW('Unable to execute SSH command.
' 'Not able to use ' 'storwize_san_secondary_ip since it is ' 'not configured.')) with excutils.save_and_reraise_exception(): LOG.error(_LE("Error running SSH command: %s"), command) def _set_up_sshpool(self, ip): password = self.configuration.san_password privatekey = self.configuration.san_private_key min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn sshpool = ssh_utils.SSHPool( ip, self.configuration.san_ssh_port, self.configuration.ssh_conn_timeout, self.configuration.san_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) return sshpool def _ssh_execute(self, sshpool, command, check_exit_code=True, attempts=1): try: with sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.error(_LE('An error has occurred: %s'), e) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error running SSH command: %s"), command) def ensure_export(self, ctxt, volume): """Check that the volume exists on the storage. The system does not "export" volumes as a Linux iSCSI target does, and therefore we just check that the volume exists on the storage. """ volume_defined = self._helpers.is_vdisk_defined(volume['name']) if not volume_defined: LOG.error(_LE('ensure_export: Volume %s not found on storage.'), volume['name']) def create_export(self, ctxt, volume, connector): model_update = None return model_update def remove_export(self, ctxt, volume): pass def _get_vdisk_params(self, type_id, volume_type=None, volume_metadata=None): return self._helpers.get_vdisk_params(self.configuration, self._state, type_id, volume_type=volume_type, volume_metadata=volume_metadata) def create_volume(self, volume): opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) pool = utils.extract_host(volume['host'], 'pool') self._helpers.create_vdisk(volume['name'], str(volume['size']), 'gb', pool, opts) if opts['qos']: self._helpers.add_vdisk_qos(volume['name'], opts['qos']) model_update = None ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) # Replication V2 has a higher priority than replication V1. # Check if V2 is available first, then check if V1 is available.
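# Illustrative (hypothetical) volume-type extra specs that would make _get_volume_replicated_type() return 'global' here, per the parsing in _get_specs_replicated_type() later in this class:
#     {'replication_enabled': '<is> True',
#      'replication_type': '<is> global'}
# When 'replication_type' is absent, the type defaults to global mirror.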
if rep_type: self.replications.get(rep_type).volume_replication_setup(ctxt, volume) model_update = {'replication_status': 'enabled'} elif opts.get('replication'): model_update = self.replication.create_replica(ctxt, volume) return model_update def delete_volume(self, volume): ctxt = context.get_admin_context() rep_mirror_type = self._get_volume_replicated_type_mirror(ctxt, volume) rep_status = volume.get("replication_status", None) if rep_mirror_type and rep_status != "failed-over": self.replications.get(rep_mirror_type).delete_target_volume( volume) self._helpers.delete_vdisk(volume['name'], False) if volume['id'] in self._vdiskcopyops: del self._vdiskcopyops[volume['id']] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None def create_snapshot(self, snapshot): ctxt = context.get_admin_context() try: source_vol = self.db.volume_get(ctxt, snapshot['volume_id']) except Exception: msg = (_('create_snapshot: get source volume failed.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) pool = utils.extract_host(source_vol['host'], 'pool') opts = self._get_vdisk_params(source_vol['volume_type_id']) self._helpers.create_copy(snapshot['volume_name'], snapshot['name'], snapshot['volume_id'], self.configuration, opts, False, pool=pool) def delete_snapshot(self, snapshot): self._helpers.delete_vdisk(snapshot['name'], False) def create_volume_from_snapshot(self, volume, snapshot): if volume['size'] != snapshot['volume_size']: msg = (_('create_volume_from_snapshot: Source and destination ' 'size differ.')) LOG.error(msg) raise exception.InvalidInput(message=msg) opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) pool = utils.extract_host(volume['host'], 'pool') self._helpers.create_copy(snapshot['name'], volume['name'], snapshot['id'], self.configuration, opts, True, pool=pool) if opts['qos']: self._helpers.add_vdisk_qos(volume['name'], opts['qos']) ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) # Replication V2 has a higher priority than replication V1. # Check if V2 is available first, then check if V1 is available. if rep_type and self._replication_enabled: self.replications.get(rep_type).volume_replication_setup(ctxt, volume) return {'replication_status': 'enabled'} elif opts.get('replication'): replica_status = self.replication.create_replica(ctxt, volume) if replica_status: return replica_status def create_cloned_volume(self, tgt_volume, src_volume): """Creates a clone of the specified volume.""" if src_volume['size'] > tgt_volume['size']: msg = (_("create_cloned_volume: source volume %(src_vol)s " "size is %(src_size)dGB and doesn't fit in target " "volume %(tgt_vol)s of size %(tgt_size)dGB.") % {'src_vol': src_volume['name'], 'src_size': src_volume['size'], 'tgt_vol': tgt_volume['name'], 'tgt_size': tgt_volume['size']}) LOG.error(msg) raise exception.InvalidInput(message=msg) opts = self._get_vdisk_params(tgt_volume['volume_type_id'], volume_metadata= tgt_volume.get('volume_metadata')) pool = utils.extract_host(tgt_volume['host'], 'pool') self._helpers.create_copy(src_volume['name'], tgt_volume['name'], src_volume['id'], self.configuration, opts, True, pool=pool) # The source volume size is equal to target volume size # in most of the cases. But in some scenario, the target # volume size may be bigger than the source volume size. # SVC does not support flashcopy between two volumes # with different sizes.
So use the source volume size to # create the target volume first and then extend the target # volume to the original size. if tgt_volume['size'] > src_volume['size']: # Extend the newly created target volume to the expected size. self._extend_volume_op(tgt_volume, tgt_volume['size'], src_volume['size']) if opts['qos']: self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos']) ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, tgt_volume) # Replication V2 has a higher priority than replication V1. # Check if V2 is available first, then check if V1 is available. if rep_type and self._replication_enabled: self.replications.get(rep_type).volume_replication_setup( ctxt, tgt_volume) return {'replication_status': 'enabled'} elif opts.get('replication'): replica_status = self.replication.create_replica(ctxt, tgt_volume) if replica_status: return replica_status def extend_volume(self, volume, new_size): self._extend_volume_op(volume, new_size) def _extend_volume_op(self, volume, new_size, old_size=None): LOG.debug('enter: _extend_volume_op: volume %s', volume['id']) ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'], allow_snaps=False) if not ret: msg = (_('_extend_volume_op: Extending a volume with snapshots is ' 'not supported.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if old_size is None: old_size = volume['size'] extend_amt = int(new_size) - old_size ctxt = context.get_admin_context() rep_mirror_type = self._get_volume_replicated_type_mirror(ctxt, volume) rep_status = volume.get("replication_status", None) target_vol_name = None if rep_mirror_type and rep_status != "failed-over": try: rel_info = self._helpers.get_relationship_info(volume) self._helpers.delete_relationship(volume) except Exception as e: msg = (_('Failed to get remote copy information for ' '%(volume)s.
Exception: %(err)s.') % {'volume': volume['id'], 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if rel_info: target_vol_name = rel_info.get('aux_vdisk_name') self.replications.get(rep_mirror_type).extend_target_volume( target_vol_name, extend_amt) self._helpers.extend_vdisk(volume['name'], extend_amt) if rep_mirror_type and rep_status != "failed-over": self.replications.get(rep_mirror_type).create_relationship( volume, target_vol_name) LOG.debug('leave: _extend_volume_op: volume %s', volume['id']) def add_vdisk_copy(self, volume, dest_pool, vol_type): return self._helpers.add_vdisk_copy(volume, dest_pool, vol_type, self._state, self.configuration) def _add_vdisk_copy_op(self, ctxt, volume, new_op): metadata = self.db.volume_admin_metadata_get(ctxt.elevated(), volume['id']) curr_ops = metadata.get('vdiskcopyops', None) if curr_ops: curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] curr_ops_list.append(new_op) new_ops_list = curr_ops_list else: new_ops_list = [new_op] new_ops_str = ';'.join([':'.join(x) for x in new_ops_list]) self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'], {'vdiskcopyops': new_ops_str}, False) if volume['id'] in self._vdiskcopyops: self._vdiskcopyops[volume['id']].append(new_op) else: self._vdiskcopyops[volume['id']] = [new_op] # We added the first copy operation, so start the looping call if len(self._vdiskcopyops) == 1: self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall( self._check_volume_copy_ops) self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL) def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id): try: self._vdiskcopyops[volume['id']].remove((orig_copy_id, new_copy_id)) if not len(self._vdiskcopyops[volume['id']]): del self._vdiskcopyops[volume['id']] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None except KeyError: LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any ' 'registered vdisk copy operations.'), volume['id']) return except ValueError: LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have ' 'the specified vdisk copy operation: orig=%(orig)s ' 'new=%(new)s.'), {'vol': volume['id'], 'orig': orig_copy_id, 'new': new_copy_id}) return metadata = self.db.volume_admin_metadata_get(ctxt.elevated(), volume['id']) curr_ops = metadata.get('vdiskcopyops', None) if not curr_ops: LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not ' 'have any registered vdisk copy operations.'), volume['id']) return curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] try: curr_ops_list.remove((orig_copy_id, new_copy_id)) except ValueError: LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does ' 'not have the specified vdisk copy operation: ' 'orig=%(orig)s new=%(new)s.'), {'vol': volume['id'], 'orig': orig_copy_id, 'new': new_copy_id}) return if len(curr_ops_list): new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list]) self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'], {'vdiskcopyops': new_ops_str}, False) else: self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'], 'vdiskcopyops') def promote_replica(self, ctxt, volume): return self.replication.promote_replica(volume) def reenable_replication(self, ctxt, volume): return self.replication.reenable_replication(volume) def create_replica_test_volume(self, tgt_volume, src_volume): if src_volume['size'] != tgt_volume['size']: msg = (_('create_replica_test_volume: Source and destination ' 'size differ.')) LOG.error(msg)
raise exception.InvalidInput(message=msg) replica_status = self.replication.test_replica(tgt_volume, src_volume) return replica_status def get_replication_status(self, ctxt, volume): replica_status = None if self.replication: replica_status = self.replication.get_replication_status(volume) return replica_status def _check_volume_copy_ops(self): LOG.debug("Enter: update volume copy status.") ctxt = context.get_admin_context() copy_items = list(self._vdiskcopyops.items()) for vol_id, copy_ops in copy_items: try: volume = self.db.volume_get(ctxt, vol_id) except Exception: LOG.warning(_LW('Volume %s does not exist.'), vol_id) del self._vdiskcopyops[vol_id] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None continue for copy_op in copy_ops: try: synced = self._helpers.is_vdisk_copy_synced(volume['name'], copy_op[1]) except Exception: LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does ' 'not have the specified vdisk copy ' 'operation: orig=%(orig)s new=%(new)s.'), {'vol': volume['id'], 'orig': copy_op[0], 'new': copy_op[1]}) else: if synced: self._helpers.rm_vdisk_copy(volume['name'], copy_op[0]) self._rm_vdisk_copy_op(ctxt, volume, copy_op[0], copy_op[1]) LOG.debug("Exit: update volume copy status.") # #### V2.1 replication methods #### # def failover_host(self, context, volumes, secondary_id=None): """Force failover to a secondary replication target.""" self._validate_replication_enabled() if self.FAILBACK_VALUE == secondary_id: # In this case the administrator would like to fail back. volume_update_list = self._replication_failback(context, volumes) return None, volume_update_list # In this case the administrator would like to fail over. failover_target = None for target in self._replication_targets: if target['backend_id'] == secondary_id: failover_target = target break if not failover_target: msg = _("A valid secondary target MUST be specified in order " "to fail over.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) target_id = failover_target['backend_id'] volume_update_list = [] for volume in volumes: rep_type = self._get_volume_replicated_type(context, volume) if rep_type: replication = self.replications.get(rep_type) if replication.target.get('backend_id') == target_id: # Check if the target backend matches the replication type. # If so, fail over the volume. try: replication.failover_volume_host(context, volume, target_id) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'failed-over'}}) except exception.VolumeDriverException: msg = (_LE('Unable to fail over to the secondary. ' 'Please make sure that the secondary ' 'back-end is ready.')) LOG.error(msg) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'error'}}) else: # If the volume is not of replicated type, we need to # force the status into error state so a user knows they # do not have access to the volume. volume_update_list.append( {'volume_id': volume['id'], 'updates': {'status': 'error'}}) return target_id, volume_update_list def _is_host_ready_for_failback(self, ctxt, volumes): valid_sync_status = ('consistent_synchronized', 'consistent_stopped', 'synchronized', 'idling') # Check the status of each volume to see if it is in # a consistent status.
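# Sketch of the check below: every replicated volume must report one of the valid_sync_status values, e.g. (hypothetical reading)
#     replication.get_relationship_status(volume) -> 'consistent_synchronized'
# Any other status, a missing replication helper, or a non-replicated volume makes the whole host ineligible for fail-back.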
for volume in volumes: rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: replication = self.replications.get(rep_type) if replication: status = replication.get_relationship_status(volume) # We need to make sure that all the volumes are # in a valid status to trigger a successful # fail-back. False will be returned even if only # one volume is not ready. if status not in valid_sync_status: return False else: return False else: return False return True def _replication_failback(self, ctxt, volumes): """Fail back all the volumes on the secondary backend.""" if not self._is_host_ready_for_failback(ctxt, volumes): msg = _("The host is not ready to be failed back. Please " "resynchronize the volumes and resume replication on the " "Storwize backends.") LOG.error(msg) raise exception.VolumeDriverException(message=msg) volume_update_list = [] for volume in volumes: rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: replication = self.replications.get(rep_type) replication.replication_failback(volume) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'enabled'}}) else: volume_update_list.append( {'volume_id': volume['id'], 'updates': {'status': 'available'}}) return volume_update_list def _validate_replication_enabled(self): if not self._replication_enabled: msg = _("Issuing a fail-over failed because replication is " "not properly configured.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _validate_volume_rep_type(self, ctxt, volume): rep_type = self._get_volume_replicated_type(ctxt, volume) if not rep_type: msg = (_("Volume %s is not of replicated type. " "This volume needs to be of a volume type " "with the extra spec replication_enabled set " "to '<is> True' to support replication " "actions.") % volume['id']) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not self._replication_enabled: msg = _("The back-end where the volume is created " "does not have replication enabled.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return rep_type def _get_volume_replicated_type_mirror(self, ctxt, volume): rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type in self.VALID_REP_TYPES: return rep_type else: return None def _get_specs_replicated_type(self, volume_type): replication_type = None extra_specs = volume_type.get("extra_specs", {}) rep_val = extra_specs.get('replication_enabled') if rep_val == "<is> True": replication_type = extra_specs.get('replication_type', self.GLOBAL) # The format for replication_type in the extra spec is # "<is> global". Otherwise, the code will # not reach here. if replication_type != self.GLOBAL: # Pick up the replication type specified in the # extra spec from the format like "<is> global".
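# Worked example (hypothetical extra spec value):
#     replication_type = '<is> global'
#     '<is> global'.split() -> ['<is>', 'global'], so [1] picks 'global'.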
replication_type = replication_type.split()[1] if replication_type not in self.VALID_REP_TYPES: replication_type = None return replication_type def _get_volume_replicated_type(self, ctxt, volume): replication_type = None if volume.get("volume_type_id"): volume_type = volume_types.get_volume_type( ctxt, volume["volume_type_id"]) replication_type = self._get_specs_replicated_type(volume_type) return replication_type def _do_replication_setup(self): replication_devices = self.configuration.replication_device if replication_devices: replication_targets = [] for dev in replication_devices: remote_array = {} remote_array['managed_backend_name'] = ( dev.get('managed_backend_name')) if not remote_array['managed_backend_name']: raise exception.InvalidConfigurationValue( option='managed_backend_name', value=remote_array['managed_backend_name']) rep_mode = dev.get('replication_mode') remote_array['replication_mode'] = rep_mode remote_array['san_ip'] = ( dev.get('san_ip')) remote_array['backend_id'] = ( dev.get('backend_id')) remote_array['san_login'] = ( dev.get('san_login')) remote_array['san_password'] = ( dev.get('san_password')) remote_array['pool_name'] = ( dev.get('pool_name')) replication_targets.append(remote_array) # Each replication type will have a corresponding replication. self.create_replication_types(replication_targets) if len(self._supported_replication_types) > 0: self._replication_enabled = True def create_replication_types(self, replication_targets): for target in replication_targets: rep_type = target['replication_mode'] if (rep_type in self.VALID_REP_TYPES and rep_type not in self.replications.keys()): replication = self.replication_factory(rep_type, target) try: replication.establish_target_partnership() except exception.VolumeDriverException: LOG.error(_LE('The replication mode %(type)s has not ' 'successfully established a partnership ' 'with the replica Storwize target %(stor)s.'), {'type': rep_type, 'stor': target['backend_id']}) continue self.replications[rep_type] = replication self._replication_targets.append(target) self._supported_replication_types.append(rep_type) def replication_factory(self, replication_type, rep_target): """Use replication methods for the requested mode.""" if replication_type == self.GLOBAL: return storwize_rep.StorwizeSVCReplicationGlobalMirror( self, rep_target, StorwizeHelpers) if replication_type == self.METRO: return storwize_rep.StorwizeSVCReplicationMetroMirror( self, rep_target, StorwizeHelpers) def migrate_volume(self, ctxt, volume, host): """Migrate directly if source and dest are managed by same storage. We create a new vdisk copy in the desired pool, and add the original vdisk copy to the admin_metadata of the volume to be deleted. The deletion will occur using a periodic task once the new copy is synced. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities.
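As an illustration (the system id and pool name here are hypothetical), migration is only attempted when the destination host's capabilities carry a location_info of the form parsed by can_migrate_to_host(), i.e. '<driver>:<system_id>:<pool>', for example 'StorwizeSVCDriver:0000020321E04566:openstack_pool'; any other driver name or a foreign system_id makes this method return (False, None).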
""" LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s', {'id': volume['id'], 'host': host['host']}) false_ret = (False, None) dest_pool = self._helpers.can_migrate_to_host(host, self._state) if dest_pool is None: return false_ret ctxt = context.get_admin_context() volume_type_id = volume['volume_type_id'] if volume_type_id is not None: vol_type = volume_types.get_volume_type(ctxt, volume_type_id) else: vol_type = None self._check_volume_copy_ops() new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type) self._add_vdisk_copy_op(ctxt, volume, new_op) LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s', {'id': volume['id'], 'host': host['host']}) return (True, None) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ def retype_iogrp_property(volume, new, old): if new != old: self._helpers.change_vdisk_iogrp(volume['name'], self._state, (new, old)) LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,' 'diff=%(diff)s, host=%(host)s', {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host}) no_copy_keys = ['warning', 'autoexpand', 'easytier'] copy_keys = ['rsize', 'grainsize', 'compression'] all_keys = no_copy_keys + copy_keys old_opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_matadata')) new_opts = self._get_vdisk_params(new_type['id'], volume_type=new_type) # Check if retype affects volume replication model_update = None old_type_replication = old_opts.get('replication', False) new_type_replication = new_opts.get('replication', False) # Delete replica if needed if old_type_replication and not new_type_replication: self.replication.delete_replica(volume) model_update = {'replication_status': 'disabled', 'replication_driver_data': None, 'replication_extended_status': None} vdisk_changes = [] need_copy = False for key in all_keys: if old_opts[key] != new_opts[key]: if key in copy_keys: need_copy = True break elif key in no_copy_keys: vdisk_changes.append(key) if (utils.extract_host(volume['host'], 'pool') != utils.extract_host(host['host'], 'pool')): need_copy = True if need_copy: self._check_volume_copy_ops() dest_pool = self._helpers.can_migrate_to_host(host, self._state) if dest_pool is None: return False # If volume is replicated, can't copy if new_type_replication: msg = (_('Unable to retype: Current action needs volume-copy,' ' it is not allowed when new type is replication.' ' Volume = %s'), volume['id']) raise exception.VolumeDriverException(message=msg) retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp']) try: new_op = self.add_vdisk_copy(volume['name'], dest_pool, new_type) self._add_vdisk_copy_op(ctxt, volume, new_op) except exception.VolumeDriverException: # roll back changing iogrp property retype_iogrp_property(volume, old_opts['iogrp'], new_opts['iogrp']) msg = (_('Unable to retype: A copy of volume %s exists. 
' 'Retyping would exceed the limit of 2 copies.') % volume['id']) raise exception.VolumeDriverException(message=msg) else: retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp']) self._helpers.change_vdisk_options(volume['name'], vdisk_changes, new_opts, self._state) if new_opts['qos']: # Add the new QoS setting to the volume. If the volume has an # old QoS setting, it will be overwritten. self._helpers.update_vdisk_qos(volume['name'], new_opts['qos']) elif old_opts['qos']: # If the old_opts contain QoS keys, disable them. self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos']) # Add replica if needed if not old_type_replication and new_type_replication: model_update = self.replication.create_replica(ctxt, volume, new_type) LOG.debug('exit: retype: id=%(id)s, new_type=%(new_type)s, ' 'diff=%(diff)s, host=%(host)s', {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host['host']}) return True, model_update def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update from Storwize for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. :param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ current_name = CONF.volume_name_template % new_volume['id'] original_volume_name = CONF.volume_name_template % volume['id'] try: self._helpers.rename_vdisk(current_name, original_volume_name) except exception.VolumeBackendAPIException: LOG.error(_LE('Unable to rename the logical volume ' 'for volume: %s'), volume['id']) return {'_name_id': new_volume['_name_id'] or new_volume['id']} # If the back-end name(id) for the volume has been renamed, # it is OK for the volume to keep the original name(id) and there is # no need to use the column "_name_id" to establish the mapping # relationship between the volume id and the back-end volume # name(id). # Set the key "_name_id" to None for a successful rename. model_update = {'_name_id': None} return model_update def manage_existing(self, volume, ref): """Manages an existing vdisk. Renames the vdisk to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated - if we got here then we have a vdisk that isn't in use (or we don't care if it is in use).
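A minimal sketch of the reference accepted here (the vdisk name is hypothetical; the same form is documented on manage_existing_get_size below): ref = {'source-name': 'my-existing-vdisk'} or ref = {'source-id': <vdisk UID>}.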
""" # Check that the reference is valid vdisk = self._manage_input_check(ref) vdisk_io_grp = self._helpers.get_volume_io_group(vdisk['name']) if vdisk_io_grp not in self._state['available_iogrps']: msg = (_("Failed to manage existing volume due to " "the volume to be managed is not in a valid " "I/O group.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if volume['volume_type_id']: opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) vdisk_copy = self._helpers.get_vdisk_copy_attrs(vdisk['name'], '0') if vdisk_copy['autoexpand'] == 'on' and opts['rsize'] == -1: msg = (_("Failed to manage existing volume due to " "the volume to be managed is thin, but " "the volume type chosen is thick.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if not vdisk_copy['autoexpand'] and opts['rsize'] != -1: msg = (_("Failed to manage existing volume due to " "the volume to be managed is thick, but " "the volume type chosen is thin.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (vdisk_copy['compressed_copy'] == 'no' and opts['compression']): msg = (_("Failed to manage existing volume due to the " "volume to be managed is not compress, but " "the volume type chosen is compress.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (vdisk_copy['compressed_copy'] == 'yes' and not opts['compression']): msg = (_("Failed to manage existing volume due to the " "volume to be managed is compress, but " "the volume type chosen is not compress.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if vdisk_io_grp != opts['iogrp']: msg = (_("Failed to manage existing volume due to " "I/O group mismatch. The I/O group of the " "volume to be managed is %(vdisk_iogrp)s. I/O group" "of the chosen type is %(opt_iogrp)s.") % {'vdisk_iogrp': vdisk['IO_group_name'], 'opt_iogrp': opts['iogrp']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) pool = utils.extract_host(volume['host'], 'pool') if vdisk['mdisk_grp_name'] != pool: msg = (_("Failed to manage existing volume due to the " "pool of the volume to be managed does not " "match the backend pool. Pool of the " "volume to be managed is %(vdisk_pool)s. Pool " "of the backend is %(backend_pool)s.") % {'vdisk_pool': vdisk['mdisk_grp_name'], 'backend_pool': self.configuration.storwize_svc_volpool_name}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) self._helpers.rename_vdisk(vdisk['name'], volume['name']) def manage_existing_get_size(self, volume, ref): """Return size of an existing Vdisk for manage_existing. existing_ref is a dictionary of the form: {'source-id': } or {'source-name': } Optional elements are: 'manage_if_in_use': True/False (default is False) If set to True, a volume will be managed even if it is currently attached to a host system. """ # Check that the reference is valid vdisk = self._manage_input_check(ref) # Check if the disk is in use, if we need to. manage_if_in_use = ref.get('manage_if_in_use', False) if (not manage_if_in_use and self._helpers.is_vdisk_in_use(vdisk['name'])): reason = _('The specified vdisk is mapped to a host.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) return int(math.ceil(float(vdisk['capacity']) / units.Gi)) def unmanage(self, volume): """Remove the specified volume from Cinder management.""" pass def get_volume_stats(self, refresh=False): """Get volume stats. If we haven't gotten stats yet or 'refresh' is True, run update the stats first. 
""" if not self._stats or refresh: self._update_volume_stats() return self._stats def create_consistencygroup(self, context, group): """Create a consistency group. IBM Storwize will create CG until cg-snapshot creation, db will maintain the volumes and CG relationship. """ LOG.debug("Creating consistency group.") model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} return model_update def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group. IBM Storwize will delete the volumes of the CG. """ LOG.debug("Deleting consistency group.") model_update = {} model_update['status'] = fields.ConsistencyGroupStatus.DELETED volumes = self.db.volume_get_all_by_group(context, group['id']) for volume in volumes: try: self._helpers.delete_vdisk(volume['name'], True) volume['status'] = 'deleted' except exception.VolumeBackendAPIException as err: volume['status'] = 'error_deleting' if model_update['status'] != 'error_deleting': model_update['status'] = 'error_deleting' LOG.error(_LE("Failed to delete the volume %(vol)s of CG. " "Exception: %(exception)s."), {'vol': volume['name'], 'exception': err}) return model_update, volumes def update_consistencygroup(self, ctxt, group, add_volumes, remove_volumes): """Adds or removes volume(s) to/from an existing consistency group.""" LOG.debug("Updating consistency group.") return None, None, None def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistencygroup from source. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :param volumes: a list of volume dictionaries in the group. :param cgsnapshot: the dictionary of the cgsnapshot as source. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. 
:return model_update, volumes_model_update """ LOG.debug('Enter: create_consistencygroup_from_src.') if cgsnapshot and snapshots: cg_name = 'cg-' + cgsnapshot.id sources = snapshots elif source_cg and source_vols: cg_name = 'cg-' + source_cg.id sources = source_vols else: error_msg = _("create_consistencygroup_from_src must be " "creating from a CG snapshot, or a source CG.") raise exception.InvalidInput(reason=error_msg) LOG.debug('create_consistencygroup_from_src: cg_name %(cg_name)s' ' %(sources)s', {'cg_name': cg_name, 'sources': sources}) self._helpers.create_fc_consistgrp(cg_name) timeout = self.configuration.storwize_svc_flashcopy_timeout model_update, snapshots_model = ( self._helpers.create_cg_from_source(group, cg_name, sources, volumes, self._state, self.configuration, timeout)) LOG.debug("Leave: create_consistencygroup_from_src.") return model_update, snapshots_model def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots): """Creates a cgsnapshot.""" # Use cgsnapshot id as cg name cg_name = 'cg_snap-' + cgsnapshot['id'] # Create new cg as cg_snapshot self._helpers.create_fc_consistgrp(cg_name) snapshots = self.db.snapshot_get_all_for_cgsnapshot( ctxt, cgsnapshot['id']) timeout = self.configuration.storwize_svc_flashcopy_timeout model_update, snapshots_model = ( self._helpers.run_consistgrp_snapshots(cg_name, snapshots, self._state, self.configuration, timeout)) return model_update, snapshots_model def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" cgsnapshot_id = cgsnapshot['id'] cg_name = 'cg_snap-' + cgsnapshot_id snapshots = self.db.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id) model_update, snapshots_model = ( self._helpers.delete_consistgrp_snapshots(cg_name, snapshots)) return model_update, snapshots_model def get_pool(self, volume): attr = self._helpers.get_vdisk_attributes(volume['name']) if attr is None: msg = (_('get_pool: Failed to get attributes for volume ' '%s') % volume['name']) LOG.error(msg) raise exception.VolumeDriverException(message=msg) return attr['mdisk_grp_name'] def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") data = {} data['vendor_name'] = 'IBM' data['driver_version'] = self.VERSION data['storage_protocol'] = self.protocol data['pools'] = [] data['multiattach'] = (self.configuration. 
                                 storwize_svc_multihostmap_enabled)

        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = (backend_name or
                                       self._state['system_name'])

        data['pools'] = [self._build_pool_stats(pool)
                         for pool in
                         self.configuration.storwize_svc_volpool_name]
        data['replication'] = self._replication_enabled
        data['replication_enabled'] = self._replication_enabled
        data['replication_targets'] = self._get_replication_targets()
        self._stats = data

    def _build_pool_stats(self, pool):
        """Build pool stats."""
        QoS_support = True
        pool_stats = {}
        try:
            pool_data = self._helpers.get_pool_attrs(pool)
            if pool_data:
                easy_tier = pool_data['easy_tier'] in ['on', 'auto']
                total_capacity_gb = float(pool_data['capacity']) / units.Gi
                free_capacity_gb = float(pool_data['free_capacity']) / units.Gi
                allocated_capacity_gb = (float(pool_data['used_capacity']) /
                                         units.Gi)
                location_info = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
                                 {'sys_id': self._state['system_id'],
                                  'pool': pool_data['name']})
                pool_stats = {
                    'pool_name': pool_data['name'],
                    'total_capacity_gb': total_capacity_gb,
                    'free_capacity_gb': free_capacity_gb,
                    'allocated_capacity_gb': allocated_capacity_gb,
                    'compression_support': self._state['compression_enabled'],
                    'reserved_percentage':
                        self.configuration.reserved_percentage,
                    'QoS_support': QoS_support,
                    'consistencygroup_support': True,
                    'location_info': location_info,
                    'easytier_support': easy_tier
                }
                if self._replication_enabled:
                    pool_stats.update({
                        'replication_enabled': self._replication_enabled,
                        'replication_type': self._supported_replication_types,
                        'replication_targets':
                            self._get_replication_targets(),
                        'replication_count': len(self._replication_targets)
                    })
                elif self.replication:
                    pool_stats.update(self.replication.get_replication_info())

        except exception.VolumeBackendAPIException:
            msg = _('Failed getting details for pool %s.') % pool
            raise exception.VolumeBackendAPIException(data=msg)

        return pool_stats

    def _get_replication_targets(self):
        return [target['backend_id'] for target in self._replication_targets]

    def _manage_input_check(self, ref):
        """Verify the input of manage function."""
        # Check that the reference is valid
        if 'source-name' in ref:
            manage_source = ref['source-name']
            vdisk = self._helpers.get_vdisk_attributes(manage_source)
        elif 'source-id' in ref:
            manage_source = ref['source-id']
            vdisk = self._helpers.vdisk_by_uid(manage_source)
        else:
            reason = _('Reference must contain source-id or '
                       'source-name element.')
            raise exception.ManageExistingInvalidReference(existing_ref=ref,
                                                           reason=reason)

        if vdisk is None:
            reason = (_('No vdisk with the UID specified by ref %s.')
                      % manage_source)
            raise exception.ManageExistingInvalidReference(existing_ref=ref,
                                                           reason=reason)
        return vdisk
cinder-8.0.0/cinder/volume/drivers/ibm/storwize_svc/replication.py0000664000567000056710000004375512701406250026563 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
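# A brief illustrative sketch of how the classes in this module are meant
# to be used from the Storwize driver; the driver, ctxt and volume objects
# are assumptions supplied by the caller:
#
#     replication = StorwizeSVCReplication.factory(driver)
#     if replication and replication.is_replicated(volume):
#         replication.create_replica(ctxt, volume)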
# import random import uuid from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE, _LI from cinder import ssh_utils from cinder import utils from cinder.volume import volume_types LOG = logging.getLogger(__name__) class StorwizeSVCReplication(object): def __init__(self, driver): self.driver = driver @staticmethod def factory(driver): """Use replication methods for the requested mode.""" stretch = driver.configuration.storwize_svc_stretched_cluster_partner if stretch: return StorwizeSVCReplicationStretchedCluster(driver) def create_replica(self, ctxt, volume): return (None, None) def is_replicated(self, volume): return False def promote_replica(self, volume): pass def test_replica(self, tgt_volume, src_volume): pass def get_replication_status(self, volume): return None def get_replication_info(self): return {} def reenable_replication(self, volume): """Enable the replication between the primary and secondary volumes. This is not implemented in the StorwizeSVCReplicationStretchedCluster, as the Storwize backend is responsible for automatically resuming mirroring when stopped. """ pass class StorwizeSVCReplicationStretchedCluster(StorwizeSVCReplication): """Support for Storwize/SVC stretched cluster mode replication. This stretched cluster mode implements volume replication in terms of adding a copy to an existing volume, which changes a nonmirrored volume into a mirrored volume. """ def __init__(self, driver, replication_target=None): super(StorwizeSVCReplicationStretchedCluster, self).__init__(driver) self.target = replication_target or {} def create_replica(self, ctxt, volume, vol_type = None): # if vol_type is None, use the source volume type if vol_type is None: vol_type = volume['volume_type_id'] vol_type = volume_types.get_volume_type(ctxt, vol_type) conf = self.driver.configuration dest_pool = conf.storwize_svc_stretched_cluster_partner self.driver.add_vdisk_copy(volume['name'], dest_pool, vol_type) vol_update = {'replication_status': 'copying'} return vol_update def delete_replica(self, volume): vdisk = volume['name'] copies = self.driver._helpers.get_vdisk_copies(vdisk) secondary = copies['secondary'] if secondary: self.driver._helpers.rm_vdisk_copy(volume['name'], secondary['copy_id']) else: LOG.info(_LI('Could not find replica to delete of' ' volume %(vol)s.'), {'vol': vdisk}) def test_replica(self, tgt_volume, src_volume): vdisk = src_volume['name'] opts = self.driver._get_vdisk_params(tgt_volume['volume_type_id']) copies = self.driver._helpers.get_vdisk_copies(vdisk) if copies['secondary']: dest_pool = copies['secondary']['mdisk_grp_name'] self.driver._helpers.create_copy(src_volume['name'], tgt_volume['name'], src_volume['id'], self.driver.configuration, opts, True, pool=dest_pool) else: msg = (_('Unable to create replica clone for volume %s.'), vdisk) raise exception.VolumeDriverException(message=msg) def promote_replica(self, volume): vdisk = volume['name'] copies = self.driver._helpers.get_vdisk_copies(vdisk) if copies['secondary']: copy_id = copies['secondary']['copy_id'] self.driver._helpers.change_vdisk_primary_copy(volume['name'], copy_id) else: msg = (_('Unable to promote replica to primary for volume %s.' 
' No secondary copy available.'), volume['id']) raise exception.VolumeDriverException(message=msg) def get_replication_status(self, volume): # Make sure volume is replicated, otherwise ignore if volume['replication_status'] == 'disabled': return None vdisk = volume['name'] orig = (volume['replication_status'], volume['replication_extended_status']) copies = self.driver._helpers.get_vdisk_copies(vdisk) primary = copies.get('primary', None) secondary = copies.get('secondary', None) # Check status of primary copy, set status 'error' as default status = 'error' if not primary: primary = {'status': 'not found', 'sync': 'no'} else: if primary['status'] == 'online': status = 'active' extended1 = (_('Primary copy status: %(status)s' ' and synchronized: %(sync)s.') % {'status': primary['status'], 'sync': primary['sync']}) # Check status of secondary copy if not secondary: secondary = {'status': 'not found', 'sync': 'no', 'sync_progress': '0'} status = 'error' else: if secondary['status'] != 'online': status = 'error' else: if secondary['sync'] == 'yes': secondary['sync_progress'] = '100' # Only change the status if not in error state if status != 'error': status = 'active' else: # Primary offline, secondary online, data consistent, # stop copying status = 'active-stop' else: # Primary and secondary both online, the status is copying if status != 'error': status = 'copying' extended2 = (_('Secondary copy status: %(status)s' ' and synchronized: %(sync)s,' ' sync progress is: %(progress)s%%.') % {'status': secondary['status'], 'sync': secondary['sync'], 'progress': secondary['sync_progress']}) extended = '%s. %s' % (extended1, extended2) if (status, extended) != orig: return {'replication_status': status, 'replication_extended_status': extended} else: return None def get_replication_info(self): data = {} data['replication'] = True return data class StorwizeSVCReplicationGlobalMirror( StorwizeSVCReplicationStretchedCluster): """Support for Storwize/SVC global mirror mode replication. Global Mirror establishes a Global Mirror relationship between two volumes of equal size. The volumes in a Global Mirror relationship are referred to as the master (source) volume and the auxiliary (target) volume. This mode is dedicated to the asynchronous volume replication. 
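    A rough (assumed) call sequence in terms of the methods defined below;
    the target dictionary, helpers class and context are placeholders::

        rep = StorwizeSVCReplicationGlobalMirror(driver, target, helpers)
        rep.establish_target_partnership()
        rep.volume_replication_setup(context, vref)
        # on disaster, the auxiliary copy becomes the primary
        rep.failover_volume_host(context, vref,
                                 secondary=target['backend_id'])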
""" asyncmirror = True UUID_LEN = 36 def __init__(self, driver, replication_target=None, target_helpers=None): super(StorwizeSVCReplicationGlobalMirror, self).__init__( driver, replication_target) self.sshpool = None self.target_helpers = target_helpers(self._run_ssh) def _partnership_validate_create(self, client, remote_name, remote_ip): try: partnership_info = client.get_partnership_info( remote_name) if not partnership_info: candidate_info = client.get_partnershipcandidate_info( remote_name) if not candidate_info: client.mkippartnership(remote_ip) else: client.mkfcpartnership(remote_name) elif partnership_info['partnership'] == ( 'fully_configured_stopped'): client.startpartnership(partnership_info['id']) except Exception: msg = (_('Unable to establish the partnership with ' 'the Storwize cluster %s.'), remote_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def establish_target_partnership(self): local_system_info = self.driver._helpers.get_system_info() target_system_info = self.target_helpers.get_system_info() local_system_name = local_system_info['system_name'] target_system_name = target_system_info['system_name'] local_ip = self.driver.configuration.safe_get('san_ip') target_ip = self.target.get('san_ip') self._partnership_validate_create(self.driver._helpers, target_system_name, target_ip) self._partnership_validate_create(self.target_helpers, local_system_name, local_ip) def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd_list) # TODO(vhou): We'll have a common method in ssh_utils to take # care of this _run_ssh method. command = ' '. join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool( self.target.get('san_ip'), self.target.get('san_ssh_port', 22), self.target.get('ssh_conn_timeout', 30), self.target.get('san_login'), password=self.target.get('san_password'), privatekey=self.target.get('san_private_key', ''), min_size=self.target.get('ssh_min_pool_conn', 1), max_size=self.target.get('ssh_max_pool_conn', 5),) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.error(six.text_type(e)) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error running SSH command: %s"), command) def volume_replication_setup(self, context, vref): target_vol_name = vref['name'] try: attr = self.target_helpers.get_vdisk_attributes(target_vol_name) if attr: # If the volume name exists in the target pool, we need # to change to a different target name. 
vol_id = six.text_type(uuid.uuid4()) prefix = vref['name'][0:len(vref['name']) - len(vol_id)] target_vol_name = prefix + vol_id opts = self.driver._get_vdisk_params(vref['volume_type_id']) pool = self.target.get('pool_name') self.target_helpers.create_vdisk(target_vol_name, six.text_type(vref['size']), 'gb', pool, opts) system_info = self.target_helpers.get_system_info() self.driver._helpers.create_relationship( vref['name'], target_vol_name, system_info.get('system_name'), self.asyncmirror) except Exception as e: msg = (_("Unable to set up mirror mode replication for %(vol)s. " "Exception: %(err)s."), {'vol': vref['id'], 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def create_relationship(self, vref, target_vol_name): if not target_vol_name: return try: system_info = self.target_helpers.get_system_info() self.driver._helpers.create_relationship( vref['name'], target_vol_name, system_info.get('system_name'), self.asyncmirror) except Exception: msg = (_("Unable to create the relationship for %s."), vref['name']) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def extend_target_volume(self, target_vol_name, amount): if not target_vol_name: return self.target_helpers.extend_vdisk(target_vol_name, amount) def delete_target_volume(self, vref): try: rel_info = self.driver._helpers.get_relationship_info(vref) except Exception as e: msg = (_('Failed to get remote copy information for %(volume)s ' 'due to %(err)s.'), {'volume': vref['id'], 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(data=msg) if rel_info and rel_info.get('aux_vdisk_name', None): try: self.driver._helpers.delete_relationship(vref['name']) self.driver._helpers.delete_vdisk( rel_info['aux_vdisk_name'], False) except Exception as e: msg = (_('Unable to delete the target volume for ' 'volume %(vol)s. Exception: %(err)s.'), {'vol': vref['id'], 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def get_relationship_status(self, volume): rel_info = {} try: rel_info = self.target_helpers.get_relationship_info(volume) except Exception: msg = (_LE('Unable to access the Storwize back-end ' 'for volume %s.'), volume['id']) LOG.error(msg) return rel_info.get('state') if rel_info else None def failover_volume_host(self, context, vref, secondary): if not self.target or self.target.get('backend_id') != secondary: msg = _LE("A valid secondary target MUST be specified in order " "to failover.") LOG.error(msg) # If the admin does not provide a valid secondary, the failover # will fail, but it is not severe enough to throw an exception. # The admin can still issue another failover request. That is # why we tentatively put return None instead of raising an # exception. return try: rel_info = self.target_helpers.get_relationship_info(vref) except Exception: msg = (_('Unable to access the Storwize back-end for volume %s.'), vref['id']) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if not rel_info: msg = (_('Unable to get the replication relationship for volume ' '%s.'), vref['id']) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: try: # Reverse the role of the primary and secondary volumes, # because the secondary volume becomes the primary in the # fail-over status. 
self.target_helpers.switch_relationship( rel_info.get('name')) except Exception as e: msg = (_('Unable to fail-over the volume %(id)s to the ' 'secondary back-end, because the replication ' 'relationship is unable to switch: %(error)s'), {"id": vref['id'], "error": e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def replication_failback(self, volume): rel_info = self.target_helpers.get_relationship_info(volume) if rel_info: self.target_helpers.switch_relationship(rel_info.get('name'), aux=False) class StorwizeSVCReplicationMetroMirror( StorwizeSVCReplicationGlobalMirror): """Support for Storwize/SVC metro mirror mode replication. Metro Mirror establishes a Metro Mirror relationship between two volumes of equal size. The volumes in a Metro Mirror relationship are referred to as the master (source) volume and the auxiliary (target) volume. """ asyncmirror = False def __init__(self, driver, replication_target=None, target_helpers=None): super(StorwizeSVCReplicationMetroMirror, self).__init__( driver, replication_target, target_helpers) cinder-8.0.0/cinder/volume/drivers/ibm/storwize_svc/__init__.py0000664000567000056710000000000012701406250025761 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py0000664000567000056710000002563712701406257030213 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ ISCSI volume driver for IBM Storwize family and SVC storage systems. Notes: 1. If you specify both a password and a key file, this driver will use the key file only. 2. When using a key file for authentication, it is up to the user or system administrator to store the private key in a safe manner. 3. The defaults for creating volumes are "-rsize 2% -autoexpand -grainsize 256 -warning 0". These can be changed in the configuration file or by using volume types(recommended only for advanced users). Limitations: 1. The driver expects CLI output in English, error messages may be in a localized format. 2. Clones and creating volumes from snapshots, where the source and target are of different sizes, is not supported. """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _, _LE, _LW from cinder import utils from cinder.volume.drivers.ibm.storwize_svc import ( storwize_svc_common as storwize_common) LOG = logging.getLogger(__name__) storwize_svc_iscsi_opts = [ cfg.BoolOpt('storwize_svc_iscsi_chap_enabled', default=True, help='Configure CHAP authentication for iSCSI connections ' '(Default: Enabled)'), ] CONF = cfg.CONF CONF.register_opts(storwize_svc_iscsi_opts) class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): """IBM Storwize V7000 and SVC iSCSI volume driver. 
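    A typical (illustrative) cinder.conf backend stanza for this driver;
    the section name and backend name are assumptions::

        [storwize-iscsi]
        volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver
        volume_backend_name = storwize-iscsi
        storwize_svc_iscsi_chap_enabled = True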
Version history: 1.0 - Initial driver 1.1 - FC support, create_cloned_volume, volume type support, get_volume_stats, minor bug fixes 1.2.0 - Added retype 1.2.1 - Code refactor, improved exception handling 1.2.2 - Fix bug #1274123 (races in host-related functions) 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to lsfabric, clear unused data from connections, ensure matching WWPNs by comparing lower case 1.2.4 - Fix bug #1278035 (async migration/retype) 1.2.5 - Added support for manage_existing (unmanage is inherited) 1.2.6 - Added QoS support in terms of I/O throttling rate 1.3.1 - Added support for volume replication 1.3.2 - Added support for consistency group 1.3.3 - Update driver to use ABC metaclasses 2.0 - Code refactor, split init file and placed shared methods for FC and iSCSI within the StorwizeSVCCommonDriver class 2.0.1 - Added support for multiple pools with model update 2.1 - Added replication V2 support to the global/metro mirror mode 2.1.1 - Update replication to version 2.1 """ VERSION = "2.1.1" def __init__(self, *args, **kwargs): super(StorwizeSVCISCSIDriver, self).__init__(*args, **kwargs) self.protocol = 'iSCSI' self.configuration.append_config_values( storwize_svc_iscsi_opts) def validate_connector(self, connector): """Check connector for at least one enabled iSCSI protocol.""" if 'initiator' not in connector: LOG.error(_LE('The connector does not contain the required ' 'information.')) raise exception.InvalidConnectorException( missing='initiator') @utils.synchronized('storwize-host', external=True) def initialize_connection(self, volume, connector): """Perform necessary work to make an iSCSI connection. To be able to create an iSCSI connection from a given host to a volume, we must: 1. Translate the given iSCSI name to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. 
Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug('enter: initialize_connection: volume %(vol)s with connector' ' %(conn)s', {'vol': volume['id'], 'conn': connector}) volume_name = volume['name'] # Check if a host object is defined for this host name host_name = self._helpers.get_host_from_connector(connector) if host_name is None: # Host does not exist - add a new host to Storwize/SVC host_name = self._helpers.create_host(connector) chap_secret = self._helpers.get_chap_secret_for_host(host_name) chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled if chap_enabled and chap_secret is None: chap_secret = self._helpers.add_chap_secret_to_host(host_name) elif not chap_enabled and chap_secret: LOG.warning(_LW('CHAP secret exists for host but CHAP is ' 'disabled.')) volume_attributes = self._helpers.get_vdisk_attributes(volume_name) if volume_attributes is None: msg = (_('initialize_connection: Failed to get attributes' ' for volume %s.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) multihostmap = self.configuration.storwize_svc_multihostmap_enabled lun_id = self._helpers.map_vol_to_host(volume_name, host_name, multihostmap) try: preferred_node = volume_attributes['preferred_node_id'] IO_group = volume_attributes['IO_group_id'] except KeyError as e: LOG.error(_LE('Did not find expected column name in ' 'lsvdisk: %s.'), e) raise exception.VolumeBackendAPIException( data=_('initialize_connection: Missing volume attribute for ' 'volume %s.') % volume_name) try: # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] for node in self._state['storage_nodes'].values(): if self.protocol not in node['enabled_protocols']: continue if node['id'] == preferred_node: preferred_node_entry = node if node['IO_group'] == IO_group: io_group_nodes.append(node) if not len(io_group_nodes): msg = (_('initialize_connection: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning(_LW('initialize_connection: Did not find a ' 'preferred node for volume %s.'), volume_name) properties = {} properties['target_discovered'] = False properties['target_lun'] = lun_id properties['volume_id'] = volume['id'] if len(preferred_node_entry['ipv4']): ipaddr = preferred_node_entry['ipv4'][0] else: ipaddr = preferred_node_entry['ipv6'][0] properties['target_portal'] = '%s:%s' % (ipaddr, '3260') properties['target_iqn'] = preferred_node_entry['iscsi_name'] if chap_secret: properties['auth_method'] = 'CHAP' properties['auth_username'] = connector['initiator'] properties['auth_password'] = chap_secret properties['discovery_auth_method'] = 'CHAP' properties['discovery_auth_username'] = ( connector['initiator']) properties['discovery_auth_password'] = chap_secret except Exception: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) LOG.error(_LE('initialize_connection: Failed ' 'to collect return ' 'properties for volume %(vol)s and connector ' '%(conn)s.\n'), {'vol': volume, 'conn': connector}) LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector %(conn)s\n properties: %(prop)s', {'vol': volume['id'], 'conn': connector, 'prop': properties}) return {'driver_volume_type': 'iscsi', 'data': properties, } 
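    # For reference, the dict returned by initialize_connection() above
    # takes roughly this form (values are illustrative; the CHAP keys are
    # only present when a CHAP secret exists):
    #
    #     {'driver_volume_type': 'iscsi',
    #      'data': {'target_discovered': False,
    #               'target_lun': 0,
    #               'target_portal': '192.168.0.10:3260',
    #               'target_iqn': 'iqn.1986-03.com.ibm:2145.cluster.node1',
    #               'volume_id': volume['id'],
    #               'auth_method': 'CHAP',
    #               'auth_username': connector['initiator'],
    #               'auth_password': '<chap secret>'}}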
@utils.synchronized('storwize-host', external=True) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after an iSCSI connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug('enter: terminate_connection: volume %(vol)s with connector' ' %(conn)s', {'vol': volume['id'], 'conn': connector}) vol_name = volume['name'] info = {} if 'host' in connector: # get host according to iSCSI protocol info = {'driver_volume_type': 'iscsi', 'data': {}} host_name = self._helpers.get_host_from_connector(connector) if host_name is None: msg = (_('terminate_connection: Failed to get host name from' ' connector.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: # See bug #1244257 host_name = None # Unmap volumes, if hostname is None, need to get value from vdiskmap host_name = self._helpers.unmap_vol_from_host(vol_name, host_name) # Host_name could be none if host_name: resp = self._helpers.check_host_mapped_vols(host_name) if not len(resp): self._helpers.delete_host(host_name) LOG.debug('leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s', {'vol': volume['id'], 'conn': connector}) return info cinder-8.0.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py0000664000567000056710000002751712701406257027470 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume FC driver for IBM Storwize family and SVC storage systems. Notes: 1. If you specify both a password and a key file, this driver will use the key file only. 2. When using a key file for authentication, it is up to the user or system administrator to store the private key in a safe manner. 3. The defaults for creating volumes are "-rsize 2% -autoexpand -grainsize 256 -warning 0". These can be changed in the configuration file or by using volume types(recommended only for advanced users). Limitations: 1. The driver expects CLI output in English, error messages may be in a localized format. 2. Clones and creating volumes from snapshots, where the source and target are of different sizes, is not supported. 
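Example (hypothetical volume type name; the 'drivers' scope is assumed to
be how this driver family reads extra specs) of overriding the creation
defaults listed above through a volume type::

    cinder type-create storwize-thin
    cinder type-key storwize-thin set drivers:rsize=5 drivers:grainsize=128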
""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils from cinder.volume.drivers.ibm.storwize_svc import ( storwize_svc_common as storwize_common) from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) storwize_svc_fc_opts = [ cfg.BoolOpt('storwize_svc_multipath_enabled', default=False, help='Connect with multipath (FC only; iSCSI multipath is ' 'controlled by Nova)'), ] CONF = cfg.CONF CONF.register_opts(storwize_svc_fc_opts) class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): """IBM Storwize V7000 and SVC FC volume driver. Version history: 1.0 - Initial driver 1.1 - FC support, create_cloned_volume, volume type support, get_volume_stats, minor bug fixes 1.2.0 - Added retype 1.2.1 - Code refactor, improved exception handling 1.2.2 - Fix bug #1274123 (races in host-related functions) 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to lsfabric, clear unused data from connections, ensure matching WWPNs by comparing lower case 1.2.4 - Fix bug #1278035 (async migration/retype) 1.2.5 - Added support for manage_existing (unmanage is inherited) 1.2.6 - Added QoS support in terms of I/O throttling rate 1.3.1 - Added support for volume replication 1.3.2 - Added support for consistency group 1.3.3 - Update driver to use ABC metaclasses 2.0 - Code refactor, split init file and placed shared methods for FC and iSCSI within the StorwizeSVCCommonDriver class 2.0.1 - Added support for multiple pools with model update 2.1 - Added replication V2 support to the global/metro mirror mode 2.1.1 - Update replication to version 2.1 """ VERSION = "2.1.1" def __init__(self, *args, **kwargs): super(StorwizeSVCFCDriver, self).__init__(*args, **kwargs) self.protocol = 'FC' self.configuration.append_config_values( storwize_svc_fc_opts) def validate_connector(self, connector): """Check connector for at least one enabled FC protocol.""" if 'wwpns' not in connector: LOG.error(_LE('The connector does not contain the required ' 'information.')) raise exception.InvalidConnectorException( missing='wwpns') @fczm_utils.AddFCZone @utils.synchronized('storwize-host', external=True) def initialize_connection(self, volume, connector): """Perform necessary work to make a FC connection. To be able to create an FC connection from a given host to a volume, we must: 1. Translate the given WWNN to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. 
Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug('enter: initialize_connection: volume %(vol)s with connector' ' %(conn)s', {'vol': volume['id'], 'conn': connector}) volume_name = volume['name'] # Check if a host object is defined for this host name host_name = self._helpers.get_host_from_connector(connector) if host_name is None: # Host does not exist - add a new host to Storwize/SVC host_name = self._helpers.create_host(connector) volume_attributes = self._helpers.get_vdisk_attributes(volume_name) if volume_attributes is None: msg = (_('initialize_connection: Failed to get attributes' ' for volume %s.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) multihostmap = self.configuration.storwize_svc_multihostmap_enabled lun_id = self._helpers.map_vol_to_host(volume_name, host_name, multihostmap) try: preferred_node = volume_attributes['preferred_node_id'] IO_group = volume_attributes['IO_group_id'] except KeyError as e: LOG.error(_LE('Did not find expected column name in ' 'lsvdisk: %s.'), e) raise exception.VolumeBackendAPIException( data=_('initialize_connection: Missing volume attribute for ' 'volume %s.') % volume_name) try: # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] for node in self._state['storage_nodes'].values(): if node['id'] == preferred_node: preferred_node_entry = node if node['IO_group'] == IO_group: io_group_nodes.append(node) if not len(io_group_nodes): msg = (_('initialize_connection: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning(_LW('initialize_connection: Did not find a ' 'preferred node for volume %s.'), volume_name) properties = {} properties['target_discovered'] = False properties['target_lun'] = lun_id properties['volume_id'] = volume['id'] conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name) # If conn_wwpns is empty, then that means that there were # no target ports with visibility to any of the initiators # so we return all target ports. 
if len(conn_wwpns) == 0: for node in self._state['storage_nodes'].values(): conn_wwpns.extend(node['WWPN']) properties['target_wwn'] = conn_wwpns i_t_map = self._make_initiator_target_map(connector['wwpns'], conn_wwpns) properties['initiator_target_map'] = i_t_map # specific for z/VM, refer to cinder bug 1323993 if "zvm_fcp" in connector: properties['zvm_fcp'] = connector['zvm_fcp'] except Exception: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) LOG.error(_LE('initialize_connection: Failed ' 'to collect return ' 'properties for volume %(vol)s and connector ' '%(conn)s.\n'), {'vol': volume, 'conn': connector}) LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector %(conn)s\n properties: %(prop)s', {'vol': volume['id'], 'conn': connector, 'prop': properties}) return {'driver_volume_type': 'fibre_channel', 'data': properties, } def _make_initiator_target_map(self, initiator_wwpns, target_wwpns): """Build a simplistic all-to-all mapping.""" i_t_map = {} for i_wwpn in initiator_wwpns: i_t_map[str(i_wwpn)] = [] for t_wwpn in target_wwpns: i_t_map[i_wwpn].append(t_wwpn) return i_t_map @fczm_utils.RemoveFCZone @utils.synchronized('storwize-host', external=True) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after an FC connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug('enter: terminate_connection: volume %(vol)s with connector' ' %(conn)s', {'vol': volume['id'], 'conn': connector}) vol_name = volume['name'] info = {} if 'host' in connector: # get host according to FC protocol connector = connector.copy() connector.pop('initiator', None) info = {'driver_volume_type': 'fibre_channel', 'data': {}} host_name = self._helpers.get_host_from_connector(connector) if host_name is None: msg = (_('terminate_connection: Failed to get host name from' ' connector.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: # See bug #1244257 host_name = None # Unmap volumes, if hostname is None, need to get value from vdiskmap host_name = self._helpers.unmap_vol_from_host(vol_name, host_name) # Host_name could be none if host_name: resp = self._helpers.check_host_mapped_vols(host_name) if not len(resp): LOG.info(_LI("Need to remove FC Zone, building initiator " "target map.")) # Build info data structure for zone removing if 'wwpns' in connector and host_name: target_wwpns = [] # Returning all target_wwpns in storage_nodes, since # we cannot determine which wwpns are logged in during # a VM deletion. 
for node in self._state['storage_nodes'].values(): target_wwpns.extend(node['WWPN']) init_targ_map = (self._make_initiator_target_map (connector['wwpns'], target_wwpns)) info['data'] = {'initiator_target_map': init_targ_map} # No volume mapped to the host, delete host from array self._helpers.delete_host(host_name) LOG.debug('leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s', {'vol': volume['id'], 'conn': connector}) return info cinder-8.0.0/cinder/volume/drivers/ibm/__init__.py0000664000567000056710000000000012701406250023220 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/ibm/flashsystem_fc.py0000664000567000056710000003400712701406250024511 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for IBM FlashSystem storage systems with FC protocol. Limitations: 1. Cinder driver only works when open_access_enabled=off. """ import random import threading from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils import cinder.volume.driver from cinder.volume.drivers.ibm import flashsystem_common as fscommon from cinder.volume.drivers.san import san from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) flashsystem_fc_opts = [ cfg.BoolOpt('flashsystem_multipath_enabled', default=False, help='This option no longer has any affect. It is deprecated ' 'and will be removed in the next release.', deprecated_for_removal=True) ] CONF = cfg.CONF CONF.register_opts(flashsystem_fc_opts) class FlashSystemFCDriver(fscommon.FlashSystemDriver, cinder.volume.driver.FibreChannelDriver): """IBM FlashSystem FC volume driver. Version history: 1.0.0 - Initial driver 1.0.1 - Code clean up 1.0.2 - Add lock into vdisk map/unmap, connection initialize/terminate 1.0.3 - Initial driver for iSCSI 1.0.4 - Split Flashsystem driver into common and FC 1.0.5 - Report capability of volume multiattach 1.0.6 - Fix bug #1469581, add I/T mapping check in terminate_connection 1.0.7 - Fix bug #1505477, add host name check in _find_host_exhaustive for FC """ VERSION = "1.0.7" def __init__(self, *args, **kwargs): super(FlashSystemFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(fscommon.flashsystem_opts) self.configuration.append_config_values(flashsystem_fc_opts) self.configuration.append_config_values(san.san_opts) def _check_vdisk_params(self, params): # Check that the requested protocol is enabled if params['protocol'] != self._protocol: msg = (_("Illegal value '%(prot)s' specified for " "flashsystem_connection_protocol: " "valid value(s) are %(enabled)s.") % {'prot': params['protocol'], 'enabled': self._protocol}) raise exception.InvalidInput(reason=msg) def _create_host(self, connector): """Create a new host on the storage system. 
We create a host and associate it with the given connection information. """ LOG.debug('enter: _create_host: host %s.', connector['host']) rand_id = six.text_type(random.randint(0, 99999999)).zfill(8) host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector), rand_id) ports = [] if 'FC' == self._protocol and 'wwpns' in connector: for wwpn in connector['wwpns']: ports.append('-hbawwpn %s' % wwpn) self._driver_assert(ports, (_('_create_host: No connector ports.'))) port1 = ports.pop(0) arg_name, arg_val = port1.split() ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name', '"%s"' % host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return('successfully created' in out, '_create_host', ssh_cmd, out, err) for port in ports: arg_name, arg_val = port.split() ssh_cmd = ['svctask', 'addhostport', '-force', arg_name, arg_val, host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( (not out.strip()), '_create_host', ssh_cmd, out, err) LOG.debug( 'leave: _create_host: host %(host)s - %(host_name)s.', {'host': connector['host'], 'host_name': host_name}) return host_name def _find_host_exhaustive(self, connector, hosts): hname = connector['host'] hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts] if hname in hnames: host = hosts[hnames.index(hname)] ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( out.strip(), '_find_host_exhaustive', ssh_cmd, out, err) attr_lines = [attr_line for attr_line in out.split('\n')] attr_parm = {} for attr_line in attr_lines: attr_name, foo, attr_val = attr_line.partition('!') attr_parm[attr_name] = attr_val if ('WWPN' in attr_parm.keys() and 'wwpns' in connector and attr_parm['WWPN'].lower() in map(str.lower, map(str, connector['wwpns']))): return host else: LOG.warning(_LW('Host %(host)s was not found on backend storage.'), {'host': hname}) return None def _get_conn_fc_wwpns(self): wwpns = [] cmd = ['svcinfo', 'lsportfc'] generator = self._port_conf_generator(cmd) header = next(generator, None) if not header: return wwpns for port_data in generator: try: if port_data['status'] == 'active': wwpns.append(port_data['WWPN']) except KeyError: self._handle_keyerror('lsportfc', header) return wwpns def _get_fc_wwpns(self): for key in self._storage_nodes: node = self._storage_nodes[key] ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!', node['id']] attributes = self._execute_command_and_parse_attributes(ssh_cmd) wwpns = set(node['WWPN']) for i, s in zip(attributes['port_id'], attributes['port_status']): if 'unconfigured' != s: wwpns.add(i) node['WWPN'] = list(wwpns) LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s.'), {'node': node['id'], 'wwpn': node['WWPN']}) def _get_vdisk_map_properties( self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): """Get the map properties of vdisk.""" LOG.debug( 'enter: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) IO_group = '0' io_group_nodes = [] for k, node in self._storage_nodes.items(): if vdisk_params['protocol'] != node['protocol']: continue if node['IO_group'] == IO_group: io_group_nodes.append(node) if not io_group_nodes: msg = (_('_get_vdisk_map_properties: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': vdisk_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) properties = {} properties['target_discovered'] = False properties['target_lun'] = lun_id properties['volume_id'] = vdisk_id type_str = 'fibre_channel' conn_wwpns = 
self._get_conn_fc_wwpns() if not conn_wwpns: msg = _('_get_vdisk_map_properties: Could not get FC ' 'connection information for the host-volume ' 'connection. Is the host configured properly ' 'for FC connections?') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) properties['target_wwn'] = conn_wwpns if "zvm_fcp" in connector: properties['zvm_fcp'] = connector['zvm_fcp'] properties['initiator_target_map'] = self._build_initiator_target_map( connector['wwpns'], conn_wwpns) LOG.debug( 'leave: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) return {'driver_volume_type': type_str, 'data': properties} @fczm_utils.AddFCZone @utils.synchronized('flashsystem-init-conn', external=True) def initialize_connection(self, volume, connector): """Perform work so that an FC connection can be made. To be able to create a FC connection from a given host to a volume, we must: 1. Translate the given WWNN to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug( 'enter: initialize_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) vdisk_name = volume['name'] vdisk_id = volume['id'] vdisk_params = self._get_vdisk_params(volume['volume_type_id']) # TODO(edwin): might fix it after vdisk copy function is # ready in FlashSystem thin-provision layer. As this validation # is to check the vdisk which is in copying, at present in firmware # level vdisk doesn't allow to map host which it is copy. New # vdisk clone and snapshot function will cover it. After that the # _wait_vdisk_copy_completed need some modification. self._wait_vdisk_copy_completed(vdisk_name) self._driver_assert( self._is_vdisk_defined(vdisk_name), (_('initialize_connection: vdisk %s is not defined.') % vdisk_name)) lun_id = self._map_vdisk_to_host(vdisk_name, connector) properties = {} try: properties = self._get_vdisk_map_properties( connector, lun_id, vdisk_name, vdisk_id, vdisk_params) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) LOG.error(_LE('initialize_connection: Failed to collect ' 'return properties for volume %(vol)s and ' 'connector %(conn)s.'), {'vol': volume, 'conn': connector}) LOG.debug( 'leave: initialize_connection:\n volume: %(vol)s\n connector ' '%(conn)s\n properties: %(prop)s.', {'vol': volume, 'conn': connector, 'prop': properties}) return properties @fczm_utils.RemoveFCZone @utils.synchronized('flashsystem-term-conn', external=True) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. 
Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug( 'enter: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) return_data = { 'driver_volume_type': 'fibre_channel', 'data': {}, } vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) self._unmap_vdisk_from_host(vdisk_name, connector) host_name = self._get_host_from_connector(connector) if not host_name: properties = {} conn_wwpns = self._get_conn_fc_wwpns() properties['target_wwn'] = conn_wwpns properties['initiator_target_map'] = ( self._build_initiator_target_map( connector['wwpns'], conn_wwpns)) return_data['data'] = properties LOG.debug( 'leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) return return_data def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" self._context = ctxt # Get data of configured node self._get_node_data() # Get the WWPNs of the FlashSystem nodes self._get_fc_wwpns() # For each node, check what connection modes it supports. Delete any # nodes that do not support any types (may be partially configured). to_delete = [] for k, node in self._storage_nodes.items(): if not node['WWPN']: to_delete.append(k) for delkey in to_delete: del self._storage_nodes[delkey] # Make sure we have at least one node configured self._driver_assert(self._storage_nodes, 'do_setup: No configured nodes.') self._protocol = node['protocol'] = 'FC' # Set for vdisk synchronization self._vdisk_copy_in_progress = set() self._vdisk_copy_lock = threading.Lock() self._check_lock_interval = 5 def validate_connector(self, connector): """Check connector.""" if 'FC' == self._protocol and 'wwpns' not in connector: msg = _LE('The connector does not contain the ' 'required information: wwpns is missing') LOG.error(msg) raise exception.InvalidConnectorException(missing='wwpns') cinder-8.0.0/cinder/volume/drivers/ibm/xiv_ds8k.py0000664000567000056710000002411712701406250023237 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Authors: # Erik Zaadi # Avishay Traeger """ Unified Volume driver for IBM XIV and DS8K Storage Systems. 
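A minimal (illustrative) backend configuration built from the options
registered below; the section name and san_ip value are assumptions::

    [xiv-ds8k-backend]
    volume_driver = cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver
    xiv_ds8k_proxy = xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy
    xiv_ds8k_connection_type = iscsi
    san_ip = 10.0.0.10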
""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.volume import driver from cinder.volume.drivers.san import san xiv_ds8k_opts = [ cfg.StrOpt( 'xiv_ds8k_proxy', default='xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy', help='Proxy driver that connects to the IBM Storage Array'), cfg.StrOpt( 'xiv_ds8k_connection_type', default='iscsi', choices=['fibre_channel', 'iscsi'], help='Connection type to the IBM Storage Array'), cfg.StrOpt( 'xiv_chap', default='disabled', choices=['disabled', 'enabled'], help='CHAP authentication mode, effective only for iscsi' ' (disabled|enabled)'), cfg.StrOpt( 'management_ips', default='', help='List of Management IP addresses (separated by commas)'), ] CONF = cfg.CONF CONF.register_opts(xiv_ds8k_opts) LOG = logging.getLogger(__name__) class XIVDS8KDriver(san.SanDriver, driver.ManageableVD, driver.ExtendVD, driver.SnapshotVD, driver.MigrateVD, driver.ConsistencyGroupVD, driver.CloneableImageVD, driver.TransferVD): """Unified IBM XIV and DS8K volume driver.""" def __init__(self, *args, **kwargs): """Initialize the driver.""" super(XIVDS8KDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(xiv_ds8k_opts) proxy = importutils.import_class(self.configuration.xiv_ds8k_proxy) active_backend_id = kwargs.get('active_backend_id', None) # NOTE: All Array specific configurations are prefixed with: # "xiv_ds8k_array_" # These additional flags should be specified in the cinder.conf # preferably in each backend configuration. self.xiv_ds8k_proxy = proxy( { "xiv_ds8k_user": self.configuration.san_login, "xiv_ds8k_pass": self.configuration.san_password, "xiv_ds8k_address": self.configuration.san_ip, "xiv_ds8k_vol_pool": self.configuration.san_clustername, "xiv_ds8k_connection_type": self.configuration.xiv_ds8k_connection_type, "xiv_chap": self.configuration.xiv_chap, "management_ips": self.configuration.management_ips }, LOG, exception, driver=self, active_backend_id=active_backend_id) def do_setup(self, context): """Setup and verify IBM XIV and DS8K Storage connection.""" self.xiv_ds8k_proxy.setup(context) def ensure_export(self, context, volume): """Ensure an export.""" return self.xiv_ds8k_proxy.ensure_export(context, volume) def create_export(self, context, volume, connector): """Create an export.""" return self.xiv_ds8k_proxy.create_export(context, volume) def create_volume(self, volume): """Create a volume on the IBM XIV and DS8K Storage system.""" return self.xiv_ds8k_proxy.create_volume(volume) def delete_volume(self, volume): """Delete a volume on the IBM XIV and DS8K Storage system.""" self.xiv_ds8k_proxy.delete_volume(volume) def remove_export(self, context, volume): """Disconnect a volume from an attached instance.""" return self.xiv_ds8k_proxy.remove_export(context, volume) def initialize_connection(self, volume, connector): """Map the created volume.""" return self.xiv_ds8k_proxy.initialize_connection(volume, connector) def terminate_connection(self, volume, connector, **kwargs): """Terminate a connection to a volume.""" return self.xiv_ds8k_proxy.terminate_connection(volume, connector) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" return self.xiv_ds8k_proxy.create_volume_from_snapshot( volume, snapshot) def create_snapshot(self, snapshot): """Create a snapshot.""" return self.xiv_ds8k_proxy.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Delete a snapshot.""" return 
self.xiv_ds8k_proxy.delete_snapshot(snapshot)

    def get_volume_stats(self, refresh=False):
        """Get volume stats."""
        return self.xiv_ds8k_proxy.get_volume_stats(refresh)

    def create_cloned_volume(self, tgt_volume, src_volume):
        """Create Cloned Volume."""
        return self.xiv_ds8k_proxy.create_cloned_volume(tgt_volume,
                                                        src_volume)

    def extend_volume(self, volume, new_size):
        """Extend Created Volume."""
        self.xiv_ds8k_proxy.extend_volume(volume, new_size)

    def migrate_volume(self, context, volume, host):
        """Migrate the volume to the specified host."""
        return self.xiv_ds8k_proxy.migrate_volume(context, volume, host)

    def manage_existing(self, volume, existing_ref):
        """Brings an existing backend storage object under Cinder management.

        existing_ref is passed straight through from the API request's
        manage_existing_ref value, and it is up to the driver how this
        should be interpreted.  It should be sufficient to identify a
        storage object that the driver should somehow associate with the
        newly-created cinder volume structure.

        In the case of XIV, the existing_ref consists of a single field
        named 'existing_ref' representing the name of the volume on the
        storage.

        There are two ways to do this:

        1. Rename the backend storage object so that it matches
           volume['name'], which is how drivers traditionally map between
           a cinder volume and the associated backend storage object.

        2. Place some metadata on the volume, or somewhere in the backend,
           that allows other driver requests (e.g. delete, clone, attach,
           detach...) to locate the backend storage object when required.

        If the existing_ref doesn't make sense, or doesn't refer to an
        existing backend storage object, raise a
        ManageExistingInvalidReference exception.

        The volume may have a volume_type, and the driver can inspect that
        and compare against the properties of the referenced backend
        storage object.  If they are incompatible, raise a
        ManageExistingVolumeTypeMismatch, specifying a reason for the
        failure.
        """
        return self.xiv_ds8k_proxy.manage_volume(volume, existing_ref)

    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing."""
        return self.xiv_ds8k_proxy.manage_volume_get_size(volume,
                                                          existing_ref)

    def unmanage(self, volume):
        """Removes the specified volume from Cinder management."""
        return self.xiv_ds8k_proxy.unmanage_volume(volume)

    def freeze_backend(self, context):
        """Notify the backend that it's frozen."""
        return self.xiv_ds8k_proxy.freeze_backend(context)

    def thaw_backend(self, context):
        """Notify the backend that it's unfrozen/thawed."""
        return self.xiv_ds8k_proxy.thaw_backend(context)

    def failover_host(self, context, volumes, secondary_id=None):
        """Failover a backend to a secondary replication target.
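        Like the rest of this shell driver, the call is delegated to the
        vendor proxy. A hedged usage sketch (names are hypothetical; under
        the replication v2.1 model the call is expected to return the new
        active backend id together with per-volume model updates, and
        ``secondary_id='default'`` conventionally requests a failback)::

            active_backend, volume_updates = driver.failover_host(
                ctxt, volumes, secondary_id='backend_2')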
""" return self.xiv_ds8k_proxy.failover_host( context, volumes, secondary_id) def get_replication_status(self, context, volume): """Return replication status.""" return self.xiv_ds8k_proxy.get_replication_status(context, volume) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" return self.xiv_ds8k_proxy.retype(ctxt, volume, new_type, diff, host) def create_consistencygroup(self, context, group): """Creates a consistency group.""" return self.xiv_ds8k_proxy.create_consistencygroup(context, group) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" return self.xiv_ds8k_proxy.delete_consistencygroup( context, group, volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a consistency group snapshot.""" return self.xiv_ds8k_proxy.create_cgsnapshot( context, cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a consistency group snapshot.""" return self.xiv_ds8k_proxy.delete_cgsnapshot( context, cgsnapshot, snapshots) def update_consistencygroup(self, context, group, add_volumes, remove_volumes): """Adds or removes volume(s) to/from an existing consistency group.""" return self.xiv_ds8k_proxy.update_consistencygroup( context, group, add_volumes, remove_volumes) def create_consistencygroup_from_src( self, context, group, volumes, cgsnapshot, snapshots, source_cg=None, source_vols=None): """Creates a consistencygroup from source.""" return self.xiv_ds8k_proxy.create_consistencygroup_from_src( context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) cinder-8.0.0/cinder/volume/drivers/ibm/flashsystem_common.py0000664000567000056710000012320012701406250025403 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for IBM FlashSystem storage systems. Limitations: 1. Cinder driver only works when open_access_enabled=off. """ import re import string from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import utils as volume_utils from cinder.volume import volume_types LOG = logging.getLogger(__name__) FLASHSYSTEM_VOLPOOL_NAME = 'mdiskgrp0' FLASHSYSTEM_VOL_IOGRP = 0 flashsystem_opts = [ cfg.StrOpt('flashsystem_connection_protocol', default='FC', help='Connection protocol should be FC. ' '(Default is FC.)'), cfg.BoolOpt('flashsystem_multihostmap_enabled', default=True, help='Allows vdisk to multi host mapping. ' '(Default is True)') ] CONF = cfg.CONF CONF.register_opts(flashsystem_opts) class FlashSystemDriver(san.SanDriver): """IBM FlashSystem volume driver. 
Version history: 1.0.0 - Initial driver 1.0.1 - Code clean up 1.0.2 - Add lock into vdisk map/unmap, connection initialize/terminate 1.0.3 - Initial driver for iSCSI 1.0.4 - Split Flashsystem driver into common and FC 1.0.5 - Report capability of volume multiattach 1.0.6 - Fix bug #1469581, add I/T mapping check in terminate_connection 1.0.7 - Fix bug #1505477, add host name check in _find_host_exhaustive for FC """ VERSION = "1.0.7" def __init__(self, *args, **kwargs): super(FlashSystemDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(flashsystem_opts) self._storage_nodes = {} self._protocol = None self._context = None self._system_name = None self._system_id = None def _ssh(self, ssh_cmd, check_exit_code=True): try: return self._run_ssh(ssh_cmd, check_exit_code) except processutils.ProcessExecutionError as e: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _append_dict(self, dict_, key, value): key, value = key.strip(), value.strip() obj = dict_.get(key, None) if obj is None: dict_[key] = value elif isinstance(obj, list): obj.append(value) dict_[key] = obj else: dict_[key] = [obj, value] return dict_ def _assert_ssh_return(self, test, fun, ssh_cmd, out, err): self._driver_assert(test, (_('%(fun)s: Failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n stdout: %(out)s\n ' 'stderr: %(err)s') % {'fun': fun, 'cmd': ssh_cmd, 'out': six.text_type(out), 'err': six.text_type(err)})) def _build_default_params(self): return {'protocol': self.configuration.flashsystem_connection_protocol} def _build_initiator_target_map(self, initiator_wwpns, target_wwpns): map = {} for i_wwpn in initiator_wwpns: idx = six.text_type(i_wwpn) map[idx] = [] for t_wwpn in target_wwpns: map[idx].append(t_wwpn) return map def _connector_to_hostname_prefix(self, connector): """Translate connector info to storage system host name. Translate a host's name and IP to the prefix of its hostname on the storage subsystem. We create a host name from the host and IP address, replacing any invalid characters (at most 55 characters), and adding a random 8-character suffix to avoid collisions. The total length should be at most 63 characters. """ # Build cleanup translation tables for host names invalid_ch_in_host = '' for num in range(0, 128): ch = six.text_type(chr(num)) if not ch.isalnum() and ch not in [' ', '.', '-', '_']: invalid_ch_in_host = invalid_ch_in_host + ch host_name = connector['host'] if isinstance(host_name, six.text_type): unicode_host_name_filter = {ord(six.text_type(char)): u'-' for char in invalid_ch_in_host} host_name = host_name.translate(unicode_host_name_filter) elif isinstance(host_name, str): string_host_name_filter = string.maketrans( invalid_ch_in_host, '-' * len(invalid_ch_in_host)) host_name = host_name.translate(string_host_name_filter) else: msg = _('_create_host: Can not translate host name. Host name ' 'is not unicode or string.') LOG.error(msg) raise exception.NoValidHost(reason=msg) host_name = six.text_type(host_name) # FlashSystem family doesn't like hostname that starts with number. if not re.match('^[A-Za-z]', host_name): host_name = '_' + host_name return host_name[:55] def _copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id): """Copy data from src vdisk to dest vdisk. 
To be able to copy data between vdisks, we must ensure that both vdisks have been mapped to host. If vdisk has not been mapped, it must be mapped firstly. When data copy completed, vdisk should be restored to previous mapped or non-mapped status. """ LOG.debug('enter: _copy_vdisk_data: %(src)s -> %(dest)s.', {'src': src_vdisk_name, 'dest': dest_vdisk_name}) connector = utils.brick_get_connector_properties() (src_map, src_lun_id) = self._is_vdisk_map( src_vdisk_name, connector) (dest_map, dest_lun_id) = self._is_vdisk_map( dest_vdisk_name, connector) src_map_device = None src_properties = None dest_map_device = None dest_properties = None try: if not src_map: src_lun_id = self._map_vdisk_to_host(src_vdisk_name, connector) if not dest_map: dest_lun_id = self._map_vdisk_to_host(dest_vdisk_name, connector) src_properties = self._get_vdisk_map_properties( connector, src_lun_id, src_vdisk_name, src_vdisk_id, self._get_vdisk_params(None)) src_map_device = self._scan_device(src_properties) dest_properties = self._get_vdisk_map_properties( connector, dest_lun_id, dest_vdisk_name, dest_vdisk_id, self._get_vdisk_params(None)) dest_map_device = self._scan_device(dest_properties) src_vdisk_attr = self._get_vdisk_attributes(src_vdisk_name) # vdisk capacity is bytes, translate into MB size_in_mb = int(src_vdisk_attr['capacity']) / units.Mi volume_utils.copy_volume( src_map_device['path'], dest_map_device['path'], size_in_mb, self.configuration.volume_dd_blocksize) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to copy %(src)s to %(dest)s.'), {'src': src_vdisk_name, 'dest': dest_vdisk_name}) finally: if not dest_map: self._unmap_vdisk_from_host(dest_vdisk_name, connector) self._remove_device(dest_properties, dest_map_device) if not src_map: self._unmap_vdisk_from_host(src_vdisk_name, connector) self._remove_device(src_properties, src_map_device) LOG.debug( 'leave: _copy_vdisk_data: %(src)s -> %(dest)s.', {'src': src_vdisk_name, 'dest': dest_vdisk_name}) def _create_and_copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id): vdisk_attr = self._get_vdisk_attributes(src_vdisk_name) self._driver_assert( vdisk_attr is not None, (_('_create_and_copy_vdisk_data: Failed to get attributes for ' 'vdisk %s.') % src_vdisk_name)) self._create_vdisk(dest_vdisk_name, vdisk_attr['capacity'], 'b', None) # create a timer to lock vdisk that will be used to data copy timer = loopingcall.FixedIntervalLoopingCall( self._set_vdisk_copy_in_progress, [src_vdisk_name, dest_vdisk_name]) timer.start(interval=self._check_lock_interval).wait() try: self._copy_vdisk_data(src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id) finally: self._unset_vdisk_copy_in_progress( [src_vdisk_name, dest_vdisk_name]) def _create_vdisk(self, name, size, unit, opts): """Create a new vdisk.""" LOG.debug('enter: _create_vdisk: vdisk %s.', name) ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp', FLASHSYSTEM_VOLPOOL_NAME, '-iogrp', six.text_type(FLASHSYSTEM_VOL_IOGRP), '-size', size, '-unit', unit] out, err = self._ssh(ssh_cmd) self._assert_ssh_return(out.strip(), '_create_vdisk', ssh_cmd, out, err) # Ensure that the output is as expected match_obj = re.search( 'Virtual Disk, id \[([0-9]+)\], successfully created', out) self._driver_assert( match_obj is not None, (_('_create_vdisk %(name)s - did not find ' 'success message in CLI output.\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'name': name, 'out': six.text_type(out), 'err': six.text_type(err)})) LOG.debug('leave: 
_create_vdisk: vdisk %s.', name) def _delete_host(self, host_name): """Delete a host on the storage system.""" LOG.debug('enter: _delete_host: host %s.', host_name) ssh_cmd = ['svctask', 'rmhost', host_name] out, err = self._ssh(ssh_cmd) # No output should be returned from rmhost self._assert_ssh_return( (not out.strip()), '_delete_host', ssh_cmd, out, err) LOG.debug('leave: _delete_host: host %s.', host_name) def _delete_vdisk(self, name, force): """Deletes existing vdisks.""" LOG.debug('enter: _delete_vdisk: vdisk %s.', name) # Try to delete volume only if found on the storage vdisk_defined = self._is_vdisk_defined(name) if not vdisk_defined: LOG.warning(_LW('warning: Tried to delete vdisk %s but ' 'it does not exist.'), name) return ssh_cmd = ['svctask', 'rmvdisk', '-force', name] if not force: ssh_cmd.remove('-force') out, err = self._ssh(ssh_cmd) # No output should be returned from rmvdisk self._assert_ssh_return( (not out.strip()), ('_delete_vdisk %(name)s') % {'name': name}, ssh_cmd, out, err) LOG.debug('leave: _delete_vdisk: vdisk %s.', name) def _driver_assert(self, assert_condition, exception_message): """Internal assertion mechanism for CLI output.""" if not assert_condition: LOG.error(exception_message) raise exception.VolumeBackendAPIException(data=exception_message) def _execute_command_and_parse_attributes(self, ssh_cmd): """Execute command on the FlashSystem and parse attributes. Exception is raised if the information from the system can not be obtained. """ LOG.debug( 'enter: _execute_command_and_parse_attributes: ' 'command: %s.', six.text_type(ssh_cmd)) try: out, err = self._ssh(ssh_cmd) except processutils.ProcessExecutionError: LOG.warning(_LW('Failed to run command: ' '%s.'), ssh_cmd) # Does not raise exception when command encounters error. # Only return and the upper logic decides what to do. return None self._assert_ssh_return( out, '_execute_command_and_parse_attributes', ssh_cmd, out, err) attributes = {} for attrib_line in out.split('\n'): # If '!' not found, return the string and two empty strings attrib_name, foo, attrib_value = attrib_line.partition('!') if attrib_name is not None and attrib_name.strip(): self._append_dict(attributes, attrib_name, attrib_value) LOG.debug( 'leave: _execute_command_and_parse_attributes: ' 'command: %(cmd)s attributes: %(attr)s.', {'cmd': six.text_type(ssh_cmd), 'attr': six.text_type(attributes)}) return attributes def _get_hdr_dic(self, header, row, delim): """Return CLI row data as a dictionary indexed by names from header. The strings are converted to columns using the delimiter in delim. """ attributes = header.split(delim) values = row.split(delim) self._driver_assert( len(values) == len(attributes), (_('_get_hdr_dic: attribute headers and values do not match.\n ' 'Headers: %(header)s\n Values: %(row)s.') % {'header': six.text_type(header), 'row': six.text_type(row)})) dic = {a: v for a, v in zip(attributes, values)} return dic def _get_host_from_connector(self, connector): """List the hosts defined in the storage. Return the host name with the given connection info, or None if there is no host fitting that information. 
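        The lookup parses ``svcinfo lshost -delim !`` output, whose first
        line is a '!'-separated header containing a ``name`` column. An
        illustrative output shape (not captured from a real array; only
        the ``name`` column is relied upon here)::

            id!name!port_count!iogrp_count
            0!openstack-compute-1!2!4

        Each listed host name is then checked exhaustively against the
        connector.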
""" LOG.debug('enter: _get_host_from_connector: %s.', connector) # Get list of host in the storage ssh_cmd = ['svcinfo', 'lshost', '-delim', '!'] out, err = self._ssh(ssh_cmd) if not out.strip(): return None # If we have FC information, we have a faster lookup option hostname = None host_lines = out.strip().split('\n') self._assert_ssh_return( host_lines, '_get_host_from_connector', ssh_cmd, out, err) header = host_lines.pop(0).split('!') self._assert_ssh_return( 'name' in header, '_get_host_from_connector', ssh_cmd, out, err) name_index = header.index('name') hosts = [x.split('!')[name_index] for x in host_lines] hostname = self._find_host_exhaustive(connector, hosts) LOG.debug('leave: _get_host_from_connector: host %s.', hostname) return hostname def _get_hostvdisk_mappings(self, host_name): """Return the defined storage mappings for a host.""" return_data = {} ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host_name] out, err = self._ssh(ssh_cmd) mappings = out.strip().split('\n') if mappings: header = mappings.pop(0) for mapping_line in mappings: mapping_data = self._get_hdr_dic(header, mapping_line, '!') return_data[mapping_data['vdisk_name']] = mapping_data return return_data def _get_node_data(self): """Get and verify node configuration.""" # Get storage system name and id ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!'] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes or not ('name' in attributes): msg = _('Could not get system name.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._system_name = attributes['name'] self._system_id = attributes['id'] # Validate value of open_access_enabled flag, for now only # support when open_access_enabled is off if not attributes or not ('open_access_enabled' in attributes) or ( attributes['open_access_enabled'] != 'off'): msg = _('open_access_enabled is not off.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Validate that the array exists pool = FLASHSYSTEM_VOLPOOL_NAME ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes: msg = _('Unable to parse attributes.') LOG.error(msg) raise exception.InvalidInput(reason=msg) if not ('status' in attributes) or ( attributes['status'] == 'offline'): msg = (_('Array does not exist or is offline. 
' 'Current status of array is %s.') % attributes['status'])
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        # Get the iSCSI names of the FlashSystem nodes
        ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
        out, err = self._ssh(ssh_cmd)
        self._assert_ssh_return(
            out.strip(), '_get_node_data', ssh_cmd, out, err)
        nodes = out.strip().splitlines()
        self._assert_ssh_return(nodes, '_get_node_data', ssh_cmd, out, err)
        header = nodes.pop(0)
        for node_line in nodes:
            try:
                node_data = self._get_hdr_dic(header, node_line, '!')
            except exception.VolumeBackendAPIException:
                with excutils.save_and_reraise_exception():
                    self._log_cli_output_error('_get_node_data',
                                               ssh_cmd, out, err)
            try:
                node = {
                    'id': node_data['id'],
                    'name': node_data['name'],
                    'IO_group': node_data['IO_group_id'],
                    'WWNN': node_data['WWNN'],
                    'status': node_data['status'],
                    'WWPN': [],
                    'protocol': None,
                    'iscsi_name': node_data['iscsi_name'],
                    'config_node': node_data['config_node'],
                    'ipv4': [],
                    'ipv6': [],
                }
                if node['status'] == 'online':
                    self._storage_nodes[node['id']] = node
            except KeyError:
                self._handle_keyerror('lsnode', header)

    def _get_vdisk_attributes(self, vdisk_name):
        """Return vdisk attributes.

        Exception is raised if the information from the system cannot be
        parsed or matched to a single vdisk.
        """
        ssh_cmd = [
            'svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_name]
        return self._execute_command_and_parse_attributes(ssh_cmd)

    def _get_vdiskhost_mappings(self, vdisk_name):
        """Return the defined storage mappings for a vdisk."""
        return_data = {}
        ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk_name]
        out, err = self._ssh(ssh_cmd)
        mappings = out.strip().split('\n')
        if mappings:
            header = mappings.pop(0)
            for mapping_line in mappings:
                mapping_data = self._get_hdr_dic(header, mapping_line, '!')
                return_data[mapping_data['host_name']] = mapping_data
        return return_data

    def _get_vdisk_params(self, type_id):
        params = self._build_default_params()
        if type_id:
            ctxt = context.get_admin_context()
            volume_type = volume_types.get_volume_type(ctxt, type_id)
            specs = volume_type.get('extra_specs')
            for k, value in specs.items():
                # Get the scope, if using scope format
                key_split = k.split(':')
                if len(key_split) == 1:
                    scope = None
                    key = key_split[0]
                else:
                    scope = key_split[0]
                    key = key_split[1]

                # We generally do not look at capabilities in the driver,
                # but protocol is a special case where the user asks for a
                # given protocol and we want both the scheduler and the
                # driver to act on the value.
                if ((not scope or scope == 'capabilities') and
                        key == 'storage_protocol'):
                    scope = None
                    key = 'protocol'

                # Any keys that the driver should look at should have the
                # 'drivers' scope.
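                # Illustrative walk-through (hypothetical extra specs, not
                # taken from the source): a volume type carrying
                #     {'capabilities:storage_protocol': 'FC'}
                # was rewritten above to the un-scoped key 'protocol', which
                # exists in the default params and is applied below, whereas
                # a spec such as {'drivers:some_other_key': 'x'} keeps the
                # 'drivers' scope and is silently dropped unless that key is
                # already present in the default params.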
if scope and scope != "drivers": continue if key in params: this_type = type(params[key]).__name__ if this_type == 'int': value = int(value) elif this_type == 'bool': value = strutils.bool_from_string(value) params[key] = value self._check_vdisk_params(params) return params def _handle_keyerror(self, function, header): msg = (_('Did not find expected column in %(fun)s: %(hdr)s.') % {'fun': function, 'hdr': header}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _is_vdisk_defined(self, vdisk_name): """Check if vdisk is defined.""" LOG.debug('enter: _is_vdisk_defined: vdisk %s.', vdisk_name) vdisk_attributes = self._get_vdisk_attributes(vdisk_name) LOG.debug( 'leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s.', {'vol': vdisk_name, 'str': vdisk_attributes is not None}) if vdisk_attributes is None: return False else: return True def _is_vdisk_copy_in_progress(self, vdisk_name): LOG.debug( '_is_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': vdisk_name, 'vdisk_in_progress': six.text_type(self._vdisk_copy_in_progress)}) if vdisk_name not in self._vdisk_copy_in_progress: LOG.debug( '_is_vdisk_copy_in_progress: ' 'vdisk copy is not in progress.') raise loopingcall.LoopingCallDone(retvalue=True) def _is_vdisk_map(self, vdisk_name, connector): """Check if vdisk is mapped. If map, return True and lun id. If not map, return False and expected lun id. """ LOG.debug('enter: _is_vdisk_map: %(src)s.', {'src': vdisk_name}) map_flag = False result_lun = '-1' host_name = self._get_host_from_connector(connector) if host_name is None: return (map_flag, int(result_lun)) mapping_data = self._get_hostvdisk_mappings(host_name) if vdisk_name in mapping_data: map_flag = True result_lun = mapping_data[vdisk_name]['SCSI_id'] else: lun_used = [int(v['SCSI_id']) for v in mapping_data.values()] lun_used.sort() # Start from 1 due to problems with lun id being 0. 
result_lun = 1 for lun_id in lun_used: if result_lun < lun_id: break elif result_lun == lun_id: result_lun += 1 LOG.debug( 'leave: _is_vdisk_map: %(src)s ' 'mapped %(map_flag)s %(result_lun)s.', {'src': vdisk_name, 'map_flag': six.text_type(map_flag), 'result_lun': result_lun}) return (map_flag, int(result_lun)) def _log_cli_output_error(self, function, cmd, out, err): LOG.error(_LE('%(fun)s: Failed with unexpected CLI output.\n ' 'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n'), {'fun': function, 'cmd': cmd, 'out': six.text_type(out), 'err': six.text_type(err)}) @utils.synchronized('flashsystem-map', external=True) def _map_vdisk_to_host(self, vdisk_name, connector): """Create a mapping between a vdisk to a host.""" LOG.debug( 'enter: _map_vdisk_to_host: vdisk %(vdisk_name)s to ' 'host %(host)s.', {'vdisk_name': vdisk_name, 'host': connector}) # Check if a host object is defined for this host name host_name = self._get_host_from_connector(connector) if host_name is None: # Host does not exist - add a new host to FlashSystem host_name = self._create_host(connector) # Verify that create_new_host succeeded self._driver_assert( host_name is not None, (_('_create_host failed to return the host name.'))) (map_flag, result_lun) = self._is_vdisk_map(vdisk_name, connector) # Volume is not mapped to host, create a new LUN if not map_flag: ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host_name, '-scsi', six.text_type(result_lun), vdisk_name] out, err = self._ssh(ssh_cmd, check_exit_code=False) if err and err.startswith('CMMVC6071E'): if not self.configuration.flashsystem_multihostmap_enabled: msg = _('flashsystem_multihostmap_enabled is set ' 'to False, not allow multi host mapping. ' 'CMMVC6071E The VDisk-to-host mapping ' 'was not created because the VDisk is ' 'already mapped to a host.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for i in range(len(ssh_cmd)): if ssh_cmd[i] == 'mkvdiskhostmap': ssh_cmd.insert(i + 1, '-force') # try to map one volume to multiple hosts out, err = self._ssh(ssh_cmd) LOG.info(_LI('Volume %s is mapping to multiple hosts.'), vdisk_name) self._assert_ssh_return( 'successfully created' in out, '_map_vdisk_to_host', ssh_cmd, out, err) else: self._assert_ssh_return( 'successfully created' in out, '_map_vdisk_to_host', ssh_cmd, out, err) LOG.debug( ('leave: _map_vdisk_to_host: LUN %(result_lun)s, vdisk ' '%(vdisk_name)s, host %(host_name)s.'), {'result_lun': result_lun, 'vdisk_name': vdisk_name, 'host_name': host_name}) return int(result_lun) def _port_conf_generator(self, cmd): ssh_cmd = cmd + ['-delim', '!'] out, err = self._ssh(ssh_cmd) if not out.strip(): return port_lines = out.strip().split('\n') if not port_lines: return header = port_lines.pop(0) yield header for portip_line in port_lines: try: port_data = self._get_hdr_dic(header, portip_line, '!') except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self._log_cli_output_error('_port_conf_generator', ssh_cmd, out, err) yield port_data def _remove_device(self, properties, device): LOG.debug('enter: _remove_device') if not properties or not device: LOG.warning(_LW('_remove_device: invalid properties or device.')) return use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = properties['driver_volume_type'] connector = utils.brick_get_connector(protocol, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, conn=properties) 
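        # Descriptive note: the os-brick connector built above mirrors the
        # one constructed in _scan_device; disconnect_volume() must receive
        # the same 'data' dict that connect_volume() consumed so that the
        # correct host device is torn down.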
connector.disconnect_volume(properties['data'], device) LOG.debug('leave: _remove_device') def _scan_device(self, properties): LOG.debug('enter: _scan_device') use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = properties['driver_volume_type'] connector = utils.brick_get_connector(protocol, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, conn=properties) device = connector.connect_volume(properties['data']) host_device = device['path'] if not connector.check_valid_device(host_device): msg = (_('Unable to access the backend storage ' 'via the path %(path)s.') % {'path': host_device}) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('leave: _scan_device') return device @utils.synchronized('flashsystem-unmap', external=True) def _unmap_vdisk_from_host(self, vdisk_name, connector): if 'host' in connector: host_name = self._get_host_from_connector(connector) self._driver_assert( host_name is not None, (_('_get_host_from_connector failed to return the host name ' 'for connector.'))) else: host_name = None # Check if vdisk-host mapping exists, remove if it does. If no host # name was given, but only one mapping exists, we can use that. mapping_data = self._get_vdiskhost_mappings(vdisk_name) if not mapping_data: LOG.warning(_LW('_unmap_vdisk_from_host: No mapping of volume ' '%(vol_name)s to any host found.'), {'vol_name': vdisk_name}) return if host_name is None: if len(mapping_data) > 1: LOG.warning(_LW('_unmap_vdisk_from_host: Multiple mappings of ' 'volume %(vdisk_name)s found, no host ' 'specified.'), {'vdisk_name': vdisk_name}) return else: host_name = list(mapping_data.keys())[0] else: if host_name not in mapping_data: LOG.error(_LE('_unmap_vdisk_from_host: No mapping of volume ' '%(vol_name)s to host %(host_name)s found.'), {'vol_name': vdisk_name, 'host_name': host_name}) return # We have a valid host_name now ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', host_name, vdisk_name] out, err = self._ssh(ssh_cmd) # Verify CLI behaviour - no output is returned from rmvdiskhostmap self._assert_ssh_return( (not out.strip()), '_unmap_vdisk_from_host', ssh_cmd, out, err) # If this host has no more mappings, delete it mapping_data = self._get_hostvdisk_mappings(host_name) if not mapping_data: self._delete_host(host_name) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") data = { 'vendor_name': 'IBM', 'driver_version': self.VERSION, 'storage_protocol': self._protocol, 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'multiattach': True, } pool = FLASHSYSTEM_VOLPOOL_NAME backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = '%s_%s' % (self._system_name, pool) data['volume_backend_name'] = backend_name ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes: msg = _('_update_volume_stats: Could not get storage pool data.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) data['total_capacity_gb'] = ( float(attributes['capacity']) / units.Gi) data['free_capacity_gb'] = ( float(attributes['free_capacity']) / units.Gi) data['easytier_support'] = False # Do not support easy tier data['location_info'] = ( 'FlashSystemDriver:%(sys_id)s:%(pool)s' % {'sys_id': self._system_id, 
'pool': pool}) self._stats = data def _set_vdisk_copy_in_progress(self, vdisk_list): LOG.debug( '_set_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': six.text_type(vdisk_list), 'vdisk_in_progress': six.text_type(self._vdisk_copy_in_progress)}) get_lock = True self._vdisk_copy_lock.acquire() for vdisk in vdisk_list: if vdisk in self._vdisk_copy_in_progress: get_lock = False break if get_lock: self._vdisk_copy_in_progress.update(vdisk_list) self._vdisk_copy_lock.release() if get_lock: LOG.debug( '_set_vdisk_copy_in_progress: %s.', six.text_type(self._vdisk_copy_in_progress)) raise loopingcall.LoopingCallDone(retvalue=True) def _unset_vdisk_copy_in_progress(self, vdisk_list): LOG.debug( '_unset_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': six.text_type(vdisk_list), 'vdisk_in_progress': six.text_type(self._vdisk_copy_in_progress)}) self._vdisk_copy_lock.acquire() for vdisk in vdisk_list: if vdisk in self._vdisk_copy_in_progress: self._vdisk_copy_in_progress.remove(vdisk) self._vdisk_copy_lock.release() def _wait_vdisk_copy_completed(self, vdisk_name): timer = loopingcall.FixedIntervalLoopingCall( self._is_vdisk_copy_in_progress, vdisk_name) timer.start(interval=self._check_lock_interval).wait() def check_for_setup_error(self): """Ensure that the flags are set properly.""" LOG.debug('enter: check_for_setup_error') # Check that we have the system ID information if self._system_name is None: msg = ( _('check_for_setup_error: Unable to determine system name.')) raise exception.VolumeBackendAPIException(data=msg) if self._system_id is None: msg = _('check_for_setup_error: Unable to determine system id.') raise exception.VolumeBackendAPIException(data=msg) required_flags = ['san_ip', 'san_ssh_port', 'san_login'] for flag in required_flags: if not self.configuration.safe_get(flag): msg = (_('%s is not set.') % flag) raise exception.InvalidInput(reason=msg) # Ensure that either password or keyfile were set if not (self.configuration.san_password or self.configuration.san_private_key): msg = _('check_for_setup_error: Password or SSH private key ' 'is required for authentication: set either ' 'san_password or san_private_key option.') raise exception.InvalidInput(reason=msg) params = self._build_default_params() self._check_vdisk_params(params) LOG.debug('leave: check_for_setup_error') def create_volume(self, volume): """Create volume.""" vdisk_name = volume['name'] vdisk_params = self._get_vdisk_params(volume['volume_type_id']) vdisk_size = six.text_type(volume['size']) return self._create_vdisk(vdisk_name, vdisk_size, 'gb', vdisk_params) def delete_volume(self, volume): """Delete volume.""" vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) self._delete_vdisk(vdisk_name, False) def extend_volume(self, volume, new_size): """Extend volume.""" LOG.debug('enter: extend_volume: volume %s.', volume['name']) vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) extend_amt = int(new_size) - volume['size'] ssh_cmd = (['svctask', 'expandvdisksize', '-size', six.text_type(extend_amt), '-unit', 'gb', vdisk_name]) out, err = self._ssh(ssh_cmd) # No output should be returned from expandvdisksize self._assert_ssh_return( (not out.strip()), 'extend_volume', ssh_cmd, out, err) LOG.debug('leave: extend_volume: volume %s.', volume['name']) def create_snapshot(self, snapshot): """Create snapshot from volume.""" LOG.debug( 'enter: create_snapshot: create %(snap)s from %(vol)s.', {'snap': snapshot['name'], 'vol': 
snapshot['volume']['name']}) status = snapshot['volume']['status'] if status not in ['available', 'in-use']: msg = (_( 'create_snapshot: Volume status must be "available" or ' '"in-use" for snapshot. The invalid status is %s.') % status) raise exception.InvalidVolume(msg) self._create_and_copy_vdisk_data(snapshot['volume']['name'], snapshot['volume']['id'], snapshot['name'], snapshot['id']) LOG.debug( 'leave: create_snapshot: create %(snap)s from %(vol)s.', {'snap': snapshot['name'], 'vol': snapshot['volume']['name']}) def delete_snapshot(self, snapshot): """Delete snapshot.""" LOG.debug( 'enter: delete_snapshot: delete %(snap)s.', {'snap': snapshot['name']}) self._wait_vdisk_copy_completed(snapshot['name']) self._delete_vdisk(snapshot['name'], False) LOG.debug( 'leave: delete_snapshot: delete %(snap)s.', {'snap': snapshot['name']}) def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot.""" LOG.debug( 'enter: create_volume_from_snapshot: create %(vol)s from ' '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']}) if volume['size'] != snapshot['volume_size']: msg = _('create_volume_from_snapshot: Volume size is different ' 'from snapshot based volume.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) status = snapshot['status'] if status != 'available': msg = (_('create_volume_from_snapshot: Snapshot status ' 'must be "available" for creating volume. ' 'The invalid status is: %s.') % status) raise exception.InvalidSnapshot(msg) self._create_and_copy_vdisk_data(snapshot['name'], snapshot['id'], volume['name'], volume['id']) LOG.debug( 'leave: create_volume_from_snapshot: create %(vol)s from ' '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']}) def create_cloned_volume(self, volume, src_volume): """Create volume from a source volume.""" LOG.debug('enter: create_cloned_volume: create %(vol)s from %(src)s.', {'src': src_volume['name'], 'vol': volume['name']}) if src_volume['size'] != volume['size']: msg = _('create_cloned_volume: Source and destination ' 'size differ.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) self._create_and_copy_vdisk_data(src_volume['name'], src_volume['id'], volume['name'], volume['id']) LOG.debug('leave: create_cloned_volume: create %(vol)s from %(src)s.', {'src': src_volume['name'], 'vol': volume['name']}) def get_volume_stats(self, refresh=False): """Get volume stats. If we haven't gotten stats yet or 'refresh' is True, run update the stats first. """ if not self._stats or refresh: self._update_volume_stats() return self._stats cinder-8.0.0/cinder/volume/drivers/ibm/gpfs.py0000664000567000056710000016402112701406250022436 0ustar jenkinsjenkins00000000000000# Copyright IBM Corp. 2013 All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GPFS Volume Driver. 
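Volumes are stored as files in a GPFS file system, and snapshots and
clones are implemented with GPFS file clones (mmclone). A minimal,
illustrative cinder.conf fragment (paths are placeholders)::

    volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
    gpfs_mount_point_base = /gpfs/fs1/cinder-volumes
    gpfs_storage_pool = system
    gpfs_sparse_volumes = True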
""" import math import os import re import shutil from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.image import image_utils from cinder.objects import fields from cinder import utils from cinder.volume import driver from cinder.volume.drivers import nfs from cinder.volume.drivers import remotefs from cinder.volume.drivers.san import san GPFS_CLONE_MIN_RELEASE = 1200 GPFS_ENC_MIN_RELEASE = 1404 MIGRATION_ALLOWED_DEST_TYPE = ['GPFSDriver', 'GPFSNFSDriver'] LOG = logging.getLogger(__name__) gpfs_opts = [ cfg.StrOpt('gpfs_mount_point_base', help='Specifies the path of the GPFS directory where Block ' 'Storage volume and snapshot files are stored.'), cfg.StrOpt('gpfs_images_dir', help='Specifies the path of the Image service repository in ' 'GPFS. Leave undefined if not storing images in GPFS.'), cfg.StrOpt('gpfs_images_share_mode', choices=['copy', 'copy_on_write', None], help='Specifies the type of image copy to be used. Set this ' 'when the Image service repository also uses GPFS so ' 'that image files can be transferred efficiently from ' 'the Image service to the Block Storage service. There ' 'are two valid values: "copy" specifies that a full copy ' 'of the image is made; "copy_on_write" specifies that ' 'copy-on-write optimization strategy is used and ' 'unmodified blocks of the image file are shared ' 'efficiently.'), cfg.IntOpt('gpfs_max_clone_depth', default=0, help='Specifies an upper limit on the number of indirections ' 'required to reach a specific block due to snapshots or ' 'clones. A lengthy chain of copy-on-write snapshots or ' 'clones can have a negative impact on performance, but ' 'improves space utilization. 0 indicates unlimited ' 'clone depth.'), cfg.BoolOpt('gpfs_sparse_volumes', default=True, help=('Specifies that volumes are created as sparse files ' 'which initially consume no space. If set to False, the ' 'volume is created as a fully allocated file, in which ' 'case, creation may take a significantly longer time.')), cfg.StrOpt('gpfs_storage_pool', default='system', help=('Specifies the storage pool that volumes are assigned ' 'to. By default, the system storage pool is used.')), ] CONF = cfg.CONF CONF.register_opts(gpfs_opts) def _different(difference_tuple): """Return true if two elements of a tuple are different.""" if difference_tuple: member1, member2 = difference_tuple return member1 != member2 else: return False def _same_filesystem(path1, path2): """Return true if the two paths are in the same GPFS file system.""" return os.lstat(path1).st_dev == os.lstat(path2).st_dev def _sizestr(size_in_g): """Convert the specified size into a string value.""" return '%sG' % size_in_g class GPFSDriver(driver.ConsistencyGroupVD, driver.ExtendVD, driver.LocalVD, driver.TransferVD, driver.CloneableImageVD, driver.SnapshotVD, driver.MigrateVD, driver.BaseVD): """Implements volume functions using GPFS primitives. 
Version history: 1.0.0 - Initial driver 1.1.0 - Add volume retype, refactor volume migration 1.2.0 - Add consistency group support 1.3.0 - Add NFS based GPFS storage backend support 1.3.1 - Add GPFS native encryption (encryption of data at rest) support """ VERSION = "1.3.1" def __init__(self, *args, **kwargs): super(GPFSDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(gpfs_opts) self.gpfs_execute = self._gpfs_local_execute self._execute = utils.execute def _gpfs_local_execute(self, *cmd, **kwargs): if 'run_as_root' not in kwargs: kwargs.update({'run_as_root': True}) return utils.execute(*cmd, **kwargs) def _get_gpfs_state(self): """Return GPFS state information.""" try: (out, err) = self.gpfs_execute('mmgetstate', '-Y') return out except processutils.ProcessExecutionError as exc: LOG.error(_LE('Failed to issue mmgetstate command, error: %s.'), exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) def _check_gpfs_state(self): """Raise VolumeBackendAPIException if GPFS is not active.""" out = self._get_gpfs_state() lines = out.splitlines() state_token = lines[0].split(':').index('state') gpfs_state = lines[1].split(':')[state_token] if gpfs_state != 'active': LOG.error(_LE('GPFS is not active. Detailed output: %s.'), out) raise exception.VolumeBackendAPIException( data=_('GPFS is not running, state: %s.') % gpfs_state) def _get_filesystem_from_path(self, path): """Return filesystem for specified path.""" try: (out, err) = self.gpfs_execute('df', path) lines = out.splitlines() filesystem = lines[1].split()[0] return filesystem except processutils.ProcessExecutionError as exc: LOG.error(_LE('Failed to issue df command for path %(path)s, ' 'error: %(error)s.'), {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) def _get_gpfs_cluster_id(self): """Return the id for GPFS cluster being used.""" try: (out, err) = self.gpfs_execute('mmlsconfig', 'clusterId', '-Y') lines = out.splitlines() value_token = lines[0].split(':').index('value') cluster_id = lines[1].split(':')[value_token] return cluster_id except processutils.ProcessExecutionError as exc: LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'), exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) def _get_fileset_from_path(self, path): """Return the GPFS fileset for specified path.""" fs_regex = re.compile(r'.*fileset.name:\s+(?P\w+)', re.S) try: (out, err) = self.gpfs_execute('mmlsattr', '-L', path) except processutils.ProcessExecutionError as exc: LOG.error(_LE('Failed to issue mmlsattr command on path %(path)s, ' 'error: %(error)s'), {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) try: fileset = fs_regex.match(out).group('fileset') return fileset except AttributeError as exc: msg = (_('Failed to find fileset for path %(path)s, command ' 'output: %(cmdout)s.') % {'path': path, 'cmdout': out}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _verify_gpfs_pool(self, storage_pool): """Return true if the specified pool is a valid GPFS storage pool.""" try: self.gpfs_execute('mmlspool', self._gpfs_device, storage_pool) return True except processutils.ProcessExecutionError: return False def _update_volume_storage_pool(self, local_path, new_pool): """Set the storage pool for a volume to the specified value.""" if new_pool is None: new_pool = 'system' if not self._verify_gpfs_pool(new_pool): msg = (_('Invalid storage pool %s requested. 
Retype failed.') % new_pool) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self.gpfs_execute('mmchattr', '-P', new_pool, local_path) LOG.debug('Updated storage pool with mmchattr to %s.', new_pool) return True except processutils.ProcessExecutionError as exc: LOG.info(_LI('Could not update storage pool with mmchattr to ' '%(pool)s, error: %(error)s'), {'pool': new_pool, 'error': exc.stderr}) return False def _get_gpfs_fs_release_level(self, path): """Return the GPFS version of the specified file system. The file system is specified by any valid path it contains. """ filesystem = self._get_filesystem_from_path(path) try: (out, err) = self.gpfs_execute('mmlsfs', filesystem, '-V', '-Y') except processutils.ProcessExecutionError as exc: LOG.error(_LE('Failed to issue mmlsfs command for path %(path)s, ' 'error: %(error)s.'), {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) lines = out.splitlines() value_token = lines[0].split(':').index('data') fs_release_level_str = lines[1].split(':')[value_token] # at this point, release string looks like "13.23 (3.5.0.7)" # extract first token and convert to whole number value fs_release_level = int(float(fs_release_level_str.split()[0]) * 100) return filesystem, fs_release_level def _get_gpfs_cluster_release_level(self): """Return the GPFS version of current cluster.""" try: (out, err) = self.gpfs_execute('mmlsconfig', 'minreleaseLeveldaemon', '-Y') except processutils.ProcessExecutionError as exc: LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'), exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) lines = out.splitlines() value_token = lines[0].split(':').index('value') min_release_level = lines[1].split(':')[value_token] return int(min_release_level) def _is_gpfs_path(self, directory): """Determine if the specified path is in a gpfs file system. If not part of a gpfs file system, raise ProcessExecutionError. 
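        (As implemented below, the ProcessExecutionError raised by the
        ``mmlsattr`` probe is caught and re-raised as a
        VolumeBackendAPIException.)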
""" try: self.gpfs_execute('mmlsattr', directory) except processutils.ProcessExecutionError as exc: LOG.error(_LE('Failed to issue mmlsattr command ' 'for path %(path)s, ' 'error: %(error)s.'), {'path': directory, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) def _is_same_fileset(self, path1, path2): """Return true if the two paths are in the same GPFS fileset.""" if self._get_fileset_from_path(path1) == \ self._get_fileset_from_path(path2): return True return False def _same_cluster(self, host): """Return true if the host is a member of the same GPFS cluster.""" dest_location = host['capabilities'].get('location_info') if self._stats['location_info'] == dest_location: return True return False def _set_rw_permission(self, path, modebits='660'): """Set permission bits for the path.""" self.gpfs_execute('chmod', modebits, path) def _can_migrate_locally(self, host): """Return true if the host can migrate a volume locally.""" if 'location_info' not in host['capabilities']: LOG.debug('Evaluate migration: no location info, ' 'cannot migrate locally.') return None info = host['capabilities']['location_info'] try: (dest_type, dest_id, dest_path) = info.split(':') except ValueError: LOG.debug('Evaluate migration: unexpected location info, ' 'cannot migrate locally: %s.', info) return None if (dest_id != self._cluster_id or dest_type not in MIGRATION_ALLOWED_DEST_TYPE): LOG.debug('Evaluate migration: different destination driver or ' 'cluster id in location info: %s.', info) return None LOG.debug('Evaluate migration: use local migration.') return dest_path def do_setup(self, ctxt): """Determine storage back end capabilities.""" try: self._cluster_id = self._get_gpfs_cluster_id() except Exception as setup_exception: msg = (_('Could not find GPFS cluster id: %s.') % setup_exception) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: gpfs_base = self.configuration.gpfs_mount_point_base self._gpfs_device = self._get_filesystem_from_path(gpfs_base) except Exception as setup_exception: msg = (_('Could not find GPFS file system device: %s.') % setup_exception) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) pool = self.configuration.safe_get('gpfs_storage_pool') self._storage_pool = pool if not self._verify_gpfs_pool(self._storage_pool): msg = (_('Invalid storage pool %s specificed.') % self._storage_pool) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) _gpfs_cluster_release_level = self._get_gpfs_cluster_release_level() if _gpfs_cluster_release_level >= GPFS_ENC_MIN_RELEASE: self._encryption_state = self._get_gpfs_encryption_status() else: LOG.info(_LI('Downlevel GPFS Cluster Detected. 
GPFS ' 'encryption-at-rest feature not enabled in cluster ' 'daemon level %(cur)s - must be at least at ' 'level %(min)s.'), {'cur': _gpfs_cluster_release_level, 'min': GPFS_ENC_MIN_RELEASE}) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" self._check_gpfs_state() if self.configuration.gpfs_mount_point_base is None: msg = _('Option gpfs_mount_point_base is not set correctly.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if (self.configuration.gpfs_images_share_mode and self.configuration.gpfs_images_share_mode not in ['copy_on_write', 'copy']): msg = _('Option gpfs_images_share_mode is not set correctly.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if(self.configuration.gpfs_images_share_mode and self.configuration.gpfs_images_dir is None): msg = _('Option gpfs_images_dir is not set correctly.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and not _same_filesystem(self.configuration.gpfs_mount_point_base, self.configuration.gpfs_images_dir)): msg = (_('gpfs_images_share_mode is set to copy_on_write, but ' '%(vol)s and %(img)s belong to different file ' 'systems.') % {'vol': self.configuration.gpfs_mount_point_base, 'img': self.configuration.gpfs_images_dir}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and not self._is_same_fileset(self.configuration.gpfs_mount_point_base, self.configuration.gpfs_images_dir)): msg = (_('gpfs_images_share_mode is set to copy_on_write, but ' '%(vol)s and %(img)s belong to different filesets.') % {'vol': self.configuration.gpfs_mount_point_base, 'img': self.configuration.gpfs_images_dir}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) _gpfs_cluster_release_level = self._get_gpfs_cluster_release_level() if not _gpfs_cluster_release_level >= GPFS_CLONE_MIN_RELEASE: msg = (_('Downlevel GPFS Cluster Detected. GPFS Clone feature ' 'not enabled in cluster daemon level %(cur)s - must ' 'be at least at level %(min)s.') % {'cur': _gpfs_cluster_release_level, 'min': GPFS_CLONE_MIN_RELEASE}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for directory in [self.configuration.gpfs_mount_point_base, self.configuration.gpfs_images_dir]: if directory is None: continue if not directory.startswith('/'): msg = (_('%s must be an absolute path.') % directory) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not os.path.isdir(directory): msg = (_('%s is not a directory.') % directory) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Check if GPFS is mounted self._verify_gpfs_path_state(directory) filesystem, fslevel = \ self._get_gpfs_fs_release_level(directory) if not fslevel >= GPFS_CLONE_MIN_RELEASE: msg = (_('The GPFS filesystem %(fs)s is not at the required ' 'release level. 
Current level is %(cur)s, must be ' 'at least %(min)s.') % {'fs': filesystem, 'cur': fslevel, 'min': GPFS_CLONE_MIN_RELEASE}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _create_sparse_file(self, path, size): """Creates file with 0 disk usage.""" sizestr = _sizestr(size) self.gpfs_execute('truncate', '-s', sizestr, path) def _allocate_file_blocks(self, path, size): """Preallocate file blocks by writing zeros.""" block_size_mb = 1 block_count = size * units.Gi / (block_size_mb * units.Mi) self.gpfs_execute('dd', 'if=/dev/zero', 'of=%s' % path, 'bs=%dM' % block_size_mb, 'count=%d' % block_count) def _gpfs_change_attributes(self, options, path): """Update GPFS attributes on the specified file.""" cmd = ['mmchattr'] cmd.extend(options) cmd.append(path) LOG.debug('Update volume attributes with mmchattr to %s.', options) self.gpfs_execute(*cmd) def _set_volume_attributes(self, volume, path, metadata): """Set various GPFS attributes for this volume.""" set_pool = False options = [] for item in metadata: if item == 'data_pool_name': options.extend(['-P', metadata[item]]) set_pool = True elif item == 'replicas': options.extend(['-r', metadata[item], '-m', metadata[item]]) elif item == 'dio': options.extend(['-D', metadata[item]]) elif item == 'write_affinity_depth': options.extend(['--write-affinity-depth', metadata[item]]) elif item == 'block_group_factor': options.extend(['--block-group-factor', metadata[item]]) elif item == 'write_affinity_failure_group': options.extend(['--write-affinity-failure-group', metadata[item]]) # metadata value has precedence over value set in volume type if self.configuration.gpfs_storage_pool and not set_pool: options.extend(['-P', self.configuration.gpfs_storage_pool]) if options: self._gpfs_change_attributes(options, path) fstype = None fslabel = None for item in metadata: if item == 'fstype': fstype = metadata[item] elif item == 'fslabel': fslabel = metadata[item] if fstype: self._mkfs(volume, fstype, fslabel) def _get_volume_metadata(self, volume): volume_metadata = {} if 'volume_metadata' in volume: for metadata in volume['volume_metadata']: volume_metadata[metadata['key']] = metadata['value'] return volume_metadata return volume['metadata'] if 'metadata' in volume else {} def create_volume(self, volume): """Creates a GPFS volume.""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) volume_path = self._get_volume_path(volume) volume_size = volume['size'] # Create a sparse file first; allocate blocks later if requested self._create_sparse_file(volume_path, volume_size) self._set_rw_permission(volume_path) # Set the attributes prior to allocating any blocks so that # they are allocated according to the policy v_metadata = self._get_volume_metadata(volume) self._set_volume_attributes(volume, volume_path, v_metadata) if not self.configuration.gpfs_sparse_volumes: self._allocate_file_blocks(volume_path, volume_size) def _create_volume_from_snapshot(self, volume, snapshot): snapshot_path = self._get_snapshot_path(snapshot) # check if the snapshot lies in the same CG as the volume to be created # if yes, clone the volume from the snapshot, else perform full copy clone = False ctxt = context.get_admin_context() snap_parent_vol = self.db.volume_get(ctxt, snapshot['volume_id']) if (volume['consistencygroup_id'] == snap_parent_vol['consistencygroup_id']): clone = True volume_path = self._get_volume_path(volume) if clone: self._create_gpfs_copy(src=snapshot_path, dest=volume_path) 
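            # Descriptive note: 'mmclone copy' above created the new volume
            # as a copy-on-write child of the snapshot file; the redirect
            # that follows detaches the clone chain only when it is deeper
            # than the configured gpfs_max_clone_depth (see _gpfs_redirect).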
self._gpfs_redirect(volume_path) else: self._gpfs_full_copy(snapshot_path, volume_path) self._set_rw_permission(volume_path) v_metadata = self._get_volume_metadata(volume) self._set_volume_attributes(volume, volume_path, v_metadata) def create_volume_from_snapshot(self, volume, snapshot): """Creates a GPFS volume from a snapshot.""" self._create_volume_from_snapshot(volume, snapshot) virt_size = self._resize_volume_file(volume, volume['size']) return {'size': math.ceil(virt_size / units.Gi)} def _get_volume_path(self, volume): return self.local_path(volume) def _create_cloned_volume(self, volume, src_vref): src = self._get_volume_path(src_vref) dest = self._get_volume_path(volume) if (volume['consistencygroup_id'] == src_vref['consistencygroup_id']): self._create_gpfs_clone(src, dest) else: self._gpfs_full_copy(src, dest) self._set_rw_permission(dest) v_metadata = self._get_volume_metadata(volume) self._set_volume_attributes(volume, dest, v_metadata) def create_cloned_volume(self, volume, src_vref): """Create a GPFS volume from another volume.""" self._create_cloned_volume(volume, src_vref) virt_size = self._resize_volume_file(volume, volume['size']) return {'size': math.ceil(virt_size / units.Gi)} def _delete_gpfs_file(self, fchild, mount_point=None): """Delete a GPFS file and cleanup clone children.""" if mount_point is None: if not os.path.exists(fchild): return else: fchild_local_path = os.path.join(mount_point, os.path.basename(fchild)) if not os.path.exists(fchild_local_path): return (out, err) = self.gpfs_execute('mmclone', 'show', fchild) fparent = None delete_parent = False inode_regex = re.compile( r'.*\s+(?:yes|no)\s+\d+\s+(?P\d+)', re.M | re.S) match = inode_regex.match(out) if match: inode = match.group('inode') if mount_point is None: path = os.path.dirname(fchild) else: path = mount_point (out, err) = self._execute('find', path, '-maxdepth', '1', '-inum', inode, run_as_root=True) if out: fparent = out.split('\n', 1)[0] if mount_point is None: self._execute( 'rm', '-f', fchild, check_exit_code=False, run_as_root=True) else: self._execute( 'rm', '-f', fchild_local_path, check_exit_code=False, run_as_root=True) # There is no need to check for volume references on this snapshot # because 'rm -f' itself serves as a simple and implicit check. If the # parent is referenced by another volume, GPFS doesn't allow deleting # it. 'rm -f' silently fails and the subsequent check on the path # indicates whether there are any volumes derived from that snapshot. # If there are such volumes, we quit recursion and let the other # volumes delete the snapshot later. If there are no references, rm # would succeed and the snapshot is deleted. if mount_point is None: if not os.path.exists(fchild) and fparent: delete_parent = True else: if not os.path.exists(fchild_local_path) and fparent: delete_parent = True if delete_parent: fpbase = os.path.basename(fparent) if fpbase.endswith('.snap') or fpbase.endswith('.ts'): if mount_point is None: self._delete_gpfs_file(fparent) else: fparent_remote_path = os.path.join(os.path.dirname(fchild), fpbase) fparent_mount_path = os.path.dirname(fparent) self._delete_gpfs_file(fparent_remote_path, fparent_mount_path) def delete_volume(self, volume): """Deletes a logical volume.""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) volume_path = self.local_path(volume) self._delete_gpfs_file(volume_path) def _gpfs_redirect(self, src): """Removes the copy_on_write dependency between src and parent. 
Remove the copy_on_write dependency between the src file and its immediate parent such that the length of dependency chain is reduced by 1. """ max_depth = self.configuration.gpfs_max_clone_depth if max_depth == 0: return False (out, err) = self.gpfs_execute('mmclone', 'show', src) depth_regex = re.compile(r'.*\s+no\s+(?P<depth>\d+)', re.M | re.S) match = depth_regex.match(out) if match: depth = int(match.group('depth')) if depth > max_depth: self.gpfs_execute('mmclone', 'redirect', src) return True return False def _create_gpfs_clone(self, src, dest): """Create a GPFS file clone parent for the specified file.""" snap = dest + ".snap" self._create_gpfs_snap(src, snap) self._create_gpfs_copy(snap, dest) if self._gpfs_redirect(src) and self._gpfs_redirect(dest): self._execute('rm', '-f', snap, run_as_root=True) def _create_gpfs_copy(self, src, dest): """Create a GPFS file clone copy for the specified file.""" self.gpfs_execute('mmclone', 'copy', src, dest) def _gpfs_full_copy(self, src, dest): """Create a full copy from src to dest.""" self.gpfs_execute('cp', src, dest, check_exit_code=True) def _create_gpfs_snap(self, src, dest=None): """Create a GPFS file clone snapshot for the specified file.""" if dest is None: self.gpfs_execute('mmclone', 'snap', src) else: self.gpfs_execute('mmclone', 'snap', src, dest) def _is_gpfs_parent_file(self, gpfs_file): """Return true if the specified file is a gpfs clone parent.""" out, err = self.gpfs_execute('mmclone', 'show', gpfs_file) ptoken = out.splitlines().pop().split()[0] return ptoken == 'yes' def create_snapshot(self, snapshot): """Creates a GPFS snapshot.""" snapshot_path = self._get_snapshot_path(snapshot) volume_path = os.path.join(os.path.dirname(snapshot_path), snapshot['volume_name']) self._create_gpfs_snap(src=volume_path, dest=snapshot_path) self._set_rw_permission(snapshot_path, modebits='640') self._gpfs_redirect(volume_path) def delete_snapshot(self, snapshot): """Deletes a GPFS snapshot.""" # Rename the deleted snapshot to indicate it no longer exists in # cinder db. Attempt to delete the snapshot. If the snapshot has # clone children, the delete will fail silently. When volumes that # are clone children are deleted in the future, the remaining ts # snapshots will also be deleted. snapshot_path = self._get_snapshot_path(snapshot) snapshot_ts_path = '%s.ts' % snapshot_path self.gpfs_execute('mv', snapshot_path, snapshot_ts_path) self.gpfs_execute('rm', '-f', snapshot_ts_path, check_exit_code=False) def _get_snapshot_path(self, snapshot): ctxt = context.get_admin_context() snap_parent_vol = self.db.volume_get(ctxt, snapshot['volume_id']) snap_parent_vol_path = self.local_path(snap_parent_vol) snapshot_path = os.path.join(os.path.dirname(snap_parent_vol_path), snapshot['name']) return snapshot_path def local_path(self, volume): """Return the local path for the specified volume.""" # Check if the volume is part of a consistency group and return # the local_path accordingly.
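# --- Illustrative sketch (editor's aside, not part of the driver) ---
# _delete_gpfs_file and _gpfs_redirect above both scrape 'mmclone show'.
# The column layout assumed below ('Parent / Depth / Parent inode /
# File name') is for illustration only; with it, the two regexes recover
# the parent inode and the clone depth, and _gpfs_redirect splits the
# clone chain once the reported depth exceeds gpfs_max_clone_depth:
import re

SAMPLE_MMCLONE_SHOW = ('Parent  Depth   Parent inode   File name\n'
                       '------  -----   ------------   ---------\n'
                       '    no      3         148488   /gpfs/volume-a\n')

inode = re.compile(r'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)',
                   re.M | re.S).match(SAMPLE_MMCLONE_SHOW).group('inode')
depth = re.compile(r'.*\s+no\s+(?P<depth>\d+)',
                   re.M | re.S).match(SAMPLE_MMCLONE_SHOW).group('depth')
assert (inode, depth) == ('148488', '3')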
if volume['consistencygroup_id'] is not None: cgname = "consisgroup-%s" % volume['consistencygroup_id'] volume_path = os.path.join( self.configuration.gpfs_mount_point_base, cgname, volume['name'] ) else: volume_path = os.path.join( self.configuration.gpfs_mount_point_base, volume['name'] ) return volume_path def _get_gpfs_encryption_status(self): """Determine if the backend is configured with key manager.""" try: (out, err) = self.gpfs_execute('mmlsfs', self._gpfs_device, '--encryption', '-Y') lines = out.splitlines() value_token = lines[0].split(':').index('data') encryption_status = lines[1].split(':')[value_token] return encryption_status except processutils.ProcessExecutionError as exc: LOG.error(_LE('Failed to issue mmlsfs command, error: %s.'), exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" pass def create_export(self, context, volume, connector): """Exports the volume.""" pass def remove_export(self, context, volume): """Removes an export for a logical volume.""" pass def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'gpfs', 'data': { 'name': volume['name'], 'device_path': self.local_path(volume), } } def terminate_connection(self, volume, connector, **kwargs): pass def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, or stats have never been updated, run update the stats first. """ if not self._stats or refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") gpfs_base = self.configuration.gpfs_mount_point_base data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'GPFS' data["vendor_name"] = 'IBM' data["driver_version"] = self.VERSION data["storage_protocol"] = 'file' free, capacity = self._get_available_capacity(self.configuration. gpfs_mount_point_base) data['total_capacity_gb'] = math.ceil(capacity / units.Gi) data['free_capacity_gb'] = math.ceil(free / units.Gi) data['reserved_percentage'] = 0 data['QoS_support'] = False data['storage_pool'] = self._storage_pool data['location_info'] = ('GPFSDriver:%(cluster_id)s:%(root_path)s' % {'cluster_id': self._cluster_id, 'root_path': gpfs_base}) data['consistencygroup_support'] = 'True' if self._encryption_state.lower() == 'yes': data['gpfs_encryption_rest'] = 'True' self._stats = data def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume from the specified image.""" return self._clone_image(volume, image_location, image_meta['id']) def _is_cloneable(self, image_id): """Return true if the specified image can be cloned by GPFS.""" if not((self.configuration.gpfs_images_dir and self.configuration.gpfs_images_share_mode)): reason = 'glance repository not configured to use GPFS' return False, reason, None image_path = os.path.join(self.configuration.gpfs_images_dir, image_id) try: self._is_gpfs_path(image_path) except processutils.ProcessExecutionError: reason = 'image file not in GPFS' return False, reason, None return True, None, image_path def _clone_image(self, volume, image_location, image_id): """Attempt to create a volume by efficiently copying image to volume. 
If both source and target are backed by gpfs storage and the source image is in raw format move the image to create a volume using either gpfs clone operation or with a file copy. If the image format is not raw, convert it to raw at the volume path. """ # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) cloneable_image, reason, image_path = self._is_cloneable(image_id) if not cloneable_image: LOG.debug('Image %(img)s not cloneable: %(reas)s.', {'img': image_id, 'reas': reason}) return (None, False) vol_path = self.local_path(volume) data = image_utils.qemu_img_info(image_path) # if image format is already raw either clone it or # copy it depending on config file settings if data.file_format == 'raw': if (self.configuration.gpfs_images_share_mode == 'copy_on_write'): LOG.debug('Clone image to vol %s using mmclone.', volume['id']) # if the image is not already a GPFS snap file make it so if not self._is_gpfs_parent_file(image_path): self._create_gpfs_snap(image_path) self._create_gpfs_copy(image_path, vol_path) elif self.configuration.gpfs_images_share_mode == 'copy': LOG.debug('Clone image to vol %s using copyfile.', volume['id']) shutil.copyfile(image_path, vol_path) # if image is not raw convert it to raw into vol_path destination else: LOG.debug('Clone image to vol %s using qemu convert.', volume['id']) image_utils.convert_image(image_path, vol_path, 'raw') self._set_rw_permission(vol_path) self._resize_volume_file(volume, volume['size']) return {'provider_location': None}, True def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume. Note that cinder.volume.flows.create_volume will attempt to use clone_image to efficiently create volume from image when both source and target are backed by gpfs storage. If that is not the case, this function is invoked and uses fetch_to_raw to create the volume. 
""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) LOG.debug('Copy image to vol %s using image_utils fetch_to_raw.', volume['id']) image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume['size']) self._resize_volume_file(volume, volume['size']) def _resize_volume_file(self, volume, new_size): """Resize volume file to new size.""" vol_path = self.local_path(volume) try: image_utils.resize_image(vol_path, new_size, run_as_root=True) except processutils.ProcessExecutionError as exc: LOG.error(_LE("Failed to resize volume " "%(volume_id)s, error: %(error)s."), {'volume_id': volume['id'], 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) data = image_utils.qemu_img_info(vol_path) return data.virtual_size def extend_volume(self, volume, new_size): """Extend an existing volume.""" self._resize_volume_file(volume, new_size) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" image_utils.upload_volume(context, image_service, image_meta, self.local_path(volume)) def _create_backup_source(self, volume, backup): src_path = self._get_volume_path(volume) dest_path = '%s_%s' % (src_path, backup['id']) self._create_gpfs_clone(src_path, dest_path) self._gpfs_redirect(src_path) return dest_path def _do_backup(self, backup_path, backup, backup_service): with utils.temporary_chown(backup_path): with open(backup_path) as backup_file: backup_service.backup(backup, backup_file) def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup['volume_id']) volume_path = self.local_path(volume) backup_path = '%s_%s' % (volume_path, backup['id']) # create a snapshot that will be used as the backup source self._create_backup_source(volume, backup) try: LOG.debug('Begin backup of volume %s.', volume['name']) self._do_backup(backup_path, backup, backup_service) finally: # clean up snapshot file. If it is a clone parent, delete # will fail silently, but be cleaned up when volume is # eventually removed. This ensures we do not accumulate # more than gpfs_max_clone_depth snap files. 
self._delete_gpfs_file(backup_path) def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume.""" LOG.debug('Begin restore of backup %s.', backup['id']) volume_path = self.local_path(volume) with utils.temporary_chown(volume_path): with open(volume_path, 'wb') as volume_file: backup_service.restore(backup, volume['id'], volume_file) def _migrate_volume(self, volume, host): """Migrate vol if source and dest are managed by same GPFS cluster.""" LOG.debug('Migrate volume request %(vol)s to %(host)s.', {'vol': volume['name'], 'host': host['host']}) dest_path = self._can_migrate_locally(host) if dest_path is None: LOG.debug('Cannot migrate volume locally, use generic migration.') return (False, None) if dest_path == self.configuration.gpfs_mount_point_base: LOG.debug('Migration target is same cluster and path, ' 'no work needed.') return (True, None) LOG.debug('Migration target is same cluster but different path, ' 'move the volume file.') local_path = self._get_volume_path(volume) new_path = os.path.join(dest_path, volume['name']) try: self.gpfs_execute('mv', local_path, new_path) return (True, None) except processutils.ProcessExecutionError as exc: LOG.error(_LE('Driver-based migration of volume %(vol)s failed. ' 'Move from %(src)s to %(dst)s failed with error: ' '%(error)s.'), {'vol': volume['name'], 'src': local_path, 'dst': new_path, 'error': exc.stderr}) return (False, None) def migrate_volume(self, context, volume, host): """Attempt to migrate a volume to specified host.""" return self._migrate_volume(volume, host) def retype(self, context, volume, new_type, diff, host): """Modify volume to be of new type.""" LOG.debug('Retype volume request %(vol)s to be %(type)s ' '(host: %(host)s), diff %(diff)s.', {'vol': volume['name'], 'type': new_type, 'host': host, 'diff': diff}) retyped = False migrated = False pools = diff['extra_specs'].get('capabilities:storage_pool') backends = diff['extra_specs'].get('volume_backend_name') hosts = (volume['host'], host['host']) # if different backends let migration create a new volume and copy # data because the volume is considered to be substantially different if _different(backends): backend1, backend2 = backends LOG.debug('Retype request is for different backends, ' 'use migration: %(backend1)s %(backend2)s.', {'backend1': backend1, 'backend2': backend2}) return False if _different(pools): old, new = pools LOG.debug('Retype pool attribute from %(old)s to %(new)s.', {'old': old, 'new': new}) retyped = self._update_volume_storage_pool(self.local_path(volume), new) if _different(hosts): source, destination = hosts LOG.debug('Retype hosts migrate from: %(source)s to ' '%(destination)s.', {'source': source, 'destination': destination}) migrated, mdl_update = self._migrate_volume(volume, host) if migrated: updates = {'host': host['host']} self.db.volume_update(context, volume['id'], updates) return retyped or migrated def _mkfs(self, volume, filesystem, label=None): """Initialize volume to be specified filesystem type.""" if filesystem == 'swap': cmd = ['mkswap'] else: cmd = ['mkfs', '-t', filesystem] if filesystem in ('ext3', 'ext4'): cmd.append('-F') if label: if filesystem in ('msdos', 'vfat'): label_opt = '-n' else: label_opt = '-L' cmd.extend([label_opt, label]) path = self.local_path(volume) cmd.append(path) try: self._execute(*cmd, run_as_root=True) except processutils.ProcessExecutionError as exc: exception_message = (_("mkfs failed on volume %(vol)s, " "error message was: %(err)s.") %
{'vol': volume['name'], 'err': exc.stderr}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( data=exception_message) def _get_available_capacity(self, path): """Calculate available space on path.""" # Check if GPFS is mounted try: self._verify_gpfs_path_state(path) mounted = True except exception.VolumeBackendAPIException: mounted = False # If GPFS is not mounted, return zero capacity. So that the volume # request can be scheduled to another volume service. if not mounted: return 0, 0 out, err = self._execute('df', '-P', '-B', '1', path, run_as_root=True) out = out.splitlines()[1] size = int(out.split()[1]) available = int(out.split()[3]) return available, size def _verify_gpfs_path_state(self, path): """Examine if GPFS is active and file system is mounted or not.""" try: self._is_gpfs_path(path) except processutils.ProcessExecutionError: msg = (_('%s cannot be accessed. Verify that GPFS is active and ' 'file system is mounted.') % path) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_consistencygroup(self, context, group): """Create consistency group of GPFS volumes.""" cgname = "consisgroup-%s" % group['id'] fsdev = self._gpfs_device cgpath = os.path.join(self.configuration.gpfs_mount_point_base, cgname) try: self.gpfs_execute('mmcrfileset', fsdev, cgname, '--inode-space', 'new') except processutils.ProcessExecutionError as e: msg = (_('Failed to create consistency group: %(cgid)s. ' 'Error: %(excmsg)s.') % {'cgid': group['id'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self.gpfs_execute('mmlinkfileset', fsdev, cgname, '-J', cgpath) except processutils.ProcessExecutionError as e: msg = (_('Failed to link fileset for the share %(cgname)s. ' 'Error: %(excmsg)s.') % {'cgname': cgname, 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self.gpfs_execute('chmod', '770', cgpath) except processutils.ProcessExecutionError as e: msg = (_('Failed to set permissions for the consistency group ' '%(cgname)s. ' 'Error: %(excmsg)s.') % {'cgname': cgname, 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} return model_update def delete_consistencygroup(self, context, group, volumes): """Delete consistency group of GPFS volumes.""" cgname = "consisgroup-%s" % group['id'] fsdev = self._gpfs_device model_update = {} model_update['status'] = group['status'] volumes = self.db.volume_get_all_by_group(context, group['id']) # Unlink and delete the fileset associated with the consistency group. # All of the volumes and volume snapshot data will also be deleted. try: self.gpfs_execute('mmunlinkfileset', fsdev, cgname, '-f') except processutils.ProcessExecutionError as e: msg = (_('Failed to unlink fileset for consistency group ' '%(cgname)s. Error: %(excmsg)s.') % {'cgname': cgname, 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self.gpfs_execute('mmdelfileset', fsdev, cgname, '-f') except processutils.ProcessExecutionError as e: msg = (_('Failed to delete fileset for consistency group ' '%(cgname)s. 
Error: %(excmsg)s.') % {'cgname': cgname, 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for volume_ref in volumes: volume_ref['status'] = 'deleted' model_update = {'status': group['status']} return model_update, volumes def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Create snapshot of a consistency group of GPFS volumes.""" snapshots = self.db.snapshot_get_all_for_cgsnapshot( context, cgsnapshot['id']) for snapshot in snapshots: self.create_snapshot(snapshot) snapshot['status'] = 'available' model_update = {'status': 'available'} return model_update, snapshots def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Delete snapshot of a consistency group of GPFS volumes.""" snapshots = self.db.snapshot_get_all_for_cgsnapshot( context, cgsnapshot['id']) for snapshot in snapshots: self.delete_snapshot(snapshot) snapshot['status'] = 'deleted' model_update = {'status': cgsnapshot['status']} return model_update, snapshots class GPFSNFSDriver(GPFSDriver, nfs.NfsDriver, san.SanDriver): """GPFS cinder driver extension. This extends the capability of existing GPFS cinder driver to be able to create cinder volumes when cinder volume service is not running on GPFS node. """ def __init__(self, *args, **kwargs): self._context = None self._storage_pool = None self._cluster_id = None super(GPFSNFSDriver, self).__init__(*args, **kwargs) self.gpfs_execute = self._gpfs_remote_execute self.configuration.append_config_values(remotefs.nas_opts) self.configuration.san_ip = self.configuration.nas_ip self.configuration.san_login = self.configuration.nas_login self.configuration.san_password = self.configuration.nas_password self.configuration.san_private_key = ( self.configuration.nas_private_key) self.configuration.san_ssh_port = self.configuration.nas_ssh_port def _gpfs_remote_execute(self, *cmd, **kwargs): check_exit_code = kwargs.pop('check_exit_code', None) return self._run_ssh(cmd, check_exit_code) def do_setup(self, context): super(GPFSNFSDriver, self).do_setup(context) self._context = context def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, or stats have never been updated, run update the stats first. 
""" if not self._stats or refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Enter _update_volume_stats.") gpfs_base = self.configuration.gpfs_mount_point_base data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'GPFSNFS' data['vendor_name'] = 'IBM' data['driver_version'] = self.get_version() data['storage_protocol'] = 'file' self._ensure_shares_mounted() global_capacity = 0 global_free = 0 for share in self._mounted_shares: capacity, free, _used = self._get_capacity_info(share) global_capacity += capacity global_free += free data['total_capacity_gb'] = global_capacity / float(units.Gi) data['free_capacity_gb'] = global_free / float(units.Gi) data['reserved_percentage'] = 0 data['QoS_support'] = False data['storage_pool'] = self._storage_pool data['location_info'] = ('GPFSNFSDriver:%(cluster_id)s:%(root_path)s' % {'cluster_id': self._cluster_id, 'root_path': gpfs_base}) data['consistencygroup_support'] = 'True' self._stats = data LOG.debug("Exit _update_volume_stats.") def _get_volume_path(self, volume): """Returns remote GPFS path for the given volume.""" export_path = self.configuration.gpfs_mount_point_base if volume['consistencygroup_id'] is not None: cgname = "consisgroup-%s" % volume['consistencygroup_id'] volume_path = os.path.join(export_path, cgname, volume['name']) else: volume_path = os.path.join(export_path, volume['name']) return volume_path def local_path(self, volume): """Returns the local path for the specified volume.""" remotefs_share = volume['provider_location'] base_local_path = self._get_mount_point_for_share(remotefs_share) # Check if the volume is part of a consistency group and return # the local_path accordingly. 
if volume['consistencygroup_id'] is not None: cgname = "consisgroup-%s" % volume['consistencygroup_id'] volume_path = os.path.join(base_local_path, cgname, volume['name']) else: volume_path = os.path.join(base_local_path, volume['name']) return volume_path def _get_snapshot_path(self, snapshot): """Returns remote GPFS path for the given snapshot.""" snap_parent_vol = self.db.volume_get(self._context, snapshot['volume_id']) snap_parent_vol_path = self._get_volume_path(snap_parent_vol) snapshot_path = os.path.join(os.path.dirname(snap_parent_vol_path), snapshot['name']) return snapshot_path def create_volume(self, volume): """Creates a GPFS volume.""" super(GPFSNFSDriver, self).create_volume(volume) volume['provider_location'] = self._find_share(volume['size']) return {'provider_location': volume['provider_location']} def delete_volume(self, volume): """Deletes a logical volume.""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) volume_path = self._get_volume_path(volume) mount_point = os.path.dirname(self.local_path(volume)) # Delete all dependent snapshots, the snapshot will get deleted # if the link count goes to zero, else rm will fail silently self._delete_gpfs_file(volume_path, mount_point) def create_volume_from_snapshot(self, volume, snapshot): """Creates a GPFS volume from a snapshot.""" self._create_volume_from_snapshot(volume, snapshot) volume['provider_location'] = self._find_share(volume['size']) self._resize_volume_file(volume, volume['size']) return {'provider_location': volume['provider_location']} def create_cloned_volume(self, volume, src_vref): """Create a GPFS volume from another volume.""" self._create_cloned_volume(volume, src_vref) volume['provider_location'] = self._find_share(volume['size']) self._resize_volume_file(volume, volume['size']) return {'provider_location': volume['provider_location']} def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup['volume_id']) volume_path = self.local_path(volume) backup_path = '%s_%s' % (volume_path, backup['id']) # create a snapshot that will be used as the backup source backup_remote_path = self._create_backup_source(volume, backup) try: LOG.debug('Begin backup of volume %s.', volume['name']) self._do_backup(backup_path, backup, backup_service) finally: # clean up snapshot file. If it is a clone parent, delete # will fail silently, but be cleaned up when volume is # eventually removed. This ensures we do not accumulate # more than gpfs_max_clone_depth snap files. backup_mount_path = os.path.dirname(backup_path) self._delete_gpfs_file(backup_remote_path, backup_mount_path) cinder-8.0.0/cinder/volume/drivers/netapp/0000775000567000056710000000000012701406543021646 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/options.py0000664000567000056710000002470312701406250023714 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contains configuration options for NetApp drivers. Common place to hold configuration options for all NetApp drivers. Options need to be grouped into granular units so that they can be reused by different modules and classes. This does not restrict declaring options in individual modules; options that are not reusable may be declared there instead. Keeping options in a single place is recommended, as it ensures reusability and better management of configuration options. """ from oslo_config import cfg NETAPP_SIZE_MULTIPLIER_DEFAULT = 1.2 netapp_proxy_opts = [ cfg.StrOpt('netapp_storage_family', default='ontap_cluster', choices=['ontap_7mode', 'ontap_cluster', 'eseries'], help=('The storage family type used on the storage system; ' 'valid values are ontap_7mode for using Data ONTAP ' 'operating in 7-Mode, ontap_cluster for using ' 'clustered Data ONTAP, or eseries for using E-Series.')), cfg.StrOpt('netapp_storage_protocol', choices=['iscsi', 'fc', 'nfs'], help=('The storage protocol to be used on the data path with ' 'the storage system.')), ] netapp_connection_opts = [ cfg.StrOpt('netapp_server_hostname', help='The hostname (or IP address) for the storage system or ' 'proxy server.'), cfg.IntOpt('netapp_server_port', help=('The TCP port to use for communication with the storage ' 'system or proxy server. If not specified, Data ONTAP ' 'drivers will use 80 for HTTP and 443 for HTTPS; ' 'E-Series will use 8080 for HTTP and 8443 for HTTPS.')), ] netapp_transport_opts = [ cfg.StrOpt('netapp_transport_type', default='http', choices=['http', 'https'], help=('The transport protocol used when communicating with ' 'the storage system or proxy server.')), ] netapp_basicauth_opts = [ cfg.StrOpt('netapp_login', help=('Administrative user account name used to access the ' 'storage system or proxy server.')), cfg.StrOpt('netapp_password', help=('Password for the administrative user account ' 'specified in the netapp_login option.'), secret=True), ] netapp_provisioning_opts = [ cfg.FloatOpt('netapp_size_multiplier', default=NETAPP_SIZE_MULTIPLIER_DEFAULT, help=('The quantity to be multiplied by the requested ' 'volume size to ensure enough space is available on ' 'the virtual storage server (Vserver) to fulfill ' 'the volume creation request. Note: this option ' 'is deprecated and will be removed in favor of ' '"reserved_percentage" in the Mitaka release.')), cfg.StrOpt('netapp_lun_space_reservation', default='enabled', choices=['enabled', 'disabled'], help=('This option determines if storage space is reserved ' 'for LUN allocation. If enabled, LUNs are thick ' 'provisioned. If space reservation is disabled, ' 'storage space is allocated on demand.')), ] netapp_cluster_opts = [ cfg.StrOpt('netapp_vserver', help=('This option specifies the virtual storage server ' '(Vserver) name on the storage cluster on which ' 'provisioning of block storage volumes should occur.')), ] netapp_7mode_opts = [ cfg.StrOpt('netapp_vfiler', help=('The vFiler unit on which provisioning of block storage ' 'volumes will be done.
This option is only used by the ' 'driver when connecting to an instance with a storage ' 'family of Data ONTAP operating in 7-Mode. Only use this ' 'option when utilizing the MultiStore feature on the ' 'NetApp storage system.')), cfg.StrOpt('netapp_partner_backend_name', help=('The name of the config.conf stanza for a Data ONTAP ' '(7-mode) HA partner. This option is only used by the ' 'driver when connecting to an instance with a storage ' 'family of Data ONTAP operating in 7-Mode, and it is ' 'required if the storage protocol selected is FC.')), ] netapp_img_cache_opts = [ cfg.IntOpt('thres_avl_size_perc_start', default=20, help=('If the percentage of available space for an NFS share ' 'has dropped below the value specified by this option, ' 'the NFS image cache will be cleaned.')), cfg.IntOpt('thres_avl_size_perc_stop', default=60, help=('When the percentage of available space on an NFS share ' 'has reached the percentage specified by this option, ' 'the driver will stop clearing files from the NFS image ' 'cache that have not been accessed in the last M ' 'minutes, where M is the value of the ' 'expiry_thres_minutes configuration option.')), cfg.IntOpt('expiry_thres_minutes', default=720, help=('This option specifies the threshold for last access ' 'time for images in the NFS image cache. When a cache ' 'cleaning cycle begins, images in the cache that have ' 'not been accessed in the last M minutes, where M is ' 'the value of this parameter, will be deleted from the ' 'cache to create free space on the NFS share.')), ] netapp_eseries_opts = [ cfg.StrOpt('netapp_webservice_path', default='/devmgr/v2', help=('This option is used to specify the path to the E-Series ' 'proxy application on a proxy server. The value is ' 'combined with the value of the netapp_transport_type, ' 'netapp_server_hostname, and netapp_server_port options ' 'to create the URL used by the driver to connect to the ' 'proxy application.')), cfg.StrOpt('netapp_controller_ips', help=('This option is only utilized when the storage family ' 'is configured to eseries. This option is used to ' 'restrict provisioning to the specified controllers. ' 'Specify the value of this option to be a comma ' 'separated list of controller hostnames or IP addresses ' 'to be used for provisioning.')), cfg.StrOpt('netapp_sa_password', help=('Password for the NetApp E-Series storage array.'), secret=True), cfg.BoolOpt('netapp_enable_multiattach', default=False, help='This option specifies whether the driver should allow ' 'operations that require multiple attachments to a ' 'volume. An example would be live migration of servers ' 'that have volumes attached. When enabled, this backend ' 'is limited to 256 total volumes in order to ' 'guarantee volumes can be accessed by more than one ' 'host.'), ] netapp_nfs_extra_opts = [ cfg.StrOpt('netapp_copyoffload_tool_path', help=('This option specifies the path of the NetApp copy ' 'offload tool binary. Ensure that the binary has execute ' 'permissions set which allow the effective user of the ' 'cinder-volume process to execute the file.')), ] netapp_san_opts = [ cfg.StrOpt('netapp_lun_ostype', help=('This option defines the type of operating system that' ' will access a LUN exported from Data ONTAP; it is' ' assigned to the LUN at the time it is created.')), cfg.StrOpt('netapp_host_type', deprecated_name='netapp_eseries_host_type', help=('This option defines the type of operating system for' ' all initiators that can access a LUN. 
This information' ' is used when mapping LUNs to individual hosts or' ' groups of hosts.')), cfg.StrOpt('netapp_pool_name_search_pattern', deprecated_opts=[cfg.DeprecatedOpt(name='netapp_volume_list'), cfg.DeprecatedOpt(name='netapp_storage_pools') ], default="(.+)", help=('This option is used to restrict provisioning to the ' 'specified pools. Specify the value of ' 'this option to be a regular expression which will be ' 'applied to the names of objects from the storage ' 'backend which represent pools in Cinder. This option ' 'is only utilized when the storage protocol is ' 'configured to use iSCSI or FC.')), ] CONF = cfg.CONF CONF.register_opts(netapp_proxy_opts) CONF.register_opts(netapp_connection_opts) CONF.register_opts(netapp_transport_opts) CONF.register_opts(netapp_basicauth_opts) CONF.register_opts(netapp_cluster_opts) CONF.register_opts(netapp_7mode_opts) CONF.register_opts(netapp_provisioning_opts) CONF.register_opts(netapp_img_cache_opts) CONF.register_opts(netapp_eseries_opts) CONF.register_opts(netapp_nfs_extra_opts) CONF.register_opts(netapp_san_opts) cinder-8.0.0/cinder/volume/drivers/netapp/utils.py0000664000567000056710000004547712701406250023374 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Michael Price. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utilities for NetApp drivers. This module contains common utilities to be used by one or more NetApp drivers to achieve the desired functionality. """ import decimal import platform import re import socket from oslo_concurrency import processutils as putils from oslo_log import log as logging import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LW, _LI from cinder import utils from cinder import version from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger(__name__) OPENSTACK_PREFIX = 'openstack-' OBSOLETE_SSC_SPECS = {'netapp:raid_type': 'netapp_raid_type', 'netapp:disk_type': 'netapp_disk_type'} DEPRECATED_SSC_SPECS = {'netapp_unmirrored': 'netapp_mirrored', 'netapp_nodedup': 'netapp_dedup', 'netapp_nocompression': 'netapp_compression', 'netapp_thick_provisioned': 'netapp_thin_provisioned'} QOS_KEYS = frozenset(['maxIOPS', 'maxIOPSperGiB', 'maxBPS', 'maxBPSperGiB']) BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) # Secret length cannot be less than 96 bits. http://tools.ietf.org/html/rfc3723 CHAP_SECRET_LENGTH = 16 DEFAULT_CHAP_USER_NAME = 'NetApp_iSCSI_CHAP_Username' def validate_instantiation(**kwargs): """Checks if a driver is instantiated other than by the unified driver. Helps check direct instantiation of netapp drivers. Call this function in every netapp block driver constructor. 
""" if kwargs and kwargs.get('netapp_mode') == 'proxy': return LOG.warning(_LW("It is not the recommended way to use drivers by NetApp. " "Please use NetAppDriver to achieve the functionality.")) def check_flags(required_flags, configuration): """Ensure that the flags we care about are set.""" for flag in required_flags: if not getattr(configuration, flag, None): msg = _('Configuration value %s is not set.') % flag raise exception.InvalidInput(reason=msg) def to_bool(val): """Converts true, yes, y, 1 to True, False otherwise.""" if val: strg = six.text_type(val).lower() if (strg == 'true' or strg == 'y' or strg == 'yes' or strg == 'enabled' or strg == '1'): return True else: return False else: return False @utils.synchronized("safe_set_attr") def set_safe_attr(instance, attr, val): """Sets the attribute in a thread safe manner. Returns if new val was set on attribute. If attr already had the value then False. """ if not instance or not attr: return False old_val = getattr(instance, attr, None) if val is None and old_val is None: return False elif val == old_val: return False else: setattr(instance, attr, val) return True def get_volume_extra_specs(volume): """Provides extra specs associated with volume.""" ctxt = context.get_admin_context() type_id = volume.get('volume_type_id') if type_id is None: return {} volume_type = volume_types.get_volume_type(ctxt, type_id) if volume_type is None: return {} extra_specs = volume_type.get('extra_specs', {}) log_extra_spec_warnings(extra_specs) return extra_specs def resolve_hostname(hostname): """Resolves host name to IP address.""" res = socket.getaddrinfo(hostname, None)[0] family, socktype, proto, canonname, sockaddr = res return sockaddr[0] def round_down(value, precision): return float(decimal.Decimal(six.text_type(value)).quantize( decimal.Decimal(precision), rounding=decimal.ROUND_DOWN)) def log_extra_spec_warnings(extra_specs): for spec in (set(extra_specs.keys() if extra_specs else []) & set(OBSOLETE_SSC_SPECS.keys())): LOG.warning(_LW('Extra spec %(old)s is obsolete. Use %(new)s ' 'instead.'), {'old': spec, 'new': OBSOLETE_SSC_SPECS[spec]}) for spec in (set(extra_specs.keys() if extra_specs else []) & set(DEPRECATED_SSC_SPECS.keys())): LOG.warning(_LW('Extra spec %(old)s is deprecated. Use %(new)s ' 'instead.'), {'old': spec, 'new': DEPRECATED_SSC_SPECS[spec]}) def get_iscsi_connection_properties(lun_id, volume, iqn, address, port): properties = {} properties['target_discovered'] = False properties['target_portal'] = '%s:%s' % (address, port) properties['target_iqn'] = iqn properties['target_lun'] = int(lun_id) properties['volume_id'] = volume['id'] auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret return { 'driver_volume_type': 'iscsi', 'data': properties, } def validate_qos_spec(qos_spec): """Check validity of Cinder qos spec for our backend.""" if qos_spec is None: return normalized_qos_keys = [key.lower() for key in QOS_KEYS] keylist = [] for key, value in qos_spec.items(): lower_case_key = key.lower() if lower_case_key not in normalized_qos_keys: msg = _('Unrecognized QOS keyword: "%s"') % key raise exception.Invalid(msg) keylist.append(lower_case_key) # Modify the following check when we allow multiple settings in one spec. 
if len(keylist) > 1: msg = _('Only one limit can be set in a QoS spec.') raise exception.Invalid(msg) def get_volume_type_from_volume(volume): """Provides volume type associated with volume.""" type_id = volume.get('volume_type_id') if type_id is None: return {} ctxt = context.get_admin_context() return volume_types.get_volume_type(ctxt, type_id) def map_qos_spec(qos_spec, volume): """Map Cinder QOS spec to limit/throughput-value as used in client API.""" if qos_spec is None: return None qos_spec = map_dict_to_lower(qos_spec) spec = dict(policy_name=get_qos_policy_group_name(volume), max_throughput=None) # QoS specs are exclusive of one another. if 'maxiops' in qos_spec: spec['max_throughput'] = '%siops' % qos_spec['maxiops'] elif 'maxiopspergib' in qos_spec: spec['max_throughput'] = '%siops' % six.text_type( int(qos_spec['maxiopspergib']) * int(volume['size'])) elif 'maxbps' in qos_spec: spec['max_throughput'] = '%sB/s' % qos_spec['maxbps'] elif 'maxbpspergib' in qos_spec: spec['max_throughput'] = '%sB/s' % six.text_type( int(qos_spec['maxbpspergib']) * int(volume['size'])) return spec def map_dict_to_lower(input_dict): """Return an equivalent to the input dictionary with lower-case keys.""" lower_case_dict = {} for key in input_dict: lower_case_dict[key.lower()] = input_dict[key] return lower_case_dict def get_qos_policy_group_name(volume): """Return the name of backend QOS policy group based on its volume id.""" if 'id' in volume: return OPENSTACK_PREFIX + volume['id'] return None def get_qos_policy_group_name_from_info(qos_policy_group_info): """Return the name of a QOS policy group given qos policy group info.""" if qos_policy_group_info is None: return None legacy = qos_policy_group_info.get('legacy') if legacy is not None: return legacy['policy_name'] spec = qos_policy_group_info.get('spec') if spec is not None: return spec['policy_name'] return None def get_pool_name_filter_regex(configuration): """Build the regex for filtering pools by name :param configuration: The volume driver configuration :raise InvalidConfigurationValue: if configured regex pattern is invalid :returns: A compiled regex for filtering pool names """ # If the configuration parameter is specified as an empty string # (interpreted as matching all pools), we replace it here with # (.+) to be explicit with CSV compatibility support implemented below. 
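# --- Illustrative sketch (editor's aside, not part of the driver) ---
# The statements that follow OR the comma-separated patterns together
# and anchor each one, so a configured value such as 'pool1, cinder.*'
# compiles to '^pool1$|^cinder.*$':
import re

patterns = '|'.join(['^' + p.strip('^$ \t') + '$'
                     for p in 'pool1, cinder.*'.split(',')])
assert patterns == '^pool1$|^cinder.*$'
assert re.compile(patterns).match('cinder_vol_pool')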
pool_patterns = configuration.netapp_pool_name_search_pattern or '(.+)' # Strip whitespace from start/end and then 'or' all regex patterns pool_patterns = '|'.join(['^' + pool_pattern.strip('^$ \t') + '$' for pool_pattern in pool_patterns.split(',')]) try: return re.compile(pool_patterns) except re.error: raise exception.InvalidConfigurationValue( option='netapp_pool_name_search_pattern', value=configuration.netapp_pool_name_search_pattern) def get_valid_qos_policy_group_info(volume, extra_specs=None): """Given a volume, return information for QOS provisioning.""" info = dict(legacy=None, spec=None) try: volume_type = get_volume_type_from_volume(volume) except KeyError: LOG.exception(_LE('Cannot get QoS spec for volume %s.'), volume['id']) return info if volume_type is None: return info if extra_specs is None: extra_specs = volume_type.get('extra_specs', {}) info['legacy'] = get_legacy_qos_policy(extra_specs) info['spec'] = get_valid_backend_qos_spec_from_volume_type(volume, volume_type) msg = 'QoS policy group info for volume %(vol)s: %(info)s' LOG.debug(msg, {'vol': volume['name'], 'info': info}) check_for_invalid_qos_spec_combination(info, volume_type) return info def get_valid_backend_qos_spec_from_volume_type(volume, volume_type): """Given a volume type, return the associated Cinder QoS spec.""" spec_key_values = get_backend_qos_spec_from_volume_type(volume_type) if spec_key_values is None: return None validate_qos_spec(spec_key_values) return map_qos_spec(spec_key_values, volume) def get_backend_qos_spec_from_volume_type(volume_type): qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is None: return None ctxt = context.get_admin_context() qos_spec = qos_specs.get_qos_specs(ctxt, qos_specs_id) if qos_spec is None: return None consumer = qos_spec['consumer'] # Front end QoS specs are handled by libvirt and we ignore them here. if consumer not in BACKEND_QOS_CONSUMERS: return None spec_key_values = qos_spec['specs'] return spec_key_values def check_for_invalid_qos_spec_combination(info, volume_type): """Invalidate QOS spec if both legacy and non-legacy info is present.""" if info['legacy'] and info['spec']: msg = _('Conflicting QoS specifications in volume type ' '%s: when QoS spec is associated to volume ' 'type, legacy "netapp:qos_policy_group" is not allowed in ' 'the volume type extra specs.') % volume_type['id'] raise exception.Invalid(msg) def get_legacy_qos_policy(extra_specs): """Return legacy qos policy information if present in extra specs.""" external_policy_name = extra_specs.get('netapp:qos_policy_group') if external_policy_name is None: return None return dict(policy_name=external_policy_name) class hashabledict(dict): """A hashable dictionary that is comparable (i.e. in unit tests, etc.)""" def __hash__(self): return hash(tuple(sorted(self.items()))) class OpenStackInfo(object): """OS/distribution, release, and version. NetApp uses these fields as content for EMS log entry. 
""" PACKAGE_NAME = 'python-cinder' def __init__(self): self._version = 'unknown version' self._release = 'unknown release' self._vendor = 'unknown vendor' self._platform = 'unknown platform' def _update_version_from_version_string(self): try: self._version = version.version_info.version_string() except Exception: pass def _update_release_from_release_string(self): try: self._release = version.version_info.release_string() except Exception: pass def _update_platform(self): try: self._platform = platform.platform() except Exception: pass @staticmethod def _get_version_info_version(): return version.version_info.version @staticmethod def _get_version_info_release(): return version.version_info.release def _update_info_from_version_info(self): try: ver = self._get_version_info_version() if ver: self._version = ver except Exception: pass try: rel = self._get_version_info_release() if rel: self._release = rel except Exception: pass # RDO, RHEL-OSP, Mirantis on Redhat, SUSE def _update_info_from_rpm(self): LOG.debug('Trying rpm command.') try: out, err = putils.execute("rpm", "-q", "--queryformat", "'%{version}\t%{release}\t%{vendor}'", self.PACKAGE_NAME) if not out: LOG.info(_LI('No rpm info found for %(pkg)s package.'), { 'pkg': self.PACKAGE_NAME}) return False parts = out.split() self._version = parts[0] self._release = parts[1] self._vendor = ' '.join(parts[2::]) return True except Exception as e: LOG.info(_LI('Could not run rpm command: %(msg)s.'), {'msg': e}) return False # ubuntu, mirantis on ubuntu def _update_info_from_dpkg(self): LOG.debug('Trying dpkg-query command.') try: _vendor = None out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'", self.PACKAGE_NAME) if not out: LOG.info(_LI('No dpkg-query info found for %(pkg)s package.'), {'pkg': self.PACKAGE_NAME}) return False # debian format: [epoch:]upstream_version[-debian_revision] deb_version = out # in case epoch or revision is missing, copy entire string _release = deb_version if ':' in deb_version: deb_epoch, upstream_version = deb_version.split(':') _release = upstream_version if '-' in deb_version: deb_revision = deb_version.split('-')[1] _vendor = deb_revision self._release = _release if _vendor: self._vendor = _vendor return True except Exception as e: LOG.info(_LI('Could not run dpkg-query command: %(msg)s.'), { 'msg': e}) return False def _update_openstack_info(self): self._update_version_from_version_string() self._update_release_from_release_string() self._update_platform() # some distributions override with more meaningful information self._update_info_from_version_info() # see if we have still more targeted info from rpm or apt found_package = self._update_info_from_rpm() if not found_package: self._update_info_from_dpkg() def info(self): self._update_openstack_info() return '%(version)s|%(release)s|%(vendor)s|%(platform)s' % { 'version': self._version, 'release': self._release, 'vendor': self._vendor, 'platform': self._platform} class Features(object): def __init__(self): self.defined_features = set() def add_feature(self, name, supported=True, min_version=None): if not isinstance(supported, bool): raise TypeError("Feature value must be a bool type.") self.defined_features.add(name) setattr(self, name, FeatureState(supported, min_version)) def __getattr__(self, name): # NOTE(cknight): Needed to keep pylint happy. 
raise AttributeError class FeatureState(object): def __init__(self, supported=True, minimum_version=None): """Represents the current state of enablement for a Feature :param supported: True if supported, false otherwise :param minimum_version: The minimum version that this feature is supported at """ self.supported = supported self.minimum_version = minimum_version def __nonzero__(self): """Allow a FeatureState object to be tested for truth value :returns: True if the feature is supported, otherwise False """ return self.supported def __bool__(self): """py3 Allow a FeatureState object to be tested for truth value :returns: True if the feature is supported, otherwise False """ return self.supported class BitSet(object): def __init__(self, value=0): self._value = value def set(self, bit): self._value |= 1 << bit return self def unset(self, bit): self._value &= ~(1 << bit) return self def is_set(self, bit): return self._value & 1 << bit def __and__(self, other): self._value &= other return self def __or__(self, other): self._value |= other return self def __invert__(self): self._value = ~self._value return self def __xor__(self, other): self._value ^= other return self def __lshift__(self, other): self._value <<= other return self def __rshift__(self, other): self._value >>= other return self def __int__(self): return self._value def __str__(self): return bin(self._value) def __repr__(self): return str(self._value) def __eq__(self, other): return (isinstance(other, self.__class__) and self._value == other._value) or self._value == int(other) def __ne__(self, other): return not self.__eq__(other) cinder-8.0.0/cinder/volume/drivers/netapp/__init__.py0000664000567000056710000000000012701406250023740 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/common.py0000664000567000056710000001047712701406250023514 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Alex Meade. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unified driver for NetApp storage systems. Supports multiple storage systems of different families and protocols. """ from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _, _LI from cinder.volume import driver from cinder.volume.drivers.netapp import options from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) DATAONTAP_PATH = 'cinder.volume.drivers.netapp.dataontap' ESERIES_PATH = 'cinder.volume.drivers.netapp.eseries' # Add new drivers here, no other code changes required. 
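# --- Editor's note (illustrative, not part of the driver) ---
# Wiring in a new backend really is data-driven, as the comment above
# says: NetAppDriver.create_driver (below) looks the dotted class path
# up by (storage family, storage protocol) and importutils instantiates
# it. For example, the existing registry maps
#   ('ontap_cluster', 'iscsi')
#     -> DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver'
# so a new family/protocol pair only needs its class path added here.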
NETAPP_UNIFIED_DRIVER_REGISTRY = { 'ontap_cluster': { 'iscsi': DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver', 'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver', 'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver' }, 'ontap_7mode': { 'iscsi': DATAONTAP_PATH + '.iscsi_7mode.NetApp7modeISCSIDriver', 'nfs': DATAONTAP_PATH + '.nfs_7mode.NetApp7modeNfsDriver', 'fc': DATAONTAP_PATH + '.fc_7mode.NetApp7modeFibreChannelDriver' }, 'eseries': { 'iscsi': ESERIES_PATH + '.iscsi_driver.NetAppEseriesISCSIDriver', 'fc': ESERIES_PATH + '.fc_driver.NetAppEseriesFibreChannelDriver' }} class NetAppDriver(driver.ProxyVD): """NetApp unified block storage driver. Acts as a factory to create NetApp storage drivers based on the storage family and protocol configured. """ REQUIRED_FLAGS = ['netapp_storage_family', 'netapp_storage_protocol'] def __new__(cls, *args, **kwargs): config = kwargs.get('configuration', None) if not config: raise exception.InvalidInput( reason=_('Required configuration not found')) config.append_config_values(options.netapp_proxy_opts) na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config) app_version = na_utils.OpenStackInfo().info() LOG.info(_LI('OpenStack OS Version Info: %(info)s'), {'info': app_version}) kwargs['app_version'] = app_version return NetAppDriver.create_driver(config.netapp_storage_family, config.netapp_storage_protocol, *args, **kwargs) @staticmethod def create_driver(storage_family, storage_protocol, *args, **kwargs): """Creates an appropriate driver based on family and protocol.""" storage_family = storage_family.lower() storage_protocol = storage_protocol.lower() fmt = {'storage_family': storage_family, 'storage_protocol': storage_protocol} LOG.info(_LI('Requested unified config: %(storage_family)s and ' '%(storage_protocol)s.'), fmt) family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family) if family_meta is None: raise exception.InvalidInput( reason=_('Storage family %s is not supported.') % storage_family) driver_loc = family_meta.get(storage_protocol) if driver_loc is None: raise exception.InvalidInput( reason=_('Protocol %(storage_protocol)s is not supported ' 'for storage family %(storage_family)s.') % fmt) kwargs = kwargs or {} kwargs['netapp_mode'] = 'proxy' driver = importutils.import_object(driver_loc, *args, **kwargs) LOG.info(_LI('NetApp driver of family %(storage_family)s and protocol ' '%(storage_protocol)s loaded.'), fmt) return driver cinder-8.0.0/cinder/volume/drivers/netapp/eseries/0000775000567000056710000000000012701406543023305 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/eseries/utils.py0000664000567000056710000000410112701406250025006 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utilities for NetApp E-series drivers. 
""" import base64 import binascii import uuid from oslo_log import log as logging import six LOG = logging.getLogger(__name__) MULTI_ATTACH_HOST_GROUP_NAME = 'cinder-multi-attach' NULL_REF = '0000000000000000000000000000000000000000' MAX_LUNS_PER_HOST = 256 MAX_LUNS_PER_HOST_GROUP = 256 def encode_hex_to_base32(hex_string): """Encodes hex to base32 bit as per RFC4648.""" bin_form = binascii.unhexlify(hex_string) return base64.b32encode(bin_form) def decode_base32_to_hex(base32_string): """Decodes base32 string to hex string.""" bin_form = base64.b32decode(base32_string) return binascii.hexlify(bin_form) def convert_uuid_to_es_fmt(uuid_str): """Converts uuid to e-series compatible name format.""" uuid_base32 = encode_hex_to_base32(uuid.UUID(six.text_type(uuid_str)).hex) es_label = uuid_base32.strip(b'=') if six.PY3: es_label = es_label.decode('ascii') return es_label def convert_es_fmt_to_uuid(es_label): """Converts e-series name format to uuid.""" if isinstance(es_label, six.text_type): es_label = es_label.encode('utf-8') if es_label.startswith(b'tmp-'): es_label = es_label[4:] es_label = es_label.ljust(32, b'=') es_label = binascii.hexlify(base64.b32decode(es_label)) if six.PY3: es_label = es_label.decode('ascii') return uuid.UUID(es_label) cinder-8.0.0/cinder/volume/drivers/netapp/eseries/__init__.py0000664000567000056710000000000012701406250025377 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/eseries/exception.py0000664000567000056710000000242712701406250025655 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade. All Rights Reserved. # Copyright (c) 2015 Michael Price. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import exception from cinder.i18n import _ class VolumeNotMapped(exception.NetAppDriverException): message = _("Volume %(volume_id)s is not currently mapped to host " "%(host)s") class UnsupportedHostGroup(exception.NetAppDriverException): message = _("Volume %(volume_id)s is currently mapped to unsupported " "host group %(group)s") class WebServiceException(exception.NetAppDriverException): def __init__(self, message=None, status_code=None): self.status_code = status_code super(WebServiceException, self).__init__(message=message) cinder-8.0.0/cinder/volume/drivers/netapp/eseries/host_mapper.py0000664000567000056710000002514412701406250026201 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade. All Rights Reserved. # Copyright (c) 2015 Yogesh Kshirsagar. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ This module handles mapping E-Series volumes to E-Series Hosts and Host Groups. """ import collections import random from oslo_log import log as logging from six.moves import range from cinder import exception from cinder.i18n import _ from cinder import utils as cinder_utils from cinder.volume.drivers.netapp.eseries import exception as eseries_exc from cinder.volume.drivers.netapp.eseries import utils LOG = logging.getLogger(__name__) @cinder_utils.trace_method @cinder_utils.synchronized('map_es_volume') def map_volume_to_single_host(client, volume, eseries_vol, host, vol_map, multiattach_enabled): """Maps the e-series volume to host with initiator.""" LOG.debug("Attempting to map volume %s to single host.", volume['id']) # If volume is not mapped on the backend, map directly to host if not vol_map: mappings = client.get_volume_mappings_for_host(host['hostRef']) lun = _get_free_lun(client, host, multiattach_enabled, mappings) return client.create_volume_mapping(eseries_vol['volumeRef'], host['hostRef'], lun) # If volume is already mapped to desired host if vol_map.get('mapRef') == host['hostRef']: return vol_map multiattach_cluster_ref = None try: host_group = client.get_host_group_by_name( utils.MULTI_ATTACH_HOST_GROUP_NAME) multiattach_cluster_ref = host_group['clusterRef'] except exception.NotFound: pass # Volume is mapped to the multiattach host group if vol_map.get('mapRef') == multiattach_cluster_ref: LOG.debug("Volume %s is mapped to multiattach host group.", volume['id']) # If volume is not currently attached according to Cinder, it is # safe to delete the mapping if not (volume['attach_status'] == 'attached'): LOG.debug("Volume %(vol)s is not currently attached, moving " "existing mapping to host %(host)s.", {'vol': volume['id'], 'host': host['label']}) mappings = client.get_volume_mappings_for_host( host['hostRef']) lun = _get_free_lun(client, host, multiattach_enabled, mappings) return client.move_volume_mapping_via_symbol( vol_map.get('mapRef'), host['hostRef'], lun ) # If we got this far, volume must be mapped to something else msg = _("Cannot attach already attached volume %s; " "multiattach is disabled via the " "'netapp_enable_multiattach' configuration option.") raise exception.NetAppDriverException(msg % volume['id']) @cinder_utils.trace_method @cinder_utils.synchronized('map_es_volume') def map_volume_to_multiple_hosts(client, volume, eseries_vol, target_host, mapping): """Maps the e-series volume to multiattach host group.""" LOG.debug("Attempting to map volume %s to multiple hosts.", volume['id']) # If volume is already mapped to desired host, return the mapping if mapping['mapRef'] == target_host['hostRef']: LOG.debug("Volume %(vol)s already mapped to host %(host)s", {'vol': volume['id'], 'host': target_host['label']}) return mapping # If target host in a host group, ensure it is the multiattach host group if target_host['clusterRef'] != utils.NULL_REF: host_group = client.get_host_group(target_host[ 'clusterRef']) if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME: msg = _("Specified host to map to volume %(vol)s is in " "unsupported host group with %(group)s.") params = {'vol': volume['id'], 'group': host_group['label']} raise eseries_exc.UnsupportedHostGroup(msg % params) mapped_host_group = None multiattach_host_group = None try: mapped_host_group = client.get_host_group(mapping['mapRef']) # If volume is mapped to a foreign host group raise an 
error if mapped_host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME: raise eseries_exc.UnsupportedHostGroup( volume_id=volume['id'], group=mapped_host_group['label']) multiattach_host_group = mapped_host_group except exception.NotFound: pass if not multiattach_host_group: multiattach_host_group = client.get_host_group_by_name( utils.MULTI_ATTACH_HOST_GROUP_NAME) # If volume is mapped directly to a host, move the host into the # multiattach host group. Error if the host is in a foreign host group if not mapped_host_group: current_host = client.get_host(mapping['mapRef']) if current_host['clusterRef'] != utils.NULL_REF: host_group = client.get_host_group(current_host[ 'clusterRef']) if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME: msg = _("Currently mapped host for volume %(vol)s is in " "unsupported host group with %(group)s.") params = {'vol': volume['id'], 'group': host_group['label']} raise eseries_exc.UnsupportedHostGroup(msg % params) client.set_host_group_for_host(current_host['hostRef'], multiattach_host_group['clusterRef']) # Move destination host into multiattach host group client.set_host_group_for_host(target_host[ 'hostRef'], multiattach_host_group['clusterRef']) # Once both existing and target hosts are in the multiattach host group, # move the volume mapping to said group. if not mapped_host_group: LOG.debug("Moving mapping for volume %s to multiattach host group.", volume['id']) return client.move_volume_mapping_via_symbol( mapping.get('lunMappingRef'), multiattach_host_group['clusterRef'], mapping['lun'] ) return mapping def _get_free_lun(client, host, multiattach_enabled, mappings): """Returns least used LUN ID available on the given host.""" if not _is_host_full(client, host): unused_luns = _get_unused_lun_ids(mappings) if unused_luns: chosen_lun = random.sample(unused_luns, 1) return chosen_lun[0] elif multiattach_enabled: msg = _("No unused LUN IDs are available on the host; " "multiattach is enabled which requires that all LUN IDs " "to be unique across the entire host group.") raise exception.NetAppDriverException(msg) used_lun_counts = _get_used_lun_id_counter(mappings) # most_common returns an arbitrary tuple of members with same frequency for lun_id, __ in reversed(used_lun_counts.most_common()): if _is_lun_id_available_on_host(client, host, lun_id): return lun_id msg = _("No free LUN IDs left. 
Maximum number of volumes that can be " "attached to host (%s) has been exceeded.") raise exception.NetAppDriverException(msg % utils.MAX_LUNS_PER_HOST) def _get_unused_lun_ids(mappings): """Returns unused LUN IDs given mappings.""" used_luns = _get_used_lun_ids_for_mappings(mappings) unused_luns = (set(range(utils.MAX_LUNS_PER_HOST)) - set(used_luns)) return unused_luns def _get_used_lun_id_counter(mapping): """Returns used LUN IDs with count as a dictionary.""" used_luns = _get_used_lun_ids_for_mappings(mapping) used_lun_id_counter = collections.Counter(used_luns) return used_lun_id_counter def _is_host_full(client, host): """Checks whether maximum volumes attached to a host have been reached.""" luns = client.get_volume_mappings_for_host(host['hostRef']) return len(luns) >= utils.MAX_LUNS_PER_HOST def _is_lun_id_available_on_host(client, host, lun_id): """Returns a boolean value depending on whether a LUN ID is available.""" mapping = client.get_volume_mappings_for_host(host['hostRef']) used_lun_ids = _get_used_lun_ids_for_mappings(mapping) return lun_id not in used_lun_ids def _get_used_lun_ids_for_mappings(mappings): """Returns used LUNs when provided with mappings.""" used_luns = set(map(lambda lun: int(lun['lun']), mappings)) # E-Series uses LUN ID 0 for special purposes and should not be # assigned for general use used_luns.add(0) return used_luns def unmap_volume_from_host(client, volume, host, mapping): # Volume is mapped directly to host, so delete the mapping if mapping.get('mapRef') == host['hostRef']: LOG.debug("Volume %(vol)s is mapped directly to host %(host)s; " "removing mapping.", {'vol': volume['id'], 'host': host['label']}) client.delete_volume_mapping(mapping['lunMappingRef']) return try: host_group = client.get_host_group(mapping['mapRef']) except exception.NotFound: # Volumes is mapped but to a different initiator raise eseries_exc.VolumeNotMapped(volume_id=volume['id'], host=host['label']) # If volume is mapped to a foreign host group raise error if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME: raise eseries_exc.UnsupportedHostGroup(volume_id=volume['id'], group=host_group['label']) # If target host is not in the multiattach host group if host['clusterRef'] != host_group['clusterRef']: raise eseries_exc.VolumeNotMapped(volume_id=volume['id'], host=host['label']) # Volume is mapped to multiattach host group # Remove mapping if volume should no longer be attached after this # operation. if volume['status'] == 'detaching': LOG.debug("Volume %s is mapped directly to multiattach host group but " "is not currently attached; removing mapping.", volume['id']) client.delete_volume_mapping(mapping['lunMappingRef']) cinder-8.0.0/cinder/volume/drivers/netapp/eseries/fc_driver.py0000664000567000056710000001157412701406250025625 0ustar jenkinsjenkins00000000000000# Copyright (c) - 2014, Alex Meade. All rights reserved. # Copyright (c) - 2015, Yogesh Kshirsagar. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
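# NOTE(editor): a minimal standalone sketch (not shipped driver code) of the
# least-used-LUN fallback applied by host_mapper._get_free_lun() above when
# no completely unused LUN ID remains; names here are illustrative only.
import collections


def pick_least_used_lun(group_lun_ids, host_lun_ids):
    """Return the group-wide least-used LUN ID not already used on the host.

    Counter.most_common() sorts by descending frequency, so iterating it in
    reverse visits candidate LUN IDs from least used to most used.
    """
    counts = collections.Counter(group_lun_ids)
    for lun_id, _count in reversed(counts.most_common()):
        if lun_id not in host_lun_ids:
            return lun_id
    raise ValueError('all LUN IDs are already in use on this host')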
""" Volume driver for NetApp E-Series FibreChannel storage systems. """ from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.netapp.eseries import library from cinder.volume.drivers.netapp import utils as na_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class NetAppEseriesFibreChannelDriver(driver.BaseVD, driver.ManageableVD, driver.ExtendVD, driver.TransferVD, driver.SnapshotVD, driver.ConsistencyGroupVD): """NetApp E-Series FibreChannel volume driver.""" DRIVER_NAME = 'NetApp_FibreChannel_ESeries' def __init__(self, *args, **kwargs): super(NetAppEseriesFibreChannelDriver, self).__init__(*args, **kwargs) na_utils.validate_instantiation(**kwargs) self.library = library.NetAppESeriesLibrary(self.DRIVER_NAME, 'FC', **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): return self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh) def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def manage_existing(self, volume, existing_ref): return self.library.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.library.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): return self.library.unmanage(volume) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector, **kwargs): return self.library.initialize_connection_fc(volume, connector) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): return self.library.terminate_connection_fc(volume, connector, **kwargs) def get_pool(self, volume): return self.library.get_pool(volume) def create_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.create_cgsnapshot(cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.delete_cgsnapshot(cgsnapshot, snapshots) def create_consistencygroup(self, context, group): return self.library.create_consistencygroup(group) def delete_consistencygroup(self, context, group, volumes): return self.library.delete_consistencygroup(group, volumes) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_consistencygroup( group, add_volumes, remove_volumes) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): return self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot, snapshots, source_cg, source_vols) 
cinder-8.0.0/cinder/volume/drivers/netapp/eseries/library.py0000664000567000056710000026105312701406250025325 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Navneet Singh # Copyright (c) 2015 Yogesh Kshirsagar # Copyright (c) 2015 Tom Barron # Copyright (c) 2015 Michael Price # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import math import socket import time import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils as cinder_utils from cinder.volume.drivers.netapp.eseries import client from cinder.volume.drivers.netapp.eseries import exception as eseries_exc from cinder.volume.drivers.netapp.eseries import host_mapper from cinder.volume.drivers.netapp.eseries import utils from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import utils as volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF @six.add_metaclass(cinder_utils.TraceWrapperMetaclass) class NetAppESeriesLibrary(object): """Executes commands relating to Volumes.""" DRIVER_NAME = 'NetApp_iSCSI_ESeries' AUTOSUPPORT_INTERVAL_SECONDS = 3600 # hourly VERSION = "1.0.0" REQUIRED_FLAGS = ['netapp_server_hostname', 'netapp_controller_ips', 'netapp_login', 'netapp_password'] SLEEP_SECS = 5 HOST_TYPES = {'aix': 'AIX MPIO', 'avt': 'AVT_4M', 'factoryDefault': 'FactoryDefault', 'hpux': 'HP-UX TPGS', 'linux_atto': 'LnxTPGSALUA', 'linux_dm_mp': 'LnxALUA', 'linux_mpp_rdac': 'Linux', 'linux_pathmanager': 'LnxTPGSALUA_PM', 'macos': 'MacTPGSALUA', 'ontap': 'ONTAP', 'svc': 'SVC', 'solaris_v11': 'SolTPGSALUA', 'solaris_v10': 'Solaris', 'vmware': 'VmwTPGSALUA', 'windows': 'Windows 2000/Server 2003/Server 2008 Non-Clustered', 'windows_atto': 'WinTPGSALUA', 'windows_clustered': 'Windows 2000/Server 2003/Server 2008 Clustered' } # NOTE(ameade): This maps what is reported by the e-series api to a # consistent set of values that are reported by all NetApp drivers # to the cinder scheduler. 
SSC_DISK_TYPE_MAPPING = { 'scsi': 'SCSI', 'fibre': 'FCAL', 'sas': 'SAS', 'sata': 'SATA', 'ssd': 'SSD', } SSC_RAID_TYPE_MAPPING = { 'raidDiskPool': 'DDP', 'raid0': 'raid0', 'raid1': 'raid1', # RAID3 is being deprecated and is actually implemented as RAID5 'raid3': 'raid5', 'raid5': 'raid5', 'raid6': 'raid6', } READ_CACHE_Q_SPEC = 'netapp:read_cache' WRITE_CACHE_Q_SPEC = 'netapp:write_cache' DA_UQ_SPEC = 'netapp_eseries_data_assurance' FLASH_CACHE_UQ_SPEC = 'netapp_eseries_flash_read_cache' DISK_TYPE_UQ_SPEC = 'netapp_disk_type' ENCRYPTION_UQ_SPEC = 'netapp_disk_encryption' SPINDLE_SPD_UQ_SPEC = 'netapp_eseries_disk_spindle_speed' RAID_UQ_SPEC = 'netapp_raid_type' THIN_UQ_SPEC = 'netapp_thin_provisioned' SSC_UPDATE_INTERVAL = 60 # seconds SA_COMM_TIMEOUT = 30 WORLDWIDENAME = 'worldWideName' DEFAULT_HOST_TYPE = 'linux_dm_mp' # Define name marker string to use in snapshot groups that are for copying # volumes. This is to differentiate them from ordinary snapshot groups. SNAPSHOT_VOL_COPY_SUFFIX = 'SGCV' # Define a name marker string used to identify snapshot volumes that have # an underlying snapshot that is awaiting deletion. SNAPSHOT_VOL_DEL_SUFFIX = '_DEL' # Maximum number of snapshots per snapshot group MAX_SNAPSHOT_COUNT = 32 # Maximum number of snapshot groups MAX_SNAPSHOT_GROUP_COUNT = 4 RESERVED_SNAPSHOT_GROUP_COUNT = 1 SNAPSHOT_PERSISTENT_STORE_KEY = 'cinder-snapshots' SNAPSHOT_PERSISTENT_STORE_LOCK = str(uuid.uuid4()) def __init__(self, driver_name, driver_protocol="iSCSI", configuration=None, **kwargs): self.configuration = configuration self._app_version = kwargs.pop("app_version", "unknown") self.configuration.append_config_values(na_opts.netapp_basicauth_opts) self.configuration.append_config_values( na_opts.netapp_connection_opts) self.configuration.append_config_values(na_opts.netapp_transport_opts) self.configuration.append_config_values(na_opts.netapp_eseries_opts) self.configuration.append_config_values(na_opts.netapp_san_opts) self.lookup_service = fczm_utils.create_lookup_service() self._backend_name = self.configuration.safe_get( "volume_backend_name") or "NetApp_ESeries" self.driver_name = driver_name self.driver_protocol = driver_protocol self._stats = {} self._ssc_stats = {} def do_setup(self, context): """Any initialization the volume driver does while starting.""" self.context = context na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration) self._client = self._create_rest_client(self.configuration) self._check_mode_get_or_register_storage_system() self._version_check() if self.configuration.netapp_enable_multiattach: self._ensure_multi_attach_host_group_exists() def _create_rest_client(self, configuration): port = configuration.netapp_server_port scheme = configuration.netapp_transport_type.lower() if port is None: if scheme == 'http': port = 8080 elif scheme == 'https': port = 8443 return client.RestClient( scheme=scheme, host=configuration.netapp_server_hostname, port=port, service_path=configuration.netapp_webservice_path, username=configuration.netapp_login, password=configuration.netapp_password) def _version_check(self): """Ensure that the minimum version of the REST API is available""" if not self._client.features.REST_1_4_RELEASE: min_version = ( self._client.features.REST_1_4_RELEASE.minimum_version) raise exception.NetAppDriverException( 'This version (%(cur)s of the NetApp SANtricity Webservices ' 'Proxy is not supported. Install version %(supp)s or ' 'later.' 
% {'cur': self._client.api_version, 'supp': min_version}) def _start_periodic_tasks(self): ssc_periodic_task = loopingcall.FixedIntervalLoopingCall( self._update_ssc_info) ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL) # Start the task that logs autosupport (ASUP) data to the controller asup_periodic_task = loopingcall.FixedIntervalLoopingCall( self._create_asup, CONF.host) asup_periodic_task.start(interval=self.AUTOSUPPORT_INTERVAL_SECONDS, initial_delay=0) def check_for_setup_error(self): self._check_host_type() self._check_multipath() # It is important that this be called before any other methods that # interact with the storage-system. It blocks until the # storage-system comes online. self._check_storage_system() self._check_pools() self._start_periodic_tasks() def _check_host_type(self): host_type = (self.configuration.netapp_host_type or self.DEFAULT_HOST_TYPE) self.host_type = self.HOST_TYPES.get(host_type) if not self.host_type: raise exception.NetAppDriverException( _('Configured host type is not supported.')) def _check_multipath(self): if not self.configuration.use_multipath_for_image_xfer: LOG.warning(_LW('Production use of "%(backend)s" backend requires ' 'the Cinder controller to have multipathing ' 'properly set up and the configuration option ' '"%(mpflag)s" to be set to "True".'), {'backend': self._backend_name, 'mpflag': 'use_multipath_for_image_xfer'}) def _check_pools(self): """Ensure that the pool listing contains at least one pool""" if not self._get_storage_pools(): msg = _('No pools are available for provisioning volumes. ' 'Ensure that the configuration option ' 'netapp_pool_name_search_pattern is set correctly.') raise exception.NetAppDriverException(msg) def _ensure_multi_attach_host_group_exists(self): try: host_group = self._client.get_host_group_by_name( utils.MULTI_ATTACH_HOST_GROUP_NAME) LOG.info(_LI("The multi-attach E-Series host group '%(label)s' " "already exists with clusterRef %(clusterRef)s"), host_group) except exception.NotFound: host_group = self._client.create_host_group( utils.MULTI_ATTACH_HOST_GROUP_NAME) LOG.info(_LI("Created multi-attach E-Series host group %(label)s " "with clusterRef %(clusterRef)s"), host_group) def _check_mode_get_or_register_storage_system(self): """Does validity checks for storage system registry and health.""" def _resolve_host(host): try: ip = na_utils.resolve_hostname(host) return ip except socket.gaierror as e: LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'), {'host': host, 'e': e}) raise exception.NoValidHost( _("Controller IP '%(host)s' could not be resolved: %(e)s.") % {'host': host, 'e': e}) ips = self.configuration.netapp_controller_ips ips = [i.strip() for i in ips.split(",")] ips = [x for x in ips if _resolve_host(x)] host = na_utils.resolve_hostname( self.configuration.netapp_server_hostname) if host in ips: LOG.info(_LI('Embedded mode detected.')) system = self._client.list_storage_systems()[0] else: LOG.info(_LI('Proxy mode detected.')) system = self._client.register_storage_system( ips, password=self.configuration.netapp_sa_password) self._client.set_system_id(system.get('id')) self._client._init_features() def _check_password_status(self, system): """Determine if the storage system's password status is valid. The password status has the following possible states: unknown, valid, invalid. If the password state cannot be retrieved from the storage system, an empty string will be returned as the status, and the password status will be assumed to be valid. 
This is done to ensure that access to a storage system will not be blocked in the event of a problem with the API. This method returns a tuple consisting of the storage system's password status and whether or not the status is valid. Example: (invalid, True) :returns: (str, bool) """ status = system.get('passwordStatus') status = status.lower() if status else '' return status, status not in ['invalid', 'unknown'] def _check_storage_system_status(self, system): """Determine if the storage system's status is valid. The storage system status has the following possible states: neverContacted, offline, optimal, needsAttn. If the storage system state cannot be retrieved, an empty string will be returned as the status, and the storage system's status will be assumed to be valid. This is done to ensure that access to a storage system will not be blocked in the event of a problem with the API. This method returns a tuple consisting of the storage system's password status and whether or not the status is valid. Example: (needsAttn, True) :returns: (str, bool) """ status = system.get('status') status = status.lower() if status else '' return status, status not in ['nevercontacted', 'offline'] def _check_storage_system(self): """Checks whether system is registered and has good status.""" try: self._client.list_storage_system() except exception.NetAppDriverException: with excutils.save_and_reraise_exception(): LOG.info(_LI("System with controller addresses [%s] is not " "registered with web service."), self.configuration.netapp_controller_ips) # Update the stored password # We do this to trigger the webservices password validation routine new_pwd = self.configuration.netapp_sa_password self._client.update_stored_system_password(new_pwd) start_time = int(time.time()) def check_system_status(): system = self._client.list_storage_system() pass_status, pass_status_valid = ( self._check_password_status(system)) status, status_valid = self._check_storage_system_status(system) msg_dict = {'id': system.get('id'), 'status': status, 'pass_status': pass_status} # wait if array not contacted or # password was not in sync previously. 
if not (pass_status_valid and status_valid): if not pass_status_valid: LOG.info(_LI('Waiting for web service to validate the ' 'configured password.')) else: LOG.info(_LI('Waiting for web service array ' 'communication.')) if int(time.time() - start_time) >= self.SA_COMM_TIMEOUT: if not status_valid: raise exception.NetAppDriverException( _("System %(id)s found with bad status - " "%(status)s.") % msg_dict) else: raise exception.NetAppDriverException( _("System %(id)s found with bad password status - " "%(pass_status)s.") % msg_dict) # The system was found to have a good status else: LOG.info(_LI("System %(id)s has %(status)s status."), msg_dict) raise loopingcall.LoopingCallDone() checker = loopingcall.FixedIntervalLoopingCall(f=check_system_status) checker.start(interval = self.SLEEP_SECS, initial_delay=self.SLEEP_SECS).wait() return True def _get_volume(self, uid): """Retrieve a volume by its label""" if uid is None: raise exception.InvalidInput(_('The volume label is required' ' as input.')) uid = utils.convert_uuid_to_es_fmt(uid) return self._client.list_volume(uid) def _get_snapshot_group_for_snapshot(self, snapshot): snapshot = self._get_snapshot(snapshot) try: return self._client.list_snapshot_group(snapshot['pitGroupRef']) except (exception.NetAppDriverException, eseries_exc.WebServiceException): msg = _("Specified snapshot group with id %s could not be found.") raise exception.NotFound(msg % snapshot['pitGroupRef']) def _get_snapshot_legacy(self, snapshot): """Find a E-Series snapshot by the name of the snapshot group. Snapshots were previously identified by the unique name of the snapshot group. A snapshot volume is now utilized to uniquely identify the snapshot, so any snapshots previously defined in this way must be updated. :param snapshot_id: Cinder snapshot identifer :return: An E-Series snapshot image """ label = utils.convert_uuid_to_es_fmt(snapshot['id']) for group in self._client.list_snapshot_groups(): if group['label'] == label: image = self._get_oldest_image_in_snapshot_group(group['id']) group_label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) # Modify the group label so we don't have a name collision self._client.update_snapshot_group(group['id'], group_label) snapshot.update({'provider_id': image['id']}) snapshot.save() return image raise exception.NotFound(_('Snapshot with id of %s could not be ' 'found.') % snapshot['id']) def _get_snapshot(self, snapshot): """Find a E-Series snapshot by its Cinder identifier An E-Series snapshot image does not have a configuration name/label, so we define a snapshot volume underneath of it that will help us to identify it. We retrieve the snapshot volume with the matching name, and then we find its underlying snapshot. 
:param snapshot_id: Cinder snapshot identifer :return: An E-Series snapshot image """ try: return self._client.list_snapshot_image( snapshot.get('provider_id')) except (eseries_exc.WebServiceException or exception.NetAppDriverException): try: LOG.debug('Unable to locate snapshot by its id, falling ' 'back to legacy behavior.') return self._get_snapshot_legacy(snapshot) except exception.NetAppDriverException: raise exception.NotFound(_('Snapshot with id of %s could not' ' be found.') % snapshot['id']) def _get_snapshot_group(self, snapshot_group_id): try: return self._client.list_snapshot_group(snapshot_group_id) except exception.NetAppDriverException: raise exception.NotFound(_('Unable to retrieve snapshot group ' 'with id of %s.') % snapshot_group_id) def _get_ordered_images_in_snapshot_group(self, snapshot_group_id): images = self._client.list_snapshot_images() if images: filtered_images = filter(lambda img: (img['pitGroupRef'] == snapshot_group_id), images) sorted_imgs = sorted(filtered_images, key=lambda x: x[ 'pitTimestamp']) return sorted_imgs return list() def _get_oldest_image_in_snapshot_group(self, snapshot_group_id): group = self._get_snapshot_group(snapshot_group_id) images = self._get_ordered_images_in_snapshot_group(snapshot_group_id) if images: return images[0] msg = _("No snapshot image found in snapshot group %s.") raise exception.NotFound(msg % group['label']) def _get_latest_image_in_snapshot_group(self, snapshot_group_id): group = self._get_snapshot_group(snapshot_group_id) images = self._get_ordered_images_in_snapshot_group(snapshot_group_id) if images: return images[-1] msg = _("No snapshot image found in snapshot group %s.") raise exception.NotFound(msg % group['label']) def _is_volume_containing_snaps(self, label): """Checks if volume contains snapshot groups.""" vol_id = utils.convert_es_fmt_to_uuid(label) for snap in self._client.list_snapshot_groups(): if snap['baseVolume'] == vol_id: return True return False def get_pool(self, volume): """Return pool name where volume resides. :param volume: The volume hosted by the driver. :returns: Name of the pool where given volume is hosted. 
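For example (pool names are illustrative), a volume whose E-Series counterpart resides in storage pool 'DDP_1' yields 'DDP_1'; None is returned if the pool cannot be looked up.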
""" eseries_volume = self._get_volume(volume['name_id']) storage_pool = self._client.get_storage_pool( eseries_volume['volumeGroupRef']) if storage_pool: return storage_pool.get('label') def _add_volume_to_consistencygroup(self, volume): if volume.get('consistencygroup_id'): es_cg = self._get_consistencygroup(volume['consistencygroup']) self._update_consistency_group_members(es_cg, [volume], []) def create_volume(self, volume): """Creates a volume.""" LOG.debug('create_volume on %s', volume['host']) # get E-series pool label as pool name eseries_pool_label = volume_utils.extract_host(volume['host'], level='pool') if eseries_pool_label is None: msg = _("Pool is not available in the volume host field.") raise exception.InvalidHost(reason=msg) eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['name_id']) extra_specs = na_utils.get_volume_extra_specs(volume) # get size of the requested volume creation size_gb = int(volume['size']) self._create_volume(eseries_pool_label, eseries_volume_label, size_gb, extra_specs) self._add_volume_to_consistencygroup(volume) def _create_volume(self, eseries_pool_label, eseries_volume_label, size_gb, extra_specs=None): """Creates volume with given label and size.""" if extra_specs is None: extra_specs = {} if self.configuration.netapp_enable_multiattach: volumes = self._client.list_volumes() # NOTE(ameade): Ensure we do not create more volumes than we could # map to the multi attach ESeries host group. if len(volumes) > utils.MAX_LUNS_PER_HOST_GROUP: msg = (_("Cannot create more than %(req)s volumes on the " "ESeries array when 'netapp_enable_multiattach' is " "set to true.") % {'req': utils.MAX_LUNS_PER_HOST_GROUP}) raise exception.NetAppDriverException(msg) # These must be either boolean values, or None read_cache = extra_specs.get(self.READ_CACHE_Q_SPEC) if read_cache is not None: read_cache = na_utils.to_bool(read_cache) write_cache = extra_specs.get(self.WRITE_CACHE_Q_SPEC) if write_cache is not None: write_cache = na_utils.to_bool(write_cache) flash_cache = extra_specs.get(self.FLASH_CACHE_UQ_SPEC) if flash_cache is not None: flash_cache = na_utils.to_bool(flash_cache) data_assurance = extra_specs.get(self.DA_UQ_SPEC) if data_assurance is not None: data_assurance = na_utils.to_bool(data_assurance) thin_provision = extra_specs.get(self.THIN_UQ_SPEC) if(thin_provision is not None): thin_provision = na_utils.to_bool(thin_provision) target_pool = None pools = self._get_storage_pools() for pool in pools: if pool["label"] == eseries_pool_label: target_pool = pool break if not target_pool: msg = _("Pools %s does not exist") raise exception.NetAppDriverException(msg % eseries_pool_label) try: vol = self._client.create_volume(target_pool['volumeGroupRef'], eseries_volume_label, size_gb, read_cache=read_cache, write_cache=write_cache, flash_cache=flash_cache, data_assurance=data_assurance, thin_provision=thin_provision) LOG.info(_LI("Created volume with " "label %s."), eseries_volume_label) except exception.NetAppDriverException as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error creating volume. Msg - %s."), e) # There was some kind failure creating the volume, make sure no # partial flawed work exists try: bad_vol = self._get_volume(eseries_volume_label) except Exception: # Swallowing the exception intentionally because this is # emergency cleanup to make sure no intermediate volumes # were left. In this whole error situation, the more # common route would be for no volume to have been created. 
pass else: # Some sort of partial volume was created despite the # error. Lets clean it out so no partial state volumes or # orphans are left. try: self._client.delete_volume(bad_vol["id"]) except exception.NetAppDriverException as e2: LOG.error(_LE( "Error cleaning up failed volume creation. " "Msg - %s."), e2) return vol def _is_data_assurance_supported(self): """Determine if the storage backend is PI (DataAssurance) compatible""" return self.driver_protocol != "iSCSI" def _schedule_and_create_volume(self, label, size_gb): """Creates volume with given label and size.""" avl_pools = self._get_sorted_available_storage_pools(size_gb) for pool in avl_pools: try: vol = self._client.create_volume(pool['volumeGroupRef'], label, size_gb) LOG.info(_LI("Created volume with label %s."), label) return vol except exception.NetAppDriverException as e: LOG.error(_LE("Error creating volume. Msg - %s."), e) msg = _("Failure creating volume %s.") raise exception.NetAppDriverException(msg % label) def _create_volume_from_snapshot(self, volume, image): """Define a new volume based on an E-Series snapshot image. This method should be synchronized on the snapshot id. :param volume: a Cinder volume :param image: an E-Series snapshot image :return: the clone volume """ label = utils.convert_uuid_to_es_fmt(volume['id']) size = volume['size'] dst_vol = self._schedule_and_create_volume(label, size) src_vol = None try: src_vol = self._create_snapshot_volume(image) self._copy_volume_high_priority_readonly(src_vol, dst_vol) LOG.info(_LI("Created volume with label %s."), label) except exception.NetAppDriverException: with excutils.save_and_reraise_exception(): self._client.delete_volume(dst_vol['volumeRef']) finally: if src_vol: try: self._client.delete_snapshot_volume(src_vol['id']) except exception.NetAppDriverException as e: LOG.error(_LE("Failure restarting snap vol. 
Error: %s."), e) else: LOG.warning(_LW("Snapshot volume creation failed for " "snapshot %s."), image['id']) return dst_vol def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" es_snapshot = self._get_snapshot(snapshot) cinder_utils.synchronized(snapshot['id'])( self._create_volume_from_snapshot)(volume, es_snapshot) self._add_volume_to_consistencygroup(volume) def _copy_volume_high_priority_readonly(self, src_vol, dst_vol): """Copies src volume to dest volume.""" LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s."), {'src': src_vol['label'], 'dst': dst_vol['label']}) job = None try: job = self._client.create_volume_copy_job( src_vol['id'], dst_vol['volumeRef']) def wait_for_copy(): j_st = self._client.list_vol_copy_job(job['volcopyRef']) if (j_st['status'] in ['inProgress', 'pending', 'unknown']): return if j_st['status'] == 'failed' or j_st['status'] == 'halted': LOG.error(_LE("Vol copy job status %s."), j_st['status']) raise exception.NetAppDriverException( _("Vol copy job for dest %s failed.") % dst_vol['label']) LOG.info(_LI("Vol copy job completed for dest %s."), dst_vol['label']) raise loopingcall.LoopingCallDone() checker = loopingcall.FixedIntervalLoopingCall(wait_for_copy) checker.start(interval=self.SLEEP_SECS, initial_delay=self.SLEEP_SECS, stop_on_exception=True).wait() finally: if job: try: self._client.delete_vol_copy_job(job['volcopyRef']) except exception.NetAppDriverException: LOG.warning(_LW("Failure deleting " "job %s."), job['volcopyRef']) else: LOG.warning(_LW('Volume copy job for src vol %s not found.'), src_vol['id']) LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label']) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" es_vol = self._get_volume(src_vref['id']) es_snapshot = self._create_es_snapshot_for_clone(es_vol) try: self._create_volume_from_snapshot(volume, es_snapshot) self._add_volume_to_consistencygroup(volume) finally: try: self._client.delete_snapshot_group(es_snapshot['pitGroupRef']) except exception.NetAppDriverException: LOG.warning(_LW("Failure deleting temp snapshot %s."), es_snapshot['id']) def delete_volume(self, volume): """Deletes a volume.""" try: vol = self._get_volume(volume['name_id']) self._client.delete_volume(vol['volumeRef']) except (exception.NetAppDriverException, exception.VolumeNotFound): LOG.warning(_LW("Volume %s already deleted."), volume['id']) return def _is_cgsnapshot(self, snapshot_image): """Determine if an E-Series snapshot image is part of a cgsnapshot""" cg_id = snapshot_image.get('consistencyGroupId') # A snapshot that is not part of a consistency group may have a # cg_id of either none or a string of all 0's, so we check for both return not (cg_id is None or utils.NULL_REF == cg_id) def _create_snapshot_volume(self, image): """Creates snapshot volume for given group with snapshot_id.""" group = self._get_snapshot_group(image['pitGroupRef']) LOG.debug("Creating snap vol for group %s", group['label']) label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) if self._is_cgsnapshot(image): return self._client.create_cg_snapshot_view( image['consistencyGroupId'], label, image['id']) else: return self._client.create_snapshot_volume( image['pitRef'], label, image['baseVol']) def _create_snapshot_group(self, label, volume, percentage_capacity=20.0): """Define a new snapshot group for a volume :param label: the label for the snapshot group :param volume: an E-Series volume :param percentage_capacity: an optional repository 
percentage :return a new snapshot group """ # Newer versions of the REST API are capable of automatically finding # the best pool candidate if not self._client.features.REST_1_3_RELEASE: vol_size_gb = int(volume['totalSizeInBytes']) / units.Gi pools = self._get_sorted_available_storage_pools(vol_size_gb) volume_pool = next(pool for pool in pools if volume[ 'volumeGroupRef'] == pool['id']) # A disk pool can only utilize a candidate from its own pool if volume_pool.get('raidLevel') == 'raidDiskPool': pool_id_to_use = volume_pool['volumeGroupRef'] # Otherwise, choose the best available pool else: pool_id_to_use = pools[0]['volumeGroupRef'] group = self._client.create_snapshot_group( label, volume['volumeRef'], pool_id_to_use, repo_percent=percentage_capacity) else: group = self._client.create_snapshot_group( label, volume['volumeRef'], repo_percent=percentage_capacity) return group def _get_snapshot_groups_for_volume(self, vol): """Find all snapshot groups associated with an E-Series volume :param vol: An E-Series volume object :return A list of snapshot groups :raise NetAppDriverException: if the list of snapshot groups cannot be retrieved """ return [grp for grp in self._client.list_snapshot_groups() if grp['baseVolume'] == vol['id']] def _get_available_snapshot_group(self, vol): """Find a snapshot group that has remaining capacity for snapshots. In order to minimize repository usage, we prioritize the snapshot group with remaining snapshot capacity that has most recently had a snapshot defined on it. :param vol: An E-Series volume object :return A valid snapshot group that has available snapshot capacity, or None :raise NetAppDriverException: if the list of snapshot groups cannot be retrieved """ groups_for_v = self._get_snapshot_groups_for_volume(vol) # Filter out reserved snapshot groups groups = filter(lambda g: self.SNAPSHOT_VOL_COPY_SUFFIX not in g[ 'label'], groups_for_v) # Filter out groups that are part of a consistency group groups = filter(lambda g: not g['consistencyGroup'], groups) # Find all groups with free snapshot capacity groups = [group for group in groups if group.get('snapshotCount') < self.MAX_SNAPSHOT_COUNT] # Order by the last defined snapshot on the group if len(groups) > 1: group_by_id = {g['id']: g for g in groups} snap_imgs = list() for group in groups: try: snap_imgs.append( self._get_latest_image_in_snapshot_group(group['id'])) except exception.NotFound: pass snap_imgs = sorted(snap_imgs, key=lambda x: x['pitSequenceNumber']) if snap_imgs: # The newest image img = snap_imgs[-1] return group_by_id[img['pitGroupRef']] else: return groups[0] if groups else None # Skip the snapshot image checks if there is only one snapshot group elif groups: return groups[0] else: return None def _create_es_snapshot_for_clone(self, vol): group_name = (utils.convert_uuid_to_es_fmt(uuid.uuid4()) + self.SNAPSHOT_VOL_COPY_SUFFIX) return self._create_es_snapshot(vol, group_name) def _create_es_snapshot(self, vol, group_name=None): snap_grp, snap_image = None, None try: snap_grp = self._get_available_snapshot_group(vol) # If a snapshot group is not available, create one if possible if snap_grp is None: snap_groups_for_vol = self._get_snapshot_groups_for_volume( vol) # We need a reserved snapshot group if (group_name is not None and (self.SNAPSHOT_VOL_COPY_SUFFIX in group_name)): # First we search for an existing reserved group for grp in snap_groups_for_vol: if grp['label'].endswith( self.SNAPSHOT_VOL_COPY_SUFFIX): snap_grp = grp break # No reserved group exists, so we create it if 
(snap_grp is None and (len(snap_groups_for_vol) < self.MAX_SNAPSHOT_GROUP_COUNT)): snap_grp = self._create_snapshot_group(group_name, vol) # Ensure we don't exceed the snapshot group limit elif (len(snap_groups_for_vol) < (self.MAX_SNAPSHOT_GROUP_COUNT - self.RESERVED_SNAPSHOT_GROUP_COUNT)): label = group_name if group_name is not None else ( utils.convert_uuid_to_es_fmt(uuid.uuid4())) snap_grp = self._create_snapshot_group(label, vol) LOG.info(_LI("Created snap grp with label %s."), label) # We couldn't retrieve or create a snapshot group if snap_grp is None: raise exception.SnapshotLimitExceeded( allowed=(self.MAX_SNAPSHOT_COUNT * (self.MAX_SNAPSHOT_GROUP_COUNT - self.RESERVED_SNAPSHOT_GROUP_COUNT))) return self._client.create_snapshot_image( snap_grp['id']) except exception.NetAppDriverException: with excutils.save_and_reraise_exception(): if snap_image is None and snap_grp: self._delete_snapshot_group(snap_grp['id']) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: The Cinder snapshot :param group_name: An optional label for the snapshot group :return An E-Series snapshot image """ os_vol = snapshot['volume'] vol = self._get_volume(os_vol['name_id']) snap_image = cinder_utils.synchronized(vol['id'])( self._create_es_snapshot)(vol) model_update = { 'provider_id': snap_image['id'] } return model_update def _delete_es_snapshot(self, es_snapshot): """Perform a soft-delete on an E-Series snapshot. Mark the snapshot image as no longer needed, so that it can be purged from the backend when no other snapshots are dependent upon it. :param es_snapshot: an E-Series snapshot image :return None """ index = self._get_soft_delete_map() snapgroup_ref = es_snapshot['pitGroupRef'] if snapgroup_ref in index: bitset = na_utils.BitSet(int((index[snapgroup_ref]))) else: bitset = na_utils.BitSet(0) images = [img for img in self._client.list_snapshot_images() if img['pitGroupRef'] == snapgroup_ref] for i, image in enumerate(sorted(images, key=lambda x: x[ 'pitSequenceNumber'])): if(image['pitSequenceNumber'] == es_snapshot[ 'pitSequenceNumber']): bitset.set(i) break index_update, keys_to_del = ( self._cleanup_snapshot_images(images, bitset)) self._merge_soft_delete_changes(index_update, keys_to_del) def delete_snapshot(self, snapshot): """Delete a snapshot.""" try: es_snapshot = self._get_snapshot(snapshot) except exception.NotFound: LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id']) else: os_vol = snapshot['volume'] vol = self._get_volume(os_vol['name_id']) cinder_utils.synchronized(vol['id'])(self._delete_es_snapshot)( es_snapshot) def _get_soft_delete_map(self): """Retrieve the snapshot index from the storage backend""" return self._client.list_backend_store( self.SNAPSHOT_PERSISTENT_STORE_KEY) @cinder_utils.synchronized(SNAPSHOT_PERSISTENT_STORE_LOCK) def _merge_soft_delete_changes(self, index_update, keys_to_del): """Merge changes to the snapshot index and save it on the backend This method merges provided changes into the index, locking, to ensure that concurrent changes that don't overlap are not overwritten. No update will occur if neither an update or keys to delete are provided. 
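Illustrative example (keys are made-up snapshot group refs): with a stored index of {'grp-1': '3'}, calling _merge_soft_delete_changes({'grp-2': '1'}, ['grp-1']) persists {'grp-2': '1'} back to the backend store.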
:param index_update: a dict of keys/value pairs to update in the index :param keys_to_del: a list of keys to purge from the index """ if index_update or keys_to_del: index = self._get_soft_delete_map() if index_update: index.update(index_update) if keys_to_del: for key in keys_to_del: if key in index: del index[key] self._client.save_backend_store( self.SNAPSHOT_PERSISTENT_STORE_KEY, index) def _cleanup_snapshot_images(self, images, bitset): """Delete snapshot images that are marked for removal from the backend. This method will iterate over all snapshots (beginning with the oldest), that are defined on the same snapshot group as the provided snapshot image. If the snapshot is marked for deletion, it will be purged from the backend. Otherwise, the method will return because no further snapshots can be purged. The bitset will be updated based on the return from this method. Any updates to the index will be provided as a dict, and any keys to be removed from the index should be returned as (dict, list). :param images: a list of E-Series snapshot images :param bitset: a bitset representing the snapshot images that are no longer needed on the backend (and may be deleted when possible) :return (dict, list) a tuple containing a dict of updates for the index and a list of keys to remove from the index """ snap_grp_ref = images[0]['pitGroupRef'] # All images are marked as deleted, we can delete the snapshot group if bitset == 2 ** len(images) - 1: try: self._delete_snapshot_group(snap_grp_ref) except exception.NetAppDriverException as e: LOG.warning(_LW("Unable to remove snapshot group - " "%s."), e.msg) return None, [snap_grp_ref] else: # Order by their sequence number, from oldest to newest snapshots = sorted(images, key=lambda x: x['pitSequenceNumber']) deleted = 0 for i, snapshot in enumerate(snapshots): if bitset.is_set(i): self._delete_snapshot_image(snapshot) deleted += 1 else: # Snapshots must be deleted in order, so if the current # snapshot is not pending deletion, we don't want to # process any more break if deleted: # Update the bitset based on the deleted snapshots bitset >>= deleted LOG.debug('Deleted %(count)s snapshot images from snapshot ' 'group: %(grp)s.', {'count': deleted, 'grp': snap_grp_ref}) if deleted >= len(images): try: self._delete_snapshot_group(snap_grp_ref) except exception.NetAppDriverException as e: LOG.warning(_LW("Unable to remove snapshot group - " "%s."), e.msg) return None, [snap_grp_ref] return {snap_grp_ref: repr(bitset)}, None def _delete_snapshot_group(self, group_id): try: self._client.delete_snapshot_group(group_id) except eseries_exc.WebServiceException as e: raise exception.NetAppDriverException(e.msg) def _delete_snapshot_image(self, es_snapshot): """Remove a snapshot image from the storage backend If a snapshot group has no remaining snapshot images associated with it, it will be deleted as well. When the snapshot is deleted, any snapshot volumes that are associated with it will be orphaned, so they are also deleted. 
:param es_snapshot: An E-Series snapshot image :param snapshot_volumes: Snapshot volumes associated with the snapshot """ self._client.delete_snapshot_image(es_snapshot['id']) def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" pass def create_export(self, context, volume): """Exports the volume.""" pass def remove_export(self, context, volume): """Removes an export for a volume.""" pass def map_volume_to_host(self, volume, eseries_volume, initiators): """Ensures the specified initiator has access to the volume.""" existing_maps = self._client.get_volume_mappings_for_volume( eseries_volume) host = self._get_or_create_host(initiators, self.host_type) # There can only be one or zero mappings on a volume in E-Series current_map = existing_maps[0] if existing_maps else None if self.configuration.netapp_enable_multiattach and current_map: self._ensure_multi_attach_host_group_exists() mapping = host_mapper.map_volume_to_multiple_hosts(self._client, volume, eseries_volume, host, current_map) else: mapping = host_mapper.map_volume_to_single_host( self._client, volume, eseries_volume, host, current_map, self.configuration.netapp_enable_multiattach) return mapping def initialize_connection_fc(self, volume, connector): """Initializes the connection and returns connection info. Assigns the specified volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values: { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '500a098280feeba5', 'initiator_target_map': { '21000024ff406cc3': ['500a098280feeba5'], '21000024ff406cc2': ['500a098280feeba5'] } } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['500a098280feeba5', '500a098290feeba5', '500a098190feeba5', '500a098180feeba5'], 'initiator_target_map': { '21000024ff406cc3': ['500a098280feeba5', '500a098290feeba5'], '21000024ff406cc2': ['500a098190feeba5', '500a098180feeba5'] } } } """ initiators = [fczm_utils.get_formatted_wwn(wwpn) for wwpn in connector['wwpns']] eseries_vol = self._get_volume(volume['name_id']) mapping = self.map_volume_to_host(volume, eseries_vol, initiators) lun_id = mapping['lun'] initiator_info = self._build_initiator_target_map_fc(connector) target_wwpns, initiator_target_map, num_paths = initiator_info if target_wwpns: msg = ("Successfully fetched target details for LUN %(id)s " "and initiator(s) %(initiators)s.") msg_fmt = {'id': volume['id'], 'initiators': initiators} LOG.debug(msg, msg_fmt) else: msg = _('Failed to get LUN target details for the LUN %s.') raise exception.VolumeBackendAPIException(data=msg % volume['id']) target_info = {'driver_volume_type': 'fibre_channel', 'data': {'target_discovered': True, 'target_lun': int(lun_id), 'target_wwn': target_wwpns, 'initiator_target_map': initiator_target_map}} return target_info def terminate_connection_fc(self, volume, connector, **kwargs): """Disallow connection from connector. Return empty data if other volumes are in the same zone. The FibreChannel ZoneManager doesn't remove zones if there isn't an initiator_target_map in the return of terminate_connection. 
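Example return value (illustrative, reusing the WWNs from the initialize_connection_fc docstring) when the last mapping for the host is removed::

    {'driver_volume_type': 'fibre_channel',
     'data': {'target_wwn': ['500a098280feeba5'],
              'initiator_target_map': {
                  '21000024ff406cc3': ['500a098280feeba5']}}}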
:returns: data - the target_wwns and initiator_target_map if the zone is to be removed, otherwise the same map with an empty dict for the 'data' key """ eseries_vol = self._get_volume(volume['name_id']) initiators = [fczm_utils.get_formatted_wwn(wwpn) for wwpn in connector['wwpns']] host = self._get_host_with_matching_port(initiators) mappings = eseries_vol.get('listOfMappings', []) # There can only be one or zero mappings on a volume in E-Series mapping = mappings[0] if mappings else None if not mapping: raise eseries_exc.VolumeNotMapped(volume_id=volume['id'], host=host['label']) host_mapper.unmap_volume_from_host(self._client, volume, host, mapping) info = {'driver_volume_type': 'fibre_channel', 'data': {}} if len(self._client.get_volume_mappings_for_host( host['hostRef'])) == 0: # No more exports for this host, so tear down zone. LOG.info(_LI("Need to remove FC Zone, building initiator " "target map.")) initiator_info = self._build_initiator_target_map_fc(connector) target_wwpns, initiator_target_map, num_paths = initiator_info info['data'] = {'target_wwn': target_wwpns, 'initiator_target_map': initiator_target_map} return info def _build_initiator_target_map_fc(self, connector): """Build the target_wwns and the initiator target map.""" # get WWPNs from controller and strip colons all_target_wwpns = self._client.list_target_wwpns() all_target_wwpns = [six.text_type(wwpn).replace(':', '') for wwpn in all_target_wwpns] target_wwpns = [] init_targ_map = {} num_paths = 0 if self.lookup_service: # Use FC SAN lookup to determine which ports are visible. dev_map = self.lookup_service.get_device_mapping_from_network( connector['wwpns'], all_target_wwpns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwpns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) for target in init_targ_map[initiator]: num_paths += 1 target_wwpns = list(set(target_wwpns)) else: initiator_wwns = connector['wwpns'] target_wwpns = all_target_wwpns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwpns return target_wwpns, init_targ_map, num_paths def initialize_connection_iscsi(self, volume, connector): """Allow connection to connector and return connection info.""" initiator_name = connector['initiator'] eseries_vol = self._get_volume(volume['name_id']) mapping = self.map_volume_to_host(volume, eseries_vol, [initiator_name]) lun_id = mapping['lun'] msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name} LOG.debug("Mapped volume %(id)s to the initiator %(initiator_name)s.", msg_fmt) iscsi_details = self._get_iscsi_service_details() iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol, iscsi_details) LOG.debug("Successfully fetched target details for volume %(id)s and " "initiator %(initiator_name)s.", msg_fmt) iqn = iscsi_portal['iqn'] address = iscsi_portal['ip'] port = iscsi_portal['tcp_port'] properties = na_utils.get_iscsi_connection_properties(lun_id, volume, iqn, address, port) return properties def _get_iscsi_service_details(self): """Gets iscsi iqn, ip and port information.""" ports = [] hw_inventory = self._client.list_hardware_inventory() iscsi_ports = hw_inventory.get('iscsiPorts') if iscsi_ports: for port in iscsi_ports: if (port.get('ipv4Enabled') and port.get('iqn') and port.get('ipv4Data') and port['ipv4Data'].get('ipv4AddressData') and 
port['ipv4Data']['ipv4AddressData'] .get('ipv4Address') and port['ipv4Data'] ['ipv4AddressData'].get('configState') == 'configured'): iscsi_det = {} iscsi_det['ip'] =\ port['ipv4Data']['ipv4AddressData']['ipv4Address'] iscsi_det['iqn'] = port['iqn'] iscsi_det['tcp_port'] = port.get('tcpListenPort') iscsi_det['controller'] = port.get('controllerId') ports.append(iscsi_det) if not ports: msg = _('No good iscsi portals found for %s.') raise exception.NetAppDriverException( msg % self._client.get_system_id()) return ports def _get_iscsi_portal_for_vol(self, volume, portals, anyController=True): """Get the iscsi portal info relevant to volume.""" for portal in portals: if portal.get('controller') == volume.get('currentManager'): return portal if anyController and portals: return portals[0] msg = _('No good iscsi portal found in supplied list for %s.') raise exception.NetAppDriverException( msg % self._client.get_system_id()) def _get_or_create_host(self, port_ids, host_type): """Fetch or create a host by given port.""" try: host = self._get_host_with_matching_port(port_ids) ht_def = self._get_host_type_definition(host_type) if host.get('hostTypeIndex') != ht_def.get('index'): try: host = self._client.update_host_type( host['hostRef'], ht_def) except exception.NetAppDriverException as e: LOG.warning(_LW("Unable to update host type for host with " "label %(l)s. %(e)s"), {'l': host['label'], 'e': e.msg}) return host except exception.NotFound as e: LOG.warning(_LW("Message - %s."), e.msg) return self._create_host(port_ids, host_type) def _get_host_with_matching_port(self, port_ids): """Gets or creates a host with given port id.""" # Remove any extra colons port_ids = [six.text_type(wwpn).replace(':', '') for wwpn in port_ids] hosts = self._client.list_hosts() for port_id in port_ids: for host in hosts: if host.get('hostSidePorts'): ports = host.get('hostSidePorts') for port in ports: address = port.get('address').upper().replace(':', '') if address == port_id.upper(): return host msg = _("Host with ports %(ports)s not found.") raise exception.NotFound(msg % {'ports': port_ids}) def _create_host(self, port_ids, host_type, host_group=None): """Creates host on system with given initiator as port_id.""" LOG.info(_LI("Creating host with ports %s."), port_ids) host_label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) host_type = self._get_host_type_definition(host_type) port_type = self.driver_protocol.lower() return self._client.create_host_with_ports(host_label, host_type, port_ids, group_id=host_group, port_type=port_type) def _get_host_type_definition(self, host_type): """Gets supported host type if available on storage system.""" host_types = self._client.list_host_types() for ht in host_types: if ht.get('name', 'unknown').lower() == host_type.lower(): return ht raise exception.NotFound(_("Host type %s not supported.") % host_type) def terminate_connection_iscsi(self, volume, connector, **kwargs): """Disallow connection from connector.""" eseries_vol = self._get_volume(volume['name_id']) initiator = connector['initiator'] host = self._get_host_with_matching_port([initiator]) mappings = eseries_vol.get('listOfMappings', []) # There can only be one or zero mappings on a volume in E-Series mapping = mappings[0] if mappings else None if not mapping: raise eseries_exc.VolumeNotMapped(volume_id=volume['id'], host=host['label']) host_mapper.unmap_volume_from_host(self._client, volume, host, mapping) def get_volume_stats(self, refresh=False): """Return the current state of the volume service.""" if refresh: 
if not self._ssc_stats: self._update_ssc_info() self._update_volume_stats() return self._stats def _update_volume_stats(self): """Update volume statistics.""" LOG.debug("Updating volume stats.") data = dict() data["volume_backend_name"] = self._backend_name data["vendor_name"] = "NetApp" data["driver_version"] = self.VERSION data["storage_protocol"] = self.driver_protocol data["pools"] = [] for storage_pool in self._get_storage_pools(): cinder_pool = {} cinder_pool["pool_name"] = storage_pool.get("label") cinder_pool["QoS_support"] = False cinder_pool["reserved_percentage"] = ( self.configuration.reserved_percentage) cinder_pool["max_over_subscription_ratio"] = ( self.configuration.max_over_subscription_ratio) tot_bytes = int(storage_pool.get("totalRaidedSpace", 0)) used_bytes = int(storage_pool.get("usedSpace", 0)) cinder_pool["provisioned_capacity_gb"] = used_bytes / units.Gi cinder_pool["free_capacity_gb"] = ((tot_bytes - used_bytes) / units.Gi) cinder_pool["total_capacity_gb"] = tot_bytes / units.Gi pool_ssc_stats = self._ssc_stats.get( storage_pool["volumeGroupRef"]) if pool_ssc_stats: thin = pool_ssc_stats.get(self.THIN_UQ_SPEC) or False cinder_pool.update(pool_ssc_stats) else: thin = False cinder_pool["thin_provisioning_support"] = thin # All E-Series pools support thick provisioning cinder_pool["thick_provisioning_support"] = True data["pools"].append(cinder_pool) self._stats = data self._garbage_collect_tmp_vols() def _create_asup(self, cinder_host): if not self._client.features.AUTOSUPPORT: msg = _LI("E-series proxy API version %s does not support " "autosupport logging.") LOG.info(msg % self._client.api_version) return event_source = ("Cinder driver %s" % self.DRIVER_NAME) category = "provisioning" event_description = "OpenStack Cinder connected to E-Series proxy" asup_info = self._client.get_asup_info() model = asup_info.get('model') firmware_version = asup_info.get('firmware_version') serial_numbers = asup_info.get('serial_numbers') chassis_sn = asup_info.get('chassis_sn') key = ("openstack-%s-%s-%s" % (cinder_host, serial_numbers[0], serial_numbers[1])) # The counter is being set here to a key-value combination # comprised of serial numbers and cinder host with a default # heartbeat of 1. The counter is set to inform the user that the # key does not have a stale value. self._client.set_counter("%s-heartbeat" % key, value=1) data = { 'computer-name': cinder_host, 'event-source': event_source, 'app-version': self._app_version, 'category': category, 'event-description': event_description, 'controller1-serial': serial_numbers[0], 'controller2-serial': serial_numbers[1], 'chassis-serial-number': chassis_sn, 'model': model, 'system-version': firmware_version, 'operating-mode': self._client.api_operating_mode } self._client.add_autosupport_data(key, data) @cinder_utils.synchronized("netapp_update_ssc_info", external=False) def _update_ssc_info(self): """Periodically runs to update ssc information from the backend. The self._ssc_stats attribute is updated with the following format. 
{<volume_group_ref> : {<ssc_key>: <ssc_value>}} """ LOG.info(_LI("Updating storage service catalog information for " "backend '%s'"), self._backend_name) relevant_pools = self._get_storage_pools() if self._client.features.SSC_API_V2: self._update_ssc_info_v2(relevant_pools) else: self._update_ssc_info_v1(relevant_pools) def _update_ssc_info_v1(self, relevant_pools): """Update ssc data using the legacy API. :param relevant_pools: The pools that this driver cares about """ msg = _LI("E-series proxy API version %(version)s does not " "support full set of SSC extra specs. The proxy version" " must be at least %(min_version)s.") LOG.info(msg, {'version': self._client.api_version, 'min_version': self._client.features.SSC_API_V2.minimum_version}) self._ssc_stats = ( self._update_ssc_disk_encryption(relevant_pools)) self._ssc_stats = ( self._update_ssc_disk_types(relevant_pools)) self._ssc_stats = ( self._update_ssc_raid_type(relevant_pools)) def _update_ssc_info_v2(self, relevant_pools): """Update the ssc dictionary with ssc info for relevant pools :param relevant_pools: The pools that this driver cares about """ ssc_stats = copy.deepcopy(self._ssc_stats) storage_pool_labels = [pool['label'] for pool in relevant_pools] ssc_data = self._client.list_ssc_storage_pools() ssc_data = [pool for pool in ssc_data if pool['name'] in storage_pool_labels] for pool in ssc_data: poolId = pool['poolId'] if poolId not in ssc_stats: ssc_stats[poolId] = {} pool_ssc_info = ssc_stats[poolId] pool_ssc_info['consistencygroup_support'] = True pool_ssc_info[self.ENCRYPTION_UQ_SPEC] = ( six.text_type(pool['encrypted']).lower()) pool_ssc_info[self.SPINDLE_SPD_UQ_SPEC] = (pool['spindleSpeed']) flash_cache_capable = pool['flashCacheCapable'] pool_ssc_info[self.FLASH_CACHE_UQ_SPEC] = ( six.text_type(flash_cache_capable).lower()) # Data Assurance is not compatible with some backend types da_capable = pool['dataAssuranceCapable'] and ( self._is_data_assurance_supported()) pool_ssc_info[self.DA_UQ_SPEC] = ( six.text_type(da_capable).lower()) pool_ssc_info[self.RAID_UQ_SPEC] = ( self.SSC_RAID_TYPE_MAPPING.get(pool['raidLevel'], 'unknown')) pool_ssc_info[self.THIN_UQ_SPEC] = ( six.text_type(pool['thinProvisioningCapable']).lower()) if pool['pool'].get("driveMediaType") == 'ssd': pool_ssc_info[self.DISK_TYPE_UQ_SPEC] = 'SSD' else: pool_ssc_info[self.DISK_TYPE_UQ_SPEC] = ( self.SSC_DISK_TYPE_MAPPING.get( pool['pool'].get('drivePhysicalType'), 'unknown')) self._ssc_stats = ssc_stats def _update_ssc_disk_types(self, storage_pools): """Updates the given ssc dictionary with new disk type information. :param storage_pools: The storage pools this driver cares about """ ssc_stats = copy.deepcopy(self._ssc_stats) all_disks = self._client.list_drives() pool_ids = set(pool.get("volumeGroupRef") for pool in storage_pools) relevant_disks = filter(lambda x: x.get('currentVolumeGroupRef') in pool_ids, all_disks) for drive in relevant_disks: current_vol_group = drive.get('currentVolumeGroupRef') if current_vol_group not in ssc_stats: ssc_stats[current_vol_group] = {} if drive.get("driveMediaType") == 'ssd': ssc_stats[current_vol_group][self.DISK_TYPE_UQ_SPEC] = 'SSD' else: disk_type = drive.get('interfaceType').get('driveType') ssc_stats[current_vol_group][self.DISK_TYPE_UQ_SPEC] = ( self.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')) return ssc_stats def _update_ssc_disk_encryption(self, storage_pools): """Updates the given ssc dictionary with new disk encryption information.
:param storage_pools: The storage pools this driver cares about """ ssc_stats = copy.deepcopy(self._ssc_stats) for pool in storage_pools: current_vol_group = pool.get('volumeGroupRef') if current_vol_group not in ssc_stats: ssc_stats[current_vol_group] = {} ssc_stats[current_vol_group][self.ENCRYPTION_UQ_SPEC] = ( six.text_type(pool['securityType'] == 'enabled').lower() ) return ssc_stats def _update_ssc_raid_type(self, storage_pools): """Updates the given ssc dictionary with new RAID type information. :param storage_pools: The storage pools this driver cares about """ ssc_stats = copy.deepcopy(self._ssc_stats) for pool in storage_pools: current_vol_group = pool.get('volumeGroupRef') if current_vol_group not in ssc_stats: ssc_stats[current_vol_group] = {} raid_type = pool.get('raidLevel') ssc_stats[current_vol_group]['netapp_raid_type'] = ( self.SSC_RAID_TYPE_MAPPING.get(raid_type, 'unknown')) return ssc_stats def _get_storage_pools(self): """Retrieve storage pools that match the user-configured search pattern.""" # Inform deprecation of legacy option. if self.configuration.safe_get('netapp_storage_pools'): msg = _LW("The option 'netapp_storage_pools' is deprecated and " "will be removed in future releases. Please use " "the option 'netapp_pool_name_search_pattern' instead.") versionutils.report_deprecated_feature(LOG, msg) pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) storage_pools = self._client.list_storage_pools() filtered_pools = [] for pool in storage_pools: pool_name = pool['label'] if pool_regex.match(pool_name): msg = ("Pool '%(pool_name)s' matches against regular " "expression: %(pool_pattern)s") LOG.debug(msg, {'pool_name': pool_name, 'pool_pattern': pool_regex.pattern}) filtered_pools.append(pool) else: msg = ("Pool '%(pool_name)s' does not match against regular " "expression: %(pool_pattern)s") LOG.debug(msg, {'pool_name': pool_name, 'pool_pattern': pool_regex.pattern}) return filtered_pools def _get_sorted_available_storage_pools(self, size_gb): """Returns storage pools sorted on available capacity.""" size = size_gb * units.Gi sorted_pools = sorted(self._get_storage_pools(), key=lambda x: (int(x.get('totalRaidedSpace', 0)) - int(x.get('usedSpace', 0))), reverse=True) avl_pools = list(filter(lambda x: ((int(x.get('totalRaidedSpace', 0)) - int(x.get('usedSpace', 0)) >= size)), sorted_pools)) if not avl_pools: LOG.warning(_LW("No storage pool found with available capacity " "%s."), size_gb) return avl_pools def _is_thin_provisioned(self, volume): """Determine if a volume is thin provisioned.""" return volume.get('objectType') == 'thinVolume' or volume.get( 'thinProvisioned', False) def _get_pool_operation_progress(self, pool_id, action=None): """Retrieve the progress of a long-running operation on a pool The return will be a tuple containing: a bool representing whether or not the operation is complete, a set of actions that are currently running on the storage pool, and the estimated time remaining in minutes. An action type may be passed in such that once no actions of that type remain active on the pool, the operation will be considered completed. If no action str is passed in, it is assumed that multiple actions compose the operation, and none are terminal, so the operation will not be considered completed until there are no actions remaining to be completed on any volume on the pool.
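For example (illustrative values only), a caller waiting on a volume expansion can pass action='remappingDve' and poll until the returned tuple becomes (True, set(), 0): no 'remappingDve' action remains, nothing else is running, and the estimated time to completion is zero minutes.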
:param pool_id: The id of a storage pool :param action: The anticipated action :returns: A tuple (bool, set(str), int) """ actions = set() eta = 0 for progress in self._client.get_pool_operation_progress(pool_id): actions.add(progress.get('currentAction')) eta += progress.get('estimatedTimeToCompletion', 0) if action is not None: complete = action not in actions else: complete = not actions return complete, actions, eta def extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" src_vol = self._get_volume(volume['name_id']) thin_provisioned = self._is_thin_provisioned(src_vol) self._client.expand_volume(src_vol['id'], new_size, thin_provisioned) # If the volume is thin or defined on a disk pool, there is no need # to block. if not (thin_provisioned or src_vol.get('diskPool')): # Wait for the expansion to start def check_progress(): complete, actions, eta = ( self._get_pool_operation_progress(src_vol[ 'volumeGroupRef'], 'remappingDve')) if complete: raise loopingcall.LoopingCallDone() else: msg = _LI("Waiting for volume expansion of %(vol)s to " "complete, current remaining actions are " "%(action)s. ETA: %(eta)s mins.") LOG.info(msg, {'vol': volume['name_id'], 'action': ', '.join(actions), 'eta': eta}) checker = loopingcall.FixedIntervalLoopingCall( check_progress) checker.start(interval=self.SLEEP_SECS, initial_delay=self.SLEEP_SECS, stop_on_exception=True).wait() def create_cgsnapshot(self, cgsnapshot, snapshots): """Creates a cgsnapshot.""" cg_id = cgsnapshot['consistencygroup_id'] cg_name = utils.convert_uuid_to_es_fmt(cg_id) # Retrieve the E-Series consistency group es_cg = self._get_consistencygroup_by_name(cg_name) # Define an E-Series CG Snapshot es_snapshots = self._client.create_consistency_group_snapshot( es_cg['id']) # Build the snapshot updates snapshot_updates = list() for snap in snapshots: es_vol = self._get_volume(snap['volume']['id']) for es_snap in es_snapshots: if es_snap['baseVol'] == es_vol['id']: snapshot_updates.append({ 'id': snap['id'], # Directly track the backend snapshot ID 'provider_id': es_snap['id'], 'status': 'available' }) return None, snapshot_updates def delete_cgsnapshot(self, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" cg_id = cgsnapshot['consistencygroup_id'] cg_name = utils.convert_uuid_to_es_fmt(cg_id) # Retrieve the E-Series consistency group es_cg = self._get_consistencygroup_by_name(cg_name) # Find the smallest sequence number defined on the group min_seq_num = min(es_cg['uniqueSequenceNumber']) es_snapshots = self._client.get_consistency_group_snapshots( es_cg['id']) es_snap_ids = set(snap.get('provider_id') for snap in snapshots) # We need to find a single snapshot that is a part of the CG snap seq_num = None for snap in es_snapshots: if snap['id'] in es_snap_ids: seq_num = snap['pitSequenceNumber'] break if seq_num is None: raise exception.CgSnapshotNotFound(cgsnapshot_id=cg_id) # Perform a full backend deletion of the cgsnapshot if int(seq_num) <= int(min_seq_num): self._client.delete_consistency_group_snapshot( es_cg['id'], seq_num) return None, None else: # Perform a soft-delete, removing this snapshot from cinder # management, and marking it as available for deletion. return cinder_utils.synchronized(cg_id)( self._soft_delete_cgsnapshot)( es_cg, seq_num) def _soft_delete_cgsnapshot(self, es_cg, snap_seq_num): """Mark a cgsnapshot as available for deletion from the backend. E-Series snapshots cannot be deleted out of order, as older snapshots in the snapshot group are dependent on the newer snapshots.
A "soft delete" results in the cgsnapshot being removed from Cinder management, with the snapshot marked as available for deletion once all snapshots dependent on it are also deleted. :param es_cg: E-Series consistency group :param snap_seq_num: unique sequence number of the cgsnapshot :returns: an update to the snapshot index """ index = self._get_soft_delete_map() cg_ref = es_cg['id'] if cg_ref in index: bitset = na_utils.BitSet(int((index[cg_ref]))) else: bitset = na_utils.BitSet(0) seq_nums = ( set([snap['pitSequenceNumber'] for snap in self._client.get_consistency_group_snapshots(cg_ref)])) # Determine the relative index of the snapshot's sequence number for i, seq_num in enumerate(sorted(seq_nums)): if snap_seq_num == seq_num: bitset.set(i) break index_update = ( self._cleanup_cg_snapshots(cg_ref, seq_nums, bitset)) self._merge_soft_delete_changes(index_update, None) return None, None def _cleanup_cg_snapshots(self, cg_ref, seq_nums, bitset): """Delete cg snapshot images that are marked for removal The snapshot index tracks all snapshots that have been removed from Cinder, and are therefore available for deletion when this operation is possible. CG snapshots are tracked by unique sequence numbers that are associated with 1 or more snapshot images. The sequence numbers are tracked (relative to the 32 images allowed per group) within the snapshot index. This method will purge CG snapshots that have been marked as available for deletion within the backend persistent store. :param cg_ref: reference to an E-Series consistency group :param seq_nums: set of unique sequence numbers associated with the consistency group :param bitset: the bitset representing which sequence numbers are marked for deletion :returns: an update for the snapshot index """ deleted = 0 # Order by their sequence number, from oldest to newest for i, seq_num in enumerate(sorted(seq_nums)): if bitset.is_set(i): self._client.delete_consistency_group_snapshot(cg_ref, seq_num) deleted += 1 else: # Snapshots must be deleted in order, so if the current # snapshot is not pending deletion, we don't want to # process any more break if deleted: # We need to update the bitset to reflect the fact that older # snapshots have been deleted, so snapshot relative indexes # have now been updated.
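# Worked example (hypothetical values): with bitset 0b1011, the loop # above deletes the snapshots at relative indexes 0 and 1 and stops at # unmarked index 2, so deleted == 2; the right-shift below then turns # 0b1011 into 0b10, moving the mark that was at index 3 to its new # relative index 1.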
bitset >>= deleted LOG.debug('Deleted %(count)s snapshot images from ' 'consistency group: %(grp)s.', {'count': deleted, 'grp': cg_ref}) # Update the index return {cg_ref: repr(bitset)} def create_consistencygroup(self, cinder_cg): """Define a consistency group.""" self._create_consistency_group(cinder_cg) return {'status': 'available'} def _create_consistency_group(self, cinder_cg): """Define a new consistency group on the E-Series backend""" name = utils.convert_uuid_to_es_fmt(cinder_cg['id']) return self._client.create_consistency_group(name) def _get_consistencygroup(self, cinder_cg): """Retrieve an E-Series consistency group""" name = utils.convert_uuid_to_es_fmt(cinder_cg['id']) return self._get_consistencygroup_by_name(name) def _get_consistencygroup_by_name(self, name): """Retrieve an E-Series consistency group by name""" for cg in self._client.list_consistency_groups(): if name == cg['name']: return cg raise exception.ConsistencyGroupNotFound(consistencygroup_id=name) def delete_consistencygroup(self, group, volumes): """Deletes a consistency group.""" volume_update = list() for volume in volumes: LOG.info(_LI('Deleting volume %s.'), volume['id']) volume_update.append({ 'status': 'deleted', 'id': volume['id'], }) self.delete_volume(volume) try: cg = self._get_consistencygroup(group) except exception.ConsistencyGroupNotFound: LOG.warning(_LW('Consistency group already deleted.')) else: self._client.delete_consistency_group(cg['id']) try: self._merge_soft_delete_changes(None, [cg['id']]) except (exception.NetAppDriverException, eseries_exc.WebServiceException): LOG.warning(_LW('Unable to remove CG from the deletion map.')) model_update = {'status': 'deleted'} return model_update, volume_update def _update_consistency_group_members(self, es_cg, add_volumes, remove_volumes): """Add or remove consistency group members :param es_cg: The E-Series consistency group :param add_volumes: A list of Cinder volumes to add to the consistency group :param remove_volumes: A list of Cinder volumes to remove from the consistency group :returns: None """ for volume in remove_volumes: es_vol = self._get_volume(volume['id']) LOG.info( _LI('Removing volume %(v)s from consistency group %(cg)s.'), {'v': es_vol['label'], 'cg': es_cg['label']}) self._client.remove_consistency_group_member(es_vol['id'], es_cg['id']) for volume in add_volumes: es_vol = self._get_volume(volume['id']) LOG.info(_LI('Adding volume %(v)s to consistency group %(cg)s.'), {'v': es_vol['label'], 'cg': es_cg['label']}) self._client.add_consistency_group_member( es_vol['id'], es_cg['id']) def update_consistencygroup(self, group, add_volumes, remove_volumes): """Add or remove volumes from an existing consistency group""" cg = self._get_consistencygroup(group) self._update_consistency_group_members( cg, add_volumes, remove_volumes) return None, None, None def create_consistencygroup_from_src(self, group, volumes, cgsnapshot, snapshots, source_cg, source_vols): """Define a consistency group based on an existing group Define a new consistency group from a source consistency group. If only a source_cg is provided, then clone each base volume and add it to a new consistency group. If a cgsnapshot is provided, clone each snapshot image to a new volume and add it to the cg.
:param group: The new consistency group to define :param volumes: The volumes to add to the consistency group :param cgsnapshot: The cgsnapshot to base the group on :param snapshots: The list of snapshots on the source cg :param source_cg: The source consistency group :param source_vols: The volumes added to the source cg """ cg = self._create_consistency_group(group) if cgsnapshot: for vol, snap in zip(volumes, snapshots): image = self._get_snapshot(snap) self._create_volume_from_snapshot(vol, image) else: for vol, src in zip(volumes, source_vols): es_vol = self._get_volume(src['id']) es_snapshot = self._create_es_snapshot_for_clone(es_vol) try: self._create_volume_from_snapshot(vol, es_snapshot) finally: self._delete_es_snapshot(es_snapshot) self._update_consistency_group_members(cg, volumes, []) return None, None def _garbage_collect_tmp_vols(self): """Removes tmp vols with no snapshots.""" try: if not na_utils.set_safe_attr(self, 'clean_job_running', True): LOG.warning(_LW('Returning as clean tmp ' 'vol job already running.')) return for vol in self._client.list_volumes(): label = vol['label'] if (label.startswith('tmp-') and not self._is_volume_containing_snaps(label)): try: self._client.delete_volume(vol['volumeRef']) except exception.NetAppDriverException as e: LOG.debug("Error deleting vol with label %(label)s:" " %(error)s.", {'label': label, 'error': e}) finally: na_utils.set_safe_attr(self, 'clean_job_running', False) @cinder_utils.synchronized('manage_existing') def manage_existing(self, volume, existing_ref): """Brings an existing storage object under Cinder management.""" vol = self._get_existing_vol_with_manage_ref(existing_ref) label = utils.convert_uuid_to_es_fmt(volume['id']) if label == vol['label']: LOG.info(_LI("Volume with given ref %s need not be renamed during" " manage operation."), existing_ref) managed_vol = vol else: managed_vol = self._client.update_volume(vol['id'], label) LOG.info(_LI("Manage operation completed for volume with new label" " %(label)s and wwn %(wwn)s."), {'label': label, 'wwn': managed_vol[self.WORLDWIDENAME]}) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. """ vol = self._get_existing_vol_with_manage_ref(existing_ref) return int(math.ceil(float(vol['capacity']) / units.Gi)) def _get_existing_vol_with_manage_ref(self, existing_ref): try: vol_id = existing_ref.get('source-name') or existing_ref.get( 'source-id') if vol_id is None: raise exception.InvalidInput(message='No valid identifier ' 'was available for the ' 'volume.') return self._client.list_volume(vol_id) except exception.InvalidInput: reason = _('Reference must contain either source-name' ' or source-id element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) except exception.VolumeNotFound: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_('Volume not found on configured storage pools.')) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. Logs a message to indicate the volume is no longer under Cinder's control. 
""" managed_vol = self._get_volume(volume['id']) LOG.info(_LI("Unmanaged volume with current label %(label)s and wwn " "%(wwn)s."), {'label': managed_vol['label'], 'wwn': managed_vol[self.WORLDWIDENAME]}) cinder-8.0.0/cinder/volume/drivers/netapp/eseries/iscsi_driver.py0000664000567000056710000001146612701406250026347 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 NetApp, Inc. All Rights Reserved. # Copyright (c) 2015 Alex Meade. All Rights Reserved. # Copyright (c) 2015 Rushil Chugh. All Rights Reserved. # Copyright (c) 2015 Navneet Singh. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp E-Series iSCSI storage systems. """ from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.netapp.eseries import library from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) class NetAppEseriesISCSIDriver(driver.BaseVD, driver.ManageableVD, driver.ExtendVD, driver.TransferVD, driver.SnapshotVD, driver.ConsistencyGroupVD): """NetApp E-Series iSCSI volume driver.""" DRIVER_NAME = 'NetApp_iSCSI_ESeries' def __init__(self, *args, **kwargs): super(NetAppEseriesISCSIDriver, self).__init__(*args, **kwargs) na_utils.validate_instantiation(**kwargs) self.library = library.NetAppESeriesLibrary(self.DRIVER_NAME, 'iSCSI', **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): return self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh) def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def manage_existing(self, volume, existing_ref): return self.library.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.library.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): return self.library.unmanage(volume) def initialize_connection(self, volume, connector): return self.library.initialize_connection_iscsi(volume, connector) def terminate_connection(self, volume, connector, **kwargs): return self.library.terminate_connection_iscsi(volume, connector, **kwargs) def get_pool(self, volume): return 
self.library.get_pool(volume) def create_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.create_cgsnapshot(cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.delete_cgsnapshot(cgsnapshot, snapshots) def create_consistencygroup(self, context, group): return self.library.create_consistencygroup(group) def delete_consistencygroup(self, context, group, volumes): return self.library.delete_consistencygroup(group, volumes) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_consistencygroup( group, add_volumes, remove_volumes) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): return self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot, snapshots, source_cg, source_vols) cinder-8.0.0/cinder/volume/drivers/netapp/eseries/client.py0000664000567000056710000012642712701406250025144 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 NetApp, Inc # Copyright (c) 2014 Navneet Singh # Copyright (c) 2015 Alex Meade # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Yogesh Kshirsagar # Copyright (c) 2015 Michael Price # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client classes for web services. 
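A minimal usage sketch (illustrative only; the scheme, address, port, service path, credentials, and system id below are assumptions, not defaults): client = RestClient('https', 'localhost', '8443', '/devmgr/v2', 'rw', 'rw', system_id='1') client._init_features() # populate the feature map before any volume calls volumes = client.list_volumes() # GET the volumes for the configured system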
""" import copy import json import uuid from oslo_log import log as logging import requests from simplejson import scanner import six from six.moves import urllib from cinder import exception from cinder.i18n import _ from cinder.i18n import _LE import cinder.utils as cinder_utils from cinder.volume.drivers.netapp.eseries import exception as es_exception from cinder.volume.drivers.netapp.eseries import utils from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) class WebserviceClient(object): """Base client for NetApp Storage web services.""" def __init__(self, scheme, host, port, service_path, username, password, **kwargs): self._validate_params(scheme, host, port) self._create_endpoint(scheme, host, port, service_path) self._username = username self._password = password self._init_connection() def _validate_params(self, scheme, host, port): """Does some basic validation for web service params.""" if host is None or port is None or scheme is None: msg = _('One of the required inputs from host, ' 'port or scheme was not found.') raise exception.InvalidInput(reason=msg) if scheme not in ('http', 'https'): raise exception.InvalidInput(reason=_("Invalid transport type.")) def _create_endpoint(self, scheme, host, port, service_path): """Creates end point url for the service.""" netloc = '%s:%s' % (host, port) self._endpoint = urllib.parse.urlunparse((scheme, netloc, service_path, None, None, None)) def _init_connection(self): """Do client specific set up for session and connection pooling.""" self.conn = requests.Session() if self._username and self._password: self.conn.auth = (self._username, self._password) def invoke_service(self, method='GET', url=None, params=None, data=None, headers=None, timeout=None, verify=False): url = url or self._endpoint try: response = self.conn.request(method, url, params, data, headers=headers, timeout=timeout, verify=verify) # Catching error conditions other than the perceived ones. # Helps propagating only known exceptions back to the caller. except Exception as e: LOG.exception(_LE("Unexpected error while invoking web service." 
" Error - %s."), e) raise exception.NetAppDriverException( _("Invoking web service failed.")) return response class RestClient(WebserviceClient): """REST client specific to e-series storage service.""" ID = 'id' WWN = 'worldWideName' NAME = 'label' ASUP_VALID_VERSION = (1, 52, 9000, 3) # We need to check for both the release and the pre-release versions SSC_VALID_VERSIONS = ((1, 53, 9000, 1), (1, 53, 9010, 17)) REST_1_3_VERSION = (1, 53, 9000, 1) REST_1_4_VERSIONS = ((1, 54, 9000, 1), (1, 54, 9090, 0)) RESOURCE_PATHS = { 'volumes': '/storage-systems/{system-id}/volumes', 'volume': '/storage-systems/{system-id}/volumes/{object-id}', 'pool_operation_progress': '/storage-systems/{system-id}/storage-pools/{object-id}' '/action-progress', 'volume_expand': '/storage-systems/{system-id}/volumes/{object-id}/expand', 'thin_volume_expand': '/storage-systems/{system-id}/thin-volumes/{object-id}/expand', 'ssc_volumes': '/storage-systems/{system-id}/ssc/volumes', 'ssc_volume': '/storage-systems/{system-id}/ssc/volumes/{object-id}', 'snapshot_groups': '/storage-systems/{system-id}/snapshot-groups', 'snapshot_group': '/storage-systems/{system-id}/snapshot-groups/{object-id}', 'snapshot_volumes': '/storage-systems/{system-id}/snapshot-volumes', 'snapshot_volume': '/storage-systems/{system-id}/snapshot-volumes/{object-id}', 'snapshot_images': '/storage-systems/{system-id}/snapshot-images', 'snapshot_image': '/storage-systems/{system-id}/snapshot-images/{object-id}', 'cgroup': '/storage-systems/{system-id}/consistency-groups/{object-id}', 'cgroups': '/storage-systems/{system-id}/consistency-groups', 'cgroup_members': '/storage-systems/{system-id}/consistency-groups/{object-id}' '/member-volumes', 'cgroup_member': '/storage-systems/{system-id}/consistency-groups/{object-id}' '/member-volumes/{vol-id}', 'cgroup_snapshots': '/storage-systems/{system-id}/consistency-groups/{object-id}' '/snapshots', 'cgroup_snapshot': '/storage-systems/{system-id}/consistency-groups/{object-id}' '/snapshots/{seq-num}', 'cgroup_snapshots_by_seq': '/storage-systems/{system-id}/consistency-groups/{object-id}' '/snapshots/{seq-num}', 'cgroup_cgsnap_view': '/storage-systems/{system-id}/consistency-groups/{object-id}' '/views/{seq-num}', 'cgroup_cgsnap_views': '/storage-systems/{system-id}/consistency-groups/{object-id}' '/views/', 'cgroup_snapshot_views': '/storage-systems/{system-id}/consistency-groups/{object-id}' '/views/{view-id}/views', 'persistent-stores': '/storage-systems/{' 'system-id}/persistent-records/', 'persistent-store': '/storage-systems/{' 'system-id}/persistent-records/{key}' } def __init__(self, scheme, host, port, service_path, username, password, **kwargs): super(RestClient, self).__init__(scheme, host, port, service_path, username, password, **kwargs) kwargs = kwargs or {} self._system_id = kwargs.get('system_id') self._content_type = kwargs.get('content_type') or 'json' def _init_features(self): """Sets up and initializes E-Series feature support map.""" self.features = na_utils.Features() self.api_operating_mode, self.api_version = self.get_eseries_api_info( verify=False) api_version_tuple = tuple(int(version) for version in self.api_version.split('.')) asup_api_valid_version = self._validate_version( self.ASUP_VALID_VERSION, api_version_tuple) rest_1_3_api_valid_version = self._validate_version( self.REST_1_3_VERSION, api_version_tuple) rest_1_4_api_valid_version = any( self._validate_version(valid_version, api_version_tuple) for valid_version in self.REST_1_4_VERSIONS) ssc_api_valid_version = 
any(self._validate_version(valid_version, api_version_tuple) for valid_version in self.SSC_VALID_VERSIONS) self.features.add_feature('AUTOSUPPORT', supported=asup_api_valid_version, min_version=self._version_tuple_to_str( self.ASUP_VALID_VERSION)) self.features.add_feature('SSC_API_V2', supported=ssc_api_valid_version, min_version=self._version_tuple_to_str( self.SSC_VALID_VERSIONS[0])) self.features.add_feature( 'REST_1_3_RELEASE', supported=rest_1_3_api_valid_version, min_version=self._version_tuple_to_str(self.REST_1_3_VERSION)) self.features.add_feature( 'REST_1_4_RELEASE', supported=rest_1_4_api_valid_version, min_version=self._version_tuple_to_str(self.REST_1_4_VERSIONS[0])) def _version_tuple_to_str(self, version): return ".".join([str(part) for part in version]) def _validate_version(self, version, actual_version): """Determine whether the actual version is equal to or newer than a given version The proxy version number is formatted as AA.BB.CCCC.DDDD A: Major version part 1 B: Major version part 2 C: Release version: 9000->Release, 9010->Pre-release, 9090->Integration D: Minor version Examples: 02.53.9000.0010 02.52.9010.0001 Note: a build is actually 'newer' when its release (CCCC) number is lower. :param version: The version to validate :param actual_version: The running version of the Webservice :returns: True if actual_version is equal to or newer than version, otherwise False """ major_1, major_2, release, minor = version actual_major_1, actual_major_2, actual_release, actual_minor = ( actual_version) # We need to invert the release number for it to work with this # comparison return (actual_major_1, actual_major_2, 10000 - actual_release, actual_minor) >= (major_1, major_2, 10000 - release, minor) def set_system_id(self, system_id): """Set the storage system id.""" self._system_id = system_id def get_system_id(self): """Get the storage system id.""" return getattr(self, '_system_id', None) def _get_resource_url(self, path, use_system=True, **kwargs): """Creates end point url for rest service.""" kwargs = kwargs or {} if use_system: if not self._system_id: raise exception.NotFound(_('Storage system id not set.')) kwargs['system-id'] = self._system_id path = path.format(**kwargs) if not self._endpoint.endswith('/'): self._endpoint = '%s/' % self._endpoint return urllib.parse.urljoin(self._endpoint, path.lstrip('/')) def _invoke(self, method, path, data=None, use_system=True, timeout=None, verify=False, **kwargs): """Invokes end point for resource on path.""" url = self._get_resource_url(path, use_system, **kwargs) if self._content_type == 'json': headers = {'Accept': 'application/json', 'Content-Type': 'application/json'} if cinder_utils.TRACE_API: self._log_http_request(method, url, headers, data) data = json.dumps(data) if data else None res = self.invoke_service(method, url, data=data, headers=headers, timeout=timeout, verify=verify) try: res_dict = res.json() if res.text else None # This should only occur if we expected JSON, but were sent # something else except scanner.JSONDecodeError: res_dict = None if cinder_utils.TRACE_API: self._log_http_response(res.status_code, dict(res.headers), res_dict) self._eval_response(res) return res_dict else: raise exception.NetAppDriverException( _("Content type not supported.")) def _to_pretty_dict_string(self, data): """Convert specified dict to pretty printed string.""" return json.dumps(data, sort_keys=True, indent=2, separators=(',', ': ')) def _log_http_request(self, verb, url, headers, body): scrubbed_body =
copy.deepcopy(body) if scrubbed_body: if 'password' in scrubbed_body: scrubbed_body['password'] = "****" if 'storedPassword' in scrubbed_body: scrubbed_body['storedPassword'] = "****" params = {'verb': verb, 'path': url, 'body': self._to_pretty_dict_string(scrubbed_body) or "", 'headers': self._to_pretty_dict_string(headers)} LOG.debug("Invoking ESeries Rest API, Request:\n" "HTTP Verb: %(verb)s\n" "URL Path: %(path)s\n" "HTTP Headers:\n" "%(headers)s\n" "Body:\n" "%(body)s\n", (params)) def _log_http_response(self, status, headers, body): params = {'status': status, 'body': self._to_pretty_dict_string(body) or "", 'headers': self._to_pretty_dict_string(headers)} LOG.debug("ESeries Rest API, Response:\n" "HTTP Status Code: %(status)s\n" "HTTP Headers:\n" "%(headers)s\n" "Body:\n" "%(body)s\n", (params)) def _eval_response(self, response): """Evaluates response before passing result to invoker.""" status_code = int(response.status_code) # codes >= 300 are not OK and are treated as errors if status_code >= 300: # Response code 422 returns error code and message if status_code == 422: msg = _("Response error - %s.") % response.text json_response = response.json() if json_response is not None: ret_code = json_response.get('retcode', '') if ret_code == '30' or ret_code == 'authFailPassword': msg = _("The storage array password for %s is " "incorrect, please update the configured " "password.") % self._system_id elif status_code == 424: msg = _("Response error - The storage-system is offline.") else: msg = _("Response error code - %s.") % status_code raise es_exception.WebServiceException(msg, status_code=status_code) def _get_volume_api_path(self, path_key): """Retrieve the correct API path based on API availability :param path_key: The volume API to request (volume or volumes) :raise KeyError: If the path_key is not valid """ if self.features.SSC_API_V2: path_key = 'ssc_' + path_key return self.RESOURCE_PATHS[path_key] def create_volume(self, pool, label, size, unit='gb', seg_size=0, read_cache=None, write_cache=None, flash_cache=None, data_assurance=None, thin_provision=False): """Creates a volume on array with the configured attributes Note: if read_cache, write_cache, flash_cache, or data_assurance are not provided, the default will be utilized by the Webservice. :param pool: The pool unique identifier :param label: The unique label for the volume :param size: The capacity in units :param unit: The unit for capacity :param seg_size: The segment size for the volume, expressed in KB. Default will allow the Webservice to choose. :param read_cache: If true, enable read caching, if false, explicitly disable it. :param write_cache: If true, enable write caching, if false, explicitly disable it.
:param flash_cache: If true, add the volume to a Flash Cache :param data_assurance: If true, enable the Data Assurance capability :returns: The created volume """ # Utilize the new API if it is available if self.features.SSC_API_V2: path = "/storage-systems/{system-id}/ssc/volumes" data = {'poolId': pool, 'name': label, 'sizeUnit': unit, 'size': int(size), 'dataAssuranceEnable': data_assurance, 'flashCacheEnable': flash_cache, 'readCacheEnable': read_cache, 'writeCacheEnable': write_cache, 'thinProvision': thin_provision} # Use the old API else: # Determine if there were extra specs provided that are not # supported extra_specs = [read_cache, write_cache] unsupported_spec = any([spec is not None for spec in extra_specs]) if unsupported_spec: msg = _("E-series proxy API version %(current_version)s does " "not support full set of SSC extra specs. The proxy" " version must be at least %(min_version)s.") min_version = self.features.SSC_API_V2.minimum_version raise exception.NetAppDriverException(msg % {'current_version': self.api_version, 'min_version': min_version}) path = "/storage-systems/{system-id}/volumes" data = {'poolId': pool, 'name': label, 'sizeUnit': unit, 'size': int(size), 'segSize': seg_size} return self._invoke('POST', path, data) def delete_volume(self, object_id): """Deletes given volume from array.""" if self.features.SSC_API_V2: path = self.RESOURCE_PATHS.get('ssc_volume') else: path = self.RESOURCE_PATHS.get('volume') return self._invoke('DELETE', path, **{'object-id': object_id}) def list_volumes(self): """Lists all volumes in storage array.""" if self.features.SSC_API_V2: path = self.RESOURCE_PATHS.get('ssc_volumes') else: path = self.RESOURCE_PATHS.get('volumes') return self._invoke('GET', path) def list_volume(self, object_id): """Retrieve the given volume from array. :param object_id: The volume id, label, or wwn :returns: The volume identified by object_id :raise: VolumeNotFound if the volume could not be found """ if self.features.SSC_API_V2: return self._list_volume_v2(object_id) # The new API is not available, else: # Search for the volume with label, id, or wwn. return self._list_volume_v1(object_id) def _list_volume_v1(self, object_id): # Search for the volume with label, id, or wwn.
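# Matching is attempted against each volume's 'label' (NAME), # 'worldWideName' (WWN), and 'id' (ID) fields; the first volume # matching any of the three is returned.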
for vol in self.list_volumes(): if (object_id == vol.get(self.NAME) or object_id == vol.get( self.WWN) or object_id == vol.get(self.ID)): return vol # The volume could not be found raise exception.VolumeNotFound(volume_id=object_id) def _list_volume_v2(self, object_id): path = self.RESOURCE_PATHS.get('ssc_volume') try: return self._invoke('GET', path, **{'object-id': object_id}) except es_exception.WebServiceException as e: if 404 == e.status_code: raise exception.VolumeNotFound(volume_id=object_id) else: raise def update_volume(self, object_id, label): """Renames given volume on array.""" if self.features.SSC_API_V2: path = self.RESOURCE_PATHS.get('ssc_volume') else: path = self.RESOURCE_PATHS.get('volume') data = {'name': label} return self._invoke('POST', path, data, **{'object-id': object_id}) def create_consistency_group(self, name, warn_at_percent_full=75, rollback_priority='medium', full_policy='failbasewrites'): """Define a new consistency group""" path = self.RESOURCE_PATHS.get('cgroups') data = { 'name': name, 'fullWarnThresholdPercent': warn_at_percent_full, 'repositoryFullPolicy': full_policy, # A non-zero threshold enables auto-deletion 'autoDeleteThreshold': 0, 'rollbackPriority': rollback_priority, } return self._invoke('POST', path, data) def get_consistency_group(self, object_id): """Retrieve the consistency group identified by object_id""" path = self.RESOURCE_PATHS.get('cgroup') return self._invoke('GET', path, **{'object-id': object_id}) def list_consistency_groups(self): """Retrieve all consistency groups defined on the array""" path = self.RESOURCE_PATHS.get('cgroups') return self._invoke('GET', path) def delete_consistency_group(self, object_id): path = self.RESOURCE_PATHS.get('cgroup') self._invoke('DELETE', path, **{'object-id': object_id}) def add_consistency_group_member(self, volume_id, cg_id, repo_percent=20.0): """Add a volume to a consistency group :param volume_id: the eseries volume id :param cg_id: the eseries cg id :param repo_percent: percentage capacity of the volume to use for capacity of the copy-on-write repository """ path = self.RESOURCE_PATHS.get('cgroup_members') data = {'volumeId': volume_id, 'repositoryPercent': repo_percent} return self._invoke('POST', path, data, **{'object-id': cg_id}) def remove_consistency_group_member(self, volume_id, cg_id): """Remove a volume from a consistency group""" path = self.RESOURCE_PATHS.get('cgroup_member') self._invoke('DELETE', path, **{'object-id': cg_id, 'vol-id': volume_id}) def create_consistency_group_snapshot(self, cg_id): """Define a consistency group snapshot""" path = self.RESOURCE_PATHS.get('cgroup_snapshots') return self._invoke('POST', path, **{'object-id': cg_id}) def delete_consistency_group_snapshot(self, cg_id, seq_num): """Delete a consistency group snapshot""" path = self.RESOURCE_PATHS.get('cgroup_snapshot') return self._invoke('DELETE', path, **{'object-id': cg_id, 'seq-num': seq_num}) def get_consistency_group_snapshots(self, cg_id): """Retrieve all snapshots defined for a consistency group""" path = self.RESOURCE_PATHS.get('cgroup_snapshots') return self._invoke('GET', path, **{'object-id': cg_id}) def create_cg_snapshot_view(self, cg_id, name, snap_id): """Define a snapshot view for the cgsnapshot In order to define a snapshot view for a snapshot defined under a consistency group, the view must be defined at the cgsnapshot level.
:param cg_id: E-Series cg identifier :param name: the label for the view :param snap_id: E-Series snapshot view to locate :raise NetAppDriverException: if the snapshot view cannot be located for the snapshot identified by snap_id :returns: snapshot view for snapshot identified by snap_id """ path = self.RESOURCE_PATHS.get('cgroup_cgsnap_views') data = { 'name': name, 'accessMode': 'readOnly', # Only define a view for this snapshot 'pitId': snap_id, } # Define a view for the cgsnapshot cgsnapshot_view = self._invoke( 'POST', path, data, **{'object-id': cg_id}) # Retrieve the snapshot views associated with our cgsnapshot view views = self.list_cg_snapshot_views(cg_id, cgsnapshot_view[ 'cgViewRef']) # Find the snapshot view defined for our snapshot for view in views: if view['basePIT'] == snap_id: return view else: try: self.delete_cg_snapshot_view(cg_id, cgsnapshot_view['id']) finally: raise exception.NetAppDriverException( 'Unable to create snapshot view.') def list_cg_snapshot_views(self, cg_id, view_id): path = self.RESOURCE_PATHS.get('cgroup_snapshot_views') return self._invoke('GET', path, **{'object-id': cg_id, 'view-id': view_id}) def delete_cg_snapshot_view(self, cg_id, view_id): path = self.RESOURCE_PATHS.get('cgroup_snap_view') return self._invoke('DELETE', path, **{'object-id': cg_id, 'view-id': view_id}) def get_pool_operation_progress(self, object_id): """Retrieve the progress of long-running operations on a storage pool Example: [ { "volumeRef": "3232....", # Volume being referenced "progressPercentage": 0, # Approximate percent complete "estimatedTimeToCompletion": 0, # ETA in minutes "currentAction": "none" # Current volume action } ... ] :param object_id: A pool id :returns: A list of dicts representing the per-volume action progress """ path = self.RESOURCE_PATHS.get('pool_operation_progress') return self._invoke('GET', path, **{'object-id': object_id}) def expand_volume(self, object_id, new_capacity, thin_provisioned, capacity_unit='gb'): """Increase the capacity of a volume""" if thin_provisioned: path = self.RESOURCE_PATHS.get('thin_volume_expand') data = {'newVirtualSize': new_capacity, 'sizeUnit': capacity_unit, 'newRepositorySize': new_capacity} return self._invoke('POST', path, data, **{'object-id': object_id}) else: path = self.RESOURCE_PATHS.get('volume_expand') data = {'expansionSize': new_capacity, 'sizeUnit': capacity_unit} return self._invoke('POST', path, data, **{'object-id': object_id}) def get_volume_mappings(self): """Lists all volume mappings on the array.""" path = "/storage-systems/{system-id}/volume-mappings" return self._invoke('GET', path) def get_volume_mappings_for_volume(self, volume): """Gets all host mappings for given volume from array.""" mappings = self.get_volume_mappings() or [] return [x for x in mappings if x.get('volumeRef') == volume['volumeRef']] def get_volume_mappings_for_host(self, host_ref): """Gets all volume mappings for given host from array.""" mappings = self.get_volume_mappings() or [] return [x for x in mappings if x.get('mapRef') == host_ref] def get_volume_mappings_for_host_group(self, hg_ref): """Gets all volume mappings for given host group from array.""" mappings = self.get_volume_mappings() or [] return [x for x in mappings if x.get('mapRef') == hg_ref] def create_volume_mapping(self, object_id, target_id, lun): """Creates volume mapping on array.""" path = "/storage-systems/{system-id}/volume-mappings" data = {'mappableObjectId': object_id, 'targetId': target_id, 'lun': lun} return self._invoke('POST', path, data) def delete_volume_mapping(self,
map_object_id): """Deletes given volume mapping from array.""" path = "/storage-systems/{system-id}/volume-mappings/{object-id}" return self._invoke('DELETE', path, **{'object-id': map_object_id}) def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id): """Moves a map from one host/host_group object to another.""" path = "/storage-systems/{system-id}/symbol/moveLUNMapping" data = {'lunMappingRef': map_ref, 'lun': int(lun_id), 'mapRef': to_ref} return_code = self._invoke('POST', path, data) if return_code == 'ok': return {'lun': lun_id} msg = _("Failed to move LUN mapping. Return code: %s") % return_code raise exception.NetAppDriverException(msg) def list_hardware_inventory(self): """Lists objects in the hardware inventory.""" path = "/storage-systems/{system-id}/hardware-inventory" return self._invoke('GET', path) def list_target_wwpns(self): """Lists the world-wide port names of the target.""" inventory = self.list_hardware_inventory() fc_ports = inventory.get("fibrePorts", []) wwpns = [port['portName'] for port in fc_ports] return wwpns def create_host_group(self, label): """Creates a host group on the array.""" path = "/storage-systems/{system-id}/host-groups" data = {'name': label} return self._invoke('POST', path, data) def get_host_group(self, host_group_ref): """Gets a single host group from the array.""" path = "/storage-systems/{system-id}/host-groups/{object-id}" try: return self._invoke('GET', path, **{'object-id': host_group_ref}) except exception.NetAppDriverException: raise exception.NotFound(_("Host group with ref %s not found") % host_group_ref) def get_host_group_by_name(self, name): """Gets a single host group by name from the array.""" host_groups = self.list_host_groups() matching = [host_group for host_group in host_groups if host_group['label'] == name] if len(matching): return matching[0] raise exception.NotFound(_("Host group with name %s not found") % name) def list_host_groups(self): """Lists host groups on the array.""" path = "/storage-systems/{system-id}/host-groups" return self._invoke('GET', path) def list_hosts(self): """Lists host objects in the system.""" path = "/storage-systems/{system-id}/hosts" return self._invoke('GET', path) def create_host(self, label, host_type, ports=None, group_id=None): """Creates host on array.""" path = "/storage-systems/{system-id}/hosts" data = {'name': label, 'hostType': host_type} data.setdefault('groupId', group_id if group_id else None) data.setdefault('ports', ports if ports else None) return self._invoke('POST', path, data) def create_host_with_ports(self, label, host_type, port_ids, port_type='iscsi', group_id=None): """Creates host on array with given port information.""" if port_type == 'fc': port_ids = [six.text_type(wwpn).replace(':', '') for wwpn in port_ids] ports = [] for port_id in port_ids: port_label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) port = {'type': port_type, 'port': port_id, 'label': port_label} ports.append(port) return self.create_host(label, host_type, ports, group_id) def update_host(self, host_ref, data): """Updates host type for a given host.""" path = "/storage-systems/{system-id}/hosts/{object-id}" return self._invoke('POST', path, data, **{'object-id': host_ref}) def get_host(self, host_ref): """Gets a single host from the array.""" path = "/storage-systems/{system-id}/hosts/{object-id}" return self._invoke('GET', path, **{'object-id': host_ref}) def update_host_type(self, host_ref, host_type): """Updates host type for a given host.""" data = {'hostType': host_type} return 
self.update_host(host_ref, data) def set_host_group_for_host(self, host_ref, host_group_ref=utils.NULL_REF): """Sets or clears which host group a host is in.""" data = {'groupId': host_group_ref} self.update_host(host_ref, data) def list_host_types(self): """Lists host types in storage system.""" path = "/storage-systems/{system-id}/host-types" return self._invoke('GET', path) def list_snapshot_groups(self): """Lists snapshot groups.""" path = self.RESOURCE_PATHS['snapshot_groups'] return self._invoke('GET', path) def list_snapshot_group(self, object_id): """Retrieve given snapshot group from the array.""" path = self.RESOURCE_PATHS['snapshot_group'] return self._invoke('GET', path, **{'object-id': object_id}) def create_snapshot_group(self, label, object_id, storage_pool_id=None, repo_percent=99, warn_thres=99, auto_del_limit=0, full_policy='failbasewrites'): """Creates snapshot group on array.""" path = self.RESOURCE_PATHS['snapshot_groups'] data = {'baseMappableObjectId': object_id, 'name': label, 'storagePoolId': storage_pool_id, 'repositoryPercentage': repo_percent, 'warningThreshold': warn_thres, 'autoDeleteLimit': auto_del_limit, 'fullPolicy': full_policy} return self._invoke('POST', path, data) def update_snapshot_group(self, group_id, label): """Modify a snapshot group on the array.""" path = self.RESOURCE_PATHS['snapshot_group'] data = {'name': label} return self._invoke('POST', path, data, **{'object-id': group_id}) def delete_snapshot_group(self, object_id): """Deletes given snapshot group from array.""" path = self.RESOURCE_PATHS['snapshot_group'] return self._invoke('DELETE', path, **{'object-id': object_id}) def create_snapshot_image(self, group_id): """Creates snapshot image in snapshot group.""" path = self.RESOURCE_PATHS['snapshot_images'] data = {'groupId': group_id} return self._invoke('POST', path, data) def delete_snapshot_image(self, object_id): """Deletes given snapshot image in snapshot group.""" path = self.RESOURCE_PATHS['snapshot_image'] return self._invoke('DELETE', path, **{'object-id': object_id}) def list_snapshot_image(self, object_id): """Retrieve given snapshot image from the array.""" path = self.RESOURCE_PATHS['snapshot_image'] return self._invoke('GET', path, **{'object-id': object_id}) def list_snapshot_images(self): """Lists snapshot images.""" path = self.RESOURCE_PATHS['snapshot_images'] return self._invoke('GET', path) def create_snapshot_volume(self, image_id, label, base_object_id, storage_pool_id=None, repo_percent=99, full_thres=99, view_mode='readOnly'): """Creates snapshot volume.""" path = self.RESOURCE_PATHS['snapshot_volumes'] data = {'snapshotImageId': image_id, 'fullThreshold': full_thres, 'storagePoolId': storage_pool_id, 'name': label, 'viewMode': view_mode, 'repositoryPercentage': repo_percent, 'baseMappableObjectId': base_object_id, 'repositoryPoolId': storage_pool_id} return self._invoke('POST', path, data) def update_snapshot_volume(self, snap_vol_id, label=None, full_thres=None): """Modify an existing snapshot volume.""" path = self.RESOURCE_PATHS['snapshot_volume'] data = {'name': label, 'fullThreshold': full_thres} return self._invoke('POST', path, data, **{'object-id': snap_vol_id}) def delete_snapshot_volume(self, object_id): """Deletes given snapshot volume.""" path = self.RESOURCE_PATHS['snapshot_volume'] return self._invoke('DELETE', path, **{'object-id': object_id}) def list_snapshot_volumes(self): """Lists snapshot volumes/views defined on the array.""" path = self.RESOURCE_PATHS['snapshot_volumes'] return 
self._invoke('GET', path) def list_ssc_storage_pools(self): """Lists pools and their service quality defined on the array.""" path = "/storage-systems/{system-id}/ssc/pools" return self._invoke('GET', path) def get_ssc_storage_pool(self, volume_group_ref): """Get storage pool service quality information from the array.""" path = "/storage-systems/{system-id}/ssc/pools/{object-id}" return self._invoke('GET', path, **{'object-id': volume_group_ref}) def list_storage_pools(self): """Lists storage pools in the array.""" path = "/storage-systems/{system-id}/storage-pools" return self._invoke('GET', path) def get_storage_pool(self, volume_group_ref): """Get storage pool information from the array.""" path = "/storage-systems/{system-id}/storage-pools/{object-id}" return self._invoke('GET', path, **{'object-id': volume_group_ref}) def list_drives(self): """Lists drives in the array.""" path = "/storage-systems/{system-id}/drives" return self._invoke('GET', path) def list_storage_systems(self): """Lists managed storage systems registered with web service.""" path = "/storage-systems" return self._invoke('GET', path, use_system=False) def list_storage_system(self): """List current storage system registered with web service.""" path = "/storage-systems/{system-id}" return self._invoke('GET', path) def register_storage_system(self, controller_addresses, password=None, wwn=None): """Registers storage system with web service.""" path = "/storage-systems" data = {'controllerAddresses': controller_addresses} data.setdefault('wwn', wwn if wwn else None) data.setdefault('password', password if password else None) return self._invoke('POST', path, data, use_system=False) def update_stored_system_password(self, password): """Update array password stored on web service.""" path = "/storage-systems/{system-id}" data = {'storedPassword': password} return self._invoke('POST', path, data) def create_volume_copy_job(self, src_id, tgt_id, priority='priority4', tgt_wrt_protected='true'): """Creates a volume copy job.""" path = "/storage-systems/{system-id}/volume-copy-jobs" data = {'sourceId': src_id, 'targetId': tgt_id, 'copyPriority': priority, 'targetWriteProtected': tgt_wrt_protected} return self._invoke('POST', path, data) def control_volume_copy_job(self, obj_id, control='start'): """Controls a volume copy job.""" path = ("/storage-systems/{system-id}/volume-copy-jobs-control" "/{object-id}?control={String}") return self._invoke('PUT', path, **{'object-id': obj_id, 'String': control}) def list_vol_copy_job(self, object_id): """List volume copy job.""" path = "/storage-systems/{system-id}/volume-copy-jobs/{object-id}" return self._invoke('GET', path, **{'object-id': object_id}) def delete_vol_copy_job(self, object_id): """Delete volume copy job.""" path = "/storage-systems/{system-id}/volume-copy-jobs/{object-id}" return self._invoke('DELETE', path, **{'object-id': object_id}) def add_autosupport_data(self, key, data): """Register driver statistics via autosupport log.""" path = ('/key-values/%s' % key) self._invoke('POST', path, json.dumps(data)) def set_counter(self, key, value): path = ('/counters/%s/setCounter?value=%d' % (key, value)) self._invoke('POST', path) def get_asup_info(self): """Returns a dictionary of relevant autosupport information. 
Currently returned fields are: model -- E-series model name serial_numbers -- Serial number for each controller firmware_version -- Version of active firmware chassis_sn -- Serial number for whole chassis """ asup_info = {} controllers = self.list_hardware_inventory().get('controllers') if controllers: asup_info['model'] = controllers[0].get('modelName', 'unknown') serial_numbers = [value['serialNumber'].rstrip() for __, value in enumerate(controllers)] serial_numbers.sort() for index, value in enumerate(serial_numbers): if not value: serial_numbers[index] = 'unknown' asup_info['serial_numbers'] = serial_numbers else: asup_info['model'] = 'unknown' asup_info['serial_numbers'] = ['unknown', 'unknown'] system_info = self.list_storage_system() if system_info: asup_info['firmware_version'] = system_info['fwVersion'] asup_info['chassis_sn'] = system_info['chassisSerialNumber'] else: asup_info['firmware_version'] = 'unknown' asup_info['chassis_sn'] = 'unknown' return asup_info def get_eseries_api_info(self, verify=False): """Get E-Series API information from the array.""" api_operating_mode = 'embedded' path = 'devmgr/utils/about' headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} url = self._get_resource_url(path, True).replace( '/devmgr/v2', '', 1) result = self.invoke_service(method='GET', url=url, headers=headers, verify=verify) about_response_dict = result.json() mode_is_proxy = about_response_dict['runningAsProxy'] if mode_is_proxy: api_operating_mode = 'proxy' return api_operating_mode, about_response_dict['version'] def list_backend_store(self, key): """Retrieve data by key from the persistent store on the backend. Example response: {"key": "cinder-snapshots", "value": "[]"} :param key: the persistent store to retrieve :returns: a json body representing the value of the store, or an empty json object """ path = self.RESOURCE_PATHS.get('persistent-store') try: resp = self._invoke('GET', path, **{'key': key}) except exception.NetAppDriverException: return dict() else: data = resp['value'] if data: return json.loads(data) return dict() def save_backend_store(self, key, store_data): """Save a json value to the persistent storage on the backend. The storage backend provides a small amount of persistent storage that we can utilize for storing driver information. :param key: The key utilized for storing/retrieving the data :param store_data: a python data structure that will be stored as a json value """ path = self.RESOURCE_PATHS.get('persistent-stores') store_data = json.dumps(store_data, separators=(',', ':')) data = { 'key': key, 'value': store_data } self._invoke('POST', path, data) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/0000775000567000056710000000000012701406543023621 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py0000664000567000056710000001201512701406250026446 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp Data ONTAP (C-mode) iSCSI storage systems. """ from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap import block_cmode LOG = logging.getLogger(__name__) class NetAppCmodeISCSIDriver(driver.BaseVD, driver.ConsistencyGroupVD, driver.ManageableVD, driver.ExtendVD, driver.TransferVD, driver.SnapshotVD): """NetApp C-mode iSCSI volume driver.""" DRIVER_NAME = 'NetApp_iSCSI_Cluster_direct' def __init__(self, *args, **kwargs): super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs) self.library = block_cmode.NetAppBlockStorageCmodeLibrary( self.DRIVER_NAME, 'iSCSI', **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh, self.get_filter_function(), self.get_goodness_function()) def get_default_filter_function(self): return self.library.get_default_filter_function() def get_default_goodness_function(self): return self.library.get_default_goodness_function() def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def manage_existing(self, volume, existing_ref): return self.library.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.library.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): return self.library.unmanage(volume) def initialize_connection(self, volume, connector): return self.library.initialize_connection_iscsi(volume, connector) def terminate_connection(self, volume, connector, **kwargs): return self.library.terminate_connection_iscsi(volume, connector, **kwargs) def get_pool(self, volume): return self.library.get_pool(volume) def create_consistencygroup(self, context, group): return self.library.create_consistencygroup(group) def delete_consistencygroup(self, context, group, volumes): return self.library.delete_consistencygroup(group, volumes) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_consistencygroup(group, add_volumes=add_volumes, remove_volumes=remove_volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.create_cgsnapshot(cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.delete_cgsnapshot(cgsnapshot, snapshots) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): return
self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, source_cg=source_cg, source_vols=source_vols) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py0000664000567000056710000007015412701406250026132 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp NFS storage. """ import os import uuid from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap import ssc_cmode from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) QOS_CLEANUP_INTERVAL_SECONDS = 60 @six.add_metaclass(utils.TraceWrapperWithABCMetaclass) class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver): """NetApp NFS driver for Data ONTAP (Cluster-mode).""" REQUIRED_CMODE_FLAGS = ['netapp_vserver'] def __init__(self, *args, **kwargs): super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(na_opts.netapp_cluster_opts) def do_setup(self, context): """Do the customized set up on client for cluster mode.""" super(NetAppCmodeNfsDriver, self).do_setup(context) na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) self.vserver = self.configuration.netapp_vserver self.zapi_client = client_cmode.Client( transport_type=self.configuration.netapp_transport_type, username=self.configuration.netapp_login, password=self.configuration.netapp_password, hostname=self.configuration.netapp_server_hostname, port=self.configuration.netapp_server_port, vserver=self.vserver) self.ssc_enabled = True self.ssc_vols = None self.stale_vols = set() self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) def check_for_setup_error(self): """Check that the driver is working and can communicate.""" super(NetAppCmodeNfsDriver, self).check_for_setup_error() ssc_cmode.check_ssc_api_permissions(self.zapi_client) self._start_periodic_tasks() def _do_qos_for_volume(self, volume, extra_specs, cleanup=True): try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume, extra_specs) 
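# The info returned above is a small mapping describing either a pre-existing
# ("legacy") policy group named in the volume type or a spec from which one is
# provisioned next; the exact keys shown here are an illustrative assumption
# only, roughly:
#
#     {'legacy': None,
#      'spec': {'policy_name': 'openstack-<volume-uuid>',
#               'max_throughput': '2000iops'}}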
self.zapi_client.provision_qos_policy_group(qos_policy_group_info) self._set_qos_policy_group_on_volume(volume, qos_policy_group_info) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Setting QoS for %s failed"), volume['id']) if cleanup: LOG.debug("Cleaning volume %s", volume['id']) self._cleanup_volume_on_failure(volume) def _start_periodic_tasks(self): # Start the task that harvests soft-deleted QoS policy groups. harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall( self.zapi_client.remove_unused_qos_policy_groups) harvest_qos_periodic_task.start( interval=QOS_CLEANUP_INTERVAL_SECONDS, initial_delay=QOS_CLEANUP_INTERVAL_SECONDS) def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info): if qos_policy_group_info is None: return qos_policy_group_name = na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info) if qos_policy_group_name is None: return target_path = '%s' % (volume['name']) share = volume_utils.extract_host(volume['host'], level='pool') export_path = share.split(':')[1] flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver, export_path) self.zapi_client.file_assign_qos(flex_vol_name, qos_policy_group_name, target_path) def _check_volume_type(self, volume, share, file_name, extra_specs): """Match volume type for share file.""" if not self._is_share_vol_type_match(volume, share): raise exception.ManageExistingVolumeTypeMismatch( reason=_("Volume type does not match for share %s.") % share) def _clone_backing_file_for_volume(self, volume_name, clone_name, volume_id, share=None): """Clone backing file for Cinder volume.""" (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share) self.zapi_client.clone_file(exp_volume, volume_name, clone_name, vserver) share = share if share else self._get_provider_location(volume_id) self._post_prov_deprov_in_ssc(share) def _get_vserver_and_exp_vol(self, volume_id=None, share=None): """Gets the vserver and export volume for share.""" (host_ip, export_path) = self._get_export_ip_path(volume_id, share) ifs = self.zapi_client.get_if_info_by_ip(host_ip) vserver = ifs[0].get_child_content('vserver') exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver, export_path) return vserver, exp_volume def _update_volume_stats(self): """Retrieve stats info from vserver.""" self._ensure_shares_mounted() sync = self.ssc_vols is None ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.connection, self.vserver, synchronous=sync) LOG.debug('Updating volume stats') data = {} netapp_backend = 'NetApp_NFS_Cluster_direct' backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or netapp_backend data['vendor_name'] = 'NetApp' data['driver_version'] = self.VERSION data['storage_protocol'] = 'nfs' data['pools'] = self._get_pool_stats( filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function()) data['sparse_copy_volume'] = True self._spawn_clean_cache_job() self.zapi_client.provide_ems(self, netapp_backend, self._app_version) self._stats = data def _get_pool_stats(self, filter_function=None, goodness_function=None): """Retrieve pool (i.e.
NFS share) stats info from SSC volumes.""" self.perf_library.update_performance_cache( self.ssc_vols.get('all', [])) pools = [] for nfs_share in self._mounted_shares: capacity = self._get_share_capacity_info(nfs_share) pool = dict() pool['pool_name'] = nfs_share pool['QoS_support'] = True pool.update(capacity) # add SSC content if available vol = self._get_vol_for_share(nfs_share) if vol and self.ssc_vols: pool['netapp_raid_type'] = vol.aggr['raid_type'] pool['netapp_disk_type'] = vol.aggr['disk_type'] mirrored = vol in self.ssc_vols['mirrored'] pool['netapp_mirrored'] = six.text_type(mirrored).lower() pool['netapp_unmirrored'] = six.text_type(not mirrored).lower() dedup = vol in self.ssc_vols['dedup'] pool['netapp_dedup'] = six.text_type(dedup).lower() pool['netapp_nodedup'] = six.text_type(not dedup).lower() compression = vol in self.ssc_vols['compression'] pool['netapp_compression'] = six.text_type(compression).lower() pool['netapp_nocompression'] = six.text_type( not compression).lower() flexvol_thin = vol in self.ssc_vols['thin'] pool['netapp_thin_provisioned'] = six.text_type( flexvol_thin).lower() pool['netapp_thick_provisioned'] = six.text_type( not flexvol_thin).lower() thick = (not flexvol_thin and not self.configuration.nfs_sparsed_volumes) pool['thick_provisioning_support'] = thick pool['thin_provisioning_support'] = not thick utilization = self.perf_library.get_node_utilization_for_pool( vol.id['name']) pool['utilization'] = na_utils.round_down(utilization, '0.01') pool['filter_function'] = filter_function pool['goodness_function'] = goodness_function pools.append(pool) return pools @utils.synchronized('update_stale') def _update_stale_vols(self, volume=None, reset=False): """Populates stale vols with vol and returns set copy.""" if volume: self.stale_vols.add(volume) set_copy = self.stale_vols.copy() if reset: self.stale_vols.clear() return set_copy @utils.synchronized("refresh_ssc_vols") def refresh_ssc_vols(self, vols): """Refreshes ssc_vols with latest entries.""" if not self._mounted_shares: LOG.warning(_LW("No shares found hence skipping ssc refresh.")) return mnt_share_vols = set() vs_ifs = self.zapi_client.get_vserver_ips(self.vserver) for vol in vols['all']: for sh in self._mounted_shares: host = sh.split(':')[0] junction = sh.split(':')[1] ip = na_utils.resolve_hostname(host) if (self._ip_in_ifs(ip, vs_ifs) and junction == vol.id['junction_path']): mnt_share_vols.add(vol) vol.export['path'] = sh break for key in vols.keys(): vols[key] = vols[key] & mnt_share_vols self.ssc_vols = vols def _ip_in_ifs(self, ip, api_ifs): """Checks if ip is listed for ifs in API format.""" if api_ifs is None: return False for ifc in api_ifs: ifc_ip = ifc.get_child_content("address") if ifc_ip == ip: return True return False def _shortlist_del_eligible_files(self, share, old_files): """Prepares list of eligible files to be deleted from cache.""" file_list = [] (vserver, exp_volume) = self._get_vserver_and_exp_vol( volume_id=None, share=share) for old_file in old_files: path = '/vol/%s/%s' % (exp_volume, old_file) u_bytes = self.zapi_client.get_file_usage(path, vserver) file_list.append((old_file, u_bytes)) LOG.debug('Shortlisted files eligible for deletion: %s', file_list) return file_list def _share_match_for_ip(self, ip, shares): """Returns the share that is served by ip. Multiple shares can have same dir path but can be served using different ips. It finds the share which is served by ip on same nfs server. 
""" ip_vserver = self._get_vserver_for_ip(ip) if ip_vserver and shares: for share in shares: ip_sh = share.split(':')[0] sh_vserver = self._get_vserver_for_ip(ip_sh) if sh_vserver == ip_vserver: LOG.debug('Share match found for ip %s', ip) return share LOG.debug('No share match found for ip %s', ip) return None def _get_vserver_for_ip(self, ip): """Get vserver for the mentioned ip.""" try: ifs = self.zapi_client.get_if_info_by_ip(ip) vserver = ifs[0].get_child_content('vserver') return vserver except Exception: return None def _get_vol_for_share(self, nfs_share): """Gets the ssc vol with given share.""" if self.ssc_vols: for vol in self.ssc_vols['all']: if vol.export['path'] == nfs_share: return vol return None def _is_share_clone_compatible(self, volume, share): """Checks if share is compatible with volume to host its clone.""" thin = self._is_volume_thin_provisioned(volume) compatible = self._share_has_space_for_clone(share, volume['size'], thin) if compatible and self.ssc_enabled: matched = self._is_share_vol_type_match(volume, share) compatible = compatible and matched return compatible def _is_volume_thin_provisioned(self, volume): if self.configuration.nfs_sparsed_volumes: return True if self.ssc_enabled and volume in self.ssc_vols['thin']: return True return False def _is_share_vol_type_match(self, volume, share): """Checks if share matches volume type.""" netapp_vol = self._get_vol_for_share(share) LOG.debug("Found volume %(vol)s for share %(share)s.", {'vol': netapp_vol, 'share': share}) extra_specs = na_utils.get_volume_extra_specs(volume) vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs) return netapp_vol in vols def delete_volume(self, volume): """Deletes a logical volume.""" share = volume['provider_location'] self._delete_backing_file_for_volume(volume) try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) self.zapi_client.mark_qos_policy_group_for_deletion( qos_policy_group_info) except Exception: # Don't blow up here if something went wrong de-provisioning the # QoS policy for the volume. 
pass self._post_prov_deprov_in_ssc(share) def _delete_backing_file_for_volume(self, volume): """Deletes file on nfs share that backs a cinder volume.""" try: LOG.debug('Deleting backing file for volume %s.', volume['id']) self._delete_volume_on_filer(volume) except Exception: LOG.exception(_LE('Could not do delete of volume %s on filer, ' 'falling back to exec of "rm" command.'), volume['id']) try: super(NetAppCmodeNfsDriver, self).delete_volume(volume) except Exception: LOG.exception(_LE('Exec of "rm" command on backing file for ' '%s was unsuccessful.'), volume['id']) def _delete_volume_on_filer(self, volume): (_vserver, flexvol) = self._get_export_ip_path(volume_id=volume['id']) path_on_filer = '/vol' + flexvol + '/' + volume['name'] LOG.debug('Attempting to delete backing file %s for volume %s on ' 'filer.', path_on_filer, volume['id']) self.zapi_client.delete_file(path_on_filer) @utils.trace_method def delete_snapshot(self, snapshot): """Deletes a snapshot.""" share = self._get_provider_location(snapshot.volume_id) self._delete_backing_file_for_snapshot(snapshot) self._post_prov_deprov_in_ssc(share) @utils.trace_method def _delete_backing_file_for_snapshot(self, snapshot): """Deletes file on nfs share that backs a cinder volume.""" try: LOG.debug('Deleting backing file for snapshot %s.', snapshot['id']) self._delete_snapshot_on_filer(snapshot) except Exception: LOG.exception(_LE('Could not do delete of snapshot %s on filer, ' 'falling back to exec of "rm" command.'), snapshot['id']) try: super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot) except Exception: LOG.exception(_LE('Exec of "rm" command on backing file for' ' %s was unsuccessful.'), snapshot['id']) @utils.trace_method def _delete_snapshot_on_filer(self, snapshot): (_vserver, flexvol) = self._get_export_ip_path( volume_id=snapshot['volume_id']) path_on_filer = '/vol' + flexvol + '/' + snapshot['name'] LOG.debug('Attempting to delete backing file %s for snapshot %s ' 'on filer.', path_on_filer, snapshot['id']) self.zapi_client.delete_file(path_on_filer) def _post_prov_deprov_in_ssc(self, share): if self.ssc_enabled and share: netapp_vol = self._get_vol_for_share(share) if netapp_vol: self._update_stale_vols(volume=netapp_vol) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" copy_success = False try: major, minor = self.zapi_client.get_ontapi_version() col_path = self.configuration.netapp_copyoffload_tool_path # Search the local image cache before attempting copy offload cache_result = self._find_image_in_cache(image_id) if cache_result: copy_success = self._copy_from_cache(volume, image_id, cache_result) if copy_success: LOG.info(_LI('Copied image %(img)s to volume %(vol)s ' 'using local image cache.'), {'img': image_id, 'vol': volume['id']}) # Image cache was not present, attempt copy offload workflow if not copy_success and col_path and major == 1 and minor >= 20: LOG.debug('No result found in image cache') self._copy_from_img_service(context, volume, image_service, image_id) LOG.info(_LI('Copied image %(img)s to volume %(vol)s using' ' copy offload workflow.'), {'img': image_id, 'vol': volume['id']}) copy_success = True except Exception as e: LOG.exception(_LE('Copy offload workflow unsuccessful. 
%s'), e) finally: if not copy_success: super(NetAppCmodeNfsDriver, self).copy_image_to_volume( context, volume, image_service, image_id) if self.ssc_enabled: sh = self._get_provider_location(volume['id']) self._update_stale_vols(self._get_vol_for_share(sh)) def _get_ip_verify_on_cluster(self, host): """Verifies if host on same cluster and returns ip.""" ip = na_utils.resolve_hostname(host) vserver = self._get_vserver_for_ip(ip) if not vserver: raise exception.NotFound(_("Unable to locate an SVM that is " "managing the IP address '%s'") % ip) return ip def _copy_from_cache(self, volume, image_id, cache_result): """Try copying image file_name from cached file_name.""" LOG.debug("Trying copy from cache using copy offload.") copied = False for res in cache_result: try: (share, file_name) = res LOG.debug("Found cache file_name on share %s.", share) if share != self._get_provider_location(volume['id']): col_path = self.configuration.netapp_copyoffload_tool_path src_ip = self._get_ip_verify_on_cluster( share.split(':')[0]) src_path = os.path.join(share.split(':')[1], file_name) dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip( volume['id'])) dst_path = os.path.join( self._get_export_path(volume['id']), volume['name']) # Always run copy offload as regular user, it's sufficient # and rootwrap doesn't allow copy offload to run as root # anyways. self._execute(col_path, src_ip, dst_ip, src_path, dst_path, run_as_root=False, check_exit_code=0) self._register_image_in_cache(volume, image_id) LOG.debug("Copied image from cache to volume %s using" " copy offload.", volume['id']) else: self._clone_file_dst_exists(share, file_name, volume['name'], dest_exists=True) LOG.debug("Copied image from cache to volume %s using" " cloning.", volume['id']) self._post_clone_image(volume) copied = True break except Exception as e: LOG.exception(_LE('Error in workflow copy from cache. 
%s.'), e) return copied def _clone_file_dst_exists(self, share, src_name, dst_name, dest_exists=False): """Clone file even if dest exists.""" (vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share) self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver, dest_exists=dest_exists) def _copy_from_img_service(self, context, volume, image_service, image_id): """Copies from the image service using copy offload.""" LOG.debug("Trying copy from image service using copy offload.") image_loc = image_service.get_location(context, image_id) locations = self._construct_image_nfs_url(image_loc) src_ip = None selected_loc = None # this will match the first location that has a valid IP on cluster for location in locations: conn, dr = self._check_get_nfs_path_segs(location) if conn: try: src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0]) selected_loc = location break except exception.NotFound: pass if src_ip is None: raise exception.NotFound(_("Source host details not found.")) (__, ___, img_file) = selected_loc.rpartition('/') src_path = os.path.join(dr, img_file) dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip( volume['id'])) # tmp file is required to deal with img formats tmp_img_file = six.text_type(uuid.uuid4()) col_path = self.configuration.netapp_copyoffload_tool_path img_info = image_service.show(context, image_id) dst_share = self._get_provider_location(volume['id']) self._check_share_can_hold_size(dst_share, img_info['size']) run_as_root = self._execute_as_root dst_dir = self._get_mount_point_for_share(dst_share) dst_img_local = os.path.join(dst_dir, tmp_img_file) try: # If src and dst share not equal if (('%s:%s' % (src_ip, dr)) != ('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))): dst_img_serv_path = os.path.join( self._get_export_path(volume['id']), tmp_img_file) # Always run copy offload as regular user, it's sufficient # and rootwrap doesn't allow copy offload to run as root # anyways.
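# Invocation shape, mirroring the call below: the offload tool receives the
# source and destination LIF addresses followed by the source and destination
# file paths. check_exit_code=0 makes oslo's process utilities raise
# ProcessExecutionError on any non-zero exit status.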
self._execute(col_path, src_ip, dst_ip, src_path, dst_img_serv_path, run_as_root=False, check_exit_code=0) else: self._clone_file_dst_exists(dst_share, img_file, tmp_img_file) self._discover_file_till_timeout(dst_img_local, timeout=120) LOG.debug('Copied image %(img)s to tmp file %(tmp)s.', {'img': image_id, 'tmp': tmp_img_file}) dst_img_cache_local = os.path.join(dst_dir, 'img-cache-%s' % image_id) if img_info['disk_format'] == 'raw': LOG.debug('Image is raw %s.', image_id) self._clone_file_dst_exists(dst_share, tmp_img_file, volume['name'], dest_exists=True) self._move_nfs_file(dst_img_local, dst_img_cache_local) LOG.debug('Copied raw image %(img)s to volume %(vol)s.', {'img': image_id, 'vol': volume['id']}) else: LOG.debug('Image will be converted to raw %s.', image_id) img_conv = six.text_type(uuid.uuid4()) dst_img_conv_local = os.path.join(dst_dir, img_conv) # Checking against image size which is approximate check self._check_share_can_hold_size(dst_share, img_info['size']) try: image_utils.convert_image(dst_img_local, dst_img_conv_local, 'raw', run_as_root=run_as_root) data = image_utils.qemu_img_info(dst_img_conv_local, run_as_root=run_as_root) if data.file_format != "raw": raise exception.InvalidResults( _("Converted to raw, but format is now %s.") % data.file_format) else: self._clone_file_dst_exists(dst_share, img_conv, volume['name'], dest_exists=True) self._move_nfs_file(dst_img_conv_local, dst_img_cache_local) LOG.debug('Copied locally converted raw image' ' %(img)s to volume %(vol)s.', {'img': image_id, 'vol': volume['id']}) finally: if os.path.exists(dst_img_conv_local): self._delete_file_at_path(dst_img_conv_local) self._post_clone_image(volume) finally: if os.path.exists(dst_img_local): self._delete_file_at_path(dst_img_local) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. A log entry will be made to notify the Admin that the volume is no longer being managed. :param volume: Cinder volume to unmanage """ try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) self.zapi_client.mark_qos_policy_group_for_deletion( qos_policy_group_info) except Exception: # Unmanage even if there was a problem deprovisioning the # associated qos policy group. pass super(NetAppCmodeNfsDriver, self).unmanage(volume) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/performance/0000775000567000056710000000000012701406543026122 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py0000664000567000056710000002036012701406250030573 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Performance metrics functions and cache for NetApp cDOT systems. 
""" from oslo_log import log as logging from cinder.i18n import _LE from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_base LOG = logging.getLogger(__name__) class PerformanceCmodeLibrary(perf_base.PerformanceLibrary): def __init__(self, zapi_client): super(PerformanceCmodeLibrary, self).__init__(zapi_client) self.performance_counters = {} self.pool_utilization = {} def _init_counter_info(self): """Set a few counter names based on Data ONTAP version.""" super(PerformanceCmodeLibrary, self)._init_counter_info() try: if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS: self.system_object_name = 'system:constituent' self.avg_processor_busy_base_counter_name = ( self._get_base_counter_name('system:constituent', 'avg_processor_busy')) elif self.zapi_client.features.SYSTEM_METRICS: self.system_object_name = 'system' self.avg_processor_busy_base_counter_name = ( self._get_base_counter_name('system', 'avg_processor_busy')) except netapp_api.NaApiError: if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time' else: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' LOG.exception(_LE('Could not get performance base counter ' 'name. Performance-based scheduler ' 'functions may not be available.')) def update_performance_cache(self, ssc_pools): """Called periodically to update per-pool node utilization metrics.""" # Nothing to do on older systems if not (self.zapi_client.features.SYSTEM_METRICS or self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS): return # Get aggregates and nodes for all known pools aggr_names = self._get_aggregates_for_pools(ssc_pools) node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names) # Update performance counter cache for each node node_utilization = {} for node_name in node_names: if node_name not in self.performance_counters: self.performance_counters[node_name] = [] # Get new performance counters and save only the last 10 counters = self._get_node_utilization_counters(node_name) if not counters: continue self.performance_counters[node_name].append(counters) self.performance_counters[node_name] = ( self.performance_counters[node_name][-10:]) # Update utilization for each node using newest & oldest sample counters = self.performance_counters[node_name] if len(counters) < 2: node_utilization[node_name] = perf_base.DEFAULT_UTILIZATION else: node_utilization[node_name] = self._get_node_utilization( counters[0], counters[-1], node_name) # Update pool utilization map atomically pool_utilization = {} for pool in ssc_pools: pool_name = pool.id['name'] aggr_name = pool.aggr['name'] node_name = aggr_node_map.get(aggr_name) if node_name: pool_utilization[pool_name] = node_utilization.get( node_name, perf_base.DEFAULT_UTILIZATION) else: pool_utilization[pool_name] = perf_base.DEFAULT_UTILIZATION self.pool_utilization = pool_utilization def get_node_utilization_for_pool(self, pool_name): """Get the node utilization for the specified pool, if available.""" return self.pool_utilization.get(pool_name, perf_base.DEFAULT_UTILIZATION) def _get_aggregates_for_pools(self, ssc_pools): """Get the set of aggregates that contain the specified pools.""" aggr_names = set() for pool in ssc_pools: aggr_names.add(pool.aggr['name']) return aggr_names def _get_nodes_for_aggregates(self, aggr_names): """Get the cluster nodes that own the specified aggregates.""" node_names = set() aggr_node_map = {} for aggr_name in 
aggr_names: node_name = self.zapi_client.get_node_for_aggregate(aggr_name) if node_name: node_names.add(node_name) aggr_node_map[aggr_name] = node_name return node_names, aggr_node_map def _get_node_utilization_counters(self, node_name): """Get all performance counters for calculating node utilization.""" try: return (self._get_node_utilization_system_counters(node_name) + self._get_node_utilization_wafl_counters(node_name) + self._get_node_utilization_processor_counters(node_name)) except netapp_api.NaApiError: LOG.exception(_LE('Could not get utilization counters from node ' '%s'), node_name) return None def _get_node_utilization_system_counters(self, node_name): """Get the system counters for calculating node utilization.""" system_instance_uuids = ( self.zapi_client.get_performance_instance_uuids( self.system_object_name, node_name)) system_counter_names = [ 'avg_processor_busy', self.avg_processor_busy_base_counter_name, ] if 'cpu_elapsed_time1' in system_counter_names: system_counter_names.append('cpu_elapsed_time') system_counters = self.zapi_client.get_performance_counters( self.system_object_name, system_instance_uuids, system_counter_names) return system_counters def _get_node_utilization_wafl_counters(self, node_name): """Get the WAFL counters for calculating node utilization.""" wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids( 'wafl', node_name) wafl_counter_names = ['total_cp_msecs', 'cp_phase_times'] wafl_counters = self.zapi_client.get_performance_counters( 'wafl', wafl_instance_uuids, wafl_counter_names) # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH] for counter in wafl_counters: if 'cp_phase_times' in counter: self._expand_performance_array( 'wafl', 'cp_phase_times', counter) return wafl_counters def _get_node_utilization_processor_counters(self, node_name): """Get the processor counters for calculating node utilization.""" processor_instance_uuids = ( self.zapi_client.get_performance_instance_uuids('processor', node_name)) processor_counter_names = ['domain_busy', 'processor_elapsed_time'] processor_counters = self.zapi_client.get_performance_counters( 'processor', processor_instance_uuids, processor_counter_names) # Expand array data so we can use processor:domain_busy[kahuna] for counter in processor_counters: if 'domain_busy' in counter: self._expand_performance_array( 'processor', 'domain_busy', counter) return processor_counters cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/performance/__init__.py0000664000567000056710000000000012701406250030214 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py0000664000567000056710000002257312701406250030426 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Performance metrics functions and cache for NetApp systems. 
""" from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LE LOG = logging.getLogger(__name__) DEFAULT_UTILIZATION = 50 class PerformanceLibrary(object): def __init__(self, zapi_client): self.zapi_client = zapi_client self._init_counter_info() def _init_counter_info(self): """Set a few counter names based on Data ONTAP version.""" self.system_object_name = None self.avg_processor_busy_base_counter_name = None def _get_node_utilization(self, counters_t1, counters_t2, node_name): """Get node utilization from two sets of performance counters.""" try: # Time spent in the single-threaded Kahuna domain kahuna_percent = self._get_kahuna_utilization(counters_t1, counters_t2) # If Kahuna is using >60% of the CPU, the controller is fully busy if kahuna_percent > 60: return 100.0 # Average CPU busyness across all processors avg_cpu_percent = 100.0 * self._get_average_cpu_utilization( counters_t1, counters_t2) # Total Consistency Point (CP) time total_cp_time_msec = self._get_total_consistency_point_time( counters_t1, counters_t2) # Time spent in CP Phase 2 (buffer flush) p2_flush_time_msec = self._get_consistency_point_p2_flush_time( counters_t1, counters_t2) # Wall-clock time between the two counter sets poll_time_msec = self._get_total_time(counters_t1, counters_t2, 'total_cp_msecs') # If two polls happened in quick succession, use CPU utilization if total_cp_time_msec == 0 or poll_time_msec == 0: return max(min(100.0, avg_cpu_percent), 0) # Adjusted Consistency Point time adjusted_cp_time_msec = self._get_adjusted_consistency_point_time( total_cp_time_msec, p2_flush_time_msec) adjusted_cp_percent = (100.0 * adjusted_cp_time_msec / poll_time_msec) # Utilization is the greater of CPU busyness & CP time node_utilization = max(avg_cpu_percent, adjusted_cp_percent) return max(min(100.0, node_utilization), 0) except Exception: LOG.exception(_LE('Could not calculate node utilization for ' 'node %s.'), node_name) return DEFAULT_UTILIZATION def _get_kahuna_utilization(self, counters_t1, counters_t2): """Get time spent in the single-threaded Kahuna domain.""" # Note(cknight): Because Kahuna is single-threaded, running only on # one CPU at a time, we can safely sum the Kahuna CPU usage # percentages across all processors in a node. 
return sum(self._get_performance_counter_average_multi_instance( counters_t1, counters_t2, 'domain_busy:kahuna', 'processor_elapsed_time')) * 100.0 def _get_average_cpu_utilization(self, counters_t1, counters_t2): """Get average CPU busyness across all processors.""" return self._get_performance_counter_average( counters_t1, counters_t2, 'avg_processor_busy', self.avg_processor_busy_base_counter_name) def _get_total_consistency_point_time(self, counters_t1, counters_t2): """Get time spent in Consistency Points in msecs.""" return float(self._get_performance_counter_delta( counters_t1, counters_t2, 'total_cp_msecs')) def _get_consistency_point_p2_flush_time(self, counters_t1, counters_t2): """Get time spent in CP Phase 2 (buffer flush) in msecs.""" return float(self._get_performance_counter_delta( counters_t1, counters_t2, 'cp_phase_times:p2_flush')) def _get_total_time(self, counters_t1, counters_t2, counter_name): """Get wall clock time between two successive counters in msecs.""" timestamp_t1 = float(self._find_performance_counter_timestamp( counters_t1, counter_name)) timestamp_t2 = float(self._find_performance_counter_timestamp( counters_t2, counter_name)) return (timestamp_t2 - timestamp_t1) * 1000.0 def _get_adjusted_consistency_point_time(self, total_cp_time, p2_flush_time): """Get adjusted CP time by limiting CP phase 2 flush time to 20%.""" return (total_cp_time * (1.0 - (1.0 * p2_flush_time / total_cp_time) / 0.8)) def _get_performance_counter_delta(self, counters_t1, counters_t2, counter_name): """Calculate a delta value from two performance counters.""" counter_t1 = int( self._find_performance_counter_value(counters_t1, counter_name)) counter_t2 = int( self._find_performance_counter_value(counters_t2, counter_name)) return counter_t2 - counter_t1 def _get_performance_counter_average(self, counters_t1, counters_t2, counter_name, base_counter_name, instance_name=None): """Calculate an average value from two performance counters.""" counter_t1 = float(self._find_performance_counter_value( counters_t1, counter_name, instance_name)) counter_t2 = float(self._find_performance_counter_value( counters_t2, counter_name, instance_name)) base_counter_t1 = float(self._find_performance_counter_value( counters_t1, base_counter_name, instance_name)) base_counter_t2 = float(self._find_performance_counter_value( counters_t2, base_counter_name, instance_name)) return (counter_t2 - counter_t1) / (base_counter_t2 - base_counter_t1) def _get_performance_counter_average_multi_instance(self, counters_t1, counters_t2, counter_name, base_counter_name): """Calculate an average value from multiple counter instances.""" averages = [] instance_names = [] for counter in counters_t1: if counter_name in counter: instance_names.append(counter['instance-name']) for instance_name in instance_names: average = self._get_performance_counter_average( counters_t1, counters_t2, counter_name, base_counter_name, instance_name) averages.append(average) return averages def _find_performance_counter_value(self, counters, counter_name, instance_name=None): """Given a counter set, return the value of a named instance.""" for counter in counters: if counter_name in counter: if (instance_name is None or counter['instance-name'] == instance_name): return counter[counter_name] else: raise exception.NotFound(_('Counter %s not found') % counter_name) def _find_performance_counter_timestamp(self, counters, counter_name, instance_name=None): """Given a counter set, return the timestamp of a named instance.""" for counter in counters: 
if counter_name in counter: if (instance_name is None or counter['instance-name'] == instance_name): return counter['timestamp'] else: raise exception.NotFound(_('Counter %s not found') % counter_name) def _expand_performance_array(self, object_name, counter_name, counter): """Get array labels and expand counter data array.""" # Get array labels for counter value counter_info = self.zapi_client.get_performance_counter_info( object_name, counter_name) array_labels = [counter_name + ':' + label.lower() for label in counter_info['labels']] array_values = counter[counter_name].split(',') # Combine labels and values, and then mix into existing counter array_data = dict(zip(array_labels, array_values)) counter.update(array_data) def _get_base_counter_name(self, object_name, counter_name): """Get the name of the base counter for the specified counter.""" counter_info = self.zapi_client.get_performance_counter_info( object_name, counter_name) return counter_info['base-counter'] cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py0000664000567000056710000001326512701406250030525 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Performance metrics functions and cache for NetApp 7-mode Data ONTAP systems. """ from oslo_log import log as logging from cinder.i18n import _LE from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_base LOG = logging.getLogger(__name__) class Performance7modeLibrary(perf_base.PerformanceLibrary): def __init__(self, zapi_client): super(Performance7modeLibrary, self).__init__(zapi_client) self.performance_counters = [] self.utilization = perf_base.DEFAULT_UTILIZATION self.node_name = self.zapi_client.get_system_name() def _init_counter_info(self): """Set a few counter names based on Data ONTAP version.""" super(Performance7modeLibrary, self)._init_counter_info() if self.zapi_client.features.SYSTEM_METRICS: self.system_object_name = 'system' try: self.avg_processor_busy_base_counter_name = ( self._get_base_counter_name('system', 'avg_processor_busy')) except netapp_api.NaApiError: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' LOG.exception(_LE('Could not get performance base counter ' 'name. 
Performance-based scheduler ' 'functions may not be available.')) def update_performance_cache(self): """Called periodically to update node utilization metrics.""" # Nothing to do on older systems if not self.zapi_client.features.SYSTEM_METRICS: return # Get new performance counters and save only the last 10 counters = self._get_node_utilization_counters() if not counters: return self.performance_counters.append(counters) self.performance_counters = self.performance_counters[-10:] # Update utilization using newest & oldest sample if len(self.performance_counters) < 2: self.utilization = perf_base.DEFAULT_UTILIZATION else: self.utilization = self._get_node_utilization( self.performance_counters[0], self.performance_counters[-1], self.node_name) def get_node_utilization(self): """Get the node utilization, if available.""" return self.utilization def _get_node_utilization_counters(self): """Get all performance counters for calculating node utilization.""" try: return (self._get_node_utilization_system_counters() + self._get_node_utilization_wafl_counters() + self._get_node_utilization_processor_counters()) except netapp_api.NaApiError: LOG.exception(_LE('Could not get utilization counters from node ' '%s'), self.node_name) return None def _get_node_utilization_system_counters(self): """Get the system counters for calculating node utilization.""" system_instance_names = ( self.zapi_client.get_performance_instance_names( self.system_object_name)) system_counter_names = [ 'avg_processor_busy', self.avg_processor_busy_base_counter_name, ] if 'cpu_elapsed_time1' in system_counter_names: system_counter_names.append('cpu_elapsed_time') system_counters = self.zapi_client.get_performance_counters( self.system_object_name, system_instance_names, system_counter_names) return system_counters def _get_node_utilization_wafl_counters(self): """Get the WAFL counters for calculating node utilization.""" wafl_instance_names = self.zapi_client.get_performance_instance_names( 'wafl') wafl_counter_names = ['total_cp_msecs', 'cp_phase_times'] wafl_counters = self.zapi_client.get_performance_counters( 'wafl', wafl_instance_names, wafl_counter_names) # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH] for counter in wafl_counters: if 'cp_phase_times' in counter: self._expand_performance_array( 'wafl', 'cp_phase_times', counter) return wafl_counters def _get_node_utilization_processor_counters(self): """Get the processor counters for calculating node utilization.""" processor_instance_names = ( self.zapi_client.get_performance_instance_names('processor')) processor_counter_names = ['domain_busy', 'processor_elapsed_time'] processor_counters = self.zapi_client.get_performance_counters( 'processor', processor_instance_names, processor_counter_names) # Expand array data so we can use processor:domain_busy[kahuna] for counter in processor_counters: if 'domain_busy' in counter: self._expand_performance_array( 'processor', 'domain_busy', counter) return processor_counters cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/fc_7mode.py0000664000567000056710000001226212701406250025654 0ustar jenkinsjenkins00000000000000# Copyright (c) - 2014, Clinton Knight. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp Data ONTAP (7-mode) FibreChannel storage systems. """ from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap import block_7mode from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class NetApp7modeFibreChannelDriver(driver.BaseVD, driver.ConsistencyGroupVD, driver.ManageableVD, driver.ExtendVD, driver.TransferVD, driver.SnapshotVD): """NetApp 7-mode FibreChannel volume driver.""" DRIVER_NAME = 'NetApp_FibreChannel_7mode_direct' def __init__(self, *args, **kwargs): super(NetApp7modeFibreChannelDriver, self).__init__(*args, **kwargs) self.library = block_7mode.NetAppBlockStorage7modeLibrary( self.DRIVER_NAME, 'FC', **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh, self.get_filter_function(), self.get_goodness_function()) def get_default_filter_function(self): return self.library.get_default_filter_function() def get_default_goodness_function(self): return self.library.get_default_goodness_function() def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def manage_existing(self, volume, existing_ref): return self.library.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.library.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): return self.library.unmanage(volume) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): return self.library.initialize_connection_fc(volume, connector) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): return self.library.terminate_connection_fc(volume, connector, **kwargs) def get_pool(self, volume): return self.library.get_pool(volume) def create_consistencygroup(self, context, group): return self.library.create_consistencygroup(group) def delete_consistencygroup(self, context, group, volumes): return self.library.delete_consistencygroup(group, volumes) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_consistencygroup(group, add_volumes=add_volumes, remove_volumes=remove_volumes)
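# Minimal usage sketch; `conf`, `ctxt`, `group`, `vol_a`, and `vol_b` are
# hypothetical stand-ins for the objects the volume manager supplies:
#
#     driver = NetApp7modeFibreChannelDriver(configuration=conf)
#     driver.do_setup(ctxt)
#     driver.check_for_setup_error()
#     driver.update_consistencygroup(ctxt, group,
#                                    add_volumes=[vol_a],
#                                    remove_volumes=[vol_b])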
def create_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.create_cgsnapshot(cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.delete_cgsnapshot(cgsnapshot, snapshots) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): return self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, source_cg=source_cg, source_vols=source_vols) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/__init__.py0000664000567000056710000000000012701406250025713 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/nfs_base.py0000664000567000056710000012455112701406250025756 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp NFS storage. 
""" import copy import math import os import re import shutil import threading import time from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from six.moves import urllib from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder import utils from cinder.volume import driver from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume.drivers import nfs from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF @six.add_metaclass(utils.TraceWrapperWithABCMetaclass) class NetAppNfsDriver(driver.ManageableVD, driver.CloneableImageVD, driver.SnapshotVD, nfs.NfsDriver): """Base class for NetApp NFS driver for Data ONTAP.""" # do not increment this as it may be used in volume type definitions VERSION = "1.0.0" REQUIRED_FLAGS = ['netapp_login', 'netapp_password', 'netapp_server_hostname'] DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70' DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' def __init__(self, *args, **kwargs): na_utils.validate_instantiation(**kwargs) self._execute = None self._context = None self._app_version = kwargs.pop("app_version", "unknown") super(NetAppNfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(na_opts.netapp_connection_opts) self.configuration.append_config_values(na_opts.netapp_basicauth_opts) self.configuration.append_config_values(na_opts.netapp_transport_opts) self.configuration.append_config_values(na_opts.netapp_img_cache_opts) self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts) def do_setup(self, context): super(NetAppNfsDriver, self).do_setup(context) self._context = context na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration) self.zapi_client = None self.ssc_enabled = False def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" super(NetAppNfsDriver, self).check_for_setup_error() def get_pool(self, volume): """Return pool name where volume resides. :param volume: The volume hosted by the driver. :return: Name of the pool where given volume is hosted. """ return volume['provider_location'] def create_volume(self, volume): """Creates a volume. :param volume: volume reference """ LOG.debug('create_volume on %s', volume['host']) self._ensure_shares_mounted() # get share as pool name pool_name = volume_utils.extract_host(volume['host'], level='pool') if pool_name is None: msg = _("Pool is not available in the volume host field.") raise exception.InvalidHost(reason=msg) extra_specs = na_utils.get_volume_extra_specs(volume) try: volume['provider_location'] = pool_name LOG.debug('Using pool %s.', pool_name) self._do_create_volume(volume) self._do_qos_for_volume(volume, extra_specs) return {'provider_location': volume['provider_location']} except Exception: LOG.exception(_LE("Exception creating vol %(name)s on " "pool %(pool)s."), {'name': volume['name'], 'pool': volume['provider_location']}) # We need to set this for the model update in order for the # manager to behave correctly. 
volume['provider_location'] = None finally: if self.ssc_enabled: self._update_stale_vols(self._get_vol_for_share(pool_name)) msg = _("Volume %(vol)s could not be created in pool %(pool)s.") raise exception.VolumeBackendAPIException(data=msg % { 'vol': volume['name'], 'pool': pool_name}) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" source = { 'name': snapshot['name'], 'size': snapshot['volume_size'], 'id': snapshot['volume_id'], } return self._clone_source_to_destination_volume(source, volume) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" source = {'name': src_vref['name'], 'size': src_vref['size'], 'id': src_vref['id']} return self._clone_source_to_destination_volume(source, volume) def _clone_source_to_destination_volume(self, source, destination_volume): share = self._get_volume_location(source['id']) extra_specs = na_utils.get_volume_extra_specs(destination_volume) try: destination_volume['provider_location'] = share self._clone_with_extension_check( source, destination_volume) self._do_qos_for_volume(destination_volume, extra_specs) return {'provider_location': destination_volume[ 'provider_location']} except Exception: LOG.exception(_LE("Exception creating volume %(name)s from source " "%(source)s on share %(share)s."), {'name': destination_volume['id'], 'source': source['name'], 'share': destination_volume['provider_location']}) msg = _("Volume %s could not be created on shares.") raise exception.VolumeBackendAPIException(data=msg % ( destination_volume['id'])) def _clone_with_extension_check(self, source, destination_volume): source_size = source['size'] source_id = source['id'] source_name = source['name'] destination_volume_size = destination_volume['size'] self._clone_backing_file_for_volume(source_name, destination_volume['name'], source_id) path = self.local_path(destination_volume) if self._discover_file_till_timeout(path): self._set_rw_permissions(path) if destination_volume_size != source_size: try: self.extend_volume(destination_volume, destination_volume_size) except Exception: LOG.error(_LE("Resizing %s failed. 
Cleaning " "volume."), destination_volume['name']) self._cleanup_volume_on_failure(destination_volume) raise exception.CinderException( _("Resizing clone %s failed.") % destination_volume['name']) else: raise exception.CinderException(_("NFS file %s not discovered.") % destination_volume['name']) def _cleanup_volume_on_failure(self, volume): LOG.debug('Cleaning up, failed operation on %s', volume['name']) vol_path = self.local_path(volume) if os.path.exists(vol_path): LOG.debug('Found %s, deleting ...', vol_path) self._delete_file_at_path(vol_path) else: LOG.debug('Could not find %s, continuing ...', vol_path) def _do_qos_for_volume(self, volume, extra_specs, cleanup=False): """Set QoS policy on backend from volume type information.""" raise NotImplementedError() def create_snapshot(self, snapshot): """Creates a snapshot.""" self._clone_backing_file_for_volume(snapshot['volume_name'], snapshot['name'], snapshot['volume_id']) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" nfs_mount = self._get_provider_location(snapshot.volume_id) if self._volume_not_present(nfs_mount, snapshot.name): return True self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name), run_as_root=self._execute_as_root) def _get_volume_location(self, volume_id): """Returns NFS mount address as :.""" nfs_server_ip = self._get_host_ip(volume_id) export_path = self._get_export_path(volume_id) return nfs_server_ip + ':' + export_path def _clone_backing_file_for_volume(self, volume_name, clone_name, volume_id, share=None): """Clone backing file for Cinder volume.""" raise NotImplementedError() def _get_provider_location(self, volume_id): """Returns provider location for given volume.""" volume = self.db.volume_get(self._context, volume_id) return volume.provider_location def _get_host_ip(self, volume_id): """Returns IP address for the given volume.""" return self._get_provider_location(volume_id).rsplit(':')[0] def _get_export_path(self, volume_id): """Returns NFS export path for the given volume.""" return self._get_provider_location(volume_id).rsplit(':')[1] def _volume_not_present(self, nfs_mount, volume_name): """Check if volume exists.""" try: self._try_execute('ls', self._get_volume_path(nfs_mount, volume_name)) except processutils.ProcessExecutionError: # If the volume isn't present return True return False def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. tries = 0 while True: try: self._execute(*command, **kwargs) return True except processutils.ProcessExecutionError: tries += 1 if tries >= self.configuration.num_shell_tries: raise LOG.exception(_LE("Recovering from a failed execute. " "Try number %s"), tries) time.sleep(tries ** 2) def _get_volume_path(self, nfs_share, volume_name): """Get volume path. Get volume path (local fs path) for given volume name on given nfs share. 
:param nfs_share: string, example 172.18.194.100:/var/nfs :param volume_name: string, example volume-91ee65ec-c473-4391-8c09-162b00c68a8c """ return os.path.join(self._get_mount_point_for_share(nfs_share), volume_name) def _update_volume_stats(self): """Retrieve stats info from volume group.""" raise NotImplementedError() def get_default_filter_function(self): """Get the default filter_function string.""" return self.DEFAULT_FILTER_FUNCTION def get_default_goodness_function(self): """Get the default goodness_function string.""" return self.DEFAULT_GOODNESS_FUNCTION def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" super(NetAppNfsDriver, self).copy_image_to_volume( context, volume, image_service, image_id) LOG.info(_LI('Copied image to volume %s using regular download.'), volume['id']) self._register_image_in_cache(volume, image_id) def _register_image_in_cache(self, volume, image_id): """Stores image in the cache.""" file_name = 'img-cache-%s' % image_id LOG.info(_LI("Registering image in cache %s"), file_name) try: self._do_clone_rel_img_cache( volume['name'], file_name, volume['provider_location'], file_name) except Exception as e: LOG.warning(_LW('Exception while registering image %(image_id)s' ' in cache. Exception: %(exc)s'), {'image_id': image_id, 'exc': e}) def _find_image_in_cache(self, image_id): """Finds image in cache and returns list of shares with file name.""" result = [] if getattr(self, '_mounted_shares', None): for share in self._mounted_shares: dir = self._get_mount_point_for_share(share) file_name = 'img-cache-%s' % image_id file_path = '%s/%s' % (dir, file_name) if os.path.isfile(file_path): LOG.debug('Found cache file for image %(image_id)s' ' on share %(share)s', {'image_id': image_id, 'share': share}) result.append((share, file_name)) return result def _do_clone_rel_img_cache(self, src, dst, share, cache_file): """Do clone operation w.r.t image cache file.""" @utils.synchronized(cache_file, external=True) def _do_clone(): dir = self._get_mount_point_for_share(share) file_path = '%s/%s' % (dir, dst) if not os.path.exists(file_path): LOG.info(_LI('Cloning from cache to destination %s'), dst) self._clone_backing_file_for_volume(src, dst, volume_id=None, share=share) _do_clone() @utils.synchronized('clean_cache') def _spawn_clean_cache_job(self): """Spawns a clean task if not running.""" if getattr(self, 'cleaning', None): LOG.debug('Image cache cleaning in progress. Returning... 
') return else: # Set cleaning to True self.cleaning = True t = threading.Timer(0, self._clean_image_cache) t.start() def _clean_image_cache(self): """Clean the image cache files in cache of space crunch.""" try: LOG.debug('Image cache cleaning in progress.') thres_size_perc_start =\ self.configuration.thres_avl_size_perc_start thres_size_perc_stop = \ self.configuration.thres_avl_size_perc_stop for share in getattr(self, '_mounted_shares', []): try: total_size, total_avl = \ self._get_capacity_info(share) avl_percent = int((total_avl / total_size) * 100) if avl_percent <= thres_size_perc_start: LOG.info(_LI('Cleaning cache for share %s.'), share) eligible_files = self._find_old_cache_files(share) threshold_size = int( (thres_size_perc_stop * total_size) / 100) bytes_to_free = int(threshold_size - total_avl) LOG.debug('Files to be queued for deletion %s', eligible_files) self._delete_files_till_bytes_free( eligible_files, share, bytes_to_free) else: continue except Exception as e: LOG.warning(_LW('Exception during cache cleaning' ' %(share)s. Message - %(ex)s'), {'share': share, 'ex': e}) continue finally: LOG.debug('Image cache cleaning done.') self.cleaning = False def _shortlist_del_eligible_files(self, share, old_files): """Prepares list of eligible files to be deleted from cache.""" raise NotImplementedError() def _find_old_cache_files(self, share): """Finds the old files in cache.""" mount_fs = self._get_mount_point_for_share(share) threshold_minutes = self.configuration.expiry_thres_minutes cmd = ['find', mount_fs, '-maxdepth', '1', '-name', 'img-cache*', '-amin', '+%s' % threshold_minutes] res, _err = self._execute(*cmd, run_as_root=self._execute_as_root) if res: old_file_paths = res.strip('\n').split('\n') mount_fs_len = len(mount_fs) old_files = [x[mount_fs_len + 1:] for x in old_file_paths] eligible_files = self._shortlist_del_eligible_files( share, old_files) return eligible_files return [] def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0): """Delete files from disk till bytes are freed or list exhausted.""" LOG.debug('Bytes to free %s', bytes_to_free) if file_list and bytes_to_free > 0: sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True) mount_fs = self._get_mount_point_for_share(share) for f in sorted_files: if f: file_path = '%s/%s' % (mount_fs, f[0]) LOG.debug('Delete file path %s', file_path) @utils.synchronized(f[0], external=True) def _do_delete(): if self._delete_file_at_path(file_path): return True return False if _do_delete(): bytes_to_free -= int(f[1]) if bytes_to_free <= 0: return def _delete_file_at_path(self, path): """Delete file from disk and return result as boolean.""" try: LOG.debug('Deleting file at path %s', path) cmd = ['rm', '-f', path] self._execute(*cmd, run_as_root=self._execute_as_root) return True except Exception as ex: LOG.warning(_LW('Exception during deleting %s'), ex) return False def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image. image_location is a string whose format depends on the image service backend in use. The driver should use it to determine whether cloning is possible. Returns a dict of volume properties eg. provider_location, boolean indicating whether cloning occurred. 
""" image_id = image_meta['id'] cloned = False post_clone = False extra_specs = na_utils.get_volume_extra_specs(volume) try: cache_result = self._find_image_in_cache(image_id) if cache_result: cloned = self._clone_from_cache(volume, image_id, cache_result) else: cloned = self._direct_nfs_clone(volume, image_location, image_id) if cloned: self._do_qos_for_volume(volume, extra_specs) post_clone = self._post_clone_image(volume) except Exception as e: msg = e.msg if getattr(e, 'msg', None) else e LOG.info(_LI('Image cloning unsuccessful for image' ' %(image_id)s. Message: %(msg)s'), {'image_id': image_id, 'msg': msg}) finally: cloned = cloned and post_clone share = volume['provider_location'] if cloned else None bootable = True if cloned else False return {'provider_location': share, 'bootable': bootable}, cloned def _clone_from_cache(self, volume, image_id, cache_result): """Clones a copy from image cache.""" cloned = False LOG.info(_LI('Cloning image %s from cache'), image_id) for res in cache_result: # Repeat tries in other shares if failed in some (share, file_name) = res LOG.debug('Cache share: %s', share) if (share and self._is_share_clone_compatible(volume, share)): try: self._do_clone_rel_img_cache( file_name, volume['name'], share, file_name) cloned = True volume['provider_location'] = share break except Exception: LOG.warning(_LW('Unexpected exception during' ' image cloning in share %s'), share) return cloned def _direct_nfs_clone(self, volume, image_location, image_id): """Clone directly in nfs share.""" LOG.info(_LI('Checking image clone %s from glance share.'), image_id) cloned = False image_locations = self._construct_image_nfs_url(image_location) run_as_root = self._execute_as_root for loc in image_locations: share = self._is_cloneable_share(loc) if share and self._is_share_clone_compatible(volume, share): LOG.debug('Share is cloneable %s', share) volume['provider_location'] = share (__, ___, img_file) = loc.rpartition('/') dir_path = self._get_mount_point_for_share(share) img_path = '%s/%s' % (dir_path, img_file) img_info = image_utils.qemu_img_info(img_path, run_as_root=run_as_root) if img_info.file_format == 'raw': LOG.debug('Image is raw %s', image_id) self._clone_backing_file_for_volume( img_file, volume['name'], volume_id=None, share=share) cloned = True break else: LOG.info( _LI('Image will locally be converted to raw %s'), image_id) dst = '%s/%s' % (dir_path, volume['name']) image_utils.convert_image(img_path, dst, 'raw', run_as_root=run_as_root) data = image_utils.qemu_img_info(dst, run_as_root=run_as_root) if data.file_format != "raw": raise exception.InvalidResults( _("Converted to raw, but" " format is now %s") % data.file_format) else: cloned = True self._register_image_in_cache( volume, image_id) break return cloned def _post_clone_image(self, volume): """Do operations post image cloning.""" LOG.info(_LI('Performing post clone for %s'), volume['name']) vol_path = self.local_path(volume) if self._discover_file_till_timeout(vol_path): self._set_rw_permissions(vol_path) self._resize_image_file(vol_path, volume['size']) return True raise exception.InvalidResults( _("NFS file could not be discovered.")) def _resize_image_file(self, path, new_size): """Resize the image file on share to new size.""" LOG.debug('Checking file for resize') if self._is_file_size_equal(path, new_size): return else: LOG.info(_LI('Resizing file to %sG'), new_size) image_utils.resize_image(path, new_size, run_as_root=self._execute_as_root) if self._is_file_size_equal(path, new_size): return 
else: raise exception.InvalidResults( _('Resizing image file failed.')) def _is_file_size_equal(self, path, size): """Checks if file size at path is equal to size.""" data = image_utils.qemu_img_info(path, run_as_root=self._execute_as_root) virt_size = data.virtual_size / units.Gi if virt_size == size: return True else: return False def _discover_file_till_timeout(self, path, timeout=45): """Checks if file size at path is equal to size.""" # Sometimes nfs takes time to discover file # Retrying in case any unexpected situation occurs retry_seconds = timeout sleep_interval = 2 while True: if os.path.exists(path): return True else: if retry_seconds <= 0: LOG.warning(_LW('Discover file retries exhausted.')) return False else: time.sleep(sleep_interval) retry_seconds -= sleep_interval def _is_cloneable_share(self, image_location): """Finds if the image at location is cloneable.""" conn, dr = self._check_get_nfs_path_segs(image_location) return self._check_share_in_use(conn, dr) def _check_get_nfs_path_segs(self, image_location): """Checks if the nfs path format is matched. WebNFS url format with relative-path is supported. Accepting all characters in path-names and checking against the mounted shares which will contain only allowed path segments. Returns connection and dir details. """ conn, dr = None, None if image_location: nfs_loc_pattern = \ ('^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)' '*(/[^\/\\\\]+)$)') matched = re.match(nfs_loc_pattern, image_location, flags=0) if not matched: LOG.debug('Image location not in the' ' expected format %s', image_location) else: conn = matched.group(2) dr = matched.group(3) or '/' return conn, dr def _share_match_for_ip(self, ip, shares): """Returns the share that is served by ip. Multiple shares can have same dir path but can be served using different ips. It finds the share which is served by ip on same nfs server. """ raise NotImplementedError() def _check_share_in_use(self, conn, dir): """Checks if share is cinder mounted and returns it.""" try: if conn: host = conn.split(':')[0] ip = na_utils.resolve_hostname(host) share_candidates = [] for sh in self._mounted_shares: sh_exp = sh.split(':')[1] if sh_exp == dir: share_candidates.append(sh) if share_candidates: LOG.debug('Found possible share matches %s', share_candidates) return self._share_match_for_ip(ip, share_candidates) except Exception: LOG.warning(_LW("Unexpected exception while " "short listing used share.")) return None def _construct_image_nfs_url(self, image_location): """Construct direct url for nfs backend. It creates direct url from image_location which is a tuple with direct_url and locations. Returns array of urls with nfs scheme if nfs store else returns url. It needs to be verified by backend before use. 
""" direct_url, locations = image_location if not direct_url and not locations: raise exception.NotFound(_('Image location not present.')) urls = [] if not locations: urls.append(direct_url) else: for location in locations: if not location['metadata']: continue location_type = location['metadata'].get('type') if not location_type or location_type.lower() != "nfs": continue share_location = location['metadata'].get('share_location') mountpoint = location['metadata'].get('mountpoint') if not share_location or not mountpoint: continue url = location['url'] url_parse = urllib.parse.urlparse(url) abs_path = os.path.join(url_parse.netloc, url_parse.path) rel_path = os.path.relpath(abs_path, mountpoint) direct_url = "%s/%s" % (share_location, rel_path) urls.append(direct_url) return urls def extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" LOG.info(_LI('Extending volume %s.'), volume['name']) try: path = self.local_path(volume) self._resize_image_file(path, new_size) except Exception as err: exception_msg = (_("Failed to extend volume " "%(name)s, Error msg: %(msg)s.") % {'name': volume['name'], 'msg': six.text_type(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) try: extra_specs = na_utils.get_volume_extra_specs(volume) # Create volume copy with new size for size-dependent QOS specs volume_copy = copy.copy(volume) volume_copy['size'] = new_size self._do_qos_for_volume(volume_copy, extra_specs, cleanup=False) except Exception as err: exception_msg = (_("Failed to set QoS for existing volume " "%(name)s, Error msg: %(msg)s.") % {'name': volume['name'], 'msg': six.text_type(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) def _is_share_clone_compatible(self, volume, share): """Checks if share is compatible with volume to host its clone.""" raise NotImplementedError() def _share_has_space_for_clone(self, share, size_in_gib, thin=True): """Is there space on the share for a clone given the original size?""" requested_size = size_in_gib * units.Gi total_size, total_available = self._get_capacity_info(share) reserved_ratio = self.reserved_percentage / 100.0 reserved = int(round(total_size * reserved_ratio)) available = max(0, total_available - reserved) if thin: available = available * self.max_over_subscription_ratio return available >= requested_size def _check_share_can_hold_size(self, share, size): """Checks if volume can hold image with size.""" _tot_size, tot_available = self._get_capacity_info( share) if tot_available < size: msg = _("Container size smaller than required file size.") raise exception.VolumeDriverException(msg) def _move_nfs_file(self, source_path, dest_path): """Moves source to destination.""" @utils.synchronized(dest_path, external=True) def _move_file(src, dst): if os.path.exists(dst): LOG.warning(_LW("Destination %s already exists."), dst) return False self._execute('mv', src, dst, run_as_root=self._execute_as_root) return True try: return _move_file(source_path, dest_path) except Exception as e: LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s'), {'src': source_path, 'e': e}) return False def _get_export_ip_path(self, volume_id=None, share=None): """Returns export ip and path. One of volume id or share is used to return the values. 
""" if volume_id: host_ip = self._get_host_ip(volume_id) export_path = self._get_export_path(volume_id) elif share: host_ip = share.split(':')[0] export_path = share.split(':')[1] else: raise exception.InvalidInput( 'A volume ID or share was not specified.') return host_ip, export_path def _get_share_capacity_info(self, nfs_share): """Returns the share capacity metrics needed by the scheduler.""" capacity = dict() capacity['reserved_percentage'] = self.reserved_percentage capacity['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) total_size, total_available = self._get_capacity_info(nfs_share) capacity['total_capacity_gb'] = na_utils.round_down( total_size / units.Gi, '0.01') capacity['free_capacity_gb'] = na_utils.round_down( total_available / units.Gi, '0.01') capacity['provisioned_capacity_gb'] = (round( capacity['total_capacity_gb'] - capacity['free_capacity_gb'], 2)) return capacity def _get_capacity_info(self, nfs_share): """Get total capacity and free capacity in bytes for an nfs share.""" export_path = nfs_share.rsplit(':', 1)[1] return self.zapi_client.get_flexvol_capacity(export_path) def _check_volume_type(self, volume, share, file_name, extra_specs): """Match volume type for share file.""" raise NotImplementedError() def _convert_vol_ref_share_name_to_share_ip(self, vol_ref): """Converts the share point name to an IP address The volume reference may have a DNS name portion in the share name. Convert that to an IP address and then restore the entire path. :param vol_ref: Driver-specific information used to identify a volume :return: A volume reference where share is in IP format. """ # First strip out share and convert to IP format. share_split = vol_ref.rsplit(':', 1) vol_ref_share_ip = na_utils.resolve_hostname(share_split[0]) # Now place back into volume reference. vol_ref_share = vol_ref_share_ip + ':' + share_split[1] return vol_ref_share def _get_share_mount_and_vol_from_vol_ref(self, vol_ref): """Get the NFS share, the NFS mount, and the volume from reference Determine the NFS share point, the NFS mount point, and the volume (with possible path) from the given volume reference. Raise exception if unsuccessful. :param vol_ref: Driver-specific information used to identify a volume :return: NFS Share, NFS mount, volume path or raise error """ # Check that the reference is valid. if 'source-name' not in vol_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=reason) vol_ref_name = vol_ref['source-name'] self._ensure_shares_mounted() # If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config # file, but the admin tries to manage the file located at # 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below # when searching self._mounted_shares to see if we have an existing # mount that would work to access the volume-to-be-managed (a string # comparison is done instead of IP comparison). 
vol_ref_share = self._convert_vol_ref_share_name_to_share_ip( vol_ref_name) for nfs_share in self._mounted_shares: cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share) (orig_share, work_share, file_path) = \ vol_ref_share.partition(cfg_share) if work_share == cfg_share: file_path = file_path[1:] # strip off leading path divider LOG.debug("Found possible share %s; checking mount.", work_share) nfs_mount = self._get_mount_point_for_share(nfs_share) vol_full_path = os.path.join(nfs_mount, file_path) if os.path.isfile(vol_full_path): LOG.debug("Found share %(share)s and vol %(path)s on " "mount %(mnt)s", {'share': nfs_share, 'path': file_path, 'mnt': nfs_mount}) return nfs_share, nfs_mount, file_path else: LOG.debug("vol_ref %(ref)s not on share %(share)s.", {'ref': vol_ref_share, 'share': nfs_share}) raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=_('Volume not found on configured storage backend.')) def manage_existing(self, volume, existing_vol_ref): """Manages an existing volume. The specified Cinder volume is to be taken into Cinder management. The driver will verify its existence and then rename it to the new Cinder volume name. It is expected that the existing volume reference is an NFS share point and some [/path]/volume; e.g., 10.10.32.1:/openstack/vol_to_manage or 10.10.32.1:/openstack/some_directory/vol_to_manage :param volume: Cinder volume to manage :param existing_vol_ref: Driver-specific information used to identify a volume """ # Attempt to find NFS share, NFS mount, and volume path from vol_ref. (nfs_share, nfs_mount, vol_path) = \ self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s", {'vol': volume['id'], 'ref': existing_vol_ref['source-name']}) extra_specs = na_utils.get_volume_extra_specs(volume) self._check_volume_type(volume, nfs_share, vol_path, extra_specs) if vol_path == volume['name']: LOG.debug("New Cinder volume %s name matches reference name: " "no need to rename.", volume['name']) else: src_vol = os.path.join(nfs_mount, vol_path) dst_vol = os.path.join(nfs_mount, volume['name']) try: shutil.move(src_vol, dst_vol) LOG.debug("Setting newly managed Cinder volume name to %s", volume['name']) self._set_rw_permissions_for_all(dst_vol) except (OSError, IOError) as err: exception_msg = (_("Failed to manage existing volume %(name)s," " because rename operation failed:" " Error msg: %(msg)s."), {'name': existing_vol_ref['source-name'], 'msg': err}) raise exception.VolumeBackendAPIException(data=exception_msg) try: self._do_qos_for_volume(volume, extra_specs, cleanup=False) except Exception as err: exception_msg = (_("Failed to set QoS for existing volume " "%(name)s, Error msg: %(msg)s.") % {'name': existing_vol_ref['source-name'], 'msg': six.text_type(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) return {'provider_location': nfs_share} def manage_existing_get_size(self, volume, existing_vol_ref): """Returns the size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_vol_ref: Existing volume to take under management """ # Attempt to find NFS share, NFS mount, and volume path from vol_ref. 
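# NOTE(example): sketch of the size calculation performed below (file size
# hypothetical). The managed size is rounded up to the next whole GB, so a
# 5.3 GiB backing file is reported as 6:
#
#     file_size = float(utils.get_file_size(file_path)) / units.Gi  # 5.3
#     vol_size = int(math.ceil(file_size))                          # 6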
(nfs_share, nfs_mount, vol_path) = \ self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) try: LOG.debug("Asked to get size of NFS vol_ref %s.", existing_vol_ref['source-name']) file_path = os.path.join(nfs_mount, vol_path) file_size = float(utils.get_file_size(file_path)) / units.Gi vol_size = int(math.ceil(file_size)) except (OSError, ValueError): exception_message = (_("Failed to manage existing volume " "%(name)s, because of error in getting " "volume size."), {'name': existing_vol_ref['source-name']}) raise exception.VolumeBackendAPIException(data=exception_message) LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.", {'ref': existing_vol_ref['source-name'], 'size': vol_size}) return vol_size def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. A log entry will be made to notify the Admin that the volume is no longer being managed. :param volume: Cinder volume to unmanage """ vol_str = CONF.volume_name_template % volume['id'] vol_path = os.path.join(volume['provider_location'], vol_str) LOG.info(_LI("Cinder NFS volume with current path \"%(cr)s\" is " "no longer being managed."), {'cr': vol_path}) @utils.synchronized('update_stale') def _update_stale_vols(self, volume=None, reset=False): """Populates stale vols with vol and returns set copy.""" raise NotImplementedError def _get_vol_for_share(self, nfs_share): """Gets the ssc vol with given share.""" raise NotImplementedError cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/block_7mode.py0000664000567000056710000004526212701406250026364 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2014 Jeff Applewhite. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver library for NetApp 7-mode block storage systems. 
""" from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import timeutils from oslo_utils import units import six from cinder import exception from cinder.i18n import _, _LW from cinder import utils from cinder.volume import configuration from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap.client import client_7mode from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) @six.add_metaclass(utils.TraceWrapperMetaclass) class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary): """NetApp block storage library for Data ONTAP (7-mode).""" def __init__(self, driver_name, driver_protocol, **kwargs): super(NetAppBlockStorage7modeLibrary, self).__init__(driver_name, driver_protocol, **kwargs) self.configuration.append_config_values(na_opts.netapp_7mode_opts) self.driver_mode = '7mode' def do_setup(self, context): super(NetAppBlockStorage7modeLibrary, self).do_setup(context) self.volume_list = [] self.vfiler = self.configuration.netapp_vfiler self.zapi_client = client_7mode.Client( self.volume_list, transport_type=self.configuration.netapp_transport_type, username=self.configuration.netapp_login, password=self.configuration.netapp_password, hostname=self.configuration.netapp_server_hostname, port=self.configuration.netapp_server_port, vfiler=self.vfiler) self._do_partner_setup() self.vol_refresh_time = None self.vol_refresh_interval = 1800 self.vol_refresh_running = False self.vol_refresh_voluntary = False self.root_volume_name = self._get_root_volume_name() self.perf_library = perf_7mode.Performance7modeLibrary( self.zapi_client) def _do_partner_setup(self): partner_backend = self.configuration.netapp_partner_backend_name if partner_backend: config = configuration.Configuration(na_opts.netapp_7mode_opts, partner_backend) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_transport_opts) self.partner_zapi_client = client_7mode.Client( None, transport_type=config.netapp_transport_type, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, port=config.netapp_server_port, vfiler=None) def check_for_setup_error(self): """Check that the driver is working and can communicate.""" api_version = self.zapi_client.get_ontapi_version() if api_version: major, minor = api_version if major == 1 and minor < 9: msg = _("Unsupported Data ONTAP version." " Data ONTAP version 7.3.1 and above is supported.") raise exception.VolumeBackendAPIException(data=msg) else: msg = _("API version could not be determined.") raise exception.VolumeBackendAPIException(data=msg) self._refresh_volume_info() if not self.volume_list: msg = _('No pools are available for provisioning volumes. 
' 'Ensure that the configuration option ' 'netapp_pool_name_search_pattern is set correctly.') raise exception.NetAppDriverException(msg) super(NetAppBlockStorage7modeLibrary, self).check_for_setup_error() def _create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None): """Creates a LUN, handling Data ONTAP differences as needed.""" if qos_policy_group_name is not None: msg = _('Data ONTAP operating in 7-Mode does not support QoS ' 'policy groups.') raise exception.VolumeDriverException(msg) self.zapi_client.create_lun( volume_name, lun_name, size, metadata, qos_policy_group_name) self.vol_refresh_voluntary = True def _get_root_volume_name(self): # switch to volume-get-root-name API when possible vols = self.zapi_client.get_filer_volumes() for vol in vols: volume_name = vol.get_child_content('name') if self._get_vol_option(volume_name, 'root') == 'true': return volume_name LOG.warning(_LW('Could not determine root volume name ' 'on %s.'), self._get_owner()) return None def _get_owner(self): if self.vfiler: owner = '%s:%s' % (self.configuration.netapp_server_hostname, self.vfiler) else: owner = self.configuration.netapp_server_hostname return owner def _create_lun_handle(self, metadata): """Returns LUN handle based on filer type.""" owner = self._get_owner() return '%s:%s' % (owner, metadata['Path']) def _find_mapped_lun_igroup(self, path, initiator_list): """Find an igroup for a LUN mapped to the given initiator(s).""" initiator_set = set(initiator_list) result = self.zapi_client.get_lun_map(path) initiator_groups = result.get_child_by_name('initiator-groups') if initiator_groups: for initiator_group_info in initiator_groups.get_children(): initiator_set_for_igroup = set() for initiator_info in initiator_group_info.get_child_by_name( 'initiators').get_children(): initiator_set_for_igroup.add( initiator_info.get_child_content('initiator-name')) if initiator_set == initiator_set_for_igroup: igroup = initiator_group_info.get_child_content( 'initiator-group-name') lun_id = initiator_group_info.get_child_content( 'lun-id') return igroup, lun_id return None, None def _has_luns_mapped_to_initiators(self, initiator_list, include_partner=True): """Checks whether any LUNs are mapped to the given initiator(s).""" if self.zapi_client.has_luns_mapped_to_initiators(initiator_list): return True if include_partner and self.partner_zapi_client and \ self.partner_zapi_client.has_luns_mapped_to_initiators( initiator_list): return True return False def _clone_lun(self, name, new_name, space_reserved=None, qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None): """Clone LUN with the given handle to the new name.""" if not space_reserved: space_reserved = self.lun_space_reservation if qos_policy_group_name is not None: msg = _('Data ONTAP operating in 7-Mode does not support QoS ' 'policy groups.') raise exception.VolumeDriverException(msg) metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] (parent, _splitter, name) = path.rpartition('/') clone_path = '%s/%s' % (parent, new_name) self.zapi_client.clone_lun(path, clone_path, name, new_name, space_reserved, src_block=src_block, dest_block=dest_block, block_count=block_count, source_snapshot=source_snapshot) self.vol_refresh_voluntary = True luns = self.zapi_client.get_lun_by_args(path=clone_path) cloned_lun = luns[0] self.zapi_client.set_space_reserve(clone_path, space_reserved) clone_meta = self._create_lun_meta(cloned_lun) handle = self._create_lun_handle(clone_meta) 
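# NOTE(example): for reference (values hypothetical), the handle built
# above is '<owner>:<path>', where the owner itself is 'hostname:vfiler'
# when a vfiler is configured (see _get_owner() above):
#
#     _get_owner()       # -> 'filer1.example.com:vf0'
#     metadata['Path']   # -> '/vol/vol1/lun-clone-1'
#     handle             # -> 'filer1.example.com:vf0:/vol/vol1/lun-clone-1'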
self._add_lun_to_table( block_base.NetAppLun(handle, new_name, cloned_lun.get_child_content('size'), clone_meta)) def _create_lun_meta(self, lun): """Creates LUN metadata dictionary.""" self.zapi_client.check_is_naelement(lun) meta_dict = {} meta_dict['Path'] = lun.get_child_content('path') meta_dict['Volume'] = lun.get_child_content('path').split('/')[2] meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') meta_dict['SpaceReserved'] = lun.get_child_content( 'is-space-reservation-enabled') meta_dict['UUID'] = lun.get_child_content('uuid') return meta_dict def _get_fc_target_wwpns(self, include_partner=True): wwpns = self.zapi_client.get_fc_target_wwpns() if include_partner and self.partner_zapi_client: wwpns.extend(self.partner_zapi_client.get_fc_target_wwpns()) return wwpns def _update_volume_stats(self, filter_function=None, goodness_function=None): """Retrieve stats info from filer.""" # ensure we get current data self.vol_refresh_voluntary = True self._refresh_volume_info() LOG.debug('Updating volume stats') data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.driver_name data['vendor_name'] = 'NetApp' data['driver_version'] = self.VERSION data['storage_protocol'] = self.driver_protocol data['pools'] = self._get_pool_stats( filter_function=filter_function, goodness_function=goodness_function) data['sparse_copy_volume'] = True self.zapi_client.provide_ems(self, self.driver_name, self.app_version, server_type=self.driver_mode) self._stats = data def _get_pool_stats(self, filter_function=None, goodness_function=None): """Retrieve pool (i.e. Data ONTAP volume) stats info from volumes.""" pools = [] self.perf_library.update_performance_cache() for vol in self.vols: volume_name = vol.get_child_content('name') # omit volumes not specified in the config if self.volume_list and volume_name not in self.volume_list: continue # omit root volume if volume_name == self.root_volume_name: continue # ensure good volume state state = vol.get_child_content('state') inconsistent = vol.get_child_content('is-inconsistent') invalid = vol.get_child_content('is-invalid') if (state != 'online' or inconsistent != 'false' or invalid != 'false'): continue pool = dict() pool['pool_name'] = volume_name pool['QoS_support'] = False pool['reserved_percentage'] = ( self.reserved_percentage) pool['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) # convert sizes to GB total = float(vol.get_child_content('size-total') or 0) total /= units.Gi pool['total_capacity_gb'] = na_utils.round_down(total, '0.01') free = float(vol.get_child_content('size-available') or 0) free /= units.Gi pool['free_capacity_gb'] = na_utils.round_down(free, '0.01') pool['provisioned_capacity_gb'] = (round( pool['total_capacity_gb'] - pool['free_capacity_gb'], 2)) thick = ( self.configuration.netapp_lun_space_reservation == 'enabled') pool['thick_provisioning_support'] = thick pool['thin_provisioning_support'] = not thick utilization = self.perf_library.get_node_utilization() pool['utilization'] = na_utils.round_down(utilization, '0.01') pool['filter_function'] = filter_function pool['goodness_function'] = goodness_function pool['consistencygroup_support'] = True pools.append(pool) return pools def _get_filtered_pools(self): """Return available pools filtered by a pool name search pattern.""" # Inform deprecation of legacy option. 
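# NOTE(example): hedged illustration of the pool-name filter applied below
# (the pattern value is hypothetical). With
#
#     netapp_pool_name_search_pattern = '^vol_openstack'
#
# the regex built by na_utils.get_pool_name_filter_regex() behaves as:
#
#     pool_regex.match('vol_openstack_01')  # match -> pool is reported
#     pool_regex.match('vol0')              # None  -> pool is skipped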
if self.configuration.safe_get('netapp_volume_list'): msg = _LW("The option 'netapp_volume_list' is deprecated and " "will be removed in the future releases. Please use " "the option 'netapp_pool_name_search_pattern' instead.") versionutils.report_deprecated_feature(LOG, msg) pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) filtered_pools = [] for vol in self.vols: vol_name = vol.get_child_content('name') if pool_regex.match(vol_name): msg = ("Volume '%(vol_name)s' matches against regular " "expression: %(vol_pattern)s") LOG.debug(msg, {'vol_name': vol_name, 'vol_pattern': pool_regex.pattern}) filtered_pools.append(vol_name) else: msg = ("Volume '%(vol_name)s' does not match against regular " "expression: %(vol_pattern)s") LOG.debug(msg, {'vol_name': vol_name, 'vol_pattern': pool_regex.pattern}) return filtered_pools def _get_lun_block_count(self, path): """Gets block counts for the LUN.""" bs = super(NetAppBlockStorage7modeLibrary, self)._get_lun_block_count(path) api_version = self.zapi_client.get_ontapi_version() if api_version: major = api_version[0] minor = api_version[1] if major == 1 and minor < 15: bs -= 1 return bs def _refresh_volume_info(self): """Saves the volume information for the filer.""" if (self.vol_refresh_time is None or self.vol_refresh_voluntary or timeutils.is_newer_than(self.vol_refresh_time, self.vol_refresh_interval)): try: job_set = na_utils.set_safe_attr(self, 'vol_refresh_running', True) if not job_set: LOG.warning(_LW("Volume refresh job already running. " "Returning...")) return self.vol_refresh_voluntary = False self.vols = self.zapi_client.get_filer_volumes() self.volume_list = self._get_filtered_pools() self.vol_refresh_time = timeutils.utcnow() except Exception as e: LOG.warning(_LW("Error refreshing volume info. Message: %s"), e) finally: na_utils.set_safe_attr(self, 'vol_refresh_running', False) def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" super(NetAppBlockStorage7modeLibrary, self).delete_volume(volume) self.vol_refresh_voluntary = True LOG.debug('Deleted LUN with name %s', volume['name']) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" super(NetAppBlockStorage7modeLibrary, self).delete_snapshot(snapshot) self.vol_refresh_voluntary = True def _is_lun_valid_on_storage(self, lun): """Validate LUN specific to storage system.""" if self.volume_list: lun_vol = lun.get_metadata_property('Volume') if lun_vol not in self.volume_list: return False return True def _check_volume_type_for_lun(self, volume, lun, existing_ref, extra_specs): """Check if LUN satisfies volume type.""" if extra_specs: legacy_policy = extra_specs.get('netapp:qos_policy_group') if legacy_policy is not None: raise exception.ManageExistingVolumeTypeMismatch( reason=_("Setting LUN QoS policy group is not supported " "on this storage family and ONTAP version.")) volume_type = na_utils.get_volume_type_from_volume(volume) if volume_type is None: return spec = na_utils.get_backend_qos_spec_from_volume_type(volume_type) if spec is not None: raise exception.ManageExistingVolumeTypeMismatch( reason=_("Back-end QoS specs are not supported on this " "storage family and ONTAP version.")) def _get_preferred_target_from_list(self, target_details_list, filter=None): # 7-mode iSCSI LIFs migrate from controller to controller # in failover and flap operational state in transit, so # we don't filter these on operational state. 
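# NOTE(example): minimal sketch of the contrast described above (field
# names and values are illustrative, not taken from the base
# implementation). A filtering variant might drop targets whose portal is
# down, e.g.
#
#     targets = [{'address': '10.0.0.1', 'operational-state': 'down'},
#                {'address': '10.0.0.2', 'operational-state': 'up'}]
#
# whereas this 7-mode override passes the full list through, since LIFs
# may flap to 'down' transiently during controller failover.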
return (super(NetAppBlockStorage7modeLibrary, self) ._get_preferred_target_from_list(target_details_list)) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py0000664000567000056710000002146312701406250026055 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp NFS storage. """ import os from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import client_7mode from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) @six.add_metaclass(utils.TraceWrapperWithABCMetaclass) class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver): """NetApp NFS driver for Data ONTAP (7-mode).""" def __init__(self, *args, **kwargs): super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(na_opts.netapp_7mode_opts) def do_setup(self, context): """Do the customized set up on client if any for 7 mode.""" super(NetApp7modeNfsDriver, self).do_setup(context) self.zapi_client = client_7mode.Client( transport_type=self.configuration.netapp_transport_type, username=self.configuration.netapp_login, password=self.configuration.netapp_password, hostname=self.configuration.netapp_server_hostname, port=self.configuration.netapp_server_port, vfiler=self.configuration.netapp_vfiler) self.ssc_enabled = False self.perf_library = perf_7mode.Performance7modeLibrary( self.zapi_client) def check_for_setup_error(self): """Checks if setup occurred properly.""" api_version = self.zapi_client.get_ontapi_version() if api_version: major, minor = api_version if major == 1 and minor < 9: msg = _("Unsupported Data ONTAP version." 
" Data ONTAP version 7.3.1 and above is supported.") raise exception.VolumeBackendAPIException(data=msg) else: msg = _("Data ONTAP API version could not be determined.") raise exception.VolumeBackendAPIException(data=msg) super(NetApp7modeNfsDriver, self).check_for_setup_error() def _clone_backing_file_for_volume(self, volume_name, clone_name, volume_id, share=None): """Clone backing file for Cinder volume.""" (_host_ip, export_path) = self._get_export_ip_path(volume_id, share) storage_path = self.zapi_client.get_actual_path_for_export(export_path) target_path = '%s/%s' % (storage_path, clone_name) self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name), target_path) def _update_volume_stats(self): """Retrieve stats info from vserver.""" self._ensure_shares_mounted() LOG.debug('Updating volume stats') data = {} netapp_backend = 'NetApp_NFS_7mode_direct' backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or netapp_backend data['vendor_name'] = 'NetApp' data['driver_version'] = self.VERSION data['storage_protocol'] = 'nfs' data['pools'] = self._get_pool_stats( filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function()) data['sparse_copy_volume'] = True self._spawn_clean_cache_job() self.zapi_client.provide_ems(self, netapp_backend, self._app_version, server_type="7mode") self._stats = data def _get_pool_stats(self, filter_function=None, goodness_function=None): """Retrieve pool (i.e. NFS share) stats info from SSC volumes.""" pools = [] self.perf_library.update_performance_cache() for nfs_share in self._mounted_shares: capacity = self._get_share_capacity_info(nfs_share) pool = dict() pool['pool_name'] = nfs_share pool['QoS_support'] = False pool.update(capacity) thick = not self.configuration.nfs_sparsed_volumes pool['thick_provisioning_support'] = thick pool['thin_provisioning_support'] = not thick utilization = self.perf_library.get_node_utilization() pool['utilization'] = na_utils.round_down(utilization, '0.01') pool['filter_function'] = filter_function pool['goodness_function'] = goodness_function pools.append(pool) return pools def _shortlist_del_eligible_files(self, share, old_files): """Prepares list of eligible files to be deleted from cache.""" file_list = [] (_, export_path) = self._get_export_ip_path(share=share) exported_volume = self.zapi_client.get_actual_path_for_export( export_path) for old_file in old_files: path = os.path.join(exported_volume, old_file) u_bytes = self.zapi_client.get_file_usage(path) file_list.append((old_file, u_bytes)) LOG.debug('Shortlisted files eligible for deletion: %s', file_list) return file_list def _is_filer_ip(self, ip): """Checks whether ip is on the same filer.""" try: ifconfig = self.zapi_client.get_ifconfig() if_info = ifconfig.get_child_by_name('interface-config-info') if if_info: ifs = if_info.get_children() for intf in ifs: v4_addr = intf.get_child_by_name('v4-primary-address') if v4_addr: ip_info = v4_addr.get_child_by_name('ip-address-info') if ip_info: address = ip_info.get_child_content('address') if ip == address: return True else: continue except Exception: return False return False def _share_match_for_ip(self, ip, shares): """Returns the share that is served by ip. Multiple shares can have same dir path but can be served using different ips. It finds the share which is served by ip on same nfs server. 
""" if self._is_filer_ip(ip) and shares: for share in shares: ip_sh = share.split(':')[0] if self._is_filer_ip(ip_sh): LOG.debug('Share match found for ip %s', ip) return share LOG.debug('No share match found for ip %s', ip) return None def _is_share_clone_compatible(self, volume, share): """Checks if share is compatible with volume to host its clone.""" thin = self.configuration.nfs_sparsed_volumes return self._share_has_space_for_clone(share, volume['size'], thin) def _check_volume_type(self, volume, share, file_name, extra_specs): """Matches a volume type for share file.""" qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \ if extra_specs else None if qos_policy_group: raise exception.ManageExistingVolumeTypeMismatch( reason=(_("Setting file qos policy group is not supported" " on this storage family and ontap version."))) volume_type = na_utils.get_volume_type_from_volume(volume) if volume_type and 'qos_spec_id' in volume_type: raise exception.ManageExistingVolumeTypeMismatch( reason=_("QoS specs are not supported" " on this storage family and ONTAP version.")) def _do_qos_for_volume(self, volume, extra_specs, cleanup=False): """Set QoS policy on backend from volume type information.""" # 7-mode DOT does not support QoS. return cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/fc_cmode.py0000664000567000056710000001226612701406250025734 0ustar jenkinsjenkins00000000000000# Copyright (c) - 2014, Clinton Knight. All rights reserved. # Copyright (c) - 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems. 
""" from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class NetAppCmodeFibreChannelDriver(driver.BaseVD, driver.ConsistencyGroupVD, driver.ManageableVD, driver.ExtendVD, driver.TransferVD, driver.SnapshotVD): """NetApp C-mode FibreChannel volume driver.""" DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct' def __init__(self, *args, **kwargs): super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs) self.library = block_cmode.NetAppBlockStorageCmodeLibrary( self.DRIVER_NAME, 'FC', **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh, self.get_filter_function(), self.get_goodness_function()) def get_default_filter_function(self): return self.library.get_default_filter_function() def get_default_goodness_function(self): return self.library.get_default_goodness_function() def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def manage_existing(self, volume, existing_ref): return self.library.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.library.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): return self.library.unmanage(volume) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): return self.library.initialize_connection_fc(volume, connector) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): return self.library.terminate_connection_fc(volume, connector, **kwargs) def get_pool(self, volume): return self.library.get_pool(volume) def create_consistencygroup(self, context, group): return self.library.create_consistencygroup(group) def delete_consistencygroup(self, context, group, volumes): return self.library.delete_consistencygroup(group, volumes) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_consistencygroup(group, add_volumes=None, remove_volumes=None) def create_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.create_cgsnapshot(cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.delete_cgsnapshot(cgsnapshot, snapshots) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): return self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot=cgsnapshot, 
snapshots=snapshots, source_cg=source_cg, source_vols=source_vols) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/block_cmode.py0000664000567000056710000004455612701406250026445 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2014 Jeff Applewhite. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver library for NetApp C-mode block storage systems. """ import copy from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units import six from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap import ssc_cmode from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) QOS_CLEANUP_INTERVAL_SECONDS = 60 @six.add_metaclass(utils.TraceWrapperMetaclass) class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary): """NetApp block storage library for Data ONTAP (Cluster-mode).""" REQUIRED_CMODE_FLAGS = ['netapp_vserver'] def __init__(self, driver_name, driver_protocol, **kwargs): super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name, driver_protocol, **kwargs) self.configuration.append_config_values(na_opts.netapp_cluster_opts) self.driver_mode = 'cluster' def do_setup(self, context): super(NetAppBlockStorageCmodeLibrary, self).do_setup(context) na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) self.vserver = self.configuration.netapp_vserver self.zapi_client = client_cmode.Client( transport_type=self.configuration.netapp_transport_type, username=self.configuration.netapp_login, password=self.configuration.netapp_password, hostname=self.configuration.netapp_server_hostname, port=self.configuration.netapp_server_port, vserver=self.vserver) self.ssc_vols = {} self.stale_vols = set() self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) def check_for_setup_error(self): """Check that the driver is working and can communicate.""" ssc_cmode.check_ssc_api_permissions(self.zapi_client) ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.get_connection(), self.vserver, synchronous=True) if not self._get_filtered_pools(): msg = _('No pools are available for provisioning volumes. 
' 'Ensure that the configuration option ' 'netapp_pool_name_search_pattern is set correctly.') raise exception.NetAppDriverException(msg) super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error() self._start_periodic_tasks() def _start_periodic_tasks(self): # Start the task that harvests soft-deleted QoS policy groups. harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall( self.zapi_client.remove_unused_qos_policy_groups) harvest_qos_periodic_task.start( interval=QOS_CLEANUP_INTERVAL_SECONDS, initial_delay=QOS_CLEANUP_INTERVAL_SECONDS) def _create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None): """Creates a LUN, handling Data ONTAP differences as needed.""" self.zapi_client.create_lun( volume_name, lun_name, size, metadata, qos_policy_group_name) self._update_stale_vols( volume=ssc_cmode.NetAppVolume(volume_name, self.vserver)) def _create_lun_handle(self, metadata): """Returns LUN handle based on filer type.""" return '%s:%s' % (self.vserver, metadata['Path']) def _find_mapped_lun_igroup(self, path, initiator_list): """Find an igroup for a LUN mapped to the given initiator(s).""" initiator_igroups = self.zapi_client.get_igroup_by_initiators( initiator_list) lun_maps = self.zapi_client.get_lun_map(path) if initiator_igroups and lun_maps: for igroup in initiator_igroups: igroup_name = igroup['initiator-group-name'] if igroup_name.startswith(na_utils.OPENSTACK_PREFIX): for lun_map in lun_maps: if lun_map['initiator-group'] == igroup_name: return igroup_name, lun_map['lun-id'] return None, None def _clone_lun(self, name, new_name, space_reserved=None, qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None): """Clone LUN with the given handle to the new name.""" if not space_reserved: space_reserved = self.lun_space_reservation metadata = self._get_lun_attr(name, 'metadata') volume = metadata['Volume'] self.zapi_client.clone_lun(volume, name, new_name, space_reserved, qos_policy_group_name=qos_policy_group_name, src_block=src_block, dest_block=dest_block, block_count=block_count, source_snapshot=source_snapshot) LOG.debug("Cloned LUN with new name %s", new_name) lun = self.zapi_client.get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s' % (volume, new_name)) if len(lun) == 0: msg = _("No cloned LUN named %s found on the filer") raise exception.VolumeBackendAPIException(data=msg % new_name) clone_meta = self._create_lun_meta(lun[0]) self._add_lun_to_table( block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'], clone_meta['Path']), new_name, lun[0].get_child_content('size'), clone_meta)) self._update_stale_vols( volume=ssc_cmode.NetAppVolume(volume, self.vserver)) def _create_lun_meta(self, lun): """Creates LUN metadata dictionary.""" self.zapi_client.check_is_naelement(lun) meta_dict = {} meta_dict['Vserver'] = lun.get_child_content('vserver') meta_dict['Volume'] = lun.get_child_content('volume') meta_dict['Qtree'] = lun.get_child_content('qtree') meta_dict['Path'] = lun.get_child_content('path') meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') meta_dict['SpaceReserved'] = \ lun.get_child_content('is-space-reservation-enabled') meta_dict['UUID'] = lun.get_child_content('uuid') return meta_dict def _get_fc_target_wwpns(self, include_partner=True): return self.zapi_client.get_fc_target_wwpns() def _configure_tunneling(self, do_tunneling=False): """Configures tunneling for Data ONTAP cluster.""" if do_tunneling: self.zapi_client.set_vserver(self.vserver) else: 
self.zapi_client.set_vserver(None) def _update_volume_stats(self, filter_function=None, goodness_function=None): """Retrieve stats info from vserver.""" sync = True if self.ssc_vols is None else False ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.get_connection(), self.vserver, synchronous=sync) LOG.debug('Updating volume stats') data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.driver_name data['vendor_name'] = 'NetApp' data['driver_version'] = self.VERSION data['storage_protocol'] = self.driver_protocol data['pools'] = self._get_pool_stats( filter_function=filter_function, goodness_function=goodness_function) data['sparse_copy_volume'] = True self.zapi_client.provide_ems(self, self.driver_name, self.app_version) self._stats = data def _get_pool_stats(self, filter_function=None, goodness_function=None): """Retrieve pool (Data ONTAP volume) stats info from SSC volumes.""" pools = [] if not self.ssc_vols: return pools filtered_pools = self._get_filtered_pools() self.perf_library.update_performance_cache(filtered_pools) for vol in filtered_pools: pool_name = vol.id['name'] pool = dict() pool['pool_name'] = pool_name pool['QoS_support'] = True pool['reserved_percentage'] = ( self.reserved_percentage) pool['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) # convert sizes to GB total = float(vol.space['size_total_bytes']) total /= units.Gi pool['total_capacity_gb'] = na_utils.round_down(total, '0.01') free = float(vol.space['size_avl_bytes']) free /= units.Gi pool['free_capacity_gb'] = na_utils.round_down(free, '0.01') pool['provisioned_capacity_gb'] = (round( pool['total_capacity_gb'] - pool['free_capacity_gb'], 2)) pool['netapp_raid_type'] = vol.aggr['raid_type'] pool['netapp_disk_type'] = vol.aggr['disk_type'] mirrored = vol in self.ssc_vols['mirrored'] pool['netapp_mirrored'] = six.text_type(mirrored).lower() pool['netapp_unmirrored'] = six.text_type(not mirrored).lower() dedup = vol in self.ssc_vols['dedup'] pool['netapp_dedup'] = six.text_type(dedup).lower() pool['netapp_nodedup'] = six.text_type(not dedup).lower() compression = vol in self.ssc_vols['compression'] pool['netapp_compression'] = six.text_type(compression).lower() pool['netapp_nocompression'] = six.text_type( not compression).lower() thin = vol in self.ssc_vols['thin'] pool['netapp_thin_provisioned'] = six.text_type(thin).lower() pool['netapp_thick_provisioned'] = six.text_type(not thin).lower() thick = (not thin and self.configuration.netapp_lun_space_reservation == 'enabled') pool['thick_provisioning_support'] = thick pool['thin_provisioning_support'] = not thick utilization = self.perf_library.get_node_utilization_for_pool( pool_name) pool['utilization'] = na_utils.round_down(utilization, '0.01') pool['filter_function'] = filter_function pool['goodness_function'] = goodness_function pool['consistencygroup_support'] = True pools.append(pool) return pools def _get_filtered_pools(self): """Return filtered pools given a pool name search pattern.""" pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) filtered_pools = [] for vol in self.ssc_vols.get('all', []): vol_name = vol.id['name'] if pool_regex.match(vol_name): msg = ("Volume '%(vol_name)s' matches against regular " "expression: %(vol_pattern)s") LOG.debug(msg, {'vol_name': vol_name, 'vol_pattern': pool_regex.pattern}) filtered_pools.append(vol) else: msg = ("Volume '%(vol_name)s' does not match against regular " "expression: %(vol_pattern)s") 
LOG.debug(msg, {'vol_name': vol_name, 'vol_pattern': pool_regex.pattern}) return filtered_pools @utils.synchronized('update_stale') def _update_stale_vols(self, volume=None, reset=False): """Populates stale vols with vol and returns set copy if reset.""" if volume: self.stale_vols.add(volume) if reset: set_copy = copy.deepcopy(self.stale_vols) self.stale_vols.clear() return set_copy @utils.synchronized("refresh_ssc_vols") def refresh_ssc_vols(self, vols): """Refreshes ssc_vols with latest entries.""" self.ssc_vols = vols def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" lun = self.lun_table.get(volume['name']) netapp_vol = None if lun: netapp_vol = lun.get_metadata_property('Volume') super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume) try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) except exception.Invalid: # Delete even if there was invalid qos policy specified for the # volume. qos_policy_group_info = None self._mark_qos_policy_group_for_deletion(qos_policy_group_info) if netapp_vol: self._update_stale_vols( volume=ssc_cmode.NetAppVolume(netapp_vol, self.vserver)) msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s' LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info}) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" lun = self.lun_table.get(snapshot['name']) netapp_vol = lun.get_metadata_property('Volume') if lun else None super(NetAppBlockStorageCmodeLibrary, self).delete_snapshot(snapshot) if netapp_vol: self._update_stale_vols( volume=ssc_cmode.NetAppVolume(netapp_vol, self.vserver)) def _check_volume_type_for_lun(self, volume, lun, existing_ref, extra_specs): """Check if LUN satisfies volume type.""" def scan_ssc_data(): volumes = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs) for vol in volumes: if lun.get_metadata_property('Volume') == vol.id['name']: return True return False match_read = scan_ssc_data() if not match_read: ssc_cmode.get_cluster_latest_ssc( self, self.zapi_client.get_connection(), self.vserver) match_read = scan_ssc_data() if not match_read: raise exception.ManageExistingVolumeTypeMismatch( reason=(_("LUN with given ref %(ref)s does not satisfy volume" " type. Ensure LUN volume with ssc features is" " present on vserver %(vs)s.") % {'ref': existing_ref, 'vs': self.vserver})) def _get_preferred_target_from_list(self, target_details_list, filter=None): # cDOT iSCSI LIFs do not migrate from controller to controller # in failover. Rather, an iSCSI LIF must be configured on each # controller and the initiator has to take responsibility for # using a LIF that is UP. In failover, the iSCSI LIF on the # downed controller goes DOWN until the controller comes back up. # # Currently Nova only accepts a single target when obtaining # target details from Cinder, so we pass back the first portal # with an UP iSCSI LIF. There are plans to have Nova accept # and try multiple targets. When that happens, we can and should # remove this filter and return all targets since their operational # state could change between the time we test here and the time # Nova uses the target. 
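        # The operational-address list below acts as a whitelist: the
        # base-class helper skips any portal whose address is not backed
        # by a currently UP LIF.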
operational_addresses = ( self.zapi_client.get_operational_network_interface_addresses()) return (super(NetAppBlockStorageCmodeLibrary, self) ._get_preferred_target_from_list(target_details_list, filter=operational_addresses)) def _setup_qos_for_volume(self, volume, extra_specs): try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume, extra_specs) except exception.Invalid: msg = _('Invalid QoS specification detected while getting QoS ' 'policy for volume %s') % volume['id'] raise exception.VolumeBackendAPIException(data=msg) self.zapi_client.provision_qos_policy_group(qos_policy_group_info) return qos_policy_group_info def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info): self.zapi_client.mark_qos_policy_group_for_deletion( qos_policy_group_info) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. """ try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) except exception.Invalid: # Unmanage even if there was invalid qos policy specified for the # volume. qos_policy_group_info = None self._mark_qos_policy_group_for_deletion(qos_policy_group_info) super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/ssc_cmode.py0000664000567000056710000006276012701406250026140 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Alex Meade. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Storage service catalog utility functions and classes for NetApp systems. """ import copy import threading from oslo_log import log as logging from oslo_utils import timeutils import six from cinder import exception from cinder.i18n import _, _LI, _LW from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) class NetAppVolume(object): """Represents a NetApp volume. Present attributes id - name, vserver, junction_path, type aggr - name, raid_type, ha_policy, disk_type sis - dedup, compression state - status, vserver_root, cluster_volume, inconsistent, invalid, junction_active qos - qos_policy_group space - space-guarantee-enabled, space-guarantee, thin_provisioned, size_avl_bytes, size_total_bytes mirror - mirrored i.e. 
dp mirror export - path """ def __init__(self, name, vserver=None): self.id = {} self.aggr = {} self.sis = {} self.state = {} self.qos = {} self.space = {} self.mirror = {} self.export = {} self.id['name'] = name self.id['vserver'] = vserver def __eq__(self, other): """Checks for equality.""" if (self.id['name'] == other.id['name'] and self.id['vserver'] == other.id['vserver']): return True def __hash__(self): """Computes hash for the object.""" return hash(self.id['name']) def __cmp__(self, other): """Implements comparison logic for volumes.""" self_size_avl = self.space.get('size_avl_bytes') other_size_avl = other.space.get('size_avl_bytes') if self_size_avl is None and other_size_avl is not None: return -1 elif self_size_avl is not None and other_size_avl is None: return 1 elif self_size_avl is None and other_size_avl is None: return 0 elif int(self_size_avl) < int(other_size_avl): return -1 elif int(self_size_avl) > int(other_size_avl): return 1 else: return 0 def __str__(self): """Returns human readable form for object.""" vol_str = "NetApp Volume id: %s, aggr: %s,"\ " space: %s, sis: %s, state: %s, qos: %s"\ % (self.id, self.aggr, self.space, self.sis, self.state, self.qos) return vol_str @utils.trace_method def get_cluster_vols_with_ssc(na_server, vserver, volume=None): """Gets ssc vols for cluster vserver.""" volumes = query_cluster_vols_for_ssc(na_server, vserver, volume) sis_vols = get_sis_vol_dict(na_server, vserver, volume) mirrored_vols = get_snapmirror_vol_dict(na_server, vserver, volume) aggrs = {} for vol in volumes: aggr_name = vol.aggr['name'] if aggr_name: if aggr_name in aggrs: aggr_attrs = aggrs[aggr_name] else: aggr_attrs = query_aggr_options(na_server, aggr_name) if aggr_attrs: eff_disk_type = query_aggr_storage_disk(na_server, aggr_name) aggr_attrs['disk_type'] = eff_disk_type aggrs[aggr_name] = aggr_attrs vol.aggr['raid_type'] = aggr_attrs.get('raid_type') vol.aggr['ha_policy'] = aggr_attrs.get('ha_policy') vol.aggr['disk_type'] = aggr_attrs.get('disk_type') if sis_vols: if vol.id['name'] in sis_vols: vol.sis['dedup'] = sis_vols[vol.id['name']]['dedup'] vol.sis['compression'] =\ sis_vols[vol.id['name']]['compression'] else: vol.sis['dedup'] = False vol.sis['compression'] = False if (vol.space['space-guarantee-enabled'] and (vol.space['space-guarantee'] == 'file' or vol.space['space-guarantee'] == 'volume')): vol.space['thin_provisioned'] = False else: vol.space['thin_provisioned'] = True if mirrored_vols: vol.mirror['mirrored'] = False if vol.id['name'] in mirrored_vols: for mirr_attrs in mirrored_vols[vol.id['name']]: if (mirr_attrs['rel_type'] == 'data_protection' and mirr_attrs['mirr_state'] == 'snapmirrored'): vol.mirror['mirrored'] = True break return volumes @utils.trace_method def query_cluster_vols_for_ssc(na_server, vserver, volume=None): """Queries cluster volumes for ssc.""" query = {'volume-attributes': None} volume_id = { 'volume-id-attributes': { 'owning-vserver-name': vserver, 'type': 'rw', 'style': 'flex', }, } if volume: volume_id['volume-id-attributes']['name'] = volume query['volume-attributes'] = volume_id des_attr = {'volume-attributes': ['volume-id-attributes', 'volume-space-attributes', 'volume-state-attributes', 'volume-qos-attributes']} result = netapp_api.invoke_api(na_server, api_name='volume-get-iter', api_family='cm', query=query, des_result=des_attr, additional_elems=None, is_iter=True) vols = set() for res in result: records = res.get_child_content('num-records') if int(records) > 0: attr_list = 
res.get_child_by_name('attributes-list') if attr_list: vol_attrs = attr_list.get_children() vols_found = create_vol_list(vol_attrs) vols.update(vols_found) return vols @utils.trace_method def create_vol_list(vol_attrs): """Creates vol list with features from attr list.""" vols = set() for v in vol_attrs: try: # name and vserver are mandatory # Absence will skip by giving KeyError. name = v['volume-id-attributes']['name'] vserver = v['volume-id-attributes']['owning-vserver-name'] vol = NetAppVolume(name, vserver) vol.id['type'] =\ v['volume-id-attributes'].get_child_content('type') if vol.id['type'] == "tmp": continue vol.id['junction_path'] =\ v['volume-id-attributes'].get_child_content('junction-path') # state attributes mandatory. vol.state['vserver_root'] =\ na_utils.to_bool( v['volume-state-attributes'].get_child_content( 'is-vserver-root')) if vol.state['vserver_root']: continue vol.state['status'] =\ v['volume-state-attributes'].get_child_content('state') vol.state['inconsistent'] =\ na_utils.to_bool( v['volume-state-attributes'].get_child_content( 'is-inconsistent')) vol.state['invalid'] =\ na_utils.to_bool( v['volume-state-attributes'].get_child_content( 'is-invalid')) vol.state['junction_active'] =\ na_utils.to_bool( v['volume-state-attributes'].get_child_content( 'is-junction-active')) vol.state['cluster_volume'] =\ na_utils.to_bool( v['volume-state-attributes'].get_child_content( 'is-cluster-volume')) if (vol.state['status'] != 'online' or vol.state['inconsistent'] or vol.state['invalid']): # offline, invalid and inconsistent volumes are not usable continue # aggr attributes mandatory. vol.aggr['name'] =\ v['volume-id-attributes']['containing-aggregate-name'] # space attributes mandatory. vol.space['size_avl_bytes'] =\ v['volume-space-attributes']['size-available'] vol.space['size_total_bytes'] =\ v['volume-space-attributes']['size-total'] vol.space['space-guarantee-enabled'] =\ na_utils.to_bool( v['volume-space-attributes'].get_child_content( 'is-space-guarantee-enabled')) vol.space['space-guarantee'] =\ v['volume-space-attributes'].get_child_content( 'space-guarantee') # qos attributes optional. if v.get_child_by_name('volume-qos-attributes'): vol.qos['qos_policy_group'] =\ v['volume-qos-attributes'].get_child_content( 'policy-group-name') else: vol.qos['qos_policy_group'] = None vols.add(vol) except KeyError as e: LOG.debug('Unexpected error while creating' ' ssc vol list. Message - %s', e) continue return vols @utils.trace_method def query_aggr_options(na_server, aggr_name): """Queries cluster aggr for attributes. Currently queries for raid and ha-policy. """ add_elems = {'aggregate': aggr_name} attrs = {} try: result = netapp_api.invoke_api(na_server, api_name='aggr-options-list-info', api_family='cm', query=None, des_result=None, additional_elems=add_elems, is_iter=False) for res in result: options = res.get_child_by_name('options') if options: op_list = options.get_children() for op in op_list: if op.get_child_content('name') == 'ha_policy': attrs['ha_policy'] = op.get_child_content('value') if op.get_child_content('name') == 'raidtype': attrs['raid_type'] = op.get_child_content('value') except Exception as e: LOG.debug("Exception querying aggr options. %s", e) return attrs @utils.trace_method def get_sis_vol_dict(na_server, vserver, volume=None): """Queries sis for volumes. If volume is present sis is queried for it. Records dedup and compression enabled. 
""" sis_vols = {} query_attr = {'vserver': vserver} if volume: vol_path = '/vol/%s' % (volume) query_attr['path'] = vol_path query = {'sis-status-info': query_attr} try: result = netapp_api.invoke_api(na_server, api_name='sis-get-iter', api_family='cm', query=query, is_iter=True) for res in result: attr_list = res.get_child_by_name('attributes-list') if attr_list: sis_status = attr_list.get_children() for sis in sis_status: path = sis.get_child_content('path') if not path: continue (___, __, vol) = path.rpartition('/') if not vol: continue v_sis = {} v_sis['compression'] = na_utils.to_bool( sis.get_child_content('is-compression-enabled')) v_sis['dedup'] = na_utils.to_bool( sis.get_child_content('state')) sis_vols[vol] = v_sis except Exception as e: LOG.debug("Exception querying sis information. %s", e) return sis_vols @utils.trace_method def get_snapmirror_vol_dict(na_server, vserver, volume=None): """Queries snapmirror volumes.""" mirrored_vols = {} query_attr = {'source-vserver': vserver} if volume: query_attr['source-volume'] = volume query = {'snapmirror-info': query_attr} try: result = netapp_api.invoke_api(na_server, api_name='snapmirror-get-iter', api_family='cm', query=query, is_iter=True) for res in result: attr_list = res.get_child_by_name('attributes-list') if attr_list: snap_info = attr_list.get_children() for snap in snap_info: src_volume = snap.get_child_content('source-volume') v_snap = {} v_snap['dest_loc'] =\ snap.get_child_content('destination-location') v_snap['rel_type'] =\ snap.get_child_content('relationship-type') v_snap['mirr_state'] =\ snap.get_child_content('mirror-state') if mirrored_vols.get(src_volume): mirrored_vols.get(src_volume).append(v_snap) else: mirrored_vols[src_volume] = [v_snap] except Exception as e: LOG.debug("Exception querying mirror information. %s", e) return mirrored_vols @utils.trace_method def query_aggr_storage_disk(na_server, aggr): """Queries for storage disks associated to an aggregate.""" query = {'storage-disk-info': {'disk-raid-info': {'disk-aggregate-info': {'aggregate-name': aggr}}}} des_attr = {'storage-disk-info': {'disk-raid-info': ['effective-disk-type']}} try: result = netapp_api.invoke_api(na_server, api_name='storage-disk-get-iter', api_family='cm', query=query, des_result=des_attr, additional_elems=None, is_iter=True) for res in result: attr_list = res.get_child_by_name('attributes-list') if attr_list: storage_disks = attr_list.get_children() for disk in storage_disks: raid_info = disk.get_child_by_name('disk-raid-info') if raid_info: eff_disk_type =\ raid_info.get_child_content('effective-disk-type') if eff_disk_type: return eff_disk_type else: continue except Exception as e: LOG.debug("Exception querying storage disk. 
%s", e) return 'unknown' @utils.trace_method def get_cluster_ssc(na_server, vserver): """Provides cluster volumes with ssc.""" netapp_volumes = get_cluster_vols_with_ssc(na_server, vserver) mirror_vols = set() dedup_vols = set() compress_vols = set() thin_prov_vols = set() ssc_map = {'mirrored': mirror_vols, 'dedup': dedup_vols, 'compression': compress_vols, 'thin': thin_prov_vols, 'all': netapp_volumes} for vol in netapp_volumes: if vol.sis.get('dedup'): dedup_vols.add(vol) if vol.sis.get('compression'): compress_vols.add(vol) if vol.mirror.get('mirrored'): mirror_vols.add(vol) if vol.space.get('thin_provisioned'): thin_prov_vols.add(vol) return ssc_map @utils.trace_method def refresh_cluster_stale_ssc(*args, **kwargs): """Refreshes stale ssc volumes with latest.""" backend = args[0] na_server = args[1] vserver = args[2] identity = six.text_type(id(backend)) lock_pr = '%s_%s' % ('refresh_ssc', identity) try: job_set = na_utils.set_safe_attr( backend, 'refresh_stale_running', True) if not job_set: return @utils.synchronized(lock_pr) def refresh_stale_ssc(): stale_vols = backend._update_stale_vols(reset=True) LOG.info(_LI('Running stale ssc refresh job for %(server)s' ' and vserver %(vs)s'), {'server': na_server, 'vs': vserver}) # refreshing single volumes can create inconsistency # hence doing manipulations on copy ssc_vols_copy = copy.deepcopy(backend.ssc_vols) refresh_vols = set() expired_vols = set() for vol in stale_vols: name = vol.id['name'] res = get_cluster_vols_with_ssc(na_server, vserver, name) if res: refresh_vols.add(res.pop()) else: expired_vols.add(vol) for vol in refresh_vols: for k in ssc_vols_copy: vol_set = ssc_vols_copy[k] vol_set.discard(vol) if k == "mirrored" and vol.mirror.get('mirrored'): vol_set.add(vol) if k == "dedup" and vol.sis.get('dedup'): vol_set.add(vol) if k == "compression" and vol.sis.get('compression'): vol_set.add(vol) if k == "thin" and vol.space.get('thin_provisioned'): vol_set.add(vol) if k == "all": vol_set.add(vol) for vol in expired_vols: for k in ssc_vols_copy: vol_set = ssc_vols_copy[k] vol_set.discard(vol) backend.refresh_ssc_vols(ssc_vols_copy) LOG.info(_LI('Successfully completed stale refresh job for' ' %(server)s and vserver %(vs)s'), {'server': na_server, 'vs': vserver}) refresh_stale_ssc() finally: na_utils.set_safe_attr(backend, 'refresh_stale_running', False) @utils.trace_method def get_cluster_latest_ssc(*args, **kwargs): """Updates volumes including ssc.""" backend = args[0] na_server = args[1] vserver = args[2] identity = six.text_type(id(backend)) lock_pr = '%s_%s' % ('refresh_ssc', identity) # As this depends on stale job running state # set flag as soon as job starts to avoid # job accumulation. 
try: job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True) if not job_set: return @utils.synchronized(lock_pr) def get_latest_ssc(): LOG.info(_LI('Running cluster latest ssc job for %(server)s' ' and vserver %(vs)s'), {'server': na_server, 'vs': vserver}) ssc_vols = get_cluster_ssc(na_server, vserver) backend.refresh_ssc_vols(ssc_vols) backend.ssc_run_time = timeutils.utcnow() LOG.info(_LI('Successfully completed ssc job for %(server)s' ' and vserver %(vs)s'), {'server': na_server, 'vs': vserver}) get_latest_ssc() finally: na_utils.set_safe_attr(backend, 'ssc_job_running', False) @utils.trace_method def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False): """Refresh cluster ssc for backend.""" if not isinstance(na_server, netapp_api.NaServer): raise exception.InvalidInput(reason=_("Backend server not NaServer.")) delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800) if getattr(backend, 'ssc_job_running', None): LOG.warning(_LW('ssc job in progress. Returning... ')) return elif (getattr(backend, 'ssc_run_time', None) is None or (backend.ssc_run_time and timeutils.is_older_than(backend.ssc_run_time, delta_secs))): if synchronous: get_cluster_latest_ssc(backend, na_server, vserver) else: t = threading.Timer(0, get_cluster_latest_ssc, args=[backend, na_server, vserver]) t.start() elif getattr(backend, 'refresh_stale_running', None): LOG.warning(_LW('refresh stale ssc job in progress. Returning... ')) return else: if backend.stale_vols: if synchronous: refresh_cluster_stale_ssc(backend, na_server, vserver) else: t = threading.Timer(0, refresh_cluster_stale_ssc, args=[backend, na_server, vserver]) t.start() @utils.trace_method def get_volumes_for_specs(ssc_vols, specs): """Shortlists volumes for extra specs provided.""" if specs is None or specs == {} or not isinstance(specs, dict): return ssc_vols['all'] result = copy.deepcopy(ssc_vols['all']) raid_type = specs.get('netapp:raid_type') disk_type = specs.get('netapp:disk_type') bool_specs_list = ['netapp_mirrored', 'netapp_unmirrored', 'netapp_dedup', 'netapp_nodedup', 'netapp_compression', 'netapp_nocompression', 'netapp_thin_provisioned', 'netapp_thick_provisioned'] b_specs = {} for spec in bool_specs_list: b_specs[spec] = na_utils.to_bool(specs.get(spec))\ if specs.get(spec) else None def _spec_ineffect(b_specs, spec, opp_spec): """If the spec with opposite spec is ineffective.""" if ((b_specs[spec] is None and b_specs[opp_spec] is None) or (b_specs[spec] == b_specs[opp_spec])): return True else: return False if _spec_ineffect(b_specs, 'netapp_mirrored', 'netapp_unmirrored'): pass else: if b_specs['netapp_mirrored'] or b_specs['netapp_unmirrored'] is False: result = result & ssc_vols['mirrored'] else: result = result - ssc_vols['mirrored'] if _spec_ineffect(b_specs, 'netapp_dedup', 'netapp_nodedup'): pass else: if b_specs['netapp_dedup'] or b_specs['netapp_nodedup'] is False: result = result & ssc_vols['dedup'] else: result = result - ssc_vols['dedup'] if _spec_ineffect(b_specs, 'netapp_compression', 'netapp_nocompression'): pass else: if (b_specs['netapp_compression'] or b_specs['netapp_nocompression'] is False): result = result & ssc_vols['compression'] else: result = result - ssc_vols['compression'] if _spec_ineffect(b_specs, 'netapp_thin_provisioned', 'netapp_thick_provisioned'): pass else: if (b_specs['netapp_thin_provisioned'] or b_specs['netapp_thick_provisioned'] is False): result = result & ssc_vols['thin'] else: result = result - ssc_vols['thin'] if raid_type or disk_type: tmp = copy.deepcopy(result) 
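        # Iterate over a deep copy because result.discard() below mutates
        # the set; removing members from a set while iterating it directly
        # would raise a RuntimeError.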
for vol in tmp: if raid_type: vol_raid = vol.aggr['raid_type'] vol_raid = vol_raid.lower() if vol_raid else None if raid_type.lower() != vol_raid: result.discard(vol) if disk_type: vol_dtype = vol.aggr['disk_type'] vol_dtype = vol_dtype.lower() if vol_dtype else None if disk_type.lower() != vol_dtype: result.discard(vol) return result @utils.trace_method def check_ssc_api_permissions(client_cmode): """Checks backend SSC API permissions for the user.""" api_map = {'storage-disk-get-iter': ['netapp:disk_type'], 'snapmirror-get-iter': ['netapp_mirrored', 'netapp_unmirrored'], 'sis-get-iter': ['netapp_dedup', 'netapp_nodedup', 'netapp_compression', 'netapp_nocompression'], 'aggr-options-list-info': ['netapp:raid_type'], 'volume-get-iter': []} failed_apis = client_cmode.check_apis_on_cluster(api_map.keys()) if failed_apis: if 'volume-get-iter' in failed_apis: msg = _("Fatal error: User not permitted" " to query NetApp volumes.") raise exception.VolumeBackendAPIException(data=msg) else: unsupp_ssc_features = [] for fail in failed_apis: unsupp_ssc_features.extend(api_map[fail]) LOG.warning(_LW("The user does not have access or sufficient " "privileges to use all netapp APIs. The " "following extra_specs will fail or be ignored: " "%s"), unsupp_ssc_features) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py0000664000567000056710000001201312701406250026370 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp Data ONTAP (7-mode) iSCSI storage systems. 
""" from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap import block_7mode LOG = logging.getLogger(__name__) class NetApp7modeISCSIDriver(driver.BaseVD, driver.ConsistencyGroupVD, driver.ManageableVD, driver.ExtendVD, driver.TransferVD, driver.SnapshotVD): """NetApp 7-mode iSCSI volume driver.""" DRIVER_NAME = 'NetApp_iSCSI_7mode_direct' def __init__(self, *args, **kwargs): super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs) self.library = block_7mode.NetAppBlockStorage7modeLibrary( self.DRIVER_NAME, 'iSCSI', **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh, self.get_filter_function(), self.get_goodness_function()) def get_default_filter_function(self): return self.library.get_default_filter_function() def get_default_goodness_function(self): return self.library.get_default_goodness_function() def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def manage_existing(self, volume, existing_ref): return self.library.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.library.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): return self.library.unmanage(volume) def initialize_connection(self, volume, connector): return self.library.initialize_connection_iscsi(volume, connector) def terminate_connection(self, volume, connector, **kwargs): return self.library.terminate_connection_iscsi(volume, connector, **kwargs) def get_pool(self, volume): return self.library.get_pool(volume) def create_consistencygroup(self, context, group): return self.library.create_consistencygroup(group) def delete_consistencygroup(self, context, group, volumes): return self.library.delete_consistencygroup(group, volumes) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_consistencygroup(group, add_volumes=None, remove_volumes=None) def create_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.create_cgsnapshot(cgsnapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): return self.library.delete_cgsnapshot(cgsnapshot, snapshots) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): return self.library.create_consistencygroup_from_src( group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, source_cg=source_cg, source_vols=source_vols) 
cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/client/0000775000567000056710000000000012701406543025077 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py0000664000567000056710000011477612701406250030111 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import math from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _, _LW from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp import utils as na_utils from oslo_utils import strutils LOG = logging.getLogger(__name__) DELETED_PREFIX = 'deleted_cinder_' @six.add_metaclass(utils.TraceWrapperMetaclass) class Client(client_base.Client): def __init__(self, **kwargs): super(Client, self).__init__(**kwargs) self.vserver = kwargs.get('vserver', None) self.connection.set_vserver(self.vserver) # Default values to run first api self.connection.set_api_version(1, 15) (major, minor) = self.get_ontapi_version(cached=False) self.connection.set_api_version(major, minor) self._init_features() def _init_features(self): super(Client, self)._init_features() ontapi_version = self.get_ontapi_version() # major, minor ontapi_1_2x = (1, 20) <= ontapi_version < (1, 30) ontapi_1_30 = ontapi_version >= (1, 30) self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x) self.features.add_feature('FAST_CLONE_DELETE', supported=ontapi_1_30) self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', supported=ontapi_1_30) def _invoke_vserver_api(self, na_element, vserver): server = copy.copy(self.connection) server.set_vserver(vserver) result = server.invoke_successfully(na_element, True) return result def _has_records(self, api_result_element): num_records = api_result_element.get_child_content('num-records') return bool(num_records and '0' != num_records) def set_vserver(self, vserver): self.connection.set_vserver(vserver) def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" iscsi_if_iter = netapp_api.NaElement('iscsi-interface-get-iter') result = self.connection.invoke_successfully(iscsi_if_iter, True) tgt_list = [] num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: attr_list = result.get_child_by_name('attributes-list') iscsi_if_list = attr_list.get_children() for iscsi_if in iscsi_if_list: d = dict() d['address'] = iscsi_if.get_child_content('ip-address') d['port'] = iscsi_if.get_child_content('ip-port') d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag') d['interface-enabled'] = iscsi_if.get_child_content( 'is-interface-enabled') tgt_list.append(d) return tgt_list def 
set_iscsi_chap_authentication(self, iqn, username, password): """Provides NetApp host's CHAP credentials to the backend.""" initiator_exists = self.check_iscsi_initiator_exists(iqn) command_template = ('iscsi security %(mode)s -vserver %(vserver)s ' '-initiator-name %(iqn)s -auth-type CHAP ' '-user-name %(username)s') if initiator_exists: LOG.debug('Updating CHAP authentication for %(iqn)s.', {'iqn': iqn}) command = command_template % { 'mode': 'modify', 'vserver': self.vserver, 'iqn': iqn, 'username': username, } else: LOG.debug('Adding initiator %(iqn)s with CHAP authentication.', {'iqn': iqn}) command = command_template % { 'mode': 'create', 'vserver': self.vserver, 'iqn': iqn, 'username': username, } try: with self.ssh_client.ssh_connect_semaphore: ssh_pool = self.ssh_client.ssh_pool with ssh_pool.item() as ssh: self.ssh_client.execute_command_with_prompt(ssh, command, 'Password:', password) except Exception as e: msg = _('Failed to set CHAP authentication for target IQN %(iqn)s.' ' Details: %(ex)s') % { 'iqn': iqn, 'ex': e, } LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def check_iscsi_initiator_exists(self, iqn): """Returns True if initiator exists.""" initiator_exists = True try: auth_list = netapp_api.NaElement('iscsi-initiator-get-auth') auth_list.add_new_child('initiator', iqn) self.connection.invoke_successfully(auth_list, True) except netapp_api.NaApiError: initiator_exists = False return initiator_exists def get_fc_target_wwpns(self): """Gets the FC target details.""" wwpns = [] port_name_list_api = netapp_api.NaElement('fcp-port-name-get-iter') port_name_list_api.add_new_child('max-records', '100') result = self.connection.invoke_successfully(port_name_list_api, True) num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: for port_name_info in result.get_child_by_name( 'attributes-list').get_children(): if port_name_info.get_child_content('is-used') != 'true': continue wwpn = port_name_info.get_child_content('port-name').lower() wwpns.append(wwpn) return wwpns def get_iscsi_service_details(self): """Returns iscsi iqn.""" iscsi_service_iter = netapp_api.NaElement('iscsi-service-get-iter') result = self.connection.invoke_successfully(iscsi_service_iter, True) if result.get_child_content('num-records') and\ int(result.get_child_content('num-records')) >= 1: attr_list = result.get_child_by_name('attributes-list') iscsi_service = attr_list.get_child_by_name('iscsi-service-info') return iscsi_service.get_child_content('node-name') LOG.debug('No iSCSI service found for vserver %s', self.vserver) return None def get_lun_list(self): """Gets the list of LUNs on filer. Gets the LUNs from cluster with vserver. 
""" luns = [] tag = None while True: api = netapp_api.NaElement('lun-get-iter') api.add_new_child('max-records', '100') if tag: api.add_new_child('tag', tag, True) lun_info = netapp_api.NaElement('lun-info') lun_info.add_new_child('vserver', self.vserver) query = netapp_api.NaElement('query') query.add_child_elem(lun_info) api.add_child_elem(query) result = self.connection.invoke_successfully(api, True) if result.get_child_by_name('num-records') and\ int(result.get_child_content('num-records')) >= 1: attr_list = result.get_child_by_name('attributes-list') luns.extend(attr_list.get_children()) tag = result.get_child_content('next-tag') if tag is None: break return luns def get_lun_map(self, path): """Gets the LUN map by LUN path.""" tag = None map_list = [] while True: lun_map_iter = netapp_api.NaElement('lun-map-get-iter') lun_map_iter.add_new_child('max-records', '100') if tag: lun_map_iter.add_new_child('tag', tag, True) query = netapp_api.NaElement('query') lun_map_iter.add_child_elem(query) query.add_node_with_children('lun-map-info', **{'path': path}) result = self.connection.invoke_successfully(lun_map_iter, True) tag = result.get_child_content('next-tag') if result.get_child_content('num-records') and \ int(result.get_child_content('num-records')) >= 1: attr_list = result.get_child_by_name('attributes-list') lun_maps = attr_list.get_children() for lun_map in lun_maps: lun_m = dict() lun_m['initiator-group'] = lun_map.get_child_content( 'initiator-group') lun_m['lun-id'] = lun_map.get_child_content('lun-id') lun_m['vserver'] = lun_map.get_child_content('vserver') map_list.append(lun_m) if tag is None: break return map_list def _get_igroup_by_initiator_query(self, initiator, tag): igroup_get_iter = netapp_api.NaElement('igroup-get-iter') igroup_get_iter.add_new_child('max-records', '100') if tag: igroup_get_iter.add_new_child('tag', tag, True) query = netapp_api.NaElement('query') igroup_info = netapp_api.NaElement('initiator-group-info') query.add_child_elem(igroup_info) igroup_info.add_new_child('vserver', self.vserver) initiators = netapp_api.NaElement('initiators') igroup_info.add_child_elem(initiators) igroup_get_iter.add_child_elem(query) initiators.add_node_with_children( 'initiator-info', **{'initiator-name': initiator}) # limit results to just the attributes of interest desired_attrs = netapp_api.NaElement('desired-attributes') desired_igroup_info = netapp_api.NaElement('initiator-group-info') desired_igroup_info.add_node_with_children( 'initiators', **{'initiator-info': None}) desired_igroup_info.add_new_child('vserver', None) desired_igroup_info.add_new_child('initiator-group-name', None) desired_igroup_info.add_new_child('initiator-group-type', None) desired_igroup_info.add_new_child('initiator-group-os-type', None) desired_attrs.add_child_elem(desired_igroup_info) igroup_get_iter.add_child_elem(desired_attrs) return igroup_get_iter def get_igroup_by_initiators(self, initiator_list): """Get igroups exactly matching a set of initiators.""" tag = None igroup_list = [] if not initiator_list: return igroup_list initiator_set = set(initiator_list) while True: # C-mode getter APIs can't do an 'and' query, so match the first # initiator (which will greatly narrow the search results) and # filter the rest in this method. 
query = self._get_igroup_by_initiator_query(initiator_list[0], tag) result = self.connection.invoke_successfully(query, True) tag = result.get_child_content('next-tag') num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: for igroup_info in result.get_child_by_name( 'attributes-list').get_children(): initiator_set_for_igroup = set() for initiator_info in igroup_info.get_child_by_name( 'initiators').get_children(): initiator_set_for_igroup.add( initiator_info.get_child_content('initiator-name')) if initiator_set == initiator_set_for_igroup: igroup = {'initiator-group-os-type': igroup_info.get_child_content( 'initiator-group-os-type'), 'initiator-group-type': igroup_info.get_child_content( 'initiator-group-type'), 'initiator-group-name': igroup_info.get_child_content( 'initiator-group-name')} igroup_list.append(igroup) if tag is None: break return igroup_list def clone_lun(self, volume, name, new_name, space_reserved='true', qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None): # zAPI can only handle 2^24 blocks per range bc_limit = 2 ** 24 # 8GB # zAPI can only handle 32 block ranges per call br_limit = 32 z_limit = br_limit * bc_limit # 256 GB z_calls = int(math.ceil(block_count / float(z_limit))) zbc = block_count if z_calls == 0: z_calls = 1 for _call in range(0, z_calls): if zbc > z_limit: block_count = z_limit zbc -= z_limit else: block_count = zbc zapi_args = { 'volume': volume, 'source-path': name, 'destination-path': new_name, 'space-reserve': space_reserved, } if source_snapshot: zapi_args['snapshot-name'] = source_snapshot clone_create = netapp_api.NaElement.create_node_with_children( 'clone-create', **zapi_args) if qos_policy_group_name is not None: clone_create.add_new_child('qos-policy-group-name', qos_policy_group_name) if block_count > 0: block_ranges = netapp_api.NaElement("block-ranges") segments = int(math.ceil(block_count / float(bc_limit))) bc = block_count for _segment in range(0, segments): if bc > bc_limit: block_count = bc_limit bc -= bc_limit else: block_count = bc block_range =\ netapp_api.NaElement.create_node_with_children( 'block-range', **{'source-block-number': six.text_type(src_block), 'destination-block-number': six.text_type(dest_block), 'block-count': six.text_type(block_count)}) block_ranges.add_child_elem(block_range) src_block += int(block_count) dest_block += int(block_count) clone_create.add_child_elem(block_ranges) self.connection.invoke_successfully(clone_create, True) def get_lun_by_args(self, **args): """Retrieves LUN with specified args.""" lun_iter = netapp_api.NaElement('lun-get-iter') lun_iter.add_new_child('max-records', '100') query = netapp_api.NaElement('query') lun_iter.add_child_elem(query) query.add_node_with_children('lun-info', **args) luns = self.connection.invoke_successfully(lun_iter, True) attr_list = luns.get_child_by_name('attributes-list') if not attr_list: return [] return attr_list.get_children() def file_assign_qos(self, flex_vol, qos_policy_group_name, file_path): """Assigns the named QoS policy-group to a file.""" api_args = { 'volume': flex_vol, 'qos-policy-group-name': qos_policy_group_name, 'file': file_path, 'vserver': self.vserver, } return self.send_request('file-assign-qos', api_args, False) def provision_qos_policy_group(self, qos_policy_group_info): """Create QOS policy group on the backend if appropriate.""" if qos_policy_group_info is None: return # Legacy QOS uses externally provisioned QOS policy group, # so we don't need to 
create one on the backend. legacy = qos_policy_group_info.get('legacy') if legacy is not None: return spec = qos_policy_group_info.get('spec') if spec is not None: if not self.qos_policy_group_exists(spec['policy_name']): self.qos_policy_group_create(spec['policy_name'], spec['max_throughput']) else: self.qos_policy_group_modify(spec['policy_name'], spec['max_throughput']) def qos_policy_group_exists(self, qos_policy_group_name): """Checks if a QOS policy group exists.""" api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': qos_policy_group_name, }, }, 'desired-attributes': { 'qos-policy-group-info': { 'policy-group': None, }, }, } result = self.send_request('qos-policy-group-get-iter', api_args, False) return self._has_records(result) def qos_policy_group_create(self, qos_policy_group_name, max_throughput): """Creates a QOS policy group.""" api_args = { 'policy-group': qos_policy_group_name, 'max-throughput': max_throughput, 'vserver': self.vserver, } return self.send_request('qos-policy-group-create', api_args, False) def qos_policy_group_modify(self, qos_policy_group_name, max_throughput): """Modifies a QOS policy group.""" api_args = { 'policy-group': qos_policy_group_name, 'max-throughput': max_throughput, } return self.send_request('qos-policy-group-modify', api_args, False) def qos_policy_group_delete(self, qos_policy_group_name): """Attempts to delete a QOS policy group.""" api_args = {'policy-group': qos_policy_group_name} return self.send_request('qos-policy-group-delete', api_args, False) def qos_policy_group_rename(self, qos_policy_group_name, new_name): """Renames a QOS policy group.""" api_args = { 'policy-group-name': qos_policy_group_name, 'new-name': new_name, } return self.send_request('qos-policy-group-rename', api_args, False) def mark_qos_policy_group_for_deletion(self, qos_policy_group_info): """Do (soft) delete of backing QOS policy group for a cinder volume.""" if qos_policy_group_info is None: return spec = qos_policy_group_info.get('spec') # For cDOT we want to delete the QoS policy group that we created for # this cinder volume. Because the QoS policy may still be "in use" # after the zapi call to delete the volume itself returns successfully, # we instead rename the QoS policy group using a specific pattern and # later attempt on a best effort basis to delete any QoS policy groups # matching that pattern. if spec is not None: current_name = spec['policy_name'] new_name = DELETED_PREFIX + current_name try: self.qos_policy_group_rename(current_name, new_name) except netapp_api.NaApiError as ex: msg = _LW('Rename failure in cleanup of cDOT QOS policy group ' '%(name)s: %(ex)s') LOG.warning(msg, {'name': current_name, 'ex': ex}) # Attempt to delete any QoS policies named "delete-openstack-*". self.remove_unused_qos_policy_groups() def remove_unused_qos_policy_groups(self): """Deletes all QOS policy groups that are marked for deletion.""" api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': '%s*' % DELETED_PREFIX, 'vserver': self.vserver, } }, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } try: self.send_request('qos-policy-group-delete-iter', api_args, False) except netapp_api.NaApiError as ex: msg = 'Could not delete QOS policy groups. 
Details: %(ex)s' msg_args = {'ex': ex} LOG.debug(msg % msg_args) def set_lun_qos_policy_group(self, path, qos_policy_group): """Sets qos_policy_group on a LUN.""" api_args = { 'path': path, 'qos-policy-group': qos_policy_group, } return self.send_request('lun-set-qos-policy-group', api_args) def get_if_info_by_ip(self, ip): """Gets the network interface info by ip.""" net_if_iter = netapp_api.NaElement('net-interface-get-iter') net_if_iter.add_new_child('max-records', '10') query = netapp_api.NaElement('query') net_if_iter.add_child_elem(query) query.add_node_with_children( 'net-interface-info', **{'address': na_utils.resolve_hostname(ip)}) result = self.connection.invoke_successfully(net_if_iter, True) num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: attr_list = result.get_child_by_name('attributes-list') return attr_list.get_children() raise exception.NotFound( _('No interface found on cluster for ip %s') % ip) def get_vol_by_junc_vserver(self, vserver, junction): """Gets the volume by junction path and vserver.""" vol_iter = netapp_api.NaElement('volume-get-iter') vol_iter.add_new_child('max-records', '10') query = netapp_api.NaElement('query') vol_iter.add_child_elem(query) vol_attrs = netapp_api.NaElement('volume-attributes') query.add_child_elem(vol_attrs) vol_attrs.add_node_with_children( 'volume-id-attributes', **{'junction-path': junction, 'owning-vserver-name': vserver}) des_attrs = netapp_api.NaElement('desired-attributes') des_attrs.add_node_with_children('volume-attributes', **{'volume-id-attributes': None}) vol_iter.add_child_elem(des_attrs) result = self._invoke_vserver_api(vol_iter, vserver) num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: attr_list = result.get_child_by_name('attributes-list') vols = attr_list.get_children() vol_id = vols[0].get_child_by_name('volume-id-attributes') return vol_id.get_child_content('name') msg_fmt = {'vserver': vserver, 'junction': junction} raise exception.NotFound(_("No volume on cluster with vserver " "%(vserver)s and junction path " "%(junction)s ") % msg_fmt) def clone_file(self, flex_vol, src_path, dest_path, vserver, dest_exists=False): """Clones file on vserver.""" LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, " "dest %(dest_path)s, vserver %(vserver)s", {'volume': flex_vol, 'src_path': src_path, 'dest_path': dest_path, 'vserver': vserver}) clone_create = netapp_api.NaElement.create_node_with_children( 'clone-create', **{'volume': flex_vol, 'source-path': src_path, 'destination-path': dest_path}) major, minor = self.connection.get_api_version() if major == 1 and minor >= 20 and dest_exists: clone_create.add_new_child('destination-exists', 'true') self._invoke_vserver_api(clone_create, vserver) def get_file_usage(self, path, vserver): """Gets the file unique bytes.""" LOG.debug('Getting file usage for %s', path) file_use = netapp_api.NaElement.create_node_with_children( 'file-usage-get', **{'path': path}) res = self._invoke_vserver_api(file_use, vserver) unique_bytes = res.get_child_content('unique-bytes') LOG.debug('file-usage for path %(path)s is %(bytes)s', {'path': path, 'bytes': unique_bytes}) return unique_bytes def get_vserver_ips(self, vserver): """Get ips for the vserver.""" result = netapp_api.invoke_api( self.connection, api_name='net-interface-get-iter', is_iter=True, tunnel=vserver) if_list = [] for res in result: records = res.get_child_content('num-records') if records > 0: attr_list = 
res['attributes-list'] ifs = attr_list.get_children() if_list.extend(ifs) return if_list def check_apis_on_cluster(self, api_list=None): """Checks API availability and permissions on cluster. Checks API availability and permissions for executing user. Returns a list of failed apis. """ api_list = api_list or [] failed_apis = [] if api_list: api_version = self.connection.get_api_version() if api_version: major, minor = api_version if major == 1 and minor < 20: for api_name in api_list: na_el = netapp_api.NaElement(api_name) try: self.connection.invoke_successfully(na_el) except Exception as e: if isinstance(e, netapp_api.NaApiError): if (e.code == netapp_api.NaErrors ['API_NOT_FOUND'].code or e.code == netapp_api.NaErrors ['INSUFFICIENT_PRIVS'].code): failed_apis.append(api_name) elif major == 1 and minor >= 20: failed_apis = copy.copy(api_list) result = netapp_api.invoke_api( self.connection, api_name='system-user-capability-get-iter', api_family='cm', additional_elems=None, is_iter=True) for res in result: attr_list = res.get_child_by_name('attributes-list') if attr_list: capabilities = attr_list.get_children() for capability in capabilities: op_list = capability.get_child_by_name( 'operation-list') if op_list: ops = op_list.get_children() for op in ops: apis = op.get_child_content( 'api-name') if apis: api_list = apis.split(',') for api_name in api_list: if (api_name and api_name.strip() in failed_apis): failed_apis.remove( api_name) else: continue else: msg = _("Unsupported Clustered Data ONTAP version.") raise exception.VolumeBackendAPIException(data=msg) else: msg = _("Data ONTAP API version could not be determined.") raise exception.VolumeBackendAPIException(data=msg) return failed_apis def get_operational_network_interface_addresses(self): """Gets the IP addresses of operational LIFs on the vserver.""" api_args = { 'query': { 'net-interface-info': { 'operational-status': 'up' } }, 'desired-attributes': { 'net-interface-info': { 'address': None, } } } result = self.send_request('net-interface-get-iter', api_args) lif_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [lif_info.get_child_content('address') for lif_info in lif_info_list.get_children()] def get_flexvol_capacity(self, flexvol_path): """Gets total capacity and free capacity, in bytes, of the flexvol.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'junction-path': flexvol_path } } }, 'desired-attributes': { 'volume-attributes': { 'volume-space-attributes': { 'size-available': None, 'size-total': None, } } }, } result = self.send_request('volume-get-iter', api_args) attributes_list = result.get_child_by_name('attributes-list') volume_attributes = attributes_list.get_child_by_name( 'volume-attributes') volume_space_attributes = volume_attributes.get_child_by_name( 'volume-space-attributes') size_available = float( volume_space_attributes.get_child_content('size-available')) size_total = float( volume_space_attributes.get_child_content('size-total')) return size_total, size_available @utils.trace_method def delete_file(self, path_to_file): """Delete file at path.""" api_args = { 'path': path_to_file, } # Use fast clone deletion engine if it is supported. 
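        # FAST_CLONE_DELETE is registered in _init_features() based on the
        # ONTAPI version, so this check degrades gracefully on clusters too
        # old to support the is-clone-file flag.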
        if self.features.FAST_CLONE_DELETE:
            api_args['is-clone-file'] = 'true'
        self.send_request('file-delete-file', api_args, True)

    def _get_aggregates(self, aggregate_names=None, desired_attributes=None):
        query = {
            'aggr-attributes': {
                'aggregate-name': '|'.join(aggregate_names),
            }
        } if aggregate_names else None
        api_args = {}
        if query:
            api_args['query'] = query
        if desired_attributes:
            api_args['desired-attributes'] = desired_attributes
        result = self.send_request('aggr-get-iter', api_args,
                                   enable_tunneling=False)
        if not self._has_records(result):
            return []
        else:
            return result.get_child_by_name('attributes-list').get_children()

    def get_node_for_aggregate(self, aggregate_name):
        """Get home node for the specified aggregate.

        This API could return None, most notably if it was sent to a
        Vserver LIF, so the caller must be able to handle that case.
        """
        if not aggregate_name:
            return None
        desired_attributes = {
            'aggr-attributes': {
                'aggregate-name': None,
                'aggr-ownership-attributes': {
                    'home-name': None,
                },
            },
        }
        try:
            aggrs = self._get_aggregates(
                aggregate_names=[aggregate_name],
                desired_attributes=desired_attributes)
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EAPINOTFOUND:
                return None
            else:
                raise e
        if len(aggrs) < 1:
            return None
        aggr_ownership_attrs = aggrs[0].get_child_by_name(
            'aggr-ownership-attributes') or netapp_api.NaElement('none')
        return aggr_ownership_attrs.get_child_content('home-name')

    def get_performance_instance_uuids(self, object_name, node_name):
        """Get UUIDs of performance instances for a cluster node."""
        api_args = {
            'objectname': object_name,
            'query': {
                'instance-info': {
                    'uuid': node_name + ':*',
                }
            }
        }
        result = self.send_request('perf-object-instance-list-info-iter',
                                   api_args, enable_tunneling=False)
        uuids = []
        instances = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('None')
        for instance_info in instances.get_children():
            uuids.append(instance_info.get_child_content('uuid'))
        return uuids

    def get_performance_counters(self, object_name, instance_uuids,
                                 counter_names):
        """Gets one or more cDOT performance counters."""
        api_args = {
            'objectname': object_name,
            'instance-uuids': [
                {'instance-uuid': instance_uuid}
                for instance_uuid in instance_uuids
            ],
            'counters': [
                {'counter': counter} for counter in counter_names
            ],
        }
        result = self.send_request('perf-object-get-instances', api_args,
                                   enable_tunneling=False)
        counter_data = []
        timestamp = result.get_child_content('timestamp')
        instances = result.get_child_by_name(
            'instances') or netapp_api.NaElement('None')
        for instance in instances.get_children():
            instance_name = instance.get_child_content('name')
            instance_uuid = instance.get_child_content('uuid')
            node_name = instance_uuid.split(':')[0]
            counters = instance.get_child_by_name(
                'counters') or netapp_api.NaElement('None')
            for counter in counters.get_children():
                counter_name = counter.get_child_content('name')
                counter_value = counter.get_child_content('value')
                counter_data.append({
                    'instance-name': instance_name,
                    'instance-uuid': instance_uuid,
                    'node-name': node_name,
                    'timestamp': timestamp,
                    counter_name: counter_value,
                })
        return counter_data

    def get_snapshot(self, volume_name, snapshot_name):
        """Gets a single snapshot."""
        api_args = {
            'query': {
                'snapshot-info': {
                    'name': snapshot_name,
                    'volume': volume_name,
                },
            },
            'desired-attributes': {
                'snapshot-info': {
                    'name': None,
                    'volume': None,
                    'busy': None,
                    'snapshot-owners-list': {
                        'snapshot-owner': None,
                    }
                },
            },
        }
        result = self.send_request('snapshot-get-iter', api_args)
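        # NOTE(editor): hedged illustration, not part of the upstream driver.
        # send_request() feeds nested dicts like api_args above through
        # NaElement.translate_struct() (see api.py later in this archive),
        # so the query built here serializes to roughly:
        #
        #   <snapshot-get-iter>
        #     <query>
        #       <snapshot-info>
        #         <name>{snapshot_name}</name>
        #         <volume>{volume_name}</volume>
        #       </snapshot-info>
        #     </query>
        #     <desired-attributes>...</desired-attributes>
        #   </snapshot-get-iter>
        #
        # where {snapshot_name}/{volume_name} stand for the runtime values.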
self._handle_get_snapshot_return_failure(result, snapshot_name) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') snapshot_info_list = attributes_list.get_children() self._handle_snapshot_not_found(result, snapshot_info_list, snapshot_name, volume_name) snapshot_info = snapshot_info_list[0] snapshot = { 'name': snapshot_info.get_child_content('name'), 'volume': snapshot_info.get_child_content('volume'), 'busy': strutils.bool_from_string( snapshot_info.get_child_content('busy')), } snapshot_owners_list = snapshot_info.get_child_by_name( 'snapshot-owners-list') or netapp_api.NaElement('none') snapshot_owners = set([ snapshot_owner.get_child_content('owner') for snapshot_owner in snapshot_owners_list.get_children()]) snapshot['owners'] = snapshot_owners return snapshot def _handle_get_snapshot_return_failure(self, result, snapshot_name): error_record_list = result.get_child_by_name( 'volume-errors') or netapp_api.NaElement('none') errors = error_record_list.get_children() if errors: error = errors[0] error_code = error.get_child_content('errno') error_reason = error.get_child_content('reason') msg = _('Could not read information for snapshot %(name)s. ' 'Code: %(code)s. Reason: %(reason)s') msg_args = { 'name': snapshot_name, 'code': error_code, 'reason': error_reason, } if error_code == netapp_api.ESNAPSHOTNOTALLOWED: raise exception.SnapshotUnavailable(msg % msg_args) else: raise exception.VolumeBackendAPIException(data=msg % msg_args) def _handle_snapshot_not_found(self, result, snapshot_info_list, snapshot_name, volume_name): if not self._has_records(result): raise exception.SnapshotNotFound(snapshot_id=snapshot_name) elif len(snapshot_info_list) > 1: msg = _('Could not find unique snapshot %(snap)s on ' 'volume %(vol)s.') msg_args = {'snap': snapshot_name, 'vol': volume_name} raise exception.VolumeBackendAPIException(data=msg % msg_args) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/client/__init__.py0000664000567000056710000000000012701406250027171 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/client/client_base.py0000664000567000056710000004320212701406250027715 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
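# NOTE(editor): minimal usage sketch under assumed values; the host, port
# and credentials below are placeholders, not taken from this source:
#
#     from cinder.volume.drivers.netapp.dataontap.client import client_base
#
#     client = client_base.Client(hostname='10.0.0.5',
#                                 transport_type='https',
#                                 port=443,
#                                 username='admin',
#                                 password='secret')
#     major, minor = client.get_ontapi_version(cached=False)
#
# The keyword arguments mirror those consumed by Client.__init__() below.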
import copy import socket import sys from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils import six from cinder import exception from cinder.i18n import _, _LE, _LW, _LI from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) @six.add_metaclass(utils.TraceWrapperMetaclass) class Client(object): def __init__(self, **kwargs): host = kwargs['hostname'] username = kwargs['username'] password = kwargs['password'] self.connection = netapp_api.NaServer( host=host, transport_type=kwargs['transport_type'], port=kwargs['port'], username=username, password=password) self.ssh_client = self._init_ssh_client(host, username, password) def _init_ssh_client(self, host, username, password): return netapp_api.SSHUtil( host=host, username=username, password=password) def _init_features(self): """Set up the repository of available Data ONTAP features.""" self.features = na_utils.Features() def get_ontapi_version(self, cached=True): """Gets the supported ontapi version.""" if cached: return self.connection.get_api_version() ontapi_version = netapp_api.NaElement('system-get-ontapi-version') res = self.connection.invoke_successfully(ontapi_version, False) major = res.get_child_content('major-version') minor = res.get_child_content('minor-version') return major, minor def get_connection(self): return self.connection def check_is_naelement(self, elem): """Checks if object is instance of NaElement.""" if not isinstance(elem, netapp_api.NaElement): raise ValueError('Expects NaElement') def send_request(self, api_name, api_args=None, enable_tunneling=True): """Sends request to Ontapi.""" request = netapp_api.NaElement(api_name) if api_args: request.translate_struct(api_args) return self.connection.invoke_successfully(request, enable_tunneling) def create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None): """Issues API request for creating LUN on volume.""" path = '/vol/%s/%s' % (volume_name, lun_name) lun_create = netapp_api.NaElement.create_node_with_children( 'lun-create-by-size', **{'path': path, 'size': six.text_type(size), 'ostype': metadata['OsType'], 'space-reservation-enabled': metadata['SpaceReserved']}) if qos_policy_group_name: lun_create.add_new_child('qos-policy-group', qos_policy_group_name) try: self.connection.invoke_successfully(lun_create, True) except netapp_api.NaApiError as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error provisioning volume %(lun_name)s on " "%(volume_name)s. 
Details: %(ex)s"), {'lun_name': lun_name, 'volume_name': volume_name, 'ex': ex}) def destroy_lun(self, path, force=True): """Destroys the LUN at the path.""" lun_destroy = netapp_api.NaElement.create_node_with_children( 'lun-destroy', **{'path': path}) if force: lun_destroy.add_new_child('force', 'true') self.connection.invoke_successfully(lun_destroy, True) seg = path.split("/") LOG.debug("Destroyed LUN %s", seg[-1]) def map_lun(self, path, igroup_name, lun_id=None): """Maps LUN to the initiator and returns LUN id assigned.""" lun_map = netapp_api.NaElement.create_node_with_children( 'lun-map', **{'path': path, 'initiator-group': igroup_name}) if lun_id: lun_map.add_new_child('lun-id', lun_id) try: result = self.connection.invoke_successfully(lun_map, True) return result.get_child_content('lun-id-assigned') except netapp_api.NaApiError as e: code = e.code message = e.message LOG.warning(_LW('Error mapping LUN. Code :%(code)s, Message: ' '%(message)s'), {'code': code, 'message': message}) raise def unmap_lun(self, path, igroup_name): """Unmaps a LUN from given initiator.""" lun_unmap = netapp_api.NaElement.create_node_with_children( 'lun-unmap', **{'path': path, 'initiator-group': igroup_name}) try: self.connection.invoke_successfully(lun_unmap, True) except netapp_api.NaApiError as e: exc_info = sys.exc_info() LOG.warning(_LW("Error unmapping LUN. Code :%(code)s, Message: " "%(message)s"), {'code': e.code, 'message': e.message}) # if the LUN is already unmapped if e.code == '13115' or e.code == '9016': pass else: six.reraise(*exc_info) def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'): """Creates igroup with specified args.""" igroup_create = netapp_api.NaElement.create_node_with_children( 'igroup-create', **{'initiator-group-name': igroup, 'initiator-group-type': igroup_type, 'os-type': os_type}) self.connection.invoke_successfully(igroup_create, True) def add_igroup_initiator(self, igroup, initiator): """Adds initiators to the specified igroup.""" igroup_add = netapp_api.NaElement.create_node_with_children( 'igroup-add', **{'initiator-group-name': igroup, 'initiator': initiator}) self.connection.invoke_successfully(igroup_add, True) def do_direct_resize(self, path, new_size_bytes, force=True): """Resize the LUN.""" seg = path.split("/") LOG.info(_LI("Resizing LUN %s directly to new size."), seg[-1]) lun_resize = netapp_api.NaElement.create_node_with_children( 'lun-resize', **{'path': path, 'size': new_size_bytes}) if force: lun_resize.add_new_child('force', 'true') self.connection.invoke_successfully(lun_resize, True) def get_lun_geometry(self, path): """Gets the LUN geometry.""" geometry = {} lun_geo = netapp_api.NaElement("lun-get-geometry") lun_geo.add_new_child('path', path) try: result = self.connection.invoke_successfully(lun_geo, True) geometry['size'] = result.get_child_content("size") geometry['bytes_per_sector'] =\ result.get_child_content("bytes-per-sector") geometry['sectors_per_track'] =\ result.get_child_content("sectors-per-track") geometry['tracks_per_cylinder'] =\ result.get_child_content("tracks-per-cylinder") geometry['cylinders'] =\ result.get_child_content("cylinders") geometry['max_resize'] =\ result.get_child_content("max-resize-size") except Exception as e: LOG.error(_LE("LUN %(path)s geometry failed. 
Message - %(msg)s"), {'path': path, 'msg': e.message}) return geometry def get_volume_options(self, volume_name): """Get the value for the volume option.""" opts = [] vol_option_list = netapp_api.NaElement("volume-options-list-info") vol_option_list.add_new_child('volume', volume_name) result = self.connection.invoke_successfully(vol_option_list, True) options = result.get_child_by_name("options") if options: opts = options.get_children() return opts def move_lun(self, path, new_path): """Moves the LUN at path to new path.""" seg = path.split("/") new_seg = new_path.split("/") LOG.debug("Moving LUN %(name)s to %(new_name)s.", {'name': seg[-1], 'new_name': new_seg[-1]}) lun_move = netapp_api.NaElement("lun-move") lun_move.add_new_child("path", path) lun_move.add_new_child("new-path", new_path) self.connection.invoke_successfully(lun_move, True) def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" raise NotImplementedError() def get_fc_target_wwpns(self): """Gets the FC target details.""" raise NotImplementedError() def get_iscsi_service_details(self): """Returns iscsi iqn.""" raise NotImplementedError() def check_iscsi_initiator_exists(self, iqn): """Returns True if initiator exists.""" raise NotImplementedError() def set_iscsi_chap_authentication(self, iqn, username, password): """Provides NetApp host's CHAP credentials to the backend.""" raise NotImplementedError() def get_lun_list(self): """Gets the list of LUNs on filer.""" raise NotImplementedError() def get_igroup_by_initiators(self, initiator_list): """Get igroups exactly matching a set of initiators.""" raise NotImplementedError() def _has_luns_mapped_to_initiator(self, initiator): """Checks whether any LUNs are mapped to the given initiator.""" lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info') lun_list_api.add_new_child('initiator', initiator) result = self.connection.invoke_successfully(lun_list_api, True) lun_maps_container = result.get_child_by_name( 'lun-maps') or netapp_api.NaElement('none') return len(lun_maps_container.get_children()) > 0 def has_luns_mapped_to_initiators(self, initiator_list): """Checks whether any LUNs are mapped to the given initiator(s).""" for initiator in initiator_list: if self._has_luns_mapped_to_initiator(initiator): return True return False def get_lun_by_args(self, **args): """Retrieves LUNs with specified args.""" raise NotImplementedError() def get_performance_counter_info(self, object_name, counter_name): """Gets info about one or more Data ONTAP performance counters.""" api_args = {'objectname': object_name} result = self.send_request('perf-object-counter-list-info', api_args, enable_tunneling=False) counters = result.get_child_by_name( 'counters') or netapp_api.NaElement('None') for counter in counters.get_children(): if counter.get_child_content('name') == counter_name: labels = [] label_list = counter.get_child_by_name( 'labels') or netapp_api.NaElement('None') for label in label_list.get_children(): labels.extend(label.get_content().split(',')) base_counter = counter.get_child_content('base-counter') return { 'name': counter_name, 'labels': labels, 'base-counter': base_counter, } else: raise exception.NotFound(_('Counter %s not found') % counter_name) def provide_ems(self, requester, netapp_backend, app_version, server_type="cluster"): """Provide ems with volume stats for the requester. :param server_type: cluster or 7mode. 
""" def _create_ems(netapp_backend, app_version, server_type): """Create ems API request.""" ems_log = netapp_api.NaElement('ems-autosupport-log') host = socket.getfqdn() or 'Cinder_node' if server_type == "cluster": dest = "cluster node" else: dest = "7 mode controller" ems_log.add_new_child('computer-name', host) ems_log.add_new_child('event-id', '0') ems_log.add_new_child('event-source', 'Cinder driver %s' % netapp_backend) ems_log.add_new_child('app-version', app_version) ems_log.add_new_child('category', 'provisioning') ems_log.add_new_child('event-description', 'OpenStack Cinder connected to %s' % dest) ems_log.add_new_child('log-level', '6') ems_log.add_new_child('auto-support', 'false') return ems_log def _create_vs_get(): """Create vs_get API request.""" vs_get = netapp_api.NaElement('vserver-get-iter') vs_get.add_new_child('max-records', '1') query = netapp_api.NaElement('query') query.add_node_with_children('vserver-info', **{'vserver-type': 'node'}) vs_get.add_child_elem(query) desired = netapp_api.NaElement('desired-attributes') desired.add_node_with_children( 'vserver-info', **{'vserver-name': '', 'vserver-type': ''}) vs_get.add_child_elem(desired) return vs_get def _get_cluster_node(na_server): """Get the cluster node for ems.""" na_server.set_vserver(None) vs_get = _create_vs_get() res = na_server.invoke_successfully(vs_get) if (res.get_child_content('num-records') and int(res.get_child_content('num-records')) > 0): attr_list = res.get_child_by_name('attributes-list') vs_info = attr_list.get_child_by_name('vserver-info') vs_name = vs_info.get_child_content('vserver-name') return vs_name return None do_ems = True if hasattr(requester, 'last_ems'): sec_limit = 3559 if not (timeutils.is_older_than(requester.last_ems, sec_limit)): do_ems = False if do_ems: na_server = copy.copy(self.connection) na_server.set_timeout(25) ems = _create_ems(netapp_backend, app_version, server_type) try: if server_type == "cluster": api_version = na_server.get_api_version() if api_version: major, minor = api_version else: raise netapp_api.NaApiError( code='Not found', message='No API version found') if major == 1 and minor > 15: node = getattr(requester, 'vserver', None) else: node = _get_cluster_node(na_server) if node is None: raise netapp_api.NaApiError( code='Not found', message='No vserver found') na_server.set_vserver(node) else: na_server.set_vfiler(None) na_server.invoke_successfully(ems, True) LOG.debug("ems executed successfully.") except netapp_api.NaApiError as e: LOG.warning(_LW("Failed to invoke ems. Message : %s"), e) finally: requester.last_ems = timeutils.utcnow() def delete_snapshot(self, volume_name, snapshot_name): """Deletes a volume snapshot.""" api_args = {'volume': volume_name, 'snapshot': snapshot_name} self.send_request('snapshot-delete', api_args) def create_cg_snapshot(self, volume_names, snapshot_name): """Creates a consistency group snapshot out of one or more flexvols. ONTAP requires an invocation of cg-start to first fence off the flexvols to be included in the snapshot. If cg-start returns success, a cg-commit must be executed to finalized the snapshot and unfence the flexvols. 
""" cg_id = self._start_cg_snapshot(volume_names, snapshot_name) if not cg_id: msg = _('Could not start consistency group snapshot %s.') raise exception.VolumeBackendAPIException(data=msg % snapshot_name) self._commit_cg_snapshot(cg_id) def _start_cg_snapshot(self, volume_names, snapshot_name): snapshot_init = { 'snapshot': snapshot_name, 'timeout': 'relaxed', 'volumes': [ {'volume-name': volume_name} for volume_name in volume_names ], } result = self.send_request('cg-start', snapshot_init) return result.get_child_content('cg-id') def _commit_cg_snapshot(self, cg_id): snapshot_commit = {'cg-id': cg_id} self.send_request('cg-commit', snapshot_commit) cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py0000664000567000056710000005750712701406250030033 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import math import time from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _, _LW from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from oslo_utils import strutils LOG = logging.getLogger(__name__) @six.add_metaclass(utils.TraceWrapperMetaclass) class Client(client_base.Client): def __init__(self, volume_list=None, **kwargs): super(Client, self).__init__(**kwargs) vfiler = kwargs.get('vfiler', None) self.connection.set_vfiler(vfiler) (major, minor) = self.get_ontapi_version(cached=False) self.connection.set_api_version(major, minor) self.volume_list = volume_list self._init_features() def _init_features(self): super(Client, self)._init_features() ontapi_version = self.get_ontapi_version() # major, minor ontapi_1_20 = ontapi_version >= (1, 20) self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_20) def _invoke_vfiler_api(self, na_element, vfiler): server = copy.copy(self.connection) server.set_vfiler(vfiler) result = server.invoke_successfully(na_element, True) return result def _invoke_7mode_iterator_getter(self, start_api_name, next_api_name, end_api_name, record_container_tag_name, maximum=100): """Invoke a 7-mode iterator-style getter API.""" data = [] start_api = netapp_api.NaElement(start_api_name) start_result = self.connection.invoke_successfully(start_api) tag = start_result.get_child_content('tag') if not tag: return data try: while True: next_api = netapp_api.NaElement(next_api_name) next_api.add_new_child('tag', tag) next_api.add_new_child('maximum', six.text_type(maximum)) next_result = self.connection.invoke_successfully(next_api) records = next_result.get_child_content('records') or 0 if int(records) == 0: break record_container = next_result.get_child_by_name( record_container_tag_name) or netapp_api.NaElement('none') data.extend(record_container.get_children()) finally: end_api = 
netapp_api.NaElement(end_api_name) end_api.add_new_child('tag', tag) self.connection.invoke_successfully(end_api) return data def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" iscsi_if_iter = netapp_api.NaElement('iscsi-portal-list-info') result = self.connection.invoke_successfully(iscsi_if_iter, True) tgt_list = [] portal_list_entries = result.get_child_by_name( 'iscsi-portal-list-entries') if portal_list_entries: portal_list = portal_list_entries.get_children() for iscsi_if in portal_list: d = dict() d['address'] = iscsi_if.get_child_content('ip-address') d['port'] = iscsi_if.get_child_content('ip-port') d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag') tgt_list.append(d) return tgt_list def check_iscsi_initiator_exists(self, iqn): """Returns True if initiator exists.""" initiator_exists = True try: auth_list = netapp_api.NaElement('iscsi-initiator-auth-list-info') auth_list.add_new_child('initiator', iqn) self.connection.invoke_successfully(auth_list, True) except netapp_api.NaApiError: initiator_exists = False return initiator_exists def get_fc_target_wwpns(self): """Gets the FC target details.""" wwpns = [] port_name_list_api = netapp_api.NaElement('fcp-port-name-list-info') result = self.connection.invoke_successfully(port_name_list_api) port_names = result.get_child_by_name('fcp-port-names') if port_names: for port_name_info in port_names.get_children(): wwpn = port_name_info.get_child_content('port-name').lower() wwpns.append(wwpn) return wwpns def get_iscsi_service_details(self): """Returns iscsi iqn.""" iscsi_service_iter = netapp_api.NaElement('iscsi-node-get-name') result = self.connection.invoke_successfully(iscsi_service_iter, True) return result.get_child_content('node-name') def set_iscsi_chap_authentication(self, iqn, username, password): """Provides NetApp host's CHAP credentials to the backend.""" command = ("iscsi security add -i %(iqn)s -s CHAP " "-p %(password)s -n %(username)s") % { 'iqn': iqn, 'password': password, 'username': username, } LOG.debug('Updating CHAP authentication for %(iqn)s.', {'iqn': iqn}) try: ssh_pool = self.ssh_client.ssh_pool with ssh_pool.item() as ssh: self.ssh_client.execute_command(ssh, command) except Exception as e: msg = _('Failed to set CHAP authentication for target IQN ' '%(iqn)s. Details: %(ex)s') % { 'iqn': iqn, 'ex': e, } LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_lun_list(self): """Gets the list of LUNs on filer.""" lun_list = [] if self.volume_list: for vol in self.volume_list: try: luns = self._get_vol_luns(vol) if luns: lun_list.extend(luns) except netapp_api.NaApiError: LOG.warning(_LW("Error finding LUNs for volume %s." 
" Verify volume exists."), vol) else: luns = self._get_vol_luns(None) lun_list.extend(luns) return lun_list def _get_vol_luns(self, vol_name): """Gets the LUNs for a volume.""" api = netapp_api.NaElement('lun-list-info') if vol_name: api.add_new_child('volume-name', vol_name) result = self.connection.invoke_successfully(api, True) luns = result.get_child_by_name('luns') return luns.get_children() def get_igroup_by_initiators(self, initiator_list): """Get igroups exactly matching a set of initiators.""" igroup_list = [] if not initiator_list: return igroup_list initiator_set = set(initiator_list) igroup_list_info = netapp_api.NaElement('igroup-list-info') result = self.connection.invoke_successfully(igroup_list_info, True) initiator_groups = result.get_child_by_name( 'initiator-groups') or netapp_api.NaElement('none') for initiator_group_info in initiator_groups.get_children(): initiator_set_for_igroup = set() initiators = initiator_group_info.get_child_by_name( 'initiators') or netapp_api.NaElement('none') for initiator_info in initiators.get_children(): initiator_set_for_igroup.add( initiator_info.get_child_content('initiator-name')) if initiator_set == initiator_set_for_igroup: igroup = {'initiator-group-os-type': initiator_group_info.get_child_content( 'initiator-group-os-type'), 'initiator-group-type': initiator_group_info.get_child_content( 'initiator-group-type'), 'initiator-group-name': initiator_group_info.get_child_content( 'initiator-group-name')} igroup_list.append(igroup) return igroup_list def clone_lun(self, path, clone_path, name, new_name, space_reserved='true', src_block=0, dest_block=0, block_count=0, source_snapshot=None): # zAPI can only handle 2^24 blocks per range bc_limit = 2 ** 24 # 8GB # zAPI can only handle 32 block ranges per call br_limit = 32 z_limit = br_limit * bc_limit # 256 GB z_calls = int(math.ceil(block_count / float(z_limit))) zbc = block_count if z_calls == 0: z_calls = 1 for _call in range(0, z_calls): if zbc > z_limit: block_count = z_limit zbc -= z_limit else: block_count = zbc zapi_args = { 'source-path': path, 'destination-path': clone_path, 'no-snap': 'true', } if source_snapshot: zapi_args['snapshot-name'] = source_snapshot clone_start = netapp_api.NaElement.create_node_with_children( 'clone-start', **zapi_args) if block_count > 0: block_ranges = netapp_api.NaElement("block-ranges") # zAPI can only handle 2^24 block ranges bc_limit = 2 ** 24 # 8GB segments = int(math.ceil(block_count / float(bc_limit))) bc = block_count for _segment in range(0, segments): if bc > bc_limit: block_count = bc_limit bc -= bc_limit else: block_count = bc block_range =\ netapp_api.NaElement.create_node_with_children( 'block-range', **{'source-block-number': six.text_type(src_block), 'destination-block-number': six.text_type(dest_block), 'block-count': six.text_type(block_count)}) block_ranges.add_child_elem(block_range) src_block += int(block_count) dest_block += int(block_count) clone_start.add_child_elem(block_ranges) result = self.connection.invoke_successfully(clone_start, True) clone_id_el = result.get_child_by_name('clone-id') cl_id_info = clone_id_el.get_child_by_name('clone-id-info') vol_uuid = cl_id_info.get_child_content('volume-uuid') clone_id = cl_id_info.get_child_content('clone-op-id') if vol_uuid: self._check_clone_status(clone_id, vol_uuid, name, new_name) def _check_clone_status(self, clone_id, vol_uuid, name, new_name): """Checks for the job till completed.""" clone_status = netapp_api.NaElement('clone-list-status') cl_id = 
netapp_api.NaElement('clone-id') clone_status.add_child_elem(cl_id) cl_id.add_node_with_children('clone-id-info', **{'clone-op-id': clone_id, 'volume-uuid': vol_uuid}) running = True clone_ops_info = None while running: result = self.connection.invoke_successfully(clone_status, True) status = result.get_child_by_name('status') ops_info = status.get_children() if ops_info: for info in ops_info: if info.get_child_content('clone-state') == 'running': time.sleep(1) break else: running = False clone_ops_info = info break else: if clone_ops_info: fmt = {'name': name, 'new_name': new_name} if clone_ops_info.get_child_content('clone-state')\ == 'completed': LOG.debug("Clone operation with src %(name)s" " and dest %(new_name)s completed", fmt) else: LOG.debug("Clone operation with src %(name)s" " and dest %(new_name)s failed", fmt) raise netapp_api.NaApiError( clone_ops_info.get_child_content('error'), clone_ops_info.get_child_content('reason')) def get_lun_by_args(self, **args): """Retrieves LUNs with specified args.""" lun_info = netapp_api.NaElement.create_node_with_children( 'lun-list-info', **args) result = self.connection.invoke_successfully(lun_info, True) luns = result.get_child_by_name('luns') return luns.get_children() def get_filer_volumes(self, volume=None): """Returns list of filer volumes in API format.""" vol_request = netapp_api.NaElement('volume-list-info') res = self.connection.invoke_successfully(vol_request, True) volumes = res.get_child_by_name('volumes') if volumes: return volumes.get_children() return [] def get_lun_map(self, path): lun_map_list = netapp_api.NaElement.create_node_with_children( 'lun-map-list-info', **{'path': path}) return self.connection.invoke_successfully(lun_map_list, True) def set_space_reserve(self, path, enable): """Sets the space reserve info.""" space_res = netapp_api.NaElement.create_node_with_children( 'lun-set-space-reservation-info', **{'path': path, 'enable': enable}) self.connection.invoke_successfully(space_res, True) def get_actual_path_for_export(self, export_path): """Gets the actual path on the filer for export path.""" storage_path = netapp_api.NaElement.create_node_with_children( 'nfs-exportfs-storage-path', **{'pathname': export_path}) result = self.connection.invoke_successfully(storage_path, enable_tunneling=True) if result.get_child_content('actual-pathname'): return result.get_child_content('actual-pathname') raise exception.NotFound(_('No storage path found for export path %s') % (export_path)) def clone_file(self, src_path, dest_path): LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s", {'src_path': src_path, 'dest_path': dest_path}) clone_start = netapp_api.NaElement.create_node_with_children( 'clone-start', **{'source-path': src_path, 'destination-path': dest_path, 'no-snap': 'true'}) result = self.connection.invoke_successfully(clone_start, enable_tunneling=True) clone_id_el = result.get_child_by_name('clone-id') cl_id_info = clone_id_el.get_child_by_name('clone-id-info') vol_uuid = cl_id_info.get_child_content('volume-uuid') clone_id = cl_id_info.get_child_content('clone-op-id') if vol_uuid: try: self._wait_for_clone_finish(clone_id, vol_uuid) except netapp_api.NaApiError as e: if e.code != 'UnknownCloneId': self._clear_clone(clone_id) raise def _wait_for_clone_finish(self, clone_op_id, vol_uuid): """Waits till a clone operation is complete or errored out.""" clone_ls_st = netapp_api.NaElement('clone-list-status') clone_id = netapp_api.NaElement('clone-id') clone_ls_st.add_child_elem(clone_id) 
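        # NOTE(editor): hedged sketch of the request assembled here; the
        # elements built just above and below serialize to approximately:
        #
        #   <clone-list-status>
        #     <clone-id>
        #       <clone-id-info>
        #         <clone-op-id>{clone_op_id}</clone-op-id>
        #         <volume-uuid>{vol_uuid}</volume-uuid>
        #       </clone-id-info>
        #     </clone-id>
        #   </clone-list-status>
        #
        # which is then polled until the operation leaves the 'running'
        # state.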
        clone_id.add_node_with_children('clone-id-info',
                                        **{'clone-op-id': clone_op_id,
                                           'volume-uuid': vol_uuid})
        task_running = True
        while task_running:
            result = self.connection.invoke_successfully(
                clone_ls_st, enable_tunneling=True)
            status = result.get_child_by_name('status')
            ops_info = status.get_children()
            if ops_info:
                state = ops_info[0].get_child_content('clone-state')
                if state == 'completed':
                    task_running = False
                elif state == 'failed':
                    code = ops_info[0].get_child_content('error')
                    reason = ops_info[0].get_child_content('reason')
                    raise netapp_api.NaApiError(code, reason)
                else:
                    time.sleep(1)
            else:
                raise netapp_api.NaApiError(
                    'UnknownCloneId',
                    'No clone operation for clone id %s found on the filer'
                    % (clone_id))

    def _clear_clone(self, clone_id):
        """Clear the clone information.

        Invoke this in case of failed clone.
        """
        clone_clear = netapp_api.NaElement.create_node_with_children(
            'clone-clear', **{'clone-id': clone_id})
        retry = 3
        while retry:
            try:
                self.connection.invoke_successfully(clone_clear,
                                                    enable_tunneling=True)
                break
            except netapp_api.NaApiError:
                # Filer might be rebooting
                time.sleep(5)
            retry = retry - 1

    def get_file_usage(self, path):
        """Gets the file unique bytes."""
        LOG.debug('Getting file usage for %s', path)
        file_use = netapp_api.NaElement.create_node_with_children(
            'file-usage-get', **{'path': path})
        res = self.connection.invoke_successfully(file_use)
        bytes = res.get_child_content('unique-bytes')
        LOG.debug('file-usage for path %(path)s is %(bytes)s',
                  {'path': path, 'bytes': bytes})
        return bytes

    def get_ifconfig(self):
        ifconfig = netapp_api.NaElement('net-ifconfig-get')
        return self.connection.invoke_successfully(ifconfig)

    def get_flexvol_capacity(self, flexvol_path):
        """Gets total capacity and free capacity, in bytes, of the flexvol."""
        api_args = {'volume': flexvol_path, 'verbose': 'false'}
        result = self.send_request('volume-list-info', api_args)
        flexvol_info_list = result.get_child_by_name('volumes')
        flexvol_info = flexvol_info_list.get_children()[0]
        total_bytes = float(
            flexvol_info.get_child_content('size-total'))
        available_bytes = float(
            flexvol_info.get_child_content('size-available'))
        return total_bytes, available_bytes

    def get_performance_instance_names(self, object_name):
        """Get names of performance instances for a node."""
        api_args = {'objectname': object_name}
        result = self.send_request('perf-object-instance-list-info',
                                   api_args, enable_tunneling=False)
        instance_names = []
        instances = result.get_child_by_name(
            'instances') or netapp_api.NaElement('None')
        for instance_info in instances.get_children():
            instance_names.append(instance_info.get_child_content('name'))
        return instance_names

    def get_performance_counters(self, object_name, instance_names,
                                 counter_names):
        """Gets one or more 7-mode Data ONTAP performance counters."""
        api_args = {
            'objectname': object_name,
            'instances': [
                {'instance': instance} for instance in instance_names
            ],
            'counters': [
                {'counter': counter} for counter in counter_names
            ],
        }
        result = self.send_request('perf-object-get-instances', api_args,
                                   enable_tunneling=False)
        counter_data = []
        timestamp = result.get_child_content('timestamp')
        instances = result.get_child_by_name(
            'instances') or netapp_api.NaElement('None')
        for instance in instances.get_children():
            instance_name = instance.get_child_content('name')
            counters = instance.get_child_by_name(
                'counters') or netapp_api.NaElement('None')
            for counter in counters.get_children():
                counter_name = counter.get_child_content('name')
                counter_value = counter.get_child_content('value')
                counter_data.append({
'instance-name': instance_name, 'timestamp': timestamp, counter_name: counter_value, }) return counter_data def get_system_name(self): """Get the name of the 7-mode Data ONTAP controller.""" result = self.send_request('system-get-info', {}, enable_tunneling=False) system_info = result.get_child_by_name('system-info') system_name = system_info.get_child_content('system-name') return system_name def get_snapshot(self, volume_name, snapshot_name): """Gets a single snapshot.""" snapshot_list_info = netapp_api.NaElement('snapshot-list-info') snapshot_list_info.add_new_child('volume', volume_name) result = self.connection.invoke_successfully(snapshot_list_info, enable_tunneling=True) snapshots = result.get_child_by_name('snapshots') if not snapshots: msg = _('No snapshots could be found on volume %s.') raise exception.VolumeBackendAPIException(data=msg % volume_name) snapshot_list = snapshots.get_children() snapshot = None for s in snapshot_list: if (snapshot_name == s.get_child_content('name')) and (snapshot is None): snapshot = { 'name': s.get_child_content('name'), 'volume': s.get_child_content('volume'), 'busy': strutils.bool_from_string( s.get_child_content('busy')), } snapshot_owners_list = s.get_child_by_name( 'snapshot-owners-list') or netapp_api.NaElement('none') snapshot_owners = set([snapshot_owner.get_child_content( 'owner') for snapshot_owner in snapshot_owners_list.get_children()]) snapshot['owners'] = snapshot_owners elif (snapshot_name == s.get_child_content('name')) and ( snapshot is not None): msg = _('Could not find unique snapshot %(snap)s on ' 'volume %(vol)s.') msg_args = {'snap': snapshot_name, 'vol': volume_name} raise exception.VolumeBackendAPIException(data=msg % msg_args) if not snapshot: raise exception.SnapshotNotFound(snapshot_id=snapshot_name) return snapshot cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/client/api.py0000664000567000056710000006306512701406250026227 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Glenn Gobeli. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Alex Meade. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp API for Data ONTAP and OnCommand DFM. Contains classes required to issue API calls to Data ONTAP and OnCommand DFM. 
""" import copy from eventlet import greenthread from eventlet import semaphore from lxml import etree from oslo_log import log as logging import random import six from six.moves import urllib from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder import utils LOG = logging.getLogger(__name__) EAPINOTFOUND = '13005' ESIS_CLONE_NOT_LICENSED = '14956' ESNAPSHOTNOTALLOWED = '13023' class NaServer(object): """Encapsulates server connection logic.""" TRANSPORT_TYPE_HTTP = 'http' TRANSPORT_TYPE_HTTPS = 'https' SERVER_TYPE_FILER = 'filer' SERVER_TYPE_DFM = 'dfm' URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer' URL_DFM = 'apis/XMLrequest' NETAPP_NS = 'http://www.netapp.com/filer/admin' STYLE_LOGIN_PASSWORD = 'basic_auth' STYLE_CERTIFICATE = 'certificate_auth' def __init__(self, host, server_type=SERVER_TYPE_FILER, transport_type=TRANSPORT_TYPE_HTTP, style=STYLE_LOGIN_PASSWORD, username=None, password=None, port=None): self._host = host self.set_server_type(server_type) self.set_transport_type(transport_type) self.set_style(style) if port: self.set_port(port) self._username = username self._password = password self._refresh_conn = True LOG.debug('Using NetApp controller: %s', self._host) def get_transport_type(self): """Get the transport type protocol.""" return self._protocol def set_transport_type(self, transport_type): """Set the transport type protocol for API. Supports http and https transport types. """ if not transport_type: raise ValueError('No transport type specified') if transport_type.lower() not in ( NaServer.TRANSPORT_TYPE_HTTP, NaServer.TRANSPORT_TYPE_HTTPS): raise ValueError('Unsupported transport type') self._protocol = transport_type.lower() if self._protocol == NaServer.TRANSPORT_TYPE_HTTP: if self._server_type == NaServer.SERVER_TYPE_FILER: self.set_port(80) else: self.set_port(8088) else: if self._server_type == NaServer.SERVER_TYPE_FILER: self.set_port(443) else: self.set_port(8488) self._refresh_conn = True def get_style(self): """Get the authorization style for communicating with the server.""" return self._auth_style def set_style(self, style): """Set the authorization style for communicating with the server. Supports basic_auth for now. Certificate_auth mode to be done. """ if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD, NaServer.STYLE_CERTIFICATE): raise ValueError('Unsupported authentication style') self._auth_style = style.lower() def get_server_type(self): """Get the target server type.""" return self._server_type def set_server_type(self, server_type): """Set the target server type. Supports filer and dfm server types. """ if server_type.lower() not in (NaServer.SERVER_TYPE_FILER, NaServer.SERVER_TYPE_DFM): raise ValueError('Unsupported server type') self._server_type = server_type.lower() if self._server_type == NaServer.SERVER_TYPE_FILER: self._url = NaServer.URL_FILER else: self._url = NaServer.URL_DFM self._ns = NaServer.NETAPP_NS self._refresh_conn = True def set_api_version(self, major, minor): """Set the API version.""" try: self._api_major_version = int(major) self._api_minor_version = int(minor) self._api_version = six.text_type(major) + "." 
+ \ six.text_type(minor) except ValueError: raise ValueError('Major and minor versions must be integers') self._refresh_conn = True def get_api_version(self): """Gets the API version tuple.""" if hasattr(self, '_api_version'): return (self._api_major_version, self._api_minor_version) return None def set_port(self, port): """Set the server communication port.""" try: int(port) except ValueError: raise ValueError('Port must be integer') self._port = six.text_type(port) self._refresh_conn = True def get_port(self): """Get the server communication port.""" return self._port def set_timeout(self, seconds): """Sets the timeout in seconds.""" try: self._timeout = int(seconds) except ValueError: raise ValueError('timeout in seconds must be integer') def get_timeout(self): """Gets the timeout in seconds if set.""" if hasattr(self, '_timeout'): return self._timeout return None def get_vfiler(self): """Get the vfiler to use in tunneling.""" return self._vfiler def set_vfiler(self, vfiler): """Set the vfiler to use if tunneling gets enabled.""" self._vfiler = vfiler def get_vserver(self): """Get the vserver to use in tunneling.""" return self._vserver def set_vserver(self, vserver): """Set the vserver to use if tunneling gets enabled.""" self._vserver = vserver def set_username(self, username): """Set the user name for authentication.""" self._username = username self._refresh_conn = True def set_password(self, password): """Set the password for authentication.""" self._password = password self._refresh_conn = True @utils.trace_api def send_http_request(self, na_element, enable_tunneling=False): """Invoke the API on the server.""" if not na_element or not isinstance(na_element, NaElement): raise ValueError('NaElement must be supplied to invoke API') request, request_element = self._create_request(na_element, enable_tunneling) if not hasattr(self, '_opener') or not self._opener \ or self._refresh_conn: self._build_opener() try: if hasattr(self, '_timeout'): response = self._opener.open(request, timeout=self._timeout) else: response = self._opener.open(request) except urllib.error.HTTPError as e: raise NaApiError(e.code, e.msg) except Exception: raise NaApiError('Unexpected error') response_xml = response.read() response_element = self._get_result(response_xml) return response_element def invoke_successfully(self, na_element, enable_tunneling=False): """Invokes API and checks execution status as success. Need to set enable_tunneling to True explicitly to achieve it. This helps to use same connection instance to enable or disable tunneling. The vserver or vfiler should be set before this call otherwise tunneling remains disabled. """ result = self.send_http_request(na_element, enable_tunneling) if result.has_attr('status') and result.get_attr('status') == 'passed': return result code = result.get_attr('errno')\ or result.get_child_content('errorno')\ or 'ESTATUSFAILED' if code == ESIS_CLONE_NOT_LICENSED: msg = 'Clone operation failed: FlexClone not licensed.' 
        else:
            msg = result.get_attr('reason')\
                or result.get_child_content('reason')\
                or 'Execution status is failed due to unknown reason'
        raise NaApiError(code, msg)

    def _create_request(self, na_element, enable_tunneling=False):
        """Creates request in the desired format."""
        netapp_elem = NaElement('netapp')
        netapp_elem.add_attr('xmlns', self._ns)
        if hasattr(self, '_api_version'):
            netapp_elem.add_attr('version', self._api_version)
        if enable_tunneling:
            self._enable_tunnel_request(netapp_elem)
        netapp_elem.add_child_elem(na_element)
        request_d = netapp_elem.to_string()
        request = urllib.request.Request(
            self._get_url(), data=request_d,
            headers={'Content-Type': 'text/xml', 'charset': 'utf-8'})
        return request, netapp_elem

    def _enable_tunnel_request(self, netapp_elem):
        """Enables vserver or vfiler tunneling."""
        if hasattr(self, '_vfiler') and self._vfiler:
            if hasattr(self, '_api_major_version') and \
                    hasattr(self, '_api_minor_version') and \
                    self._api_major_version >= 1 and \
                    self._api_minor_version >= 7:
                netapp_elem.add_attr('vfiler', self._vfiler)
            else:
                raise ValueError('ontapi version has to be at least 1.7'
                                 ' to send request to vfiler')
        if hasattr(self, '_vserver') and self._vserver:
            if hasattr(self, '_api_major_version') and \
                    hasattr(self, '_api_minor_version') and \
                    self._api_major_version >= 1 and \
                    self._api_minor_version >= 15:
                netapp_elem.add_attr('vfiler', self._vserver)
            else:
                raise ValueError('ontapi version has to be at least 1.15'
                                 ' to send request to vserver')

    def _parse_response(self, response):
        """Get the NaElement for the response."""
        if not response:
            raise NaApiError('No response received')
        xml = etree.XML(response)
        return NaElement(xml)

    def _get_result(self, response):
        """Gets the call result."""
        processed_response = self._parse_response(response)
        return processed_response.get_child_by_name('results')

    def _get_url(self):
        return '%s://%s:%s/%s' % (self._protocol, self._host, self._port,
                                  self._url)

    def _build_opener(self):
        if self._auth_style == NaServer.STYLE_LOGIN_PASSWORD:
            auth_handler = self._create_basic_auth_handler()
        else:
            auth_handler = self._create_certificate_auth_handler()
        opener = urllib.request.build_opener(auth_handler)
        self._opener = opener

    def _create_basic_auth_handler(self):
        password_man = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        password_man.add_password(None, self._get_url(), self._username,
                                  self._password)
        auth_handler = urllib.request.HTTPBasicAuthHandler(password_man)
        return auth_handler

    def _create_certificate_auth_handler(self):
        raise NotImplementedError()

    def __str__(self):
        return "server: %s" % self._host


class NaElement(object):
    """Class wraps basic building block for NetApp API request."""

    def __init__(self, name):
        """Name of the element or etree.Element."""
        if isinstance(name, etree._Element):
            self._element = name
        else:
            self._element = etree.Element(name)

    def get_name(self):
        """Returns the tag name of the element."""
        return self._element.tag

    def set_content(self, text):
        """Set the text string for the element."""
        self._element.text = text

    def get_content(self):
        """Get the text for the element."""
        return self._element.text

    def add_attr(self, name, value):
        """Add the attribute to the element."""
        self._element.set(name, value)

    def add_attrs(self, **attrs):
        """Add multiple attributes to the element."""
        for attr in attrs.keys():
            self._element.set(attr, attrs.get(attr))

    def add_child_elem(self, na_element):
        """Add the child element to the element."""
        if isinstance(na_element, NaElement):
            self._element.append(na_element._element)
            return
        raise

    def
get_child_by_name(self, name): """Get the child element by the tag name.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return NaElement(child) return None def get_child_content(self, name): """Get the content of the child.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return child.text return None def get_children(self): """Get the children for the element.""" return [NaElement(el) for el in self._element.iterchildren()] def has_attr(self, name): """Checks whether element has attribute.""" attributes = self._element.attrib or {} return name in attributes.keys() def get_attr(self, name): """Get the attribute with the given name.""" attributes = self._element.attrib or {} return attributes.get(name) def get_attr_names(self): """Returns the list of attribute names.""" attributes = self._element.attrib or {} return list(attributes.keys()) def add_new_child(self, name, content, convert=False): """Add child with tag name and content. Convert replaces entity refs to chars. """ child = NaElement(name) if convert: content = NaElement._convert_entity_refs(content) child.set_content(content) self.add_child_elem(child) @staticmethod def _convert_entity_refs(text): """Converts entity refs to chars to handle etree auto conversions.""" text = text.replace("<", "<") text = text.replace(">", ">") return text @staticmethod def create_node_with_children(node, **children): """Creates and returns named node with children.""" parent = NaElement(node) for child in children.keys(): parent.add_new_child(child, children.get(child, None)) return parent def add_node_with_children(self, node, **children): """Creates named node with children.""" parent = NaElement.create_node_with_children(node, **children) self.add_child_elem(parent) def to_string(self, pretty=False, method='xml', encoding='UTF-8'): """Prints the element to string.""" return etree.tostring(self._element, method=method, encoding=encoding, pretty_print=pretty) def __str__(self): xml = self.to_string(pretty=True) if six.PY3: xml = xml.decode('utf-8') return xml def __eq__(self, other): return str(self) == str(other) def __hash__(self): return hash(str(self)) def __repr__(self): return str(self) def __getitem__(self, key): """Dict getter method for NaElement. Returns NaElement list if present, text value in case no NaElement node children or attribute value if present. """ child = self.get_child_by_name(key) if child: if child.get_children(): return child else: return child.get_content() elif self.has_attr(key): return self.get_attr(key) raise KeyError(_('No element by given name %s.') % (key)) def __setitem__(self, key, value): """Dict setter method for NaElement. Accepts dict, list, tuple, str, int, float and long as valid value. """ if key: if value: if isinstance(value, NaElement): child = NaElement(key) child.add_child_elem(value) self.add_child_elem(child) elif isinstance(value, six.integer_types + (str, float)): self.add_new_child(key, six.text_type(value)) elif isinstance(value, (list, tuple, dict)): child = NaElement(key) child.translate_struct(value) self.add_child_elem(child) else: raise TypeError(_('Not a valid value for NaElement.')) else: self.add_child_elem(NaElement(key)) else: raise KeyError(_('NaElement name cannot be null.')) def translate_struct(self, data_struct): """Convert list, tuple, dict to NaElement and appends. Example usage: 1. 
vl1 vl2 vl3 The above can be achieved by doing root = NaElement('root') root.translate_struct({'elem1': 'vl1', 'elem2': 'vl2', 'elem3': 'vl3'}) 2. vl1 vl2 vl3 The above can be achieved by doing root = NaElement('root') root.translate_struct([{'elem1': 'vl1', 'elem2': 'vl2'}, {'elem1': 'vl3'}]) """ if isinstance(data_struct, (list, tuple)): for el in data_struct: if isinstance(el, (list, tuple, dict)): self.translate_struct(el) else: self.add_child_elem(NaElement(el)) elif isinstance(data_struct, dict): for k in data_struct.keys(): child = NaElement(k) if isinstance(data_struct[k], (dict, list, tuple)): child.translate_struct(data_struct[k]) else: if data_struct[k]: child.set_content(six.text_type(data_struct[k])) self.add_child_elem(child) else: raise ValueError(_('Type cannot be converted into NaElement.')) class NaApiError(Exception): """Base exception class for NetApp API errors.""" def __init__(self, code='unknown', message='unknown'): self.code = code self.message = message def __str__(self, *args, **kwargs): return 'NetApp API failed. Reason - %s:%s' % (self.code, self.message) NaErrors = {'API_NOT_FOUND': NaApiError('13005', 'Unable to find API'), 'INSUFFICIENT_PRIVS': NaApiError('13003', 'Insufficient privileges')} def invoke_api(na_server, api_name, api_family='cm', query=None, des_result=None, additional_elems=None, is_iter=False, records=0, tag=None, timeout=0, tunnel=None): """Invokes any given API call to a NetApp server. :param na_server: na_server instance :param api_name: API name string :param api_family: cm or 7m :param query: API query as dict :param des_result: desired result as dict :param additional_elems: dict other than query and des_result :param is_iter: is iterator API :param records: limit for records, 0 for infinite :param timeout: timeout seconds :param tunnel: tunnel entity, vserver or vfiler name """ record_step = 50 if not (na_server or isinstance(na_server, NaServer)): msg = _("Requires an NaServer instance.") raise exception.InvalidInput(reason=msg) server = copy.copy(na_server) if api_family == 'cm': server.set_vserver(tunnel) else: server.set_vfiler(tunnel) if timeout > 0: server.set_timeout(timeout) iter_records = 0 cond = True while cond: na_element = create_api_request( api_name, query, des_result, additional_elems, is_iter, record_step, tag) result = server.invoke_successfully(na_element, True) if is_iter: if records > 0: iter_records = iter_records + record_step if iter_records >= records: cond = False tag_el = result.get_child_by_name('next-tag') tag = tag_el.get_content() if tag_el else None if not tag: cond = False else: cond = False yield result def create_api_request(api_name, query=None, des_result=None, additional_elems=None, is_iter=False, record_step=50, tag=None): """Creates a NetApp API request. 
:param api_name: API name string :param query: API query as dict :param des_result: desired result as dict :param additional_elems: dict other than query and des_result :param is_iter: is iterator API :param record_step: records at a time for iter API :param tag: next tag for iter API """ api_el = NaElement(api_name) if query: query_el = NaElement('query') query_el.translate_struct(query) api_el.add_child_elem(query_el) if des_result: res_el = NaElement('desired-attributes') res_el.translate_struct(des_result) api_el.add_child_elem(res_el) if additional_elems: api_el.translate_struct(additional_elems) if is_iter: api_el.add_new_child('max-records', six.text_type(record_step)) if tag: api_el.add_new_child('tag', tag, True) return api_el class SSHUtil(object): """Encapsulates connection logic and command execution for SSH client.""" MAX_CONCURRENT_SSH_CONNECTIONS = 5 RECV_TIMEOUT = 3 CONNECTION_KEEP_ALIVE = 600 WAIT_ON_STDOUT_TIMEOUT = 3 def __init__(self, host, username, password, port=22): self.ssh_pool = self._init_ssh_pool(host, port, username, password) # Note(cfouts) Number of SSH connections made to the backend need to be # limited. Use of SSHPool allows connections to be cached and reused # instead of creating a new connection each time a command is executed # via SSH. self.ssh_connect_semaphore = semaphore.Semaphore( self.MAX_CONCURRENT_SSH_CONNECTIONS) def _init_ssh_pool(self, host, port, username, password): return ssh_utils.SSHPool(host, port, self.CONNECTION_KEEP_ALIVE, username, password) def execute_command(self, client, command_text, timeout=RECV_TIMEOUT): LOG.debug("execute_command() - Sending command.") stdin, stdout, stderr = client.exec_command(command_text) stdin.close() self._wait_on_stdout(stdout, timeout) output = stdout.read() LOG.debug("Output of length %(size)d received.", {'size': len(output)}) stdout.close() stderr.close() return output def execute_command_with_prompt(self, client, command, expected_prompt_text, prompt_response, timeout=RECV_TIMEOUT): LOG.debug("execute_command_with_prompt() - Sending command.") stdin, stdout, stderr = client.exec_command(command) self._wait_on_stdout(stdout, timeout) response = stdout.channel.recv(999) if response.strip() != expected_prompt_text: msg = _("Unexpected output. Expected [%(expected)s] but " "received [%(output)s]") % { 'expected': expected_prompt_text, 'output': response.strip(), } LOG.error(msg) stdin.close() stdout.close() stderr.close() raise exception.VolumeBackendAPIException(msg) else: LOG.debug("execute_command_with_prompt() - Sending answer") stdin.write(prompt_response + '\n') stdin.flush() stdin.close() stdout.close() stderr.close() def _wait_on_stdout(self, stdout, timeout=WAIT_ON_STDOUT_TIMEOUT): wait_time = 0.0 # NOTE(cfouts): The server does not always indicate when EOF is reached # for stdout. The timeout exists for this reason and an attempt is made # to read from stdout. while not stdout.channel.exit_status_ready(): # period is 10 - 25 centiseconds period = random.randint(10, 25) / 100.0 greenthread.sleep(period) wait_time += period if wait_time > timeout: LOG.debug("Timeout exceeded while waiting for exit status.") break cinder-8.0.0/cinder/volume/drivers/netapp/dataontap/block_base.py0000664000567000056710000013565512701406250026271 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. 
# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2014 Jeff Applewhite. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. # Copyright (c) 2016 Chuck Fouts. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver library for NetApp 7/C-mode block storage systems. """ import copy import math import sys import uuid from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import excutils from oslo_utils import units import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import utils as volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class NetAppLun(object): """Represents a LUN on NetApp storage.""" def __init__(self, handle, name, size, metadata_dict): self.handle = handle self.name = name self.size = size self.metadata = metadata_dict or {} def get_metadata_property(self, prop): """Get the metadata property of a LUN.""" if prop in self.metadata: return self.metadata[prop] name = self.name LOG.debug("No metadata property %(prop)s defined for the LUN %(name)s", {'prop': prop, 'name': name}) def __str__(self, *args, **kwargs): return 'NetApp LUN [handle:%s, name:%s, size:%s, metadata:%s]' % ( self.handle, self.name, self.size, self.metadata) @six.add_metaclass(utils.TraceWrapperMetaclass) class NetAppBlockStorageLibrary(object): """NetApp block storage library for Data ONTAP.""" # do not increment this as it may be used in volume type definitions VERSION = "1.0.0" REQUIRED_FLAGS = ['netapp_login', 'netapp_password', 'netapp_server_hostname'] ALLOWED_LUN_OS_TYPES = ['linux', 'aix', 'hpux', 'image', 'windows', 'windows_2008', 'windows_gpt', 'solaris', 'solaris_efi', 'netware', 'openvms', 'hyper_v'] ALLOWED_IGROUP_HOST_TYPES = ['linux', 'aix', 'hpux', 'windows', 'solaris', 'netware', 'default', 'vmware', 'openvms', 'xen', 'hyper_v'] DEFAULT_LUN_OS = 'linux' DEFAULT_HOST_TYPE = 'linux' DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70' DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' def __init__(self, driver_name, driver_protocol, **kwargs): na_utils.validate_instantiation(**kwargs) self.driver_name = driver_name self.driver_protocol = driver_protocol self.zapi_client = None self._stats = {} self.lun_table = {} self.lun_ostype = None self.host_type = None self.lun_space_reservation = 'true' self.lookup_service = fczm_utils.create_lookup_service() self.app_version = kwargs.get("app_version", "unknown") self.configuration = kwargs['configuration'] self.configuration.append_config_values(na_opts.netapp_connection_opts) 
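        # NOTE: a hypothetical instantiation by a concrete driver subclass
        # (argument values are examples only):
        #
        #     library = NetAppBlockStorageLibrary('NetApp_iSCSI', 'iSCSI',
        #                                         configuration=config,
        #                                         app_version='1.0')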
        self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
        self.configuration.append_config_values(na_opts.netapp_transport_opts)
        self.configuration.append_config_values(
            na_opts.netapp_provisioning_opts)
        self.configuration.append_config_values(na_opts.netapp_san_opts)
        self.max_over_subscription_ratio = (
            self.configuration.max_over_subscription_ratio)
        self.reserved_percentage = self._get_reserved_percentage()

    def _get_reserved_percentage(self):
        # If the legacy config option is set to the default value, use the
        # more general configuration option.
        if self.configuration.netapp_size_multiplier == (
                na_opts.NETAPP_SIZE_MULTIPLIER_DEFAULT):
            return self.configuration.reserved_percentage
        # If the legacy config option has a non-default value,
        # honor it for one release. Note that the "size multiplier"
        # actually acted as a divisor in the code and didn't apply
        # to the file size (as the help message for this option suggests),
        # but rather to total and free size for the pool.
        divisor = self.configuration.netapp_size_multiplier
        reserved_ratio = round(1 - (1 / divisor), 2)
        reserved_percentage = int(100 * reserved_ratio)
        msg = _LW('The "netapp_size_multiplier" configuration option is '
                  'deprecated and will be removed in the Mitaka release. '
                  'Please set "reserved_percentage = %d" instead.') % (
                      reserved_percentage)
        versionutils.report_deprecated_feature(LOG, msg)
        return reserved_percentage

    def do_setup(self, context):
        na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
        self.lun_ostype = (self.configuration.netapp_lun_ostype
                           or self.DEFAULT_LUN_OS)
        self.host_type = (self.configuration.netapp_host_type
                          or self.DEFAULT_HOST_TYPE)
        if self.configuration.netapp_lun_space_reservation == 'enabled':
            self.lun_space_reservation = 'true'
        else:
            self.lun_space_reservation = 'false'

    def check_for_setup_error(self):
        """Check that the driver is working and can communicate.

        Discovers the LUNs on the NetApp server.
        """
        if self.lun_ostype not in self.ALLOWED_LUN_OS_TYPES:
            msg = _("Invalid value for NetApp configuration"
                    " option netapp_lun_ostype.")
            LOG.error(msg)
            raise exception.NetAppDriverException(msg)
        if self.host_type not in self.ALLOWED_IGROUP_HOST_TYPES:
            msg = _("Invalid value for NetApp configuration"
                    " option netapp_host_type.")
            LOG.error(msg)
            raise exception.NetAppDriverException(msg)
        lun_list = self.zapi_client.get_lun_list()
        self._extract_and_populate_luns(lun_list)
        LOG.debug("Success getting list of LUNs from server.")

    def get_pool(self, volume):
        """Return pool name where volume resides.

        :param volume: The volume hosted by the driver.
        :return: Name of the pool where given volume is hosted.
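
        For example, a LUN whose cached metadata contains
        {'Volume': 'vol0', 'Path': '/vol/vol0/volume-1234'} is reported as
        residing in pool 'vol0' (illustrative values).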
""" name = volume['name'] metadata = self._get_lun_attr(name, 'metadata') or dict() return metadata.get('Volume', None) def create_volume(self, volume): """Driver entry point for creating a new volume (Data ONTAP LUN).""" LOG.debug('create_volume on %s', volume['host']) # get Data ONTAP volume name as pool name pool_name = volume_utils.extract_host(volume['host'], level='pool') if pool_name is None: msg = _("Pool is not available in the volume host field.") raise exception.InvalidHost(reason=msg) extra_specs = na_utils.get_volume_extra_specs(volume) lun_name = volume['name'] size = int(volume['size']) * units.Gi metadata = {'OsType': self.lun_ostype, 'SpaceReserved': self.lun_space_reservation, 'Path': '/vol/%s/%s' % (pool_name, lun_name)} qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs) qos_policy_group_name = ( na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info)) try: self._create_lun(pool_name, lun_name, size, metadata, qos_policy_group_name) except Exception: LOG.exception(_LE("Exception creating LUN %(name)s in pool " "%(pool)s."), {'name': lun_name, 'pool': pool_name}) self._mark_qos_policy_group_for_deletion(qos_policy_group_info) msg = _("Volume %s could not be created.") raise exception.VolumeBackendAPIException(data=msg % ( volume['name'])) LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s', {'name': lun_name, 'qos': qos_policy_group_info}) metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name) metadata['Volume'] = pool_name metadata['Qtree'] = None handle = self._create_lun_handle(metadata) self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata)) def _setup_qos_for_volume(self, volume, extra_specs): return None def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info): return def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" self._delete_lun(volume['name']) def _delete_lun(self, lun_name): """Helper method to delete LUN backing a volume or snapshot.""" metadata = self._get_lun_attr(lun_name, 'metadata') if metadata: self.zapi_client.destroy_lun(metadata['Path']) self.lun_table.pop(lun_name) else: LOG.warning(_LW("No entry in LUN table for volume/snapshot" " %(name)s."), {'name': lun_name}) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" handle = self._get_lun_attr(volume['name'], 'handle') return {'provider_location': handle} def create_export(self, context, volume): """Driver entry point to get the export info for a new volume.""" handle = self._get_lun_attr(volume['name'], 'handle') return {'provider_location': handle} def remove_export(self, context, volume): """Driver entry point to remove an export for a volume. Since exporting is idempotent in this driver, we have nothing to do for unexporting. """ pass def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot. This driver implements snapshots by using efficient single-file (LUN) cloning. 
""" vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] lun = self._get_lun_from_table(vol_name) self._clone_lun(lun.name, snapshot_name, space_reserved='false') def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" self._delete_lun(snapshot['name']) LOG.debug("Snapshot %s deletion successful", snapshot['name']) def create_volume_from_snapshot(self, volume, snapshot): source = {'name': snapshot['name'], 'size': snapshot['volume_size']} return self._clone_source_to_destination(source, volume) def create_cloned_volume(self, volume, src_vref): src_lun = self._get_lun_from_table(src_vref['name']) source = {'name': src_lun.name, 'size': src_vref['size']} return self._clone_source_to_destination(source, volume) def _clone_source_to_destination(self, source, destination_volume): source_size = source['size'] destination_size = destination_volume['size'] source_name = source['name'] destination_name = destination_volume['name'] extra_specs = na_utils.get_volume_extra_specs(destination_volume) qos_policy_group_info = self._setup_qos_for_volume( destination_volume, extra_specs) qos_policy_group_name = ( na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info)) try: self._clone_lun(source_name, destination_name, space_reserved=self.lun_space_reservation, qos_policy_group_name=qos_policy_group_name) if destination_size != source_size: try: self._extend_volume(destination_volume, destination_size, qos_policy_group_name) except Exception: with excutils.save_and_reraise_exception(): LOG.error( _LE("Resizing %s failed. Cleaning volume."), destination_volume['id']) self.delete_volume(destination_volume) except Exception: LOG.exception(_LE("Exception cloning volume %(name)s from source " "volume %(source)s."), {'name': destination_name, 'source': source_name}) self._mark_qos_policy_group_for_deletion(qos_policy_group_info) msg = _("Volume %s could not be created from source volume.") raise exception.VolumeBackendAPIException( data=msg % destination_name) def _create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None): """Creates a LUN, handling Data ONTAP differences as needed.""" raise NotImplementedError() def _create_lun_handle(self, metadata): """Returns LUN handle based on filer type.""" raise NotImplementedError() def _extract_lun_info(self, lun): """Extracts the LUNs from API and populates the LUN table.""" meta_dict = self._create_lun_meta(lun) path = lun.get_child_content('path') (_rest, _splitter, name) = path.rpartition('/') handle = self._create_lun_handle(meta_dict) size = lun.get_child_content('size') return NetAppLun(handle, name, size, meta_dict) def _extract_and_populate_luns(self, api_luns): """Extracts the LUNs from API and populates the LUN table.""" for lun in api_luns: discovered_lun = self._extract_lun_info(lun) self._add_lun_to_table(discovered_lun) def _map_lun(self, name, initiator_list, initiator_type, lun_id=None): """Maps LUN to the initiator(s) and returns LUN ID assigned.""" metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] igroup_name, ig_host_os, ig_type = self._get_or_create_igroup( initiator_list, initiator_type, self.host_type) if ig_host_os != self.host_type: LOG.warning(_LW("LUN misalignment may occur for current" " initiator group %(ig_nm)s) with host OS type" " %(ig_os)s. 
Please configure initiator group" " manually according to the type of the" " host OS."), {'ig_nm': igroup_name, 'ig_os': ig_host_os}) try: return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id) except netapp_api.NaApiError: exc_info = sys.exc_info() (_igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator_list) if lun_id is not None: return lun_id else: six.reraise(*exc_info) def _unmap_lun(self, path, initiator_list): """Unmaps a LUN from given initiator.""" (igroup_name, _lun_id) = self._find_mapped_lun_igroup(path, initiator_list) self.zapi_client.unmap_lun(path, igroup_name) def _find_mapped_lun_igroup(self, path, initiator_list): """Find an igroup for a LUN mapped to the given initiator(s).""" raise NotImplementedError() def _has_luns_mapped_to_initiators(self, initiator_list): """Checks whether any LUNs are mapped to the given initiator(s).""" return self.zapi_client.has_luns_mapped_to_initiators(initiator_list) def _get_or_create_igroup(self, initiator_list, initiator_group_type, host_os_type): """Checks for an igroup for a set of one or more initiators. Creates igroup if not already present with given host os type, igroup type and adds initiators. """ igroups = self.zapi_client.get_igroup_by_initiators(initiator_list) igroup_name = None if igroups: igroup = igroups[0] igroup_name = igroup['initiator-group-name'] host_os_type = igroup['initiator-group-os-type'] initiator_group_type = igroup['initiator-group-type'] if not igroup_name: igroup_name = self._create_igroup_add_initiators( initiator_group_type, host_os_type, initiator_list) return igroup_name, host_os_type, initiator_group_type def _create_igroup_add_initiators(self, initiator_group_type, host_os_type, initiator_list): """Creates igroup and adds initiators.""" igroup_name = na_utils.OPENSTACK_PREFIX + six.text_type(uuid.uuid4()) self.zapi_client.create_igroup(igroup_name, initiator_group_type, host_os_type) for initiator in initiator_list: self.zapi_client.add_igroup_initiator(igroup_name, initiator) return igroup_name def _add_lun_to_table(self, lun): """Adds LUN to cache table.""" if not isinstance(lun, NetAppLun): msg = _("Object is not a NetApp LUN.") raise exception.VolumeBackendAPIException(data=msg) self.lun_table[lun.name] = lun def _get_lun_from_table(self, name): """Gets LUN from cache table. Refreshes cache if LUN not found in cache. """ lun = self.lun_table.get(name) if lun is None: lun_list = self.zapi_client.get_lun_list() self._extract_and_populate_luns(lun_list) lun = self.lun_table.get(name) if lun is None: raise exception.VolumeNotFound(volume_id=name) return lun def _clone_lun(self, name, new_name, space_reserved='true', qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None): """Clone LUN with the given name to the new name.""" raise NotImplementedError() def _get_lun_attr(self, name, attr): """Get the LUN attribute if found else None.""" try: attr = getattr(self._get_lun_from_table(name), attr) return attr except exception.VolumeNotFound as e: LOG.error(_LE("Message: %s"), e.msg) except Exception as e: LOG.error(_LE("Error getting LUN attribute. Exception: %s"), e) return None def _create_lun_meta(self, lun): raise NotImplementedError() def _get_fc_target_wwpns(self, include_partner=True): raise NotImplementedError() def get_volume_stats(self, refresh=False, filter_function=None, goodness_function=None): """Get volume stats. If 'refresh' is True, update the stats first. 
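
        A usage sketch (illustrative):

            stats = library.get_volume_stats(
                refresh=True,
                filter_function=library.get_default_filter_function(),
                goodness_function=library.get_default_goodness_function())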
""" if refresh: self._update_volume_stats(filter_function=filter_function, goodness_function=goodness_function) return self._stats def _update_volume_stats(self, filter_function=None, goodness_function=None): raise NotImplementedError() def get_default_filter_function(self): """Get the default filter_function string.""" return self.DEFAULT_FILTER_FUNCTION def get_default_goodness_function(self): """Get the default goodness_function string.""" return self.DEFAULT_GOODNESS_FUNCTION def extend_volume(self, volume, new_size): """Driver entry point to increase the size of a volume.""" extra_specs = na_utils.get_volume_extra_specs(volume) # Create volume copy with new size for size-dependent QOS specs volume_copy = copy.copy(volume) volume_copy['size'] = new_size qos_policy_group_info = self._setup_qos_for_volume(volume_copy, extra_specs) qos_policy_group_name = ( na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info)) try: self._extend_volume(volume, new_size, qos_policy_group_name) except Exception: with excutils.save_and_reraise_exception(): # If anything went wrong, revert QoS settings self._setup_qos_for_volume(volume, extra_specs) def _extend_volume(self, volume, new_size, qos_policy_group_name): """Extend an existing volume to the new size.""" name = volume['name'] lun = self._get_lun_from_table(name) path = lun.metadata['Path'] curr_size_bytes = six.text_type(lun.size) new_size_bytes = six.text_type(int(new_size) * units.Gi) # Reused by clone scenarios. # Hence comparing the stored size. if curr_size_bytes != new_size_bytes: lun_geometry = self.zapi_client.get_lun_geometry(path) if (lun_geometry and lun_geometry.get("max_resize") and int(lun_geometry.get("max_resize")) >= int(new_size_bytes)): self.zapi_client.do_direct_resize(path, new_size_bytes) else: self._do_sub_clone_resize( path, new_size_bytes, qos_policy_group_name=qos_policy_group_name) self.lun_table[name].size = new_size_bytes else: LOG.info(_LI("No need to extend volume %s" " as it is already the requested new size."), name) def _get_vol_option(self, volume_name, option_name): """Get the value for the volume option.""" value = None options = self.zapi_client.get_volume_options(volume_name) for opt in options: if opt.get_child_content('name') == option_name: value = opt.get_child_content('value') break return value def _do_sub_clone_resize(self, lun_path, new_size_bytes, qos_policy_group_name=None): """Resize a LUN beyond its original geometry using sub-LUN cloning. Clones the block ranges, swaps the LUNs, and deletes the source LUN. 
""" seg = lun_path.split("/") LOG.info(_LI("Resizing LUN %s using clone operation."), seg[-1]) lun_name = seg[-1] vol_name = seg[2] lun = self._get_lun_from_table(lun_name) metadata = lun.metadata compression = self._get_vol_option(vol_name, 'compression') if compression == "on": msg = _('%s cannot be resized using clone operation' ' as it is hosted on compressed volume') raise exception.VolumeBackendAPIException(data=msg % lun_name) block_count = self._get_lun_block_count(lun_path) if block_count == 0: msg = _('%s cannot be resized using clone operation' ' as it contains no blocks.') raise exception.VolumeBackendAPIException(data=msg % lun_name) new_lun_name = 'new-%s' % lun_name self.zapi_client.create_lun( vol_name, new_lun_name, new_size_bytes, metadata, qos_policy_group_name=qos_policy_group_name) try: self._clone_lun(lun_name, new_lun_name, block_count=block_count) self._post_sub_clone_resize(lun_path) except Exception: with excutils.save_and_reraise_exception(): new_lun_path = '/vol/%s/%s' % (vol_name, new_lun_name) self.zapi_client.destroy_lun(new_lun_path) def _post_sub_clone_resize(self, path): """Try post sub clone resize in a transactional manner.""" st_tm_mv, st_nw_mv, st_del_old = None, None, None seg = path.split("/") LOG.info(_LI("Post clone resize LUN %s"), seg[-1]) new_lun = 'new-%s' % (seg[-1]) tmp_lun = 'tmp-%s' % (seg[-1]) tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun) new_path = "/vol/%s/%s" % (seg[2], new_lun) try: st_tm_mv = self.zapi_client.move_lun(path, tmp_path) st_nw_mv = self.zapi_client.move_lun(new_path, path) st_del_old = self.zapi_client.destroy_lun(tmp_path) except Exception as e: if st_tm_mv is None: msg = _("Failure staging LUN %s to tmp.") raise exception.VolumeBackendAPIException(data=msg % (seg[-1])) else: if st_nw_mv is None: self.zapi_client.move_lun(tmp_path, path) msg = _("Failure moving new cloned LUN to %s.") raise exception.VolumeBackendAPIException( data=msg % (seg[-1])) elif st_del_old is None: LOG.error(_LE("Failure deleting staged tmp LUN %s."), tmp_lun) else: LOG.error(_LE("Unknown exception in" " post clone resize LUN %s."), seg[-1]) LOG.error(_LE("Exception details: %s"), e) def _get_lun_block_count(self, path): """Gets block counts for the LUN.""" LOG.debug("Getting LUN block count.") lun_infos = self.zapi_client.get_lun_by_args(path=path) if not lun_infos: seg = path.split('/') msg = _('Failure getting LUN info for %s.') raise exception.VolumeBackendAPIException(data=msg % seg[-1]) lun_info = lun_infos[-1] bs = int(lun_info.get_child_content('block-size')) ls = int(lun_info.get_child_content('size')) block_count = ls / bs return block_count def _check_volume_type_for_lun(self, volume, lun, existing_ref, extra_specs): """Checks if lun satifies the volume type.""" raise NotImplementedError() def manage_existing(self, volume, existing_ref): """Brings an existing storage object under Cinder management. existing_ref can contain source-id or source-name or both. source-id: lun uuid. source-name: complete lun path eg. /vol/vol0/lun. 
""" lun = self._get_existing_vol_with_manage_ref(existing_ref) extra_specs = na_utils.get_volume_extra_specs(volume) self._check_volume_type_for_lun(volume, lun, existing_ref, extra_specs) qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs) qos_policy_group_name = ( na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info)) path = lun.get_metadata_property('Path') if lun.name == volume['name']: new_path = path LOG.info(_LI("LUN with given ref %s need not be renamed " "during manage operation."), existing_ref) else: (rest, splitter, name) = path.rpartition('/') new_path = '%s/%s' % (rest, volume['name']) self.zapi_client.move_lun(path, new_path) lun = self._get_existing_vol_with_manage_ref( {'source-name': new_path}) if qos_policy_group_name is not None: self.zapi_client.set_lun_qos_policy_group(new_path, qos_policy_group_name) self._add_lun_to_table(lun) LOG.info(_LI("Manage operation completed for LUN with new path" " %(path)s and uuid %(uuid)s."), {'path': lun.get_metadata_property('Path'), 'uuid': lun.get_metadata_property('UUID')}) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. """ lun = self._get_existing_vol_with_manage_ref(existing_ref) return int(math.ceil(float(lun.size) / units.Gi)) def _get_existing_vol_with_manage_ref(self, existing_ref): """Get the corresponding LUN from the storage server.""" uuid = existing_ref.get('source-id') path = existing_ref.get('source-name') if not (uuid or path): reason = _('Reference must contain either source-id' ' or source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) lun_info = {} lun_info.setdefault('path', path if path else None) if hasattr(self, 'vserver') and uuid: lun_info['uuid'] = uuid luns = self.zapi_client.get_lun_by_args(**lun_info) if luns: for lun in luns: netapp_lun = self._extract_lun_info(lun) storage_valid = self._is_lun_valid_on_storage(netapp_lun) uuid_valid = True if uuid: if netapp_lun.get_metadata_property('UUID') == uuid: uuid_valid = True else: uuid_valid = False if storage_valid and uuid_valid: return netapp_lun raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=(_('LUN not found with given ref %s.') % existing_ref)) def _is_lun_valid_on_storage(self, lun): """Validate lun specific to storage system.""" return True def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. """ managed_lun = self._get_lun_from_table(volume['name']) LOG.info(_LI("Unmanaged LUN with current path %(path)s and uuid " "%(uuid)s."), {'path': managed_lun.get_metadata_property('Path'), 'uuid': managed_lun.get_metadata_property('UUID') or 'unknown'}) def initialize_connection_iscsi(self, volume, connector): """Driver entry point to attach a volume to an instance. Do the LUN masking on the storage system so the initiator can access the LUN on the target. Also return the iSCSI properties so the initiator can find the LUN. This implementation does not call _get_iscsi_properties() to get the properties because cannot store the LUN number in the database. We only find out what the LUN number will be during this method call so we construct the properties dictionary ourselves. 
""" initiator_name = connector['initiator'] name = volume['name'] lun_id = self._map_lun(name, [initiator_name], 'iscsi', None) LOG.debug("Mapped LUN %(name)s to the initiator %(initiator_name)s", {'name': name, 'initiator_name': initiator_name}) target_list = self.zapi_client.get_iscsi_target_details() if not target_list: raise exception.VolumeBackendAPIException( data=_('Failed to get LUN target list for the LUN %s') % name) LOG.debug("Successfully fetched target list for LUN %(name)s and " "initiator %(initiator_name)s", {'name': name, 'initiator_name': initiator_name}) preferred_target = self._get_preferred_target_from_list( target_list) if preferred_target is None: msg = _('Failed to get target portal for the LUN %s') raise exception.VolumeBackendAPIException(data=msg % name) (address, port) = (preferred_target['address'], preferred_target['port']) iqn = self.zapi_client.get_iscsi_service_details() if not iqn: msg = _('Failed to get target IQN for the LUN %s') raise exception.VolumeBackendAPIException(data=msg % name) properties = na_utils.get_iscsi_connection_properties(lun_id, volume, iqn, address, port) if self.configuration.use_chap_auth: chap_username, chap_password = self._configure_chap(initiator_name) self._add_chap_properties(properties, chap_username, chap_password) return properties def _configure_chap(self, initiator_name): password = volume_utils.generate_password(na_utils.CHAP_SECRET_LENGTH) username = na_utils.DEFAULT_CHAP_USER_NAME self.zapi_client.set_iscsi_chap_authentication(initiator_name, username, password) LOG.debug("Set iSCSI CHAP authentication.") return username, password def _add_chap_properties(self, properties, username, password): properties['data']['auth_method'] = 'CHAP' properties['data']['auth_username'] = username properties['data']['auth_password'] = password properties['data']['discovery_auth_method'] = 'CHAP' properties['data']['discovery_auth_username'] = username properties['data']['discovery_auth_password'] = password def _get_preferred_target_from_list(self, target_details_list, filter=None): preferred_target = None for target in target_details_list: if filter and target['address'] not in filter: continue if target.get('interface-enabled', 'true') == 'true': preferred_target = target break if preferred_target is None and len(target_details_list) > 0: preferred_target = target_details_list[0] return preferred_target def terminate_connection_iscsi(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance. Unmask the LUN on the storage system so the given initiator can no longer access it. """ initiator_name = connector['initiator'] name = volume['name'] metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] self._unmap_lun(path, [initiator_name]) LOG.debug("Unmapped LUN %(name)s from the initiator " "%(initiator_name)s", {'name': name, 'initiator_name': initiator_name}) def initialize_connection_fc(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. 
Example return values: { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '500a098280feeba5', 'initiator_target_map': { '21000024ff406cc3': ['500a098280feeba5'], '21000024ff406cc2': ['500a098280feeba5'] } } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['500a098280feeba5', '500a098290feeba5', '500a098190feeba5', '500a098180feeba5'], 'initiator_target_map': { '21000024ff406cc3': ['500a098280feeba5', '500a098290feeba5'], '21000024ff406cc2': ['500a098190feeba5', '500a098180feeba5'] } } } """ initiators = [fczm_utils.get_formatted_wwn(wwpn) for wwpn in connector['wwpns']] volume_name = volume['name'] lun_id = self._map_lun(volume_name, initiators, 'fcp', None) LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s", {'name': volume_name, 'initiators': initiators}) target_wwpns, initiator_target_map, num_paths = ( self._build_initiator_target_map(connector)) if target_wwpns: LOG.debug("Successfully fetched target details for LUN %(name)s " "and initiator(s) %(initiators)s", {'name': volume_name, 'initiators': initiators}) else: raise exception.VolumeBackendAPIException( data=_('Failed to get LUN target details for ' 'the LUN %s') % volume_name) target_info = {'driver_volume_type': 'fibre_channel', 'data': {'target_discovered': True, 'target_lun': int(lun_id), 'target_wwn': target_wwpns, 'initiator_target_map': initiator_target_map}} return target_info def terminate_connection_fc(self, volume, connector, **kwargs): """Disallow connection from connector. Return empty data if other volumes are in the same zone. The FibreChannel ZoneManager doesn't remove zones if there isn't an initiator_target_map in the return of terminate_connection. :returns: data - the target_wwns and initiator_target_map if the zone is to be removed, otherwise the same map with an empty dict for the 'data' key """ initiators = [fczm_utils.get_formatted_wwn(wwpn) for wwpn in connector['wwpns']] name = volume['name'] metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] self._unmap_lun(path, initiators) LOG.debug("Unmapped LUN %(name)s from the initiator %(initiators)s", {'name': name, 'initiators': initiators}) info = {'driver_volume_type': 'fibre_channel', 'data': {}} if not self._has_luns_mapped_to_initiators(initiators): # No more exports for this host, so tear down zone. LOG.info(_LI("Need to remove FC Zone, building initiator " "target map")) target_wwpns, initiator_target_map, num_paths = ( self._build_initiator_target_map(connector)) info['data'] = {'target_wwn': target_wwpns, 'initiator_target_map': initiator_target_map} return info def _build_initiator_target_map(self, connector): """Build the target_wwns and the initiator target map.""" # get WWPNs from controller and strip colons all_target_wwpns = self._get_fc_target_wwpns() all_target_wwpns = [six.text_type(wwpn).replace(':', '') for wwpn in all_target_wwpns] target_wwpns = [] init_targ_map = {} num_paths = 0 if self.lookup_service is not None: # Use FC SAN lookup to determine which ports are visible. 
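            # The lookup service returns a mapping keyed by fabric name; a
            # sketch of the expected shape (WWNs borrowed from the docstring
            # examples above):
            #
            #     {'fabricA': {'target_port_wwn_list': ['500a098280feeba5'],
            #                  'initiator_port_wwn_list': ['21000024ff406cc3']}}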
dev_map = self.lookup_service.get_device_mapping_from_network( connector['wwpns'], all_target_wwpns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwpns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) for target in init_targ_map[initiator]: num_paths += 1 target_wwpns = list(set(target_wwpns)) else: initiator_wwns = connector['wwpns'] target_wwpns = all_target_wwpns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwpns return target_wwpns, init_targ_map, num_paths def create_consistencygroup(self, group): """Driver entry point for creating a consistency group. ONTAP does not maintain an actual CG construct. As a result, no communication to the backend is necessary for consistency group creation. :return: Hard-coded model update for consistency group model. """ model_update = {'status': 'available'} return model_update def delete_consistencygroup(self, group, volumes): """Driver entry point for deleting a consistency group. :return: Updated consistency group model and list of volume models for the volumes that were deleted. """ model_update = {'status': 'deleted'} volumes_model_update = [] for volume in volumes: try: self._delete_lun(volume['name']) volumes_model_update.append( {'id': volume['id'], 'status': 'deleted'}) except Exception: volumes_model_update.append( {'id': volume['id'], 'status': 'error_deleting'}) LOG.exception(_LE("Volume %(vol)s in the consistency group " "could not be deleted."), {'vol': volume}) return model_update, volumes_model_update def update_consistencygroup(self, group, add_volumes=None, remove_volumes=None): """Driver entry point for updating a consistency group. Since no actual CG construct is ever created in ONTAP, it is not necessary to update any metadata on the backend. Since this is a NO-OP, there is guaranteed to be no change in any of the volumes' statuses. """ return None, None, None def create_cgsnapshot(self, cgsnapshot, snapshots): """Creates a Cinder cgsnapshot object. The Cinder cgsnapshot object is created by making use of an ephemeral ONTAP CG in order to provide write-order consistency for a set of flexvol snapshots. First, a list of the flexvols backing the given Cinder CG must be gathered. An ONTAP cg-snapshot of these flexvols will create a snapshot copy of all the Cinder volumes in the CG group. For each Cinder volume in the CG, it is then necessary to clone its backing LUN from the ONTAP cg-snapshot. The naming convention used for the clones is what indicates the clone's role as a Cinder snapshot and its inclusion in a Cinder CG. The ONTAP CG-snapshot of the flexvols is no longer required after having cloned the LUNs backing the Cinder volumes in the Cinder CG. :return: An implicit update for cgsnapshot and snapshots models that is interpreted by the manager to set their models to available. 
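
        Schematically (names illustrative):

            flexvols = {'vol0', 'vol1'}
            zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id'])
            # clone each member LUN from that snapshot, then delete the
            # ONTAP snapshot once the clones exist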
""" flexvols = set() for snapshot in snapshots: flexvols.add(volume_utils.extract_host(snapshot['volume']['host'], level='pool')) self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id']) for snapshot in snapshots: self._clone_lun(snapshot['volume']['name'], snapshot['name'], source_snapshot=cgsnapshot['id']) for flexvol in flexvols: self._handle_busy_snapshot(flexvol, cgsnapshot['id']) self.zapi_client.delete_snapshot(flexvol, cgsnapshot['id']) return None, None @utils.retry(exception.SnapshotIsBusy) def _handle_busy_snapshot(self, flexvol, snapshot_name): """Checks for and handles a busy snapshot. If a snapshot is not busy, take no action. If a snapshot is busy for reasons other than a clone dependency, raise immediately. Otherwise, since we always start a clone split operation after cloning a share, wait up to a minute for a clone dependency to clear before giving up. """ snapshot = self.zapi_client.get_snapshot(flexvol, snapshot_name) if not snapshot['busy']: LOG.info(_LI("Backing consistency group snapshot %s " "available for deletion"), snapshot_name) return else: LOG.debug('Snapshot %(snap)s for vol %(vol)s is busy, waiting ' 'for volume clone dependency to clear.', {'snap': snapshot_name, 'vol': flexvol}) raise exception.SnapshotIsBusy(snapshot_name=snapshot_name) def delete_cgsnapshot(self, cgsnapshot, snapshots): """Delete LUNs backing each snapshot in the cgsnapshot. :return: An implicit update for snapshots models that is interpreted by the manager to set their models to deleted. """ for snapshot in snapshots: self._delete_lun(snapshot['name']) LOG.debug("Snapshot %s deletion successful", snapshot['name']) return None, None def create_consistencygroup_from_src(self, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a CG from a either a cgsnapshot or group of cinder vols. :return: An implicit update for the volumes model that is interpreted by the manager as a successful operation. """ LOG.debug("VOLUMES %s ", [dict(vol) for vol in volumes]) if cgsnapshot: vols = zip(volumes, snapshots) for volume, snapshot in vols: source = { 'name': snapshot['name'], 'size': snapshot['volume_size'], } self._clone_source_to_destination(source, volume) else: vols = zip(volumes, source_vols) for volume, old_src_vref in vols: src_lun = self._get_lun_from_table(old_src_vref['name']) source = {'name': src_lun.name, 'size': old_src_vref['size']} self._clone_source_to_destination(source, volume) return None, None cinder-8.0.0/cinder/volume/drivers/datera.py0000664000567000056710000004613712701406257022206 0ustar jenkinsjenkins00000000000000# Copyright 2016 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units import requests import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger(__name__) d_opts = [ cfg.StrOpt('datera_api_port', default='7717', help='Datera API port.'), cfg.StrOpt('datera_api_version', default='2', help='Datera API version.'), cfg.StrOpt('datera_num_replicas', default='1', help='Number of replicas to create of an inode.') ] CONF = cfg.CONF CONF.import_opt('driver_use_ssl', 'cinder.volume.driver') CONF.register_opts(d_opts) DEFAULT_STORAGE_NAME = 'storage-1' DEFAULT_VOLUME_NAME = 'volume-1' def _authenticated(func): """Ensure the driver is authenticated to make a request. In do_setup() we fetch an auth token and store it. If that expires when we do API request, we'll fetch a new one. """ def func_wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) except exception.NotAuthorized: # Prevent recursion loop. After the self arg is the # resource_type arg from _issue_api_request(). If attempt to # login failed, we should just give up. if args[0] == 'login': raise # Token might've expired, get a new one, try again. self._login() return func(self, *args, **kwargs) return func_wrapper class DateraDriver(san.SanISCSIDriver): """The OpenStack Datera Driver Version history: 1.0 - Initial driver 1.1 - Look for lun-0 instead of lun-1. 2.0 - Update For Datera API v2 """ VERSION = '2.0' def __init__(self, *args, **kwargs): super(DateraDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(d_opts) self.num_replicas = self.configuration.datera_num_replicas self.username = self.configuration.san_login self.password = self.configuration.san_password self.auth_token = None self.cluster_stats = {} self.datera_api_token = None def _login(self): """Use the san_login and san_password to set self.auth_token.""" body = { 'name': self.username, 'password': self.password } # Unset token now, otherwise potential expired token will be sent # along to be used for authorization when trying to login. self.auth_token = None try: LOG.debug('Getting Datera auth token.') results = self._issue_api_request('login', 'put', body=body, sensitive=True) self.datera_api_token = results['key'] except exception.NotAuthorized: with excutils.save_and_reraise_exception(): LOG.error(_LE('Logging into the Datera cluster failed. Please ' 'check your username and password set in the ' 'cinder.conf and start the cinder-volume ' 'service again.')) def _get_lunid(self): return 0 def do_setup(self, context): # If we can't authenticate through the old and new method, just fail # now. if not all([self.username, self.password]): msg = _("san_login and/or san_password is not set for Datera " "driver in the cinder.conf. 
Set this information and " "start the cinder-volume service again.") LOG.error(msg) raise exception.InvalidInput(msg) self._login() @utils.retry(exception.VolumeDriverException, retries=3) def _wait_for_resource(self, id, resource_type): result = self._issue_api_request(resource_type, 'get', id) if result['storage_instances'][DEFAULT_STORAGE_NAME]['volumes'][ DEFAULT_VOLUME_NAME]['op_state'] == 'available': return else: raise exception.VolumeDriverException( message=_('Resource not ready.')) def _create_resource(self, resource, resource_type, body): type_id = resource.get('volume_type_id', None) result = None try: result = self._issue_api_request(resource_type, 'post', body=body) except exception.Invalid: if resource_type == 'volumes' and type_id: LOG.error(_LE("Creation request failed. Please verify the " "extra-specs set for your volume types are " "entered correctly.")) raise else: # Handle updating QOS Policies if resource_type == 'app_instances': url = ('app_instances/{}/storage_instances/{}/volumes/{' '}/performance_policy') url = url.format( resource['id'], DEFAULT_STORAGE_NAME, DEFAULT_VOLUME_NAME) if type_id is not None: policies = self._get_policies_by_volume_type(type_id) if policies: self._issue_api_request(url, 'post', body=policies) if result['storage_instances'][DEFAULT_STORAGE_NAME]['volumes'][ DEFAULT_VOLUME_NAME]['op_state'] == 'available': return self._wait_for_resource(resource['id'], resource_type) def create_volume(self, volume): """Create a logical volume.""" # Generate App Instance, Storage Instance and Volume # Volume ID will be used as the App Instance Name # Storage Instance and Volumes will have standard names app_params = ( { 'create_mode': "openstack", 'uuid': str(volume['id']), 'name': str(volume['id']), 'access_control_mode': 'allow_all', 'storage_instances': { DEFAULT_STORAGE_NAME: { 'name': DEFAULT_STORAGE_NAME, 'volumes': { DEFAULT_VOLUME_NAME: { 'name': DEFAULT_VOLUME_NAME, 'size': volume['size'], 'replica_count': int(self.num_replicas), 'snapshot_policies': { } } } } } }) self._create_resource(volume, 'app_instances', body=app_params) def extend_volume(self, volume, new_size): # Offline App Instance, if necessary reonline = False app_inst = self._issue_api_request( "app_instances/{}".format(volume['id'])) if app_inst['admin_state'] == 'online': reonline = True self.detach_volume(None, volume) # Change Volume Size app_inst = volume['id'] storage_inst = DEFAULT_STORAGE_NAME data = { 'size': new_size } self._issue_api_request( 'app_instances/{}/storage_instances/{}/volumes/{}'.format( app_inst, storage_inst, DEFAULT_VOLUME_NAME), method='put', body=data) # Online Volume, if it was online before if reonline: self.create_export(None, volume) def create_cloned_volume(self, volume, src_vref): clone_src_template = ("/app_instances/{}/storage_instances/{" "}/volumes/{}") src = clone_src_template.format(src_vref['id'], DEFAULT_STORAGE_NAME, DEFAULT_VOLUME_NAME) data = { 'create_mode': 'openstack', 'name': str(volume['id']), 'uuid': str(volume['id']), 'clone_src': src, 'access_control_mode': 'allow_all' } self._issue_api_request('app_instances', 'post', body=data) def delete_volume(self, volume): self.detach_volume(None, volume) app_inst = volume['id'] try: self._issue_api_request('app_instances/{}'.format(app_inst), method='delete') except exception.NotFound: msg = _LI("Tried to delete volume %s, but it was not found in the " "Datera cluster. 
Continuing with delete.") LOG.info(msg, volume['id']) def ensure_export(self, context, volume, connector): """Gets the associated account, retrieves CHAP info and updates.""" return self.create_export(context, volume, connector) def create_export(self, context, volume, connector): url = "app_instances/{}".format(volume['id']) data = { 'admin_state': 'online' } app_inst = self._issue_api_request(url, method='put', body=data) storage_instance = app_inst['storage_instances'][ DEFAULT_STORAGE_NAME] portal = storage_instance['access']['ips'][0] + ':3260' iqn = storage_instance['access']['iqn'] # Portal, IQN, LUNID provider_location = '%s %s %s' % (portal, iqn, self._get_lunid()) return {'provider_location': provider_location} def detach_volume(self, context, volume, attachment=None): url = "app_instances/{}".format(volume['id']) data = { 'admin_state': 'offline', 'force': True } try: self._issue_api_request(url, method='put', body=data) except exception.NotFound: msg = _LI("Tried to detach volume %s, but it was not found in the " "Datera cluster. Continuing with detach.") LOG.info(msg, volume['id']) def create_snapshot(self, snapshot): url_template = ('app_instances/{}/storage_instances/{}/volumes/{' '}/snapshots') url = url_template.format(snapshot['volume_id'], DEFAULT_STORAGE_NAME, DEFAULT_VOLUME_NAME) snap_params = { 'uuid': snapshot['id'], } self._issue_api_request(url, method='post', body=snap_params) def delete_snapshot(self, snapshot): snap_temp = ('app_instances/{}/storage_instances/{}/volumes/{' '}/snapshots') snapu = snap_temp.format(snapshot['volume_id'], DEFAULT_STORAGE_NAME, DEFAULT_VOLUME_NAME) snapshots = self._issue_api_request(snapu, method='get') try: for ts, snap in snapshots.items(): if snap['uuid'] == snapshot['id']: url_template = snapu + '/{}' url = url_template.format(ts) self._issue_api_request(url, method='delete') break else: raise exception.NotFound except exception.NotFound: msg = _LI("Tried to delete snapshot %s, but was not found in " "Datera cluster. Continuing with delete.") LOG.info(msg, snapshot['id']) def create_volume_from_snapshot(self, volume, snapshot): snap_temp = ('app_instances/{}/storage_instances/{}/volumes/{' '}/snapshots') snapu = snap_temp.format(snapshot['volume_id'], DEFAULT_STORAGE_NAME, DEFAULT_VOLUME_NAME) snapshots = self._issue_api_request(snapu, method='get') for ts, snap in snapshots.items(): if snap['uuid'] == snapshot['id']: found_ts = ts break else: raise exception.NotFound src = ('/app_instances/{}/storage_instances/{}/volumes/{' '}/snapshots/{}'.format( snapshot['volume_id'], DEFAULT_STORAGE_NAME, DEFAULT_VOLUME_NAME, found_ts)) app_params = ( { 'create_mode': 'openstack', 'uuid': str(volume['id']), 'name': str(volume['id']), 'clone_src': src, 'access_control_mode': 'allow_all' }) self._issue_api_request( 'app_instances', method='post', body=app_params) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update first. The name is a bit misleading as the majority of the data here is cluster data. 
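
        A sketch of the returned structure (numbers illustrative):

            {'volume_backend_name': 'Datera',
             'vendor_name': 'Datera',
             'driver_version': '2.0',
             'storage_protocol': 'iSCSI',
             'total_capacity_gb': 1024,
             'free_capacity_gb': 512,
             'reserved_percentage': 0}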
""" if refresh or not self.cluster_stats: try: self._update_cluster_stats() except exception.DateraAPIException: LOG.error(_LE('Failed to get updated stats from Datera ' 'cluster.')) return self.cluster_stats def _update_cluster_stats(self): LOG.debug("Updating cluster stats info.") results = self._issue_api_request('system') if 'uuid' not in results: LOG.error(_LE('Failed to get updated stats from Datera Cluster.')) backend_name = self.configuration.safe_get('volume_backend_name') stats = { 'volume_backend_name': backend_name or 'Datera', 'vendor_name': 'Datera', 'driver_version': self.VERSION, 'storage_protocol': 'iSCSI', 'total_capacity_gb': int(results['total_capacity']) / units.Gi, 'free_capacity_gb': int(results['available_capacity']) / units.Gi, 'reserved_percentage': 0, } self.cluster_stats = stats def _get_policies_by_volume_type(self, type_id): """Get extra_specs and qos_specs of a volume_type. This fetches the scoped keys from the volume type. Anything set from qos_specs will override key/values set from extra_specs. """ ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) specs = volume_type.get('extra_specs') policies = {} for key, value in specs.items(): if ':' in key: fields = key.split(':') key = fields[1] policies[key] = value qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is not None: qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] if qos_kvs: policies.update(qos_kvs) return policies @_authenticated def _issue_api_request(self, resource_type, method='get', resource=None, body=None, action=None, sensitive=False): """All API requests to Datera cluster go through this method. :param resource_type: the type of the resource :param method: the request verb :param resource: the identifier of the resource :param body: a dict with options for the action_type :param action: the action to perform :returns: a dict of the response from the Datera cluster """ host = self.configuration.san_ip port = self.configuration.datera_api_port api_token = self.datera_api_token api_version = self.configuration.datera_api_version payload = json.dumps(body, ensure_ascii=False) payload.encode('utf-8') if not sensitive: LOG.debug("Payload for Datera API call: %s", payload) header = {'Content-Type': 'application/json; charset=utf-8'} protocol = 'http' if self.configuration.driver_use_ssl: protocol = 'https' # TODO(thingee): Auth method through Auth-Token is deprecated. Remove # this and client cert verification stuff in the Liberty release. 
if api_token: header['Auth-Token'] = api_token client_cert = self.configuration.driver_client_cert client_cert_key = self.configuration.driver_client_cert_key cert_data = None if client_cert: protocol = 'https' cert_data = (client_cert, client_cert_key) connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port, api_version, resource_type) if resource is not None: connection_string += '/%s' % resource if action is not None: connection_string += '/%s' % action LOG.debug("Endpoint for Datera API call: %s", connection_string) try: response = getattr(requests, method)(connection_string, data=payload, headers=header, verify=False, cert=cert_data) except requests.exceptions.RequestException as ex: msg = _( 'Failed to make a request to Datera cluster endpoint due ' 'to the following reason: %s') % six.text_type( ex.message) LOG.error(msg) raise exception.DateraAPIException(msg) data = response.json() if not sensitive: LOG.debug("Results of Datera API call: %s", data) if not response.ok: LOG.debug(("Datera Response URL: %s\n" "Datera Response Payload: %s\n" "Response Object: %s\n"), response.url, payload, vars(response)) if response.status_code == 404: raise exception.NotFound(data['message']) elif response.status_code in [403, 401]: raise exception.NotAuthorized() elif response.status_code == 400 and 'invalidArgs' in data: msg = _('Bad request sent to Datera cluster:' 'Invalid args: %(args)s | %(message)s') % { 'args': data['invalidArgs']['invalidAttrs'], 'message': data['message']} raise exception.Invalid(msg) else: msg = _('Request to Datera cluster returned bad status:' ' %(status)s | %(reason)s') % { 'status': response.status_code, 'reason': response.reason} LOG.error(msg) raise exception.DateraAPIException(msg) return data cinder-8.0.0/cinder/volume/drivers/tintri.py0000664000567000056710000011521612701406250022243 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Tintri. All rights reserved. # Copyright (c) 2012 NetApp, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for Tintri storage. 
""" import datetime import json import math import os import re import socket from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units import requests from six.moves import urllib from cinder import exception from cinder import utils from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder.volume import driver from cinder.volume.drivers import nfs LOG = logging.getLogger(__name__) default_api_version = 'v310' img_prefix = 'image-' tintri_path = '/tintri/' tintri_opts = [ cfg.StrOpt('tintri_server_hostname', help='The hostname (or IP address) for the storage system'), cfg.StrOpt('tintri_server_username', help='User name for the storage system'), cfg.StrOpt('tintri_server_password', help='Password for the storage system', secret=True), cfg.StrOpt('tintri_api_version', default=default_api_version, help='API version for the storage system'), cfg.IntOpt('tintri_image_cache_expiry_days', default=30, help='Delete unused image snapshots older than mentioned days'), cfg.StrOpt('tintri_image_shares_config', help='Path to image nfs shares file'), ] CONF = cfg.CONF CONF.register_opts(tintri_opts) class TintriDriver(driver.ManageableVD, driver.CloneableImageVD, driver.SnapshotVD, nfs.NfsDriver): """Base class for Tintri driver. Version History 2.1.0.1 - Liberty driver 2.2.0.1 - Mitaka driver -- Retype -- Image cache clean up -- Direct image clone fix """ VENDOR = 'Tintri' VERSION = '2.2.0.1' REQUIRED_OPTIONS = ['tintri_server_hostname', 'tintri_server_username', 'tintri_server_password'] def __init__(self, *args, **kwargs): self._execute = None self._context = None super(TintriDriver, self).__init__(*args, **kwargs) self._execute_as_root = True self.configuration.append_config_values(tintri_opts) self.cache_cleanup = False self._mounted_image_shares = [] def do_setup(self, context): self._image_shares_config = getattr(self.configuration, 'tintri_image_shares_config') super(TintriDriver, self).do_setup(context) self._context = context self._check_ops(self.REQUIRED_OPTIONS, self.configuration) self._hostname = getattr(self.configuration, 'tintri_server_hostname') self._username = getattr(self.configuration, 'tintri_server_username') self._password = getattr(self.configuration, 'tintri_server_password') self._api_version = getattr(self.configuration, 'tintri_api_version', CONF.tintri_api_version) self._image_cache_expiry = getattr(self.configuration, 'tintri_image_cache_expiry_days', CONF.tintri_image_cache_expiry_days) def get_pool(self, volume): """Returns pool name where volume resides. :param volume: The volume hosted by the driver. :return: Name of the pool where given volume is hosted. 
""" return volume['provider_location'] def _get_client(self): """Returns a Tintri REST client connection.""" return TClient(self._hostname, self._username, self._password, self._api_version) def create_snapshot(self, snapshot): """Creates a snapshot.""" (__, path) = self._get_export_ip_path(snapshot.volume_id) volume_path = '%s/%s' % (path, snapshot.volume_name) volume_path = '%(path)s/%(volume_name)s' % { 'path': path, 'volume_name': snapshot.volume_name, } model_update = {} with self._get_client() as c: provider_id = c.create_snapshot(volume_path, snapshot.volume.display_name or snapshot.volume_name, snapshot.volume_id, snapshot.display_name or snapshot.name) snapshot.provider_id = provider_id # Store Tintri snapshot ID as snapshot provider_id model_update['provider_id'] = provider_id return model_update def delete_snapshot(self, snapshot): """Deletes a snapshot.""" if snapshot.provider_id: with self._get_client() as c: c.delete_snapshot(snapshot.provider_id) else: LOG.info(_LI('Snapshot %s not found'), snapshot.name) def _check_ops(self, required_ops, configuration): """Ensures that the options we care about are set.""" for op in required_ops: if not getattr(configuration, op): LOG.error(_LE('Configuration value %s is not set.'), op) raise exception.InvalidConfigurationValue(option=op, value=None) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from snapshot.""" vol_size = volume.size snap_size = snapshot.volume_size self._clone_snapshot(snapshot.provider_id, volume.name, snapshot.volume_id) share = self._get_provider_location(snapshot.volume_id) volume['provider_location'] = share path = self.local_path(volume) self._set_rw_permissions(path) if vol_size != snap_size: try: self.extend_volume(volume, vol_size) except Exception: LOG.error(_LE('Resizing %s failed. 
Cleaning volume.'), volume.name) self._delete_file(path) raise return {'provider_location': volume['provider_location']} def _clone_snapshot(self, snapshot_id, clone_name, volume_id, share=None): """Clones volume from snapshot.""" (host, path) = self._get_export_ip_path(volume_id, share) clone_path = '%s/%s-d' % (path, clone_name) with self._get_client() as c: c.clone_volume(snapshot_id, clone_path) self._move_cloned_volume(clone_name, volume_id, share) def _move_cloned_volume(self, clone_name, volume_id, share=None): local_path = self._get_local_path(volume_id, share) source_path = os.path.join(local_path, clone_name + '-d') if self._is_volume_present(source_path): source_file = os.listdir(source_path)[0] source = os.path.join(source_path, source_file) target = os.path.join(local_path, clone_name) moved = self._move_file(source, target) self._execute('rm', '-rf', source_path, run_as_root=self._execute_as_root) if not moved: msg = (_('Failed to move volume %s.') % source) raise exception.VolumeDriverException(msg) else: raise exception.VolumeDriverException( _('Volume %s not found.') % source_path) def _clone_volume_to_volume(self, volume_name, clone_name, volume_display_name, volume_id, share=None, dst=None, image_id=None): """Creates volume snapshot then clones volume.""" (__, path) = self._get_export_ip_path(volume_id, share) volume_path = '%s/%s' % (path, volume_name) if dst: (___, dst_path) = self._get_export_ip_path(None, dst) clone_path = '%s/%s-d' % (dst_path, clone_name) else: clone_path = '%s/%s-d' % (path, clone_name) with self._get_client() as c: if share and image_id: snapshot_id = self._create_image_snapshot(volume_name, share, image_id, volume_display_name) else: snapshot_id = c.create_snapshot( volume_path, volume_display_name, volume_id, volume_name, deletion_policy='DELETE_ON_ZERO_CLONE_REFERENCES') c.clone_volume(snapshot_id, clone_path) self._move_cloned_volume(clone_name, volume_id, dst or share) @utils.synchronized('cache_cleanup') def _initiate_image_cache_cleanup(self): if self.cache_cleanup: LOG.debug('Image cache cleanup in progress.') return else: self.cache_cleanup = True timer = loopingcall.FixedIntervalLoopingCall( self._cleanup_cache) timer.start(interval=None) return timer def _cleanup_cache(self): LOG.debug('Cache cleanup: starting.') try: # Cleanup used cached image snapshots 30 days and older t = datetime.datetime.utcnow() - datetime.timedelta( days=self._image_cache_expiry) date = t.strftime("%Y-%m-%dT%H:%M:%S") with self._get_client() as c: # Get eligible snapshots to clean image_snaps = c.get_image_snapshots_to_date(date) if image_snaps: for snap in image_snaps: uuid = snap['uuid']['uuid'] LOG.debug( 'Cache cleanup: deleting image snapshot %s', uuid) try: c.delete_snapshot(uuid) except Exception: LOG.exception(_LE('Unexpected exception during ' 'cache cleanup of snapshot %s'), uuid) else: LOG.debug('Cache cleanup: nothing to clean') finally: self.cache_cleanup = False LOG.debug('Cache cleanup: finished') raise loopingcall.LoopingCallDone() def _update_volume_stats(self): """Retrieves stats info from volume group.""" data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.VENDOR data['vendor_name'] = self.VENDOR data['driver_version'] = self.get_version() data['storage_protocol'] = self.driver_volume_type self._ensure_shares_mounted() self._initiate_image_cache_cleanup() pools = [] for share in self._mounted_shares: pool = dict() capacity, free, used = self._get_capacity_info(share) 
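            # Note: _get_capacity_info() returns raw byte counts, hence the
            # division by units.Gi (2**30) below to report GiB -- e.g.
            # 1099511627776 / float(units.Gi) == 1024.0. 'used' is unpacked
            # for completeness but not reported per pool.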
pool['pool_name'] = share pool['total_capacity_gb'] = capacity / float(units.Gi) pool['free_capacity_gb'] = free / float(units.Gi) pool['reserved_percentage'] = 0 pool['QoS_support'] = True pools.append(pool) data['pools'] = pools self._stats = data def _get_provider_location(self, volume_id): """Returns provider location for given volume.""" volume = self.db.volume_get(self._context, volume_id) return volume.provider_location def _get_host_ip(self, volume_id): """Returns IP address for the given volume.""" return self._get_provider_location(volume_id).split(':')[0] def _get_export_path(self, volume_id): """Returns NFS export path for the given volume.""" return self._get_provider_location(volume_id).split(':')[1] def _resolve_hostname(self, hostname): """Resolves host name to IP address.""" res = socket.getaddrinfo(hostname, None)[0] family, socktype, proto, canonname, sockaddr = res return sockaddr[0] def _is_volume_present(self, volume_path): """Checks if volume exists.""" try: self._execute('ls', volume_path, run_as_root=self._execute_as_root) except Exception: return False return True def _get_volume_path(self, nfs_share, volume_name): """Gets local volume path for given volume name on given nfs share.""" return os.path.join(self._get_mount_point_for_share(nfs_share), volume_name) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" vol_size = volume.size src_vol_size = src_vref.size self._clone_volume_to_volume(src_vref.name, volume.name, src_vref.display_name, src_vref.id) share = self._get_provider_location(src_vref.id) volume['provider_location'] = share path = self.local_path(volume) self._set_rw_permissions(path) if vol_size != src_vol_size: try: self.extend_volume(volume, vol_size) except Exception: LOG.error(_LE('Resizing %s failed. Cleaning volume.'), volume.name) self._delete_file(path) raise return {'provider_location': volume['provider_location']} def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetches the image from image_service and writes it to the volume.""" super(TintriDriver, self).copy_image_to_volume( context, volume, image_service, image_id) LOG.info(_LI('Copied image to volume %s using regular download.'), volume['name']) self._create_image_snapshot(volume['name'], volume['provider_location'], image_id, img_prefix + image_id) def _create_image_snapshot(self, volume_name, share, image_id, image_name): """Creates an image snapshot.""" snapshot_name = img_prefix + image_id LOG.info(_LI('Creating image snapshot %s'), snapshot_name) (host, path) = self._get_export_ip_path(None, share) volume_path = '%s/%s' % (path, volume_name) @utils.synchronized(snapshot_name, external=True) def _do_snapshot(): with self._get_client() as c: snapshot_id = c.get_snapshot(image_id) if not snapshot_id: snapshot_id = c.create_snapshot(volume_path, image_name, image_id, snapshot_name) return snapshot_id try: return _do_snapshot() except Exception as e: LOG.warning(_LW('Exception while creating image %(image_id)s ' 'snapshot. 
Exception: %(exc)s'), {'image_id': image_id, 'exc': e}) def _find_image_snapshot(self, image_id): """Finds image snapshot.""" with self._get_client() as c: return c.get_snapshot(image_id) def _clone_image_snapshot(self, snapshot_id, dst, share): """Clones volume from image snapshot.""" file_path = self._get_volume_path(share, dst) if not os.path.exists(file_path): LOG.info(_LI('Cloning from snapshot to destination %s'), dst) self._clone_snapshot(snapshot_id, dst, volume_id=None, share=share) def _delete_file(self, path): """Deletes a file from disk and returns the result as a boolean.""" try: LOG.debug('Deleting file at path %s', path) cmd = ['rm', '-f', path] self._execute(*cmd, run_as_root=self._execute_as_root) return True except Exception as e: LOG.warning(_LW('Exception during deleting %s'), e) return False def _move_file(self, source_path, dest_path): """Moves source to destination.""" @utils.synchronized(dest_path, external=True) def _do_move(src, dst): if os.path.exists(dst): LOG.warning(_LW('Destination %s already exists.'), dst) return False self._execute('mv', src, dst, run_as_root=self._execute_as_root) return True try: return _do_move(source_path, dest_path) except Exception as e: LOG.warning(_LW('Exception moving file %(src)s. Message: %(e)s'), {'src': source_path, 'e': e}) return False def clone_image(self, context, volume, image_location, image_meta, image_service): """Creates a volume efficiently from an existing image. image_location is a string whose format depends on the image service backend in use. The driver should use it to determine whether cloning is possible. Returns a tuple: a dict of volume properties (e.g. provider_location) and a boolean indicating whether cloning occurred. """ image_name = image_meta['name'] image_id = image_meta['id'] if 'properties' in image_meta: provider_location = image_meta['properties'].get( 'provider_location') if provider_location: image_location = (provider_location, None) cloned = False post_clone = False try: snapshot_id = self._find_image_snapshot(image_id) if snapshot_id: cloned = self._clone_from_snapshot(volume, image_id, snapshot_id) else: cloned = self._direct_clone(volume, image_location, image_id, image_name) if cloned: post_clone = self._post_clone_image(volume) except Exception as e: LOG.info(_LI('Image cloning unsuccessful for image ' '%(image_id)s. 
Message: %(msg)s'), {'image_id': image_id, 'msg': e}) vol_path = self.local_path(volume) volume['provider_location'] = None if os.path.exists(vol_path): self._delete_file(vol_path) finally: cloned = cloned and post_clone share = volume['provider_location'] if cloned else None bootable = True if cloned else False return {'provider_location': share, 'bootable': bootable}, cloned def _clone_from_snapshot(self, volume, image_id, snapshot_id): """Clones a copy from image snapshot.""" cloned = False LOG.info(_LI('Cloning image %s from snapshot.'), image_id) for share in self._mounted_shares: # Repeat tries in other shares if failed in some LOG.debug('Image share: %s', share) if (share and self._is_share_vol_compatible(volume, share)): try: self._clone_image_snapshot(snapshot_id, volume['name'], share) cloned = True volume['provider_location'] = share break except Exception: LOG.warning(_LW('Unexpected exception during ' 'image cloning in share %s'), share) return cloned def _direct_clone(self, volume, image_location, image_id, image_name): """Clones directly in nfs share.""" LOG.info(_LI('Checking image clone %s from glance share.'), image_id) cloned = False image_location = self._get_image_nfs_url(image_location) share = self._is_cloneable_share(image_location) run_as_root = self._execute_as_root dst_share = None for dst in self._mounted_shares: if dst and self._is_share_vol_compatible(volume, dst): dst_share = dst LOG.debug('Image dst share: %s', dst) break if not dst_share: return cloned LOG.debug('Share is cloneable %s', dst_share) volume['provider_location'] = dst_share (__, ___, img_file) = image_location.rpartition('/') dir_path = self._get_mount_point_for_share(share) dst_path = self._get_mount_point_for_share(dst_share) img_path = '%s/%s' % (dir_path, img_file) img_info = image_utils.qemu_img_info(img_path, run_as_root=run_as_root) if img_info.file_format == 'raw': LOG.debug('Image is raw %s', image_id) self._clone_volume_to_volume( img_file, volume['name'], image_name, volume_id=None, share=share, dst=dst_share, image_id=image_id) cloned = True else: LOG.info(_LI('Image will locally be converted to raw %s'), image_id) dst = '%s/%s' % (dst_path, volume['name']) image_utils.convert_image(img_path, dst, 'raw', run_as_root=run_as_root) data = image_utils.qemu_img_info(dst, run_as_root=run_as_root) if data.file_format != "raw": raise exception.InvalidResults( _('Converted to raw, but ' 'format is now %s') % data.file_format) else: cloned = True self._create_image_snapshot( volume['name'], volume['provider_location'], image_id, image_name) return cloned def _post_clone_image(self, volume): """Performs operations post image cloning.""" LOG.info(_LI('Performing post clone for %s'), volume['name']) vol_path = self.local_path(volume) self._set_rw_permissions(vol_path) self._resize_image_file(vol_path, volume['size']) return True def _resize_image_file(self, path, new_size): """Resizes the image file on share to new size.""" LOG.debug('Checking file for resize.') if self._is_file_size_equal(path, new_size): return else: LOG.info(_LI('Resizing file to %sG'), new_size) image_utils.resize_image(path, new_size, run_as_root=self._execute_as_root) if self._is_file_size_equal(path, new_size): return else: raise exception.InvalidResults( _('Resizing image file failed.')) def _is_cloneable_share(self, image_location): """Finds if the image at location is cloneable.""" conn, dr = self._check_nfs_path(image_location) return self._is_share_in_use(conn, dr) def _check_nfs_path(self, image_location): """Checks 
if the nfs path format is matched. WebNFS url format with relative-path is supported. Accepting all characters in path-names and checking against the mounted shares which will contain only allowed path segments. Returns connection and dir details. """ conn, dr = None, None if image_location: nfs_loc_pattern = \ '^nfs://(([\w\-\.]+:[\d]+|[\w\-\.]+)(/[^/].*)*(/[^/\\\\]+))$' matched = re.match(nfs_loc_pattern, image_location) if not matched: LOG.debug('Image location not in the expected format %s', image_location) else: conn = matched.group(2) dr = matched.group(3) or '/' return conn, dr def _is_share_in_use(self, conn, dr): """Checks if share is cinder mounted and returns it.""" try: if conn: host = conn.split(':')[0] ip = self._resolve_hostname(host) for sh in self._mounted_shares + self._mounted_image_shares: sh_ip = self._resolve_hostname(sh.split(':')[0]) sh_exp = sh.split(':')[1] if sh_ip == ip and sh_exp == dr: LOG.debug('Found share match %s', sh) return sh except Exception: LOG.warning(_LW('Unexpected exception while listing used share.')) def _get_image_nfs_url(self, image_location): """Gets direct url for nfs backend. It creates direct url from image_location which is a tuple with direct_url and locations. Returns url with nfs scheme if nfs store else returns url. It needs to be verified by backend before use. """ direct_url, locations = image_location if not direct_url and not locations: raise exception.NotFound(_('Image location not present.')) # Locations will be always a list of one until # bp multiple-image-locations is introduced if not locations: return direct_url location = locations[0] url = location['url'] if not location['metadata']: return url location_type = location['metadata'].get('type') if not location_type or location_type.lower() != "nfs": return url share_location = location['metadata'].get('share_location') mount_point = location['metadata'].get('mount_point') if not share_location or not mount_point: return url url_parse = urllib.parse.urlparse(url) abs_path = os.path.join(url_parse.netloc, url_parse.path) rel_path = os.path.relpath(abs_path, mount_point) direct_url = "%s/%s" % (share_location, rel_path) return direct_url def _is_share_vol_compatible(self, volume, share): """Checks if share is compatible with volume to host it.""" return self._is_share_eligible(share, volume['size']) def _can_share_hold_size(self, share, size): """Checks if volume can hold image with size.""" _tot_size, tot_available, _tot_allocated = self._get_capacity_info( share) if tot_available < size: msg = _('Container size smaller than required file size.') raise exception.VolumeDriverException(msg) def _get_export_ip_path(self, volume_id=None, share=None): """Returns export ip and path. One of volume id or share is used to return the values. """ if volume_id: host_ip = self._get_host_ip(volume_id) export_path = self._get_export_path(volume_id) elif share: host_ip = share.split(':')[0] export_path = share.split(':')[1] else: raise exception.InvalidInput( reason=_('A volume ID or share was not specified.')) return host_ip, export_path def _get_local_path(self, volume_id=None, share=None): """Returns local path. One of volume id or share is used to return the values. 
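        Raises InvalidInput if neither volume_id nor share is supplied,
        mirroring _get_export_ip_path above.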
""" if volume_id: local_path = self._get_mount_point_for_share( self._get_provider_location(volume_id)) elif share: local_path = self._get_mount_point_for_share(share) else: raise exception.InvalidInput( reason=_('A volume ID or share was not specified.')) return local_path def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref) LOG.debug('Managing volume %(vol)s with ref %(ref)s', {'vol': volume['id'], 'ref': existing_ref}) if volume_name != volume['name']: src = os.path.join(nfs_mount, volume_name) dst = os.path.join(nfs_mount, volume['name']) if not self._move_file(src, dst): msg = (_('Failed to manage volume %s.') % existing_ref['source-name']) raise exception.VolumeDriverException(msg) self._set_rw_permissions(dst) LOG.info(_LI('Manage volume %s'), volume['name']) return {'provider_location': nfs_share} def manage_existing_get_size(self, volume, existing_ref): """Returns size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref) try: volume_path = os.path.join(nfs_mount, volume_name) vol_size = math.ceil(float(utils.get_file_size(volume_path)) / units.Gi) except OSError: msg = (_('Failed to get size of volume %s') % existing_ref['source-name']) raise exception.VolumeDriverException(msg) return vol_size def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. :param volume: Cinder volume to unmanage """ volume_path = self.local_path(volume) LOG.info(_LI('Unmanage volume %s'), volume_path) def _convert_volume_share(self, volume_share): """Converts the share name to IP address.""" share_split = volume_share.rsplit(':', 1) return self._resolve_hostname(share_split[0]) + ':' + share_split[1] def _get_share_mount(self, vol_ref): """Get the NFS share, NFS mount, and volume path from reference. 
:param vol_ref: Driver-specific information used to identify a volume :return: NFS Share, NFS mount, volume path """ if 'source-name' not in vol_ref or not vol_ref['source-name']: msg = _('Volume reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=msg) volume_share = self._convert_volume_share(vol_ref['source-name']) for nfs_share in self._mounted_shares: share = self._convert_volume_share(nfs_share) (__, match, volume_name) = volume_share.partition(share) if match == share: volume_name = volume_name.lstrip('/') nfs_mount = self._get_mount_point_for_share(nfs_share) volume_path = os.path.join(nfs_mount, volume_name) if os.path.isfile(volume_path): LOG.debug('Found volume %(path)s on share %(share)s', {'path': volume_path, 'share': nfs_share}) return nfs_share, nfs_mount, volume_name else: LOG.debug('Volume ref %(ref)s not on share %(share)s', {'ref': vol_ref, 'share': nfs_share}) raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=_('Volume not found.')) def retype(self, context, volume, new_type, diff, host): """Retype from one volume type to another. At this point Tintri VMstore does not differentiate between volume types on the same array. This is a no-op for us. """ return True, None def _ensure_shares_mounted(self): # Mount image shares, we do not need to store these mounts # in _mounted_shares mounted_image_shares = [] if self._image_shares_config: self._load_shares_config(self._image_shares_config) for share in self.shares.keys(): try: self._ensure_share_mounted(share) mounted_image_shares.append(share) except Exception: LOG.exception(_LE( 'Exception during mounting.')) self._mounted_image_shares = mounted_image_shares # Mount Cinder shares super(TintriDriver, self)._ensure_shares_mounted() class TClient(object): """REST client for Tintri storage.""" def __init__(self, hostname, username, password, api_version=default_api_version): """Initializes a connection to Tintri server.""" self.api_url = 'https://' + hostname + '/api' self.api_version = api_version self.session_id = self.login(username, password) self.headers = {'content-type': 'application/json', 'cookie': 'JSESSIONID=' + self.session_id} def __enter__(self): return self def __exit__(self, type, value, traceback): self.logout() def get(self, api): return self.get_query(api, None) def get_query(self, api, query): url = self.api_url + api return requests.get(url, headers=self.headers, params=query, verify=False) def delete(self, api): url = self.api_url + api return requests.delete(url, headers=self.headers, verify=False) def put(self, api, payload): url = self.api_url + api return requests.put(url, data=json.dumps(payload), headers=self.headers, verify=False) def post(self, api, payload): url = self.api_url + api return requests.post(url, data=json.dumps(payload), headers=self.headers, verify=False) def login(self, username, password): # Payload, header and URL for login headers = {'content-type': 'application/json', 'Tintri-Api-Client': 'Tintri-Cinder-Driver-%s' % TintriDriver.VERSION} payload = {'username': username, 'password': password, 'typeId': 'com.tintri.api.rest.vcommon.dto.rbac.' 
'RestApiCredentials'} url = self.api_url + '/' + self.api_version + '/session/login' r = requests.post(url, data=json.dumps(payload), headers=headers, verify=False) if r.status_code != 200: msg = _('Failed to login for user %s.') % username raise exception.VolumeDriverException(msg) return r.cookies['JSESSIONID'] def logout(self): url = self.api_url + '/' + self.api_version + '/session/logout' requests.get(url, headers=self.headers, verify=False) @staticmethod def _remove_prefix(volume_path, prefix): if volume_path.startswith(prefix): return volume_path[len(prefix):] else: return volume_path def create_snapshot(self, volume_path, volume_name, volume_id, snapshot_name, deletion_policy=None): """Creates a volume snapshot.""" request = {'typeId': 'com.tintri.api.rest.' + self.api_version + '.dto.domain.beans.cinder.CinderSnapshotSpec', 'file': TClient._remove_prefix(volume_path, tintri_path), 'vmName': volume_name or snapshot_name, 'description': snapshot_name + ' (' + volume_id + ')', 'vmTintriUuid': volume_id, 'instanceId': volume_id, 'snapshotCreator': 'Cinder', 'deletionPolicy': deletion_policy, } payload = '/' + self.api_version + '/cinder/snapshot' r = self.post(payload, request) if r.status_code != 200: msg = _('Failed to create snapshot for volume %s.') % volume_path raise exception.VolumeDriverException(msg) return r.json()[0] def get_snapshot(self, volume_id): """Gets a volume snapshot.""" filter = {'vmUuid': volume_id} payload = '/' + self.api_version + '/snapshot' r = self.get_query(payload, filter) if r.status_code != 200: msg = _('Failed to get snapshot for volume %s.') % volume_id raise exception.VolumeDriverException(msg) if int(r.json()['filteredTotal']) > 0: return r.json()['items'][0]['uuid']['uuid'] def get_image_snapshots_to_date(self, date): filter = {'sortedBy': 'createTime', 'target': 'SNAPSHOT', 'consistency': 'CRASH_CONSISTENT', 'hasClone': 'No', 'type': 'CINDER_GENERATED_SNAPSHOT', 'contain': 'image-', 'limit': '100', 'page': '1', 'sortOrder': 'DESC', 'since': '1970-01-01T00:00:00', 'until': date, } payload = '/' + self.api_version + '/snapshot' r = self.get_query(payload, filter) if r.status_code != 200: msg = _('Failed to get image snapshots.') raise exception.VolumeDriverException(msg) return r.json()['items'] def delete_snapshot(self, snapshot_uuid): """Deletes a snapshot.""" url = '/' + self.api_version + '/snapshot/' self.delete(url + snapshot_uuid) def clone_volume(self, snapshot_uuid, volume_path): """Clones a volume from snapshot.""" request = {'typeId': 'com.tintri.api.rest.' + self.api_version + '.dto.domain.beans.cinder.CinderCloneSpec', 'destinationPaths': [TClient._remove_prefix(volume_path, tintri_path)], 'tintriSnapshotUuid': snapshot_uuid, } url = '/' + self.api_version + '/cinder/clone' r = self.post(url, request) if r.status_code != 200 and r.status_code != 204: msg = _('Failed to clone volume from snapshot %s.') % snapshot_uuid raise exception.VolumeDriverException(msg) cinder-8.0.0/cinder/volume/drivers/drbdmanagedrv.py0000664000567000056710000011470612701406250023535 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 LINBIT HA Solutions GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This driver connects Cinder to an installed DRBDmanage instance, see http://drbd.linbit.com/users-guide-9.0/ch-openstack.html for more details. """ import eventlet import json import six import socket import time import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import units from cinder import exception from cinder.i18n import _, _LW, _LI, _LE from cinder.volume import driver try: import dbus import drbdmanage.consts as dm_const import drbdmanage.exceptions as dm_exc import drbdmanage.utils as dm_utils except ImportError: # Used for the tests, when no DRBDmanage is installed dbus = None dm_const = None dm_exc = None dm_utils = None LOG = logging.getLogger(__name__) drbd_opts = [ cfg.IntOpt('drbdmanage_redundancy', default=1, help='Number of nodes that should replicate the data.'), cfg.StrOpt('drbdmanage_resource_policy', default='{"ratio": "0.51", "timeout": "60"}', help='Resource deployment completion wait policy.'), cfg.StrOpt('drbdmanage_snapshot_policy', default='{"count": "1", "timeout": "60"}', help='Snapshot completion wait policy.'), cfg.StrOpt('drbdmanage_resize_policy', default='{"timeout": "60"}', help='Volume resize completion wait policy.'), cfg.StrOpt('drbdmanage_resource_plugin', default="drbdmanage.plugins.plugins.wait_for.WaitForResource", help='Resource deployment completion wait plugin.'), cfg.StrOpt('drbdmanage_snapshot_plugin', default="drbdmanage.plugins.plugins.wait_for.WaitForSnapshot", help='Snapshot completion wait plugin.'), cfg.StrOpt('drbdmanage_resize_plugin', default="drbdmanage.plugins.plugins.wait_for.WaitForVolumeSize", help='Volume resize completion wait plugin.'), cfg.BoolOpt('drbdmanage_devs_on_controller', default=True, help='''If set, the c-vol node will receive a useable /dev/drbdX device, even if the actual data is stored on other nodes only. This is useful for debugging, maintenance, and to be able to do the iSCSI export from the c-vol node.''') # TODO(PM): offsite_redundancy? # TODO(PM): choose DRBDmanage storage pool? 
] CONF = cfg.CONF CONF.register_opts(drbd_opts) AUX_PROP_CINDER_VOL_ID = "cinder-id" AUX_PROP_TEMP_CLIENT = "cinder-is-temp-client" DM_VN_PREFIX = 'CV_' # sadly 2CV isn't allowed by DRBDmanage DM_SN_PREFIX = 'SN_' # Need to be set later, so that the tests can fake CS_DEPLOYED = None CS_DISKLESS = None CS_UPD_CON = None class DrbdManageBaseDriver(driver.VolumeDriver): """Cinder driver that uses DRBDmanage for storage.""" VERSION = '1.1.0' drbdmanage_dbus_name = 'org.drbd.drbdmanaged' drbdmanage_dbus_interface = '/interface' def __init__(self, *args, **kwargs): self.empty_list = dbus.Array([], signature="a(s)") self.empty_dict = dbus.Array([], signature="a(ss)") super(DrbdManageBaseDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(drbd_opts) if not self.drbdmanage_dbus_name: self.drbdmanage_dbus_name = 'org.drbd.drbdmanaged' if not self.drbdmanage_dbus_interface: self.drbdmanage_dbus_interface = '/interface' self.drbdmanage_redundancy = int(getattr(self.configuration, 'drbdmanage_redundancy', 1)) self.drbdmanage_devs_on_controller = bool( getattr(self.configuration, 'drbdmanage_devs_on_controller', True)) self.dm_control_vol = ".drbdctrl" self.backend_name = self.configuration.safe_get( 'volume_backend_name') or 'drbdmanage' js_decoder = json.JSONDecoder() self.policy_resource = js_decoder.decode( self.configuration.safe_get('drbdmanage_resource_policy')) self.policy_snapshot = js_decoder.decode( self.configuration.safe_get('drbdmanage_snapshot_policy')) self.policy_resize = js_decoder.decode( self.configuration.safe_get('drbdmanage_resize_policy')) self.plugin_resource = self.configuration.safe_get( 'drbdmanage_resource_plugin') self.plugin_snapshot = self.configuration.safe_get( 'drbdmanage_snapshot_plugin') self.plugin_resize = self.configuration.safe_get( 'drbdmanage_resize_plugin') # needed as per pep8: # F841 local variable 'CS_DEPLOYED' is assigned to but never used global CS_DEPLOYED, CS_DISKLESS, CS_UPD_CON CS_DEPLOYED = dm_const.CSTATE_PREFIX + dm_const.FLAG_DEPLOY CS_DISKLESS = dm_const.CSTATE_PREFIX + dm_const.FLAG_DISKLESS CS_UPD_CON = dm_const.CSTATE_PREFIX + dm_const.FLAG_UPD_CON def dbus_connect(self): self.odm = dbus.SystemBus().get_object(self.drbdmanage_dbus_name, self.drbdmanage_dbus_interface) self.odm.ping() def call_or_reconnect(self, fn, *args): """Call DBUS function; on a disconnect try once to reconnect.""" try: return fn(*args) except dbus.DBusException as e: LOG.warning(_LW("Got disconnected; trying to reconnect. (%s)"), e) self.dbus_connect() # Old function object is invalid, get new one. 
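            # (dbus-python method proxies stay bound to the dead connection,
            # so after dbus_connect() the method has to be looked up again
            # on the fresh self.odm proxy; fn._method_name carries the
            # remote method's name used for that lookup below.)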
return getattr(self.odm, fn._method_name)(*args) def _fetch_answer_data(self, res, key, level=None, req=True): for code, fmt, data in res: if code == dm_exc.DM_INFO: if level and level != fmt: continue value = [v for k, v in data if k == key] if value: if len(value) == 1: return value[0] else: return value if req: if level: l = level + ":" + key else: l = key msg = _('DRBDmanage driver error: expected key "%s" ' 'not in answer, wrong DRBDmanage version?') % l LOG.error(msg) raise exception.VolumeDriverException(message=msg) return None def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(DrbdManageBaseDriver, self).do_setup(context) self.dbus_connect() def check_for_setup_error(self): """Verify that requirements are in place to use DRBDmanage driver.""" if not all((dbus, dm_exc, dm_const, dm_utils)): msg = _('DRBDmanage driver setup error: some required ' 'libraries (dbus, drbdmanage.*) not found.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) if self.odm.ping() != 0: message = _('Cannot ping DRBDmanage backend') raise exception.VolumeBackendAPIException(data=message) def _clean_uuid(self): """Returns a UUID string, WITHOUT braces.""" # Some uuid library versions put braces around the result!? # We don't want them, just a plain [0-9a-f-]+ string. id = str(uuid.uuid4()) id = id.replace("{", "") id = id.replace("}", "") return id def _check_result(self, res, ignore=None, ret=0): seen_success = False seen_error = False result = ret for (code, fmt, arg_l) in res: # convert from DBUS to Python arg = dict(arg_l) if ignore and code in ignore: if not result: result = code continue if code == dm_exc.DM_SUCCESS: seen_success = True continue if code == dm_exc.DM_INFO: continue seen_error = _("Received error string: %s") % (fmt % arg) if seen_error: raise exception.VolumeBackendAPIException(data=seen_error) if seen_success: return ret # by default okay - or the ignored error code. return ret # DRBDmanage works in kiB units; Cinder uses GiB. 
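    # Worked example: a 2 GiB Cinder volume is int(2 * units.Gi / units.Ki)
    # == 2097152 kiB for DRBDmanage, and int(2097152 * units.Ki / units.Gi)
    # == 2 on the way back. int() truncates, so sizes that are not a whole
    # number of GiB round down when converted back to Cinder units.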
def _vol_size_to_dm(self, size): return int(size * units.Gi / units.Ki) def _vol_size_to_cinder(self, size): return int(size * units.Ki / units.Gi) def is_clean_volume_name(self, name, prefix): try: if (name.startswith(CONF.volume_name_template % "") and uuid.UUID(name[7:]) is not None): return prefix + name[7:] except ValueError: return None try: if uuid.UUID(name) is not None: return prefix + name except ValueError: return None def _call_policy_plugin(self, plugin, pol_base, pol_this): """Returns True for done, False for timeout.""" pol_inp_data = dict(pol_base) pol_inp_data.update(pol_this, starttime=str(time.time())) retry = 0 while True: res, pol_result = self.call_or_reconnect( self.odm.run_external_plugin, plugin, pol_inp_data) self._check_result(res) if pol_result['result'] == dm_const.BOOL_TRUE: return True if pol_result['timeout'] == dm_const.BOOL_TRUE: return False eventlet.sleep(min(0.5 + retry / 5, 2)) retry += 1 def _wait_for_node_assignment(self, res_name, vol_nr, nodenames, filter_props=None, timeout=90, check_vol_deployed=True): """Return True as soon as one assignment matches the filter.""" # TODO(LINBIT): unify with policy plugins if not filter_props: filter_props = self.empty_dict end_time = time.time() + timeout retry = 0 while time.time() < end_time: res, assgs = self.call_or_reconnect(self.odm.list_assignments, nodenames, [res_name], 0, filter_props, self.empty_list) self._check_result(res) if len(assgs) > 0: for assg in assgs: vols = assg[3] for v_nr, v_prop in vols: if (v_nr == vol_nr): if not check_vol_deployed: # no need to check return True if v_prop[CS_DEPLOYED] == dm_const.BOOL_TRUE: return True retry += 1 # Not yet LOG.warning(_LW('Try #%(try)d: Volume "%(res)s"/%(vol)d ' 'not yet deployed on "%(host)s", waiting.'), {'try': retry, 'host': nodenames, 'res': res_name, 'vol': vol_nr}) eventlet.sleep(min(0.5 + retry / 5, 2)) # Timeout return False def _priv_hash_from_volume(self, volume): return dm_utils.dict_to_aux_props({ AUX_PROP_CINDER_VOL_ID: volume['id'], }) def snapshot_name_from_cinder_snapshot(self, snapshot): sn_name = self.is_clean_volume_name(snapshot['id'], DM_SN_PREFIX) return sn_name def _res_and_vl_data_for_volume(self, volume, empty_ok=False): """Find DRBD resource and volume ID. A DRBD resource might consist of several "volumes" (think consistency groups). So we have to find the number of the volume within one resource. Returns resource name, volume number, and resource and volume properties. """ # If we get a string, use it as-is. # Else it's a dictionary; then get the ID. 
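        # (Callers hand in either a bare volume-ID string -- e.g.
        # create_snapshot() passes snapshot["volume_id"] -- or the usual
        # Cinder volume dict carrying an 'id' key.)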
if isinstance(volume, six.string_types): v_uuid = volume else: v_uuid = volume['id'] res, rl = self.call_or_reconnect(self.odm.list_volumes, self.empty_dict, 0, dm_utils.dict_to_aux_props( {AUX_PROP_CINDER_VOL_ID: v_uuid}), self.empty_dict) self._check_result(res) if (not rl) or (len(rl) == 0): if empty_ok: LOG.debug("No volume %s found.", v_uuid) return None, None, None, None raise exception.VolumeBackendAPIException( data=_("volume %s not found in drbdmanage") % v_uuid) if len(rl) > 1: raise exception.VolumeBackendAPIException( data=_("multiple resources with name %s found by drbdmanage") % v_uuid) (r_name, r_props, vols) = rl[0] if len(vols) != 1: raise exception.VolumeBackendAPIException( data=_("not exactly one volume with id %s") % v_uuid) (v_nr, v_props) = vols[0] LOG.debug("volume %(uuid)s is %(res)s/%(nr)d; %(rprop)s, %(vprop)s", {'uuid': v_uuid, 'res': r_name, 'nr': v_nr, 'rprop': dict(r_props), 'vprop': dict(v_props)}) return r_name, v_nr, r_props, v_props def _resource_and_snap_data_from_snapshot(self, snapshot, empty_ok=False): """Find DRBD resource and snapshot name from the snapshot ID.""" s_uuid = snapshot['id'] res, rs = self.call_or_reconnect(self.odm.list_snapshots, self.empty_dict, self.empty_dict, 0, dm_utils.dict_to_aux_props( {AUX_PROP_CINDER_VOL_ID: s_uuid}), self.empty_dict) self._check_result(res) if (not rs) or (len(rs) == 0): if empty_ok: return None else: raise exception.VolumeBackendAPIException( data=_("no snapshot with id %s found in drbdmanage") % s_uuid) if len(rs) > 1: raise exception.VolumeBackendAPIException( data=_("multiple resources with snapshot ID %s found") % s_uuid) (r_name, snaps) = rs[0] if len(snaps) != 1: raise exception.VolumeBackendAPIException( data=_("not exactly one snapshot with id %s") % s_uuid) (s_name, s_props) = snaps[0] LOG.debug("snapshot %(uuid)s is %(res)s/%(snap)s", {'uuid': s_uuid, 'res': r_name, 'snap': s_name}) return r_name, s_name, s_props def _resource_name_volnr_for_volume(self, volume, empty_ok=False): res, vol, __, __ = self._res_and_vl_data_for_volume(volume, empty_ok) return res, vol def local_path(self, volume): d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(volume) res, data = self.call_or_reconnect(self.odm.text_query, [dm_const.TQ_GET_PATH, d_res_name, str(d_vol_nr)]) self._check_result(res) if len(data) == 1: return data[0] message = _('Got bad path information from DRBDmanage! (%s)') % data raise exception.VolumeBackendAPIException(data=message) def create_volume(self, volume): """Creates a DRBD resource. We address it later on via the ID that gets stored as a private property. """ # TODO(PM): consistency groups d_res_name = self.is_clean_volume_name(volume['id'], DM_VN_PREFIX) res = self.call_or_reconnect(self.odm.create_resource, d_res_name, self.empty_dict) self._check_result(res, ignore=[dm_exc.DM_EEXIST], ret=None) # If we get DM_EEXIST, then the volume already exists, eg. because # deploy gave an error on a previous try (like ENOSPC). # Still, there might or might not be the volume in the resource - # we have to check that explicitly. 
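        # empty_ok=True makes the lookup below return None instead of
        # raising, so a retried create can tell "resource exists but volume
        # record missing" apart from a lookup failure.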
(__, drbd_vol) = self._resource_name_volnr_for_volume(volume, empty_ok=True) if not drbd_vol: props = self._priv_hash_from_volume(volume) # TODO(PM): properties - redundancy, etc res = self.call_or_reconnect(self.odm.create_volume, d_res_name, self._vol_size_to_dm(volume['size']), props) self._check_result(res) drbd_vol = self._fetch_answer_data(res, dm_const.VOL_ID) # If we crashed between create_volume and the deploy call, # the volume might be defined but not exist on any server. Oh my. res = self.call_or_reconnect(self.odm.auto_deploy, d_res_name, self.drbdmanage_redundancy, 0, True) self._check_result(res) okay = self._call_policy_plugin(self.plugin_resource, self.policy_resource, dict(resource=d_res_name, volnr=str(drbd_vol))) if not okay: message = (_('DRBDmanage timeout waiting for volume creation; ' 'resource "%(res)s", volume "%(vol)s"') % {'res': d_res_name, 'vol': volume['id']}) raise exception.VolumeBackendAPIException(data=message) if self.drbdmanage_devs_on_controller: # TODO(pm): CG res = self.call_or_reconnect(self.odm.assign, socket.gethostname(), d_res_name, [(dm_const.FLAG_DISKLESS, dm_const.BOOL_TRUE)]) self._check_result(res, ignore=[dm_exc.DM_EEXIST]) return {} def delete_volume(self, volume): """Deletes a resource.""" d_res_name, d_vol_nr = self._resource_name_volnr_for_volume( volume, empty_ok=True) if not d_res_name: # OK, already gone. return True # TODO(PM): check if in use? Ask whether Primary, or just check result? res = self.call_or_reconnect(self.odm.remove_volume, d_res_name, d_vol_nr, False) self._check_result(res, ignore=[dm_exc.DM_ENOENT]) # Ask for volumes in that resource that are not scheduled for deletion. res, rl = self.call_or_reconnect(self.odm.list_volumes, [d_res_name], 0, [(dm_const.TSTATE_PREFIX + dm_const.FLAG_REMOVE, dm_const.BOOL_FALSE)], self.empty_list) self._check_result(res) # We expect the _resource_ to be here still (we just got a volnr from # it!), so just query the volumes. # If the resource has no volumes anymore, the current DRBDmanage # version (erroneously, IMO) returns no *resource*, too. if len(rl) > 1: message = _('DRBDmanage expected one resource ("%(res)s"), ' 'got %(n)d') % {'res': d_res_name, 'n': len(rl)} raise exception.VolumeBackendAPIException(data=message) # Delete resource, if empty if (not rl) or (not rl[0]) or (len(rl[0][2]) == 0): res = self.call_or_reconnect(self.odm.remove_resource, d_res_name, False) self._check_result(res, ignore=[dm_exc.DM_ENOENT]) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug("create vol from snap: from %(snap)s make %(vol)s", {'snap': snapshot['id'], 'vol': volume['id']}) # TODO(PM): Consistency groups. 
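        # Flow: resolve the DRBD resource/snapshot pair behind the Cinder
        # snapshot, restore it into a new resource named after the new
        # volume's ID, then block on the wait_for plugin until the restored
        # volume is reported as deployed.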
d_res_name, sname, sprop = self._resource_and_snap_data_from_snapshot( snapshot) new_res = self.is_clean_volume_name(volume['id'], DM_VN_PREFIX) r_props = self.empty_dict # TODO(PM): consistency groups => different volume number possible new_vol_nr = 0 v_props = [(new_vol_nr, self._priv_hash_from_volume(volume))] res = self.call_or_reconnect(self.odm.restore_snapshot, new_res, d_res_name, sname, r_props, v_props) self._check_result(res, ignore=[dm_exc.DM_ENOENT]) # TODO(PM): CG okay = self._call_policy_plugin(self.plugin_resource, self.policy_resource, dict(resource=new_res, volnr=str(new_vol_nr))) if not okay: message = (_('DRBDmanage timeout waiting for new volume ' 'after snapshot restore; ' 'resource "%(res)s", volume "%(vol)s"') % {'res': new_res, 'vol': volume['id']}) raise exception.VolumeBackendAPIException(data=message) def create_cloned_volume(self, volume, src_vref): temp_id = self._clean_uuid() snapshot = {'id': temp_id} self.create_snapshot({'id': temp_id, 'volume_id': src_vref['id']}) self.create_volume_from_snapshot(volume, snapshot) self.delete_snapshot(snapshot) if (('size' in volume) and (volume['size'] > src_vref['size'])): LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to " "%(dst_size)d", {'dst_vol': volume['id'], 'src_size': src_vref['size'], 'dst_size': volume['size']}) self.extend_volume(volume, volume['size']) def _update_volume_stats(self): data = {} data["vendor_name"] = 'Open Source' data["driver_version"] = self.VERSION # This has to match the name set in the cinder volume driver spec, # so keep it lowercase data["volume_backend_name"] = self.backend_name data["pools"] = [] res, free, total = self.call_or_reconnect(self.odm.cluster_free_query, self.drbdmanage_redundancy) self._check_result(res) location_info = ('DrbdManageBaseDriver:%(cvol)s:%(dbus)s' % {'cvol': self.dm_control_vol, 'dbus': self.drbdmanage_dbus_name}) # add volumes res, rl = self.call_or_reconnect(self.odm.list_volumes, self.empty_list, 0, self.empty_dict, self.empty_list) self._check_result(res) total_volumes = 0 for res in rl: total_volumes += len(res[2]) # TODO(PM): multiple DRBDmanage instances and/or multiple pools single_pool = {} single_pool.update(dict( pool_name=data["volume_backend_name"], free_capacity_gb=self._vol_size_to_cinder(free), total_capacity_gb=self._vol_size_to_cinder(total), reserved_percentage=self.configuration.reserved_percentage, location_info=location_info, total_volumes=total_volumes, filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function(), QoS_support=False)) data["pools"].append(single_pool) self._stats = data return self._stats def extend_volume(self, volume, new_size): d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(volume) res = self.call_or_reconnect(self.odm.resize_volume, d_res_name, d_vol_nr, -1, self._vol_size_to_dm(new_size), 0) self._check_result(res) okay = self._call_policy_plugin(self.plugin_resize, self.policy_resize, dict(resource=d_res_name, volnr=str(d_vol_nr), req_size=str(new_size))) if not okay: message = (_('DRBDmanage timeout waiting for volume size; ' 'volume ID "%(id)s" (res "%(res)s", vnr %(vnr)d)') % {'id': volume['id'], 'res': d_res_name, 'vnr': d_vol_nr}) raise exception.VolumeBackendAPIException(data=message) def create_snapshot(self, snapshot): """Creates a snapshot.""" sn_name = self.snapshot_name_from_cinder_snapshot(snapshot) d_res_name, d_vol_nr = self._resource_name_volnr_for_volume( snapshot["volume_id"]) res, data = self.call_or_reconnect(self.odm.list_assignments, 
self.empty_dict, [d_res_name], 0, {CS_DISKLESS: dm_const.BOOL_FALSE}, self.empty_list) self._check_result(res) nodes = [d[0] for d in data] if len(nodes) < 1: raise exception.VolumeBackendAPIException( _('Snapshot res "%s" that is not deployed anywhere?') % (d_res_name)) props = self._priv_hash_from_volume(snapshot) res = self.call_or_reconnect(self.odm.create_snapshot, d_res_name, sn_name, nodes, props) self._check_result(res) okay = self._call_policy_plugin(self.plugin_snapshot, self.policy_snapshot, dict(resource=d_res_name, snapshot=sn_name)) if not okay: message = (_('DRBDmanage timeout waiting for snapshot creation; ' 'resource "%(res)s", snapshot "%(sn)s"') % {'res': d_res_name, 'sn': sn_name}) raise exception.VolumeBackendAPIException(data=message) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" d_res_name, sname, _ = self._resource_and_snap_data_from_snapshot( snapshot, empty_ok=True) if not d_res_name: # resource already gone? LOG.warning(_LW("snapshot: %s not found, " "skipping delete operation"), snapshot['id']) LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id']) return True res = self.call_or_reconnect(self.odm.remove_snapshot, d_res_name, sname, True) return self._check_result(res, ignore=[dm_exc.DM_ENOENT]) # Class with iSCSI interface methods class DrbdManageIscsiDriver(DrbdManageBaseDriver): """Cinder driver that uses the iSCSI protocol. """ def __init__(self, *args, **kwargs): super(DrbdManageIscsiDriver, self).__init__(*args, **kwargs) target_driver = self.target_mapping[ self.configuration.safe_get('iscsi_helper')] LOG.debug('Attempting to initialize DRBD driver with the ' 'following target_driver: %s', target_driver) self.target_driver = importutils.import_object( target_driver, configuration=self.configuration, db=self.db, executor=self._execute) def get_volume_stats(self, refresh=False): """Get volume status.""" self._update_volume_stats() self._stats["storage_protocol"] = "iSCSI" return self._stats def ensure_export(self, context, volume): volume_path = self.local_path(volume) return self.target_driver.ensure_export( context, volume, volume_path) def create_export(self, context, volume, connector): volume_path = self.local_path(volume) export_info = self.target_driver.create_export( context, volume, volume_path) return {'provider_location': export_info['location'], 'provider_auth': export_info['auth'], } def remove_export(self, context, volume): return self.target_driver.remove_export(context, volume) def initialize_connection(self, volume, connector): return self.target_driver.initialize_connection(volume, connector) def validate_connector(self, connector): return self.target_driver.validate_connector(connector) def terminate_connection(self, volume, connector, **kwargs): return self.target_driver.terminate_connection(volume, connector, **kwargs) # for backwards compatibility keep the old class name, too DrbdManageDriver = DrbdManageIscsiDriver # Class with DRBD transport mode class DrbdManageDrbdDriver(DrbdManageBaseDriver): """Cinder driver that uses the DRBD protocol. 
""" def __init__(self, *args, **kwargs): super(DrbdManageDrbdDriver, self).__init__(*args, **kwargs) def get_volume_stats(self, refresh=False): """Get volume status.""" self._update_volume_stats() self._stats["storage_protocol"] = "DRBD" return self._stats def _return_local_access(self, nodename, volume, d_res_name=None, volume_path=None): if not volume_path: volume_path = self.local_path(volume) return { 'driver_volume_type': 'local', 'data': { "device_path": volume_path } } def _return_drbdadm_config(self, volume, nodename, d_res_name=None, volume_path=None): if not d_res_name: d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(volume) res, data = self.call_or_reconnect( self.odm.text_query, ['export_conf_split_up', nodename, d_res_name]) self._check_result(res) config = six.text_type(data.pop(0)) subst_data = {} while len(data): k = data.pop(0) subst_data[k] = data.pop(0) if not volume_path: volume_path = self.local_path(volume) return { 'driver_volume_type': 'drbd', 'data': { 'provider_location': ' '.join('drbd', nodename), 'device': volume_path, # TODO(pm): consistency groups 'devices': [volume_path], 'provider_auth': subst_data['shared-secret'], 'config': config, 'name': d_res_name, } } def _is_external_node(self, nodename): """Return whether the given node is an "external" node.""" # If the node accessing the data (the "initiator" in iSCSI speak, # "client" or "target" otherwise) is marked as an FLAG_EXTERNAL # node, it does not have DRBDmanage active - and that means # we have to send the necessary DRBD configuration. # # If DRBDmanage is running there, just pushing the (client) # assignment is enough to make the local path available. res, nodes = self.call_or_reconnect(self.odm.list_nodes, [nodename], 0, self.empty_dict, [dm_const.FLAG_EXTERNAL]) self._check_result(res) if len(nodes) != 1: msg = _('Expected exactly one node called "%s"') % nodename LOG.error(msg) raise exception.VolumeDriverException(message=msg) __, nodeattr = nodes[0] return getattr(nodeattr, dm_const.FLAG_EXTERNAL, dm_const.BOOL_FALSE) == dm_const.BOOL_TRUE def _return_connection_data(self, nodename, volume, d_res_name=None): if self._is_external_node(nodename): return self._return_drbdadm_config(nodename, volume, d_res_name=d_res_name) else: return self._return_local_access(nodename, volume) def create_export(self, context, volume, connector): d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(volume) nodename = connector["host"] # Ensure the node is known to DRBDmanage. # Note that this does *not* mean that DRBDmanage has to # be installed on it! # This is just so that DRBD allows the IP to connect. node_prop = { dm_const.NODE_ADDR: connector["ip"], dm_const.FLAG_DRBDCTRL: dm_const.BOOL_FALSE, dm_const.FLAG_STORAGE: dm_const.BOOL_FALSE, dm_const.FLAG_EXTERNAL: dm_const.BOOL_TRUE, } res = self.call_or_reconnect( self.odm.create_node, nodename, node_prop) self._check_result(res, ignore=[dm_exc.DM_EEXIST]) # Ensure the data is accessible, by creating an assignment. assg_prop = { dm_const.FLAG_DISKLESS: dm_const.BOOL_TRUE, } # If we create the assignment here, it's temporary - # and has to be removed later on again. assg_prop.update(dm_utils.aux_props_to_dict({ AUX_PROP_TEMP_CLIENT: dm_const.BOOL_TRUE, })) res = self.call_or_reconnect( self.odm.assign, nodename, d_res_name, assg_prop) self._check_result(res, ignore=[dm_exc.DM_EEXIST]) # Wait for DRBDmanage to have completed that action. 
# A DRBDmanage controlled node will set the cstate:deploy flag; # an external node will not be available to change it, so we have # to wait for the storage nodes to remove the upd_con flag # (ie. they're now ready to receive the connection). if self._is_external_node(nodename): self._wait_for_node_assignment( d_res_name, d_vol_nr, [], check_vol_deployed=False, filter_props={ # must be deployed CS_DEPLOYED: dm_const.BOOL_TRUE, # must be a storage node (not diskless), CS_DISKLESS: dm_const.BOOL_FALSE, # connection must be available, no need for updating CS_UPD_CON: dm_const.BOOL_FALSE, }) else: self._wait_for_node_assignment( d_res_name, d_vol_nr, [nodename], check_vol_deployed=True, filter_props={ CS_DEPLOYED: dm_const.BOOL_TRUE, }) return self._return_connection_data(nodename, volume) def ensure_export(self, context, volume): fields = volume['provider_location'].split(" ") nodename = fields[1] return self._return_connection_data(nodename, volume) def initialize_connection(self, volume, connector): nodename = connector["host"] return self._return_connection_data(nodename, volume) def terminate_connection(self, volume, connector, force=False, **kwargs): d_res_name, d_vol_nr = self._resource_name_volnr_for_volume( volume, empty_ok=True) if not d_res_name: return nodename = connector["host"] # If the DRBD volume is diskless on that node, we remove it; # if it has local storage, we keep it. res, data = self.call_or_reconnect( self.odm.list_assignments, [nodename], [d_res_name], 0, self.empty_list, self.empty_list) self._check_result(res, ignore=[dm_exc.DM_ENOENT]) if len(data) < 1: # already removed?! LOG.info(_LI('DRBD connection for %s already removed'), volume['id']) elif len(data) == 1: __, __, props, __ = data[0] my_props = dm_utils.dict_to_aux_props(props) diskless = getattr(props, dm_const.FLAG_DISKLESS, dm_const.BOOL_FALSE) temp_cli = getattr(my_props, AUX_PROP_TEMP_CLIENT, dm_const.BOOL_FALSE) # If diskless assigned, if ((diskless == dm_const.BOOL_TRUE) and (temp_cli == dm_const.BOOL_TRUE)): # remove the assignment # TODO(pm): does it make sense to relay "force" here? # What are the semantics? # TODO(pm): consistency groups shouldn't really # remove until *all* volumes are detached res = self.call_or_reconnect(self.odm.unassign, nodename, d_res_name, force) self._check_result(res, ignore=[dm_exc.DM_ENOENT]) else: # more than one assignment? LOG.error(_LE("DRBDmanage: too many assignments returned.")) return def remove_export(self, context, volume): pass cinder-8.0.0/cinder/volume/drivers/blockbridge.py0000664000567000056710000005240212701406250023176 0ustar jenkinsjenkins00000000000000# Copyright 2013-2015 Blockbridge Networks, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Blockbridge EPS iSCSI Volume Driver """ import base64 import socket from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import units import six from six.moves import http_client from six.moves import urllib from cinder import context from cinder import exception from cinder.i18n import _ from cinder.volume import driver from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) blockbridge_opts = [ cfg.StrOpt("blockbridge_api_host", help=_("IP address/hostname of Blockbridge API.")), cfg.IntOpt("blockbridge_api_port", help=_("Override HTTPS port to connect to Blockbridge " "API server.")), cfg.StrOpt("blockbridge_auth_scheme", default='token', choices=['token', 'password'], help=_("Blockbridge API authentication scheme (token " "or password)")), cfg.StrOpt("blockbridge_auth_token", help=_("Blockbridge API token (for auth scheme 'token')"), secret=True), cfg.StrOpt("blockbridge_auth_user", help=_("Blockbridge API user (for auth scheme 'password')")), cfg.StrOpt("blockbridge_auth_password", help=_("Blockbridge API password (for auth scheme 'password')"), secret=True), cfg.DictOpt("blockbridge_pools", default={'OpenStack': '+openstack'}, help=_("Defines the set of exposed pools and their associated " "backend query strings")), cfg.StrOpt("blockbridge_default_pool", help=_("Default pool name if unspecified.")), ] CONF = cfg.CONF CONF.register_opts(blockbridge_opts) class BlockbridgeAPIClient(object): _api_cfg = None def __init__(self, configuration=None): self.configuration = configuration def _get_api_cfg(self): if self._api_cfg: # return cached configuration return self._api_cfg if self.configuration.blockbridge_auth_scheme == 'password': user = self.configuration.safe_get('blockbridge_auth_user') pw = self.configuration.safe_get('blockbridge_auth_password') creds = "%s:%s" % (user, pw) if six.PY3: creds = creds.encode('utf-8') b64_creds = base64.encodestring(creds).decode('ascii') else: b64_creds = base64.encodestring(creds) authz = "Basic %s" % b64_creds.replace("\n", "") elif self.configuration.blockbridge_auth_scheme == 'token': token = self.configuration.blockbridge_auth_token or '' authz = "Bearer %s" % token # set and return cached api cfg self._api_cfg = { 'host': self.configuration.blockbridge_api_host, 'port': self.configuration.blockbridge_api_port, 'base_url': '/api/cinder', 'default_headers': { 'User-Agent': ("cinder-volume/%s" % BlockbridgeISCSIDriver.VERSION), 'Accept': 'application/vnd.blockbridge-3+json', 'Authorization': authz, }, } return self._api_cfg def submit(self, rel_url, method='GET', params=None, user_id=None, project_id=None, req_id=None, action=None, **kwargs): """Submit a request to the configured API endpoint.""" cfg = self._get_api_cfg() if cfg is None: msg = _("Failed to determine blockbridge API configuration") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # alter the url appropriately if an action is requested if action: rel_url += "/actions/%s" % action headers = cfg['default_headers'].copy() url = cfg['base_url'] + rel_url body = None # include user, project and req-id, if supplied tsk_ctx = [] if user_id and project_id: tsk_ctx.append("ext_auth=keystone/%s/%s" % (project_id, user_id)) if req_id: tsk_ctx.append("id=%s", req_id) if tsk_ctx: headers['X-Blockbridge-Task'] = ','.join(tsk_ctx) # encode params based on request method if method in ['GET', 'DELETE']: # For GET method add parameters to the URL if params: url += '?' 
+ urllib.parse.urlencode(params) elif method in ['POST', 'PUT', 'PATCH']: body = jsonutils.dumps(params) headers['Content-Type'] = 'application/json' else: raise exception.UnknownCmd(cmd=method) # connect and execute the request connection = http_client.HTTPSConnection(cfg['host'], cfg['port']) connection.request(method, url, body, headers) response = connection.getresponse() # read response data rsp_body = response.read() rsp_data = jsonutils.loads(rsp_body) connection.close() code = response.status if code in [200, 201, 202, 204]: pass elif code == 401: raise exception.NotAuthorized(_("Invalid credentials")) elif code == 403: raise exception.NotAuthorized(_("Insufficient privileges")) else: raise exception.VolumeBackendAPIException(data=rsp_data['message']) return rsp_data class BlockbridgeISCSIDriver(driver.ISCSIDriver): """Manages volumes hosted on Blockbridge EPS.""" VERSION = '1.3.0' def __init__(self, *args, **kwargs): super(BlockbridgeISCSIDriver, self).__init__(*args, **kwargs) self.client = kwargs.get('client', None) or ( BlockbridgeAPIClient(configuration=self.configuration)) self.configuration.append_config_values(blockbridge_opts) self.hostname = socket.gethostname() def do_setup(self, context): """Set up the Blockbridge volume driver.""" pass def check_for_setup_error(self): """Verify configuration is valid.""" # ensure the host is configured if self.configuration.safe_get('blockbridge_api_host') is None: raise exception.InvalidInput( reason=_("Blockbridge api host not configured")) # ensure the auth scheme is valid and has the necessary configuration. auth_scheme = self.configuration.safe_get("blockbridge_auth_scheme") if auth_scheme == 'password': auth_user = self.configuration.safe_get('blockbridge_auth_user') auth_pw = self.configuration.safe_get('blockbridge_auth_password') if auth_user is None: raise exception.InvalidInput( reason=_("Blockbridge user not configured (required for " "auth scheme 'password')")) if auth_pw is None: raise exception.InvalidInput( reason=_("Blockbridge password not configured (required " "for auth scheme 'password')")) elif auth_scheme == 'token': token = self.configuration.safe_get('blockbridge_auth_token') if token is None: raise exception.InvalidInput( reason=_("Blockbridge token not configured (required " "for auth scheme 'token')")) else: raise exception.InvalidInput( reason=(_("Blockbridge configured with invalid auth scheme " "'%(auth_scheme)s'") % {'auth_scheme': auth_scheme})) # ensure at least one pool is defined pools = self.configuration.safe_get('blockbridge_pools') if pools is None: raise exception.InvalidInput( reason=_("Blockbridge pools not configured")) default_pool = self.configuration.safe_get('blockbridge_default_pool') if default_pool and default_pool not in pools: raise exception.InvalidInput( reason=_("Blockbridge default pool does not exist")) def _vol_api_submit(self, vol_id, **kwargs): vol_id = urllib.parse.quote(vol_id, '') rel_url = "/volumes/%s" % vol_id return self.client.submit(rel_url, **kwargs) def _create_volume(self, vol_id, params, **kwargs): """Execute a backend volume create operation.""" self._vol_api_submit(vol_id, method='PUT', params=params, **kwargs) def _delete_volume(self, vol_id, **kwargs): """Execute a backend volume delete operation.""" self._vol_api_submit(vol_id, method='DELETE', **kwargs) def _extend_volume(self, vol_id, capacity, **kwargs): """Execute a backend volume grow operation.""" params = kwargs.get('params', {}) params['capacity'] = capacity self._vol_api_submit(vol_id, 
method='POST', action='grow', params=params, **kwargs) def _snap_api_submit(self, vol_id, snap_id, **kwargs): vol_id = urllib.parse.quote(vol_id, '') snap_id = urllib.parse.quote(snap_id, '') rel_url = "/volumes/%s/snapshots/%s" % (vol_id, snap_id) return self.client.submit(rel_url, **kwargs) def _create_snapshot(self, vol_id, snap_id, params, **kwargs): """Execute a backend snapshot create operation.""" self._snap_api_submit(vol_id, snap_id, method='PUT', params=params, **kwargs) def _delete_snapshot(self, vol_id, snap_id, **kwargs): """Execute a backend snapshot delete operation.""" return self._snap_api_submit(vol_id, snap_id, method='DELETE', **kwargs) def _export_api_submit(self, vol_id, ini_name, **kwargs): vol_id = urllib.parse.quote(vol_id, '') ini_name = urllib.parse.quote(ini_name, '') rel_url = "/volumes/%s/exports/%s" % (vol_id, ini_name) return self.client.submit(rel_url, **kwargs) def _create_export(self, vol_id, ini_name, params, **kwargs): """Execute a backend volume export operation.""" return self._export_api_submit(vol_id, ini_name, method='PUT', params=params, **kwargs) def _delete_export(self, vol_id, ini_name, **kwargs): """Remove a previously created volume export.""" self._export_api_submit(vol_id, ini_name, method='DELETE', **kwargs) def _get_pool_stats(self, pool, query, **kwargs): """Retrieve pool statistics and capabilities.""" pq = { 'pool': pool, 'query': query, } pq.update(kwargs) return self.client.submit('/status', params=pq) def _get_dbref_name(self, ref): display_name = ref.get('display_name') if not display_name: return ref.get('name') return display_name def _get_query_string(self, ctxt, volume): pools = self.configuration.blockbridge_pools default_pool = self.configuration.blockbridge_default_pool explicit_pool = volume_utils.extract_host(volume['host'], 'pool') pool_name = explicit_pool or default_pool if pool_name: return pools[pool_name] else: # no pool specified or defaulted -- just pick whatever comes out of # the dictionary first. return list(pools.values())[0] def create_volume(self, volume): """Create a volume on a Blockbridge EPS backend. :param volume: volume reference """ ctxt = context.get_admin_context() create_params = { 'name': self._get_dbref_name(volume), 'query': self._get_query_string(ctxt, volume), 'capacity': int(volume['size'] * units.Gi), } LOG.debug("Provisioning %(capacity)s byte volume " "with query '%(query)s'", create_params, resource=volume) return self._create_volume(volume['id'], create_params, user_id=volume['user_id'], project_id=volume['project_id']) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" create_params = { 'name': self._get_dbref_name(volume), 'capacity': int(volume['size'] * units.Gi), 'src': { 'volume_id': src_vref['id'], }, } LOG.debug("Cloning source volume %(id)s", src_vref, resource=volume) return self._create_volume(volume['id'], create_params, user_id=volume['user_id'], project_id=volume['project_id']) def delete_volume(self, volume): """Remove an existing volume. :param volume: volume reference """ LOG.debug("Removing volume %(id)s", volume, resource=volume) return self._delete_volume(volume['id'], user_id=volume['user_id'], project_id=volume['project_id']) def create_snapshot(self, snapshot): """Create snapshot of existing volume. 
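        On the backend this becomes a PUT on the snapshot resource,
        roughly (ids are illustrative)::

            PUT /api/cinder/volumes/<volume_id>/snapshots/<snapshot_id>
            {"name": "<display name>"}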
:param snapshot: snapshot reference """ create_params = { 'name': self._get_dbref_name(snapshot), } LOG.debug("Creating snapshot of volume %(volume_id)s", snapshot, resource=snapshot) return self._create_snapshot(snapshot['volume_id'], snapshot['id'], create_params, user_id=snapshot['user_id'], project_id=snapshot['project_id']) def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from existing snapshot. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ create_params = { 'name': self._get_dbref_name(volume), 'capacity': int(volume['size'] * units.Gi), 'src': { 'volume_id': snapshot['volume_id'], 'snapshot_id': snapshot['id'], }, } LOG.debug("Creating volume from snapshot %(id)s", snapshot, resource=volume) return self._create_volume(volume['id'], create_params, user_id=volume['user_id'], project_id=volume['project_id']) def delete_snapshot(self, snapshot): """Delete volume's snapshot. :param snapshot: snapshot reference """ LOG.debug("Deleting snapshot of volume %(volume_id)s", snapshot, resource=snapshot) self._delete_snapshot(snapshot['volume_id'], snapshot['id'], user_id=snapshot['user_id'], project_id=snapshot['project_id']) def create_export(self, _ctx, volume, connector): """Do nothing: target created during instance attachment.""" pass def ensure_export(self, _ctx, volume): """Do nothing: target created during instance attachment.""" pass def remove_export(self, _ctx, volume): """Do nothing: target created during instance attachment.""" pass def initialize_connection(self, volume, connector, **kwargs): """Attach volume to initiator/host. Creates a profile for the initiator, and adds the new profile to the target ACL. """ # generate a CHAP secret here -- there is no way to retrieve an # existing CHAP secret over the Blockbridge API, so it must be # supplied by the volume driver. export_params = { 'chap_user': ( kwargs.get('user', volume_utils.generate_username(16))), 'chap_secret': ( kwargs.get('password', volume_utils.generate_password(32))), } LOG.debug("Configuring export for %(initiator)s", connector, resource=volume) rsp = self._create_export(volume['id'], connector['initiator'], export_params, user_id=volume['user_id'], project_id=volume['project_id']) # combine locally generated chap credentials with target iqn/lun to # present the attach properties. target_portal = "%s:%s" % (rsp['target_ip'], rsp['target_port']) properties = { 'target_discovered': False, 'target_portal': target_portal, 'target_iqn': rsp['target_iqn'], 'target_lun': rsp['target_lun'], 'volume_id': volume['id'], 'auth_method': 'CHAP', 'auth_username': rsp['initiator_login'], 'auth_password': export_params['chap_secret'], } LOG.debug("Attach properties: %(properties)s", {'properties': properties}) return { 'driver_volume_type': 'iscsi', 'data': properties, } def terminate_connection(self, volume, connector, **kwargs): """Detach volume from the initiator. Removes initiator profile entry from target ACL.
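        Teardown is a DELETE on the backend export resource, roughly
        (ids are illustrative)::

            DELETE /api/cinder/volumes/<volume_id>/exports/<initiator_iqn>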
""" LOG.debug("Unconfiguring export for %(initiator)s", connector, resource=volume) self._delete_export(volume['id'], connector['initiator'], user_id=volume['user_id'], project_id=volume['project_id']) def extend_volume(self, volume, new_size): """Extend an existing volume.""" capacity = new_size * units.Gi LOG.debug("Extending volume to %(capacity)s bytes", {'capacity': capacity}, resource=volume) self._extend_volume(volume['id'], int(new_size * units.Gi), user_id=volume['user_id'], project_id=volume['project_id']) def get_volume_stats(self, refresh=False): if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): if self.configuration: cfg_name = self.configuration.safe_get('volume_backend_name') backend_name = cfg_name or self.__class__.__name__ driver_cfg = { 'hostname': self.hostname, 'version': self.VERSION, 'backend_name': backend_name, } filter_function = self.get_filter_function() goodness_function = self.get_goodness_function() pools = [] LOG.debug("Updating volume driver statistics", resource={'type': 'driver', 'id': backend_name}) for pool_name, query in self.configuration.blockbridge_pools.items(): stats = self._get_pool_stats(pool_name, query, **driver_cfg) system_serial = stats.get('system_serial', 'unknown') free_capacity = stats.get('free_capacity', None) total_capacity = stats.get('total_capacity', None) provisioned_capacity = stats.get('provisioned_capacity', None) if free_capacity is None: free_capacity = 'unknown' else: free_capacity = int(free_capacity / units.Gi) if total_capacity is None: total_capacity = 'unknown' else: total_capacity = int(total_capacity / units.Gi) pool = { 'pool_name': pool_name, 'location_info': ('BlockbridgeDriver:%(sys_id)s:%(pool)s' % {'sys_id': system_serial, 'pool': pool_name}), 'max_over_subscription_ratio': ( self.configuration.safe_get('max_over_subscription_ratio') ), 'free_capacity_gb': free_capacity, 'total_capacity_gb': total_capacity, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'filter_function': filter_function, 'goodness_function': goodness_function, } if provisioned_capacity is not None: pool['provisioned_capacity_gb'] = int( provisioned_capacity / units.Gi ) pools.append(pool) self._stats = { 'volume_backend_name': backend_name, 'vendor_name': 'Blockbridge', 'driver_version': self.VERSION, 'storage_protocol': 'iSCSI', 'pools': pools, } cinder-8.0.0/cinder/volume/drivers/huawei/0000775000567000056710000000000012701406543021641 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/huawei/rest_client.py0000664000567000056710000023416412701406250024533 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ast import json import re import six import socket import time from oslo_log import log as logging from oslo_utils import excutils from six.moves import http_cookiejar from six.moves import urllib from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils from cinder.volume.drivers.huawei import constants LOG = logging.getLogger(__name__) class RestClient(object): """Common class for Huawei OceanStor storage system.""" def __init__(self, configuration, san_address, san_user, san_password, **kwargs): self.configuration = configuration self.san_address = san_address self.san_user = san_user self.san_password = san_password self.init_http_head() self.storage_pools = kwargs.get('storage_pools', self.configuration.storage_pools) self.iscsi_info = kwargs.get('iscsi_info', self.configuration.iscsi_info) self.iscsi_default_target_ip = kwargs.get( 'iscsi_default_target_ip', self.configuration.iscsi_default_target_ip) def init_http_head(self): self.cookie = http_cookiejar.CookieJar() self.url = None self.device_id = None self.headers = { "Connection": "keep-alive", "Content-Type": "application/json", } def do_call(self, url=None, data=None, method=None, calltimeout=constants.SOCKET_TIMEOUT): """Send requests to Huawei storage server. Send HTTPS call, get response in JSON. Convert response into Python Object and return it. """ if self.url: url = self.url + url handler = urllib.request.HTTPCookieProcessor(self.cookie) opener = urllib.request.build_opener(handler) urllib.request.install_opener(opener) res_json = None try: socket.setdefaulttimeout(calltimeout) if data: data = json.dumps(data) req = urllib.request.Request(url, data, self.headers) if method: req.get_method = lambda: method res = urllib.request.urlopen(req).read().decode("utf-8") if "xx/sessions" not in url: LOG.info(_LI('\n\n\n\nRequest URL: %(url)s\n\n' 'Call Method: %(method)s\n\n' 'Request Data: %(data)s\n\n' 'Response Data:%(res)s\n\n'), {'url': url, 'method': method, 'data': data, 'res': res}) except Exception as err: LOG.error(_LE('Bad response from server: %(url)s.' ' Error: %(err)s'), {'url': url, 'err': err}) json_msg = ('{"error":{"code": %s,"description": "Connect to ' 'server error."}}') % constants.ERROR_CONNECT_TO_SERVER res_json = json.loads(json_msg) return res_json try: res_json = json.loads(res) except Exception as err: LOG.error(_LE('JSON transfer error: %s.'), err) raise return res_json def login(self): """Login Huawei storage array.""" device_id = None for item_url in self.san_address: url = item_url + "xx/sessions" data = {"username": self.san_user, "password": self.san_password, "scope": "0"} self.init_http_head() result = self.do_call(url, data, calltimeout=constants.LOGIN_SOCKET_TIMEOUT) if (result['error']['code'] != 0) or ("data" not in result): LOG.error(_LE("Login error. URL: %(url)s\n" "Reason: %(reason)s."), {"url": item_url, "reason": result}) continue LOG.debug('Login success: %(url)s', {'url': item_url}) device_id = result['data']['deviceid'] self.device_id = device_id self.url = item_url + device_id self.headers['iBaseToken'] = result['data']['iBaseToken'] break if device_id is None: msg = _("Failed to login with all rest URLs.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return device_id def try_login(self): try: self.login() except Exception as err: LOG.warning(_LW('Login failed. Error: %s.'), err) @utils.synchronized('huawei_cinder_call') def call(self, url, data=None, method=None): """Send requests to server. 
If fail, try another RestURL. """ device_id = None old_url = self.url result = self.do_call(url, data, method) error_code = result['error']['code'] if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): LOG.error(_LE("Can't open the recent url, relogin.")) device_id = self.login() if device_id is not None: LOG.debug('Replace URL: \n' 'Old URL: %(old_url)s\n,' 'New URL: %(new_url)s\n.', {'old_url': old_url, 'new_url': self.url}) result = self.do_call(url, data, method) if result['error']['code'] in constants.RELOGIN_ERROR_PASS: result['error']['code'] = 0 return result def logout(self): """Logout the session.""" url = "/sessions" if self.url: result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Logout session error.')) def _assert_rest_result(self, result, err_str): if result['error']['code'] != 0: msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str, 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _assert_data_in_result(self, result, msg): if 'data' not in result: err_msg = _('%s "data" is not in result.') % msg LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) def create_lun(self, lun_params): url = "/lun" result = self.call(url, lun_params) if result['error']['code'] == constants.ERROR_VOLUME_ALREADY_EXIST: lun_id = self.get_lun_id_by_name(lun_params['NAME']) if lun_id: return self.get_lun_info(lun_id) msg = _('Create lun error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def check_lun_exist(self, lun_id, lun_wwn=None): url = "/lun/" + lun_id result = self.call(url, None, "GET") error_code = result['error']['code'] if error_code != 0: return False if lun_wwn and result['data']['WWN'] != lun_wwn: LOG.debug("LUN ID %(id)s with WWN %(wwn)s does not exist on " "the array.", {"id": lun_id, "wwn": lun_wwn}) return False return True def delete_lun(self, lun_id): url = "/lun/" + lun_id data = {"TYPE": "11", "ID": lun_id} result = self.call(url, data, "DELETE") self._assert_rest_result(result, _('Delete lun error.')) def get_all_pools(self): url = "/storagepool" result = self.call(url, None) msg = _('Query resource pool error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def get_pool_info(self, pool_name=None, pools=None): info = {} if not pool_name: return info for pool in pools: if pool_name.strip() != pool['NAME']: continue if pool.get('USAGETYPE') == constants.FILE_SYSTEM_POOL_TYPE: break info['ID'] = pool['ID'] info['CAPACITY'] = pool.get('DATASPACE', pool['USERFREECAPACITY']) info['TOTALCAPACITY'] = pool['USERTOTALCAPACITY'] return info def get_pool_id(self, pool_name): pools = self.get_all_pools() pool_info = self.get_pool_info(pool_name, pools) if not pool_info: # The following code is to keep compatibility with old version of # Huawei driver. for pool_name in self.storage_pools: pool_info = self.get_pool_info(pool_name, pools) if pool_info: break if not pool_info: msg = _('Can not get pool info. 
pool: %s') % pool_name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return pool_info['ID'] def _get_id_from_result(self, result, name, key): if 'data' in result: for item in result['data']: if name == item.get(key): return item['ID'] def get_lun_id_by_name(self, name): url = "/lun?range=[0-65535]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lun id by name error.')) return self._get_id_from_result(result, name, 'NAME') def activate_snapshot(self, snapshot_id): url = "/snapshot/activate" data = {"SNAPSHOTLIST": [snapshot_id]} result = self.call(url, data) self._assert_rest_result(result, _('Activate snapshot error.')) def create_snapshot(self, lun_id, snapshot_name, snapshot_description): url = "/snapshot" data = {"TYPE": "27", "NAME": snapshot_name, "PARENTTYPE": "11", "DESCRIPTION": snapshot_description, "PARENTID": lun_id} result = self.call(url, data) msg = _('Create snapshot error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def get_lun_id(self, volume, volume_name): lun_id = (volume.get('provider_location') or self.get_lun_id_by_name(volume_name)) if not lun_id: msg = (_("Can't find lun info on the array. " "volume: %(id)s, lun name: %(name)s.") % {'id': volume['id'], 'name': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return lun_id def check_snapshot_exist(self, snapshot_id): url = "/snapshot/%s" % snapshot_id result = self.call(url, None, "GET") error_code = result['error']['code'] if error_code != 0: return False return True def stop_snapshot(self, snapshot_id): url = "/snapshot/stop" stopdata = {"ID": snapshot_id} result = self.call(url, stopdata, "PUT") self._assert_rest_result(result, _('Stop snapshot error.')) def delete_snapshot(self, snapshotid): url = "/snapshot/%s" % snapshotid data = {"TYPE": "27", "ID": snapshotid} result = self.call(url, data, "DELETE") self._assert_rest_result(result, _('Delete snapshot error.')) def get_snapshot_id_by_name(self, name): url = "/snapshot?range=[0-32767]" description = 'The snapshot license file is unavailable.' result = self.call(url, None, "GET") if 'error' in result: if description == result['error']['description']: return self._assert_rest_result(result, _('Get snapshot id error.')) return self._get_id_from_result(result, name, 'NAME') def create_luncopy(self, luncopyname, srclunid, tgtlunid): """Create a luncopy.""" url = "/luncopy" data = {"TYPE": 219, "NAME": luncopyname, "DESCRIPTION": luncopyname, "COPYSPEED": 2, "LUNCOPYTYPE": "1", "SOURCELUN": ("INVALID;%s;INVALID;INVALID;INVALID" % srclunid), "TARGETLUN": ("INVALID;%s;INVALID;INVALID;INVALID" % tgtlunid)} result = self.call(url, data) msg = _('Create luncopy error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def add_host_to_hostgroup(self, host_id): """Associate host to hostgroup. If hostgroup doesn't exist, create one. 
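        The sequence is idempotent: an existing hostgroup (named from the
        host id) is reused, and the host is associated only if it is not
        already a member.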
""" hostgroup_name = constants.HOSTGROUP_PREFIX + host_id hostgroup_id = self.create_hostgroup_with_check(hostgroup_name) is_associated = self._is_host_associate_to_hostgroup(hostgroup_id, host_id) if not is_associated: self._associate_host_to_hostgroup(hostgroup_id, host_id) return hostgroup_id def get_tgt_port_group(self, tgt_port_group): """Find target portgroup id by target port group name.""" url = "/portgroup?range=[0-8191]&TYPE=257" result = self.call(url, None, "GET") msg = _('Find portgroup error.') self._assert_rest_result(result, msg) return self._get_id_from_result(result, tgt_port_group, 'NAME') def _associate_portgroup_to_view(self, view_id, portgroup_id): url = "/MAPPINGVIEW/CREATE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "257", "ASSOCIATEOBJID": portgroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Associate portgroup to mapping ' 'view error.')) def _portgroup_associated(self, view_id, portgroup_id): url = ("/mappingview/associate?TYPE=245&" "ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=%s" % portgroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check portgroup associate error.')) if self._get_id_from_result(result, view_id, 'ID'): return True return False def do_mapping(self, lun_id, hostgroup_id, host_id, portgroup_id=None): """Add hostgroup and lungroup to mapping view.""" lungroup_name = constants.LUNGROUP_PREFIX + host_id mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id lungroup_id = self._find_lungroup(lungroup_name) view_id = self.find_mapping_view(mapping_view_name) map_info = {} LOG.info(_LI( 'do_mapping, lun_group: %(lun_group)s, ' 'view_id: %(view_id)s, lun_id: %(lun_id)s.'), {'lun_group': lungroup_id, 'view_id': view_id, 'lun_id': lun_id}) try: # Create lungroup and add LUN into to lungroup. if lungroup_id is None: lungroup_id = self._create_lungroup(lungroup_name) is_associated = self._is_lun_associated_to_lungroup(lungroup_id, lun_id) if not is_associated: self.associate_lun_to_lungroup(lungroup_id, lun_id) if view_id is None: view_id = self._add_mapping_view(mapping_view_name) self._associate_hostgroup_to_view(view_id, hostgroup_id) self._associate_lungroup_to_view(view_id, lungroup_id) if portgroup_id: self._associate_portgroup_to_view(view_id, portgroup_id) else: if not self.hostgroup_associated(view_id, hostgroup_id): self._associate_hostgroup_to_view(view_id, hostgroup_id) if not self.lungroup_associated(view_id, lungroup_id): self._associate_lungroup_to_view(view_id, lungroup_id) if portgroup_id: if not self._portgroup_associated(view_id, portgroup_id): self._associate_portgroup_to_view(view_id, portgroup_id) version = self.find_array_version() if version >= constants.ARRAY_VERSION: aval_luns = self.find_view_by_id(view_id) map_info["lun_id"] = lun_id map_info["view_id"] = view_id map_info["aval_luns"] = aval_luns except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE( 'Error occurred when adding hostgroup and lungroup to ' 'view. 
Remove lun from lungroup now.')) self.remove_lun_from_lungroup(lungroup_id, lun_id) return map_info def check_iscsi_initiators_exist_in_host(self, host_id): url = "/iscsi_initiator?range=[0-256]&PARENTID=%s" % host_id result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get host initiators info failed.') if "data" in result: return True return False def ensure_initiator_added(self, initiator_name, host_id): added = self._initiator_is_added_to_array(initiator_name) if not added: self._add_initiator_to_array(initiator_name) if not self.is_initiator_associated_to_host(initiator_name): self._associate_initiator_to_host(initiator_name, host_id) def _get_iscsi_tgt_port(self): url = "/iscsidevicename" result = self.call(url, None) msg = _('Get iSCSI target port error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'][0]['CMO_ISCSI_DEVICE_NAME'] def find_hostgroup(self, groupname): """Get the given hostgroup id.""" url = "/hostgroup?range=[0-8191]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get hostgroup information error.')) return self._get_id_from_result(result, groupname, 'NAME') def _find_lungroup(self, lungroup_name): """Get the given lungroup id.""" url = "/lungroup?range=[0-8191]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lungroup information error.')) return self._get_id_from_result(result, lungroup_name, 'NAME') def create_hostgroup_with_check(self, hostgroup_name): """Find the hostgroup on the array, or create it.""" hostgroup_id = self.find_hostgroup(hostgroup_name) if hostgroup_id: LOG.info(_LI( 'create_hostgroup_with_check. ' 'hostgroup name: %(name)s, ' 'hostgroup id: %(id)s'), {'name': hostgroup_name, 'id': hostgroup_id}) return hostgroup_id try: hostgroup_id = self._create_hostgroup(hostgroup_name) except Exception: LOG.info(_LI( 'Failed to create hostgroup: %(name)s. ' 'Please check if it exists on the array.'), {'name': hostgroup_name}) hostgroup_id = self.find_hostgroup(hostgroup_name) if hostgroup_id is None: err_msg = (_( 'Failed to create hostgroup: %(name)s. ' 'Check if it exists on the array.') % {'name': hostgroup_name}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) LOG.info(_LI( 'create_hostgroup_with_check. ' 'Create hostgroup success.
' 'hostgroup name: %(name)s, ' 'hostgroup id: %(id)s'), {'name': hostgroup_name, 'id': hostgroup_id}) return hostgroup_id def _create_hostgroup(self, hostgroup_name): url = "/hostgroup" data = {"TYPE": "14", "NAME": hostgroup_name} result = self.call(url, data) msg = _('Create hostgroup error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def _create_lungroup(self, lungroup_name): url = "/lungroup" data = {"DESCRIPTION": lungroup_name, "APPTYPE": '0', "GROUPTYPE": '0', "NAME": lungroup_name} result = self.call(url, data) msg = _('Create lungroup error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def delete_lungroup(self, lungroup_id): url = "/LUNGroup/" + lungroup_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete lungroup error.')) def lungroup_associated(self, view_id, lungroup_id): url = ("/mappingview/associate?TYPE=245&" "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % lungroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check lungroup associate error.')) if self._get_id_from_result(result, view_id, 'ID'): return True return False def hostgroup_associated(self, view_id, hostgroup_id): url = ("/mappingview/associate?TYPE=245&" "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check hostgroup associate error.')) if self._get_id_from_result(result, view_id, 'ID'): return True return False def get_host_lun_id(self, host_id, lun_id): url = ("/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21" "&ASSOCIATEOBJID=%s" % (host_id)) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find host lun id error.')) host_lun_id = 1 if 'data' in result: for item in result['data']: if lun_id == item['ID']: associate_data = item['ASSOCIATEMETADATA'] try: hostassoinfo = json.loads(associate_data) host_lun_id = hostassoinfo['HostLUNID'] break except Exception as err: LOG.error(_LE("JSON transfer data error. %s."), err) raise return host_lun_id def get_host_id_by_name(self, host_name): """Get the given host ID.""" url = "/host?range=[0-65535]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find host in hostgroup error.')) return self._get_id_from_result(result, host_name, 'NAME') def add_host_with_check(self, host_name, host_name_before_hash): host_id = self.get_host_id_by_name(host_name) if host_id: LOG.info(_LI( 'add_host_with_check. ' 'host name: %(name)s, ' 'host id: %(id)s'), {'name': host_name, 'id': host_id}) return host_id try: host_id = self._add_host(host_name, host_name_before_hash) except Exception: LOG.info(_LI( 'Failed to create host: %(name)s. ' 'Check if it exists on the array.'), {'name': host_name}) host_id = self.get_host_id_by_name(host_name) if not host_id: err_msg = (_( 'Failed to create host: %(name)s. ' 'Please check if it exists on the array.') % {'name': host_name}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) LOG.info(_LI( 'add_host_with_check. ' 'create host success.
' 'host name: %(name)s, ' 'host id: %(id)s'), {'name': host_name, 'id': host_id}) return host_id def _add_host(self, hostname, host_name_before_hash): """Add a new host.""" url = "/host" data = {"TYPE": "21", "NAME": hostname, "OPERATIONSYSTEM": "0", "DESCRIPTION": host_name_before_hash} result = self.call(url, data) self._assert_rest_result(result, _('Add new host error.')) if 'data' in result: return result['data']['ID'] def _is_host_associate_to_hostgroup(self, hostgroup_id, host_id): """Check whether the host is associated to the hostgroup.""" url = ("/host/associate?TYPE=21&" "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check hostgroup associate error.')) if self._get_id_from_result(result, host_id, 'ID'): return True return False def _is_lun_associated_to_lungroup(self, lungroup_id, lun_id): """Check whether the lun is associated to the lungroup.""" url = ("/lun/associate?TYPE=11&" "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % lungroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check lungroup associate error.')) if self._get_id_from_result(result, lun_id, 'ID'): return True return False def _associate_host_to_hostgroup(self, hostgroup_id, host_id): url = "/hostgroup/associate" data = {"TYPE": "14", "ID": hostgroup_id, "ASSOCIATEOBJTYPE": "21", "ASSOCIATEOBJID": host_id} result = self.call(url, data) self._assert_rest_result(result, _('Associate host to hostgroup ' 'error.')) def associate_lun_to_lungroup(self, lungroup_id, lun_id): """Associate lun to lungroup.""" url = "/lungroup/associate" data = {"ID": lungroup_id, "ASSOCIATEOBJTYPE": "11", "ASSOCIATEOBJID": lun_id} result = self.call(url, data) self._assert_rest_result(result, _('Associate lun to lungroup error.')) def remove_lun_from_lungroup(self, lungroup_id, lun_id): """Remove lun from lungroup.""" url = ("/lungroup/associate?ID=%s&ASSOCIATEOBJTYPE=11" "&ASSOCIATEOBJID=%s" % (lungroup_id, lun_id)) result = self.call(url, None, 'DELETE') self._assert_rest_result( result, _('Delete associated lun from lungroup error.')) def _initiator_is_added_to_array(self, ininame): """Check whether the initiator is already added on the array.""" url = "/iscsi_initiator?range=[0-256]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check initiator added to array error.')) if self._get_id_from_result(result, ininame, 'ID'): return True return False def is_initiator_associated_to_host(self, ininame): """Check whether the initiator is associated to the host.""" url = "/iscsi_initiator?range=[0-256]" result = self.call(url, None, "GET") self._assert_rest_result( result, _('Check initiator associated to host error.')) if 'data' in result: for item in result['data']: if item['ID'] == ininame and item['ISFREE'] == "true": return False return True def _add_initiator_to_array(self, initiator_name): """Add a new initiator to storage device.""" url = "/iscsi_initiator" data = {"TYPE": "222", "ID": initiator_name, "USECHAP": "false"} result = self.call(url, data, "POST") self._assert_rest_result(result, _('Add initiator to array error.')) def _add_initiator_to_host(self, initiator_name, host_id): url = "/iscsi_initiator/" + initiator_name data = {"TYPE": "222", "ID": initiator_name, "USECHAP": "false", "PARENTTYPE": "21", "PARENTID": host_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Associate initiator to host error.')) def _associate_initiator_to_host(self, initiator_name, host_id): 
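        # NOTE: the "CHAPinfo" value looked up below comes from the huawei
        # XML config; _use_chap() splits it on ';' as "<user>;<password>"
        # (format inferred from that split).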
"""Associate initiator with the host.""" chapinfo = self.find_chap_info(self.iscsi_info, initiator_name) multipath_type = self._find_alua_info(self.iscsi_info, initiator_name) if chapinfo: LOG.info(_LI('Use CHAP when adding initiator to host.')) self._use_chap(chapinfo, initiator_name, host_id) else: self._add_initiator_to_host(initiator_name, host_id) if multipath_type: LOG.info(_LI('Use ALUA when adding initiator to host.')) self._use_alua(initiator_name, multipath_type) def find_chap_info(self, iscsi_info, initiator_name): """Find CHAP info from xml.""" chapinfo = None for ini in iscsi_info: if ini['Name'] == initiator_name: if 'CHAPinfo' in ini: chapinfo = ini['CHAPinfo'] break return chapinfo def _find_alua_info(self, iscsi_info, initiator_name): """Find ALUA info from xml.""" multipath_type = 0 for ini in iscsi_info: if ini['Name'] == initiator_name: if 'ALUA' in ini: if ini['ALUA'] != '1' and ini['ALUA'] != '0': msg = (_( 'Invalid ALUA value. ' 'ALUA value must be 1 or 0.')) LOG.error(msg) raise exception.InvalidInput(msg) else: multipath_type = ini['ALUA'] break return multipath_type def _use_chap(self, chapinfo, initiator_name, host_id): """Use CHAP when adding initiator to host.""" (chap_username, chap_password) = chapinfo.split(";") url = "/iscsi_initiator/" + initiator_name data = {"TYPE": "222", "USECHAP": "true", "CHAPNAME": chap_username, "CHAPPASSWORD": chap_password, "ID": initiator_name, "PARENTTYPE": "21", "PARENTID": host_id} result = self.call(url, data, "PUT") msg = _('Use CHAP to associate initiator to host error. ' 'Please check the CHAP username and password.') self._assert_rest_result(result, msg) def _use_alua(self, initiator_name, multipath_type): """Use ALUA when adding initiator to host.""" url = "/iscsi_initiator" data = {"ID": initiator_name, "MULTIPATHTYPE": multipath_type} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Use ALUA to associate initiator to host error.')) def remove_chap(self, initiator_name): """Remove CHAP when terminate connection.""" url = "/iscsi_initiator" data = {"USECHAP": "false", "MULTIPATHTYPE": "0", "ID": initiator_name} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove CHAP error.')) def find_mapping_view(self, name): """Find mapping view.""" url = "/mappingview?range=[0-8191]" result = self.call(url, None, "GET") msg = _('Find mapping view error.') self._assert_rest_result(result, msg) return self._get_id_from_result(result, name, 'NAME') def _add_mapping_view(self, name): url = "/mappingview" data = {"NAME": name, "TYPE": "245"} result = self.call(url, data) self._assert_rest_result(result, _('Add mapping view error.')) return result['data']['ID'] def _associate_hostgroup_to_view(self, view_id, hostgroup_id): url = "/MAPPINGVIEW/CREATE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "14", "ASSOCIATEOBJID": hostgroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Associate host to mapping view ' 'error.')) def _associate_lungroup_to_view(self, view_id, lungroup_id): url = "/MAPPINGVIEW/CREATE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "256", "ASSOCIATEOBJID": lungroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Associate lungroup to mapping view error.')) def delete_lungroup_mapping_view(self, view_id, lungroup_id): """Remove lungroup associate from the mapping view.""" url = "/mappingview/REMOVE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "256", "ASSOCIATEOBJID": 
lungroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Delete lungroup from mapping view ' 'error.')) def delete_hostgoup_mapping_view(self, view_id, hostgroup_id): """Remove hostgroup associate from the mapping view.""" url = "/mappingview/REMOVE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "14", "ASSOCIATEOBJID": hostgroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Delete hostgroup from mapping view error.')) def delete_portgroup_mapping_view(self, view_id, portgroup_id): """Remove portgroup associate from the mapping view.""" url = "/mappingview/REMOVE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "257", "ASSOCIATEOBJID": portgroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Delete portgroup from mapping view error.')) def delete_mapping_view(self, view_id): """Remove mapping view from the storage.""" url = "/mappingview/" + view_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete mapping view error.')) def get_lunnum_from_lungroup(self, lungroup_id): """Check if there are still other luns associated to the lungroup.""" lunnum = 0 if not lungroup_id: return lunnum url = ("/lun/count?TYPE=11&ASSOCIATEOBJTYPE=256&" "ASSOCIATEOBJID=%s" % lungroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find lun number error.')) if 'data' in result: lunnum = int(result['data']['COUNT']) return lunnum def is_portgroup_associated_to_view(self, view_id, portgroup_id): """Check whether the port group is associated to the mapping view.""" url = ("/portgroup/associate?ASSOCIATEOBJTYPE=245&" "ASSOCIATEOBJID=%s&range=[0-8191]" % view_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find portgroup from mapping view ' 'error.')) if self._get_id_from_result(result, portgroup_id, 'ID'): return True return False def find_lungroup_from_map(self, view_id): """Get lungroup from the given map""" url = ("/mappingview/associate/lungroup?TYPE=256&" "ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=%s" % view_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find lun group from mapping view ' 'error.')) lungroup_id = None if 'data' in result: # One map can have only one lungroup. 
for item in result['data']: lungroup_id = item['ID'] return lungroup_id def start_luncopy(self, luncopy_id): """Start a LUNcopy.""" url = "/LUNCOPY/start" data = {"TYPE": "219", "ID": luncopy_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Start LUNcopy error.')) def _get_capacity(self, pool_name, result): """Get free capacity and total capacity of the pool.""" pool_info = self.get_pool_info(pool_name, result) pool_capacity = {'total_capacity': 0.0, 'free_capacity': 0.0} if pool_info: total = float(pool_info['TOTALCAPACITY']) / constants.CAPACITY_UNIT free = float(pool_info['CAPACITY']) / constants.CAPACITY_UNIT pool_capacity['total_capacity'] = total pool_capacity['free_capacity'] = free return pool_capacity def get_luncopy_info(self, luncopy_id): """Get LUNcopy information.""" url = "/LUNCOPY?range=[0-1023]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get LUNcopy information error.')) luncopyinfo = {} if 'data' in result: for item in result['data']: if luncopy_id == item['ID']: luncopyinfo['name'] = item['NAME'] luncopyinfo['id'] = item['ID'] luncopyinfo['state'] = item['HEALTHSTATUS'] luncopyinfo['status'] = item['RUNNINGSTATUS'] break return luncopyinfo def delete_luncopy(self, luncopy_id): """Delete a LUNcopy.""" url = "/LUNCOPY/%s" % luncopy_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete LUNcopy error.')) def get_init_targ_map(self, wwns): init_targ_map = {} tgt_port_wwns = [] for wwn in wwns: tgtwwpns = self.get_fc_target_wwpns(wwn) if not tgtwwpns: continue init_targ_map[wwn] = tgtwwpns for tgtwwpn in tgtwwpns: if tgtwwpn not in tgt_port_wwns: tgt_port_wwns.append(tgtwwpn) return (tgt_port_wwns, init_targ_map) def get_online_free_wwns(self): """Get online free WWNs. If no new ports connected, return an empty list. 
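        Only initiators whose RUNNINGSTATUS equals constants.FC_INIT_ONLINE
        are included, e.g. a data item {'ID': '21000024ff543abc',
        'RUNNINGSTATUS': <online>} contributes '21000024ff543abc' (WWN
        value illustrative).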
""" url = "/fc_initiator?ISFREE=true&range=[0-8191]" result = self.call(url, None, "GET") msg = _('Get connected free FC wwn error.') self._assert_rest_result(result, msg) wwns = [] if 'data' in result: for item in result['data']: if item['RUNNINGSTATUS'] == constants.FC_INIT_ONLINE: wwns.append(item['ID']) return wwns def add_fc_port_to_host(self, host_id, wwn): """Add a FC port to the host.""" url = "/fc_initiator/" + wwn data = {"TYPE": "223", "ID": wwn, "PARENTTYPE": 21, "PARENTID": host_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Add FC port to host error.')) def _get_iscsi_port_info(self, ip): """Get iscsi port info in order to build the iscsi target iqn.""" url = "/eth_port" result = self.call(url, None, "GET") msg = _('Get iSCSI port information error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) iscsi_port_info = None for item in result['data']: if ip == item['IPV4ADDR']: iscsi_port_info = item['LOCATION'] break return iscsi_port_info def _get_tgt_iqn(self, iscsi_ip): """Get target iSCSI iqn.""" ip_info = self._get_iscsi_port_info(iscsi_ip) iqn_prefix = self._get_iscsi_tgt_port() if not ip_info: err_msg = (_( 'Get iSCSI port info error, please check the target IP ' 'configured in huawei conf file.')) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) LOG.debug('Request ip info is: %s.', ip_info) split_list = ip_info.split(".") newstr = split_list[1] + split_list[2] LOG.info(_LI('New str info is: %s.'), newstr) if ip_info: if newstr[0] == 'A': ctr = "0" elif newstr[0] == 'B': ctr = "1" interface = '0' + newstr[1] port = '0' + newstr[3] iqn_suffix = ctr + '02' + interface + port for i in range(0, len(iqn_suffix)): if iqn_suffix[i] != '0': iqn_suffix = iqn_suffix[i:] break iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsi_ip LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.'), iqn) return iqn def get_fc_target_wwpns(self, wwn): url = ("/host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=" + wwn) result = self.call(url, None, "GET") msg = _('Get FC target wwpn error.') self._assert_rest_result(result, msg) fc_wwpns = [] if "data" in result: for item in result['data']: if wwn == item['INITIATOR_PORT_WWN']: fc_wwpns.append(item['TARGET_PORT_WWN']) return fc_wwpns def update_volume_stats(self): data = {} data['pools'] = [] result = self.get_all_pools() for pool_name in self.storage_pools: capacity = self._get_capacity(pool_name, result) pool = {} pool.update(dict( location_info=self.device_id, pool_name=pool_name, total_capacity_gb=capacity['total_capacity'], free_capacity_gb=capacity['free_capacity'], reserved_percentage=self.configuration.safe_get( 'reserved_percentage'), QoS_support=True, max_over_subscription_ratio=self.configuration.safe_get( 'max_over_subscription_ratio'), thin_provisioning_support=True, thick_provisioning_support=True, smarttier=True, smartcache=True, smartpartition=True, hypermetro=True, )) data['pools'].append(pool) return data def _find_qos_policy_info(self, policy_name): url = "/ioclass" result = self.call(url, None, "GET") msg = _('Get QoS policy error.') self._assert_rest_result(result, msg) qos_info = {} if 'data' in result: for item in result['data']: if policy_name == item['NAME']: qos_info['ID'] = item['ID'] lun_list = json.loads(item['LUNLIST']) qos_info['LUNLIST'] = lun_list qos_info['RUNNINGSTATUS'] = item['RUNNINGSTATUS'] break return qos_info def _update_qos_policy_lunlist(self, lun_list, policy_id): url = "/ioclass/" + policy_id data = {"TYPE": "230", "ID": 
policy_id, "LUNLIST": lun_list} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Update QoS policy error.')) def _get_tgt_ip_from_portgroup(self, portgroup_id): target_ips = [] url = ("/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257" "&ASSOCIATEOBJID=%s" % portgroup_id) result = self.call(url, None, "GET") msg = _('Get target IP error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) if 'data' in result: for item in result['data']: if (item['IPV4ADDR'] and item['HEALTHSTATUS'] == constants.STATUS_HEALTH and item['RUNNINGSTATUS'] == constants.STATUS_RUNNING): target_ip = item['IPV4ADDR'] LOG.info(_LI('_get_tgt_ip_from_portgroup: Get ip: %s.'), target_ip) target_ips.append(target_ip) return target_ips def get_iscsi_params(self, connector): """Get target iSCSI params, including iqn, IP.""" initiator = connector['initiator'] target_ips = [] target_iqns = [] portgroup = None portgroup_id = None for ini in self.iscsi_info: if ini['Name'] == initiator: for key in ini: if key == 'TargetPortGroup': portgroup = ini['TargetPortGroup'] elif key == 'TargetIP': target_ips.append(ini['TargetIP']) if portgroup: portgroup_id = self.get_tgt_port_group(portgroup) target_ips = self._get_tgt_ip_from_portgroup(portgroup_id) # If not specify target IP for some initiators, use default IP. if not target_ips: default_target_ips = self.iscsi_default_target_ip if default_target_ips: target_ips.append(default_target_ips[0]) else: msg = (_( 'get_iscsi_params: Failed to get target IP ' 'for initiator %(ini)s, please check config file.') % {'ini': initiator}) LOG.error(msg) raise exception.InvalidInput(reason=msg) # Deal with the remote tgt ip. if 'remote_target_ip' in connector: target_ips.append(connector['remote_target_ip']) LOG.info(_LI('Get the default ip: %s.'), target_ips) for ip in target_ips: target_iqn = self._get_tgt_iqn_from_rest(ip) if not target_iqn: target_iqn = self._get_tgt_iqn(ip) if target_iqn: target_iqns.append(target_iqn) return (target_iqns, target_ips, portgroup_id) def _get_tgt_iqn_from_rest(self, target_ip): url = "/iscsi_tgt_port" result = self.call(url, None, "GET") target_iqn = None if result['error']['code'] != 0: LOG.warning(_LW("Can't find target iqn from rest.")) return target_iqn ip_pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}') if 'data' in result: for item in result['data']: ips = re.findall(ip_pattern, item['ID']) for ip in ips: if target_ip == ip: target_iqn = item['ID'] break if not target_iqn: LOG.warning(_LW("Can't find target iqn from rest.")) return target_iqn split_list = target_iqn.split(",") target_iqn_before = split_list[0] split_list_new = target_iqn_before.split("+") target_iqn = split_list_new[1] return target_iqn def create_qos_policy(self, qos, lun_id): # Get local time. localtime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) # Package QoS name. 
qos_name = constants.QOS_NAME_PREFIX + lun_id + '_' + localtime data = {"TYPE": "230", "NAME": qos_name, "LUNLIST": ["%s" % lun_id], "CLASSTYPE": "1", "SCHEDULEPOLICY": "2", "SCHEDULESTARTTIME": "1410969600", "STARTTIME": "08:00", "DURATION": "86400", "CYCLESET": "[1,2,3,4,5,6,0]", } data.update(qos) url = "/ioclass/" result = self.call(url, data) self._assert_rest_result(result, _('Create QoS policy error.')) return result['data']['ID'] def delete_qos_policy(self, qos_id): """Delete a QoS policy.""" url = "/ioclass/" + qos_id data = {"TYPE": "230", "ID": qos_id} result = self.call(url, data, 'DELETE') self._assert_rest_result(result, _('Delete QoS policy error.')) def activate_deactivate_qos(self, qos_id, enablestatus): """Activate or deactivate QoS. enablestatus: true (activate) enbalestatus: false (deactivate) """ url = "/ioclass/active/" + qos_id data = {"TYPE": 230, "ID": qos_id, "ENABLESTATUS": enablestatus} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Activate or deactivate QoS error.')) def get_qos_info(self, qos_id): """Get QoS information.""" url = "/ioclass/" + qos_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS information error.')) return result['data'] def get_lun_list_in_qos(self, qos_id, qos_info): """Get the lun list in QoS.""" lun_list = [] lun_string = qos_info['LUNLIST'][1:-1] for lun in lun_string.split(","): str = lun[1:-1] lun_list.append(str) return lun_list def remove_lun_from_qos(self, lun_id, lun_list, qos_id): """Remove lun from QoS.""" lun_list = [i for i in lun_list if i != lun_id] url = "/ioclass/" + qos_id data = {"LUNLIST": lun_list, "TYPE": 230, "ID": qos_id} result = self.call(url, data, "PUT") msg = _('Remove lun from QoS error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def change_lun_priority(self, lun_id): """Change lun priority to high.""" url = "/lun/" + lun_id data = {"TYPE": "11", "ID": lun_id, "IOPRIORITY": "3"} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Change lun priority error.')) def change_lun_smarttier(self, lunid, smarttier_policy): """Change lun smarttier policy.""" url = "/lun/" + lunid data = {"TYPE": "11", "ID": lunid, "DATATRANSFERPOLICY": smarttier_policy} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Change lun smarttier policy error.')) def get_qosid_by_lunid(self, lun_id): """Get QoS id by lun id.""" url = "/lun/" + lun_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS id by lun id error.')) return result['data']['IOCLASSID'] def get_lungroupids_by_lunid(self, lun_id): """Get lungroup ids by lun id.""" url = ("/lungroup/associate?TYPE=256" "&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=%s" % lun_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lungroup id by lun id error.')) lungroup_ids = [] if 'data' in result: for item in result['data']: lungroup_ids.append(item['ID']) return lungroup_ids def get_lun_info(self, lun_id): url = "/lun/" + lun_id result = self.call(url, None, "GET") msg = _('Get volume error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def get_snapshot_info(self, snapshot_id): url = "/snapshot/" + snapshot_id result = self.call(url, None, "GET") msg = _('Get snapshot error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def extend_lun(self, lun_id, new_volume_size): url = "/lun/expand" 
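        # NOTE: new_volume_size is passed through as CAPACITY unchanged; the
        # caller is expected to supply it already converted to the array's
        # native capacity unit (no unit conversion happens here).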
data = {"TYPE": 11, "ID": lun_id, "CAPACITY": new_volume_size} result = self.call(url, data, 'PUT') msg = _('Extend volume error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def create_lun_migration(self, src_id, dst_id, speed=2): url = "/LUN_MIGRATION" data = {"TYPE": '253', "PARENTID": src_id, "TARGETLUNID": dst_id, "SPEED": speed, "WORKMODE": 0} result = self.call(url, data, "POST") msg = _('Create lun migration error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def get_lun_migration_task(self): url = '/LUN_MIGRATION?range=[0-256]' result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lun migration task error.')) return result def delete_lun_migration(self, src_id, dst_id): url = '/LUN_MIGRATION/' + src_id result = self.call(url, None, "DELETE") msg = _('Delete lun migration error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def get_partition_id_by_name(self, name): url = "/cachepartition" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get partition by name error.')) return self._get_id_from_result(result, name, 'NAME') def get_partition_info_by_id(self, partition_id): url = '/cachepartition/' + partition_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get partition by partition id error.')) return result['data'] def add_lun_to_partition(self, lun_id, partition_id): url = "/lun/associate/cachepartition" data = {"ID": partition_id, "ASSOCIATEOBJTYPE": 11, "ASSOCIATEOBJID": lun_id} result = self.call(url, data, "POST") self._assert_rest_result(result, _('Add lun to partition error.')) def remove_lun_from_partition(self, lun_id, partition_id): url = ('/lun/associate/cachepartition?ID=' + partition_id + '&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=' + lun_id) result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Remove lun from partition error.')) def get_cache_id_by_name(self, name): url = "/SMARTCACHEPARTITION" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get cache by name error.')) return self._get_id_from_result(result, name, 'NAME') def get_cache_info_by_id(self, cacheid): url = "/SMARTCACHEPARTITION/" + cacheid data = {"TYPE": "273", "ID": cacheid} result = self.call(url, data, "GET") self._assert_rest_result( result, _('Get smartcache by cache id error.')) return result['data'] def remove_lun_from_cache(self, lun_id, cache_id): url = "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE" data = {"ID": cache_id, "ASSOCIATEOBJTYPE": 11, "ASSOCIATEOBJID": lun_id, "TYPE": 273} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove lun from cache error.')) def get_qos(self): url = "/ioclass" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS information error.')) return result def find_available_qos(self, qos): """"Find available QoS on the array.""" qos_id = None lun_list = [] extra_qos = [i for i in constants.EXTRA_QOS_KEYS if i not in qos] result = self.get_qos() if 'data' in result: for items in result['data']: qos_flag = 0 extra_flag = False if 'LATENCY' not in qos and items['LATENCY'] != '0': extra_flag = True else: for item in items: if item in extra_qos: extra_flag = True break for key in qos: if key not in items: break elif qos[key] != items[key]: break qos_flag = qos_flag + 1 lun_num = len(items['LUNLIST'].split(",")) qos_name = items['NAME'] qos_status = items['RUNNINGSTATUS'] # We use this 
QoS only if the LUNs in it is less than 64, # created by OpenStack and does not contain filesystem, # else we cannot add LUN to this QoS any more. if (qos_flag == len(qos) and not extra_flag and lun_num < constants.MAX_LUN_NUM_IN_QOS and qos_name.startswith(constants.QOS_NAME_PREFIX) and qos_status == constants.STATUS_QOS_ACTIVE and items['FSLIST'] == '[""]'): qos_id = items['ID'] lun_list = items['LUNLIST'] break return (qos_id, lun_list) def add_lun_to_qos(self, qos_id, lun_id, lun_list): """Add lun to QoS.""" url = "/ioclass/" + qos_id new_lun_list = [] lun_list_string = lun_list[1:-1] for lun_string in lun_list_string.split(","): tmp_lun_id = lun_string[1:-1] if '' != tmp_lun_id and tmp_lun_id != lun_id: new_lun_list.append(tmp_lun_id) new_lun_list.append(lun_id) data = {"LUNLIST": new_lun_list, "TYPE": 230, "ID": qos_id} result = self.call(url, data, "PUT") msg = _('Associate lun to QoS error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def add_lun_to_cache(self, lun_id, cache_id): url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE" data = {"ID": cache_id, "ASSOCIATEOBJTYPE": 11, "ASSOCIATEOBJID": lun_id, "TYPE": 273} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Add lun to cache error.')) def get_array_info(self): url = "/system/" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get array info error.')) return result.get('data', None) def find_array_version(self): info = self.get_array_info() return info.get('PRODUCTVERSION', None) def remove_host(self, host_id): url = "/host/%s" % host_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Remove host from array error.')) def delete_hostgroup(self, hostgroup_id): url = "/hostgroup/%s" % hostgroup_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete hostgroup error.')) def remove_host_from_hostgroup(self, hostgroup_id, host_id): url_subfix001 = "/host/associate?TYPE=14&ID=%s" % hostgroup_id url_subfix002 = "&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s" % host_id url = url_subfix001 + url_subfix002 result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Remove host from hostgroup error.')) def remove_iscsi_from_host(self, initiator): url = "/iscsi_initiator/remove_iscsi_from_host" data = {"TYPE": '222', "ID": initiator} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove iscsi from host error.')) def get_host_online_fc_initiators(self, host_id): url = "/fc_initiator?PARENTTYPE=21&PARENTID=%s" % host_id result = self.call(url, None, "GET") initiators = [] if 'data' in result: for item in result['data']: if (('PARENTID' in item) and (item['PARENTID'] == host_id) and (item['RUNNINGSTATUS'] == constants.FC_INIT_ONLINE)): initiators.append(item['ID']) return initiators def get_host_fc_initiators(self, host_id): url = "/fc_initiator?PARENTTYPE=21&PARENTID=%s" % host_id result = self.call(url, None, "GET") initiators = [] if 'data' in result: for item in result['data']: if (('PARENTID' in item) and (item['PARENTID'] == host_id)): initiators.append(item['ID']) return initiators def get_host_iscsi_initiators(self, host_id): url = "/iscsi_initiator?PARENTTYPE=21&PARENTID=%s" % host_id result = self.call(url, None, "GET") initiators = [] if 'data' in result: for item in result['data']: if (('PARENTID' in item) and (item['PARENTID'] == host_id)): initiators.append(item['ID']) return initiators def rename_lun(self, lun_id, new_name, description=None): url = "/lun/" + 
lun_id data = {"NAME": new_name} if description: data.update({"DESCRIPTION": description}) result = self.call(url, data, "PUT") msg = _('Rename lun on array error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def rename_snapshot(self, snapshot_id, new_name, description=None): url = "/snapshot/" + snapshot_id data = {"NAME": new_name} if description: data.update({"DESCRIPTION": description}) result = self.call(url, data, "PUT") msg = _('Rename snapshot on array error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def is_fc_initiator_associated_to_host(self, ininame): """Check whether the initiator is associated to the host.""" url = '/fc_initiator?range=[0-256]' result = self.call(url, None, "GET") self._assert_rest_result(result, 'Check initiator associated to host error.') if "data" in result: for item in result['data']: if item['ID'] == ininame and item['ISFREE'] != "true": return True return False def remove_fc_from_host(self, initiator): url = '/fc_initiator/remove_fc_from_host' data = {"TYPE": '223', "ID": initiator} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove fc from host error.')) def check_fc_initiators_exist_in_host(self, host_id): url = "/fc_initiator?range=[0-256]&PARENTID=%s" % host_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get host initiators info failed.')) if 'data' in result: return True return False def _fc_initiator_is_added_to_array(self, ininame): """Check whether the fc initiator is already added on the array.""" url = "/fc_initiator/" + ininame result = self.call(url, None, "GET") error_code = result['error']['code'] if error_code != 0: return False return True def _add_fc_initiator_to_array(self, ininame): """Add a fc initiator to storage device.""" url = '/fc_initiator/' data = {"TYPE": '223', "ID": ininame} result = self.call(url, data) self._assert_rest_result(result, _('Add fc initiator to array error.')) def ensure_fc_initiator_added(self, initiator_name, host_id): added = self._fc_initiator_is_added_to_array(initiator_name) if not added: self._add_fc_initiator_to_array(initiator_name) # Just add, no need to check whether have been added. 
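        # A hedged usage sketch (the initiator WWPN and host ID below are
        # hypothetical): the check-then-add sequence keeps registration
        # idempotent before the initiator is bound to the host:
        #
        #     client.ensure_fc_initiator_added('21000024ff543abc', '11')
        #     # GET  /fc_initiator/21000024ff543abc -> error code != 0
        #     # POST /fc_initiator/ with {"TYPE": '223', "ID": '2100...'}
        #     # then bind the initiator to host '11' (below)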
self.add_fc_port_to_host(host_id, initiator_name) def get_fc_ports_on_array(self): url = '/fc_port' result = self.call(url, None, "GET") msg = _('Get FC ports from array error.') self._assert_rest_result(result, msg) return result['data'] def get_fc_ports_from_contr(self, contr): port_list_from_contr = [] location = [] data = self.get_fc_ports_on_array() for item in data: location = item['PARENTID'].split('.') if (location[0][1] == contr) and (item['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED): port_list_from_contr.append(item['WWN']) return port_list_from_contr def get_hyper_domain_id(self, domain_name): url = "/HyperMetroDomain?range=[0-32]" result = self.call(url, None, "GET") domain_id = None if "data" in result: for item in result['data']: if domain_name == item['NAME']: domain_id = item['ID'] break msg = _('get_hyper_domain_id error.') self._assert_rest_result(result, msg) return domain_id def create_hypermetro(self, hcp_param): url = "/HyperMetroPair" result = self.call(url, hcp_param, "POST") msg = _('create_hypermetro_pair error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def delete_hypermetro(self, metro_id): url = "/HyperMetroPair/" + metro_id result = self.call(url, None, "DELETE") msg = _('delete_hypermetro error.') self._assert_rest_result(result, msg) def sync_hypermetro(self, metro_id): url = "/HyperMetroPair/synchronize_hcpair" data = {"ID": metro_id, "TYPE": "15361"} result = self.call(url, data, "PUT") msg = _('sync_hypermetro error.') self._assert_rest_result(result, msg) def stop_hypermetro(self, metro_id): url = '/HyperMetroPair/disable_hcpair' data = {"ID": metro_id, "TYPE": "15361"} result = self.call(url, data, "PUT") msg = _('stop_hypermetro error.') self._assert_rest_result(result, msg) def get_hypermetro_by_id(self, metro_id): url = "/HyperMetroPair/" + metro_id result = self.call(url, None, "GET") msg = _('get_hypermetro_by_id error.') self._assert_rest_result(result, msg) return result def check_hypermetro_exist(self, metro_id): url = "/HyperMetroPair/" + metro_id result = self.call(url, None, "GET") error_code = result['error']['code'] if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): LOG.error(_LE("Can not open the recent url, login again.")) self.login() result = self.call(url, None, "GET") error_code = result['error']['code'] if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): msg = _("check_hypermetro_exist error.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if error_code != 0: return False return True def change_hostlun_id(self, map_info, hostlun_id): url = "/mappingview" view_id = six.text_type(map_info['view_id']) lun_id = six.text_type(map_info['lun_id']) hostlun_id = six.text_type(hostlun_id) data = {"TYPE": 245, "ID": view_id, "ASSOCIATEOBJTYPE": 11, "ASSOCIATEOBJID": lun_id, "ASSOCIATEMETADATA": [{"LUNID": lun_id, "hostLUNId": hostlun_id}]} result = self.call(url, data, "PUT") msg = 'change hostlun id error.' 
        self._assert_rest_result(result, msg)

    def find_view_by_id(self, view_id):
        url = "/MAPPINGVIEW/" + view_id
        result = self.call(url, None, "GET")

        msg = _('Find view by id error.')
        self._assert_rest_result(result, msg)
        if 'data' in result:
            return result["data"]["AVAILABLEHOSTLUNIDLIST"]

    def get_hypermetro_pairs(self):
        url = "/HyperMetroPair?range=[0-65535]"
        result = self.call(url, None, "GET")
        msg = _('Get HyperMetroPair error.')
        self._assert_rest_result(result, msg)
        return result.get('data', [])

    def get_split_mirrors(self):
        url = "/splitmirror?range=[0-512]"
        result = self.call(url, None, "GET")
        if result['error']['code'] == constants.NO_SPLITMIRROR_LICENSE:
            msg = _('License is unavailable.')
            raise exception.VolumeBackendAPIException(data=msg)
        msg = _('Get SplitMirror error.')
        self._assert_rest_result(result, msg)
        return result.get('data', [])

    def get_target_luns(self, id):
        url = ("/SPLITMIRRORTARGETLUN/targetLUN?TYPE=228&PARENTID=%s&"
               "PARENTTYPE=220") % id
        result = self.call(url, None, "GET")
        msg = _('Get target LUN of SplitMirror error.')
        self._assert_rest_result(result, msg)

        target_luns = []
        for item in result.get('data', []):
            target_luns.append(item.get('ID'))
        return target_luns

    def get_migration_task(self):
        url = "/LUN_MIGRATION?range=[0-256]"
        result = self.call(url, None, "GET")
        if result['error']['code'] == constants.NO_MIGRATION_LICENSE:
            msg = _('License is unavailable.')
            raise exception.VolumeBackendAPIException(data=msg)
        msg = _('Get migration task error.')
        self._assert_rest_result(result, msg)
        return result.get('data', [])

    def is_lun_in_mirror(self, lun_id):
        url = "/lun?range=[0-65535]"
        result = self.call(url, None, "GET")
        self._assert_rest_result(result, _('Get volume by name error.'))
        for item in result.get('data', []):
            rss_obj = item.get('HASRSSOBJECT')
            if rss_obj:
                rss_obj = ast.literal_eval(rss_obj)
                if (item.get('ID') == lun_id
                        and rss_obj.get('LUNMirror') == 'TRUE'):
                    return True
        return False

    def get_portgs_by_portid(self, port_id):
        portgs = []
        if not port_id:
            return portgs
        url = ("/portgroup/associate/fc_port?TYPE=257&ASSOCIATEOBJTYPE=212&"
               "ASSOCIATEOBJID=%s") % port_id
        result = self.call(url, None, "GET")
        self._assert_rest_result(result, _('Get port groups by port error.'))
        for item in result.get("data", []):
            portgs.append(item["ID"])
        return portgs

    def get_views_by_portg(self, portg_id):
        views = []
        if not portg_id:
            return views
        url = ("/mappingview/associate/portgroup?TYPE=245&ASSOCIATEOBJTYPE="
               "257&ASSOCIATEOBJID=%s") % portg_id
        result = self.call(url, None, "GET")
        self._assert_rest_result(result, _('Get views by port group error.'))
        for item in result.get("data", []):
            views.append(item["ID"])
        return views

    def get_lungroup_by_view(self, view_id):
        if not view_id:
            return None
        url = ("/lungroup/associate/mappingview?TYPE=256&ASSOCIATEOBJTYPE="
               "245&ASSOCIATEOBJID=%s") % view_id
        result = self.call(url, None, "GET")
        self._assert_rest_result(result, _('Get LUN group by view error.'))
        for item in result.get("data", []):
            # In fact, there is only one lungroup in a view, so return
            # the first one found.
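            # A hedged usage sketch (IDs hypothetical): these association
            # queries are typically chained to walk the mapping topology
            # from a port down to its LUN group:
            #
            #     portgs = client.get_portgs_by_portid('212')
            #     views = client.get_views_by_portg(portgs[0])
            #     lungroup_id = client.get_lungroup_by_view(views[0])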
return item["ID"] def get_portgroup_by_view(self, view_id): if not view_id: return None url = ("/portgroup/associate/mappingview?TYPE=257&ASSOCIATEOBJTYPE=" "245&ASSOCIATEOBJID=%s") % view_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get port group by view error.')) return result.get("data", []) def get_fc_ports_by_portgroup(self, portg_id): ports = {} if not portg_id: return ports url = ("/fc_port/associate/portgroup?TYPE=212&ASSOCIATEOBJTYPE=257" "&ASSOCIATEOBJID=%s") % portg_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get FC ports by port group ' 'error.')) for item in result.get("data", []): ports[item["WWN"]] = item["ID"] return ports def create_portg(self, portg_name, description=""): url = "/PortGroup" data = {"DESCRIPTION": description, "NAME": portg_name, "TYPE": 257} result = self.call(url, data, "POST") self._assert_rest_result(result, _('Create port group error.')) if "data" in result: return result['data']['ID'] def add_port_to_portg(self, portg_id, port_id): url = "/port/associate/portgroup" data = {"ASSOCIATEOBJID": port_id, "ASSOCIATEOBJTYPE": 212, "ID": portg_id, "TYPE": 257} result = self.call(url, data, "POST") self._assert_rest_result(result, _('Add port to port group error.')) def delete_portgroup(self, portg_id): url = "/PortGroup/%s" % portg_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete port group error.')) def remove_port_from_portgroup(self, portg_id, port_id): url = (("/port/associate/portgroup?ID=%(portg_id)s&TYPE=257&" "ASSOCIATEOBJTYPE=212&ASSOCIATEOBJID=%(port_id)s") % {"portg_id": portg_id, "port_id": port_id}) result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Remove port from port group' ' error.')) def get_all_engines(self): url = "/storageengine" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get engines error.')) return result.get("data", []) def get_portg_info(self, portg_id): url = "/portgroup/%s" % portg_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get port group error.')) return result.get("data", {}) def append_portg_desc(self, portg_id, description): portg_info = self.get_portg_info(portg_id) new_description = portg_info.get('DESCRIPTION') + ',' + description url = "/portgroup/%s" % portg_id data = {"DESCRIPTION": new_description, "ID": portg_id, "TYPE": 257} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Append port group description' ' error.')) def get_ports_by_portg(self, portg_id): wwns = [] url = ("/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257" "&ASSOCIATEOBJID=%s" % portg_id) result = self.call(url, None, "GET") msg = _('Get ports by port group error.') self._assert_rest_result(result, msg) for item in result.get('data', []): wwns.append(item['WWN']) return wwns def get_remote_devices(self): url = "/remote_device" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get remote devices error.')) return result.get('data', []) def create_pair(self, pair_params): url = "/REPLICATIONPAIR" result = self.call(url, pair_params, "POST") msg = _('Create replication error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def get_pair_by_id(self, pair_id): url = "/REPLICATIONPAIR/" + pair_id result = self.call(url, None, "GET") msg = _('Get pair failed.') self._assert_rest_result(result, msg) return result.get('data', {}) def switch_pair(self, pair_id): url = 
'/REPLICATIONPAIR/switch' data = {"ID": pair_id, "TYPE": "263"} result = self.call(url, data, "PUT") msg = _('Switch over pair error.') self._assert_rest_result(result, msg) def split_pair(self, pair_id): url = '/REPLICATIONPAIR/split' data = {"ID": pair_id, "TYPE": "263"} result = self.call(url, data, "PUT") msg = _('Split pair error.') self._assert_rest_result(result, msg) def delete_pair(self, pair_id, force=False): url = "/REPLICATIONPAIR/" + pair_id data = None if force: data = {"ISLOCALDELETE": force} result = self.call(url, data, "DELETE") msg = _('delete_replication error.') self._assert_rest_result(result, msg) def sync_pair(self, pair_id): url = "/REPLICATIONPAIR/sync" data = {"ID": pair_id, "TYPE": "263"} result = self.call(url, data, "PUT") msg = _('Sync pair error.') self._assert_rest_result(result, msg) def check_pair_exist(self, pair_id): url = "/REPLICATIONPAIR/" + pair_id result = self.call(url, None, "GET") return result['error']['code'] == 0 def set_pair_second_access(self, pair_id, access): url = "/REPLICATIONPAIR/" + pair_id data = {"ID": pair_id, "SECRESACCESS": access} result = self.call(url, data, "PUT") msg = _('Set pair secondary access error.') self._assert_rest_result(result, msg) def is_host_associated_to_hostgroup(self, host_id): url = "/host/" + host_id result = self.call(url, None, "GET") data = result.get('data') if data is not None: return data.get('ISADD2HOSTGROUP') == 'true' return False cinder-8.0.0/cinder/volume/drivers/huawei/huawei_driver.py0000664000567000056710000025130612701406250025052 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import re import six import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import utils from cinder.volume import driver from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import fc_zone_helper from cinder.volume.drivers.huawei import huawei_conf from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import hypermetro from cinder.volume.drivers.huawei import replication from cinder.volume.drivers.huawei import rest_client from cinder.volume.drivers.huawei import smartx from cinder.volume import utils as volume_utils from cinder.volume import volume_types from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) huawei_opts = [ cfg.StrOpt('cinder_huawei_conf_file', default='/etc/cinder/cinder_huawei_conf.xml', help='The configuration file for the Cinder Huawei driver.'), cfg.StrOpt('hypermetro_devices', default=None, help='The remote device hypermetro will use.'), ] CONF = cfg.CONF CONF.register_opts(huawei_opts) class HuaweiBaseDriver(driver.VolumeDriver): def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) if not self.configuration: msg = _('Configuration is not found.') raise exception.InvalidInput(reason=msg) self.active_backend_id = kwargs.get('active_backend_id') self.configuration.append_config_values(huawei_opts) self.huawei_conf = huawei_conf.HuaweiConf(self.configuration) self.metro_flag = False self.replica = None def get_local_and_remote_dev_conf(self): self.loc_dev_conf = self.huawei_conf.get_local_device() # Now just support one replication_devices. replica_devs = self.huawei_conf.get_replication_devices() self.replica_dev_conf = replica_devs[0] if replica_devs else {} def get_local_and_remote_client_conf(self): if self.active_backend_id: return self.replica_dev_conf, self.loc_dev_conf else: return self.loc_dev_conf, self.replica_dev_conf def do_setup(self, context): """Instantiate common class and login storage system.""" # Set huawei private configuration into Configuration object. 
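        # A minimal cinder.conf sketch for this setup path (all values
        # hypothetical; the metro_san_* options are only needed when
        # hypermetro is used):
        #
        #     [huawei_backend]
        #     volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver
        #     cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml
        #     metro_san_address = https://192.0.2.10:8088/deviceManager/rest/
        #     metro_san_user = admin
        #     metro_san_password = ***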
self.huawei_conf.update_config_value() self.get_local_and_remote_dev_conf() client_conf, replica_client_conf = ( self.get_local_and_remote_client_conf()) # init local client if not client_conf: msg = _('Get active client failed.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self.client = rest_client.RestClient(self.configuration, **client_conf) self.client.login() # init remote client metro_san_address = self.configuration.safe_get("metro_san_address") metro_san_user = self.configuration.safe_get("metro_san_user") metro_san_password = self.configuration.safe_get("metro_san_password") if metro_san_address and metro_san_user and metro_san_password: self.metro_flag = True metro_san_address = metro_san_address.split(";") self.rmt_client = rest_client.RestClient(self.configuration, metro_san_address, metro_san_user, metro_san_password) self.rmt_client.login() # init replication manager if replica_client_conf: self.replica_client = rest_client.RestClient(self.configuration, **replica_client_conf) self.replica_client.try_login() self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) def check_for_setup_error(self): pass def get_volume_stats(self, refresh=False): """Get volume status and reload huawei config file.""" self.huawei_conf.update_config_value() if self.metro_flag: self.rmt_client.get_all_pools() stats = self.client.update_volume_stats() if self.replica: stats = self.replica.update_replica_capability(stats) targets = [self.replica_dev_conf['backend_id']] stats['replication_targets'] = targets stats['replication_enabled'] = True return stats def _get_volume_type(self, volume): volume_type = None type_id = volume['volume_type_id'] if type_id: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) return volume_type def _get_volume_params(self, volume_type): """Return the parameters for creating the volume.""" specs = {} if volume_type: specs = dict(volume_type).get('extra_specs') opts = self._get_volume_params_from_specs(specs) return opts def _get_volume_params_from_specs(self, specs): """Return the volume parameters from extra specs.""" opts_capability = { 'smarttier': False, 'smartcache': False, 'smartpartition': False, 'thin_provisioning_support': False, 'thick_provisioning_support': False, 'hypermetro': False, 'replication_enabled': False, 'replication_type': 'async', } opts_value = { 'policy': None, 'partitionname': None, 'cachename': None, } opts_associate = { 'smarttier': 'policy', 'smartcache': 'cachename', 'smartpartition': 'partitionname', } opts = self._get_opts_from_specs(opts_capability, opts_value, opts_associate, specs) opts = smartx.SmartX().get_smartx_specs_opts(opts) opts = replication.get_replication_opts(opts) LOG.debug('volume opts %(opts)s.', {'opts': opts}) return opts def _get_opts_from_specs(self, opts_capability, opts_value, opts_associate, specs): """Get the well defined extra specs.""" opts = {} opts.update(opts_capability) opts.update(opts_value) for key, value in specs.items(): # Get the scope, if is using scope format. 
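            # Worked example (hedged; the spec name is illustrative): an
            # extra spec {"capabilities:smartcache": "<is> true"} splits
            # into scope='capabilities' and key='smartcache', and its
            # value parses to opts['smartcache'] = 'true'.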
            scope = None
            key_split = key.split(':')
            if len(key_split) > 2 and key_split[0] != "capabilities":
                continue

            if len(key_split) == 1:
                key = key_split[0].lower()
            else:
                scope = key_split[0].lower()
                key = key_split[1].lower()

            if ((not scope or scope == 'capabilities')
                    and key in opts_capability):
                words = value.split()
                if words and len(words) == 2 and words[0] in ('<is>', '<in>'):
                    opts[key] = words[1].lower()
                elif key == 'replication_type':
                    LOG.error(_LE("Extra specs must be specified as "
                                  "replication_type='<in> sync' or "
                                  "'<in> async'."))
                else:
                    LOG.error(_LE("Extra specs must be specified as "
                                  "capabilities:%s='<is> True'."), key)

            if ((scope in opts_capability)
                    and (key in opts_value)
                    and (scope in opts_associate)
                    and (opts_associate[scope] == key)):
                opts[key] = value

        return opts

    def _get_lun_params(self, volume, opts):
        pool_name = volume_utils.extract_host(volume['host'], level='pool')
        params = {
            'TYPE': '11',
            'NAME': huawei_utils.encode_name(volume['id']),
            'PARENTTYPE': '216',
            'PARENTID': self.client.get_pool_id(pool_name),
            'DESCRIPTION': volume['name'],
            'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type),
            'CAPACITY': huawei_utils.get_volume_size(volume),
            'WRITEPOLICY': self.configuration.lun_write_type,
            'MIRRORPOLICY': self.configuration.lun_mirror_switch,
            'PREFETCHPOLICY': self.configuration.lun_prefetch_type,
            'PREFETCHVALUE': self.configuration.lun_prefetch_value,
            'DATATRANSFERPOLICY':
                opts.get('policy', self.configuration.lun_policy),
            'READCACHEPOLICY': self.configuration.lun_read_cache_policy,
            'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy,
        }

        LOG.info(_LI('volume: %(volume)s, lun params: %(params)s.'),
                 {'volume': volume['id'], 'params': params})
        return params

    def _create_volume(self, volume, lun_params):
        # Create LUN on the array.
        model_update = {}
        lun_info = self.client.create_lun(lun_params)
        model_update['provider_location'] = lun_info['ID']
        admin_metadata = huawei_utils.get_admin_metadata(volume)
        admin_metadata.update({'huawei_lun_wwn': lun_info['WWN']})
        model_update['admin_metadata'] = admin_metadata
        metadata = huawei_utils.get_volume_metadata(volume)
        model_update['metadata'] = metadata

        return lun_info, model_update

    def _create_base_type_volume(self, opts, volume, volume_type):
        """Create volume and add some base types.

        Base types are the services that do not conflict with other
        services.
        """
        lun_params = self._get_lun_params(volume, opts)
        lun_info, model_update = self._create_volume(volume, lun_params)
        lun_id = lun_info['ID']

        try:
            qos = smartx.SmartQos.get_qos_by_volume_type(volume_type)
            if qos:
                smart_qos = smartx.SmartQos(self.client)
                smart_qos.add(qos, lun_id)

            smartpartition = smartx.SmartPartition(self.client)
            smartpartition.add(opts, lun_id)

            smartcache = smartx.SmartCache(self.client)
            smartcache.add(opts, lun_id)
        except Exception as err:
            self._delete_lun_with_check(lun_id)
            msg = _('Create volume error. Because %s.') % six.text_type(err)
            raise exception.VolumeBackendAPIException(data=msg)

        return lun_params, lun_info, model_update

    def _add_extend_type_to_volume(self, opts, lun_params, lun_info,
                                   model_update):
        """Add the extend types.

        Extend types are the services that may conflict with LUNCopy,
        so add them after those services.
""" lun_id = lun_info['ID'] if opts.get('hypermetro') == 'true': metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) try: metro_info = metro.create_hypermetro(lun_id, lun_params) model_update['metadata'].update(metro_info) except exception.VolumeBackendAPIException as err: LOG.error(_LE('Create hypermetro error: %s.'), err) self._delete_lun_with_check(lun_id) raise if opts.get('replication_enabled') == 'true': replica_model = opts.get('replication_type') try: replica_info = self.replica.create_replica(lun_info, replica_model) model_update.update(replica_info) except Exception as err: LOG.exception(_LE('Create replication volume error.')) self._delete_lun_with_check(lun_id) raise return model_update def create_volume(self, volume): """Create a volume.""" volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if (opts.get('hypermetro') == 'true' and opts.get('replication_enabled') == 'true'): err_msg = _("Hypermetro and Replication can not be " "used in the same volume_type.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) lun_params, lun_info, model_update = ( self._create_base_type_volume(opts, volume, volume_type)) model_update = self._add_extend_type_to_volume(opts, lun_params, lun_info, model_update) return model_update def _delete_volume(self, volume): lun_id = volume.get('provider_location') if not lun_id: return lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id) if lun_group_ids and len(lun_group_ids) == 1: self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id) self.client.delete_lun(lun_id) def delete_volume(self, volume): """Delete a volume. Three steps: Firstly, remove associate from lungroup. Secondly, remove associate from QoS policy. Thirdly, remove the lun. """ lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_WARN) if not lun_id: return qos_id = self.client.get_qosid_by_lunid(lun_id) if qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(qos_id, lun_id) metadata = huawei_utils.get_volume_metadata(volume) if 'hypermetro_id' in metadata: metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) try: metro.delete_hypermetro(volume) except exception.VolumeBackendAPIException as err: LOG.error(_LE('Delete hypermetro error: %s.'), err) # We have checked the LUN WWN above, # no need to check again here. 
self._delete_volume(volume) raise # Delete a replication volume replica_data = volume.get('replication_driver_data') if replica_data: try: self.replica.delete_replica(volume) except exception.VolumeBackendAPIException as err: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Delete replication error.")) self._delete_volume(volume) self._delete_volume(volume) def _delete_lun_with_check(self, lun_id, lun_wwn=None): if not lun_id: return if self.client.check_lun_exist(lun_id, lun_wwn): qos_id = self.client.get_qosid_by_lunid(lun_id) if qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(qos_id, lun_id) self.client.delete_lun(lun_id) def _is_lun_migration_complete(self, src_id, dst_id): result = self.client.get_lun_migration_task() found_migration_task = False if 'data' not in result: return False for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): found_migration_task = True if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']: return True if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']: msg = _("Lun migration error.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not found_migration_task: err_msg = _("Cannot find migration task.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) return False def _is_lun_migration_exist(self, src_id, dst_id): try: result = self.client.get_lun_migration_task() except Exception: LOG.error(_LE("Get LUN migration error.")) return False if 'data' in result: for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): return True return False def _migrate_lun(self, src_id, dst_id): try: self.client.create_lun_migration(src_id, dst_id) def _is_lun_migration_complete(): return self._is_lun_migration_complete(src_id, dst_id) wait_interval = constants.MIGRATION_WAIT_INTERVAL huawei_utils.wait_for_condition(_is_lun_migration_complete, wait_interval, self.configuration.lun_timeout) # Clean up if migration failed. 
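        # Hedged sketch of the polling contract assumed above:
        # _is_lun_migration_complete() is re-evaluated every
        # constants.MIGRATION_WAIT_INTERVAL seconds until it returns
        # True, it raises on a MIGRATION_FAULT status, or
        # self.configuration.lun_timeout seconds elapse, whichever
        # comes first.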
except Exception as ex: raise exception.VolumeBackendAPIException(data=ex) finally: if self._is_lun_migration_exist(src_id, dst_id): self.client.delete_lun_migration(src_id, dst_id) self._delete_lun_with_check(dst_id) LOG.debug("Migrate lun %s successfully.", src_id) return True def _wait_volume_ready(self, lun_id): wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.client.get_lun_info(lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) def _get_original_status(self, volume): return 'in-use' if volume.get('volume_attachment') else 'available' def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status=None): original_name = huawei_utils.encode_name(volume['id']) current_name = huawei_utils.encode_name(new_volume['id']) lun_id = self.client.get_lun_id_by_name(current_name) try: self.client.rename_lun(lun_id, original_name) except exception.VolumeBackendAPIException: LOG.error(_LE('Unable to rename lun %s on array.'), current_name) return {'_name_id': new_volume['_name_id'] or new_volume['id']} LOG.debug("Rename lun from %(current_name)s to %(original_name)s " "successfully.", {'current_name': current_name, 'original_name': original_name}) model_update = {'_name_id': None} return model_update def migrate_volume(self, ctxt, volume, host, new_type=None): """Migrate a volume within the same array.""" self._check_volume_exist_on_array(volume, constants.VOLUME_NOT_EXISTS_RAISE) # NOTE(jlc): Replication volume can't migrate. But retype # can remove replication relationship first then do migrate. # So don't add this judgement into _check_migration_valid(). volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if opts.get('replication_enabled') == 'true': return (False, None) return self._migrate_volume(volume, host, new_type) def _check_migration_valid(self, host, volume): if 'pool_name' not in host['capabilities']: return False target_device = host['capabilities']['location_info'] # Source and destination should be on same array. if target_device != self.client.device_id: return False # Same protocol should be used if volume is in-use. protocol = self.configuration.san_protocol if (host['capabilities']['storage_protocol'] != protocol and self._get_original_status(volume) == 'in-use'): return False pool_name = host['capabilities']['pool_name'] if len(pool_name) == 0: return False return True def _migrate_volume(self, volume, host, new_type=None): if not self._check_migration_valid(host, volume): return (False, None) type_id = volume['volume_type_id'] volume_type = None if type_id: volume_type = volume_types.get_volume_type(None, type_id) pool_name = host['capabilities']['pool_name'] pools = self.client.get_all_pools() pool_info = self.client.get_pool_info(pool_name, pools) src_volume_name = huawei_utils.encode_name(volume['id']) dst_volume_name = six.text_type(hash(src_volume_name)) src_id = volume.get('provider_location') opts = None qos = None if new_type: # If new type exists, use new type. 
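            # Hedged example (hypothetical values): for a new_type whose
            # extra_specs contain {'smarttier': '<is> true',
            # 'smarttier:policy': '2'}, opts comes back with policy='2',
            # so the destination LUN below is created with
            # DATATRANSFERPOLICY '2'.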
new_specs = new_type['extra_specs'] opts = self._get_volume_params_from_specs(new_specs) if 'LUNType' not in opts: opts['LUNType'] = self.configuration.lun_type qos = smartx.SmartQos.get_qos_by_volume_type(new_type) elif volume_type: qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) if not opts: opts = self._get_volume_params(volume_type) lun_info = self.client.get_lun_info(src_id) policy = lun_info['DATATRANSFERPOLICY'] if opts['policy']: policy = opts['policy'] lun_params = { 'NAME': dst_volume_name, 'PARENTID': pool_info['ID'], 'DESCRIPTION': lun_info['DESCRIPTION'], 'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']), 'CAPACITY': lun_info['CAPACITY'], 'WRITEPOLICY': lun_info['WRITEPOLICY'], 'MIRRORPOLICY': lun_info['MIRRORPOLICY'], 'PREFETCHPOLICY': lun_info['PREFETCHPOLICY'], 'PREFETCHVALUE': lun_info['PREFETCHVALUE'], 'DATATRANSFERPOLICY': policy, 'READCACHEPOLICY': lun_info['READCACHEPOLICY'], 'WRITECACHEPOLICY': lun_info['WRITECACHEPOLICY'], 'OWNINGCONTROLLER': lun_info['OWNINGCONTROLLER'], } lun_info = self.client.create_lun(lun_params) lun_id = lun_info['ID'] if qos: LOG.info(_LI('QoS: %s.'), qos) SmartQos = smartx.SmartQos(self.client) SmartQos.add(qos, lun_id) if opts: smartpartition = smartx.SmartPartition(self.client) smartpartition.add(opts, lun_id) smartcache = smartx.SmartCache(self.client) smartcache.add(opts, lun_id) dst_id = lun_info['ID'] self._wait_volume_ready(dst_id) moved = self._migrate_lun(src_id, dst_id) return moved, {} def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. We use LUNcopy to copy a new volume from snapshot. The time needed increases as volume size does. """ volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if (opts.get('hypermetro') == 'true' and opts.get('replication_enabled') == 'true'): err_msg = _("Hypermetro and Replication can not be " "used in the same volume_type.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) snapshotname = huawei_utils.encode_name(snapshot['id']) snapshot_id = snapshot.get('provider_location') if snapshot_id is None: snapshot_id = self.client.get_snapshot_id_by_name(snapshotname) if snapshot_id is None: err_msg = (_( 'create_volume_from_snapshot: Snapshot %(name)s ' 'does not exist.') % {'name': snapshotname}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) lun_params, lun_info, model_update = ( self._create_base_type_volume(opts, volume, volume_type)) tgt_lun_id = model_update['provider_location'] luncopy_name = huawei_utils.encode_name(volume['id']) LOG.info(_LI( 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, ' 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'), {'src_lun_id': snapshot_id, 'tgt_lun_id': tgt_lun_id, 'copy_name': luncopy_name}) wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.client.get_lun_info(tgt_lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) self._copy_volume(volume, luncopy_name, snapshot_id, tgt_lun_id) # NOTE(jlc): Actually, we just only support replication here right # now, not hypermetro. 
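        # Recap of the flow above as a sketch (no additional behavior):
        #   1. _create_base_type_volume() creates the target LUN;
        #   2. _volume_ready() polls HEALTHSTATUS/RUNNINGSTATUS until
        #      the LUN is usable;
        #   3. _copy_volume() drives a LUNcopy from the snapshot;
        #   4. extend-type services (replication) are attached below.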
model_update = self._add_extend_type_to_volume(opts, lun_params, lun_info, model_update) return model_update def create_cloned_volume(self, volume, src_vref): """Clone a new volume from an existing volume.""" self._check_volume_exist_on_array(src_vref, constants.VOLUME_NOT_EXISTS_RAISE) # Form the snapshot structure. snapshot = {'id': uuid.uuid4().__str__(), 'volume_id': src_vref['id'], 'volume': src_vref} # Create snapshot. self.create_snapshot(snapshot) try: # Create volume from snapshot. model_update = self.create_volume_from_snapshot(volume, snapshot) finally: try: # Delete snapshot. self.delete_snapshot(snapshot) except exception.VolumeBackendAPIException: LOG.warning(_LW( 'Failure deleting the snapshot %(snapshot_id)s ' 'of volume %(volume_id)s.'), {'snapshot_id': snapshot['id'], 'volume_id': src_vref['id']},) return model_update def _check_volume_exist_on_array(self, volume, action): """Check whether the volume exists on the array. If the volume exists on the array, return the LUN ID. If not exists, raise or log warning. """ # Firstly, try to find LUN ID by volume['provider_location']. lun_id = volume.get('provider_location') # If LUN ID not recorded, find LUN ID by LUN NAME. if not lun_id: volume_name = huawei_utils.encode_name(volume['id']) lun_id = self.client.get_lun_id_by_name(volume_name) if not lun_id: msg = (_("Volume %s does not exist on the array.") % volume['id']) if action == constants.VOLUME_NOT_EXISTS_WARN: LOG.warning(msg) if action == constants.VOLUME_NOT_EXISTS_RAISE: raise exception.VolumeBackendAPIException(data=msg) return metadata = huawei_utils.get_admin_metadata(volume) lun_wwn = metadata.get('huawei_lun_wwn') if metadata else None if not lun_wwn: LOG.debug("No LUN WWN recorded for volume %s.", volume['id']) if not self.client.check_lun_exist(lun_id, lun_wwn): msg = (_("Volume %s does not exist on the array.") % volume['id']) if action == constants.VOLUME_NOT_EXISTS_WARN: LOG.warning(msg) if action == constants.VOLUME_NOT_EXISTS_RAISE: raise exception.VolumeBackendAPIException(data=msg) return return lun_id def extend_volume(self, volume, new_size): """Extend a volume.""" lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if opts.get('replication_enabled') == 'true': msg = (_("Can't extend replication volume, volume: %(id)s") % {"id": volume['id']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) lun_info = self.client.get_lun_info(lun_id) old_size = int(lun_info.get('CAPACITY')) new_size = int(new_size) * units.Gi / 512 if new_size == old_size: LOG.info(_LI("New size is equal to the real size from backend" " storage, no need to extend." " realsize: %(oldsize)s, newsize: %(newsize)s."), {'oldsize': old_size, 'newsize': new_size}) return if new_size < old_size: msg = (_("New size should be bigger than the real size from " "backend storage." 
" realsize: %(oldsize)s, newsize: %(newsize)s."), {'oldsize': old_size, 'newsize': new_size}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(volume['id']) LOG.info(_LI( 'Extend volume: %(volumename)s, ' 'oldsize: %(oldsize)s, newsize: %(newsize)s.'), {'volumename': volume_name, 'oldsize': old_size, 'newsize': new_size}) self.client.extend_lun(lun_id, new_size) def create_snapshot(self, snapshot): volume = snapshot.get('volume') if not volume: msg = (_("Can't get volume id from snapshot, snapshot: %(id)s") % {"id": snapshot['id']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(snapshot['volume_id']) lun_id = self.client.get_lun_id(volume, volume_name) snapshot_name = huawei_utils.encode_name(snapshot['id']) snapshot_description = snapshot['id'] snapshot_info = self.client.create_snapshot(lun_id, snapshot_name, snapshot_description) snapshot_id = snapshot_info['ID'] self.client.activate_snapshot(snapshot_id) return {'provider_location': snapshot_info['ID'], 'lun_info': snapshot_info} def delete_snapshot(self, snapshot): snapshotname = huawei_utils.encode_name(snapshot['id']) volume_name = huawei_utils.encode_name(snapshot['volume_id']) LOG.info(_LI( 'stop_snapshot: snapshot name: %(snapshot)s, ' 'volume name: %(volume)s.'), {'snapshot': snapshotname, 'volume': volume_name},) snapshot_id = snapshot.get('provider_location') if snapshot_id is None: snapshot_id = self.client.get_snapshot_id_by_name(snapshotname) if snapshot_id and self.client.check_snapshot_exist(snapshot_id): self.client.stop_snapshot(snapshot_id) self.client.delete_snapshot(snapshot_id) else: LOG.warning(_LW("Can't find snapshot on the array.")) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " "diff=%(diff)s, host=%(host)s.", {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host}) self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) # Check what changes are needed migration, change_opts, lun_id = self.determine_changes_when_retype( volume, new_type, host) model_update = {} replica_enabled_change = change_opts.get('replication_enabled') replica_type_change = change_opts.get('replication_type') if replica_enabled_change and replica_enabled_change[0] == 'true': try: self.replica.delete_replica(volume) model_update.update({'replication_status': 'disabled', 'replication_driver_data': None}) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error. ' 'Delete replication failed.')) return False try: if migration: LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with " "change %(change_opts)s.", {"lun_id": lun_id, "change_opts": change_opts}) if not self._migrate_volume(volume, host, new_type): LOG.warning(_LW("Storage-assisted migration failed during " "retype.")) return False else: # Modify lun to change policy self.modify_lun(lun_id, change_opts) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error.')) return False if replica_enabled_change and replica_enabled_change[1] == 'true': try: # If replica_enabled_change is not None, the # replica_type_change won't be None. See function # determine_changes_when_retype. 
lun_info = self.client.get_lun_info(lun_id) replica_info = self.replica.create_replica( lun_info, replica_type_change[1]) model_update.update(replica_info) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error. ' 'Create replication failed.')) return False return (True, model_update) def modify_lun(self, lun_id, change_opts): if change_opts.get('partitionid'): old, new = change_opts['partitionid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.client.remove_lun_from_partition(lun_id, old_id) if new_id: self.client.add_lun_to_partition(lun_id, new_id) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartpartition from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) success."), {"lun_id": lun_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) if change_opts.get('cacheid'): old, new = change_opts['cacheid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.client.remove_lun_from_cache(lun_id, old_id) if new_id: self.client.add_lun_to_cache(lun_id, new_id) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartcache from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) successfully."), {'lun_id': lun_id, 'old_id': old_id, "old_name": old_name, 'new_id': new_id, "new_name": new_name}) if change_opts.get('policy'): old_policy, new_policy = change_opts['policy'] self.client.change_lun_smarttier(lun_id, new_policy) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smarttier policy from " "%(old_policy)s to %(new_policy)s success."), {'lun_id': lun_id, 'old_policy': old_policy, 'new_policy': new_policy}) if change_opts.get('qos'): old_qos, new_qos = change_opts['qos'] old_qos_id = old_qos[0] old_qos_value = old_qos[1] if old_qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(old_qos_id, lun_id) if new_qos: smart_qos = smartx.SmartQos(self.client) smart_qos.add(new_qos, lun_id) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartqos from " "%(old_qos_value)s to %(new_qos)s success."), {'lun_id': lun_id, 'old_qos_value': old_qos_value, 'new_qos': new_qos}) def get_lun_specs(self, lun_id): lun_opts = { 'policy': None, 'partitionid': None, 'cacheid': None, 'LUNType': None, } lun_info = self.client.get_lun_info(lun_id) lun_opts['LUNType'] = int(lun_info['ALLOCTYPE']) if lun_info.get('DATATRANSFERPOLICY'): lun_opts['policy'] = lun_info['DATATRANSFERPOLICY'] if lun_info.get('SMARTCACHEPARTITIONID'): lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID'] if lun_info.get('CACHEPARTITIONID'): lun_opts['partitionid'] = lun_info['CACHEPARTITIONID'] return lun_opts def _check_needed_changes(self, lun_id, old_opts, new_opts, change_opts, new_type): new_cache_id = None new_cache_name = new_opts['cachename'] if new_cache_name: new_cache_id = self.client.get_cache_id_by_name(new_cache_name) if new_cache_id is None: msg = (_( "Can't find cache name on the array, cache name is: " "%(name)s.") % {'name': new_cache_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) new_partition_id = None new_partition_name = new_opts['partitionname'] if new_partition_name: new_partition_id = self.client.get_partition_id_by_name( new_partition_name) if new_partition_id is None: msg = (_( "Can't find partition name on the array, partition name " "is: %(name)s.") % {'name': new_partition_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # smarttier if old_opts['policy'] != new_opts['policy']: 
change_opts['policy'] = (old_opts['policy'], new_opts['policy']) # smartcache old_cache_id = old_opts['cacheid'] if old_cache_id != new_cache_id: old_cache_name = None if old_cache_id: cache_info = self.client.get_cache_info_by_id(old_cache_id) old_cache_name = cache_info['NAME'] change_opts['cacheid'] = ([old_cache_id, old_cache_name], [new_cache_id, new_cache_name]) # smartpartition old_partition_id = old_opts['partitionid'] if old_partition_id != new_partition_id: old_partition_name = None if old_partition_id: partition_info = self.client.get_partition_info_by_id( old_partition_id) old_partition_name = partition_info['NAME'] change_opts['partitionid'] = ([old_partition_id, old_partition_name], [new_partition_id, new_partition_name]) # smartqos new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) old_qos_id = self.client.get_qosid_by_lunid(lun_id) old_qos = self._get_qos_specs_from_array(old_qos_id) if old_qos != new_qos: change_opts['qos'] = ([old_qos_id, old_qos], new_qos) return change_opts def determine_changes_when_retype(self, volume, new_type, host): migration = False change_opts = { 'policy': None, 'partitionid': None, 'cacheid': None, 'qos': None, 'host': None, 'LUNType': None, 'replication_enabled': None, 'replication_type': None, } lun_id = volume.get('provider_location') old_opts = self.get_lun_specs(lun_id) new_specs = new_type['extra_specs'] new_opts = self._get_volume_params_from_specs(new_specs) if 'LUNType' not in new_opts: new_opts['LUNType'] = self.configuration.lun_type if volume['host'] != host['host']: migration = True change_opts['host'] = (volume['host'], host['host']) if old_opts['LUNType'] != new_opts['LUNType']: migration = True change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType']) volume_type = self._get_volume_type(volume) volume_opts = self._get_volume_params(volume_type) if (volume_opts['replication_enabled'] == 'true' or new_opts['replication_enabled'] == 'true'): # If replication_enabled changes, # then replication_type in change_opts will be set. change_opts['replication_enabled'] = ( volume_opts['replication_enabled'], new_opts['replication_enabled']) change_opts['replication_type'] = (volume_opts['replication_type'], new_opts['replication_type']) change_opts = self._check_needed_changes(lun_id, old_opts, new_opts, change_opts, new_type) LOG.debug("Determine changes when retype. Migration: " "%(migration)s, change_opts: %(change_opts)s.", {'migration': migration, 'change_opts': change_opts}) return migration, change_opts, lun_id def _get_qos_specs_from_array(self, qos_id): qos = {} qos_info = {} if qos_id: qos_info = self.client.get_qos_info(qos_id) for key, value in qos_info.items(): key = key.upper() if key in constants.QOS_KEYS: if key == 'LATENCY' and value == '0': continue else: qos[key] = value return qos def create_export(self, context, volume, connector): """Export a volume.""" pass def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): luncopy_id = self.client.create_luncopy(copy_name, src_lun, tgt_lun) wait_interval = self.configuration.lun_copy_wait_interval try: self.client.start_luncopy(luncopy_id) def _luncopy_complete(): luncopy_info = self.client.get_luncopy_info(luncopy_id) if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY: # luncopy_info['status'] means for the running status of # the luncopy. 
If luncopy_info['status'] is equal to '40', # this luncopy is completely ready. return True elif luncopy_info['state'] != constants.STATUS_HEALTH: # luncopy_info['state'] means for the healthy status of the # luncopy. If luncopy_info['state'] is not equal to '1', # this means that an error occurred during the LUNcopy # operation and we should abort it. err_msg = (_( 'An error occurred during the LUNcopy operation. ' 'LUNcopy name: %(luncopyname)s. ' 'LUNcopy status: %(luncopystatus)s. ' 'LUNcopy state: %(luncopystate)s.') % {'luncopyname': luncopy_id, 'luncopystatus': luncopy_info['status'], 'luncopystate': luncopy_info['state']},) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) huawei_utils.wait_for_condition(_luncopy_complete, wait_interval, self.configuration.lun_timeout) except Exception: with excutils.save_and_reraise_exception(): self.client.delete_luncopy(luncopy_id) self.delete_volume(volume) self.client.delete_luncopy(luncopy_id) def _check_lun_valid_for_manage(self, lun_info, external_ref): lun_id = lun_info.get('ID') # Check whether the LUN is already in LUN group. if lun_info.get('ISADD2LUNGROUP') == 'true': msg = (_("Can't import LUN %s to Cinder. Already exists in a LUN " "group.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN is Normal. if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: msg = _("Can't import LUN %s to Cinder. LUN status is not " "normal.") % lun_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a HyperMetroPair. try: hypermetro_pairs = self.client.get_hypermetro_pairs() except exception.VolumeBackendAPIException: hypermetro_pairs = [] LOG.debug("Can't get hypermetro info, pass the check.") for pair in hypermetro_pairs: if pair.get('LOCALOBJID') == lun_id: msg = (_("Can't import LUN %s to Cinder. Already exists in a " "HyperMetroPair.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a SplitMirror. try: split_mirrors = self.client.get_split_mirrors() except exception.VolumeBackendAPIException as ex: if re.search('License is unavailable', ex.msg): # Can't check whether the LUN has SplitMirror with it, # just pass the check and log it. split_mirrors = [] LOG.warning(_LW('No license for SplitMirror.')) else: msg = _("Failed to get SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) for mirror in split_mirrors: try: target_luns = self.client.get_target_luns(mirror.get('ID')) except exception.VolumeBackendAPIException: msg = _("Failed to get target LUN of SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) if (mirror.get('PRILUNID') == lun_id) or (lun_id in target_luns): msg = (_("Can't import LUN %s to Cinder. Already exists in a " "SplitMirror.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a migration task. try: migration_tasks = self.client.get_migration_task() except exception.VolumeBackendAPIException as ex: if re.search('License is unavailable', ex.msg): # Can't check whether the LUN has migration task with it, # just pass the check and log it. 
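                # Hedged sketch of the array response that lands in this
                # branch (payload shape assumed from the error handling
                # elsewhere in this driver):
                #     {'error': {'code': NO_MIGRATION_LICENSE,
                #                'description': '...'}}
                # so the migration-task check degrades to an empty list.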
                migration_tasks = []
                LOG.warning(_LW('No license for migration.'))
            else:
                msg = _("Failed to get migration task.")
                raise exception.VolumeBackendAPIException(data=msg)

        for migration in migration_tasks:
            if lun_id in (migration.get('PARENTID'),
                          migration.get('TARGETLUNID')):
                msg = (_("Can't import LUN %s to Cinder. Already exists in a "
                         "migration task.") % lun_id)
                raise exception.ManageExistingInvalidReference(
                    existing_ref=external_ref, reason=msg)

        # Check whether the LUN exists in a LUN copy task.
        lun_copy = lun_info.get('LUNCOPYIDS')
        if lun_copy and lun_copy[1:-1]:
            msg = (_("Can't import LUN %s to Cinder. Already exists in "
                     "a LUN copy task.") % lun_id)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

        # Check whether the LUN exists in a remote replication task.
        rmt_replication = lun_info.get('REMOTEREPLICATIONIDS')
        if rmt_replication and rmt_replication[1:-1]:
            msg = (_("Can't import LUN %s to Cinder. Already exists in "
                     "a remote replication task.") % lun_id)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

        # Check whether the LUN exists in a LUN mirror.
        if self.client.is_lun_in_mirror(lun_id):
            msg = (_("Can't import LUN %s to Cinder. Already exists in "
                     "a LUN mirror.") % lun_id)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

    def manage_existing(self, volume, external_ref):
        """Manage an existing volume on the backend storage."""
        # Check whether the LUN belongs to the specified pool.
        pool = volume_utils.extract_host(volume['host'], 'pool')
        LOG.debug("Pool specified is: %s.", pool)
        lun_info = self._get_lun_info_by_ref(external_ref)
        lun_id = lun_info.get('ID')
        description = lun_info.get('DESCRIPTION', '')
        if len(description) <= (
                constants.MAX_VOL_DESCRIPTION - len(volume['name']) - 1):
            description = volume['name'] + ' ' + description

        lun_pool = lun_info.get('PARENTNAME')
        LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.",
                  {"lun": lun_id, "pool": lun_pool})
        if pool != lun_pool:
            msg = (_("The specified LUN does not belong to the given "
                     "pool: %s.") % pool)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

        # Run the remaining checks to determine whether this LUN can be
        # imported.
        self._check_lun_valid_for_manage(lun_info, external_ref)
        type_id = volume.get('volume_type_id')
        new_opts = None
        if type_id:
            # Handle volume type if specified.
            old_opts = self.get_lun_specs(lun_id)
            volume_type = volume_types.get_volume_type(None, type_id)
            new_specs = volume_type.get('extra_specs')
            new_opts = self._get_volume_params_from_specs(new_specs)
            if ('LUNType' in new_opts
                    and old_opts['LUNType'] != new_opts['LUNType']):
                msg = (_("Can't import LUN %(lun_id)s to Cinder. "
                         "LUN type mismatched.") % {'lun_id': lun_id})
                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
            if volume_type:
                change_opts = {'policy': None, 'partitionid': None,
                               'cacheid': None, 'qos': None}
                change_opts = self._check_needed_changes(lun_id, old_opts,
                                                         new_opts,
                                                         change_opts,
                                                         volume_type)
                self.modify_lun(lun_id, change_opts)

        # Rename the LUN to make it manageable for Cinder.
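        # For reference, a hedged sketch of the model_update assembled
        # below (ID and WWN hypothetical):
        #
        #     {'provider_location': '22',
        #      'admin_metadata': {..., 'huawei_lun_wwn': '6ac152...'}}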
new_name = huawei_utils.encode_name(volume['id']) LOG.debug("Rename LUN %(old_name)s to %(new_name)s.", {'old_name': lun_info.get('NAME'), 'new_name': new_name}) self.client.rename_lun(lun_id, new_name, description) metadata = huawei_utils.get_admin_metadata(volume) metadata.update({'huawei_lun_wwn': lun_info['WWN']}) model_update = {} model_update.update({'admin_metadata': metadata}) model_update.update({'provider_location': lun_id}) if new_opts and new_opts.get('replication_enabled'): LOG.debug("Manage volume need to create replication.") try: lun_info = self.client.get_lun_info(lun_id) replica_info = self.replica.create_replica( lun_info, new_opts.get('replication_type')) model_update.update(replica_info) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Manage exist volume failed.")) return model_update def _get_lun_info_by_ref(self, external_ref): LOG.debug("Get external_ref: %s", external_ref) name = external_ref.get('source-name') id = external_ref.get('source-id') if not (name or id): msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) lun_id = id or self.client.get_lun_id_by_name(name) if not lun_id: msg = _("Can't find LUN on the array, please check the " "source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) lun_info = self.client.get_lun_info(lun_id) return lun_info def unmanage(self, volume): """Export Huawei volume from Cinder.""" LOG.debug("Unmanage volume: %s.", volume['id']) lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_WARN) if not lun_id: return lun_name = huawei_utils.encode_name(volume['id']) new_name = 'unmged_' + lun_name LOG.debug("Rename LUN %(lun_name)s to %(new_name)s.", {'lun_name': lun_name, 'new_name': new_name}) try: self.client.rename_lun(lun_id, new_name) except Exception: LOG.warning(_LW("Rename lun %(lun_id)s fails when " "unmanaging volume %(volume)s."), {"lun_id": lun_id, "volume": volume['id']}) def manage_existing_get_size(self, volume, external_ref): """Get the size of the existing volume.""" lun_info = self._get_lun_info_by_ref(external_ref) size = float(lun_info.get('CAPACITY')) // constants.CAPACITY_UNIT remainder = float(lun_info.get('CAPACITY')) % constants.CAPACITY_UNIT if int(remainder) > 0: msg = _("Volume size must be multiple of 1 GB.") raise exception.VolumeBackendAPIException(data=msg) return int(size) def _check_snapshot_valid_for_manage(self, snapshot_info, external_ref): snapshot_id = snapshot_info.get('ID') # Check whether the snapshot is normal. if snapshot_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: msg = _("Can't import snapshot %s to Cinder. " "Snapshot status is not normal" " or running status is not online.") % snapshot_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) if snapshot_info.get('EXPOSEDTOINITIATOR') != 'false': msg = _("Can't import snapshot %s to Cinder. 
" "Snapshot is exposed to initiator.") % snapshot_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) def _get_snapshot_info_by_ref(self, external_ref): LOG.debug("Get snapshot external_ref: %s.", external_ref) name = external_ref.get('source-name') id = external_ref.get('source-id') if not (name or id): msg = _('Must specify snapshot source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) snapshot_id = id or self.client.get_snapshot_id_by_name(name) if not snapshot_id: msg = _("Can't find snapshot on array, please check the " "source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) snapshot_info = self.client.get_snapshot_info(snapshot_id) return snapshot_info def manage_existing_snapshot(self, snapshot, existing_ref): snapshot_info = self._get_snapshot_info_by_ref(existing_ref) snapshot_id = snapshot_info.get('ID') volume = snapshot.get('volume') lun_id = volume.get('provider_location') if lun_id != snapshot_info.get('PARENTID'): msg = (_("Can't import snapshot %s to Cinder. " "Snapshot doesn't belong to volume."), snapshot_id) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Check whether this snapshot can be imported. self._check_snapshot_valid_for_manage(snapshot_info, existing_ref) # Rename the snapshot to make it manageable for Cinder. description = snapshot['id'] snapshot_name = huawei_utils.encode_name(snapshot['id']) self.client.rename_snapshot(snapshot_id, snapshot_name, description) if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE: self.client.activate_snapshot(snapshot_id) LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.", {'old_name': snapshot_info.get('NAME'), 'new_name': snapshot_name}) return {'provider_location': snapshot_id} def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Get the size of the existing snapshot.""" snapshot_info = self._get_snapshot_info_by_ref(existing_ref) size = (float(snapshot_info.get('USERCAPACITY')) // constants.CAPACITY_UNIT) remainder = (float(snapshot_info.get('USERCAPACITY')) % constants.CAPACITY_UNIT) if int(remainder) > 0: msg = _("Snapshot size must be multiple of 1 GB.") raise exception.VolumeBackendAPIException(data=msg) return int(size) def unmanage_snapshot(self, snapshot): """Unmanage the specified snapshot from Cinder management.""" LOG.debug("Unmanage snapshot: %s.", snapshot['id']) snapshot_name = huawei_utils.encode_name(snapshot['id']) snapshot_id = self.client.get_snapshot_id_by_name(snapshot_name) if not snapshot_id: LOG.warning(_LW("Can't find snapshot on the array: %s."), snapshot_name) return new_name = 'unmged_' + snapshot_name LOG.debug("Rename snapshot %(snapshot_name)s to %(new_name)s.", {'snapshot_name': snapshot_name, 'new_name': new_name}) try: self.client.rename_snapshot(snapshot_id, new_name) except Exception: LOG.warning(_LW("Failed to rename snapshot %(snapshot_id)s, " "snapshot name on array is %(snapshot_name)s."), {'snapshot_id': snapshot['id'], 'snapshot_name': snapshot_name}) def _classify_volume(self, volumes): normal_volumes = [] replica_volumes = [] for v in volumes: volume_type = self._get_volume_type(v) opts = self._get_volume_params(volume_type) if opts.get('replication_enabled') == 'true': replica_volumes.append(v) else: normal_volumes.append(v) return normal_volumes, replica_volumes def _failback_normal_volumes(self, volumes): volumes_update = [] for v in volumes: 
v_update = {} v_update['volume_id'] = v['id'] metadata = huawei_utils.get_volume_metadata(v) old_status = 'available' if 'old_status' in metadata: old_status = metadata['old_status'] del metadata['old_status'] v_update['updates'] = {'status': old_status, 'metadata': metadata} volumes_update.append(v_update) return volumes_update def _failback(self, volumes): if self.active_backend_id in ('', None): return 'default', [] normal_volumes, replica_volumes = self._classify_volume(volumes) volumes_update = [] replica_volumes_update = self.replica.failback(replica_volumes) volumes_update.extend(replica_volumes_update) normal_volumes_update = self._failback_normal_volumes(normal_volumes) volumes_update.extend(normal_volumes_update) self.active_backend_id = "" secondary_id = 'default' # Switch array connection. self.client, self.replica_client = self.replica_client, self.client self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) return secondary_id, volumes_update def _failover_normal_volumes(self, volumes): volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v['id'] metadata = huawei_utils.get_volume_metadata(v) metadata.update({'old_status': v['status']}) v_update['updates'] = {'status': 'error', 'metadata': metadata} volumes_update.append(v_update) return volumes_update def _failover(self, volumes): if self.active_backend_id not in ('', None): return self.replica_dev_conf['backend_id'], [] normal_volumes, replica_volumes = self._classify_volume(volumes) volumes_update = [] replica_volumes_update = self.replica.failover(replica_volumes) volumes_update.extend(replica_volumes_update) normal_volumes_update = self._failover_normal_volumes(normal_volumes) volumes_update.extend(normal_volumes_update) self.active_backend_id = self.replica_dev_conf['backend_id'] secondary_id = self.active_backend_id # Switch array connection. self.client, self.replica_client = self.replica_client, self.client self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) return secondary_id, volumes_update def failover_host(self, context, volumes, secondary_id=None): """Failover all volumes to secondary.""" if secondary_id == 'default': secondary_id, volumes_update = self._failback(volumes) elif (secondary_id == self.replica_dev_conf['backend_id'] or secondary_id is None): secondary_id, volumes_update = self._failover(volumes) else: msg = _("Invalid secondary id %s.") % secondary_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return secondary_id, volumes_update class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): """ISCSI driver for Huawei storage arrays. 
Version history: 1.0.0 - Initial driver 1.1.0 - Provide Huawei OceanStor storage 18000 driver 1.1.1 - Code refactor CHAP support Multiple pools support ISCSI multipath support SmartX support Volume migration support Volume retype support 2.0.0 - Rename to HuaweiISCSIDriver 2.0.1 - Manage/unmanage volume support 2.0.2 - Refactor HuaweiISCSIDriver 2.0.3 - Manage/unmanage snapshot support 2.0.5 - Replication V2 support """ VERSION = "2.0.5" def __init__(self, *args, **kwargs): super(HuaweiISCSIDriver, self).__init__(*args, **kwargs) def get_volume_stats(self, refresh=False): """Get volume status.""" data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.__class__.__name__ data['storage_protocol'] = 'iSCSI' data['driver_version'] = self.VERSION data['vendor_name'] = 'Huawei' return data @utils.synchronized('huawei', external=True) def initialize_connection(self, volume, connector): """Map a volume to a host and return target iSCSI information.""" lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) initiator_name = connector['initiator'] LOG.info(_LI( 'initiator name: %(initiator_name)s, ' 'LUN ID: %(lun_id)s.'), {'initiator_name': initiator_name, 'lun_id': lun_id}) (iscsi_iqns, target_ips, portgroup_id) = self.client.get_iscsi_params(connector) LOG.info(_LI('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, ' 'target_ip: %(target_ip)s, ' 'portgroup_id: %(portgroup_id)s.'), {'iscsi_iqn': iscsi_iqns, 'target_ip': target_ips, 'portgroup_id': portgroup_id},) # Create hostgroup if not exist. original_host_name = connector['host'] host_name = huawei_utils.encode_host_name(original_host_name) host_id = self.client.add_host_with_check(host_name, original_host_name) # Add initiator to the host. self.client.ensure_initiator_added(initiator_name, host_id) hostgroup_id = self.client.add_host_to_hostgroup(host_id) # Mapping lungroup and hostgroup to view. self.client.do_mapping(lun_id, hostgroup_id, host_id, portgroup_id) hostlun_id = self.client.get_host_lun_id(host_id, lun_id) LOG.info(_LI("initialize_connection, host lun id is: %s."), hostlun_id) chapinfo = self.client.find_chap_info(self.configuration.iscsi_info, initiator_name) # Return iSCSI properties. properties = {} properties['target_discovered'] = False properties['volume_id'] = volume['id'] multipath = connector.get('multipath', False) hostlun_id = int(hostlun_id) if not multipath: properties['target_portal'] = ('%s:3260' % target_ips[0]) properties['target_iqn'] = iscsi_iqns[0] properties['target_lun'] = hostlun_id else: properties['target_iqns'] = [iqn for iqn in iscsi_iqns] properties['target_portals'] = [ '%s:3260' % ip for ip in target_ips] properties['target_luns'] = [hostlun_id] * len(target_ips) # If use CHAP, return CHAP info. if chapinfo: chap_username, chap_password = chapinfo.split(';') properties['auth_method'] = 'CHAP' properties['auth_username'] = chap_username properties['auth_password'] = chap_password LOG.info(_LI("initialize_connection success. 
Return data: %s."), properties) return {'driver_volume_type': 'iscsi', 'data': properties} @utils.synchronized('huawei', external=True) def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_WARN) initiator_name = connector['initiator'] host_name = connector['host'] lungroup_id = None LOG.info(_LI( 'terminate_connection: initiator name: %(ini)s, ' 'LUN ID: %(lunid)s.'), {'ini': initiator_name, 'lunid': lun_id},) portgroup = None portgroup_id = None view_id = None left_lunnum = -1 for ini in self.configuration.iscsi_info: if ini['Name'] == initiator_name: for key in ini: if key == 'TargetPortGroup': portgroup = ini['TargetPortGroup'] break if portgroup: portgroup_id = self.client.get_tgt_port_group(portgroup) host_name = huawei_utils.encode_host_name(host_name) host_id = self.client.get_host_id_by_name(host_name) if host_id: mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id view_id = self.client.find_mapping_view(mapping_view_name) if view_id: lungroup_id = self.client.find_lungroup_from_map(view_id) # Remove lun from lungroup. if lun_id and lungroup_id: lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id) if lungroup_id in lungroup_ids: self.client.remove_lun_from_lungroup(lungroup_id, lun_id) else: LOG.warning(_LW("LUN is not in lungroup. " "LUN ID: %(lun_id)s. " "Lungroup id: %(lungroup_id)s."), {"lun_id": lun_id, "lungroup_id": lungroup_id}) # Remove portgroup from mapping view if no lun left in lungroup. if lungroup_id: left_lunnum = self.client.get_lunnum_from_lungroup(lungroup_id) if portgroup_id and view_id and (int(left_lunnum) <= 0): if self.client.is_portgroup_associated_to_view(view_id, portgroup_id): self.client.delete_portgroup_mapping_view(view_id, portgroup_id) if view_id and (int(left_lunnum) <= 0): self.client.remove_chap(initiator_name) if self.client.lungroup_associated(view_id, lungroup_id): self.client.delete_lungroup_mapping_view(view_id, lungroup_id) self.client.delete_lungroup(lungroup_id) if self.client.is_initiator_associated_to_host(initiator_name): self.client.remove_iscsi_from_host(initiator_name) hostgroup_name = constants.HOSTGROUP_PREFIX + host_id hostgroup_id = self.client.find_hostgroup(hostgroup_name) if hostgroup_id: if self.client.hostgroup_associated(view_id, hostgroup_id): self.client.delete_hostgoup_mapping_view(view_id, hostgroup_id) self.client.remove_host_from_hostgroup(hostgroup_id, host_id) self.client.delete_hostgroup(hostgroup_id) self.client.remove_host(host_id) self.client.delete_mapping_view(view_id) class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): """FC driver for Huawei OceanStor storage arrays. 
Version history: 1.0.0 - Initial driver 1.1.0 - Provide Huawei OceanStor 18000 storage volume driver 1.1.1 - Code refactor Multiple pools support SmartX support Volume migration support Volume retype support FC zone enhancement Volume hypermetro support 2.0.0 - Rename to HuaweiFCDriver 2.0.1 - Manage/unmanage volume support 2.0.2 - Refactor HuaweiFCDriver 2.0.3 - Manage/unmanage snapshot support 2.0.4 - Balanced FC port selection 2.0.5 - Replication V2 support """ VERSION = "2.0.5" def __init__(self, *args, **kwargs): super(HuaweiFCDriver, self).__init__(*args, **kwargs) self.fcsan = None def get_volume_stats(self, refresh=False): """Get volume status.""" data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.__class__.__name__ data['storage_protocol'] = 'FC' data['driver_version'] = self.VERSION data['vendor_name'] = 'Huawei' return data @utils.synchronized('huawei', external=True) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) wwns = connector['wwpns'] LOG.info(_LI( 'initialize_connection, initiator: %(wwpns)s,' ' LUN ID: %(lun_id)s.'), {'wwpns': wwns, 'lun_id': lun_id},) portg_id = None original_host_name = connector['host'] host_name = huawei_utils.encode_host_name(original_host_name) host_id = self.client.add_host_with_check(host_name, original_host_name) if not self.fcsan: self.fcsan = fczm_utils.create_lookup_service() if self.fcsan: # Use FC switch. zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client) (tgt_port_wwns, portg_id, init_targ_map) = ( zone_helper.build_ini_targ_map(wwns, host_id, lun_id)) for ini in init_targ_map: self.client.ensure_fc_initiator_added(ini, host_id) else: # Not use FC switch. online_wwns_in_host = ( self.client.get_host_online_fc_initiators(host_id)) online_free_wwns = self.client.get_online_free_wwns() for wwn in wwns: if (wwn not in online_wwns_in_host and wwn not in online_free_wwns): wwns_in_host = ( self.client.get_host_fc_initiators(host_id)) iqns_in_host = ( self.client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host or self.client.is_host_associated_to_hostgroup(host_id)): self.client.remove_host(host_id) msg = _('No FC initiator can be added to host.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for wwn in wwns: if wwn in online_free_wwns: self.client.add_fc_port_to_host(host_id, wwn) (tgt_port_wwns, init_targ_map) = ( self.client.get_init_targ_map(wwns)) # Add host into hostgroup. hostgroup_id = self.client.add_host_to_hostgroup(host_id) map_info = self.client.do_mapping(lun_id, hostgroup_id, host_id, portg_id) host_lun_id = self.client.get_host_lun_id(host_id, lun_id) # Return FC properties. fc_info = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': int(host_lun_id), 'target_discovered': True, 'target_wwn': tgt_port_wwns, 'volume_id': volume['id'], 'initiator_target_map': init_targ_map, 'map_info': map_info}, } loc_tgt_wwn = fc_info['data']['target_wwn'] local_ini_tgt_map = fc_info['data']['initiator_target_map'] # Deal with hypermetro connection. 
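# The merge below combines the local and remote array mappings into one set
# of connection properties (a summary of the following code, not extra
# behavior): the remote target WWNs are appended to 'target_wwn', each
# initiator's list in 'initiator_target_map' is extended with the remote
# targets it can also reach, and a host LUN ID available on both arrays is
# chosen so multipath sees a single LUN.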
metadata = huawei_utils.get_volume_metadata(volume) LOG.info(_LI("initialize_connection, metadata is: %s."), metadata) if 'hypermetro_id' in metadata: hyperm = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) rmt_fc_info = hyperm.connect_volume_fc(volume, connector) rmt_tgt_wwn = rmt_fc_info['data']['target_wwn'] rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map'] fc_info['data']['target_wwn'] = (loc_tgt_wwn + rmt_tgt_wwn) wwns = connector['wwpns'] for wwn in wwns: if (wwn in local_ini_tgt_map and wwn in rmt_ini_tgt_map): fc_info['data']['initiator_target_map'][wwn].extend( rmt_ini_tgt_map[wwn]) elif (wwn not in local_ini_tgt_map and wwn in rmt_ini_tgt_map): fc_info['data']['initiator_target_map'][wwn] = ( rmt_ini_tgt_map[wwn]) # else, do nothing loc_map_info = fc_info['data']['map_info'] rmt_map_info = rmt_fc_info['data']['map_info'] same_host_id = self._get_same_hostid(loc_map_info, rmt_map_info) self.client.change_hostlun_id(loc_map_info, same_host_id) hyperm.rmt_client.change_hostlun_id(rmt_map_info, same_host_id) fc_info['data']['target_lun'] = same_host_id hyperm.rmt_client.logout() LOG.info(_LI("Return FC info is: %s."), fc_info) return fc_info def _get_same_hostid(self, loc_fc_info, rmt_fc_info): loc_aval_luns = loc_fc_info['aval_luns'] loc_aval_luns = json.loads(loc_aval_luns) rmt_aval_luns = rmt_fc_info['aval_luns'] rmt_aval_luns = json.loads(rmt_aval_luns) same_host_id = None for i in range(1, 512): if i in rmt_aval_luns and i in loc_aval_luns: same_host_id = i break LOG.info(_LI("The same hostid is: %s."), same_host_id) if not same_host_id: msg = _("Can't find the same host id from arrays.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return same_host_id @utils.synchronized('huawei', external=True) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_WARN) wwns = connector['wwpns'] host_name = connector['host'] left_lunnum = -1 lungroup_id = None view_id = None LOG.info(_LI('terminate_connection: wwpns: %(wwns)s, ' 'LUN ID: %(lun_id)s.'), {'wwns': wwns, 'lun_id': lun_id}) host_name = huawei_utils.encode_host_name(host_name) host_id = self.client.get_host_id_by_name(host_name) if host_id: mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id view_id = self.client.find_mapping_view(mapping_view_name) if view_id: lungroup_id = self.client.find_lungroup_from_map(view_id) if lun_id and lungroup_id: lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id) if lungroup_id in lungroup_ids: self.client.remove_lun_from_lungroup(lungroup_id, lun_id) else: LOG.warning(_LW("LUN is not in lungroup. " "LUN ID: %(lun_id)s. 
" "Lungroup id: %(lungroup_id)s."), {"lun_id": lun_id, "lungroup_id": lungroup_id}) else: LOG.warning(_LW("Can't find lun on the array.")) if lungroup_id: left_lunnum = self.client.get_lunnum_from_lungroup(lungroup_id) if int(left_lunnum) > 0: fc_info = {'driver_volume_type': 'fibre_channel', 'data': {}} else: fc_info, portg_id = self._delete_zone_and_remove_fc_initiators( wwns, host_id) if lungroup_id: if view_id and self.client.lungroup_associated( view_id, lungroup_id): self.client.delete_lungroup_mapping_view(view_id, lungroup_id) self.client.delete_lungroup(lungroup_id) if portg_id: if view_id and self.client.is_portgroup_associated_to_view( view_id, portg_id): self.client.delete_portgroup_mapping_view(view_id, portg_id) self.client.delete_portgroup(portg_id) if host_id: hostgroup_name = constants.HOSTGROUP_PREFIX + host_id hostgroup_id = self.client.find_hostgroup(hostgroup_name) if hostgroup_id: if view_id and self.client.hostgroup_associated( view_id, hostgroup_id): self.client.delete_hostgoup_mapping_view( view_id, hostgroup_id) self.client.remove_host_from_hostgroup( hostgroup_id, host_id) self.client.delete_hostgroup(hostgroup_id) if not self.client.check_fc_initiators_exist_in_host( host_id): self.client.remove_host(host_id) if view_id: self.client.delete_mapping_view(view_id) # Deal with hypermetro connection. metadata = huawei_utils.get_volume_metadata(volume) LOG.info(_LI("Detach Volume, metadata is: %s."), metadata) if 'hypermetro_id' in metadata: hyperm = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) hyperm.disconnect_volume_fc(volume, connector) LOG.info(_LI("terminate_connection, return data is: %s."), fc_info) return fc_info def _delete_zone_and_remove_fc_initiators(self, wwns, host_id): # Get tgt_port_wwns and init_targ_map to remove zone. portg_id = None if not self.fcsan: self.fcsan = fczm_utils.create_lookup_service() if self.fcsan: zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client) (tgt_port_wwns, portg_id, init_targ_map) = ( zone_helper.get_init_targ_map(wwns, host_id)) else: (tgt_port_wwns, init_targ_map) = ( self.client.get_init_targ_map(wwns)) # Remove the initiators from host if need. if host_id: fc_initiators = self.client.get_host_fc_initiators(host_id) for wwn in wwns: if wwn in fc_initiators: self.client.remove_fc_from_host(wwn) info = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}} return info, portg_id cinder-8.0.0/cinder/volume/drivers/huawei/hypermetro.py0000664000567000056710000002557712701406250024424 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LI, _LW from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import huawei_utils LOG = logging.getLogger(__name__) class HuaweiHyperMetro(object): def __init__(self, client, rmt_client, configuration): self.client = client self.rmt_client = rmt_client self.configuration = configuration def create_hypermetro(self, local_lun_id, lun_params): """Create hypermetro.""" try: # Get the remote pool info. config_pool = self.configuration.metro_storage_pools remote_pool = self.rmt_client.get_all_pools() pool = self.rmt_client.get_pool_info(config_pool, remote_pool) if not pool: err_msg = _("Remote pool cannot be found.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) # Create remote lun. lun_params['PARENTID'] = pool['ID'] remotelun_info = self.rmt_client.create_lun(lun_params) remote_lun_id = remotelun_info['ID'] # Get hypermetro domain. try: domain_name = self.configuration.metro_domain_name domain_id = self.rmt_client.get_hyper_domain_id(domain_name) self._wait_volume_ready(remote_lun_id) hypermetro = self._create_hypermetro_pair(domain_id, local_lun_id, remote_lun_id) LOG.info(_LI("Hypermetro id: %(metro_id)s. " "Remote lun id: %(remote_lun_id)s."), {'metro_id': hypermetro['ID'], 'remote_lun_id': remote_lun_id}) return {'hypermetro_id': hypermetro['ID'], 'remote_lun_id': remote_lun_id} except exception.VolumeBackendAPIException as err: self.rmt_client.delete_lun(remote_lun_id) msg = _('Create hypermetro error. %s.') % err raise exception.VolumeBackendAPIException(data=msg) except exception.VolumeBackendAPIException: raise def delete_hypermetro(self, volume): """Delete hypermetro.""" metadata = huawei_utils.get_volume_metadata(volume) metro_id = metadata['hypermetro_id'] remote_lun_id = metadata['remote_lun_id'] if metro_id: exst_flag = self.client.check_hypermetro_exist(metro_id) if exst_flag: metro_info = self.client.get_hypermetro_by_id(metro_id) metro_status = int(metro_info['data']['RUNNINGSTATUS']) LOG.debug("Hypermetro status is: %s.", metro_status) if constants.HYPERMETRO_RUNNSTATUS_STOP != metro_status: self.client.stop_hypermetro(metro_id) # Delete hypermetro self.client.delete_hypermetro(metro_id) # Delete remote lun. if remote_lun_id and self.rmt_client.check_lun_exist(remote_lun_id): self.rmt_client.delete_lun(remote_lun_id) def _create_hypermetro_pair(self, domain_id, lun_id, remote_lun_id): """Create a HyperMetroPair.""" hcp_param = {"DOMAINID": domain_id, "HCRESOURCETYPE": '1', "ISFIRSTSYNC": False, "LOCALOBJID": lun_id, "RECONVERYPOLICY": '1', "REMOTEOBJID": remote_lun_id, "SPEED": '2'} return self.client.create_hypermetro(hcp_param) def connect_volume_fc(self, volume, connector): """Create map between a volume and a host for FC.""" wwns = connector['wwpns'] volume_name = huawei_utils.encode_name(volume['id']) LOG.info(_LI( 'initialize_connection_fc, initiator: %(wwpns)s,' ' volume name: %(volume)s.'), {'wwpns': wwns, 'volume': volume_name}) metadata = huawei_utils.get_volume_metadata(volume) lun_id = metadata['remote_lun_id'] if lun_id is None: lun_id = self.rmt_client.get_lun_id_by_name(volume_name) if lun_id is None: msg = _("Can't get volume id. 
Volume name: %s.") % volume_name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) original_host_name = connector['host'] host_name = huawei_utils.encode_host_name(original_host_name) host_id = self.client.add_host_with_check(host_name, original_host_name) # Create hostgroup if not exist. host_id = self.rmt_client.add_host_with_check( host_name, original_host_name) online_wwns_in_host = ( self.rmt_client.get_host_online_fc_initiators(host_id)) online_free_wwns = self.rmt_client.get_online_free_wwns() for wwn in wwns: if (wwn not in online_wwns_in_host and wwn not in online_free_wwns): wwns_in_host = ( self.rmt_client.get_host_fc_initiators(host_id)) iqns_in_host = ( self.rmt_client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host): self.rmt_client.remove_host(host_id) msg = _('Can not add FC port to host.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for wwn in wwns: if wwn in online_free_wwns: self.rmt_client.add_fc_port_to_host(host_id, wwn) (tgt_port_wwns, init_targ_map) = ( self.rmt_client.get_init_targ_map(wwns)) # Add host into hostgroup. hostgroup_id = self.rmt_client.add_host_to_hostgroup(host_id) map_info = self.rmt_client.do_mapping(lun_id, hostgroup_id, host_id) if not map_info: msg = _('Map info is None due to array version ' 'not supporting hypermetro.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) host_lun_id = self.rmt_client.get_host_lun_id(host_id, lun_id) # Return FC properties. fc_info = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': int(host_lun_id), 'target_discovered': True, 'target_wwn': tgt_port_wwns, 'volume_id': volume['id'], 'initiator_target_map': init_targ_map, 'map_info': map_info}, } LOG.info(_LI('Remote return FC info is: %s.'), fc_info) return fc_info def disconnect_volume_fc(self, volume, connector): """Delete map between a volume and a host for FC.""" wwns = connector['wwpns'] volume_name = huawei_utils.encode_name(volume['id']) metadata = huawei_utils.get_volume_metadata(volume) lun_id = metadata['remote_lun_id'] host_name = connector['host'] left_lunnum = -1 lungroup_id = None view_id = None LOG.info(_LI('terminate_connection_fc: volume name: %(volume)s, ' 'wwpns: %(wwns)s, ' 'lun_id: %(lunid)s.'), {'volume': volume_name, 'wwns': wwns, 'lunid': lun_id},) host_name = huawei_utils.encode_host_name(host_name) hostid = self.rmt_client.get_host_id_by_name(host_name) if hostid: mapping_view_name = constants.MAPPING_VIEW_PREFIX + hostid view_id = self.rmt_client.find_mapping_view( mapping_view_name) if view_id: lungroup_id = self.rmt_client.find_lungroup_from_map( view_id) if lun_id and self.rmt_client.check_lun_exist(lun_id): if lungroup_id: lungroup_ids = self.rmt_client.get_lungroupids_by_lunid( lun_id) if lungroup_id in lungroup_ids: self.rmt_client.remove_lun_from_lungroup( lungroup_id, lun_id) else: LOG.warning(_LW("Lun is not in lungroup. 
" "Lun id: %(lun_id)s, " "lungroup id: %(lungroup_id)s"), {"lun_id": lun_id, "lungroup_id": lungroup_id}) (tgt_port_wwns, init_targ_map) = ( self.rmt_client.get_init_targ_map(wwns)) hostid = self.rmt_client.get_host_id_by_name(host_name) if hostid: mapping_view_name = constants.MAPPING_VIEW_PREFIX + hostid view_id = self.rmt_client.find_mapping_view( mapping_view_name) if view_id: lungroup_id = self.rmt_client.find_lungroup_from_map( view_id) if lungroup_id: left_lunnum = self.rmt_client.get_lunnum_from_lungroup( lungroup_id) if int(left_lunnum) > 0: info = {'driver_volume_type': 'fibre_channel', 'data': {}} else: info = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}, } return info def _wait_volume_ready(self, lun_id): wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.rmt_client.get_lun_info(lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) def retype(self, volume, new_type): return False def get_hypermetro_stats(self, hypermetro_id): pass cinder-8.0.0/cinder/volume/drivers/huawei/replication.py0000664000567000056710000005544012701406250024527 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import json from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _, _LW, _LE from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import huawei_utils LOG = logging.getLogger(__name__) class AbsReplicaOp(object): def __init__(self, client): self.client = client def create(self, **kwargs): pass def delete(self, replica_id): pass def protect_second(self, replica_id): pass def unprotect_second(self, replica_id): pass def sync(self, replica_id): pass def split(self, replica_id): pass def switch(self, replica_id): pass def is_primary(self, replica_info): flag = replica_info.get('ISPRIMARY') if flag and flag.lower() == 'true': return True return False def get_replica_info(self, replica_id): return {} def _is_status(self, status_key, status, replica_info): if type(status) in (list, tuple): return replica_info.get(status_key, '') in status if type(status) is str: return replica_info.get(status_key, '') == status return False def is_running_status(self, status, replica_info): return self._is_status(constants.REPLICA_RUNNING_STATUS_KEY, status, replica_info) def is_health_status(self, status, replica_info): return self._is_status(constants.REPLICA_HEALTH_STATUS_KEY, status, replica_info) class PairOp(AbsReplicaOp): def create(self, local_lun_id, rmt_lun_id, rmt_dev_id, rmt_dev_name, replica_model, speed=constants.REPLICA_SPEED, period=constants.REPLICA_PERIOD, **kwargs): super(PairOp, self).create(**kwargs) params = { "LOCALRESID": local_lun_id, "LOCALRESTYPE": '11', "REMOTEDEVICEID": rmt_dev_id, "REMOTEDEVICENAME": rmt_dev_name, "REMOTERESID": rmt_lun_id, "REPLICATIONMODEL": replica_model, # recovery policy. 1: auto, 2: manual "RECOVERYPOLICY": '2', "SPEED": speed, } if replica_model == constants.REPLICA_ASYNC_MODEL: # Synchronize type values: # 1, manual # 2, timed wait when synchronization begins # 3, timed wait when synchronization ends params['SYNCHRONIZETYPE'] = '2' params['TIMINGVAL'] = period try: pair_info = self.client.create_pair(params) except Exception as err: msg = _('Create replication pair failed. 
Error: %s.') % err LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return pair_info def split(self, pair_id): self.client.split_pair(pair_id) def delete(self, pair_id, force=False): self.client.delete_pair(pair_id, force) def protect_second(self, pair_id): self.client.set_pair_second_access(pair_id, constants.REPLICA_SECOND_RO) def unprotect_second(self, pair_id): self.client.set_pair_second_access(pair_id, constants.REPLICA_SECOND_RW) def sync(self, pair_id): self.client.sync_pair(pair_id) def switch(self, pair_id): self.client.switch_pair(pair_id) def get_replica_info(self, pair_id): return self.client.get_pair_by_id(pair_id) class CGOp(AbsReplicaOp): pass class ReplicaCommonDriver(object): def __init__(self, conf, replica_op): self.conf = conf self.op = replica_op def protect_second(self, replica_id): info = self.op.get_replica_info(replica_id) if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RO: return self.op.protect_second(replica_id) self.wait_second_access(replica_id, constants.REPLICA_SECOND_RO) def unprotect_second(self, replica_id): info = self.op.get_replica_info(replica_id) if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RW: return self.op.unprotect_second(replica_id) self.wait_second_access(replica_id, constants.REPLICA_SECOND_RW) def sync(self, replica_id, wait_complete=False): self.protect_second(replica_id) expect_status = (constants.REPLICA_RUNNING_STATUS_NORMAL, constants.REPLICA_RUNNING_STATUS_SYNC, constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) info = self.op.get_replica_info(replica_id) # When running status is synchronizing or normal, # it's not necessary to do synchronize again. if (info.get('REPLICATIONMODEL') == constants.REPLICA_SYNC_MODEL and self.op.is_running_status(expect_status, info)): return self.op.sync(replica_id) self.wait_expect_state(replica_id, expect_status) if wait_complete: self.wait_replica_ready(replica_id) def split(self, replica_id): running_status = (constants.REPLICA_RUNNING_STATUS_SPLIT, constants.REPLICA_RUNNING_STATUS_INVALID) info = self.op.get_replica_info(replica_id) if self.op.is_running_status(running_status, info): return try: self.op.split(replica_id) except Exception as err: LOG.warning(_LW('Split replication exception: %s.'), err) try: self.wait_expect_state(replica_id, running_status) except Exception as err: msg = _('Split replication failed.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def enable(self, replica_id, wait_sync_complete=False): info = self.op.get_replica_info(replica_id) if not self.op.is_primary(info): self.switch(replica_id) self.sync(replica_id) return None def switch(self, replica_id): self.split(replica_id) self.unprotect_second(replica_id) self.op.switch(replica_id) # Wait to be primary def _wait_switch_to_primary(): info = self.op.get_replica_info(replica_id) if self.op.is_primary(info): return True return False interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT huawei_utils.wait_for_condition(_wait_switch_to_primary, interval, timeout) def failover(self, replica_id): """Failover replication. Purpose: 1. Split replication. 2. Set secondary access read & write. 
""" info = self.op.get_replica_info(replica_id) if self.op.is_primary(info): msg = _('We should not do switch over on primary array.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) sync_status_set = (constants.REPLICA_RUNNING_STATUS_SYNC, constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) if self.op.is_running_status(sync_status_set, info): self.wait_replica_ready(replica_id) self.split(replica_id) self.op.unprotect_second(replica_id) def wait_replica_ready(self, replica_id, interval=None, timeout=None): LOG.debug('Wait synchronize complete.') running_status_normal = (constants.REPLICA_RUNNING_STATUS_NORMAL, constants.REPLICA_RUNNING_STATUS_SYNCED) running_status_sync = (constants.REPLICA_RUNNING_STATUS_SYNC, constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) health_status_normal = constants.REPLICA_HEALTH_STATUS_NORMAL def _replica_ready(): info = self.op.get_replica_info(replica_id) if (self.op.is_running_status(running_status_normal, info) and self.op.is_health_status(health_status_normal, info)): return True if not self.op.is_running_status(running_status_sync, info): msg = (_('Wait synchronize failed. Running status: %s.') % info.get(constants.REPLICA_RUNNING_STATUS_KEY)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return False if not interval: interval = constants.DEFAULT_WAIT_INTERVAL if not timeout: timeout = constants.DEFAULT_WAIT_TIMEOUT huawei_utils.wait_for_condition(_replica_ready, interval, timeout) def wait_second_access(self, replica_id, access_level): def _check_access(): info = self.op.get_replica_info(replica_id) if info.get('SECRESACCESS') == access_level: return True return False interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT huawei_utils.wait_for_condition(_check_access, interval, timeout) def wait_expect_state(self, replica_id, running_status, health_status=None, interval=None, timeout=None): def _check_state(): info = self.op.get_replica_info(replica_id) if self.op.is_running_status(running_status, info): if (not health_status or self.op.is_health_status(health_status, info)): return True return False if not interval: interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL if not timeout: timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT huawei_utils.wait_for_condition(_check_state, interval, timeout) def get_replication_driver_data(volume): if volume.get('replication_driver_data'): return json.loads(volume['replication_driver_data']) return {} def to_string(dict_data): if dict_data: return json.dumps(dict_data) return '' class ReplicaPairManager(object): def __init__(self, local_client, rmt_client, conf): self.local_client = local_client self.rmt_client = rmt_client self.conf = conf # Now just support one remote pool. self.rmt_pool = self.rmt_client.storage_pools[0] self.local_op = PairOp(self.local_client) self.local_driver = ReplicaCommonDriver(self.conf, self.local_op) self.rmt_op = PairOp(self.rmt_client) self.rmt_driver = ReplicaCommonDriver(self.conf, self.rmt_op) def try_get_remote_wwn(self): try: info = self.rmt_client.get_array_info() return info.get('wwn') except Exception as err: LOG.warning(_LW('Get remote array wwn failed. Error: %s.'), err) return None def get_remote_device_by_wwn(self, wwn): devices = {} try: devices = self.local_client.get_remote_devices() except Exception as err: LOG.warning(_LW('Get remote devices failed. 
Error: %s.'), err) for device in devices: if device.get('WWN') == wwn: return device return {} def check_remote_available(self): # We get device wwn in every check time. # If remote array changed, we can run normally. wwn = self.try_get_remote_wwn() if not wwn: return False device = self.get_remote_device_by_wwn(wwn) # Check remote device is available to use. # If array type is replication, 'ARRAYTYPE' == '1'. # If health status is normal, 'HEALTHSTATUS' == '1'. if (device and device.get('ARRAYTYPE') == '1' and device.get('HEALTHSTATUS') == '1' and device.get('RUNNINGSTATUS') == constants.STATUS_RUNNING): return True return False def update_replica_capability(self, stats): is_rmt_dev_available = self.check_remote_available() if not is_rmt_dev_available: LOG.warning(_LW('Remote device is unavailable.')) return stats for pool in stats['pools']: pool['replication_enabled'] = True pool['replication_type'] = ['sync', 'async'] return stats def get_rmt_dev_info(self): wwn = self.try_get_remote_wwn() if not wwn: return None, None device = self.get_remote_device_by_wwn(wwn) if not device: return None, None return device.get('ID'), device.get('NAME') def build_rmt_lun_params(self, local_lun_info): params = { 'TYPE': '11', 'NAME': local_lun_info['NAME'], 'PARENTTYPE': '216', 'PARENTID': self.rmt_client.get_pool_id(self.rmt_pool), 'DESCRIPTION': local_lun_info['DESCRIPTION'], 'ALLOCTYPE': local_lun_info['ALLOCTYPE'], 'CAPACITY': local_lun_info['CAPACITY'], 'WRITEPOLICY': self.conf.lun_write_type, 'MIRRORPOLICY': self.conf.lun_mirror_switch, 'PREFETCHPOLICY': self.conf.lun_prefetch_type, 'PREFETCHVALUE': self.conf.lun_prefetch_value, 'DATATRANSFERPOLICY': self.conf.lun_policy, 'READCACHEPOLICY': self.conf.lun_read_cache_policy, 'WRITECACHEPOLICY': self.conf.lun_write_cache_policy, } LOG.debug('Remote lun params: %s.', params) return params def wait_volume_online(self, client, lun_info, interval=None, timeout=None): online_status = constants.STATUS_VOLUME_READY if lun_info.get('RUNNINGSTATUS') == online_status: return lun_id = lun_info['ID'] def _wait_online(): info = client.get_lun_info(lun_id) return info.get('RUNNINGSTATUS') == online_status if not interval: interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL if not timeout: timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT huawei_utils.wait_for_condition(_wait_online, interval, timeout) def create_rmt_lun(self, local_lun_info): # Create on rmt array. If failed, raise exception. lun_params = self.build_rmt_lun_params(local_lun_info) lun_info = self.rmt_client.create_lun(lun_params) try: self.wait_volume_online(self.rmt_client, lun_info) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.rmt_client.delete_lun(lun_info['ID']) return lun_info def create_replica(self, local_lun_info, replica_model): """Create remote LUN and replication pair. Purpose: 1. create remote lun 2. create replication pair 3. 
enable replication pair """ LOG.debug(('Create replication, local lun info: %(info)s, ' 'replication model: %(model)s.'), {'info': local_lun_info, 'model': replica_model}) local_lun_id = local_lun_info['ID'] self.wait_volume_online(self.local_client, local_lun_info) # step1, create remote lun rmt_lun_info = self.create_rmt_lun(local_lun_info) rmt_lun_id = rmt_lun_info['ID'] # step2, get remote device info rmt_dev_id, rmt_dev_name = self.get_rmt_dev_info() if not rmt_lun_id or not rmt_dev_name: self._delete_rmt_lun(rmt_lun_id) msg = _('Get remote device info failed.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # step3, create replication pair try: pair_info = self.local_op.create(local_lun_id, rmt_lun_id, rmt_dev_id, rmt_dev_name, replica_model) pair_id = pair_info['ID'] except Exception as err: with excutils.save_and_reraise_exception(): LOG.error(_LE('Create pair failed. Error: %s.'), err) self._delete_rmt_lun(rmt_lun_id) # step4, start sync manually. If replication type is sync, # then wait for sync complete. wait_complete = (replica_model == constants.REPLICA_SYNC_MODEL) try: self.local_driver.sync(pair_id, wait_complete) except Exception as err: with excutils.save_and_reraise_exception(): LOG.error(_LE('Start synchronization failed. Error: %s.'), err) self._delete_pair(pair_id) self._delete_rmt_lun(rmt_lun_id) model_update = {} driver_data = {'pair_id': pair_id, 'rmt_lun_id': rmt_lun_id} model_update['replication_driver_data'] = to_string(driver_data) model_update['replication_status'] = 'available' LOG.debug('Create replication, return info: %s.', model_update) return model_update def _delete_pair(self, pair_id): if (not pair_id or not self.local_client.check_pair_exist(pair_id)): return self.local_driver.split(pair_id) self.local_op.delete(pair_id) def _delete_rmt_lun(self, lun_id): if lun_id and self.rmt_client.check_lun_exist(lun_id): self.rmt_client.delete_lun(lun_id) def delete_replica(self, volume): """Delete replication pair and remote lun. Purpose: 1. delete replication pair 2. delete remote_lun """ LOG.debug('Delete replication, volume: %s.', volume['id']) info = get_replication_driver_data(volume) pair_id = info.get('pair_id') if pair_id: self._delete_pair(pair_id) # Delete remote_lun rmt_lun_id = info.get('rmt_lun_id') if rmt_lun_id: self._delete_rmt_lun(rmt_lun_id) def failback(self, volumes): """Failover volumes back to primary backend. The main steps: 1. Switch the role of replication pairs. 2. Copy the second LUN data back to primary LUN. 3. Split replication pairs. 4. Switch the role of replication pairs. 5. Enable replications. """ volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v['id'] drv_data = get_replication_driver_data(v) pair_id = drv_data.get('pair_id') if not pair_id: LOG.warning(_LW("No pair id in volume %s."), v['id']) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue rmt_lun_id = drv_data.get('rmt_lun_id') if not rmt_lun_id: LOG.warning(_LW("No remote lun id in volume %s."), v['id']) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue # Switch replication pair role, and start synchronize. self.local_driver.enable(pair_id) # Wait for synchronize complete. self.local_driver.wait_replica_ready(pair_id) # Split replication pair again self.rmt_driver.failover(pair_id) # Switch replication pair role, and start synchronize. 
self.rmt_driver.enable(pair_id) lun_info = self.rmt_client.get_lun_info(rmt_lun_id) admin_metadata = huawei_utils.get_admin_metadata(v) admin_metadata.update({'huawei_lun_wwn': lun_info['WWN']}) new_drv_data = {'pair_id': pair_id, 'rmt_lun_id': v['provider_location']} new_drv_data = to_string(new_drv_data) v_update['updates'] = {'provider_location': rmt_lun_id, 'replication_status': 'available', 'replication_driver_data': new_drv_data, 'admin_metadata': admin_metadata} volumes_update.append(v_update) return volumes_update def failover(self, volumes): """Failover volumes back to secondary array. Split the replication pairs and make the secondary LUNs R&W. """ volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v['id'] drv_data = get_replication_driver_data(v) pair_id = drv_data.get('pair_id') if not pair_id: LOG.warning(_LW("No pair id in volume %s."), v['id']) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue rmt_lun_id = drv_data.get('rmt_lun_id') if not rmt_lun_id: LOG.warning(_LW("No remote lun id in volume %s."), v['id']) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue self.rmt_driver.failover(pair_id) lun_info = self.rmt_client.get_lun_info(rmt_lun_id) admin_metadata = huawei_utils.get_admin_metadata(v) admin_metadata.update({'huawei_lun_wwn': lun_info['WWN']}) new_drv_data = {'pair_id': pair_id, 'rmt_lun_id': v['provider_location']} new_drv_data = to_string(new_drv_data) v_update['updates'] = {'provider_location': rmt_lun_id, 'replication_status': 'failed-over', 'replication_driver_data': new_drv_data, 'admin_metadata': admin_metadata} volumes_update.append(v_update) return volumes_update def get_replication_opts(opts): if opts.get('replication_type') == 'sync': opts['replication_type'] = constants.REPLICA_SYNC_MODEL else: opts['replication_type'] = constants.REPLICA_ASYNC_MODEL return opts cinder-8.0.0/cinder/volume/drivers/huawei/constants.py0000664000567000056710000000642212701406250024226 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. STATUS_HEALTH = '1' STATUS_ACTIVE = '43' STATUS_RUNNING = '10' STATUS_VOLUME_READY = '27' STATUS_LUNCOPY_READY = '40' STATUS_QOS_ACTIVE = '2' BLOCK_STORAGE_POOL_TYPE = '1' FILE_SYSTEM_POOL_TYPE = '2' HOSTGROUP_PREFIX = 'OpenStack_HostGroup_' LUNGROUP_PREFIX = 'OpenStack_LunGroup_' MAPPING_VIEW_PREFIX = 'OpenStack_Mapping_View_' PORTGROUP_PREFIX = 'OpenStack_PortGroup_' QOS_NAME_PREFIX = 'OpenStack_' PORTGROUP_DESCRIP_PREFIX = "Please do NOT modify this. 
Engine ID: " ARRAY_VERSION = 'V300R003C00' FC_PORT_CONNECTED = '10' FC_INIT_ONLINE = '27' FC_PORT_MODE_FABRIC = '0' CAPACITY_UNIT = 1024.0 * 1024.0 * 2 DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30 DEFAULT_WAIT_INTERVAL = 5 MIGRATION_WAIT_INTERVAL = 5 MIGRATION_FAULT = '74' MIGRATION_COMPLETE = '76' ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 SOCKET_TIMEOUT = 52 ERROR_VOLUME_ALREADY_EXIST = 1077948993 LOGIN_SOCKET_TIMEOUT = 4 ERROR_VOLUME_NOT_EXIST = 1077939726 RELOGIN_ERROR_PASS = [ERROR_VOLUME_NOT_EXIST] HYPERMETRO_RUNNSTATUS_STOP = 41 HYPERMETRO_RUNNSTATUS_NORMAL = 1 NO_SPLITMIRROR_LICENSE = 1077950233 NO_MIGRATION_LICENSE = 1073806606 THICK_LUNTYPE = 0 THIN_LUNTYPE = 1 MAX_HOSTNAME_LENGTH = 31 MAX_VOL_DESCRIPTION = 170 PORT_NUM_PER_CONTR = 2 OS_TYPE = {'Linux': '0', 'Windows': '1', 'Solaris': '2', 'HP-UX': '3', 'AIX': '4', 'XenServer': '5', 'Mac OS X': '6', 'VMware ESX': '7'} HUAWEI_VALID_KEYS = ['maxIOPS', 'minIOPS', 'minBandWidth', 'maxBandWidth', 'latency', 'IOType'] QOS_KEYS = [i.upper() for i in HUAWEI_VALID_KEYS] EXTRA_QOS_KEYS = ['MAXIOPS', 'MINIOPS', 'MINBANDWIDTH', 'MAXBANDWIDTH'] LOWER_LIMIT_KEYS = ['MINIOPS', 'LATENCY', 'MINBANDWIDTH'] UPPER_LIMIT_KEYS = ['MAXIOPS', 'MAXBANDWIDTH'] MAX_LUN_NUM_IN_QOS = 64 DEFAULT_REPLICA_WAIT_INTERVAL = 1 DEFAULT_REPLICA_WAIT_TIMEOUT = 10 REPLICA_SYNC_MODEL = '1' REPLICA_ASYNC_MODEL = '2' REPLICA_SPEED = '2' REPLICA_PERIOD = '3600' REPLICA_SECOND_RO = '2' REPLICA_SECOND_RW = '3' REPLICA_RUNNING_STATUS_KEY = 'RUNNINGSTATUS' REPLICA_RUNNING_STATUS_INITIAL_SYNC = '21' REPLICA_RUNNING_STATUS_SYNC = '23' REPLICA_RUNNING_STATUS_SYNCED = '24' REPLICA_RUNNING_STATUS_NORMAL = '1' REPLICA_RUNNING_STATUS_SPLIT = '26' REPLICA_RUNNING_STATUS_INVALID = '35' REPLICA_HEALTH_STATUS_KEY = 'HEALTHSTATUS' REPLICA_HEALTH_STATUS_NORMAL = '1' REPLICA_LOCAL_DATA_STATUS_KEY = 'PRIRESDATASTATUS' REPLICA_REMOTE_DATA_STATUS_KEY = 'SECRESDATASTATUS' REPLICA_DATA_SYNC_KEY = 'ISDATASYNC' REPLICA_DATA_STATUS_SYNCED = '1' REPLICA_DATA_STATUS_COMPLETE = '2' REPLICA_DATA_STATUS_INCOMPLETE = '3' VOLUME_NOT_EXISTS_WARN = 'warning' VOLUME_NOT_EXISTS_RAISE = 'raise' cinder-8.0.0/cinder/volume/drivers/huawei/huawei_conf.py0000664000567000056710000002452112701406250024501 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Set Huawei private configuration into Configuration object. For conveniently get private configuration. We parse Huawei config file and set every property into Configuration object as an attribute. 
""" import base64 import six from xml.etree import ElementTree as ET from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.huawei import constants LOG = logging.getLogger(__name__) class HuaweiConf(object): def __init__(self, conf): self.conf = conf def _encode_authentication(self): need_encode = False tree = ET.parse(self.conf.cinder_huawei_conf_file) xml_root = tree.getroot() name_node = xml_root.find('Storage/UserName') pwd_node = xml_root.find('Storage/UserPassword') if (name_node is not None and not name_node.text.startswith('!$$$')): name_node.text = '!$$$' + base64.b64encode(name_node.text) need_encode = True if (pwd_node is not None and not pwd_node.text.startswith('!$$$')): pwd_node.text = '!$$$' + base64.b64encode(pwd_node.text) need_encode = True if need_encode: utils.execute('chmod', '600', self.conf.cinder_huawei_conf_file, run_as_root=True) tree.write(self.conf.cinder_huawei_conf_file, 'UTF-8') def update_config_value(self): self._encode_authentication() set_attr_funcs = (self._san_address, self._san_user, self._san_password, self._san_product, self._san_protocol, self._lun_type, self._lun_ready_wait_interval, self._lun_copy_wait_interval, self._lun_timeout, self._lun_write_type, self._lun_mirror_switch, self._lun_prefetch, self._lun_policy, self._lun_read_cache_policy, self._lun_write_cache_policy, self._storage_pools, self._iscsi_default_target_ip, self._iscsi_info,) tree = ET.parse(self.conf.cinder_huawei_conf_file) xml_root = tree.getroot() for f in set_attr_funcs: f(xml_root) def _san_address(self, xml_root): text = xml_root.findtext('Storage/RestURL') if not text: msg = _("RestURL is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) addrs = text.split(';') addrs = list(set([x.strip() for x in addrs if x.strip()])) setattr(self.conf, 'san_address', addrs) def _san_user(self, xml_root): text = xml_root.findtext('Storage/UserName') if not text: msg = _("UserName is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) user = base64.b64decode(text[4:]) setattr(self.conf, 'san_user', user) def _san_password(self, xml_root): text = xml_root.findtext('Storage/UserPassword') if not text: msg = _("UserPassword is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) pwd = base64.b64decode(text[4:]) setattr(self.conf, 'san_password', pwd) def _san_product(self, xml_root): text = xml_root.findtext('Storage/Product') if not text: msg = _("SAN product is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) product = text.strip() setattr(self.conf, 'san_product', product) def _san_protocol(self, xml_root): text = xml_root.findtext('Storage/Protocol') if not text: msg = _("SAN protocol is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) protocol = text.strip() setattr(self.conf, 'san_protocol', protocol) def _lun_type(self, xml_root): lun_type = constants.THICK_LUNTYPE text = xml_root.findtext('LUN/LUNType') if text: lun_type = text.strip() if lun_type == 'Thick': lun_type = constants.THICK_LUNTYPE elif lun_type == 'Thin': lun_type = constants.THIN_LUNTYPE else: msg = (_("Invalid lun type %s is configured.") % lun_type) LOG.exception(msg) raise exception.InvalidInput(reason=msg) setattr(self.conf, 'lun_type', lun_type) def _lun_ready_wait_interval(self, xml_root): text = xml_root.findtext('LUN/LUNReadyWaitInterval') interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL 
setattr(self.conf, 'lun_ready_wait_interval', int(interval)) def _lun_copy_wait_interval(self, xml_root): text = xml_root.findtext('LUN/LUNcopyWaitInterval') interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL setattr(self.conf, 'lun_copy_wait_interval', int(interval)) def _lun_timeout(self, xml_root): text = xml_root.findtext('LUN/Timeout') interval = text.strip() if text else constants.DEFAULT_WAIT_TIMEOUT setattr(self.conf, 'lun_timeout', int(interval)) def _lun_write_type(self, xml_root): text = xml_root.findtext('LUN/WriteType') write_type = text.strip() if text else '1' setattr(self.conf, 'lun_write_type', write_type) def _lun_mirror_switch(self, xml_root): text = xml_root.findtext('LUN/MirrorSwitch') mirror_switch = text.strip() if text else '1' setattr(self.conf, 'lun_mirror_switch', mirror_switch) def _lun_prefetch(self, xml_root): prefetch_type = '3' prefetch_value = '0' node = xml_root.find('LUN/Prefetch') if (node is not None and node.attrib['Type'] and node.attrib['Value']): prefetch_type = node.attrib['Type'].strip() if prefetch_type not in ['0', '1', '2', '3']: msg = (_( "Invalid prefetch type '%s' is configured. " "PrefetchType must be in 0,1,2,3.") % prefetch_type) LOG.error(msg) raise exception.InvalidInput(reason=msg) prefetch_value = node.attrib['Value'].strip() factor = {'1': 2} factor = int(factor.get(prefetch_type, '1')) prefetch_value = int(prefetch_value) * factor prefetch_value = six.text_type(prefetch_value) setattr(self.conf, 'lun_prefetch_type', prefetch_type) setattr(self.conf, 'lun_prefetch_value', prefetch_value) def _lun_policy(self, xml_root): setattr(self.conf, 'lun_policy', '0') def _lun_read_cache_policy(self, xml_root): setattr(self.conf, 'lun_read_cache_policy', '2') def _lun_write_cache_policy(self, xml_root): setattr(self.conf, 'lun_write_cache_policy', '5') def _storage_pools(self, xml_root): nodes = xml_root.findall('LUN/StoragePool') if not nodes: msg = _('Storage pool is not configured.') LOG.error(msg) raise exception.InvalidInput(reason=msg) texts = [x.text for x in nodes] merged_text = ';'.join(texts) pools = set(x.strip() for x in merged_text.split(';') if x.strip()) if not pools: msg = _('Invalid storage pool is configured.') LOG.error(msg) raise exception.InvalidInput(msg) setattr(self.conf, 'storage_pools', list(pools)) def _iscsi_default_target_ip(self, xml_root): text = xml_root.findtext('iSCSI/DefaultTargetIP') target_ip = text.split() if text else [] setattr(self.conf, 'iscsi_default_target_ip', target_ip) def _iscsi_info(self, xml_root): nodes = xml_root.findall('iSCSI/Initiator') if nodes is None: setattr(self.conf, 'iscsi_info', []) return iscsi_info = [] for node in nodes: props = {} for item in node.items(): props[item[0].strip()] = item[1].strip() iscsi_info.append(props) setattr(self.conf, 'iscsi_info', iscsi_info) def get_replication_devices(self): devs = self.conf.safe_get('replication_device') if not devs: return [] devs_config = [] for dev in devs: dev_config = {} dev_config['backend_id'] = dev['backend_id'] dev_config['san_address'] = dev['san_address'].split(';') dev_config['san_user'] = dev['san_user'] dev_config['san_password'] = dev['san_password'] dev_config['storage_pool'] = dev['storage_pool'].split(';') dev_config['iscsi_info'] = [] dev_config['iscsi_default_target_ip'] = ( dev['iscsi_default_target_ip'].split(';') if 'iscsi_default_target_ip' in dev else []) devs_config.append(dev_config) return devs_config def get_local_device(self): dev_config = { 'backend_id': "default", 'san_address': 
self.conf.san_address, 'san_user': self.conf.san_user, 'san_password': self.conf.san_password, 'storage_pool': self.conf.storage_pools, 'iscsi_info': self.conf.iscsi_info, 'iscsi_default_target_ip': self.conf.iscsi_default_target_ip, } return dev_config cinder-8.0.0/cinder/volume/drivers/huawei/__init__.py0000664000567000056710000000000012701406250023733 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/huawei/huawei_utils.py0000664000567000056710000000663012701406250024715 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import six import time import uuid from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.volume.drivers.huawei import constants LOG = logging.getLogger(__name__) def encode_name(name): uuid_str = name.replace("-", "") vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes) vol_encoded = vol_encoded.decode("utf-8") # Make it compatible with py3. newuuid = vol_encoded.replace("=", "") return newuuid def encode_host_name(name): if name and (len(name) > constants.MAX_HOSTNAME_LENGTH): name = six.text_type(hash(name)) return name def wait_for_condition(func, interval, timeout): start_time = time.time() def _inner(): try: res = func() except Exception as ex: raise exception.VolumeBackendAPIException(data=ex) if res: raise loopingcall.LoopingCallDone() if int(time.time()) - start_time > timeout: msg = (_('wait_for_condition: %s timed out.') % func.__name__) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=interval).wait() def get_volume_size(volume): """Calculate the volume size. We divide the given volume size by 512 because the 18000 system calculates volume size in sectors, which are 512 bytes each.
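For example, a 1 GB volume is reported as 1 * 1024**3 / 512 = 2097152 sectors.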
""" volume_size = units.Gi / 512 # 1G if int(volume['size']) != 0: volume_size = int(volume['size']) * units.Gi / 512 return volume_size def get_volume_metadata(volume): if type(volume) is objects.Volume: return volume.metadata if 'volume_metadata' in volume: metadata = volume.get('volume_metadata') return {item['key']: item['value'] for item in metadata} return {} def get_admin_metadata(volume): admin_metadata = {} if 'admin_metadata' in volume: admin_metadata = volume['admin_metadata'] elif 'volume_admin_metadata' in volume: metadata = volume.get('volume_admin_metadata', []) admin_metadata = {item['key']: item['value'] for item in metadata} LOG.debug("Volume ID: %(id)s, admin_metadata: %(admin_metadata)s.", {"id": volume['id'], "admin_metadata": admin_metadata}) return admin_metadata def get_snapshot_metadata_value(snapshot): if type(snapshot) is objects.Snapshot: return snapshot.metadata if 'snapshot_metadata' in snapshot: metadata = snapshot.get('snapshot_metadata') return {item['key']: item['value'] for item in metadata} return {} cinder-8.0.0/cinder/volume/drivers/huawei/smartx.py0000664000567000056710000002104012701406250023521 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils from cinder import context from cinder import exception from cinder.i18n import _, _LI from cinder.volume.drivers.huawei import constants from cinder.volume import qos_specs LOG = logging.getLogger(__name__) class SmartQos(object): def __init__(self, client): self.client = client @staticmethod def get_qos_by_volume_type(volume_type): # We prefer the qos_specs association # and override any existing extra-specs settings # if present. if not volume_type: return {} qos_specs_id = volume_type.get('qos_specs_id') if not qos_specs_id: return {} qos = {} io_type_flag = None ctxt = context.get_admin_context() kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] LOG.info(_LI('The QoS sepcs is: %s.'), kvs) for k, v in kvs.items(): if k not in constants.HUAWEI_VALID_KEYS: continue if k != 'IOType' and int(v) <= 0: msg = _('QoS config is wrong. %s must > 0.') % k LOG.error(msg) raise exception.InvalidInput(reason=msg) if k == 'IOType': if v not in ['0', '1', '2']: msg = _('Illegal value specified for IOTYPE: 0, 1, or 2.') LOG.error(msg) raise exception.InvalidInput(reason=msg) io_type_flag = 1 qos[k.upper()] = v else: qos[k.upper()] = v if not io_type_flag: msg = (_('QoS policy must specify for IOTYPE: 0, 1, or 2, ' 'QoS policy: %(qos_policy)s ') % {'qos_policy': qos}) LOG.error(msg) raise exception.InvalidInput(reason=msg) # QoS policy must specify for IOTYPE and another qos_specs. 
if len(qos) < 2: msg = (_('QoS policy must specify for IOTYPE and another ' 'qos_specs, QoS policy: %(qos_policy)s.') % {'qos_policy': qos}) LOG.error(msg) raise exception.InvalidInput(reason=msg) for upper_limit in constants.UPPER_LIMIT_KEYS: for lower_limit in constants.LOWER_LIMIT_KEYS: if upper_limit in qos and lower_limit in qos: msg = (_('QoS policy upper_limit and lower_limit ' 'conflict, QoS policy: %(qos_policy)s.') % {'qos_policy': qos}) LOG.error(msg) raise exception.InvalidInput(reason=msg) return qos def _is_high_priority(self, qos): """Check QoS priority.""" for key, value in qos.items(): if (key.find('MIN') == 0) or (key.find('LATENCY') == 0): return True return False def add(self, qos, lun_id): policy_id = None try: # Check QoS priority. if self._is_high_priority(qos): self.client.change_lun_priority(lun_id) # Create QoS policy and activate it. version = self.client.find_array_version() if version >= constants.ARRAY_VERSION: (qos_id, lun_list) = self.client.find_available_qos(qos) if qos_id: self.client.add_lun_to_qos(qos_id, lun_id, lun_list) else: policy_id = self.client.create_qos_policy(qos, lun_id) self.client.activate_deactivate_qos(policy_id, True) else: policy_id = self.client.create_qos_policy(qos, lun_id) self.client.activate_deactivate_qos(policy_id, True) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): if policy_id is not None: self.client.delete_qos_policy(policy_id) def remove(self, qos_id, lun_id): qos_info = self.client.get_qos_info(qos_id) lun_list = self.client.get_lun_list_in_qos(qos_id, qos_info) if len(lun_list) <= 1: qos_status = qos_info['RUNNINGSTATUS'] # 2: Active status. if qos_status == constants.STATUS_QOS_ACTIVE: self.client.activate_deactivate_qos(qos_id, False) self.client.delete_qos_policy(qos_id) else: self.client.remove_lun_from_qos(lun_id, lun_list, qos_id) class SmartPartition(object): def __init__(self, client): self.client = client def add(self, opts, lun_id): if opts['smartpartition'] != 'true': return if not opts['partitionname']: raise exception.InvalidInput( reason=_('Partition name is None, please set ' 'smartpartition:partitionname in key.')) partition_id = self.client.get_partition_id_by_name( opts['partitionname']) if not partition_id: raise exception.InvalidInput( reason=(_('Can not find partition id by name %(name)s.') % {'name': opts['partitionname']})) self.client.add_lun_to_partition(lun_id, partition_id) class SmartCache(object): def __init__(self, client): self.client = client def add(self, opts, lun_id): if opts['smartcache'] != 'true': return if not opts['cachename']: raise exception.InvalidInput( reason=_('Cache name is None, please set ' 'smartcache:cachename in key.')) cache_id = self.client.get_cache_id_by_name(opts['cachename']) if not cache_id: raise exception.InvalidInput( reason=(_('Can not find cache id by cache name %(name)s.') % {'name': opts['cachename']})) self.client.add_lun_to_cache(lun_id, cache_id) class SmartX(object): def get_smartx_specs_opts(self, opts): # Check that smarttier is 0/1/2/3 opts = self.get_smarttier_opts(opts) opts = self.get_smartthin_opts(opts) opts = self.get_smartcache_opts(opts) opts = self.get_smartpartition_opts(opts) return opts def get_smarttier_opts(self, opts): if opts['smarttier'] == 'true': if not opts['policy']: opts['policy'] = '1' elif opts['policy'] not in ['0', '1', '2', '3']: raise exception.InvalidInput( reason=(_('Illegal value specified for smarttier: ' 'set to either 0, 1, 2, or 3.'))) else: opts['policy'] = '0' return opts 
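    # A minimal usage sketch for the method above (hypothetical opts dicts):
    #     {'smarttier': 'true', 'policy': None}  -> policy defaults to '1'
    #     {'smarttier': 'false', 'policy': '2'}  -> policy is forced to '0'
    # Any configured policy outside 0, 1, 2, 3 raises InvalidInput.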
def get_smartthin_opts(self, opts): if opts['thin_provisioning_support'] == 'true': if opts['thick_provisioning_support'] == 'true': raise exception.InvalidInput( reason=(_('Illegal value specified for thin: ' 'Can not set thin and thick at the same time.'))) else: opts['LUNType'] = constants.THIN_LUNTYPE if opts['thick_provisioning_support'] == 'true': opts['LUNType'] = constants.THICK_LUNTYPE return opts def get_smartcache_opts(self, opts): if opts['smartcache'] == 'true': if not opts['cachename']: raise exception.InvalidInput( reason=_('Cache name is None, please set ' 'smartcache:cachename in key.')) else: opts['cachename'] = None return opts def get_smartpartition_opts(self, opts): if opts['smartpartition'] == 'true': if not opts['partitionname']: raise exception.InvalidInput( reason=_('Partition name is None, please set ' 'smartpartition:partitionname in key.')) else: opts['partitionname'] = None return opts cinder-8.0.0/cinder/volume/drivers/huawei/fc_zone_helper.py0000664000567000056710000002375012701406250025177 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.huawei import constants LOG = logging.getLogger(__name__) class FCZoneHelper(object): """FC zone helper for Huawei driver.""" def __init__(self, fcsan_lookup_service, client): self.fcsan = fcsan_lookup_service self.client = client def _get_fc_ports_info(self): ports_info = {} data = self.client.get_fc_ports_on_array() for item in data: if item['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED: location = item['PARENTID'].split('.') port_info = {} port_info['id'] = item['ID'] port_info['contr'] = location[0] port_info['bandwidth'] = item['RUNSPEED'] ports_info[item['WWN']] = port_info return ports_info def _count_port_weight(self, port, ports_info): LOG.debug("Count weight for port: %s.", port) portgs = self.client.get_portgs_by_portid(ports_info[port]['id']) LOG.debug("Port %(port)s belongs to PortGroup %(portgs)s.", {"port": port, "portgs": portgs}) weight = 0 for portg in portgs: views = self.client.get_views_by_portg(portg) if not views: LOG.debug("PortGroup %s doesn't belong to any view.", portg) break LOG.debug("PortGroup %(portg)s belongs to view %(views)s.", {"portg": portg, "views": views[0]}) # In fact, there is just one view for one port group. 
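            # Worked example with hypothetical numbers: if this port belongs
            # to one port group whose view maps 20 LUNs, and the group's
            # member ports have a combined bandwidth of 16000, the loop below
            # adds 20 / 16000 = 0.00125 to the weight. The second tuple
            # element returned at the end, 10000 / bandwidth, acts as a tie
            # breaker that prefers faster ports among equally loaded ones.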
lungroup = self.client.get_lungroup_by_view(views[0]) lun_num = self.client.get_lunnum_from_lungroup(lungroup) ports_in_portg = self.client.get_ports_by_portg(portg) LOG.debug("PortGroup %(portg)s contains ports: %(ports)s.", {"portg": portg, "ports": ports_in_portg}) total_bandwidth = 0 for port_pg in ports_in_portg: if port_pg in ports_info: total_bandwidth += int(ports_info[port_pg]['bandwidth']) LOG.debug("Total bandwidth for PortGroup %(portg)s is %(bindw)s.", {"portg": portg, "bindw": total_bandwidth}) if total_bandwidth: weight += float(lun_num) / float(total_bandwidth) bandwidth = float(ports_info[port]['bandwidth']) return (weight, 10000 / bandwidth) def _get_weighted_ports_per_contr(self, ports, ports_info): port_weight_map = {} for port in ports: port_weight_map[port] = self._count_port_weight(port, ports_info) LOG.debug("port_weight_map: %s", port_weight_map) sorted_ports = sorted(port_weight_map.items(), key=lambda d: d[1]) weighted_ports = [] count = 0 for port in sorted_ports: if count >= constants.PORT_NUM_PER_CONTR: break weighted_ports.append(port[0]) count += 1 return weighted_ports def _get_weighted_ports(self, contr_port_map, ports_info, contrs): weighted_ports = [] for contr in contrs: if contr in contr_port_map: weighted_ports_per_contr = self._get_weighted_ports_per_contr( contr_port_map[contr], ports_info) LOG.debug("Selected ports %(ports)s on controller %(contr)s.", {"ports": weighted_ports_per_contr, "contr": contr}) weighted_ports.extend(weighted_ports_per_contr) return weighted_ports def _filter_by_fabric(self, wwns, ports): """Filter FC ports and initiators connected to fabrics.""" ini_tgt_map = self.fcsan.get_device_mapping_from_network(wwns, ports) fabric_connected_ports = [] fabric_connected_initiators = [] for fabric in ini_tgt_map: fabric_connected_ports.extend( ini_tgt_map[fabric]['target_port_wwn_list']) fabric_connected_initiators.extend( ini_tgt_map[fabric]['initiator_port_wwn_list']) if not fabric_connected_ports: msg = _("No FC port connected to fabric.") raise exception.VolumeBackendAPIException(data=msg) if not fabric_connected_initiators: msg = _("No initiator connected to fabric.") raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Fabric connected ports: %(ports)s, " "Fabric connected initiators: %(initiators)s.", {'ports': fabric_connected_ports, 'initiators': fabric_connected_initiators}) return fabric_connected_ports, fabric_connected_initiators def build_ini_targ_map(self, wwns, host_id, lun_id): lun_info = self.client.get_lun_info(lun_id) lun_contr_id = lun_info['OWNINGCONTROLLER'] engines = self.client.get_all_engines() LOG.debug("Get array engines: %s", engines) for engine in engines: contrs = json.loads(engine['NODELIST']) engine_id = engine['ID'] if lun_contr_id in contrs: LOG.debug("LUN %(lun_id)s belongs to engine %(engine_id)s.", {"lun_id": lun_id, "engine_id": engine_id}) break # Check if there is already a port group in the view. # If yes and have already considered the engine, # we won't change anything about the port group and zone. 
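        # For illustration (hypothetical host id): with host_id '5', the two
        # names derived below are constants.MAPPING_VIEW_PREFIX + '5' and
        # constants.PORTGROUP_PREFIX + '5', i.e. whatever view and port-group
        # naming prefixes the driver's constants module defines.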
view_name = constants.MAPPING_VIEW_PREFIX + host_id portg_name = constants.PORTGROUP_PREFIX + host_id view_id = self.client.find_mapping_view(view_name) portg_info = self.client.get_portgroup_by_view(view_id) portg_id = portg_info[0]['ID'] if portg_info else None init_targ_map = {} if portg_id: description = portg_info[0].get("DESCRIPTION", '') engines = description.replace(constants.PORTGROUP_DESCRIP_PREFIX, "") engines = engines.split(',') ports = self.client.get_fc_ports_by_portgroup(portg_id) if engine_id in engines: LOG.debug("Have already selected ports for engine %s, just " "use them.", engine_id) return (list(ports.keys()), portg_id, init_targ_map) # Filter initiators and ports that connected to fabrics. ports_info = self._get_fc_ports_info() (fabric_connected_ports, fabric_connected_initiators) = ( self._filter_by_fabric(wwns, ports_info.keys())) # Build a controller->ports map for convenience. contr_port_map = {} for port in fabric_connected_ports: contr = ports_info[port]['contr'] if not contr_port_map.get(contr): contr_port_map[contr] = [] contr_port_map[contr].append(port) LOG.debug("Controller port map: %s.", contr_port_map) # Get the 'best' ports for the given controllers. weighted_ports = self._get_weighted_ports(contr_port_map, ports_info, contrs) # Handle port group. port_list = [ports_info[port]['id'] for port in weighted_ports] if portg_id: # Add engine ID to the description of the port group. self.client.append_portg_desc(portg_id, engine_id) # Extend the weighted_ports to include the ports already in the # port group. weighted_ports.extend(list(ports.keys())) else: portg_id = self.client.get_tgt_port_group(portg_name) if portg_id: LOG.debug("Found port group %s not belonged to any view, " "deleting it.", portg_name) ports = self.client.get_fc_ports_by_portgroup(portg_id) for port_id in ports.values(): self.client.remove_port_from_portgroup(portg_id, port_id) self.client.delete_portgroup(portg_id) description = constants.PORTGROUP_DESCRIP_PREFIX + engine_id portg_id = self.client.create_portg(portg_name, description) for port in port_list: self.client.add_port_to_portg(portg_id, port) for ini in fabric_connected_initiators: init_targ_map[ini] = weighted_ports LOG.debug("build_ini_targ_map: Port group name: %(portg_name)s, " "init_targ_map: %(map)s.", {"portg_name": portg_name, "map": init_targ_map}) return weighted_ports, portg_id, init_targ_map def get_init_targ_map(self, wwns, host_id): error_ret = ([], None, {}) if not host_id: return error_ret view_name = constants.MAPPING_VIEW_PREFIX + host_id view_id = self.client.find_mapping_view(view_name) if not view_id: return error_ret port_group = self.client.get_portgroup_by_view(view_id) portg_id = port_group[0]['ID'] if port_group else None ports = self.client.get_fc_ports_by_portgroup(portg_id) for port_id in ports.values(): self.client.remove_port_from_portgroup(portg_id, port_id) init_targ_map = {} for wwn in wwns: init_targ_map[wwn] = list(ports.keys()) return list(ports.keys()), portg_id, init_targ_map cinder-8.0.0/cinder/volume/drivers/hgst.py0000664000567000056710000006245612701406250021706 0ustar jenkinsjenkins00000000000000# Copyright 2015 HGST # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Desc : Driver to store Cinder volumes using HGST Flash Storage Suite Require : HGST Flash Storage Suite Author : Earle F. Philhower, III """ import grp import json import math import os import pwd import six import socket import string from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder import exception from cinder.i18n import _ from cinder.i18n import _LE from cinder.i18n import _LW from cinder.image import image_utils from cinder.volume import driver from cinder.volume import utils as volutils LOG = logging.getLogger(__name__) hgst_opts = [ cfg.StrOpt('hgst_net', default='Net 1 (IPv4)', help='Space network name to use for data transfer'), cfg.StrOpt('hgst_storage_servers', default='os:gbd0', help='Comma separated list of Space storage servers:devices. ' 'ex: os1_stor:gbd0,os2_stor:gbd0'), cfg.StrOpt('hgst_redundancy', default='0', help='Should spaces be redundantly stored (1/0)'), cfg.StrOpt('hgst_space_user', default='root', help='User to own created spaces'), cfg.StrOpt('hgst_space_group', default='disk', help='Group to own created spaces'), cfg.StrOpt('hgst_space_mode', default='0600', help='UNIX mode for created spaces'), ] CONF = cfg.CONF CONF.register_opts(hgst_opts) class HGSTDriver(driver.VolumeDriver): """This is the Class to set in cinder.conf (volume_driver). Implements a Cinder Volume driver which creates a HGST Space for each Cinder Volume or Snapshot requested. Use the vgc-cluster CLI to do all management operations. The Cinder host will nominally have all Spaces made visible to it, while individual compute nodes will only have Spaces connected to KVM instances connected. 
""" VERSION = '1.0.0' VGCCLUSTER = 'vgc-cluster' SPACEGB = units.G - 16 * units.M # Workaround for shrinkage Bug 28320 BLOCKED = "BLOCKED" # Exit code when a command is blocked def __init__(self, *args, **kwargs): """Initialize our protocol descriptor/etc.""" super(HGSTDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(hgst_opts) self._vgc_host = None self.check_for_setup_error() self._stats = {'driver_version': self.VERSION, 'reserved_percentage': 0, 'storage_protocol': 'hgst', 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'vendor_name': 'HGST', } backend_name = self.configuration.safe_get('volume_backend_name') self._stats['volume_backend_name'] = backend_name or 'hgst' self.update_volume_stats() def _log_cli_err(self, err): """Dumps the full command output to a logfile in error cases.""" LOG.error(_LE("CLI fail: '%(cmd)s' = %(code)s\nout: %(stdout)s\n" "err: %(stderr)s"), {'cmd': err.cmd, 'code': err.exit_code, 'stdout': err.stdout, 'stderr': err.stderr}) def _find_vgc_host(self): """Finds vgc-cluster hostname for this box.""" params = [self.VGCCLUSTER, "domain-list", "-1"] try: out, unused = self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: self._log_cli_err(err) msg = _("Unable to get list of domain members, check that " "the cluster is running.") raise exception.VolumeDriverException(message=msg) domain = out.splitlines() params = ["ip", "addr", "list"] try: out, unused = self._execute(*params, run_as_root=False) except processutils.ProcessExecutionError as err: self._log_cli_err(err) msg = _("Unable to get list of IP addresses on this host, " "check permissions and networking.") raise exception.VolumeDriverException(message=msg) nets = out.splitlines() for host in domain: try: ip = socket.gethostbyname(host) for l in nets: x = l.strip() if x.startswith("inet %s/" % ip): return host except socket.error: pass msg = _("Current host isn't part of HGST domain.") raise exception.VolumeDriverException(message=msg) def _hostname(self): """Returns hostname to use for cluster operations on this box.""" if self._vgc_host is None: self._vgc_host = self._find_vgc_host() return self._vgc_host def _make_server_list(self): """Converts a comma list into params for use by HGST CLI.""" csv = self.configuration.safe_get('hgst_storage_servers') servers = csv.split(",") params = [] for server in servers: params.append('-S') params.append(six.text_type(server)) return params def _make_space_name(self, name): """Generates the hashed name for the space from the name. This must be called in a locked context as there are race conditions where 2 contexts could both pick what they think is an unallocated space name, and fail later on due to that conflict. """ # Sanitize the name string valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits) name = ''.join(c for c in name if c in valid_chars) name = name.strip(".") # Remove any leading .s from evil users name = name or "space" # In case of all illegal chars, safe default # Start out with just the name, truncated to 14 characters outname = name[0:13] # See what names already defined params = [self.VGCCLUSTER, "space-list", "--name-only"] try: out, unused = self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: self._log_cli_err(err) msg = _("Unable to get list of spaces to make new name. 
Please " "verify the cluster is running.") raise exception.VolumeDriverException(message=msg) names = out.splitlines() # And anything in /dev/* is also illegal names += os.listdir("/dev") # Do it the Python way! names += ['.', '..'] # Not included above # While there's a conflict, add incrementing digits until it passes itr = 0 while outname in names: itrstr = six.text_type(itr) outname = outname[0:13 - len(itrstr)] + itrstr itr += 1 return outname def _get_space_size_redundancy(self, space_name): """Parse space output to get allocated size and redundancy.""" params = [self.VGCCLUSTER, "space-list", "-n", space_name, "--json"] try: out, unused = self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: self._log_cli_err(err) msg = _("Unable to get information on space %(space)s, please " "verify that the cluster is running and " "connected.") % {'space': space_name} raise exception.VolumeDriverException(message=msg) ret = json.loads(out) retval = {} retval['redundancy'] = int(ret['resources'][0]['redundancy']) retval['sizeBytes'] = int(ret['resources'][0]['sizeBytes']) return retval def _adjust_size_g(self, size_g): """Adjust space size to next legal value because of redundancy.""" # Extending requires expanding to a multiple of the # of # storage hosts in the cluster count = len(self._make_server_list()) // 2 # Remove -s from count if size_g % count: size_g = int(size_g + count) size_g -= size_g % count return int(math.ceil(size_g)) def do_setup(self, context): pass def _get_space_name(self, volume): """Pull name of /dev/ from the provider_id.""" try: return volume.get('provider_id') except Exception: return '' # Some error during create, may be able to continue def _handle_blocked(self, err, msg): """Safely handle a return code of BLOCKED from a cluster command. Handle the case where a command is in BLOCKED state by trying to cancel it. If the cancel fails, then the command actually did complete. If the cancel succeeds, then throw the original error back up the stack. """ if (err.stdout is not None) and (self.BLOCKED in err.stdout): # Command is queued but did not complete in X seconds, so # we will cancel it to keep things sane. request = err.stdout.split('\n', 1)[0].strip() params = [self.VGCCLUSTER, 'request-cancel'] params += ['-r', six.text_type(request)] throw_err = False try: self._execute(*params, run_as_root=True) # Cancel succeeded, the command was aborted # Send initial exception up the stack LOG.error(_LE("VGC-CLUSTER command blocked and cancelled.")) # Can't throw it here, the except below would catch it! throw_err = True except Exception: # The cancel failed because the command was just completed. 
                # That means there was no failure, so continue with Cinder op
                pass
            if throw_err:
                self._log_cli_err(err)
                msg = _("Command %(cmd)s blocked in the CLI and was "
                        "cancelled") % {'cmd': six.text_type(err.cmd)}
                raise exception.VolumeDriverException(message=msg)
        else:
            # Some other error, just throw it up the chain
            self._log_cli_err(err)
            raise exception.VolumeDriverException(message=msg)

    def _add_cinder_apphost(self, spacename):
        """Add this host to the apphost list of a space."""
        # Connect to source volume
        params = [self.VGCCLUSTER, 'space-set-apphosts']
        params += ['-n', spacename]
        params += ['-A', self._hostname()]
        params += ['--action', 'ADD']  # Non-error to add already existing
        try:
            self._execute(*params, run_as_root=True)
        except processutils.ProcessExecutionError as err:
            msg = _("Unable to add Cinder host to apphosts for space "
                    "%(space)s") % {'space': spacename}
            self._handle_blocked(err, msg)

    @lockutils.synchronized('devices', 'cinder-hgst-')
    def create_volume(self, volume):
        """API entry to create a volume on the cluster as a HGST space.

        Creates a volume, adjusting for GiB/GB sizing. Locked to ensure we
        don't have race conditions on the name we pick to use for the space.
        """
        # For ease of debugging, use friendly name if it exists
        volname = self._make_space_name(volume['display_name']
                                        or volume['name'])
        volnet = self.configuration.safe_get('hgst_net')
        volbytes = volume['size'] * units.Gi  # OS=Base2, but HGST=Base10
        volsize_gb_cinder = int(math.ceil(float(volbytes) /
                                          float(self.SPACEGB)))
        volsize_g = self._adjust_size_g(volsize_gb_cinder)
        params = [self.VGCCLUSTER, 'space-create']
        params += ['-n', six.text_type(volname)]
        params += ['-N', six.text_type(volnet)]
        params += ['-s', six.text_type(volsize_g)]
        params += ['--redundancy', six.text_type(
            self.configuration.safe_get('hgst_redundancy'))]
        params += ['--user', six.text_type(
            self.configuration.safe_get('hgst_space_user'))]
        params += ['--group', six.text_type(
            self.configuration.safe_get('hgst_space_group'))]
        params += ['--mode', six.text_type(
            self.configuration.safe_get('hgst_space_mode'))]
        params += self._make_server_list()
        params += ['-A', self._hostname()]  # Make it visible only here
        try:
            self._execute(*params, run_as_root=True)
        except processutils.ProcessExecutionError as err:
            msg = _("Error in space-create for %(space)s of size "
                    "%(size)d GB") % {'space': volname,
                                      'size': int(volsize_g)}
            self._handle_blocked(err, msg)
        # Stash away the hashed name
        provider = {}
        provider['provider_id'] = volname
        return provider

    def update_volume_stats(self):
        """Parse the JSON output of vgc-cluster to find space available."""
        params = [self.VGCCLUSTER, "host-storage", "--json"]
        try:
            out, unused = self._execute(*params, run_as_root=True)
            ret = json.loads(out)
            cap = ret["totalCapacityBytes"] // units.Gi
            used = ret["totalUsedBytes"] // units.Gi
            avail = cap - used
            if int(self.configuration.safe_get('hgst_redundancy')) == 1:
                cap = cap // 2
                avail = avail // 2
            # Reduce both by 1 GB due to BZ 28320
            if cap > 0:
                cap = cap - 1
            if avail > 0:
                avail = avail - 1
        except processutils.ProcessExecutionError as err:
            # Could be cluster still starting up, return unknown for now
            LOG.warning(_LW("Unable to poll cluster free space."))
            self._log_cli_err(err)
            cap = 'unknown'
            avail = 'unknown'
        self._stats['free_capacity_gb'] = avail
        self._stats['total_capacity_gb'] = cap
        self._stats['reserved_percentage'] = 0

    def get_volume_stats(self, refresh=False):
        """Return Volume statistics, potentially cached copy."""
        if refresh:
            self.update_volume_stats()
        return self._stats

    def
create_cloned_volume(self, volume, src_vref): """Create a cloned volume from an existing one. No cloning operation in the current release so simply copy using DD to a new space. This could be a lengthy operation. """ # Connect to source volume volname = self._get_space_name(src_vref) self._add_cinder_apphost(volname) # Make new volume provider = self.create_volume(volume) self._add_cinder_apphost(provider['provider_id']) # And copy original into it... info = self._get_space_size_redundancy(volname) volutils.copy_volume( self.local_path(src_vref), "/dev/" + provider['provider_id'], info['sizeBytes'] // units.Mi, self.configuration.volume_dd_blocksize, execute=self._execute) # That's all, folks! return provider def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume['size']) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" image_utils.upload_volume(context, image_service, image_meta, self.local_path(volume)) def delete_volume(self, volume): """Delete a Volume's underlying space.""" volname = self._get_space_name(volume) if volname: params = [self.VGCCLUSTER, 'space-delete'] params += ['-n', six.text_type(volname)] # This can fail benignly when we are deleting a snapshot try: self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: LOG.warning(_LW("Unable to delete space %(space)s"), {'space': volname}) self._log_cli_err(err) else: # This can be benign when we are deleting a snapshot LOG.warning(_LW("Attempted to delete a space that's not there.")) def _check_host_storage(self, server): if ":" not in server: msg = _("hgst_storage server %(svr)s not of format " ":") % {'svr': server} raise exception.VolumeDriverException(message=msg) h, b = server.split(":") try: params = [self.VGCCLUSTER, 'host-storage', '-h', h] self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: self._log_cli_err(err) msg = _("Storage host %(svr)s not detected, verify " "name") % {'svr': six.text_type(server)} raise exception.VolumeDriverException(message=msg) def check_for_setup_error(self): """Throw an exception if configuration values/setup isn't okay.""" # Verify vgc-cluster exists and is executable by cinder user try: params = [self.VGCCLUSTER, '--version'] self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: self._log_cli_err(err) msg = _("Cannot run vgc-cluster command, please ensure software " "is installed and permissions are set properly.") raise exception.VolumeDriverException(message=msg) # Checks the host is identified with the HGST domain, as well as # that vgcnode and vgcclustermgr services are running. 
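        # The checks below validate a backend section of cinder.conf such as
        # the following (illustrative values only, not recommendations):
        #     volume_driver = cinder.volume.drivers.hgst.HGSTDriver
        #     hgst_net = Net 1 (IPv4)
        #     hgst_redundancy = 0
        #     hgst_space_user = cinder
        #     hgst_space_group = disk
        #     hgst_space_mode = 0600
        #     hgst_storage_servers = os1_stor:gbd0,os2_stor:gbd0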
self._vgc_host = None self._hostname() # Redundancy better be 0 or 1, otherwise no comprendo r = six.text_type(self.configuration.safe_get('hgst_redundancy')) if r not in ["0", "1"]: msg = _("hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in " "cinder.conf.") raise exception.VolumeDriverException(message=msg) # Verify user and group exist or we can't connect volumes try: pwd.getpwnam(self.configuration.safe_get('hgst_space_user')) grp.getgrnam(self.configuration.safe_get('hgst_space_group')) except KeyError as err: msg = _("hgst_group %(grp)s and hgst_user %(usr)s must map to " "valid users/groups in cinder.conf") % { 'grp': self.configuration.safe_get('hgst_space_group'), 'usr': self.configuration.safe_get('hgst_space_user')} raise exception.VolumeDriverException(message=msg) # Verify mode is a nicely formed octal or integer try: int(self.configuration.safe_get('hgst_space_mode')) except Exception as err: msg = _("hgst_space_mode must be an octal/int in cinder.conf") raise exception.VolumeDriverException(message=msg) # Validate network maps to something we know about try: params = [self.VGCCLUSTER, 'network-list'] params += ['-N', self.configuration.safe_get('hgst_net')] self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: self._log_cli_err(err) msg = _("hgst_net %(net)s specified in cinder.conf not found " "in cluster") % { 'net': self.configuration.safe_get('hgst_net')} raise exception.VolumeDriverException(message=msg) # Storage servers require us to split them up and check for sl = self.configuration.safe_get('hgst_storage_servers') if (sl is None) or (six.text_type(sl) == ""): msg = _("hgst_storage_servers must be defined in cinder.conf") raise exception.VolumeDriverException(message=msg) servers = sl.split(",") # Each server must be of the format : w/host in domain for server in servers: self._check_host_storage(server) # We made it here, we should be good to go! return True def create_snapshot(self, snapshot): """Create a snapshot volume. We don't yet support snaps in SW so make a new volume and dd the source one into it. This could be a lengthy operation. """ origvol = {} origvol['name'] = snapshot['volume_name'] origvol['size'] = snapshot['volume_size'] origvol['id'] = snapshot['volume_id'] origvol['provider_id'] = snapshot.get('volume').get('provider_id') # Add me to the apphosts so I can see the volume self._add_cinder_apphost(self._get_space_name(origvol)) # Make snapshot volume snapvol = {} snapvol['display_name'] = snapshot['display_name'] snapvol['name'] = snapshot['name'] snapvol['size'] = snapshot['volume_size'] snapvol['id'] = snapshot['id'] provider = self.create_volume(snapvol) # Create_volume attaches the volume to this host, ready to snapshot. # Copy it using dd for now, we don't have real snapshots # We need to copy the entire allocated volume space, Nova will allow # full access, even beyond requested size (when our volume is larger # due to our ~1B byte alignment or cluster makeup) info = self._get_space_size_redundancy(origvol['provider_id']) volutils.copy_volume( self.local_path(origvol), "/dev/" + provider['provider_id'], info['sizeBytes'] // units.Mi, self.configuration.volume_dd_blocksize, execute=self._execute) return provider def delete_snapshot(self, snapshot): """Delete a snapshot. 
For now, snapshots are full volumes.""" self.delete_volume(snapshot) def create_volume_from_snapshot(self, volume, snapshot): """Create volume from a snapshot, but snaps still full volumes.""" return self.create_cloned_volume(volume, snapshot) def extend_volume(self, volume, new_size): """Extend an existing volume. We may not actually need to resize the space because it's size is always rounded up to a function of the GiB/GB and number of storage nodes. """ volname = self._get_space_name(volume) info = self._get_space_size_redundancy(volname) volnewbytes = new_size * units.Gi new_size_g = math.ceil(float(volnewbytes) / float(self.SPACEGB)) wantedsize_g = self._adjust_size_g(new_size_g) havesize_g = (info['sizeBytes'] // self.SPACEGB) if havesize_g >= wantedsize_g: return # Already big enough, happens with redundancy else: # Have to extend it delta = int(wantedsize_g - havesize_g) params = [self.VGCCLUSTER, 'space-extend'] params += ['-n', six.text_type(volname)] params += ['-s', six.text_type(delta)] params += self._make_server_list() try: self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: msg = _("Error in space-extend for volume %(space)s with " "%(size)d additional GB") % {'space': volname, 'size': delta} self._handle_blocked(err, msg) def initialize_connection(self, volume, connector): """Return connection information. Need to return noremovehost so that the Nova host doesn't accidentally remove us from the apphost list if it is running on the same host (like in devstack testing). """ hgst_properties = {'name': volume['provider_id'], 'noremovehost': self._hostname()} return {'driver_volume_type': 'hgst', 'data': hgst_properties} def local_path(self, volume): """Query the provider_id to figure out the proper devnode.""" return "/dev/" + self._get_space_name(volume) def create_export(self, context, volume, connector): # Not needed for spaces pass def remove_export(self, context, volume): # Not needed for spaces pass def terminate_connection(self, volume, connector, **kwargs): # Not needed for spaces pass def ensure_export(self, context, volume): # Not needed for spaces pass cinder-8.0.0/cinder/volume/drivers/cloudbyte/0000775000567000056710000000000012701406543022351 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/cloudbyte/options.py0000664000567000056710000001043112701406250024410 0ustar jenkinsjenkins00000000000000# Copyright 2015 CloudByte Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg cloudbyte_connection_opts = [ cfg.StrOpt("cb_apikey", help="Driver will use this API key to authenticate " "against the CloudByte storage's management interface."), cfg.StrOpt("cb_account_name", help="CloudByte storage specific account name. " "This maps to a project name in OpenStack."), cfg.StrOpt("cb_tsm_name", help="This corresponds to the name of " "Tenant Storage Machine (TSM) in CloudByte storage. 
" "A volume will be created in this TSM."), cfg.IntOpt("cb_confirm_volume_create_retry_interval", default=5, help="A retry value in seconds. Will be used by the driver " "to check if volume creation was successful in " "CloudByte storage."), cfg.IntOpt("cb_confirm_volume_create_retries", default=3, help="Will confirm a successful volume " "creation in CloudByte storage by making " "this many number of attempts."), cfg.IntOpt("cb_confirm_volume_delete_retry_interval", default=5, help="A retry value in seconds. Will be used by the driver " "to check if volume deletion was successful in " "CloudByte storage."), cfg.IntOpt("cb_confirm_volume_delete_retries", default=3, help="Will confirm a successful volume " "deletion in CloudByte storage by making " "this many number of attempts."), cfg.StrOpt("cb_auth_group", help="This corresponds to the discovery authentication " "group in CloudByte storage. " "Chap users are added to this group. " "Driver uses the first user found for this group. " "Default value is None."), ] cloudbyte_add_qosgroup_opts = [ cfg.DictOpt('cb_add_qosgroup', default={ 'iops': '10', 'latency': '15', 'graceallowed': 'false', 'networkspeed': '0', 'memlimit': '0', 'tpcontrol': 'false', 'throughput': '0', 'iopscontrol': 'true' }, help="These values will be used for CloudByte storage's " "addQos API call."), ] cloudbyte_create_volume_opts = [ cfg.DictOpt('cb_create_volume', default={ 'blocklength': '512B', 'compression': 'off', 'deduplication': 'off', 'sync': 'always', 'recordsize': '16k', 'protocoltype': 'ISCSI' }, help="These values will be used for CloudByte storage's " "createVolume API call."), ] cloudbyte_update_volume_opts = [ cfg.ListOpt('cb_update_qos_group', default=["iops", "latency", "graceallowed"], help="These values will be used for CloudByte storage's " "updateQosGroup API call."), cfg.ListOpt('cb_update_file_system', default=["compression", "sync", "noofcopies", "readonly"], help="These values will be used for CloudByte storage's " "updateFileSystem API call."), ] CONF = cfg.CONF CONF.register_opts(cloudbyte_add_qosgroup_opts) CONF.register_opts(cloudbyte_create_volume_opts) CONF.register_opts(cloudbyte_connection_opts) CONF.register_opts(cloudbyte_update_volume_opts) cinder-8.0.0/cinder/volume/drivers/cloudbyte/__init__.py0000664000567000056710000000000012701406250024443 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/cloudbyte/cloudbyte.py0000664000567000056710000012514512701406250024720 0ustar jenkinsjenkins00000000000000# Copyright 2015 CloudByte Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import uuid from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units import six from six.moves import http_client from six.moves import urllib from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.volume.drivers.cloudbyte import options from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger(__name__) class CloudByteISCSIDriver(san.SanISCSIDriver): """CloudByte ISCSI Driver. Version history: 1.0.0 - Initial driver 1.1.0 - Add chap support and minor bug fixes 1.1.1 - Add wait logic for delete volumes 1.1.2 - Update ig to None before delete volume 1.2.0 - Add retype support """ VERSION = '1.2.0' volume_stats = {} def __init__(self, *args, **kwargs): super(CloudByteISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values( options.cloudbyte_add_qosgroup_opts) self.configuration.append_config_values( options.cloudbyte_create_volume_opts) self.configuration.append_config_values( options.cloudbyte_update_volume_opts) self.configuration.append_config_values( options.cloudbyte_connection_opts) self.cb_use_chap = self.configuration.use_chap_auth self.get_volume_stats() def _get_url(self, cmd, params, apikey): """Will prepare URL that connects to CloudByte.""" if params is None: params = {} params['command'] = cmd params['response'] = 'json' sanitized_params = {} for key in params: value = params[key] if value is not None: sanitized_params[key] = six.text_type(value) sanitized_params = urllib.parse.urlencode(sanitized_params) url = ('/client/api?%s' % sanitized_params) LOG.debug("CloudByte URL to be executed: [%s].", url) # Add the apikey api = {} api['apiKey'] = apikey url = url + '&' + urllib.parse.urlencode(api) return url def _extract_http_error(self, error_data): # Extract the error message from error_data error_msg = "" # error_data is a single key value dict for key, value in error_data.items(): error_msg = value.get('errortext') return error_msg def _execute_and_get_response_details(self, host, url): """Will prepare response after executing an http request.""" res_details = {} try: # Prepare the connection connection = http_client.HTTPSConnection(host) # Make the connection connection.request('GET', url) # Extract the response as the connection was successful response = connection.getresponse() # Read the response data = response.read() # Transform the json string into a py object data = json.loads(data) # Extract http error msg if any error_details = None if response.status != 200: error_details = self._extract_http_error(data) # Prepare the return object res_details['data'] = data res_details['error'] = error_details res_details['http_status'] = response.status finally: connection.close() LOG.debug("CloudByte connection was closed successfully.") return res_details def _api_request_for_cloudbyte(self, cmd, params, version=None): """Make http calls to CloudByte.""" LOG.debug("Executing CloudByte API for command [%s].", cmd) if version is None: version = CloudByteISCSIDriver.VERSION # Below is retrieved from /etc/cinder/cinder.conf apikey = self.configuration.cb_apikey if apikey is None: msg = (_("API key is missing for CloudByte driver.")) raise exception.VolumeBackendAPIException(data=msg) host = self.configuration.san_ip # Construct the CloudByte URL with query params url = self._get_url(cmd, params, apikey) data = {} error_details = None http_status = None try: # Execute 
CloudByte API & frame the response res_obj = self._execute_and_get_response_details(host, url) data = res_obj['data'] error_details = res_obj['error'] http_status = res_obj['http_status'] except http_client.HTTPException as ex: msg = (_("Error executing CloudByte API [%(cmd)s], " "Error: %(err)s.") % {'cmd': cmd, 'err': ex}) raise exception.VolumeBackendAPIException(data=msg) # Check if it was an error response from CloudByte if http_status != 200: msg = (_("Failed to execute CloudByte API [%(cmd)s]." " Http status: %(status)s," " Error: %(error)s.") % {'cmd': cmd, 'status': http_status, 'error': error_details}) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("CloudByte API executed successfully for command [%s]."), cmd) return data def _request_tsm_details(self, account_id): params = {"accountid": account_id} # List all CloudByte tsm data = self._api_request_for_cloudbyte("listTsm", params) return data def _add_qos_group_request(self, volume, tsmid, volume_name, qos_group_params): # Prepare the user input params params = { "name": "QoS_" + volume_name, "tsmid": tsmid } # Get qos related params from configuration params.update(self.configuration.cb_add_qosgroup) # Override the default configuration by qos specs if qos_group_params: params.update(qos_group_params) data = self._api_request_for_cloudbyte("addQosGroup", params) return data def _create_volume_request(self, volume, datasetid, qosgroupid, tsmid, volume_name, file_system_params): size = volume.get('size') quotasize = six.text_type(size) + "G" # Prepare the user input params params = { "datasetid": datasetid, "name": volume_name, "qosgroupid": qosgroupid, "tsmid": tsmid, "quotasize": quotasize } # Get the additional params from configuration params.update(self.configuration.cb_create_volume) # Override the default configuration by qos specs if file_system_params: params.update(file_system_params) data = self._api_request_for_cloudbyte("createVolume", params) return data def _queryAsyncJobResult_request(self, jobid): async_cmd = "queryAsyncJobResult" params = { "jobId": jobid, } data = self._api_request_for_cloudbyte(async_cmd, params) return data def _get_tsm_details(self, data, tsm_name, account_name): # Filter required tsm's details tsms = data['listTsmResponse'].get('listTsm') if tsms is None: msg = (_("TSM [%(tsm)s] was not found in CloudByte storage " "for account [%(account)s].") % {'tsm': tsm_name, 'account': account_name}) raise exception.VolumeBackendAPIException(data=msg) tsmdetails = {} for tsm in tsms: if tsm['name'] == tsm_name: tsmdetails['datasetid'] = tsm['datasetid'] tsmdetails['tsmid'] = tsm['id'] break return tsmdetails def _retry_volume_operation(self, operation, retries, max_retries, jobid, cb_volume): """CloudByte async calls via the FixedIntervalLoopingCall.""" # Query the CloudByte storage with this jobid volume_response = self._queryAsyncJobResult_request(jobid) count = retries['count'] result_res = None if volume_response is not None: result_res = volume_response.get('queryasyncjobresultresponse') if result_res is None: msg = (_( "Null response received while querying " "for [%(operation)s] based job [%(job)s] " "at CloudByte storage.") % {'operation': operation, 'job': jobid}) raise exception.VolumeBackendAPIException(data=msg) status = result_res.get('jobstatus') if status == 1: LOG.info(_LI("CloudByte operation [%(operation)s] succeeded for " "volume [%(cb_volume)s]."), {'operation': operation, 'cb_volume': cb_volume}) raise loopingcall.LoopingCallDone() elif status == 2: job_result = 
result_res.get("jobresult") err_msg = job_result.get("errortext") err_code = job_result.get("errorcode") msg = (_( "Error in Operation [%(operation)s] " "for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], " "error code: [%(error_code)s]."), {'cb_error': err_msg, 'error_code': err_code, 'cb_volume': cb_volume, 'operation': operation}) raise exception.VolumeBackendAPIException(data=msg) elif count == max_retries: # All attempts exhausted LOG.error(_LE("CloudByte operation [%(operation)s] failed" " for volume [%(vol)s]. Exhausted all" " [%(max)s] attempts."), {'operation': operation, 'vol': cb_volume, 'max': max_retries}) raise loopingcall.LoopingCallDone(retvalue=False) else: count += 1 retries['count'] = count LOG.debug("CloudByte operation [%(operation)s] for" " volume [%(vol)s]: retry [%(retry)s] of [%(max)s].", {'operation': operation, 'vol': cb_volume, 'retry': count, 'max': max_retries}) def _wait_for_volume_creation(self, volume_response, cb_volume_name): """Given the job wait for it to complete.""" vol_res = volume_response.get('createvolumeresponse') if vol_res is None: msg = _("Null response received while creating volume [%s] " "at CloudByte storage.") % cb_volume_name raise exception.VolumeBackendAPIException(data=msg) jobid = vol_res.get('jobid') if jobid is None: msg = _("Job id not found in CloudByte's " "create volume [%s] response.") % cb_volume_name raise exception.VolumeBackendAPIException(data=msg) retry_interval = ( self.configuration.cb_confirm_volume_create_retry_interval) max_retries = ( self.configuration.cb_confirm_volume_create_retries) retries = {'count': 0} timer = loopingcall.FixedIntervalLoopingCall( self._retry_volume_operation, 'Create Volume', retries, max_retries, jobid, cb_volume_name) timer.start(interval=retry_interval).wait() def _wait_for_volume_deletion(self, volume_response, cb_volume_id): """Given the job wait for it to complete.""" vol_res = volume_response.get('deleteFileSystemResponse') if vol_res is None: msg = _("Null response received while deleting volume [%s] " "at CloudByte storage.") % cb_volume_id raise exception.VolumeBackendAPIException(data=msg) jobid = vol_res.get('jobid') if jobid is None: msg = _("Job id not found in CloudByte's " "delete volume [%s] response.") % cb_volume_id raise exception.VolumeBackendAPIException(data=msg) retry_interval = ( self.configuration.cb_confirm_volume_delete_retry_interval) max_retries = ( self.configuration.cb_confirm_volume_delete_retries) retries = {'count': 0} timer = loopingcall.FixedIntervalLoopingCall( self._retry_volume_operation, 'Delete Volume', retries, max_retries, jobid, cb_volume_id) timer.start(interval=retry_interval).wait() def _get_volume_id_from_response(self, cb_volumes, volume_name): """Search the volume in CloudByte storage.""" vol_res = cb_volumes.get('listFilesystemResponse') if vol_res is None: msg = _("Null response received from CloudByte's " "list filesystem.") raise exception.VolumeBackendAPIException(data=msg) volumes = vol_res.get('filesystem') if volumes is None: msg = _('No volumes found in CloudByte storage.') raise exception.VolumeBackendAPIException(data=msg) volume_id = None for vol in volumes: if vol['name'] == volume_name: volume_id = vol['id'] break if volume_id is None: msg = _("Volume [%s] not found in CloudByte " "storage.") % volume_name raise exception.VolumeBackendAPIException(data=msg) return volume_id def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id): volumes = cb_volumes['listFilesystemResponse']['filesystem'] 
qosgroup_id = None for vol in volumes: if vol['id'] == volume_id: qosgroup_id = vol['groupid'] break return qosgroup_id def _build_provider_details_from_volume(self, volume, chap): model_update = {} model_update['provider_location'] = ( '%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0) ) # Will provide CHAP Authentication on forthcoming patches/release model_update['provider_auth'] = None if chap: model_update['provider_auth'] = ('CHAP %(username)s %(password)s' % chap) model_update['provider_id'] = volume['id'] LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].", {'iqn': volume['iqnname'], 'proid': volume['id']}) return model_update def _build_provider_details_from_response(self, cb_volumes, volume_name, chap): """Get provider information.""" model_update = {} volumes = cb_volumes['listFilesystemResponse']['filesystem'] for vol in volumes: if vol['name'] == volume_name: model_update = self._build_provider_details_from_volume(vol, chap) break return model_update def _get_initiator_group_id_from_response(self, data, filter): """Find iSCSI initiator group id.""" ig_list_res = data.get('listInitiatorsResponse') if ig_list_res is None: msg = _("Null response received from CloudByte's " "list iscsi initiators.") raise exception.VolumeBackendAPIException(data=msg) ig_list = ig_list_res.get('initiator') if ig_list is None: msg = _('No iscsi initiators were found in CloudByte.') raise exception.VolumeBackendAPIException(data=msg) ig_id = None for ig in ig_list: if ig.get('initiatorgroup') == filter: ig_id = ig['id'] break return ig_id def _get_iscsi_service_id_from_response(self, volume_id, data): iscsi_service_res = data.get('listVolumeiSCSIServiceResponse') if iscsi_service_res is None: msg = _("Null response received from CloudByte's " "list volume iscsi service.") raise exception.VolumeBackendAPIException(data=msg) iscsi_service_list = iscsi_service_res.get('iSCSIService') if iscsi_service_list is None: msg = _('No iscsi services found in CloudByte storage.') raise exception.VolumeBackendAPIException(data=msg) iscsi_id = None for iscsi_service in iscsi_service_list: if iscsi_service['volume_id'] == volume_id: iscsi_id = iscsi_service['id'] break if iscsi_id is None: msg = _("No iscsi service found for CloudByte " "volume [%s].") % volume_id raise exception.VolumeBackendAPIException(data=msg) else: return iscsi_id def _request_update_iscsi_service(self, iscsi_id, ig_id, ag_id): params = { "id": iscsi_id, "igid": ig_id } if ag_id: params['authgroupid'] = ag_id params['authmethod'] = "CHAP" self._api_request_for_cloudbyte( 'updateVolumeiSCSIService', params) def _get_cb_snapshot_path(self, snapshot_name, volume_id): """Find CloudByte snapshot path.""" params = {"id": volume_id} # List all snapshot from CloudByte cb_snapshots_list = self._api_request_for_cloudbyte( 'listStorageSnapshots', params) # Filter required snapshot from list cb_snap_res = cb_snapshots_list.get('listDatasetSnapshotsResponse') cb_snapshot = {} if cb_snap_res is not None: cb_snapshot = cb_snap_res.get('snapshot') path = None # Filter snapshot path for snap in cb_snapshot: if snap['name'] == snapshot_name: path = snap['path'] break return path def _get_account_id_from_name(self, account_name): params = {} data = self._api_request_for_cloudbyte("listAccount", params) accounts = data["listAccountResponse"]["account"] account_id = None for account in accounts: if account.get("name") == account_name: account_id = account.get("id") break if account_id is None: msg = _("Failed to get CloudByte 
account details " "for account [%s].") % account_name raise exception.VolumeBackendAPIException(data=msg) return account_id def _search_volume_id(self, cb_volumes, cb_volume_id): """Search the volume in CloudByte.""" volumes_res = cb_volumes.get('listFilesystemResponse') if volumes_res is None: msg = _("No response was received from CloudByte's " "list filesystem api call.") raise exception.VolumeBackendAPIException(data=msg) volumes = volumes_res.get('filesystem') if volumes is None: msg = _("No volume was found at CloudByte storage.") raise exception.VolumeBackendAPIException(data=msg) volume_id = None for vol in volumes: if vol['id'] == cb_volume_id: volume_id = vol['id'] break return volume_id def _get_storage_info(self, tsmname): """Get CloudByte TSM that is associated with OpenStack backend.""" # List all TSMs from CloudByte storage tsm_list = self._api_request_for_cloudbyte('listTsm', params={}) tsm_details_res = tsm_list.get('listTsmResponse') if tsm_details_res is None: msg = _("No response was received from CloudByte storage " "list tsm API call.") raise exception.VolumeBackendAPIException(data=msg) tsm_details = tsm_details_res.get('listTsm') data = {} flag = 0 # Filter required TSM and get storage info for tsms in tsm_details: if tsms['name'] == tsmname: flag = 1 data['total_capacity_gb'] = ( float(tsms['numericquota']) / units.Ki) data['free_capacity_gb'] = ( float(tsms['availablequota']) / units.Ki) break # TSM not found in CloudByte storage if flag == 0: LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname) data['total_capacity_gb'] = 0.0 data['free_capacity_gb'] = 0.0 return data def _get_auth_group_id_from_response(self, data): """Find iSCSI auth group id.""" chap_group = self.configuration.cb_auth_group ag_list_res = data.get('listiSCSIAuthGroupResponse') if ag_list_res is None: msg = _("Null response received from CloudByte's " "list iscsi auth groups.") raise exception.VolumeBackendAPIException(data=msg) ag_list = ag_list_res.get('authgroup') if ag_list is None: msg = _('No iscsi auth groups were found in CloudByte.') raise exception.VolumeBackendAPIException(data=msg) ag_id = None for ag in ag_list: if ag.get('name') == chap_group: ag_id = ag['id'] break else: msg = _("Auth group [%s] details not found in " "CloudByte storage.") % chap_group raise exception.VolumeBackendAPIException(data=msg) return ag_id def _get_auth_group_info(self, account_id, ag_id): """Fetch the auth group details.""" params = {"accountid": account_id, "authgroupid": ag_id} auth_users = self._api_request_for_cloudbyte( 'listiSCSIAuthUser', params) auth_user_details_res = auth_users.get('listiSCSIAuthUsersResponse') if auth_user_details_res is None: msg = _("No response was received from CloudByte storage " "list iSCSI auth user API call.") raise exception.VolumeBackendAPIException(data=msg) auth_user_details = auth_user_details_res.get('authuser') if auth_user_details is None: msg = _("Auth user details not found in CloudByte storage.") raise exception.VolumeBackendAPIException(data=msg) chapuser = auth_user_details[0].get('chapusername') chappassword = auth_user_details[0].get('chappassword') if chapuser is None or chappassword is None: msg = _("Invalid chap user details found in CloudByte storage.") raise exception.VolumeBackendAPIException(data=msg) data = {'username': chapuser, 'password': chappassword, 'ag_id': ag_id} return data def _get_chap_info(self, account_id): """Fetch the chap details.""" params = {"accountid": account_id} iscsi_auth_data = 
self._api_request_for_cloudbyte( 'listiSCSIAuthGroup', params) ag_id = self._get_auth_group_id_from_response( iscsi_auth_data) return self._get_auth_group_info(account_id, ag_id) def _export(self): model_update = {'provider_auth': None} if self.cb_use_chap is True: account_name = self.configuration.cb_account_name account_id = self._get_account_id_from_name(account_name) chap = self._get_chap_info(account_id) model_update['provider_auth'] = ('CHAP %(username)s %(password)s' % chap) return model_update def _update_initiator_group(self, volume_id, ig_name): # Get account id of this account account_name = self.configuration.cb_account_name account_id = self._get_account_id_from_name(account_name) # Fetch the initiator group ID params = {"accountid": account_id} iscsi_initiator_data = self._api_request_for_cloudbyte( 'listiSCSIInitiator', params) # Filter the list of initiator groups with the name ig_id = self._get_initiator_group_id_from_response( iscsi_initiator_data, ig_name) params = {"storageid": volume_id} iscsi_service_data = self._api_request_for_cloudbyte( 'listVolumeiSCSIService', params) iscsi_id = self._get_iscsi_service_id_from_response( volume_id, iscsi_service_data) # Update the iscsi service with above fetched iscsi_id self._request_update_iscsi_service(iscsi_id, ig_id, None) LOG.debug("CloudByte initiator group updated successfully for volume " "[%(vol)s] with ig [%(ig)s].", {'vol': volume_id, 'ig': ig_name}) def _get_qos_by_volume_type(self, ctxt, type_id): """Get the properties which can be QoS or file system related.""" update_qos_group_params = {} update_file_system_params = {} volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') extra_specs = volume_type.get('extra_specs') if qos_specs_id is not None: specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] # Override extra specs with specs # Hence specs will prefer QoS than extra specs extra_specs.update(specs) for key, value in extra_specs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key in self.configuration.cb_update_qos_group: update_qos_group_params[key] = value elif key in self.configuration.cb_update_file_system: update_file_system_params[key] = value return update_qos_group_params, update_file_system_params def create_volume(self, volume): qos_group_params = {} file_system_params = {} tsm_name = self.configuration.cb_tsm_name account_name = self.configuration.cb_account_name # Get account id of this account account_id = self._get_account_id_from_name(account_name) # Set backend storage volume name using OpenStack volume id cb_volume_name = volume['id'].replace("-", "") ctxt = context.get_admin_context() type_id = volume['volume_type_id'] if type_id is not None: qos_group_params, file_system_params = ( self._get_qos_by_volume_type(ctxt, type_id)) LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] " "at CloudByte storage w.r.t " "OpenStack volume [%(stack_vol)s].", {'cb_vol': cb_volume_name, 'stack_vol': volume.get('id'), 'tsm': tsm_name}) tsm_data = self._request_tsm_details(account_id) tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name) # Send request to create a qos group before creating a volume LOG.debug("Creating qos group for CloudByte volume [%s].", cb_volume_name) qos_data = self._add_qos_group_request( volume, tsm_details.get('tsmid'), cb_volume_name, qos_group_params) # Extract the qos group id from response qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id'] LOG.debug("Successfully 
created qos group for CloudByte volume [%s].", cb_volume_name) # Send a create volume request to CloudByte API vol_data = self._create_volume_request( volume, tsm_details.get('datasetid'), qosgroupid, tsm_details.get('tsmid'), cb_volume_name, file_system_params) # Since create volume is an async call; # need to confirm the creation before proceeding further self._wait_for_volume_creation(vol_data, cb_volume_name) # Fetch iscsi id cb_volumes = self._api_request_for_cloudbyte( 'listFileSystem', params={}) volume_id = self._get_volume_id_from_response(cb_volumes, cb_volume_name) params = {"storageid": volume_id} iscsi_service_data = self._api_request_for_cloudbyte( 'listVolumeiSCSIService', params) iscsi_id = self._get_iscsi_service_id_from_response( volume_id, iscsi_service_data) # Fetch the initiator group ID params = {"accountid": account_id} iscsi_initiator_data = self._api_request_for_cloudbyte( 'listiSCSIInitiator', params) ig_id = self._get_initiator_group_id_from_response( iscsi_initiator_data, 'ALL') LOG.debug("Updating iscsi service for CloudByte volume [%s].", cb_volume_name) ag_id = None chap_info = {} if self.cb_use_chap is True: chap_info = self._get_chap_info(account_id) ag_id = chap_info['ag_id'] # Update the iscsi service with above fetched iscsi_id & ig_id self._request_update_iscsi_service(iscsi_id, ig_id, ag_id) LOG.debug("CloudByte volume [%(vol)s] updated with " "iscsi id [%(iscsi)s] and initiator group [%(ig)s] and " "authentication group [%(ag)s].", {'vol': cb_volume_name, 'iscsi': iscsi_id, 'ig': ig_id, 'ag': ag_id}) # Provide the model after successful completion of above steps provider = self._build_provider_details_from_response( cb_volumes, cb_volume_name, chap_info) LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] " "w.r.t OpenStack volume [%(stack_vol)s]."), {'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')}) return provider def delete_volume(self, volume): params = {} # OpenStack source volume id source_volume_id = volume['id'] # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = volume.get('provider_id') LOG.debug("Will delete CloudByte volume [%(cb_vol)s] " "w.r.t OpenStack volume [%(stack_vol)s].", {'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) # Delete volume at CloudByte if cb_volume_id is not None: cb_volumes = self._api_request_for_cloudbyte( 'listFileSystem', params) # Search cb_volume_id in CloudByte volumes # incase it has already been deleted from CloudByte cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id) # Delete volume at CloudByte if cb_volume_id is not None: # Need to set the initiator group to None before deleting self._update_initiator_group(cb_volume_id, 'None') params = {"id": cb_volume_id} del_res = self._api_request_for_cloudbyte('deleteFileSystem', params) self._wait_for_volume_deletion(del_res, cb_volume_id) LOG.info( _LI("Successfully deleted volume [%(cb_vol)s] " "at CloudByte corresponding to " "OpenStack volume [%(stack_vol)s]."), {'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) else: LOG.error(_LE("CloudByte does not have a volume corresponding " "to OpenStack volume [%s]."), source_volume_id) else: LOG.error(_LE("CloudByte volume information not available for" " OpenStack volume [%s]."), source_volume_id) def create_snapshot(self, snapshot): """Creates a snapshot at CloudByte.""" # OpenStack volume source_volume_id = snapshot['volume_id'] # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = snapshot.get('volume').get('provider_id') 
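        # (Illustrative sketch, not driver code: the id/name mapping used
        # throughout this driver. OpenStack UUIDs become CloudByte-side
        # names by stripping dashes, and CloudByte ids ride back on the
        # 'provider_id' field. Assuming the dicts Cinder passes in:
        #
        #     cb_volume_name = volume['id'].replace("-", "")
        #     snapshot_name = "snap_" + snapshot['id'].replace("-", "")
        #     cb_volume_id = volume.get('provider_id')        # set at create
        #     cb_snapshot_path = snapshot.get('provider_id')  # set below
        # )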
if cb_volume_id is not None: # Set backend storage snapshot name using OpenStack snapshot id snapshot_name = "snap_" + snapshot['id'].replace("-", "") params = { "name": snapshot_name, "id": cb_volume_id } LOG.debug( "Will create CloudByte snapshot [%(cb_snap)s] " "w.r.t CloudByte volume [%(cb_vol)s] " "and OpenStack volume [%(stack_vol)s].", {'cb_snap': snapshot_name, 'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) self._api_request_for_cloudbyte('createStorageSnapshot', params) # Get the snapshot path from CloudByte path = self._get_cb_snapshot_path(snapshot_name, cb_volume_id) LOG.info( _LI("Created CloudByte snapshot [%(cb_snap)s] " "w.r.t CloudByte volume [%(cb_vol)s] " "and OpenStack volume [%(stack_vol)s]."), {'cb_snap': path, 'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) model_update = {} # Store snapshot path as snapshot provider_id model_update['provider_id'] = path else: msg = _("Failed to create snapshot. CloudByte volume information " "not found for OpenStack volume [%s].") % source_volume_id raise exception.VolumeBackendAPIException(data=msg) return model_update def create_cloned_volume(self, cloned_volume, src_volume): """Create a clone of an existing volume. First it will create a snapshot of the source/parent volume, then it creates a clone of this newly created snapshot. """ # Extract necessary information from input params parent_volume_id = src_volume.get('id') # Generating id for snapshot # as this is not user entered in this particular usecase snapshot_id = six.text_type(uuid.uuid1()) # Prepare the params for create_snapshot # as well as create_volume_from_snapshot method snapshot_params = { 'id': snapshot_id, 'volume_id': parent_volume_id, 'volume': src_volume, } # Create a snapshot snapshot = self.create_snapshot(snapshot_params) snapshot_params['provider_id'] = snapshot.get('provider_id') # Create a clone of above snapshot return self.create_volume_from_snapshot(cloned_volume, snapshot_params) def create_volume_from_snapshot(self, cloned_volume, snapshot): """Create a clone from an existing snapshot.""" # Getting necessary data from input params parent_volume_id = snapshot['volume_id'] cloned_volume_name = cloned_volume['id'].replace("-", "") # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = snapshot.get('volume').get('provider_id') # CloudByte snapshot path equals OpenStack snapshot's provider_id cb_snapshot_path = snapshot['provider_id'] params = { "id": cb_volume_id, "clonename": cloned_volume_name, "path": cb_snapshot_path } LOG.debug( "Will create CloudByte clone [%(cb_clone)s] " "at CloudByte snapshot path [%(cb_snap)s] " "w.r.t parent OpenStack volume [%(stack_vol)s].", {'cb_clone': cloned_volume_name, 'cb_snap': cb_snapshot_path, 'stack_vol': parent_volume_id}) # Create clone of the snapshot clone_dataset_snapshot_res = ( self._api_request_for_cloudbyte('cloneDatasetSnapshot', params)) cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot') cb_vol = {} if cb_snap is not None: cb_vol = cb_snap.get('filesystem') else: msg = ("Error: Clone creation failed for " "OpenStack volume [%(vol)s] with CloudByte " "snapshot path [%(path)s]" % {'vol': parent_volume_id, 'path': cb_snapshot_path}) raise exception.VolumeBackendAPIException(data=msg) LOG.info( _LI("Created a clone [%(cb_clone)s] " "at CloudByte snapshot path [%(cb_snap)s] " "w.r.t parent OpenStack volume [%(stack_vol)s]."), {'cb_clone': cloned_volume_name, 'cb_snap': cb_snapshot_path, 'stack_vol': parent_volume_id}) chap_info = {} if self.cb_use_chap is 
True: account_name = self.configuration.cb_account_name # Get account id of this account account_id = self._get_account_id_from_name(account_name) chap_info = self._get_chap_info(account_id) model_update = self._build_provider_details_from_volume(cb_vol, chap_info) return model_update def delete_snapshot(self, snapshot): """Delete a snapshot at CloudByte.""" # Find volume id source_volume_id = snapshot['volume_id'] # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = snapshot.get('volume').get('provider_id') # CloudByte snapshot path equals OpenStack snapshot's provider_id cb_snapshot_path = snapshot['provider_id'] # If cb_snapshot_path is 'None' # then no need to execute CloudByte API if cb_snapshot_path is not None: params = { "id": cb_volume_id, "path": cb_snapshot_path } LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t " "parent CloudByte volume [%(cb_vol)s] " "and parent OpenStack volume [%(stack_vol)s].", {'snap': cb_snapshot_path, 'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) # Execute CloudByte API self._api_request_for_cloudbyte('deleteSnapshot', params) LOG.info( _LI("Deleted CloudByte snapshot [%(snap)s] w.r.t " "parent CloudByte volume [%(cb_vol)s] " "and parent OpenStack volume [%(stack_vol)s]."), {'snap': cb_snapshot_path, 'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) else: LOG.error(_LE("CloudByte snapshot information is not available" " for OpenStack volume [%s]."), source_volume_id) def extend_volume(self, volume, new_size): # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = volume.get('provider_id') params = { "id": cb_volume_id, "quotasize": six.text_type(new_size) + 'G' } # Request the CloudByte api to update the volume self._api_request_for_cloudbyte('updateFileSystem', params) def create_export(self, context, volume, connector): """Setup the iscsi export info.""" return self._export() def ensure_export(self, context, volume): """Verify the iscsi export info.""" return self._export() def get_volume_stats(self, refresh=False): """Get volume statistics. If 'refresh' is True, update/refresh the statistics first. 
""" if refresh: # Get the TSM name from configuration tsm_name = self.configuration.cb_tsm_name # Get the storage details of this TSM data = self._get_storage_info(tsm_name) data["volume_backend_name"] = ( self.configuration.safe_get('volume_backend_name') or 'CloudByte') data["vendor_name"] = 'CloudByte' data['reserved_percentage'] = 0 data["driver_version"] = CloudByteISCSIDriver.VERSION data["storage_protocol"] = 'iSCSI' LOG.debug("CloudByte driver stats: [%s].", data) # Set this to the instance variable self.volume_stats = data return self.volume_stats def retype(self, ctxt, volume, new_type, diff, host): """Retypes a volume, QoS and file system update is only done.""" cb_volume_id = volume.get('provider_id') if cb_volume_id is None: message = _("Provider information w.r.t CloudByte storage " "was not found for OpenStack " "volume [%s].") % volume['id'] raise exception.VolumeBackendAPIException(message) update_qos_group_params, update_file_system_params = ( self._get_qos_by_volume_type(ctxt, new_type['id'])) if update_qos_group_params: list_file_sys_params = {'id': cb_volume_id} response = self._api_request_for_cloudbyte( 'listFileSystem', list_file_sys_params) response = response['listFilesystemResponse'] cb_volume_list = response['filesystem'] cb_volume = cb_volume_list[0] if not cb_volume: msg = (_("Volume [%(cb_vol)s] was not found at " "CloudByte storage corresponding to OpenStack " "volume [%(ops_vol)s].") % {'cb_vol': cb_volume_id, 'ops_vol': volume['id']}) raise exception.VolumeBackendAPIException(data=msg) update_qos_group_params['id'] = cb_volume.get('groupid') self._api_request_for_cloudbyte( 'updateQosGroup', update_qos_group_params) if update_file_system_params: update_file_system_params['id'] = cb_volume_id self._api_request_for_cloudbyte( 'updateFileSystem', update_file_system_params) LOG.info(_LI("Successfully updated CloudByte volume [%(cb_vol)s] " "corresponding to OpenStack volume [%(ops_vol)s]."), {'cb_vol': cb_volume_id, 'ops_vol': volume['id']}) return True cinder-8.0.0/cinder/volume/drivers/vmware/0000775000567000056710000000000012701406543021660 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/vmware/__init__.py0000664000567000056710000000130212701406250023760 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`vmware` -- Volume support for VMware compatible datastores. """ cinder-8.0.0/cinder/volume/drivers/vmware/datastore.py0000664000567000056710000002565712701406250024232 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Classes and utility methods for datastore selection. """ from oslo_log import log as logging from oslo_utils import excutils from oslo_vmware import exceptions from oslo_vmware import pbm from cinder.i18n import _LE, _LW from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions LOG = logging.getLogger(__name__) class DatastoreType(object): """Supported datastore types.""" NFS = "nfs" VMFS = "vmfs" VSAN = "vsan" VVOL = "vvol" _ALL_TYPES = {NFS, VMFS, VSAN, VVOL} @staticmethod def get_all_types(): return DatastoreType._ALL_TYPES class DatastoreSelector(object): """Class for selecting datastores which satisfy input requirements.""" HARD_AFFINITY_DS_TYPE = "hardAffinityDatastoreTypes" HARD_ANTI_AFFINITY_DS = "hardAntiAffinityDatastores" PREF_UTIL_THRESH = "preferredUtilizationThreshold" SIZE_BYTES = "sizeBytes" PROFILE_NAME = "storageProfileName" # TODO(vbala) Remove dependency on volumeops. def __init__(self, vops, session): self._vops = vops self._session = session def get_profile_id(self, profile_name): """Get vCenter profile ID for the given profile name. :param profile_name: profile name :return: vCenter profile ID :raises: ProfileNotFoundException """ profile_id = pbm.get_profile_id_by_name(self._session, profile_name) if profile_id is None: LOG.error(_LE("Storage profile: %s cannot be found in vCenter."), profile_name) raise vmdk_exceptions.ProfileNotFoundException( storage_profile=profile_name) LOG.debug("Storage profile: %(name)s resolved to vCenter profile ID: " "%(id)s.", {'name': profile_name, 'id': profile_id}) return profile_id def _filter_by_profile(self, datastores, profile_id): """Filter out input datastores that do not match the given profile.""" cf = self._session.pbm.client.factory hubs = pbm.convert_datastores_to_hubs(cf, datastores) filtered_hubs = pbm.filter_hubs_by_profile(self._session, hubs, profile_id) return pbm.filter_datastores_by_hubs(filtered_hubs, datastores) def _filter_datastores(self, datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types): """Filter datastores based on profile, size and affinity.""" LOG.debug( "Filtering datastores: %(datastores)s based on size (bytes): " "%(size)d, profile: %(profile)s, hard-anti-affinity-datastores: " "%(hard_anti_affinity_datastores)s, hard-affinity-datastore-types:" " %(hard_affinity_ds_types)s.", {'datastores': datastores, 'size': size_bytes, 'profile': profile_id, 'hard_anti_affinity_datastores': hard_anti_affinity_datastores, 'hard_affinity_ds_types': hard_affinity_ds_types}) if hard_anti_affinity_datastores is None: hard_anti_affinity_datastores = [] filtered_datastores = [ds for ds in datastores if ds.value not in hard_anti_affinity_datastores] if filtered_datastores and profile_id is not None: filtered_datastores = self._filter_by_profile( filtered_datastores, profile_id) LOG.debug("Profile: %(id)s matched by datastores: %(datastores)s.", {'datastores': filtered_datastores, 'id': profile_id}) filtered_summaries = [self._vops.get_summary(ds) for ds in filtered_datastores] return [summary for summary in filtered_summaries if (summary.freeSpace > size_bytes and 
summary.type.lower() in DatastoreType.get_all_types() and (hard_affinity_ds_types is None or summary.type.lower() in hard_affinity_ds_types))] def _get_all_hosts(self): """Get all ESX hosts managed by vCenter.""" all_hosts = [] retrieve_result = self._vops.get_hosts() while retrieve_result: hosts = retrieve_result.objects if not hosts: break for host in hosts: if self._vops.is_host_usable(host.obj): all_hosts.append(host.obj) retrieve_result = self._vops.continue_retrieval( retrieve_result) return all_hosts def _compute_space_utilization(self, datastore_summary): """Compute space utilization of the given datastore.""" return ( 1.0 - datastore_summary.freeSpace / float(datastore_summary.capacity) ) def _select_best_summary(self, summaries): """Selects the best datastore summary. Selects the datastore which is connected to maximum number of hosts. Ties are broken based on space utilization-- datastore with low space utilization is preferred. """ best_summary = None max_host_count = 0 best_space_utilization = 1.0 for summary in summaries: host_count = len(self._vops.get_connected_hosts( summary.datastore)) if host_count > max_host_count: max_host_count = host_count best_space_utilization = self._compute_space_utilization( summary ) best_summary = summary elif host_count == max_host_count: # break the tie based on space utilization space_utilization = self._compute_space_utilization( summary ) if space_utilization < best_space_utilization: best_space_utilization = space_utilization best_summary = summary LOG.debug("Datastore: %(datastore)s is connected to %(host_count)d " "host(s) and has space utilization: %(utilization)s.", {'datastore': best_summary.datastore, 'host_count': max_host_count, 'utilization': best_space_utilization}) return (best_summary, best_space_utilization) def select_datastore(self, req, hosts=None): """Selects a datastore satisfying the given requirements. Returns the selected datastore summary along with a compute host and resource pool where a VM can be created. :param req: selection requirements :param hosts: list of hosts to consider :return: (host, resourcePool, summary) """ best_candidate = () best_utilization = 1.0 hard_affinity_ds_types = req.get( DatastoreSelector.HARD_AFFINITY_DS_TYPE) hard_anti_affinity_datastores = req.get( DatastoreSelector.HARD_ANTI_AFFINITY_DS) pref_utilization_thresh = req.get(DatastoreSelector.PREF_UTIL_THRESH, -1) size_bytes = req[DatastoreSelector.SIZE_BYTES] profile_name = req.get(DatastoreSelector.PROFILE_NAME) profile_id = None if profile_name is not None: profile_id = self.get_profile_id(profile_name) if not hosts: hosts = self._get_all_hosts() LOG.debug("Using hosts: %(hosts)s for datastore selection based on " "requirements: %(req)s.", {'hosts': hosts, 'req': req}) for host_ref in hosts: try: (datastores, rp) = self._vops.get_dss_rp(host_ref) except exceptions.VimConnectionException: # No need to try other hosts when there is a connection problem with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error occurred while " "selecting datastore.")) except exceptions.VimException: # TODO(vbala) volumeops.get_dss_rp shouldn't throw VimException # for empty datastore list. 
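                # (Sketch of the error policy in this host loop, restated:
                # a connection failure is fatal for the whole selection,
                # since no other host would fare better on a dead
                # connection, while any other VIM error only skips the
                # current host.
                #
                #     except VimConnectionException:  re-raise  # abort
                #     except VimException:            continue  # next host
                # )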
LOG.warning(_LW("Unable to fetch datastores connected " "to host %s."), host_ref, exc_info=True) continue if not datastores: continue filtered_summaries = self._filter_datastores( datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types) LOG.debug("Datastores remaining after filtering: %s.", filtered_summaries) if not filtered_summaries: continue (summary, utilization) = self._select_best_summary( filtered_summaries) if (pref_utilization_thresh == -1 or utilization <= pref_utilization_thresh): return (host_ref, rp, summary) if utilization < best_utilization: best_candidate = (host_ref, rp, summary) best_utilization = utilization LOG.debug("Best candidate: %s.", best_candidate) return best_candidate def is_datastore_compliant(self, datastore, profile_name): """Check if the datastore is compliant with given profile. :param datastore: datastore to check the compliance :param profile_name: profile to check the compliance against :return: True if the datastore is compliant; False otherwise :raises: ProfileNotFoundException """ LOG.debug("Checking datastore: %(datastore)s compliance against " "profile: %(profile)s.", {'datastore': datastore, 'profile': profile_name}) if profile_name is None: # Any datastore is trivially compliant with a None profile. return True profile_id = self.get_profile_id(profile_name) is_compliant = bool(self._filter_by_profile([datastore], profile_id)) LOG.debug("Compliance is %(is_compliant)s for datastore: " "%(datastore)s against profile: %(profile)s.", {'is_compliant': is_compliant, 'datastore': datastore, 'profile': profile_name}) return is_compliant cinder-8.0.0/cinder/volume/drivers/vmware/exceptions.py0000664000567000056710000000367412701406250024420 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exception definitions. 
""" from oslo_vmware import exceptions from cinder.i18n import _ class InvalidAdapterTypeException(exceptions.VMwareDriverException): """Thrown when the disk adapter type is invalid.""" msg_fmt = _("Invalid disk adapter type: %(invalid_type)s.") class InvalidDiskTypeException(exceptions.VMwareDriverException): """Thrown when the disk type is invalid.""" msg_fmt = _("Invalid disk type: %(disk_type)s.") class VirtualDiskNotFoundException(exceptions.VMwareDriverException): """Thrown when virtual disk is not found.""" msg_fmt = _("There is no virtual disk device.") class ProfileNotFoundException(exceptions.VMwareDriverException): """Thrown when the given storage profile cannot be found.""" msg_fmt = _("Storage profile: %(storage_profile)s not found.") class NoValidDatastoreException(exceptions.VMwareDriverException): """Thrown when there are no valid datastores.""" msg_fmt = _("There are no valid datastores.") class ClusterNotFoundException(exceptions.VMwareDriverException): """Thrown when the given cluster cannot be found.""" msg_fmt = _("Compute cluster: %(cluster)s not found.") class NoValidHostException(exceptions.VMwareDriverException): """Thrown when there are no valid ESX hosts.""" msg_fmt = _("There are no valid ESX hosts.") cinder-8.0.0/cinder/volume/drivers/vmware/vmdk.py0000664000567000056710000026225312701406250023200 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for VMware vCenter managed datastores. The volumes created by this driver are backed by VMDK (Virtual Machine Disk) files stored in datastores. For ease of managing the VMDKs, the driver creates a virtual machine for each of the volumes. This virtual machine is never powered on and is often referred as the shadow VM. 
""" import contextlib import distutils.version as dist_version # pylint: disable=E0611 import math import os import tempfile from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import units from oslo_utils import uuidutils from oslo_vmware import api from oslo_vmware import exceptions from oslo_vmware import image_transfer from oslo_vmware import pbm from oslo_vmware import vim_util import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume import driver from cinder.volume.drivers.vmware import datastore as hub from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions from cinder.volume.drivers.vmware import volumeops from cinder.volume import volume_types LOG = logging.getLogger(__name__) THIN_VMDK_TYPE = 'thin' THICK_VMDK_TYPE = 'thick' EAGER_ZEROED_THICK_VMDK_TYPE = 'eagerZeroedThick' CREATE_PARAM_ADAPTER_TYPE = 'adapter_type' CREATE_PARAM_DISK_LESS = 'disk_less' CREATE_PARAM_BACKING_NAME = 'name' CREATE_PARAM_DISK_SIZE = 'disk_size' TMP_IMAGES_DATASTORE_FOLDER_PATH = "cinder_temp/" EXTRA_CONFIG_VOLUME_ID_KEY = "cinder.volume.id" vmdk_opts = [ cfg.StrOpt('vmware_host_ip', help='IP address for connecting to VMware vCenter server.'), cfg.StrOpt('vmware_host_username', help='Username for authenticating with VMware vCenter ' 'server.'), cfg.StrOpt('vmware_host_password', help='Password for authenticating with VMware vCenter ' 'server.', secret=True), cfg.StrOpt('vmware_wsdl_location', help='Optional VIM service WSDL Location ' 'e.g http:///vimService.wsdl. Optional over-ride ' 'to default location for bug work-arounds.'), cfg.IntOpt('vmware_api_retry_count', default=10, help='Number of times VMware vCenter server API must be ' 'retried upon connection related issues.'), cfg.FloatOpt('vmware_task_poll_interval', default=0.5, help='The interval (in seconds) for polling remote tasks ' 'invoked on VMware vCenter server.'), cfg.StrOpt('vmware_volume_folder', default='Volumes', help='Name of the vCenter inventory folder that will ' 'contain Cinder volumes. This folder will be created ' 'under "OpenStack/", where project_folder ' 'is of format "Project ()".'), cfg.IntOpt('vmware_image_transfer_timeout_secs', default=7200, help='Timeout in seconds for VMDK volume transfer between ' 'Cinder and Glance.'), cfg.IntOpt('vmware_max_objects_retrieval', default=100, help='Max number of objects to be retrieved per batch. ' 'Query results will be obtained in batches from the ' 'server and not in one shot. Server may still limit the ' 'count to something less than the configured value.'), cfg.StrOpt('vmware_host_version', help='Optional string specifying the VMware vCenter server ' 'version. ' 'The driver attempts to retrieve the version from VMware ' 'vCenter server. Set this configuration only if you want ' 'to override the vCenter server version.'), cfg.StrOpt('vmware_tmp_dir', default='/tmp', help='Directory where virtual disks are stored during volume ' 'backup and restore.'), cfg.StrOpt('vmware_ca_file', help='CA bundle file to use in verifying the vCenter server ' 'certificate.'), cfg.BoolOpt('vmware_insecure', default=False, help='If true, the vCenter server certificate is not ' 'verified. If false, then the default CA truststore is ' 'used for verification. 
This option is ignored if ' '"vmware_ca_file" is set.'), cfg.MultiStrOpt('vmware_cluster_name', help='Name of a vCenter compute cluster where volumes ' 'should be created.'), ] CONF = cfg.CONF CONF.register_opts(vmdk_opts) def _get_volume_type_extra_spec(type_id, spec_key, possible_values=None, default_value=None): """Get extra spec value. If the spec value is not present in the input possible_values, then default_value will be returned. If the type_id is None, then default_value is returned. The caller must not consider scope and the implementation adds/removes scope. The scope used here is 'vmware' e.g. key 'vmware:vmdk_type' and so the caller must pass vmdk_type as an input ignoring the scope. :param type_id: Volume type ID :param spec_key: Extra spec key :param possible_values: Permitted values for the extra spec if known :param default_value: Default value for the extra spec incase of an invalid value or if the entry does not exist :return: extra spec value """ if not type_id: return default_value spec_key = ('vmware:%s') % spec_key spec_value = volume_types.get_volume_type_extra_specs(type_id, spec_key) if not spec_value: LOG.debug("Returning default spec value: %s.", default_value) return default_value if possible_values is None: return spec_value if spec_value in possible_values: LOG.debug("Returning spec value %s", spec_value) return spec_value LOG.debug("Invalid spec value: %s specified.", spec_value) class ImageDiskType(object): """Supported disk types in images.""" PREALLOCATED = "preallocated" SPARSE = "sparse" STREAM_OPTIMIZED = "streamOptimized" THIN = "thin" @staticmethod def is_valid(extra_spec_disk_type): """Check if the given disk type in extra_spec is valid. :param extra_spec_disk_type: disk type to check :return: True if valid """ return extra_spec_disk_type in [ImageDiskType.PREALLOCATED, ImageDiskType.SPARSE, ImageDiskType.STREAM_OPTIMIZED, ImageDiskType.THIN] @staticmethod def validate(extra_spec_disk_type): """Validate the given disk type in extra_spec. This method throws ImageUnacceptable if the disk type is not a supported one. :param extra_spec_disk_type: disk type :raises: ImageUnacceptable """ if not ImageDiskType.is_valid(extra_spec_disk_type): raise exception.ImageUnacceptable(_("Invalid disk type: %s.") % extra_spec_disk_type) class VMwareVcVmdkDriver(driver.VolumeDriver): """Manage volumes on VMware vCenter server.""" # 1.0 - initial version of driver # 1.1.0 - selection of datastore based on number of host mounts # 1.2.0 - storage profile volume types based placement of volumes # 1.3.0 - support for volume backup/restore # 1.4.0 - support for volume retype # 1.5.0 - restrict volume placement to specific vCenter clusters # 1.6.0 - support for manage existing VERSION = '1.6.0' # Minimum supported vCenter version. 
MIN_SUPPORTED_VC_VERSION = dist_version.LooseVersion('5.1') # PBM is enabled only for vCenter versions 5.5 and above PBM_ENABLED_VC_VERSION = dist_version.LooseVersion('5.5') def __init__(self, *args, **kwargs): super(VMwareVcVmdkDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(vmdk_opts) self._session = None self._stats = None self._volumeops = None self._storage_policy_enabled = False self._ds_sel = None self._clusters = None @property def volumeops(self): if not self._volumeops: max_objects = self.configuration.vmware_max_objects_retrieval self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects) return self._volumeops @property def ds_sel(self): if not self._ds_sel: self._ds_sel = hub.DatastoreSelector(self.volumeops, self.session) return self._ds_sel def _validate_params(self): # Throw error if required parameters are not set. required_params = ['vmware_host_ip', 'vmware_host_username', 'vmware_host_password'] for param in required_params: if not getattr(self.configuration, param, None): raise exception.InvalidInput(_("%s not set.") % param) def check_for_setup_error(self): pass def get_volume_stats(self, refresh=False): """Obtain status of the volume service. :param refresh: Whether to get refreshed information """ if not self._stats: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ data = {'volume_backend_name': backend_name, 'vendor_name': 'VMware', 'driver_version': self.VERSION, 'storage_protocol': 'vmdk', 'reserved_percentage': 0, 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown'} self._stats = data return self._stats def _verify_volume_creation(self, volume): """Verify the volume can be created. Verify that there is a datastore that can accommodate this volume. If this volume is being associated with a volume_type then verify the storage_profile exists and can accommodate this volume. Raise an exception otherwise. :param volume: Volume object """ try: # find if any host can accommodate the volume self._select_ds_for_volume(volume) except exceptions.VimException as excep: msg = _("Not able to find a suitable datastore for the volume: " "%s.") % volume['name'] LOG.exception(msg) raise exceptions.VimFaultException([excep], msg) LOG.debug("Verified volume %s can be created.", volume['name']) def create_volume(self, volume): """Creates a volume. We do not create any backing. We do it only the first time it is being attached to a virtual machine. :param volume: Volume object """ self._verify_volume_creation(volume) def _delete_volume(self, volume): """Delete the volume backing if it is present. :param volume: Volume object """ backing = self.volumeops.get_backing(volume['name']) if not backing: LOG.info(_LI("Backing not available, no operation " "to be performed.")) return self.volumeops.delete_backing(backing) def delete_volume(self, volume): """Deletes volume backing. :param volume: Volume object """ self._delete_volume(volume) def _get_extra_spec_storage_profile(self, type_id): """Get storage profile name in the given volume type's extra spec. If there is no storage profile in the extra spec, default is None. """ return _get_volume_type_extra_spec(type_id, 'storage_profile') def _get_storage_profile(self, volume): """Get storage profile associated with the given volume's volume_type. 
:param volume: Volume whose storage profile should be queried :return: String value of storage profile if volume type is associated and contains storage_profile extra_spec option; None otherwise """ return self._get_extra_spec_storage_profile(volume['volume_type_id']) @staticmethod def _get_extra_spec_disk_type(type_id): """Get disk type from the given volume type's extra spec. If there is no disk type option, default is THIN_VMDK_TYPE. """ disk_type = _get_volume_type_extra_spec(type_id, 'vmdk_type', default_value=THIN_VMDK_TYPE) volumeops.VirtualDiskType.validate(disk_type) return disk_type @staticmethod def _get_disk_type(volume): """Get disk type from the given volume's volume type. :param volume: Volume object :return: Disk type """ return VMwareVcVmdkDriver._get_extra_spec_disk_type( volume['volume_type_id']) def _get_storage_profile_id(self, volume): storage_profile = self._get_storage_profile(volume) profile_id = None if self._storage_policy_enabled and storage_profile: profile = pbm.get_profile_id_by_name(self.session, storage_profile) if profile: profile_id = profile.uniqueId return profile_id def _get_extra_config(self, volume): return {EXTRA_CONFIG_VOLUME_ID_KEY: volume['id']} def _create_backing(self, volume, host=None, create_params=None): """Create volume backing under the given host. If host is unspecified, any suitable host is selected. :param volume: Volume object :param host: Reference of the host :param create_params: Dictionary specifying optional parameters for backing VM creation :return: Reference to the created backing """ create_params = create_params or {} (host_ref, resource_pool, folder, summary) = self._select_ds_for_volume(volume, host) # check if a storage profile needs to be associated with the backing VM profile_id = self._get_storage_profile_id(volume) # Use volume name as the default backing name. backing_name = create_params.get(CREATE_PARAM_BACKING_NAME, volume['name']) extra_config = self._get_extra_config(volume) # default is a backing with single disk disk_less = create_params.get(CREATE_PARAM_DISK_LESS, False) if disk_less: # create a disk-less backing-- disk can be added later; for e.g., # by copying an image return self.volumeops.create_backing_disk_less( backing_name, folder, resource_pool, host_ref, summary.name, profileId=profile_id, extra_config=extra_config) # create a backing with single disk disk_type = VMwareVcVmdkDriver._get_disk_type(volume) size_kb = volume['size'] * units.Mi adapter_type = create_params.get(CREATE_PARAM_ADAPTER_TYPE, 'lsiLogic') backing = self.volumeops.create_backing(backing_name, size_kb, disk_type, folder, resource_pool, host_ref, summary.name, profileId=profile_id, adapter_type=adapter_type, extra_config=extra_config) self.volumeops.update_backing_disk_uuid(backing, volume['id']) return backing def _get_hosts(self, clusters): hosts = [] if clusters: for cluster in clusters: cluster_hosts = self.volumeops.get_cluster_hosts(cluster) for host in cluster_hosts: if self.volumeops.is_host_usable(host): hosts.append(host) return hosts def _select_datastore(self, req, host=None): """Selects datastore satisfying the given requirements. 
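        :param req: selection requirements; keys are the
                    hub.DatastoreSelector constants such as SIZE_BYTES
                    and PROFILE_NAME
        :param host: optional host reference; if set, selection is
                     restricted to this host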
:return: (host, resource_pool, summary) """ hosts = None if host: hosts = [host] elif self._clusters: hosts = self._get_hosts(self._clusters) if not hosts: LOG.error(_LE("There are no valid hosts available in " "configured cluster(s): %s."), self._clusters) raise vmdk_exceptions.NoValidHostException() best_candidate = self.ds_sel.select_datastore(req, hosts=hosts) if not best_candidate: LOG.error(_LE("There is no valid datastore satisfying " "requirements: %s."), req) raise vmdk_exceptions.NoValidDatastoreException() return best_candidate def _select_ds_for_volume(self, volume, host=None, create_params=None): """Select datastore that can accommodate the given volume's backing. Returns the selected datastore summary along with a compute host and its resource pool and folder where the volume can be created :return: (host, resource_pool, folder, summary) """ # Form requirements for datastore selection. create_params = create_params or {} size = create_params.get(CREATE_PARAM_DISK_SIZE, volume['size']) req = {} req[hub.DatastoreSelector.SIZE_BYTES] = size * units.Gi req[hub.DatastoreSelector.PROFILE_NAME] = self._get_storage_profile( volume) (host_ref, resource_pool, summary) = self._select_datastore(req, host) dc = self.volumeops.get_dc(resource_pool) folder = self._get_volume_group_folder(dc, volume['project_id']) return (host_ref, resource_pool, folder, summary) def _initialize_connection(self, volume, connector): """Get information of volume's backing. If the volume does not have a backing yet. It will be created. :param volume: Volume object :param connector: Connector information :return: Return connection information """ connection_info = {'driver_volume_type': 'vmdk'} backing = self.volumeops.get_backing(volume['name']) if 'instance' in connector: # The instance exists instance = vim_util.get_moref(connector['instance'], 'VirtualMachine') LOG.debug("The instance: %s for which initialize connection " "is called, exists.", instance) # Get host managing the instance host = self.volumeops.get_host(instance) if not backing: # Create a backing in case it does not exist under the # host managing the instance. LOG.info(_LI("There is no backing for the volume: %s. " "Need to create one."), volume['name']) backing = self._create_backing(volume, host) else: # Relocate volume is necessary self._relocate_backing(volume, backing, host) else: # The instance does not exist LOG.debug("The instance for which initialize connection " "is called, does not exist.") if not backing: # Create a backing in case it does not exist. It is a bad use # case to boot from an empty volume. LOG.warning(_LW("Trying to boot from an empty volume: %s."), volume['name']) # Create backing backing = self._create_backing(volume) # Set volume's moref value and name connection_info['data'] = {'volume': backing.value, 'volume_id': volume['id']} LOG.info(_LI("Returning connection_info: %(info)s for volume: " "%(volume)s with connector: %(connector)s."), {'info': connection_info, 'volume': volume['name'], 'connector': connector}) return connection_info def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. 
The implementation returns the following information: {'driver_volume_type': 'vmdk' 'data': {'volume': $VOLUME_MOREF_VALUE 'volume_id': $VOLUME_ID } } :param volume: Volume object :param connector: Connector information :return: Return connection information """ return self._initialize_connection(volume, connector) def terminate_connection(self, volume, connector, force=False, **kwargs): pass def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def _create_snapshot(self, snapshot): """Creates a snapshot. If the volume does not have a backing then simply pass, else create a snapshot. Snapshot of only available volume is supported. :param snapshot: Snapshot object """ volume = snapshot['volume'] if volume['status'] != 'available': msg = _("Snapshot of volume not supported in " "state: %s.") % volume['status'] LOG.error(msg) raise exception.InvalidVolume(msg) backing = self.volumeops.get_backing(snapshot['volume_name']) if not backing: LOG.info(_LI("There is no backing, so will not create " "snapshot: %s."), snapshot['name']) return self.volumeops.create_snapshot(backing, snapshot['name'], snapshot['display_description']) LOG.info(_LI("Successfully created snapshot: %s."), snapshot['name']) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: Snapshot object """ self._create_snapshot(snapshot) def _delete_snapshot(self, snapshot): """Delete snapshot. If the volume does not have a backing or the snapshot does not exist then simply pass, else delete the snapshot. Snapshot deletion of only available volume is supported. :param snapshot: Snapshot object """ volume = snapshot['volume'] if volume['status'] != 'available': msg = _("Delete snapshot of volume not supported in " "state: %s.") % volume['status'] LOG.error(msg) raise exception.InvalidVolume(msg) backing = self.volumeops.get_backing(snapshot['volume_name']) if not backing: LOG.info(_LI("There is no backing, and so there is no " "snapshot: %s."), snapshot['name']) else: self.volumeops.delete_snapshot(backing, snapshot['name']) LOG.info(_LI("Successfully deleted snapshot: %s."), snapshot['name']) def delete_snapshot(self, snapshot): """Delete snapshot. :param snapshot: Snapshot object """ self._delete_snapshot(snapshot) def _get_ds_name_folder_path(self, backing): """Get datastore name and folder path of the given backing. :param backing: Reference to the backing entity :return: datastore name and folder path of the backing """ vmdk_ds_file_path = self.volumeops.get_path_name(backing) (datastore_name, folder_path, _) = volumeops.split_datastore_path(vmdk_ds_file_path) return (datastore_name, folder_path) @staticmethod def _validate_disk_format(disk_format): """Verify vmdk as disk format. :param disk_format: Disk format of the image """ if disk_format and disk_format.lower() != 'vmdk': msg = _("Cannot create image of disk format: %s. 
Only vmdk " "disk format is accepted.") % disk_format LOG.error(msg) raise exception.ImageUnacceptable(msg) def _copy_image(self, context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, upload_file_path): """Copy image (flat extent or sparse vmdk) to datastore.""" timeout = self.configuration.vmware_image_transfer_timeout_secs host_ip = self.configuration.vmware_host_ip ca_file = self.configuration.vmware_ca_file insecure = self.configuration.vmware_insecure cookies = self.session.vim.client.options.transport.cookiejar dc_name = self.volumeops.get_entity_name(dc_ref) LOG.debug("Copying image: %(image_id)s to %(path)s.", {'image_id': image_id, 'path': upload_file_path}) # TODO(vbala): add config option to override non-default port # ca_file is used for verifying vCenter certificate if it is set. # If ca_file is unset and insecure is False, the default CA truststore # is used for verification. We should pass cacerts=True in this # case. If ca_file is unset and insecure is True, there is no # certificate verification, and we should pass cacerts=False. cacerts = ca_file if ca_file else not insecure image_transfer.download_flat_image(context, timeout, image_service, image_id, image_size=image_size_in_bytes, host=host_ip, port=443, data_center_name=dc_name, datastore_name=ds_name, cookies=cookies, file_path=upload_file_path, cacerts=cacerts) LOG.debug("Image: %(image_id)s copied to %(path)s.", {'image_id': image_id, 'path': upload_file_path}) def _delete_temp_disk(self, descriptor_ds_file_path, dc_ref): """Deletes a temporary virtual disk.""" LOG.debug("Deleting temporary disk: %s.", descriptor_ds_file_path) try: self.volumeops.delete_vmdk_file( descriptor_ds_file_path, dc_ref) except exceptions.VimException: LOG.warning(_LW("Error occurred while deleting temporary " "disk: %s."), descriptor_ds_file_path, exc_info=True) def _copy_temp_virtual_disk(self, src_dc_ref, src_path, dest_dc_ref, dest_path): """Clones a temporary virtual disk and deletes it finally.""" try: self.volumeops.copy_vmdk_file( src_dc_ref, src_path.get_descriptor_ds_file_path(), dest_path.get_descriptor_ds_file_path(), dest_dc_ref) except exceptions.VimException: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error occurred while copying %(src)s to " "%(dst)s."), {'src': src_path.get_descriptor_ds_file_path(), 'dst': dest_path.get_descriptor_ds_file_path()}) finally: # Delete temporary disk. self._delete_temp_disk(src_path.get_descriptor_ds_file_path(), src_dc_ref) def _get_temp_image_folder(self, image_size_in_bytes): """Get datastore folder for downloading temporary images.""" # Form requirements for datastore selection. req = {} req[hub.DatastoreSelector.SIZE_BYTES] = image_size_in_bytes # vSAN/VVOL datastores don't support virtual disk with # flat extent; skip such datastores. req[hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = ( hub.DatastoreType.get_all_types() - {hub.DatastoreType.VSAN, hub.DatastoreType.VVOL}) # Select datastore satisfying the requirements. (host_ref, _resource_pool, summary) = self._select_datastore(req) ds_name = summary.name dc_ref = self.volumeops.get_dc(host_ref) # Create temporary datastore folder. 
folder_path = TMP_IMAGES_DATASTORE_FOLDER_PATH self.volumeops.create_datastore_folder(ds_name, folder_path, dc_ref) return (dc_ref, ds_name, folder_path) def _create_virtual_disk_from_sparse_image( self, context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name): """Creates a flat extent virtual disk from sparse vmdk image.""" # Upload the image to a temporary virtual disk. src_disk_name = uuidutils.generate_uuid() src_path = volumeops.MonolithicSparseVirtualDiskPath(ds_name, folder_path, src_disk_name) LOG.debug("Creating temporary virtual disk: %(path)s from sparse vmdk " "image: %(image_id)s.", {'path': src_path.get_descriptor_ds_file_path(), 'image_id': image_id}) self._copy_image(context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, src_path.get_descriptor_file_path()) # Copy sparse disk to create a flat extent virtual disk. dest_path = volumeops.FlatExtentVirtualDiskPath(ds_name, folder_path, disk_name) self._copy_temp_virtual_disk(dc_ref, src_path, dc_ref, dest_path) LOG.debug("Created virtual disk: %s from sparse vmdk image.", dest_path.get_descriptor_ds_file_path()) return dest_path def _create_virtual_disk_from_preallocated_image( self, context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type): """Creates virtual disk from an image which is a flat extent.""" # Upload the image and use it as a flat extent to create a virtual # disk. First, find the datastore folder to download the image. (dc_ref, ds_name, folder_path) = self._get_temp_image_folder(image_size_in_bytes) # pylint: disable=E1101 if ds_name == dest_ds_name and dc_ref.value == dest_dc_ref.value: # Temporary image folder and destination path are on the same # datastore. We can directly download the image to the destination # folder to save one virtual disk copy. path = volumeops.FlatExtentVirtualDiskPath(dest_ds_name, dest_folder_path, dest_disk_name) dest_path = path else: # Use the image to create a temporary virtual disk which is then # copied to the destination folder. disk_name = uuidutils.generate_uuid() path = volumeops.FlatExtentVirtualDiskPath(ds_name, folder_path, disk_name) dest_path = volumeops.FlatExtentVirtualDiskPath(dest_ds_name, dest_folder_path, dest_disk_name) LOG.debug("Creating virtual disk: %(path)s from (flat extent) image: " "%(image_id)s.", {'path': path.get_descriptor_ds_file_path(), 'image_id': image_id}) # We first create a descriptor with desired settings. self.volumeops.create_flat_extent_virtual_disk_descriptor( dc_ref, path, image_size_in_bytes / units.Ki, adapter_type, EAGER_ZEROED_THICK_VMDK_TYPE) # Upload the image and use it as the flat extent. try: self._copy_image(context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, path.get_flat_extent_file_path()) except Exception: # Delete the descriptor. with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error occurred while copying image: " "%(image_id)s to %(path)s."), {'path': path.get_descriptor_ds_file_path(), 'image_id': image_id}) LOG.debug("Deleting descriptor: %s.", path.get_descriptor_ds_file_path()) try: self.volumeops.delete_file( path.get_descriptor_ds_file_path(), dc_ref) except exceptions.VimException: LOG.warning(_LW("Error occurred while deleting " "descriptor: %s."), path.get_descriptor_ds_file_path(), exc_info=True) if dest_path != path: # Copy temporary disk to given destination. 
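            # (Sketch: this extra copy is skipped when the temporary image
            # folder happened to land on the destination datastore, i.e.
            #
            #     ds_name == dest_ds_name and
            #     dc_ref.value == dest_dc_ref.value
            #
            # held above, which makes dest_path == path.)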
self._copy_temp_virtual_disk(dc_ref, path, dest_dc_ref, dest_path) LOG.debug("Created virtual disk: %s from flat extent image.", dest_path.get_descriptor_ds_file_path()) return dest_path def _check_disk_conversion(self, image_disk_type, extra_spec_disk_type): """Check if disk type conversion is needed.""" if image_disk_type == ImageDiskType.SPARSE: # We cannot reliably determine the destination disk type of a # virtual disk copied from a sparse image. return True # Virtual disk created from flat extent is always of type # eagerZeroedThick. return not (volumeops.VirtualDiskType.get_virtual_disk_type( extra_spec_disk_type) == volumeops.VirtualDiskType.EAGER_ZEROED_THICK) def _delete_temp_backing(self, backing): """Deletes temporary backing.""" LOG.debug("Deleting backing: %s.", backing) try: self.volumeops.delete_backing(backing) except exceptions.VimException: LOG.warning(_LW("Error occurred while deleting backing: %s."), backing, exc_info=True) def _create_volume_from_non_stream_optimized_image( self, context, volume, image_service, image_id, image_size_in_bytes, adapter_type, image_disk_type): """Creates backing VM from non-streamOptimized image. First, we create a disk-less backing. Then we create a virtual disk using the image which is then attached to the backing VM. Finally, the backing VM is cloned if disk type conversion is required. """ # We should use the disk type in volume type for backing's virtual # disk. disk_type = VMwareVcVmdkDriver._get_disk_type(volume) # First, create a disk-less backing. create_params = {CREATE_PARAM_DISK_LESS: True} disk_conversion = self._check_disk_conversion(image_disk_type, disk_type) if disk_conversion: # The initial backing is a temporary one and used as the source # for clone operation. disk_name = uuidutils.generate_uuid() create_params[CREATE_PARAM_BACKING_NAME] = disk_name else: disk_name = volume['name'] LOG.debug("Creating disk-less backing for volume: %(id)s with params: " "%(param)s.", {'id': volume['id'], 'param': create_params}) backing = self._create_backing(volume, create_params=create_params) try: # Find the backing's datacenter, host, datastore and folder. (ds_name, folder_path) = self._get_ds_name_folder_path(backing) host = self.volumeops.get_host(backing) dc_ref = self.volumeops.get_dc(host) vmdk_path = None attached = False # Create flat extent virtual disk from the image. if image_disk_type == ImageDiskType.SPARSE: # Monolithic sparse image has embedded descriptor. vmdk_path = self._create_virtual_disk_from_sparse_image( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name) else: # The image is just a flat extent. vmdk_path = self._create_virtual_disk_from_preallocated_image( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name, adapter_type) # Attach the virtual disk to the backing. LOG.debug("Attaching virtual disk: %(path)s to backing: " "%(backing)s.", {'path': vmdk_path.get_descriptor_ds_file_path(), 'backing': backing}) self.volumeops.attach_disk_to_backing( backing, image_size_in_bytes / units.Ki, disk_type, adapter_type, vmdk_path.get_descriptor_ds_file_path()) attached = True if disk_conversion: # Clone the temporary backing for disk type conversion. 
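            # (Restating _check_disk_conversion above as a sketch: sparse
            # images always require this conversion clone, and flat-extent
            # uploads require one unless eagerZeroedThick was requested.
            #
            #     def needs_conversion(image_disk_type, requested_type):
            #         if image_disk_type == ImageDiskType.SPARSE:
            #             return True
            #         return requested_type != EAGER_ZEROED_THICK_VMDK_TYPE
            # )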
(host, rp, folder, summary) = self._select_ds_for_volume( volume) datastore = summary.datastore LOG.debug("Cloning temporary backing: %s for disk type " "conversion.", backing) clone = self.volumeops.clone_backing(volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=disk_type, host=host, resource_pool=rp, folder=folder) self._delete_temp_backing(backing) backing = clone self.volumeops.update_backing_disk_uuid(backing, volume['id']) except Exception: # Delete backing and virtual disk created from image. with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error occurred while creating " "volume: %(id)s" " from image: %(image_id)s."), {'id': volume['id'], 'image_id': image_id}) self._delete_temp_backing(backing) # Delete virtual disk if exists and unattached. if vmdk_path is not None and not attached: self._delete_temp_disk( vmdk_path.get_descriptor_ds_file_path(), dc_ref) def _fetch_stream_optimized_image(self, context, volume, image_service, image_id, image_size, adapter_type): """Creates volume from image using HttpNfc VM import. Uses Nfc API to download the VMDK file from Glance. Nfc creates the backing VM that wraps the VMDK in the vCenter inventory. This method assumes glance image is VMDK disk format and its vmware_disktype is 'streamOptimized'. """ try: # find host in which to create the volume (_host, rp, folder, summary) = self._select_ds_for_volume(volume) except exceptions.VimException as excep: err_msg = (_("Exception in _select_ds_for_volume: " "%s."), excep) raise exception.VolumeBackendAPIException(data=err_msg) size_gb = volume['size'] LOG.debug("Selected datastore %(ds)s for new volume of size " "%(size)s GB.", {'ds': summary.name, 'size': size_gb}) # prepare create spec for backing vm profile_id = self._get_storage_profile_id(volume) disk_type = VMwareVcVmdkDriver._get_disk_type(volume) # The size of stream optimized glance image is often suspect, # so better let vCenter figure out the disk capacity during import. 
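        # (Sketch of the import flow below, using names from this module:
        # a zero-capacity create spec is wrapped in a
        # VirtualMachineImportSpec, and the HttpNfc import sizes the disk
        # from the stream itself.
        #
        #     spec = self.volumeops.get_create_spec(name, 0, disk_type, ...)
        #     vm_import_spec.configSpec = spec
        # )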
dummy_disk_size = 0 extra_config = self._get_extra_config(volume) vm_create_spec = self.volumeops.get_create_spec( volume['name'], dummy_disk_size, disk_type, summary.name, profileId=profile_id, adapter_type=adapter_type, extra_config=extra_config) # convert vm_create_spec to vm_import_spec cf = self.session.vim.client.factory vm_import_spec = cf.create('ns0:VirtualMachineImportSpec') vm_import_spec.configSpec = vm_create_spec try: # fetching image from glance will also create the backing timeout = self.configuration.vmware_image_transfer_timeout_secs host_ip = self.configuration.vmware_host_ip LOG.debug("Fetching glance image: %(id)s to server: %(host)s.", {'id': image_id, 'host': host_ip}) backing = image_transfer.download_stream_optimized_image( context, timeout, image_service, image_id, session=self.session, host=host_ip, port=443, resource_pool=rp, vm_folder=folder, vm_import_spec=vm_import_spec, image_size=image_size) self.volumeops.update_backing_disk_uuid(backing, volume['id']) except (exceptions.VimException, exceptions.VMwareDriverException): with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error occurred while copying image: %(id)s " "to volume: %(vol)s."), {'id': image_id, 'vol': volume['name']}) backing = self.volumeops.get_backing(volume['name']) if backing: # delete the backing self.volumeops.delete_backing(backing) LOG.info(_LI("Done copying image: %(id)s to volume: %(vol)s."), {'id': image_id, 'vol': volume['name']}) def _extend_backing(self, backing, new_size_in_gb): """Extend volume backing's virtual disk. :param backing: volume backing :param new_size_in_gb: new size of virtual disk """ root_vmdk_path = self.volumeops.get_vmdk_path(backing) datacenter = self.volumeops.get_dc(backing) self.volumeops.extend_virtual_disk(new_size_in_gb, root_vmdk_path, datacenter) def copy_image_to_volume(self, context, volume, image_service, image_id): """Creates volume from image. This method only supports Glance image of VMDK disk format. Uses flat vmdk file copy for "sparse" and "preallocated" disk types Uses HttpNfc import API for "streamOptimized" disk types. This API creates a backing VM that wraps the VMDK in the vCenter inventory. :param context: context :param volume: Volume object :param image_service: Glance image service :param image_id: Glance image id """ LOG.debug("Copy glance image: %s to create new volume.", image_id) # Verify glance image is vmdk disk format metadata = image_service.show(context, image_id) VMwareVcVmdkDriver._validate_disk_format(metadata['disk_format']) # Validate container format; only 'bare' is supported currently. 
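# --- Illustrative sketch (not driver code) ---------------------------------
# Standalone version of the Glance metadata checks performed around this
# point, assuming a plain dict shaped like image_service.show() output.
# ValueError stands in for the driver's ImageUnacceptable exception; the
# default disk/adapter types mirror the fallbacks used below.
def validate_image_meta(metadata):
    if metadata.get('disk_format') != 'vmdk':
        raise ValueError("Only 'vmdk' disk format is supported.")
    container_format = metadata.get('container_format')
    if container_format and container_format != 'bare':
        raise ValueError("Container format: %s is unsupported; only 'bare' "
                         "is supported." % container_format)
    props = metadata.get('properties') or {}
    disk_type = props.get('vmware_disktype', 'preallocated')
    adapter_type = props.get('vmware_adaptertype', 'lsiLogic')
    return disk_type, adapter_type, metadata['size']


# Example: a bare streamOptimized image selects the HttpNfc import path.
meta = {'disk_format': 'vmdk', 'container_format': 'bare', 'size': 1024,
        'properties': {'vmware_disktype': 'streamOptimized'}}
assert validate_image_meta(meta)[0] == 'streamOptimized'
# ---------------------------------------------------------------------------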
container_format = metadata.get('container_format') if (container_format and container_format != 'bare'): msg = _("Container format: %s is unsupported by the VMDK driver, " "only 'bare' is supported.") % container_format LOG.error(msg) raise exception.ImageUnacceptable(image_id=image_id, reason=msg) # Get the disk type, adapter type and size of vmdk image image_disk_type = ImageDiskType.PREALLOCATED image_adapter_type = volumeops.VirtualDiskAdapterType.LSI_LOGIC image_size_in_bytes = metadata['size'] properties = metadata['properties'] if properties: if 'vmware_disktype' in properties: image_disk_type = properties['vmware_disktype'] if 'vmware_adaptertype' in properties: image_adapter_type = properties['vmware_adaptertype'] try: # validate disk and adapter types in image meta-data volumeops.VirtualDiskAdapterType.validate(image_adapter_type) ImageDiskType.validate(image_disk_type) if image_disk_type == ImageDiskType.STREAM_OPTIMIZED: self._fetch_stream_optimized_image(context, volume, image_service, image_id, image_size_in_bytes, image_adapter_type) else: self._create_volume_from_non_stream_optimized_image( context, volume, image_service, image_id, image_size_in_bytes, image_adapter_type, image_disk_type) except (exceptions.VimException, exceptions.VMwareDriverException): with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error occurred while copying image: %(id)s " "to volume: %(vol)s."), {'id': image_id, 'vol': volume['name']}) LOG.debug("Volume: %(id)s created from image: %(image_id)s.", {'id': volume['id'], 'image_id': image_id}) # If the user-specified volume size is greater than backing's # current disk size, we should extend the disk. volume_size = volume['size'] * units.Gi backing = self.volumeops.get_backing(volume['name']) disk_size = self.volumeops.get_disk_size(backing) if volume_size > disk_size: LOG.debug("Extending volume: %(name)s since the user specified " "volume size (bytes): %(vol_size)d is greater than " "backing's current disk size (bytes): %(disk_size)d.", {'name': volume['name'], 'vol_size': volume_size, 'disk_size': disk_size}) self._extend_backing(backing, volume['size']) # TODO(vbala): handle volume_size < disk_size case. def copy_volume_to_image(self, context, volume, image_service, image_meta): """Creates glance image from volume. Upload of only available volume is supported. The uploaded glance image has a vmdk disk type of "streamOptimized" that can only be downloaded using the HttpNfc API. Steps followed are: 1. Get the name of the vmdk file which the volume points to right now. Can be a chain of snapshots, so we need to know the last in the chain. 2. Use Nfc APIs to upload the contents of the vmdk file to glance. 
""" # if volume is attached raise exception if (volume['volume_attachment'] and len(volume['volume_attachment']) > 0): msg = _("Upload to glance of attached volume is not supported.") LOG.error(msg) raise exception.InvalidVolume(msg) # validate disk format is vmdk LOG.debug("Copy Volume: %s to new image.", volume['name']) VMwareVcVmdkDriver._validate_disk_format(image_meta['disk_format']) # get backing vm of volume and its vmdk path backing = self.volumeops.get_backing(volume['name']) if not backing: LOG.info(_LI("Backing not found, creating for volume: %s"), volume['name']) backing = self._create_backing(volume) vmdk_file_path = self.volumeops.get_vmdk_path(backing) # Upload image from vmdk timeout = self.configuration.vmware_image_transfer_timeout_secs host_ip = self.configuration.vmware_host_ip image_transfer.upload_image(context, timeout, image_service, image_meta['id'], volume['project_id'], session=self.session, host=host_ip, port=443, vm=backing, vmdk_file_path=vmdk_file_path, vmdk_size=volume['size'] * units.Gi, image_name=image_meta['name'], image_version=1, is_public=image_meta['is_public']) LOG.info(_LI("Done copying volume %(vol)s to a new image %(img)s"), {'vol': volume['name'], 'img': image_meta['name']}) def _in_use(self, volume): """Check if the given volume is in use.""" return (volume['volume_attachment'] and len(volume['volume_attachment']) > 0) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. The retype is performed only if the volume is not in use. Retype is NOP if the backing doesn't exist. If disk type conversion is needed, the volume is cloned. If disk type conversion is needed and the volume contains snapshots, the backing is relocated instead of cloning. The backing is also relocated if the current datastore is not compliant with the new storage profile (if any). Finally, the storage profile of the backing VM is updated. :param ctxt: Context :param volume: A dictionary describing the volume to retype :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (unused) :returns: True if the retype occurred; False otherwise. """ # Can't attempt retype if the volume is in use. if self._in_use(volume): LOG.warning(_LW("Volume: %s is in use, can't retype."), volume['name']) return False # If the backing doesn't exist, retype is NOP. backing = self.volumeops.get_backing(volume['name']) if backing is None: LOG.debug("Backing for volume: %s doesn't exist; retype is NOP.", volume['name']) return True # Check whether we need disk type conversion. disk_type = VMwareVcVmdkDriver._get_disk_type(volume) new_disk_type = VMwareVcVmdkDriver._get_extra_spec_disk_type( new_type['id']) need_disk_type_conversion = disk_type != new_disk_type # Check whether we need to relocate the backing. If the backing # contains snapshots, relocate is the only way to achieve disk type # conversion. need_relocate = (need_disk_type_conversion and self.volumeops.snapshot_exists(backing)) datastore = self.volumeops.get_datastore(backing) # Check whether we need to change the storage profile. 
need_profile_change = False is_compliant = True new_profile = None if self._storage_policy_enabled: profile = self._get_storage_profile(volume) new_profile = self._get_extra_spec_storage_profile(new_type['id']) need_profile_change = profile != new_profile # The current datastore may be compliant with the new profile. is_compliant = self.ds_sel.is_datastore_compliant(datastore, new_profile) # No need to relocate or clone if there is no disk type conversion and # the current datastore is compliant with the new profile or storage # policy is disabled. if not need_disk_type_conversion and is_compliant: LOG.debug("Backing: %(backing)s for volume: %(name)s doesn't need " "disk type conversion.", {'backing': backing, 'name': volume['name']}) if self._storage_policy_enabled: LOG.debug("Backing: %(backing)s for volume: %(name)s is " "compliant with the new profile: %(new_profile)s.", {'backing': backing, 'name': volume['name'], 'new_profile': new_profile}) else: # Set requirements for datastore selection. req = {} req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] * units.Gi) if need_relocate: LOG.debug("Backing: %s should be relocated.", backing) req[hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = ( [datastore.value]) if new_profile: req[hub.DatastoreSelector.PROFILE_NAME] = new_profile # Select datastore satisfying the requirements. best_candidate = self.ds_sel.select_datastore(req) if not best_candidate: # No candidate datastores; can't retype. LOG.warning(_LW("There are no datastores matching new " "requirements; can't retype volume: %s."), volume['name']) return False (host, rp, summary) = best_candidate dc = self.volumeops.get_dc(rp) folder = self._get_volume_group_folder(dc, volume['project_id']) new_datastore = summary.datastore if datastore.value != new_datastore.value: # Datastore changed; relocate the backing. LOG.debug("Backing: %s needs to be relocated for retype.", backing) self.volumeops.relocate_backing( backing, new_datastore, rp, host, new_disk_type) self.volumeops.move_backing_to_folder(backing, folder) elif need_disk_type_conversion: # Same datastore, but clone is needed for disk type conversion. LOG.debug("Backing: %s needs to be cloned for retype.", backing) new_backing = None renamed = False tmp_name = uuidutils.generate_uuid() try: self.volumeops.rename_backing(backing, tmp_name) renamed = True new_backing = self.volumeops.clone_backing( volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=new_disk_type, host=host, resource_pool=rp, folder=folder) self.volumeops.update_backing_disk_uuid(new_backing, volume['id']) self._delete_temp_backing(backing) backing = new_backing except exceptions.VimException: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error occurred while cloning " "backing:" " %s during retype."), backing) if renamed: LOG.debug("Undo rename of backing: %(backing)s; " "changing name from %(new_name)s to " "%(old_name)s.", {'backing': backing, 'new_name': tmp_name, 'old_name': volume['name']}) try: self.volumeops.rename_backing(backing, volume['name']) except exceptions.VimException: LOG.warning(_LW("Changing backing: " "%(backing)s name from " "%(new_name)s to %(old_name)s " "failed."), {'backing': backing, 'new_name': tmp_name, 'old_name': volume['name']}) # Update the backing's storage profile if needed. 
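# --- Illustrative sketch (not driver code) ---------------------------------
# The same-datastore conversion above renames the backing out of the way,
# clones it under the volume name, and undoes the rename if cloning fails.
# `ops` and `make_tmp_name` are hypothetical stand-ins.
def clone_with_rollback(ops, volume_name, backing, make_tmp_name):
    tmp_name = make_tmp_name()
    ops.rename_backing(backing, tmp_name)
    try:
        new_backing = ops.clone_backing(volume_name, backing)
        ops.delete_backing(backing)
        return new_backing
    except Exception:
        # Undo the rename so the volume name still resolves to a backing.
        ops.rename_backing(backing, volume_name)
        raise
# ---------------------------------------------------------------------------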
if need_profile_change: LOG.debug("Backing: %(backing)s needs a profile change to:" " %(profile)s.", {'backing': backing, 'profile': new_profile}) profile_id = None if new_profile is not None: profile_id = self.ds_sel.get_profile_id(new_profile) self.volumeops.change_backing_profile(backing, profile_id) # Retype is done. LOG.debug("Volume: %s retype is done.", volume['name']) return True def extend_volume(self, volume, new_size): """Extend volume to new size. Extends the volume backing's virtual disk to new size. First, try to extend in place on the same datastore. If that fails due to insufficient disk space, then try to relocate the volume to a different datastore that can accommodate the backing with new size and retry extend. :param volume: dictionary describing the existing 'available' volume :param new_size: new size in GB to extend this volume to """ vol_name = volume['name'] backing = self.volumeops.get_backing(vol_name) if not backing: LOG.info(_LI("There is no backing for volume: %s; no need to " "extend the virtual disk."), vol_name) return # try extending vmdk in place try: self._extend_backing(backing, new_size) LOG.info(_LI("Successfully extended volume: %(vol)s to size: " "%(size)s GB."), {'vol': vol_name, 'size': new_size}) return except exceptions.NoDiskSpaceException: LOG.warning(_LW("Unable to extend volume: %(vol)s to size: " "%(size)s on current datastore due to insufficient" " space."), {'vol': vol_name, 'size': new_size}) # Insufficient disk space; relocate the volume to a different datastore # and retry extend. LOG.info(_LI("Relocating volume: %s to a different datastore due to " "insufficient disk space on current datastore."), vol_name) try: create_params = {CREATE_PARAM_DISK_SIZE: new_size} (host, rp, folder, summary) = self._select_ds_for_volume( volume, create_params=create_params) self.volumeops.relocate_backing(backing, summary.datastore, rp, host) self.volumeops.move_backing_to_folder(backing, folder) self._extend_backing(backing, new_size) except exceptions.VMwareDriverException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to extend volume: %(vol)s to size: " "%(size)s GB."), {'vol': vol_name, 'size': new_size}) LOG.info(_LI("Successfully extended volume: %(vol)s to size: " "%(size)s GB."), {'vol': vol_name, 'size': new_size}) @contextlib.contextmanager def _temporary_file(self, *args, **kwargs): """Create a temporary file and return its path.""" tmp_dir = self.configuration.vmware_tmp_dir fileutils.ensure_tree(tmp_dir) fd, tmp = tempfile.mkstemp( dir=self.configuration.vmware_tmp_dir, *args, **kwargs) try: os.close(fd) yield tmp finally: fileutils.delete_if_exists(tmp) def _download_vmdk(self, context, volume, backing, tmp_file_path): """Download virtual disk in streamOptimized format.""" timeout = self.configuration.vmware_image_transfer_timeout_secs host_ip = self.configuration.vmware_host_ip vmdk_ds_file_path = self.volumeops.get_vmdk_path(backing) with open(tmp_file_path, "wb") as tmp_file: image_transfer.copy_stream_optimized_disk( context, timeout, tmp_file, session=self.session, host=host_ip, port=443, vm=backing, vmdk_file_path=vmdk_ds_file_path, vmdk_size=volume['size'] * units.Gi) def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup['volume_id']) LOG.debug("Creating backup: %(backup_id)s for volume: %(name)s.", {'backup_id': backup['id'], 'name': volume['name']}) backing = self.volumeops.get_backing(volume['name']) if backing 
is None: LOG.debug("Creating backing for volume: %s.", volume['name']) backing = self._create_backing(volume) tmp_vmdk_name = uuidutils.generate_uuid() with self._temporary_file(suffix=".vmdk", prefix=tmp_vmdk_name) as tmp_file_path: # TODO(vbala) Clean up vmware_tmp_dir during driver init. LOG.debug("Using temporary file: %(tmp_path)s for creating backup:" " %(backup_id)s.", {'tmp_path': tmp_file_path, 'backup_id': backup['id']}) self._download_vmdk(context, volume, backing, tmp_file_path) with open(tmp_file_path, "rb") as tmp_file: LOG.debug("Calling backup service to backup file: %s.", tmp_file_path) backup_service.backup(backup, tmp_file) LOG.debug("Created backup: %(backup_id)s for volume: " "%(name)s.", {'backup_id': backup['id'], 'name': volume['name']}) def _create_backing_from_stream_optimized_file( self, context, name, volume, tmp_file_path, file_size_bytes): """Create backing from streamOptimized virtual disk file.""" LOG.debug("Creating backing: %(name)s from virtual disk: %(path)s.", {'name': name, 'path': tmp_file_path}) (_host, rp, folder, summary) = self._select_ds_for_volume(volume) LOG.debug("Selected datastore: %(ds)s for backing: %(name)s.", {'ds': summary.name, 'name': name}) # Prepare import spec for backing. cf = self.session.vim.client.factory vm_import_spec = cf.create('ns0:VirtualMachineImportSpec') profile_id = self._get_storage_profile_id(volume) disk_type = VMwareVcVmdkDriver._get_disk_type(volume) extra_config = self._get_extra_config(volume) # We cannot determine the size of a virtual disk created from # streamOptimized disk image. Set size to 0 and let vCenter # figure out the size after virtual disk creation. vm_create_spec = self.volumeops.get_create_spec( name, 0, disk_type, summary.name, profileId=profile_id, extra_config=extra_config) vm_import_spec.configSpec = vm_create_spec timeout = self.configuration.vmware_image_transfer_timeout_secs host_ip = self.configuration.vmware_host_ip try: with open(tmp_file_path, "rb") as tmp_file: vm_ref = image_transfer.download_stream_optimized_data( context, timeout, tmp_file, session=self.session, host=host_ip, port=443, resource_pool=rp, vm_folder=folder, vm_import_spec=vm_import_spec, image_size=file_size_bytes) LOG.debug("Created backing: %(name)s from virtual disk: " "%(path)s.", {'name': name, 'path': tmp_file_path}) return vm_ref except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error occurred while creating temporary " "backing.")) backing = self.volumeops.get_backing(name) if backing is not None: self._delete_temp_backing(backing) def _restore_backing( self, context, volume, backing, tmp_file_path, backup_size): """Restore backing from backup.""" # Create temporary backing from streamOptimized file. src_name = uuidutils.generate_uuid() src = self._create_backing_from_stream_optimized_file( context, src_name, volume, tmp_file_path, backup_size) # Copy temporary backing for desired disk type conversion. new_backing = (backing is None) if new_backing: # No backing exists; clone can be used as the volume backing. dest_name = volume['name'] else: # Backing exists; clone can be used as the volume backing only # after deleting the current backing. dest_name = uuidutils.generate_uuid() dest = None tmp_backing_name = None renamed = False try: # Find datastore for clone. 
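# --- Illustrative sketch (not driver code) ---------------------------------
# Standalone equivalent of the _temporary_file() context manager used by
# the backup/restore paths: create a named temporary file, hand out its
# path, and remove it afterwards. The driver uses oslo fileutils; plain
# os/tempfile calls are used here instead.
import contextlib
import os
import tempfile


@contextlib.contextmanager
def temporary_file(tmp_dir, suffix='.vmdk', prefix=''):
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)
    fd, path = tempfile.mkstemp(dir=tmp_dir, suffix=suffix, prefix=prefix)
    try:
        os.close(fd)
        yield path
    finally:
        if os.path.exists(path):
            os.remove(path)


# Usage: download the vmdk into the file, then stream it to the backup
# service, as backup_volume() does above:
#     with temporary_file('/tmp/vmware-tmp') as p: ...
# ---------------------------------------------------------------------------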
(host, rp, folder, summary) = self._select_ds_for_volume(volume) datastore = summary.datastore disk_type = VMwareVcVmdkDriver._get_disk_type(volume) dest = self.volumeops.clone_backing(dest_name, src, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=disk_type, host=host, resource_pool=rp, folder=folder) self.volumeops.update_backing_disk_uuid(dest, volume['id']) if new_backing: LOG.debug("Created new backing: %s for restoring backup.", dest_name) return # Rename current backing. tmp_backing_name = uuidutils.generate_uuid() self.volumeops.rename_backing(backing, tmp_backing_name) renamed = True # Rename clone in order to treat it as the volume backing. self.volumeops.rename_backing(dest, volume['name']) # Now we can delete the old backing. self._delete_temp_backing(backing) LOG.debug("Deleted old backing and renamed clone for restoring " "backup.") except (exceptions.VimException, exceptions.VMwareDriverException): with excutils.save_and_reraise_exception(): if dest is not None: # Copy happened; we need to delete the clone. self._delete_temp_backing(dest) if renamed: # Old backing was renamed; we need to undo that. try: self.volumeops.rename_backing(backing, volume['name']) except exceptions.VimException: LOG.warning(_LW("Cannot undo volume rename; old " "name was %(old_name)s and new " "name is %(new_name)s."), {'old_name': volume['name'], 'new_name': tmp_backing_name}, exc_info=True) finally: # Delete the temporary backing. self._delete_temp_backing(src) def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume. This method raises InvalidVolume if the existing volume contains snapshots since it is not possible to restore the virtual disk of a backing with snapshots. """ LOG.debug("Restoring backup: %(backup_id)s to volume: %(name)s.", {'backup_id': backup['id'], 'name': volume['name']}) backing = self.volumeops.get_backing(volume['name']) if backing is not None and self.volumeops.snapshot_exists(backing): msg = _("Volume cannot be restored since it contains snapshots.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) tmp_vmdk_name = uuidutils.generate_uuid() with self._temporary_file(suffix=".vmdk", prefix=tmp_vmdk_name) as tmp_file_path: LOG.debug("Using temporary file: %(tmp_path)s for restoring " "backup: %(backup_id)s.", {'tmp_path': tmp_file_path, 'backup_id': backup['id']}) with open(tmp_file_path, "wb") as tmp_file: LOG.debug("Calling backup service to restore backup: " "%(backup_id)s to file: %(tmp_path)s.", {'backup_id': backup['id'], 'tmp_path': tmp_file_path}) backup_service.restore(backup, volume['id'], tmp_file) LOG.debug("Backup: %(backup_id)s restored to file: " "%(tmp_path)s.", {'backup_id': backup['id'], 'tmp_path': tmp_file_path}) self._restore_backing(context, volume, backing, tmp_file_path, backup['size'] * units.Gi) if backup['size'] < volume['size']: # Current backing size is backup size. LOG.debug("Backup size: %(backup_size)d is less than " "volume size: %(vol_size)d; extending volume.", {'backup_size': backup['size'], 'vol_size': volume['size']}) self.extend_volume(volume, volume['size']) LOG.debug("Backup: %(backup_id)s restored to volume: " "%(name)s.", {'backup_id': backup['id'], 'name': volume['name']}) def _get_disk_device(self, vmdk_path, vm_inv_path): # Get the VM that corresponds to the given inventory path. vm = self.volumeops.get_entity_by_inventory_path(vm_inv_path) if vm: # Get the disk device that corresponds to the given vmdk path. 
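# --- Illustrative sketch (not driver code) ---------------------------------
# The manage_existing reference parsed by _get_existing() below uses the
# format 'vmdk_path@vm_inventory_path'. ValueError stands in for the
# driver's InvalidInput exception.
def parse_existing_ref(source_name):
    if not source_name:
        raise ValueError("source-name cannot be empty.")
    parts = source_name.split('@')
    if len(parts) != 2:
        raise ValueError("source-name format should be: "
                         "'vmdk_path@vm_inventory_path'.")
    return tuple(parts)  # (vmdk_path, vm_inv_path)


assert parse_existing_ref('[ds1] vols/vol.vmdk@dc1/vm/vol') == \
    ('[ds1] vols/vol.vmdk', 'dc1/vm/vol')
# ---------------------------------------------------------------------------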
disk_device = self.volumeops.get_disk_device(vm, vmdk_path) if disk_device: return (vm, disk_device) def _get_existing(self, existing_ref): src_name = existing_ref.get('source-name') if not src_name: raise exception.InvalidInput( reason=_("source-name cannot be empty.")) # source-name format: vmdk_path@vm_inventory_path parts = src_name.split('@') if len(parts) != 2: raise exception.InvalidInput( reason=_("source-name format should be: " "'vmdk_path@vm_inventory_path'.")) (vmdk_path, vm_inv_path) = parts existing = self._get_disk_device(vmdk_path, vm_inv_path) if not existing: reason = _("%s does not exist.") % src_name raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return existing def manage_existing_get_size(self, volume, existing_ref): """Return size of the volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ (_vm, disk) = self._get_existing(existing_ref) return int(math.ceil(disk.capacityInKB * units.Ki / float(units.Gi))) def manage_existing(self, volume, existing_ref): """Brings an existing virtual disk under Cinder management. Detaches the virtual disk identified by existing_ref and attaches it to a volume backing. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ (vm, disk) = self._get_existing(existing_ref) # Create a backing for the volume. create_params = {CREATE_PARAM_DISK_LESS: True} backing = self._create_backing(volume, create_params=create_params) # Detach the disk to be managed from the source VM. self.volumeops.detach_disk_from_backing(vm, disk) # Move the disk to the datastore folder of volume backing. src_dc = self.volumeops.get_dc(vm) dest_dc = self.volumeops.get_dc(backing) (ds_name, folder_path) = self._get_ds_name_folder_path(backing) dest_path = volumeops.VirtualDiskPath( ds_name, folder_path, volume['name']) self.volumeops.move_vmdk_file(src_dc, disk.backing.fileName, dest_path.get_descriptor_ds_file_path(), dest_dc_ref=dest_dc) # Attach the disk to be managed to volume backing. self.volumeops.attach_disk_to_backing( backing, disk.capacityInKB, VMwareVcVmdkDriver._get_disk_type(volume), 'lsiLogic', dest_path.get_descriptor_ds_file_path()) self.volumeops.update_backing_disk_uuid(backing, volume['id']) @property def session(self): if not self._session: ip = self.configuration.vmware_host_ip username = self.configuration.vmware_host_username password = self.configuration.vmware_host_password api_retry_count = self.configuration.vmware_api_retry_count task_poll_interval = self.configuration.vmware_task_poll_interval wsdl_loc = self.configuration.safe_get('vmware_wsdl_location') pbm_wsdl = self.pbm_wsdl if hasattr(self, 'pbm_wsdl') else None ca_file = self.configuration.vmware_ca_file insecure = self.configuration.vmware_insecure self._session = api.VMwareAPISession(ip, username, password, api_retry_count, task_poll_interval, wsdl_loc=wsdl_loc, pbm_wsdl_loc=pbm_wsdl, cacert=ca_file, insecure=insecure) return self._session def _get_vc_version(self): """Connect to vCenter server and fetch version. Can be over-ridden by setting 'vmware_host_version' config. 

        :returns: vCenter version as a LooseVersion object
        """
        version_str = self.configuration.vmware_host_version
        if version_str:
            LOG.info(_LI("Using overridden vmware_host_version from config: "
                         "%s"), version_str)
        else:
            version_str = vim_util.get_vc_version(self.session)
            LOG.info(_LI("Fetched vCenter server version: %s"), version_str)
        # Convert version_str to LooseVersion and return.
        version = None
        try:
            version = dist_version.LooseVersion(version_str)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Version string '%s' is not parseable"),
                              version_str)
        return version

    def _validate_vcenter_version(self, vc_version):
        if vc_version < self.MIN_SUPPORTED_VC_VERSION:
            msg = _('Running Cinder with a VMware vCenter version less than '
                    '%s is not allowed.') % self.MIN_SUPPORTED_VC_VERSION
            LOG.error(msg)
            raise exceptions.VMwareDriverException(message=msg)

    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        self._validate_params()

        # Validate vCenter version.
        vc_version = self._get_vc_version()
        self._validate_vcenter_version(vc_version)

        # Enable pbm only if vCenter version is 5.5+.
        if vc_version and vc_version >= self.PBM_ENABLED_VC_VERSION:
            self.pbm_wsdl = pbm.get_pbm_wsdl_location(
                six.text_type(vc_version))
            if not self.pbm_wsdl:
                LOG.error(_LE("Not able to configure PBM for vCenter server: "
                              "%s"), vc_version)
                raise exceptions.VMwareDriverException()
            self._storage_policy_enabled = True
            # Destroy current session so that it is recreated with pbm enabled
            self._session = None

        # recreate session and initialize volumeops and ds_sel
        # TODO(vbala) remove properties: session, volumeops and ds_sel
        max_objects = self.configuration.vmware_max_objects_retrieval
        self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects)
        self._ds_sel = hub.DatastoreSelector(self.volumeops, self.session)

        # Get clusters to be used for backing VM creation.
        cluster_names = self.configuration.vmware_cluster_name
        if cluster_names:
            self._clusters = self.volumeops.get_cluster_refs(
                cluster_names).values()
            LOG.info(_LI("Using compute cluster(s): %s."), cluster_names)

        LOG.info(_LI("Successfully set up driver: %(driver)s for server: "
                     "%(ip)s."), {'driver': self.__class__.__name__,
                                  'ip': self.configuration.vmware_host_ip})

    def _get_volume_group_folder(self, datacenter, project_id):
        """Get inventory folder for organizing volume backings.

        The inventory folder for organizing volume backings has the following
        hierarchy:
               <Datacenter_vmFolder>/OpenStack/Project (<project_id>)/
               <volume_folder>
        where <volume_folder> is the vmdk driver config option
        "vmware_volume_folder".

        :param datacenter: Reference to the datacenter
        :param project_id: OpenStack project ID
        :return: Reference to the inventory folder
        """
        volume_folder_name = self.configuration.vmware_volume_folder
        project_folder_name = "Project (%s)" % project_id
        folder_names = ['OpenStack', project_folder_name, volume_folder_name]
        return self.volumeops.create_vm_inventory_folder(datacenter,
                                                         folder_names)

    def _relocate_backing(self, volume, backing, host):
        """Relocate volume backing to a datastore accessible to the given host.

        The backing is not relocated if the current datastore is already
        accessible to the host and compliant with the backing's storage
        profile.

        :param volume: Volume to be relocated
        :param backing: Reference to the backing
        :param host: Reference to the host
        """
        # Check if the current datastore is visible to the host managing
        # the instance and compliant with the storage profile.
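# --- Illustrative sketch (not driver code) ---------------------------------
# _get_volume_group_folder() above builds a fixed three-level hierarchy
# under the datacenter's vmFolder; the helper below only computes the
# folder name components and is an illustration, not the driver's API.
def volume_group_folder_names(project_id, volume_folder_name):
    return ['OpenStack', 'Project (%s)' % project_id, volume_folder_name]


assert volume_group_folder_names('f75f0ae9', 'Volumes') == \
    ['OpenStack', 'Project (f75f0ae9)', 'Volumes']
# ---------------------------------------------------------------------------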
        datastore = self.volumeops.get_datastore(backing)
        backing_profile = None
        if self._storage_policy_enabled:
            backing_profile = self.volumeops.get_profile(backing)
        if (self.volumeops.is_datastore_accessible(datastore, host) and
                self.ds_sel.is_datastore_compliant(datastore,
                                                   backing_profile)):
            LOG.debug("Datastore: %(datastore)s of backing: %(backing)s is "
                      "already accessible to instance's host: %(host)s.",
                      {'backing': backing,
                       'datastore': datastore,
                       'host': host})
            if backing_profile:
                LOG.debug("Backing: %(backing)s is compliant with "
                          "storage profile: %(profile)s.",
                          {'backing': backing,
                           'profile': backing_profile})
            return

        # We need to relocate the backing to an accessible and profile
        # compliant datastore.
        req = {}
        req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] *
                                                 units.Gi)
        req[hub.DatastoreSelector.PROFILE_NAME] = backing_profile

        # Select datastore satisfying the requirements.
        (host, resource_pool, summary) = self._select_datastore(req, host)

        dc = self.volumeops.get_dc(resource_pool)
        folder = self._get_volume_group_folder(dc, volume['project_id'])

        self.volumeops.relocate_backing(backing, summary.datastore,
                                        resource_pool, host)
        self.volumeops.move_backing_to_folder(backing, folder)

    @staticmethod
    def _get_clone_type(volume):
        """Get clone type from volume type.

        :param volume: Volume object
        :return: Clone type from the extra spec if present, else the default
                 'full' clone type
        """
        clone_type = _get_volume_type_extra_spec(
            volume['volume_type_id'],
            'clone_type',
            default_value=volumeops.FULL_CLONE_TYPE)

        if (clone_type != volumeops.FULL_CLONE_TYPE and
                clone_type != volumeops.LINKED_CLONE_TYPE):
            msg = (_("Clone type '%(clone_type)s' is invalid; valid values"
                     " are: '%(full_clone)s' and '%(linked_clone)s'.") %
                   {'clone_type': clone_type,
                    'full_clone': volumeops.FULL_CLONE_TYPE,
                    'linked_clone': volumeops.LINKED_CLONE_TYPE})
            LOG.error(msg)
            raise exception.Invalid(message=msg)
        return clone_type

    def _clone_backing(self, volume, backing, snapshot, clone_type,
                       src_vsize):
        """Clone the backing.

        :param volume: New Volume object
        :param backing: Reference to the backing entity
        :param snapshot: Reference to the snapshot entity
        :param clone_type: type of the clone
        :param src_vsize: the size of the source volume
        """
        datastore = None
        host = None
        rp = None
        folder = None
        if clone_type != volumeops.LINKED_CLONE_TYPE:
            # Pick a datastore on which to create the full clone; any host
            # will do.
            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
            datastore = summary.datastore
        extra_config = self._get_extra_config(volume)
        clone = self.volumeops.clone_backing(volume['name'], backing,
                                             snapshot, clone_type, datastore,
                                             host=host, resource_pool=rp,
                                             extra_config=extra_config,
                                             folder=folder)
        self.volumeops.update_backing_disk_uuid(clone, volume['id'])
        # If the user-specified volume size is greater than the source
        # volume's size, the clone's virtual disk still has only the source
        # volume's capacity on the backend VMDK datastore, even though the
        # volume information reports the larger size. In that case, extend
        # the clone's virtual disk from the source volume's size to the
        # requested volume size.
        if volume['size'] > src_vsize:
            self._extend_backing(clone, volume['size'])
        LOG.info(_LI("Successfully created clone: %s."), clone)

    def _create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        If the snapshot does not exist or source volume's backing does not
        exist, then pass.
:param volume: New Volume object :param snapshot: Reference to snapshot entity """ self._verify_volume_creation(volume) backing = self.volumeops.get_backing(snapshot['volume_name']) if not backing: LOG.info(_LI("There is no backing for the snapshotted volume: " "%(snap)s. Not creating any backing for the " "volume: %(vol)s."), {'snap': snapshot['name'], 'vol': volume['name']}) return snapshot_moref = self.volumeops.get_snapshot(backing, snapshot['name']) if not snapshot_moref: LOG.info(_LI("There is no snapshot point for the snapshotted " "volume: %(snap)s. Not creating any backing for " "the volume: %(vol)s."), {'snap': snapshot['name'], 'vol': volume['name']}) return clone_type = VMwareVcVmdkDriver._get_clone_type(volume) self._clone_backing(volume, backing, snapshot_moref, clone_type, snapshot['volume_size']) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. :param volume: New Volume object :param snapshot: Reference to snapshot entity """ self._create_volume_from_snapshot(volume, snapshot) def _create_cloned_volume(self, volume, src_vref): """Creates volume clone. If source volume's backing does not exist, then pass. Linked clone of attached volume is not supported. :param volume: New Volume object :param src_vref: Source Volume object """ self._verify_volume_creation(volume) backing = self.volumeops.get_backing(src_vref['name']) if not backing: LOG.info(_LI("There is no backing for the source volume: %(src)s. " "Not creating any backing for volume: %(vol)s."), {'src': src_vref['name'], 'vol': volume['name']}) return clone_type = VMwareVcVmdkDriver._get_clone_type(volume) snapshot = None if clone_type == volumeops.LINKED_CLONE_TYPE: if src_vref['status'] != 'available': msg = _("Linked clone of source volume not supported " "in state: %s.") % src_vref['status'] LOG.error(msg) raise exception.InvalidVolume(msg) # For performing a linked clone, we snapshot the volume and # then create the linked clone out of this snapshot point. name = 'snapshot-%s' % volume['id'] snapshot = self.volumeops.create_snapshot(backing, name, None) self._clone_backing(volume, backing, snapshot, clone_type, src_vref['size']) def create_cloned_volume(self, volume, src_vref): """Creates volume clone. :param volume: New Volume object :param src_vref: Source Volume object """ self._create_cloned_volume(volume, src_vref) cinder-8.0.0/cinder/volume/drivers/vmware/volumeops.py0000664000567000056710000021564712701406250024275 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Implements operations on volumes residing on VMware datastores. 
""" from oslo_log import log as logging from oslo_utils import units from oslo_vmware import exceptions from oslo_vmware import pbm from oslo_vmware import vim_util import six from six.moves import urllib from cinder.i18n import _, _LE, _LI from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions LOG = logging.getLogger(__name__) LINKED_CLONE_TYPE = 'linked' FULL_CLONE_TYPE = 'full' def split_datastore_path(datastore_path): """Split the datastore path to components. return the datastore name, relative folder path and the file name E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns (datastore1, my_volume/, my_volume.vmdk) :param datastore_path: Datastore path of a file :return: Parsed datastore name, relative folder path and file name """ splits = datastore_path.split('[', 1)[1].split(']', 1) datastore_name = None folder_path = None file_name = None if len(splits) == 1: datastore_name = splits[0] else: datastore_name, path = splits # Path will be of form my_volume/my_volume.vmdk # we need into my_volumes/ and my_volume.vmdk splits = path.split('/') file_name = splits[len(splits) - 1] folder_path = path[:-len(file_name)] return (datastore_name.strip(), folder_path.strip(), file_name.strip()) class VirtualDiskPath(object): """Class representing paths of files comprising a virtual disk.""" def __init__(self, ds_name, folder_path, disk_name): """Creates path object for the given disk. :param ds_name: name of the datastore where disk is stored :param folder_path: absolute path of the folder containing the disk :param disk_name: name of the virtual disk """ self._descriptor_file_path = "%s%s.vmdk" % (folder_path, disk_name) self._descriptor_ds_file_path = self.get_datastore_file_path( ds_name, self._descriptor_file_path) def get_datastore_file_path(self, ds_name, file_path): """Get datastore path corresponding to the given file path. :param ds_name: name of the datastore containing the file represented by the given file path :param file_path: absolute path of the file :return: datastore file path """ return "[%s] %s" % (ds_name, file_path) def get_descriptor_file_path(self): """Get absolute file path of the virtual disk descriptor.""" return self._descriptor_file_path def get_descriptor_ds_file_path(self): """Get datastore file path of the virtual disk descriptor.""" return self._descriptor_ds_file_path class FlatExtentVirtualDiskPath(VirtualDiskPath): """Paths of files in a non-monolithic disk with a single flat extent.""" def __init__(self, ds_name, folder_path, disk_name): """Creates path object for the given disk. 
:param ds_name: name of the datastore where disk is stored :param folder_path: absolute path of the folder containing the disk :param disk_name: name of the virtual disk """ super(FlatExtentVirtualDiskPath, self).__init__( ds_name, folder_path, disk_name) self._flat_extent_file_path = "%s%s-flat.vmdk" % (folder_path, disk_name) self._flat_extent_ds_file_path = self.get_datastore_file_path( ds_name, self._flat_extent_file_path) def get_flat_extent_file_path(self): """Get absolute file path of the flat extent.""" return self._flat_extent_file_path def get_flat_extent_ds_file_path(self): """Get datastore file path of the flat extent.""" return self._flat_extent_ds_file_path class MonolithicSparseVirtualDiskPath(VirtualDiskPath): """Paths of file comprising a monolithic sparse disk.""" pass class VirtualDiskType(object): """Supported virtual disk types.""" EAGER_ZEROED_THICK = "eagerZeroedThick" PREALLOCATED = "preallocated" THIN = "thin" # thick in extra_spec means lazy-zeroed thick disk EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK, 'thick': PREALLOCATED, 'thin': THIN } @staticmethod def is_valid(extra_spec_disk_type): """Check if the given disk type in extra_spec is valid. :param extra_spec_disk_type: disk type in extra_spec :return: True if valid """ return (extra_spec_disk_type in VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT) @staticmethod def validate(extra_spec_disk_type): """Validate the given disk type in extra_spec. This method throws an instance of InvalidDiskTypeException if the given disk type is invalid. :param extra_spec_disk_type: disk type in extra_spec :raises: InvalidDiskTypeException """ if not VirtualDiskType.is_valid(extra_spec_disk_type): raise vmdk_exceptions.InvalidDiskTypeException( disk_type=extra_spec_disk_type) @staticmethod def get_virtual_disk_type(extra_spec_disk_type): """Return disk type corresponding to the extra_spec disk type. :param extra_spec_disk_type: disk type in extra_spec :return: virtual disk type :raises: InvalidDiskTypeException """ VirtualDiskType.validate(extra_spec_disk_type) return (VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT[ extra_spec_disk_type]) class VirtualDiskAdapterType(object): """Supported virtual disk adapter types.""" LSI_LOGIC = "lsiLogic" BUS_LOGIC = "busLogic" LSI_LOGIC_SAS = "lsiLogicsas" IDE = "ide" @staticmethod def is_valid(adapter_type): """Check if the given adapter type is valid. :param adapter_type: adapter type to check :return: True if valid """ return adapter_type in [VirtualDiskAdapterType.LSI_LOGIC, VirtualDiskAdapterType.BUS_LOGIC, VirtualDiskAdapterType.LSI_LOGIC_SAS, VirtualDiskAdapterType.IDE] @staticmethod def validate(extra_spec_adapter_type): """Validate the given adapter type in extra_spec. This method throws an instance of InvalidAdapterTypeException if the given adapter type is invalid. :param extra_spec_adapter_type: adapter type in extra_spec :raises: InvalidAdapterTypeException """ if not VirtualDiskAdapterType.is_valid(extra_spec_adapter_type): raise vmdk_exceptions.InvalidAdapterTypeException( invalid_type=extra_spec_adapter_type) @staticmethod def get_adapter_type(extra_spec_adapter_type): """Get the adapter type to be used in VirtualDiskSpec. :param extra_spec_adapter_type: adapter type in the extra_spec :return: adapter type to be used in VirtualDiskSpec """ VirtualDiskAdapterType.validate(extra_spec_adapter_type) # We set the adapter type as lsiLogic for lsiLogicsas since it is not # supported by VirtualDiskManager APIs. 
        # This won't be a problem because we attach the virtual disk to the
        # correct controller type and the disk adapter type is always
        # resolved using its controller key.
        if extra_spec_adapter_type == VirtualDiskAdapterType.LSI_LOGIC_SAS:
            return VirtualDiskAdapterType.LSI_LOGIC
        return extra_spec_adapter_type


class ControllerType(object):
    """Encapsulate various controller types."""

    LSI_LOGIC = 'VirtualLsiLogicController'
    BUS_LOGIC = 'VirtualBusLogicController'
    LSI_LOGIC_SAS = 'VirtualLsiLogicSASController'
    IDE = 'VirtualIDEController'

    CONTROLLER_TYPE_DICT = {
        VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC,
        VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC,
        VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS,
        VirtualDiskAdapterType.IDE: IDE}

    @staticmethod
    def get_controller_type(adapter_type):
        """Get the disk controller type based on the given adapter type.

        :param adapter_type: disk adapter type
        :return: controller type corresponding to the given adapter type
        :raises: InvalidAdapterTypeException
        """
        if adapter_type in ControllerType.CONTROLLER_TYPE_DICT:
            return ControllerType.CONTROLLER_TYPE_DICT[adapter_type]
        raise vmdk_exceptions.InvalidAdapterTypeException(
            invalid_type=adapter_type)

    @staticmethod
    def is_scsi_controller(controller_type):
        """Check if the given controller is a SCSI controller.

        :param controller_type: controller type
        :return: True if the controller is a SCSI controller
        """
        return controller_type in [ControllerType.LSI_LOGIC,
                                   ControllerType.BUS_LOGIC,
                                   ControllerType.LSI_LOGIC_SAS]


class VMwareVolumeOps(object):
    """Manages volume operations."""

    def __init__(self, session, max_objects):
        self._session = session
        self._max_objects = max_objects
        self._folder_cache = {}

    def get_backing(self, name):
        """Get the backing based on name.

        :param name: Name of the backing
        :return: Managed object reference to the backing
        """
        retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
                                                   self._session.vim,
                                                   'VirtualMachine',
                                                   self._max_objects)
        while retrieve_result:
            vms = retrieve_result.objects
            for vm in vms:
                if vm.propSet[0].val == name:
                    # We got the result, so cancel further retrieval.
                    self.cancel_retrieval(retrieve_result)
                    return vm.obj
            # Result not obtained, continue retrieving results.
            retrieve_result = self.continue_retrieval(retrieve_result)

        LOG.debug("Did not find any backing with name: %s", name)

    def delete_backing(self, backing):
        """Delete the backing.

        :param backing: Managed object reference to the backing
        """
        LOG.debug("Deleting the VM backing: %s.", backing)
        task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
                                        backing)
        LOG.debug("Initiated deletion of VM backing: %s.", backing)
        self._session.wait_for_task(task)
        LOG.info(_LI("Deleted the VM backing: %s."), backing)

    # TODO(kartikaditya) Keep the methods not specific to volume in
    # a different file
    def get_host(self, instance):
        """Get host under which instance is present.

        :param instance: Managed object reference of the instance VM
        :return: Host managing the instance VM
        """
        return self._session.invoke_api(vim_util, 'get_object_property',
                                        self._session.vim, instance,
                                        'runtime.host')

    def is_host_usable(self, host):
        """Check if the given ESX host is usable.

        A host is usable if it is connected to vCenter server and not in
        maintenance mode.

        :param host: Managed object reference to the ESX host
        :return: True if host is usable, False otherwise
        """
        runtime_info = self._session.invoke_api(vim_util,
                                                'get_object_property',
                                                self._session.vim,
                                                host,
                                                'runtime')
        return (runtime_info.connectionState == 'connected' and
                not runtime_info.inMaintenanceMode)

    def get_hosts(self):
        """Get all hosts from the inventory.

        :return: All the hosts from the inventory
        """
        return self._session.invoke_api(vim_util, 'get_objects',
                                        self._session.vim,
                                        'HostSystem', self._max_objects)

    def continue_retrieval(self, retrieve_result):
        """Continue retrieval of results if necessary.

        :param retrieve_result: Result from RetrievePropertiesEx
        """
        return self._session.invoke_api(vim_util, 'continue_retrieval',
                                        self._session.vim, retrieve_result)

    def cancel_retrieval(self, retrieve_result):
        """Cancel retrieval of results if necessary.

        :param retrieve_result: Result from RetrievePropertiesEx
        """
        self._session.invoke_api(vim_util, 'cancel_retrieval',
                                 self._session.vim, retrieve_result)

    def _is_usable(self, mount_info):
        """Check if a datastore is usable as per the given mount info.

        The datastore is considered to be usable for a host only if it is
        writable, mounted and accessible.

        :param mount_info: Host mount information
        :return: True if datastore is usable
        """
        writable = mount_info.accessMode == 'readWrite'
        # If mounted attribute is not set, then default is True
        mounted = getattr(mount_info, 'mounted', True)
        # If accessible attribute is not set, then default is False
        accessible = getattr(mount_info, 'accessible', False)

        return writable and mounted and accessible

    def get_connected_hosts(self, datastore):
        """Get all the hosts to which the datastore is connected and usable.

        The datastore is considered to be usable for a host only if it is
        writable, mounted and accessible.

        :param datastore: Reference to the datastore entity
        :return: List of managed object references of all connected hosts
        """
        summary = self.get_summary(datastore)
        if not summary.accessible:
            return []

        host_mounts = self._session.invoke_api(vim_util,
                                               'get_object_property',
                                               self._session.vim, datastore,
                                               'host')
        if not hasattr(host_mounts, 'DatastoreHostMount'):
            return []

        connected_hosts = []
        for host_mount in host_mounts.DatastoreHostMount:
            if self._is_usable(host_mount.mountInfo):
                connected_hosts.append(host_mount.key.value)

        return connected_hosts

    def is_datastore_accessible(self, datastore, host):
        """Check if the datastore is accessible to the given host.

        :param datastore: datastore reference
        :param host: host reference
        :return: True if the datastore is accessible
        """
        hosts = self.get_connected_hosts(datastore)
        return host.value in hosts

    def _in_maintenance(self, summary):
        """Check if a datastore is entering maintenance or in maintenance.

        :param summary: Summary information about the datastore
        :return: True if the datastore is entering maintenance or in
                 maintenance
        """
        if hasattr(summary, 'maintenanceMode'):
            return summary.maintenanceMode in ['enteringMaintenance',
                                               'inMaintenance']
        return False

    def _is_valid(self, datastore, host):
        """Check if the datastore is valid for the given host.

        A datastore is considered valid for a host only if the datastore is
        writable, mounted and accessible. Also, the datastore should not be
        in maintenance mode.

        :param datastore: Reference to the datastore entity
        :param host: Reference to the host entity
        :return: True if datastore can be used for volume creation
        """
        summary = self.get_summary(datastore)
        in_maintenance = self._in_maintenance(summary)
        if not summary.accessible or in_maintenance:
            return False

        host_mounts = self._session.invoke_api(vim_util,
                                               'get_object_property',
                                               self._session.vim, datastore,
                                               'host')
        for host_mount in host_mounts.DatastoreHostMount:
            if host_mount.key.value == host.value:
                return self._is_usable(host_mount.mountInfo)
        return False

    def get_dss_rp(self, host):
        """Get accessible datastores and resource pool of the host.

        :param host: Managed object reference of the host
        :return: Datastores accessible to the host and resource pool to
                 which the host belongs
        """
        props = self._session.invoke_api(vim_util, 'get_object_properties',
                                         self._session.vim, host,
                                         ['datastore', 'parent'])
        # Get datastores and compute resource or cluster compute resource
        datastores = []
        compute_resource = None
        for elem in props:
            for prop in elem.propSet:
                if prop.name == 'datastore' and prop.val:
                    # Consider only if datastores are present under host
                    datastores = prop.val.ManagedObjectReference
                elif prop.name == 'parent':
                    compute_resource = prop.val
        LOG.debug("Datastores attached to host %(host)s are: %(ds)s.",
                  {'host': host, 'ds': datastores})

        # Keep only the datastores that are accessible, mounted and writable
        valid_dss = []
        for datastore in datastores:
            if self._is_valid(datastore, host):
                valid_dss.append(datastore)

        # Get resource pool from compute resource or cluster compute resource
        resource_pool = self._session.invoke_api(vim_util,
                                                 'get_object_property',
                                                 self._session.vim,
                                                 compute_resource,
                                                 'resourcePool')
        if not valid_dss:
            msg = _("There are no valid datastores attached to %s.") % host
            LOG.error(msg)
            raise exceptions.VimException(msg)
        else:
            LOG.debug("Valid datastores are: %s", valid_dss)
        return (valid_dss, resource_pool)

    def _get_parent(self, child, parent_type):
        """Get immediate parent of given type via 'parent' property.

        :param child: Child entity reference
        :param parent_type: Entity type of the parent
        :return: Immediate parent of specific type up the hierarchy via
                 'parent' property
        """
        if not child:
            return None
        if child._type == parent_type:
            return child
        parent = self._session.invoke_api(vim_util, 'get_object_property',
                                          self._session.vim, child, 'parent')
        return self._get_parent(parent, parent_type)

    def get_dc(self, child):
        """Get parent datacenter up the hierarchy via 'parent' property.

        :param child: Reference of the child entity
        :return: Parent Datacenter of the param child entity
        """
        return self._get_parent(child, 'Datacenter')

    def get_vmfolder(self, datacenter):
        """Get the vmFolder.

        :param datacenter: Reference to the datacenter entity
        :return: vmFolder property of the datacenter
        """
        return self._session.invoke_api(vim_util, 'get_object_property',
                                        self._session.vim, datacenter,
                                        'vmFolder')

    def _get_child_folder(self, parent_folder, child_folder_name):
        # Get list of child entities for the parent folder
        prop_val = self._session.invoke_api(vim_util, 'get_object_property',
                                            self._session.vim, parent_folder,
                                            'childEntity')

        if prop_val and hasattr(prop_val, 'ManagedObjectReference'):
            child_entities = prop_val.ManagedObjectReference

            # Return if the child folder with input name is already present
            for child_entity in child_entities:
                if child_entity._type != 'Folder':
                    continue
                child_entity_name = self.get_entity_name(child_entity)
                if (child_entity_name and
                        (urllib.parse.unquote(child_entity_name) ==
                         child_folder_name)):
                    LOG.debug("Child folder: %s exists.", child_folder_name)
                    return child_entity

    def create_folder(self, parent_folder, child_folder_name):
        """Creates child folder with given name under the given parent folder.

        The method first checks whether a child folder with the given name
        already exists; if it does, it returns a moref to that folder.
        Otherwise, it creates the folder and returns its moref.

        :param parent_folder: Reference to the folder entity
        :param child_folder_name: Name of the child folder
        :return: Reference to the child folder with input name if it already
                 exists, else create one and return the reference
        """
        LOG.debug("Creating folder: %(child_folder_name)s under parent "
                  "folder: %(parent_folder)s.",
                  {'child_folder_name': child_folder_name,
                   'parent_folder': parent_folder})

        child_folder = self._get_child_folder(parent_folder,
                                              child_folder_name)
        if not child_folder:
            # Need to create the child folder.
            try:
                child_folder = self._session.invoke_api(self._session.vim,
                                                        'CreateFolder',
                                                        parent_folder,
                                                        name=child_folder_name)
                LOG.debug("Created child folder: %s.", child_folder)
            except exceptions.DuplicateName:
                # Another thread is trying to create the same folder, ignore
                # the exception.
                child_folder = self._get_child_folder(parent_folder,
                                                      child_folder_name)
        return child_folder

    def create_vm_inventory_folder(self, datacenter, path_comp):
        """Create and return a VM inventory folder.

        This method caches references to inventory folders returned.

        :param datacenter: Reference to datacenter
        :param path_comp: Path components as a list
        """
        LOG.debug("Creating inventory folder: %(path_comp)s under VM folder "
                  "of datacenter: %(datacenter)s.",
                  {'path_comp': path_comp,
                   'datacenter': datacenter})
        path = "/" + datacenter.value
        parent = self._folder_cache.get(path)
        if not parent:
            parent = self.get_vmfolder(datacenter)
            self._folder_cache[path] = parent

        folder = None
        for folder_name in path_comp:
            path = "/".join([path, folder_name])
            folder = self._folder_cache.get(path)
            if not folder:
                folder = self.create_folder(parent, folder_name)
                self._folder_cache[path] = folder
            parent = folder

        LOG.debug("Inventory folder for path: %(path)s is %(folder)s.",
                  {'path': path, 'folder': folder})
        return folder

    def extend_virtual_disk(self, requested_size_in_gb, path, dc_ref,
                            eager_zero=False):
        """Extend the virtual disk to the requested size.
:param requested_size_in_gb: Size of the volume in GB :param path: Datastore path of the virtual disk to extend :param dc_ref: Reference to datacenter :param eager_zero: Boolean determining if the free space is zeroed out """ LOG.debug("Extending virtual disk: %(path)s to %(size)s GB.", {'path': path, 'size': requested_size_in_gb}) diskMgr = self._session.vim.service_content.virtualDiskManager # VMWare API needs the capacity unit to be in KB, so convert the # capacity unit from GB to KB. size_in_kb = requested_size_in_gb * units.Mi task = self._session.invoke_api(self._session.vim, "ExtendVirtualDisk_Task", diskMgr, name=path, datacenter=dc_ref, newCapacityKb=size_in_kb, eagerZero=eager_zero) self._session.wait_for_task(task) LOG.info(_LI("Successfully extended virtual disk: %(path)s to " "%(size)s GB."), {'path': path, 'size': requested_size_in_gb}) def _create_controller_config_spec(self, adapter_type): """Returns config spec for adding a disk controller.""" cf = self._session.vim.client.factory controller_type = ControllerType.get_controller_type(adapter_type) controller_device = cf.create('ns0:%s' % controller_type) controller_device.key = -100 controller_device.busNumber = 0 if ControllerType.is_scsi_controller(controller_type): controller_device.sharedBus = 'noSharing' controller_spec = cf.create('ns0:VirtualDeviceConfigSpec') controller_spec.operation = 'add' controller_spec.device = controller_device return controller_spec def _create_disk_backing(self, disk_type, vmdk_ds_file_path): """Creates file backing for virtual disk.""" cf = self._session.vim.client.factory disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo') if disk_type == VirtualDiskType.EAGER_ZEROED_THICK: disk_device_bkng.eagerlyScrub = True elif disk_type == VirtualDiskType.THIN: disk_device_bkng.thinProvisioned = True disk_device_bkng.fileName = vmdk_ds_file_path or '' disk_device_bkng.diskMode = 'persistent' return disk_device_bkng def _create_virtual_disk_config_spec(self, size_kb, disk_type, controller_key, vmdk_ds_file_path): """Returns config spec for adding a virtual disk.""" cf = self._session.vim.client.factory disk_device = cf.create('ns0:VirtualDisk') # disk size should be at least 1024KB disk_device.capacityInKB = max(units.Ki, int(size_kb)) if controller_key < 0: disk_device.key = controller_key - 1 else: disk_device.key = -101 disk_device.unitNumber = 0 disk_device.controllerKey = controller_key disk_device.backing = self._create_disk_backing(disk_type, vmdk_ds_file_path) disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.operation = 'add' if vmdk_ds_file_path is None: disk_spec.fileOperation = 'create' disk_spec.device = disk_device return disk_spec def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type, vmdk_ds_file_path=None): """Create controller and disk config specs for adding a new disk. :param size_kb: disk size in KB :param disk_type: disk provisioning type :param adapter_type: disk adapter type :param vmdk_ds_file_path: Optional datastore file path of an existing virtual disk. If specified, file backing is not created for the virtual disk. :return: list containing controller and disk config specs """ controller_spec = None if adapter_type == 'ide': # For IDE disks, use one of the default IDE controllers (with keys # 200 and 201) created as part of backing VM creation. 
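# --- Illustrative sketch (not driver code) ---------------------------------
# Quick numeric check of the size conversions used above: the virtual disk
# APIs take capacities in KB, so GB values are scaled by units.Mi
# (1024 * 1024), and _create_virtual_disk_config_spec() never creates a
# disk smaller than 1024 KB. Plain constants replace oslo_utils.units.
KI = 1024
MI = 1024 * 1024


def gb_to_kb(size_gb):
    return size_gb * MI


def min_disk_capacity_kb(size_kb):
    return max(KI, int(size_kb))


assert gb_to_kb(1) == 1048576
assert min_disk_capacity_kb(100) == 1024
# ---------------------------------------------------------------------------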
controller_key = 200 else: controller_spec = self._create_controller_config_spec(adapter_type) controller_key = controller_spec.device.key disk_spec = self._create_virtual_disk_config_spec(size_kb, disk_type, controller_key, vmdk_ds_file_path) specs = [disk_spec] if controller_spec is not None: specs.append(controller_spec) return specs def _get_extra_config_option_values(self, extra_config): cf = self._session.vim.client.factory option_values = [] for key, value in six.iteritems(extra_config): opt = cf.create('ns0:OptionValue') opt.key = key opt.value = value option_values.append(opt) return option_values def _get_create_spec_disk_less(self, name, ds_name, profileId=None, extra_config=None): """Return spec for creating disk-less backing. :param name: Name of the backing :param ds_name: Datastore name where the disk is to be provisioned :param profileId: Storage profile ID for the backing :param extra_config: Key-value pairs to be written to backing's extra-config :return: Spec for creation """ cf = self._session.vim.client.factory vm_file_info = cf.create('ns0:VirtualMachineFileInfo') vm_file_info.vmPathName = '[%s]' % ds_name create_spec = cf.create('ns0:VirtualMachineConfigSpec') create_spec.name = name create_spec.guestId = 'otherGuest' create_spec.numCPUs = 1 create_spec.memoryMB = 128 create_spec.files = vm_file_info # Set the hardware version to a compatible version supported by # vSphere 5.0. This will ensure that the backing VM can be migrated # without any incompatibility issues in a mixed cluster of ESX hosts # with versions 5.0 or above. create_spec.version = "vmx-08" if profileId: vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec') vmProfile.profileId = profileId create_spec.vmProfile = [vmProfile] if extra_config: create_spec.extraConfig = self._get_extra_config_option_values( extra_config) return create_spec def get_create_spec(self, name, size_kb, disk_type, ds_name, profileId=None, adapter_type='lsiLogic', extra_config=None): """Return spec for creating backing with a single disk. :param name: name of the backing :param size_kb: disk size in KB :param disk_type: disk provisioning type :param ds_name: datastore name where the disk is to be provisioned :param profileId: storage profile ID for the backing :param adapter_type: disk adapter type :param extra_config: key-value pairs to be written to backing's extra-config :return: spec for creation """ create_spec = self._get_create_spec_disk_less( name, ds_name, profileId=profileId, extra_config=extra_config) create_spec.deviceChange = self._create_specs_for_disk_add( size_kb, disk_type, adapter_type) return create_spec def _create_backing_int(self, folder, resource_pool, host, create_spec): """Helper for create backing methods.""" LOG.debug("Creating volume backing with spec: %s.", create_spec) task = self._session.invoke_api(self._session.vim, 'CreateVM_Task', folder, config=create_spec, pool=resource_pool, host=host) task_info = self._session.wait_for_task(task) backing = task_info.result LOG.info(_LI("Successfully created volume backing: %s."), backing) return backing def create_backing(self, name, size_kb, disk_type, folder, resource_pool, host, ds_name, profileId=None, adapter_type='lsiLogic', extra_config=None): """Create backing for the volume. Creates a VM with one VMDK based on the given inputs. 
:param name: Name of the backing :param size_kb: Size in KB of the backing :param disk_type: VMDK type for the disk :param folder: Folder, where to create the backing under :param resource_pool: Resource pool reference :param host: Host reference :param ds_name: Datastore name where the disk is to be provisioned :param profileId: Storage profile ID to be associated with backing :param adapter_type: Disk adapter type :param extra_config: Key-value pairs to be written to backing's extra-config :return: Reference to the created backing entity """ LOG.debug("Creating volume backing with name: %(name)s " "disk_type: %(disk_type)s size_kb: %(size_kb)s " "adapter_type: %(adapter_type)s profileId: %(profile)s at " "folder: %(folder)s resource_pool: %(resource_pool)s " "host: %(host)s datastore_name: %(ds_name)s.", {'name': name, 'disk_type': disk_type, 'size_kb': size_kb, 'folder': folder, 'resource_pool': resource_pool, 'ds_name': ds_name, 'profile': profileId, 'host': host, 'adapter_type': adapter_type}) create_spec = self.get_create_spec( name, size_kb, disk_type, ds_name, profileId=profileId, adapter_type=adapter_type, extra_config=extra_config) return self._create_backing_int(folder, resource_pool, host, create_spec) def create_backing_disk_less(self, name, folder, resource_pool, host, ds_name, profileId=None, extra_config=None): """Create disk-less volume backing. This type of backing is useful for creating volume from image. The downloaded image from the image service can be copied to a virtual disk of desired provisioning type and added to the backing VM. :param name: Name of the backing :param folder: Folder where the backing is created :param resource_pool: Resource pool reference :param host: Host reference :param ds_name: Name of the datastore used for VM storage :param profileId: Storage profile ID to be associated with backing :param extra_config: Key-value pairs to be written to backing's extra-config :return: Reference to the created backing entity """ LOG.debug("Creating disk-less volume backing with name: %(name)s " "profileId: %(profile)s at folder: %(folder)s " "resource pool: %(resource_pool)s host: %(host)s " "datastore_name: %(ds_name)s.", {'name': name, 'profile': profileId, 'folder': folder, 'resource_pool': resource_pool, 'host': host, 'ds_name': ds_name}) create_spec = self._get_create_spec_disk_less( name, ds_name, profileId=profileId, extra_config=extra_config) return self._create_backing_int(folder, resource_pool, host, create_spec) def get_datastore(self, backing): """Get datastore where the backing resides. :param backing: Reference to the backing :return: Datastore reference to which the backing belongs """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'datastore').ManagedObjectReference[0] def get_summary(self, datastore): """Get datastore summary. 
:param datastore: Reference to the datastore :return: 'summary' property of the datastore """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, datastore, 'summary') def _create_relocate_spec_disk_locator(self, datastore, disk_type, disk_device): """Creates spec for disk type conversion during relocate.""" cf = self._session.vim.client.factory disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator") disk_locator.datastore = datastore disk_locator.diskId = disk_device.key disk_locator.diskBackingInfo = self._create_disk_backing(disk_type, None) return disk_locator def _get_relocate_spec(self, datastore, resource_pool, host, disk_move_type, disk_type=None, disk_device=None): """Return spec for relocating volume backing. :param datastore: Reference to the datastore :param resource_pool: Reference to the resource pool :param host: Reference to the host :param disk_move_type: Disk move type option :param disk_type: Destination disk type :param disk_device: Virtual device corresponding to the disk :return: Spec for relocation """ cf = self._session.vim.client.factory relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec') relocate_spec.datastore = datastore relocate_spec.pool = resource_pool relocate_spec.host = host relocate_spec.diskMoveType = disk_move_type if disk_type is not None and disk_device is not None: disk_locator = self._create_relocate_spec_disk_locator(datastore, disk_type, disk_device) relocate_spec.disk = [disk_locator] LOG.debug("Spec for relocating the backing: %s.", relocate_spec) return relocate_spec def relocate_backing( self, backing, datastore, resource_pool, host, disk_type=None): """Relocates backing to the input datastore and resource pool. The implementation uses moveAllDiskBackingsAndAllowSharing disk move type. :param backing: Reference to the backing :param datastore: Reference to the datastore :param resource_pool: Reference to the resource pool :param host: Reference to the host :param disk_type: destination disk type """ LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s " "and resource pool: %(rp)s with destination disk type: " "%(disk_type)s.", {'backing': backing, 'ds': datastore, 'rp': resource_pool, 'disk_type': disk_type}) # Relocate the volume backing disk_move_type = 'moveAllDiskBackingsAndAllowSharing' disk_device = None if disk_type is not None: disk_device = self._get_disk_device(backing) relocate_spec = self._get_relocate_spec(datastore, resource_pool, host, disk_move_type, disk_type, disk_device) task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task', backing, spec=relocate_spec) LOG.debug("Initiated relocation of volume backing: %s.", backing) self._session.wait_for_task(task) LOG.info(_LI("Successfully relocated volume backing: %(backing)s " "to datastore: %(ds)s and resource pool: %(rp)s."), {'backing': backing, 'ds': datastore, 'rp': resource_pool}) def move_backing_to_folder(self, backing, folder): """Move the volume backing to the folder. 
        :param backing: Reference to the backing
        :param folder: Reference to the folder
        """
        LOG.debug("Moving backing: %(backing)s to folder: %(fol)s.",
                  {'backing': backing, 'fol': folder})
        task = self._session.invoke_api(self._session.vim,
                                        'MoveIntoFolder_Task',
                                        folder,
                                        list=[backing])
        LOG.debug("Initiated move of volume backing: %(backing)s into the "
                  "folder: %(fol)s.", {'backing': backing, 'fol': folder})
        self._session.wait_for_task(task)
        LOG.info(_LI("Successfully moved volume "
                     "backing: %(backing)s into the "
                     "folder: %(fol)s."), {'backing': backing, 'fol': folder})

    def create_snapshot(self, backing, name, description, quiesce=False):
        """Create snapshot of the backing with given name and description.

        :param backing: Reference to the backing entity
        :param name: Snapshot name
        :param description: Snapshot description
        :param quiesce: Whether to quiesce the backing when taking snapshot
        :return: Created snapshot entity reference
        """
        LOG.debug("Snapshotting backing: %(backing)s with name: %(name)s.",
                  {'backing': backing, 'name': name})
        task = self._session.invoke_api(self._session.vim,
                                        'CreateSnapshot_Task',
                                        backing, name=name,
                                        description=description,
                                        memory=False, quiesce=quiesce)
        LOG.debug("Initiated snapshot of volume backing: %(backing)s "
                  "named: %(name)s.", {'backing': backing, 'name': name})
        task_info = self._session.wait_for_task(task)
        snapshot = task_info.result
        LOG.info(_LI("Successfully created snapshot: %(snap)s for volume "
                     "backing: %(backing)s."),
                 {'snap': snapshot, 'backing': backing})
        return snapshot

    @staticmethod
    def _get_snapshot_from_tree(name, root):
        """Get snapshot by name from the snapshot tree root.

        :param name: Snapshot name
        :param root: Current root node in the snapshot tree
        :return: Snapshot in the tree with the given name, else None
        """
        if not root:
            return None
        if root.name == name:
            return root.snapshot
        if (not hasattr(root, 'childSnapshotList') or
                not root.childSnapshotList):
            # When root does not have children, the childSnapshotList attr
            # is sometimes missing; add an extra check here.
            return None
        for node in root.childSnapshotList:
            snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node)
            if snapshot:
                return snapshot

    def get_snapshot(self, backing, name):
        """Get snapshot of the backing with given name.

        :param backing: Reference to the backing entity
        :param name: Snapshot name
        :return: Snapshot entity of the backing with given name
        """
        snapshot = self._session.invoke_api(vim_util, 'get_object_property',
                                            self._session.vim, backing,
                                            'snapshot')
        if not snapshot or not snapshot.rootSnapshotList:
            return None
        for root in snapshot.rootSnapshotList:
            return VMwareVolumeOps._get_snapshot_from_tree(name, root)

    def snapshot_exists(self, backing):
        """Check if the given backing contains snapshots."""
        snapshot = self._session.invoke_api(vim_util, 'get_object_property',
                                            self._session.vim, backing,
                                            'snapshot')
        if snapshot is None or snapshot.rootSnapshotList is None:
            return False
        return len(snapshot.rootSnapshotList) != 0

    def delete_snapshot(self, backing, name):
        """Delete a given snapshot from volume backing.

        :param backing: Reference to the backing entity
        :param name: Snapshot name
        """
        LOG.debug("Deleting the snapshot: %(name)s from backing: "
                  "%(backing)s.", {'name': name, 'backing': backing})
        snapshot = self.get_snapshot(backing, name)
        if not snapshot:
            LOG.info(_LI("Did not find the snapshot: %(name)s for backing: "
                         "%(backing)s.
Need not delete anything."), {'name': name, 'backing': backing}) return task = self._session.invoke_api(self._session.vim, 'RemoveSnapshot_Task', snapshot, removeChildren=False) LOG.debug("Initiated snapshot: %(name)s deletion for backing: " "%(backing)s.", {'name': name, 'backing': backing}) self._session.wait_for_task(task) LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: " "%(backing)s."), {'backing': backing, 'name': name}) def _get_folder(self, backing): """Get parent folder of the backing. :param backing: Reference to the backing entity :return: Reference to parent folder of the backing entity """ return self._get_parent(backing, 'Folder') def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing, disk_type, host=None, resource_pool=None, extra_config=None): """Get the clone spec. :param datastore: Reference to datastore :param disk_move_type: Disk move type :param snapshot: Reference to snapshot :param backing: Source backing VM :param disk_type: Disk type of clone :param host: Target host :param resource_pool: Target resource pool :param extra_config: Key-value pairs to be written to backing's extra-config :return: Clone spec """ if disk_type is not None: disk_device = self._get_disk_device(backing) else: disk_device = None relocate_spec = self._get_relocate_spec(datastore, resource_pool, host, disk_move_type, disk_type, disk_device) cf = self._session.vim.client.factory clone_spec = cf.create('ns0:VirtualMachineCloneSpec') clone_spec.location = relocate_spec clone_spec.powerOn = False clone_spec.template = False clone_spec.snapshot = snapshot if extra_config: config_spec = cf.create('ns0:VirtualMachineConfigSpec') config_spec.extraConfig = self._get_extra_config_option_values( extra_config) clone_spec.config = config_spec LOG.debug("Spec for cloning the backing: %s.", clone_spec) return clone_spec def clone_backing(self, name, backing, snapshot, clone_type, datastore, disk_type=None, host=None, resource_pool=None, extra_config=None, folder=None): """Clone backing. If the clone_type is 'full', then a full clone of the source volume backing will be created. Else, if it is 'linked', then a linked clone of the source volume backing will be created. :param name: Name for the clone :param backing: Reference to the backing entity :param snapshot: Snapshot point from which the clone should be done :param clone_type: Whether a full clone or linked clone is to be made :param datastore: Reference to the datastore entity :param disk_type: Disk type of the clone :param host: Target host :param resource_pool: Target resource pool :param extra_config: Key-value pairs to be written to backing's extra-config :param folder: The location of the clone """ LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, " "clone type: %(type)s from snapshot: %(snap)s on " "resource pool: %(resource_pool)s, host: %(host)s, " "datastore: %(ds)s with disk type: %(disk_type)s.", {'back': backing, 'name': name, 'type': clone_type, 'snap': snapshot, 'ds': datastore, 'disk_type': disk_type, 'host': host, 'resource_pool': resource_pool}) if folder is None: # Use source folder as the location of the clone. 
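# [Editor's note, not part of the driver.] The branch just below maps the
# Cinder clone type onto the vSphere relocate disk-move type:
#
#     'linked' -> 'createNewChildDiskBacking' (delta disk on top of the
#                 snapshot; shares the parent's disk backing)
#     'full'   -> 'moveAllDiskBackingsAndDisallowSharing' (fully
#                 independent copy of all disk backings)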
folder = self._get_folder(backing) if clone_type == LINKED_CLONE_TYPE: disk_move_type = 'createNewChildDiskBacking' else: disk_move_type = 'moveAllDiskBackingsAndDisallowSharing' clone_spec = self._get_clone_spec( datastore, disk_move_type, snapshot, backing, disk_type, host=host, resource_pool=resource_pool, extra_config=extra_config) task = self._session.invoke_api(self._session.vim, 'CloneVM_Task', backing, folder=folder, name=name, spec=clone_spec) LOG.debug("Initiated clone of backing: %s.", name) task_info = self._session.wait_for_task(task) new_backing = task_info.result LOG.info(_LI("Successfully created clone: %s."), new_backing) return new_backing def _reconfigure_backing(self, backing, reconfig_spec): """Reconfigure backing VM with the given spec.""" LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.", {'backing': backing, 'spec': reconfig_spec}) reconfig_task = self._session.invoke_api(self._session.vim, "ReconfigVM_Task", backing, spec=reconfig_spec) LOG.debug("Task: %s created for reconfiguring backing VM.", reconfig_task) self._session.wait_for_task(reconfig_task) def attach_disk_to_backing(self, backing, size_in_kb, disk_type, adapter_type, vmdk_ds_file_path): """Attach an existing virtual disk to the backing VM. :param backing: reference to the backing VM :param size_in_kb: disk size in KB :param disk_type: virtual disk type :param adapter_type: disk adapter type :param vmdk_ds_file_path: datastore file path of the virtual disk to be attached """ LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: " "%(path)s with size (KB): %(size)d and adapter type: " "%(adapter_type)s.", {'backing': backing, 'path': vmdk_ds_file_path, 'size': size_in_kb, 'adapter_type': adapter_type}) cf = self._session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') specs = self._create_specs_for_disk_add(size_in_kb, disk_type, adapter_type, vmdk_ds_file_path) reconfig_spec.deviceChange = specs self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Backing VM: %s reconfigured with new disk.", backing) def _create_spec_for_disk_remove(self, disk_device): cf = self._session.vim.client.factory disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.operation = 'remove' disk_spec.device = disk_device return disk_spec def detach_disk_from_backing(self, backing, disk_device): """Detach the given disk from backing.""" LOG.debug("Reconfiguring backing VM: %(backing)s to remove disk: " "%(disk_device)s.", {'backing': backing, 'disk_device': disk_device}) cf = self._session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') spec = self._create_spec_for_disk_remove(disk_device) reconfig_spec.deviceChange = [spec] self._reconfigure_backing(backing, reconfig_spec) def rename_backing(self, backing, new_name): """Rename backing VM. :param backing: VM to be renamed :param new_name: new VM name """ LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."), {'backing': backing, 'new_name': new_name}) rename_task = self._session.invoke_api(self._session.vim, "Rename_Task", backing, newName=new_name) LOG.debug("Task: %s created for renaming VM.", rename_task) self._session.wait_for_task(rename_task) LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."), {'backing': backing, 'new_name': new_name}) def change_backing_profile(self, backing, profile_id): """Change storage profile of the backing VM. The current profile is removed if the new profile is None. 
""" LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:" " %(profile)s.", {'backing': backing, 'profile': profile_id}) cf = self._session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') if profile_id is None: vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec') vm_profile.dynamicType = 'profile' else: vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec') vm_profile.profileId = profile_id.uniqueId reconfig_spec.vmProfile = [vm_profile] self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Backing VM: %(backing)s reconfigured with new profile: " "%(profile)s.", {'backing': backing, 'profile': profile_id}) def update_backing_disk_uuid(self, backing, disk_uuid): """Update backing VM's disk UUID. :param backing: Reference to backing VM :param disk_uuid: New disk UUID """ LOG.debug("Reconfiguring backing VM: %(backing)s to change disk UUID " "to: %(disk_uuid)s.", {'backing': backing, 'disk_uuid': disk_uuid}) disk_device = self._get_disk_device(backing) disk_device.backing.uuid = disk_uuid cf = self._session.vim.client.factory disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.device = disk_device disk_spec.operation = 'edit' reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') reconfig_spec.deviceChange = [disk_spec] self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Backing VM: %(backing)s reconfigured with new disk UUID: " "%(disk_uuid)s.", {'backing': backing, 'disk_uuid': disk_uuid}) def delete_file(self, file_path, datacenter=None): """Delete file or folder on the datastore. :param file_path: Datastore path of the file or folder """ LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s.", {'file': file_path, 'dc': datacenter}) fileManager = self._session.vim.service_content.fileManager task = self._session.invoke_api(self._session.vim, 'DeleteDatastoreFile_Task', fileManager, name=file_path, datacenter=datacenter) LOG.debug("Initiated deletion via task: %s.", task) self._session.wait_for_task(task) LOG.info(_LI("Successfully deleted file: %s."), file_path) def create_datastore_folder(self, ds_name, folder_path, datacenter): """Creates a datastore folder. This method returns silently if the folder already exists. :param ds_name: datastore name :param folder_path: path of folder to create :param datacenter: datacenter of target datastore """ fileManager = self._session.vim.service_content.fileManager ds_folder_path = "[%s] %s" % (ds_name, folder_path) LOG.debug("Creating datastore folder: %s.", ds_folder_path) try: self._session.invoke_api(self._session.vim, 'MakeDirectory', fileManager, name=ds_folder_path, datacenter=datacenter) LOG.info(_LI("Created datastore folder: %s."), folder_path) except exceptions.FileAlreadyExistsException: LOG.debug("Datastore folder: %s already exists.", folder_path) def get_path_name(self, backing): """Get path name of the backing. :param backing: Reference to the backing entity :return: Path name of the backing """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'config.files').vmPathName def get_entity_name(self, entity): """Get name of the managed entity. 
:param entity: Reference to the entity :return: Name of the managed entity """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, entity, 'name') def _get_disk_device(self, backing): """Get the virtual device corresponding to disk.""" hardware_devices = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'config.hardware.device') if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if device.__class__.__name__ == "VirtualDisk": return device LOG.error(_LE("Virtual disk device of " "backing: %s not found."), backing) raise vmdk_exceptions.VirtualDiskNotFoundException() def get_vmdk_path(self, backing): """Get the vmdk file name of the backing. The vmdk file path of the backing returned is of the form: "[datastore1] my_folder/my_vm.vmdk" :param backing: Reference to the backing :return: VMDK file path of the backing """ disk_device = self._get_disk_device(backing) backing = disk_device.backing if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo": msg = _("Invalid disk backing: %s.") % backing.__class__.__name__ LOG.error(msg) raise AssertionError(msg) return backing.fileName def get_disk_size(self, backing): """Get disk size of the backing. :param backing: backing VM reference :return: disk size in bytes """ disk_device = self._get_disk_device(backing) return disk_device.capacityInKB * units.Ki def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type, disk_type): """Return spec for file-backed virtual disk creation.""" cf = self._session.vim.client.factory spec = cf.create('ns0:FileBackedVirtualDiskSpec') spec.capacityKb = size_in_kb spec.adapterType = VirtualDiskAdapterType.get_adapter_type( adapter_type) spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type) return spec def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb, adapter_type='busLogic', disk_type='preallocated'): """Create virtual disk with the given settings. :param dc_ref: datacenter reference :param vmdk_ds_file_path: datastore file path of the virtual disk :param size_in_kb: disk size in KB :param adapter_type: disk adapter type :param disk_type: vmdk type """ virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb, adapter_type, disk_type) LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec) disk_manager = self._session.vim.service_content.virtualDiskManager task = self._session.invoke_api(self._session.vim, 'CreateVirtualDisk_Task', disk_manager, name=vmdk_ds_file_path, datacenter=dc_ref, spec=virtual_disk_spec) LOG.debug("Task: %s created for virtual disk creation.", task) self._session.wait_for_task(task) LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec) def create_flat_extent_virtual_disk_descriptor( self, dc_ref, path, size_in_kb, adapter_type, disk_type): """Create descriptor for a single flat extent virtual disk. To create the descriptor, we create a virtual disk and delete its flat extent. 
:param dc_ref: reference to the datacenter :param path: descriptor datastore file path :param size_in_kb: size of the virtual disk in KB :param adapter_type: virtual disk adapter type :param disk_type: type of the virtual disk """ LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, " "adapter_type: %(adapter_type)s and disk_type: " "%(disk_type)s.", {'path': path.get_descriptor_ds_file_path(), 'size': size_in_kb, 'adapter_type': adapter_type, 'disk_type': disk_type }) self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(), size_in_kb, adapter_type, disk_type) self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref) LOG.debug("Created descriptor: %s.", path.get_descriptor_ds_file_path()) def copy_vmdk_file(self, src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path, dest_dc_ref=None): """Copy contents of the src vmdk file to dest vmdk file. :param src_dc_ref: Reference to datacenter containing src datastore :param src_vmdk_file_path: Source vmdk file path :param dest_vmdk_file_path: Destination vmdk file path :param dest_dc_ref: Reference to datacenter of dest datastore. If unspecified, source datacenter is used. """ LOG.debug('Copying disk: %(src)s to %(dest)s.', {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) dest_dc_ref = dest_dc_ref or src_dc_ref diskMgr = self._session.vim.service_content.virtualDiskManager task = self._session.invoke_api(self._session.vim, 'CopyVirtualDisk_Task', diskMgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=dest_dc_ref, force=True) LOG.debug("Initiated copying disk data via task: %s.", task) self._session.wait_for_task(task) LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s."), {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) def move_vmdk_file(self, src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path, dest_dc_ref=None): """Move the given vmdk file to another datastore location. :param src_dc_ref: Reference to datacenter containing src datastore :param src_vmdk_file_path: Source vmdk file path :param dest_vmdk_file_path: Destination vmdk file path :param dest_dc_ref: Reference to datacenter of dest datastore. If unspecified, source datacenter is used. """ LOG.debug('Moving disk: %(src)s to %(dest)s.', {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) dest_dc_ref = dest_dc_ref or src_dc_ref diskMgr = self._session.vim.service_content.virtualDiskManager task = self._session.invoke_api(self._session.vim, 'MoveVirtualDisk_Task', diskMgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=dest_dc_ref, force=True) self._session.wait_for_task(task) def delete_vmdk_file(self, vmdk_file_path, dc_ref): """Delete given vmdk files. :param vmdk_file_path: VMDK file path to be deleted :param dc_ref: Reference to datacenter that contains this VMDK file """ LOG.debug("Deleting vmdk file: %s.", vmdk_file_path) diskMgr = self._session.vim.service_content.virtualDiskManager task = self._session.invoke_api(self._session.vim, 'DeleteVirtualDisk_Task', diskMgr, name=vmdk_file_path, datacenter=dc_ref) LOG.debug("Initiated deleting vmdk file via task: %s.", task) self._session.wait_for_task(task) LOG.info(_LI("Deleted vmdk file: %s."), vmdk_file_path) def get_profile(self, backing): """Query storage profile associated with the given backing. 
:param backing: backing reference :return: profile name """ profile_ids = pbm.get_profiles(self._session, backing) if profile_ids: return pbm.get_profiles_by_ids(self._session, profile_ids)[0].name def _get_all_clusters(self): clusters = {} retrieve_result = self._session.invoke_api(vim_util, 'get_objects', self._session.vim, 'ClusterComputeResource', self._max_objects) while retrieve_result: if retrieve_result.objects: for cluster in retrieve_result.objects: name = urllib.parse.unquote(cluster.propSet[0].val) clusters[name] = cluster.obj retrieve_result = self.continue_retrieval(retrieve_result) return clusters def get_cluster_refs(self, names): """Get references to given clusters. :param names: list of cluster names :return: Dictionary of cluster names to references """ clusters_ref = {} clusters = self._get_all_clusters() for name in names: if name not in clusters: LOG.error(_LE("Compute cluster: %s not found."), name) raise vmdk_exceptions.ClusterNotFoundException(cluster=name) clusters_ref[name] = clusters[name] return clusters_ref def get_cluster_hosts(self, cluster): """Get hosts in the given cluster. :param cluster: cluster reference :return: references to hosts in the cluster """ hosts = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, cluster, 'host') host_refs = [] if hosts and hosts.ManagedObjectReference: host_refs.extend(hosts.ManagedObjectReference) return host_refs def get_entity_by_inventory_path(self, path): """Returns the managed object identified by the given inventory path. :param path: Inventory path :return: Reference to the managed object """ return self._session.invoke_api( self._session.vim, "FindByInventoryPath", self._session.vim.service_content.searchIndex, inventoryPath=path) def _get_disk_devices(self, vm): disk_devices = [] hardware_devices = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, vm, 'config.hardware.device') if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if device.__class__.__name__ == "VirtualDisk": disk_devices.append(device) return disk_devices def get_disk_device(self, vm, vmdk_path): """Get the disk device of the VM which corresponds to the given path. :param vm: VM reference :param vmdk_path: Datastore path of virtual disk :return: Matching disk device """ disk_devices = self._get_disk_devices(vm) for disk_device in disk_devices: backing = disk_device.backing if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo" and backing.fileName == vmdk_path): return disk_device cinder-8.0.0/cinder/volume/drivers/xio.py0000664000567000056710000020641612701406250021534 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 X-IO. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
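# [Editor's illustrative sketch, not part of the driver.] This driver
# talks to the ISE REST API through plain urllib with HTTP basic auth; a
# minimal standalone GET in the same style as _opener/_connect below
# (host and credentials are placeholders):
#
#     from oslo_serialization import base64
#     from six.moves import urllib
#
#     auth = base64.encode_as_text('admin:password')
#     req = urllib.request.Request(
#         'http://1.2.3.4/query', None,
#         {'Content-Type': 'application/xml; charset=utf-8',
#          'Authorization': 'Basic %s' % auth})
#     req.get_method = lambda: 'GET'
#     resp = urllib.request.urlopen(req)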
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_service import loopingcall
from six.moves import urllib

from cinder import context
from cinder import exception
from cinder.i18n import _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils

XIO_OPTS = [
    cfg.IntOpt('ise_storage_pool', default=1,
               help='Default storage pool for volumes.'),
    cfg.IntOpt('ise_raid', default=1,
               help='Raid level for ISE volumes.'),
    cfg.IntOpt('ise_connection_retries', default=5,
               help='Number of retries (per port) when establishing '
                    'connection to ISE management port.'),
    cfg.IntOpt('ise_retry_interval', default=1,
               help='Interval (secs) between retries.'),
    cfg.IntOpt('ise_completion_retries', default=30,
               help='Number of retries to get completion status after '
                    'issuing a command to ISE.'),
]

CONF = cfg.CONF
CONF.register_opts(XIO_OPTS)

LOG = logging.getLogger(__name__)

OPERATIONAL_STATUS = 'OPERATIONAL'
PREPARED_STATUS = 'PREPARED'
INVALID_STATUS = 'VALID'
NOTFOUND_STATUS = 'NOT FOUND'


# Raise exception for X-IO driver
def RaiseXIODriverException():
    raise exception.XIODriverException()


class XIOISEDriver(object):

    VERSION = '1.1.4'

    # Version Changes
    # 1.0.0   Base driver
    # 1.1.0   QoS, affinity, retype and thin support
    # 1.1.1   Fix retry loop (Bug 1429283)
    # 1.1.2   Fix host object deletion (Bug 1433450).
    # 1.1.3   Wait for volume/snapshot to be deleted.
    # 1.1.4   Force target_lun to be int (Bug 1549048)

    def __init__(self, *args, **kwargs):
        super(XIOISEDriver, self).__init__()
        LOG.debug("XIOISEDriver __init__ called.")
        self.configuration = kwargs.get('configuration', None)
        self.ise_primary_ip = ''
        self.ise_secondary_ip = ''
        self.newquery = 1
        self.ise_globalid = None
        self._vol_stats = {}

    def do_setup(self, context):
        LOG.debug("XIOISEDriver do_setup called.")
        self._get_ise_globalid()

    def check_for_setup_error(self):
        LOG.debug("XIOISEDriver check_for_setup_error called.")
        # The san_ip must always be set
        if self.configuration.san_ip == "":
            LOG.error(_LE("san ip must be configured!"))
            RaiseXIODriverException()
        # The san_login must always be set
        if self.configuration.san_login == "":
            LOG.error(_LE("san_login must be configured!"))
            RaiseXIODriverException()
        # The san_password must always be set
        if self.configuration.san_password == "":
            LOG.error(_LE("san_password must be configured!"))
            RaiseXIODriverException()
        return

    def _get_version(self):
        """Return driver version."""
        return self.VERSION

    def _send_query(self):
        """Do initial query to populate ISE global id."""
        body = ''
        url = '/query'
        resp = self._connect('GET', url, body)
        status = resp['status']
        if status != 200:
            # unsuccessful - this is fatal as we need the global id
            # to build REST requests.
            LOG.error(_LE("Array query failed - No response (%d)!"), status)
            RaiseXIODriverException()
        # Successfully fetched QUERY info. Parse out globalid along with
        # ipaddress for Controller 1 and Controller 2. We assign primary
        # ipaddress to use based on controller rank
        xml_tree = etree.fromstring(resp['content'])
        # first check that the ISE is running a supported FW version
        support = {}
        support['thin'] = False
        support['clones'] = False
        support['thin-clones'] = False
        self.configuration.ise_affinity = False
        self.configuration.ise_qos = False
        capabilities = xml_tree.find('capabilities')
        if capabilities is None:
            LOG.error(_LE("Array query failed. 
No capabilities in response!")) RaiseXIODriverException() for node in capabilities: if node.tag != 'capability': continue capability = node if capability.attrib['value'] == '49003': self.configuration.ise_affinity = True elif capability.attrib['value'] == '49004': self.configuration.ise_qos = True elif capability.attrib['value'] == '49005': support['thin'] = True elif capability.attrib['value'] == '49006': support['clones'] = True elif capability.attrib['value'] == '49007': support['thin-clones'] = True # Make sure ISE support necessary features if not support['clones']: LOG.error(_LE("ISE FW version is not compatible with OpenStack!")) RaiseXIODriverException() # set up thin provisioning support self.configuration.san_thin_provision = support['thin-clones'] # Fill in global id, primary and secondary ip addresses globalid = xml_tree.find('globalid') if globalid is None: LOG.error(_LE("Array query failed. No global id in XML response!")) RaiseXIODriverException() self.ise_globalid = globalid.text controllers = xml_tree.find('controllers') if controllers is None: LOG.error(_LE("Array query failed. No controllers in response!")) RaiseXIODriverException() for node in controllers: if node.tag != 'controller': continue # found a controller node controller = node ipaddress = controller.find('ipaddress') ranktag = controller.find('rank') if ipaddress is None: continue # found an ipaddress tag # make sure rank tag is present if ranktag is None: continue rank = ranktag.attrib['value'] # make sure rank value is present if rank is None: continue if rank == '1': # rank 1 means primary (xo) self.ise_primary_ip = ipaddress.text LOG.debug('Setting primary IP to: %s.', self.ise_primary_ip) elif rank == '0': # rank 0 means secondary (nxo) self.ise_secondary_ip = ipaddress.text LOG.debug('Setting secondary IP to: %s.', self.ise_secondary_ip) # clear out new query request flag on successful fetch of QUERY info. self.newquery = 0 return support def _get_ise_globalid(self): """Return ISE globalid.""" if self.ise_globalid is None or self.newquery == 1: # this call will populate globalid self._send_query() if self.ise_globalid is None: LOG.error(_LE("ISE globalid not set!")) RaiseXIODriverException() return self.ise_globalid def _get_ise_primary_ip(self): """Return Primary IP address to REST API.""" if self.ise_primary_ip == '': # Primary IP is set to ISE IP passed in from cinder.conf self.ise_primary_ip = self.configuration.san_ip if self.ise_primary_ip == '': # No IP - fatal. LOG.error(_LE("Primary IP must be set!")) RaiseXIODriverException() return self.ise_primary_ip def _get_ise_secondary_ip(self): """Return Secondary IP address to REST API.""" if self.ise_secondary_ip != '': return self.ise_secondary_ip def _get_uri_prefix(self): """Returns prefix in form of http(s)://1.2.3.4""" prefix = '' # figure out if http or https should be used if self.configuration.driver_use_ssl: prefix = 'https://' else: prefix = 'http://' # add the IP address prefix += self._get_ise_primary_ip() return prefix def _opener(self, method, url, body, header): """Wrapper to handle connection""" response = {} response['status'] = 0 response['content'] = '' response['location'] = '' # send the request req = urllib.request.Request(url, body, header) # Override method to allow GET, PUT, POST, DELETE req.get_method = lambda: method try: resp = urllib.request.urlopen(req) except urllib.error.HTTPError as err: # HTTP error. Return HTTP status and content and let caller # handle retries. 
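# [Editor's sketch, not part of the driver.] The surrounding try/except
# in _opener implements the usual urllib pattern: HTTPError carries an
# HTTP status and a readable body, while URLError means no HTTP response
# was received at all, e.g.:
#
#     try:
#         resp = urllib.request.urlopen(req)
#     except urllib.error.HTTPError as err:
#         status, content = err.code, err.read()
#     except urllib.error.URLError:
#         status = 0   # transport-level failure, no HTTP status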
response['status'] = err.code response['content'] = err.read() except urllib.error.URLError as err: # Connection failure. Return a status of 0 to indicate error. response['status'] = 0 else: # Successful. Return status code, content, # and location header, if present. response['status'] = resp.getcode() response['content'] = resp.read() response['location'] = \ resp.info().getheader('Content-Location', '') return response def _help_call_method(self, args, retry_count): """Helper function used for prepare clone and delete REST calls.""" # This function calls request method and URL and checks the response. # Certain cases allows for retries, while success and fatal status # will fall out and tell parent to break out of loop. # initialize remaining to one less than retries remaining = retry_count resp = self._send_cmd(args['method'], args['url'], args['arglist']) status = resp['status'] if (status == 400): reason = '' if 'content' in resp: reason = etree.fromstring(resp['content']) if reason is not None: reason = reason.text.upper() if INVALID_STATUS in reason: # Request failed with an invalid state. This can be because # source volume is in a temporary unavailable state. LOG.debug('REST call failed with invalid state: ' '%(method)s - %(status)d - %(reason)s', {'method': args['method'], 'status': status, 'reason': reason}) # Let parent check retry eligibility based on remaining retries remaining -= 1 else: # Fatal error. Set remaining to 0 to make caller exit loop. remaining = 0 else: # set remaining to 0 to make caller exit loop # original waiter will handle the difference between success and # fatal error based on resp['status']. remaining = 0 return (remaining, resp) def _help_call_opener(self, args, retry_count): """Helper function to call _opener.""" # This function calls _opener func and checks the response. # If response is 0 it will decrement the remaining retry count. # On successful connection it will set remaining to 0 to signal # parent to break out of loop. remaining = retry_count response = self._opener(args['method'], args['url'], args['body'], args['header']) if response['status'] != 0: # We are done remaining = 0 else: # Let parent check retry eligibility based on remaining retries. remaining -= 1 # Return remaining and response return (remaining, response) def _help_wait_for_status(self, args, retry_count): """Helper function to wait for specified volume status""" # This function calls _get_volume_info and checks the response. # If the status strings do not match the specified status it will # return the remaining retry count decremented by one. # On successful match it will set remaining to 0 to signal # parent to break out of loop. remaining = retry_count info = self._get_volume_info(args['name']) status = args['status_string'] if (status in info['string'] or status in info['details']): remaining = 0 else: # Let parent check retry eligibility based on remaining retries. remaining -= 1 # return remaining and volume info return (remaining, info) def _wait_for_completion(self, help_func, args, retry_count): """Helper function to wait for completion of passed function""" # Helper call loop function. 
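# [Editor's sketch, not part of the driver.] oslo.service's
# FixedIntervalLoopingCall re-invokes a function every `interval` seconds
# until the function raises LoopingCallDone(result); start(interval).wait()
# then returns that result. A minimal standalone example:
#
#     from oslo_service import loopingcall
#
#     def _poll():
#         if done():                        # hypothetical predicate
#             raise loopingcall.LoopingCallDone('finished')
#
#     timer = loopingcall.FixedIntervalLoopingCall(_poll)
#     result = timer.start(1).wait()        # returns 'finished'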
def _call_loop(loop_args): remaining = loop_args['retries'] args = loop_args['args'] LOG.debug("In call loop (%(remaining)d) %(args)s", {'remaining': remaining, 'args': args}) (remaining, response) = loop_args['func'](args, remaining) if remaining == 0: # We are done - let our caller handle response raise loopingcall.LoopingCallDone(response) loop_args['retries'] = remaining # Setup retries, interval and call wait function. loop_args = {} loop_args['retries'] = retry_count loop_args['func'] = help_func loop_args['args'] = args interval = self.configuration.ise_retry_interval timer = loopingcall.FixedIntervalLoopingCall(_call_loop, loop_args) return timer.start(interval).wait() def _connect(self, method, uri, body=''): """Set up URL and HTML and call _opener to make request""" url = '' # see if we need to add prefix # this call will force primary ip to be filled in as well prefix = self._get_uri_prefix() if prefix not in uri: url = prefix url += uri # set up headers for XML and Auth header = {'Content-Type': 'application/xml; charset=utf-8'} auth_key = ('%s:%s' % (self.configuration.san_login, self.configuration.san_password)) auth_key = base64.encode_as_text(auth_key) header['Authorization'] = 'Basic %s' % auth_key # We allow 5 retries on each IP address. If connection to primary # fails, secondary will be tried. If connection to secondary is # successful, the request flag for a new QUERY will be set. The QUERY # will be sent on next connection attempt to figure out which # controller is primary in case it has changed. LOG.debug("Connect: %(method)s %(url)s %(body)s", {'method': method, 'url': url, 'body': body}) using_secondary = 0 response = {} response['status'] = 0 response['location'] = '' response['content'] = '' primary_ip = self._get_ise_primary_ip() secondary_ip = self._get_ise_secondary_ip() # This will first try connecting to primary IP and then secondary IP. args = {} args['method'] = method args['url'] = url args['body'] = body args['header'] = header retries = self.configuration.ise_connection_retries while True: response = self._wait_for_completion(self._help_call_opener, args, retries) if response['status'] != 0: # Connection succeeded. Request new query on next connection # attempt if we used secondary ip to sort out who should be # primary going forward self.newquery = using_secondary return response # connection failed - check if we have any retries left if using_secondary == 0: # connection on primary ip failed # try secondary ip if secondary_ip is '': # if secondary is not setup yet, then assert # connection on primary and secondary ip failed LOG.error(_LE("Connection to %s failed and no secondary!"), primary_ip) RaiseXIODriverException() # swap primary for secondary ip in URL url = url.replace(primary_ip, secondary_ip) LOG.debug('Trying secondary IP URL: %s', url) using_secondary = 1 continue # connection failed on both IPs - break out of the loop break # connection on primary and secondary ip failed LOG.error(_LE("Could not connect to %(primary)s or %(secondary)s!"), {'primary': primary_ip, 'secondary': secondary_ip}) RaiseXIODriverException() def _param_string(self, params): """Turn (name, value) pairs into single param string""" param_str = [] for name, value in params.items(): if value != '': param_str.append("%s=%s" % (name, value)) return '&'.join(param_str) def _send_cmd(self, method, url, params): """Prepare HTTP request and call _connect""" # Add params to appropriate field based on method body = '' if method == 'GET': if params != {}: url += '?' 
+ self._param_string(params) body = '' elif method == 'POST': body = self._param_string(params) elif method == 'DELETE': body = '' elif method == 'PUT': if params != {}: url += '?' + self._param_string(params) # ISE REST API is mostly synchronous but has some asynchronous # streaks. Add retries to work around design of ISE REST API that # does not allow certain operations to be in process concurrently. # This is only an issue if lots of CREATE/DELETE/SNAPSHOT/CLONE ops # are issued in short order. return self._connect(method, url, body) def find_target_chap(self): """Return target CHAP settings""" chap = {} chap['chap_user'] = '' chap['chap_passwd'] = '' url = '/storage/arrays/%s/ionetworks' % (self._get_ise_globalid()) resp = self._send_cmd('GET', url, {}) status = resp['status'] if status != 200: LOG.warning(_LW("IOnetworks GET failed (%d)"), status) return chap # Got a good response. Parse out CHAP info. First check if CHAP is # enabled and if so parse out username and password. root = etree.fromstring(resp['content']) for element in root.iter(): if element.tag != 'chap': continue chapin = element.find('chapin') if chapin is None: continue if chapin.attrib['value'] != '1': continue # CHAP is enabled. Store username / pw chap_user = chapin.find('username') if chap_user is not None: chap['chap_user'] = chap_user.text chap_passwd = chapin.find('password') if chap_passwd is not None: chap['chap_passwd'] = chap_passwd.text break return chap def find_target_iqn(self, iscsi_ip): """Find Target IQN string""" url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid()) resp = self._send_cmd('GET', url, {}) status = resp['status'] if status != 200: # Not good. Throw an exception. LOG.error(_LE("Controller GET failed (%d)"), status) RaiseXIODriverException() # Good response. Parse out IQN that matches iscsi_ip_address # passed in from cinder.conf. IQN is 'hidden' in globalid field. root = etree.fromstring(resp['content']) for element in root.iter(): if element.tag != 'ioport': continue ipaddrs = element.find('ipaddresses') if ipaddrs is None: continue for ipaddr in ipaddrs.iter(): # Look for match with iscsi_ip_address if ipaddr is None or ipaddr.text != iscsi_ip: continue endpoint = element.find('endpoint') if endpoint is None: continue global_id = endpoint.find('globalid') if global_id is None: continue target_iqn = global_id.text if target_iqn != '': return target_iqn # Did not find a matching IQN. Upsetting. LOG.error(_LE("Failed to get IQN!")) RaiseXIODriverException() def find_target_wwns(self): """Return target WWN""" # Let's look for WWNs target_wwns = [] target = '' url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid()) resp = self._send_cmd('GET', url, {}) status = resp['status'] if status != 200: # Not good. Throw an exception. LOG.error(_LE("Controller GET failed (%d)"), status) RaiseXIODriverException() # Good response. Parse out globalid (WWN) of endpoint that matches # protocol and type (array). controllers = etree.fromstring(resp['content']) for controller in controllers.iter(): if controller.tag != 'controller': continue fcports = controller.find('fcports') if fcports is None: continue for fcport in fcports: if fcport.tag != 'fcport': continue wwn_tag = fcport.find('wwn') if wwn_tag is None: continue target = wwn_tag.text target_wwns.append(target) return target_wwns def _find_target_lun(self, location): """Return LUN for allocation specified in location string""" resp = self._send_cmd('GET', location, {}) status = resp['status'] if status != 200: # Not good. 
            # Throw an exception.
            LOG.error(_LE("Failed to get allocation information (%d)!"),
                      status)
            RaiseXIODriverException()
        # Good response. Parse out LUN.
        xml_tree = etree.fromstring(resp['content'])
        allocation = xml_tree.find('allocation')
        if allocation is not None:
            luntag = allocation.find('lun')
            if luntag is not None:
                return luntag.text
        # Did not find LUN. Throw an exception.
        LOG.error(_LE("Failed to get LUN information!"))
        RaiseXIODriverException()

    def _get_volume_info(self, vol_name):
        """Return status of ISE volume"""
        vol_info = {}
        vol_info['value'] = ''
        vol_info['string'] = NOTFOUND_STATUS
        vol_info['details'] = ''
        vol_info['location'] = ''
        vol_info['size'] = ''
        # Attempt to collect status value, string and details. Also pick up
        # location string from response. Location is used in REST calls
        # DELETE/SNAPSHOT/CLONE.
        # We ask for specific volume, so response should only contain one
        # volume entry.
        url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
        resp = self._send_cmd('GET', url, {'name': vol_name})
        if resp['status'] != 200:
            LOG.warning(_LW("Could not get status for %(name)s "
                            "(%(status)d)."),
                        {'name': vol_name, 'status': resp['status']})
            return vol_info
        # Good response. Parse down to Volume tag in list of one.
        root = etree.fromstring(resp['content'])
        volume_node = root.find('volume')
        if volume_node is None:
            LOG.warning(_LW("No volume node in XML content."))
            return vol_info
        # Location can be found as an attribute in the volume node tag.
        vol_info['location'] = volume_node.attrib['self']
        # Find status tag
        status = volume_node.find('status')
        if status is None:
            LOG.warning(_LW("No status payload for volume %s."), vol_name)
            return vol_info
        # Fill in value and string from status tag attributes.
        vol_info['value'] = status.attrib['value']
        vol_info['string'] = status.attrib['string'].upper()
        # Detailed status has its own list of tags.
        details = status.find('details')
        if details is not None:
            detail = details.find('detail')
            if detail is not None:
                vol_info['details'] = detail.text.upper()
        # Get volume size
        size_tag = volume_node.find('size')
        if size_tag is not None:
            vol_info['size'] = size_tag.text
        # Return value, string, details and location.
        return vol_info

    def _alloc_location(self, volume, hostname, delete=0):
        """Find location string for allocation. Also delete alloc per reqst"""
        location = ''
        url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid())
        resp = self._send_cmd('GET', url,
                              {'name': volume['name'], 'hostname': hostname})
        if resp['status'] != 200:
            LOG.error(_LE("Could not GET allocation information (%d)!"),
                      resp['status'])
            RaiseXIODriverException()
        # Good response. Find the allocation based on volume name.
        allocation_tree = etree.fromstring(resp['content'])
        for allocation in allocation_tree.iter():
            if allocation.tag != 'allocation':
                continue
            # verify volume name match
            volume_tag = allocation.find('volume')
            if volume_tag is None:
                continue
            volumename_tag = volume_tag.find('volumename')
            if volumename_tag is None:
                continue
            volumename = volumename_tag.text
            if volumename != volume['name']:
                continue
            # verified volume name match
            # find endpoints list
            endpoints = allocation.find('endpoints')
            if endpoints is None:
                continue
            # Found endpoints list. Found matching host if hostname
            # specified, otherwise any host is a go. This is used by the
            # caller to delete all allocations (presentations) to a volume.
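# [Editor's sketch, not part of the driver; the XML shape is inferred
# from the parsing code around it.] The walk below assumes allocation
# XML roughly like:
#
#     <allocation self="/storage/arrays/GID/allocations/1">
#       <volume><volumename>volume-1</volumename></volume>
#       <endpoints>
#         <endpoint><hostname>host-a</hostname></endpoint>
#       </endpoints>
#     </allocation>
#
# The location string returned is the allocation tag's 'self' attribute.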
for endpoint in endpoints.iter(): if hostname != '': hname_tag = endpoint.find('hostname') if hname_tag is None: continue if hname_tag.text.upper() != hostname.upper(): continue # Found hostname match. Location string is an attribute in # allocation tag. location = allocation.attrib['self'] # Delete allocation if requested. if delete == 1: self._send_cmd('DELETE', location, {}) location = '' break else: return location return location def _present_volume(self, volume, hostname, lun): """Present volume to host at specified LUN""" # Set up params with volume name, host name and target lun, if # specified. target_lun = lun params = {'volumename': volume['name'], 'hostname': hostname} # Fill in LUN if specified. if target_lun != '': params['lun'] = target_lun # Issue POST call to allocation. url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid()) resp = self._send_cmd('POST', url, params) status = resp['status'] if status == 201: LOG.info(_LI("Volume %s presented."), volume['name']) elif status == 409: LOG.warning(_LW("Volume %(name)s already presented (%(status)d)!"), {'name': volume['name'], 'status': status}) else: LOG.error(_LE("Failed to present volume %(name)s (%(status)d)!"), {'name': volume['name'], 'status': status}) RaiseXIODriverException() # Fetch LUN. In theory the LUN should be what caller requested. # We try to use shortcut as location comes back in Location header. # Make sure shortcut of using location header worked, if not ask # for it explicitly. location = resp['location'] if location == '': location = self._alloc_location(volume, hostname) # Find target LUN if location != '': target_lun = self._find_target_lun(location) # Success. Return target LUN. LOG.debug("Volume %(volume)s presented: %(host)s %(lun)s", {'volume': volume['name'], 'host': hostname, 'lun': target_lun}) return target_lun def find_allocations(self, hostname): """Find allocations for specified host""" alloc_cnt = 0 url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid()) resp = self._send_cmd('GET', url, {'hostname': hostname}) status = resp['status'] if status != 200: LOG.error(_LE("Failed to get allocation information: " "%(host)s (%(status)d)!"), {'host': hostname, 'status': status}) RaiseXIODriverException() # Good response. Count the number of allocations. allocation_tree = etree.fromstring(resp['content']) for allocation in allocation_tree.iter(): if allocation.tag != 'allocation': continue alloc_cnt += 1 return alloc_cnt def _find_host(self, endpoints): """Check if host entry exists on ISE based on endpoint (IQN, WWNs)""" # FC host might have more than one endpoint. ISCSI has only one. # Check if endpoints is a list, if so use first entry in list for # host search. if type(endpoints) is list: for endpoint in endpoints: ep = endpoint break else: ep = endpoints # Got single end point. Now make REST API call to fetch all hosts LOG.debug("find_host: Looking for host %s.", ep) host = {} host['name'] = '' host['type'] = '' host['locator'] = '' params = {} url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid()) resp = self._send_cmd('GET', url, params) status = resp['status'] if resp['status'] != 200: LOG.error(_LE("Could not find any hosts (%s)"), status) RaiseXIODriverException() # Good response. Try to match up a host based on end point string. host_tree = etree.fromstring(resp['content']) for host_node in host_tree.iter(): if host_node.tag != 'host': continue # Found a host tag. Check if end point matches. 
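# [Editor's note, not part of the driver.] _find_host normalizes its
# input above: an FC connector passes a list of WWPNs and only the first
# entry is used for the lookup, while iSCSI passes a single IQN string,
# e.g.:
#
#     endpoints = ['21000024ff30441c', '21000024ff30441d']
#     ep = endpoints[0] if isinstance(endpoints, list) else endpoints
#
# The global id comparison below is deliberately case-insensitive.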
endpoints_node = host_node.find('endpoints') if endpoints_node is None: continue for endpoint_node in endpoints_node.iter(): if endpoint_node.tag != 'endpoint': continue gid = endpoint_node.find('globalid') if gid is None: continue if gid.text.upper() != ep.upper(): continue # We have a match. Fill in host name, type and locator host['locator'] = host_node.attrib['self'] type_tag = host_node.find('type') if type_tag is not None: host['type'] = type_tag.text name_tag = host_node.find('name') if name_tag is not None: host['name'] = name_tag.text break # This will be filled in or '' based on findings above. return host def _create_host(self, hostname, endpoints): """Create host entry on ISE for connector""" # Create endpoint list for REST call. endpoint_str = '' if type(endpoints) is list: ep_str = [] ec = 0 for endpoint in endpoints: if ec == 0: ep_str.append("%s" % (endpoint)) else: ep_str.append("endpoint=%s" % (endpoint)) ec += 1 endpoint_str = '&'.join(ep_str) else: endpoint_str = endpoints # Log host creation. LOG.debug("Create host %(host)s; %(endpoint)s", {'host': hostname, 'endpoint': endpoint_str}) # Issue REST call to create host entry of OpenStack type. params = {'name': hostname, 'endpoint': endpoint_str, 'os': 'openstack'} url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid()) resp = self._send_cmd('POST', url, params) status = resp['status'] if status != 201 and status != 409: LOG.error(_LE("POST for host create failed (%s)!"), status) RaiseXIODriverException() # Successfully created host entry. Return host name. return hostname def _create_clone(self, volume, clone, clone_type): """Create clone worker function""" # This function is called for both snapshot and clone # clone_type specifies what type is being processed # Creating snapshots and clones is a two step process on current ISE # FW. First snapshot/clone is prepared and then created. volume_name = '' if clone_type == 'snapshot': volume_name = volume['volume_name'] elif clone_type == 'clone': volume_name = volume['name'] args = {} # Make sure source volume is ready. This is another case where # we have to work around asynchronous behavior in ISE REST API. args['name'] = volume_name args['status_string'] = OPERATIONAL_STATUS retries = self.configuration.ise_completion_retries vol_info = self._wait_for_completion(self._help_wait_for_status, args, retries) if vol_info['value'] == '0': LOG.debug('Source volume %s ready.', volume_name) else: LOG.error(_LE("Source volume %s not ready!"), volume_name) RaiseXIODriverException() # Prepare snapshot # get extra_specs and qos specs from source volume # these functions fill in default values for entries used below ctxt = context.get_admin_context() type_id = volume['volume_type_id'] extra_specs = self._get_extra_specs(ctxt, type_id) LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s", {'volume_name': volume['name'], 'extra_specs': extra_specs}) qos = self._get_qos_specs(ctxt, type_id) # Wait until snapshot/clone is prepared. 
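# [Editor's sketch, not part of the driver.] The two-phase flow below
# is: POST to the source volume's location to *prepare* the clone, poll
# until the detailed status reports PREPARED, then PUT {type: 'true'} to
# *commit*, and finally poll until the clone is OPERATIONAL. In
# pseudo-steps (paths are placeholders):
#
#     POST /storage/.../volumes/<src>  name=<clone>&type=clone   -> 202
#     ... wait for 'PREPARED' in status details ...
#     PUT  <clone location>  clone=true                          -> 201
#     ... wait for 'OPERATIONAL' in status string ...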
        args['method'] = 'POST'
        args['url'] = vol_info['location']
        args['status'] = 202
        args['arglist'] = {'name': clone['name'],
                           'type': clone_type,
                           'affinity': extra_specs['affinity'],
                           'IOPSmin': qos['minIOPS'],
                           'IOPSmax': qos['maxIOPS'],
                           'IOPSburst': qos['burstIOPS']}
        retries = self.configuration.ise_completion_retries
        resp = self._wait_for_completion(self._help_call_method, args, retries)
        if resp['status'] != 202:
            # Clone prepare failed.
            LOG.error(_LE("Prepare clone failed for %s."), clone['name'])
            RaiseXIODriverException()
        # Clone prepare request accepted. Do not continue until the clone
        # is prepared.
        args['name'] = clone['name']
        args['status_string'] = PREPARED_STATUS
        retries = self.configuration.ise_completion_retries
        clone_info = self._wait_for_completion(self._help_wait_for_status, args, retries)
        if PREPARED_STATUS in clone_info['details']:
            LOG.debug('Clone %s prepared.', clone['name'])
        else:
            LOG.error(_LE("Clone %s not in prepared state!"), clone['name'])
            RaiseXIODriverException()
        # Clone prepared, now commit the create.
        resp = self._send_cmd('PUT', clone_info['location'], {clone_type: 'true'})
        if resp['status'] != 201:
            LOG.error(_LE("Commit clone failed: %(name)s (%(status)d)!"),
                      {'name': clone['name'], 'status': resp['status']})
            RaiseXIODriverException()
        # Clone create request accepted. Make sure not to return until the
        # clone is operational.
        args['name'] = clone['name']
        args['status_string'] = OPERATIONAL_STATUS
        retries = self.configuration.ise_completion_retries
        clone_info = self._wait_for_completion(self._help_wait_for_status, args, retries)
        if OPERATIONAL_STATUS in clone_info['string']:
            LOG.info(_LI("Clone %s created."), clone['name'])
        else:
            LOG.error(_LE("Commit failed for %s!"), clone['name'])
            RaiseXIODriverException()
        return

    def _fill_in_available_capacity(self, node, pool):
        """Fill in free capacity info for pool."""
        available = node.find('available')
        if available is None:
            pool['free_capacity_gb'] = 0
            return pool
        pool['free_capacity_gb'] = int(available.get('total'))
        # Fill in separate RAID level capacities.
        byred = available.find('byredundancy')
        if byred is None:
            return pool
        raid = byred.find('raid-0')
        if raid is not None:
            pool['free_capacity_gb_raid_0'] = int(raid.text)
        raid = byred.find('raid-1')
        if raid is not None:
            pool['free_capacity_gb_raid_1'] = int(raid.text)
        raid = byred.find('raid-5')
        if raid is not None:
            pool['free_capacity_gb_raid_5'] = int(raid.text)
        raid = byred.find('raid-6')
        if raid is not None:
            pool['free_capacity_gb_raid_6'] = int(raid.text)
        return pool

    def _fill_in_used_capacity(self, node, pool):
        """Fill in used capacity info for pool."""
        used = node.find('used')
        if used is None:
            pool['allocated_capacity_gb'] = 0
            return pool
        pool['allocated_capacity_gb'] = int(used.get('total'))
        # Fill in separate RAID level capacities.
        byred = used.find('byredundancy')
        if byred is None:
            return pool
        raid = byred.find('raid-0')
        if raid is not None:
            pool['allocated_capacity_gb_raid_0'] = int(raid.text)
        raid = byred.find('raid-1')
        if raid is not None:
            pool['allocated_capacity_gb_raid_1'] = int(raid.text)
        raid = byred.find('raid-5')
        if raid is not None:
            pool['allocated_capacity_gb_raid_5'] = int(raid.text)
        raid = byred.find('raid-6')
        if raid is not None:
            pool['allocated_capacity_gb_raid_6'] = int(raid.text)
        return pool

    def _get_pools(self):
        """Return information about all pools on ISE."""
        pools = []
        pool = {}
        vol_cnt = 0
        url = '/storage/pools'
        resp = self._send_cmd('GET', url, {})
        status = resp['status']
        if status != 200:
            # Request failed. Return what we have, which isn't much.
LOG.warning(_LW("Could not get pool information (%s)!"), status) return (pools, vol_cnt) # Parse out available (free) and used. Add them up to get total. xml_tree = etree.fromstring(resp['content']) for child in xml_tree: if child.tag != 'pool': continue # Fill in ise pool name tag = child.find('name') if tag is not None: pool['pool_ise_name'] = tag.text # Fill in globalid tag = child.find('globalid') if tag is not None: pool['globalid'] = tag.text # Fill in pool name tag = child.find('id') if tag is not None: pool['pool_name'] = tag.text # Fill in pool status tag = child.find('status') if tag is not None: pool['status'] = tag.attrib['string'] details = tag.find('details') if details is not None: detail = details.find('detail') if detail is not None: pool['status_details'] = detail.text # Fill in available capacity pool = self._fill_in_available_capacity(child, pool) # Fill in allocated capacity pool = self._fill_in_used_capacity(child, pool) # Fill in media health and type media = child.find('media') if media is not None: medium = media.find('medium') if medium is not None: health = medium.find('health') if health is not None: pool['health'] = int(health.text) tier = medium.find('tier') if tier is not None: pool['media'] = tier.attrib['string'] cap = child.find('IOPSmincap') if cap is not None: pool['minIOPS_capacity'] = cap.text cap = child.find('IOPSmaxcap') if cap is not None: pool['maxIOPS_capacity'] = cap.text cap = child.find('IOPSburstcap') if cap is not None: pool['burstIOPS_capacity'] = cap.text pool['total_capacity_gb'] = (int(pool['free_capacity_gb'] + pool['allocated_capacity_gb'])) pool['QoS_support'] = self.configuration.ise_qos pool['reserved_percentage'] = 0 pools.append(pool) # count volumes volumes = child.find('volumes') if volumes is not None: vol_cnt += len(volumes) return (pools, vol_cnt) def _update_volume_stats(self): """Update storage information""" self._send_query() data = {} data["vendor_name"] = 'X-IO' data["driver_version"] = self._get_version() if self.configuration.volume_backend_name: backend_name = self.configuration.volume_backend_name else: backend_name = self.__class__.__name__ data["volume_backend_name"] = backend_name data['reserved_percentage'] = 0 # Get total and free capacity. 
(pools, vol_cnt) = self._get_pools() total_cap = 0 free_cap = 0 # fill in global capability support # capacity for pool in pools: total_cap += int(pool['total_capacity_gb']) free_cap += int(pool['free_capacity_gb']) data['total_capacity_gb'] = int(total_cap) data['free_capacity_gb'] = int(free_cap) # QoS data['QoS_support'] = self.configuration.ise_qos # Volume affinity data['affinity'] = self.configuration.ise_affinity # Thin provisioning data['thin'] = self.configuration.san_thin_provision data['pools'] = pools data['active_volumes'] = int(vol_cnt) return data def get_volume_stats(self, refresh=False): """Get volume stats.""" if refresh: self._vol_stats = self._update_volume_stats() LOG.debug("ISE get_volume_stats (total, free): %(total)s, %(free)s", {'total': self._vol_stats['total_capacity_gb'], 'free': self._vol_stats['free_capacity_gb']}) return self._vol_stats def _get_extra_specs(self, ctxt, type_id): """Get extra specs from volume type.""" specs = {} specs['affinity'] = '' specs['alloctype'] = '' specs['pool'] = self.configuration.ise_storage_pool specs['raid'] = self.configuration.ise_raid if type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) extra_specs = volume_type.get('extra_specs') # Parse out RAID, pool and affinity values for key, value in extra_specs.items(): subkey = '' if ':' in key: fields = key.split(':') key = fields[0] subkey = fields[1] if key.upper() == 'Feature'.upper(): if subkey.upper() == 'Raid'.upper(): specs['raid'] = value elif subkey.upper() == 'Pool'.upper(): specs['pool'] = value elif key.upper() == 'Affinity'.upper(): # Only fill this in if ISE FW supports volume affinity if self.configuration.ise_affinity: if subkey.upper() == 'Type'.upper(): specs['affinity'] = value elif key.upper() == 'Alloc'.upper(): # Only fill this in if ISE FW supports thin provisioning if self.configuration.san_thin_provision: if subkey.upper() == 'Type'.upper(): specs['alloctype'] = value return specs def _get_qos_specs(self, ctxt, type_id): """Get QoS specs from volume type.""" specs = {} specs['minIOPS'] = '' specs['maxIOPS'] = '' specs['burstIOPS'] = '' if type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] else: kvs = volume_type.get('extra_specs') # Parse out min, max and burst values for key, value in kvs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key.upper() == 'minIOPS'.upper(): specs['minIOPS'] = value elif key.upper() == 'maxIOPS'.upper(): specs['maxIOPS'] = value elif key.upper() == 'burstIOPS'.upper(): specs['burstIOPS'] = value return specs def create_volume(self, volume): """Create requested volume""" LOG.debug("X-IO create_volume called.") # get extra_specs and qos based on volume type # these functions fill in default values for entries used below ctxt = context.get_admin_context() type_id = volume['volume_type_id'] extra_specs = self._get_extra_specs(ctxt, type_id) LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s", {'volume_name': volume['name'], 'extra_specs': extra_specs}) qos = self._get_qos_specs(ctxt, type_id) # Make create call url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid()) resp = self._send_cmd('POST', url, {'name': volume['name'], 'size': volume['size'], 'pool': extra_specs['pool'], 'redundancy': extra_specs['raid'], 'affinity': extra_specs['affinity'], 'alloctype': extra_specs['alloctype'], 'IOPSmin': 
                                          qos['minIOPS'],
                                          'IOPSmax': qos['maxIOPS'],
                                          'IOPSburst': qos['burstIOPS']})
        if resp['status'] != 201:
            LOG.error(_LE("Failed to create volume: %(name)s (%(status)s)"),
                      {'name': volume['name'], 'status': resp['status']})
            RaiseXIODriverException()
        # Good response. Make sure the volume is in the operational state
        # before returning. Volume creation completes asynchronously.
        args = {}
        args['name'] = volume['name']
        args['status_string'] = OPERATIONAL_STATUS
        retries = self.configuration.ise_completion_retries
        vol_info = self._wait_for_completion(self._help_wait_for_status, args, retries)
        if OPERATIONAL_STATUS in vol_info['string']:
            # Ready.
            LOG.info(_LI("Volume %s created"), volume['name'])
        else:
            LOG.error(_LE("Failed to create volume %s."), volume['name'])
            RaiseXIODriverException()
        return

    def create_cloned_volume(self, volume, src_vref):
        """Create clone"""
        LOG.debug("X-IO create_cloned_volume called.")
        self._create_clone(src_vref, volume, 'clone')

    def create_snapshot(self, snapshot):
        """Create snapshot"""
        LOG.debug("X-IO create_snapshot called.")
        # Creating a snapshot uses the same interface as the clone
        # operation on the ISE. The clone type ('snapshot' or 'clone')
        # tells the ISE what kind of operation is requested.
        self._create_clone(snapshot, snapshot, 'snapshot')

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create volume from snapshot"""
        LOG.debug("X-IO create_volume_from_snapshot called.")
        # ISE snapshots are just like volumes, so this is a clone operation.
        self._create_clone(snapshot, volume, 'clone')

    def _delete_volume(self, volume):
        """Delete specified volume"""
        # First unpresent the volume from all hosts.
        self._alloc_location(volume, '', 1)
        # Get volume status. The location string for the volume comes back
        # in the response. It is used for the DELETE call below.
        vol_info = self._get_volume_info(volume['name'])
        if vol_info['location'] == '':
            LOG.warning(_LW("%s not found!"), volume['name'])
            return
        # Make DELETE call.
        args = {}
        args['method'] = 'DELETE'
        args['url'] = vol_info['location']
        args['arglist'] = {}
        args['status'] = 204
        retries = self.configuration.ise_completion_retries
        resp = self._wait_for_completion(self._help_call_method, args, retries)
        if resp['status'] != 204:
            LOG.warning(_LW("DELETE call failed for %s!"), volume['name'])
            return
        # DELETE call successful, now wait for completion. We do that by
        # waiting for the REST call to return Volume Not Found.
        args['method'] = ''
        args['url'] = ''
        args['name'] = volume['name']
        args['status_string'] = NOTFOUND_STATUS
        retries = self.configuration.ise_completion_retries
        vol_info = self._wait_for_completion(self._help_wait_for_status, args, retries)
        if NOTFOUND_STATUS in vol_info['string']:
            # Volume no longer present on the backend.
            LOG.info(_LI("Successfully deleted %s."), volume['name'])
            return
        # If we get here, the volume is still present on the backend.
        LOG.error(_LE("Timed out deleting %s!"), volume['name'])
        return

    def delete_volume(self, volume):
        """Delete specified volume"""
        LOG.debug("X-IO delete_volume called.")
        self._delete_volume(volume)

    def delete_snapshot(self, snapshot):
        """Delete snapshot"""
        LOG.debug("X-IO delete_snapshot called.")
        # Deleting a snapshot is identical to deleting a volume on the ISE.
        self._delete_volume(snapshot)

    def _modify_volume(self, volume, new_attributes):
        # Get volume status. The location string for the volume comes back
        # in the response. It is used for the PUT call below.
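        # _modify_volume issues a PUT against the volume's self-location
        # with whatever attributes the caller wants changed; the callers in
        # this file pass dicts such as (values hypothetical):
        #
        #   {'size': 20}                                  # extend_volume
        #   {'IOPSmin': '100', 'IOPSmax': '1000',
        #    'IOPSburst': '2000'}                         # retype
        #   {'name': 'volume-...', 'IOPSmin': ...}        # manage_existing
        #
        # A 201 response means the modification was applied.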
vol_info = self._get_volume_info(volume['name']) if vol_info['location'] == '': LOG.error(_LE("modify volume: %s does not exist!"), volume['name']) RaiseXIODriverException() # Make modify volume REST call using PUT. # Location from above is used as identifier. resp = self._send_cmd('PUT', vol_info['location'], new_attributes) status = resp['status'] if status == 201: LOG.debug("Volume %s modified.", volume['name']) return True LOG.error(_LE("Modify volume PUT failed: %(name)s (%(status)d)."), {'name': volume['name'], 'status': status}) RaiseXIODriverException() def extend_volume(self, volume, new_size): """Extend volume to new size.""" LOG.debug("extend_volume called") ret = self._modify_volume(volume, {'size': new_size}) if ret is True: LOG.info(_LI("volume %(name)s extended to %(size)d."), {'name': volume['name'], 'size': new_size}) return def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" LOG.debug("X-IO retype called") qos = self._get_qos_specs(ctxt, new_type['id']) ret = self._modify_volume(volume, {'IOPSmin': qos['minIOPS'], 'IOPSmax': qos['maxIOPS'], 'IOPSburst': qos['burstIOPS']}) if ret is True: LOG.info(_LI("Volume %s retyped."), volume['name']) return True def manage_existing(self, volume, ise_volume_ref): """Convert an existing ISE volume to a Cinder volume.""" LOG.debug("X-IO manage_existing called") if 'source-name' not in ise_volume_ref: LOG.error(_LE("manage_existing: No source-name in ref!")) RaiseXIODriverException() # copy the source-name to 'name' for modify volume use ise_volume_ref['name'] = ise_volume_ref['source-name'] ctxt = context.get_admin_context() qos = self._get_qos_specs(ctxt, volume['volume_type_id']) ret = self._modify_volume(ise_volume_ref, {'name': volume['name'], 'IOPSmin': qos['minIOPS'], 'IOPSmax': qos['maxIOPS'], 'IOPSburst': qos['burstIOPS']}) if ret is True: LOG.info(_LI("Volume %s converted."), ise_volume_ref['name']) return ret def manage_existing_get_size(self, volume, ise_volume_ref): """Get size of an existing ISE volume.""" LOG.debug("X-IO manage_existing_get_size called") if 'source-name' not in ise_volume_ref: LOG.error(_LE("manage_existing_get_size: No source-name in ref!")) RaiseXIODriverException() ref_name = ise_volume_ref['source-name'] # get volume status including size vol_info = self._get_volume_info(ref_name) if vol_info['location'] == '': LOG.error(_LE("manage_existing_get_size: %s does not exist!"), ref_name) RaiseXIODriverException() return int(vol_info['size']) def unmanage(self, volume): """Remove Cinder management from ISE volume""" LOG.debug("X-IO unmanage called") vol_info = self._get_volume_info(volume['name']) if vol_info['location'] == '': LOG.error(_LE("unmanage: Volume %s does not exist!"), volume['name']) RaiseXIODriverException() # This is a noop. ISE does not store any Cinder specific information. def ise_present(self, volume, hostname_in, endpoints): """Set up presentation for volume and specified connector""" LOG.debug("X-IO ise_present called.") # Create host entry on ISE if necessary. # Check to see if host entry already exists. # Create if not found host = self._find_host(endpoints) if host['name'] == '': # host not found, so create new host entry # Use host name if filled in. If blank, ISE will make up a name. self._create_host(hostname_in, endpoints) host = self._find_host(endpoints) if host['name'] == '': # host still not found, this is fatal. 
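# If the host entry still cannot be found after creating it, there is
# nothing to present the volume to, so fail here rather than issue an
# allocation POST that cannot succeed.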
LOG.error(_LE("Host could not be found!")) RaiseXIODriverException() elif host['type'].upper() != 'OPENSTACK': # Make sure host type is marked as OpenStack host params = {'os': 'openstack'} resp = self._send_cmd('PUT', host['locator'], params) status = resp['status'] if status != 201 and status != 409: LOG.error(_LE("Host PUT failed (%s)."), status) RaiseXIODriverException() # We have a host object. target_lun = '' # Present volume to host. target_lun = self._present_volume(volume, host['name'], target_lun) # Fill in target information. data = {} data['target_lun'] = int(target_lun) data['volume_id'] = volume['id'] return data def ise_unpresent(self, volume, endpoints): """Delete presentation between volume and connector""" LOG.debug("X-IO ise_unpresent called.") # Delete allocation uses host name. Go find it based on endpoints. host = self._find_host(endpoints) if host['name'] != '': # Delete allocation based on hostname and volume. self._alloc_location(volume, host['name'], 1) return host['name'] def create_export(self, context, volume): LOG.debug("X-IO create_export called.") def ensure_export(self, context, volume): LOG.debug("X-IO ensure_export called.") def remove_export(self, context, volume): LOG.debug("X-IO remove_export called.") def local_path(self, volume): LOG.debug("X-IO local_path called.") def delete_host(self, endpoints): """Delete ISE host object""" host = self._find_host(endpoints) if host['locator'] != '': # Delete host self._send_cmd('DELETE', host['locator'], {}) LOG.debug("X-IO: host %s deleted", host['name']) # Protocol specific classes for entry. They are wrappers around base class # above and every external API resuslts in a call to common function in base # class. class XIOISEISCSIDriver(driver.ISCSIDriver): """Requires ISE Running FW version 3.1.0 or higher""" def __init__(self, *args, **kwargs): super(XIOISEISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(XIO_OPTS) self.configuration.append_config_values(san.san_opts) # The iscsi_ip_address must always be set. if self.configuration.iscsi_ip_address == '': LOG.error(_LE("iscsi_ip_address must be set!")) RaiseXIODriverException() # Setup common driver self.driver = XIOISEDriver(configuration=self.configuration) def do_setup(self, context): return self.driver.do_setup(context) def check_for_setup_error(self): return self.driver.check_for_setup_error() def local_path(self, volume): return self.driver.local_path(volume) def get_volume_stats(self, refresh=False): data = self.driver.get_volume_stats(refresh) data["storage_protocol"] = 'iSCSI' return data def create_volume(self, volume): self.driver.create_volume(volume) # Volume created successfully. Fill in CHAP information. 
model_update = {} chap = self.driver.find_target_chap() if chap['chap_user'] != '': model_update['provider_auth'] = 'CHAP %s %s' % \ (chap['chap_user'], chap['chap_passwd']) else: model_update['provider_auth'] = '' return model_update def create_cloned_volume(self, volume, src_vref): return self.driver.create_cloned_volume(volume, src_vref) def create_volume_from_snapshot(self, volume, snapshot): return self.driver.create_volume_from_snapshot(volume, snapshot) def delete_volume(self, volume): return self.driver.delete_volume(volume) def extend_volume(self, volume, new_size): return self.driver.extend_volume(volume, new_size) def retype(self, ctxt, volume, new_type, diff, host): return self.driver.retype(ctxt, volume, new_type, diff, host) def manage_existing(self, volume, ise_volume_ref): ret = self.driver.manage_existing(volume, ise_volume_ref) if ret is True: # Volume converted successfully. Fill in CHAP information. model_update = {} chap = {} chap = self.driver.find_target_chap() if chap['chap_user'] != '': model_update['provider_auth'] = 'CHAP %s %s' % \ (chap['chap_user'], chap['chap_passwd']) else: model_update['provider_auth'] = '' return model_update def manage_existing_get_size(self, volume, ise_volume_ref): return self.driver.manage_existing_get_size(volume, ise_volume_ref) def unmanage(self, volume): return self.driver.unmanage(volume) def initialize_connection(self, volume, connector): hostname = '' if 'host' in connector: hostname = connector['host'] data = self.driver.ise_present(volume, hostname, connector['initiator']) # find IP for target data['target_portal'] = \ '%s:3260' % (self.configuration.iscsi_ip_address) # set IQN for target data['target_discovered'] = False data['target_iqn'] = \ self.driver.find_target_iqn(self.configuration.iscsi_ip_address) # Fill in authentication method (CHAP) if 'provider_auth' in volume: auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() data['auth_method'] = auth_method data['auth_username'] = auth_username data['auth_password'] = auth_secret return {'driver_volume_type': 'iscsi', 'data': data} def terminate_connection(self, volume, connector, **kwargs): hostname = self.driver.ise_unpresent(volume, connector['initiator']) alloc_cnt = 0 if hostname != '': alloc_cnt = self.driver.find_allocations(hostname) if alloc_cnt == 0: # delete host object self.driver.delete_host(connector['initiator']) def create_snapshot(self, snapshot): return self.driver.create_snapshot(snapshot) def delete_snapshot(self, snapshot): return self.driver.delete_snapshot(snapshot) def create_export(self, context, volume, connector): return self.driver.create_export(context, volume) def ensure_export(self, context, volume): return self.driver.ensure_export(context, volume) def remove_export(self, context, volume): return self.driver.remove_export(context, volume) class XIOISEFCDriver(driver.FibreChannelDriver): """Requires ISE Running FW version 2.8.0 or higher""" def __init__(self, *args, **kwargs): super(XIOISEFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(XIO_OPTS) self.configuration.append_config_values(san.san_opts) self.driver = XIOISEDriver(configuration=self.configuration) def do_setup(self, context): return self.driver.do_setup(context) def check_for_setup_error(self): return self.driver.check_for_setup_error() def local_path(self, volume): return self.driver.local_path(volume) def get_volume_stats(self, refresh=False): data = self.driver.get_volume_stats(refresh) 
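# Same stats dict as the common XIOISEDriver; each protocol wrapper only
# stamps its own storage_protocol value before reporting.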
data["storage_protocol"] = 'fibre_channel' return data def create_volume(self, volume): return self.driver.create_volume(volume) def create_cloned_volume(self, volume, src_vref): return self.driver.create_cloned_volume(volume, src_vref) def create_volume_from_snapshot(self, volume, snapshot): return self.driver.create_volume_from_snapshot(volume, snapshot) def delete_volume(self, volume): return self.driver.delete_volume(volume) def extend_volume(self, volume, new_size): return self.driver.extend_volume(volume, new_size) def retype(self, ctxt, volume, new_type, diff, host): return self.driver.retype(ctxt, volume, new_type, diff, host) def manage_existing(self, volume, ise_volume_ref): return self.driver.manage_existing(volume, ise_volume_ref) def manage_existing_get_size(self, volume, ise_volume_ref): return self.driver.manage_existing_get_size(volume, ise_volume_ref) def unmanage(self, volume): return self.driver.unmanage(volume) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): hostname = '' if 'host' in connector: hostname = connector['host'] data = self.driver.ise_present(volume, hostname, connector['wwpns']) data['target_discovered'] = True # set wwns for target target_wwns = self.driver.find_target_wwns() data['target_wwn'] = target_wwns # build target initiator map target_map = {} for initiator in connector['wwpns']: target_map[initiator] = target_wwns data['initiator_target_map'] = target_map return {'driver_volume_type': 'fibre_channel', 'data': data} @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): # now we are ready to tell ISE to delete presentations hostname = self.driver.ise_unpresent(volume, connector['wwpns']) # set target_wwn and initiator_target_map only if host # has no more presentations data = {} alloc_cnt = 0 if hostname != '': alloc_cnt = self.driver.find_allocations(hostname) if alloc_cnt == 0: target_wwns = self.driver.find_target_wwns() data['target_wwn'] = target_wwns # build target initiator map target_map = {} for initiator in connector['wwpns']: target_map[initiator] = target_wwns data['initiator_target_map'] = target_map # delete host object self.driver.delete_host(connector['wwpns']) return {'driver_volume_type': 'fibre_channel', 'data': data} def create_snapshot(self, snapshot): return self.driver.create_snapshot(snapshot) def delete_snapshot(self, snapshot): return self.driver.delete_snapshot(snapshot) def create_export(self, context, volume, connector): return self.driver.create_export(context, volume) def ensure_export(self, context, volume): return self.driver.ensure_export(context, volume) def remove_export(self, context, volume): return self.driver.remove_export(context, volume) cinder-8.0.0/cinder/volume/drivers/glusterfs.py0000664000567000056710000004204112701406250022743 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import errno import os import stat from os_brick.remotefs import remotefs as remotefs_brick from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder import utils from cinder.volume import driver from cinder.volume.drivers import remotefs as remotefs_drv LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('glusterfs_shares_config', default='/etc/cinder/glusterfs_shares', help='File with the list of available gluster shares'), cfg.StrOpt('glusterfs_mount_point_base', default='$state_path/mnt', help='Base dir containing mount points for gluster shares.'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.ExtendVD): """Gluster based cinder driver. Creates file on Gluster share for using it as block device on hypervisor. Operations such as create/delete/extend volume/snapshot use locking on a per-process basis to prevent multiple threads from modifying qcow2 chains or the snapshot .info file simultaneously. """ driver_volume_type = 'glusterfs' driver_prefix = 'glusterfs' volume_backend_name = 'GlusterFS' VERSION = '1.3.0' def __init__(self, execute=processutils.execute, *args, **kwargs): self._remotefsclient = None super(GlusterfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) root_helper = utils.get_root_helper() self.base = getattr(self.configuration, 'glusterfs_mount_point_base', CONF.glusterfs_mount_point_base) self._remotefsclient = remotefs_brick.RemoteFsClient( 'glusterfs', root_helper, execute, glusterfs_mount_point_base=self.base) def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(GlusterfsDriver, self).do_setup(context) config = self.configuration.glusterfs_shares_config if not config: msg = (_("There's no Gluster config file configured (%s)") % 'glusterfs_shares_config') LOG.warning(msg) raise exception.GlusterfsException(msg) if not os.path.exists(config): msg = (_("Gluster config file at %(config)s doesn't exist") % {'config': config}) LOG.warning(msg) raise exception.GlusterfsException(msg) self.shares = {} try: self._execute('mount.glusterfs', check_exit_code=False) except OSError as exc: if exc.errno == errno.ENOENT: raise exception.GlusterfsException( _('mount.glusterfs is not installed')) else: raise self._refresh_mounts() def _unmount_shares(self): self._load_shares_config(self.configuration.glusterfs_shares_config) for share in self.shares.keys(): try: self._do_umount(True, share) except Exception as exc: LOG.warning(_LW('Exception during unmounting %s'), exc) def _do_umount(self, ignore_not_mounted, share): mount_path = self._get_mount_point_for_share(share) command = ['umount', mount_path] try: self._execute(*command, run_as_root=True) except processutils.ProcessExecutionError as exc: if ignore_not_mounted and 'not mounted' in exc.stderr: LOG.info(_LI("%s is already umounted"), share) else: LOG.error(_LE("Failed to umount %(share)s, reason=%(stderr)s"), {'share': share, 'stderr': exc.stderr}) raise def _refresh_mounts(self): try: self._unmount_shares() except processutils.ProcessExecutionError as exc: if 'target is busy' in exc.stderr: LOG.warning(_LW("Failed to refresh mounts, reason=%s"), exc.stderr) else: raise self._ensure_shares_mounted() def _qemu_img_info(self, path, 
volume_name): return super(GlusterfsDriver, self)._qemu_img_info_base( path, volume_name, self.configuration.glusterfs_mount_point_base) def check_for_setup_error(self): """Just to override parent behavior.""" pass def _local_volume_dir(self, volume): hashed = self._get_hash_str(volume['provider_location']) path = '%s/%s' % (self.configuration.glusterfs_mount_point_base, hashed) return path def _active_volume_path(self, volume): volume_dir = self._local_volume_dir(volume) path = os.path.join(volume_dir, self.get_active_image_from_info(volume)) return path def _update_volume_stats(self): """Retrieve stats info from volume group.""" super(GlusterfsDriver, self)._update_volume_stats() data = self._stats global_capacity = data['total_capacity_gb'] global_free = data['free_capacity_gb'] thin_enabled = self.configuration.nas_volume_prov_type == 'thin' if thin_enabled: provisioned_capacity = self._get_provisioned_capacity() else: provisioned_capacity = round(global_capacity - global_free, 2) data['provisioned_capacity_gb'] = provisioned_capacity data['max_over_subscription_ratio'] = ( self.configuration.max_over_subscription_ratio) data['thin_provisioning_support'] = thin_enabled data['thick_provisioning_support'] = not thin_enabled self._stats = data @remotefs_drv.locked_volume_id_operation def create_volume(self, volume): """Creates a volume.""" self._ensure_shares_mounted() volume['provider_location'] = self._find_share(volume['size']) LOG.info(_LI('casted to %s'), volume['provider_location']) self._do_create_volume(volume) return {'provider_location': volume['provider_location']} def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume. This is done with a qemu-img convert to raw/qcow2 from the snapshot qcow2. """ LOG.debug("snapshot: %(snap)s, volume: %(vol)s, " "volume_size: %(size)s", {'snap': snapshot['id'], 'vol': volume['id'], 'size': volume_size}) info_path = self._local_path_volume_info(snapshot['volume']) snap_info = self._read_info_file(info_path) vol_path = self._local_volume_dir(snapshot['volume']) forward_file = snap_info[snapshot['id']] forward_path = os.path.join(vol_path, forward_file) # Find the file which backs this file, which represents the point # when this snapshot was created. img_info = self._qemu_img_info(forward_path, snapshot['volume']['name']) path_to_snap_img = os.path.join(vol_path, img_info.backing_file) path_to_new_vol = self._local_path_volume(volume) LOG.debug("will copy from snapshot at %s", path_to_snap_img) if self.configuration.nas_volume_prov_type == 'thin': out_format = 'qcow2' else: out_format = 'raw' image_utils.convert_image(path_to_snap_img, path_to_new_vol, out_format) self._set_rw_permissions_for_all(path_to_new_vol) @remotefs_drv.locked_volume_id_operation def delete_volume(self, volume): """Deletes a logical volume.""" if not volume['provider_location']: LOG.warning(_LW('Volume %s does not have ' 'provider_location specified, ' 'skipping'), volume['name']) return self._ensure_share_mounted(volume['provider_location']) mounted_path = self._active_volume_path(volume) self._execute('rm', '-f', mounted_path, run_as_root=True) # If an exception (e.g. 
timeout) occurred during delete_snapshot, the # base volume may linger around, so just delete it if it exists base_volume_path = self._local_path_volume(volume) fileutils.delete_if_exists(base_volume_path) info_path = self._local_path_volume_info(volume) fileutils.delete_if_exists(info_path) def _get_matching_backing_file(self, backing_chain, snapshot_file): return next(f for f in backing_chain if f.get('backing-filename', '') == snapshot_file) def ensure_export(self, ctx, volume): """Synchronously recreates an export for a logical volume.""" self._ensure_share_mounted(volume['provider_location']) def create_export(self, ctx, volume, connector): """Exports the volume.""" pass def remove_export(self, ctx, volume): """Removes an export for a logical volume.""" pass def validate_connector(self, connector): pass @remotefs_drv.locked_volume_id_operation def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" # Find active qcow2 file active_file = self.get_active_image_from_info(volume) path = '%s/%s/%s' % (self.configuration.glusterfs_mount_point_base, self._get_hash_str(volume['provider_location']), active_file) data = {'export': volume['provider_location'], 'name': active_file} if volume['provider_location'] in self.shares: data['options'] = self.shares[volume['provider_location']] # Test file for raw vs. qcow2 format info = self._qemu_img_info(path, volume['name']) data['format'] = info.file_format if data['format'] not in ['raw', 'qcow2']: msg = _('%s must be a valid raw or qcow2 image.') % path raise exception.InvalidVolume(msg) return { 'driver_volume_type': 'glusterfs', 'data': data, 'mount_point_base': self._get_mount_point_base() } def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" pass @remotefs_drv.locked_volume_id_operation def extend_volume(self, volume, size_gb): volume_path = self._active_volume_path(volume) info = self._qemu_img_info(volume_path, volume['name']) backing_fmt = info.file_format if backing_fmt not in ['raw', 'qcow2']: msg = _('Unrecognized backing format: %s') raise exception.InvalidVolume(msg % backing_fmt) # qemu-img can resize both raw and qcow2 files image_utils.resize_image(volume_path, size_gb) def _do_create_volume(self, volume): """Create a volume on given glusterfs_share. :param volume: volume reference """ volume_path = self.local_path(volume) volume_size = volume['size'] LOG.debug("creating new volume at %s", volume_path) if os.path.exists(volume_path): msg = _('file already exists at %s') % volume_path LOG.error(msg) raise exception.InvalidVolume(reason=msg) if self.configuration.nas_volume_prov_type == 'thin': self._create_qcow2_file(volume_path, volume_size) else: try: self._fallocate(volume_path, volume_size) except processutils.ProcessExecutionError as exc: if 'Operation not supported' in exc.stderr: LOG.warning(_LW('Fallocate not supported by current ' 'version of glusterfs. 
So falling ' 'back to dd.')) self._create_regular_file(volume_path, volume_size) else: fileutils.delete_if_exists(volume_path) raise self._set_rw_permissions_for_all(volume_path) def _ensure_shares_mounted(self): """Mount all configured GlusterFS shares.""" self._mounted_shares = [] self._load_shares_config(self.configuration.glusterfs_shares_config) for share in self.shares.keys(): try: self._ensure_share_mounted(share) self._mounted_shares.append(share) except Exception as exc: LOG.error(_LE('Exception during mounting %s'), exc) LOG.debug('Available shares: %s', self._mounted_shares) def _ensure_share_mounted(self, glusterfs_share): """Mount GlusterFS share. :param glusterfs_share: string """ mount_path = self._get_mount_point_for_share(glusterfs_share) self._mount_glusterfs(glusterfs_share) # Ensure we can write to this share group_id = os.getegid() current_group_id = utils.get_file_gid(mount_path) current_mode = utils.get_file_mode(mount_path) if group_id != current_group_id: cmd = ['chgrp', group_id, mount_path] self._execute(*cmd, run_as_root=True) if not (current_mode & stat.S_IWGRP): cmd = ['chmod', 'g+w', mount_path] self._execute(*cmd, run_as_root=True) self._ensure_share_writable(mount_path) def _find_share(self, volume_size_for): """Choose GlusterFS share among available ones for given volume size. Current implementation looks for greatest capacity. :param volume_size_for: int size in GB """ if not self._mounted_shares: raise exception.GlusterfsNoSharesMounted() greatest_size = 0 greatest_share = None for glusterfs_share in self._mounted_shares: capacity = self._get_available_capacity(glusterfs_share)[0] if capacity > greatest_size: greatest_share = glusterfs_share greatest_size = capacity if volume_size_for * units.Gi > greatest_size: raise exception.GlusterfsNoSuitableShareFound( volume_size=volume_size_for) return greatest_share def _mount_glusterfs(self, glusterfs_share): """Mount GlusterFS share to mount path.""" mnt_flags = [] if self.shares.get(glusterfs_share) is not None: mnt_flags = self.shares[glusterfs_share].split() try: self._remotefsclient.mount(glusterfs_share, mnt_flags) except processutils.ProcessExecutionError: LOG.error(_LE("Mount failure for %(share)s."), {'share': glusterfs_share}) raise def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume. Allow a backup to occur only if no snapshots exist. Check both Cinder and the file on-disk. The latter is only a safety mechanism to prevent further damage if the snapshot information is already inconsistent. 
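For example, if the Cinder database lists no snapshots but the
        active image still carries a qcow2 backing file, the volume
        state is inconsistent and the backup is refused rather than
        risk capturing the wrong layer of the chain.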
""" snapshots = self.db.snapshot_get_all_for_volume(context, backup['volume_id']) snap_error_msg = _('Backup is not supported for GlusterFS ' 'volumes with snapshots.') if len(snapshots) > 0: raise exception.InvalidVolume(snap_error_msg) volume = self.db.volume_get(context, backup['volume_id']) active_file_path = self._active_volume_path(volume) info = self._qemu_img_info(active_file_path, volume['name']) if info.backing_file is not None: LOG.error(_LE('No snapshots found in database, but %(path)s has ' 'backing file %(backing_file)s!'), {'path': active_file_path, 'backing_file': info.backing_file}) raise exception.InvalidVolume(snap_error_msg) if info.file_format != 'raw': msg = _('Backup is only supported for raw-formatted ' 'GlusterFS volumes.') raise exception.InvalidVolume(msg) return super(GlusterfsDriver, self).backup_volume( context, backup, backup_service) cinder-8.0.0/cinder/volume/drivers/smbfs.py0000664000567000056710000006607712701406250022056 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import decorator import inspect import json import os from os_brick.remotefs import remotefs from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units from cinder import exception from cinder.i18n import _, _LI, _LW from cinder.image import image_utils from cinder import utils from cinder.volume.drivers import remotefs as remotefs_drv VERSION = '1.1.0' LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('smbfs_shares_config', default='/etc/cinder/smbfs_shares', help='File with the list of available smbfs shares.'), cfg.StrOpt('smbfs_allocation_info_file_path', default='$state_path/allocation_data', help=('The path of the automatically generated file containing ' 'information about volume disk space allocation.')), cfg.StrOpt('smbfs_default_volume_format', default='qcow2', choices=['raw', 'qcow2', 'vhd', 'vhdx'], help=('Default format that will be used when creating volumes ' 'if no volume format is specified.')), cfg.BoolOpt('smbfs_sparsed_volumes', default=True, help=('Create volumes as sparsed files which take no space ' 'rather than regular files when using raw format, ' 'in which case volume creation takes lot of time.')), cfg.FloatOpt('smbfs_used_ratio', default=0.95, help=('Percent of ACTUAL usage of the underlying volume ' 'before no new volumes can be allocated to the volume ' 'destination.')), cfg.FloatOpt('smbfs_oversub_ratio', default=1.0, help=('This will compare the allocated to available space on ' 'the volume destination. If the ratio exceeds this ' 'number, the destination will no longer be valid.')), cfg.StrOpt('smbfs_mount_point_base', default='$state_path/mnt', help=('Base dir containing mount points for smbfs shares.')), cfg.StrOpt('smbfs_mount_options', default='noperm,file_mode=0775,dir_mode=0775', help=('Mount options passed to the smbfs client. 
See ' 'mount.cifs man page for details.')), ] CONF = cfg.CONF CONF.register_opts(volume_opts) def update_allocation_data(delete=False): @decorator.decorator def wrapper(func, inst, *args, **kwargs): ret_val = func(inst, *args, **kwargs) call_args = inspect.getcallargs(func, inst, *args, **kwargs) volume = call_args['volume'] requested_size = call_args.get('size_gb', None) if delete: allocated_size_gb = None else: allocated_size_gb = requested_size or volume['size'] inst.update_disk_allocation_data(volume, allocated_size_gb) return ret_val return wrapper class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver): """SMBFS based cinder volume driver.""" driver_volume_type = 'smbfs' driver_prefix = 'smbfs' volume_backend_name = 'Generic_SMBFS' SHARE_FORMAT_REGEX = r'//.+/.+' VERSION = VERSION _MINIMUM_QEMU_IMG_VERSION = '1.7' _DISK_FORMAT_VHD = 'vhd' _DISK_FORMAT_VHD_LEGACY = 'vpc' _DISK_FORMAT_VHDX = 'vhdx' _DISK_FORMAT_RAW = 'raw' _DISK_FORMAT_QCOW2 = 'qcow2' def __init__(self, execute=putils.execute, *args, **kwargs): self._remotefsclient = None super(SmbfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) root_helper = utils.get_root_helper() self.base = getattr(self.configuration, 'smbfs_mount_point_base') opts = getattr(self.configuration, 'smbfs_mount_options') self._remotefsclient = remotefs.RemoteFsClient( 'cifs', root_helper, execute=execute, smbfs_mount_point_base=self.base, smbfs_mount_options=opts) self.img_suffix = None self._alloc_info_file_path = CONF.smbfs_allocation_info_file_path def _qemu_img_info(self, path, volume_name): return super(SmbfsDriver, self)._qemu_img_info_base( path, volume_name, self.configuration.smbfs_mount_point_base) @remotefs_drv.locked_volume_id_operation def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. :param volume: volume reference :param connector: connector reference """ # Find active image active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) info = self._qemu_img_info(active_file_path, volume['name']) fmt = info.file_format data = {'export': volume['provider_location'], 'format': fmt, 'name': active_file} if volume['provider_location'] in self.shares: data['options'] = self.shares[volume['provider_location']] return { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self._get_mount_point_base() } def do_setup(self, context): image_utils.check_qemu_img_version(self._MINIMUM_QEMU_IMG_VERSION) config = self.configuration.smbfs_shares_config if not config: msg = (_("SMBFS config file not set (smbfs_shares_config).")) LOG.error(msg) raise exception.SmbfsException(msg) if not os.path.exists(config): msg = (_("SMBFS config file at %(config)s doesn't exist.") % {'config': config}) LOG.error(msg) raise exception.SmbfsException(msg) if not os.path.isabs(self.base): msg = _("Invalid mount point base: %s") % self.base LOG.error(msg) raise exception.SmbfsException(msg) if not self.configuration.smbfs_oversub_ratio > 0: msg = _( "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: " "%s") % self.configuration.smbfs_oversub_ratio LOG.error(msg) raise exception.SmbfsException(msg) if not 0 < self.configuration.smbfs_used_ratio <= 1: msg = _("SMBFS config 'smbfs_used_ratio' invalid. 
Must be > 0 " "and <= 1.0: %s") % self.configuration.smbfs_used_ratio LOG.error(msg) raise exception.SmbfsException(msg) self.shares = {} # address : options self._ensure_shares_mounted() self._setup_allocation_data() def _setup_allocation_data(self): if not os.path.exists(self._alloc_info_file_path): fileutils.ensure_tree( os.path.dirname(self._alloc_info_file_path)) self._allocation_data = {} self._update_allocation_data_file() else: with open(self._alloc_info_file_path, 'r') as f: self._allocation_data = json.load(f) def update_disk_allocation_data(self, volume, virtual_size_gb=None): volume_name = volume['name'] smbfs_share = volume['provider_location'] if smbfs_share: share_hash = self._get_hash_str(smbfs_share) else: return share_alloc_data = self._allocation_data.get(share_hash, {}) old_virtual_size = share_alloc_data.get(volume_name, 0) total_allocated = share_alloc_data.get('total_allocated', 0) if virtual_size_gb: share_alloc_data[volume_name] = virtual_size_gb total_allocated += virtual_size_gb - old_virtual_size elif share_alloc_data.get(volume_name): # The volume is deleted. del share_alloc_data[volume_name] total_allocated -= old_virtual_size share_alloc_data['total_allocated'] = total_allocated self._allocation_data[share_hash] = share_alloc_data self._update_allocation_data_file() def _update_allocation_data_file(self): with open(self._alloc_info_file_path, 'w') as f: json.dump(self._allocation_data, f) def _get_total_allocated(self, smbfs_share): share_hash = self._get_hash_str(smbfs_share) share_alloc_data = self._allocation_data.get(share_hash, {}) total_allocated = share_alloc_data.get('total_allocated', 0) << 30 return float(total_allocated) def local_path(self, volume): """Get volume path (mounted locally fs path) for given volume. :param volume: volume reference """ volume_path_template = self._get_local_volume_path_template(volume) volume_path = self._lookup_local_volume_path(volume_path_template) if volume_path: return volume_path # The image does not exist, so retrieve the volume format # in order to build the path. fmt = self.get_volume_format(volume) if fmt in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX): volume_path = volume_path_template + '.' + fmt else: volume_path = volume_path_template return volume_path def _get_local_volume_path_template(self, volume): local_dir = self._local_volume_dir(volume) local_path_template = os.path.join(local_dir, volume['name']) return local_path_template def _lookup_local_volume_path(self, volume_path_template): for ext in ['', self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX]: volume_path = (volume_path_template + '.' + ext if ext else volume_path_template) if os.path.exists(volume_path): return volume_path def _local_path_volume_info(self, volume): return '%s%s' % (self.local_path(volume), '.info') def _get_new_snap_path(self, snapshot): vol_path = self.local_path(snapshot['volume']) snap_path, ext = os.path.splitext(vol_path) snap_path += '.' 
+ snapshot['id'] + ext return snap_path def get_volume_format(self, volume, qemu_format=False): volume_path_template = self._get_local_volume_path_template(volume) volume_path = self._lookup_local_volume_path(volume_path_template) if volume_path: info = self._qemu_img_info(volume_path, volume['name']) volume_format = info.file_format else: volume_format = ( self._get_volume_format_spec(volume) or self.configuration.smbfs_default_volume_format) if qemu_format and volume_format == self._DISK_FORMAT_VHD: volume_format = self._DISK_FORMAT_VHD_LEGACY elif volume_format == self._DISK_FORMAT_VHD_LEGACY: volume_format = self._DISK_FORMAT_VHD return volume_format @remotefs_drv.locked_volume_id_operation @update_allocation_data(delete=True) def delete_volume(self, volume): """Deletes a logical volume.""" if not volume['provider_location']: LOG.warning(_LW('Volume %s does not have provider_location ' 'specified, skipping.'), volume['name']) return self._ensure_share_mounted(volume['provider_location']) volume_dir = self._local_volume_dir(volume) mounted_path = os.path.join(volume_dir, self.get_active_image_from_info(volume)) if os.path.exists(mounted_path): self._delete(mounted_path) else: LOG.debug("Skipping deletion of volume %s as it does not exist.", mounted_path) info_path = self._local_path_volume_info(volume) self._delete(info_path) def _create_windows_image(self, volume_path, volume_size, volume_format): """Creates a VHD or VHDX file of a given size.""" # vhd is regarded as vpc by qemu if volume_format == self._DISK_FORMAT_VHD: volume_format = self._DISK_FORMAT_VHD_LEGACY self._execute('qemu-img', 'create', '-f', volume_format, volume_path, str(volume_size * units.Gi), run_as_root=True) @remotefs_drv.locked_volume_id_operation @update_allocation_data() def create_volume(self, volume): return super(SmbfsDriver, self).create_volume(volume) def _do_create_volume(self, volume): """Create a volume on given smbfs_share. :param volume: volume reference """ volume_format = self.get_volume_format(volume) volume_path = self.local_path(volume) volume_size = volume['size'] LOG.debug("Creating new volume at %s.", volume_path) if os.path.exists(volume_path): msg = _('File already exists at %s.') % volume_path LOG.error(msg) raise exception.InvalidVolume(reason=msg) if volume_format in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX): self._create_windows_image(volume_path, volume_size, volume_format) else: self.img_suffix = None if volume_format == self._DISK_FORMAT_QCOW2: self._create_qcow2_file(volume_path, volume_size) elif self.configuration.smbfs_sparsed_volumes: self._create_sparsed_file(volume_path, volume_size) else: self._create_regular_file(volume_path, volume_size) self._set_rw_permissions_for_all(volume_path) def _get_capacity_info(self, smbfs_share): """Calculate available space on the SMBFS share. :param smbfs_share: example //172.18.194.100/share """ mount_point = self._get_mount_point_for_share(smbfs_share) df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point, run_as_root=True) block_size, blocks_total, blocks_avail = map(float, df.split()) total_available = block_size * blocks_avail total_size = block_size * blocks_total total_allocated = self._get_total_allocated(smbfs_share) return total_size, total_available, total_allocated def _find_share(self, volume_size_in_gib): """Choose SMBFS share among available ones for given volume size. For instances with more than one share that meets the criteria, the share with the least "allocated" space will be selected. 
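        For example, if shares A and B both pass _is_share_eligible and
        A has 30 GB of tracked allocations against B's 10 GB, B is
        selected.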
:param volume_size_in_gib: int size in GB """ if not self._mounted_shares: raise exception.SmbfsNoSharesMounted() target_share = None target_share_reserved = 0 for smbfs_share in self._mounted_shares: if not self._is_share_eligible(smbfs_share, volume_size_in_gib): continue total_allocated = self._get_total_allocated(smbfs_share) if target_share is not None: if target_share_reserved > total_allocated: target_share = smbfs_share target_share_reserved = total_allocated else: target_share = smbfs_share target_share_reserved = total_allocated if target_share is None: raise exception.SmbfsNoSuitableShareFound( volume_size=volume_size_in_gib) LOG.debug('Selected %s as target smbfs share.', target_share) return target_share def _is_share_eligible(self, smbfs_share, volume_size_in_gib): """Verifies SMBFS share is eligible to host volume with given size. First validation step: ratio of actual space (used_space / total_space) is less than 'smbfs_used_ratio'. Second validation step: apparent space allocated (differs from actual space used when using sparse files) and compares the apparent available space (total_available * smbfs_oversub_ratio) to ensure enough space is available for the new volume. :param smbfs_share: smbfs share :param volume_size_in_gib: int size in GB """ used_ratio = self.configuration.smbfs_used_ratio oversub_ratio = self.configuration.smbfs_oversub_ratio requested_volume_size = volume_size_in_gib * units.Gi total_size, total_available, total_allocated = \ self._get_capacity_info(smbfs_share) apparent_size = max(0, total_size * oversub_ratio) apparent_available = max(0, apparent_size - total_allocated) used = (total_size - total_available) / total_size if used > used_ratio: LOG.debug('%s is above smbfs_used_ratio.', smbfs_share) return False if apparent_available <= requested_volume_size: LOG.debug('%s is above smbfs_oversub_ratio.', smbfs_share) return False if total_allocated / total_size >= oversub_ratio: LOG.debug('%s reserved space is above smbfs_oversub_ratio.', smbfs_share) return False return True def _create_snapshot_online(self, snapshot, backing_filename, new_snap_path): msg = _("This driver does not support snapshotting in-use volumes.") raise exception.SmbfsException(msg) def _delete_snapshot_online(self, context, snapshot, info): msg = _("This driver does not support deleting in-use snapshots.") raise exception.SmbfsException(msg) def _do_create_snapshot(self, snapshot, backing_filename, new_snap_path): self._check_snapshot_support(snapshot) super(SmbfsDriver, self)._do_create_snapshot( snapshot, backing_filename, new_snap_path) def _check_snapshot_support(self, snapshot): volume_format = self.get_volume_format(snapshot['volume']) # qemu-img does not yet support differencing vhd/vhdx if volume_format in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX): err_msg = _("Snapshots are not supported for this volume " "format: %s") % volume_format raise exception.InvalidVolume(err_msg) @remotefs_drv.locked_volume_id_operation @update_allocation_data() def extend_volume(self, volume, size_gb): LOG.info(_LI('Extending volume %s.'), volume['id']) self._extend_volume(volume, size_gb) def _extend_volume(self, volume, size_gb): volume_path = self.local_path(volume) self._check_extend_volume_support(volume, size_gb) LOG.info(_LI('Resizing file to %sG...'), size_gb) self._do_extend_volume(volume_path, size_gb, volume['name']) def _do_extend_volume(self, volume_path, size_gb, volume_name): info = self._qemu_img_info(volume_path, volume_name) fmt = info.file_format # Note(lpetrut): 
as for version 2.0, qemu-img cannot resize # vhd/x images. For the moment, we'll just use an intermediary # conversion in order to be able to do the resize. if fmt in (self._DISK_FORMAT_VHDX, self._DISK_FORMAT_VHD_LEGACY): temp_image = volume_path + '.tmp' image_utils.convert_image(volume_path, temp_image, self._DISK_FORMAT_RAW) image_utils.resize_image(temp_image, size_gb) image_utils.convert_image(temp_image, volume_path, fmt) self._delete(temp_image) else: image_utils.resize_image(volume_path, size_gb) if not self._is_file_size_equal(volume_path, size_gb): raise exception.ExtendVolumeError( reason='Resizing image file failed.') def _check_extend_volume_support(self, volume, size_gb): volume_path = self.local_path(volume) active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) if active_file_path != volume_path: msg = _('Extend volume is only supported for this ' 'driver when no snapshots exist.') raise exception.InvalidVolume(msg) extend_by = int(size_gb) - volume['size'] if not self._is_share_eligible(volume['provider_location'], extend_by): raise exception.ExtendVolumeError(reason='Insufficient space to ' 'extend volume %s to %sG.' % (volume['id'], size_gb)) @remotefs_drv.locked_volume_id_operation @update_allocation_data() def create_volume_from_snapshot(self, volume, snapshot): return self._create_volume_from_snapshot(volume, snapshot) def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume. This is done with a qemu-img convert to raw/qcow2 from the snapshot qcow2. """ LOG.debug("Snapshot: %(snap)s, volume: %(vol)s, " "volume_size: %(size)s", {'snap': snapshot['id'], 'vol': volume['id'], 'size': volume_size}) info_path = self._local_path_volume_info(snapshot['volume']) snap_info = self._read_info_file(info_path) vol_dir = self._local_volume_dir(snapshot['volume']) out_format = self.get_volume_format(volume, qemu_format=True) forward_file = snap_info[snapshot['id']] forward_path = os.path.join(vol_dir, forward_file) # Find the file which backs this file, which represents the point # when this snapshot was created. 
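        # Illustrative chain (hypothetical names, default qcow2 volumes,
        # which carry no filename extension): the info file maps the
        # snapshot id to the active file, whose backing file holds the
        # image as it existed when the snapshot was taken:
        #
        #   volume-1234              <- backing file (snapshot-time data)
        #   volume-1234.<snap-id>    <- forward_file / active image
        #
        # qemu-img convert reads the backing file, so the new volume gets
        # the snapshot-time data without any post-snapshot writes.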
img_info = self._qemu_img_info(forward_path, snapshot['volume']['name']) path_to_snap_img = os.path.join(vol_dir, img_info.backing_file) LOG.debug("Will copy from snapshot at %s", path_to_snap_img) image_utils.convert_image(path_to_snap_img, self.local_path(volume), out_format) self._extend_volume(volume, volume_size) self._set_rw_permissions_for_all(self.local_path(volume)) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" volume_format = self.get_volume_format(volume, qemu_format=True) image_utils.fetch_to_volume_format( context, image_service, image_id, self.local_path(volume), volume_format, self.configuration.volume_dd_blocksize) self._do_extend_volume(self.local_path(volume), volume['size'], volume['name']) data = image_utils.qemu_img_info(self.local_path(volume)) virt_size = data.virtual_size / units.Gi if virt_size != volume['size']: raise exception.ImageUnacceptable( image_id=image_id, reason=(_("Expected volume size was %d") % volume['size']) + (_(" but size is now %d.") % virt_size)) @remotefs_drv.locked_volume_id_operation @update_allocation_data() def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" return self._create_cloned_volume(volume, src_vref) def _ensure_share_mounted(self, smbfs_share): mnt_flags = [] if self.shares.get(smbfs_share) is not None: mnt_flags = self.shares[smbfs_share] # The domain name must be removed from the # user name when using Samba. mnt_flags = self.parse_credentials(mnt_flags).split() self._remotefsclient.mount(smbfs_share, mnt_flags) def parse_options(self, option_str): opts_dict = {} opts_list = [] if option_str: for i in option_str.split(): if i == '-o': continue for j in i.split(','): tmp_opt = j.split('=') if len(tmp_opt) > 1: opts_dict[tmp_opt[0]] = tmp_opt[1] else: opts_list.append(tmp_opt[0]) return opts_list, opts_dict def parse_credentials(self, mnt_flags): options_list, options_dict = self.parse_options(mnt_flags) username = (options_dict.pop('user', None) or options_dict.pop('username', None)) if username: # Remove the Domain from the user name options_dict['username'] = username.split('\\')[-1] else: options_dict['username'] = 'guest' named_options = ','.join("%s=%s" % (key, val) for (key, val) in options_dict.items()) options_list = ','.join(options_list) flags = '-o ' + ','.join([named_options, options_list]) return flags.strip(',') def _get_volume_format_spec(self, volume): # This method needs to be able to parse metadata/volume type # specs for volume SQLAlchemy objects and versioned objects, # as the transition to versioned objects is not complete and the # driver may receive either of them. # # TODO(lpetrut): once the transition to oslo.versionedobjects is # complete, we can skip some of those checks. volume_metadata_specs = {} volume_type_specs = {} if volume.get('metadata') and isinstance(volume.metadata, dict): volume_metadata_specs.update(volume.metadata) elif volume.get('volume_metadata'): volume_metadata_specs.update( {spec.key: spec.value for spec in volume.volume_metadata}) vol_type = volume.get('volume_type') if vol_type: specs = vol_type.get('extra_specs') or {} if isinstance(specs, dict): volume_type_specs.update(specs) else: volume_type_specs.update( {spec.key: spec.value for spec in specs}) # In this case, we want the volume metadata specs to take # precedence over the volume type specs. 
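# (Illustrative aside, not part of this driver.) A minimal sketch of the
# precedence rule implemented just below, using plain dicts with
# hypothetical spec values: metadata specs are scanned first, so they
# win over volume type specs.

metadata_specs = {'volume_format': 'vhdx'}
type_specs = {'smbfs:volume_format': 'qcow2'}

volume_format = None
for specs in (metadata_specs, type_specs):
    for key, val in specs.items():
        if 'volume_format' in key:
            volume_format = val
            break
    if volume_format:
        break
assert volume_format == 'vhdx'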
for specs in [volume_metadata_specs, volume_type_specs]: for key, val in specs.items(): if 'volume_format' in key: return val return None def _is_file_size_equal(self, path, size): """Checks if file size at path is equal to size.""" data = image_utils.qemu_img_info(path) virt_size = data.virtual_size / units.Gi return virt_size == size cinder-8.0.0/cinder/volume/drivers/pure.py0000664000567000056710000022410712701406250021705 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Pure Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for Pure Storage FlashArray storage system. This driver requires Purity version 4.0.0 or later. """ import math import re import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.objects import fields from cinder import utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import utils as volume_utils from cinder.volume import volume_types from cinder.zonemanager import utils as fczm_utils try: from purestorage import purestorage except ImportError: purestorage = None LOG = logging.getLogger(__name__) PURE_OPTS = [ cfg.StrOpt("pure_api_token", help="REST API authorization token."), cfg.BoolOpt("pure_automatic_max_oversubscription_ratio", default=True, help="Automatically determine an oversubscription ratio based " "on the current total data reduction values. If used " "this calculated value will override the " "max_over_subscription_ratio config option."), # These are used as default settings. In future these can be overridden # by settings in volume-type. cfg.IntOpt("pure_replica_interval_default", default=900, help="Snapshot replication interval in seconds."), cfg.IntOpt("pure_replica_retention_short_term_default", default=14400, help="Retain all snapshots on target for this " "time (in seconds.)"), cfg.IntOpt("pure_replica_retention_long_term_per_day_default", default=3, help="Retain how many snapshots for each day."), cfg.IntOpt("pure_replica_retention_long_term_default", default=7, help="Retain snapshots per day on target for this time " "(in days.)"), cfg.BoolOpt("pure_eradicate_on_delete", default=False, help="When enabled, all Pure volumes, snapshots, and " "protection groups will be eradicated at the time of " "deletion in Cinder. Data will NOT be recoverable after " "a delete with this set to True! When disabled, volumes " "and snapshots will go into pending eradication state " "and can be recovered." 
) ] CONF = cfg.CONF CONF.register_opts(PURE_OPTS) INVALID_CHARACTERS = re.compile(r"[^-a-zA-Z0-9]") GENERATED_NAME = re.compile(r".*-[a-f0-9]{32}-cinder$") REPLICATION_CG_NAME = "cinder-group" CHAP_SECRET_KEY = "PURE_TARGET_CHAP_SECRET" ERR_MSG_NOT_EXIST = "does not exist" ERR_MSG_NO_SUCH_SNAPSHOT = "No such volume or snapshot" ERR_MSG_PENDING_ERADICATION = "has been destroyed" ERR_MSG_ALREADY_EXISTS = "already exists" ERR_MSG_COULD_NOT_BE_FOUND = "could not be found" ERR_MSG_ALREADY_INCLUDES = "already includes" ERR_MSG_ALREADY_ALLOWED = "already allowed on" ERR_MSG_NOT_CONNECTED = "is not connected" ERR_MSG_ALREADY_BELONGS = "already belongs to" EXTRA_SPECS_REPL_ENABLED = "replication_enabled" CONNECT_LOCK_NAME = 'PureVolumeDriver_connect' UNMANAGED_SUFFIX = '-unmanaged' MANAGE_SNAP_REQUIRED_API_VERSIONS = ['1.4', '1.5'] REPLICATION_REQUIRED_API_VERSIONS = ['1.3', '1.4', '1.5'] REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL = 5 # 5 seconds REPL_SETTINGS_PROPAGATE_MAX_RETRIES = 36 # 36 * 5 = 180 seconds def pure_driver_debug_trace(f): """Log the method entrance and exit including active backend name. This should only be used on VolumeDriver class methods. It depends on having a 'self' argument that is a PureBaseVolumeDriver. """ def wrapper(*args, **kwargs): driver = args[0] # self cls_name = driver.__class__.__name__ method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name, "method": f.__name__} backend_name = driver._get_current_array()._backend_id LOG.debug("[%(backend_name)s] Enter %(method_name)s" % {"method_name": method_name, "backend_name": backend_name}) result = f(*args, **kwargs) LOG.debug("[%(backend_name)s] Leave %(method_name)s" % {"method_name": method_name, "backend_name": backend_name}) return result return wrapper class PureBaseVolumeDriver(san.SanDriver): """Performs volume management on Pure Storage FlashArray.""" SUPPORTED_REST_API_VERSIONS = ['1.2', '1.3', '1.4', '1.5'] def __init__(self, *args, **kwargs): execute = kwargs.pop("execute", utils.execute) super(PureBaseVolumeDriver, self).__init__(execute=execute, *args, **kwargs) self.configuration.append_config_values(PURE_OPTS) self._array = None self._storage_protocol = None self._backend_name = (self.configuration.volume_backend_name or self.__class__.__name__) self._replication_target_arrays = [] self._replication_pg_name = REPLICATION_CG_NAME self._replication_interval = None self._replication_retention_short_term = None self._replication_retention_long_term = None self._replication_retention_long_term_per_day = None self._is_replication_enabled = False self._active_backend_id = kwargs.get('active_backend_id', None) self._failed_over_primary_array = None def parse_replication_configs(self): self._replication_interval = ( self.configuration.pure_replica_interval_default) self._replication_retention_short_term = ( self.configuration.pure_replica_retention_short_term_default) self._replication_retention_long_term = ( self.configuration.pure_replica_retention_long_term_default) self._replication_retention_long_term_per_day = ( self.configuration. 
pure_replica_retention_long_term_per_day_default) retention_policy = self._generate_replication_retention() replication_devices = self.configuration.safe_get( 'replication_device') primary_array = self._get_current_array() if replication_devices: for replication_device in replication_devices: backend_id = replication_device["backend_id"] san_ip = replication_device["san_ip"] api_token = replication_device["api_token"] verify_https = replication_device.get("ssl_cert_verify", False) ssl_cert_path = replication_device.get("ssl_cert_path", None) target_array = self._get_flasharray( san_ip, api_token, verify_https=verify_https, ssl_cert_path=ssl_cert_path ) target_array._backend_id = backend_id LOG.debug("Adding san_ip %(san_ip)s to replication_targets.", {"san_ip": san_ip}) api_version = target_array.get_rest_version() if api_version not in REPLICATION_REQUIRED_API_VERSIONS: msg = _('Unable to do replication with Purity REST ' 'API version %(api_version)s, requires one of ' '%(required_versions)s.') % { 'api_version': api_version, 'required_versions': REPLICATION_REQUIRED_API_VERSIONS } raise exception.PureDriverException(reason=msg) target_array_info = target_array.get() target_array.array_name = target_array_info["array_name"] target_array.array_id = target_array_info["id"] LOG.debug("secondary array name: %s", target_array.array_name) LOG.debug("secondary array id: %s", target_array.array_id) self._replication_target_arrays.append(target_array) self._setup_replicated_pgroups(primary_array, self._replication_target_arrays, self._replication_pg_name, self._replication_interval, retention_policy) def do_setup(self, context): """Performs driver initialization steps that could raise exceptions.""" if purestorage is None: msg = _("Missing 'purestorage' python module, ensure the library" " is installed and available.") raise exception.PureDriverException(msg) # Raises PureDriverException if unable to connect and PureHTTPError # if unable to authenticate. purestorage.FlashArray.supported_rest_versions = \ self.SUPPORTED_REST_API_VERSIONS self._array = self._get_flasharray( self.configuration.san_ip, api_token=self.configuration.pure_api_token, verify_https=self.configuration.driver_ssl_cert_verify, ssl_cert_path=self.configuration.driver_ssl_cert_path ) self._array._backend_id = self._backend_name LOG.debug("Primary array backend_id: %s", self.configuration.config_group) LOG.debug("Primary array name: %s", self._array.array_name) LOG.debug("Primary array id: %s", self._array.array_id) self.do_setup_replication() # If we have failed over at some point we need to adjust our current # array based on the one that we have failed over to if (self._active_backend_id is not None and self._active_backend_id != self._array._backend_id): for array in self._replication_target_arrays: if array._backend_id == self._active_backend_id: self._failed_over_primary_array = self._array self._array = array break def do_setup_replication(self): replication_devices = self.configuration.safe_get( 'replication_device') if replication_devices: self.parse_replication_configs() self._is_replication_enabled = True def check_for_setup_error(self): # Avoid inheriting check_for_setup_error from SanDriver, which checks # for san_password or san_private_key, not relevant to our driver. 
pass @pure_driver_debug_trace def create_volume(self, volume): """Creates a volume.""" vol_name = self._get_vol_name(volume) vol_size = volume["size"] * units.Gi current_array = self._get_current_array() current_array.create_volume(vol_name, vol_size) if volume['consistencygroup_id']: self._add_volume_to_consistency_group( volume['consistencygroup_id'], vol_name ) self._enable_replication_if_needed(current_array, volume) @pure_driver_debug_trace def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" vol_name = self._get_vol_name(volume) if snapshot['cgsnapshot_id']: snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot) else: snap_name = self._get_snap_name(snapshot) if not snap_name: msg = _('Unable to determine snapshot name in Purity for snapshot ' '%(id)s.') % {'id': snapshot['id']} raise exception.PureDriverException(reason=msg) current_array = self._get_current_array() current_array.copy_volume(snap_name, vol_name) self._extend_if_needed(current_array, vol_name, snapshot["volume_size"], volume["size"]) if volume['consistencygroup_id']: self._add_volume_to_consistency_group( volume['consistencygroup_id'], vol_name) self._enable_replication_if_needed(current_array, volume) def _enable_replication_if_needed(self, array, volume): if self._is_volume_replicated_type(volume): self._enable_replication(array, volume) def _enable_replication(self, array, volume): """Add volume to replicated protection group.""" try: array.set_pgroup(self._replication_pg_name, addvollist=[self._get_vol_name(volume)]) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and ERR_MSG_ALREADY_BELONGS in err.text): # Happens if the volume already added to PG. ctxt.reraise = False LOG.warning(_LW("Adding Volume to Protection Group " "failed with message: %s"), err.text) @pure_driver_debug_trace def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" vol_name = self._get_vol_name(volume) src_name = self._get_vol_name(src_vref) # Check which backend the source volume is on. In case of failover # the source volume may be on the secondary array. current_array = self._get_current_array() current_array.copy_volume(src_name, vol_name) self._extend_if_needed(current_array, vol_name, src_vref["size"], volume["size"]) if volume['consistencygroup_id']: self._add_volume_to_consistency_group( volume['consistencygroup_id'], vol_name) self._enable_replication_if_needed(current_array, volume) def _extend_if_needed(self, array, vol_name, src_size, vol_size): """Extend the volume from size src_size to size vol_size.""" if vol_size > src_size: vol_size = vol_size * units.Gi array.extend_volume(vol_name, vol_size) @pure_driver_debug_trace def delete_volume(self, volume): """Disconnect all hosts and delete the volume""" vol_name = self._get_vol_name(volume) current_array = self._get_current_array() try: connected_hosts = current_array.list_volume_private_connections( vol_name) for host_info in connected_hosts: host_name = host_info["host"] self._disconnect_host(current_array, host_name, vol_name) current_array.destroy_volume(vol_name) if self.configuration.pure_eradicate_on_delete: current_array.eradicate_volume(vol_name) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and ERR_MSG_NOT_EXIST in err.text): # Happens if the volume does not exist. 
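# (Illustrative aside, not part of this driver.) The suppression pattern
# used here and throughout this file: oslo's save_and_reraise_exception
# re-raises on exit by default, and setting ctxt.reraise = False turns
# an expected error into a logged no-op. A minimal self-contained
# sketch against a hypothetical 'array' client:

from oslo_utils import excutils

def destroy_if_present(array, vol_name):
    try:
        array.destroy_volume(vol_name)
    except Exception as err:
        with excutils.save_and_reraise_exception() as ctxt:
            if 'does not exist' in str(err):
                # Already gone; swallow the error instead of re-raising.
                ctxt.reraise = False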
ctxt.reraise = False LOG.warning(_LW("Volume deletion failed with message: %s"), err.text) @pure_driver_debug_trace def create_snapshot(self, snapshot): """Creates a snapshot.""" # Get current array in case we have failed over via replication. current_array = self._get_current_array() vol_name, snap_suff = self._get_snap_name(snapshot).split(".") current_array.create_snapshot(vol_name, suffix=snap_suff) @pure_driver_debug_trace def delete_snapshot(self, snapshot): """Deletes a snapshot.""" # Get current array in case we have failed over via replication. current_array = self._get_current_array() snap_name = self._get_snap_name(snapshot) try: current_array.destroy_volume(snap_name) if self.configuration.pure_eradicate_on_delete: current_array.eradicate_volume(snap_name) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if err.code == 400 and ( ERR_MSG_NOT_EXIST in err.text or ERR_MSG_NO_SUCH_SNAPSHOT in err.text): # Happens if the snapshot does not exist. ctxt.reraise = False LOG.warning(_LW("Unable to delete snapshot, assuming " "already deleted. Error: %s"), err.text) def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector): pass def initialize_connection(self, volume, connector, initiator_data=None): """Connect the volume to the specified initiator in Purity. This implementation is specific to the host type (iSCSI, FC, etc). """ raise NotImplementedError def _get_host(self, array, connector): """Get a Purity Host that corresponds to the host in the connector. This implementation is specific to the host type (iSCSI, FC, etc). """ raise NotImplementedError @utils.synchronized(CONNECT_LOCK_NAME, external=True) def _disconnect(self, array, volume, connector, **kwargs): vol_name = self._get_vol_name(volume) host = self._get_host(array, connector) if host: host_name = host["name"] result = self._disconnect_host(array, host_name, vol_name) else: LOG.error(_LE("Unable to disconnect host from volume, could not " "determine Purity host")) result = False return result @pure_driver_debug_trace def terminate_connection(self, volume, connector, **kwargs): """Terminate connection.""" # Get current array in case we have failed over via replication. current_array = self._get_current_array() self._disconnect(current_array, volume, connector, **kwargs) @pure_driver_debug_trace def _disconnect_host(self, array, host_name, vol_name): """Return value indicates if host was deleted on array or not""" try: array.disconnect_host(host_name, vol_name) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if err.code == 400 and ERR_MSG_NOT_CONNECTED in err.text: # Happens if the host and volume are not connected. ctxt.reraise = False LOG.error(_LE("Disconnection failed with message: " "%(msg)s."), {"msg": err.text}) if (GENERATED_NAME.match(host_name) and not array.list_host_connections(host_name, private=True)): LOG.info(_LI("Deleting unneeded host %(host_name)r."), {"host_name": host_name}) try: array.delete_host(host_name) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if err.code == 400 and ERR_MSG_NOT_EXIST in err.text: # Happens if the host is already deleted. # This is fine though, just treat it as a warning. 
ctxt.reraise = False LOG.warning(_LW("Purity host deletion failed: " "%(msg)s."), {"msg": err.text}) return True return False @pure_driver_debug_trace def get_volume_stats(self, refresh=False): """Return the current state of the volume service. If 'refresh' is True, run the update first. """ if refresh: LOG.debug("Updating volume stats.") self._update_stats() return self._stats def _update_stats(self): """Set self._stats with relevant information.""" current_array = self._get_current_array() # Collect info from the array space_info = current_array.get(space=True) perf_info = current_array.get(action='monitor')[0] # Always index 0 hosts = current_array.list_hosts() snaps = current_array.list_volumes(snap=True, pending=True) pgroups = current_array.list_pgroups(pending=True) # Perform some translations and calculations total_capacity = float(space_info["capacity"]) / units.Gi used_space = float(space_info["total"]) / units.Gi free_space = float(total_capacity - used_space) prov_space, total_vols = self._get_provisioned_space() total_hosts = len(hosts) total_snaps = len(snaps) total_pgroups = len(pgroups) provisioned_space = float(prov_space) / units.Gi thin_provisioning = self._get_thin_provisioning(provisioned_space, used_space) # Start with some required info data = dict( volume_backend_name=self._backend_name, vendor_name='Pure Storage', driver_version=self.VERSION, storage_protocol=self._storage_protocol, ) # Add flags for supported features data['consistencygroup_support'] = True data['thin_provisioning_support'] = True data['multiattach'] = True # Add capacity info for scheduler data['total_capacity_gb'] = total_capacity data['free_capacity_gb'] = free_space data['reserved_percentage'] = self.configuration.reserved_percentage data['provisioned_capacity'] = provisioned_space data['max_over_subscription_ratio'] = thin_provisioning # Add the filtering/goodness functions data['filter_function'] = self.get_filter_function() data['goodness_function'] = self.get_goodness_function() # Add array metadata counts for filtering and weighing functions data['total_volumes'] = total_vols data['total_snapshots'] = total_snaps data['total_hosts'] = total_hosts data['total_pgroups'] = total_pgroups # Add performance stats for filtering and weighing functions # IOPS data['writes_per_sec'] = perf_info['writes_per_sec'] data['reads_per_sec'] = perf_info['reads_per_sec'] # Bandwidth data['input_per_sec'] = perf_info['input_per_sec'] data['output_per_sec'] = perf_info['output_per_sec'] # Latency data['usec_per_read_op'] = perf_info['usec_per_read_op'] data['usec_per_write_op'] = perf_info['usec_per_write_op'] data['queue_depth'] = perf_info['queue_depth'] # Replication data["replication_enabled"] = self._is_replication_enabled data["replication_type"] = ["async"] data["replication_count"] = len(self._replication_target_arrays) data["replication_targets"] = [array._backend_id for array in self._replication_target_arrays] self._stats = data def _get_provisioned_space(self): """Sum up provisioned size of all volumes on array""" volumes = self._get_current_array().list_volumes(pending=True) return sum(item["size"] for item in volumes), len(volumes) def _get_thin_provisioning(self, provisioned_space, used_space): """Get the current value for the thin provisioning ratio. If pure_automatic_max_oversubscription_ratio is True we will calculate a value, if not we will respect the configuration option for the max_over_subscription_ratio. 
""" if (self.configuration.pure_automatic_max_oversubscription_ratio and used_space != 0 and provisioned_space != 0): # If array is empty we can not calculate a max oversubscription # ratio. In this case we look to the config option as a starting # point. Once some volumes are actually created and some data is # stored on the array a much more accurate number will be # presented based on current usage. thin_provisioning = provisioned_space / used_space else: thin_provisioning = self.configuration.max_over_subscription_ratio return thin_provisioning @pure_driver_debug_trace def extend_volume(self, volume, new_size): """Extend volume to new_size.""" # Get current array in case we have failed over via replication. current_array = self._get_current_array() vol_name = self._get_vol_name(volume) new_size = new_size * units.Gi current_array.extend_volume(vol_name, new_size) def _add_volume_to_consistency_group(self, consistencygroup_id, vol_name): pgroup_name = self._get_pgroup_name_from_id(consistencygroup_id) current_array = self._get_current_array() current_array.set_pgroup(pgroup_name, addvollist=[vol_name]) @pure_driver_debug_trace def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" current_array = self._get_current_array() current_array.create_pgroup(self._get_pgroup_name_from_id(group.id)) model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} return model_update def _create_cg_from_cgsnap(self, volumes, snapshots): """Creates a new consistency group from a cgsnapshot. The new volumes will be consistent with the snapshot. """ for volume, snapshot in zip(volumes, snapshots): self.create_volume_from_snapshot(volume, snapshot) def _create_cg_from_cg(self, group, source_group, volumes, source_vols): """Creates a new consistency group from an existing cg. The new volumes will be in a consistent state, but this requires taking a new temporary group snapshot and cloning from that. 
""" pgroup_name = self._get_pgroup_name_from_id(source_group.id) tmp_suffix = '%s-tmp' % uuid.uuid4() tmp_pgsnap_name = '%(pgroup_name)s.%(pgsnap_suffix)s' % { 'pgroup_name': pgroup_name, 'pgsnap_suffix': tmp_suffix, } LOG.debug('Creating temporary Protection Group snapshot %(snap_name)s ' 'while cloning Consistency Group %(source_group)s.', {'snap_name': tmp_pgsnap_name, 'source_group': source_group.id}) current_array = self._get_current_array() current_array.create_pgroup_snapshot(pgroup_name, suffix=tmp_suffix) try: for source_vol, cloned_vol in zip(source_vols, volumes): source_snap_name = self._get_pgroup_vol_snap_name( pgroup_name, tmp_suffix, self._get_vol_name(source_vol) ) cloned_vol_name = self._get_vol_name(cloned_vol) current_array.copy_volume(source_snap_name, cloned_vol_name) self._add_volume_to_consistency_group( group.id, cloned_vol_name ) finally: self._delete_pgsnapshot(tmp_pgsnap_name) @pure_driver_debug_trace def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): self.create_consistencygroup(context, group) if cgsnapshot and snapshots: self._create_cg_from_cgsnap(volumes, snapshots) elif source_cg: self._create_cg_from_cg(group, source_cg, volumes, source_vols) return_volumes = [] for volume in volumes: return_volume = {'id': volume.id, 'status': 'available'} return_volumes.append(return_volume) model_update = {'status': 'available'} return model_update, return_volumes @pure_driver_debug_trace def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" try: pgroup_name = self._get_pgroup_name_from_id(group.id) current_array = self._get_current_array() current_array.destroy_pgroup(pgroup_name) if self.configuration.pure_eradicate_on_delete: current_array.eradicate_pgroup(pgroup_name) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and (ERR_MSG_PENDING_ERADICATION in err.text or ERR_MSG_NOT_EXIST in err.text)): # Treat these as a "success" case since we are trying # to delete them anyway. 
ctxt.reraise = False LOG.warning(_LW("Unable to delete Protection Group: %s"), err.text) volume_updates = [] for volume in volumes: self.delete_volume(volume) volume_updates.append({ 'id': volume.id, 'status': 'deleted' }) model_update = {'status': group['status']} return model_update, volume_updates @pure_driver_debug_trace def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): pgroup_name = self._get_pgroup_name_from_id(group.id) if add_volumes: addvollist = [self._get_vol_name(vol) for vol in add_volumes] else: addvollist = [] if remove_volumes: remvollist = [self._get_vol_name(vol) for vol in remove_volumes] else: remvollist = [] current_array = self._get_current_array() current_array.set_pgroup(pgroup_name, addvollist=addvollist, remvollist=remvollist) return None, None, None @pure_driver_debug_trace def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" cg_id = cgsnapshot.consistencygroup_id pgroup_name = self._get_pgroup_name_from_id(cg_id) pgsnap_suffix = self._get_pgroup_snap_suffix(cgsnapshot) current_array = self._get_current_array() current_array.create_pgroup_snapshot(pgroup_name, suffix=pgsnap_suffix) snapshot_updates = [] for snapshot in snapshots: snapshot_updates.append({ 'id': snapshot.id, 'status': 'available' }) model_update = {'status': 'available'} return model_update, snapshot_updates def _delete_pgsnapshot(self, pgsnap_name): current_array = self._get_current_array() try: # FlashArray.destroy_pgroup is also used for deleting # pgroup snapshots. The underlying REST API is identical. current_array.destroy_pgroup(pgsnap_name) if self.configuration.pure_eradicate_on_delete: current_array.eradicate_pgroup(pgsnap_name) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and (ERR_MSG_PENDING_ERADICATION in err.text or ERR_MSG_NOT_EXIST in err.text)): # Treat these as a "success" case since we are trying # to delete them anyway. ctxt.reraise = False LOG.warning(_LW("Unable to delete Protection Group " "Snapshot: %s"), err.text) @pure_driver_debug_trace def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" pgsnap_name = self._get_pgroup_snap_name(cgsnapshot) self._delete_pgsnapshot(pgsnap_name) snapshot_updates = [] for snapshot in snapshots: snapshot_updates.append({ 'id': snapshot.id, 'status': 'deleted', }) model_update = {'status': cgsnapshot.status} return model_update, snapshot_updates def _validate_manage_existing_ref(self, existing_ref, is_snap=False): """Ensure that an existing_ref is valid and return volume info If the ref is not valid throw a ManageExistingInvalidReference exception with an appropriate error. Will return volume or snapshot information from the array for the object specified by existing_ref. """ if "name" not in existing_ref or not existing_ref["name"]: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("manage_existing requires a 'name'" " key to identify an existing volume.")) if is_snap: # Purity snapshot names are prefixed with the source volume name. 
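# (Illustrative aside, not part of this driver.) With hypothetical
# names, a Purity snapshot reference splits into the source volume name
# and the snapshot suffix on the '.':

ref_name = 'volume-5f8a1e-cinder.snap-1'
vol_part, snap_part = ref_name.split('.')
assert (vol_part, snap_part) == ('volume-5f8a1e-cinder', 'snap-1')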
ref_vol_name, ref_snap_suffix = existing_ref['name'].split('.') else: ref_vol_name = existing_ref['name'] current_array = self._get_current_array() try: volume_info = current_array.get_volume(ref_vol_name, snap=is_snap) if volume_info: if is_snap: for snap in volume_info: if snap['name'] == existing_ref['name']: return snap else: return volume_info except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and ERR_MSG_NOT_EXIST in err.text): ctxt.reraise = False # If the volume information could not be retrieved, raise an # invalid-reference exception. raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("Unable to find Purity ref with name=%s") % ref_vol_name) @pure_driver_debug_trace def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. We expect a volume name in the existing_ref that matches one in Purity. """ self._validate_manage_existing_ref(existing_ref) ref_vol_name = existing_ref['name'] current_array = self._get_current_array() connected_hosts = \ current_array.list_volume_private_connections(ref_vol_name) if len(connected_hosts) > 0: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("%(driver)s manage_existing cannot manage a volume " "connected to hosts. Please disconnect this volume " "from existing hosts before importing." ) % {'driver': self.__class__.__name__}) new_vol_name = self._get_vol_name(volume) LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"), {"ref_name": ref_vol_name, "new_name": new_vol_name}) self._rename_volume_object(ref_vol_name, new_vol_name, raise_not_exist=True) return None @pure_driver_debug_trace def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. We expect a volume name in the existing_ref that matches one in Purity. """ volume_info = self._validate_manage_existing_ref(existing_ref) size = int(math.ceil(float(volume_info["size"]) / units.Gi)) return size def _rename_volume_object(self, old_name, new_name, raise_not_exist=False): """Rename a volume object (which may be a snapshot) in Purity. This will not raise an exception if the object does not exist, unless raise_not_exist is set. """ current_array = self._get_current_array() try: current_array.rename_volume(old_name, new_name) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and ERR_MSG_NOT_EXIST in err.text): ctxt.reraise = raise_not_exist LOG.warning(_LW("Unable to rename %(old_name)s, error " "message: %(error)s"), {"old_name": old_name, "error": err.text}) return new_name @pure_driver_debug_trace def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object.
The volume will be renamed with "-unmanaged" as a suffix. """ vol_name = self._get_vol_name(volume) unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"), {"ref_name": vol_name, "new_name": unmanaged_vol_name}) self._rename_volume_object(vol_name, unmanaged_vol_name) def _verify_manage_snap_api_requirements(self): current_array = self._get_current_array() api_version = current_array.get_rest_version() if api_version not in MANAGE_SNAP_REQUIRED_API_VERSIONS: msg = _('Unable to do manage snapshot operations with Purity REST ' 'API version %(api_version)s, requires ' '%(required_versions)s.') % { 'api_version': api_version, 'required_versions': MANAGE_SNAP_REQUIRED_API_VERSIONS } raise exception.PureDriverException(reason=msg) def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. We expect a snapshot name in the existing_ref that matches one in Purity. """ self._verify_manage_snap_api_requirements() self._validate_manage_existing_ref(existing_ref, is_snap=True) ref_snap_name = existing_ref['name'] new_snap_name = self._get_snap_name(snapshot) LOG.info(_LI("Renaming existing snapshot %(ref_name)s to " "%(new_name)s"), {"ref_name": ref_snap_name, "new_name": new_snap_name}) self._rename_volume_object(ref_snap_name, new_snap_name, raise_not_exist=True) return None def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. We expect a snapshot name in the existing_ref that matches one in Purity. """ self._verify_manage_snap_api_requirements() snap_info = self._validate_manage_existing_ref(existing_ref, is_snap=True) size = int(math.ceil(float(snap_info["size"]) / units.Gi)) return size def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. The snapshot will be renamed with "-unmanaged" as a suffix. """ self._verify_manage_snap_api_requirements() snap_name = self._get_snap_name(snapshot) unmanaged_snap_name = snap_name + UNMANAGED_SUFFIX LOG.info(_LI("Renaming existing snapshot %(ref_name)s to " "%(new_name)s"), {"ref_name": snap_name, "new_name": unmanaged_snap_name}) self._rename_volume_object(snap_name, unmanaged_snap_name) def _get_flasharray(self, san_ip, api_token, rest_version=None, verify_https=None, ssl_cert_path=None): # Older versions of the module (1.4.0) do not support setting ssl certs # TODO(patrickeast): In future releases drop support for 1.4.0 if self._client_version_greater_than([1, 4, 0]): array = purestorage.FlashArray(san_ip, api_token=api_token, rest_version=rest_version, verify_https=verify_https, ssl_cert=ssl_cert_path) else: if verify_https or ssl_cert_path is not None: msg = _('HTTPS certificate verification was requested ' 'but cannot be enabled with purestorage ' 'module version %(version)s. 
Upgrade to a ' 'newer version to enable this feature.') % { 'version': purestorage.VERSION } raise exception.PureDriverException(reason=msg) array = purestorage.FlashArray(san_ip, api_token=api_token, rest_version=rest_version) array_info = array.get() array.array_name = array_info["array_name"] array.array_id = array_info["id"] LOG.debug("connected to %(array_name)s with REST API %(api_version)s", {"array_name": array.array_name, "api_version": array._rest_version}) return array @staticmethod def _client_version_greater_than(version): module_version = [int(v) for v in purestorage.VERSION.split('.')] for limit_version, actual_version in zip(version, module_version): if actual_version > limit_version: return True return False @staticmethod def _get_vol_name(volume): """Return the name of the volume Purity will use.""" return volume["name"] + "-cinder" @staticmethod def _get_snap_name(snapshot): """Return the name of the snapshot that Purity will use.""" return "%s-cinder.%s" % (snapshot["volume_name"], snapshot["name"]) @staticmethod def _get_pgroup_name_from_id(id): return "consisgroup-%s-cinder" % id @staticmethod def _get_pgroup_snap_suffix(cgsnapshot): return "cgsnapshot-%s-cinder" % cgsnapshot.id @classmethod def _get_pgroup_snap_name(cls, cgsnapshot): """Return the name of the pgroup snapshot that Purity will use""" cg_id = cgsnapshot.consistencygroup_id return "%s.%s" % (cls._get_pgroup_name_from_id(cg_id), cls._get_pgroup_snap_suffix(cgsnapshot)) @staticmethod def _get_pgroup_vol_snap_name(pg_name, pgsnap_suffix, volume_name): return "%(pgroup_name)s.%(pgsnap_suffix)s.%(volume_name)s" % { 'pgroup_name': pg_name, 'pgsnap_suffix': pgsnap_suffix, 'volume_name': volume_name, } def _get_pgroup_snap_name_from_snapshot(self, snapshot): """Return the name of the snapshot that Purity will use.""" # TODO(patrickeast): Remove DB calls once the cgsnapshot objects are # available to use and can be associated with the snapshot objects. ctxt = context.get_admin_context() cgsnapshot = self.db.cgsnapshot_get(ctxt, snapshot.cgsnapshot_id) pg_vol_snap_name = "%(group_snap)s.%(volume_name)s-cinder" % { 'group_snap': self._get_pgroup_snap_name(cgsnapshot), 'volume_name': snapshot.volume_name } return pg_vol_snap_name @staticmethod def _generate_purity_host_name(name): """Return a valid Purity host name based on the name passed in.""" if len(name) > 23: name = name[0:23] name = INVALID_CHARACTERS.sub("-", name) name = name.lstrip("-") return "{name}-{uuid}-cinder".format(name=name, uuid=uuid.uuid4().hex) @staticmethod def _connect_host_to_vol(array, host_name, vol_name): connection = None try: connection = array.connect_host(host_name, vol_name) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and ERR_MSG_ALREADY_EXISTS in err.text): # Happens if the volume is already connected to the host. # Treat this as a success. ctxt.reraise = False LOG.debug("Volume connection already exists for Purity " "host with message: %s", err.text) # Get the info for the existing connection. connected_hosts = ( array.list_volume_private_connections(vol_name)) for host_info in connected_hosts: if host_info["host"] == host_name: connection = host_info break if not connection: raise exception.PureDriverException( reason=_("Unable to connect or find connection to host")) return connection def retype(self, context, volume, new_type, diff, host): """Retype from one volume type to another on the same backend. 
For a Pure Storage array there is currently no differentiation between volume types other than some volumes being part of a protection group that is replicated. """ previous_vol_replicated = self._is_volume_replicated_type(volume) new_vol_replicated = False if new_type: specs = new_type.get("extra_specs") if specs and EXTRA_SPECS_REPL_ENABLED in specs: replication_capability = specs[EXTRA_SPECS_REPL_ENABLED] # Do not validate settings, ignore invalid. new_vol_replicated = (replication_capability == "<is> True") if previous_vol_replicated and not new_vol_replicated: # Remove from protection group. self._disable_replication(volume) elif not previous_vol_replicated and new_vol_replicated: # Add to protection group. self._enable_replication(self._get_current_array(), volume) return True, None @pure_driver_debug_trace def _disable_replication(self, volume): """Disable replication on the given volume.""" current_array = self._get_current_array() LOG.debug("Disabling replication for volume %(id)s residing on " "array %(backend_id)s." % {"id": volume["id"], "backend_id": current_array._backend_id}) try: current_array.set_pgroup(self._replication_pg_name, remvollist=([self._get_vol_name(volume)])) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and ERR_MSG_COULD_NOT_BE_FOUND in err.text): ctxt.reraise = False LOG.warning(_LW("Disable replication on volume failed: " "already disabled: %s"), err.text) else: LOG.error(_LE("Disable replication on volume failed with " "message: %s"), err.text) @pure_driver_debug_trace def failover_host(self, context, volumes, secondary_id=None): """Failover backend to a secondary array. This action will not affect the original volumes in any way; they will stay as they are. If a subsequent failover is performed we will simply overwrite the original (now unmanaged) volumes. """ if secondary_id == 'default': # We are going back to the 'original' driver config, just put # our current array back to the primary. if self._failed_over_primary_array: self._set_current_array(self._failed_over_primary_array) return secondary_id, [] else: msg = _('Unable to failback to "default", this can only be ' 'done after a failover has completed.') raise exception.InvalidReplicationTarget(message=msg) current_array = self._get_current_array() LOG.debug("Failover replication for array %(primary)s to " "%(secondary)s." % { "primary": current_array._backend_id, "secondary": secondary_id }) if secondary_id == current_array._backend_id: raise exception.InvalidReplicationTarget( reason=_("Secondary id cannot be the same as the primary array, " "backend_id = %(secondary)s.") % {"secondary": secondary_id} ) secondary_array, pg_snap = self._find_failover_target(secondary_id) LOG.debug("Starting failover from %(primary)s to %(secondary)s", {"primary": current_array.array_name, "secondary": secondary_array.array_name}) # NOTE(patrickeast): This currently requires a call with REST API 1.3. # If we need to, create a temporary FlashArray for this operation.
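# (Illustrative aside, not part of this driver.) A minimal sketch of
# pinning a purestorage client to a specific REST version, which is what
# the fallback below does; the host name and token are hypothetical:

from purestorage import purestorage

target = purestorage.FlashArray('pure-mgmt.example.com',
                                api_token='<api-token>',
                                rest_version='1.3')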
api_version = secondary_array.get_rest_version() LOG.debug("Current REST API for array id %(id)s is %(api_version)s", {"id": secondary_array.array_id, "api_version": api_version}) if api_version != '1.3': target_array = self._get_flasharray( secondary_array._target, api_token=secondary_array._api_token, rest_version='1.3', verify_https=secondary_array._verify_https, ssl_cert_path=secondary_array._ssl_cert ) else: target_array = secondary_array volume_snaps = target_array.get_volume(pg_snap['name'], snap=True, pgroup=True) # We only care about volumes that are in the list we are given. vol_names = set() for vol in volumes: vol_names.add(self._get_vol_name(vol)) for snap in volume_snaps: vol_name = snap['name'].split('.')[-1] if vol_name in vol_names: vol_names.remove(vol_name) LOG.debug('Creating volume %(vol)s from replicated snapshot ' '%(snap)s', {'vol': vol_name, 'snap': snap['name']}) secondary_array.copy_volume(snap['name'], vol_name, overwrite=True) else: LOG.debug('Ignoring unmanaged volume %(vol)s from replicated ' 'snapshot %(snap)s.', {'vol': vol_name, 'snap': snap['name']}) # The only volumes remaining in the vol_names set have been left behind # on the array and should be considered as being in an error state. model_updates = [] for vol in volumes: if self._get_vol_name(vol) in vol_names: model_updates.append({ 'volume_id': vol['id'], 'updates': { 'status': 'error', } }) # After failover we want our current array to be swapped for the # secondary array we just failed over to. self._failed_over_primary_array = self._get_current_array() self._set_current_array(secondary_array) return secondary_array._backend_id, model_updates def _does_pgroup_exist(self, array, pgroup_name): """Return True/False""" try: array.get_pgroup(pgroup_name) return True except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if err.code == 400 and ERR_MSG_NOT_EXIST in err.text: ctxt.reraise = False return False # Any unexpected exception to be handled by caller. @pure_driver_debug_trace @utils.retry(exception.PureDriverException, REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL, REPL_SETTINGS_PROPAGATE_MAX_RETRIES) def _wait_until_target_group_setting_propagates( self, target_array, pgroup_name_on_target): # Wait for pgroup to show up on target array. if self._does_pgroup_exist(target_array, pgroup_name_on_target): return else: raise exception.PureDriverException(message= _('Protection Group not ' 'ready.')) @pure_driver_debug_trace @utils.retry(exception.PureDriverException, REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL, REPL_SETTINGS_PROPAGATE_MAX_RETRIES) def _wait_until_source_array_allowed(self, source_array, pgroup_name): result = source_array.get_pgroup(pgroup_name) if result["targets"][0]["allowed"]: return else: raise exception.PureDriverException(message=_('Replication not ' 'allowed yet.')) def _get_pgroup_name_on_target(self, source_array_name, pgroup_name): return "%s:%s" % (source_array_name, pgroup_name) @pure_driver_debug_trace def _setup_replicated_pgroups(self, primary, secondaries, pg_name, replication_interval, retention_policy): self._create_protection_group_if_not_exist( primary, pg_name) # Apply retention policies to a protection group. # These retention policies will be applied on the replicated # snapshots on the target array. primary.set_pgroup(pg_name, **retention_policy) # Configure replication propagation frequency on a # protection group. 
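# (Illustrative aside, not part of this driver.) With the default config
# options declared earlier in this module, the retention policy applied
# to the protection group expands to these keyword arguments:

retention_policy = {
    'target_all_for': 14400,  # keep every replicated snapshot for 4 hours
    'target_per_day': 3,      # after that, keep 3 snapshots per day...
    'target_days': 7,         # ...for 7 days
}
# Equivalent call, assuming 'primary' and 'pg_name' as in this method:
# primary.set_pgroup(pg_name, **retention_policy)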
primary.set_pgroup(pg_name, replicate_frequency=replication_interval) for target_array in secondaries: try: # Configure PG to replicate to target_array. primary.set_pgroup(pg_name, addtargetlist=[target_array.array_name]) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if err.code == 400 and ( ERR_MSG_ALREADY_INCLUDES in err.text): ctxt.reraise = False LOG.info(_LI("Skipping add target %(target_array)s" " to protection group %(pgname)s" " since it's already added."), {"target_array": target_array.array_name, "pgname": pg_name}) # Wait until "Target Group" setting propagates to target_array. pgroup_name_on_target = self._get_pgroup_name_on_target( primary.array_name, pg_name) for target_array in secondaries: self._wait_until_target_group_setting_propagates( target_array, pgroup_name_on_target) try: # Configure the target_array to allow replication from the # PG on source_array. target_array.set_pgroup(pgroup_name_on_target, allowed=True) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if (err.code == 400 and ERR_MSG_ALREADY_ALLOWED in err.text): ctxt.reraise = False LOG.info(_LI("Skipping allow pgroup %(pgname)s on " "target array %(target_array)s since " "it is already allowed."), {"pgname": pg_name, "target_array": target_array.array_name}) # Wait until source array acknowledges previous operation. self._wait_until_source_array_allowed(primary, pg_name) # Start replication on the PG. primary.set_pgroup(pg_name, replicate_enabled=True) @pure_driver_debug_trace def _generate_replication_retention(self): """Generates replication retention settings in a Purity-compatible format. An example of the settings: target_all_for = 14400 (i.e. 4 hours) target_per_day = 6 target_days = 4 The settings above configure the target array to retain every replicated snapshot from the most recent 4 hours. Beyond the most recent 4 hours, the target will retain 6 snapshots per day for the next 4 days. :return: a dictionary representing replication retention settings """ replication_retention = dict( target_all_for=self._replication_retention_short_term, target_per_day=self._replication_retention_long_term_per_day, target_days=self._replication_retention_long_term ) return replication_retention @pure_driver_debug_trace def _get_latest_replicated_pg_snap(self, target_array, source_array_name, pgroup_name): # Get all protection group snapshots. snap_name = "%s:%s" % (source_array_name, pgroup_name) LOG.debug("Looking for snap %(snap)s on array id %(array_id)s", {"snap": snap_name, "array_id": target_array.array_id}) pg_snaps = target_array.get_pgroup(snap_name, snap=True, transfer=True) LOG.debug("Retrieved snapshots on target %(pg_snaps)s", {"pg_snaps": pg_snaps}) # Only use snapshots that are replicated completely. pg_snaps_filtered = [s for s in pg_snaps if s["progress"] == 1] LOG.debug("Filtered list of snapshots %(pg_snaps_filtered)s", {"pg_snaps_filtered": pg_snaps_filtered}) # Go through the protection group snapshots, latest first, and # stop when we find the required volume snapshot.
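# (Illustrative aside, not part of this driver.) The selection below on
# hypothetical data: only fully transferred snapshots (progress == 1)
# are candidates, and the most recently created one wins:

pg_snaps = [
    {'name': 'src:pg.1', 'created': '2016-01-01T00:00:00Z', 'progress': 1},
    {'name': 'src:pg.2', 'created': '2016-01-02T00:00:00Z', 'progress': 0.5},
    {'name': 'src:pg.3', 'created': '2016-01-01T12:00:00Z', 'progress': 1},
]
candidates = [s for s in pg_snaps if s['progress'] == 1]
candidates.sort(key=lambda s: s['created'], reverse=True)
assert candidates[0]['name'] == 'src:pg.3'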
pg_snaps_filtered.sort(key=lambda x: x["created"], reverse=True) LOG.debug("Sorted list of snapshots %(pg_snaps_filtered)s", {"pg_snaps_filtered": pg_snaps_filtered}) pg_snap = pg_snaps_filtered[0] if pg_snaps_filtered else None LOG.debug("Selecting snapshot %(pg_snap)s for failover.", {"pg_snap": pg_snap}) return pg_snap @pure_driver_debug_trace def _create_protection_group_if_not_exist(self, source_array, pgname): try: source_array.create_pgroup(pgname) except purestorage.PureHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: if err.code == 400 and ERR_MSG_ALREADY_EXISTS in err.text: # Happens if the PG already exists. ctxt.reraise = False LOG.warning(_LW("Skipping creation of PG %s since it " "already exists."), pgname) # We assume PG has already been setup with correct # replication settings. return if err.code == 400 and ( ERR_MSG_PENDING_ERADICATION in err.text): ctxt.reraise = False LOG.warning(_LW("Protection group %s is deleted but not" " eradicated - will recreate."), pgname) source_array.eradicate_pgroup(pgname) source_array.create_pgroup(pgname) def _is_volume_replicated_type(self, volume): ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, volume["volume_type_id"]) replication_flag = False specs = volume_type.get("extra_specs") if specs and EXTRA_SPECS_REPL_ENABLED in specs: replication_capability = specs[EXTRA_SPECS_REPL_ENABLED] # Do not validate settings, ignore invalid. replication_flag = (replication_capability == "<is> True") return replication_flag def _find_failover_target(self, secondary): if not self._replication_target_arrays: raise exception.PureDriverException( reason=_("Unable to find failover target, no " "secondary targets configured.")) secondary_array = None pg_snap = None if secondary: for array in self._replication_target_arrays: if array._backend_id == secondary: secondary_array = array break if not secondary_array: raise exception.InvalidReplicationTarget( reason=_("Unable to determine secondary_array from" " supplied secondary: %(secondary)s.") % {"secondary": secondary} ) pg_snap = self._get_latest_replicated_pg_snap( secondary_array, self._get_current_array().array_name, self._replication_pg_name ) else: LOG.debug('No secondary array id specified, checking all targets.') for array in self._replication_target_arrays: try: secondary_array = array pg_snap = self._get_latest_replicated_pg_snap( secondary_array, self._get_current_array().array_name, self._replication_pg_name ) if pg_snap: break except Exception: LOG.exception(_LE('Error finding replicated pg snapshot ' 'on %(secondary)s.'), {'secondary': array._backend_id}) if not secondary_array: raise exception.PureDriverException( reason=_("Unable to find viable secondary array from " "configured targets: %(targets)s.") % {"targets": six.text_type(self._replication_target_arrays)} ) if not pg_snap: raise exception.PureDriverException( reason=_("Unable to find viable pg snapshot to use for " "failover on selected secondary array: %(id)s.") % {"id": secondary_array._backend_id} ) return secondary_array, pg_snap def _get_current_array(self): return self._array def _set_current_array(self, array): self._array = array class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver): VERSION = "4.0.0" def __init__(self, *args, **kwargs): execute = kwargs.pop("execute", utils.execute) super(PureISCSIDriver, self).__init__(execute=execute, *args, **kwargs) self._storage_protocol = "iSCSI" def _get_host(self, array, connector): """Return dict describing existing Purity 
host object or None.""" hosts = array.list_hosts() for host in hosts: if connector["initiator"] in host["iqn"]: return host return None @pure_driver_debug_trace def initialize_connection(self, volume, connector, initiator_data=None): """Allow connection to connector and return connection info.""" connection = self._connect(volume, connector, initiator_data) target_ports = self._get_target_iscsi_ports() multipath = connector.get("multipath", False) properties = self._build_connection_properties(connection, target_ports, multipath) if self.configuration.use_chap_auth: properties["data"]["auth_method"] = "CHAP" properties["data"]["auth_username"] = connection["auth_username"] properties["data"]["auth_password"] = connection["auth_password"] initiator_update = connection.get("initiator_update", False) if initiator_update: properties["initiator_update"] = initiator_update return properties def _build_connection_properties(self, connection, target_ports, multipath): props = { "driver_volume_type": "iscsi", "data": { "target_discovered": False, "discard": True, }, } port_iter = iter(target_ports) target_luns = [] target_iqns = [] target_portals = [] for port in port_iter: target_luns.append(connection["lun"]) target_iqns.append(port["iqn"]) target_portals.append(port["portal"]) # If we have multiple ports always report them. if target_luns and target_iqns and target_portals: props["data"]["target_luns"] = target_luns props["data"]["target_iqns"] = target_iqns props["data"]["target_portals"] = target_portals return props def _get_target_iscsi_ports(self): """Return list of iSCSI-enabled port descriptions.""" current_array = self._get_current_array() ports = current_array.list_ports() iscsi_ports = [port for port in ports if port["iqn"]] if not iscsi_ports: raise exception.PureDriverException( reason=_("No iSCSI-enabled ports on target array.")) return iscsi_ports @staticmethod def _generate_chap_secret(): return volume_utils.generate_password() @classmethod def _get_chap_credentials(cls, host, data): initiator_updates = None username = host password = None if data: for d in data: if d["key"] == CHAP_SECRET_KEY: password = d["value"] break if not password: password = cls._generate_chap_secret() initiator_updates = { "set_values": { CHAP_SECRET_KEY: password } } return username, password, initiator_updates @utils.synchronized(CONNECT_LOCK_NAME, external=True) def _connect(self, volume, connector, initiator_data): """Connect the host and volume; return dict describing connection.""" iqn = connector["initiator"] if self.configuration.use_chap_auth: (chap_username, chap_password, initiator_update) = \ self._get_chap_credentials(connector['host'], initiator_data) current_array = self._get_current_array() vol_name = self._get_vol_name(volume) host = self._get_host(current_array, connector) if host: host_name = host["name"] LOG.info(_LI("Re-using existing purity host %(host_name)r"), {"host_name": host_name}) if self.configuration.use_chap_auth: if not GENERATED_NAME.match(host_name): LOG.error(_LE("Purity host %(host_name)s is not managed " "by Cinder and can't have CHAP credentials " "modified. 
Remove IQN %(iqn)s from the host " "to resolve this issue."), {"host_name": host_name, "iqn": connector["initiator"]}) raise exception.PureDriverException( reason=_("Unable to re-use a host that is not " "managed by Cinder with use_chap_auth=True,")) elif chap_username is None or chap_password is None: LOG.error(_LE("Purity host %(host_name)s is managed by " "Cinder but CHAP credentials could not be " "retrieved from the Cinder database."), {"host_name": host_name}) raise exception.PureDriverException( reason=_("Unable to re-use host with unknown CHAP " "credentials configured.")) else: host_name = self._generate_purity_host_name(connector["host"]) LOG.info(_LI("Creating host object %(host_name)r with IQN:" " %(iqn)s."), {"host_name": host_name, "iqn": iqn}) current_array.create_host(host_name, iqnlist=[iqn]) if self.configuration.use_chap_auth: current_array.set_host(host_name, host_user=chap_username, host_password=chap_password) connection = self._connect_host_to_vol(current_array, host_name, vol_name) if self.configuration.use_chap_auth: connection["auth_username"] = chap_username connection["auth_password"] = chap_password if initiator_update: connection["initiator_update"] = initiator_update return connection class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver): VERSION = "2.0.0" def __init__(self, *args, **kwargs): execute = kwargs.pop("execute", utils.execute) super(PureFCDriver, self).__init__(execute=execute, *args, **kwargs) self._storage_protocol = "FC" self._lookup_service = fczm_utils.create_lookup_service() def _get_host(self, array, connector): """Return dict describing existing Purity host object or None.""" hosts = array.list_hosts() for host in hosts: for wwn in connector["wwpns"]: if wwn in str(host["wwn"]).lower(): return host @staticmethod def _get_array_wwns(array): """Return list of wwns from the array""" ports = array.list_ports() return [port["wwn"] for port in ports if port["wwn"]] @fczm_utils.AddFCZone @pure_driver_debug_trace def initialize_connection(self, volume, connector, initiator_data=None): """Allow connection to connector and return connection info.""" current_array = self._get_current_array() connection = self._connect(volume, connector) target_wwns = self._get_array_wwns(current_array) init_targ_map = self._build_initiator_target_map(target_wwns, connector) properties = { "driver_volume_type": "fibre_channel", "data": { 'target_discovered': True, "target_lun": connection["lun"], "target_wwn": target_wwns, 'initiator_target_map': init_targ_map, "discard": True, } } return properties @utils.synchronized(CONNECT_LOCK_NAME, external=True) def _connect(self, volume, connector): """Connect the host and volume; return dict describing connection.""" wwns = connector["wwpns"] current_array = self._get_current_array() vol_name = self._get_vol_name(volume) host = self._get_host(current_array, connector) if host: host_name = host["name"] LOG.info(_LI("Re-using existing purity host %(host_name)r"), {"host_name": host_name}) else: host_name = self._generate_purity_host_name(connector["host"]) LOG.info(_LI("Creating host object %(host_name)r with WWN:" " %(wwn)s."), {"host_name": host_name, "wwn": wwns}) current_array.create_host(host_name, wwnlist=wwns) return self._connect_host_to_vol(current_array, host_name, vol_name) def _build_initiator_target_map(self, target_wwns, connector): """Build the target_wwns and the initiator target map.""" init_targ_map = {} if self._lookup_service: # use FC san lookup to determine which NSPs to use # for the new 
VLUN. dev_map = self._lookup_service.get_device_mapping_from_network( connector['wwpns'], target_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) else: init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns) return init_targ_map @fczm_utils.RemoveFCZone @pure_driver_debug_trace def terminate_connection(self, volume, connector, **kwargs): """Terminate connection.""" current_array = self._get_current_array() no_more_connections = self._disconnect(current_array, volume, connector, **kwargs) properties = {"driver_volume_type": "fibre_channel", "data": {}} if no_more_connections: target_wwns = self._get_array_wwns(current_array) init_targ_map = self._build_initiator_target_map(target_wwns, connector) properties["data"] = {"target_wwn": target_wwns, "initiator_target_map": init_targ_map} return properties cinder-8.0.0/cinder/volume/drivers/scality.py0000664000567000056710000002646312701406250022407 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Scality # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scality SOFS Volume Driver. """ import errno import os from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils import six from six.moves import urllib from cinder import exception from cinder.i18n import _, _LI from cinder.image import image_utils from cinder import utils from cinder.volume.drivers import remotefs as remotefs_drv from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('scality_sofs_config', help='Path or URL to Scality SOFS configuration file'), cfg.StrOpt('scality_sofs_mount_point', default='$state_path/scality', help='Base dir where Scality SOFS shall be mounted'), cfg.StrOpt('scality_sofs_volume_dir', default='cinder/volumes', help='Path from Scality SOFS root to volume dir'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class ScalityDriver(remotefs_drv.RemoteFSSnapDriver): """Scality SOFS cinder driver. Creates sparse files on SOFS for hypervisors to use as block devices. 
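
    Because each volume is a plain file under the SOFS mount, most of the
    data-path work (format detection, resize, copy-from-snapshot) is
    delegated to qemu-img through cinder.image.image_utils.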
""" driver_volume_type = 'scality' driver_prefix = 'scality_sofs' volume_backend_name = 'Scality_SOFS' VERSION = '2.0.0' def __init__(self, *args, **kwargs): super(ScalityDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) self.sofs_mount_point = self.configuration.scality_sofs_mount_point self.sofs_config = self.configuration.scality_sofs_config self.sofs_rel_volume_dir = self.configuration.scality_sofs_volume_dir self.sofs_abs_volume_dir = os.path.join(self.sofs_mount_point, self.sofs_rel_volume_dir) # The following config flag is used by RemoteFSDriver._do_create_volume # We want to use sparse file (ftruncated) without exposing this # as a config switch to customers. self.configuration.scality_sofs_sparsed_volumes = True def check_for_setup_error(self): """Sanity checks before attempting to mount SOFS.""" # config is mandatory if not self.sofs_config: msg = _("Value required for 'scality_sofs_config'") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # config can be a file path or a URL, check it config = self.sofs_config if urllib.parse.urlparse(self.sofs_config).scheme == '': # turn local path into URL config = 'file://%s' % self.sofs_config try: urllib.request.urlopen(config, timeout=5).close() except (urllib.error.URLError, urllib.error.HTTPError) as e: msg = _("Can't access 'scality_sofs_config'" ": %s") % six.text_type(e) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # mount.sofs must be installed if not os.access('/sbin/mount.sofs', os.X_OK): msg = _("Cannot execute /sbin/mount.sofs") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _load_shares_config(self, share_file=None): self.shares[self.sofs_rel_volume_dir] = None def _get_mount_point_for_share(self, share=None): # The _qemu_img_info_base() method from the RemoteFSSnapDriver class # expects files (volume) to be inside a subdir of the mount point. # So we have to append a dummy subdir. return self.sofs_abs_volume_dir + "/00" def _sofs_is_mounted(self): """Check if SOFS is already mounted at the expected location.""" mount_path = self.sofs_mount_point.rstrip('/') for mount in volume_utils.read_proc_mounts(): parts = mount.split() if (parts[0].endswith('fuse') and parts[1].rstrip('/') == mount_path): return True return False @lockutils.synchronized('mount-sofs', 'cinder-sofs', external=True) def _ensure_share_mounted(self, share=None): """Mount SOFS if need be.""" fileutils.ensure_tree(self.sofs_mount_point) if not self._sofs_is_mounted(): self._execute('mount', '-t', 'sofs', self.sofs_config, self.sofs_mount_point, run_as_root=True) # Check whether the mount command succeeded if not self._sofs_is_mounted(): msg = _("Cannot mount Scality SOFS, check syslog for errors") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) fileutils.ensure_tree(self.sofs_abs_volume_dir) # We symlink the '00' subdir to its parent dir to maintain # compatibility with previous version of this driver. 
try: os.symlink(".", self._get_mount_point_for_share()) except OSError as exc: if exc.errno == errno.EEXIST: if not os.path.islink(self._get_mount_point_for_share()): raise else: raise def _ensure_shares_mounted(self): self._ensure_share_mounted() self._mounted_shares = [self.sofs_rel_volume_dir] def _find_share(self, volume_size_for): try: return self._mounted_shares[0] except IndexError: raise exception.RemoteFSNoSharesMounted() def get_volume_stats(self, refresh=False): """Return the current state of the volume service.""" stats = { 'vendor_name': 'Scality', 'driver_version': self.VERSION, 'storage_protocol': 'scality', 'total_capacity_gb': 'infinite', 'free_capacity_gb': 'infinite', 'reserved_percentage': 0, } backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or self.volume_backend_name return stats @remotefs_drv.locked_volume_id_operation def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" # Find active qcow2 file active_file = self.get_active_image_from_info(volume) path = '%s/%s' % (self._get_mount_point_for_share(), active_file) sofs_rel_path = os.path.join(self.sofs_rel_volume_dir, "00", volume['name']) data = {'export': volume['provider_location'], 'name': active_file, 'sofs_path': sofs_rel_path} # Test file for raw vs. qcow2 format info = self._qemu_img_info(path, volume['name']) data['format'] = info.file_format if data['format'] not in ['raw', 'qcow2']: msg = _('%s must be a valid raw or qcow2 image.') % path raise exception.InvalidVolume(msg) return { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self.sofs_mount_point } def _qemu_img_info(self, path, volume_name): return super(ScalityDriver, self)._qemu_img_info_base( path, volume_name, self.sofs_abs_volume_dir) @remotefs_drv.locked_volume_id_operation def extend_volume(self, volume, size_gb): volume_path = self.local_path(volume) info = self._qemu_img_info(volume_path, volume['name']) backing_fmt = info.file_format if backing_fmt not in ['raw', 'qcow2']: msg = _('Unrecognized backing format: %s') raise exception.InvalidVolume(msg % backing_fmt) # qemu-img can resize both raw and qcow2 files image_utils.resize_image(volume_path, size_gb) def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume. This is done with a qemu-img convert to raw/qcow2 from the snapshot qcow2. """ info_path = self._local_path_volume_info(snapshot['volume']) # For BC compat' with version < 2 of this driver try: snap_info = self._read_info_file(info_path) except IOError as exc: if exc.errno != errno.ENOENT: raise else: path_to_snap_img = self.local_path(snapshot) else: vol_path = self._local_volume_dir(snapshot['volume']) forward_file = snap_info[snapshot['id']] forward_path = os.path.join(vol_path, forward_file) # Find the file which backs this file, which represents the point # when this snapshot was created. 
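            # In a qcow2 chain the overlay created at snapshot time only
            # accumulates later writes; its 'backing_file' is the image
            # frozen at that point, which is what gets converted below.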
img_info = self._qemu_img_info(forward_path, snapshot['volume']['name']) path_to_snap_img = os.path.join(vol_path, img_info.backing_file) LOG.debug("will copy from snapshot at %s", path_to_snap_img) path_to_new_vol = self.local_path(volume) out_format = 'raw' image_utils.convert_image(path_to_snap_img, path_to_new_vol, out_format, run_as_root=self._execute_as_root) self._set_rw_permissions_for_all(path_to_new_vol) image_utils.resize_image(path_to_new_vol, volume_size) def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup['volume_id']) volume_local_path = self.local_path(volume) LOG.info(_LI('Begin backup of volume %s.'), volume['name']) qemu_img_info = image_utils.qemu_img_info(volume_local_path) if qemu_img_info.file_format != 'raw': msg = _('Backup is only supported for raw-formatted ' 'SOFS volumes.') raise exception.InvalidVolume(msg) if qemu_img_info.backing_file is not None: msg = _('Backup is only supported for SOFS volumes ' 'without backing file.') raise exception.InvalidVolume(msg) with utils.temporary_chown(volume_local_path): with open(volume_local_path) as volume_file: backup_service.backup(backup, volume_file) def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume.""" LOG.info(_LI('Restoring backup %(backup)s to volume %(volume)s.'), {'backup': backup['id'], 'volume': volume['name']}) volume_local_path = self.local_path(volume) with utils.temporary_chown(volume_local_path): with open(volume_local_path, 'wb') as volume_file: backup_service.restore(backup, volume['id'], volume_file) cinder-8.0.0/cinder/volume/drivers/infortrend/0000775000567000056710000000000012701406543022531 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/infortrend/__init__.py0000664000567000056710000000000012701406250024623 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py0000664000567000056710000002357412701406250027304 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ iSCSI Driver for Infortrend Eonstor based on CLI. """ from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli LOG = logging.getLogger(__name__) class InfortrendCLIISCSIDriver(driver.ISCSIDriver): """Infortrend iSCSI Driver for Eonstor DS using CLI. Version history: 1.0.0 - Initial driver 1.0.1 - Support DS4000 """ def __init__(self, *args, **kwargs): super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs) self.common = common_cli.InfortrendCommon( 'iSCSI', configuration=self.configuration) self.VERSION = self.common.VERSION def check_for_setup_error(self): LOG.debug('check_for_setup_error start') self.common.check_for_setup_error() def create_volume(self, volume): """Creates a volume. 
Can optionally return a Dictionary of changes to the volume object to be persisted. """ LOG.debug('create_volume volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug( 'create_volume_from_snapshot volume id=%(volume_id)s ' 'snapshot id=%(snapshot_id)s', { 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) return self.common.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" LOG.debug( 'create_cloned_volume volume id=%(volume_id)s ' 'src_vref provider_location=%(provider_location)s', { 'volume_id': volume['id'], 'provider_location': src_vref['provider_location']}) return self.common.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend a volume.""" LOG.debug( 'extend_volume volume id=%(volume_id)s new size=%(size)s', { 'volume_id': volume['id'], 'size': new_size}) self.common.extend_volume(volume, new_size) def delete_volume(self, volume): """Deletes a volume.""" LOG.debug('delete_volume volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.delete_volume(volume) def migrate_volume(self, ctxt, volume, host): """Migrate the volume to the specified host. Returns a boolean indicating whether the migration occurred, as well as model_update. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', { 'volume_id': volume['id'], 'host': host['host']}) return self.common.migrate_volume(volume, host) def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.debug( 'create_snapshot snapshot id=%(snapshot_id)s ' 'volume_id=%(volume_id)s', { 'snapshot_id': snapshot['id'], 'volume_id': snapshot['volume_id']}) return self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.debug( 'delete_snapshot snapshot id=%(snapshot_id)s ' 'volume_id=%(volume_id)s', { 'snapshot_id': snapshot['id'], 'volume_id': snapshot['volume_id']}) self.common.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" pass def create_export(self, context, volume, connector): """Exports the volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ LOG.debug( 'create_export volume provider_location=%(provider_location)s', { 'provider_location': volume['provider_location']}) return self.common.create_export(context, volume) def remove_export(self, context, volume): """Removes an export for a volume.""" pass def initialize_connection(self, volume, connector): """Initializes the connection and returns connection information. The iscsi driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined in _get_iscsi_properties. 
        Example return value::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.1:3260',
                    'volume_id': 1,
                }
            }
        """
        LOG.debug(
            'initialize_connection volume id=%(volume_id)s '
            'connector initiator=%(initiator)s', {
                'volume_id': volume['id'],
                'initiator': connector['initiator']})
        return self.common.initialize_connection(volume, connector)

    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        LOG.debug('terminate_connection volume id=%(volume_id)s', {
            'volume_id': volume['id']})
        self.common.terminate_connection(volume, connector)

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, update the stats first.
        """
        LOG.debug('get_volume_stats refresh=%(refresh)s', {
            'refresh': refresh})
        return self.common.get_volume_stats(refresh)

    def manage_existing(self, volume, existing_ref):
        """Manage an existing lun in the array.

        The lun should be in a manageable pool backend, otherwise an error
        is returned.

        Rename the backend storage object so that it matches
        volume['name'], which is how drivers traditionally map between a
        cinder volume and the associated backend storage object.

        existing_ref: {'source-id': lun_id}
        """
        LOG.debug(
            'manage_existing volume id=%(volume_id)s '
            'existing_ref source id=%(source_id)s', {
                'volume_id': volume['id'],
                'source_id': existing_ref['source-id']})
        return self.common.manage_existing(volume, existing_ref)

    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        Does not delete the underlying backend storage object.

        :param volume: Cinder volume to unmanage
        """
        LOG.debug('unmanage volume id=%(volume_id)s', {
            'volume_id': volume['id']})
        self.common.unmanage(volume)

    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing.

        When calculating the size, round up to the next GB.
        """
        LOG.debug(
            'manage_existing_get_size volume id=%(volume_id)s '
            'existing_ref source id=%(source_id)s', {
                'volume_id': volume['id'],
                'source_id': existing_ref['source-id']})
        return self.common.manage_existing_get_size(volume, existing_ref)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        """
        LOG.debug(
            'retype volume id=%(volume_id)s new_type id=%(type_id)s', {
                'volume_id': volume['id'],
                'type_id': new_type['id']})
        return self.common.retype(ctxt, volume, new_type, diff, host)

    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status):
        """Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ LOG.debug( 'update migrated volume original volume id= %(volume_id)s ' 'new volume id=%(new_volume_id)s', { 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) return self.common.update_migrated_volume(ctxt, volume, new_volume, original_volume_status) cinder-8.0.0/cinder/volume/drivers/infortrend/eonstor_ds_cli/0000775000567000056710000000000012701406543025537 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/infortrend/eonstor_ds_cli/__init__.py0000664000567000056710000000000012701406250027631 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py0000664000567000056710000004402112701406250030403 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Infortrend basic CLI factory. """ import abc from oslo_concurrency import processutils from oslo_log import log as logging import six from cinder.i18n import _LE from cinder import utils LOG = logging.getLogger(__name__) DEFAULT_RETRY_TIME = 5 def retry_cli(func): def inner(self, *args, **kwargs): total_retry_time = self.cli_retry_time if total_retry_time is None: total_retry_time = DEFAULT_RETRY_TIME retry_time = 0 while retry_time < total_retry_time: rc, out = func(self, *args, **kwargs) retry_time += 1 if rc == 0: break LOG.error(_LE( 'Retry %(retry)s times: %(method)s Failed ' '%(rc)s: %(reason)s'), { 'retry': retry_time, 'method': self.__class__.__name__, 'rc': rc, 'reason': out}) LOG.debug( 'Method: %(method)s Return Code: %(rc)s ' 'Output: %(out)s', { 'method': self.__class__.__name__, 'rc': rc, 'out': out}) return rc, out return inner def util_execute(command_line): content, err = utils.execute(command_line, shell=True) return content def strip_empty_in_list(list): result = [] for entry in list: entry = entry.strip() if entry != "": result.append(entry) return result def table_to_dict(table): tableHeader = table[0].split(" ") tableHeaderList = strip_empty_in_list(tableHeader) result = [] for i in range(len(table) - 2): if table[i + 2].strip() == "": break resultEntry = {} tableEntry = table[i + 2].split(" ") tableEntryList = strip_empty_in_list(tableEntry) for key, value in zip(tableHeaderList, tableEntryList): resultEntry[key] = value result.append(resultEntry) return result def content_lines_to_dict(content_lines): result = [] resultEntry = {} for content_line in content_lines: if content_line.strip() == "": result.append(resultEntry) resultEntry = {} continue split_entry = content_line.strip().split(": ", 1) resultEntry[split_entry[0]] = split_entry[1] return result @six.add_metaclass(abc.ABCMeta) class BaseCommand(object): """The BaseCommand abstract 
class.""" def __init__(self): super(BaseCommand, self).__init__() @abc.abstractmethod def execute(self, *args, **kwargs): pass class ExecuteCommand(BaseCommand): """The Common ExecuteCommand.""" def __init__(self, cli_conf): super(ExecuteCommand, self).__init__() self.cli_retry_time = cli_conf.get('cli_retry_time') @retry_cli def execute(self, *args, **kwargs): result = None rc = 0 try: result, err = utils.execute(*args, **kwargs) except processutils.ProcessExecutionError as pe: rc = pe.exit_code result = pe.stdout result = result.replace('\n', '\\n') LOG.error(_LE( 'Error on execute command. ' 'Error code: %(exit_code)d Error msg: %(result)s'), { 'exit_code': pe.exit_code, 'result': result}) return rc, result class CLIBaseCommand(BaseCommand): """The CLIBaseCommand class.""" def __init__(self, cli_conf): super(CLIBaseCommand, self).__init__() self.java = "java -jar" self.execute_file = cli_conf.get('path') self.ip = cli_conf.get('ip') self.password = cli_conf.get('password') self.cli_retry_time = cli_conf.get('cli_retry_time') self.command = "" self.parameters = () self.command_line = "" def _generate_command(self, parameters): """Generate execute Command. use java, execute, command, parameters.""" self.parameters = parameters parameters_line = ' '.join(parameters) if self.password: parameters_line = 'password=%s %s' % ( self.password, parameters_line) self.command_line = "{0} {1} {2} {3} {4}".format( self.java, self.execute_file, self.ip, self.command, parameters_line) return self.command_line def _parser(self, content=None): """The parser to parse command result. :param content: The parse Content :returns: parse result """ content = content.replace("\r", "") content = content.replace("\\/-", "") content = content.strip() LOG.debug(content) if content is not None: content_lines = content.split("\n") rc, out = self._parse_return(content_lines) if rc != 0: return rc, out else: return rc, content_lines return -1, None @retry_cli def execute(self, *args, **kwargs): command_line = self._generate_command(args) LOG.debug('Executing: %(command)s', {'command': command_line}) rc = 0 result = None try: content = self._execute(command_line) rc, result = self._parser(content) except processutils.ProcessExecutionError as pe: rc = -2 # prevent confusing with cli real rc result = pe.stdout result = result.replace('\n', '\\n') LOG.error(_LE( 'Error on execute %(command)s. ' 'Error code: %(exit_code)d Error msg: %(result)s'), { 'command': command_line, 'exit_code': pe.exit_code, 'result': result}) return rc, result def _execute(self, command_line): return util_execute(command_line) def set_ip(self, ip): """Set the Raid's ip.""" self.ip = ip def _parse_return(self, content_lines): """Get the end of command line result.""" rc = 0 return_value = content_lines[-1].strip().split(' ', 1)[1] return_cli_result = content_lines[-2].strip().split(' ', 1)[1] rc = int(return_value, 16) return rc, return_cli_result class CreateLD(CLIBaseCommand): """The Create LD Command.""" def __init__(self, *args, **kwargs): super(CreateLD, self).__init__(*args, **kwargs) self.command = "create ld" class CreateLV(CLIBaseCommand): """The Create LV Command.""" def __init__(self, *args, **kwargs): super(CreateLV, self).__init__(*args, **kwargs) self.command = "create lv" class CreatePartition(CLIBaseCommand): """Create Partition. 
create part [LV-ID] [name] [size={partition-size}] [min={minimal-reserve-size}] [init={switch}] [tier={tier-level-list}] """ def __init__(self, *args, **kwargs): super(CreatePartition, self).__init__(*args, **kwargs) self.command = "create part" class DeletePartition(CLIBaseCommand): """Delete Partition. delete part [partition-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeletePartition, self).__init__(*args, **kwargs) self.command = "delete part" class SetPartition(CLIBaseCommand): """Set Partition. set part [partition-ID] [name={partition-name}] [min={minimal-reserve-size}] set part expand [partition-ID] [size={expand-size}] set part purge [partition-ID] [number] [rule-type] set part reclaim [partition-ID] """ def __init__(self, *args, **kwargs): super(SetPartition, self).__init__(*args, **kwargs) self.command = "set part" class CreateMap(CLIBaseCommand): """Map the Partition on the channel. create map [part] [partition-ID] [Channel-ID] [Target-ID] [LUN-ID] [assign={assign-to}] """ def __init__(self, *args, **kwargs): super(CreateMap, self).__init__(*args, **kwargs) self.command = "create map" class DeleteMap(CLIBaseCommand): """Unmap the Partition on the channel. delete map [part] [partition-ID] [Channel-ID] [Target-ID] [LUN-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteMap, self).__init__(*args, **kwargs) self.command = "delete map" class CreateSnapshot(CLIBaseCommand): """Create partition's Snapshot. create si [part] [partition-ID] """ def __init__(self, *args, **kwargs): super(CreateSnapshot, self).__init__(*args, **kwargs) self.command = "create si" class DeleteSnapshot(CLIBaseCommand): """Delete partition's Snapshot. delete si [snapshot-image-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteSnapshot, self).__init__(*args, **kwargs) self.command = "delete si" class CreateReplica(CLIBaseCommand): """Create partition or snapshot's replica. create replica [name] [part | si] [source-volume-ID] [part] [target-volume-ID] [type={replication-mode}] [priority={level}] [desc={description}] [incremental={switch}] [timeout={value}] [compression={switch}] """ def __init__(self, *args, **kwargs): super(CreateReplica, self).__init__(*args, **kwargs) self.command = "create replica" class DeleteReplica(CLIBaseCommand): """Delete and terminate specific replication job. delete replica [volume-pair-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteReplica, self).__init__(*args, **kwargs) self.command = "delete replica" class CreateIQN(CLIBaseCommand): """Create host iqn for CHAP or lun filter. create iqn [IQN] [IQN-alias-name] [user={username}] [password={secret}] [target={name}] [target-password={secret}] [ip={ip-address}] [mask={netmask-ip}] """ def __init__(self, *args, **kwargs): super(CreateIQN, self).__init__(*args, **kwargs) self.command = "create iqn" class DeleteIQN(CLIBaseCommand): """Delete host iqn by name. delete iqn [name] """ def __init__(self, *args, **kwargs): super(DeleteIQN, self).__init__(*args, **kwargs) self.command = "delete iqn" class ShowCommand(CLIBaseCommand): """Basic Show Command.""" def __init__(self, *args, **kwargs): super(ShowCommand, self).__init__(*args, **kwargs) self.param_detail = "-l" self.default_type = "table" self.start_key = "" def _parser(self, content=None): """Parse Table or Detail format into dict. 
# Table format ID Name LD-amount ---------------------- 123 LV-1 1 # Result { 'ID': '123', 'Name': 'LV-1', 'LD-amount': '1' } # Detail format ID: 5DE94FF775D81C30 Name: LV-1 LD-amount: 1 # Result { 'ID': '123', 'Name': 'LV-1', 'LD-amount': '1' } :param content: The parse Content. :returns: parse result """ rc, out = super(ShowCommand, self)._parser(content) # Error. if rc != 0: return rc, out # No content. if len(out) < 6: return rc, [] detect_type = self.detect_type() # Show detail content. if detect_type == "list": start_id = self.detect_detail_start_index(out) if start_id < 0: return rc, [] result = content_lines_to_dict(out[start_id:-2]) else: start_id = self.detect_table_start_index(out) if start_id < 0: return rc, [] result = table_to_dict(out[start_id:-3]) return rc, result def detect_type(self): if self.param_detail in self.parameters: detect_type = "list" else: detect_type = self.default_type return detect_type def detect_table_start_index(self, content): for i in range(3, len(content)): key = content[i].strip().split(' ') if self.start_key in key[0].strip(): return i return -1 def detect_detail_start_index(self, content): for i in range(3, len(content)): split_entry = content[i].strip().split(' ') if len(split_entry) >= 2 and ':' in split_entry[0]: return i return -1 class ShowLD(ShowCommand): """Show LD. show ld [index-list] """ def __init__(self, *args, **kwargs): super(ShowLD, self).__init__(*args, **kwargs) self.command = "show ld" class ShowLV(ShowCommand): """Show LV. show lv [lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowLV, self).__init__(*args, **kwargs) self.command = "show lv" self.start_key = "ID" def detect_table_start_index(self, content): if "tier" in self.parameters: self.start_key = "LV-Name" for i in range(3, len(content)): key = content[i].strip().split(' ') if self.start_key in key[0].strip(): return i return -1 class ShowPartition(ShowCommand): """Show Partition. show part [part={partition-IDs} | lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowPartition, self).__init__(*args, **kwargs) self.command = "show part" self.start_key = "ID" class ShowSnapshot(ShowCommand): """Show Snapshot. show si [si={snapshot-image-IDs} | part={partition-IDs} | lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowSnapshot, self).__init__(*args, **kwargs) self.command = "show si" self.start_key = "Index" class ShowDevice(ShowCommand): """Show Device. show device """ def __init__(self, *args, **kwargs): super(ShowDevice, self).__init__(*args, **kwargs) self.command = "show device" self.start_key = "Index" class ShowChannel(ShowCommand): """Show Channel. show channel """ def __init__(self, *args, **kwargs): super(ShowChannel, self).__init__(*args, **kwargs) self.command = "show channel" self.start_key = "Ch" class ShowDisk(ShowCommand): """The Show Disk Command. show disk [disk-index-list | channel={ch}] """ def __init__(self, *args, **kwargs): super(ShowDisk, self).__init__(*args, **kwargs) self.command = "show disk" class ShowMap(ShowCommand): """Show Map. show map [part={partition-IDs} | channel={channel-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowMap, self).__init__(*args, **kwargs) self.command = "show map" self.start_key = "Ch" class ShowNet(ShowCommand): """Show IP network. show net [id={channel-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowNet, self).__init__(*args, **kwargs) self.command = "show net" self.start_key = "ID" class ShowLicense(ShowCommand): """Show License. 
show license """ def __init__(self, *args, **kwargs): super(ShowLicense, self).__init__(*args, **kwargs) self.command = "show license" self.start_key = "License" def _parser(self, content=None): """Parse License format. # License format License Amount(Partition/Subsystem) Expired ------------------------------------------------ EonPath --- True # Result { 'EonPath': { 'Amount': '---', 'Support': True } } :param content: The parse Content. :returns: parse result """ rc, out = super(ShowLicense, self)._parser(content) if rc != 0: return rc, out if len(out) > 0: result = {} for entry in out: if entry['Expired'] == '---' or entry['Expired'] == 'Expired': support = False else: support = True result[entry['License']] = { 'Amount': entry['Amount(Partition/Subsystem)'], 'Support': support } return rc, result return rc, [] class ShowReplica(ShowCommand): """Show information of all replication jobs or specific job. show replica [id={volume-pair-IDs}] [-l] id={volume-pair-IDs} """ def __init__(self, *args, **kwargs): super(ShowReplica, self).__init__(*args, **kwargs) self.command = 'show replica' class ShowWWN(ShowCommand): """Show Fibre network. show wwn """ def __init__(self, *args, **kwargs): super(ShowWWN, self).__init__(*args, **kwargs) self.command = "show wwn" self.start_key = "CH" class ShowIQN(ShowCommand): """Show iSCSI initiator IQN which is set by create iqn. show iqn """ LIST_START_LINE = "List of initiator IQN(s):" def __init__(self, *args, **kwargs): super(ShowIQN, self).__init__(*args, **kwargs) self.command = "show iqn" self.default_type = "list" def detect_detail_start_index(self, content): for i in range(3, len(content)): if content[i].strip() == self.LIST_START_LINE: return i + 2 return -1 cinder-8.0.0/cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py0000664000567000056710000020553312701406250030233 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Infortrend Common CLI. """ import math import time from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import timeutils from oslo_utils import units from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume.drivers.infortrend.eonstor_ds_cli import cli_factory as cli from cinder.volume.drivers.san import san from cinder.volume import volume_types from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) infortrend_esds_opts = [ cfg.StrOpt('infortrend_pools_name', default='', help='Infortrend raid pool name list. ' 'It is separated with comma.'), cfg.StrOpt('infortrend_cli_path', default='/opt/bin/Infortrend/raidcmd_ESDS10.jar', help='The Infortrend CLI absolute path. ' 'By default, it is at ' '/opt/bin/Infortrend/raidcmd_ESDS10.jar'), cfg.IntOpt('infortrend_cli_max_retries', default=5, help='Maximum retry time for cli. 
Default is 5.'), cfg.IntOpt('infortrend_cli_timeout', default=30, help='Default timeout for CLI copy operations in minutes. ' 'Support: migrate volume, create cloned volume and ' 'create volume from snapshot. ' 'By Default, it is 30 minutes.'), cfg.StrOpt('infortrend_slots_a_channels_id', default='0,1,2,3,4,5,6,7', help='Infortrend raid channel ID list on Slot A ' 'for OpenStack usage. It is separated with comma. ' 'By default, it is the channel 0~7.'), cfg.StrOpt('infortrend_slots_b_channels_id', default='0,1,2,3,4,5,6,7', help='Infortrend raid channel ID list on Slot B ' 'for OpenStack usage. It is separated with comma. ' 'By default, it is the channel 0~7.'), ] infortrend_esds_extra_opts = [ cfg.StrOpt('infortrend_provisioning', default='full', help='Let the volume use specific provisioning. ' 'By default, it is the full provisioning. ' 'The supported options are full or thin.'), cfg.StrOpt('infortrend_tiering', default='0', help='Let the volume use specific tiering level. ' 'By default, it is the level 0. ' 'The supported levels are 0,2,3,4.'), ] CONF = cfg.CONF CONF.register_opts(infortrend_esds_opts) CONF.register_opts(infortrend_esds_extra_opts) CLI_RC_FILTER = { 'CreatePartition': {'error': _('Failed to create partition.')}, 'DeletePartition': {'error': _('Failed to delete partition.')}, 'SetPartition': {'error': _('Failed to set partition.')}, 'CreateMap': { 'warning': {20: _LW('The MCS Channel is grouped.')}, 'error': _('Failed to create map.'), }, 'DeleteMap': { 'warning': {11: _LW('No mapping.')}, 'error': _('Failed to delete map.'), }, 'CreateSnapshot': {'error': _('Failed to create snapshot.')}, 'DeleteSnapshot': {'error': _('Failed to delete snapshot.')}, 'CreateReplica': {'error': _('Failed to create replica.')}, 'DeleteReplica': {'error': _('Failed to delete replica.')}, 'CreateIQN': { 'warning': {20: _LW('IQN already existed.')}, 'error': _('Failed to create iqn.'), }, 'DeleteIQN': { 'warning': { 20: _LW('IQN has been used to create map.'), 11: _LW('No such host alias name.'), }, 'error': _('Failed to delete iqn.'), }, 'ShowLV': {'error': _('Failed to get lv info.')}, 'ShowPartition': {'error': _('Failed to get partition info.')}, 'ShowSnapshot': {'error': _('Failed to get snapshot info.')}, 'ShowDevice': {'error': _('Failed to get device info.')}, 'ShowChannel': {'error': _('Failed to get channel info.')}, 'ShowMap': {'error': _('Failed to get map info.')}, 'ShowNet': {'error': _('Failed to get network info.')}, 'ShowLicense': {'error': _('Failed to get license info.')}, 'ShowReplica': {'error': _('Failed to get replica info.')}, 'ShowWWN': {'error': _('Failed to get wwn info.')}, 'ShowIQN': {'error': _('Failed to get iqn info.')}, 'ExecuteCommand': {'error': _('Failed to execute common command.')}, } def log_func(func): def inner(self, *args, **kwargs): LOG.debug('Entering: %(method)s', {'method': func.__name__}) start = timeutils.utcnow() ret = func(self, *args, **kwargs) end = timeutils.utcnow() LOG.debug( 'Leaving: %(method)s, ' 'Spent: %(time)s sec, ' 'Return: %(ret)s.', { 'method': func.__name__, 'time': timeutils.delta_seconds(start, end), 'ret': ret}) return ret return inner def mi_to_gi(mi_size): return mi_size * units.Mi / units.Gi def gi_to_mi(gi_size): return gi_size * units.Gi / units.Mi class InfortrendCommon(object): """The Infortrend's Common Command using CLI. 
Version history: 1.0.0 - Initial driver 1.0.1 - Support DS4000 """ VERSION = '1.0.1' constants = { 'ISCSI_PORT': 3260, 'MAX_LUN_MAP_PER_CHL': 128 } provisioning_values = ['thin', 'full'] tiering_values = ['0', '2', '3', '4'] def __init__(self, protocol, configuration=None): self.protocol = protocol self.configuration = configuration self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(infortrend_esds_opts) self.configuration.append_config_values(infortrend_esds_extra_opts) self.iscsi_multipath = self.configuration.use_multipath_for_image_xfer self.path = self.configuration.infortrend_cli_path self.password = self.configuration.san_password self.ip = self.configuration.san_ip self.cli_retry_time = self.configuration.infortrend_cli_max_retries self.cli_timeout = self.configuration.infortrend_cli_timeout * 60 self.iqn = 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s' self.unmanaged_prefix = 'cinder-unmanaged-%s' if self.ip == '': msg = _('san_ip is not set.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) self.fc_lookup_service = fczm_utils.create_lookup_service() self._volume_stats = None self._model_type = 'R' self._replica_timeout = self.cli_timeout self.map_dict = { 'slot_a': {}, 'slot_b': {}, } self.map_dict_init = False self.target_dict = { 'slot_a': {}, 'slot_b': {}, } if self.protocol == 'iSCSI': self.mcs_dict = { 'slot_a': {}, 'slot_b': {}, } self._init_pool_list() self._init_channel_list() self.cli_conf = { 'path': self.path, 'password': self.password, 'ip': self.ip, 'cli_retry_time': int(self.cli_retry_time), } def _init_pool_list(self): pools_name = self.configuration.infortrend_pools_name if pools_name == '': msg = _('Pools name is not set.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) tmp_pool_list = pools_name.split(',') self.pool_list = [pool.strip() for pool in tmp_pool_list] def _init_channel_list(self): self.channel_list = { 'slot_a': [], 'slot_b': [], } tmp_channel_list = ( self.configuration.infortrend_slots_a_channels_id.split(',') ) self.channel_list['slot_a'] = ( [channel.strip() for channel in tmp_channel_list] ) tmp_channel_list = ( self.configuration.infortrend_slots_b_channels_id.split(',') ) self.channel_list['slot_b'] = ( [channel.strip() for channel in tmp_channel_list] ) def _execute_command(self, cli_type, *args, **kwargs): command = getattr(cli, cli_type) return command(self.cli_conf).execute(*args, **kwargs) def _execute(self, cli_type, *args, **kwargs): LOG.debug('Executing command type: %(type)s.', {'type': cli_type}) rc, out = self._execute_command(cli_type, *args, **kwargs) if rc != 0: if ('warning' in CLI_RC_FILTER[cli_type] and rc in CLI_RC_FILTER[cli_type]['warning']): LOG.warning(CLI_RC_FILTER[cli_type]['warning'][rc]) else: msg = CLI_RC_FILTER[cli_type]['error'] LOG.error(msg) raise exception.InfortrendCliException( err=msg, param=args, rc=rc, out=out) return rc, out @log_func def _init_map_info(self, multipath=False): if not self.map_dict_init: rc, channel_info = self._execute('ShowChannel') if 'BID' in channel_info[0]: self._model_type = 'R' else: self._model_type = 'G' self._set_channel_id(channel_info, 'slot_a', multipath) if multipath and self._model_type == 'R': self._set_channel_id(channel_info, 'slot_b', multipath) self.map_dict_init = True @log_func def _update_map_info(self, multipath=False): """Record the driver mapping information. 
map_dict = { 'slot_a': { '0': [1, 2, 3, 4] # Slot A Channel 0 map lun 1, 2, 3, 4 }, 'slot_b' : { '1': [0, 1, 3] # Slot B Channel 1 map lun 0, 1, 3 } } """ rc, map_info = self._execute('ShowMap') self._update_map_info_by_slot(map_info, 'slot_a') if multipath and self._model_type == 'R': self._update_map_info_by_slot(map_info, 'slot_b') return map_info @log_func def _update_map_info_by_slot(self, map_info, slot_key): for key, value in self.map_dict[slot_key].items(): self.map_dict[slot_key][key] = list( range(self.constants['MAX_LUN_MAP_PER_CHL'])) if len(map_info) > 0 and isinstance(map_info, list): for entry in map_info: ch = entry['Ch'] lun = entry['LUN'] if ch not in self.map_dict[slot_key].keys(): continue target_id = self.target_dict[slot_key][ch] if (entry['Target'] == target_id and int(lun) in self.map_dict[slot_key][ch]): self.map_dict[slot_key][ch].remove(int(lun)) def _check_initiator_has_lun_map(self, initiator_wwns, map_info): for initiator in initiator_wwns: for entry in map_info: if initiator.lower() == entry['Host-ID'].lower(): return True return False @log_func def _set_channel_id( self, channel_info, controller='slot_a', multipath=False): if self.protocol == 'iSCSI': check_channel_type = 'NETWORK' else: check_channel_type = 'FIBRE' for entry in channel_info: if entry['Type'] == check_channel_type: if entry['Ch'] in self.channel_list[controller]: self.map_dict[controller][entry['Ch']] = [] if self.protocol == 'iSCSI': self._update_mcs_dict( entry['Ch'], entry['MCS'], controller) self._update_target_dict(entry, controller) @log_func def _update_target_dict(self, channel, controller): """Record the target id for mapping. # R model target_dict = { 'slot_a': { '0': '0', '1': '0', }, 'slot_b': { '0': '1', '1': '1', }, } # G model target_dict = { 'slot_a': { '2': '32', '3': '112', } } """ if self._model_type == 'G': self.target_dict[controller][channel['Ch']] = channel['ID'] else: if controller == 'slot_a': self.target_dict[controller][channel['Ch']] = channel['AID'] else: self.target_dict[controller][channel['Ch']] = channel['BID'] def _update_mcs_dict(self, channel_id, mcs_id, controller): """Record the iSCSI MCS topology. 
# R model with mcs, but it does not work with iSCSI multipath
        mcs_dict = {
            'slot_a': {
                '0': ['0', '1'],
                '1': ['2']
            },
            'slot_b': {
                '0': ['0', '1'],
                '1': ['2']
            }
        }

        # G model with mcs
        mcs_dict = {
            'slot_a': {
                '0': ['0', '1'],
                '1': ['2']
            },
            'slot_b': {}
        }
        """
        if mcs_id not in self.mcs_dict[controller]:
            self.mcs_dict[controller][mcs_id] = []
        self.mcs_dict[controller][mcs_id].append(channel_id)

    def _check_tiers_setup(self):
        tiering = self.configuration.infortrend_tiering
        if tiering != '0':
            self._check_extraspec_value(
                tiering, self.tiering_values)
            tier_levels_list = list(range(int(tiering)))
            tier_levels_list = list(map(str, tier_levels_list))
            rc, lv_info = self._execute('ShowLV', 'tier')

            for pool in self.pool_list:
                support_tier_levels = tier_levels_list[:]
                for entry in lv_info:
                    if (entry['LV-Name'] == pool and
                            entry['Tier'] in support_tier_levels):
                        support_tier_levels.remove(entry['Tier'])
                    if len(support_tier_levels) == 0:
                        break
                if len(support_tier_levels) != 0:
                    msg = _('Please create %(tier_levels)s '
                            'tier in pool %(pool)s in advance!') % {
                                'tier_levels': support_tier_levels,
                                'pool': pool}
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)

    def _check_pools_setup(self):
        pool_list = self.pool_list[:]
        rc, lv_info = self._execute('ShowLV')

        for lv in lv_info:
            if lv['Name'] in pool_list:
                pool_list.remove(lv['Name'])
            if len(pool_list) == 0:
                break

        if len(pool_list) != 0:
            msg = _('Please create %(pool_list)s pool in advance!') % {
                'pool_list': pool_list}
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

    def check_for_setup_error(self):
        self._check_pools_setup()
        self._check_tiers_setup()

    def create_volume(self, volume):
        """Create an Infortrend partition."""
        volume_id = volume['id'].replace('-', '')

        self._create_partition_by_default(volume)
        part_id = self._get_part_id(volume_id)

        system_id = self._get_system_id(self.ip)

        model_dict = {
            'system_id': system_id,
            'partition_id': part_id,
        }

        model_update = {
            "provider_location": self._concat_provider_location(model_dict),
        }
        LOG.info(_LI('Create Volume %(volume_id)s completed.'), {
            'volume_id': volume_id})
        return model_update

    def _create_partition_by_default(self, volume):
        pool_id = self._get_target_pool_id(volume)
        self._create_partition_with_pool(volume, pool_id)

    def _create_partition_with_pool(
            self, volume, pool_id, extraspecs=None):

        volume_id = volume['id'].replace('-', '')
        volume_size = gi_to_mi(volume['size'])

        if extraspecs is None:
            extraspecs = self._get_extraspecs_dict(volume['volume_type_id'])

        provisioning = self._get_extraspecs_value(extraspecs, 'provisioning')
        tiering = self._get_extraspecs_value(extraspecs, 'tiering')

        extraspecs_dict = {}
        cmd = ''
        if provisioning == 'thin':
            provisioning = int(volume_size * 0.2)
            extraspecs_dict['provisioning'] = provisioning
            extraspecs_dict['init'] = 'disable'
        else:
            self._check_extraspec_value(
                provisioning, self.provisioning_values)

        if tiering != '0':
            self._check_extraspec_value(
                tiering, self.tiering_values)
            tier_levels_list = list(range(int(tiering)))
            tier_levels_list = list(map(str, tier_levels_list))
            self._check_tiering_existing(tier_levels_list, pool_id)
            extraspecs_dict['provisioning'] = 0
            extraspecs_dict['init'] = 'disable'

        if extraspecs_dict:
            cmd = self._create_part_parameters_str(extraspecs_dict)

        commands = (pool_id, volume_id, 'size=%s' % int(volume_size), cmd)
        self._execute('CreatePartition', *commands)

    def _create_part_parameters_str(self, extraspecs_dict):
        parameters_list = []
        parameters = {
            'provisioning': 'min=%sMB',
            'tiering': 'tier=%s',
            'init': 'init=%s',
        }
        for extraspec in
extraspecs_dict.keys(): value = parameters[extraspec] % (extraspecs_dict[extraspec]) parameters_list.append(value) cmd = ' '.join(parameters_list) return cmd def _check_tiering_existing(self, tier_levels, pool_id): rc, lv_info = self._execute('ShowLV', 'tier') for entry in lv_info: if entry['LV-ID'] == pool_id and entry['Tier'] in tier_levels: tier_levels.remove(entry['Tier']) if len(tier_levels) == 0: break if len(tier_levels) != 0: msg = _('Have not created %(tier_levels)s tier(s).') % { 'tier_levels': tier_levels} LOG.error(msg) raise exception.VolumeDriverException(message=msg) @log_func def _create_map_with_lun_filter( self, part_id, channel_id, lun_id, host, controller='slot_a'): host_filter = self._create_target_id_and_host_filter( controller, host) target_id = self.target_dict[controller][channel_id] commands = ( 'part', part_id, channel_id, target_id, lun_id, host_filter ) self._execute('CreateMap', *commands) @log_func def _create_map_with_mcs( self, part_id, channel_list, lun_id, host, controller='slot_a'): map_channel_id = None for channel_id in channel_list: host_filter = self._create_target_id_and_host_filter( controller, host) target_id = self.target_dict[controller][channel_id] commands = ( 'part', part_id, channel_id, target_id, lun_id, host_filter ) rc, out = self._execute('CreateMap', *commands) if rc == 0: map_channel_id = channel_id break if map_channel_id is None: msg = _('Failed to create map on mcs, no channel can map.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) return map_channel_id def _create_target_id_and_host_filter(self, controller, host): if self.protocol == 'iSCSI': host_filter = 'iqn=%s' % host else: host_filter = 'wwn=%s' % host return host_filter def _get_extraspecs_dict(self, volume_type_id): extraspecs = {} if volume_type_id: extraspecs = volume_types.get_volume_type_extra_specs( volume_type_id) return extraspecs def _get_extraspecs_value(self, extraspecs, key): value = None if key == 'provisioning': if (extraspecs and 'infortrend_provisioning' in extraspecs.keys()): value = extraspecs['infortrend_provisioning'].lower() else: value = self.configuration.infortrend_provisioning.lower() elif key == 'tiering': value = self.configuration.infortrend_tiering return value def _select_most_free_capacity_pool_id(self, lv_info): largest_free_capacity_gb = 0.0 dest_pool_id = None for lv in lv_info: if lv['Name'] in self.pool_list: available_space = float(lv['Available'].split(' ', 1)[0]) free_capacity_gb = round(mi_to_gi(available_space)) if free_capacity_gb > largest_free_capacity_gb: largest_free_capacity_gb = free_capacity_gb dest_pool_id = lv['ID'] return dest_pool_id def _get_target_pool_id(self, volume): extraspecs = self._get_extraspecs_dict(volume['volume_type_id']) pool_id = None rc, lv_info = self._execute('ShowLV') if 'pool_name' in extraspecs.keys(): poolname = extraspecs['pool_name'] for entry in lv_info: if entry['Name'] == poolname: pool_id = entry['ID'] else: pool_id = self._select_most_free_capacity_pool_id(lv_info) if pool_id is None: msg = _('Failed to get pool id with volume %(volume_id)s.') % { 'volume_id': volume['id']} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return pool_id def _get_system_id(self, system_ip): rc, device_info = self._execute('ShowDevice') for entry in device_info: if system_ip == entry['Connected-IP']: return str(int(entry['ID'], 16)) return @log_func def _get_lun_id(self, ch_id, controller='slot_a'): lun_id = -1 if len(self.map_dict[controller][ch_id]) > 0: lun_id = 
self.map_dict[controller][ch_id][0]
            self.map_dict[controller][ch_id].remove(lun_id)

        if lun_id == -1:
            msg = _('LUN number is out of bounds '
                    'on channel id: %(ch_id)s.') % {'ch_id': ch_id}
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        else:
            return lun_id

    @log_func
    def _get_mapping_info(self, multipath):
        if self.iscsi_multipath or multipath:
            return self._get_mapping_info_with_mcs()
        else:
            return self._get_mapping_info_with_normal()

    def _get_mapping_info_with_mcs(self):
        """Get the minimum mapping channel id and multi lun id mapping info.

        # R model with mcs
        map_chl = {
            'slot_a': ['0', '1']
        }
        map_lun = ['0']

        # G model with mcs
        map_chl = {
            'slot_a': ['1', '2']
        }
        map_lun = ['0']

        :returns: minimum mapping channel id per slot and multi lun id
        """
        map_chl = {
            'slot_a': []
        }
        min_lun_num = 0
        map_mcs_group = None
        for mcs in self.mcs_dict['slot_a']:
            if len(self.mcs_dict['slot_a'][mcs]) > 1:
                if min_lun_num < self._get_mcs_channel_lun_map_num(mcs):
                    min_lun_num = self._get_mcs_channel_lun_map_num(mcs)
                    map_mcs_group = mcs

        if map_mcs_group is None:
            msg = _('Raid did not have MCS Channel.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        map_chl['slot_a'] = self.mcs_dict['slot_a'][map_mcs_group]
        map_lun = self._get_mcs_channel_lun_map(map_chl['slot_a'])
        return map_chl, map_lun, map_mcs_group

    def _get_mcs_channel_lun_map_num(self, mcs_id):
        lun_num = 0
        for channel in self.mcs_dict['slot_a'][mcs_id]:
            lun_num += len(self.map_dict['slot_a'][channel])
        return lun_num

    def _get_mcs_channel_lun_map(self, channel_list):
        """Find the common lun id in mcs channel."""
        map_lun = []
        for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']):
            check_map = True
            for channel_id in channel_list:
                if lun_id not in self.map_dict['slot_a'][channel_id]:
                    check_map = False
            if check_map:
                map_lun.append(str(lun_id))
                break
        return map_lun

    @log_func
    def _get_mapping_info_with_normal(self):
        """Get the minimum mapping channel id and lun id mapping info.

        # G model and R model
        map_chl = {
            'slot_a': ['1']
        }
        map_lun = ['0']

        :returns: minimum mapping channel id per slot and lun id
        """
        map_chl = {
            'slot_a': []
        }
        map_lun = []

        ret_chl = self._get_minimum_mapping_channel_id('slot_a')
        lun_id = self._get_lun_id(ret_chl, 'slot_a')
        mcs_id = self._get_mcs_id_by_channel_id(ret_chl)

        map_chl['slot_a'].append(ret_chl)
        map_lun.append(str(lun_id))

        return map_chl, map_lun, mcs_id

    @log_func
    def _get_minimum_mapping_channel_id(self, controller):
        empty_lun_num = 0
        min_map_chl = -1

        # Sort items to get a reliable behaviour. Dictionary items
        # are iterated in a random order because of hash randomization.
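        # The loop keeps the channel whose free-LUN list is longest, i.e.
        # the least-loaded channel; sorting first makes the tie-breaking
        # deterministic between runs.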
for key, value in sorted(self.map_dict[controller].items()): if empty_lun_num < len(value): min_map_chl = key empty_lun_num = len(value) if int(min_map_chl) < 0: msg = _('LUN map overflow on every channel.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: return min_map_chl def _get_common_lun_map_id(self, wwpn_channel_info): map_lun = None for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']): lun_id_exist = False for slot_name in ['slot_a', 'slot_b']: for wwpn in wwpn_channel_info: channel_id = wwpn_channel_info[wwpn]['channel'] if channel_id not in self.map_dict[slot_name]: continue elif lun_id not in self.map_dict[slot_name][channel_id]: lun_id_exist = True if not lun_id_exist: map_lun = str(lun_id) break return map_lun def _get_mcs_id_by_channel_id(self, channel_id): mcs_id = None for mcs in self.mcs_dict['slot_a']: if channel_id in self.mcs_dict['slot_a'][mcs]: mcs_id = mcs break if mcs_id is None: msg = _('Cannot get mcs_id by channel id: %(channel_id)s.') % { 'channel_id': channel_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) return mcs_id def _concat_provider_location(self, model_dict): return '@'.join([i + '^' + str(model_dict[i]) for i in model_dict]) def delete_volume(self, volume): """Delete the specific volume.""" volume_id = volume['id'].replace('-', '') has_pair = False have_map = False part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') (check_exist, have_map, part_id) = ( self._check_volume_exist(volume_id, part_id) ) if not check_exist: LOG.warning(_LW('Volume %(volume_id)s already deleted.'), { 'volume_id': volume_id}) return rc, replica_list = self._execute('ShowReplica', '-l') for entry in replica_list: if (volume_id == entry['Source-Name'] and part_id == entry['Source']): if not self._check_replica_completed(entry): has_pair = True LOG.warning(_LW('Volume still %(status)s ' 'Cannot delete volume.'), { 'status': entry['Status']}) else: have_map = entry['Source-Mapped'] == 'Yes' self._execute('DeleteReplica', entry['Pair-ID'], '-y') elif (volume_id == entry['Target-Name'] and part_id == entry['Target']): have_map = entry['Target-Mapped'] == 'Yes' self._execute('DeleteReplica', entry['Pair-ID'], '-y') if not has_pair: rc, snapshot_list = self._execute( 'ShowSnapshot', 'part=%s' % part_id) for snapshot in snapshot_list: si_has_pair = self._delete_pair_with_snapshot( snapshot['SI-ID'], replica_list) if si_has_pair: msg = _('Failed to delete SI ' 'for volume_id: %(volume_id)s ' 'because it has pair.') % { 'volume_id': volume_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) self._execute('DeleteSnapshot', snapshot['SI-ID'], '-y') rc, map_info = self._execute('ShowMap', 'part=%s' % part_id) if have_map or len(map_info) > 0: self._execute('DeleteMap', 'part', part_id, '-y') self._execute('DeletePartition', part_id, '-y') LOG.info(_LI('Delete Volume %(volume_id)s completed.'), { 'volume_id': volume_id}) else: msg = _('Failed to delete volume ' 'for volume_id: %(volume_id)s ' 'because it has pair.') % { 'volume_id': volume_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) def _check_replica_completed(self, replica): if ((replica['Type'] == 'Copy' and replica['Status'] == 'Completed') or (replica['Type'] == 'Mirror' and replica['Status'] == 'Mirror')): return True return False def _check_volume_exist(self, volume_id, part_id): check_exist = False have_map = False result_part_id = part_id rc, part_list = self._execute('ShowPartition', '-l') 
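        # Each entry from 'ShowPartition -l' is a dict built by the CLI
        # parser in cli_factory; only its 'Name', 'ID' and 'Mapped' keys
        # are consulted here.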
for entry in part_list: if entry['Name'] == volume_id: check_exist = True if part_id is None: result_part_id = entry['ID'] if entry['Mapped'] == 'true': have_map = True if check_exist: return (check_exist, have_map, result_part_id) else: return (False, False, None) def create_cloned_volume(self, volume, src_vref): """Create a clone of the volume by volume copy.""" volume_id = volume['id'].replace('-', '') # Step1 create a snapshot of the volume src_part_id = self._extract_specific_provider_location( src_vref['provider_location'], 'partition_id') if src_part_id is None: src_part_id = self._get_part_id(volume_id) model_update = self._create_volume_from_volume(volume, src_part_id) LOG.info(_LI('Create Cloned Volume %(volume_id)s completed.'), { 'volume_id': volume['id']}) return model_update def _create_volume_from_volume(self, dst_volume, src_part_id): # create the target volume for volume copy dst_volume_id = dst_volume['id'].replace('-', '') self._create_partition_by_default(dst_volume) dst_part_id = self._get_part_id(dst_volume_id) # prepare return value system_id = self._get_system_id(self.ip) model_dict = { 'system_id': system_id, 'partition_id': dst_part_id, } model_info = self._concat_provider_location(model_dict) model_update = {"provider_location": model_info} # clone the volume from the origin partition commands = ( 'Cinder-Cloned', 'part', src_part_id, 'part', dst_part_id ) self._execute('CreateReplica', *commands) self._wait_replica_complete(dst_part_id) return model_update def _extract_specific_provider_location(self, provider_location, key): provider_location_dict = self._extract_all_provider_location( provider_location) result = provider_location_dict.get(key, None) return result @log_func def _extract_all_provider_location(self, provider_location): provider_location_dict = {} dict_entry = provider_location.split("@") for entry in dict_entry: key, value = entry.split('^', 1) if value == 'None': value = None provider_location_dict[key] = value return provider_location_dict def create_export(self, context, volume): model_update = volume['provider_location'] LOG.info(_LI('Create export done from Volume %(volume_id)s.'), { 'volume_id': volume['id']}) return {'provider_location': model_update} def get_volume_stats(self, refresh=False): """Get volume status. If refresh is True, update the status first. """ if self._volume_stats is None or refresh: self._update_volume_stats() LOG.info(_LI( 'Successfully update volume stats. 
' 'backend: %(volume_backend_name)s, ' 'vendor: %(vendor_name)s, ' 'driver version: %(driver_version)s, ' 'storage protocol: %(storage_protocol)s.'), self._volume_stats) return self._volume_stats def _update_volume_stats(self): backend_name = self.configuration.safe_get('volume_backend_name') data = { 'volume_backend_name': backend_name, 'vendor_name': 'Infortrend', 'driver_version': self.VERSION, 'storage_protocol': self.protocol, 'pools': self._update_pools_stats(), } self._volume_stats = data def _update_pools_stats(self): enable_specs_dict = self._get_enable_specs_on_array() if 'Thin Provisioning' in enable_specs_dict.keys(): provisioning = 'thin' provisioning_support = True else: provisioning = 'full' provisioning_support = False rc, part_list = self._execute('ShowPartition', '-l') rc, pools_info = self._execute('ShowLV') pools = [] for pool in pools_info: if pool['Name'] in self.pool_list: total_space = float(pool['Size'].split(' ', 1)[0]) available_space = float(pool['Available'].split(' ', 1)[0]) total_capacity_gb = round(mi_to_gi(total_space), 2) free_capacity_gb = round(mi_to_gi(available_space), 2) provisioning_factor = self.configuration.safe_get( 'max_over_subscription_ratio') provisioned_space = self._get_provisioned_space( pool['ID'], part_list) provisioned_capacity_gb = round(mi_to_gi(provisioned_space), 2) new_pool = { 'pool_name': pool['Name'], 'pool_id': pool['ID'], 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'reserved_percentage': 0, 'QoS_support': False, 'provisioned_capacity_gb': provisioned_capacity_gb, 'max_over_subscription_ratio': provisioning_factor, 'thin_provisioning_support': provisioning_support, 'thick_provisioning_support': True, 'infortrend_provisioning': provisioning, } pools.append(new_pool) return pools def _get_provisioned_space(self, pool_id, part_list): provisioning_space = 0 for entry in part_list: if entry['LV-ID'] == pool_id: provisioning_space += int(entry['Size']) return provisioning_space def create_snapshot(self, snapshot): """Creates a snapshot.""" snapshot_id = snapshot['id'].replace('-', '') volume_id = snapshot['volume_id'].replace('-', '') LOG.debug('Create Snapshot %(snapshot)s volume %(volume)s.', {'snapshot': snapshot_id, 'volume': volume_id}) model_update = {} part_id = self._get_part_id(volume_id) if part_id is None: msg = _('Failed to get Partition ID for volume %(volume_id)s.') % { 'volume_id': volume_id} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @lockutils.synchronized( 'snapshot-' + part_id, 'infortrend-', True) def do_create_snapshot(): self._execute('CreateSnapshot', 'part', part_id) rc, tmp_snapshot_list = self._execute( 'ShowSnapshot', 'part=%s' % part_id) return tmp_snapshot_list snapshot_list = do_create_snapshot() LOG.info(_LI( 'Create success. 
' 'Snapshot: %(snapshot)s, ' 'Snapshot ID in raid: %(raid_snapshot_id)s, ' 'volume: %(volume)s.'), { 'snapshot': snapshot_id, 'raid_snapshot_id': snapshot_list[-1]['SI-ID'], 'volume': volume_id}) model_update['provider_location'] = snapshot_list[-1]['SI-ID'] return model_update def delete_snapshot(self, snapshot): """Delete the snapshot.""" snapshot_id = snapshot['id'].replace('-', '') volume_id = snapshot['volume_id'].replace('-', '') LOG.debug('Delete Snapshot %(snapshot)s volume %(volume)s.', {'snapshot': snapshot_id, 'volume': volume_id}) raid_snapshot_id = self._get_raid_snapshot_id(snapshot) if raid_snapshot_id: rc, replica_list = self._execute('ShowReplica', '-l') has_pair = self._delete_pair_with_snapshot( raid_snapshot_id, replica_list) if not has_pair: self._execute('DeleteSnapshot', raid_snapshot_id, '-y') LOG.info(_LI('Delete Snapshot %(snapshot_id)s completed.'), { 'snapshot_id': snapshot_id}) else: msg = _('Failed to delete snapshot ' 'for snapshot_id: %s ' 'because it has pair.') % snapshot_id LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: msg = _( 'Failed to get Raid Snapshot ID ' 'from Snapshot %(snapshot_id)s.') % { 'snapshot_id': snapshot_id} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _get_raid_snapshot_id(self, snapshot): if 'provider_location' not in snapshot: LOG.warning(_LW( 'Failed to get Raid Snapshot ID and ' 'did not store in snapshot.')) return return snapshot['provider_location'] def _delete_pair_with_snapshot(self, snapshot_id, replica_list): has_pair = False for entry in replica_list: if entry['Source'] == snapshot_id: if not self._check_replica_completed(entry): has_pair = True LOG.warning(_LW( 'Snapshot still %(status)s Cannot delete snapshot.'), { 'status': entry['Status']}) else: self._execute('DeleteReplica', entry['Pair-ID'], '-y') return has_pair def _get_part_id(self, volume_id, pool_id=None, part_list=None): if part_list is None: rc, part_list = self._execute('ShowPartition') for entry in part_list: if pool_id is None: if entry['Name'] == volume_id: return entry['ID'] else: if entry['Name'] == volume_id and entry['LV-ID'] == pool_id: return entry['ID'] return def create_volume_from_snapshot(self, volume, snapshot): raid_snapshot_id = self._get_raid_snapshot_id(snapshot) if raid_snapshot_id is None: msg = _('Failed to get Raid Snapshot ID ' 'from snapshot: %(snapshot_id)s.') % { 'snapshot_id': snapshot['id']} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) src_part_id = self._check_snapshot_filled_block(raid_snapshot_id) model_update = self._create_volume_from_snapshot_id( volume, raid_snapshot_id, src_part_id) LOG.info(_LI( 'Create Volume %(volume_id)s from ' 'snapshot %(snapshot_id)s completed.'), { 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) return model_update def _check_snapshot_filled_block(self, raid_snapshot_id): rc, snapshot_list = self._execute( 'ShowSnapshot', 'si=%s' % raid_snapshot_id, '-l') if snapshot_list and snapshot_list[0]['Total-filled-block'] == '0': return snapshot_list[0]['Partition-ID'] return def _create_volume_from_snapshot_id( self, dst_volume, raid_snapshot_id, src_part_id): # create the target volume for volume copy dst_volume_id = dst_volume['id'].replace('-', '') self._create_partition_by_default(dst_volume) dst_part_id = self._get_part_id(dst_volume_id) # prepare return value system_id = self._get_system_id(self.ip) model_dict = { 'system_id': system_id, 'partition_id': dst_part_id, } model_info = 
self._concat_provider_location(model_dict) model_update = {"provider_location": model_info} if src_part_id: # clone the volume from the origin partition commands = ( 'Cinder-Snapshot', 'part', src_part_id, 'part', dst_part_id ) self._execute('CreateReplica', *commands) self._wait_replica_complete(dst_part_id) # clone the volume from the snapshot commands = ( 'Cinder-Snapshot', 'si', raid_snapshot_id, 'part', dst_part_id ) self._execute('CreateReplica', *commands) self._wait_replica_complete(dst_part_id) return model_update @lockutils.synchronized('connection', 'infortrend-', True) def initialize_connection(self, volume, connector): if self.protocol == 'iSCSI': multipath = connector.get('multipath', False) return self._initialize_connection_iscsi( volume, connector, multipath) elif self.protocol == 'FC': return self._initialize_connection_fc( volume, connector) else: msg = _('Unknown protocol: %(protocol)s.') % { 'protocol': self.protocol} LOG.error(msg) raise exception.VolumeDriverException(message=msg) def _initialize_connection_fc(self, volume, connector): self._init_map_info(True) self._update_map_info(True) map_lun, target_wwpns, initiator_target_map = ( self._do_fc_connection(volume, connector) ) properties = self._generate_fc_connection_properties( map_lun, target_wwpns, initiator_target_map) LOG.info(_LI('Successfully initialized connection. ' 'target_wwn: %(target_wwn)s, ' 'initiator_target_map: %(initiator_target_map)s, ' 'lun: %(target_lun)s.'), properties['data']) return properties def _do_fc_connection(self, volume, connector): volume_id = volume['id'].replace('-', '') target_wwpns = [] partition_data = self._extract_all_provider_location( volume['provider_location']) part_id = partition_data['partition_id'] if part_id is None: part_id = self._get_part_id(volume_id) wwpn_list, wwpn_channel_info = self._get_wwpn_list() initiator_target_map, target_wwpns = self._build_initiator_target_map( connector, wwpn_list) map_lun = self._get_common_lun_map_id(wwpn_channel_info) # Sort items to get a reliable behaviour. Dictionary items # are iterated in a random order because of hash randomization. 
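        # Illustrative sketch (hypothetical WWPNs): when no FC lookup
        # service is configured, _build_initiator_target_map (called above,
        # defined below) simply maps every initiator WWPN to the full
        # target WWPN list:
        #
        #     initiators = ['1122334455667788']
        #     targets = ['1234567890123', '0987654321321']
        #     {i: targets for i in initiators}
        #     # -> {'1122334455667788': ['1234567890123', '0987654321321']}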
for initiator_wwpn in sorted(initiator_target_map): for target_wwpn in initiator_target_map[initiator_wwpn]: channel_id = wwpn_channel_info[target_wwpn.upper()]['channel'] controller = wwpn_channel_info[target_wwpn.upper()]['slot'] self._create_map_with_lun_filter( part_id, channel_id, map_lun, initiator_wwpn, controller=controller) return map_lun, target_wwpns, initiator_target_map def _build_initiator_target_map(self, connector, all_target_wwpns): initiator_target_map = {} target_wwpns = [] if self.fc_lookup_service: lookup_map = ( self.fc_lookup_service.get_device_mapping_from_network( connector['wwpns'], all_target_wwpns) ) for fabric_name in lookup_map: fabric = lookup_map[fabric_name] target_wwpns.extend(fabric['target_port_wwn_list']) for initiator in fabric['initiator_port_wwn_list']: initiator_target_map[initiator] = ( fabric['target_port_wwn_list'] ) else: initiator_wwns = connector['wwpns'] target_wwpns = all_target_wwpns for initiator in initiator_wwns: initiator_target_map[initiator] = all_target_wwpns return initiator_target_map, target_wwpns def _generate_fc_connection_properties( self, lun_id, target_wwpns, initiator_target_map): return { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': int(lun_id), 'target_wwn': target_wwpns, 'initiator_target_map': initiator_target_map, }, } @log_func def _initialize_connection_iscsi(self, volume, connector, multipath): self._init_map_info(multipath) self._update_map_info(multipath) volume_id = volume['id'].replace('-', '') partition_data = self._extract_all_provider_location( volume['provider_location']) # system_id, part_id part_id = partition_data['partition_id'] if part_id is None: part_id = self._get_part_id(volume_id) self._set_host_iqn(connector['initiator']) map_chl, map_lun, mcs_id = self._get_mapping_info(multipath) lun_id = map_lun[0] if self.iscsi_multipath or multipath: channel_id = self._create_map_with_mcs( part_id, map_chl['slot_a'], lun_id, connector['initiator']) else: channel_id = map_chl['slot_a'][0] self._create_map_with_lun_filter( part_id, channel_id, lun_id, connector['initiator']) rc, net_list = self._execute('ShowNet') ip = self._get_ip_by_channel(channel_id, net_list) if ip is None: msg = _( 'Failed to get ip on Channel %(channel_id)s ' 'with volume: %(volume_id)s.') % { 'channel_id': channel_id, 'volume_id': volume_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) partition_data = self._combine_channel_lun_target_id( partition_data, mcs_id, lun_id, channel_id) property_value = [{ 'lun_id': partition_data['lun_id'], 'iqn': self._generate_iqn(partition_data), 'ip': ip, 'port': self.constants['ISCSI_PORT'], }] properties = self._generate_iscsi_connection_properties( property_value, volume) LOG.info(_LI('Successfully initialized connection ' 'with volume: %(volume_id)s.'), properties['data']) return properties @log_func def _combine_channel_lun_target_id( self, partition_data, mcs_id, lun_id, channel_id): target_id = self.target_dict['slot_a'][channel_id] partition_data['mcs_id'] = mcs_id partition_data['lun_id'] = lun_id partition_data['target_id'] = target_id partition_data['slot_id'] = 1 return partition_data def _set_host_iqn(self, host_iqn): rc, iqn_list = self._execute('ShowIQN') check_iqn_exist = False for entry in iqn_list: if entry['IQN'] == host_iqn: check_iqn_exist = True if not check_iqn_exist: self._execute( 'CreateIQN', host_iqn, self._truncate_host_name(host_iqn)) def _truncate_host_name(self, iqn): if len(iqn) > 16: return iqn[-16:] else: 
            return iqn

    @log_func
    def _generate_iqn(self, partition_data):
        return self.iqn % (
            partition_data['system_id'],
            partition_data['mcs_id'],
            partition_data['target_id'],
            partition_data['slot_id'])

    @log_func
    def _get_ip_by_channel(
            self, channel_id, net_list, controller='slot_a'):

        slot_name = 'slotA' if controller == 'slot_a' else 'slotB'

        for entry in net_list:
            if entry['ID'] == channel_id and entry['Slot'] == slot_name:
                return entry['IPv4']
        return

    def _get_wwpn_list(self):
        rc, wwn_list = self._execute('ShowWWN')

        wwpn_list = []
        wwpn_channel_info = {}

        for entry in wwn_list:
            channel_id = entry['CH']
            if 'BID' in entry['ID']:
                slot_name = 'slot_b'
            else:
                slot_name = 'slot_a'

            if channel_id in self.map_dict[slot_name]:
                wwpn_list.append(entry['WWPN'])
                wwpn_channel_info[entry['WWPN']] = {
                    'channel': channel_id,
                    'slot': slot_name,
                }

        return wwpn_list, wwpn_channel_info

    @log_func
    def _generate_iscsi_connection_properties(
            self, property_value, volume):

        properties = {}
        discovery_exist = False

        specific_property = property_value[0]
        discovery_ip = '%s:%s' % (
            specific_property['ip'], specific_property['port'])
        discovery_iqn = specific_property['iqn']

        if self._do_iscsi_discovery(discovery_iqn, discovery_ip):
            properties['target_portal'] = discovery_ip
            properties['target_iqn'] = discovery_iqn
            properties['target_lun'] = int(specific_property['lun_id'])
            discovery_exist = True

        if not discovery_exist:
            msg = _(
                'Could not find iSCSI target '
                'for volume: %(volume_id)s.') % {
                    'volume_id': volume['id']}
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        properties['target_discovered'] = discovery_exist
        properties['volume_id'] = volume['id']

        if 'provider_auth' in volume:
            auth = volume['provider_auth']
            if auth:
                (auth_method, auth_username, auth_secret) = auth.split()
                properties['auth_method'] = auth_method
                properties['auth_username'] = auth_username
                properties['auth_password'] = auth_secret

        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }

    @log_func
    def _do_iscsi_discovery(self, target_iqn, target_ip):
        rc, out = self._execute(
            'ExecuteCommand',
            'iscsiadm', '-m', 'discovery',
            '-t', 'sendtargets', '-p', target_ip,
            run_as_root=True)

        if rc != 0:
            LOG.error(_LE(
                'Cannot discover iSCSI target %(target_iqn)s '
                'at %(target_ip)s.'), {
                    'target_ip': target_ip,
                    'target_iqn': target_iqn})
            return False
        else:
            for target in out.splitlines():
                if target_iqn in target and target_ip in target:
                    return True
        return False

    def extend_volume(self, volume, new_size):
        volume_id = volume['id'].replace('-', '')

        part_id = self._extract_specific_provider_location(
            volume['provider_location'], 'partition_id')

        if part_id is None:
            part_id = self._get_part_id(volume_id)

        expand_size = new_size - volume['size']

        if '.'
in ('%s' % expand_size): expand_size = round(gi_to_mi(float(expand_size))) expand_command = 'size=%sMB' % expand_size else: expand_command = 'size=%sGB' % expand_size self._execute('SetPartition', 'expand', part_id, expand_command) LOG.info(_LI( 'Successfully extended volume %(volume_id)s to size %(size)s.'), { 'volume_id': volume['id'], 'size': new_size}) @lockutils.synchronized('connection', 'infortrend-', True) def terminate_connection(self, volume, connector): volume_id = volume['id'].replace('-', '') multipath = connector.get('multipath', False) conn_info = None part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') if part_id is None: part_id = self._get_part_id(volume_id) self._execute('DeleteMap', 'part', part_id, '-y') if self.protocol == 'iSCSI': self._execute( 'DeleteIQN', self._truncate_host_name(connector['initiator'])) map_info = self._update_map_info(multipath) if self.protocol == 'FC' and self.fc_lookup_service: lun_map_exist = self._check_initiator_has_lun_map( connector['wwpns'], map_info) if not lun_map_exist: conn_info = {'driver_volume_type': 'fibre_channel', 'data': {}} wwpn_list, wwpn_channel_info = self._get_wwpn_list() init_target_map, target_wwpns = ( self._build_initiator_target_map(connector, wwpn_list) ) conn_info['data']['initiator_target_map'] = init_target_map LOG.info(_LI( 'Successfully terminated connection for volume: %(volume_id)s.'), { 'volume_id': volume['id']}) return conn_info def migrate_volume(self, volume, host, new_extraspecs=None): is_valid, dst_pool_id = ( self._is_valid_for_storage_assisted_migration(host) ) if not is_valid: return (False, None) model_dict = self._migrate_volume_with_pool( volume, dst_pool_id, new_extraspecs) model_update = { "provider_location": self._concat_provider_location(model_dict), } LOG.info(_LI('Migrate Volume %(volume_id)s completed.'), { 'volume_id': volume['id']}) return (True, model_update) def _is_valid_for_storage_assisted_migration(self, host): if 'pool_id' not in host['capabilities']: LOG.warning(_LW('Failed to get target pool id.')) return (False, None) dst_pool_id = host['capabilities']['pool_id'] if dst_pool_id is None: return (False, None) return (True, dst_pool_id) def _migrate_volume_with_pool(self, volume, dst_pool_id, extraspecs=None): volume_id = volume['id'].replace('-', '') # Get old partition data for delete map partition_data = self._extract_all_provider_location( volume['provider_location']) src_part_id = partition_data['partition_id'] if src_part_id is None: src_part_id = self._get_part_id(volume_id) # Create New Partition self._create_partition_with_pool(volume, dst_pool_id, extraspecs) dst_part_id = self._get_part_id( volume_id, pool_id=dst_pool_id) if dst_part_id is None: msg = _('Failed to get new part id in new pool: %(pool_id)s.') % { 'pool_id': dst_pool_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) # Volume Mirror from old partition into new partition commands = ( 'Cinder-Migrate', 'part', src_part_id, 'part', dst_part_id, 'type=mirror' ) self._execute('CreateReplica', *commands) self._wait_replica_complete(dst_part_id) self._execute('DeleteMap', 'part', src_part_id, '-y') self._execute('DeletePartition', src_part_id, '-y') model_dict = { 'system_id': partition_data['system_id'], 'partition_id': dst_part_id, } return model_dict def _wait_replica_complete(self, part_id): start_time = int(time.time()) timeout = self._replica_timeout def _inner(): check_done = False try: rc, replica_list = self._execute('ShowReplica', '-l') 
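                # Illustrative sketch (hypothetical sizes): extend_volume
                # above expands in whole GB unless the delta is fractional,
                # in which case it converts GiB to MiB (gi_to_mi is assumed
                # here to be a plain * 1024):
                #
                #     new_size, old_size = 10, 8.5      # GB
                #     delta = new_size - old_size       # 1.5 -> has a '.'
                #     'size=%sMB' % round(1.5 * 1024)   # -> 'size=1536MB'
                #     # a whole-GB delta would use 'size=%sGB' % delta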
for entry in replica_list: if (entry['Target'] == part_id and self._check_replica_completed(entry)): check_done = True self._execute('DeleteReplica', entry['Pair-ID'], '-y') except Exception: check_done = False LOG.exception(_LE('Cannot detect replica status.')) if check_done: raise loopingcall.LoopingCallDone() if int(time.time()) - start_time > timeout: msg = _('Wait replica complete timeout.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=10).wait() def _check_extraspec_value(self, extraspec, validvalues): if not extraspec: LOG.debug("The given extraspec is None.") elif extraspec not in validvalues: msg = _("The extraspec: %(extraspec)s is not valid.") % { 'extraspec': extraspec} LOG.error(msg) raise exception.VolumeDriverException(message=msg) def _get_enable_specs_on_array(self): enable_specs = {} rc, license_list = self._execute('ShowLicense') for key, value in license_list.items(): if value['Support']: enable_specs[key] = value return enable_specs def manage_existing_get_size(self, volume, ref): """Return size of volume to be managed by manage_existing.""" volume_name = self._get_existing_volume_ref_name(ref) part_entry = self._get_latter_volume_dict(volume_name) if part_entry is None: msg = _('Specified logical volume does not exist.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=ref, reason=msg) rc, map_info = self._execute('ShowMap', 'part=%s' % part_entry['ID']) if len(map_info) != 0: msg = _('The specified volume is mapped to a host.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return int(math.ceil(mi_to_gi(float(part_entry['Size'])))) def manage_existing(self, volume, ref): volume_name = self._get_existing_volume_ref_name(ref) volume_id = volume['id'].replace('-', '') part_entry = self._get_latter_volume_dict(volume_name) if part_entry is None: msg = _('Specified logical volume does not exist.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=ref, reason=msg) self._execute('SetPartition', part_entry['ID'], 'name=%s' % volume_id) model_dict = { 'system_id': self._get_system_id(self.ip), 'partition_id': part_entry['ID'], } model_update = { "provider_location": self._concat_provider_location(model_dict), } LOG.info(_LI('Rename Volume %(volume_id)s completed.'), { 'volume_id': volume['id']}) return model_update def _get_existing_volume_ref_name(self, ref): volume_name = None if 'source-name' in ref: volume_name = ref['source-name'] elif 'source-id' in ref: volume_name = self._get_unmanaged_volume_name( ref['source-id'].replace('-', '')) else: msg = _('Reference must contain source-id or source-name.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=ref, reason=msg) return volume_name def unmanage(self, volume): volume_id = volume['id'].replace('-', '') part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') if part_id is None: part_id = self._get_part_id(volume_id) new_vol_name = self._get_unmanaged_volume_name(volume_id) self._execute('SetPartition', part_id, 'name=%s' % new_vol_name) LOG.info(_LI('Unmanage volume %(volume_id)s completed.'), { 'volume_id': volume_id}) def _get_unmanaged_volume_name(self, volume_id): return self.unmanaged_prefix % volume_id[:-17] def _get_specific_volume_dict(self, volume_id): ref_dict = {} rc, part_list = self._execute('ShowPartition') for entry in part_list: if entry['Name'] == volume_id: ref_dict = 
entry break return ref_dict def _get_latter_volume_dict(self, volume_name): rc, part_list = self._execute('ShowPartition', '-l') latest_timestamps = 0 ref_dict = None for entry in part_list: if entry['Name'] == volume_name: timestamps = self._get_part_timestamps( entry['Creation-time']) if timestamps > latest_timestamps: ref_dict = entry latest_timestamps = timestamps return ref_dict def _get_part_timestamps(self, time_string): """Transform 'Sat, Jan 11 22:18:40 2020' into timestamps with sec.""" first, value = time_string.split(',') timestamps = time.mktime( time.strptime(value, " %b %d %H:%M:%S %Y")) return timestamps def _check_volume_attachment(self, volume): if not volume['volume_attachment']: return False return True def _check_volume_has_snapshot(self, volume): part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') rc, snapshot_list = self._execute('ShowSnapshot', 'part=%s' % part_id) if len(snapshot_list) > 0: return True return False def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" if volume['host'] != host['host']: if self._check_volume_attachment(volume): LOG.warning(_LW( 'Volume %(volume_id)s cannot be retyped ' 'during attachment.'), { 'volume_id': volume['id']}) return False if self._check_volume_has_snapshot(volume): LOG.warning(_LW( 'Volume %(volume_id)s cannot be retyped ' 'because it has snapshot.'), { 'volume_id': volume['id']}) return False new_extraspecs = new_type['extra_specs'] rc, model_update = self.migrate_volume( volume, host, new_extraspecs) if rc: LOG.info(_LI( 'Retype Volume %(volume_id)s is done ' 'and migrated to pool %(pool_id)s.'), { 'volume_id': volume['id'], 'pool_id': host['capabilities']['pool_id']}) return (rc, model_update) else: if ('infortrend_provisioning' in diff['extra_specs'] and (diff['extra_specs']['infortrend_provisioning'][0] != diff['extra_specs']['infortrend_provisioning'][1])): LOG.warning(_LW( 'The provisioning: %(provisioning)s ' 'is not valid.'), { 'provisioning': diff['extra_specs']['infortrend_provisioning'][1]}) return False LOG.info(_LI('Retype Volume %(volume_id)s is completed.'), { 'volume_id': volume['id']}) return True def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume.""" src_volume_id = volume['id'].replace('-', '') dst_volume_id = new_volume['id'].replace('-', '') part_id = self._extract_specific_provider_location( new_volume['provider_location'], 'partition_id') if part_id is None: part_id = self._get_part_id(dst_volume_id) LOG.debug( 'Rename partition %(part_id)s ' 'into new volume %(new_volume)s.', { 'part_id': part_id, 'new_volume': dst_volume_id}) try: self._execute('SetPartition', part_id, 'name=%s' % src_volume_id) except exception.InfortrendCliException: LOG.exception(_LE('Failed to rename %(new_volume)s into ' '%(volume)s.'), {'new_volume': new_volume['id'], 'volume': volume['id']}) return {'_name_id': new_volume['_name_id'] or new_volume['id']} LOG.info(_LI('Update migrated volume %(new_volume)s completed.'), { 'new_volume': new_volume['id']}) model_update = { '_name_id': None, 'provider_location': new_volume['provider_location'], } return model_update cinder-8.0.0/cinder/volume/drivers/infortrend/infortrend_fc_cli.py0000664000567000056710000002553512701406250026561 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fibre Channel Driver for Infortrend Eonstor based on CLI. """ from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class InfortrendCLIFCDriver(driver.FibreChannelDriver): """Infortrend Fibre Channel Driver for Eonstor DS using CLI. Version history: 1.0.0 - Initial driver 1.0.1 - Support DS4000 """ def __init__(self, *args, **kwargs): super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs) self.common = common_cli.InfortrendCommon( 'FC', configuration=self.configuration) self.VERSION = self.common.VERSION def check_for_setup_error(self): LOG.debug('check_for_setup_error start') self.common.check_for_setup_error() def create_volume(self, volume): """Creates a volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ LOG.debug('create_volume volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug( 'create_volume_from_snapshot volume id=%(volume_id)s ' 'snapshot id=%(snapshot_id)s', { 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) return self.common.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" LOG.debug( 'create_cloned_volume volume id=%(volume_id)s ' 'src_vref provider_location=%(provider_location)s', { 'volume_id': volume['id'], 'provider_location': src_vref['provider_location']}) return self.common.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend a volume.""" LOG.debug( 'extend_volume volume id=%(volume_id)s new size=%(size)s', { 'volume_id': volume['id'], 'size': new_size}) self.common.extend_volume(volume, new_size) def delete_volume(self, volume): """Deletes a volume.""" LOG.debug('delete_volume volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.delete_volume(volume) def migrate_volume(self, ctxt, volume, host): """Migrate the volume to the specified host. Returns a boolean indicating whether the migration occurred, as well as model_update. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
""" LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', { 'volume_id': volume['id'], 'host': host['host']}) return self.common.migrate_volume(volume, host) def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.debug( 'create_snapshot snapshot id=%(snapshot_id)s ' 'volume id=%(volume_id)s', { 'snapshot_id': snapshot['id'], 'volume_id': snapshot['volume_id']}) return self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.debug( 'delete_snapshot snapshot id=%(snapshot_id)s ' 'volume id=%(volume_id)s', { 'snapshot_id': snapshot['id'], 'volume_id': snapshot['volume_id']}) self.common.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" pass def create_export(self, context, volume, connector): """Exports the volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ LOG.debug( 'create_export volume provider_location=%(provider_location)s', { 'provider_location': volume['provider_location']}) return self.common.create_export(context, volume) def remove_export(self, context, volume): """Removes an export for a volume.""" pass @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Initializes the connection and returns connection information. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. The initiator_target_map is a map that represents the remote wwn(s) and a list of wwns which are visible to the remote wwn(s). Example return values: { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', 'initiator_target_map': { '1122334455667788': ['1234567890123'] } } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'initiator_target_map': { '1122334455667788': ['1234567890123', '0987654321321'] } } } """ LOG.debug( 'initialize_connection volume id=%(volume_id)s ' 'connector initiator=%(initiator)s', { 'volume_id': volume['id'], 'initiator': connector['initiator']}) return self.common.initialize_connection(volume, connector) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" LOG.debug('terminate_connection volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.terminate_connection(volume, connector) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. """ LOG.debug('get_volume_stats refresh=%(refresh)s', { 'refresh': refresh}) return self.common.get_volume_stats(refresh) def manage_existing(self, volume, existing_ref): """Manage an existing lun in the array. The lun should be in a manageable pool backend, otherwise error would return. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 
existing_ref:{ 'id':lun_id } """ LOG.debug( 'manage_existing volume id=%(volume_id)s ' 'existing_ref source id=%(source_id)s', { 'volume_id': volume['id'], 'source_id': existing_ref['source-id']}) return self.common.manage_existing(volume, existing_ref) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. :param volume: Cinder volume to unmanage """ LOG.debug('unmanage volume id=%(volume_id)s', { 'volume_id': volume['id']}) self.common.unmanage(volume) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. """ LOG.debug( 'manage_existing_get_size volume id=%(volume_id)s ' 'existing_ref source id=%(source_id)s', { 'volume_id': volume['id'], 'source_id': existing_ref['source-id']}) return self.common.manage_existing_get_size(volume, existing_ref) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug( 'retype volume id=%(volume_id)s new_type id=%(type_id)s', { 'volume_id': volume['id'], 'type_id': new_type['id']}) return self.common.retype(ctxt, volume, new_type, diff, host) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ LOG.debug( 'update migrated volume original volume id= %(volume_id)s ' 'new volume id=%(new_volume_id)s', { 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) return self.common.update_migrated_volume(ctxt, volume, new_volume, original_volume_status) cinder-8.0.0/cinder/volume/drivers/violin/0000775000567000056710000000000012701406543021657 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/violin/__init__.py0000664000567000056710000000000012701406250023751 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/violin/v7000_fcp.py0000664000567000056710000003255012701406250023635 0ustar jenkinsjenkins00000000000000# Copyright 2015 Violin Memory, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Violin 7000 Series All-Flash Array Volume Driver Provides fibre channel specific LUN services for V7000 series flash arrays. This driver requires Concerto v7.0.0 or newer software on the array. 
You will need to install the Violin Memory REST client library: sudo pip install vmemclient Set the following in the cinder.conf file to enable the VMEM V7000 Fibre Channel Driver along with the required flags: volume_driver=cinder.volume.drivers.violin.v7000_fcp.V7000FCDriver NOTE: this driver file requires the use of synchronization points for certain types of backend operations, and as a result may not work properly in an active-active HA configuration. See OpenStack Cinder driver documentation for more information. """ from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LE, _LI from cinder import utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume.drivers.violin import v7000_common from cinder.zonemanager import utils as fczm_utils import socket LOG = logging.getLogger(__name__) class V7000FCPDriver(driver.FibreChannelDriver): """Executes commands relating to fibre channel based Violin Memory arrays. Version history: 1.0 - Initial driver """ VERSION = '1.0' def __init__(self, *args, **kwargs): super(V7000FCPDriver, self).__init__(*args, **kwargs) self.gateway_fc_wwns = [] self.stats = {} self.configuration.append_config_values(v7000_common.violin_opts) self.configuration.append_config_values(san.san_opts) self.common = v7000_common.V7000Common(self.configuration) self.lookup_service = fczm_utils.create_lookup_service() LOG.info(_LI("Initialized driver %(name)s version: %(vers)s"), {'name': self.__class__.__name__, 'vers': self.VERSION}) def do_setup(self, context): """Any initialization the driver does while starting.""" super(V7000FCPDriver, self).do_setup(context) self.common.do_setup(context) self.gateway_fc_wwns = self._get_active_fc_targets() # Register the client with the storage array fc_version = self.VERSION + "-FCP" self.common.vmem_mg.utility.set_managed_by_openstack_version( fc_version) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" self.common.check_for_setup_error() if len(self.gateway_fc_wwns) == 0: raise exception.ViolinInvalidBackendConfig( reason=_('No FCP targets found')) def create_volume(self, volume): """Creates a volume.""" self.common._create_lun(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" self.common._create_volume_from_snapshot(snapshot, volume) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" self.common._create_lun_from_lun(src_vref, volume) def delete_volume(self, volume): """Deletes a volume.""" self.common._delete_lun(volume) def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" self.common._extend_lun(volume, new_size) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.common._create_lun_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.common._delete_lun_snapshot(snapshot) def ensure_export(self, context, volume): """Synchronously checks and re-exports volumes at cinder start time.""" pass def create_export(self, context, volume, connector): """Exports the volume.""" pass def remove_export(self, context, volume): """Removes an export for a logical volume.""" pass @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" LOG.debug("Initialize_connection: initiator - %(initiator)s host - " "%(host)s wwpns - %(wwpns)s", {'initiator': connector['initiator'], 'host': 
connector['host'], 'wwpns': connector['wwpns']}) self.common.vmem_mg.client.create_client( name=connector['host'], proto='FC', fc_wwns=connector['wwpns']) lun_id = self._export_lun(volume, connector) target_wwns, init_targ_map = self._build_initiator_target_map( connector) properties = {} properties['target_discovered'] = True properties['target_wwn'] = target_wwns properties['target_lun'] = lun_id properties['initiator_target_map'] = init_targ_map LOG.debug("Return FC data for zone addition: %(properties)s.", {'properties': properties}) return {'driver_volume_type': 'fibre_channel', 'data': properties} @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Terminates the connection (target<-->initiator).""" self._unexport_lun(volume, connector) properties = {} if not self._is_initiator_connected_to_array(connector): target_wwns, init_targ_map = self._build_initiator_target_map( connector) properties['target_wwn'] = target_wwns properties['initiator_target_map'] = init_targ_map LOG.debug("Return FC data for zone deletion: %(properties)s.", {'properties': properties}) return {'driver_volume_type': 'fibre_channel', 'data': properties} def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, update the stats first. """ if refresh or not self.stats: self._update_volume_stats() return self.stats @utils.synchronized('vmem-export') def _export_lun(self, volume, connector=None): """Generates the export configuration for the given volume. :param volume: volume object provided by the Manager :param connector: connector object provided by the Manager :returns: the LUN ID assigned by the backend """ lun_id = '' v = self.common.vmem_mg if not connector: raise exception.ViolinInvalidBackendConfig( reason=_('No initiators found, cannot proceed')) LOG.debug("Exporting lun %(vol_id)s - initiator wwpns %(i_wwpns)s " "- target wwpns %(t_wwpns)s.", {'vol_id': volume['id'], 'i_wwpns': connector['wwpns'], 't_wwpns': self.gateway_fc_wwns}) try: lun_id = self.common._send_cmd_and_verify( v.lun.assign_lun_to_client, self._is_lun_id_ready, "Assign SAN client successfully", [volume['id'], connector['host'], "ReadWrite"], [volume['id'], connector['host']]) except exception.ViolinBackendErr: LOG.exception(_LE("Backend returned err for lun export.")) raise except Exception: raise exception.ViolinInvalidBackendConfig( reason=_('LUN export failed!')) lun_id = self._get_lun_id(volume['id'], connector['host']) LOG.info(_LI("Exported lun %(vol_id)s on lun_id %(lun_id)s."), {'vol_id': volume['id'], 'lun_id': lun_id}) return lun_id @utils.synchronized('vmem-export') def _unexport_lun(self, volume, connector=None): """Removes the export configuration for the given volume. 
:param volume: volume object provided by the Manager """ v = self.common.vmem_mg LOG.info(_LI("Unexporting lun %s."), volume['id']) try: self.common._send_cmd(v.lun.unassign_client_lun, "Unassign SAN client successfully", volume['id'], connector['host'], True) except exception.ViolinBackendErr: LOG.exception(_LE("Backend returned err for lun export.")) raise except Exception: LOG.exception(_LE("LUN unexport failed!")) raise def _update_volume_stats(self): """Gathers array stats and converts them to GB values.""" data = {} total_gb = 0 free_gb = 0 v = self.common.vmem_mg.basic array_name_triple = socket.gethostbyaddr(self.configuration.san_ip) array_name = array_name_triple[0] phy_devices = v.get("/batch/physicalresource/physicaldevice") all_devices = [x for x in phy_devices['data']['physical_devices']] for x in all_devices: if socket.getfqdn(x['owner']) == array_name: total_gb += x['size_mb'] // 1024 free_gb += x['availsize_mb'] // 1024 backend_name = self.configuration.volume_backend_name data['volume_backend_name'] = backend_name or self.__class__.__name__ data['vendor_name'] = 'Violin Memory, Inc.' data['driver_version'] = self.VERSION data['storage_protocol'] = 'fibre_channel' data['reserved_percentage'] = 0 data['QoS_support'] = False data['total_capacity_gb'] = total_gb data['free_capacity_gb'] = free_gb for i in data: LOG.debug("stat update: %(name)s=%(data)s", {'name': i, 'data': data[i]}) self.stats = data def _get_active_fc_targets(self): """Get a list of gateway WWNs that can be used as FCP targets. :param mg_conn: active XG connection to one of the gateways :returns: list of WWNs in openstack format """ v = self.common.vmem_mg active_gw_fcp_wwns = [] fc_info = v.adapter.get_fc_info() for x in fc_info.values(): active_gw_fcp_wwns.append(x[0]) return active_gw_fcp_wwns def _get_lun_id(self, volume_name, client_name): """Get the lun ID for an exported volume. If the lun is successfully assigned (exported) to a client, the client info has the lun_id. :param volume_name: name of volume to query for lun ID :param client_name: name of client associated with the volume :returns: integer value of lun ID """ v = self.common.vmem_mg lun_id = -1 client_info = v.client.get_client_info(client_name) for x in client_info['FibreChannelDevices']: if volume_name == x['name']: lun_id = x['lun'] break return int(lun_id) def _is_lun_id_ready(self, volume_name, client_name): """Get the lun ID for an exported volume. If the lun is successfully assigned (exported) to a client, the client info has the lun_id. 
:param volume_name: name of volume to query for lun ID :param client_name: name of client associated with the volume :returns: Returns True if lun is ready, False otherwise """ lun_id = -1 lun_id = self._get_lun_id(volume_name, client_name) if lun_id != -1: return True else: return False def _build_initiator_target_map(self, connector): """Build the target_wwns and the initiator target map.""" target_wwns = [] init_targ_map = {} if self.lookup_service: dev_map = self.lookup_service.get_device_mapping_from_network( connector['wwpns'], self.gateway_fc_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list( set(init_targ_map[initiator])) target_wwns = list(set(target_wwns)) else: initiator_wwns = connector['wwpns'] target_wwns = self.gateway_fc_wwns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map def _is_initiator_connected_to_array(self, connector): """Check if any initiator wwns still have active sessions.""" v = self.common.vmem_mg client = v.client.get_client_info(connector['host']) if len(client['FibreChannelDevices']): # each entry in the FibreChannelDevices array is a dict # describing an active lun assignment return True return False cinder-8.0.0/cinder/volume/drivers/violin/v7000_common.py0000664000567000056710000010125612701406250024355 0ustar jenkinsjenkins00000000000000# Copyright 2015 Violin Memory, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Violin Memory 7000 Series All-Flash Array Common Driver for OpenStack Cinder Provides common (ie., non-protocol specific) management functions for V7000 series flash arrays. Backend array communication is handled via VMEM's python library called 'vmemclient'. NOTE: this driver file requires the use of synchronization points for certain types of backend operations, and as a result may not work properly in an active-active HA configuration. See OpenStack Cinder driver documentation for more information. 
""" import math import re import six import time from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units from cinder import context from cinder.db.sqlalchemy import api from cinder import exception from cinder.i18n import _, _LE, _LI from cinder import utils from cinder.volume import volume_types LOG = logging.getLogger(__name__) try: import vmemclient except ImportError: vmemclient = None else: LOG.info(_LI("Running with vmemclient version: %s"), vmemclient.__version__) CONCERTO_SUPPORTED_VERSION_PATTERNS = ['Version 7.[0-9].?[0-9]?'] CONCERTO_DEFAULT_PRIORITY = 'medium' CONCERTO_DEFAULT_SRA_POLICY = 'preserveAll' CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION = True CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD = 50 CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT = '1024MB' CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE = None CONCERTO_DEFAULT_SRA_ENABLE_SHRINK = False CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS = 1000 CONCERTO_DEFAULT_POLICY_RETENTION_MODE = 'All' violin_opts = [ cfg.IntOpt('violin_request_timeout', default=300, help='Global backend request timeout, in seconds.'), ] CONF = cfg.CONF CONF.register_opts(violin_opts) class V7000Common(object): """Contains common code for the Violin V7000 drivers.""" def __init__(self, config): self.vmem_mg = None self.container = "" self.config = config def do_setup(self, context): """Any initialization the driver does while starting.""" if not self.config.san_ip: raise exception.InvalidInput( reason=_('Gateway VIP is not set')) self.vmem_mg = vmemclient.open(self.config.san_ip, self.config.san_login, self.config.san_password, keepalive=True) if self.vmem_mg is None: msg = _('Failed to connect to array') raise exception.VolumeBackendAPIException(data=msg) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" if vmemclient is None: msg = _('vmemclient python library not found') raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("CONCERTO version: %s"), self.vmem_mg.version) if not self._is_supported_vmos_version(self.vmem_mg.version): msg = _('CONCERTO version is not supported') raise exception.ViolinInvalidBackendConfig(reason=msg) @utils.synchronized('vmem-lun') def _create_lun(self, volume): """Creates a new lun. :param volume: volume object provided by the Manager """ thin_lun = False dedup = False size_mb = volume['size'] * units.Ki full_size_mb = size_mb pool = None LOG.debug("Creating LUN %(name)s, %(size)s MB.", {'name': volume['name'], 'size': size_mb}) if self.config.san_thin_provision: thin_lun = True # Set the actual allocation size for thin lun # default here is 10% size_mb = size_mb // 10 typeid = volume['volume_type_id'] if typeid: # extra_specs with thin specified overrides san_thin_provision spec_value = self._get_volume_type_extra_spec(volume, "thin") if spec_value and spec_value.lower() == "true": thin_lun = True # Set the actual allocation size for thin lun # default here is 10% size_mb = size_mb // 10 spec_value = self._get_volume_type_extra_spec(volume, "dedup") if spec_value and spec_value.lower() == "true": dedup = True # A dedup lun is always a thin lun thin_lun = True # Set the actual allocation size for thin lun # default here is 10%. 
The actual allocation may # different, depending on other factors size_mb = full_size_mb // 10 # Extract the storage_pool name if one is specified pool = self._get_violin_extra_spec(volume, "storage_pool") try: # Note: In the following create_lun command for setting up a dedup # or thin lun the size_mb parameter is ignored and 10% of the # full_size_mb specified is the size actually allocated to # the lun. full_size_mb is the size the lun is allowed to # grow. On the other hand, if it is a thick lun, the # full_size_mb is ignored and size_mb is the actual # allocated size of the lun. self._send_cmd(self.vmem_mg.lun.create_lun, "Create resource successfully.", volume['id'], size_mb, dedup, thin_lun, full_size_mb, storage_pool=pool) except Exception: LOG.exception(_LE("Lun create for %s failed!"), volume['id']) raise @utils.synchronized('vmem-lun') def _delete_lun(self, volume): """Deletes a lun. :param volume: volume object provided by the Manager """ success_msgs = ['Delete resource successfully', ''] LOG.debug("Deleting lun %s.", volume['id']) try: # If the LUN has ever had a snapshot, it has an SRA and # policy that must be deleted first. self._delete_lun_snapshot_bookkeeping(volume['id']) # TODO(rdl) force the delete for now to deal with pending # snapshot issues. Should revisit later for a better fix. self._send_cmd(self.vmem_mg.lun.delete_lun, success_msgs, volume['id'], True) except exception.VolumeBackendAPIException: LOG.exception(_LE("Lun %s has dependent snapshots, " "skipping lun deletion."), volume['id']) raise exception.VolumeIsBusy(volume_name=volume['id']) except Exception: LOG.exception(_LE("Lun delete for %s failed!"), volume['id']) raise def _extend_lun(self, volume, new_size): """Extend an existing volume's size. :param volume: volume object provided by the Manager :param new_size: new size in GB to be applied """ v = self.vmem_mg typeid = volume['volume_type_id'] if typeid: spec_value = self._get_volume_type_extra_spec(volume, "dedup") if spec_value and spec_value.lower() == "true": # A Dedup lun's size cannot be modified in Concerto. msg = _('Dedup luns cannot be extended') raise exception.VolumeDriverException(message=msg) size_mb = volume['size'] * units.Ki new_size_mb = new_size * units.Ki # Concerto lun extend requires number of MB to increase size by, # not the final size value. # delta_mb = new_size_mb - size_mb LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s MB.", {'id': volume['id'], 'size': size_mb, 'new_size': new_size_mb}) try: self._send_cmd(v.lun.extend_lun, "Expand resource successfully", volume['id'], delta_mb) except Exception: LOG.exception(_LE("LUN extend failed!")) raise def _create_lun_snapshot(self, snapshot): """Create a new cinder snapshot on a volume. This maps onto a Concerto 'timemark', but we must always first ensure that a snapshot resource area (SRA) exists, and that a snapshot policy exists. :param snapshot: cinder snapshot object provided by the Manager Exceptions: VolumeBackendAPIException: If SRA could not be created, or snapshot policy could not be created RequestRetryTimeout: If backend could not complete the request within the allotted timeout. ViolinBackendErr: If backend reports an error during the create snapshot phase. 
""" cinder_volume_id = snapshot['volume_id'] cinder_snapshot_id = snapshot['id'] LOG.debug("Creating LUN snapshot %(snap_id)s on volume " "%(vol_id)s %(dpy_name)s.", {'snap_id': cinder_snapshot_id, 'vol_id': cinder_volume_id, 'dpy_name': snapshot['display_name']}) self._ensure_snapshot_resource_area(cinder_volume_id) self._ensure_snapshot_policy(cinder_volume_id) try: self._send_cmd( self.vmem_mg.snapshot.create_lun_snapshot, "Create TimeMark successfully", lun=cinder_volume_id, comment=self._compress_snapshot_id(cinder_snapshot_id), priority=CONCERTO_DEFAULT_PRIORITY, enable_notification=False) except Exception: LOG.exception(_LE("Lun create snapshot for " "volume %(vol)s snapshot %(snap)s failed!"), {'vol': cinder_volume_id, 'snap': cinder_snapshot_id}) raise def _delete_lun_snapshot(self, snapshot): """Delete the specified cinder snapshot. :param snapshot: cinder snapshot object provided by the Manager Exceptions: RequestRetryTimeout: If backend could not complete the request within the allotted timeout. ViolinBackendErr: If backend reports an error during the delete snapshot phase. """ cinder_volume_id = snapshot['volume_id'] cinder_snapshot_id = snapshot['id'] LOG.debug("Deleting snapshot %(snap_id)s on volume " "%(vol_id)s %(dpy_name)s", {'snap_id': cinder_snapshot_id, 'vol_id': cinder_volume_id, 'dpy_name': snapshot['display_name']}) try: self._send_cmd( self.vmem_mg.snapshot.delete_lun_snapshot, "Delete TimeMark successfully", lun=cinder_volume_id, comment=self._compress_snapshot_id(cinder_snapshot_id)) except Exception: LOG.exception(_LE("Lun delete snapshot for " "volume %(vol)s snapshot %(snap)s failed!"), {'vol': cinder_volume_id, 'snap': cinder_snapshot_id}) raise def _create_volume_from_snapshot(self, snapshot, volume): """Create a new cinder volume from a given snapshot of a lun This maps onto a Concerto 'copy snapshot to lun'. Concerto creates the lun and then copies the snapshot into it. :param snapshot: cinder snapshot object provided by the Manager :param volume: cinder volume to be created """ cinder_volume_id = volume['id'] cinder_snapshot_id = snapshot['id'] pool = None result = None LOG.debug("Copying snapshot %(snap_id)s onto volume %(vol_id)s.", {'snap_id': cinder_snapshot_id, 'vol_id': cinder_volume_id}) typeid = volume['volume_type_id'] if typeid: pool = self._get_violin_extra_spec(volume, "storage_pool") try: result = self.vmem_mg.lun.copy_snapshot_to_new_lun( source_lun=snapshot['volume_id'], source_snapshot_comment= self._compress_snapshot_id(cinder_snapshot_id), destination=cinder_volume_id, storage_pool=pool) if not result['success']: self._check_error_code(result) except Exception: LOG.exception(_LE("Copy snapshot to volume for " "snapshot %(snap)s volume %(vol)s failed!"), {'snap': cinder_snapshot_id, 'vol': cinder_volume_id}) raise # get the destination lun info and extract virtualdeviceid info = self.vmem_mg.lun.get_lun_info(object_id=result['object_id']) self._wait_for_lun_or_snap_copy( snapshot['volume_id'], dest_vdev_id=info['virtualDeviceID']) def _create_lun_from_lun(self, src_vol, dest_vol): """Copy the contents of a lun to a new lun (i.e., full clone). 
:param src_vol: cinder volume to clone :param dest_vol: cinder volume to be created """ pool = None result = None LOG.debug("Copying lun %(src_vol_id)s onto lun %(dest_vol_id)s.", {'src_vol_id': src_vol['id'], 'dest_vol_id': dest_vol['id']}) # Extract the storage_pool name if one is specified typeid = dest_vol['volume_type_id'] if typeid: pool = self._get_violin_extra_spec(dest_vol, "storage_pool") try: # in order to do a full clone the source lun must have a # snapshot resource self._ensure_snapshot_resource_area(src_vol['id']) result = self.vmem_mg.lun.copy_lun_to_new_lun( source=src_vol['id'], destination=dest_vol['id'], storage_pool=pool) if not result['success']: self._check_error_code(result) except Exception: LOG.exception(_LE("Create new lun from lun for source " "%(src)s => destination %(dest)s failed!"), {'src': src_vol['id'], 'dest': dest_vol['id']}) raise self._wait_for_lun_or_snap_copy( src_vol['id'], dest_obj_id=result['object_id']) def _send_cmd(self, request_func, success_msgs, *args, **kwargs): """Run an XG request function, and retry as needed. The request will be retried until it returns a success message, a failure message, or the global request timeout is hit. This wrapper is meant to deal with backend requests that can fail for any variety of reasons, for instance, when the system is already busy handling other LUN requests. If there is no space left, or other "fatal" errors are returned (see _fatal_error_code() for a list of all known error conditions). :param request_func: XG api method to call :param success_msgs: Success messages expected from the backend :param *args: argument array to be passed to the request_func :param **kwargs: argument dictionary to be passed to request_func :returns: the response dict from the last XG call """ resp = {} start = time.time() done = False if isinstance(success_msgs, six.string_types): success_msgs = [success_msgs] while not done: if time.time() - start >= self.config.violin_request_timeout: raise exception.ViolinRequestRetryTimeout( timeout=self.config.violin_request_timeout) resp = request_func(*args, **kwargs) if not resp['msg']: # XG requests will return None for a message if no message # string is passed in the raw response resp['msg'] = '' for msg in success_msgs: if resp['success'] and msg in resp['msg']: done = True break if not resp['success']: self._check_error_code(resp) done = True break return resp def _send_cmd_and_verify(self, request_func, verify_func, request_success_msgs='', rargs=None, vargs=None): """Run an XG request function, retry if needed, and verify success. If the verification fails, then retry the request/verify cycle until both functions are successful, the request function returns a failure message, or the global request timeout is hit. This wrapper is meant to deal with backend requests that can fail for any variety of reasons, for instance, when the system is already busy handling other LUN requests. It is also smart enough to give up if clustering is down (eg no HA available), there is no space left, or other "fatal" errors are returned (see _fatal_error_code() for a list of all known error conditions). 
        :param request_func: XG api method to call
        :param verify_func: function call to verify request was completed
        :param request_success_msgs: Success message(s) expected for
                                     request_func
        :param rargs: argument array to be passed to request_func
        :param vargs: argument array to be passed to verify_func
        :returns: the response dict from the last XG call
        """
        resp = {}
        start = time.time()
        request_needed = True
        verify_needed = True

        if isinstance(request_success_msgs, six.string_types):
            request_success_msgs = [request_success_msgs]

        rargs = rargs if rargs else []
        vargs = vargs if vargs else []

        while request_needed or verify_needed:
            if time.time() - start >= self.config.violin_request_timeout:
                raise exception.ViolinRequestRetryTimeout(
                    timeout=self.config.violin_request_timeout)

            if request_needed:
                resp = request_func(*rargs)
                if not resp['msg']:
                    # XG requests will return None for a message if no
                    # message string is passed in the raw response
                    resp['msg'] = ''
                for msg in request_success_msgs:
                    if resp['success'] and msg in resp['msg']:
                        request_needed = False
                        break
                if not resp['success']:
                    self._check_error_code(resp)
                    request_needed = False

            elif verify_needed:
                success = verify_func(*vargs)
                if success:
                    # XG verify func was completed
                    verify_needed = False

        return resp

    def _ensure_snapshot_resource_area(self, volume_id):
        """Make sure concerto snapshot resource area exists on volume.

        :param volume_id: Cinder volume ID corresponding to the backend LUN

        Exceptions:
            VolumeBackendAPIException: if the cinder volume does not exist
                on the backend, or the SRA could not be created.
        """
        ctxt = context.get_admin_context()
        volume = api.volume_get(ctxt, volume_id)
        pool = None

        if not volume:
            msg = (_("Failed to ensure snapshot resource area, could not "
                     "locate volume for id %s") % volume_id)
            raise exception.VolumeBackendAPIException(data=msg)

        if not self.vmem_mg.snapshot.lun_has_a_snapshot_resource(
                lun=volume_id):
            # Per Concerto documentation, the SRA size should be computed
            # as follows:
            #
            #   Size-of-original-LUN    Reserve for SRA
            #   < 500MB                 100%
            #   500MB to 2G             50%
            #   >= 2G                   20%
            #
            # Note: cinder volume.size is in GB, vmemclient wants MB.
            lun_size_mb = volume['size'] * units.Ki
            if lun_size_mb < 500:
                snap_size_mb = lun_size_mb
            elif lun_size_mb < 2000:
                snap_size_mb = 0.5 * lun_size_mb
            else:
                snap_size_mb = 0.2 * lun_size_mb

            snap_size_mb = int(math.ceil(snap_size_mb))
            typeid = volume['volume_type_id']
            if typeid:
                pool = self._get_violin_extra_spec(volume, "storage_pool")

            LOG.debug("Creating SRA of %(ssmb)sMB for lun of %(lsmb)sMB "
                      "on %(vol_id)s.",
                      {'ssmb': snap_size_mb,
                       'lsmb': lun_size_mb,
                       'vol_id': volume_id})

            res = self.vmem_mg.snapshot.create_snapshot_resource(
                lun=volume_id,
                size=snap_size_mb,
                enable_notification=False,
                policy=CONCERTO_DEFAULT_SRA_POLICY,
                enable_expansion=CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
                expansion_threshold=CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
                expansion_increment=CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
                expansion_max_size=CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
                enable_shrink=CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
                storage_pool=pool)

            if not res['success']:
                msg = (_("Failed to create snapshot resource area on "
                         "volume %(vol)s: %(res)s.")
                       % {'vol': volume_id, 'res': res['msg']})
                raise exception.VolumeBackendAPIException(data=msg)

    def _ensure_snapshot_policy(self, volume_id):
        """Ensure concerto snapshot policy exists on cinder volume.

        A snapshot policy is required by concerto in order to create
        snapshots.
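        When missing, the policy is created with the module-level
        CONCERTO_DEFAULT_* settings and with replication, snapshot
        scheduling and CDP all disabled.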
:param volume_id: Cinder volume ID corresponding to the backend LUN Exceptions: VolumeBackendAPIException: when snapshot policy cannot be created. """ if not self.vmem_mg.snapshot.lun_has_a_snapshot_policy( lun=volume_id): res = self.vmem_mg.snapshot.create_snapshot_policy( lun=volume_id, max_snapshots=CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS, enable_replication=False, enable_snapshot_schedule=False, enable_cdp=False, retention_mode=CONCERTO_DEFAULT_POLICY_RETENTION_MODE) if not res['success']: msg = (_( "Failed to create snapshot policy on " "volume %(vol)s: %(res)s.") % {'vol': volume_id, 'res': res['msg']}) raise exception.VolumeBackendAPIException(data=msg) def _delete_lun_snapshot_bookkeeping(self, volume_id): """Clear residual snapshot support resources from LUN. Exceptions: VolumeBackendAPIException: If snapshots still exist on the LUN. """ # Make absolutely sure there are no snapshots present try: snaps = self.vmem_mg.snapshot.get_snapshots(volume_id) if len(snaps) > 0: msg = (_("Cannot delete LUN %s while snapshots exist.") % volume_id) raise exception.VolumeBackendAPIException(data=msg) except vmemclient.core.error.NoMatchingObjectIdError: pass except vmemclient.core.error.MissingParameterError: pass try: res = self.vmem_mg.snapshot.delete_snapshot_policy( lun=volume_id) if not res['success']: if 'TimeMark is disabled' in res['msg']: LOG.debug("Verified no snapshot policy is on volume %s.", volume_id) else: msg = (_("Unable to delete snapshot policy on " "volume %s.") % volume_id) raise exception.VolumeBackendAPIException(data=msg) else: LOG.debug("Deleted snapshot policy on volume " "%(vol)s, result %(res)s.", {'vol': volume_id, 'res': res}) except vmemclient.core.error.NoMatchingObjectIdError: LOG.debug("Verified no snapshot policy present on volume %s.", volume_id) pass try: res = self.vmem_mg.snapshot.delete_snapshot_resource( lun=volume_id) LOG.debug("Deleted snapshot resource area on " "volume %(vol)s, result %(res)s.", {'vol': volume_id, 'res': res}) except vmemclient.core.error.NoMatchingObjectIdError: LOG.debug("Verified no snapshot resource area present on " "volume %s.", volume_id) pass def _compress_snapshot_id(self, cinder_snap_id): """Compress cinder snapshot ID so it fits in backend. Compresses to fit in 32-chars. """ return ''.join(six.text_type(cinder_snap_id).split('-')) def _get_snapshot_from_lun_snapshots( self, cinder_volume_id, cinder_snap_id): """Locate backend snapshot dict associated with cinder snapshot id. :returns: Cinder snapshot dictionary if found, None otherwise. """ try: snaps = self.vmem_mg.snapshot.get_snapshots(cinder_volume_id) except vmemclient.core.error.NoMatchingObjectIdError: return None key = self._compress_snapshot_id(cinder_snap_id) for s in snaps: if s['comment'] == key: # Remap return dict to its uncompressed form s['comment'] = cinder_snap_id return s def _wait_for_lun_or_snap_copy(self, src_vol_id, dest_vdev_id=None, dest_obj_id=None): """Poll to see when a lun or snap copy to a lun is complete. 
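        The status tuple reported by the backend is interpreted as
        (copy id, bytes copied, percent done): (None, None, 0) means the
        copy has not started yet, and the copy is treated as finished once
        the reported id no longer matches the one being waited on or the
        percentage reaches 100.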
        :param src_vol_id: cinder volume ID of source volume
        :param dest_vdev_id: virtual device ID of destination, for snap copy
        :param dest_obj_id: lun object ID of destination, for lun copy
        :returns: True if successful, False otherwise
        """
        wait_id = None
        wait_func = None

        if dest_vdev_id:
            wait_id = dest_vdev_id
            wait_func = self.vmem_mg.snapshot.get_snapshot_copy_status
        elif dest_obj_id:
            wait_id = dest_obj_id
            wait_func = self.vmem_mg.lun.get_lun_copy_status
        else:
            return False

        def _loop_func():
            LOG.debug("Entering _wait_for_lun_or_snap_copy loop: "
                      "vdev=%s, objid=%s", dest_vdev_id, dest_obj_id)

            status = wait_func(src_vol_id)

            if status[0] is None:
                # pre-copy transient result, status = (None, None, 0)
                LOG.debug("lun or snap copy prepping.")

            elif status[0] != wait_id:
                # the copy must be complete since another lun is being copied
                LOG.debug("lun or snap copy complete.")
                raise loopingcall.LoopingCallDone(retvalue=True)

            elif status[1] is not None:
                # copy is in progress, status = ('12345', 1700, 10)
                LOG.debug("MB copied: %d, percent done: %d.",
                          status[1], status[2])

            elif status[2] == 0:
                # copy has just started, status = ('12345', None, 0)
                LOG.debug("lun or snap copy started.")

            elif status[2] == 100:
                # copy is complete, status = ('12345', None, 100)
                LOG.debug("lun or snap copy complete.")
                raise loopingcall.LoopingCallDone(retvalue=True)

            else:
                # unexpected case
                LOG.debug("unexpected case (%(id)s, %(bytes)s, %(percent)s)",
                          {'id': six.text_type(status[0]),
                           'bytes': six.text_type(status[1]),
                           'percent': six.text_type(status[2])})
                raise loopingcall.LoopingCallDone(retvalue=False)

        timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
        success = timer.start(interval=1).wait()

        return success

    def _is_supported_vmos_version(self, version_string):
        """Check a version string for compatibility with OpenStack.

        Compare a version string against the global regex of versions
        compatible with OpenStack.

        :param version_string: array's gateway version string
        :returns: True if supported, False if not
        """
        for pattern in CONCERTO_SUPPORTED_VERSION_PATTERNS:
            if re.match(pattern, version_string):
                return True
        return False

    def _check_error_code(self, response):
        """Raise an exception when backend returns certain errors.

        Error codes returned from the backend have to be examined
        individually.  Not all of them are fatal.  For example, a lun
        attach failing because the client is already attached is not a
        fatal error.
:param response: a response dict result from the vmemclient request """ if "Error: 0x9001003c" in response['msg']: # This error indicates a duplicate attempt to attach lun, # non-fatal error pass elif "Error: 0x9002002b" in response['msg']: # lun unexport failed - lun is not exported to any clients, # non-fatal error pass elif "Error: 0x09010023" in response['msg']: # lun delete failed - dependent snapshot copy in progress, # fatal error raise exception.ViolinBackendErr(message=response['msg']) elif "Error: 0x09010048" in response['msg']: # lun delete failed - dependent snapshots still exist, # fatal error raise exception.ViolinBackendErr(message=response['msg']) elif "Error: 0x90010022" in response['msg']: # lun create failed - lun with same name already exists, # fatal error raise exception.ViolinBackendErrExists() elif "Error: 0x90010089" in response['msg']: # lun export failed - lun is still being created as copy, # fatal error raise exception.ViolinBackendErr(message=response['msg']) else: # assume any other error is fatal raise exception.ViolinBackendErr(message=response['msg']) def _get_volume_type_extra_spec(self, volume, spec_key): """Parse data stored in a volume_type's extra_specs table. :param volume: volume object containing volume_type to query :param spec_key: the metadata key to search for :returns: string value associated with spec_key """ spec_value = None ctxt = context.get_admin_context() typeid = volume['volume_type_id'] if typeid: volume_type = volume_types.get_volume_type(ctxt, typeid) volume_specs = volume_type.get('extra_specs') for key, val in volume_specs.items(): # Strip the prefix "capabilities" if ':' in key: scope = key.split(':') key = scope[1] if key == spec_key: spec_value = val break return spec_value def _get_violin_extra_spec(self, volume, spec_key): """Parse volume_type's extra_specs table for a violin-specific key. :param volume: volume object containing volume_type to query :param spec_key: the metadata key to search for :returns: string value associated with spec_key """ spec_value = None ctxt = context.get_admin_context() typeid = volume['volume_type_id'] if typeid: volume_type = volume_types.get_volume_type(ctxt, typeid) volume_specs = volume_type.get('extra_specs') for key, val in volume_specs.items(): # Strip the prefix "violin" if ':' in key: scope = key.split(':') key = scope[1] if scope[0] == "violin" and key == spec_key: spec_value = val break return spec_value cinder-8.0.0/cinder/volume/drivers/nfs.py0000664000567000056710000004555512701406250021530 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
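# NOTE: illustrative sketch only -- the shares file consumed by this driver
# (``nfs_shares_config``, /etc/cinder/nfs_shares by default) lists one NFS
# export per line, optionally followed by mount options; the hosts below
# are hypothetical:
#
#     192.168.1.10:/exports/cinder
#     nas.example.com:/vol/cinder -o nfsvers=3,rsize=65536
#
# Each entry becomes a key in self.shares ({address: options}), and any
# trailing option string is split into mount flags when the share is
# mounted by _ensure_share_mounted().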
import errno import os import time from os_brick.remotefs import remotefs as remotefs_brick from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder import utils from cinder.volume import driver from cinder.volume.drivers import remotefs VERSION = '1.3.1' LOG = logging.getLogger(__name__) nfs_opts = [ cfg.StrOpt('nfs_shares_config', default='/etc/cinder/nfs_shares', help='File with the list of available NFS shares'), cfg.BoolOpt('nfs_sparsed_volumes', default=True, help=('Create volumes as sparsed files which take no space.' 'If set to False volume is created as regular file.' 'In such case volume creation takes a lot of time.')), cfg.StrOpt('nfs_mount_point_base', default='$state_path/mnt', help=('Base dir containing mount points for NFS shares.')), cfg.StrOpt('nfs_mount_options', help=('Mount options passed to the NFS client. See section ' 'of the NFS man page for details.')), cfg.IntOpt('nfs_mount_attempts', default=3, help=('The number of attempts to mount NFS shares before ' 'raising an error. At least one attempt will be ' 'made to mount an NFS share, regardless of the ' 'value specified.')), ] CONF = cfg.CONF CONF.register_opts(nfs_opts) class NfsDriver(driver.ExtendVD, remotefs.RemoteFSDriver): """NFS based cinder driver. Creates file on NFS share for using it as block device on hypervisor. """ driver_volume_type = 'nfs' driver_prefix = 'nfs' volume_backend_name = 'Generic_NFS' VERSION = VERSION def __init__(self, execute=putils.execute, *args, **kwargs): self._remotefsclient = None super(NfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(nfs_opts) root_helper = utils.get_root_helper() # base bound to instance is used in RemoteFsConnector. self.base = getattr(self.configuration, 'nfs_mount_point_base', CONF.nfs_mount_point_base) self.base = os.path.realpath(self.base) opts = getattr(self.configuration, 'nfs_mount_options', CONF.nfs_mount_options) nas_mount_options = getattr(self.configuration, 'nas_mount_options', None) if nas_mount_options is not None: LOG.debug('overriding nfs_mount_options with nas_mount_options') opts = nas_mount_options self._remotefsclient = remotefs_brick.RemoteFsClient( 'nfs', root_helper, execute=execute, nfs_mount_point_base=self.base, nfs_mount_options=opts) self._sparse_copy_volume_data = True self.reserved_percentage = self.configuration.reserved_percentage self.max_over_subscription_ratio = ( self.configuration.max_over_subscription_ratio) def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(NfsDriver, self).do_setup(context) config = self.configuration.nfs_shares_config if not config: msg = (_("There's no NFS config file configured (%s)") % 'nfs_shares_config') LOG.warning(msg) raise exception.NfsException(msg) if not os.path.exists(config): msg = (_("NFS config file at %(config)s doesn't exist") % {'config': config}) LOG.warning(msg) raise exception.NfsException(msg) self.shares = {} # address : options # Check if mount.nfs is installed on this system; note that we # need to be root, to also find mount.nfs on distributions, where # it is not located in an unprivileged users PATH (e.g. /sbin). 
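        # NOTE: the probe below is roughly `mount.nfs` run as root with the
        # exit status ignored (check_exit_code=False); only an ENOENT from
        # the executable lookup is treated as "package not installed".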
package = 'mount.nfs' try: self._execute(package, check_exit_code=False, run_as_root=True) except OSError as exc: if exc.errno == errno.ENOENT: msg = _('%s is not installed') % package raise exception.NfsException(msg) else: raise # Now that all configuration data has been loaded (shares), # we can "set" our final NAS file security options. self.set_nas_security_options(self._is_voldb_empty_at_startup) def _ensure_share_mounted(self, nfs_share): mnt_flags = [] if self.shares.get(nfs_share) is not None: mnt_flags = self.shares[nfs_share].split() num_attempts = max(1, self.configuration.nfs_mount_attempts) for attempt in range(num_attempts): try: self._remotefsclient.mount(nfs_share, mnt_flags) return except Exception as e: if attempt == (num_attempts - 1): LOG.error(_LE('Mount failure for %(share)s after ' '%(count)d attempts.'), { 'share': nfs_share, 'count': num_attempts}) raise exception.NfsException(six.text_type(e)) LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n' 'Retrying mount ...', {'attempt': attempt, 'exc': e}) time.sleep(1) def _find_share(self, volume_size_in_gib): """Choose NFS share among available ones for given volume size. For instances with more than one share that meets the criteria, the share with the least "allocated" space will be selected. :param volume_size_in_gib: int size in GB """ if not self._mounted_shares: raise exception.NfsNoSharesMounted() target_share = None target_share_reserved = 0 for nfs_share in self._mounted_shares: total_size, total_available, total_allocated = ( self._get_capacity_info(nfs_share)) share_info = {'total_size': total_size, 'total_available': total_available, 'total_allocated': total_allocated, } if not self._is_share_eligible(nfs_share, volume_size_in_gib, share_info): continue if target_share is not None: if target_share_reserved > total_allocated: target_share = nfs_share target_share_reserved = total_allocated else: target_share = nfs_share target_share_reserved = total_allocated if target_share is None: raise exception.NfsNoSuitableShareFound( volume_size=volume_size_in_gib) LOG.debug('Selected %s as target NFS share.', target_share) return target_share def _is_share_eligible(self, nfs_share, volume_size_in_gib, share_info=None): """Verifies NFS share is eligible to host volume with given size. First validation step: ratio of actual space (used_space / total_space) is less than used_ratio. Second validation step: apparent space allocated (differs from actual space used when using sparse files) and compares the apparent available space (total_available * oversub_ratio) to ensure enough space is available for the new volume. :param nfs_share: NFS share :param volume_size_in_gib: int size in GB """ # Because the generic NFS driver aggregates over all shares # when reporting capacity and usage stats to the scheduler, # we still have to perform some scheduler-like capacity # checks here, and these have to take into account # configuration for reserved space and oversubscription. # It would be better to do all this in the scheduler, but # this requires either pool support for the generic NFS # driver or limiting each NFS backend driver to a single share. 
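        # NOTE: worked example (hypothetical numbers) for the checks below,
        # assuming reserved_percentage = 5, max_over_subscription_ratio =
        # 20.0 and a 100 GiB share with 30 GiB actually free and 1536 GiB
        # apparently allocated to sparse volumes:
        #
        #   used_ratio         = (100 - 5) / 100.0         = 0.95
        #   actual_used_ratio  = (100 - 30) / 100.0        = 0.70  (ok)
        #   apparent_size      = 100 GiB * 20.0            = 2000 GiB
        #   apparent_available = 2000 GiB - 1536 GiB       = 464 GiB
        #   allocated / size   = 1536 / 100 = 15.36 < 20.0 (ok)
        #
        # so any volume smaller than 464 GiB would fit this share.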
# derive used_ratio from reserved percentage if share_info is None: total_size, total_available, total_allocated = ( self._get_capacity_info(nfs_share)) share_info = {'total_size': total_size, 'total_available': total_available, 'total_allocated': total_allocated, } used_percentage = 100 - self.reserved_percentage used_ratio = used_percentage / 100.0 requested_volume_size = volume_size_in_gib * units.Gi apparent_size = max(0, share_info['total_size'] * self.max_over_subscription_ratio) apparent_available = max(0, apparent_size - share_info['total_allocated']) actual_used_ratio = ((share_info['total_size'] - share_info['total_available']) / float(share_info['total_size'])) if actual_used_ratio > used_ratio: # NOTE(morganfainberg): We check the used_ratio first since # with oversubscription it is possible to not have the actual # available space but be within our oversubscription limit # therefore allowing this share to still be selected as a valid # target. LOG.debug('%s is not eligible - used ratio exceeded.', nfs_share) return False if apparent_available <= requested_volume_size: LOG.debug('%s is not eligible - insufficient (apparent) available ' 'space.', nfs_share) return False if share_info['total_allocated'] / share_info['total_size'] >= ( self.max_over_subscription_ratio): LOG.debug('%s is not eligible - utilization exceeds max ' 'over subscription ratio.', nfs_share) return False return True def _get_mount_point_for_share(self, nfs_share): """Needed by parent class.""" return self._remotefsclient.get_mount_point(nfs_share) def _get_capacity_info(self, nfs_share): """Calculate available space on the NFS share. :param nfs_share: example 172.18.194.100:/var/nfs """ run_as_root = self._execute_as_root mount_point = self._get_mount_point_for_share(nfs_share) df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point, run_as_root=run_as_root) block_size, blocks_total, blocks_avail = map(float, df.split()) total_available = block_size * blocks_avail total_size = block_size * blocks_total du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude', '*snapshot*', mount_point, run_as_root=run_as_root) total_allocated = float(du.split()[0]) return total_size, total_available, total_allocated def _get_mount_point_base(self): return self.base def extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" LOG.info(_LI('Extending volume %s.'), volume['id']) extend_by = int(new_size) - volume['size'] if not self._is_share_eligible(volume['provider_location'], extend_by): raise exception.ExtendVolumeError(reason='Insufficient space to' ' extend volume %s to %sG' % (volume['id'], new_size)) path = self.local_path(volume) LOG.info(_LI('Resizing file to %sG...'), new_size) image_utils.resize_image(path, new_size, run_as_root=self._execute_as_root) if not self._is_file_size_equal(path, new_size): raise exception.ExtendVolumeError( reason='Resizing image file failed.') def _is_file_size_equal(self, path, size): """Checks if file size at path is equal to size.""" data = image_utils.qemu_img_info(path, run_as_root=self._execute_as_root) virt_size = data.virtual_size / units.Gi return virt_size == size def set_nas_security_options(self, is_new_cinder_install): """Determine the setting to use for Secure NAS options. Value of each NAS Security option is checked and updated. If the option is currently 'auto', then it is set to either true or false based upon if this is a new Cinder installation. The RemoteFS variable '_execute_as_root' will be updated for this driver. 
:param is_new_cinder_install: bool indication of new Cinder install """ doc_html = "http://docs.openstack.org/admin-guide-cloud" \ "/blockstorage_nfs_backend.html" self._ensure_shares_mounted() if not self._mounted_shares: raise exception.NfsNoSharesMounted() nfs_mount = self._get_mount_point_for_share(self._mounted_shares[0]) self.configuration.nas_secure_file_permissions = \ self._determine_nas_security_option_setting( self.configuration.nas_secure_file_permissions, nfs_mount, is_new_cinder_install) LOG.debug('NAS variable secure_file_permissions setting is: %s', self.configuration.nas_secure_file_permissions) if self.configuration.nas_secure_file_permissions == 'false': LOG.warning(_LW("The NAS file permissions mode will be 666 " "(allowing other/world read & write access). " "This is considered an insecure NAS environment. " "Please see %s for information on a secure " "NFS configuration."), doc_html) self.configuration.nas_secure_file_operations = \ self._determine_nas_security_option_setting( self.configuration.nas_secure_file_operations, nfs_mount, is_new_cinder_install) # If secure NAS, update the '_execute_as_root' flag to not # run as the root user; run as process' user ID. if self.configuration.nas_secure_file_operations == 'true': self._execute_as_root = False LOG.debug('NAS variable secure_file_operations setting is: %s', self.configuration.nas_secure_file_operations) if self.configuration.nas_secure_file_operations == 'false': LOG.warning(_LW("The NAS file operations will be run as " "root: allowing root level access at the storage " "backend. This is considered an insecure NAS " "environment. Please see %s " "for information on a secure NAS configuration."), doc_html) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return the keys and values updated from NFS for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. :param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ # TODO(vhou) This method may need to be updated after # NFS snapshots are introduced. name_id = None if original_volume_status == 'available': current_name = CONF.volume_name_template % new_volume['id'] original_volume_name = CONF.volume_name_template % volume['id'] current_path = self.local_path(new_volume) # Replace the volume name with the original volume name original_path = current_path.replace(current_name, original_volume_name) try: os.rename(current_path, original_path) except OSError: LOG.error(_LE('Unable to rename the logical volume ' 'for volume: %s'), volume['id']) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. name_id = new_volume['_name_id'] or new_volume['id'] else: # The back-end will not be renamed. 
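                # Keep the migration volume's _name_id (or id) so that
                # local_path() continues to resolve to the backing file
                # that actually exists on this backend.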
name_id = new_volume['_name_id'] or new_volume['id'] return {'_name_id': name_id, 'provider_location': new_volume['provider_location']} def _update_volume_stats(self): """Retrieve stats info from volume group.""" super(NfsDriver, self)._update_volume_stats() self._stats['sparse_copy_volume'] = True data = self._stats global_capacity = data['total_capacity_gb'] global_free = data['free_capacity_gb'] thin_enabled = self.configuration.nfs_sparsed_volumes if thin_enabled: provisioned_capacity = self._get_provisioned_capacity() else: provisioned_capacity = round(global_capacity - global_free, 2) data['provisioned_capacity_gb'] = provisioned_capacity data['max_over_subscription_ratio'] = self.max_over_subscription_ratio data['reserved_percentage'] = self.reserved_percentage data['thin_provisioning_support'] = thin_enabled data['thick_provisioning_support'] = not thin_enabled self._stats = data cinder-8.0.0/cinder/volume/drivers/tegile.py0000664000567000056710000006254512701406250022211 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 by Tegile Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for Tegile storage. """ import ast import json import requests from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from cinder import exception from cinder import utils from cinder.i18n import _, _LI, _LW from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import utils as volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) default_api_service = 'openstack' TEGILE_API_PATH = 'zebi/api' TEGILE_DEFAULT_BLOCK_SIZE = '32KB' TEGILE_LOCAL_CONTAINER_NAME = 'Local' DEBUG_LOGGING = False tegile_opts = [ cfg.StrOpt('tegile_default_pool', help='Create volumes in this pool'), cfg.StrOpt('tegile_default_project', help='Create volumes in this project')] CONF = cfg.CONF CONF.register_opts(tegile_opts) def debugger(func): """Returns a wrapper that wraps func. 
The wrapper will log the entry and exit points of the function """ def wrapper(*args, **kwds): if DEBUG_LOGGING: LOG.debug('Entering %(classname)s.%(funcname)s', {'classname': args[0].__class__.__name__, 'funcname': func.__name__}) LOG.debug('Arguments: %(args)s, %(kwds)s', {'args': args[1:], 'kwds': kwds}) f_result = func(*args, **kwds) if DEBUG_LOGGING: LOG.debug('Exiting %(classname)s.%(funcname)s', {'classname': args[0].__class__.__name__, 'funcname': func.__name__}) LOG.debug('Results: %(result)s', {'result': f_result}) return f_result return wrapper class TegileAPIExecutor(object): def __init__(self, classname, hostname, username, password): self._classname = classname self._hostname = hostname self._username = username self._password = password @debugger @utils.retry(exceptions=(requests.ConnectionError, requests.Timeout)) def send_api_request(self, method, params=None, request_type='post', api_service=default_api_service, fine_logging=DEBUG_LOGGING): if params is not None: params = json.dumps(params) url = 'https://%s/%s/%s/%s' % (self._hostname, TEGILE_API_PATH, api_service, method) if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, ' 'url: %(url)s', {'classname': self._classname, 'method': method, 'url': url}) if request_type == 'post': if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) ' 'method: %(method)s, payload: %(payload)s', {'classname': self._classname, 'method': method, 'payload': params}) req = requests.post(url, data=params, auth=(self._username, self._password), verify=False) else: req = requests.get(url, auth=(self._username, self._password), verify=False) if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, ' 'return code: %(retcode)s', {'classname': self._classname, 'method': method, 'retcode': req}) try: response = req.json() if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) ' 'method: %(method)s, response: %(response)s', {'classname': self._classname, 'method': method, 'response': response}) except ValueError: response = '' req.close() if req.status_code != 200: msg = _('API response: %(response)s') % {'response': response} raise exception.TegileAPIException(msg) return response class TegileIntelliFlashVolumeDriver(san.SanDriver): """Tegile IntelliFlash Volume Driver.""" VENDOR = 'Tegile Systems Inc.' 
VERSION = '1.0.0' REQUIRED_OPTIONS = ['san_ip', 'san_login', 'san_password', 'tegile_default_pool'] SNAPSHOT_PREFIX = 'Manual-V-' _api_executor = None def __init__(self, *args, **kwargs): self._context = None super(TegileIntelliFlashVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(tegile_opts) self._protocol = 'iSCSI' # defaults to iscsi hostname = getattr(self.configuration, 'san_ip') username = getattr(self.configuration, 'san_login') password = getattr(self.configuration, 'san_password') self._default_pool = getattr(self.configuration, 'tegile_default_pool') self._default_project = ( getattr(self.configuration, 'tegile_default_project') or 'openstack') self._api_executor = TegileAPIExecutor(self.__class__.__name__, hostname, username, password) @debugger def do_setup(self, context): super(TegileIntelliFlashVolumeDriver, self).do_setup(context) self._context = context self._check_ops(self.REQUIRED_OPTIONS, self.configuration) @debugger def create_volume(self, volume): pool = volume_utils.extract_host(volume['host'], level='pool', default_pool_name=self._default_pool) tegile_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE, 'datasetPath': '%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, self._default_project), 'local': 'true', 'name': volume['name'], 'poolName': '%s' % pool, 'projectName': '%s' % self._default_project, 'protocol': self._protocol, 'thinProvision': 'true', 'volSize': volume['size'] * units.Gi} params = list() params.append(tegile_volume) params.append(True) self._api_executor.send_api_request(method='createVolume', params=params) LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."), {'volname': volume['name'], 'volid': volume['id']}) return self.get_additional_info(volume, pool, self._default_project) @debugger def delete_volume(self, volume): """Deletes a snapshot.""" params = list() pool, project, volume_name = self._get_pool_project_volume_name(volume) params.append('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, volume_name)) params.append(True) params.append(False) self._api_executor.send_api_request('deleteVolume', params) @debugger def create_snapshot(self, snapshot): """Creates a snapshot.""" snap_name = snapshot['name'] display_list = [getattr(snapshot, 'display_name', ''), getattr(snapshot, 'display_description', '')] snap_description = ':'.join(filter(None, display_list)) # Limit to 254 characters snap_description = snap_description[:254] pool, project, volume_name = self._get_pool_project_volume_name( snapshot['volume']) volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE, 'datasetPath': '%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project), 'local': 'true', 'name': volume_name, 'poolName': '%s' % pool, 'projectName': '%s' % project, 'protocol': self._protocol, 'thinProvision': 'true', 'volSize': snapshot['volume']['size'] * units.Gi} params = list() params.append(volume) params.append(snap_name) params.append(False) LOG.info(_LI('Creating snapshot for volume_name=%(vol)s' ' snap_name=%(name)s snap_description=%(desc)s'), {'vol': volume_name, 'name': snap_name, 'desc': snap_description}) self._api_executor.send_api_request('createVolumeSnapshot', params) @debugger def delete_snapshot(self, snapshot): """Deletes a snapshot.""" params = list() pool, project, volume_name = self._get_pool_project_volume_name( snapshot['volume']) params.append('%s/%s/%s/%s@%s%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, volume_name, self.SNAPSHOT_PREFIX, snapshot['name'])) params.append(False) 
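        # The first parameter is the fully-qualified snapshot path,
        # <pool>/Local/<project>/<volume>@Manual-V-<snapshot name>
        # (see SNAPSHOT_PREFIX and TEGILE_LOCAL_CONTAINER_NAME).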
self._api_executor.send_api_request('deleteVolumeSnapshot', params) @debugger def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from snapshot.""" params = list() pool, project, volume_name = self._get_pool_project_volume_name( snapshot['volume']) params.append('%s/%s/%s/%s@%s%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, volume_name, self.SNAPSHOT_PREFIX, snapshot['name'])) params.append(volume['name']) params.append(True) params.append(True) self._api_executor.send_api_request('cloneVolumeSnapshot', params) return self.get_additional_info(volume, pool, project) @debugger def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" pool, project, volume_name = self._get_pool_project_volume_name( src_vref) data_set_path = '%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project) source_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE, 'datasetPath': data_set_path, 'local': 'true', 'name': volume_name, 'poolName': '%s' % pool, 'projectName': '%s' % project, 'protocol': self._protocol, 'thinProvision': 'true', 'volSize': src_vref['size'] * units.Gi} dest_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE, 'datasetPath': data_set_path, # clone can reside only in the source project 'local': 'true', 'name': volume['name'], 'poolName': '%s' % pool, 'projectName': '%s' % project, 'protocol': self._protocol, 'thinProvision': 'true', 'volSize': volume['size'] * units.Gi} params = list() params.append(source_volume) params.append(dest_volume) self._api_executor.send_api_request(method='createClonedVolume', params=params) return self.get_additional_info(volume, pool, project) @debugger def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update first. The name is a bit misleading as the majority of the data here is cluster data """ if refresh: try: self._update_volume_stats() except Exception: pass return self._stats @debugger def _update_volume_stats(self): """Retrieves stats info from volume group.""" try: data = self._api_executor.send_api_request(method='getArrayStats', request_type='get', fine_logging=False) # fixing values coming back here as String to float data['total_capacity_gb'] = float(data.get('total_capacity_gb', 0)) data['free_capacity_gb'] = float(data.get('free_capacity_gb', 0)) for pool in data.get('pools', []): pool['total_capacity_gb'] = float( pool.get('total_capacity_gb', 0)) pool['free_capacity_gb'] = float( pool.get('free_capacity_gb', 0)) pool['allocated_capacity_gb'] = float( pool.get('allocated_capacity_gb', 0)) data['volume_backend_name'] = getattr(self.configuration, 'volume_backend_name') data['vendor_name'] = self.VENDOR data['driver_version'] = self.VERSION data['storage_protocol'] = self._protocol self._stats = data except Exception as e: LOG.warning(_LW('TegileIntelliFlashVolumeDriver(%(clsname)s) ' '_update_volume_stats failed: %(error)s'), {'clsname': self.__class__.__name__, 'error': e}) @debugger def get_pool(self, volume): """Returns pool name where volume resides. :param volume: The volume hosted by the driver. :return: Name of the pool where given volume is hosted. 
""" pool = volume_utils.extract_host(volume['host'], level='pool', default_pool_name=self._default_pool) return pool @debugger def extend_volume(self, volume, new_size): params = list() pool, project, volume_name = self._get_pool_project_volume_name(volume) params.append('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, volume_name)) vol_size = six.text_type(new_size) params.append(vol_size) params.append('GB') self._api_executor.send_api_request(method='resizeVolume', params=params) @debugger def manage_existing(self, volume, existing_ref): volume['name_id'] = existing_ref['name'] pool, project, volume_name = self._get_pool_project_volume_name(volume) additional_info = self.get_additional_info(volume, pool, project) additional_info['_name_id'] = existing_ref['name'], return additional_info @debugger def manage_existing_get_size(self, volume, existing_ref): params = list() pool, project, volume_name = self._get_pool_project_volume_name(volume) params.append('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, existing_ref['name'])) volume_size = self._api_executor.send_api_request( method='getVolumeSizeinGB', params=params) return volume_size @debugger def _get_pool_project_volume_name(self, volume): pool = volume_utils.extract_host(volume['host'], level='pool', default_pool_name=self._default_pool) try: project = volume['metadata']['project'] except (AttributeError, TypeError, KeyError): project = self._default_project if volume['_name_id'] is not None: volume_name = volume['_name_id'] else: volume_name = volume['name'] return pool, project, volume_name @debugger def get_additional_info(self, volume, pool, project): try: metadata = self._get_volume_metadata(volume) except Exception: metadata = dict() metadata['pool'] = pool metadata['project'] = project return {'metadata': metadata} @debugger def _get_volume_metadata(self, volume): volume_metadata = {} if 'volume_metadata' in volume: for metadata in volume['volume_metadata']: volume_metadata[metadata['key']] = metadata['value'] if 'metadata' in volume: metadata = volume['metadata'] for key in metadata: volume_metadata[key] = metadata[key] return volume_metadata @debugger def _check_ops(self, required_ops, configuration): """Ensures that the options we care about are set.""" for attr in required_ops: if not getattr(configuration, attr, None): raise exception.InvalidInput(reason=_('%(attr)s is not ' 'set.') % {'attr': attr}) class TegileISCSIDriver(TegileIntelliFlashVolumeDriver, san.SanISCSIDriver): """Tegile ISCSI Driver.""" def __init__(self, *args, **kwargs): super(TegileISCSIDriver, self).__init__(*args, **kwargs) self._protocol = 'iSCSI' @debugger def do_setup(self, context): super(TegileISCSIDriver, self).do_setup(context) @debugger def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" if getattr(self.configuration, 'use_chap_auth', False): chap_username = getattr(self.configuration, 'chap_username', '') chap_password = getattr(self.configuration, 'chap_password', '') else: chap_username = '' chap_password = '' if volume['provider_location'] is None: params = list() pool, project, volume_name = ( self._get_pool_project_volume_name(volume)) params.append('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, volume_name)) initiator_info = { 'initiatorName': connector['initiator'], 'chapUserName': chap_username, 'chapSecret': chap_password } params.append(initiator_info) mapping_info = self._api_executor.send_api_request( 
method='getISCSIMappingForVolume', params=params) target_portal = mapping_info['target_portal'] target_iqn = mapping_info['target_iqn'] target_lun = mapping_info['target_lun'] else: (target_portal, target_iqn, target_lun) = ( volume['provider_location'].split()) connection_data = dict() connection_data['target_portal'] = target_portal connection_data['target_iqn'] = target_iqn connection_data['target_lun'] = target_lun connection_data['target_discovered'] = False, connection_data['volume_id'] = volume['id'], connection_data['discard'] = False if getattr(self.configuration, 'use_chap_auth', False): connection_data['auth_method'] = 'CHAP' connection_data['auth_username'] = chap_username connection_data['auth_password'] = chap_password return { 'driver_volume_type': 'iscsi', 'data': connection_data } @debugger def terminate_connection(self, volume, connector, **kwargs): pass @debugger def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" params = list() pool, project, volume_name = self._get_pool_project_volume_name(volume) params.append('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, volume_name)) if getattr(self.configuration, 'use_chap_auth', False): chap_username = getattr(self.configuration, 'chap_username', '') chap_password = getattr(self.configuration, 'chap_password', '') else: chap_username = '' chap_password = '' initiator_info = { 'initiatorName': connector['initiator'], 'chapUserName': chap_username, 'chapSecret': chap_password } params.append(initiator_info) mapping_info = self._api_executor.send_api_request( method='getISCSIMappingForVolume', params=params) target_portal = mapping_info['target_portal'] target_iqn = mapping_info['target_iqn'] target_lun = mapping_info['target_lun'] provider_location = '%s %s %s' % (target_portal, target_iqn, target_lun) if getattr(self.configuration, 'use_chap_auth', False): provider_auth = ('CHAP %s %s' % (chap_username, chap_password)) else: provider_auth = None return ( {'provider_location': provider_location, 'provider_auth': provider_auth}) class TegileFCDriver(TegileIntelliFlashVolumeDriver, driver.FibreChannelDriver): """Tegile FC driver.""" def __init__(self, *args, **kwargs): super(TegileFCDriver, self).__init__(*args, **kwargs) self._protocol = 'FC' @debugger def do_setup(self, context): super(TegileFCDriver, self).do_setup(context) @fczm_utils.AddFCZone @debugger def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info.""" params = list() pool, project, volume_name = self._get_pool_project_volume_name(volume) params.append('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, volume_name)) wwpns = connector['wwpns'] connectors = ','.join(wwpns) params.append(connectors) target_info = self._api_executor.send_api_request( method='getFCPortsForVolume', params=params) initiator_target_map = target_info['initiator_target_map'] connection_data = { 'driver_volume_type': 'fibre_channel', 'data': { 'encrypted': False, 'target_discovered': False, 'target_lun': target_info['target_lun'], 'target_wwn': ast.literal_eval(target_info['target_wwn']), 'initiator_target_map': ast.literal_eval(initiator_target_map) } } return connection_data @fczm_utils.RemoveFCZone @debugger def terminate_connection(self, volume, connector, force=False, **kwargs): params = list() pool, project, volume_name = self._get_pool_project_volume_name(volume) params.append('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, 
volume_name)) wwpns = connector['wwpns'] connectors = ','.join(wwpns) params.append(connectors) target_info = self._api_executor.send_api_request( method='getFCPortsForVolume', params=params) initiator_target_map = target_info['initiator_target_map'] connection_data = { 'data': { 'target_wwn': ast.literal_eval(target_info['target_wwn']), 'initiator_target_map': ast.literal_eval(initiator_target_map) } } return connection_data cinder-8.0.0/cinder/volume/drivers/windows/0000775000567000056710000000000012701406543022051 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/windows/remotefs.py0000664000567000056710000000413712701406250024247 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from os_brick.remotefs import remotefs from os_win import utilsfactory from cinder import exception from cinder.i18n import _ class WindowsRemoteFsClient(remotefs.RemoteFsClient): def __init__(self, *args, **kwargs): super(WindowsRemoteFsClient, self).__init__(*args, **kwargs) self._smbutils = utilsfactory.get_smbutils() self._pathutils = utilsfactory.get_pathutils() def mount(self, export_path, mnt_options=None): if not os.path.isdir(self._mount_base): os.makedirs(self._mount_base) mnt_point = self.get_mount_point(export_path) norm_path = os.path.abspath(export_path) mnt_options = mnt_options or {} username = (mnt_options.get('username') or mnt_options.get('user')) password = (mnt_options.get('password') or mnt_options.get('pass')) if not self._smbutils.check_smb_mapping( norm_path, remove_unavailable_mapping=True): self._smbutils.mount_smb_share(norm_path, username=username, password=password) if os.path.exists(mnt_point): if not self._pathutils.is_symlink(mnt_point): raise exception.SmbfsException(_("Link path already exists " "and its not a symlink")) else: self._pathutils.create_sym_link(mnt_point, norm_path) cinder-8.0.0/cinder/volume/drivers/windows/constants.py0000664000567000056710000000125312701406250024433 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. VHD_TYPE_FIXED = 2 VHD_TYPE_DYNAMIC = 3 VHD_TYPE_DIFFERENCING = 4 cinder-8.0.0/cinder/volume/drivers/windows/__init__.py0000664000567000056710000000000012701406250024143 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/windows/windows.py0000664000567000056710000003033512701406250024114 0ustar jenkinsjenkins00000000000000# Copyright 2012 Pedro Navarro Perez # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for Windows Server 2012 This driver requires ISCSI target role installed """ import contextlib import os from os_win import utilsfactory from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units from oslo_utils import uuidutils from cinder.image import image_utils from cinder.volume import driver from cinder.volume import utils LOG = logging.getLogger(__name__) windows_opts = [ cfg.StrOpt('windows_iscsi_lun_path', default='C:\iSCSIVirtualDisks', help='Path to store VHD backed volumes'), ] CONF = cfg.CONF CONF.register_opts(windows_opts) class WindowsDriver(driver.ISCSIDriver): """Executes volume driver commands on Windows Storage server.""" VERSION = '1.0.0' def __init__(self, *args, **kwargs): super(WindowsDriver, self).__init__(*args, **kwargs) self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(windows_opts) self._vhdutils = utilsfactory.get_vhdutils() self._tgt_utils = utilsfactory.get_iscsi_target_utils() self._hostutils = utilsfactory.get_hostutils() def do_setup(self, context): """Setup the Windows Volume driver. Called one time by the manager after the driver is loaded. Validate the flags we care about """ fileutils.ensure_tree(self.configuration.windows_iscsi_lun_path) fileutils.ensure_tree(CONF.image_conversion_dir) def check_for_setup_error(self): """Check that the driver is working and can communicate.""" self._tgt_utils.get_portal_locations(available_only=True, fail_if_none_found=True) def _get_host_information(self, volume): """Getting the portal and port information.""" # TODO(lpetrut): properly handle multiple existing portals, also # use the iSCSI traffic addresses config options. target_name = self._get_target_name(volume) available_portal_location = self._tgt_utils.get_portal_locations()[0] properties = self._tgt_utils.get_target_information(target_name) # Note(lpetrut): the WT_Host CHAPSecret field cannot be accessed # for security reasons. auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret properties['target_discovered'] = False properties['target_portal'] = available_portal_location properties['target_lun'] = 0 properties['volume_id'] = volume['id'] return properties def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" initiator_name = connector['initiator'] target_name = volume['provider_location'] self._tgt_utils.associate_initiator_with_iscsi_target(initiator_name, target_name) properties = self._get_host_information(volume) return { 'driver_volume_type': 'iscsi', 'data': properties, } def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance. 
Unmask the LUN on the storage system so the given initiator can no longer access it. """ initiator_name = connector['initiator'] target_name = volume['provider_location'] self._tgt_utils.deassociate_initiator(initiator_name, target_name) def create_volume(self, volume): """Driver entry point for creating a new volume.""" vhd_path = self.local_path(volume) vol_name = volume['name'] vol_size_mb = volume['size'] * 1024 self._tgt_utils.create_wt_disk(vhd_path, vol_name, size_mb=vol_size_mb) def local_path(self, volume, disk_format=None): base_vhd_folder = self.configuration.windows_iscsi_lun_path if not disk_format: disk_format = self._tgt_utils.get_supported_disk_format() disk_fname = "%s.%s" % (volume['name'], disk_format) return os.path.join(base_vhd_folder, disk_fname) def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" vol_name = volume['name'] vhd_path = self.local_path(volume) self._tgt_utils.remove_wt_disk(vol_name) fileutils.delete_if_exists(vhd_path) def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot.""" # Getting WT_Snapshot class vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] self._tgt_utils.create_snapshot(vol_name, snapshot_name) def create_volume_from_snapshot(self, volume, snapshot): """Driver entry point for exporting snapshots as volumes.""" snapshot_name = snapshot['name'] vol_name = volume['name'] vhd_path = self.local_path(volume) self._tgt_utils.export_snapshot(snapshot_name, vhd_path) self._tgt_utils.import_wt_disk(vhd_path, vol_name) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" snapshot_name = snapshot['name'] self._tgt_utils.delete_snapshot(snapshot_name) def ensure_export(self, context, volume): # iSCSI targets exported by WinTarget persist after host reboot. pass def _get_target_name(self, volume): return "%s%s" % (self.configuration.iscsi_target_prefix, volume['name']) def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" target_name = self._get_target_name(volume) updates = {} if not self._tgt_utils.iscsi_target_exists(target_name): self._tgt_utils.create_iscsi_target(target_name) updates['provider_location'] = target_name if self.configuration.use_chap_auth: chap_username = (self.configuration.chap_username or utils.generate_username()) chap_password = (self.configuration.chap_password or utils.generate_password()) self._tgt_utils.set_chap_credentials(target_name, chap_username, chap_password) updates['provider_auth'] = ' '.join(('CHAP', chap_username, chap_password)) # This operation is idempotent self._tgt_utils.add_disk_to_target(volume['name'], target_name) return updates def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" target_name = self._get_target_name(volume) self._tgt_utils.delete_iscsi_target(target_name) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and create a volume using it.""" # Convert to VHD and file back to VHD vhd_type = self._tgt_utils.get_supported_vhd_type() with image_utils.temporary_file(suffix='.vhd') as tmp: volume_path = self.local_path(volume) image_utils.fetch_to_vhd(context, image_service, image_id, tmp, self.configuration.volume_dd_blocksize) # The vhd must be disabled and deleted before being replaced with # the desired image. 
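            # Sequence: take the WT disk offline, drop the stale VHD,
            # convert the fetched image into its place, grow it to the
            # requested size (volume['size'] << 30 bytes), then bring the
            # disk back online.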
self._tgt_utils.change_wt_disk_status(volume['name'], enabled=False) os.unlink(volume_path) self._vhdutils.convert_vhd(tmp, volume_path, vhd_type) self._vhdutils.resize_vhd(volume_path, volume['size'] << 30, is_file_max_size=False) self._tgt_utils.change_wt_disk_status(volume['name'], enabled=True) @contextlib.contextmanager def _temporary_snapshot(self, volume_name): try: snap_uuid = uuidutils.generate_uuid() snapshot_name = '%s-tmp-snapshot-%s' % (volume_name, snap_uuid) self._tgt_utils.create_snapshot(volume_name, snapshot_name) yield snapshot_name finally: self._tgt_utils.delete_snapshot(snapshot_name) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" disk_format = self._tgt_utils.get_supported_disk_format() temp_vhd_path = os.path.join(self.configuration.image_conversion_dir, str(image_meta['id']) + '.' + disk_format) try: with self._temporary_snapshot(volume['name']) as tmp_snap_name: # qemu-img cannot access VSS snapshots, for which reason it # must be exported first. self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path) image_utils.upload_volume(context, image_service, image_meta, temp_vhd_path, 'vhd') finally: fileutils.delete_if_exists(temp_vhd_path) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" src_vol_name = src_vref['name'] vol_name = volume['name'] vol_size = volume['size'] new_vhd_path = self.local_path(volume) with self._temporary_snapshot(src_vol_name) as tmp_snap_name: self._tgt_utils.export_snapshot(tmp_snap_name, new_vhd_path) self._vhdutils.resize_vhd(new_vhd_path, vol_size << 30, is_file_max_size=False) self._tgt_utils.import_wt_disk(new_vhd_path, vol_name) def _get_capacity_info(self): drive = os.path.splitdrive( self.configuration.windows_iscsi_lun_path)[0] (size, free_space) = self._hostutils.get_volume_info(drive) total_gb = size / units.Gi free_gb = free_space / units.Gi return (total_gb, free_gb) def _update_volume_stats(self): """Retrieve stats info for Windows device.""" LOG.debug("Updating volume stats") total_gb, free_gb = self._get_capacity_info() data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or self.__class__.__name__ data["vendor_name"] = 'Microsoft' data["driver_version"] = self.VERSION data["storage_protocol"] = 'iSCSI' data['total_capacity_gb'] = total_gb data['free_capacity_gb'] = free_gb data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = False self._stats = data def extend_volume(self, volume, new_size): """Extend an Existing Volume.""" old_size = volume['size'] LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.", {'old_size': old_size, 'new_size': new_size}) additional_size_mb = (new_size - old_size) * 1024 self._tgt_utils.extend_wt_disk(volume['name'], additional_size_mb) cinder-8.0.0/cinder/volume/drivers/windows/smbfs.py0000664000567000056710000002237712701406250023543 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys from os_win import utilsfactory from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units from cinder import exception from cinder.i18n import _, _LI from cinder.image import image_utils from cinder.volume.drivers import remotefs as remotefs_drv from cinder.volume.drivers import smbfs from cinder.volume.drivers.windows import remotefs VERSION = '1.1.0' LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.set_default('smbfs_shares_config', r'C:\OpenStack\smbfs_shares.txt') CONF.set_default('smbfs_allocation_info_file_path', r'C:\OpenStack\allocation_data.txt') CONF.set_default('smbfs_mount_point_base', r'C:\OpenStack\_mnt') CONF.set_default('smbfs_default_volume_format', 'vhd') class WindowsSmbfsDriver(smbfs.SmbfsDriver): VERSION = VERSION _MINIMUM_QEMU_IMG_VERSION = '1.6' def __init__(self, *args, **kwargs): super(WindowsSmbfsDriver, self).__init__(*args, **kwargs) self.base = getattr(self.configuration, 'smbfs_mount_point_base', CONF.smbfs_mount_point_base) opts = getattr(self.configuration, 'smbfs_mount_options', CONF.smbfs_mount_options) self._remotefsclient = remotefs.WindowsRemoteFsClient( 'cifs', root_helper=None, smbfs_mount_point_base=self.base, smbfs_mount_options=opts) self._vhdutils = utilsfactory.get_vhdutils() self._pathutils = utilsfactory.get_pathutils() self._smbutils = utilsfactory.get_smbutils() def do_setup(self, context): self._check_os_platform() super(WindowsSmbfsDriver, self).do_setup(context) def _check_os_platform(self): if sys.platform != 'win32': _msg = _("This system platform (%s) is not supported. This " "driver supports only Win32 platforms.") % sys.platform raise exception.SmbfsException(_msg) def _do_create_volume(self, volume): volume_path = self.local_path(volume) volume_format = self.get_volume_format(volume) volume_size_bytes = volume['size'] * units.Gi if os.path.exists(volume_path): err_msg = _('File already exists at: %s') % volume_path raise exception.InvalidVolume(err_msg) if volume_format not in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX): err_msg = _("Unsupported volume format: %s ") % volume_format raise exception.InvalidVolume(err_msg) self._vhdutils.create_dynamic_vhd(volume_path, volume_size_bytes) def _ensure_share_mounted(self, smbfs_share): mnt_options = {} if self.shares.get(smbfs_share) is not None: mnt_flags = self.shares[smbfs_share] mnt_options = self.parse_options(mnt_flags)[1] self._remotefsclient.mount(smbfs_share, mnt_options) def _delete(self, path): fileutils.delete_if_exists(path) def _get_capacity_info(self, smbfs_share): """Calculate available space on the SMBFS share. 
:param smbfs_share: example //172.18.194.100/var/smbfs """ total_size, total_available = self._smbutils.get_share_capacity_info( smbfs_share) total_allocated = self._get_total_allocated(smbfs_share) return_value = [total_size, total_available, total_allocated] LOG.info(_LI('Smb share %(share)s Total size %(size)s ' 'Total allocated %(allocated)s'), {'share': smbfs_share, 'size': total_size, 'allocated': total_allocated}) return [float(x) for x in return_value] def _img_commit(self, snapshot_path): self._vhdutils.merge_vhd(snapshot_path) def _rebase_img(self, image, backing_file, volume_format): # Relative path names are not supported in this case. image_dir = os.path.dirname(image) backing_file_path = os.path.join(image_dir, backing_file) self._vhdutils.reconnect_parent_vhd(image, backing_file_path) def _qemu_img_info(self, path, volume_name=None): # This code expects to deal only with relative filenames. # As this method is needed by the upper class and qemu-img does # not fully support vhdx images, for the moment we'll use Win32 API # for retrieving image information. parent_path = self._vhdutils.get_vhd_parent_path(path) file_format = os.path.splitext(path)[1][1:].lower() if parent_path: backing_file_name = os.path.split(parent_path)[1].lower() else: backing_file_name = None class ImageInfo(object): def __init__(self, image, backing_file): self.image = image self.backing_file = backing_file self.file_format = file_format return ImageInfo(os.path.basename(path), backing_file_name) def _do_create_snapshot(self, snapshot, backing_file, new_snap_path): backing_file_full_path = os.path.join( self._local_volume_dir(snapshot['volume']), backing_file) self._vhdutils.create_differencing_vhd(new_snap_path, backing_file_full_path) def _do_extend_volume(self, volume_path, size_gb, volume_name=None): self._vhdutils.resize_vhd(volume_path, size_gb * units.Gi) @remotefs_drv.locked_volume_id_operation def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" # If snapshots exist, flatten to a temporary image, and upload it active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) backing_file = self._vhdutils.get_vhd_parent_path(active_file_path) root_file_fmt = self.get_volume_format(volume) temp_path = None try: if backing_file or root_file_fmt == self._DISK_FORMAT_VHDX: temp_file_name = '%s.temp_image.%s.%s' % ( volume['id'], image_meta['id'], self._DISK_FORMAT_VHD) temp_path = os.path.join(self._local_volume_dir(volume), temp_file_name) self._vhdutils.convert_vhd(active_file_path, temp_path) upload_path = temp_path else: upload_path = active_file_path image_utils.upload_volume(context, image_service, image_meta, upload_path, self._DISK_FORMAT_VHD) finally: if temp_path: self._delete(temp_path) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" volume_path = self.local_path(volume) volume_format = self.get_volume_format(volume, qemu_format=True) self._delete(volume_path) image_utils.fetch_to_volume_format( context, image_service, image_id, volume_path, volume_format, self.configuration.volume_dd_blocksize) self._vhdutils.resize_vhd(self.local_path(volume), volume['size'] * units.Gi) def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume.""" LOG.debug("snapshot: %(snap)s, volume: %(vol)s, " "volume_size: 
%(size)s", {'snap': snapshot['id'], 'vol': volume['id'], 'size': snapshot['volume_size']}) info_path = self._local_path_volume_info(snapshot['volume']) snap_info = self._read_info_file(info_path) vol_dir = self._local_volume_dir(snapshot['volume']) forward_file = snap_info[snapshot['id']] forward_path = os.path.join(vol_dir, forward_file) # Find the file which backs this file, which represents the point # when this snapshot was created. img_info = self._qemu_img_info(forward_path) snapshot_path = os.path.join(vol_dir, img_info.backing_file) volume_path = self.local_path(volume) self._delete(volume_path) self._vhdutils.convert_vhd(snapshot_path, volume_path) self._vhdutils.resize_vhd(volume_path, volume_size * units.Gi) cinder-8.0.0/cinder/volume/drivers/dothill/0000775000567000056710000000000012701406543022016 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/dothill/__init__.py0000664000567000056710000000000012701406250024110 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/drivers/dothill/dothill_fc.py0000664000567000056710000001464512701406250024504 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging import cinder.volume.driver from cinder.volume.drivers.dothill import dothill_common from cinder.volume.drivers.san import san from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class DotHillFCDriver(cinder.volume.driver.FibreChannelDriver): """OpenStack Fibre Channel cinder drivers for DotHill Arrays. 
Version history: 0.1 - Base version developed for HPMSA FC drivers: "https://github.com/openstack/cinder/tree/stable/juno/ cinder/volume/drivers/san/hp" 1.0 - Version developed for DotHill arrays with the following modifications: - added support for v3 API(virtual pool feature) - added support for retype volume - added support for manage/unmanage volume - added initiator target mapping in FC zoning - added https support """ VERSION = "1.0" def __init__(self, *args, **kwargs): super(DotHillFCDriver, self).__init__(*args, **kwargs) self.common = None self.configuration.append_config_values(dothill_common.common_opts) self.configuration.append_config_values(san.san_opts) self.lookup_service = fczm_utils.create_lookup_service() def _init_common(self): return dothill_common.DotHillCommon(self.configuration) def _check_flags(self): required_flags = ['san_ip', 'san_login', 'san_password'] self.common.check_flags(self.configuration, required_flags) def do_setup(self, context): self.common = self._init_common() self._check_flags() self.common.do_setup(context) def check_for_setup_error(self): self._check_flags() def create_volume(self, volume): self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, src_vref): self.common.create_volume_from_snapshot(volume, src_vref) def create_cloned_volume(self, volume, src_vref): self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.common.delete_volume(volume) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): self.common.client_login() try: data = {} data['target_lun'] = self.common.map_volume(volume, connector, 'wwpns') ports, init_targ_map = self.get_init_targ_map(connector) data['target_discovered'] = True data['target_wwn'] = ports data['initiator_target_map'] = init_targ_map info = {'driver_volume_type': 'fibre_channel', 'data': data} return info finally: self.common.client_logout() @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): self.common.unmap_volume(volume, connector, 'wwpns') info = {'driver_volume_type': 'fibre_channel', 'data': {}} if not self.common.client.list_luns_for_host(connector['wwpns'][0]): ports, init_targ_map = self.get_init_targ_map(connector) info['data'] = {'target_wwn': ports, 'initiator_target_map': init_targ_map} return info def get_init_targ_map(self, connector): init_targ_map = {} target_wwns = [] ports = self.common.get_active_fc_target_ports() if self.lookup_service is not None: dev_map = self.lookup_service.get_device_mapping_from_network( connector['wwpns'], ports) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) target_wwns = list(set(target_wwns)) else: initiator_wwns = connector['wwpns'] target_wwns = ports for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map def get_volume_stats(self, refresh=False): stats = self.common.get_volume_stats(refresh) stats['storage_protocol'] = 'FC' stats['driver_version'] = self.VERSION backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return stats def create_export(self, context, volume, connector): pass def ensure_export(self, context, 
volume): pass def remove_export(self, context, volume): pass def create_snapshot(self, snapshot): self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.common.delete_snapshot(snapshot) def extend_volume(self, volume, new_size): self.common.extend_volume(volume, new_size) def retype(self, context, volume, new_type, diff, host): return self.common.retype(volume, new_type, diff, host) def manage_existing(self, volume, existing_ref): self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.common.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): pass cinder-8.0.0/cinder/volume/drivers/dothill/dothill_iscsi.py0000664000567000056710000001610112701406250025213 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from cinder import exception from cinder.i18n import _ import cinder.volume.driver from cinder.volume.drivers.dothill import dothill_common as dothillcommon from cinder.volume.drivers.san import san DEFAULT_ISCSI_PORT = "3260" LOG = logging.getLogger(__name__) class DotHillISCSIDriver(cinder.volume.driver.ISCSIDriver): """OpenStack iSCSI cinder drivers for DotHill Arrays. 
Version history: 0.1 - Base structure for DotHill iSCSI drivers based on HPMSA FC drivers: "https://github.com/openstack/cinder/tree/stable/juno/ cinder/volume/drivers/san/hp" 1.0 - Version developed for DotHill arrays with the following modifications: - added iSCSI support - added CHAP support in iSCSI - added support for v3 API(virtual pool feature) - added support for retype volume - added support for manage/unmanage volume - added https support """ VERSION = "1.0" def __init__(self, *args, **kwargs): super(DotHillISCSIDriver, self).__init__(*args, **kwargs) self.common = None self.configuration.append_config_values(dothillcommon.common_opts) self.configuration.append_config_values(dothillcommon.iscsi_opts) self.configuration.append_config_values(san.san_opts) self.iscsi_ips = self.configuration.dothill_iscsi_ips def _init_common(self): return dothillcommon.DotHillCommon(self.configuration) def _check_flags(self): required_flags = ['san_ip', 'san_login', 'san_password'] self.common.check_flags(self.configuration, required_flags) def do_setup(self, context): self.common = self._init_common() self._check_flags() self.common.do_setup(context) self.initialize_iscsi_ports() def initialize_iscsi_ports(self): iscsi_ips = [] if self.iscsi_ips: for ip_addr in self.iscsi_ips: ip = ip_addr.split(':') if len(ip) == 1: iscsi_ips.append([ip_addr, DEFAULT_ISCSI_PORT]) elif len(ip) == 2: iscsi_ips.append([ip[0], ip[1]]) else: msg = _("Invalid IP address format: '%s'") % ip_addr LOG.error(msg) raise exception.InvalidInput(reason=(msg)) self.iscsi_ips = iscsi_ips else: msg = _('At least one valid iSCSI IP address must be set.') LOG.error(msg) raise exception.InvalidInput(reason=(msg)) def check_for_setup_error(self): self._check_flags() def create_volume(self, volume): self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, src_vref): self.common.create_volume_from_snapshot(volume, src_vref) def create_cloned_volume(self, volume, src_vref): self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.common.delete_volume(volume) def initialize_connection(self, volume, connector): self.common.client_login() try: data = {} data['target_lun'] = self.common.map_volume(volume, connector, 'initiator') iqns = self.common.get_active_iscsi_target_iqns() data['target_discovered'] = True data['target_iqn'] = iqns[0] iscsi_portals = self.common.get_active_iscsi_target_portals() for ip_port in self.iscsi_ips: if (ip_port[0] in iscsi_portals): data['target_portal'] = ":".join(ip_port) break if 'target_portal' not in data: raise exception.DotHillNotTargetPortal() if self.configuration.use_chap_auth: chap_secret = self.common.get_chap_record( connector['initiator'] ) if not chap_secret: chap_secret = self.create_chap_record( connector['initiator'] ) data['auth_password'] = chap_secret data['auth_username'] = connector['initiator'] data['auth_method'] = 'CHAP' info = {'driver_volume_type': 'iscsi', 'data': data} return info finally: self.common.client_logout() def terminate_connection(self, volume, connector, **kwargs): self.common.unmap_volume(volume, connector, 'initiator') def get_volume_stats(self, refresh=False): stats = self.common.get_volume_stats(refresh) stats['storage_protocol'] = 'iSCSI' stats['driver_version'] = self.VERSION backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return stats def create_export(self, context, volume, connector): pass def ensure_export(self, 
context, volume): pass def remove_export(self, context, volume): pass def create_snapshot(self, snapshot): self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.common.delete_snapshot(snapshot) def extend_volume(self, volume, new_size): self.common.extend_volume(volume, new_size) def create_chap_record(self, initiator_name): chap_secret = self.configuration.chap_password # Chap secret length should be 12 to 16 characters if 12 <= len(chap_secret) <= 16: self.common.create_chap_record(initiator_name, chap_secret) else: msg = _('CHAP secret should be 12-16 bytes.') LOG.error(msg) raise exception.InvalidInput(reason=(msg)) return chap_secret def retype(self, context, volume, new_type, diff, host): return self.common.retype(volume, new_type, diff, host) def manage_existing(self, volume, existing_ref): self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.common.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): pass cinder-8.0.0/cinder/volume/drivers/dothill/dothill_client.py0000664000567000056710000003465012701406250025370 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from hashlib import md5 import math import time from lxml import etree from oslo_log import log as logging import requests import six from cinder import exception from cinder.i18n import _LE LOG = logging.getLogger(__name__) class DotHillClient(object): def __init__(self, host, login, password, protocol, ssl_verify): self._login = login self._password = password self._base_url = "%s://%s/api" % (protocol, host) self._session_key = None self.ssl_verify = ssl_verify def _get_auth_token(self, xml): """Parse an XML authentication reply to extract the session key.""" self._session_key = None tree = etree.XML(xml) if tree.findtext(".//PROPERTY[@name='response-type']") == "success": self._session_key = tree.findtext(".//PROPERTY[@name='response']") def login(self): """Authenticates the service on the device.""" hash_ = "%s_%s" % (self._login, self._password) if six.PY3: hash_ = hash_.encode('utf-8') hash_ = md5(hash_) digest = hash_.hexdigest() url = self._base_url + "/login/" + digest try: xml = requests.get(url, verify=self.ssl_verify) except requests.exceptions.RequestException: raise exception.DotHillConnectionError self._get_auth_token(xml.text.encode('utf8')) if self._session_key is None: raise exception.DotHillAuthenticationError def _assert_response_ok(self, tree): """Parses the XML returned by the device to check the return code. Raises a DotHillRequestError error if the return code is not 0 or if the return code is None. """ # Get the return code for the operation, raising an exception # if it is not present. return_code = tree.findtext(".//PROPERTY[@name='return-code']") if not return_code: raise exception.DotHillRequestError(message="No status found") # If no error occurred, just return. 
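# For reference, a success reply from the array looks roughly like the
# abbreviated, hypothetical XML below; only the PROPERTY elements named
# 'return-code' and 'response' matter to this parser:
#
#   <RESPONSE>
#     <OBJECT basetype="status">
#       <PROPERTY name="response-type">Success</PROPERTY>
#       <PROPERTY name="response">Command completed successfully.</PROPERTY>
#       <PROPERTY name="return-code">0</PROPERTY>
#     </OBJECT>
#   </RESPONSE>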
if return_code == '0': return # Format a message for the status code. msg = "%s (%s)" % (tree.findtext(".//PROPERTY[@name='response']"), return_code) raise exception.DotHillRequestError(message=msg) def _build_request_url(self, path, *args, **kargs): url = self._base_url + path if kargs: url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v) for (k, v) in kargs.items()]) if args: url += '/' + '/'.join(args) return url def _request(self, path, *args, **kargs): """Performs an HTTP request on the device. Raises a DotHillRequestError if the device returned but the status is not 0. The device error message will be used in the exception message. If the status is OK, returns the XML data for further processing. """ url = self._build_request_url(path, *args, **kargs) LOG.debug("DotHill Request URL: %s", url) headers = {'dataType': 'api', 'sessionKey': self._session_key} try: xml = requests.get(url, headers=headers, verify=self.ssl_verify) tree = etree.XML(xml.text.encode('utf8')) except Exception: raise exception.DotHillConnectionError if path == "/show/volumecopy-status": return tree self._assert_response_ok(tree) return tree def logout(self): url = self._base_url + '/exit' try: requests.get(url, verify=self.ssl_verify) return True except Exception: return False def create_volume(self, name, size, backend_name, backend_type): # NOTE: size is in this format: [0-9]+GB path_dict = {'size': size} if backend_type == "linear": path_dict['vdisk'] = backend_name else: path_dict['pool'] = backend_name self._request("/create/volume", name, **path_dict) return None def delete_volume(self, name): self._request("/delete/volumes", name) def extend_volume(self, name, added_size): self._request("/expand/volume", name, size=added_size) def create_snapshot(self, volume_name, snap_name): self._request("/create/snapshots", snap_name, volumes=volume_name) def delete_snapshot(self, snap_name): self._request("/delete/snapshot", "cleanup", snap_name) def backend_exists(self, backend_name, backend_type): try: if backend_type == "linear": path = "/show/vdisks" else: path = "/show/pools" self._request(path, backend_name) return True except exception.DotHillRequestError: return False def _get_size(self, size): return int(math.ceil(float(size) * 512 / (10 ** 9))) def backend_stats(self, backend_name, backend_type): stats = {'free_capacity_gb': 0, 'total_capacity_gb': 0} prop_list = [] if backend_type == "linear": path = "/show/vdisks" prop_list = ["size-numeric", "freespace-numeric"] else: path = "/show/pools" prop_list = ["total-size-numeric", "total-avail-numeric"] tree = self._request(path, backend_name) size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0]) if size: stats['total_capacity_gb'] = self._get_size(size) size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1]) if size: stats['free_capacity_gb'] = self._get_size(size) return stats def list_luns_for_host(self, host): tree = self._request("/show/host-maps", host) return [int(prop.text) for prop in tree.xpath( "//PROPERTY[@name='lun']")] def _get_first_available_lun_for_host(self, host): luns = self.list_luns_for_host(host) lun = 1 while True: if lun not in luns: return lun lun += 1 def map_volume(self, volume_name, connector, connector_element): if connector_element == 'wwpns': lun = self._get_first_available_lun_for_host(connector['wwpns'][0]) host = ",".join(connector['wwpns']) else: host = connector['initiator'] host_status = self._check_host(host) if host_status != 0: hostname = self._safe_hostname(connector['host']) 
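# A nonzero status means _check_host() found no matching 'host-id'
# entry on the array, so the initiator is registered as a new host
# before mapping: 'id' carries the raw WWPN/IQN, while 'hostname' is
# the sanitized OpenStack host name.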
self._request("/create/host", hostname, id=host) lun = self._get_first_available_lun_for_host(host) self._request("/map/volume", volume_name, lun=str(lun), host=host, access="rw") return lun def unmap_volume(self, volume_name, connector, connector_element): if connector_element == 'wwpns': host = ",".join(connector['wwpns']) else: host = connector['initiator'] self._request("/unmap/volume", volume_name, host=host) def get_active_target_ports(self): ports = [] tree = self._request("/show/ports") for obj in tree.xpath("//OBJECT[@basetype='port']"): port = {prop.get('name'): prop.text for prop in obj.iter("PROPERTY") if prop.get('name') in ["port-type", "target-id", "status"]} if port['status'] == 'Up': ports.append(port) return ports def get_active_fc_target_ports(self): return [port['target-id'] for port in self.get_active_target_ports() if port['port-type'] == "FC"] def get_active_iscsi_target_iqns(self): return [port['target-id'] for port in self.get_active_target_ports() if port['port-type'] == "iSCSI"] def linear_copy_volume(self, src_name, dest_name, dest_bknd_name): """Copy a linear volume.""" self._request("/volumecopy", dest_name, dest_vdisk=dest_bknd_name, source_volume=src_name, prompt='yes') # The copy has started; now monitor until the operation completes. count = 0 while True: tree = self._request("/show/volumecopy-status") return_code = tree.findtext(".//PROPERTY[@name='return-code']") if return_code == '0': status = tree.findtext(".//PROPERTY[@name='progress']") progress = False if status: progress = True LOG.debug("Volume copy is in progress: %s", status) if not progress: LOG.debug("Volume copy completed: %s", status) break else: if count >= 5: LOG.error(_LE('Error in copying volume: %s'), src_name) raise exception.DotHillRequestError time.sleep(1) count += 1 time.sleep(5) def copy_volume(self, src_name, dest_name, dest_bknd_name, backend_type='virtual'): """Copy a linear or virtual volume.""" if backend_type == 'linear': return self.linear_copy_volume(src_name, dest_name, dest_bknd_name) # Copy a virtual volume to another in the same pool. self._request("/copy/volume", src_name, name=dest_name) LOG.debug("Volume copy of source_volume: %(src_name)s to " "destination_volume: %(dest_name)s started.", {'src_name': src_name, 'dest_name': dest_name, }) # Loop until this volume copy is no longer in progress. while self.volume_copy_in_progress(src_name): time.sleep(5) # Once the copy operation is finished, check to ensure that # the volume was not deleted because of a subsequent error. An # exception will be raised if the named volume is not present. self._request("/show/volumes", dest_name) LOG.debug("Volume copy of source_volume: %(src_name)s to " "destination_volume: %(dest_name)s completed.", {'src_name': src_name, 'dest_name': dest_name, }) def volume_copy_in_progress(self, src_name): """Check if a volume copy is in progress for the named volume.""" # 'show volume-copies' always succeeds, even if none in progress. 
tree = self._request("/show/volume-copies") # Find 0 or 1 job(s) with source volume we're interested in q = "OBJECT[PROPERTY[@name='source-volume']/text()='%s']" % src_name joblist = tree.xpath(q) if len(joblist) == 0: return False LOG.debug("Volume copy of volume: %(src_name)s is " "%(pc)s percent completed.", {'src_name': src_name, 'pc': joblist[0].findtext("PROPERTY[@name='progress']"), }) return True def _check_host(self, host): host_status = -1 tree = self._request("/show/hosts") for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']" % host): host_status = 0 return host_status def _safe_hostname(self, hostname): """Modify an initiator name to match firmware requirements. Initiator name cannot include certain characters and cannot exceed 15 bytes in 'T' firmware (32 bytes in 'G' firmware). """ for ch in [',', '"', '\\', '<', '>']: if ch in hostname: hostname = hostname.replace(ch, '') index = len(hostname) if index > 15: index = 15 return hostname[:index] def get_active_iscsi_target_portals(self): # This function returns {'ip': status,} portals = {} prop = 'ip-address' tree = self._request("/show/ports") for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"): prop = 'primary-ip-address' break iscsi_ips = [ip.text for ip in tree.xpath( "//PROPERTY[@name='%s']" % prop)] if not iscsi_ips: return portals for index, port_type in enumerate(tree.xpath( "//PROPERTY[@name='port-type' and text()='iSCSI']")): status = port_type.getparent().findtext("PROPERTY[@name='status']") if status == 'Up': portals[iscsi_ips[index]] = status return portals def get_chap_record(self, initiator_name): tree = self._request("/show/chap-records") for prop in tree.xpath("//PROPERTY[@name='initiator-name' and " "text()='%s']" % initiator_name): chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator" "-secret']") return chap_secret def create_chap_record(self, initiator_name, chap_secret): self._request("/create/chap-record", name=initiator_name, secret=chap_secret) def get_serial_number(self): tree = self._request("/show/system") return tree.findtext(".//PROPERTY[@name='midplane-serial-number']") def get_owner_info(self, backend_name, backend_type): if backend_type == 'linear': tree = self._request("/show/vdisks", backend_name) else: tree = self._request("/show/pools", backend_name) return tree.findtext(".//PROPERTY[@name='owner']") def modify_volume_name(self, old_name, new_name): self._request("/set/volume", old_name, name=new_name) def get_volume_size(self, volume_name): tree = self._request("/show/volumes", volume_name) size = tree.findtext(".//PROPERTY[@name='size-numeric']") return self._get_size(size) cinder-8.0.0/cinder/volume/drivers/dothill/dothill_common.py0000664000567000056710000005066312701406250025404 0ustar jenkinsjenkins00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
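# ---------------------------------------------------------------------
# Aside (not part of this module): the client above reports capacity by
# scaling the array's '*-numeric' properties, which count 512-byte
# blocks, into decimal gigabytes. A minimal standalone sketch of that
# conversion (mirroring DotHillClient._get_size), with a worked example:
#
#     import math
#
#     def blocks_to_gb(block_count):
#         # 512-byte blocks -> decimal GB, rounded up.
#         return int(math.ceil(float(block_count) * 512 / 10 ** 9))
#
#     # 2097152 blocks * 512 B = 1073741824 B ~= 1.07 GB, ceil -> 2
#     assert blocks_to_gb(2097152) == 2
# ---------------------------------------------------------------------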
# """ Volume driver common utilities for DotHill Storage array """ import base64 import six import uuid from oslo_config import cfg from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LE from cinder.volume.drivers.dothill import dothill_client as dothill LOG = logging.getLogger(__name__) common_opts = [ cfg.StrOpt('dothill_backend_name', default='A', help="Pool or Vdisk name to use for volume creation."), cfg.StrOpt('dothill_backend_type', choices=['linear', 'virtual'], default='virtual', help="linear (for Vdisk) or virtual (for Pool)."), cfg.StrOpt('dothill_api_protocol', choices=['http', 'https'], default='https', help="DotHill API interface protocol."), cfg.BoolOpt('dothill_verify_certificate', default=False, help="Whether to verify DotHill array SSL certificate."), cfg.StrOpt('dothill_verify_certificate_path', help="DotHill array SSL certificate path."), ] iscsi_opts = [ cfg.ListOpt('dothill_iscsi_ips', default=[], help="List of comma-separated target iSCSI IP addresses."), ] CONF = cfg.CONF CONF.register_opts(common_opts) CONF.register_opts(iscsi_opts) class DotHillCommon(object): VERSION = "1.0" stats = {} def __init__(self, config): self.config = config self.vendor_name = "DotHill" self.backend_name = self.config.dothill_backend_name self.backend_type = self.config.dothill_backend_type self.api_protocol = self.config.dothill_api_protocol ssl_verify = False if (self.api_protocol == 'https' and self.config.dothill_verify_certificate): ssl_verify = self.config.dothill_verify_certificate_path or True self.client = dothill.DotHillClient(self.config.san_ip, self.config.san_login, self.config.san_password, self.api_protocol, ssl_verify) def get_version(self): return self.VERSION def do_setup(self, context): self.client_login() self._validate_backend() self._get_owner_info() self._get_serial_number() self.client_logout() def client_login(self): LOG.debug("Connecting to %s Array.", self.vendor_name) try: self.client.login() except exception.DotHillConnectionError as ex: msg = _("Failed to connect to %(vendor_name)s Array %(host)s: " "%(err)s") % {'vendor_name': self.vendor_name, 'host': self.config.san_ip, 'err': six.text_type(ex)} LOG.error(msg) raise exception.DotHillConnectionError(message=msg) except exception.DotHillAuthenticationError: msg = _("Failed to log on %s Array " "(invalid login?).") % self.vendor_name LOG.error(msg) raise exception.DotHillAuthenticationError(message=msg) def _get_serial_number(self): self.serialNumber = self.client.get_serial_number() def _get_owner_info(self): self.owner = self.client.get_owner_info(self.backend_name, self.backend_type) def _validate_backend(self): if not self.client.backend_exists(self.backend_name, self.backend_type): self.client_logout() raise exception.DotHillInvalidBackend(backend=self.backend_name) def client_logout(self): self.client.logout() LOG.debug("Disconnected from %s Array.", self.vendor_name) def _get_vol_name(self, volume_id): volume_name = self._encode_name(volume_id) return "v%s" % volume_name def _get_snap_name(self, snapshot_id): snapshot_name = self._encode_name(snapshot_id) return "s%s" % snapshot_name def _encode_name(self, name): """Get converted DotHill volume name. Converts the openstack volume id from fceec30e-98bc-4ce5-85ff-d7309cc17cc2 to v_O7DDpi8TOWF_9cwnMF We convert the 128(32*4) bits of the uuid into a 24 characters long base64 encoded string. 
This still exceeds the limit of 20 characters in some models, so we return 19 characters because the _get_{vol,snap}_name functions prepend a character. """ uuid_str = name.replace("-", "") vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes) if six.PY3: vol_encoded = vol_encoded.decode('ascii') return vol_encoded[:19] def check_flags(self, options, required_flags): for flag in required_flags: if not getattr(options, flag, None): msg = _('%s configuration option is not set.') % flag LOG.error(msg) raise exception.InvalidInput(reason=msg) def create_volume(self, volume): self.client_login() # Use base64 to encode the volume name (UUID is too long for DotHill) volume_name = self._get_vol_name(volume['id']) volume_size = "%dGB" % volume['size'] LOG.debug("Create Volume having display_name: %(display_name)s " "name: %(name)s id: %(id)s size: %(size)s", {'display_name': volume['display_name'], 'name': volume['name'], 'id': volume_name, 'size': volume_size, }) try: self.client.create_volume(volume_name, volume_size, self.backend_name, self.backend_type) except exception.DotHillRequestError as ex: LOG.exception(_LE("Creation of volume %s failed."), volume['id']) raise exception.Invalid(ex) finally: self.client_logout() def _assert_enough_space_for_copy(self, volume_size): """The DotHill creates a snap pool before trying to copy the volume. The pool is 5.27 GB or 20% of the volume size, whichever is larger. Verify that we have enough space for the pool and the copy. """ pool_size = max(volume_size * 0.2, 5.27) required_size = pool_size + volume_size if required_size > self.stats['pools'][0]['free_capacity_gb']: raise exception.DotHillNotEnoughSpace(backend=self.backend_name) def _assert_source_detached(self, volume): """The DotHill requires a volume to be detached to clone it. Make sure that the volume is not in use when trying to copy it.
""" if (volume['status'] != "available" or volume['attach_status'] == "attached"): LOG.error(_LE("Volume must be detached for clone operation.")) raise exception.VolumeAttached(volume_id=volume['id']) def create_cloned_volume(self, volume, src_vref): self.get_volume_stats(True) self._assert_enough_space_for_copy(volume['size']) self._assert_source_detached(src_vref) LOG.debug("Cloning Volume %(source_id)s to (%(dest_id)s)", {'source_id': src_vref['id'], 'dest_id': volume['id'], }) if src_vref['name_id']: orig_name = self._get_vol_name(src_vref['name_id']) else: orig_name = self._get_vol_name(src_vref['id']) dest_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.copy_volume(orig_name, dest_name, self.backend_name, self.backend_type) except exception.DotHillRequestError as ex: LOG.exception(_LE("Cloning of volume %s failed."), src_vref['id']) raise exception.Invalid(ex) finally: self.client_logout() def create_volume_from_snapshot(self, volume, snapshot): self.get_volume_stats(True) self._assert_enough_space_for_copy(volume['size']) LOG.debug("Creating Volume from snapshot %(source_id)s to " "(%(dest_id)s)", {'source_id': snapshot['id'], 'dest_id': volume['id'], }) orig_name = self._get_snap_name(snapshot['id']) dest_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.copy_volume(orig_name, dest_name, self.backend_name, self.backend_type) except exception.DotHillRequestError as ex: LOG.exception(_LE("Create volume failed from snapshot: %s"), snapshot['id']) raise exception.Invalid(ex) finally: self.client_logout() def delete_volume(self, volume): LOG.debug("Deleting Volume: %s", volume['id']) if volume['name_id']: volume_name = self._get_vol_name(volume['name_id']) else: volume_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.delete_volume(volume_name) except exception.DotHillRequestError as ex: # if the volume wasn't found, ignore the error if 'The volume was not found on this system.' 
in ex.args: return LOG.exception(_LE("Deletion of volume %s failed."), volume['id']) raise exception.Invalid(ex) finally: self.client_logout() def get_volume_stats(self, refresh): if refresh: self.client_login() try: self._update_volume_stats() finally: self.client_logout() return self.stats def _update_volume_stats(self): # storage_protocol and volume_backend_name are # set in the child classes stats = {'driver_version': self.VERSION, 'storage_protocol': None, 'vendor_name': self.vendor_name, 'volume_backend_name': None, 'pools': []} pool = {'QoS_support': False} try: src_type = "%sVolumeDriver" % self.vendor_name backend_stats = self.client.backend_stats(self.backend_name, self.backend_type) pool.update(backend_stats) pool['location_info'] = ('%s:%s:%s:%s' % (src_type, self.serialNumber, self.backend_name, self.owner)) pool['pool_name'] = self.backend_name except exception.DotHillRequestError: err = (_("Unable to get stats for backend_name: %s") % self.backend_name) LOG.exception(err) raise exception.Invalid(reason=err) stats['pools'].append(pool) self.stats = stats def _assert_connector_ok(self, connector, connector_element): if not connector[connector_element]: msg = _("Connector does not provide: %s") % connector_element LOG.error(msg) raise exception.InvalidInput(reason=msg) def map_volume(self, volume, connector, connector_element): self._assert_connector_ok(connector, connector_element) if volume['name_id']: volume_name = self._get_vol_name(volume['name_id']) else: volume_name = self._get_vol_name(volume['id']) try: data = self.client.map_volume(volume_name, connector, connector_element) return data except exception.DotHillRequestError as ex: LOG.exception(_LE("Error mapping volume: %s"), volume_name) raise exception.Invalid(ex) def unmap_volume(self, volume, connector, connector_element): self._assert_connector_ok(connector, connector_element) if volume['name_id']: volume_name = self._get_vol_name(volume['name_id']) else: volume_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.unmap_volume(volume_name, connector, connector_element) except exception.DotHillRequestError as ex: LOG.exception(_LE("Error unmapping volume: %s"), volume_name) raise exception.Invalid(ex) finally: self.client_logout() def get_active_fc_target_ports(self): try: return self.client.get_active_fc_target_ports() except exception.DotHillRequestError as ex: LOG.exception(_LE("Error getting active FC target ports.")) raise exception.Invalid(ex) def get_active_iscsi_target_iqns(self): try: return self.client.get_active_iscsi_target_iqns() except exception.DotHillRequestError as ex: LOG.exception(_LE("Error getting active ISCSI target iqns.")) raise exception.Invalid(ex) def get_active_iscsi_target_portals(self): try: return self.client.get_active_iscsi_target_portals() except exception.DotHillRequestError as ex: LOG.exception(_LE("Error getting active ISCSI target portals.")) raise exception.Invalid(ex) def create_snapshot(self, snapshot): LOG.debug("Creating snapshot (%(snap_id)s) from %(volume_id)s)", {'snap_id': snapshot['id'], 'volume_id': snapshot['volume_id'], }) if snapshot['volume']['name_id']: vol_name = self._get_vol_name(snapshot['volume']['name_id']) else: vol_name = self._get_vol_name(snapshot['volume_id']) snap_name = self._get_snap_name(snapshot['id']) self.client_login() try: self.client.create_snapshot(vol_name, snap_name) except exception.DotHillRequestError as ex: LOG.exception(_LE("Creation of snapshot failed for volume: %s"), snapshot['volume_id']) raise 
exception.Invalid(ex) finally: self.client_logout() def delete_snapshot(self, snapshot): snap_name = self._get_snap_name(snapshot['id']) LOG.debug("Deleting snapshot (%s)", snapshot['id']) self.client_login() try: self.client.delete_snapshot(snap_name) except exception.DotHillRequestError as ex: # if the volume wasn't found, ignore the error if 'The volume was not found on this system.' in ex.args: return LOG.exception(_LE("Deleting snapshot %s failed"), snapshot['id']) raise exception.Invalid(ex) finally: self.client_logout() def extend_volume(self, volume, new_size): if volume['name_id']: volume_name = self._get_vol_name(volume['name_id']) else: volume_name = self._get_vol_name(volume['id']) old_size = volume['size'] growth_size = int(new_size) - old_size LOG.debug("Extending Volume %(volume_name)s from %(old_size)s to " "%(new_size)s, by %(growth_size)s GB.", {'volume_name': volume_name, 'old_size': old_size, 'new_size': new_size, 'growth_size': growth_size, }) self.client_login() try: self.client.extend_volume(volume_name, "%dGB" % growth_size) except exception.DotHillRequestError as ex: LOG.exception(_LE("Extension of volume %s failed."), volume['id']) raise exception.Invalid(ex) finally: self.client_logout() def get_chap_record(self, initiator_name): try: return self.client.get_chap_record(initiator_name) except exception.DotHillRequestError as ex: LOG.exception(_LE("Error getting chap record.")) raise exception.Invalid(ex) def create_chap_record(self, initiator_name, chap_secret): try: self.client.create_chap_record(initiator_name, chap_secret) except exception.DotHillRequestError as ex: LOG.exception(_LE("Error creating chap record.")) raise exception.Invalid(ex) def migrate_volume(self, volume, host): """Migrate directly if source and dest are managed by same storage. :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
:returns: (False, None) if the driver does not support migration, (True, None) if successful """ false_ret = (False, None) if volume['attach_status'] == "attached": return false_ret if 'location_info' not in host['capabilities']: return false_ret info = host['capabilities']['location_info'] try: (dest_type, dest_id, dest_back_name, dest_owner) = info.split(':') except ValueError: return false_ret if not (dest_type == 'DotHillVolumeDriver' and dest_id == self.serialNumber and dest_owner == self.owner): return false_ret if volume['name_id']: source_name = self._get_vol_name(volume['name_id']) else: source_name = self._get_vol_name(volume['id']) # DotHill Array does not support duplicate names dest_name = "m%s" % source_name[1:] self.client_login() try: self.client.copy_volume(source_name, dest_name, dest_back_name, self.backend_type) self.client.delete_volume(source_name) self.client.modify_volume_name(dest_name, source_name) return (True, None) except exception.DotHillRequestError as ex: LOG.exception(_LE("Error migrating volume: %s"), source_name) raise exception.Invalid(ex) finally: self.client_logout() def retype(self, volume, new_type, diff, host): ret = self.migrate_volume(volume, host) return ret[0] def manage_existing(self, volume, existing_ref): """Manage an existing non-openstack DotHill volume existing_ref is a dictionary of the form: {'source-name': } """ target_vol_name = existing_ref['source-name'] modify_target_vol_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.modify_volume_name(target_vol_name, modify_target_vol_name) except exception.DotHillRequestError as ex: LOG.exception(_LE("Error manage existing volume.")) raise exception.Invalid(ex) finally: self.client_logout() def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. existing_ref is a dictionary of the form: {'source-name': } """ target_vol_name = existing_ref['source-name'] self.client_login() try: size = self.client.get_volume_size(target_vol_name) return size except exception.DotHillRequestError as ex: LOG.exception(_LE("Error manage existing get volume size.")) raise exception.Invalid(ex) finally: self.client_logout() cinder-8.0.0/cinder/volume/volume_types.py0000664000567000056710000002567512701406250022020 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2011 Ken Pepple # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
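# ---------------------------------------------------------------------
# Aside (not part of this module): the DotHill migrate_volume() above
# recognizes "same array" destinations by unpacking the colon-separated
# location_info string published in its pool stats,
# '<driver>:<serial>:<backend>:<owner>'. A minimal sketch with made-up
# values:
#
#     info = 'DotHillVolumeDriver:00C0FF0000AB:A:A'  # hypothetical
#     dest_type, dest_id, dest_back_name, dest_owner = info.split(':')
#     same_array = (dest_type == 'DotHillVolumeDriver' and
#                   dest_id == my_serial and   # midplane serial number
#                   dest_owner == my_owner)    # owner of the vdisk/pool
#
# Only when same_array holds does the driver copy on the array itself;
# otherwise it returns (False, None) so the generic host-based
# migration path can take over.
# ---------------------------------------------------------------------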
"""Built-in volume type properties.""" from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from cinder import context from cinder import db from cinder import exception from cinder.i18n import _, _LE from cinder import quota CONF = cfg.CONF LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS def create(context, name, extra_specs=None, is_public=True, projects=None, description=None): """Creates volume types.""" extra_specs = extra_specs or {} projects = projects or [] elevated = context if context.is_admin else context.elevated() try: type_ref = db.volume_type_create(elevated, dict(name=name, extra_specs=extra_specs, is_public=is_public, description=description), projects=projects) except db_exc.DBError: LOG.exception(_LE('DB error:')) raise exception.VolumeTypeCreateFailed(name=name, extra_specs=extra_specs) return type_ref def update(context, id, name, description, is_public=None): """Update volume type by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidVolumeType(reason=msg) elevated = context if context.is_admin else context.elevated() old_volume_type = get_volume_type(elevated, id) try: type_updated = db.volume_type_update(elevated, id, dict(name=name, description=description, is_public=is_public)) # Rename resource in quota if volume type name is changed. if name: old_type_name = old_volume_type.get('name') if old_type_name != name: QUOTAS.update_quota_resource(elevated, old_type_name, name) except db_exc.DBError: LOG.exception(_LE('DB error:')) raise exception.VolumeTypeUpdateFailed(id=id) return type_updated def destroy(context, id): """Marks volume types as deleted.""" if id is None: msg = _("id cannot be None") raise exception.InvalidVolumeType(reason=msg) else: elevated = context if context.is_admin else context.elevated() db.volume_type_destroy(elevated, id) def get_all_types(context, inactive=0, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): """Get all non-deleted volume_types. Pass true as argument if you want deleted volume types returned also. """ vol_types = db.volume_type_get_all(context, inactive, filters=filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=list_result) return vol_types def get_volume_type(ctxt, id, expected_fields=None): """Retrieves single volume type by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidVolumeType(reason=msg) if ctxt is None: ctxt = context.get_admin_context() return db.volume_type_get(ctxt, id, expected_fields=expected_fields) def get_volume_type_by_name(context, name): """Retrieves single volume type by name.""" if name is None: msg = _("name cannot be None") raise exception.InvalidVolumeType(reason=msg) return db.volume_type_get_by_name(context, name) def get_default_volume_type(): """Get the default volume type.""" name = CONF.default_volume_type vol_type = {} if name is not None: ctxt = context.get_admin_context() try: vol_type = get_volume_type_by_name(ctxt, name) except exception.VolumeTypeNotFoundByName: # Couldn't find volume type with the name in default_volume_type # flag, record this issue and move on # TODO(zhiteng) consider add notification to warn admin LOG.exception(_LE('Default volume type is not found. 
' 'Please check default_volume_type config:')) return vol_type def get_volume_type_extra_specs(volume_type_id, key=False): volume_type = get_volume_type(context.get_admin_context(), volume_type_id) extra_specs = volume_type['extra_specs'] if key: if extra_specs.get(key): return extra_specs.get(key) else: return False else: return extra_specs def is_public_volume_type(context, volume_type_id): """Return is_public boolean value of volume type""" volume_type = db.volume_type_get(context, volume_type_id) return volume_type['is_public'] def add_volume_type_access(context, volume_type_id, project_id): """Add access to volume type for project_id.""" if volume_type_id is None: msg = _("volume_type_id cannot be None") raise exception.InvalidVolumeType(reason=msg) elevated = context if context.is_admin else context.elevated() if is_public_volume_type(elevated, volume_type_id): msg = _("Type access modification is not applicable to public volume " "type.") raise exception.InvalidVolumeType(reason=msg) return db.volume_type_access_add(elevated, volume_type_id, project_id) def remove_volume_type_access(context, volume_type_id, project_id): """Remove access to volume type for project_id.""" if volume_type_id is None: msg = _("volume_type_id cannot be None") raise exception.InvalidVolumeType(reason=msg) elevated = context if context.is_admin else context.elevated() if is_public_volume_type(elevated, volume_type_id): msg = _("Type access modification is not applicable to public volume " "type.") raise exception.InvalidVolumeType(reason=msg) return db.volume_type_access_remove(elevated, volume_type_id, project_id) def is_encrypted(context, volume_type_id): return get_volume_type_encryption(context, volume_type_id) is not None def get_volume_type_encryption(context, volume_type_id): if volume_type_id is None: return None encryption = db.volume_type_encryption_get(context, volume_type_id) return encryption def get_volume_type_qos_specs(volume_type_id): ctxt = context.get_admin_context() res = db.volume_type_qos_specs_get(ctxt, volume_type_id) return res def volume_types_diff(context, vol_type_id1, vol_type_id2): """Returns a 'diff' of two volume types and whether they are equal. 
Returns a tuple of (diff, equal), where 'equal' is a boolean indicating whether there is any difference, and 'diff' is a dictionary with the following format: {'extra_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type), 'key2': (value_in_1st_vol_type, value_in_2nd_vol_type), ...} 'qos_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type), 'key2': (value_in_1st_vol_type, value_in_2nd_vol_type), ...} 'encryption': {'cipher': (value_in_1st_vol_type, value_in_2nd_vol_type), {'key_size': (value_in_1st_vol_type, value_in_2nd_vol_type), ...} """ def _fix_qos_specs(qos_specs): if qos_specs: qos_specs.pop('id', None) qos_specs.pop('name', None) qos_specs.update(qos_specs.pop('specs', {})) def _fix_encryption_specs(encryption): if encryption: encryption = dict(encryption) for param in ['volume_type_id', 'created_at', 'updated_at', 'deleted_at']: encryption.pop(param, None) return encryption def _dict_diff(dict1, dict2): res = {} equal = True if dict1 is None: dict1 = {} if dict2 is None: dict2 = {} for k, v in dict1.items(): res[k] = (v, dict2.get(k)) if k not in dict2 or res[k][0] != res[k][1]: equal = False for k, v in dict2.items(): res[k] = (dict1.get(k), v) if k not in dict1 or res[k][0] != res[k][1]: equal = False return (res, equal) all_equal = True diff = {} vol_type_data = [] for vol_type_id in (vol_type_id1, vol_type_id2): if vol_type_id is None: specs = {'extra_specs': None, 'qos_specs': None, 'encryption': None} else: specs = {} vol_type = get_volume_type(context, vol_type_id) specs['extra_specs'] = vol_type.get('extra_specs') qos_specs = get_volume_type_qos_specs(vol_type_id) specs['qos_specs'] = qos_specs.get('qos_specs') _fix_qos_specs(specs['qos_specs']) specs['encryption'] = get_volume_type_encryption(context, vol_type_id) specs['encryption'] = _fix_encryption_specs(specs['encryption']) vol_type_data.append(specs) diff['extra_specs'], equal = _dict_diff(vol_type_data[0]['extra_specs'], vol_type_data[1]['extra_specs']) if not equal: all_equal = False diff['qos_specs'], equal = _dict_diff(vol_type_data[0]['qos_specs'], vol_type_data[1]['qos_specs']) if not equal: all_equal = False diff['encryption'], equal = _dict_diff(vol_type_data[0]['encryption'], vol_type_data[1]['encryption']) if not equal: all_equal = False return (diff, all_equal) cinder-8.0.0/cinder/volume/manager.py0000664000567000056710000047431312701406257020703 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume manager manages creating, attaching, detaching, and persistent storage. Persistent storage volumes keep their state independent of instances. You can attach to an instance, terminate the instance, spawn a new instance (even one from a different image) and re-attach the volume with the same data intact. **Related Flags** :volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`). 
:volume_manager: The module name of a class derived from :class:`manager.Manager` (default: :class:`cinder.volume.manager.Manager`). :volume_driver: Used by :class:`Manager`. Defaults to :class:`cinder.volume.drivers.lvm.LVMVolumeDriver`. :volume_group: Name of the group that will contain exported volumes (default: `cinder-volumes`) :num_shell_tries: Number of times to attempt to run commands (default: 3) """ import requests import time from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils profiler = importutils.try_import('osprofiler.profiler') import six from taskflow import exceptions as tfe from cinder import compute from cinder import context from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE, _LI, _LW from cinder.image import cache as image_cache from cinder.image import glance from cinder import manager from cinder import objects from cinder.objects import fields from cinder import quota from cinder import utils from cinder import volume as cinder_volume from cinder.volume import configuration as config from cinder.volume.flows.manager import create_volume from cinder.volume.flows.manager import manage_existing from cinder.volume.flows.manager import manage_existing_snapshot from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils as vol_utils from cinder.volume import volume_types LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS CGQUOTAS = quota.CGQUOTAS VALID_REMOVE_VOL_FROM_CG_STATUS = ( 'available', 'in-use', 'error', 'error_deleting') VALID_ADD_VOL_TO_CG_STATUS = ( 'available', 'in-use') VALID_CREATE_CG_SRC_SNAP_STATUS = ('available',) VALID_CREATE_CG_SRC_CG_STATUS = ('available',) volume_manager_opts = [ cfg.StrOpt('volume_driver', default='cinder.volume.drivers.lvm.LVMVolumeDriver', help='Driver to use for volume creation'), cfg.IntOpt('migration_create_volume_timeout_secs', default=300, help='Timeout for creating the volume to migrate to ' 'when performing volume migration (seconds)'), cfg.BoolOpt('volume_service_inithost_offload', default=False, help='Offload pending volume delete during ' 'volume service startup'), cfg.StrOpt('zoning_mode', help='FC Zoning mode configured'), cfg.StrOpt('extra_capabilities', default='{}', help='User defined capabilities, a JSON formatted string ' 'specifying key/value pairs. The key/value pairs can ' 'be used by the CapabilitiesFilter to select between ' 'backends when requests specify volume types. 
For ' 'example, specifying a service level or the geographical ' 'location of a backend, then creating a volume type to ' 'allow the user to select by these different ' 'properties.'), cfg.BoolOpt('suppress_requests_ssl_warnings', default=False, help='Suppress requests library SSL certificate warnings.'), ] CONF = cfg.CONF CONF.register_opts(volume_manager_opts) MAPPING = { 'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver': 'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver', 'cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver': 'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver', 'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver': 'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver', 'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver': 'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver', 'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver': 'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver', 'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver': 'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver', 'cinder.volume.drivers.hds.nfs.HDSNFSDriver': 'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver', 'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver': 'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver', 'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver': 'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver', 'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver': 'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver', 'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver': 'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver', 'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver': 'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver', } def locked_volume_operation(f): """Lock decorator for volume operations. Takes a named lock prior to executing the operation. The lock is named with the operation executed and the id of the volume. This lock can then be used by other operations to avoid operation conflicts on shared volumes. Example use: If a volume operation uses this decorator, it will block until the named lock is free. This is used to protect concurrent operations on the same volume e.g. delete VolA while create volume VolB from VolA is in progress. """ def lvo_inner1(inst, context, volume_id, **kwargs): @utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True) def lvo_inner2(*_args, **_kwargs): return f(*_args, **_kwargs) return lvo_inner2(inst, context, volume_id, **kwargs) return lvo_inner1 def locked_detach_operation(f): """Lock decorator for volume detach operations. Takes a named lock prior to executing the detach call. The lock is named with the operation executed and the id of the volume. This lock can then be used by other operations to avoid operation conflicts on shared volumes. This locking mechanism is only for detach calls. We can't use the locked_volume_operation, because detach requires an additional attachment_id in the parameter list. """ def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs): @utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True) def ldo_inner2(*_args, **_kwargs): return f(*_args, **_kwargs) return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs) return ldo_inner1 def locked_snapshot_operation(f): """Lock decorator for snapshot operations. Takes a named lock prior to executing the operation. 
The lock is named with the operation
    executed and the id of the snapshot. This lock can then be used
    by other operations to avoid operation conflicts on shared snapshots.

    Example use:

    If a snapshot operation uses this decorator, it will block until the
    named lock is free. This is used to protect concurrent operations on the
    same snapshot e.g. delete SnapA while create volume VolA from SnapA is in
    progress.
    """
    def lso_inner1(inst, context, snapshot, **kwargs):
        @utils.synchronized("%s-%s" % (snapshot.id, f.__name__),
                            external=True)
        def lso_inner2(*_args, **_kwargs):
            return f(*_args, **_kwargs)
        return lso_inner2(inst, context, snapshot, **kwargs)
    return lso_inner1


class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    RPC_API_VERSION = '2.0'

    target = messaging.Target(version=RPC_API_VERSION)

    # On cloning a volume, we shouldn't copy volume_type, consistencygroup
    # and volume_attachment, because the db sets that according to [field]_id,
    # which we do copy. We also skip some other values that are set during
    # creation of Volume object.
    _VOLUME_CLONE_SKIP_PROPERTIES = {
        'id', '_name_id', 'name_id', 'name', 'status',
        'attach_status', 'migration_status', 'volume_type',
        'consistencygroup', 'volume_attachment'}

    def __init__(self, volume_driver=None, service_name=None,
                 *args, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        # update_service_capabilities needs service_name to be volume
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        self.additional_endpoints.append(_VolumeV1Proxy(self))
        self.configuration = config.Configuration(volume_manager_opts,
                                                  config_group=service_name)
        self.stats = {}

        if not volume_driver:
            # Get from configuration, which will get the default
            # if it's not using the multi backend
            volume_driver = self.configuration.volume_driver
        if volume_driver in MAPPING:
            LOG.warning(_LW("Driver path %s is deprecated, update your "
                            "configuration to the new path."), volume_driver)
            volume_driver = MAPPING[volume_driver]

        vol_db_empty = self._set_voldb_empty_at_startup_indicator(
            context.get_admin_context())
        LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)

        # We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
        curr_active_backend_id = None
        svc_host = vol_utils.extract_host(self.host, 'backend')
        try:
            service = objects.Service.get_by_args(
                context.get_admin_context(),
                svc_host,
                'cinder-volume')
        except exception.ServiceNotFound:
            # NOTE(jdg): This is to solve problems with unit tests
            LOG.info(_LI("Service not found for updating "
                         "active_backend_id, assuming default "
                         "for driver init."))
        else:
            curr_active_backend_id = service.active_backend_id

        if self.configuration.suppress_requests_ssl_warnings:
            LOG.warning(_LW("Suppressing requests library SSL Warnings"))
            requests.packages.urllib3.disable_warnings(
                requests.packages.urllib3.exceptions.InsecureRequestWarning)
            requests.packages.urllib3.disable_warnings(
                requests.packages.urllib3.exceptions.InsecurePlatformWarning)

        self.driver = importutils.import_object(
            volume_driver,
            configuration=self.configuration,
            db=self.db,
            host=self.host,
            is_vol_db_empty=vol_db_empty,
            active_backend_id=curr_active_backend_id)

        if CONF.profiler.enabled and profiler is not None:
            self.driver = profiler.trace_cls("driver")(self.driver)

        try:
            self.extra_capabilities = jsonutils.loads(
                self.driver.configuration.extra_capabilities)
        except AttributeError:
            self.extra_capabilities = {}
        except
Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Invalid JSON: %s"), self.driver.configuration.extra_capabilities) if self.driver.configuration.safe_get( 'image_volume_cache_enabled'): max_cache_size = self.driver.configuration.safe_get( 'image_volume_cache_max_size_gb') max_cache_entries = self.driver.configuration.safe_get( 'image_volume_cache_max_count') self.image_volume_cache = image_cache.ImageVolumeCache( self.db, cinder_volume.API(), max_cache_size, max_cache_entries ) LOG.info(_LI('Image-volume cache enabled for host %(host)s.'), {'host': self.host}) else: LOG.info(_LI('Image-volume cache disabled for host %(host)s.'), {'host': self.host}) self.image_volume_cache = None def _add_to_threadpool(self, func, *args, **kwargs): self._tp.spawn_n(func, *args, **kwargs) def _count_allocated_capacity(self, ctxt, volume): pool = vol_utils.extract_host(volume['host'], 'pool') if pool is None: # No pool name encoded in host, so this is a legacy # volume created before pool is introduced, ask # driver to provide pool info if it has such # knowledge and update the DB. try: pool = self.driver.get_pool(volume) except Exception: LOG.exception(_LE('Fetch volume pool name failed.'), resource=volume) return if pool: new_host = vol_utils.append_host(volume['host'], pool) self.db.volume_update(ctxt, volume['id'], {'host': new_host}) else: # Otherwise, put them into a special fixed pool with # volume_backend_name being the pool name, if # volume_backend_name is None, use default pool name. # This is only for counting purpose, doesn't update DB. pool = (self.driver.configuration.safe_get( 'volume_backend_name') or vol_utils.extract_host( volume['host'], 'pool', True)) try: pool_stat = self.stats['pools'][pool] except KeyError: # First volume in the pool self.stats['pools'][pool] = dict( allocated_capacity_gb=0) pool_stat = self.stats['pools'][pool] pool_sum = pool_stat['allocated_capacity_gb'] pool_sum += volume['size'] self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum self.stats['allocated_capacity_gb'] += volume['size'] def _set_voldb_empty_at_startup_indicator(self, ctxt): """Determine if the Cinder volume DB is empty. A check of the volume DB is done to determine whether it is empty or not at this point. 
:param ctxt: our working context
        """
        vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
        if len(vol_entries) == 0:
            LOG.info(_LI("Determined volume DB was empty at startup."))
            return True
        else:
            LOG.info(_LI("Determined volume DB was not empty at startup."))
            return False

    def _sync_provider_info(self, ctxt, volumes, snapshots):
        # NOTE(jdg): For now this just updates provider_id, we can add more
        # items to the update if they're relevant but we need
        # to be safe in what we allow and add a list of allowed keys
        # things that make sense are provider_*, replication_status etc

        updates, snapshot_updates = self.driver.update_provider_info(
            volumes, snapshots)
        host_vols = utils.list_of_dicts_to_dict(volumes, 'id')

        for u in updates or []:
            update = {}
            # NOTE(JDG): Make sure returned item is in this host's volumes
            if host_vols.get(u['id'], None):
                update['provider_id'] = u['provider_id']
            if update:
                self.db.volume_update(ctxt,
                                      u['id'],
                                      update)

        # NOTE(jdg): snapshots are slightly harder, because
        # we do not have a host column and of course no get
        # all by host, so we use a get_all and bounce our
        # response off of it
        if snapshot_updates:
            cinder_snaps = self.db.snapshot_get_all(ctxt)
            for snap in cinder_snaps:
                # NOTE(jdg): For now we only update those that have no entry
                if not snap.get('provider_id', None):
                    update = (
                        [updt for updt in snapshot_updates if updt['id'] ==
                            snap['id']][0])
                    if update:
                        self.db.snapshot_update(
                            ctxt,
                            updt['id'],
                            {'provider_id': updt['provider_id']})

    def init_host(self):
        """Perform any required initialization."""
        ctxt = context.get_admin_context()

        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})
        try:
            self.driver.do_setup(ctxt)
            self.driver.check_for_setup_error()
        except Exception:
            LOG.exception(_LE("Failed to initialize driver."),
                          resource={'type': 'driver',
                                    'id': self.__class__.__name__})
            # we don't want to continue since we failed
            # to initialize the driver correctly.
            return

        # Initialize backend capabilities list
        self.driver.init_capabilities()

        volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
        snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
        self._sync_provider_info(ctxt, volumes, snapshots)
        # FIXME volume count for exporting is wrong

        try:
            self.stats['pools'] = {}
            self.stats.update({'allocated_capacity_gb': 0})
            for volume in volumes:
                # available volume should also be counted into allocated
                if volume['status'] in ['in-use', 'available']:
                    # calculate allocated capacity for driver
                    self._count_allocated_capacity(ctxt, volume)

                    try:
                        if volume['status'] in ['in-use']:
                            self.driver.ensure_export(ctxt, volume)
                    except Exception:
                        LOG.exception(_LE("Failed to re-export volume, "
                                          "setting to ERROR."),
                                      resource=volume)
                        volume.status = 'error'
                        volume.save()
                elif volume['status'] in ('downloading', 'creating'):
                    LOG.warning(_LW("Detected volume stuck "
                                    "in %(curr_status)s "
                                    "status, setting to ERROR."),
                                {'curr_status': volume['status']},
                                resource=volume)

                    if volume['status'] == 'downloading':
                        self.driver.clear_download(ctxt, volume)
                    volume.status = 'error'
                    volume.save()
                elif volume.status == 'uploading':
                    # Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment( ctxt, volume.id) else: pass snapshots = objects.SnapshotList.get_by_host( ctxt, self.host, {'status': 'creating'}) for snapshot in snapshots: LOG.warning(_LW("Detected snapshot stuck in creating " "status, setting to ERROR."), resource=snapshot) snapshot.status = 'error' snapshot.save() except Exception: LOG.exception(_LE("Error during re-export on driver init."), resource=volume) return self.driver.set_throttle() # at this point the driver is considered initialized. # NOTE(jdg): Careful though because that doesn't mean # that an entry exists in the service table self.driver.set_initialized() for volume in volumes: if volume['status'] == 'deleting': if CONF.volume_service_inithost_offload: # Offload all the pending volume delete operations to the # threadpool to prevent the main volume service thread # from being blocked. self._add_to_threadpool(self.delete_volume, ctxt, volume['id']) else: # By default, delete volumes sequentially self.delete_volume(ctxt, volume['id']) LOG.info(_LI("Resume volume delete completed successfully."), resource=volume) # collect and publish service capabilities self.publish_service_capabilities(ctxt) LOG.info(_LI("Driver initialization completed successfully."), resource={'type': 'driver', 'id': self.driver.__class__.__name__}) def init_host_with_rpc(self): LOG.info(_LI("Initializing RPC dependent components of volume " "driver %(driver_name)s (%(version)s)"), {'driver_name': self.driver.__class__.__name__, 'version': self.driver.get_version()}) stats = self.driver.get_volume_stats(refresh=True) svc_host = vol_utils.extract_host(self.host, 'backend') try: service = objects.Service.get_by_args( context.get_admin_context(), svc_host, 'cinder-volume') except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): LOG.error(_LE("Service not found for updating " "replication_status.")) if service.replication_status != ( fields.ReplicationStatus.FAILED_OVER): if stats and stats.get('replication_enabled', False): service.replication_status = fields.ReplicationStatus.ENABLED else: service.replication_status = fields.ReplicationStatus.DISABLED service.save() LOG.info(_LI("Driver post RPC initialization completed successfully."), resource={'type': 'driver', 'id': self.driver.__class__.__name__}) def is_working(self): """Return if Manager is ready to accept requests. This is to inform Service class that in case of volume driver initialization failure the manager is actually down and not ready to accept any requests. """ return self.driver.initialized def create_volume(self, context, volume_id, request_spec=None, filter_properties=None, allow_reschedule=True, volume=None): """Creates the volume.""" # FIXME(thangp): Remove this in v2.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the volume # by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) context_elevated = context.elevated() if filter_properties is None: filter_properties = {} if request_spec is None: request_spec = {} try: # NOTE(flaper87): Driver initialization is # verified by the task itself. 
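# Illustrative sketch (not part of cinder): the "FIXME(thangp)" compatibility
# shim used throughout this manager. Pre-2.0 RPC clients send only an id;
# newer clients send the versioned object, so handlers accept both during the
# upgrade window. 'fetch_by_id' is a hypothetical stand-in for
# objects.Volume.get_by_id.
def load_volume(context, volume_id, volume=None, fetch_by_id=None):
    """Return the volume object, looking it up by id for older callers."""
    if volume is None:
        # Older client: mimic the old behavior and look the volume up.
        volume = fetch_by_id(context, volume_id)
    return volume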
flow_engine = create_volume.get_flow( context_elevated, self, self.db, self.driver, self.scheduler_rpcapi, self.host, volume.id, allow_reschedule, context, request_spec, filter_properties, image_volume_cache=self.image_volume_cache, ) except Exception: msg = _("Create manager volume flow failed.") LOG.exception(msg, resource={'type': 'volume', 'id': volume.id}) raise exception.CinderException(msg) snapshot_id = request_spec.get('snapshot_id') source_volid = request_spec.get('source_volid') source_replicaid = request_spec.get('source_replicaid') if snapshot_id is not None: # Make sure the snapshot is not deleted until we are done with it. locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot') elif source_volid is not None: # Make sure the volume is not deleted until we are done with it. locked_action = "%s-%s" % (source_volid, 'delete_volume') elif source_replicaid is not None: # Make sure the volume is not deleted until we are done with it. locked_action = "%s-%s" % (source_replicaid, 'delete_volume') else: locked_action = None def _run_flow(): # This code executes create volume flow. If something goes wrong, # flow reverts all job that was done and reraises an exception. # Otherwise, all data that was generated by flow becomes available # in flow engine's storage. with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() @utils.synchronized(locked_action, external=True) def _run_flow_locked(): _run_flow() # NOTE(dulek): Flag to indicate if volume was rescheduled. Used to # decide if allocated_capacity should be incremented. rescheduled = False vol_ref = None try: if locked_action is None: _run_flow() else: _run_flow_locked() finally: try: vol_ref = flow_engine.storage.fetch('volume_ref') except tfe.NotFound: # If there's no vol_ref, then flow is reverted. Lets check out # if rescheduling occurred. try: rescheduled = flow_engine.storage.get_revert_result( create_volume.OnFailureRescheduleTask.make_name( [create_volume.ACTION])) except tfe.NotFound: pass if not rescheduled: if not vol_ref: # Flow was reverted and not rescheduled, fetching # volume_ref from the DB, because it will be needed. vol_ref = objects.Volume.get_by_id(context, volume.id) # NOTE(dulek): Volume wasn't rescheduled so we need to update # volume stats as these are decremented on delete. self._update_allocated_capacity(vol_ref) LOG.info(_LI("Created volume successfully."), resource=vol_ref) return vol_ref.id @locked_volume_operation def delete_volume(self, context, volume_id, unmanage_only=False, volume=None, cascade=False): """Deletes and unexports volume. 1. Delete a volume(normal case) Delete a volume and update quotas. 2. Delete a migration volume If deleting the volume in a migration, we want to skip quotas but we need database updates for the volume. """ context = context.elevated() try: # FIXME(thangp): Remove this in v2.0 of RPC API. if volume is None: volume = objects.Volume.get_by_id(context, volume_id) else: volume.refresh() except exception.VolumeNotFound: # NOTE(thingee): It could be possible for a volume to # be deleted when resuming deletes from init_host(). 
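# Illustrative sketch (not part of cinder): the locked_action scheme above
# serializes, e.g., "create volume from snapshot" against "delete snapshot"
# by sharing one lock name, "<source_id>-delete_<resource>". A standalone
# analog using threading.Lock where the real code uses utils.synchronized
# (an external, file-based lock):
import collections
import threading

_named_locks = collections.defaultdict(threading.Lock)

def run_locked(locked_action, func):
    """Run func under the named lock; run unlocked when no action is set."""
    if locked_action is None:
        return func()
    with _named_locks[locked_action]:
        return func()

# e.g. run_locked("%s-delete_snapshot" % snapshot_id, flow_engine.run)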
LOG.debug("Attempted delete of non-existent volume: %s", volume_id) return if context.project_id != volume.project_id: project_id = volume.project_id else: project_id = context.project_id if volume['attach_status'] == "attached": # Volume is still attached, need to detach first raise exception.VolumeAttached(volume_id=volume_id) if vol_utils.extract_host(volume.host) != self.host: raise exception.InvalidVolume( reason=_("volume is not local to this node")) if unmanage_only and cascade: # This could be done, but is ruled out for now just # for simplicity. raise exception.Invalid( reason=_("Unmanage and cascade delete options " "are mutually exclusive.")) # The status 'deleting' is not included, because it only applies to # the source volume to be deleted after a migration. No quota # needs to be handled for it. is_migrating = volume.migration_status not in (None, 'error', 'success') is_migrating_dest = (is_migrating and volume.migration_status.startswith( 'target:')) self._notify_about_volume_usage(context, volume, "delete.start") try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. utils.require_driver_initialized(self.driver) self.driver.remove_export(context, volume) if unmanage_only: self.driver.unmanage(volume) elif cascade: LOG.debug('Performing cascade delete.') snapshots = objects.SnapshotList.get_all_for_volume(context, volume.id) for s in snapshots: if s.status != 'deleting': self._clear_db(context, is_migrating_dest, volume, 'error_deleting') msg = (_("Snapshot %(id)s was found in state " "%(state)s rather than 'deleting' during " "cascade delete.") % {'id': s.id, 'state': s.status}) raise exception.InvalidSnapshot(reason=msg) self.delete_snapshot(context, s) LOG.debug('Snapshots deleted, issuing volume delete') self.driver.delete_volume(volume) else: self.driver.delete_volume(volume) except exception.VolumeIsBusy: LOG.error(_LE("Unable to delete busy volume."), resource=volume) # If this is a destination volume, we have to clear the database # record to avoid user confusion. self._clear_db(context, is_migrating_dest, volume, 'available') return except Exception: with excutils.save_and_reraise_exception(): # If this is a destination volume, we have to clear the # database record to avoid user confusion. self._clear_db(context, is_migrating_dest, volume, 'error_deleting') # If deleting source/destination volume in a migration, we should # skip quotas. if not is_migrating: # Get reservations try: reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.volume_type_id) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE("Failed to update usages deleting volume."), resource=volume) # Delete glance metadata if it exists self.db.volume_glance_metadata_delete_by_volume(context, volume_id) volume.destroy() # If deleting source/destination volume in a migration, we should # skip quotas. 
if not is_migrating: self._notify_about_volume_usage(context, volume, "delete.end") # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=project_id) pool = vol_utils.extract_host(volume.host, 'pool') if pool is None: # Legacy volume, put them into default pool pool = self.driver.configuration.safe_get( 'volume_backend_name') or vol_utils.extract_host( volume.host, 'pool', True) size = volume.size try: self.stats['pools'][pool]['allocated_capacity_gb'] -= size except KeyError: self.stats['pools'][pool] = dict( allocated_capacity_gb=-size) self.publish_service_capabilities(context) LOG.info(_LI("Deleted volume successfully."), resource=volume) def _clear_db(self, context, is_migrating_dest, volume_ref, status): # This method is called when driver.unmanage() or # driver.delete_volume() fails in delete_volume(), so it is already # in the exception handling part. if is_migrating_dest: volume_ref.destroy() LOG.error(_LE("Unable to delete the destination volume " "during volume migration, (NOTE: database " "record needs to be deleted)."), resource=volume_ref) else: volume_ref.status = status volume_ref.save() def create_snapshot(self, context, volume_id, snapshot): """Creates and exports the snapshot.""" context = context.elevated() self._notify_about_snapshot_usage( context, snapshot, "create.start") try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the snapshot status updated. utils.require_driver_initialized(self.driver) # Pass context so that drivers that want to use it, can, # but it is not a requirement for all drivers. snapshot.context = context model_update = self.driver.create_snapshot(snapshot) if model_update: snapshot.update(model_update) snapshot.save() except Exception: with excutils.save_and_reraise_exception(): snapshot.status = 'error' snapshot.save() vol_ref = self.db.volume_get(context, volume_id) if vol_ref.bootable: try: self.db.volume_glance_metadata_copy_to_snapshot( context, snapshot.id, volume_id) except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata # would be available for that volume in # volume glance metadata table pass except exception.CinderException as ex: LOG.exception(_LE("Failed updating snapshot" " metadata using the provided volumes" " %(volume_id)s metadata"), {'volume_id': volume_id}, resource=snapshot) snapshot.status = 'error' snapshot.save() raise exception.MetadataCopyFailure(reason=six.text_type(ex)) snapshot.status = 'available' snapshot.progress = '100%' snapshot.save() self._notify_about_snapshot_usage(context, snapshot, "create.end") LOG.info(_LI("Create snapshot completed successfully"), resource=snapshot) return snapshot.id @locked_snapshot_operation def delete_snapshot(self, context, snapshot, unmanage_only=False): """Deletes and unexports snapshot.""" context = context.elevated() snapshot._context = context project_id = snapshot.project_id self._notify_about_snapshot_usage( context, snapshot, "delete.start") try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the snapshot status updated. utils.require_driver_initialized(self.driver) # Pass context so that drivers that want to use it, can, # but it is not a requirement for all drivers. 
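# Illustrative sketch (not part of cinder): vol_utils.extract_host parses the
# 'host@backend#pool' naming convention relied on above for the allocated
# capacity bookkeeping. A simplified standalone version (the real helper also
# supports backend-level extraction and default pool names):
def extract_pool(host):
    """Return the pool part of 'host@backend#pool', or None if absent."""
    if host and '#' in host:
        return host.split('#', 1)[1]
    return None

# extract_pool('node1@lvmdriver-1#pool0') -> 'pool0'
# extract_pool('node1@lvmdriver-1')       -> None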
snapshot.context = context snapshot.save() if unmanage_only: self.driver.unmanage_snapshot(snapshot) else: self.driver.delete_snapshot(snapshot) except exception.SnapshotIsBusy: LOG.error(_LE("Delete snapshot failed, due to snapshot busy."), resource=snapshot) snapshot.status = 'available' snapshot.save() return except Exception: with excutils.save_and_reraise_exception(): snapshot.status = 'error_deleting' snapshot.save() # Get reservations try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': -1} else: reserve_opts = { 'snapshots': -1, 'gigabytes': -snapshot.volume_size, } volume_ref = self.db.volume_get(context, snapshot.volume_id) QUOTAS.add_volume_type_opts(context, reserve_opts, volume_ref.get('volume_type_id')) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE("Update snapshot usages failed."), resource=snapshot) self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id) snapshot.destroy() self._notify_about_snapshot_usage(context, snapshot, "delete.end") # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=project_id) LOG.info(_LI("Delete snapshot completed successfully"), resource=snapshot) def attach_volume(self, context, volume_id, instance_uuid, host_name, mountpoint, mode): """Updates db to show volume is attached.""" @utils.synchronized(volume_id, external=True) def do_attach(): # check the volume status before attaching volume = self.db.volume_get(context, volume_id) volume_metadata = self.db.volume_admin_metadata_get( context.elevated(), volume_id) if volume['status'] == 'attaching': if (volume_metadata.get('attached_mode') and volume_metadata.get('attached_mode') != mode): raise exception.InvalidVolume( reason=_("being attached by different mode")) if (volume['status'] == 'in-use' and not volume['multiattach'] and not volume['migration_status']): raise exception.InvalidVolume( reason=_("volume is already attached")) attachment = None host_name_sanitized = utils.sanitize_hostname( host_name) if host_name else None if instance_uuid: attachment = \ self.db.volume_attachment_get_by_instance_uuid( context, volume_id, instance_uuid) else: attachment = \ self.db.volume_attachment_get_by_host(context, volume_id, host_name_sanitized) if attachment is not None: self.db.volume_update(context, volume_id, {'status': 'in-use'}) return self._notify_about_volume_usage(context, volume, "attach.start") values = {'volume_id': volume_id, 'attach_status': 'attaching', } attachment = self.db.volume_attach(context.elevated(), values) volume_metadata = self.db.volume_admin_metadata_update( context.elevated(), volume_id, {"attached_mode": mode}, False) attachment_id = attachment['id'] if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): self.db.volume_attachment_update(context, attachment_id, {'attach_status': 'error_attaching'}) raise exception.InvalidUUID(uuid=instance_uuid) volume = self.db.volume_get(context, volume_id) if volume_metadata.get('readonly') == 'True' and mode != 'ro': self.db.volume_update(context, volume_id, {'status': 'error_attaching'}) raise exception.InvalidVolumeAttachMode(mode=mode, volume_id=volume_id) try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. 
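# Illustrative sketch (not part of cinder): the readonly guard applied in
# attach_volume() above. A volume whose admin metadata carries
# readonly='True' may only be attached with mode 'ro'; the real code raises
# InvalidVolumeAttachMode, a plain ValueError stands in here.
def check_attach_mode(admin_metadata, mode):
    """Reject read-write attaches of readonly volumes."""
    if admin_metadata.get('readonly') == 'True' and mode != 'ro':
        raise ValueError("readonly volume must be attached with mode 'ro'")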
utils.require_driver_initialized(self.driver) LOG.debug('Attaching volume %(volume_id)s to instance ' '%(instance)s at mountpoint %(mount)s on host ' '%(host)s.', {'volume_id': volume_id, 'instance': instance_uuid, 'mount': mountpoint, 'host': host_name_sanitized}, resource=volume) self.driver.attach_volume(context, volume, instance_uuid, host_name_sanitized, mountpoint) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_attachment_update( context, attachment_id, {'attach_status': 'error_attaching'}) volume = self.db.volume_attached(context.elevated(), attachment_id, instance_uuid, host_name_sanitized, mountpoint, mode) self._notify_about_volume_usage(context, volume, "attach.end") LOG.info(_LI("Attach volume completed successfully."), resource=volume) return self.db.volume_attachment_get(context, attachment_id) return do_attach() @locked_detach_operation def detach_volume(self, context, volume_id, attachment_id=None): """Updates db to show volume is detached.""" # TODO(vish): refactor this into a more general "unreserve" volume = self.db.volume_get(context, volume_id) attachment = None if attachment_id: try: attachment = self.db.volume_attachment_get(context, attachment_id) except exception.VolumeAttachmentNotFound: LOG.info(_LI("Volume detach called, but volume not attached."), resource=volume) # We need to make sure the volume status is set to the correct # status. It could be in detaching status now, and we don't # want to leave it there. self.db.volume_detached(context, volume_id, attachment_id) return else: # We can try and degrade gracefully here by trying to detach # a volume without the attachment_id here if the volume only has # one attachment. This is for backwards compatibility. attachments = self.db.volume_attachment_get_used_by_volume_id( context, volume_id) if len(attachments) > 1: # There are more than 1 attachments for this volume # we have to have an attachment id. msg = _("Detach volume failed: More than one attachment, " "but no attachment_id provided.") LOG.error(msg, resource=volume) raise exception.InvalidVolume(reason=msg) elif len(attachments) == 1: attachment = attachments[0] else: # there aren't any attachments for this volume. # so set the status to available and move on. LOG.info(_LI("Volume detach called, but volume not attached."), resource=volume) self.db.volume_update(context, volume_id, {'status': 'available', 'attach_status': 'detached'}) return self._notify_about_volume_usage(context, volume, "detach.start") try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. utils.require_driver_initialized(self.driver) LOG.debug('Detaching volume %(volume_id)s from instance ' '%(instance)s.', {'volume_id': volume_id, 'instance': attachment.get('instance_uuid')}, resource=volume) self.driver.detach_volume(context, volume, attachment) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_attachment_update( context, attachment.get('id'), {'attach_status': 'error_detaching'}) self.db.volume_detached(context.elevated(), volume_id, attachment.get('id')) self.db.volume_admin_metadata_delete(context.elevated(), volume_id, 'attached_mode') # NOTE(jdg): We used to do an ensure export here to # catch upgrades while volumes were attached (E->F) # this was necessary to convert in-use volumes from # int ID's to UUID's. 
Don't need this any longer # We're going to remove the export here # (delete the iscsi target) volume = self.db.volume_get(context, volume_id) try: utils.require_driver_initialized(self.driver) self.driver.remove_export(context.elevated(), volume) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Detach volume failed, due to " "uninitialized driver."), resource=volume) except Exception as ex: LOG.exception(_LE("Detach volume failed, due to " "remove-export failure."), resource=volume) raise exception.RemoveExportException(volume=volume_id, reason=six.text_type(ex)) self._notify_about_volume_usage(context, volume, "detach.end") LOG.info(_LI("Detach volume completed successfully."), resource=volume) def _create_image_cache_volume_entry(self, ctx, volume_ref, image_id, image_meta): """Create a new image-volume and cache entry for it. This assumes that the image has already been downloaded and stored in the volume described by the volume_ref. """ image_volume = None try: if not self.image_volume_cache.ensure_space( ctx, volume_ref['size'], volume_ref['host']): LOG.warning(_LW('Unable to ensure space for image-volume in' ' cache. Will skip creating entry for image' ' %(image)s on host %(host)s.'), {'image': image_id, 'host': volume_ref['host']}) return image_volume = self._clone_image_volume(ctx, volume_ref, image_meta) if not image_volume: LOG.warning(_LW('Unable to clone image_volume for image ' '%(image_id)s will not create cache entry.'), {'image_id': image_id}) return self.image_volume_cache.create_cache_entry( ctx, image_volume, image_id, image_meta ) except exception.CinderException as e: LOG.warning(_LW('Failed to create new image-volume cache entry.' ' Error: %(exception)s'), {'exception': e}) if image_volume: self.delete_volume(ctx, image_volume.id) def _clone_image_volume(self, ctx, volume, image_meta): volume_type_id = volume.get('volume_type_id') reserve_opts = {'volumes': 1, 'gigabytes': volume.size} QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id) reservations = QUOTAS.reserve(ctx, **reserve_opts) try: new_vol_values = {k: volume[k] for k in set(volume.keys()) - self._VOLUME_CLONE_SKIP_PROPERTIES} new_vol_values['volume_type_id'] = volume_type_id new_vol_values['attach_status'] = 'detached' new_vol_values['status'] = 'creating' new_vol_values['project_id'] = ctx.project_id new_vol_values['display_name'] = 'image-%s' % image_meta['id'] new_vol_values['source_volid'] = volume.id LOG.debug('Creating image volume entry: %s.', new_vol_values) image_volume = objects.Volume(context=ctx, **new_vol_values) image_volume.create() except Exception as ex: LOG.exception(_LE('Create clone_image_volume: %(volume_id)s' 'for image %(image_id)s, ' 'failed (Exception: %(except)s)'), {'volume_id': volume.id, 'image_id': image_meta['id'], 'except': ex}) QUOTAS.rollback(ctx, reservations) return False QUOTAS.commit(ctx, reservations, project_id=new_vol_values['project_id']) try: self.create_volume(ctx, image_volume.id, allow_reschedule=False) image_volume = self.db.volume_get(ctx, image_volume.id) if image_volume.status != 'available': raise exception.InvalidVolume(_('Volume is not available.')) self.db.volume_admin_metadata_update(ctx.elevated(), image_volume.id, {'readonly': 'True'}, False) return image_volume except exception.CinderException: LOG.exception(_LE('Failed to clone volume %(volume_id)s for ' 'image %(image_id)s.'), {'volume_id': volume.id, 'image_id': image_meta['id']}) try: self.delete_volume(ctx, image_volume.id) except 
exception.CinderException: LOG.exception(_LE('Could not delete the image volume %(id)s.'), {'id': volume.id}) return False def _clone_image_volume_and_add_location(self, ctx, volume, image_service, image_meta): """Create a cloned volume and register its location to the image.""" if (image_meta['disk_format'] != 'raw' or image_meta['container_format'] != 'bare'): return False image_volume_context = ctx if self.driver.configuration.image_upload_use_internal_tenant: internal_ctx = context.get_internal_tenant_context() if internal_ctx: image_volume_context = internal_ctx image_volume = self._clone_image_volume(image_volume_context, volume, image_meta) if not image_volume: return False uri = 'cinder://%s' % image_volume.id image_registered = None try: image_registered = image_service.add_location( ctx, image_meta['id'], uri, {}) except (exception.NotAuthorized, exception.Invalid, exception.NotFound): LOG.exception(_LE('Failed to register image volume location ' '%(uri)s.'), {'uri': uri}) if not image_registered: LOG.warning(_LW('Registration of image volume URI %(uri)s ' 'to image %(image_id)s failed.'), {'uri': uri, 'image_id': image_meta['id']}) try: self.delete_volume(image_volume_context, image_volume) except exception.CinderException: LOG.exception(_LE('Could not delete failed image volume ' '%(id)s.'), {'id': image_volume.id}) return False image_volume_meta = {'glance_image_id': image_meta['id'], 'image_owner': ctx.project_id} self.db.volume_metadata_update(image_volume_context, image_volume.id, image_volume_meta, False) return True def copy_volume_to_image(self, context, volume_id, image_meta): """Uploads the specified volume to Glance. image_meta is a dictionary containing the following keys: 'id', 'container_format', 'disk_format' """ payload = {'volume_id': volume_id, 'image_id': image_meta['id']} image_service = None try: volume = self.db.volume_get(context, volume_id) # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. 
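# Illustrative sketch (not part of cinder): registering a volume-backed image
# location, as in _clone_image_volume_and_add_location() above. Only raw/bare
# images qualify, and the location handed to the image service is a
# 'cinder://<volume-id>' URI plus an (empty) metadata dict:
def volume_backed_location(image_meta, image_volume_id):
    """Return (uri, metadata) for the image service, or None if ineligible."""
    if (image_meta.get('disk_format') != 'raw' or
            image_meta.get('container_format') != 'bare'):
        return None
    return 'cinder://%s' % image_volume_id, {}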
utils.require_driver_initialized(self.driver) image_service, image_id = \ glance.get_remote_image_service(context, image_meta['id']) if (self.driver.configuration.image_upload_use_cinder_backend and self._clone_image_volume_and_add_location( context, volume, image_service, image_meta)): LOG.debug("Registered image volume location to glance " "image-id: %(image_id)s.", {'image_id': image_meta['id']}, resource=volume) else: self.driver.copy_volume_to_image(context, volume, image_service, image_meta) LOG.debug("Uploaded volume to glance image-id: %(image_id)s.", {'image_id': image_meta['id']}, resource=volume) except Exception as error: LOG.error(_LE("Upload volume to image encountered an error " "(image-id: %(image_id)s)."), {'image_id': image_meta['id']}, resource=volume) if image_service is not None: # Deletes the image if it is in queued or saving state self._delete_image(context, image_meta['id'], image_service) with excutils.save_and_reraise_exception(): payload['message'] = six.text_type(error) finally: self.db.volume_update_status_based_on_attachment(context, volume_id) LOG.info(_LI("Copy volume to image completed successfully."), resource=volume) def _delete_image(self, context, image_id, image_service): """Deletes an image stuck in queued or saving state.""" try: image_meta = image_service.show(context, image_id) image_status = image_meta.get('status') if image_status == 'queued' or image_status == 'saving': LOG.warning(_LW("Deleting image in unexpected status: " "%(image_status)s."), {'image_status': image_status}, resource={'type': 'image', 'id': image_id}) image_service.delete(context, image_id) except Exception: LOG.warning(_LW("Image delete encountered an error."), exc_info=True, resource={'type': 'image', 'id': image_id}) def _driver_data_namespace(self): return self.driver.configuration.safe_get('driver_data_namespace') \ or self.driver.configuration.safe_get('volume_backend_name') \ or self.driver.__class__.__name__ def _get_driver_initiator_data(self, context, connector): data = None initiator = connector.get('initiator', False) if initiator: if not isinstance(initiator, six.string_types): msg = _('Invalid initiator value received') raise exception.InvalidInput(reason=msg) namespace = self._driver_data_namespace() try: data = self.db.driver_initiator_data_get( context, initiator, namespace ) except exception.CinderException: LOG.exception(_LE("Failed to get driver initiator data for" " initiator %(initiator)s and namespace" " %(namespace)s"), {'initiator': initiator, 'namespace': namespace}) raise return data def _save_driver_initiator_data(self, context, connector, model_update): if connector.get('initiator', False) and model_update: namespace = self._driver_data_namespace() try: self.db.driver_initiator_data_update(context, connector['initiator'], namespace, model_update) except exception.CinderException: LOG.exception(_LE("Failed to update initiator data for" " initiator %(initiator)s and backend" " %(backend)s"), {'initiator': connector['initiator'], 'backend': namespace}) raise def initialize_connection(self, context, volume_id, connector): """Prepare volume for connection from host represented by connector. This method calls the driver initialize_connection and returns it to the caller. The connector parameter is a dictionary with information about the host that will connect to the volume in the following format:: { 'ip': ip, 'initiator': initiator, } ip: the ip address of the connecting machine initiator: the iscsi initiator name of the connecting machine. 
This can be None if the connecting machine does not support iscsi connections. driver is responsible for doing any necessary security setup and returning a connection_info dictionary in the following format:: { 'driver_volume_type': driver_volume_type, 'data': data, } driver_volume_type: a string to identify the type of volume. This can be used by the calling code to determine the strategy for connecting to the volume. This could be 'iscsi', 'rbd', 'sheepdog', etc. data: this is the data that the calling code will use to connect to the volume. Keep in mind that this will be serialized to json in various places, so it should not contain any non-json data types. """ # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. utils.require_driver_initialized(self.driver) volume = self.db.volume_get(context, volume_id) model_update = None try: self.driver.validate_connector(connector) except exception.InvalidConnectorException as err: raise exception.InvalidInput(reason=six.text_type(err)) except Exception as err: err_msg = (_("Validate volume connection failed " "(error: %(err)s).") % {'err': six.text_type(err)}) LOG.error(err_msg, resource=volume) raise exception.VolumeBackendAPIException(data=err_msg) try: model_update = self.driver.create_export(context.elevated(), volume, connector) except exception.CinderException: err_msg = (_("Create export for volume failed.")) LOG.exception(err_msg, resource=volume) raise exception.VolumeBackendAPIException(data=err_msg) try: if model_update: volume = self.db.volume_update(context, volume_id, model_update) except exception.CinderException as ex: LOG.exception(_LE("Model update failed."), resource=volume) raise exception.ExportFailure(reason=six.text_type(ex)) initiator_data = self._get_driver_initiator_data(context, connector) try: if initiator_data: conn_info = self.driver.initialize_connection(volume, connector, initiator_data) else: conn_info = self.driver.initialize_connection(volume, connector) except Exception as err: err_msg = (_("Driver initialize connection failed " "(error: %(err)s).") % {'err': six.text_type(err)}) LOG.error(err_msg, resource=volume) self.driver.remove_export(context.elevated(), volume) raise exception.VolumeBackendAPIException(data=err_msg) initiator_update = conn_info.get('initiator_update', None) if initiator_update: self._save_driver_initiator_data(context, connector, initiator_update) del conn_info['initiator_update'] # Add qos_specs to connection info typeid = volume['volume_type_id'] specs = None if typeid: res = volume_types.get_volume_type_qos_specs(typeid) qos = res['qos_specs'] # only pass qos_specs that is designated to be consumed by # front-end, or both front-end and back-end. if qos and qos.get('consumer') in ['front-end', 'both']: specs = qos.get('specs') qos_spec = dict(qos_specs=specs) conn_info['data'].update(qos_spec) # Add access_mode to connection info volume_metadata = self.db.volume_admin_metadata_get(context.elevated(), volume_id) access_mode = volume_metadata.get('attached_mode') if access_mode is None: # NOTE(zhiyan): client didn't call 'os-attach' before access_mode = ('ro' if volume_metadata.get('readonly') == 'True' else 'rw') conn_info['data']['access_mode'] = access_mode # Add encrypted flag to connection_info if not set in the driver. 
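# Illustrative sketch (not part of cinder): the connection-info decoration
# performed above. qos specs reach the connector only when their consumer is
# the front end (or both), and access_mode falls back to the readonly flag
# when the client never called os-attach:
def decorate_conn_info(conn_info, qos, admin_metadata):
    specs = None
    if qos and qos.get('consumer') in ('front-end', 'both'):
        specs = qos.get('specs')
    conn_info['data']['qos_specs'] = specs
    access_mode = admin_metadata.get('attached_mode')
    if access_mode is None:
        access_mode = ('ro' if admin_metadata.get('readonly') == 'True'
                       else 'rw')
    conn_info['data']['access_mode'] = access_mode
    return conn_info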
if conn_info['data'].get('encrypted') is None: encrypted = bool(volume.get('encryption_key_id')) conn_info['data']['encrypted'] = encrypted # Add discard flag to connection_info if not set in the driver and # configured to be reported. if conn_info['data'].get('discard') is None: discard_supported = (self.driver.configuration .safe_get('report_discard_supported')) if discard_supported: conn_info['data']['discard'] = True LOG.info(_LI("Initialize volume connection completed successfully."), resource=volume) return conn_info def terminate_connection(self, context, volume_id, connector, force=False): """Cleanup connection from host represented by connector. The format of connector is the same as for initialize_connection. """ # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. utils.require_driver_initialized(self.driver) volume_ref = self.db.volume_get(context, volume_id) try: self.driver.terminate_connection(volume_ref, connector, force=force) except Exception as err: err_msg = (_('Terminate volume connection failed: %(err)s') % {'err': six.text_type(err)}) LOG.error(err_msg, resource=volume_ref) raise exception.VolumeBackendAPIException(data=err_msg) LOG.info(_LI("Terminate volume connection completed successfully."), resource=volume_ref) def remove_export(self, context, volume_id): """Removes an export for a volume.""" utils.require_driver_initialized(self.driver) volume_ref = self.db.volume_get(context, volume_id) try: self.driver.remove_export(context, volume_ref) except Exception: msg = _("Remove volume export failed.") LOG.exception(msg, resource=volume_ref) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("Remove volume export completed successfully."), resource=volume_ref) def accept_transfer(self, context, volume_id, new_user, new_project): # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. 
utils.require_driver_initialized(self.driver) # NOTE(jdg): need elevated context as we haven't "given" the vol # yet volume_ref = self.db.volume_get(context.elevated(), volume_id) # NOTE(jdg): Some drivers tie provider info (CHAP) to tenant # for those that do allow them to return updated model info model_update = self.driver.accept_transfer(context, volume_ref, new_user, new_project) if model_update: try: self.db.volume_update(context.elevated(), volume_id, model_update) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Update volume model for " "transfer operation failed."), resource=volume_ref) self.db.volume_update(context.elevated(), volume_id, {'status': 'error'}) LOG.info(_LI("Transfer volume completed successfully."), resource=volume_ref) return model_update def _connect_device(self, conn): use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn) vol_handle = connector.connect_volume(conn['data']) root_access = True if not connector.check_valid_device(vol_handle['path'], root_access): if isinstance(vol_handle['path'], six.string_types): raise exception.DeviceUnavailable( path=vol_handle['path'], reason=(_("Unable to access the backend storage via the " "path %(path)s.") % {'path': vol_handle['path']})) else: raise exception.DeviceUnavailable( path=None, reason=(_("Unable to access the backend storage via file " "handle."))) return {'conn': conn, 'device': vol_handle, 'connector': connector} def _attach_volume(self, ctxt, volume, properties, remote=False): status = volume['status'] if remote: rpcapi = volume_rpcapi.VolumeAPI() try: conn = rpcapi.initialize_connection(ctxt, volume, properties) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to attach volume %(vol)s."), {'vol': volume['id']}) self.db.volume_update(ctxt, volume['id'], {'status': status}) else: conn = self.initialize_connection(ctxt, volume['id'], properties) return self._connect_device(conn) def _detach_volume(self, ctxt, attach_info, volume, properties, force=False, remote=False): connector = attach_info['connector'] connector.disconnect_volume(attach_info['conn']['data'], attach_info['device']) if remote: rpcapi = volume_rpcapi.VolumeAPI() rpcapi.terminate_connection(ctxt, volume, properties, force=force) rpcapi.remove_export(ctxt, volume) else: try: self.terminate_connection(ctxt, volume['id'], properties, force=force) self.remove_export(ctxt, volume['id']) except Exception as err: with excutils.save_and_reraise_exception(): LOG.error(_LE('Unable to terminate volume connection: ' '%(err)s.') % {'err': err}) def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None): """Copy data from src_vol to dest_vol.""" LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.', {'src': src_vol['name'], 'dest': dest_vol['name']}) properties = utils.brick_get_connector_properties() dest_remote = remote in ['dest', 'both'] dest_attach_info = self._attach_volume(ctxt, dest_vol, properties, remote=dest_remote) try: src_remote = remote in ['src', 'both'] src_attach_info = self._attach_volume(ctxt, src_vol, properties, remote=src_remote) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to attach source volume for copy.")) self._detach_volume(ctxt, dest_attach_info, 
dest_vol, properties, remote=dest_remote) # Check the backend capabilities of migration destination host. rpcapi = volume_rpcapi.VolumeAPI() capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'], False) sparse_copy_volume = bool(capabilities and capabilities.get('sparse_copy_volume', False)) copy_error = True try: size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB vol_utils.copy_volume(src_attach_info['device']['path'], dest_attach_info['device']['path'], size_in_mb, self.configuration.volume_dd_blocksize, sparse=sparse_copy_volume) copy_error = False except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."), {'src': src_vol['id'], 'dest': dest_vol['id']}) finally: try: self._detach_volume(ctxt, dest_attach_info, dest_vol, properties, force=copy_error, remote=dest_remote) finally: self._detach_volume(ctxt, src_attach_info, src_vol, properties, force=copy_error, remote=src_remote) def _migrate_volume_generic(self, ctxt, volume, host, new_type_id): rpcapi = volume_rpcapi.VolumeAPI() # Create new volume on remote host skip = self._VOLUME_CLONE_SKIP_PROPERTIES | {'host'} new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip} if new_type_id: new_vol_values['volume_type_id'] = new_type_id new_volume = objects.Volume( context=ctxt, host=host['host'], status='creating', attach_status='detached', migration_status='target:%s' % volume['id'], **new_vol_values ) new_volume.create() rpcapi.create_volume(ctxt, new_volume, host['host'], None, None, allow_reschedule=False) # Wait for new_volume to become ready starttime = time.time() deadline = starttime + CONF.migration_create_volume_timeout_secs # TODO(thangp): Replace get_by_id with refresh when it is available new_volume = objects.Volume.get_by_id(ctxt, new_volume.id) tries = 0 while new_volume.status != 'available': tries += 1 now = time.time() if new_volume.status == 'error': msg = _("failed to create new_volume on destination host") self._clean_temporary_volume(ctxt, volume, new_volume, clean_db_only=True) raise exception.VolumeMigrationFailed(reason=msg) elif now > deadline: msg = _("timeout creating new_volume on destination host") self._clean_temporary_volume(ctxt, volume, new_volume, clean_db_only=True) raise exception.VolumeMigrationFailed(reason=msg) else: time.sleep(tries ** 2) # TODO(thangp): Replace get_by_id with refresh when it is # available new_volume = objects.Volume.get_by_id(ctxt, new_volume.id) # Copy the source volume to the destination volume try: attachments = volume.volume_attachment if not attachments: # Pre- and post-copy driver-specific actions self.driver.before_volume_copy(ctxt, volume, new_volume, remote='dest') self._copy_volume_data(ctxt, volume, new_volume, remote='dest') self.driver.after_volume_copy(ctxt, volume, new_volume, remote='dest') # The above call is synchronous so we complete the migration self.migrate_volume_completion(ctxt, volume.id, new_volume.id, error=False) else: nova_api = compute.API() # This is an async call to Nova, which will call the completion # when it's done for attachment in attachments: instance_uuid = attachment['instance_uuid'] nova_api.update_server_volume(ctxt, instance_uuid, volume.id, new_volume.id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE( "Failed to copy volume %(vol1)s to %(vol2)s"), { 'vol1': volume.id, 'vol2': new_volume.id}) self._clean_temporary_volume(ctxt, volume, new_volume) def _clean_temporary_volume(self, ctxt, volume, new_volume, 
                               clean_db_only=False):
        # If we're in the migrating phase, we need to cleanup
        # destination volume because source volume is remaining
        if volume.migration_status == 'migrating':
            try:
                if clean_db_only:
                    # The temporary volume is not created, only DB data
                    # is created
                    new_volume.destroy()
                else:
                    # The temporary volume is already created
                    rpcapi = volume_rpcapi.VolumeAPI()
                    rpcapi.delete_volume(ctxt, new_volume)
            except exception.VolumeNotFound:
                LOG.info(_LI("Couldn't find the temporary volume "
                             "%(vol)s in the database. There is no need "
                             "to clean up this volume."),
                         {'vol': new_volume.id})
        else:
            # If we're in the completing phase don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in database should
            # be cleared to handle volume after migration failure
            try:
                new_volume.migration_status = None
                new_volume.save()
            except exception.VolumeNotFound:
                LOG.info(_LI("Couldn't find destination volume "
                             "%(vol)s in the database. The entry might be "
                             "successfully deleted during migration "
                             "completion phase."),
                         {'vol': new_volume.id})

            LOG.warning(_LW("Failed to migrate volume. The destination "
                            "volume %(vol)s is not deleted since the "
                            "source volume may have been deleted."),
                        {'vol': new_volume.id})

    def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
                                  error=False, volume=None, new_volume=None):
        # FIXME(thangp): Remove this in v2.0 of RPC API.
        if volume is None or new_volume is None:
            # For older clients, mimic the old behavior and look up the volume
            # by its volume_id.
            volume = objects.Volume.get_by_id(ctxt, volume_id)
            new_volume = objects.Volume.get_by_id(ctxt, new_volume_id)

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()

        LOG.debug("migrate_volume_completion: completing migration for "
                  "volume %(vol1)s (temporary volume %(vol2)s)",
                  {'vol1': volume.id, 'vol2': new_volume.id})
        rpcapi = volume_rpcapi.VolumeAPI()

        orig_volume_status = volume.previous_status

        if error:
            LOG.info(_LI("migrate_volume_completion is cleaning up an error "
                         "for volume %(vol1)s (temporary volume %(vol2)s)"),
                     {'vol1': volume['id'], 'vol2': new_volume.id})
            rpcapi.delete_volume(ctxt, new_volume)
            updates = {'migration_status': 'error',
                       'status': orig_volume_status}
            volume.update(updates)
            volume.save()
            return volume.id

        volume.migration_status = 'completing'
        volume.save()

        # Detach the source volume (if it fails, don't fail the migration)
        try:
            if orig_volume_status == 'in-use':
                attachments = volume.volume_attachment
                for attachment in attachments:
                    self.detach_volume(ctxt, volume.id, attachment['id'])
        except Exception as ex:
            LOG.error(_LE("Detach migration source volume failed: %(err)s"),
                      {'err': ex}, resource=volume)

        # Give driver (new_volume) a chance to update things as needed
        # after a successful migration.
        # Note this needs to go through rpc to the host of the new volume
        # the current host and driver object is for the "existing" volume.
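# Illustrative sketch (not part of cinder): the destination-volume wait loop
# of _migrate_volume_generic() above, restated standalone. The sleep grows
# quadratically with the number of polls and the whole wait is bounded by
# migration_create_volume_timeout_secs:
import time

def wait_until_available(get_status, timeout_secs):
    """Poll get_status() until 'available'; raise on 'error' or timeout."""
    deadline = time.time() + timeout_secs
    tries = 0
    while True:
        status = get_status()
        if status == 'available':
            return
        tries += 1
        if status == 'error':
            raise RuntimeError('failed to create new_volume on destination')
        if time.time() > deadline:
            raise RuntimeError('timeout creating new_volume on destination')
        time.sleep(tries ** 2)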
rpcapi.update_migrated_volume(ctxt, volume, new_volume, orig_volume_status) volume.refresh() new_volume.refresh() # Swap src and dest DB records so we can continue using the src id and # asynchronously delete the destination id updated_new = volume.finish_volume_migration(new_volume) updates = {'status': orig_volume_status, 'previous_status': volume.status, 'migration_status': 'success'} if orig_volume_status == 'in-use': attachments = volume.volume_attachment for attachment in attachments: rpcapi.attach_volume(ctxt, volume, attachment['instance_uuid'], attachment['attached_host'], attachment['mountpoint'], 'rw') volume.update(updates) volume.save() # Asynchronous deletion of the source volume in the back-end (now # pointed by the target volume id) try: rpcapi.delete_volume(ctxt, updated_new) except Exception as ex: LOG.error(_LE('Failed to request async delete of migration source ' 'vol %(vol)s: %(err)s'), {'vol': volume.id, 'err': ex}) LOG.info(_LI("Complete-Migrate volume completed successfully."), resource=volume) return volume.id def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False, new_type_id=None, volume=None): """Migrate the volume to the specified host (called on source host).""" # FIXME(thangp): Remove this in v2.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the volume # by its volume_id. volume = objects.Volume.get_by_id(ctxt, volume_id) try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the migration status updated. utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): volume.migration_status = 'error' volume.save() model_update = None moved = False status_update = None if volume.status in ('retyping', 'maintenance'): status_update = {'status': volume.previous_status} volume.migration_status = 'migrating' volume.save() if not force_host_copy and new_type_id is None: try: LOG.debug("Issue driver.migrate_volume.", resource=volume) moved, model_update = self.driver.migrate_volume(ctxt, volume, host) if moved: updates = {'host': host['host'], 'migration_status': 'success', 'previous_status': volume.status} if status_update: updates.update(status_update) if model_update: updates.update(model_update) volume.update(updates) volume.save() except Exception: with excutils.save_and_reraise_exception(): updates = {'migration_status': 'error'} if status_update: updates.update(status_update) volume.update(updates) volume.save() if not moved: try: self._migrate_volume_generic(ctxt, volume, host, new_type_id) except Exception: with excutils.save_and_reraise_exception(): updates = {'migration_status': 'error'} if status_update: updates.update(status_update) volume.update(updates) volume.save() LOG.info(_LI("Migrate volume completed successfully."), resource=volume) @periodic_task.periodic_task def _report_driver_status(self, context): if not self.driver.initialized: if self.driver.configuration.config_group is None: config_group = '' else: config_group = ('(config name %s)' % self.driver.configuration.config_group) LOG.warning(_LW("Update driver status failed: %(config_group)s " "is uninitialized."), {'config_group': config_group}, resource={'type': 'driver', 'id': self.driver.__class__.__name__}) else: volume_stats = self.driver.get_volume_stats(refresh=True) if self.extra_capabilities: volume_stats.update(self.extra_capabilities) if volume_stats: # Append volume stats with 'allocated_capacity_gb' 
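# ---------------------------------------------------------------------------
# Illustrative aside: _append_volume_stats (continued below) and
# extend_volume both keep a per-pool 'allocated_capacity_gb' counter in
# self.stats['pools'], creating the pool entry lazily on KeyError. The same
# bookkeeping in a standalone, runnable form; 'stats' is a plain dict
# standing in for self.stats.
# ---------------------------------------------------------------------------
def add_allocated_capacity(stats, pool_name, size_gb):
    """Accumulate allocated GiB per pool, creating entries on first use."""
    pools = stats.setdefault('pools', {})
    try:
        pools[pool_name]['allocated_capacity_gb'] += size_gb
    except KeyError:
        pools[pool_name] = dict(allocated_capacity_gb=size_gb)


_stats = {}
add_allocated_capacity(_stats, 'pool1', 10)
add_allocated_capacity(_stats, 'pool1', 5)
assert _stats['pools']['pool1']['allocated_capacity_gb'] == 15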
self._append_volume_stats(volume_stats) # Append filter and goodness function if needed volume_stats = ( self._append_filter_goodness_functions(volume_stats)) # queue it to be sent to the Schedulers. self.update_service_capabilities(volume_stats) def _append_volume_stats(self, vol_stats): pools = vol_stats.get('pools', None) if pools and isinstance(pools, list): for pool in pools: pool_name = pool['pool_name'] try: pool_stats = self.stats['pools'][pool_name] except KeyError: # Pool not found in volume manager pool_stats = dict(allocated_capacity_gb=0) pool.update(pool_stats) def _append_filter_goodness_functions(self, volume_stats): """Returns volume_stats updated as needed.""" # Append filter_function if needed if 'filter_function' not in volume_stats: volume_stats['filter_function'] = ( self.driver.get_filter_function()) # Append goodness_function if needed if 'goodness_function' not in volume_stats: volume_stats['goodness_function'] = ( self.driver.get_goodness_function()) return volume_stats def publish_service_capabilities(self, context): """Collect driver status and then publish.""" self._report_driver_status(context) self._publish_service_capabilities(context) def _notify_about_volume_usage(self, context, volume, event_suffix, extra_usage_info=None): vol_utils.notify_about_volume_usage( context, volume, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def _notify_about_snapshot_usage(self, context, snapshot, event_suffix, extra_usage_info=None): vol_utils.notify_about_snapshot_usage( context, snapshot, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def _notify_about_consistencygroup_usage(self, context, group, event_suffix, volumes=None, extra_usage_info=None): vol_utils.notify_about_consistencygroup_usage( context, group, event_suffix, extra_usage_info=extra_usage_info, host=self.host) if not volumes: volumes = self.db.volume_get_all_by_group(context, group.id) if volumes: for volume in volumes: vol_utils.notify_about_volume_usage( context, volume, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def _notify_about_cgsnapshot_usage(self, context, cgsnapshot, event_suffix, snapshots=None, extra_usage_info=None): vol_utils.notify_about_cgsnapshot_usage( context, cgsnapshot, event_suffix, extra_usage_info=extra_usage_info, host=self.host) if not snapshots: snapshots = objects.SnapshotList.get_all_for_cgsnapshot( context, cgsnapshot.id) if snapshots: for snapshot in snapshots: vol_utils.notify_about_snapshot_usage( context, snapshot, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def extend_volume(self, context, volume_id, new_size, reservations, volume=None): # FIXME(thangp): Remove this in v2.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the volume # by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught # and the volume status updated. 
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.status = 'error_extending'
                volume.save()

        project_id = volume.project_id
        size_increase = (int(new_size)) - volume.size
        self._notify_about_volume_usage(context, volume, "resize.start")
        try:
            self.driver.extend_volume(volume, new_size)
        except Exception:
            LOG.exception(_LE("Extend volume failed."), resource=volume)
            try:
                self.db.volume_update(context, volume.id,
                                      {'status': 'error_extending'})
                raise exception.CinderException(_("Volume %s: Error trying "
                                                  "to extend volume") %
                                                volume.id)
            finally:
                QUOTAS.rollback(context, reservations,
                                project_id=project_id)
                return

        QUOTAS.commit(context, reservations, project_id=project_id)
        volume.update({'size': int(new_size), 'status': 'available'})
        volume.save()
        pool = vol_utils.extract_host(volume.host, 'pool')
        if pool is None:
            # Legacy volume, put them into default pool
            pool = self.driver.configuration.safe_get(
                'volume_backend_name') or vol_utils.extract_host(
                    volume.host, 'pool', True)

        try:
            self.stats['pools'][pool]['allocated_capacity_gb'] += (
                size_increase)
        except KeyError:
            self.stats['pools'][pool] = dict(
                allocated_capacity_gb=size_increase)

        self._notify_about_volume_usage(
            context, volume, "resize.end",
            extra_usage_info={'size': int(new_size)})
        LOG.info(_LI("Extend volume completed successfully."),
                 resource=volume)

    def retype(self, ctxt, volume_id, new_type_id, host,
               migration_policy='never', reservations=None,
               volume=None, old_reservations=None):

        def _retype_error(context, volume, old_reservations,
                          new_reservations, status_update):
            try:
                volume.update(status_update)
                volume.save()
            finally:
                QUOTAS.rollback(context, old_reservations)
                QUOTAS.rollback(context, new_reservations)

        context = ctxt.elevated()

        # FIXME(thangp): Remove this in v2.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the volume
            # by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        status_update = {'status': volume.previous_status}
        if context.project_id != volume.project_id:
            project_id = volume.project_id
        else:
            project_id = context.project_id

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                # NOTE(flaper87): Other exceptions in this method don't
                # set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
                # for now.
                volume.update(status_update)
                volume.save()

        # If old_reservations has been passed in from the API, we should
        # skip quotas.
        # TODO(ntpttr): These reservation checks are left in to be backwards
        # compatible with Liberty and can be removed in N.
        if not old_reservations:
            # Get old reservations
            try:
                reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.volume_type_id)
                # NOTE(wanghao): We don't need to reserve volumes and
                # gigabytes quota for the retype operation since they didn't
                # change; just reserving volume_type and type gigabytes
                # is fine.
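# ---------------------------------------------------------------------------
# Illustrative aside: extend_volume and retype follow the usual Cinder quota
# protocol - reservations made up front are committed on success and rolled
# back on any failure. A minimal sketch of that commit-or-rollback shape;
# 'quotas' is a stand-in object here, not the real cinder.quota engine.
# ---------------------------------------------------------------------------
def apply_with_reservations(quotas, context, reservations, do_change):
    """Run do_change(); commit reservations on success, else roll back
    before re-raising the original error."""
    try:
        do_change()
    except Exception:
        quotas.rollback(context, reservations)
        raise
    quotas.commit(context, reservations)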
reserve_opts.pop('volumes') reserve_opts.pop('gigabytes') old_reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: volume.update(status_update) volume.save() msg = _("Failed to update quota usage while retyping volume.") LOG.exception(msg, resource=volume) raise exception.CinderException(msg) # We already got the new reservations new_reservations = reservations # If volume types have the same contents, no need to do anything retyped = False diff, all_equal = volume_types.volume_types_diff( context, volume.volume_type_id, new_type_id) if all_equal: retyped = True # Call driver to try and change the type retype_model_update = None # NOTE(jdg): Check to see if the destination host is the same # as the current. If it's not don't call the driver.retype # method, otherwise drivers that implement retype may report # success, but it's invalid in the case of a migrate. # We assume that those that support pools do this internally # so we strip off the pools designation if (not retyped and vol_utils.hosts_are_equivalent(self.driver.host, host['host'])): try: new_type = volume_types.get_volume_type(context, new_type_id) ret = self.driver.retype(context, volume, new_type, diff, host) # Check if the driver retype provided a model update or # just a retype indication if type(ret) == tuple: retyped, retype_model_update = ret else: retyped = ret if retyped: LOG.info(_LI("Volume %s: retyped successfully"), volume.id) except Exception: retyped = False LOG.exception(_LE("Volume %s: driver error when trying to " "retype, falling back to generic " "mechanism."), volume.id) # We could not change the type, so we need to migrate the volume, where # the destination volume will be of the new type if not retyped: if migration_policy == 'never': _retype_error(context, volume, old_reservations, new_reservations, status_update) msg = _("Retype requires migration but is not allowed.") raise exception.VolumeMigrationFailed(reason=msg) snaps = objects.SnapshotList.get_all_for_volume(context, volume.id) if snaps: _retype_error(context, volume, old_reservations, new_reservations, status_update) msg = _("Volume must not have snapshots.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Don't allow volume with replicas to be migrated rep_status = volume.replication_status if rep_status is not None and rep_status != 'disabled': _retype_error(context, volume, old_reservations, new_reservations, status_update) msg = _("Volume must not be replicated.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) volume.migration_status = 'starting' volume.save() try: self.migrate_volume(context, volume.id, host, new_type_id=new_type_id) except Exception: with excutils.save_and_reraise_exception(): _retype_error(context, volume, old_reservations, new_reservations, status_update) else: model_update = {'volume_type_id': new_type_id, 'host': host['host'], 'status': status_update['status']} if retype_model_update: model_update.update(retype_model_update) volume.update(model_update) volume.save() if old_reservations: QUOTAS.commit(context, old_reservations, project_id=project_id) if new_reservations: QUOTAS.commit(context, new_reservations, project_id=project_id) self._notify_about_volume_usage( context, volume, "retype", extra_usage_info={'volume_type': new_type_id}) self.publish_service_capabilities(context) LOG.info(_LI("Retype volume completed successfully."), resource=volume) def manage_existing(self, ctxt, volume_id, ref=None): try: flow_engine = manage_existing.get_flow( ctxt, 
self.db, self.driver, self.host, volume_id, ref) except Exception: msg = _("Failed to create manage_existing flow.") LOG.exception(msg, resource={'type': 'volume', 'id': volume_id}) raise exception.CinderException(msg) with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() # Fetch created volume from storage vol_ref = flow_engine.storage.fetch('volume') # Update volume stats pool = vol_utils.extract_host(vol_ref['host'], 'pool') if pool is None: # Legacy volume, put them into default pool pool = self.driver.configuration.safe_get( 'volume_backend_name') or vol_utils.extract_host( vol_ref['host'], 'pool', True) try: self.stats['pools'][pool]['allocated_capacity_gb'] \ += vol_ref['size'] except KeyError: self.stats['pools'][pool] = dict( allocated_capacity_gb=vol_ref['size']) LOG.info(_LI("Manage existing volume completed successfully."), resource=vol_ref) return vol_ref['id'] def promote_replica(self, ctxt, volume_id): """Promote volume replica secondary to be the primary volume.""" volume = self.db.volume_get(ctxt, volume_id) model_update = None try: utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Promote volume replica failed."), resource=volume) try: model_update = self.driver.promote_replica(ctxt, volume) except exception.CinderException: err_msg = (_('Error promoting secondary volume to primary')) raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) try: if model_update: volume = self.db.volume_update(ctxt, volume_id, model_update) except exception.CinderException: err_msg = (_("Failed updating model" " with driver provided model %(model)s") % {'model': model_update}) raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) LOG.info(_LI("Promote volume replica completed successfully."), resource=volume) def reenable_replication(self, ctxt, volume_id): """Re-enable replication of secondary volume with primary volumes.""" volume = self.db.volume_get(ctxt, volume_id) model_update = None try: utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Sync volume replica failed."), resource=volume) try: model_update = self.driver.reenable_replication(ctxt, volume) except exception.CinderException: err_msg = (_("Synchronizing secondary volume to primary failed.")) raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) try: if model_update: volume = self.db.volume_update(ctxt, volume_id, model_update) except exception.CinderException: err_msg = (_("Failed updating model" " with driver provided model %(model)s") % {'model': model_update}) raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) def _update_replication_relationship_status(self, ctxt): # Only want volumes that do not have a 'disabled' replication status filters = {'replication_status': ['active', 'copying', 'error', 'active-stopped', 'inactive']} volumes = self.db.volume_get_all_by_host(ctxt, self.host, filters=filters) for vol in volumes: model_update = None try: model_update = self.driver.get_replication_status( ctxt, vol) if model_update: self.db.volume_update(ctxt, vol['id'], model_update) except Exception: LOG.exception(_LE("Get replication status for volume failed."), resource=vol) def create_consistencygroup(self, context, group): """Creates the consistency group.""" context = context.elevated() status = fields.ConsistencyGroupStatus.AVAILABLE model_update 
= None self._notify_about_consistencygroup_usage( context, group, "create.start") try: utils.require_driver_initialized(self.driver) LOG.info(_LI("Consistency group %s: creating"), group.name) model_update = self.driver.create_consistencygroup(context, group) if model_update: if (model_update['status'] == fields.ConsistencyGroupStatus.ERROR): msg = (_('Create consistency group failed.')) LOG.error(msg, resource={'type': 'consistency_group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = fields.ConsistencyGroupStatus.ERROR group.save() LOG.error(_LE("Consistency group %s: create failed"), group.name) group.status = status group.created_at = timeutils.utcnow() group.save() LOG.info(_LI("Consistency group %s: created successfully"), group.name) self._notify_about_consistencygroup_usage( context, group, "create.end") LOG.info(_LI("Create consistency group completed successfully."), resource={'type': 'consistency_group', 'id': group.id}) return group def create_consistencygroup_from_src(self, context, group, cgsnapshot=None, source_cg=None): """Creates the consistency group from source. The source can be a CG snapshot or a source CG. """ source_name = None snapshots = None source_vols = None try: volumes = self.db.volume_get_all_by_group(context, group.id) if cgsnapshot: try: # Check if cgsnapshot still exists cgsnapshot = objects.CGSnapshot.get_by_id( context, cgsnapshot.id) except exception.CgSnapshotNotFound: LOG.error(_LE("Create consistency group " "from snapshot-%(snap)s failed: " "SnapshotNotFound."), {'snap': cgsnapshot.id}, resource={'type': 'consistency_group', 'id': group.id}) raise source_name = _("snapshot-%s") % cgsnapshot.id snapshots = objects.SnapshotList.get_all_for_cgsnapshot( context, cgsnapshot.id) for snap in snapshots: if (snap.status not in VALID_CREATE_CG_SRC_SNAP_STATUS): msg = (_("Cannot create consistency group " "%(group)s because snapshot %(snap)s is " "not in a valid state. Valid states are: " "%(valid)s.") % {'group': group.id, 'snap': snap['id'], 'valid': VALID_CREATE_CG_SRC_SNAP_STATUS}) raise exception.InvalidConsistencyGroup(reason=msg) if source_cg: try: source_cg = objects.ConsistencyGroup.get_by_id( context, source_cg.id) except exception.ConsistencyGroupNotFound: LOG.error(_LE("Create consistency group " "from source cg-%(cg)s failed: " "ConsistencyGroupNotFound."), {'cg': source_cg.id}, resource={'type': 'consistency_group', 'id': group.id}) raise source_name = _("cg-%s") % source_cg.id source_vols = self.db.volume_get_all_by_group( context, source_cg.id) for source_vol in source_vols: if (source_vol['status'] not in VALID_CREATE_CG_SRC_CG_STATUS): msg = (_("Cannot create consistency group " "%(group)s because source volume " "%(source_vol)s is not in a valid " "state. Valid states are: " "%(valid)s.") % {'group': group.id, 'source_vol': source_vol['id'], 'valid': VALID_CREATE_CG_SRC_CG_STATUS}) raise exception.InvalidConsistencyGroup(reason=msg) # Sort source snapshots so that they are in the same order as their # corresponding target volumes. sorted_snapshots = None if cgsnapshot and snapshots: sorted_snapshots = self._sort_snapshots(volumes, snapshots) # Sort source volumes so that they are in the same order as their # corresponding target volumes. 
sorted_source_vols = None if source_cg and source_vols: sorted_source_vols = self._sort_source_vols(volumes, source_vols) self._notify_about_consistencygroup_usage( context, group, "create.start") utils.require_driver_initialized(self.driver) model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src( context, group, volumes, cgsnapshot, sorted_snapshots, source_cg, sorted_source_vols)) if volumes_model_update: for update in volumes_model_update: self.db.volume_update(context, update['id'], update) if model_update: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = 'error' group.save() LOG.error(_LE("Create consistency group " "from source %(source)s failed."), {'source': source_name}, resource={'type': 'consistency_group', 'id': group.id}) # Update volume status to 'error' as well. for vol in volumes: self.db.volume_update( context, vol['id'], {'status': 'error'}) now = timeutils.utcnow() status = 'available' for vol in volumes: update = {'status': status, 'created_at': now} self._update_volume_from_src(context, vol, update, group=group) self._update_allocated_capacity(vol) group.status = status group.created_at = now group.save() self._notify_about_consistencygroup_usage( context, group, "create.end") LOG.info(_LI("Create consistency group " "from source-%(source)s completed successfully."), {'source': source_name}, resource={'type': 'consistency_group', 'id': group.id}) return group def _sort_snapshots(self, volumes, snapshots): # Sort source snapshots so that they are in the same order as their # corresponding target volumes. Each source snapshot in the snapshots # list should have a corresponding target volume in the volumes list. if not volumes or not snapshots or len(volumes) != len(snapshots): msg = _("Input volumes or snapshots are invalid.") LOG.error(msg) raise exception.InvalidInput(reason=msg) sorted_snapshots = [] for vol in volumes: found_snaps = [snap for snap in snapshots if snap['id'] == vol['snapshot_id']] if not found_snaps: LOG.error(_LE("Source snapshot cannot be found for target " "volume %(volume_id)s."), {'volume_id': vol['id']}) raise exception.SnapshotNotFound( snapshot_id=vol['snapshot_id']) sorted_snapshots.extend(found_snaps) return sorted_snapshots def _sort_source_vols(self, volumes, source_vols): # Sort source volumes so that they are in the same order as their # corresponding target volumes. Each source volume in the source_vols # list should have a corresponding target volume in the volumes list. 
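# ---------------------------------------------------------------------------
# Illustrative aside: _sort_snapshots above and _sort_source_vols (continued
# below) both reorder the source list to match the target volumes, pairing
# each target with the source whose id equals the target's snapshot_id or
# source_volid. The same alignment in a runnable, standalone form; the key
# names here are illustrative.
# ---------------------------------------------------------------------------
def align_sources(targets, sources, fk='snapshot_id'):
    """Return sources reordered so sources[i] matches targets[i][fk]."""
    by_id = {src['id']: src for src in sources}
    try:
        return [by_id[tgt[fk]] for tgt in targets]
    except KeyError as missing:
        raise LookupError('no source found for %s' % missing)


_vols = [{'id': 'v1', 'snapshot_id': 's2'}, {'id': 'v2', 'snapshot_id': 's1'}]
_snaps = [{'id': 's1'}, {'id': 's2'}]
assert [s['id'] for s in align_sources(_vols, _snaps)] == ['s2', 's1']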
if not volumes or not source_vols or len(volumes) != len(source_vols): msg = _("Input volumes or source volumes are invalid.") LOG.error(msg) raise exception.InvalidInput(reason=msg) sorted_source_vols = [] for vol in volumes: found_source_vols = [source_vol for source_vol in source_vols if source_vol['id'] == vol['source_volid']] if not found_source_vols: LOG.error(_LE("Source volumes cannot be found for target " "volume %(volume_id)s."), {'volume_id': vol['id']}) raise exception.VolumeNotFound( volume_id=vol['source_volid']) sorted_source_vols.extend(found_source_vols) return sorted_source_vols def _update_volume_from_src(self, context, vol, update, group=None): try: snapshot_id = vol.get('snapshot_id') if snapshot_id: snapshot = objects.Snapshot.get_by_id(context, snapshot_id) orig_vref = self.db.volume_get(context, snapshot.volume_id) if orig_vref.bootable: update['bootable'] = True self.db.volume_glance_metadata_copy_to_volume( context, vol['id'], snapshot_id) except exception.SnapshotNotFound: LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."), {'snapshot_id': vol['snapshot_id']}) self.db.volume_update(context, vol['id'], {'status': 'error'}) if group: group.status = 'error' group.save() raise except exception.VolumeNotFound: LOG.error(_LE("The source volume %(volume_id)s " "cannot be found."), {'volume_id': snapshot.volume_id}) self.db.volume_update(context, vol['id'], {'status': 'error'}) if group: group.status = 'error' group.save() raise except exception.CinderException as ex: LOG.error(_LE("Failed to update %(volume_id)s" " metadata using the provided snapshot" " %(snapshot_id)s metadata."), {'volume_id': vol['id'], 'snapshot_id': vol['snapshot_id']}) self.db.volume_update(context, vol['id'], {'status': 'error'}) if group: group.status = 'error' group.save() raise exception.MetadataCopyFailure(reason=six.text_type(ex)) self.db.volume_update(context, vol['id'], update) def _update_allocated_capacity(self, vol): # Update allocated capacity in volume stats pool = vol_utils.extract_host(vol['host'], 'pool') if pool is None: # Legacy volume, put them into default pool pool = self.driver.configuration.safe_get( 'volume_backend_name') or vol_utils.extract_host( vol['host'], 'pool', True) try: self.stats['pools'][pool]['allocated_capacity_gb'] += ( vol['size']) except KeyError: self.stats['pools'][pool] = dict( allocated_capacity_gb=vol['size']) def delete_consistencygroup(self, context, group): """Deletes consistency group and the volumes in the group.""" context = context.elevated() project_id = group.project_id if context.project_id != group.project_id: project_id = group.project_id else: project_id = context.project_id volumes = self.db.volume_get_all_by_group(context, group.id) for volume_ref in volumes: if volume_ref['attach_status'] == "attached": # Volume is still attached, need to detach first raise exception.VolumeAttached(volume_id=volume_ref['id']) # self.host is 'host@backend' # volume_ref['host'] is 'host@backend#pool' # Extract host before doing comparison if volume_ref['host']: new_host = vol_utils.extract_host(volume_ref['host']) if new_host != self.host: raise exception.InvalidVolume( reason=_("Volume is not local to this node")) self._notify_about_consistencygroup_usage( context, group, "delete.start") volumes_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) model_update, volumes_model_update = ( self.driver.delete_consistencygroup(context, group, volumes)) if volumes_model_update: for volume in 
volumes_model_update:
                    update = {'status': volume['status']}
                    self.db.volume_update(context, volume['id'],
                                          update)
                    # If we failed to delete a volume, make sure the status
                    # for the cg is set to error as well
                    if (volume['status'] in ['error_deleting', 'error'] and
                            model_update['status'] not in
                            ['error_deleting', 'error']):
                        model_update['status'] = volume['status']

            if model_update:
                if model_update['status'] in ['error_deleting', 'error']:
                    msg = (_('Delete consistency group failed.'))
                    LOG.error(msg,
                              resource={'type': 'consistency_group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()

        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = 'error'
                group.save()
                # Update volume status to 'error' if driver returns
                # None for volumes_model_update.
                if not volumes_model_update:
                    for vol in volumes:
                        self.db.volume_update(
                            context, vol['id'], {'status': 'error'})

        # Get reservations for group
        try:
            reserve_opts = {'consistencygroups': -1}
            cgreservations = CGQUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
        except Exception:
            cgreservations = None
            LOG.exception(_LE("Delete consistency group "
                              "failed to update usages."),
                          resource={'type': 'consistency_group',
                                    'id': group.id})

        for volume_ref in volumes:
            # Get reservations for volume
            try:
                volume_id = volume_ref['id']
                reserve_opts = {'volumes': -1,
                                'gigabytes': -volume_ref['size']}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume_ref.get('volume_type_id'))
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception(_LE("Delete consistency group "
                                  "failed to update usages."),
                              resource={'type': 'consistency_group',
                                        'id': group.id})

            # Delete glance metadata if it exists
            self.db.volume_glance_metadata_delete_by_volume(context,
                                                            volume_id)

            self.db.volume_destroy(context, volume_id)

            # Commit the reservations
            if reservations:
                QUOTAS.commit(context, reservations,
                              project_id=project_id)

            self.stats['allocated_capacity_gb'] -= volume_ref['size']

        if cgreservations:
            CGQUOTAS.commit(context, cgreservations,
                            project_id=project_id)

        group.destroy()
        self._notify_about_consistencygroup_usage(
            context, group, "delete.end", volumes)
        self.publish_service_capabilities(context)
        LOG.info(_LI("Delete consistency group "
                     "completed successfully."),
                 resource={'type': 'consistency_group',
                           'id': group.id})

    def update_consistencygroup(self, context, group,
                                add_volumes=None,
                                remove_volumes=None):
        """Updates consistency group.

        Update consistency group by adding volumes to the group,
        or removing volumes from the group.
        """

        add_volumes_ref = []
        remove_volumes_ref = []
        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes_list = remove_volumes.split(',')
        for add_vol in add_volumes_list:
            try:
                add_vol_ref = self.db.volume_get(context, add_vol)
            except exception.VolumeNotFound:
                # NOTE: add_vol_ref is never assigned when volume_get
                # raises, so log the requested id (add_vol) instead.
                LOG.error(_LE("Update consistency group "
                              "failed to add volume-%(volume_id)s: "
                              "VolumeNotFound."),
                          {'volume_id': add_vol},
                          resource={'type': 'consistency_group',
                                    'id': group.id})
                raise
            if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume is in an invalid "
                         "state: %(status)s. Valid states are: %(valid)s.") %
                       {'volume_id': add_vol_ref['id'],
                        'group_id': group.id,
                        'status': add_vol_ref['status'],
                        'valid': VALID_ADD_VOL_TO_CG_STATUS})
                raise exception.InvalidVolume(reason=msg)
            # self.host is 'host@backend'
            # volume_ref['host'] is 'host@backend#pool'
            # Extract host before doing comparison
            new_host = vol_utils.extract_host(add_vol_ref['host'])
            if new_host != self.host:
                raise exception.InvalidVolume(
                    reason=_("Volume is not local to this node."))
            add_volumes_ref.append(add_vol_ref)

        for remove_vol in remove_volumes_list:
            try:
                remove_vol_ref = self.db.volume_get(context, remove_vol)
            except exception.VolumeNotFound:
                # NOTE: remove_vol_ref is never assigned when volume_get
                # raises, so log the requested id (remove_vol) instead.
                LOG.error(_LE("Update consistency group "
                              "failed to remove volume-%(volume_id)s: "
                              "VolumeNotFound."),
                          {'volume_id': remove_vol},
                          resource={'type': 'consistency_group',
                                    'id': group.id})
                raise
            if remove_vol_ref['status'] not in (
                    VALID_REMOVE_VOL_FROM_CG_STATUS):
                msg = (_("Cannot remove volume %(volume_id)s from "
                         "consistency group %(group_id)s because volume "
                         "is in an invalid state: %(status)s. "
                         "Valid states are: %(valid)s.") %
                       {'volume_id': remove_vol_ref['id'],
                        'group_id': group.id,
                        'status': remove_vol_ref['status'],
                        'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
                raise exception.InvalidVolume(reason=msg)
            remove_volumes_ref.append(remove_vol_ref)

        self._notify_about_consistencygroup_usage(
            context, group, "update.start")

        try:
            utils.require_driver_initialized(self.driver)

            model_update, add_volumes_update, remove_volumes_update = (
                self.driver.update_consistencygroup(
                    context, group,
                    add_volumes=add_volumes_ref,
                    remove_volumes=remove_volumes_ref))

            if add_volumes_update:
                for update in add_volumes_update:
                    self.db.volume_update(context, update['id'], update)

            if remove_volumes_update:
                for update in remove_volumes_update:
                    self.db.volume_update(context, update['id'], update)

            if model_update:
                if model_update['status'] in (
                        [fields.ConsistencyGroupStatus.ERROR]):
                    msg = (_('Error occurred when updating consistency group '
                             '%s.') % group.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                group.update(model_update)
                group.save()

        except exception.VolumeDriverException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error occurred in the volume driver when "
                              "updating consistency group %(group_id)s."),
                          {'group_id': group.id})
                group.status = 'error'
                group.save()
                for add_vol in add_volumes_ref:
                    self.db.volume_update(context, add_vol['id'],
                                          {'status': 'error'})
                for rem_vol in remove_volumes_ref:
                    self.db.volume_update(context, rem_vol['id'],
                                          {'status': 'error'})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error occurred when updating consistency "
                              "group %(group_id)s."),
                          {'group_id': group.id})
                group.status = 'error'
                group.save()
                for add_vol in add_volumes_ref:
                    self.db.volume_update(context, add_vol['id'],
                                          {'status': 'error'})
                for rem_vol in remove_volumes_ref:
                    self.db.volume_update(context, rem_vol['id'],
                                          {'status': 'error'})

        now = timeutils.utcnow()
        group.status = 'available'
        group.updated_at = now
        group.save()
        for add_vol in add_volumes_ref:
            self.db.volume_update(context, add_vol['id'],
                                  {'consistencygroup_id': group.id,
                                   'updated_at': now})
        for rem_vol in remove_volumes_ref:
            self.db.volume_update(context, rem_vol['id'],
                                  {'consistencygroup_id': None,
                                   'updated_at': now})

        self._notify_about_consistencygroup_usage(
            context, group, "update.end")
        LOG.info(_LI("Update consistency group "
                     "completed successfully."),
                 resource={'type': 'consistency_group',
                           'id': group.id})

    def create_cgsnapshot(self, context,
cgsnapshot): """Creates the cgsnapshot.""" caller_context = context context = context.elevated() LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot( context, cgsnapshot.id) self._notify_about_cgsnapshot_usage( context, cgsnapshot, "create.start") snapshots_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.", {'cgsnap_id': cgsnapshot.id}) # Pass context so that drivers that want to use it, can, # but it is not a requirement for all drivers. cgsnapshot.context = caller_context for snapshot in snapshots: snapshot.context = caller_context model_update, snapshots_model_update = ( self.driver.create_cgsnapshot(context, cgsnapshot, snapshots)) if snapshots_model_update: for snap_model in snapshots_model_update: # Update db for snapshot. # NOTE(xyang): snapshots is a list of snapshot objects. # snapshots_model_update should be a list of dicts. self.db.snapshot_update(context, snap_model['id'], snap_model) if (snap_model['status'] in ['error_deleting', 'error'] and model_update['status'] not in ['error_deleting', 'error']): model_update['status'] = snap_model['status'] if model_update: if model_update['status'] == 'error': msg = (_('Error occurred when creating cgsnapshot ' '%s.') % cgsnapshot.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) cgsnapshot.update(model_update) cgsnapshot.save() except exception.CinderException: with excutils.save_and_reraise_exception(): cgsnapshot.status = 'error' cgsnapshot.save() # Update snapshot status to 'error' if driver returns # None for snapshots_model_update. if not snapshots_model_update: for snapshot in snapshots: snapshot.status = 'error' snapshot.save() for snapshot in snapshots: volume_id = snapshot['volume_id'] snapshot_id = snapshot['id'] vol_ref = self.db.volume_get(context, volume_id) if vol_ref.bootable: try: self.db.volume_glance_metadata_copy_to_snapshot( context, snapshot_id, volume_id) except exception.CinderException as ex: LOG.error(_LE("Failed updating %(snapshot_id)s" " metadata using the provided volumes" " %(volume_id)s metadata"), {'volume_id': volume_id, 'snapshot_id': snapshot_id}) # TODO(thangp): Switch over to use snapshot.update() # after cgsnapshot-objects bugs are fixed self.db.snapshot_update(context, snapshot_id, {'status': 'error'}) raise exception.MetadataCopyFailure( reason=six.text_type(ex)) self.db.snapshot_update(context, snapshot['id'], {'status': 'available', 'progress': '100%'}) cgsnapshot.status = 'available' cgsnapshot.save() LOG.info(_LI("cgsnapshot %s: created successfully"), cgsnapshot.id) self._notify_about_cgsnapshot_usage( context, cgsnapshot, "create.end") return cgsnapshot def delete_cgsnapshot(self, context, cgsnapshot): """Deletes cgsnapshot.""" caller_context = context context = context.elevated() project_id = cgsnapshot.project_id LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot( context, cgsnapshot.id) self._notify_about_cgsnapshot_usage( context, cgsnapshot, "delete.start") snapshots_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) LOG.debug("cgsnapshot %(cgsnap_id)s: deleting", {'cgsnap_id': cgsnapshot.id}) # Pass context so that drivers that want to use it, can, # but it is not a requirement for all drivers. 
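# ---------------------------------------------------------------------------
# Illustrative aside: create_cgsnapshot above (and delete_cgsnapshot,
# continued below) fold per-snapshot driver results back into the
# group-level model_update: if any snapshot lands in 'error' or
# 'error_deleting', the group status is demoted to match. That reduction,
# sketched in a standalone, runnable form.
# ---------------------------------------------------------------------------
_ERROR_STATUSES = ('error', 'error_deleting')


def merge_group_status(group_status, item_statuses):
    """Demote group_status if any member reports an error status."""
    for status in item_statuses:
        if status in _ERROR_STATUSES and group_status not in _ERROR_STATUSES:
            group_status = status
    return group_status


assert merge_group_status('available', ['available', 'error']) == 'error'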
cgsnapshot.context = caller_context for snapshot in snapshots: snapshot.context = caller_context model_update, snapshots_model_update = ( self.driver.delete_cgsnapshot(context, cgsnapshot, snapshots)) if snapshots_model_update: for snap_model in snapshots_model_update: # NOTE(xyang): snapshots is a list of snapshot objects. # snapshots_model_update should be a list of dicts. snap = next((item for item in snapshots if item.id == snap_model['id']), None) if snap: snap.status = snap_model['status'] snap.save() if (snap_model['status'] in ['error_deleting', 'error'] and model_update['status'] not in ['error_deleting', 'error']): model_update['status'] = snap_model['status'] if model_update: if model_update['status'] in ['error_deleting', 'error']: msg = (_('Error occurred when deleting cgsnapshot ' '%s.') % cgsnapshot.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: cgsnapshot.update(model_update) cgsnapshot.save() except exception.CinderException: with excutils.save_and_reraise_exception(): cgsnapshot.status = 'error' cgsnapshot.save() # Update snapshot status to 'error' if driver returns # None for snapshots_model_update. if not snapshots_model_update: for snapshot in snapshots: snapshot.status = 'error' snapshot.save() for snapshot in snapshots: # Get reservations try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': -1} else: reserve_opts = { 'snapshots': -1, 'gigabytes': -snapshot['volume_size'], } volume_ref = self.db.volume_get(context, snapshot['volume_id']) QUOTAS.add_volume_type_opts(context, reserve_opts, volume_ref.get('volume_type_id')) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE("Failed to update usages deleting snapshot")) self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot['id']) # TODO(thangp): Switch over to use snapshot.destroy() # after cgsnapshot-objects bugs are fixed self.db.snapshot_destroy(context, snapshot['id']) # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=project_id) cgsnapshot.destroy() LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end", snapshots) def update_migrated_volume(self, ctxt, volume, new_volume, volume_status): """Finalize migration process on backend device.""" # FIXME(thangp): Remove this in v2.0 of RPC API. if (not isinstance(volume, objects.Volume) or not isinstance(new_volume, objects.Volume)): volume = objects.Volume.get_by_id(ctxt, volume['id']) new_volume = objects.Volume.get_by_id(ctxt, new_volume['id']) model_update = None model_update_default = {'_name_id': new_volume.name_id, 'provider_location': new_volume.provider_location} try: model_update = self.driver.update_migrated_volume(ctxt, volume, new_volume, volume_status) except NotImplementedError: # If update_migrated_volume is not implemented for the driver, # _name_id and provider_location will be set with the values # from new_volume. model_update = model_update_default if model_update: model_update_default.update(model_update) # Swap keys that were changed in the source so we keep their values # in the temporary volume's DB record. # Need to convert 'metadata' and 'admin_metadata' since # they are not keys of volume, their corresponding keys are # 'volume_metadata' and 'volume_admin_metadata'. 
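# ---------------------------------------------------------------------------
# Illustrative aside: the loop below rewrites 'metadata'/'admin_metadata'
# from the list-of-rows form stored on the volume (volume.volume_metadata)
# into plain dicts before saving them on the destination record. The
# conversion on its own; 'rows' here stands in for volume.volume_metadata.
# ---------------------------------------------------------------------------
def metadata_rows_to_dict(rows):
    """Collapse [{'key': k, 'value': v}, ...] rows into a {k: v} dict."""
    return {row['key']: row['value'] for row in rows}


assert metadata_rows_to_dict(
    [{'key': 'attached_mode', 'value': 'rw'}]) == {'attached_mode': 'rw'}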
        model_update_new = dict()
        for key in model_update:
            if key == 'metadata':
                if volume.get('volume_metadata'):
                    model_update_new[key] = {
                        metadata['key']: metadata['value']
                        for metadata in volume.volume_metadata}
            elif key == 'admin_metadata':
                model_update_new[key] = {
                    metadata['key']: metadata['value']
                    for metadata in volume.volume_admin_metadata}
            else:
                model_update_new[key] = volume[key]
        with new_volume.obj_as_admin():
            new_volume.update(model_update_new)
            new_volume.save()
        with volume.obj_as_admin():
            volume.update(model_update_default)
            volume.save()

    # Replication V2.1 methods
    def failover_host(self, context, secondary_backend_id=None):
        """Failover a backend to a secondary replication target.

        Instructs a replication capable/configured backend to fail over
        to one of its secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to fail over
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
        in order for it to determine the replicated volumes on the host,
        if needed.

        :param context: security context
        :param secondary_backend_id: Specifies backend_id to fail over to
        """
        svc_host = vol_utils.extract_host(self.host, 'backend')

        service = objects.Service.get_by_args(
            context,
            svc_host,
            'cinder-volume')
        volumes = objects.VolumeList.get_all_by_host(context, self.host)

        exception_encountered = False
        try:
            # expected form of volume_update_list:
            # [{volume_id: , updates: {'provider_id': xxxx....}},
            #  {volume_id: , updates: {'provider_id': xxxx....}}]
            (active_backend_id, volume_update_list) = (
                self.driver.failover_host(
                    context,
                    volumes,
                    secondary_backend_id))
        except exception.UnableToFailOver:
            LOG.exception(_LE("Failed to perform replication failover"))
            service.replication_status = (
                fields.ReplicationStatus.FAILOVER_ERROR)
            service.save()
            exception_encountered = True
        except exception.InvalidReplicationTarget:
            LOG.exception(_LE("Invalid replication target specified "
                              "for failover"))
            # Preserve the replication_status
            if secondary_backend_id == "default":
                service.replication_status = (
                    fields.ReplicationStatus.FAILED_OVER)
            else:
                service.replication_status = fields.ReplicationStatus.ENABLED
            service.save()
            exception_encountered = True
        except exception.VolumeDriverException:
            # NOTE(jdg): Drivers need to be aware if they fail during
            # a failover sequence, we're expecting them to cleanup
            # and make sure the driver state is such that the original
            # backend is still set as primary as per driver memory
            LOG.error(_LE("Driver reported error during "
                          "replication failover."))
            service.status = 'error'
            service.save()
            exception_encountered = True
        if exception_encountered:
            LOG.error(
                _LE("Error encountered during failover on host: "
                    "%(host)s invalid target ID %(backend_id)s"),
                {'host': self.host, 'backend_id': secondary_backend_id})
            return

        if secondary_backend_id == "default":
            service.replication_status = fields.ReplicationStatus.ENABLED
            service.active_backend_id = ""
            service.disabled = False
            service.disabled_reason = ""
            service.save()
        else:
            service.replication_status = fields.ReplicationStatus.FAILED_OVER
            service.active_backend_id = active_backend_id
            service.disabled = True
            service.disabled_reason = "failed-over"
            service.save()

        for update in volume_update_list:
            # Response must include an id key: {volume_id: }
            if not update.get('volume_id'):
                raise exception.UnableToFailOver(
                    reason=_("Update list doesn't include volume_id"))
            # Key things to consider (attaching failed-over volumes):
            # provider_location
            # provider_auth
            #
provider_id # replication_status vobj = objects.Volume.get_by_id(context, update['volume_id']) vobj.update(update.get('updates', {})) vobj.save() LOG.info(_LI("Failed over to replication target successfully.")) def freeze_host(self, context): """Freeze management plane on this backend. Basically puts the control/management plane into a Read Only state. We should handle this in the scheduler, however this is provided to let the driver know in case it needs/wants to do something specific on the backend. :param context: security context """ # TODO(jdg): Return from driver? or catch? # Update status column in service entry try: self.driver.freeze_backend(context) except exception.VolumeDriverException: # NOTE(jdg): In the case of freeze, we don't really # need the backend's consent or anything, we'll just # disable the service, so we can just log this and # go about our business LOG.warning(_LW('Error encountered on Cinder backend during ' 'freeze operation, service is frozen, however ' 'notification to driver has failed.')) svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args( context, svc_host, 'cinder-volume') service.disabled = True service.disabled_reason = "frozen" service.save() LOG.info(_LI("Set backend status to frozen successfully.")) return True def thaw_host(self, context): """UnFreeze management plane on this backend. Basically puts the control/management plane back into a normal state. We should handle this in the scheduler, however this is provided to let the driver know in case it needs/wants to do something specific on the backend. :param context: security context """ # TODO(jdg): Return from driver? or catch? # Update status column in service entry try: self.driver.thaw_backend(context) except exception.VolumeDriverException: # NOTE(jdg): Thaw actually matters, if this call # to the backend fails, we're stuck and can't re-enable LOG.error(_LE('Error encountered on Cinder backend during ' 'thaw operation, service will remain frozen.')) return False svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args( context, svc_host, 'cinder-volume') service.disabled = False service.disabled_reason = "" service.save() LOG.info(_LI("Thawed backend successfully.")) return True def manage_existing_snapshot(self, ctxt, snapshot, ref=None): LOG.debug('manage_existing_snapshot: managing %s.', ref) try: flow_engine = manage_existing_snapshot.get_flow( ctxt, self.db, self.driver, self.host, snapshot.id, ref) except Exception: msg = _LE("Failed to create manage_existing flow: " "%(object_type)s %(object_id)s.") LOG.exception(msg, {'object_type': 'snapshot', 'object_id': snapshot.id}) raise exception.CinderException( _("Failed to create manage existing flow.")) with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() return snapshot.id def get_capabilities(self, context, discover): """Get capabilities of backend storage.""" if discover: self.driver.init_capabilities() capabilities = self.driver.capabilities LOG.debug("Obtained capabilities list: %s.", capabilities) return capabilities def get_backup_device(self, ctxt, backup): (backup_device, is_snapshot) = ( self.driver.get_backup_device(ctxt, backup)) secure_enabled = self.driver.secure_file_operations_enabled() backup_device_dict = {'backup_device': backup_device, 'secure_enabled': secure_enabled, 'is_snapshot': is_snapshot, } return backup_device_dict def secure_file_operations_enabled(self, ctxt, volume): secure_enabled = 
self.driver.secure_file_operations_enabled() return secure_enabled # TODO(dulek): This goes away immediately in Newton and is just present in # Mitaka so that we can receive v1.x and v2.0 messages class _VolumeV1Proxy(object): target = messaging.Target(version='1.40') def __init__(self, manager): self.manager = manager def create_volume(self, context, volume_id, request_spec=None, filter_properties=None, allow_reschedule=True, volume=None): return self.manager.create_volume( context, volume_id, request_spec=request_spec, filter_properties=filter_properties, allow_reschedule=allow_reschedule, volume=volume) def delete_volume(self, context, volume_id, unmanage_only=False, volume=None, cascade=False): return self.manager.delete_volume( context, volume_id, unmanage_only=unmanage_only, volume=volume, cascade=cascade) def create_snapshot(self, context, volume_id, snapshot): return self.manager.create_snapshot(context, volume_id, snapshot) def delete_snapshot(self, context, snapshot, unmanage_only=False): return self.manager.delete_snapshot(context, snapshot, unmanage_only=unmanage_only) def attach_volume(self, context, volume_id, instance_uuid, host_name, mountpoint, mode): return self.manager.attach_volume(context, volume_id, instance_uuid, host_name, mountpoint, mode) def detach_volume(self, context, volume_id, attachment_id=None): return self.manager.detach_volume(context, volume_id, attachment_id=attachment_id) def copy_volume_to_image(self, context, volume_id, image_meta): return self.manager.copy_volume_to_image(context, volume_id, image_meta) def initialize_connection(self, context, volume_id, connector): return self.manager.initialize_connection(context, volume_id, connector) def terminate_connection(self, context, volume_id, connector, force=False): return self.manager.terminate_connection(context, volume_id, connector, force=force) def remove_export(self, context, volume_id): return self.manager.remove_export(context, volume_id) def accept_transfer(self, context, volume_id, new_user, new_project): return self.manager.accept_transfer(context, volume_id, new_user, new_project) def migrate_volume_completion(self, ctxt, volume_id, new_volume_id, error=False, volume=None, new_volume=None): return self.manager.migrate_volume_completion( ctxt, volume_id, new_volume_id, error=error, volume=volume, new_volume=new_volume) def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False, new_type_id=None, volume=None): return self.manager.migrate_volume( ctxt, volume_id, host, force_host_copy=force_host_copy, new_type_id=new_type_id, volume=volume) def publish_service_capabilities(self, context): return self.manager.publish_service_capabilities(context) def extend_volume(self, context, volume_id, new_size, reservations, volume=None): return self.manager.extend_volume(context, volume_id, new_size, reservations, volume=volume) def retype(self, ctxt, volume_id, new_type_id, host, migration_policy='never', reservations=None, volume=None, old_reservations=None): return self.manager.retype(ctxt, volume_id, new_type_id, host, migration_policy=migration_policy, reservations=reservations, volume=volume, old_reservations=old_reservations) def manage_existing(self, ctxt, volume_id, ref=None): return self.manager.manage_existing(ctxt, volume_id, ref=ref) def promote_replica(self, ctxt, volume_id): return self.manager.promote_replica(ctxt, volume_id) def reenable_replication(self, ctxt, volume_id): return self.manager.reenable_replication(ctxt, volume_id) def create_consistencygroup(self, context, 
                                    group):
            return self.manager.create_consistencygroup(context, group)

        def create_consistencygroup_from_src(self, context, group,
                                             cgsnapshot=None, source_cg=None):
            return self.manager.create_consistencygroup_from_src(
                context, group, cgsnapshot=cgsnapshot, source_cg=source_cg)

        def delete_consistencygroup(self, context, group):
            return self.manager.delete_consistencygroup(context, group)

        def update_consistencygroup(self, context, group, add_volumes=None,
                                    remove_volumes=None):
            return self.manager.update_consistencygroup(
                context, group, add_volumes=add_volumes,
                remove_volumes=remove_volumes)

        def create_cgsnapshot(self, context, cgsnapshot):
            return self.manager.create_cgsnapshot(context, cgsnapshot)

        def delete_cgsnapshot(self, context, cgsnapshot):
            return self.manager.delete_cgsnapshot(context, cgsnapshot)

        def update_migrated_volume(self, ctxt, volume, new_volume,
                                   volume_status):
            return self.manager.update_migrated_volume(ctxt, volume,
                                                       new_volume,
                                                       volume_status)

        def failover_host(self, context, secondary_backend_id=None):
            return self.manager.failover_host(
                context, secondary_backend_id=secondary_backend_id)

        def freeze_host(self, context):
            return self.manager.freeze_host(context)

        def thaw_host(self, context):
            return self.manager.thaw_host(context)

        def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
            return self.manager.manage_existing_snapshot(ctxt, snapshot,
                                                         ref=ref)

        def get_capabilities(self, context, discover):
            return self.manager.get_capabilities(context, discover)

        def get_backup_device(self, ctxt, backup):
            return self.manager.get_backup_device(ctxt, backup)

        def secure_file_operations_enabled(self, ctxt, volume):
            return self.manager.secure_file_operations_enabled(ctxt, volume)
cinder-8.0.0/cinder/volume/flows/0000775000567000056710000000000012701406543020033 5ustar jenkinsjenkins00000000000000
cinder-8.0.0/cinder/volume/flows/__init__.py0000664000567000056710000000000012701406250022125 0ustar jenkinsjenkins00000000000000
cinder-8.0.0/cinder/volume/flows/common.py0000664000567000056710000001053512701406250021674 0ustar jenkinsjenkins00000000000000
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
# Copyright (c) 2013 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
import six

from cinder import exception
from cinder.i18n import _LE
from cinder import objects

LOG = logging.getLogger(__name__)

# When a volume errors out we have the ability to save a piece of the
# exception that caused said failure, but we don't want to save the whole
# message since that could be very large, just save up to this number of
# characters.
REASON_LENGTH = 128 def make_pretty_name(method): """Makes a pretty name for a function/method.""" meth_pieces = [method.__name__] # If its an instance method attempt to tack on the class name if hasattr(method, '__self__') and method.__self__ is not None: try: meth_pieces.insert(0, method.__self__.__class__.__name__) except AttributeError: pass return ".".join(meth_pieces) def restore_source_status(context, db, volume_spec): # NOTE(harlowja): Only if the type of the volume that was being created is # the source volume type should we try to reset the source volume status # back to its original value. if not volume_spec or volume_spec.get('type') != 'source_vol': return source_volid = volume_spec['source_volid'] source_status = volume_spec['source_volstatus'] try: LOG.debug('Restoring source %(source_volid)s status to %(status)s' % {'status': source_status, 'source_volid': source_volid}) db.volume_update(context, source_volid, {'status': source_status}) except exception.CinderException: # NOTE(harlowja): Don't let this cause further exceptions since this is # a non-critical failure. LOG.exception(_LE("Failed setting source " "volume %(source_volid)s back to" " its initial %(source_status)s status") % {'source_status': source_status, 'source_volid': source_volid}) def _clean_reason(reason): if reason is None: return '???' reason = six.text_type(reason) if len(reason) <= REASON_LENGTH: return reason else: return reason[0:REASON_LENGTH] + '...' def _update_object(context, db, status, reason, object_type, object_id): update = { 'status': status, } try: LOG.debug('Updating %(object_type)s: %(object_id)s with %(update)s' ' due to: %(reason)s', {'object_type': object_type, 'object_id': object_id, 'reason': reason, 'update': update}) if object_type == 'volume': db.volume_update(context, object_id, update) elif object_type == 'snapshot': snapshot = objects.Snapshot.get_by_id(context, object_id) snapshot.update(update) snapshot.save() except exception.CinderException: # Don't let this cause further exceptions. LOG.exception(_LE("Failed updating %(object_type)s %(object_id)s with" " %(update)s"), {'object_type': object_type, 'object_id': object_id, 'update': update}) def error_out_volume(context, db, volume_id, reason=None): reason = _clean_reason(reason) _update_object(context, db, 'error', reason, 'volume', volume_id) def error_out_snapshot(context, db, snapshot_id, reason=None): reason = _clean_reason(reason) _update_object(context, db, 'error', reason, 'snapshot', snapshot_id) cinder-8.0.0/cinder/volume/flows/manager/0000775000567000056710000000000012701406543021445 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/flows/manager/__init__.py0000664000567000056710000000000012701406250023537 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/flows/manager/manage_existing.py0000664000567000056710000001145312701406250025160 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging import taskflow.engines from taskflow.patterns import linear_flow from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE from cinder.volume.flows.api import create_volume as create_api from cinder.volume.flows import common as flow_common from cinder.volume.flows.manager import create_volume as create_mgr LOG = logging.getLogger(__name__) ACTION = 'volume:manage_existing' class PrepareForQuotaReservationTask(flow_utils.CinderTask): """Gets the volume size from the driver.""" default_provides = set(['size', 'volume_type_id', 'volume_properties', 'volume_spec']) def __init__(self, db, driver): super(PrepareForQuotaReservationTask, self).__init__(addons=[ACTION]) self.db = db self.driver = driver def execute(self, context, volume_ref, manage_existing_ref): volume_id = volume_ref['id'] if not self.driver.initialized: driver_name = self.driver.__class__.__name__ LOG.error(_LE("Unable to manage existing volume. " "Volume driver %s not initialized.") % driver_name) flow_common.error_out_volume(context, self.db, volume_id, reason=_("Volume driver %s " "not initialized.") % driver_name) raise exception.DriverNotInitialized() size = self.driver.manage_existing_get_size(volume_ref, manage_existing_ref) return {'size': size, 'volume_type_id': volume_ref['volume_type_id'], 'volume_properties': volume_ref, 'volume_spec': {'status': volume_ref['status'], 'volume_name': volume_ref['name'], 'volume_id': volume_ref['id']}} class ManageExistingTask(flow_utils.CinderTask): """Brings an existing volume under Cinder management.""" default_provides = set(['volume']) def __init__(self, db, driver): super(ManageExistingTask, self).__init__(addons=[ACTION]) self.db = db self.driver = driver def execute(self, context, volume_ref, manage_existing_ref, size): model_update = self.driver.manage_existing(volume_ref, manage_existing_ref) if not model_update: model_update = {} model_update.update({'size': size}) try: volume_ref.update(model_update) volume_ref.save() except exception.CinderException: LOG.exception(_LE("Failed updating model of volume %(volume_id)s" " with creation provided model %(model)s") % {'volume_id': volume_ref['id'], 'model': model_update}) raise return {'volume': volume_ref} def get_flow(context, db, driver, host, volume_id, ref): """Constructs and returns the manager entrypoint flow.""" flow_name = ACTION.replace(":", "_") + "_manager" volume_flow = linear_flow.Flow(flow_name) # This injects the initial starting flow values into the workflow so that # the dependency order of the tasks provides/requires can be correctly # determined. create_what = { 'context': context, 'volume_id': volume_id, 'manage_existing_ref': ref, 'optional_args': {'is_quota_committed': False} } volume_flow.add(create_mgr.ExtractVolumeRefTask(db, host), create_mgr.NotifyVolumeActionTask(db, "manage_existing.start"), PrepareForQuotaReservationTask(db, driver), create_api.QuotaReserveTask(), ManageExistingTask(db, driver), create_api.QuotaCommitTask(), create_mgr.CreateVolumeOnFinishTask(db, "manage_existing.end")) # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(volume_flow, store=create_what) cinder-8.0.0/cinder/volume/flows/manager/manage_existing_snapshot.py0000664000567000056710000003407612701406250027105 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft from taskflow.utils import misc from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE, _LI from cinder import objects from cinder import quota from cinder.volume.flows import common as flow_common from cinder.volume import utils as volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS ACTION = 'snapshot:manage_existing' class ExtractSnapshotRefTask(flow_utils.CinderTask): """Extracts snapshot reference for given snapshot id.""" default_provides = 'snapshot_ref' def __init__(self, db): super(ExtractSnapshotRefTask, self).__init__(addons=[ACTION]) self.db = db def execute(self, context, snapshot_id): # NOTE(wanghao): this will fetch the snapshot from the database, if # the snapshot has been deleted before we got here then this should # fail. # # In the future we might want to have a lock on the snapshot_id so that # the snapshot can not be deleted while its still being created? snapshot_ref = objects.Snapshot.get_by_id(context, snapshot_id) LOG.debug("ExtractSnapshotRefTask return" " snapshot_ref: %s", snapshot_ref) return snapshot_ref def revert(self, context, snapshot_id, result, **kwargs): if isinstance(result, misc.Failure): return flow_common.error_out_snapshot(context, self.db, snapshot_id) LOG.error(_LE("Snapshot %s: create failed"), snapshot_id) class NotifySnapshotActionTask(flow_utils.CinderTask): """Performs a notification about the given snapshot when called. Reversion strategy: N/A """ def __init__(self, db, event_suffix, host): super(NotifySnapshotActionTask, self).__init__(addons=[ACTION, event_suffix]) self.db = db self.event_suffix = event_suffix self.host = host def execute(self, context, snapshot_ref): snapshot_id = snapshot_ref['id'] try: volume_utils.notify_about_snapshot_usage(context, snapshot_ref, self.event_suffix, host=self.host) except exception.CinderException: # If notification sending of snapshot database entry reading fails # then we shouldn't error out the whole workflow since this is # not always information that must be sent for snapshots to operate LOG.exception(_LE("Failed notifying about the snapshot " "action %(event)s for snapshot %(snp_id)s."), {'event': self.event_suffix, 'snp_id': snapshot_id}) class PrepareForQuotaReservationTask(flow_utils.CinderTask): """Gets the snapshot size from the driver.""" default_provides = set(['size', 'snapshot_properties']) def __init__(self, db, driver): super(PrepareForQuotaReservationTask, self).__init__(addons=[ACTION]) self.db = db self.driver = driver def execute(self, context, snapshot_ref, manage_existing_ref): snapshot_id = snapshot_ref['id'] if not self.driver.initialized: driver_name = (self.driver.configuration. safe_get('volume_backend_name')) LOG.error(_LE("Unable to manage existing snapshot. 
" "Volume driver %s not initialized."), driver_name) flow_common.error_out_snapshot(context, self.db, snapshot_id, reason=_("Volume driver %s " "not initialized.") % driver_name) raise exception.DriverNotInitialized() size = self.driver.manage_existing_snapshot_get_size( snapshot=snapshot_ref, existing_ref=manage_existing_ref) return {'size': size, 'snapshot_properties': snapshot_ref} class QuotaReserveTask(flow_utils.CinderTask): """Reserves a single snapshot with the given size. Reversion strategy: rollback the quota reservation. Warning Warning: if the process that is running this reserve and commit process fails (or is killed before the quota is rolled back or committed it does appear like the quota will never be rolled back). This makes software upgrades hard (inflight operations will need to be stopped or allowed to complete before the upgrade can occur). *In the future* when taskflow has persistence built-in this should be easier to correct via an automated or manual process. """ default_provides = set(['reservations']) def __init__(self): super(QuotaReserveTask, self).__init__(addons=[ACTION]) def execute(self, context, size, optional_args): try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': 1} else: reserve_opts = {'snapshots': 1, 'gigabytes': size} reservations = QUOTAS.reserve(context, **reserve_opts) return { 'reservations': reservations, } except exception.OverQuota as e: overs = e.kwargs['overs'] quotas = e.kwargs['quotas'] usages = e.kwargs['usages'] volume_utils.process_reserve_over_quota(context, overs, usages, quotas, size) def revert(self, context, result, optional_args, **kwargs): # We never produced a result and therefore can't destroy anything. if isinstance(result, misc.Failure): return if optional_args['is_quota_committed']: # The reservations have already been committed and can not be # rolled back at this point. return # We actually produced an output that we can revert so lets attempt # to use said output to rollback the reservation. reservations = result['reservations'] try: QUOTAS.rollback(context, reservations) except exception.CinderException: # We are already reverting, therefore we should silence this # exception since a second exception being active will be bad. LOG.exception(_LE("Failed rolling back quota for" " %s reservations."), reservations) class QuotaCommitTask(flow_utils.CinderTask): """Commits the reservation. Reversion strategy: N/A (the rollback will be handled by the task that did the initial reservation (see: QuotaReserveTask). Warning Warning: if the process that is running this reserve and commit process fails (or is killed before the quota is rolled back or committed it does appear like the quota will never be rolled back). This makes software upgrades hard (inflight operations will need to be stopped or allowed to complete before the upgrade can occur). *In the future* when taskflow has persistence built-in this should be easier to correct via an automated or manual process. """ def __init__(self): super(QuotaCommitTask, self).__init__(addons=[ACTION]) def execute(self, context, reservations, snapshot_properties, optional_args): QUOTAS.commit(context, reservations) # updating is_quota_committed attribute of optional_args dictionary optional_args['is_quota_committed'] = True return {'snapshot_properties': snapshot_properties} def revert(self, context, result, **kwargs): # We never produced a result and therefore can't destroy anything. 
if isinstance(result, ft.Failure):
            return

        snapshot = result['snapshot_properties']
        try:
            reserve_opts = {'snapshots': -1,
                            'gigabytes': -snapshot['volume_size']}
            reservations = QUOTAS.reserve(context,
                                          project_id=context.project_id,
                                          **reserve_opts)
            if reservations:
                QUOTAS.commit(context, reservations,
                              project_id=context.project_id)
        except Exception:
            LOG.exception(_LE("Failed to update quota while deleting "
                              "snapshots: %s"), snapshot['id'])


class ManageExistingTask(flow_utils.CinderTask):
    """Brings an existing snapshot under Cinder management."""

    default_provides = set(['snapshot', 'new_status'])

    def __init__(self, db, driver):
        super(ManageExistingTask, self).__init__(addons=[ACTION])
        self.db = db
        self.driver = driver

    def execute(self, context, snapshot_ref, manage_existing_ref, size):
        model_update = self.driver.manage_existing_snapshot(
            snapshot=snapshot_ref, existing_ref=manage_existing_ref)
        if not model_update:
            model_update = {}
        model_update.update({'size': size})
        try:
            snapshot_object = objects.Snapshot.get_by_id(context,
                                                         snapshot_ref['id'])
            snapshot_object.update(model_update)
            snapshot_object.save()
        except exception.CinderException:
            LOG.exception(_LE("Failed updating model of snapshot "
                              "%(snapshot_id)s with creation provided model "
                              "%(model)s."),
                          {'snapshot_id': snapshot_ref['id'],
                           'model': model_update})
            raise

        return {'snapshot': snapshot_ref, 'new_status': 'available'}


class CreateSnapshotOnFinishTask(NotifySnapshotActionTask):
    """Perform final snapshot actions.

    When a snapshot is created successfully it is expected that MQ
    notifications and database updates will occur to 'signal' to others that
    the snapshot is now ready for usage. This task does those notifications
    and updates in a reliable manner (not re-raising exceptions if said
    actions can not be triggered).

    Reversion strategy: N/A
    """

    def __init__(self, db, event_suffix, host):
        super(CreateSnapshotOnFinishTask, self).__init__(db, event_suffix,
                                                         host)

    def execute(self, context, snapshot, new_status):
        LOG.debug("Begin to call CreateSnapshotOnFinishTask execute.")
        snapshot_id = snapshot['id']
        LOG.debug("New status: %s", new_status)
        update = {'status': new_status}
        try:
            # TODO(harlowja): is it acceptable to only log if this fails??
            # or are there other side-effects that this will cause if the
            # status isn't updated correctly (aka it will likely be stuck in
            # 'building' if this fails)??
            snapshot_object = objects.Snapshot.get_by_id(context, snapshot_id)
            snapshot_object.update(update)
            snapshot_object.save()
            # Now use the parent to notify.
            super(CreateSnapshotOnFinishTask, self).execute(context, snapshot)
        except exception.CinderException:
            LOG.exception(_LE("Failed updating snapshot %(snapshot_id)s with "
                              "%(update)s."), {'snapshot_id': snapshot_id,
                                               'update': update})
        # Even if the update fails, the snapshot is ready.
        LOG.info(_LI("Snapshot %s created successfully."), snapshot_id)


def get_flow(context, db, driver, host, snapshot_id, ref):
    """Constructs and returns the manager entry point flow."""
    LOG.debug("Input parameters: context=%(context)s, db=%(db)s, "
              "driver=%(driver)s, host=%(host)s, "
              "snapshot_id=%(snapshot_id)s, ref=%(ref)s.",
              {'context': context,
               'db': db,
               'driver': driver,
               'host': host,
               'snapshot_id': snapshot_id,
               'ref': ref})
    flow_name = ACTION.replace(":", "_") + "_manager"
    snapshot_flow = linear_flow.Flow(flow_name)

    # This injects the initial starting flow values into the workflow so that
    # the dependency order of the tasks provides/requires can be correctly
    # determined.
create_what = { 'context': context, 'snapshot_id': snapshot_id, 'manage_existing_ref': ref, 'optional_args': {'is_quota_committed': False} } notify_start_msg = "manage_existing_snapshot.start" notify_end_msg = "manage_existing_snapshot.end" snapshot_flow.add(ExtractSnapshotRefTask(db), NotifySnapshotActionTask(db, notify_start_msg, host=host), PrepareForQuotaReservationTask(db, driver), QuotaReserveTask(), ManageExistingTask(db, driver), QuotaCommitTask(), CreateSnapshotOnFinishTask(db, notify_end_msg, host=host)) LOG.debug("Begin to return taskflow.engines." "load(snapshot_flow,store=create_what).") # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(snapshot_flow, store=create_what) cinder-8.0.0/cinder/volume/flows/manager/create_volume.py0000664000567000056710000012636212701406250024656 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math import traceback from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from oslo_utils import units import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft from cinder import context as cinder_context from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE, _LI, _LW from cinder.image import glance from cinder.image import image_utils from cinder import objects from cinder import utils from cinder.volume.flows import common from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) ACTION = 'volume:create' CONF = cfg.CONF # These attributes we will attempt to save for the volume if they exist # in the source image metadata. IMAGE_ATTRIBUTES = ( 'checksum', 'container_format', 'disk_format', 'min_disk', 'min_ram', 'size', ) class OnFailureRescheduleTask(flow_utils.CinderTask): """Triggers a rescheduling request to be sent when reverting occurs. If rescheduling doesn't occur this task errors out the volume. Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets sent to the scheduler rpc api to allow for an attempt X of Y for scheduling this volume elsewhere. """ def __init__(self, reschedule_context, db, scheduler_rpcapi, do_reschedule): requires = ['filter_properties', 'request_spec', 'volume_ref', 'context'] super(OnFailureRescheduleTask, self).__init__(addons=[ACTION], requires=requires) self.do_reschedule = do_reschedule self.scheduler_rpcapi = scheduler_rpcapi self.db = db self.reschedule_context = reschedule_context # These exception types will trigger the volume to be set into error # status rather than being rescheduled. self.no_reschedule_types = [ # Image copying happens after volume creation so rescheduling due # to copy failure will mean the same volume will be created at # another place when it still exists locally. 
exception.ImageCopyFailure, # Metadata updates happen after the volume has been created so if # they fail, rescheduling will likely attempt to create the volume # on another machine when it still exists locally. exception.MetadataCopyFailure, exception.MetadataCreateFailure, exception.MetadataUpdateFailure, # The volume/snapshot has been removed from the database, that # can not be fixed by rescheduling. exception.VolumeNotFound, exception.SnapshotNotFound, exception.VolumeTypeNotFound, exception.ImageUnacceptable, ] def execute(self, **kwargs): pass def _pre_reschedule(self, context, volume): """Actions that happen before the rescheduling attempt occur here.""" try: # Update volume's timestamp and host. # # NOTE(harlowja): this is awkward to be done here, shouldn't # this happen at the scheduler itself and not before it gets # sent to the scheduler? (since what happens if it never gets # there??). It's almost like we need a status of 'on-the-way-to # scheduler' in the future. # We don't need to update the volume's status to creating, since # we haven't changed it to error. update = { 'scheduled_at': timeutils.utcnow(), 'host': None, } LOG.debug("Updating volume %(volume_id)s with %(update)s.", {'update': update, 'volume_id': volume.id}) volume.update(update) volume.save() except exception.CinderException: # Don't let updating the state cause the rescheduling to fail. LOG.exception(_LE("Volume %s: update volume state failed."), volume.id) def _reschedule(self, context, cause, request_spec, filter_properties, volume): """Actions that happen during the rescheduling attempt occur here.""" create_volume = self.scheduler_rpcapi.create_volume if not filter_properties: filter_properties = {} if 'retry' not in filter_properties: filter_properties['retry'] = {} retry_info = filter_properties['retry'] num_attempts = retry_info.get('num_attempts', 0) request_spec['volume_id'] = volume.id LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s " "attempt %(num)d due to %(reason)s", {'volume_id': volume.id, 'method': common.make_pretty_name(create_volume), 'num': num_attempts, 'reason': cause.exception_str}) if all(cause.exc_info): # Stringify to avoid circular ref problem in json serialization retry_info['exc'] = traceback.format_exception(*cause.exc_info) return create_volume(context, CONF.volume_topic, volume.id, request_spec=request_spec, filter_properties=filter_properties, volume=volume) def _post_reschedule(self, volume): """Actions that happen after the rescheduling attempt occur here.""" LOG.debug("Volume %s: re-scheduled", volume.id) def revert(self, context, result, flow_failures, volume_ref, **kwargs): # NOTE(dulek): Revert is occurring and manager need to know if # rescheduling happened. We're returning boolean flag that will # indicate that. It which will be available in flow engine store # through get_revert_result method. # If do not want to be rescheduled, just set the volume's status to # error and return. if not self.do_reschedule: common.error_out_volume(context, self.db, volume_ref.id) LOG.error(_LE("Volume %s: create failed"), volume_ref.id) return False # Check if we have a cause which can tell us not to reschedule and # set the volume's status to error. for failure in flow_failures.values(): if failure.check(*self.no_reschedule_types): common.error_out_volume(context, self.db, volume_ref.id) LOG.error(_LE("Volume %s: create failed"), volume_ref.id) return False # Use a different context when rescheduling. 
if self.reschedule_context: cause = list(flow_failures.values())[0] context = self.reschedule_context try: self._pre_reschedule(context, volume_ref) self._reschedule(context, cause, volume=volume_ref, **kwargs) self._post_reschedule(volume_ref) return True except exception.CinderException: LOG.exception(_LE("Volume %s: rescheduling failed"), volume_ref.id) return False class ExtractVolumeRefTask(flow_utils.CinderTask): """Extracts volume reference for given volume id.""" default_provides = 'volume_ref' def __init__(self, db, host, set_error=True): super(ExtractVolumeRefTask, self).__init__(addons=[ACTION]) self.db = db self.host = host self.set_error = set_error def execute(self, context, volume_id): # NOTE(harlowja): this will fetch the volume from the database, if # the volume has been deleted before we got here then this should fail. # # In the future we might want to have a lock on the volume_id so that # the volume can not be deleted while its still being created? return objects.Volume.get_by_id(context, volume_id) def revert(self, context, volume_id, result, **kwargs): if isinstance(result, ft.Failure) or not self.set_error: return reason = _('Volume create failed while extracting volume ref.') common.error_out_volume(context, self.db, volume_id, reason=reason) LOG.error(_LE("Volume %s: create failed"), volume_id) class ExtractVolumeSpecTask(flow_utils.CinderTask): """Extracts a spec of a volume to be created into a common structure. This task extracts and organizes the input requirements into a common and easier to analyze structure for later tasks to use. It will also attach the underlying database volume reference which can be used by other tasks to reference for further details about the volume to be. Reversion strategy: N/A """ default_provides = 'volume_spec' def __init__(self, db): requires = ['volume_ref', 'request_spec'] super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION], requires=requires) self.db = db def execute(self, context, volume_ref, request_spec): get_remote_image_service = glance.get_remote_image_service volume_name = volume_ref['name'] volume_size = utils.as_int(volume_ref['size'], quiet=False) # Create a dictionary that will represent the volume to be so that # later tasks can easily switch between the different types and create # the volume according to the volume types specifications (which are # represented in this dictionary). specs = { 'status': volume_ref['status'], 'type': 'raw', # This will have the type of the volume to be # created, which should be one of [raw, snap, # source_vol, image] 'volume_id': volume_ref['id'], 'volume_name': volume_name, 'volume_size': volume_size, } if volume_ref.get('snapshot_id'): # We are making a snapshot based volume instead of a raw volume. specs.update({ 'type': 'snap', 'snapshot_id': volume_ref['snapshot_id'], }) elif volume_ref.get('source_volid'): # We are making a source based volume instead of a raw volume. # # NOTE(harlowja): This will likely fail if the source volume # disappeared by the time this call occurred. source_volid = volume_ref.get('source_volid') source_volume_ref = objects.Volume.get_by_id(context, source_volid) specs.update({ 'source_volid': source_volid, # This is captured incase we have to revert and we want to set # back the source volume status to its original status. This # may or may not be sketchy to do?? 'source_volstatus': source_volume_ref['status'], 'type': 'source_vol', }) elif request_spec.get('source_replicaid'): # We are making a clone based on the replica. 
# # NOTE(harlowja): This will likely fail if the replica # disappeared by the time this call occurred. source_volid = request_spec['source_replicaid'] source_volume_ref = objects.Volume.get_by_id(context, source_volid) specs.update({ 'source_replicaid': source_volid, 'source_replicastatus': source_volume_ref['status'], 'type': 'source_replica', }) elif request_spec.get('image_id'): # We are making an image based volume instead of a raw volume. image_href = request_spec['image_id'] image_service, image_id = get_remote_image_service(context, image_href) specs.update({ 'type': 'image', 'image_id': image_id, 'image_location': image_service.get_location(context, image_id), 'image_meta': image_service.show(context, image_id), # Instead of refetching the image service later just save it. # # NOTE(harlowja): if we have to later recover this tasks output # on another 'node' that this object won't be able to be # serialized, so we will have to recreate this object on # demand in the future. 'image_service': image_service, }) return specs def revert(self, context, result, **kwargs): if isinstance(result, ft.Failure): return volume_spec = result.get('volume_spec') # Restore the source volume status and set the volume to error status. common.restore_source_status(context, self.db, volume_spec) class NotifyVolumeActionTask(flow_utils.CinderTask): """Performs a notification about the given volume when called. Reversion strategy: N/A """ def __init__(self, db, event_suffix): super(NotifyVolumeActionTask, self).__init__(addons=[ACTION, event_suffix]) self.db = db self.event_suffix = event_suffix def execute(self, context, volume_ref): volume_id = volume_ref['id'] try: volume_utils.notify_about_volume_usage(context, volume_ref, self.event_suffix, host=volume_ref['host']) except exception.CinderException: # If notification sending of volume database entry reading fails # then we shouldn't error out the whole workflow since this is # not always information that must be sent for volumes to operate LOG.exception(_LE("Failed notifying about the volume" " action %(event)s for volume %(volume_id)s"), {'event': self.event_suffix, 'volume_id': volume_id}) class CreateVolumeFromSpecTask(flow_utils.CinderTask): """Creates a volume from a provided specification. Reversion strategy: N/A """ default_provides = 'volume' def __init__(self, manager, db, driver, image_volume_cache=None): super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION]) self.manager = manager self.db = db self.driver = driver self.image_volume_cache = image_volume_cache def _handle_bootable_volume_glance_meta(self, context, volume_id, **kwargs): """Enable bootable flag and properly handle glance metadata. Caller should provide one and only one of snapshot_id,source_volid and image_id. If an image_id specified, an image_meta should also be provided, otherwise will be treated as an empty dictionary. 
""" log_template = _("Copying metadata from %(src_type)s %(src_id)s to " "%(vol_id)s.") exception_template = _("Failed updating volume %(vol_id)s metadata" " using the provided %(src_type)s" " %(src_id)s metadata") src_type = None src_id = None self._enable_bootable_flag(context, volume_id) try: if kwargs.get('snapshot_id'): src_type = 'snapshot' src_id = kwargs['snapshot_id'] snapshot_id = src_id LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume_id}) self.db.volume_glance_metadata_copy_to_volume( context, volume_id, snapshot_id) elif kwargs.get('source_volid'): src_type = 'source volume' src_id = kwargs['source_volid'] source_volid = src_id LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume_id}) self.db.volume_glance_metadata_copy_from_volume_to_volume( context, source_volid, volume_id) elif kwargs.get('source_replicaid'): src_type = 'source replica' src_id = kwargs['source_replicaid'] source_replicaid = src_id LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume_id}) self.db.volume_glance_metadata_copy_from_volume_to_volume( context, source_replicaid, volume_id) elif kwargs.get('image_id'): src_type = 'image' src_id = kwargs['image_id'] image_id = src_id image_meta = kwargs.get('image_meta', {}) LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume_id}) self._capture_volume_image_metadata(context, volume_id, image_id, image_meta) except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata # would be available for that volume in # volume glance metadata table pass except exception.CinderException as ex: LOG.exception(exception_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume_id}) raise exception.MetadataCopyFailure(reason=ex) def _create_from_snapshot(self, context, volume_ref, snapshot_id, **kwargs): volume_id = volume_ref['id'] snapshot = objects.Snapshot.get_by_id(context, snapshot_id) model_update = self.driver.create_volume_from_snapshot(volume_ref, snapshot) # NOTE(harlowja): Subtasks would be useful here since after this # point the volume has already been created and further failures # will not destroy the volume (although they could in the future). make_bootable = False try: originating_vref = objects.Volume.get_by_id(context, snapshot.volume_id) make_bootable = originating_vref.bootable except exception.CinderException as ex: LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s " "bootable" " flag using the provided glance snapshot " "%(snapshot_ref_id)s volume reference"), {'snapshot_id': snapshot_id, 'snapshot_ref_id': snapshot.volume_id}) raise exception.MetadataUpdateFailure(reason=ex) if make_bootable: self._handle_bootable_volume_glance_meta(context, volume_id, snapshot_id=snapshot_id) return model_update def _enable_bootable_flag(self, context, volume_id): try: LOG.debug('Marking volume %s as bootable.', volume_id) self.db.volume_update(context, volume_id, {'bootable': True}) except exception.CinderException as ex: LOG.exception(_LE("Failed updating volume %(volume_id)s bootable " "flag to true"), {'volume_id': volume_id}) raise exception.MetadataUpdateFailure(reason=ex) def _create_from_source_volume(self, context, volume_ref, source_volid, **kwargs): # NOTE(harlowja): if the source volume has disappeared this will be our # detection of that since this database call should fail. 
# # NOTE(harlowja): likely this is not the best place for this to happen # and we should have proper locks on the source volume while actions # that use the source volume are underway. srcvol_ref = objects.Volume.get_by_id(context, source_volid) model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref) # NOTE(harlowja): Subtasks would be useful here since after this # point the volume has already been created and further failures # will not destroy the volume (although they could in the future). if srcvol_ref.bootable: self._handle_bootable_volume_glance_meta( context, volume_ref.id, source_volid=srcvol_ref.id) return model_update def _create_from_source_replica(self, context, volume_ref, source_replicaid, **kwargs): # NOTE(harlowja): if the source volume has disappeared this will be our # detection of that since this database call should fail. # # NOTE(harlowja): likely this is not the best place for this to happen # and we should have proper locks on the source volume while actions # that use the source volume are underway. srcvol_ref = objects.Volume.get_by_id(context, source_replicaid) model_update = self.driver.create_replica_test_volume(volume_ref, srcvol_ref) # NOTE(harlowja): Subtasks would be useful here since after this # point the volume has already been created and further failures # will not destroy the volume (although they could in the future). if srcvol_ref.bootable: self._handle_bootable_volume_glance_meta( context, volume_ref['id'], source_replicaid=source_replicaid) return model_update def _copy_image_to_volume(self, context, volume_ref, image_id, image_location, image_service): """Downloads Glance image to the specified volume.""" copy_image_to_volume = self.driver.copy_image_to_volume volume_id = volume_ref['id'] LOG.debug("Attempting download of %(image_id)s (%(image_location)s)" " to volume %(volume_id)s.", {'image_id': image_id, 'volume_id': volume_id, 'image_location': image_location}) try: copy_image_to_volume(context, volume_ref, image_service, image_id) except processutils.ProcessExecutionError as ex: LOG.exception(_LE("Failed to copy image %(image_id)s to volume: " "%(volume_id)s"), {'volume_id': volume_id, 'image_id': image_id}) raise exception.ImageCopyFailure(reason=ex.stderr) except exception.ImageUnacceptable as ex: LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"), {'volume_id': volume_id}) raise exception.ImageUnacceptable(ex) except Exception as ex: LOG.exception(_LE("Failed to copy image %(image_id)s to " "volume: %(volume_id)s"), {'volume_id': volume_id, 'image_id': image_id}) if not isinstance(ex, exception.ImageCopyFailure): raise exception.ImageCopyFailure(reason=ex) else: raise LOG.debug("Downloaded image %(image_id)s (%(image_location)s)" " to volume %(volume_id)s successfully.", {'image_id': image_id, 'volume_id': volume_id, 'image_location': image_location}) def _capture_volume_image_metadata(self, context, volume_id, image_id, image_meta): # Save some base attributes into the volume metadata base_metadata = { 'image_id': image_id, } name = image_meta.get('name', None) if name: base_metadata['image_name'] = name # Save some more attributes into the volume metadata from the image # metadata for key in IMAGE_ATTRIBUTES: if key not in image_meta: continue value = image_meta.get(key, None) if value is not None: base_metadata[key] = value # Save all the image metadata properties into the volume metadata property_metadata = {} image_properties = image_meta.get('properties', {}) for (key, value) in 
image_properties.items(): if value is not None: property_metadata[key] = value volume_metadata = dict(property_metadata) volume_metadata.update(base_metadata) LOG.debug("Creating volume glance metadata for volume %(volume_id)s" " backed by image %(image_id)s with: %(vol_metadata)s.", {'volume_id': volume_id, 'image_id': image_id, 'vol_metadata': volume_metadata}) self.db.volume_glance_metadata_bulk_create(context, volume_id, volume_metadata) def _clone_image_volume(self, context, volume, image_location, image_meta): """Create a volume efficiently from an existing image. Returns a dict of volume properties eg. provider_location, boolean indicating whether cloning occurred """ if not image_location: return None, False if (image_meta.get('container_format') != 'bare' or image_meta.get('disk_format') != 'raw'): LOG.info(_LI("Requested image %(id)s is not in raw format."), {'id': image_meta.get('id')}) return None, False image_volume = None direct_url, locations = image_location urls = set([direct_url] + [loc.get('url') for loc in locations or []]) image_volume_ids = [url[9:] for url in urls if url and url.startswith('cinder://')] image_volumes = self.db.volume_get_all_by_host( context, volume['host'], filters={'id': image_volume_ids}) for image_volume in image_volumes: # For the case image volume is stored in the service tenant, # image_owner volume metadata should also be checked. image_owner = None volume_metadata = image_volume.get('volume_metadata') or {} for m in volume_metadata: if m['key'] == 'image_owner': image_owner = m['value'] if (image_meta['owner'] != volume['project_id'] and image_meta['owner'] != image_owner): LOG.info(_LI("Skipping image volume %(id)s because " "it is not accessible by current Tenant."), {'id': image_volume.id}) continue LOG.info(_LI("Will clone a volume from the image volume " "%(id)s."), {'id': image_volume.id}) break else: LOG.debug("No accessible image volume for image %(id)s found.", {'id': image_meta['id']}) return None, False try: return self.driver.create_cloned_volume(volume, image_volume), True except (NotImplementedError, exception.CinderException): LOG.exception(_LE('Failed to clone image volume %(id)s.'), {'id': image_volume['id']}) return None, False def _create_from_image_download(self, context, volume_ref, image_location, image_id, image_service): # TODO(harlowja): what needs to be rolled back in the clone if this # volume create fails?? Likely this should be a subflow or broken # out task in the future. That will bring up the question of how # do we make said subflow/task which is only triggered in the # clone image 'path' resumable and revertable in the correct # manner. model_update = self.driver.create_volume(volume_ref) updates = dict(model_update or dict(), status='downloading') try: volume_ref = self.db.volume_update(context, volume_ref['id'], updates) except exception.CinderException: LOG.exception(_LE("Failed updating volume %(volume_id)s with " "%(updates)s"), {'volume_id': volume_ref['id'], 'updates': updates}) self._copy_image_to_volume(context, volume_ref, image_id, image_location, image_service) return model_update def _create_from_image_cache(self, context, internal_context, volume_ref, image_id, image_meta): """Attempt to create the volume using the image cache. Best case this will simply clone the existing volume in the cache. Worst case the image is out of date and will be evicted. In that case a clone will not be created and the image must be downloaded again. 
""" LOG.debug('Attempting to retrieve cache entry for image = ' '%(image_id)s on host %(host)s.', {'image_id': image_id, 'host': volume_ref['host']}) try: cache_entry = self.image_volume_cache.get_entry(internal_context, volume_ref, image_id, image_meta) if cache_entry: LOG.debug('Creating from source image-volume %(volume_id)s', {'volume_id': cache_entry['volume_id']}) model_update = self._create_from_source_volume( context, volume_ref, cache_entry['volume_id'] ) return model_update, True except exception.CinderException as e: LOG.warning(_LW('Failed to create volume from image-volume cache, ' 'will fall back to default behavior. Error: ' '%(exception)s'), {'exception': e}) return None, False def _create_from_image(self, context, volume_ref, image_location, image_id, image_meta, image_service, **kwargs): LOG.debug("Cloning %(volume_id)s from image %(image_id)s " " at location %(image_location)s.", {'volume_id': volume_ref['id'], 'image_location': image_location, 'image_id': image_id}) # Create the volume from an image. # # First see if the driver can clone the image directly. # # NOTE (singn): two params need to be returned # dict containing provider_location for cloned volume # and clone status. model_update, cloned = self.driver.clone_image(context, volume_ref, image_location, image_meta, image_service) # Try and clone the image if we have it set as a glance location. if not cloned and 'cinder' in CONF.allowed_direct_url_schemes: model_update, cloned = self._clone_image_volume(context, volume_ref, image_location, image_meta) # Try and use the image cache. should_create_cache_entry = False if self.image_volume_cache and not cloned: internal_context = cinder_context.get_internal_tenant_context() if not internal_context: LOG.info(_LI('Unable to get Cinder internal context, will ' 'not use image-volume cache.')) else: model_update, cloned = self._create_from_image_cache( context, internal_context, volume_ref, image_id, image_meta ) if not cloned: should_create_cache_entry = True # Fall back to default behavior of creating volume, # download the image data and copy it into the volume. original_size = volume_ref['size'] try: if not cloned: with image_utils.TemporaryImages.fetch( image_service, context, image_id) as tmp_image: # Try to create the volume as the minimal size, then we can # extend once the image has been downloaded. if should_create_cache_entry: data = image_utils.qemu_img_info(tmp_image) virtual_size = int( math.ceil(float(data.virtual_size) / units.Gi)) if virtual_size > volume_ref.size: params = {'image_size': virtual_size, 'volume_size': volume_ref.size} reason = _("Image virtual size is %(image_size)dGB" " and doesn't fit in a volume of size" " %(volume_size)dGB.") % params raise exception.ImageUnacceptable( image_id=image_id, reason=reason) if virtual_size and virtual_size != original_size: volume_ref.size = virtual_size volume_ref.save() model_update = self._create_from_image_download( context, volume_ref, image_location, image_id, image_service ) if should_create_cache_entry: # Update the newly created volume db entry before we clone it # for the image-volume creation. if model_update: volume_ref.update(model_update) volume_ref.save() self.manager._create_image_cache_volume_entry(internal_context, volume_ref, image_id, image_meta) finally: # If we created the volume as the minimal size, extend it back to # what was originally requested. If an exception has occurred we # still need to put this back before letting it be raised further # up the stack. 
if volume_ref.size != original_size: self.driver.extend_volume(volume_ref, original_size) volume_ref.size = original_size volume_ref.save() self._handle_bootable_volume_glance_meta(context, volume_ref.id, image_id=image_id, image_meta=image_meta) return model_update def _create_raw_volume(self, volume_ref, **kwargs): return self.driver.create_volume(volume_ref) def execute(self, context, volume_ref, volume_spec): volume_spec = dict(volume_spec) volume_id = volume_spec.pop('volume_id', None) if not volume_id: volume_id = volume_ref['id'] # we can't do anything if the driver didn't init if not self.driver.initialized: driver_name = self.driver.__class__.__name__ LOG.error(_LE("Unable to create volume. " "Volume driver %s not initialized"), driver_name) raise exception.DriverNotInitialized() create_type = volume_spec.pop('type', None) LOG.info(_LI("Volume %(volume_id)s: being created as %(create_type)s " "with specification: %(volume_spec)s"), {'volume_spec': volume_spec, 'volume_id': volume_id, 'create_type': create_type}) if create_type == 'raw': model_update = self._create_raw_volume(volume_ref=volume_ref, **volume_spec) elif create_type == 'snap': model_update = self._create_from_snapshot(context, volume_ref=volume_ref, **volume_spec) elif create_type == 'source_vol': model_update = self._create_from_source_volume( context, volume_ref=volume_ref, **volume_spec) elif create_type == 'source_replica': model_update = self._create_from_source_replica( context, volume_ref=volume_ref, **volume_spec) elif create_type == 'image': model_update = self._create_from_image(context, volume_ref=volume_ref, **volume_spec) else: raise exception.VolumeTypeNotFound(volume_type_id=create_type) # Persist any model information provided on creation. try: if model_update: volume_ref.update(model_update) volume_ref.save() except exception.CinderException: # If somehow the update failed we want to ensure that the # failure is logged (but not try rescheduling since the volume at # this point has been created). LOG.exception(_LE("Failed updating model of volume %(volume_id)s " "with creation provided model %(model)s"), {'volume_id': volume_id, 'model': model_update}) raise return volume_ref class CreateVolumeOnFinishTask(NotifyVolumeActionTask): """On successful volume creation this will perform final volume actions. When a volume is created successfully it is expected that MQ notifications and database updates will occur to 'signal' to others that the volume is now ready for usage. This task does those notifications and updates in a reliable manner (not re-raising exceptions if said actions can not be triggered). Reversion strategy: N/A """ def __init__(self, db, event_suffix): super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix) self.status_translation = { 'migration_target_creating': 'migration_target', } def execute(self, context, volume, volume_spec): new_status = self.status_translation.get(volume_spec.get('status'), 'available') update = { 'status': new_status, 'launched_at': timeutils.utcnow(), } try: # TODO(harlowja): is it acceptable to only log if this fails?? # or are there other side-effects that this will cause if the # status isn't updated correctly (aka it will likely be stuck in # 'creating' if this fails)?? volume.update(update) volume.save() # Now use the parent to notify. 
super(CreateVolumeOnFinishTask, self).execute(context, volume) except exception.CinderException: LOG.exception(_LE("Failed updating volume %(volume_id)s with " "%(update)s"), {'volume_id': volume.id, 'update': update}) # Even if the update fails, the volume is ready. LOG.info(_LI("Volume %(volume_name)s (%(volume_id)s): " "created successfully"), {'volume_name': volume_spec['volume_name'], 'volume_id': volume.id}) def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume_id, allow_reschedule, reschedule_context, request_spec, filter_properties, image_volume_cache=None): """Constructs and returns the manager entrypoint flow. This flow will do the following: 1. Determines if rescheduling is enabled (ahead of time). 2. Inject keys & values for dependent tasks. 3. Selects 1 of 2 activated only on *failure* tasks (one to update the db status & notify or one to update the db status & notify & *reschedule*). 4. Extracts a volume specification from the provided inputs. 5. Notifies that the volume has started to be created. 6. Creates a volume from the extracted volume specification. 7. Attaches a on-success *only* task that notifies that the volume creation has ended and performs further database status updates. """ flow_name = ACTION.replace(":", "_") + "_manager" volume_flow = linear_flow.Flow(flow_name) # This injects the initial starting flow values into the workflow so that # the dependency order of the tasks provides/requires can be correctly # determined. create_what = { 'context': context, 'filter_properties': filter_properties, 'request_spec': request_spec, 'volume_id': volume_id, } volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False)) retry = filter_properties.get('retry', None) # Always add OnFailureRescheduleTask and we handle the change of volume's # status when reverting the flow. Meanwhile, no need to revert process of # ExtractVolumeRefTask. do_reschedule = allow_reschedule and request_spec and retry volume_flow.add(OnFailureRescheduleTask(reschedule_context, db, scheduler_rpcapi, do_reschedule)) LOG.debug("Volume reschedule parameters: %(allow)s " "retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry}) volume_flow.add(ExtractVolumeSpecTask(db), NotifyVolumeActionTask(db, "create.start"), CreateVolumeFromSpecTask(manager, db, driver, image_volume_cache), CreateVolumeOnFinishTask(db, "create.end")) # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(volume_flow, store=create_what) cinder-8.0.0/cinder/volume/flows/api/0000775000567000056710000000000012701406543020604 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/flows/api/__init__.py0000664000567000056710000000000012701406250022676 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/volume/flows/api/manage_existing.py0000664000567000056710000001365712701406250024327 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
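# Illustrative sketch (not part of the original file): the minimal taskflow
# pattern that the get_flow() factories in these modules build on -- a
# linear_flow of tasks whose requires/provides get wired up from an initial
# 'store' dict passed to taskflow.engines.load(). The task names below
# (DemoExtractTask, DemoCreateTask) and the flow name are hypothetical.
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow import task


class DemoExtractTask(task.Task):
    # The return value is placed in flow storage under 'size', where later
    # tasks can require it by parameter name.
    default_provides = 'size'

    def execute(self, request_spec):
        return request_spec['size']


class DemoCreateTask(task.Task):
    default_provides = 'volume'

    def execute(self, size):
        return {'size': size, 'status': 'available'}


flow = linear_flow.Flow('demo_manage_existing')
flow.add(DemoExtractTask(), DemoCreateTask())

# Load (but do not run) the flow with its initial data, mirroring the
# 'store=create_what' usage in the get_flow() factories above; run()
# executes the tasks in order, and revert() is invoked on completed tasks
# if a later task fails.
engine = taskflow.engines.load(flow, store={'request_spec': {'size': 1}})
engine.run()
assert engine.storage.fetch('volume')['size'] == 1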
from oslo_config import cfg
from oslo_log import log as logging
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft

from cinder import exception
from cinder import flow_utils
from cinder.i18n import _LE
from cinder.volume.flows import common

LOG = logging.getLogger(__name__)

ACTION = 'volume:manage_existing'
CONF = cfg.CONF


class EntryCreateTask(flow_utils.CinderTask):
    """Creates an entry for the given volume creation in the database.

    Reversion strategy: remove the volume entry created in the database.
    """

    default_provides = set(['volume_properties', 'volume'])

    def __init__(self, db):
        requires = ['availability_zone', 'description', 'metadata',
                    'name', 'host', 'bootable', 'volume_type', 'ref']
        super(EntryCreateTask, self).__init__(addons=[ACTION],
                                              requires=requires)
        self.db = db

    def execute(self, context, **kwargs):
        """Creates a database entry for the given inputs and returns details.

        Accesses the database and creates a new entry for the to-be-created
        volume using the given volume properties which are extracted from the
        input kwargs.
        """
        volume_type = kwargs.pop('volume_type')
        volume_type_id = volume_type['id'] if volume_type else None
        volume_properties = {
            'size': 0,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': 'creating',
            'attach_status': 'detached',
            # Rename these to the internal name.
            'display_description': kwargs.pop('description'),
            'display_name': kwargs.pop('name'),
            'host': kwargs.pop('host'),
            'availability_zone': kwargs.pop('availability_zone'),
            'volume_type_id': volume_type_id,
            'metadata': kwargs.pop('metadata'),
            'bootable': kwargs.pop('bootable'),
        }
        volume = self.db.volume_create(context, volume_properties)
        return {
            'volume_properties': volume_properties,
            # NOTE(harlowja): it appears like further usage of this volume
            # result actually depends on it being a sqlalchemy object and not
            # just a plain dictionary, so that's why we are storing this here.
            #
            # In the future, when this task's results can be serialized and
            # restored automatically for continued running, we will need to
            # resolve the serialization & recreation of this object since raw
            # sqlalchemy objects can't be serialized.
            'volume': volume,
        }

    def revert(self, context, result, optional_args, **kwargs):
        # We never produced a result and therefore can't destroy anything.
        if isinstance(result, ft.Failure):
            return

        vol_id = result['volume']['id']
        try:
            self.db.volume_destroy(context.elevated(), vol_id)
        except exception.CinderException:
            LOG.exception(_LE("Failed destroying volume entry: %s."), vol_id)


class ManageCastTask(flow_utils.CinderTask):
    """Performs a volume manage cast to the scheduler and the volume manager,
    which will signal a transition of the api workflow to another child
    and/or related workflow.
    """

    def __init__(self, scheduler_rpcapi, db):
        requires = ['volume', 'volume_properties', 'volume_type', 'ref']
        super(ManageCastTask, self).__init__(addons=[ACTION],
                                             requires=requires)
        self.scheduler_rpcapi = scheduler_rpcapi
        self.db = db

    def execute(self, context, **kwargs):
        volume = kwargs.pop('volume')
        request_spec = kwargs.copy()
        request_spec['volume_id'] = volume.id

        # Call the scheduler to ensure that the host exists and that it can
        # accept the volume.
        self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
                                              volume['id'],
                                              request_spec=request_spec)

    def revert(self, context, result, flow_failures, **kwargs):
        # Set the volume to error status.
        volume_id = kwargs['volume']['id']
        common.error_out_volume(context, self.db, volume_id)
        LOG.error(_LE("Volume %s: manage failed."), volume_id)
        exc_info = False
        last_failure = list(flow_failures.values())[-1]
        if all(last_failure.exc_info):
            exc_info = last_failure.exc_info
        LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)


def get_flow(scheduler_rpcapi, db_api, create_what):
    """Constructs and returns the api entrypoint flow.

    This flow will do the following:

    1. Injects keys & values for dependent tasks.
    2. Extracts and validates the input keys & values.
    3. Creates the database entry.
    4. Casts to volume manager and scheduler for further processing.
    """
    flow_name = ACTION.replace(":", "_") + "_api"
    api_flow = linear_flow.Flow(flow_name)

    # This will cast it out to either the scheduler or volume manager via
    # the rpc apis provided.
    api_flow.add(EntryCreateTask(db_api),
                 ManageCastTask(scheduler_rpcapi, db_api))

    # Now load (but do not run) the flow using the provided initial data.
    return taskflow.engines.load(api_flow, store=create_what)
cinder-8.0.0/cinder/volume/flows/api/create_volume.py0000664000567000056710000011347312701406250024014 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import units
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft

from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LW
from cinder import objects
from cinder import policy
from cinder import quota
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types

LOG = logging.getLogger(__name__)

ACTION = 'volume:create'
CONF = cfg.CONF
GB = units.Gi
QUOTAS = quota.QUOTAS

# Only when a source is in one of these statuses can we attempt to create a
# volume from it (a source volume or a source snapshot); other statuses we
# can not create from, 'error' being the common example.
SNAPSHOT_PROCEED_STATUS = ('available',)
SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
REPLICA_PROCEED_STATUS = ('active', 'active-stopped',)
CG_PROCEED_STATUS = ('available', 'creating',)
CGSNAPSHOT_PROCEED_STATUS = ('available',)


class ExtractVolumeRequestTask(flow_utils.CinderTask):
    """Processes api request values into a validated set of values.

    This task's responsibility is to take in a set of inputs that will form
    a potential volume request, validate those values against a set of
    conditions and/or translate those values into a valid set, and then
    return the validated/translated values for use by other tasks.

    Reversion strategy: N/A
    """

    # This task will produce the following outputs (said outputs can be
    # saved to durable storage in the future so that the flow can be
    # reconstructed elsewhere and continued).
default_provides = set(['availability_zone', 'size', 'snapshot_id', 'source_volid', 'volume_type', 'volume_type_id', 'encryption_key_id', 'source_replicaid', 'consistencygroup_id', 'cgsnapshot_id', 'qos_specs']) def __init__(self, image_service, availability_zones, **kwargs): super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION], **kwargs) self.image_service = image_service self.availability_zones = availability_zones @staticmethod def _extract_resource(resource, allowed_vals, exc, resource_name, props=('status',)): """Extracts the resource id from the provided resource. This method validates the input resource dict and checks that the properties which names are passed in `props` argument match corresponding lists in `allowed` argument. In case of mismatch exception of type exc is raised. :param resource: Resource dict. :param allowed_vals: Tuple of allowed values lists. :param exc: Exception type to raise. :param resource_name: Name of resource - used to construct log message. :param props: Tuple of resource properties names to validate. :return: Id of a resource. """ resource_id = None if resource: for prop, allowed_states in zip(props, allowed_vals): if resource[prop] not in allowed_states: msg = _("Originating %(res)s %(prop)s must be one of " "'%(vals)s' values") msg = msg % {'res': resource_name, 'prop': prop, 'vals': ', '.join(allowed_states)} # TODO(harlowja): what happens if the status changes after # this initial resource status check occurs??? Seems like # someone could delete the resource after this check passes # but before the volume is officially created? raise exc(reason=msg) resource_id = resource['id'] return resource_id def _extract_consistencygroup(self, consistencygroup): return self._extract_resource(consistencygroup, (CG_PROCEED_STATUS,), exception.InvalidConsistencyGroup, 'consistencygroup') def _extract_cgsnapshot(self, cgsnapshot): return self._extract_resource(cgsnapshot, (CGSNAPSHOT_PROCEED_STATUS,), exception.InvalidCgSnapshot, 'CGSNAPSHOT') def _extract_snapshot(self, snapshot): return self._extract_resource(snapshot, (SNAPSHOT_PROCEED_STATUS,), exception.InvalidSnapshot, 'snapshot') def _extract_source_volume(self, source_volume): return self._extract_resource(source_volume, (SRC_VOL_PROCEED_STATUS,), exception.InvalidVolume, 'source volume') def _extract_source_replica(self, source_replica): return self._extract_resource(source_replica, (SRC_VOL_PROCEED_STATUS, REPLICA_PROCEED_STATUS), exception.InvalidVolume, 'replica', ('status', 'replication_status')) @staticmethod def _extract_size(size, source_volume, snapshot): """Extracts and validates the volume size. This function will validate or when not provided fill in the provided size variable from the source_volume or snapshot and then does validation on the size that is found and returns said validated size. """ def validate_snap_size(size): if snapshot and size < snapshot.volume_size: msg = _("Volume size '%(size)s'GB cannot be smaller than" " the snapshot size %(snap_size)sGB. " "They must be >= original snapshot size.") msg = msg % {'size': size, 'snap_size': snapshot.volume_size} raise exception.InvalidInput(reason=msg) def validate_source_size(size): if source_volume and size < source_volume['size']: msg = _("Volume size '%(size)s'GB cannot be smaller than " "original volume size %(source_size)sGB. 
" "They must be >= original volume size.") msg = msg % {'size': size, 'source_size': source_volume['size']} raise exception.InvalidInput(reason=msg) def validate_int(size): if not isinstance(size, int) or size <= 0: msg = _("Volume size '%(size)s' must be an integer and" " greater than 0") % {'size': size} raise exception.InvalidInput(reason=msg) # Figure out which validation functions we should be applying # on the size value that we extract. validator_functors = [validate_int] if source_volume: validator_functors.append(validate_source_size) elif snapshot: validator_functors.append(validate_snap_size) # If the size is not provided then try to provide it. if not size and source_volume: size = source_volume['size'] elif not size and snapshot: size = snapshot.volume_size size = utils.as_int(size) LOG.debug("Validating volume '%(size)s' using %(functors)s" % {'size': size, 'functors': ", ".join([common.make_pretty_name(func) for func in validator_functors])}) for func in validator_functors: func(size) return size def _check_image_metadata(self, context, image_id, size): """Checks image existence and validates that the image metadata.""" # Check image existence if image_id is None: return # NOTE(harlowja): this should raise an error if the image does not # exist, this is expected as it signals that the image_id is missing. image_meta = self.image_service.show(context, image_id) # check whether image is active if image_meta['status'] != 'active': msg = _('Image %(image_id)s is not active.')\ % {'image_id': image_id} raise exception.InvalidInput(reason=msg) # Check image size is not larger than volume size. image_size = utils.as_int(image_meta['size'], quiet=False) image_size_in_gb = (image_size + GB - 1) // GB if image_size_in_gb > size: msg = _('Size of specified image %(image_size)sGB' ' is larger than volume size %(volume_size)sGB.') msg = msg % {'image_size': image_size_in_gb, 'volume_size': size} raise exception.InvalidInput(reason=msg) # Check image min_disk requirement is met for the particular volume min_disk = image_meta.get('min_disk', 0) if size < min_disk: msg = _('Volume size %(volume_size)sGB cannot be smaller' ' than the image minDisk size %(min_disk)sGB.') msg = msg % {'volume_size': size, 'min_disk': min_disk} raise exception.InvalidInput(reason=msg) def _get_image_volume_type(self, context, image_id): """Get cinder_img_volume_type property from the image metadata.""" # Check image existence if image_id is None: return None image_meta = self.image_service.show(context, image_id) # check whether image is active if image_meta['status'] != 'active': msg = (_('Image %(image_id)s is not active.') % {'image_id': image_id}) raise exception.InvalidInput(reason=msg) # Retrieve 'cinder_img_volume_type' property from glance image # metadata. image_volume_type = "cinder_img_volume_type" properties = image_meta.get('properties') if properties: try: img_vol_type = properties.get(image_volume_type) if img_vol_type is None: return None volume_type = volume_types.get_volume_type_by_name( context, img_vol_type) except exception.VolumeTypeNotFoundByName: LOG.warning(_LW("Failed to retrieve volume_type from image " "metadata. '%(img_vol_type)s' doesn't match " "any volume types."), {'img_vol_type': img_vol_type}) return None LOG.debug("Retrieved volume_type from glance image metadata. " "image_id: %(image_id)s, " "image property: %(image_volume_type)s, " "volume_type: %(volume_type)s." 
% {'image_id': image_id, 'image_volume_type': image_volume_type, 'volume_type': volume_type}) return volume_type @staticmethod def _check_metadata_properties(metadata=None): """Checks that the volume metadata properties are valid.""" if not metadata: metadata = {} for (k, v) in metadata.items(): if len(k) == 0: msg = _("Metadata property key blank") LOG.warning(msg) raise exception.InvalidVolumeMetadata(reason=msg) if len(k) > 255: msg = _("Metadata property key %s greater than 255 " "characters") % k LOG.warning(msg) raise exception.InvalidVolumeMetadataSize(reason=msg) if len(v) > 255: msg = _("Metadata property key %s value greater than" " 255 characters") % k LOG.warning(msg) raise exception.InvalidVolumeMetadataSize(reason=msg) def _extract_availability_zone(self, availability_zone, snapshot, source_volume): """Extracts and returns a validated availability zone. This function will extract the availability zone (if not provided) from the snapshot or source_volume and then performs a set of validation checks on the provided or extracted availability zone and then returns the validated availability zone. """ # Try to extract the availability zone from the corresponding snapshot # or source volume if either is valid so that we can be in the same # availability zone as the source. if availability_zone is None: if snapshot: try: availability_zone = snapshot['volume']['availability_zone'] except (TypeError, KeyError): pass if source_volume and availability_zone is None: try: availability_zone = source_volume['availability_zone'] except (TypeError, KeyError): pass if availability_zone is None: if CONF.default_availability_zone: availability_zone = CONF.default_availability_zone else: # For backwards compatibility use the storage_availability_zone availability_zone = CONF.storage_availability_zone if availability_zone not in self.availability_zones: if CONF.allow_availability_zone_fallback: original_az = availability_zone availability_zone = ( CONF.default_availability_zone or CONF.storage_availability_zone) LOG.warning(_LW("Availability zone '%(s_az)s' " "not found, falling back to " "'%(s_fallback_az)s'."), {'s_az': original_az, 's_fallback_az': availability_zone}) else: msg = _("Availability zone '%(s_az)s' is invalid.") msg = msg % {'s_az': availability_zone} raise exception.InvalidInput(reason=msg) # If the configuration only allows cloning to the same availability # zone then we need to enforce that. 
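# --- Editor's note: hypothetical cinder.conf fragment (values are
# illustrative, not defaults) showing the options consulted by the
# availability-zone logic above and the same-AZ check below:
#
#     [DEFAULT]
#     # Preferred AZ when the request does not name one:
#     default_availability_zone = az1
#     # Legacy fallback consulted when default_availability_zone is unset:
#     storage_availability_zone = nova
#     # Fall back instead of failing when the requested AZ is unknown:
#     allow_availability_zone_fallback = true
#     # Require clones and snapshot-based volumes to stay in the source AZ:
#     cloned_volume_same_az = true
# ----------------------------------------------------------------------------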
if CONF.cloned_volume_same_az: snap_az = None try: snap_az = snapshot['volume']['availability_zone'] except (TypeError, KeyError): pass if snap_az and snap_az != availability_zone: msg = _("Volume must be in the same " "availability zone as the snapshot") raise exception.InvalidInput(reason=msg) source_vol_az = None try: source_vol_az = source_volume['availability_zone'] except (TypeError, KeyError): pass if source_vol_az and source_vol_az != availability_zone: msg = _("Volume must be in the same " "availability zone as the source volume") raise exception.InvalidInput(reason=msg) return availability_zone def _get_encryption_key_id(self, key_manager, context, volume_type_id, snapshot, source_volume): encryption_key_id = None if volume_types.is_encrypted(context, volume_type_id): if snapshot is not None: # creating from snapshot encryption_key_id = snapshot['encryption_key_id'] elif source_volume is not None: # cloning volume encryption_key_id = source_volume['encryption_key_id'] # NOTE(joel-coffman): References to the encryption key should *not* # be copied because the key is deleted when the volume is deleted. # Clone the existing key and associate a separate -- but # identical -- key with each volume. if encryption_key_id is not None: encryption_key_id = key_manager.copy_key(context, encryption_key_id) else: encryption_key_id = key_manager.create_key(context) return encryption_key_id def _get_volume_type_id(self, volume_type, source_volume, snapshot): if not volume_type and source_volume: return source_volume['volume_type_id'] elif snapshot is not None: if volume_type: current_volume_type_id = volume_type.get('id') if current_volume_type_id != snapshot['volume_type_id']: msg = _LW("Volume type will be changed to " "be the same as the source volume.") LOG.warning(msg) return snapshot['volume_type_id'] else: return volume_type.get('id') def execute(self, context, size, snapshot, image_id, source_volume, availability_zone, volume_type, metadata, key_manager, source_replica, consistencygroup, cgsnapshot): utils.check_exclusive_options(snapshot=snapshot, imageRef=image_id, source_volume=source_volume) policy.enforce_action(context, ACTION) # TODO(harlowja): what guarantee is there that the snapshot or source # volume will remain available after we do this initial verification?? snapshot_id = self._extract_snapshot(snapshot) source_volid = self._extract_source_volume(source_volume) source_replicaid = self._extract_source_replica(source_replica) size = self._extract_size(size, source_volume, snapshot) consistencygroup_id = self._extract_consistencygroup(consistencygroup) cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot) self._check_image_metadata(context, image_id, size) availability_zone = self._extract_availability_zone(availability_zone, snapshot, source_volume) # TODO(joel-coffman): This special handling of snapshots to ensure that # their volume type matches the source volume is too convoluted. We # should copy encryption metadata from the encrypted volume type to the # volume upon creation and propagate that information to each snapshot. # This strategy avoids any dependency upon the encrypted volume type. def_vol_type = volume_types.get_default_volume_type() if not volume_type and not source_volume and not snapshot: image_volume_type = self._get_image_volume_type(context, image_id) volume_type = (image_volume_type if image_volume_type else def_vol_type) # When creating a clone of a replica (replication test), we can't # use the volume type of the replica, therefore, we use the default. 
# NOTE(ronenkat): this assumes the default type is not replicated. if source_replicaid: volume_type = def_vol_type volume_type_id = self._get_volume_type_id(volume_type, source_volume, snapshot) if image_id and volume_types.is_encrypted(context, volume_type_id): msg = _('Create encrypted volumes with type %(type)s ' 'from image %(image)s is not supported.') msg = msg % {'type': volume_type_id, 'image': image_id, } raise exception.InvalidInput(reason=msg) encryption_key_id = self._get_encryption_key_id(key_manager, context, volume_type_id, snapshot, source_volume) specs = {} if volume_type_id: qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id) if qos_specs['qos_specs']: specs = qos_specs['qos_specs'].get('specs', {}) if not specs: # to make sure we don't pass empty dict specs = None self._check_metadata_properties(metadata) return { 'size': size, 'snapshot_id': snapshot_id, 'source_volid': source_volid, 'availability_zone': availability_zone, 'volume_type': volume_type, 'volume_type_id': volume_type_id, 'encryption_key_id': encryption_key_id, 'qos_specs': specs, 'source_replicaid': source_replicaid, 'consistencygroup_id': consistencygroup_id, 'cgsnapshot_id': cgsnapshot_id, } class EntryCreateTask(flow_utils.CinderTask): """Creates an entry for the given volume creation in the database. Reversion strategy: remove the volume_id created from the database. """ default_provides = set(['volume_properties', 'volume_id', 'volume']) def __init__(self, db): requires = ['availability_zone', 'description', 'metadata', 'name', 'reservations', 'size', 'snapshot_id', 'source_volid', 'volume_type_id', 'encryption_key_id', 'source_replicaid', 'consistencygroup_id', 'cgsnapshot_id', 'multiattach', 'qos_specs'] super(EntryCreateTask, self).__init__(addons=[ACTION], requires=requires) self.db = db def execute(self, context, optional_args, **kwargs): """Creates a database entry for the given inputs and returns details. Accesses the database and creates a new entry for the to be created volume using the given volume properties which are extracted from the input kwargs (and associated requirements this task needs). These requirements should be previously satisfied and validated by a pre-cursor task. """ volume_properties = { 'size': kwargs.pop('size'), 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'attach_status': 'detached', 'encryption_key_id': kwargs.pop('encryption_key_id'), # Rename these to the internal name. 'display_description': kwargs.pop('description'), 'display_name': kwargs.pop('name'), 'replication_status': 'disabled', 'multiattach': kwargs.pop('multiattach'), } # Merge in the other required arguments which should provide the rest # of the volume property fields (if applicable). volume_properties.update(kwargs) volume = objects.Volume(context=context, **volume_properties) volume.create() return { 'volume_id': volume['id'], 'volume_properties': volume_properties, # NOTE(harlowja): it appears like further usage of this volume # result actually depend on it being a sqlalchemy object and not # just a plain dictionary so that's why we are storing this here. # # In the future where this task results can be serialized and # restored automatically for continued running we will need to # resolve the serialization & recreation of this object since raw # sqlalchemy objects can't be serialized. 
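# (Editor's note: after this task runs, the flow's storage holds, e.g.,
# {'volume_id': <uuid>, 'volume_properties': {...}, 'volume': <Volume>};
# downstream tasks such as VolumeCastTask receive these simply by listing
# the names in their `requires` list.)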
'volume': volume, } def revert(self, context, result, optional_args, **kwargs): if isinstance(result, ft.Failure): # We never produced a result and therefore can't destroy anything. return if optional_args['is_quota_committed']: # If quota got committed we shouldn't rollback as the volume has # already been created and the quota has already been absorbed. return volume = result['volume'] try: volume.destroy() except exception.CinderException: # We are already reverting, therefore we should silence this # exception since a second exception being active will be bad. # # NOTE(harlowja): Being unable to destroy a volume is pretty # bad though!! LOG.exception(_LE("Failed destroying volume entry %s"), volume.id) class QuotaReserveTask(flow_utils.CinderTask): """Reserves a single volume with the given size & the given volume type. Reversion strategy: rollback the quota reservation. Warning: if the process running this reserve-and-commit sequence fails (or is killed) before the quota is rolled back or committed, the quota will never be rolled back. This makes software upgrades hard (inflight operations will need to be stopped or allowed to complete before the upgrade can occur). *In the future* when taskflow has persistence built-in this should be easier to correct via an automated or manual process. """ default_provides = set(['reservations']) def __init__(self): super(QuotaReserveTask, self).__init__(addons=[ACTION]) def execute(self, context, size, volume_type_id, optional_args): try: values = {'per_volume_gigabytes': size} QUOTAS.limit_check(context, project_id=context.project_id, **values) except exception.OverQuota as e: quotas = e.kwargs['quotas'] raise exception.VolumeSizeExceedsLimit( size=size, limit=quotas['per_volume_gigabytes']) try: reserve_opts = {'volumes': 1, 'gigabytes': size} QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id) reservations = QUOTAS.reserve(context, **reserve_opts) return { 'reservations': reservations, } except exception.OverQuota as e: overs = e.kwargs['overs'] quotas = e.kwargs['quotas'] usages = e.kwargs['usages'] def _consumed(name): usage = usages[name] return usage['reserved'] + usage['in_use'] + usage.get( 'allocated', 0) def _get_over(name): for over in overs: if name in over: return over return None over_name = _get_over('gigabytes') exceeded_vol_limit_name = _get_over('volumes') if over_name: # TODO(mc_nair): improve error message for child -1 limit msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG volume (%(d_consumed)dG " "of %(d_quota)dG already consumed)") LOG.warning(msg, {'s_pid': context.project_id, 's_size': size, 'd_consumed': _consumed(over_name), 'd_quota': quotas[over_name]}) raise exception.VolumeSizeExceedsAvailableQuota( name=over_name, requested=size, consumed=_consumed(over_name), quota=quotas[over_name]) elif exceeded_vol_limit_name: msg = _LW("Quota %(s_name)s exceeded for %(s_pid)s, tried " "to create volume (%(d_consumed)d volume(s) " "already consumed).") LOG.warning(msg, {'s_name': exceeded_vol_limit_name, 's_pid': context.project_id, 'd_consumed': _consumed(exceeded_vol_limit_name)}) # TODO(mc_nair): improve error message for child -1 limit raise exception.VolumeLimitExceeded( allowed=quotas[exceeded_vol_limit_name], name=exceeded_vol_limit_name) else: # If nothing was reraised, ensure we reraise the initial error raise def revert(self, context, result, optional_args, **kwargs): # We never produced a result and therefore have nothing to roll back.
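# (Editor's note: taskflow calls revert() with this task's own outcome as
# `result` -- a taskflow.types.failure.Failure instance when execute()
# raised, or execute()'s return value when it succeeded -- so the checks
# below distinguish "nothing was reserved" from "reservations exist and
# must be rolled back".)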
if isinstance(result, ft.Failure): return if optional_args['is_quota_committed']: # The reservations have already been committed and can not be # rolled back at this point. return # We actually produced an output that we can revert so let's attempt # to use said output to rollback the reservation. reservations = result['reservations'] try: QUOTAS.rollback(context, reservations) except exception.CinderException: # We are already reverting, therefore we should silence this # exception since a second exception being active will be bad. LOG.exception(_LE("Failed rolling back quota for" " %s reservations"), reservations) class QuotaCommitTask(flow_utils.CinderTask): """Commits the reservation. Reversion strategy: N/A (the rollback will be handled by the task that did the initial reservation; see QuotaReserveTask). Warning: if the process running this reserve-and-commit sequence fails (or is killed) before the quota is rolled back or committed, the quota will never be rolled back. This makes software upgrades hard (inflight operations will need to be stopped or allowed to complete before the upgrade can occur). *In the future* when taskflow has persistence built-in this should be easier to correct via an automated or manual process. """ def __init__(self): super(QuotaCommitTask, self).__init__(addons=[ACTION]) def execute(self, context, reservations, volume_properties, optional_args): QUOTAS.commit(context, reservations) # Update the is_quota_committed attribute of the optional_args # dictionary so that a later revert knows the quota was absorbed. optional_args['is_quota_committed'] = True return {'volume_properties': volume_properties} def revert(self, context, result, **kwargs): # We never produced a result and therefore have nothing to undo. if isinstance(result, ft.Failure): return volume = result['volume_properties'] try: reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']} QUOTAS.add_volume_type_opts(context, reserve_opts, volume['volume_type_id']) reservations = QUOTAS.reserve(context, project_id=context.project_id, **reserve_opts) if reservations: QUOTAS.commit(context, reservations, project_id=context.project_id) except Exception: LOG.exception(_LE("Failed to update quota for deleting " "volume: %s"), volume['id']) class VolumeCastTask(flow_utils.CinderTask): """Performs a volume create cast to the scheduler or to the volume manager. This will signal a transition of the api workflow to another child and/or related workflow on another component. Reversion strategy: rollback source volume status and error out the newly created volume. """ def __init__(self, scheduler_rpcapi, volume_rpcapi, db): requires = ['image_id', 'scheduler_hints', 'snapshot_id', 'source_volid', 'volume_id', 'volume', 'volume_type', 'volume_properties', 'source_replicaid', 'consistencygroup_id', 'cgsnapshot_id', ] super(VolumeCastTask, self).__init__(addons=[ACTION], requires=requires) self.volume_rpcapi = volume_rpcapi self.scheduler_rpcapi = scheduler_rpcapi self.db = db def _cast_create_volume(self, context, request_spec, filter_properties): source_volid = request_spec['source_volid'] source_replicaid = request_spec['source_replicaid'] volume_id = request_spec['volume_id'] volume = request_spec['volume'] snapshot_id = request_spec['snapshot_id'] image_id = request_spec['image_id'] cgroup_id = request_spec['consistencygroup_id'] host = None cgsnapshot_id = request_spec['cgsnapshot_id'] if cgroup_id: # If cgroup_id exists, we should cast the volume to the scheduler # to choose a proper pool whose backend is the same as the CG's # backend.
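# (Editor's note: cinder host strings follow the 'host@backend#pool'
# convention; vol_utils.extract_host() trims them to the requested level,
# for example:
#     vol_utils.extract_host('node1@lvm#pool-a')          -> 'node1@lvm'
#     vol_utils.extract_host('node1@lvm#pool-a', 'pool')  -> 'pool-a'
#     vol_utils.extract_host('node1@lvm#pool-a', 'host')  -> 'node1'
# The branch below uses this to pin request_spec['CG_backend'] to the
# CG's backend.)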
cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id) # FIXME(wanghao): CG_backend got added before request_spec was # converted to versioned objects. We should make sure that this # will be handled by object version translations once we add # RequestSpec object. request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host) elif snapshot_id and CONF.snapshot_same_host: # NOTE(Rongze Zhu): A simple solution for bug 1008866. # # If snapshot_id is set and CONF.snapshot_same_host is True, make # the call create volume directly to the volume host where the # snapshot resides instead of passing it through the scheduler, so # snapshot can be copied to the new volume. snapshot = objects.Snapshot.get_by_id(context, snapshot_id) source_volume_ref = objects.Volume.get_by_id(context, snapshot.volume_id) host = source_volume_ref.host elif source_volid: source_volume_ref = objects.Volume.get_by_id(context, source_volid) host = source_volume_ref.host elif source_replicaid: source_volume_ref = objects.Volume.get_by_id(context, source_replicaid) host = source_volume_ref.host if not host: # Cast to the scheduler and let it handle whatever is needed # to select the target host for this volume. self.scheduler_rpcapi.create_volume( context, CONF.volume_topic, volume_id, snapshot_id=snapshot_id, image_id=image_id, request_spec=request_spec, filter_properties=filter_properties, volume=volume) else: # Bypass the scheduler and send the request directly to the volume # manager. volume.host = host volume.scheduled_at = timeutils.utcnow() volume.save() if not cgsnapshot_id: self.volume_rpcapi.create_volume( context, volume, volume.host, request_spec, filter_properties, allow_reschedule=False) def execute(self, context, **kwargs): scheduler_hints = kwargs.pop('scheduler_hints', None) request_spec = kwargs.copy() filter_properties = {} if scheduler_hints: filter_properties['scheduler_hints'] = scheduler_hints self._cast_create_volume(context, request_spec, filter_properties) def revert(self, context, result, flow_failures, **kwargs): if isinstance(result, ft.Failure): return # Restore the source volume status and set the volume to error status. volume_id = kwargs['volume_id'] common.restore_source_status(context, self.db, kwargs) common.error_out_volume(context, self.db, volume_id) LOG.error(_LE("Volume %s: create failed"), volume_id) exc_info = False if all(flow_failures[-1].exc_info): exc_info = flow_failures[-1].exc_info LOG.error(_LE('Unexpected build error:'), exc_info=exc_info) def get_flow(db_api, image_service_api, availability_zones, create_what, scheduler_rpcapi=None, volume_rpcapi=None): """Constructs and returns the api entrypoint flow. This flow will do the following: 1. Inject keys & values for dependent tasks. 2. Extracts and validates the input keys & values. 3. Reserves the quota (reverts quota on any failures). 4. Creates the database entry. 5. Commits the quota. 6. Casts to volume manager or scheduler for further processing. """ flow_name = ACTION.replace(":", "_") + "_api" api_flow = linear_flow.Flow(flow_name) api_flow.add(ExtractVolumeRequestTask( image_service_api, availability_zones, rebind={'size': 'raw_size', 'availability_zone': 'raw_availability_zone', 'volume_type': 'raw_volume_type'})) api_flow.add(QuotaReserveTask(), EntryCreateTask(db_api), QuotaCommitTask()) if scheduler_rpcapi and volume_rpcapi: # This will cast it out to either the scheduler or volume manager via # the rpc apis provided. 
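# (Editor's note: hedged usage sketch, modeled on how cinder's volume API
# layer consumes this function; the surrounding variable names are
# illustrative:
#
#     flow_engine = create_volume.get_flow(db, image_service,
#                                          availability_zones, create_what,
#                                          scheduler_rpcapi, volume_rpcapi)
#     with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
#         flow_engine.run()
#     volume = flow_engine.storage.fetch('volume')
#
# The cast task below is only appended when both rpcapis were supplied.)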
api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db_api)) # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(api_flow, store=create_what) cinder-8.0.0/cinder/volume/throttling.py0000664000567000056710000001043212701406250021444 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume copy throttling helpers.""" import contextlib from oslo_concurrency import processutils from oslo_log import log as logging from cinder import exception from cinder.i18n import _LW, _LE from cinder import utils LOG = logging.getLogger(__name__) class Throttle(object): """Base class for throttling disk I/O bandwidth""" DEFAULT = None @staticmethod def set_default(throttle): Throttle.DEFAULT = throttle @staticmethod def get_default(): return Throttle.DEFAULT or Throttle() def __init__(self, prefix=None): self.prefix = prefix or [] @contextlib.contextmanager def subcommand(self, srcpath, dstpath): """Sub-command that reads from srcpath and writes to dstpath. Throttle disk I/O bandwidth used by a sub-command, such as 'dd', that reads from srcpath and writes to dstpath. The sub-command must be executed with the generated prefix command. 
""" yield {'prefix': self.prefix} class BlkioCgroup(Throttle): """Throttle disk I/O bandwidth using blkio cgroups.""" def __init__(self, bps_limit, cgroup_name): self.bps_limit = bps_limit self.cgroup = cgroup_name self.srcdevs = {} self.dstdevs = {} try: utils.execute('cgcreate', '-g', 'blkio:%s' % self.cgroup, run_as_root=True) except processutils.ProcessExecutionError: LOG.error(_LE('Failed to create blkio cgroup \'%(name)s\'.'), {'name': cgroup_name}) raise def _get_device_number(self, path): try: return utils.get_blkdev_major_minor(path) except exception.Error as e: LOG.error(_LE('Failed to get device number for throttling: ' '%(error)s'), {'error': e}) def _limit_bps(self, rw, dev, bps): try: utils.execute('cgset', '-r', 'blkio.throttle.%s_bps_device=%s %d' % (rw, dev, bps), self.cgroup, run_as_root=True) except processutils.ProcessExecutionError: LOG.warning(_LW('Failed to setup blkio cgroup to throttle the ' 'device \'%(device)s\'.'), {'device': dev}) def _set_limits(self, rw, devs): total = sum(devs.values()) for dev in sorted(devs): self._limit_bps(rw, dev, self.bps_limit * devs[dev] / total) @utils.synchronized('BlkioCgroup') def _inc_device(self, srcdev, dstdev): if srcdev: self.srcdevs[srcdev] = self.srcdevs.get(srcdev, 0) + 1 self._set_limits('read', self.srcdevs) if dstdev: self.dstdevs[dstdev] = self.dstdevs.get(dstdev, 0) + 1 self._set_limits('write', self.dstdevs) @utils.synchronized('BlkioCgroup') def _dec_device(self, srcdev, dstdev): if srcdev: self.srcdevs[srcdev] -= 1 if self.srcdevs[srcdev] == 0: del self.srcdevs[srcdev] self._set_limits('read', self.srcdevs) if dstdev: self.dstdevs[dstdev] -= 1 if self.dstdevs[dstdev] == 0: del self.dstdevs[dstdev] self._set_limits('write', self.dstdevs) @contextlib.contextmanager def subcommand(self, srcpath, dstpath): srcdev = self._get_device_number(srcpath) dstdev = self._get_device_number(dstpath) if srcdev is None and dstdev is None: yield {'prefix': []} return self._inc_device(srcdev, dstdev) try: yield {'prefix': ['cgexec', '-g', 'blkio:%s' % self.cgroup]} finally: self._dec_device(srcdev, dstdev) cinder-8.0.0/cinder/volume/driver.py0000664000567000056710000034405712701406250020556 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Drivers for volumes.""" import abc import time from oslo_concurrency import processutils from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.image import image_utils from cinder import objects from cinder import utils from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import throttling LOG = logging.getLogger(__name__) deprecated_use_chap_auth_opts = [cfg.DeprecatedOpt('eqlx_use_chap')] deprecated_chap_username_opts = [cfg.DeprecatedOpt('eqlx_chap_login')] deprecated_chap_password_opts = [cfg.DeprecatedOpt('eqlx_chap_password')] volume_opts = [ cfg.IntOpt('num_shell_tries', default=3, help='Number of times to attempt to run flakey shell commands'), cfg.IntOpt('reserved_percentage', default=0, min=0, max=100, help='The percentage of backend capacity is reserved'), cfg.StrOpt('iscsi_target_prefix', default='iqn.2010-10.org.openstack:', help='Prefix for iSCSI volumes'), cfg.StrOpt('iscsi_ip_address', default='$my_ip', help='The IP address that the iSCSI daemon is listening on'), cfg.ListOpt('iscsi_secondary_ip_addresses', default=[], help='The list of secondary IP addresses of the iSCSI daemon'), cfg.PortOpt('iscsi_port', default=3260, help='The port that the iSCSI daemon is listening on'), cfg.IntOpt('num_volume_device_scan_tries', default=3, help='The maximum number of times to rescan targets' ' to find volume'), cfg.StrOpt('volume_backend_name', help='The backend name for a given driver implementation'), cfg.BoolOpt('use_multipath_for_image_xfer', default=False, help='Do we attach/detach volumes in cinder using multipath ' 'for volume to image and image to volume transfers?'), cfg.BoolOpt('enforce_multipath_for_image_xfer', default=False, help='If this is set to True, attachment of volumes for ' 'image transfer will be aborted when multipathd is not ' 'running. Otherwise, it will fallback to single path.'), cfg.StrOpt('volume_clear', default='zero', choices=['none', 'zero', 'shred'], help='Method used to wipe old volumes'), cfg.IntOpt('volume_clear_size', default=0, help='Size in MiB to wipe at start of old volumes. 0 => all'), cfg.StrOpt('volume_clear_ionice', help='The flag to pass to ionice to alter the i/o priority ' 'of the process used to zero a volume after deletion, ' 'for example "-c3" for idle only priority.'), cfg.StrOpt('iscsi_helper', default='tgtadm', choices=['tgtadm', 'lioadm', 'scstadmin', 'iseradm', 'iscsictl', 'ietadm', 'fake'], help='iSCSI target user-land tool to use. 
tgtadm is default, ' 'use lioadm for LIO iSCSI support, scstadmin for SCST ' 'target support, iseradm for the ISER protocol, ietadm ' 'for iSCSI Enterprise Target, iscsictl for Chelsio iSCSI ' 'Target or fake for testing.'), cfg.StrOpt('volumes_dir', default='$state_path/volumes', help='Volume configuration file storage ' 'directory'), cfg.StrOpt('iet_conf', default='/etc/iet/ietd.conf', help='IET configuration file'), cfg.StrOpt('chiscsi_conf', default='/etc/chelsio-iscsi/chiscsi.conf', help='Chiscsi (CXT) global defaults configuration file'), cfg.StrOpt('iscsi_iotype', default='fileio', choices=['blockio', 'fileio', 'auto'], help=('Sets the behavior of the iSCSI target ' 'to either perform blockio or fileio ' 'optionally, auto can be set and Cinder ' 'will autodetect type of backing device')), cfg.StrOpt('volume_dd_blocksize', default='1M', help='The default block size used when copying/clearing ' 'volumes'), cfg.StrOpt('volume_copy_blkio_cgroup_name', default='cinder-volume-copy', help='The blkio cgroup name to be used to limit bandwidth ' 'of volume copy'), cfg.IntOpt('volume_copy_bps_limit', default=0, help='The upper limit of bandwidth of volume copy. ' '0 => unlimited'), cfg.StrOpt('iscsi_write_cache', default='on', choices=['on', 'off'], help='Sets the behavior of the iSCSI target to either ' 'perform write-back(on) or write-through(off). ' 'This parameter is valid if iscsi_helper is set ' 'to tgtadm or iseradm.'), cfg.StrOpt('iscsi_target_flags', default='', help='Sets the target-specific flags for the iSCSI target. ' 'Only used for tgtadm to specify backing device flags ' 'using bsoflags option. The specified string is passed ' 'as is to the underlying tool.'), cfg.StrOpt('iscsi_protocol', default='iscsi', choices=['iscsi', 'iser'], help='Determines the iSCSI protocol for new iSCSI volumes, ' 'created with tgtadm or lioadm target helpers. In ' 'order to enable RDMA, this parameter should be set ' 'with the value "iser". The supported iSCSI protocol ' 'values are "iscsi" and "iser".'), cfg.StrOpt('driver_client_cert_key', help='The path to the client certificate key for verification, ' 'if the driver supports it.'), cfg.StrOpt('driver_client_cert', help='The path to the client certificate for verification, ' 'if the driver supports it.'), cfg.BoolOpt('driver_use_ssl', default=False, help='Tell driver to use SSL for connection to backend ' 'storage if the driver supports it.'), cfg.FloatOpt('max_over_subscription_ratio', default=20.0, help='Float representation of the over subscription ratio ' 'when thin provisioning is involved. Default ratio is ' '20.0, meaning provisioned capacity can be 20 times of ' 'the total physical capacity. If the ratio is 10.5, it ' 'means provisioned capacity can be 10.5 times of the ' 'total physical capacity. A ratio of 1.0 means ' 'provisioned capacity cannot exceed the total physical ' 'capacity. 
The ratio has to be a minimum of 1.0.'), cfg.StrOpt('scst_target_iqn_name', help='Certain ISCSI targets have predefined target names, ' 'SCST target driver uses this name.'), cfg.StrOpt('scst_target_driver', default='iscsi', help='SCST target implementation can choose from multiple ' 'SCST target drivers.'), cfg.BoolOpt('use_chap_auth', default=False, help='Option to enable/disable CHAP authentication for ' 'targets.', deprecated_opts=deprecated_use_chap_auth_opts), cfg.StrOpt('chap_username', default='', help='CHAP user name.', deprecated_opts=deprecated_chap_username_opts), cfg.StrOpt('chap_password', default='', help='Password for specified CHAP account name.', deprecated_opts=deprecated_chap_password_opts, secret=True), cfg.StrOpt('driver_data_namespace', help='Namespace for driver private data values to be ' 'saved in.'), cfg.StrOpt('filter_function', help='String representation for an equation that will be ' 'used to filter hosts. Only used when the driver ' 'filter is set to be used by the Cinder scheduler.'), cfg.StrOpt('goodness_function', help='String representation for an equation that will be ' 'used to determine the goodness of a host. Only used ' 'when using the goodness weigher is set to be used by ' 'the Cinder scheduler.'), cfg.BoolOpt('driver_ssl_cert_verify', default=False, help='If set to True the http client will validate the SSL ' 'certificate of the backend endpoint.'), cfg.StrOpt('driver_ssl_cert_path', help='Can be used to specify a non default path to a ' 'CA_BUNDLE file or directory with certificates of ' 'trusted CAs, which will be used to validate the backend'), cfg.ListOpt('trace_flags', help='List of options that control which trace info ' 'is written to the DEBUG log level to assist ' 'developers. Valid values are method and api.'), cfg.MultiOpt('replication_device', item_type=types.Dict(), help="Multi opt of dictionaries to represent a replication " "target device. This option may be specified multiple " "times in a single config section to specify multiple " "replication target devices. Each entry takes the " "standard dict config form: replication_device = " "target_device_id:," "key1:value1,key2:value2..."), cfg.BoolOpt('image_upload_use_cinder_backend', default=False, help='If set to True, upload-to-image in raw format will ' 'create a cloned volume and register its location to ' 'the image service, instead of uploading the volume ' 'content. The cinder backend and locations support ' 'must be enabled in the image service, and ' 'glance_api_version must be set to 2.'), cfg.BoolOpt('image_upload_use_internal_tenant', default=False, help='If set to True, the image volume created by ' 'upload-to-image will be placed in the internal tenant. ' 'Otherwise, the image volume is created in the current ' 'context\'s tenant.'), cfg.BoolOpt('image_volume_cache_enabled', default=False, help='Enable the image volume cache for this backend.'), cfg.IntOpt('image_volume_cache_max_size_gb', default=0, help='Max size of the image volume cache for this backend in ' 'GB. 0 => unlimited.'), cfg.IntOpt('image_volume_cache_max_count', default=0, help='Max number of entries allowed in the image volume cache. ' '0 => unlimited.'), cfg.BoolOpt('report_discard_supported', default=False, help='Report to clients of Cinder that the backend supports ' 'discard (aka. trim/unmap). 
This will not actually ' 'change the behavior of the backend or the client ' 'directly, it will only notify that it can be used.'), ] # for backward compatibility iser_opts = [ cfg.IntOpt('num_iser_scan_tries', default=3, help='The maximum number of times to rescan iSER target' 'to find volume'), cfg.StrOpt('iser_target_prefix', default='iqn.2010-10.org.openstack:', help='Prefix for iSER volumes'), cfg.StrOpt('iser_ip_address', default='$my_ip', help='The IP address that the iSER daemon is listening on'), cfg.PortOpt('iser_port', default=3260, help='The port that the iSER daemon is listening on'), cfg.StrOpt('iser_helper', default='tgtadm', help='The name of the iSER target user-land tool to use'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) CONF.register_opts(iser_opts) @six.add_metaclass(abc.ABCMeta) class BaseVD(object): """Executes commands relating to Volumes. Base Driver for Cinder Volume Control Path, This includes supported/required implementation for API calls. Also provides *generic* implementation of core features like cloning, copy_image_to_volume etc, this way drivers that inherit from this base class and don't offer their own impl can fall back on a general solution here. Key thing to keep in mind with this driver is that it's intended that these drivers ONLY implement Control Path details (create, delete, extend...), while transport or data path related implementation should be a *member object* that we call a connector. The point here is that for example don't allow the LVM driver to implement iSCSI methods, instead call whatever connector it has configured via conf file (iSCSI{LIO, TGT, IET}, FC, etc). In the base class and for example the LVM driver we do this via a has-a relationship and just provide an interface to the specific connector methods. How you do this in your own driver is of course up to you. """ VERSION = "N/A" def __init__(self, execute=utils.execute, *args, **kwargs): # NOTE(vish): db is set by Manager self.db = kwargs.get('db') self.host = kwargs.get('host') self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(volume_opts) self.configuration.append_config_values(iser_opts) utils.setup_tracing(self.configuration.safe_get('trace_flags')) self._execute = execute self._stats = {} self._throttle = None self.pools = [] self.capabilities = {} # We set these mappings up in the base driver so they # can be used by children # (intended for LVM and BlockDevice, but others could use as well) self.target_mapping = { 'fake': 'cinder.volume.targets.fake.FakeTarget', 'ietadm': 'cinder.volume.targets.iet.IetAdm', 'iseradm': 'cinder.volume.targets.iser.ISERTgtAdm', 'lioadm': 'cinder.volume.targets.lio.LioAdm', 'tgtadm': 'cinder.volume.targets.tgt.TgtAdm', 'scstadmin': 'cinder.volume.targets.scst.SCSTAdm', 'iscsictl': 'cinder.volume.targets.cxt.CxtAdm'} # set True by manager after successful check_for_setup self._initialized = False def _is_non_recoverable(self, err, non_recoverable_list): for item in non_recoverable_list: if item in err: return True return False def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. 
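# (Editor's note: illustrative call -- the error substring is hypothetical:
#
#     self._try_execute('tgtadm', '--lld', 'iscsi', '--mode', 'target',
#                       '--op', 'show', run_as_root=True,
#                       no_retry_list=['unrecognized option'])
#
# The loop below retries with a quadratic backoff, sleeping tries ** 2
# seconds (1s, 4s, 9s, ...) between attempts, and gives up immediately
# when stderr matches an entry in `no_retry_list`.)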
non_recoverable = kwargs.pop('no_retry_list', []) tries = 0 while True: try: self._execute(*command, **kwargs) return True except processutils.ProcessExecutionError as ex: tries = tries + 1 if tries >= self.configuration.num_shell_tries or\ self._is_non_recoverable(ex.stderr, non_recoverable): raise LOG.exception(_LE("Recovering from a failed execute. " "Try number %s"), tries) time.sleep(tries ** 2) def _detach_volume(self, context, attach_info, volume, properties, force=False, remote=False): """Disconnect the volume from the host.""" # Use Brick's code to do attach/detach connector = attach_info['connector'] connector.disconnect_volume(attach_info['conn']['data'], attach_info['device']) if remote: # Call remote manager's terminate_connection which includes # driver's terminate_connection and remove export rpcapi = volume_rpcapi.VolumeAPI() rpcapi.terminate_connection(context, volume, properties, force=force) else: # Call local driver's terminate_connection and remove export. # NOTE(avishay) This is copied from the manager's code - need to # clean this up in the future. try: self.terminate_connection(volume, properties, force=force) except Exception as err: err_msg = (_('Unable to terminate volume connection: %(err)s') % {'err': six.text_type(err)}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) try: LOG.debug("volume %s: removing export", volume['id']) self.remove_export(context, volume) except Exception as ex: LOG.exception(_LE("Error detaching volume %(volume)s, " "due to remove export failure."), {"volume": volume['id']}) raise exception.RemoveExportException(volume=volume['id'], reason=ex) def _detach_snapshot(self, context, attach_info, snapshot, properties, force=False, remote=False): """Disconnect the snapshot from the host.""" # Use Brick's code to do attach/detach connector = attach_info['connector'] connector.disconnect_volume(attach_info['conn']['data'], attach_info['device']) # NOTE(xyang): This method is introduced for non-disruptive backup. # Currently backup service has to be on the same node as the volume # driver. Therefore it is not possible to call a volume driver on a # remote node. In the future, if backup can be done from a remote # node, this function can be modified to allow RPC calls. The remote # flag in the interface is for anticipation that it will be enabled # in the future. if remote: LOG.error(_LE("Detaching snapshot from a remote node " "is not supported.")) raise exception.NotSupportedOperation( operation=_("detach snapshot from remote node")) else: # Call local driver's terminate_connection and remove export. # NOTE(avishay) This is copied from the manager's code - need to # clean this up in the future. 
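# (Editor's note: the cleanup below intentionally mirrors the attach path
# in reverse -- _attach_snapshot() further down does
# create_export_snapshot() and then initialize_connection_snapshot(), so
# detach terminates the connection first and removes the export last.)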
try: self.terminate_connection_snapshot(snapshot, properties, force=force) except Exception as err: err_msg = (_('Unable to terminate volume connection: %(err)s') % {'err': six.text_type(err)}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) try: LOG.debug("Snapshot %s: removing export.", snapshot.id) self.remove_export_snapshot(context, snapshot) except Exception as ex: LOG.exception(_LE("Error detaching snapshot %(snapshot)s, " "due to remove export failure."), {"snapshot": snapshot.id}) raise exception.RemoveExportException(volume=snapshot.id, reason=ex) def set_initialized(self): self._initialized = True @property def initialized(self): return self._initialized def set_throttle(self): bps_limit = ((self.configuration and self.configuration.safe_get('volume_copy_bps_limit')) or CONF.volume_copy_bps_limit) cgroup_name = ((self.configuration and self.configuration.safe_get( 'volume_copy_blkio_cgroup_name')) or CONF.volume_copy_blkio_cgroup_name) self._throttle = None if bps_limit: try: self._throttle = throttling.BlkioCgroup(int(bps_limit), cgroup_name) except processutils.ProcessExecutionError as err: LOG.warning(_LW('Failed to activate volume copy throttling: ' '%(err)s'), {'err': err}) throttling.Throttle.set_default(self._throttle) def get_version(self): """Get the current version of this driver.""" return self.VERSION @abc.abstractmethod def check_for_setup_error(self): return @abc.abstractmethod def create_volume(self, volume): """Creates a volume. Can optionally return a Dictionary of changes to the volume object to be persisted. If volume_type extra specs includes 'capabilities:replication True' the driver needs to create a volume replica (secondary), and setup replication between the newly created volume and the secondary volume. Returned dictionary should include: volume['replication_status'] = 'copying' volume['replication_extended_status'] = driver specific value volume['driver_data'] = driver specific value """ return @abc.abstractmethod def delete_volume(self, volume): """Deletes a volume. If volume_type extra specs includes 'replication: True' then the driver needs to delete the volume replica too. """ return def secure_file_operations_enabled(self): """Determine if driver is running in Secure File Operations mode. The Cinder Volume driver needs to query if this driver is running in a secure file operations mode. By default, it is False: any driver that does support secure file operations should override this method. """ return False def get_volume_stats(self, refresh=False): """Return the current state of the volume service. If 'refresh' is True, run the update first. For replication the following state should be reported: replication = True (None or false disables replication) """ return def get_prefixed_property(self, property): """Return prefixed property name :returns: a prefixed property name string or None """ if property and self.capabilities.get('vendor_prefix'): return self.capabilities.get('vendor_prefix') + ':' + property def _set_property(self, properties, entry, title, description, type, **kwargs): prop = dict(title=title, description=description, type=type) allowed_keys = ('enum', 'default', 'minimum', 'maximum') for key in kwargs: if key in allowed_keys: prop[key] = kwargs[key] properties[entry] = prop def _init_standard_capabilities(self): """Create a dictionary of Cinder standard capabilities. This method creates a dictionary of Cinder standard capabilities and returns the created dictionary. 
The keys of this dictionary don't contain the prefix and separator(:). """ properties = {} self._set_property( properties, "thin_provisioning", "Thin Provisioning", _("Sets thin provisioning."), "boolean") self._set_property( properties, "compression", "Compression", _("Enables compression."), "boolean") self._set_property( properties, "qos", "QoS", _("Enables QoS."), "boolean") self._set_property( properties, "replication", "Replication", _("Enables replication."), "boolean") return properties def _init_vendor_properties(self): """Create a dictionary of vendor unique properties. This method creates a dictionary of vendor unique properties and returns both the created dictionary and the vendor name. The returned vendor name is used to check the names of vendor unique properties. - Vendor name shouldn't include a colon(:) because of the separator, and it is automatically replaced by an underscore(_). ex. abc:d -> abc_d - Vendor prefix is equal to vendor name. ex. abcd - Vendor unique properties must start with vendor prefix + ':'. ex. abcd:maxIOPS Each backend driver needs to override this method to expose its own properties using _set_property() like this: self._set_property( properties, "vendorPrefix:specific_property", "Title of property", _("Description of property"), "type") : return dictionary of vendor unique properties : return vendor name Example of implementation:: properties = {} self._set_property( properties, "abcd:compression_type", "Compression type", _("Specifies compression type."), "string", enum=["lossy", "lossless", "special"]) self._set_property( properties, "abcd:minIOPS", "Minimum IOPS QoS", _("Sets minimum IOPS if QoS is enabled."), "integer", minimum=10, default=100) return properties, 'abcd' """ return {}, None def init_capabilities(self): """Obtain backend volume stats and capabilities list. This stores a dictionary which consists of two parts. The first part includes static backend capabilities which are obtained by get_volume_stats(). The second part is properties, which includes parameters corresponding to extra specs. This properties part consists of cinder standard capabilities and vendor unique properties. Using this capabilities list, an operator can manage/configure a backend using key/value pairs from capabilities without specific knowledge of the backend. """ # Set static backend capabilities from get_volume_stats() stats = self.get_volume_stats(True) if stats: self.capabilities = stats.copy() # Set cinder standard capabilities self.capabilities['properties'] = self._init_standard_capabilities() # Set vendor unique properties vendor_prop, vendor_name = self._init_vendor_properties() if vendor_name and vendor_prop: updated_vendor_prop = {} old_name = None # Replace any colon in the vendor name with an underscore. if ':' in vendor_name: old_name = vendor_name vendor_name = vendor_name.replace(':', '_') LOG.warning(_LW('The colon in vendor name was replaced ' 'by underscore. Updated vendor name is ' '%(name)s".'), {'name': vendor_name}) for key in vendor_prop: # If the key's vendor name field has a colon, we replace it # with an underscore. # ex. abc:d:storagetype:provisioning # -> abc_d:storagetype:provisioning if old_name and key.startswith(old_name + ':'): new_key = key.replace(old_name, vendor_name, 1) updated_vendor_prop[new_key] = vendor_prop[key] continue if not key.startswith(vendor_name + ':'): LOG.warning(_LW('Vendor unique property "%(property)s" ' 'must start with vendor prefix with colon ' '"%(prefix)s".
The property was ' 'not registered on capabilities list.'), {'prefix': vendor_name + ':', 'property': key}) continue updated_vendor_prop[key] = vendor_prop[key] # Update vendor unique properties to the dictionary self.capabilities['vendor_prefix'] = vendor_name self.capabilities['properties'].update(updated_vendor_prop) LOG.debug("Initialized capabilities list: %s.", self.capabilities) def _update_pools_and_stats(self, data): """Updates data for pools and volume stats based on provided data.""" # provisioned_capacity_gb is set to None by default below, but # None won't be used in calculation. It will be overridden by # driver's provisioned_capacity_gb if reported, otherwise it # defaults to allocated_capacity_gb in host_manager.py. if self.pools: for pool in self.pools: new_pool = {} new_pool.update(dict( pool_name=pool, total_capacity_gb=0, free_capacity_gb=0, provisioned_capacity_gb=None, reserved_percentage=100, QoS_support=False, filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function() )) data["pools"].append(new_pool) else: # No pool configured, the whole backend will be treated as a pool single_pool = {} single_pool.update(dict( pool_name=data["volume_backend_name"], total_capacity_gb=0, free_capacity_gb=0, provisioned_capacity_gb=None, reserved_percentage=100, QoS_support=False, filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function() )) data["pools"].append(single_pool) self._stats = data def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" LOG.debug('copy_image_to_volume %s.', volume['name']) use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer properties = utils.brick_get_connector_properties(use_multipath, enforce_multipath) attach_info, volume = self._attach_volume(context, volume, properties) try: image_utils.fetch_to_raw(context, image_service, image_id, attach_info['device']['path'], self.configuration.volume_dd_blocksize, size=volume['size']) finally: self._detach_volume(context, attach_info, volume, properties) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" LOG.debug('copy_volume_to_image %s.', volume['name']) use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer properties = utils.brick_get_connector_properties(use_multipath, enforce_multipath) attach_info, volume = self._attach_volume(context, volume, properties) try: image_utils.upload_volume(context, image_service, image_meta, attach_info['device']['path']) finally: self._detach_volume(context, attach_info, volume, properties) def before_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions before copyvolume data. This method will be called before _copy_volume_data during volume migration """ pass def after_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions after copyvolume data. This method will be called after _copy_volume_data during volume migration """ pass def get_filter_function(self): """Get filter_function string. Returns either the string from the driver instance or global section in cinder.conf. If nothing is specified in cinder.conf, then try to find the default filter_function. 
When None is returned the scheduler will always pass the driver instance. :returns: a filter_function string or None """ ret_function = self.configuration.filter_function if not ret_function: ret_function = CONF.filter_function if not ret_function: ret_function = self.get_default_filter_function() return ret_function def get_goodness_function(self): """Get goodness_function string. Returns either the string from the driver instance or global section in cinder.conf. If nothing is specified in cinder.conf, then try to find the default goodness_function. When None is returned the scheduler will give the lowest score to the driver instance. :returns: a goodness_function string or None """ ret_function = self.configuration.goodness_function if not ret_function: ret_function = CONF.goodness_function if not ret_function: ret_function = self.get_default_goodness_function() return ret_function def get_default_filter_function(self): """Get the default filter_function string. Each driver could override the method to return a well-known default string if it is available. :returns: None """ return None def get_default_goodness_function(self): """Get the default goodness_function string. Each driver could override the method to return a well-known default string if it is available. :returns: None """ return None def _attach_volume(self, context, volume, properties, remote=False): """Attach the volume.""" if remote: # Call remote manager's initialize_connection which includes # driver's create_export and initialize_connection rpcapi = volume_rpcapi.VolumeAPI() try: conn = rpcapi.initialize_connection(context, volume, properties) except Exception: with excutils.save_and_reraise_exception(): # It is possible that initialize_connection fails due to # timeout. In fact, the volume is already attached after # the timeout error is raised, so it is worth trying to # terminate the connection. try: rpcapi.terminate_connection(context, volume, properties, force=True) except Exception: LOG.warning(_LW("Failed terminating the connection " "of volume %(volume_id)s, but it is " "acceptable."), {'volume_id': volume['id']}) else: # Call local driver's create_export and initialize_connection. # NOTE(avishay) This is copied from the manager's code - need to # clean this up in the future. model_update = None try: LOG.debug("Volume %s: creating export", volume['id']) model_update = self.create_export(context, volume, properties) if model_update: volume = self.db.volume_update(context, volume['id'], model_update) except exception.CinderException as ex: if model_update: LOG.exception(_LE("Failed updating model of volume " "%(volume_id)s with driver provided " "model %(model)s"), {'volume_id': volume['id'], 'model': model_update}) raise exception.ExportFailure(reason=ex) try: conn = self.initialize_connection(volume, properties) except Exception as err: try: err_msg = (_('Unable to fetch connection information from ' 'backend: %(err)s') % {'err': six.text_type(err)}) LOG.error(err_msg) LOG.debug("Cleaning up failed connect initialization.") self.remove_export(context, volume) except Exception as ex: ex_msg = (_('Error encountered during cleanup ' 'of a failed attach: %(ex)s') % {'ex': six.text_type(ex)}) LOG.error(ex_msg) raise exception.VolumeBackendAPIException(data=ex_msg) raise exception.VolumeBackendAPIException(data=err_msg) try: attach_info = self._connect_device(conn) except exception.DeviceUnavailable as exc: # We may have reached a point where we have attached the volume, # so we have to detach it (do the cleanup).
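# (Editor's note: _connect_device() below raises DeviceUnavailable with the
# partially-built attach_info carried in its kwargs --
# `exception.DeviceUnavailable(path=..., attach_info=attach_info,
# reason=...)` -- which is why the handler here can recover it for cleanup.)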
attach_info = exc.kwargs.get('attach_info', None) if attach_info: try: LOG.debug('Device for volume %s is unavailable but did ' 'attach, detaching it.', volume['id']) self._detach_volume(context, attach_info, volume, properties, force=True, remote=remote) except Exception: LOG.exception(_LE('Error detaching volume %s'), volume['id']) raise return (attach_info, volume) def _attach_snapshot(self, context, snapshot, properties, remote=False): """Attach the snapshot.""" # NOTE(xyang): This method is introduced for non-disruptive backup. # Currently the backup service has to be on the same node as the # volume driver, so it is not possible to call a volume driver on a # remote node. In the future, if backup can be done from a remote # node, this function can be modified to allow RPC calls. The remote # flag in the interface is in anticipation that it will be enabled # in the future. if remote: LOG.error(_LE("Attaching snapshot from a remote node " "is not supported.")) raise exception.NotSupportedOperation( operation=_("attach snapshot from remote node")) else: # Call local driver's create_export and initialize_connection. # NOTE(avishay) This is copied from the manager's code - need to # clean this up in the future. model_update = None try: LOG.debug("Snapshot %s: creating export.", snapshot.id) model_update = self.create_export_snapshot(context, snapshot, properties) if model_update: snapshot.provider_location = model_update.get( 'provider_location', None) snapshot.provider_auth = model_update.get( 'provider_auth', None) snapshot.save() except exception.CinderException as ex: if model_update: LOG.exception(_LE("Failed updating model of snapshot " "%(snapshot_id)s with driver provided " "model %(model)s."), {'snapshot_id': snapshot.id, 'model': model_update}) raise exception.ExportFailure(reason=ex) try: conn = self.initialize_connection_snapshot( snapshot, properties) except Exception as err: try: err_msg = (_('Unable to fetch connection information from ' 'backend: %(err)s') % {'err': six.text_type(err)}) LOG.error(err_msg) LOG.debug("Cleaning up failed connect initialization.") self.remove_export_snapshot(context, snapshot) except Exception as ex: ex_msg = (_('Error encountered during cleanup ' 'of a failed attach: %(ex)s') % {'ex': six.text_type(ex)}) LOG.error(ex_msg) raise exception.VolumeBackendAPIException(data=ex_msg) raise exception.VolumeBackendAPIException(data=err_msg) return (self._connect_device(conn), snapshot) def _connect_device(self, conn): # Use Brick's code to do attach/detach use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn) device = connector.connect_volume(conn['data']) host_device = device['path'] attach_info = {'conn': conn, 'device': device, 'connector': connector} unavailable = True try: # Secure network file systems will NOT run as root.
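# NOTE(editor): illustrative aside, not part of the original source.
# os-brick's check_valid_device() does a small test read of the device to
# confirm it is actually reachable; the root-access flag tells it whether
# that read has to go through the root helper. A minimal standalone use of
# the same connector API (protocol and connection data are made up):
#
#     connector = utils.brick_get_connector('iscsi',
#                                           use_multipath=False,
#                                           device_scan_attempts=3,
#                                           conn=conn)
#     device = connector.connect_volume(conn['data'])   # {'path': ...}
#     if not connector.check_valid_device(device['path']):
#         connector.disconnect_volume(conn['data'], device)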
root_access = not self.secure_file_operations_enabled() unavailable = not connector.check_valid_device(host_device, root_access) except Exception: LOG.exception(_LE('Could not validate device %s'), host_device) if unavailable: raise exception.DeviceUnavailable(path=host_device, attach_info=attach_info, reason=(_("Unable to access " "the backend storage " "via the path " "%(path)s.") % {'path': host_device})) return attach_info def clone_image(self, context, volume, image_location, image_meta, image_service): return None, False def backup_use_temp_snapshot(self): return False def snapshot_remote_attachable(self): # TODO(lixiaoy1): the method will be deleted later when remote # attach snapshot is implemented. return False def get_backup_device(self, context, backup): """Get a backup device from an existing volume. The function returns a volume or snapshot to backup service, and then backup service attaches the device and does backup. """ backup_device = None is_snapshot = False if (self.backup_use_temp_snapshot() and self.snapshot_remote_attachable()): (backup_device, is_snapshot) = ( self._get_backup_volume_temp_snapshot(context, backup)) else: backup_device = self._get_backup_volume_temp_volume( context, backup) is_snapshot = False return (backup_device, is_snapshot) def _get_backup_volume_temp_volume(self, context, backup): """Return a volume to do backup. To backup a snapshot, create a temp volume from the snapshot and back it up. Otherwise to backup an in-use volume, create a temp volume and back it up. """ volume = objects.Volume.get_by_id(context, backup.volume_id) snapshot = None if backup.snapshot_id: snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) LOG.debug('Creating a new backup for volume %s.', volume['name']) temp_vol_ref = None device_to_backup = volume # NOTE(xyang): If it is to backup from snapshot, create a temp # volume from the source snapshot, backup the temp volume, and # then clean up the temp volume. if snapshot: temp_vol_ref = self._create_temp_volume_from_snapshot( context, volume, snapshot) backup.temp_volume_id = temp_vol_ref['id'] backup.save() device_to_backup = temp_vol_ref else: # NOTE(xyang): Check volume status if it is not to backup from # snapshot; if 'in-use', create a temp volume from the source # volume, backup the temp volume, and then clean up the temp # volume; if 'available', just backup the volume. previous_status = volume.get('previous_status') if previous_status == "in-use": temp_vol_ref = self._create_temp_cloned_volume( context, volume) backup.temp_volume_id = temp_vol_ref['id'] backup.save() device_to_backup = temp_vol_ref return device_to_backup def _get_backup_volume_temp_snapshot(self, context, backup): """Return a device to backup. If it is to backup from snapshot, back it up directly. Otherwise for in-use volume, create a temp snapshot and back it up. """ volume = self.db.volume_get(context, backup.volume_id) snapshot = None if backup.snapshot_id: snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) LOG.debug('Creating a new backup for volume %s.', volume['name']) device_to_backup = volume is_snapshot = False temp_snapshot = None # NOTE(xyang): If it is to backup from snapshot, back it up # directly. No need to clean it up. if snapshot: device_to_backup = snapshot is_snapshot = True else: # NOTE(xyang): If it is not to backup from snapshot, check volume # status. 
If the volume status is 'in-use', create a temp snapshot # from the source volume, backup the temp snapshot, and then clean # up the temp snapshot; if the volume status is 'available', just # backup the volume. previous_status = volume.get('previous_status') if previous_status == "in-use": temp_snapshot = self._create_temp_snapshot(context, volume) backup.temp_snapshot_id = temp_snapshot.id backup.save() device_to_backup = temp_snapshot is_snapshot = True return (device_to_backup, is_snapshot) def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" # NOTE(xyang): _backup_volume_temp_snapshot and # _backup_volume_temp_volume are split into two # functions because there were concerns during code # reviews that it is confusing to put all the logic # into one function. There's a trade-off between # reducing code duplication and increasing code # readability here. This note explains why we decided # on two separate functions, since there will always # be arguments on both sides. if self.backup_use_temp_snapshot(): self._backup_volume_temp_snapshot(context, backup, backup_service) else: self._backup_volume_temp_volume(context, backup, backup_service) def _backup_volume_temp_volume(self, context, backup, backup_service): """Create a new backup from an existing volume or snapshot. To backup a snapshot, create a temp volume from the snapshot and back it up. Otherwise to backup an in-use volume, create a temp volume and back it up. """ volume = self.db.volume_get(context, backup.volume_id) snapshot = None if backup.snapshot_id: snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) LOG.debug('Creating a new backup for volume %s.', volume['name']) temp_vol_ref = None device_to_backup = volume # NOTE(xyang): If it is to backup from snapshot, create a temp # volume from the source snapshot, backup the temp volume, and # then clean up the temp volume. if snapshot: temp_vol_ref = self._create_temp_volume_from_snapshot( context, volume, snapshot) backup.temp_volume_id = temp_vol_ref['id'] backup.save() device_to_backup = temp_vol_ref else: # NOTE(xyang): Check volume status if it is not to backup from # snapshot; if 'in-use', create a temp volume from the source # volume, backup the temp volume, and then clean up the temp # volume; if 'available', just backup the volume. previous_status = volume.get('previous_status') if previous_status == "in-use": temp_vol_ref = self._create_temp_cloned_volume( context, volume) backup.temp_volume_id = temp_vol_ref['id'] backup.save() device_to_backup = temp_vol_ref self._backup_device(context, backup, backup_service, device_to_backup) if temp_vol_ref: self._delete_temp_volume(context, temp_vol_ref) backup.temp_volume_id = None backup.save() def _backup_volume_temp_snapshot(self, context, backup, backup_service): """Create a new backup from an existing volume or snapshot. If it is to backup from snapshot, back it up directly. Otherwise for in-use volume, create a temp snapshot and back it up. """ volume = self.db.volume_get(context, backup.volume_id) snapshot = None if backup.snapshot_id: snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) LOG.debug('Creating a new backup for volume %s.', volume['name']) device_to_backup = volume is_snapshot = False temp_snapshot = None # NOTE(xyang): If it is to backup from snapshot, back it up # directly. No need to clean it up.
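# NOTE(editor): illustrative summary, not part of the original source.
# The device-selection rules implemented here and in the _get_backup_*
# helpers above boil down to:
#
#     backing up from | volume status | device backed up    | temp object
#     ----------------+---------------+---------------------+-----------------
#     a snapshot      | n/a           | the snapshot itself | none
#     a volume        | 'in-use'      | temp snapshot/clone | cleaned up after
#     a volume        | 'available'   | the volume itself   | none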
if snapshot: device_to_backup = snapshot is_snapshot = True else: # NOTE(xyang): If it is not to backup from snapshot, check volume # status. If the volume status is 'in-use', create a temp snapshot # from the source volume, backup the temp snapshot, and then clean # up the temp snapshot; if the volume status is 'available', just # backup the volume. previous_status = volume.get('previous_status') if previous_status == "in-use": temp_snapshot = self._create_temp_snapshot(context, volume) backup.temp_snapshot_id = temp_snapshot.id backup.save() device_to_backup = temp_snapshot is_snapshot = True self._backup_device(context, backup, backup_service, device_to_backup, is_snapshot) if temp_snapshot: self._delete_temp_snapshot(context, temp_snapshot) backup.temp_snapshot_id = None backup.save() def _backup_device(self, context, backup, backup_service, device, is_snapshot=False): """Create a new backup from a volume or snapshot.""" LOG.debug('Creating a new backup for %s.', device['name']) use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer properties = utils.brick_get_connector_properties(use_multipath, enforce_multipath) if is_snapshot: attach_info, device = self._attach_snapshot(context, device, properties) else: attach_info, device = self._attach_volume(context, device, properties) try: device_path = attach_info['device']['path'] # Secure network file systems will not chown files. if self.secure_file_operations_enabled(): with open(device_path) as device_file: backup_service.backup(backup, device_file) else: with utils.temporary_chown(device_path): with open(device_path) as device_file: backup_service.backup(backup, device_file) finally: if is_snapshot: self._detach_snapshot(context, attach_info, device, properties) else: self._detach_volume(context, attach_info, device, properties) def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume.""" LOG.debug(('Restoring backup %(backup)s to ' 'volume %(volume)s.'), {'backup': backup['id'], 'volume': volume['name']}) use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer properties = utils.brick_get_connector_properties(use_multipath, enforce_multipath) attach_info, volume = self._attach_volume(context, volume, properties) try: volume_path = attach_info['device']['path'] # Secure network file systems will not chown files. 
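# NOTE(editor): illustrative aside, not part of the original source.
# Both restore paths below stream the backup straight onto the attached
# device; they differ only in ownership handling. utils.temporary_chown()
# is a context manager in this tree that chowns the path to the current
# user and restores the original owner on exit. Sketch, with a made-up
# device path:
#
#     with utils.temporary_chown('/dev/sdX'):
#         with open('/dev/sdX', 'wb') as volume_file:
#             backup_service.restore(backup, volume['id'], volume_file)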
if self.secure_file_operations_enabled(): with open(volume_path, 'wb') as volume_file: backup_service.restore(backup, volume['id'], volume_file) else: with utils.temporary_chown(volume_path): with open(volume_path, 'wb') as volume_file: backup_service.restore(backup, volume['id'], volume_file) finally: self._detach_volume(context, attach_info, volume, properties) def _create_temp_snapshot(self, context, volume): kwargs = { 'volume_id': volume['id'], 'cgsnapshot_id': None, 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'progress': '0%', 'volume_size': volume['size'], 'display_name': 'backup-snap-%s' % volume['id'], 'display_description': None, 'volume_type_id': volume['volume_type_id'], 'encryption_key_id': volume['encryption_key_id'], 'metadata': {}, } temp_snap_ref = objects.Snapshot(context=context, **kwargs) temp_snap_ref.create() try: self.create_snapshot(temp_snap_ref) except Exception: with excutils.save_and_reraise_exception(): with temp_snap_ref.obj_as_admin(): self.db.volume_glance_metadata_delete_by_snapshot( context, temp_snap_ref.id) temp_snap_ref.destroy() temp_snap_ref.status = 'available' temp_snap_ref.save() return temp_snap_ref def _create_temp_cloned_volume(self, context, volume): temp_volume = { 'size': volume['size'], 'display_name': 'backup-vol-%s' % volume['id'], 'host': volume['host'], 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'attach_status': 'detached', 'availability_zone': volume.availability_zone, } temp_vol_ref = self.db.volume_create(context, temp_volume) try: self.create_cloned_volume(temp_vol_ref, volume) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_destroy(context.elevated(), temp_vol_ref['id']) self.db.volume_update(context, temp_vol_ref['id'], {'status': 'available'}) return temp_vol_ref def _create_temp_volume_from_snapshot(self, context, volume, snapshot): temp_volume = { 'size': volume['size'], 'display_name': 'backup-vol-%s' % volume['id'], 'host': volume['host'], 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'attach_status': 'detached', 'availability_zone': volume.availability_zone, } temp_vol_ref = self.db.volume_create(context, temp_volume) try: self.create_volume_from_snapshot(temp_vol_ref, snapshot) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_destroy(context.elevated(), temp_vol_ref['id']) self.db.volume_update(context, temp_vol_ref['id'], {'status': 'available'}) return temp_vol_ref def _delete_temp_snapshot(self, context, snapshot): self.delete_snapshot(snapshot) with snapshot.obj_as_admin(): self.db.volume_glance_metadata_delete_by_snapshot( context, snapshot.id) snapshot.destroy() def _delete_temp_volume(self, context, volume): self.delete_volume(volume) context = context.elevated() self.db.volume_destroy(context, volume['id']) def clear_download(self, context, volume): """Clean up after an interrupted image copy.""" pass def attach_volume(self, context, volume, instance_uuid, host_name, mountpoint): """Callback for volume attached to instance or host.""" pass def detach_volume(self, context, volume, attachment=None): """Callback for volume detached.""" pass def do_setup(self, context): """Any initialization the volume driver does while starting.""" pass def validate_connector(self, connector): """Fail if connector doesn't contain all the data needed by driver.""" pass def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return 
model update for migrated volume. Each driver implementing this method needs to be responsible for the values of _name_id and provider_location. If None is returned or either key is not set, it means the volume table does not need to change the value(s) for the key(s). The return format is {"_name_id": value, "provider_location": value}. :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ msg = _("The method update_migrated_volume is not implemented.") raise NotImplementedError(msg) @staticmethod def validate_connector_has_setting(connector, setting): pass def retype(self, context, volume, new_type, diff, host): return False, None def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. If volume_type extra specs includes 'replication: True' the driver needs to create a volume replica (secondary) and setup replication between the newly created volume and the secondary volume. """ raise NotImplementedError() # ####### Interface methods for DataPath (Connector) ######## @abc.abstractmethod def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" return @abc.abstractmethod def create_export(self, context, volume, connector): """Exports the volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ return def create_export_snapshot(self, context, snapshot, connector): """Exports the snapshot. Can optionally return a Dictionary of changes to the snapshot object to be persisted. """ return @abc.abstractmethod def remove_export(self, context, volume): """Removes an export for a volume.""" return def remove_export_snapshot(self, context, snapshot): """Removes an export for a snapshot.""" return @abc.abstractmethod def initialize_connection(self, volume, connector, initiator_data=None): """Allow connection to connector and return connection info. :param volume: The volume to be attached :param connector: Dictionary containing information about what is being connected to. :param initiator_data (optional): A dictionary of driver_initiator_data objects with key-value pairs that have been saved for this initiator by a driver in previous initialize_connection calls. :returns conn_info: A dictionary of connection information. This can optionally include a "initiator_updates" field. The "initiator_updates" field must be a dictionary containing a "set_values" and/or "remove_values" field. The "set_values" field must be a dictionary of key-value pairs to be set/updated in the db. The "remove_values" field must be a list of keys, previously set with "set_values", that will be deleted from the db. """ return def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Allow connection to connector and return connection info. :param snapshot: The snapshot to be attached :param connector: Dictionary containing information about what is being connected to. :returns conn_info: A dictionary of connection information. This can optionally include a "initiator_updates" field. The "initiator_updates" field must be a dictionary containing a "set_values" and/or "remove_values" field. The "set_values" field must be a dictionary of key-value pairs to be set/updated in the db. 
The "remove_values" field must be a list of keys, previously set with "set_values", that will be deleted from the db. """ return @abc.abstractmethod def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" return def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallow connection from connector.""" return def get_pool(self, volume): """Return pool name where volume reside on. :param volume: The volume hosted by the the driver. :returns: name of the pool where given volume is in. """ return None def update_provider_info(self, volumes, snapshots): """Get provider info updates from driver. :param volumes: List of Cinder volumes to check for updates :param snapshots: List of Cinder snapshots to check for updates :returns: tuple (volume_updates, snapshot_updates) where volume updates {'id': uuid, provider_id: } and snapshot updates {'id': uuid, provider_id: } """ return None, None def migrate_volume(self, context, volume, host): """Migrate volume stub. This is for drivers that don't implement an enhanced version of this operation. """ return (False, None) def manage_existing(self, volume, existing_ref): """Manage exiting stub. This is for drivers that don't implement manage_existing(). """ msg = _("Manage existing volume not implemented.") raise NotImplementedError(msg) def unmanage(self, volume): """Unmanage stub. This is for drivers that don't implement unmanage(). """ msg = _("Unmanage volume not implemented.") raise NotImplementedError(msg) def freeze_backend(self, context): """Notify the backend that it's frozen. We use set to prohibit the creation of any new resources on the backend, or any modifications to existing items on a backend. We set/enforce this by not allowing scheduling of new volumes to the specified backend, and checking at the api for modifications to resources and failing. In most cases the driver may not need to do anything, but this provides a handle if they need it. :param context: security context :response: True|False """ return True def thaw_backend(self, context): """Notify the backend that it's unfrozen/thawed. Returns the backend to a normal state after a freeze operation. In most cases the driver may not need to do anything, but this provides a handle if they need it. :param context: security context :response: True|False """ return True def failover_host(self, context, volumes, secondary_id=None): """Failover a backend to a secondary replication target. Instructs a replication capable/configured backend to failover to one of it's secondary replication targets. host=None is an acceptable input, and leaves it to the driver to failover to the only configured target, or to choose a target on it's own. All of the hosts volumes will be passed on to the driver in order for it to determine the replicated volumes on the host, if needed. Response is a tuple, including the new target backend_id AND a lit of dictionaries with volume_id and updates. 
*Key things to consider (attaching failed-over volumes): provider_location provider_auth provider_id replication_status :param context: security context :param volumes: list of volume objects, in case the driver needs to take action on them in some way :param secondary_id: Specifies rep target backend to fail over to :returns : ID of the backend that was failed-over to and model update for volumes """ # Example volume_updates data structure: # [{'volume_id': , # 'updates': {'provider_id': 8, # 'replication_status': 'failed-over', # 'replication_extended_status': 'whatever',...}},] raise NotImplementedError() def get_replication_updates(self, context): """Old replication update method, deprecate.""" raise NotImplementedError() @six.add_metaclass(abc.ABCMeta) class LocalVD(object): @abc.abstractmethod def local_path(self, volume): return @six.add_metaclass(abc.ABCMeta) class SnapshotVD(object): @abc.abstractmethod def create_snapshot(self, snapshot): """Creates a snapshot.""" return @abc.abstractmethod def delete_snapshot(self, snapshot): """Deletes a snapshot.""" return @abc.abstractmethod def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. If volume_type extra specs includes 'replication: True' the driver needs to create a volume replica (secondary), and setup replication between the newly created volume and the secondary volume. """ return @six.add_metaclass(abc.ABCMeta) class ConsistencyGroupVD(object): @abc.abstractmethod def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" return @abc.abstractmethod def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" return @abc.abstractmethod def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" return @abc.abstractmethod def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" return @six.add_metaclass(abc.ABCMeta) class CloneableImageVD(object): @abc.abstractmethod def clone_image(self, volume, image_location, image_id, image_meta, image_service): """Create a volume efficiently from an existing image. image_location is a string whose format depends on the image service backend in use. The driver should use it to determine whether cloning is possible. image_id is a string which represents id of the image. It can be used by the driver to introspect internal stores or registry to do an efficient image clone. image_meta is a dictionary that includes 'disk_format' (e.g. raw, qcow2) and other image attributes that allow drivers to decide whether they can clone the image without first requiring conversion. image_service is the reference of the image_service to use. Note that this is needed to be passed here for drivers that will want to fetch images from the image service directly. Returns a dict of volume properties eg. provider_location, boolean indicating whether cloning occurred """ return None, False @six.add_metaclass(abc.ABCMeta) class MigrateVD(object): @abc.abstractmethod def migrate_volume(self, context, volume, host): """Migrate the volume to the specified host. Returns a boolean indicating whether the migration occurred, as well as model_update. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
""" return (False, None) @six.add_metaclass(abc.ABCMeta) class ExtendVD(object): @abc.abstractmethod def extend_volume(self, volume, new_size): return @six.add_metaclass(abc.ABCMeta) class TransferVD(object): def accept_transfer(self, context, volume, new_user, new_project): """Accept the transfer of a volume for a new user/project.""" pass @six.add_metaclass(abc.ABCMeta) class ManageableVD(object): @abc.abstractmethod def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ return @abc.abstractmethod def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ return @abc.abstractmethod def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param volume: Cinder volume to unmanage """ pass @six.add_metaclass(abc.ABCMeta) class ManageableSnapshotsVD(object): # NOTE: Can't use abstractmethod before all drivers implement it def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. 
If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. """ return # NOTE: Can't use abstractmethod before all drivers implement it def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. """ return # NOTE: Can't use abstractmethod before all drivers implement it def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. """ pass # TODO(jdg): Remove this after the V2.1 code merges @six.add_metaclass(abc.ABCMeta) class ReplicaVD(object): @abc.abstractmethod def reenable_replication(self, context, volume): """Re-enable replication between the replica and primary volume. This is used to re-enable/fix the replication between primary and secondary. One use is as part of the fail-back process, when you re-synchronize your old primary with the promoted volume (the old replica). Returns model_update for the volume to reflect the actions of the driver. The driver is expected to update the following entries: 'replication_status' 'replication_extended_status' 'replication_driver_data' Possible 'replication_status' values (in model_update) are: 'error' - replication in error state 'copying' - replication copying data to secondary (inconsistent) 'active' - replication copying data to secondary (consistent) 'active-stopped' - replication data copy on hold (consistent) 'inactive' - replication data copy on hold (inconsistent) Values in 'replication_extended_status' and 'replication_driver_data' are managed by the driver. :param context: Context :param volume: A dictionary describing the volume """ return def get_replication_status(self, context, volume): """Query the actual volume replication status from the driver. Returns model_update for the volume. The driver is expected to update the following entries: 'replication_status' 'replication_extended_status' 'replication_driver_data' Possible 'replication_status' values (in model_update) are: 'error' - replication in error state 'copying' - replication copying data to secondary (inconsistent) 'active' - replication copying data to secondary (consistent) 'active-stopped' - replication data copy on hold (consistent) 'inactive' - replication data copy on hold (inconsistent) Values in 'replication_extended_status' and 'replication_driver_data' are managed by the driver. :param context: Context :param volume: A dictionary describing the volume """ return None @abc.abstractmethod def promote_replica(self, context, volume): """Promote the replica to be the primary volume. Following this command, replication between the volumes at the storage level should be stopped, the replica should be available to be attached, and the replication status should be 'inactive'. Returns model_update for the volume.
The driver is expected to update the following entries: 'replication_status' 'replication_extended_status' 'replication_driver_data' Possible 'replication_status' values (in model_update) are: 'error' - replication in error state 'inactive' - replication data copy on hold (inconsistent) Values in 'replication_extended_status' and 'replication_driver_data' are managed by the driver. :param context: Context :param volume: A dictionary describing the volume """ return @abc.abstractmethod def create_replica_test_volume(self, volume, src_vref): """Creates a test replica clone of the specified replicated volume. Create a clone of the replicated (secondary) volume. """ return class VolumeDriver(ConsistencyGroupVD, TransferVD, ManageableVD, ExtendVD, CloneableImageVD, ManageableSnapshotsVD, SnapshotVD, ReplicaVD, LocalVD, MigrateVD, BaseVD): """This class will be deprecated soon. Please use the abstract classes above for new drivers. """ def check_for_setup_error(self): raise NotImplementedError() def create_volume(self, volume): raise NotImplementedError() def create_volume_from_snapshot(self, volume, snapshot): raise NotImplementedError() def create_replica_test_volume(self, volume, src_vref): raise NotImplementedError() def delete_volume(self, volume): raise NotImplementedError() def create_snapshot(self, snapshot): raise NotImplementedError() def delete_snapshot(self, snapshot): raise NotImplementedError() def local_path(self, volume): raise NotImplementedError() def clear_download(self, context, volume): pass def extend_volume(self, volume, new_size): msg = _("Extend volume not implemented") raise NotImplementedError(msg) def manage_existing(self, volume, existing_ref): msg = _("Manage existing volume not implemented.") raise NotImplementedError(msg) def manage_existing_get_size(self, volume, existing_ref): msg = _("Manage existing volume not implemented.") raise NotImplementedError(msg) def unmanage(self, volume): pass def manage_existing_snapshot(self, snapshot, existing_ref): msg = _("Manage existing snapshot not implemented.") raise NotImplementedError(msg) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): msg = _("Manage existing snapshot not implemented.") raise NotImplementedError(msg) def unmanage_snapshot(self, snapshot): """Unmanage the specified snapshot from Cinder management.""" def retype(self, context, volume, new_type, diff, host): return False, None def reenable_replication(self, context, volume): msg = _("sync_replica not implemented.") raise NotImplementedError(msg) def promote_replica(self, context, volume): msg = _("promote_replica not implemented.") raise NotImplementedError(msg) # ####### Interface methods for DataPath (Connector) ######## def ensure_export(self, context, volume): raise NotImplementedError() def create_export(self, context, volume, connector): raise NotImplementedError() def create_export_snapshot(self, context, snapshot, connector): raise NotImplementedError() def remove_export(self, context, volume): raise NotImplementedError() def remove_export_snapshot(self, context, snapshot): raise NotImplementedError() def initialize_connection(self, volume, connector, **kwargs): raise NotImplementedError() def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Allow connection from connector for a snapshot.""" def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector""" def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallow connection from connector for 
a snapshot.""" def create_consistencygroup(self, context, group): """Creates a consistencygroup. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :returns: model_update model_update will be in this format: {'status': xxx, ......}. If the status in model_update is 'error', the manager will throw an exception and it will be caught in the try-except block in the manager. If the driver throws an exception, the manager will also catch it in the try-except block. The group status in the db will be changed to 'error'. For a successful operation, the driver can either build the model_update and return it or return None. The group status will be set to 'available'. """ raise NotImplementedError() def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistencygroup from source. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :param volumes: a list of volume dictionaries in the group. :param cgsnapshot: the dictionary of the cgsnapshot as source. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. :returns: model_update, volumes_model_update The source can be cgsnapshot or a source cg. param volumes is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Volume to be precise. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. To be consistent with other volume operations, the manager will assume the operation is successful if no exception is thrown by the driver. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. """ raise NotImplementedError() def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be deleted. :param volumes: a list of volume dictionaries in the group. :returns: model_update, volumes_model_update param volumes is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Volume to be precise. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate volumes_model_update and model_update and return them. The manager will check volumes_model_update and update db accordingly for each volume. If the driver successfully deleted some volumes but failed to delete others, it should set statuses of the volumes accordingly so that the manager can update db correctly. If the status in any entry of volumes_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of the group will be set to 'error' in the db. 
If volumes_model_update is not returned by the driver, the manager will set the status of every volume in the group to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager. The statuses of the group and all volumes in it will be set to 'error'. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. The statuses of the group and all volumes will be set to 'deleted' after the manager deletes them from db. """ raise NotImplementedError() def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. :param remove_volumes: a list of volume dictionaries to be removed. :returns: model_update, add_volumes_update, remove_volumes_update model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'. add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires a {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will remain its original status. Also note that you cannot directly assign add_volumes to add_volumes_update as add_volumes is a list of cinder.db.sqlalchemy.models.Volume objects and cannot be used for db update directly. Same with remove_volumes. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'. """ raise NotImplementedError() def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be created. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: model_update, snapshots_model_update param snapshots is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error', the status in model_update will be set to the same if it is not already 'error'. If the status in model_update is 'error', the manager will raise an exception and the status of cgsnapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of cgsnapshot and all snapshots will be set to 'error'. 
For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of cgsnapshot and all snapshots will be set to 'deleted' after the manager deletes them from db. """ raise NotImplementedError() def clone_image(self, volume, image_location, image_id, image_meta, image_service): return None, False def get_pool(self, volume): """Return the name of the pool where the volume resides. :param volume: The volume hosted by the driver. :returns: name of the pool the given volume is in. """ return None def migrate_volume(self, context, volume, host): return (False, None) class ProxyVD(object): """Proxy Volume Driver used to mark proxy drivers. If a driver uses a proxy class (e.g. by using __setattr__ and __getattr__) without directly inheriting from the base volume driver, this class helps to mark it and to retrieve the actual driver object in use. """ def _get_driver(self): """Returns the actual driver object. Can be overloaded by the proxy. """ return getattr(self, "driver", None) class ISCSIDriver(VolumeDriver): """Executes commands relating to ISCSI volumes. We make use of model provider properties as follows: ``provider_location`` if present, contains the iSCSI target information in the same format as an ietadm discovery, i.e. '<ip address>:<port>,<portal> <target IQN>' ``provider_auth`` if present, contains a space-separated triple: '<auth method> <auth username> <auth password>'. `CHAP` is the only auth_method in use at the moment.
""" def __init__(self, *args, **kwargs): super(ISCSIDriver, self).__init__(*args, **kwargs) def _do_iscsi_discovery(self, volume): # TODO(justinsb): Deprecate discovery and use stored info # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) LOG.warning(_LW("ISCSI provider_location not " "stored, using discovery")) volume_name = volume['name'] try: # NOTE(griff) We're doing the split straight away which should be # safe since using '@' in hostname is considered invalid (out, _err) = self._execute('iscsiadm', '-m', 'discovery', '-t', 'sendtargets', '-p', volume['host'].split('@')[0], run_as_root=True) except processutils.ProcessExecutionError as ex: LOG.error(_LE("ISCSI discovery attempt failed for:%s"), volume['host'].split('@')[0]) LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr) return None for target in out.splitlines(): if (self.configuration.iscsi_ip_address in target and volume_name in target): return target return None def _get_iscsi_properties(self, volume, multipath=False): """Gets iscsi configuration We ideally get saved information in the volume entity, but fall back to discovery if need be. Discovery may be completely removed in future The properties are: :target_discovered: boolean indicating whether discovery was used :target_iqn: the IQN of the iSCSI target :target_portal: the portal of the iSCSI target :target_lun: the lun of the iSCSI target :volume_id: the id of the volume (currently used by xen) :auth_method:, :auth_username:, :auth_password: the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. :discard: boolean indicating if discard is supported In some of drivers that support multiple connections (for multipath and for single path with failover on connection failure), it returns :target_iqns, :target_portals, :target_luns, which contain lists of multiple values. The main portal information is also returned in :target_iqn, :target_portal, :target_lun for backward compatibility. Note that some of drivers don't return :target_portals even if they support multipath. Then the connector should use sendtargets discovery to find the other portals if it supports multipath. 
""" properties = {} location = volume['provider_location'] if location: # provider_location is the same format as iSCSI discovery output properties['target_discovered'] = False else: location = self._do_iscsi_discovery(volume) if not location: msg = (_("Could not find iSCSI export for volume %s") % (volume['name'])) raise exception.InvalidVolume(reason=msg) LOG.debug("ISCSI Discovery: Found %s", location) properties['target_discovered'] = True results = location.split(" ") portals = results[0].split(",")[0].split(";") iqn = results[1] nr_portals = len(portals) try: lun = int(results[2]) except (IndexError, ValueError): if (self.configuration.volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver' and self.configuration.iscsi_helper in ('tgtadm', 'iseradm')): lun = 1 else: lun = 0 if nr_portals > 1: properties['target_portals'] = portals properties['target_iqns'] = [iqn] * nr_portals properties['target_luns'] = [lun] * nr_portals properties['target_portal'] = portals[0] properties['target_iqn'] = iqn properties['target_lun'] = lun properties['volume_id'] = volume['id'] auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret geometry = volume.get('provider_geometry', None) if geometry: (physical_block_size, logical_block_size) = geometry.split() properties['physical_block_size'] = physical_block_size properties['logical_block_size'] = logical_block_size encryption_key_id = volume.get('encryption_key_id', None) properties['encrypted'] = encryption_key_id is not None return properties def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', iscsi_properties['target_iqn'], '-p', iscsi_properties['target_portal'], *iscsi_command, run_as_root=True, check_exit_code=check_exit_code) LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s", {'command': iscsi_command, 'out': out, 'err': err}) return (out, err) def _run_iscsiadm_bare(self, iscsi_command, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) (out, err) = self._execute('iscsiadm', *iscsi_command, run_as_root=True, check_exit_code=check_exit_code) LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s", {'command': iscsi_command, 'out': out, 'err': err}) return (out, err) def _iscsiadm_update(self, iscsi_properties, property_key, property_value, **kwargs): iscsi_command = ('--op', 'update', '-n', property_key, '-v', property_value) return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs) def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined in _get_iscsi_properties. 
Example return value:: { 'driver_volume_type': 'iscsi' 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': 1, 'discard': False, } } If the backend driver supports multiple connections for multipath and for single path with failover, "target_portals", "target_iqns", "target_luns" are also populated:: { 'driver_volume_type': 'iscsi' 'data': { 'target_discovered': False, 'target_iqn': 'iqn.2010-10.org.openstack:volume1', 'target_iqns': ['iqn.2010-10.org.openstack:volume1', 'iqn.2010-10.org.openstack:volume1-2'], 'target_portal': '10.0.0.1:3260', 'target_portals': ['10.0.0.1:3260', '10.0.1.1:3260'] 'target_lun': 1, 'target_luns': [1, 1], 'volume_id': 1, 'discard': False, } } """ # NOTE(jdg): Yes, this is duplicated in the volume/target # drivers, for now leaving it as there are 3'rd party # drivers that don't use target drivers, but inherit from # this base class and use this init data iscsi_properties = self._get_iscsi_properties(volume) return { 'driver_volume_type': self.configuration.safe_get('iscsi_protocol'), 'data': iscsi_properties } def validate_connector(self, connector): # iSCSI drivers require the initiator information required = 'initiator' if required not in connector: LOG.error(_LE('The volume driver requires %(data)s ' 'in the connector.'), {'data': required}) raise exception.InvalidConnectorException(missing=required) def terminate_connection(self, volume, connector, **kwargs): pass def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. """ if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats...") data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'Generic_iSCSI' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' data["storage_protocol"] = 'iSCSI' data["pools"] = [] data["replication_enabled"] = False self._update_pools_and_stats(data) class FakeISCSIDriver(ISCSIDriver): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): super(FakeISCSIDriver, self).__init__(execute=self.fake_execute, *args, **kwargs) def _update_pools_and_stats(self, data): fake_pool = {} fake_pool.update(dict( pool_name=data["volume_backend_name"], total_capacity_gb='infinite', free_capacity_gb='infinite', provisioned_capacity_gb=0, reserved_percentage=100, QoS_support=False, filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function() )) data["pools"].append(fake_pool) self._stats = data def create_volume(self, volume): pass def check_for_setup_error(self): """No setup necessary in fake mode.""" pass def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'iscsi', 'discard': False, } def initialize_connection_snapshot(self, snapshot, connector): return { 'driver_volume_type': 'iscsi', } def terminate_connection(self, volume, connector, **kwargs): pass def terminate_connection_snapshot(self, snapshot, connector, **kwargs): pass @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" LOG.debug("FAKE ISCSI: %s", cmd) return (None, None) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" pass def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" pass 
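# NOTE(editor): illustrative aside, not part of the original source.
# A worked example of the provider_location string parsed by
# _get_iscsi_properties() above (values are made up):
#
#     location = '10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-0001 0'
#     results = location.split(' ')
#     portals = results[0].split(',')[0].split(';')   # ['10.0.0.2:3260']
#     iqn = results[1]         # 'iqn.2010-10.org.openstack:volume-0001'
#     lun = int(results[2])    # 0
#
# Multiple portals are ';'-separated inside results[0]; that is the case
# that populates target_portals/target_iqns/target_luns for multipath.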
def delete_volume(self, volume): """Deletes a volume.""" pass def create_snapshot(self, snapshot): """Creates a snapshot.""" pass def delete_snapshot(self, snapshot): """Deletes a snapshot.""" pass def local_path(self, volume): return '/tmp/volume-%s' % volume.id def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" pass def create_export(self, context, volume, connector): """Exports the volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ pass def create_export_snapshot(self, context, snapshot, connector): """Exports the snapshot. Can optionally return a Dictionary of changes to the snapshot object to be persisted. """ pass def remove_export(self, context, volume): """Removes an export for a volume.""" pass def remove_export_snapshot(self, context, snapshot): """Removes an export for a snapshot.""" pass class ISERDriver(ISCSIDriver): """Executes commands relating to ISER volumes. We make use of model provider properties as follows: ``provider_location`` if present, contains the iSER target information in the same format as an ietadm discovery i.e. ':, ' ``provider_auth`` if present, contains a space-separated triple: ' '. `CHAP` is the only auth_method in use at the moment. """ def __init__(self, *args, **kwargs): super(ISERDriver, self).__init__(*args, **kwargs) # for backward compatibility self.configuration.num_volume_device_scan_tries = \ self.configuration.num_iser_scan_tries self.configuration.iscsi_target_prefix = \ self.configuration.iser_target_prefix self.configuration.iscsi_ip_address = \ self.configuration.iser_ip_address self.configuration.iscsi_port = self.configuration.iser_port def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The iser driver returns a driver_volume_type of 'iser'. The format of the driver data is defined in _get_iser_properties. Example return value:: { 'driver_volume_type': 'iser' 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.iser.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': 1, } } """ iser_properties = self._get_iscsi_properties(volume) return { 'driver_volume_type': 'iser', 'data': iser_properties } def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats...") data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'Generic_iSER' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' data["storage_protocol"] = 'iSER' data["pools"] = [] self._update_pools_and_stats(data) class FakeISERDriver(FakeISCSIDriver): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): super(FakeISERDriver, self).__init__(execute=self.fake_execute, *args, **kwargs) def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'iser', 'data': {} } @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" LOG.debug("FAKE ISER: %s", cmd) return (None, None) class FibreChannelDriver(VolumeDriver): """Executes commands relating to Fibre Channel volumes.""" def __init__(self, *args, **kwargs): super(FibreChannelDriver, self).__init__(*args, **kwargs) def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The driver returns a driver_volume_type of 'fibre_channel'. 
The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values:: { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', 'discard': False, } } or { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'discard': False, } } """ msg = _("Driver must implement initialize_connection") raise NotImplementedError(msg) def validate_connector(self, connector): """Fail if connector doesn't contain all the data needed by driver. Do a check on the connector and ensure that it has wwnns, wwpns. """ self.validate_connector_has_setting(connector, 'wwpns') self.validate_connector_has_setting(connector, 'wwnns') @staticmethod def validate_connector_has_setting(connector, setting): """Test for non-empty setting in connector.""" if setting not in connector or not connector[setting]: LOG.error(_LE( "FibreChannelDriver validate_connector failed. " "No '%(setting)s'. Make sure HBA state is Online."), {'setting': setting}) raise exception.InvalidConnectorException(missing=setting) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, update the stats first. """ if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats...") data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'Generic_FC' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' data["storage_protocol"] = 'FC' data["pools"] = [] self._update_pools_and_stats(data) cinder-8.0.0/cinder/volume/rpcapi.py0000664000567000056710000004415312701406250020533 0ustar jenkinsjenkins00000000000000# Copyright 2012, Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the volume RPC API. """ from oslo_config import cfg from oslo_serialization import jsonutils from cinder import exception from cinder.i18n import _ from cinder import quota from cinder import rpc from cinder.volume import utils CONF = cfg.CONF QUOTAS = quota.QUOTAS class VolumeAPI(rpc.RPCAPI): """Client side of the volume rpc API. API version history: 1.0 - Initial version. 1.1 - Adds clone volume option to create_volume. 1.2 - Add publish_service_capabilities() method. 1.3 - Pass all image metadata (not just ID) in copy_volume_to_image. 1.4 - Add request_spec, filter_properties and allow_reschedule arguments to create_volume(). 1.5 - Add accept_transfer. 1.6 - Add extend_volume. 1.7 - Adds host_name parameter to attach_volume() to allow attaching to host rather than instance. 1.8 - Add migrate_volume, rename_volume. 1.9 - Add new_user and new_project to accept_transfer. 1.10 - Add migrate_volume_completion, remove rename_volume. 1.11 - Adds mode parameter to attach_volume() to support volume read-only attaching.
1.12 - Adds retype. 1.13 - Adds create_export. 1.14 - Adds reservation parameter to extend_volume(). 1.15 - Adds manage_existing and unmanage_only flag to delete_volume. 1.16 - Removes create_export. 1.17 - Add replica option to create_volume, promote_replica and sync_replica. 1.18 - Adds create_consistencygroup, delete_consistencygroup, create_cgsnapshot, and delete_cgsnapshot. Also adds the consistencygroup_id parameter in create_volume. 1.19 - Adds update_migrated_volume 1.20 - Adds support for sending objects over RPC in create_snapshot() and delete_snapshot() 1.21 - Adds update_consistencygroup. 1.22 - Adds create_consistencygroup_from_src. 1.23 - Adds attachment_id to detach_volume. 1.24 - Removed duplicated parameters: snapshot_id, image_id, source_volid, source_replicaid, consistencygroup_id and cgsnapshot_id from create_volume. All of them are already passed either in request_spec or available in the DB. 1.25 - Add source_cg to create_consistencygroup_from_src. 1.26 - Adds support for sending objects over RPC in create_consistencygroup(), create_consistencygroup_from_src(), update_consistencygroup() and delete_consistencygroup(). 1.27 - Adds support for replication V2 1.28 - Adds manage_existing_snapshot 1.29 - Adds get_capabilities. 1.30 - Adds remove_export 1.31 - Updated: create_consistencygroup_from_src(), create_cgsnapshot() and delete_cgsnapshot() to cast method only with necessary args. Forwarding CGSnapshot object instead of CGSnapshot_id. 1.32 - Adds support for sending objects over RPC in create_volume(). 1.33 - Adds support for sending objects over RPC in delete_volume(). 1.34 - Adds support for sending objects over RPC in retype(). 1.35 - Adds support for sending objects over RPC in extend_volume(). 1.36 - Adds support for sending objects over RPC in migrate_volume(), migrate_volume_completion(), and update_migrated_volume(). 1.37 - Adds old_reservations parameter to retype to support quota checks in the API. 1.38 - Scaling backup service, add get_backup_device() and secure_file_operations_enabled() 1.39 - Update replication methods to reflect new backend rep strategy 1.40 - Add cascade option to delete_volume(). ... Mitaka supports messaging version 1.40. Any changes to existing methods in 1.x after that point should be done so that they can handle the version_cap being set to 1.40.
2.0 - Remove 1.x compatibility """ RPC_API_VERSION = '1.40' TOPIC = CONF.volume_topic BINARY = 'cinder-volume' def _compat_ver(self, current, legacy): if self.client.can_send_version(current): return current else: return legacy def _get_cctxt(self, host, version): new_host = utils.get_volume_rpc_host(host) return self.client.prepare(server=new_host, version=version) def create_consistencygroup(self, ctxt, group, host): version = self._compat_ver('2.0', '1.26') cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'create_consistencygroup', group=group) def delete_consistencygroup(self, ctxt, group): version = self._compat_ver('2.0', '1.26') cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'delete_consistencygroup', group=group) def update_consistencygroup(self, ctxt, group, add_volumes=None, remove_volumes=None): version = self._compat_ver('2.0', '1.26') cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'update_consistencygroup', group=group, add_volumes=add_volumes, remove_volumes=remove_volumes) def create_consistencygroup_from_src(self, ctxt, group, cgsnapshot=None, source_cg=None): version = self._compat_ver('2.0', '1.31') cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'create_consistencygroup_from_src', group=group, cgsnapshot=cgsnapshot, source_cg=source_cg) def create_cgsnapshot(self, ctxt, cgsnapshot): version = self._compat_ver('2.0', '1.31') cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host, version) cctxt.cast(ctxt, 'create_cgsnapshot', cgsnapshot=cgsnapshot) def delete_cgsnapshot(self, ctxt, cgsnapshot): version = self._compat_ver('2.0', '1.31') cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host, version) cctxt.cast(ctxt, 'delete_cgsnapshot', cgsnapshot=cgsnapshot) def create_volume(self, ctxt, volume, host, request_spec, filter_properties, allow_reschedule=True): request_spec_p = jsonutils.to_primitive(request_spec) msg_args = {'volume_id': volume.id, 'request_spec': request_spec_p, 'filter_properties': filter_properties, 'allow_reschedule': allow_reschedule} if self.client.can_send_version('2.0'): version = '2.0' msg_args['volume'] = volume elif self.client.can_send_version('1.32'): version = '1.32' msg_args['volume'] = volume else: version = '1.24' cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'create_volume', **msg_args) def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False): msg_args = {'volume_id': volume.id, 'unmanage_only': unmanage_only} version = '1.15' if self.client.can_send_version('2.0'): version = '2.0' msg_args['volume'] = volume if cascade: msg_args['cascade'] = cascade elif self.client.can_send_version('1.40'): version = '1.40' msg_args['volume'] = volume if cascade: msg_args['cascade'] = cascade elif cascade: msg = _('Cascade option is not supported.') raise exception.Invalid(reason=msg) elif self.client.can_send_version('1.33'): version = '1.33' msg_args['volume'] = volume cctxt = self._get_cctxt(volume.host, version) cctxt.cast(ctxt, 'delete_volume', **msg_args) def create_snapshot(self, ctxt, volume, snapshot): version = self._compat_ver('2.0', '1.20') cctxt = self._get_cctxt(volume['host'], version=version) cctxt.cast(ctxt, 'create_snapshot', volume_id=volume['id'], snapshot=snapshot) def delete_snapshot(self, ctxt, snapshot, host, unmanage_only=False): version = self._compat_ver('2.0', '1.20') cctxt = self._get_cctxt(host, version=version) cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot,
unmanage_only=unmanage_only) def attach_volume(self, ctxt, volume, instance_uuid, host_name, mountpoint, mode): version = self._compat_ver('2.0', '1.11') cctxt = self._get_cctxt(volume['host'], version) return cctxt.call(ctxt, 'attach_volume', volume_id=volume['id'], instance_uuid=instance_uuid, host_name=host_name, mountpoint=mountpoint, mode=mode) def detach_volume(self, ctxt, volume, attachment_id): version = self._compat_ver('2.0', '1.20') cctxt = self._get_cctxt(volume['host'], version) return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'], attachment_id=attachment_id) def copy_volume_to_image(self, ctxt, volume, image_meta): version = self._compat_ver('2.0', '1.3') cctxt = self._get_cctxt(volume['host'], version) cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'], image_meta=image_meta) def initialize_connection(self, ctxt, volume, connector): version = self._compat_ver('2.0', '1.0') cctxt = self._get_cctxt(volume['host'], version=version) return cctxt.call(ctxt, 'initialize_connection', volume_id=volume['id'], connector=connector) def terminate_connection(self, ctxt, volume, connector, force=False): version = self._compat_ver('2.0', '1.0') cctxt = self._get_cctxt(volume['host'], version=version) return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'], connector=connector, force=force) def remove_export(self, ctxt, volume): version = self._compat_ver('2.0', '1.30') cctxt = self._get_cctxt(volume['host'], version) cctxt.cast(ctxt, 'remove_export', volume_id=volume['id']) def publish_service_capabilities(self, ctxt): version = self._compat_ver('2.0', '1.2') cctxt = self.client.prepare(fanout=True, version=version) cctxt.cast(ctxt, 'publish_service_capabilities') def accept_transfer(self, ctxt, volume, new_user, new_project): version = self._compat_ver('2.0', '1.9') cctxt = self._get_cctxt(volume['host'], version) return cctxt.call(ctxt, 'accept_transfer', volume_id=volume['id'], new_user=new_user, new_project=new_project) def extend_volume(self, ctxt, volume, new_size, reservations): msg_args = {'volume_id': volume.id, 'new_size': new_size, 'reservations': reservations} if self.client.can_send_version('2.0'): version = '2.0' msg_args['volume'] = volume elif self.client.can_send_version('1.35'): version = '1.35' msg_args['volume'] = volume else: version = '1.14' cctxt = self._get_cctxt(volume.host, version) cctxt.cast(ctxt, 'extend_volume', **msg_args) def migrate_volume(self, ctxt, volume, dest_host, force_host_copy): host_p = {'host': dest_host.host, 'capabilities': dest_host.capabilities} msg_args = {'volume_id': volume.id, 'host': host_p, 'force_host_copy': force_host_copy} if self.client.can_send_version('2.0'): version = '2.0' msg_args['volume'] = volume elif self.client.can_send_version('1.36'): version = '1.36' msg_args['volume'] = volume else: version = '1.8' cctxt = self._get_cctxt(volume.host, version) cctxt.cast(ctxt, 'migrate_volume', **msg_args) def migrate_volume_completion(self, ctxt, volume, new_volume, error): msg_args = {'volume_id': volume.id, 'new_volume_id': new_volume.id, 'error': error} if self.client.can_send_version('2.0'): version = '2.0' msg_args['volume'] = volume msg_args['new_volume'] = new_volume elif self.client.can_send_version('1.36'): version = '1.36' msg_args['volume'] = volume msg_args['new_volume'] = new_volume else: version = '1.10' cctxt = self._get_cctxt(volume.host, version) return cctxt.call(ctxt, 'migrate_volume_completion', **msg_args) def retype(self, ctxt, volume, new_type_id, dest_host, 
migration_policy='never', reservations=None, old_reservations=None): host_p = {'host': dest_host.host, 'capabilities': dest_host.capabilities} msg_args = {'volume_id': volume.id, 'new_type_id': new_type_id, 'host': host_p, 'migration_policy': migration_policy, 'reservations': reservations} if self.client.can_send_version('2.0'): version = '2.0' msg_args.update(volume=volume, old_reservations=old_reservations) elif self.client.can_send_version('1.37'): version = '1.37' msg_args.update(volume=volume, old_reservations=old_reservations) elif self.client.can_send_version('1.34'): if old_reservations is not None: QUOTAS.rollback(ctxt, old_reservations) version = '1.34' msg_args['volume'] = volume else: if old_reservations is not None: QUOTAS.rollback(ctxt, old_reservations) version = '1.12' cctxt = self._get_cctxt(volume.host, version) cctxt.cast(ctxt, 'retype', **msg_args) def manage_existing(self, ctxt, volume, ref): version = self._compat_ver('2.0', '1.15') cctxt = self._get_cctxt(volume['host'], version) cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref) def promote_replica(self, ctxt, volume): version = self._compat_ver('2.0', '1.17') cctxt = self._get_cctxt(volume['host'], version) cctxt.cast(ctxt, 'promote_replica', volume_id=volume['id']) def reenable_replication(self, ctxt, volume): version = self._compat_ver('2.0', '1.17') cctxt = self._get_cctxt(volume['host'], version) cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id']) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): version = self._compat_ver('2.0', '1.36') cctxt = self._get_cctxt(new_volume['host'], version) cctxt.call(ctxt, 'update_migrated_volume', volume=volume, new_volume=new_volume, volume_status=original_volume_status) def freeze_host(self, ctxt, host): """Set backend host to frozen.""" version = self._compat_ver('2.0', '1.39') cctxt = self._get_cctxt(host, version) return cctxt.call(ctxt, 'freeze_host') def thaw_host(self, ctxt, host): """Clear the frozen setting on a backend host.""" version = self._compat_ver('2.0', '1.39') cctxt = self._get_cctxt(host, version) return cctxt.call(ctxt, 'thaw_host') def failover_host(self, ctxt, host, secondary_backend_id=None): """Failover host to the specified backend_id (secondary). """ version = self._compat_ver('2.0', '1.39') cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'failover_host', secondary_backend_id=secondary_backend_id) def manage_existing_snapshot(self, ctxt, snapshot, ref, host): version = self._compat_ver('2.0', '1.28') cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'manage_existing_snapshot', snapshot=snapshot, ref=ref) def get_capabilities(self, ctxt, host, discover): version = self._compat_ver('2.0', '1.29') cctxt = self._get_cctxt(host, version) return cctxt.call(ctxt, 'get_capabilities', discover=discover) def get_backup_device(self, ctxt, backup, volume): if (not self.client.can_send_version('1.38') and not self.client.can_send_version('2.0')): msg = _('One of cinder-volume services is too old to accept such ' 'request. Are you running mixed Liberty-Mitaka ' 'cinder-volumes?') raise exception.ServiceTooOld(msg) version = self._compat_ver('2.0', '1.38') cctxt = self._get_cctxt(volume.host, version) return cctxt.call(ctxt, 'get_backup_device', backup=backup) def secure_file_operations_enabled(self, ctxt, volume): if (not self.client.can_send_version('1.38') and not self.client.can_send_version('2.0')): msg = _('One of cinder-volume services is too old to accept such ' 'request. 
Are you running mixed Liberty-Mitaka ' 'cinder-volumes?') raise exception.ServiceTooOld(msg) version = self._compat_ver('2.0', '1.38') cctxt = self._get_cctxt(volume.host, version) return cctxt.call(ctxt, 'secure_file_operations_enabled', volume=volume) cinder-8.0.0/cinder/volume/configuration.py0000664000567000056710000000510312701406250022114 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Configuration support for all drivers. This module allows support for setting configurations either from default or from a particular FLAGS group, to be able to set multiple configurations for a given set of values. For instance, two lvm configurations can be set by naming them in groups as [lvm1] volume_group=lvm-group-1 ... [lvm2] volume_group=lvm-group-2 ... And the configuration group name will be passed in so that all calls to configuration.volume_group within that instance will be mapped to the proper named group. This class also ensures the implementation's configuration is grafted into the option group. This is due to the way cfg works. All cfg options must be defined and registered in the group in which they are used. """ from oslo_config import cfg from oslo_log import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) class Configuration(object): def __init__(self, volume_opts, config_group=None): """Initialize configuration. This takes care of grafting the implementation's config values into the config group """ self.config_group = config_group # set the local conf so that __call__'s know what to use if self.config_group: self._ensure_config_values(volume_opts) self.local_conf = CONF._get(self.config_group) else: self.local_conf = CONF def _ensure_config_values(self, volume_opts): CONF.register_opts(volume_opts, group=self.config_group) def append_config_values(self, volume_opts): self._ensure_config_values(volume_opts) def safe_get(self, value): try: return self.__getattr__(value) except cfg.NoSuchOptError: return None def __getattr__(self, value): # Don't use self.local_conf to avoid reentrant call to __getattr__() local_conf = object.__getattribute__(self, 'local_conf') return getattr(local_conf, value) cinder-8.0.0/cinder/volume/api.py0000664000567000056710000022707612701406257020044 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Handles all requests relating to volumes.""" import ast import collections import datetime import functools from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils import six from cinder.api import common from cinder import context from cinder import db from cinder.db import base from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE, _LI, _LW from cinder.image import cache as image_cache from cinder.image import glance from cinder import keymgr from cinder import objects from cinder.objects import base as objects_base from cinder.objects import fields import cinder.policy from cinder import quota from cinder import quota_utils from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import utils from cinder.volume.flows.api import create_volume from cinder.volume.flows.api import manage_existing from cinder.volume import qos_specs from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils as volume_utils from cinder.volume import volume_types allow_force_upload_opt = cfg.BoolOpt('enable_force_upload', default=False, help='Enables the Force option on ' 'upload_to_image. This enables ' 'running upload_volume on in-use ' 'volumes for backends that ' 'support it.') volume_host_opt = cfg.BoolOpt('snapshot_same_host', default=True, help='Create volume from snapshot at the host ' 'where snapshot resides') volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az', default=True, help='Ensure that the new volumes are the ' 'same AZ as snapshot or source volume') az_cache_time_opt = cfg.IntOpt('az_cache_duration', default=3600, help='Cache volume availability zones in ' 'memory for the provided duration in ' 'seconds') CONF = cfg.CONF CONF.register_opt(allow_force_upload_opt) CONF.register_opt(volume_host_opt) CONF.register_opt(volume_same_az_opt) CONF.register_opt(az_cache_time_opt) CONF.import_opt('glance_core_properties', 'cinder.image.glance') LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS def wrap_check_policy(func): """Check policy corresponding to the wrapped methods prior to execution This decorator requires the first 3 args of the wrapped function to be (self, context, volume) """ @functools.wraps(func) def wrapped(self, context, target_obj, *args, **kwargs): check_policy(context, func.__name__, target_obj) return func(self, context, target_obj, *args, **kwargs) return wrapped def check_policy(context, action, target_obj=None): target = { 'project_id': context.project_id, 'user_id': context.user_id, } if isinstance(target_obj, objects_base.CinderObject): # Turn object into dict so target.update can work target.update( target_obj.obj_to_primitive()['versioned_object.data'] or {}) else: target.update(target_obj or {}) _action = 'volume:%s' % action cinder.policy.enforce(context, _action, target) class API(base.Base): """API for interacting with the volume manager.""" AVAILABLE_MIGRATION_STATUS = (None, 'deleting', 'error', 'success') def __init__(self, db_driver=None, image_service=None): self.image_service = (image_service or glance.get_default_image_service()) self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() self.availability_zones = [] self.availability_zones_last_fetched = None self.key_manager = keymgr.API() super(API, 
self).__init__(db_driver) def list_availability_zones(self, enable_cache=False): """Describe the known availability zones :retval tuple of dicts, each with a 'name' and 'available' key """ refresh_cache = False if enable_cache: if self.availability_zones_last_fetched is None: refresh_cache = True else: cache_age = timeutils.delta_seconds( self.availability_zones_last_fetched, timeutils.utcnow()) if cache_age >= CONF.az_cache_duration: refresh_cache = True if refresh_cache or not enable_cache: topic = CONF.volume_topic ctxt = context.get_admin_context() services = objects.ServiceList.get_all_by_topic(ctxt, topic) az_data = [(s.availability_zone, s.disabled) for s in services] disabled_map = {} for (az_name, disabled) in az_data: tracked_disabled = disabled_map.get(az_name, True) disabled_map[az_name] = tracked_disabled and disabled azs = [{'name': name, 'available': not disabled} for (name, disabled) in disabled_map.items()] if refresh_cache: now = timeutils.utcnow() self.availability_zones = azs self.availability_zones_last_fetched = now LOG.debug("Availability zone cache updated, next update will" " occur around %s.", now + datetime.timedelta( seconds=CONF.az_cache_duration)) else: azs = self.availability_zones LOG.info(_LI("Availability Zones retrieved successfully.")) return tuple(azs) def _retype_is_possible(self, context, first_type_id, second_type_id, first_type=None, second_type=None): safe = False elevated = context.elevated() services = objects.ServiceList.get_all_by_topic(elevated, 'cinder-volume', disabled=True) if len(services.objects) == 1: safe = True else: type_a = first_type or volume_types.get_volume_type( elevated, first_type_id) type_b = second_type or volume_types.get_volume_type( elevated, second_type_id) if (volume_utils.matching_backend_name(type_a['extra_specs'], type_b['extra_specs'])): safe = True return safe def _is_volume_migrating(self, volume): # The migration status 'none' means no migration has ever been done # before. The migration status 'error' means the previous migration # failed. The migration status 'success' means the previous migration # succeeded. The migration status 'deleting' means the source volume # fails to delete after a migration. # All of the statuses above means the volume is not in the process # of a migration. return (volume['migration_status'] not in self.AVAILABLE_MIGRATION_STATUS) def create(self, context, size, name, description, snapshot=None, image_id=None, volume_type=None, metadata=None, availability_zone=None, source_volume=None, scheduler_hints=None, source_replica=None, consistencygroup=None, cgsnapshot=None, multiattach=False, source_cg=None): check_policy(context, 'create') # NOTE(jdg): we can have a create without size if we're # doing a create from snap or volume. Currently # the taskflow api will handle this and pull in the # size from the source. # NOTE(jdg): cinderclient sends in a string representation # of the size value. BUT there is a possibility that somebody # could call the API directly so the is_int_like check # handles both cases (string representation of true float or int). 
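# --- illustrative aside (editor's sketch, not part of cinder) ---
# The NOTE above relies on oslo.utils: strutils.is_int_like() accepts an
# int or its string representation, so '10' is a valid size while
# '10.5', 'ten' and 0 are rejected by the check that follows.
from oslo_utils import strutils

for candidate in (10, '10', '10.5', 'ten', 0):
    valid = strutils.is_int_like(candidate) and int(candidate) > 0
    print('%r -> %s' % (candidate, 'valid size' if valid else 'rejected'))
# --- end aside ---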
if size and (not strutils.is_int_like(size) or int(size) <= 0): msg = _('Invalid volume size provided for create request: %s ' '(size argument must be an integer (or string ' 'representation of an integer) and greater ' 'than zero).') % size raise exception.InvalidInput(reason=msg) if consistencygroup and (not cgsnapshot and not source_cg): if not volume_type: msg = _("volume_type must be provided when creating " "a volume in a consistency group.") raise exception.InvalidInput(reason=msg) cg_voltypeids = consistencygroup.get('volume_type_id') if volume_type.get('id') not in cg_voltypeids: msg = _("Invalid volume_type provided: %s (requested " "type must be supported by this consistency " "group).") % volume_type raise exception.InvalidInput(reason=msg) if volume_type and 'extra_specs' not in volume_type: extra_specs = volume_types.get_volume_type_extra_specs( volume_type['id']) volume_type['extra_specs'] = extra_specs if source_volume and volume_type: if volume_type['id'] != source_volume['volume_type_id']: if not self._retype_is_possible( context, volume_type['id'], source_volume['volume_type_id'], volume_type): msg = _("Invalid volume_type provided: %s (requested type " "is not compatible; either match source volume, " "or omit type argument).") % volume_type['id'] raise exception.InvalidInput(reason=msg) # When cloning replica (for testing), volume type must be omitted if source_replica and volume_type: msg = _("No volume_type should be provided when creating test " "replica.") raise exception.InvalidInput(reason=msg) if snapshot and volume_type: if volume_type['id'] != snapshot.volume_type_id: if not self._retype_is_possible(context, volume_type['id'], snapshot.volume_type_id, volume_type): msg = _("Invalid volume_type provided: %s (requested " "type is not compatible; recommend omitting " "the type argument).") % volume_type['id'] raise exception.InvalidInput(reason=msg) # Determine the valid availability zones that the volume could be # created in (a task in the flow will/can use this information to # ensure that the availability zone requested is valid). raw_zones = self.list_availability_zones(enable_cache=True) availability_zones = set([az['name'] for az in raw_zones]) if CONF.storage_availability_zone: availability_zones.add(CONF.storage_availability_zone) create_what = { 'context': context, 'raw_size': size, 'name': name, 'description': description, 'snapshot': snapshot, 'image_id': image_id, 'raw_volume_type': volume_type, 'metadata': metadata or {}, 'raw_availability_zone': availability_zone, 'source_volume': source_volume, 'scheduler_hints': scheduler_hints, 'key_manager': self.key_manager, 'source_replica': source_replica, 'optional_args': {'is_quota_committed': False}, 'consistencygroup': consistencygroup, 'cgsnapshot': cgsnapshot, 'multiattach': multiattach, } try: sched_rpcapi = (self.scheduler_rpcapi if (not cgsnapshot and not source_cg) else None) volume_rpcapi = (self.volume_rpcapi if (not cgsnapshot and not source_cg) else None) flow_engine = create_volume.get_flow(self.db, self.image_service, availability_zones, create_what, sched_rpcapi, volume_rpcapi) except Exception: msg = _('Failed to create api volume flow.') LOG.exception(msg) raise exception.CinderException(msg) # Attaching this listener will capture all of the notifications that # taskflow sends out and redirect them to a more useful log for # cinders debugging (or error reporting) usage. 
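# --- illustrative aside (editor's sketch, not part of cinder) ---
# A self-contained TaskFlow sketch of the pattern used here: build a
# flow, run it under a dynamic logging listener, then fetch the result
# from engine storage (mirroring flow_engine.storage.fetch('volume')
# below). The task and flow names are invented for illustration.
import logging

from taskflow import engines
from taskflow import task
from taskflow.listeners import logging as tf_logging
from taskflow.patterns import linear_flow

EXAMPLE_LOG = logging.getLogger(__name__)

class BuildVolume(task.Task):
    default_provides = 'volume'

    def execute(self, raw_size):
        # Stand-in for the real extract/quota/cast tasks in the flow.
        return {'size': int(raw_size), 'status': 'creating'}

flow = linear_flow.Flow('create_volume_sketch').add(BuildVolume())
engine = engines.load(flow, store={'raw_size': '10'})
with tf_logging.DynamicLoggingListener(engine, log=EXAMPLE_LOG):
    engine.run()
print(engine.storage.fetch('volume'))  # {'size': 10, 'status': 'creating'}
# --- end aside ---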
with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() vref = flow_engine.storage.fetch('volume') LOG.info(_LI("Volume created successfully."), resource=vref) return vref @wrap_check_policy def delete(self, context, volume, force=False, unmanage_only=False, cascade=False): if context.is_admin and context.project_id != volume.project_id: project_id = volume.project_id else: project_id = context.project_id if not volume.host: volume_utils.notify_about_volume_usage(context, volume, "delete.start") # NOTE(vish): scheduling failed, so delete it # Note(zhiteng): update volume quota reservation try: reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.volume_type_id) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE("Failed to update quota while " "deleting volume.")) volume.destroy() if reservations: QUOTAS.commit(context, reservations, project_id=project_id) volume_utils.notify_about_volume_usage(context, volume, "delete.end") LOG.info(_LI("Delete volume request issued successfully."), resource={'type': 'volume', 'id': volume.id}) return # Build required conditions for conditional update expected = {'attach_status': db.Not('attached'), 'migration_status': self.AVAILABLE_MIGRATION_STATUS, 'consistencygroup_id': None} # If not force deleting we have status conditions if not force: expected['status'] = ('available', 'error', 'error_restoring', 'error_extending') if cascade: # Allow deletion if all snapshots are in an expected state filters = [~db.volume_has_undeletable_snapshots_filter()] else: # Don't allow deletion of volume with snapshots filters = [~db.volume_has_snapshots_filter()] values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()} result = volume.conditional_update(values, expected, filters) if not result: status = utils.build_or_str(expected.get('status'), _('status must be %s and')) msg = _('Volume %s must not be migrating, attached, belong to a ' 'consistency group or have snapshots.') % status LOG.info(msg) raise exception.InvalidVolume(reason=msg) if cascade: values = {'status': 'deleting'} expected = {'status': ('available', 'error', 'deleting'), 'cgsnapshot_id': None} snapshots = objects.snapshot.SnapshotList.get_all_for_volume( context, volume.id) for s in snapshots: result = s.conditional_update(values, expected, filters) if not result: volume.update({'status': 'error_deleting'}) volume.save() msg = _('Failed to update snapshot.') raise exception.InvalidVolume(reason=msg) cache = image_cache.ImageVolumeCache(self.db, self) entry = cache.get_by_image_volume(context, volume.id) if entry: cache.evict(context, entry) # If the volume is encrypted, delete its encryption key from the key # manager. This operation makes volume deletion an irreversible process # because the volume cannot be decrypted without its key. 
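# --- illustrative aside (editor's sketch, not part of cinder) ---
# delete() above leans on conditional_update(): an atomic compare-and-
# swap expressed as UPDATE ... WHERE <expected state>, so two concurrent
# deletes cannot both succeed. A stripped-down sqlite3 model of the idea
# (table name and states invented for illustration):
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE volumes (id TEXT PRIMARY KEY, status TEXT)")
conn.execute("INSERT INTO volumes VALUES ('vol-1', 'available')")
cur = conn.execute(
    "UPDATE volumes SET status = 'deleting' "
    "WHERE id = ? AND status IN ('available', 'error')", ('vol-1',))
# rowcount reveals whether we won the race: 0 means another request
# already moved the volume out of a deletable state.
print('won the update:', cur.rowcount == 1)
# --- end aside ---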
encryption_key_id = volume.get('encryption_key_id', None) if encryption_key_id is not None: try: self.key_manager.delete_key(context, encryption_key_id) except Exception as e: LOG.warning(_LW("Unable to delete encryption key for " "volume: %s."), e.msg, resource=volume) self.volume_rpcapi.delete_volume(context, volume, unmanage_only, cascade) LOG.info(_LI("Delete volume request issued successfully."), resource=volume) @wrap_check_policy def update(self, context, volume, fields): if volume['status'] == 'maintenance': LOG.info(_LI("Unable to update volume, " "because it is in maintenance."), resource=volume) msg = _("The volume cannot be updated during maintenance.") raise exception.InvalidVolume(reason=msg) # NOTE(thangp): Update is called by various APIs, some of which are # not yet using oslo_versionedobjects. We need to handle the case # where volume is either a dict or an oslo_versionedobject. if isinstance(volume, objects_base.CinderObject): volume.update(fields) volume.save() LOG.info(_LI("Volume updated successfully."), resource=volume) else: vref = self.db.volume_update(context, volume['id'], fields) LOG.info(_LI("Volume updated successfully."), resource=vref) def get(self, context, volume_id, viewable_admin_meta=False): volume = objects.Volume.get_by_id(context, volume_id) if viewable_admin_meta: ctxt = context.elevated() admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume_id) volume.admin_metadata = admin_metadata volume.obj_reset_changes() try: check_policy(context, 'get', volume) except exception.PolicyNotAuthorized: # raise VolumeNotFound instead to make sure Cinder behaves # as it used to raise exception.VolumeNotFound(volume_id=volume_id) LOG.info(_LI("Volume info retrieved successfully."), resource=volume) return volume def get_all(self, context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=None): check_policy(context, 'get_all') if filters is None: filters = {} allTenants = utils.get_bool_param('all_tenants', filters) try: if limit is not None: limit = int(limit) if limit < 0: msg = _('limit param must be positive') raise exception.InvalidInput(reason=msg) except ValueError: msg = _('limit param must be an integer') raise exception.InvalidInput(reason=msg) # Non-admin shouldn't see temporary target of a volume migration, add # unique filter data to reflect that only volumes with a NULL # 'migration_status' or a 'migration_status' that does not start with # 'target:' should be returned (processed in db/sqlalchemy/api.py) if not context.is_admin: filters['no_migration_targets'] = True if filters: LOG.debug("Searching by: %s.", six.text_type(filters)) if context.is_admin and allTenants: # Need to remove all_tenants to pass the filtering below. del filters['all_tenants'] volumes = objects.VolumeList.get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) else: if viewable_admin_meta: context = context.elevated() volumes = objects.VolumeList.get_all_by_project( context, context.project_id, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) LOG.info(_LI("Get all volumes completed successfully.")) return volumes def get_snapshot(self, context, snapshot_id): snapshot = objects.Snapshot.get_by_id(context, snapshot_id) # FIXME(jdg): The objects don't have the db name entries # so build the resource tag manually for now. 
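# --- illustrative aside (editor's sketch, not part of cinder) ---
# get() above converts PolicyNotAuthorized into VolumeNotFound so an
# unauthorized caller cannot probe whether a volume exists. The same
# pattern in miniature; all names here are stand-ins:
class PolicyNotAuthorizedStub(Exception):
    pass

class NotFoundStub(Exception):
    pass

def check_policy_stub(project_id, resource):
    if resource['project_id'] != project_id:
        raise PolicyNotAuthorizedStub()

def get_resource(resources, project_id, resource_id):
    resource = resources[resource_id]
    try:
        check_policy_stub(project_id, resource)
    except PolicyNotAuthorizedStub:
        # Report "not found" rather than "forbidden" to avoid leaking
        # the resource's existence.
        raise NotFoundStub(resource_id)
    return resource

try:
    get_resource({'vol-1': {'project_id': 'p1'}}, 'p2', 'vol-1')
except NotFoundStub:
    print('unauthorized caller sees NotFound, not a policy error')
# --- end aside ---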
LOG.info(_LI("Snapshot retrieved successfully."), resource={'type': 'snapshot', 'id': snapshot.id}) return snapshot def get_volume(self, context, volume_id): check_policy(context, 'get_volume') volume = objects.Volume.get_by_id(context, volume_id) LOG.info(_LI("Volume retrieved successfully."), resource=volume) return volume def get_all_snapshots(self, context, search_opts=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): check_policy(context, 'get_all_snapshots') search_opts = search_opts or {} if context.is_admin and 'all_tenants' in search_opts: # Need to remove all_tenants to pass the filtering below. del search_opts['all_tenants'] snapshots = objects.SnapshotList.get_all( context, search_opts, marker, limit, sort_keys, sort_dirs, offset) else: snapshots = objects.SnapshotList.get_all_by_project( context, context.project_id, search_opts, marker, limit, sort_keys, sort_dirs, offset) LOG.info(_LI("Get all snapshots completed successfully.")) return snapshots @wrap_check_policy def reserve_volume(self, context, volume): expected = {'multiattach': volume.multiattach, 'status': (('available', 'in-use') if volume.multiattach else 'available')} result = volume.conditional_update({'status': 'attaching'}, expected) if not result: expected_status = utils.build_or_str(expected['status']) msg = _('Volume status must be %s to reserve.') % expected_status LOG.error(msg) raise exception.InvalidVolume(reason=msg) LOG.info(_LI("Reserve volume completed successfully."), resource=volume) @wrap_check_policy def unreserve_volume(self, context, volume): expected = {'status': 'attaching'} # Status change depends on whether it has attachments (in-use) or not # (available) value = {'status': db.Case([(db.volume_has_attachments_filter(), 'in-use')], else_='available')} volume.conditional_update(value, expected) LOG.info(_LI("Unreserve volume completed successfully."), resource=volume) @wrap_check_policy def begin_detaching(self, context, volume): # If we are in the middle of a volume migration, we don't want the # user to see that the volume is 'detaching'. Having # 'migration_status' set will have the same effect internally. expected = {'status': 'in-use', 'attach_status': 'attached', 'migration_status': self.AVAILABLE_MIGRATION_STATUS} result = volume.conditional_update({'status': 'detaching'}, expected) if not (result or self._is_volume_migrating(volume)): msg = _("Unable to detach volume. 
Volume status must be 'in-use' " "and attach_status must be 'attached' to detach.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) LOG.info(_LI("Begin detaching volume completed successfully."), resource=volume) @wrap_check_policy def roll_detaching(self, context, volume): volume.conditional_update({'status': 'in-use'}, {'status': 'detaching'}) LOG.info(_LI("Roll detaching of volume completed successfully."), resource=volume) @wrap_check_policy def attach(self, context, volume, instance_uuid, host_name, mountpoint, mode): if volume['status'] == 'maintenance': LOG.info(_LI('Unable to attach volume, ' 'because it is in maintenance.'), resource=volume) msg = _("The volume cannot be attached in maintenance mode.") raise exception.InvalidVolume(reason=msg) # We add readonly metadata if it doesn't already exist readonly = self.update_volume_admin_metadata(context.elevated(), volume, {'readonly': 'False'}, update=False)['readonly'] if readonly == 'True' and mode != 'ro': raise exception.InvalidVolumeAttachMode(mode=mode, volume_id=volume['id']) attach_results = self.volume_rpcapi.attach_volume(context, volume, instance_uuid, host_name, mountpoint, mode) LOG.info(_LI("Attach volume completed successfully."), resource=volume) return attach_results @wrap_check_policy def detach(self, context, volume, attachment_id): if volume['status'] == 'maintenance': LOG.info(_LI('Unable to detach volume, ' 'because it is in maintenance.'), resource=volume) msg = _("The volume cannot be detached in maintenance mode.") raise exception.InvalidVolume(reason=msg) detach_results = self.volume_rpcapi.detach_volume(context, volume, attachment_id) LOG.info(_LI("Detach volume completed successfully."), resource=volume) return detach_results @wrap_check_policy def initialize_connection(self, context, volume, connector): if volume['status'] == 'maintenance': LOG.info(_LI('Unable to initialize the connection for ' 'volume, because it is in ' 'maintenance.'), resource=volume) msg = _("The volume connection cannot be initialized in " "maintenance mode.") raise exception.InvalidVolume(reason=msg) init_results = self.volume_rpcapi.initialize_connection(context, volume, connector) LOG.info(_LI("Initialize volume connection completed successfully."), resource=volume) return init_results @wrap_check_policy def terminate_connection(self, context, volume, connector, force=False): self.volume_rpcapi.terminate_connection(context, volume, connector, force) LOG.info(_LI("Terminate volume connection completed successfully."), resource=volume) self.unreserve_volume(context, volume) @wrap_check_policy def accept_transfer(self, context, volume, new_user, new_project): if volume['status'] == 'maintenance': LOG.info(_LI('Unable to accept transfer for volume, ' 'because it is in maintenance.'), resource=volume) msg = _("The volume cannot accept transfer in maintenance mode.") raise exception.InvalidVolume(reason=msg) results = self.volume_rpcapi.accept_transfer(context, volume, new_user, new_project) LOG.info(_LI("Transfer volume completed successfully."), resource=volume) return results def _create_snapshot(self, context, volume, name, description, force=False, metadata=None, cgsnapshot_id=None): snapshot = self.create_snapshot_in_db( context, volume, name, description, force, metadata, cgsnapshot_id) self.volume_rpcapi.create_snapshot(context, volume, snapshot) return snapshot def create_snapshot_in_db(self, context, volume, name, description, force, metadata, cgsnapshot_id): check_policy(context, 'create_snapshot', volume) if 
volume['status'] == 'maintenance': LOG.info(_LI('Unable to create the snapshot for volume, ' 'because it is in maintenance.'), resource=volume) msg = _("The snapshot cannot be created when the volume is in " "maintenance mode.") raise exception.InvalidVolume(reason=msg) if self._is_volume_migrating(volume): # Volume is migrating, wait until done msg = _("Snapshot cannot be created while volume is migrating.") raise exception.InvalidVolume(reason=msg) if volume['status'].startswith('replica_'): # Can't snapshot secondary replica msg = _("Snapshot of secondary replica is not allowed.") raise exception.InvalidVolume(reason=msg) if ((not force) and (volume['status'] != "available")): msg = _("Volume %(vol_id)s status must be available, " "but current status is: " "%(vol_status)s.") % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': 1} else: reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.get('volume_type_id')) reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) for over in overs: if 'gigabytes' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG snapshot (%(d_consumed)dG of " "%(d_quota)dG already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 's_size': volume['size'], 'd_consumed': _consumed(over), 'd_quota': quotas[over]}) raise exception.VolumeSizeExceedsAvailableQuota( requested=volume['size'], consumed=_consumed('gigabytes'), quota=quotas['gigabytes']) elif 'snapshots' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "snapshot (%(d_consumed)d snapshots " "already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over)}) raise exception.SnapshotLimitExceeded( allowed=quotas[over]) self._check_metadata_properties(metadata) snapshot = None try: kwargs = { 'volume_id': volume['id'], 'cgsnapshot_id': cgsnapshot_id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'progress': '0%', 'volume_size': volume['size'], 'display_name': name, 'display_description': description, 'volume_type_id': volume['volume_type_id'], 'encryption_key_id': volume['encryption_key_id'], 'metadata': metadata or {} } snapshot = objects.Snapshot(context=context, **kwargs) snapshot.create() QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: if snapshot.obj_attr_is_set('id'): snapshot.destroy() finally: QUOTAS.rollback(context, reservations) return snapshot def create_snapshots_in_db(self, context, volume_list, name, description, force, cgsnapshot_id): snapshot_list = [] for volume in volume_list: self._create_snapshot_in_db_validate(context, volume, force) reservations = self._create_snapshots_in_db_reserve( context, volume_list) options_list = [] for volume in volume_list: options = self._create_snapshot_in_db_options( context, volume, name, description, cgsnapshot_id) options_list.append(options) try: for options in options_list: snapshot = objects.Snapshot(context=context, **options) snapshot.create() snapshot_list.append(snapshot) QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: for snap in snapshot_list: 
snap.destroy() finally: QUOTAS.rollback(context, reservations) return snapshot_list def _create_snapshot_in_db_validate(self, context, volume, force): check_policy(context, 'create_snapshot', volume) if volume['status'] == 'maintenance': LOG.info(_LI('Unable to create the snapshot for volume, ' 'because it is in maintenance.'), resource=volume) msg = _("The snapshot cannot be created when the volume is in " "maintenance mode.") raise exception.InvalidVolume(reason=msg) if self._is_volume_migrating(volume): # Volume is migrating, wait until done msg = _("Snapshot cannot be created while volume is migrating.") raise exception.InvalidVolume(reason=msg) if ((not force) and (volume['status'] != "available")): msg = _("Snapshot cannot be created because volume %(vol_id)s " "is not available, current volume status: " "%(vol_status)s.") % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) def _create_snapshots_in_db_reserve(self, context, volume_list): reserve_opts_list = [] total_reserve_opts = {} try: for volume in volume_list: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': 1} else: reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.get('volume_type_id')) reserve_opts_list.append(reserve_opts) for reserve_opts in reserve_opts_list: for (key, value) in reserve_opts.items(): if key not in total_reserve_opts.keys(): total_reserve_opts[key] = value else: total_reserve_opts[key] = \ total_reserve_opts[key] + value reservations = QUOTAS.reserve(context, **total_reserve_opts) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] volume_utils.process_reserve_over_quota(context, overs, usages, quotas, volume['size']) return reservations def _create_snapshot_in_db_options(self, context, volume, name, description, cgsnapshot_id): options = {'volume_id': volume['id'], 'cgsnapshot_id': cgsnapshot_id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': "creating", 'progress': '0%', 'volume_size': volume['size'], 'display_name': name, 'display_description': description, 'volume_type_id': volume['volume_type_id'], 'encryption_key_id': volume['encryption_key_id']} return options def create_snapshot(self, context, volume, name, description, metadata=None, cgsnapshot_id=None): result = self._create_snapshot(context, volume, name, description, False, metadata, cgsnapshot_id) LOG.info(_LI("Snapshot create request issued successfully."), resource=result) return result def create_snapshot_force(self, context, volume, name, description, metadata=None): result = self._create_snapshot(context, volume, name, description, True, metadata) LOG.info(_LI("Snapshot force create request issued successfully."), resource=result) return result @wrap_check_policy def delete_snapshot(self, context, snapshot, force=False, unmanage_only=False): # Build required conditions for conditional update expected = {'cgsnapshot_id': None} # If not force deleting we have status conditions if not force: expected['status'] = ('available', 'error') result = snapshot.conditional_update({'status': 'deleting'}, expected) if not result: status = utils.build_or_str(expected.get('status'), _('status must be %s and')) msg = (_('Snapshot %s must not be part of a consistency group.') % status) LOG.error(msg) raise exception.InvalidSnapshot(reason=msg) # Make RPC call to the right host volume = objects.Volume.get_by_id(context, snapshot.volume_id) 
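# --- illustrative aside (editor's sketch, not part of cinder) ---
# The snapshot paths above follow the reserve/commit/rollback quota
# protocol: reserve capacity first, create the DB records, commit on
# success and roll back on any failure. A toy in-memory version of the
# protocol (class and numbers invented for illustration):
class ToyQuotas(object):
    def __init__(self, limit):
        self.limit = limit
        self.in_use = 0
        self.reserved = 0

    def reserve(self, amount):
        if self.in_use + self.reserved + amount > self.limit:
            raise RuntimeError('over quota')
        self.reserved += amount
        return amount  # acts as the reservation handle

    def commit(self, reservation):
        self.reserved -= reservation
        self.in_use += reservation

    def rollback(self, reservation):
        self.reserved -= reservation

quotas = ToyQuotas(limit=10)
reservation = quotas.reserve(2)  # two snapshots
try:
    snapshots = ['snap-1', 'snap-2']  # pretend the DB creates succeed
    quotas.commit(reservation)
except Exception:
    quotas.rollback(reservation)
    raise
print(quotas.in_use, quotas.reserved)  # 2 0
# --- end aside ---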
self.volume_rpcapi.delete_snapshot(context, snapshot, volume.host, unmanage_only=unmanage_only) LOG.info(_LI("Snapshot delete request issued successfully."), resource=snapshot) @wrap_check_policy def update_snapshot(self, context, snapshot, fields): snapshot.update(fields) snapshot.save() @wrap_check_policy def get_volume_metadata(self, context, volume): """Get all metadata associated with a volume.""" rv = self.db.volume_metadata_get(context, volume['id']) LOG.info(_LI("Get volume metadata completed successfully."), resource=volume) return dict(rv) @wrap_check_policy def delete_volume_metadata(self, context, volume, key, meta_type=common.METADATA_TYPES.user): """Delete the given metadata item from a volume.""" if volume['status'] == 'maintenance': LOG.info(_LI('Unable to delete the volume metadata, ' 'because it is in maintenance.'), resource=volume) msg = _("The volume metadata cannot be deleted when the volume " "is in maintenance mode.") raise exception.InvalidVolume(reason=msg) self.db.volume_metadata_delete(context, volume['id'], key, meta_type) LOG.info(_LI("Delete volume metadata completed successfully."), resource=volume) def _check_metadata_properties(self, metadata=None): if not metadata: metadata = {} for k, v in metadata.items(): if len(k) == 0: msg = _("Metadata property key blank.") LOG.warning(msg) raise exception.InvalidVolumeMetadata(reason=msg) if len(k) > 255: msg = _("Metadata property key greater than 255 characters.") LOG.warning(msg) raise exception.InvalidVolumeMetadataSize(reason=msg) if len(v) > 255: msg = _("Metadata property value greater than 255 characters.") LOG.warning(msg) raise exception.InvalidVolumeMetadataSize(reason=msg) @wrap_check_policy def update_volume_metadata(self, context, volume, metadata, delete=False, meta_type=common.METADATA_TYPES.user): """Updates or creates volume metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ if volume['status'] == 'maintenance': LOG.info(_LI('Unable to update the metadata for volume, ' 'because it is in maintenance.'), resource=volume) msg = _("The volume metadata cannot be updated when the volume " "is in maintenance mode.") raise exception.InvalidVolume(reason=msg) self._check_metadata_properties(metadata) db_meta = self.db.volume_metadata_update(context, volume['id'], metadata, delete, meta_type) # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info(_LI("Update volume metadata completed successfully."), resource=volume) return db_meta @wrap_check_policy def get_volume_admin_metadata(self, context, volume): """Get all administration metadata associated with a volume.""" rv = self.db.volume_admin_metadata_get(context, volume['id']) LOG.info(_LI("Get volume admin metadata completed successfully."), resource=volume) return dict(rv) @wrap_check_policy def update_volume_admin_metadata(self, context, volume, metadata, delete=False, add=True, update=True): """Updates or creates volume administration metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. 
""" self._check_metadata_properties(metadata) db_meta = self.db.volume_admin_metadata_update(context, volume['id'], metadata, delete, add, update) # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info(_LI("Update volume admin metadata completed successfully."), resource=volume) return db_meta def get_snapshot_metadata(self, context, snapshot): """Get all metadata associated with a snapshot.""" snapshot_obj = self.get_snapshot(context, snapshot.id) LOG.info(_LI("Get snapshot metadata completed successfully."), resource=snapshot) return snapshot_obj.metadata def delete_snapshot_metadata(self, context, snapshot, key): """Delete the given metadata item from a snapshot.""" snapshot_obj = self.get_snapshot(context, snapshot.id) snapshot_obj.delete_metadata_key(context, key) LOG.info(_LI("Delete snapshot metadata completed successfully."), resource=snapshot) def update_snapshot_metadata(self, context, snapshot, metadata, delete=False): """Updates or creates snapshot metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ if delete: _metadata = metadata else: orig_meta = snapshot.metadata _metadata = orig_meta.copy() _metadata.update(metadata) self._check_metadata_properties(_metadata) snapshot.metadata = _metadata snapshot.save() # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info(_LI("Update snapshot metadata completed successfully."), resource=snapshot) return snapshot.metadata def get_snapshot_metadata_value(self, snapshot, key): LOG.info(_LI("Get snapshot metadata value not implemented."), resource=snapshot) # FIXME(jdg): Huh? Pass? pass def get_volumes_image_metadata(self, context): check_policy(context, 'get_volumes_image_metadata') db_data = self.db.volume_glance_metadata_get_all(context) results = collections.defaultdict(dict) for meta_entry in db_data: results[meta_entry['volume_id']].update({meta_entry['key']: meta_entry['value']}) return results @wrap_check_policy def get_volume_image_metadata(self, context, volume): db_data = self.db.volume_glance_metadata_get(context, volume['id']) LOG.info(_LI("Get volume image-metadata completed successfully."), resource=volume) return {meta_entry.key: meta_entry.value for meta_entry in db_data} def get_list_volumes_image_metadata(self, context, volume_id_list): db_data = self.db.volume_glance_metadata_list_get(context, volume_id_list) results = collections.defaultdict(dict) for meta_entry in db_data: results[meta_entry['volume_id']].update({meta_entry['key']: meta_entry['value']}) return results def _check_volume_availability(self, volume, force): """Check if the volume can be used.""" if volume['status'] not in ['available', 'in-use']: msg = _('Volume %(vol_id)s status must be ' 'available or in-use, but current status is: ' '%(vol_status)s.') % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) if not force and 'in-use' == volume['status']: msg = _('Volume status is in-use.') raise exception.InvalidVolume(reason=msg) @wrap_check_policy def copy_volume_to_image(self, context, volume, metadata, force): """Create a new image from the specified volume.""" if not CONF.enable_force_upload and force: LOG.info(_LI("Force upload to image is disabled, " "Force option will be ignored."), resource={'type': 'volume', 'id': volume['id']}) force = False self._check_volume_availability(volume, force) glance_core_properties = CONF.glance_core_properties if glance_core_properties: try: volume_image_metadata 
= self.get_volume_image_metadata(context, volume) custom_property_set = (set(volume_image_metadata).difference (set(glance_core_properties))) if custom_property_set: properties = {custom_property: volume_image_metadata[custom_property] for custom_property in custom_property_set} metadata.update(dict(properties=properties)) except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata # would be available for that volume in # volume glance metadata table pass recv_metadata = self.image_service.create( context, self.image_service._translate_to_glance(metadata)) self.update(context, volume, {'status': 'uploading'}) self.volume_rpcapi.copy_volume_to_image(context, volume, recv_metadata) response = {"id": volume['id'], "updated_at": volume['updated_at'], "status": 'uploading', "display_description": volume['display_description'], "size": volume['size'], "volume_type": volume['volume_type'], "image_id": recv_metadata['id'], "container_format": recv_metadata['container_format'], "disk_format": recv_metadata['disk_format'], "image_name": recv_metadata.get('name', None)} LOG.info(_LI("Copy volume to image completed successfully."), resource=volume) return response @wrap_check_policy def extend(self, context, volume, new_size): if volume.status != 'available': msg = _('Volume %(vol_id)s status must be available ' 'to extend, but current status is: ' '%(vol_status)s.') % {'vol_id': volume.id, 'vol_status': volume.status} raise exception.InvalidVolume(reason=msg) size_increase = (int(new_size)) - volume.size if size_increase <= 0: msg = (_("New size for extend must be greater " "than current size. (current: %(size)s, " "extended: %(new_size)s).") % {'new_size': new_size, 'size': volume.size}) raise exception.InvalidInput(reason=msg) try: reserve_opts = {'gigabytes': size_increase} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.volume_type_id) reservations = QUOTAS.reserve(context, project_id=volume.project_id, **reserve_opts) except exception.OverQuota as exc: usages = exc.kwargs['usages'] quotas = exc.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume " "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG " "already consumed).") LOG.error(msg, {'s_pid': context.project_id, 's_size': size_increase, 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['gigabytes']}) raise exception.VolumeSizeExceedsAvailableQuota( requested=size_increase, consumed=_consumed('gigabytes'), quota=quotas['gigabytes']) self.update(context, volume, {'status': 'extending'}) self.volume_rpcapi.extend_volume(context, volume, new_size, reservations) LOG.info(_LI("Extend volume request issued successfully."), resource=volume) @wrap_check_policy def migrate_volume(self, context, volume, host, force_host_copy, lock_volume): """Migrate the volume to the specified host.""" if volume.status not in ['available', 'in-use']: msg = _('Volume %(vol_id)s status must be available or in-use, ' 'but current status is: ' '%(vol_status)s.') % {'vol_id': volume.id, 'vol_status': volume.status} LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Make sure volume is not part of a migration. 
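# --- illustrative aside (editor's sketch, not part of cinder) ---
# extend() above only reserves quota for the delta: the new size must be
# an integer strictly greater than the current size, and the reservation
# covers (new - current) gigabytes. In miniature (function name invented):
def extend_delta_gb(current_size_gb, new_size):
    size_increase = int(new_size) - current_size_gb
    if size_increase <= 0:
        raise ValueError('new size %s must exceed current size %d'
                         % (new_size, current_size_gb))
    return size_increase

print(extend_delta_gb(10, '15'))  # growing 10G -> 15G reserves 5G
# --- end aside ---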
if self._is_volume_migrating(volume): msg = _("Volume %s is already part of an active " "migration.") % volume.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) # We only handle volumes without snapshots for now snaps = objects.SnapshotList.get_all_for_volume(context, volume.id) if snaps: msg = _("Volume %s must not have snapshots.") % volume.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) # We only handle non-replicated volumes for now if (volume.replication_status is not None and volume.replication_status != 'disabled'): msg = _("Volume %s must not be replicated.") % volume.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) if volume.consistencygroup_id: msg = _("Volume %s must not be part of a consistency " "group.") % volume.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Make sure the host is in the list of available hosts elevated = context.elevated() topic = CONF.volume_topic services = objects.ServiceList.get_all_by_topic( elevated, topic, disabled=False) found = False for service in services: svc_host = volume_utils.extract_host(host, 'backend') if utils.service_is_up(service) and service.host == svc_host: found = True if not found: msg = _('No available service named %s') % host LOG.error(msg) raise exception.InvalidHost(reason=msg) # Make sure the destination host is different than the current one if host == volume.host: msg = _('Destination host must be different ' 'than the current host.') LOG.error(msg) raise exception.InvalidHost(reason=msg) # When the migration of an available volume starts, both the status # and the migration status of the volume will be changed. # If the admin sets lock_volume flag to True, the volume # status is changed to 'maintenance', telling users # that this volume is in maintenance mode, and no action is allowed # on this volume, e.g. attach, detach, retype, migrate, etc. updates = {'migration_status': 'starting', 'previous_status': volume.status} if lock_volume and volume.status == 'available': updates['status'] = 'maintenance' self.update(context, volume, updates) # Call the scheduler to ensure that the host exists and that it can # accept the volume volume_type = {} if volume.volume_type_id: volume_type = volume_types.get_volume_type(context.elevated(), volume.volume_type_id) request_spec = {'volume_properties': volume, 'volume_type': volume_type, 'volume_id': volume.id} self.scheduler_rpcapi.migrate_volume_to_host(context, CONF.volume_topic, volume.id, host, force_host_copy, request_spec, volume=volume) LOG.info(_LI("Migrate volume request issued successfully."), resource=volume) @wrap_check_policy def migrate_volume_completion(self, context, volume, new_volume, error): # This is a volume swap initiated by Nova, not Cinder. Nova expects # us to return the new_volume_id. 
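        # NOTE(editorial): illustrative summary of the handshake validated
        # below, derived from the checks themselves (not upstream docs):
        #
        #     volume.migration_status     -> any truthy value (mid-migration)
        #     new_volume.migration_status -> 'target:%s' % volume.id
        #
        # If neither side is mid-migration, the call is treated as a
        # Nova-initiated swap and the attachments are simply moved over.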
if not (volume.migration_status or new_volume.migration_status): # When we're not migrating and haven't hit any errors, we issue # volume attach and detach requests so the volumes don't end in # 'attaching' and 'detaching' state if not error: attachments = volume.volume_attachment for attachment in attachments: self.detach(context, volume, attachment.id) self.attach(context, new_volume, attachment.instance_uuid, attachment.attached_host, attachment.mountpoint, 'rw') return new_volume.id if not volume.migration_status: msg = _('Source volume not mid-migration.') raise exception.InvalidVolume(reason=msg) if not new_volume.migration_status: msg = _('Destination volume not mid-migration.') raise exception.InvalidVolume(reason=msg) expected_status = 'target:%s' % volume.id if not new_volume.migration_status == expected_status: msg = (_('Destination has migration_status %(stat)s, expected ' '%(exp)s.') % {'stat': new_volume.migration_status, 'exp': expected_status}) raise exception.InvalidVolume(reason=msg) LOG.info(_LI("Migrate volume completion issued successfully."), resource=volume) return self.volume_rpcapi.migrate_volume_completion(context, volume, new_volume, error) @wrap_check_policy def update_readonly_flag(self, context, volume, flag): if volume['status'] != 'available': msg = _('Volume %(vol_id)s status must be available ' 'to update readonly flag, but current status is: ' '%(vol_status)s.') % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) self.update_volume_admin_metadata(context.elevated(), volume, {'readonly': six.text_type(flag)}) LOG.info(_LI("Update readonly setting on volume " "completed successfully."), resource=volume) @wrap_check_policy def retype(self, context, volume, new_type, migration_policy=None): """Attempt to modify the type associated with an existing volume.""" if volume.status not in ['available', 'in-use']: msg = _('Unable to update type due to incorrect status: ' '%(vol_status)s on volume: %(vol_id)s. 
Volume status '
                    'must be available or '
                    'in-use.') % {'vol_status': volume.status,
                                  'vol_id': volume.id}
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        if self._is_volume_migrating(volume):
            msg = (_("Volume %s is already part of an active migration.")
                   % volume.id)
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        if migration_policy and migration_policy not in ['on-demand', 'never']:
            msg = _('migration_policy must be \'on-demand\' or \'never\', '
                    'passed: %s') % migration_policy
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        if volume.consistencygroup_id:
            msg = _("Volume must not be part of a consistency group.")
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        # Support specifying volume type by ID or name
        try:
            if uuidutils.is_uuid_like(new_type):
                vol_type = volume_types.get_volume_type(context.elevated(),
                                                        new_type)
            else:
                vol_type = volume_types.get_volume_type_by_name(
                    context.elevated(), new_type)
        except exception.InvalidVolumeType:
            msg = _('Invalid volume_type passed: %s.') % new_type
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        vol_type_id = vol_type['id']
        vol_type_qos_id = vol_type['qos_specs_id']

        old_vol_type = None
        old_vol_type_id = volume.volume_type_id
        old_vol_type_qos_id = None

        # Error if the original and new type are the same
        if volume.volume_type_id == vol_type_id:
            msg = _('New volume_type same as original: %s.') % new_type
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        if volume.volume_type_id:
            old_vol_type = volume_types.get_volume_type(
                context, old_vol_type_id)
            old_vol_type_qos_id = old_vol_type['qos_specs_id']

        # We don't support changing encryption requirements yet
        old_enc = volume_types.get_volume_type_encryption(context,
                                                          old_vol_type_id)
        new_enc = volume_types.get_volume_type_encryption(context,
                                                          vol_type_id)
        if old_enc != new_enc:
            msg = _('Retype cannot change encryption requirements.')
            raise exception.InvalidInput(reason=msg)

        # We don't support changing QoS at the front-end yet for in-use
        # volumes.
        # TODO(avishay): Call Nova to change QoS setting (libvirt has support
        # - virDomainSetBlockIoTune() - Nova does not have support yet).
        if (volume.status != 'available' and
                old_vol_type_qos_id != vol_type_qos_id):
            for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
                if qos_id:
                    specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
                    if specs['consumer'] != 'back-end':
                        msg = _('Retype cannot change front-end qos specs for '
                                'in-use volume: %s.') % volume.id
                        raise exception.InvalidInput(reason=msg)

        # We're checking here so that we can report any quota issues as
        # early as possible, but won't commit until we change the type. We
        # pass the reservations onward in case we need to roll back.
        reservations = quota_utils.get_volume_type_reservation(
            context, volume, vol_type_id, reserve_vol_type_only=True)

        # Get old reservations
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
            QUOTAS.add_volume_type_opts(context, reserve_opts,
                                        old_vol_type_id)
            # NOTE(wanghao): We don't need to reserve volumes and gigabytes
            # quota for the retype operation since they didn't change; just
            # reserving the volume_type and per-type gigabytes is fine.
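            # NOTE(editorial): illustrative view of the resulting deltas for
            # a hypothetical 10G volume retyped from type A to type B (the
            # per-type resource names are assumptions, not upstream output):
            #
            #     new reservations: volumes_B +1, gigabytes_B +10
            #     old reservations: volumes_A -1, gigabytes_A -10
            #
            # Only the per-type counters move; the global 'volumes' and
            # 'gigabytes' totals are unchanged, hence the pops below.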
reserve_opts.pop('volumes') reserve_opts.pop('gigabytes') old_reservations = QUOTAS.reserve(context, project_id=volume.project_id, **reserve_opts) except Exception: volume.status = volume.previous_status volume.save() msg = _("Failed to update quota usage while retyping volume.") LOG.exception(msg, resource=volume) raise exception.CinderException(msg) self.update(context, volume, {'status': 'retyping', 'previous_status': volume.status}) request_spec = {'volume_properties': volume, 'volume_id': volume.id, 'volume_type': vol_type, 'migration_policy': migration_policy, 'quota_reservations': reservations, 'old_reservations': old_reservations} self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume.id, request_spec=request_spec, filter_properties={}, volume=volume) LOG.info(_LI("Retype volume request issued successfully."), resource=volume) def manage_existing(self, context, host, ref, name=None, description=None, volume_type=None, metadata=None, availability_zone=None, bootable=False): if volume_type and 'extra_specs' not in volume_type: extra_specs = volume_types.get_volume_type_extra_specs( volume_type['id']) volume_type['extra_specs'] = extra_specs if availability_zone is None: elevated = context.elevated() try: svc_host = volume_utils.extract_host(host, 'backend') service = objects.Service.get_by_args( elevated, svc_host, 'cinder-volume') except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): LOG.error(_LE('Unable to find service: %(service)s for ' 'given host: %(host)s.'), {'service': CONF.volume_topic, 'host': host}) if service.disabled: LOG.error(_LE('Unable to manage_existing volume on a disabled ' 'service.')) raise exception.ServiceUnavailable() availability_zone = service.get('availability_zone') manage_what = { 'context': context, 'name': name, 'description': description, 'host': host, 'ref': ref, 'volume_type': volume_type, 'metadata': metadata, 'availability_zone': availability_zone, 'bootable': bootable, } try: flow_engine = manage_existing.get_flow(self.scheduler_rpcapi, self.db, manage_what) except Exception: msg = _('Failed to manage api volume flow.') LOG.exception(msg) raise exception.CinderException(msg) # Attaching this listener will capture all of the notifications that # taskflow sends out and redirect them to a more useful log for # cinder's debugging (or error reporting) usage. 
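        # NOTE(editorial): a minimal sketch of this listener pattern in
        # isolation (hypothetical flow object, not upstream code):
        #
        #     engine = taskflow.engines.load(some_flow)
        #     with flow_utils.DynamicLogListener(engine, logger=LOG):
        #         engine.run()  # task/flow transitions get logged via LOG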
with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() vol_ref = flow_engine.storage.fetch('volume') LOG.info(_LI("Manage volume request issued successfully."), resource=vol_ref) return vol_ref def manage_existing_snapshot(self, context, ref, volume, name=None, description=None, metadata=None): host = volume_utils.extract_host(volume['host']) try: # NOTE(jdg): We don't use this, we just make sure it's valid # and exists before sending off the call service = objects.Service.get_by_args( context.elevated(), host, 'cinder-volume') except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): LOG.error(_LE('Unable to find service: %(service)s for ' 'given host: %(host)s.'), {'service': CONF.volume_topic, 'host': host}) if service.disabled: LOG.error(_LE('Unable to manage_existing snapshot on a disabled ' 'service.')) raise exception.ServiceUnavailable() snapshot_object = self.create_snapshot_in_db(context, volume, name, description, False, metadata, None) self.volume_rpcapi.manage_existing_snapshot(context, snapshot_object, ref, host) return snapshot_object # FIXME(jdg): Move these Cheesecake methods (freeze, thaw and failover) # to a services API because that's what they are def failover_host(self, ctxt, host, secondary_id=None): ctxt = context.get_admin_context() svc_host = volume_utils.extract_host(host, 'backend') service = objects.Service.get_by_args( ctxt, svc_host, 'cinder-volume') expected = {'replication_status': [fields.ReplicationStatus.ENABLED, fields.ReplicationStatus.FAILED_OVER]} result = service.conditional_update( {'replication_status': fields.ReplicationStatus.FAILING_OVER}, expected) if not result: expected_status = utils.build_or_str( expected['replication_status']) msg = (_('Host replication_status must be %s to failover.') % expected_status) LOG.error(msg) raise exception.InvalidInput(reason=msg) self.volume_rpcapi.failover_host(ctxt, host, secondary_id) def freeze_host(self, ctxt, host): ctxt = context.get_admin_context() svc_host = volume_utils.extract_host(host, 'backend') service = objects.Service.get_by_args( ctxt, svc_host, 'cinder-volume') expected = {'frozen': False} result = service.conditional_update( {'frozen': True}, expected) if not result: msg = _('Host is already Frozen.') LOG.error(msg) raise exception.InvalidInput(reason=msg) # Should we set service status to disabled to keep # scheduler calls from being sent? Just use existing # `cinder service-disable reason=freeze` self.volume_rpcapi.freeze_host(ctxt, host) def thaw_host(self, ctxt, host): ctxt = context.get_admin_context() svc_host = volume_utils.extract_host(host, 'backend') service = objects.Service.get_by_args( ctxt, svc_host, 'cinder-volume') expected = {'frozen': True} result = service.conditional_update( {'frozen': False}, expected) if not result: msg = _('Host is NOT Frozen.') LOG.error(msg) raise exception.InvalidInput(reason=msg) if not self.volume_rpcapi.thaw_host(ctxt, host): return "Backend reported error during thaw_host operation." def check_volume_filters(self, filters): '''Sets the user filter value to accepted format''' booleans = self.db.get_booleans_for_table('volume') # To translate any true/false equivalent to True/False # which is only acceptable format in database queries. 
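        # NOTE(editorial): illustrative before/after for this translation
        # (assumed input, not upstream code):
        #
        #     {'bootable': 'true', 'size': '10', 'display_name': '"vol"'}
        #       -> {'bootable': True, 'size': 10, 'display_name': '"vol"'}
        #
        # 'bootable' is a boolean column, 'size' goes through
        # ast.literal_eval(), and 'display_name' is deliberately left raw.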
accepted_true = ['True', 'true', 'TRUE'] accepted_false = ['False', 'false', 'FALSE'] for k, v in filters.items(): try: if k in booleans: if v in accepted_false: filters[k] = False elif v in accepted_true: filters[k] = True else: filters[k] = bool(v) elif k == 'display_name': # Use the raw value of display name as is for the filter # without passing it through ast.literal_eval(). If the # display name is a properly quoted string (e.g. '"foo"') # then literal_eval() strips the quotes (i.e. 'foo'), so # the filter becomes different from the user input. continue else: filters[k] = ast.literal_eval(v) except (ValueError, SyntaxError): LOG.debug('Could not evaluate value %s, assuming string', v) class HostAPI(base.Base): def __init__(self): super(HostAPI, self).__init__() """Sub-set of the Volume Manager API for managing host operations.""" def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new volumes.""" raise NotImplementedError() def get_host_uptime(self, context, host): """Returns the result of calling "uptime" on the target host.""" raise NotImplementedError() def host_power_action(self, context, host, action): raise NotImplementedError() def set_host_maintenance(self, context, host, mode): """Start/Stop host maintenance window. On start, it triggers volume evacuation. """ raise NotImplementedError() cinder-8.0.0/cinder/utils.py0000664000567000056710000011172712701406257017117 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import abc import contextlib import datetime import functools import inspect import logging as py_logging import math import os import pyclbr import random import re import shutil import socket import stat import sys import tempfile import time import types from xml.dom import minidom from xml.parsers import expat from xml import sax from xml.sax import expatreader from os_brick.initiator import connector from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils import retrying import six import webob.exc from cinder import exception from cinder.i18n import _, _LE, _LW CONF = cfg.CONF LOG = logging.getLogger(__name__) ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" VALID_TRACE_FLAGS = {'method', 'api'} TRACE_METHOD = False TRACE_API = False synchronized = lockutils.synchronized_with_prefix('cinder-') def find_config(config_path): """Find a configuration file using the given hint. :param config_path: Full or relative path to the config. :returns: Full path of the config, if it exists. 
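    Illustrative resolution order (paths assume the default state_path):

        find_config('cinder.conf')
        # tries the given path, $state_path/etc/cinder/cinder.conf,
        # $state_path/etc/cinder.conf, $state_path/cinder.conf and
        # finally /etc/cinder/cinder.conf
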
:raises: `cinder.exception.ConfigNotFound` """ possible_locations = [ config_path, os.path.join(CONF.state_path, "etc", "cinder", config_path), os.path.join(CONF.state_path, "etc", config_path), os.path.join(CONF.state_path, config_path), "/etc/cinder/%s" % config_path, ] for path in possible_locations: if os.path.exists(path): return os.path.abspath(path) raise exception.ConfigNotFound(path=os.path.abspath(config_path)) def as_int(obj, quiet=True): # Try "2" -> 2 try: return int(obj) except (ValueError, TypeError): pass # Try "2.5" -> 2 try: return int(float(obj)) except (ValueError, TypeError): pass # Eck, not sure what this is then. if not quiet: raise TypeError(_("Can not translate %s to integer.") % (obj)) return obj def check_exclusive_options(**kwargs): """Checks that only one of the provided options is actually not-none. Iterates over all the kwargs passed in and checks that only one of said arguments is not-none, if more than one is not-none then an exception will be raised with the names of those arguments who were not-none. """ if not kwargs: return pretty_keys = kwargs.pop("pretty_keys", True) exclusive_options = {} for (k, v) in kwargs.items(): if v is not None: exclusive_options[k] = True if len(exclusive_options) > 1: # Change the format of the names from pythonic to # something that is more readable. # # Ex: 'the_key' -> 'the key' if pretty_keys: names = [k.replace('_', ' ') for k in kwargs.keys()] else: names = kwargs.keys() names = ", ".join(sorted(names)) msg = (_("May specify only one of %s") % (names)) raise exception.InvalidInput(reason=msg) def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method.""" if 'run_as_root' in kwargs and 'root_helper' not in kwargs: kwargs['root_helper'] = get_root_helper() return processutils.execute(*cmd, **kwargs) def check_ssh_injection(cmd_list): ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>', '<'] # Check whether injection attacks exist for arg in cmd_list: arg = arg.strip() # Check for matching quotes on the ends is_quoted = re.match('^(?P[\'"])(?P.*)(?P=quote)$', arg) if is_quoted: # Check for unescaped quotes within the quoted argument quoted = is_quoted.group('quoted') if quoted: if (re.match('[\'"]', quoted) or re.search('[^\\\\][\'"]', quoted)): raise exception.SSHInjectionThreat(command=cmd_list) else: # We only allow spaces within quoted arguments, and that # is the only special character allowed within quotes if len(arg.split()) > 1: raise exception.SSHInjectionThreat(command=cmd_list) # Second, check whether danger character in command. So the shell # special operator must be a single argument. for c in ssh_injection_pattern: if c not in arg: continue result = arg.find(c) if not result == -1: if result == 0 or not arg[result - 1] == '\\': raise exception.SSHInjectionThreat(command=cmd_list) def cinderdir(): import cinder return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0] def last_completed_audit_period(unit=None): """This method gives you the most recently *completed* audit period. arguments: units: string, one of 'hour', 'day', 'month', 'year' Periods normally begin at the beginning (UTC) of the period unit (So a 'day' period begins at midnight UTC, a 'month' unit on the 1st, a 'year' on Jan, 1) unit string may be appended with an optional offset like so: 'day@18' This will begin the period at 18:00 UTC. 'month@15' starts a monthly period on the 15th, and year@3 begins a yearly one on March 1st. 
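    Illustrative examples (assuming "now" is 2016-03-20 14:37 UTC):

        last_completed_audit_period('hour')
        # -> (13:00, 14:00) on 2016-03-20
        last_completed_audit_period('day@18')
        # -> (2016-03-18 18:00, 2016-03-19 18:00)
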
returns: 2 tuple of datetimes (begin, end) The begin timestamp of this audit period is the same as the end of the previous. """ if not unit: unit = CONF.volume_usage_audit_period offset = 0 if '@' in unit: unit, offset = unit.split("@", 1) offset = int(offset) rightnow = timeutils.utcnow() if unit not in ('month', 'day', 'year', 'hour'): raise ValueError('Time period must be hour, day, month or year') if unit == 'month': if offset == 0: offset = 1 end = datetime.datetime(day=offset, month=rightnow.month, year=rightnow.year) if end >= rightnow: year = rightnow.year if 1 >= rightnow.month: year -= 1 month = 12 + (rightnow.month - 1) else: month = rightnow.month - 1 end = datetime.datetime(day=offset, month=month, year=year) year = end.year if 1 >= end.month: year -= 1 month = 12 + (end.month - 1) else: month = end.month - 1 begin = datetime.datetime(day=offset, month=month, year=year) elif unit == 'year': if offset == 0: offset = 1 end = datetime.datetime(day=1, month=offset, year=rightnow.year) if end >= rightnow: end = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 2) else: begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) elif unit == 'day': end = datetime.datetime(hour=offset, day=rightnow.day, month=rightnow.month, year=rightnow.year) if end >= rightnow: end = end - datetime.timedelta(days=1) begin = end - datetime.timedelta(days=1) elif unit == 'hour': end = rightnow.replace(minute=offset, second=0, microsecond=0) if end >= rightnow: end = end - datetime.timedelta(hours=1) begin = end - datetime.timedelta(hours=1) return (begin, end) def list_of_dicts_to_dict(seq, key): """Convert list of dicts to an indexed dict. Takes a list of dicts, and converts it to a nested dict indexed by :param seq: list of dicts :parm key: key in dicts to index by example: lst = [{'id': 1, ...}, {'id': 2, ...}...] key = 'id' returns {1:{'id': 1, ...}, 2:{'id':2, ...} """ return {d[key]: dict(d, index=d[key]) for (i, d) in enumerate(seq)} class ProtectedExpatParser(expatreader.ExpatParser): """An expat parser which disables DTD's and entities by default.""" def __init__(self, forbid_dtd=True, forbid_entities=True, *args, **kwargs): # Python 2.x old style class expatreader.ExpatParser.__init__(self, *args, **kwargs) self.forbid_dtd = forbid_dtd self.forbid_entities = forbid_entities def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): raise ValueError("Inline DTD forbidden") def entity_decl(self, entityName, is_parameter_entity, value, base, systemId, publicId, notationName): raise ValueError(" forbidden") def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): # expat 1.2 raise ValueError(" forbidden") def reset(self): expatreader.ExpatParser.reset(self) if self.forbid_dtd: self._parser.StartDoctypeDeclHandler = self.start_doctype_decl if self.forbid_entities: self._parser.EntityDeclHandler = self.entity_decl self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl def safe_minidom_parse_string(xml_string): """Parse an XML string using minidom safely. """ try: if six.PY3 and isinstance(xml_string, bytes): # On Python 3, minidom.parseString() requires Unicode when # the parser parameter is used. # # Bet that XML used in Cinder is always encoded to UTF-8. 
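            # NOTE(editorial): illustrative behaviour of the protected
            # parser (assumed snippets, not upstream tests):
            #
            #     safe_minidom_parse_string('<a>ok</a>')   # parses normally
            #     safe_minidom_parse_string('<!DOCTYPE a []><a/>')
            #     # -> rejected; the DTD handler raises ValueError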
xml_string = xml_string.decode('utf-8') return minidom.parseString(xml_string, parser=ProtectedExpatParser()) except sax.SAXParseException: raise expat.ExpatError() def is_valid_boolstr(val): """Check if the provided string is a valid bool string or not.""" val = str(val).lower() return val in ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0') def is_none_string(val): """Check if a string represents a None value.""" if not isinstance(val, six.string_types): return False return val.lower() == 'none' def monkey_patch(): """Patches decorators for all functions in a specified module. If the CONF.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'cinder.api.ec2.cloud:' \ cinder.openstack.common.notifier.api.notify_decorator' Parameters of the decorator is as follows. (See cinder.openstack.common.notifier.api.notify_decorator) :param name: name of the function :param function: object of the function """ # If CONF.monkey_patch is not True, this function do nothing. if not CONF.monkey_patch: return # Get list of modules and decorators for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = importutils.import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) # On Python 3, unbound methods are regular functions predicate = inspect.isfunction if six.PY3 else inspect.ismethod for method, func in inspect.getmembers(clz, predicate): setattr( clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function elif isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def make_dev_path(dev, partition=None, base='/dev'): """Return a path to a particular device. >>> make_dev_path('xvdc') /dev/xvdc >>> make_dev_path('xvdc', 1) /dev/xvdc1 """ path = os.path.join(base, dev) if partition: path += str(partition) return path def sanitize_hostname(hostname): """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" if six.PY3: hostname = hostname.encode('latin-1', 'ignore') hostname = hostname.decode('latin-1') else: if isinstance(hostname, six.text_type): hostname = hostname.encode('latin-1', 'ignore') hostname = re.sub('[ _]', '-', hostname) hostname = re.sub('[^\w.-]+', '', hostname) hostname = hostname.lower() hostname = hostname.strip('.-') return hostname def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. elapsed = (timeutils.utcnow(with_timezone=True) - last_heartbeat).total_seconds() return abs(elapsed) <= CONF.service_down_time def read_file_as_root(file_path): """Secure helper to read file as root.""" try: out, _err = execute('cat', file_path, run_as_root=True) return out except processutils.ProcessExecutionError: raise exception.FileNotFound(file_path=file_path) def robust_file_write(directory, filename, data): """Robust file write. 
Use "write to temp file and rename" model for writing the persistence file. :param directory: Target directory to create a file. :param filename: File name to store specified data. :param data: String data. """ tempname = None dirfd = None try: dirfd = os.open(directory, os.O_DIRECTORY) # write data to temporary file with tempfile.NamedTemporaryFile(prefix=filename, dir=directory, delete=False) as tf: tempname = tf.name tf.write(data.encode('utf-8')) tf.flush() os.fdatasync(tf.fileno()) tf.close() # Fsync the directory to ensure the fact of the existence of # the temp file hits the disk. os.fsync(dirfd) # If destination file exists, it will be replaced silently. os.rename(tempname, os.path.join(directory, filename)) # Fsync the directory to ensure the rename hits the disk. os.fsync(dirfd) except OSError: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to write persistence file: %(path)s."), {'path': os.path.join(directory, filename)}) if os.path.isfile(tempname): os.unlink(tempname) finally: if dirfd: os.close(dirfd) @contextlib.contextmanager def temporary_chown(path, owner_uid=None): """Temporarily chown a path. :params owner_uid: UID of temporary owner (defaults to current user) """ if owner_uid is None: owner_uid = os.getuid() orig_uid = os.stat(path).st_uid if orig_uid != owner_uid: execute('chown', owner_uid, path, run_as_root=True) try: yield finally: if orig_uid != owner_uid: execute('chown', orig_uid, path, run_as_root=True) @contextlib.contextmanager def tempdir(**kwargs): tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: LOG.debug('Could not remove tmpdir: %s', six.text_type(e)) def walk_class_hierarchy(clazz, encountered=None): """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass def get_root_helper(): return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config def brick_get_connector_properties(multipath=False, enforce_multipath=False): """Wrapper to automatically set root_helper in brick calls. :param multipath: A boolean indicating whether the connector can support multipath. :param enforce_multipath: If True, it raises exception when multipath=True is specified but multipathd is not running. If False, it falls back to multipath=False when multipathd is not running. """ root_helper = get_root_helper() return connector.get_connector_properties(root_helper, CONF.my_ip, multipath, enforce_multipath) def brick_get_connector(protocol, driver=None, execute=processutils.execute, use_multipath=False, device_scan_attempts=3, *args, **kwargs): """Wrapper to get a brick connector object. This automatically populates the required protocol as well as the root_helper needed to execute commands. """ root_helper = get_root_helper() return connector.InitiatorConnector.factory(protocol, root_helper, driver=driver, execute=execute, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, *args, **kwargs) def require_driver_initialized(driver): """Verifies if `driver` is initialized If the driver is not initialized, an exception will be raised. :params driver: The driver instance. 
:raises: `exception.DriverNotInitialized`
    """
    # we can't do anything if the driver didn't init
    if not driver.initialized:
        driver_name = driver.__class__.__name__
        LOG.error(_LE("Volume driver %s not initialized"), driver_name)
        raise exception.DriverNotInitialized()


def get_file_mode(path):
    """This primarily exists to make unit testing easier."""
    return stat.S_IMODE(os.stat(path).st_mode)


def get_file_gid(path):
    """This primarily exists to make unit testing easier."""
    return os.stat(path).st_gid


def get_file_size(path):
    """Returns the file size."""
    return os.stat(path).st_size


def _get_disk_of_partition(devpath, st=None):
    """Gets a disk device path and status from a partition path.

    Returns a disk device path from a partition device path, and stat for
    the device. If devpath is not a partition, devpath is returned as it
    is. For example, '/dev/sda' is returned for '/dev/sda1', and
    '/dev/disk1' is for '/dev/disk1p1' ('p' is prepended to the partition
    number if the disk name ends with numbers).
    """
    diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath)
    if diskpath != devpath:
        try:
            st_disk = os.stat(diskpath)
            if stat.S_ISBLK(st_disk.st_mode):
                return (diskpath, st_disk)
        except OSError:
            pass
    # devpath is not a partition
    if st is None:
        st = os.stat(devpath)
    return (devpath, st)


def get_bool_param(param_string, params):
    param = params.get(param_string, False)
    if not is_valid_boolstr(param):
        msg = _('Value %(param)s for %(param_string)s is not a '
                'boolean.') % {'param': param, 'param_string': param_string}
        raise exception.InvalidParameterValue(err=msg)

    return strutils.bool_from_string(param, strict=True)


def get_blkdev_major_minor(path, lookup_for_file=True):
    """Get 'major:minor' number of a block device.

    Get the device's 'major:minor' number of a block device to control
    I/O ratelimit of the specified path. If lookup_for_file is True and
    the path is a regular file, look up the disk device on which the file
    lies and return the result for that device.
    """
    st = os.stat(path)
    if stat.S_ISBLK(st.st_mode):
        path, st = _get_disk_of_partition(path, st)
        return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
    elif stat.S_ISCHR(st.st_mode):
        # No I/O ratelimit control is provided for character devices
        return None
    elif lookup_for_file:
        # lookup the mounted disk which the file lies on
        out, _err = execute('df', path)
        devpath = out.split("\n")[1].split()[0]
        if devpath[0] != '/':
            # the file is on a network file system
            return None
        return get_blkdev_major_minor(devpath, False)
    else:
        msg = _("Unable to get a block device for file \'%s\'") % path
        raise exception.Error(msg)


def check_string_length(value, name, min_length=0, max_length=None):
    """Check the length of specified string.

    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    """
    if not isinstance(value, six.string_types):
        msg = _("%s is not a string or unicode") % name
        raise exception.InvalidInput(message=msg)

    if len(value) < min_length:
        msg = _("%(name)s has a minimum character requirement of "
                "%(min_length)s.") % {'name': name, 'min_length': min_length}
        raise exception.InvalidInput(message=msg)

    if max_length and len(value) > max_length:
        msg = _("%(name)s has more than %(max_length)s "
                "characters.") % {'name': name, 'max_length': max_length}
        raise exception.InvalidInput(message=msg)


_visible_admin_metadata_keys = ['readonly', 'attached_mode']


def add_visible_admin_metadata(volume):
    """Add user-visible admin metadata to regular metadata.
Extracts the admin metadata keys that are to be made visible to non-administrators, and adds them to the regular metadata structure for the passed-in volume. """ visible_admin_meta = {} if volume.get('volume_admin_metadata'): if isinstance(volume['volume_admin_metadata'], dict): volume_admin_metadata = volume['volume_admin_metadata'] for key in volume_admin_metadata: if key in _visible_admin_metadata_keys: visible_admin_meta[key] = volume_admin_metadata[key] else: for item in volume['volume_admin_metadata']: if item['key'] in _visible_admin_metadata_keys: visible_admin_meta[item['key']] = item['value'] # avoid circular ref when volume is a Volume instance elif (volume.get('admin_metadata') and isinstance(volume.get('admin_metadata'), dict)): for key in _visible_admin_metadata_keys: if key in volume['admin_metadata'].keys(): visible_admin_meta[key] = volume['admin_metadata'][key] if not visible_admin_meta: return # NOTE(zhiyan): update visible administration metadata to # volume metadata, administration metadata will rewrite existing key. if volume.get('volume_metadata'): orig_meta = list(volume.get('volume_metadata')) for item in orig_meta: if item['key'] in visible_admin_meta.keys(): item['value'] = visible_admin_meta.pop(item['key']) for key, value in visible_admin_meta.items(): orig_meta.append({'key': key, 'value': value}) volume['volume_metadata'] = orig_meta # avoid circular ref when vol is a Volume instance elif (volume.get('metadata') and isinstance(volume.get('metadata'), dict)): volume['metadata'].update(visible_admin_meta) else: volume['metadata'] = visible_admin_meta def remove_invalid_filter_options(context, filters, allowed_search_options): """Remove search options that are not valid for non-admin API/context.""" if context.is_admin: # Allow all options return # Otherwise, strip out all unknown options unknown_options = [opt for opt in filters if opt not in allowed_search_options] bad_options = ", ".join(unknown_options) LOG.debug("Removing options '%s' from query.", bad_options) for opt in unknown_options: del filters[opt] def is_blk_device(dev): try: if stat.S_ISBLK(os.stat(dev).st_mode): return True return False except Exception: LOG.debug('Path %s not found in is_blk_device check', dev) return False class ComparableMixin(object): def _compare(self, other, method): try: return method(self._cmpkey(), other._cmpkey()) except (AttributeError, TypeError): # _cmpkey not implemented, or return different type, # so I can't compare with "other". 
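            # NOTE(editorial): a minimal illustrative subclass (not
            # upstream code):
            #
            #     class Version(ComparableMixin):
            #         def __init__(self, major, minor):
            #             self.major, self.minor = major, minor
            #
            #         def _cmpkey(self):
            #             return (self.major, self.minor)
            #
            #     Version(1, 2) < Version(1, 10)  # -> True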
return NotImplemented def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def retry(exceptions, interval=1, retries=3, backoff_rate=2, wait_random=False): def _retry_on_exception(e): return isinstance(e, exceptions) def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms): exp = backoff_rate ** previous_attempt_number wait_for = interval * exp if wait_random: random.seed() wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0) else: wait_val = wait_for * 1000.0 LOG.debug("Sleeping for %s seconds", (wait_val / 1000.0)) return wait_val def _print_stop(previous_attempt_number, delay_since_first_attempt_ms): delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0 LOG.debug("Failed attempt %s", previous_attempt_number) LOG.debug("Have been at this for %s seconds", delay_since_first_attempt) return previous_attempt_number == retries if retries < 1: raise ValueError('Retries must be greater than or ' 'equal to 1 (received: %s). ' % retries) def _decorator(f): @six.wraps(f) def _wrapper(*args, **kwargs): r = retrying.Retrying(retry_on_exception=_retry_on_exception, wait_func=_backoff_sleep, stop_func=_print_stop) return r.call(f, *args, **kwargs) return _wrapper return _decorator def convert_str(text): """Convert to native string. Convert bytes and Unicode strings to native strings: * convert to bytes on Python 2: encode Unicode using encodeutils.safe_encode() * convert to Unicode on Python 3: decode bytes from UTF-8 """ if six.PY2: return encodeutils.safe_encode(text) else: if isinstance(text, bytes): return text.decode('utf-8') else: return text def trace_method(f): """Decorates a function if TRACE_METHOD is true.""" @functools.wraps(f) def trace_method_logging_wrapper(*args, **kwargs): if TRACE_METHOD: return trace(f)(*args, **kwargs) return f(*args, **kwargs) return trace_method_logging_wrapper def trace_api(f): """Decorates a function if TRACE_API is true.""" @functools.wraps(f) def trace_api_logging_wrapper(*args, **kwargs): if TRACE_API: return trace(f)(*args, **kwargs) return f(*args, **kwargs) return trace_api_logging_wrapper def trace(f): """Trace calls to the decorated function. This decorator should always be defined as the outermost decorator so it is defined last. This is important so it does not interfere with other decorators. Using this decorator on a function will cause its execution to be logged at `DEBUG` level with arguments, return values, and exceptions. :returns: a function decorator """ func_name = f.__name__ @functools.wraps(f) def trace_logging_wrapper(*args, **kwargs): if len(args) > 0: maybe_self = args[0] else: maybe_self = kwargs.get('self', None) if maybe_self and hasattr(maybe_self, '__module__'): logger = logging.getLogger(maybe_self.__module__) else: logger = LOG # NOTE(ameade): Don't bother going any further if DEBUG log level # is not enabled for the logger. 
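        # NOTE(editorial): illustrative wiring of the trace machinery;
        # the flag values come from VALID_TRACE_FLAGS, but the config
        # plumbing shown here is an assumption, not verified upstream:
        #
        #     setup_tracing(['method', 'api'])
        #
        #     @trace_method
        #     def _some_helper(self):   # logged only when tracing is on
        #         ...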
if not logger.isEnabledFor(py_logging.DEBUG): return f(*args, **kwargs) all_args = inspect.getcallargs(f, *args, **kwargs) logger.debug('==> %(func)s: call %(all_args)r', {'func': func_name, 'all_args': all_args}) start_time = time.time() * 1000 try: result = f(*args, **kwargs) except Exception as exc: total_time = int(round(time.time() * 1000)) - start_time logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r', {'func': func_name, 'time': total_time, 'exc': exc}) raise total_time = int(round(time.time() * 1000)) - start_time logger.debug('<== %(func)s: return (%(time)dms) %(result)r', {'func': func_name, 'time': total_time, 'result': result}) return result return trace_logging_wrapper class TraceWrapperMetaclass(type): """Metaclass that wraps all methods of a class with trace_method. This metaclass will cause every function inside of the class to be decorated with the trace_method decorator. To use the metaclass you define a class like so: @six.add_metaclass(utils.TraceWrapperMetaclass) class MyClass(object): """ def __new__(meta, classname, bases, classDict): newClassDict = {} for attributeName, attribute in classDict.items(): if isinstance(attribute, types.FunctionType): # replace it with a wrapped version attribute = functools.update_wrapper(trace_method(attribute), attribute) newClassDict[attributeName] = attribute return type.__new__(meta, classname, bases, newClassDict) class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass): """Metaclass that wraps all methods of a class with trace.""" pass def setup_tracing(trace_flags): """Set global variables for each trace flag. Sets variables TRACE_METHOD and TRACE_API, which represent whether to log method and api traces. :param trace_flags: a list of strings """ global TRACE_METHOD global TRACE_API try: trace_flags = [flag.strip() for flag in trace_flags] except TypeError: # Handle when trace_flags is None or a test mock trace_flags = [] for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS): LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag) TRACE_METHOD = 'method' in trace_flags TRACE_API = 'api' in trace_flags def resolve_hostname(hostname): """Resolves host name to IP address. Resolves a host name (my.data.point.com) to an IP address (10.12.143.11). This routine also works if the data passed in hostname is already an IP. In this case, the same IP address will be returned. :param hostname: Host name to resolve. :returns: IP Address for Host name. """ result = socket.getaddrinfo(hostname, None)[0] (family, socktype, proto, canonname, sockaddr) = result LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.', {'host': hostname, 'ip': sockaddr[0]}) return sockaddr[0] def build_or_str(elements, str_format=None): """Builds a string of elements joined by 'or'. Will join strings with the 'or' word and if a str_format is provided it will be used to format the resulted joined string. If there are no elements an empty string will be returned. :param elements: Elements we want to join. :type elements: String or iterable of strings. :param str_format: String to use to format the response. :type str_format: String. 
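
    Illustrative example (not from upstream docs):

        >>> build_or_str(['available', 'in-use'], 'status must be %s')
        'status must be available or in-use'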
""" if not elements: return '' if not isinstance(elements, six.string_types): elements = _(' or ').join(elements) if str_format: return str_format % elements return elements def calculate_virtual_free_capacity(total_capacity, free_capacity, provisioned_capacity, thin_provisioning_support, max_over_subscription_ratio, reserved_percentage): """Calculate the virtual free capacity based on thin provisioning support. :param total_capacity: total_capacity_gb of a host_state or pool. :param free_capacity: free_capacity_gb of a host_state or pool. :param provisioned_capacity: provisioned_capacity_gb of a host_state or pool. :param thin_provisioning_support: thin_provisioning_support of a host_state or a pool. :param max_over_subscription_ratio: max_over_subscription_ratio of a host_state or a pool :param reserved_percentage: reserved_percentage of a host_state or a pool. :returns: the calculated virtual free capacity. """ total = float(total_capacity) reserved = float(reserved_percentage) / 100 if thin_provisioning_support: free = (total * max_over_subscription_ratio - provisioned_capacity - math.floor(total * reserved)) else: # Calculate how much free space is left after taking into # account the reserved space. free = free_capacity - math.floor(total * reserved) return free def validate_integer(value, name, min_value=None, max_value=None): """Make sure that value is a valid integer, potentially within range. :param value: the value of the integer :param name: the name of the integer :param min_length: the min_length of the integer :param max_length: the max_length of the integer :returns: integer """ try: value = int(value) except (TypeError, ValueError, UnicodeEncodeError): raise webob.exc.HTTPBadRequest(explanation=( _('%s must be an integer.') % name)) if min_value is not None and value < min_value: raise webob.exc.HTTPBadRequest( explanation=(_('%(value_name)s must be >= %(min_value)d') % {'value_name': name, 'min_value': min_value})) if max_value is not None and value > max_value: raise webob.exc.HTTPBadRequest( explanation=(_('%(value_name)s must be <= %(max_value)d') % {'value_name': name, 'max_value': max_value})) return value cinder-8.0.0/cinder/__init__.py0000664000567000056710000000162212701406250017477 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`cinder` -- Cloud IaaS Platform =================================== .. automodule:: cinder :platform: Unix :synopsis: Infrastructure-as-a-Service Cloud platform. 
""" cinder-8.0.0/cinder/testing/0000775000567000056710000000000012701406543017047 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/testing/README.rst0000664000567000056710000000272712701406250020541 0ustar jenkinsjenkins00000000000000===================================== OpenStack Cinder Testing Infrastructure ===================================== A note of clarification is in order, to help those who are new to testing in OpenStack cinder: - actual unit tests are created in the "tests" directory; - the "testing" directory is used to house the infrastructure needed to support testing in OpenStack Cinder. This README file attempts to provide current and prospective contributors with everything they need to know in order to start creating unit tests and utilizing the convenience code provided in cinder.testing. For more detailed information on cinder unit tests visit: http://docs.openstack.org/developer/cinder/devref/unit_tests.html Running Tests ----------------------------------------------- In the root of the cinder source code run the run_tests.sh script. This will offer to create a virtual environment and populate it with dependencies. If you don't have dependencies installed that are needed for compiling cinder's direct dependencies, you'll have to use your operating system's method of installing extra dependencies. To get help using this script execute it with the -h parameter to get options `./run_tests.sh -h` Writing Unit Tests ------------------ - All new unit tests are to be written in python-mock. - Old tests that are still written in mox should be updated to use python-mock. Usage of mox has been deprecated for writing Cinder unit tests. - use addCleanup in favor of tearDown cinder-8.0.0/cinder/replication/0000775000567000056710000000000012701406543017703 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/replication/__init__.py0000664000567000056710000000136212701406250022011 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import importutils CONF = cfg.CONF cls = CONF.replication_api_class API = importutils.import_class(cls) cinder-8.0.0/cinder/replication/api.py0000664000567000056710000001017312701406250021023 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to volume replication. 
""" import functools from oslo_log import log as logging from cinder.db import base from cinder import exception from cinder.i18n import _ from cinder import policy from cinder import volume as cinder_volume from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) PROMOTE_PROCEED_STATUS = ('active', 'active-stopped') REENABLE_PROCEED_STATUS = ('inactive', 'active-stopped', 'error') def wrap_check_policy(func): """Check policy corresponding to the wrapped methods prior to execution. This decorator requires the first 3 args of the wrapped function to be (self, context, relationship_id) """ @functools.wraps(func) def wrapped(self, context, target_obj, *args, **kwargs): check_policy(context, func.__name__, target_obj) return func(self, context, target_obj, *args, **kwargs) return wrapped def check_policy(context, action, target_obj=None): target = { 'project_id': context.project_id, 'user_id': context.user_id, } target.update(target_obj or {}) _action = 'volume_extension:replication:%s' % action policy.enforce(context, _action, target) class API(base.Base): """API for interacting with volume replication relationships.""" def __init__(self, db_driver=None): super(API, self).__init__(db_driver) self.volume_rpcapi = volume_rpcapi.VolumeAPI() self.volume_api = cinder_volume.API() @wrap_check_policy def promote(self, context, vol): if vol['replication_status'] == 'disabled': msg = _("Replication is not enabled for volume") raise exception.ReplicationError( reason=msg, volume_id=vol['id']) if vol['replication_status'] not in PROMOTE_PROCEED_STATUS: msg = _("Replication status for volume must be active or " "active-stopped, but current status " "is: %s") % vol['replication_status'] raise exception.ReplicationError( reason=msg, volume_id=vol['id']) if vol['status'] != 'available': msg = _("Volume status for volume must be available, but current " "status is: %s") % vol['status'] raise exception.ReplicationError( reason=msg, volume_id=vol['id']) volume_utils.notify_about_replication_usage(context, vol, 'promote') self.volume_rpcapi.promote_replica(context, vol) @wrap_check_policy def reenable(self, context, vol): if vol['replication_status'] == 'disabled': msg = _("Replication is not enabled") raise exception.ReplicationError( reason=msg, volume_id=vol['id']) if vol['replication_status'] not in REENABLE_PROCEED_STATUS: msg = _("Replication status for volume must be inactive," " active-stopped, or error, but current status " "is: %s") % vol['replication_status'] raise exception.ReplicationError( reason=msg, volume_id=vol['id']) volume_utils.notify_about_replication_usage(context, vol, 'sync') self.volume_rpcapi.reenable_replication(context, vol) cinder-8.0.0/cinder/quota_utils.py0000664000567000056710000002443312701406257020325 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob from oslo_config import cfg from oslo_log import log as logging from keystoneclient.auth.identity.generic import token from keystoneclient import client from keystoneclient import exceptions from keystoneclient import session from cinder import db from cinder import exception from cinder.i18n import _, _LW CONF = cfg.CONF CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token.__init__', 'keystone_authtoken') LOG = logging.getLogger(__name__) class GenericProjectInfo(object): """Abstraction layer for Keystone V2 and V3 project objects""" def __init__(self, project_id, project_keystone_api_version, project_parent_id=None, project_subtree=None, project_parent_tree=None): self.id = project_id self.keystone_api_version = project_keystone_api_version self.parent_id = project_parent_id self.subtree = project_subtree self.parents = project_parent_tree def get_volume_type_reservation(ctxt, volume, type_id, reserve_vol_type_only=False): from cinder import quota QUOTAS = quota.QUOTAS # Reserve quotas for the given volume type try: reserve_opts = {'volumes': 1, 'gigabytes': volume['size']} QUOTAS.add_volume_type_opts(ctxt, reserve_opts, type_id) # If reserve_vol_type_only is True, just reserve volume_type quota, # not volume quota. if reserve_vol_type_only: reserve_opts.pop('volumes') reserve_opts.pop('gigabytes') # Note that usually the project_id on the volume will be the same as # the project_id in the context. But, if they are different then the # reservations must be recorded against the project_id that owns the # volume. project_id = volume['project_id'] reservations = QUOTAS.reserve(ctxt, project_id=project_id, **reserve_opts) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) for over in overs: if 'gigabytes' in over: s_size = volume['size'] d_quota = quotas[over] d_consumed = _consumed(over) LOG.warning( _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG volume - (%(d_consumed)dG of " "%(d_quota)dG already consumed)"), {'s_pid': ctxt.project_id, 's_size': s_size, 'd_consumed': d_consumed, 'd_quota': d_quota}) raise exception.VolumeSizeExceedsAvailableQuota( requested=s_size, quota=d_quota, consumed=d_consumed) elif 'volumes' in over: LOG.warning( _LW("Quota exceeded for %(s_pid)s, tried to create " "volume (%(d_consumed)d volumes " "already consumed)"), {'s_pid': ctxt.project_id, 'd_consumed': _consumed(over)}) raise exception.VolumeLimitExceeded( allowed=quotas[over]) return reservations def _filter_domain_id_from_parents(domain_id, tree): """Removes the domain_id from the tree if present""" new_tree = None if tree: parent, children = next(iter(tree.items())) # Don't add the domain id to the parents hierarchy if parent != domain_id: new_tree = {parent: _filter_domain_id_from_parents(domain_id, children)} return new_tree def get_project_hierarchy(context, project_id, subtree_as_ids=False, parents_as_ids=False): """A Helper method to get the project hierarchy. Along with hierarchical multitenancy in keystone API v3, projects can be hierarchically organized. Therefore, we need to know the project hierarchy, if any, in order to do nested quota operations properly. If the domain is being used as the top most parent, it is filtered out from the parent tree and parent_id. 
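
    Illustrative result (hypothetical project IDs) for a Keystone v3
    tree A -> B -> C, queried for project 'B' with both flags set:

        parent_id = 'A'
        subtree   = {'C': None}
        parents   = {'A': None}   # domain id already filtered out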
""" try: keystone = _keystone_client(context) generic_project = GenericProjectInfo(project_id, keystone.version) if keystone.version == 'v3': project = keystone.projects.get(project_id, subtree_as_ids=subtree_as_ids, parents_as_ids=parents_as_ids) generic_project.parent_id = None if project.parent_id != project.domain_id: generic_project.parent_id = project.parent_id generic_project.subtree = ( project.subtree if subtree_as_ids else None) generic_project.parents = None if parents_as_ids: generic_project.parents = _filter_domain_id_from_parents( project.domain_id, project.parents) except exceptions.NotFound: msg = (_("Tenant ID: %s does not exist.") % project_id) raise webob.exc.HTTPNotFound(explanation=msg) return generic_project def get_parent_project_id(context, project_id): return get_project_hierarchy(context, project_id).parent_id def get_all_projects(context): # Right now this would have to be done as cloud admin with Keystone v3 return _keystone_client(context, (3, 0)).projects.list() def get_all_root_project_ids(context): project_list = get_all_projects(context) # Find every project which does not have a parent, meaning it is the # root of the tree project_roots = [project.id for project in project_list if not project.parent_id] return project_roots def update_alloc_to_next_hard_limit(context, resources, deltas, res, expire, project_id): from cinder import quota QUOTAS = quota.QUOTAS reservations = [] projects = get_project_hierarchy(context, project_id, parents_as_ids=True).parents hard_limit_found = False # Update allocated values up the chain til we hit a hard limit or run out # of parents while projects and not hard_limit_found: cur_proj_id = list(projects)[0] projects = projects[cur_proj_id] cur_quota_lim = QUOTAS.get_by_project_or_default( context, cur_proj_id, res) hard_limit_found = (cur_quota_lim != -1) cur_quota = {res: cur_quota_lim} cur_delta = {res: deltas[res]} try: reservations += db.quota_reserve( context, resources, cur_quota, cur_delta, expire, CONF.until_refresh, CONF.max_age, cur_proj_id, is_allocated_reserve=True) except exception.OverQuota: db.reservation_rollback(context, reservations) raise return reservations def validate_setup_for_nested_quota_use(ctxt, resources, nested_quota_driver, fix_allocated_quotas=False): """Validates the setup supports using nested quotas. Ensures that Keystone v3 or greater is being used, that the current user is of the cloud admin role, and that the existing quotas make sense to nest in the current hierarchy (e.g. that no child quota would be larger than it's parent). :param resources: the quota resources to validate :param nested_quota_driver: nested quota driver used to validate each tree :param fix_allocated_quotas: if True, parent projects "allocated" total will be calculated based on the existing child limits and the DB will be updated. If False, an exception is raised reporting any parent allocated quotas are currently incorrect. 
""" try: project_roots = get_all_root_project_ids(ctxt) # Now that we've got the roots of each tree, validate the trees # to ensure that each is setup logically for nested quotas for root in project_roots: root_proj = get_project_hierarchy(ctxt, root, subtree_as_ids=True) nested_quota_driver.validate_nested_setup( ctxt, resources, {root_proj.id: root_proj.subtree}, fix_allocated_quotas=fix_allocated_quotas ) except exceptions.VersionNotAvailable: msg = _("Keystone version 3 or greater must be used to get nested " "quota support.") raise exception.CinderException(message=msg) except exceptions.Forbidden: msg = _("Must run this command as cloud admin using " "a Keystone policy.json which allows cloud " "admin to list and get any project.") raise exception.CinderException(message=msg) def _keystone_client(context, version=(3, 0)): """Creates and returns an instance of a generic keystone client. :param context: The request context :param version: version of Keystone to request :return: keystoneclient.client.Client object """ auth_plugin = token.Token( auth_url=CONF.keystone_authtoken.auth_uri, token=context.auth_token, project_id=context.project_id) client_session = session.Session(auth=auth_plugin) return client.Client(auth_url=CONF.keystone_authtoken.auth_uri, session=client_session, version=version) cinder-8.0.0/cinder/config/0000775000567000056710000000000012701406543016637 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/config/generate_cinder_opts.py0000664000567000056710000001650312701406250023374 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import textwrap if __name__ == "__main__": opt_file = open("cinder/opts.py", 'a') opt_dict = {} dir_trees_list = [] REGISTER_OPTS_STR = "CONF.register_opts(" REGISTER_OPT_STR = "CONF.register_opt(" license_str = textwrap.dedent( """ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.\n """) opt_file.write(license_str) opt_file.write("import itertools\n\n") targetdir = os.environ['TARGETDIR'] basedir = os.environ['BASEDIRESC'] common_string = ('find ' + targetdir + ' -type f -name "*.py" ! 
' '-path "*/tests/*" -exec grep -l "%s" {} ' '+ | sed -e "s/^' + basedir + '\///g" | sort -u') cmd_opts = common_string % REGISTER_OPTS_STR output_opts = subprocess.check_output('{}'.format(cmd_opts), shell = True) dir_trees_list = output_opts.split() cmd_opt = common_string % REGISTER_OPT_STR output_opt = subprocess.check_output('{}'.format(cmd_opt), shell = True) temp_list = output_opt.split() for item in temp_list: dir_trees_list.append(item) dir_trees_list.sort() flag = False def _check_import(aline): if len(aline) > 79: new_lines = aline.partition(' as ') return new_lines else: return [aline] for atree in dir_trees_list: if atree in ["cinder/config/generate_cinder_opts.py", "cinder/hacking/checks.py", "cinder/volume/configuration.py", "cinder/test.py"]: continue dirs_list = atree.split('/') import_module = "from " init_import_module = "" import_name = "" for dir in dirs_list: if dir.find(".py") == -1: import_module += dir + "." init_import_module += dir + "." import_name += dir + "_" else: if dir[:-3] != "__init__": import_name += dir[:-3].replace("_", "") import_module = (import_module[:-1] + " import " + dir[:-3] + " as " + import_name) lines = _check_import(import_module) if len(lines) > 1: opt_file.write(lines[0] + lines[1] + "\\\n") opt_file.write(" " + lines[2] + "\n") else: opt_file.write(lines[0] + "\n") else: import_name = import_name[:-1].replace('/', '.') init_import = atree[:-12].replace('/', '.') opt_file.write("import " + init_import + "\n") flag = True if flag is False: opt_dict[import_name] = atree else: opt_dict[init_import_module.strip(".")] = atree flag = False registered_opts_dict = {'DEFAULT': [], } def _write_item(opts): list_name = opts[-3:] if list_name.lower() == "opt": line_to_write = " [" + opts.strip("\n") + "],\n" opt_line = _check_line_length(line_to_write) if len(opt_line) > 1: opt_file.write(opt_line[0] + opt_line[1] + "\n") opt_file.write(" " + opt_line[2]) else: opt_file.write(opt_line[0]) else: line_to_write = " " + opts.strip("\n") + ",\n" opt_line = _check_line_length(line_to_write) if len(opt_line) > 1: opt_file.write(opt_line[0] + opt_line[1] + "\n") opt_file.write(" " + opt_line[2]) else: opt_file.write(opt_line[0]) def _retrieve_name(aline): if REGISTER_OPT_STR in aline: str_to_replace = REGISTER_OPT_STR else: str_to_replace = REGISTER_OPTS_STR return aline.replace(str_to_replace, "") def _check_line_length(aline): if len(aline) > 79: temp = aline.split(".") lines_to_write = [] for section in temp: lines_to_write.append(section) lines_to_write.append('.') return lines_to_write else: return [aline] for key in opt_dict: fd = os.open(opt_dict[key], os.O_RDONLY) afile = os.fdopen(fd, "r") for aline in afile: exists = aline.find("CONF.register_opt") if exists != -1: # TODO(kjnelson) FIX THIS LATER. These are instances where # CONF.register_opts is happening without actually registering # real lists of opts exists = aline.find('base_san_opts') if (exists != -1) or (key == 'cinder_volume_configuration'): continue group_exists = aline.find(', group=') formatted_opt = _retrieve_name(aline[: group_exists]) formatted_opt = formatted_opt.replace(')', '').strip() if group_exists != -1: group_name = aline[group_exists:-1].replace(', group=\"\'', '').\ replace(', group=', '').strip("\'\")").upper() if group_name in registered_opts_dict: line = key + "." + formatted_opt registered_opts_dict[group_name].append(line) else: line = key + "." + formatted_opt registered_opts_dict[group_name] = [line] else: line = key + "." 
+ formatted_opt registered_opts_dict['DEFAULT'].append(line) setup_str = ("\n\n" "def list_opts():\n" " return [\n") opt_file.write(setup_str) for key in registered_opts_dict: section_start_str = (" ('" + key + "',\n" " itertools.chain(\n") opt_file.write(section_start_str) for item in registered_opts_dict[key]: _write_item(item) section_end_str = " )),\n" opt_file.write(section_end_str) closing_str = (" ]\n") opt_file.write(closing_str) opt_file.close() cinder-8.0.0/cinder/config/cinder-config-generator.conf0000664000567000056710000000121212701406250024170 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/cinder/cinder.conf.sample wrap_width = 79 namespace = cinder namespace = keystonemiddleware.auth_token namespace = oslo.config namespace = oslo.concurrency namespace = oslo.context namespace = oslo.db namespace = oslo.i18n namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware namespace = oslo.policy namespace = oslo.reports namespace = oslo.rootwrap namespace = oslo.serialization namespace = oslo.service.periodic_task namespace = oslo.service.service namespace = oslo.service.sslutils namespace = oslo.service.wsgi namespace = oslo.utils namespace = oslo.versionedobjects namespace = oslo.vmware cinder-8.0.0/cinder/cmd/0000775000567000056710000000000012701406543016135 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/cmd/rtstool.py0000664000567000056710000002476112701406257020231 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2012 - 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys # We always use rtslib-fb, but until version 2.1.52 it didn't have its own # namespace, so we must be backwards compatible. try: import rtslib_fb except ImportError: import rtslib as rtslib_fb from cinder import i18n from cinder.i18n import _ i18n.enable_lazy() class RtstoolError(Exception): pass class RtstoolImportError(RtstoolError): pass def create(backing_device, name, userid, password, iser_enabled, initiator_iqns=None, portals_ips=None, portals_port=3260): # List of IPS that will not raise an error when they fail binding. # Originally we will fail on all binding errors. 
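    # A typical invocation by Cinder's LIO target helper looks like this
    # (illustrative values):
    #
    #   cinder-rtstool create /dev/cinder-volumes/volume-1 \
    #       iqn.2010-10.org.openstack:volume-1 user password False \
    #       -a192.168.0.10,192.168.0.11 -p3260
    #
    # The optional -a/-p arguments carry the portal IPs and port and are
    # parsed into portals_ips/portals_port by parse_optional_create()
    # further down.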
ips_allow_fail = () try: rtsroot = rtslib_fb.root.RTSRoot() except rtslib_fb.utils.RTSLibError: print(_('Ensure that configfs is mounted at /sys/kernel/config.')) raise # Look to see if BlockStorageObject already exists for x in rtsroot.storage_objects: if x.name == name: # Already exists, use this one return so_new = rtslib_fb.BlockStorageObject(name=name, dev=backing_device) target_new = rtslib_fb.Target(rtslib_fb.FabricModule('iscsi'), name, 'create') tpg_new = rtslib_fb.TPG(target_new, mode='create') tpg_new.set_attribute('authentication', '1') lun_new = rtslib_fb.LUN(tpg_new, storage_object=so_new) if initiator_iqns: initiator_iqns = initiator_iqns.strip(' ') for i in initiator_iqns.split(','): acl_new = rtslib_fb.NodeACL(tpg_new, i, mode='create') acl_new.chap_userid = userid acl_new.chap_password = password rtslib_fb.MappedLUN(acl_new, lun_new.lun, lun_new.lun) tpg_new.enable = 1 # If no ips are given we'll bind to all IPv4 and v6 if not portals_ips: portals_ips = ('0.0.0.0', '::0') # TODO(emh): Binding to IPv6 fails sometimes -- let pass for now. ips_allow_fail = ('::0',) for ip in portals_ips: try: portal = rtslib_fb.NetworkPortal(tpg_new, ip, portals_port, mode='any') except rtslib_fb.utils.RTSLibError: raise_exc = ip not in ips_allow_fail msg_type = 'Error' if raise_exc else 'Warning' print(_('%(msg_type)s: creating NetworkPortal: ensure port ' '%(port)d on ip %(ip)s is not in use by another service.') % {'msg_type': msg_type, 'port': portals_port, 'ip': ip}) if raise_exc: raise else: try: if iser_enabled == 'True': portal.iser = True except rtslib_fb.utils.RTSLibError: print(_('Error enabling iSER for NetworkPortal: please ensure ' 'that RDMA is supported on your iSCSI port %(port)d ' 'on ip %(ip)s.') % {'port': portals_port, 'ip': ip}) raise def _lookup_target(target_iqn, initiator_iqn): try: rtsroot = rtslib_fb.root.RTSRoot() except rtslib_fb.utils.RTSLibError: print(_('Ensure that configfs is mounted at /sys/kernel/config.')) raise # Look for the target for t in rtsroot.targets: if t.wwn == target_iqn: return t raise RtstoolError(_('Could not find target %s') % target_iqn) def add_initiator(target_iqn, initiator_iqn, userid, password): target = _lookup_target(target_iqn, initiator_iqn) tpg = next(target.tpgs) # get the first one for acl in tpg.node_acls: # See if this ACL configuration already exists if acl.node_wwn.lower() == initiator_iqn.lower(): # No further action required return acl_new = rtslib_fb.NodeACL(tpg, initiator_iqn, mode='create') acl_new.chap_userid = userid acl_new.chap_password = password rtslib_fb.MappedLUN(acl_new, 0, tpg_lun=0) def delete_initiator(target_iqn, initiator_iqn): target = _lookup_target(target_iqn, initiator_iqn) tpg = next(target.tpgs) # get the first one for acl in tpg.node_acls: if acl.node_wwn.lower() == initiator_iqn.lower(): acl.delete() return print(_('delete_initiator: %s ACL not found. Continuing.') % initiator_iqn) # Return successfully. 
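# Example CLI usage for the initiator commands, mirroring usage() below
# (illustrative IQNs):
#
#   cinder-rtstool add-initiator iqn.2010-10.org.openstack:volume-1 \
#       user password iqn.1994-05.com.redhat:client1
#   cinder-rtstool delete-initiator iqn.2010-10.org.openstack:volume-1 \
#       iqn.1994-05.com.redhat:client1
#
# Both resolve the target via _lookup_target() and operate on its first
# TPG only.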
def get_targets(): rtsroot = rtslib_fb.root.RTSRoot() for x in rtsroot.targets: print(x.wwn) def delete(iqn): rtsroot = rtslib_fb.root.RTSRoot() for x in rtsroot.targets: if x.wwn == iqn: x.delete() break for x in rtsroot.storage_objects: if x.name == iqn: x.delete() break def verify_rtslib(): for member in ['BlockStorageObject', 'FabricModule', 'LUN', 'MappedLUN', 'NetworkPortal', 'NodeACL', 'root', 'Target', 'TPG']: if not hasattr(rtslib_fb, member): raise RtstoolImportError(_("rtslib_fb is missing member %s: You " "may need a newer python-rtslib-fb.") % member) def usage(): print("Usage:") print(sys.argv[0] + " create [device] [name] [userid] [password] [iser_enabled]" + " [-a] [-pPORT]") print(sys.argv[0] + " add-initiator [target_iqn] [userid] [password] [initiator_iqn]") print(sys.argv[0] + " delete-initiator [target_iqn] [initiator_iqn]") print(sys.argv[0] + " get-targets") print(sys.argv[0] + " delete [iqn]") print(sys.argv[0] + " verify") print(sys.argv[0] + " save [path_to_file]") sys.exit(1) def save_to_file(destination_file): rtsroot = rtslib_fb.root.RTSRoot() try: # If default destination use rtslib default save file if not destination_file: destination_file = rtslib_fb.root.default_save_file path_to_file = os.path.dirname(destination_file) # NOTE(geguileo): With default file we ensure path exists and # create it if doesn't. # Cinder's LIO target helper runs this as root, so it will have no # problem creating directory /etc/target. # If run manually from the command line without being root you will # get an error, same as when creating and removing targets. if not os.path.exists(path_to_file): os.makedirs(path_to_file, 0o755) except OSError as exc: raise RtstoolError(_('targetcli not installed and could not create ' 'default directory (%(default_path)s): %(exc)s') % {'default_path': path_to_file, 'exc': exc}) try: rtsroot.save_to_file(destination_file) except (OSError, IOError) as exc: raise RtstoolError(_('Could not save configuration to %(file_path)s: ' '%(exc)s') % {'file_path': destination_file, 'exc': exc}) def restore_from_file(configration_file): rtsroot = rtslib_fb.root.RTSRoot() # If configuration file is None, use rtslib default save file. 
    if not configration_file:
        configration_file = rtslib_fb.root.default_save_file

    try:
        rtsroot.restore_from_file(configration_file)
    except (OSError, IOError) as exc:
        raise RtstoolError(_('Could not restore configuration file '
                             '%(file_path)s: %(exc)s') %
                           {'file_path': configration_file, 'exc': exc})


def parse_optional_create(argv):
    optional_args = {}

    for arg in argv:
        if arg.startswith('-a'):
            ips = [ip for ip in arg[2:].split(',') if ip]
            if not ips:
                usage()
            optional_args['portals_ips'] = ips
        elif arg.startswith('-p'):
            try:
                optional_args['portals_port'] = int(arg[2:])
            except ValueError:
                usage()
        else:
            optional_args['initiator_iqns'] = arg
    return optional_args


def main(argv=None):
    if argv is None:
        argv = sys.argv

    if len(argv) < 2:
        usage()

    if argv[1] == 'create':
        if len(argv) < 7:
            usage()
        if len(argv) > 10:
            usage()

        backing_device = argv[2]
        name = argv[3]
        userid = argv[4]
        password = argv[5]
        iser_enabled = argv[6]

        if len(argv) > 7:
            optional_args = parse_optional_create(argv[7:])
        else:
            optional_args = {}

        create(backing_device, name, userid, password, iser_enabled,
               **optional_args)

    elif argv[1] == 'add-initiator':
        if len(argv) < 6:
            usage()

        target_iqn = argv[2]
        userid = argv[3]
        password = argv[4]
        initiator_iqn = argv[5]

        add_initiator(target_iqn, initiator_iqn, userid, password)

    elif argv[1] == 'delete-initiator':
        if len(argv) < 4:
            usage()

        target_iqn = argv[2]
        initiator_iqn = argv[3]

        delete_initiator(target_iqn, initiator_iqn)

    elif argv[1] == 'get-targets':
        get_targets()

    elif argv[1] == 'delete':
        if len(argv) < 3:
            usage()

        iqn = argv[2]
        delete(iqn)

    elif argv[1] == 'verify':
        # This is used to verify that this script can be called by cinder,
        # and that rtslib_fb is new enough to work.
        verify_rtslib()
        return 0

    elif argv[1] == 'save':
        if len(argv) > 3:
            usage()

        destination_file = argv[2] if len(argv) > 2 else None
        save_to_file(destination_file)
        return 0

    elif argv[1] == 'restore':
        if len(argv) > 3:
            usage()

        configuration_file = argv[2] if len(argv) > 2 else None
        restore_from_file(configuration_file)
        return 0

    else:
        usage()

    return 0
cinder-8.0.0/cinder/cmd/manage.py0000664000567000056710000005065112701406250017741 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ CLI interface for cinder management. """ from __future__ import print_function import logging as python_logging import os import sys from oslo_config import cfg from oslo_db.sqlalchemy import migration from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import timeutils from cinder import i18n i18n.enable_lazy() # Need to register global_opts from cinder.common import config # noqa from cinder import context from cinder import db from cinder.db import migration as db_migration from cinder.db.sqlalchemy import api as db_api from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import rpc from cinder import utils from cinder import version from cinder.volume import utils as vutils CONF = cfg.CONF # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator class ShellCommands(object): def bpython(self): """Runs a bpython shell. Falls back to Ipython/python shell if unavailable """ self.run('bpython') def ipython(self): """Runs an Ipython shell. Falls back to Python shell if unavailable """ self.run('ipython') def python(self): """Runs a python shell. Falls back to Python shell if unavailable """ self.run('python') @args('--shell', dest="shell", metavar='', help='Python shell') def run(self, shell=None): """Runs a Python interactive interpreter.""" if not shell: shell = 'bpython' if shell == 'bpython': try: import bpython bpython.embed() except ImportError: shell = 'ipython' if shell == 'ipython': try: from IPython import embed embed() except ImportError: try: # Ipython < 0.11 # Explicitly pass an empty list as arguments, because # otherwise IPython would use sys.argv from this script. import IPython shell = IPython.Shell.IPShell(argv=[]) shell.mainloop() except ImportError: # no IPython module shell = 'python' if shell == 'python': import code try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', # because we already know 'readline' was imported successfully. 
import rlcompleter # noqa readline.parse_and_bind("tab:complete") code.interact() @args('--path', required=True, help='Script path') def script(self, path): """Runs the script from the specified path with flags set properly.""" exec(compile(open(path).read(), path, 'exec'), locals(), globals()) def _db_error(caught_exception): print('%s' % caught_exception) print(_("The above error may show that the database has not " "been created.\nPlease create a database using " "'cinder-manage db sync' before running this command.")) sys.exit(1) class HostCommands(object): """List hosts.""" @args('zone', nargs='?', default=None, help='Availability Zone (default: %(default)s)') def list(self, zone=None): """Show a list of all physical hosts. Can be filtered by zone. args: [zone] """ print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'}) ctxt = context.get_admin_context() services = objects.ServiceList.get_all(ctxt) if zone: services = [s for s in services if s.availability_zone == zone] hosts = [] for srv in services: if not [h for h in hosts if h['host'] == srv['host']]: hosts.append(srv) for h in hosts: print(_("%(host)-25s\t%(availability_zone)-15s") % {'host': h['host'], 'availability_zone': h['availability_zone']}) class DbCommands(object): """Class for managing the database.""" def __init__(self): pass @args('version', nargs='?', default=None, help='Database version') def sync(self, version=None): """Sync the database up to the most recent version.""" return db_migration.db_sync(version) def version(self): """Print the current database version.""" print(migration.db_version(db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, db_migration.INIT_VERSION)) @args('age_in_days', type=int, help='Purge deleted rows older than age in days') def purge(self, age_in_days): """Purge deleted rows older than a given age from cinder tables.""" age_in_days = int(age_in_days) if age_in_days <= 0: print(_("Must supply a positive, non-zero value for age")) sys.exit(1) ctxt = context.get_admin_context() db.purge_deleted_rows(ctxt, age_in_days) class VersionCommands(object): """Class for exposing the codebase version.""" def __init__(self): pass def list(self): print(version.version_string()) def __call__(self): self.list() class VolumeCommands(object): """Methods for dealing with a cloud in an odd state.""" def __init__(self): self._client = None def _rpc_client(self): if self._client is None: if not rpc.initialized(): rpc.init(CONF) target = messaging.Target(topic=CONF.volume_topic) serializer = objects.base.CinderObjectSerializer() self._client = rpc.get_client(target, serializer=serializer) return self._client @args('volume_id', help='Volume ID to be deleted') def delete(self, volume_id): """Delete a volume, bypassing the check that it must be available.""" ctxt = context.get_admin_context() volume = objects.Volume.get_by_id(ctxt, volume_id) host = vutils.extract_host(volume.host) if volume.host else None if not host: print(_("Volume not yet assigned to host.")) print(_("Deleting volume from database and skipping rpc.")) volume.destroy() return if volume.status == 'in-use': print(_("Volume is in-use.")) print(_("Detach volume from instance and then try again.")) return cctxt = self._rpc_client().prepare(server=host) cctxt.cast(ctxt, "delete_volume", volume_id=volume.id, volume=volume) @args('--currenthost', required=True, help='Existing volume host name') @args('--newhost', required=True, help='New volume host name') def update_host(self, currenthost, newhost): """Modify the host name associated 
with a volume. Particularly to recover from cases where one has moved their Cinder Volume node, or modified their backend_name in a multi-backend config. """ ctxt = context.get_admin_context() volumes = db.volume_get_all_by_host(ctxt, currenthost) for v in volumes: db.volume_update(ctxt, v['id'], {'host': newhost}) class ConfigCommands(object): """Class for exposing the flags defined by flag_file(s).""" def __init__(self): pass @args('param', nargs='?', default=None, help='Configuration parameter to display (default: %(default)s)') def list(self, param=None): """List parameters configured for cinder. Lists all parameters configured for cinder unless an optional argument is specified. If the parameter is specified we only print the requested parameter. If the parameter is not found an appropriate error is produced by .get*(). """ param = param and param.strip() if param: print('%s = %s' % (param, CONF.get(param))) else: for key, value in CONF.items(): print('%s = %s' % (key, value)) class GetLogCommands(object): """Get logging information.""" def errors(self): """Get all of the errors from the log files.""" error_found = 0 if CONF.log_dir: logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')] for file in logs: log_file = os.path.join(CONF.log_dir, file) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print_name = 0 for index, line in enumerate(lines): if line.find(" ERROR ") > 0: error_found += 1 if print_name == 0: print(log_file + ":-") print_name = 1 print(_("Line %(dis)d : %(line)s") % {'dis': len(lines) - index, 'line': line}) if error_found == 0: print(_("No errors in logfiles!")) @args('num_entries', nargs='?', type=int, default=10, help='Number of entries to list (default: %(default)d)') def syslog(self, num_entries=10): """Get of the cinder syslog events.""" entries = int(num_entries) count = 0 log_file = '' if os.path.exists('/var/log/syslog'): log_file = '/var/log/syslog' elif os.path.exists('/var/log/messages'): log_file = '/var/log/messages' else: print(_("Unable to find system log file!")) sys.exit(1) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print(_("Last %s cinder syslog entries:-") % (entries)) for line in lines: if line.find("cinder") > 0: count += 1 print(_("%s") % (line)) if count == entries: break if count == 0: print(_("No cinder entries in syslog!")) class BackupCommands(object): """Methods for managing backups.""" def list(self): """List all backups. List all backups (including ones in progress) and the host on which the backup operation is running. """ ctxt = context.get_admin_context() backups = objects.BackupList.get_all(ctxt) hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s" print(hdr % (_('ID'), _('User ID'), _('Project ID'), _('Host'), _('Name'), _('Container'), _('Status'), _('Size'), _('Object Count'))) res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d" for backup in backups: object_count = 0 if backup['object_count'] is not None: object_count = backup['object_count'] print(res % (backup['id'], backup['user_id'], backup['project_id'], backup['host'], backup['display_name'], backup['container'], backup['status'], backup['size'], object_count)) @args('--currenthost', required=True, help='Existing backup host name') @args('--newhost', required=True, help='New backup host name') def update_backup_host(self, currenthost, newhost): """Modify the host name associated with a backup. 
Particularly to recover from cases where one has moved their Cinder Backup node, and not set backup_use_same_backend. """ ctxt = context.get_admin_context() backups = objects.BackupList.get_all_by_host(ctxt, currenthost) for bk in backups: bk.host = newhost bk.save() class ServiceCommands(object): """Methods for managing services.""" def list(self): """Show a list of all cinder services.""" ctxt = context.get_admin_context() services = objects.ServiceList.get_all(ctxt) print_format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s" print(print_format % (_('Binary'), _('Host'), _('Zone'), _('Status'), _('State'), _('Updated At'), _('RPC Version'), _('Object Version'))) for svc in services: alive = utils.service_is_up(svc) art = ":-)" if alive else "XXX" status = 'enabled' if svc.disabled: status = 'disabled' updated_at = svc.updated_at if updated_at: updated_at = timeutils.normalize_time(updated_at) rpc_version = (svc.rpc_current_version or rpc.LIBERTY_RPC_VERSIONS.get(svc.binary, '')) object_version = (svc.object_current_version or 'liberty') print(print_format % (svc.binary, svc.host.partition('.')[0], svc.availability_zone, status, art, updated_at, rpc_version, object_version)) @args('binary', type=str, help='Service to delete from the host.') @args('host_name', type=str, help='Host from which to remove the service.') def remove(self, binary, host_name): """Completely removes a service.""" ctxt = context.get_admin_context() try: svc = objects.Service.get_by_args(ctxt, host_name, binary) svc.destroy() except exception.ServiceNotFound as e: print(_("Host not found. Failed to remove %(service)s" " on %(host)s.") % {'service': binary, 'host': host_name}) print (u"%s" % e.args) return 2 print(_("Service %(service)s on host %(host)s removed.") % {'service': binary, 'host': host_name}) CATEGORIES = { 'backup': BackupCommands, 'config': ConfigCommands, 'db': DbCommands, 'host': HostCommands, 'logs': GetLogCommands, 'service': ServiceCommands, 'shell': ShellCommands, 'version': VersionCommands, 'volume': VolumeCommands, } def methods_of(obj): """Return non-private methods from an object. Get all callable methods of an object that don't start with underscore :return: a list of tuples of the form (method_name, method) """ result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def add_command_parsers(subparsers): for category in CATEGORIES: command_object = CATEGORIES[category]() parser = subparsers.add_parser(category) parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) category_opt = cfg.SubCommandOpt('category', title='Command categories', handler=add_command_parsers) def get_arg_string(args): arg = None if args[0] == '-': # (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars # is optional args. Notice that cfg module takes care of # actual ArgParser so prefix_chars is always '-'. 
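    # For example, '--currenthost' yields 'currenthost', '-v' yields 'v',
    # and a positional name such as 'volume_id' is returned unchanged.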
if args[1] == '-': # This is long optional arg arg = args[2:] else: arg = args[1:] else: arg = args return arg def fetch_func_args(func): fn_args = [] for args, kwargs in getattr(func, 'args', []): arg = get_arg_string(args[0]) fn_args.append(getattr(CONF.category, arg)) return fn_args def main(): objects.register_all() """Parse options and call the appropriate class/method.""" CONF.register_cli_opt(category_opt) script_name = sys.argv[0] if len(sys.argv) < 2: print(_("\nOpenStack Cinder version: %(version)s\n") % {'version': version.version_string()}) print(script_name + " category action []") print(_("Available categories:")) for category in CATEGORIES: print(_("\t%s") % category) sys.exit(2) try: CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) except cfg.ConfigDirNotFoundError as details: print(_("Invalid directory: %s") % details) sys.exit(2) except cfg.ConfigFilesNotFoundError: cfgfile = CONF.config_file[-1] if CONF.config_file else None if cfgfile and not os.access(cfgfile, os.R_OK): st = os.stat(cfgfile) print(_("Could not read %s. Re-running with sudo") % cfgfile) try: os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) except Exception: print(_('sudo failed, continuing as if nothing happened')) print(_('Please re-run cinder-manage as root.')) sys.exit(2) fn = CONF.category.action_fn fn_args = fetch_func_args(fn) fn(*fn_args) cinder-8.0.0/cinder/cmd/all.py0000664000567000056710000000731212701406250017255 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack, LLC # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for All cinder services. This script attempts to start all the cinder services in one process. Each service is started in its own greenthread. Please note that exceptions and sys.exit() on the starting of a service are logged and the script will continue attempting to launch the rest of the services. 
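For example, if cinder-backup fails to start, the exception is logged and
the script still goes on to launch cinder-volume.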
""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from cinder import i18n i18n.enable_lazy() # Need to register global_opts from cinder.cmd import volume as volume_cmd from cinder.common import config # noqa from cinder.db import api as session from cinder.i18n import _LE from cinder import objects from cinder import rpc from cinder import service from cinder import utils from cinder import version CONF = cfg.CONF # TODO(e0ne): get a rid of code duplication in cinder.cmd module in Mitaka def main(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) config.set_middleware_defaults() logging.setup(CONF, "cinder") LOG = logging.getLogger('cinder.all') utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) rpc.init(CONF) launcher = service.process_launcher() # cinder-api try: server = service.WSGIService('osapi_volume') launcher.launch_service(server, workers=server.workers or 1) except (Exception, SystemExit): LOG.exception(_LE('Failed to load osapi_volume')) for binary in ['cinder-scheduler', 'cinder-backup']: try: launcher.launch_service(service.Service.create(binary=binary)) except (Exception, SystemExit): LOG.exception(_LE('Failed to load %s'), binary) # cinder-volume try: if CONF.enabled_backends: for backend in CONF.enabled_backends: CONF.register_opt(volume_cmd.host_opt, group=backend) backend_host = getattr(CONF, backend).backend_host host = "%s@%s" % (backend_host or CONF.host, backend) server = service.Service.create(host=host, service_name=backend, binary='cinder-volume') # Dispose of the whole DB connection pool here before # starting another process. Otherwise we run into cases # where child processes share DB connections which results # in errors. session.dispose_engine() launcher.launch_service(server) else: server = service.Service.create(binary='cinder-volume') launcher.launch_service(server) except (Exception, SystemExit): LOG.exception(_LE('Failed to load conder-volume')) launcher.wait() cinder-8.0.0/cinder/cmd/__init__.py0000664000567000056710000000000012701406250020227 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/cmd/scheduler.py0000664000567000056710000000326012701406250020461 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for Cinder Scheduler.""" import eventlet eventlet.monkey_patch() import logging as python_logging import sys from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from cinder import i18n i18n.enable_lazy() # Need to register global_opts from cinder.common import config # noqa from cinder import objects from cinder import service from cinder import utils from cinder import version CONF = cfg.CONF def main(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create(binary='cinder-scheduler') service.serve(server) service.wait() cinder-8.0.0/cinder/cmd/volume.py0000664000567000056710000000670212701406250020016 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Cinder Volume.""" import logging as python_logging import os import eventlet from cinder import objects if os.name == 'nt': # eventlet monkey patching the os module causes subprocess.Popen to fail # on Windows when using pipes due to missing non-blocking IO support. 
eventlet.monkey_patch(os=False) else: eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from cinder import i18n i18n.enable_lazy() # Need to register global_opts from cinder.common import config # noqa from cinder.db import api as session from cinder.i18n import _ from cinder import service from cinder import utils from cinder import version deprecated_host_opt = cfg.DeprecatedOpt('host') host_opt = cfg.StrOpt('backend_host', help='Backend override of host value.', deprecated_opts=[deprecated_host_opt]) cfg.CONF.register_cli_opt(host_opt) CONF = cfg.CONF def main(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) launcher = service.get_launcher() LOG = logging.getLogger(__name__) service_started = False if CONF.enabled_backends: for backend in CONF.enabled_backends: CONF.register_opt(host_opt, group=backend) backend_host = getattr(CONF, backend).backend_host host = "%s@%s" % (backend_host or CONF.host, backend) try: server = service.Service.create(host=host, service_name=backend, binary='cinder-volume') except Exception: msg = _('Volume service %s failed to start.') % host LOG.exception(msg) else: # Dispose of the whole DB connection pool here before # starting another process. Otherwise we run into cases where # child processes share DB connections which results in errors. session.dispose_engine() launcher.launch_service(server) service_started = True else: server = service.Service.create(binary='cinder-volume') launcher.launch_service(server) service_started = True if not service_started: msg = _('No volume service(s) started successfully, terminating.') LOG.error(msg) sys.exit(1) launcher.wait() cinder-8.0.0/cinder/cmd/backup.py0000664000567000056710000000315312701406250017751 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for Cinder Volume Backup.""" import logging as python_logging import sys import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts eventlet.monkey_patch() from cinder import i18n i18n.enable_lazy() # Need to register global_opts from cinder.common import config # noqa from cinder import objects from cinder import service from cinder import utils from cinder import version CONF = cfg.CONF def main(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create(binary='cinder-backup') service.serve(server) service.wait() cinder-8.0.0/cinder/cmd/volume_usage_audit.py0000664000567000056710000002534612701406250022375 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cron script to generate usage notifications for volumes existing during the audit period. Together with the notifications generated by volumes create/delete/resize, over that time period, this allows an external system consuming usage notification feeds to calculate volume usage for each tenant. Time periods are specified as 'hour', 'month', 'day' or 'year' hour = previous hour. If run at 9:07am, will generate usage for 8-9am. month = previous month. If the script is run April 1, it will generate usages for March 1 through March 31. day = previous day. if run on July 4th, it generates usages for July 3rd. year = previous year. If run on Jan 1, it generates usages for Jan 1 through Dec 31 of the previous year. 
""" from __future__ import print_function import datetime import sys from oslo_config import cfg from oslo_log import log as logging from cinder import i18n i18n.enable_lazy() from cinder import context from cinder import db from cinder.i18n import _, _LE from cinder import objects from cinder import rpc from cinder import utils from cinder import version import cinder.volume.utils CONF = cfg.CONF script_opts = [ cfg.StrOpt('start_time', help="If this option is specified then the start time " "specified is used instead of the start time of the " "last completed audit period."), cfg.StrOpt('end_time', help="If this option is specified then the end time " "specified is used instead of the end time of the " "last completed audit period."), cfg.BoolOpt('send_actions', default=False, help="Send the volume and snapshot create and delete " "notifications generated in the specified period."), ] CONF.register_cli_opts(script_opts) def main(): objects.register_all() admin_context = context.get_admin_context() CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") LOG = logging.getLogger("cinder") rpc.init(CONF) begin, end = utils.last_completed_audit_period() if CONF.start_time: begin = datetime.datetime.strptime(CONF.start_time, "%Y-%m-%d %H:%M:%S") if CONF.end_time: end = datetime.datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S") if not end > begin: msg = _("The end time (%(end)s) must be after the start " "time (%(start)s).") % {'start': begin, 'end': end} LOG.error(msg) sys.exit(-1) LOG.debug("Starting volume usage audit") msg = _("Creating usages for %(begin_period)s until %(end_period)s") LOG.debug(msg, {"begin_period": str(begin), "end_period": str(end)}) extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } volumes = db.volume_get_active_by_window(admin_context, begin, end) LOG.debug("Found %d volumes", len(volumes)) for volume_ref in volumes: try: LOG.debug("Send exists notification for " "<%(extra_info)s>", {'volume_id': volume_ref.id, 'project_id': volume_ref.project_id, 'extra_info': extra_info}) cinder.volume.utils.notify_about_volume_usage( admin_context, volume_ref, 'exists', extra_usage_info=extra_info) except Exception as exc_msg: LOG.exception(_LE("Exists volume notification failed: %s"), exc_msg, resource=volume_ref) if (CONF.send_actions and volume_ref.created_at > begin and volume_ref.created_at < end): try: local_extra_info = { 'audit_period_beginning': str(volume_ref.created_at), 'audit_period_ending': str(volume_ref.created_at), } LOG.debug("Send create notification for " " " " <%(extra_info)s>", {'volume_id': volume_ref.id, 'project_id': volume_ref.project_id, 'extra_info': local_extra_info}) cinder.volume.utils.notify_about_volume_usage( admin_context, volume_ref, 'create.start', extra_usage_info=local_extra_info) cinder.volume.utils.notify_about_volume_usage( admin_context, volume_ref, 'create.end', extra_usage_info=local_extra_info) except Exception as exc_msg: LOG.exception(_LE("Create volume notification failed: %s"), exc_msg, resource=volume_ref) if (CONF.send_actions and volume_ref.deleted_at and volume_ref.deleted_at > begin and volume_ref.deleted_at < end): try: local_extra_info = { 'audit_period_beginning': str(volume_ref.deleted_at), 'audit_period_ending': str(volume_ref.deleted_at), } LOG.debug("Send delete notification for " " " " <%(extra_info)s>", {'volume_id': volume_ref.id, 'project_id': volume_ref.project_id, 'extra_info': local_extra_info}) 
cinder.volume.utils.notify_about_volume_usage( admin_context, volume_ref, 'delete.start', extra_usage_info=local_extra_info) cinder.volume.utils.notify_about_volume_usage( admin_context, volume_ref, 'delete.end', extra_usage_info=local_extra_info) except Exception as exc_msg: LOG.exception(_LE("Delete volume notification failed: %s"), exc_msg, resource=volume_ref) snapshots = objects.SnapshotList.get_active_by_window(admin_context, begin, end) LOG.debug("Found %d snapshots", len(snapshots)) for snapshot_ref in snapshots: try: LOG.debug("Send notification for " " <%(extra_info)s>", {'snapshot_id': snapshot_ref.id, 'project_id': snapshot_ref.project_id, 'extra_info': extra_info}) cinder.volume.utils.notify_about_snapshot_usage(admin_context, snapshot_ref, 'exists', extra_info) except Exception as exc_msg: LOG.exception(_LE("Exists snapshot notification failed: %s"), exc_msg, resource=snapshot_ref) if (CONF.send_actions and snapshot_ref.created_at > begin and snapshot_ref.created_at < end): try: local_extra_info = { 'audit_period_beginning': str(snapshot_ref.created_at), 'audit_period_ending': str(snapshot_ref.created_at), } LOG.debug("Send create notification for " " " " <%(extra_info)s>", {'snapshot_id': snapshot_ref.id, 'project_id': snapshot_ref.project_id, 'extra_info': local_extra_info}) cinder.volume.utils.notify_about_snapshot_usage( admin_context, snapshot_ref, 'create.start', extra_usage_info=local_extra_info) cinder.volume.utils.notify_about_snapshot_usage( admin_context, snapshot_ref, 'create.end', extra_usage_info=local_extra_info) except Exception as exc_msg: LOG.exception(_LE("Create snapshot notification failed: %s"), exc_msg, resource=snapshot_ref) if (CONF.send_actions and snapshot_ref.deleted_at and snapshot_ref.deleted_at > begin and snapshot_ref.deleted_at < end): try: local_extra_info = { 'audit_period_beginning': str(snapshot_ref.deleted_at), 'audit_period_ending': str(snapshot_ref.deleted_at), } LOG.debug("Send delete notification for " " " " <%(extra_info)s>", {'snapshot_id': snapshot_ref.id, 'project_id': snapshot_ref.project_id, 'extra_info': local_extra_info}) cinder.volume.utils.notify_about_snapshot_usage( admin_context, snapshot_ref, 'delete.start', extra_usage_info=local_extra_info) cinder.volume.utils.notify_about_snapshot_usage( admin_context, snapshot_ref, 'delete.end', extra_usage_info=local_extra_info) except Exception as exc_msg: LOG.exception(_LE("Delete snapshot notification failed: %s"), exc_msg, resource=snapshot_ref) LOG.debug("Volume usage audit completed") cinder-8.0.0/cinder/cmd/api.py0000664000567000056710000000346512701406250017263 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Starter script for Cinder OS API.""" import eventlet eventlet.monkey_patch() import logging as python_logging import sys from cinder import objects from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from cinder import i18n i18n.enable_lazy() # Need to register global_opts from cinder.common import config from cinder import rpc from cinder import service from cinder import utils from cinder import version CONF = cfg.CONF def main(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) config.set_middleware_defaults() logging.setup(CONF, "cinder") python_logging.captureWarnings(True) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) rpc.init(CONF) launcher = service.process_launcher() server = service.WSGIService('osapi_volume') launcher.launch_service(server, workers=server.workers) launcher.wait() cinder-8.0.0/cinder/transfer/0000775000567000056710000000000012701406543017216 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/transfer/__init__.py0000664000567000056710000000166012701406250021325 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.transfer import ' elsewhere. from oslo_config import cfg from oslo_utils import importutils CONF = cfg.CONF API = importutils.import_class(CONF.transfer_api_class) cinder-8.0.0/cinder/transfer/api.py0000664000567000056710000002541412701406250020342 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to transferring ownership of volumes. 
""" import hashlib import hmac import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import six from cinder.db import base from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import quota from cinder.volume import api as volume_api from cinder.volume import utils as volume_utils volume_transfer_opts = [ cfg.IntOpt('volume_transfer_salt_length', default=8, help='The number of characters in the salt.'), cfg.IntOpt('volume_transfer_key_length', default=16, help='The number of characters in the ' 'autogenerated auth key.'), ] CONF = cfg.CONF CONF.register_opts(volume_transfer_opts) LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS class API(base.Base): """API for interacting volume transfers.""" def __init__(self, db_driver=None): self.volume_api = volume_api.API() super(API, self).__init__(db_driver) def get(self, context, transfer_id): rv = self.db.transfer_get(context, transfer_id) return dict(rv) def delete(self, context, transfer_id): """Make the RPC call to delete a volume transfer.""" volume_api.check_policy(context, 'delete_transfer') transfer = self.db.transfer_get(context, transfer_id) volume_ref = self.db.volume_get(context, transfer.volume_id) volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.delete.start") if volume_ref['status'] != 'awaiting-transfer': LOG.error(_LE("Volume in unexpected state")) self.db.transfer_destroy(context, transfer_id) volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.delete.end") def get_all(self, context, filters=None): filters = filters or {} volume_api.check_policy(context, 'get_all_transfers') if context.is_admin and 'all_tenants' in filters: transfers = self.db.transfer_get_all(context) else: transfers = self.db.transfer_get_all_by_project(context, context.project_id) return transfers def _get_random_string(self, length): """Get a random hex string of the specified length.""" rndstr = "" # Note that the string returned by this function must contain only # characters that the recipient can enter on their keyboard. The # function ssh224().hexdigit() achieves this by generating a hash # which will only contain hexidecimal digits. while len(rndstr) < length: rndstr += hashlib.sha224(os.urandom(255)).hexdigest() return rndstr[0:length] def _get_crypt_hash(self, salt, auth_key): """Generate a random hash based on the salt and the auth key.""" if not isinstance(salt, (six.binary_type, six.text_type)): salt = str(salt) if isinstance(salt, six.text_type): salt = salt.encode('utf-8') if not isinstance(auth_key, (six.binary_type, six.text_type)): auth_key = str(auth_key) if isinstance(auth_key, six.text_type): auth_key = auth_key.encode('utf-8') return hmac.new(salt, auth_key, hashlib.sha1).hexdigest() def create(self, context, volume_id, display_name): """Creates an entry in the transfers table.""" volume_api.check_policy(context, 'create_transfer') LOG.info(_LI("Generating transfer record for volume %s"), volume_id) volume_ref = self.db.volume_get(context, volume_id) if volume_ref['status'] != "available": raise exception.InvalidVolume(reason=_("status must be available")) volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.create.start") # The salt is just a short random string. salt = self._get_random_string(CONF.volume_transfer_salt_length) auth_key = self._get_random_string(CONF.volume_transfer_key_length) crypt_hash = self._get_crypt_hash(salt, auth_key) # TODO(ollie): Transfer expiry needs to be implemented. 
transfer_rec = {'volume_id': volume_id, 'display_name': display_name, 'salt': salt, 'crypt_hash': crypt_hash, 'expires_at': None} try: transfer = self.db.transfer_create(context, transfer_rec) except Exception: LOG.error(_LE("Failed to create transfer record " "for %s"), volume_id) raise volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.create.end") return {'id': transfer['id'], 'volume_id': transfer['volume_id'], 'display_name': transfer['display_name'], 'auth_key': auth_key, 'created_at': transfer['created_at']} def accept(self, context, transfer_id, auth_key): """Accept a volume that has been offered for transfer.""" # We must use an elevated context to see the volume that is still # owned by the donor. volume_api.check_policy(context, 'accept_transfer') transfer = self.db.transfer_get(context.elevated(), transfer_id) crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key) if crypt_hash != transfer['crypt_hash']: msg = (_("Attempt to transfer %s with invalid auth key.") % transfer_id) LOG.error(msg) raise exception.InvalidAuthKey(reason=msg) volume_id = transfer['volume_id'] vol_ref = self.db.volume_get(context.elevated(), volume_id) if vol_ref['consistencygroup_id']: msg = _("Volume %s must not be part of a consistency " "group.") % vol_ref['id'] LOG.error(msg) raise exception.InvalidVolume(reason=msg) volume_utils.notify_about_volume_usage(context, vol_ref, "transfer.accept.start") try: reserve_opts = {'volumes': 1, 'gigabytes': vol_ref.size} QUOTAS.add_volume_type_opts(context, reserve_opts, vol_ref.volume_type_id) reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) for over in overs: if 'gigabytes' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG volume (%(d_consumed)dG of " "%(d_quota)dG already consumed)") LOG.warning(msg, {'s_pid': context.project_id, 's_size': vol_ref['size'], 'd_consumed': _consumed(over), 'd_quota': quotas[over]}) raise exception.VolumeSizeExceedsAvailableQuota( requested=vol_ref['size'], consumed=_consumed(over), quota=quotas[over]) elif 'volumes' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "volume (%(d_consumed)d volumes " "already consumed)") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over)}) raise exception.VolumeLimitExceeded(allowed=quotas[over], name=over) try: donor_id = vol_ref['project_id'] reserve_opts = {'volumes': -1, 'gigabytes': -vol_ref.size} QUOTAS.add_volume_type_opts(context, reserve_opts, vol_ref.volume_type_id) donor_reservations = QUOTAS.reserve(context.elevated(), project_id=donor_id, **reserve_opts) except Exception: donor_reservations = None LOG.exception(_LE("Failed to update quota donating volume" " transfer id %s"), transfer_id) try: # Transfer ownership of the volume now, must use an elevated # context. 
self.volume_api.accept_transfer(context, vol_ref, context.user_id, context.project_id) self.db.transfer_accept(context.elevated(), transfer_id, context.user_id, context.project_id) QUOTAS.commit(context, reservations) if donor_reservations: QUOTAS.commit(context, donor_reservations, project_id=donor_id) LOG.info(_LI("Volume %s has been transferred."), volume_id) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) if donor_reservations: QUOTAS.rollback(context, donor_reservations, project_id=donor_id) vol_ref = self.db.volume_get(context, volume_id) volume_utils.notify_about_volume_usage(context, vol_ref, "transfer.accept.end") return {'id': transfer_id, 'display_name': transfer['display_name'], 'volume_id': vol_ref['id']} cinder-8.0.0/cinder/manager.py0000664000567000056710000001454712701406250017364 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that it is responsible for. For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. 
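
A minimal sketch of a manager subclass, assuming only what this module
provides (the class and method names in this example are hypothetical):

    from oslo_service import periodic_task

    class ExampleManager(Manager):
        @periodic_task.periodic_task
        def _refresh_state(self, context):
            # Invoked by the wrapping service through periodic_tasks().
            pass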
""" from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import periodic_task from cinder.db import base from cinder.i18n import _LI from cinder import rpc from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import version from eventlet import greenpool CONF = cfg.CONF LOG = logging.getLogger(__name__) class PeriodicTasks(periodic_task.PeriodicTasks): def __init__(self): super(PeriodicTasks, self).__init__(CONF) class Manager(base.Base, PeriodicTasks): # Set RPC API version to 1.0 by default. RPC_API_VERSION = '1.0' target = messaging.Target(version=RPC_API_VERSION) def __init__(self, host=None, db_driver=None): if not host: host = CONF.host self.host = host self.additional_endpoints = [] super(Manager, self).__init__(db_driver) def periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" return self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self): """Handle initialization if this is a standalone service. A hook point for services to execute tasks before the services are made available (i.e. showing up on RPC and starting to accept RPC calls) to other components. Child classes should override this method. """ pass def init_host_with_rpc(self): """A hook for service to do jobs after RPC is ready. Like init_host(), this method is a hook where services get a chance to execute tasks that *need* RPC. Child classes should override this method. """ pass def service_version(self): return version.version_string() def service_config(self): config = {} for key in CONF: config[key] = CONF.get(key, None) return config def is_working(self): """Method indicating if service is working correctly. This method is supposed to be overriden by subclasses and return if manager is working correctly. """ return True def reset(self): """Method executed when SIGHUP is caught by the process. We're utilizing it to reset RPC API version pins to avoid restart of the service when rolling upgrade is completed. """ LOG.info(_LI('Resetting cached RPC version pins.')) rpc.LAST_OBJ_VERSIONS = {} rpc.LAST_RPC_VERSIONS = {} class SchedulerDependentManager(Manager): """Periodically send capability updates to the Scheduler services. Services that need to update the Scheduler of their capabilities should derive from this class. Otherwise they can derive from manager.Manager directly. Updates are only sent after update_service_capabilities is called with non-None values. 
""" def __init__(self, host=None, db_driver=None, service_name='undefined'): self.last_capabilities = None self.service_name = service_name self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self._tp = greenpool.GreenPool() super(SchedulerDependentManager, self).__init__(host, db_driver) def update_service_capabilities(self, capabilities): """Remember these capabilities to send on next periodic update.""" self.last_capabilities = capabilities @periodic_task.periodic_task def _publish_service_capabilities(self, context): """Pass data back to the scheduler at a periodic interval.""" if self.last_capabilities: LOG.debug('Notifying Schedulers of capabilities ...') self.scheduler_rpcapi.update_service_capabilities( context, self.service_name, self.host, self.last_capabilities) def _add_to_threadpool(self, func, *args, **kwargs): self._tp.spawn_n(func, *args, **kwargs) def reset(self): super(SchedulerDependentManager, self).reset() self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() cinder-8.0.0/cinder/objects/0000775000567000056710000000000012701406543017023 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/objects/volume_attachment.py0000664000567000056710000000674112701406250023117 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_versionedobjects import fields from cinder import db from cinder import objects from cinder.objects import base LOG = logging.getLogger(__name__) @base.CinderObjectRegistry.register class VolumeAttachment(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.UUIDField(), 'volume_id': fields.UUIDField(), 'instance_uuid': fields.UUIDField(nullable=True), 'attached_host': fields.StringField(nullable=True), 'mountpoint': fields.StringField(nullable=True), 'attach_time': fields.DateTimeField(nullable=True), 'detach_time': fields.DateTimeField(nullable=True), 'attach_status': fields.StringField(nullable=True), 'attach_mode': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, attachment, db_attachment): for name, field in attachment.fields.items(): value = db_attachment.get(name) if isinstance(field, fields.IntegerField): value = value or 0 attachment[name] = value attachment._context = context attachment.obj_reset_changes() return attachment @base.remotable def save(self): updates = self.cinder_obj_get_changes() if updates: db.volume_attachment_update(self._context, self.id, updates) self.obj_reset_changes() @base.CinderObjectRegistry.register class VolumeAttachmentList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('VolumeAttachment'), } child_versions = { '1.0': '1.0', } @base.remotable_classmethod def get_all_by_volume_id(cls, context, volume_id): attachments = db.volume_attachment_get_used_by_volume_id(context, volume_id) return base.obj_make_list(context, cls(context), objects.VolumeAttachment, attachments) @base.remotable_classmethod def get_all_by_host(cls, context, volume_id, host): attachments = db.volume_attachment_get_by_host(context, volume_id, host) return base.obj_make_list(context, cls(context), objects.VolumeAttachment, attachments) @base.remotable_classmethod def get_all_by_instance_uuid(cls, context, volume_id, instance_uuid): attachments = db.volume_attachment_get_by_instance_uuid( context, volume_id, instance_uuid) return base.obj_make_list(context, cls(context), objects.VolumeAttachment, attachments) cinder-8.0.0/cinder/objects/volume_type.py0000664000567000056710000001121612701406250021741 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
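# NOTE: a typical read path for the attachment objects defined above
# (illustrative; assumes a request context ``ctxt`` and a volume id):
#
#     atts = objects.VolumeAttachmentList.get_all_by_volume_id(ctxt, vol_id)
#     for att in atts:
#         print(att.attached_host, att.mountpoint)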
from oslo_log import log as logging from oslo_versionedobjects import fields from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.volume import volume_types OPTIONAL_FIELDS = ['extra_specs', 'projects'] LOG = logging.getLogger(__name__) @base.CinderObjectRegistry.register class VolumeType(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'description': fields.StringField(nullable=True), 'is_public': fields.BooleanField(default=True, nullable=True), 'projects': fields.ListOfStringsField(nullable=True), 'extra_specs': fields.DictOfStringsField(nullable=True), } @classmethod def _get_expected_attrs(cls, context): return 'extra_specs', 'projects' @staticmethod def _from_db_object(context, type, db_type, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in type.fields.items(): if name in OPTIONAL_FIELDS: continue value = db_type[name] if isinstance(field, fields.IntegerField): value = value or 0 type[name] = value # Get data from db_type object that was queried by joined query # from DB if 'extra_specs' in expected_attrs: type.extra_specs = {} specs = db_type.get('extra_specs') if specs and isinstance(specs, list): type.extra_specs = {item['key']: item['value'] for item in specs} elif specs and isinstance(specs, dict): type.extra_specs = specs if 'projects' in expected_attrs: type.projects = db_type.get('projects', []) type._context = context type.obj_reset_changes() return type @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) db_volume_type = volume_types.create(self._context, self.name, self.extra_specs, self.is_public, self.projects, self.description) self._from_db_object(self._context, self, db_volume_type) @base.remotable def save(self): updates = self.cinder_obj_get_changes() if updates: volume_types.update(self._context, self.id, self.name, self.description) self.obj_reset_changes() @base.remotable def destroy(self): with self.obj_as_admin(): volume_types.destroy(self._context, self.id) @base.CinderObjectRegistry.register class VolumeTypeList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version # Version 1.1: Add pagination support to volume type VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('VolumeType'), } child_versions = { '1.0': '1.0', '1.1': '1.0', } @base.remotable_classmethod def get_all(cls, context, inactive=0, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): types = volume_types.get_all_types(context, inactive, filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset) expected_attrs = VolumeType._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.VolumeType, types.values(), expected_attrs=expected_attrs) cinder-8.0.0/cinder/objects/__init__.py0000664000567000056710000000277612701406250021143 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(comstud): You may scratch your head as you see code that imports # this module and then accesses attributes for objects such as Instance, # etc, yet you do not see these attributes in here. Never fear, there is # a little bit of magic. When objects are registered, an attribute is set # on this module automatically, pointing to the newest/latest version of # the object. def register_all(): # NOTE(danms): You must make sure your object gets imported in this # function in order for it to be registered by services that may # need to receive it via RPC. __import__('cinder.objects.backup') __import__('cinder.objects.cgsnapshot') __import__('cinder.objects.consistencygroup') __import__('cinder.objects.service') __import__('cinder.objects.snapshot') __import__('cinder.objects.volume') __import__('cinder.objects.volume_attachment') __import__('cinder.objects.volume_type') cinder-8.0.0/cinder/objects/cgsnapshot.py0000664000567000056710000001357212701406250021551 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
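# NOTE: register_all() above is called once at service startup; the
# CinderObjectRegistry registration hook then publishes every registered
# class as an attribute of this package. Illustrative usage:
#
#     from cinder import objects
#     objects.register_all()
#     snap = objects.Snapshot(ctxt)  # attribute added by the registry hook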
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from oslo_versionedobjects import fields


OPTIONAL_FIELDS = ['consistencygroup', 'snapshots']


@base.CinderObjectRegistry.register
class CGSnapshot(base.CinderPersistentObject, base.CinderObject,
                 base.CinderObjectDictCompat):
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'project_id': fields.UUIDField(),
        'user_id': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
    }

    @staticmethod
    def _from_db_object(context, cgsnapshot, db_cgsnapshots,
                        expected_attrs=None):
        expected_attrs = expected_attrs or []
        for name, field in cgsnapshot.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            value = db_cgsnapshots.get(name)
            setattr(cgsnapshot, name, value)

        if 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context, consistencygroup,
                                             db_cgsnapshots[
                                                 'consistencygroup'])
            cgsnapshot.consistencygroup = consistencygroup

        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(
                context, objects.SnapshotList(context),
                objects.Snapshot,
                db_cgsnapshots['snapshots'])
            cgsnapshot.snapshots = snapshots

        cgsnapshot._context = context
        cgsnapshot.obj_reset_changes()
        return cgsnapshot

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))

        db_cgsnapshots = db.cgsnapshot_create(self._context, updates)
        self._from_db_object(self._context, self, db_cgsnapshots)

    def obj_load_attr(self, attrname):
        if attrname not in OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'consistencygroup':
            self.consistencygroup = objects.ConsistencyGroup.get_by_id(
                self._context, self.consistencygroup_id)

        if attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'consistencygroup' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            db.cgsnapshot_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.cgsnapshot_destroy(self._context, self.id)


@base.CinderObjectRegistry.register
class CGSnapshotList(base.ObjectListBase, base.CinderObject):
    VERSION = '1.0'

    fields = {
        'objects': fields.ListOfObjectsField('CGSnapshot')
    }
    child_versions = {
        '1.0': '1.0'
    }

    @base.remotable_classmethod
    def get_all(cls, context, filters=None):
        cgsnapshots = db.cgsnapshot_get_all(context, filters)
        return base.obj_make_list(context, cls(context), objects.CGSnapshot,
                                  cgsnapshots)
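    # NOTE: 'consistencygroup' and 'snapshots' are OPTIONAL_FIELDS on
    # CGSnapshot, so obj_load_attr() above fetches them on first attribute
    # access. Illustrative only:
    #
    #     cgsnap = objects.CGSnapshot.get_by_id(ctxt, cgsnapshot_id)
    #     cgsnap.snapshots  # triggers SnapshotList.get_all_for_cgsnapshot()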
@base.remotable_classmethod def get_all_by_project(cls, context, project_id, filters=None): cgsnapshots = db.cgsnapshot_get_all_by_project(context, project_id, filters) return base.obj_make_list(context, cls(context), objects.CGSnapshot, cgsnapshots) @base.remotable_classmethod def get_all_by_group(cls, context, group_id, filters=None): cgsnapshots = db.cgsnapshot_get_all_by_group(context, group_id, filters) return base.obj_make_list(context, cls(context), objects.CGSnapshot, cgsnapshots) cinder-8.0.0/cinder/objects/volume.py0000664000567000056710000005033512701406257020714 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_utils import versionutils from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base CONF = cfg.CONF LOG = logging.getLogger(__name__) class MetadataObject(dict): # This is a wrapper class that simulates SQLAlchemy (.*)Metadata objects to # maintain compatibility with older representations of Volume that some # drivers rely on. This is helpful in transition period while some driver # methods are invoked with volume versioned object and some SQLAlchemy # object or dict. 
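    # For example (illustrative), one object answers both access styles
    # that legacy driver code has historically mixed:
    #
    #     md = MetadataObject('attached_mode', 'rw')
    #     md.key == md['key'] == 'attached_mode'
    #     md.value = 'ro'  # also visible as md['value']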
def __init__(self, key=None, value=None): super(MetadataObject, self).__init__() self.key = key self.value = value def __getattr__(self, name): if name in self: return self[name] else: raise AttributeError("No such attribute: " + name) def __setattr__(self, name, value): self[name] = value @base.CinderObjectRegistry.register class Volume(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version # Version 1.1: Added metadata, admin_metadata, volume_attachment, and # volume_type # Version 1.2: Added glance_metadata, consistencygroup and snapshots # Version 1.3: Added finish_volume_migration() VERSION = '1.3' OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata', 'volume_type', 'volume_attachment', 'consistencygroup', 'snapshots') fields = { 'id': fields.UUIDField(), '_name_id': fields.UUIDField(nullable=True), 'ec2_id': fields.UUIDField(nullable=True), 'user_id': fields.UUIDField(nullable=True), 'project_id': fields.UUIDField(nullable=True), 'snapshot_id': fields.UUIDField(nullable=True), 'host': fields.StringField(nullable=True), 'size': fields.IntegerField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'attach_status': fields.StringField(nullable=True), 'migration_status': fields.StringField(nullable=True), 'scheduled_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'provider_id': fields.UUIDField(nullable=True), 'provider_location': fields.StringField(nullable=True), 'provider_auth': fields.StringField(nullable=True), 'provider_geometry': fields.StringField(nullable=True), 'volume_type_id': fields.UUIDField(nullable=True), 'source_volid': fields.UUIDField(nullable=True), 'encryption_key_id': fields.UUIDField(nullable=True), 'consistencygroup_id': fields.UUIDField(nullable=True), 'deleted': fields.BooleanField(default=False, nullable=True), 'bootable': fields.BooleanField(default=False, nullable=True), 'multiattach': fields.BooleanField(default=False, nullable=True), 'replication_status': fields.StringField(nullable=True), 'replication_extended_status': fields.StringField(nullable=True), 'replication_driver_data': fields.StringField(nullable=True), 'previous_status': fields.StringField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), 'admin_metadata': fields.DictOfStringsField(nullable=True), 'glance_metadata': fields.DictOfStringsField(nullable=True), 'volume_type': fields.ObjectField('VolumeType', nullable=True), 'volume_attachment': fields.ObjectField('VolumeAttachmentList', nullable=True), 'consistencygroup': fields.ObjectField('ConsistencyGroup', nullable=True), 'snapshots': fields.ObjectField('SnapshotList', nullable=True), } # NOTE(thangp): obj_extra_fields is used to hold properties that are not # usually part of the model obj_extra_fields = ['name', 'name_id', 'volume_metadata', 'volume_admin_metadata', 'volume_glance_metadata'] @classmethod def _get_expected_attrs(cls, context): expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs'] if context.is_admin: expected_attrs.append('admin_metadata') return expected_attrs @property def name_id(self): return self.id if not self._name_id else self._name_id @name_id.setter def name_id(self, value): self._name_id = value @property def 
name(self): return CONF.volume_name_template % self.name_id # TODO(dulek): Three properties below are for compatibility with dict # representation of volume. The format there is different (list of # SQLAlchemy models) so we need a conversion. Anyway - these should be # removed when we stop this class from deriving from DictObjectCompat. @property def volume_metadata(self): md = [MetadataObject(k, v) for k, v in self.metadata.items()] return md @volume_metadata.setter def volume_metadata(self, value): md = {d['key']: d['value'] for d in value} self.metadata = md @property def volume_admin_metadata(self): md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()] return md @volume_admin_metadata.setter def volume_admin_metadata(self, value): md = {d['key']: d['value'] for d in value} self.admin_metadata = md @property def volume_glance_metadata(self): md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()] return md @volume_glance_metadata.setter def volume_glance_metadata(self, value): md = {d['key']: d['value'] for d in value} self.glance_metadata = md def __init__(self, *args, **kwargs): super(Volume, self).__init__(*args, **kwargs) self._orig_metadata = {} self._orig_admin_metadata = {} self._orig_glance_metadata = {} self._reset_metadata_tracking() def obj_reset_changes(self, fields=None): super(Volume, self).obj_reset_changes(fields) self._reset_metadata_tracking(fields=fields) @classmethod def _obj_from_primitive(cls, context, objver, primitive): obj = super(Volume, Volume)._obj_from_primitive(context, objver, primitive) obj._reset_metadata_tracking() return obj def _reset_metadata_tracking(self, fields=None): if fields is None or 'metadata' in fields: self._orig_metadata = (dict(self.metadata) if 'metadata' in self else {}) if fields is None or 'admin_metadata' in fields: self._orig_admin_metadata = (dict(self.admin_metadata) if 'admin_metadata' in self else {}) if fields is None or 'glance_metadata' in fields: self._orig_glance_metadata = (dict(self.glance_metadata) if 'glance_metadata' in self else {}) def obj_what_changed(self): changes = super(Volume, self).obj_what_changed() if 'metadata' in self and self.metadata != self._orig_metadata: changes.add('metadata') if ('admin_metadata' in self and self.admin_metadata != self._orig_admin_metadata): changes.add('admin_metadata') if ('glance_metadata' in self and self.glance_metadata != self._orig_glance_metadata): changes.add('glance_metadata') return changes def obj_make_compatible(self, primitive, target_version): """Make an object representation compatible with a target version.""" super(Volume, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) @staticmethod def _from_db_object(context, volume, db_volume, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in volume.fields.items(): if name in Volume.OPTIONAL_FIELDS: continue value = db_volume.get(name) if isinstance(field, fields.IntegerField): value = value or 0 volume[name] = value # Get data from db_volume object that was queried by joined query # from DB if 'metadata' in expected_attrs: metadata = db_volume.get('volume_metadata', []) volume.metadata = {item['key']: item['value'] for item in metadata} if 'admin_metadata' in expected_attrs: metadata = db_volume.get('volume_admin_metadata', []) volume.admin_metadata = {item['key']: item['value'] for item in metadata} if 'glance_metadata' in expected_attrs: metadata = 
db_volume.get('volume_glance_metadata', []) volume.glance_metadata = {item['key']: item['value'] for item in metadata} if 'volume_type' in expected_attrs: db_volume_type = db_volume.get('volume_type') if db_volume_type: vt_expected_attrs = [] if 'volume_type.extra_specs' in expected_attrs: vt_expected_attrs.append('extra_specs') volume.volume_type = objects.VolumeType._from_db_object( context, objects.VolumeType(), db_volume_type, expected_attrs=vt_expected_attrs) if 'volume_attachment' in expected_attrs: attachments = base.obj_make_list( context, objects.VolumeAttachmentList(context), objects.VolumeAttachment, db_volume.get('volume_attachment')) volume.volume_attachment = attachments if 'consistencygroup' in expected_attrs: consistencygroup = objects.ConsistencyGroup(context) consistencygroup._from_db_object(context, consistencygroup, db_volume['consistencygroup']) volume.consistencygroup = consistencygroup if 'snapshots' in expected_attrs: snapshots = base.obj_make_list( context, objects.SnapshotList(context), objects.Snapshot, db_volume['snapshots']) volume.snapshots = snapshots volume._context = context volume.obj_reset_changes() return volume @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() if 'consistencygroup' in updates: raise exception.ObjectActionError( action='create', reason=_('consistencygroup assigned')) if 'snapshots' in updates: raise exception.ObjectActionError( action='create', reason=_('snapshots assigned')) db_volume = db.volume_create(self._context, updates) self._from_db_object(self._context, self, db_volume) @base.remotable def save(self): updates = self.cinder_obj_get_changes() if updates: if 'consistencygroup' in updates: raise exception.ObjectActionError( action='save', reason=_('consistencygroup changed')) if 'glance_metadata' in updates: raise exception.ObjectActionError( action='save', reason=_('glance_metadata changed')) if 'snapshots' in updates: raise exception.ObjectActionError( action='save', reason=_('snapshots changed')) if 'metadata' in updates: # Metadata items that are not specified in the # self.metadata will be deleted metadata = updates.pop('metadata', None) self.metadata = db.volume_metadata_update(self._context, self.id, metadata, True) if self._context.is_admin and 'admin_metadata' in updates: metadata = updates.pop('admin_metadata', None) self.admin_metadata = db.volume_admin_metadata_update( self._context, self.id, metadata, True) db.volume_update(self._context, self.id, updates) self.obj_reset_changes() @base.remotable def destroy(self): with self.obj_as_admin(): db.volume_destroy(self._context, self.id) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'metadata': self.metadata = db.volume_metadata_get(self._context, self.id) elif attrname == 'admin_metadata': self.admin_metadata = {} if self._context.is_admin: self.admin_metadata = db.volume_admin_metadata_get( self._context, self.id) elif attrname == 'glance_metadata': try: # NOTE(dulek): We're using alias here to have conversion from # list to dict done there. 
self.volume_glance_metadata = db.volume_glance_metadata_get( self._context, self.id) except exception.GlanceMetadataNotFound: # NOTE(dulek): DB API raises when volume has no # glance_metadata. Silencing this because at this level no # metadata is a completely valid result. self.glance_metadata = {} elif attrname == 'volume_type': # If the volume doesn't have volume_type, VolumeType.get_by_id # would trigger a db call which raise VolumeTypeNotFound exception. self.volume_type = (objects.VolumeType.get_by_id( self._context, self.volume_type_id) if self.volume_type_id else None) elif attrname == 'volume_attachment': attachments = objects.VolumeAttachmentList.get_all_by_volume_id( self._context, self.id) self.volume_attachment = attachments elif attrname == 'consistencygroup': consistencygroup = objects.ConsistencyGroup.get_by_id( self._context, self.consistencygroup_id) self.consistencygroup = consistencygroup elif attrname == 'snapshots': self.snapshots = objects.SnapshotList.get_all_for_volume( self._context, self.id) self.obj_reset_changes(fields=[attrname]) def delete_metadata_key(self, key): db.volume_metadata_delete(self._context, self.id, key) md_was_changed = 'metadata' in self.obj_what_changed() del self.metadata[key] self._orig_metadata.pop(key, None) if not md_was_changed: self.obj_reset_changes(['metadata']) def finish_volume_migration(self, dest_volume): # We swap fields between source (i.e. self) and destination at the # end of migration because we want to keep the original volume id # in the DB but now pointing to the migrated volume. skip = ({'id', 'provider_location', 'glance_metadata', 'volume_type_id', 'volume_type'} | set(self.obj_extra_fields)) for key in set(dest_volume.fields.keys()) - skip: # Only swap attributes that are already set. We do not want to # unexpectedly trigger a lazy-load. if not dest_volume.obj_attr_is_set(key): continue value = getattr(dest_volume, key) value_to_dst = getattr(self, key) # Destination must have a _name_id since the id no longer matches # the volume. If it doesn't have a _name_id we set one. 
if key == '_name_id': if not dest_volume._name_id: setattr(dest_volume, key, self.id) continue elif key == 'migration_status': value = None value_to_dst = 'deleting' elif key == 'display_description': value_to_dst = 'migration src for ' + self.id elif key == 'status': value_to_dst = 'deleting' setattr(self, key, value) setattr(dest_volume, key, value_to_dst) dest_volume.save() return dest_volume @base.CinderObjectRegistry.register class VolumeList(base.ObjectListBase, base.CinderObject): VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('Volume'), } child_versions = { '1.0': '1.0', '1.1': '1.1', } @classmethod def _get_expected_attrs(cls, context): expected_attrs = ['metadata', 'volume_type'] if context.is_admin: expected_attrs.append('admin_metadata') return expected_attrs @base.remotable_classmethod def get_all(cls, context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): volumes = db.volume_get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) @base.remotable_classmethod def get_all_by_host(cls, context, host, filters=None): volumes = db.volume_get_all_by_host(context, host, filters) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) @base.remotable_classmethod def get_all_by_group(cls, context, group_id, filters=None): volumes = db.volume_get_all_by_group(context, group_id, filters) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) @base.remotable_classmethod def get_all_by_project(cls, context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): volumes = db.volume_get_all_by_project(context, project_id, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) cinder-8.0.0/cinder/objects/base.py0000664000567000056710000004321712701406250020311 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cinder common internal object model""" import contextlib import datetime from oslo_log import log as logging from oslo_utils import versionutils from oslo_versionedobjects import base from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects LOG = logging.getLogger('object') remotable = base.remotable remotable_classmethod = base.remotable_classmethod obj_make_list = base.obj_make_list class CinderObjectVersionsHistory(dict): """Helper class that maintains objects version history. 
    Current state of object versions is aggregated in a single version number
    that explicitly identifies a set of object versions. That way a service
    is able to report what objects it supports using a single string and all
    the newer services will know exactly what that means for a single object.
    """

    def __init__(self):
        super(CinderObjectVersionsHistory, self).__init__()

        # NOTE(dulek): This is our pre-history and a starting point - Liberty.
        # We want Mitaka to be able to talk to Liberty services, so we need to
        # handle backporting to these objects versions (although I don't
        # expect we've made a lot of incompatible changes inside the objects).
        #
        # If an object doesn't exist in Liberty, RPC API compatibility layer
        # shouldn't send it or convert it to a dictionary.
        #
        # Please note that we do not need to add similar entries for each
        # release. Liberty is here just for historical reasons.
        self.versions = ['liberty']
        self['liberty'] = {
            'Backup': '1.1',
            'BackupImport': '1.1',
            'BackupList': '1.0',
            'ConsistencyGroup': '1.1',
            'ConsistencyGroupList': '1.0',
            'Service': '1.0',
            'ServiceList': '1.0',
            'Snapshot': '1.0',
            'SnapshotList': '1.0',
            'Volume': '1.1',
            'VolumeAttachment': '1.0',
            'VolumeAttachmentList': '1.0',
            'VolumeList': '1.1',
            'VolumeType': '1.0',
            'VolumeTypeList': '1.0',
        }

    def get_current(self):
        return self.versions[-1]

    def get_current_versions(self):
        return self[self.get_current()]

    def add(self, ver, updates):
        self[ver] = self[self.get_current()].copy()
        self.versions.append(ver)
        self[ver].update(updates)


OBJ_VERSIONS = CinderObjectVersionsHistory()
# NOTE(dulek): You should add a new version here each time you bump a version
# of any object. As a second parameter you need to specify only what changed.
#
# When dropping backward compatibility with an OpenStack release we can rework
# this and remove some history while keeping the versions order.
OBJ_VERSIONS.add('1.0', {'Backup': '1.3', 'BackupImport': '1.3',
                         'CGSnapshot': '1.0', 'CGSnapshotList': '1.0',
                         'ConsistencyGroup': '1.2',
                         'ConsistencyGroupList': '1.1', 'Service': '1.1',
                         'Volume': '1.3', 'VolumeTypeList': '1.1'})
OBJ_VERSIONS.add('1.1', {'Service': '1.2', 'ServiceList': '1.1'})
OBJ_VERSIONS.add('1.2', {'Backup': '1.4', 'BackupImport': '1.4'})
OBJ_VERSIONS.add('1.3', {'Service': '1.3'})


class CinderObjectRegistry(base.VersionedObjectRegistry):
    def registration_hook(self, cls, index):
        setattr(objects, cls.obj_name(), cls)

        # For Versioned Object Classes that have a model store the model in
        # a Class attribute named model
        try:
            cls.model = db.get_model_for_versioned_object(cls)
        except (ImportError, AttributeError):
            pass


class CinderObject(base.VersionedObject):
    # NOTE(thangp): OBJ_PROJECT_NAMESPACE needs to be set so that nova,
    # cinder, and other objects can exist on the same bus and be distinguished
    # from one another.
    OBJ_PROJECT_NAMESPACE = 'cinder'

    # NOTE(thangp): As more objects are added to cinder, each object should
    # have a custom map of version compatibility. This just anchors the base
    # version compatibility.
    VERSION_COMPATIBILITY = {'7.0.0': '1.0'}

    Not = db.Not
    Case = db.Case

    def cinder_obj_get_changes(self):
        """Returns a dict of changed fields with tz unaware datetimes.

        Any timezone aware datetime field will be converted to UTC timezone
        and returned as timezone unaware datetime.

        This will allow us to pass these fields directly to a db update
        method as they can't have timezone information.
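
        For example (an illustrative sketch, assuming ``dt`` is a tz-aware
        datetime field value), the normalization performed below is:

            naive_utc = dt.replace(tzinfo=None) - dt.utcoffset()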
""" # Get dirtied/changed fields changes = self.obj_get_changes() # Look for datetime objects that contain timezone information for k, v in changes.items(): if isinstance(v, datetime.datetime) and v.tzinfo: # Remove timezone information and adjust the time according to # the timezone information's offset. changes[k] = v.replace(tzinfo=None) - v.utcoffset() # Return modified dict return changes @classmethod def _get_expected_attrs(cls, context): return None @base.remotable_classmethod def get_by_id(cls, context, id, *args, **kwargs): # To get by id we need to have a model and for the model to # have an id field if 'id' not in cls.fields: msg = (_('VersionedObject %s cannot retrieve object by id.') % (cls.obj_name())) raise NotImplementedError(msg) model = db.get_model_for_versioned_object(cls) orm_obj = db.get_by_id(context, model, id, *args, **kwargs) expected_attrs = cls._get_expected_attrs(context) kargs = {} if expected_attrs: kargs = {'expected_attrs': expected_attrs} return cls._from_db_object(context, cls(context), orm_obj, **kargs) def conditional_update(self, values, expected_values=None, filters=(), save_all=False, session=None, reflect_changes=True): """Compare-and-swap update. A conditional object update that, unlike normal update, will SAVE the contents of the update to the DB. Update will only occur in the DB and the object if conditions are met. If no expected_values are passed in we will default to make sure that all fields have not been changed in the DB. Since we cannot know the original value in the DB for dirty fields in the object those will be excluded. We have 4 different condition types we can use in expected_values: - Equality: {'status': 'available'} - Inequality: {'status': vol_obj.Not('deleting')} - In range: {'status': ['available', 'error'] - Not in range: {'status': vol_obj.Not(['in-use', 'attaching']) Method accepts additional filters, which are basically anything that can be passed to a sqlalchemy query's filter method, for example: [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)] We can select values based on conditions using Case objects in the 'values' argument. For example: has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') volume.conditional_update({'status': case_values}, {'status': 'available'})) And we can use DB fields using model class attribute for example to store previous status in the corresponding field even though we don't know which value is in the db from those we allowed: volume.conditional_update({'status': 'deleting', 'previous_status': volume.model.status}, {'status': ('available', 'error')}) :param values: Dictionary of key-values to update in the DB. :param expected_values: Dictionary of conditions that must be met for the update to be executed. :param filters: Iterable with additional filters :param save_all: Object may have changes that are not in the DB, this will say whether we want those changes saved as well. :param session: Session to use for the update :param reflect_changes: If we want changes made in the database to be reflected in the versioned object. This may mean in some cases that we have to reload the object from the database. :returns number of db rows that were updated, which can be used as a boolean, since it will be 0 if we couldn't update the DB and 1 if we could, because we are using unique index id. 
""" if 'id' not in self.fields: msg = (_('VersionedObject %s does not support conditional update.') % (self.obj_name())) raise NotImplementedError(msg) # If no conditions are set we will require object in DB to be unchanged if expected_values is None: changes = self.obj_what_changed() expected = {key: getattr(self, key) for key in self.fields.keys() if self.obj_attr_is_set(key) and key not in changes and key not in self.OPTIONAL_FIELDS} else: # Set the id in expected_values to limit conditional update to only # change this object expected = expected_values.copy() expected['id'] = self.id # If we want to save any additional changes the object has besides the # ones referred in values if save_all: changes = self.cinder_obj_get_changes() changes.update(values) values = changes result = db.conditional_update(self._context, self.model, values, expected, filters) # If we were able to update the DB then we need to update this object # as well to reflect new DB contents and clear the object's dirty flags # for those fields. if result and reflect_changes: # If we have used a Case, a db field or an expression in values we # don't know which value was used, so we need to read the object # back from the DB if any(isinstance(v, self.Case) or db.is_orm_value(v) for v in values.values()): # Read back object from DB obj = type(self).get_by_id(self._context, self.id) db_values = obj.obj_to_primitive()['versioned_object.data'] # Only update fields were changes were requested values = {field: db_values[field] for field, value in values.items()} # NOTE(geguileo): We don't use update method because our objects # will eventually move away from VersionedObjectDictCompat for key, value in values.items(): setattr(self, key, value) self.obj_reset_changes(values.keys()) return result def refresh(self): # To refresh we need to have a model and for the model to have an id # field if 'id' not in self.fields: msg = (_('VersionedObject %s cannot retrieve object by id.') % (self.obj_name())) raise NotImplementedError(msg) current = self.get_by_id(self._context, self.id) for field in self.fields: # Only update attributes that are already set. We do not want to # unexpectedly trigger a lazy-load. if self.obj_attr_is_set(field): if self[field] != current[field]: self[field] = current[field] self.obj_reset_changes() def __contains__(self, name): # We're using obj_extra_fields to provide aliases for some fields while # in transition period. This override is to make these aliases pass # "'foo' in obj" tests. return name in self.obj_extra_fields or super(CinderObject, self).__contains__(name) class CinderObjectDictCompat(base.VersionedObjectDictCompat): """Mix-in to provide dictionary key access compat. If an object needs to support attribute access using dictionary items instead of object attributes, inherit from this class. This should only be used as a temporary measure until all callers are converted to use modern attribute access. NOTE(berrange) This class will eventually be deleted. """ def get(self, key, value=base._NotSpecifiedSentinel): """For backwards-compatibility with dict-based objects. NOTE(danms): May be removed in the future. """ if key not in self.obj_fields: # NOTE(jdg): There are a number of places where we rely on the # old dictionary version and do a get(xxx, None). # The following preserves that compatibility but in # the future we'll remove this shim altogether so don't # rely on it. 
LOG.debug('Cinder object %(object_name)s has no ' 'attribute named: %(attribute_name)s', {'object_name': self.__class__.__name__, 'attribute_name': key}) return None if (value != base._NotSpecifiedSentinel and key not in self.obj_extra_fields and not self.obj_attr_is_set(key)): return value else: try: return getattr(self, key) except (exception.ObjectActionError, NotImplementedError): # Exception when haven't set a value for non-lazy # loadable attribute, but to mimic typical dict 'get' # behavior we should still return None return None class CinderPersistentObject(object): """Mixin class for Persistent objects. This adds the fields that we use in common for all persistent objects. """ fields = { 'created_at': fields.DateTimeField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'deleted_at': fields.DateTimeField(nullable=True), 'deleted': fields.BooleanField(default=False, nullable=True), } @contextlib.contextmanager def obj_as_admin(self): """Context manager to make an object call as an admin. This temporarily modifies the context embedded in an object to be elevated() and restores it after the call completes. Example usage: with obj.obj_as_admin(): obj.save() """ if self._context is None: raise exception.OrphanedObjectError(method='obj_as_admin', objtype=self.obj_name()) original_context = self._context self._context = self._context.elevated() try: yield finally: self._context = original_context class CinderComparableObject(base.ComparableVersionedObject): def __eq__(self, obj): if hasattr(obj, 'obj_to_primitive'): return self.obj_to_primitive() == obj.obj_to_primitive() return False class ObjectListBase(base.ObjectListBase): pass class CinderObjectSerializer(base.VersionedObjectSerializer): OBJ_BASE_CLASS = CinderObject def __init__(self, version_cap=None): super(CinderObjectSerializer, self).__init__() self.version_cap = version_cap def _get_capped_obj_version(self, obj): objname = obj.obj_name() version_dict = OBJ_VERSIONS.get(self.version_cap, {}) version_cap = version_dict.get(objname, None) if version_cap: cap_tuple = versionutils.convert_version_to_tuple(version_cap) obj_tuple = versionutils.convert_version_to_tuple(obj.VERSION) if cap_tuple > obj_tuple: # NOTE(dulek): Do not set version cap to be higher than actual # object version as we don't support "forwardporting" of # objects. If service will receive an object that's too old it # should handle it explicitly. version_cap = None return version_cap def serialize_entity(self, context, entity): if isinstance(entity, (tuple, list, set, dict)): entity = self._process_iterable(context, self.serialize_entity, entity) elif (hasattr(entity, 'obj_to_primitive') and callable(entity.obj_to_primitive)): # NOTE(dulek): Backport outgoing object to the capped version. backport_ver = self._get_capped_obj_version(entity) entity = entity.obj_to_primitive(backport_ver) return entity cinder-8.0.0/cinder/objects/snapshot.py0000664000567000056710000002617012701406250021235 0ustar jenkinsjenkins00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_utils import versionutils from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base CONF = cfg.CONF LOG = logging.getLogger(__name__) @base.CinderObjectRegistry.register class Snapshot(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They # are typically the relationship in the sqlalchemy object. OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot') fields = { 'id': fields.UUIDField(), 'user_id': fields.UUIDField(nullable=True), 'project_id': fields.UUIDField(nullable=True), 'volume_id': fields.UUIDField(nullable=True), 'cgsnapshot_id': fields.UUIDField(nullable=True), 'status': fields.StringField(nullable=True), 'progress': fields.StringField(nullable=True), 'volume_size': fields.IntegerField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'encryption_key_id': fields.UUIDField(nullable=True), 'volume_type_id': fields.UUIDField(nullable=True), 'provider_location': fields.StringField(nullable=True), 'provider_id': fields.UUIDField(nullable=True), 'metadata': fields.DictOfStringsField(), 'provider_auth': fields.StringField(nullable=True), 'volume': fields.ObjectField('Volume', nullable=True), 'cgsnapshot': fields.ObjectField('CGSnapshot', nullable=True), } @classmethod def _get_expected_attrs(cls, context): return 'metadata', # NOTE(thangp): obj_extra_fields is used to hold properties that are not # usually part of the model obj_extra_fields = ['name', 'volume_name'] @property def name(self): return CONF.snapshot_name_template % self.id @property def volume_name(self): return self.volume.name def __init__(self, *args, **kwargs): super(Snapshot, self).__init__(*args, **kwargs) self._orig_metadata = {} self._reset_metadata_tracking() def obj_reset_changes(self, fields=None): super(Snapshot, self).obj_reset_changes(fields) self._reset_metadata_tracking(fields=fields) def _reset_metadata_tracking(self, fields=None): if fields is None or 'metadata' in fields: self._orig_metadata = (dict(self.metadata) if self.obj_attr_is_set('metadata') else {}) def obj_what_changed(self): changes = super(Snapshot, self).obj_what_changed() if hasattr(self, 'metadata') and self.metadata != self._orig_metadata: changes.add('metadata') return changes def obj_make_compatible(self, primitive, target_version): """Make an object representation compatible with a target version.""" super(Snapshot, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) @staticmethod def _from_db_object(context, snapshot, db_snapshot, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in snapshot.fields.items(): if name in Snapshot.OPTIONAL_FIELDS: continue value = db_snapshot.get(name) if isinstance(field, fields.IntegerField): value = value if value is not None else 0 setattr(snapshot, name, value) if 'volume' in expected_attrs: volume = objects.Volume(context) volume._from_db_object(context, volume, db_snapshot['volume']) snapshot.volume = volume if 'cgsnapshot' in expected_attrs: cgsnapshot = objects.CGSnapshot(context) 
cgsnapshot._from_db_object(context, cgsnapshot, db_snapshot['cgsnapshot']) snapshot.cgsnapshot = cgsnapshot if 'metadata' in expected_attrs: metadata = db_snapshot.get('snapshot_metadata') if metadata is None: raise exception.MetadataAbsent() snapshot.metadata = {item['key']: item['value'] for item in metadata} snapshot._context = context snapshot.obj_reset_changes() return snapshot @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() if 'volume' in updates: raise exception.ObjectActionError(action='create', reason=_('volume assigned')) if 'cgsnapshot' in updates: raise exception.ObjectActionError(action='create', reason=_('cgsnapshot assigned')) db_snapshot = db.snapshot_create(self._context, updates) self._from_db_object(self._context, self, db_snapshot) @base.remotable def save(self): updates = self.cinder_obj_get_changes() if updates: if 'volume' in updates: raise exception.ObjectActionError(action='save', reason=_('volume changed')) if 'cgsnapshot' in updates: raise exception.ObjectActionError( action='save', reason=_('cgsnapshot changed')) if 'metadata' in updates: # Metadata items that are not specified in the # self.metadata will be deleted metadata = updates.pop('metadata', None) self.metadata = db.snapshot_metadata_update(self._context, self.id, metadata, True) db.snapshot_update(self._context, self.id, updates) self.obj_reset_changes() @base.remotable def destroy(self): db.snapshot_destroy(self._context, self.id) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'volume': self.volume = objects.Volume.get_by_id(self._context, self.volume_id) if attrname == 'cgsnapshot': self.cgsnapshot = objects.CGSnapshot.get_by_id(self._context, self.cgsnapshot_id) self.obj_reset_changes(fields=[attrname]) def delete_metadata_key(self, context, key): db.snapshot_metadata_delete(context, self.id, key) md_was_changed = 'metadata' in self.obj_what_changed() del self.metadata[key] self._orig_metadata.pop(key, None) if not md_was_changed: self.obj_reset_changes(['metadata']) @base.remotable_classmethod def snapshot_data_get_for_project(cls, context, project_id, volume_type_id=None): return db.snapshot_data_get_for_project(context, project_id, volume_type_id) @base.CinderObjectRegistry.register class SnapshotList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('Snapshot'), } child_versions = { '1.0': '1.0' } @base.remotable_classmethod def get_all(cls, context, search_opts, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): snapshots = db.snapshot_get_all(context, search_opts, marker, limit, sort_keys, sort_dirs, offset) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @base.remotable_classmethod def get_by_host(cls, context, host, filters=None): snapshots = db.snapshot_get_by_host(context, host, filters) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @base.remotable_classmethod def get_all_by_project(cls, 
context, project_id, search_opts, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): snapshots = db.snapshot_get_all_by_project( context, project_id, search_opts, marker, limit, sort_keys, sort_dirs, offset) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @base.remotable_classmethod def get_all_for_volume(cls, context, volume_id): snapshots = db.snapshot_get_all_for_volume(context, volume_id) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @base.remotable_classmethod def get_active_by_window(cls, context, begin, end): snapshots = db.snapshot_get_active_by_window(context, begin, end) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @base.remotable_classmethod def get_all_for_cgsnapshot(cls, context, cgsnapshot_id): snapshots = db.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) cinder-8.0.0/cinder/objects/service.py0000664000567000056710000001466612701406250021045 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
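All of the SnapshotList query methods above funnel their DB rows through base.obj_make_list. A minimal standalone sketch of that pattern, using simplified stand-in classes (the real helper also wires up context, expected_attrs, and change tracking):

class FakeSnapshot(object):
    @classmethod
    def _from_db_object(cls, context, snapshot, db_row):
        # Copy each DB column onto the object, as Snapshot does above.
        snapshot.__dict__.update(db_row)
        return snapshot


class FakeSnapshotList(object):
    objects = None


def obj_make_list(context, list_obj, item_cls, db_rows):
    # Wrap every DB row in a versioned object and attach them to the list.
    list_obj.objects = [item_cls._from_db_object(context, item_cls(), row)
                        for row in db_rows]
    return list_obj


rows = [{'id': 'snap-a', 'status': 'available'},
        {'id': 'snap-b', 'status': 'creating'}]
snaps = obj_make_list(None, FakeSnapshotList(), FakeSnapshot, rows)
assert [s.id for s in snaps.objects] == ['snap-a', 'snap-b']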
from oslo_log import log as logging from oslo_utils import versionutils from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields LOG = logging.getLogger(__name__) @base.CinderObjectRegistry.register class Service(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version # Version 1.1: Add rpc_current_version and object_current_version fields # Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version() # Version 1.3: Add replication fields VERSION = '1.3' fields = { 'id': fields.IntegerField(), 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(default=0), 'disabled': fields.BooleanField(default=False, nullable=True), 'availability_zone': fields.StringField(nullable=True, default='cinder'), 'disabled_reason': fields.StringField(nullable=True), 'modified_at': fields.DateTimeField(nullable=True), 'rpc_current_version': fields.StringField(nullable=True), 'object_current_version': fields.StringField(nullable=True), # Replication properties 'replication_status': c_fields.ReplicationStatusField(nullable=True), 'frozen': fields.BooleanField(default=False), 'active_backend_id': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, service, db_service): for name, field in service.fields.items(): value = db_service.get(name) if isinstance(field, fields.IntegerField): value = value or 0 elif isinstance(field, fields.DateTimeField): value = value or None service[name] = value service._context = context service.obj_reset_changes() return service @base.remotable_classmethod def get_by_host_and_topic(cls, context, host, topic): db_service = db.service_get_by_host_and_topic(context, host, topic) return cls._from_db_object(context, cls(context), db_service) @base.remotable_classmethod def get_by_args(cls, context, host, binary_key): db_service = db.service_get_by_args(context, host, binary_key) return cls._from_db_object(context, cls(context), db_service) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() db_service = db.service_create(self._context, updates) self._from_db_object(self._context, self, db_service) @base.remotable def save(self): updates = self.cinder_obj_get_changes() if updates: db.service_update(self._context, self.id, updates) self.obj_reset_changes() @base.remotable def destroy(self): with self.obj_as_admin(): db.service_destroy(self._context, self.id) @classmethod def _get_minimum_version(cls, attribute, context, binary): services = ServiceList.get_all_by_binary(context, binary) min_ver = None min_ver_str = None for s in services: ver_str = getattr(s, attribute) if ver_str is None: # FIXME(dulek) None in *_current_version means that this # service is in Liberty version, so we must assume this is the # lowest one. We use handy and easy to remember token to # indicate that. This may go away as soon as we drop # compatibility with Liberty, possibly in early N. 
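                # For illustration: with reported versions ['1.3', None,
                # '1.8'] the None short-circuits to 'liberty' below, while
                # ['1.3', '1.8'] yields '1.3' from the comparison loop.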
return 'liberty' ver = versionutils.convert_version_to_int(ver_str) if min_ver is None or ver < min_ver: min_ver = ver min_ver_str = ver_str return min_ver_str @base.remotable_classmethod def get_minimum_rpc_version(cls, context, binary): return cls._get_minimum_version('rpc_current_version', context, binary) @base.remotable_classmethod def get_minimum_obj_version(cls, context, binary): return cls._get_minimum_version('object_current_version', context, binary) @base.CinderObjectRegistry.register class ServiceList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version # Version 1.1: Service object 1.2 VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('Service'), } child_versions = { '1.0': '1.0', '1.1': '1.2', } @base.remotable_classmethod def get_all(cls, context, filters=None): services = db.service_get_all(context, filters) return base.obj_make_list(context, cls(context), objects.Service, services) @base.remotable_classmethod def get_all_by_topic(cls, context, topic, disabled=None): services = db.service_get_all_by_topic(context, topic, disabled=disabled) return base.obj_make_list(context, cls(context), objects.Service, services) @base.remotable_classmethod def get_all_by_binary(cls, context, binary, disabled=None): services = db.service_get_all_by_binary(context, binary, disabled=disabled) return base.obj_make_list(context, cls(context), objects.Service, services) cinder-8.0.0/cinder/objects/backup.py0000664000567000056710000002013412701406250020635 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import base64 from oslo_serialization import jsonutils from oslo_utils import versionutils from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields CONF = cfg.CONF LOG = logging.getLogger(__name__) @base.CinderObjectRegistry.register class Backup(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Add new field num_dependent_backups and extra fields # is_incremental and has_dependent_backups. # Version 1.2: Add new field snapshot_id and data_timestamp. 
# Version 1.3: Changed 'status' field to use BackupStatusField # Version 1.4: Add restore_volume_id VERSION = '1.4' fields = { 'id': fields.UUIDField(), 'user_id': fields.UUIDField(), 'project_id': fields.UUIDField(), 'volume_id': fields.UUIDField(), 'host': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'container': fields.StringField(nullable=True), 'parent_id': fields.StringField(nullable=True), 'status': c_fields.BackupStatusField(nullable=True), 'fail_reason': fields.StringField(nullable=True), 'size': fields.IntegerField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), # NOTE(dulek): Metadata field is used to store any strings by backup # drivers, that's why it can't be DictOfStringsField. 'service_metadata': fields.StringField(nullable=True), 'service': fields.StringField(nullable=True), 'object_count': fields.IntegerField(nullable=True), 'temp_volume_id': fields.StringField(nullable=True), 'temp_snapshot_id': fields.StringField(nullable=True), 'num_dependent_backups': fields.IntegerField(nullable=True), 'snapshot_id': fields.StringField(nullable=True), 'data_timestamp': fields.DateTimeField(nullable=True), 'restore_volume_id': fields.StringField(nullable=True), } obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups'] @property def name(self): return CONF.backup_name_template % self.id @property def is_incremental(self): return bool(self.parent_id) @property def has_dependent_backups(self): return bool(self.num_dependent_backups) def obj_make_compatible(self, primitive, target_version): """Make an object representation compatible with a target version.""" super(Backup, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) @staticmethod def _from_db_object(context, backup, db_backup): for name, field in backup.fields.items(): value = db_backup.get(name) if isinstance(field, fields.IntegerField): value = value if value is not None else 0 backup[name] = value backup._context = context backup.obj_reset_changes() return backup @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.cinder_obj_get_changes() db_backup = db.backup_create(self._context, updates) self._from_db_object(self._context, self, db_backup) @base.remotable def save(self): updates = self.cinder_obj_get_changes() if updates: db.backup_update(self._context, self.id, updates) self.obj_reset_changes() @base.remotable def destroy(self): with self.obj_as_admin(): db.backup_destroy(self._context, self.id) @staticmethod def decode_record(backup_url): """Deserialize backup metadata from string into a dictionary. 
:raises: InvalidInput """ try: return jsonutils.loads(base64.decode_as_text(backup_url)) except TypeError: msg = _("Can't decode backup record.") except ValueError: msg = _("Can't parse backup record.") raise exception.InvalidInput(reason=msg) @base.remotable def encode_record(self, **kwargs): """Serialize backup object, with optional extra info, into a string.""" # We don't want to export extra fields and we want to force lazy # loading, so we can't use dict(self) or self.obj_to_primitive record = {name: field.to_primitive(self, name, getattr(self, name)) for name, field in self.fields.items()} # We must update kwargs instead of record to ensure we don't overwrite # "real" data from the backup kwargs.update(record) retval = jsonutils.dump_as_bytes(kwargs) return base64.encode_as_text(retval) @base.CinderObjectRegistry.register class BackupList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('Backup'), } child_versions = { '1.0': '1.0' } @base.remotable_classmethod def get_all(cls, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): backups = db.backup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) return base.obj_make_list(context, cls(context), objects.Backup, backups) @base.remotable_classmethod def get_all_by_host(cls, context, host): backups = db.backup_get_all_by_host(context, host) return base.obj_make_list(context, cls(context), objects.Backup, backups) @base.remotable_classmethod def get_all_by_project(cls, context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): backups = db.backup_get_all_by_project(context, project_id, filters, marker, limit, offset, sort_keys, sort_dirs) return base.obj_make_list(context, cls(context), objects.Backup, backups) @base.remotable_classmethod def get_all_by_volume(cls, context, volume_id, filters=None): backups = db.backup_get_all_by_volume(context, volume_id, filters) return base.obj_make_list(context, cls(context), objects.Backup, backups) @base.CinderObjectRegistry.register class BackupImport(Backup): """Special object for Backup Imports. This class should not be used for anything but Backup creation when importing backups to the DB. On creation it allows to specify the ID for the backup, since it's the reference used in parent_id it is imperative that this is preserved. Backup Import objects get promoted to standard Backups when the import is completed. """ @base.remotable def create(self): updates = self.cinder_obj_get_changes() db_backup = db.backup_create(self._context, updates) self._from_db_object(self._context, self, db_backup) cinder-8.0.0/cinder/objects/consistencygroup.py0000664000567000056710000001541512701406250023014 0ustar jenkinsjenkins00000000000000# Copyright 2015 Yahoo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
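The encode_record/decode_record pair above is essentially a base64-wrapped JSON round-trip. A standalone sketch of that flow using the plain json/base64 modules instead of oslo_serialization (field names here are illustrative):

import base64
import json


def encode_record(record, **extra):
    # Extra info must not overwrite "real" backup data, hence update()
    # into extra rather than into record (mirroring the method above).
    extra.update(record)
    return base64.b64encode(json.dumps(extra).encode('utf-8')).decode('ascii')


def decode_record(backup_url):
    try:
        return json.loads(base64.b64decode(backup_url).decode('utf-8'))
    except (TypeError, ValueError):
        raise ValueError("Can't decode backup record.")


url = encode_record({'id': 'backup-1', 'size': 1}, driver_info='extra')
restored = decode_record(url)
assert restored['id'] == 'backup-1' and restored['driver_info'] == 'extra'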
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
from oslo_versionedobjects import fields

OPTIONAL_FIELDS = ['cgsnapshots', 'volumes']


@base.CinderObjectRegistry.register
class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added cgsnapshots and volumes relationships
    # Version 1.2: Changed 'status' field to use ConsistencyGroupStatusField
    VERSION = '1.2'

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.UUIDField(),
        'project_id': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'status': c_fields.ConsistencyGroupStatusField(nullable=True),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'source_cgid': fields.UUIDField(nullable=True),
        'cgsnapshots': fields.ObjectField('CGSnapshotList', nullable=True),
        'volumes': fields.ObjectField('VolumeList', nullable=True),
    }

    @staticmethod
    def _from_db_object(context, consistencygroup, db_consistencygroup,
                        expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in consistencygroup.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            value = db_consistencygroup.get(name)
            setattr(consistencygroup, name, value)

        if 'cgsnapshots' in expected_attrs:
            cgsnapshots = base.obj_make_list(
                context, objects.CGSnapshotList(context),
                objects.CGSnapshot,
                db_consistencygroup['cgsnapshots'])
            consistencygroup.cgsnapshots = cgsnapshots

        if 'volumes' in expected_attrs:
            volumes = base.obj_make_list(
                context, objects.VolumeList(context),
                objects.Volume,
                db_consistencygroup['volumes'])
            consistencygroup.volumes = volumes

        consistencygroup._context = context
        consistencygroup.obj_reset_changes()
        return consistencygroup

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'cgsnapshots' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('cgsnapshots assigned'))
        if 'volumes' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('volumes assigned'))

        db_consistencygroups = db.consistencygroup_create(self._context,
                                                          updates)
        self._from_db_object(self._context, self, db_consistencygroups)

    def obj_load_attr(self, attrname):
        if attrname not in OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'cgsnapshots':
            self.cgsnapshots = objects.CGSnapshotList.get_all_by_group(
                self._context, self.id)

        if attrname == 'volumes':
            self.volumes = objects.VolumeList.get_all_by_group(self._context,
                                                               self.id)

        self.obj_reset_changes(fields=[attrname])

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'cgsnapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cgsnapshots changed'))
            if 'volumes' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('volumes changed'))
            db.consistencygroup_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.consistencygroup_destroy(self._context, self.id)


@base.CinderObjectRegistry.register
class ConsistencyGroupList(base.ObjectListBase, base.CinderObject):
    # Version 1.0: Initial version
    # Version 1.1: Add pagination support to consistency group
    VERSION = '1.1'

    fields = {
        'objects': fields.ListOfObjectsField('ConsistencyGroup')
    }

    child_versions = {
        '1.0': '1.0',
        '1.1': '1.1',
    }

    @base.remotable_classmethod
    def get_all(cls, context, filters=None, marker=None, limit=None,
                offset=None, sort_keys=None, sort_dirs=None):
        consistencygroups = db.consistencygroup_get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
        return base.obj_make_list(context, cls(context),
                                  objects.ConsistencyGroup,
                                  consistencygroups)

    @base.remotable_classmethod
    def get_all_by_project(cls, context, project_id, filters=None,
                           marker=None, limit=None, offset=None,
                           sort_keys=None, sort_dirs=None):
        consistencygroups = db.consistencygroup_get_all_by_project(
            context, project_id, filters=filters, marker=marker,
            limit=limit, offset=offset, sort_keys=sort_keys,
            sort_dirs=sort_dirs)
        return base.obj_make_list(context, cls(context),
                                  objects.ConsistencyGroup,
                                  consistencygroups)
cinder-8.0.0/cinder/objects/fields.py0000664000567000056710000000455012701406250020642 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
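ConsistencyGroup.obj_load_attr above gives the relationship fields on-demand loading: the first access triggers a DB query and the result is cached on the object. A standalone sketch of the idea, with a fake registry standing in for the DB call:

FAKE_DB = {'volumes_by_group': {'group-1': ['vol-1', 'vol-2']}}


class LazyGroup(object):
    OPTIONAL_FIELDS = ('volumes',)

    def __init__(self, group_id):
        self.id = group_id

    def __getattr__(self, name):
        # Only reached when normal lookup fails, i.e. not loaded yet.
        if name not in self.OPTIONAL_FIELDS:
            raise AttributeError(name)
        value = FAKE_DB['volumes_by_group'][self.id]  # stand-in for the DB
        setattr(self, name, value)  # cache, so we only load once
        return value


group = LazyGroup('group-1')
assert group.volumes == ['vol-1', 'vol-2']   # loaded lazily on first access
assert 'volumes' in group.__dict__           # and cached afterwards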
"""Custom fields for Cinder objects.""" from oslo_versionedobjects import fields BaseEnumField = fields.BaseEnumField Enum = fields.Enum Field = fields.Field FieldType = fields.FieldType class BackupStatus(Enum): ERROR = 'error' ERROR_DELETING = 'error_deleting' CREATING = 'creating' AVAILABLE = 'available' DELETING = 'deleting' DELETED = 'deleted' RESTORING = 'restoring' ALL = (ERROR, ERROR_DELETING, CREATING, AVAILABLE, DELETING, DELETED, RESTORING) def __init__(self): super(BackupStatus, self).__init__(valid_values=BackupStatus.ALL) class BackupStatusField(BaseEnumField): AUTO_TYPE = BackupStatus() class ConsistencyGroupStatus(Enum): ERROR = 'error' AVAILABLE = 'available' CREATING = 'creating' DELETING = 'deleting' DELETED = 'deleted' UPDATING = 'updating' IN_USE = 'in-use' ERROR_DELETING = 'error_deleting' ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, UPDATING, IN_USE, ERROR_DELETING) def __init__(self): super(ConsistencyGroupStatus, self).__init__( valid_values=ConsistencyGroupStatus.ALL) class ConsistencyGroupStatusField(BaseEnumField): AUTO_TYPE = ConsistencyGroupStatus() class ReplicationStatus(Enum): ERROR = 'error' ENABLED = 'enabled' DISABLED = 'disabled' NOT_CAPABLE = 'not-capable' FAILING_OVER = 'failing-over' FAILOVER_ERROR = 'failover-error' FAILED_OVER = 'failed-over' ALL = (ERROR, ENABLED, DISABLED, NOT_CAPABLE, FAILOVER_ERROR, FAILING_OVER, FAILED_OVER) def __init__(self): super(ReplicationStatus, self).__init__( valid_values=ReplicationStatus.ALL) class ReplicationStatusField(BaseEnumField): AUTO_TYPE = ReplicationStatus() cinder-8.0.0/cinder/zonemanager/0000775000567000056710000000000012701406543017700 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/zonemanager/utils.py0000664000567000056710000001015212701406250021404 0ustar jenkinsjenkins00000000000000# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Utility functions related to the Zone Manager. 
""" from oslo_log import log from cinder.i18n import _LI, _LW from cinder.volume import configuration from cinder.volume import manager from cinder.zonemanager import fc_san_lookup_service from cinder.zonemanager import fc_zone_manager LOG = log.getLogger(__name__) def create_zone_manager(): """If zoning is enabled, build the Zone Manager.""" config = configuration.Configuration(manager.volume_manager_opts) LOG.debug("Zoning mode: %s.", config.safe_get('zoning_mode')) if config.safe_get('zoning_mode') == 'fabric': LOG.debug("FC Zone Manager enabled.") zm = fc_zone_manager.ZoneManager() LOG.info(_LI("Using FC Zone Manager %(zm_version)s," " Driver %(drv_name)s %(drv_version)s."), {'zm_version': zm.get_version(), 'drv_name': zm.driver.__class__.__name__, 'drv_version': zm.driver.get_version()}) return zm else: LOG.debug("FC Zone Manager not enabled in cinder.conf.") return None def create_lookup_service(): config = configuration.Configuration(manager.volume_manager_opts) LOG.debug("Zoning mode: %s.", config.safe_get('zoning_mode')) if config.safe_get('zoning_mode') == 'fabric': LOG.debug("FC Lookup Service enabled.") lookup = fc_san_lookup_service.FCSanLookupService() LOG.info(_LI("Using FC lookup service %s."), lookup.lookup_service) return lookup else: LOG.debug("FC Lookup Service not enabled in cinder.conf.") return None def get_formatted_wwn(wwn_str): """Utility API that formats WWN to insert ':'.""" if (len(wwn_str) != 16): return wwn_str.lower() else: return (':'.join([wwn_str[i:i + 2] for i in range(0, len(wwn_str), 2)])).lower() def AddFCZone(initialize_connection): """Decorator to add a FC Zone.""" def decorator(self, *args, **kwargs): conn_info = initialize_connection(self, *args, **kwargs) if not conn_info: LOG.warning(_LW("Driver didn't return connection info, " "can't add zone.")) return None vol_type = conn_info.get('driver_volume_type', None) if vol_type == 'fibre_channel': if 'initiator_target_map' in conn_info['data']: zm = create_zone_manager() if zm: LOG.debug("AddFCZone connection info: %(conninfo)s.", {'conninfo': conn_info}) zm.add_connection(conn_info) return conn_info return decorator def RemoveFCZone(terminate_connection): """Decorator for FC drivers to remove zone.""" def decorator(self, *args, **kwargs): conn_info = terminate_connection(self, *args, **kwargs) if not conn_info: LOG.warning(_LW("Driver didn't return connection info from " "terminate_connection call.")) return None vol_type = conn_info.get('driver_volume_type', None) if vol_type == 'fibre_channel': if 'initiator_target_map' in conn_info['data']: zm = create_zone_manager() if zm: LOG.debug("RemoveFCZone connection info: %(conninfo)s.", {'conninfo': conn_info}) zm.delete_connection(conn_info) return conn_info return decorator cinder-8.0.0/cinder/zonemanager/__init__.py0000664000567000056710000000164512701406250022012 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """ :mod:`cinder.zonemanager` -- FC Zone manager ===================================================== .. automodule:: cinder.zonemanager :platform: Unix :synopsis: Module containing all the FC Zone Manager classes """ cinder-8.0.0/cinder/zonemanager/fc_common.py0000664000567000056710000000160612701406250022210 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # class FCCommon(object): """Common interface for FC operations.""" VERSION = "1.0" def __init__(self, **kwargs): pass def get_version(self): return self.VERSION cinder-8.0.0/cinder/zonemanager/drivers/0000775000567000056710000000000012701406543021356 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/zonemanager/drivers/driver_utils.py0000664000567000056710000000600212701406250024434 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import re from oslo_log import log from cinder.i18n import _LI LOG = log.getLogger(__name__) def get_friendly_zone_name(zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, supported_chars): """Utility function implementation of _get_friendly_zone_name. Get friendly zone name is used to form the zone name based on the details provided by the caller :param zoning_policy - determines the zoning policy is either initiator-target or initiator :param initiator - initiator WWN :param target - target WWN :param host_name - Host name returned from Volume Driver :param storage_system - Storage name returned from Volume Driver :param zone_name_prefix - user defined zone prefix configured in cinder.conf :param supported_chars - Supported character set of FC switch vendor. Example: 'abc123_-$'. These are defined in the FC zone drivers. 
""" if host_name is None: host_name = '' if storage_system is None: storage_system = '' if zoning_policy == 'initiator-target': host_name = host_name[:14] storage_system = storage_system[:14] if len(host_name) > 0 and len(storage_system) > 0: zone_name = (host_name + "_" + initiator.replace(':', '') + "_" + storage_system + "_" + target.replace(':', '')) else: zone_name = (zone_name_prefix + initiator.replace(':', '') + target.replace(':', '')) LOG.info(_LI("Zone name created using prefix because either " "host name or storage system is none.")) else: host_name = host_name[:47] if len(host_name) > 0: zone_name = (host_name + "_" + initiator.replace(':', '')) else: zone_name = (zone_name_prefix + initiator.replace(':', '')) LOG.info(_LI("Zone name created using prefix because host " "name is none.")) LOG.info(_LI("Friendly zone name after forming: %(zonename)s"), {'zonename': zone_name}) zone_name = re.sub('[^%s]' % supported_chars, '', zone_name) return zone_name cinder-8.0.0/cinder/zonemanager/drivers/fc_zone_driver.py0000664000567000056710000000667612701406250024740 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Base Zone Driver is responsible to manage access control using FC zoning Vendor specific implementations should extend this class to provide concrete implementation for add_connection and delete_connection interfaces. **Related Flags** :zoning_policy: Used by: class: 'FCZoneDriver'. Defaults to 'none' :zone_driver: Used by: class: 'FCZoneDriver'. Defaults to 'none' """ from oslo_log import log as logging from cinder.zonemanager import fc_common LOG = logging.getLogger(__name__) class FCZoneDriver(fc_common.FCCommon): """Interface to manage Connection control during attach/detach.""" def __init__(self, **kwargs): super(FCZoneDriver, self).__init__(**kwargs) LOG.debug("Initializing FCZoneDriver") def add_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Add connection control. Abstract method to add connection control. All implementing drivers should provide concrete implementation for this API. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets Example initiator_target_map: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } Note that WWPN can be in lower or upper case and can be ':' separated strings """ raise NotImplementedError() def delete_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Delete connection control. Abstract method to remove connection control. All implementing drivers should provide concrete implementation for this API. 
        :param fabric: Fabric name from the cinder.conf file
        :param initiator_target_map: Mapping of initiator to list of targets

        Example initiator_target_map:
            { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] }

        Note that a WWPN can be in lower or upper case and can be a
        ':'-separated string.
        """
        raise NotImplementedError()

    def get_san_context(self, target_wwn_list):
        """Get SAN context for end devices.

        Abstract method to get SAN contexts for a given list of end
        devices. All implementing drivers should provide a concrete
        implementation for this API.

        :param target_wwn_list: List of target WWNs, for example:
            ['20240002ac000a50', '20240002ac000a40']

        Note that a WWPN can be in lower or upper case and can be a
        ':'-separated string.
        """
        raise NotImplementedError()
cinder-8.0.0/cinder/zonemanager/drivers/cisco/0000775000567000056710000000000012701406543022456 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/zonemanager/drivers/cisco/fc_zone_constants.py0000664000567000056710000000176312701406250026551 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Common constants used by the Cisco FC Zone Driver.
"""
ACTIVE_ZONE_CONFIG = 'active_zone_config'
CFG_ZONESET = 'zoneset'
CFG_ZONE = 'zone'
CFG_ZONE_MEMBER = 'pwwn'
CFG_ZONES = 'zones'

"""
CLI Commands for FC zoning operations.
"""
GET_ACTIVE_ZONE_CFG = 'show zoneset active vsan '
FCNS_SHOW = 'show fcns database vsan '
GET_ZONE_STATUS = 'show zone status vsan '
cinder-8.0.0/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py0000664000567000056710000003464712701406250030553 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
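A vendor driver only has to fill in the three abstract methods of FCZoneDriver above. A minimal hypothetical in-memory sketch of that contract follows; it does pure bookkeeping with no fabric I/O, and it skips the real base class so it runs standalone:

class InMemoryZoneDriver(object):
    # Hypothetical stand-in; a real driver subclasses
    # fc_zone_driver.FCZoneDriver and talks to an actual switch.

    def __init__(self):
        self.zones = {}  # fabric -> {initiator: set(targets)}

    def add_connection(self, fabric, initiator_target_map,
                       host_name=None, storage_system=None):
        fabric_zones = self.zones.setdefault(fabric, {})
        for initiator, targets in initiator_target_map.items():
            zone = fabric_zones.setdefault(initiator.lower(), set())
            zone.update(t.lower() for t in targets)

    def delete_connection(self, fabric, initiator_target_map,
                          host_name=None, storage_system=None):
        fabric_zones = self.zones.get(fabric, {})
        for initiator, targets in initiator_target_map.items():
            zone = fabric_zones.get(initiator.lower(), set())
            zone.difference_update(t.lower() for t in targets)

    def get_san_context(self, target_wwn_list):
        # Report which of the requested targets each fabric can see.
        wanted = set(t.lower() for t in target_wwn_list)
        result = {}
        for fabric, fabric_zones in self.zones.items():
            visible = set()
            for targets in fabric_zones.values():
                visible |= targets
            hits = sorted(wanted & visible)
            if hits:
                result[fabric] = hits
        return result


drv = InMemoryZoneDriver()
drv.add_connection('fab1', {'10008c7cff523b01': ['20240002ac000a50']})
assert drv.get_san_context(['20240002AC000A50']) == \
    {'fab1': ['20240002ac000a50']}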
#

import random

from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six

from cinder import exception
from cinder.i18n import _, _LE
from cinder import ssh_utils
from cinder import utils
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts
import cinder.zonemanager.drivers.cisco.fc_zone_constants as zone_constant
from cinder.zonemanager import fc_san_lookup_service as fc_service
from cinder.zonemanager import utils as zm_utils

LOG = logging.getLogger(__name__)


class CiscoFCSanLookupService(fc_service.FCSanLookupService):
    """The SAN lookup service that talks to Cisco switches.

    Version History:
        1.0.0 - Initial version

    """

    VERSION = "1.0.0"

    def __init__(self, **kwargs):
        """Initializing the client."""
        super(CiscoFCSanLookupService, self).__init__(**kwargs)
        self.configuration = kwargs.get('configuration', None)
        self.create_configuration()

        self.switch_user = ""
        self.switch_port = ""
        self.switch_pwd = ""
        self.switch_ip = ""
        self.sshpool = None

    def create_configuration(self):
        """Configuration specific to SAN context values."""
        config = self.configuration

        fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
        LOG.debug('Fabric Names: %s', fabric_names)

        # There can be more than one SAN in the network and we need to
        # get credentials for each for SAN context lookup later.
        # Cisco zonesets require VSANs.
        if fabric_names:
            self.fabric_configs = fabric_opts.load_fabric_configurations(
                fabric_names)

    def get_device_mapping_from_network(self, initiator_wwn_list,
                                        target_wwn_list):
        """Provides the initiator/target map for available SAN contexts.

        Looks up the fcns database of each FC SAN configured to find
        logged-in devices and returns a map of initiator and target port
        WWNs for each fabric.

        :param initiator_wwn_list: List of initiator port WWNs
        :param target_wwn_list: List of target port WWNs
        :returns: dict -- device wwn map in the following format
            {
                <San name>: {
                    'initiator_port_wwn_list':
                    ('200000051e55a100', '200000051e55a121'..),
                    'target_port_wwn_list':
                    ('100000051e55a100', '100000051e55a121'..)
                }
            }
        :raises: Exception when the connection to the fabric fails
        """
        device_map = {}
        formatted_target_list = []
        formatted_initiator_list = []
        fabric_map = {}
        fabric_names = self.configuration.fc_fabric_names

        if not fabric_names:
            raise exception.InvalidParameterValue(
                err=_("Missing Fibre Channel SAN configuration "
                      "param - fc_fabric_names"))

        fabrics = [x.strip() for x in fabric_names.split(',')]
        LOG.debug("FC Fabric List: %s", fabrics)
        if fabrics:
            for t in target_wwn_list:
                formatted_target_list.append(zm_utils.get_formatted_wwn(t))

            for i in initiator_wwn_list:
                formatted_initiator_list.append(zm_utils.get_formatted_wwn(i))

            for fabric_name in fabrics:
                self.switch_ip = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_address')
                self.switch_user = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_user')
                self.switch_pwd = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_password')
                self.switch_port = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_port')
                zoning_vsan = self.fabric_configs[fabric_name].safe_get(
                    'cisco_zoning_vsan')

                # Get name server data from the fabric and find the targets
                # logged in.
                nsinfo = ''
                LOG.debug("show fcns database for vsan %s", zoning_vsan)
                nsinfo = self.get_nameserver_info(zoning_vsan)

                LOG.debug("Lookup service:fcnsdatabase-%s", nsinfo)
                LOG.debug("Lookup service:initiator list from caller-%s",
                          formatted_initiator_list)
                LOG.debug("Lookup service:target list from caller-%s",
                          formatted_target_list)

                visible_targets = [x for x in nsinfo
                                   if x in formatted_target_list]
                visible_initiators = [x for x in nsinfo
                                      if x in formatted_initiator_list]

                if visible_targets:
                    LOG.debug("Filtered targets: %s", visible_targets)
                    # Getting rid of the ':' before returning.
                    for idx, elem in enumerate(visible_targets):
                        elem = str(elem).replace(':', '')
                        visible_targets[idx] = elem
                else:
                    LOG.debug("No targets are in the fcns database"
                              " for vsan %s", zoning_vsan)

                if visible_initiators:
                    # Getting rid of the ':' before returning.
                    for idx, elem in enumerate(visible_initiators):
                        elem = str(elem).replace(':', '')
                        visible_initiators[idx] = elem
                else:
                    LOG.debug("No initiators are in the fcns database"
                              " for vsan %s", zoning_vsan)

                fabric_map = {'initiator_port_wwn_list': visible_initiators,
                              'target_port_wwn_list': visible_targets
                              }

                device_map[zoning_vsan] = fabric_map

        LOG.debug("Device map for SAN context: %s", device_map)
        return device_map

    def get_nameserver_info(self, fabric_vsan):
        """Get fcns database info from the fabric.

        This method returns the connected node port wwn list (local and
        remote) for the given switch fabric.
        """
        cli_output = None
        nsinfo_list = []
        try:
            cmd = [zone_constant.FCNS_SHOW, fabric_vsan, ' | no-more']
            cli_output = self._get_switch_info(cmd)
        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed collecting show fcns database for"
                              " fabric"))
        if cli_output:
            nsinfo_list = self._parse_ns_output(cli_output)

        LOG.debug("Connector returning fcns info-%s", nsinfo_list)
        return nsinfo_list

    def _get_switch_info(self, cmd_list):
        stdout, stderr, sw_data = None, None, None
        try:
            stdout, stderr = self._run_ssh(cmd_list, True, 1)
            LOG.debug("CLI output from ssh - output: %s", stdout)
            if stdout:
                sw_data = stdout.splitlines()
            return sw_data
        except processutils.ProcessExecutionError as e:
            msg = _("Error while getting data via ssh: (command=%(cmd)s "
                    "error=%(err)s).") % {'cmd': cmd_list,
                                          'err': six.text_type(e)}
            LOG.error(msg)
            raise exception.CiscoZoningCliException(reason=msg)

    def _parse_ns_output(self, switch_data):
        """Parses name server data.

        Parses the raw nameserver data and adds the device port wwns to
        the list.

        :returns: list of device port wwn from ns info
        """
        nsinfo_list = []
        for line in switch_data:
            if " N " not in line:
                continue
            linesplit = line.split()
            if len(linesplit) > 2:
                node_port_wwn = linesplit[2]
                nsinfo_list.append(node_port_wwn)
            else:
                msg = _("Malformed fcns output string: %s") % line
                LOG.error(msg)
                raise exception.InvalidParameterValue(err=msg)
        return nsinfo_list

    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
        command = ' '.join(cmd_list)

        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                             self.switch_port,
                                             None,
                                             self.switch_user,
                                             self.switch_pwd,
                                             min_size=1,
                                             max_size=5)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return processutils.ssh_execute(
                            ssh,
                            command,
                            check_exit_code=check_exit_code)
                    except Exception as e:
                        msg = _("Exception: %s") % six.text_type(e)
                        LOG.error(msg)
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error running SSH command: %s"), command)

    def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
        """Execute CLI commands with a status update.

        Executes CLI commands where a status return is expected. cmd_list
        is a list of commands, where each command is itself a list of
        parameters. We use utils.check_ssh_injection to check each
        command, then join them with " ; " to form a single command.
        """
        # Check that each command is secure.
        for cmd in cmd_list:
            utils.check_ssh_injection(cmd)

        # Combine into a single command.
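        # For example, [['show', 'zoneset'], ['show', 'zone', 'status']]
        # becomes "show zoneset ; show zone status" and is executed as a
        # single SSH call.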
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list)) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) stdin, stdout, stderr = None, None, None LOG.debug("Executing command via ssh: %s", command) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: stdin, stdout, stderr = ssh.exec_command(command) greenthread.sleep(random.randint(20, 500) / 100.0) channel = stdout.channel exit_status = channel.recv_exit_status() LOG.debug("Exit Status from ssh:%s", exit_status) # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if check_exit_code and exit_status != 0: raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) else: return True else: return True except Exception as e: msg = _("Exception: %s") % six.text_type(e) LOG.error(msg) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after SSH:%s", last_exception) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception as e: with excutils.save_and_reraise_exception(): msg = (_("Error executing command via ssh: %s") % six.text_type(e)) LOG.error(msg) finally: if stdin: stdin.flush() stdin.close() if stdout: stdout.close() if stderr: stderr.close() def cleanup(self): self.sshpool = None cinder-8.0.0/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py0000664000567000056710000005726312701406250027216 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Cisco Zone Driver is responsible to manage access control using FC zoning for Cisco FC fabrics. This is a concrete implementation of FCZoneDriver interface implementing add_connection and delete_connection interfaces. **Related Flags** :zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True :zone_name_prefix: Used by: class: 'FCZoneDriver'. 
Defaults to 'openstack' """ from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils import six import string from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts from cinder.zonemanager.drivers import driver_utils from cinder.zonemanager.drivers import fc_zone_driver from cinder.zonemanager import utils as zm_utils LOG = logging.getLogger(__name__) SUPPORTED_CHARS = string.ascii_letters + string.digits + '$' + '-' + '^' + '_' cisco_opts = [ cfg.StrOpt('cisco_sb_connector', default='cinder.zonemanager.drivers.cisco' '.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI', help='Southbound connector for zoning operation'), ] CONF = cfg.CONF CONF.register_opts(cisco_opts, group='fc-zone-manager') class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): """Cisco FC zone driver implementation. OpenStack Fibre Channel zone driver to manage FC zoning in Cisco SAN fabrics. Version history: 1.0 - Initial Cisco FC zone driver 1.1 - Added friendly zone name support """ VERSION = "1.1.0" def __init__(self, **kwargs): super(CiscoFCZoneDriver, self).__init__(**kwargs) self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(cisco_opts) # Adding a hack to handle parameters from super classes # in case configured with multi backends. fabric_names = self.configuration.safe_get('fc_fabric_names') activate = self.configuration.safe_get('cisco_zone_activate') prefix = self.configuration.safe_get('cisco_zone_name_prefix') base_san_opts = [] if not fabric_names: base_san_opts.append( cfg.StrOpt('fc_fabric_names', help='Comma separated list of fibre channel ' 'fabric names. This list of names is used to' ' retrieve other SAN credentials for connecting' ' to each SAN fabric' )) if not activate: base_san_opts.append( cfg.BoolOpt('cisco_zone_activate', default=True, help='Indicates whether zone should ' 'be activated or not')) if not prefix: base_san_opts.append( cfg.StrOpt('cisco_zone_name_prefix', default="openstack", help="A prefix to be used when naming zone")) if len(base_san_opts) > 0: CONF.register_opts(base_san_opts) self.configuration.append_config_values(base_san_opts) fabric_names = [x.strip() for x in self. configuration.fc_fabric_names.split(',')] # There can be more than one SAN in the network and we need to # get credentials for each SAN. if fabric_names: self.fabric_configs = fabric_opts.load_fabric_configurations( fabric_names) @lockutils.synchronized('cisco', 'fcfabric-', True) def add_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of add_connection. Based on zoning policy and state of each I-T pair, list of zone members are created and pushed to the fabric to add zones. The new zones created or zones updated are activated based on isActivate flag set in cinder.conf returned by volume driver after attach operation. 
:param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Add connection for Fabric: %s", fabric) LOG.info(_LI("CiscoFCZoneDriver - Add connection " "for I-T map: %s"), initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_port') zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'cisco_zoning_policy') if zoning_policy_fab: zoning_policy = zoning_policy_fab zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') LOG.info(_LI("Zoning policy for Fabric %s"), zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) if statusmap_from_fabric.get('session') == 'none': cfgmap_from_fabric = self.get_active_zone_set( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # based on zoning policy, create zone member list and # push changes to fabric. for initiator_key in initiator_target_map.keys(): zone_map = {} initiator = initiator_key.lower() t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': for t in t_list: target = t.lower() zone_members = [ zm_utils.get_formatted_wwn(initiator), zm_utils.get_formatted_wwn(target)] zone_name = ( driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, self.configuration.cisco_zone_name_prefix, SUPPORTED_CHARS)) if (len(cfgmap_from_fabric) == 0 or ( zone_name not in zone_names)): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone exists. LOG.info(_LI("Zone exists in I-T mode. 
" "Skipping zone creation %s"), zone_name) elif zoning_policy == 'initiator': zone_members = [ zm_utils.get_formatted_wwn(initiator)] for t in t_list: target = t.lower() zone_members.append( zm_utils.get_formatted_wwn(target)) zone_name = ( driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, self.configuration.cisco_zone_name_prefix, SUPPORTED_CHARS)) if len(zone_names) > 0 and (zone_name in zone_names): zone_members = zone_members + filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) zone_map[zone_name] = zone_members else: msg = _("Zoning Policy: %s, not" " recognized") % zoning_policy LOG.error(msg) raise exception.FCZoneDriverException(msg) LOG.info(_LI("Zone map to add: %s"), zone_map) if len(zone_map) > 0: conn = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) conn.add_zones( zone_map, self.configuration.cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) conn.cleanup() except exception.CiscoZoningCliException as cisco_ex: msg = _("Exception: %s") % six.text_type(cisco_ex) raise exception.FCZoneDriverException(msg) except Exception: msg = _("Failed to add zoning configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Zones added successfully: %s", zone_map) else: LOG.debug("Zoning session exists VSAN: %s", zoning_vsan) @lockutils.synchronized('cisco', 'fcfabric-', True) def delete_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of delete_connection. Based on zoning policy and state of each I-T pair, list of zones are created for deletion. The zones are either updated deleted based on the policy and attach/detach state of each I-T pair. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Delete connection for fabric: %s", fabric) LOG.info(_LI("CiscoFCZoneDriver - Delete connection for I-T map: %s"), initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_port') zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'cisco_zoning_policy') if zoning_policy_fab: zoning_policy = zoning_policy_fab zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') LOG.info(_LI("Zoning policy for fabric %s"), zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) if statusmap_from_fabric.get('session') == 'none': cfgmap_from_fabric = self.get_active_zone_set( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # Based on zoning policy, get zone member list and push # changes to fabric. This operation could result in an update # for zone config with new member list or deleting zones from # active cfg. 
LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric) for initiator_key in initiator_target_map.keys(): initiator = initiator_key.lower() formatted_initiator = zm_utils.get_formatted_wwn(initiator) zone_map = {} zones_to_delete = [] t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': # In this case, zone needs to be deleted. for t in t_list: target = t.lower() zone_name = ( driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, self.configuration.cisco_zone_name_prefix, SUPPORTED_CHARS)) LOG.debug("Zone name to del: %s", zone_name) if (len(zone_names) > 0 and (zone_name in zone_names)): # delete zone. LOG.debug("Added zone to delete to list: %s", zone_name) zones_to_delete.append(zone_name) elif zoning_policy == 'initiator': zone_members = [formatted_initiator] for t in t_list: target = t.lower() zone_members.append( zm_utils.get_formatted_wwn(target)) zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, self.configuration.cisco_zone_name_prefix, SUPPORTED_CHARS) if (zone_names and (zone_name in zone_names)): filtered_members = filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) # The assumption here is that initiator is always # there in the zone as it is 'initiator' policy. # We find the filtered list and if it is non-empty, # add initiator to it and update zone if filtered # list is empty, we remove that zone. LOG.debug("Zone delete - I mode: filtered targets: %s", filtered_members) if filtered_members: filtered_members.append(formatted_initiator) LOG.debug("Filtered zone members to update: %s", filtered_members) zone_map[zone_name] = filtered_members LOG.debug("Filtered zone Map to update: %s", zone_map) else: zones_to_delete.append(zone_name) else: LOG.info(_LI("Zoning Policy: %s, not recognized"), zoning_policy) LOG.debug("Final Zone map to update: %s", zone_map) LOG.debug("Final Zone list to delete: %s", zones_to_delete) conn = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) # Update zone membership. if zone_map: conn.add_zones( zone_map, self.configuration.cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) # Delete zones ~sk. if zones_to_delete: zone_name_string = '' num_zones = len(zones_to_delete) for i in range(0, num_zones): if i == 0: zone_name_string = ('%s%s' % ( zone_name_string, zones_to_delete[i])) else: zone_name_string = ('%s%s%s' % ( zone_name_string, ';', zones_to_delete[i])) conn.delete_zones(zone_name_string, self.configuration. cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) conn.cleanup() except Exception: msg = _("Failed to update or delete zoning configuration") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Zones deleted successfully: %s", zone_map) else: LOG.debug("Zoning session exists VSAN: %s", zoning_vsan) def get_san_context(self, target_wwn_list): """Lookup SAN context for visible end devices. Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ formatted_target_list = [] fabric_map = {} fabrics = [x.strip() for x in self. 
configuration.fc_fabric_names.split(',')] LOG.debug("Fabric List: %s", fabrics) LOG.debug("Target wwn List: %s", target_wwn_list) if len(fabrics) > 0: for t in target_wwn_list: formatted_target_list.append( zm_utils.get_formatted_wwn(t.lower())) LOG.debug("Formatted Target wwn List: %s", formatted_target_list) for fabric_name in fabrics: fabric_ip = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_port') zoning_vsan = self.fabric_configs[fabric_name].safe_get( 'cisco_zoning_vsan') # Get name server data from fabric and get the targets # logged in. nsinfo = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) nsinfo = conn.get_nameserver_info() LOG.debug("show fcns database info from fabric: %s", nsinfo) conn.cleanup() except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error getting show fcns database " "info.")) except Exception: msg = _("Failed to get show fcns database info.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) visible_targets = filter( lambda x: x in formatted_target_list, nsinfo) if visible_targets: LOG.info(_LI("Filtered targets for SAN is: %s"), {fabric_name: visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): visible_targets[idx] = six.text_type( visible_targets[idx]).replace(':', '') fabric_map[fabric_name] = visible_targets else: LOG.debug("No targets are in the fcns info for SAN %s", fabric_name) LOG.debug("Return SAN context output: %s", fabric_map) return fabric_map def get_active_zone_set(self, fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan): """Gets active zoneset config for vsan.""" cfgmap = {} conn = None try: LOG.debug("Southbound connector: %s", self.configuration.cisco_sb_connector) conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) cfgmap = conn.get_active_zone_set() conn.cleanup() except Exception: msg = _("Failed to access active zoning configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Active zone set from fabric: %s", cfgmap) return cfgmap def get_zoning_status(self, fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan): """Gets zoneset status and mode.""" statusmap = {} conn = None try: LOG.debug("Southbound connector: %s", self.configuration.cisco_sb_connector) conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) statusmap = conn.get_zoning_status() conn.cleanup() except Exception: msg = _("Failed to access zoneset status:%s") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Zoneset status from fabric: %s", statusmap) return statusmap cinder-8.0.0/cinder/zonemanager/drivers/cisco/__init__.py0000664000567000056710000000000012701406250024550 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py0000664000567000056710000004330512701406250030020 0ustar 
jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Script to push the zone configuration to Cisco SAN switches. """ import random import re from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import ssh_utils from cinder import utils import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant LOG = logging.getLogger(__name__) class CiscoFCZoneClientCLI(object): """Cisco FC zone client cli implementation. OpenStack Fibre Channel zone client cli connector to manage FC zoning in Cisco SAN fabrics. Version history: 1.0 - Initial Cisco FC zone client cli """ switch_ip = None switch_port = '22' switch_user = 'admin' switch_pwd = 'none' def __init__(self, ipaddress, username, password, port, vsan): """initializing the client.""" self.switch_ip = ipaddress self.switch_port = port self.switch_user = username self.switch_pwd = password self.fabric_vsan = vsan self.sshpool = None def get_active_zone_set(self): """Return the active zone configuration. Return active zoneset from fabric. When none of the configurations are active then it will return empty map. :returns: Map -- active zone set map in the following format { 'zones': {'openstack50060b0000c26604201900051ee8e329': ['50060b0000c26604', '201900051ee8e329'] }, 'active_zone_config': 'OpenStack_Cfg' } """ zone_set = {} zone = {} zone_member = None zone_name = None switch_data = None zone_set_name = None try: switch_data = self._get_switch_info( [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan, ' | no-more']) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed getting active zone set " "from fabric %s"), self.switch_ip) try: for line in switch_data: # Split on non-word characters, line_split = re.split('[\s\[\]]+', line) if ZoneConstant.CFG_ZONESET in line_split: # zoneset name [name] vsan [vsan] zone_set_name = \ line_split[line_split.index(ZoneConstant.CFG_ZONESET) + 2] continue if ZoneConstant.CFG_ZONE in line_split: # zone name [name] vsan [vsan] zone_name = \ line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2] zone[zone_name] = list() continue if ZoneConstant.CFG_ZONE_MEMBER in line_split: # Examples: # pwwn c0:50:76:05:15:9f:00:12 # * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2] zone_member = \ line_split[ line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1] zone_member_list = zone.get(zone_name) zone_member_list.append(zone_member) zone_set[ZoneConstant.CFG_ZONES] = zone zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name except Exception as ex: # In case of parsing error here, it should be malformed cli output. msg = _("Malformed zone configuration: (switch=%(switch)s " "zone_config=%(zone_config)s)." 
) % {'switch': self.switch_ip, 'zone_config': switch_data} LOG.error(msg) exc_msg = _("Exception: %s") % six.text_type(ex) LOG.error(exc_msg) raise exception.FCZoneDriverException(reason=msg) return zone_set def add_zones(self, zones, activate, fabric_vsan, active_zone_set, zone_status): """Add zone configuration. This method will add the zone configuration passed by the user. input params: zones - zone names mapped to members and VSANs. zone members are colon separated but case-insensitive { zonename1:[zonemember1,zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: {'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } activate - True/False """ LOG.debug("Add Zones - Zones passed: %s", zones) LOG.debug("Active zone set: %s", active_zone_set) zone_list = active_zone_set[ZoneConstant.CFG_ZONES] LOG.debug("zone list: %s", zone_list) LOG.debug("zone status: %s", zone_status) cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] zone_cmds = [['conf'], ['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]] for zone in zones.keys(): # If the zone exists, it's an update: delete and re-insert. LOG.debug("Update call") if zone in zone_list: # Response from get_active_zone_set strips colons from WWPNs current_zone = set(zone_list[zone]) new_wwpns = map(lambda x: x.lower().replace(':', ''), zones[zone]) new_zone = set(new_wwpns) if current_zone != new_zone: try: self.delete_zones(zone, activate, fabric_vsan, active_zone_set, zone_status) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Deleting zone failed %s"), zone) LOG.debug("Deleted Zone before insert : %s", zone) zone_cmds.append(['zone', 'name', zone]) for member in zones[zone]: zone_cmds.append(['member', 'pwwn', member]) zone_cmds.append(['end']) try: LOG.debug("Add zones: Config cmd to run: %s", zone_cmds) self._ssh_execute(zone_cmds, True, 1) if activate: self.activate_zoneset(cfg_name, fabric_vsan, zone_status) self._cfg_save() except Exception as e: msg = _("Creating and activating zone set failed: " "(Zone set=%(zoneset)s error=%(err)s)." ) % {'zoneset': cfg_name, 'err': six.text_type(e)} LOG.error(msg) raise exception.CiscoZoningCliException(reason=msg) def activate_zoneset(self, cfgname, fabric_vsan, zone_status): """Method to Activate the zone config. Param cfgname - ZonesetName.""" LOG.debug("zone status: %s", zone_status) cmd_list = [['conf'], ['zoneset', 'activate', 'name', cfgname, 'vsan', self.fabric_vsan]] if zone_status['mode'] == 'enhanced': cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan]) cmd_list.append(['end']) return self._ssh_execute(cmd_list, True, 1) def get_zoning_status(self): """Return the zoning mode and session for a zoneset.""" zone_status = {} try: switch_data = self._get_switch_info( [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed getting zone status " "from fabric %s"), self.switch_ip) try: for line in switch_data: # Split on non-word characters. line_split = re.split('[\s\[\]]+', line) if 'mode:' in line_split: # mode: zone_status['mode'] = line_split[line_split.index('mode:') + 1] continue if 'session:' in line_split: # session: zone_status['session'] = \ line_split[line_split.index('session:') + 1] continue except Exception as ex: # In case of parsing error here, it should be malformed cli output. msg = _("Malformed zone status: (switch=%(switch)s " "zone_status=%(zone_status)s)."
) % {'switch': self.switch_ip, 'zone_status': switch_data} LOG.error(msg) exc_msg = _("Exception: %s") % six.text_type(ex) LOG.error(exc_msg) raise exception.FCZoneDriverException(reason=msg) return zone_status def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set, zone_status): """Delete zones from fabric. Method to delete the active zone config zones params zone_names: zoneNames separated by semicolon params activate: True/False """ LOG.debug("zone_names %s", zone_names) active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] cmds = [['conf'], ['zoneset', 'name', active_zoneset_name, 'vsan', fabric_vsan]] try: for zone in set(zone_names.split(';')): cmds.append(['no', 'zone', 'name', zone]) cmds.append(['end']) LOG.debug("Delete zones: Config cmd to run: %s", cmds) self._ssh_execute(cmds, True, 1) if activate: self.activate_zoneset(active_zoneset_name, fabric_vsan, zone_status) self._cfg_save() except Exception as e: msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)." ) % {'cmd': cmds, 'err': six.text_type(e)} LOG.error(msg) raise exception.CiscoZoningCliException(reason=msg) def get_nameserver_info(self): """Get name server data from fabric. This method will return the connected node port wwn list(local and remote) for the given switch fabric show fcns database """ cli_output = None return_list = [] try: cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW, self.fabric_vsan]) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed collecting fcns database " "info for fabric %s"), self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) LOG.info(_LI("Connector returning fcnsinfo-%s"), return_list) return return_list @utils.retry(processutils.ProcessExecutionError, retries=5) def _cfg_save(self): cmd = ['copy', 'running-config', 'startup-config'] self._run_ssh(cmd, True) def _get_switch_info(self, cmd_list): stdout, stderr, sw_data = None, None, None try: stdout, stderr = self._run_ssh(cmd_list, True) LOG.debug("CLI output from ssh - output: %s", stdout) if (stdout): sw_data = stdout.splitlines() return sw_data except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd_list, 'err': six.text_type(e)} LOG.error(msg) raise exception.CiscoZoningCliException(reason=msg) def _parse_ns_output(self, switch_data): """Parses name server data. Parses nameserver raw data and adds the device port wwns to the list :returns: List -- list of device port wwn from ns info """ return_list = [] for line in switch_data: if not(" N " in line): continue linesplit = line.split() if len(linesplit) > 2: node_port_wwn = linesplit[2] return_list.append(node_port_wwn) else: msg = _("Malformed show fcns database string: %s") % line LOG.error(msg) raise exception.InvalidParameterValue(err=msg) return return_list def _run_ssh(self, cmd_list, check_exit_code=True): command = ' '.join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) try: with self.sshpool.item() as ssh: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception: with excutils.save_and_reraise_exception(): LOG.warning(_LW("Error running SSH command: %s"), command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. 
Executes CLI commands where status return is expected. cmd_list is a list of commands, where each command is itself a list of parameters. We use utils.check_ssh_injection to check each command, but then join then with " ; " to form a single command. """ # Check that each command is secure for cmd in cmd_list: utils.check_ssh_injection(cmd) # Combine into a single command. command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list)) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) stdin, stdout, stderr = None, None, None LOG.debug("Executing command via ssh: %s", command) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: stdin, stdout, stderr = ssh.exec_command(command) channel = stdout.channel exit_status = channel.recv_exit_status() LOG.debug("Exit Status from ssh: %s", exit_status) # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if check_exit_code and exit_status != 0: raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) else: return True else: return True except Exception as e: LOG.exception(_LE('Error executing SSH command.')) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after SSH: %s", last_exception) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error executing command via ssh.")) finally: if stdin: stdin.flush() stdin.close() if stdout: stdout.close() if stderr: stderr.close() def cleanup(self): self.sshpool = None cinder-8.0.0/cinder/zonemanager/drivers/cisco/cisco_fabric_opts.py0000664000567000056710000000367212701406250026506 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
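#
# A usage sketch for the options defined below (the fabric name and all
# values here are made-up examples, not shipped defaults): each fabric named
# in fc_fabric_names gets its own config group in cinder.conf, e.g.
#
#   [CISCO_FABRIC_A]
#   cisco_fc_fabric_address = 192.0.2.10
#   cisco_fc_fabric_user = admin
#   cisco_fc_fabric_password = secret
#   cisco_fc_fabric_port = 22
#   cisco_zoning_policy = initiator-target
#   cisco_zoning_vsan = 10
#
# load_fabric_configurations(['CISCO_FABRIC_A']) then returns
# {'CISCO_FABRIC_A': Configuration(...)}, which the zone driver queries via
# safe_get('cisco_fc_fabric_address') and friends.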
# from oslo_config import cfg from cinder.volume import configuration cisco_zone_opts = [ cfg.StrOpt('cisco_fc_fabric_address', default='', help='Management IP of fabric'), cfg.StrOpt('cisco_fc_fabric_user', default='', help='Fabric user ID'), cfg.StrOpt('cisco_fc_fabric_password', default='', help='Password for user', secret=True), cfg.PortOpt('cisco_fc_fabric_port', default=22, help='Connecting port'), cfg.StrOpt('cisco_zoning_policy', default='initiator-target', help='overridden zoning policy'), cfg.BoolOpt('cisco_zone_activate', default=True, help='overridden zoning activation state'), cfg.StrOpt('cisco_zone_name_prefix', help='overridden zone name prefix'), cfg.StrOpt('cisco_zoning_vsan', help='VSAN of the Fabric'), ] CONF = cfg.CONF CONF.register_opts(cisco_zone_opts, group='CISCO_FABRIC_EXAMPLE') def load_fabric_configurations(fabric_names): fabric_configs = {} for fabric_name in fabric_names: config = configuration.Configuration(cisco_zone_opts, fabric_name) fabric_configs[fabric_name] = config return fabric_configs cinder-8.0.0/cinder/zonemanager/drivers/__init__.py0000664000567000056710000000165412701406250023470 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ :mod:`cinder.zonemanager.driver` -- FC Zone Drivers ===================================================== .. automodule:: cinder.zonemanager.driver :platform: Unix :synopsis: Module containing all the FC Zone drivers. """ cinder-8.0.0/cinder/zonemanager/drivers/brocade/0000775000567000056710000000000012701406543022755 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/zonemanager/drivers/brocade/fc_zone_constants.py0000664000567000056710000000600212701406250027037 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Common constants used by Brocade FC Zone Driver. """ YES = 'y' ACTIVE_ZONE_CONFIG = 'active_zone_config' CFG_ZONESET = 'cfg:' CFG_ZONES = 'zones' OPENSTACK_CFG_NAME = 'OpenStack_Cfg' SUCCESS = 'Success' TRANS_ABORTABLE = 'It is abortable' """ CLI Commands for FC zoning operations. 
""" GET_ACTIVE_ZONE_CFG = 'cfgactvshow' ZONE_CREATE = 'zonecreate ' ZONESET_CREATE = 'cfgcreate ' CFG_SAVE = 'cfgsave' CFG_ADD = 'cfgadd ' ACTIVATE_ZONESET = 'cfgenable ' DEACTIVATE_ZONESET = 'cfgdisable' CFG_DELETE = 'cfgdelete ' CFG_REMOVE = 'cfgremove ' ZONE_DELETE = 'zonedelete ' CFG_SHOW_TRANS = 'cfgtransshow' CFG_ZONE_TRANS_ABORT = 'cfgtransabort' NS_SHOW = 'nsshow' NS_CAM_SHOW = 'nscamshow' """ HTTPS connector constants """ AUTH_HEADER = "Authorization" PROTOCOL_HTTPS = "HTTPS" STATUS_OK = 200 SECINFO_PAGE = "/secinfo.html" AUTHEN_PAGE = "/authenticate.html" GET_METHOD = "GET" POST_METHOD = "POST" SECINFO_BEGIN = "--BEGIN SECINFO" SECINFO_END = "--END SECINFO" RANDOM = "RANDOM" AUTH_STRING = "Custom_Basic " # Trailing space is required, do not remove AUTHEN_BEGIN = "--BEGIN AUTHENTICATE" AUTHEN_END = "--END AUTHENTICATE" AUTHENTICATED = "authenticated" SESSION_PAGE_ACTION = "/session.html?action=query" SESSION_BEGIN = "--BEGIN SESSION" SESSION_END = "--END SESSION" SESSION_PAGE = "/session.html" LOGOUT_PAGE = "/logout.html" ZONEINFO_BEGIN = "--BEGIN ZONE INFO" ZONEINFO_END = "--END ZONE INFO" SWITCH_PAGE = "/switch.html" SWITCHINFO_BEGIN = "--BEGIN SWITCH INFORMATION" SWITCHINFO_END = "--END SWITCH INFORMATION" FIRMWARE_VERSION = "swFWVersion" VF_ENABLED = "vfEnabled" MANAGEABLE_VF = "manageableLFList" CHANGE_VF = ("Session=--BEGIN SESSION\n\taction=apply\n\tLFId= {vfid} " "\b\t--END SESSION") ZONE_TRAN_STATUS = "/gzoneinfo.htm?txnId={txnId}" CFG_DELIM = "\x01" ZONE_DELIM = "\x02" ALIAS_DELIM = "\x03" QLP_DELIM = "\x04" ZONE_END_DELIM = "\x05&saveonly=" IFA_DELIM = "\x06" ACTIVE_CFG_DELIM = "\x07" DEFAULT_CFG = "d__efault__Cfg" NS_PAGE = "/nsinfo.htm" NSINFO_BEGIN = "--BEGIN NS INFO" NSINFO_END = "--END NS INFO" NS_DELIM = ";N ;" ZONE_TX_BEGIN = "--BEGIN ZONE_TXN_INFO" ZONE_TX_END = "--END ZONE_TXN_INFO" ZONE_ERROR_CODE = "errorCode" ZONE_PAGE = "/gzoneinfo.htm" CFG_NAME = "openstack_cfg" ZONE_STRING_PREFIX = "zonecfginfo=" ZONE_ERROR_MSG = "errorMessage" ZONE_TX_ID = "txnId" ZONE_TX_STATUS = "status" SESSION_LF_ID = "sessionLFId" HTTP = "http" HTTPS = "https" cinder-8.0.0/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py0000664000567000056710000002344512701406250030666 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE from cinder import ssh_utils from cinder import utils from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant from cinder.zonemanager import fc_san_lookup_service as fc_service from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class BrcdFCSanLookupService(fc_service.FCSanLookupService): """The SAN lookup service that talks to Brocade switches. Version History: 1.0.0 - Initial version """ VERSION = "1.0.0" def __init__(self, **kwargs): """Initializing the client.""" super(BrcdFCSanLookupService, self).__init__(**kwargs) self.configuration = kwargs.get('configuration', None) self.create_configuration() def create_configuration(self): """Configuration specific to SAN context values.""" config = self.configuration fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')] LOG.debug('Fabric Names: %s', fabric_names) # There can be more than one SAN in the network and we need to # get credentials for each for SAN context lookup later. if len(fabric_names) > 0: self.fabric_configs = fabric_opts.load_fabric_configurations( fabric_names) def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): """Provides the initiator/target map for available SAN contexts. Looks up nameserver of each fc SAN configured to find logged in devices and returns a map of initiator and target port WWNs for each fabric. :param initiator_wwn_list: List of initiator port WWN :param target_wwn_list: List of target port WWN :returns: List -- device wwn map in following format { : { 'initiator_port_wwn_list': ('200000051e55a100', '200000051e55a121'..) 'target_port_wwn_list': ('100000051e55a100', '100000051e55a121'..) } } :raises: Exception when connection to fabric is failed """ device_map = {} formatted_target_list = [] formatted_initiator_list = [] fabric_map = {} fabric_names = self.configuration.fc_fabric_names fabrics = None if not fabric_names: raise exception.InvalidParameterValue( err=_("Missing Fibre Channel SAN configuration " "param - fc_fabric_names")) fabrics = [x.strip() for x in fabric_names.split(',')] LOG.debug("FC Fabric List: %s", fabrics) if fabrics: for t in target_wwn_list: formatted_target_list.append(fczm_utils.get_formatted_wwn(t)) for i in initiator_wwn_list: formatted_initiator_list.append(fczm_utils. 
get_formatted_wwn(i)) for fabric_name in fabrics: fabric_ip = self.fabric_configs[fabric_name].safe_get( 'fc_fabric_address') fabric_user = self.fabric_configs[fabric_name].safe_get( 'fc_fabric_user') fabric_pwd = self.fabric_configs[fabric_name].safe_get( 'fc_fabric_password') fabric_port = self.fabric_configs[fabric_name].safe_get( 'fc_fabric_port') ssh_pool = ssh_utils.SSHPool(fabric_ip, fabric_port, None, fabric_user, password=fabric_pwd) # Get name server data from fabric and find the targets # logged in nsinfo = '' try: LOG.debug("Getting name server data for " "fabric %s", fabric_ip) nsinfo = self.get_nameserver_info(ssh_pool) except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed collecting name server info from" " fabric %s"), fabric_ip) except Exception as e: msg = _("SSH connection failed " "for %(fabric)s with error: %(err)s" ) % {'fabric': fabric_ip, 'err': e} LOG.error(msg) raise exception.FCSanLookupServiceException(message=msg) LOG.debug("Lookup service:nsinfo-%s", nsinfo) LOG.debug("Lookup service:initiator list from " "caller-%s", formatted_initiator_list) LOG.debug("Lookup service:target list from " "caller-%s", formatted_target_list) visible_targets = [x for x in nsinfo if x in formatted_target_list] visible_initiators = [x for x in nsinfo if x in formatted_initiator_list] if visible_targets: LOG.debug("Filtered targets are: %s", visible_targets) # getting rid of the : before returning for idx, elem in enumerate(visible_targets): elem = str(elem).replace(':', '') visible_targets[idx] = elem else: LOG.debug("No targets are in the nameserver for SAN %s", fabric_name) if visible_initiators: # getting rid of the : before returning for idx, elem in enumerate(visible_initiators): elem = str(elem).replace(':', '') visible_initiators[idx] = elem else: LOG.debug("No initiators are in the nameserver " "for SAN %s", fabric_name) fabric_map = { 'initiator_port_wwn_list': visible_initiators, 'target_port_wwn_list': visible_targets } device_map[fabric_name] = fabric_map LOG.debug("Device map for SAN context: %s", device_map) return device_map def get_nameserver_info(self, ssh_pool): """Get name server data from fabric.
This method will return the connected node port wwn list(local and remote) for the given switch fabric :param ssh_pool: SSH connections for the current fabric """ cli_output = None nsinfo_list = [] try: cli_output = self._get_switch_data(ssh_pool, zone_constant.NS_SHOW) except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed collecting nsshow info for fabric")) if cli_output: nsinfo_list = self._parse_ns_output(cli_output) try: cli_output = self._get_switch_data(ssh_pool, zone_constant.NS_CAM_SHOW) except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed collecting nscamshow")) if cli_output: nsinfo_list.extend(self._parse_ns_output(cli_output)) LOG.debug("Connector returning nsinfo-%s", nsinfo_list) return nsinfo_list def _get_switch_data(self, ssh_pool, cmd): utils.check_ssh_injection([cmd]) with ssh_pool.item() as ssh: try: switch_data, err = processutils.ssh_execute(ssh, cmd) except processutils.ProcessExecutionError as e: msg = (_("SSH Command failed with error: '%(err)s', Command: " "'%(command)s'") % {'err': six.text_type(e), 'command': cmd}) LOG.error(msg) raise exception.FCSanLookupServiceException(message=msg) return switch_data def _parse_ns_output(self, switch_data): """Parses name server data. Parses nameserver raw data and adds the device port wwns to the list :returns: list of device port wwn from ns info """ nsinfo_list = [] lines = switch_data.split('\n') for line in lines: if not(" NL " in line or " N " in line): continue linesplit = line.split(';') if len(linesplit) > 2: node_port_wwn = linesplit[2].strip() nsinfo_list.append(node_port_wwn) else: msg = _("Malformed nameserver string: %s") % line LOG.error(msg) raise exception.InvalidParameterValue(err=msg) return nsinfo_list cinder-8.0.0/cinder/zonemanager/drivers/brocade/brcd_fc_zone_connector_factory.py0000664000567000056710000000617312701406250031547 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Brocade Zone Connector Factory is responsible to dynamically create the connection object based on the configuration """ from oslo_log import log as logging from oslo_utils import importutils from cinder.zonemanager.drivers.brocade import fc_zone_constants LOG = logging.getLogger(__name__) class BrcdFCZoneFactory(object): def __init__(self): self.sb_conn_map = {} def get_connector(self, fabric, sb_connector): """Returns Device Connector. 
Factory method to create and return correct SB connector object based on the protocol """ fabric_ip = fabric.safe_get('fc_fabric_address') client = self.sb_conn_map.get(fabric_ip) if not client: fabric_user = fabric.safe_get('fc_fabric_user') fabric_pwd = fabric.safe_get('fc_fabric_password') fabric_port = fabric.safe_get('fc_fabric_port') fc_vfid = fabric.safe_get('fc_virtual_fabric_id') fabric_ssh_cert_path = fabric.safe_get('fc_fabric_ssh_cert_path') LOG.debug("Client not found. Creating connection client for" " %(ip)s with %(connector)s protocol " "for the user %(user)s at port %(port)s.", {'ip': fabric_ip, 'connector': sb_connector, 'user': fabric_user, 'port': fabric_port, 'vf_id': fc_vfid}) if sb_connector.lower() in (fc_zone_constants.HTTP, fc_zone_constants.HTTPS): client = importutils.import_object( "cinder.zonemanager.drivers.brocade." "brcd_http_fc_zone_client.BrcdHTTPFCZoneClient", ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vfid=fc_vfid, protocol=sb_connector ) else: client = importutils.import_object( "cinder.zonemanager.drivers.brocade." "brcd_fc_zone_client_cli.BrcdFCZoneClientCLI", ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, key=fabric_ssh_cert_path, port=fabric_port ) self.sb_conn_map.update({fabric_ip: client}) return client cinder-8.0.0/cinder/zonemanager/drivers/brocade/__init__.py0000664000567000056710000000171412701406250025064 0ustar jenkinsjenkins00000000000000# (c) Copyright 2013 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ :mod:`cinder.zonemanager.driver.brocade` -- Brocade FC Zone Drivers ===================================================== .. automodule:: cinder.zonemanager.driver.brocade :platform: Unix :synopsis: Module containing all the Brocade FC Zone drivers. """ cinder-8.0.0/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py0000664000567000056710000005547012701406257030146 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Script to push the zone configuration to brocade SAN switches. 
""" import random import re from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE from cinder import ssh_utils from cinder import utils import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant LOG = logging.getLogger(__name__) class BrcdFCZoneClientCLI(object): switch_ip = None switch_port = '22' switch_user = 'admin' switch_pwd = 'none' switch_key = 'none' patrn = re.compile('[;\s]+') def __init__(self, ipaddress, username, password, port, key): """Initializing the client.""" self.switch_ip = ipaddress self.switch_port = port self.switch_user = username self.switch_pwd = password self.switch_key = key self.sshpool = None def get_active_zone_set(self): """Return the active zone configuration. Return active zoneset from fabric. When none of the configurations are active then it will return empty map. :returns: Map -- active zone set map in the following format { 'zones': {'openstack50060b0000c26604201900051ee8e329': ['50060b0000c26604', '201900051ee8e329'] }, 'active_zone_config': 'OpenStack_Cfg' } """ zone_set = {} zone = {} zone_member = None zone_name = None switch_data = None zone_set_name = None try: switch_data = self._get_switch_info( [zone_constant.GET_ACTIVE_ZONE_CFG]) except exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed getting active zone set " "from fabric %s"), self.switch_ip) try: for line in switch_data: line_split = re.split('\\t', line) if len(line_split) > 2: line_split = [x.replace( '\n', '') for x in line_split] line_split = [x.replace( ' ', '') for x in line_split] if zone_constant.CFG_ZONESET in line_split: zone_set_name = line_split[1] continue if line_split[1]: zone_name = line_split[1] zone[zone_name] = list() if line_split[2]: zone_member = line_split[2] zone_member_list = zone.get(zone_name) zone_member_list.append(zone_member) zone_set[zone_constant.CFG_ZONES] = zone zone_set[zone_constant.ACTIVE_ZONE_CONFIG] = zone_set_name except Exception: # In case of parsing error here, it should be malformed cli output. msg = _("Malformed zone configuration: (switch=%(switch)s " "zone_config=%(zone_config)s)." ) % {'switch': self.switch_ip, 'zone_config': switch_data} LOG.exception(msg) raise exception.FCZoneDriverException(reason=msg) switch_data = None return zone_set def add_zones(self, zones, activate, active_zone_set=None): """Add zone configuration. This method will add the zone configuration passed by user. input params: zones - zone names mapped to members. zone members are colon separated but case-insensitive { zonename1:[zonememeber1,zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: {'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } activate - True/False active_zone_set - active zone set dict retrieved from get_active_zone_set method """ LOG.debug("Add Zones - Zones passed: %s", zones) cfg_name = None iterator_count = 0 zone_with_sep = '' if not active_zone_set: active_zone_set = self.get_active_zone_set() LOG.debug("Active zone set: %s", active_zone_set) zone_list = active_zone_set[zone_constant.CFG_ZONES] LOG.debug("zone list: %s", zone_list) for zone in zones.keys(): # If zone exists, its an update. Delete & insert # TODO(skolathur): This still need to be optimized # to an update call later. Now we just handled the # same zone name with same zone members. 
if (zone in zone_list): if set(zones[zone]) == set(zone_list[zone]): break try: self.delete_zones(zone, activate, active_zone_set) except exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Deleting zone failed %s"), zone) LOG.debug("Deleted Zone before insert : %s", zone) zone_members_with_sep = ';'.join(str(member) for member in zones[zone]) LOG.debug("Forming command for add zone") cmd = 'zonecreate "%(zone)s", "%(zone_members_with_sep)s"' % { 'zone': zone, 'zone_members_with_sep': zone_members_with_sep} LOG.debug("Adding zone, cmd to run %s", cmd) self.apply_zone_change(cmd.split()) LOG.debug("Created zones on the switch") if(iterator_count > 0): zone_with_sep += ';' iterator_count += 1 zone_with_sep += zone if not zone_with_sep: return try: # Get active zone set from device, as some of the zones # could be deleted. active_zone_set = self.get_active_zone_set() cfg_name = active_zone_set[zone_constant.ACTIVE_ZONE_CONFIG] cmd = None if not cfg_name: cfg_name = zone_constant.OPENSTACK_CFG_NAME cmd = 'cfgcreate "%(zoneset)s", "%(zones)s"' \ % {'zoneset': cfg_name, 'zones': zone_with_sep} else: cmd = 'cfgadd "%(zoneset)s", "%(zones)s"' \ % {'zoneset': cfg_name, 'zones': zone_with_sep} LOG.debug("New zone %s", cmd) self.apply_zone_change(cmd.split()) if activate: self.activate_zoneset(cfg_name) else: self._cfg_save() except Exception as e: self._cfg_trans_abort() msg = _("Creating and activating zone set failed: " "(Zone set=%(cfg_name)s error=%(err)s)." ) % {'cfg_name': cfg_name, 'err': six.text_type(e)} LOG.error(msg) raise exception.BrocadeZoningCliException(reason=msg) def activate_zoneset(self, cfgname): """Method to Activate the zone config. Param cfgname - ZonesetName.""" cmd_list = [zone_constant.ACTIVATE_ZONESET, cfgname] return self._ssh_execute(cmd_list, True, 1) def deactivate_zoneset(self): """Method to deActivate the zone config.""" return self._ssh_execute([zone_constant.DEACTIVATE_ZONESET], True, 1) def delete_zones(self, zone_names, activate, active_zone_set=None): """Delete zones from fabric. Method to delete the active zone config zones :param zone_names: zoneNames separated by semicolon :param activate: True/False :param active_zone_set: the active zone set dict retrieved from get_active_zone_set method """ active_zoneset_name = None zone_list = [] if not active_zone_set: active_zone_set = self.get_active_zone_set() active_zoneset_name = active_zone_set[ zone_constant.ACTIVE_ZONE_CONFIG] zone_list = active_zone_set[zone_constant.CFG_ZONES] zones = self.patrn.split(''.join(zone_names)) cmd = None try: if len(zones) == len(zone_list): self.deactivate_zoneset() cmd = 'cfgdelete "%(active_zoneset_name)s"' \ % {'active_zoneset_name': active_zoneset_name} # Active zoneset is being deleted, hence reset activate flag activate = False else: cmd = 'cfgremove "%(active_zoneset_name)s", "%(zone_names)s"' \ % {'active_zoneset_name': active_zoneset_name, 'zone_names': zone_names } LOG.debug("Delete zones: Config cmd to run: %s", cmd) self.apply_zone_change(cmd.split()) for zone in zones: self._zone_delete(zone) if activate: self.activate_zoneset(active_zoneset_name) else: self._cfg_save() except Exception as e: msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)." ) % {'cmd': cmd, 'err': six.text_type(e)} LOG.error(msg) self._cfg_trans_abort() raise exception.BrocadeZoningCliException(reason=msg) def get_nameserver_info(self): """Get name server data from fabric. 
This method will return the connected node port wwn list(local and remote) for the given switch fabric """ cli_output = None return_list = [] try: cmd = '%(nsshow)s;%(nscamshow)s' % { 'nsshow': zone_constant.NS_SHOW, 'nscamshow': zone_constant.NS_CAM_SHOW} cli_output = self._get_switch_info([cmd]) except exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed collecting nsshow " "info for fabric %s"), self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) cli_output = None return return_list def _cfg_save(self): self._ssh_execute([zone_constant.CFG_SAVE], True, 1) def _zone_delete(self, zone_name): cmd = 'zonedelete "%(zone_name)s"' % {'zone_name': zone_name} self.apply_zone_change(cmd.split()) def _cfg_trans_abort(self): is_abortable = self._is_trans_abortable() if(is_abortable): self.apply_zone_change([zone_constant.CFG_ZONE_TRANS_ABORT]) def _is_trans_abortable(self): is_abortable = False stdout, stderr = None, None stdout, stderr = self._run_ssh( [zone_constant.CFG_SHOW_TRANS], True, 1) output = stdout.splitlines() is_abortable = False for line in output: if(zone_constant.TRANS_ABORTABLE in line): is_abortable = True break if stderr: msg = _("Error while checking transaction status: %s") % stderr raise exception.BrocadeZoningCliException(reason=msg) else: return is_abortable def apply_zone_change(self, cmd_list): """Execute zoning cli with no status update. Executes CLI commands such as addZone where status return is not expected. """ stdout, stderr = None, None LOG.debug("Executing command via ssh: %s", cmd_list) stdout, stderr = self._run_ssh(cmd_list, True, 1) # no output expected, so output means there is an error if stdout: msg = _("Error while running zoning CLI: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd_list, 'err': stdout} LOG.error(msg) self._cfg_trans_abort() raise exception.BrocadeZoningCliException(reason=msg) def is_supported_firmware(self): """Check firmware version is v6.4 or higher. This API checks if the firmware version per the plug-in support level. This only checks major and minor version. """ cmd = ['version'] firmware = 0 try: stdout, stderr = self._execute_shell_cmd(cmd) if (stdout): for line in stdout: if 'Fabric OS: v' in line: LOG.debug("Firmware version string: %s", line) ver = line.split('Fabric OS: v')[1].split('.') if (ver): firmware = int(ver[0] + ver[1]) return firmware > 63 else: LOG.error(_LE("No CLI output for firmware version check")) return False except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd, 'err': six.text_type(e)} LOG.error(msg) raise exception.BrocadeZoningCliException(reason=msg) def _get_switch_info(self, cmd_list): stdout, stderr, sw_data = None, None, None try: stdout, stderr = self._run_ssh(cmd_list, True, 1) if (stdout): sw_data = stdout.splitlines() return sw_data except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd_list, 'err': six.text_type(e)} LOG.error(msg) raise exception.BrocadeZoningCliException(reason=msg) def _parse_ns_output(self, switch_data): """Parses name server data. 
Parses nameserver raw data and adds the device port wwns to the list :returns: List -- list of device port wwn from ns info """ return_list = [] for line in switch_data: if not(" NL " in line or " N " in line): continue linesplit = line.split(';') if len(linesplit) > 2: node_port_wwn = linesplit[2] return_list.append(node_port_wwn) else: msg = _("Malformed nameserver string: %s") % line LOG.error(msg) raise exception.InvalidParameterValue(err=msg) return return_list def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): # TODO(skolathur): Need to implement ssh_injection check # currently, the check will fail for zonecreate command # as zone members are separated by ';'which is a danger char command = ' '. join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, self.switch_key, min_size=1, max_size=5) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.exception(_LE('Error executing SSH command.')) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error running SSH command: %s"), command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. Executes CLI commands such as cfgsave where status return is expected. """ utils.check_ssh_injection(cmd_list) command = ' '. 
join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, self.switch_key, min_size=1, max_size=5) stdin, stdout, stderr = None, None, None LOG.debug("Executing command via ssh: %s", command) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: stdin, stdout, stderr = ssh.exec_command(command) stdin.write("%s\n" % zone_constant.YES) channel = stdout.channel exit_status = channel.recv_exit_status() LOG.debug("Exit Status from ssh: %s", exit_status) # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if check_exit_code and exit_status != 0: raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) else: return True else: return True except Exception as e: LOG.exception(_LE('Error executing SSH command.')) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after " "SSH: %s", last_exception) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error executing command via ssh: %s"), e) finally: if stdin: stdin.flush() stdin.close() if stdout: stdout.close() if stderr: stderr.close() def _execute_shell_cmd(self, cmd): """Run command over shell for older firmware versions. Invokes shell and issue the command and return the output. This is primarily used for issuing read commands when we are not sure if the firmware supports exec_command. """ utils.check_ssh_injection(cmd) command = ' '. join(cmd) stdout, stderr = None, None if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, self.switch_key, min_size=1, max_size=5) with self.sshpool.item() as ssh: LOG.debug('Running cmd (SSH): %s', command) channel = ssh.invoke_shell() stdin_stream = channel.makefile('wb') stdout_stream = channel.makefile('rb') stderr_stream = channel.makefile('rb') stdin_stream.write('''%s exit ''' % command) stdin_stream.flush() stdout = stdout_stream.readlines() stderr = stderr_stream.readlines() stdin_stream.close() stdout_stream.close() stderr_stream.close() exit_status = channel.recv_exit_status() # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if exit_status != 0: LOG.debug("command %s failed", command) raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) try: channel.close() except Exception: LOG.exception(_LE('Error closing channel.')) LOG.debug("_execute_cmd: stdout to return: %s", stdout) LOG.debug("_execute_cmd: stderr to return: %s", stderr) return (stdout, stderr) def cleanup(self): self.sshpool = None cinder-8.0.0/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py0000664000567000056710000011260612701406250030342 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Brocade south bound connector to communicate with switch using HTTP or HTTPS protocol. """ from oslo_log import log as logging from oslo_serialization import base64 import requests import six import time from cinder import exception from cinder.i18n import _, _LI import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant LOG = logging.getLogger(__name__) class BrcdHTTPFCZoneClient(object): def __init__(self, ipaddress, username, password, port, vfid, protocol): """Initializing the client with the parameters passed. Creates authentication token and authenticate with switch to ensure the credentials are correct and change the VF context. :param ipaddress: IP Address of the device. :param username: User id to login. :param password: User password. :param port: Device Communication port :param vfid: Virtual Fabric ID. :param protocol: Communication Protocol. """ self.switch_ip = ipaddress self.switch_user = username self.switch_pwd = password self.protocol = protocol self.vfid = vfid self.cfgs = {} self.zones = {} self.alias = {} self.qlps = {} self.ifas = {} self.active_cfg = '' self.parsed_raw_zoneinfo = "" self.random_no = '' self.session = None # Create and assign the authentication header based on the credentials self.auth_header = self.create_auth_token() # Authenticate with the switch # If authenticated successfully, save the auth status and # create auth header for future communication with the device. self.is_auth, self.auth_header = self.authenticate() self.check_change_vf_context() def connect(self, requestType, requestURL, payload='', header=None): """Connect to the switch using HTTP/HTTPS protocol. :param requestType: Connection Request method :param requestURL: Connection URL :param payload: Data to send with POST request :param header: Request Headers :returns: HTTP response data :raises: BrocadeZoningHttpException """ try: if header is None: header = {} header.update({"User-Agent": "OpenStack Zone Driver"}) # Ensure only one connection is made throughout the life cycle protocol = zone_constant.HTTP if self.protocol == zone_constant.PROTOCOL_HTTPS: protocol = zone_constant.HTTPS if self.session is None: self.session = requests.Session() adapter = requests.adapters.HTTPAdapter(pool_connections=1, pool_maxsize=1) self.session.mount(protocol + '://', adapter) url = protocol + "://" + self.switch_ip + requestURL response = None if requestType == zone_constant.GET_METHOD: response = self.session.get(url, headers=(header), verify=False) elif requestType == zone_constant.POST_METHOD: response = self.session.post(url, payload, headers=(header), verify=False) # Throw exception when response status is not OK if response.status_code != zone_constant.STATUS_OK: msg = _("Error while querying page %(url)s on the switch, " "reason %(error)s.") % {'url': url, 'error': response.reason} raise exception.BrocadeZoningHttpException(msg) else: return response.text except requests.exceptions.ConnectionError as e: msg = (_("Error while connecting the switch %(switch_id)s " "with protocol %(protocol)s. 
Error: %(error)s.") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'error': six.text_type(e)}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) except exception.BrocadeZoningHttpException as ex: msg = (_("Unexpected status code from the switch %(switch_id)s " "with protocol %(protocol)s for url %(page)s. " "Error: %(error)s") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'page': requestURL, 'error': six.text_type(ex)}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def create_auth_token(self): """Create the authentication token. Creates the authentication token to use in the authentication header return authentication header (Base64(username:password:random no)). :returns: Authentication Header :raises: BrocadeZoningHttpException """ try: # Send GET request to secinfo.html to get random number response = self.connect(zone_constant.GET_METHOD, zone_constant.SECINFO_PAGE) parsed_data = self.get_parsed_data(response, zone_constant.SECINFO_BEGIN, zone_constant.SECINFO_END) # Extract the random no from secinfo.html response self.random_no = self.get_nvp_value(parsed_data, zone_constant.RANDOM) # Form the authentication string auth_string = (self.switch_user + ":" + self.switch_pwd + ":" + self.random_no) auth_token = base64.encode_as_text(auth_string).strip() auth_header = (zone_constant.AUTH_STRING + auth_token) # Build the proper header except Exception as e: msg = (_("Error while creating authentication token: %s") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) return auth_header def authenticate(self): """Authenticate with the switch. Returns authentication status with modified authentication header (Base64(username:xxx:random no)). :returns: Authentication status :raises: BrocadeZoningHttpException """ headers = {zone_constant.AUTH_HEADER: self.auth_header} try: # GET Request to authenticate.html to verify the credentials response = self.connect(zone_constant.GET_METHOD, zone_constant.AUTHEN_PAGE, header=headers) parsed_data = self.get_parsed_data(response, zone_constant.AUTHEN_BEGIN, zone_constant.AUTHEN_END) isauthenticated = self.get_nvp_value( parsed_data, zone_constant.AUTHENTICATED) if isauthenticated == "yes": # Replace password in the authentication string with xxx auth_string = (self.switch_user + ":" + "xxx" + ":" + self.random_no) auth_token = base64.encode_as_text(auth_string).strip() auth_header = zone_constant.AUTH_STRING + auth_token return True, auth_header else: auth_error_code = self.get_nvp_value(parsed_data, "errCode") msg = (_("Authentication failed, verify the switch " "credentials, error code %s.") % auth_error_code) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) except Exception as e: msg = (_("Error while authenticating with switch: %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def get_session_info(self): """Get the session information from the switch :returns: Connection status information. """ try: headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to session.html response = self.connect(zone_constant.GET_METHOD, zone_constant.SESSION_PAGE_ACTION, header=headers) except Exception as e: msg = (_("Error while getting session information %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) return response def get_parsed_data(self, data, delim1, demil2): """Return the sub string between the delimiters. 
:param data: String to manipulate :param delim1: Delimiter 1 :param delim2: Delimiter 2 :returns: substring between the delimiters """ try: start = data.index(delim1) start = start + len(delim1) end = data.index(delim2) return data[start:end] except ValueError as e: msg = (_("Error while parsing the data: %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def get_nvp_value(self, data, keyname): """Get the value for the key passed. :param data: NVP to manipulate :param keyname: Key name :returns: value for the NVP """ try: start = data.index(keyname) start = start + len(keyname) temp = data[start:] end = temp.index("\n") return (temp[:end].lstrip('= ')) except ValueError as e: msg = (_("Error while getting nvp value: %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def get_managable_vf_list(self, session_info): """Return the list of VFIDs that can be managed. :param session_info: Session information from the switch :returns: manageable VF list :raises: BrocadeZoningHttpException """ try: # Check the value of the manageableLFList NVP; raise a # not-supported exception if the NVP is not available vf_list = self.get_nvp_value(session_info, zone_constant.MANAGEABLE_VF) if vf_list: vf_list = vf_list.split(",") # convert the string to a list except exception.BrocadeZoningHttpException as e: msg = (_("Error while checking whether " "VF is available for management %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) return vf_list[:-1] def change_vf_context(self, vfid, session_data): """Change the VF context in the session. :param vfid: VFID to which context should be changed. :param session_data: Session information from the switch :raises: BrocadeZoningHttpException """ try: managable_vf_list = self.get_managable_vf_list(session_data) LOG.debug("Manageable VF IDs are %(vflist)s.", {'vflist': managable_vf_list}) # proceed with changing the VF context if the VF ID can be # managed; otherwise throw an exception if vfid in managable_vf_list: headers = {zone_constant.AUTH_HEADER: self.auth_header} data = zone_constant.CHANGE_VF.format(vfid=vfid) response = self.connect(zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, data, headers) parsed_info = self.get_parsed_data(response, zone_constant.SESSION_BEGIN, zone_constant.SESSION_END) session_LF_Id = self.get_nvp_value(parsed_info, zone_constant.SESSION_LF_ID) if session_LF_Id == vfid: LOG.info(_LI("VF context is changed in the session.")) else: msg = _("Cannot change VF context in the session.") LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) else: msg = (_("Cannot change VF context, " "specified VF is not available " "in the manageable VF list %(vf_list)s.") % {'vf_list': managable_vf_list}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) except exception.BrocadeZoningHttpException as e: msg = (_("Error while changing VF context %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def get_zone_info(self): """Parse all the zone information and store it in the dictionary.""" try: self.cfgs = {} self.zones = {} self.active_cfg = '' self.alias = {} self.qlps = {} self.ifas = {} headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to gzoneinfo.htm response = self.connect(zone_constant.GET_METHOD, zone_constant.ZONE_PAGE, header=headers) # get the zone string from the response self.parsed_raw_zoneinfo = self.get_parsed_data( response,
zone_constant.ZONEINFO_BEGIN, zone_constant.ZONEINFO_END).strip("\n") LOG.debug("Original zone string from the switch: %(zoneinfo)s", {'zoneinfo': self.parsed_raw_zoneinfo}) # convert the zone string to a list zoneinfo = self.parsed_raw_zoneinfo.split() i = 0 while i < len(zoneinfo): info = zoneinfo[i] # check for the cfg delimiter if zone_constant.CFG_DELIM in info: # extract the cfg name cfg_name = info.lstrip(zone_constant.CFG_DELIM) # update the dict as # self.cfgs={cfg_name:zone_name1;zone_name2} self.cfgs.update({cfg_name: zoneinfo[i + 1]}) i = i + 2 # check for the zone delimiter elif zone_constant.ZONE_DELIM in info: # extract the zone name zone_name = info.lstrip(zone_constant.ZONE_DELIM) # update the dict as # self.zones={zone_name:members1;members2} self.zones.update({zone_name: zoneinfo[i + 1]}) i = i + 2 elif zone_constant.ALIAS_DELIM in info: alias_name = info.lstrip(zone_constant.ALIAS_DELIM) # update the dict as # self.alias={alias_name:members1;members2} self.alias.update({alias_name: zoneinfo[i + 1]}) i = i + 2 # check for quickloop zones elif zone_constant.QLP_DELIM in info: qlp_name = info.lstrip(zone_constant.QLP_DELIM) # update the map as self.qlps={qlp_name:members1;members2} self.qlps.update({qlp_name: zoneinfo[i + 1]}) i = i + 2 # check for fabric assist zones elif zone_constant.IFA_DELIM in info: ifa_name = info.lstrip(zone_constant.IFA_DELIM) # update the map as self.ifas={ifa_name:members1;members2} self.ifas.update({ifa_name: zoneinfo[i + 1]}) i = i + 2 elif zone_constant.ACTIVE_CFG_DELIM in info: # extract the active cfg name into self.active_cfg self.active_cfg = info.lstrip( zone_constant.ACTIVE_CFG_DELIM) if self.active_cfg == zone_constant.DEFAULT_CFG: self.active_cfg = "" i = i + 2 else: i = i + 1 except Exception as e: msg = (_("Error while parsing the zone information %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def is_supported_firmware(self): """Check whether the firmware version is v6.4 or higher. This API checks whether the firmware version meets the plug-in support level. Only the major and minor versions are checked. :returns: True if firmware is supported else False. :raises: BrocadeZoningHttpException """ isfwsupported = False try: headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to switch.html response = self.connect(zone_constant.GET_METHOD, zone_constant.SWITCH_PAGE, header=headers) parsed_data = self.get_parsed_data(response, zone_constant.SWITCHINFO_BEGIN, zone_constant.SWITCHINFO_END) # get the firmware version nvp value fwVersion = self.get_nvp_value( parsed_data, zone_constant.FIRMWARE_VERSION).lstrip('v') ver = fwVersion.split(".") LOG.debug("Firmware version: %(version)s.", {'version': ver}) # concatenate the major and minor versions ("6" + "4" -> 64), so # anything newer than v6.3 passes the check if int(ver[0] + ver[1]) > 63: isfwsupported = True except Exception as e: msg = (_("Error while checking the firmware version %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) return isfwsupported def get_active_zone_set(self): """Return the active zone configuration. Return active zoneset from fabric. When none of the configurations are active, an empty map is returned.
:returns: Map -- active zone set map in the following format { 'zones': {'openstack50060b0000c26604201900051ee8e329': ['50060b0000c26604', '201900051ee8e329'] }, 'active_zone_config': 'OpenStack_Cfg' } :raises: BrocadeZoningHttpException """ active_zone_set = {} zones_map = {} try: self.get_zone_info() # get the zone information of the switch if self.active_cfg != '': # get the zones list of the active_cfg zones_list = self.cfgs[self.active_cfg].split(";") for n in zones_list: # build the zones map zones_map.update( {n: self.zones[n].split(";")}) # Format map in the correct format active_zone_set = { "active_zone_config": self.active_cfg, "zones": zones_map} return active_zone_set except Exception as e: msg = (_("Failed getting active zone set from fabric %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def add_zones(self, add_zones_info, activate, active_zone_set=None): """Add zone configuration. This method will add the zone configuration passed by the user. :param add_zones_info: Zone names mapped to members. Zone members are colon separated but case-insensitive { zonename1:[zonemember1,zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g.: {'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } :param activate: True will activate the zone config. :param active_zone_set: Active zone set dict retrieved from get_active_zone_set method :raises: BrocadeZoningHttpException """ LOG.debug("Add zones - zones passed: %(zones)s.", {'zones': add_zones_info}) cfg_name = zone_constant.CFG_NAME cfgs = self.cfgs zones = self.zones alias = self.alias qlps = self.qlps ifas = self.ifas active_cfg = self.active_cfg # update the active_cfg, zones and cfgs map with new information zones, cfgs, active_cfg = self.add_update_zones_cfgs(cfgs, zones, add_zones_info, active_cfg, cfg_name) # Build the zonestring with updated maps data = self.form_zone_string(cfgs, active_cfg, zones, alias, qlps, ifas, activate) LOG.debug("Add zones: final zone string after applying " "to the switch: %(zonestring)s", {'zonestring': data}) # Post the zone data to the switch error_code, error_msg = self.post_zone_data(data) if error_code != "0": msg = (_("Applying the zones and cfgs to the switch failed " "(error code=%(err_code)s error msg=%(err_msg)s).") % {'err_code': error_code, 'err_msg': error_msg}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def form_zone_string(self, cfgs, active_cfg, zones, alias, qlps, ifas, activate): """Build the zone string in the required format. :param cfgs: cfgs map :param active_cfg: Active cfg string :param zones: zones map :param alias: alias map :param qlps: qlps map :param ifas: ifas map :param activate: True will activate config.
:returns: zonestring in the required format :raises: BrocadeZoningHttpException """ try: zoneString = zone_constant.ZONE_STRING_PREFIX # the saveonly flag is derived from the activate flag saveonly = "false" if activate is True else "true" # Form the zone string from each of the dictionaries for cfg in cfgs.keys(): zoneString += (zone_constant.CFG_DELIM + cfg + " " + cfgs.get(cfg) + " ") for zone in zones.keys(): zoneString += (zone_constant.ZONE_DELIM + zone + " " + zones.get(zone) + " ") for al in alias.keys(): zoneString += (zone_constant.ALIAS_DELIM + al + " " + alias.get(al) + " ") for qlp in qlps.keys(): zoneString += (zone_constant.QLP_DELIM + qlp + " " + qlps.get(qlp) + " ") for ifa in ifas.keys(): zoneString += (zone_constant.IFA_DELIM + ifa + " " + ifas.get(ifa) + " ") # append the active_cfg string only if it is not null and activate # is true if active_cfg != "" and activate: zoneString += (zone_constant.ACTIVE_CFG_DELIM + active_cfg + " null ") # Build the final zone string zoneString += zone_constant.ZONE_END_DELIM + saveonly except Exception as e: msg = (_("Exception while forming the zone string: %s.") % six.text_type(e)) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) return zoneString def add_update_zones_cfgs(self, cfgs, zones, add_zones_info, active_cfg, cfg_name): """Add or update the zones and cfgs map based on the new zones info. Returns the updated zones, cfgs, and active_cfg. :param cfgs: Existing cfgs map :param active_cfg: Existing active cfg string :param zones: Existing zones map :param add_zones_info: Zones map to add :param cfg_name: New cfg name :returns: updated zones, zone configs map, and active_cfg """ cfg_string = "" delimiter = "" zones_in_active_cfg = "" try: if active_cfg: zones_in_active_cfg = cfgs.get(active_cfg) for zone_name, members in add_zones_info.items(): # if the new zone is not in the active_cfg, build the cfg # string with the new zones if zone_name not in zones_in_active_cfg: cfg_string += delimiter + zone_name delimiter = ";" # update the zone string if the zone name already exists and # doesn't already have the new members if (zone_name in zones and set(members) != set(zones.get(zone_name).split(";"))): # update the existing zone with new members zones.update( {zone_name: (";".join(members) + ";" + zones.get(zone_name))}) else: # add a new zone with the members zones.update({zone_name: ";".join(members)}) # update cfg string if active_cfg: if cfg_string: # update the existing active cfg map with the cfgs string cfgs.update( {active_cfg: cfg_string + ";" + cfgs.get(active_cfg)}) else: # create a new cfg and update the cfgs map with it active_cfg = cfg_name cfgs.update({cfg_name: cfg_string}) except Exception as e: msg = (_("Error while updating the new zones and cfgs " "in the zone string. Error %(description)s.") % {'description': six.text_type(e)}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) return zones, cfgs, active_cfg def is_vf_enabled(self): """Check whether VF is enabled.
:returns: Boolean indicating whether VF is enabled, and the session information """ session_info = self.get_session_info() parsed_data = self.get_parsed_data(session_info, zone_constant.SESSION_BEGIN, zone_constant.SESSION_END) try: is_vf_enabled = bool(self.get_nvp_value( parsed_data, zone_constant.VF_ENABLED)) except exception.BrocadeZoningHttpException: is_vf_enabled = False parsed_data = None return is_vf_enabled, parsed_data def get_nameserver_info(self): """Get name server data from fabric. Return the connected node port WWN list (local and remote) for the given switch fabric. :returns: name server information. """ nsinfo = [] headers = {zone_constant.AUTH_HEADER: self.auth_header} response = self.connect(zone_constant.GET_METHOD, zone_constant.NS_PAGE, header=headers) # GET request to nsinfo.html parsed_raw_zoneinfo = self.get_parsed_data( response, zone_constant.NSINFO_BEGIN, zone_constant.NSINFO_END).strip("\t\n\r") # build the name server information in the correct format for line in parsed_raw_zoneinfo.splitlines(): # line.find() returns -1 when the delimiter is missing, so test # that before offsetting past the delimiter delim_index = line.find(zone_constant.NS_DELIM) if delim_index != -1: start_index = delim_index + 7 nsinfo.extend([line[start_index:start_index + 23].strip()]) return nsinfo def delete_update_zones_cfgs( self, cfgs, zones, delete_zones_info, active_cfg): """Remove the specified zones from the zones and cfgs maps. Return the updated zones, cfgs and active_cfg after deleting the required items. :param cfgs: Existing cfgs map :param active_cfg: Existing active cfg string :param zones: Existing zones map :param delete_zones_info: Semicolon-separated string of zone names to delete :returns: updated zones, zone config sets, and active zone config :raises: BrocadeZoningHttpException """ try: delete_zones_info = delete_zones_info.split(";") for zone in delete_zones_info: # remove the zones from the zone map zones.pop(zone) # iterate over all the cfgs; this check is needed because the # SSH driver only iterates over the active cfg for k, v in cfgs.items(): v = v.split(";") if zone in v: # remove the zone from the cfg string v.remove(zone) # if all the zones are removed, remove the cfg from the # cfg map if not v: cfgs.pop(k) # update the original cfg with the updated string else: cfgs[k] = ";".join(v) # if all the zones are removed in the active_cfg, update it with # an empty string if active_cfg not in cfgs: active_cfg = "" except KeyError as e: msg = (_("Error while removing the zones and cfgs " "in the zone string: %(description)s.") % {'description': six.text_type(e)}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) return zones, cfgs, active_cfg def delete_zones(self, delete_zones_info, activate, active_zone_set=None): """Delete zones from fabric. Deletes zones in the active zone config.
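For example (hypothetical zone names; delete_update_zones_cfgs above splits the payload on semicolons): delete_zones('openstackzone1;openstackzone2', True) would drop both zones from the maps and push the updated zone string to the switch.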
:param delete_zones_info: Zone names separated by semicolons :param activate: True/False :param active_zone_set: the active zone set dict retrieved from get_active_zone_set method """ cfgs = self.cfgs zones = self.zones alias = self.alias qlps = self.qlps ifas = self.ifas active_cfg = self.active_cfg # update the active_cfg, zones and cfgs map with the required # information being removed zones, cfgs, active_cfg = self.delete_update_zones_cfgs( cfgs, zones, delete_zones_info, active_cfg) # Build the zonestring with updated maps data = self.form_zone_string(cfgs, active_cfg, zones, alias, qlps, ifas, activate) LOG.debug("Delete zones: final zone string after applying " "to the switch: %(zonestring)s", {'zonestring': data}) error_code, error_msg = self.post_zone_data(data) if error_code != "0": msg = (_("Applying the zones and cfgs to the switch failed " "(error code=%(err_code)s error msg=%(err_msg)s).") % {'err_code': error_code, 'err_msg': error_msg}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def post_zone_data(self, data): """Send a POST request to the switch with the payload. :param data: payload to be sent to the switch """ status = "progress" parsed_data_txn = "" headers = {zone_constant.AUTH_HEADER: self.auth_header} LOG.debug("Posting the zone string to the switch.") # POST request to gzoneinfo with zonestring as payload response = self.connect(zone_constant.POST_METHOD, zone_constant.ZONE_PAGE, data, headers) parsed_data = self.get_parsed_data(response, zone_constant.ZONE_TX_BEGIN, zone_constant.ZONE_TX_END) transID = self.get_nvp_value(parsed_data, zone_constant.ZONE_TX_ID) transURL = zone_constant.ZONE_TRAN_STATUS.format(txnId=transID) timeout = 360 sleep_time = 3 time_elapsed = 0 # poll the transaction status until it is done or the timeout expires while status != "done": txn_response = self.connect( zone_constant.GET_METHOD, transURL, "", headers) parsed_data_txn = self.get_parsed_data(txn_response, zone_constant.ZONE_TX_BEGIN, zone_constant.ZONE_TX_END) status = self.get_nvp_value(parsed_data_txn, zone_constant.ZONE_TX_STATUS) time.sleep(sleep_time) time_elapsed += sleep_time if time_elapsed > timeout: break if status != "done": errorCode = -1 errorMessage = ("Timed out, waiting for zone transaction on " "the switch to complete") else: errorCode = self.get_nvp_value(parsed_data_txn, zone_constant.ZONE_ERROR_CODE) errorMessage = self.get_nvp_value(parsed_data_txn, zone_constant.ZONE_ERROR_MSG) return errorCode, errorMessage def check_change_vf_context(self): """Check whether the VF-related configuration is valid and proceed.""" vf_enabled, session_data = self.is_vf_enabled() # vf_enabled will be False if VF is disabled or not supported LOG.debug("VF enabled on switch: %(vfenabled)s.", {'vfenabled': vf_enabled}) # Change the VF context in the session if vf_enabled: if self.vfid is None: msg = _("No VF ID is defined in the configuration file.") LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) elif self.vfid != 128: self.change_vf_context(self.vfid, session_data) else: if self.vfid is not None: msg = _("VF is not enabled.") LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def _disconnect(self): """Disconnect from the switch using HTTP/HTTPS protocol.
:raises: BrocadeZoningHttpException """ try: headers = {zone_constant.AUTH_HEADER: self.auth_header} response = self.connect(zone_constant.GET_METHOD, zone_constant.LOGOUT_PAGE, header=headers) return response except requests.exceptions.ConnectionError as e: msg = (_("Error while connecting the switch %(switch_id)s " "with protocol %(protocol)s. Error: %(error)s.") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'error': six.text_type(e)}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) except exception.BrocadeZoningHttpException as ex: msg = (_("Unexpected status code from the switch %(switch_id)s " "with protocol %(protocol)s for url %(page)s. " "Error: %(error)s") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'page': zone_constant.LOG_OUT_PAGE, 'error': six.text_type(ex)}) LOG.error(msg) raise exception.BrocadeZoningHttpException(reason=msg) def cleanup(self): """Close session.""" self._disconnect() self.session.close() cinder-8.0.0/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py0000664000567000056710000005064512701406250027324 0ustar jenkinsjenkins00000000000000# (c) Copyright 2015 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Brocade Zone Driver is responsible to manage access control using FC zoning for Brocade FC fabrics. This is a concrete implementation of FCZoneDriver interface implementing add_connection and delete_connection interfaces. **Related Flags** :zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True :zone_name_prefix: Used by: class: 'FCZoneDriver'. Defaults to 'openstack' """ from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils import six import string from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts from cinder.zonemanager.drivers.brocade import fc_zone_constants from cinder.zonemanager.drivers import driver_utils from cinder.zonemanager.drivers import fc_zone_driver from cinder.zonemanager import utils LOG = logging.getLogger(__name__) SUPPORTED_CHARS = string.ascii_letters + string.digits + '_' brcd_opts = [ cfg.StrOpt('brcd_sb_connector', default=fc_zone_constants.HTTP.upper(), help='South bound connector for zoning operation'), ] CONF = cfg.CONF CONF.register_opts(brcd_opts, group='fc-zone-manager') class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): """Brocade FC zone driver implementation. OpenStack Fibre Channel zone driver to manage FC zoning in Brocade SAN fabrics. 
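A minimal illustrative cinder.conf fragment that wires this driver in (the option names are the ones registered by this module and by cinder.zonemanager.fc_zone_manager; the fabric name is a placeholder): [fc-zone-manager] zone_driver = cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver brcd_sb_connector = HTTP fc_fabric_names = fabricA -- each name listed in fc_fabric_names must have a matching per-fabric config section; see brcd_fabric_opts for the options such a section may contain.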
Version history: 1.0 - Initial Brocade FC zone driver 1.1 - Implements performance enhancements 1.2 - Added support for friendly zone name 1.3 - Added HTTP connector support 1.4 - Adds support to zone in Virtual Fabrics """ VERSION = "1.4" def __init__(self, **kwargs): super(BrcdFCZoneDriver, self).__init__(**kwargs) self.sb_conn_map = {} self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(brcd_opts) # Adding a hack to handle parameters from super classes # in case configured with multiple back ends. fabric_names = self.configuration.safe_get('fc_fabric_names') base_san_opts = [] if not fabric_names: base_san_opts.append( cfg.StrOpt('fc_fabric_names', help='Comma separated list of fibre channel ' 'fabric names. This list of names is used to' ' retrieve other SAN credentials for connecting' ' to each SAN fabric' )) if len(base_san_opts) > 0: CONF.register_opts(base_san_opts) self.configuration.append_config_values(base_san_opts) fc_fabric_names = self.configuration.fc_fabric_names fabric_names = [x.strip() for x in fc_fabric_names.split(',')] # There can be more than one SAN in the network and we need to # get credentials for each SAN. if fabric_names: self.fabric_configs = fabric_opts.load_fabric_configurations( fabric_names) @lockutils.synchronized('brcd', 'fcfabric-', True) def add_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of add_connection. Based on zoning policy and state of each I-T pair, list of zone members are created and pushed to the fabric to add zones. The new zones created or zones updated are activated based on isActivate flag set in cinder.conf returned by volume driver after attach operation. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.info(_LI("BrcdFCZoneDriver - Add connection for fabric " "%(fabric)s for I-T map: %(i_t_map)s"), {'fabric': fabric, 'i_t_map': initiator_target_map}) zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'zoning_policy') zone_name_prefix = self.fabric_configs[fabric].safe_get( 'zone_name_prefix') zone_activate = self.fabric_configs[fabric].safe_get( 'zone_activate') if zoning_policy_fab: zoning_policy = zoning_policy_fab LOG.info(_LI("Zoning policy for Fabric %(policy)s"), {'policy': zoning_policy}) if (zoning_policy != 'initiator' and zoning_policy != 'initiator-target'): LOG.info(_LI("Zoning policy is not valid, " "no zoning will be performed.")) return client = self._get_southbound_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(client) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # based on zoning policy, create zone member list and # push changes to fabric. for initiator_key in initiator_target_map.keys(): zone_map = {} initiator = initiator_key.lower() target_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': for target in target_list: zone_members = [utils.get_formatted_wwn(initiator), utils.get_formatted_wwn(target)] zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) if (len(cfgmap_from_fabric) == 0 or ( zone_name not in zone_names)): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone already exists. LOG.info(_LI("Zone exists in I-T mode. 
Skipping " "zone creation for %(zonename)s"), {'zonename': zone_name}) elif zoning_policy == 'initiator': zone_members = [utils.get_formatted_wwn(initiator)] for target in target_list: zone_members.append(utils.get_formatted_wwn(target)) zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) if len(zone_names) > 0 and (zone_name in zone_names): zone_members = zone_members + filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) zone_map[zone_name] = zone_members LOG.info(_LI("Zone map to add: %(zonemap)s"), {'zonemap': zone_map}) if len(zone_map) > 0: try: client.add_zones( zone_map, zone_activate, cfgmap_from_fabric) except (exception.BrocadeZoningCliException, exception.BrocadeZoningHttpException) as brocade_ex: raise exception.FCZoneDriverException(brocade_ex) except Exception: msg = _("Failed to add zoning configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Zones added successfully: %(zonemap)s", {'zonemap': zone_map}) client.cleanup() @lockutils.synchronized('brcd', 'fcfabric-', True) def delete_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of delete_connection. Based on zoning policy and state of each I-T pair, list of zones are created for deletion. The zones are either updated deleted based on the policy and attach/detach state of each I-T pair. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.info(_LI("BrcdFCZoneDriver - Delete connection for fabric " "%(fabric)s for I-T map: %(i_t_map)s"), {'fabric': fabric, 'i_t_map': initiator_target_map}) zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'zoning_policy') zone_name_prefix = self.fabric_configs[fabric].safe_get( 'zone_name_prefix') zone_activate = self.fabric_configs[fabric].safe_get( 'zone_activate') if zoning_policy_fab: zoning_policy = zoning_policy_fab LOG.info(_LI("Zoning policy for fabric %(policy)s"), {'policy': zoning_policy}) conn = self._get_southbound_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(conn) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # Based on zoning policy, get zone member list and push changes to # fabric. This operation could result in an update for zone config # with new member list or deleting zones from active cfg. LOG.debug("zone config from Fabric: %(cfgmap)s", {'cfgmap': cfgmap_from_fabric}) for initiator_key in initiator_target_map.keys(): initiator = initiator_key.lower() formatted_initiator = utils.get_formatted_wwn(initiator) zone_map = {} zones_to_delete = [] t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': # In this case, zone needs to be deleted. for t in t_list: target = t.lower() zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) LOG.debug("Zone name to delete: %(zonename)s", {'zonename': zone_name}) if len(zone_names) > 0 and (zone_name in zone_names): # delete zone. 
LOG.debug("Added zone to delete to list: %(zonename)s", {'zonename': zone_name}) zones_to_delete.append(zone_name) elif zoning_policy == 'initiator': zone_members = [formatted_initiator] for t in t_list: target = t.lower() zone_members.append(utils.get_formatted_wwn(target)) zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) if (zone_names and (zone_name in zone_names)): filtered_members = filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) # The assumption here is that initiator is always there # in the zone as it is 'initiator' policy. We find the # filtered list and if it is non-empty, add initiator # to it and update zone if filtered list is empty, we # remove that zone. LOG.debug("Zone delete - initiator mode: " "filtered targets: %(targets)s", {'targets': filtered_members}) if filtered_members: filtered_members.append(formatted_initiator) LOG.debug("Filtered zone members to update: " "%(members)s", {'members': filtered_members}) zone_map[zone_name] = filtered_members LOG.debug("Filtered zone map to update: %(zonemap)s", {'zonemap': zone_map}) else: zones_to_delete.append(zone_name) else: LOG.warning(_LW("Zoning policy not recognized: %(policy)s"), {'policy': zoning_policy}) LOG.debug("Final zone map to update: %(zonemap)s", {'zonemap': zone_map}) LOG.debug("Final zone list to delete: %(zones)s", {'zones': zones_to_delete}) try: # Update zone membership. if zone_map: conn.add_zones( zone_map, zone_activate, cfgmap_from_fabric) # Delete zones ~sk. if zones_to_delete: zone_name_string = '' num_zones = len(zones_to_delete) for i in range(0, num_zones): if i == 0: zone_name_string = ( '%s%s' % ( zone_name_string, zones_to_delete[i])) else: zone_name_string = '%s;%s' % ( zone_name_string, zones_to_delete[i]) conn.delete_zones( zone_name_string, zone_activate, cfgmap_from_fabric) except (exception.BrocadeZoningCliException, exception.BrocadeZoningHttpException) as brocade_ex: raise exception.FCZoneDriverException(brocade_ex) except Exception: msg = _("Failed to update or delete zoning " "configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) finally: conn.cleanup() def get_san_context(self, target_wwn_list): """Lookup SAN context for visible end devices. Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ formatted_target_list = [] fabric_map = {} fc_fabric_names = self.configuration.fc_fabric_names fabrics = [x.strip() for x in fc_fabric_names.split(',')] LOG.debug("Fabric List: %(fabrics)s", {'fabrics': fabrics}) LOG.debug("Target WWN list: %(targetwwns)s", {'targetwwns': target_wwn_list}) if len(fabrics) > 0: for t in target_wwn_list: formatted_target_list.append(utils.get_formatted_wwn(t)) LOG.debug("Formatted target WWN list: %(targetlist)s", {'targetlist': formatted_target_list}) for fabric_name in fabrics: conn = self._get_southbound_client(fabric_name) # Get name server data from fabric and get the targets # logged in. nsinfo = None try: nsinfo = conn.get_nameserver_info() LOG.debug("Name server info from fabric: %(nsinfo)s", {'nsinfo': nsinfo}) except (exception.BrocadeZoningCliException, exception.BrocadeZoningHttpException): if not conn.is_supported_firmware(): msg = _("Unsupported firmware on switch %s. 
Make sure " "switch is running firmware v6.4 or higher" ) % conn.switch_ip LOG.exception(msg) raise exception.FCZoneDriverException(msg) with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error getting name server info.")) except Exception: msg = _("Failed to get name server info.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) finally: conn.cleanup() visible_targets = filter( lambda x: x in formatted_target_list, nsinfo) if visible_targets: LOG.info(_LI("Filtered targets for SAN is: %(targets)s"), {'targets': visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): visible_targets[idx] = str( visible_targets[idx]).replace(':', '') fabric_map[fabric_name] = visible_targets else: LOG.debug("No targets found in the nameserver " "for fabric: %(fabric)s", {'fabric': fabric_name}) LOG.debug("Return SAN context output: %(fabricmap)s", {'fabricmap': fabric_map}) return fabric_map def _get_active_zone_set(self, conn): cfgmap = None try: cfgmap = conn.get_active_zone_set() except (exception.BrocadeZoningCliException, exception.BrocadeZoningHttpException): if not conn.is_supported_firmware(): msg = _("Unsupported firmware on switch %s. Make sure " "switch is running firmware v6.4 or higher" ) % conn.switch_ip LOG.error(msg) raise exception.FCZoneDriverException(msg) with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error getting name server info.")) except Exception as e: msg = (_("Failed to retrieve active zoning configuration %s") % six.text_type(e)) LOG.error(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Active zone set from fabric: %(cfgmap)s", {'cfgmap': cfgmap}) return cfgmap def _get_southbound_client(self, fabric): """Implementation to get SouthBound Connector. South bound connector will be dynamically selected based on the configuration :param fabric: fabric information """ fabric_info = self.fabric_configs[fabric] fc_ip = fabric_info.safe_get('fc_fabric_address') sb_connector = fabric_info.safe_get('fc_southbound_protocol') if sb_connector is None: sb_connector = self.configuration.brcd_sb_connector try: conn_factory = importutils.import_object( "cinder.zonemanager.drivers.brocade." "brcd_fc_zone_connector_factory." "BrcdFCZoneFactory") client = conn_factory.get_connector(fabric_info, sb_connector.upper()) except Exception: msg = _("Failed to create south bound connector for %s.") % fc_ip LOG.exception(msg) raise exception.FCZoneDriverException(msg) return client cinder-8.0.0/cinder/zonemanager/drivers/brocade/brcd_fabric_opts.py0000664000567000056710000000524512701406250026615 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_config import cfg from oslo_log import log as logging from cinder.volume import configuration brcd_zone_opts = [ cfg.StrOpt('fc_southbound_protocol', default='HTTP', choices=('SSH', 'HTTP', 'HTTPS'), help='South bound connector for the fabric.'), cfg.StrOpt('fc_fabric_address', default='', help='Management IP of fabric.'), cfg.StrOpt('fc_fabric_user', default='', help='Fabric user ID.'), cfg.StrOpt('fc_fabric_password', default='', help='Password for user.', secret=True), cfg.PortOpt('fc_fabric_port', default=22, help='Connecting port'), cfg.StrOpt('fc_fabric_ssh_cert_path', default='', help='Local SSH certificate Path.'), cfg.StrOpt('zoning_policy', default='initiator-target', help='Overridden zoning policy.'), cfg.BoolOpt('zone_activate', default=True, help='Overridden zoning activation state.'), cfg.StrOpt('zone_name_prefix', default='openstack', help='Overridden zone name prefix.'), cfg.StrOpt('fc_virtual_fabric_id', default=None, help='Virtual Fabric ID.'), cfg.StrOpt('principal_switch_wwn', default=None, deprecated_for_removal=True, help='Principal switch WWN of the fabric. This option is not ' 'used anymore.') ] CONF = cfg.CONF CONF.register_opts(brcd_zone_opts, group='BRCD_FABRIC_EXAMPLE') LOG = logging.getLogger(__name__) def load_fabric_configurations(fabric_names): fabric_configs = {} for fabric_name in fabric_names: config = configuration.Configuration(brcd_zone_opts, fabric_name) LOG.debug("Loaded FC fabric config %(fabricname)s", {'fabricname': fabric_name}) fabric_configs[fabric_name] = config return fabric_configs cinder-8.0.0/cinder/zonemanager/fczm_constants.py0000664000567000056710000000134112701406250023277 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Common constants used by FC Zone Manager. """ IT_MAP = 'initiator_target_map' DATA = 'data' HOST = 'host_name' STORAGE = 'storage_system' SYSTEM = 'system' cinder-8.0.0/cinder/zonemanager/fc_zone_manager.py0000664000567000056710000002637512701406250023377 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ ZoneManager is responsible to manage access control using FC zoning when zoning mode is set as 'fabric'. ZoneManager provides interfaces to add connection and remove connection for given initiator and target list associated with a FC volume attach and detach operation. **Related Flags** :zone_driver: Used by:class:`ZoneManager`. 
Defaults to `cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver` :zoning_policy: Used by: class: 'ZoneManager'. Defaults to 'none' """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils import six from cinder import exception from cinder.i18n import _, _LI from cinder.volume import configuration as config from cinder.zonemanager import fc_common import cinder.zonemanager.fczm_constants as zone_constant LOG = logging.getLogger(__name__) zone_manager_opts = [ cfg.StrOpt('zone_driver', default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' '.BrcdFCZoneDriver', help='FC Zone Driver responsible for zone management'), cfg.StrOpt('zoning_policy', default='initiator-target', help='Zoning policy configured by user; valid values include ' '"initiator-target" or "initiator"'), cfg.StrOpt('fc_fabric_names', help='Comma separated list of Fibre Channel fabric names.' ' This list of names is used to retrieve other SAN credentials' ' for connecting to each SAN fabric'), cfg.StrOpt('fc_san_lookup_service', default='cinder.zonemanager.drivers.brocade' '.brcd_fc_san_lookup_service.BrcdFCSanLookupService', help='FC SAN Lookup Service') ] CONF = cfg.CONF CONF.register_opts(zone_manager_opts, group='fc-zone-manager') class ZoneManager(fc_common.FCCommon): """Manages Connection control during attach/detach. Version History: 1.0 - Initial version 1.0.1 - Added __new__ for singleton 1.0.2 - Added friendly zone name """ VERSION = "1.0.2" driver = None fabric_names = [] def __new__(class_, *args, **kwargs): if not hasattr(class_, "_instance"): class_._instance = object.__new__(class_) return class_._instance def __init__(self, **kwargs): """Load the driver from the one specified in args, or from flags.""" super(ZoneManager, self).__init__(**kwargs) self.configuration = config.Configuration(zone_manager_opts, 'fc-zone-manager') self._build_driver() def _build_driver(self): zone_driver = self.configuration.zone_driver LOG.debug("Zone driver from config: %(driver)s", {'driver': zone_driver}) zm_config = config.Configuration(zone_manager_opts, 'fc-zone-manager') # Initialize vendor specific implementation of FCZoneDriver self.driver = importutils.import_object( zone_driver, configuration=zm_config) def get_zoning_state_ref_count(self, initiator_wwn, target_wwn): """Zone management state check. Performs state check for given I-T pair to return the current count of active attach for the pair. """ # TODO(sk): ref count state management count = 0 # check the state for I-T pair return count def add_connection(self, conn_info): """Add connection control. Adds connection control for the given initiator target map. 
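A minimal illustrative conn_info payload, using the key names defined in cinder.zonemanager.fczm_constants (the WWNs, host and storage names are placeholders): {'data': {'initiator_target_map': {'10008c7cff523b01': ['20240002ac000a50']}, 'host_name': 'compute-1', 'storage_system': 'array-1'}}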
initiator_target_map - each initiator WWN mapped to a list of one or more target WWN: eg: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } """ connected_fabric = None host_name = None storage_system = None try: initiator_target_map = ( conn_info[zone_constant.DATA][zone_constant.IT_MAP]) if zone_constant.HOST in conn_info[zone_constant.DATA]: host_name = conn_info[ zone_constant.DATA][ zone_constant.HOST].replace(" ", "_") if zone_constant.STORAGE in conn_info[zone_constant.DATA]: storage_system = ( conn_info[ zone_constant.DATA][ zone_constant.STORAGE].replace(" ", "_")) for initiator in initiator_target_map.keys(): target_list = initiator_target_map[initiator] LOG.debug("Target list : %(targets)s", {'targets': target_list}) # get SAN context for the target list fabric_map = self.get_san_context(target_list) LOG.debug("Fabric map after context lookup: %(fabricmap)s", {'fabricmap': fabric_map}) # iterate over each SAN and apply connection control for fabric in fabric_map.keys(): connected_fabric = fabric t_list = fabric_map[fabric] # get valid I-T map to add connection control i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, True) LOG.info(_LI("Final filtered map for fabric: %(i_t_map)s"), {'i_t_map': valid_i_t_map}) # Call driver to add connection control self.driver.add_connection(fabric, valid_i_t_map, host_name, storage_system) LOG.info(_LI("Add connection: finished iterating " "over all target list")) except Exception as e: msg = _("Failed adding connection for fabric=%(fabric)s: " "Error: %(err)s") % {'fabric': connected_fabric, 'err': six.text_type(e)} LOG.error(msg) raise exception.ZoneManagerException(reason=msg) def delete_connection(self, conn_info): """Delete connection. Updates/deletes connection control for the given initiator target map. 
initiator_target_map - each initiator WWN mapped to a list of one or more target WWN: eg: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } """ connected_fabric = None host_name = None storage_system = None try: initiator_target_map = ( conn_info[zone_constant.DATA][zone_constant.IT_MAP]) if zone_constant.HOST in conn_info[zone_constant.DATA]: host_name = conn_info[zone_constant.DATA][zone_constant.HOST] if zone_constant.STORAGE in conn_info[zone_constant.DATA]: storage_system = ( conn_info[ zone_constant.DATA][ zone_constant.STORAGE].replace(" ", "_")) for initiator in initiator_target_map.keys(): target_list = initiator_target_map[initiator] LOG.info(_LI("Delete connection target list: %(targets)s"), {'targets': target_list}) # get SAN context for the target list fabric_map = self.get_san_context(target_list) LOG.debug("Delete connection fabric map from SAN " "context: %(fabricmap)s", {'fabricmap': fabric_map}) # iterate over each SAN and apply connection control for fabric in fabric_map.keys(): connected_fabric = fabric t_list = fabric_map[fabric] # get valid I-T map to add connection control i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, False) LOG.info(_LI("Final filtered map for delete connection: " "%(i_t_map)s"), {'i_t_map': valid_i_t_map}) # Call driver to delete connection control if len(valid_i_t_map) > 0: self.driver.delete_connection(fabric, valid_i_t_map, host_name, storage_system) LOG.debug("Delete connection - finished iterating over all" " target list") except Exception as e: msg = _("Failed removing connection for fabric=%(fabric)s: " "Error: %(err)s") % {'fabric': connected_fabric, 'err': six.text_type(e)} LOG.error(msg) raise exception.ZoneManagerException(reason=msg) def get_san_context(self, target_wwn_list): """SAN lookup for end devices. Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ fabric_map = self.driver.get_san_context(target_wwn_list) LOG.debug("Got SAN context: %(fabricmap)s", {'fabricmap': fabric_map}) return fabric_map def get_valid_initiator_target_map(self, initiator_target_map, add_control): """Reference count check for end devices. Looks up the reference count for each initiator-target pair from the map and returns a filtered list based on the operation type add_control - operation type can be true for add connection control and false for remove connection control """ filtered_i_t_map = {} for initiator in initiator_target_map.keys(): t_list = initiator_target_map[initiator] for target in t_list: count = self.get_zoning_state_ref_count(initiator, target) if add_control: if count > 0: t_list.remove(target) # update count = count + 1 else: if count > 1: t_list.remove(target) # update count = count - 1 if t_list: filtered_i_t_map[initiator] = t_list else: LOG.info(_LI("No targets to add or remove connection for " "initiator: %(init_wwn)s"), {'init_wwn': initiator}) return filtered_i_t_map cinder-8.0.0/cinder/zonemanager/fc_san_lookup_service.py0000664000567000056710000000674212701406250024620 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Base Lookup Service for name server lookup to find the initiator to target port mapping for available SAN contexts. Vendor specific lookup classes are expected to implement the interfaces defined in this class. """ from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _, _LE from cinder.volume import configuration as config from cinder.zonemanager import fc_common from cinder.zonemanager import fc_zone_manager LOG = logging.getLogger(__name__) class FCSanLookupService(fc_common.FCCommon): """Base Lookup Service. Base Lookup Service for name server lookup to find the initiator to target port mapping for available SAN contexts. """ lookup_service = None def __init__(self, **kwargs): super(FCSanLookupService, self).__init__(**kwargs) opts = fc_zone_manager.zone_manager_opts self.configuration = config.Configuration(opts, 'fc-zone-manager') def get_device_mapping_from_network(self, initiator_list, target_list): """Get device mapping from FC network. Gets a filtered list of initiator ports and target ports for each SAN available. :param initiator_list list of initiator port WWN :param target_list list of target port WWN :returns: device wwn map in following format { : { 'initiator_port_wwn_list': ('200000051E55A100', '200000051E55A121'..) 'target_port_wwn_list': ('100000051E55A100', '100000051E55A121'..) } } :raise: Exception when a lookup service implementation is not specified in cinder.conf:fc_san_lookup_service """ # Initialize vendor specific implementation of FCZoneDriver if (self.configuration.fc_san_lookup_service): lookup_service = self.configuration.fc_san_lookup_service LOG.debug("Lookup service to invoke: " "%s", lookup_service) self.lookup_service = importutils.import_object( lookup_service, configuration=self.configuration) else: msg = _("Lookup service not configured. Config option for " "fc_san_lookup_service needs to specify a concrete " "implementation of the lookup service.") LOG.error(msg) raise exception.FCSanLookupServiceException(msg) try: device_map = self.lookup_service.get_device_mapping_from_network( initiator_list, target_list) except Exception as e: LOG.exception(_LE('Unable to get device mapping from network.')) raise exception.FCSanLookupServiceException(e) return device_map cinder-8.0.0/cinder/i18n.py0000664000567000056710000000260212701406250016516 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See http://docs.openstack.org/developer/oslo.i18n/usage.html . 
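A minimal usage sketch, mirroring how modules in this tree use these helpers (LOG and volume_id are assumed to exist in the caller): from cinder.i18n import _, _LE msg = _("Failed to copy volume %s.") % volume_id LOG.error(_LE("Copy operation failed."))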
""" import oslo_i18n as i18n DOMAIN = 'cinder' _translators = i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def enable_lazy(enable=True): return i18n.enable_lazy(enable) def translate(value, user_locale=None): return i18n.translate(value, user_locale) def get_available_languages(): return i18n.get_available_languages(DOMAIN) cinder-8.0.0/cinder/locale/0000775000567000056710000000000012701406543016631 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/ru/0000775000567000056710000000000012701406543017257 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/ru/LC_MESSAGES/0000775000567000056710000000000012701406543021044 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/ru/LC_MESSAGES/cinder.po0000664000567000056710000150663412701406257022671 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # eshumakher, 2013 # FIRST AUTHOR , 2011 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Grigory Mokhin , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev19\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:34+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-21 02:35+0000\n" "Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "Версия OpenStack Cinder: %(version)s\n" #, python-format msgid " but size is now %d" msgstr " но сейчас размер: %d" #, python-format msgid " but size is now %d." msgstr " но сейчас размер %d." msgid " or " msgstr "или " #, python-format msgid "%(attr)s is not set." msgstr "%(attr)s не задан." #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing не поддерживает управление томом, подключенным к " "хостам. Отключите том от существующих хостов перед импортом" #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "Результат: %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s: доступ запрещен." 
#, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: Сбой с непредвиденным выводом CLI.\n" " Команда: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "Код состояния: %(_status)s\n" "Тело: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, subjectAltName: %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s: создание NetworkPortal: убедитесь, что порт %(port)d на IP-" "адресе %(ip)s не занят другой службой." #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "" "Минимальное число символов в %(name)s должно составлять %(min_length)s." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s содержит более %(max_length)s символов." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: не удалось создать резервную копию %(bck_id)s, том %(vol_id)s. " "Объект резервной копии находится в неожиданном режиме. Поддерживаются " "резервные копии файлов и образов. Фактический режим: %(vol_mode)s." #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "Служба %(service)s не находится в состоянии %(status)s в устройстве хранения " "%(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s должно быть <= %(max_value)d" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s должен быть >= %(min_value)d" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "Значение %(worker_name)s %(workers)d недопустимо. Значение должно быть " "больше 0." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "В результате нет элемента \"data\" %s." #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "%s недоступен. Убедитесь, что GPFS активна и файловая система смонтирована." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "Размер %s нельзя изменить с помощью операции дублирования, так как он не " "содержит блоков." #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "Размер %s нельзя изменить с помощью операции дублирования, так как он " "находится на сжатом томе" #, python-format msgid "%s configuration option is not set." msgstr "Не задан параметр конфигурации %s." #, python-format msgid "%s does not exist." msgstr "%s не существует." #, python-format msgid "%s is not a directory." msgstr "%s не является каталогом." 
#, python-format msgid "%s is not a string or unicode" msgstr "%s не является строкой или unicode" #, python-format msgid "%s is not installed" msgstr "%s не установлен" #, python-format msgid "%s is not installed." msgstr "%s не установлен." #, python-format msgid "%s is not set" msgstr "%s - не множество" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" "Значение %s не задано. Оно необходимо для правильной работы устройства " "репликации." #, python-format msgid "%s is not set." msgstr "%s не задан." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s должно быть допустимым образом raw или qcow2." #, python-format msgid "%s must be an absolute path." msgstr "%s должен быть абсолютным путем." #, python-format msgid "%s must be an integer." msgstr "%s должен быть целым числом." #, python-format msgid "%s not set in cinder.conf" msgstr "%s не задан в cinder.conf" #, python-format msgid "%s not set." msgstr "%s не задан." #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s' недопустим для flashsystem_connection_protocol в файле " "конфигурации. Допустимые значения: %(enabled)s." msgid "'active' must be present when writing snap_info." msgstr "Должно быть active при записи snap_info." msgid "'consistencygroup_id' must be specified" msgstr "Необходимо указать consistencygroup_id" msgid "'qemu-img info' parsing failed." msgstr "Ошибка анализа 'qemu-img info'." msgid "'status' must be specified." msgstr "Должно быть указано значение status." msgid "'volume_id' must be specified" msgstr "Должен быть указан параметр volume_id" msgid "'{}' object has no attribute '{}'" msgstr "Объект '{}' не имеет атрибута '{}'" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(Команда: %(cmd)s) (Код возврата: %(exit_code)s) (stdout: %(stdout)s) " "(stderr: %(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "LUN (HLUN) не найден. (LDEV: %(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "Параллельно выполняется другой, вероятно, конфликтующий запрос." #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "Не найден свободный LUN (HLUN). Добавьте другую группу хостов. (LDEV: " "%(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "Не удалось добавить группу хостов. (порт: %(port)s, имя: %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "Не удалось удалить группу хостов. (порт: %(port)s, ИД группы: %(gid)s, имя: " "%(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "Недопустимая группа хостов. (группа хостов: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "Не удалось удалить пару. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "Не удалось создать пару. Превышено максимальное число пар. (метод " "копирования: %(copy_method)s, P-VOL: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "Недопустимый параметр. 
(%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "Недопустимое значение параметра. (%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "Не найден пул. (ИД пул: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Недопустимое состояние моментальной копии. (состояние: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" "Для переключения после сбоя необходимо указать допустимый вторичный целевой " "объект." msgid "A volume ID or share was not specified." msgstr "Не указан ИД тома или общий ресурс." #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "Недопустимое состояние тома. (состояние: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "Сбой API %(name)s, строка ошибки %(err)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "Недопустимый формат строки версии API %(version)s. Требуется формат: " "MajorNum.MinorNum." msgid "API key is missing for CloudByte driver." msgstr "Отсутствует ключ API для драйвера CloudByte." #, python-format msgid "API response: %(response)s" msgstr "Ответ API: %(response)s" #, python-format msgid "API response: %s" msgstr "Ответ API: %s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "Версия API %(version)s не поддерживается этим методом." msgid "API version could not be determined." msgstr "Не удалось определить версию API." msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "Попытка удаления дочерних проектов с ненулевой квотой. Это не следует делать" msgid "Access list not available for public volume types." msgstr "Список прав доступа недоступен для общедоступных типов томов." msgid "Activate or deactivate QoS error." msgstr "Ошибка активации/деактивации QoS." msgid "Activate snapshot error." msgstr "Ошибка активации моментальной копии." msgid "Add FC port to host error." msgstr "Ошибка добавления порта Fibre Channel к хосту." msgid "Add fc initiator to array error." msgstr "Ошибка добавления инициатора Fibre Channel в массив." msgid "Add initiator to array error." msgstr "Ошибка добавления инициатора в массив." msgid "Add lun to cache error." msgstr "Ошибка добавления LUN в кэш." msgid "Add lun to partition error." msgstr "Ошибка добавления LUN в раздел." msgid "Add mapping view error." msgstr "Ошибка добавления представления связей." msgid "Add new host error." msgstr "Ошибка добавления нового хоста." msgid "Add port to port group error." msgstr "Ошибка добавления порта в группу портов." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "Все указанные пулы памяти для управления не существуют. Проверьте " "конфигурацию. Несуществующие пулы: %s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "Запрос версии API должен сравниваться с объектом VersionedMethod." #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "Обнаружена ошибка в SheepdogDriver. (Причина: %(reason)s)" msgid "An error has occurred during backup operation" msgstr "Ошибка операции резервного копирования" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." 
msgstr "Ошибка при попытке изменения моментальной копии '%s'." #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "Возникла ошибка при поиске тома \"%s\"." #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "Ошибка во время операции LUNcopy. Имя LUNcopy: %(luncopyname)s. Статус " "LUNcopy: %(luncopystatus)s. Состояние LUNcopy: %(luncopystate)s." #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "Возникла ошибка при чтении тома \"%s\"." #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "Возникла ошибка при записи на том \"%s\"." #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "" "Не удалось добавить пользователя CHAP iSCSI. (имя пользователя: %(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "" "Не удалось удалить пользователя CHAP iSCSI. (имя пользователя: %(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "Не удалось добавить целевое расположение iSCSI. (порт: %(port)s, псевдоним: " "%(alias)s, причина: %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "Не удалось удалить целевое расположение iSCSI. (порт: %(port)s, номер " "целевого расположения: %(tno)s, псевдоним: %(alias)s)" msgid "An unknown exception occurred." msgstr "Обнаружено неизвестное исключение." msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "" "Пользователи с маркером, связанным с подпроектом, не могут видеть квоту " "родительских объектов." msgid "Append port group description error." msgstr "Ошибка добавления описания группы портов." #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "Не удалось применить зоны и конфигурации для коммутатора (код ошибки=" "%(err_code)s сообщение об ошибке=%(err_msg)s." #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "Массив не существует или выключен. Текущее состояние массива: %s." msgid "Associate host to hostgroup error." msgstr "Ошибка связывания хоста с группой хостов." msgid "Associate host to mapping view error." msgstr "Ошибка связывания хоста с представлением связей." msgid "Associate initiator to host error." msgstr "Ошибка связывания инициатора с хостом." msgid "Associate lun to QoS error." msgstr "Ошибка связывания LUN с QoS." msgid "Associate lun to lungroup error." msgstr "Ошибка связывания LUN с группой LUN." msgid "Associate lungroup to mapping view error." msgstr "Ошибка связывания группы LUN с представлением связей." msgid "Associate portgroup to mapping view error." msgstr "Ошибка связывания группы портов с представлением связей." msgid "At least one valid iSCSI IP address must be set." msgstr "Необходимо указать хотя бы один допустимый IP-адрес iSCSI." #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "Попытка передачи %s с недопустимым ключом авторизации." #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "Не найдена информация группы идентификации [%s] в хранилище CloudByte." 
msgid "Auth user details not found in CloudByte storage." msgstr "" "В хранилище CloudByte не найдена информация о пользователе для идентификации." msgid "Authentication error" msgstr "Ошибка аутентификации" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "Ошибка идентификации. Проверьте идентификационные данные. Код ошибки %s." msgid "Authorization error" msgstr "Ошибка авторизации" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "Зона доступности %(s_az)s недопустима." msgid "Available categories:" msgstr "Доступные категории:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "Спецификации QoS базовой системы не поддерживаются в этом семействе систем " "хранения и версии ONTAP." #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "Базовый сервер не существует (%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "" "Переключение базовой системы после сбоя уже выполнено. Обратное переключение " "невозможно." #, python-format msgid "Backend reports: %(message)s" msgstr "Отчеты базовой программы: %(message)s" msgid "Backend reports: item already exists" msgstr "Отчеты базовой программы: элемент уже существует" msgid "Backend reports: item not found" msgstr "Отчеты базовой программы: элемент не найден" msgid "Backend server not NaServer." msgstr "Базовая программа не NaServer." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "Тайм-аут повторов службы базовой программы: %(timeout)s с" msgid "Backend storage did not configure fiber channel target." msgstr "Память базовой системы не настроила цель оптоволоконного канала." msgid "Backing up an in-use volume must use the force flag." msgstr "" "Для резервного копирования используемого тома требуется флаг принудительного " "выполнения." #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "Не удалось найти резервную копию %(backup_id)s." msgid "Backup RBD operation failed" msgstr "Сбой операции резервного копирования RBD" msgid "Backup already exists in database." msgstr "Резервная копия уже есть в базе данных." #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "Драйвер резервного копирования выдал ошибку: %(message)s" msgid "Backup id required" msgstr "Требуется ИД резервной копии" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "" "Резервное копирование не поддерживается для томов GlusterFS с моментальными " "копиями." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "" "Резервное копирование поддерживается только для томов SOFS с форматированием " "raw без базового файла." msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "" "Резервное копирование поддерживается только для томов GlusterFS с " "форматированием raw." msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "" "Резервное копирование поддерживается только для томов SOFS с форматированием " "raw." msgid "Backup operation of an encrypted volume failed." msgstr "Операция резервного копирования зашифрованного тома не выполнена." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Служба резервного копирования %(configured_service)s не поддерживает " "проверку. ИД резервной копии %(id)s не проверен. Пропуск проверки." 
#, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "Служба резервного копирования %(service)s не поддерживает проверку. ИД " "резервной копии %(id)s не проверен. Пропуск сброса." #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "" "Резервная копия должна иметь только одну моментальную копию, а имеет %s" msgid "Backup status must be available" msgstr "Состояние резервной копии должно быть доступным" #, python-format msgid "Backup status must be available and not %s." msgstr "Состояние резервной копии должно быть available, а не '%s'." msgid "Backup status must be available or error" msgstr "Требуемое состояние резервной копии: доступен или ошибка" msgid "Backup to be restored has invalid size" msgstr "Резервная копия для восстановления имеет недопустимый размер" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "Возвращена неверная строка состояния: %(arg)s." #, python-format msgid "Bad key(s) in quota set: %s" msgstr "Неверные ключи в наборе квот: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "Неправильный или непредвиденный ответ от API базовой программы тома " "хранилища: %(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "Неправильный формат проекта: проект имеет неправильный формат (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Недопустимый запрос отправлен в кластер Datera:Недопустимые аргументы: " "%(args)s | %(message)s" msgid "Bad response from Datera API" msgstr "Неправильный ответ API Datera" msgid "Bad response from SolidFire API" msgstr "Неправильный ответ от API SolidFire" #, python-format msgid "Bad response from XMS, %s" msgstr "Неверный ответ из XMS, %s" msgid "Binary" msgstr "Двоичный" msgid "Blank components" msgstr "Пустые компоненты" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Схема идентификации API Blockbridge (token или password)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Пароль API Blockbridge (для схемы идентификации 'password')" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Маркер API Blockbridge (для схемы идентификации 'token')" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Пользователь API Blockbridge (для схемы идентификации 'password')" msgid "Blockbridge api host not configured" msgstr "Не настроен хост API Blockbridge" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "" "Blockbridge настроен с недопустимой схемой идентификации '%(auth_scheme)s'" msgid "Blockbridge default pool does not exist" msgstr "Пул Blockbridge по умолчанию не существует" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Не настроен пароль Blockbridge (требуется для схемы идентификации 'password')" msgid "Blockbridge pools not configured" msgstr "Пулы Blockbridge не настроены" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Не настроен маркер Blockbridge (требуется для схемы идентификации 'token')" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Не настроен пользователь Blockbridge (требуется для схемы идентификации " "'password')" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: 
%(reason)s" msgstr "Ошибка CLI зонирования Brocade Fibre Channel: %(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Ошибка HTTP зонирования Brocade Fibre Channel: %(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "Пароль CHAP должен быть от 12 до 16 байт." #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Вывод исключительной ситуации CLI:\n" " команда: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Вывод исключительной ситуации CLI:\n" " команда: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E Связь VDisk с хостом не создана, так как VDisk уже связан с " "хостом.\n" "\"" msgid "CONCERTO version is not supported" msgstr "Версия CONCERTO не поддерживается" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) не существует в массиве" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "Имя кэша - None. Укажите smartcache:cachename в ключе." #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "Том кэша %(cache_vol)s не имеет моментальной копии %(cache_snap)s." #, python-format msgid "Cache volume %s does not have required properties" msgstr "Для тома кэша %s не заданы обязательные свойства" msgid "Call returned a None object" msgstr "Вызов возвратил объект None" msgid "Can not add FC port to host." msgstr "Не удалось добавить порт Fibre Channel в хост." #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "Не удалось найти ИД кэша по имени кэша %(name)s." #, python-format msgid "Can not find partition id by name %(name)s." msgstr "Не найден ИД раздела по имени %(name)s." #, python-format msgid "Can not get pool info. pool: %s" msgstr "Не удаётся получить информацию о пуле: %s" #, python-format msgid "Can not translate %s to integer." msgstr "Невозможно преобразовать %s в целое число." #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "Нет доступа к scality_sofs_config: %s" msgid "Can't attach snapshot." msgstr "Не удается присоединить моментальную копию." msgid "Can't decode backup record." msgstr "Не удалось декодировать запись резервной копии." #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "Невозможно расширить том репликации %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "Не найден LUN массива, проверьте правильность source-name или source-id." #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "Не найдено имя кэша в массиве. Имя кэша: %(name)s." #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "Не удалось найти ИД lun в базе данных, том: %(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" "Не удалось найти информацию о lun в массиве. Том: %(id)s, имя lun: %(name)s." #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "Не найдено имя раздела в массиве. Имя раздела: %(name)s." 
#, python-format msgid "Can't find service: %s" msgstr "Служба не найдена: %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "Не найдена моментальная копия в массиве, проверьте правильность source-name " "или source-id." msgid "Can't find the same host id from arrays." msgstr "Не найден такой ИД хоста в массивах." #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "Не удалось получить ИД тома из моментальной копии: %(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "Не удаётся получить ИД тома. Имя тома: %s." #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "Не удается импортировать LUN %(lun_id)s в Cinder. Несовпадение типов LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "Не удается импортировать LUN %s в Cinder. Он уже есть в HyperMetroPair." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "Не удается импортировать LUN %s в Cinder. Он уже есть в задаче копирования " "LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "Не удается импортировать LUN %s в Cinder. Он уже есть в группе LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "Не удается импортировать LUN %s в Cinder. Он уже есть в зеркале LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "Не удается импортировать LUN %s в Cinder. Он уже есть в SplitMirror." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "Не удается импортировать LUN %s в Cinder. Он уже есть в задаче переноса." #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "" "Не удается импортировать LUN %s в Cinder. Он уже есть в задаче удаленной " "репликации." #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "" "Не удается импортировать LUN %s в Cinder. Состояние LUN указывает на ошибку." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "Не удается импортировать моментальную копию %s в Cinder. Моментальная копия " "не принадлежит тому." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "Не удается импортировать моментальную копию %s в Cinder. Моментальная копия " "экспортирована для инициатора." #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "Не удается импортировать моментальную копию %s в Cinder. Состояние " "моментальной копии указывает на ошибку или на то, что она недоступна." #, python-format msgid "Can't open config file: %s" msgstr "Не удалось открыть файл конфигурации %s" msgid "Can't parse backup record." msgstr "Не удалось проанализировать запись резервной копии." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " "том не имеет типа тома." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." 
msgstr "" "Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: он " "уже есть в группе согласования %(orig_group)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " "том не найден." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " "том не существует." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " "том находится в недопустимом состоянии %(status)s. Допустимые состояния: " "%(valid)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " "тип тома %(volume_type)s не поддерживается группой." #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "Невозможно подключить уже подключенный том %s. Множественное подключение " "выключено в параметре конфигурации 'netapp_enable_multiattach'." msgid "Cannot change VF context in the session." msgstr "Не удаётся изменить контекст VF в сеансе." #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "Не удаётся изменить контекст VF, данный VF недоступен в списке управляемых " "VF %(vf_list)s." msgid "Cannot connect to ECOM server." msgstr "Не удалось подключиться к серверу ECOM." #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "Не удается создать копию с размером %(vol_size)s для тома с размером " "%(src_vol_size)s" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "Не удалось создать группу согласования %(group)s, поскольку моментальная " "копия %(snap)s не находится в допустимом состоянии. Допустимые состояния: " "%(valid)s." #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "Не удалось создать группу согласования %(group)s, поскольку исходный том " "%(source_vol)s не в допустимом состоянии. Допустимые состояния: %(valid)s." #, python-format msgid "Cannot create directory %s." msgstr "Не удалось создать каталог %s." msgid "Cannot create encryption specs. Volume type in use." msgstr "Невозможно создать спецификацию шифрования. Тип тома используется." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "Невозможно создать образ формата диска %s. Принимается только формат диска " "vmdk." #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "Создать маскирующее представление %(maskingViewName)s невозможно. 
" #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "Нельзя создать больше %(req)s томов в массиве ESeries, когда параметр " "'netapp_enable_multiattach' равен true." #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "Не удалось создать или найти группу носителей с именем %(sgGroupName)s." #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "Не удается создать том размером %(vol_size)s из моментальной копии размером " "%(snap_size)s" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "Не удалось создать том размера %s: не кратен 8 ГБ." #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "Невозможно создать volume_type с именем %(name)s и спецификациями " "%(extra_specs)s" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "Нельзя удалить LUN %s, когда есть моментальные копии." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "Не удалось удалить том кэша %(cachevol_name)s. Он обновлен в%(updated_at)s и " "сейчас содержит %(numclones)d экземпляры тома." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "Не удалось удалить том кэша %(cachevol_name)s. Он обновлен в %(updated_at)s " "и сейчас содержит %(numclones)s экземпляры тома." msgid "Cannot delete encryption specs. Volume type in use." msgstr "Невозможно удалить спецификацию шифрования. Тип тома используется." msgid "Cannot determine storage pool settings." msgstr "Не удается определить параметры пула памяти." msgid "Cannot execute /sbin/mount.sofs" msgstr "Не удалось выполнить /sbin/mount.sofs" #, python-format msgid "Cannot find CG group %s." msgstr "Не найдена группа согласования %s." #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "Не удалось найти службу конфигурации контроллеров для системы хранения " "%(storage_system)s." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" "Не найдена служба репликации для создания тома для моментальной копии %s." #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "Не найдена служба репликации для удаления моментальной копии %s." #, python-format msgid "Cannot find Replication service on system %s." msgstr "Не найдена служба репликации в системе %s." #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "Не найден том %(id)s. Операция удаления из управления. Выход..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "Том %(volumename)s не найден. Операция Extend. Выполняется выход..." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "Не найден номер устройства для тома %(volumeName)s." msgid "Cannot find migration task." msgstr "Не найдена задача переноса." #, python-format msgid "Cannot find replication service on system %s." msgstr "Не найдена служба репликации в системе %s." #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." 
msgstr "" "Не найден исходный экземпляр группы согласования. consistencygroup_id: %s." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "Не удалось получить mcs_id по ИД канала: %(channel_id)s." msgid "Cannot get necessary pool or storage system information." msgstr "" "Не удалось получить необходимую информацию о пуле или системе хранения." #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "Получить или создать группу носителей %(sgGroupName)s для тома " "%(volumeName)s невозможно" #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "Cannot get or create initiator group: %(igGroupName)s. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "Получить группу портов %(pgGroupName)s невозможно. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "Не удалось получить группу носителей %(sgGroupName)s от маскирующего " "представления %(maskingViewInstanceName)s. " #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Не удалось получить диапазон поддерживаемых размеров для %(sps)s. Код " "возврата: %(rc)lu. Ошибка: %(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "Не удалось получить группу носителей по умолчанию для стратегии FAST " "%(fastPolicyName)s." msgid "Cannot get the portgroup from the masking view." msgstr "Не удается получить группу портов из маскирующего представления." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" "Не удалось смонтировать Scality SOFS. Проверьте системный протокол на " "наличие ошибок" msgid "Cannot ping DRBDmanage backend" msgstr "Не удалось проверить связь с базовой программой DRBDmanage" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "Не удалось поместить том %(id)s на %(host)s" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "Нельзя одновременно указывать 'cgsnapshot_id' и 'source_cgid' для создания " "группы согласования %(name)s из источника." msgid "Cannot register resource" msgstr "Не удалось зарегистрировать ресурс" msgid "Cannot register resources" msgstr "Не удалось зарегистрировать ресурсы" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "Не удалось удалить том %(volume_id)s из группы согласования %(group_id)s: он " "не находится в группе." #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "Не удалось удалить том %(volume_id)s из группы согласования %(group_id)s: " "том находится в недопустимом состоянии %(status)s. Допустимые состояния: " "%(valid)s." #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "Изменить тип с HPE3PARDriver на %s невозможно." msgid "Cannot retype from one 3PAR array to another." msgstr "Изменить тип с одного массива 3PAR на другой невозможно." msgid "Cannot retype to a CPG in a different domain." msgstr "Невозможно изменить тип на CPG из другого домена." msgid "Cannot retype to a snap CPG in a different domain." 
msgstr "Невозможно изменить тип на snapCPG из другого домена." msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "Не удалось выполнить команду vgc-cluster. Убедитесь, что программное " "обеспечение установлено и права доступа настроены правильно." msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "" "hitachi_serial_number и hitachi_unit_name нельзя указывать одновременно." msgid "Cannot specify both protection domain name and protection domain id." msgstr "Нельзя одновременно указывать имя домена защиты и ИД домена защиты." msgid "Cannot specify both storage pool name and storage pool id." msgstr "Нельзя одновременно указывать имя пула памяти и ИД пула памяти." #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "Не удалось обновить группу согласования %(group_id)s: недопустимое значение " "параметра name, description, add_volumes или remove_volumes." msgid "Cannot update encryption specs. Volume type in use." msgstr "Невозможно обновить спецификацию шифрования. Тип тома используется." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "Не удалось обновить volume_type %(id)s" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "Не удается проверить существование объекта %(instanceName)s." msgid "Cascade option is not supported." msgstr "Каскадная опция не поддерживается." #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "Моментальная копия группы согласования %(cgsnapshot_id)s не найдена." msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Пустой cgsnahost. Группа согласования создана не будет." msgid "Cgsnapshot status must be available or error" msgstr "" "Состояние моментальной копии группы согласования должно быть available или " "error" msgid "Change hostlun id error." msgstr "Ошибка изменения ИД lun хоста." msgid "Change lun priority error." msgstr "Ошибка изменения приоритета LUN." msgid "Change lun smarttier policy error." msgstr "Ошибка изменения стратегии smarttier LUN." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "Изменение будет использовать менее 0 для следующих ресурсов: %(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "" "Проверьте права доступа для общего раздела ZFS, присвоенного этому драйверу." msgid "Check hostgroup associate error." msgstr "Ошибка проверки связывания группы хостов." msgid "Check initiator added to array error." msgstr "Ошибка проверки инициатора, добавленного в массив." msgid "Check initiator associated to host error." msgstr "Ошибка проверки инициатора, связанного с хостом." msgid "Check lungroup associate error." msgstr "Ошибка проверки связывания группы LUN." msgid "Check portgroup associate error." msgstr "Ошибка проверки связывания группы портов." msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "Проверьте состояние службы http. Убедитесь также, что номер порта https " "number совпадает с указанным в cinder.conf." msgid "Chunk size is not multiple of block size for creating hash." msgstr "Размер фрагмента не кратен размеру блока для создания хэша." 
#, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Ошибка CLI зонирования Cisco Fibre Channel: %(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "Функция создания копии не лицензирована в %(storageSystem)s." #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "Недопустимый тип дубликата '%(clone_type)s'. Допустимые типы: " "'%(full_clone)s' и '%(linked_clone)s'." msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "" "Кластер не отформатирован. Необходимо выполнить команду \"dog cluster format" "\"." #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Ошибка драйвера Coho Data Cinder: %(message)s" msgid "Coho rpc port is not configured" msgstr "Не настроен порт RPC Coho" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Команда %(cmd)s заблокирована в CLI и была отменена" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition: тайм-аут %s " #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: тайм-аут %s." msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "Программа включения сжатия не установлена. Создать сжатый том невозможно." #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "Вычислительный кластер: не удалось найти %(cluster)s." msgid "Condition has no field." msgstr "В условии нет поля." #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" "Недопустимое значение 'max_over_subscription_ratio'. Должно быть > 0: %s" msgid "Configuration error: dell_sc_ssn not set." msgstr "Ошибка конфигурации: dell_sc_ssn не задан." #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "Файл конфигурации %(configurationFile)s не существует." msgid "Configuration is not found." msgstr "Конфигурация не найдена." #, python-format msgid "Configuration value %s is not set." msgstr "Значение конфигурации %s не задано." msgid "Configured host type is not supported." msgstr "Настроенный тип хоста не поддерживается." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "Конфликт спецификаций QoS в типе тома %s: когда спецификация QoS связана с " "типом тома, устаревшая спецификация \"netapp:qos_policy_group\" запрещена в " "дополнительных спецификациях типа тома." #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Не удалось подключиться к glance: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Ошибка соединения с swift: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "Не передан коннектор: %s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "Коннектор не содержит требуемую информацию: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "Группа согласования %s еще содержит тома. Требуется флаг force, чтобы ее " "удалить." 
#, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "Группа согласования %s имеет зависимые моментальные копии." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "" "Группа согласования пустая. Моментальные копии группы согласования " "создаваться не будут." #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "Состояние группы согласования должно быть available или error. Текущее " "состояние: %s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" "Состояние группы согласования должно быть available, однако текущее " "состояние - %s." #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "Группа согласования %(consistencygroup_id)s не найдена." msgid "Container" msgstr "контейнер" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "Формат контейнера %s не поддерживается драйвером VMDK. Только 'bare' " "поддерживается." msgid "Container size smaller than required file size." msgstr "Размер контейнера меньше размера требуемого файла." msgid "Content type not supported." msgstr "Тип содержимого не поддерживается." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "Служба конфигурации контроллеров не найдена в %(storageSystemName)s." #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "Не удалось определить IP-адрес контроллера '%(host)s': %(e)s." #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "Выполнено преобразование в %(f1)s, но в данный момент формат - %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" "Выполнено преобразование в %(vol_format)s, но в данный момент формат - " "%(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "Преобразование в необработанный, но текущий формат %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "Выполнено преобразование в raw, но в данный момент формат - %s." msgid "Coordinator uninitialized." msgstr "Координатор деинициализирован." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "Сбой задачи копирования тома: convert_to_base_volume: id=%(id)s, status=" "%(status)s." #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "" "Ошибка задачи копирования тома: create_cloned_volume id=%(id)s, состояние=" "%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "Копирование метаданных из %(src_type)s %(src_id)s в %(vol_id)s." msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "Не удается определить, какую конечную точку Keystone следует использовать. " "Задать ее можно в каталоге служб или посредством опции конфигурации cinder." "conf, 'backup_swift__auth_url'." msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "Не удается определить, какую конечную точку Swift следует использовать. 
" "Задать ее можно в каталоге служб или посредством опции конфигурации cinder." "conf, 'backup_swift_url'." msgid "Could not find DISCO wsdl file." msgstr "Не найден файл WSDL DISCO." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "Не найден ИД кластера GPFS: %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "Не найдено устройство файловой системы GPFS: %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "Не найден хост для тома %(volume_id)s с типом %(type_id)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Невозможно найти конфигурацию по адресу %(path)s" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "Не найден экспорт iSCSI для тома %(volumeName)s." #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Не удалось найти экспорт iSCSI для тома %s" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "Не найден целевой объект iSCSI для тома %(volume_id)s." #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "Не найден ключ в выводе команды %(cmd)s: %(out)s." #, python-format msgid "Could not find parameter %(param)s" msgstr "Не удалось найти параметр %(param)s" #, python-format msgid "Could not find target %s" msgstr "Не удалось найти целевой объект %s" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "Не найден родительский том для моментальной копии '%s' в массиве." #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "Не найдена уникальная моментальная копия %(snap)s для тома %(vol)s." msgid "Could not get system name." msgstr "Не удалось получить имя системы." #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Невозможно загрузить приложение '%(name)s' из %(path)s" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "Невозможно прочитать %s. Повторное выполнение с помощью sudo" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "Не удалось прочитать информацию для моментальной копии %(name)s. Код: " "%(code)s. Причина: %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "Не удалось восстановить файл конфигурации %(file_path)s: %(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "Не удалось сохранить конфигурацию в %(file_path)s: %(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "" "Не удалось запустить создание моментальной копии группы согласования %s." #, python-format msgid "Counter %s not found" msgstr "Счетчик %s не найден" msgid "Create QoS policy error." msgstr "Ошибка создания стратегии QoS." #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Создание резервной копии прервано: ожидалось состояние резервной копии " "%(expected_status)s, получено %(actual_status)s." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Создание резервной копии прервано: ожидалось состояние тома " "%(expected_status)s, получено %(actual_status)s." msgid "Create consistency group failed." msgstr "Не удалось создать группу согласования." 
#, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "Создание зашифрованных томов с типом %(type)s из образа %(image)s не " "поддерживается." msgid "Create export for volume failed." msgstr "Не удалось создать экспорт для тома." msgid "Create hostgroup error." msgstr "Ошибка создания группы хостов." #, python-format msgid "Create hypermetro error. %s." msgstr "Ошибка создания hypermetro. %s." msgid "Create lun error." msgstr "Ошибка создания LUN." msgid "Create lun migration error." msgstr "Ошибка создания переноса LUN." msgid "Create luncopy error." msgstr "Ошибка создания копии LUN." msgid "Create lungroup error." msgstr "Ошибка создания группы LUN." msgid "Create manager volume flow failed." msgstr "Не удалось создать поток тома администратора." msgid "Create port group error." msgstr "Ошибка создания группы портов." msgid "Create replication error." msgstr "Ошибка создания репликации." #, python-format msgid "Create replication pair failed. Error: %s." msgstr "Ошибка создания пары репликации: %s." msgid "Create snapshot error." msgstr "Ошибка создания моментальной копии." #, python-format msgid "Create volume error. Because %s." msgstr "Ошибка создания тома. Причина: %s." msgid "Create volume failed." msgstr "Сбой создания тома." msgid "Creating a consistency group from a source is not currently supported." msgstr "Создание группы согласования из источника пока не поддерживается." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "Не удалось создать и активировать набор зон: (набор зон=%(cfg_name)s ошибка=" "%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "Не удалось создать и активировать набор зон: (набор зон=%(zoneset)s ошибка=" "%(err)s)." #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "" "Создаются сведения об использовании с %(begin_period)s по %(end_period)s" msgid "Current host isn't part of HGST domain." msgstr "Текущий хост не является частью домена HGST." #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "Текущий хост недопустим для тома %(id)s с типом %(type)s. Перенос запрещен" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "Связанный с томом %(vol)s хост находится в неподдерживаемой группе хостов " "%(group)s." msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "УСТАРЕЛО: Развернуть версию 1 API Cinder." msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "УСТАРЕЛО: Развернуть версию 2 API Cinder." #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "Ошибка драйвера DRBDmanage: в ответе отсутствует ожидаемый ключ \"%s\", " "неверная версия DRBDmanage?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "Ошибка настройки драйвера DRBDmanage: некоторые необходимые библиотеки " "(dbus, drbdmanage.*) не найдены." 
#, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "Программа DRBDmanage ожидала один ресурс (\"%(res)s\"), получила %(n)d" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "Тайм-аут ожидания DRBDmanage для нового тома после восстановления " "моментальной копии; ресурс \"%(res)s\", том \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "Тайм-аут ожидания DRBDmanage при создании моментальной копии; ресурс " "\"%(res)s\", моментальная копия \"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "" "Тайм-аут ожидания DRBDmanage при создании тома; ресурс \"%(res)s\", том " "\"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "Тайм-аут ожидания DRBDmanage для размера тома; ИД тома \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "Не удалось определить версию API ONTAP данных." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" "ONTAP данных, работающий в режиме 7, не поддерживает группы стратегий QoS." msgid "Database schema downgrade is not allowed." msgstr "Понижение версии схемы базы данных не разрешено." #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "Набор данных %s не является общим в устройстве Nexenta Store" #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "Группа набора данных %s не найдена в Nexenta SA" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Дедупликация - допустимый тип предоставления ресурсов, но требуется, чтобы " "версия WSAPI %(dedup_version)s %(version)s была установлена." msgid "Dedup luns cannot be extended" msgstr "LUN с дедупликацией нельзя расширять" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "Программа включения запрета дубликатов не установлена. Создать том без " "дубликатов невозможно" msgid "Default pool name if unspecified." msgstr "Имя пула по умолчанию, если не указано." #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "Квота по умолчанию для ресурса %(res)s, заданная флагом квоты по умолчанию " "quota_%(res)s, устарела. Используйте класс квоты по умолчанию для квоты по " "умолчанию." msgid "Default volume type can not be found." msgstr "Не удается найти тип тома по умолчанию." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "Определяет набор экспортированных пулов и связанных с ними строк запроса " "базовой системы" msgid "Delete LUNcopy error." msgstr "Ошибка удаления LUNcopy." msgid "Delete QoS policy error." msgstr "Ошибка удаления стратегии QoS." msgid "Delete associated lun from lungroup error." msgstr "Ошибка удаления связанного LUN из группы LUN." 
#, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Удаление резервной копии прервано: настроенная в данный момент служба " "резервного копирования [%(configured_service)s] не является службой " "резервного копирования, которая использовалась для создания этой резервной " "копии [%(backup_service)s]." msgid "Delete consistency group failed." msgstr "Не удалось удалить группу согласования." msgid "Delete hostgroup error." msgstr "Ошибка удаления группы хостов." msgid "Delete hostgroup from mapping view error." msgstr "Ошибка удаления группы хостов из представления связей." msgid "Delete lun error." msgstr "Ошибка удаления LUN." msgid "Delete lun migration error." msgstr "Ошибка удаления переноса LUN." msgid "Delete lungroup error." msgstr "Ошибка удаления группы LUN." msgid "Delete lungroup from mapping view error." msgstr "Ошибка удаления группы LUN из представления связей." msgid "Delete mapping view error." msgstr "Ошибка удаления представления связей." msgid "Delete port group error." msgstr "Ошибка удаления группы портов." msgid "Delete portgroup from mapping view error." msgstr "Ошибка удаления группы портов из представления связей." msgid "Delete snapshot error." msgstr "Ошибка удаления моментальной копии." #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "Удаление моментальной копии тома не поддерживается в состоянии %s." #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Удаление резервной копии прервано: ожидалось состояние резервной копии " "%(expected_status)s, получено %(actual_status)s." msgid "Deleting volume from database and skipping rpc." msgstr "Удаление тома из базы данных и пропуск rpc." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "Не удалось удалить зоны: (команда=%(cmd)s ошибка=%(err)s)." msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "" "Для поддержки групп согласования требуется Dell API версии 2.1 или выше" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "" "Ошибка конфигурации драйвера Dell Cinder. Репликация не поддерживается для " "прямого подключения." #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Ошибка конфигурации драйвера Dell Cinder. replication_device %s не найден" msgid "Deploy v3 of the Cinder API." msgstr "Развернуть версию 3 API Cinder." msgid "Describe-resource is admin only functionality" msgstr "Функция Describe-resource доступна только администраторам" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "migration_status целевого хоста - %(stat)s, ожидалось %(exp)s." msgid "Destination host must be different than the current host." msgstr "Целевой хост должен отличаться от текущего хоста." msgid "Destination volume not mid-migration." msgstr "Целевой том не в процессе переноса." msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "Не удалось отключить том: подключений несколько, но не указан attachment_id." msgid "Detach volume from instance and then try again." msgstr "Отключите том от экземпляра и повторите операцию." 
#, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "Обнаружено больше одного тома с именем %(vol_name)s" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "Не найден ожидаемый столбец в %(fun)s: %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "Не найден ожидаемый ключ %(key)s в %(fun)s: %(raw)s." msgid "Disabled reason contains invalid characters or is too long" msgstr "Причина выключения содержит неверные символы или слишком длинна" #, python-format msgid "Domain with name %s wasn't found." msgstr "Не найден домен с именем %s." #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Обнаружено понижение уровня кластера GPFS. Функция дублирования GPFS не " "включена на уровне демона кластера %(cur)s: должен быть уровень не ниже " "%(min)s." #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Драйверу не удалось инициализировать соединение (ошибка: %(err)s)." msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "Драйвер не может изменить тип, так как том (LUN {}) содержит моментальную " "копию, перенос которой запрещен." msgid "Driver must implement initialize_connection" msgstr "Драйвер должен реализовать initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "Драйвер успешно декодировал импортированные данные резервной копии, но в них " "нет полей (%s)." #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "Версия API прокси-сервера E-Series %(current_version)s не поддерживает " "полный набор дополнительных спецификаций SSC. Версия прокси-сервера должна " "быть не ниже %(min_version)s." #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "Исключительная ситуация клиента драйвера EMC VNX Cinder: %(cmd)s (код " "возврата: %(rc)s) (вывод: %(out)s)." #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" "Драйвер EMC VNX Cinder, SPUnavailableException: %(cmd)s (код возврата: " "%(rc)s) (вывод: %(out)s)." msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword должны иметь " "допустимые значения." #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "Необходимо указать 'cgsnapshot_id' или 'source_cgid' для создания группы " "согласования %(name)s из источника." #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO %(slo)s или рабочая схема %(workload)s недопустимы. См. предыдущее " "сообщение об ошибке. Там указаны допустимые значения." msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "Необходимо указать либо hitachi_serial_number, либо hitachi_unit_name." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." 
msgstr "Служба составления элементов не найдена в %(storageSystemName)s." msgid "Enables QoS." msgstr "Включает QoS." msgid "Enables compression." msgstr "Включает сжатие." msgid "Enables replication." msgstr "Включает репликацию." msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "Убедитесь, что configfs смонтирована в /sys/kernel/config." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка добавления инициатора: %(initiator)s в groupInitiatorGroup: " "%(initiatorgroup)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при добавлении в целевую группу %(targetgroup)s с IQN: %(iqn)s, код " "возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Ошибка подключения тома %(vol)s." #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при дублировании моментальной копии %(snapshot)s, том: %(lun)s, пул: " "%(pool)s, проект: %(project)s, дубликат проекта: %(clone_proj)s, код " "возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Ошибка создания копии тома %(cloneName)s. Код возврата: %(rc)lu. Ошибка: " "%(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка при создании копии тома. Копия: %(cloneName)s Исходный том: " "%(sourceName)s. Код возврата: %(rc)lu. Ошибка: %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка при создании группы: %(groupName)s. Код возврата: %(rc)lu. Ошибка: " "%(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Ошибка при создании маскирующего представления: %(groupName)s. Код " "возврата: %(rc)lu. Ошибка: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка при создании тома: %(volumeName)s. Код возврата: %(rc)lu. Ошибка: " "%(error)s." #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка при создании тома: %(volumename)s. Код возврата: %(rc)lu. Ошибка: " "%(error)s." #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка функции CreateGroupReplica. Исходная группа: %(source)s. Целевая " "группа: %(target)s. Код возврата: %(rc)lu. Ошибка: %(error)s." #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка при создании инициатора %(initiator)s для псевдонима %(alias)s. Код " "возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." 
msgstr "" "Ошибка при создании проекта %(project)s в пуле %(pool)s, код возврата: " "%(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка создания свойства %(property)s, тип: %(type)s, описание: " "%(description)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Ошибка при создании общего ресурса: %(name)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при создании моментальной копии: %(snapshot)s, том: %(lun)s, пул: " "%(pool)s, проект: %(project)s, код возврата: %(ret.status)d, сообщение: " "%(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка создания моментальной копии %(snapshot)s для общего ресурса %(share)s " "в пуле: %(pool)s, проект: %(project)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Ошибка при создании цели: %(alias)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка при создании целевой группы %(targetgroup)s с IQN: %(iqn)s, код " "возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Ошибка при создании тома %(lun)s, размер %(size)s, код возврата " "%(ret.status)d Сообщение: %(ret.data)s." #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка при создании нового составного тома. Код возврата: %(rc)lu. Ошибка: " "%(error)s." #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка действия создания репликации в пуле %(pool)s, проект: %(proj)s, том: " "%(vol)s, для целевого объекта %(tgt)s и пула %(tgt_pool)s. Код возврата: " "%(ret.status)d, сообщение: %(ret.data)s." msgid "Error Creating unbound volume on an Extend operation." msgstr "Ошибка при создании несвязанного тома в операции Extend." msgid "Error Creating unbound volume." msgstr "Ошибка создания несвязанного тома." #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка удаления тома: %(volumeName)s. Код возврата: %(rc)lu. Ошибка: " "%(error)s." #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "Ошибка удаления группы %(storageGroupName)s. Код возврата: %(rc)lu. Ошибка: " "%(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. 
Error: %(error)s" msgstr "" "Ошибка удаления группы инициатора: %(initiatorGroupName)s. Код возврата: " "%(rc)lu. Ошибка: %(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при удалении моментальной копии %(snapshot)s, общий ресурс: " "%(share)s, пул: %(pool)s, проект: %(project)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при удалении моментальной копии %(snapshot)s, том: %(lun)s, пул: " "%(pool)s, проект: %(project)s, код возврата: %(ret.status)d, сообщение: " "%(ret.data)s." #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Ошибка удаления тома %(lun)s из пула %(pool)s, проект: %(project)s. Код " "возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка удаления проекта %(project)s в пуле %(pool)s. Код возврата: " "%(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Ошибка действия удаления репликации %(id)s. Код возврата: %(ret.status)d " "Сообщение: %(ret.data)s." #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка расширения тома: %(volumeName)s. Код возврата: %(rc)lu. Ошибка: " "%(error)s." #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка при получении инициаторов: InitiatorGroup: %(initiatorgroup)s, Код " "возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "Ошибка при получении статистики пула: пул: %(pool)s код возврата: %(status)d " "сообщение: %(data)s." #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка получения статистики проекта: Пул: %(pool)s Проект: %(project)s Код " "возврата: %(ret.status)d Сообщение: %(ret.data)s." #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка получения общего ресурса: %(share)s, пул: %(pool)s, проект: " "%(project)s код возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при получении моментальной копии %(snapshot)s, том: %(lun)s, пул: " "%(pool)s, проект: %(project)s, код возврата: %(ret.status)d, сообщение: " "%(ret.data)s." #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Ошибка при получении цели: %(alias)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." 
#, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при получении тома %(lun)s в пуле %(pool)s, проект: %(project)s, код " "возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Ошибка при переносе тома из одного пула в другой. Код возврата: %(rc)lu. " "Ошибка: %(error)s." #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Ошибка при модификации маскирующего представления: %(groupName)s. Код " "возврата: %(rc)lu. Ошибка: %(error)s." #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "Ошибка принадлежности пула: пул %(pool)s не принадлежит %(host)s." #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при задании свойств %(props)s, том: %(lun)s, пул: %(pool)s, проект: " "%(project)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка при завершении сеанса переноса. Код возврата: %(rc)lu. Ошибка: " "%(error)s." #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Ошибка при проверке инициатора: %(iqn)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Ошибка при проверке пула: %(pool)s, код возврата: %(ret.status)d, сообщение: " "%(ret.data)s." #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при проверке проекта %(project)s в пуле %(pool)s, код возврата: " "%(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Ошибка при проверке службы: %(service)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Ошибка при проверке цели: %(alias)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Ошибка при проверке общего ресурса %(share)s в проекте %(project)s и пуле " "%(pool)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "Ошибка при добавлении тома %(volumeName)s с путем к экземпляру " "%(volumeInstancePath)s." #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Ошибка при добавлении инициатора в группу: %(groupName)s. Код возврата: " "%(rc)lu. Ошибка: %(error)s." #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "Ошибка при добавлении тома в составной том. Ошибка: %(error)s." 
#, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "Ошибка при добавлении тома %(volumename)s в целевой базовый том." #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "Ошибка при связывании группы носителей %(storageGroupName)s со стратегией " "FAST %(fastPolicyName)s, описание ошибки: %(errordesc)s." #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "Ошибка подключения тома %s. Количество целевых объектов предельное!" #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Ошибка разрыва взаимосвязи копии. Имя синхронизации: %(syncName)s Код " "возврата: %(rc)lu. Ошибка: %(error)s." msgid "Error connecting to ceph cluster." msgstr "Ошибка подключения к кластеру ceph." #, python-format msgid "Error connecting via ssh: %s" msgstr "Ошибка при соединении посредством ssh: %s" #, python-format msgid "Error creating volume: %s." msgstr "Ошибка создания тома: %s." msgid "Error deleting replay profile." msgstr "Ошибка удаления профайла повтора." #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "Ошибка удаления тома %(ssn)s: %(volume)s" #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "Ошибка удаления тома %(vol)s: %(err)s." #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "Ошибка во время анализа вычислителя: %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Ошибка при изменении общего ресурса: %(share)s, пул: %(pool)s, код возврата: " "%(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "Ошибка включения iSER для NetworkPortal: убедитесь, что RDMA поддерживается " "на порте iSCSI %(port)d по IP-адресу %(ip)s." #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "Ошибка очистки после сбоя подключения: %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "Ошибка выполнения API CloudByte [%(cmd)s], ошибка: %(err)s." msgid "Error executing EQL command" msgstr "Ошибка выполнения команды EQL" #, python-format msgid "Error executing command via ssh: %s" msgstr "Ошибка выполнения команды через ssh: %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "Ошибка расширения тома %(vol)s: %(err)s." #, python-format msgid "Error extending volume: %(reason)s" msgstr "Ошибка расширения тома: %(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "Ошибка при поиске %(name)s." #, python-format msgid "Error finding %s." msgstr "Ошибка при поиске %s." #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка получения ReplicationSettingData. Код возврата: %(rc)lu. Ошибка: " "%(error)s." msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Ошибка получения сведений о версии устройства. Код возврата: %(ret.status)d " "Сообщение: %(ret.data)s." #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." 
msgstr "Ошибка получения ИД домена из имени %(name)s: %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "Ошибка получения ИД домена из имени %(name)s: %(id)s." msgid "Error getting initiator groups." msgstr "Ошибка при получении групп инициаторов." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "Ошибка получения ИД пула из имени %(pool)s: %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "Ошибка получения ИД пула из имени %(pool_name)s: %(err_msg)s." #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Ошибка действия получения репликации %(id)s. Код возврата: %(ret.status)d " "Сообщение: %(ret.data)s." msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Ошибка получения сведений об источнике репликации. Код возврата: %(ret." "status)d Сообщение: %(ret.data)s." msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Ошибка получения сведений о цели репликации. Код возврата: %(ret.status)d " "Сообщение: %(ret.data)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Ошибка при получении версии svc %(svc)s. Код возврата: %(ret.status)d " "Сообщение: %(ret.data)s." #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" "Ошибка операции [%(operation)s] для тома [%(cb_volume)s] в хранилище " "CloudByte: [%(cb_error)s], код ошибки: [%(error_code)s]." #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "Ошибка в ответе API SolidFire: data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "" "Ошибка в операции создания пространства для %(space)s размера %(size)d ГБ" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "" "Ошибка в операции расширения пространства для тома %(space)s на %(size)d ГБ" #, python-format msgid "Error managing volume: %s." msgstr "Ошибка управления томом: %s." #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "Ошибка при преобразовании тома %(vol)s: %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Ошибка изменения синхронизации копии %(sv)s, операция: %(operation)s. Код " "возврата: %(rc)lu. Ошибка: %(error)s." #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Ошибка изменения службы: %(service)s, код возврата: %(ret.status)d, " "сообщение: %(ret.data)s." #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка перемещения тома %(vol)s из исходного проекта %(src)s в целевой " "проект %(tgt)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." msgid "Error not a KeyError." msgstr "Тип ошибки отличается от KeyError." msgid "Error not a TypeError." msgstr "Тип ошибки отличается от TypeError." #, python-format msgid "Error occurred when creating cgsnapshot %s." 
msgstr "Ошибка создания моментальной копии группы согласования %s." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "Ошибка удаления моментальной копии группы согласования %s." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "Ошибка изменения группы согласования %s." #, python-format msgid "Error parsing config file: %s" msgstr "Ошибка анализа файла конфигурации: %s" msgid "Error promoting secondary volume to primary" msgstr "Ошибка при попытке продвинуть вспомогательный том до уровня основного" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "Ошибка при удалении тома %(vol)s: %(error)s." #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Ошибка переименования тома %(vol)s: %(err)s." #, python-format msgid "Error response: %s" msgstr "Сообщение об ошибке: %s" msgid "Error retrieving volume size" msgstr "Ошибка получения размера тома" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка передачи обновления репликации для ИД действия %(id)s. Код возврата: " "%(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" "Ошибка отправки обновления репликации. Возвращенная ошибка: %(err)s. " "Действие: %(id)s." #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка установки значения наследования репликации %(set)s для тома %(vol)s. " "Проект %(project)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Ошибка преобразования пакета %(package)s из источника %(src)s в локальный " "пакет. Код возврата: %(ret.status)d, сообщение: %(ret.data)s." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "Ошибка удаления привязки тома %(vol)s к пулу: %(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "Ошибка при проверке размера дубликата тома, %(clone)s, размер: %(size)d, " "моментальная копия: %(snapshot)s" #, python-format msgid "Error while authenticating with switch: %s." msgstr "Ошибка при идентификации на коммутаторе: %s." #, python-format msgid "Error while changing VF context %s." msgstr "Ошибка при изменении контекста VF %s." #, python-format msgid "Error while checking the firmware version %s." msgstr "Ошибка при проверке версии встроенного ПО %s." #, python-format msgid "Error while checking transaction status: %s" msgstr "Ошибка проверки состояния транзакции: %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "Ошибка при проверке доступности VF для управления %s." #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "Ошибка при подключении коммутатора %(switch_id)s по протоколу %(protocol)s. " "Ошибка: %(error)s." #, python-format msgid "Error while creating authentication token: %s" msgstr "Ошибка при создании маркера идентификации: %s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." 
msgstr "" "Ошибка создания моментальной копии [состояние] %(stat)s - [результат] " "%(res)s." #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "Ошибка создания тома [состояние] %(stat)s - [результат] %(res)s." #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "" "Ошибка удаления моментальной копии [состояние] %(stat)s - [результат] " "%(res)s." #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "Ошибка удаления тома [состояние] %(stat)s - [результат] %(res)s." #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "Ошибка расширения тома [состояние] %(stat)s - [результат] %(res)s." #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "Ошибка получения сведений о %(op)s , код возврата: %(status)s." #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "Ошибка получения данных через ssh: (команда=%(cmd)s ошибка=%(err)s)." #, python-format msgid "Error while getting disco information [%s]." msgstr "Ошибка при получении информации disco [%s]." #, python-format msgid "Error while getting nvp value: %s." msgstr "Ошибка при получении значения nvp: %s." #, python-format msgid "Error while getting session information %s." msgstr "Ошибка при получении информации о сеансе %s." #, python-format msgid "Error while parsing the data: %s." msgstr "Ошибка при анализе данных: %s" #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "Ошибка запроса страницы %(url)s на коммутаторе, причина %(error)s." #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "Ошибка при удалении зон и конфигураций из строки зоны: %(description)s." #, python-format msgid "Error while requesting %(service)s API." msgstr "Ошибка во время запроса API %(service)s." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "" "Ошибка выполнения распределения по зонам через интерфейс командной строки: " "(команда=%(cmd)s ошибка=%(err)s)." #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" "Ошибка при обновлении новых зон и конфигураций в строке зоны. Ошибка: " "%(description)s." msgid "Error writing field to database" msgstr "Ошибка записи поля в базу данных" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "Ошибка [%(stat)s - %(res)s] при получении ИД тома." #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "Ошибка [%(stat)s - %(res)s] при восстановлении моментальной копии " "[%(snap_id)s] в том [%(vol)s]." #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "" "Ошибка [состояние] %(stat)s - [результат] %(res)s] при получении ИД тома." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "Превышено максимальное число попыток планирования %(max_attempts)d для тома " "%(volume_id)s" msgid "Exceeded the limit of snapshots per volume" msgstr "Превышено максимально допустимое число моментальных копий тома" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." 
msgstr "" "Возникла исключительная ситуация при добавлении тома метаданных к целевому " "тому %(volumename)s." #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "Возникла исключительная ситуация при создании реплики элемента. Имя " "дубликата %(cloneName)s Исходное имя: %(sourceName)s Дополнительные " "спецификации: %(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "Исключительная ситуация в _select_ds_for_volume: %s." #, python-format msgid "Exception while forming the zone string: %s." msgstr "Исключительная ситуация при формировании строки зоны: %s." #, python-format msgid "Exception: %s" msgstr "Исключительная ситуация: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Ожидался uuid, а получен %(uuid)s." #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "Ожидался ровно один узел с именем \"%s\"" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "Ожидалось целочисленное значение для node_count, команда svcinfo lsiogrp " "вернула %(node)s." #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" "Вывод из команды %(cmd)s интерфейса командной строки не ожидался. Получен " "%(out)s." #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "Во время фильтрации по vdisk_UID от команды lsvdisk ожидался один vdisk. " "Возвращено %(count)s." #, python-format msgid "Expected volume size was %d" msgstr "Ожидаемый размер тома: %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Экспорт резервной копии прерван: ожидалось состояние резервной копии " "%(expected_status)s, получено %(actual_status)s." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Экспорт записи прерван: настроенная в данный момент служба резервного " "копирования [%(configured_service)s] не является службой резервного " "копирования, которая использовалась для создания этой резервной копии " "[%(backup_service)s]." msgid "Extend volume error." msgstr "Ошибка расширения тома." msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "Расширение тома для этого драйвера поддерживается, только когда нет " "моментальных копий." msgid "Extend volume not implemented" msgstr "Том расширения не реализован" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "Программа включения VP FAST не установлена. Задать стратегию слоев для тома " "невозможно" msgid "FAST is not supported on this array." msgstr "FAST не поддерживается в этом массиве." msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "Протокол - FC, но не получены WWPN от OpenStack." #, python-format msgid "Faield to unassign %(volume)s" msgstr "Отменить назначение %(volume)s не удалось" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "Не удалось создать том кэша %(volume)s. 
Ошибка: %(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "Не удалось добавить соединение для коммутируемой сети=%(fabric)s. Ошибка:" "%(err)s" msgid "Failed cgsnapshot" msgstr "Сбойная моментальная копия группы согласования" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "Не удалось создать моментальную копию для группы %(response)s." #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" "Не удалось создать моментальную копию для тома %(volname)s: %(response)s." #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "Не удалось получить набор активных зон из фабрики %s." #, python-format msgid "Failed getting details for pool %s." msgstr "Не удалось получить сведения для пула %s." #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "Не удалось удалить соединение для коммутируемой сети=%(fabric)s. Ошибка:" "%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "Не удалось расширить том %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "Не удалось войти в 3PAR (%(url)s), причина: %(err)s" msgid "Failed to access active zoning configuration." msgstr "Нет доступа к активной конфигурации распределения по зонам." #, python-format msgid "Failed to access zoneset status:%s" msgstr "Не удалось получить доступ к состоянию набора областей: %s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "Не удалось получить блокировку ресурса. (порядковый номер: %(serial)s, " "экземпляр: %(inst)s, код возврата: %(ret)s, stderr: %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "Добавить %(vol)s в %(sg)s после %(retries)s попыток не удалось." msgid "Failed to add the logical device." msgstr "Не удалось добавить логическое устройство." #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Не удалось добавить том %(volumeName)s в группу согласования %(cgName)s. Код " "возврата: %(rc)lu. Ошибка: %(error)s." msgid "Failed to add zoning configuration." msgstr "Не удалось добавить конфигурацию распределения по зонам." #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" "Не удалось присвоить IQN инициатора iSCSI. (порт: %(port)s, причина: " "%(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Не удалось связать qos_specs %(specs_id)s с типом %(type_id)s." #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "Не удалось подключить целевой объект iSCSI для тома %(volume_id)s." #, python-format msgid "Failed to backup volume metadata - %s" msgstr "Не удалось создать резервную копию метаданных тома - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "Не удалось создать резервную копию метаданных тома: объект резервной копии " "метаданных backup.%s.meta уже существует" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "Не удалось создать дубликат тома из моментальной копии %s." 
#, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "Не удалось подключиться к %(vendor_name)s Массив %(host)s: %(err)s" msgid "Failed to connect to Dell REST API" msgstr "Не удается подключиться к API REST Dell" msgid "Failed to connect to array" msgstr "Не удалось подключиться к массиву" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" "Не удалось подключиться к демону sheep. Адрес: %(addr)s, порт: %(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "Не удалось скопировать образ на том: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "Не удалось скопировать метаданные на том: %(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "Не удалось скопировать том: недоступно целевое устройство." msgid "Failed to copy volume, source device unavailable." msgstr "Не удалось скопировать том: недоступно исходное устройство." #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "" "Не удалось создать группу согласования %(cgName)s из моментальной копии " "%(cgSnapshot)s." #, python-format msgid "Failed to create IG, %s" msgstr "Не удалось создать группу инициаторов, %s" msgid "Failed to create SolidFire Image-Volume" msgstr "Не удалось создать образ-том SolidFire" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Не удалось создать группу тома %(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Не удалось создать файл. (файл: %(file)s, код возврата: %(ret)s, stderr: " "%(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "Не удалось создать временную моментальную копию для тома %s." msgid "Failed to create api volume flow." msgstr "Не удалось создать поток тома api." #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "Не удалось создать моментальную копию cg %(id)s, причина: %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "Не удалось создать группу согласования %(id)s, причина: %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "Не удалось создать группу согласования %(id)s:%(ret)s." #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "Не удалось создать группу согласования %s, поскольку группа согласования VNX " "не принимает сжатые LUN в качестве своих элементов." #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "Не удалось создать группу согласования: %(cgName)s." #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "Не удалось создать группу согласования %(cgid)s. Ошибка: %(excmsg)s." #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Не удалось создать группу согласования %(consistencyGroupName)s Код " "возврата: %(rc)lu. Ошибка: %(error)s." #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "Не удалось создать ИД аппаратного обеспечения в %(storageSystemName)s." #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." 
msgstr "Не удалось создать хост: %(name)s. Проверьте, есть ли он в массиве." #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "Не удалось создать группу хостов: %(name)s. Проверьте, есть ли она в массиве." msgid "Failed to create iqn." msgstr "Не удалось создать iqn." #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "Не удалось создать iscsi target для тома %(volume_id)s." msgid "Failed to create manage existing flow." msgstr "Не удалось создать управление существующим потоком." msgid "Failed to create manage_existing flow." msgstr "Не удалось создать поток manage_existing." msgid "Failed to create map on mcs, no channel can map." msgstr "" "Не удалось создать карту связей в mcs: нет канала поддерживающего карты " "связей." msgid "Failed to create map." msgstr "Не удалось создать карту." #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "Не удалось создать метаданные для тома: %(reason)s" msgid "Failed to create partition." msgstr "Не удалось создать раздел." #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "Не удалось создать qos_specs %(name)s со спецификацией %(qos_specs)s." msgid "Failed to create replica." msgstr "Не удалось создать реплику." msgid "Failed to create scheduler manager volume flow" msgstr "Не удалось создать поток тома администратора планировщика" #, python-format msgid "Failed to create snapshot %s" msgstr "Не удалось создать моментальную копию %s" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "Не удалось создать моментальную копию, так как не указан ИД LUN" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "" "Не удалось создать моментальную копию для группы согласования %(cgName)s." #, python-format msgid "Failed to create snapshot for volume %s." msgstr "Не удалось создать моментальную копию для тома %s." #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "" "Не удалось создать стратегию моментальной копии на томе %(vol)s: %(res)s." #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "" "Не удалось найти область ресурсов моментальной копии на томе %(vol)s: " "%(res)s." msgid "Failed to create snapshot." msgstr "Не удалось создать моментальную копию." #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "Не удалось создать моментальную копию. Не найдена информация о томе " "CloudByte для тома OpenStack [%s]." #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "Не удалось создать группу носителей %(storageGroupName)s." #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "Не удалось создать оперативный пул, сообщение об ошибке: %s" #, python-format msgid "Failed to create volume %s" msgstr "Не удалось создать том %s" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" "Не удалось удалить SI для volume_id %(volume_id)s, поскольку у него есть " "пара." #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Не удалось удалить логическое устройство. (LDEV: %(ldev)s, причина: " "%(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." 
msgstr "" "Не удалось удалить моментальную копию cgsnapshot %(id)s, причина: %(reason)s." #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "Не удалось удалить группу согласования %(id)s, причина: %(reason)s." #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "Не удалось удалить группу согласования %(cgName)s." #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Не удалось удалить группу согласования %(consistencyGroupName)s. Код " "возврата %(rc)lu. Ошибка: %(error)s." msgid "Failed to delete device." msgstr "Не удалось удалить устройство." #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Не удалось удалить набор файлов для группы согласования %(cgname)s. Ошибка: " "%(excmsg)s." msgid "Failed to delete iqn." msgstr "Не удалось удалить iqn." msgid "Failed to delete map." msgstr "Не удалось удалить карту связей." msgid "Failed to delete partition." msgstr "Не удалось удалить раздел." msgid "Failed to delete replica." msgstr "Не удалось удалить реплику." #, python-format msgid "Failed to delete snapshot %s" msgstr "Не удалось удалить моментальную копию %s" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "" "Не удалось удалить моментальную копию для группы согласования %(cgId)s." #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "" "Не удалось удалить моментальную копию для snapshot_id %s, поскольку у нее " "есть пара." msgid "Failed to delete snapshot." msgstr "Не удалось удалить снимок." #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "Не удалось удалить том %(volumeName)s." #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "Не удалось удалить том для volume_id %(volume_id)s, поскольку у него есть " "пара." #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "Не удалось отключить целевой объект iSCSI для тома %(volume_id)s." msgid "Failed to determine blockbridge API configuration" msgstr "Не удалось определить конфигурацию API Blockbridge" msgid "Failed to disassociate qos specs." msgstr "Не удалось удалить связь спецификации QoS." #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Не удалось удалить связь qos_specs %(specs_id)s с типом %(type_id)s." #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "" "Не удалось обеспечить область ресурсов моментальной копии. Не найден том с " "ИД %s" msgid "Failed to establish SSC connection." msgstr "Не удалось установить соединение SSC." msgid "Failed to establish connection with Coho cluster" msgstr "Не удалось установить соединение с кластером Coho" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "Не удалось выполнить API CloudByte [%(cmd)s]. Состояние Http: %(status)s, " "Ошибка: %(error)s." msgid "Failed to execute common command." msgstr "Не удалось выполнить общую команду." #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "Не удалось экспортировать для тома: %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "Не удалось расширить том %(name)s, ошибка: %(msg)s." 
msgid "Failed to find QoSnode" msgstr "Не найден QoSnode" msgid "Failed to find Storage Center" msgstr "Не удалось найти Storage Center" msgid "Failed to find a vdisk copy in the expected pool." msgstr "Не найдена копия vdisk в ожидаемом пуле." msgid "Failed to find account for volume." msgstr "Не найдена учетная запись для тома." #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "Не найден набор файлов для пути %(path)s, вывод команды: %(cmdout)s." #, python-format msgid "Failed to find group snapshot named: %s" msgstr "Не найдена моментальная копия группы: %s" #, python-format msgid "Failed to find host %s." msgstr "Не найден хост %s." #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "Не найдена группа инициаторов iSCSI, содержащая %(initiator)s." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "Найти пул носителей для исходного тома %s не удалось." #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Не удалось получить сведения об учетной записи CloudByte [%s]." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "Не удалось получить целевые сведения LUN для LUN %s" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "Не удалось получить сведения о целевом объекте LUN для LUN %s." #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "Не удалось получить целевой список LUN для LUN %s" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "Не удалось получить ИД раздела для тома %(volume_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "" "Не удалось получить ИД моментальной копии RAID из моментальной копии " "%(snapshot_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "Не удалось получить ИД моментальной копии RAID из моментальной копии " "%(snapshot_id)s." msgid "Failed to get SplitMirror." msgstr "Не удалось получить SplitMirror." #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "Не удалось получить ресурс памяти. Система попытается получить ресурс памяти " "повторно. (ресурс: %(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "Не удалось получить все связи спецификаций QoS %s" msgid "Failed to get channel info." msgstr "Не удалось получить информацию о канале." #, python-format msgid "Failed to get code level (%s)." msgstr "Не удалось получить уровень кода (%s)." msgid "Failed to get device info." msgstr "Не удалось получить информацию об устройстве." #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "Не удалось получить домен: в массиве нет CPG (%s)." msgid "Failed to get image snapshots." msgstr "Не удалось получить моментальные копии образа." #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "" "Не удалось получить IP-адрес в канале %(channel_id)s с томом %(volume_id)s." msgid "Failed to get iqn info." msgstr "Не удалось получить информацию о iqn." msgid "Failed to get license info." msgstr "Не удалось получить информацию о лицензии." msgid "Failed to get lv info." msgstr "Не удалось получить информацию о логическом томе." 
msgid "Failed to get map info." msgstr "Не удалось получить информацию о карте связей." msgid "Failed to get migration task." msgstr "Не удалось получить задачу переноса." msgid "Failed to get model update from clone" msgstr "Не удалось получить обновление модели из копии" msgid "Failed to get name server info." msgstr "Не удалось получить информацию о серверах имен." msgid "Failed to get network info." msgstr "Не удалось получить информацию о сети." #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "Не удалось получить ИД нового раздела в новом пуле %(pool_id)s." msgid "Failed to get partition info." msgstr "Не удалось получить информацию о разделе." #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "Не удалось получить ИД пула с томом %(volume_id)s." #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" "Не удаётся получить информацию удаленного копирования для %(volume)s, " "причина: %(err)s." #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "" "Не удаётся получить информацию удаленного копирования для %(volume)s. " "Исключительная ситуация: %(err)s." msgid "Failed to get replica info." msgstr "Не удалось получить информацию о реплике." msgid "Failed to get show fcns database info." msgstr "Не удалось получить информацию из базы данных команды show fcns." msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" "Не удалось получить размер существующего тома %(vol). Передача тома под " "управление не выполнена." #, python-format msgid "Failed to get size of volume %s" msgstr "Не удалось получить размер тома %s" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "Не удалось получить моментальную копию для тома %s." msgid "Failed to get snapshot info." msgstr "Не удалось получить информацию о моментальной копии." #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "Не удалось получить целевой IQN для LUN %s" msgid "Failed to get target LUN of SplitMirror." msgstr "Не удалось получить целевой LUN SplitMirror." #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "Не удалось получить целевой портал для LUN %s" msgid "Failed to get targets" msgstr "Не удалось получить целевые объекты" msgid "Failed to get wwn info." msgstr "Не удалось получить информацию о WWN." #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "Не удалось получить, создать или добавить том %(volumeName)s для " "маскирующего представления %(maskingViewName)s. Сообщение об ошибке: " "%(errorMessage)s." msgid "Failed to identify volume backend." msgstr "Не удалось идентифицировать базовую программу тома." #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "" "Не удалось связать набор файлов для общей группы согласования %(cgname)s. " "Ошибка: %(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "Не удалось войти в массив %s (неправильное имя пользователя?)." #, python-format msgid "Failed to login for user %s." msgstr "Не удалось войти для пользователя %s." msgid "Failed to login with all rest URLs." msgstr "Не удалось войти через все URL REST." 
#, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "Не удалось создать запрос к конечной точке кластера Datera по следующей " "причине: %s" msgid "Failed to manage api volume flow." msgstr "Сбой управления потоком тома api." #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Сбой управления существующим %(type)s %(name)s: размер %(size)s не число с " "плавающей точкой." #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "Управление существующим томом %(name)s невозможно из-за ошибки при получении " "размера тома." #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "Управление существующим томом %(name)s невозможно, поскольку операцию " "переименования выполнить не удалось. Сообщение об ошибке: %(msg)s." #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Сбой управления существующего тома %(name)s: размер %(size)s не число с " "плавающей точкой." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "Сбой управления существующим томом, поскольку пул выбранного типа тома не " "соответствует общему ресурсу NFS, переданному в ссылке тома." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "Сбой управления существующим томом, поскольку пул выбранного типа тома не " "соответствует файловой системе, переданной в ссылке на том." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "" "Сбой управления существующим томом, поскольку пул выбранного типа тома не " "соответствует пулу хоста." #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "Не удаётся сделать управляемым существующий том из-за несовпадения группы " "ввода-вывода. Группа ввода-вывода тома, передаваемого в управление, - это " "%(vdisk_iogrp)s. Группа ввода-вывода выбранного типа - %(opt_iogrp)s." #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "Не удаётся сделать управляемым существующий том из-за несовпадения пулов " "тома и базовой системы. Пул тома, передаваемого в управление, - это " "%(vdisk_pool)s. Пул базовой системы - %(backend_pool)s." msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "Не удаётся сделать управляемым существующий том, который является сжатым, но " "тип которого указан как несжатый." msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "Не удаётся сделать управляемым существующий том, который является несжатым, " "но тип которого указан как сжатый." 
msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "Не удаётся сделать управляемым существующий том, который не включен в " "допустимую группу ввода-вывода." msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "Не удаётся сделать управляемым существующий том, который является " "расширенным, но тип которого указан как простой." msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "Не удаётся сделать управляемым существующий том, который является простым, " "но тип которого указан как расширенный." #, python-format msgid "Failed to manage volume %s." msgstr "Сбой управления томом %s." #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "Не удалось связать логическое устройство. (LDEV: %(ldev)s, LUN: %(lun)s, " "порт: %(port)s, ИД: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "Перенести том в первый раз не удалось." msgid "Failed to migrate volume for the second time." msgstr "Перенести том во второй раз не удалось." #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "Не удалось переместить связи LUN. Код возврата: %s" #, python-format msgid "Failed to move volume %s." msgstr "Не удалось переместить том %s." #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Не удалось открыть файл. (файл: %(file)s, код возврата: %(ret)s, stderr: " "%(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Ошибка анализа вывода интерфейса командной строки:\n" " команда: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "Выполнить синтаксический анализ опции конфигурации 'keystone_catalog_info' " "не удалось, она должна быть в формате <тип_службы>:<имя_службы>:" "<тип_конечной_точки>" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "Выполнить синтаксический анализ опции конфигурации 'swift_catalog_info' не " "удалось, она должна быть в формате <тип_службы>:<имя_службы>:" "<тип_конечной_точки>" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "Не удалось затребовать обнуленные страницы. (LDEV: %(ldev)s, причина: " "%(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "Не удалось удалить экспорт для тома %(volume)s: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "Не удалось удалить целевой объект iscsi для тома %(volume_id)s." #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Не удалось удалить том %(volumeName)s из группы согласования %(cgName)s. Код " "возврата: %(rc)lu. Ошибка: %(error)s." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "" "Не удалось удалить том %(volumeName)s из группы носителей по умолчанию." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." 
msgstr "" "Не удалось удалить том %(volumeName)s из группы носителей по умолчанию: " "%(volumeName)s." #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "Не удалось удалить том %(volumename)s из группы носителей по умолчанию для " "стратегии FAST %(fastPolicyName)s." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "Не удалось переименовать логический том %(name)s, сообщение об ошибке: " "%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "Извлечь активную конфигурацию распределения по зонам, %s, не удалось" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "" "Не удалось настроить идентификацию CHAP для целевого IQN %(iqn)s. " "Подробности: %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "Не удалось задать QoS для существующего тома %(name)s. Сообщение об ошибке: " "%(msg)s." msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "" "Не удалось задать атрибут Входящий пользователь для целевого объекта SCST." msgid "Failed to set partition." msgstr "Не удалось задать раздел." #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "Не удалось настроить права доступа для группы согласования %(cgname)s. " "Ошибка: %(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "Не удалось указать логическое устройство для тома %(volume_id)s, связь " "которого необходимо удалить." #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "Не удалось указать удаляемое логическое устройство. (метод: %(method)s, ИД: " "%(id)s)" msgid "Failed to terminate migrate session." msgstr "Завершить сеанс переноса не удалось." #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "Не удалось удалить привязку тома %(volume)s" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Не удалось удалить связь набора файлов для группы согласования %(cgname)s. " "Ошибка: %(excmsg)s." #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Не удалось удалить связь логического устройства. (LDEV: %(ldev)s, причина: " "%(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "Не удалось изменить группу согласования: %(cgName)s." #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "Не удалось обновить метаданные для тома: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "Не удалось обновить или удалить конфигурацию распределения по зонам" msgid "Failed to update or delete zoning configuration." msgstr "Не удалось обновить или удалить конфигурацию распределения по зонам." #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "Не удалось изменить qos_specs %(specs_id)s со спецификацией %(qos_specs)s." msgid "Failed to update quota usage while retyping volume." msgstr "Не удалось обновить использование квот при изменении типа тома." msgid "Failed to update snapshot." msgstr "Не удалось обновить моментальную копию." 
#, python-format
msgid "Failed updating model with driver provided model %(model)s"
msgstr "Не удалось обновить модель на основе модели %(model)s, предоставленной драйвером"

#, python-format
msgid "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s %(src_id)s metadata"
msgstr "Не удалось обновить метаданные тома %(vol_id)s с помощью указанных метаданных %(src_type)s %(src_id)s"

#, python-format
msgid "Failure creating volume %s."
msgstr "Сбой создания тома %s."

#, python-format
msgid "Failure getting LUN info for %s."
msgstr "Сбой получения информации LUN для %s."

#, python-format
msgid "Failure in update_volume_key_value_pair:%s"
msgstr "Сбой в update_volume_key_value_pair:%s"

#, python-format
msgid "Failure moving new cloned LUN to %s."
msgstr "Сбой при перемещении новой копии LUN в %s."

#, python-format
msgid "Failure staging LUN %s to tmp."
msgstr "Сбой промежуточного копирования LUN %s в tmp."

msgid "Fatal error: User not permitted to query NetApp volumes."
msgstr "Неустранимая ошибка: у пользователя нет прав доступа для запроса томов NetApp."

#, python-format
msgid "Fexvisor failed to add volume %(id)s due to %(reason)s."
msgstr "Fexvisor не удалось добавить том %(id)s, причина: %(reason)s."

#, python-format
msgid "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to %(ret)s."
msgstr "Fexvisor не удалось добавить том %(vol)s в группу %(group)s. Причина: %(ret)s."

#, python-format
msgid "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to %(ret)s."
msgstr "Fexvisor не удалось удалить том %(vol)s из группы %(group)s. Причина: %(ret)s."

#, python-format
msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s."
msgstr "Fexvisor не удалось удалить том %(id)s. Причина: %(reason)s."

#, python-format
msgid "Fibre Channel SAN Lookup failure: %(reason)s"
msgstr "Сбой поиска в SAN Fibre Channel: %(reason)s"

#, python-format
msgid "Fibre Channel Zone operation failed: %(reason)s"
msgstr "Сбой операции зоны Fibre Channel: %(reason)s"

#, python-format
msgid "Fibre Channel connection control failure: %(reason)s"
msgstr "Сбой управления соединением Fibre Channel: %(reason)s"

#, python-format
msgid "File %(file_path)s could not be found."
msgstr "Файл %(file_path)s не может быть найден."

#, python-format
msgid "File %(path)s has invalid backing file %(bfile)s, aborting."
msgstr "Файл %(path)s содержит недопустимый базовый файл %(bfile)s, принудительное завершение."

#, python-format
msgid "File already exists at %s."
msgstr "Файл уже существует в %s."

#, python-format
msgid "File already exists at: %s"
msgstr "Файл уже существует в %s"

msgid "Find host in hostgroup error."
msgstr "Ошибка поиска хоста в группе хостов."

msgid "Find host lun id error."
msgstr "Ошибка поиска ИД LUN хоста."

msgid "Find lun group from mapping view error."
msgstr "Ошибка поиска группы LUN из представления связей."

msgid "Find lun number error."
msgstr "Ошибка поиска номера LUN."

msgid "Find mapping view error."
msgstr "Ошибка поиска представления связей."

msgid "Find portgroup error."
msgstr "Ошибка поиска группы портов."

msgid "Find portgroup from mapping view error."
msgstr "Ошибка поиска группы портов из представления связей."

#, python-format
msgid "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version '%(version)s' is installed."
msgstr "Для стратегии Flash Cache требуется версия WSAPI '%(fcache_version)s'; установлена версия '%(version)s'."
#, python-format
msgid "Flexvisor assign volume failed.:%(id)s:%(status)s."
msgstr "Flexvisor не удалось назначить том: %(id)s:%(status)s."

#, python-format
msgid "Flexvisor assign volume failed:%(id)s:%(status)s."
msgstr "Flexvisor не удалось назначить том: %(id)s:%(status)s."

#, python-format
msgid "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s snapshot %(vgsid)s."
msgstr "Flexvisor не удалось найти моментальную копию тома %(id)s в группе %(vgid)s. Моментальная копия: %(vgsid)s."

#, python-format
msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s."
msgstr "Flexvisor не удалось создать том: %(volumeid)s:%(status)s."

#, python-format
msgid "Flexvisor failed deleting volume %(id)s: %(status)s."
msgstr "Flexvisor не удалось удалить том %(id)s: %(status)s."

#, python-format
msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s."
msgstr "Flexvisor не удалось добавить том %(id)s в группу %(cgid)s."

#, python-format
msgid "Flexvisor failed to assign volume %(id)s due to unable to query status by event id."
msgstr "Flexvisor не удалось назначить том %(id)s из-за невозможности запросить состояние по ИД события."

#, python-format
msgid "Flexvisor failed to assign volume %(id)s: %(status)s."
msgstr "Flexvisor не удалось назначить том %(id)s: %(status)s."

#, python-format
msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s."
msgstr "Flexvisor не удалось назначить том %(volume)s с IQN %(iqn)s."

#, python-format
msgid "Flexvisor failed to clone volume %(id)s: %(status)s."
msgstr "Flexvisor не удалось скопировать том %(id)s: %(status)s."

#, python-format
msgid "Flexvisor failed to clone volume (failed to get event) %(id)s."
msgstr "Flexvisor не удалось скопировать том (не удалось получить событие) %(id)s."

#, python-format
msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s."
msgstr "Flexvisor не удалось создать моментальную копию тома %(id)s: %(status)s."

#, python-format
msgid "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s."
msgstr "Flexvisor не удалось создать моментальную копию тома (не удалось получить событие) %(id)s."

#, python-format
msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s."
msgstr "Flexvisor не удалось создать том %(id)s в группе %(vgid)s."

#, python-format
msgid "Flexvisor failed to create volume %(volume)s: %(status)s."
msgstr "Flexvisor не удалось создать том %(volume)s: %(status)s."

#, python-format
msgid "Flexvisor failed to create volume (get event) %s."
msgstr "Flexvisor не удалось создать том (получить событие) %s."

#, python-format
msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s."
msgstr "Flexvisor не удалось создать том из моментальной копии %(id)s: %(status)s."

#, python-format
msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s."
msgstr "Flexvisor не удалось создать том из моментальной копии %(id)s:%(status)s."

#, python-format
msgid "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s."
msgstr "Flexvisor не удалось создать том из моментальной копии (не удалось получить событие) %(id)s."

#, python-format
msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s."
msgstr "Flexvisor не удалось удалить моментальную копию %(id)s: %(status)s."

#, python-format
msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s."
msgstr "Flexvisor не удалось удалить моментальную копию (не удалось получить событие) %(id)s."
msgstr "" "Flexvisor не удалось удалить моментальную копию (не удалось получить " "событие) %(id)s." #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor не удалось удалить том %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor не удалось расширить том %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor не удалось расширить том %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "Flexvisor не удалось расширить том (не удалось получить событие) %(id)s." #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "Flexvisor не удалось получить информацию о пуле %(id)s: %(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Flexvisor не удалось получить ИД моментальной копии тома %(id)s из группы " "%(vgid)s." #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor не удалось удалить том %(id)s из группы %(cgid)s." #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor не удалось породить том из моментальной копии %(id)s:%(status)s." #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor не удалось породить том из моментальной копии (не удалось получить " "событие) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "Flexvisor не удалось отменить назначение тома %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" "Flexvisor не удалось отменить назначение тома (получить событие) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor не удалось отменить назначение тома %(id)s: %(status)s." #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor не удалось найти информацию об исходном томе %(id)s." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "Flexvisor не удалось отменить назначение тома: %(id)s:%(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "Тому Flexvisor %(id)s не удалось присоединить группу %(vgid)s." #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "Папка %s не существует в устройстве Nexenta Store" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS не выполняется, состояние: %s." msgid "Gateway VIP is not set" msgstr "Не задан VIP шлюза" msgid "Get FC ports by port group error." msgstr "Ошибка получения портов FC по группе портов." msgid "Get FC ports from array error." msgstr "Ошибка получения портов Fibre Channel из массива." msgid "Get FC target wwpn error." msgstr "Ошибка получения целевого WWPN Fibre Channel." msgid "Get HyperMetroPair error." msgstr "Ошибка получения HyperMetroPair." msgid "Get LUN group by view error." msgstr "Ошибка получения группы LUN по представлению." msgid "Get LUNcopy information error." msgstr "Ошибка получения информации LUNcopy." msgid "Get QoS id by lun id error." msgstr "Ошибка получения ИД QoS по ИД LUN." msgid "Get QoS information error." msgstr "Ошибка получения информации QoS." 
msgid "Get QoS policy error." msgstr "Ошибка получения стратегии QoS." msgid "Get SplitMirror error." msgstr "Ошибка получения SplitMirror." msgid "Get active client failed." msgstr "Не удалось получить активного клиента." msgid "Get array info error." msgstr "Ошибка получения информации о массиве." msgid "Get cache by name error." msgstr "Ошибка получения кэша по имени." msgid "Get connected free FC wwn error." msgstr "Ошибка получения подключенного свободного WWN Fibre Channel." msgid "Get engines error." msgstr "Ошибка получения модулей." msgid "Get host initiators info failed." msgstr "Ошибка получения информации об инициаторах хоста." msgid "Get hostgroup information error." msgstr "Ошибка получения информации о группе хостов." msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "Ошибка получения информации о портах iSCSI. Проверьте целевой IP-адрес, " "настроенный в файле конфигурации huawei." msgid "Get iSCSI port information error." msgstr "Ошибка получения информации о портах iSCSI." msgid "Get iSCSI target port error." msgstr "Ошибка получения порта целевого объекта iSCSI." msgid "Get lun id by name error." msgstr "Ошибка получения ИД LUN по имени." msgid "Get lun migration task error." msgstr "Ошибка получения задачи переноса LUN." msgid "Get lungroup id by lun id error." msgstr "Ошибка получения ИД группы LUN по ИД LUN." msgid "Get lungroup information error." msgstr "Ошибка получения информации о группе LUN." msgid "Get migration task error." msgstr "Ошибка получения задачи переноса." msgid "Get pair failed." msgstr "Не удалось получить пару." msgid "Get partition by name error." msgstr "Ошибка получения раздела по имени." msgid "Get partition by partition id error." msgstr "Ошибка получения раздела по ИД раздела." msgid "Get port group by view error." msgstr "Ошибка получения группы портов по представлению." msgid "Get port group error." msgstr "Ошибка получения группы портов." msgid "Get port groups by port error." msgstr "Ошибка получения группы портов по порту." msgid "Get ports by port group error." msgstr "Ошибка получения портов по группе портов." msgid "Get remote device info failed." msgstr "Ошибка получения удаленного устройства." msgid "Get remote devices error." msgstr "Ошибка получения удаленных устройств." msgid "Get smartcache by cache id error." msgstr "Ошибка получения smartcache по ИД кэша." msgid "Get snapshot error." msgstr "Ошибка получения моментальной копии." msgid "Get snapshot id error." msgstr "Ошибка получения ИД моментальной копии." msgid "Get target IP error." msgstr "Ошибка получения целевого IP-адреса." msgid "Get target LUN of SplitMirror error." msgstr "Ошибка получения целевого LUN или SplitMirror." msgid "Get views by port group error." msgstr "Ошибка получения представлений по группе портов." msgid "Get volume by name error." msgstr "Ошибка получения тома по имени." msgid "Get volume error." msgstr "Ошибка получения тома." #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Не удается обновить метаданные Glance, ключ %(key)s существует для ИД тома " "%(volume_id)s" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "Не найдены метаданные glance для тома/моментальной копии %(id)s." 
#, python-format
msgid "Gluster config file at %(config)s doesn't exist"
msgstr "Файл конфигурации Gluster в %(config)s не существует"

#, python-format
msgid "Google Cloud Storage api failure: %(reason)s"
msgstr "Ошибка API Google Cloud Storage: %(reason)s"

#, python-format
msgid "Google Cloud Storage connection failure: %(reason)s"
msgstr "Ошибка связи с Google Cloud Storage: %(reason)s"

#, python-format
msgid "Google Cloud Storage oauth2 failure: %(reason)s"
msgstr "Ошибка oauth2 Google Cloud Storage: %(reason)s"

#, python-format
msgid "Got bad path information from DRBDmanage! (%s)"
msgstr "Получена неправильная информация о пути от DRBDmanage! (%s)"

msgid "HBSD error occurs."
msgstr "Ошибка HBSD."

msgid "HNAS has disconnected SSC"
msgstr "HNAS отключил SSC"

msgid "HPELeftHand url not found"
msgstr "URL HPELeftHand не найден"

#, python-format
msgid "HTTPS certificate verification was requested but cannot be enabled with purestorage module version %(version)s. Upgrade to a newer version to enable this feature."
msgstr "Запрошенная проверка сертификата HTTPS невозможна с версией %(version)s модуля purestorage. Для работы с этой функцией обновите версию."

#, python-format
msgid "Hash block size has changed since the last backup. New hash block size: %(new)s. Old hash block size: %(old)s. Do a full backup."
msgstr "Размер блока хэша изменился с момента последнего резервного копирования. Новый размер блока хэша: %(new)s. Прежний размер блока хэша: %(old)s. Необходимо полное резервное копирование."

#, python-format
msgid "Have not created %(tier_levels)s tier(s)."
msgstr "Не созданы слои %(tier_levels)s."

#, python-format
msgid "Hint \"%s\" not supported."
msgstr "Подсказка \"%s\" не поддерживается."

msgid "Host"
msgstr "Узел"

#, python-format
msgid "Host %(host)s could not be found."
msgstr "Узел %(host)s не найден."

#, python-format
msgid "Host %(host)s does not match x509 certificate contents: CommonName %(commonName)s."
msgstr "Хост %(host)s не соответствует содержимому сертификата x509: CommonName %(commonName)s."

#, python-format
msgid "Host %s has no FC initiators"
msgstr "У хоста %s нет инициаторов FC"

#, python-format
msgid "Host %s has no iSCSI initiator"
msgstr "У хоста %s нет инициатора iSCSI"

#, python-format
msgid "Host '%s' could not be found."
msgstr "Не удалось найти хост '%s'."

#, python-format
msgid "Host group with name %s not found"
msgstr "Не найдена группа хостов с именем %s"

#, python-format
msgid "Host group with ref %s not found"
msgstr "Не найдена группа хостов со ссылкой %s"

msgid "Host is NOT Frozen."
msgstr "Хост не заморожен."

msgid "Host is already Frozen."
msgstr "Хост уже заморожен."

msgid "Host not found"
msgstr "Узел не найден"

#, python-format
msgid "Host not found. Failed to remove %(service)s on %(host)s."
msgstr "Хост не найден. Не удалось удалить %(service)s на %(host)s."

#, python-format
msgid "Host replication_status must be %s to failover."
msgstr "Для аварийного переключения состояние репликации хоста должно быть %s."

#, python-format
msgid "Host type %s not supported."
msgstr "Тип хоста %s не поддерживается."

#, python-format
msgid "Host with ports %(ports)s not found."
msgstr "Не найден хост с портами %(ports)s."

msgid "Hypermetro and Replication can not be used in the same volume_type."
msgstr "Нельзя использовать Hypermetro и репликацию с одним и тем же типом тома."

#, python-format
msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s."
msgstr "Группа ввода-вывода %(iogrp)d недопустима. Доступные группы ввода-вывода: %(avail)s."
msgstr "" "Группа ввода-вывода %(iogrp)d недопустимая. Доступные группы ввода-вывода: " "%(avail)s." msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "IP-адрес/имя хоста API Blockbridge." msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "Если параметру сжатия присвоено значение True, необходимо также указать " "значение rsize (не равное -1)." msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "Если параметру nofmtdisk присвоено значение True, то rsize должен быть равен " "-1." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "Для flashsystem_connection_protocol указано недопустимое значение " "'%(prot)s': допустимые значения: %(enabled)s." msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "Указано недопустимое значение для IOTYPE: 0, 1 или 2." msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "" "Указано недопустимое значение для smarttier. Допустимые значения: 0, 1, 2 и " "3." msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "Для storwize_svc_vol_grainsize указано недопустимое значение: допустимые " "значения: 32, 64, 128 и 256." msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "Указано недопустимое значение для тонкого резервирования. Нельзя указывать " "тонкое и толстое резервирование одновременно." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Образ %(image_id)s не найден." #, python-format msgid "Image %(image_id)s is not active." msgstr "Образ %(image_id)s не активен." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Образ %(image_id)s недопустим: %(reason)s" msgid "Image location not present." msgstr "Не указано расположение образа." #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "Виртуальный размер образа %(image_size)d ГБ, он слишком большой для тома " "размером %(volume_size)d ГБ." msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "Возникла ошибка ImageBusy при удалении тома rbd. Причиной может быть сбой " "клиентского соединения. В этом случае повторите удаление через 30 с." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "Сбой импорта записи: не найдена служба резервного копирования для выполнения " "импорта. Запрос службы %(service)s" msgid "Incorrect request body format" msgstr "Неправильный формат тела запроса" msgid "Incorrect request body format." msgstr "Недопустимый формат тела запроса." msgid "Incremental backups exist for this backup." msgstr "Для этой резервной копии существует дополняющие резервные копии." #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Исключительная ситуация CLI Infortrend: %(err)s. Параметр: %(param)s (Код " "возврата: %(rc)s) (Вывод: %(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "Недопустимые параметры: начальный уровень: {}, стратегия: {}." 
msgid "Input type {} is not supported." msgstr "Тип ввода {} не поддерживается." msgid "Input volumes or snapshots are invalid." msgstr "Недопустимые входные тома или моментальные копии." msgid "Input volumes or source volumes are invalid." msgstr "Недопустимые входные или выходные тома." #, python-format msgid "Instance %(uuid)s could not be found." msgstr "Не удалось найти экземпляр %(uuid)s." msgid "Insufficient free space available to extend volume." msgstr "Недостаточно места для расширения тома." msgid "Insufficient privileges" msgstr "Недостаточно прав доступа" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "Интервал в секундах между повторными подключениями к кластеру ceph." #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "Недопустимые порты %(protocol)s %(port)s указаны в io_port_list." #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Недопустимый домен 3PAR: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "Недопустимое значение ALUA. Значение ALUA должно быть 1 или 0." msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" "Для операции резервного копирования rbd указаны недопустимые аргументы Ceph" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "Недопустимая моментальная копия группы согласования: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "Недопустимая группа согласования: %(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "Недопустимая группа согласования: состояние группы согласования должно быть " "available или error. Текущее состояние: in-use" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "Недопустимая группа согласования: состояние группы согласования должно быть " "available, однако текущее состояние - %s." msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" "Недопустимая группа согласования: нет хоста для создания группы согласования" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "Обнаружена недопустимая версия API HPELeftHand: %(found)s. Версия " "%(minimum)s или выше требуется для поддержки управления/отмены управления." #, python-format msgid "Invalid IP address format: '%s'" msgstr "Недопустимый формат IP-адреса: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "Обнаружена недопустимая спецификация QoS при получении стратегии QoS для " "тома %s" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "Недопустимый целевой объект репликации: %(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "Недопустимый тип идентификации VNX: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Недопустимая спецификация общего ресурса Virtuozzo Storage: %r. Должно быть: " "[MDS1[,MDS2],...:/]<ИМЯ-КЛАСТЕРА>[:ПАРОЛЬ]." 
#, python-format
msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required"
msgstr "Недопустимая версия XtremIO %(cur)s, требуется версия не ниже %(min)s"

#, python-format
msgid "Invalid allocated quotas defined for the following project quotas: %s"
msgstr "Неправильно определены выделенные квоты для следующих квот проекта: %s"

msgid "Invalid argument"
msgstr "Недопустимый аргумент"

msgid "Invalid argument - negative seek offset."
msgstr "Недопустимый аргумент - отрицательное смещение функции seek."

#, python-format
msgid "Invalid argument - whence=%s not supported"
msgstr "Недопустимый аргумент - whence=%s не поддерживается"

#, python-format
msgid "Invalid argument - whence=%s not supported."
msgstr "Недопустимый аргумент - whence=%s не поддерживается."

#, python-format
msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s."
msgstr "Недопустимый режим подключения '%(mode)s' для тома %(volume_id)s."

#, python-format
msgid "Invalid auth key: %(reason)s"
msgstr "Недопустимый ключ идентификации: %(reason)s"

#, python-format
msgid "Invalid backup: %(reason)s"
msgstr "Недопустимая резервная копия: %(reason)s"

#, python-format
msgid "Invalid barbican api url: version is required, e.g. 'http[s]://<ipaddr>|<fqdn>[:port]/<version>' url specified is: %s"
msgstr "Недопустимый url barbican api: необходимо указать версию, например, 'http[s]://<ip-адрес>|<полное-имя>[:порт]/<версия>'; указан url: %s"

msgid "Invalid cgsnapshot"
msgstr "Недопустимая моментальная копия группы согласования"

msgid "Invalid chap user details found in CloudByte storage."
msgstr "Обнаружена недопустимая информация о пользователе chap в хранилище CloudByte."

#, python-format
msgid "Invalid connection initialization response of volume %(name)s"
msgstr "Недопустимый ответ об инициализации соединения от тома %(name)s"

#, python-format
msgid "Invalid connection initialization response of volume %(name)s: %(output)s"
msgstr "Недопустимый ответ об инициализации соединения от тома %(name)s: %(output)s"

#, python-format
msgid "Invalid content type %(content_type)s."
msgstr "Недопустимый тип содержимого %(content_type)s."

msgid "Invalid credentials"
msgstr "Недопустимые идентификационные данные"

#, python-format
msgid "Invalid directory: %s"
msgstr "Недопустимый каталог: %s"

#, python-format
msgid "Invalid disk adapter type: %(invalid_type)s."
msgstr "Недопустимый тип адаптера диска: %(invalid_type)s."

#, python-format
msgid "Invalid disk backing: %s."
msgstr "Недопустимая база диска: %s."

#, python-format
msgid "Invalid disk type: %(disk_type)s."
msgstr "Недопустимый тип диска: %(disk_type)s."

#, python-format
msgid "Invalid disk type: %s."
msgstr "Недопустимый тип диска: %s."

#, python-format
msgid "Invalid host: %(reason)s"
msgstr "Недопустимый хост: %(reason)s"

#, python-format
msgid "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or greater required. Run \"pip install --upgrade python-3parclient\" to upgrade the hpe3parclient."
msgstr "Обнаружена недопустимая версия hpe3parclient (%(found)s). Требуется версия %(minimum)s или более поздняя. Выполните команду \"pip install --upgrade python-3parclient\" для обновления hpe3parclient."

#, python-format
msgid "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or greater required. Run 'pip install --upgrade python-lefthandclient' to upgrade the hpelefthandclient."
msgstr "Обнаружена недопустимая версия hpelefthandclient (%(found)s). Требуется версия %(minimum)s или более поздняя. Выполните команду 'pip install --upgrade python-lefthandclient' для обновления hpelefthandclient."
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr "Недопустимый href образа %(image_href)s."

msgid "Invalid image identifier or unable to access requested image."
msgstr "Недопустимый идентификатор образа или отсутствует доступ к запрошенному образу."

msgid "Invalid imageRef provided."
msgstr "Указан неверный imageRef."

msgid "Invalid initiator value received"
msgstr "Недопустимое значение инициатора"

msgid "Invalid input"
msgstr "Недопустимый ввод"

#, python-format
msgid "Invalid input received: %(reason)s"
msgstr "Получены недопустимые входные данные: %(reason)s"

#, python-format
msgid "Invalid is_public filter [%s]"
msgstr "Неверный фильтр is_public [%s]"

#, python-format
msgid "Invalid lun type %s is configured."
msgstr "Настроен недопустимый тип lun %s."

#, python-format
msgid "Invalid metadata size: %(reason)s"
msgstr "Недопустимый размер метаданных: %(reason)s"

#, python-format
msgid "Invalid metadata: %(reason)s"
msgstr "Недопустимые метаданные: %(reason)s"

#, python-format
msgid "Invalid mount point base: %s"
msgstr "Недопустимая база точки монтирования: %s"

#, python-format
msgid "Invalid mount point base: %s."
msgstr "Недопустимая база точки монтирования: %s."

#, python-format
msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'."
msgstr "Недопустимое новое имя snapCPG для изменения типа. new_snap_cpg='%s'."

#, python-format
msgid "Invalid port number %(config)s for Coho rpc port"
msgstr "Недопустимый номер порта %(config)s для порта RPC Coho"

#, python-format
msgid "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3."
msgstr "Настроен недопустимый тип предварительной выборки '%s'. Значение PrefetchType должно быть 0, 1, 2 или 3."

#, python-format
msgid "Invalid qos specs: %(reason)s"
msgstr "Недопустимая спецификация QoS: %(reason)s"

msgid "Invalid request to attach volume to an invalid target"
msgstr "Недопустимый запрос на подключение тома к недопустимому целевому объекту"

msgid "Invalid request to attach volume with an invalid mode. Attaching mode should be 'rw' or 'ro'"
msgstr "Недопустимый запрос на подключение тома с недопустимым режимом. Режим подключения должен быть 'rw' или 'ro'"

#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr "Неверный срок резервирования %(expire)s."

msgid "Invalid response header from RPC server"
msgstr "Недопустимый заголовок ответа от сервера RPC"

#, python-format
msgid "Invalid secondary id %s."
msgstr "Недопустимый вторичный ИД %s."

#, python-format
msgid "Invalid secondary_backend_id specified. Valid backend id is %s."
msgstr "Указан недопустимый secondary_backend_id. Допустимый ИД базовой системы: %s."

msgid "Invalid service catalog json."
msgstr "Недопустимый json каталога службы."

msgid "Invalid sheepdog cluster status."
msgstr "Недопустимое состояние кластера sheepdog."

#, python-format
msgid "Invalid snapshot: %(reason)s"
msgstr "Недопустимая моментальная копия: %(reason)s"

#, python-format
msgid "Invalid status: '%s'"
msgstr "Недопустимое состояние: '%s'"

#, python-format
msgid "Invalid storage pool %s requested. Retype failed."
msgstr "Запрошен недопустимый пул памяти %s. Сбой изменения типа."

#, python-format
msgid "Invalid storage pool %s specified."
msgstr "Указан недопустимый пул памяти %s."

msgid "Invalid storage pool is configured."
msgstr "Настроен недопустимый пул памяти."
#, python-format
msgid "Invalid synchronize mode specified, allowed mode is %s."
msgstr "Указан недопустимый режим синхронизации. Допустимый режим: %s."

msgid "Invalid transport type."
msgstr "Недопустимый тип транспорта."

#, python-format
msgid "Invalid update setting: '%s'"
msgstr "Недопустимый параметр обновления: '%s'"

#, python-format
msgid "Invalid url: must be in the form 'http[s]://<ipaddr>|<fqdn>[:port]/<version>', url specified is: %s"
msgstr "Недопустимый url: вместо формата 'http[s]://<ip-адрес>|<полное-имя>[:порт]/<версия>' указан url: %s"

#, python-format
msgid "Invalid value '%s' for force."
msgstr "Неверное значение '%s' для принудительного применения."

#, python-format
msgid "Invalid value '%s' for force. "
msgstr "Недопустимое значение '%s' для принудительного использования. "

#, python-format
msgid "Invalid value '%s' for is_public. Accepted values: True or False."
msgstr "Недопустимое значение '%s' для is_public. Допустимые значения: True или False."

#, python-format
msgid "Invalid value '%s' for skip_validation."
msgstr "Недопустимое значение '%s' для skip_validation."

#, python-format
msgid "Invalid value for 'bootable': '%s'"
msgstr "Недопустимое значение для 'bootable': '%s'"

#, python-format
msgid "Invalid value for 'force': '%s'"
msgstr "Недопустимое значение для 'force': '%s'"

#, python-format
msgid "Invalid value for 'readonly': '%s'"
msgstr "Недопустимое значение для 'readonly': '%s'"

msgid "Invalid value for 'scheduler_max_attempts', must be >=1"
msgstr "Недопустимое значение для 'scheduler_max_attempts', значение должно быть >=1"

msgid "Invalid value for NetApp configuration option netapp_host_type."
msgstr "Недопустимое значение параметра конфигурации netapp_host_type NetApp."

msgid "Invalid value for NetApp configuration option netapp_lun_ostype."
msgstr "Недопустимое значение параметра конфигурации netapp_lun_ostype NetApp."

#, python-format
msgid "Invalid value for age, %(age)s"
msgstr "Недопустимое значение возраста: %(age)s"

#, python-format
msgid "Invalid value: \"%s\""
msgstr "Недопустимое значение: \"%s\""

#, python-format
msgid "Invalid volume size provided for create request: %s (size argument must be an integer (or string representation of an integer) and greater than zero)."
msgstr "В запросе на создание указан недопустимый размер тома: %s (аргумент размера должен быть целым числом или строковым представлением целого числа и быть больше нуля)."

#, python-format
msgid "Invalid volume type: %(reason)s"
msgstr "Недопустимый тип тома: %(reason)s"

#, python-format
msgid "Invalid volume: %(reason)s"
msgstr "Недопустимый том: %(reason)s"

#, python-format
msgid "Invalid volume: Cannot add volume %(volume_id)s to consistency group %(group_id)s because volume is in an invalid state: %(status)s. Valid states are: ('available', 'in-use')."
msgstr "Недопустимый том: нельзя добавить том %(volume_id)s в группу согласования %(group_id)s, поскольку он находится в недопустимом состоянии %(status)s. Допустимые состояния: ('available', 'in-use')."

#, python-format
msgid "Invalid volume: Cannot add volume %(volume_id)s to consistency group %(group_id)s because volume type %(volume_type)s is not supported by the group."
msgstr "Недопустимый том: нельзя добавить том %(volume_id)s в группу согласования %(group_id)s, поскольку тип тома %(volume_type)s не поддерживается группой."

#, python-format
msgid "Invalid volume: Cannot add volume fake-volume-uuid to consistency group %(group_id)s because volume cannot be found."
msgstr "Недопустимый том: нельзя добавить том fake-volume-uuid в группу согласования %(group_id)s, поскольку он не найден."
msgstr "" "Недопустимый том: нельзя добавить том fake-volume-uuid в группу согласования " "%(group_id)s, поскольку он не найден." #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "Недопустимый том: нельзя удалить том fake-volume-uuid из группы согласования " "%(group_id)s, поскольку он отсутствует в этой группе." #, python-format msgid "Invalid volume_type passed: %s." msgstr "Передано недопустимое значение volume_type: %s." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "Указано недопустимое значение volume_type: %s (запрошенный тип несовместим " "или совпадает с исходным томом, или не указан аргумент типа)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "Указано недопустимое значение volume_type: %s (запрошенный тип несовместим, " "рекомендуется убрать аргумент типа)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "Указано недопустимое значение volume_type: %s (запрошенный тип должен " "поддерживаться данной группой согласования)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "Недопустимый формат wwpns %(wwpns)s" msgid "Invoking web service failed." msgstr "Ошибка вызова веб-службы." msgid "Issue encountered waiting for job." msgstr "Во время ожидания задания обнаружена неполадка." msgid "Issue encountered waiting for synchronization." msgstr "Во время ожидания синхронизации обнаружена неполадка." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "Переключение после сбоя невозможно, так как репликация настроена неверно." msgid "Item not found" msgstr "объект не найден" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "Не найден ИД задания в ответе на запрос создания тома CloudByte [%s]." #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "Не найден ИД задания в ответе на запрос удаления тома CloudByte [%s]." msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "Имена ключей могут содержать только алфавитно-цифровые символы, символы " "подчеркивания, точки, двоеточия и дефисы." #, python-format msgid "KeyError: %s" msgstr "KeyError: %s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "Поддержка вложенных квот доступна в keystone версии не ниже 3." #, python-format msgid "LU does not exist for volume: %s" msgstr "LU не существует для тома: %s" msgid "LUN export failed!" msgstr "Сбой экспорта LUN!" msgid "LUN id({}) is not valid." msgstr "Недопустимый LUN id({})." msgid "LUN map overflow on every channel." msgstr "Переполнение карты LUN на каждом канале." #, python-format msgid "LUN not found with given ref %s." msgstr "LUN не найден по данной ссылке %s." msgid "LUN number ({}) is not an integer." msgstr "Номер LUN ({}) не является целым числом." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "" "Номер LUN в ИД канала %(ch_id)s выходит за пределы диапазона допустимых " "значений." #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. 
#, python-format
msgid "Last %s cinder syslog entries:-"
msgstr "Последние %s записей системного протокола cinder:-"

msgid "LeftHand cluster not found"
msgstr "Кластер LeftHand не найден"

msgid "License is unavailable."
msgstr "Лицензия недоступна."

#, python-format
msgid "Line %(dis)d : %(line)s"
msgstr "Строка %(dis)d : %(line)s"

msgid "Link path already exists and it's not a symlink"
msgstr "Путь ссылки уже существует и не является символьной ссылкой"

#, python-format
msgid "Linked clone of source volume not supported in state: %s."
msgstr "Связанный дубликат исходного тома не поддерживается в состоянии %s."

msgid "Lock acquisition failed."
msgstr "Не удается установить блокировку."

msgid "Logout session error."
msgstr "Ошибка завершения сеанса."

msgid "Lookup service not configured. Config option for fc_san_lookup_service needs to specify a concrete implementation of the lookup service."
msgstr "Служба поиска не настроена. В параметре конфигурации fc_san_lookup_service необходимо указать конкретную реализацию службы поиска."

msgid "Lun migration error."
msgstr "Ошибка переноса LUN."

#, python-format
msgid "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not same."
msgstr "MD5 объекта %(object_name)s до: %(md5)s и после: %(etag)s не совпадают."

#, python-format
msgid "MSG_DENIED: %r"
msgstr "MSG_DENIED: %r"

#, python-format
msgid "MSG_DENIED: AUTH_ERROR: %r"
msgstr "MSG_DENIED: AUTH_ERROR: %r"

#, python-format
msgid "MSG_DENIED: RPC_MISMATCH: %r"
msgstr "MSG_DENIED: RPC_MISMATCH: %r"

#, python-format
msgid "Malformed fcns output string: %s"
msgstr "Неправильный формат строки вывода fcns: %s"

#, python-format
msgid "Malformed message body: %(reason)s"
msgstr "Неправильное тело сообщения: %(reason)s"

#, python-format
msgid "Malformed nameserver string: %s"
msgstr "Неверный формат строки сервера имен: %s"

msgid "Malformed request body"
msgstr "Неправильное тело запроса"

msgid "Malformed request body."
msgstr "Неверный формат тела запроса."

msgid "Malformed request url"
msgstr "Неверный формат url запроса"

#, python-format
msgid "Malformed response to command %(cmd)s: %(reason)s"
msgstr "Неправильный ответ на команду %(cmd)s: %(reason)s"

msgid "Malformed scheduler_hints attribute"
msgstr "Неверный формат атрибута scheduler_hints"

#, python-format
msgid "Malformed show fcns database string: %s"
msgstr "Неправильный формат строки базы данных команды show fcns: %s"

#, python-format
msgid "Malformed zone configuration: (switch=%(switch)s zone_config=%(zone_config)s)."
msgstr "Неверный формат конфигурации зон: (коммутатор=%(switch)s конфигурация зон=%(zone_config)s)."

#, python-format
msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)."
msgstr "Неверный формат состояния зоны: (коммутатор=%(switch)s конфигурация зон=%(zone_config)s)."

msgid "Manage existing get size requires 'id'."
msgstr "Управление существующим - для получения размера требуется 'id'."

msgid "Manage existing snapshot not implemented."
msgstr "Управление существующей моментальной копией не реализовано."
#, python-format
msgid "Manage existing volume failed due to invalid backend reference %(existing_ref)s: %(reason)s"
msgstr "Сбой управления существующим томом: недопустимая ссылка на базовую систему %(existing_ref)s: %(reason)s"

#, python-format
msgid "Manage existing volume failed due to volume type mismatch: %(reason)s"
msgstr "Сбой управления существующим томом из-за несоответствия типа тома: %(reason)s"

msgid "Manage existing volume not implemented."
msgstr "Управление существующим томом не реализовано."

msgid "Manage existing volume requires 'source-id'."
msgstr "Для управления существующим томом требуется 'source-id'."

#, python-format
msgid "Manage volume is not supported if FAST is enabled. FAST policy: %(fastPolicyName)s."
msgstr "Управление томом не поддерживается, если включен режим FAST. Стратегия FAST: %(fastPolicyName)s."

msgid "Managing of snapshots to failed-over volumes is not allowed."
msgstr "Передача управления моментальными копиями томам, переключенным после сбоя, не разрешена."

msgid "Map info is None due to array version not supporting hypermetro."
msgstr "Пустая информация карты: версия массива не поддерживает hypermetro."

#, python-format
msgid "Mapping %(id)s prepare failed to complete within the allotted %(to)d seconds timeout. Terminating."
msgstr "Подготовка преобразования %(id)s не выполнена за отведенный тайм-аут %(to)d секунд. Завершается."

#, python-format
msgid "Masking view %(maskingViewName)s was not deleted successfully"
msgstr "Маскирующее представление %(maskingViewName)s не было успешно удалено"

#, python-format
msgid "Maximum number of backups allowed (%(allowed)d) exceeded"
msgstr "Превышено максимальное число резервных копий (%(allowed)d)"

#, python-format
msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded"
msgstr "Превышено максимально разрешенное число моментальных копий (%(allowed)d)"

#, python-format
msgid "Maximum number of volumes allowed (%(allowed)d) exceeded for quota '%(name)s'."
msgstr "Превышено максимально разрешенное число томов (%(allowed)d) для квоты '%(name)s'."

#, python-format
msgid "May specify only one of %s"
msgstr "Можно указать только одно из следующих значений: %s"

msgid "Metadata backup already exists for this volume"
msgstr "Резервная копия метаданных уже существует для этого тома"

#, python-format
msgid "Metadata backup object '%s' already exists"
msgstr "Объект резервной копии метаданных '%s' уже существует"

msgid "Metadata item was not found"
msgstr "Элемент метаданных не найден"

msgid "Metadata item was not found."
msgstr "Элемент метаданных не найден."

#, python-format
msgid "Metadata property key %s greater than 255 characters"
msgstr "Ключ свойства метаданных %s длиннее 255 символов"

#, python-format
msgid "Metadata property key %s value greater than 255 characters"
msgstr "Значение ключа свойства метаданных %s длиннее 255 символов"

msgid "Metadata property key blank"
msgstr "Ключ свойства метаданных пуст"

msgid "Metadata property key blank."
msgstr "Пустой ключ свойства метаданных."

msgid "Metadata property key greater than 255 characters."
msgstr "Длина ключа свойства метаданных превышает 255 символов."

msgid "Metadata property value greater than 255 characters."
msgstr "Длина значения свойства метаданных превышает 255 символов."

msgid "Metadata restore failed due to incompatible version"
msgstr "Не удалось восстановить метаданные: несовместимая версия"

msgid "Metadata restore failed due to incompatible version."
msgstr "Не удалось восстановить метаданные: несовместимая версия."
msgstr "Не удалось восстановить метаданные: несовместимая версия." #, python-format msgid "Migrate volume %(src)s failed." msgstr "Не удалось перенести том %(src)s." #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "Не удалось перенести том между исходным томом %(src)s и целевым томом " "%(dst)s." #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "Миграция LUN %s остановлена или возникла неполадка." msgid "MirrorView/S enabler is not installed." msgstr "Программа включения MirrorView/S не установлена." msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "Отсутствует модуль python 'purestorage', убедитесь, что библиотека " "установлена и нет." msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "Не указан параметр конфигурации SAN Fibre Channel - fc_fabric_names" msgid "Missing request body" msgstr "Отсутствует тело запроса" msgid "Missing request body." msgstr "Отсутствует тело запроса." #, python-format msgid "Missing required element '%s' in request body" msgstr "В теле запроса отсутствует обязательный элемент %s." #, python-format msgid "Missing required element '%s' in request body." msgstr "В теле запроса отсутствует обязательный элемент '%s'." msgid "Missing required element 'consistencygroup' in request body." msgstr "В теле запроса отсутствует обязательный элемент 'consistencygroup'." msgid "Missing required element 'host' in request body." msgstr "В теле запроса отсутствует обязательный элемент 'host'." msgid "Missing required element quota_class_set in request body." msgstr "Отсутствует требуемый параметр quota_class_set в теле запроса." msgid "Missing required element snapshot in request body." msgstr "В теле запроса отсутствует обязательный элемент snapshot." msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "Обнаружено несколько значений SerialNumber, но только одно ожидалось для " "этой операции. Измените файл конфигурации EMC." #, python-format msgid "Multiple copies of volume %s found." msgstr "Обнаружено несколько копий тома %s." #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "Для '%s' найдено несколько совпадений, для более конкретного поиска " "используйте ИД." msgid "Multiple profiles found." msgstr "Обнаружено несколько профайлов." msgid "Must implement a fallback schedule" msgstr "Необходимо реализовать резервное расписание" msgid "Must implement find_retype_host" msgstr "Необходимо реализовать implement find_retype_host" msgid "Must implement host_passes_filters" msgstr "Необходимо реализовать host_passes_filters" msgid "Must implement schedule_create_consistencygroup" msgstr "Необходимо реализовать schedule_create_consistencygroup" msgid "Must implement schedule_create_volume" msgstr "Необходимо реализовать schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "Необходимо реализовать schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." msgstr "В lsfabric должно быть передано глобальное имя порта или хост." msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." 
msgstr "" "Эту команду может выполнять только администратор облака с использованием " "Keystone policy.json, где администратору облака предоставлены права на вывод " "списка проектов и получение любого проекта." msgid "Must specify 'connector'" msgstr "Необходимо указать 'connector'" msgid "Must specify 'connector'." msgstr "Необходимо указать 'connector'." msgid "Must specify 'host'." msgstr "Необходимо указать 'host'." msgid "Must specify 'new_volume'" msgstr "Необходимо указать 'new_volume'" msgid "Must specify 'status'" msgstr "Необходимо указать 'status'" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "Необходимо указать 'status', 'attach_status' или 'migration_status' для " "обновления." msgid "Must specify a valid attach status" msgstr "Необходимо указать допустимое состояние вложения" msgid "Must specify a valid migration status" msgstr "Необходимо указать допустимое состояние переноса" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "Необходимо указать допустимого пользователя %(valid)s, значение " "'%(persona)s' недопустимо." #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "Необходимо указать допустимый тип предоставления ресурсов %(valid)s, " "значение '%(prov)s' недопустимо." msgid "Must specify a valid status" msgstr "Необходимо указать допустимое состояние" msgid "Must specify an ExtensionManager class" msgstr "Необходимо указать класс ExtensionManager" msgid "Must specify bootable in request." msgstr "Необходимо указать загружаемый файл в запросе." msgid "Must specify protection domain name or protection domain id." msgstr "Необходимо указать имя или ИД домена защиты." msgid "Must specify readonly in request." msgstr "В запросе должен быть указан параметр readonly." msgid "Must specify snapshot source-name or source-id." msgstr "Необходимо указать source-name или source-id моментальной копии." msgid "Must specify source-name or source-id." msgstr "Необходимо указать source-name или source-id." msgid "Must specify storage pool name or id." msgstr "Необходимо указать имя или ИД пула памяти." msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "Необходимо указать пулы памяти. Опция: sio_storage_pools." msgid "Must supply a positive value for age" msgstr "Должно быть указано положительное значение возраста" msgid "Must supply a positive, non-zero value for age" msgstr "" "В качестве возраста необходимо указать положительное число, не равное 0" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "Недопустимая конфигурация NAS '%(name)s=%(value)s'. Допустимые значения: " "'auto', 'true' или 'false'" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "Файл конфигурации NFS в %(config)s не существует" #, python-format msgid "NFS file %s not discovered." msgstr "Файл NFS %s не найден." msgid "NFS file could not be discovered." msgstr "Не удалось найти файл NFS." msgid "NaElement name cannot be null." msgstr "Имя NaElement не может быть пустым." msgid "Name" msgstr "Имя" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "Параметры name, description, add_volumes и remove_volumes не могут быть все " "пустыми в теле запроса." 
msgid "Need non-zero volume size" msgstr "Требуется ненулевой размер тома" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "Ни MSG_DENIED, ни MSG_ACCEPTED: %r" msgid "NetApp Cinder Driver exception." msgstr "Исключительная ситуация драйвера NetApp Cinder." #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "Новый размер расширения должен превышать текущий размер. (текущий размер: " "%(size)s, расширенный: %(new_size)s)." #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "Новый размер должен быть больше фактического размера в базовой системе " "хранения. Фактический размер: %(oldsize)s, новый размер: %(newsize)s." msgid "New volume size must be specified as an integer." msgstr "Новый размер тома необходимо указывать в виде целого числа." msgid "New volume type must be specified." msgstr "Должен быть указан тип нового тома." msgid "New volume type not specified in request_spec." msgstr "Новый тип тома не указан в request_spec." #, python-format msgid "New volume_type same as original: %s." msgstr "Новое значение volume_type совпадает с прежним: %s." msgid "Nimble Cinder Driver exception" msgstr "Исключительная ситуация драйвера Nimble Cinder" msgid "No FC initiator can be added to host." msgstr "Инициатор Fibre Channel не добавлен в хост." msgid "No FC port connected to fabric." msgstr "Никакой порт Fibre Channel не подключен к фабрике." msgid "No FCP targets found" msgstr "Не найден ы целевые объекты FCP" msgid "No Port Group elements found in config file." msgstr "Элементы группы портов не найдены в файле конфигурации." msgid "No VF ID is defined in the configuration file." msgstr "В файле конфигурации не задан VF ID." msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "Нет активных порталов iSCSI с указанными IP-адресами iSCSI" #, python-format msgid "No available service named %s" msgstr "Нет доступной службы с именем %s" #, python-format msgid "No backup with id %s" msgstr "Отсутствует резервная копия с ИД %s" msgid "No backups available to do an incremental backup." msgstr "Нет резервных копий для создания дополняющей резервной копии." msgid "No big enough free disk" msgstr "Отсутствуют достаточно большие свободные диски" #, python-format msgid "No cgsnapshot with id %s" msgstr "Нет моментальной копии группы согласования с ИД %s" msgid "No cinder entries in syslog!" msgstr "Нет записей cinder в системном протоколе!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "" "В утилите для работы с файловой системой не найден дубликат LUN с именем %s" msgid "No config node found." msgstr "Не найден узел конфигурации." #, python-format msgid "No consistency group with id %s" msgstr "Нет группы согласования с ИД %s" #, python-format msgid "No element by given name %s." msgstr "Не найден элемент с именем %s." msgid "No errors in logfiles!" msgstr "Нет ошибок в файлах протоколов!" #, python-format msgid "No file found with %s as backing file." msgstr "Не найден файл с %s в качестве базового файла." #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "Не осталось свободных ИД LUN. Максимальное число томов, которое можно " "подключать к хосту (%s) превышено." 
msgid "No free disk" msgstr "Отсутствуют свободные диски" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "Не найден правильный портал iscsi в указанном списке %s." #, python-format msgid "No good iscsi portals found for %s." msgstr "Не найдены правильные порталы iscsi для %s." #, python-format msgid "No host to create consistency group %s." msgstr "Нет хоста для создания группы согласования %s." msgid "No iSCSI-enabled ports on target array." msgstr "В целевом массиве нет портов с поддержкой iSCSI." msgid "No image_name was specified in request." msgstr "В запросе не указан параметр image_name." msgid "No initiator connected to fabric." msgstr "Никакой инициатор не подключен к фабрике." #, python-format msgid "No initiator group found for initiator %s" msgstr "Группа инициатора не найдена для инициатора %s" msgid "No initiators found, cannot proceed" msgstr "Инициаторы не найдены продолжение работы невозможно" #, python-format msgid "No interface found on cluster for ip %s" msgstr "В кластере не найден интерфейс для ip-адреса %s" msgid "No ip address found." msgstr "Не найден IP-адрес." msgid "No iscsi auth groups were found in CloudByte." msgstr "Не найдены группы идентификации iSCSI в CloudByte." msgid "No iscsi initiators were found in CloudByte." msgstr "В CloudByte не найдены инициаторы iscsi." #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "Не найдена служба iscsi для тома CloudByte [%s]." msgid "No iscsi services found in CloudByte storage." msgstr "Службы iscsi не найдены в хранилище CloudByte." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "Не указан файл ключа, и невозможно загрузить ключ из %(cert)s %(e)s." msgid "No mounted Gluster shares found" msgstr "Не обнаружено смонтированных общих ресурсов Gluster" msgid "No mounted NFS shares found" msgstr "Не обнаружено смонтированных общих ресурсов NFS" msgid "No mounted SMBFS shares found." msgstr "Не обнаружено смонтированных общих ресурсов SMBFS." msgid "No mounted Virtuozzo Storage shares found" msgstr "Не найдены смонтированные общие ресурсы Virtuozzo Storage" msgid "No mounted shares found" msgstr "Не обнаружено смонтированных общих ресурсов" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "Не найден узел в группе ввода-вывода %(gid)s для тома %(vol)s." msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "Нет пулов для предоставления ресурсов томам. Убедитесь, что правильно " "настроен параметр конфигурации netapp_pool_name_search_pattern." msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "Не получен ответ из вызова API идентификации пользователей iSCSI списка " "хранилища CloudByte." msgid "No response was received from CloudByte storage list tsm API call." msgstr "Вызов API tsm списка хранилища CloudByte не вернул ответа." msgid "No response was received from CloudByte's list filesystem api call." msgstr "Вызов api файловой системы списка CloudByte не вернул ответа." msgid "No service VIP configured and no nexenta_client_address" msgstr "Не настроен VIP службы и не задан nexenta_client_address" #, python-format msgid "No snap found with %s as backing file." msgstr "Не найдена моментальная копия с %s в качестве базового файла." #, python-format msgid "No snapshot image found in snapshot group %s." 
msgstr "Не найден образ моментальной копии в группе моментальных копий %s." #, python-format msgid "No snapshots could be found on volume %s." msgstr "Не найдены моментальные копии для тома %s." #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "Не указаны исходные моментальные копии для создания группы согласования %s." #, python-format msgid "No storage path found for export path %s" msgstr "Не найден путь хранения для пути экспорта %s" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "Нет такой спецификации QoS: %(specs_id)s." msgid "No suitable discovery ip found" msgstr "Подходящий поисковый IP не найден" #, python-format msgid "No support to restore backup version %s" msgstr "Нет поддержки восстановления резервной версии %s" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "Не найден target id для тома %(volume_id)s." msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "На хосте нет неиспользуемых ИД LUN. Включены множественные подключения, а " "это требует, чтобы все ИД LUN были уникальны в пределах всей группы хостов." #, python-format msgid "No valid host was found. %(reason)s" msgstr "Допустимый узел не найден. %(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "Нет допустимых хостов для тома %(id)s с типом %(type)s" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "Vdisk с UID, заданным по ссылке %s, отсутствует." #, python-format msgid "No views found for LUN: %s" msgstr "Не найдены представления для LUN: %s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "В кластере с vserver %(vserver)s и путем пересечения %(junction)s " "отсутствует том " msgid "No volume service(s) started successfully, terminating." msgstr "Не удалось запустить службы томов. Завершение." msgid "No volume was found at CloudByte storage." msgstr "Не найден том в хранилище CloudByte." msgid "No volume_type should be provided when creating test replica." msgstr "Нельзя указывать volume_type при создании пробной копии." msgid "No volumes found in CloudByte storage." msgstr "В хранилище CloudByte не найдены тома." msgid "No weighed hosts available" msgstr "Нет хостов с весами" #, python-format msgid "Not a valid string: %s" msgstr "Недопустимая строка: %s" msgid "Not a valid value for NaElement." msgstr "Недопустимое значение для NaElement." #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "Не найдено подходящее хранилище данных для тома %s." msgid "Not an rbd snapshot" msgstr "Не является моментальной копией rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Нет доступа к образу %(image_id)s." msgid "Not authorized." msgstr "Не авторизировано." #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "Недостаточно места на базовом сервере (%(backend)s)" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "" "В общем ресурсе ZFS отсутствует свободное пространство для выполнения этой " "операции." msgid "Not stored in rbd" msgstr "Не сохранено в rbd" msgid "Nova returned \"error\" status while creating snapshot." msgstr "" "Во время создания моментальной копии из Nova возвращено состояние error." msgid "Null response received from CloudByte's list filesystem." 
msgstr "Файловая система списка CloudByte вернула пустой ответ." msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "Получен пустой ответ из групп идентификации iSCSI списка CloudByte." msgid "Null response received from CloudByte's list iscsi initiators." msgstr "Инициаторы iscsi списков CloudByte вернули пустой ответ." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "Служба iscsi списков томов CloudByte вернула пустой ответ." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "При создании тома [%s] в хранилище CloudByte получен пустой ответ." #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "Получен пустой ответ при удалении тома [%s] в хранилище CloudByte." #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "Получен пустой ответ на запрос выполнения операции [%(operation)s] с помощью " "задания [%(job)s] в хранилище CloudByte." msgid "Number of retries if connection to ceph cluster failed." msgstr "Число повторов в случае сбоя подключения к кластеру ceph." msgid "Object Count" msgstr "Количество объектов" msgid "Object Version" msgstr "Версия объекта" msgid "Object is not a NetApp LUN." msgstr "Объект не находится в LUN NetApp." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "Ошибка в операции Extend при добавлении тома в составной том %(volumename)s." msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "Одна из служб cinder-volume устарела и не поддерживает этот запрос. " "Возможно, в системе есть тома cinder из смеси версий Liberty-Mitaka?" msgid "One of the required inputs from host, port or scheme was not found." msgstr "" "Не найден один из обязательных входных параметров из хоста, порта или схемы." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "Только %(value)s %(verb)s запрос(ов) могут быть сделаны для %(uri)s, каждые " "%(unit_string)s." msgid "Only one limit can be set in a QoS spec." msgstr "В спецификации QoS можно указать только одно ограничение." msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "Только пользователи с маркером, связанным с непосредственными родительскими " "объектами и корневыми проектами могут просматривать квоты дочерних элементов." msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "" "Вывести из-под управления можно только тома, находящиеся под управлением " "OpenStack." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "Операция не выполнена, состояние=%(status)s. Полный дамп: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "Операция не поддерживается: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "Опция gpfs_images_dir указана неправильно." msgid "Option gpfs_images_share_mode is not set correctly." msgstr "Опция gpfs_images_share_mode указана неправильно." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "Опция gpfs_mount_point_base указана неправильно." msgid "Option map (cls._map) is not defined." 
msgstr "Карта опций (cls._map) не определена." #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "Исходное значение %(res)s %(prop)s должно быть одно из '%(vals)s'" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "Переопределить порт HTTPS для подключения к серверу API Blockbridge." #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "Имя раздела - None. Укажите smartpartition:partitionname в ключе." msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "Для идентификации требуется пароль или личный ключ SSH: задайте опцию " "san_password или san_private_key." msgid "Path to REST server's certificate must be specified." msgstr "Необходимо указать путь к сертификату сервера REST." #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "Заранее создайте пул %(pool_list)s!" #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "Заранее создайте слой %(tier_levels)s в пуле %(pool)s!" msgid "Please re-run cinder-manage as root." msgstr "Выполните команду cinder-manage повторно от имени пользователя root." msgid "Please specify a name for QoS specs." msgstr "Укажите имя спецификации QoS." #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Политика не допускает выполнения %(action)s." #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "Пул %(poolNameInStr)s не найден." #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "Пул %s не существует в устройстве Nexenta Store" #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "Не найден пул из volume['host'] %(host)s." #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "Сбой пула из volume['host']. Исключительная ситуация: %(ex)s." msgid "Pool is not available in the volume host field." msgstr "Не указан пул в поле хоста тома." msgid "Pool is not available in the volume host fields." msgstr "Пул недоступен в полях хоста тома." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "Не найден пул с именем %(pool)s в домене %(domain)s." #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "Не найден пул с именем %(pool_name)s в домене %(domain_id)s." #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "Пул %(poolName)s. не связан со слоем памяти для быстрой стратегии " "%(fastPolicy)s." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "Значение PoolName должно быть указано в файле %(fileName)s." #, python-format msgid "Pools %s does not exist" msgstr "Пулы %s не существуют" msgid "Pools name is not set." msgstr "Не задано имя пула." #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "Состояние главной копии: %(status)s, синхронизирована: %(sync)s." msgid "Project ID" msgstr " ID проекта" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "В проекте неправильно настроена поддержка вложенных квот: %(reason)s." msgid "Protection Group not ready." msgstr "Группа защиты не готова." 
#, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "Протокол %(storage_protocol)s не поддерживается для семейства памяти " "%(storage_family)s." msgid "Provided backup record is missing an id" msgstr "В указанной записи резервной копии отсутствует ИД" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "Указанное состояние моментальной копии %(provided)s запрещено для " "моментальной копии с состоянием %(current)s." #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "Не найдена информация поставщика о хранилище CloudByte для тома OpenStack " "[%s]." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Сбой драйвера Pure Storage Cinder: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "Спецификация QoS %(specs_id)s уже существует." #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "Спецификация QoS %(specs_id)s еще связана с сущностями." #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "Недопустимая конфигурация QoS. %s должен быть > 0." #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "" "В стратегии QoS необходимо указать IOTYPE и другой параметр qos_specs, " "стратегия QoS: %(qos_policy)s " #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "" "В стратегии QoS необходимо указать IOTYPE: 0, 1 или 2, стратегия QoS: " "%(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "" "В стратегии QoS конфликтуют параметры upper_limit и lower_limit, стратегия " "QoS: %(qos_policy)s." #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" "Спецификация QoS %(specs_id)s не имеет спецификации с ключом %(specs_key)s." msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" "Спецификации QoS не поддерживаются в этом семействе систем хранения и версии " "ONTAP." msgid "Qos specs still in use." msgstr "Спецификация QoS еще используется." msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "Запрос по параметру службы устарело. Вместо него следует использовать " "двоичный параметр." msgid "Query resource pool error." msgstr "Ошибка запроса пула ресурсов." #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "Ограничение %s квоты должно быть не меньше существующих ресурсов." #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Класс квоты %(class_name)s не найден." msgid "Quota could not be found" msgstr "Квота не найдена" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Квота превышена для ресурсов: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Квота превышена: код=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Квота проекта %(project_id)s не найдена." 
#, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "Неверно задано ограничение квот в проекте '%(proj)s' для ресурса '%(res)s': " "порог %(limit)d меньше, чем текущее использование %(used)d" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "Резервирование квоты %(uuid)s не найдено." #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "Использование квоты для проекта %(project_id)s не найдено." #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "Сбой операции diff RBD - (код возврата=%(ret)s stderr=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "Должен быть указан IP-адрес сервера REST." msgid "REST server password must by specified." msgstr "Должен быть указан пароль сервера REST." msgid "REST server username must by specified." msgstr "Должно быть указано имя пользователя сервера REST." msgid "RPC Version" msgstr "Версия RPC" msgid "RPC server response is incomplete" msgstr "Неполный ответ от сервера RPC" msgid "Raid did not have MCS Channel." msgstr "У RAID нет канала MCS." #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "Достигнуто ограничение, заданное опцией конфигурации " "max_luns_per_storage_group. Операция добавления %(vol)s в группу носителей " "%(sg)s отклонена." #, python-format msgid "Received error string: %s" msgstr "Получена ошибочная строка: %s" msgid "Reference must be for an unmanaged snapshot." msgstr "Ссылка должна быть указана для неуправляемой моментальной копии." msgid "Reference must be for an unmanaged virtual volume." msgstr "Для неуправляемого виртуального тома должна быть ссылка." msgid "Reference must be the name of an unmanaged snapshot." msgstr "Ссылка должна быть именем неуправляемой моментальной копии." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "Для неуправляемого виртуального тома ссылка должна быть именем тома." msgid "Reference must contain either source-id or source-name element." msgstr "Ссылка должна содержать элемент source-id или source-name." msgid "Reference must contain either source-name or source-id element." msgstr "Ссылка должна содержать элемент source-name или source-id." msgid "Reference must contain source-id or source-name element." msgstr "Ссылка должна содержать элемент source-id или source-name." msgid "Reference must contain source-id or source-name key." msgstr "Ссылка должна содержать ключ source-id или source-name." msgid "Reference must contain source-id or source-name." msgstr "Ссылка должна содержать source-id или source-name." msgid "Reference must contain source-id." msgstr "Ссылка должна содержать source-id." msgid "Reference must contain source-name element." msgstr "Ссылка должна содержать элемент имени источника." msgid "Reference must contain source-name or source-id." msgstr "Указатель должен содержать имя источника и ИД источника." msgid "Reference must contain source-name." msgstr "Ссылка должна содержать элемент source-name." msgid "Reference to volume to be managed must contain source-name." msgstr "" "Ссылка на том, передаваемый под управление, должна содержать элемент source-" "name." 
#, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "" "Ссылка на том %s, передаваемый под управление, должна содержать элемент " "source-name." #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "Перенос ИД тома %(id)s отклонен. Проверьте конфигурацию, так как исходный и " "целевой объекты относятся к одной группе томов: %(name)s." msgid "Remote pool cannot be found." msgstr "Удаленный пул не найден." msgid "Remove CHAP error." msgstr "Ошибка удаления CHAP." msgid "Remove fc from host error." msgstr "Ошибка удаления Fibre Channel с хоста." msgid "Remove host from array error." msgstr "Ошибка удаления хоста из массива." msgid "Remove host from hostgroup error." msgstr "Ошибка удаления хоста из группы хостов." msgid "Remove iscsi from host error." msgstr "Ошибка удаления iSCSI с хоста." msgid "Remove lun from QoS error." msgstr "Ошибка удаления LUN из QoS." msgid "Remove lun from cache error." msgstr "Ошибка удаления LUN из кэша." msgid "Remove lun from partition error." msgstr "Ошибка удаления LUN из раздела." msgid "Remove port from port group error." msgstr "Ошибка удаления порта из группы портов." msgid "Remove volume export failed." msgstr "Не удалось удалить экспорт тома." msgid "Rename lun on array error." msgstr "Ошибка переименования LUN в массиве." msgid "Rename snapshot on array error." msgstr "Ошибка переименования моментальной копии в массиве." #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "Репликация %(name)s в %(ssn)s не выполнена." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "Функция службы репликации не найдена в %(storageSystemName)s." #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "Служба репликации не найдена в %(storageSystemName)s." msgid "Replication is not enabled" msgstr "Репликация не включена" msgid "Replication is not enabled for volume" msgstr "Репликация не включена для тома" msgid "Replication not allowed yet." msgstr "Репликация еще не разрешена." #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "Состояние репликации тома должно быть Активна или Активна-Остановлена, но " "текущее состояние - %s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "Состояние репликации тома должно быть Неактивна, Активна-Остановлена или " "Ошибка, но текущее состояние - %s" msgid "Request body and URI mismatch" msgstr "Тело запроса и URI не совпадают" msgid "Request body contains too many items" msgstr "Тело запроса содержит избыточное количество объектов" msgid "Request body contains too many items." msgstr "Тело запроса содержит слишком много элементов." msgid "Request body empty" msgstr "Пустое тело запроса" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "Запрос к кластеру Datera вернул неверное состояние: %(status)s | %(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Размер запрошенной резервной копии превышает разрешенную квоту резервных " "копий в ГБ. 
Запрошено: %(requested)s ГБ, квота %(quota)s ГБ, использовано " "%(consumed)s ГБ." #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Запрошенный том или моментальная копия превышают разрешенную квоту %(name)s. " "Запрошено %(requested)s ГБ, квота %(quota)s ГБ, использовано %(consumed)s ГБ." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "Запрошенный размер тома %(size)d больше максимально допустимого (%(limit)d)." msgid "Required configuration not found" msgstr "Не найдена требуемая конфигурация" #, python-format msgid "Required flag %s is not set" msgstr "Не указан требуемый флаг %s" msgid "Requires an NaServer instance." msgstr "Требуется экземпляр NaServer." #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Сброс состояния резервной копии прерван. Настроенная служба резервного " "копирования [%(configured_service)s] не является службой резервного " "копирования, которая использовалась для создания этой резервной копии " "[%(backup_service)s]." #, python-format msgid "Resizing clone %s failed." msgstr "Не удалось изменить размер дубликата %s." msgid "Resizing image file failed." msgstr "Не удалось изменить размер файла образа." msgid "Resource could not be found." msgstr "Ресурс не может быть найден." msgid "Resource not ready." msgstr "Ресурс не готов." #, python-format msgid "Response error - %s." msgstr "Ошибка ответа: %s." msgid "Response error - The storage-system is offline." msgstr "Ошибка ответа - система хранения выключена." #, python-format msgid "Response error code - %s." msgstr "Код ошибки ответа: %s." msgid "RestURL is not configured." msgstr "RestURL не настроен." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Восстановление резервной копии прервано: ожидалось состояние тома " "%(expected_status)s, получено %(actual_status)s." #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Восстановление резервной копии прервано: настроенная в данный момент служба " "резервного копирования [%(configured_service)s] не является службой " "резервного копирования, которая использовалась для создания этой резервной " "копии [%(backup_service)s]." #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Восстановление резервной копии прервано: ожидалось состояние резервной копии " "%(expected_status)s, получено %(actual_status)s." #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Получено непредвиденное число томов SolidFire для предоставленных " "моментальных копий Cinder. Получено: %(ret)s Ожидалось: %(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Получено непредвиденное число томов SolidFire для предоставленных томов " "Cinder. 
Получено: %(ret)s Ожидалось: %(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "Превышено число попыток для команды: %s" msgid "Retryable SolidFire Exception encountered" msgstr "Обнаружена повторяемая исключительная ситуация SolidFire" msgid "Retype cannot change encryption requirements." msgstr "Изменение типа не может изменить требования к шифрованию." #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "Изменение типа не может изменить клиентские спецификации QoS для " "используемого тома: %s." msgid "Retype requires migration but is not allowed." msgstr "Для изменения типа требуется перенос, но он запрещен." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "Выполнить откат для тома %(volumeName)s не удалось. Обратитесь к системному " "администратору, чтобы вручную вернуть том в группу носителей по умолчанию " "для стратегии FAST %(fastPolicyName)s." #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "Выполняется откат %(volumeName)s путем его удаления." #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "Работа Cinder с версией VMware vCenter ниже %s невозможна." msgid "SAN product is not configured." msgstr "Продукт SAN не настроен." msgid "SAN protocol is not configured." msgstr "Протокол SAN не настроен." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" "Недопустимая конфигурация SMBFS smbfs_oversub_ratio. Значение должно быть " "больше 0: %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "Недопустимая конфигурация SMBFS smbfs_used_ratio. Значение должно быть " "больше 0 и не больше 1,0: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "Файл конфигурации SMBFS в %(config)s не существует." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "Файл конфигурации SMBFS не указан (smbfs_shares_config)." #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" "После '%(total_attempts)r' попыток не выполнена команда SSH: '%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "Сбой команды SSH. Ошибка: '%(err)s'. Команда: '%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Обнаружено внедрение команды SSH: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "Сбой соединения SSH для %(fabric)s, ошибка: %(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "Срок действия сертификата SSL истек %s." #, python-format msgid "SSL error: %(arg)s." msgstr "Ошибка SSL: %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Фильтр хостов планировщика %(filter_name)s не найден." #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" "Не удалось найти определитель весовых коэффициентов хостов планировщика " "%(weigher_name)s." #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." 
msgstr "" "Состояние вспомогательной копии: %(status)s, состояние синхронизации: " "%(sync)s, ход синхронизации %(progress)s%%." #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" "Вторичный ИД не может совпадать с основным массивом, backend_id = " "%(secondary)s." #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "Значение SerialNumber должно быть указано в файле %(fileName)s." #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "Служба %(service)s удалена с хоста %(host)s." #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "Служба %(service_id)s не найдена на хосте %(host)s." #, python-format msgid "Service %(service_id)s could not be found." msgstr "Служба %(service_id)s не найдена." #, python-format msgid "Service %s not found." msgstr "Служба %s не найдена." msgid "Service is too old to fulfil this request." msgstr "Служба устарела и не поддерживает этот запрос." msgid "Service is unavailable at this time." msgstr "В данный момент служба недоступна." msgid "Service not found." msgstr "Служба не найдена." msgid "Set pair secondary access error." msgstr "Ошибка настройки вспомогательного доступа к паре." msgid "Sets thin provisioning." msgstr "Задает тонкое резервирование." msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "Установка группы стратегий QoS LUN не поддерживается в этом семействе памяти " "и версии ONTAP." msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "Установка группы стратегий QoS для файлов не поддерживается в этом семействе " "памяти и версии ontap." #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "Общий ресурс %s проигнорирован: недопустимый формат. Должен быть формат " "адрес:/экспорт. Проверьте параметры nas_ip и nas_share_path." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "Общий ресурс в %(dir)s недоступен на запись службе томов Cinder. Операции " "для моментальных копий поддерживаться не будут." #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Ошибка ввода-вывода Sheepdog, команда: \"%s\"." msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "Операции Показать доступны только для проектов в той же иерархии проекта, " "где находятся пользователи." msgid "Size" msgstr "Размер" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "Размер для тома %s не найден, защищенное удаление невозможно." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "Размер %(image_size)d ГБ не умещается на томе размером %(volume_size)d ГБ." #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "Размер указанного образа (%(image_size)s ГБ) превышает размер тома " "(%(volume_size)s ГБ)." #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." 
msgstr "" "Отправлен запрос на удаление моментальной копии %(id)s во время ожидания ее " "готовности. Вероятно, параллельно выполняется другой запрос." #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "В ходе каскадного удаления обнаружена моментальная копия %(id)s в состоянии " "%(state)s вместо 'deleting'." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Снимок %(snapshot_id)s не может быть найден." #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "У снимка %(snapshot_id)s нет метаданных с ключом %(metadata_key)s." #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "Моментальная копия %s не должна быть частью группы согласования." #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "Моментальная копия '%s' не существует в массиве." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "Невозможно создать моментальную копию, поскольку том %(vol_id)s недоступен. " "Текущее состояние тома: %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "Моментальную копию нельзя создать во время переноса тома." msgid "Snapshot of secondary replica is not allowed." msgstr "Создавать моментальную копию вспомогательной копии не разрешено." #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "Моментальная копия тома не поддерживается в состоянии %s." #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "" "Создать моментальную копию для ресурса \"%s\", который нигде не был " "развернут?" msgid "Snapshot size must be multiple of 1 GB." msgstr "Размер моментальной копии должен быть кратен 1 ГБ." #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" "Состояние моментальной копии %(cur)s запрещено для update_snapshot_status" msgid "Snapshot status must be \"available\" to clone." msgstr "Для дублирования состояние моментальной копии должно быть available." #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "" "Моментальная копия для резервного копирования должна быть доступна, но " "текущее состояние - \"%s\"." #, python-format msgid "Snapshot with id of %s could not be found." msgstr "Моментальная копия с ИД %s не найдена." #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "Моментальная копия='%(snap)s' не существует в базовом образе='%(base)s' - " "прерывание дополняющего резервного копирования" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "Моментальные копии не поддерживаются для этого формата тома: %s" #, python-format msgid "Socket error: %(arg)s." msgstr "Ошибка сокета: %(arg)s." msgid "SolidFire Cinder Driver exception" msgstr "Исключительная ситуация драйвера SolidFire Cinder" msgid "Sort direction array size exceeds sort key array size." msgstr "" "Размер массива значений направления сортировки превышает размер массива " "ключей сортировки." msgid "Source CG is empty. No consistency group will be created." msgstr "" "Исходная группа согласования пустая. Группа согласования создана не будет." msgid "Source host details not found." msgstr "Не найдены сведения об исходном хосте." 
msgid "Source volume device ID is required." msgstr "Требуется ИД устройства исходного тома." msgid "Source volume not mid-migration." msgstr "Исходный том не в процессе переноса." #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "Источник с IP-адресом/именем хоста %s не найден на целевом устройстве для " "переноса с поддержкой базовой системы. Переход к выполнению переноса по " "умолчанию." msgid "SpaceInfo returned byarray is invalid" msgstr "SpaceInfo возвратил недопустимый byarray" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "Указанный хост для связывания с томом %(vol)s находится в неподдерживаемой " "группе хостов %(group)s." msgid "Specified logical volume does not exist." msgstr "Указанный логический том не существует." #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "Указанная группа моментальных копий с ИД %s не найдена." msgid "Specify a password or private_key" msgstr "Укажите пароль или личный_ключ" msgid "Specify san_password or san_private_key" msgstr "Задайте san_password или san_private_key" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "" "Укажите такие параметры, как тип тома, имя, описание, is_public или их " "сочетание." msgid "Split pair error." msgstr "Ошибка разделения пары." msgid "Split replication failed." msgstr "Ошибка разделения репликации." msgid "Start LUNcopy error." msgstr "Ошибка запуска LUNcopy." msgid "State" msgstr "Состояние" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "Неверное состояние узла. Текущее состояние: %s." msgid "Status" msgstr "Статус" msgid "Stop snapshot error." msgstr "Ошибка остановки моментальной копии." #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "Служба конфигурации носителей не найдена в %(storageSystemName)s." #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "Служба управления ИД аппаратного обеспечения носителей не найдена в " "%(storageSystemName)s." #, python-format msgid "Storage Profile %s not found." msgstr "Не найден профайл хранилища %s." #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "Служба перемещения носителей не найдена в %(storageSystemName)s." #, python-format msgid "Storage family %s is not supported." msgstr "Семейство памяти %s не поддерживается." #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "Группа носителей %(storageGroupName)s не была удалена успешно" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "Хост памяти %(svr)s не обнаружен. Укажите имя" msgid "Storage pool is not configured." msgstr "Пул памяти не настроен." #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "Профайл хранилища: %(storage_profile)s не найден." msgid "Storage resource could not be found." msgstr "Ресурс памяти не найден." msgid "Storage system id not set." msgstr "Не указан ИД системы хранения." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Для пула %(poolNameInStr)s не найдена система памяти." msgid "Storage-assisted migration failed during manage volume." 
msgstr "Перенос с помощью носителя не выполнен при операции управления томом." #, python-format msgid "StorageSystem %(array)s is not found." msgstr "Система хранения %(array)s не найдена." #, python-format msgid "String with params: %s" msgstr "Строка с параметрами: %s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "Общее использование дочерним объектом '%(sum)s' превышает свободную квоту " "'%(free)s' в проекте '%(proj)s' для ресурса '%(res)s'. Уменьшите требования " "или использование для одного или нескольких следующих проектов: " "'%(child_ids)s'" msgid "Switch over pair error." msgstr "Ошибка переключения пары." msgid "Sync pair error." msgstr "Ошибка синхронизации пары." msgid "Synchronizing secondary volume to primary failed." msgstr "Сбой синхронизации вспомогательного тома с основным." #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "Система %(id)s находится в состоянии ошибки пароля - %(pass_status)s." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "Система %(id)s находится в неверном состоянии (%(status)s)." msgid "System does not support compression." msgstr "Система не поддерживает сжатие." msgid "System is busy, retry operation." msgstr "Система занята. Повторите операцию." #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "TSM [%(tsm)s] не найден в хранилище CloudByte для учетной записи " "[%(account)s]." msgid "Target volume type is still in use." msgstr "Тип целевого тома еще используется." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "Несоответствие дерева шаблонов: добавление подчиненного %(slavetag)s в " "главный %(mastertag)s" #, python-format msgid "Tenant ID: %s does not exist." msgstr "ИД арендатора %s не существует." msgid "Terminate connection failed" msgstr "Завершение соединения не выполнено" msgid "Terminate connection unable to connect to backend." msgstr "" "Операции Завершить соединение не удалось подключиться к базовому серверу." #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "Не удалось закрыть соединение с томом: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "Реплицируемый исходный том %(type)s %(id)s не найден." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "Параметры sort_key и sort_dir устарели и не могут использоваться с " "параметром sort." msgid "The EQL array has closed the connection." msgstr "Массив EQL закрыл соединение." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "Уровень выпуска файловой системы GPFS %(fs)s не соответствует требуемому. " "Текущий уровень - %(cur)s, должен быть %(min)s." msgid "The IP Address was not found." msgstr "Не найден IP-адрес." #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "Запрос WebDAV не выполнен. 
Причина: %(msg)s, Код возврата/причина: %(code)s, " "Исходный том: %(src)s, Целевой том: %(dst)s, Метод: %(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "Приведенная выше ошибка может означать, что база данных не была создана.\n" "Перед выполнением этой команды создайте базу данных с помощью команды " "'cinder-manage db sync'." #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "Массив не поддерживает параметр пула памяти для SLO %(slo)s и нагрузки " "%(workload)s. Проверьте правильность настройки SLO и нагрузок для массива." msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "Репликация не включена в базовой системе, где создан том." #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "Команда %(cmd)s не выполнена. (код возврата: %(ret)s, stdout: %(out)s, " "stderr: %(err)s)" msgid "The copy should be primary or secondary" msgstr "Копия должна быть первичной или вторичной" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "Не удалось создать логическое устройство. (LDEV: %(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" "Метод с декоратором должен принимать либо том, либо объект моментальной копии" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "Устройство в пути %(path)s недоступно: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "" "Конечное время (%(end)s) должно быть позже начального времени (%(start)s)." #, python-format msgid "The extra_spec: %s is invalid." msgstr "Значение extra_spec %s недопустимо." #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "Недопустимая дополнительная спецификация %(extraspec)s." #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "Нельзя удалить том, для которого выполнено переключение после сбоя: %s" #, python-format msgid "The following elements are required: %s" msgstr "Требуются следующие элементы: %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "Для следующих переносов требуется понижение версии, что недопустимо: \n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "Не удалось добавить группу хостов или целевое расположение iSCSI." msgid "The host group or iSCSI target was not found." msgstr "Не найдена группа хостов или целевое расположение iSCSI." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "Хост не готов к обратному переключению. Выполните повторную синхронизацию " "томов и возобновите репликацию в базовых системах 3PAR." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "Хост не готов к обратному переключению. Выполните повторную синхронизацию " "томов и возобновите репликацию в базовых системах LeftHand." msgid "" "The host is not ready to be failed back. 
Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "Хост не готов к обратному переключению. Выполните повторную синхронизацию " "томов и возобновите репликацию в базовых системах Storwize." #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "Пользователь %(user)s CHAP iSCSI не существует." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "Импортированный LUN %(lun_id)s находится в пуле %(lun_pool)s, который не " "управляется хостом %(host)s." msgid "The key cannot be None." msgstr "Ключ не может быть None." #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "Логическое устройство для указанного %(type)s %(id)s уже удалено." #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "Истек тайм-аут у метода %(method)s. (значение тайм-аута: %(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "Метод update_migrated_volume не реализован." #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" "Путь монтирования %(mount_path)s не является допустимым томом USP Quobyte. " "Ошибка: %(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "Параметр базовой системы хранения. (config_group: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "" "Родительская резервная копия должна быть доступна для создания дополняющей " "резервной копии." #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "Моментальная копия '%s' не является моментальной копией данного тома." msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "Ссылка на том в базовой системе должна указываться в формате file_system/" "volume_name (volume_name не может содержать '/')" msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "Режим репликации настроен неверно в параметрах extra_specs для типа тома. " "Если replication:mode задан как periodic, то необходимо указать replication:" "sync_period в пределах от 300 до 31622400 секунд." #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "Минимальное значение периода синхронизации репликации составляет %s секунд." #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "Запрошенный размер %(requestedSize)s не совпадает с итоговым размером " "%(resultSize)s." #, python-format msgid "The resource %(resource)s was not found." msgstr "Ресурс %(resource)s не найден." msgid "The results are invalid." msgstr "Неверные результаты." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "Моментальную копию нельзя создать, когда том находится в режиме обслуживания." #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "Исходный том %s отсутствует в пуле, управляемом текущим хостом." msgid "The source volume for this WebDAV operation not found." 
msgstr "Не найден исходный том для этой операции WebDAV." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "Тип исходного тома '%(src)s' отличается от типа целевого тома '%(dest)s'." #, python-format msgid "The source volume type '%s' is not available." msgstr "Тип исходного тома '%s' не доступен. " #, python-format msgid "The specified %(desc)s is busy." msgstr "Указанное %(desc)s занято." #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "LUN не принадлежит данному пулу: %s." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "Не удалось включить управление для заданного ldev %(ldev)s. ldev не должен " "преобразован." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" "Не удалось включить управление для заданного ldev %(ldev)s. ldev не должен " "иметь парное устройство." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "Не удалось включить управление для заданного ldev %(ldev)s. Размер ldev " "должен указываться в единицах, кратных гигабайтам." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "Не удалось включить управление для заданного ldev %(ldev)s. Требуемый тип " "тома: DP-VOL." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "Указанная операция не поддерживается. Размер тома должен совпадать с " "исходным %(type)s. (том: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "Указанный vdisk связан с хостом." msgid "The specified volume is mapped to a host." msgstr "Указанный том связан с хостом." #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "Неверный пароль массива хранения для %s. Обновите пароль в конфигурации." #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "" "Базовая система хранения пригодна для использования. (config_group: " "%(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "Носитель не поддерживает %(prot)s. Настройте поддержку %(prot)s в устройстве " "или переключитесь на драйвер с другим протоколом." #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "Счетчик чередующихся метаданных %(memberCount)s слишком мал для тома " "%(volumeName)s с размером %(volumeSize)s." #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "Тип метаданных %(metadata_type)s для тома/моментальной копии %(id)s " "недопустимый." #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "Том %(volume_id)s не удалось расширить. Тип тома должен быть Normal." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "Не удалось отменить управление для тома %(volume_id)s. 
Требуемый тип тома: " "%(volume_type)s." #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "Управление томом %(volume_id)s выполняется успешно. (LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "Управление томом %(volume_id)s отменено успешно. (LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "Связываемый том %(volume_id)s не найден." msgid "The volume cannot accept transfer in maintenance mode." msgstr "Том не может принять передачу в режиме обслуживания." msgid "The volume cannot be attached in maintenance mode." msgstr "Том невозможно подключить в режиме обслуживания." msgid "The volume cannot be detached in maintenance mode." msgstr "Том невозможно отключить в режиме обслуживания." msgid "The volume cannot be updated during maintenance." msgstr "Том невозможно обновить во время обслуживания." msgid "The volume connection cannot be initialized in maintenance mode." msgstr "Невозможно инициализировать соединение с томом в режиме обслуживания." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "Для драйвера тома требуется имя инициатора iSCSI в коннекторе." msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "Том в настоящее время занят в 3PAR и не может быть удален. Вы можете " "повторить попытку позже." msgid "The volume label is required as input." msgstr "Требуется метка тома во входных данных." msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "" "Метаданные тома нельзя удалить, когда том находится в режиме обслуживания." msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "Метаданные тома нельзя обновить, когда том находится в режиме обслуживания." #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "Нет доступных ресурсов. (ресурс: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "Нет допустимых хостов ESX." #, python-format msgid "There are no valid datastores attached to %s." msgstr "Нет допустимых хранилищ данных, подключенных к %s." msgid "There are no valid datastores." msgstr "Нет допустимых хранилищ данных." #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "Отсутствует обозначение %(param)s. Заданное хранилище требуется для " "управления томом." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "Отсутствует обозначение для ldev. Указанное ldev требуется для управления " "томом." msgid "There is no metadata in DB object." msgstr "Нет метаданных в объекте базы данных." #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "Отсутствуют общие ресурсы, способные вместить %(volume_size)s ГБ" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "Нет общих ресурсов, способных вместить %(volume_size)s ГБ." #, python-format msgid "There is no such action: %s" msgstr "Не существует такого действия: %s" msgid "There is no virtual disk device." msgstr "Нет устройства виртуального диска." #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "Ошибка при добавлении тома в группу удаленного копирования: %s." 
#, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "При создании cgsnapshot возникла ошибка: %s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "Ошибка при создании группы удаленного копирования: %s." #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "" "Ошибка при настройке периода синхронизации для группы удаленного " "копирования: %s." #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Ошибка при настройке группы удаленного копирования в массивах 3PAR ('%s'). " "Неизвестный тип репликации для тома." #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Ошибка при настройке группы удаленного расписания в массивах LeftHand " "('%s'). Неизвестный тип репликации для тома." #, python-format msgid "There was an error starting remote copy: %s." msgstr "Ошибка при запуске удаленного копирования: %s." #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Отсутствует настроенный файл конфигурации Gluster (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "Отсутствует настроенный файл конфигурации NFS (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "Нет настроенного тома Quobyte (%s). Пример: quobyte:///<имя-тома>" msgid "Thin provisioning not supported on this version of LVM." msgstr "Оперативное выделение ресурсов не поддерживается в этой версии LVM." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "Программа включения оперативного выделения ресурсов не установлена. Создать " "том с оперативным выделением ресурсов невозможно" msgid "This driver does not support deleting in-use snapshots." msgstr "Этот драйвер не поддерживает удаление используемых моментальных копий." msgid "This driver does not support snapshotting in-use volumes." msgstr "" "Этот драйвер не поддерживает создание моментальных копий используемых томов." msgid "This request was rate-limited." msgstr "Этот запрос ограничен по частоте." #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "Платформа %s не поддерживается. Этот драйвер поддерживает только платформы " "Win32." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "Служба стратегии слоев для %(storageSystemName)s не найдена." #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "Истек тайм-аут ожидания обновления Nova для создания моментальной копии %s." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "Истек тайм-аут ожидания обновления Nova для удаления моментальной копии " "%(id)s." msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "Значение тайм-аута (в секундах), используемое при подключении к кластеру " "ceph. Если оно отрицательно, то тайм-аут не задан и используется значение " "librados по умолчанию." 
#, python-format msgid "Timeout while calling %s " msgstr "Тайм-аут при вызове %s " #, python-format msgid "Timeout while requesting %(service)s API." msgstr "Тайм-аут во время запроса API %(service)s." #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "Тайм-аут во время функциональности от базовой системы %(service)s." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "Не удалось найти передачу %(transfer_id)s." #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "Передача %(transfer_id)s: ИД тома %(volume_id)s в непредвиденном состоянии " "%(status)s, ожидалось состояние Ожидание-передачи" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "Попытка импортировать метаданные резервной копии из ИД %(meta_id)s в " "резервную копию %(id)s." #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "Выполнение задачи тонкой настройки тома преждевременно прервалось: том " "%(volume_name)s, состояние задачи %(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "Тип %(type_id)s уже связан с другими спецификациями QoS: %(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "Изменение типа доступа неприменимо к типу общедоступного тома." msgid "Type cannot be converted into NaElement." msgstr "Тип невозможно преобразовать в NaElement." #, python-format msgid "TypeError: %s" msgstr "TypeError: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "" "UUID %s находятся и в списке добавления томов, и в списке удаления томов." #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "Не удаётся получить доступ к базовой системе Storwize для тома %s." msgid "Unable to access the backend storage via file handle." msgstr "Нет доступа к памяти базовой системы через ссылку на файл." #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "" "Не удалось получить доступ к хранилищу непереданных сообщений с помощью пути " "%(path)s." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "Не удалось добавить хост Cinder в apphosts для пространства %(space)s" #, python-format msgid "Unable to complete failover of %s." msgstr "Не удается выполнить переключение после сбоя %s." msgid "Unable to connect or find connection to host" msgstr "Не удалось подключиться к хосту или найти соединение с ним" msgid "Unable to create Barbican Client without project_id." msgstr "Невозможно создать Barbican Client без project_id." #, python-format msgid "Unable to create consistency group %s" msgstr "Не удалось создать группу согласования %s" msgid "Unable to create lock. Coordination backend not started." msgstr "" "Не удается создать блокировку. Базовая система координации не запущена." #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Создать или получить группу носителей по умолчанию для стратегии FAST " "%(fastPolicyName)s." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "Не удалось создать реплику дубликата для тома %s." 
#, python-format msgid "Unable to create the relationship for %s." msgstr "Не удаётся создать взаимосвязь для %s." #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "Не удается создать том %(name)s из %(snap)s." #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "Не удается создать том %(name)s из %(vol)s." #, python-format msgid "Unable to create volume %s" msgstr "Не удается создать том %s" msgid "Unable to create volume. Backend down." msgstr "Не удается создать том. Выключена базовая система." #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "Не удалось удалить моментальную копию группы согласования %s" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "Не удалось удалить моментальную копию %(id)s, состояние: %(status)s." #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "Не удалось удалить стратегию моментальной копии на томе %s." #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "" "Не удаётся удалить целевой том для %(vol)s. Исключительная ситуация: %(err)s." msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "Отсоединить том невозможно. Для отсоединения состояние должно быть " "'используется', а attach_status должен быть 'подсоединен'." #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "Не удается определить secondary_array из параметра: %(secondary)s." #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "" "Не удалось определить имя моментальной копии в Purity для моментальной копии " "%(id)s." msgid "Unable to determine system id." msgstr "Не удалось определить ИД системы." msgid "Unable to determine system name." msgstr "Не удалось определить имя системы." #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "Невозможно выполнять операции управления моментальной копией через API REST " "Purity версии %(api_version)s, требуется версия %(required_versions)s." #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "Невозможно выполнить репликацию с версией API %(api_version)s Purity REST, " "требуется одна из версий %(required_versions)s." msgid "Unable to enable replication and snapcopy at the same time." msgstr "" "Нельзя использовать репликацию и моментальное копирование одновременно." #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Не удаётся установить партнёрство с кластером Storwize %s." #, python-format msgid "Unable to extend volume %s" msgstr "Не удалось расширить том %s" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "" "Не удаётся переключить том %(id)s на вторичную базовую систему, так как " "невозможно переключить взаимосвязь репликации: %(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "Не удается восстановить состояние \"default\", это возможно только по " "завершении переключения после сбоя." 
#, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "" "Не удается переключиться после сбоя на целевой объект репликации: " "%(reason)s)." msgid "Unable to fetch connection information from backend." msgstr "Не удалось получить информацию о соединении из базовой программы." #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" "Не удалось получить информацию о соединении из базовой программы: %(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "Не удалось найти ссылку на Purity с именем=%s" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "Не удалось найти группу томов %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "" "Не найден целевой объект переключения в случае сбоя, не настроены " "вспомогательные целевые объекты." msgid "Unable to find iSCSI mappings." msgstr "Не найдены связи iSCSI." #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "Не найден ssh_hosts_key_file: %s" msgid "Unable to find system log file!" msgstr "Не найден файл системного протокола!" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "" "Не найдена пригодная моментальная копия группы защиты для переключения в " "случае сбоя в выбранном вспомогательном массиве: %(id)s." #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "" "Среди настроенных целевых объектов не найден пригодный вспомогательный " "массив: %(targets)s." #, python-format msgid "Unable to find volume %s" msgstr "Не найден том %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "Получить блокирующее устройство для файла '%s' невозможно" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" "Извлечь информацию о конфигурации, необходимую для создания тома, " "невозможно: %(errorMessage)s." msgid "Unable to get corresponding record for pool." msgstr "Не удалось получить соответствующую запись для пула." #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "Не удалось получить информацию о пространстве %(space)s. Убедитесь, что " "кластер работает и подключен." msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "Не удалось получить список IP-адресов на этом хосте. Проверьте права доступа " "и работу сети." msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "" "Не удалось получить список элементов домена. Убедитесь, что кластер работает." msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "Не удалось получить список пространств для создания нового имени. Убедитесь, " "что кластер работает." #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "Не удалось получить статистику для backend_name: %s" msgid "Unable to get storage volume from job." msgstr "Не удается получить том носителя из задания." #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "Не удалось получить целевые конечные точки для ИД аппаратного обеспечения " "%(hardwareIdInstance)s." msgid "Unable to get the name of the masking view." 
msgstr "Не удается получить имя маскирующего представления." msgid "Unable to get the name of the portgroup." msgstr "Не удается получить имя группы портов." #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "Не удаётся получить взаимосвязь репликации для тома %s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "Не удалось импортировать том %(deviceId)s в Cinder. Это исходный том сеанса " "репликации %(sync)s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "Не удалось импортировать том %(deviceId)s в Cinder. Внешний том не в пуле, " "управляемом текущим хостом Cinder." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "Не удалось импортировать том %(deviceId)s в Cinder. Том находится в " "маскирующем представлении %(mv)s." #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "Не удалось загрузить CA из %(cert)s %(e)s." #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "Не удалось загрузить сертификат из %(cert)s %(e)s." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "Не удалось загрузить ключ из %(cert)s %(e)s." #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" "Не удалось найти учетную запись %(account_name)s на устройстве Solidfire" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "Не удалось найти SVM, управляющую IP-адресом '%s'" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "Не найдены указанные профили повтора %s " #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "Невозможно управлять существующим томом. Том %(volume_ref)s уже находится " "под управлением." #, python-format msgid "Unable to manage volume %s" msgstr "Невозможно управлять томом %s" msgid "Unable to map volume" msgstr "Не удалось преобразовать том" msgid "Unable to map volume." msgstr "Не удалось связать том." msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "" "Выполнить синтаксический анализ запроса XML невозможно. Укажите XML в " "правильном формате." msgid "Unable to parse attributes." msgstr "Не удалось проанализировать атрибуты." #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "Не удалось сделать копию главной для тома %s. Вспомогательной копии нет." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "Нельзя повторно использовать хост, который не управляется Cinder с " "параметром use_chap_auth=True," msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "Нельзя повторно использовать хост с неизвестными идентификационными данными " "CHAP." #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "Не удалось переименовать том %(existing)s в %(newname)s" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "Не удалось получить группу моментальных копий с ИД %s." #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. 
Value received: %(spec)s" msgstr "" "Не удается изменить тип %(specname)s, ожидалось получение текущих и " "запрошенных значений %(spectype)s. Полученное значение: %(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "Изменить тип невозможно: существует копия тома %s. Изменение типа приведет к " "превышению ограничения в 2 копии." #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "Не удалось изменить тип: для текущего действия требуется том-копия, что не " "разрешено, если в качестве нового типа указана репликация. Том: %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "" "Не удаётся настроить зеркальную репликацию для %(vol)s. Исключительная " "ситуация: %(err)s." #, python-format msgid "Unable to snap Consistency Group %s" msgstr "Не удалось создать моментальную копию группы согласования %s" msgid "Unable to terminate volume connection from backend." msgstr "Не удалось разорвать соединение с томом из базовой программы." #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "Не удалось закрыть соединение с томом: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "Не удалось изменить группу согласования %s" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "Не удалось изменить тип из-за неверного состояния %(vol_status)s у тома " "%(vol_id)s. Состояние тома должно быть available или in-use." #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" "Проверить группу инициатора %(igGroupName)s в маскирующем представлении " "%(maskingViewName)s. " msgid "Unacceptable parameters." msgstr "Недопустимые параметры." #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" "Непредвиденное состояние %(status)s для связывания %(id)s. Атрибуты: " "%(attr)s." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "Непредвиденный ответ интерфейса командной строки: несоответствие заголовка/" "строки. Заголовок: %(header)s, строка: %(row)s." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "Непредвиденное состояние %(status)s для связывания %(id)s. Атрибуты: " "%(attr)s." #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "" "Непредвиденный вывод. Вместо ожидаемого [%(expected)s] получен [%(output)s]" msgid "Unexpected response from Nimble API" msgstr "Непредвиденный ответ от API Nimble" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Непредвиденный ответ от Tegile IntelliFlash API" msgid "Unexpected status code" msgstr "Непредвиденный код состояния" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "Непредвиденный код состояния коммутатора %(switch_id)s с протоколом " "%(protocol)s для url %(page)s. 
Ошибка: %(error)s" msgid "Unknown Gluster exception" msgstr "Неизвестная исключительная ситуация Gluster" msgid "Unknown NFS exception" msgstr "Неизвестная исключительная ситуация NFS" msgid "Unknown RemoteFS exception" msgstr "Неизвестная исключительная ситуация в RemoteFS" msgid "Unknown SMBFS exception." msgstr "Неизвестная исключительная ситуация в SMBFS." msgid "Unknown Virtuozzo Storage exception" msgstr "Неизвестная исключительная ситуация Virtuozzo Storage" msgid "Unknown action" msgstr "Неизвестное действие" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Не удаётся установить, передан ли уже том %s под управление Cinder. Передача " "под управление прервана. Добавьте пользовательское свойство схемы " "'cinder_managed' для тома и присвойте ему значение False. Для устранения " "этого ограничения также можно присвоить свойству 'zfssa_manage_policy' " "значение 'loose'." #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Не удаётся установить, передан ли уже том %s под управление Cinder. Передача " "под управление прервана. Добавьте пользовательское свойство схемы " "'cinder_managed' для тома и присвойте ему значение False. Для устранения " "этого ограничения также можно присвоить свойству 'zfssa_manage_policy' " "значение 'loose'." #, python-format msgid "Unknown operation %s." msgstr "Неизвестная операция %s." #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "Неизвестная или неподдерживаемая команда %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "Неизвестный протокол: %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Неизвестные ресурсы квоты: %(unknown)s." msgid "Unknown service" msgstr "Неизвестная служба" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Неизвестное направление сортировки, должно быть 'desc' или 'asc'" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "Неизвестное направление сортировки. Оно должно быть 'desc' или 'asc'." msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "Опции отмены управления и каскадного удаления несовместимы." msgid "Unmanage volume not implemented." msgstr "Отмена управления томом не реализована." msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" "Отмена управления моментальными копиями томами, переключенными после сбоя, " "не разрешена." msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" "Отмена управления моментальными копиями томами, переключенными после сбоя, " "не разрешена." 
#, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "Нераспознанное ключевое слово QoS: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "Нераспознанный формат базового файл: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Нераспознанное значение read_deleted '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "Сбросить параметры gcs: %s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "Ошибка iscsiadm. Исключительная ситуация: %(ex)s. " msgid "Unsupported Clustered Data ONTAP version." msgstr "Неподдерживаемая версия ONTAP кластерных данных." msgid "Unsupported Content-Type" msgstr "Не поддерживаемый тип содержимого" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" "Неподдерживаемая версия ONTAP данных. Поддерживается версия ONTAP данных " "7.3.1 и выше." #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "Неподдерживаемая версия метаданных резервной копии (%s)" msgid "Unsupported backup metadata version requested" msgstr "Запрошена неподдерживаемая версия метаданных резервной копии" msgid "Unsupported backup verify driver" msgstr "Неподдерживаемый драйвер проверки резервной копии" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "Неподдерживаемое встроенное ПО на коммутаторе %s. Убедитесь, что на " "коммутаторе работает встроенное ПО версии 6.4 или выше" #, python-format msgid "Unsupported volume format: %s " msgstr "Неподдерживаемый формат тома: %s " msgid "Update QoS policy error." msgstr "Ошибка изменения стратегии QoS." msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "Операции изменения и удаления квоты разрешены только для администратора " "непосредственного родительского объекта и для администратора облака." msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "Операции изменения и удаления квоты разрешены только для проектов в той " "жеиерархии проекта, где находятся пользователи." msgid "Update list, doesn't include volume_id" msgstr "Список обновления не включает volume_id" msgid "Updated At" msgstr "Обновлено" msgid "Upload to glance of attached volume is not supported." msgstr "Передача в glance подключенного тома не поддерживается." msgid "Use ALUA to associate initiator to host error." msgstr "Ошибка использования ALUA для связывания инициатора с хостом." msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "Ошибка использования CHAP для связывания инициатора с хостом. Проверьте имя " "пользователя и пароль CHAP." msgid "User ID" msgstr "ID пользователя" msgid "User does not have admin privileges" msgstr "Пользователь не имеет административных привилегий" msgid "User is not authorized to use key manager." msgstr "У пользователя нет прав на использование Администратора ключей." msgid "User not authorized to perform WebDAV operations." msgstr "У пользователя нет прав доступа для выполнения операций WebDAV." msgid "UserName is not configured." msgstr "UserName не настроен." msgid "UserPassword is not configured." msgstr "UserPassword не настроен." msgid "" "V2 rollback - Volume in another storage group besides default storage group." 
msgstr "Откат V2: том не в группе носителей по умолчанию." msgid "V2 rollback, volume is not in any storage group." msgstr "Откат V2: том не принадлежит ни одной группе носителей." msgid "V3 rollback" msgstr "Откат V3" msgid "VF is not enabled." msgstr "VF не включен." #, python-format msgid "VV Set %s does not exist." msgstr "Набор VV %s не существует." #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "Допустимый приемник спецификаций QoS: %s" #, python-format msgid "Valid control location are: %s" msgstr "Допустимое расположение управления: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "Сбой проверки соединения с томом (ошибка: %(err)s)." #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" "Значение \"%(value)s\" недопустимо для параметра конфигурации \"%(option)s\"" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "Значение %(param)s для %(param_string)s не булевское." msgid "Value required for 'scality_sofs_config'" msgstr "Требуется значение для 'scality_sofs_config'" #, python-format msgid "ValueError: %s" msgstr "ValueError: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "Vdisk %(name)s не участвует в связи %(src)s -> %(tgt)s." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "Версия %(req_ver)s не поддерживается в API. Минимальная требуемая версия: " "%(min_ver)s, максимальная: %(max_ver)s." #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s не удается получить объект по ИД." #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s не поддерживает условное обновление." #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "Виртуальный том '%s' не существует в массиве." #, python-format msgid "Vol copy job for dest %s failed." msgstr "Не удалось выполнить задание копирования тома для целевого тома %s." #, python-format msgid "Volume %(deviceID)s not found." msgstr "Том %(deviceID)s не найден." #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "Том %(name)s не найден в массиве. Не удается определить, преобразованы ли " "тома." #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "Том %(name)s создан в VNX, но в состоянии %(state)s." #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "Не удалось создать том %(vol)s в пуле %(pool)s." #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "Том %(vol1)s не соответствует snapshot.volume_id %(vol2)s." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "Состояние %(vol_id)s тома должно быть available или in-use, однако текущее " "состояние - %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "Для расширения состояние тома %(vol_id)s должно быть available, однако " "текущее состояние - %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." 
msgstr "" "Том %(vol_id)s должен находиться в состоянии available, чтобы можно было " "изменить флаг readonly, однако текущее состояние - %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "Состояние %(vol_id)s тома должно быть available, однако текущее состояние - " "%(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Том %(volume_id)s не найден." #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "У тома %(volume_id)s нет метаданных администрирования с ключом " "%(metadata_key)s." #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "Том %(volume_id)s не имеет метаданных с ключом %(metadata_key)s." #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "Том %(volume_id)s связан с неподдерживаемой группой хостов %(group)s" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "Том %(volume_id)s не связан с хостом %(host)s" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "Том %(volume_id)s все еще присоединен, сначала отсоедините его." #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "Ошибка репликации тома %(volume_id)s: %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "Том %(volume_name)s занят." #, python-format msgid "Volume %s could not be created from source volume." msgstr "Не удалось создать том %s из исходного тома." #, python-format msgid "Volume %s could not be created on shares." msgstr "Не удалось создать том %s в общих ресурсах." #, python-format msgid "Volume %s could not be created." msgstr "Не удалось создать том %s." #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "Том %s не существует в Nexenta SA" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "Том %s не существует в устройстве Nexenta Store" #, python-format msgid "Volume %s does not exist on the array." msgstr "Том %s не существует в массиве." #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "Для тома %s не указан параметр provider_location - пропущено." #, python-format msgid "Volume %s doesn't exist on array." msgstr "Том %s не существует в массиве." #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "Том %s не существует в базовой системе ZFSSA." #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "Том %s уже находится под управлением OpenStack." #, python-format msgid "Volume %s is already part of an active migration." msgstr "Том %s уже часть активной операции переноса." #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "Тип тома %s не поддерживает репликацию. Для поддержки репликации в " "параметрах типа тома необходимо указать replication_enabled со значением " "' True'." #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "Том %s включен. Выключите его, чтобы передать под управление OpenStack." 
#, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." msgstr "" "Том %s не должен быть в процессе переноса или присоединен, не должен " "принадлежать группе согласования или иметь моментальные копии." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Том %s не должен быть частью группы согласования." #, python-format msgid "Volume %s must not be replicated." msgstr "Репликация тома %s запрещена." #, python-format msgid "Volume %s must not have snapshots." msgstr "Том %s не должен иметь моментальных копий." #, python-format msgid "Volume %s not found." msgstr "Том %s не найден." #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "Том %s: ошибка расширения тома" #, python-format msgid "Volume (%s) already exists on array" msgstr "Том (%s) уже существует в массиве" #, python-format msgid "Volume (%s) already exists on array." msgstr "Том (%s) уже есть в массиве." #, python-format msgid "Volume Group %s does not exist" msgstr "Группа томов %s не существует" #, python-format msgid "Volume Type %(id)s already exists." msgstr "Тип тома %(id)s уже существует." #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "Тип тома %(type_id)s не имеет дополнительных спецификаций с ключом %(id)s." #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "Удаление типа тома %(volume_type_id)s запрещено, если есть тома с таким " "типом." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "Тип тома %(volume_type_id)s не имеет дополнительных особенностей с ключом " "%(extra_specs_key)s." msgid "Volume Type id must not be None." msgstr "ИД типа тома не должен быть None." #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "Том [%(cb_vol)s] не найден в носителе CloudByte, соответствующем тому " "OpenStack [%(ops_vol)s]." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "Том [%s] не найден в хранилище CloudByte." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "Подключение тома не удалось найти с фильтром %(filter)s ." #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "Недопустимая конфигурация базовой программы тома: %(reason)s" msgid "Volume by this name already exists" msgstr "Том с таким именем уже существует" msgid "Volume cannot be restored since it contains snapshots." msgstr "Том невозможно восстановить, поскольку он содержит моментальные копии." msgid "Volume create failed while extracting volume ref." msgstr "Ошибка создания тома при извлечении ссылки на том." #, python-format msgid "Volume device file path %s does not exist." msgstr "Путь к файлу устройства тома %s не существует." #, python-format msgid "Volume device not found at %(device)s." msgstr "Не найдено устройство тома в %(device)s." #, python-format msgid "Volume driver %s not initialized." msgstr "Драйвер тома %s не инициализирован." msgid "Volume driver not ready." msgstr "Драйвер тома не готов." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "Драйвер тома выдал ошибку: %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." 
msgstr "" "У тома есть временная моментальная копия, которую невозможно удалить в " "данный момент." msgid "Volume has children and cannot be deleted!" msgstr "Невозможно удалить том, у которого есть дочерние объекты." #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "Том в группе согласования %s подключен. Его необходимо сначала отключить." msgid "Volume in consistency group still has dependent snapshots." msgstr "Том в группе согласования еще имеет зависимые моментальные копии." #, python-format msgid "Volume is attached to a server. (%s)" msgstr "Том подключен к серверу. (%s)" msgid "Volume is in-use." msgstr "Том используется." msgid "Volume is not available." msgstr "Том недоступен." msgid "Volume is not local to this node" msgstr "Том не является локальным для этого узла" msgid "Volume is not local to this node." msgstr "Том не является локальным для этого узла." msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "Запрошено резервное копирование метаданных тома, но этот драйвер еще не " "поддерживает эту функцию." #, python-format msgid "Volume migration failed: %(reason)s" msgstr "Не удалось выполнить перенос тома: %(reason)s" msgid "Volume must be available" msgstr "Том должен быть доступен" msgid "Volume must be in the same availability zone as the snapshot" msgstr "Том должен находиться в одной зоне доступности с моментальной копией" msgid "Volume must be in the same availability zone as the source volume" msgstr "Том должен находиться в одной зоне доступности с исходным томом" msgid "Volume must have a volume type" msgstr "Должен быть задан тип тома" msgid "Volume must not be part of a consistency group." msgstr "Том не должен быть частью группы согласования." msgid "Volume must not be replicated." msgstr "Репликация тома запрещена." msgid "Volume must not have snapshots." msgstr "Том не должен иметь моментальных копий." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "Не найден том для копии %(instance_id)s." msgid "Volume not found on configured storage backend." msgstr "Том не найден в настроенной базовой системе хранения." msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "Том не найден в настроенной базовой системе хранения. Если имя тома содержит " "\"/\", переименуйте его и повторите операцию." msgid "Volume not found on configured storage pools." msgstr "Не найден том в настроенных пулах памяти." msgid "Volume not found." msgstr "Том не найден." msgid "Volume not unique." msgstr "Том не является уникальным." msgid "Volume not yet assigned to host." msgstr "Том не связан с хостом." msgid "Volume reference must contain source-name element." msgstr "Ссылка на том должна содержать элемент source-name." #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "Копия тома %(volume_id)s не найдена." #, python-format msgid "Volume service %s failed to start." msgstr "Не удалось запустить службу томов %s." msgid "Volume should have agent-type set as None." msgstr "В параметре agent-type тома должно быть указано значение None." #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "Размер тома (%(volume_size)s ГБ) не может быть меньше minDisk образа " "(%(min_disk)s ГБ)." 
#, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "Размер тома '%(size)s' должен быть целым числом, превышающим 0" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "Размер тома ('%(size)s' ГБ) не может быть меньше размера исходного тома " "(%(source_size)s ГБ). Размер тома должен быть не меньше размера исходного " "тома." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "Размер тома ('%(size)s' ГБ) не может быть меньше размера моментальной копии " "(%(snap_size)s ГБ). Размер тома должен быть не меньше размера исходной " "моментальной копии." msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "Размер тома увеличился с момента последнего резервного копирования. " "Необходимо полное резервное копирование." msgid "Volume size must be a multiple of 1 GB." msgstr "Размер тома должен быть кратным 1 ГБ." msgid "Volume size must be multiple of 1 GB." msgstr "Размер тома должен быть кратным 1 ГБ." msgid "Volume size must multiple of 1 GB." msgstr "Размер тома должен быть кратен 1 ГБ." #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "Состояние тома должно быть Доступен, но текущее состояние - %s" msgid "Volume status is in-use." msgstr "Состояние тома: используется" #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "Состояние тома должно быть available или in-use для моментальной копии. " "(Фактическое состояние: %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" "Состояние тома должно быть available или in-use для моментальной копии." #, python-format msgid "Volume status must be %s to reserve." msgstr "Для резервирования том должен находиться в состоянии %s." msgid "Volume status must be 'available'." msgstr "Состояние тома должно быть available." msgid "Volume to Initiator Group mapping already exists" msgstr "Связь тома с группой инициаторов уже существует" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "Том для создания резервной копии должен быть доступным или используемым, но " "текущее состояние - \"%s\"." msgid "Volume to be restored to must be available" msgstr "Том для восстановления должен быть доступен" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "Тип тома %(volume_type_id)s не может быть найден." #, python-format msgid "Volume type ID '%s' is invalid." msgstr "ИД типа тома '%s' недопустим." #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "Права доступа к типу тома для комбинации %(volume_type_id)s / %(project_id)s " "уже существуют." #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "Не найдены права доступа к комбинации типа тома %(volume_type_id)s / " "%(project_id)s ." #, python-format msgid "Volume type does not match for share %s." msgstr "Тип тома не совпадает с типом для %s." #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "Шифрование типа тома для типа %(type_id)s уже существует." 
#, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "Шифрование типа тома для типа %(type_id)s не существует." msgid "Volume type name can not be empty." msgstr "Имя типа тома не должно быть пустым." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "Тип тома под названием %(volume_type_name)s не может быть найден." #, python-format msgid "Volume with volume id %s does not exist." msgstr "Том с ИД тома %s не существует." #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "Том: %(volumeName)s не является объединенным томом. Выполнить Extend можно " "только над объединенным томом. Выполняется выход..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" "Том %(volumeName)s не был добавлен в группу носителей %(sgGroupName)s. " #, python-format msgid "Volume: %s could not be found." msgstr "Не найден том %s." #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Том %s уже находится под управлением Cinder." msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "Тома будут разбиты на блоки этого размера (в мегабайтах)." msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" "Число томов превышено и для основной, и для вторичной учетной записи " "SolidFire." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "Конфигурация VzStorage 'vzstorage_used_ratio' недопустима. Значение должно " "быть больше 0 и не больше 1,0: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "Файл конфигурации VzStorage в %(config)s не существует." msgid "Wait replica complete timeout." msgstr "Ожидание тайм-аута завершения репликации." #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Ошибка ожидания синхронизации. Состояние выполнения: %s." msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "Ожидание добавления всех узлов в кластер. Убедитесь, что работают все демоны " "sheep." msgid "We should not do switch over on primary array." msgstr "Переключение для основного массива не разрешено." msgid "Wrong resource call syntax" msgstr "Синтаксическая ошибка вызова ресурса" msgid "X-IO Volume Driver exception!" msgstr "Исключительная ситуация драйвера тома X-IO!" msgid "XML support has been deprecated and will be removed in the N release." msgstr "Поддержка XML устарела и будет удалена в выпуске N. " msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "Неправильно настроен XtremIO: не найдены порталы iSCSI" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO неправильно инициализирован, кластеры не найдены" msgid "You must implement __call__" msgstr "Отсутствует реализация __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "Необходимо установить hpe3parclient, прежде чем использовать драйверы 3PAR. " "Выполните команду \"pip install --install python-3parclient\" для установки " "hpe3parclient." msgid "You must supply an array in your EMC configuration file." msgstr "Необходимо указать массив в файле конфигурации EMC." 
#, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "Исходный размер, %(originalVolumeSize)s ГБ, превышает %(newSize)s ГБ. " "Поддерживается только Extend. Выполняется выход..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "Зона" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Стратегия распределения по зонам %s не распознана" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "" "_create_and_copy_vdisk_data: не удалось получить атрибуты для vdisk %s." msgid "_create_host failed to return the host name." msgstr "Функции _create_host не удалось вернуть имя хоста." msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: Не удалось преобразовать имя хоста. Имя хоста не типа unicode " "или string." msgid "_create_host: No connector ports." msgstr "_create_host: отсутствуют порты коннекторов." msgid "_create_local_cloned_volume, Replication Service not found." msgstr "_create_local_cloned_volume, служба репликации не найдена." #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume, имя тома: %(volumename)s, имя исходного тома: " "%(sourcevolumename)s, экземпляр исходного тома: %(source_volume)s, экземпляр " "целевого тома: %(target_volume)s, код возврата: %(rc)lu, ошибка: " "%(errordesc)s." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - в выводе CLI не найдено сообщение об успехе.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgid "_create_volume_name, id_code is None." msgstr "_create_volume_name, id_code равен None." msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession, не найдена служба репликации" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession, не определен тип сеанса копировния, сеанс копирования: " "%(cpsession)s, тип копирования: %(copytype)s." #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession, сеанс копирования: %(cpsession)s, операция: " "%(operation)s, код возврата: %(rc)lu, ошибка: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume, имя тома: %(volumename)s, код возврата: %(rc)lu, ошибка: " "%(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "_delete_volume, имя тома: %(volumename)s, служба настройки хранилища не " "найдена." #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service, имя класса: %(classname)s, InvokeMethod, не удается " "подключиться к ETERNUS." msgid "_extend_volume_op: Extending a volume with snapshots is not supported." 
msgstr "" "_extend_volume_op: расширение тома с моментальными копиями не поддерживается." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, коннектор: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, не удается подключиться к ETERNUS." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group, коннектор: %(connector)s, EnumerateInstanceNames, не " "удается подключиться к ETERNUS." #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, коннектор: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, не удается подключиться к ETERNUS." #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession, ReferenceNames, экземпляр тома: %(vol_instance_path)s, не " "удается подключиться к ETERNUS." #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service, имя класса: %(classname)s, EnumerateInstanceNames, не " "удается подключиться к ETERNUS." #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "_find_initiator_names, коннектор: %(connector)s, инициатор не найден." #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun, имя тома: %(volumename)s, EnumerateInstanceNames, не удается " "подключиться к ETERNUS." #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool, пул eternus:%(eternus_pool)s, EnumerateInstances, не удается " "подключиться к ETERNUS." #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg, имя файла: %(filename)s, имя тега: %(tagname)s, пустые данные. " "Исправьте ошибку в файле конфигурации драйвера." #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection, имя файла: %(filename)s, ip: %(ip)s, порт: " "%(port)s, пользователь: %(user)s, пароль: ****, url: %(url)s, ошибка." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn не найден." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, не удается подключиться к ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "не удается подключиться к ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." 
msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, не удается " "подключиться к ETERNUS." #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: заголовки и значения атрибутов не совпадают.\n" " Заголовки: %(header)s\n" " Значения: %(row)s." msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "Функции _get_host_from_connector не удалось вернуть имя хоста для коннектора." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, ошибка получения host-affinity из aglist/vol_instance, " "группа привязки: %(ag)s, ReferenceNames, не удается подключиться к ETERNUS." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, ошибка получения экземпляра host-affinity, volmap: " "%(volmap)s, GetInstance, не удается подключиться к ETERNUS." msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, не удается " "подключиться к ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, группа привязки: %(ag)s, ReferenceNames, не удается " "подключиться к ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, экземпляр тома: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, не удается подключиться к ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, не удается подключиться " "к ETERNUS." msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "" "_get_target_port, EnumerateInstances, не удается подключиться к ETERNUS." #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "_get_target_port, протокол: %(protocol)s, целевой порт не найден." #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay: не найдена моментальная копия %s" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay: не найден том с ИД %s" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: необходимо указать source-name." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties: не удалось получить информацию о соединении FC " "для соединения хост-том. Правильно ли настроен хост для соединений FC?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: не найдены узлы в группе ввода-вывода %(gid)s для " "тома %(vol)s." 
#, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun, vol_instance.path:%(vol)s, имя тома: %(volumename)s, uid тома: " "%(uid)s, инициатор: %(initiator)s, цель: %(tgt)s, aglist: %(aglist)s, служба " "настройки хранилища не найдена." #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun, vol_instance.path: %(volume)s, имя тома: %(volumename)s, uid " "тома: %(uid)s, aglist: %(aglist)s, служба настройки контроллера не найдена." #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun, имя тома: %(volumename)s, uid тома: %(volume_uid)s, " "AffinityGroup: %(ag)s, код возврата: %(rc)lu, ошибка: %(errordesc)s." #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, не удается подключиться к ETERNUS." msgid "_update_volume_stats: Could not get storage pool data." msgstr "_update_volume_stats: не удалось получить данные пула хранения." #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete, cpsession: %(cpsession)s, состояние copysession: " "BROKEN." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "Операция add_vdisk_copy не выполнена: существует копия тома %s. Добавление " "другой копии приведет к превышению ограничения в 2 копии." msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" "Функция add_vdisk_copy начала выполнение без копии vdisk в ожидаемом пуле." #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "Значение all_tenants должно быть булевским. Получено: %s."
msgid "already created" msgstr "уже создан" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "подключить моментальную копию с удаленного узла" #, python-format msgid "attribute %s not lazy-loadable" msgstr "атрибут %s не с отложенной загрузкой" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "При создании резервной копии %(vol_id)s не удалось создать жесткую ссылку на " "устройство с %(vpath)s на %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "При создании резервной копии %(vol_id)s не удалось получить уведомление об " "успешности от сервера.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Во время выполнения резервного копирования %(vol_id)s не удалось выполнить " "команду dsmc: недопустимые аргументы в %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Во время резервного копирования %(vol_id)s не удалось выполнить команду dsmc " "в %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "Сбой создания резервной копии %(vol_id)s. %(path)s не файл." #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "Сбой создания резервной копии %(vol_id)s. %(path)s - неподдерживаемый тип " "файла. Поддерживаются блоковые и обычные файлы. Фактический режим файла: " "%(vol_mode)s." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "Сбой создания резервной копии %(vol_id)s. Не удалось получить фактический " "путь к тому в %(path)s." 
msgid "being attached by different mode" msgstr "подключается с другим режимом" #, python-format msgid "call failed: %r" msgstr "вызов не выполнен: %r" msgid "call failed: GARBAGE_ARGS" msgstr "вызов не выполнен: GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "вызов не выполнен: PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "вызов не выполнен: PROG_MISMATCH: %r" msgid "call failed: PROG_UNAVAIL" msgstr "вызов не выполнен: PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "не найден элемент lun-map, группа инициаторов: %(ig)s, том: %(vol)s" msgid "can't find the volume to extend" msgstr "не найден том для расширения" msgid "can't handle both name and index in req" msgstr "нельзя обработать и имя и индекс в запросе" msgid "cannot understand JSON" msgstr "невозможно понять JSON" msgid "cannot understand XML" msgstr "Не удается проанализировать XML" #, python-format msgid "cg-%s" msgstr "cg-%s" msgid "cgsnapshot assigned" msgstr "cgsnapshot присвоен" msgid "cgsnapshot changed" msgstr "cgsnapshot изменен" msgid "cgsnapshots assigned" msgstr "cgsnapshots присвоены" msgid "cgsnapshots changed" msgstr "cgsnapshots изменены" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error: для идентификации требуется пароль или личный ключ " "SSH: задайте опцию san_password или san_private_key." msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error: не удалось определить ИД системы." msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error: не удалось определить имя системы." msgid "check_hypermetro_exist error." msgstr "Ошибка check_hypermetro_exist." #, python-format msgid "clone depth exceeds limit of %s" msgstr "Глубина дублирования превышает ограничение (%s)" msgid "consistencygroup assigned" msgstr "consistencygroup присвоена" msgid "consistencygroup changed" msgstr "consistencygroup изменена" msgid "control_location must be defined" msgstr "Должен быть определен параметр control_location" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume, том источника не существует в ETERNUS." #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume, имя экземпляра целевого тома: %(volume_instancename)s, " "ошибка получения экземпляра." msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume: различаются исходный и целевой размеры." #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume: размер исходного тома %(src_vol)s равен %(src_size)d " "ГБ и превышает размер целевого тома %(tgt_vol)s, %(tgt_size)d ГБ." msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src: необходимо создавать из моментальной копии " "группы согласования, а не их исходной группы согласования." msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." 
msgstr "" "create_consistencygroup_from_src поддерживает только источник cgsnapshot или " "источник группы согласования. Несколько источников использовать нельзя." msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src поддерживает только источник cgsnapshot или " "источник группы согласования. Несколько источников использовать нельзя." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: Исходный vdisk %(src)s (%(src_id)s) не существует." #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy: исходный vdisk %(src)s не существует." msgid "create_host: Host name is not unicode or string." msgstr "create_host: Имя хоста не типа string или unicode." msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: Не переданы инициаторы или глобальные имена портов." msgid "create_hypermetro_pair error." msgstr "Ошибка create_hypermetro_pair." #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "create_snapshot, пул eternus: %(eternus_pool)s, пул не найден." #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot, имя моментальной копии: %(snapshotname)s, имя исходного " "тома: %(volumename)s, путь к экземпляру: %(vol_instance)s, имя целевого " "тома: %(d_volumename)s, пул: %(pool)s, код возврата: %(rc)lu, ошибка: " "%(errordesc)s." #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot, имя тома: %(s_volumename)s, исходный том не найден в " "ETERNUS." #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "" "create_snapshot, имя тома: %(volumename)s, служба репликации не найдена." #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot: Требуемое состояние тома для моментальной копии: \"available" "\" или \"in-use\". Недопустимое состояние: %s." msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: не удалось получить исходный том." #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume, том: %(volume)s, EnumerateInstances, не удается подключиться " "к ETERNUS." #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume, том: %(volume)s, имя тома: %(volumename)s, пул eternus: " "%(eternus_pool)s, служба настройки хранилища не найдена." #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_volume, имя тома: %(volumename)s, имя пула: %(eternus_pool)s, код " "возврата: %(rc)lu, ошибка: %(errordesc)s." msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "create_volume_from_snapshot, том источника не существует в ETERNUS." 
#, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot, имя экземпляра целевого тома: " "%(volume_instancename)s, ошибка получения экземпляра." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" "create_volume_from_snapshot: Моментальная копия %(name)s не существует." #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: Для создания тома состояние моментальной копии " "должно быть \"available\". Недопустимое состояние: %s." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "create_volume_from_snapshot: различаются исходный и целевой размеры." msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "create_volume_from_snapshot: размер тома отличается от размера тома на " "основе моментальной копии." msgid "deduplicated and auto tiering can't be both enabled." msgstr "" "Включить запрет дубликатов и автоматическое разбиение на слои одновременно " "нельзя." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "Во время удаления %(vol_id)s не удалось выполнить команду dsmc: недопустимые " "аргументы с stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Во время удаления %(vol_id)s не удалось выполнить команду dsmc, stdout: " "%(out)s\n" " stderr: %(err)s" msgid "delete_hypermetro error." msgstr "Ошибка delete_hypermetro." #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator: %s ACL не найден. Выполнение продолжается." msgid "delete_replication error." msgstr "Ошибка delete_replication." #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "удаляемый снимок %(snapshot_name)s имеет зависимые тома" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "удаление тома %(volume_name)s, который имеет снимок" msgid "detach snapshot from remote node" msgstr "отключить моментальную копию от удаленного узла" msgid "do_setup: No configured nodes." msgstr "do_setup: Нет настроенных узлов." msgid "element is not a child" msgstr "элемент не является потомком" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "Значение eqlx_cli_max_retries должно быть больше или равно 0" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "Ошибка записи объекта в swift, MD5 объекта в swift %(etag)s не совпадает с " "MD5 объекта, отправленного в swift %(md5)s" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "extend_volume, пул eternus: %(eternus_pool)s, пул не найден." #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume, том: %(volume)s, имя тома: %(volumename)s, пул eternus: " "%(eternus_pool)s, служба настройки хранилища не найдена."
#, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume, имя тома: %(volumename)s, код возврата: %(rc)lu, ошибка: " "%(errordesc)s, тип пула: %(pooltype)s." #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "extend_volume, имя тома: %(volumename)s, том не найден." msgid "failed to create new_volume on destination host" msgstr "не удалось создать new_volume на целевом хосте" msgid "fake" msgstr "поддельный" #, python-format msgid "file already exists at %s" msgstr "файл уже существует в %s" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "fileno не поддерживается SheepdogIOWrapper" msgid "fileno() not supported by RBD()" msgstr "Функция fileno() не поддерживается RBD()" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "Файловая система %s не существует в устройстве Nexenta Store" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "Параметру flashsystem_multihostmap_enabled присвоено значение False, не " "разрешать преобразование нескольких хостов. CMMVC6071E Преобразование VDisk-" "в-хост не создано, так как VDisk уже преобразован в хост." msgid "flush() not supported in this version of librbd" msgstr "Функция flush() не поддерживается в этой версии librbd" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s backed by: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s на основе:%(backing_file)s" msgid "force delete" msgstr "принудительно удалить" msgid "get_hyper_domain_id error." msgstr "Ошибка get_hyper_domain_id." msgid "get_hypermetro_by_id error." msgstr "Ошибка get_hypermetro_by_id." #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params: Не удалось получить целевой IP-адрес для инициатора " "%(ini)s. Проверьте файл конфигурации." #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: не удалось получить атрибуты для тома %s." msgid "glance_metadata changed" msgstr "glance_metadata изменены" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "Параметру gpfs_images_share_mode присвоено значение copy_on_write, но " "%(vol)s и %(img)s относятся к разным файловым системам." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "Параметру gpfs_images_share_mode присвоено значение copy_on_write, но " "%(vol)s и %(img)s относятся к разным наборам файлов." #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "hgst_group %(grp)s и hgst_user %(usr)s должны быть связаны с пользователями/" "группами в файле cinder.conf" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "" "Указанное в файле cinder.conf значение hgst_net %(net)s не найдено в кластере" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf."
msgstr "" "Значение hgst_redundancy в файле cinder.conf должно быть 0 (не высокая " "готовность) или 1 (высокая готовность)." msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "" "Значение hgst_space_mode в файле cinder.conf должно быть восьмеричным/" "целочисленным" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "Сервер hgst_storage %(svr)s не в формате <хост>:<устройство>" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "Должно быть указано значение hgst_storage_servers в файле cinder.conf" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "Служба http может быть неожиданно отключена или переведена в режим " "обслуживания посреди выполнения операции." msgid "id cannot be None" msgstr "Недопустимое значение для ИД: None" #, python-format msgid "image %s not found" msgstr "не найден образ %s" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "initialize_connection, том: %(volume)s, том не найден." #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "initialize_connection: Не удалось получить атрибуты для тома %s." #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "initialize_connection: Отсутствует атрибут тома для тома %s." #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: Не найдены узлы в группе ввода-вывода %(gid)s для " "тома %(vol)s." #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: vdisk %s не определен." #, python-format msgid "invalid user '%s'" msgstr "недопустимый пользователь %s" #, python-format msgid "iscsi portal, %s, not found" msgstr "портал iSCSI %s не найден" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "В файле конфигурации должен быть указан параметр iscsi_ip_address, когда " "используется протокол 'iSCSI'." msgid "iscsiadm execution failed. " msgstr "Ошибка выполнения iscsiadm. " #, python-format msgid "key manager error: %(reason)s" msgstr "ошибка администратора ключей: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key не определен" msgid "limit param must be an integer" msgstr "Параметр limit должен быть целым числом" msgid "limit param must be positive" msgstr "Параметр limit должен быть положительным" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "manage_existing не поддерживает управление томом, подключенным к хостам. " "Отключите том от существующих хостов перед импортом" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "Для manage_existing требуется ключ 'name' для идентификации существующего " "тома." 
#, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot: ошибка при обработке существующего повтора %(ss)s " "для тома %(vol)s" #, python-format msgid "marker [%s] not found" msgstr "маркер [%s] не найден" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "у mdiskgrp отсутствуют кавычки %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" "Значение migration_policy должно быть 'on-demand' или 'never', передано значение " "%s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "Сбой команды mkfs для тома %(vol)s, сообщение об ошибке: %(err)s." msgid "mock" msgstr "ложный" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs не установлена" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "drbdmanage обнаружил несколько ресурсов с именем %s" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "Обнаружено несколько ресурсов с ИД моментальной копии %s" msgid "name cannot be None" msgstr "Недопустимое значение для имени: None" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path: инструмент NAVISECCLI %(path)s не найден." #, python-format msgid "no REPLY but %r" msgstr "нет REPLY, но получено %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "В drbdmanage не обнаружено моментальных копий с ИД %s" #, python-format msgid "not exactly one snapshot with id %s" msgstr "Найдена не ровно одна моментальная копия с ИД %s" #, python-format msgid "not exactly one volume with id %s" msgstr "Найден не ровно один том с ИД %s" #, python-format msgid "obj missing quotes %s" msgstr "у obj отсутствуют кавычки %s" msgid "open_access_enabled is not off." msgstr "Параметр open_access_enabled не отключен." msgid "progress must be an integer percentage" msgstr "ход выполнения должен быть целым значением в процентах" msgid "promote_replica not implemented." msgstr "promote_replica не реализован." msgid "provider must be defined" msgstr "должен быть определен провайдер" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "Этому драйверу тома требуется qemu-img версии %(minimum_version)s или выше. " "Текущая версия qemu-img: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img не установлен, и образ имеет тип %s. Только образы RAW могут " "использоваться, когда qemu-img не установлен." msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img не установлен, и не указан формат диска. Только образы RAW могут " "использоваться, когда qemu-img не установлен." msgid "rados and rbd python libraries not found" msgstr "Не найдены библиотеки python rados и rbd" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted может принимать значения 'no', 'yes' или 'only', значение %r " "недопустимо" #, python-format msgid "replication_device should be configured on backend: %s." msgstr "В базовой системе должно быть настроено устройство репликации: %s." #, python-format msgid "replication_device with backend_id [%s] is missing."
msgstr "Отсутствует устройство репликации с backend_id [%s]." #, python-format msgid "replication_failover failed. %s not found." msgstr "Ошибка replication_failover. %s не найден." msgid "replication_failover failed. Backend not configured for failover" msgstr "" "Ошибка replication_failover. Базовая система не настроена для переключения " "после сбоя" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Во время выполнения восстановления %(vol_id)s не удалось выполнить команду " "dsmc: недопустимые аргументы в %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Во время восстановления %(vol_id)s не удалось выполнить команду dsmc в " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "сбой восстановления %(vol_id)s.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "Операция restore_backup прервана, список фактических объектов не совпадает " "со списком объектов в метаданных." msgid "root element selecting a list" msgstr "корневой элемент выбирает список" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "В rtslib_fb отсутствует элемент %s: может потребоваться более новая версия " "python-rtslib-fb." msgid "san_ip is not set." msgstr "Не задано значение san_ip." msgid "san_ip must be set" msgstr "san_ip должен быть назначен" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip: обязательное поле. san_ip не задан." msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "Не указано значение параметра san_login или san_password для драйвера Datera " "в cinder.conf. Укажите эту информацию и запустите службу cinder-volume еще " "раз." msgid "serve() can only be called once" msgstr "serve() может быть вызван только один раз" msgid "service not found" msgstr "служба не найдена" msgid "snapshot does not exist" msgstr "моментальная копия не существует" #, python-format msgid "snapshot id:%s not found" msgstr "Не найден ИД моментальной копии %s" #, python-format msgid "snapshot-%s" msgstr "snapshot-%s" msgid "snapshots assigned" msgstr "моментальные копии присвоены" msgid "snapshots changed" msgstr "моментальные копии изменены" #, python-format msgid "source vol id:%s not found" msgstr "Не найден ИД исходного тома %s" #, python-format msgid "source volume id:%s is not replicated" msgstr "исходный том с ИД %s не скопирован" msgid "source-name cannot be empty." msgstr "source-name не может быть пустым." msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "" "source-name должно указываться в формате: 'vmdk_path@vm_inventory_path'." #, python-format msgid "status must be %s and" msgstr " состоянием должно быть %s и " msgid "status must be available" msgstr "Требуемое состояние: Доступен" msgid "stop_hypermetro error." msgstr "Ошибка stop_hypermetro." msgid "subclasses must implement construct()!" msgstr "подклассы должны реализовывать construct()!" 
msgid "sudo failed, continuing as if nothing happened" msgstr "" "не удалось выполнить sudo, выполнение продолжается, как будто ничего не " "произошло" msgid "sync_hypermetro error." msgstr "Ошибка sync_hypermetro." msgid "sync_replica not implemented." msgstr "sync_replica не реализован." #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "Программа targetcli не установлена, поэтому не удалось создать каталог по " "умолчанию (%(default_path)s): %(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "terminate_connection: Не удалось получить имя хоста из коннектора." msgid "timeout creating new_volume on destination host" msgstr "истек тайм-аут создания new_volume на целевом хосте" msgid "too many body keys" msgstr "слишком много ключей тела" #, python-format msgid "umount: %s: not mounted" msgstr "umount: %s: не смонтирован" #, python-format msgid "umount: %s: target is busy" msgstr "umount: %s: цель занята" msgid "umount: : some other error" msgstr "umount: <путь_к_mnt>: другая ошибка" msgid "umount: : target is busy" msgstr "umount: <путь_к_mnt>: цель занята" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot: не найдена моментальная копия %s" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot: не найден том с ИД %s" #, python-format msgid "unrecognized argument %s" msgstr "Нераспознанный аргумент %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "Неподдерживаемый алгоритм сжатия: %s" msgid "valid iqn needed for show_target" msgstr "Для show_target требуется допустимый iqn" #, python-format msgid "vdisk %s is not defined." msgstr "vdisk %s не определен." msgid "vmemclient python library not found" msgstr "Не найдена библиотека vmemclient для языка Python" #, python-format msgid "volume %s not found in drbdmanage" msgstr "Том %s не найден в drbdmanage" msgid "volume assigned" msgstr "том присвоен" msgid "volume changed" msgstr "том изменен" msgid "volume does not exist" msgstr "Том не существует" msgid "volume is already attached" msgstr "том уже подключен" msgid "volume is not local to this node" msgstr "Том не является локальным для этого узла" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "Размер тома %(volume_size)d слишком мал для восстановления резервной копии " "размером %(size)d." #, python-format msgid "volume size %d is invalid." msgstr "Недопустимый размер тома %d." msgid "volume_type cannot be None" msgstr "volume_type не может быть задан как None" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "Должен быть указан volume_type, когда создается том в группе согласования ." msgid "volume_type_id cannot be None" msgstr "Недопустимое значение для volume_type_id: None" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" "Необходимо указать volume_types для создания группы согласования %(name)s." #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "Необходимо указать volume_types для создания группы согласования %s." msgid "volumes assigned" msgstr "тома присвоены" msgid "volumes changed" msgstr "тома изменены" #, python-format msgid "wait_for_condition: %s timed out." msgstr "Истек тайм-аут wait_for_condition: %s." 
#, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "" "Свойству zfssa_manage_policy должно быть присвоено значение 'strict' или " "'loose'. Текущее значение: %s." msgid "{} is not a valid option." msgstr "{} не является допустимым параметром." cinder-8.0.0/cinder/locale/cs/0000775000567000056710000000000012701406543017236 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/cs/LC_MESSAGES/0000775000567000056710000000000012701406543021023 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/cs/LC_MESSAGES/cinder-log-error.po0000664000567000056710000030765612701406257024550 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # Zbyněk Schwarz , 2014 # OpenStack Infra , 2015. #zanata # Zbyněk Schwarz , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev10\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-17 18:05+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-12-21 12:13+0000\n" "Last-Translator: Zbyněk Schwarz \n" "Language: cs\n" "Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Czech\n" #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume %(volume_name)s. " "Failed to remove from new volume set %(new_vvs)s." msgstr "" "%(exception)s: Výjimka během vrácení přetypování svazku %(volume_name)s. " "Nelze odstranit z nové sady svazků %(new_vvs)s." #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume %(volume_name)s. " "Original volume set/QOS settings may not have been fully restored." msgstr "" "%(exception)s: Výjimka během vrácení přetypování svazku %(volume_name)s. " "Původní nastavení sady svazků/QoS nemusí být zcela obnovena." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" "stdout: %(out)s\n" "stderr: %(err)s\n" msgstr "" "%(fun)s: selhalo s nečekaným výstupem příkazového řádku.\n" "Příkaz: %(cmd)s\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s\n" #, python-format msgid "" "%(method)s %(url)s unexpected response status: %(response)s (expects: " "%(expects)s)." msgstr "" "%(method)s %(url)s neočekávaná odpověď stavu: %(response)s (očekáváno: " "%(expects)s)." #, python-format msgid "%(name)s: %(value)s" msgstr "%(name)s: %(value)s" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" msgstr "'%(value)s' je neplatná hodnota pro dodatečnou specifikaci '%(key)s'" #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting create_snapshot operation!" msgstr "" "Účet pro svazek s ID %s nebyl nalezen v clusteru SolidFire, při pokusu o " "provedení operace vytvoření snímku!" #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting delete_volume operation!" msgstr "" "Účet pro svazek s ID %s nebyl nalezen v clusteru SolidFire, při pokusu o " "provedení operace smazání svazku!" #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting unmanage operation!"
msgstr "" "Účet pro svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o " "provedení operace zrušení správy!" #, python-format msgid "Array Serial Number must be in the file %(fileName)s." msgstr "Sériové číslo pole musí být v souboru %(fileName)s." #, python-format msgid "Array mismatch %(myid)s vs %(arid)s" msgstr "Neshoda pole: %(myid)s proti %(arid)s" #, python-format msgid "Array query failed - No response (%d)!" msgstr "Dotaz na pole selhal - Žádná odpověď (%d)!" msgid "Array query failed. No capabilities in response!" msgstr "Dotaz na pole selhal. V odpovědi nebyly uvedeny schopnosti!" msgid "Array query failed. No controllers in response!" msgstr "Dotaz na pole selhal. V odpovědi nebyly uvedeny kontroléry!" msgid "Array query failed. No global id in XML response!" msgstr "Dotaz na pole selhal. V odpovědi XML nebylo globální id!" msgid "Attaching snapshot from a remote node is not supported." msgstr "Připojení snímku ke vzdálenému uzlu není podporováno." #, python-format msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." msgstr "Ověřování žádosti: %(zfssaurl)s pokus: %(retry)d ." msgid "Backend returned err for lun export." msgstr "Při export lun vrátila podpůrná vrstva chybu!" #, python-format msgid "Backup id %s is not invalid. Skipping reset." msgstr "ID zálohy %s není neplatné. Resetování je přeskočeno." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Zálohovací služba %(configured_service)s nepodporuje ověřování. Záloha s id " "%(id)s není ověřena. Ověřování přeskočeno." #, python-format msgid "Backup volume metadata failed: %s." msgstr "Záložní popisná data svazku selhala: %s" #, python-format msgid "Bad response from server: %(url)s. Error: %(err)s" msgstr "Špatná odpověď od serveru: %(url)s. Chyba: %(err)s" #, python-format msgid "" "CG snapshot %(cgsnap)s not found when creating consistency group %(cg)s from " "source." msgstr "" "Nebyl nalezen snímek skupiny jednotnosti %(cgsnap)s při vytváření skupiny " "%(cg)s ze zdroje." #, python-format msgid "" "CLI fail: '%(cmd)s' = %(code)s\n" "out: %(stdout)s\n" "err: %(stderr)s" msgstr "" "Selhání rozhraní příkazového řádku: '%(cmd)s' = %(code)s\n" "výstup: %(stdout)s\n" "chyba: %(stderr)s" msgid "Call to Nova delete snapshot failed" msgstr "Volání Nova pro smazání snímku selhalo" msgid "Call to Nova to create snapshot failed" msgstr "Volání Nova pro vytvoření snímku selhalo" #, python-format msgid "Call to json.loads() raised an exception: %s." msgstr "Volání json.loads() vyvolalo výjimku: %s." #, python-format msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s." msgstr "Nelze přidat lun %(lun)s do skupiny jednotnosti %(cg_name)s." #, python-format msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." msgstr "Nelze zjišťovat v %(target_ip)s pomocí %(target_iqn)s." msgid "Can not open the recent url, login again." msgstr "Nelze otevřít nedávnou adresu url, probíhá znovu přihlašování." #, python-format msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s." msgstr "Nelze umístit nové LUN %(luns)s do skupiny jednotnosti %(cg_name)s." #, python-format msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." msgstr "Nelze odebrat lun %(luns)s ze skupiny jednotnosti %(cg_name)s." #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "Nelze najít svazek k mapování %(key)s, %(msg)s" msgid "Can't open the recent url, relogin." 
msgstr "Nelze otevřít nedávnou adresu url, probíhá znovu přihlašování." #, python-format msgid "" "Cannot add and verify tier policy association for storage group : " "%(storageGroupName)s to FAST policy : %(fastPolicyName)s." msgstr "" "Nelze přidat a ověřit přidružení zásady vrstvení skupiny úložiště : " "%(storageGroupName)s k zásadě FAST : %(fastPolicyName)s." #, python-format msgid "Cannot clone image %(image)s to volume %(volume)s. Error: %(error)s." msgstr "Nelze klonovat obraz %(image)s do svazku %(volume)s. Chyba: %(error)s." #, python-format msgid "Cannot create or find an initiator group with name %(igGroupName)s." msgstr "Nelze vytvořit nebo najít skupinu zavaděče s názvem %(igGroupName)s." #, python-format msgid "Cannot delete file %s." msgstr "Nelze smazat soubor %s." msgid "Cannot detect replica status." msgstr "Nelze zjistit stav repliky." msgid "Cannot determine if Tiering Policies are supported." msgstr "Nelze zjistit zda zásady vrstvení jsou podporovány." msgid "Cannot determine whether Tiering Policy is supported on this array." msgstr "Nelze zjistit zdali je na tomto poli podporována zásada vrstvení." #, python-format msgid "Cannot find Consistency Group %s" msgstr "Nelze najít skupinu jednotnosti %s." #, python-format msgid "" "Cannot find a portGroup with name %(pgGroupName)s. The port group for a " "masking view must be pre-defined." msgstr "" "Nelze najít skupinu portů s názvem %(pgGroupName)s. Skupina pro zamaskování " "musí být předem určena." #, python-format msgid "Cannot find the fast policy %(fastPolicyName)s." msgstr "Nelze najít zásadu fast %(fastPolicyName)s." #, python-format msgid "" "Cannot find the new masking view just created with name %(maskingViewName)s." msgstr "Nelze najít právě vytvořené zamaskování s názvem %(maskingViewName)s." #, python-format msgid "Cannot get QoS spec for volume %s." msgstr "Nelze získat specifikaci QoS pro svazek %s." #, python-format msgid "Cannot get storage Group from job : %(storageGroupName)s." msgstr "Nelze získat skupinu úložiště z úkolu : %(storageGroupName)s." msgid "Cannot get storage system." msgstr "Nelze získat úložný systém." #, python-format msgid "" "Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s" msgstr "Změna názvu svazku z %(tmp)s na %(orig)s selhala protože %(reason)s" #, python-format msgid "" "Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s." msgstr "Změna názvu svazku z %(tmp)s na %(orig)s selhala protože %(reason)s." #, python-format msgid "Clone %s not in prepared state!" msgstr "Klon %s není v stavu připraveno!" #, python-format msgid "Clone Volume:%(volume)s failed from source volume:%(src_vref)s" msgstr "Klonování svazku %(volume)s ze zdrojového svazku %(src_vref)s selhalo." #, python-format msgid "" "Clone volume \"%s\" already exists. Please check the results of \"dog vdi " "list\"." msgstr "" "Klon svazku \"%s\" již existuje. Prosím zkontrolujte výstup příkazu \"dog " "vdi list\"." #, python-format msgid "Cloning of volume %s failed." msgstr "Klonování svazku %s selhalo." #, python-format msgid "" "CloudByte does not have a volume corresponding to OpenStack volume [%s]." msgstr "CloudByte nemá svazek odpovídající svazku OpenStack [%s]." #, python-format msgid "" "CloudByte operation [%(operation)s] failed for volume [%(vol)s]. Exhausted " "all [%(max)s] attempts." msgstr "" "Operace CloudByte [%(operation)s] selhala u svazku [%(vol)s]. Využito všech " "[%(max)s] pokusů." 
#, python-format msgid "" "CloudByte snapshot information is not available for OpenStack volume [%s]." msgstr "" "Informace o snímku CloudByte nejsou dostupné pro svazek OpenStack [%s]." #, python-format msgid "CloudByte volume information not available for OpenStack volume [%s]." msgstr "" "Informace o svazku CloudByte nejsou dostupné pro svazek OpenStack [%s]." #, python-format msgid "Cmd :%s" msgstr "Příkaz :%s" #, python-format msgid "Commit clone failed: %(name)s (%(status)d)!" msgstr "Odevzdání klonu selhalo: %(name)s (%(status)d)!" #, python-format msgid "Commit failed for %s!" msgstr "Odevzdání selhalo pro %s!" #, python-format msgid "Compute cluster: %s not found." msgstr "Výpočetní cluster: %s nenalezen." #, python-format msgid "Configuration value %s is not set." msgstr "Konfigurační volba %s není nastavena." #, python-format msgid "Conflict detected in Virtual Volume Set %(volume_set)s: %(error)s" msgstr "V sadě virtuálního svazku %(volume_set)s zjištěn konflikt: %(error)s" #, python-format msgid "Connect to Flexvisor error: %s." msgstr "Chyba při připojení k Flexvisor: %s." #, python-format msgid "Connect to Flexvisor failed: %s." msgstr "Připojení k Flexvisor selhalo: %s." msgid "Connection error while sending a heartbeat to coordination backend." msgstr "" "Chyba připojení při odesílání informací o aktivitě do vrstvy pro koordinaci." #, python-format msgid "Connection to %s failed and no secondary!" msgstr "Připojení k %s selhalo a žádné druhotné připojení není nastaveno!" #, python-format msgid "Consistency group %s: create failed" msgstr "Skupina jednotnosti %s: Vytvoření selhalo" #, python-format msgid "Controller GET failed (%d)" msgstr "Získání kontroléru selhalo (%d)!" #, python-format msgid "Copy offload workflow unsuccessful. %s" msgstr "Kopírování postupu snížení zátěže bylo neúspěšné. %s" #, python-format msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" msgstr "" "Kopírování snímku do svazku pro snímek %(snap)s a svazek %(vol)s selhalo!" #, python-format msgid "Could not GET allocation information (%d)!" msgstr "Nelze získat informace o přidělení (%d)!" #, python-format msgid "Could not connect to %(primary)s or %(secondary)s!" msgstr "Nelze se připojit k %(primary)s nebo %(secondary)s!" #, python-format msgid "Could not create snapshot set. Error: '%s'" msgstr "Nelze vytvořit sadu snímků. Chyba: '%s'" msgid "Could not decode scheduler options." msgstr "Nelze rozšifrovat volby plánovače." #, python-format msgid "Could not delete failed image volume %(id)s." msgstr "Nelze smazat nezdařený svazek obrazu %(id)s." #, python-format msgid "Could not delete the image volume %(id)s." msgstr "Nelze smazat svazek obrazu %(id)s." #, python-format msgid "" "Could not do delete of snapshot %s on filer, falling back to exec of \"rm\" " "command." msgstr "" "Nelze smazat snímek %s ve správci souborů, smazání bude provedeno spuštěním " "příkazu \"rm\"." #, python-format msgid "" "Could not do delete of volume %s on filer, falling back to exec of \"rm\" " "command." msgstr "" "Nelze smazat svazek %s ve správci souborů, smazání bude provedeno spuštěním " "příkazu \"rm\"." #, python-format msgid "Could not find a host for consistency group %(group_id)s." msgstr "Nelze najít hostitele pro skupinu jednotnosti %(group_id)s." #, python-format msgid "Could not find any hosts (%s)" msgstr "Nelze najít žádné hostitele (%s)" #, python-format msgid "" "Could not find port group : %(portGroupName)s. 
Check that the EMC " "configuration file has the correct port group name." msgstr "" "Nelze najít skupinu portů : %(portGroupName)s. Zkontrolujte, že soubor s " "nastavením EMC má správný název skupiny." #, python-format msgid "Could not find volume with name %(name)s. Error: %(error)s" msgstr "Nelze najít svazek s názvem %(name)s. Chyba: %(error)s" #, python-format msgid "Could not log in to 3PAR array (%s) with the provided credentials." msgstr "" "Nelze se přihlásit do pole 3PAR (%s) pomocí zadaných přihlašovacích údajů." #, python-format msgid "Could not stat scheduler options file %(filename)s." msgstr "Nelze vyhodnotit soubor voleb plánovače %(filename)s." #, python-format msgid "Could not validate device %s" msgstr "Nelze ověřit zařízení %s" #, python-format msgid "Create cg snapshot %s failed." msgstr "Vytvoření snímku skupiny jednotnosti %s selhalo." #, python-format msgid "" "Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " "(Exception: %(except)s)" msgstr "" "Vytvoření klonu svazku obrazu: %(volume_id)s pro obraz %(image_id)s selhalo " "(Výjimka: %(except)s)" #, python-format msgid "Create consistency group %s failed." msgstr "Vytvoření skupiny jednotnosti %s selhalo." #, python-format msgid "" "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." msgstr "" "Vytvoření skupiny jednotnosti ze snímku %(snap)s selhalo: Snímek nebyl " "nalezen." #, python-format msgid "Create consistency group from source %(source)s failed." msgstr "Vytvoření skupiny jednotnosti ze zdroje %(source)s selhalo." #, python-format msgid "" "Create consistency group from source cg-%(cg)s failed: " "ConsistencyGroupNotFound." msgstr "" "Vytvoření skupiny jednotnosti ze zdrojové skupiny jednotnosti %(cg)s " "selhalo: Skupina nebyla nalezena." #, python-format msgid "Create hypermetro error: %s." msgstr "Chyba při vytváření hypermetra: %s." #, python-format msgid "" "Create new lun from lun for source %(src)s => destination %(dest)s failed!" msgstr "Vytvoření nového lun z lun pro zdroj %(src)s => cíl %(dest)s selhalo!" #, python-format msgid "Create snapshot notification failed: %s" msgstr "Oznámení o vytvoření snímku selhalo: %s" #, python-format msgid "Create volume failed from snapshot: %s" msgstr "Vytvoření svazku ze snímku selhalo: %s" #, python-format msgid "Create volume notification failed: %s" msgstr "Oznámení o vytvoření svazku selhalo: %s" #, python-format msgid "Creation of snapshot failed for volume: %s" msgstr "Vytvoření snímku selhalo u svazku: %s" #, python-format msgid "Creation of volume %s failed." msgstr "Vytvoření svazku %s selhalo." msgid "" "Creation request failed. Please verify the extra-specs set for your volume " "types are entered correctly." msgstr "" "Žádost o vytvoření selhala. Prosím ověřte správnost dodatečných specifikací " "nastavených pro vaše typy svazků." msgid "DB error:" msgstr "Chyba databáze:" #, python-format msgid "DBError detected when purging from table=%(table)s" msgstr "Při čištění tabulky %(table)s byla zjištěna chyba databáze" msgid "DBError encountered: " msgstr "Zjištěna chyba databáze: " msgid "Default Storage Profile was not found." msgstr "Výchozí profil úložiště nebyl nalezen." msgid "" "Default volume type is not found. Please check default_volume_type config:" msgstr "" "Výchozí typ svazku nenalezen. Prosím zkontrolujte nastavení " "default_volume_type:" #, python-format msgid "Delete cgsnapshot %s failed." msgstr "Smazání snímku skupiny jednotnosti %s selhalo."
#, python-format msgid "Delete consistency group %s failed." msgstr "Smazání skupiny jednotnosti %s selhalo." msgid "Delete consistency group failed to update usages." msgstr "Nelze aktualizovat využití při mazání skupin jednotnosti." #, python-format msgid "Delete hypermetro error: %s." msgstr "Chyba při mazání hypermetra: %s" msgid "Delete snapshot failed, due to snapshot busy." msgstr "Smazání snímku selhalo, protože snímek je zaneprázdněn." #, python-format msgid "Delete snapshot notification failed: %s" msgstr "Oznámení o smazáni snímku selhalo: %s" #, python-format msgid "Delete volume notification failed: %s" msgstr "Oznámení o smazáni svazku selhalo: %s" #, python-format msgid "Deleting snapshot %s failed" msgstr "Mazání snímku %s selhalo" #, python-format msgid "Deleting zone failed %s" msgstr "Mazání zóny %s selhalo" #, python-format msgid "Deletion of volume %s failed." msgstr "Smazání svazku %s selhalo." #, python-format msgid "Destination Volume Group %s does not exist" msgstr "Cílová skupina svazku %s neexistuje" #, python-format msgid "Detach attachment %(attach_id)s failed." msgstr "Odpojení zařízení %(attach_id)s selhalo." #, python-format msgid "Detach migration source volume failed: %(err)s" msgstr "Odpojení zdrojového svazku přesunu selhalo: %(err)s" msgid "Detach volume failed, due to remove-export failure." msgstr "Chyba při odpojování svazku, kvůli selhání při odebrání exportu." msgid "Detach volume failed, due to uninitialized driver." msgstr "Odpojení svazku selhalo kvůli nezavedenému ovladači." msgid "Detaching snapshot from a remote node is not supported." msgstr "Odpojení snímku od vzdáleného uzle není podporováno." #, python-format msgid "Did not find expected column name in lsvdisk: %s." msgstr "Nenalezen očekávaný sloupec v lsvdisku: %s." msgid "Differential restore failed, trying full restore" msgstr "Rozdílová obnova selhala, pokus o celkovou obnovu" #, python-format msgid "Disconnection failed with message: %(msg)s." msgstr "Odpojení selhalo se zprávou: %(msg)s." #, python-format msgid "" "Driver-based migration of volume %(vol)s failed. Move from %(src)s to " "%(dst)s failed with error: %(error)s." msgstr "" "Přesun svazku %(vol)s za pomocí ovladače selhal. Přesun z %(src)s do %(dst)s " "selhal s chybou: %(error)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Chyba při připojování svazku %(vol)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při vyváření skupiny: %(groupName)s. Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s of Volume: %(lun)s in Pool: %(pool)s, " "Project: %(project)s Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Chyba při získávání snímku: %(snapshot)s ve svazku %(lun)s v zásobě " "%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool: " "%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Chyba při nastavování svazku: %(lun)s do skupiny zavaděče " "%(initiatorgroup)s, zásoba %(pool)s, projekt %(project)s, návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." 
msgid "Error activating LV" msgstr "Chyba při aktivaci logického svazku" msgid "Error adding HBA to server" msgstr "Chyba při přidávání HBA k serveru" #, python-format msgid "Error attaching volume %s" msgstr "Chyba při připojování svazku %s" #, python-format msgid "Error cleaning up failed volume creation. Msg - %s." msgstr "Chyba při čištění selhaného vytváření svazku. Zpráva - %s" msgid "Error cloning volume" msgstr "Chyba při klonování svazku" msgid "Error closing channel." msgstr "Chyba při uzavírání kanálu." #, python-format msgid "" "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" "Chyba při kontaktování serveru glance '%(netloc)s' pro '%(method)s', " "%(extra)s." msgid "Error copying key." msgstr "Chyba při kopírování klíče." msgid "Error creating Barbican client." msgstr "Chyba při vytváření klienta Barbican." #, python-format msgid "Error creating QOS rule %s" msgstr "Chyba při vytváření pravidla QOS %s" msgid "Error creating Volume" msgstr "Chyba při vytváření svazku" msgid "Error creating Volume Group" msgstr "Chyba při vytváření skupiny svazku" msgid "Error creating chap record." msgstr "Chyba při vytváření záznamu chap." msgid "Error creating cloned volume" msgstr "Chyba při vytváření klonovaného svazku" msgid "Error creating key." msgstr "Chyba při vytváření klíče." msgid "Error creating snapshot" msgstr "Chyba při vytváření snímku" msgid "Error creating volume" msgstr "Chyba při vytváření svazku" #, python-format msgid "Error creating volume. Msg - %s." msgstr "Chyba při vytváření svazku. Zpráva - %s." msgid "Error deactivating LV" msgstr "Chyba při deaktivaci logického svazku" msgid "Error deleting key." msgstr "Chyba při mazání klíče." msgid "Error deleting snapshot" msgstr "Chyba při mazání snímku" msgid "Error deleting volume" msgstr "Chyba při mazání svazku" #, python-format msgid "Error detaching snapshot %(snapshot)s, due to remove export failure." msgstr "" "Chyba při odpojování snímku %(snapshot)s, kvůli selhání při odebrání exportu." #, python-format msgid "Error detaching volume %(volume)s, due to remove export failure." msgstr "" "Chyba při odpojování svazku %(volume)s, kvůli selhání při odebrání exportu." #, python-format msgid "Error detaching volume %s" msgstr "Chyba při odpojování svazku %s" #, python-format msgid "Error disassociating storage group from policy: %s." msgstr "Chyba při odloučení skupiny úložiště od zásady: %s" msgid "Error during re-export on driver init." msgstr "Chyba při znovu exportování během zavádění ovladače." msgid "Error executing SSH command." msgstr "Chyba při provádění příkazu SSH." msgid "Error executing command via ssh." msgstr "Chyba při provádění příkazu pomocí ssh." #, python-format msgid "Error executing command via ssh: %s" msgstr "Chyba při provádění příkazu pomocí ssh: %s" msgid "Error extending Volume" msgstr "Chyba při rozšiřování svazku" msgid "Error extending volume" msgstr "Chyba při rozšiřování svazku" #, python-format msgid "Error extending volume %(id)s. Ex: %(ex)s" msgstr "Chyba při rozšiřování svazku %(id)s. Výjimka: %(ex)s" #, python-format msgid "Error extending volume: %(vol)s. Exception: %(ex)s" msgstr "Chyba při rozšiřování svazku: %(vol)s. Výjimka: %(ex)s" #, python-format msgid "Error finding target pool instance name for pool: %(targetPoolName)s." msgstr "Chyba při hledání cílového názvu instance zásoby: %(targetPoolName)s." #, python-format msgid "Error getting LUN attribute. Exception: %s" msgstr "Chyba při získávání vlastnosti LUN. 
Výjimka: %s" msgid "Error getting active FC target ports." msgstr "Chyba při získávání aktivních cílových portů FC." msgid "Error getting active ISCSI target iqns." msgstr "Chyba při získávání aktivních cílových iqn iSCSI." msgid "Error getting active ISCSI target portals." msgstr "Chyba při získávání aktivních cílových portálů iSCSI." msgid "Error getting array, pool, SLO and workload." msgstr "Chyba při získávání pole, zásobu, SLO a vytížení." msgid "Error getting chap record." msgstr "Chyba při získávání záznamu chap." #, python-format msgid "Error getting iSCSI target info from EVS %(evs)s." msgstr "Chyba při získávání informací o cíli iSCSI z EVS %(evs)s." msgid "Error getting key." msgstr "Chyba při získávání klíče." msgid "Error getting name server info." msgstr "Při získávání informací o jmenném serveru nastala chyba." msgid "Error getting secret data." msgstr "Chyba při získávání tajných dat." msgid "Error getting secret metadata." msgstr "Chyba při získávání tajných popisných dat." msgid "Error getting show fcns database info." msgstr "Při získávání informací o zobrazení databáze fcns nastala chyba." msgid "Error getting target pool name and array." msgstr "Chyba při získávání názvu cílového pole a zásoby." #, python-format msgid "Error happened during storage pool querying, %s." msgstr "Při dotazu na zásobu úložiště se stala chyba, %s." #, python-format msgid "Error in copying volume: %s" msgstr "Chyba při kopírování svazku %s" #, python-format msgid "" "Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d " "with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" msgstr "" "Chyba při rozšiřování velikost svazku: Svazek: %(volume)s, velikost svazku: " "%(vol_size)d se snímkem: %(snapshot)s, velikost svazku: %(snap_size)d" #, python-format msgid "Error in workflow copy from cache. %s." msgstr "Chyba v postupu při kopírování z mezipaměti. %s." #, python-format msgid "Error invalid json: %s" msgstr "Chyba neplatný json: %s" msgid "Error manage existing get volume size." msgstr "Chyba při získávání velikosti spravovaného svazku." msgid "Error manage existing volume." msgstr "Chyba při správě existujícího svazku." #, python-format msgid "Error mapping volume: %s" msgstr "Chyba při mapování svazku: %s" #, python-format msgid "" "Error migrating volume: %(volumename)s. to target pool %(targetPoolName)s." msgstr "" "Chyba při přesunu svazku: %(volumename)s. do cílové zásoby " "%(targetPoolName)s." #, python-format msgid "Error migrating volume: %s" msgstr "Chyba při přesunování svazku: %s" #, python-format msgid "" "Error occurred in the volume driver when updating consistency group " "%(group_id)s." msgstr "" "Při aktualizaci skupiny jednotnosti %(group_id)s nastala chyba v ovladači " "svazku." msgid "" "Error occurred when adding hostgroup and lungroup to view. Remove lun from " "lungroup now." msgstr "" "Při přidávání skupiny hostitele a lun do zobrazení nastala chyba. Prosím " "odstraňte lun ze skupiny." #, python-format msgid "" "Error occurred when building request spec list for consistency group %s." msgstr "" "Při sestavování seznamu žádaných specifikací pro skupinu jednotnosti %s " "nastala chyba." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "Při vytváření snímku skupiny jednotnosti %s nastala chyba." #, python-format msgid "" "Error occurred when creating cloned volume in the process of creating " "consistency group %(group)s from source CG %(source_cg)s." 
msgstr "" "Při vytváření klonovaného svazku během vytváření skupiny jednotnosti " "%(group)s ze zdrojové skupiny jednotnosti %(source_cg)s nastala chyba." #, python-format msgid "" "Error occurred when creating consistency group %(cg)s from cgsnapshot " "%(cgsnap)s." msgstr "" "Při vytváření skupiny jednotnosti %(cg)s ze snímku %(cgsnap)s nastala chyba." #, python-format msgid "" "Error occurred when creating consistency group %(group)s from cgsnapshot " "%(cgsnap)s." msgstr "" "Při vytváření skupiny jednotnosti %(group)s ze snímku %(cgsnap)s nastala " "chyba." #, python-format msgid "" "Error occurred when creating consistency group %(group)s from source CG " "%(source_cg)s." msgstr "" "Při vytváření skupiny jednotnosti %(group)s ze zdrojové skupiny jednotnosti " "%(source_cg)s nastala chyba." #, python-format msgid "Error occurred when creating consistency group %s." msgstr "Při vytváření skupiny jednotnosti %s nastala chyba." #, python-format msgid "" "Error occurred when creating volume entry from snapshot in the process of " "creating consistency group %(group)s from cgsnapshot %(cgsnap)s." msgstr "" "Při vytváření záznamu o svazku ze snímku během vytváření skupiny jednotnosti " "%(group)s ze snímku skupiny jednotnosti %(cgsnap)s nastala chyba." #, python-format msgid "Error occurred when updating consistency group %(group_id)s." msgstr "Při aktualizaci skupiny jednotnosti %(group_id)s nastala chyba." #, python-format msgid "Error occurred while cloning backing: %s during retype." msgstr "Při klonování zálohy nastala chyba: %s během přetypování." #, python-format msgid "Error occurred while copying %(src)s to %(dst)s." msgstr "Při kopírování %(src)s do %(dst)s nastala chyba." #, python-format msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." msgstr "Při kopírování obrazu nastala chyba: %(id)s do svazku %(vol)s." #, python-format msgid "Error occurred while copying image: %(image_id)s to %(path)s." msgstr "Při kopírování obrazu nastala chyba: %(image_id)s do %(path)s." msgid "Error occurred while creating temporary backing." msgstr "Při vytváření dočasné zálohy nastala chyba." #, python-format msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." msgstr "Při vytváření svazku nastala chyba: %(id)s z obrazu %(image_id)s." msgid "Error occurred while selecting datastore." msgstr "Při zvolení datového úložiště nastala chyba." #, python-format msgid "Error on adding lun to consistency group. %s" msgstr "Chyba při přidávání lun do skupiny jednotnosti. %s" #, python-format msgid "Error on enable compression on lun %s." msgstr "Chyba při povolení komprimace v lun %s." #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" msgstr "" "Chyba při provádění %(command)s. Kód chyby: %(exit_code)d Chybová zpráva: " "%(result)s" #, python-format msgid "" "Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" msgstr "" "Chyba při provádění příkazu. Kód chyby: %(exit_code)d Chybová zpráva: " "%(result)s" msgid "Error parsing array from host capabilities." msgstr "Chyba při zpracování pole ze schopností hostitele." msgid "Error parsing array, pool, SLO and workload." msgstr "Chyba při zpracování pole, zásoby, SLO a vytížení." msgid "Error parsing target pool name, array, and fast policy." msgstr "Chyba při zpracování názvu cílového pole, zásoby a zásady fast." #, python-format msgid "" "Error provisioning volume %(lun_name)s on %(volume_name)s. 
Details: %(ex)s" msgstr "" "Chyba při poskytování svazku %(lun_name)s v %(volume_name)s. Podrobnosti: " "%(ex)s" msgid "Error querying thin pool about data_percent" msgstr "Chyba při dotazování mělké zásoby o procentech dat" msgid "Error renaming logical volume" msgstr "Chyba při přejmenování logického svazku" #, python-format msgid "Error resolving host %(host)s. Error - %(e)s." msgstr "Nelze převést na ip adresu hostitele %(host)s. Chyba - %(e)s." #, python-format msgid "Error retrieving LUN %(vol)s number" msgstr "Chyba při získávání čísla LUN %(vol)s" #, python-format msgid "Error running SSH command: \"%s\"." msgstr "Chyba při provádění příkazu SSH: \"%s\"." #, python-format msgid "Error running SSH command: %s" msgstr "Chyba při provádění příkazu SSH: %s" msgid "Error running command." msgstr "Chyba při provádění příkazu." #, python-format msgid "" "Error scheduling %(volume_id)s from last vol-service: %(last_host)s : %(exc)s" msgstr "" "Chyba při plánování %(volume_id)s z poslední služby svazku: %(last_host)s : " "%(exc)s" msgid "Error sending a heartbeat to coordination backend." msgstr "Chyba při odesílání informací o aktivitě do vrstvy pro koordinaci." #, python-format msgid "Error setting Flash Cache policy to %s - exception" msgstr "Chyba při nastavování zásady mezipaměti Flash na %s - došlo k výjimce" msgid "Error starting coordination backend." msgstr "Chyba při spouštění podpůrné vrstvy pro koordinaci." msgid "Error storing key." msgstr "Chyba při ukládání klíče." #, python-format msgid "Error unmapping volume: %s" msgstr "Chyba při zrušení mapování svazku: %s" #, python-format msgid "Error verifying LUN container %(bkt)s" msgstr "Chyba při ověřování kontejneru LUN %(bkt)s" #, python-format msgid "Error verifying iSCSI service %(serv)s on host %(hst)s" msgstr "Chyba při ověřování služby iSCSI %(serv)s, na hostiteli %(hst)s" msgid "Error: unable to snap replay" msgstr "Chyba: Nelze vytvořit snímek ze zpětného načtení" #, python-format msgid "Exception cloning volume %(name)s from source volume %(source)s." msgstr "Výjimka při klonování svazku %(name)s ze zdrojového svazku %(source)s." #, python-format msgid "Exception creating LUN %(name)s in pool %(pool)s." msgstr "Při vytváření LUN %(name)s v zásobě %(pool)s došlo k výjimce." #, python-format msgid "Exception creating vol %(name)s on pool %(pool)s." msgstr "Výjimka při vytváření svazku %(name)s v zásobě %(pool)s." #, python-format msgid "" "Exception creating volume %(name)s from source %(source)s on share %(share)s." msgstr "" "Výjimka při vytváření svazku %(name)s ze zdroje %(source)s ve sdílení " "%(share)s." #, python-format msgid "Exception details: %s" msgstr "Podrobnosti výjimky: %s" #, python-format msgid "Exception during mounting %s" msgstr "Při připojování %s došlo k výjimce" #, python-format msgid "Exception during mounting %s." msgstr "Při připojování %s došlo k výjimce." #, python-format msgid "Exception during snapCPG revert: %s" msgstr "Výjimka během vrácení snímku společné skupiny poskytování: %s" msgid "Exception encountered: " msgstr "Zjištěna výjimka:" #, python-format msgid "Exception handling resource: %s" msgstr "Zachycování výjimky zdroje: %s" msgid "Exception in string format operation" msgstr "Výjimka při operaci s formátem řetězce" msgid "Exception loading extension." msgstr "Výjimka během načítání rozšíření." #, python-format msgid "Exception: %(ex)s" msgstr "Výjimka: %(ex)s" #, python-format msgid "Exception: %s" msgstr "Výjimka: %s" #, python-format msgid "Exception: %s." msgstr "Výjimka: %s." 
#, python-format msgid "Exec of \"rm\" command on backing file for %s was unsuccessful." msgstr "Spuštění příkazu \"rm\" na záložní soubor %s bylo neúspěšné." #, python-format msgid "Exists snapshot notification failed: %s" msgstr "Oznámení o existenci snímku selhalo: %s" #, python-format msgid "Exists volume notification failed: %s" msgstr "Oznámení o existenci svazku selhalo: %s" msgid "Extend volume failed." msgstr "Rozšíření svazku selhalo." #, python-format msgid "Extension of volume %s failed." msgstr "Rozšíření snímku %s selhalo." msgid "" "Extra spec replication:mode must be set and must be either 'sync' or " "'periodic'." msgstr "" "Dodatečná specifikace replication:mode musí být zadána a musí mít hodnotu " "'sync' nebo 'periodic'." msgid "" "Extra spec replication:sync_period must be greater than 299 and less than " "31622401 seconds." msgstr "" "Dodatečná specifikace replication:sync_period musí být větší než 299 a " "menší než 31622401 vteřin." msgid "FAST is not supported on this array." msgstr "FAST není podporován v tomto poli." #, python-format msgid "Failed collecting fcns database info for fabric %s" msgstr "Shromažďování informací databáze fcns pro fabric %s selhalo" #, python-format msgid "Failed collecting name server info from fabric %s" msgstr "Shromáždění informací o jmenném serveru z fabric %s selhalo" msgid "Failed collecting nscamshow" msgstr "Shromažďování nscamshow selhalo" msgid "Failed collecting nsshow info for fabric" msgstr "Shromáždění informací nsshow pro fabric selhalo" #, python-format msgid "Failed collecting nsshow info for fabric %s" msgstr "Shromáždění informací nsshow pro fabric %s selhalo" msgid "Failed collecting show fcns database for fabric" msgstr "Shromažďování zobrazení databáze fcns pro fabric selhalo" #, python-format msgid "Failed destroying volume entry %s" msgstr "Nelze zničit položku svazku %s" #, python-format msgid "Failed destroying volume entry: %s." msgstr "Nelze zničit položku svazku: %s." #, python-format msgid "" "Failed fetching snapshot %(snapshot_id)s bootable flag using the provided " "glance snapshot %(snapshot_ref_id)s volume reference" msgstr "" "Získání příznaku zaveditelnosti snímku %(snapshot_id)s selhalo pomocí " "zadaného odkazu na svazek snímku glance %(snapshot_ref_id)s" #, python-format msgid "Failed getting active zone set from fabric %s" msgstr "Získání aktivní zóny nastavené z fabric %s selhalo" #, python-format msgid "Failed getting zone status from fabric %s" msgstr "Získání stavu zóny z fabric %s selhalo" #, python-format msgid "Failed image conversion during cache creation: %s" msgstr "Během vytváření mezipaměti došlo k chybě v převodu obrazu: %s" #, python-format msgid "" "Failed notifying about the snapshot action %(event)s for snapshot %(snp_id)s." msgstr "Oznámení činnosti snímku %(event)s selhalo u snímku %(snp_id)s." #, python-format msgid "" "Failed notifying about the volume action %(event)s for volume %(volume_id)s" msgstr "Oznámení činnosti svazku %(event)s selhalo u svazku %(volume_id)s" #, python-format msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "Oznámení %(topic)s s obsahem %(payload)s selhalo." 
#, python-format msgid "" "Failed recovery attempt to create iscsi backing lun for Volume ID:" "%(vol_id)s: %(e)s" msgstr "" "Selhal pokus o obnovu vytvoření lun zálohující iscsi pro svazek s ID:" "%(vol_id)s: %(e)s" #, python-format msgid "Failed rolling back quota for %s reservations" msgstr "Nelze vrátit zpět kvóty pro rezervace %s" #, python-format msgid "Failed rolling back quota for %s reservations." msgstr "Nelze vrátit zpět kvóty pro rezervace %s." #, python-format msgid "" "Failed setting source volume %(source_volid)s back to its initial " "%(source_status)s status" msgstr "" "Nelze nastavit zdrojový svazek %(source_volid)s zpět na původní stav " "%(source_status)s " #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage group " "for fast policy %(fastPolicyName)s. Please contact your sysadmin to get the " "volume returned to the default storage group." msgstr "" "Nelze se vrátit zpět do bodu znovu přidání svazku %(volumeName)s do výchozí " "skupiny úložiště pro zásadu fast %(fastPolicyName)s. Prosím kontaktujte " "svého správce systému, aby svazek vrátil zpět do výchozí skupiny úložiště." #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage group " "for fast policy %(fastPolicyName)s: Please contact your sys admin to get the " "volume re-added manually." msgstr "" "Nelze se vrátit zpět do bodu znovu přidání svazku %(volumeName)s do výchozí " "skupiny úložiště pro zásadu fast %(fastPolicyName)s: Prosím kontaktujte " "svého správce systému, aby svazek přidal zpět ručně." #, python-format msgid "" "Failed to add %(volumeName)s to default storage group for fast policy " "%(fastPolicyName)s." msgstr "" "Nelze přidat %(volumeName)s do výchozí skupiny úložiště pro zásadu fast " "%(fastPolicyName)s." #, python-format msgid "Failed to add %s to cg." msgstr "Nelze odstranit %s ze skupiny jednotnosti." #, python-format msgid "Failed to add device to handler %s" msgstr "Nelze přidat zařízení do obslužné rutiny %s" #, python-format msgid "Failed to add initiator iqn %s to target" msgstr "Nelze přidat zavaděč iqn %s do cíle" #, python-format msgid "Failed to add initiator to group for SCST target %s" msgstr "Nelze přidat zavaděč do skupiny pro cíl SCST %s" #, python-format msgid "Failed to add lun to SCST target id:%(vol_id)s: %(e)s" msgstr "Nelze přidat lun do cíle SCST s id: %(vol_id)s: %(e)s" #, python-format msgid "Failed to add multihost-access for volume \"%s\"." msgstr "Nelze přidat přístup více hostitelů ke svazku \"%s\"." #, python-format msgid "" "Failed to add storage group %(storageGroupInstanceName)s to tier policy rule " "%(tierPolicyRuleInstanceName)s." msgstr "" "Nelze přidat skupinu úložiště %(storageGroupInstanceName)s do pravidla " "zásady vrstvení %(tierPolicyRuleInstanceName)s." #, python-format msgid "Failed to add target(port: %s)" msgstr "Nelze přidat cíl (port: %s)" msgid "Failed to attach source volume for copy." msgstr "Nelze připojit zdrojový svazek pro kopírování." #, python-format msgid "Failed to attach volume %(vol)s." msgstr "Nelze připojit svazek %(vol)s." msgid "Failed to authenticate user." msgstr "Nelze ověřit uživatele." #, python-format msgid "Failed to check cluster status.(command: %s)" msgstr "Nelze zkontrolovat stav clusteru. (příkaz: %s)" #, python-format msgid "Failed to clone image volume %(id)s." msgstr "Klonování svazku obrazu %(id)s selhalo." #, python-format msgid "Failed to clone volume %(volume_id)s for image %(image_id)s." 
msgstr "Nelze klonovat svazek %(volume_id)s pro obraz %(image_id)s." #, python-format msgid "Failed to clone volume.(command: %s)" msgstr "Nelze klonovat svazek.(příkaz: %s)" #, python-format msgid "Failed to close disk device %s" msgstr "Nelze uzavřít diskové zařízení %s" #, python-format msgid "" "Failed to collect return properties for volume %(vol)s and connector " "%(conn)s." msgstr "" "Shromáždění vlastností pro návrat svazku %(vol)s a konektoru %(conn)s " "selhalo." #, python-format msgid "Failed to commit reservations %s" msgstr "Nelze odevzdat rezervace %s" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "Nelze se připojit k daemonu sheep. Adresa: %(addr)s, port: %(port)s" #, python-format msgid "Failed to copy %(src)s to %(dest)s." msgstr "Nelze zkopírovat %(src)s do %(dest)s." #, python-format msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" msgstr "Kopírování obrazu %(image_id)s do svazku %(volume_id)s selhalo" #, python-format msgid "Failed to copy image to volume: %(volume_id)s" msgstr "Kopírování obrazu do svazku %(volume_id)s selhalo" #, python-format msgid "Failed to copy volume %(src)s to %(dest)s." msgstr "Nelze zkopírovat svazek %(src)s do %(dest)s." #, python-format msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "Nelze zkopírovat svazek %(vol1)s do %(vol2)s" #, python-format msgid "Failed to create %(conf)s for volume id:%(vol_id)s" msgstr "Nelze vytvořit %(conf)s pro svazek s id: %(vol_id)s" #, python-format msgid "Failed to create CGSnapshot. Exception: %s." msgstr "Nelze vytvořit snímek skupiny jednotnosti. Výjimka: %s." msgid "" "Failed to create SOAP client.Check san_ip, username, password and make sure " "the array version is compatible" msgstr "" "Nelze vytvořit klienta SOAP. Zkontrolujte san_ip, uživatelské jméno, heslo a " "ujistěte se, že verze pole je kompatibilní" #, python-format msgid "" "Failed to create a first volume for storage group : %(storageGroupName)s." msgstr "" "Nelze vytvořit první svazek pro skupinu úložiště : %(storageGroupName)s." #, python-format msgid "Failed to create blkio cgroup '%(name)s'." msgstr "Nelze vytvořit kontrolní skupinu vstupu/výstupu bloku '%(name)s'." #, python-format msgid "Failed to create clone of volume \"%s\"." msgstr "Nelze vytvořit klon svazku \"%s\"." #, python-format msgid "Failed to create cloned volume %s." msgstr "Nelze vytvořit klonovaný svazek %s." #, python-format msgid "Failed to create consistency group %(group_id)s." msgstr "Nelze vytvořit skupinu jednotnosti %(group_id)s." #, python-format msgid "" "Failed to create default storage group for FAST policy : %(fastPolicyName)s." msgstr "" "Nelze vytvořit výchozí skupinu úložiště pro zásadu FAST : %(fastPolicyName)s." #, python-format msgid "Failed to create group to SCST target %s" msgstr "Nelze vytvořit skupinu pro cíl SCST %s" #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "Nelze vytvořit id hardwaru v %(storageSystemName)s." #, python-format msgid "" "Failed to create iscsi target for Volume ID: %(vol_id)s. Please ensure your " "tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" "Nelze vytvořit cílové iscsi pro svazek s ID %(vol_id)s. 
Prosím ujistěte se, " "že váš soubor s nastavením tgtd obsahuje 'include %(volumes_dir)s/*'" #, python-format msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "Nelze vytvořit cílové iscsi pro svazek s ID %(vol_id)s: %(e)s" #, python-format msgid "" "Failed to create iscsi target for volume id:%(vol_id)s. Please verify your " "configuration in %(volumes_dir)s'" msgstr "" "Nelze vytvořit cílové iscsi pro svazek s id %(vol_id)s. Prosím ověřte své " "nastavení v %(volumes_dir)s'" #, python-format msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "Nelze vytvořit cílové iscsi pro svazek s id %(vol_id)s: %(e)s" #, python-format msgid "Failed to create iscsi target for volume id:%s" msgstr "Nelze vytvořit cílové iscsi pro svazek s id: %s" #, python-format msgid "Failed to create iscsi target for volume id:%s." msgstr "Nelze vytvořit cílové iscsi pro svazek s id: %s." #, python-format msgid "Failed to create manage_existing flow: %(object_type)s %(object_id)s." msgstr "" "Nelze vytvořit postup pro správu existujících: %(object_type)s %(object_id)s." #, python-format msgid "Failed to create snapshot of volume \"%s\"." msgstr "Nelze vytvořit snímek svazku \"%s\"." #, python-format msgid "Failed to create snapshot. (command: %s)" msgstr "Nelze vytvořit snímek. (příkaz: %s)" #, python-format msgid "Failed to create transfer record for %s" msgstr "Nelze vytvořit záznam o přenosu pro %s" #, python-format msgid "Failed to create volume \"%s\"." msgstr "Nelze vytvořit svazek \"%s\"." #, python-format msgid "Failed to create volume %s" msgstr "Nelze vytvořit svazek %s" #, python-format msgid "Failed to create volume %s." msgstr "Nelze vytvořit svazek %s." #, python-format msgid "Failed to create volume from snapshot \"%s\"." msgstr "Nelze vytvořit svazek ze snímku \"%s\"." #, python-format msgid "Failed to create volume. %s" msgstr "Nelze vytvořit svazek. %s" #, python-format msgid "Failed to create volume: %(name)s (%(status)s)" msgstr "Nelze vytvořit svazek: %(name)s (%(status)s)" #, python-format msgid "Failed to created Cinder secure environment indicator file: %s" msgstr "Nelze vytvořit soubor indikující bezpečné prostředí Cinder: %s" #, python-format msgid "Failed to delete initiator iqn %s from target." msgstr "Nelze smazat zavaděč iqn %s z cíle." #, python-format msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." msgstr "Nelze smazat snímek %(snap)s svazku %(vol)s." #, python-format msgid "Failed to delete snapshot. (command: %s)" msgstr "Nelze smazat snímek. (příkaz: %s)" #, python-format msgid "" "Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " "%(exception)s." msgstr "" "Nelze smazat snímek %(snap)s ze snímku skupiny jednotnosti. Výjimka: " "%(exception)s." #, python-format msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." msgstr "" "Nelze smazat svazek %(vol)s skupiny jednotnosti. Výjimka: %(exception)s." #, python-format msgid "Failed to delete volume \"%s\"." msgstr "Nelze smazat svazek \"%s\"." #, python-format msgid "Failed to delete volume %s" msgstr "Nelze smazat svazek %s" #, python-format msgid "Failed to delete volume. %s" msgstr "Nelze smazat svazek. %s" #, python-format msgid "Failed to ensure export of volume \"%s\"." msgstr "Nelze zajistit export svazku \"%s\"." 
#, python-format msgid "Failed to ensure export of volume %s" msgstr "Nelze zajistit export svazku %s" #, python-format msgid "Failed to export fiber channel target due to %s" msgstr "Nelze exportovat cíl fiber channel z důvodu %s" #, python-format msgid "Failed to extend volume: %(vol)s to size: %(size)s GB." msgstr "Nelze rozšířit svazek: %(vol)s na velikost %(size)s GB." #, python-format msgid "" "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB." msgstr "Nelze rozšířit svazek %(name)s z %(current_size)sGB na %(new_size)sGB." #, python-format msgid "Failed to find %(s)s. Result %(r)s" msgstr "Nelze najít %(s)s. Výsledek %(r)s" #, python-format msgid "Failed to find available iSCSI targets for %s." msgstr "Nelze získat dostupné cíle iSCSI pro %s." msgid "Failed to get IQN!" msgstr "Nelze získat IQN!" msgid "Failed to get LUN information!" msgstr "Nelze získat informace o LUN!" #, python-format msgid "Failed to get allocation information (%d)!" msgstr "Získání informací o přidělení (%d) selhalo!" #, python-format msgid "Failed to get allocation information: %(host)s (%(status)d)!" msgstr "Nelze získat informace o přidělení: %(host)s (%(status)d)!" #, python-format msgid "Failed to get device number for throttling: %(error)s" msgstr "Nelze získat číslo zařízení pro škrcení: %(error)s" #, python-format msgid "" "Failed to get driver initiator data for initiator %(initiator)s and " "namespace %(namespace)s" msgstr "" "Nelze získat data zavedení ovladače pro zavaděč %(initiator)s a jmenný " "prostor %(namespace)s" #, python-format msgid "Failed to get fiber channel info from storage due to %(stat)s" msgstr "Nelze získat informace o fiber channel z úložiště kvůli %(stat)s" #, python-format msgid "Failed to get fiber channel target from storage server due to %(stat)s" msgstr "Nelze získat cíl fiber channel ze serverového úložiště kvůli %(stat)s" #, python-format msgid "Failed to get or create storage group %(storageGroupName)s." msgstr "Nelze získat nebo vytvořit skupinu úložiště %(storageGroupName)s." #, python-format msgid "Failed to get response: %s." msgstr "Nelze získat odpověď: %s." #, python-format msgid "Failed to get server info due to %(state)s." msgstr "Nelze získat informace o disku z důvodu %(state)s." msgid "Failed to get sns table" msgstr "Nelze získat tabulku sns" #, python-format msgid "Failed to get target wwpns from storage due to %(stat)s" msgstr "Nelze získat cílov= wwpns z úložiště kvůli %(stat)s" msgid "Failed to get updated stats from Datera Cluster." msgstr "Nelze získat aktualizované statistiky z clusteru Datera." msgid "Failed to get updated stats from Datera cluster." msgstr "Nelze získat aktualizované statistiky z clusteru Datera." #, python-format msgid "Failed to get volume status. %s" msgstr "Nelze získat stav svazku. %s" msgid "Failed to initialize connection" msgstr "Nelze zavést připojení" #, python-format msgid "Failed to initialize connection to volume \"%s\"." msgstr "Nelze zavést připojení ke svazku \"%s\"." msgid "Failed to initialize connection." msgstr "Nelze zavést připojení." msgid "Failed to initialize driver." msgstr "Nelze zavést ovladač." #, python-format msgid "Failed to issue df command for path %(path)s, error: %(error)s." msgstr "Nelze zadat příkaz df pro cestu %(path)s, chyba: %(error)s." #, python-format msgid "Failed to issue mmgetstate command, error: %s." msgstr "Nelze zadat příkaz mmgetstate, chyba: %s." #, python-format msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." 
msgstr "Nelze zadat příkaz mmlsattr pro cestu %(path)s, chyba: %(error)s" #, python-format msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" msgstr "Nelze zadat příkaz mmlsattr v cestě %(path)s, chyba: %(error)s" #, python-format msgid "Failed to issue mmlsconfig command, error: %s." msgstr "Nelze zadat příkaz mmtsconfig, chyba: %s." #, python-format msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." msgstr "Nelze zadat příkaz mmlsfs pro cestu %(path)s, chyba: %(error)s." #, python-format msgid "Failed to issue mmlsfs command, error: %s." msgstr "Nelze zadat příkaz mmlsfs, chyba: %s." #, python-format msgid "Failed to load %s" msgstr "Nelze načíst %s" msgid "Failed to load conder-volume" msgstr "Nelze načíst svazek conder" msgid "Failed to load osapi_volume" msgstr "Nelze načíst svazek osapi" #, python-format msgid "Failed to open iet session list for %s" msgstr "Nelze otevřít seznam sezení iet pro %s" #, python-format msgid "Failed to open volume from %(path)s." msgstr "Nelze otevřít svazek z %(path)s." #, python-format msgid "Failed to present volume %(name)s (%(status)d)!" msgstr "Nelze darovat svazek %(name)s (%(status)d)!" msgid "Failed to query migration status of LUN." msgstr "Dotaz na stav přesunu LUN selhal." msgid "Failed to re-export volume, setting to ERROR." msgstr "Nelze znovu exportovat svazek, je nastavován na ERROR." #, python-format msgid "Failed to register image volume location %(uri)s." msgstr "Nelze registrovat umístění svazku obrazu %(uri)s." #, python-format msgid "" "Failed to remove %(volumeName)s from the default storage group for the FAST " "Policy." msgstr "" "Nelze odstranit %(volumeName)s z výchozí skupiny úložiště pro zásadu FAST." #, python-format msgid "Failed to remove %s from cg." msgstr "Nelze odstranit %s ze skupiny jednotnosti." #, python-format msgid "Failed to remove LUN %s" msgstr "Nelze odstranit LUN %s" #, python-format msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "Nelze odstranit cílové iscsi pro svazek s ID: %(vol_id)s: %(e)s" #, python-format msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "Nelze odstranit cílové iscsi pro svazek s id %(vol_id)s: %(e)s" #, python-format msgid "Failed to remove iscsi target for volume id:%s" msgstr "Nelze odstranit cílové iscsi pro svazek s id: %s" #, python-format msgid "Failed to remove iscsi target for volume id:%s." msgstr "Nelze odstranit cílové iscsi pro svazek s id: %s." #, python-format msgid "Failed to rename %(new_volume)s into %(volume)s." msgstr "Nelze přejmenovat %(new_volume)s na %(volume)s." msgid "Failed to rename the created snapshot, reverting." msgstr "Nelze přejmenovat vytvořený snímek, probíhá vrácení zpět." #, python-format msgid "Failed to request async delete of migration source vol %(vol)s: %(err)s" msgstr "" "Nelze zažádat o asynchronní smazání zdrojového svazku přesunu %(vol)s: " "%(err)s" #, python-format msgid "" "Failed to resize vdi. Shrinking vdi not supported. vdi: %(vdiname)s new " "size: %(size)s" msgstr "" "Nelze změnit velikost vdi. Zmenšování vdi není podporováno. VDI: " "%(vdiname)s, nová velikost: %(size)s" #, python-format msgid "" "Failed to resize vdi. Too large volume size. vdi: %(vdiname)s new size: " "%(size)s" msgstr "" "Nelze změnit velikost vdi. Velikost je příliš velká. VDI: %(vdiname)s, nová " "velikost: %(size)s" #, python-format msgid "Failed to resize vdi. vdi not found. %s" msgstr "Nelze změnit velikost vdi, vdi nenalezeno. 
%s" #, python-format msgid "Failed to resize vdi. vdi: %(vdiname)s new size: %(size)s" msgstr "Nelze změnit velikost vdi. VDI: %(vdiname)s, nová velikost: %(size)s" #, python-format msgid "Failed to resize volume %(volume_id)s, error: %(error)s." msgstr "Nelze změnit velikost svazku %(volume_id)s, chyba: %(error)s." #, python-format msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "Nelze získat svazek SolidFire s ID: %s v získání podle účtu!" #, python-format msgid "" "Failed to return volume %(volumeName)s to original storage pool. Please " "contact your system administrator to return it to the correct location." msgstr "" "Nelze vrátit svazek %(volumeName)s do původní zásoby úložiště. Prosím " "kontaktujte svého správce systému, aby ho vrátil na správné místo." #, python-format msgid "Failed to roll back reservations %s" msgstr "Nelze vrátit zpět rezervace %s" #, python-format msgid "Failed to run task %(name)s: %(cause)s" msgstr "Nelze spustit úkol %(name)s: %(cause)s" #, python-format msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "Nelze naplánovat %(method)s: %(ex)s" #, python-format msgid "Failed to send request: %s." msgstr "Nelze odeslat požadavek: %s." #, python-format msgid "Failed to set 'enable' attribute for SCST target %s" msgstr "Nelze nastavit vlastnost 'enable' pro cíl SCST %s" #, python-format msgid "Failed to set attribute for enable target driver %s" msgstr "Nelze nastavit vlastnost pro povolení cílového ovladače %s" msgid "Failed to setup the Dell EqualLogic driver." msgstr "Nastavení ovladače Dell EqualLogic selhalo." msgid "Failed to shutdown horcm." msgstr "Ukončování horcm selhalo." #, python-format msgid "Failed to snap Consistency Group %s" msgstr "Nelze vytvořit snímek skupiny jednotnosti %s" msgid "Failed to start horcm." msgstr "Spouštění horcm selhalo." msgid "Failed to terminate connection" msgstr "Nelze ukončit připojení" #, python-format msgid "Failed to terminate connection %(initiator)s %(vol)s" msgstr "Nelze ukončit připojení %(initiator)s %(vol)s" #, python-format msgid "Failed to terminate connection to volume \"%s\"." msgstr "Nelze ukončit připojení ke svazku \"%s\"." #, python-format msgid "Failed to umount %(share)s, reason=%(stderr)s" msgstr "Nelze odpojit %(share)s, důvod: %(stderr)s" #, python-format msgid "" "Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " "target" msgstr "" "Nelze aktualizovat %(conf)s pro svazek s id %(vol_id)s po odstranění cíle " "iscsi" #, python-format msgid "Failed to update %(conf)s for volume id:%(vol_id)s" msgstr "Nelze aktualizovat %(conf)s pro svazek s id: %(vol_id)s" #, python-format msgid "" "Failed to update %(volume_id)s metadata using the provided snapshot " "%(snapshot_id)s metadata." msgstr "" "Nelze aktualizovat popisná data %(volume_id)s pomocí popisných dat zadaného " "snímku %(snapshot_id)s." #, python-format msgid "" "Failed to update initiator data for initiator %(initiator)s and backend " "%(backend)s" msgstr "" "Nelze aktualizovat data zavedení ovladače pro zavaděč %(initiator)s a " "podpůrnou vrstvu %(backend)s" #, python-format msgid "Failed to update quota donating volume transfer id %s" msgstr "Nelze aktualizovat id přenosu svazku dodávajícího kvótu %s." #, python-format msgid "Failed to update quota for consistency group %s." msgstr "Nelze aktualizovat kvótu skupiny jednotnosti %s." 
#, python-format msgid "Failed to update quota for deleting volume: %s" msgstr "Nelze aktualizovat kvóty kvůli smazání svazku: %s" #, python-format msgid "Failed to update quota while deleting snapshots: %s" msgstr "Nelze aktualizovat kvóty při mazání snímků: %s" msgid "Failed to update quota while deleting volume." msgstr "Nelze aktualizovat kvóty při mazání svazku." msgid "Failed to update usages deleting backup" msgstr "Nelze aktualizovat využití při mazání zálohy" msgid "Failed to update usages deleting snapshot" msgstr "Nelze aktualizovat využití při mazání snímku." msgid "Failed to update usages deleting volume." msgstr "Nelze aktualizovat využití při mazání svazku." #, python-format msgid "Failed to update volume status: %s" msgstr "Nelze aktualizovat stav svazku: %s" #, python-format msgid "" "Failed to verify that volume was added to storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Nelze ověřit zda svazek byl přidán do skupiny úložiště pro zásadu FAST: " "%(fastPolicyName)s." msgid "Failed to write in /etc/scst.conf." msgstr "Nelze zapisovat do/etc/scst.conf." #, python-format msgid "Failed to write persistence file: %(path)s." msgstr "Nelze zapsat soubor přetrvání: %(path)s." #, python-format msgid "Failed updating %(object_type)s %(object_id)s with %(update)s" msgstr "Nelze aktualizovat %(object_type)s %(object_id)s pomocí %(update)s" #, python-format msgid "" "Failed updating %(snapshot_id)s metadata using the provided volumes " "%(volume_id)s metadata" msgstr "" "Nelze aktualizovat popisná data %(snapshot_id)s pomocí popisných dat " "zadaného svazku %(volume_id)s" #, python-format msgid "" "Failed updating model of snapshot %(snapshot_id)s with creation provided " "model %(model)s." msgstr "" "Nelze aktualizovat model snímku %(snapshot_id)s pomocí modelu %(model)s " "poskytnutého při vytváření." #, python-format msgid "" "Failed updating model of snapshot %(snapshot_id)s with driver provided model " "%(model)s." msgstr "" "Nelze aktualizovat model snímku %(snapshot_id)s pomocí ovladače poskytnutého " "modelem %(model)s." #, python-format msgid "" "Failed updating model of volume %(volume_id)s with creation provided model " "%(model)s" msgstr "" "Nelze aktualizovat model svazku %(volume_id)s pomocí modelu %(model)s " "poskytnutého při vytváření" #, python-format msgid "" "Failed updating model of volume %(volume_id)s with driver provided model " "%(model)s" msgstr "" "Nelze aktualizovat model svazku %(volume_id)s pomocí ovladače poskytnutého " "modelem %(model)s" #, python-format msgid "Failed updating snapshot %(snapshot_id)s with %(update)s." msgstr "Aktualizace snímku %(snapshot_id)s pomocí %(update)s selhala." #, python-format msgid "" "Failed updating snapshot metadata using the provided volumes %(volume_id)s " "metadata" msgstr "" "Nelze aktualizovat popisná data snímku pomocí popisných dat zadaného svazku " "%(volume_id)s" #, python-format msgid "Failed updating volume %(volume_id)s bootable flag to true" msgstr "" "Aktualizace příznaku zaveditelnosti svazku %(volume_id)s na true selhala" #, python-format msgid "Failed updating volume %(volume_id)s with %(update)s" msgstr "Aktualizace svazku %(volume_id)s pomocí %(update)s selhala" #, python-format msgid "Failed updating volume %(volume_id)s with %(updates)s" msgstr "Aktualizace svazku %(volume_id)s pomocí %(updates)s selhala" #, python-format msgid "Failure deleting staged tmp LUN %s." msgstr "Smazání zařazeného dočasného LUN %s selhalo." msgid "Fetch volume pool name failed." 
msgstr "Získání názvu zásoby svazku selhalo." #, python-format msgid "" "FibreChannelDriver validate_connector failed. No '%(setting)s'. Make sure " "HBA state is Online." msgstr "" "Ověření konektoru ovladačem Fibre Channel selhalo. '%(setting)s' nenalezeno. " "Ujistěte se, že stav HBA je Online." #, python-format msgid "Flexvisor failed to get event %(volume)s (%(status)s)." msgstr "Flexvisor nemohl získat událost %(volume)s (%(status)s)." #, python-format msgid "Flexvisor failed to get pool %(id)s info." msgstr "Flexvisor nemohl získat informace o zásobě %(id)s." #, python-format msgid "Flexvisor failed to get pool list due to %s." msgstr "Flexvisor nemohl získat seznam zásob z důvodu %s." #, python-format msgid "Flexvisor failed to get pool list.(Error: %d)" msgstr "Flexvisor nemohl získat seznam zásob. (Chyba: %d)" #, python-format msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "Nalezeno %(count)s svazků mapovaných k id: %(uuid)s." msgid "Free capacity not set: volume node info collection broken." msgstr "" "Volná kapacita není nastavena: Shromažďování informací o uzlech svazku je " "nefunkční." #, python-format msgid "GPFS is not active. Detailed output: %s." msgstr "GPFS není aktivní. Podrobný výstup: %s." msgid "Get LUN migration error." msgstr "Chyba při získávání přesunu LUN." msgid "Get method error." msgstr "Chyba získávání metody." msgid "Get replication status for volume failed." msgstr "Získání stavu replikace svazku selhalo." #, python-format msgid "HDP not found: %s" msgstr "HDP nenalezeno: %s" #, python-format msgid "Host PUT failed (%s)." msgstr "PUT hostitele selhal (%s)." msgid "Host could not be found!" msgstr "Hostitel nemohl být nalezen!" #, python-format msgid "ISCSI discovery attempt failed for:%s" msgstr "Pokus o zjištění ISCSI selhal pro: %s" msgid "ISE FW version is not compatible with OpenStack!" msgstr "Verze ISE FW není kompatibilní s OpenStack!" msgid "ISE globalid not set!" msgstr "ISE globalid není nastaveno!" #, python-format msgid "Image size %(img_size)dGB is larger than volume size %(vol_size)dGB." msgstr "" "Velikost obrazu %(img_size)dGB je větší než velikost svazku %(vol_size)dGB." #, python-format msgid "Invalid API object: %s" msgstr "Neplatný objekt API: %s" #, python-format msgid "Invalid JSON: %s" msgstr "Neplatný JSON: %s" #, python-format msgid "Invalid ReplayList return: %s" msgstr "Neplatné předání seznamu zpětného načtení: %s" #, python-format msgid "Invalid hostname %(host)s" msgstr "Neplatný název hostitele %(host)s" #, python-format msgid "Invalid value for %(key)s, value is %(value)s." msgstr "Neplatná hodnota pro %(key)s, hodnota je %(value)s." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "Vyvolání zavedení záložního systému, protože replikace není správně " "nastavena." #, python-format msgid "JSON encode params %(param)s error: %(status)s." msgstr "Chyba parametrů kódování JSON %(param)s: %(status)s." #, python-format msgid "JSON transfer data error. %s." msgstr "Chyba přesunu dat JSON: %s" #, python-format msgid "JSON transfer error: %s." msgstr "Chyba přesunu JSON: %s." #, python-format msgid "LUN %(path)s geometry failed. Message - %(msg)s" msgstr "Geometrie LUN %(path)s selhala. Zpráva - %(msg)s" msgid "LUN extend failed!" msgstr "Rozšíření LUN selhalo!" msgid "LUN unexport failed!" msgstr "Zrušení exportu LUN selhalo!" #, python-format msgid "" "Location info needed for backend enabled volume migration not in correct " "format: %s. 
Continuing with generic volume migration." msgstr "" "Informace o umístění potřebné pro přesun svazku za pomoci podpůrné vrstvy " "nejsou ve správném formátu: %s. Bude použita obecná metoda přesunutí svazku." #, python-format msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." msgstr "Zdá se, že zamaskování: %(maskingViewName)s bylo nedávno smazáno." #, python-format msgid "Lun %s has dependent snapshots, skipping lun deletion." msgstr "Lun %s má na sobě závislé snímky, smazání lun přeskočeno." #, python-format msgid "Lun create for %s failed!" msgstr "Vytvoření Lun pro %s selhalo!" #, python-format msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "Vytvoření snímku Lun pro svazek %(vol)s a snímek %(snap)s selhalo!" #, python-format msgid "Lun delete for %s failed!" msgstr "Smazání Lun pro %s selhalo!" #, python-format msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "Smazání snímku Lun pro svazek %(vol)s a snímek %(snap)s selhalo!" msgid "Lun mapping returned null!" msgstr "Mapování Lun vrátilo prázdný obsah!" #, python-format msgid "MSGID%(id)04d-E: %(msg)s" msgstr "MSGID%(id)04d-E: %(msg)s" #, python-format msgid "" "Manager for service %(binary)s %(host)s is reporting problems, not sending " "heartbeat. Service will appear \"down\"." msgstr "" "Správce služby %(binary)s %(host)s hlásí problémy, neodesílá informace o " "aktivitě. Služba bude vypadat \"mimo provoz\"." #, python-format msgid "" "Masking View creation or retrieval was not successful for masking view " "%(maskingViewName)s. Attempting rollback." msgstr "" "Vytvoření nebo získání zamaskování nebylo úspěšné u %(maskingViewName)s. " "Pokus o zpětné vrácení." #, python-format msgid "" "Max retries reached deleting backup %(basename)s image of volume %(volume)s." msgstr "" "Při mazání zálohy obrazu %(basename)s svazku %(volume)s bylo dosaženo " "maximálního množství pokusů." #, python-format msgid "Message: %s" msgstr "Zpráva: %s" #, python-format msgid "Migration of LUN %s failed to complete." msgstr "Přesun LUN %s nelze dokončit." msgid "Model update failed." msgstr "Aktualizace modelu selhala." #, python-format msgid "Modify volume PUT failed: %(name)s (%(status)d)." msgstr "Změna PUT svazku selhala: %(name)s (%(status)d)." #, python-format msgid "Mount failure for %(share)s after %(count)d attempts." msgstr "Selhání připojení k %(share)s po %(count)d pokusech." #, python-format msgid "Mount failure for %(share)s." msgstr "Selhání připojení pro %(share)s." #, python-format msgid "Multiple replay profiles under name %s" msgstr "Nalezeno více profilů zpětného načtení s názvem %s" #, python-format msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s" msgstr "Sdílení NFS %(share)s nemá žádnou položku o službě: %(svc)s -> %(hdp)s" msgid "No CLI output for firmware version check" msgstr "" "U kontroly verze firmwaru nebyl žádný výstup v rozhraní příkazového řádku" #, python-format msgid "No VIP configured for service %s" msgstr "Nenastaveno žádné VIP pro službu %s" #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of pool: %(pool)s." msgstr "" "Není vyžadována žádná činnost. Svazek: %(volumeName)s již je součástí " "zásoby: %(pool)s." #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of slo/workload " "combination: %(targetCombination)s." msgstr "" "Není vyžadována žádná činnost. 
Svazek: %(volumeName)s již je součástí " "kombinace slo/vytížení: %(targetCombination)s." #, python-format msgid "No configuration found for service: %s" msgstr "Pro službu nebylo nalezeno žádné nastavení: %s" #, python-format msgid "No configuration found for service: %s." msgstr "Pro službu nebylo nalezeno žádné nastavení: %s." msgid "No more targets avaliable." msgstr "Žádné cíle nejsou dostupné." #, python-format msgid "" "No snapshots found in database, but %(path)s has backing file " "%(backing_file)s!" msgstr "" "V databázi nebyly nalezeny žádné snímky, ale %(path)s má záložní soubor " "%(backing_file)s!" #, python-format msgid "Not able to configure PBM for vCenter server: %s" msgstr "Nelze nastavit PBM pro server vCenter: %s" #, python-format msgid "OSError: command is %(cmd)s." msgstr "Chyba OS: příkaz je %(cmd)s." #, python-format msgid "OSError: command is %s." msgstr "Chyba OS: příkaz je %s." #, python-format msgid "" "One of the components of the original masking view %(maskingViewName)s " "cannot be retrieved so please contact your system administrator to check " "that the correct initiator(s) are part of masking." msgstr "" "Jedna ze součástí původního zamaskování %(maskingViewName)s nemůže být " "získána. Prosím kontaktujte svého správce systému, aby zkontroloval, že " "správné zavaděče jsou součástí zamaskování." #, python-format msgid "" "Only SLO/workload migration within the same SRP Pool is supported in this " "version The source pool : %(sourcePoolName)s does not match the target " "array: %(targetPoolName)s. Skipping storage-assisted migration." msgstr "" "V této verzi je přesun SLO/vytížení podporován pouze v rámci stejné zásoby " "SRP. Zdrojová zásoba : %(sourcePoolName)s se neshoduje s cílovým polem: " "%(targetPoolName)s. Přesun za pomoci úložiště bude přeskočen." msgid "Only available volumes can be migrated between different protocols." msgstr "Mezi různými protokoly lze přesunovat pouze dostupné svazky." #, python-format msgid "POST for host create failed (%s)!" msgstr "POST pro vytvoření hostitele selhal (%s)!" #, python-format msgid "Pipe1 failed - %s " msgstr "Roura1 selhala - %s " #, python-format msgid "Pipe2 failed - %s " msgstr "Roura2 selhala - %s " msgid "" "Please check your xml for format or syntax errors. Please see documentation " "for more details." msgstr "" "Prosím zkontrolujte váš xml soubor, zda v něm nejsou chyby ve formátu či " "syntaxi. Pro další informace si přečtěte dokumentaci." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "Název zásoby musí být v souboru %(fileName)s." #, python-format msgid "Prepare clone failed for %s." msgstr "Příprava klonu selhala pro %s." msgid "Primary IP must be set!" msgstr "Hlavní IP musí být nastavena!" msgid "Problem cleaning incomplete backup operations." msgstr "Při čištění nedokončených záložních operací se vyskytl problém." #, python-format msgid "Problem cleaning temp volumes and snapshots for backup %(bkup)s." msgstr "" "Při čištění dočasných svazků a snímků zálohy %(bkup)s se vyskytl problém." #, python-format msgid "Problem cleaning up backup %(bkup)s." msgstr "Při čištění zálohy %(bkup)s se vyskytl problém." msgid "Promote volume replica failed." msgstr "Povýšení repliky svazku selhalo." #, python-format msgid "" "Purity host %(host_name)s is managed by Cinder but CHAP credentials could " "not be retrieved from the Cinder database." msgstr "" "Cinder spravuje hostitele Purity %(host_name)s, ale přihlašovací údaje CHAP " "nemohly být získány z databáze Cinder." 
#, python-format msgid "" "Purity host %(host_name)s is not managed by Cinder and can't have CHAP " "credentials modified. Remove IQN %(iqn)s from the host to resolve this issue." msgstr "" "Cinder nespravuje hostitele Purity %(host_name)s a proto nelze měnit jeho " "přihlašovací údaje CHAP. Pro vyřešení problému odstraňte IQN %(iqn)s z " "hostitele." #, python-format msgid "Qemu-img is not installed. OSError: command is %(cmd)s." msgstr "Qemu-img není nainstalováno. Chyba OS: příkaz je %(cmd)s." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " "(%(d_consumed)dG of %(d_quota)dG already consumed)." msgstr "" "U %(s_pid)s překročena kvóta, pokus o rozšíření svazku o %(s_size)sG, (již " "využíváno (%(d_consumed)dG z %(d_quota)dG)." #, python-format msgid "REST Not Available: %s" msgstr "REST není dostupné: %s" #, python-format msgid "Re-throwing Exception %s" msgstr "Znovu vyvolávání výjimky %s" #, python-format msgid "Read response raised an exception: %s." msgstr "Čtení odpovědi vyvolalo výjimku: %s." msgid "Recovered model server connection!" msgstr "Obnoveno připojení modelového serveru!" #, python-format msgid "Recovering from a failed execute. Try number %s" msgstr "Obnovování ze selhaného spuštění. Pokus číslo %s" msgid "Replication must be specified as ' True' or ' False'." msgstr "Replikace musí být zadána jako ' True' nebo ' False'." msgid "" "Requested to setup thin provisioning, however current LVM version does not " "support it." msgstr "" "Žádost o nastavení mělkého poskytování, ale současná verze LVM ho " "nepodporuje." #, python-format msgid "Resizing %s failed. Cleaning volume." msgstr "Změna velikosti %s selhala. Probíhá čištění svazku." #, python-format msgid "Restore to volume %(volume)s finished with error - %(error)s." msgstr "Obnovení svazku %(volume)s bylo dokončeno s chybou - %(error)s." #, python-format msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" msgstr "Pokus proveden %(retry)skrát: %(method)s selhala %(rc)s: %(reason)s" msgid "Retype volume error." msgstr "Chyba přetypování svazku." #, python-format msgid "" "SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, Platinum, " "Diamond, Optimized, NONE." msgstr "" "SLO: %(slo)s není platné. Platné hodnoty jsou Bronze, Silver, Gold, " "Platinum, Diamond, Optimized, NONE." msgid "" "ScVolume returned success with empty payload. Attempting to locate volume" msgstr "" "SCVolume předal zprávu o úspěchu s prázdným obsahem. Pokus o nalezení svazku" #, python-format msgid "Server Busy retry request: %s" msgstr "Server zaneprázdněn, žádost o opakování: %s" #, python-format msgid "Setting QoS for %s failed" msgstr "Nastavení QoS pro %s selhalo" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export." msgstr "" "Sdílení %s ignorováno kvůli neplatnému formátu. Musí být ve formátu adresa:/" "export." #, python-format msgid "Sheepdog is not installed. OSError: command is %s." msgstr "Sheepdog není nainstalován. Chyba OS: příkaz je %s." #, python-format msgid "" "Skipping remove_export. No iscsi_target ispresently exported for volume: %s" msgstr "" "Odstranění exportu přeskočeno. Žádný cíl iscsi není v současnosti exportován " "pro svazek: %s" #, python-format msgid "Snapshot \"%s\" already exists." msgstr "Snímek \"%s\" již existuje." #, python-format msgid "" "Snapshot \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "Snímek \"%s\" nenalezen. 
Prosím zkontrolujte výstup příkazu \"dog vdi list\"." #, python-format msgid "Snapshot %s: create failed" msgstr "Vytvoření snímku %s selhalo" #, python-format msgid "Snapshot %s: has clones" msgstr "Snímek %s má klony" msgid "Snapshot did not exist. It will not be deleted" msgstr "Snímek neexistuje. Nebude smazán" #, python-format msgid "" "Source CG %(source_cg)s not found when creating consistency group %(cg)s " "from source." msgstr "" "Při vytváření skupiny %(cg)s ze zdroje nebyla nalezena zdrojová skupina " "jednotnosti %(source_cg)s." #, python-format msgid "Source snapshot %(snapshot_id)s cannot be found." msgstr "Zdrojový snímek %(snapshot_id)s nelze nalézt." #, python-format msgid "Source snapshot cannot be found for target volume %(volume_id)s." msgstr "Zdrojový snímek pro cílový svazek %(volume_id)s nenalezen." #, python-format msgid "Source volume %s not ready!" msgstr "Zdrojový svazek %s není připraven!" #, python-format msgid "Source volumes cannot be found for target volume %(volume_id)s." msgstr "Zdrojové svazky pro cílový svazek %(volume_id)s nenalezeny." #, python-format msgid "" "Src Volume \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "Zdrojový svazek \"%s\" nenalezen. Prosím zkontrolujte výstup příkazu \"dog " "vdi list\"." #, python-format msgid "StdErr :%s" msgstr "Chybový výstup :%s" #, python-format msgid "StdOut :%s" msgstr "Standardní výstup :%s" #, python-format msgid "Storage Profile %s was not found." msgstr "Profil úložiště %s nebyl nalezen." #, python-format msgid "Storage profile: %s cannot be found in vCenter." msgstr "Profil úložiště: %s nenalezen ve vCenter." msgid "Sync volume replica failed." msgstr "Synchronizace repliky svazku selhala." #, python-format msgid "TSM [%s] not found in CloudByte storage." msgstr "TSM [%s] nenalezeno v úložišti CloudByte." #, python-format msgid "Target end points do not exist for hardware Id: %(hardwareIdInstance)s." msgstr "" "Cílové koncové body neexistují u hardwaru s id: %(hardwareIdInstance)s." msgid "The Flexvisor service is unavailable." msgstr "Služba Flexvisor je nedostupná." #, python-format msgid "The NFS Volume %(cr)s does not exist." msgstr "Svazek NFS %(cr)s neexistuje." msgid "The connector does not contain the required information." msgstr "Konektor neobsahuje požadované informace." msgid "" "The connector does not contain the required information: initiator is missing" msgstr "Konektor neobsahuje požadované informace: zavaděč chybí" msgid "" "The connector does not contain the required information: wwpns is missing" msgstr "Konektor neobsahuje požadované informace: wwpns chybí" msgid "The given extra_spec or valid_values is None." msgstr "Zadané dodatečné specifikace nebo platné hodnoty jsou None." msgid "The list of iscsi_ip_addresses is empty" msgstr "Seznam iscsi_ip_addresses je prázdný" msgid "The snapshot cannot be deleted because it is a clone point." msgstr "Snímek nemůže být smazán, protože je to bod klonování." #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s skipping storage-assisted migration." msgstr "" "Zdrojové pole : %(sourceArraySerialNumber)s se neshoduje s cílovým polem: " "%(targetArraySerialNumber)s, přesun za pomoci úložiště bude přeskočen." #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s, skipping storage-assisted migration." 
msgstr "" "Zdrojové pole : %(sourceArraySerialNumber)s se neshoduje cílovému poli: " "%(targetArraySerialNumber)s, přesun za pomocí úložiště bude přeskočen." #, python-format msgid "The source volume %(volume_id)s cannot be found." msgstr "Zdrojový svazek %(volume_id)s nelze nalézt." #, python-format msgid "The volume driver requires %(data)s in the connector." msgstr "Ovladač svazku vyžaduje %(data)s v konektoru." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "Ovladač svazku vyžaduje název zavaděče iSCSI v konektoru." #, python-format msgid "There are no valid hosts available in configured cluster(s): %s." msgstr "V nastaveném clusteru nejsou dostupní žádní platní hostitele: %s." #, python-format msgid "There is no valid datastore satisfying requirements: %s." msgstr "Žádné platné datové úložiště splňující požadavky: %s." msgid "There must be at least one valid replication device configured." msgstr "Musí být zadáno alespoň jedno platné replikační zařízení." #, python-format msgid "There was an error deleting snapshot %(id)s: %(error)." msgstr "Při mazání snímku %(id)s nastala chyba: %(error)." #, python-format msgid "There was an error deleting volume %(id)s: %(error)." msgstr "Při mazání svazku %(id)s nastala chyba: %(error)." msgid "This usually means the volume was never successfully created." msgstr "To většinou znamená, že svazek nikdy nebyl úspěšně vytvořen." msgid "Tiering Policy is not supported on this array." msgstr "Zásada vrstvení není podporována na tomto poli." #, python-format msgid "Timed out deleting %s!" msgstr "Při mazání %s vypršel časový limit!" #, python-format msgid "Trying to create snapshot by non-existent LV: %s" msgstr "Pokus o vytvoření svazku z neexistujícího logického svazku. %s" #, python-format msgid "URLError: %s" msgstr "Chyba URL: %s" #, python-format msgid "Unable to create folder path %s" msgstr "Nelze vytvořit cestu složky %s" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Nelze vytvořit nebo získat výchozí skupinu úložiště pro zásadu FAST: " "%(fastPolicyName)s." #, python-format msgid "Unable to create volume %s from replay" msgstr "Nelze vytvořit svazek %s ze zpětného načtení" #, python-format msgid "Unable to create volume. Volume driver %s not initialized" msgstr "Nelze vytvořit svazek. Ovladač svazku %s není zaveden" msgid "Unable to delete busy volume." msgstr "Nelze smazat zaneprázdněný svazek." #, python-format msgid "Unable to delete due to existing snapshot for volume: %s" msgstr "Nelze smazat kvůli existujícímu snímku svazku. %s" msgid "" "Unable to delete the destination volume during volume migration, (NOTE: " "database record needs to be deleted)." msgstr "" "Nelze smazat cílový svazek během přesunování, (POZNÁMKA: záznam v databázi " "je třeba smazat)." #, python-format msgid "Unable to determine whether %(volumeName)s is composite or not." msgstr "Nelze zjistit zda %(volumeName)s je složený nebo ne." msgid "Unable to find FC initiators" msgstr "Nelze najít zavaděče FC" #, python-format msgid "Unable to find VG: %s" msgstr "Nelze najít skupinu svazku: %s" #, python-format msgid "Unable to find controller port iscsi configuration: %s" msgstr "Nelze najít port kontroléru pro nastavení iscsi: %s" #, python-format msgid "Unable to find controller port: %s" msgstr "Nelze najít port kontroléru: %s" #, python-format msgid "" "Unable to find default storage group for FAST policy : %(fastPolicyName)s." 
msgstr "" "Nelze najít výchozí skupinu úložiště pro zásadu FAST : %(fastPolicyName)s." #, python-format msgid "Unable to find service: %(service)s for given host: %(host)s." msgstr "Nelze najít službu: %(service)s pro daného hostitele: %(host)s." msgid "Unable to get associated pool of volume." msgstr "Nelze získat přidruženou zásobu svazku." #, python-format msgid "Unable to get default storage group %(defaultSgName)s." msgstr "Nelze získat výchozí skupinu úložiště %(defaultSgName)s." msgid "Unable to get device mapping from network." msgstr "Nelze získat mapování zařízení ze sítě." #, python-format msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." msgstr "Nelze získat pravidlo zásady pro zásadu FAST: %(fastPolicyName)s." #, python-format msgid "Unable to locate Volume Group %s" msgstr "Nelze najít skupinu svazku %s" #, python-format msgid "Unable to locate snapshot %s" msgstr "Nelze nalézt snímek %s" #, python-format msgid "Unable to manage existing snapshot. Volume driver %s not initialized." msgstr "Nelze spravovat existující snímek. Ovladač svazku %s není zaveden." #, python-format msgid "Unable to manage existing volume. Volume driver %s not initialized." msgstr "Nelze spravovat existující svazek. Ovladač svazku %s není zaveden." #, python-format msgid "Unable to map %(vol)s to %(srv)s" msgstr "Nelze mapovat %(vol)s do %(srv)s" #, python-format msgid "Unable to rename lun %s on array." msgstr "Nelze přejmenovat lun %s v poli." #, python-format msgid "Unable to rename the logical volume for volume %s." msgstr "Nelze přejmenovat logický svazek ve svazku %s." #, python-format msgid "Unable to rename the logical volume for volume: %s" msgstr "Nelze přejmenovat logický svazek ve svazku: %s" #, python-format msgid "Unable to retrieve VolumeConfiguration: %s" msgstr "Nelze získat nastavení svazku: %s" #, python-format msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." msgstr "Nelze získat instanci zásoby %(poolName)s v poli %(array)s." #, python-format msgid "Unable to terminate volume connection: %(err)s." msgstr "Nelze ukončit připojení ke svazku: %(err)s." #, python-format msgid "Unable to unmap Volume %s" msgstr "Nelze zrušit mapování svazku %s" msgid "Unexpected build error:" msgstr "Neočekávaná chyba při sestavování:" msgid "Unexpected error occurs in horcm." msgstr "V horcm se vyskytly neočekávané chyby." msgid "Unexpected error occurs in snm2." msgstr "V snm2 se vyskytly neočekávané chyby." #, python-format msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" msgstr "" "Neočekávaná chyba když se vracel pokus přetypování o smazání sady svazků (%s)" #, python-format msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" msgstr "" "Neočekávaná chyba když přetypování se pokusilo o smazání sady svazků (%s)" #, python-format msgid "Unexpected error while invoking web service. Error - %s." msgstr "" "Při volání internetové služby se objevila neočekávaná chyba. Chyba - %s." #, python-format msgid "Unknown exception in post clone resize LUN %s." msgstr "Neznámá výjimka při zvětšení LUN %s po klonování." #, python-format msgid "Unrecognized Login Response: %s" msgstr "Nerozpoznaná odpověď přihlášení: %s" #, python-format msgid "" "Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound." msgstr "" "Aktualizace skupiny jednotnosti nemohla přidat svazek %(volume_id)s: Svazek " "nenalezen." 
#, python-format msgid "" "Update consistency group failed to remove volume-%(volume_id)s: " "VolumeNotFound." msgstr "" "Aktualizace skupiny jednotnosti nemohla odstranit svazek %(volume_id)s: " "Svazek nenalezen." msgid "Update snapshot usages failed." msgstr "Aktualizace využití snímku selhala." msgid "Update volume model for transfer operation failed." msgstr "Aktualizace modelu svazku pro operaci přenosu selhala." #, python-format msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." msgstr "" "Při nahrávání svazku do obrazu nastala chyba (id obrazu: %(image_id)s)." msgid "User does not have permission to change Storage Profile selection." msgstr "Uživatel nemá oprávnění por změnu výběru profilu úložiště." msgid "VGC-CLUSTER command blocked and cancelled." msgstr "Příkaz clusteru VGC zablokován a zrušen." #, python-format msgid "Version string '%s' is not parseable" msgstr "Řetězec verze '%s' nelze zpracovat" #, python-format msgid "Virtual Volume Set %s does not exist." msgstr "Sada virtuálního svazku %s neexistuje." #, python-format msgid "Virtual disk device of backing: %s not found." msgstr "Zařízení zálohy virtuálního disku. %s nenalezeno." #, python-format msgid "Vol copy job status %s." msgstr "Stav úkolu kopírování svazku: %s." #, python-format msgid "Volume \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "Svazek \"%s\" nenalezen. Prosím zkontrolujte výstup příkazu \"dog vdi list\"." #, python-format msgid "" "Volume %(name)s is not suitable for storage assisted migration using retype." msgstr "" "Svazek %(name)s není vhodný pro přesun využitím úložiště pomocí přetypování." #, python-format msgid "Volume %(name)s not found on the array." msgstr "Svazek %(name)s nenalezen v poli." #, python-format msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "Svazek %(name)s nenalezen v poli. Žádný svazek ke smazání." #, python-format msgid "" "Volume %(name)s not found on the array. No volume to migrate using retype." msgstr "" "Svazek %(name)s nenalezen v poli. Žádný svazek pro přesun pomocí přetypování." #, python-format msgid "" "Volume %(volumeid)s failed to send assign command, ret: %(status)s output: " "%(output)s" msgstr "" "Svazek %(volumeid)s nemohl odeslat příkaz pro přidělení, vráceno: " "%(status)s, výstup: %(output)s" #, python-format msgid "Volume %s doesn't exist on array." msgstr "Svazek %s neexistuje v poli." #, python-format msgid "Volume %s, not found on SF Cluster." msgstr "Svazek %s nebyl nalezen v clusteru SF." #, python-format msgid "Volume %s: create failed" msgstr "Vytvoření svazku %s selhalo" #, python-format msgid "" "Volume %s: driver error when trying to retype, falling back to generic " "mechanism." msgstr "" "Svazek %s: chyba ovladače při pokusu o přetypování, bude použit obecný " "mechanismus." #, python-format msgid "Volume %s: manage failed." msgstr "Správa svazku %s selhala." #, python-format msgid "Volume %s: rescheduling failed" msgstr "Znovu naplánování svazku %s selhalo" #, python-format msgid "Volume %s: update volume state failed." msgstr "Aktualizace stavu svazku %s selhala." #, python-format msgid "" "Volume : %(volumeName)s has not been added to target storage group " "%(storageGroup)s." msgstr "" "Svazek : %(volumeName)s nebyl přidán do cílové skupiny úložiště " "%(storageGroup)s." #, python-format msgid "" "Volume : %(volumeName)s has not been removed from source storage group " "%(storageGroup)s." 
msgstr "" "Svazek : %(volumeName)s nebyl odstraněn ze zdrojové skupiny úložiště " "%(storageGroup)s." #, python-format msgid "" "Volume : %(volumeName)s. was not successfully migrated to target pool " "%(targetPoolName)s." msgstr "" "Svazek : %(volumeName)s nebyl úspěšně přesunut do cílové zásoby " "%(targetPoolName)s." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "accept_transfer operation!" msgstr "" "Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " "operace přijetí přenosu!" #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "attach_volume operation!" msgstr "" "Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " "operace připojení svazku!" #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "delete_volume operation!" msgstr "" "Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " "operace smazání svazku!" #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "detach_volume operation!" msgstr "" "Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " "operace odpojení svazku!" #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "extend_volume operation!" msgstr "" "Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " "operace rozšíření svazku!" #, python-format msgid "" "Volume ID %s was not found on the zfssa device while attempting " "delete_volume operation." msgstr "" "Svazek s ID %s nebyl nalezen na zařízení zfssa při pokusu o operaci smazání " "svazku." #, python-format msgid "Volume already exists. %s" msgstr "Svazek již existuje. %s" msgid "Volume appears unmapped" msgstr "Svazek se nezdá být mapován" msgid "Volume did not exist. It will not be deleted" msgstr "Svazek neexistuje. Nebude smazán" #, python-format msgid "Volume driver %s not initialized" msgstr "Ovladač svazku %s není zaveden" msgid "Volume in unexpected state" msgstr "Svazek je v neočekávaném stavu" #, python-format msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "Svazek v neočekávaném stavu %s, očekáván awaiting-transfer" #, python-format msgid "Volume migration failed due to exception: %(reason)s." msgstr "Přesun svazku selhal kvůli výjimce: %(reason)s." msgid "Volume must be detached for clone operation." msgstr "Svazek musí být pro operaci klonování odpojen." #, python-format msgid "Volume size \"%sG\" is too large." msgstr "Velikost svazku \"%sG\" je příliš velká." #, python-format msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "Mazání typu svazku %s selhalo, typ je používán." #, python-format msgid "" "WebDAV operation failed with error code: %(code)s reason: %(reason)s Retry " "attempt %(retry)s in progress." msgstr "" "Operace WebDAV selhala s chybovým kódem: %(code)s, důvod: %(reason)s. " "Probíhá pokus %(retry)s." #, python-format msgid "WebDAV returned with %(code)s error during %(method)s call." msgstr "WebDAV předal chybu %(code)s během volání %(method)s." #, python-format msgid "" "Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP, " "OLTP_REP, NONE." msgstr "" "Vytížení: %(workload)s není platné. Platné hodnoty jsou DSS_REP, DSS, OLTP, " "OLTP_REP, NONE." 
msgid "_find_mappings: volume is not active" msgstr "Nalezení mapování: svazek není aktivní" #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " "operation: orig=%(orig)s new=%(new)s." msgstr "" "Odstranění operace kopírování virtuálního disku: Svazek %(vol)s nemá zadanou " "operaci kopírování: původní=%(orig)s, nové=%(new)s." #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" "Odstranění operace kopírování virtuálního disku: Popisná data svazku %(vol)s " "nemají zadanou operaci kopírování: původní=%(orig)s, nové=%(new)s." #, python-format msgid "" "_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " "operations." msgstr "" "Odstranění operace kopírování virtuálního disku: Svazek %s nemají žádné " "registrované operace kopírování virtuálního disku." #, python-format msgid "" "_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " "copy operations." msgstr "" "Odstranění operace kopírování virtuálního disku: Popisná data svazku %s " "nemají žádné registrované operace kopírování virtuálního disku." #, python-format msgid "" "_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " "%(host_name)s found." msgstr "" "Zrušení mapování virtuálního disku na hostiteli: Nenalezeno žádné mapování " "svazku %(vol_name)s k hostiteli %(host_name)s." #, python-format msgid "_wait_for_job_complete failed after %(retries)d tries." msgstr "Čekání na dokončení úkolu selhalo po %(retries)d pokusech." #, python-format msgid "_wait_for_sync failed after %(retries)d tries." msgstr "Čekání na synchronizaci selhalo po %(retries)d pokusech." #, python-format msgid "" "backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "záloha: %(vol_id)s nemohl odstranit pevný odkaz na zálohu z %(vpath)s do " "%(bpath)s.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s" #, python-format msgid "can't create 2 volumes with the same name, %s" msgstr "Nelze vytvořit 2 svazky se stejným názvem, %s" msgid "cinder-rtstool is not installed correctly" msgstr "cinder-rtstool není správně nainstalováno" #, python-format msgid "" "delete: %(vol_id)s failed with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "smazání: %(vol_id)s selhalo, standardní výstup: %(out)s.\n" "chybový výstup: %(err)s." msgid "delete_vol: provider location empty." msgstr "Smazání svazku: Umístění poskytovatele je prázdné." #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "Zajištění exportu: Svazek %s nebyl nalezen v úložišti." #, python-format msgid "error opening rbd image %s" msgstr "chyba při otevírání obrazu rbd %s" msgid "error refreshing volume stats" msgstr "Při obnově statistik svazku došlo k chybě" msgid "horcm command timeout." msgstr "Příkazu horcm vypršel časový limit." #, python-format msgid "iSCSI portal not found for service: %s" msgstr "Nenalezen žádný portál iSCSI pro službu: %s" #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s." msgstr "" "Zavedení spojení: Shromáždění vlastností pro návrat svazku %(vol)s a " "konektoru %(conn)s selhalo." 
#, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s.\n" msgstr "" "Zavedení spojení: Shromáždění vlastností pro návrat svazku %(vol)s a " "konektoru %(conn)s selhalo.\n" msgid "iscsi_ip_address must be set!" msgstr "iscsi_ip_address musí být nastavena!" msgid "manage_existing: No source-name in ref!" msgstr "Správa existujících: V odkazu není název zdroje!" #, python-format msgid "manage_existing_get_size: %s does not exist!" msgstr "Správa existujících: Získání velikosti:%s neexistuje!" msgid "manage_existing_get_size: No source-name in ref!" msgstr "Správa existujících: Získání velikosti: V odkazu není název zdroje!" msgid "model server went away" msgstr "modelový server je nedostupný" #, python-format msgid "modify volume: %s does not exist!" msgstr "změna svazku: %s neexistuje!" msgid "san ip must be configured!" msgstr "san ip musí být nastaveno!" msgid "san_login must be configured!" msgstr "san_login musí být nastaveno!" msgid "san_password must be configured!" msgstr "san_password musí být nastaveno!" #, python-format msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" "Povolen režim ověření pomocí jednoho uživatele, ale %(param)s není nastaveno" msgid "snm2 command timeout." msgstr "Příkazu snm2 vypršel časový limit." msgid "" "storwize_svc_multihostmap_enabled is set to False, not allowing multi host " "mapping." msgstr "" "storwize_svc_multihostmap_enabled je nastaven na False, neumožňující " "mapování více hostitelů." #, python-format msgid "unmanage: Volume %s does not exist!" msgstr "Zrušení správy: Svazek %s neexistuje!" msgid "" "zfssa_replication_ip not set in cinder.conf. zfssa_replication_ip is needed " "for backend enabled volume migration. Continuing with generic volume " "migration." msgstr "" "zfssa_replication_ip není nastaveno v cinder.conf. zfssa_replication_ip not " "je potřebné pro přesun svazku za pomoci podpůrné vrstvy. Bude použita obecná " "metoda přesunutí svazku." cinder-8.0.0/cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po0000664000567000056710000017205412701406250025055 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # OpenStack Infra , 2015. #zanata # Zbyněk Schwarz , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev10\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-17 18:05+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-12-22 01:37+0000\n" "Last-Translator: Zbyněk Schwarz \n" "Language: cs\n" "Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Czech\n" #, python-format msgid "%(path)s is being set with open permissions: %(perm)s" msgstr "%(path)s je nastavována s volnými oprávněními: %(perm)s" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s is already mounted" msgstr "%s již je připojeno" #, python-format msgid "%s not found!" msgstr "%s nenalezeno!" msgid "" "'hpe3par:cpg' is not supported as an extra spec in a volume type. CPG's are " "chosen by the cinder scheduler, as a pool, from the cinder.conf entry " "'hpe3par_cpg', which can be a list of CPGs." msgstr "" "'hpe3par:cpg' není podporováno jako dodatečná specifikace v typu svazku. 
" "Společné skupiny poskytování jsou voleny plánovačem cinder, jako zásoba, z " "položky 'hpe3par_cpg' v cinder.conf, který může být seznamem těchto skupin." #, python-format msgid "3PAR vlun for volume %(name)s not found on host %(host)s" msgstr "3PAR vlun pro svazek %(name)s nenalezen v hostiteli %(host)s" #, python-format msgid "AttachSnapTask.revert: detach mount point %s" msgstr "Vrácení úkolu připojení snímku: odpojení bodu připojení %s" msgid "Attempted to delete a space that's not there." msgstr "Pokus smazat prostor který neexistuje." #, python-format msgid "" "Attempting a rollback of: %(volumeName)s to original pool " "%(sourcePoolInstanceName)s." msgstr "" "Pokus o zpětné vrácení %(volumeName)s do původní zásoby " "%(sourcePoolInstanceName)s." msgid "Attempting recreate of backing lun..." msgstr "Pokus o znovuvytvoření záložního lun..." #, python-format msgid "" "Availability zone '%(s_az)s' not found, falling back to '%(s_fallback_az)s'." msgstr "" "Zóna dostupnosti '%(s_az)s' nebyla nalezena, bude použita záložní " "'%(s_fallback_az)s'." #, python-format msgid "Availability zone '%s' is invalid" msgstr "Zóna dostupnosti '%s' je neplatná" #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping verify." msgstr "" "Zálohovací služba %(service)s nepodporuje ověřování. Záloha s id %(id)s není " "ověřena. Ověřování přeskočeno." msgid "" "Both 'storagetype:prvosioning' and 'provisioning:type' are set in the extra " "specs, the value of 'provisioning:type' will be used. The key 'storagetype:" "provisioning' may be deprecated in the next release." msgstr "" "V dodatečných specifikacích jsou zadány 'storagetype:prvosioning' a " "'provisioning:type', bude použita hodnota z 'provisioning:type'. Klíč " "'storagetype:provisioning' může být v příští verzi zastaralý." #, python-format msgid "CG %(cg_name)s does not exist. Message: %(msg)s" msgstr "Skupina jednotnosti %(cg_name)s neexistuje. Zpráva: %(msg)s" #, python-format msgid "CG %(cg_name)s is deleting. Message: %(msg)s" msgstr "Probíhá mazání skupiny jednotnosti %(cg_name)s. Zpráva: %(msg)s" #, python-format msgid "CHAP is enabled, but server secret not configured on server %s" msgstr "CHAP je povolen, ale soukromý klíč serveru není nastaven na serveru %s" #, python-format msgid "CHAP secret exists for host %s but CHAP is disabled" msgstr "Soukromý klíč CHAP pro hostitele %s existuje, ale CHAP je zakázán." msgid "CHAP secret exists for host but CHAP is disabled." msgstr "Soukromý klíč CHAP pro hostitele existuje, ale CHAP je zakázán." msgid "Can't find lun on the array." msgstr "Nelze najít lun v poli." msgid "Can't find snapshot on the array." msgstr "Nelze najít snímek v poli." msgid "Can't find target iqn from rest." msgstr "Nelze najít cílové iqn z REST." msgid "Cannot determine the hardware type." msgstr "Nelze zjistit typ hardwaru." #, python-format msgid "Cannot get volume status %(exc)s." msgstr "Nelze získat stav svazku %(exc)s." #, python-format msgid "" "Cannot undo volume rename; old name was %(old_name)s and new name is " "%(new_name)s." msgstr "" "Nelze vrátit přejmenování svazku zpět; starý název byl %(old_name)s a nový " "název je %(new_name)s." #, python-format msgid "Cgsnapshot name %(name)s already exists. Message: %(msg)s" msgstr "Snímek skupiny jednotnosti %(name)s již existuje. 
Zpráva: %(msg)s" #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "Změna využití se sníží na méně než 0 pro následující zdroje: %s" #, python-format msgid "" "Changing backing: %(backing)s name from %(new_name)s to %(old_name)s failed." msgstr "Změna názvu zálohy %(backing)s z %(new_name)s na %(old_name)s selhala." #, python-format msgid "" "Clone failed on V3. Cleaning up the target volume. Clone name: %(cloneName)s " msgstr "" "Klonování selhalo ve V3. Čištění cílového svazku. Název klona: %(cloneName)s " msgid "" "Configuration option eqlx_cli_timeout is deprecated and will be removed in M " "release. Use ssh_conn_timeout instead." msgstr "" "Volba nastavení eqlx_cli_timeout je zastaralá a bude odstraněna ve verzi M. " "Místo toho používejte ssh_conn_timeout." msgid "" "Configuration options eqlx_use_chap, eqlx_chap_login and eqlx_chap_password " "are deprecated. Use use_chap_auth, chap_username and chap_password " "respectively for the same." msgstr "" "Volby nastavení eqlx_use_chap, eqlx_chap_login a eqlx_chap_password jsou " "zastaralé. Byly nahrazeny volbami use_chap_auth, chap_username a " "chap_password respectively se stejnou funkcí." #, python-format msgid "Consistency group %(name)s already exists. Message: %(msg)s" msgstr "Skupina jednotnosti %(name)s již existuje. Zpráva: %(msg)s" #, python-format msgid "" "CopySnapshotTask.revert: delete the copied snapshot %(new_name)s of " "%(source_name)s." msgstr "" "Vrácení úkolu kopírování snímku: Mazání kopírovaného snímku %(new_name)s z " "%(source_name)s." #, python-format msgid "Could not create target because it already exists for volume: %s" msgstr "Nelze vytvořit cíl protože již ve svazku existuje: %s" #, python-format msgid "Could not determine root volume name on %s." msgstr "Nelze zjistit název kořenového svazku v %s." #, python-format msgid "Could not get pool information (%s)!" msgstr "Nelze získat informace o zásobě (%s)!" #, python-format msgid "Could not get status for %(name)s (%(status)d)." msgstr "Nelze získat stav %(name)s (%(status)d)." #, python-format msgid "CreateDestLunTask.revert: delete temp lun %s" msgstr "Vrácení úkolu vytvoření cílového LUN: Mazání dočasného lun %s" #, python-format msgid "CreateSMPTask.revert: delete mount point %s" msgstr "Vrácení vytvoření úkolu SMPT: Smazání bodu připojení %s" #, python-format msgid "CreateSnapshotTask.revert: delete temp cgsnapshot %s" msgstr "" "Vrácení úkolu vytvoření snímku: Mazání dočasného snímku skupiny jednotnosti " "%s" #, python-format msgid "CreateSnapshotTask.revert: delete temp snapshot %s" msgstr "Vrácení úkolu vytvoření snímku: Mazání dočasného snímku %s" #, python-format msgid "" "CreateStorageHardwareID failed. initiator: %(initiator)s, rc=%(rc)d, ret=" "%(ret)s." msgstr "" "Vytvoření ID hardwaru úložiště selhalo. Zavaděč %(initiator)s, rc=%(rc)d, " "ret=%(ret)s." #, python-format msgid "DELETE call failed for %s!" msgstr "Volání smazání selhalo u %s!" #, python-format msgid "Deadlock detected when running '%(func_name)s': Retrying..." msgstr "" "Při provádění '%(func_name)s' zjištěno zablokování. Bude proveden nový " "pokus..." #, python-format msgid "Delete Snapshot id not found. Removing from cinder: %(id)s Ex: %(msg)s" msgstr "" "ID mazaného snímku nenalezeno. Odstraňování z cinder: %(id)s Výjimka: %(msg)s" #, python-format msgid "Delete temp LUN after migration start failed. LUN: %s" msgstr "Smazání dočasného LUN po zahájení přesunu selhalo. 
LUN: %s" #, python-format msgid "" "Delete the temporary cgsnapshot %(name)s failed. This temporary cgsnapshot " "can be deleted manually. Message: %(msg)s" msgstr "" "Smazání dočasného snímku skupiny jednotnosti %(name)s selhalo. Tento snímek " "lze smazat ručně. Zpráva: %(msg)s." #, python-format msgid "Delete volume id not found. Removing from cinder: %(id)s Ex: %(msg)s" msgstr "" "ID mazanéhosvazku nenalezeno. Odstraňování z cinder: %(id)s Výjimka: %(msg)s" #, python-format msgid "Deleting image in unexpected status: %(image_status)s." msgstr "Mazání obrazu v neočekávaném stavu: %(image_status)s." #, python-format msgid "Destination %s already exists." msgstr "Cíl %s již existuje." msgid "" "Destination volume type is different from source volume type for an " "encrypted volume. Encrypted backup restore has failed." msgstr "" "Cílový typ svazku se liší od zdrojového typu zašifrovaného svazku. Obnovení " "zašifrované zálohy selhalo." msgid "Detected snapshot stuck in creating status, setting to ERROR." msgstr "Objeven snímek zaseknutý ve stavu creating, nastavování na ERROR." #, python-format msgid "Detected volume stuck in %(curr_status)s status, setting to ERROR." msgstr "" "Objeven svazek zaseknutý ve stavu %(curr_status)s, nastavování na ERROR." #, python-format msgid "" "Didn't get the pool information of the host %s. Storage assisted Migration " "is not supported. The host may be using a legacy driver." msgstr "" "Z hostitele %s nebyly získány informace o zásobě. Přesun za pomocí úložiště " "není podporován. Hostitel možná používá zastaralý ovladač." msgid "Discover file retries exhausted." msgstr "Vyčerpány pokusy o zjištění souboru." msgid "Driver didn't return connection info from terminate_connection call." msgstr "Ovladač nevrátil informace o připojení z volání o ukončení připojení." msgid "Driver didn't return connection info, can't add zone." msgstr "Ovladač nevrátil informace o připojení, nelze přidat zónu." #, python-format msgid "" "Driver path %s is deprecated, update your configuration to the new path." msgstr "" "Cesta ovladače %s je zastaralá, aktualizujte svá nastavení na novou cestu." #, python-format msgid "Error encountered translating config_string: %(config_string)s to dict" msgstr "" "Při překladu řetězce nastavení se objevila chyba: %(config_string)s do dict" #, python-format msgid "Error finding LUNs for volume %s. Verify volume exists." msgstr "Chyba při hledání LUN pro svazek %s. Ověřování zda svazek existuje." #, python-format msgid "" "Error in filtering function '%(function)s' : '%(error)s' :: failing host" msgstr "" "Chyba ve funkce filtrování '%(function)s' : '%(error)s' :: hostitel selhává" #, python-format msgid "" "Error in goodness_function function '%(function)s' : '%(error)s' :: " "Defaulting to a goodness of 0" msgstr "" "Chyba ve funkcí Goodness '%(function)s' : '%(error)s' :: Je použita její " "minimální hodnota 0" #, python-format msgid "Error mapping LUN. Code :%(code)s, Message: %(message)s" msgstr "Chyba při mapování LUN. Kód: %(code)s, zpráva: %(message)s" #, python-format msgid "Error occurred while deleting backing: %s." msgstr "Při mazání zálohy nastala chyba: %s." #, python-format msgid "Error occurred while deleting descriptor: %s." msgstr "Při mazání popisovače nastala chyba: %s." #, python-format msgid "Error occurred while deleting temporary disk: %s." msgstr "Při mazání dočasného disku nastala chyba: %s." msgid "Error on parsing target_pool_name/target_array_serial." msgstr "Chyba při zpracování názvu cílové zásoby a pole." 
#, python-format msgid "Error refreshing volume info. Message: %s" msgstr "Chyba při obnovování informaci o svazku. Zpráva: %s" #, python-format msgid "Error running SSH command: %s" msgstr "Chyba při provádění příkazu SSH: %s" #, python-format msgid "Error unmapping LUN. Code :%(code)s, Message: %(message)s" msgstr "Chyba při rušení mapování LUN. Kód: %(code)s, zpráva: %(message)s" #, python-format msgid "Error updating agent-type for volume %s." msgstr "Chyba při aktualizaci typu agentu ve svazku %s." msgid "Error while listing objects, continuing with delete." msgstr "Chyba při vypisování objektů, pokračuje se ve smazání." #, python-format msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" msgstr "Výjimka při čištění mezipaměti %(share)s. Zpráva - %(ex)s" #, python-format msgid "Exception during deleting %s" msgstr "Během mazání %s nastala výjimka" #, python-format msgid "Exception during mounting %s" msgstr "Při připojování %s došlo k výjimce" #, python-format msgid "Exception during unmounting %s" msgstr "Při odpojování nastala výjimka %s" #, python-format msgid "Exception moving file %(src)s. Message - %(e)s" msgstr "Výjimka při přesunování souboru %(src)s. Zpráva - %(e)s" #, python-format msgid "Exception moving file %(src)s. Message: %(e)s" msgstr "Výjimka při přesunování souboru %(src)s. Zpráva: %(e)s" #, python-format msgid "" "Exception while creating image %(image_id)s snapshot. Exception: %(exc)s" msgstr "Při vytváření snímku obrazu %(image_id)s nastala výjimka: %(exc)s" #, python-format msgid "" "Exception while registering image %(image_id)s in cache. Exception: %(exc)s" msgstr "Výjimka při registraci obrazu %(image_id)s v mezipaměti: %(exc)s" #, python-format msgid "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " "resource" msgstr "" "Rozšíření %(ext_name)s: nelze rozšířit zdroj %(collection)s: Žádný takový " "zdroj" #, python-format msgid "Extra spec %(old)s is deprecated. Use %(new)s instead." msgstr "" "Dodatečná specifikace %(old)s je zastaralá. Místo toho použijte %(new)s." #, python-format msgid "Extra spec %(old)s is obsolete. Use %(new)s instead." msgstr "Dodatečná specifikace %(old)s je zrušena. Místo toho použijte %(new)s." msgid "" "Extra spec key 'storagetype:pool' is obsoleted since driver version 5.1.0. " "This key will be ignored." msgstr "" "Klíč dodatečné specifikace 'storagetype:pool' je zastaralý od verze ovladače " "5.1.0. Tento klíč bude ignorován." msgid "" "Extra spec key 'storagetype:provisioning' may be deprecated in the next " "release. It is recommended to use extra spec key 'provisioning:type' instead." msgstr "" "Klíč dodatečné specifikace 'storagetype:provisioning' může být v příští " "verzi zastaralý. Doporučuje se místo toho použít klíč 'provisioning:type'." #, python-format msgid "FAST is enabled. Policy: %(fastPolicyName)s." msgstr "FAST je povoleno. Zásada: %(fastPolicyName)s." #, python-format msgid "Fail to connect host %(host)s back to storage group %(sg)s." msgstr "Nelze připojit hostitele %(host)s zpět do skupiny úložiště %(sg)s." #, python-format msgid "" "Failed target removal because target or ACL's couldn't be found for iqn: %s." msgstr "" "Odstranění cíle selhalo protože cíl nebo ACL nemohly být v iqn nalezeny: %s." #, python-format msgid "" "Failed terminating the connection of volume %(volume_id)s, but it is " "acceptable." msgstr "" "Ukončení spojení se svazkem %(volume_id)s selhalo, selhání je přijatelné." 
#, python-format msgid "Failed to activate volume copy throttling: %(err)s" msgstr "Nelze aktivovat přiškrcení kopie svazku: %(err)s" #, python-format msgid "Failed to add host group: %s" msgstr "Nelze přidat skupinu hostitele: %s" #, python-format msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "Přidružení specifikace qos %(id)s k typu: %(vol_type_id)s selhalo" #, python-format msgid "Failed to create new image-volume cache entry. Error: %(exception)s" msgstr "" "Nelze vytvořit novou položku obrazu-svazku v mezipaměti. Výjimka: " "%(exception)s" #, python-format msgid "Failed to create pair: %s" msgstr "Nelze vytvořit pár: %s" #, python-format msgid "" "Failed to create volume from image-volume cache, will fall back to default " "behavior. Error: %(exception)s" msgstr "" "Nelze vytvořit svazek z mezipaměti obrazu-svazku, bude použito výchozí " "chování. Chyba: %(exception)s" #, python-format msgid "Failed to deregister %(itor)s because: %(msg)s." msgstr "Nelze zrušit registraci %(itor)s protože: %(msg)s." #, python-format msgid "Failed to destroy Storage Group %s." msgstr "Nelze zničit skupinu úložiště %s." #, python-format msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "Odloučení specifikace qos %(id)s od typu: %(vol_type_id)s selhalo" #, python-format msgid "Failed to disassociate qos specs %s." msgstr "Odloučení specifikace qos %s selhalo." #, python-format msgid "Failed to discard zero page: %s" msgstr "Nelze zahodit nulovou stránku: %s" #, python-format msgid "Failed to extract initiators of %s, so ignore deregistration operation." msgstr "" "Nelze extrahovat zavaděče %s, operace pro zrušení registrace je ignorována." msgid "Failed to get Raid Snapshot ID and did not store in snapshot." msgstr "Nelze získat ID Raid snímku a ve snímku nebylo uloženo." msgid "Failed to get target pool id." msgstr "Nelze získat id cílové zásoby." msgid "" "Failed to get target_pool_name and target_array_serial. 'location_info' is " "not in host['capabilities']." msgstr "" "Nelze získat název cílové zásoby a pole. 'location_info' není v " "hostiteli['capabilities']." #, python-format msgid "Failed to invoke ems. Message : %s" msgstr "Nelze zavolat ems. Zpráva: %s" #, python-format msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "Nelze načíst rozšíření %(classpath)s: %(exc)s" #, python-format msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "Nelze načít rozšíření %(ext_factory)s: %(exc)s" #, python-format msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "Nelze načíst rozšíření %(ext_name)s: %(exc)s" #, python-format msgid "Failed to manage virtual volume %(disp)s due to error during retype." msgstr "" "Nelze spravovat virtuální svazek %(disp)s kvůli chybě během přetypování." #, python-format msgid "" "Failed to migrate volume. The destination volume %(vol)s is not deleted " "since the source volume may have been deleted." msgstr "" "Nelze přesunout svazek. Cílový svazek %(vol)s nebyl smazán protože zdrojový " "svazek mohl být smazán." #, python-format msgid "" "Failed to migrate: %(volumeName)s from default source storage group for FAST " "policy: %(sourceFastPolicyName)s. Attempting cleanup... " msgstr "" "Nelze přesunout: %(volumeName)s z výchozí skupiny zdrojového úložiště pro " "zásadu FAST %(sourceFastPolicyName)s. Pokus o vyčištění..." #, python-format msgid "Failed to query pool %(id)s status %(ret)d." msgstr "Dotaz na stav %(ret)d zásoby %(id)s selhal." 
#, python-format msgid "Failed to refresh mounts, reason=%s" msgstr "Nelze obnovit připojení, důvod=%s" #, python-format msgid "" "Failed to register %(itor)s to SP%(sp)s port %(portid)s because: %(msg)s." msgstr "" "Nelze registrovat %(itor)s do SP%(sp)s port %(portid)s protože: %(msg)s." #, python-format msgid "Failed to restart horcm: %s" msgstr "Nelze restartovat horcm: %s" #, python-format msgid "Failed to run command: %s." msgstr "Nelze provést příkaz: %s." #, python-format msgid "Failed to run lsguicapability. Exception: %s." msgstr "Nelze spustit lsguicapability. Výjimka: %s." #, python-format msgid "" "Failed to save iscsi LIO configuration when modifying volume id: %(vol_id)s." msgstr "Nelze uložit nastavení iscsi LIO při měnění id svazku %(vol_id)s." #, python-format msgid "Failed to setup blkio cgroup to throttle the device '%(device)s'." msgstr "" "Nelze nastavit kontrolní skupinu vstupu/výstupu bloku pro škrcení zařízení " "'%(device)s'." #, python-format msgid "Failed to unmount previous mount: %s" msgstr "Nelze odpojit předchozí připojení: %s" #, python-format msgid "" "Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " "target. %(conf)s does not exist." msgstr "" "Nelze aktualizovat %(conf)s pro svazek s id %(vol_id)s po odstranění cíle " "iscsi. %(conf)s neexistuje." #, python-format msgid "Failure deleting job %s." msgstr "Nelze smazat úkol %s." #, python-format msgid "Failure deleting temp snapshot %s." msgstr "Mazání dočasného snímku %s selhalo." #, python-format msgid "Failure deleting the snapshot %(snapshot_id)s of volume %(volume_id)s." msgstr "Mazání snímku %(snapshot_id)s svazku %(volume_id)s selhalo." msgid "" "Fallocate not supported by current version of glusterfs. So falling back to " "dd." msgstr "" "Fallocate není podporováno současnou verzí glusterfs. Přechází se zpět na dd." #, python-format msgid "" "Flexvisor failed to delete volume %(id)s from group %(vgid)s due to " "%(status)s." msgstr "" "Flexvisor nemohl smazat svazek %(id)s ze skupiny %(vgid)s z důvodu%(status)s." #, python-format msgid "Flexvisor failed to delete volume %(id)s from the group %(vgid)s." msgstr "Flexvisor nemohl smazat svazek %(id)s ze skupiny %(vgid)s." #, python-format msgid "" "Found invalid iSCSI IP address(s) in configuration option(s) " "hpe3par_iscsi_ips or iscsi_ip_address '%s.'" msgstr "" "Ve volbách nastavení hpe3par_iscsi_ips nebo iscsi_ip_address nalezeny " "neplatné IP adresy iSCSI: '%s'." msgid "Goodness function not set :: defaulting to minimal goodness rating of 0" msgstr "Funkce Goodness není nastavena :: je použita její minimální hodnota 0" #, python-format msgid "Got disconnected; trying to reconnect. (%s)" msgstr "Došlo k odpojení; pokus o znovu připojení. (%s)" #, python-format msgid "" "Group sync name not found for target group %(target)s on %(storageSystem)s." msgstr "" "Název synchronizace skupiny nenalezen v cílové skupině %(target)s na " "%(storageSystem)s." #, python-format msgid "HLU %(hlu)s has already been removed from %(sgname)s. Message: %(msg)s" msgstr "HLU %(hlu)s již bylo odstraněno z %(sgname)s. Zpráva: %(msg)s" #, python-format msgid "" "HPELeftHand API is version %(current)s. A minimum version of %(min)s is " "needed for manage/unmanage support." msgstr "" "Verze API HPELeftHand: %(current)s. Pro podporu vytváření/rušení spravování " "je vyžadována verze %(min)s nebo vyšší." #, python-format msgid "" "Hint \"%s\" dropped because ExtendedServerAttributes not active in Nova." 
msgstr "" "Nápověda \"%s\" zahozena, protože rozšířené vlastnosti serveru nejsou " "aktivní v Nova." #, python-format msgid "" "Hint \"%s\" dropped because Nova did not return enough information. Either " "Nova policy needs to be changed or a privileged account for Nova should be " "specified in conf." msgstr "" "Nápověda \"%s\" zahozena, protože Nova nevrátila dostatek informací. Buď je " "třeba změnit zásadu Nova, nebo je třeba v nastavení zadat výsadní účet pro " "Nova se všemi potřebnými oprávněními." #, python-format msgid "" "Host %(host)s has already disconnected from storage group %(sgname)s. " "Message: %(msg)s" msgstr "" "Hostitel %(host)s již byl odpojen od skupiny úložiště %(sgname)s. Zpráva: " "%(msg)s" msgid "" "Host exists without CHAP credentials set and has iSCSI attachments but CHAP " "is enabled. Updating host with new CHAP credentials." msgstr "" "Hostitel je bez nastavených přihlašovacích údajů CHAP, má připojení iSCSI, " "ale CHAP je povolen. Aktualizování hostitele pomocí nových přihlašovacích " "údajů CHAP." msgid "Host has no CHAP key, but CHAP is enabled." msgstr "Hostiteli nemá žádný klíč CHAP, ale CHAP je povolen." #, python-format msgid "IOnetworks GET failed (%d)" msgstr "Získání IOnetworks selhalo (%d)" msgid "IQN already existed." msgstr "IQN již existuje." msgid "IQN has been used to create map." msgstr "IQN bylo použito k vytvoření mapy." msgid "ISCSI provider_location not stored, using discovery" msgstr "Umístění poskytovatele ISCSI neuloženo, bude se zjišťovat" msgid "" "ISERTgtAdm is deprecated, you should now just use LVMVolumeDriver and " "specify iscsi_helper for the target driver you wish to use. In order to " "enable iser, please set iscsi_protocol=iser with lioadm or tgtadm target " "helpers." msgstr "" "ISERTgtAdm je zastaralé, nyní byste měli používat LVMVolumeDriver a zadat " "pomocníka iscsi pro cílový ovladač, který chcete použít. Abyste mohli použít " "iser, prosím nastavte protokol iscsi s hodnotou iser s cílovými pomocníky " "lioadm nebo tgtadm." msgid "Id not in sort_keys; is sort_keys unique?" msgstr "id není v seřazení klíčů; je seřazení jedinečné?" msgid "Image delete encountered an error." msgstr "Při mazání obrazu nastala chyba." #, python-format msgid "Image-volume cache for host %(host)s does not have enough space (GB)." msgstr "" "Mezipaměť obrazu-svazku pro hostitele %(host)s nemá dostatek místa (GB)." msgid "Inconsistent Luns." msgstr "Nekonzistentní Lun." #, python-format msgid "" "Incorrect value error: %(blocksize)s, it may indicate that " "'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" "Chyba kvůli nesprávné hodnotě: %(blocksize)s, to může naznačovat, že " "'volume_dd_blocksize' nebylo nastaveno správně. Bude použita výchozí hodnota." #, python-format msgid "" "Insufficient free space for thin provisioning. The ratio of provisioned " "capacity over total capacity %(provisioned_ratio).2f has exceeded the " "maximum over subscription ratio %(oversub_ratio).2f on host %(host)s." msgstr "" "Pro mělké poskytování není dostatek volného místa. Poměr poskytované " "kapacity na celkovou %(provisioned_ratio).2f překročil maximální poměr " "odběru %(oversub_ratio).2f na hostiteli %(host)s." #, python-format msgid "" "Insufficient free space for volume creation on host %(host)s (requested / " "avail): %(requested)s/%(available)s" msgstr "" "Není dostatek volného místa pro vytvoření svazku na hostiteli %(host)s. 
" "(Požadováno: %(requested)s/Dostupné %(available)s)" #, python-format msgid "" "Insufficient free space for volume creation. Total capacity is %(total).2f " "on host %(host)s." msgstr "" "Není dostatek volného místa pro vytvoření svazku. Celková kapacita je " "%(total).2f na hostiteli %(host)s." #, python-format msgid "Invalid IP address format '%s'" msgstr "Neplatný formát IP adresy '%s'" #, python-format msgid "" "Invalid goodness result. Result must be between 0 and 100. Result " "generated: '%s' :: Defaulting to a goodness of 0" msgstr "" "Neplatný výsledek funkce goodness. Výsledek musí být mezi 0 až 100. " "Vypočtený výsledek '%s' :: Je použita její minimální hodnota 0" #, python-format msgid "" "Invalid iSCSI port %(sp)s-%(port)s-%(vlan)s found in io_port_list, will be " "ignored." msgstr "" "V seznamu portů vstupu/výstupu nalezen neplatný port iSCSI %(sp)s-%(port)s-" "%(vlan)s, bude ignorován." #, python-format msgid "Invalid trace flag: %s" msgstr "Neplatný příznak sledování: %s" msgid "" "It is not the recommended way to use drivers by NetApp. Please use " "NetAppDriver to achieve the functionality." msgstr "" "Toto není doporučený způsob používání ovladačů od NetApp. Pro dosažení této " "funkce prosím použijte NetAppDriver." #, python-format msgid "LUN %(name)s is already expanded. Message: %(msg)s" msgstr "LUN %(name)s již je rozšířen. Zpráva: %(msg)s" #, python-format msgid "LUN %(name)s is not ready for extension: %(out)s" msgstr "LUN %(name)s není připraven na rozšíření: %(out)s" #, python-format msgid "LUN %(name)s is not ready for snapshot: %(out)s" msgstr "LUN %(name)s není připraven k pořízeni snímku: %(out)s" #, python-format msgid "LUN already exists, LUN name %(name)s. Message: %(msg)s" msgstr "LUN již existuje, název LUN %(name)s. Zpráva: %(msg)s" #, python-format msgid "" "LUN corresponding to %s is still in some Storage Groups.Try to bring the LUN " "out of Storage Groups and retry the deletion." msgstr "" "LUN odpovídající %s je stále v některých skupinách úložiště. Zkuste ho " "vyjmout ze skupin a znovu proveďte smazání." #, python-format msgid "LUN is already deleted, LUN name %(name)s. Message: %(msg)s" msgstr "LUN již je smazán, název LUN %(name)s. Zpráva: %(msg)s" #, python-format msgid "" "LUN misalignment may occur for current initiator group %(ig_nm)s) with host " "OS type %(ig_os)s. Please configure initiator group manually according to " "the type of the host OS." msgstr "" "U současné skupině zavaděče %(ig_nm)s s hostitelem mající typ OS %(ig_os)s " "se může objevit nevyrovnanost LUN. Prosím nastavte skupinu zavaděče ručně " "podle typu OS na hostiteli." #, python-format msgid "LUN with id %(remove_id)s is not present in cg %(cg_name)s, skip it." msgstr "" "LUN s id %(remove_id)s není přítomna ve skupině jednotnosti %(cg_name)s, je " "přeskočena." msgid "Least busy iSCSI port not found, using first iSCSI port in list." msgstr "" "Nejméně zaneprázdněný port iSCSI nenalezen, použit první port v seznamu." #, python-format msgid "" "Lun is not in lungroup. Lun id: %(lun_id)s, lungroup id: %(lungroup_id)s" msgstr "" "Lun není ve skupine lun. ID LUN %(lun_id)s, ID skupiny LUN: %(lungroup_id)s." #, python-format msgid "" "Maximum number of Pool LUNs, %s, have been created. No more LUN creation can " "be done." msgstr "" "Byl vytvořen maximální počet LUN zásoby %s. Nelze vytvořit žádné další LUN." #, python-format msgid "Message - %s." msgstr "Zpráva - %s." #, python-format msgid "" "Migration command may get network timeout. 
Double check whether migration in " "fact started successfully. Message: %(msg)s" msgstr "" "Příkazu pro přesun může vypršet časový limit sítě. Zkontrolujte, zda přesun " "byl opravdu úspěšně zahájen. Zpráva: %(msg)s" #, python-format msgid "More than one valid preset was detected, using %s" msgstr "Byly zjištěny více než jedny platné předvolby, jsou použity %s" #, python-format msgid "Mount point %(name)s already exists. Message: %(msg)s" msgstr "Bod připojení %(name)s již existuje. Zpráva: %(msg)s" msgid "No VLUN contained CHAP credentials. Generating new CHAP key." msgstr "" "Žádný VLUN neobsahoval přihlašovací údaje CHAP. Vytváření nového klíče CHAP." msgid "No array serial number returned, set as unknown." msgstr "Nebylo předáno sériové číslo pole, nastaveno na neznámé." #, python-format msgid "No backing file found for %s, allowing snapshot to be deleted." msgstr "Pro %s nenalezen žádný zálohovací soubor, smazání snímku povoleno." #, python-format msgid "No entry in LUN table for volume/snapshot %(name)s." msgstr "Žádná položka v tabulce LUN pro svazek/snímek %(name)s." msgid "No host or VLUNs exist. Generating new CHAP key." msgstr "Žádný VLUN nebo hostitel neexistuje. Vytváření nového klíče CHAP." msgid "No mapping." msgstr "Žádné mapování." #, python-format msgid "No port group found in masking view %(mv)s." msgstr "V maskování %(mv)s nenalezena žádná skupina portů." msgid "No protection domain name or id was specified in configuration." msgstr "V nastavení nebyl zadán žádný název ani id ochranné domény." msgid "No shares found hence skipping ssc refresh." msgstr "Sdílení nenalezena, a proto bude obnovení ssc přeskočeno." #, python-format msgid "No status payload for volume %s." msgstr "Žádný obsah stavu svazku %s." #, python-format msgid "" "No storage group found. Performing rollback on Volume: %(volumeName)s To " "return it to the default storage group for FAST policy %(fastPolicyName)s." msgstr "" "Nenalezena žádná skupina úložiště. Provádění zpětného vrácení svazku " "%(volumeName)s pro návrat do výchozí skupiny úložiště pro zásadu FAST " "%(fastPolicyName)s." #, python-format msgid "No storage pool found with available capacity %s." msgstr "Nenalezena žádná zásoba úložiště s dostupnou kapacitou %s." msgid "No storage pool name or id was found." msgstr "Nebyl nalezen žádný název zásoby úložiště ani jeho id." msgid "No such host alias name." msgstr "Žádný takový název zkratky hostitele." #, python-format msgid "No target ports found in masking view %(maskingView)s." msgstr "V maskování %(maskingView)s nenalezeny žádné cílové porty." msgid "No volume node in XML content." msgstr "V obsahu XML není žádný uzel svazku." #, python-format msgid "No weighed hosts found for volume with properties: %s" msgstr "Nenalezeni žádní vážení hostitelé pro svazek s vlastnostmi %s" msgid "Non-iSCSI VLUN detected." msgstr "Zjištěn VLUN mimo iSCSI." #, python-format msgid "Not deleting key %s" msgstr "Klíč %s nebude smazán" #, python-format msgid "Persistence file already exists for volume, found file at: %s" msgstr "Soubor přetrvávání svazku již existuje, nalezen v %s" #, python-format msgid "" "Pre check for deletion. Volume: %(volumeName)s is part of a storage group. " "Attempting removal from %(storageGroupInstanceNames)s." msgstr "" "Předkontrola mazání. Svazek %(volumeName)s je součástí skupiny úložiště. " "Pokus o odstranění z %(storageGroupInstanceNames)s."
#, python-format msgid "" "Production use of \"%(backend)s\" backend requires the Cinder controller to " "have multipathing properly set up and the configuration option \"%(mpflag)s" "\" to be set to \"True\"." msgstr "" "Použití podpůrné vrstvy \"%(backend)s\" v ostrém provozu vyžaduje, aby " "kontrolér Cinder měl správně nastaveny vícenásobné cesty a volba nastavení " "\"%(mpflag)s\" nastavenu na \"True\"." #, python-format msgid "Property %s already exists." msgstr "Vlastnost %s již existuje." #, python-format msgid "Purity host deletion failed: %(msg)s." msgstr "Smazání hostitele Purity selhalo: %(msg)s." #, python-format msgid "" "Quota %(s_name)s exceeded for %(s_pid)s, tried to create volume " "(%(d_consumed)d volume(s) already consumed)." msgstr "" "Kvóta %(s_name)s překročena u %(s_pid)s, pokus o vytvoření svazku (již " "využíváno (%(d_consumed)d svazků)." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG backup " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" "U %(s_pid)s překročena kvóta, pokus o vytvoření zálohy o velikosti " "%(s_size)sG - (již využíváno (%(d_consumed)dG z %(d_quota)dG)." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " "(%(d_consumed)dG of %(d_quota)dG already consumed)." msgstr "" "U %(s_pid)s překročena kvóta, pokus o vytvoření snímku o velikosti " "%(s_size)sG (již využíváno (%(d_consumed)dG z %(d_quota)dG)." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" "U %(s_pid)s překročena kvóta, pokus o vytvoření svazku o velikosti " "%(s_size)sG (již využíváno (%(d_consumed)dG z %(d_quota)dG)." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" "U %(s_pid)s překročena kvóta, pokus o vytvoření svazku o velikosti " "%(s_size)sG - (již využíváno (%(d_consumed)dG z %(d_quota)dG)." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create backups (%(d_consumed)d " "backups already consumed)" msgstr "" "U %(s_pid)s překročena kvóta, pokus o vytvoření záloh (již využíváno " "(%(d_consumed)d záloh)." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " "snapshots already consumed)." msgstr "" "U %(s_pid)s překročena kvóta, pokus o vytvoření snímku (již využíváno " "(%(d_consumed)d snímků)." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d volumes " "already consumed)" msgstr "" "U %(s_pid)s překročena kvóta, pokus o vytvoření svazku (již využíváno " "(%(d_consumed)d svazků)." #, python-format msgid "" "RBD image for backup %(backup)s of volume %(volume)s not found. Deleting " "backup metadata." msgstr "" "Obraz RBD zálohy %(backup)s svazku %(volume)s nenalezen. Mazání popisných " "dat zálohy." #, python-format msgid "Reconnect attempt %(attempt)s failed. Next try in %(backoff).2fs." msgstr "" "Pokus o znovu připojení %(attempt)s selhal. Další pokus za %(backoff).2fs." #, python-format msgid "Registration of image volume URI %(uri)s to image %(image_id)s failed." msgstr "Registrace URI svazku obrazu %(uri)s k obrazu %(image_id)s selhala." #, python-format msgid "" "Remaining capacity %(remainingCapacityGb)s GBs is determined from SRP pool " "capacity and not the SLO capacity. Performance may not be what you expect." 
msgstr "" "Zbývající místo %(remainingCapacityGb)s GB je odvozeno od schopnosti zásoby " "SRP a ne SLO. Výkon nemusí být takový, jaký jste očekávali." #, python-format msgid "Rename failure in cleanup of cDOT QOS policy group %(name)s: %(ex)s" msgstr "" "Selhání přejmenování při čistění skupiny zásady QOS cDOT %(name)s: %(ex)s" #, python-format msgid "" "Report interval must be less than service down time. Current config " "service_down_time: %(service_down_time)s, report_interval for this: service " "is: %(report_interval)s. Setting global service_down_time to: " "%(new_down_time)s" msgstr "" "Interval hlášení musí být menší než doba nečinnosti. Současné nastavení doby " "nečinnosti služby: %(service_down_time)s, interval hlášení pro tuto službu " "je %(report_interval)s. Nastavování doby nečinnosti globální služby na: " "%(new_down_time)s" msgid "Requested image is not accessible by current Tenant." msgstr "Požadovaný obraz není dostupný současnému nájemníku." msgid "Returning as clean tmp vol job already running." msgstr "Probíhá vrácení protože úkol čištění dočasného svazku již probíhá." #, python-format msgid "" "ScaleIO only supports volumes with a granularity of 8 GBs. The new volume " "size is: %d." msgstr "" "ScaleIO podporuje pouze svazky s rozdělením po 8GB. Nová velikost svazku je: " "%d." #, python-format msgid "See unavailable iSCSI target: %s" msgstr "Prohlédněte si nedostupný cíl iSCSI: %s" msgid "Silent failure of target removal detected, retry...." msgstr "Zjištěno tiché selhání odstranění cíle, bude provede nový pokus..." #, python-format msgid "Snapshot \"%s\" not found." msgstr "Snímek \"%s\" nenalezen." #, python-format msgid "Snapshot %(name)s already exists. Message: %(msg)s" msgstr "Snímek %(name)s již existuje. Zpráva: %(msg)s" #, python-format msgid "" "Snapshot %(name)s for consistency group does not exist. Message: %(msg)s" msgstr "Snímek %(name)s pro skupinu jednotnosti neexistuje. Zpráva: %(msg)s" #, python-format msgid "Snapshot %(name)s is in use, retry. Message: %(msg)s" msgstr "Snímek %(name)s je používán, bude proveden nový pokus. Zpráva: %(msg)s" #, python-format msgid "Snapshot %(name)s may deleted already. Message: %(msg)s" msgstr "Snímek %(name)s již mohl být smazán. Zpráva: %(msg)s" #, python-format msgid "" "Snapshot %(snapname)s is attached to snapshot mount point %(mpname)s " "already. Message: %(msg)s" msgstr "" "Snímek %(snapname)s již je připojen k bodu připojení %(mpname)s. Zpráva: " "%(msg)s" #, python-format msgid "Snapshot %s already deleted." msgstr "Snímek %s již je smazán." #, python-format msgid "Snapshot still %(status)s Cannot delete snapshot." msgstr "Snímek je stále ve stavu %(status)s. Nelze ho smazat." #, python-format msgid "Start migration failed. Message: %s" msgstr "Zahájení přesunu selhalo. Zpráva: %s" #, python-format msgid "Storage Group %s is not found." msgstr "Skupina úložiště %s nebyla nalezena." #, python-format msgid "Storage Group %s is not found. Create it." msgstr "Skupina úložiště %s nenalezena. Bude vytvořena." #, python-format msgid "Storage Group %s is not found. terminate_connection() is unnecessary." msgstr "Skupina úložiště %s nenalezena. Není třeba ukončovat připojení." #, python-format msgid "Storage Pool '%(pool)s' is '%(state)s'." msgstr "Zásoba úložiště '%(pool)s' je '%(state)s'." #, python-format msgid "Storage group %(name)s already exists. Message: %(msg)s" msgstr "Skupina úložiště %(name)s již existuje. 
Zpráva: %(msg)s" #, python-format msgid "" "Storage group %(name)s doesn't exist, may have already been deleted. " "Message: %(msg)s" msgstr "" "Skupina úložiště %(name)s neexistuje, možná byla již smazána. Zpráva: %(msg)s" #, python-format msgid "Storage sync name not found for target %(target)s on %(storageSystem)s." msgstr "" "Název synchronizace úložiště nenalezen v cíli %(target)s na " "%(storageSystem)s." msgid "Storage-assisted migration failed during retype." msgstr "Přesun za pomocí úložiště selhal během přetypování." #, python-format msgid "" "The \"netapp_size_multiplier\" configuration option is deprecated and will " "be removed in the Mitaka release. Please set \"reserved_percentage = %d\" " "instead." msgstr "" "Volba nastavení \"netapp_size_multiplier\" je zastaralá a bude odstraněna " "ve verzi Mitaka. Místo toho prosím nastavte \"reserved_percentage = %d\"." msgid "The 'hplh' prefix is deprecated. Use 'hpelh' instead." msgstr "Předpona 'hplh' je zastaralá. Místo toho použijte 'hpelh'." msgid "The MCS Channel is grouped." msgstr "Kanál MCS je seskupen." msgid "" "The NAS file operations will be run as root, allowing root level access at " "the storage backend." msgstr "" "Operace se souborem NAS budou spouštěny pod účtem root, umožňující přístup k " "podpůrné vrstvě úložiště na úrovni správce." #, python-format msgid "" "The NAS file operations will be run as root: allowing root level access at " "the storage backend. This is considered an insecure NAS environment. Please " "see %s for information on a secure NAS configuration." msgstr "" "Operace se souborem NAS budou spouštěny pod účtem root: umožňující přístup k " "podpůrné vrstvě úložiště na úrovni správce. Toto je považováno za nebezpečné " "prostředí NAS. Pro další informace o bezpečném nastavení NFS si přečtěte %s." msgid "" "The NAS file permissions mode will be 666 (allowing other/world read & write " "access)." msgstr "" "Režim oprávnění souboru NAS bude 666 (umožňující ostatním/světu přístup ke " "čtení a zápisu)." #, python-format msgid "" "The NAS file permissions mode will be 666 (allowing other/world read & write " "access). This is considered an insecure NAS environment. Please see %s for " "information on a secure NFS configuration." msgstr "" "Režim oprávnění souboru NAS bude 666 (umožňující ostatním/světu přístup ke " "čtení a zápisu). Toto je považováno za nebezpečné prostředí NAS. Pro další " "informace o bezpečném nastavení NFS si přečtěte %s." msgid "" "The VMAX plugin only supports Retype. If a pool based migration is necessary " "this will happen on a Retype From the command line: cinder --os-volume-api-" "version 2 retype --migration-policy on-demand" msgstr "" "Zásuvný modul VMAX podporuje pouze přetypování. Pokud je přesun pomocí " "zásoby nutný, bude proveden při přetypování z příkazové řádky: cinder --os-" "volume-api-version 2 retype --migration-policy on-" "demand" #, python-format msgid "" "The colon in vendor name was replaced by underscore. Updated vendor name is " "%(name)s\"." msgstr "" "Pomlčka v názvu prodejce byla nahrazena podtržítkem. Aktualizovaný název " "prodejce je %(name)s\"." #, python-format msgid "The device %s won't be cleared." msgstr "Zařízení %s nebude vyčištěno." #, python-format msgid "" "The following specified storage pools do not exist: %(unexist)s. This host " "will only manage the storage pools: %(exist)s" msgstr "" "Následující zadané zásoby úložiště neexistují: %(unexist)s. 
Tento hostitel " "bude spravovat pouze tyto zásoby: %(exist)s" msgid "" "The option 'netapp_storage_pools' is deprecated and will be removed in the " "future releases. Please use the option 'netapp_pool_name_search_pattern' " "instead." msgstr "" "Volba 'netapp_storage_pools' je zastaralá a bude odstraněna v budoucích " "verzích. Místo toho prosím použijte 'netapp_pool_name_search_pattern'." msgid "" "The option 'netapp_volume_list' is deprecated and will be removed in the " "future releases. Please use the option 'netapp_pool_name_search_pattern' " "instead." msgstr "" "Volba 'netapp_volume_list' je zastaralá a bude odstraněna v budoucích " "verzích. Místo toho prosím použijte 'netapp_pool_name_search_pattern'." msgid "" "The primary array is not reachable at this time. Since replication is " "enabled, listing replication targets and failing over a volume can still be " "performed." msgstr "" "Hlavní pole není v současnosti dostupné. Protože je replikace povolená může " "být stále proveden výpis cílů replikace a zavedení záložního systému." #, python-format msgid "The provisioning: %(provisioning)s is not valid." msgstr "Poskytování %(provisioning)s není platné." #, python-format msgid "" "The source volume is a legacy volume. Create volume in the pool where the " "source volume %s is created." msgstr "" "Zdrojový svazek je zastaralý. Vytvořte svazek v zásobě, kde byl vytvořen " "zdrojový svazek %s." #, python-format msgid "The specified Snapshot mount point %s is not currently attached." msgstr "Zadaný bod připojení snímku %s není v současnosti připojen." #, python-format msgid "" "The user does not have access or sufficient privileges to use all netapp " "APIs. The following extra_specs will fail or be ignored: %s" msgstr "" "Uživatel nemá přístup nebo dostatečná oprávnění k použití všech API netapp. " "Následující dodatečné specifikace selžou nebo budou ignorovány: %s" #, python-format msgid "" "The volume: %(volumename)s was not first part of the default storage group " "for FAST policy %(fastPolicyName)s." msgstr "" "Svazek %(volumename)s nebyl první částí výchozí skupiny úložiště pro zásadu " "FAST %(fastPolicyName)s." #, python-format msgid "" "The volume: %(volumename)s. was not first part of the default storage group " "for FAST policy %(fastPolicyName)s." msgstr "" "Svazek %(volumename)s nebyl první částí výchozí skupiny úložiště pro zásadu " "FAST %(fastPolicyName)s." #, python-format msgid "" "There are no datastores matching new requirements; can't retype volume: %s." msgstr "" "Neexistují žádná datová úložiště odpovídající novým požadavkům; nelze " "přetypovat svazek %s." #, python-format msgid "Trying to boot from an empty volume: %s." msgstr "Pokus o zavedení z prázdného svazku: %s." #, python-format msgid "" "Unable to clone image_volume for image %(image_id)s will not create cache " "entry." msgstr "" "Nelze klonovat svazek obrazu %(image_id)s, položka v mezipaměti nebude " "vytvořena." #, python-format msgid "Unable to create folder %s" msgstr "Nelze vytvořit složku %s" #, python-format msgid "Unable to create snapshot %s" msgstr "Nelze vytvořit snímek %s" #, python-format msgid "Unable to delete Protection Group Snapshot: %s" msgstr "Nelze smazat snímek ochranné skupiny: %s" #, python-format msgid "Unable to delete Protection Group: %s" msgstr "Nelze smazat ochrannou skupinu: %s" #, python-format msgid "Unable to delete space %(space)s" msgstr "Nelze smazat prostor %(space)s" #, python-format msgid "" "Unable to ensure space for image-volume in cache. 
Will skip creating entry " "for image %(image)s on host %(host)s." msgstr "" "Nelze zajistit prostor v mezipaměti pro obraz-svazek. Bude přeskočeno " "vytváření záznamu obrazu %(image)s na hostiteli %(host)s." #, python-format msgid "" "Unable to extend volume: %(vol)s to size: %(size)s on current datastore due " "to insufficient space." msgstr "" "Nelze rozšířit svazek: %(vol)s na velikost %(size)s GB na současném datovém " "úložišti z důvodu nedostatku místa." #, python-format msgid "Unable to fetch datastores connected to host %s." msgstr "Nelze získat datová úložiště spojená s hostitelem %s." #, python-format msgid "Unable to find Masking view: %(view)s." msgstr "Nelze najít maskování: %(view)s." #, python-format msgid "Unable to find snapshot %s" msgstr "Nelze najít snímek %s" msgid "" "Unable to get internal tenant context: Missing required config parameters." msgstr "" "Nelze získat vnitřní kontext nájemníka: Chybí požadované parametry nastavení." msgid "Unable to get rados pool stats." msgstr "Nelze získat statistiky zásoby rados." msgid "Unable to get storage tiers from tier policy rule." msgstr "Nelze získat vrstvy úložiště z pravidla zásady vrstvy." #, python-format msgid "Unable to locate volume:%s" msgstr "Nelze nalézt svazek: %s" msgid "Unable to poll cluster free space." msgstr "Nelze zjistit volné místo clusteru." #, python-format msgid "Unable to rename %(old_name)s, error message: %(error)s" msgstr "Nelze přejmenovat %(old_name)s, chybová zpráva: %(error)s" #, python-format msgid "Unable to update host type for host with label %(l)s. %(e)s" msgstr "Nelze aktualizovat typ hostitele se jmenovkou %(l)s. %(e)s" #, python-format msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "Nelze aktualizovat statistiky v nezavedené skupině svazku: %s" #, python-format msgid "Unexpected exception during image cloning in share %s" msgstr "Neočekávaná výjimka při klonování obrazu ve sdílení %s" msgid "Unexpected exception while listing used share." msgstr "Neočekávaná výjimka při vypisování použitých sdílení." msgid "Unexpected exception while short listing used share." msgstr "" "Při zkráceném vypisování použitého sdílení došlo k neočekávané výjimce." #, python-format msgid "" "Unknown error occurred while checking mount point: %s Trying to continue." msgstr "" "Při kontrole bodu připojení nastala neznámá chyba: %s. Probíhá pokus o " "pokračování." #, python-format msgid "Update driver status failed: %(config_group)s is uninitialized." msgstr "Aktualizace stavu ovladače selhala: %(config_group)s není zavedeno." msgid "Use of empty request context is deprecated" msgstr "Použití kontextu prázdné žádosti je zastaralé" #, python-format msgid "" "Vendor unique property \"%(property)s\" must start with vendor prefix with " "colon \"%(prefix)s\". The property was not registered on capabilities list." msgstr "" "Vlastnost jedinečná prodejci \"%(property)s\" musí začínat předponou " "prodejce \"%(prefix)s\" s dvojtečkou. Vlastnost nebyla registrována v seznamu " "schopností." msgid "Verify certificate is not set, using default of False." msgstr "Ověření certifikátu nenastaveno, použito výchozí nastavení: False." #, python-format msgid "Virtual Volume Set '%s' doesn't exist on array." msgstr "Sada virtuálního svazku '%s' neexistuje v poli." #, python-format msgid "Volume \"%s\" not found." msgstr "Svazek \"%s\" nenalezen." #, python-format msgid "Volume %(name)s already presented (%(status)d)!" msgstr "Svazek %(name)s již je přítomen (%(status)d)!" 
#, python-format msgid "Volume %(vol)s was not in Storage Group %(sg)s." msgstr "Svazek %(vol)s není ve skupině úložiště %(sg)s." #, python-format msgid "Volume %(volume)s is not in any masking view." msgstr "Svazek %(volume)s není v žádném maskování." #, python-format msgid "" "Volume %(volumeName)s was not first part of the default storage group for " "the FAST Policy." msgstr "" "Svazek %(volumeName)s nebyl první částí výchozí skupiny úložiště pro zásadu " "FAST." #, python-format msgid "Volume %(volume_id)s already deleted." msgstr "Svazek %(volume_id)s již byl smazán." #, python-format msgid "Volume %(volume_id)s cannot be retyped because it has snapshot." msgstr "Svazek %(volume_id)s nemůže být přetypován protože má snímek." #, python-format msgid "Volume %(volume_id)s cannot be retyped during attachment." msgstr "Svazek %(volume_id)s nemůže být během připojování přetypován." #, python-format msgid "Volume %s already deleted." msgstr "Svazek %s již je smazán." #, python-format msgid "Volume %s does not exist." msgstr "Svazek %s neexistuje." #, python-format msgid "Volume %s does not have provider_location specified, skipping" msgstr "Svazek %s nemá zadáno umístění poskytovatele, je přeskočen" #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "Svazek %s nemá zadáno umístění poskytovatele, je přeskočen." #, python-format msgid "Volume %s exists but can't be deleted" msgstr "Svazek %s existuje ale nemůže být smazán" #, python-format msgid "Volume %s is not found!, it may have been deleted." msgstr "Svazek %s nenalezen! Mohl být smazán." #, python-format msgid "Volume %s was not found while trying to delete it." msgstr "Svazek %s nebyla nalezen při pokusu o jeho smazání." #, python-format msgid "" "Volume : %(volumeName)s is not currently belonging to any storage group." msgstr "Svazek: %(volumeName)s v současnosti nepatří k žádné skupině úložiště." #, python-format msgid "Volume copy job for src vol %s not found." msgstr "Úkol kopírování svazku pro zdrojový svazek %s nenalezen." #, python-format msgid "Volume deletion failed with message: %s" msgstr "Smazání svazku selhalo se zprávou: %s" #, python-format msgid "Volume initialization failure. (%s)" msgstr "Zavedení ovladače selhalo. (%s)" #, python-format msgid "" "Volume is masked but not to host %(host)s as expected. Returning empty " "dictionary." msgstr "" "Svazek je zamaskován ale ne vůči hostiteli %(host)s jak se očekávalo. " "Vracení prázdného slovníku." #, python-format msgid "Volume is not writable. Please broaden the file permissions. Mount: %s" msgstr "" "Nelze zapisovat na svazek. Prosím rozšiřte oprávnění souboru. Připojení: %s" #, python-format msgid "Volume not found. %s" msgstr "Svazek nenalezen. %s" #, python-format msgid "Volume path %s does not exist, nothing to remove." msgstr "Cesta svazku %s neexistuje, není nic k odstranění." msgid "Volume refresh job already running. Returning..." msgstr "Úkol obnovy svazku již běží. Vrácení..." #, python-format msgid "Volume still %(status)s Cannot delete volume." msgstr "Svazek je stále ve stavu %(status)s. Nelze ho smazat." msgid "Volume type will be changed to be the same as the source volume." msgstr "Typ svazku bude změněn aby odpovídal typu zdrojového svazku" #, python-format msgid "" "Volume: %(volumeName)s Does not belong to storage group %(defaultSgName)s." msgstr "Svazek: %(volumeName)s nepatří do skupiny úložiště %(defaultSgName)s." 
#, python-format msgid "Volume: %(volumeName)s is not currently belonging to any storage group." msgstr "Svazek %(volumeName)s v současnosti nepatří k žádné skupině úložiště." #, python-format msgid "Volume: %s is in use, can't retype." msgstr "Svazek %s se používá, nelze přetypovat." #, python-format msgid "_get_vdisk_map_properties: Did not find a preferred node for vdisk %s." msgstr "" "Získání vlastnosti mapy virtuálního disku: Nenalezen upřednostňovaný uzel " "pro virtuální disk %s." #, python-format msgid "_migrate_cleanup on : %(volumeName)s." msgstr "Vyčištění přesunu pro %(volumeName)s." #, python-format msgid "_migrate_rollback on : %(volumeName)s." msgstr "Zpětné vrácení přesunu pro %(volumeName)s." msgid "_remove_device: invalid properties or device." msgstr "Odstranění zařízení: Neplatné vlastnosti nebo zařízení." #, python-format msgid "" "_unmap_vdisk_from_host: Multiple mappings of volume %(vdisk_name)s found, no " "host specified." msgstr "" "Odmapování virtuálního disku od hostitele: Nalezeno vícero mapování svazku " "%(vdisk_name)s, nezadán žádný hostitel." #, python-format msgid "" "_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" "Odmapování virtuálního disku od hostitele: Nenalezeno žádné mapování svazku " "%(vol_name)s k žádnému hostiteli." msgid "" "config option keymgr.fixed_key has not been defined: some operations may " "fail unexpectedly" msgstr "" "Volba nastavení keymgr.fixed_key není zadána: některé operace mohou nečekaně " "selhat" #, python-format msgid "delete_volume: unable to find volume %s" msgstr "Smazání svazku: Nelze najít svazek %s" msgid "" "destroy_empty_storage_group: True. Empty storage group will be deleted after " "volume is detached." msgstr "" "destroy_empty_storage_group: True. Prázdná skupina úložiště bude smazána po " "odpojení svazku." msgid "flush() not supported in this version of librbd" msgstr "flush() není podporován touto verzí librbd" msgid "force_delete_lun_in_storagegroup=True" msgstr "force_delete_lun_in_storagegroup=True" #, python-format msgid "get_evs: %(out)s -- No find for %(fsid)s" msgstr "Získání evs: %(out)s -- Nenalezeno pro %(fsid)s" #, python-format msgid "get_fsid: %(out)s -- No info for %(fslabel)s" msgstr "Získání fsid: %(out)s -- Žádné informace pro %(fslabel)s" msgid "" "glance_num_retries shouldn't be a negative value. The number of retries will " "be set to 0 until this iscorrected in the cinder.conf." msgstr "" "Počet pokusů pro glance by neměl mí zápornou hodnotu. Počet pokusů bude " "nastaven na 0, dokud nebude opraven v conder.conf." #, python-format msgid "" "iSCSI IP: '%s' was not found in hpe3par_iscsi_ips list defined in cinder." "conf." msgstr "" "iSCSI IP: '%s' nebylo nalezeno v seznamu hpe3par_iscsi_ips zadaném v cinder." "conf." msgid "" "ignore_pool_full_threshold: True. LUN creation will still be forced even if " "the pool full threshold is exceeded." msgstr "" "ignore_pool_full_threshold: True. Vytvoření LUN bude vynuceno i když bude " "překročen práh naplnění zásoby." #, python-format msgid "initialize_connection: Did not find a preferred node for volume %s." msgstr "Zavedení spojení: Nenalezen upřednostňovaný uzel pro svazek %s." #, python-format msgid "ldev(%(ldev)d) is already mapped (hlun: %(hlu)d)" msgstr "ldev(%(ldev)d) již je mapováno (hlun: %(hlu)d)" #, python-format msgid "object %(key)s of type %(typ)s not found, %(err_msg)s" msgstr "Objekt %(key)s typu %(typ)s nenalezen, %(err_msg)s" msgid "qemu-img is not installed." 
msgstr "qemu-img není nainstalováno." msgid "refresh stale ssc job in progress. Returning... " msgstr "Úkol obnovy starého ssc probíhá. Vrácení..." msgid "san_secondary_ip is configured as the same value as san_ip." msgstr "san_secondary_ip je nastaven na stejnou hodnotu jako san_ip." msgid "snapcopy metadata is ignored when creating volume." msgstr "popisná data kopie snímku jsou při vytváření snímku ignorována." #, python-format msgid "snapshot: %s not found, skipping delete operation" msgstr "snímek: %s nenalezeno, operace mazání je přeskočena" #, python-format msgid "snapshot: %s not found, skipping delete operations" msgstr "snímek: %s nenalezeno, operace mazání jsou přeskočeny" #, python-format msgid "" "srstatld requires WSAPI version '%(srstatld_version)s' version '%(version)s' " "is installed." msgstr "" "srstatld vyžaduje verzi WSAPI '%(srstatld_version)s'. Naisntalovaná verze je " "'%(version)s'." msgid "ssc job in progress. Returning... " msgstr "Úkol ssc probíhá. Vrácení..." msgid "terminate_conn: provider location empty." msgstr "Ukončení připojení: Umístění poskytovatele je prázdné." msgid "terminate_connection: lun map not found" msgstr "Ukončení připojení: Mapa lun nenalezena" #, python-format msgid "" "unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no host " "specified." msgstr "" "Odmapování svazku od hostitele: Nalezeno vícero mapování svazku " "%(vol_name)s, nezadán žádný hostitel." #, python-format msgid "" "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" "Odmapování svazku od hostitele: Nenalezeno žádné mapování svazku " "%(vol_name)s k žádnému hostiteli." #, python-format msgid "" "unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s " "found." msgstr "" "Odmapování svazku od hostitele: Nenalezeno žádné mapování svazku " "%(vol_name)s k hostiteli %(host)s." #, python-format msgid "volume service is down. (host: %s)" msgstr "Služba svazku je mimo provoz. (Hostitel: %s)" msgid "volume_tmp_dir is now deprecated, please use image_conversion_dir." msgstr "" "volume_tmp_dir je nyní zastaralé, prosím použijte image_conversion_dir." #, python-format msgid "warning: Tried to delete vdisk %s but it does not exist." msgstr "Varování: Pokus o smazání virtuálního disku %s, který ale neexistuje." #, python-format msgid "" "zfssa_initiator: %(ini)s wont be used on zfssa_initiator_group= %(inigrp)s." msgstr "" "Zavaděč zfssa: %(ini)s nebude použit ve skupině zavaděče zfssa= %(inigrp)s." msgid "" "zfssa_initiator_config not found. Using deprecated configuration options." msgstr "zfssa_initiator_config nenalezeno. Použity zastaralé volby nastavení." cinder-8.0.0/cinder/locale/cs/LC_MESSAGES/cinder-log-info.po0000664000567000056710000030205412701406250024336 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # OpenStack Infra , 2015. #zanata # Zbyněk Schwarz , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev10\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-17 18:05+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-12-22 01:35+0000\n" "Last-Translator: Zbyněk Schwarz \n" "Language: cs\n" "Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 
1 : 2;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Czech\n" #, python-format msgid "\t%(name)-35s : %(value)s" msgstr "\t%(name)-35s : %(value)s" #, python-format msgid "\t%(param)-35s : %(value)s" msgstr "\t%(param)-35s : %(value)s" #, python-format msgid "\t%(prefix)-35s : %(version)s" msgstr "\t%(prefix)-35s : %(version)s" #, python-format msgid "\t%(request)-35s : %(value)s" msgstr "\t%(request)-35s : %(value)s" #, python-format msgid "" "\n" "\n" "\n" "\n" "Request URL: %(url)s\n" "\n" "Call Method: %(method)s\n" "\n" "Request Data: %(data)s\n" "\n" "Response Data:%(res)s\n" "\n" msgstr "" "\n" "\n" "\n" "\n" "URL požadavku: %(url)s\n" "\n" "Metoda volání: %(method)s\n" "\n" "Požadovaná data: %(data)s\n" "\n" "Vrácená data:%(res)s\n" "\n" #, python-format msgid "%(element)s: %(val)s" msgstr "%(element)s: %(val)s" #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format msgid "%(url)s returned a fault: %(e)s" msgstr "%(url)s vrátilo chybu: %(e)s" #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s vrácena s HTTP %(status)d" #, python-format msgid "%(volume)s assign type fibre_channel, properties %(properties)s" msgstr "%(volume)s má typ přidělení kanál fibre, vlastnosti %(properties)s" #, python-format msgid "%s is already umounted" msgstr "%s již je odpojeno" #, python-format msgid "3PAR driver cannot perform migration. Retype exception: %s" msgstr "Ovladač 3PAR nemůže provést přesun. Výjimka při přetypování: %s" #, python-format msgid "3PAR vlun %(name)s not found on host %(host)s" msgstr "3PAR vlun %(name)s nenalezen v hostiteli %(host)s" #, python-format msgid "" "3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was not " "deleted because: %(reason)s" msgstr "" "3PAR vlun pro svazek '%(name)s' byl smazán, ale hostitel '%(host)s' nebyl, " "protože: %(reason)s" #, python-format msgid "AUTH properties: %(authProps)s" msgstr "Vlastnosti ověření: %(authProps)s" #, python-format msgid "AUTH properties: %s." msgstr "Vlastnosti ověření: %s." #, python-format msgid "Accepting transfer %s" msgstr "Přijímání přenosu %s" msgid "Activate Flexvisor cinder volume driver." msgstr "Aktivovat ovladač Flexvisor pro svazek cinder." #, python-format msgid "Add volume response: %s" msgstr "Odpověď přidání svazku: %s" #, python-format msgid "Added %s to cg." msgstr "Přidávání %s do skupiny jednotnosti." #, python-format msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." msgstr "" "Přidán svazek: %(volumeName)s do existující skupiny úložiště " "%(sgGroupName)s. " #, python-format msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" msgstr "Přidávání ACL do svazku %(vol)s se skupinou zavaděče s názvem %(igrp)s" #, python-format msgid "" "Adding volume: %(volumeName)s to default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Přidávání svazku: %(volumeName)s do výchozí skupiny úložiště por zásadu " "FAST: %(fastPolicyName)s." #, python-format msgid "Adding volumes to cg %s." msgstr "Přidávání svazků do skupiny jednotnosti %s." msgid "Attach volume completed successfully." msgstr "Připojení svazku úspěšně dokončeno." msgid "Availability Zones retrieved successfully." msgstr "Zóny dostupnosti úspěšně získány." #, python-format msgid "Available services: %s" msgstr "Dostupné služby: %s" #, python-format msgid "Available services: %s." msgstr "Dostupné služby: %s." #, python-format msgid "Backend name is %s." 
msgstr "Název podpůrné vrstvy je %s." #, python-format msgid "Backend type: %s" msgstr "Typ podpůrné vrstvy: %s" #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "Záloha virtuálního stroje %(backing)s přejmenována na %(new_name)s." msgid "Backing not available, no operation to be performed." msgstr "Zálohování není dostupné, nebude provedena žádná operace." #, python-format msgid "Backing not found, creating for volume: %s" msgstr "Záloha nenalezena, vytváření zálohy pro svazek %s" #, python-format msgid "" "Backup base image of volume %(volume)s still has %(snapshots)s snapshots so " "skipping base image delete." msgstr "" "Základní obraz zálohy svazku %(volume)s stále má snímky %(snapshots)s. " "Mazání základního obrazu je přeskočeno." #, python-format msgid "" "Backup image of volume %(volume)s is busy, retrying %(retries)s more time(s) " "in %(delay)ss." msgstr "" "Záložní obraz svazku %(volume)s je zaneprázdněn. Bude zopakováno ještě " "%(retries)skrát za %(delay)ss." #, python-format msgid "Backup service: %s." msgstr "Zálohovací služba: %s." #, python-format msgid "Bandwidth limit is: %s." msgstr "Limit šířky pásma je: %s." #, python-format msgid "Begin backup of volume %s." msgstr "Spuštění zálohování svazku %s." msgid "Begin detaching volume completed successfully." msgstr "Zahájení odpojení svazku úspěšně dokončeno." msgid "CHAP authentication disabled." msgstr "Ověření pomocí CHAP zakázáno." #, python-format msgid "CONCERTO version: %s" msgstr "Verze CONCERTO: %s" msgid "Calling os-brick to detach ScaleIO volume." msgstr "Volání os-brick pro odpojení svazku ScaleIO." #, python-format msgid "Cancelling Migration from LUN %s." msgstr "Rušení přesunu z LUN %s." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because cluster " "exists in different management group." msgstr "" "Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože " "cluster existuje v jiné správní skupině." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the volume " "has been exported." msgstr "" "Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože " "svazek byl exportován." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the volume " "has snapshots." msgstr "" "Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože " "svazek má snímky." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume does " "not exist in this management group." msgstr "" "Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože " "svazek není v této správní skupině." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume is " "from a different backend." msgstr "" "Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože je " "z jiné podpůrné vrstvy." #, python-format msgid "" "Capacity stats for SRP pool %(poolName)s on array %(arrayName)s " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu" msgstr "" "Statistiky kapacity pro zásobu SRP %(poolName)s v poli %(arrayName)s: " "celková kapacita v gb: %(total_capacity_gb)lu, volná kapacita v gb: " "%(free_capacity_gb)lu" #, python-format msgid "Cgsnapshot %s: creating." msgstr "Snímek skupiny jednotnosti %s: vytváření" #, python-format msgid "Change volume capacity request: %s." msgstr "Žádost o změnu kapacity svazku: %s." 
#, python-format msgid "Checking image clone %s from glance share." msgstr "Kontrolování klona obrazu %s ze sdílení glance." #, python-format msgid "Checking origin %(origin)s of volume %(volume)s." msgstr "Kontrola původu %(origin)s svazku %(volume)s." #, python-format msgid "" "Cinder ISCSI volume with current path %(path)s is no longer being managed. " "The new name is %(unm)s." msgstr "" "Svazek ISCSI Cinder se současnou cestou %(path)s již není spravován. Nový " "název je %(unm)s." #, python-format msgid "" "Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." msgstr "Svazek NFS Cinder se současnou cestou \"%(cr)s\" již není spravován." #, python-format msgid "Cinder NFS volume with current path %(cr)s is no longer being managed." msgstr "Svazek NFS Cinder se současnou cestou %(cr)s již není spravován." msgid "Cinder secure environment indicator file exists." msgstr "Soubor indikující bezpečné prostředí Cinder existuje." #, python-format msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" msgstr "CiscoFCZoneDriver - Přidáno připojení pro mapu I-T: %s" #, python-format msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" msgstr "CiscoFCZoneDriver - Smazáno připojení pro mapu I-T: %s" #, python-format msgid "Cleaning cache for share %s." msgstr "Čištění mezipaměti pro sídlení %s." msgid "Cleaning up incomplete backup operations." msgstr "Čištění nedokončené operace zálohování." #, python-format msgid "Clone %s created." msgstr "Klon %s vytvořen." #, python-format msgid "Cloning from cache to destination %s" msgstr "Klonování z mezipaměti do cíle %s" #, python-format msgid "Cloning from snapshot to destination %s" msgstr "Klonování ze snímku do cíle %s" #, python-format msgid "Cloning image %s from cache" msgstr "Klonování obrazu %s z mezipaměti." #, python-format msgid "Cloning image %s from snapshot." msgstr "Klonování obrazu %s ze snímku." #, python-format msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "Klonování svazku %(src)s do svazku %(dst)s" #, python-format msgid "" "Cloning volume from snapshot volume=%(vol)s snapshot=%(snap)s clone=" "%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=%(agent-type)s " "perfpol-name=%(perfpol-name)s encryption=%(encryption)s cipher=%(cipher)s " "multi-initiator=%(multi-initiator)s" msgstr "" "Klonování svazku ze svazku snímku %(vol)s, snímek %(snap)s, klon %(clone)s, " "velikost snímku %(size)s, rezerva%(reserve)s, typ agenta %(agent-type)s, " "název zásady výkonu %(perfpol-name)s, šifrování %(encryption)s, šifra " "%(cipher)s, vícenásobný zavaděč %(multi-initiator)s" #, python-format msgid "" "Cloning with volume_name %(vname)s clone_name %(cname)s export_path %(epath)s" msgstr "" "Klonování svazku s názvem %(vname)s, název klonu %(cname)s, cesta pro export " "%(epath)s" #, python-format msgid "CloudByte API executed successfully for command [%s]." msgstr "CloudByte API úspěšně provedeno pro příkaz [%s]." #, python-format msgid "" "CloudByte operation [%(operation)s] succeeded for volume [%(cb_volume)s]." msgstr "" "Operace CloudByte [%(operation)s] úspěšně provedena ve svazku " "[%(cb_volume)s]." msgid "Complete-Migrate volume completed successfully." msgstr "Dokončení přenosu svazku úspěšně provedeno." #, python-format msgid "Completed: convert_to_base_volume: id=%s." msgstr "Dokončeno: Převod na základní svazek: id=%s." 
#, python-format msgid "Configured pools: %s" msgstr "Nastavené zásoby: %s" #, python-format msgid "" "Connect initialization info: {driver_volume_type: fibre_channel, data: " "%(properties)s" msgstr "" "informace o zavedení připojení: {typ ovladače svazku: kanál fibre, data: " "%(properties)s" #, python-format msgid "Connecting to host: %s." msgstr "Připojování k hostiteli %s." #, python-format msgid "Connecting to target host: %s for backend enabled migration." msgstr "" "Připojování k cílovému hostiteli %s pro přesun za pomoci podpůrné vrstvy." #, python-format msgid "Connector returning fcnsinfo-%s" msgstr "Konektor vrací fcnsinfo-%s" #, python-format msgid "Consistency group %(cg)s is created successfully." msgstr "Skupina jednotnosti %(cg)s byla úspěšně vytvořena." #, python-format msgid "Consistency group %s was deleted successfully." msgstr "Skupina jednotnosti %s byla úspěšně smazána." #, python-format msgid "Consistency group %s: created successfully" msgstr "Skupina jednotnosti %s: úspěšně vytvořena" #, python-format msgid "Consistency group %s: creating" msgstr "Skupina jednotnosti %s: vytváření" #, python-format msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" msgstr "Převeden obraz o velikosti %(sz).2f MB rychlostí %(mbps).2f MB/s" #, python-format msgid "" "Converting %(volume_name)s to full provisioning with userCPG=%(new_cpg)s" msgstr "" "Převádění %(volume_name)s na úplné poskytování pomocí uživatele společné " "skupiny poskytování %(new_cpg)s" #, python-format msgid "" "Converting %(volume_name)s to thin dedup provisioning with userCPG=" "%(new_cpg)s" msgstr "" "Převádění %(volume_name)s na mělké deduplikované poskytování pomocí " "uživatele společné skupiny poskytování %(new_cpg)s" #, python-format msgid "" "Converting %(volume_name)s to thin provisioning with userCPG=%(new_cpg)s" msgstr "" "Převádění %(volume_name)s na mělké poskytování pomocí uživatele společné " "skupiny poskytování %(new_cpg)s" msgid "Coordination backend started successfully." msgstr "Podpůrná vrstva pro koordinaci byla úspěšně spuštěna." #, python-format msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." msgstr "" "Obraz %(img)s zkopírován do svazku %(vol)s použitím postupu kopírování " "zátěže." #, python-format msgid "Copied image %(img)s to volume %(vol)s using local image cache." msgstr "" "Obraz %(img)s zkopírován do svazku %(vol)s použitím místní mezipaměti obrazu." #, python-format msgid "Copied image to volume %s using regular download." msgstr "Obraz zkopírován do svazku %s použitím normálního stažení." #, python-format msgid "Copy job to dest vol %s completed." msgstr "Úkol kopírování do cílového svazku %s byl dokončen." msgid "Copy volume to image completed successfully." msgstr "Kopírování svazku do obrazu úspěšně dokončeno." #, python-format msgid "Copying src vol %(src)s to dest vol %(dst)s." msgstr "Kopírování zdrojového svazku %(src)s do cílového svazku %(dst)s." #, python-format msgid "Could not find replica to delete of volume %(vol)s." msgstr "Nelze najít repliku ke smazání svazku %(vol)s." #, python-format msgid "Could not run dpkg-query command: %(msg)s." msgstr "Nelze spustit příkaz dpkg-query: %(msg)s." #, python-format msgid "Could not run rpm command: %(msg)s." msgstr "Nelze spustit příkaz rpm: %(msg)s." 
#, python-format msgid "" "Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" msgstr "" "Nelze aktualizovat zásobu úložiště pomocí mmchattr na %(pool)s, chyba: " "%(error)s" #, python-format msgid "" "Couldn't find destination volume %(vol)s in the database. The entry might be " "successfully deleted during migration completion phase." msgstr "" "V databázi nelze najít cílový svazek %(vol)s. Položka mohla být úspěšně " "smazána během fáze dokončení přenosu." #, python-format msgid "" "Couldn't find the temporary volume %(vol)s in the database. There is no need " "to clean up this volume." msgstr "" "V databázi nelze najít dočasný svazek %(vol)s. Není třeba tento svazek " "čistit." #, python-format msgid "Create Cloned Volume %(volume_id)s completed." msgstr "Vytváření klonovaného svazku %(volume_id)s dokončeno." #, python-format msgid "Create Consistency Group: %(group)s." msgstr "Vytvořit skupinu jednotnosti: %(group)s." #, python-format msgid "Create Volume %(volume_id)s completed." msgstr "Vytváření svazku %(volume_id)s dokončeno." #, python-format msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." msgstr "" "Vytváření svazku %(volume_id)s ze snímku %(snapshot_id)s bylo dokončeno." #, python-format msgid "" "Create Volume: %(volume)s Size: %(size)s pool: %(pool)s provisioning: " "%(provisioning)s tiering: %(tiering)s " msgstr "" "Vytvoření svazku: %(volume)s, velikost: %(size)s, zásoba: %(pool)s, " "poskytování: %(provisioning)s, vrstvení: %(tiering)s " #, python-format msgid "" "Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " "%(sourceName)s." msgstr "" "Vytvoření repliky ze svazku: Klon svazku: %(cloneName)s, zdrojový svazek: " "%(sourceName)s." #, python-format msgid "Create backup finished. backup: %s." msgstr "Vytváření zálohy dokončeno. Záloha: %s." #, python-format msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" "Vytváření zálohy bylo zahájeno: záloha: %(backup_id)s svazek: %(volume_id)s." msgid "Create consistency group completed successfully." msgstr "Vytvoření skupiny jednotnosti úspěšně dokončeno." #, python-format msgid "Create consistency group from source-%(source)s completed successfully." msgstr "Vytvoření skupiny jednotnosti ze zdroje %(source)s úspěšně dokončeno." #, python-format msgid "Create export done from Volume %(volume_id)s." msgstr "Vytvoření exportu pro svazek %(volume_id)s bylo úspěšně dokončeno. " msgid "Create snapshot completed successfully" msgstr "Vytvoření snímku úspěšně dokončeno" #, python-format msgid "" "Create snapshot for Consistency Group %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgstr "" "Vytvořit snímek skupiny jednotnosti %(cgId)s, id snímku: %(cgsnapshot)s." #, python-format msgid "Create snapshot from volume %s" msgstr "Vytvořit snímek ze svazku %s" #, python-format msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "Vytvoření snímku: %(snapshot)s, svazek: %(volume)s" #, python-format msgid "" "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " "%(raid_snapshot_id)s, volume: %(volume)s." msgstr "" "Vytvoření bylo úspěšné. Snímek: %(snapshot)s,id snímku v raid: " "%(raid_snapshot_id)s, svazek: %(volume)s." #, python-format msgid "Create target consistency group %(targetCg)s." msgstr "Vytvořit cílovou skupinu jednotnosti %(targetCg)s." 
#, python-format msgid "Create volume of %s GB" msgstr "Vytvořit svazek o %s GB" #, python-format msgid "" "Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume [%(cb_vol)s] " "and OpenStack volume [%(stack_vol)s]." msgstr "" "Snímek CloudByte úspěšně vytvořen [%(cb_snap)s] s ohledem na svazky " "CloudByte [%(cb_vol)s] a OpenStack [%(stack_vol)s]." #, python-format msgid "Created Consistency Group %s" msgstr "Vytvořena skupina jednotnosti %s" #, python-format msgid "" "Created a clone [%(cb_clone)s] at CloudByte snapshot path [%(cb_snap)s] w.r." "t parent OpenStack volume [%(stack_vol)s]." msgstr "" "Klon [%(cb_clone)s] vytvořen v cestě snímků CloudByte [%(cb_snap)s] s " "ohledem na nadřazený svazek OpenStack [%(stack_vol)s]." #, python-format msgid "Created datastore folder: %s." msgstr "Vytvořena složka datového úložiště %s." #, python-format msgid "" "Created lun-map:\n" "%s" msgstr "" "Vytvořena mapa lun:\n" "%s" #, python-format msgid "" "Created multi-attach E-Series host group %(label)s with clusterRef " "%(clusterRef)s" msgstr "" "Vytvořena skupina vícenásobného připojení hostitele E-Series '%(label)s' s " "odkazem clusteru %(clusterRef)s" #, python-format msgid "Created new initiator group name: %(igGroupName)s." msgstr "Vytvořen nový název skupiny zavaděče: %(igGroupName)s." #, python-format msgid "Created new masking view : %(maskingViewName)s." msgstr "Vytvořeno nové zamaskování : %(maskingViewName)s." #, python-format msgid "Created new storage group: %(storageGroupName)s." msgstr "Vytvořena nová skupina úložiště: %(storageGroupName)s." #, python-format msgid "Created snap grp with label %s." msgstr "Vytvořena skupina snímku se jmenovkou %s." #, python-format msgid "Created volume %(instanceId)s: %(name)s" msgstr "Vytvořen svazek %(instanceId)s: %(name)s" #, python-format msgid "Created volume %(volname)s, volume id %(volid)s." msgstr "Svazek %(volname)s vytvořen, id svazku %(volid)s." msgid "Created volume successfully." msgstr "Svazek úspěšně vytvořen." #, python-format msgid "Created volume with label %s." msgstr "Vytvořen svazek se jmenovkou %s." #, python-format msgid "Creating %(volume)s on %(device)s" msgstr "Vytváření %(volume)s na %(device)s" #, python-format msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "Vytváření zálohy svazku %(volume_id)s v kontejneru %(container)s" #, python-format msgid "Creating cgsnapshot %(name)s." msgstr "Vytváření snímku skupiny jednotnosti %(name)s." #, python-format msgid "Creating clone of volume: %s" msgstr "Vytváření klona svazku %s" #, python-format msgid "Creating clone of volume: %s." msgstr "Vytváření klona svazku %s." #, python-format msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." msgstr "Vytváření skupiny jednotnosti %(name)s ze snímku %(snap)s." #, python-format msgid "" "Creating consistency group %(name)s from source consistency group " "%(source_cgid)s." msgstr "" "Vytváření skupiny jednotnosti %(name)s ze zdrojové skupiny jednotnosti " "%(source_cgid)s." #, python-format msgid "Creating consistency group %(name)s." msgstr "Vytváření skupiny jednotnosti %(name)s." #, python-format msgid "Creating host object %(host_name)r with IQN: %(iqn)s." msgstr "Vytváření objektu hostitele %(host_name)r s IQN: %(iqn)s." #, python-format msgid "Creating host object %(host_name)r with WWN: %(wwn)s." msgstr "Vytváření objektu hostitele %(host_name)r s WWN: %(wwn)s." #, python-format msgid "Creating host with ports %s." msgstr "Vytváření hostitele s porty %s." 
#, python-format msgid "Creating image snapshot %s" msgstr "Vytváření snímku obrazu %s" #, python-format msgid "Creating initiator group %(grp)s with initiator %(iname)s" msgstr "Vytváření skupiny zavaděče %(grp)s se zavaděčem %(iname)s" #, python-format msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" msgstr "Vytváření skupiny zavaděče %(igrp)s s jedním zavaděčem %(iname)s" #, python-format msgid "Creating iscsi_target for volume: %s" msgstr "Vytváření cíle iscsi pro svazek %s" #, python-format msgid "" "Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " "snap_description=%(desc)s" msgstr "" "Vytváření snímku pro svazek %(vol)s, název snímku: %(name)s, popis snímku " "%(desc)s" #, python-format msgid "Creating snapshot: %s" msgstr "Vytváření snímku: %s" #, python-format msgid "Creating transfer of volume %s" msgstr "Vytváření přenosu pro svazek %s" #, python-format msgid "Creating volume %s from snapshot." msgstr "Vytváření svazku %s ze snímku." #, python-format msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s." msgstr "" "Vytváření svazku s velikostí %(size)s GB pro obnovu zálohy %(backup_id)s." #, python-format msgid "Creating volume snapshot: %s." msgstr "Vytváření snímku svazku: %s." #, python-format msgid "Creatng volume from snapshot. volume: %s" msgstr "Vytváření svazku ze snímku. Svazek: %s" #, python-format msgid "Delete Consistency Group: %(group)s." msgstr "Smazat skupiny jednotnosti: %(group)s." #, python-format msgid "Delete Snapshot %(snapshot_id)s completed." msgstr "Mazání snímku %(snapshot_id)s dokončeno." #, python-format msgid "Delete Snapshot: %(snapshot)s" msgstr "Smazat snímek: %(snapshot)s" #, python-format msgid "Delete Snapshot: %(snapshot)s." msgstr "Smazání snímku: %(snapshot)s." #, python-format msgid "Delete Snapshot: %(snapshotName)s." msgstr "Mazání snímku: %(snapshotName)s." #, python-format msgid "Delete Volume %(volume_id)s completed." msgstr "Mazání svazku %(volume_id)s dokončeno." #, python-format msgid "Delete backup finished, backup %s deleted." msgstr "Smazání zálohy bylo dokončeno, záloha %s smazána." #, python-format msgid "Delete backup started, backup: %s." msgstr "Smazání zálohy bylo zahájeno: záloha: %s." #, python-format msgid "Delete backup with id: %s" msgstr "Smazat zálohu s id: %s" #, python-format msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" msgstr "Mazání snímku %(snap_name)s skupiny jednotnosti: %(group_name)s" #, python-format msgid "Delete cgsnapshot with id: %s" msgstr "Smazání snímku skupiny jednotnosti s id: %s" msgid "Delete consistency group completed successfully." msgstr "Smazání skupiny jednotnosti úspěšně dokončeno." #, python-format msgid "Delete consistency group with id: %s" msgstr "Smazání skupiny jednotnosti s id: %s" #, python-format msgid "" "Delete of backup '%(backup)s' for volume '%(volume)s' finished with warning." msgstr "" "Smazání zálohy '%(backup)s' svazku '%(volume)s' bylo dokončeno s varováním." msgid "Delete snapshot completed successfully" msgstr "Smazání snímku úspěšně dokončeno" #, python-format msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgstr "" "Smazat snímek zdrojové skupiny jednotnosti %(cgId)s, id snímku: " "%(cgsnapshot)s." msgid "Delete snapshot metadata completed successfully." msgstr "Mazání popisných dat snímku úspěšně dokončeno." 
#, python-format msgid "Delete snapshot with id: %s" msgstr "Smazat snímek s id: %s" #, python-format msgid "Delete transfer with id: %s" msgstr "Mazání přenosu s id: %s" msgid "Delete volume metadata completed successfully." msgstr "Mazání popisných dat svazku úspěšně dokončeno." msgid "Delete volume request issued successfully." msgstr "Žádost o smazání svazku úspěšně vytvořena." #, python-format msgid "Delete volume with id: %s" msgstr "Smazat svazek s id: %s" #, python-format msgid "Deleted %(row)d rows from table=%(table)s" msgstr "Smazáno %(row)d řádků z tabulky %(table)s" #, python-format msgid "" "Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " "[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." msgstr "" "Snímek CloudByte úspěšně smazán [%(snap)s] s ohledem na nadřazené svazky " "CloudByte [%(cb_vol)s] a OpenStack [%(stack_vol)s]." #, python-format msgid "Deleted the VM backing: %s." msgstr "Záloha virtuálního stroje %s smazána." #, python-format msgid "Deleted vmdk file: %s." msgstr "Smazán soubor vmdk %s." msgid "Deleted volume successfully." msgstr "Svazek úspěšně smazán." #, python-format msgid "Deleting Volume: %(volume)s" msgstr "Mazání svazku: %(volume)s" #, python-format msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." msgstr "Mazání základního obrazu zálohy='%(basename)s' svazku %(volume)s." #, python-format msgid "Deleting deleteInitiatorGrp %s " msgstr "Rušení mazání skupiny zavaděče %s" #, python-format msgid "Deleting snapshot %(ss)s from %(pro)s" msgstr "Mazání snímku %(ss)s z %(pro)s" #, python-format msgid "Deleting snapshot %s " msgstr "Mazání snímku %s" #, python-format msgid "Deleting snapshot: %s" msgstr "Mazání snímku: %s" #, python-format msgid "Deleting stale snapshot: %s" msgstr "Mazání starého snímku: %s" #, python-format msgid "Deleting unneeded host %(host_name)r." msgstr "Mazání nepotřebného hostitele %(host_name)r." #, python-format msgid "Deleting volume %s " msgstr "Mazání svazku %s" #, python-format msgid "Detach Volume, metadata is: %s." msgstr "Odpojení svazku, popisná data jsou %s." msgid "Detach volume completed successfully." msgstr "Odpojení svazku úspěšně dokončeno." msgid "Determined volume DB was empty at startup." msgstr "Databáze určeného svazku byla při spuštění prázdná." msgid "Determined volume DB was not empty at startup." msgstr "Databáze určeného svazku nebyla při spuštění prázdná." #, python-format msgid "" "Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " "delete anything." msgstr "" "Snímek %(name)s nenalezen pro zálohu %(backing)s. Není třeba nic mazat." #, python-format msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" msgstr "" "Zjišťovací ip adresa %(disc_ip)s byla nalezena na datové+správní podsíti " "%(net_label)s" #, python-format msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" msgstr "" "Zjišťovací ip adresa %(disc_ip)s je použita na datové podsíti %(net_label)s" #, python-format msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" msgstr "Zjišťovací ip adresa %(disc_ip)s je použita na podsíti %(net_label)s" #, python-format msgid "Discovery ip %s is used on mgmt+data subnet" msgstr "Zjišťovací ip adresa %s je použita na datové+správní podsíti" #, python-format msgid "Dissociating volume %s " msgstr "Odlučování svazku %s" #, python-format msgid "Domain id is %s." msgstr "ID domény je %s." #, python-format msgid "Done copying image: %(id)s to volume: %(vol)s." 
msgstr "Kopírování obrazu dokončeno: %(id)s do svazku: %(vol)s." #, python-format msgid "Done copying volume %(vol)s to a new image %(img)s" msgstr "Svazek %(vol)s zkopírován do nového obrazu %(img)s" #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS encryption-at-rest feature not enabled " "in cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Zjištěn cluster GPFS nižší úrovně. Funkce šifrování při nečinnosti GPFS není " "povolena v daemonu clusteru na úrovni %(cur)s - úroveň musí být alespoň " "%(min)s." msgid "Driver initialization completed successfully." msgstr "Zavedení ovladače úspěšně dokončeno." #, python-format msgid "Driver stats: %s" msgstr "Statistiky ovladače: %s" #, python-format msgid "" "E-series proxy API version %(version)s does not support full set of SSC " "extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "Verze API E-series proxy %(version)s nepodporuje úplnou sadu dodatečných " "specifikací SSC. Verze proxy musí být alespoň %(min_version)s." #, python-format msgid "E-series proxy API version %s does not support autosupport logging." msgstr "Verze API E-series proxy %s nepodporuje záznam automatické podpory." #, python-format msgid "EQL-driver: Setup is complete, group IP is \"%s\"." msgstr "Ovladač EQL: Nastavení dokončeno, IP adresa skupiny je \"%s\"." #, python-format msgid "EQL-driver: executing \"%s\"." msgstr "Ovladač EQL: spouštění \"%s\"." #, python-format msgid "Editing Volume %(vol)s with mask %(mask)s" msgstr "Upravování svazku %(vol)s maskou %(mask)s" #, python-format msgid "Elapsed time for clear volume: %.2f sec" msgstr "Uplynulá doba čištění svazku: %.2f vteřin" msgid "Embedded mode detected." msgstr "Zjištěn režim vnoření." msgid "Enabling LVM thin provisioning by default because a thin pool exists." msgstr "" "Protože existuje mělká zásoba, je standardně povoleno mělké poskytování LVM." msgid "Enabling LVM thin provisioning by default because no LVs exist." msgstr "" "Protože neexistují žádné LV, je standardně povoleno mělké poskytování LVM." #, python-format msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" msgstr "Spouštění rozšíření svazku %(vol)s, nová velikost=%(size)s" #, python-format msgid "" "Entering initialize_connection volume=%(vol)s connector=%(conn)s location=" "%(loc)s" msgstr "" "Spouštění zavedení spojení pro svazek %(vol)s, konektor=%(conn)s, umístění=" "%(loc)s" #, python-format msgid "" "Entering terminate_connection volume=%(vol)s connector=%(conn)s location=" "%(loc)s." msgstr "" "Spouštění ukončení spojení se svazkem %(vol)s, konektor=%(conn)s, umístění=" "%(loc)s" #, python-format msgid "Entering unmanage_volume volume = %s" msgstr "Spouštění rušení správy svazku %s." #, python-format msgid "Exploring array subnet label %s" msgstr "Prozkoumávání jmenovky podsítě pole %s" #, python-format msgid "Export record finished, backup %s exported." msgstr "Exportování záznamu bylo dokončeno: záloha: %s exportována." #, python-format msgid "Export record started, backup: %s." msgstr "Exportování záznamu bylo zahájeno: záloha: %s." #, python-format msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." msgstr "Exportován lun %(vol_id)s s id %(lun_id)s." msgid "Extend volume completed successfully." msgstr "Rozšíření svazku úspěšně dokončeno." msgid "Extend volume request issued successfully." msgstr "Žádost o rozšíření svazku úspěšně vytvořena." #, python-format msgid "Extending volume %s." msgstr "Rozšiřování svazku %s." 
#, python-format msgid "" "FAST: capacity stats for policy %(fastPolicyName)s on array %(arrayName)s. " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." msgstr "" "FAST: statistiky kapacity pro zásadu %(fastPolicyName)s v poli " "%(arrayName)s. Celková kapacita v gb: %(total_capacity_gb)lu, volná kapacita " "v gb: %(free_capacity_gb)lu." #, python-format msgid "FC Initiators %(in)s of %(ins)s need registration" msgstr "Zavaděče FC %(in)s v %(ins)s je třeba registrovat" #, python-format msgid "Failed to create host: %(name)s. Check if it exists on the array." msgstr "Nelze vytvořit hostitele: %(name)s. Zkontrolujte zda existuje v poli." #, python-format msgid "" "Failed to create hostgroup: %(name)s. Please check if it exists on the array." msgstr "" "Nelze vytvořit skupinu hostitele: %(name)s. Prosím zkontrolujte zda existuje " "v poli." #, python-format msgid "Failed to open iet session list for %(vol_id)s: %(e)s" msgstr "Nelze otevřít seznam sezení iet pro %(vol_id)s: %(e)s" #, python-format msgid "Fault thrown: %s" msgstr "Vyvolána chyba: %s" #, python-format msgid "Fetched vCenter server version: %s" msgstr "Získaná verze serveru vCenter: %s" #, python-format msgid "Filter %(cls_name)s returned %(obj_len)d host(s)" msgstr "Filtr %(cls_name)s vrátil %(obj_len)d hostitelů" #, python-format msgid "Filtered targets for SAN is: %s" msgstr "Filtrované cíle pro SAN jsou: %s" #, python-format msgid "Fixing previous mount %s which was not unmounted correctly." msgstr "Oprava předchozího připojení %s, které nebylo správně odpojeno." #, python-format msgid "Flash Cache policy set to %s" msgstr "Zásada mezipaměti Flash nastavena na %s" #, python-format msgid "Flexvisor already unassigned volume %(id)s." msgstr "Flexvisor již zrušil přidělení svazku %(id)s." #, python-format msgid "Flexvisor snapshot %(id)s not existed." msgstr "Snímek Flexvisor %(id)s neexistuje." #, python-format msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor úspěšně přidal svazek %(id)s do skupiny %(cgid)s." #, python-format msgid "Flexvisor succeeded to clone volume %(id)s." msgstr "Flexvisor úspěšně naklonoval svazek %(id)s." #, python-format msgid "Flexvisor succeeded to create volume %(id)s from snapshot." msgstr "Flexvisor úspěšně vytvořil svazek %(id)s ze snímku." #, python-format msgid "Flexvisor succeeded to create volume %(id)s." msgstr "Flexvisor úspěšně vytvořil svazek %(id)s." #, python-format msgid "Flexvisor succeeded to delete snapshot %(id)s." msgstr "Flexvisor úspěšně smazal snímek %(id)s." #, python-format msgid "Flexvisor succeeded to extend volume %(id)s." msgstr "Flexvisor úspěšně rozšířil svazek %(id)s." #, python-format msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor úspěšně odstranil svazek %(id)s ze skupiny %(cgid)s." #, python-format msgid "Flexvisor succeeded to unassign volume %(id)s." msgstr "Flexvisor úspěšně zrušil přidělení svazku %(id)s." #, python-format msgid "Flexvisor volume %(id)s does not exist." msgstr "Svazek Flexvisor %(id)s neexistuje." msgid "Force upload to image is disabled, Force option will be ignored." msgstr "Vynucení nahrání do obrazu je zakázáno, vynucení bude ignorováno." #, python-format msgid "Found existing masking view: %(maskingViewName)s." msgstr "Nalezeno existující zamaskování: %(maskingViewName)s." #, python-format msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." 
msgstr "" "Volná kapacita podpůrné vrstvy je: %(free)s, celková kapacita: %(total)s." #, python-format msgid "Generating transfer record for volume %s" msgstr "Vytváření záznamu o přenosu pro svazek %s" #, python-format msgid "Get FC targets %(tg)s to register initiator %(in)s." msgstr "Získávání cílů FC %(tg)s pro registraci zavaděče %(in)s." #, python-format msgid "Get ISCSI targets %(tg)s to register initiator %(in)s." msgstr "Získávání cílů ISCSI %(tg)s pro registraci zavaděče %(in)s." msgid "Get all snapshots completed successfully." msgstr "Získání všech snímků úspěšně dokončeno." msgid "Get all volumes completed successfully." msgstr "Získání všech svazků úspěšně dokončeno." #, python-format msgid "Get domain by name response: %s" msgstr "Získání domény pomocí odpovědi názvem: %s" #, python-format msgid "Get service: %(lbl)s->%(svc)s" msgstr "Získání služby: %(lbl)s->%(svc)s" msgid "Get snapshot metadata completed successfully." msgstr "Získání popisných dat snímku úspěšně dokončeno." msgid "Get snapshot metadata value not implemented." msgstr "Získání hodnot popisných dat snímku není zavedeno." #, python-format msgid "Get the default ip: %s." msgstr "Získání výchozí ip adresy: %s." msgid "Get volume admin metadata completed successfully." msgstr "Získání popisných dat správce svazku úspěšně dokončeno." msgid "Get volume image-metadata completed successfully." msgstr "Získání popisných dat obrazu svazku úspěšně dokončeno." msgid "Get volume metadata completed successfully." msgstr "Získání popisných dat svazku úspěšně dokončeno." msgid "Getting getInitiatorGrpList" msgstr "Získávání seznamu skupin zavaděče" #, python-format msgid "Getting volume information for vol_name=%s" msgstr "Získávání informací o svazku s názvem %s" #, python-format msgid "Going to perform request again %s with valid token." msgstr "Žádost %s bude znovu vytvořena s novou známkou." #, python-format msgid "HDP list: %s" msgstr "Seznam HDP: %s" #, python-format msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" msgstr "Společné části HPE3PAR %(common_ver)s, klient hp3par %(rest_ver)s" #, python-format msgid "HPELeftHand API version %s" msgstr "Verze API HPELeftHand: %s" #, python-format msgid "HTTP exception thrown: %s" msgstr "Vyvolána výjimka HTTP: %s" #, python-format msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." msgstr "ID hypermetra: %(metro_id)s. ID vzdáleného LUN: %(remote_lun_id)s." #, python-format msgid "ISCSI properties: %(properties)s" msgstr "Vlastnosti ISCSI: %(properties)s" msgid "ISCSI provider_location not stored, using discovery." msgstr "Umístění poskytovatele ISCSI neuloženo, bude se zjišťovat." #, python-format msgid "ISCSI volume is: %(volume)s" msgstr "Svazek ISCSI je: %(volume)s" #, python-format msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." msgstr "Obraz %(pool)s/%(image)s závisí na snímku %(snap)s." #, python-format msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" msgstr "Klonování obrazu %(image_id)s bylo neúspěšné. Zpráva: %(msg)s" #, python-format msgid "Image download %(sz).2f MB at %(mbps).2f MB/s" msgstr "Stažen obraz o velikosti %(sz).2f MB rychlostí %(mbps).2f MB/s" #, python-format msgid "Image will locally be converted to raw %s" msgstr "Obraz bude na místní úrovni převeden na prostý %s" #, python-format msgid "Image-volume cache disabled for host %(host)s." msgstr "Mezipaměť obrazu svazku zakázána pro hostitele %(host)s." 
#, python-format msgid "Image-volume cache enabled for host %(host)s." msgstr "Mezipaměť obrazu svazku povolena pro hostitele %(host)s." #, python-format msgid "Import record id %s metadata from driver finished." msgstr "Importování popisných dat záznamu s id %s z ovladače bylo dokončeno." #, python-format msgid "Import record started, backup_url: %s." msgstr "Importování záznamu bylo zahájeno: záloha: %s." #, python-format msgid "Initialize connection: %(volume)s." msgstr "Zavedení spojení: %(volume)s." msgid "Initialize volume connection completed successfully." msgstr "Zavedení spojení se svazkem úspěšně dokončeno." #, python-format msgid "Initialized driver %(name)s version: %(vers)s" msgstr "Zaveden ovladač %(name)s s verzí: %(vers)s" msgid "Initializing extension manager." msgstr "Zavádění správce rozšíření." #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s." msgstr "Názvy zavaděče %(initiatorNames)s nejsou v poli %(storageSystemName)s." #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s. " msgstr "" "Názvy zavaděče %(initiatorNames)s nejsou v poli %(storageSystemName)s. " #, python-format msgid "Initiator group name is %(grp)s for initiator %(iname)s" msgstr "Název skupiny zavaděče je %(grp)s pro zavaděč %(iname)s" #, python-format msgid "LUN %(id)s extended to %(size)s GB." msgstr "LUN %(id)s rozšířen na %(size)s GB." #, python-format msgid "LUN %(lun)s extended to %(size)s GB." msgstr "LUN %(lun)s rozšířen na %(size)s GB." #, python-format msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "Vytvořen LUn %(lun)s o velikosti %(sz)s MB." #, python-format msgid "LUN with given ref %s need not be renamed during manage operation." msgstr "" "LUN se zadaným odkazem %s není třeba během operace správy přejmenovávat." #, python-format msgid "" "Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: " "%(name)s." msgstr "" "Ukončování vytváření svazku: %(volumeName)s, návratový kód: %(rc)lu, slovník " "svazku: %(name)s." #, python-format msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." msgstr "Ukončování mazání svazku: %(volumename)s, návratový kód: %(rc)lu." #, python-format msgid "Leaving initialize_connection: %s" msgstr "Ukončování zavedení připojení: %s" #, python-format msgid "Loaded extension: %s" msgstr "Načteno rozšíření: %s" #, python-format msgid "" "Logical Volume not found when querying LVM info. (vg_name=%(vg)s, lv_name=" "%(lv)s" msgstr "" "Logický svazek nenalezen při dotazování na informace LVM. (Název skupiny " "svazku: %(vg)s, název logického svazku: %(lv)s)" msgid "Manage existing volume completed successfully." msgstr "Správa existujícího svazku úspěšně dokončena." #, python-format msgid "" "Manage operation completed for LUN with new path %(path)s and uuid %(uuid)s." msgstr "" "Operace správy byla dokončena pro LUN s novou cestou %(path)s a uuid " "%(uuid)s." #, python-format msgid "" "Manage operation completed for volume with new label %(label)s and wwn " "%(wwn)s." msgstr "" "Operace správy byla dokončena pro svazek s novou jmenovkou %(label)s a wwn " "%(wwn)s." #, python-format msgid "Manage volume %s" msgstr "Správa svazku %s" msgid "Manage volume request issued successfully." msgstr "Žádost o správu svazku úspěšně vytvořena." #, python-format msgid "Masking view %(maskingViewName)s successfully deleted." msgstr "Maskování %(maskingViewName)s úspěšně smazáno." 
#, python-format msgid "Migrate Volume %(volume_id)s completed." msgstr "Přesun svazku %(volume_id)s dokončen." msgid "Migrate volume completed successfully." msgstr "Přesun svazku úspěšně dokončen." msgid "Migrate volume completion issued successfully." msgstr "Dokončení přesunutí svazku úspěšně vytvořeno." msgid "Migrate volume request issued successfully." msgstr "Žádost o přesun svazku úspěšně vytvořena." #, python-format msgid "Migrating using retype Volume: %(volume)s." msgstr "Přesunování pomocí přetypování svazku: %(volume)s." #, python-format msgid "" "Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to %(new_snap_cpg)s." msgstr "" "Změna snímku společné skupiny poskytování svazku %(volume_name)s z " "%(old_snap_cpg)s na %(new_snap_cpg)s." #, python-format msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" msgstr "" "Změna uživatele společné skupiny poskytování svazku %(volume_name)s z " "%(old_cpg)s na %(new_cpg)s" #, python-format msgid "Modifying %s comments." msgstr "Měnění komentářů svazku %s" msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." msgstr "" "Modul PyWBEM není nainstalován. Nainstalujte ho pomocí balíčku python-" "pywbem." msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." msgstr "" "Modul PyWBEM není nainstalován. Nainstalujte ho pomocí balíčku python-pywbem." #, python-format msgid "Mounting volume: %s ..." msgstr "Připojování svazku: %s ..." #, python-format msgid "Mounting volume: %s succeeded" msgstr "Připojování svazku: %s úspěšně provedeno" #, python-format msgid "" "NON-FAST: capacity stats for pool %(poolName)s on array %(arrayName)s " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." msgstr "" "NON-FAST: statistiky kapacity pro zásobu %(poolName)s v poli %(arrayName)s. " "Celková kapacita v gb: %(total_capacity_gb)lu, volná kapacita v gb: " "%(free_capacity_gb)lu." msgid "Need to remove FC Zone, building initiator target map" msgstr "Je třeba odstranit zónu FC, probíhá sestavování mapu cílů zavaděče" msgid "Need to remove FC Zone, building initiator target map." msgstr "Je třeba odstranit zónu FC, probíhá sestavování mapu cílů zavaděče." msgid "" "Neither security file nor plain text credentials are specified. Security " "file under home directory will be used for authentication if present." msgstr "" "Není zadán bezpečnostní soubor ani ověřovací údaje v prostém textu. Pokud " "existuje bezpečnostní soubor v domovském adresáři, bude použit pro ověření." #, python-format msgid "" "NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " "loaded." msgstr "" "Načteny ovladač NetApp druhu %(storage_family)s a protokol " "%(storage_protocol)s." #, python-format msgid "New Cinder secure environment indicator file created at path %s." msgstr "Nový soubor indikující bezpečné prostředí Cinder vytvořen v cestě %s." #, python-format msgid "New str info is: %s." msgstr "Nové informace o str jsou: %s" #, python-format msgid "No dpkg-query info found for %(pkg)s package." msgstr "Nenalezeny žádné informace balíčku %(pkg)s pomocí dpkq-query." #, python-format msgid "No igroup found for initiator %s" msgstr "Pro zavaděče %s nebyla nalezena žádná zaváděcí skupina" #, python-format msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" msgstr "Není přítomen žádný cíl iscsi pro svazek s id %(vol_id)s: %(e)s" #, python-format msgid "No need to extend volume %s as it is already the requested new size." 
msgstr "" "Není třeba rozšiřovat svazek %s protože již má požadovanou novou velikost." #, python-format msgid "" "No replication synchronization session found associated with source volume " "%(source)s on %(storageSystem)s." msgstr "" "Zdrojový svazek %(source)s na %(storageSystem)s nemá přiděleno žádné sezení " "synchronizace replikace." #, python-format msgid "" "No restore point found for backup='%(backup)s' of volume %(volume)s although " "base image is found - forcing full copy." msgstr "" "Pro zálohu='%(backup)s' svazku %(volume)s nebyl nalezen žádný bod obnovy, " "ale základní obraz byl nalezen - je vynucena úplné kopírování." #, python-format msgid "No rpm info found for %(pkg)s package." msgstr "Nenalezeny žádné informace balíčku %(pkg)s pomocí rpm." #, python-format msgid "No volume found for CG: %(cg)s." msgstr "Nenalezen žádný cílové svazek pro skupinu jednotnosti: %(cg)s." #, python-format msgid "OpenStack OS Version Info: %(info)s" msgstr "Informace o verzi Openstack OS: %(info)s" #, python-format msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "Přepisování svazku %(volume_id)s obnovou zálohy %(backup_id)s" #, python-format msgid "Params for add volume request: %s." msgstr "Parametry pro žádost o přidání svazku: %s." #, python-format msgid "Parse_loc: %s" msgstr "Zpracování umístění: %s" #, python-format msgid "Performing post clone for %s" msgstr "Provádění operací po klonování pro %s" #, python-format msgid "Performing secure delete on volume: %s" msgstr "Provádění bezpečného smazání svazku: %s" msgid "Plain text credentials are being used for authentication" msgstr "Pro ověření jsou použity přihlašovací údaje v prostém textu" #, python-format msgid "Pool id is %s." msgstr "ID zásoby je %s." #, python-format msgid "Port group instance name is %(foundPortGroupInstanceName)s." msgstr "Název instance skupiny portu je %(foundPortGroupInstanceName)s." #, python-format msgid "Post clone resize LUN %s" msgstr "Provádění změny velikosti po klonování pro LUN %s" #, python-format msgid "Prefer use target wwpn %(wwpn)s" msgstr "Upřednostňováno použití cílového wwpn %(wwpn)s" #, python-format msgid "Profile %s has been deleted." msgstr "Profil %s byl smazán." msgid "Promote volume replica completed successfully." msgstr "Povýšení repliky svazku úspěšně dokončeno." #, python-format msgid "Protection domain id: %(domain_id)s." msgstr "ID ochranné domény: %(domain_id)s." #, python-format msgid "Protection domain name: %(domain_name)s." msgstr "Název ochranné domény: %(domain_name)s." msgid "Proxy mode detected." msgstr "Zjištěn režim proxy." #, python-format msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" msgstr "" "Odstraňování smazaných řádků starších než %(age)d dní z tabulky %(table)s" #, python-format msgid "QoS: %s." msgstr "QoS: %s." #, python-format msgid "Query capacity stats response: %s." msgstr "Odpověď dotazu na statistiky kapacity: %s." msgid "" "RBD striping not supported - ignoring configuration settings for rbd striping" msgstr "" "Odejmutí RBD není podporováno - nastavení konfigurace pro odebrání rbd bude " "ignorováno" #, python-format msgid "RBD volume %s not found, allowing delete operation to proceed." msgstr "Svazek RBD %s nebyl nalezen umožňující v pokračování operace smazání." #, python-format msgid "" "REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify server's " "certificate: %(verify_cert)s." msgstr "" "IP adresa serveru REST: %(ip)s, port: %(port)s, uživatelské jméno: %(user)s. 
" "Ověření certifikátu serveru: %(verify_cert)s." #, python-format msgid "Re-using existing purity host %(host_name)r" msgstr "Znovu používání existujícího hostitele Purity %(host_name)r" msgid "Reconnected to coordination backend." msgstr "Znovu připojeno k podpůrné vrstvě pro koordinaci." msgid "Reconnecting to coordination backend." msgstr "Znovu připojování k podpůrné vrstvě pro koordinaci." #, python-format msgid "Registering image in cache %s" msgstr "Registrování obrazu v mezipaměti %s" #, python-format msgid "" "Relocating volume: %s to a different datastore due to insufficient disk " "space on current datastore." msgstr "" "Přemisťování svazku %s do jiného datového úložiště z důvodu nedostatečného " "místa na disku v současném úložišti." #, python-format msgid "Remote return FC info is: %s." msgstr "Informace o vzdálené FC vrátily: %s." msgid "Remove volume export completed successfully." msgstr "Odstranění exportu svazku úspěšně dokončeno." #, python-format msgid "Removed %s from cg." msgstr "Odebírání %s ze skupiny jednotnosti." #, python-format msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s" msgstr "Odstraňování ACL ze svazku %(vol)s pro skupinu zavaděče %(igrp)s" #, python-format msgid "Removing iscsi_target for Volume ID: %s" msgstr "Odstraňování cíle iscsi pro svazek s ID %s" #, python-format msgid "Removing iscsi_target for volume: %s" msgstr "Odstraňování cíle iscsi pro svazek %s" #, python-format msgid "Removing iscsi_target for: %s" msgstr "Odstraňování cíle iscsi pro %s" #, python-format msgid "Removing iscsi_target: %s" msgstr "Odstraňování cíle iscsi %s" #, python-format msgid "Removing non-active host: %(host)s from scheduler cache." msgstr "Odstraňování neaktivního hostitele: %(host)s z mezipaměti plánovače." #, python-format msgid "Removing volumes from cg %s." msgstr "Odebírání svazků ze skupiny jednotnosti %s." #, python-format msgid "Rename Volume %(volume_id)s completed." msgstr "Přejmenování svazku %(volume_id)s dokončeno." #, python-format msgid "Renaming %(id)s from %(current_name)s to %(new_name)s." msgstr "Přejmenovávání %(id)s z %(current_name)s na %(new_name)s." #, python-format msgid "Renaming backing VM: %(backing)s to %(new_name)s." msgstr "Přejmenovávání zálohy virtuálního stroje %(backing)s na %(new_name)s." #, python-format msgid "Renaming existing snapshot %(ref_name)s to %(new_name)s" msgstr "Přejmenovávání existujícího snímku %(ref_name)s na %(new_name)s" #, python-format msgid "Renaming existing volume %(ref_name)s to %(new_name)s" msgstr "Přejmenovávání existujícího svazku %(ref_name)s na %(new_name)s" #, python-format msgid "Requested image %(id)s is not in raw format." msgstr "Požadovaný obraz %(id)s není v prostém formátu." #, python-format msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." msgstr "" "Zažádáno o sjednocené nastavení: %(storage_family)s a %(storage_protocol)s." msgid "Reserve volume completed successfully." msgstr "Rezervace všech svazků úspěšně dokončena." #, python-format msgid "" "Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." msgstr "" "Resetování stavu zálohy bylo zahájeno, id zálohy: %(backup_id)s, stav: " "%(status)s." #, python-format msgid "Resetting backup %s to available (was restoring)." msgstr "Resetování zálohy %s na available (stav byl restoring)." #, python-format msgid "Resetting backup %s to error (was creating)." msgstr "Resetování zálohy %s na error (stav byl creating)." 
#, python-format msgid "" "Resetting volume %(vol_id)s to previous status %(status)s (was backing-up)." msgstr "" "Resetování svazku %(vol_id)s do předchozího stavu %(status)s (stav byl " "\"backing-up\")." #, python-format msgid "Resizing LUN %s directly to new size." msgstr "Měnění velikosti LUN %s přímo na její velikost." #, python-format msgid "Resizing file to %sG" msgstr "Měnění velikosti souboru na %sG" #, python-format msgid "Resizing file to %sG..." msgstr "Měnění velikosti souboru na %sG..." #, python-format msgid "" "Restore backup finished, backup %(backup_id)s restored to volume " "%(volume_id)s." msgstr "" "Obnovení zálohy bylo dokončeno: záloha: %(backup_id)s obnovena do svazku: " "%(volume_id)s." #, python-format msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" "Obnovení zálohy bylo zahájeno: záloha: %(backup_id)s svazek: %(volume_id)s." #, python-format msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "Obnovování zálohy %(backup)s do svazku %(volume)s" #, python-format msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "Obnovování zálohy %(backup_id)s do svazku %(volume_id)s" msgid "Resume volume delete completed successfully." msgstr "Obnovení smazání svazku úspěšné dokončeno." #, python-format msgid "Resuming delete on backup: %s." msgstr "Pokračování ve smazání zálohy: %s." #, python-format msgid "Retrieving secret for service: %s." msgstr "Získávání tajného klíče pro službu: %s." #, python-format msgid "Retrieving target for service: %s." msgstr "Získávání cílů pro službu: %s." #, python-format msgid "Return FC info is: %s." msgstr "Informace o FC vrátily: %s." #, python-format msgid "" "Returning connection_info: %(info)s for volume: %(volume)s with connector: " "%(connector)s." msgstr "" "Předané informace o připojení: %(info)s pro svazek: %(volume)s s konektorem: " "%(connector)s." #, python-format msgid "Returning random Port Group: %(portGroupName)s." msgstr "Vrácení náhodné skupiny portu: %(portGroupName)s." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartcache from (name: %(old_name)s, id: " "%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) successfully." msgstr "" "Přetypování chytré mezipaměti LUN(id: %(lun_id)s) z (název: %(old_name)s, " "id: %(old_id)s) na (název: %(new_name)s, id: %(new_id)s) bylo úspěšně " "dokončeno." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartpartition from (name: %(old_name)s, id: " "%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) success." msgstr "" "Přetypování chytrého oddílu LUN(id: %(lun_id)s) z (název: %(old_name)s, id: " "%(old_id)s) na (název: %(new_name)s, id: %(new_id)s) bylo úspěšně dokončeno. " #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartqos from %(old_qos_value)s to %(new_qos)s " "success." msgstr "" "Přetypování chytré QoS LUN(id: %(lun_id)s) z %(old_qos_value)s na " "%(new_qos)s bylo úspěšně dokončeno. " #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smarttier policy from %(old_policy)s to " "%(new_policy)s success." msgstr "" "Přetypování chytré vrstvy LUN(id: %(lun_id)s) z %(old_policy)s na " "%(new_policy)s bylo úspěšně dokončeno. " #, python-format msgid "Retype Volume %(volume_id)s is completed." msgstr "Přetypování svazku %(volume_id)s dokončeno." #, python-format msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s." msgstr "" "Přetypování svazku %(volume_id)s bylo provedeno a svazek byl přesunut do " "zásoby %(pool_id)s." 
#, python-format msgid "" "Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to " "%(old_snap_cpg)s." msgstr "" "Přetypování společné skupiny poskytování svazku %(volume_name)s vráceno z " "%(new_snap_cpg)s zpět na %(old_snap_cpg)s." msgid "Retype volume completed successfully." msgstr "Přetypování svazku úspěšně dokončeno." msgid "Retype volume request issued successfully." msgstr "Žádost o přetypování svazku úspěšně vytvořena." msgid "Retype was to same Storage Profile." msgstr "Přetypování bylo na stejný profil úložiště." #, python-format msgid "Review shares: %s" msgstr "Kontrola sdílení: %s" msgid "Roll detaching of volume completed successfully." msgstr "Provedení odpojení svazku úspěšně dokončeno." #, python-format msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" msgstr "" "Spouštění úkolu shlukování nejnovějšího ssc pro %(server)s a virtuálního " "serveru %(vs)s" #, python-format msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" msgstr "" "Spouštění úkolu obnovy zastaralého ssc pro %(server)s a virtuálního serveru " "%(vs)s" #, python-format msgid "Running with vmemclient version: %s" msgstr "Spuštěno s vmemclient verze %s" #, python-format msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s" msgstr "Uložit informace o službě pro %(svc)s -> %(hdp)s, %(path)s" #, python-format msgid "" "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " "image id: %(id)s." msgstr "" "ScaleIO kopírování obrazu do svazku %(vol)s, služba obrazu: %(service)s, id " "obrazu: %(id)s." #, python-format msgid "" "ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s " "image meta: %(meta)s." msgstr "" "ScaleIO kopírování svazku do obrazu. Svazek: %(vol)s, služba obrazu: " "%(service)s, popisná data obrazu: %(meta)s." #, python-format msgid "" "ScaleIO create cloned volume: source volume %(src)s to target volume %(tgt)s." msgstr "" "ScaleIO vytváření klonovaného svazku: zdrojový svazek %(src)s do cílového " "svazku %(tgt)s." #, python-format msgid "" "ScaleIO create volume from snapshot: snapshot %(snapname)s to volume " "%(volname)s." msgstr "" "ScaleIO vytvoření svazku ze snímku: snímek %(snapname)s do svazku " "%(volname)s." msgid "ScaleIO delete snapshot." msgstr "ScaleIO mazání snímku." #, python-format msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s." msgstr "ScaleIO rozšíření svazku: svazek %(volname)s na velikost %(new_size)s." #, python-format msgid "ScaleIO get domain id by name request: %s." msgstr "ScaleIO získání id domény pomocí zažádání o název: %s." #, python-format msgid "ScaleIO get pool id by name request: %s." msgstr "ScaleIO získání id zásoby pomocí zažádání o název: %s." #, python-format msgid "ScaleIO rename volume request: %s." msgstr "Žádost o přejmenování svazku ScaleIO: %s." #, python-format msgid "ScaleIO volume %(vol)s was renamed to %(new_name)s." msgstr "Svazek ScaleIO %(vol)s byl přejmenován na %(new_name)s." #, python-format msgid "" "Secondary ssh hosts key file %(kwargs)s will be loaded along with %(conf)s " "from /etc/cinder.conf." msgstr "" "Druhotný soubor ssh klíčů hostitele %(kwargs)s bude načten spolu s %(conf)s " "z /etc/cinder.conf." msgid "Session might have expired. Trying to relogin" msgstr "Sezení mohlo vypršet. Bude proveden pokus o další přihlášení" #, python-format msgid "Set newly managed Cinder volume name to %(name)s." msgstr "Nastavit název nově spravovaného svazku Cinder na %(name)s." 
#, python-format msgid "Set tgt CHAP secret for service: %s." msgstr "Nastavování tajného klíče cíle CHAP pro službu: %s." #, python-format msgid "Setting host %(host)s to %(state)s." msgstr "Nastavování hostitele %(host)s na %(state)s." #, python-format msgid "Setting snapshot %(snap)s to online_flag %(flag)s" msgstr "Nastavování svazku %(snap)s příznakem online %(flag)s" #, python-format msgid "Setting volume %(vol)s to online_flag %(flag)s" msgstr "Nastavování svazku %(vol)s příznakem online %(flag)s" #, python-format msgid "Skipping deletion of volume %s as it does not exist." msgstr "Přeskakování mazání svazku %s protože neexistuje." #, python-format msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" "Zajištění exportu přeskočeno. Svazku %s není poskytnut žádný cíl iscsi." #, python-format msgid "" "Skipping image volume %(id)s because it is not accessible by current Tenant." msgstr "" "Přeskakování svazku obrazu %(id)s protože pro současného nájemníka není " "dostupný." #, python-format msgid "" "Skipping remove_export. No iscsi_target is presently exported for volume: %s" msgstr "" "Odstranění exportu přeskočeno. Žádný cíl iscsi není v současnosti exportován " "pro svazek: %s" #, python-format msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" "Odstranění exportu přeskočeno. Svazku %s není poskytnut žádný cíl iscsi." #, python-format msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" msgstr "" "Sdílení SMB %(share)s. Celková velikost %(size)s, celkem přiděleno " "%(allocated)s" #, python-format msgid "Snapshot %(disp)s '%(new)s' is now being managed." msgstr "Snímek %(disp)s '%(new)s' je nyní spravován." #, python-format msgid "" "Snapshot %(disp)s '%(vol)s' is no longer managed. Snapshot renamed to " "'%(new)s'." msgstr "" "Snímek %(disp)s '%(vol)s' již není spravován. Snímek přejmenován na " "'%(new)s'." #, python-format msgid "Snapshot %s created successfully." msgstr "Snímek %s úspěšně vytvořen." #, python-format msgid "Snapshot %s does not exist in backend." msgstr "Snímek %s neexistuje v podpůrné vrstvě," #, python-format msgid "Snapshot %s not found" msgstr "Snímek %s nenalezen" #, python-format msgid "Snapshot %s was deleted successfully." msgstr "Snímek %s byl úspěšně smazán." #, python-format msgid "Snapshot '%(ref)s' renamed to '%(new)s'." msgstr "Snímek '%(ref)s' přejmenován na '%(new)s'. " msgid "Snapshot create request issued successfully." msgstr "Žádost o vytvoření snímku úspěšně zadána." #, python-format msgid "" "Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." msgstr "" "Vytvoření snímku %(cloneName)s dokončeno. Zdrojový svazek: %(sourceName)s." msgid "Snapshot delete request issued successfully." msgstr "Žádost o smazání snímku úspěšně vytvořena." msgid "Snapshot force create request issued successfully." msgstr "Žádost o vynucení vytvoření snímku úspěšně zadána." #, python-format msgid "" "Snapshot record for %s is not present, allowing snapshot_delete to proceed." msgstr "Záznam snímku %s není přítomen, operace smazání snímku je povolena." msgid "Snapshot retrieved successfully." msgstr "Snímek úspěšně získán." #, python-format msgid "Snapshot volume %(vol)s into snapshot %(id)s." msgstr "Svazek snímku %(vol)s do snímku %(id)s." #, python-format msgid "Snapshot volume response: %s." msgstr "Odpověď svazku snímku: %s." #, python-format msgid "Snapshot: %(snapshot)s: not found on the array." msgstr "Snímek: %(snapshot)s: nenalezen v poli." 
#, python-format msgid "Source Snapshot: %s" msgstr "Zdrojový snímek: %s" #, python-format msgid "" "Source and destination ZFSSA shares are the same. Do nothing. volume: %s" msgstr "" "Zdroj a cíl sdílení ZFSAA jsou stejné. Nebude nic provedeno, svazek: %s" #, python-format msgid "Start to create cgsnapshot for consistency group: %(group_name)s" msgstr "Vytvoření snímku pro skupinu jednotnosti %(group_name)s zahájeno." #, python-format msgid "Start to create consistency group: %(group_name)s id: %(id)s" msgstr "Vytvoření skupiny jednotnosti zahájeno: %(group_name)s id: %(id)s" #, python-format msgid "Start to delete consistency group: %(cg_name)s" msgstr "Mazání skupiny jednotnosti zahájeno: %(cg_name)s" #, python-format msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "Spouštění uzle %(topic)s (verze %(version_string)s)" #, python-format msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "Spouštění ovladače svazku %(driver_name)s (%(version)s)" #, python-format msgid "Storage Group %(storageGroupName)s successfully deleted." msgstr "Skupina úložiště %(storageGroupName)s úspěšně smazána." #, python-format msgid "Storage Group %s was empty." msgstr "Skupina úložiště %s byla prázdná." #, python-format msgid "Storage group not associated with the policy. Exception is %s." msgstr "Skupina úložiště není přidružena k zásadě. Výjimka je %s." #, python-format msgid "" "Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " "%(pool_id)s." msgstr "" "Názvy zásob úložiště: %(pools)s, název zásoby úložiště: %(pool)s, id zásoby: " "%(pool_id)s." #, python-format msgid "Successful login by user %s" msgstr "Uživatel %s se úspěšně přihlásil" #, python-format msgid "Successfully added %(volumeName)s to %(sgGroupName)s." msgstr "%(volumeName)s úspěšně přidáno do %(sgGroupName)s." #, python-format msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" msgstr "Úspěšně proveden úkol ssc pro %(server)s a virtuální server %(vs)s" #, python-format msgid "" "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" msgstr "" "Úspěšně proveden úkol obnovy zastaralého ssc pro %(server)s a virtuální " "server %(vs)s" #, python-format msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr "Úspěšně zkopírován disk %(src)s do %(dest)s." #, python-format msgid "Successfully create volume %s" msgstr "Svazek %s úspěšně vytvořen" #, python-format msgid "" "Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack volume " "[%(stack_vol)s]." msgstr "" "Svazek CloudByte úspěšně vytvořen [%(cb_vol)s] s ohledem na svazek OpenStack " "[%(stack_vol)s]." #, python-format msgid "Successfully created clone: %s." msgstr "Úspěšně vytvořen klon: %s." #, python-format msgid "" "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." msgstr "Snímek %(snap)s pro zálohu svazku %(backing)s úspěšně vytvořen." #, python-format msgid "Successfully created snapshot: %s." msgstr "Snímek %s úspěšně vytvořen." #, python-format msgid "Successfully created volume backing: %s." msgstr "Záloha svazku %s úspěšně vytvořena." #, python-format msgid "Successfully deleted %s." msgstr "%s úspěšně smazáno." #, python-format msgid "Successfully deleted file: %s." msgstr "Soubor %s úspěšně smazán." #, python-format msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." msgstr "Snímek %(name)s pro zálohu %(backing)s byl úspěšně smazán." 
#, python-format msgid "Successfully deleted snapshot: %s" msgstr "Snímek %s úspěšně smazán" #, python-format msgid "Successfully deleted snapshot: %s." msgstr "Snímek %s úspěšně smazán." #, python-format msgid "" "Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " "OpenStack volume [%(stack_vol)s]." msgstr "" "Svazek CloudByte [%(cb_vol)s], odpovídající svazku OpenStack " "[%(stack_vol)s], byl úspěšně smazán." #, python-format msgid "Successfully deleted volume: %s" msgstr "Svazek %s úspěšně smazán" #, python-format msgid "Successfully extended virtual disk: %(path)s to %(size)s GB." msgstr "Virtuální disk pěněně rozšířen: %(path)s na %(size)s GB." #, python-format msgid "Successfully extended volume %(volume_id)s to size %(size)s." msgstr "Svazek %(volume_id)s úspěšně rozšířen na velikost %(size)s." #, python-format msgid "Successfully extended volume: %(vol)s to size: %(size)s GB." msgstr "Svazek %(vol)s úspěšně rozšířen na velikost %(size)s GB." #, python-format msgid "Successfully got volume information for volume %s" msgstr "Informace o svazku %s úspěšně získány" #, python-format msgid "Successfully initialized connection with volume: %(volume_id)s." msgstr "Spojení se svazkem %(volume_id)s úspěšně zavedeno." #, python-format msgid "" "Successfully initialized connection. target_wwn: %(target_wwn)s, " "initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." msgstr "" "Připojení úspěšně zavedeno. Cílové wwn: %(target_wwn)s, mapa cílů zavaděče: " "%(initiator_target_map)s, lun: %(target_lun)s." #, python-format msgid "" "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." msgstr "Záloha svazku %(backing)s úspěšně přesunuta do složky %(fol)s." #, python-format msgid "" "Successfully relocated volume backing: %(backing)s to datastore: %(ds)s and " "resource pool: %(rp)s." msgstr "" "Záloha svazku %(backing)s úspěšně přemístěna do datového úložiště %(ds)s a " "zásoby zdrojů %(rp)s." msgid "Successfully retrieved InitiatorGrpList" msgstr "Seznam skupin zavaděče úspěšně získán" #, python-format msgid "Successfully setup driver: %(driver)s for server: %(ip)s." msgstr "Ovladač %(driver)s úspěšně nastaven pro server %(ip)s." #, python-format msgid "Successfully terminated connection for volume: %(volume_id)s." msgstr "Spojení se svazkem %(volume_id)s úspěšně ukončeno." #, python-format msgid "" "Successfully update volume stats. backend: %(volume_backend_name)s, vendor: " "%(vendor_name)s, driver version: %(driver_version)s, storage protocol: " "%(storage_protocol)s." msgstr "" "Statistiky svazku úspěšně aktualizovány. Podpůrná vrstva: " "%(volume_backend_name)s, prodejce: %(vendor_name)s, verze ovladače: " "%(driver_version)s, protokol úložiště: %(storage_protocol)s." #, python-format msgid "" "Successfully updated CloudByte volume [%(cb_vol)s] corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "Svazek CloudByte úspěšně aktualizován [%(cb_vol)s] s ohledem na svazek " "OpenStack [%(ops_vol)s]." #, python-format msgid "Switching volume %(vol)s to profile %(prof)s." msgstr "Přepínání svazku %(vol)s do profilu %(prof)s." #, python-format msgid "System %(id)s has %(status)s status." msgstr "Systém %(id)s má stav %(status)s." #, python-format msgid "" "System with controller addresses [%s] is not registered with web service." msgstr "" "Systém s adresami kontroléru [%s] není zaregistrován u internetové služby." #, python-format msgid "Target is %(map)s! Targetlist = %(tgtl)s." msgstr "Cíl je %(map)s! Seznam cílů = %(tgtl)s." 
#, python-format msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." msgstr "Cílové wwns v zamaskování %(maskingView)s: %(targetWwns)s." #, python-format msgid "Terminate connection: %(volume)s." msgstr "Ukončení připojení: %(volume)s." msgid "Terminate volume connection completed successfully." msgstr "Ukončení připojení svazku úspěšně dokončeno." msgid "" "The NAS file operations will be run as non privileged user in secure mode. " "Please ensure your libvirtd settings have been configured accordingly (see " "section 'OpenStack' in the Quobyte Manual." msgstr "" "Operace se souborem NAS budou spouštěny pod účtem uživatele bez oprávnění " "správce v zabezpečeném režimu. Ujistěte se prosím, že vaše nastavení " "libvirtd je správné (přečtěte si část 'OpenStack' v příručce Quobyte)." #, python-format msgid "The QoS sepcs is: %s." msgstr "Specifikace QoS jsou %s." #, python-format msgid "" "The image was successfully converted, but image size is unavailable. src " "%(src)s, dest %(dest)s. %(error)s" msgstr "" "Obraz byl úspěšně převeden, ale velikost obrazu není dostupná. Zdroj " "%(src)s, cíl %(dest)s. %(error)s" #, python-format msgid "" "The multi-attach E-Series host group '%(label)s' already exists with " "clusterRef %(clusterRef)s" msgstr "" "Již existuje skupina vícenásobného připojení hostitele E-Series '%(label)s' " "s odkazem clusteru %(clusterRef)s" #, python-format msgid "The pool_name from extraSpecs is %(pool)s." msgstr "Název zásoby z dodatečných specifikací je %(pool)s." #, python-format msgid "The same hostid is: %s." msgstr "Stejné id hostitele je: %s." #, python-format msgid "The storage group found is %(foundStorageGroupInstanceName)s." msgstr "Nalezená skupiny úložiště je %(foundStorageGroupInstanceName)s." #, python-format msgid "" "The volume belongs to more than one storage group. Returning storage group " "%(sgName)s." msgstr "" "Svazek patří do více než jedné skupině úložiště. Předána skupina úložiště " "%(sgName)s." #, python-format msgid "" "There is no backing for the snapshotted volume: %(snap)s. Not creating any " "backing for the volume: %(vol)s." msgstr "" "Pro snímek svazku %(snap)s neexistuje záloha. Nebude vytvořena žádná záloha " "pro svazek: %(vol)s." #, python-format msgid "" "There is no backing for the source volume: %(src)s. Not creating any backing " "for volume: %(vol)s." msgstr "" "Pro zdrojový svazek %(src)s neexistuje záloha. Nebude vytvořena žádná záloha " "pro svazek: %(vol)s." #, python-format msgid "There is no backing for the volume: %s. Need to create one." msgstr "Svazek %s nemá žádnou zálohu. Je třeba ji vytvořit." #, python-format msgid "There is no backing for volume: %s; no need to extend the virtual disk." msgstr "Svazek %s nemá žádnou zálohu; není třeba rozšiřovat virtuální disk." #, python-format msgid "There is no backing, and so there is no snapshot: %s." msgstr "Záloha neexistuje, tím pádem ani snímek %s neexistuje." #, python-format msgid "There is no backing, so will not create snapshot: %s." msgstr "Záloha neexistuje, snímek %s nebude vytvořen." #, python-format msgid "" "There is no snapshot point for the snapshotted volume: %(snap)s. Not " "creating any backing for the volume: %(vol)s." msgstr "" "Pro snímek svazku %(snap)s neexistuje žádný bod snímku. Záloha pro svazek " "%(vol)s nebude vytvořena." msgid "Token is invalid, going to re-login and get a new one." msgstr "Známka je neplatná, přihlášení bude opakováno pro získání nového." msgid "Transfer volume completed successfully." 
msgstr "Přenos svazku úspěšně dokončen." #, python-format msgid "Tried to delete non-existent vdisk %s." msgstr "Pokus o smazání neexistujícího virtuálního disku %s." #, python-format msgid "" "Tried to delete snapshot %s, but was not found in Datera cluster. Continuing " "with delete." msgstr "" "Pokus o smazání snímku %s, který ale nebyl nalezen v clusteru Datera. " "Pokračuje se ve smazání." #, python-format msgid "" "Tried to delete volume %s, but it was not found in the Datera cluster. " "Continuing with delete." msgstr "" "Pokus o smazání svazku %s, který ale nebyl nalezen v clusteru Datera. " "Pokračuje se ve smazání." #, python-format msgid "Trying to unmap volume from all sdcs before deletion: %s." msgstr "Pokus o zrušení mapování svazku ze všech sdcs před smazáním: %s" msgid "Unable to accept transfer for volume, because it is in maintenance." msgstr "Nelze přijmout přenos svazku, protože je v údržbě." msgid "Unable to attach volume, because it is in maintenance." msgstr "Nelze připojit svazek, protože je v údržbě." msgid "Unable to create the snapshot for volume, because it is in maintenance." msgstr "Nelze vytvořit snímek svazku, protože je v údržbě." msgid "Unable to delete the volume metadata, because it is in maintenance." msgstr "Popisná data svazku nelze smazat, protože svazek je v režimu údržby." msgid "Unable to detach volume, because it is in maintenance." msgstr "Nelze odpojit svazek, protože je v údržbě." msgid "" "Unable to initialize the connection for volume, because it is in maintenance." msgstr "Nelze zavést připojení ke svazku, protože je v údržbě." msgid "Unable to parse XML input." msgstr "Nelze zpracovat vstup XML." #, python-format msgid "Unable to serialize field '%s' - excluding from backup" msgstr "Nelze serializovat pole '%s' - je vynecháno ze zásoby" msgid "Unable to update the metadata for volume, because it is in maintenance." msgstr "" "Nelze aktualizovat popisná data svazku, protože svazek je v režimu údržby." msgid "Unable to update volume, because it is in maintenance." msgstr "Nelze aktualizovat svazek, protože je v údržbě." #, python-format msgid "Unexporting lun %s." msgstr "Rušení exportu lun %s." #, python-format msgid "Unmanage snapshot with id: %s" msgstr "Zrušit správu snímku s id: %s" #, python-format msgid "Unmanage volume %(volume_id)s completed." msgstr "Rušení správy svazku %(volume_id)s dokončeno." #, python-format msgid "Unmanage volume %s" msgstr "Zrušit správu svazku %s" #, python-format msgid "Unmanage volume with id: %s" msgstr "Zrušit správu svazku s id: %s" #, python-format msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." msgstr "Správa zrušena u LUN se současnou cestou %(path)s a uuid %(uuid)s." #, python-format msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." msgstr "" "Správa byla zrušena pro svazek se současnou jmenovkou %(label)s a wwn " "%(wwn)s." #, python-format msgid "Unmap volume: %(volume)s." msgstr "Zrušení mapování svazku: %(volume)s." msgid "Unreserve volume completed successfully." msgstr "Zrušení rezervace všech svazků úspěšně dokončena." #, python-format msgid "" "Update Consistency Group: %(group)s. This adds and/or removes volumes from a " "CG." msgstr "" "Aktualizace skupiny jednotnosti: %(group)s. Toto přidá a/nebo odstraní " "svazky ze skupiny jednotnosti." #, python-format msgid "Update migrated volume %(new_volume)s completed." msgstr "Aktualizace přesunutého svazku %(new_volume)s dokončena." msgid "Update readonly setting on volume completed successfully." 
msgstr "Aktualizace nastavení položek pro čtení ve svazku úspěšně dokončena." msgid "Update snapshot metadata completed successfully." msgstr "Aktualizace popisných dat snímku úspěšně dokončena." msgid "Update volume admin metadata completed successfully." msgstr "Aktualizace popisných dat správce svazku úspěšně dokončena." msgid "Update volume metadata completed successfully." msgstr "Aktualizace popisných dat svazku úspěšně dokončena." #, python-format msgid "Updated Consistency Group %s" msgstr "Skupina jednotnosti %s aktualizována" #, python-format msgid "" "Updating consistency group %(id)s with name %(name)s description: " "%(description)s add_volumes: %(add_volumes)s remove_volumes: " "%(remove_volumes)s." msgstr "" "Aktualizování skupiny jednotnosti %(id)s s názvem %(name)s, popisem: " "%(description)s, přidané svazky: %(add_volumes)s odstraněné svazky: " "%(remove_volumes)s." #, python-format msgid "Updating snapshot %(id)s with info %(dict)s" msgstr "Aktualizace snímku %(id)s informacemi %(dict)s" #, python-format msgid "Updating status for CG: %(id)s." msgstr "Aktualizace stavu skupiny jednotnosti: %(id)s." #, python-format msgid "Updating storage service catalog information for backend '%s'" msgstr "" "Aktualizování informací o katalogu služby úložiště pro podpůrnou vrstvu '%s'" msgid "Use ALUA when adding initiator to host." msgstr "Použít ALUA při přidávání zavaděče k hostiteli." msgid "Use CHAP when adding initiator to host." msgstr "Použít CHAP při přidávání zavaděče k hostiteli." #, python-format msgid "" "Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." msgstr "" "Používán správce zóny FC %(zm_version)s, ovladač %(drv_name)s " "%(drv_version)s." #, python-format msgid "Using compute cluster(s): %s." msgstr "Použit výpočtové clustery: %s." #, python-format msgid "Using existing initiator group name: %(igGroupName)s." msgstr "Použit existující název skupiny zavaděče: %(igGroupName)s." #, python-format msgid "Using overridden vmware_host_version from config: %s" msgstr "Použita potlačena verze hostitele vmware z nastavení: %s" #, python-format msgid "Using pool %(pool)s instead of %(cpg)s" msgstr "Používání zásoby %(pool)s místo %(cpg)s" #, python-format msgid "Using security file in %s for authentication" msgstr "Pro ověření je použit bezpečnostní soubor v %s" #, python-format msgid "Using service label: %s" msgstr "Použita jmenovka služby: %s" #, python-format msgid "Using target label: %s." msgstr "Použita jmenovka cíle: %s." #, python-format msgid "Value with type=%s is not serializable" msgstr "Hodnota typ=%s není serializovatelná" #, python-format msgid "Virtual volume %(disp)s '%(new)s' is being retyped." msgstr "Probíhá přetypování virtuálního svazku %(disp)s '%(new)s'" #, python-format msgid "Virtual volume %(disp)s '%(new)s' is now being managed." msgstr "Virtuálního svazek %(disp)s '%(new)s' je nyní spravován." #, python-format msgid "" "Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " "%(cpg)s" msgstr "" "Snímek společné skupiny poskytování virtuálního svazku %(disp)s '%(new)s' je " "prázdný, proto bude nastavena na: %(cpg)s" #, python-format msgid "" "Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to " "'%(new)s'." msgstr "" "Virtuální svazek %(disp)s '%(vol)s' již není spravován. Byl přejmenován na " "'%(new)s'." #, python-format msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." msgstr "Virtuálního svazek %(disp)s úspěšně přetypován na %(new_type)s." 
#, python-format msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." msgstr "Virtuální svazek '%(ref)s' přejmenován na '%(new)s'." #, python-format msgid "Vol copy job completed for dest %s." msgstr "Úkol kopírování svazku dokončen u cíle %s." #, python-format msgid "Volume %(volume)s does not have meta device members." msgstr "Svazek %(volume)s nemá členy meta zařízení." #, python-format msgid "" "Volume %(volume)s is already mapped. The device number is %(deviceNumber)s." msgstr "" "Svazek %(volume)s již je namapován. Číslo zařízení je %(deviceNumber)s." #, python-format msgid "Volume %(volumeName)s not in any storage group." msgstr "Svazek %(volumeName)s není v žádné skupině úložiště." #, python-format msgid "" "Volume %(volume_id)s: being created as %(create_type)s with specification: " "%(volume_spec)s" msgstr "" "Svazek %(volume_id)s je vytvářen jako %(create_type)s se specifikací: " "%(volume_spec)s" #, python-format msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" msgstr "Svazek %(volume_name)s (%(volume_id)s) úspěšně vytvořen" #, python-format msgid "Volume %s converted." msgstr "Svazek %s převeden." #, python-format msgid "Volume %s created" msgstr "Svazek %s vytvořen" #, python-format msgid "Volume %s has been transferred." msgstr "Svazek %s byl přenesen." #, python-format msgid "Volume %s is mapping to multiple hosts." msgstr "Mapování svazku %s k více hostitelům." #, python-format msgid "Volume %s is not mapped. No volume to unmap." msgstr "Svazek %s není namapován. Není třeba rušit mapování." #, python-format msgid "Volume %s presented." msgstr "Svazek %s darován." #, python-format msgid "Volume %s retyped." msgstr "Svazek %s přetypován." #, python-format msgid "Volume %s unmanaged." msgstr "Zrušena správa svazku %s." #, python-format msgid "Volume %s: retyped successfully" msgstr "Svazek %s úspěšně přetypován" #, python-format msgid "Volume already mapped, retrieving %(ig)s, %(vol)s" msgstr "Svazek již je namapován. Získávání %(ig)s, %(vol)s" #, python-format msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" msgstr "" "Kopírování svazku o velikosti %(size_in_m).2f MB rychlostí %(mbps).2f MB/s" #, python-format msgid "Volume copy completed (%(size_in_m).2f MB at %(mbps).2f MB/s)." msgstr "" "Kopírování svazku dokončeno (%(size_in_m).2f MB rychlostí %(mbps).2f MB/s)." msgid "Volume created successfully." msgstr "Svazek úspěšně vytvořen." msgid "Volume detach called, but volume not attached." msgstr "Zavoláno odpojení svazku, ale svazek není připojen." msgid "Volume info retrieved successfully." msgstr "Informace o svazku úspěšně získány." #, python-format msgid "Volume name changed from %(tmp)s to %(orig)s" msgstr "Název svazku změněn z %(tmp)s na %(orig)s" #, python-format msgid "Volume name changed from %(tmp)s to %(orig)s." msgstr "Název svazku změněn z %(tmp)s na %(orig)s." msgid "Volume retrieved successfully." msgstr "Svazek úspěšně získán." #, python-format msgid "Volume service: %(label)s. Casted to: %(loc)s" msgstr "Služba svazku: %(label)s. Obsazena do: %(loc)s" #, python-format msgid "Volume status is: %s." msgstr "Stav svazku je: %s." #, python-format msgid "Volume type is %s." msgstr "Typ svazku je %s." #, python-format msgid "" "Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage pool " "id: %(pool_id)s, protection domain id: %(domain_id)s, protection domain " "name: %(domain_name)s." 
msgstr "" "Typ svazku: %(volume_type)s, název zásoby úložiště: %(pool_name)s, id zásoby " "úložiště: %(pool_id)s, id ochranné domény: %(domain_id)s, název ochranné " "domény: %(domain_name)s." msgid "Volume updated successfully." msgstr "Svazek úspěšně aktualizován." #, python-format msgid "Volume with given ref %s need not be renamed during manage operation." msgstr "" "Svazek zadaným odkazem %s není třeba během operace správy přejmenovávat." #, python-format msgid "Volume with the name %s wasn't found, can't unmanage" msgstr "Svazek s názvem %s nebyl nalezen, nelze zrušit správu" #, python-format msgid "" "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " "size: %(backup_size)d, continuing with restore." msgstr "" "Svazek: %(vol_id)s, velikost: %(vol_size)d je větší než záloha: " "%(backup_id)s, velikost: %(backup_size)d, pokračuje se v obnově." #, python-format msgid "WWPN on node %(node)s: %(wwpn)s." msgstr "WWPN na uzlu %(node)s: %(wwpn)s." #, python-format msgid "" "Waiting for volume expansion of %(vol)s to complete, current remaining " "actions are %(action)s. ETA: %(eta)s mins." msgstr "" "Čekání na dokončení rozšíření svazku %(vol)s. Činnosti které je třeba ještě " "provést: %(action)s. Předpokládaný čas dokončení: %(eta)s min." msgid "Waiting for web service array communication." msgstr "Čeká se na komunikaci s polem internetové služby." msgid "Waiting for web service to validate the configured password." msgstr "Čeká se na ověření nastaveného hesla internetovou službou." #, python-format msgid "Will clone a volume from the image volume %(id)s." msgstr "Svazek bude klonován ze svazku obrazu %(id)s." #, python-format msgid "XtremIO SW version %s" msgstr "Verze softwaru XtremIO: %s" #, python-format msgid "ZFSSA version: %s" msgstr "Verze ZFSSA: %s" #, python-format msgid "Zone exists in I-T mode. Skipping zone creation %s" msgstr "Zóna existuje v režimu I-T. Vytváření zóny %s přeskočeno" #, python-format msgid "Zone map to add: %s" msgstr "Mapa zóny pro přidání: %s" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Zásada zónování: %s nebylo rozpoznáno" #, python-format msgid "Zoning policy for Fabric %s" msgstr "Zásady zónování pro Fabric %s" #, python-format msgid "Zoning policy for fabric %s" msgstr "Zásady zónování pro Fabric %s" #, python-format msgid "" "_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" "Operace kontroly kopie svazku: Svazek %(vol)s nemá zadanou operaci " "kopírování virtuálního disku: původní=%(orig)s, nové=%(new)s." #, python-format msgid "_get_service_target hdp: %s." msgstr "Získávání cíle služby hdp: %s" #, python-format msgid "_get_tgt_ip_from_portgroup: Get ip: %s." msgstr "Získání cílové iup adrey ze skupiny portů: Získaná IP adresa: %s." #, python-format msgid "_get_tgt_iqn: iSCSI target iqn is: %s." msgstr "Získání cílového iqn: Cílové iqn pro iSCSI je: %s." #, python-format msgid "" "add_host_with_check. create host success. host name: %(name)s, host id: " "%(id)s" msgstr "" "Přidání hostitele s kontrolou proběhlo úspěšně. Název hostitele: %(name)s, " "id hostitele: %(id)s" #, python-format msgid "add_host_with_check. 
host name: %(name)s, host id: %(id)s" msgstr "" "Přidat hostitele s kontrolou, název hostitele: %(name)s, id hostitele: %(id)s" #, python-format msgid "casted to %s" msgstr "předáno do %s" #, python-format msgid "cgsnapshot %s: created successfully" msgstr "Snímek skupiny jednotnosti %s: úspěšně vytvořen" #, python-format msgid "cgsnapshot %s: deleted successfully" msgstr "Snímek skupiny jednotnosti %s: úspěšně smazán" #, python-format msgid "cgsnapshot %s: deleting" msgstr "Snímek skupiny jednotnosti %s: mazání" #, python-format msgid "config[services]: %s." msgstr "nastavení[služby]: %s." #, python-format msgid "" "create_hostgroup_with_check. Create hostgroup success. hostgroup name: " "%(name)s, hostgroup id: %(id)s" msgstr "" "Vytvoření skupiny hostitele s kontrolou proběhlo úspěšně. Název skupiny " "hostitele: %(name)s, id skupiny hostitele: %(id)s" #, python-format msgid "" "create_hostgroup_with_check. hostgroup name: %(name)s, hostgroup id: %(id)s" msgstr "" "Vytvořit skupinu hostitele s kontrolou, název skupiny hostitele: %(name)s, " "id skupiny hostitele: %(id)s" #, python-format msgid "create_volume: create_lu returns %s" msgstr "Vytvoření svazku: Vytvoření LU vrátilo %s" #, python-format msgid "" "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " "%(tgt_lun_id)s, copy_name: %(copy_name)s." msgstr "" "Vytváření svazku ze snímku: ID zdrojového lun: %(src_lun_id)s, ID cílového " "lun: %(tgt_lun_id)s, název kopie: %(copy_name)s." #, python-format msgid "del_iscsi_conn: hlun not found %s." msgstr "Smazání připojení iSCSI: hlun nenalezen %s." #, python-format msgid "delete lun loc %s" msgstr "Smazání umístění lun %s" #, python-format msgid "" "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " "%(lun_id)s." msgstr "" "Provést mapování, skupina lun: %(lun_group)s, id zobrazení: %(view_id)s, id " "lun: %(lun_id)s." #, python-format msgid "do_setup: %s" msgstr "Zavedení: %s" #, python-format msgid "free capacity of pool %(pool)s is: %(free)s, total capacity: %(total)s." msgstr "" "Volná kapacita zásoby %(pool)s je: %(free)s, celková kapacita: %(total)s." #, python-format msgid "iSCSI Initiators %(in)s of %(ins)s need registration." msgstr "Zavaděče iSCSI %(in)s v %(ins)s je třeba registrovat." #, python-format msgid "iSCSI portal found for service: %s" msgstr "Portál iSCSI nalezen pro službu: %s" #, python-format msgid "igroup %(grp)s found for initiator %(iname)s" msgstr "Pro zavaděč %(iname)s nalezena skupina zavaděče %(grp)s" #, python-format msgid "initialize volume %(vol)s connector %(conn)s" msgstr "Zavedení konektoru %(conn)s pro svazek %(vol)s" #, python-format msgid "initialize_ connection: %(vol)s:%(initiator)s" msgstr "Zavedení spojení: %(vol)s:%(initiator)s" #, python-format msgid "initialize_connection success. Return data: %s." msgstr "Spojení úspěšně zavedeno. Vrácená data: %s." #, python-format msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" msgstr "Zavedení spojení se svazkem %(volume)s, konektor %(connector)s" #, python-format msgid "initialize_connection, host lun id is: %s." msgstr "Zavedení spojení, id lun hostitele: %s." #, python-format msgid "" "initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " "portgroup_id: %(portgroup_id)s." msgstr "" "Zavedení spojení: IQN iSCSI: %(iscsi_iqn)s, cílová IP adresa: " "%(target_ip)s, ID skupiny portů: %(portgroup_id)s." #, python-format msgid "initialize_connection, metadata is: %s." msgstr "Zavedení spojení, popisná data: %s."
#, python-format msgid "" "initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s." msgstr "Zavedení spojení s FC: Zavaděč %(wwpns)s, název svazku: %(volume)s." #, python-format msgid "initiate: connection %s" msgstr "Zavedení: Spojení %s" msgid "initiator has no password while using chap,adding it" msgstr "Zavaděč nemá žádné heslo při používání chap, heslo je přidáno" msgid "" "initiator_auto_registration: False. Initiator auto registration is not " "enabled. Please register initiator manually." msgstr "" "initiator_auto_registration: False. Automatická registrace zavaděče není " "povolena. Prosím registrujte ho ručně." #, python-format msgid "iops limit is: %s." msgstr "Limit iops je: %s." #, python-format msgid "iscsi_initiators: %s" msgstr "Zavaděče iscsi: %s" #, python-format msgid "location is: %(location)s" msgstr "Umístění je: %(location)s" #, python-format msgid "" "migrate_volume_completion is cleaning up an error for volume %(vol1)s " "(temporary volume %(vol2)s" msgstr "" "Dokončení přenosu svazku čistí chybu ve svazku %(vol1)s (dočasný svazek " "%(vol2)s" #, python-format msgid "new cloned volume: %s" msgstr "Nový klonovaný svazek: %s" #, python-format msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" msgstr "Informace NFS: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" #, python-format msgid "open_connection to %(ssn)s at %(ip)s" msgstr "Připojení otevřeno do %(ssn)s na adrese %(ip)s" #, python-format msgid "setting volume %s to error_restoring (was restoring-backup)." msgstr "Nastavování svazku %s na error_restoring (stav byl restoring-backup)." #, python-format msgid "share: %(share)s -> %(info)s" msgstr "Sdílení: %(share)s -> %(info)s" #, python-format msgid "share: %s incorrect entry" msgstr "Sdílení: Nesprávná položka %s" #, python-format msgid "smis_do_iscsi_discovery is: %(out)s." msgstr "Provedení zjištění iSCSI pomocí SMI-S předalo: %(out)s." #, python-format msgid "snapshot %s doesn't exist" msgstr "snímek %s neexistuje" #, python-format msgid "source volume for cloning: %s" msgstr "Zdrojový svazek pro klonování: %s" #, python-format msgid "stats: stats: %s." msgstr "Statistiky: %s." #, python-format msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." msgstr "" "Zastavení snímku: Název snímku: %(snapshot)s, název svazku: %(volume)s." #, python-format msgid "targetlist: %s" msgstr "seznam cílů: %s" #, python-format msgid "terminate: connection %s" msgstr "Ukončení: Spojení %s" #, python-format msgid "terminate_connection volume: %(volume)s, connector: %(con)s" msgstr "Ukončení připojení se svazkem: %(volume)s, konektor: %(con)s" #, python-format msgid "terminate_connection, return data is: %s." msgstr "Ukončení spojení, vrácená data: %s." #, python-format msgid "" "terminate_connection_fc: volume name: %(volume)s, wwpns: %(wwns)s, lun_id: " "%(lunid)s." msgstr "" "Ukončení spojení s fc: Název svazku %(volume)s, wwpns: %(wwns)s, id lun: " "%(lunid)s." #, python-format msgid "tunevv failed because the volume '%s' has snapshots." msgstr "tunevv selhalo protože svazek '%s' má snímky." #, python-format msgid "username: %(username)s, verify_cert: %(verify)s." msgstr "Uživatelské jméno: %(username)s, ověření certifikátu: %(verify)s." #, python-format msgid "vol=%s" msgstr "svazek=%s" #, python-format msgid "vol_name=%(name)s provider_location=%(loc)s" msgstr "název svazku %(name)s, umístění poskytovatele %(loc)s" #, python-format msgid "volume %(name)s extended to %(size)d." 
msgstr "Svazek %(name)s rozšířen na %(size)d." #, python-format msgid "volume %s doesn't exist" msgstr "Svazek %s neexistuje" #, python-format msgid "volume %s no longer exists in backend" msgstr "Svazek %s již v podpůrné vrstvě neexistuje" msgid "volume_file does not support fileno() so skipping fsync()" msgstr "Soubor svazku nepodporuje fileno(), a proto je fsync() přeskočen" cinder-8.0.0/cinder/locale/cs/LC_MESSAGES/cinder.po0000664000567000056710000104242412701406257022640 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # FIRST AUTHOR , 2011 # Zbyněk Schwarz , 2015 # OpenStack Infra , 2015. #zanata # Zbyněk Schwarz , 2015. #zanata # Zbyněk Schwarz , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev10\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-17 18:05+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-02-22 01:35+0000\n" "Last-Translator: Zbyněk Schwarz \n" "Language: cs\n" "Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Czech\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder verze: %(version)s\n" #, python-format msgid " but size is now %d" msgstr "ale velikost je nyní %d" #, python-format msgid " but size is now %d." msgstr "ale velikost je nyní %d." msgid " or " msgstr "nebo" #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s nelze spravovat existující svazek připojený k hostitelům. Před " "importem ho prosím odpojte od existujících hostitelů." #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "výsledek: %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s: Oprávnění zamítnuto." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: selhalo s nečekaným výstupem příkazového řádku.\n" "Příkaz: %(cmd)s\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "Kód stavu: %(_status)s\n" "Tělo: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, alternativní název subjektu: %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s: vytváření síťového portálu: ujistěte se, že port %(port)d na " "ip adrese %(ip)s nepoužívá jiná služba." 
#, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s požaduje minimálně %(min_length)s znaků." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s má více než %(max_length)s znaků." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: záloha %(bck_id)s svazku %(vol_id)s selhala. Objekt zálohy má " "neočekávaný režim. Podporovány jsou zálohy obrazu nebo souborů, současný " "režim je %(vol_mode)s." #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "Služba %(service)s není ve stavu %(status)s v zařízení úložiště: %(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s musí být <= %(max_value)d" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s musí být >= %(min_value)d" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "Hodnota %(worker_name)s ve %(workers)d je neplatná, musí být větší než 0." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "Nelze získat přístup k %s. Ověřte, zda GPFS je aktivní a že systém souborů " "je připojen." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "nelze změnit velikost %s pomocí operace klonování, protože neobsahuje žádné " "bloky." #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "nelze změnit velikost %s pomocí operace klonování, protože je umístěn na " "komprimovaném svazku" #, python-format msgid "%s configuration option is not set." msgstr "Konfigurační volba %s není nastavena." #, python-format msgid "%s is not a directory." msgstr "%s není adresář." #, python-format msgid "%s is not a string or unicode" msgstr "%s není řetězec nebo unicode" #, python-format msgid "%s is not installed" msgstr "%s není nainstlaováno" #, python-format msgid "%s is not installed." msgstr "%s není nainstalováno." #, python-format msgid "%s is not set" msgstr "%s není nastaveno" #, python-format msgid "%s is not set." msgstr "%s není nastaveno." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s musí být platný obraz raw nebo qcow2." #, python-format msgid "%s must be an absolute path." msgstr "%s musí být absolutní cesta." #, python-format msgid "%s must be an integer." msgstr "%s musí být celé číslo." #, python-format msgid "%s not set in cinder.conf" msgstr "%s není nastaveno v cinder.conf" #, python-format msgid "%s not set." msgstr "%s není nastaveno." #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s' v souboru nastavení je neplatný pro protokol připojení k " "flashsystem. Platné hodnoty jsou %(enabled)s." msgid "'active' must be present when writing snap_info." msgstr "při zápisu informací o snímku musí být přítomno 'active'." msgid "'consistencygroup_id' must be specified" msgstr "'consistencygroup_id' musí být zadáno" msgid "'qemu-img info' parsing failed." msgstr "zpracování 'qemu-img info' selhalo." msgid "'status' must be specified." msgstr "'status' musí být zadáno." 
msgid "'volume_id' must be specified" msgstr "'volume_id' svazku musí být zadáno." msgid "'{}' object has no attribute '{}'" msgstr "Objekt '{}' nemá vlastnost '{}'" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(Příkaz: %(cmd)s) (Návratový kód: %(exit_code)s) (Standardní výstup: " "%(stdout)s) (Chybový výstup: %(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr " LUN (HLUN) nebyl nalezen. (Logické zařízení: %(ldev)s)" #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "Volný LUN (HLUN) nebyl nalezen. Přidejte jinou skupinu hostitele. (Logické " "zařízení: %(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" "Skupina hostitele nemohla být smazána. (Port: %(port)s název: %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "Skupina hostitele nemohla být smazána. (Port: %(port)s, gid: %(gid)s, název: " "%(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "Skupina hostitele je neplatná. (Skupina hostitele: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "" "Pár nemůže být smazán. (primární svazek: %(pvol)s, sekundární svazek: " "%(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "Pár nemohl být vytvořen. Překročen maximální počet párů. (Metoda kopírování: " "%(copy_method)s, primární svazek: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "Parametr je neplatný. (%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "Hodnota parametru je neplatná. (%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "Zásoba nemohla být nalezena. (Id zásoby: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Stav snímku je neplatný. (Stav: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "Pro zavedení záložního systému MUSÍ být zadán platný druhotní cíl." msgid "A volume ID or share was not specified." msgstr "Nezadáno ID svazku nebo sdílení." #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "Stav svazku je neplatný. (Stav: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s selhalo s chybovým řetězcem %(err)s" msgid "API key is missing for CloudByte driver." msgstr "Ovladači CloudByte chybí klíč API." #, python-format msgid "API response: %s" msgstr "Odpověď API: %s" msgid "API version could not be determined." msgstr "Nelze zjistit verzi API." msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "Chystáte se smazat podřazené projekty mající nenulovou kvótu. Toto by nemělo " "být prováděno" msgid "Access list not available for public volume types." msgstr "Seznam přístupu není dostupný pro veřejné typy svazku." msgid "Activate or deactivate QoS error." msgstr "Chyba při aktivaci nebo deaktivaci QoS." msgid "Activate snapshot error." msgstr "Chyba při aktivaci snímku." msgid "Add FC port to host error." msgstr "Chyba při přidávání FC portu k hostiteli." 
msgid "Add fc initiator to array error." msgstr "Chyba při přidávání zavaděče fc do pole." msgid "Add initiator to array error." msgstr "Chyba při přidávání zavaděče do pole." msgid "Add lun to cache error." msgstr "Chyba při přidávání lun do mezipaměti." msgid "Add lun to partition error." msgstr "Chyba při přidávání lun do oddílu." msgid "Add mapping view error." msgstr "Chyba při přidávání zobrazení mapování." msgid "Add new host error." msgstr "Chyba při přidávání nového hostitele." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "Žádné ze zadaných zásob úložiště, které mají být spravovány, neexistují. " "Prosím zkontrolujte své nastavení. Neexistující zásoby: %s" #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "V ovladači SheepDog nastala chyba. (Důvod: %(reason)s)" msgid "An error has occurred during backup operation" msgstr "Během operace zálohování nastala chyba" #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "Při hledání svazku \"%s\" nastal problém." #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "Při operaci kopírování LUN došlo k chybě. Název operace: %(luncopyname)s. " "Status: %(luncopystatus)s. Stav: %(luncopystate)s." #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "Při čtení svazku \"%s\" nastal problém." #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "Při zápisu do svazku \"%s\" nastal problém." #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "CHAP uživatel iSCSI nemohl být přidán. (Jméno uživatele: %(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "CHAP uživatel iSCSI nemohl být smazán. (uživatelské jméno: %(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "Cíl iSCSI nemohl být přidán. (Port: %(port)s, přezdívka: %(alias)s, důvod: " "%(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "Cíl iSCSI nemohl být smazán. (Port: %(port)s, tno: %(tno)s, přezdívka: " "%(alias)s)" msgid "An unknown exception occurred." msgstr "Vyskytla se neočekávaná výjimka." msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "" "Uživatel s příznakem z podprojektu nemá povoleno zobrazit kvótu nadřazených " "projektů." #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "Pole neexistuje nebo je offline. Současný stav pole je %s." msgid "Associate host to hostgroup error." msgstr "Chyba při přidružování hostitele ke skupině hostitele." msgid "Associate host to mapping view error." msgstr "Chyba při přidružování hostitele k zobrazení mapování." msgid "Associate initiator to host error." msgstr "Chyba při přidružování zavaděče k hostiteli." msgid "Associate lun to lungroup error." msgstr "Chyba při přidružování lun ke skupině lun." msgid "Associate lungroup to mapping view error." msgstr "Chyba při přidružování skupiny lun k zobrazení mapování." msgid "Associate portgroup to mapping view error." 
msgstr "Chyba při přidružování skupiny portu k zobrazení mapování." msgid "At least one valid iSCSI IP address must be set." msgstr "Musí být nastavena alespoň jedna platná IP adresa iSCSI." #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "Pokus o přenos %s s neplatným ověřovacím klíčem." #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "Údaje ověřovací skupiny [%s] nebyly nalezeny v úložišti CloudByte." msgid "Auth user details not found in CloudByte storage." msgstr "Přihlašovací údaje uživatele nebyly nalezeny v úložišti CloudByte." msgid "Authentication error" msgstr "Chyba ověření" msgid "Authorization error" msgstr "Chyba oprávnění" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "Zóna dostupnosti '%(s_az)s' je neplatná." msgid "Available categories:" msgstr "Dostupné kategorie:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "Specifikace QoS podpůrné vrstvy nejsou podporovány u tohoto druhu úložišť a " "verze ONTAP." #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "Podpůrná vrstva neexistuje (%(backend)s)" #, python-format msgid "Backend reports: %(message)s" msgstr "Podpůrná vrstva hlásí: %(message)s" msgid "Backend reports: item already exists" msgstr "Podpůrná vrstva hlásí: Položka již existuje" msgid "Backend reports: item not found" msgstr "Podpůrná vrstva hlásí: Položka nenalezena" msgid "Backend server not NaServer." msgstr "Server podpůrné vrstvy není NaServer." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" "Služba podpůrné vrstvy dosáhla časového limitu na nový pokus: %(timeout)s " "vteřin" msgid "Backend storage did not configure fiber channel target." msgstr "Úložiště podpůrné vrstvy nenastavilo cíl fiber channel." msgid "Backing up an in-use volume must use the force flag." msgstr "Při zálohování používaného svazku je zapotřebí použít příznak force." #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "Záloha %(backup_id)s nemohla být nalezena." msgid "Backup RBD operation failed" msgstr "Záložní operace RBD selhala" msgid "Backup already exists in database." msgstr "Záloha již existuje v databázi." #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "Záložní ovladač nahlásil chybu: %(message)s" msgid "Backup id required" msgstr "ID zálohy je vyžadováno" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "Záloha není podporována svazky GlusterFS se snímky." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "Zálohy jsou podporovány pouze u svazků SOFS bez záložního souboru." msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "Záloha je podporována pouze u prostě-formátovaných svazků GlusterFS." msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "Zálohy jsou podporovány pouze v prostě-formátovaných svazcích SOFS." msgid "Backup operation of an encrypted volume failed." msgstr "Záložní operace zašifrovaného svazku selhala." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Zálohovací služba %(configured_service)s nepodporuje ověřování. Záloha s id " "%(id)s není ověřena. Ověřování přeskočeno." #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. 
Skipping reset." msgstr "" "Zálohovací služba %(service)s nepodporuje ověřování. Záloha s id %(id)s není " "ověřena. Resetování přeskočeno." #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "Záloha by měla mít pouze jeden snímek ale místo toho má %s" msgid "Backup status must be available" msgstr "Stav zálohy musí být dostupný" #, python-format msgid "Backup status must be available and not %s." msgstr "Stav zálohy musí být dostupný a ne %s." msgid "Backup status must be available or error" msgstr "Stav zálohy musí být dostupný nebo chybný" msgid "Backup to be restored has invalid size" msgstr "Obnovovaná záloha má neplatnou velikost" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "Vrácen špatný stav řádku: %(arg)s." #, python-format msgid "Bad key(s) in quota set: %s" msgstr "Špatné klíče v sadě kvót: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "Špatná nebo neočekávaná odpověď od API podpůrné vrstvy úložiště svazků: " "%(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "Špatný formát projektu: projekt není ve správném formátu (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Clusteru Datera zaslán špatný požadavek:neplatné argumenty: %(args)s | " "%(message)s" msgid "Bad response from Datera API" msgstr "Špatná odpověď od API Datera" msgid "Bad response from SolidFire API" msgstr "Špatná odpověď od SolidFire API" #, python-format msgid "Bad response from XMS, %s" msgstr "Špatná odpověď od XMS, %s" msgid "Binary" msgstr "Binární soubor" msgid "Blank components" msgstr "Prázdné součásti" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Způsob ověření Blockbridge API (příznak nebo heslo)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Heslo API Blockbridge (pro způsob ověření 'password')" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Příznak API Blockbridge (pro způsob ověření 'token')" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Uživatel API Blockbridge (pro způsob ověření 'password')" msgid "Blockbridge api host not configured" msgstr "Hostitel API Blockbridge není nastaven" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "Blockbridge je nastaven s neplatným způsobem ověření '%(auth_scheme)s'" msgid "Blockbridge default pool does not exist" msgstr "Výchozí zásoba Blockbridge neexistuje" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Heslo Blockbridge není nastaven (vyžadováno pro způsob ověření 'password')" msgid "Blockbridge pools not configured" msgstr "Zásoby Blockbridge nejsou nastaveny" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Příznak Blockbridge není nastaven (vyžadováno pro způsob ověření 'token')" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Uživatel Blockbridge není nastaven (vyžadováno pro způsob ověření 'password')" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "Chyba vyhrazování CLI Brocade Fibre Channel: %(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Chyba vyhrazování HTTP Brocade Fibre Channel: %(reason)s" msgid "CHAP secret should be 12-16 bytes." 
msgstr "Soukromý klíč CHAP by měl mít alespoň 12-16 bajtů." #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Výstup výjimky rozhraní příkazového řádku:\n" "příkaz: %(cmd)s\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Výstup výjimky rozhraní příkazového řádku:\n" "příkaz: %(cmd)s\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E, mapování virtuálního disku na hostitele nebylo vytvořeno protože " "disk k hostiteli již je namapován.\n" "\"" msgid "CONCERTO version is not supported" msgstr "Verze CONCERTO není podporována" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "Společná skupiny poskytování (%s) neexistuje v poli" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "" "Název mezipaměti je None, prosím nastavte smartcache:cachename v klíči." #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "Svazek mezipaměti %(cache_vol)s nemá snímek %(cache_snap)s." #, python-format msgid "Cache volume %s does not have required properties" msgstr "Svazek mezipaměti %s nemá požadované vlastnosti" msgid "Can not add FC port to host." msgstr "Nelze přidat FC port k hostiteli." #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "Nelze najít id mezipaměti podle jejího názvu %(name)s." #, python-format msgid "Can not find partition id by name %(name)s." msgstr "Nelze najít id oddílu podle názvu %(name)s." #, python-format msgid "Can not translate %s to integer." msgstr "Nelze převést %s na celé číslo." #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "Nelze získat přístup k 'scality_sofs_config': %s" msgid "Can't decode backup record." msgstr "Nelze rozšifrovat záznam zálohy." #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "Nelze v poli najít název mezipaměti, její název je: %(name)s." #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "Nelze v poli najít název oddílu, jeho název je: %(name)s." msgid "Can't find the same host id from arrays." msgstr "V polích nelze najít stejné id hostitele ." #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "Nelze získat id svazku. Název svazku: %s." #, python-format msgid "Can't open config file: %s" msgstr "Nelze otevřít soubor s nastavením: %s" msgid "Can't parse backup record." msgstr "Nelze zpracovat záznam zálohy." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " "protože nemá žádný typ svazku." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " "protože již je ve skupině %(orig_group)s.." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." 
msgstr "" "Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " "protože svazek nelze nalézt." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " "protože svazek neexistuje." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " "protože svazek je v neplatném stavu: %(status)s. Platné stavy jsou %(valid)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " "protože typ svazku %(volume_type)s tato skupina nepodporuje." #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "Nelze připojit již připojený svazek %s; vícenásobné připojení je zakázáno " "přes volbu nastavení 'netapp_enable_multiattach'." msgid "Cannot connect to ECOM server." msgstr "Nelze se připojit k serveru ECOM." #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "Nelze vytvořit klon s velikostí %(vol_size)s ze svazku s velikostí " "%(src_vol_size)s" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "Nelze vytvořit skupinu jednotnosti %(group)s protože snímek %(snap)s není v " "platném stavu. Platné stavy jsou: %(valid)s." #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "Nelze vytvořit skupinu jednotnosti %(group)s protože zdrojový svazek " "%(source_vol)s není v platném stavu. Platné stavy jsou: %(valid)s." #, python-format msgid "Cannot create directory %s." msgstr "Nelze vytvořit adresář %s." msgid "Cannot create encryption specs. Volume type in use." msgstr "Nelze vytvořit specifikace šifrování. Typ svazku se používá." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "Nelze vytvořit obraz formátu disku: %s. Je přijímán pouze formát disku vmdk." #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "Nelze vytvořit zamaskování: %(maskingViewName)s. " #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "Při nastavení 'netapp_enable_multiattach' na true nelze v poli ESeries " "vytvořit více jak %(req)s svazků." #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "Nelze vytvořit nebo najít skupinu úložiště s názvem %(sgGroupName)s." #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "Nelze vytvořit svazek o velikosti %(vol_size)s ze snímku s velikostí " "%(snap_size)s" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "Nelze vytvořit svazek o velikosti %s: není násobkem 8GB." 
#, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "Nelze vytvořit typ svazku s názvem %(name)s a specifikacemi %(extra_specs)s" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "LUN %s nelze smazat, pokud existují snímky." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "Nelze smazat svazek mezipaměti: %(cachevol_name)s. Byl aktualizována " "%(updated_at)s a v současnosti má %(numclones)d instancí svazku." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "Nelze smazat svazek mezipaměti: %(cachevol_name)s. Byl aktualizována " "%(updated_at)s a v současnosti má %(numclones)s instancí svazku." msgid "Cannot delete encryption specs. Volume type in use." msgstr "Nelze smazat specifikace šifrování. Typ svazku se používá." msgid "Cannot determine storage pool settings." msgstr "Nelze zjistit typ nastavení zásoby úložiště." msgid "Cannot execute /sbin/mount.sofs" msgstr "Nelze spustit /sbin/mount.sofs" #, python-format msgid "Cannot find CG group %s." msgstr "Nelze najít skupinu CG %s." #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "Nelze najít službu konfigurace kontroléru pro systém úložiště " "%(storage_system)s." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "Nelze najít službu replikace pro vytvoření svazku pro snímek %s." #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "Nelze najít službu repliakce pro smazání svazku %s." #, python-format msgid "Cannot find Replication service on system %s." msgstr "Nelze najít službu replikace na systému %s." #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "Nelze najít svazek: %(id)s, operace zrušení správy. Ukončování..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "Nelze najít svazek: %(volumename)s. Operace rozšíření. Ukončování..." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "Nelze najít číslo zařízení pro svazek %(volumeName)s." msgid "Cannot find migration task." msgstr "Nelze najít úkol pro přesun." #, python-format msgid "Cannot find replication service on system %s." msgstr "Nelze najít službu replikace v systému %s." #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "" "Nelze najít zdrojovou instanci skupiny jednotnosti. ID skupiny jednotnosti: " "%s." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "Nelze získat id mcs podle id kanálu: %(channel_id)s." msgid "Cannot get necessary pool or storage system information." msgstr "Nelze získat potřebné informace o systému zásoby nebo úložiště." #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "Nelze získat nebo vytvořit skupinu úložiště: %(sgGroupName)s pro svazek " "%(volumeName)s" #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "Nelze získat nebo vytvořit skupinu zavaděče: %(igGroupName)s. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. 
" msgstr "Nelze získat skupinu portu: %(pgGroupName)s. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "Nelze získat skupinu úložiště: %(sgGroupName)s pro zamaskování " "%(maskingViewInstanceName)s. " #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Nelze získat podporovaný rozsah velikosti pro %(sps)s. Návratový kód: " "%(rc)lu. Chyba: %(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "Nelze získat výchozí skupiny úložiště pro zásadu FAST: %(fastPolicyName)s." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" "Nelze připojit Scality SOFS, zkontrolujte záznam systému pro možné chyby" msgid "Cannot ping DRBDmanage backend" msgstr "Nelze zjistit spojení s podpůrnou vrstvou DRBDmanage" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "Nelze umístit svazek %(id)s na %(host)s" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "Při vytváření skupiny jednotnosti %(name)s ze zdroje nelze zadávat " "'cgsnapshot_id' a 'source_cgid' najednou." msgid "Cannot register resource" msgstr "Nelze registrovat zdroj" msgid "Cannot register resources" msgstr "Nelze registrovat zdroje" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "Nelze odstranit svazek %(volume_id)s ze skupiny jednotnosti %(group_id)s " "protože není ve skupině." #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "Nelze odstranit svazek %(volume_id)s ze skupiny jednotnosti %(group_id)s " "protože svazek je v neplatném stavu: %(status)s. Platné stavy jsou: " "%(valid)s." #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "Nelze přetypovat z HPE3PARDriver na %s." msgid "Cannot retype from one 3PAR array to another." msgstr "Nelze přetypovat z jednoho pole 3PAR na jiné." msgid "Cannot retype to a CPG in a different domain." msgstr "Nelze přetypovat do společné skupiny poskytování v jiné doméně." msgid "Cannot retype to a snap CPG in a different domain." msgstr "Nelze přetypovat do snímku společné skupiny poskytování v jiné doméně." msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "Nelze spustit příkaz vgc-cluster, ujistěte se prosím, že je software " "nainstalován a máte správně nastavená oprávnění." msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "Nelze nastavit sériové číslo a název jednotky hitachi najednou." msgid "Cannot specify both protection domain name and protection domain id." msgstr "Nelze zadat název a id ochranné domény najednou." msgid "Cannot specify both storage pool name and storage pool id." msgstr "Nelze zadat název a id zásoby úložiště najednou." #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "Nelze aktualizovat skupinu jednotnosti %(group_id)s protože nebylo zadán " "platný název, popis, či přidání/odstranění svazku." msgid "Cannot update encryption specs. Volume type in use." 
msgstr "Nelze aktualizovat specifikace šifrování. Typ svazku se používá." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "Nelze aktualizovat typ svazku %(id)s" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "Nelze ověřit existenci objektu: %(instanceName)s." #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "Snímek skupiny jednotnosti %(cgsnapshot_id)s nemohl být nalezen." msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Snímek skupiny jednotnosti je prázdný. Žádná skupina nebude vytvořena." msgid "Cgsnapshot status must be available or error" msgstr "Stav snímku skupiny jednotnosti musí být dostupný nebo chybný" msgid "Change hostlun id error." msgstr "Chyba při změně id hostitele lun." msgid "Change lun priority error." msgstr "Chyba při získávání priority lun." msgid "Change lun smarttier policy error." msgstr "Chyba při změny chytré vrstvy zásady lun." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "Změna by využití změnila na méně než 0 pro následující zdroje: %(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "" "Zkontrolujte oprávnění k přístupu pro sdílení ZFS přidělené k tomuto " "ovladači." msgid "Check hostgroup associate error." msgstr "Chyba při kontrole přidružení skupiny hostitele." msgid "Check initiator added to array error." msgstr "Chyba při kontrole přidání zavaděče do pole." msgid "Check initiator associated to host error." msgstr "Chyba při kontrole přidružení zavaděče k hostiteli." msgid "Check lungroup associate error." msgstr "Chyba při kontrole přidružení skupiny lun." msgid "Check portgroup associate error." msgstr "Chyba při kontrole přidružení skupiny portu." msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "Zkontrolujte stav služby http. Také se ujistěte, že číslo portu https je " "stejné jako port zadaný v cinder.conf." msgid "Chunk size is not multiple of block size for creating hash." msgstr "" "Velikost kusu není násobkem velikosti bloku z kterého je možné vytvořit " "kontrolní součet." #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Chyba vyhrazování CLI Cisco Fibre Channel: %(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "Funkce klonování není licencována na %(storageSystem)s." #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "Typ klonu '%(clone_type)s' je neplatný; platné hodnoty jsou: " "'%(full_clone)s' a '%(linked_clone)s'." msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "" "Cluster není zformátován. Pravděpodobně byste měli provést příkaz \"dog " "cluster format\"." #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Selhání ovladače Cinder Coho Data: %(message)s" msgid "Coho rpc port is not configured" msgstr "RPC port Coho není nastaven" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Příkaz %(cmd)s byl v klientském řádku zablokován a zrušen" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper. 
#, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: vypršel časový limit %s." msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "Povolovač komprimace není nainstalován. Nelze vytvořit komprimovaný svazek." #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "Výpočetní cluster: %(cluster)s nenalezen." msgid "Condition has no field." msgstr "Podmínka nemá žádné pole." #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "Konfigurační soubor %(configurationFile)s neexistuje." #, python-format msgid "Configuration value %s is not set." msgstr "Konfigurační volba %s není nastavena." msgid "Configured host type is not supported." msgstr "Nastavený typ hostitele není podporován." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "Konfliktní specifikace QoS pro typ svazku %s: když je specifikace QoS " "přidružena k typu svazku, zastaralá volba \"netapp:qos_policy_group\" není " "povolena v dodatečných specifikacích typu svazku." #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Připojení ke glance selhalo: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Připojení k swift selhalo: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "Konektor neposkytuje: %s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "Konektor nemá požadované informace: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "Skupina jednotnosti %s stále obsahuje svazky. Pro její smazání je třeba " "použít příznak force." #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "Skupina jednotnosti %s má stále na sobě závislé snímky." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "" "Skupina jednotnosti je prázdná. Nebude vytvořen žádný snímek skupiny " "jednotnosti." #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "Stav skupiny jednotnosti musí být dostupný nebo chybný, ale současný stav " "je: %s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "Stav skupiny jednotnosti musí být dostupný, ale současný stav je: %s." #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "Skupina jednotnosti %(consistencygroup_id)s nemohla být nalezena." msgid "Container" msgstr "Kontejner" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "Formát kontejneru: %s není podporován ovladačem VMDK, podporován je pouze " "'bare'." msgid "Container size smaller than required file size." msgstr "Velikost kontejneru je menší než požadovaná velikost souboru." msgid "Content type not supported." msgstr "Typ obsahu není podporován." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "Služba nastavení kontroléru nebyla nalezena v %(storageSystemName)s." #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s."
msgstr "IP kontroléru '%(host)s' nešlo zpracovat: %(e)s." #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "Převedeno na %(f1)s, ale formát je nyní %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "Převedeno na %(vol_format)s, ale formát je nyní %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "Převedeno na prosté, ale formát je nyní %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "Převedeno na prostý, ale formát je nyní %s." msgid "Coordinator uninitialized." msgstr "Koordinátor nezaveden." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "Úkol kopírování svazku selhal: převedení na základní svazek: Id=%(id)s, stav=" "%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "Kopírování popisných dat z %(src_type)s %(src_id)s do %(vol_id)s." msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "Nelze zjistit, který koncový bod Keystone použít. Ten může být nastaven buď " "v katalogu služby nebo pomocí volby 'backup_swift_auth_url' v souboru cinder." "conf." msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "Nelze zjistit, který koncový bod Swift použít. Ten může být nastaven buď v " "katalogu služby nebo pomocí volby 'backup_swift_url' v souboru cinder.conf." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "Nelze najít id clusteru GPFS: %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "Nelze najít zařízení systému souborů GPFS: %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "Nelze najít hostitele pro svazek %(volume_id)s s typem %(type_id)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Nelze najít nastavení v %(path)s" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "Nelze najít export iSCSI pro svazek %(volumeName)s." #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Nelze najít export iSCSI pro svazek %s" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "Nelze najít cílové iSCSI pro svazek %(volume_id)s." #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "Ve výstupu příkazu %(cmd)s nelze najít klíč: %(out)s." #, python-format msgid "Could not find parameter %(param)s" msgstr "Nelze najít parametr %(param)s" #, python-format msgid "Could not find target %s" msgstr "Nelze najít cíl %s" msgid "Could not get system name." msgstr "Nelze získat název systému." #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Nelze načíst aplikaci vložení '%(name)s' z %(path)s" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "Nelze přečíst %s. Znovu prováděno pomocí sudo" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "Nelze uložit nastavení do %(file_path)s: %(exc)s" msgid "Create QoS policy error." msgstr "Chyba při vytváření zásady QoS." 
#, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Vytvoření zálohy zrušeno, očekávaný stav zálohy je %(expected_status)s ale " "zjištěný stav je %(actual_status)s." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Vytvoření zálohy zrušeno, očekávaný stav svazku je %(expected_status)s ale " "zjištěný stav je %(actual_status)s." msgid "Create consistency group failed." msgstr "Vytvoření skupiny jednotnosti selhalo." #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "Vytváření šifrovaných svazků typu %(type)s z obrazu %(image)s není " "podporováno." msgid "Create export for volume failed." msgstr "Vytvoření exportu pro svazek selhalo." msgid "Create hostgroup error." msgstr "Chyba při vytváření skupiny hostitele." #, python-format msgid "Create hypermetro error. %s." msgstr "Chyba při vytváření hypermetra. %s" msgid "Create lun migration error." msgstr "Chyba při vytváření přesunu lun." msgid "Create luncopy error." msgstr "Chyba při vytváření kopie lun." msgid "Create lungroup error." msgstr "Chyba při vytváření skupiny lun." msgid "Create manager volume flow failed." msgstr "Vytvoření postupu správce svazku selhalo." msgid "Create snapshot error." msgstr "Chyba při vytváření snímku." #, python-format msgid "Create volume error. Because %s." msgstr "Chyba při vytváření svazku: Protože %s." msgid "Create volume failed." msgstr "Vytvoření svazku selhalo." msgid "Creating a consistency group from a source is not currently supported." msgstr "" "Vytváření skupiny jednotnosti ze zdroje není v současnosti podporováno." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "Vytvoření a aktivování sady zóny selhalo: (Sada zóny=%(cfg_name)s chyba=" "%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "Vytvoření a aktivování sady zóny selhalo: (Sada zóny=%(zoneset)s chyba=" "%(err)s)." #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "Vytváření využití pro období od %(begin_period)s do %(end_period)s" msgid "Current host isn't part of HGST domain." msgstr "Současný hostitel není součástí domény HGST." #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "Současný hostitel není platný pro svazek %(id)s s typem %(type)s, přesun " "není povolen" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "Současný mapovaný hostitel svazku %(vol)s je v nepodporované skupině " "hostitele s %(group)s." msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "ZASTARALÉ: Nasazení Cinder API v1." msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "Chyba nastavení ovladače DRBDmanage: nenalezeny některé požadované knihovny " "(dbus, drbdmanage.*)." #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage očekával jeden zdroj (\"%(res)s\"), obdrženo %(n)d" msgid "Data ONTAP API version could not be determined." msgstr "Nelze zjistit verzi API Data ONTAP." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." 
msgstr "Data ONTAP pracuje v režimu 7, který nepodporuje skupiny zásad QoS." msgid "Database schema downgrade is not allowed." msgstr "Snížení verze schématu databáze na nižší verzi není povoleno." #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup je platný typ poskytování, ale vyžaduje WSAPI verze " "'%(dedup_version)s', je nainstalována verze '%(version)s'." msgid "Dedup luns cannot be extended" msgstr "Deduplikované lun nemohou být rozšířeny" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "Povolovač komprimace není nainstalován. Nelze vytvořit deduplikovaný svazek." msgid "Default pool name if unspecified." msgstr "Název výchozí zásoby není zadán." #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "Výchozí kvóta pro zdroj: %(res)s je nastavena příznakem výchozí kvóty: quota_" "%(res)s, je nyní zastaralé. Pro výchozí kvótu prosím použijte třídu výchozí " "kvóty." msgid "Default volume type can not be found." msgstr "Výchozí typ svazku nenalezen." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "Určuje sadu vystavených zásob a jejich přidružené dotazovací řetězce z " "podpůrné vrstvy." msgid "Delete LUNcopy error." msgstr "Chyba při mazání kopírování LUN." msgid "Delete QoS policy error." msgstr "Chyba při mazání zásady QoS." msgid "Delete associated lun from lungroup error." msgstr "Chyba při mazání přidružené lun ze skupiny lun." #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Smazání zálohy zrušeno, v současnosti nastavená služba záloh " "[%(configured_service)s] není stejnou službou použitou k vytvoření této " "zálohy [%(backup_service)s]." msgid "Delete consistency group failed." msgstr "Smazání skupiny jednotnosti selhalo." msgid "Delete hostgroup error." msgstr "Chyba při mazání skupiny hostitele." msgid "Delete hostgroup from mapping view error." msgstr "Chyba při mazání skupiny hostitele ze zobrazení mapování." msgid "Delete lun error." msgstr "Chyba při mazání lun." msgid "Delete lun migration error." msgstr "Chyba při mazání přesunu lun." msgid "Delete lungroup error." msgstr "Chyba při mazání skupiny lun." msgid "Delete lungroup from mapping view error." msgstr "Chyba při mazání skupiny lun ze zobrazení mapování." msgid "Delete mapping view error." msgstr "Chyba při mazání zobrazení mapování." msgid "Delete portgroup from mapping view error." msgstr "Chyba při mazání skupiny portu ze zobrazení mapování." msgid "Delete snapshot error." msgstr "Chyba při mazání snímku." #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "Smazání snímku svazku není podporováno ve stavu: %s." #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Smazání zálohy zrušeno, očekávaný stav zálohy je %(expected_status)s ale " "zjištěný stav je %(actual_status)s." msgid "Deleting volume from database and skipping rpc." msgstr "Odpojování svazku od databáze a přeskakování rpc." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." 
msgstr "Mazání zón selhalo: (příkaz=%(cmd)s chyba=%(err)s)." msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "" "Pro podporu skupin jednotnosti je vyžadována API Dell verze 2.1 a vyšší." msgid "Describe-resource is admin only functionality" msgstr "Describe-resource je funkce pouze pro správce" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "Cíl má stav přesunu %(stat)s, očekáváno %(exp)s." msgid "Destination host must be different than the current host." msgstr "Cílový hostitel musí být odlišný od současného." msgid "Destination volume not mid-migration." msgstr "Cílový svazek není uprostřed přesunu." msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "Odpojení svazku selhalo. Více než jedno připojení, ale nezadáno žádné " "attachment_id." msgid "Detach volume from instance and then try again." msgstr "Odpojte svazek od instance a zkuste to znovu." #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "Zjištěn více než jeden svazek s názvem %(vol_name)s" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "Nenalezen očekávaný sloupec v %(fun)s: %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "Očekávaný klíč %(key)s nenalezen v %(fun)s: %(raw)s." msgid "Disabled reason contains invalid characters or is too long" msgstr "Důvod zakázání obsahuje neplatné znaky nebo je příliš dlouhý." #, python-format msgid "Domain with name %s wasn't found." msgstr "Doména s názvem %s nebyla nalezena." #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Zjištěn cluster GPFS nižší úrovně. Funkce klonování GPFS není povolena v " "daemonu clusteru na úrovni %(cur)s - úroveň musí být alespoň %(min)s." #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Zavedení připojení ovladačem selhalo (chyba: %(err)s)." msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "Ovladač není schopen provést přetypování protože svazek (LUN {}) má snímek, " "který má přesunování zakázáno." msgid "Driver must implement initialize_connection" msgstr "Ovladač musí zavést initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "Ovladač úspěšně rozšifroval data importované zálohy, ale některá pole chybí " "(%s)." #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "Verze API E-series proxy %(current_version)s nepodporuje úplnou sadu " "dodatečných specifikací SSC. Verze proxy musí být alespoň %(min_version)s." #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "Výjimka CLI ovladače EMC VNX Cinder: %(cmd)s (Návratový kód: %(rc)s) " "(Výstup: %(out)s)." msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword musí mít platné " "hodnoty." 
#, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "Pro vytvoření skupiny jednotnosti %(name)s ze zdroje musí být zadán buď " "'cgsnapshot_id' nebo 'source_cgid'." #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO: %(slo)s nebo vytížení %(workload)s jsou neplatné. Prozkoumejte " "předchozí chybový výpis pro platné hodnoty." msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "Je vyžadováno buď hitachi_serial_number nebo hitachi_unit_name." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "Služba složení prvku nebyla nalezena v %(storageSystemName)s." msgid "Enables QoS." msgstr "Povolí QoS." msgid "Enables compression." msgstr "Povolí komprimaci." msgid "Enables replication." msgstr "Povolí replikaci." msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "Ujistěte se, že configfs je připojen k /sys/kernel/config." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při přidávání zavaděče: %(initiator)s ve skupině zavaděče: " "%(initiatorgroup)s. Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při přidávání do cílové skupiny: %(targetgroup)s mající IQN: %(iqn)s. " "Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Chyba při připojování svazku %(vol)s." #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při klonování snímku: %(snapshot)s ve svazku %(lun)s ze zásoby " "%(pool)s, projekt %(project)s, klon projektu: %(clone_proj)s, návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Chyba při vytváření klonovaného svazku: %(cloneName)s. Návratový kód: " "%(rc)lu. Chyba: %(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při vytváření klonovaného svazku: Svazek: %(cloneName)s, zdrojový " "svazek %(sourceName)s. Návratový kód: %(rc)lu. Chyba: %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při vyváření skupiny: %(groupName)s. Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Chyba při vytváření maskování: %(groupName)s. Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při vytváření svazku: %(volumeName)s. Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při vytváření svazku: %(volumename)s. 
Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při vytváření skupiny repliky: zdroj: %(source)s cíl: %(target)s. " "Návratový kód: %(rc)lu. Chyba: %(error)s." #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při vytváření zavaděče: %(initiator)s s přezdívkou: %(alias)s. " "Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při vytváření projektu: %(project)s v zásobě %(pool)s. Návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při vytváření vlastnosti: %(property)s, typ: %(type)s, popis: " "%(description)s, návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Chyba při vytváření sdílení: %(name)s, návratový kód: %(ret.status)d, " "zpráva: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při vytváření snímku: %(snapshot)s ve svazku %(lun)s do zásoby " "%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při vytváření snímku: %(snapshot)s ve sdílení %(share)s do zásoby " "%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Chyba při vytváření cíle: %(alias)s. Návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při vytváření cílové skupiny: %(targetgroup)s mající IQN: %(iqn)s. " "Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Chyba při vytváření svazku: %(lun)s, velikost: %(size)s, návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při vytváření nového složeného svazku. Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při provádění replikace na: zásobu: %(pool)s, projekt %(proj)s, " "svazek: %(vol)s pro cíl %(tgt)s a zásobu: %(tgt_pool)s. Návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." msgid "Error Creating unbound volume on an Extend operation." 
msgstr "Chyba při vytváření nesvázaného svazku v operaci rozšíření." msgid "Error Creating unbound volume." msgstr "Chyba při vytváření nesvázaného svazku." #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při mazání svazku: %(volumeName)s. Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při mazání snímku: %(snapshot)s ve sdílení %(share)s do zásoby " "%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při mazání snímku: %(snapshot)s ve svazku %(lun)s do zásoby %(pool)s, " "projekt %(project)s, návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Chyba při mazání svazku: %(lun)s ze zásoby %(pool)s, projekt %(project)s, " "návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při mazání projektu: %(project)s v zásobě %(pool)s. Návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Chyba při odstraňování provedení replikace: %(id)s. Návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při získávání zavaděčů: Skupina zavaděče: %(initiatorgroup)s. " "Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při získávání statistik projektu: Zásoba %(pool)s, projekt " "%(project)s, návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při získávání sdílení: %(share)s v zásobě %(pool)s, projekt " "%(project)s, návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při získávání snímku: %(snapshot)s ve svazku %(lun)s do zásoby " "%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Chyba při získávání cíle: %(alias)s. Návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." 
msgstr "" "Chyba při získávání svazku: %(lun)s v zásobě %(pool)s, projekt %(project)s, " "návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Chyba při přesunu svazku z jedné zásoby do druhé. Návratový kód: %(rc)lu. " "Chyba: %(error)s." #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Chyba při úpravě maskování : %(groupName)s. Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při nastavování vlastností: %(props)s ve svazku %(lun)s v zásobě " "%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při ukončování sezení přesunu. Návratový kód: %(rc)lu. Chyba: " "%(error)s." #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Chyba při ověřování zavaděče: %(iqn)s. Návratový kód: %(ret.status)d, " "zpráva: %(ret.data)s." #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Chyba při ověřování zásoby: %(pool)s: Návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při ověřování projektu: %(project)s v zásobě %(pool)s. Návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Chyba při ověřování služby: Služba: %(service)s, návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Chyba při ověřování cíle: %(alias)s. Návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Chyba při ověřování sdílení: %(share)s v projektu %(project)s a zásobě " "%(pool)s. Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "Chyba při přidávání svazku: %(volumeName)s mající cestu instance: " "%(volumeInstancePath)s." #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Chyba při přidávání zavaděče do skupiny : %(groupName)s. Návratový kód: " "%(rc)lu. Chyba: %(error)s." #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "Chyba při přidávání svazku do složeného svazku. Chyba byla: %(error)s." #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "" "Chyba při připojování svazku %(volumename)s do cílového základního svazku." #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. 
To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "Chyba při přidružování skupiny úložiště: %(storageGroupName)s k zásadě FAST: " "%(fastPolicyName)s. Popis chyby: %(errordesc)s." #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "" "Chyba při připojování svazku %s. Možná bylo dosaženo limitu v cílovém " "zařízení!" #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Chyba při rušení vztahu klona. Synchronizovaný název: %(syncName)s. " "Návratový kód: %(rc)lu. Chyba: %(error)s." msgid "Error connecting to ceph cluster." msgstr "Chyba při připojování ke clusteru ceph." #, python-format msgid "Error connecting via ssh: %s" msgstr "Chyba při připojování pomocí ssh: %s" #, python-format msgid "Error creating volume: %s." msgstr "Chyba při vytváření svazku: %s." msgid "Error deleting replay profile." msgstr "Chyba při mazání profilu rychlého načtení." #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "Chyba při mazání svazku %(vol)s: %(err)s." #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "Chyba během zpracování hodnotitele: %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Chyba při úpravě sdílení: %(share)s v zásobě: %(pool)s. Návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "Chyba při povolování iSER pro síťový portál: ujistěte se prosím, že RDMA je " "podporováno pro port %(port)d vašeho iSCSI na ip adrese %(ip)s." #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "Během čištění selhaného připojení se vyskytla chyba: %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "Chyba při provádění [%(cmd)s] CloudByte API, chyba: %(err)s." msgid "Error executing EQL command" msgstr "Chyba při provádění příkazu EQL" #, python-format msgid "Error executing command via ssh: %s" msgstr "Chyba při provádění příkazu pomocí ssh: %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "Chyba při rozšiřování svazku %(vol)s: %(err)s." #, python-format msgid "Error extending volume: %(reason)s" msgstr "Chyba při rozšiřování svazku: %(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "Chyba při hledání %(name)s." #, python-format msgid "Error finding %s." msgstr "Chyba při hledání %s." msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Chyba při získávání podrobností o verzi zařízení. Návratový kód: %(ret." "status)d, zpráva: %(ret.data)s." #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "Chyba při získávání id domény z názvu %(name)s: %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "Chyba při získávání id domény z názvu %(name)s: %(id)s." msgid "Error getting initiator groups." msgstr "Chyba při získávání skupin zavaděče." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "Chyba při získávání id zásoby s názvem %(pool)s: %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." 
msgstr "Chyba při získávání id zásoby z názvu %(pool_name)s: %(err_msg)s." #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Chyba při získávání provádění replikace: %(id)s. Návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Chyba při získávání podrobností o zdroji replikace. Návratový kód: %(ret." "status)d, zpráva: %(ret.data)s." msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Chyba při získávání podrobností o cíli replikace. Návratový kód: %(ret." "status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Chyba při získávání verze: svc: %(svc)s. Návratový kód: %(ret.status)d, " "zpráva: %(ret.data)s." #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" "Chyba v operaci [%(operation)s] pro svazek [%(cb_volume)s] v úložišti " "CloudByte: [%(cb_error)s], kód chyby: [%(error_code)s]" #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "Chyba v odpovědi SolidFire API: data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "Chyba při vytváření prostoru pro %(space)s s velikostí %(size)d GB" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "Chyba při rozšiřování prostoru svazku %(space)s o dalších %(size)d GB" #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "Chyba při mapování svazku %(vol)s. %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Chyba při změně synchronizace repliky: %(sv)s, operace: %(operation)s. " "Návratový kód: %(rc)lu. Chyba: %(error)s." #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Chyba při změně služby: %(service)s, návratový kód: %(ret.status)d, zpráva: " "%(ret.data)s." #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při přesunování svazku: %(vol)s ze zdrojového projektu %(src)s do " "cílového %(tgt)s. Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." msgid "Error not a KeyError." msgstr "Chyba není chyba klíče." msgid "Error not a TypeError." msgstr "Chyba není chyba typu." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "Při vytváření snímku skupiny jednotnosti %s nastala chyba." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "Při mazání snímku skupiny jednotnosti %s nastala chyba." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "Při aktualizaci skupiny jednotnosti %s nastala chyba." #, python-format msgid "Error parsing config file: %s" msgstr "Nelze zpracovat soubor s nastavením: %s" msgid "Error promoting secondary volume to primary" msgstr "Chyba při propagování druhotného svazku na hlavní" #, python-format msgid "Error removing volume %(vol)s. %(error)s." 
msgstr "Chyba při mazání svazku: %(vol)s. %(error)s." #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Chyba při přejmenování svazku %(vol)s: %(err)s." #, python-format msgid "Error response: %s" msgstr "Chybová odpověď: %s" msgid "Error retrieving volume size" msgstr "Chyba při získávání velikosti svazku" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při odesílání aktualizace replikace pro činnost s id: %(id)s. " "Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" "Chyba při odesílání aktualizace replikace. Předaná chyba: %(err)s. Činnost: " "%(id)s." #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při nastavování dědění replikace na %(set)s pro svazek: %(vol)s, " "projekt %(project)s. Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Chyba při oddělování balíčku: %(package)s od zdroje: %(src)s. Návratový kód: " "%(ret.status)d, zpráva: %(ret.data)s." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "Chyba při rozvazování svazku %(vol)s ze zásoby. %(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "Chyba při ověřování velikosti klonu svazku: %(clone)s, velikost: %(size)d ve " "snímku: %(snapshot)s" #, python-format msgid "Error while checking transaction status: %s" msgstr "Chyba při kontrole stavu přenosu: %s" #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "Chyba při získávání dat pomocí ssh: (příkaz=%(cmd)s chyba=%(err)s)." #, python-format msgid "Error while requesting %(service)s API." msgstr "Chyba při žádání API %(service)s." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "" "Chyba při spouštění rozhraní příkazového řádku pro zónu: (příkaz=%(cmd)s " "chyba=%(err)s)." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "Překročen maximální počet pokusů %(max_attempts)d pro svazek %(volume_id)s." msgid "Exceeded the limit of snapshots per volume" msgstr "Překročeno omezení snímků na svazek" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "" "Při připojování meta svazku do cílového svazku %(volumename)s se objevila " "výjimka." #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "Při vytváření repliky prvku nastala výjimka. Název klonu: %(cloneName)s, " "zdrojový název: %(sourceName)s, dodatečné specifikace: %(extraSpecs)s" #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "Výjimka ve výběru diskového prostoru pro svazek: %s." #, python-format msgid "Exception: %s" msgstr "Výjimka: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Očekáváno uuid ale obdrženo %(uuid)s." 
#, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "U počtu uzlů očekáváno číslo, svcinfo lsiogrp předalo: %(node)s." #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" "Od příkazu %(cmd)s v příkazovém řádku neočekáván žádný výstup, předáno " "%(out)s." #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "Při filtrování pomocí UID virtuálního disku bylo očekáváno vrácení jednoho " "disku z lsvdisk. Bylo předáno %(count)s." #, python-format msgid "Expected volume size was %d" msgstr "Očekávaná velikost snímku byla %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Exportování zálohy zrušeno, očekávaný stav svazku je %(expected_status)s ale " "zjištěný stav je %(actual_status)s." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Exportování záznamu zrušeno, v současnosti nastavená služba záloh " "[%(configured_service)s] není stejnou službou použitou k vytvoření této " "zálohy [%(backup_service)s]." msgid "Extend volume error." msgstr "Chyba při rozšiřování svazku" msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "Rozšíření svazku je tímto ovladačem podporováno pouze když svazek nemá " "snímky." msgid "Extend volume not implemented" msgstr "Rozšíření svazku není zavedeno" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "Povolovač rychlého VP není nainstalován. Nelze nastavit zásadu vrstvení pro " "svazek" msgid "FAST is not supported on this array." msgstr "FAST není podporován v tomto poli." msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC je protokol, ale OpenStack nedodává wwpn." #, python-format msgid "Faield to unassign %(volume)s" msgstr "Nelze zrušit přidělení %(volume)s" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "Nelze vytvořit svazek mezipaměti %(volume)s. Chyba: %(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "Nelze přidat připojení pro fabric=%(fabric)s: Chyba: %(err)s" msgid "Failed cgsnapshot" msgstr "Snímek jednotnosti selhal" #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "Nelze vytvořit snímek ze svazku %(volname)s: %(response)s." #, python-format msgid "Failed getting details for pool %s." msgstr "Nelze získat podrobnosti zásoby %s." #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "Nelze odstranit připojení pro fabric=%(fabric)s: Chyba: %(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "Nelze rozšířit svazek %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "Nelze se přihlásit do 3PAR (%(url)s) kvůli %(err)s" msgid "Failed to access active zoning configuration." msgstr "Nelze získat přístup k aktivním nastavení zónování." #, python-format msgid "Failed to access zoneset status:%s" msgstr "Nelze získat přístup ke stavu sady zóny: %s" #, python-format msgid "" "Failed to acquire a resource lock. 
(serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "Nelze získat zámek zdroje. (Sériové číslo: %(serial)s, inst: %(inst)s, " "ret: %(ret)s, chybový výstup: %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "Nelze přidat %(vol)s do %(sg)s po %(retries)s pokusech." msgid "Failed to add the logical device." msgstr "Nelze přidat logické zařízení." #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Nelze přidat svazek %(volumeName)s do skupiny jednotnosti %(cgName)s. " "Návratový kód: %(rc)lu. Chyba: %(error)s." msgid "Failed to add zoning configuration." msgstr "Nelze přidat nastavení zónování." #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "Nelze přidělit zavaděč iSCSI IQN. (Port: %(port)s, důvod: %(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Nelze asociovat specifikace qos: %(specs_id)s s typem %(type_id)s." #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "Nelze připojit cílové iSCSI pro svazek %(volume_id)s." #, python-format msgid "Failed to backup volume metadata - %s" msgstr "Nelze zálohovat popisná data svazku - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "Nelze zálohovat popisná data svazku - objekt zálohy popisných dat 'backup.%s." "meta' již existuje" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "Nelze klonovat svazek ze snímku %s." #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "Nelze se připojit k poli %(vendor_name)s %(host)s: %(err)s" msgid "Failed to connect to array" msgstr "Nelze se připojit k poli" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "Nelze se připojit k daemonu sheep. Adresa: %(addr)s, port: %(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "Nelze zkopírovat obraz do svazku: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "Nelze zkopírovat popisná data do svazku: %(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "Nelze zkopírovat svazek, cílové zařízení je nedostupné." msgid "Failed to copy volume, source device unavailable." msgstr "Nelze zkopírovat svazek, zdrojové zařízení je nedostupné." #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "" "Nelze vytvořit skupinu jednotnosti %(cgName)s ze snímku %(cgSnapshot)s." #, python-format msgid "Failed to create IG, %s" msgstr "Nelze vytvořit IG, %s" msgid "Failed to create SolidFire Image-Volume" msgstr "Nelze vytvořit obraz-svazek SolidFire" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Nelze vytvořit skupinu svazku: %(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Nelze vytvořit soubor. (Soubor: %(file)s, ret: %(ret)s, chybový výstup: " "%(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "Nelze vytvořit dočasný snímek pro svazek %s." msgid "Failed to create api volume flow." msgstr "Nelze vytvořit postup api svazku."
#, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "" "Vytvoření snímku skupiny jednotnosti %(id)s selhalo z důvodu %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "Vytvoření skupiny jednotnosti %(id)s selhalo z důvodu %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "Nelze vytvořit skupinu jednotnosti %(id)s:%(ret)s." #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "Nelze vytvořit skupinu jednotnosti %s protože skupina jednotnosti VNX nemůže " "přijmout komprimované LUN jako členy." #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "Nelze vytvořit skupinu jednotnosti: %(cgName)s." #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "Nelze vytvořit skupinu jednotnosti: %(cgid)s. Chyba: %(excmsg)s." #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Nelze vytvořit skupinu jednotnosti: %(consistencyGroupName)s. Návratový kód: " "%(rc)lu. Chyba: %(error)s." #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "Nelze vytvořit id hardwaru v %(storageSystemName)s." #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "Nelze vytvořit hostitele: %(name)s. Zkontrolujte zda existuje v poli." #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "Nelze vytvořit skupinu hostitele: %(name)s. Zkontrolujte zda existuje v poli." msgid "Failed to create iqn." msgstr "Nelze vytvořit iqn." #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "Nelze vytvořit cílové iscsi pro svazek %(volume_id)s." msgid "Failed to create manage existing flow." msgstr "Nelze vytvořit správu existujícího postupu." msgid "Failed to create manage_existing flow." msgstr "Nelze vytvořit postup pro správu existujících." msgid "Failed to create map on mcs, no channel can map." msgstr "Nelze vytvořit mapu na mcs, žádný kanál nemůže mapovat." msgid "Failed to create map." msgstr "Nelze vytvořit mapu." #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "Nelze vytvořit popisná data pro svazek: %(reason)s" msgid "Failed to create partition." msgstr "Nelze vytvořit oddíl." #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" "Nelze vytvořit specifikace qos: %(name)s se specifikacemi %(qos_specs)s." msgid "Failed to create replica." msgstr "Nelze vytvořit repliku." msgid "Failed to create scheduler manager volume flow" msgstr "Nelze vytvořit postup správce plánovače svazku" #, python-format msgid "Failed to create snapshot %s" msgstr "Nelze vytvořit snímek %s" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "Nelze vytvořit snímek protože nebylo zadáno LUN ID" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "Nelze vytvořit snímek pro skupinu jednotnosti: %(cgName)s." #, python-format msgid "Failed to create snapshot for volume %s." msgstr "Nelze vytvořit snímek pro svazek %s." #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "Nelze vytvořit zásadu snímku ve svazku %(vol)s: %(res)s." 
#, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "Nelze vytvořit oblast zdrojů snímku ve svazku %(vol)s: %(res)s." msgid "Failed to create snapshot." msgstr "Nelze vytvořit snímek." #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "Nelze vytvořit snímek. Informace o svazku CloudByte nenalezeny ve svazku " "Openstack [%s]." #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "Nelze vytvořit mělkou zásobu, chybová zpráva byla: %s" #, python-format msgid "Failed to create volume %s" msgstr "Nelze vytvořit svazek %s" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" "Nelze smazat SI svazku s id: %(volume_id)s protože existují dva s tímto id." #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Nelze smazat logické zařízení. (Logické zařízení: %(ldev)s, důvod: " "%(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "Smazání snímku skupiny jednotnosti %(id)s selhalo z důvodu %(reason)s." #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "Smazání skupiny jednotnosti %(id)s selhalo z důvodu %(reason)s." #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "Nelze smazat skupinu jednotnosti: %(cgName)s." #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Nelze smazat skupinu jednotnosti. %(consistencyGroupName)s. Návratový kód: " "%(rc)lu. Chyba: %(error)s." msgid "Failed to delete device." msgstr "Smazání zařízení selhalo." #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Nelze smazat sadu souborů ze skupinu jednotnosti %(cgname)s. Chyba: " "%(excmsg)s." msgid "Failed to delete iqn." msgstr "Nelze smazat iqn." msgid "Failed to delete map." msgstr "Nelze smazat mapu." msgid "Failed to delete partition." msgstr "Nelze smazat oddíl." msgid "Failed to delete replica." msgstr "Nelze smazat repliku." #, python-format msgid "Failed to delete snapshot %s" msgstr "Nelze smazat snímek %s" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "Nelze smazat snímek pro skupinu jednotnosti: %(cgId)s." #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "Nelze smazat snímek s id %s protože existují dva s tímto id." msgid "Failed to delete snapshot." msgstr "Nelze smazat snímek." #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "Nelze smazat svazek %(volumeName)s." #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "Nelze smazat svazek pro svazek s id: %(volume_id)s protože existují dva s " "tímto id." #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "Nelze odpojit cílové iSCSI pro svazek %(volume_id)s." msgid "Failed to determine blockbridge API configuration" msgstr "Nelze zjistit nastavení blockbridge API" msgid "Failed to disassociate qos specs." msgstr "Nelze odloučit specifikace qos." #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Nelze odloučit specifikace qos: %(specs_id)s s typem %(type_id)s." 
#, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "Nelze zajistit oblast zdrojů snímku, nelze nalézt svazek s id %s" msgid "Failed to establish SSC connection." msgstr "Nelze vytvořit připojení SSC." msgid "Failed to establish connection with Coho cluster" msgstr "Zavedení připojení s clusterem Coho selhalo" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "Nelze provést [%(cmd)s] CloudByte API. Stav http: %(status)s, chyba: " "%(error)s." msgid "Failed to execute common command." msgstr "Nelze provést běžný příkaz." #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "Nelze exportovat pro svazek: %(reason)s" msgid "Failed to find Storage Center" msgstr "Nelze najít centrum úložiště" msgid "Failed to find a vdisk copy in the expected pool." msgstr "V zadané zásobě nelze najít kopii virtuálního disku." msgid "Failed to find account for volume." msgstr "Nelze najít účet pro svazek." #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" "Nelze najít sadu souborů pro cestu %(path)s, výstup příkazu: %(cmdout)s." #, python-format msgid "Failed to find host %s." msgstr "Nelze najít hostitele %s." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "Nelze najít zásobu úložiště pro zdrojový svazek %s." #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Nelze získat údaje účtu CloudByte [%s]." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "Nelze získat podrobnosti cíle LUN pro LUN %s" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "Nelze získat podrobnosti cílového LUN %s." #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "Nelze získat seznam cílů LUN pro LUN %s" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "Nelze získat ID oddílu pro svazek %(volume_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "Nelze získat ID Raid snímku %(snapshot_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "Nelze získat ID Raid snímku %(snapshot_id)s." #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "Nelze získat zdroj úložiště. Systém se ho pokusí získat znovu. (Zdroj: " "%(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "Nelze získat všechna přidružení specifikací qos %s" msgid "Failed to get channel info." msgstr "Nelze získat informace o kanálu." #, python-format msgid "Failed to get code level (%s)." msgstr "Nelze získat úroveň kódu (%s)." msgid "Failed to get device info." msgstr "Nelze získat informace o zařízení." #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "" "Nelze získat doménu protože společná skupiny poskytování (%s) neexistuje v " "poli." #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "Nelze získat ip na kanálu %(channel_id)s se svazkem: %(volume_id)s." msgid "Failed to get iqn info." msgstr "Nelze získat informace o iqn." msgid "Failed to get license info." msgstr "Nelze získat informace o licenci." msgid "Failed to get lv info." 
msgstr "Nelze získat informace o lv." msgid "Failed to get map info." msgstr "Nelze získat informace o mapě." msgid "Failed to get model update from clone" msgstr "Nelze získat aktualizaci modelu z klona" msgid "Failed to get name server info." msgstr "Nelze získat informace o jmenném serveru." msgid "Failed to get network info." msgstr "Nelze získat informace o síti." #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "Nelze získat nové id části v nové zásobě: %(pool_id)s." msgid "Failed to get partition info." msgstr "Nelze získat informace o oddílu." #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "Nelze získat zásobu svazku s id %(volume_id)s." msgid "Failed to get replica info." msgstr "Nelze získat informace o replice." msgid "Failed to get show fcns database info." msgstr "Nelze získat zobrazení informací o databázi fcns." #, python-format msgid "Failed to get size of volume %s" msgstr "Nelze získat velikost svazku %s" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "Nelze získat snímek pro svazek %s." msgid "Failed to get snapshot info." msgstr "Nelze získat informace o snímku." #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "Nelze získat cílové IQN pro LUN %s" #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "Nelze získat cílový portál LUN pro LUN %s" msgid "Failed to get targets" msgstr "Nelze získat cíle" msgid "Failed to get wwn info." msgstr "Nelze získat informace o wwn." #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "Nelze získat, vytvořit, nebo přidat svazek %(volumeName)s pro zamaskování " "%(maskingViewName)s. Obdržená chybová zpráva byla %(errorMessage)s." msgid "Failed to identify volume backend." msgstr "Nelze zjistit podpůrnou vrstvu svazku." #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "Nelze propojit sadu souborů se sdílením %(cgname)s. Chyba: %(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "Nelze se přihlásit k %s Pole (neplatné přihlášení?)." #, python-format msgid "Failed to login for user %s." msgstr "Nelze se přihlásit jako uživatel %s." msgid "Failed to login with all rest URLs." msgstr "Nelze se přihlásit pomocí jakékoliv z REST URL." #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "Nelze zažádat pro koncový bod clusteru Datera z následujícího důvodu: %s" msgid "Failed to manage api volume flow." msgstr "Nelze spravovat postup api svazku." #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Nelze spravovat existující %(type)s %(name)s, protože nahlášená velikost " "%(size)s není číslo s plovoucí desetinnou čárkou." #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "Nelze spravovat existující svazek %(name)s, protože při získávání velikosti " "svazku došlo k chybě." #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "Nelze spravovat existující svazek %(name)s, protože operace pro přejmenování " "selhala: Chybová zpráva: %(msg)s." 
#, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Nelze spravovat existující svazek %(name)s, protože nahlášená velikost " "%(size)s není číslo s plovoucí desetinnou čárkou." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "Nelze spravovat existující svazky protože zásoba zvoleného typu svazku " "neodpovídá sdílení NFS předanému v odkazu svazku." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "Nelze spravovat existující svazky protože zásoba zvoleného typu svazku " "neodpovídá souborovému systému předanému v odkazu svazku." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "" "Nelze spravovat existující svazky protože zásoba zvoleného typu svazku " "neodpovídá zásobě hostitele." #, python-format msgid "Failed to manage volume %s." msgstr "Nelze spravovat svazek %s." #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "Nelze mapovat logické zařízení. (Logické zařízení: %(ldev)s, LUN: %(lun)s, " "port: %(port)s, id: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "Nelze přesunout svazek poprvé." msgid "Failed to migrate volume for the second time." msgstr "Nelze přesunout svazek podruhé." #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "Nelze přesunout mapování LUN. Návratový kód: %s" #, python-format msgid "Failed to move volume %s." msgstr "Nelze přesunout svazek %s." #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Nelze otevřít soubor. (Soubor: %(file)s, ret: %(ret)s, chybový výstup: " "%(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Nelze zpracovat výstup rozhraní příkazového řádku:\n" "příkaz: %(cmd)s\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "Nelze zpracovat volbu nastavení 'keystone_catalog_info', musí být ve formátu " "::" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "Nelze zpracovat volbu nastavení 'swift_catalog_info', musí být ve formátu " "::" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "Nelze provést reklamaci nulté stránky. (Logické zařízení: %(ldev)s, důvod: " "%(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "Nelze odstranit exportování svazku %(volume)s: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "Nelze odstranit cílové iscsi pro svazek %(volume_id)s." #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Nelze odstranit svazek %(volumeName)s ze skupiny jednotnosti %(cgName)s. " "Návratový kód: %(rc)lu. Chyba: %(error)s." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." 
msgstr "Nelze odstranit svazek %(volumeName)s z výchozí skupiny úložiště." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "" "Nelze odstranit svazek %(volumeName)s z výchozí skupiny úložiště: " "%(volumeName)s." #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "Nelze odstranit: %(volumename)s. z výchozí skupiny úložiště pro zásadu FAST " "%(fastPolicyName)s." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "Nelze přejmenovat logický svazek %(name)s, chybová zpráva byla: %(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "Nelze získat aktivní nastavení zónování %s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "Nelze nastavit Qos pro existující svazek %(name)s, chybová zpráva: %(msg)s." msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "Nelze nastavit vlastnost 'Incoming user' pro cíl SCST." msgid "Failed to set partition." msgstr "Nelze nastavit oddíl." #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "Nelze nastavit oprávnění pro skupinu jednotnosti %(cgname)s. Chyba: " "%(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "Nelze zadat logické zařízení pro svazek %(volume_id)s pro zrušení namapování." #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "Nelze zadat logické zařízení ke smazání. (Metoda: %(method)s, id: %(id)s)" msgid "Failed to terminate migrate session." msgstr "Nelze ukončit sezení přesunu." #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "Nelze rozvázat svazek %(volume)s" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Nelze odpojit sadu souborů pro skupinu jednotnosti %(cgname)s. Chyba: " "%(excmsg)s." #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Nelze zrušit mapování logického zařízení. (Logické zařízení: %(ldev)s, " "důvod: %(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "Nelze aktualizovat skupinu jednotnosti: %(cgName)s." #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "Nelze aktualizovat popisná data svazku: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "Nelze aktualizovat nebo smazat nastavení zónování" #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "Nelze aktualizovat specifikace qos: %(specs_id)s se specifikacemi " "%(qos_specs)s." msgid "Failed to update quota usage while retyping volume." msgstr "Nelze aktualizovat využití kvóty při přetypování svazku." #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "Nelze aktualizovat model s ovladačem poskytnutým modelem %(model)s" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "Nelze aktualizovat popisná data svazku %(vol_id)s pomocí zadaných dat " "%(src_type)s %(src_id)s" #, python-format msgid "Failure creating volume %s." 
msgstr "Nelze vytvořit svazek %s." #, python-format msgid "Failure getting LUN info for %s." msgstr "Nelze získat informace LUN pro %s." #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "Selhání v aktualizaci páru klíč-hodnota svazku:%s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "Nelze přesunout nově klonovaný LUN do %s." #, python-format msgid "Failure staging LUN %s to tmp." msgstr "Nelze zařadit LUN %s do tmp." msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "Kritická chyba: Uživatel nemá oprávnění dotazovat se na svazky NetApp." #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "Flexvisor nemohl přidat svazek %(id)s, z důvodu %(reason)s." #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Flexvisor nemohl připojit svazek %(vol)s ve skupině %(group)s z důvodu " "%(ret)s." #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Flexvisor nemohl odstranit svazek %(vol)s ve skupině %(group)s z důvodu " "%(ret)s." #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "Flexvisor nemohl odstranit svazek %(id)s, z důvodu %(reason)s." #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "Selhání vyhledávání SAN Fibre Channel: %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "Selhání zóny operací Fibre Channel: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "Selhání kontroly připojení Fibre Channel: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "Soubor %(file_path)s nemohl být nalezen." #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Soubor %(path)s má neplatný záložní soubor %(bfile)s, operace přerušena." #, python-format msgid "File already exists at %s." msgstr "Soubor s nastavením již existuje v %s." #, python-format msgid "File already exists at: %s" msgstr "Soubor již existuje v: %s" msgid "Find host in hostgroup error." msgstr "Chyba při hledání hostitele ve skupině hostitele." msgid "Find host lun id error." msgstr "Chyba při hledání id hostitele lun." msgid "Find lun group from mapping view error." msgstr "Chyba při hledání skupiny lun v zobrazení mapování." msgid "Find lun number error." msgstr "Chyba při hledání čísla lun." msgid "Find mapping view error." msgstr "Chyba při hledání zobrazení mapování." msgid "Find portgroup error." msgstr "Chyba při hledání skupiny portu." msgid "Find portgroup from mapping view error." msgstr "Chyba při hledání skupiny portu v zobrazení mapování." #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "Zásada mezipaměti Flash vyžaduje WSAPI verze '%(fcache_version)s', je " "nainstalována verze '%(version)s'." #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "Flexvisor nemohl přidělit svazek.:%(id)s:%(status)s." #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Flexvisor nemohl přidělit svazek:%(id)s:%(status)s." #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." 
msgstr "" "Flexvisor nemohl najít snímek svazku %(id)s ve snímku %(vgsid)s skupiny " "%(vgid)s." #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "Vytvoření svazku pomocí Flexvisor selhalo:%(volumeid)s:%(status)s." #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor selhal při mazání svazku %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor nemohl přidat svazek %(id)s do skupiny %(cgid)s." #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor nemohl přidělit svazek %(id)s, protože se nešlo dotázat na stav " "pomocí id události." #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor nemohl přidělit svazek %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "Flexvisor nemohl přidělit svazek %(volume)s iqn %(iqn)s." #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor nemohl klonovat svazek %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "Flexvisor nemohl klonovat svazek (nelze získat událost) %(id)s." #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "Flexvisor nemohl vytvořit snímek svazku %(id)s: %(status)s." #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "Flexvisor nemohl vytvořit snímek svazku (nelze získat událost) %(id)s." #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "Flexvisor nemohl vytvořit svazek %(id)s ve skupině %(vgid)s." #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor nemohl vytvořit svazek %(volume)s: %(status)s." #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor nemohl vytvořit svazek (získání události) %s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "Flexvisor nemohl vytvořit svazek ze snímku %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor nemohl vytvořit svazek ze snímku %(id)s: %(status)s." #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor nemohl vytvořit svazek ze snímku (nelze získat událost) %(id)s." #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor nemohl smazat snímek %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "Flexvisor nemohl smazat svazek (nelze získat událost) %(id)s." #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor nemohl smazat svazek %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor nemohl rozšířit svazek %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor nemohl rozšířit svazek %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." 
msgstr "Flexvisor nemohl rozšířit svazek (nelze získat událost) %(id)s." #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "Flexvisor nemohl získat informace o zásobě %(id)s: %(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "Flexvisor nemohl získat id snímku svazku %(id)s ze skupiny %(vgid)s." #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor nemohl odstranit svazek %(id)s ze skupiny %(cgid)s." #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor nemohl spustit svazek ze snímku %(id)s: %(status)s." #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor nemohl spustit svazek ze snímku (nelze získat událost) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "Flexvisor nemohl zrušit přidělení svazku %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "Flexvisor nemohl zrušit přidělení svazku (získání události) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor nemohl zrušit přidělení svazku:%(id)s:%(status)s." #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor nemohl najít informace o zdrojovém svazku %(id)s." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "Zrušení přidělení svazku pomocí Flexvisor selhalo:%(id)s:%(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "Svazek Flexvisor %(id)s nemohl být připojen ke skupině %(vgid)s." #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS neběží, stav: %s." msgid "Gateway VIP is not set" msgstr "Brána VIP není nastavena" msgid "Get FC ports from array error." msgstr "Chyba při získávání portů FC z pole." msgid "Get FC target wwpn error." msgstr "Chyba při získávání cílového FC wwpn." msgid "Get LUNcopy information error." msgstr "Chyba při získávání informací o kopírování LUN." msgid "Get QoS id by lun id error." msgstr "Chyba při získávání id QoS pomocí id lun." msgid "Get QoS information error." msgstr "Chyba při získávání informací o QoS." msgid "Get QoS policy error." msgstr "Chyba při získávání zásad QoS." msgid "Get cache by name error." msgstr "Chyba při získávání mezipaměti pomocí názvu." msgid "Get connected free FC wwn error." msgstr "Chyba při připojování volného FC wwn." msgid "Get host initiators info failed." msgstr "Získávání informací o zavaděčích hostitele selhalo." msgid "Get hostgroup information error." msgstr "Chyba při získávání informací o skupině hostitele." msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "Chyba při získávání informací o portu iSCSI, prosím zkontrolujte cílovou IP " "zadanou v souboru s nastavením huawei." msgid "Get iSCSI port information error." msgstr "Chyba při získávání informací o portu iSCSI." msgid "Get iSCSI target port error." msgstr "Chyba při získávání portu cíle iSCSI." msgid "Get lun migration task error." msgstr "Chyba při získávání úkolu o přesunu lun." msgid "Get lungroup id by lun id error." msgstr "Chyba při získávání id skupiny lun pomocí id lun." msgid "Get lungroup information error." msgstr "Chyba při získávání informací o skupině lun." 
msgid "Get partition by name error." msgstr "Chyba při získávání oddílu podle názvu." msgid "Get partition by partition id error." msgstr "Chyba při získávání oddílu pomocí id oddílu." msgid "Get smartcache by cache id error." msgstr "Chyba při získávání chytré mezipaměti pomocí id mezipaměti." msgid "Get snapshot id error." msgstr "Chyba při získávání id snímku." msgid "Get target IP error." msgstr "Chyba při získávání cílové IP adresy." msgid "Get volume by name error." msgstr "Chyba při získávání svazku podle názvu." msgid "Get volume error." msgstr "Chyba při získávání svazku." #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Popisná data glance nemohou být aktualizována, klíč %(key)s existuje pro " "svazek s id %(volume_id)s" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "Popisná data Glance pro svazek/snímek %(id)s nemohla být nalezena." #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "Konfigurační soubor Gluster v %(config)s neexistuje." #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Selhání api úložiště Google Cloud: %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Chyba v připojení k úložišti Google Cloud: %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Chyba v ověření oauth2 v úložišti Google Cloud: %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "Získány špatné informace o cestě z DRBDmanage! (%s)" msgid "HBSD error occurs." msgstr "Objevena chyba HBSD." msgid "HPELeftHand url not found" msgstr "HPELeftHand URL nenalezena" #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "Velikost bloku kontrolního součtu byla od poslední zálohy změněna, Nová " "velikost bloku kontrolního součtu: %(new)s. Stará velikost bloku kontrolního " "součtu: %(old)s. Proveďte úplnou zálohu." #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "Vrstvy %(tier_levels)s nebyly vytvořeny." #, python-format msgid "Hint \"%s\" not supported." msgstr "Nápověda \"%s\" není podporována." msgid "Host" msgstr "Hostitel" #, python-format msgid "Host %(host)s could not be found." msgstr "Hostitel %(host)s nemohl být nalezen." #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "Hostitel %(host)s neodpovídá obsahu certifikátu x509: Běžný název: " "%(commonName)s." #, python-format msgid "Host %s has no FC initiators" msgstr "Hostitel %s nemá žádné zavaděče FC" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "Hostitel %s nemá žádný zavaděč iSCSI" #, python-format msgid "Host '%s' could not be found." msgstr "Hostitel '%s' nemohl být nalezen." #, python-format msgid "Host group with name %s not found" msgstr "Skupina hostitele s názvem %s nebyla nalezena" #, python-format msgid "Host group with ref %s not found" msgstr "Skupina hostitele mající odkaz %s nebyla nalezena" msgid "Host not found" msgstr "Hostitel nenalezen" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Hostitel nenalezen. Nelze odstranit %(service)s na %(host)s." #, python-format msgid "Host type %s not supported." msgstr "Hostitel typu %s není podporován." 
#, python-format msgid "Host with ports %(ports)s not found." msgstr "Hostitel s porty %(ports)s nenalezen." #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "Skupina vstupu/výstupu %(iogrp)d není platná; platné skupiny jsou %(avail)s." msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "IP adresa/název hostitele API Blockbridge." msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "Je-li komprese nastavena na True, pak rsize musí být také nastaveno (nesmí " "se rovnat -1)." msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "Je-li notfmtdisk nastaveno na True, pak rsize musí být také nastaveno na " "hodnotu -1." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "V protokolu připojení k flashsystem byla nalezena neplatná hodnota " "'%(prot)s': platné hodnoty jsou %(enabled)s." msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "" "Zadána neplatná hodnota pro chytrou vrstvu: nastavte buď na 0, 1, 2, či 3." msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "Zadána neplatná hodnota pro storwize_svc_vol_grainsize: nastavte buď na 32, " "64, 128, nebo 256." msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "Zadána neplatná hodnota pro hloubku: Nelze zadat najednou thin i thick." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Obraz %(image_id)s nemohl být nalezen." #, python-format msgid "Image %(image_id)s is not active." msgstr "Obraz %(image_id)s není aktivní." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" msgid "Image location not present." msgstr "Umístění obrazu není přítomno." #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "Virtuální velikost obrazu je %(image_size)dGB, a proto se nevejde do svazku " "s velikostí %(volume_size)dGB." msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "Při mazání svazku rbd nastala chyba Obraz je zaneprázdněn. To může být " "způsobeno připojením od klienta, které bylo přerušeno a, pokud tomu tak je, " "může být vyřešeno novým pokus o smazání po vypršení 30 vteřin." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "Importování záznamu selhalo, nelze najít zálohovací službu pro provedení " "importu. Požadovaná služba %(service)s" msgid "Incorrect request body format" msgstr "Nesprávný formát těla požadavku" msgid "Incorrect request body format." msgstr "Nesprávný formát těla požadavku." msgid "Incremental backups exist for this backup." msgstr "Pro tuto zálohu existují přírůstkové zálohy." #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Výjimka CLI Infortrend: %(err)s, parametr: %(param)s (Návratový kód: %(rc)s) " "(Výstup: %(out)s)" msgid "Initial tier: {}, policy: {} is not valid." 
msgstr "Původní vrstva: {}, zásada: {} není platné." msgid "Input type {} is not supported." msgstr "Typ vstupu {} není podporován." msgid "Input volumes or snapshots are invalid." msgstr "Vstupní svazky nebo snímky jsou neplatné." msgid "Input volumes or source volumes are invalid." msgstr "Vstupní nebo zdrojové svazky jsou neplatné." #, python-format msgid "Instance %(uuid)s could not be found." msgstr "Instance %(uuid)s nemohla být nalezena." msgid "Insufficient free space available to extend volume." msgstr "Pro rozšíření svazku není dostatek volného místa." msgid "Insufficient privileges" msgstr "Nedostatečná oprávnění" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "" "Hodnota intervalu ( ve vteřinách) mezi jednotlivými pokusy o připojení ke " "clusteru ceph." #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "" "V seznamu vstupních/výstupních portů zadány neplatné porty %(port)s pro " "protokol %(protocol)s." #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Neplatná doména 3PAR: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "Neplatná hodnota ALUA. Hodnota musí být 1 nebo 0." msgid "Invalid Ceph args provided for backup rbd operation" msgstr "Zadány neplatné argumenty Ceph pro záložní operaci rbd" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "Neplatný snímek skupiny jednotnosti: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "Neplatná skupina jednotnosti: %(reason)s" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "Neplatná skupina jednotnosti: Stav skupiny jednotnosti musí být dostupný, " "ale současný stav je: %s." msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" "Neplatná skupina jednotnosti: Žádný hostitel pro vytvoření skupiny " "jednotnosti." #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "Nalezena neplatná verze API HPELeftHand: %(found)s. Pro podporu spravování " "je vyžadována verze %(minimum)s nebo vyšší." #, python-format msgid "Invalid IP address format: '%s'" msgstr "Neplatný formát IP adresy: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "Při získávání zásady QoS pro svazek %s byla zjištěna neplatná specifikace QoS" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "Neplatný typ ověření VNX: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Neplatná specifikace sdílení úložiště Virtuozzo: %r. Musí být [MDS1[," "MDS2],...:/][:HESLO]." #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "Neplatná verze XtremIO %(cur)s, je vyžadována verze %(min)s nebo vyšší" msgid "Invalid argument" msgstr "Neplatný argument" msgid "Invalid argument - negative seek offset." msgstr "Neplatný argument - záporná odchylka hledání." #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "Neplatný argument - whence=%s není podporováno" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "Neplatný argument - whence=%s není podporováno." #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." 
msgstr "Neplatný režim připojení '%(mode)s' pro svazek %(volume_id)s." #, python-format msgid "Invalid auth key: %(reason)s" msgstr "Neplatný ověřovací klíč: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "Neplatná záloha: %(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "Neplatná url barbican api: je požadována verze, např. 'http[s]://|" "[:port]/', zadaná url je: %s" msgid "Invalid cgsnapshot" msgstr "Neplatný snímek skupiny jednotnosti" msgid "Invalid chap user details found in CloudByte storage." msgstr "" "V úložišti CloudByte nalezeny neplatné přihlašovací údaje CHAP pro uživatele." #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "Neplatná odpověď zavedení připojení od svazku %(name)s" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "Neplatná odpověď zavedení připojení od svazku %(name)s: %(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Neplatný typ obsahu %(content_type)s." msgid "Invalid credentials" msgstr "Neplatné přihlašovací údaje" #, python-format msgid "Invalid directory: %s" msgstr "Neplatný adresář: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "Neplatný typ adaptéru disku: %(invalid_type)s." #, python-format msgid "Invalid disk backing: %s." msgstr "Neplatné zálohování disku: %s" #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "Neplatný typ disku: %(disk_type)s." #, python-format msgid "Invalid disk type: %s." msgstr "Neplatný typ disku: %s" #, python-format msgid "Invalid host: %(reason)s" msgstr "Neplatný hostitel: %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "Nalezena neplatná verze hpe3parclient (%(found)s). Je vyžadována verze " "%(minimum)s a vyšší. Spusťte \"pip install --upgrade python-3parclient\" pro " "aktualizaci klienta." #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "Nalezena neplatná verze hpelefthandclient (%(found)s). Je vyžadována verze " "%(minimum)s a vyšší. Spusťte \"pip install --upgrade python-lefthandclient\" " "pro aktualizaci klienta." #, python-format msgid "Invalid image href %(image_href)s." msgstr "Neplatný href %(image_href)s obrazu." msgid "Invalid image identifier or unable to access requested image." msgstr "" "Neplatný identifikátor obrazu nrbo nelze získat přístup k požadovanému " "obrazu." msgid "Invalid imageRef provided." msgstr "Zadáno neplatné imageRef." msgid "Invalid input" msgstr "Neplatný vstup" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Obdržen neplatný vstup: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "Neplatný filtr is_public [%s]" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Neplatná velikost popisných dat: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Neplatná popisná data: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "Neplatná základna bodu připojení: %s" #, python-format msgid "Invalid mount point base: %s." 
msgstr "Neplatná základna bodu připojení: %s." #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "" "Neplatný nový název snímku společné skupiny poskytování pro přetypování. " "Nový název='%s'." #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Neplatné číslo portu %(config)s pro port RPC Coho" #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "Neplatné specifikace qos: %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "Neplatná žádost o připojení svazku k neplatnému hostiteli" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "Neplatná žádost o připojení svazku s neplatným režimem. Režim připojení by " "měl být 'rw' nebo 'ro'" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Neplatné vypršení rezervace %(expire)s." msgid "Invalid response header from RPC server" msgstr "Neplatná hlavička odpovědi od serveru RPC" msgid "Invalid service catalog json." msgstr "Neplatný json katalog služeb" msgid "Invalid sheepdog cluster status." msgstr "Neplatný stav clusteru sheepdog." #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "Neplatný snímek: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "Neplatný stav: '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "Zažádáno o neplatnou zásobu úložiště %s. Přetypování selhalo." #, python-format msgid "Invalid storage pool %s specificed." msgstr "Zadána neplatná zásoba úložiště %s." msgid "Invalid transport type." msgstr "Neplatný typ přenosu." #, python-format msgid "Invalid update setting: '%s'" msgstr "Neplatné nastavení aktualizace: '%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "Neplatná URL: Musí být ve formátu 'http[s]://|[:port]/" "', zadaná url je: %s" #, python-format msgid "Invalid value '%s' for force." msgstr "Neplatná hodnota '%s' pro vynucení." #, python-format msgid "Invalid value '%s' for force. " msgstr "Neplatná hodnota '%s' pro force." #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "Neplatná hodnota '%s' pro is_public. Patné hodnoty: True nebo False. " #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "Neplatná hodnota '%s' pro přeskočení ověření." #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "Neplatná hodnota pro 'bootable': '%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "Neplatná hodnota pro 'force': '%s'" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "Neplatná hodnota pro 'readonly': '%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "Neplatná hodnota pro 'scheduler_max_attempts', musí být >=1" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "Neplatná hodnota pro volbu nastavení NetApp netapp_host_type." msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "Neplatná hodnota pro volbu nastavení NetApp netapp_lun_ostype." #, python-format msgid "Invalid value for age, %(age)s" msgstr "Neplatná hodnota pro věk, %(age)s" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." 
msgstr "" "Zadána neplatná velikost svazku při žádosti o vytvoření: %s (argument " "velikosti musí být celé číslo (nebo celé číslo zadané pomocí řetězce) a " "větší než nula)." #, python-format msgid "Invalid volume type: %(reason)s" msgstr "Neplatný typ svazku: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Neplatný svazek: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "Neplatný svazek: Nelze přidat svazek %(volume_id)s do skupiny jednotnosti " "%(group_id)s protože svazek je v neplatném stavu: %(status)s. Platné stavy " "jsou: ('available', 'in-use')." #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "Neplatný svazek. Nelze přidat svazek %(volume_id)s do skupiny jednotnosti " "%(group_id)s protože typ svazku %(volume_type)s tato skupina nepodporuje." #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "Neplatný svazek: Nelze přidat svazek valešné-uuid-svazku do skupiny " "jednotnosti %(group_id)s protože svazek nelze nalézt." #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "Neplatný svazek: Nelze odstranit svazek falešné-uuid-svazku ze skupiny " "jednotnosti %(group_id)s protože není ve skupině." #, python-format msgid "Invalid volume_type passed: %s." msgstr "Předán neplatný typ svazku: %s." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "Zadán neplatný typ svazku: %s (požadovaný typ není kompatibilní; buď se musí " "shodovat se zdrojovým svazkem, nebo vynechejte argument zadávající typ)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "Zadán neplatný typ svazku: %s (požadovaný typ není kompatibilní; doporučuje " "se vynechat argument zadávající typ)" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "Zadán neplatný typ svazku: %s (požadovaný typ musí být podporován touto " "skupinou jednotnosti)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "Neplatný formát wwpns %(wwpns)s" msgid "Invoking web service failed." msgstr "Vyvolání internetové služby selhalo." msgid "Issue encountered waiting for job." msgstr "Při čekání na úkol se vyskytl problém." msgid "Issue encountered waiting for synchronization." msgstr "Při čekání na synchronizaci se vyskytl problém." msgid "Item not found" msgstr "Položka nenalezena" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "Úkol nebyl nalezen v CloudByte odpovědi pro vytvoření svazku [%s]." #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "Úkol nebyl nalezen v CloudByte odpovědi pro smazání svazku [%s]." msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." 
msgstr "" "Názvy klíče mohou obsahovat pouze alfanumerické znaky, podtržítka, tečky, " "dvojtečky a pomlčky." #, python-format msgid "KeyError: %s" msgstr "Chyba klíče: %s" msgid "LUN export failed!" msgstr "Export LUN selhal!" msgid "LUN id({}) is not valid." msgstr "LUN id({}) není platné." msgid "LUN map overflow on every channel." msgstr "Přetečení LUN mapy ve všech kanálech." #, python-format msgid "LUN not found with given ref %s." msgstr "LUN nenalezena pomocí zadaného odkazu %s." msgid "LUN number ({}) is not an integer." msgstr "LUN číslo ({}) není celé číslo." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "Číslo LUN je mimo rozsah v kanálu s id: %(ch_id)s." #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "LUN se zadaným odkazem %(ref)s nesplňuje typ svazku. Ujistěte se, že je na " "virtuálním serveru %(vs)s přítomen svazek LUN mající funkce ssc." #, python-format msgid "Last %s cinder syslog entries:-" msgstr "Posledních %s položek cinder v záznamu systému:-" msgid "LeftHand cluster not found" msgstr "Cluster LeftHand nebyl nalezen" #, python-format msgid "Line %(dis)d : %(line)s" msgstr "Řádek %(dis)d : %(line)s" msgid "Link path already exists and its not a symlink" msgstr "Cesta odkazu již existuje a není to symbolický odkaz." #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "Propojený klon zdrojového svazku není podporován ve stavu: %s." msgid "Lock acquisition failed." msgstr "Získání zámku selhalo." msgid "Logout session error." msgstr "Chyba odhlášení sezení." msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "Služba vyhledávání není nastavena. Volba nastavení fc_san_lookup_service " "musí udávat konkrétní zavedení této služby." msgid "Lun migration error." msgstr "Chyba při přesunu Lun." #, python-format msgid "MSG_DENIED: %r" msgstr "Zpráva zamítnuta: %r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "Zpráva zamítnuta: Chyba při ověření: %r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "Zpráva zamítnuta: Neshoda vzdáleného volání procedur: %r" #, python-format msgid "Malformed fcns output string: %s" msgstr "Poškozený řetězec výstupu fcns: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Poškozené tělo zprávy: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "Poškozený řetězec jmenného serveru: %s" msgid "Malformed request body" msgstr "Poškozené tělo požadavku" msgid "Malformed request body." msgstr "Poškozené tělo požadavku." msgid "Malformed request url" msgstr "Poškozená url požadavku" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "Poškozená odpověď na příkaz %(cmd)s: %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "Poškozená vlastnost scheduler_hints" #, python-format msgid "Malformed show fcns database string: %s" msgstr "Poškozené zobrazení řetězce databáze fcns: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "Poškozené nastavení zóny: (rozbočovač=%(switch)s nastavení zóny=" "%(zone_config)s)." #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." 
msgstr "" "Poškozený stav zóny: (rozbočovač=%(switch)s nastavení zóny=%(zone_config)s)." msgid "Manage existing get size requires 'id'." msgstr "Správa existujícího získání velikosti vyžaduje 'id'." msgid "Manage existing snapshot not implemented." msgstr "Správa existujícího snímku není zavedena." #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "Správa existujícího svazku selhala kvůli neplatnému odkazu na podpůrnou " "vrstvu %(existing_ref)s: %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "Správa existujícího svazku selhala kvůli neshodě s typem svazku: %(reason)s" msgid "Manage existing volume not implemented." msgstr "Správa existujícího svazku není zavedena." msgid "Manage existing volume requires 'source-id'." msgstr "Správa existujícího svazku vyžaduje 'source-id'." #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "Správa svazku není podporována, pokud je FAST povoleno. Zásada FAST: " "%(fastPolicyName)s." #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "Připravení mapování %(id)s nemohlo být dokončeno v rámci přiděleného " "časového limitu %(to)d vteřin. Ukončování." #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "Maskování %(maskingViewName)s nebylo úspěšně smazáno" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "Překročen maximální povolený počet záloh (%(allowed)d)" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "Překročen maximální povolený počet snímků (%(allowed)d)" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "Maximální povolený počet svazků (%(allowed)d) překračuje kvótu '%(name)s'." #, python-format msgid "May specify only one of %s" msgstr "Lze zadat pouze jedno z %s" msgid "Metadata backup already exists for this volume" msgstr "Záloha popisných dat je pro tento svazek již vytvořena" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "Object zálohy popisných dat '%s' již existuje" msgid "Metadata item was not found" msgstr "Položka popisných dat nenalezena" msgid "Metadata item was not found." msgstr "Položka popisných dat nebyla nalezena." #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "Klíč vlastnosti popisných dat %s je větší než 255 znaků" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "Hodnota vlastnosti klíče popisných dat %s je vetší než 255 znaků" msgid "Metadata property key blank" msgstr "Klíč vlastnosti popisných dat je prázdný" msgid "Metadata property key blank." msgstr "Klíč vlastnosti popisných dat je prázdný." msgid "Metadata property key greater than 255 characters." msgstr "Klíč vlastnosti popisných dat je větší než 255 znaků." msgid "Metadata property value greater than 255 characters." msgstr "Hodnota vlastnosti popisných dat je vetší než 255 znaků." msgid "Metadata restore failed due to incompatible version" msgstr "Obnovení popisných dat selhalo kvůli nekompatibilní verzi" msgid "Metadata restore failed due to incompatible version." msgstr "Obnovení popisných dat selhalo kvůli nekompatibilní verzi." 
#, python-format msgid "Migrate volume %(src)s failed." msgstr "Přesunutí svazku %(src)s selhalo." #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "Přesun svazku selhal mezi zdrojovým %(src)s a cílovým %(dst)s svazkem." #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "Přesun LUN %s byl zastaven nebo je chybný." msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "Chybí python modul 'purestorage', ujistěte se, že knihovna je nainstalovaná " "a dostupná." msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "Chybí parametr nastavení Fibre Channel SAN - fc_fabric_names" msgid "Missing request body" msgstr "Chybí tělo žádosti" msgid "Missing request body." msgstr "Chybí tělo žádosti." #, python-format msgid "Missing required element '%s' in request body" msgstr "V těle žádosti chybí povinný prvek '%s'" #, python-format msgid "Missing required element '%s' in request body." msgstr "V těle žádosti chybí povinný prvek '%s'." msgid "Missing required element 'consistencygroup' in request body." msgstr "V těle žádosti chybí povinný prvek 'consistencygroup'." msgid "Missing required element 'host' in request body." msgstr "V těle žádosti chybí povinný prvek 'host'." msgid "Missing required element quota_class_set in request body." msgstr "V těle žádosti chybí povinný prvek quota_class_set." msgid "Missing required element snapshot in request body." msgstr "V těle žádosti chybí požadovaný prvek snapshot." msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "Nalezeno mnoho sériových čísel, ale pro tuto operaci bylo očekáváno pouze " "jedno. Prosím upravte váš soubor s nastavením EMC." #, python-format msgid "Multiple copies of volume %s found." msgstr "Nalezeno mnoho kopií svazku %s." #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "Nalezeno mnoho shod pro '%s', použijte ID pro zpřesnění hledání." msgid "Multiple profiles found." msgstr "Nalezeno mnoho profilů." msgid "Must implement a fallback schedule" msgstr "Je nutné zavést záložní plán" msgid "Must implement find_retype_host" msgstr "Je nutné zavést find_retype_host" msgid "Must implement host_passes_filters" msgstr "Je nutné zavést host_passes_filters" msgid "Must implement schedule_create_consistencygroup" msgstr "Je nutné zavést schedule_create_consistencygroup" msgid "Must implement schedule_create_volume" msgstr "Je nutné zavést schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "Je nutné zavést schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." msgstr "lsfabric je nutné předat wwpn nebo host." msgid "Must specify 'connector'" msgstr "Musíte zadat 'connector'" msgid "Must specify 'connector'." msgstr "Musíte zadat 'connector'." msgid "Must specify 'host'." msgstr "Musíte zadat 'host'." msgid "Must specify 'new_volume'" msgstr "Musíte zadat 'new_volume'" msgid "Must specify 'status'" msgstr "Musíte zadat 'status'" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "Pro aktualizaci musíte zadat 'status', 'attach_status' nebo " "'migration_status'." 
msgid "Must specify a valid attach status" msgstr "Musíte zadat platný stav připojení" msgid "Must specify a valid migration status" msgstr "Musíte zadat platný stav přesunu" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "Musíte zadat platnou persónu %(valid)s, hodnota '%(persona)s' není platná." #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "Musíte zadat platný typ poskytování %(valid)s, hodnota '%(prov)s' je " "neplatná." msgid "Must specify a valid status" msgstr "Musíte zadat platný stav" msgid "Must specify an ExtensionManager class" msgstr "Musí být určena třída ExtensionManager" msgid "Must specify bootable in request." msgstr "V žádosti musíte zadat bootable." msgid "Must specify protection domain name or protection domain id." msgstr "Musíte zadat název nebo id ochranné domény." msgid "Must specify readonly in request." msgstr "V žádosti musíte zadat readonly." msgid "Must specify storage pool name or id." msgstr "Název nebo id zásoby úložiště musí být zadáno." msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "Musíte zadat zásoby úložiště. Volba: sio_storage_pools." msgid "Must supply a positive value for age" msgstr "Věk musí být zadán pomocí kladné hodnoty" msgid "Must supply a positive, non-zero value for age" msgstr "Musíte zadat kladnou, nenulovou hodnotu pro věk" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "Nastavení NAS '%(name)s=%(value)s' je neplatné. Musí být buď 'auto', 'true', " "nebo 'false'" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "Soubor s nastavením NFS v %(config)s neexistuje" #, python-format msgid "NFS file %s not discovered." msgstr "Soubor NFS %s nebyl nalezen." msgid "NFS file could not be discovered." msgstr "Soubor NFS nemohl být nalezen." msgid "NaElement name cannot be null." msgstr "Název NaElement nemůže být prázdný." msgid "Name" msgstr "Název" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "V těle žádosti nemohou být název, popis, přidání svazků, či odebrání svazků " "všechny prázdné." msgid "Need non-zero volume size" msgstr "Je třeba zadat nenulovou velikost svazku" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "Zpráva nebyla zamítnuta ani přijata: %r" msgid "NetApp Cinder Driver exception." msgstr "Výjimka ovladače NetApp Cinder." #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "Nová velikost pro rozšíření musí být větší než současná. (současná: " "%(size)s, rozšířená: %(new_size)s)." msgid "New volume size must be specified as an integer." msgstr "Nová velikost svazku musí být zadána jako celé číslo." msgid "New volume type must be specified." msgstr "Musíte zadat nový typ svazku." msgid "New volume type not specified in request_spec." msgstr "Nový typ svazku nebyl zadán ve specifikaci žádosti." #, python-format msgid "New volume_type same as original: %s." msgstr "Nový typ svazku stejný jako původní: %s." msgid "Nimble Cinder Driver exception" msgstr "Výjimka ovladače Nimble Cinder" msgid "No FCP targets found" msgstr "Žádné cíle FCP nenalezeny" msgid "No Port Group elements found in config file." msgstr "V souboru nastavení nenalezeny žádné prvky skupiny portu." 
msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "Žádné aktivní portály iSCSI zadané pomocí IP iSCSI" #, python-format msgid "No available service named %s" msgstr "Žádná dostupná služba nazvaná %s" #, python-format msgid "No backup with id %s" msgstr "Žádná záloha s id %s" msgid "No backups available to do an incremental backup." msgstr "Žádná záloha pro provedení přírůstkové zálohy." msgid "No big enough free disk" msgstr "Žádný dostatečně velký volný disk" #, python-format msgid "No cgsnapshot with id %s" msgstr "Žádný snímek jednotnosti s id %s" msgid "No cinder entries in syslog!" msgstr "Žádné položky cinder v záznamu systému!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "Ve správci souborů nebyl nalezen klonovaný LUN s názvem %s" msgid "No config node found." msgstr "Nenalezen žádný uzel nastavení." #, python-format msgid "No consistency group with id %s" msgstr "Žádná skupina jednotnosti s id %s" #, python-format msgid "No element by given name %s." msgstr "Žádný prvek podle zadaného názvu %s." msgid "No errors in logfiles!" msgstr "Žádné chyby v souborech záznamu!" #, python-format msgid "No file found with %s as backing file." msgstr "V %s nenalezen žádný záložní soubor." #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "Žádné další volné ID LUN. Byl překročen maximální počet svazků, které lze " "připojit k hostiteli (%s)." msgid "No free disk" msgstr "Žádný volný disk" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "Pro %s nebyly v zadaném seznamu nalezeny žádné vhodné portály iscsi." #, python-format msgid "No good iscsi portals found for %s." msgstr "Pro %s nenalezeny žádné vhodné portály iscsi." #, python-format msgid "No host to create consistency group %s." msgstr "Žádný hostitel pro vytvoření skupiny jednotnosti %s." msgid "No iSCSI-enabled ports on target array." msgstr "V cílovém poli není žádný port umožňující iSCSI." msgid "No image_name was specified in request." msgstr "V žádosti nebyl zadán název obrazu." #, python-format msgid "No initiator group found for initiator %s" msgstr "Pro zavaděče %s nebyla nalezena žádná skupina zavaděče" msgid "No initiators found, cannot proceed" msgstr "Nebyly nalezeny žádné zavaděče, nelze pokračovat" #, python-format msgid "No interface found on cluster for ip %s" msgstr "V clusteru s ip adresou %s nenalezeno žádné rozhraní." msgid "No ip address found." msgstr "Žádná ip adresa nenalezena." msgid "No iscsi auth groups were found in CloudByte." msgstr "V CloudByte nebyly nalezeny žádné ověřovací skupiny iscsi." msgid "No iscsi initiators were found in CloudByte." msgstr "V CloudByte nebyly nalezeny žádné zavaděče iscsi." #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "Ve svazku Cloudbyte [%s] nebyly nalezeny žádné služby iscsi." msgid "No iscsi services found in CloudByte storage." msgstr "V úložišti Cloudbyte nebyly nalezeny žádné služby iscsi." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "Nezadán žádný klíč a nelze ho načíst z %(cert)s %(e)s." msgid "No mounted Gluster shares found" msgstr "Nenalezena žádné připojená sdílení Gluster" msgid "No mounted NFS shares found" msgstr "Nenalezena žádná připojená sdílení NFS" msgid "No mounted SMBFS shares found." 
msgstr "Nenalezena žádná připojená sdílení SMBFS" msgid "No mounted Virtuozzo Storage shares found" msgstr "Nenalezena žádné připojená sdílení Virtuozzo" msgid "No mounted shares found" msgstr "Nenalezena žádná připojená sdílení" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "Ve skupině vstupu/výstupu %(gid)s nenalezen žádný uzel pro svazek %(vol)s." msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "Pro poskytující svazky nejsou dostupné žádné zásoby. Ujistěte se, že volba " "netapp_pool_name_search_pattern je nastavena správně." msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "Žádná odpověď při výpisu volání API přihlašovacích údajů uživatele iSCSI " "úložiště CloudByte." msgid "No response was received from CloudByte storage list tsm API call." msgstr "Žádná odpověď od výpisu volání api tsm úložiště CloudByte." msgid "No response was received from CloudByte's list filesystem api call." msgstr "Žádná odpověď od výpisu volání api souborového systému CloudByte." msgid "No service VIP configured and no nexenta_client_address" msgstr "Nenastavena žádné VIP pro službu a nejsou žádné adresy klienta nexenta" #, python-format msgid "No snap found with %s as backing file." msgstr "Nenalezen žádný snímek kde %s je záložní soubor." #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "Obraz snímku nebyl nalezen ve skupině snímku %s." #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "Nebyly zadány žádné zdrojové snímky pro vytvoření skupiny jednotnosti %s." #, python-format msgid "No storage path found for export path %s" msgstr "Nenalezena žádná cesta úložiště pro cestu exportu %s" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "Žádné takové specifikace QoS: %(specs_id)s." msgid "No suitable discovery ip found" msgstr "Nenalezena žádná vhodná zjišťovací ip adresa" #, python-format msgid "No support to restore backup version %s" msgstr "Pro obnovení zálohy verze %s není žádná podpora" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "Hostitel nemá k dispozici nepoužité ID LUN; vícenásobné připojení je " "povoleno, což vyžaduje jedinečné ID LUN napříč celou skupinou hostitele." #, python-format msgid "No valid host was found. %(reason)s" msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "Žádný platní hostitelé pro svazek %(id)s s typem %(type)s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "Žádný svazek v clusteru na virtuálním serveru %(vserver)s a cestě spojení " "%(junction)s" msgid "No volume service(s) started successfully, terminating." msgstr "Žádné služby svazku nebyly úspěšně spuštěny, ukončování." msgid "No volume was found at CloudByte storage." msgstr "V úložišti CloudByte nebyl nalezen žádný svazek." msgid "No volume_type should be provided when creating test replica." msgstr "Při vytváření zkušební repliky by neměl být zadán žádný typ svazku." msgid "No volumes found in CloudByte storage." 
msgstr "V úložišti Cloudbyte nebyly nalezeny žádné svazky." msgid "No weighed hosts available" msgstr "Žádní vážení hostitelé nejsou dostupní" #, python-format msgid "Not a valid string: %s" msgstr "%s není platný řetězec." msgid "Not a valid value for NaElement." msgstr "Neplatná hodnota pro NaElement." #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "Nelze nalézt vhodné datové úložiště pro svazek: %s." msgid "Not an rbd snapshot" msgstr "Není snímkem rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Nemáte oprávnění k obrazu %(image_id)s." msgid "Not authorized." msgstr "Není povoleno." #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "Na podpůrné vrstvě je nedostatek místa (%(backend)s)" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "" "Ve sdílení ZFS není dostatek úložného prostoru k provedení této operace." msgid "Not stored in rbd" msgstr "Neuloženo v rbd" msgid "Nova returned \"error\" status while creating snapshot." msgstr "Při vytváření snímku Nova vrátila stav \"error\"." msgid "Null response received from CloudByte's list filesystem." msgstr "Při výpisu souborového sytému CloudByte byla obdržena nulová odpověď." msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "Obdržena prázdná odpověď od výpisu ověřovacích skupin iscsi CloudByte." msgid "Null response received from CloudByte's list iscsi initiators." msgstr "Od výpisu zavaděčů iscsi CloudByte byla obdržena prázdná odpověď." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "Od výpisu služeb iscsi svazku CloudByte byla obdržena prázdná odpověď." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "Při vytváření svazku [%s] v úložišti CloudByte byla obdržena prázdná odpověď." #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" "Při mazání svazku [%s] v úložišti CloudByte byla obdržena prázdná odpověď." #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "Při dotazování úkolu [%(job)s] využívající [%(operation)s] v úložišti " "CloudByte byla obdržena prázdná odpověď." msgid "Number of retries if connection to ceph cluster failed." msgstr "Počet pokusů pokud připojení ke clusteru ceph selže." msgid "Object Count" msgstr "Počet objektů" msgid "Object is not a NetApp LUN." msgstr "Objekt není NetApp LUN." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "Chyba při přidávání svazku do složeného svazku během operace rozšíření: " "%(volumename)s." msgid "One of the required inputs from host, port or scheme was not found." msgstr "" "Jeden z požadovaných vstupů od hostitele, portu, nebo schématu nebyl nalezen." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "Pouze %(value)s požadavky %(verb)s mohou být provedeny pro %(uri)s každých " "%(unit_string)s." msgid "Only one limit can be set in a QoS spec." msgstr "Ve specifikaci QoS lze nastavit pouze jednu mez." msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." 
msgstr "" "Pouze uživatelé mající příznak z nadřazených nebo kořenových projektů mají " "povoleno zobrazit kvóty podřazených projektů" msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "Zrušit správu lze pouze u svazků, jenž spravuje OpenStack." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "Operace selhala se stavem=%(status)s. Úplný výpis: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "Operace není podporována: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "Volba gpfs_images_dir není správně nastavena." msgid "Option gpfs_images_share_mode is not set correctly." msgstr "Volba gpfs_images_share_mode není správně nastavena." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "Volba gpfs_mount_point_base není správně nastavena." msgid "Option map (cls._map) is not defined." msgstr "Mapa voleb (cls._map) není zadána." #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "Původní %(res)s %(prop)s musí mít jednu z hodnot '%(vals)s'" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "Potlačit port HTTPS pro připojení k API serveru Blockbridge." #, python-format msgid "ParseException: %s" msgstr "Chyba zpracování: %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" "Název oddílu je None, prosím nastavte smartpartition:partitionname v klíči." msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "Heslo nebo soukromý klíč SSH jsou vyžadovány pro ověření: nastavte volbu " "san_password nebo san_private_key." msgid "Path to REST server's certificate must be specified." msgstr "Cesta k certifikátu serveru REST musí být zadána." #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "Prosím předem vytvořte zásobu %(pool_list)s!" #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "Prosím předem vytvořte vrstvu %(tier_levels)s v zásobě %(pool)s!" msgid "Please re-run cinder-manage as root." msgstr "Prosím spusťte znovu cinder-manage jako root." msgid "Please specify a name for QoS specs." msgstr "Prosím zadejte název pro specifikaci QoS." #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Zásada nedovoluje, aby bylo %(action)s provedeno." #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "Zásoba %(poolNameInStr)s nebyla nalezena." #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "Zásoba ze svazku['hostitel] %(host)s nebyla nalezena." #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "Zásoba ze svazku['hostitel] selhala: %(ex)s." msgid "Pool is not available in the volume host field." msgstr "Zásoba není dostupná v poli hostitel svazku." msgid "Pool is not available in the volume host fields." msgstr "Zásoba není dostupná v polích hostitele svazku." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "Zásoba s názvem %(pool)s nebyla nalezena v doméně %(domain)s." #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "Zásoba s názvem %(pool_name)s nebyla nalezena v doméně %(domain_id)s." #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." 
msgstr "" "Zásoba: %(poolName)s. není přidružena k vrstvě úložiště pro zásadu fast " "%(fastPolicy)s." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "Název zásoby musí být v souboru %(fileName)s." #, python-format msgid "Pools %s does not exist" msgstr "Zásoby %s neexistují" msgid "Pools name is not set." msgstr "Název zásoby není nastaven." #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "Stav hlavní kopie: %(status)s a synchronizováný: %(sync)s." msgid "Project ID" msgstr "ID projektu" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "Kvóty projekty nejsou pro vnořené kvóty správně nastaveny: %(reason)s." #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "Protokol %(storage_protocol)s není podporován u úložišť druhu " "%(storage_family)s." msgid "Provided backup record is missing an id" msgstr "Zadanému záznamu zálohy chybí id" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "Zadaný stav snímku %(provided)s není povolen pro snímek se stavem " "%(current)s." #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "Informace poskytovatele s ohledem na úložiště Cloudbyte nebyly nalezeny pro " "svazek OpenStack [%s]." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Selhání ovladače Pure Storage Cinder: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "Specifikace QoS %(specs_id)s již existuje." #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "Specifikace QoS %(specs_id)s je stále přidružena k entitám." #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" "Specifikace QoS %(specs_id)s nemají specifikace s klíčem %(specs_key)s." msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" "Specifikace QoS nejsou podporovány u tohoto druhu úložišť a verze ONTAP." msgid "Qos specs still in use." msgstr "Specifikace QoS se stále používá." msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "Parametr dotazů podle služby je zastaralý. Místo toho použijte binární " "parametr." msgid "Query resource pool error." msgstr "Chyba při dotazování zásoby zdroje." #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "Limit kvóty %s musí být větší nebo rovno existujícím zdrojům." #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Třída kvóty %(class_name)s nemohla být nalezena." msgid "Quota could not be found" msgstr "Kvóta nemohla být nalezena." #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Kvóta překročena pro zdroje: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Kvóta překročena: kód=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena." 
#, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "Limit kvóty je pro zdroj '%(res)s' projektu '%(proj)s neplatný: limit " "%(limit)d je menší než použitá hodnota %(used)d" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "Rezervace kvóty %(uuid)s nemohla být nalezena." #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "Využití kvóty pro projekt %(project_id)s nemohla být nalezena." #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" "RBD operace porovnání selhala - (ret=%(ret)s chybový výstup=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s, hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "IP serveru REST musí být zadáno." msgid "REST server password must by specified." msgstr "Heslo serveru REST musí být zadáno." msgid "REST server username must by specified." msgstr "Uživatelské jméno serveru REST musí být zadáno." msgid "RPC server response is incomplete" msgstr "Odpověď serveru RPC je nedokončená" msgid "Raid did not have MCS Channel." msgstr "RAID neměl kanál MCS." #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "Dosaženo omezení nastavené konfigurační volbou max_luns_per_storage_group. " "Operace přidávající %(vol)s do skupiny úložiště %(sg)s byla zamítnuta." #, python-format msgid "Received error string: %s" msgstr "Obdržen chybový řetězec: %s" msgid "Reference must be for an unmanaged snapshot." msgstr "Odkaz musí být pro nespravovaný snímek." msgid "Reference must be for an unmanaged virtual volume." msgstr "Odkaz musí být pro nespravovaný virtuální svazek." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "Odkaz musí být název nespravovaného virtuálního svazku." msgid "Reference must contain either source-id or source-name element." msgstr "Odkaz musí obsahovat prvek source-name nebo source-id." msgid "Reference must contain either source-name or source-id element." msgstr "Odkaz musí obsahovat buď prvek source-name nebo source-id." msgid "Reference must contain source-id or source-name key." msgstr "Odkaz musí obsahovat zdrojové id, nebo klíč mající názem zdroje." msgid "Reference must contain source-id or source-name." msgstr "Odkaz musí obsahovat id nebo název zdroje." msgid "Reference must contain source-name element." msgstr "Odkaz musí obsahovat prvek source-name." msgid "Reference must contain source-name or source-id." msgstr "Odkaz musí obsahovat source-name nebo source-id." msgid "Reference must contain source-name." msgstr "Odkaz musí obsahovat název zdroje." #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "Přesun svazku s ID: %(id)s zamítnut. Prosím zkontrolujte své nastavené " "protože zdroj a cíl jsou ve stejné skupině svazku: %(name)s." msgid "Remove CHAP error." msgstr "Chyba při odstraňování CHAP." msgid "Remove fc from host error." msgstr "Chyba při odstraňování fc z hostitele." msgid "Remove host from array error." msgstr "Chyba při odstraňování hostitele z pole." msgid "Remove host from hostgroup error." 
msgstr "Chyba při odstraňování hostitele ze skupiny hostitele." msgid "Remove iscsi from host error." msgstr "Chyba při odstraňování iscsi z hostitele." msgid "Remove lun from cache error." msgstr "Chyba při odstraňování lun z mezipaměti." msgid "Remove lun from partition error." msgstr "Chyba při odstraňování lun z oddílu." msgid "Remove volume export failed." msgstr "Odstranění exportu svazku selhalo." msgid "Rename lun on array error." msgstr "Chyba při přejmenování lun v poli." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "Schopnost služby replikace nebyla nalezena v %(storageSystemName)s." #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "Služba replikace nebyla nalezena v %(storageSystemName)s." msgid "Replication is not enabled" msgstr "Replikace není povolena" msgid "Replication is not enabled for volume" msgstr "Replikace není povolena pro tento svazek" #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "Stav replikace pro svazek musí být aktivní, nebo aktivní-zastavený, ale " "současný stav je: %s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "Stav replikace pro svazek musí být neaktivní, aktivní-zastavený, nebo " "chybný, ale současný stav je: %s" msgid "Request body and URI mismatch" msgstr "Neshoda s tělem požadavku a URI" msgid "Request body contains too many items" msgstr "Tělo požadavku obsahuje příliš mnoho položek" msgid "Request body contains too many items." msgstr "Tělo požadavku obsahuje příliš mnoho položek." msgid "Request body empty" msgstr "Tělo žádosti je prázdné" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "Požadavek pro cluster Datera vrátil špatný stav: %(status)s | %(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Požadovaná záloha překračuje povolenou kvótu gigabajtů zálohy. Požadováno " "%(requested)sG, kvóta je %(quota)sG a bylo spotřebováno %(consumed)sG." #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Požadovaný svazek nebo snímek překračuje povolenou kvótu %(name)s. " "Požadováno %(requested)sG, kvóta je %(quota)sG a bylo spotřebováno " "%(consumed)sG." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "Požadovaná velikost svazku %(size)d je větší než maximální povolený limit " "%(limit)d." msgid "Required configuration not found" msgstr "Požadované nastavení nenalezeno." #, python-format msgid "Required flag %s is not set" msgstr "Požádovaný příznak %s není nastaven" msgid "Requires an NaServer instance." msgstr "Vyžaduje instanci NaServeru." #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Resetování stavu zálohy zrušeno, v současnosti nastavená služba záloh " "[%(configured_service)s] není stejnou službou použitou k vytvoření této " "zálohy [%(backup_service)s]." 
#, python-format msgid "Resizing clone %s failed." msgstr "Změna velikosti klonu %s selhala." msgid "Resizing image file failed." msgstr "Změna velikosti obrazu selhala." msgid "Resource could not be found." msgstr "Zdroj nemohl být nalezen." msgid "Resource not ready." msgstr "Zdroj není připraven." #, python-format msgid "Response error - %s." msgstr "Chyba odpovědi - %s." msgid "Response error - The storage-system is offline." msgstr "Chyba v odpovědi - Systém úložiště je mimo provoz." #, python-format msgid "Response error code - %s." msgstr "Chybový kód odpovědi - %s." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Obnovení zálohy zrušeno, očekávaný stav svazku je %(expected_status)s ale " "zjištěný stav je %(actual_status)s." #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Obnovení zálohy zrušeno, v současnosti nastavená služba záloh " "[%(configured_service)s] není stejnou službou použitou k vytvoření této " "zálohy [%(backup_service)s]." #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Obnovení zálohy zrušeno, očekávaný stav zálohy je %(expected_status)s ale " "zjištěný stav je %(actual_status)s." #, python-format msgid "Retry count exceeded for command: %s" msgstr "Počet pokusů překročen pro příkaz: %s" msgid "Retryable SolidFire Exception encountered" msgstr "Zjištěna opakovatelná výjimka SolidFire" msgid "Retype cannot change encryption requirements." msgstr "Přetypování nemůže změnit požadavky na šifrování." #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "Přetypování nemůže změnit specifikace qos rozhraní pro používaný svazek: %s." msgid "Retype requires migration but is not allowed." msgstr "Přetypování vyžaduje přesun ale ten není povolen." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "Zpětné vrácení svazku: %(volumeName)s selhalo. Prosím kontaktujte svého " "správce systému pro ruční vrácení vašeho svazku do výchozí skupiny úložiště, " "protože zásada FAST %(fastPolicyName)s selhala." #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "Vrácení %(volumeName)s zpět jeho vymazáním." #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" "Spouštět Cinder pomocí VMware vCenter s verzí starší než %s není povoleno." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "Nastavení SMBFS 'smbfs_oversub_ratio' je neplatné. Musí být > 0: %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "Nastavení SMBFS 'smbfs_used_ratio' je neplatné. Musí být > 0 a <= 1.0: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "Soubor s nastavením SMBFS %(config)s neexistuje." msgid "SMBFS config file not set (smbfs_shares_config)." 
msgstr "Soubor s nastavením SMBFS není zadán " #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "Příkaz SSH selhal po '%(total_attempts)r' pokusech : '%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "Příkaz SSH selhal s chybou: '%(err)s', příkaz: '%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Zjištěno vložení příkazů SSH: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "SSH připojení selhalo v %(fabric)s s chybou: %(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "Certifikát SSL vypršel %s." #, python-format msgid "SSL error: %(arg)s." msgstr "Chyba SSL: %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Filtr hostitelů plánovače %(filter_name)s nemohl být nalezen." #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "Váha plánovače hostitele %(weigher_name)s nemohla být nalezena." #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "Stav druhé kopie: %(status)s a synchronizováný: %(sync)s, postup " "synchronizace je: %(progress)s%%." #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "Sériové číslo musí být v souboru %(fileName)s." #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "Služba %(service)s odstraněna na hostiteli %(host)s." #, python-format msgid "Service %(service_id)s could not be found." msgstr "Služba %(service_id)s nemohla být nalezena." #, python-format msgid "Service %s not found." msgstr "Služba %s nenalezena." msgid "Service is unavailable at this time." msgstr "Služba je v tuto chvíli nedostupná." msgid "Service not found." msgstr "Služba nenalezena." msgid "Sets thin provisioning." msgstr "Nastaví mělké poskytování." msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "Nastavení skupin zásad QoS LUN není podporováno u tohoto druhu úložišť a " "verze ONTAP." msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "Nastavení souboru skupiny zásad QoS není podporováno u tohoto druhu úložišť " "a verze ONTAP." #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "Sdílení %s je ignorováno kvůli špatnému formátu. Musí mít formátu adresa:/" "export. Prosím zkontrolujte nastavení IP NAS a cesty ke sdílení NAS." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "Do sdílení v %(dir)s nemůže služba svazků Cinder zapisovat. Operace se " "snímky nebudou podporovány." #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Chyba vstupu/výstupu Sheepdog, příkaz byl: \"%s\"." msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "Zobrazení operací lze pouze u projektů ve stejné hierarchii jako projekt do " "kterého jsou uživatelé zařazeni." msgid "Size" msgstr "Velikost" #, python-format msgid "Size for volume: %s not found, cannot secure delete." 
msgstr "Velikost pro svazek: %s nenalezeno, nelze bezpečně smazat." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "Velikost je %(image_size)dGB a nevejde se do svazku o velikosti " "%(volume_size)dGB." #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "Velikost zadaného obrazu %(image_size)sGB je větší než velikost svazku " "%(volume_size)sGB." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Snímek %(snapshot_id)s nemohl být nalezen." #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "Snímek %(snapshot_id)s nemá žádná popisná data mající klíč %(metadata_key)s." #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "Snímek '%s' neexistuje v poli." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "Snímek nelze vytvořit protože svazek %(vol_id)s není dostupný, současný stav " "svazku: %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "Snímek nemůže být vytvořen při přesunu svazku." msgid "Snapshot of secondary replica is not allowed." msgstr "Snímek druhotné repliky není povolen." #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "Snímek svazku není podporován ve stavu: %s." #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "Zdroj snímku \"%s\" který není nikde zaveden?" #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "Stav snímku %(cur)s není povolen při aktualizaci stavu snímku" msgid "Snapshot status must be \"available\" to clone." msgstr "Stav snímku musí být pro klonování \"available\"." #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "Zálohovaný snímek musí být dostupný, ale jeho stav je nyní \"%s\"." #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "Snímek='%(snap)s' neexistuje v základním obrazu='%(base)s' - rušení " "přírůstkové zálohy" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "Snímky nejsou podporovány pro tento formát snímku: %s" #, python-format msgid "Socket error: %(arg)s." msgstr "Chyba soketu: %(arg)s." msgid "SolidFire Cinder Driver exception" msgstr "Výjimka ovladače SolidFire Cinder" msgid "Sort direction array size exceeds sort key array size." msgstr "Pole směru řazení překračuje velikost pole klíče řazení." msgid "Source CG is empty. No consistency group will be created." msgstr "" "Zdrojová skupina jednotnosti je prázdná. Žádná skupina nebude vytvořena." msgid "Source host details not found." msgstr "Podrobnosti zdrojového hostitele nenalezeny." msgid "Source volume device ID is required." msgstr "ID zařízení zdrojového svazku je vyžadováno." msgid "Source volume not mid-migration." msgstr "Zdrojový svazek není uprostřed přesunu." #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "Zdroj s ip/názvem hostitele: %s nebyl nalezen v cílovém zařízení pro přesun " "svazku pomocí podpůrné vrstvy, pokračuje se ve výchozím přesunu." 
msgid "SpaceInfo returned byarray is invalid" msgstr "Byarray vrácený od Spaceinfo je neplatný" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "Zadaný hostitel pro namapování ke svazku %(vol)s je v nepodporované skupině " "hostitele s %(group)s." msgid "Specified logical volume does not exist." msgstr "Zadaný logický svazek neexistuje." msgid "Specify a password or private_key" msgstr "Zadejte heslo nebo soukromý klíč" msgid "Specify san_password or san_private_key" msgstr "Zadejte san_password nebo san_private_key" msgid "Start LUNcopy error." msgstr "Chyba při spuštění kopírování LUN." msgid "State" msgstr "Stav" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "Špatný stav uzlu. Současný stav je %s." msgid "Status" msgstr "Stav" msgid "Stop snapshot error." msgstr "Chyba při zastavování snímku." #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "Služba nastavení úložiště nebyla nalezena v %(storageSystemName)s." #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "Služba správy id hardwaru úložiště nebyla nalezena v %(storageSystemName)s." #, python-format msgid "Storage Profile %s not found." msgstr "Profil úložiště %s nenalezen." #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "" "Služba změny umístění úložiště nebyla nalezena v %(storageSystemName)s." #, python-format msgid "Storage family %s is not supported." msgstr "Druhy úložiště %s nejsou podporovány." #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "Skupina úložiště %(storageGroupName)s nebyla úspěšně smazána" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "Hostitel úložiště %(svr)s nebyl zjištěn, ověřte název" #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "Profil úložiště: %(storage_profile)s nenalezen." msgid "Storage resource could not be found." msgstr "Zdroj úložiště nemohl být nalezen." msgid "Storage system id not set." msgstr "ID úložného systému nebylo nastaveno." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Systém úložiště nebyl nalezen pro zásobu %(poolNameInStr)s." msgid "Storage-assisted migration failed during manage volume." msgstr "Přesun za pomocí úložiště selhal během správy svazku." #, python-format msgid "StorageSystem %(array)s is not found." msgstr "Systém úložiště %(array)s nebyl nalezen." #, python-format msgid "String with params: %s" msgstr "Řetězec s parametry: %s" msgid "Synchronizing secondary volume to primary failed." msgstr "Synchronizace druhotného svazku na hlavní selhala." #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "Systém %(id)s nalezen se stavem špatného hesla - %(pass_status)s." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "Byl nalezen systém %(id)s se špatným stavem - %(status)s." msgid "System does not support compression." msgstr "Systém nepodporuje kompresi." msgid "System is busy, retry operation." msgstr "Systém je zaneprázdněn, zopakujte operaci." #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "TSM [%(tsm)s] nebylo v úložišti CloudByte nalezeno pro účet [%(account)s]." msgid "Target volume type is still in use." 
msgstr "Cílový typ svazku se stále používá." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "Neshoda stromu šablony; přidávání sluhy %(slavetag)s k pánovi %(mastertag)s" #, python-format msgid "Tenant ID: %s does not exist." msgstr "ID nájemníka: %s neexistuje." msgid "Terminate connection failed" msgstr "Ukončení připojení selhalo" msgid "Terminate connection unable to connect to backend." msgstr "Nelze se připojit k podpůrné vrstvě pro ukončení připojení." #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "Ukončení připojení svazku selhalo: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "Zdroj %(type)s %(id)s pro replikování nebyl nalezen." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "Parametry 'sort_key' a 'sort_dir' jsou zastaralé a nemohou být použity s " "parametrem 'sort'." msgid "The EQL array has closed the connection." msgstr "Pole EQL uzavřelo připojení." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "Systém souborů GPFS %(fs)s není na požadované úrovni vydání. Současná úroveň " "je %(cur)s, musí být alespoň %(min)s." msgid "The IP Address was not found." msgstr "IP adresa nebyla nalezena." #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "Požadavek WebDAV selhal. Důvod: %(msg)s, návratový kód/důvod: %(code)s, " "zdrojový svazek: %(src)s, cílový svazek: %(dst)s. metoda %(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "Chyba výše naznačuje, že databáze nebyla vytvořena.\n" "Prosím vytvořte ji pomocí 'cinder-manage db sync' před zadáním tohoto " "příkazu." #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "Pole nepodporuje nastavení zásoby úložiště pro SLO %(slo)s a zatížení " "%(workload)s. Prosím zkontrolujte pole pro platné SLO a zatížení." #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "Příkaz %(cmd)s selhal. (ret: %(ret)s, standardní výstup: %(out)s, chybový " "výstup: %(err)s." msgid "The copy should be primary or secondary" msgstr "Kopie by měla být primární nebo sekundární" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "" "Vytvoření logického zařízení nemohlo být dokončeno. (Logické zařízení: " "%(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "Dekorovaná metoda musí přijímat buď objekt svazku nebo objekt snímku " #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "Zařízení na cestě %(path)s není dostupné: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "Doba konce (%(end)s) musí být po době začátku (%(start)s)." #, python-format msgid "The extra_spec: %s is invalid." msgstr "Dodatečná specifikace %s je neplatná." 
#, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "Dodatečná specifikace: %(extraspec)s není platná." #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "Záložní svazek nelze smazat: %s." #, python-format msgid "The following elements are required: %s" msgstr "Jsou vyžadovány následující prvky: %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "Následující přesuny mají snížení na nižší verzi, což není povoleno:\n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "Skupina hostitele nebo cíl iSCSI nemohly být přidány." msgid "The host group or iSCSI target was not found." msgstr "Skupina hostitele nebo cíl iSCSI nebyly nalezeny." #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "CHAP uživatel iSCSI %(user)s neexistuje." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "Importovaný lun %(lun_id)s je v zásobě %(lun_pool)s, kterou nespravuje " "hostitel %(host)s." msgid "The key cannot be None." msgstr "Klíč nemůže být None." #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "Logické zařízení pro zadané %(type)s %(id)s již je smazáno." #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "Metodě %(method)s vypršel časový limit. (Hodnota limitu: %(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "Metoda update_migrated_volume není zavedena." #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "Připojení %(mount_path)s není platný svazek Quobyte USP. Chyba %(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "" "Parametr podpůrné vrstvy úložiště. (Skupina nastavení: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "Pro provedení přírůstkové zálohy musí být dostupná nadřazená záloha." #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "Zadaný snímek '%s' není snímek zadaného svazku." msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "Odkaz na svazek v podpůrné vrstvě by měl být ve formátu souborový systém/" "název svazku (název svazku nemůže obsahovat '/')" msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "Režim replikace nebyl v dodatečných specifikacích svazku správně nastaven. " "Pokud je režim periodic, musí být také zadáno replication:sync_period a mít " "zvolen interval mající hodnotu od 300 až 31622400 vteřin." #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "Požadovaná velikost : %(requestedSize)s není stejná jako výsledná velikost: " "%(resultSize)s." #, python-format msgid "The resource %(resource)s was not found." msgstr "Zdroj %(resource)s nebyl nalezen." msgid "The results are invalid." msgstr "Výsledky jsou neplatné." msgid "The snapshot cannot be created when the volume is in maintenance mode." 
msgstr "Snímek nelze vytvořit, zatímco svazek je v režimu údržby." #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "Zdrojový svazek %s není v zásobě spravované současným hostitelem." msgid "The source volume for this WebDAV operation not found." msgstr "Zdrojový svazek nebyl nalezen pro tuto operaci WebDAV." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "Typ zdrojového svazku '%(src)s' je jiný než cílový typ '%(dest)s'." #, python-format msgid "The source volume type '%s' is not available." msgstr "Typ zdrojového svazku '%s' není dostupný." #, python-format msgid "The specified %(desc)s is busy." msgstr "Zadané %(desc)s je zaneprázdněno." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "Zadané logické zařízení %(ldev)s nemůže být spravováno. Zařízení nesmí být " "mapováno." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" "Zadané logické zařízení %(ldev)s nemůže být spravováno. Zařízení nesmí být " "spárováno." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "Zadané logické zařízení %(ldev)s nemůže být spravováno. Velikost zařízení " "musí být v násobcích gigabajtu." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "Zadané logické zařízení %(ldev)s nemůže být spravováno. Typ svazku musí být " "DP-VOL." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "Zadaná operace není podporována. Velikost svazku musí být stejná jako zdroj " "%(type)s. (svazek: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "Zadaný virtuální disk je mapován k hostiteli." msgid "The specified volume is mapped to a host." msgstr "Zadaný svazek je namapován k hostiteli." #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "Heslo pole úložiště pro %s není správné, prosím aktualizujte nastavené heslo." #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "" "Podpůrná vrstva úložiště může být použita. (Skupina nastavení: " "%(config_group)s)" #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "Odebraný počet popisných dat z %(memberCount)s je příliš malý pro svazek: " "%(volumeName)s, s velikostí %(volumeSize)s." #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "Typ popisných dat: %(metadata_type)s pro svazek/snímek %(id)s je neplatný." #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "Svazek %(volume_id)s nemohl být rozšířen. Typ svazku musí být Normální." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "Nelze zrušit správu svazku %(volume_id)s. Typ svazku musí být " "%(volume_type)s." #, python-format msgid "The volume %(volume_id)s is managed successfully. 
(LDEV: %(ldev)s)" msgstr "" "Svazek %(volume_id)s byl úspěšně zařazen pod správu. (Logické zařízení: " "%(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "" "Svazek %(volume_id)s byl úspěšně odstraněn ze správy. (Logické zařízení: " "%(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "Svazek %(volume_id)s pro mapování nemohl být nalezen." msgid "The volume cannot accept transfer in maintenance mode." msgstr "Svazek nemůže v režimu údržby přijímat přenosy v režimu údržby." msgid "The volume cannot be attached in maintenance mode." msgstr "Svazek nemůže být během údržby připojen." msgid "The volume cannot be detached in maintenance mode." msgstr "Svazek nemůže být během údržby odpojen." msgid "The volume cannot be updated during maintenance." msgstr "Svazek nemůže být během údržby spravován." msgid "The volume connection cannot be initialized in maintenance mode." msgstr "Připojení ke svazku nemůže být zavedeno v režimu údržby." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "Ovladač svazku vyžaduje název zavaděče iSCSI v konektoru." msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "Svazek je v současnosti zaneprázdněn v 3PAR a nemůže být nyní smazán. Prosím " "zkuste to znovu později." msgid "The volume label is required as input." msgstr "Jmenovka svazku je vyžadována jako vstup." msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "" "Popisná data svazku nemohou být smazána, zatímco je svazek v režimu údržby." msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "Popisná data svazku nemohou být aktualizována, zatímco je svazek v režimu " "údržby." #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "Žádné dostupné zdroje k použití. (Zdroj: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "Žádní platní hostitelé ESX." #, python-format msgid "There are no valid datastores attached to %s." msgstr "K %s nejsou připojena žádná platná datová úložiště." msgid "There are no valid datastores." msgstr "Žádná platná datová úložiště." #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "Neexistuje označení pro %(param)s. Zadané úložiště je pro správu svazku " "nezbytné." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "Neexistuje označení pro logické zařízení. Zadané logické zařízení je " "nezbytné pro správu svazku." msgid "There is no metadata in DB object." msgstr "V objektu DB nejsou žádná popisná data." #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "Neexistuje sdílení schopné hostit %(volume_size)sG" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "Neexistuje sdílení schopné hostit %(volume_size)sG." #, python-format msgid "There is no such action: %s" msgstr "Žádná taková činnost: %s" msgid "There is no virtual disk device." msgstr "Neexistuje žádné zařízení virtuálního disku." #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "Při přidávání svazku do skupiny vzdálené kopie se vyskytla chyba: %s." 
#, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "Při vytváření snímku skupiny jednotnosti nastala chyba: %s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "Při vytváření skupiny vzdálené kopie se vyskytla chyba: %s" #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "" "Při nastavování doby synchronizace skupiny vzdálené kopie se vyskytla chyba: " "%s" #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Při nastavování skupiny vzdálené kopie na polích 3PAR se vyskytla chyba: " "('%s'). Svazek nebude rozpoznán jako mající typ pocházející od replikace." #, python-format msgid "There was an error starting remote copy: %s." msgstr "Při spouštění vzdálené kopie se vyskytla chyba: %s" #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Není nastaven žádný konfigurační soubor Gluster (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "Není zadán žádný soubor s nastavením NFS (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "Není nastaven žádný svazek Quobyte (%s). Příklad: quobyte:///" msgid "Thin provisioning not supported on this version of LVM." msgstr "Mělké poskytování není podporováno v této verzi LVM." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "Povolovač mělkého poskytování není nainstalován. Nelze vytvořit mělký svazek" msgid "This driver does not support deleting in-use snapshots." msgstr "Tento ovladač nepodporuje mazání právě používaných snímků." msgid "This driver does not support snapshotting in-use volumes." msgstr "Tento ovladač nepodporuje vytváření snímků u právě používaných svazků." msgid "This request was rate-limited." msgstr "Tento požadavek má omezené množství." #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "Tato systémová platforma (%s) není podporována. Tento ovladač podporuje " "pouze platformy Win32." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "Služba zásad nebyla nalezena v %(storageSystemName)s." #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "Při čekání na aktualizaci od Nova při vytváření snímku %s vypršel čas." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "Při čekání na aktualizaci od Nova při mazání snímku %(id)s vypršel čas." msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "Hodnota časového limitu (ve vteřinách) použitá při připojování do clusteru " "ceph. Pokud je hodnota < 0, není nastaven žádný limit a bude použita výchozí " "hodnota librados." #, python-format msgid "Timeout while requesting %(service)s API." msgstr "Při žádání o API %(service)s vypršel časový limit." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "Přenos %(transfer_id)s nemohl být nalezen." 
#, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "Přenos %(transfer_id)s: Svazek s ID %(volume_id)s je v neočekávaném stavu " "%(status)s, předpokládáno čekání na přenos" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "Pokus o importování popisných dta zálohy z id %(meta_id)s do zálohy %(id)s." #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "Úkol ladění svazku zastaven pře jeho dokončením: název svazku=" "%(volume_name)s, stav úkoolu=%(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "Typ %(type_id)s již je přidružen k jiné specifikaci qos: %(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "Změna typu přístupu se nevztahuje k veřejnému typu svazku." msgid "Type cannot be converted into NaElement." msgstr "Typ nemůže být převeden na NaElement." #, python-format msgid "TypeError: %s" msgstr "Chyba typu: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUID %s jsou zároveň v seznamu přidání i odstranění svazků." msgid "Unable to access the backend storage via file handle." msgstr "" "Nelze získat přístup k podpůrné vrstvě úložiště pomocí obslužné rutiny " "souboru." #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "Nelze získat přístup k podpůrné vrstvě úložiště pomocí cesty %(path)s." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "" "Nelze přidat hostitele Cinder do hostitelů aplikací v prostoru %(space)s" msgid "Unable to connect or find connection to host" msgstr "Nelze se připojit nebo nalézt připojení k hostiteli" msgid "Unable to create Barbican Client without project_id." msgstr "Nelze vytvořit klienta Barbican bez id projektu." #, python-format msgid "Unable to create consistency group %s" msgstr "Nelze vytvořit skupinu jednotnosti %s" msgid "Unable to create lock. Coordination backend not started." msgstr "Nelze vytvořit zámek. Podpůrná vrstva koordinátora nebyla spuštěna." #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Nelze vytvořit nebo získat výchozí skupinu úložiště pro zásadu FAST: " "%(fastPolicyName)s." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "Nelze vytvořit klon repliky svazku %s." #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "Nelze smazat snímek skupiny jednotnosti %s" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "Nelze smazat snímek %(id)s, stav: %(status)s." #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "Nelze smazat zásadu snímku ve svazku %s." msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "Nelze odpojit svazek. Pro odpojení musí být stav svazku 'in-use' a stav " "připojení musí být 'attached'." #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "Nelze zjistit název snímku v Purity pro snímek %(id)s." msgid "Unable to determine system id." msgstr "Nelze zjistit id systému." msgid "Unable to determine system name." 
msgstr "Nelze zjistit název systému." #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "Nelze provést operace správy snímku pomocí Purity REST API verze " "%(api_version)s, vyžaduje %(required_versions)s." #, python-format msgid "Unable to extend volume %s" msgstr "Nelze rozšířit svazek %s" msgid "Unable to fetch connection information from backend." msgstr "Nelze získat informace o připojení z podpůrné vrstvy." #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "Nelze získat informace o připojení z podpůrné vrstvy: %(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "Nelze najít odkaz na Purity s názvem=%s" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "Nelze najít skupinu svazku: %(vg_name)s" msgid "Unable to find iSCSI mappings." msgstr "Nelze najít mapování iSCSI." #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "Nelze najít soubor klíčů hostitelů ssh: %s" msgid "Unable to find system log file!" msgstr "Nelze najít soubor záznamu systému!" #, python-format msgid "Unable to find volume %s" msgstr "Nelze najít svazek %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "Nelze získat blokové zařízení pro soubor '%s'" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" "Nelze získat informace o nastavení potřebné k vytvoření svazku: " "%(errorMessage)s." msgid "Unable to get corresponding record for pool." msgstr "Nelze získat odpovídající záznam pro zásobu" #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "Nelze získat informace o prostoru %(space)s, prosím ověřte, že cluster běží " "a je připojen." msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "Nelze získat seznam IP adres na tomto hostiteli. Zkontrolujte oprávnění a " "sítě." msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "Nelze získat seznam členů domény. Zkontrolujte, že cluster je spuštěn." msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "Nelze získat seznam prostorů pro vytvoření nového názvu. Ověřte prosím, zda " "cluster běží." #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "Nelze získat statistiky s podpůrné vrstvy s názvem: %s" #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "Nelze získat cílové koncové body pro hardware s ID %(hardwareIdInstance)s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "Nelze importovat svazek %(deviceId)s do cinder. Je to zdrojový svazek sezení " "replikace %(sync)s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "Nelze importovat svazek %(deviceId)s do cinder. Vnější svazek není v zásobě " "spravované současným hostitelem cinder." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "Nelze importovat svazek %(deviceId)s do cinder. 
Zobrazení svazku je " "maskováno %(mv)s." #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "Nelze načíst certifikační autoritu z %(cert)s %(e)s." #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "Nelze načíst certifikát z %(cert)s %(e)s." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "Nelze načíst klíč z %(cert)s %(e)s." #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "Nelze nalézt účet %(account_name)s na zařízení Solidfire" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "Nelze nalézt SVM které spravuje IP adresu '%s'" #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "Nelze spravovat existující svazek. Svazek %(volume_ref)s již je spravován." #, python-format msgid "Unable to manage volume %s" msgstr "Nelze spravovat svazek %s" msgid "Unable to map volume" msgstr "Nelze mapovat svazek" msgid "Unable to map volume." msgstr "Nelze mapovat svazek." msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "Nelze zpracovat žádost XML. Zadejte prosím XML ve správném formátu." msgid "Unable to parse attributes." msgstr "Nelze zpracovat vlastnosti." #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "Nelze povýšit repliku na hlavní pro svazek %s. Druhá kopie neexistuje." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "Nelze znovu použít hostitele, kterého nespravuje Cinder, pomocí " "use_chap_auth=True." msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "Nelze znovu použít hostitele, pokud má nastaveno neznámé ověřovací údaje " "CHAP." #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "Nelze přejmenovat svazek %(existing)s na %(newname)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "Nezle přetypovat: Kopie svazku %s již existuje, Přetypování by překročilo " "omezení 2 kopií." #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "Nezle přetypovat: Současná činnost vyžaduje kopii svazku, není povoleno, " "pokud je nový typ replikace. Svazek = %s" #, python-format msgid "Unable to snap Consistency Group %s" msgstr "Nelze vytvořit snímek skupiny jednotnosti %s" msgid "Unable to terminate volume connection from backend." msgstr "Nelze ukončit připojení svazku v podpůrné vrstvě." #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "Nelze ukončit připojení k ovladači: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "Nelze aktualizovat skupinu jednotnosti %s" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "Nelze aktualizovat typ kvůli nesprávnému stavu: %(vol_status)s ve svazku: " "%(vol_id)s. Stav svazku musí být dostupný, nebo používaný." #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" "Nelze ověřit skupinu zavaděče: %(igGroupName)s por zamaskování " "%(maskingViewName)s. " msgid "Unacceptable parameters." 
msgstr "Nepřijatelné parametry." #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "Neočekávaný stav mapování %(status)s pro %(id)s. Vlastnosti: %(attr)s." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "Neočekávaná odpověď rozhraní příkazového řádku: neshoda hlavičky/řádku. " "Hlavička: %(header)s, řádek: %(row)s." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "Neočekávaný stav mapování %(status)s pro %(id)s. Vlastnosti: %(attr)s." msgid "Unexpected response from Nimble API" msgstr "Neočekávaná odpověď od Nimble API" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Neočekávaná odpověď od Tegile IntelliFlash API" msgid "Unexpected status code" msgstr "Neočekávaný kód stavu" msgid "Unknown Gluster exception" msgstr "Neznámá výjimka Gluster" msgid "Unknown NFS exception" msgstr "Neznámá výjimka NFS" msgid "Unknown RemoteFS exception" msgstr "Neznámá výjimka RemoteFS" msgid "Unknown SMBFS exception." msgstr "Neznámá výjimka SMBFS." msgid "Unknown Virtuozzo Storage exception" msgstr "Neznámá výjimka úložiště Virtuozzo" msgid "Unknown action" msgstr "Neznámá činnost" #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "Neznámý nebo nepodporovaný příkaz %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "Neznámý protokol: %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Neznámý zdroj kvóty %(unknown)s." msgid "Unknown service" msgstr "Neznámá služba" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Neznámý směr řazení, musí být 'desc' nebo 'asc'" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "Neznámý směr řazení, musí být buď 'desc' nebo 'asc'." msgid "Unmanage volume not implemented." msgstr "Zrušení správy svazku není zavedeno." #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "Nerozpoznané klíčové slovo QoS: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "Nerozpoznaný formát zálohy: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Nerozpoznaná hodnota read_deleted '%s'" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "Neúspěšně iscsiadm. Zjištěná výjimka %(ex)s." msgid "Unsupported Clustered Data ONTAP version." msgstr "Nepodporovaná verze clusterovaného Data ONTAP." msgid "Unsupported Content-Type" msgstr "Nepodporovaný Content-Type" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "Nepodporovaná verze Data ONTAP. Podporované jsou verze 7.3.1 a vyšší." #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "Nepodporovaná verze zálohy popisných dat (%s)" msgid "Unsupported backup metadata version requested" msgstr "Zažádána nepodporovaná verze popisných dat zálohy" msgid "Unsupported backup verify driver" msgstr "Nepodporovaný ovladač ověření zálohy" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "Nepodporovaný frimware na rozbočovači %s. Ujistěte se, že na rozbočovači je " "nainstalována verze 6.4 nebo vyšší." #, python-format msgid "Unsupported volume format: %s " msgstr "Nepodporovaný formát svazku: %s" msgid "Update QoS policy error." msgstr "Chyba při aktualizování zásady QoS." 
msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "Operace aktualizace a smazání kvóty může provést pouze správce přímého " "nadřazeného nebo správce Cloudu." msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "Operace aktualizace a smazání kvóty mohou být provedeny pouze v projektech " "ve stejné hierarchii jako projekt do kterého jsou uživatelé zařazeni." msgid "Updated At" msgstr "Aktualizováno" msgid "Upload to glance of attached volume is not supported." msgstr "Nahrávání na glance připojeného svazku není podporováno." msgid "Use ALUA to associate initiator to host error." msgstr "Chyba při použití ALUA k přidružení zavaděče k hostiteli." msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "Chyba při použití CHAP k přidružení zavaděče k hostiteli. Zkontrolujte " "prosím uživatelské jméno a heslo CHAP." msgid "User ID" msgstr "ID uživatele" msgid "User does not have admin privileges" msgstr "Uživatel nemá správcovská oprávnění" msgid "User is not authorized to use key manager." msgstr "Uživatel nemá oprávnění používat správce klíčů." msgid "User not authorized to perform WebDAV operations." msgstr "Uživatel nemá oprávnění provádět operace WebDAV." msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "" "Zpětné vrácení V2 - Svazek je v jiné skupině úložiště mimo výchozí skupinu." msgid "V2 rollback, volume is not in any storage group." msgstr "Zpětné vrácení V2, svazek není v žádné skupině úložiště." msgid "V3 rollback" msgstr "Zpětné vrácení V3" #, python-format msgid "VV Set %s does not exist." msgstr "Sada virtuálních svazků %s neexistuje." #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "Platní spotřebitelé specifikace QoS jsou: %s" #, python-format msgid "Valid control location are: %s" msgstr "Platná ovládací umístění jsou: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "Ověření připojení svazku selhalo (chyba: %(err)s)." #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "Hodnota \"%(value)s\" není platná pro volbu nastavení \"%(option)s\"" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "Hodnota %(param)s pro %(param_string)s není boolean." msgid "Value required for 'scality_sofs_config'" msgstr "Je třeba zadat hodnotu pro 'scality_sofs_config'" #, python-format msgid "ValueError: %s" msgstr "Chyba hodnoty: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "Virtuální disk %(name)s není součástí mapování %(src)s -> %(tgt)s." #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "Objekt s verzí %s nemůže získat objekt podle id." #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "Objekt s verzí %s nepodporuje podmíněné aktualizace." #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "Virtuální svazek '%s' neexistuje v poli." #, python-format msgid "Vol copy job for dest %s failed." msgstr "Úkol kopírování svazku do cíle %s selhal." #, python-format msgid "Volume %(deviceID)s not found." msgstr "Svazek %(deviceID)s nenalezen." #, python-format msgid "" "Volume %(name)s not found on the array. 
Cannot determine if there are " "volumes mapped." msgstr "" "Svazek %(name)s nebyl nalezen v poli. Nelze zjistit zda jsou svazky " "namapovány." #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "Svazek %(name)s byl vytvořen ve VNX, ale je ve stavu %(state)s." #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "Svazek %(vol)s nemohl být vytvořen v zásobě %(pool)s." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "Stav svazku %(vol_id)s musí být dostupný, nebo používaný, ale současný stav " "je: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "Stav svazku %(vol_id)s musí být pro rozšíření dostupný, ale současný stav " "je: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "Stav svazku %(vol_id)s musí být dostupný pro aktualizaci příznaku pouze pro " "čtení, ale současný stav: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "Stav svazku %(vol_id)s musí být dostupný, nebo chybný, ale současný stav je: " "%(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Svazek %(volume_id)s nemohl být nalezen." #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "Svazek %(volume_id)s nemá žádná popisná data správy mající klíč " "%(metadata_key)s." #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "Svazek %(volume_id)s nemá žádná metadata s klíčem %(metadata_key)s." #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "Svazek %(volume_id)s je v současnosti mapován k nepodporované skupině " "hostitele %(group)s" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "Svazek %(volume_id)s není v současnosti mapován k hostiteli %(host)s" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "Svazek %(volume_id)s je stále připojen, nejdříve odpojte svazek." #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "Chyba replikace svazku %(volume_id)s: %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "Svazek %(volume_name)s je zaneprázdněn." #, python-format msgid "Volume %s could not be created from source volume." msgstr "Svazek %s nemohl být vytvořen ze zdrojového svazku." #, python-format msgid "Volume %s could not be created on shares." msgstr "Svazek %s nemohl být vytvořen ve sdílení." #, python-format msgid "Volume %s could not be created." msgstr "Svazek %s nemohl být vytvořen." #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "Svazek %s nemá zadáno umístění poskytovatele, je přeskočen." #, python-format msgid "Volume %s doesn't exist on array." msgstr "Svazek %s neexistuje v poli." #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "Svazek %s již je spravován OpenStack." #, python-format msgid "Volume %s is already part of an active migration." msgstr "Svazek %s již je součástí aktivního přesunu." 
#, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "Svazek %s je online. Pro správu pomocí OpenStack ho nastavte na offline." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Svazek %s nesmí být součástí skupiny jednotnosti." #, python-format msgid "Volume %s must not be replicated." msgstr "Svazek %s nesmí být replikován." #, python-format msgid "Volume %s must not have snapshots." msgstr "Svazek %s nesmí mít snímky." #, python-format msgid "Volume %s not found." msgstr "Svazek %s nenalezen." #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "Svazek %s: Chyba při pokusu o rozšíření svazku" #, python-format msgid "Volume (%s) already exists on array" msgstr "Svazek (%s) již existuje v poli" #, python-format msgid "Volume (%s) already exists on array." msgstr "Svazek (%s) již existuje v poli." #, python-format msgid "Volume Group %s does not exist" msgstr "Skupina svazku %s neexistuje" #, python-format msgid "Volume Type %(id)s already exists." msgstr "Typ svazku %(id)s již existuje." #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "Typ svazku %(type_id)s nemá žádné dodatečné specifikace s klíčem %(id)s." #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "Smazání typu svazku %(volume_type_id)s není povoleno, když existují svazky s " "tímto typem." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "Typ svazku %(volume_type_id)s nemá žádné dodatečné parametry s klíčem " "%(extra_specs_key)s." msgid "Volume Type id must not be None." msgstr "ID typu svazku nesmí být None." #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "Svazek [%(cb_vol)s] nebyl nalezen v úložišti CloudByte odpovídající svazku " "Openstack [%(ops_vol)s]." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "Svazek [%s] nebyl nalezen v úložišti CloudByte." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "Připojení svazku nebylo nalezen ve filtru: %(filter)s ." #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "Nastavení podpůrné vrstvy svazku je neplatné: %(reason)s" msgid "Volume by this name already exists" msgstr "Svazek s tímto názvem již existuje" msgid "Volume cannot be restored since it contains snapshots." msgstr "Svazek nemůže být obnoven protože obsahuje snímky." msgid "Volume create failed while extracting volume ref." msgstr "Vytvoření svazku selhalo při extrahování odkazu svazku." #, python-format msgid "Volume device file path %s does not exist." msgstr "Cesta k souboru zařízení svazku %s neexistuje." #, python-format msgid "Volume device not found at %(device)s." msgstr "Zařízení svazku nenalezeno na %(device)s." #, python-format msgid "Volume driver %s not initialized." msgstr "Ovladač svazku %s není zaveden." msgid "Volume driver not ready." msgstr "Ovladač svazku není připraven." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "Ovladač svazku nahlásil chybu: %(message)s" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "Svazek je ve skupině jednotnosti %s připojen. Je třeba ho nejdříve odpojit." 
msgid "Volume in consistency group still has dependent snapshots." msgstr "Svazek ve skupině jednotnosti stále má na sobě závislé snímky." #, python-format msgid "Volume is attached to a server. (%s)" msgstr "Svazek je připojen k serveru. (%s)" msgid "Volume is in-use." msgstr "Svazek se používá." msgid "Volume is not available." msgstr "Svazek není dostupný." msgid "Volume is not local to this node" msgstr "Svazek není pro tento uzel místním" msgid "Volume is not local to this node." msgstr "Svazek není pro tento uzel místním." msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "Zažádáno o zálohu popisných dat svazku, ale tento ovladač tuto funkci zatím " "nepodporuje." #, python-format msgid "Volume migration failed: %(reason)s" msgstr "Přesunutí svazku selhalo: %(reason)s" msgid "Volume must be available" msgstr "Svazek musí být dostupný" msgid "Volume must be in the same availability zone as the snapshot" msgstr "Svazek musí být ve stejné zóně dostupnosti jako snímek" msgid "Volume must be in the same availability zone as the source volume" msgstr "Svazek musí být ve stejné zóně dostupnosti jako zdrojový svazek" msgid "Volume must not be part of a consistency group." msgstr "Svazek nesmí být součástí skupiny jednotnosti." msgid "Volume must not be replicated." msgstr "Svazek nesmí být replikován." msgid "Volume must not have snapshots." msgstr "Svazek nesmí mít snímky." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "Svazek není nalezen v instanci %(instance_id)s." msgid "Volume not found on configured storage backend." msgstr "Svazek nebyl nalezen na nastavené podpůrné vrstvě úložiště." msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "Svazek nebyl nalezen na nastavené podpůrné vrstvě úložiště. Pokud obsah " "vašeho svazku obsahuje \"/\", je třeba ho přejmenovat a pak se pokus znovu o " "správu." msgid "Volume not found on configured storage pools." msgstr "Svazek nebyl nalezen v nastavených zásobách úložiště." msgid "Volume not found." msgstr "Svazek nenalezen." msgid "Volume not yet assigned to host." msgstr "Svazek ještě není přidělen k hostiteli." msgid "Volume reference must contain source-name element." msgstr "Odkaz na svazek musí obsahovat prvek source-name." #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "Replikace svazku %(volume_id)s nemohla být nalezena." #, python-format msgid "Volume service %s failed to start." msgstr "Služba svazku %s nemohla být spuštěna." msgid "Volume should have agent-type set as None." msgstr "Svazek by měl mít typ agenta nastaven na None." #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "Velikost svazku %(volume_size)sGB nemůže být menší než minimální velikost " "disku v obrazu %(min_disk)sGB." #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "Velikost svazku '%(size)s' musí být celé číslo a větší než 0" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "Velikost svazku'%(size)s'GB nemůže být menší než původní velikost svazku " "%(source_size)sGB. Musí být >= původní velikosti svazku." 
#, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "Velikost svazku'%(size)s'GB nemůže být menší než velikost snímku " "%(snap_size)sGB. Musí být <= původní velikosti snímku." msgid "Volume size increased since the last backup. Do a full backup." msgstr "Velikost svazku se od poslední zálohy zvýšila. Proveďte úplnou zálohu." msgid "Volume size must multiple of 1 GB." msgstr "Velikost svazku musí být násobkem 1GB." #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "Stav svazku musí být dostupný, ale současný stav je: %s" msgid "Volume status is in-use." msgstr "Stav svazku je používaný." #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "Stav svazku musí být pro vytvoření snímku \"available\" nebo \"in-use\". " "(nyní je %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "Stav svazku musí být \"available\" nebo \"in-use\"." #, python-format msgid "Volume status must be %s to reserve." msgstr "Stav svazku musí být pro rezervaci %s." msgid "Volume status must be 'available'." msgstr "Stav svazku musí být 'available'." msgid "Volume to Initiator Group mapping already exists" msgstr "Mapování svazku ke skupině zavaděče již existuje" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "Zálohovaný svazek musí být dostupný nebo používaný, ale jeho stav je nyní " "\"%s\"." msgid "Volume to be restored to must be available" msgstr "Obnovovaná záloha musí být dostupná" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "Typ svazku %(volume_type_id)s nemohl být nalezen." #, python-format msgid "Volume type ID '%s' is invalid." msgstr "Typ svazku ID '%s' je neplatné." #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "Přístup k typu svazku pro kombinaci %(volume_type_id)s / %(project_id)s již " "existuje." #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "Přístup k typu svazku nenalezen pro kombinaci %(volume_type_id)s / " "%(project_id)s." #, python-format msgid "Volume type does not match for share %s." msgstr "Typ svazku neodpovídá sdílení %s." #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "Šifrování typu svazku pro typ %(type_id)s již existuje." #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "Šifrování typu svazku pro typ %(type_id)s neexistuje." msgid "Volume type name can not be empty." msgstr "Název typu svazku nemůže být prázdný." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "Typ svazku s názvem %(volume_type_name)s nemohl být nalezen." #, python-format msgid "Volume with volume id %s does not exist." msgstr "Svazek s id %s neexistuje." #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "Svazek: %(volumeName)s není zřetězený svazek. Rozšíření lze provádět pouze " "na zřetězeném svazku. Ukončování..." #, python-format msgid "Volume: %s could not be found." msgstr "Svazek: %s nemohlo být nalezeno." 
msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "" "Svazky mnohou být rozkouskovány do objektů této velikosti (v megabajtech)." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "Nastavení VzStorage 'vzstorage_used_ratio' je neplatné. Musí být > 0 a <= " "1.0: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "Soubor s nastavením VzStorage %(config)s nebyl nalezen." msgid "Wait replica complete timeout." msgstr "Vypršel časový limit čekání na dokončení replikace." msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "Čekání na připojení uzlů ke clusteru. Ujistěte se, že všichni daemoni sheep " "jsou spuštěni." msgid "Wrong resource call syntax" msgstr "Špatná syntaxe volání zdroje" msgid "X-IO Volume Driver exception!" msgstr "Výjimka ovladače svazku X-IO!" msgid "XML support has been deprecated and will be removed in the N release." msgstr "Podpora XML je zastaralá a bude odebrána ve verzi N." msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "XtremIO není správně nastaveno, nenalezeny žádné portály iscsi" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO nebyl zaveden správně, žádné clustery nenalezeny" msgid "You must implement __call__" msgstr "Musíte zavést __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "Před použitím ovladačů 3PAR musíte nainstalovat hpe3parclient. Spusťte \"pip " "install python-3parclient\" pro instalaci klienta." msgid "You must supply an array in your EMC configuration file." msgstr "Ve vašem souboru s nastavením musíte zadat pole EMC." #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "Vaše původní velikost: %(originalVolumeSize)s GB je větší než: %(newSize)s " "GB. Podporováno je pouze rozšíření. Uknčování..." #, python-format msgid "ZeroDivisionError: %s" msgstr "Chyba dělení nulou: %s" msgid "Zone" msgstr "Zóna" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Zásada zónování: %s nebylo rozpoznáno" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "" "Vytvoření a kopírování dat virtuálního disku: Nelze získat vlastnosti " "virtuálního disku %s." msgid "_create_host failed to return the host name." msgstr "Vytvoření hostitele nevrátilo název hostitele." msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "Vytvoření hostitele: Nelze převést název hostitele. Název není unicode nebo " "řetězec." msgid "_create_host: No connector ports." msgstr "Vytvoření hostitele: Žádné porty pro připojení." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Vytvoření virtuálního disku %(name)s - ve výstupu příkazového řádku nebyla " "nalezena zpráva o úspěchu.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "Získání slovníku hlavičky: hlavičky vlastnosti a jejich hodnoty se " "neshodují.\n" "Hlavičky: %(header)s\n" "Hodnoty: %(row)s." 
msgid "_get_host_from_connector failed to return the host name for connector." msgstr "Získání hostitele od konektoru nevrátilo název jeho hostitele." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "Získání vlastností mapy virtuálního disku: Nelze získat informace o " "připojení FC pro připojení svazek-hostitel. Je hostitel správně nastaven pro " "připojení FC?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "Získání vlastností mapy virtuálního disku: Ve skupině vstupu/výstupu %(gid)s " "nebyl nalezen žádný uzel pro svazek %(vol)s." msgid "_update_volume_stats: Could not get storage pool data." msgstr "Aktualizace statistik svazku: Nelze získat data zásoby úložiště." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "Přidání kopie virtuálního disku selhalo: Kopie svazku %s již existuje. " "Přidání další kopie by překročilo omezení 2 kopií." msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" "Přidání kopie virtuálního disku bylo spuštěno bez kopie v očekávané zásobě." #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants musí být boolean, obdrženo %s" msgid "already created" msgstr "již vytvořeno" msgid "already_created" msgstr "již vytvořeno" msgid "attach snapshot from remote node" msgstr "připojit snímek ke vzdálenému uzlu" #, python-format msgid "attribute %s not lazy-loadable" msgstr "vlastnost %s nelze líně načíst" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "záloha: %(vol_id)s nemohl vytvořit pevný odkaz na zařízení v %(vpath)s do " "%(bpath)s.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "záloha: %(vol_id)s nemohla získat oznámení o úspěšném dokončení zálohy od " "serveru.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "záloha: %(vol_id)s nemohla spustit dsmc kvůli neplatným argumentům na " "%(bpath)s.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "záloha: %(vol_id)s nemohla spustit dsmc na %(bpath)s.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "záloha: %(vol_id)s selhala. %(path)s není soubor." #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "záloha: %(vol_id)s selhala. %(path)s odkazuej na nečekaný typ souboru. " "Podporován blok, nebo normální soubory, současný režim souboru je " "%(vol_mode)s." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "záloha: %(vol_id)s selhala. Nelze získat skutečnou cestu ke svazku na " "%(path)s." 
msgid "being attached by different mode" msgstr "je připojen v jiném režimu" #, python-format msgid "call failed: %r" msgstr "Volání selhalo: %r" msgid "call failed: GARBAGE_ARGS" msgstr "Volání selhalo: Zbytečné argumenty" msgid "call failed: PROC_UNAVAIL" msgstr "Volání selhalo: Proces je nedostupný" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "Volání selhalo: Neshoda programu: %r" msgid "call failed: PROG_UNAVAIL" msgstr "Volání selhalo: Program je nedostupný" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "nelze najít lun-map, ig:%(ig)s svazek:%(vol)s" msgid "can't find the volume to extend" msgstr "nelze najít svazek pro rozšíření" msgid "can't handle both name and index in req" msgstr "v žádosti nelze zpracovat jak název tak index najednou" msgid "cannot understand JSON" msgstr "JSON nelze porozumět" msgid "cannot understand XML" msgstr "XML nelze porozumět" #, python-format msgid "cg-%s" msgstr "skupinajednotnosti-%s" msgid "cgsnapshot assigned" msgstr "snímek skupiny jednotnosti přidělen" msgid "cgsnapshot changed" msgstr "snímek skupiny jednotnosti změněn" msgid "cgsnapshots assigned" msgstr "snímky skupiny jednotnosti přiděleny" msgid "cgsnapshots changed" msgstr "snímky skupiny jednotnosti změněny" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "Kontrola chyby v nastavení: Heslo nebo soukromý klíč SSH jsou vyžadovány pro " "ověření: nastavte volbu san_password nebo san_private_key." msgid "check_for_setup_error: Unable to determine system id." msgstr "Kontrola chyby v nastavení: Nelze zjistit id systému." msgid "check_for_setup_error: Unable to determine system name." msgstr "Kontrola chyby v nastavení: Nelze zjistit název systému." msgid "check_hypermetro_exist error." msgstr "Chyba při kontrole existence hypermetra." #, python-format msgid "clone depth exceeds limit of %s" msgstr "hloubka klonování překračuje omezení %s" msgid "consistencygroup assigned" msgstr "skupina jednotnosti přidělena" msgid "consistencygroup changed" msgstr "skupina jednotnosti změněna" msgid "control_location must be defined" msgstr "ovládací umístění musí být zadáno" msgid "create_cloned_volume: Source and destination size differ." msgstr "Vytvoření klonovaného svazku: Velikost zdroje a cíle se liší." msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "vytváření skupiny jednotnosti ze zdroje podporuje zdroj snímku skupiny " "jednotnosti, nebo zdroj skupiny jednotnosti. Nelze použít více zdrojů." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "" "Vytvoření kopie: Zdrojový virtuální disk %(src)s (%(src_id)s) neexistuje." #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "Vytvoření kopie: Zdrojový virtuální disk %(src)s neexistuje." msgid "create_host: Host name is not unicode or string." msgstr "Vytvoření hostitele: Název není unicode nebo řetězec." msgid "create_host: No initiators or wwpns supplied." msgstr "Vytvoření hostitele: Nepředány žádné zavaděče nebo wwpns." msgid "create_hypermetro_pair error." msgstr "Chyba při vytváření páru hypermetro." #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." 
msgstr "" "Vytvoření snímku: Stav svazku musí být pro vytvoření snímku \"available\" " "nebo \"in-use\". Neplatný stav je %s." msgid "create_snapshot: get source volume failed." msgstr "Vytvoření snímku: Získání zdrojového svazku selhalo." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "Vytváření svazku ze snímku: Svazek %(name)s neexistuje." #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "Vytvoření svazku ze snímku: Pro vytvoření svazku musí být stav snímku " "\"dostupný\". Neplatný stav je: %s." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "Vytvoření svazku ze snímku: Velikost zdroje a cíle se liší." msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "Vytvoření svazku ze snímku: Velikost svazku se liší od svazku ze snímku." msgid "deduplicated and auto tiering can't be both enabled." msgstr "Automatické a deduplikující vrstvení nemůže být povoleno současně." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "smazání: %(vol_id)s nemohla spustit dsmc kvůli neplatným argumentům se " "standardním výstupem: %(out)s.\n" "chybový výstup: %(err)s." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "smazání: %(vol_id)s nemohla spustit dsmc pomocí standardního výstupu: " "%(out)s.\n" "chybový výstup: %(err)s." msgid "delete_hypermetro error." msgstr "Chyba při mazání hypermetra." #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "Mazání iniciátora: %s ACL nenalezeno. Pokračování." #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "mazání snímku %(snapshot_name)s který má nezávislé svazky" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "mazání svazku %(volume_name)s který má snímek" msgid "detach snapshot from remote node" msgstr "odpojit snímek ze vzdáleného uzle" msgid "do_setup: No configured nodes." msgstr "Zavedení: Nenastaveny žádné uzly." msgid "element is not a child" msgstr "prvek není podřazený" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries musí být větší nebo rovno 0" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "chyba při zápisu objektu do swift, MD5 objektu ve swift %(etag)s se liší od " "MD5 objektu odeslaného do swift %(md5)s" msgid "failed to create new_volume on destination host" msgstr "nelze vytvořit nový svazek na cílovém hostiteli" msgid "fake" msgstr "falešný" #, python-format msgid "file already exists at %s" msgstr "soubor již existuje v %s" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "fileno nepodporuje SheepdogIOWrapper" msgid "fileno() not supported by RBD()" msgstr "fileno() není podporován RBD()" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "flashsystem_multihostmap_enabled je nastaven na False, neumožňující mapování " "více hostitelů. 
CMMVC6071E, mapování virtuálního disku na hostitele nebylo " "vytvořeno protože disk k hostiteli již je namapován." msgid "flush() not supported in this version of librbd" msgstr "flush() není podporován touto verzí librbd" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s zálohováno: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s zálohováno:%(backing_file)s" msgid "force delete" msgstr "vynutit smazání" msgid "get_hyper_domain_id error." msgstr "chyb při získávání id domény hypermetra." msgid "get_hypermetro_by_id error." msgstr "Chyba při získávání hypermetra podle id." #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "Získání parametrů iSCSI: Nelze získat cílovou IP adresu pro zavaděč %(ini)s, " "prosím zkontrolujte soubor s nastavením." msgid "glance_metadata changed" msgstr "popisná data glance změněna" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode je nastaven na kopírování při zápisu, ale %(vol)s a " "%(img)s mají odlišné systémy souborů." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode je nastaven na kopírování při zápisu, ale %(vol)s a " "%(img)s mají odlišné sady souborů." #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "skupina hgst %(grp)s a uživatel hgst %(usr)s musí být namapovány k platným " "živatelům/skupinám v cinder.conf" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "síť hgst %(net)s zadaná v cinder.conf nebyla nalezena v clusteru" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "redundance hgst musí být v cinder.conf nastavena na 0 (pokud nechcete HA) " "nebo 1 (pro HA)." msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "" "režim prostoru hgst musí být v cinder.conf zadán osmičkové soustavě/" "celočíselně" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "server úložiště hgst %(svr)s nemá formát :" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "V cinder.conf musí být určeny servery úložiště hgst" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "Služba http mohla být uprostřed operace náhle ukončena, nebo převedena do " "stavu údržby." msgid "id cannot be None" msgstr "id nemůže být None" #, python-format msgid "image %s not found" msgstr "Obraz %s nebyl nalezen" #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "Zavedení spojení: Nelze získat vlastnosti svazku %s." #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "Zavedení spojení: Svazku %s chybí vlastnost." #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "Zavedení spojení: Ve skupině vstupu/výstupu %(gid)s nebyl nalezen žádný uzel " "pro svazek %(vol)s." #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "Zavedení připojení: Virtuální disk %s není určen." 
#, python-format msgid "invalid user '%s'" msgstr "Neplatný uživatel '%s'" #, python-format msgid "iscsi portal, %s, not found" msgstr "portál iscsi %s nebyl nalezen" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "IP adresa iscsi musí být při použití protokolu 'iSCSI' uvedena v souboru s " "nastavením." msgid "iscsiadm execution failed. " msgstr "Spuštění iscsiadm selhalo." #, python-format msgid "key manager error: %(reason)s" msgstr "chyba správce klíčů: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key není zadáno" msgid "limit param must be an integer" msgstr "parametr limit musí být celé číslo" msgid "limit param must be positive" msgstr "parametr limit musí být kladný" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing vyžaduje klíč 'name' pro identifikaci existujícího svazku." #, python-format msgid "marker [%s] not found" msgstr "značka [%s] nenalezena" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "mdiskgrp chybí uvozovky %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "zásady přesunu musí být 'on-demand' nebo 'never', předáno: %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "mkfs selhalo u svazku %(vol)s, chybová zpráva byla: %(err)s." msgid "mock" msgstr "nepravý" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs není nainstalováno" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "drbdmanage nalezl mnoho zdrojů s názvem %s" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "nalezeno mnoho zdrojů mající ID snímku %s" msgid "name cannot be None" msgstr "název nemůže být Žádný" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path: Nelze najít nástroj NAVISECCLI %(path)s." #, python-format msgid "no REPLY but %r" msgstr "Žádná odpověď ale %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "v drbdmanage nenalezen žádný snímek s id %s" #, python-format msgid "not exactly one snapshot with id %s" msgstr "více než jeden snímek s id %s" #, python-format msgid "not exactly one volume with id %s" msgstr "id %s nemá právě jeden svazek" #, python-format msgid "obj missing quotes %s" msgstr "objektu chybí uvozovky %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled není vypnuto." msgid "progress must be an integer percentage" msgstr "postup musí být procento vyjádřené celým číslem" msgid "promote_replica not implemented." msgstr "promote_replica není zavedeno." msgid "provider must be defined" msgstr "poskytovatel musí být zadán" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "Ovladač tohoto svazku vyžaduje qemu-img %(minimum_version)s nebo novější. " "Současná verze qemu-img: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img není nainstalováno a obraz je typu %s. Pokud qemu-img není " "nainstalován, lze použít pouze obrazy s typem RAW." msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img není nainstalováno a formát disku není zadán. 
Pokud qemu-img není " "nainstalován, lze použít pouze obrazy s typem RAW." msgid "rados and rbd python libraries not found" msgstr "Python knihovny rados a rbd nebyly nalezeny" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted může být buď 'no', 'yes' nebo 'only', ne %r" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "obnova: %(vol_id)s nemohla spustit dsmc kvůli neplatným argumentům na " "%(bpath)s.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "obnova: %(vol_id)s nemohla spustit dsmc na %(bpath)s.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "obnova: %(vol_id)s selhala.\n" "standardní výstup: %(out)s\n" "chybový výstup: %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "obnovení zálohy zrušeno, současný seznam objektů neodpovídá seznamu " "uloženého v popisných datech." msgid "root element selecting a list" msgstr "kořenový prvek volí seznam" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "rtslib_fb chybí člen %s: Možná budete potřebovat novější python-rtslib-fb." msgid "san_ip is not set." msgstr "san_ip není nastaveno." msgid "san_ip must be set" msgstr "san_ip musí být nastaveno" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip: Povinné pole nastavení san_ip není nastaveno." msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "san_login a /nebo san_password není v cinder.conf nastaveno pro ovladač " "Datera. Nastavte tyto údaje a spusťte znovu službu cinder-volume." msgid "serve() can only be called once" msgstr "serve() může být voláno pouze jednou" msgid "service not found" msgstr "služba nenalezena" msgid "snapshot does not exist" msgstr "snímek neexistuje" #, python-format msgid "snapshot id:%s not found" msgstr "id snímku:%s nenalezeno" #, python-format msgid "snapshot-%s" msgstr "snímek-%s" msgid "snapshots assigned" msgstr "snímky přiděleny" msgid "snapshots changed" msgstr "snímek změněn" #, python-format msgid "source vol id:%s not found" msgstr "id zdrojového svazku:%s nenalezeno" #, python-format msgid "source volume id:%s is not replicated" msgstr "id zdrojového svazku:%s není replikováno" msgid "status must be available" msgstr "stav musí být dostupný" msgid "stop_hypermetro error." msgstr "Chyba při zastavování hypermetra." msgid "subclasses must implement construct()!" msgstr "podtřídy musí zavádět construct()!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo selhalo, pokračuje se, jako by se nic nestalo" msgid "sync_hypermetro error." msgstr "Chyba při synchronizaci hypermetra." msgid "sync_replica not implemented." msgstr "sync_replica není zavedeno." 
#, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli není nainstalováno a nelze vytvořit výchozí adresář " "(%(default_path)s): %(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "Ukončení spojení: Nelze získat název hostitele z konektoru." msgid "timeout creating new_volume on destination host" msgstr "při vytváření nového svazku na cílovém hostiteli vypršel časový limit" msgid "too many body keys" msgstr "příliš mnoho klíčů těla" #, python-format msgid "umount: %s: not mounted" msgstr "odpojení: %s: není připojeno" #, python-format msgid "umount: %s: target is busy" msgstr "odpojení: %s: cíl je zaneprázdněn" msgid "umount: : some other error" msgstr "odpojení: : nějaká jiná chyba" msgid "umount: : target is busy" msgstr "odpojení: : zařízení je zaneprázdněno" #, python-format msgid "unrecognized argument %s" msgstr "nerozpoznaný argument %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "nepodporovaný kompresní algoritmus: %s" msgid "valid iqn needed for show_target" msgstr "Pro zobrazení cíle je třeba platné iqn" #, python-format msgid "vdisk %s is not defined." msgstr "virtuální disk %s není určen." msgid "vmemclient python library not found" msgstr "Python knihovna vmemclient nebyla nalezena" #, python-format msgid "volume %s not found in drbdmanage" msgstr "svazek %s není nalezen v drbdmanage" msgid "volume assigned" msgstr "svazek přidělen" msgid "volume changed" msgstr "svazek změněn" msgid "volume does not exist" msgstr "svazek neexistuje" msgid "volume is already attached" msgstr "svazek již je připojen" msgid "volume is not local to this node" msgstr "svazek není pro tento uzel místním" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "velikost svazku %(volume_size)d je příliš malá pro obnovení zálohy o " "velikosti %(size)d." #, python-format msgid "volume size %d is invalid." msgstr "velikost svazku %d je neplatná." msgid "volume_type cannot be None" msgstr "Typ svazku nemůže být None" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "při vytváření svazku ve skupině jednotnosti musí být zadán jeho typ." msgid "volume_type_id cannot be None" msgstr "volume_type_id nemůže být None" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" "pro vytvoření skupiny jednotnosti %(name)s musí být zadány typy svazků." #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "pro vytvoření skupiny jednotnosti %s musí být zadány typy svazků." msgid "volumes assigned" msgstr "svazky přiděleny" msgid "volumes changed" msgstr "svazky změněny" #, python-format msgid "wait_for_condition: %s timed out." msgstr "Čekání na podmínku: %s vypršel časový limit." msgid "{} is not a valid option." msgstr "{} není platná volba." cinder-8.0.0/cinder/locale/tr_TR/0000775000567000056710000000000012701406543017663 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/tr_TR/LC_MESSAGES/0000775000567000056710000000000012701406543021450 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po0000664000567000056710000023020512701406257025166 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. 
# # Translators: # ADİL REŞİT DURSUN , 2015 # Andreas Jaeger , 2015. #zanata # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev10\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-17 18:05+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-16 08:22+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume %(volume_name)s. " "Failed to remove from new volume set %(new_vvs)s." msgstr "" "%(exception)s: %(volume_name)s mantıksal sürücüsü için retype geri alınırken " "olağandışı durum. Yeni mantıksal sürücüden %(new_vvs)s ayarı kaldırılamadı." #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume %(volume_name)s. " "Original volume set/QOS settings may not have been fully restored." msgstr "" "%(exception)s: %(volume_name)s mantıksal sürücüsü için retype geri alınırken " "olağandışı durum. Özgün mantıksal sürücü küme/QOS ayarları tamamen geri " "yüklenmiş olmayabilir." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" "stdout: %(out)s\n" "stderr: %(err)s\n" msgstr "" "%(fun)s: Beklenmeyen CLI çıktısı ile başarısız oldu.\n" " Komut: %(cmd)s\n" "stdout: %(out)s\n" "stderr: %(err)s\n" #, python-format msgid "" "%(method)s %(url)s unexpected response status: %(response)s (expects: " "%(expects)s)." msgstr "" "%(method)s %(url)s beklenmeyen yanıt durumu: %(response)s (expects: " "%(expects)s)." #, python-format msgid "%(name)s: %(value)s" msgstr "%(name)s: %(value)s" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" msgstr "'%(value)s', '%(key)s' fazladan özelliği için geçersiz değerdir" #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting create_snapshot operation!" msgstr "" "create_snapshot işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " "Bilgisi için hesap bulunamadı!" #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting unmanage operation!" msgstr "" "unmanage işlemi denenirken SolidFire Kümesinde Mantıksal Sürücü Kimliği %s " "için hesap bulunamadı!" #, python-format msgid "Array mismatch %(myid)s vs %(arid)s" msgstr "Dizi uyuşmazlığı %(myid)s ve %(arid)s" #, python-format msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." msgstr "İstek yetkilendiriliyor: %(zfssaurl)s tekrar deneme: %(retry)d ." msgid "Backend returned err for lun export." msgstr "Artalanda çalışan uygulama lun dışa aktarımı için hata döndürdü." #, python-format msgid "Backup id %s is not invalid. Skipping reset." msgstr "Yedekleme numarası %s geçersizdir. Sıfırlama atlanıyor." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Yedekleme servisi %(configured_service)s doğrulamayı desteklemez. Yedekleme " "numarası %(id)s doğrulanabilir değil. Doğrulama atlanıyor." #, python-format msgid "Backup volume metadata failed: %s." msgstr "Mantıksal sürücü metadata yedekleme başarısız oldu: %s." 
#, python-format msgid "" "CLI fail: '%(cmd)s' = %(code)s\n" "out: %(stdout)s\n" "err: %(stderr)s" msgstr "" "CLI başarısız: '%(cmd)s' = %(code)s\n" "çıktı: %(stdout)s\n" "hata: %(stderr)s" msgid "Call to Nova delete snapshot failed" msgstr "Anlık sistem görüntüsü silmek için Nova çağrısı başarısız oldu" msgid "Call to Nova to create snapshot failed" msgstr "Anlık sistem görüntüsü oluşturmak için Nova çağrısı başarısız oldu" #, python-format msgid "Call to json.loads() raised an exception: %s." msgstr "json.loads() çağrısı bir istisna oluşturdu: %s." #, python-format msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s." msgstr "%(cg_name)s tutarsızlık grubuna %(lun)s lun eklenemiyor." #, python-format msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." msgstr "%(target_iqn)s ile %(target_ip)s keşfedilemiyor." #, python-format msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s." msgstr "" "%(cg_name)s tutarsızlık grubunda yeni %(luns)s LUN'lar yerleştirilemiyor." #, python-format msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." msgstr "%(cg_name)s tutarsızlık grubunda %(luns)s LUN'lar kaldırılamıyor." #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "%(key)s eşleştirmek için mantıksal sürücü bulunamıyor, %(msg)s" #, python-format msgid "" "Cannot add and verify tier policy association for storage group : " "%(storageGroupName)s to FAST policy : %(fastPolicyName)s." msgstr "" "Depolama grubu için katman ilke ilişkisi doğrulanamaz ve eklenemez : FAST " "ilkesine %(storageGroupName)s depolama grubu : %(fastPolicyName)s." #, python-format msgid "Cannot create or find an initiator group with name %(igGroupName)s." msgstr "" "%(igGroupName)s adı ile başlatıcı bir grup bulunamıyor ya da oluşturulamıyor." msgid "Cannot detect replica status." msgstr "Kopya durumu tespit edilemez." msgid "Cannot determine if Tiering Policies are supported." msgstr "Eğer Katmanlama İlkeleri destekliyorsa tespit edilemez." msgid "Cannot determine whether Tiering Policy is supported on this array." msgstr "" "Bu dizide Katmanlama İlkesinin desteklenip desteklenmediği belirlenemiyor." #, python-format msgid "Cannot find Consistency Group %s" msgstr "Tutarlılık Grubu %s bulunamıyor" #, python-format msgid "" "Cannot find a portGroup with name %(pgGroupName)s. The port group for a " "masking view must be pre-defined." msgstr "" "%(pgGroupName)s adında bir bağlantıNoktasıGrubu bulunamıyor. Maskeleme " "görünümü için bağlantı noktası grubu önceden tanımlanmış olmalıdır." #, python-format msgid "Cannot find the fast policy %(fastPolicyName)s." msgstr "Fast ilkesi %(fastPolicyName)s bulunamadı." #, python-format msgid "" "Cannot find the new masking view just created with name %(maskingViewName)s." msgstr "" "%(maskingViewName)s adı ile az önce oluşturulan yeni maskeleme görünümü " "bulunamıyor." #, python-format msgid "Cannot get QoS spec for volume %s." msgstr "%s mantıksal sürücüsü için QoS özelliği alınamadı." #, python-format msgid "Cannot get storage Group from job : %(storageGroupName)s." msgstr "İşten depolama Grubu alınamıyor : %(storageGroupName)s." msgid "Cannot get storage system." msgstr "Depolama sistemi alınamaz." #, python-format msgid "Clone Volume:%(volume)s failed from source volume:%(src_vref)s" msgstr "" "Kopya Mantıksal Sürücü:Kaynak mantıksal sürücüden %(volume)s başarısız oldu:" "%(src_vref)s" #, python-format msgid "Cloning of volume %s failed." msgstr "%s mantıksal sürücüsünün kopyalaması başarısız oldu." 
#, python-format msgid "" "CloudByte does not have a volume corresponding to OpenStack volume [%s]." msgstr "" "CloudByte, OpenStack mantıksal sürücüsüne [%s] uyumlu bir mantıksal sürücüye " "sahip değil." #, python-format msgid "" "CloudByte snapshot information is not available for OpenStack volume [%s]." msgstr "" "CloudByte anlık sistem görüntü bilgisi OpenStack mantıksal sürücüsü [%s] " "için kullanılamaz." #, python-format msgid "CloudByte volume information not available for OpenStack volume [%s]." msgstr "" "CloudByte mantıksal sürücü bilgisi OpenStack mantıksal sürücüsü [%s] için " "kullanılamaz." #, python-format msgid "Cmd :%s" msgstr "Cmd :%s" #, python-format msgid "Configuration value %s is not set." msgstr "Yapılandırma değeri %s ayarlanmamış." #, python-format msgid "Connect to Flexvisor error: %s." msgstr "Flexvisor hatasına bağlan: %s." #, python-format msgid "Connect to Flexvisor failed: %s." msgstr "Flexvisor'a bağlanılamadı: %s." #, python-format msgid "Consistency group %s: create failed" msgstr "Tutarlılık grubu %s: oluşturma başarısız oldu" #, python-format msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" msgstr "" "%(snap)s anlık sistem görüntüsü %(vol)s mantıksal sürücüsü için mantıksal " "sürücüye anlık sistem görüntüsü kopyalayamadı!" msgid "Could not decode scheduler options." msgstr "Zamanlayıcı seçenekleri şifresi çözülemedi." #, python-format msgid "Could not find a host for consistency group %(group_id)s." msgstr "Tutarlılık grubu %(group_id)s için bir istemci bulunamadı." #, python-format msgid "" "Could not find port group : %(portGroupName)s. Check that the EMC " "configuration file has the correct port group name." msgstr "" "Bağlantı noktası grubu bulunamadı : %(portGroupName)s. EMC yapılandırma " "dosyasının doğru bağlantı grup adına sahip olup olmadığını kontrol edin." #, python-format msgid "Could not stat scheduler options file %(filename)s." msgstr "%(filename)s zamanlayıcı seçenek dosyalarının bilgileri gösterilemedi." #, python-format msgid "Create cg snapshot %s failed." msgstr "%s cg anlık sistem görüntüsü oluşturma başarısız oldu." #, python-format msgid "Create consistency group %s failed." msgstr "%s tutarlılık grubu oluşturma başarısız oldu." #, python-format msgid "" "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." msgstr "" "Anlık sistem görüntüsü-%(snap)s'den tutarlılık grubu oluşturma başarısız " "oldu: AnlıkSistemGörüntüsüBulunamadı." #, python-format msgid "" "Create new lun from lun for source %(src)s => destination %(dest)s failed!" msgstr "" "Kaynak %(src)s => hedef %(dest)s için lun'dan yeni bir lun oluşturulamadı!" #, python-format msgid "Create snapshot notification failed: %s" msgstr "Anlık sistem görüntüsü bildirimi oluşturma başarısız oldu: %s" #, python-format msgid "Create volume failed from snapshot: %s" msgstr "" "Anlık sistem görüntüsünden mantıksal sürücü oluşturma başarısız oldu: %s" #, python-format msgid "Create volume notification failed: %s" msgstr "Mantıksal sürücü bildirimi oluşturma başarısız oldu: %s" #, python-format msgid "Creation of snapshot failed for volume: %s" msgstr "" "Mantıksal sürücü için anlık sistem görüntüsü oluşturma başarısız oldu: %s" #, python-format msgid "Creation of volume %s failed." msgstr "%s mantıksal sürücü oluşturma başarısız oldu." 
msgid "DB error:" msgstr "DB hatası:" #, python-format msgid "DBError detected when purging from table=%(table)s" msgstr "Tablo=%(table)s temizlenirken DBError tespit edildi" msgid "DBError encountered: " msgstr "DBError ile karşılaşıldı: " #, python-format msgid "Delete cgsnapshot %s failed." msgstr "%s cgsnapshot silme başarısız oldu." #, python-format msgid "Delete consistency group %s failed." msgstr "%s tutarlılık grubu silme başarısız oldu." msgid "Delete consistency group failed to update usages." msgstr "Kullanımları güncellemek için tutarlılık grubu silme başarısız oldu." msgid "Delete snapshot failed, due to snapshot busy." msgstr "Anlık sistem görüntüsü meşgul olduğundan silme başarısız oldu." #, python-format msgid "Delete snapshot notification failed: %s" msgstr "Anlık sistem görüntüsü bildirimi silme başarısız oldu: %s" #, python-format msgid "Delete volume notification failed: %s" msgstr "Mantıksal sürücü bildirimi silme başarısız oldu: %s" #, python-format msgid "Deleting snapshot %s failed" msgstr "Anlık sistem görüntüsü %s silme başarısız oldu" #, python-format msgid "Deleting zone failed %s" msgstr "Bölge silme başarısız oldu %s" #, python-format msgid "Deletion of volume %s failed." msgstr "%s mantıksal sürücüsünün silinmesi başarısız oldu." #, python-format msgid "Destination Volume Group %s does not exist" msgstr "Hedef Mantıksal Sürücü Grubu %s yok" msgid "Detach volume failed, due to remove-export failure." msgstr "Mantıksal sürücü ayırma remove-export hatası nedeniyle başarısız oldu." msgid "Detach volume failed, due to uninitialized driver." msgstr "Sürücü başlatılamadığından mantıksal sürücü ayırma başarısız oldu." #, python-format msgid "Did not find expected column name in lsvdisk: %s." msgstr "lsvdisk içinde beklenen sütun adı bulunamadı: %s." msgid "Differential restore failed, trying full restore" msgstr "Kademeli geri yükleme başarısız oldu, tam geri yükleme deneniyor" #, python-format msgid "Disconnection failed with message: %(msg)s." msgstr "Bağlantıyı kesme şu ileti ile başarısız oldu: %(msg)s." #, python-format msgid "" "Driver-based migration of volume %(vol)s failed. Move from %(src)s to " "%(dst)s failed with error: %(error)s." msgstr "" "%(vol)s mantıksal sürücüsünün, sürücü tabanlı göçü başarısız oldu. Kaynaktan " "%(src)s %(dst)s hedefe taşıma şu hata ile başarısız oldu: %(error)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "%(vol)s mantıksal sürücüsü eklenirken hata." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Grup Oluşturma Hatası: %(groupName)s. Dönüş kodu: %(rc)lu. Hata: %(error)s." #, python-format msgid "" "Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool: " "%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Mantıksal Sürücü Ayarlama Hatası: BaşlatıcıGrubu: %(initiatorgroup)s için " "%(lun)s Havuz: %(pool)s Proje: %(project)s Dönüş kodu: %(ret.status)d " "İleti: %(ret.data)s." msgid "Error activating LV" msgstr "LV etkinleştirilirken hata" msgid "Error adding HBA to server" msgstr "Sunucuya HBA ekleme hatası" #, python-format msgid "Error attaching volume %s" msgstr "Mantıksal sürücü %s eklenirken hata" msgid "Error closing channel." msgstr "Kanal kapatılırken hata." #, python-format msgid "" "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" "'%(method)s' için '%(netloc)s' glance sunucusuna bağlantı kurulurken hata, " "%(extra)s. 
" msgid "Error copying key." msgstr "Anahtar kopyalama hatası." msgid "Error creating Barbican client." msgstr "Barbican istemcisi oluşturulurken hata." #, python-format msgid "Error creating QOS rule %s" msgstr "QOS kuralı %s oluşturulurken hata" msgid "Error creating Volume" msgstr "Mantıksal sürücü oluşturulurken hata" msgid "Error creating Volume Group" msgstr "Mantıksal Sürücü Grubu oluşturulurken hata" msgid "Error creating chap record." msgstr "Chap kaydı oluşturulurken hata." msgid "Error creating key." msgstr "Anahtar oluşturma hatası." msgid "Error creating snapshot" msgstr "Anlık sistem görüntüsü oluşturma hatası" #, python-format msgid "Error creating volume. Msg - %s." msgstr "Mantıksal sürücü oluşturulurken hata. İleti - %s." msgid "Error deleting key." msgstr "Anahtar silinirken hata." #, python-format msgid "Error detaching volume %(volume)s, due to remove export failure." msgstr "" "Dışa aktarma hatası kaldırma nedeniyle, %(volume)s mantıksal sürücüsü " "ayrılırken hata." #, python-format msgid "Error detaching volume %s" msgstr "Mantıksal sürücü %s ayrılırken hata" #, python-format msgid "Error disassociating storage group from policy: %s." msgstr "İlkeden depolama grubu ayırırken hata: %s." msgid "Error during re-export on driver init." msgstr "Sürücü init'inde yeniden dışa aktarma sırasında hata." msgid "Error executing SSH command." msgstr "SSH komutu yürütülürken hata." msgid "Error executing command via ssh." msgstr "ssh yoluyla komut yürütürken hata." #, python-format msgid "Error executing command via ssh: %s" msgstr "ssh üzerinden komut yürütülürken hata: %s" msgid "Error extending Volume" msgstr "Mantıksal sürücü genişletilirken hata" #, python-format msgid "Error extending volume %(id)s. Ex: %(ex)s" msgstr "%(id)s mantıksal sürücüsü genişletilirken hata. Ex: %(ex)s" #, python-format msgid "Error extending volume: %(vol)s. Exception: %(ex)s" msgstr "Mantıksal sürücü genişletilirken hata: %(vol)s. İstisna: %(ex)s" #, python-format msgid "Error finding target pool instance name for pool: %(targetPoolName)s." msgstr "Havuz için hedef havuz örnek adı bulunurken hata: %(targetPoolName)s." #, python-format msgid "Error getting LUN attribute. Exception: %s" msgstr "LUN özniteliği alınırken hata. İstisna: %s" msgid "Error getting active FC target ports." msgstr "Etkin FC hedef bağlantı noktaları alınırken hata." msgid "Error getting active ISCSI target iqns." msgstr "Etkin ISCSI hedef iqns alınırken hata." msgid "Error getting active ISCSI target portals." msgstr "Etkin ISCSI hedef kapısı alınırken hata." msgid "Error getting array, pool, SLO and workload." msgstr "Dizi, havuz, SLO ve iş yükü alma hatası." msgid "Error getting chap record." msgstr "Chap kaydı alınırken hata." msgid "Error getting key." msgstr "Anahtar alınırken hata." msgid "Error getting name server info." msgstr "Ad sunucu bilgisi alınırken hata." msgid "Error getting secret data." msgstr "Gizli veri alırken hata." msgid "Error getting secret metadata." msgstr "Gizli metadata alınırken hata." msgid "Error getting show fcns database info." msgstr "fcns veritabanı bilgisini göster sonucu alınırken hata." msgid "Error getting target pool name and array." msgstr "Hedef havuz adı ve dizisi alınırken hata." #, python-format msgid "Error happened during storage pool querying, %s." msgstr "Depolama havuzu sorgulama sırasında hata oluştu, %s." 
#, python-format msgid "Error in copying volume: %s" msgstr "Mantıksal sürücü kopyalamada hata: %s" #, python-format msgid "" "Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d " "with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" msgstr "" "Mantıksal sürücü boyutu genişletmede hata. Mantıksal sürücü: Anlık sistem " "görüntüsü: %(snapshot)s Snap_Size: %(snap_size)d ile %(volume)s Vol_Size: " "%(vol_size)d" #, python-format msgid "Error in workflow copy from cache. %s." msgstr "Önbellekten iş akışı kopyalamasında hata. %s." #, python-format msgid "Error invalid json: %s" msgstr "Geçersiz json hatası: %s" msgid "Error manage existing get volume size." msgstr "Varolan mantıksal sürücü boyutu alma işleminde hata." msgid "Error manage existing volume." msgstr "Varolan mantıksal sürücüyü yönetme hatası." #, python-format msgid "Error mapping volume: %s" msgstr "Mantıksal sürücü eşleştirme hatası: %s" #, python-format msgid "" "Error migrating volume: %(volumename)s. to target pool %(targetPoolName)s." msgstr "" "Mantıksal sürücü hedef havuza %(targetPoolName)s taşınırken hata: " "%(volumename)s." #, python-format msgid "Error migrating volume: %s" msgstr "Mantıksal sürücü göç hatası: %s" #, python-format msgid "" "Error occurred in the volume driver when updating consistency group " "%(group_id)s." msgstr "" "Tutarlılık grubu %(group_id)s güncellenirken, mantıksal sürücüde hata " "meydana geldi." msgid "" "Error occurred when adding hostgroup and lungroup to view. Remove lun from " "lungroup now." msgstr "" "Görüntülenecek hostgroup ve lungroup eklenirken hata oluştu. Şimdi " "lungroup'tan lun'u kaldır." #, python-format msgid "" "Error occurred when building request spec list for consistency group %s." msgstr "" "%s tutarlılık grubu için istek özellik listesi oluşturulurken hata oluştu." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "cgsnapshot %s oluşturulurken hata meydana geldi." #, python-format msgid "" "Error occurred when creating consistency group %(cg)s from cgsnapshot " "%(cgsnap)s." msgstr "" "%(cgsnap)s cgsnapshot'ından %(cg)s tutarlılık grubu oluşturulurken hata " "meydana geldi." #, python-format msgid "" "Error occurred when creating consistency group %(group)s from cgsnapshot " "%(cgsnap)s." msgstr "" "%(cgsnap)s cgsnapshot'ından %(group)s tutarlılık grubu oluşturulurken hata " "meydana geldi." #, python-format msgid "Error occurred when creating consistency group %s." msgstr "Tutarlılık grubu %s oluşturulurken hata meydana geldi." #, python-format msgid "" "Error occurred when creating volume entry from snapshot in the process of " "creating consistency group %(group)s from cgsnapshot %(cgsnap)s." msgstr "" "%(cgsnap)s cgsnapshot'ından %(group)s tutarlılık grubu oluşturma sürecinde, " "anlık sistem görüntüsünden mantıksal sürücü girdisi oluşturulurken hata " "meydana geldi." #, python-format msgid "Error occurred when updating consistency group %(group_id)s." msgstr "Tutarlılık grubu %(group_id)s güncellenirken hata meydana geldi." #, python-format msgid "Error occurred while cloning backing: %s during retype." msgstr "Destekleme kopyalanırken hata oluştu: retype sırasında %s." #, python-format msgid "Error occurred while copying %(src)s to %(dst)s." msgstr "%(src)s kaynağı %(dst)s hedefine kopyalanırken hata oluştu." #, python-format msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." msgstr "İmaj kopyalanırken hata oluştu: %(id)s mantıksal sürücüye: %(vol)s." 
#, python-format msgid "Error occurred while copying image: %(image_id)s to %(path)s." msgstr "İmaj kopyalanırken hata oluştu: %(image_id)s %(path)s." msgid "Error occurred while creating temporary backing." msgstr "Geçici destekleme oluşturulurken hata oluştu." #, python-format msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." msgstr "" "Mantıksal sürücü oluşturulurken hata oluştu: imajdan %(id)s: %(image_id)s." msgid "Error occurred while selecting datastore." msgstr "Verideposu seçilirken hata oluştu." #, python-format msgid "Error on adding lun to consistency group. %s" msgstr "Tutarsızlık grubuna lun eklenirken hata. %s" #, python-format msgid "Error on enable compression on lun %s." msgstr "Lun %s sıkıştırma etkinleştirmede hata." #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" msgstr "" "%(command)s yürütümünde hata. Hata kodu: %(exit_code)d Hata iletisi: " "%(result)s" #, python-format msgid "" "Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" msgstr "" "Komut yürütme hatası. Hata kodu: %(exit_code)d Hata iletisi: %(result)s" msgid "Error parsing array from host capabilities." msgstr "İstemci yeteneklerinden dizi ayrıştırma hatası." msgid "Error parsing array, pool, SLO and workload." msgstr "Dizi, havuz, SLO ve iş yükü ayrıştırma hatası." msgid "Error parsing target pool name, array, and fast policy." msgstr "Hedef havuz adı, dizisi ve fast ilkesi ayrıştırma hatası." #, python-format msgid "" "Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s" msgstr "" "%(volume_name)s üzerinde %(lun_name)s mantıksal sürücü hazırlama hatası. " "Ayrıntılar: %(ex)s" msgid "Error querying thin pool about data_percent" msgstr "data_percent ile ilgili ince havuz sorgularken hata" msgid "Error renaming logical volume" msgstr "Mantıksal sürücü yeniden adlandırılırken hata" #, python-format msgid "Error resolving host %(host)s. Error - %(e)s." msgstr "%(host)s istemci çözülürken hata. Hata - %(e)s." #, python-format msgid "Error running SSH command: \"%s\"." msgstr "SSH komutu çalıştırma hatası: \"%s\"." #, python-format msgid "Error running SSH command: %s" msgstr "SSH komutu çalıştırılırken hata: %s" msgid "Error running command." msgstr "Komut çalıştırma hatası." #, python-format msgid "" "Error scheduling %(volume_id)s from last vol-service: %(last_host)s : %(exc)s" msgstr "" "Son vol-service'den %(volume_id)s zamanlaması yapılırken hata: " "%(last_host)s : %(exc)s" #, python-format msgid "Error setting Flash Cache policy to %s - exception" msgstr "%s için Flash Cache ilkesi ayarlanırken hata - istisna" msgid "Error storing key." msgstr "Anahtar depolama hatası." #, python-format msgid "Error unmapping volume: %s" msgstr "Mantıksal sürücü eşleştirmesi kaldırılırken hata: %s" #, python-format msgid "Exception cloning volume %(name)s from source volume %(source)s." msgstr "" "%(source)s kaynak mantıksal sürücüsünden %(name)s mantıksal sürücüsü " "kopyalanırken olağandışı durum." #, python-format msgid "Exception creating LUN %(name)s in pool %(pool)s." msgstr "Havuz %(pool)s içinde LUN %(name)s oluşturulurken olağandışı durum." #, python-format msgid "Exception creating vol %(name)s on pool %(pool)s." msgstr "" "%(pool)s havuzu üzerinde %(name)s mantıksal sürücüsü oluşturulurken " "olağandışı durum." #, python-format msgid "" "Exception creating volume %(name)s from source %(source)s on share %(share)s." 
msgstr "" "%(share)s paylaşımı üzerinde %(source)s kaynağından %(name)s mantıksal " "sürücüsü oluşturulurken olağandışı durum." #, python-format msgid "Exception details: %s" msgstr "İstisna ayrıntıları: %s" #, python-format msgid "Exception during mounting %s" msgstr "%s bağlama sırasında olağandışı durum" #, python-format msgid "Exception during mounting %s." msgstr "%s bağlama sırasında istisna." #, python-format msgid "Exception during snapCPG revert: %s" msgstr "snapCPG geri alınırken olağandışı durum: %s" #, python-format msgid "Exception handling resource: %s" msgstr "Kaynak işlenirken olağandışı durum: %s" msgid "Exception in string format operation" msgstr "Karakter dizisi biçimi işlemde olağandışı durum" msgid "Exception loading extension." msgstr "Uzantı yüklenirken olağandışı durum." #, python-format msgid "Exception: %s" msgstr "İstisna: %s" #, python-format msgid "Exception: %s." msgstr "İstisna: %s." #, python-format msgid "Exists snapshot notification failed: %s" msgstr "Varolan anlık sistem görüntü bildirimi başarısız oldu: %s" #, python-format msgid "Exists volume notification failed: %s" msgstr "Mantıksal sürücü bildirimi başarısız oldu: %s" msgid "Extend volume failed." msgstr "Disk bölümü genişletme başarısız oldu." #, python-format msgid "Extension of volume %s failed." msgstr "%s mantıksal sürücüsünü genişletme başarısız oldu." msgid "FAST is not supported on this array." msgstr "Bu dizide FAST desteklenmiyor." #, python-format msgid "Failed collecting fcns database info for fabric %s" msgstr "Fabric %s için fcns veritabanı bilgisi toplanamadı" #, python-format msgid "Failed collecting name server info from fabric %s" msgstr "Fabric %s'den ad sunucu bilgisi toplanamadı" msgid "Failed collecting nscamshow" msgstr "nscamshow toplanamadı" msgid "Failed collecting nsshow info for fabric" msgstr "Fabric için nsshow bilgisi toplanamadı" #, python-format msgid "Failed collecting nsshow info for fabric %s" msgstr "Fabric %s için nsshow bilgisi toplanamadı" msgid "Failed collecting show fcns database for fabric" msgstr "Fabric için show fcns veritabanı toplanamadı" #, python-format msgid "Failed destroying volume entry %s" msgstr "Mantıksal sürücü girdisi %s silinemedi" #, python-format msgid "" "Failed fetching snapshot %(snapshot_id)s bootable flag using the provided " "glance snapshot %(snapshot_ref_id)s volume reference" msgstr "" "Verilen glance anlık sistem görüntüsü %(snapshot_ref_id)s mantıksal sürücü " "kaynağı kullanarak anlık sistem görüntüsü %(snapshot_id)s önyükleme bayrağı " "getirilemedi" #, python-format msgid "Failed getting active zone set from fabric %s" msgstr "Fabric %s'den etkin bölge ayarı alınamadı" #, python-format msgid "Failed getting zone status from fabric %s" msgstr "Fabric %s'den bölge durumu alınamadı" #, python-format msgid "Failed image conversion during cache creation: %s" msgstr "Önbellek oluşturma sırasında imaj dönüşümü başarısız oldu: %s" #, python-format msgid "" "Failed notifying about the volume action %(event)s for volume %(volume_id)s" msgstr "" "%(volume_id)s mantıksal sürücüsü için %(event)s mantıksal sürücü eylemi " "bildirilemedi" #, python-format msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "%(topic)s üzerindeki %(payload)s yük bildirilemedi" #, python-format msgid "Failed rolling back quota for %s reservations" msgstr "%s ayrılmışları için kota geri alınamadı" #, python-format msgid "" "Failed setting source volume %(source_volid)s back to its initial " "%(source_status)s status" msgstr "" "Kaynak mantıksal 
sürücü %(source_volid)s başlangıç %(source_status)s " "durumuna geri ayarlanamadı" #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage group " "for fast policy %(fastPolicyName)s. Please contact your sysadmin to get the " "volume returned to the default storage group." msgstr "" "Fast ilkesi %(fastPolicyName)s için öntanımlı depolama grubuna " "%(volumeName)s mantıksal sürücü yeniden ekleme geri alınamadı. Lütfen " "mantıksal sürücüyü öntanımlı depolama grubuna geri döndürmek için sistem " "yöneticinize başvurun." #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage group " "for fast policy %(fastPolicyName)s: Please contact your sys admin to get the " "volume re-added manually." msgstr "" "Fast ilkesi %(fastPolicyName)s için öntanımlı depolama grubuna " "%(volumeName)s mantıksal sürücüsünü yeniden ekleme geri alınamadı: Lütfen " "elle eklenmiş mantıksal sürücüyü almak için sistem yöneticinize danışın." #, python-format msgid "" "Failed to add %(volumeName)s to default storage group for fast policy " "%(fastPolicyName)s." msgstr "" "%(fastPolicyName)s fast ilkesi için öntanımlı depolama grubuna " "%(volumeName)s eklenemedi." #, python-format msgid "Failed to add %s to cg." msgstr "cg'ye %s eklenemedi." #, python-format msgid "Failed to add device to handler %s" msgstr "%s işleyicisi için aygıt eklenemedi" #, python-format msgid "Failed to add initiator iqn %s to target" msgstr "Hedefe başlatıcı iqn %s eklenemedi" #, python-format msgid "Failed to add initiator to group for SCST target %s" msgstr "SCST hedef %s için gruba başlatıcı eklenemedi" #, python-format msgid "Failed to add multihost-access for volume \"%s\"." msgstr "\"%s\" mantıksal sürücüsü için multihost-access eklenemedi." #, python-format msgid "" "Failed to add storage group %(storageGroupInstanceName)s to tier policy rule " "%(tierPolicyRuleInstanceName)s." msgstr "" "%(storageGroupInstanceName)s depolama grubu %(tierPolicyRuleInstanceName)s " "katman ilke kuralına eklenemedi." #, python-format msgid "Failed to add target(port: %s)" msgstr "Hedef eklenemedi(bağlantı noktası: %s)" msgid "Failed to authenticate user." msgstr "Kullanıcı kimlik doğrulaması yapılamadı." #, python-format msgid "Failed to close disk device %s" msgstr "Disk aygıtı %s kapatılamadı" #, python-format msgid "" "Failed to collect return properties for volume %(vol)s and connector " "%(conn)s." msgstr "" "%(vol)s mantıksal sürücüsü ve %(conn)s bağlayıcısı için dönüş özellikleri " "toplanamadı." #, python-format msgid "Failed to commit reservations %s" msgstr "%s rezervasyonları gönderilemedi" #, python-format msgid "Failed to copy %(src)s to %(dest)s." msgstr "%(src)s kaynaktan %(dest)s hedefe kopyalanamadı." #, python-format msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" msgstr "%(image_id)s imajı mantıksal sürücüye kopyalanamadı: %(volume_id)s" #, python-format msgid "Failed to copy image to volume: %(volume_id)s" msgstr "İmaj mantıksal sürücüye kopyalanamadı: %(volume_id)s" #, python-format msgid "Failed to copy volume %(src)s to %(dest)s." msgstr "Disk bölümü kaynağını %(src)s hedefe %(dest)s kopyalama başarısız." 
#, python-format msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "%(vol1)s disk bölümünün %(vol2)s'ye kopyalaması başarısız" #, python-format msgid "Failed to create %(conf)s for volume id:%(vol_id)s" msgstr "Mantıksal sürücü kimliği:%(vol_id)s için %(conf)s oluşturulamadı" #, python-format msgid "Failed to create CGSnapshot. Exception: %s." msgstr "CGSnapshot oluşturulamadı. İstisna: %s." msgid "" "Failed to create SOAP client.Check san_ip, username, password and make sure " "the array version is compatible" msgstr "" "SOAP istemcisi oluşturulamadı. san_ip, kullanıcı adı, parolayı kontrol edin " "ve dizi sürümünün uyumlu olduğundan emin olun" #, python-format msgid "" "Failed to create a first volume for storage group : %(storageGroupName)s." msgstr "" "Depolama grubu için birinci mantıksal sürücü oluşturulamadı : " "%(storageGroupName)s." #, python-format msgid "Failed to create blkio cgroup '%(name)s'." msgstr "blkio cgroup '%(name)s' oluşturma başarısız." #, python-format msgid "Failed to create clone of volume \"%s\"." msgstr "\"%s\" mantıksal sürücüsünün klonu oluşturulamadı." #, python-format msgid "Failed to create consistency group %(group_id)s." msgstr "Tutarlılık grubu %(group_id)s oluşturulamadı." #, python-format msgid "" "Failed to create default storage group for FAST policy : %(fastPolicyName)s." msgstr "" "FAST ilkesi için öntanımlı depolama grubu oluşturulamadı: %(fastPolicyName)s." #, python-format msgid "Failed to create group to SCST target %s" msgstr "SCST hedef %s için grup oluşturulamadı" #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "%(storageSystemName)s üzerinde donanım kimlik(leri) oluşturulamadı." #, python-format msgid "" "Failed to create iscsi target for Volume ID: %(vol_id)s. Please ensure your " "tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" "Mantıksal Sürücü Kimliği: %(vol_id)s için iscsi hedefi oluşturulamadı. " "Lütfen tgtd yapılandırma dosyanızın 'include %(volumes_dir)s/*' içerdiğine " "emin olun" #, python-format msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "" "Mantıksal Sürücü Kimliği: %(vol_id)s için iscsi hedefi oluşturulamadı: %(e)s" #, python-format msgid "Failed to create iscsi target for volume id:%s" msgstr "Mantıksal sürücü kimliği için iscsi hedefi oluşturulamadı:%s" #, python-format msgid "Failed to create iscsi target for volume id:%s." msgstr "Mantıksal sürücü kimliği için iscsi hedefi oluşturulamadı:%s." #, python-format msgid "Failed to create snapshot of volume \"%s\"." msgstr "\"%s\" mantıksal sürücüsünün anlık sistem görüntüsü oluşturulamadı." #, python-format msgid "Failed to create transfer record for %s" msgstr "%s için aktarım kaydı oluşturma başarısız" #, python-format msgid "Failed to create volume \"%s\"." msgstr "\"%s\" disk bölümü oluşturma başarısız." #, python-format msgid "Failed to create volume %s" msgstr "%s mantıksal sürücüsü oluşturulamadı" #, python-format msgid "Failed to create volume from snapshot \"%s\"." msgstr "\"%s\" anlık sistem görüntüsünden mantıksal sürücü oluşturulamadı." #, python-format msgid "Failed to created Cinder secure environment indicator file: %s" msgstr "Cinder güvenli ortam gösterge dosyası oluşturulamadı: %s" #, python-format msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." msgstr "" "%(vol)s mantıksal sürücüsünün %(snap)s anlık sistem görüntüsü silinemedi." #, python-format msgid "" "Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " "%(exception)s." 
msgstr "" "CGSnapshot'ın %(snap)s anlık sistem görüntüsü silinemedi. İstisna: " "%(exception)s." #, python-format msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." msgstr "CG'nin %(vol)s mantıksal sürücüsü silinemedi. İstisna: %(exception)s." #, python-format msgid "Failed to delete volume \"%s\"." msgstr "\"%s\" disk bölümü silme başarısız." #, python-format msgid "Failed to delete volume %s" msgstr "%s mantıksal sürücüsü silinemedi" #, python-format msgid "Failed to ensure export of volume \"%s\"." msgstr "\"%s\" mantıksal sürücüsünün dışa aktarımı sağlanamadı." #, python-format msgid "Failed to ensure export of volume %s" msgstr "%s mantıksal sürücüsünün dışa aktarımı sağlanamadı" #, python-format msgid "Failed to export fiber channel target due to %s" msgstr "%s nedeniyle fiber kanal hedefi dışa aktarılamadı" #, python-format msgid "" "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB." msgstr "" "Mantıksal sürücü %(name)s için %(current_size)sGB mevcut boyutundan " "%(new_size)sGB boyutuna extend_volume işlemi başarısız oldu." #, python-format msgid "Failed to find %(s)s. Result %(r)s" msgstr "%(s)s bulunamadı. Sonuç %(r)s" #, python-format msgid "Failed to find available iSCSI targets for %s." msgstr "%s için kullanılabilir iSCSI hedefleri bulunamadı." #, python-format msgid "Failed to get device number for throttling: %(error)s" msgstr "Ayarlama için aygıt numarası alınamadı: %(error)s" #, python-format msgid "" "Failed to get driver initiator data for initiator %(initiator)s and " "namespace %(namespace)s" msgstr "" "%(initiator)s başlatıcısı ve %(namespace)s ad alanı için sürücü başlatıcı " "verisi alınamadı" #, python-format msgid "Failed to get fiber channel info from storage due to %(stat)s" msgstr "%(stat)s nedeniyle depolamadan fiber kanal bilgisi alınamadı" #, python-format msgid "Failed to get fiber channel target from storage server due to %(stat)s" msgstr "%(stat)s nedeniyle depolama sunucusundan fiber kanal hedefi alınamadı" #, python-format msgid "Failed to get or create storage group %(storageGroupName)s." msgstr "%(storageGroupName)s depolama grubu oluşturulamadı ya da alınamadı." #, python-format msgid "Failed to get response: %s." msgstr "Yanıt alınamadı: %s." #, python-format msgid "Failed to get server info due to %(state)s." msgstr "%(state)s nedeniyle sunucu bilgisi alınamadı." msgid "Failed to get sns table" msgstr "Sns tablosu alınamadı" #, python-format msgid "Failed to get target wwpns from storage due to %(stat)s" msgstr "%(stat)s nedeniyle depolamadan hedef wwpns alınamadı" msgid "Failed to get updated stats from Datera Cluster." msgstr "Datera Kümesinden güncellenmiş durum bilgileri alınamadı." msgid "Failed to get updated stats from Datera cluster." msgstr "Datera kümesinden güncellenmiş durum bilgisi alınamadı." #, python-format msgid "Failed to initialize connection to volume \"%s\"." msgstr "\"%s\" disk bölümü bağlantı başlatma başarısız." msgid "Failed to initialize connection." msgstr "Bağlantı başlatılamadı." msgid "Failed to initialize driver." msgstr "Sürücü başlatma başarısız." #, python-format msgid "Failed to issue df command for path %(path)s, error: %(error)s." msgstr "%(path)s yolu için df komutu sonuçlanmadı, hata: %(error)s." #, python-format msgid "Failed to issue mmgetstate command, error: %s." msgstr "mmgetstate komutu sonuçlanmadı, hata: %s." #, python-format msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." 
msgstr "%(path)s yolu için mmlsattr komutu sonuçlanmadı, hata: %(error)s." #, python-format msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" msgstr "%(path)s yolundaki mmlsattr komutu sonuçlanmadı, hata: %(error)s" #, python-format msgid "Failed to issue mmlsconfig command, error: %s." msgstr "mmlsconfig komutu sonuçlanmadı, hata: %s." #, python-format msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." msgstr "%(path)s yolu için mmlsfs komutu sonuçlanmadı, hata: %(error)s." #, python-format msgid "Failed to load %s" msgstr "%s yükleme başarısız" msgid "Failed to load osapi_volume" msgstr "osapi_volume yükleme başarısız" #, python-format msgid "Failed to open iet session list for %s" msgstr "%s için iet oturum listesi açılamadı" msgid "Failed to query migration status of LUN." msgstr "LUN'un göç durumu sorgulanamadı." msgid "Failed to re-export volume, setting to ERROR." msgstr "Mantıksal sürücü yeniden dışa aktarılamadı, HATA durumuna ayarlıyor." #, python-format msgid "" "Failed to remove %(volumeName)s from the default storage group for the FAST " "Policy." msgstr "" "FAST İlkesi için öntanımlı depolama grubundan %(volumeName)s kaldırılamadı." #, python-format msgid "Failed to remove %s from cg." msgstr "cg'den %s kaldırılamadı." #, python-format msgid "Failed to remove LUN %s" msgstr "LUN %s kaldırılamadı" #, python-format msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "" "Mantıksal Sürücü Kimliği: %(vol_id)s için iscsi hedefi kaldırılamadı: %(e)s" #, python-format msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" "Mantıksal sürücü kimliği:%(vol_id)s için iscsi hedefi kaldırılamadı: %(e)s" #, python-format msgid "Failed to remove iscsi target for volume id:%s" msgstr "Mantıksal sürücü kimliği için iscsi hedefi kaldırılamadı:%s" #, python-format msgid "Failed to remove iscsi target for volume id:%s." msgstr "Mantıksal sürücü kimliği için iscsi hedefi kaldırılamadı:%s." #, python-format msgid "Failed to rename %(new_volume)s into %(volume)s." msgstr "%(volume)s %(new_volume)s olarak yeniden adlandırılamadı." msgid "Failed to rename the created snapshot, reverting." msgstr "" "Oluşturulan anlık sistem görüntüsü yeniden adlandırılamadı, eski haline " "döndürülüyor." #, python-format msgid "Failed to resize volume %(volume_id)s, error: %(error)s." msgstr "" "Mantıksal sürücü %(volume_id)s yeniden boyutlandırılamadı, hata: %(error)s." #, python-format msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "SolidFire-ID mantıksal sürücüsü alınamadı: %s get_by_account! " #, python-format msgid "" "Failed to return volume %(volumeName)s to original storage pool. Please " "contact your system administrator to return it to the correct location." msgstr "" "Mantıksal sürücü %(volumeName)s özgün depolama havuzuna dönemedi. Lütfen " "doğru konuma döndürmek için sistem yöneticinize başvurun." #, python-format msgid "Failed to roll back reservations %s" msgstr "%s rezervasyonları geri alma başarısız" #, python-format msgid "Failed to run task %(name)s: %(cause)s" msgstr "%(name)s adındaki görevi çalıştırma başarısız: %(cause)s" #, python-format msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "schedule_%(method)s başarısız oldu: %(ex)s" #, python-format msgid "Failed to send request: %s." msgstr "İstek gönderilemedi: %s." 
#, python-format msgid "Failed to set 'enable' attribute for SCST target %s" msgstr "SCST hedef %s öznitelik 'enable' ayarlanamadı" #, python-format msgid "Failed to set attribute for enable target driver %s" msgstr "%s hedef sürücüsünü etkinleştirmek için öznitelik ayarlanamadı" msgid "Failed to setup the Dell EqualLogic driver." msgstr "Dell EqualLogic sürücüsü kurulamadı." msgid "Failed to shutdown horcm." msgstr "Horcm kapatılamadı." #, python-format msgid "Failed to snap Consistency Group %s" msgstr "Tutarlılık Grubu %s anlık sistem görüntüsü alınamadı" msgid "Failed to start horcm." msgstr "Horcm başlatılamadı." msgid "Failed to terminate connection" msgstr "Bağlantı sonlandırılamadı" #, python-format msgid "Failed to terminate connection %(initiator)s %(vol)s" msgstr "%(initiator)s %(vol)s bağlantısı sonlandırılamadı" #, python-format msgid "Failed to terminate connection to volume \"%s\"." msgstr "\"%s\" disk bölümü bağlantı sonlandırma başarısız." #, python-format msgid "Failed to umount %(share)s, reason=%(stderr)s" msgstr "%(share)s ayırma başarısız, nedeni=%(stderr)s" #, python-format msgid "" "Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " "target" msgstr "" "iscsi hedefi kaldırıldıktan sonra mantıksal sürücü kimliği %(vol_id)s için " "%(conf)s güncellenemedi" #, python-format msgid "Failed to update %(conf)s for volume id:%(vol_id)s" msgstr "Mantıksal sürücü kimliği:%(vol_id)s için %(conf)s güncellenemedi" #, python-format msgid "" "Failed to update %(volume_id)s metadata using the provided snapshot " "%(snapshot_id)s metadata." msgstr "" "Verilen anlık sistem görüntüsü %(snapshot_id)s metadata'sı kullanılarak " "%(volume_id)s metadata'sı güncellenemedi." #, python-format msgid "" "Failed to update initiator data for initiator %(initiator)s and backend " "%(backend)s" msgstr "" "%(backend)s art alanda çalışan uygulama ve %(initiator)s başlatıcısı için " "başlatıcı veri güncellenemedi" #, python-format msgid "Failed to update quota donating volume transfer id %s" msgstr "%s mantıksal sürücü aktarım kimliğine verilen kota güncellenemedi" #, python-format msgid "Failed to update quota for consistency group %s." msgstr "Tutarlılık grubu %s için kota güncellenemedi." #, python-format msgid "Failed to update quota for deleting volume: %s" msgstr "Mantıksal sürücüyü silmek için kota güncellenemedi: %s" msgid "Failed to update quota while deleting volume." msgstr "Mantıksal sürücü silinirken kota güncellenemedi." msgid "Failed to update usages deleting backup" msgstr "Kullanımları güncelleme başarısız yedek siliniyor" msgid "Failed to update usages deleting snapshot" msgstr "Anlık sistem görüntüsü silinirken kullanımlar güncellenemedi" msgid "Failed to update usages deleting volume." msgstr "Mantıksal sürücü silme kullanımları güncellenemedi." #, python-format msgid "Failed to update volume status: %s" msgstr "Mantıksal sürücü durumu güncellenemedi: %s" #, python-format msgid "" "Failed to verify that volume was added to storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST ilkesi için mantıksal sürücünün depolama grubuna eklenmesi " "doğrulanamadı: %(fastPolicyName)s." msgid "Failed to write in /etc/scst.conf." msgstr "/etc/scst.conf dosyasına yazılamadı." 
#, python-format msgid "" "Failed updating %(snapshot_id)s metadata using the provided volumes " "%(volume_id)s metadata" msgstr "" "Verilen %(volume_id)s mantıksal sürücü metadata'sı kullanarak " "%(snapshot_id)s metadata'sı güncellenemedi" #, python-format msgid "" "Failed updating model of volume %(volume_id)s with creation provided model " "%(model)s" msgstr "" "Verilen %(model)s modelini oluşturma ile %(volume_id)s mantıksal sürücü " "modeli güncellenemedi" #, python-format msgid "" "Failed updating model of volume %(volume_id)s with driver provided model " "%(model)s" msgstr "" "Verilen %(model)s model sürücü ile %(volume_id)s mantıksal sürücü modeli " "güncellemesi başarısız oldu" #, python-format msgid "" "Failed updating snapshot metadata using the provided volumes %(volume_id)s " "metadata" msgstr "" "Verilen mantıksal sürücü %(volume_id)s metadata'sı kullanılarak anlık sistem " "görüntü metadata güncellemesi başarısız oldu" #, python-format msgid "Failed updating volume %(volume_id)s bootable flag to true" msgstr "" "Mantıksal sürücü %(volume_id)s önyükleme bayrağı doğru olarak güncellenemedi" #, python-format msgid "Failed updating volume %(volume_id)s with %(update)s" msgstr "%(update)s ile %(volume_id)s mantıksal sürücüsü güncellenemedi" #, python-format msgid "Failed updating volume %(volume_id)s with %(updates)s" msgstr "" "%(updates)s güncellemeleri ile %(volume_id)s mantıksal sürücü güncellenemedi" #, python-format msgid "Failure deleting staged tmp LUN %s." msgstr "Hata silme tmp LUN %s hazırladı." msgid "Fetch volume pool name failed." msgstr "Mantıksal sürücü havuz adı getirme başarısız oldu." #, python-format msgid "" "FibreChannelDriver validate_connector failed. No '%(setting)s'. Make sure " "HBA state is Online." msgstr "" "FibreChannelDriver validate_connector başarısız oldu. '%(setting)s' yok. HBA " "durumunun çevrim içi olduğuna emin olun." #, python-format msgid "Flexvisor failed to get event %(volume)s (%(status)s)." msgstr "Flexvisor %(volume)s (%(status)s) olayı alamadı." #, python-format msgid "Flexvisor failed to get pool %(id)s info." msgstr "Flexvisor havuz %(id)s bilgisini alamadı." #, python-format msgid "Flexvisor failed to get pool list due to %s." msgstr "Flexvisor %s nedeniyle havuz listesini alamadı." #, python-format msgid "Flexvisor failed to get pool list.(Error: %d)" msgstr "Flexvisor havuz listesini alamadı.(Hata: %d)" #, python-format msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "Şu kimliğe eşleştirilmiş %(count)s mantıksal sürücü bulundu: %(uuid)s." msgid "Free capacity not set: volume node info collection broken." msgstr "" "Boş kapasite ayarlı değil: mantıksal sürücü düğüm bilgisi koleksiyonu bozuk." #, python-format msgid "GPFS is not active. Detailed output: %s." msgstr "GPFS etkin değil. Detaylı çıktı: %s." msgid "Get method error." msgstr "Get metodu hatası." msgid "Get replication status for volume failed." msgstr "Mantıksal sürücü için kopyalama durumu alma başarısız oldu." #, python-format msgid "HDP not found: %s" msgstr "HDP bulunamadı: %s" #, python-format msgid "ISCSI discovery attempt failed for:%s" msgstr "ISCSI keşif girişimi başarısız oldu:%s" #, python-format msgid "Invalid API object: %s" msgstr "Geçersiz API nesnesi: %s" #, python-format msgid "Invalid JSON: %s" msgstr "Geçersiz JSON: %s" #, python-format msgid "Invalid ReplayList return: %s" msgstr "Geçersiz ReplayList dönüşü: %s" #, python-format msgid "Invalid value for %(key)s, value is %(value)s." 
msgstr "%(key)s için geçersiz değer, değer %(value)s." #, python-format msgid "JSON encode params %(param)s error: %(status)s." msgstr "JSON %(param)s parametre şifreleme hatası: %(status)s." #, python-format msgid "JSON transfer error: %s." msgstr "JSON aktarım hatası: %s." #, python-format msgid "LUN %(path)s geometry failed. Message - %(msg)s" msgstr "LUN %(path)s geometrisi başarısız oldu. İleti - %(msg)s" msgid "LUN extend failed!" msgstr "LUN genişletme işlemi başarısız oldu!" #, python-format msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." msgstr "" "Maskeleme görünümü gibi görünür: %(maskingViewName)s yakın zamanda silindi." #, python-format msgid "Lun %s has dependent snapshots, skipping lun deletion." msgstr "Lun %s bağımlı anlık görüntülere sahip, lun silme işlemi atlanıyor." #, python-format msgid "Lun create for %s failed!" msgstr "%s için Lun oluşturma başarısız oldu!" #, python-format msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "" "Lun %(vol)s mantıksal sürücüsü %(snap)s anlık görüntüsü için anlık görüntü " "oluşturamadı!" #, python-format msgid "Lun delete for %s failed!" msgstr "%s için Lun silme başarısız oldu!" #, python-format msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "" "Lun %(vol)s mantıksal sürücüsü %(snap)s anlık görüntüsü için anlık görüntüyü " "silemedi!" msgid "Lun mapping returned null!" msgstr "Lun eşleştirmesi boş değer döndü!" #, python-format msgid "MSGID%(id)04d-E: %(msg)s" msgstr "MSGID%(id)04d-E: %(msg)s" #, python-format msgid "" "Masking View creation or retrieval was not successful for masking view " "%(maskingViewName)s. Attempting rollback." msgstr "" "%(maskingViewName)s maskeleme görünümü için Maskeleme Görünümü oluşturma ya " "da alma başarılı değil. Geri alma deneniyor." #, python-format msgid "" "Max retries reached deleting backup %(basename)s image of volume %(volume)s." msgstr "" "%(volume)s mantıksal sürücüsünün %(basename)s yedek imajı silerken azami " "yeniden denemeye ulaşıldı." #, python-format msgid "Message: %s" msgstr "İleti: %s" #, python-format msgid "Migration of LUN %s failed to complete." msgstr "%s LUN göçü tamamlanamadı." msgid "Model update failed." msgstr "Model güncellemesi başarısız oldu." #, python-format msgid "Mount failure for %(share)s after %(count)d attempts." msgstr "%(share)s paylaşımları için %(count)d denemeden sonra bağlama hatası." #, python-format msgid "Mount failure for %(share)s." msgstr "%(share)s için bağlama hatası." #, python-format msgid "Multiple replay profiles under name %s" msgstr "%s adı altında birden fazla tekrar profilleri" #, python-format msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s" msgstr "" "NFS paylaşımı %(share)s hiçbir servis girdisine sahip değil: %(svc)s -> " "%(hdp)s" msgid "No CLI output for firmware version check" msgstr "Donanım yazılımı sürüm kontrolü için hiçbir CLI çıktısı yok" #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of pool: %(pool)s." msgstr "" "Hiçbir eylem gerekli değil. Mantıksal sürücü: %(volumeName)s zaten havuzun " "bir parçasıdır: %(pool)s." #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of slo/workload " "combination: %(targetCombination)s." msgstr "" "Hiçbir eylem gerekmez. Mantıksal sürücü: %(volumeName)s zaten slo/workload " "birleşiminin parçasıdır: %(targetCombination)s." 
#, python-format msgid "No configuration found for service: %s" msgstr "Servis için hiçbir yapılandırma bulunamadı: %s" #, python-format msgid "" "No snapshots found in database, but %(path)s has backing file " "%(backing_file)s!" msgstr "" "Veritabanında hiçbir anlık sistem görüntüsü bulunamadı, ancak %(path)s " "yolunda destek dosyaları %(backing_file)s var!" #, python-format msgid "" "One of the components of the original masking view %(maskingViewName)s " "cannot be retrieved so please contact your system administrator to check " "that the correct initiator(s) are part of masking." msgstr "" "%(maskingViewName)s özgün maskeleme görünümünün bileşenlerinden biri " "alınamıyor, bu yüzden lütfen doğru başlatıcıların maskelemenin bir parçası " "olup olmadığını kontrol etmek için sistem yöneticinize başvurun." #, python-format msgid "" "Only SLO/workload migration within the same SRP Pool is supported in this " "version The source pool : %(sourcePoolName)s does not match the target " "array: %(targetPoolName)s. Skipping storage-assisted migration." msgstr "" "Bu sürümde aynı SRP havuzu içinde sadece SLO/iş yükü göçü destekleniyor " "Kaynak havuz : %(sourcePoolName)s hedef dizi ile eşleşmez: " "%(targetPoolName)s. Depolama destekli göç atlanıyor." msgid "Only available volumes can be migrated between different protocols." msgstr "" "Sadece mevcut mantıksal sürücüler farklı protokoller arasında taşınabilir." #, python-format msgid "Pipe1 failed - %s " msgstr "Pipe1 başarısız - %s " #, python-format msgid "Pipe2 failed - %s " msgstr "Pipe2 başarısız - %s " msgid "Promote volume replica failed." msgstr "Mantıksal sürücü kopyasını yükseltme başarısız oldu." #, python-format msgid "" "Purity host %(host_name)s is managed by Cinder but CHAP credentials could " "not be retrieved from the Cinder database." msgstr "" "Purity istemcisi %(host_name)s Cinder tarafından yönetilir ancak CHAP kimlik " "bilgileri Cinder veritabanından alınamaz." #, python-format msgid "" "Purity host %(host_name)s is not managed by Cinder and can't have CHAP " "credentials modified. Remove IQN %(iqn)s from the host to resolve this issue." msgstr "" "Purity istemcisi %(host_name)s Cinder tarafından yönetilemez ve CHAP kimlik " "bilgileri değiştirilemez. Bu sorunu çözmek için istemciden IQN %(iqn)s'ini " "kaldır." #, python-format msgid "REST Not Available: %s" msgstr "REST Kullanılamaz: %s" #, python-format msgid "Re-throwing Exception %s" msgstr "İstisna yeniden fırlatılıyor %s" #, python-format msgid "Read response raised an exception: %s." msgstr "Okuma yanıtı bir istisna oluşturdu: %s." msgid "Recovered model server connection!" msgstr "Kurtarılmış model sunucu bağlantısı!" #, python-format msgid "Recovering from a failed execute. Try number %s" msgstr "Başarısız bir yürütme kurtarılıyor. %s numara dene" msgid "Replication must be specified as ' True' or ' False'." msgstr "Kopyalama ' True' ya da ' False' olarak belirtilmiş olmalıdır." msgid "" "Requested to setup thin provisioning, however current LVM version does not " "support it." msgstr "" "İnce hazırlık kurulumu istendi, ancak mevcut LVM sürümü bunu desteklemiyor." #, python-format msgid "Resizing %s failed. Cleaning volume." msgstr "" "%s'nin yeniden boyutlandırılması başarısız oldu. Mantıksal sürücü " "temizleniyor." #, python-format msgid "Restore to volume %(volume)s finished with error - %(error)s." msgstr "%(volume)s mantıksal sürücü geri yüklemesi hata ile bitti - %(error)s." 
#, python-format msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" msgstr "" "Yeniden deneme %(retry)s süreleri: %(method)s Başarısız oldu %(rc)s: " "%(reason)s" #, python-format msgid "" "SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, Platinum, " "Diamond, Optimized, NONE." msgstr "" "SLO: %(slo)s geçersiz. Geçerli değerler Bronz, Gümüş, Altın, Platin, Elmas, " "Optimize Edilmiş, HİÇBİRİ." msgid "" "ScVolume returned success with empty payload. Attempting to locate volume" msgstr "" "ScVolume boş yük ile başarı döndürdü. Mantıksal sürücü konumlandırma " "deneniyor" #, python-format msgid "Server Busy retry request: %s" msgstr "Sunucu Meşgul tekrar deneme isteği: %s" #, python-format msgid "Setting QoS for %s failed" msgstr "%s için QoS ayarı başarısız oldu" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export." msgstr "" "%s paylaşımı geçersiz biçim nedeniyle yoksayılır. Form adresi olmalıdır:/" "export." #, python-format msgid "" "Skipping remove_export. No iscsi_target ispresently exported for volume: %s" msgstr "" "remove_export atlanıyor. Mantıksal sürücü için şimdilik hiçbir iscsi_target " "dışa aktarılmadı: %s" #, python-format msgid "Snapshot %s: has clones" msgstr "Anlık sistem görüntüsü %s: kopyalara sahip" msgid "Snapshot did not exist. It will not be deleted" msgstr "Anlık sistem görüntüsü olmasaydı silinemeyecekti" #, python-format msgid "Source snapshot %(snapshot_id)s cannot be found." msgstr "Kaynak anlık sistem görüntüsü %(snapshot_id)s bulunamıyor." #, python-format msgid "Source snapshot cannot be found for target volume %(volume_id)s." msgstr "" "Hedef mantıksal sürücü %(volume_id)s için kaynak anlık sistem görüntüsü " "bulunamadı." #, python-format msgid "StdErr :%s" msgstr "StdErr :%s" #, python-format msgid "StdOut :%s" msgstr "StdOut :%s" #, python-format msgid "Storage profile: %s cannot be found in vCenter." msgstr "Depolama profili: %s vCenter'da bulunamıyor." msgid "Sync volume replica failed." msgstr "Mantıksal sürücü kopyasını eşzamanlandırma başarısız oldu." #, python-format msgid "TSM [%s] not found in CloudByte storage." msgstr "CloudByte depolamasında TSM [%s] bulunamadı." #, python-format msgid "Target end points do not exist for hardware Id: %(hardwareIdInstance)s." msgstr "Donanım kimliği için hedef uç noktalar yoktur: %(hardwareIdInstance)s." msgid "The Flexvisor service is unavailable." msgstr "Flexvisor servisi kullanılabilir değil." msgid "The connector does not contain the required information." msgstr "Bağlayıcı gerekli bilgileri içermez." msgid "" "The connector does not contain the required information: initiator is missing" msgstr "Bağlayıcı gerekli bilgileri içermiyor: başlatıcı eksik" msgid "" "The connector does not contain the required information: wwpns is missing" msgstr "Bağlayıcı gerekli bilgileri içermiyor: wwpns eksik" msgid "The given extra_spec or valid_values is None." msgstr "Verilen extra_spec ya da valid_values hiçbiridir." #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s skipping storage-assisted migration." msgstr "" "Kaynak dizi : %(sourceArraySerialNumber)s hedef dizi ile eşleşmiyor: " "%(targetArraySerialNumber)s, depolama destekli göç atlanıyor." #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s, skipping storage-assisted migration." 
msgstr "" "Kaynak dizi : %(sourceArraySerialNumber)s hedef dizi ile eşleşmiyor: " "%(targetArraySerialNumber)s, depolama destekli göç atlanıyor." #, python-format msgid "The source volume %(volume_id)s cannot be found." msgstr "%(volume_id)s kaynak disk bölümü bulunamıyor." #, python-format msgid "The volume driver requires %(data)s in the connector." msgstr "Mantıksal sürücü bağlayıcıda %(data)s ister." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "Mantıksal sürücü bağlayıcıda iSCSI başlatıcı adı gerektirir." #, python-format msgid "There is no valid datastore satisfying requirements: %s." msgstr "Gereksinimleri karşılayacak geçerli hiçbir verideposu yok: %s." msgid "This usually means the volume was never successfully created." msgstr "" "Bu genellikle mantıksal sürücü asla başarılı bir şekilde oluşturulamaz " "anlamına gelir." msgid "Tiering Policy is not supported on this array." msgstr "Bu dizide Katmanlama İlkesi desteklenmiyor." #, python-format msgid "Trying to create snapshot by non-existent LV: %s" msgstr "Varolmayan LV ile anlık sistem görüntüsü oluşturma deneniyor: %s" #, python-format msgid "URLError: %s" msgstr "URLHata: %s" #, python-format msgid "Unable to create folder path %s" msgstr "%s klasör yolu oluşturulamadı" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST ilkesi için öntanımlı depolama grubu alınamadı ya da oluşturulamadı: " "%(fastPolicyName)s." #, python-format msgid "Unable to create volume %s from replay" msgstr "Tekrardan %s mantıksal sürücüsü oluşturulamadı" #, python-format msgid "Unable to create volume. Volume driver %s not initialized" msgstr "Mantıksal sürücü oluşturulamadı. Mantıksal sürücü %s ilklendirilmemiş" msgid "Unable to delete busy volume." msgstr "Kullanılan mantıksal sürücü silinemedi." #, python-format msgid "Unable to delete due to existing snapshot for volume: %s" msgstr "" "Mantıksal sürücü için varolan anlık sistem görüntüsü nedeniyle silinemedi: %s" msgid "" "Unable to delete the destination volume during volume migration, (NOTE: " "database record needs to be deleted)." msgstr "" "Disk bölümü geçişi sırasında hedef disk bölümü silinemedi, (NOT: veritabanı " "kaydının silinmesi gerekir)." #, python-format msgid "Unable to determine whether %(volumeName)s is composite or not." msgstr "" "%(volumeName)s mantıksal sürücüsünün bileşik olup olmadığı belirlenemedi." msgid "Unable to find FC initiators" msgstr "FC başlatıcısı bulunamadı" #, python-format msgid "Unable to find VG: %s" msgstr "VG bulunamadı: %s" #, python-format msgid "Unable to find controller port: %s" msgstr "Bağlantı noktası bağlayıcısı bulunamadı: %s" #, python-format msgid "" "Unable to find default storage group for FAST policy : %(fastPolicyName)s." msgstr "" "FAST ilkesi için öntanımlı depolama grubu bulunamadı : %(fastPolicyName)s." msgid "Unable to get associated pool of volume." msgstr "Mantıksal sürücünün ilişkili olduğu havuz alınamadı." #, python-format msgid "Unable to get default storage group %(defaultSgName)s." msgstr "%(defaultSgName)s öntanımlı depolama grubu alınamadı." msgid "Unable to get device mapping from network." msgstr "Ağdan aygıt eşleştirmesi alınamadı." #, python-format msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." msgstr "Fast ilkesi için ilke kuralı alınamadı: %(fastPolicyName)s." 
#, python-format msgid "Unable to locate Volume Group %s" msgstr "Mantıksal Sürücü Grubu %s yerleştirilemedi" #, python-format msgid "Unable to manage existing volume. Volume driver %s not initialized." msgstr "" "Varolan mantıksal sürücü yönetilemedi. Mantıksal sürücü %s ilklendirilmemiş." #, python-format msgid "Unable to map %(vol)s to %(srv)s" msgstr "%(vol)s %(srv)s sunucusuna eşleştirilemedi" #, python-format msgid "Unable to rename the logical volume for volume: %s" msgstr "Mantıksal sürücü yeniden adlandırılamadı: %s" #, python-format msgid "Unable to retrieve VolumeConfiguration: %s" msgstr "Mantıksal Sürücü Yapılandırması alınamadı: %s" #, python-format msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." msgstr "%(array)s dizisindeki %(poolName)s havuz örneği alınamadı." #, python-format msgid "Unable to unmap Volume %s" msgstr "Mantıksal sürücü %s eşleştirmesi kaldırılamadı" msgid "Unexpected build error:" msgstr "Beklenmeyen inşa hatası:" msgid "Unexpected error occurs in horcm." msgstr "Horcm'da beklenmeyen bir hata meydana geldi." msgid "Unexpected error occurs in snm2." msgstr "snm2 komutunda beklenmeyen bir hata meydana geldi." #, python-format msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" msgstr "deleteVolumeSet(%s) için retype() geri alınırken beklenmeyen hata" #, python-format msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" msgstr "deleteVolumeSet(%s) için retype() denenirken beklenmeyen hata" #, python-format msgid "Unknown exception in post clone resize LUN %s." msgstr "Kopyalama sonrası LUN %s yeniden boyutlandırılırken bilinmeyen hata." #, python-format msgid "" "Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound." msgstr "" "Mantıksal sürücü-%(volume_id)s eklemek için tutarlılık grubu güncellemesi " "başarısız oldu: MantıksalSürücüBulunamadı." #, python-format msgid "" "Update consistency group failed to remove volume-%(volume_id)s: " "VolumeNotFound." msgstr "" "Mantıksal Sürücü-%(volume_id)s kaldırmak için tutarlılık grubu güncellemesi " "başarısız oldu: MantıksalSürücüBulunamadı." msgid "Update snapshot usages failed." msgstr "Anlık sistem görüntü kullanımları güncellemesi başarısız oldu." msgid "Update volume model for transfer operation failed." msgstr "" "Aktarım işlemi için mantıksal sürücü modeli güncellemesi başarısız oldu." #, python-format msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." msgstr "" "İmaja mantıksal sürücü yükleme bir hata ile karşılaştı (imaj bilgisi: " "%(image_id)s)." msgid "VGC-CLUSTER command blocked and cancelled." msgstr "VGC-CLUSTER komutu bloklandı ve durduruldu." #, python-format msgid "Version string '%s' is not parseable" msgstr "'%s' sürüm karakter dizisi ayrıştırılabilir değildir" #, python-format msgid "Virtual disk device of backing: %s not found." msgstr "Desteklemenin sanal disk aygıtı: %s bulunamadı." #, python-format msgid "Vol copy job status %s." msgstr "Mantıksal sürücü kopyalama iş durumu %s." #, python-format msgid "" "Volume %(name)s is not suitable for storage assisted migration using retype." msgstr "" "Retype kullanılan depolama destekli göç için %(name)s mantıksal sürücüsü " "uygun değil." #, python-format msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" "Dizide mantıksal sürücü %(name)s bulunamadı. Silinecek hiçbir mantıksal " "sürücü yok." #, python-format msgid "" "Volume %(name)s not found on the array. No volume to migrate using retype." 
msgstr "" "Dizide mantıksal sürücü %(name)s bulunamadı. Retype kullanarak göç için " "hiçbir mantıksal sürücü yok." #, python-format msgid "" "Volume %(volumeid)s failed to send assign command, ret: %(status)s output: " "%(output)s" msgstr "" "Mantıksal sürücü %(volumeid)s atama komutunu gönderemedi, dönüş: %(status)s " "çıktı: %(output)s" #, python-format msgid "Volume %s doesn't exist on array." msgstr "Mantıksal sürücü %s dizide yok." #, python-format msgid "Volume %s, not found on SF Cluster." msgstr "Mantıksal %s, SF Kümesinde bulunamadı." #, python-format msgid "Volume %s: create failed" msgstr "Mantıksal sürücü %s: oluşturma başarısız oldu" #, python-format msgid "" "Volume %s: driver error when trying to retype, falling back to generic " "mechanism." msgstr "" "%s Mantıksal Sürücü: retype denenirken sürücü hatası, genel mekanizmaya geri " "dönülüyor." #, python-format msgid "Volume %s: rescheduling failed" msgstr "Mantıksal sürücü %s: yeniden zamanlama başarısız oldu" #, python-format msgid "Volume %s: update volume state failed." msgstr "" "Mantıksal sürücü %s: mantıksal sürücü durumu güncelleme başarısız oldu." #, python-format msgid "" "Volume : %(volumeName)s has not been added to target storage group " "%(storageGroup)s." msgstr "" "Mantıksal sürücü : %(volumeName)s %(storageGroup)s hedef depolama grubuna " "eklenmemiş." #, python-format msgid "" "Volume : %(volumeName)s has not been removed from source storage group " "%(storageGroup)s." msgstr "" "Mantıksal Sürücü : %(volumeName)s kaynak depolama grubundan %(storageGroup)s " "kaldırılmadı." #, python-format msgid "" "Volume : %(volumeName)s. was not successfully migrated to target pool " "%(targetPoolName)s." msgstr "" "Mantıksal sürücü %(volumeName)s. hedef havuza %(targetPoolName)s başarılı " "bir şekilde taşınamadı." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "accept_transfer operation!" msgstr "" "accept_transfer işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " "Bilgisi bulunamadı!" #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "attach_volume operation!" msgstr "" "attach_volume işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " "Bilgisi bulunamadı!" #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "delete_volume operation!" msgstr "" "delete_volume işlemi denenirken SolidFire Kümesinde Mantıksal Sürücü Bilgisi " "%s bulunamadı!" #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "detach_volume operation!" msgstr "" "detach_volume işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " "Bilgisi bulunamadı!" #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "extend_volume operation!" msgstr "" "extend_volume işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " "Bilgisi bulunamadı!" msgid "Volume did not exist. It will not be deleted" msgstr "Mantıksal sürücü olmasaydı silinemeyecekti" #, python-format msgid "Volume driver %s not initialized" msgstr "Mantıksal sürücü sürücüsü %s başlatılamadı" msgid "Volume in unexpected state" msgstr "Beklenmeyen durumdaki mantıksal sürücü" #, python-format msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" "Mantıksal sürücü beklenmeyen %s durumunda, beklenen durum bekleyen aktarım" msgid "Volume must be detached for clone operation." 
msgstr "Mantıksal sürücü kopyalama işlemi için ayrılmış olmalıdır." #, python-format msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "VolumeType %s silme başarısız oldu, VolumeType kullanımda." #, python-format msgid "" "WebDAV operation failed with error code: %(code)s reason: %(reason)s Retry " "attempt %(retry)s in progress." msgstr "" "WebDAV işlemi hata kodu: %(code)s neden: %(reason)s ilerleme sırasındaki " "tekrar deneme girişimi %(retry)s ile başarısız oldu." #, python-format msgid "WebDAV returned with %(code)s error during %(method)s call." msgstr "WebDAV %(method)s çağrısı sırasında %(code)s hatası döndürdü." #, python-format msgid "" "Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP, " "OLTP_REP, NONE." msgstr "" "İşyükü: %(workload)s geçerli değil. Geçerli değerler DSS_REP, DSS, OLTP, " "OLTP_REP, HİÇBİRİ." msgid "_find_mappings: volume is not active" msgstr "_find_mappings: mantıksal sürücü etkin değil" #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " "operation: orig=%(orig)s new=%(new)s." msgstr "" "_rm_vdisk_copy_op: %(vol)s mantıksal sürücü belirtilen vdisk kopyalama " "işlemine sahip değil: orjinal=%(orig)s yeni=%(new)s." #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" "_rm_vdisk_copy_op: Mantıksal sürücü %(vol)s metadata belirtilen vdisk " "kopyalama işlemine sahip değil: orjinal=%(orig)s yeni=%(new)s." #, python-format msgid "" "_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " "operations." msgstr "" "_rm_vdisk_copy_op: %s mantıksal sürücü kayıtlı hiçbir vdisk kopyalama " "işlemine sahip değil." #, python-format msgid "" "_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " "copy operations." msgstr "" "_rm_vdisk_copy_op: Mantıksal sürücü metadata %s kayıtlı herhangi bir vdisk " "kopyalama işlemine sahip değil." #, python-format msgid "" "_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " "%(host_name)s found." msgstr "" "_unmap_vdisk_from_host: %(host_name)s istemcisi için hiçbir mantıksal sürücü " "%(vol_name)s eşleştirmesi bulunamadı." #, python-format msgid "_wait_for_job_complete failed after %(retries)d tries." msgstr "_wait_for_job_complete %(retries)d denemeden sonra başarısız oldu." #, python-format msgid "_wait_for_sync failed after %(retries)d tries." msgstr "_wait_for_sync %(retries)d denemeden sonra başarısız oldu." #, python-format msgid "" "backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "yedek: %(vol_id)s yedek katı bağlantısı %(vpath)s den %(bpath)s e " "silinemedi.\n" "stdout: %(out)s\n" " stderr: %(err)s." #, python-format msgid "can't create 2 volumes with the same name, %s" msgstr "aynı ad ile 2 mantıksal sürücü oluşturulamıyor, %s" msgid "cinder-rtstool is not installed correctly" msgstr "cinder-rtstool doğru bir şekilde kurulu değil" #, python-format msgid "" "delete: %(vol_id)s failed with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "sil: %(vol_id)s stdout ile başarısız oldu: %(out)s\n" " stderr: %(err)s" msgid "delete_vol: provider location empty." msgstr "delete_vol: sağlayıcı konumu boş." #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "ensure_export: Mantıksal sürücü %s depolama üzerinde bulunamadı." 
#, python-format msgid "error opening rbd image %s" msgstr "rbd imajı %s açma hatası" msgid "error refreshing volume stats" msgstr "mantıksal sürücü durum bilgisi tazeleme hatası" msgid "horcm command timeout." msgstr "horcm komutu zaman aşımı." #, python-format msgid "iSCSI portal not found for service: %s" msgstr "Servis için iSCSI kapısı bulunamadı: %s" #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s." msgstr "" "initialize_connection: %(vol)s mantıksal sürücüsü ve %(conn)s bağlayıcısı " "için dönüş özellikleri toplanamadı." #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s.\n" msgstr "" "initialize_connection: %(vol)s mantıksal sürücüsü ve %(conn)s bağlayıcısı " "için dönüş özellikleri toplanamadı.\n" msgid "model server went away" msgstr "Model sunucusu gitti." #, python-format msgid "single_user auth mode enabled, but %(param)s not set" msgstr "single_user kimlik doğrulama kipi etkin, fakat %(param)s ayarlı değil" msgid "snm2 command timeout." msgstr "snm2 komutu zaman aşımı." msgid "" "storwize_svc_multihostmap_enabled is set to False, not allowing multi host " "mapping." msgstr "" "storwize_svc_multihostmap_enabled Yanlış olarak ayarlı, çoklu istemci " "eşlemeye izin vermez." cinder-8.0.0/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po0000664000567000056710000014331712701406250025502 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # Ying Chun Guo , 2015 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0b4.dev61\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-05 05:24+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-26 03:31+0000\n" "Last-Translator: openstackjenkins \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "%(path)s is being set with open permissions: %(perm)s" msgstr "%(path)s açık izinlerle ayarlanıyor: %(perm)s" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "AttachSnapTask.revert: detach mount point %s" msgstr "AttachSnapTask.revert: %s bağlantı noktasını ayır" msgid "Attempted to delete a space that's not there." msgstr "Orada olmayan bir alan silinmeye çalışıldı." #, python-format msgid "" "Attempting a rollback of: %(volumeName)s to original pool " "%(sourcePoolInstanceName)s." msgstr "" "%(volumeName)s'in asıl havuz %(sourcePoolInstanceName)s'e geri alınması " "deneniyor." msgid "Attempting recreate of backing lun..." msgstr "Destekleyen lun tekrar oluşturulmaya çalışılıyor..." #, python-format msgid "Availability zone '%s' is invalid" msgstr "'%s' kullanılabilir bölgesi geçersiz" #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping verify." msgstr "" "Yedek servisi %(service)s doğrulamayı desteklemiyor. Yedek %(id)s " "doğrulanmamış. Doğrulama atlanıyor." msgid "" "Both 'storagetype:prvosioning' and 'provisioning:type' are set in the extra " "specs, the value of 'provisioning:type' will be used. The key 'storagetype:" "provisioning' may be deprecated in the next release." 
msgstr "" "Hem 'storagetype:prvosioning' hem 'provisioning:type' ek özelliklerde " "ayarlanmış, 'provisioning:type' değeri kullanılacak. 'storagetype:" "provisioning' anahtarı sonraki sürümlerde kaldırılabilir." #, python-format msgid "CG %(cg_name)s does not exist. Message: %(msg)s" msgstr "CG %(cg_name)s mevcut değil. İleti: %(msg)s" #, python-format msgid "CG %(cg_name)s is deleting. Message: %(msg)s" msgstr "CG %(cg_name)s siliyor. İleti: %(msg)s" #, python-format msgid "CHAP is enabled, but server secret not configured on server %s" msgstr "CHAP etkin, ama %s sunucusu üzerinde sunucu gizi yapılandırılmamış" #, python-format msgid "CHAP secret exists for host %s but CHAP is disabled" msgstr "CHAP gizi %s istemcisi için mevcut ama CHAP kapalı" msgid "CHAP secret exists for host but CHAP is disabled." msgstr "CHAP gizi istemci için mevcut ama CHAP kapatılmış." msgid "Can't find lun on the array." msgstr "Dizide lun bulunamıyor." msgid "Can't find snapshot on the array." msgstr "Dizide anlık görüntü bulunamıyor." msgid "Can't find target iqn from rest." msgstr "Rest'den hedef iqn bulunamadı." msgid "Cannot determine the hardware type." msgstr "Donanım türü algılanamadı." #, python-format msgid "Cannot get volume status %(exc)s." msgstr "Mantıksal sürücü durumu %(exc)s alınamıyor." #, python-format msgid "" "Cannot undo volume rename; old name was %(old_name)s and new name is " "%(new_name)s." msgstr "" "Mantıksal sürücü yeniden adlandırma geri alınamaz; eski isim %(old_name)s " "idi ve yeni isim %(new_name)s." #, python-format msgid "Cgsnapshot name %(name)s already exists. Message: %(msg)s" msgstr "Cgsnapshot ismi %(name)s zaten mevcut. İleti: %(msg)s" #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "Değişiklik, şu kaynaklar için kullanımı 0'ın altına düşürecek: %s" #, python-format msgid "" "Changing backing: %(backing)s name from %(new_name)s to %(old_name)s failed." msgstr "" "Destekleyici: %(backing)s ismi %(new_name)s'den %(old_name)s'e değişme " "başarısız." #, python-format msgid "" "Clone failed on V3. Cleaning up the target volume. Clone name: %(cloneName)s " msgstr "" "V3 üzerinde kopya başarısız. Hedef mantıksal sürücü temizleniyor. Kopya " "ismi: %(cloneName)s " msgid "" "Configuration options eqlx_use_chap, eqlx_chap_login and eqlx_chap_password " "are deprecated. Use use_chap_auth, chap_username and chap_password " "respectively for the same." msgstr "" "Yapılandırma seçenekleri eqlx_use_chap, eqlx_chap_login ve " "eqlx_chap_password artık kullanılmıyor. Aynıları için sırayla chap_auth, " "chap_username ve chap_password kullanın." #, python-format msgid "Consistency group %(name)s already exists. Message: %(msg)s" msgstr "Tutarlılık grubu %(name)s zaten mevcut. İleti: %(msg)s" #, python-format msgid "" "CopySnapshotTask.revert: delete the copied snapshot %(new_name)s of " "%(source_name)s." msgstr "" "CopySnapshotTask.revert: %(source_name)s'in %(new_name)s kopyalanan anlık " "görüntüsünü sil." #, python-format msgid "Could not create target because it already exists for volume: %s" msgstr "Hedef oluşturulamadı çünkü mantıksal sürücü: %s için zaten mevcut" #, python-format msgid "Could not determine root volume name on %s." msgstr "%s üzerinde kök mantıksal sürücü ismi belirlenemiyor." 
#, python-format msgid "CreateDestLunTask.revert: delete temp lun %s" msgstr "CreateDestLunTask.revert: geçici lun %s'i sil" #, python-format msgid "CreateSMPTask.revert: delete mount point %s" msgstr "CreateSMPTask.revert: %s bağlantı noktasını sil" #, python-format msgid "CreateSnapshotTask.revert: delete temp cgsnapshot %s" msgstr "CreateSnapshotTask.revert: geçici cgsnapshot %s'i sil" #, python-format msgid "CreateSnapshotTask.revert: delete temp snapshot %s" msgstr "CreateSnapshotTask.revert: geçici anlık görüntü %s'i sil" #, python-format msgid "" "CreateStorageHardwareID failed. initiator: %(initiator)s, rc=%(rc)d, ret=" "%(ret)s." msgstr "" "CreateStorageHardwareID başarısız. başlatan: %(initiator)s, rc=%(rc)d, ret=" "%(ret)s." #, python-format msgid "Deadlock detected when running '%(func_name)s': Retrying..." msgstr "" "'%(func_name)s' çalıştırılırken ölükilit algılandı: Tekrar deneniyor..." #, python-format msgid "Delete Snapshot id not found. Removing from cinder: %(id)s Ex: %(msg)s" msgstr "" "Anlık görüntü kimliği sil bulunamadı. Cinder'den kaldırılıyor: %(id)s " "İstisna: %(msg)s" #, python-format msgid "Delete temp LUN after migration start failed. LUN: %s" msgstr "Göç başladıktan sonra geçici LUN silme başarısız. LUN: %s" #, python-format msgid "Delete volume id not found. Removing from cinder: %(id)s Ex: %(msg)s" msgstr "" "Mantıksal sürücü kimliği sil bulunamadı. Cinder'den kaldırılıyor: %(id)s " "İstisna: %(msg)s" #, python-format msgid "Deleting image in unexpected status: %(image_status)s." msgstr "Beklenmedik durumdaki imaj siliniyor: %(image_status)s." #, python-format msgid "Destination %s already exists." msgstr "Hedef %s zaten mevcut." msgid "" "Destination volume type is different from source volume type for an " "encrypted volume. Encrypted backup restore has failed." msgstr "" "Şifreli bir mantıksal sürücü için hedef mantıksal sürücü türü kaynak " "mantıksal sürücü türünden farklı. Şifreli yedeğin geri yüklenmesi başarısız." msgid "Detected snapshot stuck in creating status, setting to ERROR." msgstr "" "Oluşturma durumunda kalmış anlık görüntü algılandı, HATA olarak ayarlanıyor." msgid "Discover file retries exhausted." msgstr "Dosya keşfi yeniden denemeleri tükendi." msgid "Driver didn't return connection info from terminate_connection call." msgstr "Sürücü terminate_connection çağrısından bağlantı bilgisi döndürmedi." msgid "Driver didn't return connection info, can't add zone." msgstr "Sürücü bağlantı bilgisi döndürmedi, bölge eklenemiyor." #, python-format msgid "" "Driver path %s is deprecated, update your configuration to the new path." msgstr "" "Sürücü yolu %s artık kullanılmıyor, yapılandırmanızı yeni yola göre " "güncelleyin." #, python-format msgid "Error finding LUNs for volume %s. Verify volume exists." msgstr "" "%s mantıksal sürücüsü için LUN bulmada hata. Mantıksal sürücünün varlığını " "doğrula." #, python-format msgid "" "Error in filtering function '%(function)s' : '%(error)s' :: failing host" msgstr "" "'%(function)s' filtreleme fonksiyonunda hata : '%(error)s' :: failing host" #, python-format msgid "" "Error in goodness_function function '%(function)s' : '%(error)s' :: " "Defaulting to a goodness of 0" msgstr "" "'%(function)s' goodness_function fonksiyonunda hata : '%(error)s' :: iyilik " "0 olarak varsayılıyor" #, python-format msgid "Error mapping LUN. Code :%(code)s, Message: %(message)s" msgstr "LUN eşleştirmede hata. Kod:%(code)s, İleti: %(message)s" #, python-format msgid "Error occurred while deleting backing: %s." 
msgstr "Destekleyici silinirken hata oluştu: %s." #, python-format msgid "Error occurred while deleting descriptor: %s." msgstr "Tanımlayıcı silinirken hata oluştu: %s." #, python-format msgid "Error occurred while deleting temporary disk: %s." msgstr "Geçici disk silinirken hata oluştu: %s." msgid "Error on parsing target_pool_name/target_array_serial." msgstr "target_pool_name/target_array_serial ayrıştırmada hata." #, python-format msgid "Error refreshing volume info. Message: %s" msgstr "Mantıksal sürücü bilgisi tazelenirken hata. İleti: %s" #, python-format msgid "Error unmapping LUN. Code :%(code)s, Message: %(message)s" msgstr "LUN eşleştirmesi kaldırmada hata. Kod :%(code)s, İleti: %(message)s" #, python-format msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" msgstr "%(share)s zula temizliği sırasında istisna. İleti - %(ex)s" #, python-format msgid "Exception during deleting %s" msgstr "%s silme sırasında istisna" #, python-format msgid "Exception during unmounting %s" msgstr "%s ayrılırken istisna" #, python-format msgid "Exception moving file %(src)s. Message - %(e)s" msgstr "%(src)s dosyası taşınırken istisna. İleti - %(e)s" #, python-format msgid "Exception moving file %(src)s. Message: %(e)s" msgstr "%(src)s dosyasının taşınması sırasında istisna. İleti: %(e)s" #, python-format msgid "" "Exception while creating image %(image_id)s snapshot. Exception: %(exc)s" msgstr "" "%(image_id)s imajı anlık görüntüsü oluşturulurken istisna. İstisna: %(exc)s" #, python-format msgid "" "Exception while registering image %(image_id)s in cache. Exception: %(exc)s" msgstr "%(image_id)s imajı zulaya kaydedilirken istisna. İstisna: %(exc)s" #, python-format msgid "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " "resource" msgstr "" "%(ext_name)s uzantısı: %(collection)s kaynağı genişletilemiyor: Böyle bir " "kaynak yok" #, python-format msgid "Extra spec %(old)s is deprecated. Use %(new)s instead." msgstr "Ek özellik %(old)s artık kullanılmıyor. Yerine %(new)s kullanın." #, python-format msgid "Extra spec %(old)s is obsolete. Use %(new)s instead." msgstr "Ek özellik %(old)s artık kullanılmıyor. Yerine %(new)s kullanın." msgid "" "Extra spec key 'storagetype:pool' is obsoleted since driver version 5.1.0. " "This key will be ignored." msgstr "" "Ek özellik anahtarı 'storagetype:pool' sürücü sürümü 5.1.0'dan itibaren " "kullanılmıyor. Bu anahtar atlanıyor." msgid "" "Extra spec key 'storagetype:provisioning' may be deprecated in the next " "release. It is recommended to use extra spec key 'provisioning:type' instead." msgstr "" "Ek özellik anahtarı 'storagetype:provisioning' sonraki sürümde " "kaldırılabilir. Bunun yerine 'provisioning:type' ek özellik anahtarının " "kullanımı önerilir." #, python-format msgid "FAST is enabled. Policy: %(fastPolicyName)s." msgstr "FAST etkin. İlke: %(fastPolicyName)s." #, python-format msgid "Fail to connect host %(host)s back to storage group %(sg)s." msgstr "" "%(host)s istemcisinin %(sg)s depolama grubuna geri bağlanması başarısız." #, python-format msgid "" "Failed target removal because target or ACL's couldn't be found for iqn: %s." msgstr "Hedef silme başarısız veya iqn: %s için ACL'ler bulunamadı." #, python-format msgid "" "Failed terminating the connection of volume %(volume_id)s, but it is " "acceptable." msgstr "" "%(volume_id)s mantıksal sürücüsü bağlantısının sonlandırılması başarısız, " "ama bu kabul edilebilir." 
#, python-format msgid "Failed to activate volume copy throttling: %(err)s" msgstr "Mantıksal sürücü kopyalama daraltma etkinleştirilemedi: %(err)s" #, python-format msgid "Failed to add host group: %s" msgstr "İstemci grubu ekleme başarısız: %s" #, python-format msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "%(vol_type_id)s türündeki %(id)s qos özellikleri ilişkilendirilemedi" #, python-format msgid "Failed to create pair: %s" msgstr "Çift oluşturma başarısız: %s" #, python-format msgid "Failed to deregister %(itor)s because: %(msg)s." msgstr "%(itor)s kaydı silinmesi başarısız çünkü: %(msg)s." #, python-format msgid "Failed to destroy Storage Group %s." msgstr "Depolama Grubu %s silinemedi." #, python-format msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "%(vol_type_id)s türündeki %(id)s qos özellikleri ilişkileri kesilemedi" #, python-format msgid "Failed to disassociate qos specs %s." msgstr "Qos özellikleri %s ilişkisi kesilemedi." #, python-format msgid "Failed to discard zero page: %s" msgstr "Sıfır sayfası atılamadı: %s" #, python-format msgid "Failed to extract initiators of %s, so ignore deregistration operation." msgstr "" "%s ilklendiricilerinin çıkarılması başarısız, kayıt silme işlemini göz ardı " "et." msgid "Failed to get Raid Snapshot ID and did not store in snapshot." msgstr "" "Raid Anlık Görüntü Kimliği alınamadı ve anlık görüntü içine kaydedilmedi." msgid "Failed to get target pool id." msgstr "Hedef havuz kimliği alınamadı." msgid "" "Failed to get target_pool_name and target_array_serial. 'location_info' is " "not in host['capabilities']." msgstr "" "target_pool_name ve target_array_serial alınamadı. 'location_info' " "host['capabilities'] içinde değil." #, python-format msgid "Failed to invoke ems. Message : %s" msgstr "ems başlatma başarısızı. İleti : %s" #, python-format msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "%(classpath)s uzantısı yüklemede hata: %(exc)s" #, python-format msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "%(ext_factory)s uzantısı yüklemede hata: %(exc)s" #, python-format msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "%(ext_name)s eklentisi yüklenemedi: %(exc)s" #, python-format msgid "Failed to manage virtual volume %(disp)s due to error during retype." msgstr "" "Retype sırasındaki hata sebebiyle %(disp)s sanal mantıksal sürücüsü " "yönetilemedi." #, python-format msgid "" "Failed to migrate volume. The destination volume %(vol)s is not deleted " "since the source volume may have been deleted." msgstr "" "Mantıksal sürücü göçü başarısız. Hedef mantıksal sürücü %(vol)s kaynak " "mantıksal sürücü silinmiş olabileceğinden silinmiyor." #, python-format msgid "" "Failed to migrate: %(volumeName)s from default source storage group for FAST " "policy: %(sourceFastPolicyName)s. Attempting cleanup... " msgstr "" "%(volumeName)s'in %(sourceFastPolicyName)s FAST ilkesi için varsayılan " "kaynak depolama grubundan göçü başarısız. Temizlik deneniyor... " #, python-format msgid "Failed to query pool %(id)s status %(ret)d." msgstr "%(id)s havuzu sorgulanamadı durum %(ret)d." #, python-format msgid "Failed to refresh mounts, reason=%s" msgstr "Bağlar tazelenemedi, sebep=%s" #, python-format msgid "" "Failed to register %(itor)s to SP%(sp)s port %(portid)s because: %(msg)s." msgstr "" "%(itor)s'in SP%(sp)s %(portid)s bağlantı noktasına kaydı başarısız: %(msg)s." 
#, python-format msgid "Failed to restart horcm: %s" msgstr "horcm yeniden başlatılamadı: %s" #, python-format msgid "Failed to run command: %s." msgstr "Komut çalıştırma başarısız: %s." #, python-format msgid "" "Failed to save iscsi LIO configuration when modifying volume id: %(vol_id)s." msgstr "" "Mantıksal sürücü: %(vol_id)s değiştirilirken iscsi LIO yapılandırması " "kaydedilemedi." #, python-format msgid "Failed to setup blkio cgroup to throttle the device '%(device)s'." msgstr "'%(device)s' aygıtını daraltmak için blkio cgroup kurulumu başarısız." #, python-format msgid "" "Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " "target. %(conf)s does not exist." msgstr "" "Iscsi hedefini kaldırdıktan sonra %(vol_id)s mantıksal sürücüsü kimliği için " "%(conf)s güncellemesi başarısız. %(conf)s mevcut değil." #, python-format msgid "Failure deleting job %s." msgstr "%s işinin silinmesi başarısız." #, python-format msgid "Failure deleting temp snapshot %s." msgstr "Geçici anlık görüntü %s silinemedi." #, python-format msgid "Failure deleting the snapshot %(snapshot_id)s of volume %(volume_id)s." msgstr "" "%(volume_id)s mantıksal sürücüsünün %(snapshot_id)s anlık görüntüsünün " "silinmesi başarısız." #, python-format msgid "" "Flexvisor failed to delete volume %(id)s from group %(vgid)s due to " "%(status)s." msgstr "" "Flexvisor %(id)s mantıksal sürücüsünü %(vgid)s grubundan %(status)s " "sebebiyle silemedi." #, python-format msgid "Flexvisor failed to delete volume %(id)s from the group %(vgid)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü %(vgid)s grubundan silemedi." msgid "Goodness function not set :: defaulting to minimal goodness rating of 0" msgstr "" "İyilik fonksiyonu ayarlanmamış :: asgari iyilik değeri olan 0 varsayılıyor" #, python-format msgid "Got disconnected; trying to reconnect. (%s)" msgstr "Bağlantı kesildi; tekrar bağlanılmaya çalışılıyor. (%s)" #, python-format msgid "" "Group sync name not found for target group %(target)s on %(storageSystem)s." msgstr "" "%(storageSystem)s üzerindeki %(target)s hedef grubu için grup eş zamanlama " "ismi bulunamadı." #, python-format msgid "HLU %(hlu)s has already been removed from %(sgname)s. Message: %(msg)s" msgstr "HLU %(hlu)s zaten %(sgname)s'den ayrılmış. İleti: %(msg)s" #, python-format msgid "" "Hint \"%s\" dropped because ExtendedServerAttributes not active in Nova." msgstr "" "ExtendedServerAttributes Nova'da etkin olmadığından \"%s\" ipucu düşürüldü." #, python-format msgid "" "Hint \"%s\" dropped because Nova did not return enough information. Either " "Nova policy needs to be changed or a privileged account for Nova should be " "specified in conf." msgstr "" "\"%s\" ipucu düşürüldü çünkü Nova yeterli bilgi döndürmedi. Nova ilkesinin " "değiştirilmesi gerekiyor ya da yapılandırmada Nova için ayrıcalıklı bir " "hesap belirtilmeli." #, python-format msgid "" "Host %(host)s has already disconnected from storage group %(sgname)s. " "Message: %(msg)s" msgstr "" "İstemci %(host)s bağlantısı %(sgname)s depolama grubundan zaten kesilmiş. " "İleti: %(msg)s" msgid "" "Host exists without CHAP credentials set and has iSCSI attachments but CHAP " "is enabled. Updating host with new CHAP credentials." msgstr "" "İstemci CHAP kimlik bilgileri ayarlanmamış halde ve iSCSI eklentileri var " "ama CHAP etkin. İstemci yeni CHAP kimlik bilgileriyle güncelleniyor." msgid "Host has no CHAP key, but CHAP is enabled." msgstr "İstemcinin CHAP anahtarı yok, ama CHAP etkin." msgid "IQN already existed." 
msgstr "IQN zaten mevcut." msgid "IQN has been used to create map." msgstr "IQN eşleştirme oluşturmak için kullanılmış." msgid "ISCSI provider_location not stored, using discovery" msgstr "ISCSI provider_location kaydedilmemiş, keşif kullanılıyor" msgid "" "ISERTgtAdm is deprecated, you should now just use LVMVolumeDriver and " "specify iscsi_helper for the target driver you wish to use. In order to " "enable iser, please set iscsi_protocol=iser with lioadm or tgtadm target " "helpers." msgstr "" "ISERTgtAdm artık kullanılmıyor, artık yalnızca LVMVolumeDriver kullanmalı ve " "kullanmak istediğiniz hedef sürücü için iscsi_helper belirtmelisiniz. iser'i " "etkinleştirmek için, lütfen iscsi_protocol=iser ayarını hedef yardımcılar " "olarak lioadm veya tgtadm ile ayarlayın." msgid "Id not in sort_keys; is sort_keys unique?" msgstr "Id sort_keys içinde değil, sort_keys benzersiz mi?" msgid "Image delete encountered an error." msgstr "İmaj silme bir hatayla karşılaştı." msgid "Inconsistent Luns." msgstr "Tutarsız Lun." #, python-format msgid "" "Incorrect value error: %(blocksize)s, it may indicate that " "'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" "Geçersiz değer hatası: %(blocksize)s, 'volume_dd_blocksize'nin yanlış " "yapılandırıldığını gösteriyor olabilir. Varsayılana dön." #, python-format msgid "" "Insufficient free space for thin provisioning. The ratio of provisioned " "capacity over total capacity %(provisioned_ratio).2f has exceeded the " "maximum over subscription ratio %(oversub_ratio).2f on host %(host)s." msgstr "" "İnce hazırlığı için yetersiz boş alan. Hazırlık kapasitesinin toplam " "kapasiteye oranı %(provisioned_ratio).2f %(host)s istemcisi üzerinde azami " "aşım oranı %(oversub_ratio).2f değerini geçti." #, python-format msgid "" "Insufficient free space for volume creation on host %(host)s (requested / " "avail): %(requested)s/%(available)s" msgstr "" "%(host)s istemcisi üzerinde mantıksal sürücü oluşturma için yetersiz boş " "alan (istenen / kullanılabilir): %(requested)s/%(available)s" #, python-format msgid "" "Insufficient free space for volume creation. Total capacity is %(total).2f " "on host %(host)s." msgstr "" "Mantıksal sürücü oluşturma için yetersiz boş alan. %(host)s istemcisi " "üzerinde toplam kapasite %(total).2f." #, python-format msgid "Invalid IP address format '%s'" msgstr "Geçersiz IP adresi biçimi '%s'" #, python-format msgid "" "Invalid goodness result. Result must be between 0 and 100. Result " "generated: '%s' :: Defaulting to a goodness of 0" msgstr "" "Geçersiz iyilik sonucu. Sonuç 0 ve 100 arasında olmalı. Üretilen sonuç: " "'%s' :: İyilik değeri 0 olarak varsayılıyor" #, python-format msgid "Invalid trace flag: %s" msgstr "Geçersiz takip bayrağı: %s" msgid "" "It is not the recommended way to use drivers by NetApp. Please use " "NetAppDriver to achieve the functionality." msgstr "" "NetApp sürücüleri kullanmak önerilen bir yol değildir. Lütfen işlevselliğe " "erişmek için NetAppDriver kullanın." #, python-format msgid "LUN %(name)s is already expanded. Message: %(msg)s" msgstr "LUN %(name)s zaten genişletildi. İleti: %(msg)s" #, python-format msgid "LUN %(name)s is not ready for extension: %(out)s" msgstr "LUN %(name)s eklenti için hazır değil: %(out)s" #, python-format msgid "LUN %(name)s is not ready for snapshot: %(out)s" msgstr "LUN %(name)s anlık görüntü için hazır değil: %(out)s" #, python-format msgid "LUN already exists, LUN name %(name)s. 
Message: %(msg)s" msgstr "LUN zaten mevcut, LUN ismi %(name)s. İleti: %(msg)s" #, python-format msgid "" "LUN corresponding to %s is still in some Storage Groups.Try to bring the LUN " "out of Storage Groups and retry the deletion." msgstr "" "%s'e denk gelen LUN hala bazı Depolama Gruplarında. LUN'u Depolama " "Gruplarından çıkarmaya ve silmeyi tekrarlamaya çalışın." #, python-format msgid "LUN is already deleted, LUN name %(name)s. Message: %(msg)s" msgstr "LUN zaten silindi, LUN ismi %(name)s. İleti: %(msg)s" #, python-format msgid "" "LUN misalignment may occur for current initiator group %(ig_nm)s) with host " "OS type %(ig_os)s. Please configure initiator group manually according to " "the type of the host OS." msgstr "" "%(ig_os)s istemci OS türlü %(ig_nm)s) başlatıcı grubu için LUN yanlış " "hizalaması oluşabilir. Lütfen istemci OS türüne göre başlatıcı grubunu elle " "ayarlayın." #, python-format msgid "LUN with id %(remove_id)s is not present in cg %(cg_name)s, skip it." msgstr "%(remove_id)s kimlikli LUN %(cg_name)s cg de mevcut değil, atla." msgid "Least busy iSCSI port not found, using first iSCSI port in list." msgstr "" "En az meşgul iSCSI bağlantı noktası bulunamadı, listedeki ilk iSCSI bağlantı " "noktası kullanılıyor." #, python-format msgid "" "Maximum number of Pool LUNs, %s, have been created. No more LUN creation can " "be done." msgstr "" "Azami Havuz LUN'ları sayısı, %s, oluşturuldu. Daha fazla LUN oluşturulamaz." #, python-format msgid "Message - %s." msgstr "İleti - %s." #, python-format msgid "" "Migration command may get network timeout. Double check whether migration in " "fact started successfully. Message: %(msg)s" msgstr "" "Göç komutu ağ zaman aşımı alabilir. Göçün başarılı başladığını iki kere " "kontrol edin. İleti: %(msg)s" #, python-format msgid "More than one valid preset was detected, using %s" msgstr "Birden fazla geçerli ön ayar algılandı, %s kullanılıyor" #, python-format msgid "Mount point %(name)s already exists. Message: %(msg)s" msgstr "Bağlantı noktası %(name)s zaten mevcut. İleti: %(msg)s" msgid "No VLUN contained CHAP credentials. Generating new CHAP key." msgstr "" "Hiçbir VLUN CHAP kimlik bilgileri içermiyor. Yeni CHAP anahtarı üretiliyor." msgid "No array serial number returned, set as unknown." msgstr "" "Herhangi bir dizi seri numarası dönmedi, bilinmeyen olarak ayarlanıyor." #, python-format msgid "No backing file found for %s, allowing snapshot to be deleted." msgstr "" "%s için destekleyen dosya bulunamadı, anlık görüntünün silinmesine izin " "veriliyor." #, python-format msgid "No entry in LUN table for volume/snapshot %(name)s." msgstr "%(name)s mantıksal sürücü/anlık görüntü için LUN tablosunda girdi yok." msgid "No host or VLUNs exist. Generating new CHAP key." msgstr "İstemci veya VLUN mevcut değil. Yeni CHAP anahtarı üretiliyor." msgid "No mapping." msgstr "Eşleştirme yok." #, python-format msgid "No port group found in masking view %(mv)s." msgstr "%(mv)s maskeleme görünümünde bağlantı noktası grubu bulunamadı." msgid "No protection domain name or id was specified in configuration." msgstr "Yapılandırmada herhangi bir koruma alan adı veya kimliği belirtilmedi." msgid "No shares found hence skipping ssc refresh." msgstr "Paylaşım bulunamadı ssc tazelemesi atlanıyor." #, python-format msgid "" "No storage group found. Performing rollback on Volume: %(volumeName)s To " "return it to the default storage group for FAST policy %(fastPolicyName)s." msgstr "" "Depolama grubu bulunamadı. 
%(fastPolicyName)s FAST ilkesi için varsayılan " "depolama grubuna döndürmek için Mantıksal sürücü: %(volumeName)s üzerinde " "geri döndürme yapılıyor." #, python-format msgid "No storage pool found with available capacity %s." msgstr "%s kullanılabilir kapasitesine sahip depolama havuzu bulunamadı." msgid "No storage pool name or id was found." msgstr "Depolama havuzu ismi veya kimliği bulunamadı." msgid "No such host alias name." msgstr "Böyle bir istemci rumuzu yok." #, python-format msgid "No target ports found in masking view %(maskingView)s." msgstr "" "%(maskingView)s maskeleme görünümünde hedef bağlantı noktası bulunamadı." #, python-format msgid "No weighed hosts found for volume with properties: %s" msgstr "" "Şu özelliklere sahip mantıksal sürücü için ağırlık verilmiş istemci " "bulunamadı: %s" msgid "Non-iSCSI VLUN detected." msgstr "iSCSI olmayan VLUN algılandı." #, python-format msgid "Not deleting key %s" msgstr "%s anahtarı silinmiyor" #, python-format msgid "Persistence file already exists for volume, found file at: %s" msgstr "" "Kalıcılık dosyası mantıksal sürücü için zaten mevcut, dosya şurada bulundu: " "%s" #, python-format msgid "" "Pre check for deletion. Volume: %(volumeName)s is part of a storage group. " "Attempting removal from %(storageGroupInstanceNames)s." msgstr "" "Silme için ön kontrol. Mantıksal sürücü: %(volumeName)s bir depolama " "grubunun parçası. %(storageGroupInstanceNames)s'den silme deneniyor." #, python-format msgid "" "Production use of \"%(backend)s\" backend requires the Cinder controller to " "have multipathing properly set up and the configuration option \"%(mpflag)s" "\" to be set to \"True\"." msgstr "" "\"%(backend)s\" in üretimde kullanılması Cinder kontrolcüsünün çokluyolunun " "düzgün ayarlanması ve yapılandırma seçeneği \"%(mpflag)s\" in \"True\" " "olarak ayarlanmasını gerektirir." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG backup " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" "%(s_pid)s için kota aşıldı, %(s_size)sG yedek oluşturulmaya çalışıldı " "(%(d_consumed)dG / %(d_quota)dG zaten tüketilmiş)" #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " "(%(d_consumed)dG of %(d_quota)dG already consumed)." msgstr "" "%(s_pid)s için kota aşıldı, %(s_size)sG anlık görüntü oluşturulmaya " "çalışıldı (%(d_consumed)dG / %(d_quota)dG zaten tüketildi)." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" "%(s_pid)s için kota aşıldı, %(s_size)sG mantıksal sürücü oluşturulmaya " "çalışıldı (%(d_consumed)dG / %(d_quota)dG zaten tüketilmiş)" #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" "%(s_pid)s için kota aşıldı, %(s_size)sG mantıksal sürücü oluşturulmaya " "çalışıldı - (%(d_consumed)dG / %(d_quota)dG zaten kullanılmış)" #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create backups (%(d_consumed)d " "backups already consumed)" msgstr "" "%(s_pid)s için kota aşıldı, yedek oluşturulmaya çalışıldı (%(d_consumed)d " "yedek zaten tüketilmiş)" #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " "snapshots already consumed)." msgstr "" "%(s_pid)s için kota aşıldı, anlık görüntü oluşturulmaya çalışıldı " "(%(d_consumed)d anlık görüntü zaten tüketilmiş)." 
#, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d volumes " "already consumed)" msgstr "" "%(s_pid)s için kota aşıldı, mantıksal sürücü oluşturulmaya çalışıldı " "(%(d_consumed)d mantıksal sürücü zaten tüketilmiş)" #, python-format msgid "" "RBD image for backup %(backup)s of volume %(volume)s not found. Deleting " "backup metadata." msgstr "" "%(volume)s mantıksal sürücüsü için %(backup)s yedeği için RBD imajı " "bulunamadı. Metadata yedeği siliniyor." #, python-format msgid "Rename failure in cleanup of cDOT QOS policy group %(name)s: %(ex)s" msgstr "" "cDOT QOS ilke grubu %(name)s temizliğinde yeniden adlandırma hatası: %(ex)s" #, python-format msgid "" "Report interval must be less than service down time. Current config " "service_down_time: %(service_down_time)s, report_interval for this: service " "is: %(report_interval)s. Setting global service_down_time to: " "%(new_down_time)s" msgstr "" "Rapor aralığı servisin kapalı kaldığı süreden küçük olmalı. Mevcut " "service_down_time yapılandırması: %(service_down_time)s, bu servis için " "report_interval: %(report_interval)s. Genel service_down_time: " "%(new_down_time)s olarak ayarlanıyor" msgid "Requested image is not accessible by current Tenant." msgstr "İstenen imaj mevcut Kiracı tarafından erişilebilir değil." msgid "Returning as clean tmp vol job already running." msgstr "Geçici mantıksal sürücü temizleme işi hala çalıştığından dönülüyor." #, python-format msgid "See unavailable iSCSI target: %s" msgstr "Kullanılamaz iSCSI hedefine bak: %s" msgid "Silent failure of target removal detected, retry...." msgstr "Hedef silmenin sessizce başarısız olduğu algılandı, tekrar dene..." #, python-format msgid "Snapshot %(name)s already exists. Message: %(msg)s" msgstr "Anlık görüntü %(name)s zaten mevcut. İleti: %(msg)s" #, python-format msgid "" "Snapshot %(name)s for consistency group does not exist. Message: %(msg)s" msgstr "" "Tutarlılık grubu için %(name)s anlık görüntüsü mevcut değil. İleti: %(msg)s" #, python-format msgid "Snapshot %(name)s is in use, retry. Message: %(msg)s" msgstr "Anlık görüntü %(name)s kullanımda, tekrar dene. İleti: %(msg)s" #, python-format msgid "Snapshot %(name)s may deleted already. Message: %(msg)s" msgstr "Anlık görüntü %(name)s zaten silinmiş olabilir. İleti: %(msg)s" #, python-format msgid "" "Snapshot %(snapname)s is attached to snapshot mount point %(mpname)s " "already. Message: %(msg)s" msgstr "" "Anlık görüntü %(snapname)s anlık görüntü bağlantı noktası %(mpname)s'e zaten " "bağlı. İleti: %(msg)s" #, python-format msgid "Snapshot %s already deleted." msgstr "Anlık görüntü %s zaten silinmiş." #, python-format msgid "Snapshot still %(status)s Cannot delete snapshot." msgstr "Anlık görüntü hala %(status)s Anlık görüntü silinemiyor." #, python-format msgid "Start migration failed. Message: %s" msgstr "Göç başlatma başarısız. İleti: %s" #, python-format msgid "Storage Group %s is not found." msgstr "Depolama Grubu %s bulunamadı." #, python-format msgid "Storage Group %s is not found. Create it." msgstr "Depolama Grubu %s bulunamadı. Oluştur." #, python-format msgid "Storage Group %s is not found. terminate_connection() is unnecessary." msgstr "Depolama Grubu %s bulunamadı. terminate_connection() gereksiz." #, python-format msgid "Storage Pool '%(pool)s' is '%(state)s'." msgstr "Depolama Havuzu '%(pool)s' '%(state)s'." #, python-format msgid "Storage group %(name)s already exists. Message: %(msg)s" msgstr "Depolama grubu %(name)s zaten mevcut. 
İleti: %(msg)s" #, python-format msgid "" "Storage group %(name)s doesn't exist, may have already been deleted. " "Message: %(msg)s" msgstr "" "Depolama grubu %(name)s mevcut değil, zaten silinmiş olabilir. İleti: %(msg)s" #, python-format msgid "Storage sync name not found for target %(target)s on %(storageSystem)s." msgstr "" "%(storageSystem)s üzerindeki %(target)s hedefi için depolama eş zamanlama " "ismi bulunamadı." msgid "Storage-assisted migration failed during retype." msgstr "Depolama-destekli göç retype sırasında başarısız oldu." msgid "The MCS Channel is grouped." msgstr "MCS Kanalı gruplandırılmış." #, python-format msgid "" "The NAS file operations will be run as root: allowing root level access at " "the storage backend. This is considered an insecure NAS environment. Please " "see %s for information on a secure NAS configuration." msgstr "" "NAS dosya işlemleri root olarak çalıştırılacak: depolama arka ucunda root " "düzeyinde erişime izin verilecek. Bu güvensiz bir NAS ortamı olarak kabul " "edilir. Güvenli bir NAS yapılandırması için lütfen %s'e göz atın." #, python-format msgid "" "The NAS file permissions mode will be 666 (allowing other/world read & write " "access). This is considered an insecure NAS environment. Please see %s for " "information on a secure NFS configuration." msgstr "" "NAS dosya izinleri kipi 666 olacak (diğer/dünya'ya okuma&yazma izni verir). " "Bu güvensiz bir NAS ortamı olarak kabul edilir. Güvenli bir NFS " "yapılandırması için lütfen %s'e göz atın." msgid "" "The VMAX plugin only supports Retype. If a pool based migration is necessary " "this will happen on a Retype From the command line: cinder --os-volume-api-" "version 2 retype --migration-policy on-demand" msgstr "" "VMAX eklentisi yalnızca Retype destekler. Eğer havuz tabanlı göç gerekliyse " "bu komut satırından bir Retype üzerinde gerçekleşir: cinder --os-volume-api-" "version 2 retype --migration-policy on-demand" #, python-format msgid "" "The following specified storage pools do not exist: %(unexist)s. This host " "will only manage the storage pools: %(exist)s" msgstr "" "Belirtilen şu depolama havuzları mevcut değil: %(unexist)s. İstemci yalnızca " "şu depolama havuzlarını yönetecek: %(exist)s" #, python-format msgid "The provisioning: %(provisioning)s is not valid." msgstr "Hazırlık: %(provisioning)s geçerli değil." #, python-format msgid "" "The source volume is a legacy volume. Create volume in the pool where the " "source volume %s is created." msgstr "" "Kaynak mantıksal sürücü eski bir mantıksal sürücü. Mantıksal sürücüyü %s " "kaynak mantıksal sürücüsünün oluşturulduğu yerde oluştur." #, python-format msgid "The specified Snapshot mount point %s is not currently attached." msgstr "Belirtilen Anlık Görüntü bağlama noktası %s şu an eklenmiş değil." #, python-format msgid "" "The user does not have access or sufficient privileges to use all netapp " "APIs. The following extra_specs will fail or be ignored: %s" msgstr "" "Kullanıcının tüm netapp API'lerini kullanmaya erişimi ya da yeterli izni " "yok. Şu ek_özellikler başarısız olacak ya da atlanacak: %s" #, python-format msgid "" "The volume: %(volumename)s was not first part of the default storage group " "for FAST policy %(fastPolicyName)s." msgstr "" "Mantıksal sürücü: %(volumename)s FAST ilkesi %(fastPolicyName)s için " "varsayılan depolama grubunun ilk bölümü değildi." #, python-format msgid "" "The volume: %(volumename)s. was not first part of the default storage group " "for FAST policy %(fastPolicyName)s." 
msgstr "" "Mantıksal sürücü: %(volumename)s. %(fastPolicyName)s FAST ilkesi için " "varsayılan depolama grubunun ilk bölümü değildi." #, python-format msgid "" "There are no datastores matching new requirements; can't retype volume: %s." msgstr "" "Yeni gereksinimlerle eşleşen veri deposu yok; mantıksal sürücü retype " "edilemiyor: %s." #, python-format msgid "Trying to boot from an empty volume: %s." msgstr "Boş bir mantıksal sürücüden ön yükleme yapılmaya çalışılıyor: %s." #, python-format msgid "Unable to create folder %s" msgstr "%s dizini oluşturulamadı" #, python-format msgid "Unable to create snapshot %s" msgstr "%s anlık görüntüsü oluşturulamadı" #, python-format msgid "Unable to delete Protection Group Snapshot: %s" msgstr "Koruma Grubu Anlık Görüntüsü silinemiyor: %s" #, python-format msgid "Unable to delete Protection Group: %s" msgstr "Koruma Grubu silinemiyor: %s" #, python-format msgid "Unable to delete space %(space)s" msgstr "%(space)s alanı silinemedi" #, python-format msgid "Unable to fetch datastores connected to host %s." msgstr "%s istemcisine bağlı veri depoları getirilemedi." #, python-format msgid "Unable to find Masking view: %(view)s." msgstr "Maskeleme görünümü bulunamadı: %(view)s." #, python-format msgid "Unable to find snapshot %s" msgstr "%s anlık görüntüsü bulunamadı" msgid "Unable to get rados pool stats." msgstr "Rados havuz istatistikleri alınamıyor." msgid "Unable to get storage tiers from tier policy rule." msgstr "Depolama aşamaları aşama ilke kuralından alınamıyor." #, python-format msgid "Unable to locate volume:%s" msgstr "Mantıksal sürücü:%s bulunamadı" msgid "Unable to poll cluster free space." msgstr "Küme boş alanı çekilemedi." #, python-format msgid "Unable to update host type for host with label %(l)s. %(e)s" msgstr "%(l)s etiketli istemci için istemci türü güncellenemiyor. %(e)s" #, python-format msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" "İlklendirilmemiş Mantıksal Sürücü Grubu üzerinde istatistikler " "güncellenemedi: %s" #, python-format msgid "Unexpected exception during image cloning in share %s" msgstr "%s paylaşımında imaj kopyalanırken beklenmedik istisna" msgid "Unexpected exception while listing used share." msgstr "Kullanılan paylaşım listelenirken beklenmedik istisna." msgid "Unexpected exception while short listing used share." msgstr "Kullanılan paylaşım kısaca listelenirken beklenmedik istisna." #, python-format msgid "Update driver status failed: %(config_group)s is uninitialized." msgstr "Sürücü durumu güncelleme başarısız: %(config_group)s ilklendirilmemiş." msgid "Verify certificate is not set, using default of False." msgstr "" "Sertifika doğrulama ayarlanmamış, varsayılan değer olan False kullanılıyor." #, python-format msgid "Volume %(vol)s was not in Storage Group %(sg)s." msgstr "Mantıksal sürücü %(vol)s Depolama Grubu %(sg)s'de değildi." #, python-format msgid "Volume %(volume)s is not in any masking view." msgstr "Mantıksal sürücü %(volume)s herhangi bir maskeleme görünümünde değil." #, python-format msgid "" "Volume %(volumeName)s was not first part of the default storage group for " "the FAST Policy." msgstr "" "Mantıksal sürücü %(volumeName)s FAST İlkesi için varsayılan depolama " "grubunun ilk bölümü değildi." #, python-format msgid "Volume %(volume_id)s already deleted." msgstr "Mantıksal sürücü %(volume_id)s zaten silinmiş." #, python-format msgid "Volume %(volume_id)s cannot be retyped because it has snapshot." 
msgstr "" "Mantıksal sürücü %(volume_id)s retype edilemez çünkü anlık görüntüsü var." #, python-format msgid "Volume %(volume_id)s cannot be retyped during attachment." msgstr "Mantıksal sürücü %(volume_id)s ekleme sırasında retype edilemez." #, python-format msgid "Volume %s does not exist." msgstr "Mantıksal sürücü %s mevcut değil." #, python-format msgid "Volume %s does not have provider_location specified, skipping" msgstr "Mantıksal sürücü %s provider_location belirtmemiş, atlanıyor" #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "Mantıksal sürücü %s provider_location belirtmemiş, atlanıyor." #, python-format msgid "Volume %s is not found!, it may have been deleted." msgstr "Mantıksal sürücü %s bulunamadı!, silinmiş olabilir." #, python-format msgid "Volume %s was not found while trying to delete it." msgstr "Mantıksal sürücü %s silinmeye çalışılırken bulunamadı." #, python-format msgid "" "Volume : %(volumeName)s is not currently belonging to any storage group." msgstr "" "Mantıksal sürücü : %(volumeName)s şu an herhangi bir depolama grubuna ait " "değil." #, python-format msgid "Volume copy job for src vol %s not found." msgstr "" "%s kaynak mantıksal sürücüsü için mantıksal sürücü kopyalama işi bulunamadı." #, python-format msgid "Volume deletion failed with message: %s" msgstr "Mantıksal sürücü silme şu iletiyle başarısız oldu: %s" #, python-format msgid "Volume initialization failure. (%s)" msgstr "Mantıksal sürücü ilklendirme başarısız. (%s)" #, python-format msgid "Volume path %s does not exist, nothing to remove." msgstr "%s mantıksal sürücü yolu mevcut değil, kaldırılacak bir şey yok." msgid "Volume refresh job already running. Returning..." msgstr "Mantıksal sürücü tazeleme işi zaten çalışıyor. Dönülüyor..." #, python-format msgid "Volume still %(status)s Cannot delete volume." msgstr "Mantıksal sürücü hala %(status)s Mantıksal sürücü silinemez." msgid "Volume type will be changed to be the same as the source volume." msgstr "" "Mantıksal sürücü türü kaynak mantıksal sürücüyle aynı olacak şekilde " "değiştirilecek." #, python-format msgid "" "Volume: %(volumeName)s Does not belong to storage group %(defaultSgName)s." msgstr "" "Mantıksal sürücü: %(volumeName)s %(defaultSgName)s depolama grubuna ait " "değil." #, python-format msgid "" "Volume: %(volumeName)s is already part of storage group %(sgGroupName)s." msgstr "" "Mantıksal sürücü: %(volumeName)s zaten %(sgGroupName)s depolama grubunun " "parçası." #, python-format msgid "Volume: %(volumeName)s is not currently belonging to any storage group." msgstr "" "Mantıksal sürücü: %(volumeName)s şu an herhangi bir depolama grubuna ait " "değil." #, python-format msgid "Volume: %s is in use, can't retype." msgstr "Mantıksal sürücü: %s hala kullanımda, retype yapılamaz." #, python-format msgid "_get_vdisk_map_properties: Did not find a preferred node for vdisk %s." msgstr "" "_get_vdisk_map_properties: %s vdisk'i için tercih edilen bir düğüm " "bulunamadı." #, python-format msgid "_migrate_cleanup on : %(volumeName)s." msgstr "%(volumeName)s üzerinde _migrate_cleanup." #, python-format msgid "_migrate_rollback on : %(volumeName)s." msgstr "%(volumeName)s üzerinde _migrate_rollback." msgid "_remove_device: invalid properties or device." msgstr "_remove_device: geçersiz özellik ya da aygıt." #, python-format msgid "" "_unmap_vdisk_from_host: Multiple mappings of volume %(vdisk_name)s found, no " "host specified." 
msgstr "" "_unmap_vdisk_from_host: %(vdisk_name)s mantıksal sürücüsünün birden çok " "eşleşmesi bulundu, istemci belirtilmedi." #, python-format msgid "" "_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" "_unmap_vdisk_from_host: %(vol_name)s mantıksal sürücüsünün hiçbir istemciye " "eşleşmesi bulunamadı." msgid "" "config option keymgr.fixed_key has not been defined: some operations may " "fail unexpectedly" msgstr "" "keymgr.fixed_key yapılandırma seçeneği tanımlanmamış. bazı işlemler " "beklenmedik şekilde başarısız olabilir" #, python-format msgid "delete_volume: unable to find volume %s" msgstr "delete_volume: %s mantıksal sürücüsü bulunamadı" msgid "" "destroy_empty_storage_group: True. Empty storage group will be deleted after " "volume is detached." msgstr "" "destroy_empty_storage_group: True. Boş depolama grubu mantıksal sürücü " "ayrıldıktan sonra silinecek." msgid "flush() not supported in this version of librbd" msgstr "flush() librbd'nin bu sürümünde desteklenmiyor" msgid "force_delete_lun_in_storagegroup=True" msgstr "force_delete_lun_in_storagegroup=True" #, python-format msgid "get_evs: %(out)s -- No find for %(fsid)s" msgstr "get_evs: %(out)s -- %(fsid)s için bulgu yok" #, python-format msgid "get_fsid: %(out)s -- No info for %(fslabel)s" msgstr "get_fsid: %(out)s -- %(fslabel)s için bilgi yok" msgid "" "glance_num_retries shouldn't be a negative value. The number of retries will " "be set to 0 until this iscorrected in the cinder.conf." msgstr "" "glance_num_retries negatif bir değer olmamalı. Bu cinder.conf'da düzeltilene " "kadar tekrar deneme sayıları 0 olarak ayarlanacak." msgid "" "ignore_pool_full_threshold: True. LUN creation will still be forced even if " "the pool full threshold is exceeded." msgstr "" "ignore_pool_full_threshold: True. Havuz dolu eşiği aşılsa bile LUN oluşturma " "zorlanacak." #, python-format msgid "initialize_connection: Did not find a preferred node for volume %s." msgstr "" "initialize_connection: %s mantıksal sürücüsü için tercih edilen düğüm " "bulunamadı." #, python-format msgid "ldev(%(ldev)d) is already mapped (hlun: %(hlu)d)" msgstr "ldev(%(ldev)d) zaten eşleştirilmiş (hlun: %(hlu)d)" #, python-format msgid "object %(key)s of type %(typ)s not found, %(err_msg)s" msgstr "%(typ)s türündeki %(key)s nesnesi bulunamadı, %(err_msg)s" msgid "qemu-img is not installed." msgstr "qemu-img kurulu değil." msgid "refresh stale ssc job in progress. Returning... " msgstr "vadesi geçmiş ssc işi tazeleme sürüyor. Dönülüyor... " msgid "san_secondary_ip is configured as the same value as san_ip." msgstr "san_secondary_ip san_ip ile aynı değer olarak yapılandırılmış." #, python-format msgid "snapshot: %s not found, skipping delete operation" msgstr "anlık görüntü: %s bulunamadı, silme işlemi atlanıyor" #, python-format msgid "snapshot: %s not found, skipping delete operations" msgstr "anlık görüntü: %s bulunamadı, silme işlemleri atlanıyor" msgid "ssc job in progress. Returning... " msgstr "ssc işi sürüyor. Dönülüyor... " msgid "terminate_conn: provider location empty." msgstr "terminate_conn: sağlayıcı konumu boş." msgid "terminate_connection: lun map not found" msgstr "terminate_connection: lun eşleştirmesi bulunamadı" #, python-format msgid "" "unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no host " "specified." msgstr "" "unmap_vol_from_host: %(vol_name)s mantıksal sürücüsünün birden fazla " "eşleşmesi var, istemci belirtilmedi." 
#, python-format msgid "" "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" "unmap_vol_from_host: %(vol_name)s mantıksal sürücüsünün hiçbir istemciye " "eşleşmesi bulunamadı." #, python-format msgid "" "unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s " "found." msgstr "" "unmap_vol_from_host: %(vol_name)s mantıksal sürücüsünden %(host)s " "istemcisine eşleştirme bulunamadı." #, python-format msgid "volume service is down. (host: %s)" msgstr "mantıksal sürücü servisi çalışmıyor. (host: %s)" msgid "volume_tmp_dir is now deprecated, please use image_conversion_dir." msgstr "" "volume_tmp_dir artık kullanılmıyor, lütfen image_conversion_dir kullanın." #, python-format msgid "warning: Tried to delete vdisk %s but it does not exist." msgstr "uyarı: vdisk %s silinmeye çalışıldı ama mevcut değil." #, python-format msgid "" "zfssa_initiator: %(ini)s wont be used on zfssa_initiator_group= %(inigrp)s." msgstr "" "zfssa_initiator: %(ini)s zfssa_initiator_group= %(inigrp)s üzerinde " "kullanılmayacak." msgid "" "zfssa_initiator_config not found. Using deprecated configuration options." msgstr "" "zfssa_initiator_config bulunamadı. Artık kullanılmayan yapılandırma " "seçenekleri kullanılıyor." cinder-8.0.0/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po0000664000567000056710000022776012701406250024775 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # Andreas Jaeger , 2015. #zanata # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev10\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-17 18:05+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-16 08:22+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "\t%(name)-35s : %(value)s" msgstr "\t%(name)-35s : %(value)s" #, python-format msgid "\t%(param)-35s : %(value)s" msgstr "\t%(param)-35s : %(value)s" #, python-format msgid "\t%(prefix)-35s : %(version)s" msgstr "\t%(prefix)-35s : %(version)s" #, python-format msgid "\t%(request)-35s : %(value)s" msgstr "\t%(request)-35s : %(value)s" #, python-format msgid "" "\n" "\n" "\n" "\n" "Request URL: %(url)s\n" "\n" "Call Method: %(method)s\n" "\n" "Request Data: %(data)s\n" "\n" "Response Data:%(res)s\n" "\n" msgstr "" "\n" "\n" "\n" "\n" "İstek URL'si: %(url)s\n" "\n" "İstek Metodu: %(method)s\n" "\n" "İstek Verisi: %(data)s\n" "\n" "Yanıt Verisi:%(res)s\n" "\n" #, python-format msgid "%(element)s: %(val)s" msgstr "%(element)s: %(val)s" #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format msgid "%(url)s returned a fault: %(e)s" msgstr "%(url)s hata döndürdü: %(e)s" #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s HTTP %(status)d ile geri döndü" #, python-format msgid "%(volume)s assign type fibre_channel, properties %(properties)s" msgstr "%(volume)s atama türü fibre_channel, özellikler %(properties)s" #, python-format msgid "%s is already umounted" msgstr "%s zaten ayrılmış" #, python-format msgid "3PAR driver cannot perform migration. Retype exception: %s" msgstr "3PAR sürücüsü göçü gerçekleştiremiyor. 
Retype istisnası: %s" #, python-format msgid "3PAR vlun %(name)s not found on host %(host)s" msgstr "3PAR vlun %(name)s %(host)s istemcisinde bulunamadı" #, python-format msgid "" "3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was not " "deleted because: %(reason)s" msgstr "" "'%(name)s' mantıksal sürücüsü için 3PAR vlun silindi, ama '%(host)s' " "istemcisi silinmedi çünkü: %(reason)s" #, python-format msgid "AUTH properties: %(authProps)s" msgstr "AUTH özellikleri: %(authProps)s" #, python-format msgid "AUTH properties: %s." msgstr "AUTH özellikleri: %s." #, python-format msgid "Accepting transfer %s" msgstr "%s aktarımı kabul ediliyor" msgid "Activate Flexvisor cinder volume driver." msgstr "Flexvisor cinder mantıksal sürücü sürücüsünü etkinleştir." #, python-format msgid "Add volume response: %s" msgstr "Mantıksal sürücü ekleme yanıtı: %s" #, python-format msgid "Added %s to cg." msgstr "%s cg'ye eklendi." #, python-format msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." msgstr "" "Mantıksal sürücü: %(volumeName)s mevcut depolama grubuna %(sgGroupName)s " "eklendi." #, python-format msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" msgstr "" "%(igrp)s başlatıcı grup ismine sahip mantıksal sürücüye=%(vol)s ACL ekleniyor" #, python-format msgid "" "Adding volume: %(volumeName)s to default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Mantıksal sürücü: %(volumeName)s FAST ilkesi: %(fastPolicyName)s için " "varsayılan depolama grubuna ekleniyor." #, python-format msgid "Adding volumes to cg %s." msgstr "Mantıksal sürücüler cg %s'e ekleniyor" msgid "Attach volume completed successfully." msgstr "Mantıksal sürücü ekleme başarıyla tamamlandı." msgid "Availability Zones retrieved successfully." msgstr "Kullanılabilir Bölgeler başarıyla alındı." #, python-format msgid "Available services: %s" msgstr "Kullanılabilir servisler: %s" #, python-format msgid "Backend name is %s." msgstr "Arka uç ismi %s." #, python-format msgid "Backend type: %s" msgstr "Arka uç türü: %s" #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "Destekleyen VM: %(backing)s %(new_name)s olarak yeniden adlandırıldı." msgid "Backing not available, no operation to be performed." msgstr "Destek kullanılabilir değil, hiçbir işlem yapılmayacak." #, python-format msgid "Backing not found, creating for volume: %s" msgstr "Destek bulunamadı, mantıksal sürücü: %s için oluşturuluyor" #, python-format msgid "" "Backup base image of volume %(volume)s still has %(snapshots)s snapshots so " "skipping base image delete." msgstr "" "%(volume)s mantıksal sürücüsünün yedek taban imajı hala %(snapshots)s anlık " "görüntüye sahip bu yüzden taban imaj silme atlanıyor." #, python-format msgid "" "Backup image of volume %(volume)s is busy, retrying %(retries)s more time(s) " "in %(delay)ss." msgstr "" "%(volume)s mantıksal sürücüsünün yedek imajı meşgul, %(delay)ss içinde " "%(retries)s kere tekrar deneniyor." #, python-format msgid "Backup service: %s." msgstr "Yedek servisi: %s." #, python-format msgid "Bandwidth limit is: %s." msgstr "Bant genişliği sınırı: %s." #, python-format msgid "Begin backup of volume %s." msgstr "Mantıksal sürücü %s yedeğine başla." msgid "Begin detaching volume completed successfully." msgstr "Mantıksal sürücünün ayrılmasına başlanması başarıyla tamamlandı." #, python-format msgid "CONCERTO version: %s" msgstr "CONCERTO sürümü: %s" #, python-format msgid "Cancelling Migration from LUN %s." 
msgstr "LUN %s'den Göç iptal ediliyor" #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because cluster " "exists in different management group." msgstr "" "Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü küme " "farklı bir yönetim grubunda." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the volume " "has been exported." msgstr "" "Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü mantıksal " "sürücü dışa aktarılmış." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the volume " "has snapshots." msgstr "" "Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü mantıksal " "sürücü anlık görüntülere sahip." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume does " "not exist in this management group." msgstr "" "Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü mantıksal " "sürücü bu yönetim grubunda değil." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume is " "from a different backend." msgstr "" "Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü mantıksal " "sürücü başka bir arka uçta." #, python-format msgid "Cgsnapshot %s: creating." msgstr "Cgsnapshot %s: oluşturuluyor." #, python-format msgid "Change volume capacity request: %s." msgstr "Mantıksal sürücü kapasite isteğini değiştir: %s." #, python-format msgid "Checking image clone %s from glance share." msgstr "İmaj klonu %s glance paylaşımından kontrol ediliyor." #, python-format msgid "" "Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." msgstr "" "\"%(cr)s\" mevcut yoluna sahip Cinder NFS mantıksal sürücüsü artık " "yönetilmiyor." msgid "Cinder secure environment indicator file exists." msgstr "Cinder güvenli ortam göstergesi dosyası mevcut." #, python-format msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" msgstr "CiscoFCZoneDriver - I-T haritası için bağlantı ekle: %s" #, python-format msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" msgstr "CiscoFCZoneDriver - I-T haritası için bağlantı sil: %s" #, python-format msgid "Cleaning cache for share %s." msgstr "%s paylaşımı için zula temizleniyor." msgid "Cleaning up incomplete backup operations." msgstr "Tamamlanmamış yedek işlemleri temizleniyor." #, python-format msgid "Cloning from cache to destination %s" msgstr "Zuladan %s hedefine klonlanıyor" #, python-format msgid "Cloning from snapshot to destination %s" msgstr "Anlık görüntüden %s hedefine klonlanıyor" #, python-format msgid "Cloning image %s from cache" msgstr "%s imajı zuladan klonlanıyor" #, python-format msgid "Cloning image %s from snapshot." msgstr "İmaj %s anlık görüntüden klonlanıyor." #, python-format msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "Mantıksal sürücü %(src)s %(dst)s mantıksal sürücüsüne klonlanıyor" #, python-format msgid "" "Cloning with volume_name %(vname)s clone_name %(cname)s export_path %(epath)s" msgstr "" "volume_name %(vname)s clone_name %(cname)s export_path %(epath)s ile " "klonlanıyor" #, python-format msgid "CloudByte API executed successfully for command [%s]." msgstr "CloudByte API'si [%s] komutu için başarıyla çalıştırıldı." msgid "Complete-Migrate volume completed successfully." msgstr "Mantıksal sürücü göçü-tamamlama başarıyla tamamlandı." #, python-format msgid "Completed: convert_to_base_volume: id=%s." 
msgstr "Tamamlandı: convert_to_base_volume: id=%s." #, python-format msgid "Configured pools: %s" msgstr "Yapılandırılan havuzlar: %s" #, python-format msgid "" "Connect initialization info: {driver_volume_type: fibre_channel, data: " "%(properties)s" msgstr "" "Bağlantı ilklendirme bilgisi: {driver_volume_type: fibre_channel, veri: " "%(properties)s" #, python-format msgid "Connecting to host: %s." msgstr "İstemciye bağlanılıyor: %s." #, python-format msgid "Connector returning fcnsinfo-%s" msgstr "Bağlayıcı fcnsinfo-%s döndürüyor" #, python-format msgid "Consistency group %s was deleted successfully." msgstr "Tutarlılık grubu %s başarıyla silindi." #, python-format msgid "Consistency group %s: created successfully" msgstr "Tutarlılık grubu %s: başarıyla oluşturuldu" #, python-format msgid "Consistency group %s: creating" msgstr "Tutarlılık grubu %s: oluşturuluyor" #, python-format msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" msgstr "%(sz).2f MB imaj %(mbps).2f MB/s hızda dönüştürüldü" #, python-format msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." msgstr "" "%(img)s imajı %(vol)s mantıksal sürücüsüne başkasına yükleme iş akışıyla " "kopyalandı." #, python-format msgid "Copied image to volume %s using regular download." msgstr "Normal indirme kullanılarak imaj %s mantıksal sürücüsüne kopyalandı." #, python-format msgid "Copy job to dest vol %s completed." msgstr "%s hedef mantıksal sürücüsüne kopyalama işi tamamlandı." msgid "Copy volume to image completed successfully." msgstr "Mantıksal sürücüyü imaja kopyalama başarıyla tamamlandı." #, python-format msgid "Copying src vol %(src)s to dest vol %(dst)s." msgstr "" "%(src)s kaynak mantıksal sürücüsü %(dst)s hedef mantıksal sürücüsüne " "kopyalanıyor." #, python-format msgid "Could not find replica to delete of volume %(vol)s." msgstr "%(vol)s mantıksal sürücüsünün silimi için kopya bulunamadı." #, python-format msgid "Could not run dpkg-query command: %(msg)s." msgstr "dpkg-query komutu çalıştırılamadı: %(msg)s." #, python-format msgid "Could not run rpm command: %(msg)s." msgstr "Rpm komutu çalıştırılamadı: %(msg)s." #, python-format msgid "" "Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" msgstr "" "mmchattr'ye sahip depolama havuzu %(pool)s'e güncellenemedi, hata: %(error)s" #, python-format msgid "" "Couldn't find destination volume %(vol)s in the database. The entry might be " "successfully deleted during migration completion phase." msgstr "" "Hedef mantıksal sürücü %(vol)s veri tabanında bulunamadı. Girdi göç " "tamamlama aşamasında başarıyla silinmiş olabilir." #, python-format msgid "" "Couldn't find the temporary volume %(vol)s in the database. There is no need " "to clean up this volume." msgstr "" "Geçici mantıksal sürücü %(vol)s veri tabanında bulunamadı. Bu mantıksal " "sürücüyü temizlemeye gerek yok." #, python-format msgid "Create Cloned Volume %(volume_id)s completed." msgstr "%(volume_id)s Mantıksal sürücüsü klonlama bitti." #, python-format msgid "Create Consistency Group: %(group)s." msgstr "Tutarlılık Grubu Oluştur: %(group)s." #, python-format msgid "Create Volume %(volume_id)s completed." msgstr "%(volume_id)s Mantıksal sürücüsü oluşturuldu." #, python-format msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." msgstr "" "%(snapshot_id)s anlık görüntüsünden %(volume_id)s mantıksal sürücüsü " "oluşturma tamamlandı." 
#, python-format msgid "" "Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " "%(sourceName)s." msgstr "" "Mantıksal sürücüden bir kopya oluştur: Kopya Mantıksal Sürücü: %(cloneName)s " "Kaynak Mantıksal Sürücü: %(sourceName)s." #, python-format msgid "Create backup finished. backup: %s." msgstr "Yedek oluşturma bitti. yedek: %s." #, python-format msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" "Yedek oluşturma başlatıldı, yedek: %(backup_id)s mantıksal sürücü: " "%(volume_id)s." msgid "Create consistency group completed successfully." msgstr "Tutarlılık grubu oluşturma başarıyla tamamlandı." #, python-format msgid "Create export done from Volume %(volume_id)s." msgstr "%(volume_id)s Mantıksal sürücüsünden dışa aktarma oluşturma yapıldı." msgid "Create snapshot completed successfully" msgstr "Anlık görüntü oluşturma başarıyla tamamlandı" #, python-format msgid "" "Create snapshot for Consistency Group %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgstr "" "Tutarlılık Grubu %(cgId)s cgsnapshotID: %(cgsnapshot)s için anlık görüntü " "oluştur." #, python-format msgid "Create snapshot from volume %s" msgstr "%s biriminden sistem görüntüsü oluşturuluyor" #, python-format msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "Anlık görüntü oluştur: %(snapshot)s: mantıksal sürücü: %(volume)s" #, python-format msgid "" "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " "%(raid_snapshot_id)s, volume: %(volume)s." msgstr "" "Oluşturma başarılı. Anlık görüntü: %(snapshot)s, Raid'deki anlık görüntü " "ID'si: %(raid_snapshot_id)s, mantıksal sürücü: %(volume)s." #, python-format msgid "Create target consistency group %(targetCg)s." msgstr "Hedef tutarlılık grubu %(targetCg)s oluştur." #, python-format msgid "Create volume of %s GB" msgstr "%s GB'lık birim oluştur" #, python-format msgid "" "Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume [%(cb_vol)s] " "and OpenStack volume [%(stack_vol)s]." msgstr "" "CloudByte anlık görüntüsü [%(cb_snap)s] w.r.t CloudByte mantıksal sürücüsü " "[%(cb_vol)s] ve OpenStack mantıksal sürücüsü [%(stack_vol)s] oluşturuldu." #, python-format msgid "Created Consistency Group %s" msgstr "Tutarlılık Grubu %s oluşturuldu" #, python-format msgid "Created datastore folder: %s." msgstr "Veri deposu dizini oluşturuldu: %s." #, python-format msgid "" "Created multi-attach E-Series host group %(label)s with clusterRef " "%(clusterRef)s" msgstr "" "clusterRef %(clusterRef)s ile çoklu-ekleme E-serisi istemci grubu %(label)s " "oluşturuldu" #, python-format msgid "Created new initiator group name: %(igGroupName)s." msgstr "Yeni başlatıcı grubu ismi oluşturuldu: %(igGroupName)s." #, python-format msgid "Created new masking view : %(maskingViewName)s." msgstr "Yeni maskeleme görünümü oluşturuldu : %(maskingViewName)s." #, python-format msgid "Created new storage group: %(storageGroupName)s." msgstr "Yeni depolama grubu oluşturuldu: %(storageGroupName)s." #, python-format msgid "Created snap grp with label %s." msgstr "%s etiketli anlık görüntü grubu oluşturuldu." #, python-format msgid "Created volume %(instanceId)s: %(name)s" msgstr "%(instanceId)s mantıksal sürücüsü oluşturuldu: %(name)s" #, python-format msgid "Created volume %(volname)s, volume id %(volid)s." msgstr "" "Mantıksal sürücü %(volname)s oluşturuldu, mantıksal sürücü kimliği %(volid)s." msgid "Created volume successfully." msgstr "Mantıksal sürücü başarıyla tamamlandı." #, python-format msgid "Created volume with label %s." 
msgstr "%s etiketine sahip mantıksal sürücü oluşturuldu." #, python-format msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" "%(container)s kabındaki %(volume_id)s mantıksal sürücüsünün yedeği " "oluşturuluyor" #, python-format msgid "Creating cgsnapshot %(name)s." msgstr "Cgsnapshot %(name)s oluşturuluyor." #, python-format msgid "Creating clone of volume: %s" msgstr "Mantıksal sürücü klonu oluşturuluyor: %s" #, python-format msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." msgstr "%(name)s tutarlılık grubu %(snap)s cgsnapshot'dan oluşturuluyor." #, python-format msgid "Creating consistency group %(name)s." msgstr "%(name)s tutarlılık grubu oluşturuluyor." #, python-format msgid "Creating host object %(host_name)r with IQN: %(iqn)s." msgstr "IQN: %(iqn)s e sahip istemci nesnesi %(host_name)r oluşturuluyor." #, python-format msgid "Creating host object %(host_name)r with WWN: %(wwn)s." msgstr "WWN: %(wwn)s'e sahip %(host_name)r istemci nesnesi oluşturuluyor." #, python-format msgid "Creating host with ports %s." msgstr "%s bağlantı noktasına sahip istemci oluşturuluyor." #, python-format msgid "Creating image snapshot %s" msgstr "İmaj anlık görüntüsü %s oluşturuluyor" #, python-format msgid "Creating initiator group %(grp)s with initiator %(iname)s" msgstr "Başlatıcı grubu %(grp)s %(iname)s başlatıcısı ile oluşturuluyor" #, python-format msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" msgstr "Başlatıcı grubu %(igrp)s bir başlatıcı ile oluşturuluyor %(iname)s" #, python-format msgid "Creating iscsi_target for volume: %s" msgstr "Mantıksal sürücü: %s için iscsi_target oluşturuluyor" #, python-format msgid "" "Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " "snap_description=%(desc)s" msgstr "" "volume_name=%(vol)s snap_name=%(name)s snap_description=%(desc)s için anlık " "görüntü oluşturuluyor" #, python-format msgid "Creating snapshot: %s" msgstr "Anlık görüntü oluşturuluyor: %s" #, python-format msgid "Creating transfer of volume %s" msgstr "%s mantıksal sürücüsünün aktarımı oluşturuluyor" #, python-format msgid "Creatng volume from snapshot. volume: %s" msgstr "Anlık görüntüden mantıksal sürücü oluşturuluyor. mantıksal sürücü: %s" #, python-format msgid "Delete Consistency Group: %(group)s." msgstr "Tutarlılık Grubunu sil: %(group)s." #, python-format msgid "Delete Snapshot %(snapshot_id)s completed." msgstr "%(snapshot_id)s anlık görüntü silme tamamlandı." #, python-format msgid "Delete Snapshot: %(snapshot)s" msgstr "Anlık görüntü sil: %(snapshot)s" #, python-format msgid "Delete Snapshot: %(snapshot)s." msgstr "Anlık görüntü sil: %(snapshot)s." #, python-format msgid "Delete Snapshot: %(snapshotName)s." msgstr "Anlık görüntüyü sil: %(snapshotName)s." #, python-format msgid "Delete Volume %(volume_id)s completed." msgstr "%(volume_id)s Mantıksal sürücüsü silindi." #, python-format msgid "Delete backup finished, backup %s deleted." msgstr "Yedek silme bitti, yedek %s silindi." #, python-format msgid "Delete backup started, backup: %s." msgstr "Yedek silme başladı, yedek: %s." #, python-format msgid "Delete backup with id: %s" msgstr "Şu kimliğe sahip yedeği sil: %s" #, python-format msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" msgstr "Tutarlılık grubu için %(snap_name)s cgsnapshot'ını sil: %(group_name)s" #, python-format msgid "Delete cgsnapshot with id: %s" msgstr "Şu kimliğe sahip cgsnapshot'u sil: %s" msgid "Delete consistency group completed successfully." 
msgstr "Tutarlılık grubunun silinmesi başarıyla tamamlandı." #, python-format msgid "Delete consistency group with id: %s" msgstr "Şu kimliğe sahip tutarlılık grubunu sil: %s" #, python-format msgid "" "Delete of backup '%(backup)s' for volume '%(volume)s' finished with warning." msgstr "" "'%(volume)s' mantıksal sürücüsü için '%(backup)s' yedeği silme uyarıyla " "bitti." msgid "Delete snapshot completed successfully" msgstr "Anlık görüntü silme başarıyla tamamlandı" #, python-format msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgstr "" "Kaynak CG %(cgId)s için anlık görüntüyü sil cgsnapshotID: %(cgsnapshot)s." msgid "Delete snapshot metadata completed successfully." msgstr "Anlık görüntü metadata'sı sil başarıyla tamamlandı." #, python-format msgid "Delete snapshot with id: %s" msgstr "%s id'li sistem görüntüsü siliniyor" #, python-format msgid "Delete transfer with id: %s" msgstr "Şu kimliğe sahip aktarımı sil: %s" msgid "Delete volume metadata completed successfully." msgstr "Mantıksal sürücü metadata'sı silme başarıyla tamamlandı." msgid "Delete volume request issued successfully." msgstr "Mantıksal sürücü silme isteği başarıyla yapıldı." #, python-format msgid "Delete volume with id: %s" msgstr "%s id'li birim siliniyor" #, python-format msgid "Deleted %(row)d rows from table=%(table)s" msgstr "%(row)d satır table=%(table)s'den silindi" #, python-format msgid "" "Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " "[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." msgstr "" "CloudByte anlık görüntüsü [%(snap)s] w.r.t üst CloudByte mantıksal sürücüsü " "[%(cb_vol)s] ve üst OpenStack mantıksal sürücüsü [%(stack_vol)s] silindi." #, python-format msgid "Deleted the VM backing: %s." msgstr "VM desteği: %s silindi." #, python-format msgid "Deleted vmdk file: %s." msgstr "Vmdk dosyası silindi: %s." msgid "Deleted volume successfully." msgstr "Mantıksal sürücü başarıyla silindi." #, python-format msgid "Deleting Volume: %(volume)s" msgstr "Mantıksal sürücü siliniyor: %(volume)s" #, python-format msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." msgstr "" "%(volume)s mantıksal sürücüsünün yedek taban imajı='%(basename)s' siliniyor." #, python-format msgid "Deleting deleteInitiatorGrp %s " msgstr "deleteInitiatorGrp %s siliniyor " #, python-format msgid "Deleting snapshot %(ss)s from %(pro)s" msgstr "Anlık görüntü %(ss)s %(pro)s'den siliniyor" #, python-format msgid "Deleting snapshot %s " msgstr "Anlık görüntü %s siliniyor " #, python-format msgid "Deleting snapshot: %s" msgstr "Anlık görüntü siliniyor: %s" #, python-format msgid "Deleting stale snapshot: %s" msgstr "Eski anlık görüntü siliniyor: %s" #, python-format msgid "Deleting unneeded host %(host_name)r." msgstr "İhtiyaç duyulmayan istemci %(host_name)r siliniyor." #, python-format msgid "Deleting volume %s " msgstr "Mantıksal sürücü %s siliniyor " msgid "Detach volume completed successfully." msgstr "Mantıksal sürücü ayır başarıyla tamamlandı." msgid "Determined volume DB was empty at startup." msgstr "Tespit edilen mantıksal sürücü DB'si başlangıçta boştu." msgid "Determined volume DB was not empty at startup." msgstr "Tespit edilen mantıksal sürücü DB'si başlangıçta boş değildi." #, python-format msgid "" "Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " "delete anything." msgstr "" "Destek: %(backing)s için anlık görüntü: %(name)s bulunamadı. Hiçbir şeyin " "silinmesi gerekmiyor." 
#, python-format msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" msgstr "Keşif ip'si %(disc_ip)s %(net_label)s mgmt+veri alt ağında bulundu" #, python-format msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" msgstr "Keşif ip'si %(disc_ip)s %(net_label)s veri alt ağında kullanılıyor" #, python-format msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" msgstr "Keşif ip'si %(disc_ip)s %(net_label)s alt ağında kullanılıyor" #, python-format msgid "Discovery ip %s is used on mgmt+data subnet" msgstr "Keşif ip'si %s mgmt+data alt ağında kullanılıyor" #, python-format msgid "Dissociating volume %s " msgstr "Mantıksal sürücü %s ilişiği kesiliyor " #, python-format msgid "Domain id is %s." msgstr "Alan id'si %s." #, python-format msgid "Done copying image: %(id)s to volume: %(vol)s." msgstr "İmaj %(id)s nin mantıksal sürücü: %(vol)s e kopyalanması bitti." #, python-format msgid "Done copying volume %(vol)s to a new image %(img)s" msgstr "%(vol)s mantıksal sürücüsünün %(img)s yeni imajına kopyalanması bitti" msgid "Driver initialization completed successfully." msgstr "Sürücü ilklendirme başarıyla tamamlandı." #, python-format msgid "Driver stats: %s" msgstr "Sürücü istatistikleri: %s" #, python-format msgid "EQL-driver: Setup is complete, group IP is \"%s\"." msgstr "EQL-sürücüsü: Kurulum tamamlandı, grup IP'si \"%s\"." #, python-format msgid "EQL-driver: executing \"%s\"." msgstr "EQL-sürücüsü: \"%s\" çalıştırılıyor." #, python-format msgid "Editing Volume %(vol)s with mask %(mask)s" msgstr "%(mask)s maskesine sahip %(vol)s mantıksal sürücüsü düzenleniyor" #, python-format msgid "Elapsed time for clear volume: %.2f sec" msgstr "Mantıksal sürücü temizleme için kalan zaman: %.2f sn" msgid "Embedded mode detected." msgstr "Gömülü kip algılandı." #, python-format msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" msgstr "extend_volume volume=%(vol)s new_size=%(size)s durumuna giriliyor" #, python-format msgid "" "Entering initialize_connection volume=%(vol)s connector=%(conn)s location=" "%(loc)s" msgstr "" "initialize_connection'a giriliyor mantıksal sürücü=%(vol)s bağlayıcı=" "%(conn)s konum=%(loc)s" #, python-format msgid "" "Entering terminate_connection volume=%(vol)s connector=%(conn)s location=" "%(loc)s." msgstr "" "terminate_connection'a giriliyor mantıksal sürücü=%(vol)s bağlayıcı=%(conn)s " "konum=%(loc)s." #, python-format msgid "Exploring array subnet label %s" msgstr "Dizi alt ağ etiketi %s keşfediliyor" #, python-format msgid "Export record finished, backup %s exported." msgstr "Kayıt dışa aktarma bitti, yedek %s dışa aktarıldı." #, python-format msgid "Export record started, backup: %s." msgstr "Kayıt dışa aktarma başladı, yedek: %s." #, python-format msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." msgstr "%(vol_id)s lun_id %(lun_id)s üzerine aktarıldı." msgid "Extend volume completed successfully." msgstr "Mantıksal sürücü büyütme başarıyla tamamlandı." msgid "Extend volume request issued successfully." msgstr "Mantıksal sürücü büyütme isteği başarıyla yapıldı." #, python-format msgid "Extending volume %s." msgstr "Mantıksal sürücü %s büyütülüyor." 
#, python-format msgid "FC Initiators %(in)s of %(ins)s need registration" msgstr "FC Başlatıcıları %(in)s / %(ins)s kaydolmalı" #, python-format msgid "Failed to open iet session list for %(vol_id)s: %(e)s" msgstr "%(vol_id)s için iet oturumu açılamadı: %(e)s" #, python-format msgid "Fault thrown: %s" msgstr "Hata fırlatıldı: %s" #, python-format msgid "Filtered targets for SAN is: %s" msgstr "SAN için filtrelenen hedefler: %s" #, python-format msgid "Fixing previous mount %s which was not unmounted correctly." msgstr "Düzgün bağı ayrılmamış önceki %s bağı düzeltiliyor." #, python-format msgid "Flash Cache policy set to %s" msgstr "Zula sıfırlama ilkesi %s olarak ayarlandı" #, python-format msgid "Flexvisor already unassigned volume %(id)s." msgstr "Flexvisor %(id)s mantıksal sürücü atamasını zaten kaldırdı." #, python-format msgid "Flexvisor snapshot %(id)s not existed." msgstr "Flexvisor anlık görüntüsü %(id)s mevcut değildi." #, python-format msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." msgstr "" "Flexvisor %(id)s mantıksal sürücüsünü %(cgid)s grubuna başarıyla ekledi." #, python-format msgid "Flexvisor succeeded to clone volume %(id)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü klonlamayı başardı." #, python-format msgid "Flexvisor succeeded to create volume %(id)s from snapshot." msgstr "" "Flexvisor başarıyla anlık görüntüden %(id)s mantıksal sürücüsünü oluşturdu." #, python-format msgid "Flexvisor succeeded to create volume %(id)s." msgstr "Flexvisor başarıyla %(id)s mantıksal sürücüsünü oluşturdu." #, python-format msgid "Flexvisor succeeded to delete snapshot %(id)s." msgstr "Flexvisor %(id)s anlık görüntüsünü silmeyi başardı." #, python-format msgid "Flexvisor succeeded to extend volume %(id)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü büyütmeyi başardı." #, python-format msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." msgstr "" "Flexvisor başarıyla %(id)s mantıksal sürücüsünü %(cgid)s grubundan çıkardı." #, python-format msgid "Flexvisor succeeded to unassign volume %(id)s." msgstr "Flexvisor %(id)s mantıksal sürücüsü atamasını kesebildi." #, python-format msgid "Flexvisor volume %(id)s does not exist." msgstr "Flexvisor mantıksal sürücüsü %(id)s mevcut değil." msgid "Force upload to image is disabled, Force option will be ignored." msgstr "İmaja zorla yükleme kapalı, Zorlama seçeneği göz ardı edilecek." #, python-format msgid "Found existing masking view: %(maskingViewName)s." msgstr "Mevcut maskeleme görünümü bulundu: %(maskingViewName)s." #, python-format msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." msgstr "Arka uç için boş kapasite: %(free)s, toplam kapasite: %(total)s." #, python-format msgid "Generating transfer record for volume %s" msgstr "Mantıksal sürücü %s için aktarım kaydı üretiliyor" #, python-format msgid "Get FC targets %(tg)s to register initiator %(in)s." msgstr "%(in)s başlatıcısını kaydetmek için FC hedeflerini %(tg)s getir." #, python-format msgid "Get ISCSI targets %(tg)s to register initiator %(in)s." msgstr "%(in)s başlatıcısını kaydetmek için ISCSI hedefleri %(tg)s'yi getir." msgid "Get all volumes completed successfully." msgstr "Tüm mantıksal sürücülerin getirilmesi başarıyla bitti." #, python-format msgid "Get domain by name response: %s" msgstr "İsimle alan adı alma yanıtı: %s" #, python-format msgid "Get service: %(lbl)s->%(svc)s" msgstr "Servis getir: %(lbl)s->%(svc)s" msgid "Get snapshot metadata completed successfully." 
msgstr "Anlık görüntü metadata'sı getir başarıyla tamamlandı." msgid "Get snapshot metadata value not implemented." msgstr "Anlık görüntü metadata değeri getirme uygulanmadı." msgid "Get volume admin metadata completed successfully." msgstr "Mantıksal sürücü yönetici metadata'sını getir başarıyla tamamlandı." msgid "Get volume image-metadata completed successfully." msgstr "Mantıksal sürücü imaj-metadata'sı getir başarıyla tamamlandı." msgid "Get volume metadata completed successfully." msgstr "Mantıksal sürücü metadata alma başarıyla tamamlandı." msgid "Getting getInitiatorGrpList" msgstr "getInitiatorGrpList getiriliyor" #, python-format msgid "Getting volume information for vol_name=%s" msgstr "vol_name=%s için mantıksal sürücü bilgisi alınıyor" #, python-format msgid "Going to perform request again %s with valid token." msgstr "İstek %s geçerli jetonla tekrar gerçekleşecek." #, python-format msgid "HDP list: %s" msgstr "HDP listesi: %s" #, python-format msgid "HTTP exception thrown: %s" msgstr "HTTP istisnası fırlatıldı: %s" #, python-format msgid "ISCSI properties: %(properties)s" msgstr "ISCSI özellikleri: %(properties)s" msgid "ISCSI provider_location not stored, using discovery." msgstr "ISCSI provider_location kaydedilmemiş, keşif kullanılıyor." #, python-format msgid "ISCSI volume is: %(volume)s" msgstr "ISCSI mantıksal sürücüsü: %(volume)s" #, python-format msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." msgstr "İmaj %(pool)s/%(image)s %(snap)s anlık görüntüsüne bağımlı." #, python-format msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" msgstr "%(image_id)s imajı için imaj klonlama başarısız. İleti: %(msg)s" #, python-format msgid "Image download %(sz).2f MB at %(mbps).2f MB/s" msgstr "İmaj indirme %(sz).2f MB %(mbps).2f MB/s hızında" #, python-format msgid "Image will locally be converted to raw %s" msgstr "İmaj yerel olarak ham %s'e döüştürülecek" #, python-format msgid "Import record id %s metadata from driver finished." msgstr "Sürücüden kayıt id %s metadata içe aktarma bitti." #, python-format msgid "Import record started, backup_url: %s." msgstr "Kayıt içe aktarma başladı, backup_url: %s." #, python-format msgid "Initialize connection: %(volume)s." msgstr "Bağlantıyı ilklendir: %(volume)s." msgid "Initialize volume connection completed successfully." msgstr "Mantıksal sürücü bağlantısını ilklendirme başarıyla tamamlandı." #, python-format msgid "Initialized driver %(name)s version: %(vers)s" msgstr "Sürücü %(name)s sürüm: %(vers)s ilklendirildi" msgid "Initializing extension manager." msgstr "Genişletme yöneticisi başlatılıyor" #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s." msgstr "" "Başlatıcı İsim(ler)i %(initiatorNames)s %(storageSystemName)s dizisinde " "değil." #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s. " msgstr "" "Başlatıcı İsim(ler)i %(initiatorNames)s %(storageSystemName)s dizisinde " "değil. " #, python-format msgid "Initiator group name is %(grp)s for initiator %(iname)s" msgstr "%(iname)s başlatıcısı için başlatıcı grup ismi %(grp)s" #, python-format msgid "LUN %(id)s extended to %(size)s GB." msgstr "LUN %(id)s %(size)s GB'ye büyütüldü." #, python-format msgid "LUN %(lun)s extended to %(size)s GB." msgstr "LUN %(lun)s %(size)s GB boyutuna büyütüldü." #, python-format msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "%(sz)s MB boyutunda LUN %(lun)s oluşturuldu." 
#, python-format msgid "LUN with given ref %s need not be renamed during manage operation." msgstr "" "Verilen referans %s e sahip LUN yönetim işlemi sırasında yeniden " "isimlendirilemez." #, python-format msgid "" "Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: " "%(name)s." msgstr "" "create_volume: %(volumeName)s terk ediliyor Dönüş kodu: %(rc)lu mantıksal " "sürücü sözlüğü: %(name)s." #, python-format msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." msgstr "delete_volume: %(volumename)s terk ediliyor Dönüş kodu: %(rc)lu." #, python-format msgid "Leaving initialize_connection: %s" msgstr "initialize_connection: %s terk ediliyor" #, python-format msgid "Loaded extension: %s" msgstr "Yüklenen bölüm: %s" #, python-format msgid "" "Logical Volume not found when querying LVM info. (vg_name=%(vg)s, lv_name=" "%(lv)s" msgstr "" "LVM bilgisi sorgulanırken Mantıksal Sürücü bulunamadı. (vg_name=%(vg)s, " "lv_name=%(lv)s" msgid "Manage existing volume completed successfully." msgstr "Mevcut mantıksal sürücüyü yönetme başarıyla tamamlandı." #, python-format msgid "" "Manage operation completed for LUN with new path %(path)s and uuid %(uuid)s." msgstr "" "LUN için yönetim işlemi yeni yol %(path)s ve uuid %(uuid)s ile tamamlandı." #, python-format msgid "" "Manage operation completed for volume with new label %(label)s and wwn " "%(wwn)s." msgstr "" "%(label)s yeni etiketli ve %(wwn)s wwn'li mantıksal sürücü için yönetme " "işlemi tamamlandı." #, python-format msgid "Manage volume %s" msgstr "Mantıksal sürücü %s'i yönet" msgid "Manage volume request issued successfully." msgstr "Mantıksal sürücü yönetim isteği başarıyla yapıldı." #, python-format msgid "Migrate Volume %(volume_id)s completed." msgstr "%(volume_id)s mantıksal sürücü göçü tamamlandı." msgid "Migrate volume completed successfully." msgstr "Mantıksal sürücü göçü başarıyla tamamlandı." msgid "Migrate volume completion issued successfully." msgstr "Mantıksal sürücü göç tamamlama başarıyla yapıldı." msgid "Migrate volume request issued successfully." msgstr "Mantıksal sürücü göç isteği başarıyla yapıldı." #, python-format msgid "Migrating using retype Volume: %(volume)s." msgstr "Retype kullanarak göç yapılıyor Mantıksal sürücü: %(volume)s." #, python-format msgid "" "Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to %(new_snap_cpg)s." msgstr "" " %(volume_name)s snap_cpg %(old_snap_cpg)s den %(new_snap_cpg)s e " "değiştiriliyor." #, python-format msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" msgstr "%(volume_name)s userCPG %(old_cpg)s den %(new_cpg)s e değiştiriliyor" #, python-format msgid "Modifying %s comments." msgstr "%s yorum değiştiriliyor." msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." msgstr "" "PyWBEM modülü kurulu değil. python-pywbem paketini kullanarak PyWBEM kur." msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." msgstr "" "PyWBEM Modülü kurulu değil. python-pywbem paketini kullanarak PyWBEM kurun." msgid "Need to remove FC Zone, building initiator target map" msgstr "FC Bölgesi silinmeli, başlatıcı hedef haritası inşa ediliyor" msgid "Need to remove FC Zone, building initiator target map." msgstr "FC Bölgesi kaldırılmalı, başlatıcı hedef haritası inşa ediliyor." msgid "" "Neither security file nor plain text credentials are specified. Security " "file under home directory will be used for authentication if present." 
msgstr "" "Güvenlik dosyası ya da düz metin kimlik bilgileri belirtilmedi. Eğer " "mevcutsa ev dizini altındaki güvenlik dosyası kimlik doğrulama için " "kullanılacak." #, python-format msgid "" "NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " "loaded." msgstr "" "%(storage_family)s ailesi ve %(storage_protocol)s iletişim kuralının NetApp " "sürücüsü yüklendi." #, python-format msgid "New Cinder secure environment indicator file created at path %s." msgstr "%s yolunda yeni Cinder güvenli ortam göstergesi dosyası oluşturuldu." #, python-format msgid "New str info is: %s." msgstr "Yeni str bilgisi: %s." #, python-format msgid "No dpkg-query info found for %(pkg)s package." msgstr "%(pkg)s paketi için dpkg-query bilgisi bulunamadı." #, python-format msgid "No igroup found for initiator %s" msgstr "%s başlatıcısı için igroup bulunamadı" #, python-format msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" msgstr "Mantıksal sürücü id:%(vol_id)s için iscsi hedefi mevcut değil: %(e)s" #, python-format msgid "No need to extend volume %s as it is already the requested new size." msgstr "" "Mantıksal sürücü %s istenen yeni boyutta olduğundan mantıksal sürücüyü " "büyütmeye gerek yok." #, python-format msgid "" "No replication synchronization session found associated with source volume " "%(source)s on %(storageSystem)s." msgstr "" "%(storageSystem)s üzerinde %(source)s kaynak mantıksal sürücüsü ile ilişkili " "kopyalama eş zamanlama oturumu bulunamadı." #, python-format msgid "No rpm info found for %(pkg)s package." msgstr "%(pkg)s paketi için rpm bilgisi bulunamadı." #, python-format msgid "OpenStack OS Version Info: %(info)s" msgstr "OpenStack OS Sürüm Bilgisi: %(info)s" #, python-format msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" "%(volume_id)s mantıksal sürücüsünün üstüne %(backup_id)s yedeğinin geri " "yüklemesi yazılıyor" #, python-format msgid "Params for add volume request: %s." msgstr "Mantıksal sürücü ekleme isteği için parametreler: %s." #, python-format msgid "Parse_loc: %s" msgstr "Parse_loc: %s" #, python-format msgid "Performing post clone for %s" msgstr "%s için klon sonrası işler gerçekleştiriliyor" #, python-format msgid "Performing secure delete on volume: %s" msgstr "Mantıksal sürücü güvenle siliniyor: %s" msgid "Plain text credentials are being used for authentication" msgstr "Kimlik doğrulama için düz metin kimlik bilgileri kullanılıyor" #, python-format msgid "Pool id is %s." msgstr "Havuz id'si %s." #, python-format msgid "Port group instance name is %(foundPortGroupInstanceName)s." msgstr "Bağlantı noktası grubu sunucu ismi %(foundPortGroupInstanceName)s." #, python-format msgid "Post clone resize LUN %s" msgstr "LUN %s klon yeniden boyutlandırma sonrası" #, python-format msgid "Prefer use target wwpn %(wwpn)s" msgstr "hedef wwpn %(wwpn)s kullanmayı tercih et" #, python-format msgid "Profile %s has been deleted." msgstr "%s profili silindi." msgid "Promote volume replica completed successfully." msgstr "Mantıksal sürücü kopyasının terfisi başarıyla tamamlandı." #, python-format msgid "Protection domain name: %(domain_name)s." msgstr "Koruma alan adı: %(domain_name)s." msgid "Proxy mode detected." msgstr "Vekil kipi algılandı." #, python-format msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" msgstr "" "tablo=%(table)s tablosundan yaş=%(age)d den büyük silinmiş satırlar siliniyor" #, python-format msgid "Query capacity stats response: %s." 
msgstr "Kapasite istatistikleri sorgusu yanıtı: %s." msgid "" "RBD striping not supported - ignoring configuration settings for rbd striping" msgstr "" "RBD şeritleme desteklenmiyor - rbd şeritleme için yapılandırma ayarları göz " "ardı ediliyor" #, python-format msgid "RBD volume %s not found, allowing delete operation to proceed." msgstr "" "RBD mantıksal sürücüsü %s bulunamadı, devam etmek için silme işlemine izin " "veriliyor." #, python-format msgid "" "REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify server's " "certificate: %(verify_cert)s." msgstr "" "REST sunucu IP'si: %(ip)s, bağlantı noktası: %(port)s, kullanıcıadı: " "%(user)s. Sunucu sertifikasını doğrula: %(verify_cert)s." #, python-format msgid "Re-using existing purity host %(host_name)r" msgstr "Mevcut purity istemcisi %(host_name)r tekrar kullanılıyor" #, python-format msgid "Registering image in cache %s" msgstr "%s zulasında imaj kaydediliyor" #, python-format msgid "Removed %s from cg." msgstr "%s cg'den silindi." #, python-format msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s" msgstr "" "%(igrp)s başlatıcı grubu için mantıksal sürücü=%(vol)s'den ACL kaldırılıyor" #, python-format msgid "Removing iscsi_target for Volume ID: %s" msgstr "Mantıksal sürücü ID: %s için iscsi_target kaldırılıyor" #, python-format msgid "Removing iscsi_target for volume: %s" msgstr "Mantıksal sürücü: %s için iscsi_target kaldırılıyor" #, python-format msgid "Removing iscsi_target for: %s" msgstr "%s için iscsi_target kaldırılıyor" #, python-format msgid "Removing iscsi_target: %s" msgstr "iscsi_target kaldırılıyor: %s" #, python-format msgid "Removing non-active host: %(host)s from scheduler cache." msgstr "Etkin olmayan istemci:%(host)s zamanlayıcı zulasından siliniyor." #, python-format msgid "Removing volumes from cg %s." msgstr "Mantıksal sürücüler cg %s'den çıkarılıyor." #, python-format msgid "Rename Volume %(volume_id)s completed." msgstr "%(volume_id)s mantıksal sürücü yeniden isimlendirme tamamlandı." #, python-format msgid "Renaming backing VM: %(backing)s to %(new_name)s." msgstr "" "Destekleyen VM: %(backing)s %(new_name)s olarak yeniden adlandırılıyor." #, python-format msgid "Renaming existing volume %(ref_name)s to %(new_name)s" msgstr "" "Mevcut mantıksal sürücü %(ref_name)s %(new_name)s olarak yeniden " "adlandırılıyor" #, python-format msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." msgstr "" "Benzersizleştirilmiş yapılandırma istendi: %(storage_family)s ve " "%(storage_protocol)s." msgid "Reserve volume completed successfully." msgstr "Mantıksal sürücüyü rezerve etme başarıyla bitti." #, python-format msgid "" "Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." msgstr "" "Yedek durumunu sıfırlama başlatıldı, yedek_id: %(backup_id)s, durum: " "%(status)s." #, python-format msgid "Resetting backup %s to available (was restoring)." msgstr "Yedek %s kullanılabilir olarak sıfırlanıyor (geri yükleniyordu)." #, python-format msgid "Resetting backup %s to error (was creating)." msgstr "Yedek %s hataya sıfırlanıyor (oluşturuluyordu)." #, python-format msgid "Resizing LUN %s directly to new size." msgstr "LUN %s doğrudan yeni boyuta boyutlandırılıyor." #, python-format msgid "Resizing file to %sG" msgstr "Dosya %sG olarak yeniden boyutlanıyor" #, python-format msgid "Resizing file to %sG..." msgstr "Dosya %sG'ye yeniden boyutlanıyor..." 
#, python-format msgid "" "Restore backup finished, backup %(backup_id)s restored to volume " "%(volume_id)s." msgstr "" "Yedek geri yükleme bitti, yedek %(backup_id)s %(volume_id)s mantıksal " "sürücüsüne geri yüklendi." #, python-format msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" "Yedek geri yükleme başladı, yedek: %(backup_id)s mantıksal sürücü: " "%(volume_id)s." #, python-format msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "%(backup)s yedeği %(volume)s mantıksal sürücüsüne geri yükleniyor." #, python-format msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "Yedek %(backup_id)s %(volume_id)s mantıksal sürücüsüne geri yükleniyor" msgid "Resume volume delete completed successfully." msgstr "Mantıksal sürücü silmeyi sürdürme başarıyla tamamlandı." #, python-format msgid "Resuming delete on backup: %s." msgstr "Yedek üzerinde silme sürdürülüyor: %s." #, python-format msgid "" "Returning connection_info: %(info)s for volume: %(volume)s with connector: " "%(connector)s." msgstr "" "%(connector)s bağlayıcısına sahip %(volume)s mantıksal sürücüsü için " "connection_info: %(info)s döndürülüyor." #, python-format msgid "Retype Volume %(volume_id)s is completed." msgstr "%(volume_id)s Mantıksal sürücüsü retype tamamlandı." #, python-format msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s." msgstr "" "%(volume_id)s Mantıksal sürücüsü retype tamamlandı ve %(pool_id)s havuzuna " "göç edildi." #, python-format msgid "" "Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to " "%(old_snap_cpg)s." msgstr "" "Retype %(volume_name)s snap_cpg %(new_snap_cpg)s den %(old_snap_cpg)s e geri " "al." msgid "Retype volume completed successfully." msgstr "Mantıksal sürücü rtype başarıyla tamamlandı." msgid "Retype volume request issued successfully." msgstr "Mantıksal sürücü retype isteği başarıyla yapıldı." #, python-format msgid "Review shares: %s" msgstr "Paylaşımları gözden geçir: %s" msgid "Roll detaching of volume completed successfully." msgstr "Mantıksal sürücünün ayrılmasının yuvarlanması başarıyla tamamlandı." #, python-format msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" msgstr "%(server)s ve vserver %(vs)s için küme son ssc işi çalıştırılıyor" #, python-format msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" msgstr "%(server)s ve vserver %(vs)s için eski ssc tazeleme işi çalıştırılıyor" #, python-format msgid "Running with vmemclient version: %s" msgstr "vmemclient sürüm: %s ile çalışıyor" #, python-format msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s" msgstr " %(svc)s -> %(hdp)s, %(path)s için servis bilgisi kaydet" #, python-format msgid "" "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " "image id: %(id)s." msgstr "" "ScaleIO copy_image_to_volume mantıksal sürücü: %(vol)s imaj servisi: " "%(service)s imaj id: %(id)s." #, python-format msgid "" "ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s " "image meta: %(meta)s." msgstr "" "ScaleIO copy_volume_to_image mantıksal sürücü: %(vol)s imaj servisi: " "%(service)s imaj meta: %(meta)s." #, python-format msgid "" "ScaleIO create cloned volume: source volume %(src)s to target volume %(tgt)s." msgstr "" "ScaleIO klonlanmış mantıksal sürücü oluştur: %(src)s kaynak mantıksal " "sürücüden %(tgt)s hedef mantıksal sürücüye." 
#, python-format msgid "" "ScaleIO create volume from snapshot: snapshot %(snapname)s to volume " "%(volname)s." msgstr "" "ScaleIO anlık görüntüden: %(snapname)s %(volname)s mantıksal sürücüsüne " "mantıksal sürücü oluştur." msgid "ScaleIO delete snapshot." msgstr "ScaleIO anlık görüntüyü sil." #, python-format msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s." msgstr "" "ScaleIO mantıksal sürücü büyüt: %(volname)s mantıksal sürücüsünü " "%(new_size)s boyutuna." #, python-format msgid "ScaleIO get domain id by name request: %s." msgstr "ScaleIO isimle alan id'si getirme isteği: %s." #, python-format msgid "ScaleIO get pool id by name request: %s." msgstr "ScaleIO isimle havuz id'si getirme isteği: %s." #, python-format msgid "" "Secondary ssh hosts key file %(kwargs)s will be loaded along with %(conf)s " "from /etc/cinder.conf." msgstr "" "İkincil ssh istemci anahtarı %(kwargs)s /etc/cinder.conf'dan gelen %(conf)s " "ile birlikte yüklenecek." msgid "Session might have expired. Trying to relogin" msgstr "Oturum sona ermiş olabilir. Tekrar giriş deneniyor" #, python-format msgid "Setting host %(host)s to %(state)s." msgstr "%(host)s istemcisi %(state)s olarak ayarlanıyor." #, python-format msgid "Setting snapshot %(snap)s to online_flag %(flag)s" msgstr "Anlık görüntü %(snap)s online_flag %(flag)s olarak ayarlanıyor" #, python-format msgid "Setting volume %(vol)s to online_flag %(flag)s" msgstr "%(vol)s mantıksal sürücüsü online_flag %(flag)s olarak ayarlanıyor" #, python-format msgid "Skipping deletion of volume %s as it does not exist." msgstr "%s mantıksal sürücüsünün silinmesi atlanıyor çünkü mevcut değil." #, python-format msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" "ensure_export atlanıyor. Mantıksal sürücü: %s için hiçbir iscsi_target " "hazırlanmamış" #, python-format msgid "" "Skipping remove_export. No iscsi_target is presently exported for volume: %s" msgstr "" "remove_export atlanıyor. Mantıksal sürücü: %s için hiçbiri iscsi_target şu " "an dışa aktarılmamış" #, python-format msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" "remove_export atlanıyor. Mantıksal sürücü: %s için hiçbir iscsi_target " "hazırlanmadı" #, python-format msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" msgstr "" "Smb paylaşımı %(share)s Toplam boyut %(size)s Toplam ayrılan %(allocated)s" #, python-format msgid "Snapshot %s was deleted successfully." msgstr "Anlık görüntü %s başarıyla silindi." msgid "Snapshot create request issued successfully." msgstr "Anlık görüntü oluşturma isteği başarıyla yapıldı." #, python-format msgid "" "Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." msgstr "" "Anlık görüntü oluşturma %(cloneName)s tamamlandı. Kaynak Mantıksal sürücü: " "%(sourceName)s." msgid "Snapshot delete request issued successfully." msgstr "Anlık görüntü silme isteği başarıyla yapıldı." msgid "Snapshot force create request issued successfully." msgstr "Anlık görüntü zorla oluşturma isteği başarıyla yapıldı." #, python-format msgid "" "Snapshot record for %s is not present, allowing snapshot_delete to proceed." msgstr "" "%s için anlık görüntü kaydı mevcut değil, devam etmek için " "snapshot_delete'ye izin veriliyor." msgid "Snapshot retrieved successfully." msgstr "Anlık görüntü başarıyla getirildi." #, python-format msgid "Snapshot: %(snapshot)s: not found on the array." msgstr "Anlık görüntü: %(snapshot)s: dizide bulunamadı." 
#, python-format msgid "Source Snapshot: %s" msgstr "Kaynak Anlık görüntü: %s" #, python-format msgid "Start to create cgsnapshot for consistency group: %(group_name)s" msgstr "Tutarlılık grubu için cgsnapshot oluşturmaya başla: %(group_name)s" #, python-format msgid "Start to create consistency group: %(group_name)s id: %(id)s" msgstr "Tutarlılık grubu oluşturmaya başla: %(group_name)s id: %(id)s" #, python-format msgid "Start to delete consistency group: %(cg_name)s" msgstr "Tutarlılık grubu silmeye başla: %(cg_name)s" #, python-format msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "%(topic)s düğüm başlatılıyor (sürüm %(version_string)s)" #, python-format msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "Mantıksal sürücü %(driver_name)s (%(version)s) başlatılıyor" #, python-format msgid "Storage Group %s was empty." msgstr "Depolama Grubu %s boştu." #, python-format msgid "Storage group not associated with the policy. Exception is %s." msgstr "Depolama grubu ilke ile ilişkilendirilmemiş. İstisna %s." #, python-format msgid "" "Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " "%(pool_id)s." msgstr "" "Depolama havuzları isimleri: %(pools)s, depolama havuz ismi: %(pool)s, pool " "id: %(pool_id)s." #, python-format msgid "Successful login by user %s" msgstr "%s kullanıcısı tarafından başarılı giriş" #, python-format msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" msgstr "%(server)s ve vserver %(vs)s için ssc işi başarıyla tamamlandı" #, python-format msgid "" "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" msgstr "" "%(server)s ve vserver %(vs)s için eski tazeleme işi başarıyla tamamlandı" #, python-format msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr "%(src)s konumundaki disk başarıyla %(dest)s konumuna kopyalandı." #, python-format msgid "Successfully create volume %s" msgstr "%s mantıksal sürücüsünü başarıyla oluştur" #, python-format msgid "" "Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack volume " "[%(stack_vol)s]." msgstr "" "Başarıyla bir CloudByte mantıksal sürücüsü [%(cb_vol)s] w.r.t OpenStack " "mantıksal sürücüsü [%(stack_vol)s] oluşturuldu." #, python-format msgid "Successfully created clone: %s." msgstr "Klon başarıyla oluşturuldu: %s." #, python-format msgid "" "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." msgstr "" "Anlık görüntü: %(snap)s başarıyla mantıksal sürücü desteği: %(backing)s için " "oluşturuldu." #, python-format msgid "Successfully created snapshot: %s." msgstr "Anlık görüntü başarıyla oluşturuldu: %s." #, python-format msgid "Successfully created volume backing: %s." msgstr "Mantıksal sürücü desteği başarıyla oluşturuldu: %s." #, python-format msgid "Successfully deleted file: %s." msgstr "Dosya başarıyla silindi: %s." #, python-format msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." msgstr "%(backing)s desteği için anlık görüntü: %(name)s başarıyla silindi." #, python-format msgid "Successfully deleted snapshot: %s" msgstr "Anlık görüntü başarıyla silindi: %s" #, python-format msgid "Successfully deleted snapshot: %s." msgstr "Anlık görüntü başarıyla silindi: %s." #, python-format msgid "" "Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " "OpenStack volume [%(stack_vol)s]." msgstr "" "[%(stack_vol)s] OpenStack mantıksal sürücüsüne karşılık gelen [%(cb_vol)s] " "mantıksal sürücüsü CloudByte'da başarıyla silindi." 
#, python-format msgid "Successfully deleted volume: %s" msgstr "Mantıksal sürücü başarıyla silindi: %s" #, python-format msgid "Successfully extended volume %(volume_id)s to size %(size)s." msgstr "Mantıksal sürücü %(volume_id)s başarıyla %(size)s boyutuna büyütüldü." #, python-format msgid "Successfully got volume information for volume %s" msgstr "%s mantıksal sürücüsü için mantıksal sürücü bilgisi başarıyla alındı" #, python-format msgid "Successfully initialized connection with volume: %(volume_id)s." msgstr "Mantıksal sürücü: %(volume_id)s ile bağlantı başarıyla ilklendirildi." #, python-format msgid "" "Successfully initialized connection. target_wwn: %(target_wwn)s, " "initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." msgstr "" "Bağlantı başarıyla ilklendirildi. target_wwn: %(target_wwn)s, " "initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." #, python-format msgid "" "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." msgstr "" "Mantıksal sürücü desteği: %(backing)s başarıyla %(fol)s dizinine taşındı." #, python-format msgid "" "Successfully relocated volume backing: %(backing)s to datastore: %(ds)s and " "resource pool: %(rp)s." msgstr "" "Mantıksal sürücü desteği: %(backing)s başarıyla veri deposu: %(ds)s e ve " "kaynak havuzu: %(rp)s ye konumlandırıldı." msgid "Successfully retrieved InitiatorGrpList" msgstr "InitiatorGrpList başarıyla alındı" #, python-format msgid "Successfully setup driver: %(driver)s for server: %(ip)s." msgstr "%(ip)s sunucusu için sürücü: %(driver)s başarıyla kuruldu." #, python-format msgid "Successfully terminated connection for volume: %(volume_id)s." msgstr "Mantıksal sürücü: %(volume_id)s için bağlantı başarıyla sonlandırıldı." #, python-format msgid "" "Successfully update volume stats. backend: %(volume_backend_name)s, vendor: " "%(vendor_name)s, driver version: %(driver_version)s, storage protocol: " "%(storage_protocol)s." msgstr "" "Mantıksal sürücü istatistikleri başarıyla güncellendi: " "%(volume_backend_name)s, satıcı: %(vendor_name)s, sürücü sürümü: " "%(driver_version)s, depolama iletişim kuralı: %(storage_protocol)s." #, python-format msgid "System %(id)s has %(status)s status." msgstr "Sistem %(id)s %(status)s durumuna sahip." #, python-format msgid "" "System with controller addresses [%s] is not registered with web service." msgstr "Kontrol adreslerine [%s] sahip sistem web servisine kayıtlı değil." #, python-format msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." msgstr "%(maskingView)s maskeleme görünümündeki hedef wwn'ler: %(targetWwns)s." #, python-format msgid "Terminate connection: %(volume)s." msgstr "Bağlantıyı sonlandır: %(volume)s." msgid "Terminate volume connection completed successfully." msgstr "Mantıksal sürücü bağlantısını sonlandırma başarıyla tamamlandı." #, python-format msgid "The QoS sepcs is: %s." msgstr "QoS özelliği: %s." #, python-format msgid "" "The multi-attach E-Series host group '%(label)s' already exists with " "clusterRef %(clusterRef)s" msgstr "" "Çoklu-ekleme E-Serisi istemci grubu '%(label)s' zaten clusterRef " "%(clusterRef)s ile mevcut" #, python-format msgid "The storage group found is %(foundStorageGroupInstanceName)s." msgstr "Bulunan depolama grubu %(foundStorageGroupInstanceName)s." #, python-format msgid "" "The volume belongs to more than one storage group. Returning storage group " "%(sgName)s." msgstr "" "Mantıksal sürücü birden fazla depolama grubuna ait. %(sgName)s depolama " "grubu döndürülüyor." 
#, python-format msgid "" "There is no backing for the snapshotted volume: %(snap)s. Not creating any " "backing for the volume: %(vol)s." msgstr "" "Anlık görüntülenen mantıksal sürücü: %(snap)s için destek yok. Mantıksal " "sürücü: %(vol)s için deste oluşturulmuyor." #, python-format msgid "" "There is no backing for the source volume: %(src)s. Not creating any backing " "for volume: %(vol)s." msgstr "" "Kaynak mantıksal sürücü: %(src)s için destek yok. Mantıksal sürücü: %(vol)s " "için herhangi bir destek oluşturulmuyor." #, python-format msgid "There is no backing for the volume: %s. Need to create one." msgstr "Mantıksal sürücü: %s için destek yok. Bir tane oluşturmalı." #, python-format msgid "There is no backing, and so there is no snapshot: %s." msgstr "Destek yok, o yüzden anlık görüntü de yok: %s." #, python-format msgid "There is no backing, so will not create snapshot: %s." msgstr "Destek yok, yani anlık görüntü oluşturulmayacak: %s." #, python-format msgid "" "There is no snapshot point for the snapshotted volume: %(snap)s. Not " "creating any backing for the volume: %(vol)s." msgstr "" "Anlık görüntülenen mantıksal sürücü: %(snap)s için anlık görüntü noktası " "yok. Mantıksal sürücü: %(vol)s için destek oluşturulmuyor." msgid "Token is invalid, going to re-login and get a new one." msgstr "Jeton geçersiz, tekrar giriş yapıp yeni bir tane alınacak." msgid "Transfer volume completed successfully." msgstr "Mantıksal sürücü aktarımı başarıyla tamamlandı." #, python-format msgid "Tried to delete non-existent vdisk %s." msgstr "Mevcut olmayan vdisk %s silinmeye çalışıldı." #, python-format msgid "" "Tried to delete snapshot %s, but was not found in Datera cluster. Continuing " "with delete." msgstr "" "%s anlık görüntüsü silinmeye çalışıldı, ama Datera kümesinde bulunamadı. " "Silmeye devam ediliyor." #, python-format msgid "" "Tried to delete volume %s, but it was not found in the Datera cluster. " "Continuing with delete." msgstr "" "%s mantıksal sürücüsü silinmeye çalışıldı, ama Datera kümesinde bulunamadı. " "Silmeye devam ediliyor." #, python-format msgid "Trying to unmap volume from all sdcs before deletion: %s." msgstr "" "Silmeden önce tüm sdc'lerden mantıksal sürücü eşleştirmesini kaldırmaya " "çalışılıyor: %s." msgid "Unable to parse XML input." msgstr "XML girdisi ayrıştırılamadı." #, python-format msgid "Unable to serialize field '%s' - excluding from backup" msgstr "'%s' alanı serileştirilemiyor - yedekten çıkarılıyor" #, python-format msgid "Unexporting lun %s." msgstr "lun %s aktarımı geri alınıyor" #, python-format msgid "Unmanage volume %(volume_id)s completed." msgstr "%(volume_id)s yönetimini bırakma tamamlandı." #, python-format msgid "Unmanage volume %s" msgstr "Mantıksal sürücü %s'i yönetmeyi durdur" #, python-format msgid "Unmanage volume with id: %s" msgstr "Şu kimliğe sahip mantıksal sürücü yönetimini bırak: %s" #, python-format msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." msgstr "" "%(path)s mevcut yolu ve %(uuid)s uuid'ine sahip LUN yönetimi bırakıldı." #, python-format msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." msgstr "" "%(label)s etiketli ve %(wwn)s wwn'li mantıksal sürücünün yönetimi bırakıldı." #, python-format msgid "Unmap volume: %(volume)s." msgstr "Mantıksal sürücü eşleştirmesini kes: %(volume)s." msgid "Unreserve volume completed successfully." msgstr "Mantıksal sürücünün rezervinin kaldırılması başarıyla bitti." #, python-format msgid "Update migrated volume %(new_volume)s completed." 
msgstr "Göç etmiş mantıksal sürücü %(new_volume)s güncellemesi tamamlandı." msgid "Update readonly setting on volume completed successfully." msgstr "" "Mantıksal sürücü üstünde yalnızca okunabilir ayarın güncellenmesi başarıyla " "tamamlandı." msgid "Update snapshot metadata completed successfully." msgstr "Anlık görüntü metadata'sı güncelle başarıyla tamamlandı." msgid "Update volume admin metadata completed successfully." msgstr "Mantıksal sürücü yönetici metadata'sını güncelle başarıyla tamamlandı." msgid "Update volume metadata completed successfully." msgstr "Mantıksal sürücü metadata güncellemesi başarıyla tamamlandı." #, python-format msgid "Updated Consistency Group %s" msgstr "Tutarlılık Grubu %s güncellendi" #, python-format msgid "" "Updating consistency group %(id)s with name %(name)s description: " "%(description)s add_volumes: %(add_volumes)s remove_volumes: " "%(remove_volumes)s." msgstr "" "%(name)s isimli %(id)s tutarlılık grubu güncelleniyor tanım: %(description)s " "add_volumes: %(add_volumes)s remove_volumes: %(remove_volumes)s." #, python-format msgid "Updating snapshot %(id)s with info %(dict)s" msgstr "%(id)s anlık görüntüsü %(dict)s bilgisi ile güncelleniyor" #, python-format msgid "Updating storage service catalog information for backend '%s'" msgstr "'%s' arka ucu için depolama servisi kataloğu bilgisi güncelleniyor" #, python-format msgid "" "Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." msgstr "" "FC Zone Manager %(zm_version)s, Sürücü %(drv_name)s %(drv_version)s " "kullanılıyor." #, python-format msgid "Using existing initiator group name: %(igGroupName)s." msgstr "Mevcut başlatıcı grubu ismi kullanılıyor: %(igGroupName)s." #, python-format msgid "Using overridden vmware_host_version from config: %s" msgstr "" "%s yapılandırmasından üzerine yazılmış vmware_host_version kullanılıyor" #, python-format msgid "Using pool %(pool)s instead of %(cpg)s" msgstr "%(cpg)s yerine %(pool)s havuzu kullanılıyor" #, python-format msgid "Using security file in %s for authentication" msgstr "Kimlik doğrulama için %s içindeki güvenlik dosyası kullanılıyor" #, python-format msgid "Using service label: %s" msgstr "Servis etiketi kullanılıyor: %s" #, python-format msgid "Value with type=%s is not serializable" msgstr "type=%s sahip değer serileştirilemez" #, python-format msgid "Virtual volume %(disp)s '%(new)s' is being retyped." msgstr "Sanal mantıksal sürücü %(disp)s '%(new)s' retype ediliyor." #, python-format msgid "Virtual volume %(disp)s '%(new)s' is now being managed." msgstr "Sanal mantıksal sürücü %(disp)s '%(new)s' artık yönetiliyor." #, python-format msgid "" "Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " "%(cpg)s" msgstr "" "Sanal mantıksal sürücü %(disp)s '%(new)s' snapCPG boş, bu yüzden %(cpg)s " "olarak ayarlanacak" #, python-format msgid "" "Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to " "'%(new)s'." msgstr "" "Sanal mantıksal sürücü %(disp)s '%(vol)s' artık yönetilmiyor. Mantıksal " "sürücü '%(new)s' olarak yeniden adlandırıldı." #, python-format msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." msgstr "" "Sanal mantıksal sürücü %(disp)s başarıyla %(new_type)s olarak retype edildi." #, python-format msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." msgstr "Sanal mantıksal sürücü '%(ref)s' '%(new)s' olarak adlandırıldı." #, python-format msgid "Vol copy job completed for dest %s." msgstr "Mantıksal sürücü kopyalama işi %s hedefi için tamamlandı." 
#, python-format msgid "Volume %(volume)s does not have meta device members." msgstr "Mantıksal sürücü %(volume)s meta aygıt üyelerine sahip değil." #, python-format msgid "" "Volume %(volume)s is already mapped. The device number is %(deviceNumber)s." msgstr "" "Mantıksal sürücü %(volume)s zaten eşleştirilmiş. Aygıt numarası " "%(deviceNumber)s." #, python-format msgid "Volume %(volumeName)s not in any storage group." msgstr "Mantıksal sürücü %(volumeName)s hiçbir depolama grubunda değil." #, python-format msgid "" "Volume %(volume_id)s: being created as %(create_type)s with specification: " "%(volume_spec)s" msgstr "" "Mantıksal sürücü %(volume_id)s: %(volume_spec)s özelliğine sahip " "%(create_type)s olarak oluşturuluyor" #, python-format msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" msgstr "" "Mantıksal sürücü %(volume_name)s (%(volume_id)s): başarıyla oluşturuldu" #, python-format msgid "Volume %s has been transferred." msgstr "Mantıksal sürücü %s aktarıldı." #, python-format msgid "Volume %s is mapping to multiple hosts." msgstr "Mantıksal sürücü %s birden fazla istemciyle eşleşiyor." #, python-format msgid "Volume %s is not mapped. No volume to unmap." msgstr "" "Mantıksal sürücü %s eşleştirilmemiş. Eşleştirmesi kesilecek bir mantıksal " "sürücü yok." #, python-format msgid "Volume %s: retyped successfully" msgstr "Mantıksal sürücü %s: retype başarılı" #, python-format msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" msgstr "Mantıksal sürücü kopyalama %(size_in_m).2f MB %(mbps).2f MB/s hızında" msgid "Volume created successfully." msgstr "Mantıksal sürücü başarıyla oluşturuldu." msgid "Volume info retrieved successfully." msgstr "Mantıksal sürücü bilgisi başarıyla getirildi." msgid "Volume retrieved successfully." msgstr "Mantıksal sürücü başarıyla alındı." #, python-format msgid "Volume service: %(label)s. Casted to: %(loc)s" msgstr "Mantıksal sürücü servisi: %(label)s. %(loc)s'a gönderildi" #, python-format msgid "Volume status is: %s." msgstr "Mantıksal sürücü durumu: %s." #, python-format msgid "Volume type is %s." msgstr "Mantıksal sürücü türü %s." #, python-format msgid "" "Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage pool " "id: %(pool_id)s, protection domain id: %(domain_id)s, protection domain " "name: %(domain_name)s." msgstr "" "Mantıksal sürücü türü: %(volume_type)s, depolama havuzu ismi: %(pool_name)s, " "depolama havuzu kimliği: %(pool_id)s, koruma alan kimliği: %(domain_id)s, " "koruma alan ismi: %(domain_name)s." msgid "Volume updated successfully." msgstr "Mantıksal sürücü başarıyla güncellendi." #, python-format msgid "Volume with given ref %s need not be renamed during manage operation." msgstr "" "Verilen %s referanslı mantıksal sürücü yönetme işlemi süresinde yeniden " "adlandırılmamalı." #, python-format msgid "Volume with the name %s wasn't found, can't unmanage" msgstr "%s isimli mantıksal sürücü bulunamadı, yönetim bırakılamıyor" #, python-format msgid "" "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " "size: %(backup_size)d, continuing with restore." msgstr "" "Mantıksal sürücü: %(vol_id)s, boyut: %(vol_size)d yedek: %(backup_id)s, " "boyut: %(backup_size)d'den büyük, geri yüklemeyle devam ediliyor." #, python-format msgid "WWPN on node %(node)s: %(wwpn)s." msgstr "%(node)s düğümü üzerinde WWPN: %(wwpn)s." msgid "Waiting for web service array communication." msgstr "Web servis dizisi iletişimi bekleniyor." 
#, python-format msgid "XtremIO SW version %s" msgstr "XtremIO SW sürümü %s" #, python-format msgid "ZFSSA version: %s" msgstr "ZFSSA sürümü: %s" #, python-format msgid "Zone exists in I-T mode. Skipping zone creation %s" msgstr "Bölge I-T kipinde mevcut. Bölge oluşturma %s atlanıyor" #, python-format msgid "Zone map to add: %s" msgstr "Eklenecek bölge haritası: %s" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Bölgeleme İlkesi: %s, tanınmıyor" #, python-format msgid "Zoning policy for Fabric %s" msgstr "Fabric %s için bölgeleme ilkesi" #, python-format msgid "Zoning policy for fabric %s" msgstr "Fabric %s için bölgeleme haritası" #, python-format msgid "" "_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" "_check_volume_copy_ops: Mantıksal sürücü %(vol)s belirtilen vdisk kopyalama " "işlemine sahip değil asıl=%(orig)s yeni=%(new)s." #, python-format msgid "_get_tgt_iqn: iSCSI target iqn is: %s." msgstr "_get_tgt_iqn: iSCSI hedef iqn: %s." #, python-format msgid "casted to %s" msgstr "%s'e gönderildi" #, python-format msgid "cgsnapshot %s: created successfully" msgstr "cgsnapshot %s: başarıyla oluşturuldu" #, python-format msgid "cgsnapshot %s: deleted successfully" msgstr "cgsnapshot %s: başarıyla silindi" #, python-format msgid "cgsnapshot %s: deleting" msgstr "cgsnapshot %s: siliniyor" #, python-format msgid "create_volume: create_lu returns %s" msgstr "create_volume: create_lu %s döndürüyor" #, python-format msgid "delete lun loc %s" msgstr "lun loc %s sil" #, python-format msgid "do_setup: %s" msgstr "do_setup: %s" #, python-format msgid "free capacity of pool %(pool)s is: %(free)s, total capacity: %(total)s." msgstr "" "%(pool)s havuzu için boş kapasite: %(free)s, toplam kapasite: %(total)s." #, python-format msgid "iSCSI Initiators %(in)s of %(ins)s need registration." msgstr "iSCSI Başlatıcıları %(in)s / %(ins)s kaydolmalı." #, python-format msgid "iSCSI portal found for service: %s" msgstr "Servis içi iSCSI portalı bulundu: %s" #, python-format msgid "igroup %(grp)s found for initiator %(iname)s" msgstr "%(iname)s başlatıcısı için %(grp)s igroup bulundu" #, python-format msgid "initialize volume %(vol)s connector %(conn)s" msgstr "%(vol)s mantıksal sürücüsü %(conn)s bağlayıcısını ilklendir" #, python-format msgid "initialize_ connection: %(vol)s:%(initiator)s" msgstr "initialize_ connection: %(vol)s:%(initiator)s" #, python-format msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" msgstr "" "initialize_connection mantıksal sürücü: %(volume)s, bağlayıcı: %(connector)s" #, python-format msgid "initiate: connection %s" msgstr "başlat: bağlantı %s" msgid "" "initiator_auto_registration: False. Initiator auto registration is not " "enabled. Please register initiator manually." msgstr "" "initiator_auto_registration: False. Başlatıcı otomatik kaydı etkin değil. " "Lütfen başlatıcıyı elle kaydedin." #, python-format msgid "iops limit is: %s." msgstr "iops sınırı: %s." 
#, python-format msgid "iscsi_initiators: %s" msgstr "iscsi_initiators: %s" #, python-format msgid "location is: %(location)s" msgstr "konum: %(location)s" #, python-format msgid "" "migrate_volume_completion is cleaning up an error for volume %(vol1)s " "(temporary volume %(vol2)s" msgstr "" "migrate_volume_completion %(vol1)s mantıksal sürücüsü için bir hatayı " "temizliyor (geçici mantıksal sürücü %(vol2)s" #, python-format msgid "new cloned volume: %s" msgstr "yeni klonlanan mantıksal sürücü: %s" #, python-format msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" msgstr "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" #, python-format msgid "open_connection to %(ssn)s at %(ip)s" msgstr "%(ip)s ye %(ssn)s open_connection" #, python-format msgid "setting volume %s to error_restoring (was restoring-backup)." msgstr "" "%s mantıksal sürücüsü error_restoring olarak ayarlanıyor (yedek geri " "yükleniyordu)." #, python-format msgid "share: %(share)s -> %(info)s" msgstr "paylaşım: %(share)s -> %(info)s" #, python-format msgid "share: %s incorrect entry" msgstr "paylaşım: %s geçersiz girdi" #, python-format msgid "smis_do_iscsi_discovery is: %(out)s." msgstr "smis_do_iscsi_discovery: %(out)s." #, python-format msgid "snapshot %s doesn't exist" msgstr "anlık görüntü %s mevcut değil" #, python-format msgid "source volume for cloning: %s" msgstr "klon için kaynak mantıksal sürücü: %s" #, python-format msgid "targetlist: %s" msgstr "hedeflistesi: %s" #, python-format msgid "terminate: connection %s" msgstr "sonlandır: bağlantı %s" #, python-format msgid "terminate_connection volume: %(volume)s, connector: %(con)s" msgstr "terminate_connection mantıksal sürücü: %(volume)s, bağlayıcı: %(con)s" #, python-format msgid "tunevv failed because the volume '%s' has snapshots." msgstr "tunew başarısız çünkü '%s' mantıksal sürücüsü anlık görüntülere sahip." #, python-format msgid "username: %(username)s, verify_cert: %(verify)s." msgstr "kullanıcı adı: %(username)s, verify_cert: %(verify)s." #, python-format msgid "vol=%s" msgstr "birim=%s" #, python-format msgid "vol_name=%(name)s provider_location=%(loc)s" msgstr "vol_name=%(name)s provider_location=%(loc)s" #, python-format msgid "volume %s doesn't exist" msgstr "mantıksal sürücü %s mevcut değil" #, python-format msgid "volume %s no longer exists in backend" msgstr "mantıksal sürücü %s artık arka uçta mevcut değil" msgid "volume_file does not support fileno() so skipping fsync()" msgstr "volume_file fileno() desteklemiyor bu yüzden fsync() atlanıyor" cinder-8.0.0/cinder/locale/tr_TR/LC_MESSAGES/cinder.po0000664000567000056710000067074012701406250023265 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # Andreas Jaeger , 2015. #zanata # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0b4.dev196\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-12 14:33+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-16 08:16+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder sürümü: %(version)s\n" #, python-format msgid " but size is now %d" msgstr " ancak boyut şu anda %d" #, python-format msgid " but size is now %d." msgstr " ama şimdiki boyut %d." #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing istemcilere bağlı bir mantıksal sürücü " "yönetilemez. Lütfen içe aktarmadan önce bu mantıksal sürücüyü mevcut " "istemcilerden ayırın" #, python-format msgid "%(err)s" msgstr "Hatalar: %(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "sonuç: %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s: İzin reddedildi." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: Beklenmedik CLI çıktısı ile başarısız oldu.\n" " Komut: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "Durum Kodu: %(_status)s\n" "Gövde: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, subjectAltName: %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s: NetworkPortal oluşturuluyor: %(ip)s ip üzerinde %(port)d " "bağlantı noktasının başka bir servis tarafından kullanılmadığından emin olun." #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s %(min_length)s asgari karakter gereksinimine sahip." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "" "%(name)s %(max_length)s azami karakter sayısından daha fazla karaktere sahip." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: mantıksal sürücü %(vol_id)s yedekleme %(bck_id)s başarısız oldu. " "Yedekleme nesnesi beklenmeyen kipe sahip. İmaj ya da dosya yedeklemeleri " "destekleniyor, gerçek kip %(vol_mode)s." 
#, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "%(service)s Servisi depolama aygıtında %(status)s değil: %(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s <= %(max_value)d olmalı" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s >= %(min_value)d olmalı" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "%(workers)d'ın %(worker_name)s değeri geçersiz, 0'dan daha büyük olmalıdır." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "%s erişilebilir değil. GPFS'in etkin olduğunu ve dosya sisteminin bağlı " "olduğunu doğrulayın." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "%s klon işlemi kullanılarak yeniden boyutlandırılamaz çünkü blok içermiyor." #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "%s klon işlemi kullanılarak yeniden boyutlandırılamaz çünkü sıkıştırılmış " "bir mantıksal sürücü üzerinde" #, python-format msgid "%s configuration option is not set." msgstr "%s yapılandırma seçeneği ayarlı değil." #, python-format msgid "%s is not a directory." msgstr "%s bir dizin değil." #, python-format msgid "%s is not a string or unicode" msgstr "%s bir karakter dizisi ya da unicode değildir" #, python-format msgid "%s is not installed" msgstr "%s kurulu değil" #, python-format msgid "%s is not installed." msgstr "%s kurulu değil." #, python-format msgid "%s is not set" msgstr "%s ayarlanmamış" #, python-format msgid "%s is not set." msgstr "%s ayarlı değil." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s geçerli bir raw ya da qcow2 imajı olmalıdır." #, python-format msgid "%s must be an absolute path." msgstr "%s mutlak yol olmalı." #, python-format msgid "%s not set in cinder.conf" msgstr "%s cinder.conf'da ayarlanmamış" #, python-format msgid "%s not set." msgstr "%s ayarlanmamış." #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s' yapılandırma dosyasında flashsystem_connection_protocol için " "geçersiz. geçerli değer(ler) %(enabled)s." msgid "'active' must be present when writing snap_info." msgstr "snap_info yazılırken 'active' olması gerekir." msgid "'consistencygroup_id' must be specified" msgstr "'consistencygroup_id' belirtilmiş olmalıdır" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' ayrıştırması başarısız oldu." msgid "'status' must be specified." msgstr "'status' belirtilmelidir." msgid "'volume_id' must be specified" msgstr "'volume_id' belirtilmelidir" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "Bir LUN (HLUN) bulunamadı. (LDEV: %(ldev)s)" #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "Boş bir LUN (HLUN) bulunamadı. Farklı bir istemci grubu ekleyin. (LDEV: " "%(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" "Bir istemci grubu eklenemedi. (bağlantı noktası: %(port)s, isim: %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "Bir istemci grubu silinemedi. 
(bağlantı noktası: %(port)s, gid: %(gid)s, " "isim: %(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "Bir istemci grubu geçersiz. (istemci grubu: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "Bir çift silinemez. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "Bir çift oluşturulamadı. Azami çift sayısı aşıldı. (kopyalama yöntemi: " "%(copy_method)s, P-VOL: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "Bir parametre geçersiz. (%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "Bir parametre değeri geçersiz. (%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "Bir havuz bulunamadı. (havuz kimliği: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Bir anlık görüntü durumu geçersiz. (durum: %(status)s)" msgid "A volume ID or share was not specified." msgstr "Bir mantıksal sürücü ID ya da paylaşım belirtilmemiş." #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "Bir mantıksal sürücünün durumu geçersiz. (durum: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s %(err)s hatası ile başarısız oldu" msgid "API key is missing for CloudByte driver." msgstr "CloudByte sürücüsü için API anahtarı eksik." #, python-format msgid "API response: %s" msgstr "API yanıtı: %s" msgid "API version could not be determined." msgstr "API sürümü belirlenemedi." msgid "Access list not available for public volume types." msgstr "" "Ortak mantıksal sürücü türleri için erişim listesi kullanılabilir değil." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "Belirtilen yönetilecek depolama havuzlarından hepsi mevcut değil. Lütfen " "yapılandırmanızı kontrol edin. Mevcut olmayan havuzlar: %s" msgid "An error has occurred during backup operation" msgstr "Yedekleme işlemi sırasında bir hata oluştu" #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "LUNcopy işlemi sırasında bir hata oluştu. LUNcopy ismi: %(luncopyname)s. " "LUNcopy durumu: %(luncopystatus)s. LUNcopy durumu: %(luncopystate)s." #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "Bir iSCSI CHAP kullanıcısı eklenemedi. (kullanıcı adı: %(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "Bir iSCSI CHAP kullanıcısı silinemedi. (kullanıcı adı: %(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "Bir iSCSI hedefi eklenemedi. (bağlantı noktası: %(port)s, rumuz: %(alias)s, " "sebep: %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "Bir iSCSI hedefi silinemedi. (bağlantı noktası: %(port)s, tno: %(tno)s, " "rumuz: %(alias)s)" msgid "An unknown exception occurred." msgstr "Bilinmeyen bir istisna oluştu." #, python-format msgid "Array does not exist or is offline. 
Current status of array is %s." msgstr "Dizi mevcut değil ya da çevrim dışı. Dizinin mevcut durumu %s." msgid "At least one valid iSCSI IP address must be set." msgstr "En az bir geçerli iSCSI IP adresi ayarlanmalıdır." #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "Geçersiz kimlik doğrulama anahtarı ile %s aktarma girişimi." #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "" "CloudByte depolamasında kimlik doğrulama grubu [%s] ayrıntıları bulunamadı." msgid "Auth user details not found in CloudByte storage." msgstr "" "CloudByte depolamada kimlik doğrulama kullanıcı ayrıntıları bulunamadı." msgid "Authentication error" msgstr "Kimlik doğrulama hatası" msgid "Authorization error" msgstr "Yetkilendirme hatası" msgid "Available categories:" msgstr "Kullanılabilir kategoriler:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "Arka-uç QoS özellikleri bu depolama ailesi ve ONTAP sürümünde desteklenmiyor." #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "Art alanda çalışan uygulama (%(backend)s) yok" #, python-format msgid "Backend reports: %(message)s" msgstr "Art alanda çalışan uygulama bildiriyor: %(message)s" msgid "Backend reports: item already exists" msgstr "Art alanda çalışan uygulama bildiriyor: öge zaten mevcut" msgid "Backend reports: item not found" msgstr "Art alanda çalışan uygulama bildiriyor: öge bulunamadı" msgid "Backend server not NaServer." msgstr "Arkauç sunucusu NaServer değil." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "Arka uç servis yeniden deneme zaman aşımına erişildi: %(timeout)s sn" msgid "Backend storage did not configure fiber channel target." msgstr "Arka uç depolama fiber kanal hedefini yapılandırmadı." #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "%(backup_id)s yedekleme bulunamadı." msgid "Backup RBD operation failed" msgstr "RBD Yedekleme işlemi başarısız oldu" #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "Yedekleme sürücüsü bir hata bildirdi: %(message)s" msgid "Backup id required" msgstr "Yedekleme kimliği gereklidir" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "" "Anlık sistem görüntüleri olan GlusterFS mantıksal sürücüleri için yedekleme " "desteklenmiyor." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "" "Yedekleme sadece destek dosyası olmayan SOFS mantıksal sürücüleri için " "desteklenir." msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "" "Yedekleme sadece raw-biçimli GlusterFS mantıksal sürücüleri için desteklenir." msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "" "Yedekleme sadece raw-biçimli SOFS mantıksal sürücüleri için desteklenir." msgid "Backup operation of an encrypted volume failed." msgstr "Şifreli mantıksal sürücünün yedekleme işlemi başarısız oldu." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Yedekleme servisi %(configured_service)s doğrulamayı desteklemiyor. Yedek " "kimliği %(id)s doğrulanmadı. Doğrulama atlanıyor." #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "Yedekleme servisi %(service)s doğrulamayı desteklemiyor. Yedek kimliği " "%(id)s doğrulanmadı. Sıfırlama atlanıyor." 
#, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "" "Yedekleme %s yerine sadece bir anlık sistem görüntüsüne sahip olmalıdır" msgid "Backup status must be available" msgstr "Yedek durumu kullanılabilir olmalıdır" #, python-format msgid "Backup status must be available and not %s." msgstr "Yedekleme durumu kullanılabilir olmalı ve %s olmamalıdır." msgid "Backup status must be available or error" msgstr "Yedek durumu kullanılabilir ya da hatalı olmalıdır" msgid "Backup to be restored has invalid size" msgstr "Geri yüklenecek yedek geçersiz boyuta sahip" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "Bozuk Durum satırı döndürüldü: %(arg)s." #, python-format msgid "Bad key(s) in quota set: %s" msgstr "Kota kümesinde bozuk anahtar(lar): %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "Depolama mantıksal sürücü art alanda çalışan uygulama API'sinden bozuk ya da " "beklenmeyen yanıt: %(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "Bozuk proje biçimi: proje (%s) doğru biçiminde değil" msgid "Bad response from Datera API" msgstr "Datera API'sinden bozuk yanıt" msgid "Bad response from SolidFire API" msgstr "SolidFire API'den bozuk yanıt" #, python-format msgid "Bad response from XMS, %s" msgstr "XMS'den kötü yanıt, %s" msgid "Binary" msgstr "İkili Değer" msgid "Blank components" msgstr "Boş bileşenler" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Blockbridge API kimlik doğrulama şeması (jeton ya da parola)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Blockbridge API parola (kimlik doğrulama şeması için 'password')" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Blockbridge API jetonu (kimlik doğrulama şeması için 'token')" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Blockbridge API kullanıcı (kimlik doğrulama şeması için 'password')" msgid "Blockbridge api host not configured" msgstr "Blockbridge api istemcisi yapılandırılmadı" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "" "Blockbridge geçersiz kimlik doğrulama şeması '%(auth_scheme)s' ile " "yapılandırıldı" msgid "Blockbridge default pool does not exist" msgstr "Blockbridge öntanımlı havuz yok" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Blockbridge parolası yapılandırılamadı (kimlik doğrulama şeması 'password' " "için gerekli)" msgid "Blockbridge pools not configured" msgstr "Blockbridge havuzları yapılandırılmamış" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Blockbridge jetonu yapılandırılmamış (kimlik doğrulama şeması için gerekli " "'token')" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Blockbridge kullanıcısı yapılandırılmadı (kimlik doğrulama şeması 'password' " "için gerekli)" msgid "CHAP secret should be 12-16 bytes." msgstr "CHAP gizi 12-16 bayt olmalı." #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "CLI İstisnası çıktısı:\n" " komut: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI İstisna çıktısı:\n" " komut: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." 
msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E VDisk-to-host eşleştirmesi oluşturulmadı çünkü VDisk zaten bir " "istemciye eşleştirilmiş.\n" "\"" msgid "CONCERTO version is not supported" msgstr "CONCERTO sürümü desteklenmiyor" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) dizide mevcut değil" #, python-format msgid "Can not translate %s to integer." msgstr "%s, tam sayı değere çevrilemez." #, python-format msgid "Can't open config file: %s" msgstr "Yapılandırma dosyası açılamıyor: %s" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " "çünkü hiçbir mantıksal sürücü türüne sahip değil." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " "çünkü mantıksal sürücü bulunamıyor." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " "çünkü mantıksal sürücü yok." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " "çünkü mantıksal sürücü geçersiz bir durumda: %(status)s. Geçerli durumlar: " "%(valid)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " "çünkü %(volume_type)s mantıksal sürücü türü grup tarafından desteklenmiyor." #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "Zaten eklenmiş mantıksal sürücü %s eklenemez; çoklu ekleme " "'netapp_enable_multiattach' yapılandırma seçeneğiyle kapatılmış." msgid "Cannot connect to ECOM server." msgstr "ECOM sunucusuna bağlanılamıyor." #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "%(src_vol_size)s boyutunda mantıksal sürücüden %(vol_size)s boyutunda klon " "oluşturulamıyor" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "%(group)s tutarlılık grubu oluşturulamıyor çünkü %(snap)s anlık sistem " "görüntüsü geçerli bir durumda değil. Geçerli durumlar: %(valid)s." msgid "Cannot create encryption specs. Volume type in use." msgstr "" "Şifreleme özellikleri oluşturulamıyor. Mantıksal sürücü türü kullanımda." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "%s disk biçiminin imajı oluşturulamıyor. Yalnızca vmdk disk biçimi kabul " "edilir." #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "Maskeleme görünümü oluşturulamıyor: %(maskingViewName)s. 
" #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "'netapp_enable_multiattach' true olarak ayarlandığında ESeries dizisinde " "%(req)s mantıksal sürücüden fazlası oluşturulamaz." #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "%(sgGroupName)s ismine sahip bir depolama grubu oluşturulamıyor ya da " "bulunamıyor." #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "%(snap_size)s boyutunda anlık görüntüden %(vol_size)s boyutunda mantıksal " "sürücü oluşturulamıyor" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "%s boyutunda mantıksal sürücü oluşturulamıyor: 8GB katı değil." #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "%(name)s adı ve %(extra_specs)s özellikleri ile volume_type oluşturulamıyor" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "Anlık görüntüler varken LUN %s silinemez." msgid "Cannot delete encryption specs. Volume type in use." msgstr "Şifrelem özellikleri silinemez. Mantıksal sürücü türü kullanımda." msgid "Cannot execute /sbin/mount.sofs" msgstr "/sbin/mount.sofs yürütülemiyor" #, python-format msgid "Cannot find CG group %s." msgstr "CG grubu %s bulunamadı." #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "%(storage_system)s depolama sistemi için Kontrolcü Yapılandırma Servisi " "bulunamıyor." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" "%s anlık görüntüsü için mantıksal sürücü oluşturmak için Çoğaltma Servisi " "bulunamıyor." #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "%s anlık görüntüsünü silmek için Çoğaltma Servisi bulunamıyor." #, python-format msgid "Cannot find Replication service on system %s." msgstr "%s sisteminde Çoğaltma servisi bulunamadı." #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "" "Mantıksal sürücü bulunamıyor: %(id)s. işlemi yönetmeyi bırak. Çıkılıyor..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "" "Mantıksal sürücü bulunamıyor: %(volumename)s. İşlemi genişlet. Çıkılıyor..." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "%(volumeName)s mantıksal sürücüsü için aygıt numarası bulunamıyor." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "mcs_id channel_id: %(channel_id)s tarafından alınamıyor." msgid "Cannot get necessary pool or storage system information." msgstr "Gerekli havuz ya da depolama sistem bilgisi alınamıyor." #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "Depolama grubu oluşturulamıyor ya da alınamıyor: %(volumeName)s mantıksal " "sürücüsü için %(sgGroupName)s " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "%(igGroupName)s başlatıcı grubu alınamıyor ya da oluşturulamıyor. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "Bağlantı noktası grubu alınamıyor: %(pgGroupName)s. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. 
" msgstr "" "Depolama grubu: %(sgGroupName)s %(maskingViewInstanceName)s maskeleme " "görünümünden alınamıyor. " #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "%(sps)s için desteklenen boyut aralığı alınamıyor. Dönüş kodu: %(rc)lu. " "Hata: %(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "FAST ilkesi için varsayılan depolama grubu alınamıyor: %(fastPolicyName)s." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "Scality SOFS bağlanamıyor, hatalar için syslog dosyasını kontrol edin" msgid "Cannot ping DRBDmanage backend" msgstr "DRBDmanage art alanda çalışan uygulamasına ping atılamıyor" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "%(host)s üzerine %(id)s mantıksal sürücüsü yerleştirilemiyor" msgid "Cannot register resource" msgstr "Kaynak kaydedilemez" msgid "Cannot register resources" msgstr "Kaynaklar kaydedilemez" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "%(group_id)s tutarlılık grubundan %(volume_id)s mantıksal sürücüsü " "kaldırılamaz çünkü grup içinde değil." msgid "Cannot retype from one 3PAR array to another." msgstr "Bir 3PAR dizisinden diğerine retype yapılamaz." msgid "Cannot retype to a CPG in a different domain." msgstr "Bir CPG'ye farklı alanda retype yapılamaz." msgid "Cannot retype to a snap CPG in a different domain." msgstr "Farklı bir alandaki bir kavrama CPG'ye retype yapılamaz." msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "vgc-cluster komutu çalıştırılamıyor, lütfen yazılımın kurulu ve izinlerin " "doğru ayarlanmış olduğundan emin olun." msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "Hem hitachi_serial_number hem hitachi_unit_name ayarlanamaz." msgid "Cannot specify both protection domain name and protection domain id." msgstr "Hem koruma alan ismi he koruma alan kimliği belirtilemez." msgid "Cannot specify both storage pool name and storage pool id." msgstr "" "Depolama havuzu ismi ve depolama havuzu kimliği aynı anda belirtilemez." #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "%(group_id)s tutarlılık grubu güncellenemiyor çünkü hiçbir geçerli ad, " "tanımlama, add_volumes ya da remove_volumes sağlanmadı." msgid "Cannot update encryption specs. Volume type in use." msgstr "Şifreleme özellikleri güncellenemez. Mantıksal sürücü türü kullanımda." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "volume_type %(id)s güncellenemiyor" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "Nesnenin varlığı doğrulanamıyor:%(instanceName)s." #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "CgSnapshot %(cgsnapshot_id)s bulunamadı." msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost boş. Hiçbir tutarlılık grubu oluşturulamayacak." 
msgid "Cgsnapshot status must be available or error" msgstr "Cgsnapshot durumu kullanılabilir ya da hata olmalıdır" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "Aşağıdaki kaynaklar için değiştirme kullanımı 0'dan daha az yapacak:" "%(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "Bu sürücüye atanmış ZFS paylaşımı için erişim izinlerini kontrol edin." msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "Http servisinin durumunu kontrol et. Ayrıca https bağlantı noktası " "numarasının cinder.conf'da belirtilenle aynı olduğundan emin olun." msgid "Chunk size is not multiple of block size for creating hash." msgstr "Özet oluşturmak için parça boyutu blok boyutunun katı değil." #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "Çoğaltma özelliği %(storageSystem)s üzerinde lisanslı değil." #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "CLI içindeki %(cmd)s komutu bloklandı ve iptal edildi" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition: %s zaman aşımı" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s zaman aşımına uğradı." msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "Sıkıştırma Etkinleştirici kurulu değil. Sıkıştırılmış mantıksal sürücü " "oluşturulamıyor." #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "Yapılandırma dosyası %(configurationFile)s bulunamıyor." #, python-format msgid "Configuration value %s is not set." msgstr "Yapılandırma değeri %s ayarlanmamış." msgid "Configured host type is not supported." msgstr "Yapılandırılan istemci türü desteklenmiyor." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "%s mantıksal sürücü türündeki QoS özellikleri çelişiyor: QoS özelliği " "mantıksal sürücü türüne bağlı olduğunda, mantıksal sürücü türü ek " "özelliklerinde eski \"netapp:qos_policy_group\" özelliğine izin verilmez." #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Glance bağlantısı başarısız oldu: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Swift bağlantısı başarısız oldu: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "Bağlayıcı şunu sağlamıyor: %s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "Bağlayıcı gerekli bilgilere sahip değil: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "%s tutarlılık grubu hala mantıksal sürücüler içeriyor. Bunları silmek için " "zorlama bayrağı gereklidir." #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "Tutarlılık grubu %s hala bağımlı cgsnapshots sahiptir." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "Tutarlılık grubu boş. Hiçbir cgsnapshot oluşturulamayacaktır." 
#, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "Tutarlılık grubu durumu kullanılabilir ya da hata olmalıdır ancak mevcut " "durum: %s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" "Tutarlılık grubu durumu kullanılabilir olmalıdır ancak mevcut durum: %s." #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "TutarlılıkGrubu %(consistencygroup_id)s bulunamadı." msgid "Container" msgstr "Kap" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "Kap biçimi: %s VMDK sürücüsü tarafından desteklenmiyor, yalnızca 'bare' " "desteklenir." msgid "Container size smaller than required file size." msgstr "Kap boyutu gerekli dosya boyutundan küçük." msgid "Content type not supported." msgstr "İçerik türü desteklenmiyor." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "" "Kontrolcü Yapılandırma Servisi %(storageSystemName)s üzerinde bulunamadı." #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "Kontrolcü IP '%(host)s' çözülemedi: %(e)s." #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "%(f1)s e dönüştürüldü, ama biçim şu an %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" "%(vol_format)s biçimine dönüştürüldü ancak şimdiki biçim %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "Raw biçimine dönüştürüldü ancak şu anda biçim %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "Ham hale dönüştürüldü, ama biçim artık %s." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "Mantıksal sürücü kopyalama görevi başarısız: convert_to_base_volume: id=" "%(id)s, durum=%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "Metadata %(src_type)s %(src_id)s den %(vol_id)s e kopyalanıyor." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "GPFS küme kimliği bulunamadı: %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "GPFS dosya sistemi aygıtı bulunamadı: %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" "%(type_id)s türü ile %(volume_id)s mantıksal sürücüsü için bir istemci " "bulunamadı." #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s'deki yapılandırma bulunamadı" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "" "%(volumeName)s mantıksal sürücüsü için iSCSI dışa aktarımı bulunamadı." #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "%s mantıksal sürücü için iSCSI dışa aktarımı bulunamadı" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "%(volume_id)s mantıksal sürücüsü için iSCSI hedefi bulunamadı." #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "Anahtar %(cmd)s: %(out)s komutu çıktısında bulunamadı." #, python-format msgid "Could not find parameter %(param)s" msgstr "%(param)s parametresi bulunamadı" #, python-format msgid "Could not find target %s" msgstr "%s hedefi bulunamadı" msgid "Could not get system name." 
msgstr "Sistem ismi alınamadı." #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "%(path)s den yapıştırma uygulaması '%(name)s' yüklenemedi" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "%s okunamadı. sudo ile yeniden çalıştırılıyor" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "Yapılandırma %(file_path)s yoluna kaydedilemedi: %(exc)s" #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Yedek oluşturma durduruldu, beklenen yedek durumu %(expected_status)s ancak " "mevcut yedek durumu %(actual_status)s." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Yedek oluşturma durduruldu, beklenen mantıksal sürücü durumu " "%(expected_status)s ancak mevcut durum %(actual_status)s." msgid "Create export for volume failed." msgstr "Mantıksal sürücü için dışa aktarım oluşturma başarısız oldu." msgid "Create manager volume flow failed." msgstr "Yönetici mantıksal sürücü akışı oluşturma işlemi başarısız oldu." msgid "Create volume failed." msgstr "Mantıksal sürücü oluşturma başarısız oldu." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "Bölge kümesinin oluşturulması ve etkinleştirilmesi başarısız: (Bölge kümesi=" "%(cfg_name)s hata=%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "Bölge kümesinin oluşturulup etkinleştirilmesi başarısız: (Bölge kümesi=" "%(zoneset)s hata=%(err)s)." #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "%(begin_period)s %(end_period)s aralığı için kullanımlar oluşturuluyor" msgid "Current host isn't part of HGST domain." msgstr "Mevcut istemci HGST alanının parçası değildir." #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "%(type)s türü ile %(id)s mantıksal sürücüsü için mevcut istemci geçersizdir, " "taşımaya izin verilmez" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "%(vol)s mantıksal sürücüsü için şu an eşleştirilmiş istemci %(group)s ile " "desteklenmeyen istemci grubunda." msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "ÖNERİLMİYOR: Cinder API v1 sürümünü dağıt." msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "DRBDmanage sürücü kurulum hatası: bazı gerekli kütüphaneler (dbus, " "drbdmanage.*) bulunamadı." #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage bir kaynak (\"%(res)s\") bekledi, alınan %(n)d" msgid "Data ONTAP API version could not be determined." msgstr "Veri ONTAP API sürümü belirlenemedi." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "7-Kipte çalışan Veri ONTAP QoS ilke gruplarını desteklemiyor." #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup geçerli bir hazırlık türü, ama WSAPI sürümü '%(dedup_version)s' " "gerektirir. '%(version)s' sürümü kurulu." 
msgid "Dedup luns cannot be extended" msgstr "Dedup lun'lar büyütülemez" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "Kopyaları kaldırma etkinleştirici kurulu değil. Kopyaları kaldırılmış " "mantıksal sürücü oluşturulamıyor" msgid "Default pool name if unspecified." msgstr "Belirtilmezse öntanımlı havuz adı." #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "Kaynak için öntanımlı kota: %(res)s öntanımlı kota bayrağı: quota_%(res)s " "ile ayarlanır, ancak şu anda önerilmiyor. Lütfen öntanımlı kota için " "öntanımlı kota sınıfı kullanın." msgid "Default volume type can not be found." msgstr "Öntanımlı mantıksal sürücü türü bulunamadı." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "Ortaya çıkarılan havuzlar ve onların ilişkili arka uç sorgu karakter " "dizilerini tanımlar" #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Yedek silme işlemi durduruldu, şu anda yapılandırılan yedekleme servisi " "[%(configured_service)s], bu yedeğin [%(backup_service)s] oluşturulması için " "kullanılan yedekleme servisi değildir." msgid "Delete consistency group failed." msgstr "Tutarlılık grubu silme başarısız oldu." #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "" "Mantıksal sürücünün anlık görüntüsünün silinmesi %s durumunda desteklenmiyor." #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup durduruldu, beklenen yedekleme durumu %(expected_status)s " "ancak alınan %(actual_status)s." msgid "Deleting volume from database and skipping rpc." msgstr "Veritabanından mantıksal sürücü siliniyor ve rpc atlanıyor." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "Bölgelerin silinmesi başarısız: (komut=%(cmd)s hata=%(err)s)." msgid "Describe-resource is admin only functionality" msgstr "Kaynak-tanımla sadece yönetici işlevidir" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "Hedef migration_status %(stat)s durumuna sahip, beklenen %(exp)s." msgid "Destination host must be different than the current host." msgstr "Hedef istemci mevcut istemciden farklı olmalıdır." msgid "Destination volume not mid-migration." msgstr "Hedef mantıksal sürücü taşıma ortasında değildir." msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "Mantıksal sürücü ayırma başarısız oldu: Birden fazla ek sağlandı, ancak " "hiçbir attachment_id sağlanamadı." msgid "Detach volume from instance and then try again." msgstr "Sunucudan mantıksal sürücüyü ayırın ve sonrasında tekrar deneyin." #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "%(vol_name)s adında birden fazla mantıksal sürücü tespit edildi" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "Beklenen sütun %(fun)s de bulunamadı: %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." 
msgstr "Beklenen anahtar %(key)s %(fun)s de bulunamadı: %(raw)s" msgid "Disabled reason contains invalid characters or is too long" msgstr "" "Devre dışı bırakılma nedeni geçersiz karakterler içermesi ya da çok uzun " "olmasıdır" #, python-format msgid "Domain with name %s wasn't found." msgstr "%s ismine sahip alan bulunamadı." #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Downlevel GPFS Kümesi Algılandı. GPFS Çoğaltma özelliği %(cur)s küme art " "alan işi seviyesinde etkin değil - en az %(min)s seviye olmalı." #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Sürücü bağlantı başlatamadı (hata: %(err)s)." msgid "Driver must implement initialize_connection" msgstr "Sürücü initialize_connection gerçekleştirmelidir" #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "EMC VNX Cinder Sürücü CLI istisnası: %(cmd)s (Dönüş Kodu: %(rc)s)(Çıktı: " "%(out)s)." #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO: %(slo)s veya iş yükü %(workload)s geçersiz. Önceki hata ifadesini " "geçerli değerler için inceleyin." msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "Ya hitachi_serial_number ya da hitachi_unit_name gerekli." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "Öğe Dizgi Servisi %(storageSystemName)s üzerinde bulunamadı." msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "" "Configfs'in /sys/kernel/config yolunda bağlanmış olduğunu garantileyin." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "groupInitiatorGroup: %(initiatorgroup)s üzerinde Başlatıcı Eklemede Hata: " "%(initiator)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s ." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "IQN: %(iqn)s e sahip TargetGroup: %(targetgroup)s a eklemede hata Dönüş " "kodu: %(ret.status)d İleti: %(ret.data)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "%(vol)s mantıksal sürücüsü eklenirken hata." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Çoğaltılmış Mantıksal Sürücü Oluşturmada Hata: %(cloneName)s Dönüş kodu: " "%(rc)lu. Hata: %(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Çoğaltılmış Mantıksal Sürüc Oluşturmada Hata: Mantıksal Sürücü: " "%(cloneName)s Kaynak Mantıksal Sürücü:%(sourceName)s. Dönüş kodu: %(rc)lu. " "Hata: %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Grup Oluşturmada Hata: %(groupName)s. Dönüş kodu: %(rc)lu. Hata: %(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Maskeleme Görünümü Oluşturmada Hata: %(groupName)s. Dönüş kodu: %(rc)lu. " "Hata: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. 
#, python-format
msgid ""
"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s."
msgstr ""
"%(volumename)s Mantıksal Sürücünün Oluşturulmasında hata. Dönüş kodu: "
"%(rc)lu. Hata: %(error)s."

#, python-format
msgid ""
"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return "
"code: %(rc)lu. Error: %(error)s."
msgstr ""
"CreateGroupReplica Hatası: kaynak: %(source)s hedef: %(target)s. Dönüş kodu: "
"%(rc)lu. Hata: %(error)s."

#, python-format
msgid ""
"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: "
"%(ret.status)d Message: %(ret.data)s ."
msgstr ""
"Başlatıcı Oluşturmada Hata: %(initiator)s Rumuz: %(alias)s Dönüş kodu: "
"%(ret.status)d İleti: %(ret.data)s ."

#, python-format
msgid ""
"Error Creating Project: %(project)s on Pool: %(pool)s Return code: "
"%(ret.status)d Message: %(ret.data)s ."
msgstr ""
"Proje Oluşturmada Hata: %(project)s Havuz: %(pool)s Dönüş kodu: "
"%(ret.status)d İleti: %(ret.data)s ."

#, python-format
msgid ""
"Error Creating Share: %(name)s Return code: %(ret.status)d Message: "
"%(ret.data)s."
msgstr ""
"Paylaşım Oluşturmada Hata: %(name)s Dönüş kodu: %(ret.status)d İleti: "
"%(ret.data)s."

#, python-format
msgid ""
"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s "
"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s."
msgstr ""
"Anlık Görüntü Oluşturmada Hata: %(snapshot)s Mantıksal Sürücü: %(lun)s "
"Havuz: %(pool)s Proje: %(project)s Dönüş kodu: %(ret.status)d İleti: "
"%(ret.data)s."

#, python-format
msgid ""
"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s "
"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s."
msgstr ""
"Anlık Görüntü Oluşturmada Hata: %(snapshot)s onshare: %(share)s Havuz: "
"%(pool)s Proje: %(project)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s."

#, python-format
msgid ""
"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: "
"%(ret.data)s ."
msgstr ""
"Hedef Oluşturmada Hata: %(alias)s Dönüş kodu: %(ret.status)d İleti: "
"%(ret.data)s ."

#, python-format
msgid ""
"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: "
"%(ret.status)d Message: %(ret.data)s ."
msgstr ""
"IQN: %(iqn)s e sahip TargetGroup: %(targetgroup)s oluşturmada hata Dönüş "
"kodu: %(ret.status)d İleti: %(ret.data)s ."

#, python-format
msgid ""
"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d "
"Message: %(ret.data)s."
msgstr ""
"Mantıksal Sürücü Oluşturmada Hata: %(lun)s Boyut: %(size)s Dönüş kodu: "
"%(ret.status)d İleti: %(ret.data)s."

#, python-format
msgid ""
"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s."
msgstr ""
"Yeni bileşik Mantıksal sürücü oluşturmada hata Dönüş kodu: %(rc)lu. Hata: "
"%(error)s."

msgid "Error Creating unbound volume on an Extend operation."
msgstr "Büyütme işleminde bağımsız mantıksal sürücü oluşturmada hata."

msgid "Error Creating unbound volume."
msgstr "Bağımsız mantıksal sürücü oluşturmada hata."

#, python-format
msgid ""
"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s."
msgstr ""
"Mantıksal Sürücü Silme Hatası: %(volumeName)s. Dönüş kodu: %(rc)lu. Hata: "
"%(error)s."
#, python-format
msgid ""
"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s "
"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s."
msgstr ""
"Anlık Görüntü Silmede Hata: %(snapshot)s Mantıksal Sürücü: %(lun)s Havuz: "
"%(pool)s Proje: %(project)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s."

#, python-format
msgid ""
"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: "
"%(ret.status)d Message: %(ret.data)s ."
msgstr ""
"Başlatıcıları Almada Hata: InitiatorGroup: %(initiatorgroup)s Dönüş kodu: "
"%(ret.status)d İleti: %(ret.data)s ."

#, python-format
msgid ""
"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return "
"code: %(ret.status)d Message: %(ret.data)s."
msgstr ""
"Paylaşım Almada Hata: %(share)s %(pool)s Havuzunda Proje: %(project)s Dönüş "
"kodu: %(ret.status)d İleti: %(ret.data)s."

#, python-format
msgid ""
"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: "
"%(ret.data)s ."
msgstr ""
"Hedef Almada Hata: %(alias)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s ."

#, python-format
msgid ""
"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return "
"code: %(ret.status)d Message: %(ret.data)s."
msgstr ""
"Mantıksal Sürücü Almada Hata: %(lun)s Havuz: %(pool)s Proje: %(project)s "
"Dönüş kodu: %(ret.status)d İleti: %(ret.data)s."

#, python-format
msgid ""
"Error Migrating volume from one pool to another. Return code: %(rc)lu. "
"Error: %(error)s."
msgstr ""
"Mantıksal sürücünün bir havuzdan diğerine göçünde hata. Dönüş kodu: "
"%(rc)lu. Hata: %(error)s."

#, python-format
msgid ""
"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: "
"%(error)s."
msgstr ""
"Maskeleme görünümünü değiştirmede hata : %(groupName)s. Dönüş kodu: "
"%(rc)lu. Hata: %(error)s."

#, python-format
msgid ""
"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s."
msgstr ""
"Göç oturumunu sonlandırmada hata. Dönüş kodu: %(rc)lu. Hata: %(error)s."

#, python-format
msgid ""
"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: "
"%(ret.data)s."
msgstr ""
"Başlatıcının Doğrulanmasında Hata: %(iqn)s Dönüş kodu: %(ret.status)d İleti: "
"%(ret.data)s."

#, python-format
msgid ""
"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: "
"%(ret.data)s."
msgstr ""
"Havuzu Doğrulamada Hata: %(pool)s Dönüş kodu: %(ret.status)d İleti: "
"%(ret.data)s."

#, python-format
msgid ""
"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: "
"%(ret.status)d Message: %(ret.data)s."
msgstr ""
"%(pool)s Havuzundaki %(project)s Projesinin Doğrulanmasında Hata Dönüş kodu: "
"%(ret.status)d İleti: %(ret.data)s."

#, python-format
msgid ""
"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: "
"%(ret.data)s."
msgstr ""
"Servis Doğrulamada Hata: %(service)s Dönüş kodu: %(ret.status)d İleti: "
"%(ret.data)s."

#, python-format
msgid ""
"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: "
"%(ret.data)s."
msgstr ""
"Hedefin Doğrulanmasında Hata: %(alias)s Dönüş kodu: %(ret.status)d İleti: "
"%(ret.data)s."

#, python-format
msgid ""
"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s "
"Return code: %(ret.status)d Message: %(ret.data)s."
msgstr ""
"Paylaşımın Doğrulanmasında Hata: %(share)s Proje: %(project)s ve Havuz: "
"%(pool)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s."
#, python-format
msgid ""
"Error adding Volume: %(volumeName)s with instance path: "
"%(volumeInstancePath)s."
msgstr ""
"%(volumeInstancePath)s sunucu yoluna sahip %(volumeName)s mantıksal "
"sürücüsünü eklemede hata."

#, python-format
msgid ""
"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. "
"Error: %(error)s."
msgstr ""
"Başlatıcı, gruba eklenemedi: %(groupName)s. Dönüş kodu: %(rc)lu. Hata: "
"%(error)s."

#, python-format
msgid "Error adding volume to composite volume. Error is: %(error)s."
msgstr ""
"Mantıksal sürücünün bileşik mantıksal sürücüye eklenmesinde hata. Hata: "
"%(error)s."

#, python-format
msgid "Error appending volume %(volumename)s to target base volume."
msgstr ""
"%(volumename)s mantıksal sürücüsünün hedef taban mantıksal sürücüye "
"eklenmesinde hata."

#, python-format
msgid ""
"Error associating storage group : %(storageGroupName)s. To fast Policy: "
"%(fastPolicyName)s with error description: %(errordesc)s."
msgstr ""
"Depolama grubuyla ilişkilendirmede hata : %(storageGroupName)s. Fast "
"İlkesine: %(fastPolicyName)s %(errordesc)s hata tanımıyla."

#, python-format
msgid ""
"Error break clone relationship: Sync Name: %(syncName)s Return code: "
"%(rc)lu. Error: %(error)s."
msgstr ""
"Çoğaltma bağını kırmada hata: Eşzamanlama İsmi: %(syncName)s Dönüş kodu: "
"%(rc)lu. Hata: %(error)s."

msgid "Error connecting to ceph cluster."
msgstr "Ceph kümesine bağlanırken hata."

#, python-format
msgid "Error connecting via ssh: %s"
msgstr "SSH yoluyla bağlanırken hata: %s"

#, python-format
msgid "Error creating volume: %s."
msgstr "Mantıksal sürücü oluşturmada hata: %s."

msgid "Error deleting replay profile."
msgstr "Yeniden oynatma profilinin silinmesinde hata."

#, python-format
msgid "Error deleting volume %(vol)s: %(err)s."
msgstr "%(vol)s mantıksal sürücüsünün silinmesinde hata: %(err)s."

#, python-format
msgid "Error during evaluator parsing: %(reason)s"
msgstr "Değerlendirici ayrıştırma sırasında hata: %(reason)s"

#, python-format
msgid ""
"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d "
"Message: %(ret.data)s ."
msgstr ""
"Paylaşım düzenlemede hata: %(share)s %(pool)s Havuzunda Dönüş kodu: "
"%(ret.status)d İleti: %(ret.data)s ."

#, python-format
msgid ""
"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported "
"on your iSCSI port %(port)d on ip %(ip)s."
msgstr ""
"NetworkPortal için iSER etkinleştirilirken hata: lütfen %(ip)s üzerindeki "
"%(port)d iSCSI bağlantı noktanızın RDMA'yı desteklediğinden emin olun."

#, python-format
msgid "Error encountered during cleanup of a failed attach: %(ex)s"
msgstr "Başarısız eklemenin temizlenmesi sırasında hata oluştu: %(ex)s"

#, python-format
msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s."
msgstr "CloudByte API [%(cmd)s] yürütülürken hata, Hata: %(err)s."

msgid "Error executing EQL command"
msgstr "EQL komutu yürütülürken hata"

#, python-format
msgid "Error executing command via ssh: %s"
msgstr "SSH ile komut çalıştırmada hata: %s"

#, python-format
msgid "Error extending volume %(vol)s: %(err)s."
msgstr "%(vol)s mantıksal sürücüsünün büyütülmesinde hata: %(err)s."

#, python-format
msgid "Error extending volume: %(reason)s"
msgstr "Mantıksal sürücü genişletilirken hata: %(reason)s"

#, python-format
msgid "Error finding %(name)s."
msgstr "%(name)s bulunurken hata."

#, python-format
msgid "Error finding %s."
msgstr "%s bulunurken hata."

#, python-format
msgid "Error getting domain id from name %(name)s: %(err)s."
msgstr "%(name)s isimden alan kimliği almada hata: %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "%(name)s isminden alan kimliği almada hata: %(id)s." msgid "Error getting initiator groups." msgstr "Başlatıcı grupların alınmasında hata." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "%(pool)s isimden havuz kimliği almada hata: %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "Havuz kimliği %(pool_name)s isminden alınamadı: %(err_msg)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Sürüm almada hata: svc: %(svc)s.Dönüş kodu: %(ret.status)d İleti: " "%(ret.data)s." #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "SolidFire API yanıtında hata: veri=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "%(size)d GB boyutundaki %(space)s için space-create sırasında hata" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "" "%(size)d ek GB'ye sahip %(space)s mantıksal sürücüsü için space-extend " "sırasında hata" #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "%(vol)s mantıksal sürücüsü eşleştirilirken hata. %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Kopyanın eşzamanlanmasının değiştirilmesinde hata: %(sv)s işlem: " "%(operation)s. Dönüş kodu: %(rc)lu. Hata: %(error)s." #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Servisi değiştirmede hata: %(service)s Dönüş kodu: %(ret.status)d İleti: " "%(ret.data)s." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "%s cgsnapshot oluşturulurken hata oluştu." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "cgsnapshot %s silinirken hata oluştu." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "%s tutarlılık grubu güncellenirken hata oluştu." #, python-format msgid "Error parsing config file: %s" msgstr "Yapılandırma dosyasını ayrıştırmada hata: %s" msgid "Error promoting secondary volume to primary" msgstr "İkincil mantıksal sürücünün birincil hale getirilmesinde hata" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "%(vol)s mantıksal sürücüsü kaldırılırken hata. %(error)s." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "" "%(vol)s mantıksal sürücüsünün havuz bağlantısını ayırmada hata. %(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "Mantıksal sürücü klonu: %(clone)s klon boyutunu doğrulamada hata Boyut: " "%(size)d Anlık görüntü: %(snapshot)s" #, python-format msgid "Error while checking transaction status: %s" msgstr "Aktarım durumu kontrol edilirken hata: %s" #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "Ssh ile veri almada hata: (komut=%(cmd)s hata=%(err)s)." #, python-format msgid "Error while requesting %(service)s API." msgstr "API %(service)s istenirken hata." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." 
msgstr "Bölgeleme CLI'si çalıştırılırken hata: (komut=%(cmd)s hata=%(err)s)." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "%(volume_id)s mantıksal sürücüsü için zamanlama denemeleri azami " "%(max_attempts)d sınırı aşıldı" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "" "Meta mantıksal sürücünün %(volumename)s hedef mantıksal sürücüye " "eklenmesinde istisna." #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "Öge kopyası oluşturma sırasında istisna. Kopya adı: %(cloneName)s Kaynak " "adı: %(sourceName)s Ek özellikler: %(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "_select_ds_for_volume'de istisna: %s." #, python-format msgid "Exception: %s" msgstr "İstisna: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Bir uuid bekleniyor ancak alınan %(uuid)s." #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "node_count için tam sayı beklendi, svcinfo lsiogrp şunu döndürdü: %(node)s." #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "%(cmd)s CLI komutundan çıktı beklenmiyordu, %(out)s alındı." #, python-format msgid "Expected volume size was %d" msgstr "Beklenen mantıksal sürücü boyutu %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Yedek dışa aktarımı durduruldu, beklenen yedekleme durumu " "%(expected_status)s ancak alınan %(actual_status)s." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Kayıt dışa aktarımı durduruldu, şu anda yapılandırılan yedekleme servisi " "[%(configured_service)s], bu yedeğin [%(backup_service)s] oluşturulması için " "kullanılan yedekleme servisi değildir." msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "Bu sürücücü için mantıksal sürücü genişletme sadece anlık sistem görüntüsü " "olmadığında desteklenir." msgid "Extend volume not implemented" msgstr "Mantıksal sürücü genişletme uygulanmadı" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "FAST VP Etkinleştirici kurulu değil. Mantıksal sürücü için aşama ilkesi " "ayarlanamıyor" msgid "FAST is not supported on this array." msgstr "FAST bu dizi üzerinde desteklenmiyor." #, python-format msgid "Faield to unassign %(volume)s" msgstr "%(volume)s ataması kaldırılamadı" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "fabric=%(fabric)s için bağlantı ekleme başarısız: Hata: %(err)s" msgid "Failed cgsnapshot" msgstr "cgsnapshot başarısız oldu" #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" "%(volname)s mantıksal sürücüsü için anlık görüntü oluşturma başarısız: " "%(response)s." #, python-format msgid "Failed getting details for pool %s." msgstr "%s havuzu için detayların getirilmesi başarısız." 
#, python-format
msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s"
msgstr "fabric=%(fabric)s için bağlantı kaldırma başarısız: Hata: %(err)s"

#, python-format
msgid "Failed to Extend Volume %(volname)s"
msgstr "Mantıksal Sürücü %(volname)s Genişletilemedi"

#, python-format
msgid "Failed to Login to 3PAR (%(url)s) because %(err)s"
msgstr "3PAR'a giriş başarısız (%(url)s) çünkü %(err)s"

msgid "Failed to access active zoning configuration."
msgstr "Etkin bölgeleme yapılandırmasına erişim başarısız."

#, python-format
msgid "Failed to access zoneset status:%s"
msgstr "Bölge kümesi durumuna erişim başarısız: %s"

#, python-format
msgid ""
"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: "
"%(ret)s, stderr: %(err)s)"
msgstr ""
"Kaynak kilidi alma başarısız. (seri: %(serial)s, inst: %(inst)s, ret: "
"%(ret)s, stderr: %(err)s)"

#, python-format
msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries."
msgstr ""
"%(vol)s in %(sg)s e eklenmesi %(retries)s denemeden sonra başarısız oldu."

msgid "Failed to add the logical device."
msgstr "Mantıksal aygıt ekleme başarısız."

msgid "Failed to add zoning configuration."
msgstr "Bölgeleme yapılandırması eklenmesi başarısız."

#, python-format
msgid ""
"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: "
"%(reason)s)"
msgstr ""
"iSCSI başlatıcı IQN atanamadı. (bağlantı noktası: %(port)s, sebep: "
"%(reason)s)"

#, python-format
msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s."
msgstr "qos_specs ilişkilendirilemedi: %(type_id)s türü ile %(specs_id)s."

#, python-format
msgid "Failed to attach iSCSI target for volume %(volume_id)s."
msgstr "%(volume_id)s mantıksal sürücüsü için iSCSI hedefi eklenemedi."

#, python-format
msgid "Failed to backup volume metadata - %s"
msgstr "Mantıksal sürücü metadata'sı yedeklenemedi - %s"

#, python-format
msgid ""
"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' "
"already exists"
msgstr ""
"Mantıksal sürücü metadata'sı yedeklenemedi - Metadata yedekleme nesnesi "
"'backup.%s.meta' zaten var"

#, python-format
msgid "Failed to clone volume from snapshot %s."
msgstr "%s anlık sistem görüntüsünden mantıksal sürücü kopyalanamadı."

#, python-format
msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s"
msgstr "%(vendor_name)s dizisine bağlantı başarısız %(host)s: %(err)s"

msgid "Failed to connect to array"
msgstr "Diziye bağlanma başarısız"

#, python-format
msgid "Failed to copy image to volume: %(reason)s"
msgstr "İmaj mantıksal sürücüye kopyalanamadı: %(reason)s"

#, python-format
msgid "Failed to copy metadata to volume: %(reason)s"
msgstr "Metadata mantıksal sürücüye kopyalanamadı: %(reason)s"

#, python-format
msgid "Failed to create IG, %s"
msgstr "IG oluşturma başarısız, %s"

msgid "Failed to create SolidFire Image-Volume"
msgstr "SolidFire İmaj-Mantıksal Sürücü oluşturulamadı"

#, python-format
msgid "Failed to create Volume Group: %(vg_name)s"
msgstr "Mantıksal Sürücü Grubu oluşturulamadı: %(vg_name)s"

#, python-format
msgid ""
"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)"
msgstr ""
"Bir dosya oluşturma başarısız. (dosya: %(file)s, ret: %(ret)s, stderr: "
"%(err)s)"

msgid "Failed to create api volume flow."
msgstr "Api mantıksal sürücü akışı oluşturulamadı."

#, python-format
msgid "Failed to create cg snapshot %(id)s due to %(reason)s."
msgstr "%(reason)s sebebiyle cg anlık görüntüsü %(id)s oluşturulamadı."
#, python-format
msgid "Failed to create consistency group %(id)s due to %(reason)s."
msgstr "Tutarlılık grubu %(id)s %(reason)s sebebiyle oluşturulamadı."

#, python-format
msgid "Failed to create consistency group %(id)s:%(ret)s."
msgstr "Tutarlılık grubu %(id)s oluşturma başarısız:%(ret)s."

#, python-format
msgid ""
"Failed to create consistency group %s because VNX consistency group cannot "
"accept compressed LUNs as members."
msgstr ""
"%s tutarlılık grubu oluşturulamıyor çünkü VNX tutarlılık grubu sıkıştırılmış "
"LUN'ları üye olarak kabul edemiyor."

#, python-format
msgid "Failed to create consistency group: %(cgName)s."
msgstr "Tutarlılık grubu oluşturulamadı: %(cgName)s."

#, python-format
msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s."
msgstr "Tutarlılık grubu oluşturulamıyor: %(cgid)s. Hata: %(excmsg)s."

#, python-format
msgid ""
"Failed to create consistency group: %(consistencyGroupName)s Return code: "
"%(rc)lu. Error: %(error)s."
msgstr ""
"%(consistencyGroupName)s tutarlılık grubunu oluşturma başarısız Dönüş kodu: "
"%(rc)lu. Hata: %(error)s."

#, python-format
msgid "Failed to create hardware id(s) on %(storageSystemName)s."
msgstr ""
"%(storageSystemName)s üzerinde donanım kimlik(ler)i oluşturma başarısız."

msgid "Failed to create iqn."
msgstr "Iqn oluşturulamadı."

#, python-format
msgid "Failed to create iscsi target for volume %(volume_id)s."
msgstr "%(volume_id)s mantıksal sürücüsü için iscsi hedefi oluşturulamadı."

msgid "Failed to create manage_existing flow."
msgstr "manage_existing akışı oluşturulamadı."

msgid "Failed to create map on mcs, no channel can map."
msgstr "Mcs üzerinde eşleştirme oluşturma başarısız, hiçbir kanal eşleşemez."

msgid "Failed to create map."
msgstr "Eşleştirme oluşturma başarısız."

#, python-format
msgid "Failed to create metadata for volume: %(reason)s"
msgstr "Mantıksal sürücü için metadata oluşturulamadı: %(reason)s"

msgid "Failed to create partition."
msgstr "Bölüm oluşturma başarısız."

#, python-format
msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s."
msgstr "qos_specs oluşturulamadı: %(qos_specs)s özellikleri ile %(name)s."

msgid "Failed to create replica."
msgstr "Kopya oluşturulamadı."

msgid "Failed to create scheduler manager volume flow"
msgstr "Zamanlayıcı yönetici mantıksal sürücü akışı oluşturma başarısız"

#, python-format
msgid "Failed to create snapshot %s"
msgstr "%s anlık sistem görüntüsü oluşturulamadı"

msgid "Failed to create snapshot as no LUN ID is specified"
msgstr "LUN ID belirtilmediğinden anlık görüntü oluşturma başarısız"

#, python-format
msgid "Failed to create snapshot for cg: %(cgName)s."
msgstr "cg: %(cgName)s için anlık görüntü oluşturma başarısız."

#, python-format
msgid "Failed to create snapshot for volume %s."
msgstr "%s mantıksal sürücüsü için anlık sistem görüntüsü oluşturulamadı."

#, python-format
msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s."
msgstr ""
"%(vol)s mantıksal sürücüsü üzerinde anlık görüntü ilkesi oluşturma "
"başarısız: %(res)s."

#, python-format
msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s."
msgstr ""
"%(vol)s mantıksal sürücüsü üzerinde anlık görüntü kaynak alanı oluşturma "
"başarısız: %(res)s."

msgid "Failed to create snapshot."
msgstr "Anlık görüntü oluşturma başarısız."

#, python-format
msgid ""
"Failed to create snapshot. CloudByte volume information not found for "
"OpenStack volume [%s]."
msgstr ""
"Anlık sistem görüntüsü oluşturulamadı. [%s] OpenStack mantıksal sürücüsü "
"için CloudByte mantıksal sürücü bilgisi bulunamadı."
#, python-format
msgid "Failed to create thin pool, error message was: %s"
msgstr "İnce havuz oluşturma başarısız, hata iletisi: %s"

#, python-format
msgid "Failed to create volume %s"
msgstr "%s mantıksal sürücüsü oluşturulamadı"

#, python-format
msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair."
msgstr "volume_id: %(volume_id)s için SI silinemiyor çünkü bir çifti var."

#, python-format
msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)"
msgstr "Bir mantıksal aygıt silinemedi. (LDEV: %(ldev)s, sebep: %(reason)s)"

#, python-format
msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s."
msgstr "%(reason)s sebebiyle cg anlık görüntüsü %(id)s silinemedi."

#, python-format
msgid "Failed to delete consistency group %(id)s due to %(reason)s."
msgstr "Tutarlılık grubu %(id)s %(reason)s sebebiyle silinemedi."

#, python-format
msgid "Failed to delete consistency group: %(cgName)s."
msgstr "Tutarlılık grubu silme başarısız: %(cgName)s."

#, python-format
msgid ""
"Failed to delete consistency group: %(consistencyGroupName)s Return code: "
"%(rc)lu. Error: %(error)s."
msgstr ""
"%(consistencyGroupName)s tutarlılık grubunu silme başarısız Dönüş kodu: "
"%(rc)lu. Hata: %(error)s."

#, python-format
msgid ""
"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s."
msgstr ""
"%(cgname)s tutarlılık grubu için dosya kümesi silinemedi. Hata: %(excmsg)s."

msgid "Failed to delete iqn."
msgstr "Iqn silme başarısız."

msgid "Failed to delete map."
msgstr "Eşleştirme silme başarısız."

msgid "Failed to delete partition."
msgstr "Bölüm silme başarısız."

msgid "Failed to delete replica."
msgstr "Kopya silinemedi."

#, python-format
msgid "Failed to delete snapshot %s"
msgstr "%s anlık sistem görüntüsü silinemedi"

#, python-format
msgid "Failed to delete snapshot for cg: %(cgId)s."
msgstr "cg: %(cgId)s için anlık görüntü silme başarısız."

#, python-format
msgid "Failed to delete snapshot for snapshot_id: %s because it has pair."
msgstr "snapshot_id: %s için anlık görüntü silinemiyor çünkü çifti var."

msgid "Failed to delete snapshot."
msgstr "Anlık görüntü silme başarısız."

#, python-format
msgid "Failed to delete volume %(volumeName)s."
msgstr "%(volumeName)s mantıksal sürücüsü silinemedi."

#, python-format
msgid ""
"Failed to delete volume for volume_id: %(volume_id)s because it has pair."
msgstr ""
"volume_id: %(volume_id)s için bir mantıksal sürücü silinemedi çünkü çifti "
"var."

#, python-format
msgid "Failed to detach iSCSI target for volume %(volume_id)s."
msgstr "%(volume_id)s mantıksal sürücüsü için iSCSI hedefi ayrılamadı."

msgid "Failed to determine blockbridge API configuration"
msgstr "Blockbridge API yapılandırması belirlenemedi"

msgid "Failed to disassociate qos specs."
msgstr "Qos özellikleri ilişkisi kesilemedi."

#, python-format
msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s."
msgstr ""
"%(type_id)s türündeki qos_specs: %(specs_id)s ilişiğini kesme başarısız."

#, python-format
msgid ""
"Failed to ensure snapshot resource area, could not locate volume for id %s"
msgstr ""
"Anlık görüntü kaynak alanından emin olunamadı, id %s için mantıksal sürücü "
"bulunamadı"

#, python-format
msgid ""
"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: "
"%(error)s."
msgstr ""
"CloudByte API [%(cmd)s] yürütülemedi. Http durumu: %(status)s, Hata: "
"%(error)s."
msgid "Failed to execute common command." msgstr "Yaygın komutun çalıştırılması başarısız." #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "Mantıksal sürücü dışa aktarılamadı: %(reason)s" msgid "Failed to find Storage Center" msgstr "Depolama Merkezi bulunamadı" msgid "Failed to find a vdisk copy in the expected pool." msgstr "Beklenen havuzda bir vdisk kopyası bulunamadı." msgid "Failed to find account for volume." msgstr "Mantıksal sürücü için kullanıcı bulunamadı." #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" "%(path)s yolu için dosya kümesi bulma başarısız, komut çıktısı: %(cmdout)s." #, python-format msgid "Failed to find host %s." msgstr "%s istemcisi bulunamadı." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "%s kaynak mantıksal sürücüsü için depolama havuzu bulma başarısız." #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "[%s] hesabı için CloudByte hesap ayrıntıları alınamadı." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "LUN %s için LUN hedef detayları alma başarısız" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "LUN %s için LUN hedef detaylarını alma başarısız." #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "LUN %s için LUN hedef listesi alınamadı" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "%(volume_id)s mantıksal sürücüsü için Bölüm ID'si alınamadı." #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "" "%(snapshot_id)s anlık görüntüsünden Raid Anlık Görüntü Kimliği alınamıyor." #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "%(snapshot_id)s anlık görüntüsünden Raid Anlık Görüntü Kimliği alınamadı." #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "Bir depolama kaynağı alınamadı. Sistem depolama kaynağını tekrar almaya " "çalışacak. (kaynak: %(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "%s qos özelliklerinin bütün ilişkileri alınamadı" msgid "Failed to get channel info." msgstr "Kanal bilgisi alınamadı." #, python-format msgid "Failed to get code level (%s)." msgstr "Kod seviyesi alınamadı (%s)." msgid "Failed to get device info." msgstr "Aygıt bilgisi alınamadı." #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "Alan alınamadı çünkü CPG (%s) dizide mevcut değil." #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "" "%(volume_id)s mantıksal sürücüsüne sahip %(channel_id)s kanalından ip " "alınamadı." msgid "Failed to get iqn info." msgstr "Iqn bilgisi alma başarısız." msgid "Failed to get license info." msgstr "Lisans bilgisi alma başarısız." msgid "Failed to get lv info." msgstr "lv bilgisi alınamadı." msgid "Failed to get map info." msgstr "Eşleştirme bilgisi alma başarısız." msgid "Failed to get model update from clone" msgstr "Kopyadan model güncellemesi alınamadı" msgid "Failed to get name server info." msgstr "İsim sunucusu bilgisi alınamadı." msgid "Failed to get network info." msgstr "Ağ bilgisi alma başarısız." #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." 
msgstr "Yeni havuzda yeni bölüm kimliği alınamadı: %(pool_id)s." msgid "Failed to get partition info." msgstr "Bölüm bilgisi alınamadı." #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "%(volume_id)s mantıksal sürücüsüne sahip havuz kimliği alınamadı." msgid "Failed to get replica info." msgstr "Kopya bilgisi alma başarısız." msgid "Failed to get show fcns database info." msgstr "Fcns veri tabanı bilgisi göstermeyi alma başarısız." #, python-format msgid "Failed to get size of volume %s" msgstr "%s mantıksal sürücü boyutu alınamadı" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "%s mantıksal sürücüsü için anlık sistem görüntüsü alınamadı." msgid "Failed to get snapshot info." msgstr "Anlık görüntü bilgisi alınamadı." #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "LUN %s için hedef IQN alınması başarısız" #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "LUN %s için hedef portal alınması başarısız" msgid "Failed to get targets" msgstr "Hedefler alınamadı" msgid "Failed to get wwn info." msgstr "wwn bilgisi alma başarısız." #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "%(volumeName)s mantıksal sürücüsünün %(maskingViewName)s maskeleme " "görünümüne alınması, oluşturulması ya da eklenmesi başarısız. Alınan hata " "iletisi %(errorMessage)s." msgid "Failed to identify volume backend." msgstr "Mantıksal sürücü art alanda çalışan uygulama tanımlanamadı." #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "" "%(cgname)s paylaşımı için dosya kümesi bağlantısı başarısız. Hata: " "%(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "%s Dizisine giriş başarısız (geçersiz giriş?)." #, python-format msgid "Failed to login for user %s." msgstr "%s kullanıcısı giriş yapamadı." #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "Aşağıdaki neden nedeniyle Datera kümesi uç noktasına bir istek yapılamadı: %s" #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "Mevcut mantıksal sürücü %(name)s yönetilemedi, çünkü mantıksal sürücü boyutu " "alınamadı." #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "Mevcut mantıksal sürücü %(name)s yönetilemiyor, çünkü yeniden adlandırma " "işlemi başarısız oldu: Hata iletisi: %(msg)s." #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Varolan %(name)s mantıksal sürücüsü yönetilemedi çünkü bildirilen boyut " "%(size)s kayan noktalı sayı değildi." #, python-format msgid "Failed to manage volume %s." msgstr "%s mantıksal sürücüsü yönetilemedi." #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "Bir mantıksal aygıt eşleştirilemedi. (LDEV: %(ldev)s, LUN: %(lun)s, bağlantı " "noktası: %(port)s, id: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "Mantıksal sürücüyü ilk defa göç ettirme başarısız." msgid "Failed to migrate volume for the second time." msgstr "Bir ikinci defa mantıksal sürücüyü göç ettirme başarısız." 
#, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "LUN eşleştirmesi taşınamadı. Dönüş kodu: %s" #, python-format msgid "Failed to move volume %s." msgstr "%s mantıksal sürücüsü taşınamadı." #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "Bir dosya açılamadı. (dosya: %(file)s, ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI çıktısı ayrıştırılamadı:\n" " komut: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" ":: biçeminde olması gereken " "'swift_catalog_info' yapılandırma seçeneği ayrıştırılamadı" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "Sıfır-sayfa geri kazanımı başarısız. (LDEV: %(ldev)s, sebep: %(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "" "%(volume)s mantıksal sürücüsü için dışa aktarım kaldırılamadı: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "%(volume_id)s mantıksal sürücüsü için iscsi hedefi kaldırılamadı." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "%(volumeName)s mantıksal sürücüsü varsayılan SG'den kaldırılamadı." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "%(volumeName)s varsayılan SG'den kaldırılamadı: %(volumeName)s." #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "FAST ilkesi %(fastPolicyName)s için öntanımlı depolama grubundan " "%(volumename)s kaldırılamadı." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "%(name)s adındaki mantıksal sürücü yeniden adlandırılamadı, hata iletisi: " "%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "Etkin bölgeleme yapılandırması %s alınamadı" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "Mevcut mantıksal sürücü %(name)s için QoS ayarlanamadı, Hata iletisi: " "%(msg)s." msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "" "SCST hedefi için 'Gelen kullanıcı' özniteliğinin ayarlanması başarısız." msgid "Failed to set partition." msgstr "Bölüm ayarlama başarısız." #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "%(cgname)s tutarlılık grubu için izinler ayarlanamadı. Hata: %(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "Eşleştirmesi kaldırılacak %(volume_id)s mantıksal sürücüsü için bir " "mantıksal aygıt belirtme başarısız." #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "Silinecek mantıksal aygıt belirtme başarısız. (metod: %(method)s, id: %(id)s)" msgid "Failed to terminate migrate session." msgstr "Göç oturumunu sonlandırma başarısız." #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "%(cgname)s tutarlılık grubu için dosya kümesi bağı ayırma başarısız. 
Hata: " "%(excmsg)s." #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Bir mantıksal aygıtın eşleştirilmesi kaldırılamadı. (LDEV: %(ldev)s, sebep: " "%(reason)s)" #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "Mantıksal sürücü için metadata güncellenemedi: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "Bölgeleme yapılandırması güncellemesi ya da silinmesi başarısız" #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "qos_specs güncellenemedi: %(qos_specs)s özellikleri ile %(specs_id)s." #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "" "%(model)s sürücü sağlanmış modele sahip modelin güncellenmesi başarısız" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "%(vol_id)s mantıksal sürücüsünün metadata'sının sağlanan %(src_type)s " "%(src_id)s metadata ile güncellenmesi başarısız" #, python-format msgid "Failure creating volume %s." msgstr "%s mantıksal sürücüsünün oluşturulması başarısız." #, python-format msgid "Failure getting LUN info for %s." msgstr "%s için LUN bilgisi alınması başarısız." #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "update_volume_key_value_pair başarısız: %s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "Yeni çoğaltılmış LUN'un %s e taşınması başarısız." #, python-format msgid "Failure staging LUN %s to tmp." msgstr "LUN %s'in tmp'ye hazırlanması başarısız." msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "" "Ölümcül hata: Kullanıcı NetApp mantıksal sürücülerini sorgulamaya yetkili " "değil." #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "Flexvisor %(reason)s sebebiyle %(id)s mantıksal sürücüsünü ekleyemedi." #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Flexvisor %(ret)s sebebiyle %(vol)s mantıksal sürücüsünü %(group)s grubuna " "katamadı." #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Flexvisor %(ret)s sebebiyle %(vol)s mantıksal sürücüsünü %(group)s grubundan " "çıkaramadı." #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "Flexvisor %(id)s mantıksal sürücüsnü %(reason)s sebebiyle kaldıramadı." #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "Fiber Kanal SAN Arama başarısız: %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "Fiber Kanal Bölge işlemi başarısız oldu: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "Fiber Kanal bağlantısı kontrol hatası: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "%(file_path)s dosyası bulunamadı." #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "%(path)s dosyası geçersiz %(bfile)s destek dosyasına sahip, iptal ediliyor." #, python-format msgid "File already exists at %s." msgstr "%s konumunda dosya zaten var." 
#, python-format msgid "File already exists at: %s" msgstr "Dosya konumda zaten mevcut: %s" #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "Flash Zula İlkesi WSAPI sürümü '%(fcache_version)s' gerektirir, " "'%(version)s' kurulu." #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "Flexvisor mantıksal sürücü atama başarısız.:%(id)s:%(status)s." #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Flexvisor mantıksal sürücü atama başarısız:%(id)s:%(status)s." #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor %(id)s mantıksal sürücü anlık görüntüsünü %(vgid)s grubu %(vgsid)s " "anlık görüntüsünde bulamadı." #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "" "Flexvisor mantıksal sürücü oluşturma başarısız.:%(volumeid)s:%(status)s." #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü silmede başarısız: %(status)s." #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü %(cgid)s grubuna ekleyemedi." #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor durumu olay kimliğiyle sorgulayamadığından %(id)s mantıksal " "sürücüsünü atayamadı." #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor %(id)s mantıksal sürücü atamasında başarısız: %(status)s." #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "Flexvisor %(volume)s mantıksal sürücü %(iqn)s iqn ataması başarısız." #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "" "Flexvisor %(id)s mantıksal sürücüsünü çoğaltmada başarısız: %(status)s." #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "" "Flexvisor mantıksal sürücüyü çoğaltmada başarısız (olay alma başarısız) " "%(id)s." #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" "Flexvisor %(id)s mantıksal sürücüsü için anlık görüntü oluşturma başarısız: " "%(status)s." #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "Flexvisor %(id)s mantıksal sürücüsü için anlık görüntü oluşturmada başarısız " "(olay alma başarısız)." #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "" "Flexvisor %(vgid)s grubunda %(id)s mantıksal sürücüsü oluşturmada başarısız." #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "" "Flexvisor %(volume)s mantıksal sürücüsünü oluşturmada başarısız: %(status)s." #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor mantıksal sürücü oluşturmada başarısız (alma olayı) %s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" "Flexvisor anlık görüntüden mantıksal sürücü oluşturmada başarısız %(id)s: " "%(status)s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor %(id)s anlık görüntüsünden mantıksal sürücü oluşturamadı:" "%(status)s." 
#, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor anlık görüntüden mantıksal sürücü oluşturma başarısız (olay alma " "başarısız) %(id)s." #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisr %(id)s anlık görüntüsünü silmede başarısız: %(status)s." #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor %(id)s anlık görüntüsünü silmede başarısız (olay alma başarısız)." #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü silmede başarısız: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü büyütmede başarısız: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü büyütmede başarısız:%(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "Flexvisor mantıksal sürücüsünü büyütme başarısız (olay almada başarısız) " "%(id)s." #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "Flexvisor havuz bilgisi %(id)s alamadı: %(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Flexvisor %(id)s mantıksal sürücüsünün anlık görüntü kimliğini %(vgid)s " "grubundan alamadı." #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor %(id)s mantıksal sürücüsünü %(cgid)s grubundan kaldıramadı." #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor anlık görüntüden mantıksal sürücü başlatamadı %(id)s:%(status)s." #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor anlık görüntüden mantıksal sürücü başlatamadı (olay almada " "başarısız) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "Flexvisor %(id)s mantıksal sürücü atamasını kaldıramadı: %(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "Flexvisor %(id)s mantıksal sürücü atamasını kaldıramadı (alma olayı)." #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "" "Flexvisor mantıksal sürücü atamasını kaldırma başarısız:%(id)s:%(status)s." #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor %(id)s kaynak mantıksal sürücü bilgisini bulamadı." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" "Flexvisor mantıksal sürücü atamasını kaldırma başarısız:%(id)s:%(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "" "Flexvisor mantıksal sürücüsü %(id)s %(vgid)s grubuna katılmada başarısız." #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS çalışmıyor, durum: %s." msgid "Gateway VIP is not set" msgstr "Geçit VIP ayarlanmamış" #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Glance metadata güncellenemez, mantıksal sürücü kimliği %(volume_id)s için " "%(key)s anahtarı mevcuttur" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
msgstr "" "Mantıksal sürücü/anlık sistem görüntüsü %(id)s için glance metadata " "bulunamaz." #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "%(config)s konumunda Gluster yapılandırma dosyası yok" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "DRBDmanage'den kötü yol bilgisi alındı! (%s)" msgid "HBSD error occurs." msgstr "HBSD hatası oluşur." #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "Hash blok boyutu son yedeklemeden bu yana değişti. Yeni hash blok boyutu: " "%(new)s. Eski hash blok boyutu: %(old)s. Tam bir yedekleme yapın." #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "%(tier_levels)s aşama oluşturulmadı." #, python-format msgid "Hint \"%s\" not supported." msgstr "\"%s\" ipucu desteklenmiyor." msgid "Host" msgstr "Host" #, python-format msgid "Host %(host)s could not be found." msgstr "%(host)s sunucusu bulunamadı." #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "%(host)s istemcisi x509 sertifika içerikleriyle eşleşmiyor: CommonName " "%(commonName)s." #, python-format msgid "Host %s has no FC initiators" msgstr "%s istemcisinin FC başlatıcısı yok" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "%s istemcisinin iSCSI başlatıcısı yok" #, python-format msgid "Host '%s' could not be found." msgstr "'%s' istemcisi bulunamadı." #, python-format msgid "Host group with name %s not found" msgstr "%s isimli istemci grubu bulunamadı" #, python-format msgid "Host group with ref %s not found" msgstr "%s başvurusuna sahip istemci grubu bulunamadı" msgid "Host not found" msgstr "İstemci bulunamadı" #, python-format msgid "Host type %s not supported." msgstr "İstemci türü %s desteklenmiyor." #, python-format msgid "Host with ports %(ports)s not found." msgstr "%(ports)s bağlantı noktalarına sahip istemci bulunamadı." #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "I/O grubu %(iogrp)d geçerli değil; kullanılabilir I/O grupları %(avail)s." msgid "ID" msgstr "KİMLİK" msgid "IP address/hostname of Blockbridge API." msgstr "Blockbridge API IP adresi/bilgisayar adı." msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "Sıkıştırma True olarak ayarlanırsa, rsize da ayrıca ayarlanmalı (-1 e eşit " "değil)." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "flashsystem_connection_protocol için geçersiz değer'%(prot)s' belirtilmiş: " "geçerli değer(ler) %(enabled)s." msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "storwize_svc_vol_grainsize için geçersiz değer belirtildi: 32, 64, 128 veya " "256 olarak ayarlayın." #, python-format msgid "Image %(image_id)s could not be found." msgstr "%(image_id)s imaj kaynak dosyası bulunamadı." #, python-format msgid "Image %(image_id)s is not active." msgstr "İmaj %(image_id)s etkin değil." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "%(image_id)s imajı kabul edilemez: %(reason)s" msgid "Image location not present." msgstr "İmaj konumu mevcut değil." msgid "" "ImageBusy error raised while deleting rbd volume. 
This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "Rbd mantıksal sürücüsü silinirken ImageBusy hatası yükseldi. Bu çökmüş bir " "istemciden gelen bir bağlantı yüzünden olabilir, bu durumda, 30 saniye " "geçtikten sonra silmeyi tekrar denemek çözebilir." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "Kayıt içe aktarımı başarısız oldu, içe aktarımı gerçekleştirecek yedekleme " "servisi bulunamıyor. %(service)s servisini iste" msgid "Incorrect request body format" msgstr "Geçersiz gövde biçimi isteği." msgid "Incorrect request body format." msgstr "Hatalı istek gövde biçimi." msgid "Incremental backups exist for this backup." msgstr "Bu yedek için artımlı yedeklemeler mevcut." #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Infortrend CLI istisnası: %(err)s Param: %(param)s (Dönüş Kodu: %(rc)s) " "(Çıktı: %(out)s)" msgid "Input volumes or snapshots are invalid." msgstr "Girdi mantıksal sürücüleri ve anlık sistem görüntüleri geçersizdir." #, python-format msgid "Instance %(uuid)s could not be found." msgstr "%(uuid)s örneği bulunamadı." msgid "Insufficient privileges" msgstr "Yetersiz ayrıcalıklar" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "Ceph kümesi için bağlantılar arasındaki dahili değer (saniye cinsinde)" #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Geçersiz 3PAR Alanı: %(err)s" msgid "Invalid Ceph args provided for backup rbd operation" msgstr "Rbd işlem yedeklemesi için verilen Ceph argümanları geçersiz" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "Geçersiz CgSnapshot: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "Geçersiz TutarlılıkGrubu: %(reason)s" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "Geçersiz TutarlılıkGrubu: Tutarlılık grubu durumu kullanılabilir olmalıdır " "ancak mevcut durum: %s." msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "Geçersiz TutarlılıkGrubu: Tutarlılık grubu oluşturmak için istemci yok" #, python-format msgid "Invalid IP address format: '%s'" msgstr "Geçersiz IP adres biçimi: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "%s mantıksal sürücüsü için QoS ilkesi alırken geçersiz QoS özellikleri " "algılandı" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "Geçersiz VNX kimlik doğrulama türü: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Geçersiz Virtuozzo Depolama paylaşım belirtimi: %r. Şu şekilde olmalıdır: " "[MDS1[,MDS2],...:/][:PASSWORD]." #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "Geçersiz XtremIO sürümü %(cur)s, sürüm %(min)s veya yukarısı gerekli" msgid "Invalid argument" msgstr "Geçersiz değişken" #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "Geçersiz değişken - whence=%s desteklenmez" #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "%(volume_id)s mantıksal sürücüsü için geçersiz ekleme kipi '%(mode)s'." 
#, python-format msgid "Invalid auth key: %(reason)s" msgstr "Geçersiz kimlik doğrulama anahtarı: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "Geçersiz yedekleme: %(reason)s" msgid "Invalid cgsnapshot" msgstr "Geçersiz cgsnapshot" msgid "Invalid chap user details found in CloudByte storage." msgstr "CloudByte depolamada geçersiz chap kullanıcı ayrıntıları bulundu." #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "%(name)s mantıksal sürücüsünün geçersiz bağlantı ilklendirme yanıtı" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "" "%(name)s mantıksal sürücüsünün geçersiz bağlantı ilklendirme yanıtı: " "%(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Geçersiz içerik türü %(content_type)s." msgid "Invalid credentials" msgstr "Geçersiz kimlik bilgileri" #, python-format msgid "Invalid directory: %s" msgstr "Geçersiz dizin: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "Geçersiz disk bağdaştırıcı türü: %(invalid_type)s." #, python-format msgid "Invalid disk backing: %s." msgstr "Geçersiz disk desteği: %s." #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "Geçersiz disk türü: %(disk_type)s." #, python-format msgid "Invalid disk type: %s." msgstr "Geçersiz disk türü: %s." #, python-format msgid "Invalid host: %(reason)s" msgstr "Geçersiz istemci: %(reason)s" #, python-format msgid "Invalid image href %(image_href)s." msgstr "Geçersiz %(image_href)s imaj kaynak dosyası." msgid "Invalid image identifier or unable to access requested image." msgstr "Geçersiz imaj tanımlayıcı ya da istenen imaja erişilemedi." msgid "Invalid imageRef provided." msgstr "Geçersiz imaj referansı verildi." msgid "Invalid input" msgstr "Geçersiz girdi" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Geçersiz girdi aldı: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "Geçersiz is_public süzgeci [%s]" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Geçersiz metadata boyutu: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Geçersiz metadata: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "Geçersiz bağlantı noktası tabanı: %s" #, python-format msgid "Invalid mount point base: %s." msgstr "Geçersiz paylaşım noktası tabanı: %s." #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Retype için geçersiz yeni snapCPG ismi. new_snap_cpg='%s'." #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "Geçersiz qos özellikleri: %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "Geçersiz bir hedefe geçersiz mantıksal sürücü ekleme isteği" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "Geçersiz bir kip ile geçersiz mantıksal sürücü ekleme isteği. Ekleme kipi " "'rw' ya da 'ro' olmalıdır" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Geçersiz koşul sonu %(expire)s." msgid "Invalid service catalog json." msgstr "Geçersiz servis katalogu json." #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "Geçersiz anlık sistem görüntüsü: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "Geçersiz durum: %s" #, python-format msgid "Invalid storage pool %s requested. Retype failed." 
msgstr "%s geçersiz depolama havuzu istendi. Retype başarısız." #, python-format msgid "Invalid storage pool %s specificed." msgstr "Geçersiz depolama havuzu %s belirtildi." #, python-format msgid "Invalid update setting: '%s'" msgstr "Geçersiz güncelleme ayarı: '%s'" #, python-format msgid "Invalid value '%s' for force." msgstr "Zorlama için geçersiz değer '%s'." #, python-format msgid "Invalid value '%s' for force. " msgstr "Zorlama için geçersiz değer '%s'. " #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "'bootable' için geçersiz değer: '%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "'force' için geçersiz değer: '%s'" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "'readonly' için geçersiz değer: '%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "'scheduler_max_attempts' için geçersiz değer, değer >=1 olmalıdır" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "NetApp yapılandırma seçeneği netapp_host_type için geçersiz değer." msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "NetApp yapılandırma seçeneği netapp_lun_ostype için geçersiz değer." #, python-format msgid "Invalid value for age, %(age)s" msgstr "Yaş için geçersiz değer, %(age)s" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "İstek oluşturmak için sağlanan geçersiz mantıksal sürücü boyutu: %s (boyut " "değişkeni bir tam sayı (ya da bir tam sayının karakter dizisi gösterimi) ve " "sıfırdan büyük olmalıdır)." #, python-format msgid "Invalid volume type: %(reason)s" msgstr "Geçersiz mantıksal sürücü türü: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Geçersiz mantıksal sürücü: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "Geçersiz mantıksal sürücü: %(volume_id)s mantıksal sürücüsü geçersiz " "durumda: %(status)s olduğundan dolayı %(group_id)s tutarlılık grubuna " "eklenemiyor. Geçerli durumlar: ('available', 'in-use')." #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "Geçersiz mantıksal sürücü: %(volume_id)s mantıksal sürücüsü, %(volume_type)s " "mantıksal sürücü türü grup tarafından desteklenmediğinden dolayı " "%(group_id)s tutarlılık grubuna eklenemiyor." #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "Geçersiz mantıksal sürücü: fake-volume-uuid mantıksal sürücüsü %(group_id)s " "tutarlılık grubuna eklenemiyor çünkü mantıksal sürücü bulunamıyor." #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "Geçersiz mantıksal sürücü: fake-volume-uuid mantıksal sürücüsü grupta " "olmadığından dolayı %(group_id)s tutarlılık grubundan kaldırılamıyor." #, python-format msgid "Invalid volume_type passed: %s." msgstr "Geçersiz volume_type değeri geçildi: %s." 
#, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "Sağlanan volume_type geçersiz: %s (istenen tür uyumlu değil; ya kaynak " "mantıksal sürüyü eşleştirin ya da tür değişkenini ihmal edin)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "Sağlanan volume_type geçersiz: %s (istenen tür uyumlu değil; tür " "değişkeninin ihmal edilmesi önerilir)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "Geçersiz volume_type verildi: %s (istenen tür bu tutarlılık grubu tarafından " "desteklenmelidir)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "Geçersiz wwpns biçimi %(wwpns)s" msgid "Issue encountered waiting for job." msgstr "İş için beklenirken durumla karşılaşıldı." msgid "Issue encountered waiting for synchronization." msgstr "Eşzamanlama için beklenirken durumla karşılaşıldı." msgid "Item not found" msgstr "Öğe bulunamadı" msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "Anahtar adları sadece alfanumerik karakter, altçizgi, virgül, iki nokta üst " "üste ve tire içerebilir." #, python-format msgid "KeyError: %s" msgstr "AnahtarHatası: %s" msgid "LUN export failed!" msgstr "LUN dışa aktarma başarısız!" msgid "LUN map overflow on every channel." msgstr "Her kanalda LUN eşleştirme taşması." #, python-format msgid "LUN not found with given ref %s." msgstr "Verilen %s referansına sahip LUN bulunamadı." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN sayısı %(ch_id)s kanal kimliğinde sınırların dışında." #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "Verilen %(ref)s başvurusuna sahip LUN mantıksal sürücü türünü karşılamıyor. " "%(vs)s vserver'inde ssc özellikleri olan LUN mantıksal sürücü olduğuna emin " "olun." #, python-format msgid "Last %s cinder syslog entries:-" msgstr "Son %s cinder syslog girdileri:-" msgid "LeftHand cluster not found" msgstr "LeftHand kümesi bulunamadı" #, python-format msgid "Line %(dis)d : %(line)s" msgstr "Satır %(dis)d : %(line)s" msgid "Link path already exists and its not a symlink" msgstr "Bağlantı yolu zaten mevcut ve sembolik bağlantı değil" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "" "Kaynak mantıksal sürücüsünün bağlantılı klonu bu durumda desteklenmiyor: %s." msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "Arama servisi yapılandırılmamış. fc_san_lookup_service için yapılandırma " "seçeneğinin arama servisinin somut bir uygulamasını belirtmesi gerekir." 
#, python-format msgid "Malformed fcns output string: %s" msgstr "Bozuk fcns çıktı karakter dizisi: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Hatalı biçimlendirilmiş mesaj gövdesi: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "Bozuk isim sunucusu karakter dizisi: %s" msgid "Malformed request body" msgstr "Kusurlu istek gövdesi" msgid "Malformed request url" msgstr "Bozuk istel adresi" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "%(cmd)s komutu için bozuk yanıt: %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "Bozuk scheduler_hints özelliği" #, python-format msgid "Malformed show fcns database string: %s" msgstr "Bozuk fcns veri tabanı gösterme karakter dizisi: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "Bozuk bölge yapılandırması: (anahtar=%(switch)s zone_config=%(zone_config)s)." #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "Bozuk bölge durumu: (anahtar=%(switch)s zone_config=%(zone_config)s)." msgid "Manage existing get size requires 'id'." msgstr "Mevcut alma boyutunu yönetme 'id' gerektirir." #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "Geçersiz art alanda çalışan uygulama kaynağı %(existing_ref)s nedeniyle " "varolan mantıksal sürücü yönetimi başarısız oldu: %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "Mantıksal sürücü türü uyumsuzluğu nedeniyle varolan mantıksal sürücü " "yönetimi başarısız oldu: %(reason)s" msgid "Manage existing volume not implemented." msgstr "Mevcut mantıksal sürücünün yönetimi henüz uygulanmadı." msgid "Manage existing volume requires 'source-id'." msgstr "Mevcut mantıksal sürücü yönetimi 'source-id' gerektirir." #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST etkinse mantıksal sürücü yönetme desteklenmez. FAST ilkesi: " "%(fastPolicyName)s." #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "%(id)s eşleştirmesine hazırlık ayrılan %(to)d saniye zaman aşımını içinde " "başarılamadı. Çıkılıyor." 
#, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "Maskeleme görünümü %(maskingViewName)s başarıyla silinemedi" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "İzin verilen yedeklemelerin azami sayısı (%(allowed)d) aşıldı" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" "İzin verilen anlık sistem görüntülerinin azami sayısı (%(allowed)d) aşıldı" #, python-format msgid "May specify only one of %s" msgstr "%s'nin sadece biri belirtilebilir" msgid "Metadata backup already exists for this volume" msgstr "Bu mantıksal sürücü için metadata yedeği zaten var" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "Metadata yedek nesnesi '%s' zaten var" msgid "Metadata item was not found" msgstr "İçerik özelliği bilgisi bulunamadı" #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "Metadata özellik anahtarı %s 255 karakterden büyük" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "Metadata özellik anahtarı %s değeri 255 karakterden büyük" msgid "Metadata property key blank" msgstr "Metadata özellik anahtarı boş" msgid "Metadata property key blank." msgstr "Metadata özellik anahtarı boş." msgid "Metadata property key greater than 255 characters." msgstr "Metadata özellik anahtarı 255 karakterden büyük." msgid "Metadata property value greater than 255 characters." msgstr "255 karakterden daha fazla metadata özellik değeri." msgid "Metadata restore failed due to incompatible version" msgstr "Uyumsuz sürüm nedeniyle metadata geri yüklemesi başarısız oldu" msgid "Metadata restore failed due to incompatible version." msgstr "Metadata geri yüklemesi uyumsuz sürüm nedeniyle başarısız oldu." #, python-format msgid "Migrate volume %(src)s failed." msgstr "%(src)s mantıksal sürücü göçü başarısız." #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "%(src)s kaynak mantıksal sürücüsü ile %(dst)s hedef mantıksal sürücü " "arasında göç başarısız oldu." #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "LUN %s göçü durduruldu ya da arızalandı." msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "Eksik 'purestorage' python modülü, kütüphanenin kurulu ve kullanılabilir " "olduğuna emin olun." msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "Fiber Kanal SAN yapılandırma parametresi eksik - fc_fabric_names" msgid "Missing request body" msgstr "Eksik istek gövdesi" msgid "Missing request body." msgstr "Eksik istek gövdesi." #, python-format msgid "Missing required element '%s' in request body" msgstr "İstek gövdesinde gerekli öge '%s' eksik" #, python-format msgid "Missing required element '%s' in request body." msgstr "İstek gövdesinde gerekli öge '%s' eksik." msgid "Missing required element 'consistencygroup' in request body." msgstr "İstek gövdesinde gerekli öge 'consistencygroup' eksik." msgid "Missing required element 'host' in request body." msgstr "İstek gövdesinde gerekli 'host' ögesi eksik." msgid "Missing required element quota_class_set in request body." msgstr "İstek gövdesinde gerekli quota_class_set ögesi eksik." #, python-format msgid "Multiple copies of volume %s found." msgstr "%s mantıksal sürücüsünün birden fazla kopyası bulundu." 
#, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "'%s' için birden fazla eşleşme bulundu, daha belirli olacak bir ID kullanın." msgid "Multiple profiles found." msgstr "Birden fazla profil bulundu." msgid "Must implement a fallback schedule" msgstr "Bir geri dönüş programı uygulanmalı" msgid "Must implement find_retype_host" msgstr "find_retype_host uygulanmalıdır" msgid "Must implement host_passes_filters" msgstr "host_passes_filters uygulanmalıdır" msgid "Must implement schedule_create_consistencygroup" msgstr "schedule_create_consistencygroup uygulanmalıdır" msgid "Must implement schedule_create_volume" msgstr "schedule_create_volume uygulanmalıdır" msgid "Must implement schedule_get_pools" msgstr "schedule_get_pools uygulanmalıdır" msgid "Must pass wwpn or host to lsfabric." msgstr "lsfabric'e wwpn veya istemci geçirilmeli." msgid "Must specify 'connector'" msgstr "'connector' belirtilmelidir" msgid "Must specify 'new_volume'" msgstr "'new_volume' belirtilmelidir" msgid "Must specify 'status'" msgstr "'status' belirtilmelidir" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "Güncelleme için 'status', 'attach_status' ya da 'migration_status' " "belirtilmelidir." msgid "Must specify a valid attach status" msgstr "Geçerli bir ekleme durumu belirtmelisiniz" msgid "Must specify a valid migration status" msgstr "Geçerli bir göç durumu belirtilmelidir" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "Geçerli bir kişi belirtilmeli %(valid)s, değer '%(persona)s' geçersiz." #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "Geçerli bir hazırlık türü belirtilmeli %(valid)s, değer '%(prov)s' geçersiz." msgid "Must specify a valid status" msgstr "Geçerli bir durum belirtilmelidir" msgid "Must specify an ExtensionManager class" msgstr "UzantıYöneticisi sınıfı belirlenmek zorunda" msgid "Must specify bootable in request." msgstr "İstekte önyüklenebilir belirtilmelidir." msgid "Must specify protection domain name or protection domain id." msgstr "Koruma alan ismi veya koruma alan id'si belirtmeli." msgid "Must specify readonly in request." msgstr "İstekte salt okunur belirtilmelidir." msgid "Must specify storage pool name or id." msgstr "Depolama havuzu ismi veya id'si belirtmeli." msgid "Must supply a positive value for age" msgstr "Devir için pozitif bir değer verilmelidir" msgid "Must supply a positive, non-zero value for age" msgstr "Yaş için pozitif, sıfırdan farklı bir değer sağlanmalı" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "NAS yapılandırması '%(name)s=%(value)s' geçersiz. 'auto', 'true' ya da " "'false' olmalıdır." #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "%(config)s konumunda NFS yapılandırma dosyası yok" #, python-format msgid "NFS file %s not discovered." msgstr "NFS dosyası %s keşfedilmemiş." msgid "NFS file could not be discovered." msgstr "NFS dosyası keşfedilemedi." msgid "Name" msgstr "Ad" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "İstek gövdesinde ad, tanımlama, add_volumes ve remove_volumes seçeneklerinin " "tümü boş olamaz." msgid "Need non-zero volume size" msgstr "Sıfır olmayan mantıksal sürücü boyutu gerekir" msgid "NetApp Cinder Driver exception." 
msgstr "NetApp Cinder Sürücü istisnası." #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "Genişletmek için yeni boyut mevcut boyuttan daha fazla olmalıdır. (mevcut: " "%(size)s, genişletilmiş: %(new_size)s)." msgid "New volume size must be specified as an integer." msgstr "Yeni mantıksal sürücü boyutu bir tam sayı olarak belirtilmelidir." msgid "New volume type must be specified." msgstr "Yeni mantıksal sürücü türü belirtilmelidir." msgid "New volume type not specified in request_spec." msgstr "request_spec içinde yeni mantıksal sürücü türü belirtilmemiş." #, python-format msgid "New volume_type same as original: %s." msgstr "Özgün hale benzeyen yeni volume_type: %s." msgid "Nimble Cinder Driver exception" msgstr "Nimble Cinder Sürücü hatası" msgid "No FCP targets found" msgstr "FCP hedefi bulunamadı" msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "Sağlanan iSCSI IP'lerine sahip etkin iSCSI portalı yok" #, python-format msgid "No available service named %s" msgstr "%s adında kullanılabilir servis yok" #, python-format msgid "No backup with id %s" msgstr "%s kimlikli yedekleme yok" msgid "No backups available to do an incremental backup." msgstr "Artımlı yedekleme için kullanılabilir hiçbir yedek yok." msgid "No big enough free disk" msgstr "Yeterince büyük boş disk yok" #, python-format msgid "No cgsnapshot with id %s" msgstr "%s kimlikli hiçbir cgsnapshot yok" msgid "No cinder entries in syslog!" msgstr "syslog içinde hiçbir cinder girdisi yok!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "Dosyalayıcıda %s isimli çoğaltılmış LUN bulunamadı" msgid "No config node found." msgstr "Yapılandırma düğümü bulunamadı." #, python-format msgid "No consistency group with id %s" msgstr "%s kimlikli hiçbir tutarlılık grubu yok" msgid "No errors in logfiles!" msgstr "logfiles dosyasında hiçbir hata yok!" #, python-format msgid "No file found with %s as backing file." msgstr "%s için destek dosyası olacak bir dosya bulunamadı." #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "Boş LUN ID'si kalmadı. İstemciye (%s) eklenebilecek azami mantıksal sürücü " "sayısı aşıldı." msgid "No free disk" msgstr "Boş disk yok" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "%s için sağlanan listede iyi iscsi portalı bulunamadı." #, python-format msgid "No good iscsi portals found for %s." msgstr "%s için iyi iscsi portalı bulunamadı." #, python-format msgid "No host to create consistency group %s." msgstr "%s tutarlılık grubu oluşturulacak istemci yok." msgid "No image_name was specified in request." msgstr "İstekte hiçbir image_name belirtilmemiş." #, python-format msgid "No initiator group found for initiator %s" msgstr "%s başlatıcısı için hiçbir başlatıcı grup bulunamadı" msgid "No initiators found, cannot proceed" msgstr "Başlatıcı bulunamadı, devam edilemiyor" #, python-format msgid "No interface found on cluster for ip %s" msgstr "Kümede %s ip'si için arayüz bulunamadı" msgid "No ip address found." msgstr "Ip adresi bulunamadı." msgid "No iscsi auth groups were found in CloudByte." msgstr "CloudByte'da iscsi yetkilendirme grubu bulunamadı." msgid "No iscsi initiators were found in CloudByte." msgstr "CloudByte içinde hiçbir iscsi başlatıcı bulunamadı." #, python-format msgid "No iscsi service found for CloudByte volume [%s]." 
msgstr "" "[%s] CloudByte mantıksal sürücüsü için hiçbir iscsi servisi bulunamadı." msgid "No iscsi services found in CloudByte storage." msgstr "CloudByte depolamada hiçbir iscsi servisi bulunamadı." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "" "Bir anahtar dosya belirtilmemiş ve anahtar %(cert)s den yüklenemiyor %(e)s." msgid "No mounted Gluster shares found" msgstr "Bağlı Gluster paylaşımı bulunamadı" msgid "No mounted NFS shares found" msgstr "Bağlı NFS paylaşımı bulunamadı" msgid "No mounted SMBFS shares found." msgstr "Bağlı SMBFS paylaşımı bulunamadı." msgid "No mounted Virtuozzo Storage shares found" msgstr "Bağlı Virtuozzo Depolama paylaşımı bulunamadı" msgid "No mounted shares found" msgstr "Bağlı paylaşım bulunamadı" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "%(vol)s mantıksal sürücüsü için %(gid)s I/O grubunda düğüm bulunamadı." msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "CloudByte depolama listesi iSCSI auth user API çağrısından yanıt alınamadı." msgid "No response was received from CloudByte storage list tsm API call." msgstr "CloudByte storage list tsm API çağrısından yanıt alınamadı." msgid "No response was received from CloudByte's list filesystem api call." msgstr "CloudByte'ın list filesystem api çağrısından bir yanıt alınmadı." #, python-format msgid "No snap found with %s as backing file." msgstr "%s destek dosyası olacak bir anlık görüntü bulunamadı." #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "%s anlık görüntü grubundan anlık görüntü imajı bulunamadı." #, python-format msgid "No storage path found for export path %s" msgstr "Dışa aktarma yolu %s için depolama yolu bulunamadı" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "Böyle bir %(specs_id)s QoS özelliği yok." msgid "No suitable discovery ip found" msgstr "Uygun ip keşfedilemedi" #, python-format msgid "No support to restore backup version %s" msgstr "%s yedekleme sürümünü geri yükleme desteklenmiyor" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "%(volume_id)s bölümü için hedef id bulunamadı." msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "İstemci üzerinde kullanılabilir LUN ID'si yok; çoklu ekleme etkin ki bu tüm " "LUN ID'lerinin tüm istemci grupları arasında benzersiz olmasını gerektirir." #, python-format msgid "No valid host was found. %(reason)s" msgstr "Geçerli bir sunucu bulunamadı: %(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "%(type)s türü ile %(id)s mantıksal sürücüsü için geçerli istemci yok" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "%(vserver)s vserver'e ve %(junction)s kavşağına sahip kümede mantıksal " "sürücü yok " msgid "No volume was found at CloudByte storage." msgstr "CloudByte depolamada hiçbir mantıksal sürücü bulunamadı." msgid "No volume_type should be provided when creating test replica." msgstr "Test kopyası oluşturulurken volume_type verilmesi gerekmez." msgid "No volumes found in CloudByte storage." msgstr "CloudByte depolamada hiçbir mantıksal sürücüsü bulunamadı." 
msgid "No weighed hosts available" msgstr "Kullanılabilir ağırlıklı istemci yok" #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "Mantıksal sürücü için uygun bir veri deposu bulunamadı: %s." msgid "Not an rbd snapshot" msgstr "Bir rbd anlık sistem görüntüsü değildir" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "%(image_id)s imajı için yetkilendirilemedi." msgid "Not authorized." msgstr "Yetkiniz yok." #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "Art alanda çalışan uygulamada (%(backend)s) yeterli alan yok" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "ZFS paylaşımında bu işlemi yapacak kadar depolama alanı yok." msgid "Not stored in rbd" msgstr "rbd içinde depolanmıyor" msgid "Nova returned \"error\" status while creating snapshot." msgstr " Nova anlık sistem görüntüsü oluşturulurken \"error\" durumu döndürdü." msgid "Null response received from CloudByte's list filesystem." msgstr "CloudByte listesinde dosya sisteminden boş yanıt alındı." msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "CloudByte'ın iscsi yetkilendirme grubu listesinden yanıt dönmedi." msgid "Null response received from CloudByte's list iscsi initiators." msgstr "CloudByte listesinde iscsi başlatıcılarından boş yanıt alındı." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "" "CloudByte listesinde mantıksal sürücü iscsi servisinden boş yanıt alındı." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "CloudByte depolamasında [%s] mantıksal sürücüsü oluşturulurken boş yanıt " "alındı." msgid "Number of retries if connection to ceph cluster failed." msgstr "Ceph kümesine bağlantı başarısız olursa tekrar deneme sayısı." msgid "Object Count" msgstr "Nesne Sayısı" msgid "Object is not a NetApp LUN." msgstr "Nesne bir NetApp LUN değil." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "Bir Büyütme İşleminde, mantıksal sürücünün bileşik mantıksal sürücüye " "eklenmesinde hata: %(volumename)s." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "Yalnızca %(value)s %(verb)s istek(ler)i %(uri)s ye her %(unit_string)s " "yapılabilir." msgid "Only one limit can be set in a QoS spec." msgstr "QoS özelliğinde yalnızca bir sınır ayarlanabilir." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "İşlem şu durum ile başarısız oldu=%(status)s. Tam dökümü: %(data)s" msgid "Option gpfs_images_dir is not set correctly." msgstr "gpfs_images_dir seçeneği doğru ayarlanmamış." msgid "Option gpfs_images_share_mode is not set correctly." msgstr "gpfs_images_share_mode seçeneği doğru ayarlanmamış." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "gpfs_mount_point_base seçeneği doğru ayarlanmamış." msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "" "Blockbridge API sunucusuna bağlanmak için HTTPS bağlantı noktasının üzerine " "yaz." #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "Yetkilendirme için parola veya SSH özel anahtarı gerekli: ya san_password ya " "da san_private_key seçeneğini ayarlayın." 
msgid "Path to REST server's certificate must be specified." msgstr "REST sunucusunun sertifikasına olan yol belirtilmeli." #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "Lütfen %(pool_list)s havuzunu önceden oluşturun!" #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "Lütfen önceden %(pool)s havuzunda %(tier_levels)s aşamasını oluşturun!" msgid "Please re-run cinder-manage as root." msgstr "Lütfen cinder-manage'i root olarak yeniden çalıştırın." msgid "Please specify a name for QoS specs." msgstr "Lütfen QoS özellikleri için bir ad belirtin." #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "%(action)s uygulanmasına izin verilmiyor." #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "%(poolNameInStr)s havuzu bulunamadı." msgid "Pool is not available in the volume host field." msgstr "Havzu mantıksal sürücü istemci alanında kullanılabilir değil." msgid "Pool is not available in the volume host fields." msgstr "Havuz mantıksal sürücü istemci alanlarında kullanılabilir değil." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "%(pool)s ismine sahip havuz %(domain)s alanında bulunamadı." #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "%(pool_name)s ismine sahip havuz %(domain_id)s alanında bulunamadı." #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "Havuz: %(poolName)s. %(fastPolicy)s. fast ilkesi için depolama aşamasıyla " "ilişkilendirilmemiş." #, python-format msgid "Pools %s does not exist" msgstr "Havuz %s mevcut değil" msgid "Pools name is not set." msgstr "Havuz ismi ayarlanmamış." #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "Birincil kopyalama durumu: %(status)s ve eşzamanlanan: %(sync)s." msgid "Project ID" msgstr "Proje ID" #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "%(storage_protocol)s iletişim kuralı %(storage_family)s depolama ailesi için " "desteklenmiyor." #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "%(current)s durumundaki anlık sistem görüntüsünde verilen anlık sistem " "görüntüsü durumuna %(provided)s izin verilmez." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Pure Storage Cinder sürücü hatası: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "QoS Özellikleri %(specs_id)s zaten var." #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "%(specs_id)s QoS Özellikleri hala varlıklar ile ilişkilidir." #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" "%(specs_id)s QoS özelliği, %(specs_key)s anahtarı ile hiçbir özelliğe sahip " "değil." msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "QoS özellikleri bu depolama ailesi ve ONTAP sürümünde desteklenmiyor." msgid "Qos specs still in use." msgstr "Qos özellikleri hala kullanımda." msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "Servis parametreleri ile sorgu önerilmiyor. Lütfen bunun yerine ikili değer " "parametrelerini kullanın." 
#, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Kota sınıfı %(class_name)s bulunamadı." msgid "Quota could not be found" msgstr "Kota bulunamadı." #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Kota kaynaklar için aşıldı: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Kota aşıldı: kod=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "%(project_id)s projesi için bir kota bulunamadı." #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "Kota koşulu %(uuid)s bulunamadı." #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "%(project_id)s projesi için kota kullanımı bulunamadı." #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "RBD diff işlemi başarısız oldu - (ret=%(ret)s stderr=%(stderr)s)" msgid "REST server IP must by specified." msgstr "REST sunucu IP'si belirtilmelidir." msgid "REST server password must by specified." msgstr "REST sunucu parolası belirtilmelidir." msgid "REST server username must by specified." msgstr "REST sunucu kullanıcı adı belirtilmelidir." msgid "Raid did not have MCS Channel." msgstr "Raid MCS Kanalına sahip değil." #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "max_luns_per_storage_group yapılandırma seçeneği ile ayarlanan " "sınırlandırmaya eriş. %(vol)s in %(sg)s Depolama Grubuna eklenmesi " "reddedildi." #, python-format msgid "Received error string: %s" msgstr "Alınan hata: %s" msgid "Reference must be for an unmanaged virtual volume." msgstr "Başvuru yönetilmeyen bir sanal mantıksal sürücü için olmalı." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "" "Referans yönetilmeyen bir sanal mantıksal sürücünün mantıksal sürücü ismi " "olmalı." msgid "Reference must contain either source-id or source-name element." msgstr "Başvuru kaynak-id veya kaynak-isim öğelerini içermeli." msgid "Reference must contain either source-name or source-id element." msgstr "Başvuru ya kaynak-ismi ya da kaynak-kimliği öğelerini içermeli." msgid "Reference must contain source-id or source-name key." msgstr "Başvuru source-id veya source-name anahtarını içermeli." msgid "Reference must contain source-id or source-name." msgstr "Başvuru kaynak-id veya kaynak-ismi içermeli." msgid "Reference must contain source-name element." msgstr "Kaynak kaynak-ad ögesi içermelidir." msgid "Reference must contain source-name or source-id." msgstr "Başvuru kaynak-ismi veya kaynak-kimliği içermeli." msgid "Reference must contain source-name." msgstr "Referans kaynak ismi içermeli." #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "Şu mantıksal sürücü göçü reddediliyor: %(id)s. Lütfen yapılandırma " "ayarlarınızı kontrol edin çünkü kaynak ve hedef aynı Mantıksal Sürücü " "Grubundalar: %(name)s." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "Çoğaltma Servisi Yeteneği %(storageSystemName)s üzerinde bulunamadı." #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "Çoğaltma Servisi %(storageSystemName)s üzerinde bulunamadı." 
msgid "Replication is not enabled" msgstr "Kopyalama etkin değildir" msgid "Replication is not enabled for volume" msgstr "Mantıksal sürücü için kopyalama etkin değildir" #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "Mantıksal sürücü için kopyalama durumu etkin ya da etkin-durdu olmalıdır " "ancak mevcut durum: %s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "Mantıksal sürücü için kopyalama durumu pasif, etkin-durdu ya da hata " "olmalıdır ancak mevcut durum: %s" msgid "Request body and URI mismatch" msgstr "URI ve gövde isteği uyumsuz" msgid "Request body contains too many items" msgstr "İstek gövdesi çok sayıda öğe içeriyor" msgid "Request body contains too many items." msgstr "İstek gövdesi çok fazla öge içerir." msgid "Request body empty" msgstr "İstek gövdesi boş" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "Datera kümesine yapılan istek kötü durum döndürdü: %(status)s | %(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "İstenen yedekleme izin verilen yedekleme kotasını aşıyor. İstenen " "%(requested)sG, kota %(quota)sG ve tüketilen %(consumed)sG." #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "İstenen mantıksal sürücü ya da anlık sistem görüntüsü izin verilen %(name)s " "kotayı aşıyor. İstenen %(requested)sG, kota %(quota)sG ve tüketilen " "%(consumed)sG." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "İstenen mantıksal sürücü boyutu %(size)d izin verilen azami sınırdan " "%(limit)d daha büyük." msgid "Required configuration not found" msgstr "Gerekli yapılandırma bulunamadı" #, python-format msgid "Required flag %s is not set" msgstr "İstenen %s bayrağı ayarlı değil" #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Yedekleme durumu sıfırlama durduruldu, şu anda yapılandırılan yedekleme " "servisi [%(configured_service)s], bu yedeği [%(backup_service)s] oluşturmak " "için kullanılan yedekleme servisi değildir." #, python-format msgid "Resizing clone %s failed." msgstr "%s klonunun yeniden boyutlandırılması başarısız." msgid "Resizing image file failed." msgstr "İmaj dosyası yeniden boyutlandırma başarısız oldu." msgid "Resource could not be found." msgstr "Kaynak bulunamadı." msgid "Resource not ready." msgstr "Kaynak hazır değil." #, python-format msgid "Response error - %s." msgstr "Yanıt hatası - %s." #, python-format msgid "Response error code - %s." msgstr "Yanıt hata kodu - %s." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Yedek geri yükleme durduruldu, beklenen mantıksal sürücü durumu " "%(expected_status)s fakat mevcut durum %(actual_status)s." 
#, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Yedek geri yükleme durduruldu, şu anda yapılandırılan yedekleme servisi " "[%(configured_service)s] bu yedeğin [%(backup_service)s] oluşturulması için " "kullanılan yedekleme servisi değildir." #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Yedek geri yükleme durduruldu: beklenen yedekleme durumu %(expected_status)s " "ancak alınan %(actual_status)s." #, python-format msgid "Retry count exceeded for command: %s" msgstr "Komut için yeniden deneme sayısı aşıldı: %s" msgid "Retryable SolidFire Exception encountered" msgstr "Yinelenebilir SolidFire İstisnası oluştu" msgid "Retype cannot change encryption requirements." msgstr "Retype şifreleme gereksinimlerini değiştiremiyor." #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "Retype, kullanımdaki mantıksal sürücünün ön-uç qos özelliklerini " "değiştiremiyor: %s." msgid "Retype requires migration but is not allowed." msgstr "Retype göçe ihtiyaç duyuyor ama izin verilmiyor." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "Mantıksal sürücü: %(volumeName)s için geri dönüş başarısız. Lütfen mantıksal " "sürücünüzü %(fastPolicyName)s fast ilkesi için varsayılan depolama grubuna " "elle döndürmesi için sistem yöneticinizle iletişime geçin." #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "%(volumeName)s silinerek geri alınıyor." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" "SMBFS yapılandırması 'smbfs_oversub_ratio' geçersiz. Değer > 0 olmalıdır: %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "SMBFS yapılandırması 'smbfs_used_ratio' geçersiz. Değer > 0 ve <= 1.0 " "olmalıdır: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "%(config)s konumunda SMBFS yapılandırma dosyası yok." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS yapılandırma dosyası ayarlı değil (smbfs_shares_config)." #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" "SSH komutu '%(total_attempts)r' girişimden sonra başarısız oldu: " "'%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "SSH komut ekleme algılandı: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "%(fabric)s için SSH bağlantısı başarısız, hata: %(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "SSL Sertifikasının süresi %s de doldu." #, python-format msgid "SSL error: %(arg)s." msgstr "SSL hatası: %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "%(filter_name)s zamanlayıcı sunucu filtresi bulunamadı." #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "Zamanlayıcı İstemci Tartıcı %(weigher_name)s bulunamadı." #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." 
msgstr "" "İkincil kopyalama durumu: %(status)s ve eşzamanlanan: %(sync)s, eşzamanlama " "ilerlemesi: %(progress)s%%." #, python-format msgid "Service %(service_id)s could not be found." msgstr "%(service_id)s servisi bulunamadı." msgid "Service is unavailable at this time." msgstr "Şu anda servis kullanılamıyor." msgid "Service not found." msgstr "Servis bulunamadı." msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "LUN QoS ilke grubu ayarlama bu depolama ailesi ve ONTAP sürümünde " "desteklenmiyor." msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "Dosya qos ilke grubu ayarlama bu depolama ailesi ve ontap sürümünde " "desteklenmiyor." #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "Paylaşım %s geçersiz biçim sebebiyle atlandı. adres:/export biçiminde " "olmalı. Lütfen nas_ip ve nas_share_path ayarlarını kontrol edin." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "%(dir)s dizinindeki paylaşım Cinder mantıksal sürücü servisi tarafından " "yazılabilir değildir. Anlık sistem görüntüsü işlemleri desteklenmiyor." msgid "Size" msgstr "Boyut" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "Mantıksal sürücü boyutu: %s bulunamadı, güvenli silinemiyor." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "Boyut %(image_size)dGB ve %(volume_size)dGB boyutundaki mantıksal sürücü ile " "uymuyor." #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "Belirtilen imajın boyutu %(image_size)sGB %(volume_size)sGB mantıksal sürücü " "boyutundan büyük." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "%(snapshot_id)s sistem anlık görüntüsü bulunamadı." #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "%(snapshot_id)s anlık sistem görüntüsü %(metadata_key)s anahtarı ile hiçbir " "metadata'ya sahip değil." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "Anlık sistem görüntüsü oluşturulamıyor çünkü %(vol_id)s mantıksal sürücüsü " "kullanılabilir değil, mevcut mantıksal sürücü durumu: %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "Mantıksal sürücü taşınırken anlık sistem görüntüsü oluşturulamaz." msgid "Snapshot of secondary replica is not allowed." msgstr "İkincil kopyanın anlık sistem görüntüsüne izin verilmez." #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "Mantıksal sürücünün anlık görüntüsü %s durumunda desteklenmiyor." #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "Hiçbir yere yerleştirilmemiş anlık görüntü res \"%s\"?" #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" "Anlık sistem görüntüsü durumuna %(cur)s update_snapshot_status için izin " "verilmez" msgid "Snapshot status must be \"available\" to clone." msgstr "" "Kopyalamak için anlık sistem görüntüsü durumu \"kullanılabilir\" olmalıdır." 
#, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "Anlık sistem görüntüsü='%(snap)s' temel imaj='%(base)s' içinde mevcut değil " "- artırımlı yedekleme durduruluyor" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "" "Anlık sistem görüntüleri bu mantıksal sürücü biçimi için desteklenmiyor: %s" msgid "SolidFire Cinder Driver exception" msgstr "SolidFire Cinder Sürücü istisnası" msgid "Sort direction array size exceeds sort key array size." msgstr "Sıralama yönü dizi boyutu anahtar dizi boyutu sınırını aştı." msgid "Source host details not found." msgstr "Kaynak istemci detayları bulunamadı." msgid "Source volume device ID is required." msgstr "Kaynak mantıksal sürücü aygıt kimliği gerekli." msgid "Source volume not mid-migration." msgstr "Kaynak mantıksal sürücü taşıma ortasında değildir." msgid "SpaceInfo returned byarray is invalid" msgstr "Dizi tarafından döndürülen SpaceInfo geçersiz" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "%(vol)s mantıksal sürücüsüne eşleştirilmesi için belirtilen istemci " "%(group)s ile desteklenmeyen istemci grubunda." msgid "Specified logical volume does not exist." msgstr "Belirtilen mantıksal sürücü mevcut değil." msgid "Specify a password or private_key" msgstr "Bir parola ya da private_key belirtin" msgid "Specify san_password or san_private_key" msgstr "san_password veya san_private_key belirtin" msgid "State" msgstr "Durum" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "Düğüm durumu yanlış. Mevcut durum %s." msgid "Status" msgstr "Statü" #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "" "Depolama Yapılandırma Servisi %(storageSystemName)s üzerinde bulunamadı." #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "Depolama HardwareId yönetim Servisi %(storageSystemName)s üzerinde " "bulunamadı." #, python-format msgid "Storage Profile %s not found." msgstr "Depolama Profili %s bulunamadı." #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "" "Depolama Yeniden Konumlandırma Servisi %(storageSystemName)s üzerinde " "bulunamadı." #, python-format msgid "Storage family %s is not supported." msgstr "Depolama ailesi %s desteklenmiyor." #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "Depolama grubu %(storageGroupName)s başarılı bir şekilde silindi" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "Depolama sunucusu %(svr)s tespit edilemedi, adı doğrulayın" #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "Depolama profili: %(storage_profile)s bulunamadı." msgid "Storage resource could not be found." msgstr "Depolama kaynağı bulunamadı." msgid "Storage system id not set." msgstr "Depolama sistemi kimliği ayarlanmamış." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "%(poolNameInStr)s havuzu için depolama sistemi bulunamadı." #, python-format msgid "StorageSystem %(array)s is not found." msgstr "Depolama Sistemi %(array)s bulunamadı." #, python-format msgid "String with params: %s" msgstr "Parametreler ile karakter dizisi: %s" msgid "Synchronizing secondary volume to primary failed." 
msgstr "" "İkincil mantıksal sürücünün birincil mantıksal sürücüye eşleştirilmesi " "başarısız oldu." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "Kötü durumda sistem %(id)s bulundu - %(status)s." msgid "System does not support compression." msgstr "Sistem sıkıştırmayı desteklemiyor." #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "CloudByte depolamasında [%(account)s] hesabı için TSM [%(tsm)s] bulunamadı." msgid "Target volume type is still in use." msgstr "Hedef mantıksal sürücü türü hala kullanımda." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "Şablon ağacı uyumsuz; %(mastertag)s ana kaydına %(slavetag)s bağımlı birimi " "ekleniyor" msgid "Terminate connection failed" msgstr "Bağlantı sonlandırma başarısız oldu" msgid "Terminate connection unable to connect to backend." msgstr "Bağlantıyı sonlandır arka uca bağlanılamıyor." #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "Mantıksal sürücü bağlantı sonlandırma başarısız oldu: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "Çoğaltılacak %(type)s %(id)s kaynağı bulunamadı." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "'sort_key' ve 'sort_dir' parametreleri önerilmiyor ve 'sort' parametresi ile " "kullanılamaz." msgid "The EQL array has closed the connection." msgstr "EQL dizisi bağlantıyı kapattı." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "GPFS dosya sistemi %(fs)s gerekli sürüm seviyesinde değil. Mevcut seviye " "%(cur)s, en az %(min)s olmalı." msgid "The IP Address was not found." msgstr "IP Adresi bulunamadı." #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "WebDAV isteği başarısız oldu. Neden: %(msg)s, Dönüş kodu/nedeni: %(code)s, " "Kaynak Mantıksal Sürücü: %(src)s, Hedef Mantıksal Sürücü: %(dst)s, Yöntem: " "%(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "Yukarıdaki hata veritabanının oluşturulamadığını gösterebilir.\n" "Lütfen bu komutu çalıştırmadan önce 'cinder-manage db sync' kullanarak bir " "veritabanı oluşturun." #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "%(cmd)s komutu başarısız. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgid "The copy should be primary or secondary" msgstr "Kopyalamanın birincil ya da ikincil olması gerekir" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "Mantıksal bir aygıtın oluşturulması tamamlanamadı. (LDEV: %(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" "Dekore edilen metod ya bir mantıksal sürüc üya da anlık görüntü nesnesi " "kabul etmeli" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "%(path)s yolundaki aygıt kullanılabilir değil: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." 
msgstr "" "Bitiş zamanı (%(end)s) başlangıç zamanından (%(start)s) sonra olmalıdır." #, python-format msgid "The extra_spec: %s is invalid." msgstr "extra_spec: %s geçersizdir." #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "extraspec: %(extraspec)s geçersizdir." #, python-format msgid "The following elements are required: %s" msgstr "Aşağıdaki ögeler gereklidir: %s" msgid "The host group or iSCSI target could not be added." msgstr "İstemci grubu veya iSCSI hedefi eklenemedi." msgid "The host group or iSCSI target was not found." msgstr "İstemci grubu ya da iSCSI hedefi bulunamadı." #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP kullanıcısı %(user)s mevcut değil." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "İçe aktarılan lun %(lun_id)s %(host)s istemcisi tarafından yönetilmeyen " "%(lun_pool)s havuzunda." #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "Belirtilen %(type)s %(id)s için mantıksal aygıt zaten silinmiş." #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "" "%(method)s metodu zaman aşımına uğradı. (zaman aşımı değeri: %(timeout)s)" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "Depolama arka ucunun parametresi. (config_group: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "Artımlı yedekleme için ana yedekleme kullanılabilir olmalıdır." #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "İstenen boyut : %(requestedSize)s sonuçta oluşan boyut: %(resultSize)s ile " "aynı değildir." #, python-format msgid "The resource %(resource)s was not found." msgstr "Kaynak %(resource)s bulunamadı." msgid "The results are invalid." msgstr "Sonuçlar geçersizdir." #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "" "Kaynak mantıksal sürücü %s mevcut istemci tarafından yönetilen havuzda değil." msgid "The source volume for this WebDAV operation not found." msgstr "Bu WebDAV işlemi için kaynak mantıksal sürücü bulunamadı." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "Kaynak mantıksal sürücü türü '%(src)s' hedef mantıksal sürücü türünden " "'%(dest)s' farklıdır." #, python-format msgid "The source volume type '%s' is not available." msgstr "Kaynak mantıksal sürücü türü '%s' kullanılabilir değil." #, python-format msgid "The specified %(desc)s is busy." msgstr "Belirtilen %(desc)s meşgul." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "Belirtilen ldev %(ldev)s yönetilemedi. ldev eşleşmiyor olmalı." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "Belirtilen ldev %(ldev)s yönetilemedi. ldev çiftlenmemiş olmalı." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "Belirtilen ldev %(ldev)s yönetilemedi. ldev boyutu gigabayt'ın katı olmalı." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." 
msgstr "" "Belirtilen ldev %(ldev)s yönetilemedi. Mantıksal sürücü türü DP-VOL olmalı." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "Belirtilen işlem desteklenmiyor. Mantıksal sürücü boyutu kaynak %(type)s ile " "aynı olmalı. (mantıksal sürücü: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "Belirtilen vdisk bir istemciye eşleştirilmiş." msgid "The specified volume is mapped to a host." msgstr "Belirtilen mantıksal sürücü bir istemciye eşleştirilmiş." #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "Depolama arka ucu kullanılabilir. (config_group: %(config_group)s)" #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "%(memberCount)s biçimlenmiş meta sayısı %(volumeSize)s boyutundaki " "%(volumeName)s mantıksal sürücüsü için çok küçük." #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "%(volume_id)s mantıksal sürücüsü genişletilemedi. Mantıksal sürücü türü " "Normal olmalı." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "%(volume_id)s mantıksal sürücüsünün yönetimi bırakılamadı. Mantıksal sürücü " "türü %(volume_type)s olmalı." #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "%(volume_id)s mantıksal sürücüsü başarıyla yönetildi. (LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "" "%(volume_id)s mantıksal sürücüsünün yönetimi başarıyla bırakıldı. (LDEV: " "%(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "Eşleştirilecek %(volume_id)s mantıksal sürücüsü bulunamadı." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" "Mantıksal sürücü sürücüsü iSCSI ilklendirici ismini bağlayıcıda istiyor." msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "Mantıksal sürücü şu an 3PAR üzerinde meşgul ve silinemez. Daha sonra tekrar " "deneyebilirsiniz." #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "Kullanım için uygun kaynak yok. (kaynak: %(resource)s)" #, python-format msgid "There are no valid datastores attached to %s." msgstr "%s'e ekli geçerli veridepoları yok." msgid "There are no valid datastores." msgstr "Geçerli veri depoları yok." #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "%(param)s ın ataması yok. Belirtilen depolama mantıksal sürücüyü yönetmek " "için gerekli." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "Ldev'in ataması yok. Belirtilen ldev mantıksal sürücüyü yönetmek için " "gerekli." msgid "There is no metadata in DB object." msgstr "Veritabanı nesnelerinde hiçbir metadata yok." #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "%(volume_size)sG sahibi paylaşım yok" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "%(volume_size)sG sahibi paylaşım yok." 
#, python-format msgid "There is no such action: %s" msgstr "Böyle bir işlem yok: %s" msgid "There is no virtual disk device." msgstr "Sanal disk aygıtı yok." #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Yapılandırılmış Gluster yapılandırma dosyası yok (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "Yapılandırılmış NFS yapılandırma dosyası yok (%s)" msgid "Thin provisioning not supported on this version of LVM." msgstr "LVM'in bu sürümünde ince hazırlık desteklenmiyor." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "ThinProvisioning Etkinleştirici kurulu değil. İnce mantıksal sürücü " "oluşturulamıyor" msgid "This driver does not support deleting in-use snapshots." msgstr "" "Bu sürücü kullanımdaki anlık sistem görüntülerinin silinmesini desteklemez." msgid "This driver does not support snapshotting in-use volumes." msgstr "" "Bu sürücü kullanımdaki mantıksal sürücülerin anlık sistem görüntüsünü almayı " "desteklemez." msgid "This request was rate-limited." msgstr "Bu istek sayı limitlidir." #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "Sistem platformu (%s) desteklenmiyor. Bu sürücü yalnızca Win32 platformunu " "destekler." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "Aşama İlke Servisi %(storageSystemName)s için bulunamadı." #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "%s anlık sistem görüntüsü oluşumunda Nova güncellemesi beklenirken zaman " "aşımı oluştu." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "%(id)s anlık sistem görüntüsünü silmek için Nova güncellemesi beklenirken " "zaman aşımı oluştu." msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "Ceph kümesine bağlanırken zaman aşımı değeri (saniye) kullanılır. Eğer değer " "< 0 ise, zaman aşımı ayarlanmamıştır ve öntanımlı librados değeri kullanılır." #, python-format msgid "Timeout while requesting %(service)s API." msgstr "API %(service)s istenirken zaman aşımı." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "%(transfer_id)s aktarımı bulunamadı." #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "Aktarım %(transfer_id)s: %(volume_id)s mantıksal sürücü kimliği beklenmeyen " "durumda %(status)s, beklenen bekleyen-aktarım" #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "Mantıksal sürücü ayarlama görevi bitmeden durduruldu: volume_name=" "%(volume_name)s, task-status=%(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "%(type_id)s türü zaten diğer qos özellikleri ile ilişkilidir:%(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "" "Tür erişim değişiklikleri ortak mantıksal sürücü türü için uygulanamaz." #, python-format msgid "TypeError: %s" msgstr "TürHatası: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUIDs %s, hem ekleme hem de kaldırma mantıksal sürücü listesindedir." 
#, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "Art alanda çalışan depolamaya %(path)s yolu ile erişilemedi." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "Cinder istemcisi %(space)s alanı için apphosts'a eklenemedi" msgid "Unable to connect or find connection to host" msgstr "Bağlanılamadı ya da istemci için bağlantı bulunamadı" msgid "Unable to create Barbican Client without project_id." msgstr "Barbican İstemcisi project_id olmadan oluşturulamadı." #, python-format msgid "Unable to create consistency group %s" msgstr "%s tutarlılık grubu oluşturulamadı" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST ilkesi için öntanımlı depolama grubu alınamadı ya da oluşturulamadı: " "%(fastPolicyName)s." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "%s mantıksal sürücüsü için kopya çoğaltma oluşturulamıyor." #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "%s Tutarlılık Grubu anlık görüntüsü silinemedi" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "%(id)s anlık sistem görüntüsü silinemedi, durum: %(status)s." #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "%s mantıksal sürücüsü üzerindeki anlık görüntü ilkesi silinemedi." msgid "Unable to determine system id." msgstr "Sistem kimliği belirlenemiyor." msgid "Unable to determine system name." msgstr "Sistem ismi belirlenemiyor." #, python-format msgid "Unable to extend volume %s" msgstr "%s mantıksal sürücüsü genişletilemedi" msgid "Unable to fetch connection information from backend." msgstr "Art alanda çalışan uygulamadan bağlantı bilgisi getirilemedi." #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "Art alanda çalışan uygulamadan bağlantı bilgisi getirilemedi: %(err)s" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "Mantıksal Sürücü Grubu bulunamadı: %(vg_name)s" msgid "Unable to find iSCSI mappings." msgstr "iSCSI eşleştirmeleri bulunamadı." #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "ssh_hosts_key_file bulunamadı: %s" msgid "Unable to find system log file!" msgstr "Sistem günlük dosyası bulunamadı!" #, python-format msgid "Unable to find volume %s" msgstr "%s mantıksal sürücüsü bulunamadı" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "'%s' dosyası için bir blok aygıtı alınamadı" #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "%(space)s alanında bilgi alınamadı, lütfen kümenin bağlandığını ve " "çalıştığını doğrulayın." msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "İstemci üzerindeki IP adreslerinin listesi alınamadı, ağ oluşturma ve " "izinleri kontrol edin." msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "" "Alan üyelerinin listesi alınamadı, kümenin çalışıp çalışmadığını kontrol " "edin." msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "Yeni ad oluşturulacak alanların listesi alınamadı. Lütfen kümenin " "çalıştığını doğrulayın." 
#, python-format msgid "Unable to get stats for backend_name: %s" msgstr "Backend_name için bilgiler alınamadı: %s" #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "%(hardwareIdInstance)s hardwareid için hedef uç noktalar alınamıyor." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "%(deviceId)s mantıksal sürücüsü cinder'e aktarılamıyor. %(sync)s çoğaltma " "oturumunun kaynak mantıksal sürücüsü." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "%(deviceId)s mantıksal sürücüsü cinder'e aktarılamıyor. Harici mantıksal " "sürücü mevcut cinder istemcisi tarafından yönetilen havuzda değil." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "%(deviceId)s mantıksal sürücüsü cinder'e aktarılamıyor. Mantıksal sürücü " "%(mv)s maskeleme görünümünde." #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "CA %(cert)s den yüklenemedi %(e)s." #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "Sertifika yüklenemedi %(cert)s %(e)s." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "Anahtar %(cert)s den yüklenemiyor %(e)s." #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "Solidfire aygıtında %(account_name)s hesabı bulunamadı" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "'%s' IP adresini yöneten bir SVM bulunamıyor" msgid "Unable to map volume" msgstr "Mantıksal sürücü eşleştirilemedi" msgid "Unable to map volume." msgstr "Mantıksal sürücü eşleştirilemedi." msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "XML isteği ayrıştırılamadı. Lütfen doğru biçimde XML sağlayın." msgid "Unable to parse attributes." msgstr "Öznitelikler ayrıştırılamadı." #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "%s mantıksal sürücüsü için kopya birincil olarak terfi ettirilemiyor. " "Kullanılabilir ikincil kopya yok." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "use_chap_auth=Doğru ile Cinder tarafından yönetilmeyen bir istemci yeniden " "kullanılamadı," msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "Bilinmeyen yapılandırılmış CHAP kimlik bilgileri ile istemci yeniden " "kullanılamadı." #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "Retype yapılamıyor: %s mantıksal sürücüsünün bir kopyası mevcut. Retype " "yapma 2 kopya limitinin aşılmasına sebep olur." #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "Retype yapılamıyor: Mevcut eylem mantıksal sürücü kopyalamaya ihtiyaç duyar, " "yeni tür çoğaltma olduğunda buna izin verilmez. Mantıksal sürücü = %s" #, python-format msgid "Unable to snap Consistency Group %s" msgstr "Tutarlılık Grubu %s kavranamıyor" msgid "Unable to terminate volume connection from backend." msgstr "" "Art alanda çalışan uygulamadan mantıksal sürücü bağlantısı sonlandırılamadı." 
#, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "Mantıksal sürücü bağlantısı sonlandırılamadı: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "%s tutarlılık grubu güncellenemedi" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "Hatalı durum nedeniyle tür güncellenemedi: %(vol_id)s sürücüsü durumu: " "%(vol_status)s. Mantıksal sürücü durumu kullanılabilir ya da kullanımda " "olmalıdır." #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" "%(maskingViewName)s maskeleme görünümündeki %(igGroupName)s başlatıcı grubu " "doğrulanamıyor. " msgid "Unacceptable parameters." msgstr "Kabul edilemez parametreler var." #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" "%(id)s eşleştirmesi için beklenmeyen eşleştirme durumu %(status)s. " "Öznitelikler: %(attr)s." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "Beklenmedik CLI yanıtı: başlık/satır eşleşmiyor. başlık: %(header)s, satır: " "%(row)s." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "%(id)s eşleştirmesi için beklenmeyen eşleştirme durumu %(status)s. " "Öznitelikler: %(attr)s." msgid "Unexpected response from Nimble API" msgstr "Nimble API'sinden beklenmeyen yanıt" msgid "Unexpected status code" msgstr "Beklenmeyen durum kodu" msgid "Unknown Gluster exception" msgstr "Bilinmeyen Gluster istisnası" msgid "Unknown NFS exception" msgstr "Bilinmeyen NFS istisnası" msgid "Unknown RemoteFS exception" msgstr "Bilinmeyen RemoteFS istisnası" msgid "Unknown SMBFS exception." msgstr "Bilinmeyen SMBFS istisnası." msgid "Unknown Virtuozzo Storage exception" msgstr "Bilinmeyen Virtuozzo Depolama istisnası" msgid "Unknown action" msgstr "Bilinmeyen eylem" #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "Bilinmeyen ya da desteklenmeyen komut %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "Bilinmeyen iletişim kuralı: %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "%(unknown)s bilinmeyen kota kaynakları." msgid "Unknown service" msgstr "Bilinmeyen servis" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Bilinmeyen sıralama yönü, 'desc' ya da 'asc'" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "Bilinmeyen sıralama yönü, 'desc' ya da 'asc' olmalıdır." msgid "Unmanage volume not implemented." msgstr "Mantıksal sürücünün yönetimini bırakma uygulanmadı." #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "Tanınmayan QOS anahtarı: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "Tanınmayan destekleme biçimi: %s " #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Tanınmayan silinmiş okuma değeri '%s'" msgid "Unsupported Clustered Data ONTAP version." msgstr "Desteklenmeyen Kümelenmiş Veri ONTAP sürümü." msgid "Unsupported Content-Type" msgstr "Desteklenmeyen içerik türü" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" "Desteklenmeyen Veri ONTAP sürümü. Veri ONTAP sürümü 7.3.1 ve yukarısı " "destekleniyor." 
#, python-format msgid "Unsupported backup metadata version (%s)" msgstr "Desteklenmeyen yedekleme metadata sürümü (%s)" msgid "Unsupported backup metadata version requested" msgstr "Desteklenmeyen bir yedekleme metadata sürümü isteniyor" msgid "Unsupported backup verify driver" msgstr "Desteklenmeyen yedekleme doğrulama sürücüsü" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "%s anahtarında desteklenmeyen üretici yazılımı. Anahtarın v6.4 ya da daha " "yüksek üretici yazılımı kullandığından emin olun" #, python-format msgid "Unsupported volume format: %s " msgstr "Desteklenmeyen mantıksal sürücü biçimi: %s " msgid "Updated At" msgstr "Güncelleme saati" msgid "Upload to glance of attached volume is not supported." msgstr "Eklenti mantıksal sürücüsü glance'ine yükleme desteklenmiyor." msgid "User ID" msgstr "Kullanıcı ID" msgid "User does not have admin privileges" msgstr "Kullanıcı yönetici ayrıcalıklarına sahip değil" msgid "User is not authorized to use key manager." msgstr "Kullanıcı anahtar yöneticisi kullanarak yetkilendirilemez." msgid "User not authorized to perform WebDAV operations." msgstr "Kullanıcı WebDAV işlemleri yapmaya yetkili değil." msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "" "V2 rollback - Öntanımlı depolama grubunun dışında başka depolama grubundaki " "mantıksal sürücü" msgid "V2 rollback, volume is not in any storage group." msgstr "V2 rollback, mantıksal sürücü herhangi bir depolama grubunda değil." msgid "V3 rollback" msgstr "V3 geridönüş" #, python-format msgid "VV Set %s does not exist." msgstr "VV Kümesi %s mevcut değil." #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "QoS özelliklerinin geçerli tüketicisi: %s" #, python-format msgid "Valid control location are: %s" msgstr "Geçerli kontrol konumu: %s" #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" "\"%(option)s\" yapılandırma seçeneği için \"%(value)s\" değeri geçersizdir" msgid "Value required for 'scality_sofs_config'" msgstr "'scality_sofs_config' için gerekli değer" #, python-format msgid "ValueError: %s" msgstr "ValueError: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "%(name)s vdiski %(src)s -> %(tgt)s eşleştirmesiyle ilgili değil." #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "Sanal mantıksal sürücü '%s' dizide mevcut değil." #, python-format msgid "Vol copy job for dest %s failed." msgstr "%s hedefi için mantıksal sürücü kopyalama işi başarısız." #, python-format msgid "Volume %(deviceID)s not found." msgstr "%(deviceID)s mantıksal sürücüsü bulunamadı." #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "Dizide %(name)s mantıksal sürücüsü bulunamadı. Eşleştirilmiş mantıksal " "sürücünün olup olmadığı belirlenemiyor." #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "Mantıksal sürücü %(vol)s %(pool)s havuzunda oluşturulamadı." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "Mantıksal sürücü %(vol_id)s durumu kullanılabilir ya da kullanımda olmalıdır " "ancak mevcut durum: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." 
msgstr "" "Mantıksal sürücü %(vol_id)s durumu genişletmek için kullanılabilir olmalıdır " "ancak mevcut durum: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" " %(vol_id)s mantıksal sürücü durumu salt okunur bayrağını güncelleyebilmek " "için kullanılabilir olmalıdır ancak mevcut durum: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "%(vol_id)s mantıksal sürücü durumu kullanılabilir olmalıdır ancak mevcut " "durum: %(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "%(volume_id)s bölümü bulunamadı." #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "%(volume_id)s mantıksal sürücüsü %(metadata_key)s anahtarı ile hiçbir " "yönetici metadata'sına sahip değil." #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "%(volume_id)s mantıksal sürücüsü %(metadata_key)s anahtarı ile hiçbir " "metadata'ya sahip değil." #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "Mantıksal sürücü %(volume_id)s %(group)s desteklenmeyen istemci grubuyla " "eşleştirilmiş" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "" "%(volume_id)s mantıksal sürücüsü şu an %(host)s istemcisiyle eşleştirilmemiş" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" "Mantıksal sürücü %(volume_id)s hala ekli, ilk olarak mantıksal sürücüyü ayır." #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "Mantıksal sürücü %(volume_id)s kopyalama hatası: %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "Mantıksal sürücü %(volume_name)s meşgul." #, python-format msgid "Volume %s could not be created from source volume." msgstr "Mantıksal sürücü %s kaynak mantıksal sürücüden oluşturulamadı." #, python-format msgid "Volume %s could not be created on shares." msgstr "Mantıksal sürücü %s paylaşımlarda oluşturulamadı." #, python-format msgid "Volume %s could not be created." msgstr "Mantıksal sürücü %s oluşturulamadı." #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "" "Mantıksal sürücü %s belirtilmiş provider_location değerine sahip değil, bu " "adım geçiliyor." #, python-format msgid "Volume %s doesn't exist on array." msgstr "Mantıksal sürücü %s dizide mevcut değil." #, python-format msgid "Volume %s is already part of an active migration." msgstr "Mantıksal sürücü %s zaten aktif göçün parçasıdır." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Mantıksal sürücü %s bir tutarlılık grubununun parçası olmamalıdır." #, python-format msgid "Volume %s must not be replicated." msgstr "Mantıksal sürücü %s çoğaltılmış olmamalıdır." #, python-format msgid "Volume %s must not have snapshots." msgstr "Mantıksal sürücü %s anlık sistem görüntülerine sahip olmamalıdır." #, python-format msgid "Volume %s not found." msgstr "%s mantıksal sürücüsü bulunamadı." 
#, python-format msgid "Volume %s: Error trying to extend volume" msgstr "Mantıksal Sürücü %s: Mantıksal sürücü genişletme denenirken hata" #, python-format msgid "Volume (%s) already exists on array" msgstr "Mantıksal sürücü (%s) zaten dizi üzerinde mevcut" #, python-format msgid "Volume (%s) already exists on array." msgstr "Mantıksal sürücü (%s) dizide zaten mevcut." #, python-format msgid "Volume Group %s does not exist" msgstr "Mantıksal Sürücü %s yok" #, python-format msgid "Volume Type %(id)s already exists." msgstr "Mantıksal Sürücü Türü %(id)s zaten var." #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "%(type_id)s Mantıksal Sürücü Türü, %(id)s anahtarlı hiçbir ek özelliğe sahip " "değil." #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "Türde mevcut mantıksal sürücü varsa %(volume_type_id)s mantıksal sürücü " "silmeye izin verilmez." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "%(volume_type_id)s mantıksal sürücü türü %(extra_specs_key)s anahtarı ile " "hiçbir ek özelliğe sahip değil." msgid "Volume Type id must not be None." msgstr "Mantıksal Sürücü Türü bilgisi Hiçbiri olamaz." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "CloudByte depolamada [%s] mantıksal sürücüsü bulunamadı." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "Mantıksal sürücü eki şu süzgeç ile bulunamadı: %(filter)s ." #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "" "Mantıksal sürücü art alanda çalışan uygulama yapılandırması geçersizdir: " "%(reason)s" msgid "Volume by this name already exists" msgstr "Bu isimde mantıksal sürücü zaten mevcut" msgid "Volume cannot be restored since it contains snapshots." msgstr "Mantıksal sürücü anlık görüntüler içerdiğinden geri yüklenemiyor." #, python-format msgid "Volume device file path %s does not exist." msgstr "Mantıksal sürücü dosya yolu %s yok." #, python-format msgid "Volume device not found at %(device)s." msgstr "Mantıksal sürücü aygıtı %(device)s'da bulunamadı." #, python-format msgid "Volume driver %s not initialized." msgstr "Mantıksal sürücü sürücüsü %s ilklendirilmemiş." msgid "Volume driver not ready." msgstr "Mantıksal sürücü hazır değil." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "Mantıksal sürücü bir hata bildirdi: %(message)s" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "%s tutarlılık grubunda mantıksal sürücü ekli. İlk önce ayrılması gerekir." msgid "Volume in consistency group still has dependent snapshots." msgstr "" "Tutarlılık grubundaki mantıksal sürücü hala bağımlı anlık sistem " "görüntülerine sahiptir." msgid "Volume is in-use." msgstr "Mantıksal sürücü kullanımda." msgid "Volume is not local to this node" msgstr "Mantıksal sürücü bu düğüme yerel değil" msgid "Volume is not local to this node." msgstr "Mantıksal sürücü bu düğüme yerel değil." msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "Mantıksal sürücü metadata yedekleme istedi ancak bu sürücü henüz bu özelliği " "desteklemiyor." 
#, python-format msgid "Volume migration failed: %(reason)s" msgstr "Mantıksal sürücü göçü başarısız oldu: %(reason)s" msgid "Volume must be available" msgstr "Mantıksal sürücü kullanılabilir olmalıdır" msgid "Volume must be in the same availability zone as the snapshot" msgstr "Mantıksal sürücü anlık görüntüyle aynı kullanılabilir bölgede olmalı" msgid "Volume must be in the same availability zone as the source volume" msgstr "" "Mantıksal sürücü kaynak mantıksal sürücüyle aynı kullanılabilir bölgede " "olmalı" msgid "Volume must not be part of a consistency group." msgstr "Mantıksal sürücü bir tutarlılık grubunun parçası olmamalıdır." msgid "Volume must not be replicated." msgstr "Mantıksal sürücü çoğaltılmış olmamalı." msgid "Volume must not have snapshots." msgstr "Mantıksal sürücü anlık sistem görüntülerine sahip olmamalıdır." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "%(instance_id)s sunucusu için mantıksal sürücü bulunamadı." msgid "Volume not found on configured storage backend." msgstr "Mantıksal sürücü yapılandırılmış depolama arka ucunda bulunamadı." msgid "Volume not found on configured storage pools." msgstr "Mantıksal sürücü yapılandırılan depolama havuzlarında bulunamadı." msgid "Volume not found." msgstr "Mantıksal sürücü bulunamadı." msgid "Volume not yet assigned to host." msgstr "Mantıksal sürücü henüz bir istemciye atanmadı." msgid "Volume reference must contain source-name element." msgstr "Mantıksal sürücü kaynağı kaynak-ad ögesi içermelidir." #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "%(volume_id)s için mantıksal sürücü kopyalaması bulunamadı." #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "Mantıksal sürücü boyutu %(volume_size)sGB %(min_disk)sGB imaj asgari " "boyutundan küçük olamaz." #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "Mantıksal sürücü boyutu '%(size)s' bir tam sayı ve 0'dan büyük olmalı" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "Mantıksal sürücü boyutu '%(size)s'GB %(source_size)sGB asıl mantıksal sürücü " "boyutundan küçük olamaz. Asıl mantıksal sürücü boyutundan >= olmalıdırlar." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "Mantıksal sürücü boyutu '%(size)s'GB %(snap_size)sGB anlık görüntü " "boyutundan küçük olamaz. Asıl anlık görüntü boyutundan >= olmalıdırlar." msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "Mantıksal sürücü boyutu son yedeklemeden bu yana arttı. Tam bir yedekleme " "yapın." #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" "Mantıksal sürücü için mantıksal sürücü durumu kullanılabilir olmalıdır ancak " "mevcut durum: %s" msgid "Volume status is in-use." msgstr "Mantıksal sürücü durumu kullanımda." #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "Mantıksal sürücü durumu \"available\" ya da \"in-use\" olmalıdır. (is %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "Mantıksal sürücü durumu \"available\" ya da \"in-use\" olmalıdır." msgid "Volume status must be 'available'." 
msgstr "Mantıksal sürücü durumu 'available' olmalıdır." msgid "Volume to Initiator Group mapping already exists" msgstr "Başlatıcı Gruba mantıksal sürücü eşleme zaten var" msgid "Volume to be restored to must be available" msgstr "Geri yüklenecek mantıksal sürücü kullanılabilir olmalıdır" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "%(volume_type_id)s mantıksal sürücü türü bulunamadı." #, python-format msgid "Volume type ID '%s' is invalid." msgstr "Mantıksal sürücü tür kimliği '%s' geçersiz." #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "%(volume_type_id)s / %(project_id)s birleşimi için mantıksal sürücü türü " "erişimi zaten var." #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "%(volume_type_id)s / %(project_id)s birleşimi için mantıksal sürücü erişimi " "bulunamadı." #, python-format msgid "Volume type does not match for share %s." msgstr "Mantıksal sürücü türü %s paylaşımıyla uyuşmuyor." #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "%(type_id)s türü için mantıksal sürücü şifreleme zaten var." #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "%(type_id)s türü için mantıksal sürücü türü yok." msgid "Volume type name can not be empty." msgstr "Mantıksal sürücü türü boş olamaz." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "%(volume_type_name)s adında mantıksal sürücü türü bulunamadı." #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "Mantıksal sürücü: %(volumeName)s bitiştirilmiş bir mantıksal sürücü değil. " "Büyütmeyi ancak bitiştirilmiş mantıksal sürücü üzerinde yapabilirsiniz. " "Çıkılıyor..." msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "Mantıksal sürücüler bu boyutta nesnelere bölünecek (megabayt olarak)." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "VzStorage 'vzstorage_used_ratio' yapılandırması geçersiz. Değer > 0 ve <= " "1.0 olmalıdır: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "%(config)s konumunda VzStorage yapılandırma dosyası yok." msgid "Wait replica complete timeout." msgstr "Kopyanın tamamlanmasını bekleme zaman aşımına uğradı." msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "XtremIO düzgün yapılandırılmamış, iscsi portalı bulunamadı" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO düzgün ilklendirilmemiş, küme bulunamadı" msgid "You must implement __call__" msgstr "__call__ fonksiyonunu uygulamalısınız." msgid "You must supply an array in your EMC configuration file." msgstr "EMC yapılandırma dosyasında bir dizi sağlamalısınız." #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "Özgün boyutunuz: %(originalVolumeSize)s GB yeni boyuttan daha büyük: " "%(newSize)s GB. Sadece Genişletme destekleniyor. Çıkılıyor..." 
#, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "Bölge" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Bölgeleme İlkesi: %s, tanınmıyor" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "" "_create_and_copy_vdisk_data: %s vdiski için özniteliklerin alınması " "başarısız." msgid "_create_host failed to return the host name." msgstr "_create_host istemci adını döndürmede başarısız." msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: Makine adı dönüştürülemieyor. İstemci ismi evrensel kodda veya " "karakter dizisi değil." msgid "_create_host: No connector ports." msgstr "_create_host: Bağlayıcı bağlantı noktası yok." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - CLI çıktısında başarı iletisi bulunamadı.\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: öznitelik başlıkları ve değerleri eşleşmiyor.\n" " Başlıklar: %(header)s\n" " Değerler: %(row)s." msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "_get_host_from_connector bağlayıcı için istemci adını getirmeyi başaramadı." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties: İstemci-mantıksal sürücü bağlantısı için FC " "bağlantı bilgisi alınamadı. İstemci FC bağlantıları için düzgün " "yapılandırılmış mı?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: %(vol)s mantıksal sürücüsü için %(gid)s I/O " "grubunda düğüm bulunamadı." msgid "_update_volume_stats: Could not get storage pool data." msgstr "_update_volume_stats: Depolama havuzu verisi alınamadı." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "add_vdisk_copy başarısız: %s mantıksal sürücüsünün kopyası mevcut. Başka bir " "kopya eklemek 2 kopya sınırının aşılmasına sebep olur." msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "add_vdisk_copy beklenen havuzda bir vdisk kopyası olmadan başlatıldı." 
msgid "already created" msgstr "zaten oluşturuldu" #, python-format msgid "attribute %s not lazy-loadable" msgstr "%s özniteliği tembel-yüklenebilir değil" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "yedek: %(vol_id)s %(vpath)s den %(bpath)s e aygıt linki oluşturmada " "başarısız.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s sunucudan yedekleme başarı bildirimini alamadı.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s geçersiz değişkenler nedeniyle %(bpath)s yolundaki dsmc " "komutunu çalıştıramadı.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s %(bpath)s yolundaki dsmc komutunu çalıştıramadı.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "backup: %(vol_id)s başarısız oldu. %(path)s dosya değildir." #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "backup: %(vol_id)s başarısız oldu. %(path)s beklenmeyen dosya türü. Blok ya " "da normal dosyalar desteklenir, mevcut dosya kipi %(vol_mode)s." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "backup: %(vol_id)s başarısız oldu. %(path)s yolundaki mantıksal sürücüye " "gerçek yol sağlanamıyor." msgid "being attached by different mode" msgstr "farklı kipler ile eklenme" msgid "can't find the volume to extend" msgstr "büyütülecek mantıksal sürücü bulunamadı" msgid "can't handle both name and index in req" msgstr "istekte hem isim hem indis işlenemez" msgid "cannot understand JSON" msgstr "JSON dosyası anlaşılamadı" msgid "cannot understand XML" msgstr "XML anlaşılamaz" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error: Yetkilendirme için parola veya SSH özel anahtarı " "gerekiyor: san_password veya san_private_key seçeneklerinden birini " "ayarlayın." msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error: Sistem kimliği belirlenemiyor." msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error: Sistem ismi belirlenemiyor." #, python-format msgid "clone depth exceeds limit of %s" msgstr "çoğaltma derinliği %s sınırını aşıyor" msgid "control_location must be defined" msgstr "control_location tanımlanmalıdır" msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume: Kaynak ve hedef boyutu farklı." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: Kaynak vdisk %(src)s (%(src_id)s) mevcut değil." #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy: Kaynak vdisk %(src)s mevcut değil." msgid "create_host: Host name is not unicode or string." 
msgstr "create_host: İstemci adı evrensel kod ya da karakter dizisi değil." msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: Başlatıcılar veya wwpn'ler sağlanmadı." #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot: Anlık görüntü için mantıksal sürücü durumu \"kullanılabilir" "\" veya \"kullanımda\" olmalı. Geçersiz durum %s." msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: kaynak mantıksal sürücüyü alma başarısız." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "create_volume_from_snapshot: Anlık görüntü %(name)s mevcut değil." #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: Mantıksal sürücü oluşturma için anlık görüntü " "durumu \"kullanılabilir\" olmalı. Geçersiz durum: %s." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "create_volume_from_snapshot: Kaynak ve hedef boyutu farklı." msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "create_volume_from_snapshot: Mantıksal sürücü boyutu anlık görüntü tabanlı " "mantıksal sürücüden farklı." msgid "deduplicated and auto tiering can't be both enabled." msgstr "kopyasız sıkıştırma ve otomatik aşamalama aynı anda etkin olamaz." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "delete: %(vol_id)s geçersiz değişkenler nedeniyle dsmc komutunu " "çalıştıramadı stdout çıktısı ile: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "delete: %(vol_id)s dsmc komutunu çalıştıramadı stdout çıktısı ile: %(out)s\n" " stderr: %(err)s" #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" "bağımlı mantıksal sürücülere sahip olan %(snapshot_name)s anlık sistem " "görüntüsü siliniyor" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" "anlık sistem görüntüsüne sahip olan %(volume_name)s mantıksal sürücüsü " "siliniyor" msgid "do_setup: No configured nodes." msgstr "do_setup: Yapılandırılmış düğüm yok." msgid "element is not a child" msgstr "eleman çocuk değil" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries 0'a eşit ya da daha büyük olmalıdır" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "swift'e nesne yazarken hata, swift içindeki nesnenin MD5 değeri %(etag)s " "swift'e gönderilen nesnenin MD5 %(md5)s değeri ile aynı değildir" msgid "failed to create new_volume on destination host" msgstr "Hedef istemci üzerinde yeni mantıksal sürücü oluşturulamadı" msgid "fake" msgstr "sahte" #, python-format msgid "file already exists at %s" msgstr "dosya %s konumunda zaten var" msgid "fileno() not supported by RBD()" msgstr "fileno() RBD() tarafından desteklenmez" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." 
msgstr "" "flashsystem_multihostmap_enabled False olarak ayarlanmış, çoklu istemci " "eşleştirmeye izin verme. CMMVC6071E VDisk-to-host eşleştirmesi oluşturulmadı " "çünkü VDisk zaten bir istemciye eşleştirilmiş." msgid "flush() not supported in this version of librbd" msgstr "flush() librbd kütüphanesinin bu sürümünde desteklenmiyor" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s şununla desteklenir: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s şununla desteklenir:%(backing_file)s" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode copy_on_write olarak ayarlanmış, ama %(vol)s ve " "%(img)s farklı dosya sistemlerine ait." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode copy_on_write olarak ayarlanmış, ama %(vol)s ve " "%(img)s farklı dosya kümelerine ait." #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "hgst_group %(grp)s ve hgst_user %(usr)s cinder.conf dosyasında geçerli " "kullanıcılar/gruplar olarak eşlenmelidir" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "" "cinder.conf dosyasında belirtilen hgst_net %(net)s küme içinde bulunamadı" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "hgst_redundancy cinder.conf dosyasında 0 (non-HA) ya da 1 (HA) olarak " "ayarlanmalıdır." msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "hgst_space_mode cinder.conf dosyasında bir sekizli/tam sayı olmalıdır" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "hgst_storage sunucusu %(svr)s : biçiminde değil" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "hgst_storage_servers cinder.conf dosyasında tanımlanmalıdır" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "http servisi bu işlemin ortasında ansızın kapatılmış ya da bakım durumuna " "alınmış olabilir." msgid "id cannot be None" msgstr "id Hiçbiri olamaz" #, python-format msgid "image %s not found" msgstr "imaj %s bulunamadı" #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "" "initialize_connection: %s mantıksal sürücüsü için özniteliklerin alınması " "başarısız." #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "" "initialize_connection: %s mantıksal sürücüsü için mantıksal sürücü " "özniteliği eksik." #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: %(vol)s mantıksal sürücüsü için %(gid)s I/O grubundan " "düğüm bulunamadı." #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: vdisk %s tanımlanmamış." #, python-format msgid "invalid user '%s'" msgstr "geçersiz kullanıcı '%s'" #, python-format msgid "iscsi portal, %s, not found" msgstr "iscsi portalı, %s bulunamadı" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "'iSCSI' iletişim kuralı kullanılırken iscsi_ip_address yapılandırma " "dosyasında ayarlanmalı." 
#, python-format msgid "key manager error: %(reason)s" msgstr "anahtar yönetici hatası: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key tanımlanamadı" msgid "limit param must be an integer" msgstr "Sınır parametresi tam sayı olmak zorunda" msgid "limit param must be positive" msgstr "Sınır parametresi pozitif olmak zorunda" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing varolan bir mantıksal sürücüyü tanımlamak için bir 'name' " "anahtarı ister." #, python-format msgid "marker [%s] not found" msgstr " [%s] göstergesi bulunamadı" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "mdiskgrp eksik kotalar %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "migration_policy 'on-demand' ya da 'never' olmalıdır, geçilen: %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "" "mkfs %(vol)s mantıksal sürücüsü üzerinde başarısız oldu, hata iletisi: " "%(err)s." msgid "mock" msgstr "sahte" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs kurulu değil" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "drbdmanage tarafından %s ismine sahip birden fazla kaynak bulundu" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "%s anlık sistem görüntüsü ile birden fazla kaynak bulundu" msgid "name cannot be None" msgstr "ad Hiçbiri olamaz" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path: NAVISECCLI araç %(path)s bulunamadı." #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "drbdmanage içinde %s anlık sistem görüntüsü bulunamadı" #, python-format msgid "not exactly one snapshot with id %s" msgstr "tam olarak %s kimliğine sahip tek bir anlık görüntü değil" #, python-format msgid "not exactly one volume with id %s" msgstr "tam olarak %s kimliğine sahip tek bir mantıksal sürücü değil" #, python-format msgid "obj missing quotes %s" msgstr "obj eksik kotalar %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled kapalı değil." msgid "progress must be an integer percentage" msgstr "ilerleme bir tam sayı yüzdesi olmalıdır" msgid "promote_replica not implemented." msgstr "promote_replica uygulanmadı." msgid "provider must be defined" msgstr "sağlayıcı tanımlanmalıdır" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "qemu-img %(minimum_version)s ya da sonraki sürümler bu mantıksal sürücü için " "gereklidir. Mevcut qemu-img sürümü: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img kurulu değil ve imaj türü %s. Eğer qemu-img kurulu değilse, sadece " "RAW imajlar kullanılabilir." msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img kurulu değil ve disk biçimi belirtilmemiş. Eğer qemu-img kurulu " "değilse, sadece RAW imajlar kullanılabilir." 
msgid "rados and rbd python libraries not found" msgstr "rados ve rbd python kütüphaneleri bulunamadı" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted sadece 'no', 'yes' ya da 'only', %r hariç seçeneklerinden biri " "olabilir" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "restore: %(vol_id)s geçersiz değişkenler nedeniyle %(bpath)s yolundaki dsmc " "komutunu çalıştıramadı.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "restore: %(vol_id)s %(bpath)s yolundaki dsmc komutunu çalıştıramadı.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "restore: %(vol_id)s başarısız oldu.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup durduruldu, gerçek nesne listesi metadata'da depolanan nesne " "listesi ile eşleşmiyor." msgid "root element selecting a list" msgstr "listeden kök elemanı seçiliyor" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "rtslib_fb eksik %s üyesidir: Daha yeni bir python-rtslib-fb kütüphanesine " "ihtiyacınız olabilir." msgid "san_ip is not set." msgstr "san_ip ayarlanmamış." msgid "san_ip must be set" msgstr "san_ip ayarlanmış olmalı" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip: Gerekli alan yapılandırması. san_ip ayarlanmamış." msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "cinder.conf dosyasında Datera sürücüsü için san_login ve/ya da san_password " "ayarlı değil. Bu bilgileri ayarla ve cinder-volume servisini tekrar başlat." msgid "serve() can only be called once" msgstr "serve() sadece bir kez çağrılabilir" msgid "service not found" msgstr "servis bulunamadı" msgid "snapshot does not exist" msgstr "anlık sistem görüntüsü yok" #, python-format msgid "snapshot id:%s not found" msgstr "anlık sistem görüntü kimliği:%s bulunamadı" #, python-format msgid "source vol id:%s not found" msgstr "kaynak mantıksal sürücü kimliği:%s bulunamadı" #, python-format msgid "source volume id:%s is not replicated" msgstr "kaynak mantıksal sürücü kimliği:%s çoğaltılmamıştır" msgid "status must be available" msgstr "durum kullanılabilir olmalıdır" msgid "subclasses must implement construct()!" msgstr "alt sınıflar yapıcı() oluşturmak zorunda!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo başarısız oldu, hiçbir şey olmamış gibi devam ediliyor" msgid "sync_replica not implemented." msgstr "sync_replica uygulanmadı." #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli kurulu değil ve öntanımlı dizin (%(default_path)s) oluşturulamadı: " "%(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "terminate_connection: Bağlayıcıdan istemci adı alma başarısız." 
msgid "timeout creating new_volume on destination host" msgstr "" "Hedef istemci üzerinde yeni mantıksal sürücü oluşturulurken zaman aşımı" msgid "too many body keys" msgstr "Çok sayıda gövde anahtarları" #, python-format msgid "umount: %s: not mounted" msgstr "umount: %s: bağlı değil" #, python-format msgid "umount: %s: target is busy" msgstr "umount: %s: hedef meşgul" msgid "umount: : some other error" msgstr "umount: : bazı diğer hata" msgid "umount: : target is busy" msgstr "umount: : hedef meşgul" #, python-format msgid "unrecognized argument %s" msgstr "tanınmayan değişken %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "desteklenmeyen sıkıştırma algoritması: %s" msgid "valid iqn needed for show_target" msgstr "show_target için geçerli iqn gerekli" #, python-format msgid "vdisk %s is not defined." msgstr "vdisk %s tanımlanmamış." msgid "vmemclient python library not found" msgstr "vmemclient python kitaplığı bulunamadı" #, python-format msgid "volume %s not found in drbdmanage" msgstr "drbdmanage içinde %s mantıksal sürücüsü bulunamadı" msgid "volume assigned" msgstr "mantıksal sürücü atandı" msgid "volume changed" msgstr "mantıksal sürücü değiştirildi" msgid "volume does not exist" msgstr "mantıksal sürücü yok" msgid "volume is already attached" msgstr "mantıksal sürücü zaten ekli" msgid "volume is not local to this node" msgstr "mantıksal sürücü bu düğüme yerel değil" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "%(volume_size)d mantıksal sürücü boyutu %(size)d boyutundaki yedeği geri " "yüklemek için çok küçük." msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "Tutarlılık grubunda bir mantıksal sürücü oluşturulurken volume_type " "verilmelidir." msgid "volume_type_id cannot be None" msgstr "volume_type_id Hiçbiri olamaz" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "volume_types, tutarlılık grubu %(name)s oluşturmak için verilmelidir." #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "%s tutarlılık grubu oluşturmak için volume_types verilmelidir." cinder-8.0.0/cinder/locale/zh_CN/0000775000567000056710000000000012701406543017632 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/zh_CN/LC_MESSAGES/0000775000567000056710000000000012701406543021417 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/zh_CN/LC_MESSAGES/cinder.po0000664000567000056710000116163012701406257023235 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # FIRST AUTHOR , 2011 # Kai Zhang , 2013 # Kai Zhang , 2013 # openstack , 2013 # Shuwen SUN , 2014 # Tom Fifield , 2013 # 颜海峰 , 2014 # Yu Zhang, 2014 # 颜海峰 , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Linda , 2016. #zanata # Lucas Palm , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev19\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:34+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-22 10:28+0000\n" "Last-Translator: Linda \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder 版本:%(version)s\n" #, python-format msgid " but size is now %d" msgstr "但现在大小为 %d" #, python-format msgid " but size is now %d." msgstr "但现在大小为 %d。" msgid " or " msgstr "或者" #, python-format msgid "%(attr)s is not set." msgstr "未设置 %(attr)s。" #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing 无法管理已连接至主机的卷。在进行导入之前,请从现有" "主机断开与此卷的连接" #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "结果:%(res)s。" #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "发生异常 %(exception)s:原因 %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s:许可权被拒绝。" #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s:失败,产生了意外 CLI 输出。\n" "命令:%(cmd)s\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "状态码: %(_status)s\n" "主体: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s,subjectAltName:%(sanList)s。" #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s:创建 NetworkPortal:请确保 IP %(ip)s 上的端口 %(port)d未被另一" "项服务使用。" #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s 必须至少具有的字符数为 %(min_length)s。" #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s 包含的字符超过 %(max_length)s 个。" #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s:备份 %(bck_id)s(针对卷 %(vol_id)s)失败。备份对象具有意外方式。支持" "映像或文件备份,实际方式为 %(vol_mode)s。" #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "在以下存储设备上,%(service)s 服务并非处于 %(status)s 状态:%(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s 必须小于或等于 %(max_value)d" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s 必须大于或等于 %(min_value)d" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." 
msgstr "%(workers)d 的 %(worker_name)s 值无效,必须大于 0。" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "结果中没有 %s “数据”。" #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "%s 无法访问。请验证 GPFS 是否处于活动状态并且文件系统是否已安装。" #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "%s 无法使用克隆操作来调整大小,因为它未包含任何块。" #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "%s 无法使用克隆操作来调整大小,因为它托管于压缩卷上" #, python-format msgid "%s configuration option is not set." msgstr "未设置 %s 配置选项。" #, python-format msgid "%s does not exist." msgstr "%s 不存在。" #, python-format msgid "%s is not a directory." msgstr "%s 不是一个目录。" #, python-format msgid "%s is not a string or unicode" msgstr "%s 不是字符串或 Unicode" #, python-format msgid "%s is not installed" msgstr "未安装 %s" #, python-format msgid "%s is not installed." msgstr "未安装 %s。" #, python-format msgid "%s is not set" msgstr "未设置 %s " #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "未设置 %s,它是使复制设备生效所必需的。" #, python-format msgid "%s is not set." msgstr "未设置 %s。" #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s 必须为有效的 raw 映像或 qcow2 映像。" #, python-format msgid "%s must be an absolute path." msgstr "%s 必须为绝对路径。" #, python-format msgid "%s must be an integer." msgstr "%s 必须为整数。" #, python-format msgid "%s not set in cinder.conf" msgstr "cinder.conf 中未设置 %s" #, python-format msgid "%s not set." msgstr "未设置 %s。" #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "对于配置文件中的 flashsystem_connection_protocol,“%(prot)s”无效。有效值为 " "%(enabled)s。" msgid "'active' must be present when writing snap_info." msgstr "写入 snap_info 时,状态必须为“活动”。" msgid "'consistencygroup_id' must be specified" msgstr "必须指定“consistencygroup_id”" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info'解析失败" msgid "'status' must be specified." msgstr "必须指定“status”。" msgid "'volume_id' must be specified" msgstr "必须指定“volume_id”" msgid "'{}' object has no attribute '{}'" msgstr "'{}' 对象没有属性 '{}'" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(命令:%(cmd)s)(返回码:%(exit_code)s)(标准输出:%(stdout)s)(标准错" "误:%(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "找不到 LUN (HLUN)。(逻辑设备:%(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "发出了并行的可能对立的请求。" #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "找不到可用的 LUN (HLUN)。请添加另一主机组。(逻辑设备:%(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "未能添加主机组。(端口为 %(port)s,名称为 %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "未能删除主机组。(端口为 %(port)s,组标识为 %(gid)s,名称为 %(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "主机组无效。(主机组:%(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "无法删除对。(P-VOL 为 %(pvol)s,S-VOL 为 %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. 
(copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "未能创建对。超过最大对数。(复制方法为 %(copy_method)s,P-VOL 为 %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "参数无效。(%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "参数值无效。(%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "找不到池。(池标识:%(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "快照状态无效。(状态:%(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "必须指定有效辅助以进行故障转移。" msgid "A volume ID or share was not specified." msgstr "未指定卷标识或者共享。" #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "卷状态无效。(状态:%(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s 失败,带有错误字符串 %(err)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API 版本字符串 %(version)s 为无效格式。必须为以下格式:MajorNum.MinorNum。" msgid "API key is missing for CloudByte driver." msgstr "CloudByte 驱动程序缺少 API 键。" #, python-format msgid "API response: %(response)s" msgstr "API 响应:%(response)s" #, python-format msgid "API response: %s" msgstr "API 响应:%s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API 版本 %(version)s 在此方法上不受支持。" msgid "API version could not be determined." msgstr "未能确定 API 版本。" msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "将删除具有非零配额的子项目。不应执行此操作" msgid "Access list not available for public volume types." msgstr "对于公用卷类型,未提供访问列表。" msgid "Activate or deactivate QoS error." msgstr "激活或者取消激活 QoS 时发生错误。" msgid "Activate snapshot error." msgstr "激活快照时发生错误。" msgid "Add FC port to host error." msgstr "将 FC 端口添加至主机时发生错误。" msgid "Add fc initiator to array error." msgstr "将 FC 启动程序添加至阵列时发生错误。" msgid "Add initiator to array error." msgstr "将启动程序添加至阵列时发生错误。" msgid "Add lun to cache error." msgstr "将 LUN 添加至高速缓存时发生错误。" msgid "Add lun to partition error." msgstr "将 LUN 添加至分区时发生错误。" msgid "Add mapping view error." msgstr "添加映射视图时发生错误。" msgid "Add new host error." msgstr "添加新主机时发生错误。" msgid "Add port to port group error." msgstr "向端口组添加端口时出错。" #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "所指定的要管理的所有存储池都不存在。请检查配置。不存在的池:%s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "必须将 API 版本请求与 VersionedMethod 对象进行比较。" #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "SheepdogDriver 中发生了错误。(原因:%(reason)s)" msgid "An error has occurred during backup operation" msgstr "在备份过程中出现一个错误" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "尝试修改快照“%s”时发生了错误。" #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "查找卷“%s”时发生错误。" #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "在 LUNcopy 操作期间发生错误。LUNcopy 名称为 %(luncopyname)s。LUNcopy 状态为 " "%(luncopystatus)s。LUNcopy 状态为 %(luncopystate)s。" #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "读取卷“%s”时发生错误。" #, python-format msgid "An error occurred while writing to volume \"%s\"." 
msgstr "写入卷“%s”时发生错误。" #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "未能添加 iSCSI CHAP 用户。(用户名:%(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "未能删除 iSCSI CHAP 用户。(用户名:%(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "未能添加 iSCSI 目标。(端口为 %(port)s,别名为 %(alias)s,原因为 %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "未能删除 iSCSI 目标。(端口为 %(port)s,目标号为 %(tno)s,别名为 %(alias)s)" msgid "An unknown exception occurred." msgstr "发生未知异常。" msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "不允许令牌作用域仅限于子项目的用户查看其父代的配额。" msgid "Append port group description error." msgstr "附加端口组描述时出错。" #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "对交换机应用 zones 和 cfgs 失败(错误代码为 %(err_code)s,错误消息为 " "%(err_msg)s)。" #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "阵列不存在或者处于脱机状态。阵列的当前状态为 %s。" msgid "Associate host to hostgroup error." msgstr "使主机与主机组关联时发生错误。" msgid "Associate host to mapping view error." msgstr "使主机与映射视图关联时发生错误。" msgid "Associate initiator to host error." msgstr "使启动程序与主机相关联时发生错误。" msgid "Associate lun to QoS error." msgstr "将 LUN 关联至 QoS 时出错。" msgid "Associate lun to lungroup error." msgstr "使 LUN 与 LUN 组关联时发生错误。" msgid "Associate lungroup to mapping view error." msgstr "使 LUN 组与映射视图关联时发生错误。" msgid "Associate portgroup to mapping view error." msgstr "使端口组与映射视图关联时发生错误。" msgid "At least one valid iSCSI IP address must be set." msgstr "必须至少设置一个有效 iSCSI IP 地址。" #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "请尝试使用有效的认证密钥传输 %s。" #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "在 CloudByte 存储器中找不到认证组 [%s] 详细信息。" msgid "Auth user details not found in CloudByte storage." msgstr "在 CloudByte 存储器中找不到认证用户详细信息。" msgid "Authentication error" msgstr "认证错误" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "认证失败,请验证交换机凭证,错误代码:%s。" msgid "Authorization error" msgstr "授权错误" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "可用性区域“%(s_az)s”无效。" msgid "Available categories:" msgstr "可用的类别:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "在此存储器系列和 ONTAP 版本上,后端 QoS 规范不受支持。" #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "后端不存在 (%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "后端已故障转移。无法故障返回。" #, python-format msgid "Backend reports: %(message)s" msgstr "后端报告:%(message)s" msgid "Backend reports: item already exists" msgstr "后端报告:项已存在" msgid "Backend reports: item not found" msgstr "后端报告:找不到项" msgid "Backend server not NaServer." msgstr "后端服务器不是 NaServer。" #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "后端服务重试超时匹配项:%(timeout)s 秒" msgid "Backend storage did not configure fiber channel target." msgstr "后端存储器未配置光纤通道目标。" msgid "Backing up an in-use volume must use the force flag." msgstr "备份一个正在使用的卷时必须使用强制标志。" #, python-format msgid "Backup %(backup_id)s could not be found." 
msgstr "找不到备份 %(backup_id)s。" msgid "Backup RBD operation failed" msgstr "备份RBD操作失败" msgid "Backup already exists in database." msgstr "数据库中已存在备份。" #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "备份驱动程序已报告错误:%(message)s" msgid "Backup id required" msgstr "需要备份标识" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "具有快照的 GlusterFS 卷不支持备份。" msgid "Backup is only supported for SOFS volumes without backing file." msgstr "仅那些不带支持文件的 SOFS 卷支持备份。" msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "仅原始格式的 GlusterFS 卷支持备份。" msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "仅原始格式的 SOFS 卷支持备份。" msgid "Backup operation of an encrypted volume failed." msgstr "已加密卷的备份操作失败。" #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "备份服务 %(configured_service)s 不支持验证。未验证备份标识 %(id)s。正在跳过验" "证。" #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "备份服务 %(service)s 不支持验证。未验证备份标识 %(id)s。正在跳过重置。" #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "备份应该仅具有一个快照,但是具有 %s 个快照" msgid "Backup status must be available" msgstr "备份状态必须为“可用”" #, python-format msgid "Backup status must be available and not %s." msgstr "备份状态必须为“可用”,不能是 %s。" msgid "Backup status must be available or error" msgstr "备份状态必须为“可用”或“错误”" msgid "Backup to be restored has invalid size" msgstr "要复原的备份具有无效大小" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "返回的状态行不正确:%(arg)s。" #, python-format msgid "Bad key(s) in quota set: %s" msgstr "配额集中的键不正确:%s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "从存储卷后端 API 返回了不正确或意外的响应:%(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "项目格式不正确:项目没有采用正确格式 (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "已将错误请求发送至 Datera 集群:无效参数:%(args)s | %(message)s" msgid "Bad response from Datera API" msgstr "来自 Datera API 的响应不正确" msgid "Bad response from SolidFire API" msgstr "来自SolidFire API的错误响应" #, python-format msgid "Bad response from XMS, %s" msgstr "来自 XMS 的响应不正确,%s" msgid "Binary" msgstr "二进制" msgid "Blank components" msgstr "空组件" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Blockbridge API 认证方案(令牌或密码)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Blockbridge API 密码(对于认证方案“密码”)" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Blockbridge API 令牌(对于认证方案“令牌”)" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Blockbridge API 用户(对于认证方案“密码”)" msgid "Blockbridge api host not configured" msgstr "未配置 Blockbridge API 主机" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "为 Blockbridge 配置了无效认证方案“%(auth_scheme)s”" msgid "Blockbridge default pool does not exist" msgstr "Blockbridge 缺省池不存在" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "未配置 Blockbridge 密码(对于认证方案“密码”,这是必需的)" msgid "Blockbridge pools not configured" msgstr "未配置 Blockbridge 池" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "未配置 Blockbridge 令牌(对于认证方案“令牌”,这是必需的)" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "未配置 
Blockbridge 用户(对于认证方案“密码”,这是必需的)" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "Brocade 光纤通道分区 CLI 错误:%(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Brocade 光纤通道分区 HTTP 错误:%(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "CHAP 密钥应为 12 到 16 个字节。" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "CLI 异常输出:\n" "命令:%(cmd)s\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI 异常输出:\n" "命令:%(cmd)s\n" "标准输出:%(out)s\n" "标准错误:%(err)s。" msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E 未创建 VDisk 至主机的映射,因为该 VDisk 已映射至主机。\n" "\"" msgid "CONCERTO version is not supported" msgstr "不支持 CONCERTO 版本" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "数组中不存在 CPG (%s)" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "高速缓存名称为 None,请在键中设置 smartcache:cachename。" #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "高速缓存卷 %(cache_vol)s 没有快照 %(cache_snap)s。" #, python-format msgid "Cache volume %s does not have required properties" msgstr "高速缓存卷 %s 没有必需属性。" msgid "Call returned a None object" msgstr "调用返回了 None 对象" msgid "Can not add FC port to host." msgstr "无法将 FC 端口添加至主机。" #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "按高速缓存名称 %(name)s 找不到高速缓存标识。" #, python-format msgid "Can not find partition id by name %(name)s." msgstr "按名称 %(name)s 找不到分区标识。" #, python-format msgid "Can not get pool info. pool: %s" msgstr "无法获取池信息。池:%s" #, python-format msgid "Can not translate %s to integer." msgstr "无法把 %s 转换成整数" #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "无法访问“scality_sofs_config”:%s" msgid "Can't attach snapshot." msgstr "无法附加快照。" msgid "Can't decode backup record." msgstr "无法将备份记录解码。" #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "无法扩展复制卷,卷:%(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "在阵列上找不到 LUN,请检查 source-name 或 source-id。" #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "在阵列上找不到高速缓存名称,高速缓存名称为 %(name)s。" #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "在数据库中找不到 LUN 标识,卷:%(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "在阵列上找不到 LUN 信息。卷:%(id)s。LUN 名称:%(name)s。" #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "在阵列上找不到分区名称,分区名称为 %(name)s。" #, python-format msgid "Can't find service: %s" msgstr "找不到以下服务:%s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "在阵列上找不到快照,请检查 source-name 或 source-id。" msgid "Can't find the same host id from arrays." msgstr "在阵列中找不到同一主机标识。" #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "无法通过快照获取卷,快照:%(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "无法获取卷标识,卷名:%s。" #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "无法将 LUN %(lun_id)s 导入至 Cinder。LUN 类型不匹配。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." 
msgstr "无法将 LUN %s 导入至 Cinder。它在 HyperMetroPair 中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 复制任务中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 组中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 镜像中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "无法将 LUN %s 导入至 Cinder。它在 SplitMirror 中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "无法将 LUN %s 导入至 Cinder。它在迁移任务中已存在。" #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "无法将 LUN %s 导入至 Cinder。它在远程复制任务中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "无法将 LUN %s 导入至 Cinder。LUN 状态异常。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "无法将快照 %s 导入至 Cinder。快照不属于卷。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "无法将快照 %s 导入至 Cinder。快照已展示给启动程序。" #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "无法将 LUN %s 导入至 Cinder。快照状态异常或运行状态并非“在线”。" #, python-format msgid "Can't open config file: %s" msgstr "无法打开配置文件 %s" msgid "Can't parse backup record." msgstr "无法解析备份记录。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为它没有任何卷类型。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为它已经存在于一致性组 " "%(orig_group)s 中。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为找不到该卷。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为该卷不存在。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为该卷处于无效状态:" "%(status)s。以下是有效状态:%(valid)s。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为该组不支持卷类型 " "%(volume_type)s。" #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "无法连接已经连接的卷 %s;通过“netapp_enable_multiattach”配置选项禁用了多个连" "接。" msgid "Cannot change VF context in the session." msgstr "无法更改会话中的 VF 上下文。" #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "无法更改 VF 上下文,指定的 VF 在管理 VF 列表 %(vf_list)s 中不可用。" msgid "Cannot connect to ECOM server." 
msgstr "无法连接至 ECOM 服务器。" #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "无法根据大小为 %(src_vol_size)s 的卷创建大小为 %(vol_size)s 的克隆" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "无法创建一致性组 %(group)s,因为快照 %(snap)s 未处于有效状态。以下是有效状" "态:%(valid)s。" #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "无法创建一致性组 %(group)s,因为源卷 %(source_vol)s 未处于有效状态。有效状态" "为 %(valid)s。" #, python-format msgid "Cannot create directory %s." msgstr "无法创建目录 %s。" msgid "Cannot create encryption specs. Volume type in use." msgstr "无法创建加密规范。卷类型在使用中。" #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "无法创建磁盘格式为 %s 映像。仅接受 vmdk 磁盘格式。" #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "无法创建掩码视图:%(maskingViewName)s。" #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "当“netapp_enable_multiattach”设置为 true 时,无法在 ESeries 阵列上创建多个 " "%(req)s 卷。" #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "无法创建或找到名称为 %(sgGroupName)s 的存储器组。" #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "无法根据大小为 %(snap_size)s 的快照创建大小为 %(vol_size)s 的卷" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "无法创建大小为 %s 的卷:该大小不是 8GB 的倍数。" #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "存在快照时,无法删除 LUN %s。" #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "无法删除高速缓存卷:%(cachevol_name)s。在 %(updated_at)s 对其进行了更新,它当" "前具有 %(numclones)d 卷实例。" #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "无法删除高速缓存卷:%(cachevol_name)s。在 %(updated_at)s 对其进行了更新,它当" "前具有 %(numclones)s 卷实例。" msgid "Cannot delete encryption specs. Volume type in use." msgstr "无法删除加密规范。卷类型在使用中。" msgid "Cannot determine storage pool settings." msgstr "无法确定存储池设置。" msgid "Cannot execute /sbin/mount.sofs" msgstr "无法执行 /sbin/mount.sofs" #, python-format msgid "Cannot find CG group %s." msgstr "找不到 CG 组 %s。" #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "找不到对应存储系统 %(storage_system)s 的控制器配置服务。" #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "找不到复制服务,无法为快照 %s 创建卷。" #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "找不到复制服务,无法删除快照 %s。" #, python-format msgid "Cannot find Replication service on system %s." msgstr "在系统 %s 上找不到复制服务。" #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "找不到卷:%(id)s。取消管理操作。正在退出..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "找不到卷 %(volumename)s。扩展操作。正在退出...." 
#, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "找不到卷 %(volumeName)s 的设备号。" msgid "Cannot find migration task." msgstr "找不到迁移任务。" #, python-format msgid "Cannot find replication service on system %s." msgstr "在系统 %s 上找不到复制服务。" #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "找不到源 CG 实例。consistencygroup_id:%s。" #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "无法通过通道标识 %(channel_id)s 获取 mcs_id。" msgid "Cannot get necessary pool or storage system information." msgstr "无法获取必需池或存储系统信息。" #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "无法获取或创建对应卷 %(volumeName)s 的存储器组:%(sgGroupName)s " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "无法获取或创建启动程序组:%(igGroupName)s。" #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "无法获取端口组:%(pgGroupName)s。" #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "无法从掩码视图 %(maskingViewInstanceName)s 获取存储器组 %(sgGroupName)s。" #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "对于 %(sps)s,无法获取受支持的大小范围。返回码为 %(rc)lu。错误为 %(error)s。" #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "无法获取对应快速策略 %(fastPolicyName)s 的缺省存储器组。" msgid "Cannot get the portgroup from the masking view." msgstr "无法通过掩码视图获取端口组。" msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "无法安装 Scality SOFS,请检查系统日志以获取错误" msgid "Cannot ping DRBDmanage backend" msgstr "无法对 DRBDmanage 后端执行 ping 操作" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "无法将卷 %(id)s 置于 %(host)s 上" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "无法同时提供“cgsnapshot_id”和“source_cgid”以从源创建一致性组 %(name)s。" msgid "Cannot register resource" msgstr "无法注册资源" msgid "Cannot register resources" msgstr "无法注册多个资源" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "无法从一致性组 %(group_id)s 移除卷 %(volume_id)s因为它没有在该组中。" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "无法从一致性组 %(group_id)s 移除卷 %(volume_id)s因为该卷处于无效状态:" "%(status)s。以下是有效状态:%(valid)s。" #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "无法将 HPE3PARDriver 转型为 %s。" msgid "Cannot retype from one 3PAR array to another." msgstr "一个 3PAR 阵列无法通过 retype 操作变为另一个阵列。" msgid "Cannot retype to a CPG in a different domain." msgstr "无法执行 retype 操作,以变为另一个域中的 CPG。" msgid "Cannot retype to a snap CPG in a different domain." msgstr "无法执行 retype 操作,以变为另一个域中的 SNAP CPG。" msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "无法运行 vgc-cluster 命令,请确保已安装软件,并且正确设置了许可权。" msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "无法同时设置 hitachi_serial_number 和 hitachi_unit_name。" msgid "Cannot specify both protection domain name and protection domain id." msgstr "无法同时指定保护域名和保护域标识。" msgid "Cannot specify both storage pool name and storage pool id." 
msgstr "无法同时指定存储池名称和存储池标识。" #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "无法更新一致性组 %(group_id)s,因为未提供任何有效名称、描述、add_volumes 或 " "remove_volumes。" msgid "Cannot update encryption specs. Volume type in use." msgstr "无法更新加密规范。卷类型在使用中。" #, python-format msgid "Cannot update volume_type %(id)s" msgstr "无法更新 volume_type %(id)s" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "无法验证对象 %(instanceName)s 的存在。" msgid "Cascade option is not supported." msgstr "不支持级联选项。" #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "找不到 Cg 快照 %(cgsnapshot_id)s。" msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cg 快照为空。将不创建任何一致性组。" msgid "Cgsnapshot status must be available or error" msgstr "Cg 快照状态必须为“可用”或“错误”" msgid "Change hostlun id error." msgstr "更改 hostlun 标识时出错。" msgid "Change lun priority error." msgstr "更改 LUN 优先级时发生错误。" msgid "Change lun smarttier policy error." msgstr "更改 LUN smarttier 策略时发生错误。" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "对于下列资源,更改将导致使用量小于 0:%(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "请检查分配给此驱动程序的 ZFS 共享项的访问许可权。" msgid "Check hostgroup associate error." msgstr "检查主机组关联时发生错误。" msgid "Check initiator added to array error." msgstr "检查已添加至阵列的启动程序时发生错误。" msgid "Check initiator associated to host error." msgstr "检查与主机相关联的启动程序时发生错误。" msgid "Check lungroup associate error." msgstr "检查 LUN 组关联时发生错误。" msgid "Check portgroup associate error." msgstr "检查端口组关联时发生错误。" msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "请检查 HTTP 服务的状态。另外,请确保 HTTPS 端口号与 cinder.conf 中指定的 " "HTTPS 端口号相同。" msgid "Chunk size is not multiple of block size for creating hash." msgstr "区块大小不是用于创建散列的块大小的倍数。" #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Cisco 光纤通道分区 CLI 错误:%(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "克隆功能在 %(storageSystem)s 上未获许可。" #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "克隆“%(clone_type)s”无效;有效值为:“%(full_clone)s”和“%(linked_clone)s”。" msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "集群未格式化。您可能应该执行“dog cluster format”。" #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Coho Data Cinder 驱动程序故障:%(message)s" msgid "Coho rpc port is not configured" msgstr "未配置 Coho rpc 端口" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "命令 %(cmd)s 在 CLI 中被阻塞,并且已取消" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition:%s 超时" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition:%s 超时。" msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "未安装压缩启用程序。无法创建压缩卷。" #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "找不到计算集群 %(cluster)s。" msgid "Condition has no field." msgstr "条件没有任何字段。" #, python-format msgid "Config 'max_over_subscription_ratio' invalid. 
Must be > 0: %s" msgstr "配置“max_over_subscription_ratio”无效。必须大于 0:%s" msgid "Configuration error: dell_sc_ssn not set." msgstr "配置错误:未设置 dell_sc_ssn。" #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "配置文件 %(configurationFile)s 不存在。" msgid "Configuration is not found." msgstr "找不到配置。" #, python-format msgid "Configuration value %s is not set." msgstr "未设置配置值 %s。" msgid "Configured host type is not supported." msgstr "不支持已配置的主机类型。" #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "卷类型 %s 中存在冲突的 QoS 规范:当 QoS 规范与卷类型相关联时,不允许卷类型额" "外规范中存在旧的“netapp:qos_policy_group”。" #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "连接glance失败: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "连接 Swift 失败:%(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "连接器未提供:%s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "连接器没有必需信息:%(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "一致性组 %s 仍然包含卷。需要 force 标记,以将其删除。" #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "一致性组 %s 仍然具有从属 cg 快照。" msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "一致性组为空组。将不创建任何 cg 快照。" #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "一致性组状态必须为“可用”或“错误”,但当前状态为:%s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "一致性组状态必须为“可用”,但当前状态为:%s。" #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "找不到一致性组 %(consistencygroup_id)s。" msgid "Container" msgstr "容器" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "VMDK 驱动程序不支持容器格式 %s,仅支持“bare”。" msgid "Container size smaller than required file size." msgstr "容器大小小于所需文件大小。" msgid "Content type not supported." msgstr "不支持内容类型。" #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到控制器配置服务。" #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "未能解析控制器 IP“%(host)s”:%(e)s。" #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "已转换为 %(f1)s,但现在格式为 %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "已转换为 %(vol_format)s,但现在格式为 %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "转化为裸格式,但目前格式是 %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "已转换为原始文件,但现在格式为 %s。" msgid "Coordinator uninitialized." msgstr "协调程序未初始化。" #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "“复制卷”任务失败:convert_to_base_volume:id=%(id)s,status=%(status)s。" #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "复制卷任务失败:create_cloned_volume id=%(id)s,status=%(status)s。" #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "正在将元数据从 %(src_type)s %(src_id)s 复制到 %(vol_id)s。" msgid "" "Could not determine which Keystone endpoint to use. 
This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "无法确定要使用的 Keystone 端点。可在服务目录中设置此项,也可使用 cinder.conf " "配置选项 “backup_swift_auth_url”设置此项。" msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "无法确定要使用的 Swift 端点。可在服务目录中设置此项,也可使用 cinder.conf 配" "置选项 “backup_swift_url”设置此项。" msgid "Could not find DISCO wsdl file." msgstr "找不到 DISCO wsdl 文件。" #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "找不到 GPFS 集群标识:%s。" #, python-format msgid "Could not find GPFS file system device: %s." msgstr "找不到 GPFS 文件系统设备:%s。" #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "对于类型为 %(type_id)s 的卷 %(volume_id)s,找不到主机。" #, python-format msgid "Could not find config at %(path)s" msgstr "在 %(path)s 找不到配置文件。" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "对于卷 %(volumeName)s,找不到 iSCSI 导出。" #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "对于卷 %s,找不到 iSCSI 导出" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "找不到卷 %(volume_id)s 的 iSCSI 目标。" #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "在命令 %(cmd)s 的输出 %(out)s 中找不到键。" #, python-format msgid "Could not find parameter %(param)s" msgstr "找不到参数 %(param)s" #, python-format msgid "Could not find target %s" msgstr "找不到目标 %s" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "在阵列上找不到快照“%s”的父卷。" #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "在卷 %(vol)s 上找不到唯一快照 %(snap)s。" msgid "Could not get system name." msgstr "未能获取系统名称。" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "无法从路径 %(path)s 中加载应用 '%(name)s'" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "未能读取 %s。正在使用 sudo 重新运行" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "无法读取快照 %(name)s 的信息。代码:%(code)s。原因:%(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "无法复原配置文件 %(file_path)s:%(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "未能将配置保存到 %(file_path)s:%(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "无法启动一致性组快照 %s。" #, python-format msgid "Counter %s not found" msgstr "找不到计数器 %s" msgid "Create QoS policy error." msgstr "创建 QoS 策略时发生错误。" #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份创建已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份创建已异常中止,需要的卷状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" msgid "Create consistency group failed." msgstr "创建一致性组失败。" #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "不支持从映像 %(image)s 创建类型为 %(type)s的加密卷。" msgid "Create export for volume failed." msgstr "为卷创建导出失败。" msgid "Create hostgroup error." msgstr "创建主机组时发生错误。" #, python-format msgid "Create hypermetro error. %s." 
msgstr "创建 hypermetro 错误。%s。" msgid "Create lun error." msgstr "创建 LUN 时出错。" msgid "Create lun migration error." msgstr "创建 LUN 迁移时发生错误。" msgid "Create luncopy error." msgstr "创建 LUNcopy 时发生错误。" msgid "Create lungroup error." msgstr "创建 LUN 组时发生错误。" msgid "Create manager volume flow failed." msgstr "创建管理器卷流失败。" msgid "Create port group error." msgstr "创建端口组时出错。" msgid "Create replication error." msgstr "创建复制错误。" #, python-format msgid "Create replication pair failed. Error: %s." msgstr "创建复制对失败。错误:%s。" msgid "Create snapshot error." msgstr "创建快照时发生错误。" #, python-format msgid "Create volume error. Because %s." msgstr "创建卷错误。因为 %s。" msgid "Create volume failed." msgstr "创建卷失败。" msgid "Creating a consistency group from a source is not currently supported." msgstr "当前不支持通过源创建一致性组。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "创建并激活区域集失败(区域集为 %(cfg_name)s,发生的错误为 %(err)s)。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "创建并激活区域集失败(区域集为 %(zoneset)s,发生的错误为 %(err)s)。" #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "正在为 %(begin_period)s 直到 %(end_period)s 创建使用情况" msgid "Current host isn't part of HGST domain." msgstr "当前主机不存在于 HGST 域中。" #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "对于类型为 %(type)s 的卷 %(id)s,当前主机无效,不允许迁移" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "卷 %(vol)s 的当前已映射的主机位于具有 %(group)s 的不受支持的主机组中。" msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "建议不要使用:请部署 Cinder API V1。" msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "已不推荐使用:Cinder API 的 Deploy v2。" #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "DRBDmanage 驱动程序错误:回复中没有预期关键字“%s”,DRBDmanage 版本是否正确?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "DRBDmanage 驱动程序设置错误:找不到某些必需的库(dbus 和 drbdmanage.*)。" #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage 期望一个资源 (\"%(res)s\"),但是获得了 %(n)d" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "快照复原后 DRBDmanage 等待新卷时超时;资源“%(res)s”,卷“%(vol)s”" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "DRBDmanage 等待创建快照时超时;资源“%(res)s”,快照“%(sn)s”" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "DRBDmanage 等待创建卷时超时;资源“%(res)s”,卷“%(vol)s”" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "DRBDmanage 等待卷大小时超时;卷标识“%(id)s”(res \"%(res)s\",vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "未能确定数据 ONTAP API 版本。" msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "以 7 方式运行的 Data ONTAP 不支持 QoS 策略组。" msgid "Database schema downgrade is not allowed." 
msgstr "不允许对数据库模式进行降级。" #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "数据集 %s 在 Nexenta 存储设备中未共享" #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "在 Nexenta SA 中找不到数据库组 %s" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "去重是有效的供应类型,但是要求安装了 WSAPI 版本“%(dedup_version)s”版" "本“%(version)s”。" msgid "Dedup luns cannot be extended" msgstr "无法扩展 Dedup lun" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "未安装去重启用程序。无法创建去重卷" msgid "Default pool name if unspecified." msgstr "缺省池名称(如果未指定)。" #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "资源 %(res)s 的缺省配额由缺省配额标记 quota_%(res)s 设置,现在不推荐使用。请" "对缺省配额使用缺省配额类。 " msgid "Default volume type can not be found." msgstr "找不到缺省卷类型。" msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "定义一组公开池及其相关联的后端查询字符串" msgid "Delete LUNcopy error." msgstr "删除 LUNcopy 时发生错误。" msgid "Delete QoS policy error." msgstr "删除 QoS 策略时发生错误。" msgid "Delete associated lun from lungroup error." msgstr "从 LUN 组中删除相关联的 LUN 时发生错误。" #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "备份删除已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" "建此备份的备份服务 [%(backup_service)s]。" msgid "Delete consistency group failed." msgstr "删除一致性组失败。" msgid "Delete hostgroup error." msgstr "删除主机组时发生错误。" msgid "Delete hostgroup from mapping view error." msgstr "从映射视图删除主机组时发生错误。" msgid "Delete lun error." msgstr "删除 LUN 时发生错误。" msgid "Delete lun migration error." msgstr "删除 LUN 迁移时发生错误。" msgid "Delete lungroup error." msgstr "删除 LUN 组时发生错误。" msgid "Delete lungroup from mapping view error." msgstr "从映射视图删除 LUN 组时发生错误。" msgid "Delete mapping view error." msgstr "删除映射视图时发生错误。" msgid "Delete port group error." msgstr "删除端口组时出错。" msgid "Delete portgroup from mapping view error." msgstr "从映射视图删除端口组时发生错误。" msgid "Delete snapshot error." msgstr "删除快照时发生错误。" #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "不支持对处于以下状态的卷删除快照:%s。" #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup 已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" msgid "Deleting volume from database and skipping rpc." msgstr "正在从数据库删除卷并跳过 RPC。" #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "删除区域失败:(命令为 %(cmd)s,发生的错误为 %(err)s)。" msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "要提供“一致性组”支持,需要 Dell API 2.1 或更高版本" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "直接连接不支持 Dell Cinder 驱动程序配置错误复制。" #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "找不到 Dell Cinder 驱动程序配置错误 replication_device %s" msgid "Deploy v3 of the Cinder API." msgstr "Cinder API 的 Deploy v3。" msgid "Describe-resource is admin only functionality" msgstr "Describe-resource是只有管理员才能执行的功能。" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." 
msgstr "目标具有 migration_status %(stat)s,原应为 %(exp)s。" msgid "Destination host must be different than the current host." msgstr "目标主机必须与当前主机不同。" msgid "Destination volume not mid-migration." msgstr "目标卷未在迁移中。" msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "拆离卷失败:存在多个连接,但是未提供 attachment_id。" msgid "Detach volume from instance and then try again." msgstr "请断开卷与实例的连接,然后再次进行尝试。" #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "检测到多个具有名称 %(vol_name)s 的卷" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "%(fun)s 中找不到需要的列:%(hdr)s。" #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "在 %(fun)s 中找不到期望的键 %(key)s:%(raw)s。" msgid "Disabled reason contains invalid characters or is too long" msgstr "禁用的原因包含无效字符或太长" #, python-format msgid "Domain with name %s wasn't found." msgstr "找不到名称为 %s 的域。" #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "检测到下层 GPFS 集群。在集群守护程序级别 %(cur)s 中未启用“GPFS 克隆”功能 - 必" "须至少处于级别 %(min)s。" #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "驱动程序初始化连接失败(错误:%(err)s)。" msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "驱动程序无法转型,因为卷 (LUN {}) 的快照被禁止迁移。" msgid "Driver must implement initialize_connection" msgstr "驱动程序必须实现 initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "驱动程序已成功将所导入的备份数据解码,但是缺少字段 (%s)。" #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "E-series 代理 API 版本 %(current_version)s 不支持完整的一组 SSC 额外规范。代" "理版本必须至少为 %(min_version)s。" #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "发生 EMC VNX Cinder 驱动程序 CLI 异常:%(cmd)s(返回码为 %(rc)s)(输出为 " "%(out)s)。" msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp、EcomServerPort、EcomUserName 和 EcomPassword 必须具有有效值。" #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "必须提供“cgsnapshot_id”或者“source_cgid”,以从源创建一致性组 %(name)s。" #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO %(slo)s 或工作负载 %(workload)s 无效。请查看先前的错误说明以了解有效值。" msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "需要 hitachi_serial_number 或 hitachi_unit_name。" #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到元素组合服务。" msgid "Enables QoS." msgstr "启用 QoS。" msgid "Enables compression." msgstr "启用压缩。" msgid "Enables replication." msgstr "启用复制。" msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "请确保 configfs 安装在 /sys/kernel/config 处。" #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." 
msgstr "" "在 groupInitiatorGroup %(initiatorgroup)s 上添加启动程序 %(initiator)s 时出" "错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "添加至带有 IQN %(iqn)s 的目标组 %(targetgroup)s 时出错。返回码:" "%(ret.status)d,消息:%(ret.data)s。" #, python-format msgid "Error Attaching volume %(vol)s." msgstr "连接卷 %(vol)s 时出错。" #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "在池 %(pool)s 的卷 %(lun)s 上克隆快照 %(snapshot)s 时出错。项目:%(project)s " "克隆项目:%(clone_proj)s返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "创建克隆卷 %(cloneName)s 时出错。返回码为 %(rc)lu。错误为 %(error)s。" #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "创建克隆卷时出错:卷:%(cloneName)s 源卷为 %(sourceName)s。返回码为 " "%(rc)lu。错误为 %(error)s。" #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "创建组 %(groupName)s 时出错。返回码:%(rc)lu。错误:%(error)s。" #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "创建掩码视图 %(groupName)s 时出错。返回码:%(rc)lu。错误:%(error)s。" #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "创建卷 %(volumeName)s 时出错。返回码:%(rc)lu。错误:%(error)s。" #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "创建卷 %(volumename)s 时出错。返回码:%(rc)lu。错误:%(error)s。" #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "创建组副本时出错:源为 %(source)s,目标为 %(target)s。返回码为 %(rc)lu。错误" "为 %(error)s。" #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "对别名 %(alias)s 创建发起方 %(initiator)s 时出错。返回码:%(ret.status)d 消" "息:%(ret.data)s。" #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "在池 %(pool)s 上创建项目 %(project)s 时出错。返回码:%(ret.status)d 消息:" "%(ret.data)s。" #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "创建属性 %(property)s、类型 %(type)s 和描述 %(description)s 时出错。返回码:" "%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "创建共享项 %(name)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在卷 %(lun)s 上对池 %(pool)s 创建快照 %(snapshot)s 时出错。项目:%(project)s " "返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
msgstr "" "在共享项 %(share)s 上对池 %(pool)s 创建快照 %(snapshot)s 时出错。项目:" "%(project)s 返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "创建目标 %(alias)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "创建带有 IQN %(iqn)s 的目标组 %(targetgroup)s 时出错。返回码:" "%(ret.status)d,消息:%(ret.data)s。" #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "创建大小为 %(size)s 的卷 %(lun)s 时出错。返回码:%(ret.status)d消息:" "%(ret.data)s。" #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "创建新的组合卷时出错。返回码:%(rc)lu。错误:%(error)s。" #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "对于目标 %(tgt)s 和池 %(tgt_pool)s,在池 %(pool)s、项目 %(proj)s 和卷 " "%(vol)s 上创建复制操作时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" msgid "Error Creating unbound volume on an Extend operation." msgstr "对扩展操作创建未绑定卷时出错。" msgid "Error Creating unbound volume." msgstr "创建未绑定卷时出错。" #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "删除卷 %(volumeName)s 时出错。返回码:%(rc)lu。错误:%(error)s。" #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "删除组 %(storageGroupName)s 时出错。返回码:%(rc)lu。错误:%(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "删除启动程序组 %(initiatorGroupName)s 时出错。返回码:%(rc)lu。错误:" "%(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在共享项 %(share)s 上对池 %(pool)s 删除快照 %(snapshot)s 时出错。项目:" "%(project)s 返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在卷 %(lun)s 上对池 %(pool)s 删除快照 %(snapshot)s 时出错。项目:%(project)s " "返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "从池 %(pool)s 中删除卷 %(lun)s 时出错,项目:%(project)s。返回码:" "%(ret.status)d,消息:%(ret.data)s。" #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "删除池 %(pool)s 上的项目 %(project)s 时出错。返回码:%(ret.status)d 消息:" "%(ret.data)s。" #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "删除复制操作 %(id)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "扩展卷 %(volumeName)s 出错。返回码:%(rc)lu。错误:%(error)s。" #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." 
msgstr "" "获取发起方时出错:发起方组为 %(initiatorgroup)s,返回码为 %(ret.status)d 消" "息:%(ret.data)s。" #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "获取池统计信息时出错:池:%(pool)s 返回码:%(status)d 消息:%(data)s。" #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "获取项目状态时出错:池:%(pool)s 项目:%(project)s返回码:%(ret.status)d 消" "息:%(ret.data)s。" #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在池 %(pool)s 上获取共享项 %(share)s 时出错。项目:%(project)s 返回码:" "%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在卷 %(lun)s 上对池 %(pool)s 创建快照 %(snapshot)s 时出错。项目:%(project)s " "返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "获取目标 %(alias)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在池 %(pool)s 上获取卷 %(lun)s 时出错。项目:%(project)s 返回码:" "%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "在池之间迁移卷时出错。返回码:%(rc)lu。错误为 %(error)s。" #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "修改掩码视图 %(groupName)s 时出错。返回码:%(rc)lu。错误为 %(error)s。" #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "池所有权出错:池 %(pool)s 并非归 %(host)s 所有。" #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在池 %(pool)s 的卷 %(lun)s 上设置属性 Props %(props)s 时出错。项目:" "%(project)s 返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "终止迁移会话时出错。返回码:%(rc)lu。错误:%(error)s。" #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "验证启动程序 %(iqn)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "验证池 %(pool)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "在池 %(pool)s 上验证项目 %(project)s 时出错。返回码:%(ret.status)d,消息:" "%(ret.data)s。" #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "验证服务 %(service)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "验证目标 %(alias)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." 
msgstr "" "在项目 %(project)s 和池 %(pool)s 上验证共享项 %(share)s 时出错。返回码为 " "%(ret.status)d,消息为 %(ret.data)s。" #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "使用以下实例路径添加卷 %(volumeName)s 时出错:%(volumeInstancePath)s。" #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "向组 %(groupName)s 添加启动程序时出错。返回码:%(rc)lu。错误为 %(error)s。" #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "向组合卷添加卷时出错。错误为:%(error)s。" #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "将卷 %(volumename)s 追加至目标基本卷时出错。" #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "将存储器组 %(storageGroupName)s 关联至快速策略 %(fastPolicyName)s 时出错,错" "误描述:%(errordesc)s。" #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "连接卷 %s 时出错。可能已到达目标限制!" #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "破坏克隆关系时出错:同步名称为 %(syncName)s。返回码为 %(rc)lu。错误为 " "%(error)s。" msgid "Error connecting to ceph cluster." msgstr "连接至 ceph 集群时出错。" #, python-format msgid "Error connecting via ssh: %s" msgstr "通过 ssh 进行连接时出错:%s" #, python-format msgid "Error creating volume: %s." msgstr "创建卷时出错:%s。" msgid "Error deleting replay profile." msgstr "删除重放概要文件时出错。" #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "删除卷 %(ssn)s 时出错:%(volume)s " #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "删除卷 %(vol)s 时出错:%(err)s。" #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "在评估程序解析期间,发生错误:%(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "在池 %(pool)s 上编辑共享项 %(share)s 时出错。返回码:%(ret.status)d,消息:" "%(ret.data)s。" #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "为 NetworkPortal 启用 iSER 时出错:请确保 RDMA 在 IP %(ip)s 上的 iSCSI 端口 " "%(port)d 中受支持。" #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "在清除失败的连接期间遇到错误:%(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "执行 CloudByte API [%(cmd)s] 时出错,错误为 %(err)s。" msgid "Error executing EQL command" msgstr "执行 EQL 命令时出错" #, python-format msgid "Error executing command via ssh: %s" msgstr "通过 ssh 执行命令时发生错误:%s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "扩展卷 %(vol)s 时出错:%(err)s。" #, python-format msgid "Error extending volume: %(reason)s" msgstr "扩展卷时出错:%(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "查找 %(name)s 时出错。" #, python-format msgid "Error finding %s." msgstr "查找 %s 时出错。" #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "获取 ReplicationSettingData 时出错。返回码:%(rc)lu。错误:%(error)s。" msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "获取设备版本详细信息时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." 
msgstr "从名称 %(name)s 中获取域标识时出错:%(err)s。" #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "从名称 %(name)s 中获取域标识 %(id)s 时出错。" msgid "Error getting initiator groups." msgstr "获取发起方组时,发生错误。" #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "从名称 %(pool)s 中获取池标识时出错:%(err)s。" #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "从名称 %(pool_name)s 中获取池标识时出错:%(err_msg)s。" #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "获取复制操作 %(id)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "获取复制源详细信息时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "获取复制目标详细信息时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "获取版本时出错:svc:%(svc)s。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" "对 CloudByte 存储器中的卷 [%(cb_volume)s] 执行操作 [%(operation)s] 时出错:" "[%(cb_error)s],错误代码:[%(error_code)s]。" #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "SolidFire API响应里发生错误:data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "对大小为 %(size)d GB 的 %(space)s 进行空间创建时出错" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "对卷 %(space)s 进行空间扩充,额外扩充 %(size)d GB 时出错" #, python-format msgid "Error managing volume: %s." msgstr "管理卷 %s 时出错。" #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "映射卷 %(vol)s 时出错。%(error)s。" #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "修改副本同步 %(sv)s 操作 %(operation)s 时出错。返回码为 %(rc)lu。错误为 " "%(error)s。" #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "修改服务 %(service)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "将卷 %(vol)s 从源项目 %(src)s 移至目标项目 %(tgt)s 时出错。返回码:" "%(ret.status)d 消息:%(ret.data)s。" msgid "Error not a KeyError." msgstr "错误并非 KeyError。" msgid "Error not a TypeError." msgstr "错误并非 TypeError。" #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "创建 cg 快照 %s 时发生了错误。" #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "删除 cg 快照 %s 时发生了错误。" #, python-format msgid "Error occurred when updating consistency group %s." msgstr "更新一致性组 %s 时发生了错误。" #, python-format msgid "Error parsing config file: %s" msgstr "解析配置文件 %s 时出错" msgid "Error promoting secondary volume to primary" msgstr "将辅助卷升级为主卷时出错" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "移除卷 %(vol)s 时出错。%(error)s。" #, python-format msgid "Error renaming volume %(vol)s: %(err)s." 
msgstr "重命名卷 %(vol)s 时出错:%(err)s。" #, python-format msgid "Error response: %s" msgstr "错误响应:%s" msgid "Error retrieving volume size" msgstr "检索卷大小时出错" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "为操作标识 %(id)s 发送复制更新时出错。返回码:%(ret.status)d 消息:" "%(ret.data)s。" #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "发送复制更新时出错。所返回的错误:%(err)s。操作:%(id)s。" #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "对于卷 %(vol)s,将复制继承设置为 %(set)s 时出错。项目:%(project)s 返回码:" "%(ret.status)d 消息:%(ret.data)s。" #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "从源 %(src)s 分割软件包 %(package)s 时出错。返回码:%(ret.status)d 消息:" "%(ret.data)s。" #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "从池取消绑定卷 %(vol)s 时出错。%(error)s。" #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "在快照 %(snapshot)s 上的卷克隆 %(clone)s(大小为 %(size)d)上验证克隆大小时发" "生了错误 " #, python-format msgid "Error while authenticating with switch: %s." msgstr "向交换机认证时出错:%s。" #, python-format msgid "Error while changing VF context %s." msgstr "更改 VF 上下文 %s 时出错。" #, python-format msgid "Error while checking the firmware version %s." msgstr "检查固件版本 %s 时出错。" #, python-format msgid "Error while checking transaction status: %s" msgstr "检查事务状态时发生错误:%s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "检查 VF 对管理 %s 是否可用时出错。" #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "连接带有协议 %(protocol)s 的交换机 %(switch_id)s 时出错。错误:%(error)s。" #, python-format msgid "Error while creating authentication token: %s" msgstr "创建认证令牌时出错:%s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "创建快照 [status] %(stat)s - [result] %(res)s 时出错。" #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "创建卷 [status] %(stat)s - [result] %(res)s 时出错。" #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "删除快照 [status] %(stat)s - [result] %(res)s 时出错。" #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "删除卷 [status] %(stat)s - [result] %(res)s 时出错。" #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "扩展卷 [status] %(stat)s - [result] %(res)s 时出错。" #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "获取 %(op)s 详细信息时出错,返回码:%(status)s。" #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "" "通过 ssh 获取数据时发生错误:(命令为 %(cmd)s,发生的错误为 %(err)s)。" #, python-format msgid "Error while getting disco information [%s]." msgstr "获取 disco 信息 [%s] 时出错。" #, python-format msgid "Error while getting nvp value: %s." msgstr "获取 nvp 值时出错:%s。" #, python-format msgid "Error while getting session information %s." msgstr "获取会话信息 %s 时出错。" #, python-format msgid "Error while parsing the data: %s." 
msgstr "解析数据时出错:%s。" #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "在交换机上查询页面 %(url)s 时出错,原因:%(error)s。" #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "移除区域字符串中的 zones 和 cgfs 时出错:%(description)s。" #, python-format msgid "Error while requesting %(service)s API." msgstr "请求 %(service)s API 时出错。" #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "运行分区 CLI 时发生错误:(命令为 %(cmd)s,发生的错误为 %(err)s)。" #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "更新区域字符串中的新 zones 和 cgfs 时出错。错误:%(description)s。" msgid "Error writing field to database" msgstr "将字段写至数据库时出错。" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "获取卷标识时发生错误 [%(stat)s - %(res)s]。" #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "将快照 [%(snap_id)s] 复原至卷 [%(vol)s] 时发生错误 [%(stat)s - %(res)s]。" #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "获取卷标识时发生错误 [status] %(stat)s - [result] %(res)s]。" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "对于卷 %(volume_id)s,已超过最大调度尝试次数 %(max_attempts)d" msgid "Exceeded the limit of snapshots per volume" msgstr "超出每个卷的快照数限制" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "将元卷追加到目标卷 %(volumename)s 时发生异常。" #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "创建元素副本时发生异常。克隆名称:%(cloneName)s,源名称:%(sourceName)s,额外" "规范:%(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "_select_ds_for_volume %s 中发生异常。" #, python-format msgid "Exception while forming the zone string: %s." msgstr "构建区域字符串时发生异常:%s。" #, python-format msgid "Exception: %s" msgstr "异常:%s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "期望 uuid,但是接收到 %(uuid)s。" #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "刚好需要一个名为“%s”的节点" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "期望 node_count 的值为整数,已返回 svcinfo lsiogrp:%(node)s。" #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "期望 CLI 命令 %(cmd)s 没有任何输出,但是获得了 %(out)s。" #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "在 vdisk_UID 上进行过滤时,从 lsvdisk 返回了所需的单个 vdisk。返回了 " "%(count)s。" #, python-format msgid "Expected volume size was %d" msgstr "需要的卷大小为 %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份导出已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "记录导出已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" "建此备份的备份服务 [%(backup_service)s]。" msgid "Extend volume error." msgstr "扩展卷时发生错误。" msgid "" "Extend volume is only supported for this driver when no snapshots exist." 
msgstr "不存在任何快照时,扩展卷仅受此驱动程序支持。" msgid "Extend volume not implemented" msgstr "扩展卷未实现" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "未安装 FAST VP 启用程序。无法对该卷设置分层策略" msgid "FAST is not supported on this array." msgstr "快速策略在此阵列上不受支持。" msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC 为协议,但 OpenStack 未提供 wwpns。" #, python-format msgid "Faield to unassign %(volume)s" msgstr "无法取消分配 %(volume)s" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "未能创建高速缓存卷 %(volume)s。错误:%(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "对于光纤网 %(fabric)s,未能添加连接:发生错误:%(err)s" msgid "Failed cgsnapshot" msgstr "已使 cg 快照失效" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "为组创建快照失败:%(response)s。" #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "为卷 %(volname)s 创建快照失败:%(response)s。" #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "通过光纤网络 %s 获取活动区域集失败。" #, python-format msgid "Failed getting details for pool %s." msgstr "获取池 %s 的详细信息失败。" #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "对于光纤网 %(fabric)s,未能移除连接:发生错误:%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "未能扩展卷 %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "未能登录到 3PAR (%(url)s),因为存在 %(err)s" msgid "Failed to access active zoning configuration." msgstr "未能访问活动分区配置。" #, python-format msgid "Failed to access zoneset status:%s" msgstr "未能访问区域集状态:%s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "未能获取资源锁定。(序列为 %(serial)s,实例为 %(inst)s,返回为 %(ret)s,标准" "错误为 %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "%(retries)s 次尝试后将 %(vol)s 添加至 %(sg)s 失败。" msgid "Failed to add the logical device." msgstr "未能添加逻辑设备。" #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "未能将卷 %(volumeName)s 添加至一致性组 %(cgName)s。返回码为 %(rc)lu。错误为 " "%(error)s。" msgid "Failed to add zoning configuration." msgstr "未能添加分区配置。" #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "未能分配 iSCSI 发起方 IQN。(端口为 %(port)s,原因为 %(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "未能使 qos_specs %(specs_id)s 与类型 %(type_id)s 关联。" #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "未能针对卷 %(volume_id)s 连接 iSCSI 目标。" #, python-format msgid "Failed to backup volume metadata - %s" msgstr "未能备份卷元数据 - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "未能备份卷元数据 - 元数据备份对象“backup.%s.meta”已存在" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "未能从快照 %s 克隆卷。" #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "未能连接至 %(vendor_name)s 阵列 %(host)s:%(err)s" msgid "Failed to connect to Dell REST API" msgstr "无法连接至 Dell REST API" msgid "Failed to connect to array" msgstr "未能连接至阵列" #, python-format msgid "Failed to connect to sheep daemon. 
addr: %(addr)s, port: %(port)s" msgstr "未能连接至 sheep 守护程序。地址:%(addr)s,端口:%(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "未能将映像复制到卷:%(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "未能复制元数据到卷:%(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "未能复制卷,目标设备不可用。" msgid "Failed to copy volume, source device unavailable." msgstr "未能复制卷,源设备不可用。" #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "未能从快照 %(cgSnapshot)s 创建 CG %(cgName)s。" #, python-format msgid "Failed to create IG, %s" msgstr "未能创建 IG,%s" msgid "Failed to create SolidFire Image-Volume" msgstr "未能创建 SolidFire 映像卷" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "未能创建卷组:%(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "未能创建文件。(文件为 %(file)s,返回为 %(ret)s,标准错误为 %(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "未能为卷 %s 创建临时快照。" msgid "Failed to create api volume flow." msgstr "未能创建 api 卷流。" #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "由于 %(reason)s,未能创建 cg 快照 %(id)s。" #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "由于 %(reason)s,未能创建一致性组 %(id)s。" #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "未能创建一致性组 %(id)s:%(ret)s。" #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "未能创建一致性组 %s,因为 VNX 一致性组无法接受压缩的 LUN 作为成员。" #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "未能创建一致性组:%(cgName)s。" #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "未能创建一致性组:%(cgid)s。错误为 %(excmsg)s。" #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "未能创建一致性组 %(consistencyGroupName)s。返回码为 %(rc)lu。错误为 " "%(error)s。" #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "未能在 %(storageSystemName)s 上创建硬件标识。" #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "未能创建主机:%(name)s。请检查它在阵列上是否存在。" #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "未能创建主机组:%(name)s。请检查它在阵列上是否存在。" msgid "Failed to create iqn." msgstr "未能创建 IQN。" #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "未能针对卷 %(volume_id)s 创建 iscsi 目标。" msgid "Failed to create manage existing flow." msgstr "未能创建 manage_existing 流。" msgid "Failed to create manage_existing flow." msgstr "未能创建 manage_existing 流。" msgid "Failed to create map on mcs, no channel can map." msgstr "未能在 MCS 上创建映射,没有通道可以映射。" msgid "Failed to create map." msgstr "未能创建映射。" #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "未能为卷创建元数据:%(reason)s" msgid "Failed to create partition." msgstr "未能创建分区。" #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "未能通过规范 %(qos_specs)s 创建 qos_specs:%(name)s。" msgid "Failed to create replica." 
msgstr "未能创建副本。" msgid "Failed to create scheduler manager volume flow" msgstr "未能创建调度程序管理器卷流" #, python-format msgid "Failed to create snapshot %s" msgstr "未能创建快照 %s" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "未能创建快照,因为没有指定任何 LUN 标识" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "未能针对 cg %(cgName)s 创建快照。" #, python-format msgid "Failed to create snapshot for volume %s." msgstr "未能为卷 %s 创建快照。" #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "未能在卷 %(vol)s 上创建快照策略:%(res)s。" #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "未能在卷 %(vol)s 上创建快照资源区域:%(res)s。" msgid "Failed to create snapshot." msgstr "未能创建快照。" #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "未能创建快照。对于 OpenStack 卷 [%s],找不到 CloudByte 卷信息。" #, python-format msgid "Failed to create south bound connector for %s." msgstr "无法为 %s 创建南向连接器。" #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "无法创建存储器组 %(storageGroupName)s。" #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "未能创建瘦池,错误消息如下:%s" #, python-format msgid "Failed to create volume %s" msgstr "未能创建卷 %s" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "未能删除卷标识 %(volume_id)s 的 SI,因为它有配对。" #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "未能删除逻辑设备。(逻辑设备为 %(ldev)s,原因为 %(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "由于 %(reason)s,未能删除 cg 快照 %(id)s。" #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "由于 %(reason)s,未能删除一致性组 %(id)s。" #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "未能删除一致性组:%(cgName)s。" #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "未能删除一致性组 %(consistencyGroupName)s。返回码为 %(rc)lu。错误为 " "%(error)s。" msgid "Failed to delete device." msgstr "无法删除设备。" #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "未能针对一致性组 %(cgname)s 删除文件集。错误为 %(excmsg)s。" msgid "Failed to delete iqn." msgstr "未能删除 IQN。" msgid "Failed to delete map." msgstr "未能删除映射。" msgid "Failed to delete partition." msgstr "未能删除分区。" msgid "Failed to delete replica." msgstr "未能删除副本。" #, python-format msgid "Failed to delete snapshot %s" msgstr "未能删除快照 %s" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "未能针对 cg %(cgId)s 删除快照。" #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "未能删除快照标识 %s 的快照,因为它有配对。" msgid "Failed to delete snapshot." msgstr "未能删除快照。" #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "未能删除卷 %(volumeName)s。" #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "未能删除卷标识 %(volume_id)s 的卷,因为它有配对。" #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "未能针对卷 %(volume_id)s 与 iSCSI 目标断开连接。" msgid "Failed to determine blockbridge API configuration" msgstr "未能确定 Blockbridge API 配置" msgid "Failed to disassociate qos specs." msgstr "未能取消关联 Qos 规范。" #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
msgstr "未能使 qos_specs %(specs_id)s 与类型 %(type_id)s 取消关联。" #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "未能确保快照资源区域,找不到标识 %s 的卷" msgid "Failed to establish SSC connection." msgstr "未能建立 SSC 连接。" msgid "Failed to establish connection with Coho cluster" msgstr "无法建立与 Coho 集群的连接。" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "未能执行 CloudByte API [%(cmd)s]。Http 状态为 %(status)s,错误为 %(error)s。" msgid "Failed to execute common command." msgstr "未能执行常见命令。" #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "输出卷失败:%(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "无法扩展卷 %(name)s,错误消息:%(msg)s。" msgid "Failed to find QoSnode" msgstr "找不到 QoSnode" msgid "Failed to find Storage Center" msgstr "找不到存储中心" msgid "Failed to find a vdisk copy in the expected pool." msgstr "在所需池中找不到 vdisk 副本。" msgid "Failed to find account for volume." msgstr "未能查找卷的帐户。" #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "对于路径 %(path)s,未能找到文件集,命令输出:%(cmdout)s。" #, python-format msgid "Failed to find group snapshot named: %s" msgstr "找不到名为 %s 的组快照" #, python-format msgid "Failed to find host %s." msgstr "未能找到主机 %s。" #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "找不到包含 %(initiator)s 的 iSCSI 启动程序组。" #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "找不到源卷 %s 的存储池。" #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "对于帐户 [%s],未能获取 CloudByte 帐户详细信息。" #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "未能获取 LUN %s 的 LUN 目标详细信息" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "未能获取 LUN %s 的 LUN 目标详细信息。" #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "未能获取 LUN %s 的 LUN 目标列表" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "未能获取卷 %(volume_id)s 的分区标识。" #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "未能从快照 %(snapshot_id)s 获取 RAID 快照标识。" #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "未能从快照 %(snapshot_id)s 获取 RAID 快照标识。" msgid "Failed to get SplitMirror." msgstr "无法获取 SplitMirror。" #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "未能获取存储资源。系统将再次尝试获取该存储资源。(资源:%(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "未能获取 qos 规范 %s 的所有关联" msgid "Failed to get channel info." msgstr "未能获取通道信息。" #, python-format msgid "Failed to get code level (%s)." msgstr "未能获取代码级别 (%s)。" msgid "Failed to get device info." msgstr "未能获取设备信息。" #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "未能获取域,因为阵列上不存在 CPG (%s)。" msgid "Failed to get image snapshots." msgstr "无法获取映像快照。" #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "未能获取具有卷 %(volume_id)s 的通道 %(channel_id)s 上的 IP。" msgid "Failed to get iqn info." msgstr "未能获取 IQN 信息。" msgid "Failed to get license info." msgstr "未能获取许可证信息。" msgid "Failed to get lv info." msgstr "未能获取 lv 信息。" msgid "Failed to get map info." msgstr "未能获取映射信息。" msgid "Failed to get migration task." 
msgstr "无法获取迁移任务。" msgid "Failed to get model update from clone" msgstr "未能从克隆获取模型更新" msgid "Failed to get name server info." msgstr "未能获取名称服务器信息。" msgid "Failed to get network info." msgstr "未能获取网络信息。" #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "未能在新池 %(pool_id)s 中获取新的部件标识。" msgid "Failed to get partition info." msgstr "未能获取分区信息。" #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "未能获取具有卷 %(volume_id)s 的池标识。" #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "无法获取 %(volume)s 的远程复制信息,因为发生了 %(err)s。" #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "无法获取 %(volume)s 的远程复制信息。异常:%(err)s。" msgid "Failed to get replica info." msgstr "未能获取副本信息。" msgid "Failed to get show fcns database info." msgstr "未能获取显示 fcns 数据库信息。" msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "无法获取现有卷:%(vol)。卷管理失败。" #, python-format msgid "Failed to get size of volume %s" msgstr "未能获取卷 %s 的大小" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "未能获取卷 %s 的快照。" msgid "Failed to get snapshot info." msgstr "未能获取快照信息。" #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "未能获取 LUN %s 的目标 IQN" msgid "Failed to get target LUN of SplitMirror." msgstr "无法获取 SplitMirror 的目标 LUN。" #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "未能获取 LUN %s 的目标门户网站" msgid "Failed to get targets" msgstr "未能获取目标" msgid "Failed to get wwn info." msgstr "未能获取 WWN 信息。" #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "未能获取或创建掩码视图 %(maskingViewName)s,或者未能对该掩码视图添加卷 " "%(volumeName)s。接收到的错误消息为 %(errorMessage)s。" msgid "Failed to identify volume backend." msgstr "未能识别卷后端。" #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "未能针对共享项 %(cgname)s 链接文件集。错误为 %(excmsg)s。" #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "未能登录到 %s 阵列(无效登录?)。" #, python-format msgid "Failed to login for user %s." msgstr "未能让用户 %s 登录。" msgid "Failed to login with all rest URLs." msgstr "未能使用所有 REST URL 进行登录。" #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "由于以下原因,未能对 Datera 集群端点进行请求:%s" msgid "Failed to manage api volume flow." msgstr "未能管理 API 卷流。" #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "未能管理现有 %(type)s %(name)s,因为所报告的大小 %(size)s不是浮点数。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "未能管理现有卷 %(name)s,因为获取卷大小时出错。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "未能管理现有卷 %(name)s,因为重命名操作失败:错误消息为 %(msg)s。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "未能管理现有卷 %(name)s,因为已报告的大小 %(size)s 不是浮点数。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." 
msgstr "未能管理现有卷,因为所选卷类型的池与传入卷引用的 NFS 共享不匹配。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "未能管理现有卷,因为所选卷类型的池与传入卷引用的文件系统不匹配。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "未能管理现有卷,因为所选卷类型的池与主机的池不匹配。" #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "无法管理现有卷,因为 I/O 组不匹配。要管理的卷的 I/O 组为 %(vdisk_iogrp)s。所" "选类型的 I/O 组为 %(opt_iogrp)s。" #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "无法管理现有卷,因为要管理的卷的池与后端池不匹配。要管理的卷的池为 " "%(vdisk_pool)s。后端的池为 %(backend_pool)s。" msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "无法管理现有卷,要管理的卷为压缩卷,但所选卷类型并非压缩卷。" msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "无法管理现有卷,要管理的卷并非压缩卷,但所选卷类型为压缩卷。" msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "无法管理现有卷,因为要管理的卷未包含在有效 I/O 组中。" msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "无法管理现有卷,因为要管理的卷为厚卷,但所选卷类型为薄卷。" msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "无法管理现有卷,要管理的卷为薄卷,但所选卷类型为厚卷。" #, python-format msgid "Failed to manage volume %s." msgstr "未能管理卷 %s。" #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "未能映射逻辑设备。(逻辑设备为 %(ldev)s,LUN 为 %(lun)s,端口为 %(port)s,标" "识为 %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "第一次迁移卷失败。" msgid "Failed to migrate volume for the second time." msgstr "第二次迁移卷失败。" #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "未能移动 LUN 映射。返回码:%s" #, python-format msgid "Failed to move volume %s." msgstr "未能移动卷 %s。" #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "未能打开文件。(文件为 %(file)s,返回为 %(ret)s,标准错误为 %(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "未能解析 CLI 输出:\n" "命令:%(cmd)s\n" "标准输出:%(out)s\n" "标准错误:%(err)s。" msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "解析配置选项“swift_catalog_info”失败,必须为以下格式::" ":" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "解析配置选项“swift_catalog_info”失败,必须为以下格式::" ":" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "未能执行零页面回收。(逻辑设备为 %(ldev)s,原因为 %(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "未能针对卷 %(volume)s 移除导出:%(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." 
msgstr "未能针对卷 %(volume_id)s 除去 iscsi 目标。" #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "未能将卷 %(volumeName)s 从一致性组 %(cgName)s 中移除。返回码为 %(rc)lu。错误" "为 %(error)s。" #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "未能从缺省 SG 中移除卷 %(volumeName)s。" #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "无法移除卷 %(volumeName)s(从缺省 SG %(volumeName)s)。" #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "无法从快速策略 %(fastPolicyName)s 的缺省存储器组中移除 %(volumename)s。 " #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "未能重命名逻辑卷 %(name)s,错误消息如下:%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "无法检索处于活动状态的分区配置 %s" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "无法为目标 IQN %(iqn)s 设置 CHAP 认证。详细信息:%(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "未能对现有卷 %(name)s 设置 QoS,错误消息:%(msg)s。" msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "未能对 SCST 目标设置属性“新用户”。" msgid "Failed to set partition." msgstr "未能设置分区。" #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "未能针对一致性组 %(cgname)s 设置许可权。错误为 %(excmsg)s。" #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "未能针对要取消映射的卷 %(volume_id)s 指定逻辑设备。" #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "未能指定要删除的逻辑设备。(方法为 %(method)s,标识为 %(id)s)" msgid "Failed to terminate migrate session." msgstr "未能终止迁移会话。" #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "未能解除绑定卷 %(volume)s" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "未能针对一致性组 %(cgname)s 取消链接文件集。错误为 %(excmsg)s。" #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "未能取消映射逻辑设备。(逻辑设备为 %(ldev)s,原因为 %(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "未能更新一致性组:%(cgName)s。" #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "未能更新卷的元数据:%(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "未能更新或删除分区配置" msgid "Failed to update or delete zoning configuration." msgstr "无法更新或删除分区配置。" #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "未能通过规范 %(qos_specs)s 更新 qos_specs:%(specs_id)s。" msgid "Failed to update quota usage while retyping volume." msgstr "对卷进行转型时,更新配额使用率失败" msgid "Failed to update snapshot." msgstr "无法更新快照。" #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "使用驱动程序提供的模型 %(model)s 更新模型失败" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "未能使用提供的 %(src_type)s %(src_id)s 元数据更新卷 %(vol_id)s 元数据" #, python-format msgid "Failure creating volume %s." msgstr "创建卷 %s 时发生故障。" #, python-format msgid "Failure getting LUN info for %s." 
msgstr "针对 %s 获取 LUN 信息时发生故障。" #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "update_volume_key_value_pair 中发生故障:%s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "将新克隆的 LUN 移至 %s 时发生故障。" #, python-format msgid "Failure staging LUN %s to tmp." msgstr "将 LUN %s 登台至临时文件夹时发生故障。" msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "发生致命错误:不允许用户查询 NetApp 卷。" #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "由于 %(reason)s,Fexvisor 未能添加卷 %(id)s。" #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "由于 %(ret)s,Fexvisor 未能将卷 %(vol)s加入组 %(group)s。" #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "由于 %(ret)s,Fexvisor 未能移除组 %(group)s中的卷 %(vol)s。" #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "由于 %(reason)s,Fexvisor 未能移除卷 %(id)s。" #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "光纤通道 SAN 查找失败:%(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "“光纤通道区域”操作失败:%(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "光纤通道连接控制失败:%(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "找不到文件 %(file_path)s。" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "文件 %(path)s 具有无效支持文件 %(bfile)s,正在异常中止。" #, python-format msgid "File already exists at %s." msgstr "%s 处已存在文件。" #, python-format msgid "File already exists at: %s" msgstr "在以下位置处,已存在文件:%s" msgid "Find host in hostgroup error." msgstr "在主机组中查找主机时发生错误。" msgid "Find host lun id error." msgstr "查找主机 LUN 标识时发生错误。" msgid "Find lun group from mapping view error." msgstr "从映射视图查找 LUN 组时发生错误。" msgid "Find lun number error." msgstr "查找 LUN 号时发生错误。" msgid "Find mapping view error." msgstr "查找映射视图时发生错误。" msgid "Find portgroup error." msgstr "查找端口组时发生错误。" msgid "Find portgroup from mapping view error." msgstr "从映射视图查找端口组时发生错误。" #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "闪存高速缓存策略要求安装了 WSAPI 版本“%(fcache_version)s”版本“%(version)s”。" #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "Flexvisor 分配卷失败:%(id)s:%(status)s。" #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Flexvisor 分配卷失败:%(id)s:%(status)s。" #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "Flexvisor 在组 %(vgid)s 快照 %(vgsid)s 中找不到卷 %(id)s 快照。" #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "Flexvisor 创建卷失败:%(volumeid)s:%(status)s。" #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor 无法删除卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor 未能将卷 %(id)s 添加至组 %(cgid)s。" #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "Flexvisor 无法分配卷 %(id)s,因为无法按事件标识查询状态。 " #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor 无法分配卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." 
msgstr "Flexvisor 未能分配卷 %(volume)s iqn %(iqn)s。" #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor 无法克隆卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "Flexvisor 无法克隆卷(无法获取事件)%(id)s。" #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "Flexvisor 无法对卷 %(id)s 创建快照:%(status)s。" #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "Flexvisor 无法对卷 %(id)s 创建快照(无法获取事件)。 " #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "Flexvisor 未能在组 %(vgid)s 中创建卷 %(id)s。" #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor 无法创建卷 %(volume)s:%(status)s。" #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor 无法创建卷(获取事件)%s。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "Flexvisor 未能从快照 %(id)s 创建卷:%(status)s。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor 无法从快照 %(id)s 创建卷:%(status)s。" #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "Flexvisor 无法从快照创建卷(无法获取事件)%(id)s。 " #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor 无法删除快照 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "Flexvisor 无法删除快照(无法获取事件)%(id)s。" #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor 无法删除卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor 未能扩展卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor 无法扩展卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "Flexvisor 无法扩展卷(无法获取事件)%(id)s。" #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "Flexvisor 无法获取池信息 %(id)s:%(status)s。" #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "Flexvisor 未能从组 %(vgid)s 获取卷 %(id)s 的快照标识。" #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor 未能从组 %(cgid)s 中移除卷 %(id)s。" #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor 无法从快照 %(id)s 衍生卷:%(status)s。" #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "Flexvisor 无法从快照衍生卷(无法获取事件)%(id)s。 " #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "Flexvisor 无法取消分配卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "Flexvisor 无法取消分配卷(获取事件)%(id)s。" #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor 未能取消分配卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor 找不到源卷 %(id)s 信息。" #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "Flexvisor 取消分配卷失败:%(id)s:%(status)s。" #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." 
msgstr "Flexvisor 卷 %(id)s 未能加入组 %(vgid)s。" #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "文件夹 %s 在 Nexenta 存储设备中不存在" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS 没有在运行,状态:%s。" msgid "Gateway VIP is not set" msgstr "未设置网关 VIP" msgid "Get FC ports by port group error." msgstr "按端口组获取 FC 端口时出错。" msgid "Get FC ports from array error." msgstr "从阵列中获取 FC 端口时发生错误。" msgid "Get FC target wwpn error." msgstr "获取 FC 目标 WWPN 时发生错误。" msgid "Get HyperMetroPair error." msgstr "获取 HyperMetroPair 时出错。" msgid "Get LUN group by view error." msgstr "按视图获取 LUN 组时出错。" msgid "Get LUNcopy information error." msgstr "获取 LUNcopy 信息时发生错误。" msgid "Get QoS id by lun id error." msgstr "通过 LUN 标识获取 QoS 标识时发生错误。" msgid "Get QoS information error." msgstr "获取 QoS 信息时发生错误。" msgid "Get QoS policy error." msgstr "获取 QoS 策略时发生错误。" msgid "Get SplitMirror error." msgstr "获取 SplitMirror 时出错。" msgid "Get active client failed." msgstr "获取活动客户机失败。" msgid "Get array info error." msgstr "获取阵列信息时出错。" msgid "Get cache by name error." msgstr "按名称获取高速缓存时发生错误。" msgid "Get connected free FC wwn error." msgstr "获取已连接的空闲 FC wwn 时发生错误。" msgid "Get engines error." msgstr "获取引擎时出错。" msgid "Get host initiators info failed." msgstr "获取主机启动程序信息失败。" msgid "Get hostgroup information error." msgstr "获取主机组信息时发生错误。" msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "获取 iSCSI 端口信息时发生错误,请检查 huawei conf 文件中所配置的目标 IP。" msgid "Get iSCSI port information error." msgstr "获取 iSCSI 端口信息时发生错误。" msgid "Get iSCSI target port error." msgstr "获取 iSCSI 目标端口时发生错误。" msgid "Get lun id by name error." msgstr "通过名称获取 LUN 标识时出错。" msgid "Get lun migration task error." msgstr "获取 LUN 迁移任务时发生错误。" msgid "Get lungroup id by lun id error." msgstr "通过 LUN 标识获取 LUN 组标识时发生错误。" msgid "Get lungroup information error." msgstr "获取 LUN 组信息时发生错误。" msgid "Get migration task error." msgstr "获取迁移任务时出错。" msgid "Get pair failed." msgstr "获取对失败。" msgid "Get partition by name error." msgstr "按名称获取分区时发生错误。" msgid "Get partition by partition id error." msgstr "按分区标识获取分区时发生错误。" msgid "Get port group by view error." msgstr "按视图获取端口组时出错。" msgid "Get port group error." msgstr "获取端口组时出错。" msgid "Get port groups by port error." msgstr "按端口获取端口组时出错。" msgid "Get ports by port group error." msgstr "按端口组获取端口时出错。" msgid "Get remote device info failed." msgstr "获取远程设备信息失败。" msgid "Get remote devices error." msgstr "获取远程设备时出错。" msgid "Get smartcache by cache id error." msgstr "按高速缓存标识获取 smartcache 时发生错误。" msgid "Get snapshot error." msgstr "获取快照时出错。" msgid "Get snapshot id error." msgstr "获取快照标识时发生错误。" msgid "Get target IP error." msgstr "获取目标 IP 时发生错误。" msgid "Get target LUN of SplitMirror error." msgstr "获取 SplitMirror 的目标 LUN 时出错。" msgid "Get views by port group error." msgstr "按端口组获取视图时出错。" msgid "Get volume by name error." msgstr "按名称获取卷时发生错误。" msgid "Get volume error." msgstr "获取卷时发生错误。" #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "无法更新 Glance 元数据,对于卷标识 %(volume_id)s,键 %(key)s 存在" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
msgstr "Glance中无法找到卷/镜像 %(id)s 的元数据" #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "%(config)s 处不存在 Gluster 配置文件" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Google 云存储器 API 故障:%(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Google 云存储器连接故障:%(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Google 云存储器 oauth2 故障:%(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "从 DRBDmanage 中获得了错误路径信息!(%s)" msgid "HBSD error occurs." msgstr "发生 HBSD 错误。" msgid "HNAS has disconnected SSC" msgstr "HNAS 已断开 SSC " msgid "HPELeftHand url not found" msgstr "找不到 HPELeftHand URL" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." msgstr "" "已请求 HTTPS 证书验证,但无法使用 purestorage 模块版本 %(version)s 启用。请升" "级至更高版本以启用此功能。" #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "自从最近一次备份以来,散列块大小已更改。新的散列块大小:%(new)s。旧的散列块大" "小:%(old)s。请执行完全备份。" #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "尚未创建 %(tier_levels)s 层。" #, python-format msgid "Hint \"%s\" not supported." msgstr "提示“%s”不受支持。" msgid "Host" msgstr "主机" #, python-format msgid "Host %(host)s could not be found." msgstr "主机 %(host)s 没有找到。" #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "主机 %(host)s 与 x509 证书内容不匹配:CommonName %(commonName)s。" #, python-format msgid "Host %s has no FC initiators" msgstr "主机 %s 没有 FC 启动程序" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "主机 %s 没有 iSCSI 启动程序" #, python-format msgid "Host '%s' could not be found." msgstr "找不到主机“%s”。" #, python-format msgid "Host group with name %s not found" msgstr "找不到名称为 %s 的主机组" #, python-format msgid "Host group with ref %s not found" msgstr "找不到具有 ref %s 的主机组" msgid "Host is NOT Frozen." msgstr "主机未冻结。" msgid "Host is already Frozen." msgstr "主机已冻结。" msgid "Host not found" msgstr "没有找到主机" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "找不到主机。未能在 %(host)s 上移除 %(service)s。" #, python-format msgid "Host replication_status must be %s to failover." msgstr "主机 replication_status 必须为 %s 才能进行故障转移。" #, python-format msgid "Host type %s not supported." msgstr "不支持主机类型 %s。" #, python-format msgid "Host with ports %(ports)s not found." msgstr "找不到具有端口 %(ports)s 的主机。" msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "Hypermetro 和复制不能用于同一 volume_type。" #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "I/O 组 %(iogrp)d 无效;可用的 I/O 组为 %(avail)s。" msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "Blockbridge API 的 IP 地址/主机名。" msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "如果 compression 设置为 True,那么还必须设置 rsize(不等于 -1)。" msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "如果 nofmtdisk 设置为 True,rsize 必须也设置为 -1。" #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." 
msgstr "" "为 flashsystem_connection_protocol 指定的值“%(prot)s”非法:有效值为 " "%(enabled)s。" msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "对 IOTYPE 指定了非法值:0、1 或 2。" msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "为 smarttier 指定了非法值:请将值设置为 0、1、2 或者 3。" msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "为 storwize_svc_vol_grainsize 指定了非法值:请将值设置为 32、64、128 或 256。" msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "为 thin 指定了非法值:不能同时设置 thin 和 thick。" #, python-format msgid "Image %(image_id)s could not be found." msgstr "找不到映像 %(image_id)s。" #, python-format msgid "Image %(image_id)s is not active." msgstr "映像 %(image_id)s 处于不活动状态。" #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "映像 %(image_id)s 无法接受,原因是: %(reason)s" msgid "Image location not present." msgstr "映像位置不存在。" #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "映像虚拟大小为 %(image_size)dGB,在大小为 %(volume_size)dGB 的卷中将无法容" "纳。" msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "删除 rbd 卷时,发生 ImageBusy 错误。这可能是由于客户机的已崩溃连接导致,如果" "是这样,那么可通过在 30 秒之后重试该删除来解决问题。" #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "记录导入失败,找不到要执行导入的备份服务。请求服务 %(service)s" msgid "Incorrect request body format" msgstr "不正确的请求主体格式" msgid "Incorrect request body format." msgstr "请求主体格式不正确。" msgid "Incremental backups exist for this backup." msgstr "对于此备份,存在增量备份。" #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Infortrend CLI 异常:%(err)s 参数:%(param)s(返回码:%(rc)s)(输出:" "%(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "初始层:{},策略:{} 无效。" msgid "Input type {} is not supported." msgstr "不支持输入类型 {}。" msgid "Input volumes or snapshots are invalid." msgstr "输入卷或快照无效。" msgid "Input volumes or source volumes are invalid." msgstr "输入卷或源卷无效。" #, python-format msgid "Instance %(uuid)s could not be found." msgstr "找不到实例 %(uuid)s。" msgid "Insufficient free space available to extend volume." msgstr "可用空间不足,无法扩展卷。" msgid "Insufficient privileges" msgstr "特权不足" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "连接至 ceph 集群的连接重试之间的时间间隔值(秒)。" #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "为 io_port_list 指定了无效 %(protocol)s 端口 %(port)s。" #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "3PAR 域无效:%(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." 
msgstr "ALUA 值无效。ALUA 值必须为 1 或 0。" msgid "Invalid Ceph args provided for backup rbd operation" msgstr "为备份rbd操作提供的Ceph参数无效" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "Cg 快照无效:%(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "一致性组无效:%(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "无效 ConsistencyGroup:一致性组状态必须为“available”或“error”,但当前状态为:" "in-use" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "一致性组无效:一致性组状态必须为“可用”,但当前状态为:%s。" msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "一致性组无效:没有任何主机用于创建一致性组" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "发现无效 HPELeftHand API 版本 (%(found)s)。需要版本 %(minimum)s 或更高版本以" "获取管理/取消管理支持。" #, python-format msgid "Invalid IP address format: '%s'" msgstr "IP 地址格式“%s”无效" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "获取卷 %s 的 QoS 策略时,检测到无效 QoS 规范" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "无效复制目标:%(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "无效 VNX 认证类型:%s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "无效 Virtuozzo 存储器共享规范:%r。必须为 [MDS1[,MDS2],...:/][:" "PASSWORD]。" #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "XtremIO V%(cur)s 无效,需要 V%(min)s 或更高版本" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "对以下项目配额定义的已分配配额无效:%s" msgid "Invalid argument" msgstr "自变量无效" msgid "Invalid argument - negative seek offset." msgstr "无效参数 - 查找偏移量为负数。" #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "自变量无效 - whence=%s 不受支持" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "无效参数 - whence=%s 不受支持。" #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "加载模式 '%(mode)s' 对于卷 %(volume_id)s 无效。" #, python-format msgid "Invalid auth key: %(reason)s" msgstr "认证密钥无效:%(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "备份无效:%(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "无效 barbican api URL:需要版本,例如,“http[s]://|[:port]/" "”,指定的 URL 为:%s" msgid "Invalid cgsnapshot" msgstr "cg 快照无效" msgid "Invalid chap user details found in CloudByte storage." msgstr "在 CloudByte 存储器中找到了无效 chap 用户详细信息。" #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "卷 %(name)s 的连接初始化响应无效" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "卷 %(name)s 的连接初始化响应无效:%(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "无效的内容类型 %(content_type)s。" msgid "Invalid credentials" msgstr "无效凭证" #, python-format msgid "Invalid directory: %s" msgstr "无效目录:%s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "无效磁盘适配器类型:%(invalid_type)s。" #, python-format msgid "Invalid disk backing: %s." msgstr "无效磁盘备份:%s。" #, python-format msgid "Invalid disk type: %(disk_type)s." 
msgstr "无效磁盘类型:%(disk_type)s。" #, python-format msgid "Invalid disk type: %s." msgstr "无效磁盘类型:%s。" #, python-format msgid "Invalid host: %(reason)s" msgstr "主机无效:%(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "发现无效 hpe3parclient 版本 (%(found)s)。需要版本 %(minimum)s 或更高版本。请" "运行“pip install --upgrade python-3parclient”以升级 hpe3parclient。" #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "发现无效 hpelefthandclient 版本 (%(found)s)。需要版本 %(minimum)s 或更高版" "本。请运行“pip install --upgrade python-lefthandclient”以升级 " "hpelefthandclient。" #, python-format msgid "Invalid image href %(image_href)s." msgstr "无效映像 href %(image_href)s。" msgid "Invalid image identifier or unable to access requested image." msgstr "映像标识无效,或无法访问所请求映像。" msgid "Invalid imageRef provided." msgstr "提供了无效的imageRef。" msgid "Invalid initiator value received" msgstr "接收到无效启动程序值。" msgid "Invalid input" msgstr "输入无效" #, python-format msgid "Invalid input received: %(reason)s" msgstr "输入无效: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "is_public 过滤器 [%s] 无效" #, python-format msgid "Invalid lun type %s is configured." msgstr "配置了无效 LUN 类型 %s。" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "元数据大小无效: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "元数据无效: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "安装点基准无效:%s" #, python-format msgid "Invalid mount point base: %s." msgstr "安装点基准无效:%s。" #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "新 snapCPG 名称对执行 retype 操作无效。new_snap_cpg='%s'。" #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Coho rpc 端口的端口号 %(config)s 无效" #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "配置了无效预取类型“%s”。PrefetchType 必须为 0、1、2 和 3 的其中之一。" #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "qos 规范无效:%(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "关于将卷连接至无效目标的请求无效" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "以无效方式连接卷的请求无效。连接方式应该为“rw”或“ro”" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "预留到期 %(expire)s 无效。" msgid "Invalid response header from RPC server" msgstr "RPC 服务器发送的响应头无效" #, python-format msgid "Invalid secondary id %s." msgstr "无效辅助标识 %s。" #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "指定了无效 secondary_backend_id。有效后端为 %s。" msgid "Invalid service catalog json." msgstr "服务目录 json 无效。" msgid "Invalid sheepdog cluster status." msgstr "sheepdog 集群状态无效。" #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "快照无效: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "无效的状态:'%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "请求的存储池 %s 无效。转型失败。" #, python-format msgid "Invalid storage pool %s specificed." msgstr "指定的存储池 %s 无效。" msgid "Invalid storage pool is configured." msgstr "配置了无效存储池。" #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." 
msgstr "指定了无效同步方式,允许的方式为 %s。" msgid "Invalid transport type." msgstr "无效传输类型。" #, python-format msgid "Invalid update setting: '%s'" msgstr "无效的更新设置:'%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "URL 无效:必须为“http[s]://|[:port]/”格式,指定的 URL " "为:%s" #, python-format msgid "Invalid value '%s' for force." msgstr "值“%s”对于 force 无效。" #, python-format msgid "Invalid value '%s' for force. " msgstr "值“%s”对于 force 无效。" #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "is_public 的值“%s”无效。接受的值:True 或 False。" #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "skip_validation 的值“%s”无效。" #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "“bootable”的值无效:“%s”" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "“force”的值无效:“%s”" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "“readonly”的值无效:“%s”" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "值对于“scheduler_max_attempts”无效,必须 >= 1" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "NetApp 配置选项 netapp_host_type 的值无效。" msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "NetApp 配置选项 netapp_lun_ostype 的值无效。" #, python-format msgid "Invalid value for age, %(age)s" msgstr "age 的值 %(age)s 无效" #, python-format msgid "Invalid value: \"%s\"" msgstr "无效值:“%s”" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "针对创建请求提供的以下卷大小无效:%s(自变量 size 必须是整数(也可以是整数的" "字符串表示法)并且大于零)。" #, python-format msgid "Invalid volume type: %(reason)s" msgstr "卷类型无效:%(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "卷无效: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "卷无效:无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为该卷处于无效" "状态:%(status)s。以下是有效状态:(“可用”、“正在使用”)。" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "卷无效:无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为卷类型 " "%(volume_type)s 不受该组支持。" #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "卷无效:无法将卷 fake-volume-uuid 添加至一致性组 %(group_id)s,因为找不到该" "卷。" #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "卷无效:无法将卷 fake-volume-uuid 从一致性组 %(group_id)s 移除,因为它没有在" "该组中。" #, python-format msgid "Invalid volume_type passed: %s." msgstr "已传递的 volume_type 无效:%s。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "所提供的以下 volume_type 无效:%s(所请求的类型不兼容;要么与源卷相匹配,要么" "省略类型参数)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." 
msgstr "所提供的 volume_type %s 无效(所请求的类型不兼容;建议省略类型参数)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "提供的以下 volume_type 无效:%s(所请求类型必须受此一致性组支持)。" #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "无效 WWPN 格式 %(wwpns)s" msgid "Invoking web service failed." msgstr "调用 Web Service 失败。" msgid "Issue encountered waiting for job." msgstr "等待作业时遇到问题。" msgid "Issue encountered waiting for synchronization." msgstr "等待同步时遇到问题。" msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "发出故障转移失败,因为未正确配置复制。" msgid "Item not found" msgstr "条目没有找到" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "在 CloudByte 的创建卷 [%s] 响应中找不到作业标识。" #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "在 CloudByte 的删除卷 [%s] 响应中找不到作业标识。" msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "键名只能包含字母数字字符、下划线、句点、冒号和连字符。" #, python-format msgid "KeyError: %s" msgstr "KeyError:%s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "必须使用 Keystone 版本 3 或更高版本来获取嵌套配额支持。" #, python-format msgid "LU does not exist for volume: %s" msgstr "卷 %s 没有 LU" msgid "LUN export failed!" msgstr "LUN 导出失败!" msgid "LUN id({}) is not valid." msgstr "LUN 标识 ({}) 无效。" msgid "LUN map overflow on every channel." msgstr "LUN 映射在每个通道上溢出。" #, python-format msgid "LUN not found with given ref %s." msgstr "找不到具有给定引用 %s 的 LUN。" msgid "LUN number ({}) is not an integer." msgstr "LUN 编号 ({}) 并非整数。" #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 号超出了通道标识 %(ch_id)s 的范围。" #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "具有给定引用 %(ref)s 的 LUN 没有满足卷类型。请确保 vserver %(vs)s 上存在具有 " "ssc 功能的 LUN 卷。" #, python-format msgid "Last %s cinder syslog entries:-" msgstr "以下是最后 %s 个 cinder 系统日志条目:-" msgid "LeftHand cluster not found" msgstr "找不到 LeftHand 集群" msgid "License is unavailable." msgstr "许可证不可用。" #, python-format msgid "Line %(dis)d : %(line)s" msgstr "行 %(dis)d:%(line)s" msgid "Link path already exists and its not a symlink" msgstr "链接路径已存在,并且它不是符号链接" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "不支持处于以下状态的源卷的已链接克隆:%s。" msgid "Lock acquisition failed." msgstr "锁定获取失败。" msgid "Logout session error." msgstr "注销会话错误。" msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "未配置查找服务。fc_san_lookup_service 的配置选项需要指定查找服务的具体实现。" msgid "Lun migration error." msgstr "Lun 迁移错误。" #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." 
msgstr "对象 %(object_name)s 的 MD5 在 %(md5)s 之前和 %(etag)s 之后不相同。" #, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED:%r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED:AUTH_ERROR:%r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED:RPC_MISMATCH:%r" #, python-format msgid "Malformed fcns output string: %s" msgstr "以下 fcns 输出字符串的格式不正确:%s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "错误格式的消息体: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "以下名称服务器字符串的格式不正确:%s" msgid "Malformed request body" msgstr "错误格式的请求主体" msgid "Malformed request body." msgstr "请求主体的格式不正确。" msgid "Malformed request url" msgstr "错误格式的请求url" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "对命令 %(cmd)s 的响应的格式不正确:%(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "错误格式的 scheduler_hints 属性" #, python-format msgid "Malformed show fcns database string: %s" msgstr "以下显示 fcns 数据库字符串的格式不正确:%s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "区域配置的格式不正确:(switch=%(switch)s zone_config=%(zone_config)s)。" #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "区域状态的格式不正确:(交换机为 %(switch)s,zone_config 为 " "%(zone_config)s)。" msgid "Manage existing get size requires 'id'." msgstr "管理现有 get 大小需要“id”。" msgid "Manage existing snapshot not implemented." msgstr "未实现对现有快照的管理。" #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "由于后端引用 %(existing_ref)s 无效,管理现有卷失败:%(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "由于卷类型不匹配,管理现有卷失败:%(reason)s" msgid "Manage existing volume not implemented." msgstr "未实现对现有卷的管理。" msgid "Manage existing volume requires 'source-id'." msgstr "管理现有卷将需要“source-id”。" #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "如果启用了 FAST,那么不支持管理卷。快速策略:%(fastPolicyName)s。" msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "不允许管理到达已故障转移的卷的快照。" msgid "Map info is None due to array version not supporting hypermetro." msgstr "无映射信息,因为阵列版本不支持 hypermetro。" #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "映射 %(id)s 准备未能在已分配的 %(to)d 秒超时内完成。正在终止。" #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "未成功删除掩码视图 %(maskingViewName)s" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "已超过允许的最大备份数 (%(allowed)d)" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "已超过允许的最大快照数 (%(allowed)d)" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "对于定额“%(name)s”,超过了允许的最大卷数 (%(allowed)d)。" #, python-format msgid "May specify only one of %s" msgstr "只能指定 %s 中的一个" msgid "Metadata backup already exists for this volume" msgstr "对于此卷,已存在元数据备份" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "元数据备份对象“%s”已存在" msgid "Metadata item was not found" msgstr "元数据项目未找到" msgid "Metadata item was not found." 
msgstr "找不到元数据项。" #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "元数据属性关键字 %s 超过 255 个字符" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "元数据属性关键字 %s 值超过 255 个字符" msgid "Metadata property key blank" msgstr "元数据属性关键字为空白" msgid "Metadata property key blank." msgstr "元数据属性关键字为空白。" msgid "Metadata property key greater than 255 characters." msgstr "元数据属性关键字超过 255 个字符。" msgid "Metadata property value greater than 255 characters." msgstr "元数据属性值超过 255 个字符。" msgid "Metadata restore failed due to incompatible version" msgstr "由于版本不兼容,元数据复原失败" msgid "Metadata restore failed due to incompatible version." msgstr "由于版本不兼容,元数据复原失败。" #, python-format msgid "Migrate volume %(src)s failed." msgstr "迁移卷 %(src)s 失败。" #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "在源卷 %(src)s 与目标卷 %(dst)s 之间迁移卷失败。" #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "LUN %s 的迁移已停止或者发生故障。" msgid "MirrorView/S enabler is not installed." msgstr "未安装 MirrorView/S 启用程序。" msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "缺少“purestorage”python 模块,请确保库已安装并且可用。" msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "缺少光纤通道 SAN 配置参数 - fc_fabric_names" msgid "Missing request body" msgstr "缺少请求主体" msgid "Missing request body." msgstr "缺少请求主体。" #, python-format msgid "Missing required element '%s' in request body" msgstr "在请求主体中缺少必需元素“%s”" #, python-format msgid "Missing required element '%s' in request body." msgstr "请求主体中缺少必需元素“%s”。" msgid "Missing required element 'consistencygroup' in request body." msgstr "请求主体中缺少必需元素“consistencygroup”。" msgid "Missing required element 'host' in request body." msgstr "请求主体中缺少必需元素“host”。" msgid "Missing required element quota_class_set in request body." msgstr "在请求主体中缺少必需元素 quota_class_set。" msgid "Missing required element snapshot in request body." msgstr "请求主体中缺少必需元素 snapshot。" msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "在期望此操作只有一个序列号时,却找到了多个序列号。请更改 EMC 配置文件。" #, python-format msgid "Multiple copies of volume %s found." msgstr "找到了卷 %s 的多个副本。" #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "对于“%s”,找到多个匹配项,请使用标识以更具体地进行查找。" msgid "Multiple profiles found." msgstr "找到了多个概要文件。" msgid "Must implement a fallback schedule" msgstr "必须实现一个回滚 schedule" msgid "Must implement find_retype_host" msgstr "必须实现 find_retype_host" msgid "Must implement host_passes_filters" msgstr "必须实现 host_passes_filters" msgid "Must implement schedule_create_consistencygroup" msgstr "必须实现 schedule_create_consistencygroup" msgid "Must implement schedule_create_volume" msgstr "必须实现 schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "必须实现 schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." msgstr "必须将 wwpn 或 host 传递给 lsfabric。" msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "必须以云管理员身份使用 Keystone policy.json(它允许云管理员列示和获取任何项" "目)运行此命令。" msgid "Must specify 'connector'" msgstr "必须指定“connector”" msgid "Must specify 'connector'." msgstr "必须指定“connector”。" msgid "Must specify 'host'." 
msgstr "必须指定“host”。" msgid "Must specify 'new_volume'" msgstr "必须指定“new_volume”" msgid "Must specify 'status'" msgstr "必须指定“status”" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "必须指定“status”、“attach_status”或“migration_status”以进行更新。" msgid "Must specify a valid attach status" msgstr "必须指定有效连接状态" msgid "Must specify a valid migration status" msgstr "必须指定有效迁移状态" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "必须指定有效角色 %(valid)s,值“%(persona)s”无效。" #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "指定有效供应类型 %(valid)s,值“%(prov)s”无效。" msgid "Must specify a valid status" msgstr "必须指定有效状态" msgid "Must specify an ExtensionManager class" msgstr "必须明确一个ExtensionManager类" msgid "Must specify bootable in request." msgstr "必须在请求中指定 bootable。" msgid "Must specify protection domain name or protection domain id." msgstr "必须指定保护域名或者保护域标识。" msgid "Must specify readonly in request." msgstr "必须在请求中指定 readonly。" msgid "Must specify snapshot source-name or source-id." msgstr "必须指定快照 source-name 或 source-id。" msgid "Must specify source-name or source-id." msgstr "必须指定 source-name 或 source-id。" msgid "Must specify storage pool name or id." msgstr "必须指定存储池名称或标识。" msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "必须指定存储库。选项:sio_storage_pools。" msgid "Must supply a positive value for age" msgstr "必须为 age 提供正值" msgid "Must supply a positive, non-zero value for age" msgstr "必须为时效提供非零正值" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "NAS 配置“%(name)s=%(value)s”无效。必须为“auto”、“true”或“false”" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr " %(config)s 处不存在 NFS 配置文件" #, python-format msgid "NFS file %s not discovered." msgstr "未发现 NFS 文件 %s。" msgid "NFS file could not be discovered." msgstr "未能发现 NFS 文件。" msgid "NaElement name cannot be null." msgstr "NaElement 名称不能为空。" msgid "Name" msgstr "名称" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "在请求主体中,名称、描述、add_volumes 和 remove_volumes 不能全部为空。" msgid "Need non-zero volume size" msgstr "需要非零卷大小" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "既无 MSG_DENIED,也无 MSG_ACCEPTED:%r" msgid "NetApp Cinder Driver exception." msgstr "发生“NetApp Cinder 驱动程序”异常。" #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "用于扩展的新大小必须大于当前大小。(当前:%(size)s,已扩展:%(new_size)s)。" #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "新大小应该大于后端存储器中的实际大小。realsize:%(oldsize)s,newsize:" "%(newsize)s。" msgid "New volume size must be specified as an integer." msgstr "新卷的大小必须指定为整数。" msgid "New volume type must be specified." msgstr "必须指定新的卷类型。" msgid "New volume type not specified in request_spec." msgstr "在 request_spec 中,未指定新的卷类型。" #, python-format msgid "New volume_type same as original: %s." msgstr "新的 volume_type 与原始的相同:%s。" msgid "Nimble Cinder Driver exception" msgstr "Nimble Cinder 驱动程序异常" msgid "No FC initiator can be added to host." msgstr "无法将任何 FC 启动程序添加至主机。" msgid "No FC port connected to fabric." msgstr "没有任何 FC 端口连接至光纤网络。" msgid "No FCP targets found" msgstr "找不到任何 FCP 目标" msgid "No Port Group elements found in config file." 
msgstr "配置文件中找不到端口组。" msgid "No VF ID is defined in the configuration file." msgstr "未在配置文件中定义 VF 标识。" msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "不存在具有所提供 iSCSI IP 的活动 iSCSI 门户网站" #, python-format msgid "No available service named %s" msgstr "不存在任何名为 %s 的可用服务" #, python-format msgid "No backup with id %s" msgstr "不存在任何具有标识 %s 的备份" msgid "No backups available to do an incremental backup." msgstr "没有任何备份可用于执行增量备份。" msgid "No big enough free disk" msgstr "不存在任何足够大的可用磁盘" #, python-format msgid "No cgsnapshot with id %s" msgstr "不存在任何具有标识 %s 的 cg 快照" msgid "No cinder entries in syslog!" msgstr "系统日志中没有任何 cinder 条目!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "在文件管理器上,找不到名为 %s 的已克隆 LUN" msgid "No config node found." msgstr "找不到配置节点。" #, python-format msgid "No consistency group with id %s" msgstr "不存在任何具有标识 %s 的一致性组" #, python-format msgid "No element by given name %s." msgstr "没有具备给定名称 %s 的元素。" msgid "No errors in logfiles!" msgstr "日志文件中没有任何错误!" #, python-format msgid "No file found with %s as backing file." msgstr "在将 %s 作为支持文件的情况下,找不到任何文件。" #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "未剩余可用 LUN 标识。已超过可以连接至主机的最大卷数 (%s)。" msgid "No free disk" msgstr "不存在任何可用磁盘" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "对于 %s,在所提供列表中,找不到任何有用的 iSCSI 门户网站。" #, python-format msgid "No good iscsi portals found for %s." msgstr "对于 %s,找不到任何有用的 iSCSI 门户网站。" #, python-format msgid "No host to create consistency group %s." msgstr "没有任何主机用于创建一致性组 %s。" msgid "No iSCSI-enabled ports on target array." msgstr "目标阵列上没有可支持 iSCSI 的端口。" msgid "No image_name was specified in request." msgstr "未在请求中指定任何 image_name。" msgid "No initiator connected to fabric." msgstr "没有任何启动程序连接至光纤网络。" #, python-format msgid "No initiator group found for initiator %s" msgstr "找不到对应启动程序 %s 的启动程序组" msgid "No initiators found, cannot proceed" msgstr "找不到任何发起方,无法继续" #, python-format msgid "No interface found on cluster for ip %s" msgstr "集群中找不到 IP %s 的接口" msgid "No ip address found." msgstr "找不到 IP 地址。" msgid "No iscsi auth groups were found in CloudByte." msgstr "在 CloudByte 中,找不到任何 iscsi 认证组。" msgid "No iscsi initiators were found in CloudByte." msgstr "在 CloudByte 中,找不到任何 iscsi 发起方。" #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "对于 CloudByte 卷 [%s],找不到任何 iscsi 服务。" msgid "No iscsi services found in CloudByte storage." msgstr "在 CloudByte 存储器中,找不到任何 iscsi 服务。" #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "没有指定任何密钥文件,无法从 %(cert)s %(e)s 装入密钥。" msgid "No mounted Gluster shares found" msgstr "找不到任何已安装的 Gluster 共享项" msgid "No mounted NFS shares found" msgstr "找不到任何已安装的 NFS 共享项" msgid "No mounted SMBFS shares found." msgstr "找不到任何已安装的 SMBFS 共享项。" msgid "No mounted Virtuozzo Storage shares found" msgstr "找不到任何已安装的 Virtuozzo 存储器共享项" msgid "No mounted shares found" msgstr "找不到任何已安装的共享项" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到节点。" msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "没有池可用于提供卷。请确保正确设置了 netapp_pool_name_search_pattern 配置选" "项。" msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." 
msgstr "未从 CloudByte 存储器列表 iSCSI 认证用户 API 调用接收到任何响应。" msgid "No response was received from CloudByte storage list tsm API call." msgstr "未从 CloudByte 存储器列表 tsm API 调用接收到任何响应。" msgid "No response was received from CloudByte's list filesystem api call." msgstr "未从 CloudByte 的列表文件系统 API 调用接收到任何响应。" msgid "No service VIP configured and no nexenta_client_address" msgstr "未配置服务 VIP 并且没有 nexenta_client_address" #, python-format msgid "No snap found with %s as backing file." msgstr "在将 %s 作为支持文件的情况下,找不到任何 snap。" #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "在快照组 %s 中,找不到快照映像。" #, python-format msgid "No snapshots could be found on volume %s." msgstr "在卷 %s 上找不到快照。" #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "未提供源快照,无法创建一致性组 %s。" #, python-format msgid "No storage path found for export path %s" msgstr "对于导出路径 %s,找不到存储路径" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "不存在任何此类 QoS 规范 %(specs_id)s。" msgid "No suitable discovery ip found" msgstr "找不到合适的发现 IP" #, python-format msgid "No support to restore backup version %s" msgstr "不支持复原备份版本 %s" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "没有为卷 %(volume_id)s 找到目标id。" msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "主机中没有可用的未使用 LUN 标识;已启用多个连接,这要求所有 LUN 标识在整个主" "机组中唯一。" #, python-format msgid "No valid host was found. %(reason)s" msgstr "找不到有效主机,原因是 %(reason)s。" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "对于类型为 %(type)s 的卷 %(id)s,不存在任何有效主机" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "没有具备引用 %s 指定的 UID 的 vdisk。" #, python-format msgid "No views found for LUN: %s" msgstr "找不到 LUN %s 的视图" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "集群上不存在任何具有 vserver %(vserver)s 和结点路径 %(junction)s 的卷" msgid "No volume service(s) started successfully, terminating." msgstr "未成功启动卷服务,正在终止。" msgid "No volume was found at CloudByte storage." msgstr "在 CloudByte 存储器上,找不到任何卷。" msgid "No volume_type should be provided when creating test replica." msgstr "当创建测试副本时,不应该提供任何 volume_type。" msgid "No volumes found in CloudByte storage." msgstr "在 CloudByte 存储器中找不到任何卷。" msgid "No weighed hosts available" msgstr "没有加权主机可用" #, python-format msgid "Not a valid string: %s" msgstr "无效字符串:%s" msgid "Not a valid value for NaElement." msgstr "此值对 NaElement 无效。" #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "无法找到适合卷 %s 的数据存储器。" msgid "Not an rbd snapshot" msgstr "不是 rbd 快照" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "未针对映像 %(image_id)s 授权。" msgid "Not authorized." msgstr "未授权。" #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "后端 (%(backend)s) 上没有足够的空间" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "ZFS 共享项中的存储空间不足,无法执行此操作。" msgid "Not stored in rbd" msgstr "未存储在 rbd 中" msgid "Nova returned \"error\" status while creating snapshot." msgstr "在创建快照时,Nova 返回了“错误”状态。" msgid "Null response received from CloudByte's list filesystem." msgstr "从 CloudByte 的列表文件系统接收到空响应。" msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "从 CloudByte 的列表 iscsi 认证组接收到空响应。" msgid "Null response received from CloudByte's list iscsi initiators." 
msgstr "从 CloudByte 的列表 iscsi 发起方接收到空响应。" msgid "Null response received from CloudByte's list volume iscsi service." msgstr "从 CloudByte 的列表卷 iscsi 服务接收到空响应。" #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "在 CloudByte 存储器上创建卷 [%s] 时,接收到空响应。" #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "删除 CloudByte 存储器上的卷 [%s] 时,接收到空响应。" #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "在 CloudByte 存储器中查询基于 [%(operation)s] 的作业[%(job)s] 时接收到空响" "应。" msgid "Number of retries if connection to ceph cluster failed." msgstr "连接至 ceph 集群失败时的重试次数。" msgid "Object Count" msgstr "对象计数" msgid "Object Version" msgstr "对象版本" msgid "Object is not a NetApp LUN." msgstr "对象不是 NetApp LUN。" #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "执行扩展操作期间,向组合卷 %(volumename)s 添加卷时出错。 " msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "某个 cinder-volume 服务太旧,无法接受这类请求。您是否正在运行混合 Liberty-" "Mitaka cinder-volume?" msgid "One of the required inputs from host, port or scheme was not found." msgstr "找不到来自主机、端口或方案的必需输入之一。" #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "只能有 %(value)s 个 %(verb)s 请求发送给 %(uri)s 限定是每一个 " "%(unit_string)s。" msgid "Only one limit can be set in a QoS spec." msgstr "在 QoS 规范中只能设置一个限制。" msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "只允许令牌作用域仅限于直系父代或者根项目的用户查看其子代配额。" msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "只有 OpenStack 管理的卷才能为非受管卷。" #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "操作失败,并且 status=%(status)s。完全转储:%(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "操作 %(operation)s 不受支持。" msgid "Option gpfs_images_dir is not set correctly." msgstr "选项 gpfs_images_dir 未正确设置。" msgid "Option gpfs_images_share_mode is not set correctly." msgstr "选项 gpfs_images_share_mode 未正确设置。" msgid "Option gpfs_mount_point_base is not set correctly." msgstr "选项 gpfs_mount_point_base 未正确设置。" msgid "Option map (cls._map) is not defined." msgstr "未定义选项映射 (cls._map)。" #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "始发 %(res)s %(prop)s 必须为其中一个“%(vals)s”值" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "覆盖 HTTPS 端口以连接至 Blockbridge API 服务器。" #, python-format msgid "ParseException: %s" msgstr "ParseException:%s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "分区名称为 None,请在键中设置 smartpartition:partitionname。" msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "进行认证需要密码或 SSH 专用密钥:请设置 san_password 或 san_private_key 选" "项。" msgid "Path to REST server's certificate must be specified." msgstr "必须指定 REST 服务器的证书的路径。" #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "请提前创建 %(pool_list)s 池!" #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "请提前在池 %(pool)s 中创建 %(tier_levels)s 层!" msgid "Please re-run cinder-manage as root." 
msgstr "请以 root 用户身份重新运行 cinder-manage。" msgid "Please specify a name for QoS specs." msgstr "请为 QoS 规范指定名称。" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "政策不允许 %(action)s 被执行。" #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "未能找到池 %(poolNameInStr)s。" #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "池 %s 在 Nexenta 存储设备中不存在" #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "找不到 volume['host'] %(host)s 中的池。" #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "volume['host'] 中的池失败,产生了异常:%(ex)s。" msgid "Pool is not available in the volume host field." msgstr "在卷主机字段中,未提供池。" msgid "Pool is not available in the volume host fields." msgstr "在卷主机字段中,未提供池。" #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "在域 %(domain)s 中找不到名称为 %(pool)s 的池。" #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "在域 %(domain_id)s 中找不到名称为 %(pool_name)s 的池。" #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "池 %(poolName)s 与快速策略 %(fastPolicy)s 的存储层无关联。 " #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "池名称必须存在于 %(fileName)s 文件中。" #, python-format msgid "Pools %s does not exist" msgstr "池 %s 不存在" msgid "Pools name is not set." msgstr "未设置池名称。" #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "主副本状态为 %(status)s,并且已同步:%(sync)s" msgid "Project ID" msgstr "项目ID" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "未正确设置要用作嵌套配额的项目配额:%(reason)s。" msgid "Protection Group not ready." msgstr "保护组未就绪。" #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "存储器系列 %(storage_family)s 不支持协议 %(storage_protocol)s。" msgid "Provided backup record is missing an id" msgstr "所提供的备份记录缺少标识" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "对于状态为 %(current)s 的快照,不允许提供的快照状态 %(provided)s。" #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "提供程序信息 w.r.t:找不到对应 OpenStack 卷 [%s] 的 CloudByte 存储器。" #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Pure Storage Cinder 驱动程序故障:%(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "QoS 规范 %(specs_id)s 已存在。" #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "QoS 规范 %(specs_id)s 仍然与实体关联。" #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "QoS 配置不正确。%s 必须大于 0。" #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "QoS 策略必须指定 IOTYPE 和另一 qos_specs,QoS 策略:%(qos_policy)s。" #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "QoS 策略必须指定 IOTYPE:0、1 或 2。QoS 策略:%(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "" "QoS 策略 upper_limit 和 lower_limit 存在冲突,QoS 策略:%(qos_policy)s。" #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." 
msgstr "QoS 规范 %(specs_id)s 没有任何具有键 %(specs_key)s 的规范。" msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "在此存储器系列和 ONTAP 版本上,QoS 规范不受支持。" msgid "Qos specs still in use." msgstr "Qos 规范仍在使用中。" msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "建议不要按 service 参数进行查询。请改为使用 binary 参数。" msgid "Query resource pool error." msgstr "查询资源池时发生错误。" #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "配额 %s 限制必须大于或等于现有资源。" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "找不到配额类 %(class_name)s。" msgid "Quota could not be found" msgstr "配额没有找到。" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "对于资源,已超过配额:%(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "配额用尽:code=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "没有为项目 %(project_id)s 找到配额。" #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "资源“%(res)s”的项目“%(proj)s”的配合限制无效:限制 %(limit)d 小于“in-use”值 " "%(used)d" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "找不到配额预留 %(uuid)s。" #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "找不到项目 %(project_id)s 的配额使用量。" #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "RBD 差集操作失败 - (ret=%(ret)s stderr=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "必须指定 REST 服务器 IP。" msgid "REST server password must by specified." msgstr "必须指定 REST 服务器密码。" msgid "REST server username must by specified." msgstr "必须指定 REST 服务器用户名。" msgid "RPC Version" msgstr "RPC 版本" msgid "RPC server response is incomplete" msgstr "PRC 服务器响应不完整" msgid "Raid did not have MCS Channel." msgstr "RAID 不具备 MCS 通道。" #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "达到配置选项 max_luns_per_storage_group 设置的限制。将 %(vol)s 添加至存储器" "组 %(sg)s 的操作被拒绝。" #, python-format msgid "Received error string: %s" msgstr "接收到错误字符串:%s" msgid "Reference must be for an unmanaged snapshot." msgstr "引用必须针对非受管快照。" msgid "Reference must be for an unmanaged virtual volume." msgstr "引用必须对应非受管虚拟卷。" msgid "Reference must be the name of an unmanaged snapshot." msgstr "引用必须是非受管快照的名称。" msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "引用必须是非受管虚拟卷的卷名称。" msgid "Reference must contain either source-id or source-name element." msgstr "引用必须包含 source-id 或 source-name 元素。" msgid "Reference must contain either source-name or source-id element." msgstr "引用必须包含 source-name 或 source-id 元素。" msgid "Reference must contain source-id or source-name element." msgstr "引用必须包含 source-id 或 source-name 元素。" msgid "Reference must contain source-id or source-name key." msgstr "引用必须包含 source-id 或 source-name 键。" msgid "Reference must contain source-id or source-name." msgstr "引用必须包含 source-id 或 source-name。" msgid "Reference must contain source-id." msgstr "引用必须包含 source-id。" msgid "Reference must contain source-name element." msgstr "引用必须包含 source-name 元素。" msgid "Reference must contain source-name or source-id." 
msgstr "引用必须包含 source-name 或 source-id。" msgid "Reference must contain source-name." msgstr "引用必须包含源名称。" msgid "Reference to volume to be managed must contain source-name." msgstr "对要管理的卷的引用必须包含 source-name。" #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "对要管理的卷 %s 的引用必须包含 source-name。" #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "正在拒绝迁移卷标识:%(id)s。请检查配置因为源和目标是同一卷组:%(name)s。" msgid "Remote pool cannot be found." msgstr "找不到远程池。" msgid "Remove CHAP error." msgstr "移除 CHAP 时发生错误。" msgid "Remove fc from host error." msgstr "从主机中移除 FC 时发生错误。" msgid "Remove host from array error." msgstr "从阵列中移除主机时发生错误。" msgid "Remove host from hostgroup error." msgstr "从主机组中移除主机时发生错误。" msgid "Remove iscsi from host error." msgstr "从主机中移除 iSCSI 时发生错误。" msgid "Remove lun from QoS error." msgstr "从 QoS 移除 LUN 时出错。" msgid "Remove lun from cache error." msgstr "从高速缓存移除 LUN 时发生错误。" msgid "Remove lun from partition error." msgstr "从分区移除 LUN 时发生错误。" msgid "Remove port from port group error." msgstr "从端口组移除端口时出错。" msgid "Remove volume export failed." msgstr "除去卷导出失败。" msgid "Rename lun on array error." msgstr "在阵列上重命名 LUN 时发生错误。" msgid "Rename snapshot on array error." msgstr "在阵列上重命名快照时出错。" #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "将 %(name)s 复制到 %(ssn)s 失败。" #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到复制服务功能。" #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到复制服务。" msgid "Replication is not enabled" msgstr "未启用复制" msgid "Replication is not enabled for volume" msgstr "未对卷启用复制" msgid "Replication not allowed yet." msgstr "尚不允许复制。" #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "卷的复制状态必须为“活动”或“活动 - 已停止”,但当前状态为:%s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "卷的复制状态必须为“不活动”、“活动 - 已停止”或“错误”,但当前状态为:%s" msgid "Request body and URI mismatch" msgstr "请求主体和URI不匹配" msgid "Request body contains too many items" msgstr "请求主体包含太多items" msgid "Request body contains too many items." msgstr "请求主体包含太多项。" msgid "Request body empty" msgstr "请求主体是空的" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "针对 Datera 集群的请求返回了不正确的状态:%(status)s | %(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "所请求备份超过允许的备份千兆字节配额。已请求 %(requested)sG,配额为 " "%(quota)sG,并且已耗用 %(consumed)sG。" #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "所请求的卷或快照超过允许的 %(name)s 配额。已请求 %(requested)sG,配额为 " "%(quota)sG,已耗用 %(consumed)sG。" #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "所请求的卷大小 %(size)d超过了允许的最大限制 %(limit)d。" msgid "Required configuration not found" msgstr "找不到必需的配置选项" #, python-format msgid "Required flag %s is not set" msgstr "未设置必需标记 %s" msgid "Requires an NaServer instance." 
msgstr "需要 NaServer 实例。" #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "重置备份状态已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用" "来创建此备份的备份服务 [%(backup_service)s]。" #, python-format msgid "Resizing clone %s failed." msgstr "调整克隆 %s 的大小失败。" msgid "Resizing image file failed." msgstr "对映像文件调整大小失败。" msgid "Resource could not be found." msgstr "资源没有找到。" msgid "Resource not ready." msgstr "资源未就绪。" #, python-format msgid "Response error - %s." msgstr "响应错误 - %s。" msgid "Response error - The storage-system is offline." msgstr "响应错误 - 存储器系统已脱机。" #, python-format msgid "Response error code - %s." msgstr "响应错误代码 - %s。" msgid "RestURL is not configured." msgstr "未配置 RestURL。" #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份复原已异常中止,需要的卷状态为 %(expected_status)s,但获得的是 " "%(actual_status)s。" #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "备份复原已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" "建此备份的备份服务 [%(backup_service)s]。" #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份复原已异常中止:需要的备份状态为 %(expected_status)s,但获得的是 " "%(actual_status)s。" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "检索到所提供 Cinder 快照的不同 SolidFire 卷量。已检索到:%(ret)s 期望:" "%(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "检索到所提供 Cinder 卷的不同 SolidFire 卷量。已检索到:%(ret)s 期望:%(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "对于命令,超过重试次数:%s" msgid "Retryable SolidFire Exception encountered" msgstr "遇到可重试的 SolidFire 异常" msgid "Retype cannot change encryption requirements." msgstr "转型无法更改加密要求。" #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "转型无法为正在使用的卷 %s 更改前端 qos 规范。" msgid "Retype requires migration but is not allowed." msgstr "转型需要迁移,但是不允许。" #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "针对卷 %(volumeName)s 的回滚失败。请与系统管理员联系以将该卷手动返回至对应快" "速策略 %(fastPolicyName)s 的缺省存储器组。" #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "正在通过删除 %(volumeName)s 而对其进行回滚。" #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "不允许运行 VMware vCenter 版本低于 %s 的 Cinder。" msgid "SAN product is not configured." msgstr "未配置 SAN 产品。" msgid "SAN protocol is not configured." msgstr "未配置 SAN 协议。" #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "SMBFS 配置“smbfs_oversub_ratio”无效。必须大于 0:%s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "SMBFS 配置“smbfs_used_ratio”无效。必须大于 0 并且小于或等于 1.0:%s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." 
msgstr "%(config)s 处不存在 SMBFS 配置文件。" msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS 配置文件未设置 (smbfs_shares_config)。" #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "SSH 命令在“%(total_attempts)r”之后失败,尝试次数:“%(command)s”" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "SSH 命令失败,发生了错误:“%(err)s”,命令:“%(command)s”" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "检测到 SSH 命令注入:%(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "对于 %(fabric)s,SSH 连接失败,发生错误:%(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "在 %s 上,SSL 证书已到期。" #, python-format msgid "SSL error: %(arg)s." msgstr "SSL 错误:%(arg)s。" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "找不到调度程序主机衡量器 %(weigher_name)s。" #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "辅助副本状态为 %(status)s,并且已同步:%(sync)s,同步进度为:%(progress)s%%。" #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "辅助标识不能与主阵列相同,backend_id = %(secondary)s。" #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "序列号必须存在于 %(fileName)s 文件中。" #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "已移除主机 %(host)s 上的服务 %(service)s。" #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "在主机 %(host)s 上找不到服务 %(service_id)s。" #, python-format msgid "Service %(service_id)s could not be found." msgstr "服务 %(service_id)s 没有找到。" #, python-format msgid "Service %s not found." msgstr "找不到服务 %s。" msgid "Service is too old to fulfil this request." msgstr "服务太旧,无法实现此请求。" msgid "Service is unavailable at this time." msgstr "该时刻服务无法使用。" msgid "Service not found." msgstr "找不到服务。" msgid "Set pair secondary access error." msgstr "设置对辅助访问时出错。" msgid "Sets thin provisioning." msgstr "设置自动精简配置。" msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "不支持对此存储器系列和 ONTAP 版本设置 LUN QoS 策略组。" msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "不支持对此存储器系列和 ONTAP 版本设置文件 QoS 策略组。" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "由于格式无效,已忽略共享项 %s。格式必须为 address:/export。请检查 nas_ip 和 " "nas_share_path 设置。" #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "对于 Cinder 卷服务,%(dir)s 处的共享项不可写。快照操作将不受支持。" #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Sheepdog I/O 错误,命令为:\"%s\"。" msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "只能对与用户作用域仅限于的项目位于同一层次结构中的项目执行显示操作。" msgid "Size" msgstr "配置" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "找不到卷 %s 的大小,无法进行安全删除。" #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." 
msgstr "大小为 %(image_size)dGB,无法容纳在大小为 %(volume_size)dGB 的卷中。" #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "所指定映像的大小 %(image_size)sGB 大于卷大小 %(volume_size)sGB。" #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "在等待快照 %(id)s 变为可用时请求删除该快照。可能发出了并行请求。" #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "级联删除期间,发现快照 %(id)s 处于 %(state)s 状态而不是“deleting”状态。" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "快照 %(snapshot_id)s 没有找到。" #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "快照 %(snapshot_id)s 没有任何具有键 %(metadata_key)s 的元数据。" #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "快照 %s 不能属于某个一致性组。" #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "快照“%s”在阵列上不存在。" #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "无法创建快照,因为卷 %(vol_id)s 不可用,当前卷状态为 %(vol_status)s。" msgid "Snapshot cannot be created while volume is migrating." msgstr "无法在迁移卷时创建快照。" msgid "Snapshot of secondary replica is not allowed." msgstr "不允许获取辅助副本的快照。" #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "不支持对处于以下状态的卷生成快照:%s。" #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "未在任何位置部署的快照资源“%s”?" msgid "Snapshot size must be multiple of 1 GB." msgstr "快照大小必须是 1 GB 的倍数。" #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "对于 update_snapshot_status,不允许快照状态 %(cur)s" msgid "Snapshot status must be \"available\" to clone." msgstr "快照状态必须为“可用”,才能进行克隆。" #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "要备份的快照必须可用,但当前状态为“%s”。" #, python-format msgid "Snapshot with id of %s could not be found." msgstr "找不到标识为 %s 的快照。" #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "基本映像“%(base)s”中不存在快照“%(snap)s”- 正在异常中止增量备份" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "以下卷格式不支持快照:%s" #, python-format msgid "Socket error: %(arg)s." msgstr "套接字错误:%(arg)s。" msgid "SolidFire Cinder Driver exception" msgstr "发生“SolidFire Cinder 驱动程序”异常" msgid "Sort direction array size exceeds sort key array size." msgstr "排序方向阵列大小超过排序键阵列大小。" msgid "Source CG is empty. No consistency group will be created." msgstr "源 CG 为空。将不会创建任何一致性组。" msgid "Source host details not found." msgstr "找不到源主机详细信息。" msgid "Source volume device ID is required." msgstr "需要源卷设备标识。" msgid "Source volume not mid-migration." msgstr "源卷未在迁移中。" #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "对于后端启用的卷迁移,在目标设备上找不到具有主机 IP/名称 %s 的源,正在继续进" "行缺省迁移。" msgid "SpaceInfo returned byarray is invalid" msgstr "阵列返回的 SpaceInfo 无效" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "要映射至卷 %(vol)s 的所指定主机位于具有 %(group)s 的不受支持的主机组中。" msgid "Specified logical volume does not exist." 
msgstr "所指定的逻辑卷不存在。" #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "找不到标识为 %s 的指定快照组。" msgid "Specify a password or private_key" msgstr "请指定密码或 private_key" msgid "Specify san_password or san_private_key" msgstr "指定san_password或者san_private_key" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "指定卷类型名称、描述、is_public 或它们的组合。" msgid "Split pair error." msgstr "拆分对时出错。" msgid "Split replication failed." msgstr "拆分复制失败。" msgid "Start LUNcopy error." msgstr "启动 LUNcopy 时发生错误。" msgid "State" msgstr "状态" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "节点的状态错误。当前状态为 %s。" msgid "Status" msgstr "状态" msgid "Stop snapshot error." msgstr "停止快照时发生错误。" #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上,找不到存储器配置服务。" #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到存储器硬件标识管理服务。" #, python-format msgid "Storage Profile %s not found." msgstr "找不到存储器概要文件 %s。" #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到存储器重定位服务。" #, python-format msgid "Storage family %s is not supported." msgstr "不支持存储器系列 %s。" #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "未成功删除存储器组 %(storageGroupName)s" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "未检测到存储器主机 %(svr)s,请验证名称" msgid "Storage pool is not configured." msgstr "未配置存储池。" #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "找不到存储器概要文件 %(storage_profile)s。" msgid "Storage resource could not be found." msgstr "找不到存储资源。" msgid "Storage system id not set." msgstr "未设置存储系统标识。" #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "找不到池 %(poolNameInStr)s 的存储系统。" msgid "Storage-assisted migration failed during manage volume." msgstr "管理卷期间,存储器辅助进行的迁移失败。" #, python-format msgid "StorageSystem %(array)s is not found." msgstr "找不到存储系统 %(array)s。" #, python-format msgid "String with params: %s" msgstr "带有参数的字符串:%s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "子代使用率之和“%(sum)s”大于资源“%(res)s”的项目“%(proj)s”的可用配" "额“%(free)s”。请降低以下一个或多个项目的限制或使用率“%(child_ids)s”" msgid "Switch over pair error." msgstr "切换对时出错。" msgid "Sync pair error." msgstr "同步对时出错。" msgid "Synchronizing secondary volume to primary failed." msgstr "使辅助卷与主卷同步失败。" #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "发现系统 %(id)s 的密码状态无效 - %(pass_status)s。" #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "找到具有以下不正确状态的系统 %(id)s:%(status)s。" msgid "System does not support compression." msgstr "系统不支持压缩。" msgid "System is busy, retry operation." msgstr "系统繁忙,请重试操作。" #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "在帐户 [%(account)s] 的 CloudByte 存储器中找不到 TSM [%(tsm)s]。" msgid "Target volume type is still in use." 
msgstr "目标卷类型仍在使用中。" #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "模板数不匹配;把slave %(slavetag)s 添加到master %(mastertag)s" #, python-format msgid "Tenant ID: %s does not exist." msgstr "租户标识 %s 不存在。" msgid "Terminate connection failed" msgstr "终止连接发生故障" msgid "Terminate connection unable to connect to backend." msgstr "终止连接无法连接至后端。" #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "终止卷连接失败:%(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "找不到要复制的 %(type)s %(id)s 源。" msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "“sort_key”和“sort_dir”参数已建议不要使用,并且不能与“sort”参数配合使用。" msgid "The EQL array has closed the connection." msgstr "EQL 阵列已关闭连接。" #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "GPFS 文件系统 %(fs)s 未处于所要求的发行版级别。当前级别为 %(cur)s,而要求的级" "别必须至少为 %(min)s。" msgid "The IP Address was not found." msgstr "找不到 IP 地址。" #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "WebDAV 请求失败。原因为 %(msg)s,返回码/原因码为 %(code)s,源卷为 %(src)s,目" "标卷为 %(dst)s,方法为 %(method)s。" msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "上面的错误可能指示尚未创建数据库。\n" "在运行此命令之前,请使用“cinder-manage db sync”来创建数据库。" #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "该阵列不支持 SLO %(slo)s 和工作负载 %(workload)s 的存储池设置。请检查该阵列以" "获取有效 SLO 和工作负载。" msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "创建该卷的后端未启用复制。" #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "命令 %(cmd)s 失败。(返回为 %(ret)s,标准输出为 %(out)s,标准错误为 %(err)s)" msgid "The copy should be primary or secondary" msgstr "副本应为主副本或者辅助副本" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "未能完成逻辑设备的创建。(逻辑设备:%(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "已装饰的方法必须接受卷或快照对象" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "路径%(path)s 指向的设备不可用:%(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "结束时间 (%(end)s) 必须在开始时间 (%(start)s) 之后。" #, python-format msgid "The extra_spec: %s is invalid." msgstr "extra_spec %s 无效。" #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "额外规范 %(extraspec)s 无效。" #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "无法删除已故障转移的卷:%s" #, python-format msgid "The following elements are required: %s" msgstr "需要下列元素:%s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "以下迁移存在降级,这是不允许的:\n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "未能添加主机组或 iSCSI 目标。" msgid "The host group or iSCSI target was not found." msgstr "找不到主机组或 iSCSI 目标。" msgid "" "The host is not ready to be failed back. 
Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "主机未准备好故障返回。请重新同步卷并在 3PAR 后端上继续进行复制。" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "主机未准备好故障返回。请重新同步卷并在 LeftHand 后端上继续进行复制。" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "主机未准备好故障返回。请重新同步卷并在 Storwize 后端上继续进行复制。" #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP 用户 %(user)s 不存在。" #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "已导入的 LUN %(lun_id)s 位于池 %(lun_pool)s 中,该池并非由主机 %(host)s 管" "理。" msgid "The key cannot be None." msgstr "键不能为“无”。" #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "已删除所指定 %(type)s %(id)s 的逻辑设备。" #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "方法 %(method)s 超时。(超时值:%(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "未实现方法 update_migrated_volume。" #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "安装 %(mount_path)s 不是有效 Quobyte USP 卷。发生错误:%(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "存储器后端的参数。(config_group:%(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "父备份必须可用于增量备份。" #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "所提供快照“%s”并非所提供卷的快照。" msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "对后端中的卷的引用应具有以下格式:file_system/volume_name(volume_name 不能包" "含“/”)" #, python-format msgid "The remote retention count must be %s or less." msgstr "远程保留计数不得高于 %s。" msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "未在卷类型 extra_specs 中正确配置复制方式。如果 replication:mode 为 " "periodic,那么必须同时指定 replication:sync_period 并且周期必须介于 300 秒到 " "31622400 秒之间。" #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "同步复制周期必须至少为 %s 秒。" #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "所请求大小 %(requestedSize)s 与生成的大小 %(resultSize)s 不同。" #, python-format msgid "The resource %(resource)s was not found." msgstr "找不到资源 %(resource)s。" msgid "The results are invalid." msgstr "结果无效。" #, python-format msgid "The retention count must be %s or less." msgstr "保留计数不得高于 %s。" msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "当卷处于维护方式时,无法创建快照。" #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "源卷 %s 不在由当前主机管理的池中。" msgid "The source volume for this WebDAV operation not found." msgstr "找不到此 WebDAV 操作的源卷。" #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "源卷类型“%(src)s”与目标卷类型“%(dest)s”不同。" #, python-format msgid "The source volume type '%s' is not available." 
msgstr "源卷类型“%s”不可用。" #, python-format msgid "The specified %(desc)s is busy." msgstr "指定的 %(desc)s 处于繁忙状态。" #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "指定的 LUN 不属于给定池:%s。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "未能管理指定的逻辑设备 %(ldev)s。该逻辑设备不能处于映射状态。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "未能管理指定的逻辑设备 %(ldev)s。该逻辑设备不能处于成对状态。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "未能管理指定的逻辑设备 %(ldev)s。逻辑设备大小必须为千兆字节的倍数。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "未能管理指定的逻辑设备 %(ldev)s。卷类型必须为 DP-VOL。" #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "指定的操作不受支持。卷大小必须与源 %(type)s 大小相同。(卷:%(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "指定的 vdisk 已映射到主机。" msgid "The specified volume is mapped to a host." msgstr "所指定的卷已映射至主机。" #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "%s 的存储阵列密码不正确,请更新所配置密码。" #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "可使用存储器后端。(config_group:%(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "存储器设备不支持 %(prot)s。请配置该设备以支持 %(prot)s 或切换至使用另一协议的" "驱动程序。" #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "分割元计数 %(memberCount)s 对于卷 %(volumeName)s 太小,大小为 %(volumeSize)s。" #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "卷/快照 %(id)s 的元数据类型 %(metadata_type)s 无效。" #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "未能扩展卷 %(volume_id)s。卷类型必须为“常规”。" #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "未能取消管理卷 %(volume_id)s。卷类型必须为 %(volume_type)s。" #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "已成功管理卷 %(volume_id)s。(逻辑设备:%(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "已成功取消管理卷 %(volume_id)s。(逻辑设备:%(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "找不到要映射的卷 %(volume_id)s。" msgid "The volume cannot accept transfer in maintenance mode." msgstr "在维护方式下卷无法接受传输。" msgid "The volume cannot be attached in maintenance mode." msgstr "在维护方式下无法连接卷。" msgid "The volume cannot be detached in maintenance mode." msgstr "在维护方式下无法拆离卷。" msgid "The volume cannot be updated during maintenance." msgstr "维护期间无法更新卷。" msgid "The volume connection cannot be initialized in maintenance mode." msgstr "在维护方式下无法初始化卷连接。" msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "卷驱动程序在连接器中需要 iSCSI 发起方名称。" msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." 
msgstr "该卷当前在 3PAR 上繁忙,此时无法删除。可稍后重试。" msgid "The volume label is required as input." msgstr "需要卷标作为输入。" msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "当卷处于维护方式时,无法删除卷元数据。" msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "当卷处于维护方式时,无法更新卷元数据。" #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "不存在任何可供使用的资源。(资源:%(resource)s)" msgid "There are no valid ESX hosts." msgstr "不存在有效的 ESX 主机。" #, python-format msgid "There are no valid datastores attached to %s." msgstr "不存在任何已连接至 %s 的有效数据存储器。" msgid "There are no valid datastores." msgstr "不存在任何有效数据存储器。" #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "不存在任何对 %(param)s 的指定。在管理卷时,必须使用指定的存储器。" msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "不存在任何对逻辑设备的指定。在管理卷时,必须使用指定的逻辑设备。" msgid "There is no metadata in DB object." msgstr "数据库对象中没有元数据。" #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "不存在任何可主管 %(volume_size)sG 的共享项" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "不存在任何可托管 %(volume_size)sG 的共享项。" #, python-format msgid "There is no such action: %s" msgstr "没有该动作:%s" msgid "There is no virtual disk device." msgstr "不存在任何虚拟盘设备。" #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "将该卷添加至远程复制组时发生了错误:%s。" #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "创建 cgsnapshot 时发生错误:%s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "创建远程复制组时发生了错误:%s。" #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "为远程复制组设置同步周期时发生了错误:%s。" #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "在 3PAR 阵列上设置远程复制组时发生了错误:(“%s”)。该卷未被识别为复制类型。" #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "在 LeftHand 阵列上设置远程调度时发生了错误:(“%s”)。该卷未被识别为复制类" "型。" #, python-format msgid "There was an error starting remote copy: %s." msgstr "启动远程复制时发生了错误:%s。" #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "未配置 Gluster 配置文件 (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "未配置 NFS 配置文件 (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "未配置 Quobyte 卷 (%s)。示例:quobyte:///" msgid "Thin provisioning not supported on this version of LVM." msgstr "在此版本的 LVM 上,不支持瘦供应。" msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "未安装瘦供应启用程序。无法创建瘦卷" msgid "This driver does not support deleting in-use snapshots." msgstr "此驱动程序不支持对正在使用的快照进行删除。" msgid "This driver does not support snapshotting in-use volumes." msgstr "此驱动程序不支持对正在使用的卷生成快照。" msgid "This request was rate-limited." msgstr "这个请求受到频率限制。" #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "此系统平台 (%s) 不受支持。此驱动程序仅支持 Win32 平台。" #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." 
msgstr "对于 %(storageSystemName)s,找不到分层策略服务。" #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "等待 Nova 更新(以便创建快照 %s)时超时。" #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "等待 Nova 更新(以便删除快照 %(id)s)时超时。" msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "连接至 CEPH 集群时使用的超时值(以秒计)。如果值小于 0,那么不会设置超时并且" "会使用缺省 librados 值。" #, python-format msgid "Timeout while calling %s " msgstr "调用 %s 时超时 " #, python-format msgid "Timeout while requesting %(service)s API." msgstr "请求 %(service)s API 时超时。" #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "从后端请求 %(service)s 功能时超时。" #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "无法找到转换器%(transfer_id)s" #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "传输 %(transfer_id)s:具有标识 %(volume_id)s 的卷处于意外状态 %(status)s,需" "要的状态为正在等待传输" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "正在尝试将备份元数据从标识 %(meta_id)s 导入到备份 %(id)s。" #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "调整卷任务未完成就已停止:volume_name=%(volume_name)s, task-status=" "%(status)s。" #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "已使类型 %(type_id)s 与另一 qos 规范 %(qos_specs_id)s 关联" msgid "Type access modification is not applicable to public volume type." msgstr "类型访问修改不适用于公共卷类型。" msgid "Type cannot be converted into NaElement." msgstr "此类型不能转换为 NaElement。" #, python-format msgid "TypeError: %s" msgstr "TypeError:%s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUID %s 同时位于“添加卷”和“移除卷”列表中。" #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "无法访问卷 %s 的 Storwize 后端。" msgid "Unable to access the backend storage via file handle." msgstr "通过文件句柄无法访问后端存储器。" #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "无法通过路径 %(path)s 访问后端存储器。" #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "无法将 Cinder 主机添加至空间 %(space)s 的 apphosts" #, python-format msgid "Unable to complete failover of %s." msgstr "无法完成 %s 的故障转移。" msgid "Unable to connect or find connection to host" msgstr "无法连接至主机,或找不到与主机的连接" msgid "Unable to create Barbican Client without project_id." msgstr "不具备 project_id,无法创建 Barbican 客户机。" #, python-format msgid "Unable to create consistency group %s" msgstr "无法创建一致性组 %s" msgid "Unable to create lock. Coordination backend not started." msgstr "无法创建锁定。协调后端未启动。" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "无法创建或获取快速策略 %(fastPolicyName)s 的缺省存储器组。 " #, python-format msgid "Unable to create replica clone for volume %s." msgstr "无法为卷 %s 创建副本克隆。" #, python-format msgid "Unable to create the relationship for %s." msgstr "无法为 %s 创建该关系。" #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "无法通过 %(snap)s 创建卷 %(name)s。" #, python-format msgid "Unable to create volume %(name)s from %(vol)s." 
msgstr "无法通过 %(vol)s 创建卷 %(name)s。" #, python-format msgid "Unable to create volume %s" msgstr "无法创建卷 %s" msgid "Unable to create volume. Backend down." msgstr "无法创建卷。后端已关闭。" #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "无法删除一致性组快照 %s" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "无法删除快照 %(id)s,状态:%(status)s。" #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "无法删除卷 %s 上的快照策略。" #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "无法删除卷 %(vol)s 的目标卷。异常:%(err)s。" msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "无法拆离卷。卷状态必须为“in-use”,并且 attach_status 必须为“attached”才能拆" "离。" #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "无法根据所提供辅助项来确定 secondary_array:%(secondary)s。" #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "在快照 %(id)s 的 Purity 中无法确定快照名称。" msgid "Unable to determine system id." msgstr "无法确定系统标识。" msgid "Unable to determine system name." msgstr "无法确定系统名称。" #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "无法对 Purity REST API 版本%(api_version)s 执行“管理快照”操作,需要版本 " "%(required_versions)s。" #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "无法使用 Purity REST API 版本 %(api_version)s 执行复制,需要 " "%(required_versions)s 的其中之一。" msgid "Unable to enable replication and snapcopy at the same time." msgstr "无法同时启用复制和 snapcopy。" #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "无法建立与 Storwize 集群 %s 的伙伴关系。" #, python-format msgid "Unable to extend volume %s" msgstr "无法扩展卷 %s" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "无法将卷 %(id)s 故障转移至辅助后端,因为复制关系无法切换:%(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "无法故障返回至“default”,此操作只能在故障转换完成后进行。" #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "无法故障转移至复制目标:%(reason)s)。" msgid "Unable to fetch connection information from backend." msgstr "无法从后端访存连接信息。" #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "无法从后端访存连接信息:%(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "找不到名称为 %s 的 Purity ref" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "找不到卷组: %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "找不到故障转移目标,未配置辅助目标。" msgid "Unable to find iSCSI mappings." msgstr "找不到 iSCSI 映射。" #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "找不到 ssh_hosts_key_file:%s" msgid "Unable to find system log file!" msgstr "不能发现系统日志文件" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "找不到可行 pg 快照,无法在所选辅助阵列上使用故障转移:%(id)s。" #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." 
msgstr "无法根据所配置目标找到可行辅助阵列:%(targets)s。" #, python-format msgid "Unable to find volume %s" msgstr "找不到卷 %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "无法获取对应文件“%s”的块设备" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "无法获取创建卷所需要的配置信息:%(errorMessage)s。" msgid "Unable to get corresponding record for pool." msgstr "无法为池获取相应的记录。" #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "无法获取有关空间 %(space)s 的信息,请验证集群是否正在运行并且已连接。" msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "无法获取此主机上的 IP 地址列表,请检查许可权和联网。" msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "无法获取域成员列表,请检查集群是否正在运行。" msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "无法获取用于生成新名称的空间的列表。请验证集群是否正在运行。" #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "无法获取 backend_name %s 的统计信息" msgid "Unable to get storage volume from job." msgstr "无法通过作业获取存储器卷。" #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "无法获取对应硬件标识 %(hardwareIdInstance)s 的目标端点。" msgid "Unable to get the name of the masking view." msgstr "无法获取掩码视图的名称。" msgid "Unable to get the name of the portgroup." msgstr "无法获取端口组的名称。" #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "无法获取卷 %s 的复制关系。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "无法将卷 %(deviceId)s 导入到 Cinder。它是复制会话 %(sync)s 的源卷。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "无法将卷 %(deviceId)s 导入到 Cinder。外部卷不在由当前 Cinder 主机管理的池中。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "无法将卷 %(deviceId)s 导入到 Cinder。该卷位于掩码视图 %(mv)s 中。" #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "无法从 %(cert)s %(e)s 装入 CA。" #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "无法从 %(cert)s %(e)s 装入证书。" #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "无法从 %(cert)s %(e)s 装入密钥。" #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "无法在 Solidfire 设备上找到帐户 %(account_name)s" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "无法找到正在管理 IP 地址“%s”的 SVM" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "找不到指定重放概要文件 %s " #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "无法管理现有卷。已管理卷 %(volume_ref)s。" #, python-format msgid "Unable to manage volume %s" msgstr "无法管理卷 %s" msgid "Unable to map volume" msgstr "无法映射卷" msgid "Unable to map volume." msgstr "无法映射卷。" msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "无法解析 XML 请求,请提供正确格式的 XML。" msgid "Unable to parse attributes." msgstr "无法解析属性。" #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." 
msgstr "对于卷 %s,无法将副本升级为主副本。没有任何辅助副本可用。" msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "无法在 use_chap_auth=True 的情况下复用并非由 Cinder 管理的主机," msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "无法在配置了未知 CHAP 凭证的情况下复用主机。" #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "无法将卷 %(existing)s 重命名为 %(newname)s" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "无法检索标识为 %s 的快照组。" #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "无法对 %(specname)s 进行转型,需要接收当前的和请求的 %(spectype)s 值。接收到" "的值:%(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "无法执行 retype:卷 %s 的副本已存在。执行 retype 将超过2 个副本的限制。" #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "无法转型:当前操作需要卷拷贝,当新类型为复制时,不允许卷拷贝。卷为 %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "无法对 %(vol)s 设置镜像方式复制。异常:%(err)s。" #, python-format msgid "Unable to snap Consistency Group %s" msgstr "无法为一致性组 %s 创建快照" msgid "Unable to terminate volume connection from backend." msgstr "无法从后端终止卷连接。" #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "无法终止卷连接:%(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "无法更新一致性组 %s" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "由于以下卷上的状态 %(vol_status)s 不正确,无法更新类型:%(vol_id)s。卷状态必" "须为“可用”或“正在使用”。" #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "无法在掩码视图 %(maskingViewName)s 中验证发起方组 %(igGroupName)s。" msgid "Unacceptable parameters." msgstr "无法接受的参数。" #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "映射 %(id)s 的意外映射状态 %(status)s。属性:%(attr)s。" #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "出现意外 CLI 响应:头/行不匹配。头:%(header)s,行:%(row)s。" #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "映射 %(id)s 的意外映射状态 %(status)s。属性:%(attr)s。" #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "意外输出。需要 [%(expected)s],但接收到 [%(output)s]" msgid "Unexpected response from Nimble API" msgstr "来自 Nimble API 的意外响应" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Tegile IntelliFlash API 给出了意外响应" msgid "Unexpected status code" msgstr "意外的状态码" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "对于 url %(page)s,带有协议 %(protocol)s 的交换机 %(switch_id)s 发出意外状态" "码。错误:%(error)s" msgid "Unknown Gluster exception" msgstr "Gluster 异常未知" msgid "Unknown NFS exception" msgstr "NFS 异常未知" msgid "Unknown RemoteFS exception" msgstr "RemoteFS 异常未知" msgid "Unknown SMBFS exception." 
msgstr "SMBFS 异常未知。" msgid "Unknown Virtuozzo Storage exception" msgstr "未知 Virtuozzo 存储器异常" msgid "Unknown action" msgstr "操作未知" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "不知道要管理的卷 %s 是否已由 Cinder 管理。正在异常中止管理卷。请" "将“cinder_managed”定制模式属性添加至该卷,并将其值设置为 False。或者,将 " "Cinder 配置策略“zfssa_manage_policy”的值设置为“loose”以移除此限制。" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "不知道要管理的卷 %s 是否已由 Cinder 管理。正在异常中止管理卷。请" "将“cinder_managed”定制模式属性添加至该卷,并将其值设置为 False。或者,将 " "Cinder 配置策略“zfssa_manage_policy”的值设置为“loose”以移除此限制。" #, python-format msgid "Unknown operation %s." msgstr "未知操作 %s。" #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "命令 %(cmd)s 未知或不受支持" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "未知协议:%(protocol)s。" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "配额资源 %(unknown)s 未知。" msgid "Unknown service" msgstr "未知服务" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "排序方向未知,必须为“降序”或“升序”" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "排序方向未知,必须为“降序”或“升序”。" msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "非管理选项与级联删除选项互斥。" msgid "Unmanage volume not implemented." msgstr "未实现非管理卷。" msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "不允许取消管理来自“已故障转移”的卷的快照。" msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "不允许取消管理来自已故障转移的卷的快照。" #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "不识别 QOS 关键字:“%s”" #, python-format msgid "Unrecognized backing format: %s" msgstr "无法识别支持格式:%s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "无法识别的 read_deleted 取值”%s“" #, python-format msgid "Unset gcs options: %s" msgstr "取消设置 gcs 选项:%s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "iscsiadm 失败。异常为 %(ex)s。 " msgid "Unsupported Clustered Data ONTAP version." msgstr "不支持 Clustered Data ONTAP 版本。" msgid "Unsupported Content-Type" msgstr "不支持的Content-Type" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "不支持该数据 ONTAP 版本。支持数据 ONTAP V7.3.1 和更高版本。" #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "不支持备份元数据版本 (%s)" msgid "Unsupported backup metadata version requested" msgstr "不支持请求的备份元数据版本" msgid "Unsupported backup verify driver" msgstr "不支持备份验证驱动程序" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "在交换机 %s 上存在不受支持的固件。请确保交换机正在运行固件 V6.4 或更高版本" #, python-format msgid "Unsupported volume format: %s " msgstr "以下卷格式不受支持:%s " msgid "Update QoS policy error." msgstr "更新 QoS 策略时发生错误。" msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." 
msgstr "只能由直系父代的管理员或者云管理员执行更新和删除配额操作。" msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "只能对与用户作用域仅限于的项目位于同一层次结构中的项目执行更新和删除配额操" "作。" msgid "Update list, doesn't include volume_id" msgstr "更新列表未包含 volume_id" msgid "Updated At" msgstr "已更新于" msgid "Upload to glance of attached volume is not supported." msgstr "不支持上载至所连接卷的 Glance。" msgid "Use ALUA to associate initiator to host error." msgstr "使用 ALUA 使启动程序与主机相关联时发生错误。" msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "使用 CHAP 使启动程序与主机相关联时发生错误。请检查 CHAP 用户名和密码。" msgid "User ID" msgstr "用户ID" msgid "User does not have admin privileges" msgstr "用户没有管理员权限" msgid "User is not authorized to use key manager." msgstr "用户无权使用密钥管理器。" msgid "User not authorized to perform WebDAV operations." msgstr "用户无权执行 WebDAV 操作。" msgid "UserName is not configured." msgstr "未配置 UserName。" msgid "UserPassword is not configured." msgstr "未配置 UserPassword。" msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "V2 回滚 - 卷在除了缺省存储器组之外的另一个存储器组中。" msgid "V2 rollback, volume is not in any storage group." msgstr "V2 回滚,卷不在任何存储器组中。" msgid "V3 rollback" msgstr "V3 回滚" msgid "VF is not enabled." msgstr "未启用 VF。" #, python-format msgid "VV Set %s does not exist." msgstr "VV 集 %s 不存在。" #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "以下是 QoS 规范的有效使用者:%s" #, python-format msgid "Valid control location are: %s" msgstr "以下是有效控制位置:%s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "验证卷连接失败(错误:%(err)s)。" #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "值“%(value)s”对于配置选项“%(option)s”无效" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "%(param_string)s 的值 %(param)s 不是布尔值。" msgid "Value required for 'scality_sofs_config'" msgstr "“scality_sofs_config”的必需值" #, python-format msgid "ValueError: %s" msgstr "ValueError:%s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "从 %(src)s 到 %(tgt)s 的映射中未涉及到 Vdisk %(name)s。" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "此 API 不支持版本 %(req_ver)s。最低版本为 %(min_ver)s,最高版本为 " "%(max_ver)s。" #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s 无法按标识检索对象。" #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s 不支持带条件更新。" #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "虚拟卷“%s”在阵列上不存在。" #, python-format msgid "Vol copy job for dest %s failed." msgstr "针对目标 %s 的卷复制作业失败。" #, python-format msgid "Volume %(deviceID)s not found." msgstr "找不到卷 %(deviceID)s。" #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "数组中找不到卷 %(name)s。无法确定是否存在已映射的卷。" #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "在 VNX 中创建了卷 %(name)s,但此卷处于 %(state)s 状态。" #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "未能在池 %(pool)s 中创建卷 %(vol)s。" #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." 
msgstr "卷 %(vol1)s 与 snapshot.volume_id %(vol2)s 不匹配。" #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "卷 %(vol_id)s 状态必须为“可用”或“正在使用”,但当前状态为:%(vol_status)s。" #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "卷 %(vol_id)s 状态必须为“可用”,才能进行扩展,但当前状态为:%(vol_status)s。" #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "卷 %(vol_id)s 状态必须为“可用”,才能更新只读标记,但当前状态为:" "%(vol_status)s。" #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "卷 %(vol_id)s 状态必须为“可用”,但当前状态为:%(vol_status)s。" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "卷 %(volume_id)s 没有找到。" #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "卷 %(volume_id)s 没有任何具有键 %(metadata_key)s 的元数据。" #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "卷 %(volume_id)s 没有含键 %(metadata_key)s 的元数据。" #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "卷 %(volume_id)s 当前已映射至不受支持的主机组 %(group)s" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "卷 %(volume_id)s 当前未映射至主机 %(host)s" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "卷 %(volume_id)s 仍然处于连接状态,请先从卷断开连接。" #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "卷 %(volume_id)s 复制错误:%(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "卷 %(volume_name)s 处于繁忙状态。" #, python-format msgid "Volume %s could not be created from source volume." msgstr "未能从源卷创建卷 %s。" #, python-format msgid "Volume %s could not be created on shares." msgstr "在共享项上,未能创建卷 %s。" #, python-format msgid "Volume %s could not be created." msgstr "未能创建卷 %s。" #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "卷 %s 在 Nexenta SA 中不存在" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "卷 %s 在 Nexenta 存储设备中不存在" #, python-format msgid "Volume %s does not exist on the array." msgstr "卷 %s 在阵列上不存在。" #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "卷 %s 没有指定 provider_location,正在跳过。" #, python-format msgid "Volume %s doesn't exist on array." msgstr "卷 %s 在阵列上不存在。" #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "卷 %s 在 ZFSSA 后端上不存在。" #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "卷 %s 已由 OpenStack 管理。" #, python-format msgid "Volume %s is already part of an active migration." msgstr "卷 %s 已作为活动迁移的一部分。" #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "卷 %s 并非被复制类型。此卷必须为 extra spec replication_enabled 设置为“ " "True”的卷类型以支持复制操作。" #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "卷 %s 已联机。将该卷设置为脱机以便使用 OpenStack 进行管理。" #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." 
msgstr "卷 %s 不能正在迁移、已附加、属于某个一致性组或具有快照。" #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "卷 %s 不得是一致性组的一部分。" #, python-format msgid "Volume %s must not be replicated." msgstr "卷 %s 不得复制。" #, python-format msgid "Volume %s must not have snapshots." msgstr "卷 %s 不得具有快照。" #, python-format msgid "Volume %s not found." msgstr "找不到卷 %s。" #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "卷 %s:尝试扩展卷时出错" #, python-format msgid "Volume (%s) already exists on array" msgstr "数组中已存在卷 (%s)" #, python-format msgid "Volume (%s) already exists on array." msgstr "阵列上已存在卷 (%s)。" #, python-format msgid "Volume Group %s does not exist" msgstr "卷组 %s 不存在" #, python-format msgid "Volume Type %(id)s already exists." msgstr "卷类型 %(id)s 已存在。" #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "卷类型 %(type_id)s 没有任何具有键 %(id)s 的额外规范。" #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "当存在类型为 %(volume_type_id)s 的卷时,不允许删除该卷类型。" #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "卷类型 %(volume_type_id)s 没有额外说明键 %(extra_specs_key)s 。" msgid "Volume Type id must not be None." msgstr "卷类型不能为空。" #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "在对应 OpenStack 卷 [%(ops_vol)s] 的 CloudByte 存储器上找不到卷 " "[%(cb_vol)s]。" #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "在 CloudByte 存储器中找不到卷 [%s]。" #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "在使用过滤器 %(filter)s 的情况下,找不到卷连接。" #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "卷后端配置无效:%(reason)s" msgid "Volume by this name already exists" msgstr "使用此名称的卷已存在" msgid "Volume cannot be restored since it contains snapshots." msgstr "卷无法复原,因为它包含快照。" msgid "Volume create failed while extracting volume ref." msgstr "抽取卷引用时创建卷失败。" #, python-format msgid "Volume device file path %s does not exist." msgstr "卷设备文件路径 %s 不存在。" #, python-format msgid "Volume device not found at %(device)s." msgstr "在 %(device)s 上找不到卷设备。" #, python-format msgid "Volume driver %s not initialized." msgstr "卷驱动程序 %s 未初始化。" msgid "Volume driver not ready." msgstr "卷驱动未准备好。" #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "卷驱动程序已报告错误:%(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "卷具有此时不能删除的临时快照。" msgid "Volume has children and cannot be deleted!" msgstr "卷具有子代,不能删除!" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "已连接一致性组 %s 中的卷。需要先断开连接。" msgid "Volume in consistency group still has dependent snapshots." msgstr "一致性组中的卷仍然具有从属快照。" #, python-format msgid "Volume is attached to a server. (%s)" msgstr "卷已连接至服务器。(%s)" msgid "Volume is in-use." msgstr "卷在使用中。" msgid "Volume is not available." msgstr "卷不可用。" msgid "Volume is not local to this node" msgstr "卷不是此节点的本地卷" msgid "Volume is not local to this node." msgstr "该卷不是此节点的本地卷。" msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." 
msgstr "已请求卷元数据备份,但此驱动程序尚不支持此功能。" #, python-format msgid "Volume migration failed: %(reason)s" msgstr "卷迁移失败:%(reason)s" msgid "Volume must be available" msgstr "卷必须可用" msgid "Volume must be in the same availability zone as the snapshot" msgstr "卷必须与快照位于同一可用性区域中" msgid "Volume must be in the same availability zone as the source volume" msgstr "卷必须与源卷位于同一可用性区域中" msgid "Volume must have a volume type" msgstr "卷必须具有卷类型" msgid "Volume must not be part of a consistency group." msgstr "卷不能是一致性组的一部分。" msgid "Volume must not be replicated." msgstr "不得复制卷。" msgid "Volume must not have snapshots." msgstr "卷不能具有快照。" #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "没有为实例 %(instance_id)s 找到卷。" msgid "Volume not found on configured storage backend." msgstr "在已配置的存储器后端上找不到卷。" msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "在所配置存储器后端找不到卷。如果卷名包含“/”,请重命名并重试管理。" msgid "Volume not found on configured storage pools." msgstr "在已配置的存储池上找不到卷。" msgid "Volume not found." msgstr "找不到卷。" msgid "Volume not unique." msgstr "卷并非唯一。" msgid "Volume not yet assigned to host." msgstr "卷尚未分配给主机。" msgid "Volume reference must contain source-name element." msgstr "卷引用必须包含 source-name 元素。" #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "找不到 %(volume_id)s 的卷复制。" #, python-format msgid "Volume service %s failed to start." msgstr "卷服务 %s 未能启动。" msgid "Volume should have agent-type set as None." msgstr "卷应该将 agent-type 设置为“无”。" #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "卷大小 %(volume_size)sGB 不能小于映像 minDisk 大小 %(min_disk)sGB。" #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "卷大小“%(size)s”必须为正整数" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "卷大小“%(size)s”GB 不能小于原始卷大小 %(source_size)sGB。它们必须不小于原始卷" "大小。" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "卷大小“%(size)s”GB 不能小于快照大小 %(snap_size)sGB。它们必须不小于原始快照大" "小。" msgid "Volume size increased since the last backup. Do a full backup." msgstr "自从最近一次备份以来,卷大小已增加。请执行完全备份。" msgid "Volume size must be a multiple of 1 GB." msgstr "卷大小必须为 1 GB 的倍数。" msgid "Volume size must be multiple of 1 GB." msgstr "卷大小必须为 1 GB 的倍数。" msgid "Volume size must multiple of 1 GB." msgstr "卷大小必须是 1 GB 的倍数。" #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "卷状态必须为“可用”,但当前状态为 %s" msgid "Volume status is in-use." msgstr "卷状态为“in-use”。" #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "对于快照,卷状态必须为“available”或“in-use”。(卷状态现在为 %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "卷状态必须为“available”或“in-use”。" #, python-format msgid "Volume status must be %s to reserve." msgstr "卷状态必须为 %s 才能保留。" msgid "Volume status must be 'available'." msgstr "卷状态必须为“可用”。" msgid "Volume to Initiator Group mapping already exists" msgstr "卷至发起方组的映射已存在" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." 
msgstr "要备份的卷必须可用或者正在使用,但是当前状态为“%s”。" msgid "Volume to be restored to must be available" msgstr "要复原至的卷必须可用" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "卷类型 %(volume_type_id)s 没有找到。" #, python-format msgid "Volume type ID '%s' is invalid." msgstr "卷类型标识“%s”无效。" #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "已存在针对 %(volume_type_id)s / %(project_id)s 组合的卷类型访问权限。" #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "对于 %(volume_type_id)s / %(project_id)s 组合,找不到卷类型访问权限。" #, python-format msgid "Volume type does not match for share %s." msgstr "对于共享项 %s,卷类型不匹配。" #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "类型 %(type_id)s 的卷类型加密已存在。" #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "类型 %(type_id)s 的卷类型加密不存在。" msgid "Volume type name can not be empty." msgstr "卷类型名称不能为 空." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "名为 %(volume_type_name)s 的卷类型没有找到。" #, python-format msgid "Volume with volume id %s does not exist." msgstr "卷标识为 %s 的卷不存在。" #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "卷 %(volumeName)s 不是并置卷。只能对并置卷执行扩展。正在退出..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "卷 %(volumeName)s 未添加至存储器组 %(sgGroupName)s。" #, python-format msgid "Volume: %s could not be found." msgstr "找不到卷 %s。" #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "卷 %s 已由 Cinder 管理。" msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "卷将分块为此大小(以兆字节计)的对象。" msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "卷/帐户同时超出主 SolidFire 帐户和辅助 SolidFire 帐户的限制。" #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "VzStorage 配置“vzstorage_used_ratio”无效。必须大于 0 并且小于或等于 1.0:%s。" #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "%(config)s 处不存在 VzStorage 配置文件。" msgid "Wait replica complete timeout." msgstr "等待副本完成时发生超时。" #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "等待同步失败。运行状态:%s。" msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "正在等待所有节点加入集群。请确保所有 sheep 守护程序都在运行。" msgid "We should not do switch over on primary array." msgstr "不应在主阵列上切换。" msgid "Wrong resource call syntax" msgstr "资源调用语法不正确" msgid "X-IO Volume Driver exception!" msgstr "发生 X-IO 卷驱动程序异常!" msgid "XML support has been deprecated and will be removed in the N release." msgstr "已不推荐使用 XML 支持,将在 N 发行版中移除此项。" msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "未正确配置 XtremIO,找不到任何 iSCSI 门户网站" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO 未正确初始化,找不到任何集群" msgid "You must implement __call__" msgstr "你必须执行 __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "使用 3PAR 驱动程序之前,必须安装 hpe3parclient。运行“pip install " "python-3parclient”以安装 hpe3parclient。" msgid "You must supply an array in your EMC configuration file." 
msgstr "必须在 EMC 配置文件中提供阵列。" #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "原始大小 %(originalVolumeSize)s GB 大于%(newSize)s GB。仅支持扩展。正在退" "出..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError:%s" msgid "Zone" msgstr "域" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "分区策略:%s,无法识别" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "_create_and_copy_vdisk_data:未能获取 vdisk %s 的属性。" msgid "_create_host failed to return the host name." msgstr "_create_host 未能返回主机名。" msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "_create_host:无法翻译主机名。主机名不是 Unicode或字符串。" msgid "_create_host: No connector ports." msgstr "_create_host:不存在任何连接器端口。" msgid "_create_local_cloned_volume, Replication Service not found." msgstr "_create_local_cloned_volume,找不到复制服务。" #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume,volumenam:%(volumename)s,sourcevolumename:" "%(sourcevolumename)s,源卷实例:%(source_volume)s,目标卷实例:" "%(target_volume)s,返回码:%(rc)lu,错误:%(errordesc)s。" #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - 找不到 CLI 输出形式的成功消息。\n" "标准输出:%(out)s\n" "标准错误:%(err)s" msgid "_create_volume_name, id_code is None." msgstr "_create_volume_name,id_code 为 None。" msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession,找不到复制服务" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession,未定义复制会话类型!复制会话:%(cpsession)s,复制类型:" "%(copytype)s。" #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession,copysession:%(cpsession)s,操作:%(operation)s,返回" "码:%(rc)lu,错误:%(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume,volumename:%(volumename)s,返回码:%(rc)lu,错误:" "%(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "_delete_volume,volumename:%(volumename)s,找不到存储器配置服务。" #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service,classname:%(classname)s,InvokeMethod,无法连接至 " "ETERNUS。" msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "_extend_volume_op:不支持扩展带有快照的卷。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,连接器:%(connector)s,关联者:" "FUJITSU_AuthorizedTarget,无法连接至 ETERNUS。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." 
msgstr "" "_find_affinity_group,连接器:%(connector)s,EnumerateInstanceNames,无法连接" "至 ETERNUS。" #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,连接器:%(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit,无法连接至 ETERNUS。" #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession,ReferenceNames,vol_instance:%(vol_instance_path)s,无法" "连接至 ETERNUS。" #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service,classname:%(classname)s,EnumerateInstanceNames,无法" "连接至 ETERNUS。" #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "_find_initiator_names,连接器:%(connector)s,找不到启动程序。" #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun,volumename:%(volumename)s,EnumerateInstanceNames,无法连接至 " "ETERNUS。" #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool,eternus_pool:%(eternus_pool)s,EnumerateInstances,无法连接至 " "ETERNUS。" #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg,filename:%(filename)s,tagname:%(tagname)s,没有数据!请编辑驱" "动配置文件并更正。" #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection,filename:%(filename)s,ip:%(ip)s,端口:%(port)s," "用户:%(user)s,密码:****,URL:%(url)s,失败!" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties,iscsiip 列表:%(iscsiip_list)s,找不到 iqn。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,AssociatorName:" "CIM_BindsTo,无法连接至 ETERNUS。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,EnumerateInstanceNames," "无法连接至 ETERNUS。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,GetInstance,无法连接至 " "ETERNUS。" #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic:属性头和值不匹配。\n" "头为 %(header)s\n" "值为 %(row)s。" msgid "_get_host_from_connector failed to return the host name for connector." msgstr "_get_host_from_connector 未能返回连接器的主机名。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." 
msgstr "" "_get_mapdata_fc,从 aglist/vol_instance 获取主机亲缘关系失败,affinitygroup:" "%(ag)s,ReferenceNames,无法连接至 ETERNUS。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc,获取主机亲缘关系实例失败,volmap:%(volmap)s,GetInstance," "无法连接至 ETERNUS。" msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi,关联者:FUJITSU_SAPAvailableForElement,无法连接至 " "ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi,affinitygroup:%(ag)s,ReferenceNames,无法连接至 " "ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi,vol_instance:%(vol_instance)s,ReferenceNames: " "CIM_ProtocolControllerForUnit,无法连接至 ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi,volmap:%(volmap)s,GetInstance,无法连接至 ETERNUS。" msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "_get_target_port,EnumerateInstances,无法连接至 ETERNUS。" #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "_get_target_port,协议:%(protocol)s,找不到 target_port。" #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay:找不到名为 %s 的快照" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay:找不到卷标识 %s" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay:必须指定 source-name。" msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties:对于主机/卷连接,未能获取 FC 连接信息。已针对 FC " "连接正确配置主机吗?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties:在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到任何节点。" #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun,vol_instance.path:%(vol)s,volumename:%(volumename)s," "volume_uid:%(uid)s,启动程序:%(initiator)s,目标:%(tgt)s,aglist:" "%(aglist)s,找不到存储器配置服务。" #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun,vol_instance.path:%(volume)s,volumename:%(volumename)s," "volume_uid:%(uid)s,aglist:%(aglist)s,找不到控制器配置服务。" #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun,volumename:%(volumename)s,volume_uid:%(volume_uid)s," "AffinityGroup:%(ag)s,返回码:%(rc)lu,错误:%(errordesc)s。" #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." 
msgstr "" "_unmap_lun,vol_instance.path:%(volume)s,AssociatorName:" "CIM_ProtocolControllerForUnit,无法连接至 ETERNUS。" msgid "_update_volume_stats: Could not get storage pool data." msgstr "_update_volume_stats:未能获取存储池数据。" #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete,cpsession:%(cpsession)s,copysession 状态为 " "BROKEN。" #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "add_vdisk_copy 失败:卷 %s 的副本已存在。添加另一个副本将超过 2 个副本的限" "制。" msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "在所需池中没有 vdisk 副本的情况下,add_vdisk_copy 已开始。" #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants 必须为布尔值,但是获得了“%s”。" msgid "already created" msgstr "已创建" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "从远程节点连接快照" #, python-format msgid "attribute %s not lazy-loadable" msgstr "属性 %s 不可延迟装入" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "备份:%(vol_id)s 未能创建从 %(vpath)s 至 %(bpath)s 的设备硬链接。\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "备份:%(vol_id)s 未能从服务器获取“备份成功”通知。\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "备份:由于 %(bpath)s 上的自变量无效,使得 %(vol_id)s 未能运行 dsmc。\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "备份:%(vol_id)s 未能对 %(bpath)s 运行 dsmc。\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "备份:%(vol_id)s 失败。%(path)s 不是文件。" #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "备份:%(vol_id)s 失败。%(path)s 是意外的文件类型。支持块文件或常规文件,实际" "文件方式为 %(vol_mode)s。" #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." 
msgstr "备份:%(vol_id)s 失败。无法获取 %(path)s 处卷的实际路径。" msgid "being attached by different mode" msgstr "正在通过另一方式连接" #, python-format msgid "call failed: %r" msgstr "调用失败:%r" msgid "call failed: GARBAGE_ARGS" msgstr "调用失败:GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "调用失败:PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "调用失败:PROG_MISMATCH:%r" msgid "call failed: PROG_UNAVAIL" msgstr "调用失败:PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "找不到 LUN 映射,ig:%(ig)s 卷:%(vol)s" msgid "can't find the volume to extend" msgstr "找不到要扩展的卷" msgid "can't handle both name and index in req" msgstr "无法同时处理请求中的名称和索引" msgid "cannot understand JSON" msgstr "无法理解JSON" msgid "cannot understand XML" msgstr "无法理解XML" #, python-format msgid "cg-%s" msgstr "cg-%s" msgid "cgsnapshot assigned" msgstr "已分配 cgsnapshot" msgid "cgsnapshot changed" msgstr "已更改 cgsnapshot" msgid "cgsnapshots assigned" msgstr "已分配 cgsnapshot" msgid "cgsnapshots changed" msgstr "已更改 cgsnapshot" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error:认证需要密码或 SSH 专用密钥:请设置 san_password 或 " "san_private_key 选项。" msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error:无法确定系统标识。" msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error:无法确定系统名称。" msgid "check_hypermetro_exist error." msgstr "check_hypermetro_exist 错误。" #, python-format msgid "clone depth exceeds limit of %s" msgstr "克隆深度超过 %s 的限制" msgid "consistencygroup assigned" msgstr "已分配 consistencygroup" msgid "consistencygroup changed" msgstr "已更改 consistencygroup" msgid "control_location must be defined" msgstr "必须定义 control_location" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume,源卷在 ETERNUS 中不存在。" #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume,目标卷实例名:%(volume_instancename)s,获取实例失败。" msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume:源和目标大小不同。" #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume:源卷 %(src_vol)s 大小为 %(src_size)dGB,无法拟合大小为 " "%(tgt_size)dGB 的目标卷 %(tgt_vol)s。" msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "create_consistencygroup_from_src 必须为通过 CG 快照或源 CG 创建。" msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src 仅支持 cgsnapshot 源或一致性组源。不能使用多" "个源。" msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src 支持 cgsnapshot 源或一致性组源。不能使用多个" "源。" #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy:源 vdisk %(src)s (%(src_id)s) 不存在。" #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy:源 vdisk %(src)s 不存在。" msgid "create_host: Host name is not unicode or string." msgstr "create_host:主机名不是 Unicode 或字符串。" msgid "create_host: No initiators or wwpns supplied." 
msgstr "create_host:未提供任何发起方或 wwpn。" msgid "create_hypermetro_pair error." msgstr "create_hypermetro_pair 错误。" #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "create_snapshot,eternus_pool:%(eternus_pool)s,找不到池。" #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot,snapshotname:%(snapshotname)s,源卷名:%(volumename)s," "vol_instance.path: %(vol_instance)s,目标卷名:%(d_volumename)s,池:" "%(pool)s,返回码:%(rc)lu,错误:%(errordesc)s。" #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot,volumename:%(s_volumename)s,在 ETERNUS 上找不到源卷。" #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "create_snapshot,volumename:%(volumename)s,找不到复制服务。" #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot:对于快照,卷状态必须为“available”或“in-use”。无效状态为 %s。" msgid "create_snapshot: get source volume failed." msgstr "create_snapshot:获取源卷失败。" #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume,卷:%(volume)s,EnumerateInstances,无法连接至 ETERNUS。" #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume,卷:%(volume)s,卷名:%(volumename)s,eternus_pool:" "%(eternus_pool)s,找不到存储器配置服务。" #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_volume,volumename:%(volumename)s,poolname:%(eternus_pool)s,返回" "码:%(rc)lu,错误:%(errordesc)s。" msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "create_volume_from_snapshot,源卷在 ETERNUS 中不存在。" #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot,目标卷实例名:%(volume_instancename)s,获取实例" "失败。" #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "create_volume_from_snapshot:快照 %(name)s 不存在。" #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot:快照状态必须为“可用”,以便创建卷。无效状态为 " "%s。" msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "create_volume_from_snapshot:源和目标大小不同。" msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "create_volume_from_snapshot:卷大小不同于基于快照的卷。" msgid "deduplicated and auto tiering can't be both enabled." msgstr "不能同时启用去重和自动分层。" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "删除:%(vol_id)s 由于自变量无效而未能运行 dsmc,标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "删除:%(vol_id)s 未能运行 dsmc,标准输出:%(out)s\n" "标准错误:%(err)s" msgid "delete_hypermetro error." 
msgstr "delete_hypermetro 错误。" #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "找不到 delete_initiator: %s ACL。正在继续。" msgid "delete_replication error." msgstr "delete_replication 错误。" #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "正在删除具有从属卷的快照 %(snapshot_name)s" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "正在删除有快照的卷 %(volume_name)s" msgid "detach snapshot from remote node" msgstr "从远程节点拆离快照" msgid "do_setup: No configured nodes." msgstr "do_setup:不存在任何已配置的节点。" msgid "element is not a child" msgstr "元素不是子节点" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries 必须大于或等于 0" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "将对象写入 swift 时出错,swift %(etag)s 中对象的 MD5 与发送至 swift %(md5)s " "的对象的 MD5 不同" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "extend_volume,eternus_pool:%(eternus_pool)s,找不到池。" #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume,卷:%(volume)s, volumename:%(volumename)s,eternus_pool:" "%(eternus_pool)s,找不到存储器配置服务。" #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume,volumename:%(volumename)s,返回码:%(rc)lu,错误:" "%(errordesc)s,池类型:%(pooltype)s。" #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "extend_volume,volumename:%(volumename)s,找不到卷。" msgid "failed to create new_volume on destination host" msgstr "未能在目标主机上创建新卷" msgid "fake" msgstr "fake" #, python-format msgid "file already exists at %s" msgstr "文件已存在在 %s" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "SheepdogIOWrapper 不支持 fileno" msgid "fileno() not supported by RBD()" msgstr "fileno() 不受 RBD() 支持" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "文件系统 %s 在 Nexenta 存储设备中不存在" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "flashsystem_multihostmap_enabled 设置为 False,因而不允许多主机映射。" "CMMVC6071E 该 VDisk 至主机的映射未创建,因为该 VDisk 已映射到某个主机。" msgid "flush() not supported in this version of librbd" msgstr "在 librbd 的此版本中,flush() 不受支持" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s 受以下项支持:%(backing_file)s" msgid "force delete" msgstr "强制删除" msgid "get_hyper_domain_id error." msgstr "get_hyper_domain_id 错误。" msgid "get_hypermetro_by_id error." msgstr "get_hypermetro_by_id 错误。" #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "get_iscsi_params:未能获取发起方 %(ini)s 的目标 IP,请检查配置文件。" #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool:无法获取卷 %s 的属性" msgid "glance_metadata changed" msgstr "已更改 glance_metadata" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." 
msgstr "" "gpfs_images_share_mode 已设置为 copy_on_write,但 %(vol)s 和 %(img)s 属于不同" "文件系统。" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode 已设置为 copy_on_write,但 %(vol)s 和 %(img)s 属于不同" "文件集。" #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "在 cinder.conf 中,hgst_group %(grp)s 和 hgst_user %(usr)s必须映射至有效用户/" "组" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "在集群中找不到 cinder.conf 中所指定的 hgst_net %(net)s" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "在 cinder.conf 中,hgst_redundancy 必须设置为 0(非 HA)或者 1 (HA)。" msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "在 cinder.conf 中,hgst_space_mode 必须为 octal/int" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "hgst_storage 服务器 %(svr)s 不是 : 格式" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "必须在 cinder.conf 中定义 hgst_storage_servers" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "HTTP 服务可能已突然禁用,或在此操作的中途进入维护状态。" msgid "id cannot be None" msgstr "id不能是None" #, python-format msgid "image %s not found" msgstr "找不到映像 %s " #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "initialize_connection,卷:%(volume)s,找不到卷。" #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "initialize_connection:未能获取卷 %s 的属性。" #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "initialize_connection:缺少卷 %s 的卷属性。" #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "initialize_connection:在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到节点。" #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection:未定义 vdisk %s。" #, python-format msgid "invalid user '%s'" msgstr "用户 '%s' 无效" #, python-format msgid "iscsi portal, %s, not found" msgstr "找不到 iscsi 门户网站 %s" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "使用“iSCSI”协议时,必须在配置文件中设置 iscsi_ip_address。" msgid "iscsiadm execution failed. " msgstr "iscsiadm 执行失败。" #, python-format msgid "key manager error: %(reason)s" msgstr "发生密钥管理器错误:%(reason)s" msgid "keymgr.fixed_key not defined" msgstr "未定义 keymgr.fixed_key" msgid "limit param must be an integer" msgstr "limit 参数必须是整数" msgid "limit param must be positive" msgstr "limit参数必须是正数" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "manage_existing 无法管理连接至主机的卷。请断开此卷与现有主机的连接,然后导入" msgid "manage_existing requires a 'name' key to identify an existing volume." 
msgstr "manage_existing 需要“name”键以标识现有卷。" #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "manage_existing_snapshot:管理卷 %(vol)s 上的现有重放 %(ss)s 时出错" #, python-format msgid "marker [%s] not found" msgstr "没有找到标记 [%s]" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "Mdisk 组缺少引号 %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "migration_policy 必须为“on-demand”或“never”,已传递:%s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "mkfs 在卷 %(vol)s 上发生故障,错误消息如下:%(err)s。" msgid "mock" msgstr "mock" msgid "mount.glusterfs is not installed" msgstr "未安装 mount.glusterfs" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "drbdmanage 找到多个名称为 %s 的资源" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "找到多个快照标识为 %s 的资源" msgid "name cannot be None" msgstr "name不能是None" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path:找不到 NAVISECCLI 工具 %(path)s。" #, python-format msgid "no REPLY but %r" msgstr "无回复,但收到 %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "在 drbdmanage 中找不到标识为 %s 的快照" #, python-format msgid "not exactly one snapshot with id %s" msgstr "而不是刚好只有一个标识为 %s 的快照" #, python-format msgid "not exactly one volume with id %s" msgstr "而不是刚好只有一个标识为 %s 的卷" #, python-format msgid "obj missing quotes %s" msgstr "对象缺少引号 %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled 未关闭。" msgid "progress must be an integer percentage" msgstr "进度必须为整数百分比" msgid "promote_replica not implemented." msgstr "未实现 promote_replica。" msgid "provider must be defined" msgstr "必须定义提供程序" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "此卷驱动程序需要 qemu-img %(minimum_version)s 或更高版本。当前 qemu-img 版" "本:%(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img 未安装,并且映像的类型为 %s。仅当 qemu-img 未安装时,才能使用原始映" "像。" msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img 未安装,并且磁盘格式未指定。仅当 qemu-img 未安装时,才能使用原始映" "像。" msgid "rados and rbd python libraries not found" msgstr "找不到 rados 和 rbd python 库" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted 只能是“no”、“yes”或“only”其中一项,而不能是 %r" #, python-format msgid "replication_device should be configured on backend: %s." msgstr "应在后端上配置 replication_device:%s。" #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "缺少 backend_id 为 [%s] 的 replication_device。" #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover 失败。找不到 %s。" msgid "replication_failover failed. 
Backend not configured for failover" msgstr "replication_failover 失败。未配置后端,无法进行故障转移" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "复原:%(vol_id)s 由于 %(bpath)s 上的自变量无效而未能运行 dsmc。\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "复原:%(vol_id)s 未能对 %(bpath)s 运行 dsmc。\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "复原:%(vol_id)s 失败。\n" "标准输出:%(out)s\n" "标准错误:%(err)s。" msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup 已异常中止,实际的对象列表与存储在元数据中的对象列表不匹配。" msgid "root element selecting a list" msgstr "根元素选择列表" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "rtslib_fb 缺少成员 %s:您可能需要较新的 python-rtslib-fb。" msgid "san_ip is not set." msgstr "未设置 san_ip。" msgid "san_ip must be set" msgstr "san_ip 必须设置" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip:必需字段配置。未设置 san_ip。" msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "没有在 cinder.conf 中为 Datera 驱动程序设置 san_login 和/或 san_password。请" "设置此信息并再次启动 cinder-volume 服务。" msgid "serve() can only be called once" msgstr "serve() 只能调用一次" msgid "service not found" msgstr "找不到服务" msgid "snapshot does not exist" msgstr "快照不存在" #, python-format msgid "snapshot id:%s not found" msgstr "找不到快照标识 %s" #, python-format msgid "snapshot-%s" msgstr "快照 - %s" msgid "snapshots assigned" msgstr "已分配快照" msgid "snapshots changed" msgstr "已更改快照" #, python-format msgid "source vol id:%s not found" msgstr "找不到源卷标识 %s" #, python-format msgid "source volume id:%s is not replicated" msgstr "未复制源卷标识 %s" msgid "source-name cannot be empty." msgstr "source-name 不能为空。" msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "source-name 格式应为“vmdk_path@vm_inventory_path”。" #, python-format msgid "status must be %s and" msgstr "状态必须为 %s,并且" msgid "status must be available" msgstr "状态必须可用" msgid "stop_hypermetro error." msgstr "stop_hypermetro 错误。" msgid "subclasses must implement construct()!" msgstr "子类必须实现 construct()!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo 发生故障,将继续运行,如同没有发生任何情况一样" msgid "sync_hypermetro error." msgstr "sync_hypermetro 错误。" msgid "sync_replica not implemented." msgstr "未实现 sync_replica。" #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "未安装 targetcli,并且未能创建缺省目录(%(default_path)s):%(exc)s" msgid "terminate_connection: Failed to get host name from connector."
msgstr "terminate_connection:未能从连接器获取主机名。" msgid "timeout creating new_volume on destination host" msgstr "在目标主机上创建新卷超时" msgid "too many body keys" msgstr "过多主体密钥" #, python-format msgid "umount: %s: not mounted" msgstr "umount: %s:未安装" #, python-format msgid "umount: %s: target is busy" msgstr "umount: %s:目标正忙" msgid "umount: : some other error" msgstr "umount: :某个其他错误" msgid "umount: : target is busy" msgstr "umount: :目标正忙" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot:找不到名为 %s 的快照" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot:找不到卷标识 %s" #, python-format msgid "unrecognized argument %s" msgstr "无法识别自变量 %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "以下压缩算法不受支持:%s" msgid "valid iqn needed for show_target" msgstr "show_target 需要有效 iqn" #, python-format msgid "vdisk %s is not defined." msgstr "未定义 vdisk %s。" msgid "vmemclient python library not found" msgstr "找不到 vmemclient python 库" #, python-format msgid "volume %s not found in drbdmanage" msgstr "在 drbdmanage 中找不到卷 %s" msgid "volume assigned" msgstr "卷已分配" msgid "volume changed" msgstr "卷已更改" msgid "volume does not exist" msgstr "卷不存在" msgid "volume is already attached" msgstr "卷已连接" msgid "volume is not local to this node" msgstr "卷不是此节点的本地卷" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "卷大小 %(volume_size)d 太小,无法复原大小为 %(size)d 的备份。" #, python-format msgid "volume size %d is invalid." msgstr "卷大小 %d 无效。" msgid "volume_type cannot be None" msgstr "volume_type 不能为 None" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "在一致性组中创建卷时,必须提供 volume_type。" msgid "volume_type_id cannot be None" msgstr "volume_type_id 不能为“无”" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "必须提供 volume_types,才能创建一致性组 %(name)s。" #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "必须提供 volume_types,才能创建一致性组 %s。" msgid "volumes assigned" msgstr "已分配卷" msgid "volumes changed" msgstr "已更改卷" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition:%s 已超时。" #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "zfssa_manage_policy 属性需要设置为“strict”或“loose”。当前值为:%s。" msgid "{} is not a valid option." msgstr "{} 是无效选项。" cinder-8.0.0/cinder/locale/es/0000775000567000056710000000000012701406543017240 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/es/LC_MESSAGES/0000775000567000056710000000000012701406543021025 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/es/LC_MESSAGES/cinder.po0000664000567000056710000130621412701406257022642 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # Eduardo Gonzalez Gutierrez , 2015 # FIRST AUTHOR , 2011 # Jose Enrique Ruiz Navarro , 2014 # Eduardo Gonzalez Gutierrez , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Andreas Jaeger , 2016. #zanata # Eugènia Torrella , 2016. #zanata # Tom Cocozzello , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev24\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-27 07:54+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-27 07:57+0000\n" "Last-Translator: Eugènia Torrella \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "Versión de OpenStack Cinder: %(version)s\n" #, python-format msgid " but size is now %d" msgstr " pero el tamaño ahora es %d" #, python-format msgid " but size is now %d." msgstr " pero el tamaño es ahora %d." msgid " or " msgstr " o " #, python-format msgid "%(attr)s is not set." msgstr "%(attr)s no está establecido." #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing no puede gestionar un volumen conectado con " "hosts. Desconecte este volumen de los hosts existentes antes de realizar la " "importación." #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "Resultado: %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s: Permiso denegado." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: se ha encontrado un error con una salida de CLI inesperada.\n" " Mandato: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "Código de estado: %(_status)s\n" "Cuerpo: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, subjectAltName: %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s: se está creando NetworkPortal: asegúrese de que el puerto " "%(port)d en la IP %(ip)s no lo esté utilizando otro servicio." #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s requiere de, al menos, %(min_length)s caracteres." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s tiene más de %(max_length)s caracteres." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: copia de seguridad %(bck_id)s, volumen %(vol_id)s ha fallado. El " "objeto de copia de seguridad tiene una modalidad inesperada. Se soportan las " "copias de seguridad de imagen o archivo, la modalidad real es %(vol_mode)s." 
#, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "El servicio %(service)s no está %(status)s en el dispositivo de " "almacenamiento: %(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s debe ser <= %(max_value)d" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s debe ser >= %(min_value)d" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "El valor %(worker_name)s de %(workers)d no es válido, debe ser mayor que 0." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "%s \"data\" no está en el resultado." #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "No se puede acceder a %s. Verifique que GPFS está activo y que el sistema de " "archivos está montado." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "%s no se puede redimensionar utilizando la operación de clonación ya que no " "contiene bloques." #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "%s no se puede redimensionar utilizando la operación de clonación ya que se " "encuentra en un volumen comprimido" #, python-format msgid "%s configuration option is not set." msgstr "La opción de configuración %s no está establecida." #, python-format msgid "%s does not exist." msgstr "%s no existe." #, python-format msgid "%s is not a directory." msgstr "%s no es un directorio." #, python-format msgid "%s is not a string or unicode" msgstr "%s no es una serie o unicode" #, python-format msgid "%s is not installed" msgstr "%s no está instalado" #, python-format msgid "%s is not installed." msgstr "%s no está instalado." #, python-format msgid "%s is not set" msgstr "%s no está establecido" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" "%s no está definido y es necesario para que el dispositivo de replicación " "sea válido." #, python-format msgid "%s is not set." msgstr "%s no está establecido." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s debe ser una imagen raw o qcow2 válida." #, python-format msgid "%s must be an absolute path." msgstr "%s debe ser una ruta absoluta." #, python-format msgid "%s must be an integer." msgstr "%s debe ser un entero." #, python-format msgid "%s not set in cinder.conf" msgstr "%s no está definido en cinder.conf" #, python-format msgid "%s not set." msgstr "%s no establecido." #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s' no es válido para flashsystem_connection_protocol en el archivo " "de configuración. Los valores válidos son %(enabled)s." msgid "'active' must be present when writing snap_info." msgstr "'activo' debe estar presente al escribir snap_info." msgid "'consistencygroup_id' must be specified" msgstr "Es necesario especificar el 'consistencygroup_id'" msgid "'qemu-img info' parsing failed." msgstr "Se ha encontrado un error en el análisis de 'qemu-img info'." msgid "'status' must be specified." msgstr "se debe especificar 'status'." 
msgid "'volume_id' must be specified" msgstr "Se debe especificar 'volume_id'" msgid "'{}' object has no attribute '{}'" msgstr "El objeto '{}' no tiene ningún atributo '{}'" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(Mandato: %(cmd)s) (Código de retorno: %(exit_code)s) (Salida estándar: " "%(stdout)s) (Error estándar: %(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "No se ha encontrado un LUN (HLUN). (LDEV: %(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "Se ha realizado una solicitud simultánea, posiblemente contradictoria." #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "No se ha encontrado un LUN (HLUN) libre. Añada un grupo de host diferente. " "(LDEV: %(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" "No se ha podido añadir un grupo de host. (puerto: %(port)s, nombre: %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "No se ha podido eliminar un grupo de host. (puerto: %(port)s, ID de grupo: " "%(gid)s, nombre: %(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "Un grupo de host no es válido. (grupo de host: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "No se puede suprimir un par. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "No se ha podido crear un par. Se ha excedido el número de par máximo. " "(método de copia: %(copy_method)s, P-VOL: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "Un parámetro no es válido. (%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "Un valor de parámetro no es válido. (%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "No se ha encontrado una agrupación. (ID de agrupación: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Un estado de instantánea no es válido. (estado: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" "Se DEBE especificar un destino secundario válido para poder realizar la " "migración tras error." msgid "A volume ID or share was not specified." msgstr "No se ha especificado un ID de volumen o compartición." #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "Un estado de volumen no es válido. (estado: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "La API %(name)s ha fallado con serie de error %(err)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "La cadena de la versión de la API %(version)s tiene un formato no válido. " "Debe ser un formato MajorNum.MinorNum." msgid "API key is missing for CloudByte driver." msgstr "Falta la clave de API para el controlador CloudByte." 
#, python-format msgid "API response: %(response)s" msgstr "Respuesta de la API: %(response)s" #, python-format msgid "API response: %s" msgstr "Respuesta de la API: %s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "La versión de la API %(version)s, no está soportada en este método." msgid "API version could not be determined." msgstr "No se ha podido determinar la versión de API." msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "A punto de suprimir proyectos hijo que tienen cuota distinta de cero. Esto " "no debe realizarse." msgid "Access list not available for public volume types." msgstr "La lista de acceso no está disponible para tipos de volumen públicos." msgid "Activate or deactivate QoS error." msgstr "Error al activar o desactivar QoS." msgid "Activate snapshot error." msgstr "Error al activar una instantánea." msgid "Add FC port to host error." msgstr "Error al añadir el puerto FC al host." msgid "Add fc initiator to array error." msgstr "Error al añadir el iniciador fc a la matriz." msgid "Add initiator to array error." msgstr "Error al añadir el iniciar a la matriz." msgid "Add lun to cache error." msgstr "Error al añadir lun a la caché." msgid "Add lun to partition error." msgstr "Error al añadir lun a la partición." msgid "Add mapping view error." msgstr "Error al añadir la vista de correlaciones." msgid "Add new host error." msgstr "Error al añadir host nuevo." msgid "Add port to port group error." msgstr "Error al añadir el puerto al grupo de puertos." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "No existen las agrupaciones de almacenamiento especificadas que se van a " "gestionar. Compruebe su configuración. Agrupaciones no existentes: %s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "" "Una solicitud de versión de la API se debe comparar con un objeto " "VersionedMethod." #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "Se ha producido un error en SheepdogDriver. (Razón: %(reason)s)" msgid "An error has occurred during backup operation" msgstr "Un error ha ocurrido durante la operación de copia de seguridad" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "Se ha producido un error al intentar modificar la instantánea '%s'." #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "Se ha producido un error al buscar el volumen \"%s\"." #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "Se ha producido un error durante la operación LUNcopy. Nombre de LUNcopy: " "%(luncopyname)s. Situación de LUNcopy: %(luncopystatus)s. Estado de LUNcopy: " "%(luncopystate)s." #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "Se ha producido un error al leer el volumen \"%s\"." #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "Se ha producido un error al escribir en el volumen \"%s\"." #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "" "No se ha podido añadir un usuario CHAP de iSCSI. (nombre de usuario: " "%(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. 
(username: %(user)s)" msgstr "" "No se ha podido eliminar un usuario CHAP de iSCSI. (username: %(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "No se ha podido añadir un destino de iSCSI. (puerto: %(port)s, alias: " "%(alias)s, razón: %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "No se ha podido suprimir un destino de iSCSI. (puerto: %(port)s, tno: " "%(tno)s, alias: %(alias)s)" msgid "An unknown exception occurred." msgstr "Una excepción desconocida ha ocurrido" msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "" "Un usuario con ámbito de señal para un subproyecto no puede ver la cuota de " "sus padres." msgid "Append port group description error." msgstr "Error al anexar la descripción del grupo de puertos." #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "Ha fallado la aplicación de zonas y cfg al conmutador (código de error=" "%(err_code)s mensaje de error=%(err_msg)s." #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "" "La matriz no existe o está fuera de línea. El estado actual de la matriz es " "%s." msgid "Associate host to hostgroup error." msgstr "Error al asociar host con el grupo de host." msgid "Associate host to mapping view error." msgstr "Error al asociar el host con la vista de correlaciones." msgid "Associate initiator to host error." msgstr "Error al asociar el iniciador con el host." msgid "Associate lun to QoS error." msgstr "Error al asociar el LUN a QoS." msgid "Associate lun to lungroup error." msgstr "Error al asociar LUN con el grupo de LUN." msgid "Associate lungroup to mapping view error." msgstr "Error al asociar el grupo de LUN con la vista de correlaciones." msgid "Associate portgroup to mapping view error." msgstr "Error al asociar el grupo de puertos a la vista de correlaciones." msgid "At least one valid iSCSI IP address must be set." msgstr "Por lo menos se debe establecer una dirección IP de iSCSI válida." #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "Intento de transferir %s con clave de aut no válida." #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "" "No se han encontrado detalles del grupo de autenticación [%s] en el " "almacenamiento CloudByte." msgid "Auth user details not found in CloudByte storage." msgstr "" "No se han encontrado detalles del usuario de autenticación en el " "almacenamiento CloudByte." msgid "Authentication error" msgstr "Error de autenticación" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "Ha fallado la autenticación, compruebe las credenciales del conmutador, " "código de error %s." msgid "Authorization error" msgstr "Error de autorización" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "La zona de disponibilidad '%(s_az)s' no es válida." msgid "Available categories:" msgstr "Categorías disponibles:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "Las especificaciones QoS de programa de fondo no se admiten en esta familia " "de almacenamiento y versión ONTAP ." 
#, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "El programa de fondo no existe %(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "" "El programa de fondo ya ha hecho una migración tras error. No se puede " "restaurar." #, python-format msgid "Backend reports: %(message)s" msgstr "Informes de fondo: %(message)s" msgid "Backend reports: item already exists" msgstr "Informes de fondo: el elemento ya existe" msgid "Backend reports: item not found" msgstr "Informes de fondo: elemento no encontrado" msgid "Backend server not NaServer." msgstr "El servidor Backend no es NaServer." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" "Tiempo de espera de reintento de servicio de fondo: %(timeout)s segundos" msgid "Backend storage did not configure fiber channel target." msgstr "" "El almacenamiento del programa de fondo no ha configurado el destino de " "canal de fibra." msgid "Backing up an in-use volume must use the force flag." msgstr "" "La copia de seguridad de un volumen en uso debe utilizar el distintivo force." #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "No se ha podido encontrar la copia de seguridad %(backup_id)s." msgid "Backup RBD operation failed" msgstr "La operación de RBD de copia de seguridad ha fallado" msgid "Backup already exists in database." msgstr "La copia de seguridad ya existe en la base de datos." #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "Driver de copia de seguridad ha reportado un error: %(message)s" msgid "Backup id required" msgstr "Se necesita una copia de seguridad" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "" "No se soporta la copia de seguridad para volúmenes GlusterFS con " "instantáneas." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "" "La copia de seguridad sólo se admite en volúmenes SOFS sin archivo de " "respaldo." msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "" "La copia de seguridad solo se soporta para volúmenes GlusterFS con formato " "raw." msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "La copia de seguridad sólo se admite en volúmenes SOFS sin formato." msgid "Backup operation of an encrypted volume failed." msgstr "" "No se ha podido realizar la operación de copia de seguridad de un volumen " "cifrado." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "El servicio de copia de seguridad %(configured_service)s no admite la " "verificación. El id de copia de seguridad %(id)s no se ha verificado. Se " "omite la verificación." #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "El servicio de copia de seguridad %(service)s no soporta la verificación. El " "ID de copia de seguridad %(id)s no se ha verificado. Se omite el " "restablecimiento." #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "" "La copia de seguridad solo debe tener una instantánea pero en lugar de ello " "tiene %s" msgid "Backup status must be available" msgstr "El estado de la copia de seguridad debe ser available" #, python-format msgid "Backup status must be available and not %s." msgstr "El estado de copia de seguridad debe ser disponible y no %s." 
msgid "Backup status must be available or error" msgstr "El estado de la copia de seguridad debe ser available o error" msgid "Backup to be restored has invalid size" msgstr "La copia de seguridad que restaurar tiene un tamaño no válido" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "Se ha devuelto una línea de estado errónea: %(arg)s." #, python-format msgid "Bad key(s) in quota set: %s" msgstr "Clave(s) incorrecta(s) en conjunto de cuotas: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "Respuesta errónea o inesperada de la API de programa de fondo del volumen de " "almacenamiento: %(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Formato de proyecto erróneo: el proyecto no tiene un formato correcto (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Solicitud incorrecta enviada al clúster Datera: Argumentos no válidos: " "%(args)s | %(message)s" msgid "Bad response from Datera API" msgstr "Respuesta errónea de la API Datera" msgid "Bad response from SolidFire API" msgstr "Respuesta errónea de la API SolidFire" #, python-format msgid "Bad response from XMS, %s" msgstr "Respuesta errónea de XMS, %s" msgid "Binary" msgstr "Binario" msgid "Blank components" msgstr "Componentes en blanco" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Esquema de autenticación de API Blockbridge (señal o contraseña)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "" "Contraseña de API Blockbridge (para esquema de autenticación 'password')" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Señal de API Blockbridge (para el esquema de autenticación 'token')" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "" "Usuario de API Blockbridge (para el esquema de autenticación 'password')" msgid "Blockbridge api host not configured" msgstr "No se ha configurado el host de API Blockbridge" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "" "Blockbridge se ha configurado con un esquema de autenticación no válido " "'%(auth_scheme)s'" msgid "Blockbridge default pool does not exist" msgstr "La agrupación predeterminada de Blockbridge no existe" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Contraseña de Blockbridge no configurada (obligatoria para el esquema de " "autenticación 'password')" msgid "Blockbridge pools not configured" msgstr "Agrupaciones de Blockbridge no configuradas" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Señal Blockbridge no configurada (obligatoria para el esquema de " "autenticación 'token')" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Usuario de API Blockbridge no configurado (obligatorio para el esquema de " "autenticación 'password')" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "" "Error de CLI de distribución en zonas de canal de fibra de Brocade: " "%(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "" "Error de HTTP de distribución en zonas de canal de fibra de Brocade: " "%(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "El secreto de CHAP debe tener entre 12 y 16 bytes." 
#, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Salida de excepción de CLI:\n" " mandato: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Salida de excepción de CLI:\n" " mandato: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E La correlación de disco virtual a host no se ha creado, ya que el " "disco virtual ya se ha correlacionado con un host.\n" "\"" msgid "CONCERTO version is not supported" msgstr "No se admite la versión CONCERTO." #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) no existe en la matriz" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "" "El nombre de caché es Ninguno, establezca smartcache:cachename en clave." #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "" "El volumen de la memoria caché %(cache_vol)s no tiene la instantánea " "%(cache_snap)s." #, python-format msgid "Cache volume %s does not have required properties" msgstr "El volumen de la memoria caché %s no tiene las propiedades necesarias" msgid "Call returned a None object" msgstr "La llamada ha devuelto un objeto None" msgid "Can not add FC port to host." msgstr "No se puede añadir el puerto FC al host." #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "No se encuentra el ID de caché por nombre de caché %(name)s." #, python-format msgid "Can not find partition id by name %(name)s." msgstr "No se encuentra el ID de partición por nombre %(name)s." #, python-format msgid "Can not get pool info. pool: %s" msgstr "" "No se ha podido obtener la información de la agrupación. Agrupación: %s" #, python-format msgid "Can not translate %s to integer." msgstr "No se puede traducir %s a un entero." #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "No se puede acceder a 'scality_sofs_config': %s" msgid "Can't attach snapshot." msgstr "No se puede adjuntar la instantánea." msgid "Can't decode backup record." msgstr "No se puede decodificar el registro de copia de seguridad." #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "No se puede extender el volumen de replicación, volumen: %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "No se puede encontrar el LUN en la matriz, compruebe el nombre de origen o " "el ID de origen." #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "" "No se encuentra el nombre de la memoria caché en la matriz, el nombre de la " "memoria caché es: %(name)s." #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "" "No se puede encontrar el ID de lun en la base de datos, volumen: %(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" "No se puede encontrar la información del LUN en la matriz. Volumen: %(id)s, " "nombre de lun: %(name)s." #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" "No se puede encontrar el nombre de la partición en la matriz, el nombre de " "la partición es: %(name)s." 
#, python-format msgid "Can't find service: %s" msgstr "No se ha podido encontrar el servicio: %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "No se puede encontrar la instantánea en la matriz, compruebe el nombre de " "origen o el ID de origen." msgid "Can't find the same host id from arrays." msgstr "No se puede encontrar mismo ID de host en las matrices." #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "" "No se puede obtener el ID de volumen de la instantánea, instantánea: %(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "No se puede obtener el ID de volumen. Nombre de volumen %s." #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "No se puede importar el LUN %(lun_id)s en Cinder. El tipo de LUN no es " "coincidente." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "No se puede importar el LUN %s en Cinder. Ya existe en un HyperMetroPair." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "No se puede importar el LUN %s en Cinder. Ya existe en una tarea de copia de " "LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "" "No se puede importar el LUN %s en Cinder. Ya existe en un grupo de LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "" "No se puede importar el LUN %s en Cinder. Ya existe en un reflejo de LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "No se puede importar el LUN %s en Cinder. Ya existe en un SplitMirror." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "No se puede importar el LUN %s en Cinder. Ya existe en una tarea de " "migración." #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "" "No se puede importar el LUN %s en Cinder. Ya existe en una tarea de " "replicación remota." #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "" "No se puede importar el LUN %s en Cinder. El estado del LUN no es normal." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "No se puede importar la instantánea %s en Cinder. La instantánea no " "pertenece al volumen." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "No se puede importar la instantánea %s en Cinder. Se ha expuesto la " "instantánea al iniciador." #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "No se puede importar la instantánea %s en Cinder. El estado de la " "instantánea no es normal o el estado de ejecución no es en línea." #, python-format msgid "Can't open config file: %s" msgstr "No se puede abrir el archivo de configuración: %s" msgid "Can't parse backup record." msgstr "No se puede analizar el registro de copia de seguridad." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " "que no tiene tipo de volumen." 
#, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "No se puede añadir el volumen %(volume_id)s al grupo de consistencia " "%(group_id)s porque ya está en el grupo de consistencia %(orig_group)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " "que no se puede encontrar el volumen." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " "el volumen no existe." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " "que el volumen está en un estado no válido: %(status)s. Los estados válidos " "son: %(valid)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " "que el grupo no soporta el tipo de volumen %(volume_type)s." #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "No se puede conectar un volumen ya conectado %s; la conexión múltiple está " "inhabilitada mediante la opción de configuración 'netapp_enable_multiattach'." msgid "Cannot change VF context in the session." msgstr "No se puede cambiar el contexto de la VF en la sesión." #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "No se puede cambiar el contexto de la VF, la VF especificada no está " "disponible en la lista de VF gestionables %(vf_list)s." msgid "Cannot connect to ECOM server." msgstr "No se puede conectar al servidor de ECOM.." #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "No se puede crear un clon con un tamaño de %(vol_size)s a partir de un " "volumen con un tamaño de %(src_vol_size)s" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "No se puede crear el grupo de consistencia %(group)s porque la instantánea " "%(snap)s no está en un estado válido. Los estados válidos son: %(valid)s." #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "No se puede crear el grupo de consistencia %(group)s porque el volumen de " "origen %(source_vol)s no está en un estado válido. Los estados válidos son: " "%(valid)s." #, python-format msgid "Cannot create directory %s." msgstr "No se puede crear el directorio %s." msgid "Cannot create encryption specs. Volume type in use." msgstr "" "No se pueden crear especificaciones de cifrado. El tipo de volumen se está " "utilizando." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." 
msgstr "" "No se puede crear la imagen del formato de disco: %s. Solo se acepta el " "formato de disco vmdk." #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "No se puede crear la vista de máscara: %(maskingViewName)s. " #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "No se pueden crear más de %(req)s volúmenes en la matriz ESeries si " "'netapp_enable_multiattach' se ha establecido en true." #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "No se puede crear o encontrar un grupo de almacenamiento con el nombre " "%(sgGroupName)s." #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "No se puede crear un volumen con un tamaño de %(vol_size)s a partir de una " "instantánea con un tamaño de %(snap_size)s" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "No se puede crear el volumen de tamaño %s: no es múltiplo de 8GB." #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "No se puede crear el tipo de volumen con el nombre %(name)s y las " "especificaciones %(extra_specs)s" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "No se puede suprimir LUN %s mientras haya instantáneas." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "No se puede suprimir el volumen de la memoria caché: %(cachevol_name)s. Se " "ha actualizado a %(updated_at)s y actualmente tiene %(numclones)d instancias " "de volumen." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "No se puede suprimir el volumen de la memoria caché: %(cachevol_name)s. Se " "ha actualizado a %(updated_at)s y actualmente tiene %(numclones)s instancias " "de volumen." msgid "Cannot delete encryption specs. Volume type in use." msgstr "" "No se pueden suprimir especificaciones de cifrado. El tipo de volumen se " "está utilizando." msgid "Cannot determine storage pool settings." msgstr "" "No se puede determinar la configuración de la agrupación de almacenamiento." msgid "Cannot execute /sbin/mount.sofs" msgstr "No se puede ejecutar /sbin/mount.sofs" #, python-format msgid "Cannot find CG group %s." msgstr "No se puede encontrar el grupo CG %s." #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "No se puede encontrar el servicio de configuración de controlador para el " "sistema de almacenamiento %(storage_system)s." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" "No se puede encontrar el servicio de réplica para crear el volumen para la " "instantánea %s." #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "" "No se encuentra el servicio de réplica para suprimir la instantánea %s." #, python-format msgid "Cannot find Replication service on system %s." msgstr "No se ha podido encontrar el servicio de réplica en el sistema %s." #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "" "No se encuentra el volumen: %(id)s. 
No se puede gestionar la operación. " "Saliendo..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "" "No se puede encontrar el volumen: %(volumename)s. Ampliar operación. " "Saliendo...." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "" "No se puede encontrar el número de dispositivo para el volumen " "%(volumeName)s." msgid "Cannot find migration task." msgstr "No se puede encontrar la tarea de migración." #, python-format msgid "Cannot find replication service on system %s." msgstr "No se ha podido encontrar el servicio de réplica en el sistema %s." #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "" "No se encuentra la instancia de grupo de consistencia de origen. " "consistencygroup_id: %s." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "No se puede obtener mcs_id por el ID de canal: %(channel_id)s." msgid "Cannot get necessary pool or storage system information." msgstr "" "No se puede obtener la información del sistema de almacenamiento o " "agrupación necesaria" #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "No se puede obtener o crear un grupo de almacenamiento: %(sgGroupName)s para " "el volumen %(volumeName)s " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "No se puede obtener o crear el grupo de iniciadores: %(igGroupName)s. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "No se puede obtener el grupo de puertos: %(pgGroupName)s. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "No se puede obtener el grupo de almacenamiento: %(sgGroupName)s de la vista " "de máscara %(maskingViewInstanceName)s. " #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "No se puede obtener el rango de tamaño soportado para %(sps)s Código de " "retorno: %(rc)lu. Error: %(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "No se puede obtener el grupo de almacenamiento predeterminado para la " "política FAST: %(fastPolicyName)s." msgid "Cannot get the portgroup from the masking view." msgstr "No se puede obtener el grupo de puertos de la vista de máscara." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "No se puede montar Scality SOFS, compruebe syslog por si hay errores" msgid "Cannot ping DRBDmanage backend" msgstr "No se puede realizar ping en el programa de fondo DRBDmanage" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "No se puede colocar el volumen %(id)s en %(host)s" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "No se puede proporcionar 'cgsnapshot_id' y 'source_cgid' para crear el grupo " "de consistencia %(name)s desde el origen." msgid "Cannot register resource" msgstr "No se puede registrar el recurso" msgid "Cannot register resources" msgstr "No se puede registrar los recursos" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." 
msgstr "" "No se puede eliminar el volumen %(volume_id)s del grupo de consistencia " "%(group_id)s porque no está en el grupo." #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "No se puede eliminar el volumen %(volume_id)s del grupo de consistencia " "%(group_id)s porque el volumen está en un estado no válido: %(status)s. Los " "estados válidos son: %(valid)s." #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "No se puede reescribir de HPE3PARDriver a %s." msgid "Cannot retype from one 3PAR array to another." msgstr "No se puede volver a escribir de una matriz de 3PAR a otra." msgid "Cannot retype to a CPG in a different domain." msgstr "No se puede volver a escribir en un CPG en un dominio diferente." msgid "Cannot retype to a snap CPG in a different domain." msgstr "" "No se puede volver a escribir en un CPG de instantánea en un dominio " "diferente." msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "No se puede ejecutar el mandato vgc-cluster, asegúrese de que el software se " "haya instalado y que los permisos se hayan configurado correctamente." msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "" "No se pueden especificar ambos, hitachi_serial_number e hitachi_unit_name." msgid "Cannot specify both protection domain name and protection domain id." msgstr "" "No se puede especificar el nombre de dominio de protección y el ID de " "dominio de protección." msgid "Cannot specify both storage pool name and storage pool id." msgstr "" "No se puede especificar el nombre de agrupación de almacenamiento y el ID de " "agrupación de almacenamiento." #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "No se puede actualizar el grupo de consistencia %(group_id)s porque no se " "han proporcionado nombre, descripción, add_volumes o remove_volumes válidos." msgid "Cannot update encryption specs. Volume type in use." msgstr "" "No se pueden actualizar especificaciones de cifrado. El tipo de volumen se " "está utilizando." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "No se puede actualizar volume_type (tipo de volumen): %(id)s" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "No se puede verificar la existencia del objeto: %(instanceName)s." msgid "Cascade option is not supported." msgstr "No se admite la opción de cascada." #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "No se ha podido encontrar el CgSnapshot %(cgsnapshot_id)s." msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost está vacío. No se creará ningún grupo de consistencia." msgid "Cgsnapshot status must be available or error" msgstr "El estado de cgsnapshot debe ser disponible o error" msgid "Change hostlun id error." msgstr "Error al cambiar el ID de hostlun." msgid "Change lun priority error." msgstr "Error al cambiar la prioridad de lun." msgid "Change lun smarttier policy error." msgstr "Error al cambiar la política smarttier de lun." 
#, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "El cambio produciría un uso inferior a 0 para los recursos siguientes: " "%(unders)s." msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "" "Compruebe los permisos de acceso para la unidad compartida ZFS asignada a " "este controlador." msgid "Check hostgroup associate error." msgstr "Error al comprobar la asociación del grupo de host." msgid "Check initiator added to array error." msgstr "Error al comprobar el iniciador añadido a la matriz." msgid "Check initiator associated to host error." msgstr "Error al comprobar el iniciador asociado con el host." msgid "Check lungroup associate error." msgstr "Error al comprobar la asociación del grupo de LUN." msgid "Check portgroup associate error." msgstr "Error al comprobar la asociación del grupo de puertos." msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "Compruebe el estado del servicio HTTP. Asegúrese también de que el número de " "puerto HTTPS es el mismo que el que se ha especificado en cinder.conf." msgid "Chunk size is not multiple of block size for creating hash." msgstr "" "El tamaño de trozo no es múltiplo del tamaño de bloque para la creación de " "hash." #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "" "Error de CLI de distribución en zonas de canal de fibra de Cisco: %(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "La función Clonar no tiene licencia en %(storageSystem)s." #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "El tipo de clon '%(clone_type)s' no es válido; los valores válidos son: " "'%(full_clone)s' y '%(linked_clone)s'." msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "" "El clúster no se ha formateado. Debe realizar probablemente \"dog cluster " "format\"." #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Error de controlador Coho Data Cinder: %(message)s" msgid "Coho rpc port is not configured" msgstr "No se ha configurado el puerto RPC de Coho" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Mandato %(cmd)s bloqueado en la CLI que se ha cancelado" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition: %s tiempo de espera" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "" "CommandLineHelper._wait_for_condition: %s ha agotado el tiempo de espera." msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "El habilitador de compresión no está instalado. No se puede crear un volumen " "comprimido." #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "Clúster de cálculo: no se ha encontrado %(cluster)s." msgid "Condition has no field." msgstr "La condición no tiene ningún campo." #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" "La configuración 'max_over_subscription_ratio' no es válida. Debe ser > 0: %s" msgid "Configuration error: dell_sc_ssn not set." msgstr "Error de configuración: dell_sc_ssn no está establecido." 
#, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "El archivo de configuración %(configurationFile)s no existe." msgid "Configuration is not found." msgstr "No se ha encontrado la configuración." #, python-format msgid "Configuration value %s is not set." msgstr "No se ha establecido el valor de configuración %s." msgid "Configured host type is not supported." msgstr "No hay soporte para el tipo de host configurado." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "Especificaciones QoS en conflicto en el tipo de volumen %s: cuando la " "especificación QoS se asocia al tipo de volumen, no se permite el valor " "heredado \"netapp:qos_policy_group\" en las especificaciones adicionales del " "tipo de volumen." #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Conexión a glance falló: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "La conexión a swift ha fallado: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "El conector no proporciona: %s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "El conector no dispone de la información necesaria: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "El grupo de consistencia %s contiene todavía volúmenes. Se requiere el " "distintivo de fuerza para eliminarlo." #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "El grupo de consistencia %s tiene todavía cgsnapshots dependientes." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "El grupo de consistencia está vacío. No se creará ningún cgsnapshot." #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "El estado del grupo de consistencia debe ser disponible o error, pero el " "estado actual es:%s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" "El estado del grupo de consistencia debe estar disponible, pero el estado " "actual es: %s" #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "No se ha encontrado el ConsistencyGroup %(consistencygroup_id)s." msgid "Container" msgstr "Contenedor" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "Formato de contenedor: el controlador VMDK no da soporte a %s, sólo 'bare' " "se admite." msgid "Container size smaller than required file size." msgstr "Tamaño de contenedor menor que tamaño de archivo necesario." msgid "Content type not supported." msgstr "Tipo de contenido no soportado." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "" "El servicio de configuración de controlador no se ha encontrado en " "%(storageSystemName)s." #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "No se ha podido resolver el IP de controlador '%(host)s': %(e)s." 
#, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "Se ha convertido a %(f1)s, pero ahora el formato es %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" "Se ha convertido a %(vol_format)s, pero ahora el formato es %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "Convertido a sin formato, pero el formato es ahora %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "Se ha convertido a sin formato, pero el formato es ahora %s." msgid "Coordinator uninitialized." msgstr "Coordinador desinicializado." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "La tarea de copia de volumen ha fallado: convert_to_base_volume: id=%(id)s, " "estado=%(status)s." #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "" "La tarea Copiar volumen ha fallado: create_cloned_volume id=%(id)s, status=" "%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "Copiando metadatos de %(src_type)s %(src_id)s a %(vol_id)s." msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "No se ha podido determinar qué punto final Keystone debe utilizarse. Puede " "establecerse en el catálogo de servicio o con la opción de configuración " "cinder.conf 'backup_swift_url'." msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "No se ha podido determinar qué punto final Swift debe utilizarse. Puede " "establecerse en el catálogo de servicio o con la opción de configuración " "cinder.conf 'backup_swift_url'." msgid "Could not find DISCO wsdl file." msgstr "No se ha podido encontrar el archivo wsdl de DISCO." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "No se ha podido encontrar el id de clúster GPFS: %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "" "No se ha podido encontrar el dispositivo de sistema de archivos GPFS: %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" "No se ha podido encontrar un host para el volumen %(volume_id)s con el tipo " "%(type_id)s." #, python-format msgid "Could not find config at %(path)s" msgstr "No se ha podido encontrar configuración en %(path)s" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "" "No se ha podido encontrar la exportación iSCSI para el volumen " "%(volumeName)s." #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "No se ha podido encontrar la exportación iSCSI para el volumen %s" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "No se ha encontrado el destino iSCSI del volumen: %(volume_id)s." #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "" "No se ha podido encontrar la clave en la salida del mandato %(cmd)s: %(out)s." 
#, python-format msgid "Could not find parameter %(param)s" msgstr "No se ha podido encontrar el parámetro %(param)s" #, python-format msgid "Could not find target %s" msgstr "No se ha podido encontrar el destino %s" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "" "No se ha podido encontrar el volumen padre de la instantánea '%s' en la " "matriz." #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "" "No se ha podido encontrar la instantánea exclusiva %(snap)s en el volumen " "%(vol)s." msgid "Could not get system name." msgstr "No se ha podido obtener el nombre del sistema." #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "No se ha podido cargar aplicación de pegar '%(name)s' desde %(path)s " #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "No se puede leer %s. Re-ejecutando con sudo" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "No se ha podido leer la información de la instantánea %(name)s. Código: " "%(code)s. Motivo: %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "" "No se ha podido restaurar el archivo de configuración %(file_path)s: %(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "No se ha podido guardar la configuración en %(file_path)s: %(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "No se ha podido iniciar la instantánea del grupo de consistencia %s." #, python-format msgid "Counter %s not found" msgstr "No se ha encontrado el contador %s " msgid "Create QoS policy error." msgstr "Error al crear la política QoS." #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "La creación de copia de seguridad ha terminado anormalmente, se esperaba el " "estado de copia de seguridad %(expected_status)s pero se ha obtenido " "%(actual_status)s." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "La creación de copia de seguridad ha terminado anormalmente, se esperaba el " "estado de volumen %(expected_status)s pero se ha obtenido %(actual_status)s." msgid "Create consistency group failed." msgstr "Ha fallado la operación de crear un grupo de consistencia." #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "No se admite la creación de volúmenes cifrados con el tipo %(type)s de la " "imagen %(image)s ." msgid "Create export for volume failed." msgstr "Error al crear la exportación de volumen." msgid "Create hostgroup error." msgstr "Error al crear el grupo de host." #, python-format msgid "Create hypermetro error. %s." msgstr "Error al crear el hypermetro. %s." msgid "Create lun error." msgstr "Error al crear el LUN." msgid "Create lun migration error." msgstr "Error al crear la migración de lun." msgid "Create luncopy error." msgstr "Error de crear luncopy." msgid "Create lungroup error." msgstr "Error al crear el grupo de LUN." msgid "Create manager volume flow failed." msgstr "Error al crear un flujo de volumen de gestor." msgid "Create port group error." msgstr "Error al crear el grupo de puertos." msgid "Create replication error." msgstr "Error al crear la replicación." 
#, python-format msgid "Create replication pair failed. Error: %s." msgstr "Crear par de replicación ha fallado. Error: %s." msgid "Create snapshot error." msgstr "Error crear instantánea." #, python-format msgid "Create volume error. Because %s." msgstr "Error al crear volumen. Razón: %s." msgid "Create volume failed." msgstr "La creación de volumen ha fallado." msgid "Creating a consistency group from a source is not currently supported." msgstr "" "Actualmente no se da soporte a crear un grupo de consistencia desde un " "origen." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "Han fallado la creación y activación del conjunto de zonas: (Zone set=" "%(cfg_name)s error=%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "Han fallado la creación y activación del conjunto de zonas: (Zone set=" "%(zoneset)s error=%(err)s)." #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "Creando usos desde %(begin_period)s hasta %(end_period)s" msgid "Current host isn't part of HGST domain." msgstr "El host actual no forma parte del dominio HGST." #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "Host actual no válido para el volumen %(id)s con el tipo %(type)s, migración " "no permitida" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "El host correlacionado actualmente para el volumen %(vol)s está en un grupo " "de hosts no admitido con %(group)s." msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "OBSOLETO: Despliegue v1 de la API de Cinder." msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "EN DESUSO: Despliegue la v2 de la API de Cinder." #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "Erro de controlador DRBDmanage: no se esperaba la clave \"%s\"en la " "respuesta, ¿se trata de una versión de DRBDmanage incorrecta?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "Error de configuración de controlador DRBDmanage: algunas bibliotecas " "obligatorias (dbus, drbdmanage.*) no encontradas." 
#, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage esperaba un recurso (\"%(res)s\"), ha obtenido %(n)d" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "Tiempo de espera excedido para DRBDmanage esperando el nuevo volumen después " "de restaurar la instantánea; recurso \"%(res)s\", volumen \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "Tiempo de espera excedido para DRBDmanage esperando la creación de la " "instantánea; recurso \"%(res)s\", instantánea \"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "" "Tiempo de espera excedido para DRBDmanage esperando la creación del volumen; " "recurso \"%(res)s\", volumen \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "Tiempo de espera excedido para DRBDmanage esperando el tamaño del volumen; " "ID de volumen \"%(id)s\" (res \"%(res)s\", vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "No se ha podido determinar la versión de API de ONTAP de datos." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" "Los datos ONTAP que funcionan en 7-Mode no dan soporte a los grupos de " "política QoS." msgid "Database schema downgrade is not allowed." msgstr "No se permite degradar el esquema de base de datos." #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "" "El conjunto de datos %s no está compartido en la aplicación Nexenta Store" #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "No se ha encontrado el grupo de conjuntos de datos %s en Nexenta SA" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup es un tipo de aprovisionamiento válido pero requiere que la versión de " "WSAPI '%(dedup_version)s' versión '%(version)s' esté instalada." msgid "Dedup luns cannot be extended" msgstr "No se pueden ampliar los LUN dedup" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "El habilitador de deduplicación no está instalado. No se puede crear un " "volumen deduplicado." msgid "Default pool name if unspecified." msgstr "Nombre de agrupación predeterminado si no se especifica." #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "La cuota predeterminada para el recurso: %(res)s se establece por medio del " "indicador de cuota predeterminada: quota_%(res)s, ahora se ha desaprobado. " "Use la clase de cuota predeterminada para la cuota predeterminada." msgid "Default volume type can not be found." msgstr "No se ha podido encontrar el tipo de volumen predeterminado." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "Define el conjunto de agrupaciones expuestas y sus series de consulta de " "programa de fondo asociadas" msgid "Delete LUNcopy error." msgstr "Error al suprimir LUNcopy." msgid "Delete QoS policy error." msgstr "Error al suprimir la política QoS." 
msgid "Delete associated lun from lungroup error." msgstr "Error al suprimir el LUN asociado del grupo de LUN." #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "La supresión de la copia de seguridad ha terminado anormalmente, el servicio " "de copia de seguridad configurado actualmente [%(configured_service)s] no es " "el servicio de copia de seguridad que se usó para crear esta copia de " "seguridad [%(backup_service)s]." msgid "Delete consistency group failed." msgstr "Ha fallado la supresión del grupo de consistencia." msgid "Delete hostgroup error." msgstr "Error al suprimir el grupo de host." msgid "Delete hostgroup from mapping view error." msgstr "Error al suprimir el grupo de host de la vista de correlaciones." msgid "Delete lun error." msgstr "Error al suprimir lun." msgid "Delete lun migration error." msgstr "Error al suprimir la migración de lun." msgid "Delete lungroup error." msgstr "Error al suprimir el grupo de LUN." msgid "Delete lungroup from mapping view error." msgstr "Error al suprimir el grupo de LUN de la vista de correlaciones." msgid "Delete mapping view error." msgstr "Error al suprimir la vista de correlaciones." msgid "Delete port group error." msgstr "Error al suprimir el grupo de puertos." msgid "Delete portgroup from mapping view error." msgstr "Error al suprimir el grupo de puertos de la vista de correlaciones." msgid "Delete snapshot error." msgstr "Error al suprimir una instantánea." #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "La supresión de instantánea del volumen no se soporta en estado: %s." #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup ha terminado anormalmente, se esperaba el estado de copia de " "seguridad %(expected_status)s pero se ha obtenido %(actual_status)s." msgid "Deleting volume from database and skipping rpc." msgstr "Suprimiendo el volumen de la base de datos y omitiendo rpc." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "Ha fallado la supresión de zonas: (command=%(cmd)s error=%(err)s)." msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "" "Dell API 2.1 o superior necesario para soporte del grupo de consistencia." msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "" "No se da soporte a la replicación de errores de configuración del " "controlador Cinder de Dell con conexión directa." #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Error de configuración del controlador Cinder de Dell, no se ha encontrado " "el dispositivo de replicación (replication_device) %s" msgid "Deploy v3 of the Cinder API." msgstr "Despliegue la v3 de la API de Cinder." msgid "Describe-resource is admin only functionality" msgstr "El recurso de descripción es funcionalidad sólo de administrador" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "El destino tiene migration_status %(stat)s, esperado %(exp)s." msgid "Destination host must be different than the current host." msgstr "El host de destino debe ser diferente del host actual." msgid "Destination volume not mid-migration." msgstr "El volumen de destino no mid-migration." 
msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "Error al desconectar el volumen: más de una conexión, pero ningún " "attachment_id proporcionado." msgid "Detach volume from instance and then try again." msgstr "Desconecte el volumen de la instancia y vuelva a intentarlo." #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "Se ha detectado más de un volumen con el nombre %(vol_name)s" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "No se ha encontrado la columna esperada en %(fun)s: %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "No se ha encontrado la clave esperada %(key)s en %(fun)s: %(raw)s." msgid "Disabled reason contains invalid characters or is too long" msgstr "Disabled reason contiene caracteres inválidos o es demasiado larga." #, python-format msgid "Domain with name %s wasn't found." msgstr "No se ha encontrado el dominio con el nombre %s." #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Downlevel GPFS Cluster detectado. La característica GPFS Clone no está " "habilitada en nivel cluster daemon %(cur)s - debe estar al menos en nivel " "%(min)s." #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "El controlador no ha podido inicializar la conexión (error: %(err)s)." msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "El controlador no puede realizar la reescritura porque el volumen (LUN {}) " "tiene una instantánea que está prohibido migrar." msgid "Driver must implement initialize_connection" msgstr "El controlador debe implementar initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "El controlador ha decodificado correctamente los datos de la copia de " "seguridad importados, pero faltan campos (%s)." #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "La API de proxy E-series versión %(current_version)s no da soporte a todo el " "conjunto de especificaciones adicionales SSC. La versión de proxy debe ser " "como mínimo %(min_version)s." #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "Excepción de CLI del controlador de EMC VNX Cinder: %(cmd)s (Código de " "retorno: %(rc)s) (Salida: %(out)s)." #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" "Excepción SPUnavailableException del controlador de EMC VNX Cinder: %(cmd)s " "(Código de retorno: %(rc)s) (Salida: %(out)s)." msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword deben tener valores " "válidos." #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "Debe proporcionar 'cgsnapshot_id' o 'source_cgid' para crear el grupo de " "consistencia %(name)s del origen." 
#, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "El SLO: %(slo)s o la carga de trabajo %(workload)s no son válidos. Examine " "sentencias de error anteriores para valores válidos." msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "Se necesita hitachi_serial_number o hitachi_unit_name." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "" "El servicio de composición de elementos no se ha encontrado en " "%(storageSystemName)s" msgid "Enables QoS." msgstr "Habilita la calidad de servicio." msgid "Enables compression." msgstr "Habilita la compresión." msgid "Enables replication." msgstr "Habilita la réplica." msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "Asegúrese de que configfs está montado en /sys/kernel/config." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al añadir el iniciador: %(initiator)s en groupInitiatorGroup: " "%(initiatorgroup)s Código de retorno: %(ret.status)d Mensaje: %(ret.data)s ." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Error al añadir TargetGroup: %(targetgroup)s con IQN: %(iqn)s Código de " "retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Error al conectar el volumen %(vol)s." #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Error al clonar la instantánea: %(snapshot)s en el volumen: %(lun)s de la " "agrupación: %(pool)s Proyecto: %(project)s Clonar proyecto: %(clone_proj)s " "Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Error al crear el volumen clonado: %(cloneName)s. Código de retorno: " "%(rc)lu. Error: %(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al crear el volumen clonado: Volumen: %(cloneName)s Origen Volumen: " "%(sourceName)s. Código de retorno: %(rc)lu. Error: %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al crear el grupo: %(groupName)s. Código de retorno: %(rc)lu. Error: " "%(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Error al crear la vista de máscara: %(groupName)s. Código de retorno: " "%(rc)lu. Error: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al crear el volumen: %(volumeName)s. Código de retorno: %(rc)lu. " "Error: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al crear el volumen: %(volumename)s. Código de retorno: %(rc)lu. " "Error: %(error)s." #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. 
Error: %(error)s." msgstr "" "Error CreateGroupReplica: origen: %(source)s destino: %(target)s. Código de " "retorno: %(rc)lu. Error: %(error)s." #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al crear el iniciador: %(initiator)s en el Alias: %(alias)s Código de " "retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al crear el proyecto: %(project)s en la agrupación: %(pool)s Código de " "retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al crear la propiedad: %(property)s Tipo: %(type)s Descripción: " "%(description)s Código de retorno: %(ret.status)d Mensaje: %(ret.data)s ." #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Error al crear la unidad compartida: %(name)s Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al crear la instantánea: %(snapshot)s en el volumen: %(lun)s en la " "agrupación: %(pool)s Proyecto: %(project)s Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al crear la instantánea: %(snapshot)s de la unidad compartida: " "%(share)s en la agrupación: %(pool)s Proyecto: %(project)s Código de " "retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Error al crear el destino: %(alias)s Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s." #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al crear TargetGroup: %(targetgroup)s con IQN: %(iqn)s Código de " "retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Error al crear el volumen: %(lun)s Tamaño: %(size)s Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al crear el nuevo volumen compuesto. Código de retorno: %(rc)lu. " "Error: %(error)s." #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al crear la acción de réplica en: agrupación: %(pool)s Proyecto: " "%(proj)s volumen: %(vol)s para destino: %(tgt)s y agrupación: %(tgt_pool)s " "código de retorno: %(ret.status)d Mensaje: %(ret.data)s." msgid "Error Creating unbound volume on an Extend operation."
msgstr "Error al crear un volumen desenlazado en una operación de ampliación" msgid "Error Creating unbound volume." msgstr "Error al crear el volumen desenlazado." #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al suprimir el volumen: %(volumeName)s. Código de retorno: %(rc)lu. " "Error: %(error)s." #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "Error al suprimir el grupo: %(storageGroupName)s. Código de retorno: " "%(rc)lu. Error: %(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "Error al suprimir el grupo de iniciadores: %(initiatorGroupName)s. Código de " "retorno: %(rc)lu. Error: %(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al suprimir la instantánea: %(snapshot)s en la unidad compartida: " "%(share)s en la agrupación: %(pool)s Proyecto: %(project)s Código de " "retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al suprimir la instantánea: %(snapshot)s en el volumen: %(lun)s en la " "agrupación: %(pool)s Proyecto: %(project)s Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Error al suprimir el volumen: %(lun)s de la agrupación: %(pool)s, Proyecto: " "%(project)s. Código de retorno: %(ret.status)d, Mensaje: %(ret.data)s." #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Error al suprimir el proyecto: %(project)s enla agrupación: %(pool)s Código " "de retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Error al suprimir la acción de réplica: %(id)s Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al extender el volumen: %(volumeName)s. Código de retorno: %(rc)lu. " "Error: %(error)s." #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al obtener los iniciadores: InitiatorGroup: %(initiatorgroup)s Código " "de retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "Error al obtener las estadísticas de la agrupación: Agrupación: %(pool)s " "Código de retorno: %(status)d Mensaje: %(data)s." #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al obtener el estado del proyecto: agrupación: %(pool)s Proyecto: " "%(project)s Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." 
#, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al obtener la unidad compartida: %(share)s en la agrupación: %(pool)s " "Proyecto: %(project)s Código de retorno: %(ret.status)d Mensaje: " "%(ret.data)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al obtener la instantánea: %(snapshot)s en el volumen: %(lun)s en la " "agrupación: %(pool)s Proyecto: %(project)s Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s." #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Error al obtener el destino: %(alias)s Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s." #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al obtener el volumen: %(lun)s en la agrupación: %(pool)s Proyecto: " "%(project)s Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Error al migrar el volumen de una agrupación a otra. Código de retorno: " "%(rc)lu. Error: %(error)s." #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Error al modificar la vista de máscara: %(groupName)s. Código de retorno: " "%(rc)lu. Error: %(error)s." #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "" "Error en la propiedad de la agrupación: La agrupación %(pool)s no es " "propiedad de %(host)s." #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al establecer las propiedades. Propiedades: %(props)s en el volumen: " "%(lun)s de la agrupación: %(pool)s Proyecto: %(project)s Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al terminar la sesión de migración. Código de retorno: %(rc)lu. " "Error: %(error)s." #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Error al verificar el iniciador: %(iqn)s Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s." #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Error al verificar la agrupación: %(pool)s Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s." #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Error al verificar el proyecto: %(project)s en la agrupación: %(pool)s " "Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Error al verificar el servicio: %(service)s Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." 
#, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Error al verificar el destino: %(alias)s Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s." #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Error al verificar la unidad compartida: %(share)s en el Proyecto: " "%(project)s y la Agrupación: %(pool)s Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s ." #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "Error al añadir el volumen: %(volumeName)s con vía de acceso de instancia: " "%(volumeInstancePath)s." #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Error al añadir el iniciador al grupo: %(groupName)s. Código de retorno: " "%(rc)lu. Error: %(error)s." #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "" "Error al añadir el volumen al volumen compuesto. El error es: %(error)s." #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "Error al añadir el volumen %(volumename)s al volumen base de destino" #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "Error al asociar el grupo de almacenamiento: %(storageGroupName)s con la " "política fast: %(fastPolicyName)s con la descripción de error: %(errordesc)s." #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "" "Error al conectar el volumen %s. Podría alcanzarse el límite de destino." #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Error al interrumpir la relación de clonación: Nombre de sincronización: " "%(syncName)s Código de retorno: %(rc)lu. Error: %(error)s." msgid "Error connecting to ceph cluster." msgstr "Error al conectarse con un clúster ceph." #, python-format msgid "Error connecting via ssh: %s" msgstr "Error al conectar mediante ssh: %s" #, python-format msgid "Error creating volume: %s." msgstr "Error al crear el volumen: %s." msgid "Error deleting replay profile." msgstr "Error al suprimir el perfil de reproducción." #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "Error al suprimir el volumen %(ssn)s: %(volume)s" #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "Error al suprimir el volumen %(vol)s: %(err)s." #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "Error durante el análisis de evaluador: %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Error al editar la unidad compartida: %(share)s en la agrupación: %(pool)s " "Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "Error al habilitar iSER para NetworkPortal: asegúrese de que hay soporte " "para RDMA en el puerto iSCSI %(port)d en la IP %(ip)s." 
#, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "" "Se ha encontrado un error durante la limpieza de una conexión anómala: %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "Error al ejecutar la API de CloudByte [%(cmd)s], Error: %(err)s." msgid "Error executing EQL command" msgstr "Error al ejecutar el mandato EQL" #, python-format msgid "Error executing command via ssh: %s" msgstr "Error al ejecutar mandato mediante ssh: %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "Error al ampliar el volumen %(vol)s: %(err)s." #, python-format msgid "Error extending volume: %(reason)s" msgstr "Error al extender volumen: %(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "Error al buscar %(name)s" #, python-format msgid "Error finding %s." msgstr "Error al buscar %s" #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al configurar ReplicationSettingData. Código de retorno: %(rc)lu. " "Error: %(error)s." msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Error al obtener los detalles de la versión de dispositivo. Código de " "retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "" "Error al obtener el ID de dominio a partir del nombre %(name)s: %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "" "Error al obtener el ID de dominio a partir del nombre %(name)s: %(id)s." msgid "Error getting initiator groups." msgstr "Error al obtener los grupos de iniciador." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "" "Error al obtener el ID de agrupación a partir del nombre %(pool)s: %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "" "Error al obtener el ID de agrupación a partir del nombre %(pool_name)s: " "%(err_msg)s." #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Error al obtener la acción de réplica: %(id)s. Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Error al obtener los detalles de origen de réplica. Código de retorno: %(ret." "status)d Mensaje: %(ret.data)s." msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Error al obtener los detalles de destino de réplica. Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Error al obtener la versión: svc: %(svc)s. Código de retorno: %(ret.status)d " "Mensaje: %(ret.data)s." #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" "Error en la operación [%(operation)s] para el volumen [%(cb_volume)s] en el " "almacenamiento de CloudByte: [%(cb_error)s], código de error: " "[%(error_code)s]." 
#, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "Error en respuesta de API SolidFire: data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "Error en la creación de espacio para %(space)s de tamaño %(size)d GB" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "" "Error en space-extend para el volumen %(space)s con %(size)d GB adicionales" #, python-format msgid "Error managing volume: %s." msgstr "Error al gestionar el volumen: %s." #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "Error al correlacionar el volumen %(vol)s. %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Error al modificar la sincronización de réplica: %(sv)s operación: " "%(operation)s. Código de retorno: %(rc)lu. Error: %(error)s." #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Error al modificar el servicio: %(service)s Código de retorno: " "%(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al mover el volumen: %(vol)s del proyecto de origen: %(src)s al " "proyecto de destino: %(tgt)s Código de retorno: %(ret.status)d Mensaje: " "%(ret.data)s ." msgid "Error not a KeyError." msgstr "El error no es un KeyError." msgid "Error not a TypeError." msgstr "El error no es un TypeError." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "Se producido un error al crear el cgsnapshot %s." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "Se producido un error al suprimir el cgsnapshot %s." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "Se ha producido un error al actualizar el grupo de consistencia %s." #, python-format msgid "Error parsing config file: %s" msgstr "Error al analizar el archivo de configuración: %s" msgid "Error promoting secondary volume to primary" msgstr "Error al promocionar el volumen secundario al primario" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "Error al eliminar el volumen %(vol)s. %(error)s." #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Error al cambiar el nombre del volumen %(vol)s: %(err)s." #, python-format msgid "Error response: %s" msgstr "Respuesta de tipo error: %s" msgid "Error retrieving volume size" msgstr "Error al recuperar el tamaño de volumen" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al enviar una actualización de réplica para el ID de acción: %(id)s. " "Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" "Error al enviar la actualización de réplica. Error devuelto: %(err)s. " "Acción: %(id)s." #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." 
msgstr "" "Error al establecer la herencia de réplica en %(set)s para el volumen: " "%(vol)s del proyecto %(project)s Código de retorno: %(ret.status)d Mensaje: " "%(ret.data)s ." #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Error al cortar el paquete: %(package)s del origen: %(src)s Código de " "retorno: %(ret.status)d Mensaje: %(ret.data)s." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "Error al desenlazar el volumen %(vol)s de la agrupación. %(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "Error al verificar el tamaño de clon en el clon de volumen: %(clone)s " "Tamaño: %(size)d onSnapshot: %(snapshot)s" #, python-format msgid "Error while authenticating with switch: %s." msgstr "Error durante la autenticación con el conmutador: %s." #, python-format msgid "Error while changing VF context %s." msgstr "Error al cambiar el contexto de la VF %s." #, python-format msgid "Error while checking the firmware version %s." msgstr "Error al comprobar la versión de firmware %s." #, python-format msgid "Error while checking transaction status: %s" msgstr "Error al comprobar estado de transacción: %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "Error al comprobar si la VF está disponible para gestión %s." #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "Error al conectar el conmutador %(switch_id)s con el protocolo %(protocol)s. " "Error: %(error)s." #, python-format msgid "Error while creating authentication token: %s" msgstr "Error al crear el token de autenticación: %s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "Error al crear la instantánea [estado] %(stat)s - [resultado] %(res)s." #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "Error al crear el volumen [estado] %(stat)s - [resultado] %(res)s." #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "" "Error al suprimir la instantánea [estado] %(stat)s - [resultado] %(res)s." #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "Error al suprimir el volumen [estado] %(stat)s - [resultado] %(res)s." #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "Error al extender el volumen [estado] %(stat)s - [resultado] %(res)s." #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "" "Error al obtener detalles de %(op)s, se ha devuelto el código: %(status)s." #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "Error al obtener datos mediante ssh: (command=%(cmd)s error=%(err)s)." #, python-format msgid "Error while getting disco information [%s]." msgstr "Error al obtener la información de Disco [%s]." #, python-format msgid "Error while getting nvp value: %s." msgstr "Error al obtener el valor nvp: %s." #, python-format msgid "Error while getting session information %s." msgstr "Error al obtener la información de sesión %s." #, python-format msgid "Error while parsing the data: %s." 
msgstr "Error al analizar los datos: %s" #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "" "Error al consultar a la página %(url)s sobre el conmutador, motivo%(error)s." #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "Error al eliminar las zonas y cfg de la cadena de zonas: %(description)s." #, python-format msgid "Error while requesting %(service)s API." msgstr "Error al solicitar la API de %(service)s." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "" "Error al ejecutar CLI de distribución en zonas: (command=%(cmd)s error=" "%(err)s)." #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" "Error al actualizar las nuevas zonas y cfg en la cadena de zonas. Error " "%(description)s." msgid "Error writing field to database" msgstr "Error al escribir el campo en la base de datos" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "Error[%(stat)s - %(res)s] al obtener el ID del volumen." #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "Error[%(stat)s - %(res)s] al restaurar la instantánea [%(snap_id)s] en el " "volumen [%(vol)s]." #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "" "Error[estado] %(stat)s - [resultado] %(res)s] al obtener el ID del volumen." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "Se ha superado el máximo de intentos de planificación %(max_attempts)d para " "el volumen %(volume_id)s" msgid "Exceeded the limit of snapshots per volume" msgstr "Se ha superado el límite de instantáneas por volumen" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "" "Excepción al añadir el volumen meta al volumen de destino %(volumename)s." #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "Exception durante la creación de réplica de elemento. Nombre de clonación: " "%(cloneName)s Nombre de origen : %(sourceName)s Especificaciones " "adicionales: %(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "Excepción en _select_ds_for_volume: %s." #, python-format msgid "Exception while forming the zone string: %s." msgstr "Exception al formar la cadena de zonas: %s." #, python-format msgid "Exception: %s" msgstr "Excepción: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Se esperaba un uuid pero se ha recibido %(uuid)s." #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "Se esperaba exactamente un volumen denominado \"%s\"" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "Se esperaba un entero para node_count, svcinfo lsiogrp ha devuelto: %(node)s." #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" "No se esperaba ninguna salida del mandato CLI %(cmd)s, se ha obtenido " "%(out)s." #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." 
msgstr "" "Se esperaba que se devolviera un disco virtual único de lsvdisk al filtrar " "en vdisk_UID. Se han devuelto %(count)s." #, python-format msgid "Expected volume size was %d" msgstr "El tamaño de volumen esperado era %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "La exportación de copia de seguridad ha terminado anormalmente, se esperaba " "el estado de copia de seguridad %(expected_status)s pero se ha obtenido " "%(actual_status)s." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "La exportación de registro ha terminado anormalmente, el servicio de copia " "de seguridad configurado actualmente [%(configured_service)s] no es el " "servicio de copia de seguridad que se usó para crear esta copia de seguridad " "[%(backup_service)s]." msgid "Extend volume error." msgstr "Error al ampliar el volumen." msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "Ampliar el volumen solo es compatible para este controlador cuando no " "existen instantáneas." msgid "Extend volume not implemented" msgstr "Ampliar el volumen no se ha implementado" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "El habilitador de FAST VP no está instalado. No se puede establecer la " "política de capas para el volumen" msgid "FAST is not supported on this array." msgstr "FAST no se admite en esta matriz." msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC es el protocolo pero OpenStack no proporciona wwpns." #, python-format msgid "Faield to unassign %(volume)s" msgstr "No se ha podido desasignar %(volume)s" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "" "No se ha podido crear el volumen de la memoria caché %(volume)s. Error: " "%(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "No se ha podido añadir conexión para tejido=%(fabric)s: Error:%(err)s" msgid "Failed cgsnapshot" msgstr "Ha fallado cgsnapshot" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "No se ha podido crear la instantánea del grupo: %(response)s." #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" "No se ha podido crear la instantánea del volumen %(volname)s: %(response)s." #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "No se ha podido obtener el conjunto de zonas activas del tejido %s." #, python-format msgid "Failed getting details for pool %s." msgstr "No se han podido obtener detalles para la agrupación %s." #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "No se ha podido eliminar conexión para tejido=%(fabric)s: Error:%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "No se ha podido ampliar el volumen %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "No se ha podido iniciar la sesión en 3PAR (%(url)s) debido a %(err)s" msgid "Failed to access active zoning configuration." msgstr "" "No se ha podido acceder a configuración de distribución en zonas activa." 
#, python-format msgid "Failed to access zoneset status:%s" msgstr "No se ha podido acceder al estado de zoneset:%s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "No se ha podido obtener un bloqueo de recurso. (serie: %(serial)s, inst: " "%(inst)s, ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "" "No se ha podido añadir %(vol)s a %(sg)s después de %(retries)s intentos." msgid "Failed to add the logical device." msgstr "No se ha podido añadir el dispositivo lógico." #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "No se ha podido añadir el volumen %(volumeName)s al grupo de consistencia " "%(cgName)s. Código de retorno: %(rc)lu. Error: %(error)s." msgid "Failed to add zoning configuration." msgstr "No se ha podido añadir configuración de distribución en zonas." #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" "Error al asignar el IQN del iniciador iSCSI. (puerto: %(port)s, razón: " "%(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Error al asociar qos_specs: %(specs_id)s con el tipo %(type_id)s." #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" "Se ha encontrado un error en al conectar el destino iSCSI para el volumen " "%(volume_id)s." #, python-format msgid "Failed to backup volume metadata - %s" msgstr "" "No se ha podido realizar la copia de seguridad de los metadatos de volumen - " "%s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "No se ha podido hacer copia de seguridad de metadatos de volumen - Objeto de " "copia de seguridad de metadatos 'backup.%s.meta' ya existe" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "No se ha podido clonar el volumen de la instantánea %s." #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "No se ha podido conectar a %(vendor_name)s Matriz %(host)s: %(err)s" msgid "Failed to connect to Dell REST API" msgstr "No se ha podido conectar con la API REST de Dell" msgid "Failed to connect to array" msgstr "No se ha podido conectar a la matriz" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" "No se ha podido conectar al daemon sheep. Dirección: %(addr)s, puerto: " "%(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "Copiar la imagen al volumen ha fallado: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "No se ha podido copiar los metadatos a volumen: %(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "Error al copiar el volumen, dispositivo de destino no disponible." msgid "Failed to copy volume, source device unavailable." msgstr "Error al copiar el volumen, dispositivo de origen no disponible." #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "" "No se ha podido crear el grupo de consistencia %(cgName)s desde la " "instantánea %(cgSnapshot)s." 
#, python-format msgid "Failed to create IG, %s" msgstr "No se ha podido crear IG, %s" msgid "Failed to create SolidFire Image-Volume" msgstr "No se ha podido crear SolidFire Image-Volume" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "No se ha podido crear el grupo de volumen: %(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Error al crear un archivo. (archivo: %(file)s, ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "No se ha podido crear una instantánea temporal para el volumen %s." msgid "Failed to create api volume flow." msgstr "No se ha podido crear flujo de volumen de la API." #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "No se ha podido crear el cgsnapshot %(id)s debido a %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "" "No se ha podido crear el grupo de consistencia %(id)s debido a %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "No se ha podido crear el grupo de consistencia %(id)s:%(ret)s." #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "No se ha podido crear el grupo de consistencia %s porque el grupo de " "consistencia VNX no puede aceptar LUN comprimidos como miembros." #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "No se ha podido crear el grupo de consistencia: %(cgName)s." #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "" "No se ha podido crear el grupo de consistencia: %(cgid)s. Error: %(excmsg)s." #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "No se ha podido crear el grupo de consistencia: %(consistencyGroupName)s " "Código de retorno: %(rc)lu. Error: %(error)s." #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "No se han podido crear los ID de hardware en %(storageSystemName)s." #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "" "No se ha podido crear el host: %(name)s. Compruebe si existe en la matriz." #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "No se ha podido crear el grupo de host: %(name)s. Compruebe si existe en la " "matriz." msgid "Failed to create iqn." msgstr "No se ha podido crear el iqn." #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" "Se ha encontrado un error al crear el destino iscsi para el volumen " "%(volume_id)s." msgid "Failed to create manage existing flow." msgstr "No se ha podido crear la gestión del flujo existente." msgid "Failed to create manage_existing flow." msgstr "No se ha podido crear el flujo manage_existing." msgid "Failed to create map on mcs, no channel can map." msgstr "" "No se ha podido crear la correlación en mcs, ningún canal se puede " "correlacionar." msgid "Failed to create map." msgstr "No se ha podido crear la correlación." #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "No se ha podido crear los metadatos para volumen: %(reason)s" msgid "Failed to create partition." 
msgstr "No se ha podido crear la partición." #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "Error al crear qos_specs: %(name)s con especificaciones %(qos_specs)s." msgid "Failed to create replica." msgstr "No se ha podido crear la réplica." msgid "Failed to create scheduler manager volume flow" msgstr "No se ha podido crear flujo de volumen de gestor de planificador" #, python-format msgid "Failed to create snapshot %s" msgstr "No se ha podido crear la instantánea %s" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "" "No se ha podido crear la instantánea porque no se ha especificado ningún ID " "de LUN" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "No se ha podido crear una instantánea para cg: %(cgName)s." #, python-format msgid "Failed to create snapshot for volume %s." msgstr "No se ha podido crear una instantánea para el volumen %s." #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "" "No se ha podido crear la política de instantáneas en el volumen %(vol)s: " "%(res)s." #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "" "No se ha podido crear el área de recursos de la instantánea en el volumen " "%(vol)s: %(res)s." msgid "Failed to create snapshot." msgstr "No se ha podido crear la instantánea." #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "No se ha podido crear la instantánea. No se ha encontrado la información del " "volumen de CloudByte para el volumen de OpenStack [%s]." #, python-format msgid "Failed to create south bound connector for %s." msgstr "No se ha podido crear la conexión en sentido sur para %s." #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "No se ha podido crear el grupo de almacenamiento %(storageGroupName)s." #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "" "No se ha podido crear la agrupación ligera, el mensaje de error ha sido: %s" #, python-format msgid "Failed to create volume %s" msgstr "Error al crear volumen %s" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" "No se puede suprimir SI de volume_id: %(volume_id)s porque tiene un par." #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "No se ha podido suprimir un dispositivo lógico. (LDEV: %(ldev)s, razón: " "%(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "No se ha podido suprimir el cgsnapshot %(id)s debido a %(reason)s." #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "" "No se ha podido suprimir el grupo de consistencia %(id)s debido a %(reason)s." #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "No se ha podido suprimir el grupo de consistencia: %(cgName)s." #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "No se ha podido suprimir el grupo de consistencia: %(consistencyGroupName)s " "Código de retorno: %(rc)lu. Error: %(error)s." msgid "Failed to delete device." msgstr "No se ha podido suprimir el dispositivo." #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." 
msgstr "" "No se ha podido suprimir el conjunto de archivos para el grupo de " "consistencia %(cgname)s. Error: %(excmsg)s." msgid "Failed to delete iqn." msgstr "No se ha podido suprimir el iqn." msgid "Failed to delete map." msgstr "No se ha podido suprimir la correlación." msgid "Failed to delete partition." msgstr "No se ha podido suprimir la partición." msgid "Failed to delete replica." msgstr "No se ha podido suprimir la réplica." #, python-format msgid "Failed to delete snapshot %s" msgstr "No se ha podido suprimir la instantánea %s" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "No se ha podido suprimir una instantánea para cg: %(cgId)s." #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "" "No se puede suprimir la instantánea de snapshot_id: %s porque tiene un par." msgid "Failed to delete snapshot." msgstr "No se ha podido suprimir la instantánea." #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "No se ha podido suprimir el volumen %(volumeName)s." #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "No se ha podido suprimir el volumen de volume_id: %(volume_id)s porque tiene " "un par." #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "" "Se ha encontrado un error al desconectar el destino iSCSI para el volumen " "%(volume_id)s." msgid "Failed to determine blockbridge API configuration" msgstr "No se ha podido determinar la configuración de API Blockbridge" msgid "Failed to disassociate qos specs." msgstr "Error al desasociar especificaciones de qos." #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Error al desasociar qos_specs: %(specs_id)s con el tipo %(type_id)s." #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "" "No se ha podido garantizar el área de recursos de la instantánea, no se ha " "encontrado el volumen para el ID %s" msgid "Failed to establish SSC connection." msgstr "No se ha podido establecer la conexión SSC." msgid "Failed to establish connection with Coho cluster" msgstr "No se ha podido establecer conexión con el clúster Coho." #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "No se ha podido ejecutar la API de CloudByte [%(cmd)s]. Estado HTTP: " "%(status)s, Error: %(error)s." msgid "Failed to execute common command." msgstr "No se ha podido ejecutar el mandato común." #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "No se ha podido exportar para volumen: %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "" "No se ha podido ampliar el volumen existente %(name)s, Mensaje de error: " "%(msg)s." msgid "Failed to find QoSnode" msgstr "No se ha podido encontrar QoSnode" msgid "Failed to find Storage Center" msgstr "No se ha podido encontrar Storage Center" msgid "Failed to find a vdisk copy in the expected pool." msgstr "" "No se ha podido encontrar una copia de vdisk en la agrupación esperada." msgid "Failed to find account for volume." msgstr "No se ha podido encontrar la cuenta para el volumen." #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." 
msgstr "" "No se ha podido encontrar el conjunto de archivos para la vía de acceso " "%(path)s, salida de mandato: %(cmdout)s." #, python-format msgid "Failed to find group snapshot named: %s" msgstr "No se ha podido encontrar la instantánea de grupo denominada: %s" #, python-format msgid "Failed to find host %s." msgstr "No se ha podido encontrar el host %s." #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" "No se ha podido encontrar el grupo de iniciadores iSCSI que contiene " "%(initiator)s." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "" "No se ha podido encontrar la agrupación de almacenamiento para el volumen de " "origen %s." #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "" "No se han podido obtener los detalles de cuenta de CloudByte para la cuenta " "[%s]." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "" "Se ha encontrado un error en la obtención de los detalles de destino de LUN " "para el LUN %s" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "No se han podido obtener detalles de destino de LUN para el LUN %s." #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "No se ha podido obtener la lista de destino de LUN para el LUN %s" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "No se ha podido obtener el ID de partición del volumen %(volume_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "" "No se ha podido obtener el ID de instantánea Raid de la instantánea " "%(snapshot_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "No se ha podido obtener el ID de instantánea Raid de la instantánea " "%(snapshot_id)s." msgid "Failed to get SplitMirror." msgstr "No se ha podido obtener SplitMirror." #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "Error al obtener un recurso de almacenamiento. El sistema intentará obtener " "el recurso de almacenamiento otra vez. (recurso: %(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "Error al obtener todas las asociaciones de qos specs %s" msgid "Failed to get channel info." msgstr "No se ha podido obtener información de canal." #, python-format msgid "Failed to get code level (%s)." msgstr "No se ha podido obtener el nivel de código (%s)." msgid "Failed to get device info." msgstr "No se ha podido obtener información de dispositivo." #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "No ha obtenido dominio debido a que CPG (%s) no existe en la matriz." msgid "Failed to get image snapshots." msgstr "No se han podido obtener las instantáneas de imagen." #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "" "No se ha podido obtener la IP en el canal %(channel_id)s con el volumen: " "%(volume_id)s." msgid "Failed to get iqn info." msgstr "No se ha podido obtener información de iqn." msgid "Failed to get license info." msgstr "No se ha podido obtener información de licencia." msgid "Failed to get lv info." msgstr "No se ha podido obtener información de volumen lógico." msgid "Failed to get map info." 
msgstr "No se ha podido obtener información de correlación." msgid "Failed to get migration task." msgstr "No se ha podido obtener la tarea de migración." msgid "Failed to get model update from clone" msgstr "" "Se ha encontrado un error en la obtención de la actualización del modelo " "desde el clon" msgid "Failed to get name server info." msgstr "No se ha podido obtener información de servidor de nombres." msgid "Failed to get network info." msgstr "No se ha podido obtener información de red." #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "" "No se ha podido obtener el nuevo ID de part en la nueva agrupación: " "%(pool_id)s." msgid "Failed to get partition info." msgstr "No se ha podido obtener información de partición." #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "" "No se ha podido obtenr el ID de agrupación con el volumen %(volume_id)s." #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" "No se ha podido obtener la información de la copia remota para %(volume)s " "debido a %(err)s." #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "" "No se ha podido obtener la información de la copia remota para %(volume)s. " "Excepción: %(err)s." msgid "Failed to get replica info." msgstr "No se ha podido obtener información de réplica." msgid "Failed to get show fcns database info." msgstr "No se ha podido obtener información de base de datos fcns." msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" "No se ha podido obtener el tamaño del volumen existente: %(vol). Ha fallafo " "la acción de gestionar volumen." #, python-format msgid "Failed to get size of volume %s" msgstr "No se ha podido obtener el tamaño del volumen %s" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "No se ha podido obtener una instantánea para el volumen %s." msgid "Failed to get snapshot info." msgstr "No se ha podido obtener información de instantánea." #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "" "Se ha encontrado un error en la obtención del IQN de destino para el LUN %s" msgid "Failed to get target LUN of SplitMirror." msgstr "No se ha podido obtener el LUN de destino de SplitMirror." #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "" "Se ha encontrado un error en la obtención del portal de destino para el LUN " "%s" msgid "Failed to get targets" msgstr "No se han podido obtener los destinos" msgid "Failed to get wwn info." msgstr "No se ha podido obtener información de wwn." #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "No se ha podido obtener, crear o añadir el volumen %(volumeName)s a la vista " "de máscara %(maskingViewName)s. El mensaje de error recibido ha sido " "%(errorMessage)s." msgid "Failed to identify volume backend." msgstr "No se ha podido identificar el programa de fondo de volumen." #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "" "No se ha podido enlazar el conjunto de archivos para el %(cgname)s " "compartido. Error: %(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "" "No se ha podido iniciar sesión en %s Matriz (¿inicio de sesión no válido?)." 
#, python-format msgid "Failed to login for user %s." msgstr "Error al iniciar sesión para el usuario %s." msgid "Failed to login with all rest URLs." msgstr "No se ha podido iniciar sesión con todos los URL rest." #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "No se ha podido hacer una solicitud al punto final del clúster de Datera " "debido al siguiente motivo: %s" msgid "Failed to manage api volume flow." msgstr "No se ha podido gestionar el flujo de volumen de la API." #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "No se ha podido gestionar %(type)s %(name)s existentes, porque el tamaño " "reportado %(size)s no era un número de coma flotante." #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "No se ha podido gestionar el volumen existente %(name)s, debido a un error " "en la obtención del tamaño del volumen." #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "No se ha podido gestionar el volumen existente %(name)s, porque la operación " "de cambio de nombre ha fallado: Mensaje de error: %(msg)s." #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "No se ha podido gestionar el volumen existente %(name)s, porque el archivo " "indicado %(size)s no era un número de coma flotante." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "No se ha podido gestionar el volumen existente porque la agrupación del tipo " "de volumen seleccionado no coincide con el uso compartido NFS pasado en la " "referencia de volumen." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "No se ha podido gestionar el volumen existente porque la agrupación del tipo " "de volumen seleccionado no coincide con el sistema de archivos pasado en la " "referencia de volumen." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "" "No se ha podido gestionar el volumen existente porque la agrupación del tipo " "de volumen seleccionado no coincide con la agrupación del host." #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "No se ha podido gestionar el volumen existente debido a que el grupo de E/S " "no coincide. El grupo de E/S del volumen a gestionar es %(vdisk_iogrp)s. El " "grupo de E/S del tipo elegido es %(opt_iogrp)s." #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "No se ha podido gestionar el volumen existente debido a que la agrupación " "del volumen a gestionar no coincide con la agrupación del programa fondo. " "La agrupación del volumen a gestionar es %(vdisk_pool)s. La agrupación del " "programa fondo es %(backend_pool)s." 
msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "No se ha podido gestionar el volumen existente debido a que el volumen a " "gestionar es comprimido, pero el tipo de volumen elegido es no comprimido." msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "No se ha podido gestionar el volumen existente debido a que el volumen a " "gestionar es no comprimido, pero el tipo de volumen elegido es comprimido." msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "No se ha podido gestionar el volumen existente debido a que el volumen a " "gestionar no es un grupo de E/S válido." msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "No se ha podido gestionar el volumen existente debido a que el volumen a " "gestionar es pesado, pero el tipo de volumen elegido es ligero." msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "No se ha podido gestionar el volumen existente debido a que el volumen a " "gestionar es ligero, pero el tipo de volumen elegido es pesado." #, python-format msgid "Failed to manage volume %s." msgstr "No se ha podido gestionar el volumen %s." #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "Error al correlacionar un dispositivo lógico. (LDEV: %(ldev)s, LUN: %(lun)s, " "puerto: %(port)s, id: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "No se ha podido migrar el volumen por primera vez" msgid "Failed to migrate volume for the second time." msgstr "No se ha podido migrar el volumen por segunda vez" #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "No se ha podido mover la correlación de LUN. Código de retorno: %s" #, python-format msgid "Failed to move volume %s." msgstr "No se ha podido mover el volumen %s." #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Error al abrir un archivo. (archivo: %(file)s, ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "No se ha podido analizar la salida de CLI:\n" " mandato: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "Error al analizar la opción de configuración 'keystone_catalog_info'; debe " "tener el formato ::" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "Error al analizar la opción de configuración 'swift_catalog_info'; debe " "tener el formato ::" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "Error al realizar una reclamación de página cero. (LDEV: %(ldev)s, razón: " "%(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "" "No se ha podido eliminar la exportación para el volumen %(volume)s: " "%(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." 
msgstr "" "Se ha encontrado un error al eliminar el destino iscsi para el volumen " "%(volume_id)s." #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "No se ha podido eliminar el volumen %(volumeName)s del grupo de consistencia " "%(cgName)s. Código de retorno: %(rc)lu. Error: %(error)s." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "" "No se ha podido eliminar el volumen %(volumeName)s del SG predeterminado." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "" "No se ha podido eliminar el volumen %(volumeName)s del SG predeterminado: " "%(volumeName)s." #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "No se ha podido eliminar %(volumename)s del grupo de almacenamiento " "predeterminado para la política FAST %(fastPolicyName)s." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "No se ha podido renombrar el volumen lógico %(name)s, el mensaje de error " "era: %(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "" "No se ha podido recuperar la configuración de distribución en zonas activas " "%s" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "" "No se ha podido establecer la autenticación de CHAP para el IQN de destino " "%(iqn)s. Detalles: %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "No se ha podido establecer QoS para el volumen existente %(name)s, Mensaje " "de error: %(msg)s." msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "" "No se ha podido establecer el atributo 'Incoming user' para el destino SCST." msgid "Failed to set partition." msgstr "No se ha podido establecer la partición." #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "No se han podido establecer permisos para el grupo de consistencia " "%(cgname)s. Error: %(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "No se ha podido especificar un dispositivo lógico para el volumen " "%(volume_id)s cuya correlación se va a anular." #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "No se ha podido especificar un dispositivo lógico para suprimir. (method: " "%(method)s, id: %(id)s)" msgid "Failed to terminate migrate session." msgstr "No se ha podido terminar la sesión de migración" #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "No se ha podido desenlazar el volumen %(volume)s" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "No se ha podido desenlazar el conjunto de archivos para el grupo de " "consistencia %(cgname)s. Error: %(excmsg)s." #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "No se ha podido anular correlación de un dispositivo lógico. (LDEV: " "%(ldev)s, razón: %(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." 
msgstr "No se ha podido actualizar el grupo de consistencia :%(cgName)s." #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "No se ha podido actualizar los metadatos para volumen: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "" "No se ha podido actualizar o suprimir la configuración de distribución en " "zonas" msgid "Failed to update or delete zoning configuration." msgstr "" "No se ha podido actualizar o suprimir la configuración de distribución en " "zonas." #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "Error al actualizar qos_specs: %(specs_id)s con especificaciones " "%(qos_specs)s." msgid "Failed to update quota usage while retyping volume." msgstr "No se ha podido actualizar el uso de cuota al rescribir el volumen." msgid "Failed to update snapshot." msgstr "Fallo al actualizar la instantánea." #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "" "No se ha podido actualizar el modelo con modelo proporcionado del " "controlador %(model)s" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "Error al actualizar metadatos de volumen %(vol_id)s con los %(src_type)s " "%(src_id)s metadatos proporcionados" #, python-format msgid "Failure creating volume %s." msgstr "Anomalía al crear el volumen %s." #, python-format msgid "Failure getting LUN info for %s." msgstr "Error al obtener la información de LUN para %s." #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "Anomalía en update_volume_key_value_pair:%s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "Error al mover el nuevo LUN clonado a %s." #, python-format msgid "Failure staging LUN %s to tmp." msgstr "Error de transferencia de LUN %s a tmp." msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "" "Error muy grave: No se permite al usuario consultar los volúmenes de NetApp." #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "Fexvisor no ha podido añadir el volumen %(id)s debido a %(reason)s." #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor no ha podido unir el volumen %(vol)s en el grupo %(group)s debido a " "%(ret)s." #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor no ha podido eliminar el volumen %(vol)s en el grupo %(group)s " "debido a %(ret)s." #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "Fexvisor no ha podido eliminar el volumen %(id)s debido a %(reason)s." #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "Anomalía de búsqueda de SAN de canal de fibra: %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "Ha fallado la operación de zona de canal de fibra: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "Anomalía de control de conexión de canal de fibra: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "No se ha podido encontrar el archivo %(file_path)s." #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." 
msgstr "" "El archivo %(path)s tiene un archivo de respaldo %(bfile)s no válido, " "terminando de forma anormal." #, python-format msgid "File already exists at %s." msgstr "Ya existe el archivo en %s." #, python-format msgid "File already exists at: %s" msgstr "El archivo ya existe en: %s" msgid "Find host in hostgroup error." msgstr "Error al buscar el host en el grupo de host." msgid "Find host lun id error." msgstr "Error al buscar el ID de LUN de host." msgid "Find lun group from mapping view error." msgstr "Error al buscar el grupo de LUN en la vista de correlaciones." msgid "Find lun number error." msgstr "Error al buscar el número de lun." msgid "Find mapping view error." msgstr "Error al buscar la vista de correlaciones." msgid "Find portgroup error." msgstr "Error al buscar el grupo de puertos." msgid "Find portgroup from mapping view error." msgstr "Error al buscar el grupo de puertos en la vista de correlaciones." #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "La política de memoria caché de flash requiere que la versión de WSAPI " "'%(fcache_version)s' versión '%(version)s' esté instalada." #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "" "La desasignación de volumen de Flexvisor ha fallado: %(id)s:%(status)s." #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "La asignación de volumen de Flexvisor ha fallado: %(id)s:%(status)s." #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor no ha podido encontrar la instantánea del volumen %(id)s en la " "instantánea %(vgid)s del grupo %(vgsid)s." #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "" "La creación de volumen de Flexvisor ha fallado: %(volumeid)s:%(status)s." #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor no ha podido suprimir el volumen %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor no ha podido añadir el volumen %(id)s al grupo %(cgid)s." #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor no ha podido asignar el volumen %(id)s porque no ha podido " "consultar el estado por id de suceso." #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor no ha podido asignar el volumen %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "Flexvisor no ha podido asignar al volumen %(volume)s el iqn %(iqn)s." #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor no ha podido clonar el volumen %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "" "Flexvisor no ha podido clonar el volumen (no ha podido obtener el suceso) " "%(id)s." #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" "Flexvisor no ha podido crear la instantánea para el volumen %(id)s: " "%(status)s." #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "Flexvisor no ha podido crear la instantánea para el volumen (no ha podido " "obtener el suceso) %(id)s." 
#, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "Flexvisor no ha podido crear el volumen %(id)s en el grupo %(vgid)s." #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor no ha podido crear el volumen %(volume)s: %(status)s." #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor no ha podido crear el volumen (obtener el suceso) %s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" "Flexvisor no ha podido crear el volumen a partir de la instantánea %(id)s: " "%(status)s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor no ha podido crear el volumen a partir de la instantánea %(id)s: " "%(status)s." #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor no ha podido crear el volumen a partir de la instantánea (no ha " "podido obtener el suceso) %(id)s." #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor no ha podido suprimir la instantánea %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor no ha podido suprimir la instantánea (no ha podido obtener el " "suceso) %(id)s." #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor no ha podido suprimir el volumen %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor no ha podido ampliar el volumen %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor no ha podido ampliar el volumen %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "Flexvisor no ha podido ampliar el volumen (no ha podido obtener el suceso) " "%(id)s." #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "" "Flexvisor no ha podido obtener la información de agrupación %(id)s: " "%(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Flexvisor no ha podido obtener el ID de instantánea del volumen %(id)s del " "grupo %(vgid)s." #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor no ha podido eliminar el volumen %(id)s del grupo %(cgid)s." #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor no ha podido generar el volumen a partir de la instantánea %(id)s:" "%(status)s." #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor no ha podido generar el volumen a partir de la instantánea (no ha " "podido obtener el suceso) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "Flexvisor no ha podido desasignar el volumen %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" "Flexvisor no ha podido desasignar el volumen (obtener el suceso) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor no ha podido desasignar el volumen: %(id)s:%(status)s." 
#, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "" "Flexvisor no ha podido encontrar la información del volumen de origen %(id)s." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" "La desasignación de volumen de Flexvisor ha fallado: %(id)s:%(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "El volumen de Flexvisor %(id)s no se ha podido unir al grupo %(vgid)s." #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "La carpeta %s no existe en la aplicación Nexenta Store" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS no se está ejecutando, estado: %s." msgid "Gateway VIP is not set" msgstr "No se ha establecido el VIP de pasarela." msgid "Get FC ports by port group error." msgstr "Error al obtener los puertos FC por grupo de puertos." msgid "Get FC ports from array error." msgstr "Error al obtener los puertos FC de la matriz." msgid "Get FC target wwpn error." msgstr "Error al obtener wwpn de destino FC." msgid "Get HyperMetroPair error." msgstr "Error al obtener HyperMetroPair." msgid "Get LUN group by view error." msgstr "Error al obtener el grupo de LUN por vista." msgid "Get LUNcopy information error." msgstr "Error al obtener información de LUNcopy." msgid "Get QoS id by lun id error." msgstr "Error al obtener el ID QoS por ID de lun." msgid "Get QoS information error." msgstr "Error al obtener información QoS." msgid "Get QoS policy error." msgstr "Error al obtener política QoS." msgid "Get SplitMirror error." msgstr "Error al obtener SplitMirror." msgid "Get active client failed." msgstr "La operación Obtener cliente activo ha fallado." msgid "Get array info error." msgstr "Error al obtener información de la matriz." msgid "Get cache by name error." msgstr "Error al obtener la caché por nombre." msgid "Get connected free FC wwn error." msgstr "Error al obtener wwn FC libre conectado." msgid "Get engines error." msgstr "Error al obtener los motores." msgid "Get host initiators info failed." msgstr "Error al obtener la información de los iniciadores del host." msgid "Get hostgroup information error." msgstr "Error al obtener la información de grupo de host." msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "Error al obtener información de puerto iSCSI, compruebe la IP de destino " "configurada en el archivo config de huawei ." msgid "Get iSCSI port information error." msgstr "Error al obtener información de puerto iSCSI." msgid "Get iSCSI target port error." msgstr "Error al obtener el puerto de destino iSCSI." msgid "Get lun id by name error." msgstr "Error al obtener el LUN por nombre." msgid "Get lun migration task error." msgstr "Error al obtener la tarea de migración de lun." msgid "Get lungroup id by lun id error." msgstr "Error al obtener el ID de grupo de LUN por ID de lun." msgid "Get lungroup information error." msgstr "Error al obtener la información de grupo de LUN." msgid "Get migration task error." msgstr "Error al obtener la tarea de migración." msgid "Get pair failed." msgstr "Obtener par ha fallado." msgid "Get partition by name error." msgstr "Error al obtener partición por nombre." msgid "Get partition by partition id error." msgstr "Error al obtener partición por ID de partición." msgid "Get port group by view error." msgstr "Error al obtener el grupo de puertos por vista." msgid "Get port group error." 
msgstr "Error al obtener el grupo de puertos." msgid "Get port groups by port error." msgstr "Error al obtener los grupos de puertos por puerto." msgid "Get ports by port group error." msgstr "Error al obtener los puertos por grupo de puertos." msgid "Get remote device info failed." msgstr "Obtener dispositivo remoto ha fallado." msgid "Get remote devices error." msgstr "Error al obtener los dispositivos remotos." msgid "Get smartcache by cache id error." msgstr "Error al obtener smartcache por ID de caché." msgid "Get snapshot error." msgstr "Error al obtener la instantánea." msgid "Get snapshot id error." msgstr "Error al obtener el ID de instantánea." msgid "Get target IP error." msgstr "Error al obtener la IP de destino." msgid "Get target LUN of SplitMirror error." msgstr "Error al obtener el LUN de destino de SplitMirror." msgid "Get views by port group error." msgstr "Error al obtener vistas por grupo de puertos." msgid "Get volume by name error." msgstr "Error al obtener el volumen por nombre." msgid "Get volume error." msgstr "Error al obtener el volumen." #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Los metadatos de Glance no se pueden actualizar, la clave %(key)s existe " "para el ID de volumen %(volume_id)s" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" "No se ha podido encontrar los metadatos de vistazo para los metadatos/" "instantánea %(id)s." #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "El archivo de configuración de Gluster en %(config)s no existe" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Error de la api de Google Cloud Storage: %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Error de conexión de Google Cloud Storage: %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Error de oauth2 de Google Cloud Storage: %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "" "Se ha obtenido información incorrecta sobre vía de acceso de DRBDmanage (%s)" msgid "HBSD error occurs." msgstr "Se ha producido un error HBSD." msgid "HNAS has disconnected SSC" msgstr "HNAS ha desconectado SSC" msgid "HPELeftHand url not found" msgstr "URL de HPELeftHand no encontrado" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." msgstr "" "Se ha solicitado la verificación de certificados HTTPS, pero no se puede " "habilitar con la versión de módulo de purestorage %(version)s. Actualícese a " "una nueva versión para habilitar esta característica." #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "El tamaño de bloque de hash ha cambiado desde la última copia de seguridad. " "Nuevo tamaño de bloque hash: %(new)s. Tamaño de bloque hash antiguo: " "%(old)s. Haga una copia de seguridad completa." #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "No se han creado los niveles %(tier_levels)s." #, python-format msgid "Hint \"%s\" not supported." msgstr "Sugerencia \"%s\" no soportada." msgid "Host" msgstr "Host" #, python-format msgid "Host %(host)s could not be found." 
msgstr "No se ha podido encontrar el host %(host)s." #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "El host %(host)s no coincide con el contenido del certificado x509: " "CommonName %(commonName)s." #, python-format msgid "Host %s has no FC initiators" msgstr "El host %s no tiene ningún iniciador FC" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "El host %s no tiene ningún iniciador iSCSI" #, python-format msgid "Host '%s' could not be found." msgstr "El host '%s' no se ha encontrado." #, python-format msgid "Host group with name %s not found" msgstr "No se ha encontrado el grupo de host con el nombre %s." #, python-format msgid "Host group with ref %s not found" msgstr "No se ha encontrado el grupo de host con ref %s" msgid "Host is NOT Frozen." msgstr "El host NO está inmovilizado." msgid "Host is already Frozen." msgstr "El host ya está inmovilizado." msgid "Host not found" msgstr "Host no encontrado" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" "No se ha encontrado el host. No se ha podido eliminar %(service)s en " "%(host)s." #, python-format msgid "Host replication_status must be %s to failover." msgstr "" "El estado de replicación (replication_status) del host debe ser %s para " "poder realizar la migración tras error." #, python-format msgid "Host type %s not supported." msgstr "El tipo de host %s no se soporta." #, python-format msgid "Host with ports %(ports)s not found." msgstr "No se ha encontrado el host con los puertos %(ports)s." msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "No se puede utilizar Hipermetro y Replicación en el mismo volume_type." #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "El grupo de E/S %(iogrp)d no es válido; los grupos de E/S disponibles son " "%(avail)s." msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "Dirección IP/nombre de host de la API of Blockbridge." msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "Si compression se define como True, rsize también debe definirse (distinto a " "-1)." msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "Si nofmtdisk está definido a True, rsize también se debe definir a -1." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "Valor no permitido '%(prot)s' especificado para " "flashsystem_connection_protocol: Los valores válidos son %(enabled)s." msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "Se ha especificado un valor no permitido en IOTYPE: 0, 1 o 2." msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "Valor no válido especificado para smarttier: establezca 0, 1, 2 o 3." msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "Valor ilegal especificado para storwize_svc_vol_grainsize: establecido en " "32, 64, 128 o 256." msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "Valor no válido especificado en thin: No se puede establecer thin y thick al " "mismo tiempo." #, python-format msgid "Image %(image_id)s could not be found." msgstr "No se ha podido encontrar la imagen %(image_id)s. 
" #, python-format msgid "Image %(image_id)s is not active." msgstr "La imagen %(image_id)s no está activa." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "La imagen %(image_id)s es inaceptable: %(reason)s" msgid "Image location not present." msgstr "Ubicación de imagen no presente." #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "El tamaño virtual de la imgen es %(image_size)d GB y no cabe en un volumen " "de tamaño %(volume_size)dGB." msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "Se ha producido un error de ImageBusy al suprimir el volumen rbd. Puede " "haberse producido debido a una conexión de un cliente que ha colgado y, si " "es así, se puede resolver volviendo a intentar la supresión después de que " "hayan transcurrido 30 segundos." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "Ha fallado la importación de registro, no se puede encontrar el servicio de " "copia de seguridad para realizar la importación. Solicite el servicio " "%(service)s" msgid "Incorrect request body format" msgstr "Formato de cuerpo de solicitud incorrecto" msgid "Incorrect request body format." msgstr "Formato de cuerpo de solicitud incorrecto." msgid "Incremental backups exist for this backup." msgstr "" "Existen copias de seguridad incrementales para esta copia de seguridad." #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Excepción de CLI Infortrend: %(err)s Parám: %(param)s (Código de retorno: " "%(rc)s) (Salida: %(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "Nivel inicial: {}, la política: {} no es válida." msgid "Input type {} is not supported." msgstr "El tipo de entrada {} no está soportado." msgid "Input volumes or snapshots are invalid." msgstr "Los volúmenes de entrada o instantáneas no son válidos." msgid "Input volumes or source volumes are invalid." msgstr "Los volúmenes de entrada o los volúmenes de origen no son válidos." #, python-format msgid "Instance %(uuid)s could not be found." msgstr "No se ha podido encontrar la instancia %(uuid)s." msgid "Insufficient free space available to extend volume." msgstr "No hay suficiente espacio libre disponible para extender el volumen." msgid "Insufficient privileges" msgstr "Privilegios insuficientes" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "" "Valor de intervalo (en segundos) entre los reintentos de conexión al clúster " "ceph." #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "" "Se han especificado puertos %(protocol)s %(port)s no válidos para " "io_port_list." #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Dominio 3PAR no válido: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "Valor ALUA no válido. El valor ALUA debe ser 1 o 0." 
msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" "Arg de ceph no válidos proporcionados para operación rbd de copia de " "seguridad" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "CgSnapshot no válido: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "ConsistencyGroup no válido: %(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "ConsistencyGroup no válido: el estado del grupo de consistencia debe ser " "disponible o error, pero el estado actual es: en uso" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "ConsistencyGroup no válido: El estado del grupo de consistencia debe estar " "disponible, pero el estado actual es: %s." msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" "ConsistencyGroup no válido: No hay host para crear grupo de consistencia" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "Se ha encontrado una versión de API de HPELeftHand no válida:%(found)s). Se " "necesita la versión %(minimum)s o superior para tener soporte para gestionar/" "dejar de gestionar." #, python-format msgid "Invalid IP address format: '%s'" msgstr "Formato de dirección IP no válido: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "Se ha detectado una especificación QoS no válida al obtener la política QoS " "del volumen %s" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "Destino de replicación no válido: %(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "Tipo de autenticación VNX no válido: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Especificación de compartición de Virtuozzo Storage no válida: %r. Debe ser: " "[MDS1[,MDS2],...:/][:PASSWORD]." #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" "Versión de XtremIO no válida %(cur)s, se requiere la versión %(min)s o " "superior" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "" "Se han definido unas cuotas asignadas no válidas en las siguientes cuotas de " "proyecto: %s" msgid "Invalid argument" msgstr "Argumento no válido" msgid "Invalid argument - negative seek offset." msgstr "Argumento no válido - desplazamiento de búsqueda negativo." #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "Argumento no válido - whence=%s no admitido" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "Argumento no válido - whence=%s no admitido." #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "Método de conexión '%(mode)s' inválido para el volumen %(volume_id)s." #, python-format msgid "Invalid auth key: %(reason)s" msgstr "Claves de autenticación inválidas: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "Copia de seguridad no válida: %(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 
'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "URL de API de Barbican no válido: debe tener el formato siguiente: " "'http[s]://|[:puerto]/', el URL especificado es: %s" msgid "Invalid cgsnapshot" msgstr "Cgsnapshot no válido" msgid "Invalid chap user details found in CloudByte storage." msgstr "" "Se han encontrado detalles de usuario chap no válidos en el almacenamiento " "CloudByte." #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "Respuesta de inicialización de conexión no válida del volumen %(name)s" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "" "Respuesta de inicialización de conexión no válida del volumen %(name)s: " "%(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo de contenido invalido %(content_type)s." msgid "Invalid credentials" msgstr "Credenciales no válidas" #, python-format msgid "Invalid directory: %s" msgstr "Directorio no válido: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "Tipo de adaptador de disco no válido: %(invalid_type)s." #, python-format msgid "Invalid disk backing: %s." msgstr "Respaldo de disco no válido: %s." #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "Tipo de disco no válido: %(disk_type)s." #, python-format msgid "Invalid disk type: %s." msgstr "Tipo de disco no válido: %s." #, python-format msgid "Invalid host: %(reason)s" msgstr "Host inválido: %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "Se ha encontrado una versión de hpe3parclient no válida (%(found)s). Se " "requiere la versión %(minimum)s o superior. Ejecute \"pip install --upgrade " "python-3parclient\" para actualizar hpe3parclient." #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "Se ha encontrado una versión no válida de hpelefthandclient (%(found)s). Se " "requiere la versión %(minimum)s o superior. Ejecute 'pip install --upgrade " "python-lefthandclient' para actualizar hpelefthandclient." #, python-format msgid "Invalid image href %(image_href)s." msgstr "Href de imagen %(image_href)s no válida." msgid "Invalid image identifier or unable to access requested image." msgstr "" "El identificador de imagen no es válido o no se ha podido acceder a la " "imagen solicitada." msgid "Invalid imageRef provided." msgstr "Se ha proporcionado una referencia de imagen no válida." msgid "Invalid initiator value received" msgstr "Se ha recibido un valor de iniciador no válido" msgid "Invalid input" msgstr "Entrada no válida" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Entrada inválida recibida: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtro is_public no válido [%s]" #, python-format msgid "Invalid lun type %s is configured." msgstr "Se ha configurado un tipo de lun %s no válido." 
#, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Tamaño de metadatos inválido: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Metadatos inválidos: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "Base de punto de montaje no válida: %s" #, python-format msgid "Invalid mount point base: %s." msgstr "Base de punto de montaje no válida: %s." #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "" "Nuevo nombre de snapCPG no válido para la reescritura. new_snap_cpg='%s'." #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Número de puerto no válido %(config)s para el puerto RPC de Coho" #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "" "Se ha configurado un tipo de captación previa %s no válido. El tipo de " "captación previa debe estar dentro de 0,1,2,3." #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "Especificaciones de qos no válidas: %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "Solicitud no válida para adjuntar un volumen a un destino no válido" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "Solicitud no válida para adjuntar un volumen con el modo no válido. " "Adjuntando modo debe ser 'rw' o 'ro'" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Caducidad de reserva no válida %(expire)s." msgid "Invalid response header from RPC server" msgstr "Respuesta no válida procedente del servidor RPC" #, python-format msgid "Invalid secondary id %s." msgstr "ID secundarios no válidos %s." #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "" "Se ha especificado un ID de programa de fondo secundario no válido " "(secondary_backend_id). El ID de programa de fondo válido es %s." msgid "Invalid service catalog json." msgstr "JSON de catálogo de servicios no válido." msgid "Invalid sheepdog cluster status." msgstr "Estado de clúster sheepdog no válido." #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "Instantánea no válida: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "Estado no válido: '%s' " #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "" "Se ha solicitado una agrupación de almacenamiento no válida %s. Ha fallado " "la reescritura." #, python-format msgid "Invalid storage pool %s specificed." msgstr "Se ha especificado una agrupación de almacenamiento no válida %s." msgid "Invalid storage pool is configured." msgstr "Se ha configurado una agrupación de almacenamiento no válida." #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "" "Se ha especificado un modo de sincronización no válido, el modo permitido es " "%s." msgid "Invalid transport type." msgstr "Tipo de transporte no válido." #, python-format msgid "Invalid update setting: '%s'" msgstr "Valor de actualización no válido: '%s' " #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "URL no válido: debe tener el formato siguiente: 'http[s]://|[:" "puerto]/', el URL especificado es: %s" #, python-format msgid "Invalid value '%s' for force." msgstr "Valor no válido %s' para forzar." #, python-format msgid "Invalid value '%s' for force. " msgstr "Valor no válido %s' para forzar." 
#, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "Valor no válido '%s' para is_public. Valores aceptados: True o False." #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "Valor no válido '%s' para skip_validation." #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "Valor no válido para 'bootable': '%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "Valor no válido para 'force': '%s'" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "Valor no válido para 'readonly': '%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "Valor no válido para 'scheduler_max_attempts', debe ser >= 1 " msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "" "Valor no válido para la opción de configuración netapp_host_type de NetApp." msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "" "Valor no válido para la opción de configuración netapp_lun_ostype de NetApp." #, python-format msgid "Invalid value for age, %(age)s" msgstr "Valor no válido para la edad, %(age)s" #, python-format msgid "Invalid value: \"%s\"" msgstr "Valor no válido: \"%s\"" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "Se ha proporcionado un tamaño de volumen no válido para crear la solicitud: " "%s (el argumento de tamaño debe ser un entero (o una representación de " "cadena de un entero) y mayor que cero)." #, python-format msgid "Invalid volume type: %(reason)s" msgstr "Tipo de volumen inválido: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Volumen inválido: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "Volumen no válido: No se puede añadir el volumen %(volume_id)s al grupo de " "consistencia %(group_id)s porque el volumen está en un estado no válido: " "%(status)s. Los estados válidos son: ('available', 'in-use')." #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "Volumen no válido: No se puede añadir el volumen %(volume_id)s al grupo de " "consistencia %(group_id)s porque el grupos de consistencia no soporta el " "tipo de volumen %(volume_type)s ." #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "Volumen no válido: No se puede añadir el volumen fake-volume-uuid al grupo " "de consistencia %(group_id)s porque no se ha encontrado el volumen." #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "Volumen no válido: No se puede eliminar el volumen fake-volume-uuid del " "grupo de consistencia %(group_id)s porque no está en el grupo." #, python-format msgid "Invalid volume_type passed: %s." msgstr "El volume_type no válido ha pasado: %s." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." 
msgstr "" "Se ha proporcionado un tipo volume_type no válido: %s (el tipo solicitado no " "es compatible; debe hacer coincidir el volumen de origen o debe omitir el " "argumento de tipo)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "Se ha proporcionado un tipo volume_type no válido: %s (el tipo solicitado no " "es compatible; se recomienda omitir el argumento de tipo)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "volume_type proporcionado no válido: %s (este grupo de consistencia debe " "soportar el tipo solicitado)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "Formato de wwpns no válido %(wwpns)s" msgid "Invoking web service failed." msgstr "Ha fallado la invocación al servicio web." msgid "Issue encountered waiting for job." msgstr "Se ha detectado un problema al esperar el trabajo." msgid "Issue encountered waiting for synchronization." msgstr "Se ha detectado un problema al esperar la sincronización." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "Se está emitiendo un mensaje de migración tras error fallida porque la " "replicación no está configurada correctamente." msgid "Item not found" msgstr "Elemento no encontrado" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" "No se ha encontrado el ID de trabajo en la respuesta de creación de volumen " "[%s] de CloudByte." #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "" "No se ha encontrado el ID de trabajo en la respuesta de supresión de volumen " "[%s] de CloudByte." msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "Los nombres de clave sólo pueden contener caracteres alfanuméricos, " "subrayados, puntos, dos puntos y guiones." #, python-format msgid "KeyError: %s" msgstr "KeyError: %s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "" "Se debe utilizar la versión 3 o superior de Keystone para tener soporte para " "cuotas anidadas." #, python-format msgid "LU does not exist for volume: %s" msgstr "No existe ningún LU para el volumen: %s" msgid "LUN export failed!" msgstr "Error al exportar LUN." msgid "LUN id({}) is not valid." msgstr "El ID de LUN ({}) no es válido." msgid "LUN map overflow on every channel." msgstr "Desbordamiento de correlación de LUN en todos los canales." #, python-format msgid "LUN not found with given ref %s." msgstr "No se ha encontrado un LUN con la referencia dada %s." msgid "LUN number ({}) is not an integer." msgstr "El número de LUN ({}) no es un entero." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "El número de LUN está fuera de limites en el ID de canal: %(ch_id)s." #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "El LUN con la referencia dada %(ref)s no satisface el tipo de volumen. " "Asegúrese de que el volumen de LUN con las características ssc está presente " "en vserver %(vs)s." 
#, python-format msgid "Last %s cinder syslog entries:-" msgstr "Últimas %s entradas de syslog de Cinder:-" msgid "LeftHand cluster not found" msgstr "Clúster LeftHand no encontrado" msgid "License is unavailable." msgstr "La licencia no está disponible." #, python-format msgid "Line %(dis)d : %(line)s" msgstr "Línea %(dis)d : %(line)s" msgid "Link path already exists and its not a symlink" msgstr "La vía de acceso al enlace existe y no es un symlink" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "No se soporta el clon enlazado del volumen de origen en estado: %s." msgid "Lock acquisition failed." msgstr "Ha fallado la adquisición del bloqueo." msgid "Logout session error." msgstr "Error al cerrar la sesión." msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "Servicio de búsqueda no configurado. La opción de configuración para " "fc_san_lookup_service necesita especificar una implementación concreta del " "servicio de búsqueda." msgid "Lun migration error." msgstr "Error de migración de LUN." #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "" "El MD5 del objeto: %(object_name)s antes de: %(md5)s y después de: %(etag)s " "no es el mismo." #, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED: %r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED: AUTH_ERROR: %r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED: RPC_MISMATCH: %r" #, python-format msgid "Malformed fcns output string: %s" msgstr "Cadena de salida de fcns con formato incorrecto: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Cuerpo de mensaje con formato incorrecto: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "Serie de servidor de nombres mal formada: %s" msgid "Malformed request body" msgstr "Cuerpo de solicitud formado incorrectamente" msgid "Malformed request body." msgstr "Cuerpo de solicitud mal formado." msgid "Malformed request url" msgstr "URL de solicitud formado incorrectamente" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "Respuesta con formato incorrecto para el mandato %(cmd)s: %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "Atributo scheduler_hints formado incorrectamente" #, python-format msgid "Malformed show fcns database string: %s" msgstr "Cadena de base de datos show fcns con formato incorrecto: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "Configuración de zona mal formada: (switch=%(switch)s zone_config=" "%(zone_config)s)." #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "Estado de zona mal formado: (switch=%(switch)s zone_config=%(zone_config)s)." msgid "Manage existing get size requires 'id'." msgstr "La gestión para obtener tamaño necesita 'id'." msgid "Manage existing snapshot not implemented." msgstr "No se ha implementado la gestión de la instantánea existente." 
#, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "La gestión del volumen existente ha fallado porque la referencia de programa " "de fondo no es válida %(existing_ref)s: %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "La gestión del volumen existente ha fallado debido a una discrepancia de " "tipo de volumen: %(reason)s" msgid "Manage existing volume not implemented." msgstr "La gestión de volumen existente no se ha implementado." msgid "Manage existing volume requires 'source-id'." msgstr "La gestión del volumen existente necesita 'source-id'." #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "Gestionar volumen no se admite si FAST está habilitado. Política de FAST: " "%(fastPolicyName)s." msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "No se permite gestionar instantáneas a volúmenes que han dado error." msgid "Map info is None due to array version not supporting hypermetro." msgstr "" "La información de correlación es None debido a que la versión de la matriz " "no admite hypermetro." #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "La preparación de la correlación %(id)s no se ha podido completar en el " "tiempo de espera asignado de %(to)d segundos. Terminando." #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "" "La vista de máscara %(maskingViewName)s no se ha suprimido correctamente" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "" "Se ha superado el número máximo de copias de seguridad permitidas " "(%(allowed)d)" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "Se ha superado el número máximo de volúmenes permitidos (%(allowed)d)" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "Se ha superado el número máximo de volúmenes permitidos (%(allowed)d) para " "la cuota '%(name)s'." #, python-format msgid "May specify only one of %s" msgstr "Puede especificar solo uno de %s" msgid "Metadata backup already exists for this volume" msgstr "La copia de seguridad de metadatos ya existe para este volumen" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "El objeto de copia de seguridad de metadatos '%s' ya existe" msgid "Metadata item was not found" msgstr "No se ha encontrado el elemento metadatos" msgid "Metadata item was not found." msgstr "No se ha encontrado el elemento de metadatos." #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "Clave de propiedad de metadatos %s mayor que 255 caracteres" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "Valor de clave de propiedad de metadatos %s mayor que 255 caracteres" msgid "Metadata property key blank" msgstr "Clave de propiedad de metadatos en blanco" msgid "Metadata property key blank." msgstr "Clave de propiedad de metadatos en blanco" msgid "Metadata property key greater than 255 characters." msgstr "La clave de propiedad de metadatos tiene más de 255 caracteres" msgid "Metadata property value greater than 255 characters." 
msgstr "El valor de propiedad de metadatos tiene más de 255 caracteres" msgid "Metadata restore failed due to incompatible version" msgstr "" "La restauración de metadatos ha fallado debido a la versión incompatible" msgid "Metadata restore failed due to incompatible version." msgstr "" "La restauración de metadatos ha fallado debido a una versión incompatible." #, python-format msgid "Migrate volume %(src)s failed." msgstr "No se ha podido migrar el volumen %(src)s." #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "No se ha podido migrar el volumen entre el volumen de origen %(src)s y el " "volumen de destino %(dst)s." #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "La migración de LUN %s se ha detenido o tiene un error." msgid "MirrorView/S enabler is not installed." msgstr "El habilitador de MirrorView/S no está instalado." msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "Falta el módulo de Python 'purestorage', asegúrese de que la biblioteca está " "instalada y disponible." msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "" "Falta parámetro de configuración de SAN de canal de fibra - fc_fabric_names" msgid "Missing request body" msgstr "Falta el cuerpo de la solicitud" msgid "Missing request body." msgstr "No se ha hallado el cuerpo de la solicitud." #, python-format msgid "Missing required element '%s' in request body" msgstr "Falta el elemento requerido '%s' en el cuerpo de la solicitud" #, python-format msgid "Missing required element '%s' in request body." msgstr "Falta el elemento oblitatorio '%s' en el cuerpo de la solicitud." msgid "Missing required element 'consistencygroup' in request body." msgstr "" "Falta el elemento obligatorio 'consistencygroup' en el cuerpo de la " "solicitud." msgid "Missing required element 'host' in request body." msgstr "Falta el elemento obligatorio 'host' en el cuerpo de la solicitud." msgid "Missing required element quota_class_set in request body." msgstr "Falta el elemento necesario quota_class_set en cuerpo de solicitud." msgid "Missing required element snapshot in request body." msgstr "" "Falta la instantánea de elemento obligatoria en el cuerpo de solicitud." msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "Se han encontrado varios SerialNumbers, cuando sólo se esperaba uno para " "esta operación. Cambie el archivo de configuración EMC." #, python-format msgid "Multiple copies of volume %s found." msgstr "Se han encontrado varias copias del volumen %s." #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "Se han encontrado varias coincidencias para '%s', utilice un ID para ser más " "específico." msgid "Multiple profiles found." msgstr "Se han encontrado varios perfiles." 
msgid "Must implement a fallback schedule" msgstr "Debe de implementar un horario de reserva" msgid "Must implement find_retype_host" msgstr "Debe implementar find_retype_host" msgid "Must implement host_passes_filters" msgstr "Debe implementar host_passes_filters" msgid "Must implement schedule_create_consistencygroup" msgstr "Es necesario implementar schedule_create_consistencygroup" msgid "Must implement schedule_create_volume" msgstr "Debe implementar schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "Es necesario implementar schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." msgstr "Se debe pasar wwpn o host a lsfabric." msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "Debe ejecutar este comando como administrador de nube utilizando un archivo " "policy.json de Keystone, que permite al administrador de nube listar y " "obtener cualquier proyecto." msgid "Must specify 'connector'" msgstr "Debe especificar 'connector'" msgid "Must specify 'connector'." msgstr "Debe especificar 'connector'." msgid "Must specify 'host'." msgstr "Debe especificar 'host'." msgid "Must specify 'new_volume'" msgstr "Debe especificar 'new_volume'" msgid "Must specify 'status'" msgstr "Debe especificar 'status'" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "Debe especificar 'status', 'attach_status' o 'migration_status' para la " "actualización." msgid "Must specify a valid attach status" msgstr "Debe especificar un estado de conexión válido" msgid "Must specify a valid migration status" msgstr "Debe especificar un estado de migración válido" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "Debe especificar un valor válido de persona %(valid)s, el valor " "'%(persona)s' no es válido." #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "Debe especificar un tipo de suministro válido %(valid)s, el valor '%(prov)s' " "no es válido." msgid "Must specify a valid status" msgstr "Debe especificar un estado válido" msgid "Must specify an ExtensionManager class" msgstr "Debe especificar una clase ExtensionManager" msgid "Must specify bootable in request." msgstr "Debe especificar bootable en la solicitud." msgid "Must specify protection domain name or protection domain id." msgstr "" "Se debe especificar el nombre del dominio de protección o el ID del dominio " "de protección." msgid "Must specify readonly in request." msgstr "Debe especificar sólo lectura en solicitud." msgid "Must specify snapshot source-name or source-id." msgstr "" "Se debe especificar el nombre de origen o el ID de origen de la instantánea." msgid "Must specify source-name or source-id." msgstr "Debe especificar source-name o source-id." msgid "Must specify storage pool name or id." msgstr "" "Se debe especificar el nombre o el ID de la agrupación de almacenamiento." msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "Se deben especificar las agrupaciones de almacenamiento. Opción: " "sio_storage_pools." msgid "Must supply a positive value for age" msgstr "Debe proporcionar un valor positivo para la edad" msgid "Must supply a positive, non-zero value for age" msgstr "Debe proporcionar un valor positivo distinto de cero para la edad" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. 
"Must be 'auto', 'true', or 'false'"
msgstr ""
"La configuración de NAS '%(name)s=%(value)s' no es válida. Debe ser 'auto', "
"'true' o 'false'"

#, python-format
msgid "NFS config file at %(config)s doesn't exist"
msgstr "El archivo de configuración de NFS en %(config)s no existe"

#, python-format
msgid "NFS file %s not discovered."
msgstr "Archivo NFS %s no descubierto."

msgid "NFS file could not be discovered."
msgstr "El archivo NFS no se ha podido descubrir."

msgid "NaElement name cannot be null."
msgstr "El nombre de NaElement no puede ser nulo."

msgid "Name"
msgstr "Nombre"

msgid ""
"Name, description, add_volumes, and remove_volumes can not be all empty in "
"the request body."
msgstr ""
"Nombre, descripción, add_volumes y remove_volumes no pueden estar todos "
"vacíos en el cuerpo de la solicitud."

msgid "Need non-zero volume size"
msgstr "Se necesita un tamaño de volumen distinto de cero"

#, python-format
msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r"
msgstr "Ni MSG_DENIED ni MSG_ACCEPTED: %r"

msgid "NetApp Cinder Driver exception."
msgstr "Excepción de controlador NetApp Cinder."

#, python-format
msgid ""
"New size for extend must be greater than current size. (current: %(size)s, "
"extended: %(new_size)s)."
msgstr ""
"El nuevo tamaño para ampliar debe ser mayor que el tamaño actual. (actual: "
"%(size)s, ampliado: %(new_size)s)."

#, python-format
msgid ""
"New size should be bigger than the real size from backend storage. realsize: "
"%(oldsize)s, newsize: %(newsize)s."
msgstr ""
"El nuevo tamaño debe ser mayor que el tamaño real del almacenamiento de "
"fondo. tamaño real: %(oldsize)s, tamaño nuevo: %(newsize)s."

msgid "New volume size must be specified as an integer."
msgstr "Se debe especificar el nuevo tamaño de volumen como un entero."

msgid "New volume type must be specified."
msgstr "Se debe especificar el tipo de volumen nuevo."

msgid "New volume type not specified in request_spec."
msgstr "No se ha especificado el tipo de volumen nuevo en request_spec."

#, python-format
msgid "New volume_type same as original: %s."
msgstr "El nuevo volume_type es igual que el original: %s."

msgid "Nimble Cinder Driver exception"
msgstr "Excepción de controlador Nimble Cinder"

msgid "No FC initiator can be added to host."
msgstr "No se puede añadir ningún iniciador FC al host."

msgid "No FC port connected to fabric."
msgstr "No hay ningún puerto FC conectado al tejido."

msgid "No FCP targets found"
msgstr "No se han encontrado destinos FCP"

msgid "No Port Group elements found in config file."
msgstr ""
"No se han encontrado elementos de grupo de puertos en el archivo de "
"configuración."

msgid "No VF ID is defined in the configuration file."
msgstr "No se ha definido ningún ID de VF en el archivo de configuración."

msgid "No active iSCSI portals with supplied iSCSI IPs"
msgstr "No hay portales iSCSI activos con las IP iSCSI proporcionadas"

#, python-format
msgid "No available service named %s"
msgstr "No hay ningún servicio disponible con el nombre %s"

#, python-format
msgid "No backup with id %s"
msgstr "No hay ninguna copia de seguridad con el ID %s"

msgid "No backups available to do an incremental backup."
msgstr ""
"No hay copias de seguridad disponibles para hacer una copia de seguridad "
"incremental."

msgid "No big enough free disk"
msgstr "No hay ningún disco libre lo suficientemente grande"

#, python-format
msgid "No cgsnapshot with id %s"
msgstr "No hay ningún cgsnapshot con el ID %s"

msgid "No cinder entries in syslog!"
msgstr "No hay entradas de Cinder en syslog" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "" "No se ha encontrado ningún LUN clonado denominado %s en el gestor de archivos" msgid "No config node found." msgstr "No se ha encontrado ningún nodo de configuración." #, python-format msgid "No consistency group with id %s" msgstr "No existe ningún grupo de consistencia con el id %s" #, python-format msgid "No element by given name %s." msgstr "No hay ningún elemento con el nombre indicado %s." msgid "No errors in logfiles!" msgstr "¡No hay errores en los ficheros de log!" #, python-format msgid "No file found with %s as backing file." msgstr "No se ha encontrado el archivo con %s como archivo de respaldo." #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "No queda libre ningún ID de LUN. El número máximo de volúmenes que se puede " "conectar al host (%s) se ha superado." msgid "No free disk" msgstr "No hay disco libre" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "" "No se ha encontrado ningún portal iscsi bueno en la lista proporcionada para " "%s." #, python-format msgid "No good iscsi portals found for %s." msgstr "No se ha encontrado ningún portal iscsi bueno para %s." #, python-format msgid "No host to create consistency group %s." msgstr "No hay host para crear grupo de consistencia%s." msgid "No iSCSI-enabled ports on target array." msgstr "No hay puertos habilitados para iSCSI en la matriz de destino." msgid "No image_name was specified in request." msgstr "" "No se ha especificado ningún nombre de imagen (image_name) en la solicitud." msgid "No initiator connected to fabric." msgstr "No hay ningún iniciador conectado al tejido." #, python-format msgid "No initiator group found for initiator %s" msgstr "No se ha encontrado ningún grupo de iniciadores para el iniciador %s" msgid "No initiators found, cannot proceed" msgstr "No se han encontrado iniciadores, no se puede continuar" #, python-format msgid "No interface found on cluster for ip %s" msgstr "No se ha encontrado ninguna interfaz en el clúster para la IP %s" msgid "No ip address found." msgstr "No se ha encontrado la dirección IP." msgid "No iscsi auth groups were found in CloudByte." msgstr "No se ha encontrado ningún grupo de autenticación iscsi en CloudByte." msgid "No iscsi initiators were found in CloudByte." msgstr "No se ha encontrado ningún iniciador iscsi en CloudByte." #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "" "No se ha encontrado ningún servicio iscsi para el volumen de CloudByte [%s]." msgid "No iscsi services found in CloudByte storage." msgstr "" "No se ha encontrado ningún servicio iscsi en el almacenamiento de CloudByte." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "" "No se ha especificado archivo de claves y no se puede cargar la clave desde " "%(cert)s %(e)s." msgid "No mounted Gluster shares found" msgstr "No se han encontrado unidades compartidas Gluster montadas" msgid "No mounted NFS shares found" msgstr "No se han encontrado unidades compartidas NFS montadas" msgid "No mounted SMBFS shares found." 
msgstr "No se han encontrado unidades compartidas SMBFS montadas" msgid "No mounted Virtuozzo Storage shares found" msgstr "No se han encontrado unidades compartidas de Virtuozzo Storage" msgid "No mounted shares found" msgstr "No se han encontrado unidades compartidas montadas" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "No se ha encontrado ningún nodo en el grupo de E/S %(gid)s del volumen " "%(vol)s." msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "No hay agrupaciones disponibles para el suministro de volúmenes. Asegúrese " "de que la opción de configuración netapp_pool_name_search_pattern se haya " "establecido correctamente." msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "No se ha recibido ninguna respuesta desde la llamada de API de usuario de " "autenticación iSCSI de la lista de almacenamiento CloudByte ." msgid "No response was received from CloudByte storage list tsm API call." msgstr "" "No se ha recibido ninguna respuesta de la llamada a la API de tsm de la " "lista de almacenamiento de CloudByte." msgid "No response was received from CloudByte's list filesystem api call." msgstr "" "No se ha recibido ninguna respuesta de la llamada a la API del sistema de " "archivos de la lista de CloudByte." msgid "No service VIP configured and no nexenta_client_address" msgstr "" "No se ha configurado ningún servicio VIP y no hay ninguna " "nexenta_client_address" #, python-format msgid "No snap found with %s as backing file." msgstr "No se ha encontrado archivo con %s como archivo de respaldo." #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "" "No se ha encontrado ninguna imagen de instantánea en el grupo de " "instantáneas %s." #, python-format msgid "No snapshots could be found on volume %s." msgstr "No se han podido encontrar instantáneas en el volumen %s." #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "No se han proporcionado instantáneas de origen para crear el grupo de " "consistencia %s." #, python-format msgid "No storage path found for export path %s" msgstr "" "No se ha encontrado ninguna vía de acceso de almacenamiento para la vía de " "acceso de exportación %s" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "No hay especificaciones de QoS %(specs_id)s." msgid "No suitable discovery ip found" msgstr "No se ha encontrado ningún IP de descubrimiento adecuado" #, python-format msgid "No support to restore backup version %s" msgstr "No hay soporte para restaurar la versión de copia de seguridad %s" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "" "No se ha encontrado ningún ID de destino para el volumen %(volume_id)s." msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "No hay disponibles ID de LUN sin utilizar en el host; la conexión múltiple " "está habilitada, lo cual requiere que todos los ID de LUN sean exclusivos en " "todo el grupo de hosts." #, python-format msgid "No valid host was found. %(reason)s" msgstr "No se ha encontrado ningún host válido. 
%(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "No hay hosts válidos para el volumen %(id)s con el tipo %(type)s" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "No hay ningún vdisk con el UID especificado en ref %s." #, python-format msgid "No views found for LUN: %s" msgstr "No se ha encontrado ninguna vista para el LUN: %s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "No hay ningún volumen en el clúster con vserver %(vserver)s y vía de acceso " "de cruce %(junction)s " msgid "No volume service(s) started successfully, terminating." msgstr "" "No se ha iniciado correctamente ningún servicio de volumen, terminando." msgid "No volume was found at CloudByte storage." msgstr "No se ha encontrado ningún volumen en el almacenamiento de CloudByte." msgid "No volume_type should be provided when creating test replica." msgstr "" "No debe proporcionarse ningún volume_type cuando se crea la réplica de " "prueba." msgid "No volumes found in CloudByte storage." msgstr "No se ha encontrado ningún volumen en el almacenamiento de CloudByte." msgid "No weighed hosts available" msgstr "No hay hosts ponderados disponibles" #, python-format msgid "Not a valid string: %s" msgstr "Cadena no válida: %s" msgid "Not a valid value for NaElement." msgstr "Valor no válido para NaElement." #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "" "No se puede encontrar un almacén de datos adecuado para el volumen: %s." msgid "Not an rbd snapshot" msgstr "No es una instantánea rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "No está autorizado para la imagen %(image_id)s." msgid "Not authorized." msgstr "No Autorizado" #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "No hay espacio suficiente en el el programa de fondo (%(backend)s)" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "" "No hay espacio de almacenamiento suficiente en la unidad compartida ZFS para " "realizar esta operación." msgid "Not stored in rbd" msgstr "No está almacenado en rbd" msgid "Nova returned \"error\" status while creating snapshot." msgstr "Nova devolvió el estado \"error\" mientras creaba la instantánea." msgid "Null response received from CloudByte's list filesystem." msgstr "" "Se ha recibido una respuesta nula desde el sistema de archivos de la lista " "de CloudByte." msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "" "Se ha recibido una respuesta nula desde los grupos de autenticación iscsi de " "la lista de CloudByte." msgid "Null response received from CloudByte's list iscsi initiators." msgstr "" "Se ha recibido una respuesta nula desde los iniciadores iscsi de la lista de " "CloudByte." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "" "Se ha recibido una respuesta nula desde el servicio iscsi del volumen de la " "lista de CloudByte." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "Respuesta nula recibida al crear el volumen [%s] en el almacenamiento de " "CloudByte." #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" "Respuesta nula recibida al suprimir el volumen [%s] en el almacenamiento de " "CloudByte." 
#, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "Se ha recibido una respuesta nula al realizar la consulta para el trabajo " "basado en [%(operation)s] [%(job)s] en el almacenamiento CloudByte." msgid "Number of retries if connection to ceph cluster failed." msgstr "Número de reintentos si la conexión al clúster ceph ha fallado." msgid "Object Count" msgstr "Recuento de objetos" msgid "Object Version" msgstr "Versión del objeto" msgid "Object is not a NetApp LUN." msgstr "El objeto no es un LUN de NetApp." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "En una operación de ampliación, error al añadir volumen al volumen " "compuesto: %(volumename)s." msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "Uno de los servicios de volumen Cinder es demasiado antiguo para aceptar " "esta solucitud. ¿Está ejecutando volúmenes de Cinder con mezcla de Liberty y " "Mitaka?" msgid "One of the required inputs from host, port or scheme was not found." msgstr "" "No se ha encontrado una de las entradas necesarias procedentes del host, " "puerto o esquema." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "Solo se pueden realizar %(value)s solicitud(es) de %(verb)s para %(uri)s " "cada %(unit_string)s." msgid "Only one limit can be set in a QoS spec." msgstr "Sólo se puede establecer un límite en una especificación QoS." msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "Sólo los usuarios con ámbito de señal para padres inmediatos o proyectos " "root pueden ver las cuotas hijo." msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "" "Solo los volúmenes gestionados por OpenStack pueden dejarse de gestionar." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" "Se ha encontrado un error en la operación con el estado=%(status)s. Volcado " "completo: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "Operación no admitida: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "La opción gpfs_images_dir no se ha establecido correctamente." msgid "Option gpfs_images_share_mode is not set correctly." msgstr "La opción gpfs_images_share_mode no se ha establecido correctamente." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "La opción gpfs_mount_point_base no se ha establecido correctamente." msgid "Option map (cls._map) is not defined." msgstr "La correlación de opciones (cls._map) no está definida." #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "%(res)s %(prop)s de origen debe ser uno de los valores '%(vals)s'" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "" "Sustituya el puerto HTTPS para conectarse al servidor de API Blockbridge." #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" "Nombre de la partición es Ninguno, establezca smartpartition:partitionname " "en clave." msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." 
msgstr "" "La contraseña o clave privada SSH es necesaria para la autenticación: " "establezca la opción san_password o san_private_key." msgid "Path to REST server's certificate must be specified." msgstr "Se debe especificar la vía de acceso al certificado del servidor REST." #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "Cree la agrupación %(pool_list)s con antelación." #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "" "Cree el nivel %(tier_levels)s en la agrupación %(pool)s con antelación." msgid "Please re-run cinder-manage as root." msgstr "Vuelva a ejecutar cinder-manage como root." msgid "Please specify a name for QoS specs." msgstr "Especifique un nombre para especificaciones de QoS." #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "La política no permite realizar %(action)s. " #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "La agrupación %(poolNameInStr)s no se ha encontrado." #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "La agrupación %s no existe en la aplicación Nexenta Store" #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "No se ha encontrado la agrupación del volumen['host'] %(host)s." #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "La agrupación del volumen ['host'] ha fallado con: %(ex)s." msgid "Pool is not available in the volume host field." msgstr "La agrupación no está disponible en el campo del host del volumen." msgid "Pool is not available in the volume host fields." msgstr "La agrupación no está disponibles en los campos de host del volumen." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "" "No se ha encontrado la agrupación con el nombre %(pool)s en el dominio " "%(domain)s." #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" "No se ha encontrado la agrupación con el nombre %(pool_name)s en el dominio " "%(domain_id)s." #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "La agrupación %(poolName)s no está asociada con el nivel de almacenamiento " "de la política fast %(fastPolicy)s." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "PoolName debe estar en el archivo %(fileName)s." #, python-format msgid "Pools %s does not exist" msgstr "La agrupaciones %s no existen" msgid "Pools name is not set." msgstr "No se ha establecido el nombre de agrupaciones." #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "Estado de copia primaria: %(status)s y sincronizado: %(sync)s." msgid "Project ID" msgstr "ID del proyecto" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "" "No se han configurado correctamente las cuotas del proyecto para las cuotas " "anidadas: %(reason)s." msgid "Protection Group not ready." msgstr "El grupo de protección no está preparado." #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "El protocolo %(storage_protocol)s no es admitido para la familia de " "almacenamiento %(storage_family)s." msgid "Provided backup record is missing an id" msgstr "Al registro de copia de seguridad proporcionado le falta un ID." 
#, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "El estado de instantánea proporcionado %(provided)s no está permitido para " "instantánea con estado %(current)s." #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "No se ha encontrado la información del proveedor sobre el almacenamiento de " "CloudByte para el volumen de OpenStack [%s]." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Error de controlador Pure Storage Cinder: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "Las especificaciones de QoS %(specs_id)s ya existen." #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "Especificaciones de QoS %(specs_id)s está asociado con las entidades." #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "La configuración de QoS es incorrecta. %s debe ser > 0." #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "" "La política QoS debe especificar para IOTYPE y otras qos_specs. Política " "QoS: %(qos_policy)s " #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "" "La política QoS debe especificar para IOTYPE el valor: 0, 1, o 2. Política " "QoS: %(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "" "Conflicto entre upper_limit y lower_limit en la política QoS. Política QoS: " "%(qos_policy)s." #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" "La especificación QoS %(specs_id)s no tiene especificación con clave " "%(specs_key)s." msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" "Las especificaciones QoS no se admiten en esta familia de almacenamiento y " "versión ONTAP." msgid "Qos specs still in use." msgstr "Especificaciones de QoS aún en uso." msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "La consulta por parámetro de servicio está en desuso. Use el parámetro " "binario en su lugar." msgid "Query resource pool error." msgstr "Error al consultar la agrupación de recursos." #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "" "El límite de cuota %s debe ser igual o mayor que los recursos existentes." #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "No se ha podido encontrar la clase de cuota %(class_name)s." msgid "Quota could not be found" msgstr "No se ha podido encontrar la cuota" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Cuota superada para recursos: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Cuota excedida: código=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "No se ha podido encontrar la cuota para el proyecto %(project_id)s." 
#, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "Límite de cuota no válido para el proyecto '%(proj)s' para el recurso " "'%(res)s': el límite de %(limit)d es menor que el valor en uso, de %(used)d" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "No se ha podido encontrar la reserva de cuota %(uuid)s." #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "" "No se ha podido encontrar el uso de cuota para el proyecto %(project_id)s." #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "Op. dif. RBD ha fallado - (ret=%(ret)s stderr=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "Se debe especificar la IP de servidor REST." msgid "REST server password must by specified." msgstr "Se debe especificar la contraseña del servidor REST." msgid "REST server username must by specified." msgstr "Se debe especificar el nombre de usuario del servidor REST." msgid "RPC Version" msgstr "Versión de RPC" msgid "RPC server response is incomplete" msgstr "La respuesta del servidor RPC es incompleta" msgid "Raid did not have MCS Channel." msgstr "Raid no tiene el canal MCS." #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "Se ha alcanzado el límite establecido por la opción de configuración " "max_luns_per_storage_group. La operación de añadir %(vol)s al grupo de " "almacenamiento %(sg)s se ha rechazado." #, python-format msgid "Received error string: %s" msgstr "Serie de error recibida: %s" msgid "Reference must be for an unmanaged snapshot." msgstr "La referencia debe ser para una instantánea no gestionada." msgid "Reference must be for an unmanaged virtual volume." msgstr "La referencia debe ser para un volumen virtual no gestionado." msgid "Reference must be the name of an unmanaged snapshot." msgstr "La referencia debe ser el nombre de una instantánea no gestionada." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "" "La referencia debe ser el nombre de volumen de un volumen virtual no " "gestionado." msgid "Reference must contain either source-id or source-name element." msgstr "La referencia debe contener el elemento id-source o source-name." msgid "Reference must contain either source-name or source-id element." msgstr "La referencia debe contener el elemento source-name o source-id." msgid "Reference must contain source-id or source-name element." msgstr "La referencia debe contener el elemento id-source o source-name." msgid "Reference must contain source-id or source-name key." msgstr "La referencia debe contener la clave source-id o source-name." msgid "Reference must contain source-id or source-name." msgstr "La referencia debe contener source-id o source-name." msgid "Reference must contain source-id." msgstr "La referencia debe contener el source-id." msgid "Reference must contain source-name element." msgstr "La referencia debe contener el elemento source-name." msgid "Reference must contain source-name or source-id." msgstr "La referencia debe contener source-name o source-id." msgid "Reference must contain source-name." 
msgstr "La referencia debe contener el elemento source-name." msgid "Reference to volume to be managed must contain source-name." msgstr "" "La referencia al volumen a gestionar debe contener el elemento source-name." #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "" "La referencia al volumen: %s a gestionar debe contener el elemento source-" "name." #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "Rechazando la migración del ID de volumen: %(id)s. Compruebe la " "configuración, ya que el origen y el destino son el mismo grupo de " "volúmenes: %(name)s." msgid "Remote pool cannot be found." msgstr "No se puede encontrar la agrupación remota." msgid "Remove CHAP error." msgstr "Error al eliminar CHAP." msgid "Remove fc from host error." msgstr "Error al eliminar fc del host." msgid "Remove host from array error." msgstr "Error al eliminar el host de la matriz." msgid "Remove host from hostgroup error." msgstr "Error al eliminar el host del grupo de host." msgid "Remove iscsi from host error." msgstr "Error al eliminar iscsi del host." msgid "Remove lun from QoS error." msgstr "Error al eliminar LUN de QoS ." msgid "Remove lun from cache error." msgstr "Error al eliminar lun de la caché." msgid "Remove lun from partition error." msgstr "Error al eliminar lun de la partición." msgid "Remove port from port group error." msgstr "Error al eliminar el puerto del grupo de puertos." msgid "Remove volume export failed." msgstr "Error al eliminar la exportación del volumen." msgid "Rename lun on array error." msgstr "Error al renombrar lun en la matriz." msgid "Rename snapshot on array error." msgstr "Error al renombrar la instantánea en la matriz." #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "Ha fallado la replicación %(name)s a %(ssn)s." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "" "La capacidad del servicio de réplica no se ha encontrado en " "%(storageSystemName)s." #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "El servicio de réplica no se ha encontrado en %(storageSystemName)s." msgid "Replication is not enabled" msgstr "La réplica no está habilitada" msgid "Replication is not enabled for volume" msgstr "La réplica no está habilitada para el volumen" msgid "Replication not allowed yet." msgstr "Aún no se permite la replicación." #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "El estado de réplica del volumen debe ser active o active-stopped, pero el " "estado actual es: %s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "El estado de réplica del volumen debe ser inactive, active-stopped o error, " "pero el estado actual es: %s" msgid "Request body and URI mismatch" msgstr "Discrepancia de URI y cuerpo de solicitud" msgid "Request body contains too many items" msgstr "El cuerpo de solicitud contiene demasiados elementos" msgid "Request body contains too many items." msgstr "El cuerpo de solicitud contiene demasiados elementos." 
msgid "Request body empty" msgstr "Cuerpo de la solicitud vacío" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "La solicitud al clúster de Datera ha devuelto un estado incorrecto: " "%(status)s | %(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "La copia de seguridad que se ha solicitado supera la cuota de gigabytes " "permitida para copias de seguridad. Se ha solicitado %(requested)sG, la " "cuota es %(quota)sG y se ha consumido %(consumed)sG." #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "El volumen o la instantánea que se ha solicitado supera la cuota %(name)s " "permitida. Se ha solicitado %(requested)sG, la cuota es %(quota)sG y se ha " "consumido %(consumed)sG." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "El tamaño del volumen solicitado %(size)d es mayor que el límite máximo " "permitido %(limit)d." msgid "Required configuration not found" msgstr "Configuración necesaria no encontrada" #, python-format msgid "Required flag %s is not set" msgstr "El distintivo necesario %s no se ha establecido" msgid "Requires an NaServer instance." msgstr "Requiere una instancia de NaServer." #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "El restablecimiento del estado de la copia de seguridad ha terminado " "anormalmente, el servicio de copia de seguridad configurado actualmente " "[%(configured_service)s] no es el servicio de copia de seguridad que se usó " "para crear esta copia de seguridad [%(backup_service)s]." #, python-format msgid "Resizing clone %s failed." msgstr "Error al cambiar el tamaño de clononación %s." msgid "Resizing image file failed." msgstr "Cambiar tamaño de archivo de imagen ha fallado." msgid "Resource could not be found." msgstr "No se ha podido encontrar el recurso." msgid "Resource not ready." msgstr "Recurso no preparado." #, python-format msgid "Response error - %s." msgstr "Error de respuesta - %s." msgid "Response error - The storage-system is offline." msgstr "Error de respuesta - El sistema de almacenamiento está fuera de línea." #, python-format msgid "Response error code - %s." msgstr "Código de error de respuesta - %s." msgid "RestURL is not configured." msgstr "RestURL no está configurado." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "La restauración de la copia de seguridad ha terminado anormalmente, se " "esperaba el estado de volumen %(expected_status)s pero se ha obtenido " "%(actual_status)s." #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "La restauración de la copia de seguridad ha terminado anormalmente, el " "servicio de copia de seguridad configurado actualmente " "[%(configured_service)s] no es el servicio de copia de seguridad que se usó " "para crear esta copia de seguridad [%(backup_service)s]." 
#, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "La restauración de la copia de seguridad ha terminado anormalmente: se " "esperaba el estado de copia de seguridad %(expected_status)s pero se ha " "obtenido %(actual_status)s." #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Se ha recuperado una cantidad distinta de volúmenes de SolidFire para las " "instancias Cinder proporcionadas. Recuperados: %(ret)s Deseados: %(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Se ha recuperado una cantidad distinta de volúmenes de SolidFire para los " "volúmenes Cinder proporcionados. Recuperados: %(ret)s Deseados: %(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "Se ha superado el recuento de reintentos para el mandato: %s" msgid "Retryable SolidFire Exception encountered" msgstr "Se ha detectado una excepción reintentable de SolidFire" msgid "Retype cannot change encryption requirements." msgstr "La reescritura no puede cambiar los requisitos de cifrado" #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "La reescritura no puede cambiar las especificaciones de calidad de servicio " "frontal para volúmenes en uso: %s." msgid "Retype requires migration but is not allowed." msgstr "La reescritura requiere migración, pero no está permitido." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "La retrotracción del volumen: %(volumeName)s ha fallado. Póngase en contacto " "con el administrador del sistema para devolver manualmente el volumen al " "grupo de almacenamiento predeterminado para la política fast " "%(fastPolicyName)s que ha fallado." #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "Retrotrayendo %(volumeName)s mediante su supresión." #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" "No se permite ejecutar Cinder con una versión de VMware vCenter inferior a " "la versión %s." msgid "SAN product is not configured." msgstr "Producto SAN no está configurado." msgid "SAN protocol is not configured." msgstr "Protocolo SAN no está configurado." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" "El archivo de configuración de SMBFS 'smbfs_oversub_ratio' no es válido. " "Debe ser > 0: %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "El archivo de configuración de SMBFS 'smbfs_used_ratio' no es válido. Debe " "ser > 0 y <= 1.0: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "El archivo de configuración SMBFS en %(config)s no existe." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "" "El archivo de configuración SMBFS no se ha configurado (smbfs_shares_config)." 
#, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" "Se ha encontrado un error en el mandato SSH tras '%(total_attempts)r' " "intentos: '%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "" "El mandato SSH ha fallado con el error: '%(err)s', mandato: '%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Se ha detectado inyección de mandato SSH: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "La conexión SSH ha fallado para %(fabric)s con el error: %(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "El certificado SSL ha caducado el %s." #, python-format msgid "SSL error: %(arg)s." msgstr "Error de SSL: %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "No se ha podido encontrar el filtro de host de planificador %(filter_name)s." #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "El Scheduler Host Weigher %(weigher_name)s no se ha podido encontrar." #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "Estado de copia secundaria: %(status)s y sincronizado: %(sync)s, el progreso " "de la sincronización es: %(progress)s%%." #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" "El ID secundario no puede ser el mismo que la matriz primaria, backend_id = " "%(secondary)s." #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "SerialNumber debe estar en el archivo %(fileName)s." #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "Se ha eliminado el servicio %(service)s en el host %(host)s." #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "" "No se ha podido encontrar el servicio %(service_id)s en el host %(host)s." #, python-format msgid "Service %(service_id)s could not be found." msgstr "No se ha podido encontrar el servicio %(service_id)s." #, python-format msgid "Service %s not found." msgstr "El servicio %s no se ha encontrado." msgid "Service is too old to fulfil this request." msgstr "El servicio es demasiado antiguo para cumplir esta solicitud." msgid "Service is unavailable at this time." msgstr "El servicio no esta disponible en este momento" msgid "Service not found." msgstr "Servicio no encontrado." msgid "Set pair secondary access error." msgstr "Error al definir el acceso secundario del par." msgid "Sets thin provisioning." msgstr "Establece suministro ligero." msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "No se admite el establecimiento del grupo de política de calidad de servicio " "de LUN en esta familia de almacenamiento y versión de ONTAP." msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "No se admite el establecimiento del grupo de política de calidad de servicio " "del archivo en esta familia de almacenamiento y versión de ontap." #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "La unidad compartida %s se ignora debido a un formato no válido. 
Debe tener " "el formato address:/export. Compruebe los valores nas_ip y nas_share_path." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "El servicio de volumen Cinder no puede grabar en la unidad compartida en " "%(dir)s. Las operaciones de instantánea no se admitirán." #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Error de E/S Sheepdog, el mandato era: \"%s\"." msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "Las operaciones de mostrar sólo se pueden realizar en proyectos de la misma " "jerarquía del proyecto en el que los usuarios tienen alcance." msgid "Size" msgstr "Tamaño" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "" "Tamaño para volumen: %s no se ha encontrado, no puede asegurar la supresión." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "El tamaño es %(image_size)dGB y no se ajusta en un volumen de tamaño " "%(volume_size)dGB." #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "El tamaño de la imagen especificada %(image_size)sGB es mayor que el tamaño " "de volumen %(volume_size)sGB." #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "Se ha solicitado suprimir la instantánea %(id)s mientras se esperaba a que " "estuviera disponible. Quizás se ha realizado una solicitud simultánea." #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "La instantánea %(id)s se ha encontrado con el estado%(state)s en lugar de " "'borrando' durante la supresión en cascada." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "No se ha podido encontrar la instantánea %(snapshot_id)s." #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "La instantánea %(snapshot_id)s no tiene metadatos con la clave " "%(metadata_key)s." #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "La instantánea %s no puede formar parte de un grupo de consistencia." #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "La instantánea '%s' no existe en la matriz." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "No se puede crear la instantánea porque el volumen %(vol_id)s no está " "disponible, el estado actual del volumen es: %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "No se puede crear una instantánea mientras se está migrando el volumen" msgid "Snapshot of secondary replica is not allowed." msgstr "La instantánea de la réplica secundaria no está permitida." #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "La instantánea del volumen no se soporta en estado: %s." #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "¿Recurso de instantánea \"%s\" no desplegado en ningún sitio?" msgid "Snapshot size must be multiple of 1 GB." msgstr "El tamaño de la instantánea debe ser múltiplo de 1 GB." 
#, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "Estado de instantánea %(cur)s no permitido para update_snapshot_status" msgid "Snapshot status must be \"available\" to clone." msgstr "El estado de la instantánea debe ser \"disponible\" para clonar." #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "" "La instantánea de la que se va a hacer una copia de seguridad debe estar " "disponible, pero el estado actual es \"%s\"." #, python-format msgid "Snapshot with id of %s could not be found." msgstr "No se ha encontrado la instantánea con el ID %s." #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "La instantánea='%(snap)s' no existe en la imagen de base='%(base)s' - " "terminando anormalmente copia de seguridad incremental" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "Este formato de volumen no admite instantáneas: %s" #, python-format msgid "Socket error: %(arg)s." msgstr "Error de socket: %(arg)s." msgid "SolidFire Cinder Driver exception" msgstr "Excepción de controlador SolidFire Cinder" msgid "Sort direction array size exceeds sort key array size." msgstr "" "El tamaño de la matriz de dirección de ordenación excede el tamaño de matriz " "de la clave de ordenación." msgid "Source CG is empty. No consistency group will be created." msgstr "" "El grupo de consistencia está vacío. No se creará ningún grupo de " "consistencia." msgid "Source host details not found." msgstr "Detalles de host de origen no encontrados." msgid "Source volume device ID is required." msgstr "El ID de dispositivo de volumen de origen es obligatorio." msgid "Source volume not mid-migration." msgstr "El volumen de origen no mid-migration." #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "No se ha encontrado el origen con la IP/nombre de host: %s en el dispositivo " "de destino para la migración de volumen habilitada para programa de fondo, " "se sigue con la migración predeterminada." msgid "SpaceInfo returned byarray is invalid" msgstr "SpaceInfo ha devuelto una byarray que no es válida" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "El host especificado para correlacionarse con el volumen %(vol)s está en un " "grupo de hosts no admitido con %(group)s." msgid "Specified logical volume does not exist." msgstr "El volumen lógico especificado no existe." #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "" "No se ha encontrado el grupo de instantáneas especificado con el ID %s." msgid "Specify a password or private_key" msgstr "Especifique una contraseña o private_key" msgid "Specify san_password or san_private_key" msgstr "Especifique san_password o san_private_key" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "" "Especifique el nombre del tipo de volumen, la descripción, is_public o una " "combinación de los mismos." msgid "Split pair error." msgstr "Error al dividir el par." msgid "Split replication failed." msgstr "Dividir replicación ha fallado." msgid "Start LUNcopy error." msgstr "Error al iniciar LUNcopy." msgid "State" msgstr "Estado" #, python-format msgid "State of node is wrong. 
Current state is %s." msgstr "El estado del nodo es incorrecto. El estado actual es %s." msgid "Status" msgstr "Estado" msgid "Stop snapshot error." msgstr "Error al detener una instantánea." #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "" "El servicio de configuración de almacenamiento no se ha encontrado en " "%(storageSystemName)s." #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "El servicio de gestión de ID de hardware de almacenamiento no se ha " "encontrado en %(storageSystemName)s." #, python-format msgid "Storage Profile %s not found." msgstr "No se ha encontrado el perfil de almacenamiento %s." #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "" "El servicio de reubicación de almacenamiento no se ha encontrado en " "%(storageSystemName)s." #, python-format msgid "Storage family %s is not supported." msgstr "No hay soporte para la familia de almacenamiento %s." #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "" "El grupo de almacenamiento %(storageGroupName)s no se ha suprimido " "correctamente" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "Host de almacenamiento %(svr)s no detectado, compruebe el nombre." msgid "Storage pool is not configured." msgstr "No se ha configurado la agrupación de almacenamiento." #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "No se ha encontrado el perfil de almacenamiento %(storage_profile)s." msgid "Storage resource could not be found." msgstr "No se he encontrado el recurso de almacenamiento." msgid "Storage system id not set." msgstr "ID de sistema de almacenamiento no establecido." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "" "No se ha encontrado el sistema de almacenamiento para la agrupación " "%(poolNameInStr)s." msgid "Storage-assisted migration failed during manage volume." msgstr "" "Ha fallado la migración asistida con almacenamiento al gestionar el volumen." #, python-format msgid "StorageSystem %(array)s is not found." msgstr "El sistema de almacenamiento %(array)s no se ha encontrado." #, python-format msgid "String with params: %s" msgstr "Serie con parámetros: %s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "La suma de uso de hijos '%(sum)s' es superior a la cuota libre de '%(free)s' " "para el proyecto '%(proj)s' para el recurso '%(res)s'. Rebaje el límite de " "uso de uno o más de los siguientes proyectos: '%(child_ids)s'" msgid "Switch over pair error." msgstr "Error al cambiar al par." msgid "Sync pair error." msgstr "Error al sincronizar el par." msgid "Synchronizing secondary volume to primary failed." msgstr "Error en la sincronización del volumen secundario con primario." #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "" "Se ha encontrado el sistema %(id)s con un estado de contraseña incorrecto - " "%(pass_status)s." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "El sistema%(id)s se ha encontrado con estado incorrecto - %(status)s." msgid "System does not support compression." msgstr "El sistema no soporta la compresión." 
msgid "System is busy, retry operation." msgstr "El sistema está ocupado, vuelva a intentar la operación." #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "No se ha encontrado TSM [%(tsm)s] en el almacenamiento CloudByte para la " "cuenta [%(account)s]." msgid "Target volume type is still in use." msgstr "El tipo de volumen de destino aún se está utilizando." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "Discrepancia de árbol de plantilla; adición de esclavo %(slavetag)s a " "maestro %(mastertag)s" #, python-format msgid "Tenant ID: %s does not exist." msgstr "ID de arrendatario: %s no existe." msgid "Terminate connection failed" msgstr "No se ha podido terminar la conexión" msgid "Terminate connection unable to connect to backend." msgstr "" "La terminación de la conexión no se ha podido conectar con el programa de " "fondo." #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "Error al terminar la conexión del volumen: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "El origen %(type)s %(id)s para replicar no se ha encontrado." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "Los parámetros 'sort_key' y 'sort_dir' están en desuso y no se pueden " "utilizar con el parámetro 'sort'." msgid "The EQL array has closed the connection." msgstr "La matriz EQL ha cerrado la conexión." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "El GPFS filesystem %(fs)s no está en el nivel de release requerido. El " "nivel actual es %(cur)s, debe ser al menos %(min)s." msgid "The IP Address was not found." msgstr "No se ha encontrado la dirección IP." #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "La solicitud WebDAV ha fallado. Motivo: %(msg)s, Código de retorno/motivo: " "%(code)s, Volumen de origen: %(src)s, Volumen de destino: %(dst)s, Método: " "%(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "El error anterior puede mostrar que la base de datos no se ha creado.\n" "Cree una base de datos utilizando 'cinder-manage db sync' antes de ejecutar " "este mandato." #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "La matriz no da soporte al valor de la agrupación de almacenamiento para SLO " "%(slo)s y la carga de trabajo %(workload)s. Busque SLO y cargas de trabajo " "válidos en la matriz." msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "" "El programa de fondo donde se ha creado el volumen no tiene la replicación " "habilitada." #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "El mandato %(cmd)s ha fallado. 
(ret: %(ret)s, stdout: %(out)s, stderr: " "%(err)s)" msgid "The copy should be primary or secondary" msgstr "La copia debe ser primaria o secundaria" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "" "No se ha podido completar la creación de un dispositivo lógico. (LDEV: " "%(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" "El método decorado debe aceptar un volumen o un objeto de instantánea" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "El dispositivo en la ruta %(path)s no está disponible: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "" "La hora de finalización (%(end)s) debe ser posterior a la hora de inicio " "(%(start)s)." #, python-format msgid "The extra_spec: %s is invalid." msgstr "extra_spec: %s no es válido." #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "La especificación extraspec: %(extraspec)s no es válida." #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "No se ha podido suprimir el volumen migrado tras error: %s" #, python-format msgid "The following elements are required: %s" msgstr "Se necesitan los elementos siguientes: %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "Las siguientes migraciones tienen una degradación, lo cual no les está " "permitido: \n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "No se ha podido añadir el grupo de host o el destino iSCSI." msgid "The host group or iSCSI target was not found." msgstr "No se ha encontrado el grupo de host o el destino iSCSI." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "El host no está a punto para restablecerlo. Vuelva a sincronizar los " "volúmenes y reanude la replicación en los programas de fondo 3PAR." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "El host no está a punto para restablecerlo. Vuelva a sincronizar los " "volúmenes y reanude la replicación en los programas de fondo LeftHand." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "El host no está a punto para restablecerlo. Vuelva a sincronizar los " "volúmenes y reanude la replicación en los programas de fondo de Storwize." #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "El usuario CHAP de iSCSI %(user)s no existe." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "El LUN importado %(lun_id)s está en la agrupación %(lun_pool)s que no está " "gestionada por el host %(host)s." msgid "The key cannot be None." msgstr "La clave no puede ser Ninguno." #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "" "El dispositivo lógico del %(type)s %(id)s especificado ya se había eliminado." #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "" "El método %(method)s ha excedido el tiempo de espera.
(valor de tiempo de " "espera: %(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "No se ha implementado el método update_migrated_volume." #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" "El montaje %(mount_path)s no es un volumen Quobyte USP válido. Error: %(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "" "El parámetro del back-end de almacenamiento. (config_group: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "" "La copia de seguridad padre debe estar disponible para una copia de " "seguridad incremental." #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "" "La instantánea proporcionada '%s' no es una instantánea del volumen " "proporcionado." msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "La referencia al volumen del programa de fondo debería tener el formato " "file_system/volume_name (volume_name no puede contener '/')" #, python-format msgid "The remote retention count must be %s or less." msgstr "El recuento de retención remota debe ser de %s o inferior." msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "El modo de replicación no se ha configurado correctamente en las " "especificaciones adicionales (extra_specs) del tipo de volumen. Si " "replication:mode es periodic, se debe especificar también replication:" "sync_period y debe ser entre 300 y 31622400 segundos." #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "" "El periodo de sincronización de replicación debe ser al menos de %s segundos." #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "El tamaño solicitado: %(requestedSize)s no es el mismo que el tamaño " "resultante: %(resultSize)s." #, python-format msgid "The resource %(resource)s was not found." msgstr "No se ha encontrado el recurso %(resource)s." msgid "The results are invalid." msgstr "Los resultados no son válidos." #, python-format msgid "The retention count must be %s or less." msgstr "El recuento de retención debe ser de %s o inferior." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "La instantánea no se puede crear cuando el volumen está en modalidad de " "mantenimiento." #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "" "El volumen de origen %s no está en la agrupación gestionada por el host " "actual." msgid "The source volume for this WebDAV operation not found." msgstr "No se ha encontrado el volumen de origen para esta operación WebDAV." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "El tipo de volumen de origen '%(src)s' es distinto del tipo de volumen de " "destino '%(dest)s'." #, python-format msgid "The source volume type '%s' is not available." msgstr "El tipo de volumen de origen '%s' no está disponible." #, python-format msgid "The specified %(desc)s is busy." msgstr "El %(desc)s especificado está ocupado." 
#, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "El LUN especificado no pertenece a la agrupación indicada: %s." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "No se ha podido gestionar el ldev %(ldev)s especificado. No debe " "correlacionarse el ldev." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" "No se ha podido gestionar el ldev %(ldev)s especificado. No debe emparejarse " "el ldev." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "No se ha podido gestionar el ldev %(ldev)s especificado. El tamaño de ldev " "debe ser un múltiplo de gigabyte." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "No se ha podido gestionar el ldev %(ldev)s especificado. El tipo de volumen " "debe ser DP-VOL." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "La operación especificada no se admite. El tamaño del volumen debe ser el " "mismo que el origen %(type)s. (volumen: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "El disco virtual especificado se correlaciona con un host." msgid "The specified volume is mapped to a host." msgstr "El volumen especificado se ha correlacionado con un host." #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "La contraseña de la matriz de almacenamiento para %s es incorrecta, " "actualice la contraseña configurada." #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "" "Se puede utilizar el back-end de almacenamiento. (config_group: " "%(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "El dispositivo de almacenamiento no admite %(prot)s. Configure el " "dispositivo para que admita %(prot)s o cambie a un controlador que utilice " "otro protocolo." #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "El metarrecuento en bandas de %(memberCount)s es demasiado pequeño para el " "volumen: %(volumeName)s, con tamaño %(volumeSize)s." #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "El tipo de metadatos %(metadata_type)s del volumen/instantánea %(id)s no es " "válido." #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "El volumen %(volume_id)s no se ha podido ampliar. El tipo de volumen debe " "ser Normal." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "El volumen %(volume_id)s no ha podido quedar como no gestionado. El tipo de " "volumen debe ser %(volume_type)s." #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "" "Se ha gestionado correctamente el volumen %(volume_id)s. 
(LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "" "El volumen %(volume_id)s no se ha gestionado correctamente. (LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "El volumen %(volume_id)s para correlacionar no se ha encontrado." msgid "The volume cannot accept transfer in maintenance mode." msgstr "" "El volumen no puede aceptar transferencias en la modalidad de mantenimiento." msgid "The volume cannot be attached in maintenance mode." msgstr "El volumen no se puede conectar en la modalidad de mantenimiento." msgid "The volume cannot be detached in maintenance mode." msgstr "El volumen no se puede desconectar en la modalidad de mantenimiento." msgid "The volume cannot be updated during maintenance." msgstr "El volumen no se puede actualizar durante el mantenimiento." msgid "The volume connection cannot be initialized in maintenance mode." msgstr "" "La conexión de volumen no se puede inicializar en modalidad de mantenimiento." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" "La unidad de volumen requiere el nombre del iniciador iSCSI en el conector." msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "El volumen está ocupado actualmente en el 3PAR y no puede suprimirse en este " "momento. Inténtelo de nuevo más tarde." msgid "The volume label is required as input." msgstr "Se necesita la etiqueta de volumen como entrada." msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "" "Los metadatos de volumen no se pueden suprimir cuando el volumen está en " "modalidad de mantenimiento." msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "Los metadatos de volumen no se pueden actualizar cuando el volumen está en " "modalidad de mantenimiento." #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "No hay recursos disponibles para utilizar. (recurso: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "No hay hosts ESX válidos." #, python-format msgid "There are no valid datastores attached to %s." msgstr "No hay almacenes de datos válidos conectados a %s." msgid "There are no valid datastores." msgstr "No hay almacenes de datos válidos." #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "No hay designación de %(param)s. El almacenamiento especificado es esencial " "para gestionar el volumen." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "No hay designación del ldev. El ldev especificado es esencial para gestionar " "el volumen." msgid "There is no metadata in DB object." msgstr "No hay metadatos en el objeto de base de datos." #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "No hay ninguna unidad compartida con este host %(volume_size)sG" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "No hay ninguna unidad compartida que pueda alojar %(volume_size)sG" #, python-format msgid "There is no such action: %s" msgstr "No existe esta acción: %s" msgid "There is no virtual disk device." msgstr "No hay ningún dispositivo de disco virtual." 
#, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "" "Se producido un error al añadir el volumen al grupo de copias remotas: %s." #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "Se producido un error al crear el cgsnapshot: %s." #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "Se producido un error al crear el grupo de copias remotas: %s." #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "" "Se producido un error al definir el periodo de sincronización del grupo de " "copias remotas: %s." #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Se producido un error al definir el grupo de copias remotas en las matrices " "de 3PAR: ('%s'). El volumen no se reconocerá como un tipo de replicación." #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Se producido un error al definir una planificación remota en las matrices de " "LeftHand : ('%s'). El volumen no se reconocerá como un tipo de replicación." #, python-format msgid "There was an error starting remote copy: %s." msgstr "Se producido un error al iniciar la copia remota: %s." #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "No hay ningún archivo de configuración de Gluster configurado (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "No hay ningún archivo de configuración de NFS configurado (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "No se ha configurado un volumen Quobyte (%s). Ejemplo: quobyte:///" "" msgid "Thin provisioning not supported on this version of LVM." msgstr "No se admite el aprovisionamiento ligero en esta versión de LVM." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "El habilitador de ThinProvisioning no está instalado. No se puede crear un " "volumen ligero" msgid "This driver does not support deleting in-use snapshots." msgstr "Este controlador no admite suprimir instantáneas en uso." msgid "This driver does not support snapshotting in-use volumes." msgstr "Este controlador no admite instantáneas de volúmenes en uso." msgid "This request was rate-limited." msgstr "Esta solicitud estaba limitada por tipo." #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "No se admite esta plataforma de sistema (%s). Este controlador solo admite " "plataformas Win32." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "" "El servicio de política de niveles no se ha encontrado para " "%(storageSystemName)s." #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "Se desactivó mientras esperaba la actualización de Nova para la creación de " "la instantánea %s." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "Se ha desactivado mientras esperaba la actualización de Nova para suprimir " "la instantánea %(id)s." msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. 
If value < " "0, no timeout is set and default librados value is used." msgstr "" "Valor de tiempo de espera (en segundos) que se utiliza al conectarse al " "clúster ceph. Si el valor. < 0, no se establece ningún tiempo de espera y se " "utiliza el valor librados predeterminado." #, python-format msgid "Timeout while calling %s " msgstr "Tiempo de espera excedido al llamar a %s " #, python-format msgid "Timeout while requesting %(service)s API." msgstr "Se ha agotado el tiempo de espera al solicitar la API de %(service)s." #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "" "Se ha agotado el tiempo de espera al solicitar capacidades al programa de " "fondo %(service)s." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "No se ha podido encontrar la transferencia %(transfer_id)s)." #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "Transferencia %(transfer_id)s: id de volumen %(volume_id)s en estado " "inesperado %(status)s, awaiting-transfer esperado" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "Intentando importar metadatos de copia de seguridad de ID %(meta_id)s a la " "copia de seguridad %(id)s." #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "La tarea de ajustar el volumen se ha detenido antes de finalizar: " "volume_name=%(volume_name)s, task-status=%(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "El tipo %(type_id)s ya está asociado con otro qos specs: %(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "" "La modificación del acceso de tipo no es aplicable al tipo de volumen " "público." msgid "Type cannot be converted into NaElement." msgstr "El tipo no se puede convertir a NaElement." #, python-format msgid "TypeError: %s" msgstr "TypeError: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "" "Los UUID %s están tanto en la lista de volumen de añadir como de eliminar." #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "" "No se ha podido acceder al programa de fondo de Storwize para el volumen %s." msgid "Unable to access the backend storage via file handle." msgstr "" "No se ha podido acceder al almacenamiento de programa de fondo a través del " "descriptor de archivo." #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "" "No se ha podido obtener acceso al almacenamiento de extremo trasero por " "medio de la ruta %(path)s." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "No se puede añadir el host Cinder a apphosts para el espacio %(space)s" #, python-format msgid "Unable to complete failover of %s." msgstr "No se ha podido completar la migración tras error de %s." msgid "Unable to connect or find connection to host" msgstr "No se ha podido conectar o encontrar una conexión con el host" msgid "Unable to create Barbican Client without project_id." msgstr "No se puede crear el cliente Barbican sin un project_id." #, python-format msgid "Unable to create consistency group %s" msgstr "No se ha podido crear el grupo de consistencia %s" msgid "Unable to create lock. 
Coordination backend not started." msgstr "" "No se puede crear el bloqueo. El programa de fondo de coordinación no se ha " "iniciado" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "No se puede crear u obtener el grupo de almacenamiento predeterminado para " "la política FAST: %(fastPolicyName)s." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "No se ha podido crear un clon de réplica para el volumen %s." #, python-format msgid "Unable to create the relationship for %s." msgstr "No ha sido posible crear la relación para %s.." #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "No se ha podido crear el volumen %(name)s a partir de %(snap)s." #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "No se ha podido crear el volumen %(name)s a partir de %(vol)s." #, python-format msgid "Unable to create volume %s" msgstr "No se ha podido crear el volumen %s" msgid "Unable to create volume. Backend down." msgstr "No se ha podido crear el volumen. El programa de fondo está inactivo." #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "No se ha podido suprimir la instantánea del grupo de consistencia %s" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "No se ha podido suprimir la instantánea %(id)s, estado: %(status)s." #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "No se puede suprimir la política de instantáneas en el volumen %s." #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "" "No se ha podido suprimir el volumen de destino para el volumen %(vol)s. " "Excepción: %(err)s." msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "No se puede desasociar el volumen. El estado del volumen debe ser 'in-use' y " "attach_status debe ser 'attached' para poder desasociarlo." #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "" "No se ha podido determinar la matriz secundaria (secondary_array) a partir " "del secundario indicado: %(secondary)s." #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "" "No se ha podido determinar el nombre de instantánea en Purity de la " "instantánea %(id)s." msgid "Unable to determine system id." msgstr "No se ha podido determinar ID del sistema." msgid "Unable to determine system name." msgstr "No se ha podido determinar el nombre del sistema." #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "No se pueden realizar operaciones de gestión de instantáneas con Purity REST " "API versión %(api_version)s, se necesita %(required_versions)s." #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "No se ha podido realizar la replicación con la API REST de Purity versión " "%(api_version)s, se necesita una de las versiones siguientes: " "%(required_versions)s." msgid "Unable to enable replication and snapcopy at the same time." msgstr "" "No se ha podido habilitar la replicación y la copia instantánea a la vez." 
#, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "" "No se ha podido establecer la asociación con el clúster de Storwize %s." #, python-format msgid "Unable to extend volume %s" msgstr "No se ha podido ampliar el volumen %s" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "" "No se ha podido migrar tras error el volumen %(id)s al programa de fondo " "secundario porque la relación de replicación no puede conmutar: %(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "No se ha podido restablecer al \"valor predeterminado\", esto sólo se puede " "hacer una vez se ha completado una migración tras error." #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "" "No se ha podido realizar la migración tras error al destino de replicación:" "%(reason)s)." msgid "Unable to fetch connection information from backend." msgstr "No se puede captar información de conexión de programa de fondo." #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" "No se puede captar información de conexión desde el programa de fondo: " "%(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "No se ha encontrado la ref Purity con name=%s" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "No se puede encontrar el grupo de volumen: %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "" "No se ha podido encontrar el destino de migración tras error, no se han " "configurado destinos secundarios." msgid "Unable to find iSCSI mappings." msgstr "No se pueden encontrar correlaciones iSCSI." #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "No se puede encontrar ssh_hosts_key_file: %s" msgid "Unable to find system log file!" msgstr "¡No ha sido posible encontrar el fichero de log del sistema!" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "" "No se ha podido encontrar una instantánea pg viable para utilizar para la " "migración tras error en la matriz secundaria seleccionada: %(id)s." #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "" "No se ha podido encontrar una matriz secundaria viable a partir de los " "destinos configurados: %(targets)s." #, python-format msgid "Unable to find volume %s" msgstr "No se puede encontrar el volumen %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "No se puede obtener un dispositivo de bloque para el archivo '%s'" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" "No se puede obtener la información de configuración necesaria para crear un " "volumen: %(errorMessage)s." msgid "Unable to get corresponding record for pool." msgstr "No se puede obtener el registro correspondiente a la agrupación." #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "No se puede obtener información acerca del espacio %(space)s, verifique que " "el clúster se esté ejecutando y esté conectado." 
msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "No se puede obtener la lista de direcciones IP en este host, compruebe los " "permisos y las redes." msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "" "No se puede obtener la lista de miembros de dominio, compruebe que el " "clúster se está ejecutando." msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "No se puede obtener la lista de espacios para hacer un nuevo nombre. " "Verifique que el clúster se esté ejecutando." #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "No se puede obtener estadísticas para backend_name: %s" msgid "Unable to get storage volume from job." msgstr "No ha sido posible obtener el volumen de almacenamiento del trabajo." #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "No se pueden obtener los puntos finales de destino para hardwareId " "%(hardwareIdInstance)s." msgid "Unable to get the name of the masking view." msgstr "No ha sido posible obtener el nombre de la vista de máscara." msgid "Unable to get the name of the portgroup." msgstr "No ha sido posible obtener el nombre del grupo de puertos." #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "No se ha podido obtener la relación de replicación para el volumen %s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "No se puede importar el volumen %(deviceId)s en cinder. Es el volumen de " "origen de sesión de réplica %(sync)s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "No se puede importar el volumen %(deviceId)s a cinder. El volumen externo no " "está en la agrupación gestionada por el host cinder actual." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "No se puede importar el volumen %(deviceId)s a cinder. El volumen está en " "vista de máscara %(mv)s." #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "No se puede cargar CA desde %(cert)s %(e)s." #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "No se puede cargar el certificado desde %(cert)s %(e)s." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "No se puede cargar la clave desde %(cert)s %(e)s." #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" "No se ha podido localizar la cuenta %(account_name)s en el dispositivo " "Solidfire" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "No se ha podido localizar un SVM que gestione la dirección IP '%s'" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "" "No se han podido encontrar los perfiles de reproducción specificados %s " #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "No se puede gestionar el volumen existente. El volumen %(volume_ref)s ya se " "ha gestionado." 
#, python-format msgid "Unable to manage volume %s" msgstr "No se puede gestionar el volumen %s" msgid "Unable to map volume" msgstr "No se ha podido correlacionar el volumen" msgid "Unable to map volume." msgstr "No se ha podido correlacionar el volumen." msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "" "No se puede analizar la solicitud XML. Proporcione XML con el formato " "correcto." msgid "Unable to parse attributes." msgstr "No se pueden analizar los atributos." #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "No se puede ascender la réplica a primaria para el volumen %s. Ni hay " "ninguna copia secundaria disponible." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "No se puede volver a utilizar un host que no está gestionado por Cinder con " "use_chap_auth=True," msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "No se puede volver a utilizar un host con credenciales CHAP desconocidas " "configuradas." #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "No se puede renombrar el volumen %(existing)s a %(newname)s" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "No se ha podido recuperar el grupo de instantáneas con el id %s." #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "No se ha podido volver a escribir %(specname)s, se esperaba recibir los " "valores actuales y solicitados de %(spectype)s. Valor recibido: %(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "No se puede volver a escribir: ya existe una copia de volumen %s. La " "reescritura superaría el límite de 2 copias." #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "No se ha podido volver a escribir: la acción requiere una copia de volumen " "(volume-copy), que no se permite cuando el nuevo tipo es replicación. " "Volumen = %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "" "No se ha podido configurar la replicación en modo reflejo para %(vol)s. " "Excepción: %(err)s." #, python-format msgid "Unable to snap Consistency Group %s" msgstr "No se ha podido crear una instantánea del grupo de consistencia %s" msgid "Unable to terminate volume connection from backend." msgstr "No se puede terminar conexión de volumen desde programa de fondo." #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "No se puede terminar la conexión de volumen: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "No se ha actualizar el grupo de consistencia %s" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "No se puede actualizar el tipo debido a un estado incorrecto: %(vol_status)s " "en volumen: %(vol_id)s. El estado de volumen debe ser disponible o en uso." #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. 
" msgstr "" "No se puede verificar el grupo de iniciadores: %(igGroupName)s en la vista " "de máscara %(maskingViewName)s. " msgid "Unacceptable parameters." msgstr "Parametros inaceptables" #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" "Estado de correlación inesperado %(status)s para correlación %(id)s. " "Atributos: %(attr)s." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "Respuesta de CLI inesperada: discrepancia de cabecera/fila. cabecera: " "%(header)s, fila: %(row)s." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "Estado de correlación no esperado %(status)s para la correlación %(id)s. " "Atributos: %(attr)s." #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "" "Salida inesperada. Se esperaba[%(expected)s] pero se ha recibido [%(output)s]" msgid "Unexpected response from Nimble API" msgstr "Respuesta inesperada de la API Nimble" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Respuesta inesperada de la API de Tegile IntelliFlash" msgid "Unexpected status code" msgstr "Código de estado inesperado" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "Código de estado inesperado del conmutador %(switch_id)s con el protocolo " "%(protocol)s para el URL %(page)s. Error: %(error)s" msgid "Unknown Gluster exception" msgstr "Excepción de Gluster desconocida" msgid "Unknown NFS exception" msgstr "Excepción de NFS desconocida" msgid "Unknown RemoteFS exception" msgstr "Excepción de RemoteFS desconocida" msgid "Unknown SMBFS exception." msgstr "Excepción de SMBFS desconocida" msgid "Unknown Virtuozzo Storage exception" msgstr "Excepción desconocida de Virtuozzo Storage" msgid "Unknown action" msgstr "Acción desconocida" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "No se sabe si el volumen: %s que se tiene que gestionar ya está siendo " "gestionado por Cinder. Abortando Gestionar volumen. Añada la propiedad de " "esquema personalizada 'cinder_managed' al volumen y establezca su valor en " "False. Como alternativa, establezca el valor de la política de configuración " "de Cinder 'zfssa_manage_policy' en 'loose' para eliminar esta restricción." #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "No se sabe si el volumen: %s que se tiene que gestionar ya está siendo " "gestionado por Cinder. Abortando Gestionar volumen. Añada la propiedad de " "esquema personalizada 'cinder_managed' al volumen y establezca su valor en " "False. Como alternativa, establezca el valor de la política de configuración " "de Cinder 'zfssa_manage_policy' en 'loose' para eliminar esta restricción." 
#, python-format msgid "Unknown operation %s." msgstr "Operación desconocida %s." #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "Mandato desconocido o no soportado %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "Protocolo desconocido: %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Recursos de cuota desconocidos %(unknown)s." msgid "Unknown service" msgstr "Servicio desconocido" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "Dirección de ordenación desconocida, debe ser 'desc' o 'asc'." msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "" "Las opciones de dejar de administrar y suprimir en cascada son mútuamente " "excluyentes." msgid "Unmanage volume not implemented." msgstr "No se ha implementdo la opción de dejar de administrar un volumen." msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" "No se permite dejar de gestionar instantáneas desde volúmenes que han dado " "error ('failed-over')." msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" "No se permite dejar de gestionar instantáneas desde volúmenes que han dado " "error." #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "Palabra clave de QOS no reconocida: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "Formato de respaldo no reconocido: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valor de read_deleted no reconocido '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "Inhabilite las opciones de gcs: %s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "iscsiadm no satisfactorio. La excepción es %(ex)s. " msgid "Unsupported Clustered Data ONTAP version." msgstr "Versión ONTAP de datos en clúster no soportada." msgid "Unsupported Content-Type" msgstr "Tipo de contenido no soportado" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" "Versión de ONTAP de datos no soportada. Hay soporte para la versión de ONTAP " "de datos 7.3.1 y posterior." #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "Versión de metadatos de copia de seguridad no soportada (%s)" msgid "Unsupported backup metadata version requested" msgstr "" "Se ha solicitado una versión de metadatos de copia de seguridad no soportada" msgid "Unsupported backup verify driver" msgstr "Controlador de verificación de copia de seguridad no admitido" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "Firmware no soportado en el conmutador %s. Asegúrese de que el conmutador " "ejecuta firmware v6.4 o superior" #, python-format msgid "Unsupported volume format: %s " msgstr "Formato de volumen no admitido: %s " msgid "Update QoS policy error." msgstr "Error al actualizar la política QoS." msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "Las operaciones de actualizar y suprimir cuota sólo las puede realizar un " "administrador de padre inmediato o un admin de CLOUD." msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." 
msgstr "" "Las operaciones de actualizar y suprimir cuota sólo se pueden realizar en " "proyectos de la misma jerarquía del proyecto en el que los usuarios tienen " "alcance." msgid "Update list, doesn't include volume_id" msgstr "La lista de actulización no incluye el ID de volumen (volume_id)" msgid "Updated At" msgstr "Actualizado el" msgid "Upload to glance of attached volume is not supported." msgstr "No se soporta la carga en Glance del volumen conectado." msgid "Use ALUA to associate initiator to host error." msgstr "Error al utilizar ALUA para asociar el iniciador con el host." msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "Error al utilizar CHAP para asociar el iniciador con el host. Verifique el " "nombre de usuario y contraseña CHAP." msgid "User ID" msgstr "ID de usuario" msgid "User does not have admin privileges" msgstr "El usuario no tiene privilegios de administrador" msgid "User is not authorized to use key manager." msgstr "El usuario no está autorizado a usar el gestor de clave." msgid "User not authorized to perform WebDAV operations." msgstr "El usuario no tiene autorización para realizar operaciones WebDAV." msgid "UserName is not configured." msgstr "Nombre de usuario no está configurado." msgid "UserPassword is not configured." msgstr "Contraseña de usuario no está configurada." msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "" "Retrotracción V2 - El volumen de otro grupo de almacenamiento además del " "grupo de almacenamiento predeterminado." msgid "V2 rollback, volume is not in any storage group." msgstr "" "Retrotracción V2, el volumen no está en ningún grupo de almacenamiento." msgid "V3 rollback" msgstr "Retrotracción V3" msgid "VF is not enabled." msgstr "VF no está habilitado." #, python-format msgid "VV Set %s does not exist." msgstr "Conjunto VV %s no existe." #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "Consumidor válido de QoS specs son: %s" #, python-format msgid "Valid control location are: %s" msgstr "La ubicación de control válido es: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "Error al validar la conexión del volumen (error: %(err)s)." #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" "El valor \"%(value)s\" no es valido para la opción de configuración " "\"%(option)s\"" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "El valor %(param)s de %(param_string)s no es un booleano." msgid "Value required for 'scality_sofs_config'" msgstr "Valor necesario para 'scality_sofs_config'" #, python-format msgid "ValueError: %s" msgstr "ValueError: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "" "El disco virtual %(name)s no está implicado en la correlación %(src)s -> " "%(tgt)s." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "La versión %(req_ver)s no está soportada por la API. La versión mínima es " "la %(min_ver)s y la máxima es la %(max_ver)s." #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s no puede recuperar el objeto por su ID." #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s no da soporte a la actualización condicional." 
#, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "El volumen virtual '%s' no existe en la matriz." #, python-format msgid "Vol copy job for dest %s failed." msgstr "El trabajo de copia de volumen para destino %s ha fallado." #, python-format msgid "Volume %(deviceID)s not found." msgstr "No se ha encontrado el volumen %(deviceID)s." #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "El volumen %(name)s no se ha encontrado en la matriz. No se puede determinar " "si hay volúmenes correlacionados." #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "El volumen %(name)s se ha creado en VNX, pero con el estado %(state)s." #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "No se ha podido crear el volumen %(vol)s en la agrupación %(pool)s." #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "" "El volumen %(vol1)s no coincide con el valor de snapshot.volume_id %(vol2)s." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "El estado de volumen %(vol_id)s debe ser disponible o en uso, pero el estado " "actual es: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "El estado de volumen %(vol_id)s debe ser disponible para ampliar, pero el " "estado actual es: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "El estado de volumen %(vol_id)s debe ser disponible para actualizar " "distintivo de sólo lectura, pero el estado actual es: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "El estado de volumen %(vol_id)s debe ser disponible, pero el estado actual " "es: %(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "El volumen %(volume_id)s no se ha podido encontrar." #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "Volumen %(volume_id)s no tiene metadatos de administración con la clave " "%(metadata_key)s." #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "El volumen %(volume_id)s no tiene metadatos con la clave %(metadata_key)s." #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "El volumen %(volume_id)s está correlacionado actualmente con un grupo de " "host no admitido %(group)s" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "" "El volumen %(volume_id)s no está correlacionado actualmente con el host " "%(host)s" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" "El volumen %(volume_id)s todavía están conectados, en primer lugar " "desconecte el volumen." #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "Error de réplica de volumen %(volume_id)s: %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "El volumen %(volume_name)s está ocupado." #, python-format msgid "Volume %s could not be created from source volume." 
msgstr "No se ha podido crear el volumen %s desde el volumen de origen." #, python-format msgid "Volume %s could not be created on shares." msgstr "El volumen %s no se puede crear en las unidades compartidas." #, python-format msgid "Volume %s could not be created." msgstr "No se ha podido crear el volumen %s." #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "El volumen %s no existe en Nexenta SA" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "El volumen %s no existe en la aplicación Nexenta Store" #, python-format msgid "Volume %s does not exist on the array." msgstr "El volumen %s no existe en la matriz." #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "El volumen %s no tiene especificado provider_location, se salta." #, python-format msgid "Volume %s doesn't exist on array." msgstr "El volumen %s no existe en la matriz." #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "El volumen %s no existe en el programa de fondo ZFSSA." #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "El volumen %s ya se gestiona en OpenStack." #, python-format msgid "Volume %s is already part of an active migration." msgstr "El volumen %s ya forma parte de una migración activa." #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "El volumen %s no es de tipo replicado. Este volumen tiene que tener un tipo " "de volumen con la especificación adicional replication_enabled establecida " "en ' True' para admitir acciones de replicación." #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "El volumen %s está en línea. Defina el volumen como fuera de línea para " "gestionarlo con OpenStack." #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." msgstr "" "El volumen %s no se puede migrar ni adjuntar, no puede pertenecer a un " "grupo de consistencia ni tener instantáneas." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "El volumen %s no debe formar parte de un grupo de consistencia." #, python-format msgid "Volume %s must not be replicated." msgstr "El volumen %s no debe replicarse." #, python-format msgid "Volume %s must not have snapshots." msgstr "El volumen %s no debe tener instantáneas." #, python-format msgid "Volume %s not found." msgstr "No se ha encontrado el volumen %s." #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "Volumen %s: Error al intentar ampliar el volumen" #, python-format msgid "Volume (%s) already exists on array" msgstr "El volumen (%s) ya existe en la matriz" #, python-format msgid "Volume (%s) already exists on array." msgstr "El volumen (%s) ya existe en la matriz." #, python-format msgid "Volume Group %s does not exist" msgstr "El grupo de volúmenes %s no existe" #, python-format msgid "Volume Type %(id)s already exists." msgstr "El tipo de volumen %(id)s ya existe. " #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "El tipo de volumen %(type_id)s no tiene una especificación adicional con la " "clave %(id)s." #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." 
msgstr "" "La eliminación del tipo de volumen %(volume_type_id)s no está permitida con " "los volúmenes presente con el tipo." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "El tipo de volumen %(volume_type_id)s no tiene especificaciones adicionales " "con la clave %(extra_specs_key)s." msgid "Volume Type id must not be None." msgstr "La id del tipo de volumen no debe ser Ninguno." #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "No se ha encontrado el volumen [%(cb_vol)s] en el almacenamiento de " "CloudByte correspondiente al volumen de OpenStack [%(ops_vol)s]." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "No se ha encontrado el volumen [%s] en el almacenamiento de CloudByte." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "" "El archivo adjunto de volumen no se ha podido encontrar con el filtro: " "%(filter)s." #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "La configuración de fondo del volumen no es válida: %(reason)s" msgid "Volume by this name already exists" msgstr "Ya existe un volumen con este nombre" msgid "Volume cannot be restored since it contains snapshots." msgstr "El volumen no se puede restaurar porque contiene instantáneas." msgid "Volume create failed while extracting volume ref." msgstr "" "Ha fallado la creación del volumen al extraer la referencia del volumen." #, python-format msgid "Volume device file path %s does not exist." msgstr "La vía de acceso de archivo de dispositivo de volumen %s no existe." #, python-format msgid "Volume device not found at %(device)s." msgstr "Dispositivo de volumen no encontrado en: %(device)s" #, python-format msgid "Volume driver %s not initialized." msgstr "Controlador de volumen %s no inicializado." msgid "Volume driver not ready." msgstr "Driver de volumen no está preparado." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "Driver de volumen ha reportado un error: %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "" "El volumen tiene una instantánea temporal que no se puede suprimir en este " "momento." msgid "Volume has children and cannot be deleted!" msgstr "El volumen tiene hijos y no se puede suprimir." #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "El volumen del grupo de consistencia %s está conectado. Es necesario " "desconectarlo primero." msgid "Volume in consistency group still has dependent snapshots." msgstr "" "El volumen del grupo de consistencia tiene todavía instantáneas dependientes." #, python-format msgid "Volume is attached to a server. (%s)" msgstr "El volumen está conectado a un servidor. (%s)" msgid "Volume is in-use." msgstr "El volumen está en uso." msgid "Volume is not available." msgstr "El volumen no está disponible." msgid "Volume is not local to this node" msgstr "El volumen no es local para este nodo" msgid "Volume is not local to this node." msgstr "El volumen no es local para este nodo." msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "Se ha solicitado copia de seguridad de metadatos de volumen pero este " "controlador no soporta aún esta característica." 
#, python-format msgid "Volume migration failed: %(reason)s" msgstr "Ha fallado la migración en volumen: %(reason)s" msgid "Volume must be available" msgstr "El volumen deber estar disponible" msgid "Volume must be in the same availability zone as the snapshot" msgstr "El volumen debe estar en la misma disponibilidad que la instantánea" msgid "Volume must be in the same availability zone as the source volume" msgstr "" "El volumen debe estar en la misma zona de disponibilidad que el volumen de " "origen" msgid "Volume must have a volume type" msgstr "El volumen debe tener un tipo de volumen" msgid "Volume must not be part of a consistency group." msgstr "El volumen no puede formar parte de un grupo de consistencia." msgid "Volume must not be replicated." msgstr "El volumen no debe replicarse." msgid "Volume must not have snapshots." msgstr "El volumen no debe tener instantáneas." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "No se ha encontrado el volumen para la instancia %(instance_id)s." msgid "Volume not found on configured storage backend." msgstr "" "No se ha encontrado el volumen en el programa de fondo de almacenamiento " "configurado." msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "No se ha encontrado el volumen en el programa de fondo de almacenamiento " "configurado. Si el nombre del volumen contiene \"/\", renómbrelo y vuelva a " "intentarlo." msgid "Volume not found on configured storage pools." msgstr "" "No se ha encontrado el volumen en las agrupaciones de almacenamiento " "configuradas." msgid "Volume not found." msgstr "No se ha encontrado el volumen." msgid "Volume not unique." msgstr "El volumen no es exclusivo." msgid "Volume not yet assigned to host." msgstr "Aún no se ha asignado el volumen al host." msgid "Volume reference must contain source-name element." msgstr "La referencia de volumen debe contener el elemento source-name." #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "La réplica de volumen de %(volume_id)s no se ha encontrado." #, python-format msgid "Volume service %s failed to start." msgstr "No se ha podido iniciar el servicio de volumen %s." msgid "Volume should have agent-type set as None." msgstr "El volumen debe tener agent-type establecido como None." #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "El tamaño de volumen %(volume_size)sGB no puede ser menor que el tamaño de " "minDisk de imagen %(min_disk)sGB." #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "El tamaño de volumen '%(size)s' debe ser un entero y mayor que 0" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "El tamaño del volumen '%(size)s'GB no puede ser menor que el tamaño original " "del volumen %(source_size)sGB. Deben ser >= tamaño de volumen original." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "El tamaño de volumen '%(size)s'GB no puede ser menor que el tamaño de la " "instantánea %(snap_size)sGB. Deben ser >= el tamaño de instantánea original." msgid "Volume size increased since the last backup. Do a full backup." 
msgstr "" "El tamaño del volumen ha aumentado desde la última copia de seguridad. Haga " "una copia de seguridad completa." msgid "Volume size must be a multiple of 1 GB." msgstr "El tamaño del volumen debe ser un múltiplo de 1 GB." msgid "Volume size must be multiple of 1 GB." msgstr "El tamaño del volumen debe ser múltiplo de 1 GB." msgid "Volume size must multiple of 1 GB." msgstr "El tamaño de volumen debe ser múltiplo de 1 GB." #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" "El estado de volumen debe estar disponible, pero el estado actual es: %s" msgid "Volume status is in-use." msgstr "El estado del volumen es en uso." #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "El estado de volumen debe ser \"disponible\" o \"en-uso\" para la " "instantánea. (es %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "El estado de volumen debe ser \"disponible\" o \"en-uso\"." #, python-format msgid "Volume status must be %s to reserve." msgstr "El estado de volumen debe ser %s para poder reservarlo." msgid "Volume status must be 'available'." msgstr "El estado de volumen debe ser 'disponible'." msgid "Volume to Initiator Group mapping already exists" msgstr "El volumen para la correlación del grupo de iniciadores ya existe" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "El volumen del que se va a hacer una copia de seguridad debe estar " "disponible o en uso, pero el estado actual es \"%s\"." msgid "Volume to be restored to must be available" msgstr "El volumen que restaurar debe estar disponible" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "No se ha podido encontrar el tipo de volumen %(volume_type_id)s." #, python-format msgid "Volume type ID '%s' is invalid." msgstr "El ID de tipo de volumen '%s' no es válido." #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "El acceso de tipo de volumen para la combinación %(volume_type_id)s / " "%(project_id)s ya existe." #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "No se ha encontrado el acceso de tipo de volumen para la combinación " "%(volume_type_id)s / %(project_id)s." #, python-format msgid "Volume type does not match for share %s." msgstr "El tipo de volumen no coincide para la unidad compartida %s." #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "El cifrado del tipo de volumen para el tipo %(type_id)s ya existe." #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "El cifrado de tipo de volumen para el tipo %(type_id)s no existe." msgid "Volume type name can not be empty." msgstr "EL nombre de tipo de volumen no puede estar vacío." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" "No se ha podido encontrar el tipo de volumen con el nombre " "%(volume_type_name)s." #, python-format msgid "Volume with volume id %s does not exist." msgstr "El volumen con el ID de volumen %s no existe." #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "El volumen %(volumeName)s no es un volumen concatenado. 
Sólo puede realizar " "una ampliación del volumen concatenado. Saliendo..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" "El volumen %(volumeName)s no se ha añadido al grupo de almacenamiento " "%(sgGroupName)s." #, python-format msgid "Volume: %s could not be found." msgstr "Volumen: %s no se ha encontrado." #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "El volumen %s ya se gestiona en Cinder." msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "" "Los volúmenes se fragmentarán en objetos de este tamaño (en megabytes)." msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" "Se ha superado el número de volúmenes por cuenta en las cuentas de " "SolidFire, tanto primarias como secundarias." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "VzStorage config 'vzstorage_used_ratio' no válido. Debe ser > 0 y <= 1.0: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "El archivo de config VzStorage en %(config)s no existe." msgid "Wait replica complete timeout." msgstr "Al esperar la réplica se ha agotado el tiempo de espera." #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Esperar sincronización ha fallado. Estado de ejecución: %s." msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "Esperando a que todos los nodos se unan al clúster. Asegúrese de que los " "daemons sheep se estén ejecutando." msgid "We should not do switch over on primary array." msgstr "No deberíamos hacer la conmutación en la matriz principal." msgid "Wrong resource call syntax" msgstr "Sintaxis de llamada a recurso incorrecta" msgid "X-IO Volume Driver exception!" msgstr "Excepción del controlador de volumen X-IO" msgid "XML support has been deprecated and will be removed in the N release." msgstr "El soporte XML está en desuso y se eliminará en el release N." msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "" "XtremIO no configurado correctamente, no se ha encontrado ningún portal iscsi" msgid "XtremIO not initialized correctly, no clusters found" msgstr "" "XtremIO no se ha inicializado correctamente, no se han encontrado clústeres" msgid "You must implement __call__" msgstr "Debe implementar __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "Debe instalar hpe3parclient para poder utilizar controladores 3PAR. Ejecute " "\"pip install python-3parclient\" para instalar hpe3parclient." msgid "You must supply an array in your EMC configuration file." msgstr "Debe proporcionar una matriz en el archivo de configuración EMC." #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "Su tamaño original de %(originalVolumeSize)s GB es mayor que: %(newSize)s " "GB. Sólo se admite la ampliación. Saliendo..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "Zona" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Política de distribución en zonas: %s, no reconocida" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s."
msgstr "" "_create_and_copy_vdisk_data: no se han podido obtener los atributos para " "vdisk %s." msgid "_create_host failed to return the host name." msgstr "_create_host no ha devuelto el nombre de host." msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: no puede convertir el nombre de host. El nombre de host no " "está en unicode o serie." msgid "_create_host: No connector ports." msgstr "_create_host: no hay puertos de conector." msgid "_create_local_cloned_volume, Replication Service not found." msgstr "" "_create_local_cloned_volume, no se ha encontrado el servicio de replicación." #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume, nombre de volumen: %(volumename)s, nombre de " "volumen de origen: %(sourcevolumename)s, instancia de volumen de origen: " "%(source_volume)s, instancia de volumen de destino: %(target_volume)s, " "código de retorno: %(rc)lu, error: %(errordesc)s." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - no se han encontrado mensajes de realizado " "satisfactoriamente en la salida de la CLI.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgid "_create_volume_name, id_code is None." msgstr "_create_volume_name, id_code es None." msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession, no se puede encontrar el servicio de replicación" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession, no se ha definido el tipo de sesión de copia. Sesión de " "copia: %(cpsession)s, tipo de copia: %(copytype)s." #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession, sesión de copia: %(cpsession)s, operación: " "%(operation)s, Código de retorno: %(rc)lu, Error: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume, nombre de volumen: %(volumename)s, código de retorno: " "%(rc)lu, error: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "_delete_volume, nombre de volumen: %(volumename)s, nombre de volumenel " "servicio de configuración de almacenamiento." #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service, nombre de clase: %(classname)s, InvokeMethod, no se " "puede establecer conexión con ETERNUS." msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "_extend_volume_op: no se permite ampliar un volumen con instantáneas." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, conector: %(connector)s, Asociadores: " "FUJITSU_AuthorizedTarget, no se puede establecer conexión con ETERNUS." 
#, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group, conector: %(connector)s, EnumerateInstanceNames, no se " "puede establecer conexión con ETERNUS." #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,conector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, no se puede establecer conexión con " "ETERNUS." #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, no " "se puede establecer conexión con ETERNUS." #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service, nombre de clase: %(classname)s, " "EnumerateInstanceNames, no se puede establecer conexión con ETERNUS." #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" "_find_initiator_names, conector: %(connector)s, no se ha encontrado el " "iniciador." #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun, nombre de volumen: %(volumename)s, EnumerateInstanceNames, no se " "puede establecer conexión con ETERNUS." #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, no se puede " "establecer conexión con ETERNUS." #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg, nombre de archivo: %(filename)s, nombre de etiqueta: " "%(tagname)s,los datos son None. Edite la configuración del controlador y " "corríjalo." #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection, nombre de archivo: %(filename)s, ip: %(ip)s, " "puerto: %(port)s, usuario: %(user)s, contraseña: ****, URL: %(url)s, ERROR." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn no " "encontrado." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, Nombres asociador: " "CIM_BindsTo, no se puede establecer conexión con ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "no se puede establecer conexión con ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, no se " "puede establecer conexión con ETERNUS." 
#, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: las cabeceras y los valores del atributo no coinciden.\n" " Cabeceras: %(header)s\n" " Valores: %(row)s." msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "_get_host_from_connector no ha podido devolver el nombre de host para el " "conector." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, no se ha podido obtener host-affinity de aglist/" "vol_instance, affinitygroup: %(ag)s, ReferenceNames, no se puede establecer " "conexión con ETERNUS." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, no se ha podido obtener la instancia de host-affinity, " "volmap: %(volmap)s, GetInstance, no se puede establecer conexión con ETERNUS." msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, Asociadores: FUJITSU_SAPAvailableForElement, no se puede " "establecer conexión con ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, no se puede " "establecer conexión con ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, no se puede establecer conexión con ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, no se puede establecer " "conexión con ETERNUS." msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "" "_get_target_port, EnumerateInstances, no se puede establecer conexión con " "ETERNUS." #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "" "_get_target_port, protocolo: %(protocol)s, no se ha encontrado target_port." #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "" "_get_unmanaged_replay: No se puede encontrar la instantánea denominada %s" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay: No se puede encontrar el ID de volumen %s" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: Debe especificar source-name." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties: no se ha podido obtener la información de " "conexión de FC para la conexión host-volumen. ¿Está el host configurado " "correctamente para conexiones de FC?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: no se ha encontrado ningún nodo en el grupo de E/" "S %(gid)s para el volumen %(vol)s." 
#, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun, vol_instance.path:%(vol)s, nombre de volumen: %(volumename)s, " "volume_uid: %(uid)s, iniciador: %(initiator)s, destino: %(tgt)s, aglist: " "%(aglist)s, no se ha encontrado el servicio de configuración de " "almacenamiento." #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun, vol_instance.path: %(volume)s, nombre de volumen: " "%(volumename)s, volume_uid: %(uid)s, aglist: %(aglist)s, no se ha encontrado " "el servicio de configuración de controlador." #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun, nombre de volumen: %(volumename)s, volume_uid: %(volume_uid)s, " "Grupo de afinidad: %(ag)s, Código de retorno: %(rc)lu, Error: %(errordesc)s." #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun,vol_instance.path: %(volume)s, nombres de asociadores: " "CIM_ProtocolControllerForUnit, no se puede establecer conexión con ETERNUS." msgid "_update_volume_stats: Could not get storage pool data." msgstr "" "_update_volume_stats: no se han podido obtener los datos de la agrupación de " "almacenamiento." #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete, cpsession: %(cpsession)s, el estado de la sesión de " "copia es INTERRUMPIDO." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "Error de add_vdisk_copy: ya existe una copia de volumen %s. La adición de " "otra copia superaría el límite de 2 copias." msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" "add_vdisk_copy se ha iniciado sin una copia de disco virtual en la " "agrupación esperada." #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants debe ser un valor booleano, se ha obtenido '%s'." 
msgid "already created" msgstr "ya creado" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "conecte instantánea del nodo remoto" #, python-format msgid "attribute %s not lazy-loadable" msgstr "el atributo %s no es de carga diferida" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "copia de seguridad: %(vol_id)s no ha podido crear enlace fijo de dispositivo " "de %(vpath)s a %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "copia de seguridad: %(vol_id)s no ha podido obtener notificación de éxito de " "copia de seguridad de servidor.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "copia de seguridad: %(vol_id)s no ha podido ejecutar dsmc debido a " "argumentos no válidos en %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "copia de seguridad: %(vol_id)s no ha podido ejecutar dsmc en %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "copia de seguridad: %(vol_id)s ha fallado. %(path)s no es un archivo." #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "copia de seguridad: %(vol_id)s ha fallado. %(path)s es de un tipo de archivo " "inesperado. Se soportan archivos de bloque o normales, la modalidad de " "archivo real es %(vol_mode)s." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "copia de seguridad: %(vol_id)s ha fallado. No se puede obtener vía de acceso " "real al volumen en %(path)s." 
msgid "being attached by different mode" msgstr "conectado por medio de un modo diferente" #, python-format msgid "call failed: %r" msgstr "Ha fallado la llamada: %r" msgid "call failed: GARBAGE_ARGS" msgstr "Ha fallado la llamada: GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "Ha fallado la llamada: PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "Ha fallado la llamada: PROG_MISMATCH: %r" msgid "call failed: PROG_UNAVAIL" msgstr "Ha fallado la llamada: PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "No se encuentra lun-map, ig:%(ig)s vol:%(vol)s" msgid "can't find the volume to extend" msgstr "no se puede encontrar el volumen para ampliar" msgid "can't handle both name and index in req" msgstr "no se pueden gestionar el nombre y el índice en la solicitud" msgid "cannot understand JSON" msgstr "no se puede entender JSON" msgid "cannot understand XML" msgstr "no se puede entender XML" #, python-format msgid "cg-%s" msgstr "cg-%s" msgid "cgsnapshot assigned" msgstr "cgsnapshot asignada" msgid "cgsnapshot changed" msgstr "cgsnapshot modificada" msgid "cgsnapshots assigned" msgstr "cgsnapshots asignado" msgid "cgsnapshots changed" msgstr "cgsnapshots modificadas" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error: se necesita contraseña o clave privada SSH para la " "autenticación: establezca la opción san_password o san_private_key." msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error: no se ha podido determinar el ID del sistema." msgid "check_for_setup_error: Unable to determine system name." msgstr "" "check_for_setup_error: no se ha podido determinar el nombre del sistema." msgid "check_hypermetro_exist error." msgstr "Error de check_hypermetro_exist." #, python-format msgid "clone depth exceeds limit of %s" msgstr "la profundidad de clon excede el límite de %s" msgid "consistencygroup assigned" msgstr "consistencygroup asignado" msgid "consistencygroup changed" msgstr "consistencygroup modificado" msgid "control_location must be defined" msgstr "control_location se debe definir" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume, el volumen de origen no existe en ETERNUS." #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume, nombre de instancia del volumen de destino: " "%(volume_instancename)s, error al obtener la instancia." msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume: el tamaño de origen y de destino son distintos." #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume: el tamaño del volumen de origen %(src_vol)s es " "%(src_size)dGB y no cabe en el volumen de destino %(tgt_vol)s of size " "%(tgt_size)dGB." msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src se debe crear desde una instantánea de CG " "o desde un origen de CG." msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." 
msgstr "" "create_consistencygroup_from_src solo admite un origen de cgsnapshot o bien " "un origen de grupo de consistencia. No se pueden utilizar diversos orígenes." msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src admite un origen de cgsnapshot o bien un " "origen de grupo de consistencia. No se pueden utilizar diversos orígenes." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "" "create_copy: El disco virtual de origen %(src)s (%(src_id)s) no existe." #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy: el disco virtual de origen %(src)s no existe." msgid "create_host: Host name is not unicode or string." msgstr "create_host: El nombre de host no está en Unicode ni es una serie." msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: No se han proporcionado iniciadores o wwpns." msgid "create_hypermetro_pair error." msgstr "Error de create_hypermetro_pair." #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" "create_snapshot, eternus_pool: %(eternus_pool)s, no se ha encontrado la " "aprupación." #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot, nombre de instantánea: %(snapshotname)s, nombre de volumen " "de origen: %(volumename)s, vol_instance.path: %(vol_instance)s, nombre de " "volumen de destino: %(d_volumename)s, agrupación: %(pool)s, código de " "retorno: %(rc)lu, error: %(errordesc)s." #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot, nombre de volumen: %(s_volumename)s, no se ha encontrado el " "volumen de origen en ETERNUS." #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "" "create_snapshot, nombre de volumen: %(volumename)s, no se ha encontrado el " "servicio de replicación." #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot: el estado de volumen debe ser \"available\" (disponible) o " "\"in-use\" (en uso) para la instantánea. El estado no válido es %s." msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: la obtención de volumen de origen ha fallado." #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume, volumen: %(volume)s, EnumerateInstances, no puede conectar " "con ETERNUS." #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume, volumen: %(volume)s, nombre de volumen: %(volumename)s, " "eternus_pool: %(eternus_pool)s, no se ha encontrado el servicio de " "configuración de almacenamiento." #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." 
msgstr "" "create_volume, nombre de volumen: %(volumename)s, nombre de agrupación: " "%(eternus_pool)s, código de retorno: %(rc)lu, error: %(errordesc)s." msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "" "create_volume_from_snapshot, el volumen de origen no existe en ETERNUS." #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot, nombre de instancia del volumen de destino: " "%(volume_instancename)s, error al obtener la instancia." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "create_volume_from_snapshot: la instantánea %(name)s no existe." #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: el estado de la instantánea debe ser \"available" "\" (disponible) para crear el volumen. El estado no válido es: %s." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" "create_volume_from_snapshot: el tamaño de origen y de destino son distintos." msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "create_volume_from_snapshot: el tamaño del volumen es distinto al volumen " "basado en la instantánea." msgid "deduplicated and auto tiering can't be both enabled." msgstr "" "los niveles deduplicados y automáticos no pueden estar ambos habilitados." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "suprimir: %(vol_id)s no ha podido ejecutar dsmc debido a argumentos no " "válidos con stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "suprimir: %(vol_id)s no ha podido ejecutar dsmc sin salida estándar: " "%(out)s\n" " stderr: %(err)s" msgid "delete_hypermetro error." msgstr "Error de delete_hypermetro." #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator: %s ACL no encontrado. Continuando." msgid "delete_replication error." msgstr "Error de delete_replication." #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" "suprimiendo la instantánea %(snapshot_name)s que tiene volúmenes dependientes" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "suprimiendo el volumen %(volume_name)s que tiene instantánea" msgid "detach snapshot from remote node" msgstr "desconecte instantánea del nodo remoto" msgid "do_setup: No configured nodes." msgstr "do_setup: No hay nodos configurado." msgid "element is not a child" msgstr "el elemento no es un hijo" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries debe ser mayor o igual que 0" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "error al grabar archivo en Swift, el MD5 del archivo en Swift %(etag)s no es " "el mismo que el MD5 del archivo enviado a Swift %(md5)s" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" "extend_volume, eternus_pool: %(eternus_pool)s, no se ha encontrado la " "aprupación." 
#, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume, volumen: %(volume)s, nombre de volumen: %(volumename)s, " "eternus_pool: %(eternus_pool)s, no se ha encontrado el servicio de " "configuración de almacenamiento." #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume, nombre de volumen: %(volumename)s, código de retorno: " "%(rc)lu, error: %(errordesc)s, tipo de agrupación: %(pooltype)s." #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "" "extend_volume, nombre del volumen: %(volumename)s, no se ha encontrado el " "volumen." msgid "failed to create new_volume on destination host" msgstr "error al crear new_volume en el host de destino" msgid "fake" msgstr "ficticio" #, python-format msgid "file already exists at %s" msgstr "el archivo ya existe en %s" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "fileno no admitido por SheepdogIOWrapper" msgid "fileno() not supported by RBD()" msgstr "fileno() no admitido por RBD()" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "El sistema de archivos %s no existe en la aplicación Nexenta Store" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "flashsystem_multihostmap_enabled está establecido en False y no permite la " "correlación de varios hosts. CMMVC6071E La correlación de disco virtual a " "host no se ha creado, ya que el disco virtual ya está correlacionado con un " "host." msgid "flush() not supported in this version of librbd" msgstr "nivel() no admitido en esta versión de librbd" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s respaldado por: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s respaldado por: %(backing_file)s" msgid "force delete" msgstr "forzar supresión" msgid "get_hyper_domain_id error." msgstr "Error de get_hyper_domain_id." msgid "get_hypermetro_by_id error." msgstr "Error de get_hypermetro_by_id." #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params: No se ha podido obtener la IP de destino para el iniciador " "%(ini)s, compruebe el archivo config." #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: No se han podido obtener los atributos para el volumen %s" msgid "glance_metadata changed" msgstr "glance_metadata modificado" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode está establecido en copy_on_write, pero %(vol)s y " "%(img)s pertenece a sistemas de archivos diferentes." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode está establecido en copy_on_write, pero %(vol)s y " "%(img)s pertenecen a conjuntos de archivos diferentes." 
#, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "hgst_group %(grp)s y hgst_user %(usr)s deben correlacionarse con usuarios/" "grupos válidos en cinder.conf" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "" "No se ha encontrado hgst_net %(net)s especificado en cinder.conf en el " "clúster" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "hgst_redundancy debe establecerse en 0 (no HA) o 1 (HA) en cinder.conf." msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "hgst_space_mode debe ser un octal/ent en cinder.conf" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "El servidor hgst_storage %(svr)s no tiene el formato :" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "hgst_storage_servers deben definirse en cinder.conf" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "Es posible que el servicio HTTP se haya inhabilitado de forma abrupta o que " "se haya puesto en estado de mantenimiento en el transcurso de esta operación." msgid "id cannot be None" msgstr "el ID no puede ser None" #, python-format msgid "image %s not found" msgstr "no se ha encontrado la imagen %s" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "" "initialize_connection, volumen: %(volume)s, no se ha encontrado el volumen." #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "" "initialize_connection: No se han podido obtener los atributos para el " "volumen %s." #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "initialize_connection: Falta un atributo para el volumen %s." #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: No se ha encontrado ningún nodo en el grupo de E/S " "%(gid)s para el volumen %(vol)s." #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: el disco virtual %s no está definido." #, python-format msgid "invalid user '%s'" msgstr "usuario no válido '%s'" #, python-format msgid "iscsi portal, %s, not found" msgstr "portal iscsi portal, %s, no encontrado" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "iscsi_ip_address debe establecerse en el archivo de configuración al " "utilizar el protocolo 'iSCSI'." msgid "iscsiadm execution failed. " msgstr "Ha fallado la ejecución de iscsiadm." #, python-format msgid "key manager error: %(reason)s" msgstr "error de gestor clave: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr:fixed_key no está definido" msgid "limit param must be an integer" msgstr "el parámetro de límite debe ser un entero" msgid "limit param must be positive" msgstr "el parámetro de límite debe ser positivo" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" " manage_existing no puede gestionar un volumen conectado con hosts. " "Desconecte este volumen de los hosts existentes antes de realizar la " "importación." msgid "manage_existing requires a 'name' key to identify an existing volume." 
msgstr "" "manage_existing necesita una clave 'name' para identificar un volumen " "existente." #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot: Error al gestionar la reproducción existente " "%(ss)s en el volumen %(vol)s" #, python-format msgid "marker [%s] not found" msgstr "no se ha encontrado el marcador [%s]" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "mdiskgrp no tiene comillas %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "migration_policy debe ser 'on-demand' o 'never', se ha pasado: %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "" "mkfs ha fallado en el volumen %(vol)s, el mensaje de error era: %(err)s." msgid "mock" msgstr "simulación" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs no está instalado" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "drbdmanage ha encontrado varios recursos con el nombre %s" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "se han encontrado varios recursos con el ID de instantánea %s" msgid "name cannot be None" msgstr "el nombre no puede ser None" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "" "naviseccli_path: No se ha podido encontrar la herramienta NAVISECCLI " "%(path)s." #, python-format msgid "no REPLY but %r" msgstr "ninguna RESPUESTA, sino %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "no se ha encontrado ninguna instantánea con el id %s en drbdmanage" #, python-format msgid "not exactly one snapshot with id %s" msgstr "no exactamente una instantánea con el id %s" #, python-format msgid "not exactly one volume with id %s" msgstr "no exactamente un volumen con el id %s" #, python-format msgid "obj missing quotes %s" msgstr "obj no tiene comillas %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled no está inactivo." msgid "progress must be an integer percentage" msgstr "el progreso debe ser un porcentaje de entero" msgid "promote_replica not implemented." msgstr "promote_replica no se ha implementado." msgid "provider must be defined" msgstr "Se debe definir el proveedor" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "El controlador de volumen necesita qemu-img %(minimum_version)s o posterior. " "Versión qemu-img actual: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img no está instalado y la imagen es de tipo %s. Solo se puede usar " "imágenes RAW si qemu-img no está instalado." msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img no está instalado y el formato del disco no está especificado. " "Solo se pueden usar las imágenes RAW si qemu-img no está instalado." msgid "rados and rbd python libraries not found" msgstr "no se han encontrado las bibliotecas rados y rbd python" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted solo puede ser 'no', 'yes' o 'only', no %r" #, python-format msgid "replication_device should be configured on backend: %s." 
msgstr "" "Se debe configurar el dispositivo de replicación (replication_device) en el " "programa de fondo: %s." #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "" "Falta el dispositivo de replicación (replication_device) con el ID de " "programa de fondo (backend_id) [%s]." #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover ha fallado. No se ha encontrado %s." msgid "replication_failover failed. Backend not configured for failover" msgstr "" "replication_failover ha fallado. No se ha configurado un programa de fondo " "para la migración tras error" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "restaurar: %(vol_id)s no ha podido ejecutar dsmc debido a argumentos no " "válidos en %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "restaurar: %(vol_id)s no ha podido ejecutar dsmc en %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "restaurar: %(vol_id)s ha fallado.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup terminada anormalmente, la lista de objetos real no coincide " "con la lista de objetos almacenada en metadatos." msgid "root element selecting a list" msgstr "elemento raíz que selecciona una lista" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "Falta el miembro %s de rtslib_fb: es posible que necesite un python-rtslib-" "fb más reciente." msgid "san_ip is not set." msgstr "san_ip no está establecido." msgid "san_ip must be set" msgstr "se debe establecer san_ip" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "" "san_ip: Configuración de campo obligatorio. san_ip no se ha establecido" msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "san_login y/o san_password no se han establecido para el controlador Datera " "en cinder.conf. Establezca esta información e inicie el servicio cinder-" "volume de nuevo." msgid "serve() can only be called once" msgstr "serve() sólo se puede llamar una vez " msgid "service not found" msgstr "no se ha encontrado el servicio" msgid "snapshot does not exist" msgstr "la instantánea no existe" #, python-format msgid "snapshot id:%s not found" msgstr "id:%s de instantánea no encontrado" #, python-format msgid "snapshot-%s" msgstr "instantánea-%s" msgid "snapshots assigned" msgstr "instantáneas asignadas" msgid "snapshots changed" msgstr "instantáneas modificadas" #, python-format msgid "source vol id:%s not found" msgstr "id:%s de volumen de origen no encontrado" #, python-format msgid "source volume id:%s is not replicated" msgstr "El ID de volumen de origen: %s no se replica" msgid "source-name cannot be empty." msgstr "source-name no puede estar vacío." msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "El formato de source-name debería ser: 'vmdk_path@vm_inventory_path'." 
#, python-format msgid "status must be %s and" msgstr "el estado debe ser %s y" msgid "status must be available" msgstr "el estado debe ser available" msgid "stop_hypermetro error." msgstr "Error de stop_hypermetro." msgid "subclasses must implement construct()!" msgstr "las subclases deben implementar construct()!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo fallido, continuando como si nada hubiera pasado" msgid "sync_hypermetro error." msgstr "Error de sync_hypermetro." msgid "sync_replica not implemented." msgstr "sync_replica no se ha implementado." #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli no instalado. No se ha podido crear un directorio predeterminado " "(%(default_path)s): %(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "" "terminate_connection: No se ha podido obtener el nombre de host del conector." msgid "timeout creating new_volume on destination host" msgstr "tiempo de desactivación al crear new_volume en el host de destino" msgid "too many body keys" msgstr "demasiadas claves de cuerpo" #, python-format msgid "umount: %s: not mounted" msgstr "umount: %s: no montado" #, python-format msgid "umount: %s: target is busy" msgstr "umount: %s: el destino está ocupado" msgid "umount: : some other error" msgstr "umount: : algún otro error" msgid "umount: : target is busy" msgstr "umount: : el destino está ocupado" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot: No se puede encontrar la instantánea denominada %s" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot: No se puede encontrar el ID de volumen %s" #, python-format msgid "unrecognized argument %s" msgstr "argumento no reconocido %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "algoritmo de compresión no soportado: %s" msgid "valid iqn needed for show_target" msgstr "es necesario un iqn válido para show_target" #, python-format msgid "vdisk %s is not defined." msgstr "El disco virtual %s no está definido." msgid "vmemclient python library not found" msgstr "No se ha encontrado la biblioteca python vmemclient." #, python-format msgid "volume %s not found in drbdmanage" msgstr "No se ha encontrado el volumen %s en drbdmanage" msgid "volume assigned" msgstr "volumen asignado" msgid "volume changed" msgstr "volumen modificado" msgid "volume does not exist" msgstr "el volumen no existe" msgid "volume is already attached" msgstr "El volumen ya está conectado" msgid "volume is not local to this node" msgstr "el volumen no es local para este nodo" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "el tamaño de volumen %(volume_size)d es demasiado pequeño para restaurar una " "copia de seguridad con un tamaño de %(size)d." #, python-format msgid "volume size %d is invalid." msgstr "el tamaño de volumen %d no es válido." msgid "volume_type cannot be None" msgstr "volume_type no puede ser None (Ninguno)" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "Es necesario proporcionar el tipo_volumen al crear un volumen en un grupo " "de ." msgid "volume_type_id cannot be None" msgstr "volume_type_id no puede ser None (Ninguno)" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." 
msgstr "" "Es necesario proporcionar tipos_volumen para crear el grupo de consistencia " "%(name)s." #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "" "Es necesario proporcionar tipos_volumen para crear el grupo de consistencia " "%s." msgid "volumes assigned" msgstr "volúmenes asignados" msgid "volumes changed" msgstr "volúmenes modificados" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition: %s ha agotado el tiempo de espera." #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "" "La propiedad zfssa_manage_policy se debe establecer a 'strict' o 'loose'. El " "valor actual es: %s." msgid "{} is not a valid option." msgstr "{} no es una opción válida." cinder-8.0.0/cinder/locale/ja/0000775000567000056710000000000012701406543017223 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/ja/LC_MESSAGES/0000775000567000056710000000000012701406543021010 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/ja/LC_MESSAGES/cinder.po0000664000567000056710000142342612701406257022632 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # FIRST AUTHOR , 2011 # Ray Akimoto , 2015 # Ryo Fujita , 2013 # Tomoyuki KATO , 2013 # Akihiro Motoki , 2015. #zanata # KATO Tomoyuki , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Kyohei Moriyama , 2016. #zanata # Tsutomu Kimura , 2016. #zanata # 笹原 昌美 , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev29\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-29 01:54+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-29 06:07+0000\n" "Last-Translator: Tsutomu Kimura \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder バージョン: %(version)s\n" #, python-format msgid " but size is now %d" msgstr "しかし、現在のサイズは %d です" #, python-format msgid " but size is now %d." msgstr "しかし、現在のサイズは %d です。" msgid " or " msgstr "または" #, python-format msgid "%(attr)s is not set." msgstr "%(attr)s が設定されていません。" #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing がホストに接続したボリュームを管理できません。イン" "ポート前に既存のホストからこのボリュームの接続を解除してください。" #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "結果: %(res)s。" #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." 
msgstr "%(file)s: アクセス権が拒否されました。" #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: 予期しない CLI 出力により失敗しました。\n" "コマンド: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "状況コード: %(_status)s\n" "本体: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s、subjectAltName: %(sanList)s。" #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "NetworkPortal の作成に関する %(msg_type)s: 他のサービスが IP %(ip)s 上のポー" "ト %(port)d を使用していないことを確認してください。" #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s の最小文字数要件は %(min_length)s です。" #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s が %(max_length)s 文字を超えています。" #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: バックアップ %(bck_id)s、ボリューム %(vol_id)s が失敗しました。バック" "アップ・オブジェクトが予期しないモードです。イメージまたはファイルのバック" "アップがサポートされています。実際のモードは %(vol_mode)s です。" #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "%(service)s サービスはストレージ・アプライアンス %(host)s で %(status)s に" "なっていません" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s は %(max_value)d 以下である必要があります" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s は %(min_value)d 以上である必要があります" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "%(workers)d の %(worker_name)s 値が無効です。0 より大きい値にしなければなりま" "せん。" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "結果内に %s \"data\" がありません。" #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "%s にアクセスできません。GPFS がアクティブであること、およびファイル・システ" "ムがマウントされていることを確認してください。" #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "%s はブロックを含んでいないため、複製操作を使用してサイズ変更できません。" #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "%s は圧縮ボリューム上でホストされているため、複製操作を使用してサイズ変更する" "ことはできません" #, python-format msgid "%s configuration option is not set." msgstr "%s の設定オプションが設定されていません。" #, python-format msgid "%s does not exist." msgstr "%s は存在しません。" #, python-format msgid "%s is not a directory." msgstr "%s はディレクトリーではありません。" #, python-format msgid "%s is not a string or unicode" msgstr "%s が文字列でもユニコードでもありません" #, python-format msgid "%s is not installed" msgstr "%s がインストールされていません" #, python-format msgid "%s is not installed." msgstr "%s がインストールされていません。" #, python-format msgid "%s is not set" msgstr "%s が設定されていません" #, python-format msgid "%s is not set and is required for the replication device to be valid." 
msgstr "" "%s が設定されていません。これはレプリケーションデバイスを有効にするために必要" "です。" #, python-format msgid "%s is not set." msgstr "%s が設定されていません。" #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s は有効な raw または qcow2 イメージでなければなりません。" #, python-format msgid "%s must be an absolute path." msgstr "%s は絶対パスである必要があります。" #, python-format msgid "%s must be an integer." msgstr "%s は整数である必要があります。" #, python-format msgid "%s not set in cinder.conf" msgstr "%s が cinder.conf に設定されていません" #, python-format msgid "%s not set." msgstr "%s が設定されていません。" #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "設定ファイルの flashsystem_connection_protocol で '%(prot)s' は無効です。有効" "な値は %(enabled)s です。" msgid "'active' must be present when writing snap_info." msgstr "snap_info の書き込み時には 'active' が存在しなければなりません。" msgid "'consistencygroup_id' must be specified" msgstr "'consistencygroup_id' を指定する必要があります" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' の解析に失敗しました。" msgid "'status' must be specified." msgstr "'status' を指定する必要があります。" msgid "'volume_id' must be specified" msgstr "'volume_id' を指定する必要があります" msgid "'{}' object has no attribute '{}'" msgstr "'{}' オブジェクトに属性 '{}' がありません" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "LUN (HLUN) が見つかりませんでした。(LDEV: %(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "矛盾する可能性のある同時実行リクエストが行われました。 " #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "フリー LUN (HLUN) が見つかりませんでした。異なるホスト・グループを追加してく" "ださい。(LDEV: %(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" "ホスト・グループを追加できませんでした。(ポート: %(port)s、名前: %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "ホスト・グループを削除できませんでした。(ポート: %(port)s、gid: %(gid)s、名" "前: %(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "ホスト・グループが無効です。(ホスト・グループ: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "ペアを削除できません。(P-VOL: %(pvol)s、S-VOL: %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "ペアを作成できませんでした。ペアの最大数を超過しています。(コピー・メソッド: " "%(copy_method)s、P-VOL: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "パラメーターが無効です。(%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "パラメーター値が無効です。(%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "プールが見つかりませんでした。(プール ID: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "スナップショット状況が無効です。(状況: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" "フェイルオーバーを行うために、有効なセカンダリーターゲットを指定する必要があ" "ります。" msgid "A volume ID or share was not specified." msgstr "ボリューム ID またはシェアが指定されませんでした。" #, python-format msgid "A volume status is invalid. 
(status: %(status)s)" msgstr "ボリューム状況が無効です。(状況: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s がエラー文字列 %(err)s で失敗しました" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API バージョンの文字列 %(version)s が無効な形式です。MajorNum.MinorNum の形式" "である必要があります。" msgid "API key is missing for CloudByte driver." msgstr "CloudByte ドライバーの API キーがありません。" #, python-format msgid "API response: %(response)s" msgstr "API レスポンス: %(response)s" #, python-format msgid "API response: %s" msgstr "API 応答: %s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "このメソッドでは API バージョン %(version)s はサポートされていません。" msgid "API version could not be determined." msgstr "API バージョンを判別できませんでした。" msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "ゼロでないクォータを持つ子プロジェクトを削除しようとしています。これは実施す" "べきではありません。" msgid "Access list not available for public volume types." msgstr "パブリックボリュームタイプではアクセスリストを使用できません。" msgid "Activate or deactivate QoS error." msgstr "QoS のアクティブ化またはアクティブ化解除のエラー。" msgid "Activate snapshot error." msgstr "スナップショットのアクティブ化のエラー。" msgid "Add FC port to host error." msgstr "ホストへの FC ポート追加のエラー。" msgid "Add fc initiator to array error." msgstr "アレイへの FC イニシエーター追加のエラー。" msgid "Add initiator to array error." msgstr "アレイへのイニシエーター追加のエラー。" msgid "Add lun to cache error." msgstr "キャッシュへの LUN 追加のエラー。" msgid "Add lun to partition error." msgstr "パーティションへの LUN 追加のエラー。" msgid "Add mapping view error." msgstr "マッピングビュー追加のエラー。" msgid "Add new host error." msgstr "新規ホスト追加のエラー。" msgid "Add port to port group error." msgstr "ポートグループへのポート追加のエラー。" #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "管理対象となる指定されたすべてのストレージプールが存在しません。設定を確認し" "てください。存在しないプール: %s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "" "API バージョンのリクエストは VersionedMethod オブジェクトと比較する必要があり" "ます。" #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "SheepdogDriverでエラーが発生しました (Reason: %(reason)s)" msgid "An error has occurred during backup operation" msgstr "バックアップ操作中にエラーが発生しました" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "スナップショット '%s' を変更しようとしたときにエラーが発生しました。" #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "ボリューム \"%s\" の検出中にエラーが発生しました。" #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "LUNcopy 操作中にエラーが発生しました。LUNcopy 名: %(luncopyname)s。LUNcopy 状" "況: %(luncopystatus)s。LUNcopy 状態: %(luncopystate)s。" #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "ボリューム \"%s\" の読み取り中にエラーが発生しました。" #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "ボリューム \"%s\" への書き込み中にエラーが発生しました。" #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "iSCSI CHAP ユーザーを追加できませんでした。(ユーザー名: %(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "iSCSI CHAP ユーザーを削除できませんでした。(ユーザー名: %(user)s)" #, python-format msgid "" "An iSCSI target could not be added. 
(port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "iSCSI ターゲットを追加できませんでした。(ポート: %(port)s、別名: %(alias)s、" "理由: %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "iSCSI ターゲットを削除できませんでした。(ポート: %(port)s、tno: %(tno)s、別" "名: %(alias)s)" msgid "An unknown exception occurred." msgstr "不明な例外が発生しました。" msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "" "サブプロジェクトに割り当てられたトークンを持つユーザーは、親のクォータを参照" "することはできません。" msgid "Append port group description error." msgstr "ポートグループの説明追加のエラー。" #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "スイッチへのゾーンおよび cfgs の適用が失敗しました (エラーコード =" "%(err_code)s エラーメッセージ =%(err_msg)s。" #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "" "アレイが存在しないかオフラインになっています。現在のアレイの状態は %s です。" msgid "Associate host to hostgroup error." msgstr "ホストグループへのホストの関連付けのエラー。" msgid "Associate host to mapping view error." msgstr "マッピングビューへのホストの関連付けのエラー。" msgid "Associate initiator to host error." msgstr "ホストへのイニシエーターの関連付けのエラー。" msgid "Associate lun to QoS error." msgstr "QoS への LUN の関連付けのエラー。" msgid "Associate lun to lungroup error." msgstr "LUN グループへの LUN の関連付けのエラー。" msgid "Associate lungroup to mapping view error." msgstr "マッピングビューへの LUN グループの関連付けのエラー。" msgid "Associate portgroup to mapping view error." msgstr "マッピングビューへのポートグループの関連付けのエラー。" msgid "At least one valid iSCSI IP address must be set." msgstr "有効な iSCSI IP アドレスを 1 つ以上設定する必要があります。" #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "無効な認証キーを使用して %s を転送しようとしています。" #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "CloudByte のストレージで認証グループ [%s] の詳細が見つかりません。" msgid "Auth user details not found in CloudByte storage." msgstr "CloudByte のストレージで認証ユーザーの詳細が見つかりません。" msgid "Authentication error" msgstr "認証エラー" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "認証が失敗しました。スイッチのクレデンシャルを検証してください。エラーコード " "%s。" msgid "Authorization error" msgstr "許可エラー" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "アベイラビリティーゾーン '%(s_az)s' は無効です。" msgid "Available categories:" msgstr "使用可能カテゴリー:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "バックエンド QoS 仕様はこのストレージファミリーおよび ONTAP バージョンでサ" "ポートされません。" #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "バックエンドが存在しません(%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "" "バックエンドで既にフェイルオーバーが完了しました。フェイルバックすることはで" "きません。" #, python-format msgid "Backend reports: %(message)s" msgstr "バックエンド・レポート: %(message)s" msgid "Backend reports: item already exists" msgstr "バックエンド・レポート: 項目は既に存在します" msgid "Backend reports: item not found" msgstr "バックエンド・レポート: 項目が見つかりません" msgid "Backend server not NaServer." msgstr "バックエンド・サーバーが NaServer ではありません。" #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "バックエンド・サービス再試行タイムアウト・ヒット: %(timeout)s 秒" msgid "Backend storage did not configure fiber channel target." msgstr "" "バックエンド・ストレージによってファイバー・チャネル・ターゲットは構成されま" "せんでした。" msgid "Backing up an in-use volume must use the force flag." msgstr "" "使用中のボリュームのバックアップを行う際は、force フラグを使用する必要があり" "ます。" #, python-format msgid "Backup %(backup_id)s could not be found." 
msgstr "バックアップ %(backup_id)s が見つかりませんでした。" msgid "Backup RBD operation failed" msgstr "バックアップ RBD 操作が失敗しました" msgid "Backup already exists in database." msgstr "データベースのバックアップが既に存在しています。" #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "バックアップドライバーがエラーを報告しました: %(message)s" msgid "Backup id required" msgstr "バックアップ ID が必要です" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "" "スナップショットが含まれる GlusterFS ボリュームのバックアップはサポートされて" "いません。" msgid "Backup is only supported for SOFS volumes without backing file." msgstr "" "バックアップはバッキング・ファイルのない SOFS ボリュームでのみサポートされま" "す。" msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "" "バックアップは、ロー形式の GlusterFS ボリュームに対してのみサポートされます。" msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "" "バックアップは、ロー形式の SOFS ボリュームに対してのみサポートされます。" msgid "Backup operation of an encrypted volume failed." msgstr "暗号化ボリュームのバックアップ操作が失敗しました。" #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "バックアップ・サービス %(configured_service)s では検査がサポートされていませ" "ん。バックアップ ID %(id)s は検査されません。検査をスキップします。" #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "バックアップ・サービス %(service)s では検査がサポートされていません。バック" "アップ ID %(id)s は検査されません。リセットをスキップします。" #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "" "バックアップに含まれるスナップショットは 1 つのみでなければなりませんが、%s " "個含まれています" msgid "Backup status must be available" msgstr "バックアップ状況は「使用可能」でなければなりません" #, python-format msgid "Backup status must be available and not %s." msgstr "バックアップ状況は %s ではなく「使用可能」でなければなりません。" msgid "Backup status must be available or error" msgstr "バックアップ状況は「使用可能」または「エラー」でなければなりません" msgid "Backup to be restored has invalid size" msgstr "復元するバックアップのサイズが無効です" #, python-format msgid "Bad Status line returned: %(arg)s." 
msgstr "正しくない状況表示行が返されました: %(arg)s。" #, python-format msgid "Bad key(s) in quota set: %s" msgstr "無効なキーが quota set 内にあります: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "ストレージボリュームバックエンド API からの不正な応答または想定しない応答: " "%(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "不正なプロジェクト形式: プロジェクトの形式が正しくありません (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Datera クラスターに不正な要求 (無効な引数) が送信されました: %(args)s | " "%(message)s" msgid "Bad response from Datera API" msgstr "Datera API からの正しくない応答" msgid "Bad response from SolidFire API" msgstr "SolidFire API からの正しくない応答" #, python-format msgid "Bad response from XMS, %s" msgstr "XMS からの正しくない応答、%s" msgid "Binary" msgstr "バイナリー" msgid "Blank components" msgstr "空白コンポーネント" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Blockbridge API の認証スキーム (トークンまたはパスワード)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Blockbridge API のパスワード ('password' の認証スキーム)" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Blockbridge API のトークン ('token' の認証スキーム )" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Blockbridge API のユーザー ('password' の認証スキーム)" msgid "Blockbridge api host not configured" msgstr "Blockbridge API のホストが設定されていません" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "Blockbridge に無効な認証スキーム '%(auth_scheme)s' が設定されています" msgid "Blockbridge default pool does not exist" msgstr "Blockbridge のデフォルトプールが存在しません" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Blockbridge のパスワードが設定されていません ('password' の認証スキームに必" "要)" msgid "Blockbridge pools not configured" msgstr "Blockbridge プールが設定されていません" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Blockbridge のトークンが設定されていません ('token' の認証スキームに必要)" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Blockbridge のユーザーが設定されていません ('password' の認証スキームに必要)" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "BrocadeファイバーチャネルゾーニングCLIエラー:%(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "BrocadeファイバーチャネルゾーニングHTTPエラー:%(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "CHAP 秘密は 12 バイトから 16 バイトである必要があります。" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "CLI 例外出力:\n" "コマンド: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI 例外出力:\n" "コマンド: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s。" msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E VDisk は既にホストにマップされているため、VDisk からホストへのマッ" "ピングは作成されませんでした。\n" "\"" msgid "CONCERTO version is not supported" msgstr "CONCERTO バージョンはサポートされません" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) がアレイ上に存在しません" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "" "キャッシュ名がありません。キーで smartcache:cachename を設定してください。" #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." 
msgstr "" "キャッシュボリューム %(cache_vol)s にスナップショット %(cache_snap)s がありま" "せん。" #, python-format msgid "Cache volume %s does not have required properties" msgstr "キャッシュボリューム %s に必須のプロパティーがありません" msgid "Call returned a None object" msgstr "呼び出しが None オブジェクトを返しました" msgid "Can not add FC port to host." msgstr "ホストに FC ポートを追加できません。" #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "" "キャッシュ名 %(name)s によってキャッシュ ID を見つけることができません。" #, python-format msgid "Can not find partition id by name %(name)s." msgstr "名前 %(name)s によってパーティション ID を見つけることができません。" #, python-format msgid "Can not get pool info. pool: %s" msgstr "プール情報を取得できません。プール: %s" #, python-format msgid "Can not translate %s to integer." msgstr "%s を整数に変換できません。" #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "'scality_sofs_config' にアクセスできません: %s" msgid "Can't attach snapshot." msgstr "スナップショットを追加できません。" msgid "Can't decode backup record." msgstr "バックアップレコードを復号化できません。" #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "レプリケーションボリュームを拡張できません。ボリューム: %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "アレイで LUN を見つけることができません。source-name または source-id を確認" "してください。" #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "" "アレイでキャッシュ名を見つけることができません。キャッシュ名は %(name)s で" "す。" #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "DB から LUN ID を見つけることができません。ボリューム: %(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" "アレイで LUN 情報が見つかりません。ボリューム: %(id)s、LUN 名: %(name)s。" #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" "アレイでパーティション名を見つけることができません。パーティション名は " "%(name)s です。" #, python-format msgid "Can't find service: %s" msgstr "サービスが見つかりません: %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "アレイでスナップショットを見つけることができません。source-name または " "source-id を確認してください。" msgid "Can't find the same host id from arrays." msgstr "アレイから同一のホスト ID が見つかりません。" #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "" "スナップショットからボリューム ID を取得できません。スナップショット: %(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "ボリューム ID を取得できません。ボリューム名: %s。" #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "Cinder に LUN %(lun_id)s をインポートできません。LUN タイプが一致しません。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "Cinder に LUN %s をインポートできません。すでに HyperMetroPair 内に存在しま" "す。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "Cinder に LUN %s をインポートできません。すでに LUN コピータスク内に存在しま" "す。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "" "Cinder に LUN %s をインポートできません。すでに LUN グループ内に存在します。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "" "Cinder に LUN %s をインポートできません。すでに LUN ミラー内に存在します。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "" "Cinder に LUN %s をインポートできません。すでに SplitMirror 内に存在します。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "Cinder に LUN %s をインポートできません。すでにマイグレーションタスク内に存在" "します。" #, python-format msgid "" "Can't import LUN %s to Cinder. 
Already exists in a remote replication task." msgstr "" "Cinder に LUN %s をインポートできません。すでにリモートのレプリケーションタス" "ク内に存在します。" #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "" "Cinder に LUN %s をインポートできません。LUN 状態が通常ではありません。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "Cinder にスナップショット%s をインポートできません。スナップショットはボ" "リュームに属していません。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "Cinder にスナップショット%s をインポートできません。スナップショットはイニシ" "エーターに公開されています。" #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "Cinder にスナップショット %s をインポートできません。スナップショットの状態が" "通常ではないか、実行状態がオンラインではありません。" #, python-format msgid "Can't open config file: %s" msgstr "構成ファイルを開くことができません: %s" msgid "Can't parse backup record." msgstr "バックアップレコードを解析できません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "ボリューム %(volume_id)s にはボリュームタイプがないため、このボリュームを整合" "性グループ %(group_id)s に追加できません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "ボリューム %(volume_id)s が既に整合性グループ %(orig_group)s 内に存在するた" "め、このボリュームを整合性グループ %(group_id)s に追加することはできません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "ボリューム %(volume_id)s は見つからないため、整合性グループ %(group_id)s に追" "加できません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "ボリューム %(volume_id)s は存在しないため、整合性グループ %(group_id)s に追加" "できません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "ボリューム %(volume_id)s は無効な状態 %(status)s であるため、整合性グルー" "プ%(group_id)s に追加できません。有効な状態は %(valid)s です。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "ボリュームタイプ %(volume_type)s は整合性グループ %(group_id)s ではサポートさ" "れていないため、ボリューム %(volume_id)s をこの整合性グループに追加できませ" "ん。" #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "すでに接続されているボリューム%s を接続できません。マルチ接続は " "'netapp_enable_multiattach' 設定オプションにより無効になっています。" msgid "Cannot change VF context in the session." msgstr "VF コンテキストをセッション内で変更できません。" #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "VF コンテキストを変更できません。指定された VF は管理可能な VF リスト " "%(vf_list)s で使用可能ではありません。" msgid "Cannot connect to ECOM server." msgstr "ECOM サーバーに接続できません。" #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "サイズ %(src_vol_size)s のボリュームからサイズ %(vol_size)s の複製を作成でき" "ません" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s."
msgstr "" "スナップショット %(snap)s は有効な状態ではないため、整合性グループ%(group)s " "を作成できません。有効な状態は %(valid)s です。" #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "ソースボリューム %(source_vol)s が有効な状態にないため、整合性グループ " "%(group)s を作成できません。有効な状態は %(valid)s です。" #, python-format msgid "Cannot create directory %s." msgstr "ディレクトリー %s を作成できません。" msgid "Cannot create encryption specs. Volume type in use." msgstr "暗号化仕様を作成できません。ボリューム・タイプは使用中です。" #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "ディスク形式 %s のイメージを作成できません。vmdk ディスク形式のみが受け入れら" "れます。" #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "マスキングビュー %(maskingViewName)s を作成できません。" #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" " 'netapp_enable_multiattach' が true に設定されている場合、%(req)s 以上のボ" "リュームを ESeries アレイに作成できません。" #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "名前が %(sgGroupName)s のストレージグループを作成または検出できません。" #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "サイズ %(snap_size)s のスナップショットからサイズ %(vol_size)s のボリュームを" "作成できません" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "サイズが %s のボリュームを作成できません: 8GB の倍数ではありません。" #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "名前 %(name)s および仕様 %(extra_specs)s を使用して volume_type を作成できま" "せん" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "スナップショットが存在する間は、LUN %s は削除できません。" #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "キャッシュボリューム: %(cachevol_name)s を削除できません。%(updated_at)s に更" "新されたこのキャッシュボリュームには現在 %(numclones)d のボリュームインスタン" "スがあります。" #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "キャッシュボリューム: %(cachevol_name)s を削除できません。%(updated_at)s に更" "新されたこのキャッシュボリュームには 現在 %(numclones)s のボリュームインスタ" "ンスがあります。" msgid "Cannot delete encryption specs. Volume type in use." msgstr "暗号化仕様を削除できません。ボリューム・タイプは使用中です。" msgid "Cannot determine storage pool settings." msgstr "ストレージプールの設定を決定できません。" msgid "Cannot execute /sbin/mount.sofs" msgstr "/sbin/mount.sofs を実行できません" #, python-format msgid "Cannot find CG group %s." msgstr "CG グループ %s が見つかりません。" #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "ストレージ・システム %(storage_system)s のコントローラー構成サービスが見つか" "りません。" #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" "スナップショット %s のボリュームを作成するための複製サービスが見つかりませ" "ん。" #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "" "スナップショット %s を削除するレプリケーションサービスが見つかりません。" #, python-format msgid "Cannot find Replication service on system %s." msgstr "複製サービスがシステム %s に見つかりません。" #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "" "ボリュームが見つかりません: %(id)s。処理の管理を解除します。処理を終了しま" "す。" #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." 
msgstr "ボリューム: %(volumename)s が見つかりません。拡張操作。終了中..." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "ボリューム %(volumeName)s の装置番号が見つかりません。" msgid "Cannot find migration task." msgstr "マイグレーションタスクを見つけることができません。" #, python-format msgid "Cannot find replication service on system %s." msgstr "システム %s でレプリケーションサービスが見つかりません。" #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "ソース CG のインスタンスが見つかりません。consistencygroup_id: %s。" #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "チャンネル ID %(channel_id)s によって mcs_id を取得できません。" msgid "Cannot get necessary pool or storage system information." msgstr "必要なプールまたはストレージ・システムの情報を取得できません。" #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "ボリューム %(volumeName)s のストレージグループ %(sgGroupName)s を取得または作" "成できません " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "イニシエーターグループ %(igGroupName)s を取得または作成できません。" #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "ポートグループ %(pgGroupName)s を取得できません。" #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "ストレージ・グループ %(sgGroupName)s をマスキングビュー " "%(maskingViewInstanceName)s から取得できません。" #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "%(sps)s でサポートされるサイズ範囲を取得できません。戻りコード: %(rc)lu。エ" "ラー: %(error)s。" #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "FAST ポリシー %(fastPolicyName)s のデフォルトのストレージグループを取得できま" "せん。" msgid "Cannot get the portgroup from the masking view." msgstr "マスキングビューからポートグループを取得できません。" msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" "Scality SOFS をマウントできません。syslog でエラーについて確認してください" msgid "Cannot ping DRBDmanage backend" msgstr "DRBDmanage のバックエンドに ping を送信できません" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "ボリューム %(id)s をホスト %(host)s 上に配置できません" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "ソースから整合性グループ %(name)s を作成するために、'cgsnapshot_id' または " "'source_cgid' の両方を提供することができません。" msgid "Cannot register resource" msgstr "リソースを登録できません" msgid "Cannot register resources" msgstr "リソースを登録できません" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "ボリューム %(volume_id)s は整合性グループ %(group_id)s にないため、このグルー" "プから削除できません。" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "ボリューム %(volume_id)s は無効な状態 %(status)s であるため、整合性グルー" "プ%(group_id)s から削除できません。有効な状態は %(valid)s です。" #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "HPE3PARDriver から %s にタイプ変更することはできません。" msgid "Cannot retype from one 3PAR array to another." msgstr "3PAR アレイから別のアレイにタイプ変更することはできません。" msgid "Cannot retype to a CPG in a different domain." msgstr "別のドメインの CPG にタイプ変更できません。" msgid "Cannot retype to a snap CPG in a different domain." msgstr "別のドメインのスナップ CPG にタイプ変更できません。" msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." 
msgstr "" "vgc-cluster コマンドを実行できません。ソフトウェアが実装済みで、権限が適切に" "設定されていることを確認してください。" msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "" "hitachi_serial_number と hitachi_unit_name の両方を設定することはできません。" msgid "Cannot specify both protection domain name and protection domain id." msgstr "保護ドメイン名と保護ドメイン ID の両方を指定することはできません。" msgid "Cannot specify both storage pool name and storage pool id." msgstr "" "ストレージプール名とストレージプール ID の両方を指定することはできません。" #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "有効な名前、説明、add_volumes、または remove_volumes が指定されなかったため、" "整合性グループ %(group_id)s を更新できません。" msgid "Cannot update encryption specs. Volume type in use." msgstr "暗号化仕様を更新できません。ボリューム・タイプは使用中です。" #, python-format msgid "Cannot update volume_type %(id)s" msgstr "volume_type %(id)s を更新できません" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "オブジェクト %(instanceName)s の存在を確認できません。" msgid "Cascade option is not supported." msgstr "カスケードオプションはサポートされていません。" #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "CgSnapshot %(cgsnapshot_id)s が見つかりませんでした。" msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost が空です。整合性グループは作成されません。" msgid "Cgsnapshot status must be available or error" msgstr "cgsnapshot 状況は「使用可能」または「エラー」でなければなりません" msgid "Change hostlun id error." msgstr "hostlun ID 変更のエラー。" msgid "Change lun priority error." msgstr "LUN 優先順位変更のエラー。" msgid "Change lun smarttier policy error." msgstr "LUN smarttier ポリシー変更のエラー。" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "変更によって、次のリソースの使用量が 0 未満になります: %(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "" "このドライバーに割り当てられている ZFS 共有のアクセス権を確認してください。" msgid "Check hostgroup associate error." msgstr "ホストグループ関連付けの確認のエラー。" msgid "Check initiator added to array error." msgstr "アレイに追加されたイニシエーターの確認のエラー。" msgid "Check initiator associated to host error." msgstr "ホストに関連付けられたイニシエーターの確認のエラー。" msgid "Check lungroup associate error." msgstr "LUN グループ関連付けの確認のエラー。" msgid "Check portgroup associate error." msgstr "ポートグループ関連付けの確認のエラー。" msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "HTTP サービスの状態を確認してください。また、HTTPS ポート番号が cinder.conf " "に指定されている番号と同じであることも確認してください。" msgid "Chunk size is not multiple of block size for creating hash." msgstr "" "チャンクサイズが、ハッシュを作成するためのブロックサイズの倍数ではありませ" "ん。" #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "CiscoファイバーチャネルゾーニングCLIエラー:%(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "%(storageSystem)s では複製フィーチャーはライセンス交付されていません。" #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "クローンタイプ '%(clone_type)s' は無効です。有効な値は '%(full_clone)s' およ" "び '%(linked_clone)s' です。" msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." 
msgstr "" "クラスターの形式が正しく設定されていません。ドッグクラスター形式を実行する必" "要があるかもしれません。" #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Coho Data Cinder ドライバーの失敗: %(message)s" msgid "Coho rpc port is not configured" msgstr "Coho の rpc ポートが設定されていません" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "CLI でブロックされたコマンド %(cmd)s を取り消しました" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition: %s タイムアウト" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s タイムアウト。" msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "圧縮イネーブラーがインストールされていません。圧縮されたボリュームを作成でき" "ません。" #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "コンピュートクラスター: %(cluster)s が見つかりません。" msgid "Condition has no field." msgstr "条件にフィールドがありません。" #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" "設定 'max_over_subscription_ratio' は無効です。0 より大きくなければなりませ" "ん: %s" msgid "Configuration error: dell_sc_ssn not set." msgstr "設定エラー: dell_sc_ssn not が設定されていません。" #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "構成ファイル %(configurationFile)s が存在しません。" msgid "Configuration is not found." msgstr "設定が見つかりません。" #, python-format msgid "Configuration value %s is not set." msgstr "構成値 %s が設定されていません。" msgid "Configured host type is not supported." msgstr "構成済みホスト・タイプはサポートされていません。" #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "ボリューム種別 %s に競合する QoS 仕様があります。QoS 仕様がボリューム種別に関" "連付けられている場合、レガシーの \"netapp:qos_policy_group\" はボリューム種別" "の追加仕様で許可されません。" #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Glance との接続に失敗しました: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Swift との接続に失敗しました: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "コネクターが %s を提供しません" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "コネクターは必要な情報を持っていません: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "整合性グループ %s には、まだボリュームがあります。これを削除するには「強制」" "フラグが必要です。" #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "整合性グループ %s には、まだ従属 cgsnapshot があります。" msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "整合性グループが空です。cgsnapshot は作成されません。" #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "整合性グループ状況は「使用可能」または「エラー」でなければなりませんが、現在" "の状況は %s です" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" "整合性グループ状況は「使用可能」でなければなりませんが、現在の状況は%s です。" #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "ConsistencyGroup %(consistencygroup_id)s が見つかりませんでした。" msgid "Container" msgstr "コンテナー" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "コンテナーフォーマット: %s は VMDK ドライバーでサポートされません。 'bare' の" "みがサポートされます。" msgid "Container size smaller than required file size." 
msgstr "コンテナーサイズが必要なファイルサイズを下回っています。" msgid "Content type not supported." msgstr "コンテンツ・タイプはサポートされていません。" #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "コントローラー構成サービスが %(storageSystemName)s に見つかりません。" #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "コントローラー IP「%(host)s」を解決できませんでした: %(e)s。" #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "%(f1)s に変換されましたが、現在の形式は %(f2)s です" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "%(vol_format)s に変換されましたが、現在の形式は %(file_format)s です" #, python-format msgid "Converted to raw, but format is now %s" msgstr "ローに変換されましたが、現在の形式は %s です" #, python-format msgid "Converted to raw, but format is now %s." msgstr "ローに変換されましたが、現在の形式は %s です。" msgid "Coordinator uninitialized." msgstr "初期化されていないコーディネーター。" #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "ボリュームのコピー・タスクが失敗しました: convert_to_base_volume: id=%(id)s、" "status=%(status)s。" #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "" "ボリュームタスクのコピーが失敗しました: create_cloned_volume id=%(id)s、状態 " "=%(status)s。" #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "" "%(src_type)s %(src_id)s から %(vol_id)s にメタデータをコピーしています。" msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "どの Swift のエンドポイントを使用すべきか決定できませんでした。これを設定する" "には、サービスカタログまたは cinder.conf config のオプションである " "'backup_swift_auth_url' を使用します。" msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "どの Swift のエンドポイントを使用すべきか決定できませんでした。これを設定する" "には、サービスカタログまたは cinder.conf config のオプションである " "'backup_swift_url' を使用します。" msgid "Could not find DISCO wsdl file." msgstr "DISCO の wsdl ファイルが見つかりませんでした。" #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "GPFS クラスター ID が見つかりませんでした: %s。" #, python-format msgid "Could not find GPFS file system device: %s." msgstr "GPFS ファイル・システム・デバイスが見つかりませんでした: %s。" #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" "タイプ %(type_id)s を持つボリューム %(volume_id)s のホストが見つかりませんで" "した。" #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s で config が見つかりませんでした" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "" "ボリューム %(volumeName)s の iSCSI エクスポートが見つかりませんでした。" #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "ボリューム %s の iSCSI エクスポートが見つかりませんでした" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "" "ボリューム: %(volume_id)s の iSCSI ターゲットを見つけることができませんでし" "た。" #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "" "コマンド %(cmd)s: %(out)sの出力でキーを見つけることができませんでした。" #, python-format msgid "Could not find parameter %(param)s" msgstr "パラメーター %(param)s が見つかりませんでした" #, python-format msgid "Could not find target %s" msgstr "ターゲット %s が見つかりませんでした" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." 
msgstr "" "スナップショット '%s' の親ボリュームをアレイで見つけることができませんでし" "た。" #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "" "ボリューム %(vol)s で一意のスナップショット %(snap)s を見つけることができませ" "んでした。" msgid "Could not get system name." msgstr "システム名を取得できませんでした。" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" "paste アプリケーション '%(name)s' を %(path)s からロードできませんでした" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "%s を読み取ることができませんでした。sudo で再実行します" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "スナップショット %(name)s の情報を読み取ることができませんでした。コード: " "%(code)s。理由: %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "設定ファイル %(file_path)s をリストアできませんでした: %(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "設定を %(file_path)s に保存できませんでした: %(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "整合性グループのスナップショット %s を開始できませんでした。" #, python-format msgid "Counter %s not found" msgstr "カウンター %s が見つかりません" msgid "Create QoS policy error." msgstr "QoS ポリシー作成のエラー。" #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップの作成が中止しました。予期していたバックアップ状況は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップの作成が中止しました。予期していたボリューム状況は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" msgid "Create consistency group failed." msgstr "整合性グループの作成に失敗しました。" #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "イメージ %(image)s からのタイプ %(type)s の暗号化されたボリュームの作成はサ" "ポートされません。" msgid "Create export for volume failed." msgstr "ボリュームのエクスポートの作成に失敗しました。" msgid "Create hostgroup error." msgstr "ホストグループ作成のエラー。" #, python-format msgid "Create hypermetro error. %s." msgstr "hypermetro 作成のエラー。%s。" msgid "Create lun error." msgstr "LUN 作成のエラー。" msgid "Create lun migration error." msgstr "LUN マイグレーション作成のエラー。" msgid "Create luncopy error." msgstr "LUN コピー作成のエラー。" msgid "Create lungroup error." msgstr "LUN グループ作成のエラー。" msgid "Create manager volume flow failed." msgstr "マネージャーボリュームフローの作成が失敗しました" msgid "Create port group error." msgstr "ポートグループ作成のエラー。" msgid "Create replication error." msgstr "レプリケーション作成のエラー。" #, python-format msgid "Create replication pair failed. Error: %s." msgstr "レプリケーションペアの作成が失敗しました。エラー: %s。" msgid "Create snapshot error." msgstr "スナップショット作成のエラー。" #, python-format msgid "Create volume error. Because %s." msgstr "ボリューム作成のエラー。理由 %s。" msgid "Create volume failed." msgstr "ボリュームの作成に失敗しました。" msgid "Creating a consistency group from a source is not currently supported." msgstr "ソースからの整合性グループの作成は現在サポートされません。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "ゾーン・セットの作成およびアクティブ化に失敗しました: (Zone set=%(cfg_name)s " "error=%(err)s)。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "ゾーン・セットの作成およびアクティブ化に失敗しました: (Zone set=%(zoneset)s " "error=%(err)s)。" #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "%(begin_period)s から %(end_period)s までの使用状況を作成中" msgid "Current host isn't part of HGST domain." 
msgstr "現在のホストが HGST ドメインに含まれません。" #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "現在のホストは、タイプ %(type)s のボリューム %(id)s に対して無効です。マイグ" "レーションは許可されません" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "現在、ボリューム %(vol)s のマップ済みホストは、サポート対象ではない " "%(group)s のホストグループ内にあります。" msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "非推奨: Cinder API の v1 をデプロイしてください。" msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "提供を終了しています: Cinder API の v2 を実装してください。" #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "DRBDmanage ドライバーのエラー: 予期されたキー \"%s\" が答えに含まれていませ" "ん。DRBDmanage のバージョンが間違っていませんか。" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "DRBDmanage ドライバーの設定エラー: 必要なライブラリー (dbus や drbdmanage.* " "など) が見つかりません。" #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "" "DRBDmanage が 1 つのリソース (\"%(res)s\") を予期しましたが、%(n)d が得られま" "した" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "DRBDmanage でスナップショットのリストア後新規のボリュームの待機のタイムアウト" "が発生しました。リソース \"%(res)s\"、ボリューム \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "DRBDmanage でスナップショットの作成のタイムアウトが発生しました。リソース " "\"%(res)s\"、スナップショット \"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "" "DRBDmanage でボリューム作成の待機のタイムアウトが発生しました。リソース " "\"%(res)s\"、ボリューム \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "DRBDmanage でボリュームサイズの待機のタイムアウトが発生しました。ボリューム " "ID \"%(id)s\" (res \"%(res)s\"、vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "データ ONTAP API バージョンを判別できませんでした。" msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" "7-Mode で動作する Data ONTAP は QoS ポリシーグループをサポートしません。" msgid "Database schema downgrade is not allowed." msgstr "データベーススキーマのダウングレードはできません。" #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "データセット %s は Nexenta Store アプライアンスで共有されません" #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "データセットグループ %s が Nexenta SA で見つかりません" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup は有効なプロビジョニング・タイプですが、WSAPI バージョン" "「%(dedup_version)s」バージョン「%(version)s」がインストールされていることを" "必要としています。" msgid "Dedup luns cannot be extended" msgstr "Dedup luns は拡張できません" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "重複排除イネーブラーがインストールされていません。重複排除されたボリュームを" "作成できません" msgid "Default pool name if unspecified." msgstr "デフォルトのプール名 (プール名を指定していない場合)" #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "リソース %(res)s のデフォルトの割り当て量は、デフォルトの割り当て量フラグ: " "quota_%(res)s によって設定されていますが、これは現在推奨されていません。デ" "フォルトの割り当て量にデフォルトの割り当て量クラスを使用してください。" msgid "Default volume type can not be found." 
msgstr "デフォルトのボリュームタイプが見つかりません。" msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "一連の影響を受けるプールに加え関連するバックエンドのクエリー文字列を定義しま" "す。" msgid "Delete LUNcopy error." msgstr "LUN コピー削除のエラー。" msgid "Delete QoS policy error." msgstr "QoS ポリシー削除のエラー。" msgid "Delete associated lun from lungroup error." msgstr "LUN グループからの関連付けされた LUN 削除のエラー。" #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "バックアップの削除が中止しました。現在構成されているバックアップサービス " "[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ・" "サービス [%(backup_service)s] ではありません。" msgid "Delete consistency group failed." msgstr "整合性グループの削除に失敗しました。" msgid "Delete hostgroup error." msgstr "ホストグループ削除のエラー。" msgid "Delete hostgroup from mapping view error." msgstr "マッピングビューからのホストグループ削除のエラー。" msgid "Delete lun error." msgstr "LUN 削除のエラー。" msgid "Delete lun migration error." msgstr "LUN マイグレーション削除のエラー。" msgid "Delete lungroup error." msgstr "LUN グループ削除のエラー。" msgid "Delete lungroup from mapping view error." msgstr "マッピングビューからの LUN グループ削除のエラー。" msgid "Delete mapping view error." msgstr "マッピングビュー削除のエラー。" msgid "Delete port group error." msgstr "ポートグループ削除のエラー。" msgid "Delete portgroup from mapping view error." msgstr "マッピングビューからのポートグループ削除のエラー。" msgid "Delete snapshot error." msgstr "スナップショット削除のエラー。" #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "状態 %s でのボリュームのスナップショット削除はサポートされていません。" #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップの削除が中止しました。予期していたバックアップ状況は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" msgid "Deleting volume from database and skipping rpc." msgstr "データベースからボリュームを作成中。rpc をスキップします。" #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "ゾーンの削除に失敗しました: (command=%(cmd)s error=%(err)s)。" msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "整合性グループをサポートするには Dell API 2.1 以降が必要です" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "" "直接接続では Dell Cinder ドライバーの設定エラーの複製を行うことはできません。" #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Dell Cinder ドライバーの設定エラー の replication_device %s が見つかりません" msgid "Deploy v3 of the Cinder API." msgstr "Cinder API の v3 を実装してください。" msgid "Describe-resource is admin only functionality" msgstr "Describe-resource は管理者専用の機能です" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" "宛先の migration_status は %(stat)s ですが、予期されたのは %(exp)s です。" msgid "Destination host must be different than the current host." msgstr "宛先ホストは現行ホストと異なっていなければなりません。" msgid "Destination volume not mid-migration." msgstr "宛先ボリュームはマイグレーション中ではありません" msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "ボリュームの切り離しが失敗しました: 2 つ以上の接続が存在するものの、" "attachment_id が提供されていません。" msgid "Detach volume from instance and then try again." msgstr "ボリュームをインスタンスから切り離して、再試行してください。" #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "名前 %(vol_name)s を持つ複数のボリュームが検出されました" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." 
msgstr "予期された列が %(fun)s で見つかりませんでした: %(hdr)s" #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "予期されるキー %(key)s が %(fun)s: %(raw)s で見つかりませんでした。" msgid "Disabled reason contains invalid characters or is too long" msgstr "「無効理由」に無効な文字が含まれているか、理由が長すぎます" #, python-format msgid "Domain with name %s wasn't found." msgstr "名前が %s のドメインが見つかりませんでした。" #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "ダウン・レベル GPFS クラスターが検出されました。クラスター・デーモン・レベル " "%(cur)s で GPFS 複製フィーチャーが有効になっていません。レベル %(min)s 以上は" "必要です。" #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "ドライバーの初期化接続に失敗しました (エラー: %(err)s)。" msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "ボリューム (LUN {}) にマイグレーションが禁止されているスナップショットが含ま" "れているため、ドライバーのタイプを変更することができません。" msgid "Driver must implement initialize_connection" msgstr "ドライバーは initialize_connection を実装する必要があります" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "ドライバーがインポートされたバックアップデータを正常に復号化しましたが、欠け" "ているフィールド (%s) があります。" #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "E シリーズのプロキシー API バージョン %(current_version)s は SSC の追加仕様を" "すべてサポートするわけではありません。このプロキシーバージョンは少なくとも " "%(min_version)s でなければなりません。" #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "EMC VNX Cinder ドライバー CLI 例外: %(cmd)s (戻りコード: %(rc)s)(出力: " "%(out)s)。" #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" "EMC VNX Cinder ドライバーの SP が使用できない例外: %(cmd)s (戻りコード: " "%(rc)s)(出力: %(out)s)。" msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp、EcomServerPort、EcomUserName、EcomPassword に有効な値を設定する" "必要があります。" #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "ソースから整合性グループ %(name)s を作成するには、'cgsnapshot_id' または " "'source_cgid' を指定する必要があります。" #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO %(slo)s またはワークロード %(workload)s のいずれかが無効です。以前のエ" "ラーステートメントで有効な値を調べてください。" msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "hitachi_serial_number または hitachi_unit_name のいずれかが必要です。" #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "エレメント構成サービスが %(storageSystemName)s に見つかりません。" msgid "Enables QoS." msgstr "QoS を有効化します。" msgid "Enables compression." msgstr "圧縮を有効化します。" msgid "Enables replication." msgstr "レプリケーションを有効化します。" msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "" "configfs が /sys/kernel/config でマウントされていることを確認してください。" #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." 
msgstr "" "groupInitiatorGroup %(initiatorgroup)s でのイニシエーター %(initiator)s の追" "加中にエラーが発生しました。戻りコード: %(ret.status)d メッセージ: " "%(ret.data)s。" #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "IQN %(iqn)s でターゲットグループ %(targetgroup)s への追加中にエラーが発生しま" "した。戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "Error Attaching volume %(vol)s." msgstr " ボリューム %(vol)s の追加に失敗しました。" #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "プール: %(pool)s プロジェクト: %(project)s 複製プロジェクト: %(clone_proj)s " "のボリューム: %(lun)s でのスナップショット: %(snapshot)s の複製中にエラーが発" "生しました。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "複製ボリュームの作成エラー: %(cloneName)s。戻りコード: %(rc)lu。エラー: " "%(error)s。" #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "複製ボリュームの作成エラー: ボリューム: %(cloneName)s、ソースボリューム: " "%(sourceName)s。戻りコード: %(rc)lu。エラー: %(error)s。" #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "グループの作成エラー: %(groupName)s。戻りコード: %(rc)lu。エラー: %(error)s。" #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "マスキングビューの作成エラー: %(groupName)s。戻りコード: %(rc)lu。エラー: " "%(error)s。" #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "ボリュームの作成エラー: %(volumeName)s。戻りコード: %(rc)lu。エラー: " "%(error)s。" #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "ボリュームの作成エラー: %(volumename)s。戻りコード: %(rc)lu。エラー: " "%(error)s。" #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "CreateGroupReplica エラー: ソース: %(source)s ターゲット: %(target)s。戻り" "コード: %(rc)lu。エラー: %(error)s。" #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "エイリアス %(alias)s でのイニシエーター %(initiator)s の作成中にエラーが発生" "しました。戻りコード: %(ret.status)d、メッセージ: %(ret.data)s" #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "プール %(pool)s でのプロジェクト %(project)s の作成中にエラーが発生しました。" "戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "プロパティー: %(property)s タイプ: %(type)s 説明: %(description)s の作成中に" "エラーが発生しました。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "共有 %(name)s の作成中にエラーが発生しました。戻りコード: %(ret.status)d、" "メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
msgstr "" "ボリューム %(lun)s でスナップショット %(snapshot)s をプール %(pool)s に作成し" "ているときにエラーが発生しました。プロジェクト: %(project)s、戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "共有 %(share)s でスナップショット %(snapshot)s をプール %(pool)s に作成中にエ" "ラーが発生しました。プロジェクト: %(project)s 戻りコード: %(ret.status)d " "メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "ターゲットの作成中にエラーが発生しました: %(alias)s。戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "IQN %(iqn)s でターゲット・グループ %(targetgroup)s の作成中にエラーが発生しま" "した。戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "ボリュームの作成中にエラーが発生しました: %(lun)s。サイズ: %(size)s、戻りコー" "ド: %(ret.status)d、メッセージ: %(ret.data)s。 " #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "" "新規複合ボリュームの作成エラー。戻りコード: %(rc)lu。エラー: %(error)s。" #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "プール: %(pool)s でのレプリケーションアクションの作成中にエラーが発生しまし" "た。プロジェクト: %(proj)s ターゲット: %(tgt)s のボリューム: %(vol)s および" "プール: %(tgt_pool)s 戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" msgid "Error Creating unbound volume on an Extend operation." msgstr "拡張操作でのアンバインド済みボリュームの作成エラーです。" msgid "Error Creating unbound volume." msgstr "アンバインドボリュームの作成エラーです。" #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "ボリュームの削除エラー: %(volumeName)s。戻りコード: %(rc)lu。エラー: " "%(error)s。" #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "グループの削除のエラー: %(storageGroupName)s。戻りコード: %(rc)lu。エラー: " "%(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "イニシエーターグループの削除のエラー: %(initiatorGroupName)s。戻りコード: " "%(rc)lu。エラー: %(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "共有 %(share)s でスナップショット %(snapshot)s をプール %(pool)s から削除中に" "エラーが発生しました。プロジェクト: %(project)s、戻りコード: %(ret.status)d、" "メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "ボリューム %(lun)s でスナップショット %(snapshot)s をプール %(pool)s から削除" "しているときにエラーが発生しました。プロジェクト: %(project)s、戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." 
msgstr "" "プール: %(pool)s、プロジェクト: %(project)s からのボリューム: %(lun)s の削除" "中にエラーが発生しました。戻りコード: %(ret.status)d、メッセージ: " "%(ret.data)s。" #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "プール: %(pool)s のプロジェクト: %(project)s の削除中にエラーが発生しました。" "戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "レプリケーションアクション: %(id)s の削除中にエラーが発生しました。戻りコー" "ド: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "拡張ボリュームのエラー: %(volumeName)s。戻りコード: %(rc)lu。エラー: " "%(error)s。" #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "イニシエーターの取得中にエラーが発生しました。InitiatorGroup: " "%(initiatorgroup)s 戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "プール統計: プール: %(pool)sの取得中にエラーが発生しました 戻りコード: " "%(status)d メッセージ: %(data)s。" #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "プロジェクト統計: プール: %(pool)s プロジェクト: %(project)s の取得中にエラー" "が発生しました。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "プール %(pool)s での共有 %(share)s の取得中にエラーが発生しました。プロジェク" "ト: %(project)s 戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "ボリューム %(lun)s からプール %(pool)s へのスナップショット %(snapshot)s の取" "得中にエラーが発生しました。プロジェクト: %(project)s、戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "ターゲットの取得中にエラーが発生しました: %(alias)s。戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "プール %(pool)s でのボリューム %(lun)s の取得中にエラーが発生しました。プロ" "ジェクト: %(project)s、戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "あるプールから別のプールへのボリュームのマイグレーション中にエラーが発生しま" "した。戻りコード: %(rc)lu。 エラー: %(error)s。" #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "マスキング・ビューの変更エラー: %(groupName)s。戻りコード: %(rc)lu。 エ" "ラー: %(error)s。" #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "" "プール所有権のエラーが発生しました: %(host)s はプール %(pool)s を所有していま" "せん。" #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "プール %(pool)s のボリューム %(lun)s でのプロパティー %(props)s の設定中にエ" "ラーが発生しました。プロジェクト: %(project)s、戻りコード: %(ret.status)d、" "メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. 
msgstr "" "マイグレーションセッションの終了エラー。戻りコード: %(rc)lu。エラー: " "%(error)s。" #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "イニシエーターの検査中にエラーが発生しました: %(iqn)s。戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "プールの検査中にエラーが発生しました: %(pool)s。戻りコード: %(ret.status)d、" "メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "プール %(pool)s でのプロジェクト %(project)s の検査中にエラーが発生しました。" "戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "サービス %(service)s の検査中にエラーが発生しました。戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "ターゲットの検査中にエラーが発生しました: %(alias)s。戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "プロジェクト %(project)s およびプール %(pool)s で共有 %(share)s を検証中にエ" "ラーが発生しました。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "インスタンス・パス %(volumeInstancePath)s によるボリューム %(volumeName)s の" "追加エラーです。" #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "イニシエーターのグループへの追加エラー: %(groupName)s。戻りコード: " "%(rc)lu。エラー: %(error)s。" #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "複合ボリュームへのボリュームの追加エラー。エラー: %(error)s。" #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "" "ボリューム %(volumename)s のターゲット基本ボリュームへの追加エラーです。" #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "ストレージグループ %(storageGroupName)s の FAST ポリシー %(fastPolicyName)s " "への関連付けエラーです。エラーの説明: %(errordesc)s。" #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "" "ボリューム %s の接続エラー。ターゲットの制限に達した可能性があります。" #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "複製関係の切断エラー: 同期名: %(syncName)s。戻りコード: %(rc)lu。エラー: " "%(error)s。" msgid "Error connecting to ceph cluster." msgstr "ceph クラスターへの接続エラーです。" #, python-format msgid "Error connecting via ssh: %s" msgstr "ssh を介した接続中にエラーが発生しました: %s" #, python-format msgid "Error creating volume: %s." msgstr "ボリュームの作成中にエラーが発生しました: %s。" msgid "Error deleting replay profile." msgstr "リプレープロファイルの削除でエラーが発生しました。" #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "ボリューム %(ssn)s の削除でエラーが発生しました: %(volume)s" #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "ボリューム %(vol)s の削除中にエラーが発生しました: %(err)s。" #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "エバリュエーター構文解析中にエラーが発生しました: %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ."
msgstr "" "プール %(pool)s での共有 %(share)s の編集中にエラーが発生しました。戻りコー" "ド: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "NetworkPortal の iSER の有効化に関するエラー: IP %(ip)s 上の iSCSI ポート " "%(port)d で RDMA がサポートされていることを確認してください。" #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "失敗した接続のクリーンアップ中にエラーが検出されました: %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "" "CloudByte API [%(cmd)s] の実行中にエラーが発生しました。エラー: %(err)s。" msgid "Error executing EQL command" msgstr "EQL コマンドを実行するときにエラーが発生しました" #, python-format msgid "Error executing command via ssh: %s" msgstr "ssh を介したコマンドの実行エラー: %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "ボリューム %(vol)s の拡張中にエラーが発生しました: %(err)s。" #, python-format msgid "Error extending volume: %(reason)s" msgstr "ボリュームの拡張エラーです: %(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "%(name)s の検索中にエラーが発生しました。" #, python-format msgid "Error finding %s." msgstr "%s の検索中にエラーが発生しました。" #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "ReplicationSettingData の取得エラー。戻りコード: %(rc)lu。エラー: %(error)s。" msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "アプライアンスバージョンの詳細の取得中にエラーが発生しました。戻りコード: " "%(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "" "名前 %(name)s からドメイン ID を取得中にエラーが発生しました: %(err)s。" #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "名前 %(name)s からドメイン ID を取得中にエラーが発生しました: %(id)s。" msgid "Error getting initiator groups." msgstr "イニシエーター・グループの取得エラー。" #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "名前 %(pool)s からプール ID を取得中にエラーが発生しました: %(err)s。" #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "" "名前 %(pool_name)s からプール ID を取得中にエラーが発生しました: " "%(err_msg)s。" #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "レプリケーションアクション: %(id)s の取得中にエラーが発生しました。戻りコー" "ド: %(ret.status)d メッセージ: %(ret.data)s。" msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "レプリケーションソースの詳細の取得中にエラーが発生しました。戻りコード: " "%(ret.status)d メッセージ: %(ret.data)s。" msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "レプリケーションターゲットの詳細の取得中にエラーが発生しました。戻りコード: " "%(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "バージョンの取得中にエラーが発生しました: svc: %(svc)s。戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." 
msgstr "" "CloudByte のストレージでボリューム [%(cb_volume)s] に関する処理 " "[%(operation)s] でエラーが発生しました: [%(cb_error)s]。エラーコード: " "[%(error_code)s]。" #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "SolidFire API 応答にエラーがあります: data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "%(size)d GB のサイズの %(space)s のスペースの作成のエラー" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "追加の %(size)d GB のボリューム %(space)s のスペース拡張のエラー" #, python-format msgid "Error managing volume: %s." msgstr "ボリュームの管理中にエラーが発生しました: %s。" #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "ボリューム %(vol)s のマッピングエラー。%(error)s。" #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "レプリカ同期の変更エラー: %(sv)s。操作: %(operation)s。戻りコード: %(rc)lu。" "エラー: %(error)s。" #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "サービス %(service)s の変更中にエラーが発生しました。戻りコード: " "%(ret.status)d、メッセージ: %(ret.data)s。" #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "ソースプロジェクト: %(src)s からターゲットプロジェクト: %(tgt)s へのボリュー" "ム: %(vol)s の移動中にエラーが発生しました。戻りコード: %(ret.status)d メッ" "セージ: %(ret.data)s。 " msgid "Error not a KeyError." msgstr "KeyError ではなくエラーです。" msgid "Error not a TypeError." msgstr "TypeError ではなくエラーです。" #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "cgsnapshot %s を作成中にエラーが発生しました。" #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "cgsnapshot %s を削除中にエラーが発生しました。" #, python-format msgid "Error occurred when updating consistency group %s." msgstr "整合性グループ %s を更新中にエラーが発生しました。" #, python-format msgid "Error parsing config file: %s" msgstr "構成ファイルの解析エラー: %s" msgid "Error promoting secondary volume to primary" msgstr "2 次ボリュームの 1 次ボリュームへのプロモート中にエラーが発生しました" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "ボリューム %(vol)s の削除エラー。%(error)s。" #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "ボリューム %(vol)s の名前を変更中にエラーが発生しました: %(err)s。" #, python-format msgid "Error response: %s" msgstr "エラー応答: %s" msgid "Error retrieving volume size" msgstr "ボリュームサイズの抽出でエラーが発生しました" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "アクション ID: %(id)s のレプリケーション更新の送信中にエラーが発生しました。" "戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" "レプリケーション更新の送信中にエラーが発生しました。戻されたエラー: %(err)s。" "アクション: %(id)s。" #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "ボリューム: %(vol)s プロジェクト %(project)s の %(set)s へのレプリケーション" "継承の設定中にエラーが発生しました。戻りコード: %(ret.status)d メッセージ: " "%(ret.data)s。" #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "ソース: %(src)s からのパッケージ: %(package)s の提供中にエラーが発生しまし" "た。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." 
msgstr "プールからのボリューム %(vol)s のアンバインドエラー。%(error)s。" #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "スナップショット: %(snapshot)s のボリュームクローン: %(clone)s サイズ: " "%(size)d のクローンサイズの検証中にエラーが発生しました。" #, python-format msgid "Error while authenticating with switch: %s." msgstr "スイッチによる認証中にエラーが発生しました: %s。" #, python-format msgid "Error while changing VF context %s." msgstr "VF コンテキスト %s の変更中にエラーが発生しました。" #, python-format msgid "Error while checking the firmware version %s." msgstr "ファームウェアバージョン %s の検査中にエラーが発生しました。" #, python-format msgid "Error while checking transaction status: %s" msgstr "トランザクション状況の検査中にエラーが発生しました: %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "VF が管理 %s に対して使用可能かどうかを検査中にエラーが発生しました。" #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "プロトコル %(protocol)s を指定したスイッチ %(switch_id)s の接続中にエラーが発" "生しました。エラー: %(error)s。" #, python-format msgid "Error while creating authentication token: %s" msgstr "認証トークンの作成中にエラーが発生しました: %s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "" "スナップショットの作成中 [status] %(stat)s にエラーが発生しました: [result] " "%(res)s。" #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "" "ボリュームの作成中 [status] %(stat)s にエラーが発生しました: [result] " "%(res)s。" #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "" "スナップショットの削除中 [status] %(stat)s にエラーが発生しました: [result] " "%(res)s" #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "" "ボリュームの削除中 [status] %(stat)s にエラーが発生しました: [result] " "%(res)s。" #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "" "ボリュームの拡張中 [status] %(stat)s にエラーが発生しました: [result] " "%(res)s。" #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "%(op)s の詳細の取得中にエラーが発生しました。戻りコード: %(status)s。" #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "" "ssh を介してデータを取得中にエラーが発生しました: (command=%(cmd)s error=" "%(err)s)。" #, python-format msgid "Error while getting disco information [%s]." msgstr "disco の情報 [%s] の取得中にエラーが発生しました。" #, python-format msgid "Error while getting nvp value: %s." msgstr "nvp 値の取得中にエラーが発生しました: %s。" #, python-format msgid "Error while getting session information %s." msgstr "セッション情報 %s の取得中にエラーが発生しました。" #, python-format msgid "Error while parsing the data: %s." msgstr "データの解析中にエラーが発生しました: %s。" #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "" "スイッチでのページ %(url)s の照会中にエラーが発生しました。理由 %(error)s。" #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "ゾーンストリングでのゾーンおよび cfgs の削除中にエラーが発生しました: " "%(description)s。" #, python-format msgid "Error while requesting %(service)s API." msgstr "%(service)s API の要求中にエラーが発生しました。" #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "" "ゾーニング CLI の実行中にエラーが発生しました: (command=%(cmd)s error=" "%(err)s)。" #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." 
msgstr "" "ゾーンストリングでの新規ゾーンおよび cfgs の更新中にエラーが発生しました。エ" "ラー %(description)s。" msgid "Error writing field to database" msgstr "データベースへのフィールドの書き込みに失敗しました" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "ボリューム ID の取得中にエラーが発生しました [%(stat)s - %(res)s]。" #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "スナップショット[%(snap_id)s] のボリューム [%(vol)s] へのリストア中にエラーが" "発生しました [%(stat)s - %(res)s]。" #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "" "ボリュームの取得中にエラーが発生しました [status] %(stat)s - [result] " "%(res)s]。" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "ボリューム %(volume_id)s のスケジュールの最大試行回数 %(max_attempts)d を超過" "しました" msgid "Exceeded the limit of snapshots per volume" msgstr "ボリュームごとのスナップショットの制限を超えました。" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr " ターゲットボリューム %(volumename)s へのメタボリュームの追加の例外。" #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "要素のレプリカの作成中の例外。クローン名: %(cloneName)s、ソース名: " "%(sourceName)s、追加の仕様: %(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "_select_ds_for_volume で例外が発生しました: %s。" #, python-format msgid "Exception while forming the zone string: %s." msgstr "ゾーンストリングの形成中に例外が発生しました: %s。" #, python-format msgid "Exception: %s" msgstr "例外: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "UUID が必要ですが、%(uuid)s を受け取りました。" #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "1 つのノードの呼び出しを予期していました \"%s\"" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "node_count に対して整数が予期され、svcinfo lsiogrp が返されました: %(node)s。" #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" "CLI コマンド %(cmd)s からの出力がないことが予期されます。%(out)s を受け取りま" "す。" #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "vdisk_UID でフィルタリングする場合、lsvdisk から 1 つの vdisk が返されること" "が予期されます。%(count)s が返されました。" #, python-format msgid "Expected volume size was %d" msgstr "予期されたボリューム・サイズは %d でした" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップのエクスポートが中止しました。予期していたバックアップ状況は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "レコードのエクスポートが中止しました。現在構成されているバックアップサービス " "[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ" "サービス [%(backup_service)s] ではありません。" msgid "Extend volume error." msgstr "ボリューム拡張のエラー。" msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "ボリュームの拡張は、スナップショットが存在しない場合にのみ、このドライバーに" "対してサポートされます。" msgid "Extend volume not implemented" msgstr "ボリュームの拡張が実装されていません" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "FAST VP イネーブラーがインストールされていません。ボリュームの階層化ポリシー" "を設定できません" msgid "FAST is not supported on this array." 
msgstr "FAST はこのアレイでサポートされていません。" msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC がプロトコルですが、wwpns が OpenStack によって提供されていません。" #, python-format msgid "Faield to unassign %(volume)s" msgstr "%(volume)s の割り当て解除に失敗しました" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "キャッシュボリューム %(volume)s の作成に失敗しました。エラー: %(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "ファブリック %(fabric)s の接続の追加に失敗しました。エラー: %(err)s" msgid "Failed cgsnapshot" msgstr "cgsnapshot が失敗しました" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "グループのスナップショットの作成に失敗しました: %(response)s。" #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" "ボリューム %(volname)s のスナップショットの作成に失敗しました: %(response)s。" #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "ファブリック %s からのアクティブなゾーンセットの取得に失敗しました。" #, python-format msgid "Failed getting details for pool %s." msgstr "プール %s の詳細の取得に失敗しました。" #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "ファブリック %(fabric)s の接続の削除に失敗しました。エラー: %(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "ボリューム %(volname)s を拡張できませんでした" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "3PAR (%(url)s) へのログインに失敗しました。理由: %(err)s" msgid "Failed to access active zoning configuration." msgstr "アクティブなゾーニング設定へのアクセスに失敗しました。" #, python-format msgid "Failed to access zoneset status:%s" msgstr "ゾーンセットステータスにアクセスできませんでした: %s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "リソース・ロックを獲得できませんでした。(シリアル: %(serial)s、inst: " "%(inst)s、ret: %(ret)s、stderr: %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "%(vol)s の %(sg)s への追加が、%(retries)s 回の再試行後に失敗しました。" msgid "Failed to add the logical device." msgstr "論理デバイスを追加できませんでした。" #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "整合性グループ %(cgName)s に ボリューム %(volumeName)s を追加できませんでし" "た。戻りコード: %(rc)lu。エラー: %(error)s。" msgid "Failed to add zoning configuration." msgstr "ゾーニング設定の追加に失敗しました。" #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" "iSCSI イニシエーター IQN を割り当てることができませんでした。(ポート: " "%(port)s、理由: %(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "qos_specs %(specs_id)s をタイプ %(type_id)s に関連付けることができませんでし" "た。" #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの接続に失敗しました。" #, python-format msgid "Failed to backup volume metadata - %s" msgstr "ボリュームメタデータのバックアップに失敗しました: %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "ボリューム・メタデータのバックアップに失敗しました - メタデータ・バックアッ" "プ・オブジェクト 'backup.%s.meta' は既に存在します" #, python-format msgid "Failed to clone volume from snapshot %s." 
msgstr "スナップショット %s のボリュームの複製に失敗しました。" #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "%(vendor_name)s 配列 %(host)s への接続に失敗しました: %(err)s" msgid "Failed to connect to Dell REST API" msgstr "Dell REST API への接続に失敗しました" msgid "Failed to connect to array" msgstr "アレイへの接続に失敗しました" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" "シープデーモンへの接続に失敗しました。アドレス: %(addr)s、 ポート: %(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "イメージをボリュームにコピーできませんでした: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "メタデータをボリュームにコピーできませんでした: %(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "ボリュームのコピーに失敗しました。宛先デバイスが使用できません。" msgid "Failed to copy volume, source device unavailable." msgstr "ボリュームのコピーに失敗しました。ソースデバイスが使用できません。" #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "" "スナップショット %(cgSnapshot)s から CG %(cgName)s の作成に失敗しました。" #, python-format msgid "Failed to create IG, %s" msgstr "IG を作成できませんでした。%s" msgid "Failed to create SolidFire Image-Volume" msgstr "SolidFire イメージ・ボリュームの作成に失敗しました" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "ボリュームグループを作成できませんでした: %(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "ファイルを作成できませんでした。(ファイル: %(file)s、ret: %(ret)s, stderr: " "%(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "%s の一時スナップショットの作成に失敗しました。" msgid "Failed to create api volume flow." msgstr "API ボリュームフローの作成に失敗しました。" #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "%(reason)s が原因で cg スナップショット %(id)s の作成に失敗しました。" #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "%(reason)s が原因で整合性グループ %(id)s の作成に失敗しました。" #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "整合性グループ %(id)s の作成に失敗しました: %(ret)s。" #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "VNX 整合性グループは圧縮された LUN をメンバーとして受け入れられないため、整合" "性グループ %s の作成に失敗しました。" #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "整合性グループ %(cgName)s の作成に失敗しました。" #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "整合性グループ %(cgid)s の作成に失敗しました。エラー: %(excmsg)s。" #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "整合性グループ %(consistencyGroupName)s の作成に失敗しました。戻りコード: " "%(rc)lu。エラー: %(error)s。" #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "%(storageSystemName)s 上でハードウェア ID を作成できませんでした。" #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "" "ホスト: %(name)s の作成に失敗しました。このホストがアレイに存在しているかどう" "か確認してください。" #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "ホストグループ: %(name)s の作成に失敗しました。このホストグループがアレイに存" "在しているかどうか確認してください。" msgid "Failed to create iqn." msgstr "iqn の作成に失敗しました。" #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの作成に失敗しました。" msgid "Failed to create manage existing flow." 
msgstr "既存の管理フローの作成に失敗しました。" msgid "Failed to create manage_existing flow." msgstr "manage_existing フローの作成に失敗しました。" msgid "Failed to create map on mcs, no channel can map." msgstr "" "MCS でのマップ作成に失敗しました。チャンネルはマップを行うことができません。" msgid "Failed to create map." msgstr "マップの作成に失敗しました。" #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "ボリュームのメタデータの作成に失敗しました: %(reason)s" msgid "Failed to create partition." msgstr "パーティションの作成に失敗しました。" #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" "仕様 %(qos_specs)s を使用して qos_specs %(name)s を作成することができませんで" "した。" msgid "Failed to create replica." msgstr "複製の作成に失敗しました。" msgid "Failed to create scheduler manager volume flow" msgstr "スケジューラー・マネージャー・ボリューム・フローを作成できませんでした" #, python-format msgid "Failed to create snapshot %s" msgstr "スナップショット %s の作成に失敗しました" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "LUN ID が指定されていないため、スナップショットの作成に失敗しました" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "整合性グループ %(cgName)s のスナップショット作成に失敗しました。" #, python-format msgid "Failed to create snapshot for volume %s." msgstr "ボリューム %s のスナップショットの作成に失敗しました。" #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "" "ボリューム %(vol)s 上でのスナップショットポリシーの作成に失敗しました: " "%(res)s。" #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "" "ボリューム %(vol)s 上でのスナップショットリソースエリアの作成に失敗しました: " "%(res)s。" msgid "Failed to create snapshot." msgstr "スナップショットの作成に失敗しました。" #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "スナップショットの作成に失敗しました。OpenStack ボリューム [%s] に関して" "CloudByte のボリューム情報が見つかりませんでした。" #, python-format msgid "Failed to create south bound connector for %s." msgstr "%s のサウスバウンドコネクターの作成に失敗しました。" #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "ストレージグループ %(storageGroupName)s を作成できませんでした。" #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "シンプールの作成に失敗しました。エラーメッセージ: %s" #, python-format msgid "Failed to create volume %s" msgstr "ボリューム %s の作成に失敗しました" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" "volume_id: %(volume_id)s の SI にペアが存在するため、その削除に失敗しました。" #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "論理デバイスを削除できませんでした。(LDEV: %(ldev)s、理由: %(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "%(reason)s が原因で cgsnapshot %(id)s の削除に失敗しました。" #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "%(reason)s が原因で整合性グループ %(id)s の削除に失敗しました。" #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "整合性グループ %(cgName)s の削除に失敗しました。" #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "整合性グループ %(consistencyGroupName)s の削除に失敗しました。戻りコード: " "%(rc)lu。エラー: %(error)s。" msgid "Failed to delete device." msgstr "デバイスの削除に失敗しました。" #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "整合性グループ %(cgname)s のファイル・セットの削除に失敗しました。エラー: " "%(excmsg)s。" msgid "Failed to delete iqn." msgstr "iqn の削除に失敗しました。" msgid "Failed to delete map." msgstr "マップの削除に失敗しました。" msgid "Failed to delete partition." 
msgstr "パーティションの削除に失敗しました。" msgid "Failed to delete replica." msgstr "複製の削除に失敗しました。" #, python-format msgid "Failed to delete snapshot %s" msgstr "スナップショット %s の削除に失敗しました" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "整合性グループ %(cgId)s のスナップショット削除に失敗しました。" #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "" "snapshot_id: %s のスナップショットにペアが存在するため、その削除に失敗しまし" "た。" msgid "Failed to delete snapshot." msgstr "スナップショットの削除に失敗しました。" #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "ボリューム %(volumeName)s の削除に失敗しました。" #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "volume_id: %(volume_id)s のボリュームにペアが存在するため、その削除に失敗しま" "した。" #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの切り離しに失敗しました。" msgid "Failed to determine blockbridge API configuration" msgstr "Blockbridge API の設定を決定できませんでした。" msgid "Failed to disassociate qos specs." msgstr "qos 仕様の関連付けを解除できませんでした。" #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "qos_specs %(specs_id)s とタイプ %(type_id)s の関連付けを解除できませんでし" "た。" #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "" "スナップショットリソースエリアの確認に失敗しました。ID %s のボリュームを見つ" "けることができませんでした" msgid "Failed to establish SSC connection." msgstr "SSC 接続の確立に失敗しました。" msgid "Failed to establish connection with Coho cluster" msgstr "Cohoクラスターとの接続に失敗しました" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "CloudByte API [%(cmd)s] の実行に失敗しました。HTTP 状況: %(status)s、エラー: " "%(error)s。" msgid "Failed to execute common command." msgstr "共通のコマンドの実行に失敗しました。" #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "ボリュームのエクスポートに失敗しました: %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "ボリューム %(name)s の拡張に失敗しました。エラーメッセージ: %(msg)s。" msgid "Failed to find QoSnode" msgstr "QoSNode が見つかりません" msgid "Failed to find Storage Center" msgstr "ストレージセンターが見つかりませんでした" msgid "Failed to find a vdisk copy in the expected pool." msgstr "予期されるプールに vdisk コピーが見つかりませんでした。" msgid "Failed to find account for volume." msgstr "ボリュームのアカウントが見つかりませんでした" #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" "パス %(path)s のファイル・セットが見つかりませんでした。コマンド出力: " "%(cmdout)s。" #, python-format msgid "Failed to find group snapshot named: %s" msgstr "%s という名前のグループスナップショットが見つかりませんでした" #, python-format msgid "Failed to find host %s." msgstr "ホスト %s を見つけることに失敗しました。" #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" "%(initiator)s を含む iSCSI イニシエーターグループを見つけることに失敗しまし" "た。" #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "ソースボリューム %s のストレージプールの検出に失敗しました。" #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "アカウント [%s] の CloudByte アカウント詳細を取得できませんでした。" #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "LUN %s の LUN ターゲット詳細の取得に失敗しました" #, python-format msgid "Failed to get LUN target details for the LUN %s." 
msgstr "LUN %s の LUN ターゲット詳細の取得に失敗しました。" #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "LUN %s の LUN ターゲット・リストを取得できませんでした" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s のパーティション ID の取得に失敗しました。" #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "" "スナップショット %(snapshot_id)s からの RAID スナップショット ID の取得に失敗" "しました。" #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "スナップショット: %(snapshot_id)s からの RAID スナップショット ID の取得に失" "敗しました。" msgid "Failed to get SplitMirror." msgstr "SplitMirror の取得に失敗しました。" #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "ストレージ・リソースを取得できませんでした。システムは、もう一度ストレージ・" "リソースの取得を試みます。(リソース: %(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "qos 仕様 %s のすべての関連付けは取得できませんでした" msgid "Failed to get channel info." msgstr "チャンネル情報の取得に失敗しました。" #, python-format msgid "Failed to get code level (%s)." msgstr "コードレベル (%s) を取得できませんでした。" msgid "Failed to get device info." msgstr "デバイス情報の取得に失敗しました。" #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "CPG (%s) がアレイ上に存在しないため、ドメインを取得できませんでした。" msgid "Failed to get image snapshots." msgstr "イメージ のスナップショットの獲得に失敗しました。" #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "" " ボリューム: %(volume_id)s を持つチャンネル %(channel_id)s の IP の取得に失敗" "しました。" msgid "Failed to get iqn info." msgstr "iqn 情報の取得に失敗しました。" msgid "Failed to get license info." msgstr "ライセンス情報の取得に失敗しました。" msgid "Failed to get lv info." msgstr "Iv 情報の取得に失敗しました。" msgid "Failed to get map info." msgstr "マップ情報の取得に失敗しました。" msgid "Failed to get migration task." msgstr "マイグレーションタスクの取得に失敗しました。" msgid "Failed to get model update from clone" msgstr "複製からのモデル更新の取得に失敗しました" msgid "Failed to get name server info." msgstr "ネームサーバー情報の取得に失敗しました。" msgid "Failed to get network info." msgstr "ネットワーク情報の取得に失敗しました。" #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "新規プール: %(pool_id)s での新規パート ID の取得に失敗しました。" msgid "Failed to get partition info." msgstr "パーティション情報の取得に失敗しました。" #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "ボリューム %(volume_id)s を持つプール ID の取得に失敗しました。" #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" "%(err)s が原因で、%(volume)s のリモートコピー情報の取得に失敗しました。" #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "%(volume)s のリモートコピー情報の取得に失敗しました。例外: %(err)s。" msgid "Failed to get replica info." msgstr "レプリカ情報の取得に失敗しました。" msgid "Failed to get show fcns database info." msgstr "fcns データベース情報の表示に失敗しました。" msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" "既存のボリューム: %(vol) のサイズの取得に失敗しました。ボリューム管理が失敗し" "ました。" #, python-format msgid "Failed to get size of volume %s" msgstr "ボリューム %s のサイズを得ることに失敗しました。" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "ボリューム %s のスナップショットの獲得に失敗しました。" msgid "Failed to get snapshot info." msgstr "スナップショット情報の取得に失敗しました。" #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "LUN %s のターゲット IQN の取得に失敗しました" msgid "Failed to get target LUN of SplitMirror." 
msgstr "SplitMirror のターゲット LUN の取得に失敗しました。" #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "LUN %s のターゲット・ポータルの取得に失敗しました" msgid "Failed to get targets" msgstr "ターゲットを取得できませんでした" msgid "Failed to get wwn info." msgstr "wwn 情報の取得に失敗しました。" #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "ボリューム %(volumeName)s の取得、作成、またはマスキングビュー " "%(maskingViewName)s への追加が失敗しました。受け取ったエラー・メッセージは " "%(errorMessage)s です。" msgid "Failed to identify volume backend." msgstr "ボリュームバックエンドを識別できませんでした。" #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "" "共有 %(cgname)s のファイル・セットへのリンクに失敗しました。エラー: " "%(excmsg)s。" #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "%s 配列へのログインに失敗しました (無効なログイン?)。" #, python-format msgid "Failed to login for user %s." msgstr "ユーザー %s のログインに失敗しました。" msgid "Failed to login with all rest URLs." msgstr "すべての rest URL のログインに失敗しました。" #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "次の理由で、Datera クラスターエンドポイントへの要求を実行できませんでした: %s" msgid "Failed to manage api volume flow." msgstr "API ボリュームフローの管理に失敗しました。" #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "報告されたサイズ %(size)s が浮動小数点値でないため、既存の %(type)s %(name)s " "の管理に失敗しました。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "ボリューム・サイズ取得エラーのため、既存ボリューム %(name)s の管理に失敗しま" "した。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "名前変更操作が失敗したため、既存ボリューム %(name)s の管理に失敗しました: エ" "ラー・メッセージ: %(msg)s。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "変更されたサイズ %(size)s が浮動小数点数ではなかったため、既存ボリューム " "%(name)s の管理に失敗しました。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "選択されたボリューム種別のプールがボリューム参照で渡された NFS 共有と一致しな" "いため、既存のボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "選択されたボリューム種別のプールが、ボリューム参照で渡されたファイルシステム" "と一致しないため、既存のボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "" "選択されたボリューム種別のプールがホストのプールと一致しないため、既存のボ" "リュームの管理に失敗しました。" #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "入出力グループの不一致が原因で既存のボリュームの管理に失敗しました。管理対象" "となるボリュームの入出力グループは %(vdisk_iogrp)s です。選択された種別の入出" "力グループは %(opt_iogrp)s です。" #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." 
msgstr "" "管理対象となるボリュームのプールがバックエンドプールと一致しないことが原因" "で、既存のボリュームの管理に失敗しました。管理対象となるボリュームのプールは " "%(vdisk_pool)s です。バックエンドのプールは %(backend_pool)s です。" msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "管理対象となるボリュームは compress ですが、選択されたボリューム種別は " "compress でないことが原因で、既存のボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "管理対象となるボリュームは compress ではありませんが、選択されたボリューム種" "別は compress であることが原因で、既存のボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "管理対象となるボリュームが有効な入出力グループになかったことが原因で、既存の" "ボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "管理対象となるボリュームは thick ですが、選択されたボリューム種別は thin であ" "ることが原因で、既存のボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "管理対象となるボリュームは thin ですが、選択されたボリューム種別は thick であ" "ることが原因で、既存のボリュームの管理に失敗しました。" #, python-format msgid "Failed to manage volume %s." msgstr "ボリューム %s の管理に失敗しました。" #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "論理デバイスをマップできませんでした。(LDEV: %(ldev)s、LUN: %(lun)s、ポート: " "%(port)s、ID: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "ボリュームのマイグレーションに失敗しました (初回)。" msgid "Failed to migrate volume for the second time." msgstr "ボリュームのマイグレーションに失敗しました (2 回目)。" #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "LUN マッピングの移動に失敗しました。戻りコード: %s" #, python-format msgid "Failed to move volume %s." msgstr "ボリューム %s の移動に失敗しました。" #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "ファイルを開くことができませんでした。(ファイル: %(file)s、ret: %(ret)s、" "stderr: %(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI 出力の解析に失敗しました:\n" "コマンド: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s。" msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "設定オプションの 'keystone_catalog_info' を解析できませんでした。本オプション" "は、:: の形式を持つ必要がありま" "す。" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "構成オプション 'swift_catalog_info' の解析に失敗しました。:" ": という形式でなければなりません" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "ゼロ・ページ・レクラメーションを実行できませんでした。(LDEV: %(ldev)s、理由: " "%(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "ボリューム %(volume)s のエクスポートを削除できませんでした: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの削除に失敗しました。" #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "整合性グループ %(cgName)s から ボリューム %(volumeName)s を削除できませんでし" "た。戻りコード: %(rc)lu。エラー: %(error)s。" #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." 
msgstr "" "デフォルトの SG からボリューム %(volumeName)s を削除できませんでした。 " #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "" "ボリューム %(volumeName)s をデフォルト SG %(volumeName)s から削除できませんで" "した。" #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "FAST ポリシー %(fastPolicyName)s のデフォルト・ストレージ・グループから " "%(volumename)s を削除できませんでした。" #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "論理ボリューム %(name)s の名前変更に失敗しました。エラーメッセージ: " "%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "アクティブなゾーニング構成 %s の取得に失敗しました" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "ターゲット IQN %(iqn)s の CHAP 認証の設定に失敗しました。詳細: %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "既存のボリューム %(name)s 用の QoS の設定に失敗しました。エラーメッセージ: " "%(msg)s。" msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "SCST ターゲットの「着信ユーザー」属性の設定に失敗しました。" msgid "Failed to set partition." msgstr "パーティションの設定に失敗しました。" #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "整合性グループ %(cgname)s に対する許可の設定に失敗しました。エラー: " "%(excmsg)s。" #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "マップ解除するボリューム %(volume_id)s の論理デバイスを指定できませんでした。" #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "削除する論理デバイスを指定できませんでした。(メソッド: %(method)s、ID: " "%(id)s)" msgid "Failed to terminate migrate session." msgstr "マイグレーションセッションの終了に失敗しました。" #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "ボリューム %(volume)s のアンバインドに失敗しました" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "整合性グループ %(cgname)s のファイル・セットのリンク解除に失敗しました。エ" "ラー: %(excmsg)s。" #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "論理デバイスをマップ解除できませんでした。(LDEV: %(ldev)s、理由: %(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "整合性グループの更新に失敗しました: %(cgName)s。" #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "ボリュームのメタデータの更新に失敗しました: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "ゾーニング構成の更新または削除に失敗しました" msgid "Failed to update or delete zoning configuration." msgstr "ゾーニング設定の更新または削除に失敗しました。" #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "仕様 %(qos_specs)s を使用して qos_specs %(specs_id)s を更新することができませ" "んでした。" msgid "Failed to update quota usage while retyping volume." msgstr "ボリュームのタイプを変更中にクォータの使用量を更新できませんでした。" msgid "Failed to update snapshot." msgstr "スナップショットの更新に失敗しました。" #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "" "ドライバーで指定されたモデル %(model)s によるモデルの更新に失敗しました" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "指定の %(src_type)s %(src_id)s メタデータを使用してボリューム %(vol_id)s メタ" "データを更新することができませんでした" #, python-format msgid "Failure creating volume %s." msgstr "ボリューム %s の作成に失敗しました。" #, python-format msgid "Failure getting LUN info for %s." 
msgstr "%s の LUN 情報の取得中に障害が発生しました。" #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "update_volume_key_value_pair で障害が発生しました: %s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "複製された新規 LUN を %s へ移動中に障害が発生しました。" #, python-format msgid "Failure staging LUN %s to tmp." msgstr "LUN %s を一時 lun へステージング中に障害が発生しました。" msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "" "致命的エラー: ユーザーには NetApp ボリュームの照会が許可されていません。" #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "" "Fexvisor は %(reason)s が原因でボリューム %(id)s の追加に失敗しました。" #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor は %(ret)s が原因でグループ %(group)s でのボリューム %(vol)s の結合" "に失敗しました。" #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor は %(ret)s が原因で グループ %(group)s でのボリューム %(vol)s の削" "除に失敗しました。" #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "" "Fexvisor は %(reason)s が原因でボリューム %(id)s の削除に失敗しました。" #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "ファイバーチャネル SAN ルックアップ障害: %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "ファイバーチャネルゾーン操作が失敗しました: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "ファイバーチャネル接続制御障害: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "ファイル %(file_path)s が見つかりませんでした。" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "ファイル %(path)s に無効なバッキング・ファイル %(bfile)s があります。打ち切り" "ます。" #, python-format msgid "File already exists at %s." msgstr "ファイルは %s に既に存在します。" #, python-format msgid "File already exists at: %s" msgstr "ファイルは既に存在します: %s" msgid "Find host in hostgroup error." msgstr "ホストグループでのホスト検索のエラー。" msgid "Find host lun id error." msgstr "ホスト LUN ID 検索のエラー。" msgid "Find lun group from mapping view error." msgstr "マッピングビューからの LUN グループ検索のエラー。" msgid "Find lun number error." msgstr "LUN 番号検索のエラー。" msgid "Find mapping view error." msgstr "マッピングビュー検索のエラー。" msgid "Find portgroup error." msgstr "ポートグループ検索のエラー。" msgid "Find portgroup from mapping view error." msgstr "マッピングビューからのポートグループ検索のエラー。" #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "Flash キャッシュ・ポリシーは、WSAPI バージョン「%(fcache_version)s」バージョ" "ン「%(version)s」がインストールされていることを必要としています。" #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "" "Flexvisor によるボリュームの割り当てが失敗しました: %(id)s:%(status)s。" #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "" "Flexvisor によるボリュームの割り当てが失敗しました: %(id)s:%(status)s。" #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor はグループ %(vgid)s スナップショット %(vgsid)s でボリューム %(id)s " "スナップショットを見つけられませんでした。" #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "" "Flexvisor によるボリュームの作成が失敗しました: %(volumeid)s:%(status)s。" #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の削除に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." 
msgstr "" "Flexvisor はボリューム %(id)s をグループ %(cgid)s に追加できませんでした。" #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor は、イベント ID でステータスを照会できないことが原因でボリューム " "%(id)s を割り当てることに失敗しました。" #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の割り当てに失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "" "Flexvisor はボリューム %(volume)s iqn %(iqn)s の割り当てに失敗しました。" #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の複製に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "" "Flexvisor はボリューム %(id)s の複製に失敗しました (イベントの取得に失敗しま" "した)。" #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" "Flexvisor はボリューム %(id)s のスナップショットを作成することに失敗しまし" "た: %(status)s。" #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "Flexvisor はボリューム %(id)s のスナップショットを作成することに失敗しました " "(イベントの取得に失敗しました)。" #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "" "Flexvisor はグループ %(vgid)s 内でボリューム %(id)s を作成できませんでした。" #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor はボリューム %(volume)s の作成に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor はボリューム %s の作成 (イベントの取得) に失敗しました。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " "%(status)s。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " "%(status)s。" #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました (イ" "ベントの取得に失敗しました)。" #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor はスナップショット %(id)s の削除に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor はスナップショット %(id)s の削除に失敗しました (イベントの取得に失" "敗しました)。" #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の削除に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の拡張に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor はボリューム %(id)s の拡張に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "Flexvisor はボリューム %(id)s の拡張に失敗しました (イベントの取得に失敗しま" "した)。" #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "Flexvisor はプール情報 %(id)s の取得に失敗しました: %(status)s。" #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Flexvisor はグループ %(vgid)s からボリューム %(id)s のスナップショット ID を" "取得できませんでした。" #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." 
msgstr "" "Flexvisor はグループ %(cgid)s からのボリューム %(id)s の削除に失敗しました。" #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " "%(status)s。" #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました (イ" "ベントの取得に失敗しました)。" #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "" "Flexvisor はボリューム %(id)s の割り当て解除に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" "Flexvisor はボリューム %(id)s の割り当て解除 (イベントの取得) に失敗しまし" "た。" #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor はボリューム %(id)s の割り当て解除に失敗しました: %(status)s" #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor はソース・ボリューム %(id)s 情報を検出できません。" #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" "Flexvisor によるボリュームの割り当て解除が失敗しました: %(id)s:%(status)s。" #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "Flexvisor ボリューム %(id)s はグループ %(vgid)s の結合に失敗しました。" #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "フォルダー %s は Nexenta Store アプライアンスに存在しません" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS が実行されていません。状態: %s。" msgid "Gateway VIP is not set" msgstr "ゲートウェイ VIP が設定されていません" msgid "Get FC ports by port group error." msgstr "ポートグループによる FC ポート取得のエラー。" msgid "Get FC ports from array error." msgstr "アレイからの FC ポート取得のエラー。" msgid "Get FC target wwpn error." msgstr "FC ターゲット wwpn 取得のエラー。" msgid "Get HyperMetroPair error." msgstr "HyperMetroPair 取得のエラー。" msgid "Get LUN group by view error." msgstr "ビューによる LUN グループ取得のエラー。" msgid "Get LUNcopy information error." msgstr "LUN コピーの情報取得のエラー。" msgid "Get QoS id by lun id error." msgstr "LUN による QoS ID 取得のエラー。" msgid "Get QoS information error." msgstr "QoS 情報取得のエラー。 " msgid "Get QoS policy error." msgstr "QoS ポリシー取得のエラー。" msgid "Get SplitMirror error." msgstr "SplitMirror 取得のエラー。" msgid "Get active client failed." msgstr "アクティブなクライアントの取得が失敗しました。" msgid "Get array info error." msgstr "アレイ情報取得のエラー。" msgid "Get cache by name error." msgstr "名前によるキャッシュ取得のエラー。" msgid "Get connected free FC wwn error." msgstr "空き FC wwn 接続のエラー。" msgid "Get engines error." msgstr "エンジン取得のエラー。" msgid "Get host initiators info failed." msgstr "ホストイニシエーター情報の取得が失敗しました。" msgid "Get hostgroup information error." msgstr "ホストグループの情報取得のエラー。" msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "iSCSI ポート情報取得のエラー。huawei 設定ファイルに設定されたターゲット IP を" "確認してください。" msgid "Get iSCSI port information error." msgstr "iSCSI ポート情報取得のエラー。" msgid "Get iSCSI target port error." msgstr "iSCSI ターゲットポート取得のエラー。" msgid "Get lun id by name error." msgstr "名前による LUN ID 取得のエラー。" msgid "Get lun migration task error." msgstr "LUN マイグレーションタスク取得のエラー。" msgid "Get lungroup id by lun id error." msgstr "LUN ID による LUN グループ ID 取得のエラー。" msgid "Get lungroup information error." msgstr "LUN グループの情報取得のエラー。" msgid "Get migration task error." msgstr "マイグレーションタスク取得のエラー。" msgid "Get pair failed." msgstr "ペアの取得が失敗しました。" msgid "Get partition by name error." msgstr "名前によるパーティション取得のエラー。" msgid "Get partition by partition id error." msgstr "パーティション ID によるパーティション取得のエラー。" msgid "Get port group by view error." 
msgstr "ビューによるポートグループ取得のエラー。" msgid "Get port group error." msgstr "ポートグループ取得のエラー。" msgid "Get port groups by port error." msgstr "ポートによるポートグループ取得のエラー。" msgid "Get ports by port group error." msgstr "ポートグループによるポート取得のエラー。" msgid "Get remote device info failed." msgstr "リモートデバイス情報の取得が失敗しました。" msgid "Get remote devices error." msgstr "リモートデバイス取得のエラー。" msgid "Get smartcache by cache id error." msgstr "キャッシュ ID によるスマートキャッシュ取得のエラー。" msgid "Get snapshot error." msgstr "スナップショット取得のエラー。" msgid "Get snapshot id error." msgstr "スナップショット ID 取得のエラー。" msgid "Get target IP error." msgstr "ターゲット IP 取得のエラー。" msgid "Get target LUN of SplitMirror error." msgstr "SplitMirror のターゲット LUN 取得のエラー。" msgid "Get views by port group error." msgstr "ポートグループによるビュー取得のエラー。" msgid "Get volume by name error." msgstr "名前によるボリューム取得のエラー。" msgid "Get volume error." msgstr "ボリューム取得のエラー。" #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Glance メタデータを更新できません。ボリューム ID %(volume_id)s に対するキー " "%(key)s が存在します" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" "ボリューム/スナップショット %(id)s の Glance メタデータが見つかりません。" #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "Gluster 構成ファイルが %(config)s に存在しません" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Google Cloud Storage の API エラー: %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Google Cloud Storage の接続エラー: %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Google Cloud Storage の oauth2 エラー: %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "DRBDmanage (%s) から不正なパスの情報が提供されました" msgid "HBSD error occurs." msgstr "HBSD エラーが発生しました。" msgid "HNAS has disconnected SSC" msgstr "HNAS に接続解除された SSC があります" msgid "HPELeftHand url not found" msgstr "HPELeftHand url が見つかりません" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." msgstr "" "HTTPS の証明書の検証が要求されましたが、Pure Storage モジュールのバージョン " "%(version)s では有効化できません。この機能を有効化するにはより新しいバージョ" "ンにアップグレードしてください。" #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "最後のバックアップ以降にハッシュブロックサイズが変更されました。新規ハッシュ" "ブロックサイズ: %(new)s。旧ハッシュブロックサイズ: %(old)s。フルバックアップ" "を実行してください。" #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "%(tier_levels)s 層が作成されていません。" #, python-format msgid "Hint \"%s\" not supported." msgstr "ヒント「%s」はサポートされていません。" msgid "Host" msgstr "ホスト" #, python-format msgid "Host %(host)s could not be found." msgstr "ホスト %(host)s が見つかりませんでした。" #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "ホスト %(host)s は x509 証明書の内容に一致しません: CommonName " "%(commonName)s。" #, python-format msgid "Host %s has no FC initiators" msgstr "ホスト %s に FC イニシエーターがありません" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "ホスト %s に iSCSI イニシエーターがありません" #, python-format msgid "Host '%s' could not be found." 
msgstr "ホスト '%s' が見つかりませんでした。" #, python-format msgid "Host group with name %s not found" msgstr "名前が %s のホストグループが見つかりません" #, python-format msgid "Host group with ref %s not found" msgstr "参照 %s が指定されたホストグループが見つかりません" msgid "Host is NOT Frozen." msgstr "ホストは固定化されていません。" msgid "Host is already Frozen." msgstr "ホストは既に固定化されています。" msgid "Host not found" msgstr "ホストが見つかりません" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" "ホストが見つかりません。%(host)s 上で %(service)s の削除に失敗しました。" #, python-format msgid "Host replication_status must be %s to failover." msgstr "" "フェイルオーバーを行うにはホストの replication_status が %s である必要があり" "ます。" #, python-format msgid "Host type %s not supported." msgstr "ホスト・タイプ %s はサポートされていません。" #, python-format msgid "Host with ports %(ports)s not found." msgstr "ポート %(ports)s が設定されたホストが見つかりません。" msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "Hypermetro とレプリケーションは、同一の volume_type で使用できません。" #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "入出力グループ %(iogrp)d iは無効です。使用できる入出力グループは %(avail)s で" "す。" msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "Blockbridge API の IP アドレスとホスト名。" msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "圧縮が True に設定される場合、rsize も (not equal to -1) に設定しなければなり" "ません。" msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "nofmtdisk が True に設定される場合、rsize も -1 に設定しなければなりません。" #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "正しくない値「%(prot)s」が flashsystem_connection_protocol に指定されていま" "す。有効な値は %(enabled)s です。" msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "IOTYPE に正しくない値が指定されています: 0、1、または 2。" msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "" "smarttier に正しくない値が指定されています: 0、1、2、または 3 のいずれかに設" "定してください。" msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "正しくない値が storwize_svc_vol_grainsize に指定されています。32、64、128、" "256 のいずれかに設定してください。" msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "thin に正しくない値が指定されています: thin と thick を同時に設定することはで" "きません。" #, python-format msgid "Image %(image_id)s could not be found." msgstr "イメージ %(image_id)s が見つかりませんでした。" #, python-format msgid "Image %(image_id)s is not active." msgstr "イメージ %(image_id)s はアクティブではありません。" #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "イメージ %(image_id)s は受け入れられません: %(reason)s" msgid "Image location not present." msgstr "イメージロケーションが存在しません。" #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "イメージの仮想サイズは %(image_size)dGB であり、サイズ %(volume_size)dGB のボ" "リュームに適合しません。" msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "rbd ボリュームの削除中に ImageBusy エラーが発生しました。これは、異常終了した" "クライアントからの接続が原因である可能性があります。その場合、30 秒経過後に削" "除を再試行すると、解決できることがあります。" #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. 
" "Request service %(service)s" msgstr "" "レコードのインポートに失敗しました。インポートを実行するバックアップサービス" "が見つかりません。要求サービス %(service)s" msgid "Incorrect request body format" msgstr "要求本体の形式が正しくありません" msgid "Incorrect request body format." msgstr "要求本体の形式が正しくありません。" msgid "Incremental backups exist for this backup." msgstr "このバックアップには増分バックアップが存在します。" #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Infortrend CLI の例外: %(err)s。パラメーター: %(param)s (戻りコード: %(rc)s) " "(出力: %(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "当初の層 {} のポリシー {} が無効です。" msgid "Input type {} is not supported." msgstr "入力タイプ {} はサポートされません。" msgid "Input volumes or snapshots are invalid." msgstr "入力ボリュームまたはスナップショットが無効です。" msgid "Input volumes or source volumes are invalid." msgstr "入力ボリュームまたはソースボリュームが無効です。" #, python-format msgid "Instance %(uuid)s could not be found." msgstr "インスタンス %(uuid)s が見つかりませんでした。" msgid "Insufficient free space available to extend volume." msgstr "ボリュームを拡張するために十分な空きスペースがありません。" msgid "Insufficient privileges" msgstr "不十分な権限" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "ceph クラスターへの接続と接続の間の間隔の値 (秒)。" #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "無効な %(protocol)s ポート %(port)s が io_port_list に指定されました。" #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "無効な 3PAR ドメイン: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "無効な ALUA 値。ALUA 値は、1 または 0 でなければなりません。" msgid "Invalid Ceph args provided for backup rbd operation" msgstr "バックアップ rbd 操作に指定された Ceph 引数が無効です" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "無効な CgSnapshot: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "無効な ConsistencyGroup: %(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "無効な ConsistencyGroup: 整合性グループの状態は「利用可能」か「エラー」のいず" "れかである必要がありますが、現在の状態は「使用中」です。" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "無効な ConsistencyGroup: 整合性グループ状況は「使用可能」でなければなりません" "が、現在の状況は %s です。" msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" "無効な ConsistencyGroup: 整合性グループを作成するためのホストがありません。" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "無効な HPELeftHand API バージョンが見つかりました: %(found)s。管理/非管理のサ" "ポートには、バージョン %(minimum)s 以上が必要です。" #, python-format msgid "Invalid IP address format: '%s'" msgstr "無効な IP アドレスの形式: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "ボリューム %s の QoS ポリシーの取得中に 無効な QoS 仕様が検出されました" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "無効なレプリケーションターゲット: %(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "無効な VNX 認証タイプ: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." 
msgstr "" "Virtuozzo Storage のシェアの指定が無効です: %r。[MDS1[,MDS2],...:/][:PASSWORD] である必要があります。" #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" "XtremIO バージョン %(cur)s は無効です。バージョン %(min)s 以上が必要です" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "" "以下のプロジェクトのクォータに定義した割り当て済みのクォータが無効です: %s" msgid "Invalid argument" msgstr "引数が無効です" msgid "Invalid argument - negative seek offset." msgstr "引数が無効です。シークオフセットが負の値です。" #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "引数が無効です: whence=%s はサポートされていません" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "引数が無効です。whence=%s はサポートされません。" #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "接続モード '%(mode)s' はボリューム %(volume_id)s には無効です。" #, python-format msgid "Invalid auth key: %(reason)s" msgstr "認証キーが無効です: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "無効なバックアップ: %(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "Barbican API の URL が無効です: バージョンが必要です ('http[s]://|" "[:port]/' など)。指定された URL は %s です。" msgid "Invalid cgsnapshot" msgstr "無効な cgsnapshot" msgid "Invalid chap user details found in CloudByte storage." msgstr "CloudByte のストレージで無効な chap ユーザーの詳細が見つかりました。" #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "ボリューム %(name)s の接続初期化応答が無効です" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "ボリューム %(name)s の接続初期化応答が無効です: %(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "無効なコンテンツタイプ %(content_type)s。" msgid "Invalid credentials" msgstr "無効な認証情報" #, python-format msgid "Invalid directory: %s" msgstr "無効なディレクトリー: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "ディスク・アダプター・タイプが無効です: %(invalid_type)s。" #, python-format msgid "Invalid disk backing: %s." msgstr "ディスク・バッキングが無効です: %s。" #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "ディスク・タイプが無効です: %(disk_type)s。" #, python-format msgid "Invalid disk type: %s." msgstr "ディスクタイプが無効です: %s。" #, python-format msgid "Invalid host: %(reason)s" msgstr "無効なホスト: %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "無効な hpe3parclient バージョンが見つかりました (%(found)s)。バージョン " "%(minimum)s 以上が必要です。 \"pip install --upgrade python-3parclient\" を実" "行して hpe3parclient をアップグレードしてください。" #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "無効な hpelefthandclient バージョンが見つかりました (%(found)s)。バージョン " "%(minimum)s 以上が必要です。 'pip install --upgrade python-lefthandclient' " "を実行して hpelefthandclient をアップグレードしてください。" #, python-format msgid "Invalid image href %(image_href)s." msgstr "無効なイメージ href %(image_href)s。" msgid "Invalid image identifier or unable to access requested image." msgstr "イメージ ID が無効か、要求されたイメージにアクセスできません。" msgid "Invalid imageRef provided." 
msgstr "無効な imageRef が指定されました。" msgid "Invalid initiator value received" msgstr "無効なイニシエーター値を受信しました" msgid "Invalid input" msgstr "無効な入力" #, python-format msgid "Invalid input received: %(reason)s" msgstr "無効な入力を受信しました: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "無効な is_public フィルター [%s]" #, python-format msgid "Invalid lun type %s is configured." msgstr "無効な LUN タイプ %s が設定されています。" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "無効なメタデータサイズ: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "メタデータが無効です: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "無効なマウントポイントベース: %s" #, python-format msgid "Invalid mount point base: %s." msgstr "無効なマウントポイントベース: %s" #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "新規 snapCPG 名がタイプ変更には無効です。new_snap_cpg='%s'。" #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Coho の rpc ポートの無効なポート番号 %(config)s" #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "" "無効なプリフェッチタイプ '%s' が設定されています。プリフェッチタイプは 0、1、" "2、3 でなければなりません。" #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "qos 仕様が無効です: %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "ボリュームを無効なターゲットに接続する要求は無効です" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "無効なモードでボリュームを接続しようとしているため、要求は無効です。接続モー" "ドは 'rw' または 'ro' でなければなりません" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "予約の有効期限 %(expire)s が無効です。" msgid "Invalid response header from RPC server" msgstr "RPC サーバーからの無効な応答ヘッダー" #, python-format msgid "Invalid secondary id %s." msgstr "無効なセカンダリー ID %s。" #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "" "無効な secondary_backend_id が指定されました。有効なバックエンド ID は %s で" "す。" msgid "Invalid service catalog json." msgstr "無効なサービス・カタログ JSON。" msgid "Invalid sheepdog cluster status." msgstr "シープドッグクラスターの状態が無効です。" #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "無効なスナップショット: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "無効な状況: '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "無効なストレージ・プール %s が要求されました。再入力は失敗しました。" #, python-format msgid "Invalid storage pool %s specificed." msgstr "無効なストレージ・プール %s が指定されました。" msgid "Invalid storage pool is configured." msgstr "無効なストレージプールが設定されています。" #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "無効な同期モードが指定されました。許可されるモードは %s です。" msgid "Invalid transport type." msgstr "無効なトランスポートタイプ。" #, python-format msgid "Invalid update setting: '%s'" msgstr "無効な更新設定: '%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "URL が無効です: 'http[s]://|[:port]/' の形式である必要" "があります。指定された URL は %s です" #, python-format msgid "Invalid value '%s' for force." msgstr "force の値 '%s' は無効です。" #, python-format msgid "Invalid value '%s' for force. " msgstr "force の値 '%s' は無効です。" #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "is_publicの値 '%s' が無効です。許容される値は True または False です。" #, python-format msgid "Invalid value '%s' for skip_validation." 
msgstr "skip_validation の値 '%s' が無効です。" #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr " 'bootable' の値 '%s' は無効です。" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "force の値 '%s' は無効です。" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr " 'readonly' の値 '%s' は無効です。" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "'scheduler_max_attempts' の値が無効です。1 以上でなければなりません" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "NetApp の設定オプション netapp_host_type の値が無効です。" msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "NetApp の設定オプション netapp_lun_ostype の値が無効です。" #, python-format msgid "Invalid value for age, %(age)s" msgstr "年齢 %(age)s の値が無効です" #, python-format msgid "Invalid value: \"%s\"" msgstr "無効な値: \"%s\"" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "作成要求に指定されたボリューム・サイズ %s は無効です (size 引数は整数(または" "整数のストリング表記) でなければならず、またゼロより大きくなければなりませ" "ん)。" #, python-format msgid "Invalid volume type: %(reason)s" msgstr "無効なボリューム種別: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "無効なボリューム: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "無効なボリューム: ボリューム %(volume_id)s は無効な状態 %(status)s であるた" "め、整合性グループ %(group_id)s に追加できません。有効な状態は次のとおりで" "す: (「使用可能」、「使用中」)。" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "無効なボリューム: ボリューム・タイプ %(volume_type)s は整合性グルー" "プ%(group_id)s ではサポートされていないため、ボリューム %(volume_id)s をこの" "整合性グループに追加できません。" #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "無効なボリューム: ボリューム fake-volume-uuid は見つからないため、整合性グ" "ループ %(group_id)s に追加できません。" #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "無効なボリューム: ボリューム fake-volume-uuid は整合性グループ%(group_id)s に" "存在しないため、このグループから削除できません。" #, python-format msgid "Invalid volume_type passed: %s." msgstr "無効な volume_type が渡されました: %s。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "無効な volume_type が指定されました: %s (要求されるタイプはこれと互換性があり" "ません。ソースボリュームと合致するか、 タイプの引数を排除する必要がありま" "す)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "無効な volume_type が指定されました: %s (要求されるタイプはこれと互換性があり" "ません。タイプの引数を排除することを推奨します)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "無効な volume_type %s が指定されました (要求するタイプは、この整合性グループ" "でサポートされていなければなりません)。" #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "wwpn 形式 %(wwpns)s は無効です" msgid "Invoking web service failed." msgstr "Web サービスの呼び出しが失敗しました。" msgid "Issue encountered waiting for job." msgstr "ジョブの待機中に問題が発生しました。" msgid "Issue encountered waiting for synchronization." 
msgstr "同期の待機中に問題が発生しました。" msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "レプリケーションが適切に設定されていないため、fail-over の発行が失敗しまし" "た。" msgid "Item not found" msgstr "項目が見つかりません" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" "CloudByte のボリューム [%s] 作成に関するレスポンスにジョブ ID が見つかりませ" "ん。" #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "" "CloudByte のボリューム [%s] 削除に関するレスポンスにジョブ ID が見つかりませ" "ん。" msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "鍵の名前に含めることができるのは、英数字、アンダースコアー、ピリオド、コロ" "ン、およびハイフンのみです。" #, python-format msgid "KeyError: %s" msgstr "KeyError: %s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "" "ネストされたクォータを使用するには、Keystone のバージョン 3 以降を使用する必" "要があります。" #, python-format msgid "LU does not exist for volume: %s" msgstr "ボリューム: %s 用の LU は存在しません" msgid "LUN export failed!" msgstr "LUN のエクスポートが失敗しました。" msgid "LUN id({}) is not valid." msgstr "LUN id({}) は無効です。" msgid "LUN map overflow on every channel." msgstr "すべてのチャンネルでの LUN マップのオーバーフロー。" #, python-format msgid "LUN not found with given ref %s." msgstr "指定された参照 %s を持つ LUN が見つかりません。" msgid "LUN number ({}) is not an integer." msgstr "LUN 番号 ({}) が整数ではありません。" #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 番号がチャンネル ID: %(ch_id)s の境界を越えています。" #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "指定された参照 %(ref)s を持つ LUN はボリューム・タイプを満たしていません。" "ssc フィーチャーを持つ LUN ボリュームが vserver %(vs)s 上に存在することを確認" "してください。" #, python-format msgid "Last %s cinder syslog entries:-" msgstr "最後の %s cinder syslog 項目:-" msgid "LeftHand cluster not found" msgstr "LeftHand クラスターが見つかりません" msgid "License is unavailable." msgstr "ライセンスが使用できません。" #, python-format msgid "Line %(dis)d : %(line)s" msgstr "行 %(dis)d : %(line)s" msgid "Link path already exists and its not a symlink" msgstr "リンク・パスは既に存在しますが、symlink ではありません" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "" "状態 %s でのソース・ボリュームのリンクされた複製はサポートされていません。" msgid "Lock acquisition failed." msgstr "ロックの取得に失敗しました。" msgid "Logout session error." msgstr "ログアウトセッションのエラー。" msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "ルックアップ・サービスが構成されていません。fc_san_lookup_service の構成オプ" "ションはルックアップ・サービスの具体的実装の指定を必要とします。" msgid "Lun migration error." msgstr "LUN マイグレーションのエラー。" #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "" "オブジェクトの MD5: %(md5)s の前と %(etag)s の後の %(object_name)s が同じで" "はありません。" #, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED: %r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED: AUTH_ERROR: %r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED: RPC_MISMATCH: %r" #, python-format msgid "Malformed fcns output string: %s" msgstr "fcns 出力ストリングの形式が誤っています: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "メッセージ本体の形式に誤りがあります: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "誤った形式のネーム・サーバー・ストリング: %s" msgid "Malformed request body" msgstr "誤った形式の要求本体" msgid "Malformed request body." 
msgstr "誤った形式のリクエスト本文。" msgid "Malformed request url" msgstr "誤った形式の要求 URL" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "コマンド %(cmd)s への応答の形式が誤っています: %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "scheduler_hints 属性の形式に誤りがあります" #, python-format msgid "Malformed show fcns database string: %s" msgstr "fcns データベース文字列の形式が誤っています: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "誤った形式のゾーン構成: (switch=%(switch)s zone_config=%(zone_config)s)。" #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "ゾーンステータスの形式が誤っています: (switch=%(switch)s zone_config=" "%(zone_config)s)" msgid "Manage existing get size requires 'id'." msgstr "既存の get サイズを管理するには 'id' が必要です。" msgid "Manage existing snapshot not implemented." msgstr "既存のスナップショットの管理が実装されていません。" #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "無効なバックエンド参照 %(existing_ref)s のため、既存ボリュームの管理に失敗し" "ました: %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "ボリューム種別の不一致のため、既存ボリュームの管理に失敗しました: %(reason)s" msgid "Manage existing volume not implemented." msgstr "既存ボリュームの管理は実装されていません。" msgid "Manage existing volume requires 'source-id'." msgstr "既存のボリュームを管理するには 'source-id' が必要です。" #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST が有効化されている場合、ボリュームの管理はサポートされません。FAST ポリ" "シー: %(fastPolicyName)s。" msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "" "フェイルオーバーされたボリュームへのスナップショットを管理対象にすることは許" "可されません。" msgid "Map info is None due to array version not supporting hypermetro." msgstr "" "アレイのバージョンが hypermetro をサポートしないことが原因で、マップ情報があ" "りません。" #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "マッピング %(id)s の準備を、割り当てられたタイムアウトの %(to)d 秒以内に完了" "できませんでした。終了します。" #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "マスキングビュー %(maskingViewName)s は正常に削除されませんでした" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "バックアップの許容最大数 (%(allowed)d) を超えました" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "スナップショットの許容最大数 (%(allowed)d) を超えました" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "許容されるぼるーむの最大数 (%(allowed)d) がクォータ '%(name)s' を超えました。" #, python-format msgid "May specify only one of %s" msgstr "指定できる %s は 1 つのみです" msgid "Metadata backup already exists for this volume" msgstr "このボリュームのメタデータバックアップは既に存在します" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "メタデータのバックアップオブジェクト '%s' は既に存在します" msgid "Metadata item was not found" msgstr "メタデータ項目が見つかりませんでした" msgid "Metadata item was not found." msgstr "メタデータ項目が見つかりませんでした。" #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "メタデータのプロパティーキー %s が 255 文字を超えています" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "メタデータのプロパティーキー %s 値が 255 文字を超えています" msgid "Metadata property key blank" msgstr "メタデータ・プロパティー・キーがブランクです" msgid "Metadata property key blank." msgstr "メタデータプロパティーのキーがブランクです。" msgid "Metadata property key greater than 255 characters." 
msgstr "メタデータプロパティーのキーが 255 文字を超えています。" msgid "Metadata property value greater than 255 characters." msgstr "メタデータプロパティー値が 255 文字を超えています。" msgid "Metadata restore failed due to incompatible version" msgstr "バージョンの非互換のため、メタデータのリストアに失敗しました" msgid "Metadata restore failed due to incompatible version." msgstr "バージョンの非互換のため、メタデータのリストアに失敗しました。" #, python-format msgid "Migrate volume %(src)s failed." msgstr "ボリューム %(src)s のマイグレーションが失敗しました。" #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "ソースボリューム %(src)s と宛先ボリューム %(dst)s の間のボリュームのマイグ" "レーションが失敗しました。" #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "LUN %s のマイグレーションが停止したか、障害が発生しました。" msgid "MirrorView/S enabler is not installed." msgstr "MirrorView/S イネーブラーがインストールされていません。" msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "「purestorage」python モジュールがありません。ライブラリーがインストールさ" "れ、使用可能であることを確認してください。" msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "" "Fibre Channel の SAN 構成パラメーターの fc_fabric_names が欠落しています" msgid "Missing request body" msgstr "要求本体がありません" msgid "Missing request body." msgstr "要求本体がありません。" #, python-format msgid "Missing required element '%s' in request body" msgstr "要求本体に必須エレメント '%s' がありません" #, python-format msgid "Missing required element '%s' in request body." msgstr "リクエストの本文に必要な要素 '%s' がありません。" msgid "Missing required element 'consistencygroup' in request body." msgstr "リクエストの本文に必要な要素の 'consistencygroup' がありません。" msgid "Missing required element 'host' in request body." msgstr "リクエストの本文に必要な要素 'host' がありません。" msgid "Missing required element quota_class_set in request body." msgstr "要求本体に必須エレメント quota_class_set がありません。" msgid "Missing required element snapshot in request body." msgstr "リクエストの本文に必要な要素のスナップショットがありません。" msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "この処理では 1 つの SerialNumber を予期していたものの、複数の SerialNumber が" "見つかりました。EMC の設定ファイルを変更してください。" #, python-format msgid "Multiple copies of volume %s found." msgstr "ボリューム %s の複数のコピーが見つかりました。" #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "「%s」に関して複数の一致が見つかりました。ID を使用して絞り込んでください。" msgid "Multiple profiles found." msgstr "複数のプロファイルが見つかりました。" msgid "Must implement a fallback schedule" msgstr "予備の(fallback)スケジューラを実装する必要があります。" msgid "Must implement find_retype_host" msgstr "find_retype_host を実装する必要があります" msgid "Must implement host_passes_filters" msgstr "host_passes_filters を実装する必要があります。" msgid "Must implement schedule_create_consistencygroup" msgstr "schedule_create_consistencygroup を実装する必要があります" msgid "Must implement schedule_create_volume" msgstr "schedule_create_volume を実装する必要があります。" msgid "Must implement schedule_get_pools" msgstr "schedule_get_pools を実装する必要があります" msgid "Must pass wwpn or host to lsfabric." msgstr "wwpn またはホストを lsfabric に渡す必要があります。" msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "クラウド管理者があらゆるプロジェクトのリストと取得を行える Keystone の " "policy.json を使用して、クラウド管理者としてこのコマンドを実行する必要があり" "ます。" msgid "Must specify 'connector'" msgstr "'connector' を指定する必要があります" msgid "Must specify 'connector'." msgstr "「コネクター」を指定する必要があります。" msgid "Must specify 'host'." 
msgstr "'host' を指定する必要があります。" msgid "Must specify 'new_volume'" msgstr "'new_volume' を指定する必要があります" msgid "Must specify 'status'" msgstr "'status' を指定する必要があります" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "更新のために 'status'、'attach_status'、または 'migration_status' を指定する" "必要があります。" msgid "Must specify a valid attach status" msgstr "有効な接続状況を指定してください" msgid "Must specify a valid migration status" msgstr "有効なマイグレーション状況を指定してください" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "有効な個人 %(valid)s を指定する必要があります。値 '%(persona)s' は無効です。" #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "有効なプロビジョニング・タイプ %(valid)s を指定する必要があります。値 " "'%(prov)s' は無効です。" msgid "Must specify a valid status" msgstr "有効な状況を指定してください" msgid "Must specify an ExtensionManager class" msgstr "ExtensionManager クラスを指定する必要があります" msgid "Must specify bootable in request." msgstr "要求にブート可能を指定する必要があります。" msgid "Must specify protection domain name or protection domain id." msgstr "保護ドメインの名前か ID を指定しなければなりません。" msgid "Must specify readonly in request." msgstr "要求内で読み取り専用を指定する必要があります。" msgid "Must specify snapshot source-name or source-id." msgstr "" "スナップショットの source-name または source-id を指定する必要があります。" msgid "Must specify source-name or source-id." msgstr "ソース名またはソース ID を指定する必要があります。" msgid "Must specify storage pool name or id." msgstr "ストレージプールの名前か ID を指定しなければなりません。" msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "ストレージプールを指定しなければなりません。オプション: sio_storage_pools。" msgid "Must supply a positive value for age" msgstr "年齢には正の値を提供する必要があります" msgid "Must supply a positive, non-zero value for age" msgstr "存続期間には正の非ゼロ値を指定してください" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "NAS 構成「%(name)s=%(value)s」は無効です。「auto」、「true」、「false」のいず" "れかでなければなりません" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "NFS 構成ファイルが %(config)s に存在しません" #, python-format msgid "NFS file %s not discovered." msgstr "NFS ファイル %s は検出されていません。" msgid "NFS file could not be discovered." msgstr "NFS ファイルを検出できませんでした。" msgid "NaElement name cannot be null." msgstr "NaElement 名は NULL にできません。" msgid "Name" msgstr "名前" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "要求本体で、名前、説明、add_volumes、および remove_volumes をすべて空にするこ" "とはできません。" msgid "Need non-zero volume size" msgstr "ゼロでないボリュームサイズが必要です" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "MSG_DENIED でも MSG_ACCEPTED でもありません: %r" msgid "NetApp Cinder Driver exception." msgstr "NetApp Cinder Driver 例外です。" #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "拡張用に指定する新しいサイズは、現行サイズより大きくなければなりません。(現" "行: %(size)s、拡張用: %(new_size)s)。" #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "新規サイズはバックエンドストレージの実サイズよりも大きくなければなりません。" "実サイズ: %(oldsize)s、新規サイズ: %(newsize)s。" msgid "New volume size must be specified as an integer." msgstr "新しいボリューム・サイズを整数で指定する必要があります。" msgid "New volume type must be specified." msgstr "新規ボリュームタイプを指定する必要があります。" msgid "New volume type not specified in request_spec." 
msgstr "新規ボリューム・タイプが要求仕様に指定されていません。" #, python-format msgid "New volume_type same as original: %s." msgstr "新規 volume_type が元の値と同じです: %s。" msgid "Nimble Cinder Driver exception" msgstr "Nimble Cinder ドライバー例外" msgid "No FC initiator can be added to host." msgstr "ホストには FC イニシエーターを追加できません。" msgid "No FC port connected to fabric." msgstr "ファブリックに接続された FC ポートはありません。" msgid "No FCP targets found" msgstr "FCP ターゲットが見つかりません" msgid "No Port Group elements found in config file." msgstr "設定ファイルにポートグループの要素が見つかりません。" msgid "No VF ID is defined in the configuration file." msgstr "設定ファイルに VF ID が定義されていません。" msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "提供されたiSCSI IPのiSCSIポータルがアクティブではありません。" #, python-format msgid "No available service named %s" msgstr "%s という名前の使用可能なサービスはありません" #, python-format msgid "No backup with id %s" msgstr "ID %s のバックアップがありません" msgid "No backups available to do an incremental backup." msgstr "増分バックアップを実行するために使用可能なバックアップがありません。" msgid "No big enough free disk" msgstr "十分な大きさの空きディスクがありません" #, python-format msgid "No cgsnapshot with id %s" msgstr "ID %s の cgsnapshot は存在しません" msgid "No cinder entries in syslog!" msgstr "cinder 項目が syslog にありません。" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "複製された %s という名前の LUN はファイラーで見つかりません" msgid "No config node found." msgstr "設定ノードが見つかりません。" #, python-format msgid "No consistency group with id %s" msgstr "ID %s の整合性グループは存在しません" #, python-format msgid "No element by given name %s." msgstr "指定された名前 %s の要素はありません。" msgid "No errors in logfiles!" msgstr "ログファイル内にエラーはありません。" #, python-format msgid "No file found with %s as backing file." msgstr "バッキング・ファイルとして %s を持つファイルが見つかりません。" #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "空いている LUN ID が残っていません。ホスト (%s) に接続できるボリュームの最大" "数を超過しています。" msgid "No free disk" msgstr "空きディスクはありません" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "正しい iscsi ポータルが %s の指定されたリストに見つかりません。" #, python-format msgid "No good iscsi portals found for %s." msgstr "%s の正しい iscsi ポータルが見つかりません。" #, python-format msgid "No host to create consistency group %s." msgstr "整合性グループ %s を作成するためのホストがありません。" msgid "No iSCSI-enabled ports on target array." msgstr "ターゲット配列に iSCSI に対応するポートがありません。" msgid "No image_name was specified in request." msgstr "要求に image_name が指定されていませんでした。" msgid "No initiator connected to fabric." msgstr "ファブリックに接続されたイニシエーターはありません。" #, python-format msgid "No initiator group found for initiator %s" msgstr "イニシエーター %s のイニシエーター・グループが見つかりません" msgid "No initiators found, cannot proceed" msgstr "イニシエーターが見つからないため、続行できません" #, python-format msgid "No interface found on cluster for ip %s" msgstr "ip %s のクラスター上にインターフェースが見つかりませんでした" msgid "No ip address found." msgstr "IP アドレスが見つかりません。" msgid "No iscsi auth groups were found in CloudByte." msgstr "CloudByte で iscsi 認証グループが見つかりませんでした。" msgid "No iscsi initiators were found in CloudByte." msgstr "iscsi イニシエーターが CloudByte に見つかりませんでした。" #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "CloudByte ボリューム [%s] の iscsi サービスが見つかりません。" msgid "No iscsi services found in CloudByte storage." msgstr "iscsi サービスが CloudByte ストレージに見つかりません。" #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." 
msgstr "" "鍵ファイルが指定されていないため、%(cert)s %(e)s から鍵をロードできません。" msgid "No mounted Gluster shares found" msgstr "マウントされた Gluster 共有が見つかりません" msgid "No mounted NFS shares found" msgstr "マウントされた NFS 共有が見つかりません" msgid "No mounted SMBFS shares found." msgstr "マウントされた SMBFS 共有が見つかりません。" msgid "No mounted Virtuozzo Storage shares found" msgstr "マウントされた Virtuozzo Storage 共有が見つかりません" msgid "No mounted shares found" msgstr "マウントされた共有が見つかりません" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "ボリューム %(vol)s の入出力グループ %(gid)s でノードが見つかりません。" msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "ボリュームのプロビジョニングに使用できるプールがありません。設定オプション " "netapp_pool_name_search_pattern が正しく設定されていることを確認してくださ" "い。" msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "CloudByte ストレージのリスト iSCSI 認証ユーザーの API の呼び出しからレスポン" "スがありませんでした。" msgid "No response was received from CloudByte storage list tsm API call." msgstr "" "CloudByte ストレージのリスト tsm API の呼び出しから応答を受け取りませんでし" "た。" msgid "No response was received from CloudByte's list filesystem api call." msgstr "" "CloudByte のリストファイルシステムの API 呼び出しから応答を受け取りませんでし" "た。" msgid "No service VIP configured and no nexenta_client_address" msgstr "サービス VIP が設定されておらず、nexenta_client_address がありません" #, python-format msgid "No snap found with %s as backing file." msgstr "バッキング・ファイルとして %s を持つスナップが見つかりません。" #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "" "スナップショットグループ %s にスナップショットイメージが見つかりません。" #, python-format msgid "No snapshots could be found on volume %s." msgstr "ボリューム %s でスナップショットを見つけることができませんでした。" #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "整合性グループ %s を作成するためのソーススナップショットが提供されていませ" "ん。" #, python-format msgid "No storage path found for export path %s" msgstr "エクスポート・パス %s 用のストレージ・パスが見つかりません" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "そのような QoS 仕様 %(specs_id)s は存在しません。" msgid "No suitable discovery ip found" msgstr "適切なディスカバリー ip が見つかりません" #, python-format msgid "No support to restore backup version %s" msgstr "バックアップバージョン %s をリストアすることができません" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s のターゲット ID が見つかりません。" msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "ホスト上に使用可能な未使用の LUN ID がありません。すべての LUN ID がホストグ" "ループ全体で一意である必要のある、マルチ接続が有効になっています。" #, python-format msgid "No valid host was found. %(reason)s" msgstr "有効なホストが見つかりませんでした。%(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "タイプ %(type)s のボリューム %(id)s に対して有効なホストがありません" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "参照 %s によって指定された UID を持つ vdisk がありません。" #, python-format msgid "No views found for LUN: %s" msgstr "LUN: %s 用のビューが見つかりませんでした" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "仮想サーバー %(vserver)s および接合パス %(junction)s を含むボリュームはクラス" "ターにありません" msgid "No volume service(s) started successfully, terminating." msgstr "どのボリュームサービス も正常に起動しませんでした。処理を終了します。" msgid "No volume was found at CloudByte storage." msgstr "CloudByte ストレージでボリュームが見つかりませんでした。" msgid "No volume_type should be provided when creating test replica." 
msgstr "テストレプリカの作成時に volume_type を指定してはなりません。" msgid "No volumes found in CloudByte storage." msgstr "ボリュームが CloudByte ストレージに見つかりません。" msgid "No weighed hosts available" msgstr "重み付けを設定したホストが存在しません" #, python-format msgid "Not a valid string: %s" msgstr "有効な文字列ではありません: %s" msgid "Not a valid value for NaElement." msgstr "NaElement に無効な値です。" #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "ボリューム %s に適したデータストアが見つかりません。" msgid "Not an rbd snapshot" msgstr "rbd スナップショットではありません" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "イメージ %(image_id)s では許可されません。" msgid "Not authorized." msgstr "許可されていません。" #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "バックエンド容量が不十分です (%(backend)s)" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "" "この操作を実行するために十分なストレージ・スペースが ZFS 共有にありません。" msgid "Not stored in rbd" msgstr "rbd 内に保管されていません" msgid "Nova returned \"error\" status while creating snapshot." msgstr "スナップショットの作成時に Nova から「エラー」状況が返されました。" msgid "Null response received from CloudByte's list filesystem." msgstr "" "CloudByte のリストファイルシステムから Null のレスポンスを受信しました。" msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "" "CloudByte の リスト iscsi 認証グループから Null のレスポンスを受信しました。" msgid "Null response received from CloudByte's list iscsi initiators." msgstr "CloudByte のリスト iscsi イニシエーターからヌル応答を受け取りました。" msgid "Null response received from CloudByte's list volume iscsi service." msgstr "" "CloudByte のリスト・ボリューム iscsi サービスからヌル応答を受け取りました。" #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "CloudByte のストレージでボリューム [%s] を作成中に Null のレスポンスを受信し" "ました。" #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" "CloudByte のストレージでボリューム [%s] を削除中に Null のレスポンスを受信し" "ました。" #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "CloudByte のストレージで [%(operation)s] に 関するジョブ [%(job)s] の検索中" "に Null のレスポンスを受信しました。" msgid "Number of retries if connection to ceph cluster failed." msgstr "ceph クラスターへの接続が失敗した場合の再接続の回数。" msgid "Object Count" msgstr "オブジェクト数" msgid "Object Version" msgstr "オブジェクトのバージョン" msgid "Object is not a NetApp LUN." msgstr "オブジェクトは NetApp LUN ではありません。" #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "拡張操作時に複合ボリューム %(volumename)s へのボリュームの追加中にエラーが発" "生しました。" msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "cinder-volume サービスの 1 つが古すぎるため、このようなリクエストを受け付ける" "ことができません。Liberty と Mitaka のさまざまな cinder-volumes の組み合わせ" "を実行していますか。" msgid "One of the required inputs from host, port or scheme was not found." msgstr "" "ホスト、ポート、またはスキーマからの必須の入力の 1 つが見つかりませんでした。" #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "%(uri)s に対して実行できる要求は、%(unit_string)s につき %(value)s %(verb)s " "要求に限られます。" msgid "Only one limit can be set in a QoS spec." msgstr "QoS 仕様に設定できる制限は 1 つのみです。" msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "直近の親またはルートプロジェクトに割り当てられたトークンを持つユーザーのみ" "が、子のクォータを参照することができます。" msgid "Only volumes managed by OpenStack can be unmanaged." 
msgstr "非管理に設定できるのは、OpenStack が管理するボリュームのみです。" #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "状況=%(status)s で操作が失敗しました。フルダンプ: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "操作はサポートされていません: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "gpfs_images_dir オプションが正しく設定されていません。" msgid "Option gpfs_images_share_mode is not set correctly." msgstr "gpfs_images_share_mode オプションが正しく設定されていません。" msgid "Option gpfs_mount_point_base is not set correctly." msgstr "gpfs_mount_point_base オプションが正しく設定されていません。" msgid "Option map (cls._map) is not defined." msgstr "オプションマップ (cls._map) が定義されていません。" #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "" "作成元の %(res)s %(prop)s は '%(vals)s' 値のいずれかでなければなりません。" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "" "Blockbridge API サーバーにアクセスするために HTTPS ポートを上書きする。" #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" "パーティション名がありません。キーで smartpartition:partitionname を設定して" "ください。" msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "認証にはパスワードまたは SSH 秘密鍵が必要です: san_password または " "san_private_key オプションを設定してください。" msgid "Path to REST server's certificate must be specified." msgstr "REST サーバーの証明書へのパスを指定しなければなりません。" #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "事前に %(pool_list)s プールを作成してください。" #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "事前にプール %(pool)s に %(tier_levels)s 層を作成してください。" msgid "Please re-run cinder-manage as root." msgstr "cinder-manage を root として再実行してください。" msgid "Please specify a name for QoS specs." msgstr "QoS 仕様の名前を指定してください。" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "ポリシーは %(action)s の実行を許可していません。" #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "プール %(poolNameInStr)s が見つかりません。" #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "プール %s は Nexenta Store アプライアンスに存在しません" #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "ボリュームのプール ['host'] %(host)s が見つかりません。" #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "ボリュームのプール ['host'] が以下のため失敗しました: %(ex)s。" msgid "Pool is not available in the volume host field." msgstr "プールがボリューム・ホスト・フィールドにありません。" msgid "Pool is not available in the volume host fields." msgstr "プールがボリューム・ホスト・フィールドにありません。" #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "名前が %(pool)s のプールがドメイン %(domain)s で見つかりませんでした。" #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" "名前が %(pool_name)s のプールがドメイン %(domain_id)s で見つかりませんでし" "た。" #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "プール %(poolName)s は、FAST ポリシー %(fastPolicy)s のストレージ層に関連付け" "られていません。" #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "PoolName は %(fileName)s ファイル内に存在する必要があります。" #, python-format msgid "Pools %s does not exist" msgstr "プール %s は存在しません" msgid "Pools name is not set." msgstr "プール名が設定されていません。" #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." 
msgstr "プライマリーコピーの状態: %(status)s および同期済み: %(sync)s。" msgid "Project ID" msgstr "プロジェクト ID" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "" "ネストされたクォータに対してプロジェクトのクォータが適切に設定されていませ" "ん: %(reason)s" msgid "Protection Group not ready." msgstr "保護グループの準備ができていません。" #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "ストレージ・ファミリー %(storage_family)s ではプロトコル " "%(storage_protocol)s はサポートされません。" msgid "Provided backup record is missing an id" msgstr "提供されたバックアップレコードに ID がありません" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "指定されたスナップショット・ステータス %(provided)s は、ステータスが " "%(current)s となっているスナップショットには許可されません。" #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "OpenStack のボリューム [%s] について、CloudByte のストレージに関するプロバイ" "ダー情報が見つかりませんでした。" #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Pure Storage Cinder ドライバー障害です: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "QoS 仕様 %(specs_id)s は既に存在します。" #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "QoS 仕様 %(specs_id)s はまだエンティティーと関連付けられています。" #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "QoS 設定が誤っています。%s は 0 より大きくなければなりません。" #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "" "IOTYPE ともう 1 つの qos_specs に QoS ポリシーを指定する必要があります。QoS " "ポリシー: %(qos_policy)s。" #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "" "IOTYPE に QoS ポリシーを指定する必要があります: 0、1、または 2、QoS ポリ" "シー: %(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "" "QoS ポリシー upper_limit と lower_limit が競合しています。QoS ポリシー: " "%(qos_policy)s。" #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" "QoS 仕様 %(specs_id)s には、キー %(specs_key)s を持つ仕様はありません。" msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" "QoS 仕様はこのストレージファミリーおよび ONTAP バージョンでサポートされませ" "ん。" msgid "Qos specs still in use." msgstr "Qos 仕様はまだ使用中です。" msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "サービス・パラメーターによる照会は推奨されません。代わりにバイナリー・パラ" "メーターを使用してください。" msgid "Query resource pool error." msgstr "リソースプール照会のエラー。" #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "クォータ %s の制限は既存のリソース以上である必要があります。" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "割り当て量クラス %(class_name)s が見つかりませんでした。" msgid "Quota could not be found" msgstr "割り当て量が見つかりませんでした" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "リソースの割り当て量を超過しました: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "割り当て量を超過しました: code=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." 
msgstr "プロジェクト %(project_id)s の割り当て量が見つかりませんでした。" #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "リソース '%(res)s' のプロジェクト'%(proj)s' に関するクォーター上限が無効で" "す : %(limit)d の上限が %(used)d の使用中の値よりも小さくなっています" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "割り当て量の予約 %(uuid)s が見つかりませんでした。" #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "プロジェクト %(project_id)s の割り当て量使用率が見つかりませんでした。" #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "RBD diff 操作が失敗しました: (ret=%(ret)s stderr=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "REST サーバーの IP を指定しなければなりません。" msgid "REST server password must by specified." msgstr "REST サーバーのパスワードを指定しなければなりません。" msgid "REST server username must by specified." msgstr "REST サーバーのユーザー名を指定しなければなりません。" msgid "RPC Version" msgstr "RPC のバージョン" msgid "RPC server response is incomplete" msgstr "RPC サーバーの応答が完了していません" msgid "Raid did not have MCS Channel." msgstr "RAID に MCS チャンネルがありません。" #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "構成オプション max_luns_per_storage_group によって設定された制限に達しまし" "た。%(vol)s をストレージ・グループ %(sg)s に追加する操作は拒否されます。" #, python-format msgid "Received error string: %s" msgstr "エラー文字列を受信しました: %s" msgid "Reference must be for an unmanaged snapshot." msgstr "参照は非管理対象のスナップショットに対するものでなければなりません。" msgid "Reference must be for an unmanaged virtual volume." msgstr "非管理対象の仮想ボリュームに対する参照でなければなりません。" msgid "Reference must be the name of an unmanaged snapshot." msgstr "参照は非管理対象のスナップショットの名前でなければなりません。" msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "参照は非管理対象仮想ボリュームのボリューム名でなければなりません。" msgid "Reference must contain either source-id or source-name element." msgstr "" "参照には source-id または source-name のいずれかのエレメントが含まれていなけ" "ればなりません。" msgid "Reference must contain either source-name or source-id element." msgstr "" "参照には source-name または source-id のいずれかのエレメントが含まれていなけ" "ればなりません。" msgid "Reference must contain source-id or source-name element." msgstr "" "参照には source-id または source-name の要素が含まれていなければなりません。" msgid "Reference must contain source-id or source-name key." msgstr "" "参照には source-id または source-name キーが含まれていなければなりません。" msgid "Reference must contain source-id or source-name." msgstr "参照には source-id または source-name が含まれていなければなりません。" msgid "Reference must contain source-id." msgstr "参照には source-id が含まれていなければなりません。" msgid "Reference must contain source-name element." msgstr "参照には source-name エレメントが含まれていなければなりません。" msgid "Reference must contain source-name or source-id." msgstr "参照には source-name または source-id が含まれていなければなりません。" msgid "Reference must contain source-name." msgstr "参照には source-name が含まれていなければなりません。" msgid "Reference to volume to be managed must contain source-name." msgstr "" "管理対象のボリュームへの参照には source-name が含まれていなければなりません。" #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "" "管理対象のボリューム: %s への参照には source-name が含まれていなければなりま" "せん。" #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." 
msgstr "" "ボリューム ID %(id)s のマイグレーションを拒否中。ソースと宛先が同じボリュー" "ム・グループ %(name)s であるため、構成を確認してください。" msgid "Remote pool cannot be found." msgstr "リモートプールが見つかりません。" msgid "Remove CHAP error." msgstr "CHAP 削除のエラー。" msgid "Remove fc from host error." msgstr "ホストからの FC 削除のエラー。" msgid "Remove host from array error." msgstr "アレイからのホスト削除のエラー。" msgid "Remove host from hostgroup error." msgstr "ホストグループからのホスト削除のエラー。" msgid "Remove iscsi from host error." msgstr "ホストからの iSCSI 削除のエラー。" msgid "Remove lun from QoS error." msgstr "QoS からの LUN 削除のエラー。" msgid "Remove lun from cache error." msgstr "キャッシュからの LUN 削除のエラー。" msgid "Remove lun from partition error." msgstr "パーティションからの LUN 削除のエラー" msgid "Remove port from port group error." msgstr "ポートグループからのポート削除のエラー。" msgid "Remove volume export failed." msgstr "ボリュームのエクスポートの削除に失敗しました。" msgid "Rename lun on array error." msgstr "アレイでの LUN 名前変更のエラー。" msgid "Rename snapshot on array error." msgstr "アレイでのスナップショット名前変更のエラー。" #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "%(name)s の %(ssn)s へのレプリケーションが失敗しました。" #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "複製サービス機能が %(storageSystemName)s に見つかりません。" #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "複製サービスが %(storageSystemName)s に見つかりません。" msgid "Replication is not enabled" msgstr "複製が有効になっていません" msgid "Replication is not enabled for volume" msgstr "ボリュームの複製が有効になっていません" msgid "Replication not allowed yet." msgstr "まだレプリケーションを行うことはできません。" #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "ボリュームの複製状況は「アクティブ」または「アクティブ - 停止」でなければなり" "ませんが、現在の状況は %s です" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "ボリュームの複製状況は「非アクティブ」、「アクティブ - 停止」、または「エ" "ラー」でなければなりませんが、現在の状況は %s です" msgid "Request body and URI mismatch" msgstr "要求本体と URI の不一致" msgid "Request body contains too many items" msgstr "要求本体に含まれる項目が多すぎます" msgid "Request body contains too many items." msgstr "要求本体に含まれる項目が多すぎます。" msgid "Request body empty" msgstr "要求本体が空です" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "Datera クラスターに対する要求から、正しくない状況が返されました: %(status)s " "| %(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "要求されたバックアップが許容バックアップ割り当て量 (ギガバイト) を超えていま" "す。要求量 %(requested)s G、割り当て量 %(quota)s G、消費量 %(consumed)s. G。" #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "スナップショットにリクエストされたボリュームが許容される %(name)s のクォータ" "を超えています。%(requested)sG がリクエストされ、%(quota)sG のクォータが設定" "され、%(consumed)sG が使用されています。" #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "リクエストされたボリュームサイズ %(size)d が許容される最大サイズ %(limit)d を" "超えています。" msgid "Required configuration not found" msgstr "必要な構成が見つかりません" #, python-format msgid "Required flag %s is not set" msgstr "必須フラグ %s が設定されていません" msgid "Requires an NaServer instance." 
msgstr "NaServer インスタンスが必要です。" #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "バックアップ状況のリセットが中止しました。現在構成されているバックアップサー" "ビス [%(configured_service)s] は、このバックアップの作成に使用されたバック" "アップサービス [%(backup_service)s] ではありません。" #, python-format msgid "Resizing clone %s failed." msgstr "複製 %s のリサイズが失敗しました。" msgid "Resizing image file failed." msgstr "イメージファイルのサイズ変更が失敗しました。" msgid "Resource could not be found." msgstr "リソースが見つかりませんでした。" msgid "Resource not ready." msgstr "リソースが作動不能です。" #, python-format msgid "Response error - %s." msgstr "応答エラー - %s。" msgid "Response error - The storage-system is offline." msgstr "応答エラー - 該当の storage-system はオフラインです。" #, python-format msgid "Response error code - %s." msgstr "応答エラーコード - %s。" msgid "RestURL is not configured." msgstr "Rest URL は設定されていません。" #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップのリストアが中止しました。予期していたボリューム状況は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "バックアップのリストアが中止しました。現在構成されているバックアップサービス " "[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ" "サービス [%(backup_service)s] ではありません。" #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップのリストアが中止しました。予期していたバックアップ状況は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "指定された Cinder スナップショットについて異なる量の SolidFire ボリュームを検" "出しました。%(ret)s を検出しましたが、%(des)s を期待していました" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "指定された Cinder ボリュームについて異なる量の SolidFire ボリュームを検出しま" "した。%(ret)s を検出しましたが、%(des)s を期待していました" #, python-format msgid "Retry count exceeded for command: %s" msgstr "コマンドの再試行回数を超過しました: %s" msgid "Retryable SolidFire Exception encountered" msgstr "再試行可能な SolidFire 例外が発生しました" msgid "Retype cannot change encryption requirements." msgstr "タイプ変更によって暗号化要件を変更することはできません。" #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "タイプ変更によって使用中ボリュームのフロントエンド qos 仕様を変更することはで" "きません: %s。" msgid "Retype requires migration but is not allowed." msgstr "タイプ変更するにはマイグレーションが必要ですが、許可されていません。" #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "ボリューム %(volumeName)s のロールバックに失敗しました。システム管理者に連絡" "して、ボリュームを失敗した FAST ポリシー %(fastPolicyName)s のデフォルトのス" "トレージグループに手動で戻してください。" #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "%(volumeName)s を削除してロールバックしています。" #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" "Cinder をバージョン %s 以前の VMware vCenter と共に実行することは許可されてい" "ません。" msgid "SAN product is not configured." msgstr "SAN 製品は設定されていません。" msgid "SAN protocol is not configured." 
msgstr "SAN プロトコルは設定されていません。" #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" "SMBFS 構成 'smbfs_oversub_ratio' は無効です。0 より大きくなければなりません: " "%s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "SMBFS 構成 'smbfs_used_ratio' は無効です。0 より大きく、1.0 以下でなければな" "りません: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "%(config)s の SMBFS 構成ファイルは存在しません。" msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS 構成ファイルが設定されていません (smbfs_shares_config)。" #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" "'%(total_attempts)r' 回の試行後に SSH コマンドが失敗しました: '%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "" "SSH コマンドがエラー: '%(err)s' 、コマンド: '%(command)s' で失敗しました。" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "SSH コマンド注入が検出されました: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "%(fabric)s の SSH 接続がエラー %(err)s で失敗しました" #, python-format msgid "SSL Certificate expired on %s." msgstr "%s で SSL 証明書の有効期限が切れました。" #, python-format msgid "SSL error: %(arg)s." msgstr "SSL エラー: %(arg)s。" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "スケジューラーホストフィルター %(filter_name)s が見つかりませんでした。" #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "スケジューラーホスト Weigher %(weigher_name)s が見つかりませんでした。" #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "セカンダリーコピーの状態: %(status)s および同期済み: %(sync)s、同期が進行中: " "%(progress)s%%。" #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" "セカンダリー ID はプライマリー配列と同じであってはいけません。backend_id は " "%(secondary)s です。" #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "SerialNumber は %(fileName)s ファイル内に存在する必要があります。" #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "ホスト %(host)s 上のサービス %(service)s を削除しました。" #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "ホスト %(host)s でサービス%(service_id)s が見つかりませんでした。" #, python-format msgid "Service %(service_id)s could not be found." msgstr "サービス %(service_id)s が見つかりませんでした。" #, python-format msgid "Service %s not found." msgstr "サービス %s が見つかりません。" msgid "Service is too old to fulfil this request." msgstr "サービスが古すぎるため、このリクエストに対応できません。" msgid "Service is unavailable at this time." msgstr "現在サービスは使用できません。" msgid "Service not found." msgstr "サービスが見つかりません。" msgid "Set pair secondary access error." msgstr "ペアのセカンダリーアクセス設定のエラー。" msgid "Sets thin provisioning." msgstr "シンプロビジョニングを設定します。" msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "LUN QoS ポリシー・グループの設定は、このストレージ・ファミリーおよびONTAP " "バージョンではサポートされていません。" msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "ファイル qos ポリシー・グループの設定は、このストレージ・ファミリーおよび" "ontap バージョンではサポートされていません。" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." 
msgstr "" "形式が無効であるため、共有 %s は無視されました。address:/export 形式でなけれ" "ばなりません。nas_ip および nas_share_path の設定を確認してください。" #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "%(dir)s にある共有は、Cinder ボリュームサービスによって書き込み可能ではありま" "せん。スナップショット操作はサポートされません。" #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "シープドッグの I/O エラー。実行されたコマンド: \"%s\"." msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "処理の表示を行えるのは、ユーザーが割り当てられたプロジェクトと同じ階層にある" "プロジェクトに限られます。" msgid "Size" msgstr "サイズ" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "ボリュームのサイズ %s が見つかりません、セキュアな削除ができません。" #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "サイズは %(image_size)d GB で、サイズ %(volume_size)d GB のボリュームに適合し" "ません。" #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "指定されたイメージのサイズ %(image_size)s GB がボリュームサイズ " "%(volume_size)s GB を上回っています。" #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "スナップショット ID %(id)s が使用可能になるよう待機している途中に削除対象に" "指定されました。同時実行リクエストが行われた可能性があります。" #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "カスケードの削除中に、「削除中」ではなく %(state)s の状態でスナップショット " "%(id)s が見つかりました。" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "スナップショット %(snapshot_id)s が見つかりませんでした。" #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "スナップショット %(snapshot_id)s にはキー %(metadata_key)s を持つメタデータは" "ありません。" #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "スナップショット %s は整合性グループの一部であってはなりません。" #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "スナップショット '%s' はアレイに存在しません。" #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "ボリューム %(vol_id)s が「使用可能」ではないため、スナップショットを作成でき" "ません。現在のボリューム状況は %(vol_status)s です。" msgid "Snapshot cannot be created while volume is migrating." msgstr "" "ボリュームのマイグレーション中にスナップショットを作成することはできません。" msgid "Snapshot of secondary replica is not allowed." msgstr "2 次レプリカのスナップショットは許可されません。" #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "状態 %s でのボリュームのスナップショットはサポートされていません。" #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "スナップショットリソース \"%s\" が実装されていますか" msgid "Snapshot size must be multiple of 1 GB." msgstr "スナップショットのサイズは 1 GB の倍数である必要があります。" #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" "スナップショット状況 %(cur)s は update_snapshot_status には許可されません" msgid "Snapshot status must be \"available\" to clone." msgstr "" "複製を行うには、スナップショット状況が「使用可能」でなければなりません。" #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "" "バックアップ対象のスナップショットが利用可能である必要がありますが、現在の状" "態は \"%s\" です。" #, python-format msgid "Snapshot with id of %s could not be found." 
msgstr "%s の ID を持つスナップショットを見つけることができませんでした。" #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "スナップショット '%(snap)s' は基本イメージ '%(base)s' 内に存在しません: 増分" "バックアップを打ち切ります" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "このボリューム形式では、スナップショットはサポートされていません: %s" #, python-format msgid "Socket error: %(arg)s." msgstr "ソケットエラー: %(arg)s。" msgid "SolidFire Cinder Driver exception" msgstr "SolidFire Cinder Driver 例外" msgid "Sort direction array size exceeds sort key array size." msgstr "ソート方向の配列サイズがソートキーの配列サイズを超えています。" msgid "Source CG is empty. No consistency group will be created." msgstr "ソース CG が空です。整合性グループは作成されません。" msgid "Source host details not found." msgstr "ソース・ホスト詳細が見つかりません" msgid "Source volume device ID is required." msgstr "ソースボリュームのデバイス ID が必要です。" msgid "Source volume not mid-migration." msgstr "ソース・ボリュームはマイグレーション中ではありません" #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "ホスト IP/名前: %s を持つソースがターゲットアプライアンスで見つかりませんでし" "た。これはバックエンドが有効になっているボリュームマイグレーションのためのア" "プライアンスであり、デフォルトのマイグレーションを使用して開始されます。" msgid "SpaceInfo returned byarray is invalid" msgstr "アレイによって返された SpaceInfo が無効です" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "ボリューム %(vol)s にマップする指定されたホストは、サポート対象ではない " "%(group)s のホストグループ内にあります。" msgid "Specified logical volume does not exist." msgstr "指定された論理ボリュームは存在しません。" #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "" "ID %s の指定されたスナップショットグループを見つけることができませんでした。" msgid "Specify a password or private_key" msgstr "パスワードまたは private_key を指定してください" msgid "Specify san_password or san_private_key" msgstr "san_password または san_private_key を指定してください" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "" "ボリュームタイプの名前、説明、is_public、またはこれらの組み合わせを指定してく" "ださい。" msgid "Split pair error." msgstr "ペア分割のエラー。" msgid "Split replication failed." msgstr "レプリケーションの分割が失敗しました。" msgid "Start LUNcopy error." msgstr "LUN コピー開始のエラー。" msgid "State" msgstr "状態" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "ノードの状態が誤っています。現在の状態は %s です。" msgid "Status" msgstr "ステータス" msgid "Stop snapshot error." msgstr "スナップショット停止のエラー。" #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "ストレージ構成サービスが %(storageSystemName)s に見つかりません。" #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "ストレージ・ハードウェア ID 管理サービスが %(storageSystemName)s に見つかりま" "せん。" #, python-format msgid "Storage Profile %s not found." msgstr "ストレージプロファイル %s が見つかりません。" #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "ストレージ再配置サービスが %(storageSystemName)s に見つかりません。" #, python-format msgid "Storage family %s is not supported." msgstr "ストレージ・ファミリー %s はサポートされていません。" #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "ストレージグループ %(storageGroupName)s は正常に削除されませんでした" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "ストレージホスト %(svr)s が検出されません。名前を検証してください" msgid "Storage pool is not configured." msgstr "ストレージプールが設定されていません。" #, python-format msgid "Storage profile: %(storage_profile)s not found." 
msgstr "ストレージ・プロファイル %(storage_profile)s が見つかりません。" msgid "Storage resource could not be found." msgstr "ストレージリソースが見つかりませんでした。" msgid "Storage system id not set." msgstr "ストレージ・システム ID が設定されていません。" #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "プール %(poolNameInStr)s のストレージ・システムが見つかりません。" msgid "Storage-assisted migration failed during manage volume." msgstr "" "ボリュームの管理中にストレージによってサポートされるマイグレーションが失敗し" "ました。" #, python-format msgid "StorageSystem %(array)s is not found." msgstr "ストレージシステム %(array)s が見つかりません。" #, python-format msgid "String with params: %s" msgstr "パラメーターが指定された文字列: %s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "子の使用量の合計 '%(sum)s' がリソース '%(res)s' のプロジェクト'%(proj)s' に" "関するクォータ '%(free)s' よりも大きくなっています。以下のプロジェクトの 1 つ" "以上について上限または使用量を減らしてください: '%(child_ids)s'" msgid "Switch over pair error." msgstr "ペア切り替えのエラー。" msgid "Sync pair error." msgstr "ペア同期のエラー。" msgid "Synchronizing secondary volume to primary failed." msgstr "2 次ボリュームの 1 次ボリュームへの同期に失敗しました。" #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "正しくない状態 - %(pass_status)s のシステム %(id)s が見つかりました。" #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "システム %(id)s で正しくない状況 %(status)s が見つかりました。" msgid "System does not support compression." msgstr "システムは圧縮をサポートしません。" msgid "System is busy, retry operation." msgstr "システムがビジー状態です。再試行してください。" #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "アカウント [%(account)s] の CloudByte ストレージで、TSM [%(tsm)s] が見つかり" "ませんでした。" msgid "Target volume type is still in use." msgstr "ターゲットボリュームタイプはまだ使用中です。" #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "テンプレート・ツリーの不一致。スレーブ %(slavetag)s をマスター %(mastertag)s " "に追加しています" #, python-format msgid "Tenant ID: %s does not exist." msgstr "テナント ID %s が存在しません。" msgid "Terminate connection failed" msgstr "接続を強制終了できませんでした" msgid "Terminate connection unable to connect to backend." msgstr "バックエンドに接続できない接続を強制終了します。" #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "ボリューム接続の終了に失敗しました: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "複製する %(type)s %(id)s ソースが見つかりませんでした。" msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "「sort_key」パラメーターおよび「sort_dir」パラメーターは非推奨であり、" "「sort」パラメーターと併用することはできません。" msgid "The EQL array has closed the connection." msgstr "EQL アレイが接続を閉じました。" #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "GPFS ファイル・システム %(fs)s は必要なリリース・レベルに達していません。現在" "のレベルは %(cur)s です。%(min)s 以上は必要です。" msgid "The IP Address was not found." msgstr "IP アドレスが見つかりませんでした。" #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." 
msgstr "" "WebDAV 要求が失敗しました。理由: %(msg)s、戻りコード/理由: %(code)s、ソース・" "ボリューム: %(src)s、宛先ボリューム: %(dst)s、メソッド: %(method)s。" msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "上のエラーは、データベースが作成されなかったことを示している可能性がありま" "す。\n" "このコマンドを実行する前に、'cinder-manage db sync' を使用してデータベースを" "作成してください。" #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "配列が SLO %(slo)s とワークロード %(workload)s のストレージプール設置をサポー" "トしません。配列で有効な SLO とワークロードを確認してください。" msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "" "ボリュームが作成されるバックエンドに有効になっているレプリケーションがありま" "せん。" #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "コマンド %(cmd)s が失敗しました。(ret: %(ret)s、stdout: %(out)s、stderr: " "%(err)s)" msgid "The copy should be primary or secondary" msgstr "コピーは 1 次または 2 次であることが必要です" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "論理デバイスの作成を完了できませんでした。(LDEV: %(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" "装飾されたメソッドは、ボリュームとスナップショットオブジェクトのいずれもを受" "け付けることができません。" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "パス %(path)s のデバイスは使用不可です: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "終了時刻 (%(end)s) は開始時刻 (%(start)s) より後でなければなりません。" #, python-format msgid "The extra_spec: %s is invalid." msgstr "extra_spec: %s が無効です。" #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "追加仕様: %(extraspec)s は無効です。" #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "フェイルオーバーされたボリュームを削除することができませんでした: %s" #, python-format msgid "The following elements are required: %s" msgstr "次のエレメントが必要です: %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "以下の移行にはダウングレードがありますが、これは許容されません: \n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "ホスト・グループまたは iSCSI ターゲットを追加できませんでした。" msgid "The host group or iSCSI target was not found." msgstr "ホスト・グループまたは iSCSI ターゲットが見つかりませんでした。" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "ホストはフェイルバックを行う準備ができていません。3PAR バックエンドでボリュー" "ムを再同期し、レプリケーションを再開してください。" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "ホストはフェイルバックを行う準備ができていません。LeftHand バックエンドでボ" "リュームを再同期し、複製を再開してください。" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "ホストはフェイルバックを行う準備ができていません。Storwize バックエンドでボ" "リュームを再同期し、複製を再開してください。" #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP ユーザー %(user)s は存在しません。" #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "インポートされた LUN %(lun_id)s はホスト %(host)s が管理しないプール " "%(lun_pool)s にあります。" msgid "The key cannot be None." 
msgstr "キーは None に設定することはできません。" #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "指定された %(type)s %(id)s の論理デバイスは既に削除されました。" #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "" "メソッド %(method)s がタイムアウトになりました。(タイムアウト値: %(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "update_migrated_volume メソッドが実装されていません。" #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" "マウント %(mount_path)s は有効な Quobyte USP ボリュームではありません。エ" "ラー: %(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "ストレージバックエンドのパラメーター。(config_group: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "増分バックアップでは親バックアップが使用可能でなければなりません。" #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "" "指定されたスナップショット '%s' は指定されたボリュームのスナップショットでは" "ありません。" msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "バックエンドのボリュームへの参照の形式は file_system/volume_name " "(volume_name には '/' を使用できません) でなければなりません" #, python-format msgid "The remote retention count must be %s or less." msgstr "リモートの保存数は %s 以下でなければなりません。" msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "ボリューム種別の extra_specs でレプリケーションモードが正しく構成されていませ" "ん。replication:mode が periodic の場合、replication:sync_period must も 300 " "秒から 31622400 秒の間に設定しなければなりません。" #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "レプリケーション同期期間は少なくとも %s 秒でなければなりません。" #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "要求されたサイズ %(requestedSize)s が、結果として作成されたサイ" "ズ%(resultSize)s と同一ではありません。" #, python-format msgid "The resource %(resource)s was not found." msgstr "リソース %(resource)s が見つかりませんでした。" msgid "The results are invalid." msgstr "結果が無効です。" #, python-format msgid "The retention count must be %s or less." msgstr "保存数は %s 以下でなければなりません。" msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "ボリュームがメンテナンスモードの場合は、スナップショットを作成できません。" #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "ソースボリューム %s は現在のホストが管理するプールにありません。" msgid "The source volume for this WebDAV operation not found." msgstr "この WebDAV 操作のソース・ボリュームが見つかりません。" #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "ソースのボリュームタイプ '%(src)s' が宛先のボリュームタイプ '%(dest)s' と異な" "ります。" #, python-format msgid "The source volume type '%s' is not available." msgstr "ソースボリュームタイプ '%s' は使用できません。" #, python-format msgid "The specified %(desc)s is busy." msgstr "指定された %(desc)s は使用中です。" #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "指定された LUN は指定のプールに属していません: %s。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "指定された ldev %(ldev)s を管理できませんでした。ldev をマッピングしてはなり" "ません。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." 
msgstr "" "指定された ldev %(ldev)s を管理できませんでした。ldev をペアにしてはなりませ" "ん。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "指定された ldev %(ldev)s を管理できませんでした。ldev サイズはギガバイトの倍" "数でなければなりません。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "指定された ldev %(ldev)s を管理できませんでした。ボリューム・タイプは DP-VOL " "でなければなりません。" #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "指定された操作はサポートされていません。ボリューム・サイズはソース %(type)s " "と同じでなければなりません。(ボリューム: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "指定された vdisk はホストにマップされています。" msgid "The specified volume is mapped to a host." msgstr "指定されたボリュームはホストにマップされています。" #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "%s のストレージアレイのパスワードが正しくありません。設定されたパスワードを更" "新してください。" #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "ストレージバックエンドを使用できます。(config_group: %(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "ストレージデバイスは %(prot)s をサポートしません。デバイスが %(prot)s をサ" "ポートするように設定するか、別のプロトコルを使用するドライバーに切り替えてく" "ださい。" #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "%(memberCount)s のストライプメタ数がボリュームに対して小さすぎます: " "%(volumeSize)s のサイズを持つ %(volumeName)s。" #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "メタデータのタイプ: ボリューム/スナップショット %(id)s の %(metadata_type)s " "が無効です。" #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "ボリューム %(volume_id)s を拡張できませんでした。ボリューム・タイプは「標準」" "でなければなりません。" #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "ボリューム %(volume_id)s を管理解除できませんでした。ボリューム・タイプは " "%(volume_type)s でなければなりません。" #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "ボリューム %(volume_id)s は正常に管理されています。(LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "" "ボリューム %(volume_id)s は正常に管理解除されています。(LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "マップするボリューム %(volume_id)s が見つかりませんでした。" msgid "The volume cannot accept transfer in maintenance mode." msgstr "メンテナンスモードではボリュームを転送できません。" msgid "The volume cannot be attached in maintenance mode." msgstr "メンテナンスモードではボリュームを追加できません。" msgid "The volume cannot be detached in maintenance mode." msgstr "メンテナンスモードではボリュームを切り離すことができません。" msgid "The volume cannot be updated during maintenance." msgstr "メンテナンス中にはボリュームを更新することはできません。" msgid "The volume connection cannot be initialized in maintenance mode." msgstr "メンテナンスモードではボリューム接続を初期化できません。" msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" "ボリューム・ドライバーには、コネクター内の iSCSI イニシエーター名が必要です。" msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." 
msgstr "" "ボリュームは 3PAR 上で現在使用中のため、この時点では削除できません。後で再試" "行できます。" msgid "The volume label is required as input." msgstr "入力としてボリュームラベルが必要です。" msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "" "ボリュームがメンテナンスモードの場合は、ボリュームのメタデータを削除できませ" "ん。" msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "ボリュームがメンテナンスモードの場合、ボリュームのメタデータは更新できませ" "ん。" #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "使用できるリソースがありません。(リソース: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "有効な ESX ホストがありません。" #, python-format msgid "There are no valid datastores attached to %s." msgstr "%s に接続された有効なデータ・ストアがありません。" msgid "There are no valid datastores." msgstr "有効なデータストアがありません。" #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "%(param)s の宛先がありません。指定されたストレージは、ボリュームを管理するた" "めに不可欠です。" msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "ldev の宛先がありません。指定された ldev は、ボリュームを管理するために不可欠" "です。" msgid "There is no metadata in DB object." msgstr "DBオブジェクトの中にメタデータがありません。" #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "%(volume_size)sG をホストできる共有がありません" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "%(volume_size)sG をホストできる共有がありません。" #, python-format msgid "There is no such action: %s" msgstr "このようなアクションはありません: %s" msgid "There is no virtual disk device." msgstr "仮想ディスク・デバイスがありません。" #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "" "リモートコピーグループへのボリュームの追加中にエラーが発生しました: %s。" #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "cgsnapshot の作成中にエラーが発生しました: %s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "リモートコピーグループの作成中にエラーが発生しました: %s。" #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "リモートコピーグループの同期期間の設定中にエラーが発生しました: %s。" #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "3PAR アレイでのリモートコピーグループのセットアップ中にエラーが発生しました: " "('%s')。ボリュームはレプリケーションタイプとして認識されません。" #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "LeftHand アレイのリモートスケジュールのセットアップ中にエラーが発生しました: " "('%s')。ボリュームはレプリケーションタイプとして認識されません。" #, python-format msgid "There was an error starting remote copy: %s." msgstr "リモートコピーの開始中にエラーが発生しました: %s。" #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Gluster 構成ファイルが構成されていません (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "NFS 構成ファイルが構成されていません (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "設定済みの Quobyte ボリューム (%s) が存在しません。 例: quobyte:///" "" msgid "Thin provisioning not supported on this version of LVM." msgstr "" "このバージョンの LVM ではシンプロビジョニングはサポートされていません。" msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "シン・プロビジョニング・イネーブラーがインストールされていません。シン・ボ" "リュームを作成できません" msgid "This driver does not support deleting in-use snapshots." 
msgstr "" "このドライバーは、使用中のスナップショットの削除をサポートしていません。" msgid "This driver does not support snapshotting in-use volumes." msgstr "" "このドライバーは、使用中のボリュームのスナップショット作成をサポートしていま" "せん。" msgid "This request was rate-limited." msgstr "この要求は rate-limited でした。" #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "このシステム・プラットフォーム (%s) はサポートされていません。このドライバー" "は、Win32 プラットフォームのみサポートします。" #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "%(storageSystemName)s の層ポリシー・サービスが見つかりません。" #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "スナップショット %s を作成するために Nova の更新を待機している間にタイムアウ" "トになりました。" #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "スナップショット %(id)s を削除するために Nova の更新を待機している間にタイム" "アウトになりました。" msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "ceph クラスターへの接続時にタイムアウト値 (秒) が使用されます。値が 0 より小" "さい場合、タイムアウトは設定されず、デフォルトの librados 値が使用されます。" #, python-format msgid "Timeout while calling %s " msgstr " %s の呼び出し中にタイムアウトが発生しました" #, python-format msgid "Timeout while requesting %(service)s API." msgstr "%(service)s API の要求中にタイムアウトになりました。" #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "" "バックエンドの %(service)s から機能をリクエストする際にタイムアウトが発生しま" "した。" #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "転送 %(transfer_id)s が見つかりませんでした。" #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "転送 %(transfer_id)s: ボリューム ID %(volume_id)s が予期しない状態%(status)s " "です。awaiting-transfer が予期されていました" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "ID %(meta_id)s からバックアップ %(id)s にバックアップのメタデータをインポート" "しようとしています。" #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "ボリュームタスクの調整が完了前に停止しました: volume_name=%(volume_name)s、" "task-status=%(status)s。" #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "タイプ %(type_id)s は既に別の qos 仕様 %(qos_specs_id)s に関連付けられていま" "す" msgid "Type access modification is not applicable to public volume type." msgstr "" "パブリックなボリュームタイプでは、タイプアクセスの変更を行うことはできませ" "ん。" msgid "Type cannot be converted into NaElement." msgstr "タイプは NaElement に変換できません。" #, python-format msgid "TypeError: %s" msgstr "TypeError: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUID %s が、ボリュームの追加リストと削除リストの両方に存在します。" #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "ボリューム %s の Storwize バックエンドにアクセスできません。" msgid "Unable to access the backend storage via file handle." msgstr "ファイルハンドル経由でバックエンドストレージにアクセスできません。" #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "パス %(path)s を介してバックエンド・ストレージにアクセスできません。" #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "" "スペース %(space)s のスナップショットに Cinder のホストを追加できません" #, python-format msgid "Unable to complete failover of %s." 
msgstr "%s のフェイルオーバーを完了できません。" msgid "Unable to connect or find connection to host" msgstr "ホストに接続できないか、ホストへの接続が見つかりません" msgid "Unable to create Barbican Client without project_id." msgstr "project_id なしでは Barbican Client を作成できません。" #, python-format msgid "Unable to create consistency group %s" msgstr "整合性グループ %s を作成できません" msgid "Unable to create lock. Coordination backend not started." msgstr "" "ロックを作成できません。コーディネーションバックエンドがスタートしていませ" "ん。" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST ポリシー %(fastPolicyName)s のデフォルトストレージグループを作成または取" "得できません。" #, python-format msgid "Unable to create replica clone for volume %s." msgstr "ボリューム %s のレプリカ複製を作成できません。" #, python-format msgid "Unable to create the relationship for %s." msgstr "%s の関係を作成できません。" #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "%(snap)s からボリューム %(name)s を作成できません" #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "%(vol)s からボリューム %(name)s を作成できません。" #, python-format msgid "Unable to create volume %s" msgstr "ボリューム %s を作成できません" msgid "Unable to create volume. Backend down." msgstr "ボリュームを作成できません。バックエンドがダウンしています。" #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "整合性グループ %s を削除できません" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "スナップショット %(id)s を削除できません。状況: %(status)s。" #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "ボリューム %s 上のスナップショットポリシーを削除できません。" #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "" "ボリューム %(vol)s のターゲットボリュームを削除できません。例外: %(err)s。" msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "ボリュームを切り離すことができません。切り離すには、ボリューム状況が「使用" "中」で、attach_status が「接続済み」でなければなりません。" #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "" "提供されたセカンダリー配列から secondary_array を検出できません: " "%(secondary)s" #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "" "Purity でスナップショット %(id)s のスナップショット名を判別できません。" msgid "Unable to determine system id." msgstr "システム ID を判別できません。" msgid "Unable to determine system name." msgstr "システム名を判別できません。" #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "Purity REST API のバージョン %(api_version)s でスナップショット処理を管理でき" "ません。%(required_versions)s が必要です。" #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "Purity REST API のバージョン %(api_version)s ではレプリケーションを行うことが" "できません。%(required_versions)s のうちのいずれかのバージョンが必要です。" msgid "Unable to enable replication and snapcopy at the same time." msgstr "レプリケーションとスナップコピーを同時に有効化することはできません。" #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." 
msgstr "Storwize クラスター %s とのパートナーシップを確立できません。" #, python-format msgid "Unable to extend volume %s" msgstr "ボリューム %s を拡張できません" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "" "レプリケーション関係を切り替えられないため、ボリューム %(id)s をセカンダリー" "バックエンドにフェイルオーバーできません: %(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "デフォルトにフェイルバックすることできません。フェイルバックができるのは、" "フェイルオーバーの完了後に限られます。" #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "" "レプリケーションターゲットへのフェイルオーバーが有効ではありません:" "%(reason)s)" msgid "Unable to fetch connection information from backend." msgstr "バックエンドから接続情報を取り出すことができません。" #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "バックエンドから接続情報を取り出すことができません: %(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "名前 %s を持つ Purity 参照が見つかりません" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "ボリュームグループが見つかりません: %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "" "フェイルオーバーのターゲットが見つかりません。セカンダリーターゲットが設定さ" "れていません。" msgid "Unable to find iSCSI mappings." msgstr "iSCSI のマッピングが見つかりません。" #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "ssh_hosts_key_file が見つかりません: %s" msgid "Unable to find system log file!" msgstr "システムログファイルが見つかりません。" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "" "特定のセカンダリー配列のフェイルオーバーで使用すべき適切な pg スナップショッ" "トを見つけることができません: %(id)s" #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "" "設定されたターゲットから適切なセカンダリー配列を見つけることができません: " "%(targets)s" #, python-format msgid "Unable to find volume %s" msgstr "ボリューム %s が見つかりません" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "ファイル '%s' のブロック・デバイスを取得できません" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "ボリュームの作成に必要な設定情報を取得できません: %(errorMessage)s。" msgid "Unable to get corresponding record for pool." msgstr "プールに該当するレコードを取得できません。" #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "スペース %(space)s に関する情報が得られません。クラスターが稼働中であることを" "確認し、是正してください。" msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "このホスト上の IP アドレスのリストが得られません。権限とネットワークを確認し" "てください。" msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "" "ドメインメンバーのリストが得られません。クラスターが稼働していることを確認し" "てください。" msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "新規の名前を作成するためのスペースのリストが得られません。クラスターが稼働し" "ていることを確認してください。" #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "backend_name の統計情報を取得できません: %s" msgid "Unable to get storage volume from job." msgstr "ジョブからストレージボリュームを取得できません。" #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "ハードウェア ID %(hardwareIdInstance)s のターゲット・エンドポイントを取得でき" "ません。" msgid "Unable to get the name of the masking view." msgstr "マスキングビューの名前を取得できません。" msgid "Unable to get the name of the portgroup." 
msgstr "ポートグループの名前を取得できません。" #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "ボリューム %s のレプリケーション関係を取得できません。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "Cinder にボリューム %(deviceId)s をインポートできません。これはレプリケーショ" "ンセッション %(sync)s のソースボリュームです。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "Uinder にボリューム %(deviceId)s をインポートできません。外部ボリュームは、現" "在の cinder ホストが管理するプールに含まれません。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "Cinder にボリューム %(deviceId)s を追加できません。マスキングビューのボリュー" "ムは %(mv)s です。" #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "%(cert)s %(e)s から認証局をロードできません。" #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "%(cert)s %(e)s から証明書をロードできません。" #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "%(cert)s %(e)s から鍵をロードできません。" #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" "Solidfire デバイス上でアカウント %(account_name)s を見つけることができません" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "IP アドレス「%s」を管理している SVM が見つかりません" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "指定されたリプレープロファイル %s を特定できません" #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "既に管理されているボリュームが存在するため、ボリューム %(volume_ref)s の管理" "に失敗しました" #, python-format msgid "Unable to manage volume %s" msgstr "ボリューム %s を管理できません" msgid "Unable to map volume" msgstr "ボリュームをマッピングできません" msgid "Unable to map volume." msgstr "ボリュームのマッピングができません。" msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "XML 要求を解析できません。正しい形式の XML を提供してください。" msgid "Unable to parse attributes." msgstr "属性を解析できません。" #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "レプリカをボリューム %s の 1 次レプリカにプロモートできません。2 次コピーがあ" "りません。" msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "use_chap_auth=True が指定されている、Cinder で管理されていないホストを再使用" "することはできません。" msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "不明な CHAP 資格情報が構成されているホストを再使用することはできません。" #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "ボリューム %(existing)s の名前を %(newname)s に変更できません" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "%s の ID を持つスナップショットグループを取得できません。" #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "%(specname)s を再入力できません。要求した最新の %(spectype)s の値を受信するこ" "とを予期していたものの、%(spec)s の値を受信しました" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "種別変更ができません。ボリューム %s のコピーが存在します。タイプ変更を行う" "と、コピー数 2 という制限を超えます。" #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "種別変更ができません: 現行アクションにはボリュームコピーが必要ですが、新しい" "タイプが複製の場合は許可されません。ボリューム = %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." 
msgstr "" "%(vol)s のミラーモードレプリケーションをセットアップできません。例外: " "%(err)s。" #, python-format msgid "Unable to snap Consistency Group %s" msgstr "整合性グループ %s を移動できません" msgid "Unable to terminate volume connection from backend." msgstr "バックエンドからのボリューム接続を終了することができません。" #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "ボリューム接続を終了することができません: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "整合性グループ %s を更新できません" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "ボリューム %(vol_id)s の状況 %(vol_status)s が正しくないため、タイプを更新で" "きません。ボリューム状況は「使用可能」または「使用中」でなければなりません。" #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" "マスキングビュー %(maskingViewName)s のイニシエーターグループ " "%(igGroupName)s を検査できません。" msgid "Unacceptable parameters." msgstr "受け入れられないパラメーター。" #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" " マッピング %(id)s の予期されないマッピング状態 %(status)s。属性: %(attr)s。" #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "予期しない CLI 応答: ヘッダー/行の不一致。ヘッダー: %(header)s、行: %(row)s。" #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "マッピング %(id)s の予期されないマッピング状態 %(status)s。属性: %(attr)s。" #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "" "予期しない出力。[%(expected)s] が予期されましたが、[%(output)s] を受け取りま" "した" msgid "Unexpected response from Nimble API" msgstr "Nimble API からの予期しない応答" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Tegile IntelliFlash API からの予期しない応答" msgid "Unexpected status code" msgstr "予期しない状況コード" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "URL %(page)s 用にプロトコル %(protocol)s を指定したスイッチ%(switch_id)s から" "返された予期しない状況コード。エラー: %(error)s" msgid "Unknown Gluster exception" msgstr "不明な Gluster 例外" msgid "Unknown NFS exception" msgstr "不明な NFS 例外" msgid "Unknown RemoteFS exception" msgstr "不明な RemoteFS 例外" msgid "Unknown SMBFS exception." msgstr "不明な SMBFS 例外。" msgid "Unknown Virtuozzo Storage exception" msgstr "Virtuozzo Storage で不明な例外が発生しました" msgid "Unknown action" msgstr "不明なアクション" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "管理対象のボリューム: %s がすでに Cinder によって管理されている場合は不明で" "す。ボリュームの管理を中止します。 'cinder_managed' カスタムスキーマプロパ" "ティーをそのボリュームに追加し、その値を False に設定してください。あるいは、" "Cinder の設定ポリシーの値 'zfssa_manage_policy' を 'loose' に変更してこの制限" "を取り除きます。" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." 
msgstr "" "管理対象のボリューム: %s がすでに Cinder によって管理されている場合は不明で" "す。ボリュームの管理を中止します。 'cinder_managed' カスタムスキーマプロパ" "ティーをそのボリュームに追加し、その値を False に設定してください。あるいは、" "Cinder の設定ポリシーの値 'zfssa_manage_policy' を 'loose' に変更してこの制限" "を取り除きます。" #, python-format msgid "Unknown operation %s." msgstr "不明な処理 %s。" #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "不明またはサポートされないコマンド (%(cmd)s) です" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "不明なプロトコル: %(protocol)s。" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "不明な割り当て量リソース %(unknown)s。" msgid "Unknown service" msgstr "不明なサービス" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません。" msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "削除オプションの非管理とカスケーディングを同時に行うことはできません。" msgid "Unmanage volume not implemented." msgstr "ボリュームの非管理が実装されていません。" msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" "'failed-over' ボリュームからのスナップショットを非管理対象にすることは許可さ" "れません。" msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" "フェイルオーバーされたボリュームからのスナップショットを非管理対象にすること" "は許可されません。" #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "認識されない QoS キーワード: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "認識されないバッキングフォーマット: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "認識されない read_deleted 値 '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "gcs オプションの設定を解除します: %s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "iscsiadm が正常に終了しませんでした。例外は %(ex)s です。 " msgid "Unsupported Clustered Data ONTAP version." msgstr "サポートされない Clustered Data ONTAP バージョンです。" msgid "Unsupported Content-Type" msgstr "サポートされない Content-Type" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" "サポートされない Data ONTAP バージョンです。Data ONTAP バージョン 7.3.1 以上" "がサポートされています。" #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "サポートされないバックアップのメタデータバージョン (%s)" msgid "Unsupported backup metadata version requested" msgstr "サポートされないバックアップメタデータバージョンが要求されました" msgid "Unsupported backup verify driver" msgstr "サポートされないバックアップ検証ドライバー" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "スイッチ %s でサポートされないファームウェアです。スイッチでファームウェア " "v6.4 以上が実行されていることを確認してください" #, python-format msgid "Unsupported volume format: %s " msgstr "ボリューム形式はサポートされていません: %s " msgid "Update QoS policy error." msgstr "QoS ポリシー更新のエラー。" msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "クォータ処理の更新と削除を行えるのは、直近の親の管理者または CLOUD 管理者のい" "ずれかです。" msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "クォータ処理の更新と削除を行えるのは、ユーザーが割り当てられたプロジェクトと" "同じ階層にあるプロジェクトに限られます。" msgid "Update list, doesn't include volume_id" msgstr "リストを更新します。volume_id が含まれません。" msgid "Updated At" msgstr "最終更新" msgid "Upload to glance of attached volume is not supported." msgstr "" "接続されたボリュームの glance へのアップロードはサポートされていません。" msgid "Use ALUA to associate initiator to host error." msgstr "ALUA を使用したホストへのイニシエーターの関連付けのエラー。" msgid "" "Use CHAP to associate initiator to host error. 
Please check the CHAP " "username and password." msgstr "" "CHAP を使用したホストへのイニシエーターの関連付けのエラー。CHAP のユーザー名" "とパスワードを確認してください。" msgid "User ID" msgstr "ユーザー ID" msgid "User does not have admin privileges" msgstr "ユーザーに管理者特権がありません" msgid "User is not authorized to use key manager." msgstr "ユーザーは鍵マネージャーの使用を許可されていません。" msgid "User not authorized to perform WebDAV operations." msgstr "ユーザーは WebDAV 操作の実行が許可されていません。" msgid "UserName is not configured." msgstr "ユーザー名は設定されていません。" msgid "UserPassword is not configured." msgstr "ユーザーパスワードは設定されていません。" msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "" "V2 のロールバック。デフォルトのストレージグループとは別のストレージグループの" "ボリューム。" msgid "V2 rollback, volume is not in any storage group." msgstr "" "V2 のロールバック。どのストレージグループにもボリュームが存在しません。" msgid "V3 rollback" msgstr "V3 のロールバック" msgid "VF is not enabled." msgstr "VF は有効になっていません。" #, python-format msgid "VV Set %s does not exist." msgstr "VV セット %s は存在しません。" #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "QoS 仕様の有効なコンシューマー: %s" #, python-format msgid "Valid control location are: %s" msgstr "有効な制御ロケーション: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "ボリューム接続の検証に失敗しました (エラー: %(err)s)。" #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "構成オプション \"%(option)s\" の値 \"%(value)s\" は無効です。" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "%(param_string)s の値 %(param)s がブール値ではありません。" msgid "Value required for 'scality_sofs_config'" msgstr "'scality_sofs_config' の値が必要です" #, python-format msgid "ValueError: %s" msgstr "ValueError: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "%(src)s から %(tgt)s へのマッピングに関連しない Vdisk %(name)s。" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "バージョン %(req_ver)s はこの API ではサポートされていません。最小値は " "%(min_ver)s で、最大値は %(max_ver)s です。" #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s は ID によるオブジェクトの取得ができません。" #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s は条件付き更新をサポートしていません。" #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "仮想ボリューム '%s' がアレイに存在しません。" #, python-format msgid "Vol copy job for dest %s failed." msgstr "宛先 %s のボリューム・コピー・ジョブが失敗しました。" #, python-format msgid "Volume %(deviceID)s not found." msgstr "ボリューム %(deviceID)s が見つかりません。" #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "ボリューム %(name)s がアレイ上に見つかりません。マップされるボリュームがある" "かどうかを判別できません。" #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "ボリューム %(name)s は VNX で作成されましたが、%(state)s 状態です。" #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "ボリューム %(vol)s をプール %(pool)s に作成できませんでした。" #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "ボリューム %(vol1)s が snapshot.volume_id %(vol2)s と一致しません。" #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." 
msgstr "" "ボリューム %(vol_id)s の状況は「使用可能」または「使用中」でなければなりませ" "んが、現在の状況は %(vol_status)s です。" #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "拡張するにはボリューム %(vol_id)s の状況が「使用可能」でなければなりません" "が、現在の状況は %(vol_status)s です。" #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "読み取り専用フラグを更新するには、ボリューム %(vol_id)s の状況が「使用可能」" "でなければなりませんが、現在の状況は %(vol_status)s です。" #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "ボリューム %(vol_id)s の状況は「使用可能」でなければなりませんが、現在の状況" "は %(vol_status)s です。" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "ボリューム %(volume_id)s が見つかりませんでした。" #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "ボリューム %(volume_id)s には、キー %(metadata_key)s を持つ管理メタデータがあ" "りません。" #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "ボリューム %(volume_id)s にはキー %(metadata_key)s を持つメタデータはありませ" "ん。" #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "ボリューム %(volume_id)s は現在、サポート対象ではないホストグループ " "%(group)s にマップされています" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "" "ボリューム %(volume_id)s は現在、ホスト %(host)s にマップされていません" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" "ボリューム %(volume_id)s はまだ接続されています。最初にボリュームを切り離して" "ください。" #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "ボリューム %(volume_id)s 複製エラー: %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "ボリューム %(volume_name)s は使用中です。" #, python-format msgid "Volume %s could not be created from source volume." msgstr "ボリューム %s をソースボリュームから作成できませんでした。" #, python-format msgid "Volume %s could not be created on shares." msgstr "共有上でボリューム %s を作成できませんでした。" #, python-format msgid "Volume %s could not be created." msgstr "ボリューム %s を作成できませんでした。" #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "ボリューム %s は Nexenta SA に存在しません" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "ボリューム %s は Nexenta Store アプライアンスに存在しません" #, python-format msgid "Volume %s does not exist on the array." msgstr "ボリューム %s does はアレイに存在しません。" #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "" "ボリューム %s で provider_location が指定されていません。スキップします。" #, python-format msgid "Volume %s doesn't exist on array." msgstr "ボリューム %s がアレイに存在しません。" #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "ボリューム %s が ZFSSA バックエンドに存在しません。" #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "ボリューム %s は OpenStack により既に管理されています。" #, python-format msgid "Volume %s is already part of an active migration." msgstr "ボリューム %s は既にアクティブ・マイグレーションの一部になっています。" #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "ボリューム %s は複製された種別のものではありません。このボリュームは、レプリ" "ケーションアクションをサポートするために追加仕様 replication_enabled を " "' True' に設定したボリューム種別のものでなければなりません。" #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." 
msgstr "" "ボリューム %s がオンラインです。OpenStack を使用して管理するために、ボリュー" "ムをオフラインに設定してください。" #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." msgstr "" "ボリューム %s の移行と追加を行うことはできず、整合性グループに含まれることは" "できず、スナップショットを持つこともできません。" #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "ボリューム %s は整合性グループの一部であってはなりません。" #, python-format msgid "Volume %s must not be replicated." msgstr "ボリューム %s を複製してはいけません。" #, python-format msgid "Volume %s must not have snapshots." msgstr "ボリューム %s にスナップショットがあってはなりません。" #, python-format msgid "Volume %s not found." msgstr "ボリューム %s が見つかりません。" #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "ボリューム %s: ボリュームの拡張を試行中にエラーが発生しました" #, python-format msgid "Volume (%s) already exists on array" msgstr "ボリューム (%s) はすでにアレイ上に存在します" #, python-format msgid "Volume (%s) already exists on array." msgstr "ボリューム (%s) は既にアレイ上にあります。" #, python-format msgid "Volume Group %s does not exist" msgstr "ボリュームグループ %s は存在しません" #, python-format msgid "Volume Type %(id)s already exists." msgstr "ボリューム種別 %(id)s は既に存在します。" #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "ボリュームタイプ %(type_id)s には、キー %(id)s に関する追加の仕様がありませ" "ん。" #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "ボリューム種別 %(volume_type_id)s を持つボリュームでは、そのボリューム種別は" "削除できません。" #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "ボリューム種別 %(volume_type_id)s にはキー %(extra_specs_key)s を持つ追加の仕" "様はありません。" msgid "Volume Type id must not be None." msgstr "ボリューム・タイプ ID を None に設定することはできません。" #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "OpenStack のボリューム [%(ops_vol)s] に相当する CloudByte のストレージでボ" "リューム [%(cb_vol)s] が見つかりませんでした。" #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "ボリューム [%s] が CloudByte ストレージに見つかりません。" #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "ボリューム接続がフィルター %(filter)s で見つかりませんでした。" #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "ボリューム・バックエンド構成が無効です: %(reason)s" msgid "Volume by this name already exists" msgstr "この名前のボリュームは既に存在します" msgid "Volume cannot be restored since it contains snapshots." msgstr "スナップショットが含まれているため、ボリュームを復元できません。" msgid "Volume create failed while extracting volume ref." msgstr "ボリューム参照の抽出中にボリュームの作成に失敗しました。" #, python-format msgid "Volume device file path %s does not exist." msgstr "ボリュームデバイスのファイルパス %s が存在しません" #, python-format msgid "Volume device not found at %(device)s." msgstr "%(device)s でボリュームデバイスが見つかりません。" #, python-format msgid "Volume driver %s not initialized." msgstr "ボリューム・ドライバー %s が初期化されていません。" msgid "Volume driver not ready." msgstr "ボリュームドライバーが準備できていません。" #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "ボリュームドライバーがエラーを報告しました: %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "" "ボリュームには今回削除できない一時的なスナップショットが含まれています。" msgid "Volume has children and cannot be deleted!" msgstr "ボリュームには子が含まれており、削除できません。" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "整合性グループ %s のボリュームが接続されています。まず切り離してください。" msgid "Volume in consistency group still has dependent snapshots." 
msgstr "整合性グループのボリュームには、まだ従属スナップショットがあります。" #, python-format msgid "Volume is attached to a server. (%s)" msgstr "ボリュームがサーバーに追加されています (%s)。" msgid "Volume is in-use." msgstr "ボリュームが使用中です。" msgid "Volume is not available." msgstr "ボリュームが利用できません。" msgid "Volume is not local to this node" msgstr "ボリュームは、このノードに対してローカルではありません" msgid "Volume is not local to this node." msgstr "ボリュームがこのノードに対してローカルではありません。" msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "ボリューム・メタデータ・バックアップが要求されましたが、このドライバーではま" "だこの機能はサポートされていません。" #, python-format msgid "Volume migration failed: %(reason)s" msgstr "ボリュームマイグレーションが失敗しました: %(reason)s" msgid "Volume must be available" msgstr "ボリュームは使用可能である必要があります" msgid "Volume must be in the same availability zone as the snapshot" msgstr "ボリュームはスナップショットと同じ可用性ゾーンになければなりません" msgid "Volume must be in the same availability zone as the source volume" msgstr "ボリュームはソースボリュームと同じ可用性ゾーンになければなりません" msgid "Volume must have a volume type" msgstr "ボリュームにはボリューム種別が必要です" msgid "Volume must not be part of a consistency group." msgstr "ボリュームを整合性グループの一部にしないでください。" msgid "Volume must not be replicated." msgstr "ボリュームを複製することはできません。" msgid "Volume must not have snapshots." msgstr "ボリュームにスナップショットがあってはなりません。" #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "インスタンス %(instance_id)s のボリュームが見つかりませんでした。" msgid "Volume not found on configured storage backend." msgstr "ボリュームが構成済みストレージバックエンドに見つかりません。" msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "設定されたストレージバックエンドでボリュームが見つかりません。ボリューム名に " "\"/\" が使用されている場合、名前を変更し、再度管理を試行してください。" msgid "Volume not found on configured storage pools." msgstr "ボリュームが構成済みストレージ・プールに見つかりません。" msgid "Volume not found." msgstr "ボリュームが見つかりません。" msgid "Volume not unique." msgstr "ボリュームが一意でありません" msgid "Volume not yet assigned to host." msgstr "ボリュームがまだホストに割り当てられていません。" msgid "Volume reference must contain source-name element." msgstr "ボリュームの参照にはソース名の要素が含まれる必要があります。" #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "%(volume_id)s のボリューム複製が見つかりませんでした。" #, python-format msgid "Volume service %s failed to start." msgstr "ボリュームサービス %s が起動できませんでした" msgid "Volume should have agent-type set as None." msgstr "ボリュームには agent-type として None を設定する必要があります。" #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "ボリューム・サイズ %(volume_size)s GB をイメージの minDisk サイズ " "%(min_disk)s GB より小さくすることはできません。" #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "" "ボリュームサイズ '%(size)s' は、整数であり、かつ 0 より大きくなければなりませ" "ん" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "ボリューム・サイズ「%(size)s」GB を元のボリューム・サイズ %(source_size)s GB " "より小さくすることはできません。このサイズは元のボリュームサイズ以上でなけれ" "ばなりません。" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "ボリューム・サイズ「%(size)s」GB をスナップショット・サイズ %(snap_size)s GB " "より小さくすることはできません。このサイズは元のスナップショット・サイズ以上" "でなければなりません。" msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "最後のバックアップ以降にボリュームサイズが増加しました。フルバックアップを実" "行してください。" msgid "Volume size must be a multiple of 1 GB." 
msgstr "ボリュームサイズは 1 GB の倍数である必要があります。" msgid "Volume size must be multiple of 1 GB." msgstr "ボリュームサイズは 1 GB の倍数である必要があります。" msgid "Volume size must multiple of 1 GB." msgstr "ボリュームサイズは 1 GB の倍数である必要があります" #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" "ボリュームのボリューム状況は「使用可能」でなければなりませんが、現在の状況は " "%s です" msgid "Volume status is in-use." msgstr "ボリューム状況は「使用中」です。" #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "スナップショットに関しては、ボリューム状況が「使用可能」または「使用中」でな" "ければなりません (現在は %s です)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "ボリューム状況は「使用可能」または「使用中」でなければなりません。" #, python-format msgid "Volume status must be %s to reserve." msgstr "ボリュームを予約するにはボリューム状態が %s である必要があります。" msgid "Volume status must be 'available'." msgstr "ボリューム状況は「使用可能」でなければなりません。" msgid "Volume to Initiator Group mapping already exists" msgstr "ボリュームからイニシエーター・グループへのマッピングは既に存在します" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "バックアップ対象のボリュームが利用可能か使用中である必要がありますが、現在の" "状況は \"%s\" です。" msgid "Volume to be restored to must be available" msgstr "復元するボリュームは「使用可能」でなければなりません" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "ボリューム種別 %(volume_type_id)s が見つかりませんでした。" #, python-format msgid "Volume type ID '%s' is invalid." msgstr "ボリューム・タイプ ID '%s' は無効です。" #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "%(volume_type_id)s / %(project_id)s の組み合わせのボリューム種別アクセスは既" "に存在します。" #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "%(volume_type_id)s / %(project_id)s の組み合わせのボリューム種別アクセスが見" "つかりません。" #, python-format msgid "Volume type does not match for share %s." msgstr "共有 %s に関してボリューム・タイプが一致しません。" #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "タイプ %(type_id)s のボリューム種別暗号化は既に存在します。" #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "タイプ %(type_id)s に対するボリューム種別暗号化は存在しません。" msgid "Volume type name can not be empty." msgstr "ボリューム種別名を空にすることはできません" #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "名前 %(volume_type_name)s を持つボリューム種別が見つかりませんでした。" #, python-format msgid "Volume with volume id %s does not exist." msgstr "ボリューム ID %s のボリュームが存在しません。" #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "ボリューム %(volumeName)s は連結されたボリュームではありません。拡張を実行で" "きる対象は、連結されたボリュームのみです。終了中..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" "ボリューム %(volumeName)s がストレージグループ %(sgGroupName)s に追加されませ" "んでした。" #, python-format msgid "Volume: %s could not be found." msgstr "ボリューム %s が見つかりませんでした。" #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "ボリューム: %s はすでに Cinder によって管理されています。" msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "" "ボリュームは、このサイズ (メガバイト) のオブジェクトにチャンク化されます。" msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" " プライマリーとセカンダリーの SolidFire アカウント上で、ボリュームとアカウン" "トの数量が超過しました。" #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." 
msgstr "" "VzStorage の設定の 'vzstorage_used_ratio' が無効です。0 より大きく 1.0 以下で" "ある必要があります: %s。" #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "%(config)s の VzStorage のコンフィグファイルが存在しません" msgid "Wait replica complete timeout." msgstr "レプリカの完了を待機するタイムアウト。" #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "同期の待機が失敗しました。実行状態: %s。" msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "すべてのノードがクラスターに接続するのを待機しています。すべてのシープデーモ" "ンが稼働中であることを確認してください。" msgid "We should not do switch over on primary array." msgstr "プライマリーアレイで切り替えを行ってはなりません。" msgid "Wrong resource call syntax" msgstr "正しくないリソース呼び出し構文" msgid "X-IO Volume Driver exception!" msgstr "X-IO ボリュームドライバー例外が発生しました。" msgid "XML support has been deprecated and will be removed in the N release." msgstr "XML サポートは提供を終了し、N リリースでは削除されます。" msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "XtremIO は正しく設定されていません。iscsi ポータルが見つかりません" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO は正しく初期化されていません。クラスターが見つかりません" msgid "You must implement __call__" msgstr "__call__ を実装しなければなりません" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "3PAR ドライバーを使用するには hpe3parclient をインストールしておく必要があり" "ます。 \"pip install python-3parclient\" を実行して hpe3parclient をインス" "トールしてください。" msgid "You must supply an array in your EMC configuration file." msgstr "EMC 構成ファイルにアレイを指定する必要があります。" #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "元のサイズ %(originalVolumeSize)s GB が、%(newSize)s GB より大きくなっていま" "す。拡張のみがサポートされます。終了中..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "ゾーン" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "ゾーニングポリシー %s は認識されていません" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "_create_and_copy_vdisk_data: vdisk %s の属性を取得できませんでした。" msgid "_create_host failed to return the host name." msgstr "_create_host でホスト名を返すことができませんでした。" msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: ホスト名を変換できません。ホスト名はユニコードでもストリングで" "もありません。" msgid "_create_host: No connector ports." msgstr "_create_host: コネクター・ポートがありません。" msgid "_create_local_cloned_volume, Replication Service not found." msgstr "" "_create_local_cloned_volume、レプリケーションサービスが見つかりません。" #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume、ボリューム名: %(volumename)s、ソースボリューム" "名: %(sourcevolumename)s、ソースボリュームインスタンス: %(source_volume)s、" "ターゲットボリュームインスタンス: %(target_volume)s、戻りコード: %(rc)lu、エ" "ラー: %(errordesc)s。" #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - 成功メッセージが CLI 出力内に見つかりませんでし" "た。\n" " stdout: %(out)s\n" " stderr: %(err)s" msgid "_create_volume_name, id_code is None." 
msgstr "_create_volume_name、id_code がありません。" msgid "_delete_copysession, Cannot find Replication Service" msgstr "" "_delete_copysession、レプリケーションサービスを見つけることができません" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession、コピーセッションのタイプが定義されていません。コピーセッ" "ション: %(cpsession)s、コピータイプ: %(copytype)s。" #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession、コピーセッション: %(cpsession)s、操作: %(operation)s、戻" "りコード: %(rc)lu、エラー: %(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume、ボリューム名: %(volumename)s、戻りコード: %(rc)lu、エラー: " "%(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "_delete_volume、ボリューム名: %(volumename)s、ストレージ設定サービスが見つか" "りません。" #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service、クラス名: %(classname)s、InvokeMethod、ETERNUS に接続" "できません。" msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "" "_extend_volume_op: スナップショットを持つボリュームの拡張はサポートされていま" "せん。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group、コネクター: %(connector)s、アソシエーター: " "FUJITSU_AuthorizedTarget を ETERNUS に接続できません。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group、コネクター: %(connector)s、EnumerateInstanceNames を " "ETERNUS に接続できません。" #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group、コネクター: %(connector)s、AssocNames: " "FUJITSU_ProtocolControllerForUnit を ETERNUS に接続できません。" #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession、ReferenceNames、vol_instance: %(vol_instance_path)s、" "ETERNUS に接続できません。" #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service、クラス名: %(classname)s、EnumerateInstanceNames、" "ETERNUS に接続できません。" #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" "_find_initiator_names、コネクター: %(connector)s、イニシエーターが見つかりま" "せん。" #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun、ボリューム名: %(volumename)s、EnumerateInstanceNames、ETERNUS に接" "続できません。" #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool、eternus_pool:%(eternus_pool)s、EnumerateInstances、ETERNUS に接続" "できません。" #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." 
msgstr "" "_get_drvcfg、ファイル名: %(filename)s、tagname: %(tagname)s、データがありませ" "ん。ドライバー設定ファイルを編集して修正してください。" #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection、ファイル名: %(filename)s、ip: %(ip)s、ポート: " "%(port)s、ユーザー: %(user)s、パスワード: ****、url: %(url)s、失敗しました。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties、iscsiip list: %(iscsiip_list)s、iqn が見つかり" "ません。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、AssociatorNames: " "CIM_BindsTo を ETERNUS に接続できません。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、EnumerateInstanceNames " "を ETERNUS に接続できません。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、GetInstance を ETERNUS " "に接続できません。" #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: 属性のヘッダーと値が適合していません。\n" " ヘッダー: %(header)s\n" " 値: %(row)s。" msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "_get_host_from_connector がコネクターのホスト名を返すことができませんでした。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc、aglist/vol_instance からの host-affinity の取得が失敗しまし" "た。affinitygroup: %(ag)s、ReferenceNames を ETERNUS に接続できません。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc、host-affinity インスタンスの取得が失敗しました。volmap: " "%(volmap)s、GetInstance、ETERNUS に接続できません。" msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi、アソシエーター: FUJITSU_SAPAvailableForElement、ETERNUS " "に接続できません。" #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi、affinitygroup: %(ag)s, ReferenceNames、ETERNUS に接続でき" "ません。" #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi、vol_instance: %(vol_instance)s、ReferenceNames: " "CIM_ProtocolControllerForUnit、ETERNUS に接続できません。" #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi、volmap: %(volmap)s、GetInstance、ETERNUS に接続できませ" "ん。" msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "_get_target_port、EnumerateInstances を ETERNUS に接続できません。" #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." 
msgstr "" "_get_target_port、プロトコル: %(protocol)s、target_port が見つかりません。" #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay: %s という名前のスナップショットが見つかりません" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay: ボリューム ID %s が見つかりません" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: ソース名を指定する必要があります。" msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties: ホスト - ボリューム接続の FC 接続情報を取得できま" "せんでした。ホストが FC 接続用に正しく構成されていますか?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: ボリューム %(vol)s の入出力グループ %(gid)s でノー" "ドが見つかりませんでした。" #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun、vol_instance.path:%(vol)s、ボリューム名: %(volumename)s、" "volume_uid: %(uid)s、イニシエーター: %(initiator)s、ターゲット: %(tgt)s、" "aglist: %(aglist)s、ストレージ設定サービスが見つかりません。" #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun、vol_instance.path: %(volume)s、ボリューム名: %(volumename)s、" "volume_uid: %(uid)s、aglist: %(aglist)s、コントローラー設定サービスが見つかり" "ません。" #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun、ボリューム名 %(volumename)s、volume_uid: %(volume_uid)s、" "AffinityGroup: %(ag)s、戻りコード: %(rc)lu、エラー: %(errordesc)s。" #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun、vol_instance.path: %(volume)s、AssociatorNames: " "CIM_ProtocolControllerForUnit を ETERNUS に接続できません。" msgid "_update_volume_stats: Could not get storage pool data." msgstr "" "_update_volume_stats: ストレージ・プール・データを取得できませんでした。" #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete、cpsession: %(cpsession)s、コピーセッション状態は " "BROKEN です。" #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "add_vdisk_copy が失敗しました。ボリューム %s のコピーが存在します。別のコピー" "を追加すると、コピー数 2 という制限を超えます。" msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" "期待されたプール内の vdisk コピーなしで add_vdisk_copy が開始されました。" #, python-format msgid "all_tenants must be a boolean, got '%s'." 
msgstr "all_tenants はブール値である必要がありますが、'%s' が得られました。" msgid "already created" msgstr "既に作成済み" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "リモートノードにスナップショットを追加します。" #, python-format msgid "attribute %s not lazy-loadable" msgstr "属性 %s は遅延ロードできません" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "バックアップ: %(vol_id)s での %(vpath)s から %(bpath)s へのデバイス・ハードリ" "ンクの作成に失敗しました。\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "バックアップ: %(vol_id)s サーバーからバックアップ成功通知を取得できませんでし" "た。\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "バックアップ: %(vol_id)s で、%(bpath)s 上の無効な引数のため、dsmc の実行に失" "敗しました。\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "バックアップ: %(vol_id)s %(bpath)s で dsmc を実行できませんでした。\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "" "バックアップ: %(vol_id)s に障害が発生しました。%(path)s はファイルではありま" "せん。" #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "バックアップ: %(vol_id)s に障害が発生しました。%(path)s は予期されていない" "ファイル・タイプです。ブロック化または通常のファイルがサポートされています。" "実際のファイル・モードは %(vol_mode)s です。" #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "バックアップ: %(vol_id)s に障害が発生しました。ボリュームへの実際のパス " "%(path)s を取得できません。" msgid "being attached by different mode" msgstr "別のモードで接続しています" #, python-format msgid "call failed: %r" msgstr "呼び出しが失敗しました: %r" msgid "call failed: GARBAGE_ARGS" msgstr "呼び出しが失敗しました: GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "呼び出しが失敗しました: PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "呼び出しが失敗しました: PROG_MISMATCH: %r" msgid "call failed: PROG_UNAVAIL" msgstr "呼び出しが失敗しました: PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "lun-map を見つけることができません。ig:%(ig)s vol:%(vol)s" msgid "can't find the volume to extend" msgstr "拡張するボリュームが見つかりません" msgid "can't handle both name and index in req" msgstr "req にある名前と索引はどちらも処理できません" msgid "cannot understand JSON" msgstr "JSON を解釈できません" msgid "cannot understand XML" msgstr "XML を解釈できません" #, python-format msgid "cg-%s" msgstr "cg: %s" msgid "cgsnapshot assigned" msgstr "割り当てられた cgsnapshot" msgid "cgsnapshot changed" msgstr "変更された cgsnapshot" msgid "cgsnapshots assigned" msgstr "割り当てられた cgsnapshot" msgid "cgsnapshots changed" msgstr "変更された cgsnapshot" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error: 認証にはパスワードまたは SSH 秘密鍵が必要です: " "san_password または san_private_key オプションを設定してください。" msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error: システム ID を判別できません。" msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error: システム名を判別できません。" msgid "check_hypermetro_exist error." 
msgstr "check_hypermetro_exist エラー。" #, python-format msgid "clone depth exceeds limit of %s" msgstr "複製の深さが限度 %s を超えています" msgid "consistencygroup assigned" msgstr "割り当てられた整合性グループ" msgid "consistencygroup changed" msgstr "変更された整合性グループ" msgid "control_location must be defined" msgstr "control_location を定義する必要があります" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume、ETERNUS にソースボリュームが存在しません。" #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume、ターゲットボリュームインスタンス名: " "%(volume_instancename)s、インスタンスの取得が失敗しました。" msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume: ソースと宛先のサイズが異なっています。" #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume: ソースボリューム %(src_vol)s のサイズは %(src_size)dGB " "で、サイズ %(tgt_size)dGBand のターゲットボリューム %(tgt_vol)s に適合しませ" "ん。" msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src は CG スナップショットまたはソース CG から作" "成しなければなりません。" msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src は 1 つの cgsnapshot ソースまたは整合性グ" "ループソースのみをサポートします。複数ソースは使用できません。" msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src は 1 つの cgsnapshot source または整合性グ" "ループソースのみをサポートします。複数ソースは使用できません。" #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: ソース vdisk %(src)s (%(src_id)s) は存在しません。" #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy: ソース vdisk %(src)s は存在しません。" msgid "create_host: Host name is not unicode or string." msgstr "create_host: ホスト名はユニコードでもストリングでもありません。" msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: イニシエーターも wwpn も指定されていません。" msgid "create_hypermetro_pair error." msgstr "create_hypermetro_pair エラー。" #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" "create_snapshot、eternus_pool: %(eternus_pool)s、プールが見つかりません。" #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot、スナップショット名: %(snapshotname)s、ソースボリューム名: " "%(volumename)s、vol_instance.path: %(vol_instance)s、宛先ボリューム名: " "%(d_volumename)s、プール: %(pool)s、戻りコード: %(rc)lu、エラー: " "%(errordesc)s。" #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot、ボリューム名: %(s_volumename)s、ETERNUS でソースボリュームが" "見つかりません。" #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "" "create_snapshot、ボリューム名: %(volumename)s、レプリケーションサービスが見つ" "かりません。" #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." 
msgstr "" "create_snapshot: スナップショットのボリューム状況は「使用可能」または「使用" "中」でなければなりません。無効な状況は %s です。" msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: ソース・ボリュームの取得に失敗しました。" #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume、ボリューム: %(volume)s、EnumerateInstances、ETERNUS に接続でき" "ません。" #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume、ボリューム: %(volume)s、ボリューム名: %(volumename)s、" "eternus_pool: %(eternus_pool)s、ストレージ設定サービスが見つかりません。" #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_volume、ボリューム名: %(volumename)s、プール名: %(eternus_pool)s、戻り" "コード: %(rc)lu、エラー: %(errordesc)s。" msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "" "create_volume_from_snapshot、ETERNUS にソースボリュームが存在しません。" #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot、ターゲットボリュームインスタンス名: " "%(volume_instancename)s、インスタンスの取得が失敗しました。" #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" "create_volume_from_snapshot: スナップショット %(name)s は存在しません。" #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: ボリュームを作成するには、スナップショット状況が" "「使用可能」でなければなりません。無効な状況は %s です。" msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "create_volume_from_snapshot: ソースと宛先のサイズが異なっています。" msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "create_volume_from_snapshot: ボリューム・サイズがスナップショット・ベース・ボ" "リュームと異なります。" msgid "deduplicated and auto tiering can't be both enabled." msgstr "重複排除と自動階層化の両方を有効にすることはできません。" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "削除: %(vol_id)s で、以下の無効な引数のため、dsmc の実行に失敗しました。 " "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "削除: %(vol_id)s dsmc を実行できませんでした。stdout: %(out)s\n" " stderr: %(err)s" msgid "delete_hypermetro error." msgstr "delete_hypermetro エラー。" #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator: %s ACL が見つかりません。処理を続行します。" msgid "delete_replication error." msgstr "delete_replication エラー。" #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "従属ボリュームを持つスナップショット %(snapshot_name)s の削除中" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "スナップショットを含むボリューム %(volume_name)s の削除中" msgid "detach snapshot from remote node" msgstr "リモートノードからスナップショットを切断します" msgid "do_setup: No configured nodes." 
msgstr "do_setup: 構成されたノードがありません。" msgid "element is not a child" msgstr "エレメントは子ではありません" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries は 0 以上でなければなりません" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "Swift へのオブジェクトの書き込み中にエラーが発生しました。Swift 内のオブジェ" "クトの MD5 %(etag)s が Swift に送信されたオブジェクトの MD5 %(md5)s と同じで" "はありません" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" "extend_volume、eternus_pool: %(eternus_pool)s、プールが見つかりません。" #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume、ボリューム: %(volume)s、ボリューム名: %(volumename)s、" "eternus_pool: %(eternus_pool)s、ストレージ設定サービスが見つかりません。" #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume、ボリューム名: %(volumename)s、戻りコード: %(rc)lu、エラー: " "%(errordesc)s、プールタイプ: %(pooltype)s。" #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "" "extend_volume、ボリューム名: %(volumename)s、ボリュームが見つかりません。" msgid "failed to create new_volume on destination host" msgstr "宛先ホスト上に new_volume を作成できませんでした" msgid "fake" msgstr "偽" #, python-format msgid "file already exists at %s" msgstr "ファイルは %s に既に存在します" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "SheepdogIOWrapper は fileno をサポートしません" msgid "fileno() not supported by RBD()" msgstr "fileno() は RBD() でサポートされていません" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "ファイルシステム%s は Nexenta Store アプライアンスに存在しません" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "flashsystem_multihostmap_enabled は False に設定されています。マルチホスト・" "マッピングは許可されていません。CMMVC6071E VDisk は既にホストにマップされてい" "るため、VDisk からホストへのマッピングは作成されませんでした。" msgid "flush() not supported in this version of librbd" msgstr "このバージョンの librbd では flush() はサポートされていません" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s の基盤: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s の基盤: %(backing_file)s" msgid "force delete" msgstr "強制削除" msgid "get_hyper_domain_id error." msgstr "get_hyper_domain_id エラー。" msgid "get_hypermetro_by_id error." msgstr "get_hypermetro_by_id エラー。" #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params: イニシエーター %(ini)s のターゲット IP の取得に失敗しまし" "た。設定ファイルを確認してください。" #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: ボリューム %s の属性の取得に失敗しました。" msgid "glance_metadata changed" msgstr "変更された glance_metadata" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode は copy_on_write に設定されていますが、%(vol)s と " "%(img)s は異なるファイル・システムに属しています。" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." 
msgstr "" "gpfs_images_share_mode は copy_on_write に設定されていますが、%(vol)s と " "%(img)s は異なるファイル・セットに属しています。" #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "cinder.conf で hgst_group %(grp)s と hgst_user %(usr)s が適切なユーザーとグ" "ループに合致する必要があります" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "cinder.conf で指定した hgst_net %(net)s がクラスターで見つかりません" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "cinder.conf で hgst_redundancy を 0 (HA でない) または 1 (HA) に設定する必要" "があります" msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr " cinder.conf で hgst_space_mode は octal/int である必要があります" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "" "hgst_storage サーバー %(svr)s で : の形式が設定されていません" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "cinder.conf で hgst_storage_servers を定義する必要があります" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "この操作の途中で HTTP サービスが急に無効または保守状態になった可能性がありま" "す。" msgid "id cannot be None" msgstr "ID を None にすることはできません" #, python-format msgid "image %s not found" msgstr "イメージ %s が見つかりません" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "" "initialize_connection、ボリューム: %(volume)s、ボリュームが見つかりません。" #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "initialize_connection: ボリューム %s の属性の取得に失敗しました。" #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "" "initialize_connection: ボリューム %s のボリューム属性が欠落しています。" #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: ボリューム %(vol)s の入出力グループ %(gid)s でノードが" "見つかりませんでした。" #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: vdisk %s が定義されていません。" #, python-format msgid "invalid user '%s'" msgstr "ユーザー '%s' は無効です" #, python-format msgid "iscsi portal, %s, not found" msgstr "iscsi ポータル %s が見つかりません" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "プロトコル 'iSCSI' を使用する場合、設定ファイルに iscsi_ip_address を設定しな" "ければなりません。" msgid "iscsiadm execution failed. " msgstr "iscsiadm の実行が失敗しました。" #, python-format msgid "key manager error: %(reason)s" msgstr "鍵マネージャーのエラー: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key が定義されていません" msgid "limit param must be an integer" msgstr "limit パラメーターは整数でなければなりません" msgid "limit param must be positive" msgstr "limit パラメーターは正でなければなりません" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "manage_existing はホストに接続されたボリュームを管理できません。インポートを" "行う前にこのボリュームを既存のホストから切断してください" msgid "manage_existing requires a 'name' key to identify an existing volume." 
msgstr "" "既存のボリュームを特定するには、manage_existing で 'name' キーが必要です。" #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot: ボリューム %(vol)s 上で既存のリプレー %(ss)s の管理" "でエラーが発生しました" #, python-format msgid "marker [%s] not found" msgstr "マーカー [%s] が見つかりません" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "mdiskgrp に引用符 %s がありません" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" "migration_policy は 'on-demand' または 'never' でなければなりません。%s が渡" "されました" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "" "ボリューム %(vol)s 上で mkfs が失敗しました。エラー・メッセージ: %(err)s。" msgid "mock" msgstr "モック" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs がインストールされていません" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "drbdmanage が名前 %s を持つ複数のリソースを発見しました" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "スナップショット ID %s を持つ複数のリソースが見つかりました" msgid "name cannot be None" msgstr "名前を None に設定することはできません" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path: NAVISECCLI ツール %(path)s が見つかりませんでした。" #, python-format msgid "no REPLY but %r" msgstr "REPLY がないものの %r があります" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "drbdmanage で ID %s を持つスナップショットが見つかりません" #, python-format msgid "not exactly one snapshot with id %s" msgstr "ID %s を持つスナップショットは 1つだけではありません" #, python-format msgid "not exactly one volume with id %s" msgstr "ID %s を持つボリュームは 1 つだけではありません" #, python-format msgid "obj missing quotes %s" msgstr "obj に引用符 %s がありません" msgid "open_access_enabled is not off." msgstr "open_access_enabled がオフになっていません。" msgid "progress must be an integer percentage" msgstr "進行状況は整数のパーセンテージでなければなりません" msgid "promote_replica not implemented." msgstr "promote_replica が実装されていません。" msgid "provider must be defined" msgstr "プロバイダーを定義する必要があります" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "qemu-img %(minimum_version)s 以降がこのボリュームドライバーに必要です。現在" "の qemu-img バージョン: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img がインストールされていません。また、イメージのタイプは %s です。" "qemu-img がインストールされていない場合は、RAW イメージのみが使用可能です。" msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img がインストールされておらず、ディスク形式が指定されていません。qemu-" "img がインストールされていない場合は、RAW イメージのみが使用可能です。" msgid "rados and rbd python libraries not found" msgstr "" "rados python ライブラリーおよび rbd python ライブラリーが見つかりません" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted には 'no', 'yes', 'only' のいずれかのみを指定できます。%r は指定" "できません" #, python-format msgid "replication_device should be configured on backend: %s." msgstr "replication_device は backend: %s で設定する必要があります。" #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "backend_id [%s] の replication_device が欠落しています。" #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover が失敗しました。%s が見つかりません。" msgid "replication_failover failed. 
Backend not configured for failover" msgstr "" "replication_failover が失敗しました。バックエンドがフェイルオーバーのために設" "定されていません。" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "復元: %(vol_id)s で、%(bpath)s 上の無効な引数のため、dsmc の実行に失敗しまし" "た。\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "復元: %(vol_id)s %(bpath)s で dsmc を実行できませんでした。\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "復元: %(vol_id)s が失敗しました。\n" " stdout: %(out)s\n" "stderr: %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup が打ち切られました。実際のオブジェクトリストが、メタデータ内に" "保管されているオブジェクトリストと一致しません。" msgid "root element selecting a list" msgstr "ルート・エレメントのリスト選択" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "rtslib_fb にメンバー %s がありません: より新しい python-rtslib-fb が必要かも" "しれません。" msgid "san_ip is not set." msgstr "san_ip が設定されていません。" msgid "san_ip must be set" msgstr "san_ip を設定する必要があります" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip: 必須フィールド構成。san_ip が設定されていません。" msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "san_login と san_password のいずれかまたは両方が cinder.conf の Datera ドライ" "バーに設定されていません。この情報を設定して、cinder-volume サービスを再開し" "てください。" msgid "serve() can only be called once" msgstr "serve() は一度しか呼び出せません" msgid "service not found" msgstr "サービスが見つかりません" msgid "snapshot does not exist" msgstr "スナップショットが存在しません" #, python-format msgid "snapshot id:%s not found" msgstr "スナップショット ID %s が見つかりません" #, python-format msgid "snapshot-%s" msgstr "snapshot-%s" msgid "snapshots assigned" msgstr "割り当てられたスナップショット" msgid "snapshots changed" msgstr "変更されたスナップショット" #, python-format msgid "source vol id:%s not found" msgstr "ソースボリューム ID %s が見つかりません" #, python-format msgid "source volume id:%s is not replicated" msgstr "ソース・ボリューム ID %s が複製されていません" msgid "source-name cannot be empty." msgstr "source-name は空にできません。" msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "" "source-name 形式は 'vmdk_path@vm_inventory_path' でなければなりません。" #, python-format msgid "status must be %s and" msgstr "状態は %s である必要があります" msgid "status must be available" msgstr "状況は「使用可能」でなければなりません" msgid "stop_hypermetro error." msgstr "stop_hypermetro エラー。" msgid "subclasses must implement construct()!" msgstr "サブクラスは construct() を実装する必要があります。" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo が失敗しました。何も起こらなかったものとして続行します" msgid "sync_hypermetro error." msgstr "sync_hypermetro エラー。" msgid "sync_replica not implemented." msgstr "sync_replica が実装されていません。" #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli がインストールされておらず、デフォルトのディレクトリー " "(%(default_path)s) を作成できませんでした: %(exc)s" msgid "terminate_connection: Failed to get host name from connector." 
msgstr "terminate_connection: コネクターからホスト名を取得できませんでした。" msgid "timeout creating new_volume on destination host" msgstr "宛先ホスト上に new_volume を作成しているときにタイムアウトになりました" msgid "too many body keys" msgstr "本体キーが多すぎます" #, python-format msgid "umount: %s: not mounted" msgstr "アンマウント: %s: マウントされていません" #, python-format msgid "umount: %s: target is busy" msgstr "アンマウント: %s: ターゲットが使用中です" msgid "umount: : some other error" msgstr "アンマウント: : その他のエラー" msgid "umount: : target is busy" msgstr "アンマウント: : ターゲットが使用中です" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot: %s という名前のスナップショットが見つかりません" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot: ボリューム ID %s が見つかりません" #, python-format msgid "unrecognized argument %s" msgstr "認識されない引数 %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "サポートされない圧縮アルゴリズム: %s" msgid "valid iqn needed for show_target" msgstr "show_target に必要とされる有効な iqn" #, python-format msgid "vdisk %s is not defined." msgstr "vdisk %s が定義されていません。" msgid "vmemclient python library not found" msgstr "vmemclient python ライブラリーが見つかりません" #, python-format msgid "volume %s not found in drbdmanage" msgstr "drbdmanage でボリューム %s が見つかりません" msgid "volume assigned" msgstr "割り当てられたボリューム" msgid "volume changed" msgstr "変更されたボリューム" msgid "volume does not exist" msgstr "ボリュームが存在しません" msgid "volume is already attached" msgstr "ボリュームは既に接続されています" msgid "volume is not local to this node" msgstr "ボリュームは、このノードに対してローカルではありません" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "ボリュームサイズ %(volume_size)d は、サイズ %(size)d のバックアップを復元する" "には小さすぎます" #, python-format msgid "volume size %d is invalid." msgstr "ボリュームサイズ %d は無効です。" msgid "volume_type cannot be None" msgstr "volume_type に None を設定することはできません" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "ボリュームを整合性グループに作成する場合は、volume_type を指定する必要があり" "ます。" msgid "volume_type_id cannot be None" msgstr "volume_type_id を None に設定することはできません" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" "整合性グループ %(name)s を作成するには、volume_types を指定する必要がありま" "す。" #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "" "整合性グループ %s を作成するには、volume_types を指定する必要があります。" msgid "volumes assigned" msgstr "割り当てられたボリューム" msgid "volumes changed" msgstr "変更されたボリューム" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition: %s はタイムアウトしました。" #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "" "zfssa_manage_policy プロパティーは 'strict' または 'loose' に設定する必要があ" "ります。現在の値は %s です。" msgid "{} is not a valid option." msgstr "{} は有効なオプションではありません。" cinder-8.0.0/cinder/locale/cinder-log-error.pot0000664000567000056710000032504512701406257022542 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the cinder project. # FIRST AUTHOR , 2016. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-18 06:32+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: cinder/coordination.py:95 msgid "Error starting coordination backend." msgstr "" #: cinder/coordination.py:152 msgid "Connection error while sending a heartbeat to coordination backend." msgstr "" #: cinder/coordination.py:156 msgid "Error sending a heartbeat to coordination backend." msgstr "" #: cinder/exception.py:111 msgid "Exception in string format operation" msgstr "" #: cinder/exception.py:113 #, python-format msgid "%(name)s: %(value)s" msgstr "" #: cinder/quota.py:1041 #, python-format msgid "Failed to commit reservations %s" msgstr "" #: cinder/quota.py:1062 #, python-format msgid "Failed to roll back reservations %s" msgstr "" #: cinder/service.py:305 #, python-format msgid "" "Manager for service %(binary)s %(host)s is reporting problems, not " "sending heartbeat. Service will appear \"down\"." msgstr "" #: cinder/service.py:332 msgid "Recovered model server connection!" msgstr "" #: cinder/service.py:337 msgid "model server went away" msgstr "" #: cinder/service.py:344 msgid "DBError encountered: " msgstr "" #: cinder/service.py:349 msgid "Exception encountered: " msgstr "" #: cinder/utils.py:487 #, python-format msgid "Failed to write persistence file: %(path)s." msgstr "" #: cinder/utils.py:596 #, python-format msgid "Volume driver %s not initialized" msgstr "" #: cinder/api/extensions.py:244 msgid "Exception loading extension." msgstr "" #: cinder/api/middleware/fault.py:47 #, python-format msgid "Caught error: %(type)s %(error)s" msgstr "" #: cinder/api/openstack/wsgi.py:820 #, python-format msgid "Exception handling resource: %s" msgstr "" #: cinder/api/openstack/wsgi.py:1199 msgid "Get method error." msgstr "" #: cinder/backup/chunkeddriver.py:577 #, python-format msgid "Backup volume metadata failed: %s." msgstr "" #: cinder/backup/manager.py:124 msgid "Problem cleaning incomplete backup operations." msgstr "" #: cinder/backup/manager.py:142 #, python-format msgid "Problem cleaning up backup %(bkup)s." msgstr "" #: cinder/backup/manager.py:148 cinder/backup/manager.py:788 #, python-format msgid "Problem cleaning temp volumes and snapshots for backup %(bkup)s." msgstr "" #: cinder/backup/manager.py:207 #, python-format msgid "Detach attachment %(attach_id)s failed." msgstr "" #: cinder/backup/manager.py:513 msgid "Failed to update usages deleting backup" msgstr "" #: cinder/backup/manager.py:763 #, python-format msgid "Backup id %s is invalid. Skipping reset." msgstr "" #: cinder/backup/manager.py:767 #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" #: cinder/backup/drivers/ceph.py:458 #, python-format msgid "" "Max retries reached deleting backup %(basename)s image of volume " "%(volume)s."
msgstr "" #: cinder/backup/drivers/ceph.py:494 #, python-format msgid "Pipe1 failed - %s " msgstr "" #: cinder/backup/drivers/ceph.py:508 #, python-format msgid "Pipe2 failed - %s " msgstr "" #: cinder/backup/drivers/ceph.py:969 msgid "Differential restore failed, trying full restore" msgstr "" #: cinder/backup/drivers/ceph.py:1159 #, python-format msgid "Restore to volume %(volume)s finished with error - %(error)s." msgstr "" #: cinder/backup/drivers/swift.py:203 #, python-format msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" #: cinder/backup/drivers/tsm.py:252 #, python-format msgid "" "backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" #: cinder/backup/drivers/tsm.py:524 #, python-format msgid "" "delete: %(vol_id)s failed with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/brick/local_dev/lvm.py:89 msgid "Error creating Volume Group" msgstr "" #: cinder/brick/local_dev/lvm.py:90 cinder/brick/local_dev/lvm.py:184 #: cinder/brick/local_dev/lvm.py:558 cinder/brick/local_dev/lvm.py:589 #: cinder/brick/local_dev/lvm.py:611 cinder/brick/local_dev/lvm.py:653 #: cinder/brick/local_dev/lvm.py:742 cinder/brick/local_dev/lvm.py:780 #, python-format msgid "Cmd :%s" msgstr "" #: cinder/brick/local_dev/lvm.py:91 cinder/brick/local_dev/lvm.py:185 #: cinder/brick/local_dev/lvm.py:559 cinder/brick/local_dev/lvm.py:590 #: cinder/brick/local_dev/lvm.py:612 cinder/brick/local_dev/lvm.py:654 #: cinder/brick/local_dev/lvm.py:743 cinder/brick/local_dev/lvm.py:781 #, python-format msgid "StdOut :%s" msgstr "" #: cinder/brick/local_dev/lvm.py:92 cinder/brick/local_dev/lvm.py:186 #: cinder/brick/local_dev/lvm.py:560 cinder/brick/local_dev/lvm.py:591 #: cinder/brick/local_dev/lvm.py:613 cinder/brick/local_dev/lvm.py:655 #: cinder/brick/local_dev/lvm.py:744 cinder/brick/local_dev/lvm.py:782 #, python-format msgid "StdErr :%s" msgstr "" #: cinder/brick/local_dev/lvm.py:96 #, python-format msgid "Unable to locate Volume Group %s" msgstr "" #: cinder/brick/local_dev/lvm.py:183 msgid "Error querying thin pool about data_percent" msgstr "" #: cinder/brick/local_dev/lvm.py:410 #, python-format msgid "Unable to find VG: %s" msgstr "" #: cinder/brick/local_dev/lvm.py:497 msgid "" "Requested to setup thin provisioning, however current LVM version does " "not support it." 
msgstr "" #: cinder/brick/local_dev/lvm.py:557 msgid "Error creating Volume" msgstr "" #: cinder/brick/local_dev/lvm.py:574 #, python-format msgid "Trying to create snapshot by non-existent LV: %s" msgstr "" #: cinder/brick/local_dev/lvm.py:588 #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:205 msgid "Error creating snapshot" msgstr "" #: cinder/brick/local_dev/lvm.py:610 msgid "Error deactivating LV" msgstr "" #: cinder/brick/local_dev/lvm.py:652 msgid "Error activating LV" msgstr "" #: cinder/brick/local_dev/lvm.py:741 msgid "Error extending Volume" msgstr "" #: cinder/brick/local_dev/lvm.py:779 msgid "Error renaming logical volume" msgstr "" #: cinder/cmd/all.py:78 msgid "Failed to load osapi_volume" msgstr "" #: cinder/cmd/all.py:84 #, python-format msgid "Failed to load %s" msgstr "" #: cinder/cmd/all.py:106 msgid "Failed to load conder-volume" msgstr "" #: cinder/cmd/volume_usage_audit.py:120 #, python-format msgid "Exists volume notification failed: %s" msgstr "" #: cinder/cmd/volume_usage_audit.py:146 #, python-format msgid "Create volume notification failed: %s" msgstr "" #: cinder/cmd/volume_usage_audit.py:172 #, python-format msgid "Delete volume notification failed: %s" msgstr "" #: cinder/cmd/volume_usage_audit.py:190 #, python-format msgid "Exists snapshot notification failed: %s" msgstr "" #: cinder/cmd/volume_usage_audit.py:216 #, python-format msgid "Create snapshot notification failed: %s" msgstr "" #: cinder/cmd/volume_usage_audit.py:242 #, python-format msgid "Delete snapshot notification failed: %s" msgstr "" #: cinder/consistencygroup/api.py:151 #, python-format msgid "Error occurred when creating consistency group %s." msgstr "" #: cinder/consistencygroup/api.py:183 #, python-format msgid "" "CG snapshot %(cgsnap)s not found when creating consistency group %(cg)s " "from source." msgstr "" #: cinder/consistencygroup/api.py:197 #, python-format msgid "" "Source CG %(source_cg)s not found when creating consistency group %(cg)s " "from source." msgstr "" #: cinder/consistencygroup/api.py:228 #, python-format msgid "" "Error occurred when creating consistency group %(cg)s from cgsnapshot " "%(cgsnap)s." msgstr "" #: cinder/consistencygroup/api.py:281 #, python-format msgid "" "Error occurred when creating volume entry from snapshot in the process of" " creating consistency group %(group)s from cgsnapshot %(cgsnap)s." msgstr "" #: cinder/consistencygroup/api.py:292 #, python-format msgid "" "Error occurred when creating consistency group %(group)s from cgsnapshot " "%(cgsnap)s." msgstr "" #: cinder/consistencygroup/api.py:342 #, python-format msgid "" "Error occurred when creating cloned volume in the process of creating " "consistency group %(group)s from source CG %(source_cg)s." msgstr "" #: cinder/consistencygroup/api.py:353 #, python-format msgid "" "Error occurred when creating consistency group %(group)s from source CG " "%(source_cg)s." msgstr "" #: cinder/consistencygroup/api.py:411 #, python-format msgid "Error occurred when building request spec list for consistency group %s." msgstr "" #: cinder/consistencygroup/api.py:437 #, python-format msgid "Failed to update quota for consistency group %s." msgstr "" #: cinder/consistencygroup/api.py:756 #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "" #: cinder/db/sqlalchemy/api.py:2874 #, python-format msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" #: cinder/db/sqlalchemy/api.py:3896 #, python-format msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" #: cinder/db/sqlalchemy/api.py:4269 #, python-format msgid "DBError detected when purging from table=%(table)s" msgstr "" #: cinder/image/glance.py:181 #, python-format msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" #: cinder/keymgr/barbican.py:115 msgid "Error creating Barbican client." msgstr "" #: cinder/keymgr/barbican.py:152 msgid "Error creating key." msgstr "" #: cinder/keymgr/barbican.py:208 msgid "Error storing key." msgstr "" #: cinder/keymgr/barbican.py:236 msgid "Error copying key." msgstr "" #: cinder/keymgr/barbican.py:273 msgid "Error getting secret data." msgstr "" #: cinder/keymgr/barbican.py:292 msgid "Error getting secret metadata." msgstr "" #: cinder/keymgr/barbican.py:321 msgid "Error getting key." msgstr "" #: cinder/keymgr/barbican.py:338 msgid "Error deleting key." msgstr "" #: cinder/scheduler/filter_scheduler.py:214 #, python-format msgid "" "Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " "%(exc)s" msgstr "" #: cinder/scheduler/manager.py:110 #, python-format msgid "Could not find a host for consistency group %(group_id)s." msgstr "" #: cinder/scheduler/manager.py:117 #, python-format msgid "Failed to create consistency group %(group_id)s." msgstr "" #: cinder/scheduler/manager.py:297 #, python-format msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" #: cinder/scheduler/scheduler_options.py:69 #, python-format msgid "Could not stat scheduler options file %(filename)s." msgstr "" #: cinder/scheduler/scheduler_options.py:79 msgid "Could not decode scheduler options." msgstr "" #: cinder/scheduler/filters/capacity_filter.py:46 msgid "Free capacity not set: volume node info collection broken." msgstr "" #: cinder/scheduler/flows/create_volume.py:98 #, python-format msgid "Failed to run task %(name)s: %(cause)s" msgstr "" #: cinder/scheduler/flows/create_volume.py:115 #, python-format msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" #: cinder/transfer/api.py:72 msgid "Volume in unexpected state" msgstr "" #: cinder/transfer/api.py:137 #, python-format msgid "Failed to create transfer record for %s" msgstr "" #: cinder/transfer/api.py:220 #, python-format msgid "Failed to update quota donating volume transfer id %s" msgstr "" #: cinder/volume/api.py:357 msgid "Failed to update quota while deleting volume." msgstr "" #: cinder/volume/api.py:1225 #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " "(%(d_consumed)dG of %(d_quota)dG already consumed)." msgstr "" #: cinder/volume/api.py:1531 cinder/volume/api.py:1583 #, python-format msgid "Unable to find service: %(service)s for given host: %(host)s." msgstr "" #: cinder/volume/api.py:1535 msgid "Unable to manage_existing volume on a disabled service." msgstr "" #: cinder/volume/api.py:1587 msgid "Unable to manage_existing snapshot on a disabled service." msgstr "" #: cinder/volume/driver.py:367 cinder/volume/drivers/hitachi/hnas_nfs.py:379 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:279 #, python-format msgid "Recovering from a failed execute. Try number %s" msgstr "" #: cinder/volume/driver.py:401 #, python-format msgid "Error detaching volume %(volume)s, due to remove export failure." msgstr "" #: cinder/volume/driver.py:423 msgid "Detaching snapshot from a remote node is not supported." 
msgstr "" #: cinder/volume/driver.py:444 #, python-format msgid "Error detaching snapshot %(snapshot)s, due to remove export failure." msgstr "" #: cinder/volume/driver.py:881 #, python-format msgid "" "Failed updating model of volume %(volume_id)s with driver provided model " "%(model)s" msgstr "" #: cinder/volume/driver.py:920 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2347 #, python-format msgid "Error detaching volume %s" msgstr "" #: cinder/volume/driver.py:936 msgid "Attaching snapshot from a remote node is not supported." msgstr "" #: cinder/volume/driver.py:957 #, python-format msgid "" "Failed updating model of snapshot %(snapshot_id)s with driver provided " "model %(model)s." msgstr "" #: cinder/volume/driver.py:1006 #, python-format msgid "Could not validate device %s" msgstr "" #: cinder/volume/driver.py:2342 cinder/volume/targets/iscsi.py:167 #, python-format msgid "ISCSI discovery attempt failed for:%s" msgstr "" #: cinder/volume/driver.py:2531 #, python-format msgid "The volume driver requires %(data)s in the connector." msgstr "" #: cinder/volume/driver.py:2803 #, python-format msgid "" "FibreChannelDriver validate_connector failed. No '%(setting)s'. Make sure" " HBA state is Online." msgstr "" #: cinder/volume/manager.py:295 #, python-format msgid "Invalid JSON: %s" msgstr "" #: cinder/volume/manager.py:332 msgid "Fetch volume pool name failed." msgstr "" #: cinder/volume/manager.py:428 msgid "Failed to initialize driver." msgstr "" #: cinder/volume/manager.py:456 msgid "Failed to re-export volume, setting to ERROR." msgstr "" #: cinder/volume/manager.py:486 msgid "Error during re-export on driver init." msgstr "" #: cinder/volume/manager.py:532 msgid "Service not found for updating replication_status." msgstr "" #: cinder/volume/manager.py:747 msgid "Unable to delete busy volume." msgstr "" #: cinder/volume/manager.py:776 msgid "Failed to update usages deleting volume." msgstr "" #: cinder/volume/manager.py:817 msgid "" "Unable to delete the destination volume during volume migration, (NOTE: " "database record needs to be deleted)." msgstr "" #: cinder/volume/manager.py:862 #, python-format msgid "" "Failed updating snapshot metadata using the provided volumes " "%(volume_id)s metadata" msgstr "" #: cinder/volume/manager.py:905 msgid "Delete snapshot failed, due to snapshot busy." msgstr "" #: cinder/volume/manager.py:933 msgid "Update snapshot usages failed." msgstr "" #: cinder/volume/manager.py:1121 msgid "Detach volume failed, due to uninitialized driver." msgstr "" #: cinder/volume/manager.py:1125 msgid "Detach volume failed, due to remove-export failure." msgstr "" #: cinder/volume/manager.py:1193 #, python-format msgid "" "Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " "(Exception: %(except)s)" msgstr "" #: cinder/volume/manager.py:1218 #, python-format msgid "Failed to clone volume %(volume_id)s for image %(image_id)s." msgstr "" #: cinder/volume/manager.py:1225 #, python-format msgid "Could not delete the image volume %(id)s." msgstr "" #: cinder/volume/manager.py:1255 #, python-format msgid "Failed to register image volume location %(uri)s." msgstr "" #: cinder/volume/manager.py:1265 #, python-format msgid "Could not delete failed image volume %(id)s." msgstr "" #: cinder/volume/manager.py:1310 #, python-format msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." 
msgstr "" #: cinder/volume/manager.py:1362 #, python-format msgid "" "Failed to get driver initiator data for initiator %(initiator)s and " "namespace %(namespace)s" msgstr "" #: cinder/volume/manager.py:1379 #, python-format msgid "" "Failed to update initiator data for initiator %(initiator)s and backend " "%(backend)s" msgstr "" #: cinder/volume/manager.py:1453 msgid "Model update failed." msgstr "" #: cinder/volume/manager.py:1583 msgid "Update volume model for transfer operation failed." msgstr "" #: cinder/volume/manager.py:1631 #, python-format msgid "Failed to attach volume %(vol)s." msgstr "" #: cinder/volume/manager.py:1657 #, python-format msgid "Unable to terminate volume connection: %(err)s." msgstr "" #: cinder/volume/manager.py:1678 msgid "Failed to attach source volume for copy." msgstr "" #: cinder/volume/manager.py:1701 #, python-format msgid "Failed to copy volume %(src)s to %(dest)s." msgstr "" #: cinder/volume/manager.py:1786 #, python-format msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" #: cinder/volume/manager.py:1878 #, python-format msgid "Detach migration source volume failed: %(err)s" msgstr "" #: cinder/volume/manager.py:1913 #, python-format msgid "Failed to request async delete of migration source vol %(vol)s: %(err)s" msgstr "" #: cinder/volume/manager.py:2127 msgid "Extend volume failed." msgstr "" #: cinder/volume/manager.py:2269 #, python-format msgid "" "Volume %s: driver error when trying to retype, falling back to generic " "mechanism." msgstr "" #: cinder/volume/manager.py:2377 msgid "Promote volume replica failed." msgstr "" #: cinder/volume/manager.py:2410 msgid "Sync volume replica failed." msgstr "" #: cinder/volume/manager.py:2446 msgid "Get replication status for volume failed." msgstr "" #: cinder/volume/manager.py:2481 #, python-format msgid "Consistency group %s: create failed" msgstr "" #: cinder/volume/manager.py:2516 #, python-format msgid "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." msgstr "" #: cinder/volume/manager.py:2544 #, python-format msgid "" "Create consistency group from source cg-%(cg)s failed: " "ConsistencyGroupNotFound." msgstr "" #: cinder/volume/manager.py:2603 #, python-format msgid "Create consistency group from source %(source)s failed." msgstr "" #: cinder/volume/manager.py:2647 #, python-format msgid "Source snapshot cannot be found for target volume %(volume_id)s." msgstr "" #: cinder/volume/manager.py:2670 #, python-format msgid "Source volumes cannot be found for target volume %(volume_id)s." msgstr "" #: cinder/volume/manager.py:2691 #, python-format msgid "Source snapshot %(snapshot_id)s cannot be found." msgstr "" #: cinder/volume/manager.py:2700 #, python-format msgid "The source volume %(volume_id)s cannot be found." msgstr "" #: cinder/volume/manager.py:2710 #, python-format msgid "" "Failed to update %(volume_id)s metadata using the provided snapshot " "%(snapshot_id)s metadata." msgstr "" #: cinder/volume/manager.py:2818 cinder/volume/manager.py:2837 msgid "Delete consistency group failed to update usages." msgstr "" #: cinder/volume/manager.py:2886 #, python-format msgid "" "Update consistency group failed to add volume-%(volume_id)s: " "VolumeNotFound." msgstr "" #: cinder/volume/manager.py:2915 #, python-format msgid "" "Update consistency group failed to remove volume-%(volume_id)s: " "VolumeNotFound." msgstr "" #: cinder/volume/manager.py:2965 #, python-format msgid "" "Error occurred in the volume driver when updating consistency group " "%(group_id)s." 
msgstr "" #: cinder/volume/manager.py:2978 #, python-format msgid "Error occurred when updating consistency group %(group_id)s." msgstr "" #: cinder/volume/manager.py:3085 #, python-format msgid "" "Failed updating %(snapshot_id)s metadata using the provided volumes " "%(volume_id)s metadata" msgstr "" #: cinder/volume/manager.py:3199 msgid "Failed to update usages deleting snapshot" msgstr "" #: cinder/volume/manager.py:3302 msgid "Failed to perform replication failover" msgstr "" #: cinder/volume/manager.py:3308 msgid "Invalid replication target specified for failover" msgstr "" #: cinder/volume/manager.py:3323 msgid "Driver reported error during replication failover." msgstr "" #: cinder/volume/manager.py:3330 #, python-format msgid "" "Error encountered during failover on host: %(host)s invalid target ID " "%(backend_id)" msgstr "" #: cinder/volume/manager.py:3418 msgid "" "Error encountered on Cinder backend during thaw operation, service will " "remain frozen." msgstr "" #: cinder/volume/manager.py:3444 #, python-format msgid "Failed to create manage_existing flow: %(object_type)s %(object_id)s." msgstr "" #: cinder/volume/qos_specs.py:87 cinder/volume/qos_specs.py:108 #: cinder/volume/qos_specs.py:158 cinder/volume/qos_specs.py:200 #: cinder/volume/qos_specs.py:214 cinder/volume/qos_specs.py:228 #: cinder/volume/volume_types.py:56 cinder/volume/volume_types.py:83 msgid "DB error:" msgstr "" #: cinder/volume/throttling.py:72 #, python-format msgid "Failed to create blkio cgroup '%(name)s'." msgstr "" #: cinder/volume/throttling.py:80 #, python-format msgid "Failed to get device number for throttling: %(error)s" msgstr "" #: cinder/volume/utils.py:375 #, python-format msgid "Failed to open volume from %(path)s." msgstr "" #: cinder/volume/volume_types.py:148 msgid "Default volume type is not found. Please check default_volume_type config:" msgstr "" #: cinder/volume/drivers/datera.py:118 msgid "" "Logging into the Datera cluster failed. Please check your username and " "password set in the cinder.conf and start the cinder-volume service " "again." msgstr "" #: cinder/volume/drivers/datera.py:156 msgid "" "Creation request failed. Please verify the extra-specs set for your " "volume types are entered correctly." msgstr "" #: cinder/volume/drivers/datera.py:367 msgid "Failed to get updated stats from Datera cluster." msgstr "" #: cinder/volume/drivers/datera.py:377 msgid "Failed to get updated stats from Datera Cluster." msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:1006 msgid "DRBDmanage: too many assignments returned." msgstr "" #: cinder/volume/drivers/eqlx.py:227 #, python-format msgid "%s" msgstr "" #: cinder/volume/drivers/eqlx.py:265 msgid "Error running command." msgstr "" #: cinder/volume/drivers/eqlx.py:275 #, python-format msgid "Error running SSH command: \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:412 msgid "Failed to setup the Dell EqualLogic driver." msgstr "" #: cinder/volume/drivers/eqlx.py:429 #, python-format msgid "Failed to create volume \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:439 #, python-format msgid "Failed to add multihost-access for volume \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:454 #, python-format msgid "Failed to delete volume \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:470 #, python-format msgid "Failed to create snapshot of volume \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:484 #, python-format msgid "Failed to create volume from snapshot \"%s\"." 
msgstr "" #: cinder/volume/drivers/eqlx.py:497 #, python-format msgid "Failed to create clone of volume \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:507 #, python-format msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." msgstr "" #: cinder/volume/drivers/eqlx.py:528 #, python-format msgid "Failed to initialize connection to volume \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:543 #, python-format msgid "Failed to terminate connection to volume \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:569 #, python-format msgid "Failed to ensure export of volume \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:588 #, python-format msgid "" "Failed to extend_volume %(name)s from %(current_size)sGB to " "%(new_size)sGB." msgstr "" #: cinder/volume/drivers/glusterfs.py:124 #, python-format msgid "Failed to umount %(share)s, reason=%(stderr)s" msgstr "" #: cinder/volume/drivers/glusterfs.py:372 cinder/volume/drivers/remotefs.py:270 #, python-format msgid "Exception during mounting %s" msgstr "" #: cinder/volume/drivers/glusterfs.py:431 #, python-format msgid "Mount failure for %(share)s." msgstr "" #: cinder/volume/drivers/glusterfs.py:458 #, python-format msgid "" "No snapshots found in database, but %(path)s has backing file " "%(backing_file)s!" msgstr "" #: cinder/volume/drivers/hgst.py:109 #, python-format msgid "" "CLI fail: '%(cmd)s' = %(code)s\n" "out: %(stdout)s\n" "err: %(stderr)s" msgstr "" #: cinder/volume/drivers/hgst.py:253 msgid "VGC-CLUSTER command blocked and cancelled." msgstr "" #: cinder/volume/drivers/lvm.py:372 cinder/volume/drivers/nfs.py:408 #: cinder/volume/drivers/dell/dell_storagecenter_common.py:638 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2844 #, python-format msgid "Unable to rename the logical volume for volume: %s" msgstr "" #: cinder/volume/drivers/lvm.py:417 #, python-format msgid "Unable to delete due to existing snapshot for volume: %s" msgstr "" #: cinder/volume/drivers/lvm.py:691 #, python-format msgid "Destination Volume Group %s does not exist" msgstr "" #: cinder/volume/drivers/lvm.py:723 #, python-format msgid "Volume migration failed due to exception: %(reason)s." msgstr "" #: cinder/volume/drivers/nfs.py:155 cinder/volume/drivers/nexenta/nfs.py:695 #, python-format msgid "Mount failure for %(share)s after %(count)d attempts." msgstr "" #: cinder/volume/drivers/nimble.py:182 msgid "" "Failed to create SOAP client.Check san_ip, username, password and make " "sure the array version is compatible" msgstr "" #: cinder/volume/drivers/nimble.py:539 #, python-format msgid "Re-throwing Exception %s" msgstr "" #: cinder/volume/drivers/pure.py:433 msgid "Unable to disconnect host from volume, could not determine Purity host" msgstr "" #: cinder/volume/drivers/pure.py:456 #, python-format msgid "Disconnection failed with message: %(msg)s." msgstr "" #: cinder/volume/drivers/pure.py:1128 #, python-format msgid "Disable replication on volume failed with message: %s" msgstr "" #: cinder/volume/drivers/pure.py:1451 #, python-format msgid "Error finding replicated pg snapshot on %(secondary)s." msgstr "" #: cinder/volume/drivers/pure.py:1598 #, python-format msgid "" "Purity host %(host_name)s is not managed by Cinder and can't have CHAP " "credentials modified. Remove IQN %(iqn)s from the host to resolve this " "issue." msgstr "" #: cinder/volume/drivers/pure.py:1608 #, python-format msgid "" "Purity host %(host_name)s is managed by Cinder but CHAP credentials could" " not be retrieved from the Cinder database." 
msgstr "" #: cinder/volume/drivers/rbd.py:224 #, python-format msgid "error opening rbd image %s" msgstr "" #: cinder/volume/drivers/rbd.py:415 msgid "error refreshing volume stats" msgstr "" #: cinder/volume/drivers/rbd.py:1088 #, python-format msgid "Unable to rename the logical volume for volume %s." msgstr "" #: cinder/volume/drivers/remotefs.py:482 #, python-format msgid "Share %s ignored due to invalid format. Must be of form address:/export." msgstr "" #: cinder/volume/drivers/remotefs.py:616 #, python-format msgid "Failed to created Cinder secure environment indicator file: %s" msgstr "" #: cinder/volume/drivers/remotefs.py:1278 msgid "Call to Nova to create snapshot failed" msgstr "" #: cinder/volume/drivers/remotefs.py:1370 msgid "Call to Nova delete snapshot failed" msgstr "" #: cinder/volume/drivers/sheepdog.py:91 #, python-format msgid "Sheepdog is not installed. OSError: command is %s." msgstr "" #: cinder/volume/drivers/sheepdog.py:94 #, python-format msgid "OSError: command is %s." msgstr "" #: cinder/volume/drivers/sheepdog.py:127 #, python-format msgid "Qemu-img is not installed. OSError: command is %(cmd)s." msgstr "" #: cinder/volume/drivers/sheepdog.py:130 #, python-format msgid "OSError: command is %(cmd)s." msgstr "" #: cinder/volume/drivers/sheepdog.py:145 #, python-format msgid "Failed to check cluster status.(command: %s)" msgstr "" #: cinder/volume/drivers/sheepdog.py:169 #, python-format msgid "Volume already exists. %s" msgstr "" #: cinder/volume/drivers/sheepdog.py:171 #, python-format msgid "Failed to create volume. %s" msgstr "" #: cinder/volume/drivers/sheepdog.py:193 #, python-format msgid "Failed to delete volume. %s" msgstr "" #: cinder/volume/drivers/sheepdog.py:204 #, python-format msgid "Volume \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" #: cinder/volume/drivers/sheepdog.py:210 #, python-format msgid "Snapshot \"%s\" already exists." msgstr "" #: cinder/volume/drivers/sheepdog.py:212 #, python-format msgid "Failed to create snapshot. (command: %s)" msgstr "" #: cinder/volume/drivers/sheepdog.py:239 #, python-format msgid "Failed to delete snapshot. (command: %s)" msgstr "" #: cinder/volume/drivers/sheepdog.py:254 #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" #: cinder/volume/drivers/sheepdog.py:258 #, python-format msgid "" "Clone volume \"%s\" already exists. Please check the results of \"dog vdi" " list\"." msgstr "" #: cinder/volume/drivers/sheepdog.py:262 #, python-format msgid "Src Volume \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" #: cinder/volume/drivers/sheepdog.py:266 #, python-format msgid "Snapshot \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" #: cinder/volume/drivers/sheepdog.py:270 #, python-format msgid "Volume size \"%sG\" is too large." msgstr "" #: cinder/volume/drivers/sheepdog.py:272 #, python-format msgid "Failed to clone volume.(command: %s)" msgstr "" #: cinder/volume/drivers/sheepdog.py:283 #, python-format msgid "Failed to resize vdi. vdi not found. %s" msgstr "" #: cinder/volume/drivers/sheepdog.py:286 #, python-format msgid "" "Failed to resize vdi. Shrinking vdi not supported. vdi: %(vdiname)s new " "size: %(size)s" msgstr "" #: cinder/volume/drivers/sheepdog.py:291 #, python-format msgid "" "Failed to resize vdi. Too large volume size. vdi: %(vdiname)s new size: " "%(size)s" msgstr "" #: cinder/volume/drivers/sheepdog.py:296 #, python-format msgid "Failed to resize vdi. 
vdi: %(vdiname)s new size: %(size)s" msgstr "" #: cinder/volume/drivers/sheepdog.py:305 #, python-format msgid "Failed to get volume status. %s" msgstr "" #: cinder/volume/drivers/sheepdog.py:491 #, python-format msgid "Failed to create cloned volume %s." msgstr "" #: cinder/volume/drivers/solidfire.py:432 #, python-format msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" #: cinder/volume/drivers/solidfire.py:627 #, python-format msgid "Volume %s, not found on SF Cluster." msgstr "" #: cinder/volume/drivers/solidfire.py:630 #, python-format msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" #: cinder/volume/drivers/solidfire.py:712 #, python-format msgid "Failed image conversion during cache creation: %s" msgstr "" #: cinder/volume/drivers/solidfire.py:1107 #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting delete_volume operation!" msgstr "" #: cinder/volume/drivers/solidfire.py:1110 msgid "This usually means the volume was never successfully created." msgstr "" #: cinder/volume/drivers/solidfire.py:1127 #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "delete_volume operation!" msgstr "" #: cinder/volume/drivers/solidfire.py:1157 #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting create_snapshot operation!" msgstr "" #: cinder/volume/drivers/solidfire.py:1369 #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "extend_volume operation!" msgstr "" #: cinder/volume/drivers/solidfire.py:1436 #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "attach_volume operation!" msgstr "" #: cinder/volume/drivers/solidfire.py:1462 #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "detach_volume operation!" msgstr "" #: cinder/volume/drivers/solidfire.py:1484 #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "accept_transfer operation!" msgstr "" #: cinder/volume/drivers/solidfire.py:1605 #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting unmanage operation!" msgstr "" #: cinder/volume/drivers/tintri.py:159 #, python-format msgid "Configuration value %s is not set." msgstr "" #: cinder/volume/drivers/tintri.py:179 cinder/volume/drivers/tintri.py:353 #: cinder/volume/drivers/netapp/dataontap/block_base.py:323 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:193 #, python-format msgid "Resizing %s failed. Cleaning volume." msgstr "" #: cinder/volume/drivers/tintri.py:267 #, python-format msgid "Unexpected exception during cache cleanup of snapshot %s" msgstr "" #: cinder/volume/drivers/tintri.py:807 msgid "Exception during mounting." msgstr "" #: cinder/volume/drivers/xio.py:94 msgid "san ip must be configured!" msgstr "" #: cinder/volume/drivers/xio.py:98 msgid "san_login must be configured!" msgstr "" #: cinder/volume/drivers/xio.py:102 msgid "san_password must be configured!" msgstr "" #: cinder/volume/drivers/xio.py:119 #, python-format msgid "Array query failed - No response (%d)!" msgstr "" #: cinder/volume/drivers/xio.py:134 msgid "Array query failed. No capabilities in response!" msgstr "" #: cinder/volume/drivers/xio.py:152 msgid "ISE FW version is not compatible with OpenStack!" 
msgstr "" #: cinder/volume/drivers/xio.py:159 msgid "Array query failed. No global id in XML response!" msgstr "" #: cinder/volume/drivers/xio.py:164 msgid "Array query failed. No controllers in response!" msgstr "" #: cinder/volume/drivers/xio.py:203 msgid "ISE globalid not set!" msgstr "" #: cinder/volume/drivers/xio.py:214 msgid "Primary IP must be set!" msgstr "" #: cinder/volume/drivers/xio.py:410 #, python-format msgid "Connection to %s failed and no secondary!" msgstr "" #: cinder/volume/drivers/xio.py:421 #, python-format msgid "Could not connect to %(primary)s or %(secondary)s!" msgstr "" #: cinder/volume/drivers/xio.py:494 cinder/volume/drivers/xio.py:532 #, python-format msgid "Controller GET failed (%d)" msgstr "" #: cinder/volume/drivers/xio.py:519 msgid "Failed to get IQN!" msgstr "" #: cinder/volume/drivers/xio.py:559 #, python-format msgid "Failed to get allocation information (%d)!" msgstr "" #: cinder/volume/drivers/xio.py:570 msgid "Failed to get LUN information!" msgstr "" #: cinder/volume/drivers/xio.py:628 #, python-format msgid "Could not GET allocation information (%d)!" msgstr "" #: cinder/volume/drivers/xio.py:693 #, python-format msgid "Failed to present volume %(name)s (%(status)d)!" msgstr "" #: cinder/volume/drivers/xio.py:719 #, python-format msgid "Failed to get allocation information: %(host)s (%(status)d)!" msgstr "" #: cinder/volume/drivers/xio.py:753 #, python-format msgid "Could not find any hosts (%s)" msgstr "" #: cinder/volume/drivers/xio.py:810 #, python-format msgid "POST for host create failed (%s)!" msgstr "" #: cinder/volume/drivers/xio.py:837 #, python-format msgid "Source volume %s not ready!" msgstr "" #: cinder/volume/drivers/xio.py:863 #, python-format msgid "Prepare clone failed for %s." msgstr "" #: cinder/volume/drivers/xio.py:875 #, python-format msgid "Clone %s not in prepared state!" msgstr "" #: cinder/volume/drivers/xio.py:881 #, python-format msgid "Commit clone failed: %(name)s (%(status)d)!" msgstr "" #: cinder/volume/drivers/xio.py:894 #, python-format msgid "Commit failed for %s!" msgstr "" #: cinder/volume/drivers/xio.py:1147 #, python-format msgid "Failed to create volume: %(name)s (%(status)s)" msgstr "" #: cinder/volume/drivers/xio.py:1162 #, python-format msgid "Failed to create volume %s." msgstr "" #: cinder/volume/drivers/xio.py:1221 #, python-format msgid "Timed out deleting %s!" msgstr "" #: cinder/volume/drivers/xio.py:1240 #, python-format msgid "modify volume: %s does not exist!" msgstr "" #: cinder/volume/drivers/xio.py:1249 #, python-format msgid "Modify volume PUT failed: %(name)s (%(status)d)." msgstr "" #: cinder/volume/drivers/xio.py:1277 msgid "manage_existing: No source-name in ref!" msgstr "" #: cinder/volume/drivers/xio.py:1296 msgid "manage_existing_get_size: No source-name in ref!" msgstr "" #: cinder/volume/drivers/xio.py:1302 #, python-format msgid "manage_existing_get_size: %s does not exist!" msgstr "" #: cinder/volume/drivers/xio.py:1312 #, python-format msgid "unmanage: Volume %s does not exist!" msgstr "" #: cinder/volume/drivers/xio.py:1331 msgid "Host could not be found!" msgstr "" #: cinder/volume/drivers/xio.py:1339 #, python-format msgid "Host PUT failed (%s)." msgstr "" #: cinder/volume/drivers/xio.py:1396 msgid "iscsi_ip_address must be set!" msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:304 #, python-format msgid "" "CloudByte operation [%(operation)s] failed for volume [%(vol)s]. " "Exhausted all [%(max)s] attempts." 
msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:630 #, python-format msgid "TSM [%s] not found in CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:932 #, python-format msgid "CloudByte does not have a volume corresponding to OpenStack volume [%s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:936 #, python-format msgid "CloudByte volume information not available for OpenStack volume [%s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:1124 #, python-format msgid "CloudByte snapshot information is not available for OpenStack volume [%s]." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:379 #, python-format msgid "Error invalid json: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:382 #: cinder/volume/drivers/dell/dell_storagecenter_api.py:402 #, python-format msgid "Error TypeError. %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:384 #: cinder/volume/drivers/dell/dell_storagecenter_api.py:404 #, python-format msgid "Error JSONDecodeError. %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:399 #, python-format msgid "Invalid API object: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:431 msgid "_check_version_fail: Parsing error." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:476 #, python-format msgid "Unrecognized Login Response: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:502 #, python-format msgid "Failed to find %(s)s. Result %(r)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:574 #, python-format msgid "Unable to create folder path %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:881 msgid "ScVolume returned success with empty payload. 
Attempting to locate volume" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:887 #, python-format msgid "Unable to create volume on SC: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1055 #, python-format msgid "AddHba error: %(wwn)s to %(srvname)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1159 msgid "Error adding HBA to server" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1234 #, python-format msgid "Error getting FaultDomainList for %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1255 msgid "Unable to find FC initiators" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1288 msgid "_find_mappings: volume is not active" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1307 #, python-format msgid "Unable to find mapping profiles: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1325 #, python-format msgid "Unable to find controller port: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1384 msgid "Volume appears unmapped" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1412 #, python-format msgid "Unable to retrieve VolumeConfiguration: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1460 #, python-format msgid "Unable to find controller port iscsi configuration: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1644 #, python-format msgid "Unable to map %(vol)s to %(srv)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1682 #, python-format msgid "Unable to unmap Volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1777 #, python-format msgid "Invalid ReplayList return: %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1801 #, python-format msgid "Error managing replay %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1819 #, python-format msgid "Error unmanaging replay %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1878 #, python-format msgid "Unable to create volume %s from replay" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1906 msgid "Error: unable to snap replay" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1932 #, python-format msgid "Error expanding volume %s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1952 #, python-format msgid "Error renaming volume %(original)s to %(name)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1974 msgid "User does not have permission to change Storage Profile selection." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1981 #, python-format msgid "Storage Profile %s was not found." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1988 msgid "Default Storage Profile was not found." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2002 #, python-format msgid "Error changing Storage Profile for volume %(original)s to %(name)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2064 #, python-format msgid "Multiple replay profiles under name %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2110 #, python-format msgid "Unable to delete profile %s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2181 #, python-format msgid "Failed to add %s to cg." 
msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2199 #, python-format msgid "Failed to remove %s from cg." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2318 #, python-format msgid "Unable to locate snapshot %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2525 #, python-format msgid "Unable to find or create QoS Node named %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2596 #, python-format msgid "Unable to delete replication for %(vol)s to %(dest)s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2621 #, python-format msgid "Unable to find disk folder %(name)s on %(ssn)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2671 #, python-format msgid "Unable to replicate %(volname)s to %(destsc)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:251 #: cinder/volume/drivers/dell/dell_storagecenter_common.py:410 #: cinder/volume/drivers/dell/dell_storagecenter_common.py:471 #, python-format msgid "Failed to create volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:320 #, python-format msgid "Failed to delete volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:525 #, python-format msgid "Failed to ensure export of volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:714 #: cinder/volume/drivers/dell/dell_storagecenter_common.py:752 #, python-format msgid "Cannot find Consistency Group %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:750 #, python-format msgid "Failed to snap Consistency Group %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:927 #, python-format msgid "Retype unable to find volume %s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:939 msgid "Failed to update storage profile" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:950 msgid "Failed to update replay profiles" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:978 msgid "Failed to apply replication:activereplay setting" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_fc.py:108 msgid "Lun mapping returned null!" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_fc.py:112 msgid "Failed to initialize connection." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_fc.py:157 msgid "Failed to terminate connection" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_iscsi.py:133 msgid "Failed to initialize connection" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_iscsi.py:166 #, python-format msgid "Failed to terminate connection %(initiator)s %(vol)s" msgstr "" #: cinder/volume/drivers/dothill/dothill_client.py:273 #, python-format msgid "Error in copying volume: %s" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:179 #, python-format msgid "Creation of volume %s failed." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:204 msgid "Volume must be detached for clone operation." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:226 #, python-format msgid "Cloning of volume %s failed." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:246 #, python-format msgid "Create volume failed from snapshot: %s" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:266 #, python-format msgid "Deletion of volume %s failed." 
msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:328 #, python-format msgid "Error mapping volume: %s" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:344 #, python-format msgid "Error unmapping volume: %s" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:353 msgid "Error getting active FC target ports." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:360 msgid "Error getting active ISCSI target iqns." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:367 msgid "Error getting active ISCSI target portals." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:384 #, python-format msgid "Creation of snapshot failed for volume: %s" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:401 #, python-format msgid "Deleting snapshot %s failed" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:423 #, python-format msgid "Extension of volume %s failed." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:432 msgid "Error getting chap record." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:439 msgid "Error creating chap record." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:484 #: cinder/volume/drivers/zfssa/zfssaiscsi.py:942 #, python-format msgid "Error migrating volume: %s" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:507 msgid "Error manage existing volume." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:525 msgid "Error manage existing get volume size." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:451 #, python-format msgid "Error Attaching volume %(vol)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:718 #, python-format msgid "Volume %(name)s not found on the array. No volume to migrate using retype." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:852 #, python-format msgid "" "Failed to return volume %(volumeName)s to original storage pool. Please " "contact your system administrator to return it to the correct location." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:941 #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:953 #, python-format msgid "" "Failed to verify that volume was added to storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:995 #, python-format msgid "Error finding target pool instance name for pool: %(targetPoolName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1007 #, python-format msgid "Error migrating volume: %(volumename)s. to target pool %(targetPoolName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1022 #, python-format msgid "" "Volume : %(volumeName)s. was not successfully migrated to target pool " "%(targetPoolName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1102 #, python-format msgid "" "Failed to add %(volumeName)s to default storage group for fast policy " "%(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1128 msgid "Error getting array, pool, SLO and workload." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1141 msgid "Error parsing array, pool, SLO and workload." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1144 #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s skipping storage-assisted migration." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1153 #, python-format msgid "" "Only SLO/workload migration within the same SRP Pool is supported in this" " version The source pool : %(sourcePoolName)s does not match the target " "array: %(targetPoolName)s. Skipping storage-assisted migration." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1181 #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of " "slo/workload combination: %(targetCombination)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1208 msgid "Error getting target pool name and array." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1220 msgid "Error parsing target pool name, array, and fast policy." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1224 #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s, skipping storage-assisted migration." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1239 #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of pool: " "%(pool)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1249 msgid "Only available volumes can be migrated between different protocols." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1591 #, python-format msgid "Target end points do not exist for hardware Id: %(hardwareIdInstance)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2003 #, python-format msgid "Unable to determine whether %(volumeName)s is composite or not." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2178 #, python-format msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2365 #, python-format msgid "Snapshot %(snapshotname)s not found on the array. No volume to delete." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3043 #: cinder/volume/drivers/emc/emc_vmax_common.py:3166 #, python-format msgid "" "Volume %(name)s is not suitable for storage assisted migration using " "retype." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3108 #, python-format msgid "" "Volume : %(volumeName)s has not been removed from source storage group " "%(storageGroup)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3122 #, python-format msgid "Failed to get or create storage group %(storageGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3135 #, python-format msgid "" "Volume : %(volumeName)s has not been added to target storage group " "%(storageGroup)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3407 #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage " "group for fast policy %(fastPolicyName)s. Please contact your sysadmin to" " get the volume returned to the default storage group." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4207 #, python-format msgid "Exception: %(ex)s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4226 #, python-format msgid "Volume %(name)s not found on the array." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:53 msgid "Cannot determine whether Tiering Policy is supported on this array." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:57 msgid "Tiering Policy is not supported on this array." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:90 msgid "Cannot determine if Tiering Policies are supported." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:116 msgid "FAST is not supported on this array." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:130 #, python-format msgid "Unable to find default storage group for FAST policy : %(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:180 #, python-format msgid "Unable to get default storage group %(defaultSgName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:217 #, python-format msgid "Failed to create a first volume for storage group : %(storageGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:228 #, python-format msgid "" "Failed to create default storage group for FAST policy : " "%(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:243 #, python-format msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:283 msgid "Unable to get associated pool of volume." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:506 #, python-format msgid "Cannot find the fast policy %(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:524 #, python-format msgid "" "Failed to add storage group %(storageGroupInstanceName)s to tier policy " "rule %(tierPolicyRuleInstanceName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:591 #, python-format msgid "Error disassociating storage group from policy: %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:667 #, python-format msgid "Exception: %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:202 msgid "The list of iscsi_ip_addresses is empty" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:124 #, python-format msgid "" "Masking View creation or retrieval was not successful for masking view " "%(maskingViewName)s. Attempting rollback." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:720 #, python-format msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:777 #, python-format msgid "" "Cannot add and verify tier policy association for storage group : " "%(storageGroupName)s to FAST policy : %(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:785 #, python-format msgid "Cannot get storage Group from job : %(storageGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:820 #, python-format msgid "" "Could not find port group : %(portGroupName)s. Check that the EMC " "configuration file has the correct port group name." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1196 #, python-format msgid "" "Cannot find a portGroup with name %(pgGroupName)s. The port group for a " "masking view must be pre-defined." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1225 #, python-format msgid "Cannot create or find an initiator group with name %(igGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1253 #, python-format msgid "" "Cannot find the new masking view just created with name " "%(maskingViewName)s." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1309 #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage " "group for fast policy %(fastPolicyName)s: Please contact your sys admin " "to get the volume re-added manually." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1443 #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1475 #, python-format msgid "" "One of the components of the original masking view %(maskingViewName)s " "cannot be retrieved so please contact your system administrator to check " "that the correct initiator(s) are part of masking." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1705 #, python-format msgid "" "Failed to remove %(volumeName)s from the default storage group for the " "FAST Policy." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2649 #, python-format msgid "Cannot get port group from masking view: %(maskingViewName)s. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2660 msgid "Cannot get port group name." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:379 #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:324 #, python-format msgid "_wait_for_job_complete failed after %(retries)d tries." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:425 #, python-format msgid "_wait_for_sync failed after %(retries)d tries." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:494 msgid "Cannot get storage system." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:968 #, python-format msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1179 msgid "Error parsing array from host capabilities." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1510 #, python-format msgid "" "SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, " "Platinum, Diamond, Optimized, NONE." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1515 #, python-format msgid "" "Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP," " OLTP_REP, NONE." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1638 #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:2101 #: cinder/volume/drivers/hpe/hpe_3par_common.py:1851 #: cinder/volume/drivers/hpe/hpe_3par_common.py:1865 #: cinder/volume/drivers/hpe/hpe_3par_common.py:1868 #: cinder/volume/drivers/hpe/hpe_3par_common.py:1871 #: cinder/volume/drivers/hpe/hpe_3par_common.py:1874 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2037 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2069 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2072 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2130 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2140 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2143 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2146 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2223 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2233 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2236 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2239 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2277 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2280 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2497 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2500 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2503 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2506 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2519 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2528 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2575 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2579 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1031 #, python-format msgid "Exception: %s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1888 msgid "" "Please check your xml for format or syntax errors. Please see " "documentation for more details." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1929 #, python-format msgid "Array Serial Number must be in the file %(fileName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1935 #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:792 #, python-format msgid "Error on enable compression on lun %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:803 #, python-format msgid "Error on adding lun to consistency group. %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1042 #, python-format msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1055 #, python-format msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1068 #, python-format msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1374 msgid "Failed to query migration status of LUN." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1390 #, python-format msgid "Migration of LUN %s failed to complete." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1666 #, python-format msgid "Invalid value for %(key)s, value is %(value)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1705 #, python-format msgid "Error happened during storage pool querying, %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2418 msgid "The given extra_spec or valid_values is None." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3162 #, python-format msgid "Create consistency group %s failed." 
msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3180 #, python-format msgid "Delete consistency group %s failed." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3246 #, python-format msgid "Create cg snapshot %s failed." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3271 #, python-format msgid "Delete cgsnapshot %s failed." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3685 #, python-format msgid "Failed to find available iSCSI targets for %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4010 #, python-format msgid "Failed to failover volume %(volume_id)s to %(target)s: %(error)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1129 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2994 #, python-format msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1174 #, python-format msgid "" "Failed to delete the snapshot %(snap)s of cgsnapshot: %(cgsnapshot_id)s. " "Exception: %(exception)s." msgstr "" #: cinder/volume/drivers/emc/xtremio.py:161 #, python-format msgid "can't create 2 volumes with the same name, %s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:166 #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:337 msgid "Failed to rename the created snapshot, reverting." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:43 msgid "import pywbem failed!! pywbem is necessary for this volume driver." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:2090 #, python-format msgid "_wait_for_job_complete, failed after %(retries)d tries." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:171 #, python-format msgid "MSGID%(id)04d-E: %(msg)s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_common.py:727 #, python-format msgid "Failed to update volume status: %s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_horcm.py:229 msgid "Failed to shutdown horcm." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_horcm.py:297 msgid "horcm command timeout." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_horcm.py:304 msgid "Failed to authenticate user." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_horcm.py:313 msgid "Failed to start horcm." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_horcm.py:317 msgid "Unexpected error occurs in horcm." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_iscsi.py:182 #, python-format msgid "Failed to add target(port: %s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_snm2.py:76 msgid "snm2 command timeout." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_snm2.py:90 msgid "Unexpected error occurs in snm2." msgstr "" #: cinder/volume/drivers/hitachi/hnas_backend.py:327 #, python-format msgid "Error getting iSCSI target info from EVS %(evs)s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:251 #, python-format msgid "No configuration found for service: %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:316 msgid "No more targets avaliable." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:456 #, python-format msgid "HDP not found: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:517 #, python-format msgid "iSCSI portal not found for service: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:631 msgid "delete_vol: provider location empty." 
msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:808 #, python-format msgid "Array mismatch %(myid)s vs %(arid)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:221 #, python-format msgid "No configuration found for service: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:500 #, python-format msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:601 #, python-format msgid "Invalid hostname %(host)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:794 #, python-format msgid "The NFS Volume %(cr)s does not exist." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:416 #, python-format msgid "" "The primary array must have an API version of %(min_ver)s or higher, but " "is only on %(current_ver)s, therefore replication is not supported." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:570 #, python-format msgid "Conflict detected in Virtual Volume Set %(volume_set)s: %(error)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:583 #, python-format msgid "There was an error deleting volume %(id)s: %(error)." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:604 #: cinder/volume/drivers/hpe/hpe_3par_common.py:615 #, python-format msgid "Virtual Volume Set %s does not exist." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:683 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:571 #, python-format msgid "There was an error deleting snapshot %(id)s: %(error)." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1030 #, python-format msgid "Error extending volume: %(vol)s. Exception: %(ex)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1556 #, python-format msgid "Error creating QOS rule %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1589 #, python-format msgid "Error setting Flash Cache policy to %s - exception" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2203 #, python-format msgid "Error extending volume %(id)s. Ex: %(ex)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2334 #, python-format msgid "Error attaching volume %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2409 #, python-format msgid "" "Changing the volume name from %(tmp)s to %(orig)s failed because " "%(reason)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2917 msgid "Issuing a fail-over failed because replication is not properly configured." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2964 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1468 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1610 #, python-format msgid "" "There was a problem with the failover (%(error)s) and it was " "unsuccessful. Volume '%(volume)s will not be available on the failed over" " target." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3111 #, python-format msgid "Could not log in to 3PAR array (%s) with the provided credentials." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3136 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1688 msgid "There must be at least one valid replication device configured." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3146 msgid "" "Extra spec replication:mode must be set and must be either 'sync' or " "'periodic'." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3154 msgid "" "Extra spec replication:sync_period must be greater than 299 and less than" " 31622401 seconds." 
msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3596 #, python-format msgid "Exception during snapCPG revert: %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3668 #, python-format msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3690 #: cinder/volume/drivers/hpe/hpe_3par_common.py:3693 #, python-format msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3701 #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume " "%(volume_name)s. Original volume set/QOS settings may not have been fully" " restored." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3712 #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume " "%(volume_name)s. Failed to remove from new volume set %(new_vvs)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:679 #, python-format msgid "Volume %s doesn't exist on array." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:429 msgid "Volume did not exist. It will not be deleted" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:472 #, python-format msgid "There was an error deleting volume %(id)s: %(error)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:509 #, python-format msgid "Could not find volume with name %(name)s. Error: %(error)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:535 #, python-format msgid "Could not create snapshot set. Error: '%s'" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:567 msgid "The snapshot cannot be deleted because it is a clone point." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:606 msgid "Snapshot did not exist. It will not be deleted" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:837 #, python-format msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1062 #, python-format msgid "" "Changing the volume name from %(tmp)s to %(orig)s failed because " "%(reason)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1558 #, python-format msgid "Could not log in to LeftHand array (%s) with the provided credentials." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:225 msgid "" "Extra specs must be specified as replication_type=' sync' or ' " "async'." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:229 #, python-format msgid "Extra specs must be specified as capabilities:%s=' True'." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:319 #, python-format msgid "Create hypermetro error: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:330 msgid "Create replication volume error." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:391 #, python-format msgid "Delete hypermetro error: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:404 msgid "Delete replication error." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:448 msgid "Get LUN migration error." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:506 #, python-format msgid "Unable to rename lun %s on array." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:866 msgid "Retype volume error. Delete replication failed." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:883 msgid "Retype volume error." 
msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:896 msgid "Retype volume error. Create replication failed." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1328 msgid "Manage exist volume failed." msgstr "" #: cinder/volume/drivers/huawei/replication.py:492 #, python-format msgid "Create pair failed. Error: %s." msgstr "" #: cinder/volume/drivers/huawei/replication.py:502 #, python-format msgid "Start synchronization failed. Error: %s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:96 #, python-format msgid "Bad response from server: %(url)s. Error: %(err)s" msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:106 #, python-format msgid "JSON transfer error: %s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:124 #, python-format msgid "" "Login error. URL: %(url)s\n" "Reason: %(reason)s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:161 msgid "Can't open the recent url, relogin." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:466 msgid "" "Error occurred when adding hostgroup and lungroup to view. Remove lun " "from lungroup now." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:619 #, python-format msgid "JSON transfer data error. %s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1835 msgid "Can not open the recent url, login again." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:231 #, python-format msgid "Failed to copy %(src)s to %(dest)s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:666 #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" "stdout: %(out)s\n" "stderr: %(err)s\n" msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:826 #, python-format msgid "" "_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " "%(host_name)s found." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:292 #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:383 msgid "The connector does not contain the required information: wwpns is missing" msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:249 #, python-format msgid "" "Failed to collect return properties for volume %(vol)s and connector " "%(conn)s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:383 msgid "" "The connector does not contain the required information: initiator is " "missing" msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:143 #, python-format msgid "Failed to issue mmgetstate command, error: %s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:154 #, python-format msgid "GPFS is not active. Detailed output: %s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:166 #, python-format msgid "Failed to issue df command for path %(path)s, error: %(error)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:181 cinder/volume/drivers/ibm/gpfs.py:267 #, python-format msgid "Failed to issue mmlsconfig command, error: %s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:191 #, python-format msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:246 #, python-format msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:284 #, python-format msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." 
msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:768 #, python-format msgid "Failed to issue mmlsfs command, error: %s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:933 #, python-format msgid "Failed to resize volume %(volume_id)s, error: %(error)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:1014 #, python-format msgid "" "Driver-based migration of volume %(vol)s failed. Move from %(src)s to " "%(dst)s failed with error: %(error)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:307 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2059 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2109 #: cinder/volume/drivers/san/san.py:150 #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:427 #: cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py:269 #, python-format msgid "Error running SSH command: %s" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:382 #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:390 msgid "A valid secondary target MUST be specified in order to failover." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:266 msgid "" "storwize_svc_multihostmap_enabled is set to False, not allowing multi " "host mapping." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:273 msgid "Error mapping VDisk-to-host" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:963 msgid "Replication must be specified as ' True' or ' False'." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1220 #, python-format msgid "Failed to create CGSnapshot. Exception: %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1236 #, python-format msgid "" "Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " "%(exception)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1297 #, python-format msgid "Failed to create CG from CGsnapshot. Exception: %s" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2091 #, python-format msgid "Error has occurred: %s" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2119 #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2360 #, python-format msgid "" "_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " "operations." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2364 #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " "operation: orig=%(orig)s new=%(new)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2375 #, python-format msgid "" "_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " "copy operations." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2383 #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified " "vdisk copy operation: orig=%(orig)s new=%(new)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2491 msgid "" "Unable to failover to the secondary. Please make sure that the secondary " "back-end is ready." 
msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2655 #, python-format msgid "" "The replication mode of %(type)s has not successfully established " "partnership with the replica Storwize target %(stor)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:99 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:99 msgid "The connector does not contain the required information." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:143 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:149 #, python-format msgid "Did not find expected column name in lsvdisk: %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:198 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:203 #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s.\n" msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py:48 #, python-format msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py:147 #, python-format msgid "Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py:222 #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: " "%(result)s" msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1541 #, python-format msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1700 msgid "Cannot detect replica status." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1930 #, python-format msgid "Failed to rename %(new_volume)s into %(volume)s." msgstr "" #: cinder/volume/drivers/netapp/utils.py:286 #, python-format msgid "Cannot get QoS spec for volume %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:211 #, python-format msgid "Exception creating LUN %(name)s in pool %(pool)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:328 #, python-format msgid "Exception cloning volume %(name)s from source volume %(source)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:466 #, python-format msgid "Message: %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:468 #, python-format msgid "Error getting LUN attribute. Exception: %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:618 #, python-format msgid "Failure deleting staged tmp LUN %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:621 #, python-format msgid "Unknown exception in post clone resize LUN %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:623 #, python-format msgid "Exception details: %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:1013 #, python-format msgid "Volume %(vol)s in the consistency group could not be deleted." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:123 #, python-format msgid "Exception creating vol %(name)s on pool %(pool)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:168 #, python-format msgid "" "Exception creating volume %(name)s from source %(source)s on share " "%(share)s." 
msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:94 #, python-format msgid "Setting QoS for %s failed" msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:367 #, python-format msgid "" "Could not do delete of volume %s on filer, falling back to exec of \"rm\"" " command." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:373 #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:403 #, python-format msgid "Exec of \"rm\" command on backing file for %s was unsuccessful." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:397 #, python-format msgid "" "Could not do delete of snapshot %s on filer, falling back to exec of " "\"rm\" command." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:446 #, python-format msgid "Copy offload workflow unsuccessful. %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:501 #, python-format msgid "Error in workflow copy from cache. %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_base.py:108 #, python-format msgid "Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_base.py:208 #, python-format msgid "LUN %(path)s geometry failed. Message - %(msg)s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py:51 #: cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py:58 msgid "" "Could not get performance base counter name. Performance-based scheduler " "functions may not be available." msgstr "" #: cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py:91 #: cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py:147 #, python-format msgid "Could not get utilization counters from node %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/performance/perf_base.py:86 #, python-format msgid "Could not calculate node utilization for node %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:88 #, python-format msgid "Unexpected error while invoking web service. Error - %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:254 #, python-format msgid "Error resolving host %(host)s. Error - %(e)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:593 #: cinder/volume/drivers/netapp/eseries/library.py:631 #, python-format msgid "Error creating volume. Msg - %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:611 #, python-format msgid "Error cleaning up failed volume creation. Msg - %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:661 #, python-format msgid "Failure restarting snap vol. Error: %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:691 #, python-format msgid "Vol copy job status %s." 
msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:278 #: cinder/volume/drivers/nexenta/nfs.py:420 #: cinder/volume/drivers/nexenta/ns5/nfs.py:326 #, python-format msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:457 #: cinder/volume/drivers/nexenta/nfs.py:308 #, python-format msgid "Error trying to change %(opt)s from %(old)s to %(new)s" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:105 #, python-format msgid "No VIP configured for service %s" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:112 #, python-format msgid "Error verifying iSCSI service %(serv)s on host %(hst)s" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:121 #, python-format msgid "Error verifying LUN container %(bkt)s" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:133 #, python-format msgid "Error retrieving LUN %(vol)s number" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:159 msgid "Error creating volume" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:168 msgid "Error deleting volume" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:178 msgid "Error extending volume" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:192 msgid "Error cloning volume" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:218 msgid "Error deleting snapshot" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:238 msgid "Error creating cloned volume" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:345 #, python-format msgid "Volume creation failed, deleting created snapshot %s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:59 #, python-format msgid "Failed to get fiber channel info from storage due to %(stat)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:84 #, python-format msgid "Failed to get fiber channel target from storage server due to %(stat)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:102 #, python-format msgid "Failed to get target wwpns from storage due to %(stat)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:120 msgid "Failed to get sns table" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:148 #, python-format msgid "" "Volume %(volumeid)s failed to send assign command, ret: %(status)s " "output: %(output)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:288 #, python-format msgid "Failed to export fiber channel target due to %s" msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:97 #, python-format msgid "JSON encode params %(param)s error: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:109 #, python-format msgid "Connect to Flexvisor error: %s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:113 #, python-format msgid "Connect to Flexvisor failed: %s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:137 #, python-format msgid "Failed to send request: %s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:146 msgid "The Flexvisor service is unavailable." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:160 #, python-format msgid "Failed to get response: %s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:169 #, python-format msgid "" "%(method)s %(url)s unexpected response status: %(response)s (expects: " "%(expects)s)." 
msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:187 #: cinder/volume/drivers/prophetstor/dplcommon.py:201 #, python-format msgid "Call to json.loads() raised an exception: %s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:191 #: cinder/volume/drivers/prophetstor/dplcommon.py:205 #, python-format msgid "Read response raised an exception: %s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:783 #, python-format msgid "Flexvisor failed to get event %(volume)s (%(status)s)." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1403 #, python-format msgid "Flexvisor failed to get pool list.(Error: %d)" msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1406 #, python-format msgid "Flexvisor failed to get pool list due to %s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1458 #, python-format msgid "Failed to get server info due to %(state)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1487 #, python-format msgid "Flexvisor failed to get pool %(id)s info." msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:176 #, python-format msgid "Lun create for %s failed!" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:200 #, python-format msgid "Lun %s has dependent snapshots, skipping lun deletion." msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:205 #, python-format msgid "Lun delete for %s failed!" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:242 msgid "LUN extend failed!" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:285 #, python-format msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:318 #, python-format msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:359 #, python-format msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:402 #, python-format msgid "Create new lun from lun for source %(src)s => destination %(dest)s failed!" msgstr "" #: cinder/volume/drivers/violin/v7000_fcp.py:220 #: cinder/volume/drivers/violin/v7000_fcp.py:249 msgid "Backend returned err for lun export." msgstr "" #: cinder/volume/drivers/violin/v7000_fcp.py:253 msgid "LUN unexport failed!" msgstr "" #: cinder/volume/drivers/vmware/datastore.py:70 #, python-format msgid "Storage profile: %s cannot be found in vCenter." msgstr "" #: cinder/volume/drivers/vmware/datastore.py:222 msgid "Error occurred while selecting datastore." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:460 #, python-format msgid "There are no valid hosts available in configured cluster(s): %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:466 #, python-format msgid "There is no valid datastore satisfying requirements: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:722 #, python-format msgid "Error occurred while copying %(src)s to %(dst)s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:830 #, python-format msgid "Error occurred while copying image: %(image_id)s to %(path)s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:962 #, python-format msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1037 #: cinder/volume/drivers/vmware/vmdk.py:1114 #, python-format msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." 
msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1325 #, python-format msgid "Error occurred while cloning backing: %s during retype." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1410 #, python-format msgid "Failed to extend volume: %(vol)s to size: %(size)s GB." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1529 msgid "Error occurred while creating temporary backing." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1779 #, python-format msgid "Version string '%s' is not parseable" msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1803 #, python-format msgid "Not able to configure PBM for vCenter server: %s" msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1400 #, python-format msgid "Virtual disk device of backing: %s not found." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1600 #, python-format msgid "Compute cluster: %s not found." msgstr "" #: cinder/volume/drivers/zfssa/restclient.py:298 #, python-format msgid "REST Not Available: %s" msgstr "" #: cinder/volume/drivers/zfssa/restclient.py:304 #, python-format msgid "Server Busy retry request: %s" msgstr "" #: cinder/volume/drivers/zfssa/restclient.py:310 #, python-format msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." msgstr "" #: cinder/volume/drivers/zfssa/restclient.py:325 #, python-format msgid "URLError: %s" msgstr "" #: cinder/volume/drivers/zfssa/webdavclient.py:112 #, python-format msgid "WebDAV returned with %(code)s error during %(method)s call." msgstr "" #: cinder/volume/drivers/zfssa/webdavclient.py:117 #, python-format msgid "" "WebDAV operation failed with error code: %(code)s reason: %(reason)s " "Retry attempt %(retry)s in progress." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:194 msgid "zfssa_initiator cannot be empty when creating a zfssa_initiator_group." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:329 #, python-format msgid "" "Volume ID %s was not found on the zfssa device while attempting " "delete_volume operation." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:380 #, python-format msgid "Snapshot %s: has clones" msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:492 #, python-format msgid "Clone Volume:%(volume)s failed from source volume:%(src_vref)s" msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:539 #: cinder/volume/drivers/zfssa/zfssanfs.py:341 #, python-format msgid "Image size %(img_size)dGB is larger than volume size %(vol_size)dGB." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:566 #: cinder/volume/drivers/zfssa/zfssanfs.py:373 #, python-format msgid "Cannot clone image %(image)s to volume %(volume)s. Error: %(error)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:851 #: cinder/volume/drivers/zfssa/zfssanfs.py:612 #, python-format msgid "" "Location info needed for backend enabled volume migration not in correct " "format: %s. Continuing with generic volume migration." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:857 msgid "" "zfssa_replication_ip not set in cinder.conf. zfssa_replication_ip is " "needed for backend enabled volume migration. Continuing with generic " "volume migration." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:1038 #: cinder/volume/drivers/zfssa/zfssanfs.py:669 #, python-format msgid "Failed to rename volume %(existing)s to %(new)s. Volume manage failed." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:1066 #: cinder/volume/drivers/zfssa/zfssanfs.py:742 #, python-format msgid "Failed to rename volume %(existing)s to %(new)s. Volume unmanage failed." 
msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:190 #, python-format msgid "Exception during mounting %s." msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:260 #, python-format msgid "" "Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d" " with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:680 #, python-format msgid "Failed to set properties for volume %(existing)s. Volume manage failed." msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:752 #, python-format msgid "Failed to set properties for volume %(existing)s. Volume unmanage failed." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:775 #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s of Volume: %(lun)s in Pool: " "%(pool)s, Project: %(project)s Return code: %(ret.status)d, Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:811 #, python-format msgid "" "Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool:" " %(pool)s Project: %(project)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1268 #, python-format msgid "Cannot delete file %s." msgstr "" #: cinder/volume/flows/common.py:61 #, python-format msgid "" "Failed setting source volume %(source_volid)s back to its initial " "%(source_status)s status" msgstr "" #: cinder/volume/flows/common.py:96 #, python-format msgid "Failed updating %(object_type)s %(object_id)s with %(update)s" msgstr "" #: cinder/volume/flows/api/create_volume.py:563 #, python-format msgid "Failed destroying volume entry %s" msgstr "" #: cinder/volume/flows/api/create_volume.py:668 #, python-format msgid "Failed rolling back quota for %s reservations" msgstr "" #: cinder/volume/flows/api/create_volume.py:715 #, python-format msgid "Failed to update quota for deleting volume: %s" msgstr "" #: cinder/volume/flows/api/create_volume.py:823 #: cinder/volume/flows/manager/create_volume.py:168 #: cinder/volume/flows/manager/create_volume.py:176 #: cinder/volume/flows/manager/create_volume.py:220 #, python-format msgid "Volume %s: create failed" msgstr "" #: cinder/volume/flows/api/create_volume.py:827 #: cinder/volume/flows/api/manage_existing.py:131 msgid "Unexpected build error:" msgstr "" #: cinder/volume/flows/api/manage_existing.py:95 #, python-format msgid "Failed destroying volume entry: %s." msgstr "" #: cinder/volume/flows/api/manage_existing.py:127 #, python-format msgid "Volume %s: manage failed." msgstr "" #: cinder/volume/flows/manager/create_volume.py:120 #, python-format msgid "Volume %s: update volume state failed." 
msgstr "" #: cinder/volume/flows/manager/create_volume.py:189 #, python-format msgid "Volume %s: rescheduling failed" msgstr "" #: cinder/volume/flows/manager/create_volume.py:349 #, python-format msgid "" "Failed notifying about the volume action %(event)s for volume " "%(volume_id)s" msgstr "" #: cinder/volume/flows/manager/create_volume.py:454 #, python-format msgid "" "Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" " glance snapshot %(snapshot_ref_id)s volume reference" msgstr "" #: cinder/volume/flows/manager/create_volume.py:471 #, python-format msgid "Failed updating volume %(volume_id)s bootable flag to true" msgstr "" #: cinder/volume/flows/manager/create_volume.py:526 #: cinder/volume/flows/manager/create_volume.py:535 #, python-format msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" msgstr "" #: cinder/volume/flows/manager/create_volume.py:531 #, python-format msgid "Failed to copy image to volume: %(volume_id)s" msgstr "" #: cinder/volume/flows/manager/create_volume.py:633 #, python-format msgid "Failed to clone image volume %(id)s." msgstr "" #: cinder/volume/flows/manager/create_volume.py:651 #, python-format msgid "Failed updating volume %(volume_id)s with %(updates)s" msgstr "" #: cinder/volume/flows/manager/create_volume.py:808 #, python-format msgid "Unable to create volume. Volume driver %s not initialized" msgstr "" #: cinder/volume/flows/manager/create_volume.py:846 #: cinder/volume/flows/manager/manage_existing.py:85 #, python-format msgid "" "Failed updating model of volume %(volume_id)s with creation provided " "model %(model)s" msgstr "" #: cinder/volume/flows/manager/create_volume.py:889 #, python-format msgid "Failed updating volume %(volume_id)s with %(update)s" msgstr "" #: cinder/volume/flows/manager/manage_existing.py:46 #, python-format msgid "Unable to manage existing volume. Volume driver %s not initialized." msgstr "" #: cinder/volume/flows/manager/manage_existing_snapshot.py:64 #, python-format msgid "Snapshot %s: create failed" msgstr "" #: cinder/volume/flows/manager/manage_existing_snapshot.py:90 #, python-format msgid "" "Failed notifying about the snapshot action %(event)s for snapshot " "%(snp_id)s." msgstr "" #: cinder/volume/flows/manager/manage_existing_snapshot.py:111 #, python-format msgid "Unable to manage existing snapshot. Volume driver %s not initialized." msgstr "" #: cinder/volume/flows/manager/manage_existing_snapshot.py:180 #, python-format msgid "Failed rolling back quota for %s reservations." msgstr "" #: cinder/volume/flows/manager/manage_existing_snapshot.py:224 #, python-format msgid "Failed to update quota while deleting snapshots: %s" msgstr "" #: cinder/volume/flows/manager/manage_existing_snapshot.py:251 #, python-format msgid "" "Failed updating model of snapshot %(snapshot_id)s with creation provided " "model %(model)s." msgstr "" #: cinder/volume/flows/manager/manage_existing_snapshot.py:297 #, python-format msgid "Failed updating snapshot %(snapshot_id)s with %(update)s." msgstr "" #: cinder/volume/targets/cxt.py:156 cinder/volume/targets/scst.py:132 #, python-format msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" #: cinder/volume/targets/cxt.py:177 #, python-format msgid "" "Failed to create iscsi target for volume id:%(vol_id)s. 
Please verify " "your configuration in %(volumes_dir)s" msgstr "" #: cinder/volume/targets/cxt.py:239 cinder/volume/targets/scst.py:337 #: cinder/volume/targets/scst.py:356 #, python-format msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" #: cinder/volume/targets/iet.py:54 #, python-format msgid "Failed to open iet session list for %s" msgstr "" #: cinder/volume/targets/iet.py:104 #, python-format msgid "Failed to create iscsi target for volume id:%s" msgstr "" #: cinder/volume/targets/iet.py:126 #, python-format msgid "Failed to create %(conf)s for volume id:%(vol_id)s" msgstr "" #: cinder/volume/targets/iet.py:142 #, python-format msgid "Failed to update %(conf)s for volume id:%(vol_id)s" msgstr "" #: cinder/volume/targets/iet.py:159 #, python-format msgid "Failed to remove iscsi target for volume id:%s" msgstr "" #: cinder/volume/targets/iet.py:186 #, python-format msgid "" "Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " "target" msgstr "" #: cinder/volume/targets/iscsi.py:301 msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" #: cinder/volume/targets/lio.py:41 msgid "cinder-rtstool is not installed correctly" msgstr "" #: cinder/volume/targets/lio.py:129 cinder/volume/targets/lio.py:137 #, python-format msgid "Failed to create iscsi target for volume id:%s." msgstr "" #: cinder/volume/targets/lio.py:157 #, python-format msgid "Failed to remove iscsi target for volume id:%s." msgstr "" #: cinder/volume/targets/lio.py:179 #, python-format msgid "Failed to add initiator iqn %s to target" msgstr "" #: cinder/volume/targets/lio.py:200 #, python-format msgid "Failed to delete initiator iqn %s from target." msgstr "" #: cinder/volume/targets/scst.py:121 #, python-format msgid "Failed to set attribute for enable target driver %s" msgstr "" #: cinder/volume/targets/scst.py:140 #, python-format msgid "Failed to set 'enable' attribute for SCST target %s" msgstr "" #: cinder/volume/targets/scst.py:172 #, python-format msgid "Failed to create group to SCST target %s" msgstr "" #: cinder/volume/targets/scst.py:184 #, python-format msgid "Failed to add initiator to group for SCST target %s" msgstr "" #: cinder/volume/targets/scst.py:201 #, python-format msgid "Failed to add device to handler %s" msgstr "" #: cinder/volume/targets/scst.py:218 #, python-format msgid "Failed to add lun to SCST target id:%(vol_id)s: %(e)s" msgstr "" #: cinder/volume/targets/scst.py:229 msgid "Failed to write in /etc/scst.conf." msgstr "" #: cinder/volume/targets/scst.py:314 #, python-format msgid "" "Skipping remove_export. No iscsi_target is presently exported for volume: " "%s" msgstr "" #: cinder/volume/targets/scst.py:345 cinder/volume/targets/scst.py:380 #, python-format msgid "Failed to close disk device %s" msgstr "" #: cinder/volume/targets/scst.py:371 #, python-format msgid "Failed to remove LUN %s" msgstr "" #: cinder/volume/targets/tgt.py:101 #, python-format msgid "" "Failed recovery attempt to create iscsi backing lun for Volume " "ID:%(vol_id)s: %(e)s" msgstr "" #: cinder/volume/targets/tgt.py:197 #, python-format msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "" #: cinder/volume/targets/tgt.py:221 #, python-format msgid "" "Failed to create iscsi target for Volume ID: %(vol_id)s. 
Please ensure " "your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" #: cinder/volume/targets/tgt.py:280 cinder/volume/targets/tgt.py:303 #, python-format msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "" #: cinder/zonemanager/fc_san_lookup_service.py:91 msgid "Unable to get device mapping from network." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py:129 #, python-format msgid "Failed collecting name server info from fabric %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py:190 msgid "Failed collecting nsshow info for fabric" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py:199 msgid "Failed collecting nscamshow" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:84 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:90 #, python-format msgid "Failed getting active zone set from fabric %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:155 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:176 #, python-format msgid "Deleting zone failed %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:269 #, python-format msgid "Failed collecting nsshow info for fabric %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:341 msgid "No CLI output for firmware version check" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:410 #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:473 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:431 msgid "Error executing SSH command." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:492 #, python-format msgid "Error executing command via ssh: %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:551 msgid "Error closing channel." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:393 #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:433 msgid "Error getting name server info." msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py:184 msgid "Failed collecting show fcns database for fabric" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:225 #, python-format msgid "Failed getting zone status from fabric %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:306 #, python-format msgid "Failed collecting fcns database info for fabric %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:449 msgid "Error executing command via ssh." msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:454 msgid "Error getting show fcns database info." msgstr "" cinder-8.0.0/cinder/locale/pt_BR/0000775000567000056710000000000012701406543017637 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/pt_BR/LC_MESSAGES/0000775000567000056710000000000012701406543021424 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/pt_BR/LC_MESSAGES/cinder.po0000664000567000056710000126140112701406257023237 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # FIRST AUTHOR , 2011 # Gabriel Wainer, 2013 # Lucas Ribeiro , 2014 # Rodrigo Felix de Almeida , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Carlos Marques , 2016. 
#zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev21\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-24 10:43+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 01:24+0000\n" "Last-Translator: Carlos Marques \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "Versão do OpenStack Cinder: %(version)s\n" #, python-format msgid " but size is now %d" msgstr " mas o tamanho agora é %d" #, python-format msgid " but size is now %d." msgstr " porém, o tamanho atual é %d." msgid " or " msgstr " ou " #, python-format msgid "%(attr)s is not set." msgstr "O %(attr)s não está configurado." #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing não pode gerenciar um volume conectado a hosts. " "Desconecte esse volume dos hosts existentes antes de importar" #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "resultado: %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s: Permissão negada." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: Falha com saída da CLI inesperada.\n" " Comando: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "Código de Status: %(_status)s\n" "Corpo: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, subjectAltName: %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s: criando NetworkPortal: assegure-se de que a porta %(port)d no " "IP %(ip)s não esteja sendo usada por outro serviço." #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s tem um requisito de caracteres mínimo de %(min_length)s." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s possui mais de %(max_length)s caracteres." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s falhou. Objeto de backup possui " "modo inesperado. Backups de arquivo ou imagem suportados, modo real é " "%(vol_mode)s." 
#, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "O Serviço %(service)s não é %(status)s no dispositivo de armazenamento: " "%(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s deve ser <= %(max_value)d" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s deve ser >= %(min_value)d" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "Valor %(worker_name)s de %(workers)d é inválido, deve ser maior que 0." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "Os \"dados\" %s não estão no resultado." #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "%s não pode ser acessado. Verifique se o GPFS está ativo e o sistema de " "arquivos está montado." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "%s não pode ser redimensionado usando a operação de clone, pois ele não " "contém blocos." #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "%s não pode ser redimensionado usando a operação de clone, pois ele está " "hospedado no volume compactado" #, python-format msgid "%s configuration option is not set." msgstr "A opção de configuração %s não está definida." #, python-format msgid "%s does not exist." msgstr "%s não existe." #, python-format msgid "%s is not a directory." msgstr "%s não é um diretório." #, python-format msgid "%s is not a string or unicode" msgstr "%s não é uma sequência ou unicode" #, python-format msgid "%s is not installed" msgstr "%s não está instalado" #, python-format msgid "%s is not installed." msgstr "%s não está instalado." #, python-format msgid "%s is not set" msgstr "%s não está configurado" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" "%s não está configurado e é necessário para que o dispositivo de replicação " "seja válido. " #, python-format msgid "%s is not set." msgstr "%s não está configurado." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s deve ser uma imagem bruta ou qcow2 válida." #, python-format msgid "%s must be an absolute path." msgstr "%s deve ser um caminho absoluto." #, python-format msgid "%s must be an integer." msgstr "%s deve ser um número inteiro." #, python-format msgid "%s not set in cinder.conf" msgstr "%s não configurado em cinder.conf" #, python-format msgid "%s not set." msgstr "%s não definido." #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s' é inválido para flashsystem_connection_protocol no arquivo de " "configuração. valor(es) válido(s) são %(enabled)s." msgid "'active' must be present when writing snap_info." msgstr "'ativo' deve estar presente ao gravar snap_info." msgid "'consistencygroup_id' must be specified" msgstr "'consistencygroup_id' deve ser especificado" msgid "'qemu-img info' parsing failed." msgstr "Falha na análise de 'qemu-img info'." msgid "'status' must be specified." msgstr "'status' deve ser especificado." 
msgid "'volume_id' must be specified" msgstr "'volume_id' deve ser especificado" msgid "'{}' object has no attribute '{}'" msgstr "O objeto '{}' não possui nenhum atributo '{}'" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(Comando: %(cmd)s) (Código de Retorno: %(exit_code)s) (Saída padrão: " "%(stdout)s) (Erro padrão: %(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "Um LUN (HLUN) não foi localizado. (LDEV: %(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "Foi feita uma solicitação atual possivelmente contraditória." #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "Um LUN (HLUN) livre não foi localizado. Inclua um grupo de hosts diferente. " "(LDEV: %(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" "Um grupo de hosts não pôde ser incluído. (porta: %(port)s, nome: %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "Um grupo de hosts não pôde ser excluído. (porta: %(port)s, GID: %(gid)s, " "nome: %(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "Um grupo de hosts é inválido. (grupo de hosts: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "Um par não pode ser excluído. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "Um par não pôde ser criado. O número máximo do par é excedido. (método de " "cópia: %(copy_method)s, P-VOL: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "Um parâmetro é inválido. (%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "Um valor de parâmetro é inválido. (%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "Um conjunto não pôde ser localizado. (ID do conjunto: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Um status de captura instantânea é inválido. (status: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" "Um destino secundário válido DEVE ser especificado para executar failover." msgid "A volume ID or share was not specified." msgstr "Um ID de volume ou compartilhamento não foi especificado." #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "Um status de volume é inválido. (status: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s falhou com sequência de erros %(err)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "Sequência de Versão de API %(version)s é de formato inválido. Ela deve " "estar no formato MajorNum.MinorNum." msgid "API key is missing for CloudByte driver." msgstr "A chave da API está ausente para o driver CloudByte." #, python-format msgid "API response: %(response)s" msgstr "Resposta da API: %(response)s" #, python-format msgid "API response: %s" msgstr "Resposta da API: %s" #, python-format msgid "API version %(version)s is not supported on this method." 
msgstr "A versão da API %(version)s não é suportada nesse método." msgid "API version could not be determined." msgstr "A versão da API não pôde ser determinada." msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "Prestes a excluir projetos filhos que têm cota diferente de zero. Isso não " "deve ser executado" msgid "Access list not available for public volume types." msgstr "Lista de acesso não disponível para tipos de volume público." msgid "Activate or deactivate QoS error." msgstr "Erro ao ativar ou desativar QoS" msgid "Activate snapshot error." msgstr "Erro ao ativar captura instantânea." msgid "Add FC port to host error." msgstr "Erro ao incluir porta FC no host." msgid "Add fc initiator to array error." msgstr "Erro ao incluir inicializador de FC na matriz." msgid "Add initiator to array error." msgstr "Erro ao incluir inicializador na matriz." msgid "Add lun to cache error." msgstr "Erro ao incluir lun no cache." msgid "Add lun to partition error." msgstr "Erro ao incluir LUN na partição." msgid "Add mapping view error." msgstr "Erro ao incluir visualização de mapeamento." msgid "Add new host error." msgstr "Erro ao incluir novo host." msgid "Add port to port group error." msgstr "Erro ao incluir porta no grupo de portas." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "Todos os conjuntos de armazenamento especificados para serem gerenciados não " "existem. Verifique sua configuração. Não existem conjuntos: %s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "" "Uma solicitação da versão da API deve ser comparada com um objeto " "VersionedMethod object." #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "Um erro ocorreu em SheepdogDriver. (Motivo: %(reason)s)" msgid "An error has occurred during backup operation" msgstr "Um erro ocorreu durante a operação de backup" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "Ocorreu um erro ao tentar modificar a Captura Instantânea '%s'." #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "Ocorreu um erro ao buscar o volume \"%s\"." #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "Ocorreu um erro durante a operação LUNcopy. Nome de LUNcopy: " "%(luncopyname)s. O status de LUNcopy: %(luncopystatus)s. Estado de LUNcopy: " "%(luncopystate)s." #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "Ocorreu um erro ao ler o volume \"%s\"." #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "Ocorreu um erro ao gravar no volume \"%s\"." #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "" "Um usuário de CHAP iSCSI não pôde ser incluído. (nome de usuário: %(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "" "Um usuário do CHAP iSCSI não pôde ser excluído. (nome de usuário: %(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "Um destino de iSCSI não pôde ser incluído. 
(porta: %(port)s, alias: " "%(alias)s, motivo: %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "Um destino de iSCSI não pôde ser excluído. (porta: %(port)s, tno: %(tno)s, " "alias: %(alias)s)" msgid "An unknown exception occurred." msgstr "Ocorreu uma exceção desconhecida." msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "" "Um usuário com escopo do token definido para um subprojeto não tem permissão " "para ver a quota de seus pais." msgid "Append port group description error." msgstr "Erro ao anexar descrição do grupo de portas." #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "Falha ao aplicar as zonas e cfgs no comutador (error code=%(err_code)s error " "msg=%(err_msg)s." #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "A matriz não existe ou está off-line. O status atual da matriz é %s." msgid "Associate host to hostgroup error." msgstr "Erro ao associar host ao grupo de hosts." msgid "Associate host to mapping view error." msgstr "Erro ao associar host ao mapeamento." msgid "Associate initiator to host error." msgstr "Erro ao associar inicializador ao host." msgid "Associate lun to QoS error." msgstr "Erro ao associar LUN ao QoS." msgid "Associate lun to lungroup error." msgstr "Erro ao associar LUN ao grupo de LUNs." msgid "Associate lungroup to mapping view error." msgstr "Erro ao associar grupo de LUNs à visualização de mapeamento." msgid "Associate portgroup to mapping view error." msgstr "Erro ao associar grupo de portas à visualização de mapeamento." msgid "At least one valid iSCSI IP address must be set." msgstr "Pelo menos um endereço IP iSCSI válido deve ser configurado." #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "Tentativa de transferir %s com chave de auth inválida." #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "" "Detalhes do grupo de autenticação [%s] não localizados no armazenamento " "CloudByte." msgid "Auth user details not found in CloudByte storage." msgstr "" "Detalhes do usuário de autenticação não localizados no armazenamento do " "CloudByte." msgid "Authentication error" msgstr "Erro de Autenticação" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "A autenticação falhou, verifique as credenciais do comutador, código de erro " "%s." msgid "Authorization error" msgstr "Erro de autorização" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "A zona de disponibilidade '%(s_az)s' é inválida." msgid "Available categories:" msgstr "Categorias disponíveis:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "As especificações de QoS backend não são suportadas nesta família de " "armazenamento e versão ONTAP. " #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "Backend não existe (%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "" "O backend já foi submetido a failover. Não é possível executar failback." 
#, python-format msgid "Backend reports: %(message)s" msgstr "Relatórios de backend: %(message)s" msgid "Backend reports: item already exists" msgstr "Relatórios de backend: o item já existe" msgid "Backend reports: item not found" msgstr "Relatórios de backend: item não localizado" msgid "Backend server not NaServer." msgstr "O servidor Backend não é NaServer." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" "Ocorrência de tempo limite de nova tentativa do serviço de backend: " "%(timeout)s segundos" msgid "Backend storage did not configure fiber channel target." msgstr "Armazenamento de backend não configurou o destino do Fiber Channel." msgid "Backing up an in-use volume must use the force flag." msgstr "O backup de um volume em uso deve usar a sinalização de força." #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "O backup %(backup_id)s não pôde ser localizado." msgid "Backup RBD operation failed" msgstr "Operação RBD de backup falhou" msgid "Backup already exists in database." msgstr "Backup já existe no banco de dados." #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "O driver de Backup reportou um erro: %(message)s" msgid "Backup id required" msgstr "ID de backup necessário" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "" "Backup não é suportado para volumes GlusterFS com capturas instantâneas." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "" "O backup é suportado apenas para volumes SOFS sem fazer backup do arquivo." msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "Backup é suportado apenas para volumes GlusterFS em formato bruto." msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "O backup é suportado apenas para volumes SOFS não formatados." msgid "Backup operation of an encrypted volume failed." msgstr "A operação de backup de um volume criptografado falhou." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "O serviço de backup %(configured_service)s não suporta verificação. O ID do " "backup %(id)s não foi verificado. Ignorando verificação." #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "Serviço de backup %(service)s não suporta verificação. ID do backup %(id)s " "não foi verificado. Ignorando reconfiguração." #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "Backup deve ter apenas uma captura instantânea mas possui apenas %s" msgid "Backup status must be available" msgstr "O status de backup deve ser disponível" #, python-format msgid "Backup status must be available and not %s." msgstr "Status de backup deve ser disponível e não %s." msgid "Backup status must be available or error" msgstr "O status de backup deve ser disponível ou erro" msgid "Backup to be restored has invalid size" msgstr "O backup a ser restaurado tem tamanho inválido" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "Linha de status inválido retornada: %(arg)s." 
#, python-format msgid "Bad key(s) in quota set: %s" msgstr "Chave(s) inválida(s) no conjunto de quota: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "Resposta inválida ou inesperada da API de backend do volume de " "armazenamento: %(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Formato de projeto inválido: o projeto não está no formato adequado (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Solicitação inválida enviada para cluster Datera: Argumentos inválidos: " "%(args)s | %(message)s" msgid "Bad response from Datera API" msgstr "Resposta inválida da API Datera" msgid "Bad response from SolidFire API" msgstr "Resposta inválida da API SolidFire" #, python-format msgid "Bad response from XMS, %s" msgstr "Resposta inválida de XMS, %s" msgid "Binary" msgstr "binário" msgid "Blank components" msgstr "Componentes em branco" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Esquema de autenticação da API Blockbridge (token ou senha)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Senha da API Blockbridge (para esquema de autenticação 'password')" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Token da API Blockbridge (para esquema de autenticação 'token')" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Usuário da API Blockbridge (para esquema de autorização 'password')" msgid "Blockbridge api host not configured" msgstr "Host da API Blockbridge não configurado" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "" "Blockbridge configurado com esquema de autenticação inválido " "'%(auth_scheme)s'" msgid "Blockbridge default pool does not exist" msgstr "O conjunto padrão Blockbridge não existe" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Senha Blockbridge não configurada (necessária para esquema de autenticação " "'password')" msgid "Blockbridge pools not configured" msgstr "Conjuntos Blockbridge não configurados" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Token Blockbridge não configurado (necessário para esquema de autenticação " "'token')" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Usuário Blockbridge não configurado (necessário para esquema de autenticação " "'password')" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "Erro na CLI do Fibre Channel Zoning Brocade: %(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Erro HTTP do Fibre Channel Zoning Brocade: %(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "O segredo do CHAP deve ter de 12 a 16 bytes." #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Saída de Exceção da CLI:\n" " comando: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Saída de Exceção da CLI:\n" " comando: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." 
msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E O mapeamento de VDisk para host não foi criado porque o VDisk já " "está mapeado para um host.\n" "\"" msgid "CONCERTO version is not supported" msgstr "versão CONCERTO não é suportada" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) não existe na matriz" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "O nome do cache é Nenhum; configure smartcache:cachename na chave." #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "" "O volume de cache %(cache_vol)s não tem a captura instantânea %(cache_snap)s." #, python-format msgid "Cache volume %s does not have required properties" msgstr "O volume de cache %s não possui as propriedades necessárias. " msgid "Call returned a None object" msgstr "A chamada retornou um objeto Nenhum" msgid "Can not add FC port to host." msgstr "Não é possível incluir a porta FC no host." #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "Não é possível localizar ID de cache pelo nome do cache %(name)s." #, python-format msgid "Can not find partition id by name %(name)s." msgstr "Não é possível localizar o ID da partição por nome %(name)s." #, python-format msgid "Can not get pool info. pool: %s" msgstr "Não é possível obter informações do conjunto. conjunto: %s" #, python-format msgid "Can not translate %s to integer." msgstr "Não foi possível converter %s para inteiro." #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "Não é possível acessar 'scality_sofs_config': %s" msgid "Can't attach snapshot." msgstr "Não é possível anexar a captura instantânea" msgid "Can't decode backup record." msgstr "Não é possível decodificar registro de backup." #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "Não é possível estender o volume de replicação, volume: %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "Não é possível localizar a LUN na matriz, verifique o source-name ou o " "source-id." #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "" "Não é possível localizar o nome do cache na matriz; o nome do cache é: " "%(name)s." #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "Não é possível localizar ID de LUN a partir do BD, volume: %(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" "Não é possível localizar as informações de LUN na matriz, volume: %(id)s, " "nome do LUN: %(name)s." #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" "Não é possível localizar o nome da partição na matriz; o nome da partição é: " "%(name)s." #, python-format msgid "Can't find service: %s" msgstr "Não é possível localizar o serviço %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "Não é possível localizar a captura instantânea na matriz, verifique o nome " "da origem ou o ID da origem." msgid "Can't find the same host id from arrays." msgstr "Não é possível localizar o mesmo ID de host a partir das matrizes." 
#, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "" "Não é possível obter ID de volume a partir da captura instantânea, captura " "instantânea: %(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "Não é possível obter ID de volume . Nome do volume %s." #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "Não é possível importar a LUN %(lun_id)s no Cinder. Tipo de LUN incompatível." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "Não é possível importar a LUN %s no Cinder. Já existe em um HyperMetroPair." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "Não é possível importar a LUN %s no Cinder. Já existe em uma tarefa de cópia " "da LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "" "Não é possível importar a LUN %s no Cinder. Já existe em um grupo de LUNs." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "" "Não é possível importar a LUN %s no Cinder. Já existe em um espelho de LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "" "Não é possível importar a LUN %s no Cinder. Já existe em um SplitMirror." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "Não é possível importar a LUN %s no Cinder. Já existe em uma tarefa de " "migração." #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "" "Não é possível importar a LUN %s no Cinder. Já existe em uma tarefa de " "replicação remota." #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "" "Não é possível importar a LUN %s no Cinder. O status de LUN não é normal." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "Não é possível importar a captura instantânea %s no Cinder. A captura " "instantânea não pertence ao volume." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "Não é possível importar a captura instantânea %s no Cinder. A captura " "instantânea é exporta para o iniciador." #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "Não é possível importar a captura instantânea %s no Cinder. O status captura " "instantânea não é normal ou o status de execução não é on-line." #, python-format msgid "Can't open config file: %s" msgstr "Não é possível abrir o arquivo de configuração: %s" msgid "Can't parse backup record." msgstr "Não é possível analisar o registro de backup." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "Não é possível incluir o volume %(volume_id)s no grupo de consistências " "%(group_id)s porque ele não possui nenhum tipo de volume." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "Não é possível incluir o volume %(volume_id)s no grupo de consistências " "%(group_id)s porque ele já está no grupo de consistências %(orig_group)s." 
#, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "Não é possível incluir o volume %(volume_id)s no grupo de consistências " "%(group_id)s porque o volume não pode ser localizado." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "Não é possível incluir o volume %(volume_id)s no grupo de consistências " "%(group_id)s porque o volume não existe." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "Não é possível incluir o volume %(volume_id)s no grupo de consistências " "%(group_id)s porque o volume está em um estado inválido: %(status)s. Os " "estados válidos são: %(valid)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "Não é possível incluir o volume %(volume_id)s no grupo de consistências " "%(group_id)s porque o tipo de volume %(volume_type)s não é suportado pelo " "grupo." #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "Não é possível anexar o volume já anexado %s; a anexação múltipla está " "desativada pela opção de configuração 'netapp_enable_multiattach'." msgid "Cannot change VF context in the session." msgstr "Não é possível alterar o contexto do VF na sessão." #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "Não é possível alterar o contexto do VF porque o VF especificado não está " "disponível na lista de VFs gerenciáveis %(vf_list)s." msgid "Cannot connect to ECOM server." msgstr "Não é possível conectar-se ao servidor ECOM." #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "Não é possível criar clone de tamanho %(vol_size)s a partir do volume de " "tamanho %(src_vol_size)s" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "Não é possível criar o grupo de consistências %(group)s porque a captura " "instantânea %(snap)s não está em um estado válido. Os estados válidos são: " "%(valid)s." #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "Não é possível criar grupo de consistências %(group)s porque o volume de " "origem %(source_vol)s não está em um estado válido. Os estados válidos são: " "%(valid)s." #, python-format msgid "Cannot create directory %s." msgstr "Não é possível criar o diretório %s." msgid "Cannot create encryption specs. Volume type in use." msgstr "" "Não foi possível criar especificações de criptografia. Tipo de volume em uso." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "Não foi possível criar imagem do formato de disco: %s. Apenas o formato de " "disco vmdk disk é aceito." #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "" "Não é possível criar visualização de mascaramento: %(maskingViewName)s. 
" #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "Não é possível criar mais de %(req)s volumes na matriz ESeries quando " "'netapp_enable_multiattach' está configurado como true." #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "Não é possível criar ou localizar um grupo de armazenamentos com o nome " "%(sgGroupName)s." #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "Não é possível criar o volume de tamanho %(vol_size)s a partir da captura " "instantânea de tamanho %(snap_size)s" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "Não é possível criar volume de tamanho %s: não é múltiplo de 8GB." #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "Não é possível criar o volume_type com o nome %(name)s e as especificações " "%(extra_specs)s" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "" "Não é possível excluir o LUN %s enquanto existem capturas instantâneas." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "Não é possível excluir o volume de cache: %(cachevol_name)s. Ele foi " "atualizado em %(updated_at)s e atualmente tem %(numclones)d instâncias de " "volume." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "Não é possível excluir o volume de cache: %(cachevol_name)s. Ele foi " "atualizado em %(updated_at)s e atualmente tem %(numclones)s instâncias de " "volume." msgid "Cannot delete encryption specs. Volume type in use." msgstr "" "Não é possível excluir especificações de criptografia. Tipo de volume em uso." msgid "Cannot determine storage pool settings." msgstr "" "Não é possível determinar as configurações do conjunto de armazenamentos." msgid "Cannot execute /sbin/mount.sofs" msgstr "Não é possível executar /sbin/mount.sofs" #, python-format msgid "Cannot find CG group %s." msgstr "Não é possível localizar o grupo CG %s." #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "Não é possível localizar o Serviço de Configuração do Controlador para o " "sistema de armazenamento %(storage_system)s." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" "Não é possível localizar o Serviço de replicação para criar volume para a " "captura instantânea %s." #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "" "Não é possível localizar o Serviço de replicação para excluir a captura " "instantânea %s." #, python-format msgid "Cannot find Replication service on system %s." msgstr "Não é possível localizar o Serviço de replicação no sistema %s." #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "" "Não é possível localizar o Volume: %(id)s. operação cancelar gerenciamento. " "Saindo..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "" "Não é possível localizar Volume:%(volumename)s. Estender operação. Saindo...." 
#, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "" "Não é possível localizar o número do dispositivo para o volume " "%(volumeName)s." msgid "Cannot find migration task." msgstr "Não é possível localizar a tarefa de migração." #, python-format msgid "Cannot find replication service on system %s." msgstr "Não é possível localizar o serviço de replicação no sistema %s." #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "" "Não é possível localizar a instância CG de origem. consistencygroup_id: %s." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "Não é possível obter mcs_id por ID de canal: %(channel_id)s." msgid "Cannot get necessary pool or storage system information." msgstr "" "Não é possível obter as informações necessárias do conjunto ou do sistema de " "armazenamento." #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "Não é possível obter ou criar um grupo de armazenamento: %(sgGroupName)s " "para o volume %(volumeName)s " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "Não é possível obter ou criar o grupo inicializador: %(igGroupName)s. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "Não é possível obter grupo da porta: %(pgGroupName)s. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "Não é possível obter o grupo de armazenamento: %(sgGroupName)s da " "visualização de mascaramento %(maskingViewInstanceName)s. " #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Não é possível obter o intervalo de tamanho suportado para %(sps)s Código de " "retorno: %(rc)lu. Erro: %(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "Não é possível obter o grupo de armazenamentos padrão para a política FAST: " "%(fastPolicyName)s." msgid "Cannot get the portgroup from the masking view." msgstr "" "Não é possível obter o grupo de portas a partir da visualização de máscara." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "Não é possível montar Scality SOFS, verifique os erros no syslog" msgid "Cannot ping DRBDmanage backend" msgstr "Não é possível executar ping do backend do DRBDmanage" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "Não é possível colocar o volume %(id)s em %(host)s" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "Não é possível fornecer 'cgsnapshot_id' e 'source_cgid' para criar o grupo " "de consistências %(name)s da origem." msgid "Cannot register resource" msgstr "Não foi possível registrar recurso" msgid "Cannot register resources" msgstr "Não foi possível registrar recursos" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "Não é possível remover o volume %(volume_id)s a partir do grupo de " "consistências %(group_id)s porque ele não está no grupo." #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." 
msgstr "" "Não é possível remover o volume %(volume_id)s a partir do grupo de " "consistências %(group_id)s porque o volume está em um estado inválido: " "%(status)s. Os estados válidos são: %(valid)s." #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "Não é possível redefinir de HPE3PARDriver para %s." msgid "Cannot retype from one 3PAR array to another." msgstr "Não é possível digitar novamente de uma matriz 3PAR para outra." msgid "Cannot retype to a CPG in a different domain." msgstr "Não é possível digitar novamente para um CPG em um domínio diferente." msgid "Cannot retype to a snap CPG in a different domain." msgstr "" "Não é possível digitar novamente em um CPG de snap em um domínio diferente." msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "Não é possível executar o comando vgc-cluster; verifique se o software está " "instalado e as permissões estão configuradas corretamente." msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "Não é possível definir hitachi_serial_number e hitachi_unit_name." msgid "Cannot specify both protection domain name and protection domain id." msgstr "Não é possível especificar o nome e o ID do domínio de proteção." msgid "Cannot specify both storage pool name and storage pool id." msgstr "" "Não é possível especificar o nome e o ID do conjunto de armazenamentos." #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "Não é possível atualizar o grupo de consistências %(group_id)s porque nenhum " "nome, descrição, add_volumes ou remove_volumes válido foi fornecido." msgid "Cannot update encryption specs. Volume type in use." msgstr "" "Não é possível atualizar especificações de criptografia. Tipo de volume em " "uso." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "Não é possível atualizar volume_type %(id)s" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "Não é possível verificar a existência do objeto: %(instanceName)s." msgid "Cascade option is not supported." msgstr "A opção em cascata não é suportada" #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "CgSnapshot %(cgsnapshot_id)s não pôde ser encontrado." msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost está vazio. Nenhum grupo de consistências será criado." msgid "Cgsnapshot status must be available or error" msgstr "O status da cgsnapshot deve estar disponível ou com erro" msgid "Change hostlun id error." msgstr "Erro ao alterar o ID de hostlun." msgid "Change lun priority error." msgstr "Erro ao mudar prioridade de LUN." msgid "Change lun smarttier policy error." msgstr "Erro ao mudar política smarttier de LUN." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "A mudança usaria menos de 0 para os recursos a seguir: %(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "" "Verifique as permissões de acesso para o compartilhamento do ZFS designado a " "este driver." msgid "Check hostgroup associate error." msgstr "Verifique o erro de associação ao grupo de hosts." msgid "Check initiator added to array error." msgstr "Erro ao verificar inicializador incluído na matriz." msgid "Check initiator associated to host error." 
msgstr "Erro ao verificar inicializador associado ao host." msgid "Check lungroup associate error." msgstr "Verifique o erro de associação ao grupo de LUNs." msgid "Check portgroup associate error." msgstr "Verifique o erro de associação ao grupo de portas." msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "Verifique o estado do serviço http. Além disso, assegure-se de que o número " "da porta https seja o mesmo que o especificado em cinder.conf." msgid "Chunk size is not multiple of block size for creating hash." msgstr "" "O tamanho do chunk não é múltiplo do tamanho de bloco para criar o hash." #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Erro na CLI do Fibre Channel Zoning Cisco: %(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "O recurso de clonagem não possui licença no %(storageSystem)s." #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "O tipo de clone '%(clone_type)s' é inválido; os valores válidos são: " "'%(full_clone)s' e '%(linked_clone)s'." msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "" "O cluster não está formatado. É provável que seja necessário executar \"dog " "cluster format\"." #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Falha no driver Coho Data Cinder: %(message)s" msgid "Coho rpc port is not configured" msgstr "A porta RPC Coho não está configurada" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Comando %(cmd)s bloqueado na CLI e foi cancelado" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition: tempo limite de %s" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: tempo limite de %s." msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "Ativador de Deduplicação não está instalado. Não é possível criar volume " "compactado." #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "Cluster de cálculo: %(cluster)s não localizado." msgid "Condition has no field." msgstr "A condição não possui campo." #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" "Configuração de 'max_over_subscription_ratio' inválida. Deve ser > 0: %s" msgid "Configuration error: dell_sc_ssn not set." msgstr "Erro de configuração: dell_sc_ssn não configurado." #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "O arquivo de configuração %(configurationFile)s não existe." msgid "Configuration is not found." msgstr "A configuração não foi localizada" #, python-format msgid "Configuration value %s is not set." msgstr "O valor de configuração %s não está configurado." msgid "Configured host type is not supported." msgstr "O tipo de host configurado não é suportado." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." 
msgstr "" "Conflito de especificações de QoS no tipo de volume %s: quando a " "especificação de QoS é associada ao tipo de volume; \"netapp:qos_policy_group" "\" não é permitido nas especificações extras de tipo de volume." #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Falha de conexão ao Glance: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Conexão com o swift falhou: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "O conector não fornece: %s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "O conector não possui as informações necessárias: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "O grupo de consistências %s ainda contém volumes. A sinalização de força é " "necessária para excluí-lo." #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "O grupo de consistências %s ainda possui cgsnapshots dependentes." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "O grupo de consistências está vazio. Nenhuma cgsnapshot será criada." #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "O status do grupo de consistências deve estar disponível ou com erro, porém, " "o status atual é:%s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" "O status do grupo de consistências deve estar disponível, mas o status atual " "é: %s." #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "ConsistencyGroup %(consistencygroup_id)s não pôde ser encontrado." msgid "Container" msgstr "Contêiner" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "Formato de contêiner: %s não é suportado pelo driver VMDK, somente 'bare' é " "suportado." msgid "Container size smaller than required file size." msgstr "Tamanho do contêiner menor do que o tamanho do arquivo necessário." msgid "Content type not supported." msgstr "Tipo de conteúdo não suportado." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "" "O Serviço de configuração do Controller não foi localizado em " "%(storageSystemName)s." #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "O IP do controlador '%(host)s' não pôde ser resolvido: %(e)s." #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "Convertido em %(f1)s, mas o formato agora é %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "Convertido em %(vol_format)s, mas o formato agora é %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "Convertido em bruto, mas o formato é agora %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "Convertida para raw, mas o formato agora é %s." msgid "Coordinator uninitialized." msgstr "Coordenador não inicializado." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "Cópia da tarefa do volume falhou: convert_to_base_volume: id=%(id)s, " "configuração=%(status)s." #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." 
msgstr "" "A tarefa de cópia de volume falhou: create_cloned_volume id=%(id)s, status=" "%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "Copiando metadados de %(src_type)s %(src_id)s para %(vol_id)s." msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "Não foi possível determinar qual terminal Keystone usar. Isso pode ser " "configurado no catálogo de serviços ou com a opção de configuração cinder." "conf 'backup_swift_auth_url'." msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "Não foi possível determinar qual terminal Swift usar. Isso pode ser " "configurado no catálogo de serviços ou com a opção de configuração cinder." "conf 'backup_swift_url'." msgid "Could not find DISCO wsdl file." msgstr "Não foi possível localizar o arquivo wsdl DISCO." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "Não foi possível localizar o ID do cluster GPFS: %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "" "Não foi possível localizar o dispositivo do sistema de arquivos GPFS: %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" "Não foi possível localizar um host para o volume %(volume_id)s com o tipo " "%(type_id)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Não foi possível localizar a configuração em %(path)s" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "" "Não foi possível localizar exportação iSCSI para o volume %(volumeName)s." #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Não foi possível localizar iSCSI export para o volume %s" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "" "Não foi possível localizar o destino de iSCSI para o volume: %(volume_id)s." #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "" "Não foi possível localizar a chave na saída do comando %(cmd)s: %(out)s." #, python-format msgid "Could not find parameter %(param)s" msgstr "Não foi possível encontrar o parâmetro %(param)s" #, python-format msgid "Could not find target %s" msgstr "Não foi possível localizar o destino %s" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "" "Não foi possível localizar o volume pai para a Captura Instantânea %s na " "matriz." #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "" "Não foi possível localizar a captura instantânea exclusiva %(snap)s no " "volume %(vol)s." msgid "Could not get system name." msgstr "Não foi possível obter o nome do sistema." #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" "Não foi possível carregar o aplicativo paste app '%(name)s' a partir do " "%(path)s" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "Não foi possível ler %s. Rodando novamente com sudo" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "Não foi possível ler informações para a captura instantânea %(name)s. " "Código: %(code)s. 
Razão: %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "" "Não foi possível restaurar o arquivo de configuração %(file_path)s: %(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "Não foi possível salvar a configuração para %(file_path)s: %(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "" "Não foi possível iniciar a captura instantânea %s do grupo de consistências." #, python-format msgid "Counter %s not found" msgstr "Contador %s não localizado" msgid "Create QoS policy error." msgstr "Erro ao criar política de QoS." #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Criação de backup interrompida, esperava-se o status de backup " "%(expected_status)s, mas foi obtido %(actual_status)s." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Criação de backup interrompida, esperava-se o status de volume " "%(expected_status)s, mas foi obtido %(actual_status)s." msgid "Create consistency group failed." msgstr "Falha ao criar grupo de consistências." #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "A criação de volumes criptografados com o tipo %(type)s da imagem %(image)s " "não é suportada." msgid "Create export for volume failed." msgstr "Falha ao criar exportação para o volume." msgid "Create hostgroup error." msgstr "Erro ao criar grupo de hosts." #, python-format msgid "Create hypermetro error. %s." msgstr "Erro ao criar hypermetro. %s." msgid "Create lun error." msgstr "Erro de criação de LUN." msgid "Create lun migration error." msgstr "Erro ao criar migração de LUN." msgid "Create luncopy error." msgstr "Erro de criação de luncopy." msgid "Create lungroup error." msgstr "Erro ao criar grupo de LUNs." msgid "Create manager volume flow failed." msgstr "Falha ao criar fluxo de volume do gerenciador." msgid "Create port group error." msgstr "Erro ao criar grupo de portas." msgid "Create replication error." msgstr "Erro ao criar replicação." #, python-format msgid "Create replication pair failed. Error: %s." msgstr "Falha ao criar o par de replicação. Erro %s." msgid "Create snapshot error." msgstr "Erro de criação de captura instantânea." #, python-format msgid "Create volume error. Because %s." msgstr "Erro ao criar volume. Porque %s." msgid "Create volume failed." msgstr "Criar o volume falhou." msgid "Creating a consistency group from a source is not currently supported." msgstr "" "Criar um grupo de consistências a partir de uma origem não é atualmente " "suportado." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "Criação e ativação de conjunto de zonas falhou: (Conjunto de Zona=" "%(cfg_name)s erro=%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "Criação e ativação de conjunto de zonas falhou: (Conjunto de Zona=" "%(zoneset)s erro=%(err)s)." #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "Criando usos para %(begin_period)s até %(end_period)s" msgid "Current host isn't part of HGST domain." msgstr "O host atual não faz parte do domínio HGST." 
#, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "O host atual não é válido para o volume %(id)s com tipo %(type)s, migração " "não permitida" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "O host mapeado atualmente para o volume %(vol)s está no grupo de hosts não " "suportado com %(group)s." msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "DESCONTINUADO: Implemente v1 da API Cinder." msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "DESCONTINUADO: Implemente v2 da API Cinder." #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "Erro do driver DRBDmanager: a chave esperada \"%s\" não está na resposta. A " "versão de DRBDmanage está errada?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "Erro de configuração do driver DRBDmanage: algumas bibliotecas necessárias " "(dbus, drbdmanage.*) não encontrado." #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage esperava um recurso (\"%(res)s\"), foi obtido %(n)d" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "Tempo limite de DRBDmanage ao aguardar novo volume após restauração da " "captura instantânea; recurso \"%(res)s\", volume \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "Tempo limite de DRBDmanage ao aguardar a criação da captura instantânea; " "recurso \"%(res)s\", captura instantânea\"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "" "Tempo limite de DRBDmanage ao aguardar a criação do volume; recurso \"%(res)s" "\", volume \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "Tempo limite de DRBDmanage ao aguardar o tamanho do volume; ID de volume " "\"%(id)s\" (res \"%(res)s\", vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "A versão da API do Data ONTAP não pôde ser determinada." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" "A operação ONTAP de dados em Modo 7 não suporta grupos de política de QoS." msgid "Database schema downgrade is not allowed." msgstr "O downgrade do esquema do banco de dados não é permitido." #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "" "O conjunto de dados %s não está compartilhado no dispositivo Nexenta Store." #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "Grupo de conjunto de dados %s não localizado no Nexenta SA." #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "A deduplicação é um tipo de fornecimento válido, mas requer a versão WSAPI " "'%(dedup_version)s' versão '%(version)s' está instalada." msgid "Dedup luns cannot be extended" msgstr "LUNs dedup não podem ser estendidos" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "Ativador de Deduplicação não está instalado. 
Não é possível criar volume " "deduplicado" msgid "Default pool name if unspecified." msgstr "Nome do conjunto padrão, se não especificado." #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "Quota padrão para recurso: %(res)s é definida pela flag de quota padrão: " "quota_%(res)s, isso está deprecado agora. Por favor, use a classe de quota " "padrão para definir a quota padrão." msgid "Default volume type can not be found." msgstr "O tipo de volume padrão não pode ser localizado." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "Define o conjunto de conjuntos expostos e suas cadeias de consulta de " "backend associadas" msgid "Delete LUNcopy error." msgstr "Erro ao excluir LUNcopy." msgid "Delete QoS policy error." msgstr "Erro ao excluir política de QoS." msgid "Delete associated lun from lungroup error." msgstr "Erro ao excluir LUN associado do grupo de LUNs." #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Exclusão de backup interrompida, o serviço de backup atualmente configurado " "[%(configured_service)s] não é o serviço de backup que foi usado para criar " "esse backup [%(backup_service)s]." msgid "Delete consistency group failed." msgstr "Falha ao excluir grupo de consistências." msgid "Delete hostgroup error." msgstr "Erro ao excluir grupo de hosts." msgid "Delete hostgroup from mapping view error." msgstr "Erro ao excluir grupo de hosts do mapeamento." msgid "Delete lun error." msgstr "Erro ao excluir lun." msgid "Delete lun migration error." msgstr "Erro ao excluir migração de LUN." msgid "Delete lungroup error." msgstr "Erro ao excluir grupo de LUNs." msgid "Delete lungroup from mapping view error." msgstr "Erro ao excluir grupo de LUNs da visualização de mapeamento." msgid "Delete mapping view error." msgstr "Erro ao excluir visualização de mapeamento." msgid "Delete port group error." msgstr "Erro ao excluir grupo de portas." msgid "Delete portgroup from mapping view error." msgstr "Erro ao excluir grupo de portas da visualização de mapeamento." msgid "Delete snapshot error." msgstr "Erro ao excluir captura instantânea." #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "Remoção da captura instantânea do volume não suportada no estado: %s." #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup interrompida, esperava-se o status de backup " "%(expected_status)s mas obteve %(actual_status)s." msgid "Deleting volume from database and skipping rpc." msgstr "Excluindo volume do banco de dados e ignorando rpc." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "Exclusão de zonas falhou: (comando=%(cmd)s erro=%(err)s)." msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "" "Dell API 2.1 ou mais recente é necessária para suporte do Grupo de " "consistências" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." 
msgstr "" "Replicação de erro de configuração do driver Dell Cinder não suportada com " "conexão direta" #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "replication_device %s de erro de configuração de driver Dell Cinder não " "localizada" msgid "Deploy v3 of the Cinder API." msgstr "Implemente v3 da API Cinder." msgid "Describe-resource is admin only functionality" msgstr "O Descrever-recurso é uma funcionalidade apenas administrativa" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "O destino possui migration_status %(stat)s, esperado %(exp)s." msgid "Destination host must be different than the current host." msgstr "O host de destino deve ser diferente do host atual." msgid "Destination volume not mid-migration." msgstr "Volume de destino não de migração intermediária." msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "Falha ao remover volume: mais de um anexo, mas nenhum attachment_id " "fornecido." msgid "Detach volume from instance and then try again." msgstr "Remova o volume da instância e, em seguida, tente novamente." #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "Detectado mais de um volume com o nome %(vol_name)s" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "A coluna esperada não foi localizada em %(fun)s: %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "" "Não foi possível localizar a chave esperada %(key)s em %(fun)s: %(raw)s." msgid "Disabled reason contains invalid characters or is too long" msgstr "Razão desativada contém caracteres inválidos ou é muito longa" #, python-format msgid "Domain with name %s wasn't found." msgstr "O domínio com o nome %s não foi localizado." #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Cluster GPFS de Nível Inferior Detectado. O recurso Clone do GPFS não está " "ativado no nível de daemon do cluster %(cur)s - deve estar pelo menos no " "nível %(min)s." #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Falha ao inicializar conexão do driver (erro: %(err)s)." msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "O driver não pode ser redefinido porque o volume (LUN {}) possui captura " "instantânea que é proibida pra migração. " msgid "Driver must implement initialize_connection" msgstr "O driver deve implementar initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "Driver decodificado com êxito importou os dados de backup, mas há campos " "ausentes (%s)." #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "A versão %(current_version)s da API de proxy E-series não suporta o conjunto " "integral de especificações extras SSC. A versão do proxy deve ser pelo menos " "%(min_version)s." #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." 
msgstr "" "Exceção da CLI do Driver Cinder de EMC VNX: %(cmd)s (Código de Retorno: " "%(rc)s) (Saída: %(out)s)." #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" "SPUnavailableException do Driver EMC VNX Cinder: %(cmd)s (Código de Retorno: " "%(rc)s) (Saída: %(out)s)." msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword devem ter valores " "válidos." #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "'cgsnapshot_id' ou 'source_cgid' deve ser fornecido para criar o grupo de " "consistências %(name)s a partir da origem." #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "O SLO: %(slo)s ou carga de trabalho %(workload)s é inválido. Examine a " "instrução de erro anterior para obter os valores válidos." msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "" "Apenas um dos campos hitachi_serial_number ou hitachi_unit_name é necessário." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "" "O Serviço de composição do elemento não foi localizado em " "%(storageSystemName)s." msgid "Enables QoS." msgstr "Permite de QoS." msgid "Enables compression." msgstr "Ativa a compactação." msgid "Enables replication." msgstr "Permite replicação." msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "Assegure-se de que configfs esteja montado em /sys/kernel/config." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Erro ao Incluir Inicializador: %(initiator)s em groupInitiatorGroup: " "%(initiatorgroup)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Incluir ao TargetGroup: %(targetgroup)s com IQN: %(iqn)s Código de " "retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Erro ao anexar o volume %(vol)s." #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Erro Clonando Captura Instantânea: %(snapshot)s no Volume: %(lun)s de " "Conjunto: %(pool)s Projeto: %(project)s Projeto clone: %(clone_proj)s Código " "de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Erro ao criar volume clonado: %(cloneName)s Código de retorno: %(rc)lu. " "Erro: %(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao Criar Volume Clonado: Volume: %(cloneName)s Volume Volume: " "%(sourceName)s. Código de Retorno: %(rc)lu. Erro: %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao Criar Grupo: %(groupName)s. 
Código de retorno: %(rc)lu. Erro: " "%(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Erro ao Criar Visualização de Mascaramento: %(groupName)s. Código de " "retorno: %(rc)lu. Erro: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao Criar Volume: %(volumeName)s. Código de retorno: %(rc)lu. Erro: " "%(error)s." #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao Criar Volume: %(volumename)s. Código de retorno: %(rc)lu. Erro: " "%(error)s." #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Erro CreateGroupReplica: origem: %(source)s destino: %(target)s. Código de " "retorno: %(rc)lu. Erro: %(error)s." #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erro ao Criar Inicializador: %(initiator)s em Alias: %(alias)s Código de " "retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erro ao Criar Projeto: %(project)s no Conjunto: %(pool)s Código de retorno: " "%(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao criar propriedade: %(property)s Tipo: %(type)s Descrição: " "%(description)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erro ao Criar Compartilhamento: %(name)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Criar Captura Instantânea: %(snapshot)s no Volume: %(lun)s para o " "Conjunto: %(pool)s Projeto: %(project)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Criar Captura Instantânea: %(snapshot)s no Compartilhamento: " "%(share)s para Conjunto: %(pool)s Projeto: %(project)s Código de retorno: " "%(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Erro ao Criar Destino: %(alias)s Código de Retorno: %(ret.status)d Mensagem: " "%(ret.data)s." #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erro ao Criar TargetGroup: %(targetgroup)s withIQN: %(iqn)s Código de " "retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Erro ao Criar Volume: %(lun)s Tamanho: %(size)s Código de retorno: " "%(ret.status)d Mensagem: %(ret.data)s." 
#, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao criar o novo código de retorno do volume composto: %(rc)lu. Erro: " "%(error)s." #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erro ao criar ação de replicação em: Conjunto: %(pool)s Projeto: %(proj)s " "volume: %(vol)s para destino: %(tgt)s e conjunto: %(tgt_pool)s Código de " "retorno: %(ret.status)d Mensagem: %(ret.data)s." msgid "Error Creating unbound volume on an Extend operation." msgstr "Erro ao criar o volume desvinculado em uma operação Estender." msgid "Error Creating unbound volume." msgstr "Erro ao Criar volume desvinculado." #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao Excluir Volume: %(volumeName)s. Código de retorno: %(rc)lu. Erro: " "%(error)s." #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "Erro ao excluir grupo: %(storageGroupName)s. Código de retorno:%(rc)lu. " "Erro: %(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "Erro ao Excluir Grupo de Iniciadores: %(initiatorGroupName)s. Código de " "retorno:%(rc)lu. Erro: %(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Excluir Captura Instantânea: %(snapshot)s no Compartilhamento: " "%(share)s para Conjunto: %(pool)s Projeto: %(project)s Código de retorno: " "%(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Excluir Captura Instantânea: %(snapshot)s no Volume: %(lun)s para " "Conjunto: %(pool)s Projeto: %(project)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Erro ao excluir volume: %(lun)s do Conjunto: %(pool)s, Projeto: %(project)s. " "Código de retorno: %(ret.status)d, Mensagem: %(ret.data)s." #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao excluir projeto: %(project)s no pool: %(pool)s Código de retorno: " "%(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Erro ao excluir ação de replicação: %(id)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao Estender Volume: %(volumeName)s. Código de retorno: %(rc)lu. Erro: " "%(error)s." #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." 
msgstr "" "Erro ao Obter Inicializadores: InitiatorGroup: %(initiatorgroup)s Código de " "retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "Erro ao Obter Estatísticas do Conjunto: Conjunto %(pool)s, Código de " "Retorno: %(status)d, Mensagem: %(data)s." #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao obter estatísticas do projeto: Conjunto: %(pool)s Projeto: " "%(project)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Obter Compartilhamento: %(share)s no Conjunto: %(pool)s Projeto: " "%(project)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Obter Captura Instantânea: %(snapshot)s no Volume:%(lun)s para " "Conjunto: %(pool)s Projeto: %(project)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Erro ao Obter Destino: %(alias)s Código de retorno: %(ret.status)d Mensagem: " "%(ret.data)s." #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Obter Volume: %(lun)s em Conjunto: %(pool)s Projeto: %(project)s " "Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Erro ao Migrar volume de um conjunto para outro. Código de retorno: " "%(rc)lu. Erro: %(error)s." #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Erro ao Modificar visualização de mascaramento: %(groupName)s. Código de " "retorno: %(rc)lu. Erro: %(error)s." #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "" "Erro de propriedade do Conjunto: O conjunto %(pool)s não é de propriedade do " "%(host)s." #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Definir instalações do operador programável: %(props)s no Volume: " "%(lun)s de Conjunto: %(pool)s Projeto: %(project)s Código de retorno: " "%(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao finalizar a sessão de migração. Código de retorno: %(rc)lu. Erro: " "%(error)s." #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erro ao Verificar Inicializador: %(iqn)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." 
msgstr "" "Erro ao Verificar o Conjunto: %(pool)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao Verificar Projeto: %(project)s no Conjunto: %(pool)s Código de " "retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erro ao Verificar o Serviço: %(service)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erro ao Verificar Destino: %(alias)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erro ao verificar compartilhamento: %(share)s no Projeto: %(project)s e no " "Conjunto: %(pool)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "Erro ao incluir o Volume: %(volumeName)s com o caminho de instância: " "%(volumeInstancePath)s." #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Erro ao incluir inicializador para o grupo :%(groupName)s. Código de " "retorno :%(rc)lu. Erro: %(error)s." #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "Erro ao incluir o volume ao volume composto. O erro é: %(error)s." #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "Erro ao anexar o volume %(volumename)s ao volume de base de destino." #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "Erro de associação de grupo de armazenamento : %(storageGroupName)s. Para " "Política FAST: %(fastPolicyName)s com descrição do erro: %(errordesc)s." #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "Erro ao conectar o volume %s. O limite de destino pode ser atingido!" #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Relacionamento de clone de quebra de erro: Nome da sincronização: " "%(syncName)s Código de retorno: %(rc)lu. Erro: %(error)s." msgid "Error connecting to ceph cluster." msgstr "Erro ao se conectar ao cluster ceph." #, python-format msgid "Error connecting via ssh: %s" msgstr "Erro ao conectar via ssh: %s" #, python-format msgid "Error creating volume: %s." msgstr "Erro ao criar volume: %s." msgid "Error deleting replay profile." msgstr "Erro ao excluir perfil de reprodução." #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "Erro ao excluir o volume %(ssn)s: %(volume)s." #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "Erro ao excluir o volume %(vol)s: %(err)s." #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "Erro durante a análise do avaliador: %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." 
msgstr "" "Erro ao editar compartilhamento: %(share)s no Conjunto: %(pool)s Código de " "retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "Erro ao ativar iSER para NetworkPortal: assegure-se de que RDMA seja " "suportado na porta iSCSI %(port)d no IP %(ip)s." #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "Erro encontrado durante limpeza de um anexo com falha: %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "Erro ao executar a API do CloudByte [%(cmd)s], Erro: %(err)s." msgid "Error executing EQL command" msgstr "Erro ao executar o comando EQL" #, python-format msgid "Error executing command via ssh: %s" msgstr "Erro ao executar comando via ssh: %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "Erro ao estender o volume %(vol)s: %(err)s." #, python-format msgid "Error extending volume: %(reason)s" msgstr "Erro ao estender volume:%(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "Erro ao localizar %(name)s." #, python-format msgid "Error finding %s." msgstr "Erro ao localizar %s." #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao obter ReplicationSettingData. Código de retorno: %(rc)lu. Erro: " "%(error)s." msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erro ao obter detalhes da versão do dispositivo. Código de retorno: %(ret." "status)d Mensagem: %(ret.data)s." #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "Erro ao obter o ID do domínio do nome %(name)s: %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "Erro ao obter ID do domínio do nome %(name)s: %(id)s." msgid "Error getting initiator groups." msgstr "Erro ao obter grupos do inicializador." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "Erro ao obter ID do conjunto do nome %(pool)s: %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "Erro ao obter o ID do conjunto do nome %(pool_name)s: %(err_msg)s." #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erro ao obter ação de replicação: %(id)s. Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erro ao obter detalhes da origem de replicação. Código de retorno: %(ret." "status)d Mensagem: %(ret.data)s." msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erro ao obter detalhes do destino de replicação. Código de retorno: %(ret." "status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erro ao obter a versão: svc: %(svc)s. Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." 
msgstr "" "Erro na Operação [%(operation)s] para o volume [%(cb_volume)s] no " "armazenamento CloudByte: [%(cb_error)s], código de erro: [%(error_code)s]." #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "Erro na resposta da API SolidFire: data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "Erro na criação de espaço para %(space)s de tamanho %(size)d GB" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "Erro em space-extend para o volume %(space)s com %(size)d GB adicional" #, python-format msgid "Error managing volume: %s." msgstr "Erro ao gerenciar o volume: %s." #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "Erro ao mapear o volume %(vol)s. %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erro ao modificar a sincronização de réplica: %(sv)s operação: " "%(operation)s. Código de retorno: %(rc)lu. Erro: %(error)s." #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erro ao modificar o Serviço: %(service)s Código de retorno: %(ret.status)d " "Mensagem: %(ret.data)s." #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Erro ao mover o volume: %(vol)s do projeto de origem: %(src)s para o projeto " "de destino: %(tgt)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." msgid "Error not a KeyError." msgstr "O erro não é um KeyError." msgid "Error not a TypeError." msgstr "O erro não é um TypeError" #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "Ocorreu um erro ao criar cgsnapshot %s." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "Ocorreu um erro ao excluir cgsnapshot %s." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "Ocorreu um erro ao atualizar o grupo de consistências %s." #, python-format msgid "Error parsing config file: %s" msgstr "Erro ao analisar o arquivo de configuração: %s" msgid "Error promoting secondary volume to primary" msgstr "Erro ao promover o volume secundário para primário" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "Erro ao remover o volume %(vol)s. %(error)s." #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Erro ao renomear o volume %(vol)s: %(err)s." #, python-format msgid "Error response: %s" msgstr "Erro de resposta: %s" msgid "Error retrieving volume size" msgstr "Erro ao recuperar o tamanho do volume" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erro ao enviar atualização de replicação para o ID de ação: %(id)s. Código " "de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" "Erro ao enviar atualização de replicação. Erro retornado: %(err)s. Ação: " "%(id)s." #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." 
msgstr "" "Erro ao configurar herança de replicação para %(set)s para o volume: %(vol)s " "projeto %(project)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erro ao separar o pacote: %(package)s da origem: %(src)s Código de retorno: " "%(ret.status)d Mensagem: %(ret.data)s." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "Erro ao desvincular o volume %(vol)s do conjunto. %(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "Erro ao verificar tamanho do clone no clone do Volume: %(clone)s Tamanho: " "%(size)d onSnapshot: %(snapshot)s" #, python-format msgid "Error while authenticating with switch: %s." msgstr "Erro ao autenticar-se com o comutador: %s" #, python-format msgid "Error while changing VF context %s." msgstr "Erro ao alterar o contexto do VF %s." #, python-format msgid "Error while checking the firmware version %s." msgstr "Erro ao verificar a versão do firmware %s." #, python-format msgid "Error while checking transaction status: %s" msgstr "Erro ao verificar status da transação: %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "Erro ao verificar se o VF está disponível para gerenciamento %s." #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "Erro ao conectar o comutador %(switch_id)s com o protocolo %(protocol)s. " "Erro: %(error)s." #, python-format msgid "Error while creating authentication token: %s" msgstr "Erro ao criar o token de autenticação: %s." #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "" "Erro ao criar a captura instantânea [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "Erro ao criar o volume [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "" "Erro ao excluir a captura instantânea [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "Erro ao excluir o volume [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "Erro ao estender o volume [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "Erro ao obter detalhes do %(op)s, código retornado: %(status)s." #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "Erro ao obter dados via ssh: (comando=%(cmd)s erro=%(err)s)." #, python-format msgid "Error while getting disco information [%s]." msgstr "Erro ao obter informações do disco [%s]." #, python-format msgid "Error while getting nvp value: %s." msgstr "Erro ao obter o valor de nvp: %s." #, python-format msgid "Error while getting session information %s." msgstr "Erro ao obter informações da sessão %s." #, python-format msgid "Error while parsing the data: %s." msgstr "Erro ao analisar os dados: %s" #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." 
msgstr "Erro ao consultar a página %(url)s no comutador, razão %(error)s." #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "Erro ao remover as novas zonas e cfgs na sequência de zonas. %(description)s." #, python-format msgid "Error while requesting %(service)s API." msgstr "Erro ao solicitar %(service)s da API." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "Erro ao executar CLI de zoneamento: (comando=%(cmd)s erro=%(err)s)." #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" "Erro ao atualizar as novas zonas e cfgs na sequência de zonas. Erro " "%(description)s." msgid "Error writing field to database" msgstr "Erro ao gravar campo no banco de dados" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "Erro[%(stat)s - %(res)s] ao obter o ID do volume." #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "Error[%(stat)s - %(res)s] ao restaurar captura instantânea [%(snap_id)s] " "para o volume [%(vol)s]." #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "Error[status] %(stat)s - [result] %(res)s] ao obter o ID do volume." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "Excedido o número máximo de tentativas de planejamento %(max_attempts)d para " "o volume %(volume_id)s" msgid "Exceeded the limit of snapshots per volume" msgstr "Limite de capturas instantâneas por volume excedido" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "Exceção ao anexar metavolume ao volume de destino %(volumename)s." #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "Exceção durante a criação da réplica do elemento. Nome do clone: " "%(cloneName)s Nome da origem: %(sourceName)s Especificações extras: " "%(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "Exceção no _select_ds_for_volume: %s." #, python-format msgid "Exception while forming the zone string: %s." msgstr "Exceção ao formar a sequência de zonas: %s." #, python-format msgid "Exception: %s" msgstr "Exceção: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Esperado um uuid, mas recebido %(uuid)s." #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "Esperado exatamente um nó chamado \"%s\"" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "Número inteiro esperado para node_count, svcinfo lsiogrp retornou: %(node)s." #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "Nenhuma saída esperada do comando da CLI %(cmd)s, obtido %(out)s." #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "vdisk único esperado retornado de lsvdisk ao filtrar no vdisk_UID. " "%(count)s foram retornados." #, python-format msgid "Expected volume size was %d" msgstr "Tamanho do volume esperado era %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." 
msgstr "" "Exportação de backup interrompida, esperava-se o status de backup " "%(expected_status)s mas obteve %(actual_status)s." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Exportação de registro interrompida, o serviço de backup atualmente " "configurado [%(configured_service)s] não é o serviço de backup que foi usado " "para criar esse backup [%(backup_service)s]." msgid "Extend volume error." msgstr "Erro ao estender volume." msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "Estender o volume é suportado para este driver apenas quando não existem " "capturas instantâneas." msgid "Extend volume not implemented" msgstr "Estender volume não implementado" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "Ativador VP de FAST não é instalado. Não é possível configurar a política de " "camadas para o volume" msgid "FAST is not supported on this array." msgstr "O FAST não é suportado nesta matriz." msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC é o protocolo, mas wwpns não são fornecidos pelo OpenStack." #, python-format msgid "Faield to unassign %(volume)s" msgstr "Falha ao remover designação de %(volume)s" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "Falha ao criar volume de cache %(volume)s. Erro: %(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "Falha ao incluir conexão para a malha=%(fabric)s: Erro:%(err)s" msgid "Failed cgsnapshot" msgstr "cgsnapshot falhou" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "Falha ao criar captura instantânea para o grupo %(response)s." #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" "Falha ao criar captura instantânea para o volume %(volname)s: %(response)s." #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "Falha ao obter o conjunto de zonas ativas a partir da malha %s." #, python-format msgid "Failed getting details for pool %s." msgstr "Falha ao obter detalhes para o conjunto %s." #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "Falha ao remover conexão da malha=%(fabric)s: Erro:%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "Falha ao Estender Volume %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "Falha ao efetuar login para o 3PAR (%(url)s) porque %(err)s" msgid "Failed to access active zoning configuration." msgstr "Falha ao acessar a configuração de zoneamento ativa." #, python-format msgid "Failed to access zoneset status:%s" msgstr "Falha ao acessar o status do conjunto de zonas:%s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "Falha ao adquirir um bloqueio de recurso. (serial: %(serial)s, inst: " "%(inst)s, ret: %(ret)s, erro padrão: %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "Falha ao incluir %(vol)s ao %(sg)s após %(retries)s tentativas." msgid "Failed to add the logical device." msgstr "Falha ao incluir o dispositivo lógico." 
#, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Falha ao incluir o volume %(volumeName)s no grupo de consistências " "%(cgName)s. Código de retorno: %(rc)lu. Erro: %(error)s." msgid "Failed to add zoning configuration." msgstr "Falha ao incluir a configuração de zoneamento." #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" "Falha ao designar o IQN do inicializador iSCSI. (porta: %(port)s, motivo: " "%(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Falha ao associar qos_specs: %(specs_id)s com tipo %(type_id)s." #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "Falha ao anexar destino iSCSI para o volume %(volume_id)s." #, python-format msgid "Failed to backup volume metadata - %s" msgstr "Falha ao fazer o backup dos metadados do volume - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "Falha ao fazer backup de metadados de volume – Objeto de backup de metadados " "'backup.%s.meta' já existe" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "Falha ao clonar volume da captura instantânea %s." #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "Falha ao conectar-se ao %(vendor_name)s, Matriz %(host)s: %(err)s" msgid "Failed to connect to Dell REST API" msgstr "Falha ao conectar-se com a API REST Dell" msgid "Failed to connect to array" msgstr "Falha ao conectar-se à matriz" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" "Falha ao conectar-se ao daemon sheep. Endereço: %(addr)s, porta: %(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "Falha ao copiar imagem para o volume: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "Falha ao copiar metadados para o volume: %(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "Falha ao copiar o volume; dispositivo de destino indisponível." msgid "Failed to copy volume, source device unavailable." msgstr "Falha ao copiar o volume; dispositivo de origem indisponível." #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "Falha ao criar CG %(cgName)s da captura instantânea %(cgSnapshot)s." #, python-format msgid "Failed to create IG, %s" msgstr "Falha ao criar IG, %s" msgid "Failed to create SolidFire Image-Volume" msgstr "Falha ao criar imagem SolidFire-Volume" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Falha ao criar Grupo de Volumes: %(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Falha ao criar um arquivo. (arquivo: %(file)s, ret: %(ret)s, erro padrão: " "%(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "Falha ao criar uma captura instantânea temporária para o volume %s." msgid "Failed to create api volume flow." msgstr "Falha ao criar o fluxo de volume da API." #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "Falha ao criar a captura instantânea cg %(id)s devido a %(reason)s." 
#, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "Falha ao criar grupo de consistências %(id)s devido a %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "Falha ao criar grupo de consistências %(id)s:%(ret)s." #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "Falha ao criar o grupo de consistências %s porque o grupo de consistências " "VNX não pode aceitar LUNs compactados como membros." #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "Falha ao criar grupo de consistências: %(cgName)s." #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "Falha ao criar grupo de consistências: %(cgid)s. Erro: %(excmsg)s." #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Falha ao criar grupo de consistências: %(consistencyGroupName)s Código de " "retorno: %(rc)lu. Erro: %(error)s." #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "Falha ao criar ID(s) de hardware em %(storageSystemName)s." #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "Falha ao criar o host: %(name)s. Verifique se ele existir na matriz." #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "Falha ao criar grupo de hosts: %(name)s. Verifique se ele existe na matriz." msgid "Failed to create iqn." msgstr "Falha ao criar iqn." #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "Falha ao criar destino iscsi para o volume %(volume_id)s." msgid "Failed to create manage existing flow." msgstr "Falha ao criar fluxo existente de gerenciamento." msgid "Failed to create manage_existing flow." msgstr "Falha ao criar fluxo manage_existing." msgid "Failed to create map on mcs, no channel can map." msgstr "Falha ao criar mapa no mcs; nenhum canal pode ser mapeado." msgid "Failed to create map." msgstr "Falha ao criar mapa." #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "Falha ao criar metadados para o volume: %(reason)s" msgid "Failed to create partition." msgstr "Falha ao criar a partição." #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "Falha ao criar qos_specs: %(name)s com especificações %(qos_specs)s." msgid "Failed to create replica." msgstr "Falha ao criar réplica." msgid "Failed to create scheduler manager volume flow" msgstr "Falha ao criar fluxo de volume de gerenciador de planejador" #, python-format msgid "Failed to create snapshot %s" msgstr "Falha ao criar a captura instantânea %s" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "Falha ao criar captura instantânea, nenhum ID de LUN é especificado" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "Falha ao criar a captura instantânea para cg: %(cgName)s." #, python-format msgid "Failed to create snapshot for volume %s." msgstr "Falha ao criar captura instantânea para o volume %s." #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "" "Falha ao criar política de captura instantânea no volume %(vol)s: %(res)s." 
#, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "" "Falha ao criar área de recursos de captura instantânea no volume %(vol)s: " "%(res)s." msgid "Failed to create snapshot." msgstr "Falha ao criar captura instantânea." #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "Falha ao criar captura instantânea. As informações de volume do CloudByte " "não foram localizadas para O volume OpenStack [%s]." #, python-format msgid "Failed to create south bound connector for %s." msgstr "Falha ao criar conector de ligação south para %s." #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "Falha ao criar grupo de armazenamentos %(storageGroupName)s." #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "Falha ao criar o conjunto thin, a mensagem de erro foi: %s" #, python-format msgid "Failed to create volume %s" msgstr "Falha ao criar o volume %s" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "Falha ao excluir SI para volume_id: %(volume_id)s porque ele tem par." #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Falha ao excluir um dispositivo lógico. (LDEV: %(ldev)s, motivo: %(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "Falha ao excluir cgsnapshot %(id)s devido a %(reason)s." #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "Falha ao excluir o grupo de consistências %(id)s devido a %(reason)s." #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "Falha ao excluir o grupo de consistências: %(cgName)s." #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Falha ao excluir o grupo de consistências: %(consistencyGroupName)s Código " "de retorno: %(rc)lu. Erro: %(error)s." msgid "Failed to delete device." msgstr "Falha ao excluir dispositivo." #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Falha ao excluir o conjunto de arquivos para o grupo de consistências " "%(cgname)s Erro: %(excmsg)s." msgid "Failed to delete iqn." msgstr "Falha ao excluir iqn." msgid "Failed to delete map." msgstr "Falha ao excluir mapa." msgid "Failed to delete partition." msgstr "Falha ao excluir a partição." msgid "Failed to delete replica." msgstr "Falha ao excluir a réplica." #, python-format msgid "Failed to delete snapshot %s" msgstr "Falha ao excluir a captura instantânea %s" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "Falha ao excluir a captura instantânea para cg: %(cgId)s." #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "" "Falha ao excluir captura instantânea para snapshot_id: %s porque ela tem par." msgid "Failed to delete snapshot." msgstr "Falha ao excluir captura instantânea." #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "Falha ao excluir o volume %(volumeName)s." #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "Falha ao excluir volume para volume_id: %(volume_id)s porque ele tem par." 
#, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "Falha ao remover o destino de iSCSI para o volume %(volume_id)s." msgid "Failed to determine blockbridge API configuration" msgstr "Falha ao determinar a configuração da API blockbridge" msgid "Failed to disassociate qos specs." msgstr "Falha ao desassociar qos specs. " #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Falha ao desassociar qos_specs: %(specs_id)s com tipo %(type_id)s." #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "" "Falha ao assegurar a área de recursos de captura instantânea; não foi " "possível localizar o volume para o ID %s" msgid "Failed to establish SSC connection." msgstr "Falha ao estabelecer conexão SSC." msgid "Failed to establish connection with Coho cluster" msgstr "Falha ao estabelecer a conexão com o cluster Coho" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "Falha ao executar a API do CloudByte [%(cmd)s]. Status de HTTP: %(status)s, " "Erro: %(error)s." msgid "Failed to execute common command." msgstr "Falha ao executar o comando comum." #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "Falha ao exportar para o volume: %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "Falha ao estender o volume %(name)s, Mensagem. de erro: %(msg)s." msgid "Failed to find QoSnode" msgstr "Falha ao localizar o QoSnode" msgid "Failed to find Storage Center" msgstr "Falha ao localizar o Centro de Armazenamento" msgid "Failed to find a vdisk copy in the expected pool." msgstr "Falha ao localizar uma cópia do disco virtual no conjunto esperado." msgid "Failed to find account for volume." msgstr "Falha ao localizar a conta para o volume." #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" "Falha ao localizar conjunto de arquivos para o caminho %(path)s, saída do " "comando: %(cmdout)s." #, python-format msgid "Failed to find group snapshot named: %s" msgstr "Falha ao localizar a captura instantânea de grupo denominada: %s" #, python-format msgid "Failed to find host %s." msgstr "Falha ao localizar host %s." #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" "Falha ao localizar o grupo de iniciadores iSCSI contendo %(initiator)s." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "" "Falha ao localizar o conjunto de armazenamentos para o volume de origem %s." #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Falha ao obter detalhes da conta do CloudByte para a conta [%s]." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "Falha ao obter detalhes do destino de LUN para o LUN %s" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "Falha ao obter detalhes do destino do LUN %s." #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "Falha ao obter a lista de destinos de LUN para o LUN %s" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "Falha ao obter o ID da partição para o volume %(volume_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." 
msgstr "" "Falha ao obter o ID de captura instantânea de RAID da captura instantânea " "%(snapshot_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "Falha ao obter ID de captura instantânea de RAID da captura instantânea: " "%(snapshot_id)s." msgid "Failed to get SplitMirror." msgstr "Falha ao obter SplitMirror" #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "Falha ao obter um recurso de armazenamento. O sistema tentará obter o " "recurso de armazenamento novamente. (recurse: %(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "Falha ao obter todas as associações de qos specs %s" msgid "Failed to get channel info." msgstr "Falha ao obter informações do canal." #, python-format msgid "Failed to get code level (%s)." msgstr "Falha ao obter nível de código (%s)." msgid "Failed to get device info." msgstr "Falha ao obter informações do dispositivo." #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "Falha ao obter o domínio porque o CPG (%s) não existe na matriz." msgid "Failed to get image snapshots." msgstr "Falha ao obter capturas instantâneas da imagem" #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "Falha ao obter IP no Canal %(channel_id)s com o volume: %(volume_id)s." msgid "Failed to get iqn info." msgstr "Falha ao obter informações do iqn." msgid "Failed to get license info." msgstr "Falha ao obter informações de licença." msgid "Failed to get lv info." msgstr "Falha ao obter informações de lv." msgid "Failed to get map info." msgstr "Falha ao obter informações do mapa." msgid "Failed to get migration task." msgstr "Falha ao obter a tarefa de migração." msgid "Failed to get model update from clone" msgstr "Falha ao obter atualização de modelo a partir do clone" msgid "Failed to get name server info." msgstr "Falha ao obter informações do servidor de nomes." msgid "Failed to get network info." msgstr "Falha ao obter informações de rede." #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "Falha ao obter novo ID da parte no novo conjunto: %(pool_id)s." msgid "Failed to get partition info." msgstr "Falha ao obter informações de partição." #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "Falha ao obter ID do conjunto com o volume %(volume_id)s." #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" "Falha ao obter informações de cópia remota para o %(volume)s devido a " "%(err)s." #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "" "Falha ao obter informações de cópia remota para o %(volume)s. Exceção " "%(err)s." msgid "Failed to get replica info." msgstr "Falha ao obter informações de réplica." msgid "Failed to get show fcns database info." msgstr "Falha ao obter/mostrar informações do banco de dados fcns." msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" "Falha ao obter o tamanho do volume %(vol) existente. O Gerenciamento de " "Volume falhou." #, python-format msgid "Failed to get size of volume %s" msgstr "Falha ao obter o tamanho do volume %s" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "Falha ao obter captura instantânea para o volume %s." 
msgid "Failed to get snapshot info." msgstr "Falha ao obter informações de captura instantânea." #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "Falha ao obter o IQN de destino para o LUN %s" msgid "Failed to get target LUN of SplitMirror." msgstr "Falha ao obter LUN de destino do SplitMirror." #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "Falha ao obter o portal de destino para o LUN %s" msgid "Failed to get targets" msgstr "Falha ao obter destinos" msgid "Failed to get wwn info." msgstr "Falha ao obter informações de wwn." #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "Falha ao obter, criar ou incluir o volume %(volumeName)s para visualização " "de mascaramento %(maskingViewName)s. A mensagem de erro recebida foi " "%(errorMessage)s." msgid "Failed to identify volume backend." msgstr "Falha ao identificar backend do volume" #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "" "Falha ao vincular o conjunto de arquivos para o compartilhamento %(cgname)s. " "Erro: %(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "Falha ao efetuar logon na Matriz %s (login inválido?)." #, python-format msgid "Failed to login for user %s." msgstr "Falha ao efetuar login para o usuário %s." msgid "Failed to login with all rest URLs." msgstr "Falha ao efetuar login com todas as URLs REST." #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "Falha ao realizar uma solicitação ao terminal do cluster Datera devido ao " "seguinte motivo: %s" msgid "Failed to manage api volume flow." msgstr "Falha ao gerenciar fluxo de volume da API." #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Falha ao gerenciar %(type)s %(name)s existente, porque o tamanho relatado " "%(size)s não era um número de vírgula flutuante." #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "Falha ao gerenciar o volume existente %(name)s devido a um erro na obtenção " "do tamanho do volume." #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "Falha ao gerenciar o volume existente %(name)s porque a operação de " "renomeação falhou: Mensagem de erro: %(msg)s." #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Falha ao gerenciar volume existente %(name)s, porque o tamanho relatado " "%(size)s não era um número de vírgula flutuante." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "Falha ao gerenciar o volume existente porque o conjunto do tipo de volume " "escolhido não corresponde ao compartilhamento NFS passado na referência de " "volume." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." 
msgstr "" "Falha ao gerenciar o volume existente porque o conjunto do tipo de volume " "escolhido não corresponde ao sistema de arquivos passado na referência de " "volume." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "" "Falha ao gerenciar o volume existente porque o conjunto do tipo de volume " "escolhido não corresponde ao conjunto do host." #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "Falha ao gerenciar o volume existente devido a uma incompatibilidade de " "grupo de E/S. O grupo de E/S do volume a ser gerenciado é %(vdisk_iogrp)s, e " "o grupo de E/S do tipo escolhido é %(opt_iogrp)s." #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "Falha ao gerenciar o volume existente porque o conjunto do volume a ser " "gerenciado não corresponde ao conjunto de backend. O conjunto do volume a " "ser gerenciado é %(vdisk_pool)s, e o conjunto do backend é %(backend_pool)s." msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "Falha ao gerenciar o volume existente porque o volume a ser gerenciado está " "compactado, e o tipo de volume escolhido não está compactado." msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "Falha ao gerenciar o volume existente porque o volume a ser gerenciado não " "está compactado, e o tipo de volume escolhido está compactado." msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "Falha ao gerenciar volume existente porque o volume a ser gerenciado não " "está em um grupo de E/S válido." msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "Falha ao gerenciar o volume existente porque o volume a ser gerenciado é " "thick, e o tipo de volume escolhido é thin." msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "Falha ao gerenciar o volume existente porque o volume a ser gerenciado é " "thin, e o tipo de volume escolhido é thick." #, python-format msgid "Failed to manage volume %s." msgstr "Falha ao gerenciar o volume %s." #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "Falha ao mapear um dispositivo lógico. (LDEV: %(ldev)s, LUN: %(lun)s, porta: " "%(port)s, ID: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "Falha ao migrar o volume pela primeira vez." msgid "Failed to migrate volume for the second time." msgstr "Falha ao migrar o volume pela segunda vez." #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "Falha ao mover mapeamento de LUN. Código de retorno: %s" #, python-format msgid "Failed to move volume %s." msgstr "Falha ao mover o volume %s." #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Falha ao abrir um arquivo. 
(aquivo: %(file)s, ret: %(ret)s, erro padrão: " "%(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Falha ao analisar saída da CLI \n" " comando: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "Falha ao analisar a opção de configuração 'keystone_catalog_info', deve ser " "na forma ::" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "Falha ao analisar a opção de configuração 'swift_catalog_info', deve ser na " "forma ::" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "Falha ao realizar uma reclamação com página zero. (LDEV: %(ldev)s, motivo: " "%(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "Falha ao remover exportação para o volume %(volume)s: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "Falha ao remover destino iscsi para o volume %(volume_id)s." #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Falha ao remover o volume %(volumeName)s do grupo de consistências " "%(cgName)s. Código de retorno: %(rc)lu. Erro: %(error)s." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "Falha ao remover o volume %(volumeName)s do SG padrão." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "Falha ao remover o volume %(volumeName)s do SG padrão: %(volumeName)s." #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "Falha ao remover: %(volumename)s. do grupo de armazenamento padrão para " "política FAST %(fastPolicyName)s." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "Falha ao renomear volume lógico %(name)s, mensagem de erro foi: %(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "Falha ao recuperar configuração de zoneamento ativo %s" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "" "Falha ao configurar a autenticação CHAP para o IQN de destino %(iqn)s. " "Detalhes: %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "Falha ao configurar QoS para o volume existente %(name)s. Mensagem de erro: " "%(msg)s." msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "Falha ao configurar atributo 'Incoming user' para SCST de destino." msgid "Failed to set partition." msgstr "Falha ao configurar partição." #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "Falha ao configurar permissões para o grupo de consistências %(cgname)s " "Erro: %(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "Falha ao especificar um dispositivo lógico para o volume %(volume_id)s a ser " "removido do mapeamento." #, python-format msgid "" "Failed to specify a logical device to be deleted. 
(method: %(method)s, id: " "%(id)s)" msgstr "" "Falha ao especificar um dispositivo lógico a ser excluído. (método: " "%(method)s, ID: %(id)s)" msgid "Failed to terminate migrate session." msgstr "Falha ao finalizar a sessão de migração." #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "Falha ao desvincular o volume %(volume)s" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Falha ao desvincular o conjunto de arquivos para o grupo de consistências " "%(cgname)s. Erro: %(excmsg)s." #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Falha ao remover mapeamento de um dispositivo lógico. (LDEV: %(ldev)s, " "motivo: %(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "Falha ao atualizar grupo de consistências: %(cgName)s." #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "Falha ao atualizar metadados para o volume: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "Falha ao atualizar ou excluir a configuração de zoneamento" msgid "Failed to update or delete zoning configuration." msgstr "Falha ao atualizar ou excluir a configuração de zoneamento." #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "Falha ao atualizar qos_specs: %(specs_id)s com especificações %(qos_specs)s." msgid "Failed to update quota usage while retyping volume." msgstr "Falha ao atualizar o uso de cota ao digitar novamente o volume." msgid "Failed to update snapshot." msgstr "Falha ao atualizar captura instantânea." #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "Falha ao atualizar o modelo com o modelo fornecido do driver %(model)s" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "Falha ao atualizar os metadados do volume %(vol_id)s usando os metadados " "%(src_type)s %(src_id)s fornecidos" #, python-format msgid "Failure creating volume %s." msgstr "Falha ao criar o volume %s." #, python-format msgid "Failure getting LUN info for %s." msgstr "Falha ao obter informações de LUN para %s." #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "Falha em update_volume_key_value_pair:%s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "Falha ao mover novo LUN clonado para %s." #, python-format msgid "Failure staging LUN %s to tmp." msgstr "Falha na preparação do LUN %s para tmp." msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "Erro fatal: Usuário não é permitido para consulta de volumes NetApp." #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "O Fexvisor falhou ao incluir o volume %(id)s devido a %(reason)s." #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor falhou ao associar o volume %(vol)s no grupo %(group)s devido a " "%(ret)s." #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor falhou ao remover o volume %(vol)s no grupo %(group)s devido a " "%(ret)s." #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "Fexvisor falhou ao remover o volume %(id)s devido a %(reason)s." 
#, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "Falha no Fibre Channel SAN Lookup: %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "Operação Fibre Channel Zone falhou: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "Falha no controle de conexão Fibre Channel: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "O arquivo %(file_path)s não pôde ser localizado." #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "O arquivo %(path)s tem arquivo de backup inválido %(bfile)s, interrompendo." #, python-format msgid "File already exists at %s." msgstr "O arquivo já existe em %s." #, python-format msgid "File already exists at: %s" msgstr "O arquivo já existe em: %s" msgid "Find host in hostgroup error." msgstr "Erro ao localizar host no grupo de hosts." msgid "Find host lun id error." msgstr "Erro ao localizar ID do LUN do host." msgid "Find lun group from mapping view error." msgstr "Erro ao localizar grupo de LUNs da visualização de mapeamento." msgid "Find lun number error." msgstr "Erro ao localizar número de LUN." msgid "Find mapping view error." msgstr "Erro ao localizar a visualização de mapeamento." msgid "Find portgroup error." msgstr "Erro ao localizar grupo de portas." msgid "Find portgroup from mapping view error." msgstr "Erro ao localizar grupo de portas da visualização de mapeamento." #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "A política de cache de atualização requer a versão WSAPI " "'%(fcache_version)s' versão '%(version)s' está instalada." #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "Volume de designação do Flexvisor com falha: %(id)s:%(status)s." #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Volume de designação do Flexvisor falhou:%(id)s:%(status)s." #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "O Flexvisor não pôde localizar a captura instantânea do volume %(id)s no " "grupo %(vgid)s da captura instantânea %(vgsid)s." #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "Flexvisor cria volume com falha.:%(volumeid)s:%(status)s." #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "O Flexvisor falhou ao excluir o volume %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "O Flexvisor falhou ao incluir o volume %(id)s ao grupo %(cgid)s." #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor falhou ao designar o volume %(id)s devido a não poder consultar o " "status pelo id de evento." #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor falhou ao designar volume %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "O Flexvisor falhou ao designar o volume %(volume)s iqn %(iqn)s." #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "O Flexvisor falhou ao clonar o volume %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." 
msgstr "O Flexvisor falhou ao clonar o volume (falha ao obter evento) %(id)s." #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" "O Flexvisor falhou ao criar a captura instantânea para o volume %(id)s:" "%(status)s." #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "O Flexvisor falhou ao criar a captura instantânea para o volume (falha ao " "obter evento) %(id)s." #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "O Flexvisor falhou ao criar o volume %(id)s no grupo %(vgid)s." #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor falhou ao criar volume %(volume)s: %(status)s." #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor falhou ao criar volume (get event) %s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" "O Flexvisor falhou ao criar volume da captura instantânea %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" "O Flexvisor falhou ao criar volume da captura instantânea %(id)s:%(status)s." #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "O Flexvisor falhou ao criar volume da captura instantânea (falha ao obter " "evento) %(id)s." #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "O Flexvisor falhou ao excluir a captura instantânea %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" "O Flexvisor falhou ao excluir a captura instantânea (falha ao obter " "evento)%(id)s." #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "O Flexvisor falhou ao excluir o volume %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "O Flexvisor falhou ao estender o volume %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "O Flexvisor falhou ao estender o volume %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "O Flexvisor falhou ao estender o volume (falha ao obter evento) %(id)s." #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "" "O Flexvisor falhou ao obter informações do conjunto %(id)s: %(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "O Flexvisor falhou ao obter o id de captura instantânea do volume %(id)s do " "grupo %(vgid)s." #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor falhou ao remover o volume %(id)s do grupo %(cgid)s." #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "O Flexvisor falhou ao efetuar spawn do volume de captura instantânea %(id)s:" "%(status)s." #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor falhou ao efetuar spawn do volume de captura instantânea (falha ao " "obter evento) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." 
msgstr "Flexvisor falhou ao remover a designação do volume %(id)s:%(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" "Flexvisor falhou ao remover designação do volume (obter evento) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "" "O Flexvisor falhou ao remover a designação do volume: %(id)s:%(status)s." #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "" "O Flexvisor não conseguiu localizar as informações do volume de origem " "%(id)s." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" "Falha na remoção de designação de volume pelo Flexvisor %(id)s:%(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "O volume do Flexvisor %(id)s falhou ao unir o grupo %(vgid)s." #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "O volume %s não existe no dispositivo Nexenta Store." #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS não está em execução, estado: %s." msgid "Gateway VIP is not set" msgstr "Gateway VIP não está configurado" msgid "Get FC ports by port group error." msgstr "Erro ao obter portas FC por grupo de portas." msgid "Get FC ports from array error." msgstr "Erro ao obter portas FC da matriz." msgid "Get FC target wwpn error." msgstr "Erro ao obter wwpn de destino do FC." msgid "Get HyperMetroPair error." msgstr "Erro ao obter HyperMetroPair." msgid "Get LUN group by view error." msgstr "Erro ao obter grupo de LUN por visualização." msgid "Get LUNcopy information error." msgstr "Erro ao obter informações de LUNcopy." msgid "Get QoS id by lun id error." msgstr "Erro ao obter ID de QoS por ID do LUN." msgid "Get QoS information error." msgstr "Erro ao obter informações de QoS." msgid "Get QoS policy error." msgstr "Erro ao obter política de QoS." msgid "Get SplitMirror error." msgstr "Erro ao obter SplitMirror." msgid "Get active client failed." msgstr "Falha ao ativar o cliente" msgid "Get array info error." msgstr "Erro ao obter informações da matriz." msgid "Get cache by name error." msgstr "Erro ao obter cache por nome." msgid "Get connected free FC wwn error." msgstr "Erro ao obter wwn FC livre conectado." msgid "Get engines error." msgstr "Erro ao obter mecanismos." msgid "Get host initiators info failed." msgstr "Falha ao obter informações de inicializadores de host." msgid "Get hostgroup information error." msgstr "Erro ao obter informações do grupo de hosts." msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "Erro ao obter informações da porta iSCSI; verifique o IP de destino " "configurada no arquivo conf huawei." msgid "Get iSCSI port information error." msgstr "Erro ao obter informações da porta iSCSI." msgid "Get iSCSI target port error." msgstr "Erro ao obter porta de destino iSCSI." msgid "Get lun id by name error." msgstr "Erro ao obter ID de LUN pelo nome." msgid "Get lun migration task error." msgstr "Erro ao obter tarefa de migração de LUN." msgid "Get lungroup id by lun id error." msgstr "Erro ao obter ID do grupo de LUNs por ID do LUN." msgid "Get lungroup information error." msgstr "Erro ao obter informações do grupo de LUNs." msgid "Get migration task error." msgstr "Erro ao obter tarefa de migração." msgid "Get pair failed." msgstr "Erro ao obter par." msgid "Get partition by name error." msgstr "Erro ao obter partição por nome." 
msgid "Get partition by partition id error." msgstr "Erro ao obter partição por ID da partição." msgid "Get port group by view error." msgstr "Erro ao obter grupo de portas por visualização." msgid "Get port group error." msgstr "Erro ao obter grupo de portas." msgid "Get port groups by port error." msgstr "Erro ao obter grupos de porta por porta." msgid "Get ports by port group error." msgstr "Erro ao obter portas por grupo de portas." msgid "Get remote device info failed." msgstr "Falha ao obter informações do dispositivo remoto." msgid "Get remote devices error." msgstr "Erro ao obter dispositivos remotos." msgid "Get smartcache by cache id error." msgstr "Erro ao obter smartcache por ID de cache." msgid "Get snapshot error." msgstr "Erro ao obter captura instantânea." msgid "Get snapshot id error." msgstr "Erro ao obter ID de captura instantânea." msgid "Get target IP error." msgstr "Erro ao obter IP de destino." msgid "Get target LUN of SplitMirror error." msgstr "Erro ao obter LUN de destino do SplitMirror." msgid "Get views by port group error." msgstr "Erro ao obter visualizações por grupo de portas." msgid "Get volume by name error." msgstr "Erro ao obter volume por nome." msgid "Get volume error." msgstr "Erro ao obter volume." #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "A visão de metadados não pode ser atualizada; existe a chave %(key)s para o " "ID do volume %(volume_id)s" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" "Metadados do Glance para o volume/captura instantânea %(id)s não pôde ser " "encontrado." #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "O arquivo de configuração do Gluster em %(config)s não existe" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Falha da API Google Cloud Storage: %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Falha de conexão do Google Cloud Storage: %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Falha do oauth2 do Google Cloud Storage: %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "Informações de caminho inválido obtido do DRBDmanage! (%s)" msgid "HBSD error occurs." msgstr "Erro HBSD ocorreu." msgid "HNAS has disconnected SSC" msgstr "O HNAS possui SSC desconectado" msgid "HPELeftHand url not found" msgstr "URL HPELeftHand não localizada" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." msgstr "" "A verificação do certificado HTTPS foi solicitada, mas não pode ser ativada " "com a versão do módulo purestorage %(version)s. Faça upgrade para uma versão " "mais recente para ativar esse recurso." #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "O tamanho de bloco hash foi alterado desde o último backup. O novo tamanho " "de bloco hash: %(new)s. Antigo tamanho de bloco hash: %(old)s. Execute um " "backup completo." #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "Não tem camada(s) %(tier_levels)s criada(s)." #, python-format msgid "Hint \"%s\" not supported." msgstr "Sugestão \"%s\" não suportada." 
msgid "Host" msgstr "Host" #, python-format msgid "Host %(host)s could not be found." msgstr "O host %(host)s não pôde ser localizado." #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "O host %(host)s não corresponde ao conteúdo do certificado x509: CommonName " "%(commonName)s." #, python-format msgid "Host %s has no FC initiators" msgstr "Host %s não possui inicializadores do FC" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "Host %s não possui inicializador do iSCSI" #, python-format msgid "Host '%s' could not be found." msgstr "O host '%s' não pôde ser localizado." #, python-format msgid "Host group with name %s not found" msgstr "Grupo de hosts com o nome %s não localizado" #, python-format msgid "Host group with ref %s not found" msgstr "Grupo de hosts com ref %s não localizado" msgid "Host is NOT Frozen." msgstr "O Host NÃO está Paralisado" msgid "Host is already Frozen." msgstr "O Host já está Paralisado" msgid "Host not found" msgstr "Host não localizado" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Host não localizado. Falha ao remover %(service)s no %(host)s." #, python-format msgid "Host replication_status must be %s to failover." msgstr "O replication_status do host deve ser %s para executar failover." #, python-format msgid "Host type %s not supported." msgstr "Tipo de host %s não suportado." #, python-format msgid "Host with ports %(ports)s not found." msgstr "Host com as portas %(ports)s não localizado." msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "Hypermetro a Replicação não podem ser usados no mesmo volume_type." #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "O grupo de E/S %(iogrp)d não é válido; os grupos de E/S disponíveis são " "%(avail)s." msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "Endereço IP/nome do host da API Blockbridge." msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "Se a compactação estiver configurada como True, rsize também deverá ser " "configurado (não igual a -1)." msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "Se nofmtdisk for configurado para True, rsize também deverá ser configurado " "para -1." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "Valor ilegal '%(prot)s' especificado para flashsystem_connection_protocol: " "valor(es) válido(s) são %(enabled)s." msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "Valor ilegal especificado para IOTYPE: 0, 1 ou 2." msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "" "Valor ilegal especificado para smarttier: configurado para 0, 1, 2 ou 3." msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "Valor ilegal especificado para storwize_svc_vol_grainsize: configurado como " "32, 64, 128 ou 256." msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "Valor ilegal especificado para thin: não é possível configurar thin e thick " "ao mesmo tempo." #, python-format msgid "Image %(image_id)s could not be found." msgstr "A imagem %(image_id)s não pôde ser localizada." #, python-format msgid "Image %(image_id)s is not active." 
msgstr "A imagem %(image_id)s não está ativa." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "A imagem %(image_id)s é inaceitável: %(reason)s" msgid "Image location not present." msgstr "Local da imagem ausente." #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "O tamanho virtual da imagem é %(image_size)d GB e não se ajusta a um volume " "de tamanho %(volume_size)dGB." msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "Erro ImageBusy ocorrido ao excluir volume rbd. Isso pode ter sido causado " "por uma conexão de um cliente que travou e, em caso afirmativo, pode ser " "resolvido tentando novamente a exclusão após 30 segundos." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "Importação de registro falhou, não é possível localizar o serviço de backup " "para executar a importação. Solicitar serviço %(service)s" msgid "Incorrect request body format" msgstr "Formato do corpo da solicitação incorreta" msgid "Incorrect request body format." msgstr "Formato do corpo da solicitação incorreto." msgid "Incremental backups exist for this backup." msgstr "Os backups incrementais existem para esse backup." #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Exceção da CLI Infortrend: %(err)s Parâmetro: %(param)s (Código de retorno: " "%(rc)s) (Saída: %(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "Camada inicial: {}, a política: {} não é válida." msgid "Input type {} is not supported." msgstr "O tipo de entrada {} não é suportado." msgid "Input volumes or snapshots are invalid." msgstr "Os volumes ou capturas instantâneas de entrada são inválidos." msgid "Input volumes or source volumes are invalid." msgstr "Os volumes de entrada ou de origem são inválidos." #, python-format msgid "Instance %(uuid)s could not be found." msgstr "A instância %(uuid)s não pôde ser localizada." msgid "Insufficient free space available to extend volume." msgstr "Espaço livre insuficiente disponível para o volume de extensão." msgid "Insufficient privileges" msgstr "Privilégios insuficientes" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "" "O valor do intervalo (em segundos) entre novas tentativas de conexão com o " "cluster ceph." #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "" "Portas %(port)s de %(protocol)s inválidas especificadas para io_port_list." #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Inválido Domínio 3PAR: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "Valor ALUA inválido. O valor ALUA deve ser 1 ou 0." 
msgid "Invalid Ceph args provided for backup rbd operation" msgstr "Argumentos fornecidos de Ceph inválidos para a operação rbd de backup" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "CgSnapshot inválido: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "ConsistencyGroup inválido: %(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "ConsistencyGroup inválido: O status do grupo de consistências deve estar " "disponível ou com erro, porém, o status atual é: em uso" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "ConsistencyGroup inválido: O status do grupo de consistências deve estar " "disponível, mas o status atual é: %s." msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" "ConsistencyGroup inválido: Nenhum host para criar grupo de consistências" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "Versão HPELeftHand da API inválida localizada: %(found)s. A versão " "%(minimum)s ou maior são necessárias para gerenciar/não gerenciar o suporte." #, python-format msgid "Invalid IP address format: '%s'" msgstr "Formato de endereço IP inválido: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "Especificação de QoS inválida detectada ao obter política de QoS para o " "volume %s" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "Destino de Replicação Inválido: %(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "Tipo de autenticação VNX inválido: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Especificação de compartilhamento de armazenamento Virtuozzo inválido: %r. " "Deve ser: [MDS1[,MDS2],...:/][:PASSWORD]." #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" "Versão XtremIO %(cur)s inválida, versão %(min)s ou posterior é necessária" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "" "Cotas alocadas inválidas definidas para as cotas de projeto a seguir: %s" msgid "Invalid argument" msgstr "Argumento inválido" msgid "Invalid argument - negative seek offset." msgstr "Argumento inválido – deslocamento de busca negativo." #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "Argumento inválido - whence=%s não suportado" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "Argumento inválido – whence=%s não suportado." #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "Modo de anexamento inválido '%(mode)s' para o volume %(volume_id)s." #, python-format msgid "Invalid auth key: %(reason)s" msgstr "Chave de autenticação inválida: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "Backup inválido: %(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "URL da API barbican inválida: a versão é necessária, por exemplo, " "'http[s]://|[:port]/'. 
A URL especificada é: %s" msgid "Invalid cgsnapshot" msgstr "cgsnapshot inválida" msgid "Invalid chap user details found in CloudByte storage." msgstr "" "Detalhes do usuário chap inválidos localizados no armazenamento do CloudByte." #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "Resposta de inicialização de conexão inválida %(name)s" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "" "Resposta de inicialização de conexão inválida de volume %(name)s: %(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo de conteúdo inválido %(content_type)s." msgid "Invalid credentials" msgstr "Credenciais inválidas" #, python-format msgid "Invalid directory: %s" msgstr "Diretório inválido: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "Tipo de adaptador de disco inválido: %(invalid_type)s." #, python-format msgid "Invalid disk backing: %s." msgstr "Disco inválido auxiliar: %s." #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "Tipo de disco inválido: %(disk_type)s." #, python-format msgid "Invalid disk type: %s." msgstr "Tipo de disco inválido: %s." #, python-format msgid "Invalid host: %(reason)s" msgstr "Host inválido: %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "Versão de hpe3parclient inválida localizada (%(found)s). Versão %(minimum)s " "ou maior é necessária. Execute \"pip install --upgrade python-3parclient\" " "para fazer upgrade do hpe3parclient." #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "Versão de hpelefthandclient inválida localizada (%(found)s). Versão " "%(minimum)s ou maior é necessária. Execute 'pip install --upgrade python-" "lefthandclient' para fazer upgrade do hpelefthandclient." #, python-format msgid "Invalid image href %(image_href)s." msgstr "Imagem inválida href %(image_href)s." msgid "Invalid image identifier or unable to access requested image." msgstr "" "O identificador da imagem inválido ou incapaz de acessar a imagem solicitada." msgid "Invalid imageRef provided." msgstr "imageRef inválida fornecida." msgid "Invalid initiator value received" msgstr "Valor de iniciador inválido recebido" msgid "Invalid input" msgstr "Entrada inválida" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Entrada inválida recebida: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtro is_public inválido [%s]" #, python-format msgid "Invalid lun type %s is configured." msgstr "Um tipo de LUN inválido %s foi configurado." #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Tamanho de metadados inválido: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Metadados inválidos: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "Base de ponto de montagem inválido: %s" #, python-format msgid "Invalid mount point base: %s." msgstr "Base de ponto de montagem inválida: %s." #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Novo nome do snapCPG inválido para nova digitação. new_snap_cpg='%s'." 
#, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Número de porta inválido %(config)s para a porta RPC do Coho." #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "" "Um tipo de pré-busca inválido '%s' está configurado. O PrefetchType deve ser " "em 0,1,2,3." #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "Inválidas qos specs: %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "Solicitação inválida para anexar volume a um destino inválido" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "Solicitação inválida para anexar o volume a um modo inválido. O modo de " "anexação deve ser 'rw' ou 'ro'" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Expiração de reserva inválida %(expire)s." msgid "Invalid response header from RPC server" msgstr "Cabeçalho de resposta inválido a partir do servidor RPC" #, python-format msgid "Invalid secondary id %s." msgstr "ID secundário inválido %s" #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "" "secondary_backend_id inválido especificado. O ID de backend válido é %s." msgid "Invalid service catalog json." msgstr "Catálogo de serviço json inválido." msgid "Invalid sheepdog cluster status." msgstr "Status do cluster sheepdog inválido." #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "Snapshot inválido: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "Status inválido: '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "" "Conjunto de armazenamentos inválido %s solicitado. Digitar novamente com " "falha." #, python-format msgid "Invalid storage pool %s specificed." msgstr "Conjunto de armazenamento inválido %s especificado." msgid "Invalid storage pool is configured." msgstr "Um conjunto de armazenamento inválido foi configurado." #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "Modo de sincronização inválido especificado, o modo permitido é %s." msgid "Invalid transport type." msgstr "Tipo de transporte inválido." #, python-format msgid "Invalid update setting: '%s'" msgstr "Configuração de atualização inválida: '%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "URL inválida: deve estar no formato 'http[s]://|[:port]/" "', A URL especificada é: %s" #, python-format msgid "Invalid value '%s' for force." msgstr "Valor inválido '%s' para força." #, python-format msgid "Invalid value '%s' for force. " msgstr "Valor inválido '%s' para força. " #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "" "Valor inválido '%s' para valores is_public. Valores aceitos: True ou False." #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "Valor inválido '%s' para skip_validation." 
#, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "Valor inválido para 'inicializável': '%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "Valor inválido para 'force': '%s'" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "Valor inválido para 'somente leitura': '%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "Valor inválido para 'scheduler_max_attempts'; deve ser >= 1" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "" "Valor inválido para a opção de configuração netapp_host_type do NetApp." msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "" "Valor inválido para a opção de configuração netapp_lun_ostype do NetApp." #, python-format msgid "Invalid value for age, %(age)s" msgstr "Valor inválido para a idade, %(age)s" #, python-format msgid "Invalid value: \"%s\"" msgstr "Valor inválido: \"%s\"" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "Tamanho de volume inválido fornecido para a solicitação de criação: %s (o " "tamanho do argumento deve ser um número inteiro (ou representação em " "sequência de um número inteiro) e maior que zero)." #, python-format msgid "Invalid volume type: %(reason)s" msgstr "Tipo de volume inválido: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Volume inválido: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "Volume inválido: Não é possível incluir o volume %(volume_id)s no grupo de " "consistências %(group_id)s porque o volume está em um estado inválido: " "%(status)s. Os estados válidos são: ('disponível', 'em uso')." #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "Volume inválido: Não é possível incluir o volume %(volume_id)s no grupo de " "consistências %(group_id)s porque o tipo de volume %(volume_type)s não é " "suportado pelo grupo de consistências." #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "Volume inválido: Não é possível incluir o volume fake-volume-uuid no grupo " "de consistências %(group_id)s porque o volume não pode ser localizado." #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "Volume inválido: Não é possível remover o volume fake-volume-uuid do grupo " "de consistências %(group_id)s porque ele não está no grupo." #, python-format msgid "Invalid volume_type passed: %s." msgstr "volume_type inválido transmitido: %s." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "volume_type inválido fornecido: %s (o tipo solicitado não é compatível; " "corresponda o volume de origem ou omita o argumento de tipo)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." 
msgstr "" "volume_type inválido fornecido: %s (o tipo solicitado não é compatível; " "recomendar omitir o argumento de tipo)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "volume_type inválido fornecido: %s (o tipo solicitado deve ser suportado por " "este grupo de consistências)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "Formato inválido de wwpns %(wwpns)s" msgid "Invoking web service failed." msgstr "A chamada do serviço da web falhou." msgid "Issue encountered waiting for job." msgstr "Emita espera encontrada para a tarefa." msgid "Issue encountered waiting for synchronization." msgstr "Emita encontrado aguardando a sincronização." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "Falha ao emitir um failover porque a replicação não está configurada " "corretamente." msgid "Item not found" msgstr "Item não localizado" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" "ID da tarefa não localizado na resposta [%s] do volume de criação do " "CloudByte." #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "" "ID da tarefa não localizado na resposta de exclusão de volume [%s] do " "CloudByte." msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "Nomes de chaves só podem conter caracteres alfanuméricos, sublinhados, " "pontos, vírgulas e hifens." #, python-format msgid "KeyError: %s" msgstr "KeyError: %s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "" "O Keystone versão 3 ou maior deve ser usado para obter o suporte de cota " "aninhado." #, python-format msgid "LU does not exist for volume: %s" msgstr "O LU não existe para o volume: %s" msgid "LUN export failed!" msgstr "Falha ao exportar LUN!" msgid "LUN id({}) is not valid." msgstr "O ID de LUN ({}) não é válido." msgid "LUN map overflow on every channel." msgstr "Estouro do mapa de LUN em todos os canais." #, python-format msgid "LUN not found with given ref %s." msgstr "LUN não localizado com ref %s dada." msgid "LUN number ({}) is not an integer." msgstr "O número de LUN ({}) não é um número inteiro." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "O número do LUN está fora do limite no ID de canal: %(ch_id)s." #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "O LUN com ref %(ref)s dada não satisfaz o tipo de volume. Certifique-se que " "o volume LUN com recurso ssc está presente no vserver %(vs)s." #, python-format msgid "Last %s cinder syslog entries:-" msgstr "Últimas %s entradas syslog do cinder:-" msgid "LeftHand cluster not found" msgstr "cluster LeftHand não localizado" msgid "License is unavailable." msgstr "A licença está indisponível." #, python-format msgid "Line %(dis)d : %(line)s" msgstr "Linha %(dis)d: %(line)s" msgid "Link path already exists and its not a symlink" msgstr "Caminho do link já existe e não é um symlink" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "Clonar vinculado do volume de origem não suportado no estado: %s." msgid "Lock acquisition failed." msgstr "A aquisição de bloqueio falhou." msgid "Logout session error." msgstr "Erro de sessão de logout." 
msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "Serviço de consulta não configurado. Opção de configuração para " "fc_san_lookup_service é necessária especificar uma implementação concreta do " "serviço de consulta." msgid "Lun migration error." msgstr "Erro de migração de lun." #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "" "O MD5 do objeto: %(object_name)s antes: %(md5)s e depois: %(etag)s não é o " "mesmo." #, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED: %r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED: AUTH_ERROR: %r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED: RPC_MISMATCH: %r" #, python-format msgid "Malformed fcns output string: %s" msgstr "Sequência de saída de fcns malformada: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Corpo da mensagem malformado: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "Sequência de servidor de nomes mal formada: %s" msgid "Malformed request body" msgstr "Corpo da solicitação malformado" msgid "Malformed request body." msgstr "Corpo da solicitação malformado." msgid "Malformed request url" msgstr "URL da solicitação malformada" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "Resposta malformada para o comando %(cmd)s: %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "Atributo scheduler_hints malformado" #, python-format msgid "Malformed show fcns database string: %s" msgstr "Sequência de demonstração do banco de dados fcns malformada: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "Configuração de zona mal formada: (switch=%(switch)s zone_config=" "%(zone_config)s)." #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "Status da zona malformado: (switch=%(switch)s zone_config=%(zone_config)s)." msgid "Manage existing get size requires 'id'." msgstr "Gerenciar o tamanho da obtenção existente requer 'id'." msgid "Manage existing snapshot not implemented." msgstr "" "O gerenciamento de captura instantânea existente não está implementado." #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "Falha ao gerenciar volume existente devido a uma referência de backend " "inválido %(existing_ref)s: %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "Falha ao gerenciar volume existente devido a incompatibilidade de tipo de " "volume: %(reason)s" msgid "Manage existing volume not implemented." msgstr "Gerenciar volume existente não implementado." msgid "Manage existing volume requires 'source-id'." msgstr "Gerenciar volume existente requer 'source-id'." #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "O gerenciamento de volume não será suportado se FAST estiver ativado. " "Política FAST: %(fastPolicyName)s." msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "" "Não é permitido gerenciar capturas instantâneas para volumes com failover " "executado. " msgid "Map info is None due to array version not supporting hypermetro." 
msgstr "" "As informações do mapa são Nenhum porque a versão da matriz não suporta " "hypermetro." #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "Preparar mapeamento de %(id)s falhou ao concluir dentro de theallotted " "%(to)d segundos atribuído. Finalizando." #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "" "A visualização de mascaramento %(maskingViewName)s não foi excluída com " "sucesso" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "Número máximo de backups permitidos (%(allowed)d) excedido" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" "Número máximo de capturas instantâneas permitido (%(allowed)d) excedido" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "Número máximo de volumes permitido (%(allowed)d) excedido para a cota " "'%(name)s'." #, python-format msgid "May specify only one of %s" msgstr "Só é possível especificar um de %s" msgid "Metadata backup already exists for this volume" msgstr "Backup de metadados já existe para esse volume" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "Objeto de backup de metadados '%s' já existe" msgid "Metadata item was not found" msgstr "O item de metadados não foi localizado" msgid "Metadata item was not found." msgstr "Item de metadados não foi localizado." #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "Chave da propriedade de metadados %s maior que 255 caracteres" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "Valor da chave da propriedade de metadados %s maior que 255 caracteres" msgid "Metadata property key blank" msgstr "Chave da propriedade de metadados em branco" msgid "Metadata property key blank." msgstr "A chave da propriedade de metadados está em branco." msgid "Metadata property key greater than 255 characters." msgstr "A chave da propriedade de metadados tem mais de 255 caracteres." msgid "Metadata property value greater than 255 characters." msgstr "O valor da propriedade de metadados tem mais de 255 caracteres." msgid "Metadata restore failed due to incompatible version" msgstr "Restauração de metadados falhou devido à versão incompatível" msgid "Metadata restore failed due to incompatible version." msgstr "A restauração de metadados falhou devido à versão incompatível." #, python-format msgid "Migrate volume %(src)s failed." msgstr "Falha ao migrar o volume %(src)s." #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "Falha ao migrar volume entre o volume de origem %(src)s e o volume de " "destino %(dst)s." #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "A migração do LUN %s foi interrompida ou falhou." msgid "MirrorView/S enabler is not installed." msgstr "O ativador MirrorView/S não está instalado." msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "Módulo python 'purestorage' ausente, assegure-se de que a biblioteca esteja " "instalada e disponível." 
msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "" "Parâmetro de configuração de SAN Fibre Channel ausente - fc_fabric_names" msgid "Missing request body" msgstr "Corpo da solicitação ausente" msgid "Missing request body." msgstr "Corpo da solicitação ausente." #, python-format msgid "Missing required element '%s' in request body" msgstr "Elemento obrigatório '%s' ausente no corpo da solicitação" #, python-format msgid "Missing required element '%s' in request body." msgstr "Elemento obrigatório '%s' ausente no corpo da solicitação." msgid "Missing required element 'consistencygroup' in request body." msgstr "Elemento requerido ausente 'consistencygroup' no corpo da solicitação." msgid "Missing required element 'host' in request body." msgstr "Elemento necessário ausente 'host' no corpo da solicitação." msgid "Missing required element quota_class_set in request body." msgstr "Faltando elemento obrigatório quota_class_set no corpo da requisição." msgid "Missing required element snapshot in request body." msgstr "" "Captura instantânea de elemento requerido ausente no corpo da solicitação." msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "Diversos SerialNumbers localizados, quando somente um era esperado para esta " "operação. Mude o arquivo de configuração do EMC." #, python-format msgid "Multiple copies of volume %s found." msgstr "Várias cópias do volume %s localizadas." #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "Várias correspondências localizadas para '%s', use um ID para ser mais " "específico." msgid "Multiple profiles found." msgstr "Vários perfis localizados." msgid "Must implement a fallback schedule" msgstr "Deve implementar um planejamento de fallback" msgid "Must implement find_retype_host" msgstr "Deve implementar find_retype_host" msgid "Must implement host_passes_filters" msgstr "Deve implementar host_passes_filters" msgid "Must implement schedule_create_consistencygroup" msgstr "Deve implementar schedule_create_consistencygroup" msgid "Must implement schedule_create_volume" msgstr "Deve implementar schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "Deve implementar schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." msgstr "É necessário passar wwpn ou host para lsfabric." msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "Deve-se executar esse comando como um administrador em nuvem usando policy." "json do Keystone, que permite que o administrador em nuvem liste e obtenha " "qualquer projeto. " msgid "Must specify 'connector'" msgstr "Deve especificar 'conector'" msgid "Must specify 'connector'." msgstr "Deve especificar 'connector'." msgid "Must specify 'host'." msgstr "Deve especificar 'host'." msgid "Must specify 'new_volume'" msgstr "Deve especificar 'new_volume'" msgid "Must specify 'status'" msgstr "Deve especificar 'status'" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "Deve especificar 'status', ' attach_status' ou 'migration_status' para " "atualização." 
msgid "Must specify a valid attach status" msgstr "Deve especificar um status de anexo válido" msgid "Must specify a valid migration status" msgstr "Deve especificar um status de migração válido" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "Deve especificar uma pessoa válida %(valid)s, o valor '%(persona)s' é " "inválido." #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "Deve especificar um tipo de fornecimento válido %(valid)s, o valor " "'%(prov)s' é inválido." msgid "Must specify a valid status" msgstr "Deve especificar um status válido" msgid "Must specify an ExtensionManager class" msgstr "Deve especificar uma classe ExtensionManager" msgid "Must specify bootable in request." msgstr "Deve especificar inicializável na solicitação." msgid "Must specify protection domain name or protection domain id." msgstr "Deve especificar o nome ou o ID do domínio de proteção." msgid "Must specify readonly in request." msgstr "Deve especificar somente leitura na solicitação." msgid "Must specify snapshot source-name or source-id." msgstr "" "Deve-se especificar o source-name ou o source-id da captura instantânea." msgid "Must specify source-name or source-id." msgstr "Deve-se especificar o elemento source-name ou source-id." msgid "Must specify storage pool name or id." msgstr "Deve especificar um nome ou ID do conjunto de armazenamentos." msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "Deve-se especificar conjuntos de armazenamentos. Opções: sio_storage_pools." msgid "Must supply a positive value for age" msgstr "Deve fornecer um valor positivo para a idade" msgid "Must supply a positive, non-zero value for age" msgstr "Deve fornecer um número positivo, diferente de zero para a idade" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "Configuração de NAS ‘%(name)s=%(value)s' inválida. Deve ser ‘auto', 'true’ " "ou 'false'" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "O arquivo de configuração do NFS em %(config)s não existe" #, python-format msgid "NFS file %s not discovered." msgstr "Arquivo NFS %s não descoberto." msgid "NFS file could not be discovered." msgstr "O arquivo NFS não pôde ser descoberto." msgid "NaElement name cannot be null." msgstr "O nome NaElement não pode ser nulo. " msgid "Name" msgstr "Nome" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "Nome, descrição, add_volumes e remove_volumes não podem estar todos vazios " "no corpo da solicitação." msgid "Need non-zero volume size" msgstr "Necessário tamanho do volume diferente de zero" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "Nem MSG_DENIED nem MSG_ACCEPTED: %r" msgid "NetApp Cinder Driver exception." msgstr "Exceção no driver NetApp Cinder." #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "Novo tamanho a ser estendido deve ser maior que o tamanho atual. (atual: " "%(size)s, estendido: %(new_size)s)." #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "O novo tamanho deve ser maior que o tamanho real a partir do armazenamento " "de backend. realsize: %(oldsize)s, newsize: %(newsize)s." 
msgid "New volume size must be specified as an integer." msgstr "Novo tamanho do volume deve ser especificado como um número inteiro." msgid "New volume type must be specified." msgstr "Novo tipo de volume deve ser especificado." msgid "New volume type not specified in request_spec." msgstr "Tipo de volume novo não especificado em request_spec." #, python-format msgid "New volume_type same as original: %s." msgstr "Novo volume_type igual ao original: %s." msgid "Nimble Cinder Driver exception" msgstr "Exceção Nimble Cinder Driver" msgid "No FC initiator can be added to host." msgstr "Nenhum iniciador de FC pode ser incluído no host. " msgid "No FC port connected to fabric." msgstr "Nenhuma porta FC conectada à malha." msgid "No FCP targets found" msgstr "Nenhum destino do FCP localizado" msgid "No Port Group elements found in config file." msgstr "" "Nenhum elemento de Grupo de Portas localizado no arquivo de configuração." msgid "No VF ID is defined in the configuration file." msgstr "Nenhum ID de VF está definido no arquivo de configuração." msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "Nenhum portal iSCSI ativo com IPs de iSCSI fornecidos" #, python-format msgid "No available service named %s" msgstr "Nenhum serviço disponível denominado %s" #, python-format msgid "No backup with id %s" msgstr "Nenhum backup com o ID %s" msgid "No backups available to do an incremental backup." msgstr "Não há backups disponíveis para fazer um backup incremental." msgid "No big enough free disk" msgstr "Disco livre não é grande o suficiente" #, python-format msgid "No cgsnapshot with id %s" msgstr "Nenhuma cgsnapshot com o ID %s" msgid "No cinder entries in syslog!" msgstr "Nenhuma entrada do cinder no syslog!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "Nenhum LUN clonado denominado %s foi localizado no arquivador" msgid "No config node found." msgstr "Nenhum nó de configuração localizado." #, python-format msgid "No consistency group with id %s" msgstr "Nenhum grupo de consistências com o ID %s" #, python-format msgid "No element by given name %s." msgstr "Nenhum elemento pelo nome fornecido %s." msgid "No errors in logfiles!" msgstr "Sem erros nos arquivos de log!" #, python-format msgid "No file found with %s as backing file." msgstr "Nenhum arquivo localizado com %s como arquivo auxiliar." #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "Não resta nenhum ID de LUN. O número máximo de volumes que pode ser anexado " "ao host (%s) foi excedido." msgid "No free disk" msgstr "Nenhum disco livre" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "Nenhum portal de iscsi bom localizado na lista fornecida para %s." #, python-format msgid "No good iscsi portals found for %s." msgstr "Nenhum portal de iscsi bom localizado para %s." #, python-format msgid "No host to create consistency group %s." msgstr "Nenhum host para criar o grupo de consistências %s." msgid "No iSCSI-enabled ports on target array." msgstr "Nenhuma porta ativada para iSCSI na matriz de destino." msgid "No image_name was specified in request." msgstr "Nenhum image_name foi especificado na solicitação." msgid "No initiator connected to fabric." msgstr "Nenhum iniciador conectado à malha." 
#, python-format msgid "No initiator group found for initiator %s" msgstr "Nenhum grupo de inicializador localizado para o inicializador %s" msgid "No initiators found, cannot proceed" msgstr "Nenhum inicializador localizado, não é possível continuar" #, python-format msgid "No interface found on cluster for ip %s" msgstr "Nenhuma interface localizada no cluster para o IP %s" msgid "No ip address found." msgstr "Nenhum endereço IP localizado." msgid "No iscsi auth groups were found in CloudByte." msgstr "Nenhum grupo de autenticação iscsi foi localizado no CloudByte." msgid "No iscsi initiators were found in CloudByte." msgstr "Nenhum inicializador iscsi foi localizado no CloudByte." #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "Nenhum serviço de iscsi localizado para o volume do CloudByte [%s]." msgid "No iscsi services found in CloudByte storage." msgstr "Nenhum serviço de iscsi localizado no armazenamento CloudByte." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "" "Nenhum arquivo-chave especificado e incapaz de carregar a chave a partir de " "%(cert)s %(e)s." msgid "No mounted Gluster shares found" msgstr "Nenhum compartilhamento de Gluster montado foi localizado" msgid "No mounted NFS shares found" msgstr "Nenhum compartilhamento NFS montado foi localizado" msgid "No mounted SMBFS shares found." msgstr "Nenhum compartilhamento SMBFS montado foi localizado." msgid "No mounted Virtuozzo Storage shares found" msgstr "" "Nenhum compartilhamento de armazenamento Virtuozzo montado foi localizado" msgid "No mounted shares found" msgstr "Nenhum compartilhamento montado foi localizado" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "Nenhum nó foi localizado no grupo de E/S %(gid)s para o volume %(vol)s." msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "Nenhum conjunto está disponível para volumes de fornecimento. Assegure-se de " "que a opção de configuração netapp_pool_name_search_pattern esteja definida " "corretamente." msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "Nenhuma resposta foi recebida da API do usuário de autenticação iSCSI da " "lista de armazenamento do CloudByte chamada." msgid "No response was received from CloudByte storage list tsm API call." msgstr "" "Nenhuma resposta foi recebida da chamada API de tsm da lista de " "armazenamento CloudByte." msgid "No response was received from CloudByte's list filesystem api call." msgstr "" "Nenhuma resposta foi recebida da chamada api do sistema de arquivos da lista " "do CloudByte." msgid "No service VIP configured and no nexenta_client_address" msgstr "Nenhum VIP de serviço configurado e nenhum nexenta_client_address" #, python-format msgid "No snap found with %s as backing file." msgstr "Nenhum snap localizado com %s como arquivo auxiliar." #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "" "Nenhuma imagem de captura instantânea localizada no grupo de capturas " "instantâneas %s." #, python-format msgid "No snapshots could be found on volume %s." msgstr "Nenhuma captura instantânea pôde ser localizada no volume %s." #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "Nenhuma captura instantânea de origem fornecida para criar o grupo de " "consistências %s." 
#, python-format msgid "No storage path found for export path %s" msgstr "" "Nenhum caminho do armazenamento localizado para o caminho de exportação %s" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "Sem spec QoS %(specs_id)s." msgid "No suitable discovery ip found" msgstr "Nenhum IP de descoberta adequado foi localizado" #, python-format msgid "No support to restore backup version %s" msgstr "Não há suporte para restaurar a versão de backup %s" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "Nenhum ID de destino localizado para o volume %(volume_id)s." msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "Nenhum ID de LUN não utilizado está disponível no host; a anexação múltipla " "está ativada, o que requer que todos os IDs de LUN sejam exclusivos em todo " "o grupo de hosts." #, python-format msgid "No valid host was found. %(reason)s" msgstr "Nenhum host válido localizado. %(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "Nenhum host válido para o volume %(id)s com tipo %(type)s" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "Nenhum vdisk com o UID especificado pela referência %s." #, python-format msgid "No views found for LUN: %s" msgstr "Nenhuma visualização localizada para a LUN: %s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "Nenhum volume no cluster com vserver %(vserver)s e caminho de junção " "%(junction)s " msgid "No volume service(s) started successfully, terminating." msgstr "Nenhum serviço de volume iniciado com êxito; finalizando." msgid "No volume was found at CloudByte storage." msgstr "Nenhum volume foi localizado no armazenamento CloudByte." msgid "No volume_type should be provided when creating test replica." msgstr "Nenhum volume_type deve ser fornecido ao criar a réplica de teste." msgid "No volumes found in CloudByte storage." msgstr "Nenhum volume localizado no armazenamento CloudByte." msgid "No weighed hosts available" msgstr "Nenhum host ponderado disponível" #, python-format msgid "Not a valid string: %s" msgstr "Não é uma sequência válida: %s" msgid "Not a valid value for NaElement." msgstr "Nenhum valor válido para NaElement." #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "Não foi possível encontrar um datastore adequado para o volume: %s." msgid "Not an rbd snapshot" msgstr "Não uma captura instantânea de rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Não autorizado para a imagem %(image_id)s." msgid "Not authorized." msgstr "Não autorizado." #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "Não há espaço suficiente no backend (%(backend)s)" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "" "Não há espaço de armazenamento suficiente no compartilhamento do ZFS para " "executar essa operação." msgid "Not stored in rbd" msgstr "Não armazenado em rbd" msgid "Nova returned \"error\" status while creating snapshot." msgstr "Nova retornou o status \"erro\" ao criar a captura instantânea." msgid "Null response received from CloudByte's list filesystem." msgstr "Resposta nula recebida do sistema de arquivos da lista de CloudByte." msgid "Null response received from CloudByte's list iscsi auth groups." 
msgstr "" "Resposta nula recebida dos grupos de autenticação iscsi da lista do " "CloudByte." msgid "Null response received from CloudByte's list iscsi initiators." msgstr "" "Resposta nula recebida de inicializadores de iscsi da lista do CloudByte." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "" "Resposta nula recebida do serviço de iscsi do volume da lista do CloudByte." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "Resposta nula recebida ao criar volume [%s] no armazenamento CloudByte." #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" "Resposta nula recebida ao excluir o volume [%s] no armazenamento do " "CloudByte." #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "Resposta nula recebida ao consultar a tarefa baseada em [%(operation)s] " "[%(job)s] no armazenamento CloudByte." msgid "Number of retries if connection to ceph cluster failed." msgstr "Número de novas tentativas se a conexão com o cluster ceph falhou." msgid "Object Count" msgstr "Contagem de Objetos" msgid "Object Version" msgstr "Versão do Objeto" msgid "Object is not a NetApp LUN." msgstr "O objeto não é um LUN de NetApp." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "Em uma operação Estender, o erro ao incluir o volume para compor o volume " "%(volumename)s." msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "Um dos serviços de volume cinder é muito antigo para aceitar tal " "solicitação. Você está executando volumes cincer Liberty-Mitaka combinados?" msgid "One of the required inputs from host, port or scheme was not found." msgstr "" "Uma das entradas necessárias do host, porta ou esquema não foi localizada." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "Apenas solicitações %(value)s %(verb)s podem ser feitas ao %(uri)s a cada " "%(unit_string)s." msgid "Only one limit can be set in a QoS spec." msgstr "Somente um limite pode ser configurado em uma especificação de QoS." msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "Somente usuários com escopo do token definido para pais imediatos ou " "projetos raiz têm permissão para ver suas cotas filhas." msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "Somente volumes gerenciados pelo OpenStack podem ser não gerenciados." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "A operação falhou com o status=%(status)s. Dump completo: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "Operação não suportada: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "A opção gpfs_images_dir não está configurada corretamente." msgid "Option gpfs_images_share_mode is not set correctly." msgstr "A opção gpfs_images_share_mode não está configurada corretamente." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "A opção gpfs_mount_point_base não está configurada corretamente." msgid "Option map (cls._map) is not defined." msgstr "O mapa de opções (cls._map) não está definido." 
#, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "A origem de %(res)s %(prop)s deve ser um dos valores '%(vals)s'" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "" "Porta HTTPS de substituição para conectar-se ao servidor da API Blockbridge." #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" "O nome da partição é Nenhum; configure smartpartition:partitionname na chave." msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "A senha ou a chave privada SSH é requerida para autenticação: configure " "opção san_password ou san_private_key." msgid "Path to REST server's certificate must be specified." msgstr "O caminho para o certificado do servidor REST deve ser especificado." #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "Crie o conjunto %(pool_list)s antecipadamente!" #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "Crie antecipadamente a camada %(tier_levels)s no conjunto %(pool)s!" msgid "Please re-run cinder-manage as root." msgstr "Reexecute o gerenciamento do cinder como raiz." msgid "Please specify a name for QoS specs." msgstr "Por favor, especifique o nome para as especificações QoS." #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "A política não permite que %(action)s sejam executadas." #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "Pool %(poolNameInStr)s não foi encontrado." #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "O conjunto %s não existe no dispositivo Nexenta Store." #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "Conjunto do volume ['host'] %(host)s não localizado." #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "O conjunto do volume ['host'] falhou com: %(ex)s." msgid "Pool is not available in the volume host field." msgstr "O conjunto não está disponível no campo do host de volume." msgid "Pool is not available in the volume host fields." msgstr "O conjunto não está disponível nos campos do host de volume." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "Conjunto com o nome %(pool)s não foi localizado no domínio %(domain)s." #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" "O conjunto com o nome %(pool_name)s não foi localizado no domínio " "%(domain_id)s." #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "Conjunto: %(poolName)s. não está associado à camada de armazenamento para a " "política fast %(fastPolicy)s." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "PoolName deve estar no arquivo %(fileName)s." #, python-format msgid "Pools %s does not exist" msgstr "Conjuntos %s não existem" msgid "Pools name is not set." msgstr "O nome dos conjuntos não está configurado." #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "Status da cópia primária: %(status)s e sincronizada: %(sync)s." msgid "Project ID" msgstr "ID do Projeto" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." 
msgstr "" "As Cotas de Projeto não estão configuradas corretamente para as cotas " "aninhadas: %(reason)s." msgid "Protection Group not ready." msgstr "Grupo de Proteção não pronto." #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "Protocolo %(storage_protocol)s não é suportado para a família de " "armazenamento %(storage_family)s." msgid "Provided backup record is missing an id" msgstr "Registro de backup fornecido tem um ID ausente" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "Fornecido um status de captura instantânea %(provided)s não permitido para " "captura instantânea com status %(current)s." #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "Informações do provedor no armazenamento w.r.t CloudByte não foram " "localizadas para o volume [%s] do OpenStack." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Falha no driver de Pure Storage do Cinder: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "Especificações QoS %(specs_id)s já existem." #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "Especificações QoS %(specs_id)s ainda estão associadas com entidades." #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "A configuração de QoS está errada. %s deve ser > 0." #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "" "A política de QoS deve ser especificada para o IOTYPE e para outras " "qos_specs, política de QoS: %(qos_policy)s." #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "" "A política de QoS deve ser especificada para IOTYPE: 0, 1 ou 2, política de " "QoS: %(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "" "Conflito de upper_limit e lower_limit da política do QoS, política do QoS: " "%(qos_policy)s." #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "Especificação QoS %(specs_id)s não tem spec com chave %(specs_key)s." msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" "Especificações de QoS não são suportadas nesta família de armazenamento e " "versão de ONTAP." msgid "Qos specs still in use." msgstr "Qos specs ainda em uso." msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "Parâmetro de consulta por serviço está depreciado. Por favor, use um " "parâmetro binário no lugar." msgid "Query resource pool error." msgstr "Erro ao consultar conjunto de recursos." #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "" "O limite de cota %s deve ser igual ou maior que os recursos existentes." #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "A classe da cota %(class_name)s não pôde ser localizada." 
msgid "Quota could not be found" msgstr "A cota não pôde ser localizada" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Cota excedida para os recursos: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Quota excedida: codigo=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "A cota para o projeto %(project_id)s não pôde ser localizada." #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "Limite de cota inválido para o projeto '%(proj)s'para o recurso '%(res)s': O " "limite de %(limit)d é menor que o valor em uso de %(used)d" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "A reserva da cota %(uuid)s não pôde ser localizada." #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "O uso da cota para o projeto %(project_id)s não pôde ser localizado." #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "Op de Dif de RBD falhou – (ret=%(ret)s stderr=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "O IP do servidor REST deve ser especificado." msgid "REST server password must by specified." msgstr "A senha do servidor REST deve ser especificada." msgid "REST server username must by specified." msgstr "O nome do usuário do servidor REST deve ser especificado." msgid "RPC Version" msgstr "Versão do RPC" msgid "RPC server response is incomplete" msgstr "A resposta do servidor RPC está incompleta" msgid "Raid did not have MCS Channel." msgstr "O RAID não tinha o Canal MCS." #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "Atinja a limitação definida pela opção de configuração " "max_luns_per_storage_group. Operação para incluir %(vol)s no Grupo de " "Armazenamento %(sg)s será rejeitada." #, python-format msgid "Received error string: %s" msgstr "Sequência de erros recebida: %s" msgid "Reference must be for an unmanaged snapshot." msgstr "A referência deve ser para uma captura instantânea não gerenciada." msgid "Reference must be for an unmanaged virtual volume." msgstr "A referência deve ser para um volume virtual não gerenciado." msgid "Reference must be the name of an unmanaged snapshot." msgstr "A referência deve o nome de uma captura instantânea não gerenciada." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "" "A referência deve ser o nome do volume de um volume virtual não gerenciado." msgid "Reference must contain either source-id or source-name element." msgstr "A referência deve conter um elemento source-id ou source-name." msgid "Reference must contain either source-name or source-id element." msgstr "A referência deve conter um elemento source-id ou source-name." msgid "Reference must contain source-id or source-name element." msgstr "A referência deve conter um elemento source-id ou source-name." msgid "Reference must contain source-id or source-name key." msgstr "A referência deve conter a chave source-id ou source-name." msgid "Reference must contain source-id or source-name." msgstr "A referência deve conter source-id ou source-name." 
msgid "Reference must contain source-id." msgstr "A referência deve conter o source-id." msgid "Reference must contain source-name element." msgstr "A referência deve conter o elemento de nome de origem." msgid "Reference must contain source-name or source-id." msgstr "A referência deve conter source-name ou source-id." msgid "Reference must contain source-name." msgstr "A referência deve conter o nome de origem." msgid "Reference to volume to be managed must contain source-name." msgstr "A referência ao volume a ser gerenciado deve conter o source-name." #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "A referência ao volume: %s a ser gerenciado deve conter o source-name." #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "Recusando migrar o ID do volume: %(id)s. Verifique sua configuração porque a " "origem e o destino são o mesmo Grupo de Volume: %(name)s." msgid "Remote pool cannot be found." msgstr "O conjunto remoto não pode ser localizado." msgid "Remove CHAP error." msgstr "Erro ao remover CHAP." msgid "Remove fc from host error." msgstr "Erro ao remover FC do host." msgid "Remove host from array error." msgstr "Erro ao remover host da matriz." msgid "Remove host from hostgroup error." msgstr "Erro ao remover host do grupo de hosts." msgid "Remove iscsi from host error." msgstr "Erro ao remover iscsi do host." msgid "Remove lun from QoS error." msgstr "Erro ao remover LUN do QoS." msgid "Remove lun from cache error." msgstr "Erro ao remover LUN do cache." msgid "Remove lun from partition error." msgstr "Erro ao remover LUN da partição." msgid "Remove port from port group error." msgstr "Erro ao remover porta no grupo de portas." msgid "Remove volume export failed." msgstr "Falha ao remover exportação de volume." msgid "Rename lun on array error." msgstr "Erro ao renomear LUN na matriz." msgid "Rename snapshot on array error." msgstr "Erro ao renomear a captura instantânea na matriz." #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "A replicação de %(name)s para %(ssn)s falhou." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "" "Recurso de serviço de replicação não localizado em %(storageSystemName)s." #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "O Serviço de replicação não foi localizado em %(storageSystemName)s." msgid "Replication is not enabled" msgstr "A replicação não está ativada" msgid "Replication is not enabled for volume" msgstr "A replicação não está ativada para o volume" msgid "Replication not allowed yet." msgstr "Replicação ainda não permitida." #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "O status de replicação para o volume deve estar ativo ou ativo-parado, mas o " "status atual é: %s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "O status de replicação para o volume deve estar inativo, ativo-parado ou com " "erro, mas o status atual é: %s" msgid "Request body and URI mismatch" msgstr "Corpo da solicitação e incompatibilidade de URI" msgid "Request body contains too many items" msgstr "O corpo da solicitação contém excesso de itens" msgid "Request body contains too many items." 
msgstr "Corpo da requisição contém itens demais." msgid "Request body empty" msgstr "Corpo da solicitação vazio" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "A solicitação ao cluster Datera retornou o status inválido: %(status)s | " "%(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "O backup solicitado excede a cota permitida de gigabytes de Backup. " "Solicitados %(requested)sG, a cota é %(quota)sG e %(consumed)sG foi " "consumido." #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "O volume solicitado ou a captura instantânea excede a cota %(name)s " "permitida. Solicitados %(requested)sG, a cota é %(quota)sG e %(consumed)sG " "foi consumido." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "O tamanho do volume solicitado %(size)d é maior que o limite máximo " "permitido %(limit)d." msgid "Required configuration not found" msgstr "Configuração necessária não localizada" #, python-format msgid "Required flag %s is not set" msgstr "A sinalização %s necessária não está configurada" msgid "Requires an NaServer instance." msgstr "Requer uma instância NaServer." #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Reconfiguração do status do backup interrompida, o serviço de backup " "atualmente configurado [%(configured_service)s] não é o serviço de backup " "que foi usado para criar esse backup [%(backup_service)s]." #, python-format msgid "Resizing clone %s failed." msgstr "Falha ao redimensionar clone %s." msgid "Resizing image file failed." msgstr "O redimensionamento do arquivo de imagem falhou." msgid "Resource could not be found." msgstr "O recurso não pôde ser localizado." msgid "Resource not ready." msgstr "O recurso não está pronto." #, python-format msgid "Response error - %s." msgstr "Erro de resposta - %s." msgid "Response error - The storage-system is offline." msgstr "Erro de resposta - O sistema de armazenamento está off-line." #, python-format msgid "Response error code - %s." msgstr "Código de erro de resposta – %s." msgid "RestURL is not configured." msgstr "RestURL não está configurado." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Restauração de backup interrompida, esperava-se o status de volume " "%(expected_status)s, mas obteve %(actual_status)s." #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Restauração de backup interrompida, o serviço de backup atualmente " "configurado [%(configured_service)s] não é o serviço de backup que foi usado " "para criar esse backup [%(backup_service)s]." #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Restauração de backup interrompida: esperava-se o status de backup " "%(expected_status)s mas obteve %(actual_status)s." 
#, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Recuperada uma quantia diferente de volumes SolidFire para as capturas " "instantâneas Cinder fornecidas. Recuperados: %(ret)s, Desejados: %(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Recuperada uma quantia diferente de volumes SolidFire para os volumes Cinder " "fornecidos. Recuperados: %(ret)s, Desejados: %(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "Contagem de novas tentativas excedida para o comando: %s" msgid "Retryable SolidFire Exception encountered" msgstr "Exceção do Retryable SolidFire encontrada" msgid "Retype cannot change encryption requirements." msgstr "Digitar novamente não pode alterar os requisitos de criptografia." #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "Digitar novamente não pode alterar as especificações da qualidade de serviço " "para o volume em uso: %s." msgid "Retype requires migration but is not allowed." msgstr "Digitar novamente requer migração mas não é permitido." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "Retrocesso para Volume: %(volumeName)s falhou. Entre em contato com o " "administrador do sistema para retornar manualmente o volume para o grupo o " "grupo de armazenamentos para a política fast %(fastPolicyName)s falhou." #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "Retroceder %(volumeName)s excluindo-o." #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" "Não é permitido executar o Cinder com uma versão do VMware vCenter inferior " "a %s." msgid "SAN product is not configured." msgstr "O produto SAN não está configurado." msgid "SAN protocol is not configured." msgstr "O protocolo SAN não está configurado." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "Configuração SMBFS 'smbfs_oversub_ratio' inválida. Deve ser > 0: %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "Configuração SMBFS 'smbfs_used_ratio' inválida. Deve ser > 0 e <= 1,0: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "O arquivo de configuração SMBFS em %(config)s não existe." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "Arquivo de configuração SMBFS não definido (smbfs_shares_config)." #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "Comando SSH falhou após '%(total_attempts)r' tentativas: '%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "Comando SSH falhou com o erro: '%(err)s', Comando: '%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Injeção de comando SSH detectada: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "Conexão SSH falhou para %(fabric)s com erro: %(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "Certificado SSL expirado em %s." 
#, python-format msgid "SSL error: %(arg)s." msgstr "Erro de SSL: %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "O Filtro do Host do Planejador %(filter_name)s não pôde ser localizado." #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" "O Ponderador %(weigher_name)s do Host do Planejador não pôde ser localizado." #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "Status da cópia secundária: %(status)s e sincronizada: %(sync)s, o progresso " "de sincronização é: %(progress)s%%." #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" "O ID secundário não pode ser igual à matriz primária, backend_id = " "%(secondary)s." #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "SerialNumber deve estar no arquivo %(fileName)s." #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "Serviço %(service)s no host %(host)s removido." #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "O serviço %(service_id)s não pôde ser localizado no host %(host)s." #, python-format msgid "Service %(service_id)s could not be found." msgstr "O serviço %(service_id)s não pôde ser localizado." #, python-format msgid "Service %s not found." msgstr "Serviço %s não localizado." msgid "Service is too old to fulfil this request." msgstr "O serviço é muito antigo para preencher essa solicitação." msgid "Service is unavailable at this time." msgstr "O serviço está indisponível neste momento." msgid "Service not found." msgstr "Serviço não localizado." msgid "Set pair secondary access error." msgstr "Erro de configuração de acesso secundário do par." msgid "Sets thin provisioning." msgstr "Configura thin provisioning." msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "Configurar o grupo de política de LUN QoS não é suportado nesta família de " "armazenamento e versão de ONTAP." msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "Configurar o grupo de política de arquivo qos não é suportado nesta família " "de armazenamento e versão de ONTAP." #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "Compartilhamento %s ignorado devido a um formato inválido. Deve ter o " "endereço com formato:/export. Verifique as configurações de nas_ip e " "nas_share_path." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "O compartilhamento em %(dir)s não pode ser gravado pelo serviço de volume " "Cinder. As operações de captura instantânea não serão suportadas." #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Erro de E/S de sheepdog, o comando foi: \"%s\"." msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "As operações de exibição podem ser feitas somente para projetos na mesma " "hierarquia do projeto no qual os usuários estão com escopo definido." msgid "Size" msgstr "Tamanho" #, python-format msgid "Size for volume: %s not found, cannot secure delete." 
msgstr "Tamanho do volume: %s não localizado, não é seguro excluir." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "O tamanho é de %(image_size)dGB e não se ajusta em um volume de tamanho de " "%(volume_size)dGB." #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "Tamanho de imagem especificada %(image_size)sGB é maior que o tamanho do " "volume %(volume_size)sGB." #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "A captura instantânea %(id)s foi solicitada para ser excluída enquanto " "aguardava para tornar-se disponível. Uma solicitação simultânea pode ter " "sido feita." #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "A captura instantânea %(id)s foi localizada no estado %(state)s em vez de " "'excluída' durante exclusão em cascata." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "A captura instantânea %(snapshot_id)s não pôde ser localizada." #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "A captura instantânea %(snapshot_id)s não tem metadados com a chave " "%(metadata_key)s." #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "" "A captura instantânea %s não deve fazer parte de um grupo de consistências." #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "A captura instantânea '%s' não existe na matriz." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "A captura instantânea não pode ser criada porque o volume %(vol_id)s não " "está disponível, status atual do volume: %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "" "A captura instantânea não pode ser criada enquanto o volume está migrando." msgid "Snapshot of secondary replica is not allowed." msgstr "A captura instantânea da réplica secundária não é permitida." #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "Captura instantânea do volume não suportada no estado: %s." #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "" "Captura instantânea res \"%s\" que não é implementada em qualquer lugar." msgid "Snapshot size must be multiple of 1 GB." msgstr "O tamanho da captura instantânea deve ser múltiplo de 1 GB." #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" "Status de captura instantânea %(cur)s não permitido para " "update_snapshot_status" msgid "Snapshot status must be \"available\" to clone." msgstr "O status da captura instantânea deve ser \"disponível\" para clonar." #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "" "A captura instantânea a ser feito o backup deve estar disponível, mas o " "status atual é \"%s\"." #, python-format msgid "Snapshot with id of %s could not be found." msgstr "A captura instantânea com ID %s não pôde ser localizada." 
#, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "Captura instantânea='%(snap)s' não existe na imagem base='%(base)s' - " "interrompendo backup incremental" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "" "As capturas instantâneas não são suportadas para este formato de volume: %s" #, python-format msgid "Socket error: %(arg)s." msgstr "Erro de soquete: %(arg)s." msgid "SolidFire Cinder Driver exception" msgstr "Exceção no driver SolidFire Cinder" msgid "Sort direction array size exceeds sort key array size." msgstr "" "O tamanho da matriz de direção de classificação excede o tamanho da matriz " "de chave de classificação." msgid "Source CG is empty. No consistency group will be created." msgstr "CG de origem está vazio. Nenhum grupo de consistências será criado." msgid "Source host details not found." msgstr "Detalhes do host de origem não localizados." msgid "Source volume device ID is required." msgstr "ID do dispositivo de volume de origem é necessário." msgid "Source volume not mid-migration." msgstr "Volume de origem não de migração intermediária." #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "Origem com IP/nome de host: %s não localizada no dispositivo de destino para " "migração de volume ativada por backend; continuando com a migração padrão." msgid "SpaceInfo returned byarray is invalid" msgstr "SpaceInfo retornou byarray é inválido" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "O host especificado a ser mapeado para o volume %(vol)s está no grupo de " "hosts não suportado com %(group)s." msgid "Specified logical volume does not exist." msgstr "O volume lógico especificado não existe." #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "" "O grupo de capturas instantâneas especificado com o IDo %s não pôde ser " "localizado." msgid "Specify a password or private_key" msgstr "Especifique uma senha ou private_key" msgid "Specify san_password or san_private_key" msgstr "Especifique san_password ou san_private_key" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "" "Especifique um nome de tipo de volume , a descrição, is_public ou uma " "combinação deles." msgid "Split pair error." msgstr "Erro ao dividir par." msgid "Split replication failed." msgstr "Falha ao dividir replicação." msgid "Start LUNcopy error." msgstr "Erro ao iniciar LUNcopy." msgid "State" msgstr "Estado" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "O estado do nó está errado. O estado atual é %s." msgid "Status" msgstr "Status" msgid "Stop snapshot error." msgstr "Erro ao parar captura instantânea." #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "" "O Serviço de configuração de armazenamento não foi localizado em " "%(storageSystemName)s." #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "O Serviço mgmt de HardwareId de armazenamento não foi localizado em " "%(storageSystemName)s." #, python-format msgid "Storage Profile %s not found." msgstr "Perfil de armazenamento %s não localizado." 
#, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "" "O Serviço de realocação de armazenamento não foi localizado em " "%(storageSystemName)s." #, python-format msgid "Storage family %s is not supported." msgstr "Família de armazenamento %s não é suportada." #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "" "O grupo de armazenamentos %(storageGroupName)s não foi excluído com sucesso" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "Host de armazenamento %(svr)s não detectado; verifique o nome" msgid "Storage pool is not configured." msgstr "O conjunto de armazenamento não está configurado." #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "Perfil de armazenamento: %(storage_profile)s não encontrado." msgid "Storage resource could not be found." msgstr "Recurso de armazenamento não pôde ser encontrado." msgid "Storage system id not set." msgstr "ID do sistema de armazenamento não configurado." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Sistema de armazenamento não encontrado para pool %(poolNameInStr)s." msgid "Storage-assisted migration failed during manage volume." msgstr "" "A migração de armazenamento assistida falhou durante gerenciamento de volume." #, python-format msgid "StorageSystem %(array)s is not found." msgstr "StorageSystem %(array)s não foi encontrado." #, python-format msgid "String with params: %s" msgstr "Sequência com parâmetros: %s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "A soma de uso filho '%(sum)s' é maior que a cota livre de '%(free)s' para o " "projeto '%(proj)s' do recurso '%(res)s'. Diminua o limite ou o uso de um ou " "mais dos projetos a seguir: '%(child_ids)s'" msgid "Switch over pair error." msgstr "Erro ao executar switch over de par." msgid "Sync pair error." msgstr "Erro de sincronização de par." msgid "Synchronizing secondary volume to primary failed." msgstr "Falha ao sincronizar volume secundário com primário." #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "" "Sistema %(id)s localizado com um status de senha inválida - %(pass_status)s." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "Sistema %(id)s localizado com status inválido - %(status)s." msgid "System does not support compression." msgstr "O sistema não suporta compactação." msgid "System is busy, retry operation." msgstr "O sistema está ocupado, tente novamente a operação." #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "O TSM [%(tsm)s] não foi localizado no armazenamento CloudByte para a conta " "[%(account)s]." msgid "Target volume type is still in use." msgstr "Tipo de volume de destino ainda está em uso." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "Incompatibilidade da árvore de modelo; incluindo escravo %(slavetag)s para " "principal %(mastertag)s" #, python-format msgid "Tenant ID: %s does not exist." msgstr "ID do locatário: %s não existe." 
msgid "Terminate connection failed" msgstr "Finalização da conexão com falha" msgid "Terminate connection unable to connect to backend." msgstr "A finalização da conexão não pode conectar-se ao backend." #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "Falha ao finalizar a conexão de volume: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "A origem %(type)s %(id)s a ser replicada não foi localizada." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "Os parâmetros 'sort_key' e 'sort_dir' foram descontinuados e não podem ser " "usados com o parâmetro 'sort’." msgid "The EQL array has closed the connection." msgstr "A matriz EQL fechou a conexão." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "O sistema de arquivos GPFS %(fs)s não está no nível da liberação necessário. " "O nível atual é %(cur)s, deve ser pelo menos %(min)s." msgid "The IP Address was not found." msgstr "o Endereço IP não foi localizado." #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "A solicitação WebDAV falhou. Motivo: %(msg)s, Código de retorno/razão: " "%(code)s, Volume de Origem: %(src)s, Volume de Destino: %(dst)s, Método: " "%(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "O erro acima pode mostrar que o banco de dados não foi criado.\n" "Crie um banco de dados usando ‘cinder-manage db sync’ antes de executar esse " "comando." #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "A matriz não suporta a configuração do conjunto de armazenamentos para o SLO " "%(slo)s e carga de trabalho %(workload)s. Verifique se se há SLOs e cargas " "de trabalho válidos na matriz." msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "O backend no qual o volume é criado não possui a replicação ativada. " #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "Comando %(cmd)s com falha. (ret: %(ret)s, saída padrão: %(out)s, erro " "padrão: %(err)s)" msgid "The copy should be primary or secondary" msgstr "A cópia deve ser primária ou secundária" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "" "A criação de um dispositivo lógico não pôde ser concluída. (LDEV: %(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" "O método decorado deve aceitar um volume ou um objeto de captura instantânea" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "O dispositivo no caminho %(path)s está indisponível: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "" "O horário de encerramento (%(end)s) deve ser posterior ao horário de início " "(%(start)s)." #, python-format msgid "The extra_spec: %s is invalid." msgstr "O extra_spec: %s é inválido." 
#, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "O extraspec: %(extraspec)s não é válido." #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "O volume com failover executado não pôde ser excluído: %s" #, python-format msgid "The following elements are required: %s" msgstr "Os seguintes elementos são necessários: %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "As migrações a seguir possuem um downgrade, que não é permitido: \n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "O grupo de hosts ou destino de iSCSI não pôde ser incluído." msgid "The host group or iSCSI target was not found." msgstr "O grupo de hosts ou destino iSCSI não foi localizado." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "O host não está pronto para efetuar failback. Ressincronize os volumes e " "continue a replicação nos backends 3PAR." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "O host não está pronto para efetuar failback. Ressincronize os volumes e " "continue a replicação nos backends LEftHand." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "O host não está pronto para efetuar failback. Ressincronize os volumes e " "continue a replicação nos backends Storwize." #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "O usuário de CHAP iSCSI %(user)s não existe." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "O lun importado %(lun_id)s está no conjunto %(lun_pool)s que não é " "gerenciado por host %(host)s." msgid "The key cannot be None." msgstr "A chave não pode ser Nenhum." #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "" "O dispositivo lógico para o %(type)s %(id)s especificado já foi excluído." #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "" "O método %(method)s atingiu o tempo limite. (valor de tempo limite: " "%(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "O método update_migrated_volume não está implementado." #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" "A montagem %(mount_path)s não é um volume Quobyte USP válido. Erro: %(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "" "O parâmetro do backend de armazenamento. (config_group: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "O backup principal deve estar disponível para backup incremental." #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "" "A captura instantânea fornecida '%s' não é uma captura instantânea do volume " "fornecido." 
msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "A referência ao volume no backend deve ter o formato file_system/volume_name " "(volume_name não pode conter '/')" #, python-format msgid "The remote retention count must be %s or less." msgstr "A contagem de retenção remota deve ser %s ou menos." msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "O modo de replicação não foi configurado corretamente no tipo de volume " "extra_specs. Se replication:mode for periódico, replication:sync_period " "também deverá ser especificado e estar entre 300 e 31622400 segundos." #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "" "O período de sincronização de replicação deve ser pelo menos %s segundos." #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "O tamanho solicitado : %(requestedSize)s não é o mesmo que o tamanho " "resultante: %(resultSize)s." #, python-format msgid "The resource %(resource)s was not found." msgstr "O recurso %(resource)s não foi localizado." msgid "The results are invalid." msgstr "Os resultados são inválidos." #, python-format msgid "The retention count must be %s or less." msgstr "A contagem de retenção deve ser %s ou menos." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "A captura instantânea não pode ser criada quando o volume está no modo de " "manutenção." #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "" "O volume de origem %s não está no conjunto que é gerenciado pelo host atual." msgid "The source volume for this WebDAV operation not found." msgstr "O volume de origem para esta operação WebDAV não foi localizado." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "O tipo de volume de origem '%(src)s' é diferente do tipo volume de destino " "'%(dest)s'." #, python-format msgid "The source volume type '%s' is not available." msgstr "O tipo de volume de origem '%s' não está disponível." #, python-format msgid "The specified %(desc)s is busy." msgstr "O %(desc)s especificado está ocupado." #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "A LUN especificada não pertence ao conjunto fornecido: %s." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "O ldev %(ldev)s especificado não pôde ser gerenciado. O ldev não deve ser de " "mapeamento." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" "O ldev %(ldev)s especificado não pôde ser gerenciado. O ldev não deve ser em " "par." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "O ldev %(ldev)s especificado não pôde ser gerenciado. O tamanho do ldev deve " "ser em múltiplos de gigabyte." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "O ldev %(ldev)s especificado não pôde ser gerenciado. 
O tipo de volume deve " "ser DP-VOL." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "A operação especificada não é suportada. O tamanho do volume deve ser igual " "ao %(type)s da origem. (volume: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "O vdisk especificado está mapeado para um host." msgid "The specified volume is mapped to a host." msgstr "O volume especificado está mapeado para um host." #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "A senha da matriz de armazenamento %s está incorreta, atualize a senha " "configurada. " #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "" "O backend de armazenamento pode ser utilizado. (config_group: " "%(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "O dispositivo de armazenamento não suporta %(prot)s. Configure o dispositivo " "para suportar %(prot)s ou alterne para um driver usando um protocolo " "diferente." #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "A contagem dividida de metadados de %(memberCount)s é muito pequena para o " "volume: %(volumeName)s, com o tamanho %(volumeSize)s." #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "O tipo de metadados: %(metadata_type)s para volume/captura instantânea " "%(id)s é inválido." #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "O volume %(volume_id)s não pôde ser estendido. O tipo de volume deve ser " "Normal." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "O volume %(volume_id)s não pôde ter o gerenciamento cancelado. O tipo de " "volume deve ser %(volume_type)s." #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "O volume %(volume_id)s é gerenciado com êxito. (LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "" "O volume %(volume_id)s teve o gerenciamento cancelado com êxito. (LDEV: " "%(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "O volume %(volume_id)s a ser mapeado não foi localizado." msgid "The volume cannot accept transfer in maintenance mode." msgstr "O volume não pode aceitar a transferência no modo de manutenção." msgid "The volume cannot be attached in maintenance mode." msgstr "O volume não pode ser conectado no modo de manutenção." msgid "The volume cannot be detached in maintenance mode." msgstr "O volume não pode ser removido no modo de manutenção." msgid "The volume cannot be updated during maintenance." msgstr "O volume não pode ser atualizado durante a manutenção." msgid "The volume connection cannot be initialized in maintenance mode." msgstr "A conexão do volume não pode ser inicializada no modo de manutenção." msgid "The volume driver requires the iSCSI initiator name in the connector." 
msgstr "O driver do volume requer o nome do inicializador iSCSI no conector." msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "O volume está atualmente ocupado no 3PAR e não pode ser excluído neste " "momento. É possível tentar novamente mais tarde." msgid "The volume label is required as input." msgstr "O rótulo de volume é necessário como entrada." msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "" "Os metadados de volume não podem ser excluídos quando o volume está no modo " "de manutenção." msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "Os metadados de volume não podem ser atualizados quando o volume está no " "modo de manutenção." #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "Não há recursos disponíveis para uso. (recurso: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "Não há hosts ESX válidos." #, python-format msgid "There are no valid datastores attached to %s." msgstr "Não há armazenamentos de dados válidos conectados ao %s." msgid "There are no valid datastores." msgstr "Não há nenhum datastore válido." #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "Não há designação dos %(param)s. O armazenamento especificado é essencial " "para gerenciar o volume." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "Não há designação do ldev. O ldev especificado é essencial para gerenciar o " "volume." msgid "There is no metadata in DB object." msgstr "Não há metadados no objeto do BD." #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "Não há compartilhamento que possa hospedar %(volume_size)sG" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "Não há nenhum compartilhamento que possa armazenar %(volume_size)sG." #, python-format msgid "There is no such action: %s" msgstr "Essa ação não existe: %s" msgid "There is no virtual disk device." msgstr "Não há nenhum dispositivo de disco virtual." #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "Ocorreu um erro ao incluir o volume no grupo de cópias remotas: %s." #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "Ocorreu um erro ao criar o cgsnapshot %s." #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "Ocorreu um erro ao criar o grupo de cópias remotas: %s." #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "" "Ocorreu um erro ao configurar o período de sincronização para o grupo de " "cópias remotas: %s." #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Ocorreu um erro ao configurar um grupo de cópias remotas nas matrizes 3PAR:" "('%s'). O volume não será reconhecido com um tipo de replicação." #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Ocorreu um erro ao configurar um planejamento remoto nas matrizes " "LeftHand('%s'). 
O volume não será reconhecido com um tipo de replicação." #, python-format msgid "There was an error starting remote copy: %s." msgstr "Ocorreu um erro ao iniciar a cópia remota: %s" #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Não há nenhum arquivo de configuração do Gluster configurado (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "Não há nenhum arquivo de configuração do NFS configurado (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "Não há volume Quobyte configurado (%s). Exemplo: quobyte: ///" msgid "Thin provisioning not supported on this version of LVM." msgstr "Thin provisioning não suportado nesta versão do LVM." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "Ativador Thin Provisioning não está instalado. Não é possível criar um " "volume thin" msgid "This driver does not support deleting in-use snapshots." msgstr "" "Este driver não oferece suporte à exclusão de capturas instantâneas em uso." msgid "This driver does not support snapshotting in-use volumes." msgstr "" "Este driver não oferece suporte a capturas instantâneas de volumes em uso." msgid "This request was rate-limited." msgstr "Essa solicitação estava limitada a taxa." #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "Essa plataforma do sistema (%s) não é suportada. Esse driver oferece suporte " "somente a plataformas Win32." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "" "O Serviço da política de camada não foi localizado para " "%(storageSystemName)s." #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "Tempo limite atingido ao aguardar atualização de Nova para criação de " "captura instantânea %s." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "Tempo limite atingido ao aguardar atualização de Nova para exclusão de " "captura instantânea %(id)s." msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "Valor de tempo limite (em segundos) usado ao conectar-se ao cluster ceph. Se " "o valor < 0, nenhum tempo limite foi configurado e o valor librados padrão " "foi usado." #, python-format msgid "Timeout while calling %s " msgstr "Tempo limite ao chamar %s." #, python-format msgid "Timeout while requesting %(service)s API." msgstr "Tempo limite ao solicitar %(service)s da API." #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "Tempo limite ao solicitar do backend %(service)s." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "Transferência %(transfer_id)s não pôde ser encontrada." #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "Transferência %(transfer_id)s: ID do volume %(volume_id)s em estado " "inesperado %(status)s, esperava-se aguardando transferência" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "Tentando importar metadados de backup do ID %(meta_id)s para o backup %(id)s." 
#, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "O ajuste da tarefa do volume parou antes de ele ter sido feito: volume_name=" "%(volume_name)s, task-status=%(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "O tipo %(type_id)s já está associado a um outro qos specs: %(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "" "A modificação de tipo de acesso não é aplicável ao tipo de volume público." msgid "Type cannot be converted into NaElement." msgstr "O tipo não pode ser convertido em NaElement." #, python-format msgid "TypeError: %s" msgstr "TypeError: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUIDs %s estão na lista de inclusão e remoção de volume." #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "Não é possível acessar o backend do Storwise para o volume %s." msgid "Unable to access the backend storage via file handle." msgstr "" "Não é possível acessar o armazenamento de backend por meio da manipulação de " "arquivos." #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "" "Não é possível acessar o armazenamento de backend por meio do caminho " "%(path)s." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "" "Não é possível incluir o host Cinder nos apphosts para o espaço %(space)s" #, python-format msgid "Unable to complete failover of %s." msgstr "Não é possível concluir o failover de %s." msgid "Unable to connect or find connection to host" msgstr "Não é possível conectar-se ou localizar a conexão com o host" msgid "Unable to create Barbican Client without project_id." msgstr "Não é possível criar o Barbican Client sem project_id." #, python-format msgid "Unable to create consistency group %s" msgstr "Não foi possível criar o grupo de consistências %s" msgid "Unable to create lock. Coordination backend not started." msgstr "Não é possível criar bloqueio. Backend de coordenação não iniciado." #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Não é possível criar ou obter o grupo de armazenamento padrão para a " "política FAST: %(fastPolicyName)s." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "Não é possível criar clone de réplica para o volume %s." #, python-format msgid "Unable to create the relationship for %s." msgstr "Não é possível criar o relacionamento para %s." #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "Não é possível criar o volume %(name)s a partir do %(snap)s." #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "Não é possível criar o volume %(name)s a partir do %(vol)s." #, python-format msgid "Unable to create volume %s" msgstr "Não é possível criar o volume %s" msgid "Unable to create volume. Backend down." msgstr "Não é possível criar o volume. Backend inativo." #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "" "Não é possível excluir a captura instantânea %s do Grupo de consistências" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" "Não é possível excluir a captura instantânea %(id)s, status: %(status)s." 
#, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "Não é possível excluir política de captura instantânea no volume %s." #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "" "Não é possível excluir o volume de destino para o volume %(vol)s. Exceção: " "%(err)s." msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "Não é possível separar o volume. O status do volume deve ser 'em uso' e " "attach_status deve ser 'anexado' para separar. " #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "" "Não é possível determinar a secondary_array a partir da matriz secundária " "fornecida: %(secondary)s." #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "" "Não é possível determinar o nome da captura instantânea na Pureza para " "captura instantânea %(id)s." msgid "Unable to determine system id." msgstr "Não é possível determinar o ID do sistema." msgid "Unable to determine system name." msgstr "Não é possível determinar o nome do sistema." #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "Não é possível fazer operações de gerenciamento de captura instantânea com a " "versão da API REST Purity %(api_version)s, requer %(required_versions)s." #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "Não é possível executar replicação com a versão da API REST Purity " "%(api_version)s, requer uma das %(required_versions)s." msgid "Unable to enable replication and snapcopy at the same time." msgstr "" "Não é possível ativar a replicação e a cópia instantânea ao mesmo tempo." #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Não é possível estabelecer a parceria com o cluster Storwize: %s" #, python-format msgid "Unable to extend volume %s" msgstr "Não é possível estender o volume %s" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "" "Não é possível executar failover do volume %(id)s para o backend secundário " "porque o relacionamento da replicação é incapaz de alternar: %(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "Não é possível executar failover para \"padrão', isso pode ser feito somente " "após um failover ter sido concluído." #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "" "Não é possível executar failover para o destino de replicação:%(reason)s." msgid "Unable to fetch connection information from backend." msgstr "Não foi possível buscar informações de conexão do backend." 
#, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "Não é possível buscar informações de conexão do backend: %(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "Não é possível localizar ref Pureza com name=%s" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "Não é possível localizar o Grupo de Volumes: %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "" "Não é possível localizar o destino de failover, nenhum destino secundário " "configurado." msgid "Unable to find iSCSI mappings." msgstr "Não é possível localizar mapeamentos de iSCSI." #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "Não foi possível encontrar ssh_hosts_key_file: %s" msgid "Unable to find system log file!" msgstr "Não é possível encontrar o arquivo de log do sistema!" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "" "Não é possível localizar uma captura instantânea pg viável a ser utilizada " "para failover na matriz secundária selecionada: %(id)s." #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "" "Não é possível localizar uma matriz secundária viável a partir dos destinos " "configurados: %(targets)s." #, python-format msgid "Unable to find volume %s" msgstr "Não é possível localizar o volume %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "Não foi possível obter um dispositivo de bloco para o arquivo '%s'" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" "Não é possível obter informações de configuração necessárias para criar um " "volume: %(errorMessage)s." msgid "Unable to get corresponding record for pool." msgstr "Não é possível obter registro correspondente para o pool." #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "Não é possível obter informações sobre %(space)s; verifique se o cluster " "está em execução e conectado." msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "Não é possível obter lista de endereços IP neste host; verifique as " "permissões e a rede." msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "" "Não é possível obter lista de membros do domínio; verifique se o cluster " "está em execução." msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "Não é possível obter lista de espaços para fazer novo nome. Verifique se o " "cluster está em execução." #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "Não é possível obter estatísticas para backend_name: %s" msgid "Unable to get storage volume from job." msgstr "Não é possível obter volume de armazenamento a partir da tarefa. " #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "Não é possível obter terminais de destino para hardwareId " "%(hardwareIdInstance)s." msgid "Unable to get the name of the masking view." msgstr "Não é possível obter o nome da visualização de máscara." msgid "Unable to get the name of the portgroup." msgstr "Não é possível obter o nome do grupo de portas." 
#, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "Não é possível obter o relacionamento de replicação para o volume %s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "Não é possível importar o volume %(deviceId)s para o cinder. É o volume de " "origem da sessão de replicação %(sync)s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "Não é possível importar o volume %(deviceId)s para o cinder. O volume " "externo não está no conjunto gerenciado pelo host cinder atual." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "Não é possível importar o volume %(deviceId)s para o cinder. O volume está " "na visualização de mascaramento %(mv)s." #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "Não é possível carregar CA a partir de %(cert)s %(e)s." #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "Não é possível carregar o certificado a partir de %(cert)s %(e)s." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "Não é possível carregar chave a partir de %(cert)s %(e)s." #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" "Não foi possível localizar a conta %(account_name)s no dispositivo Solidfire" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "" "Não é possível localizar um SVM que está gerenciando o endereço IP ‘%s'" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "Não é possível localizar perfis de reprodução especificados %s" #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "Não é possível gerenciar o volume existente. Volume %(volume_ref)s já " "gerenciado." #, python-format msgid "Unable to manage volume %s" msgstr "Não é possível gerenciar o volume %s" msgid "Unable to map volume" msgstr "Não é possível mapear o volume" msgid "Unable to map volume." msgstr "Não é possível mapear o volume." msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "" "Não foi possível avaliar a requisição XML. Por favor, fornece o XML no " "formato correto." msgid "Unable to parse attributes." msgstr "Não é possível analisar atributos." #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "Não é possível promover réplica primária para o volume %s. Nenhuma cópia " "secundária disponível." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "Não é possível reutilizar um host que não é gerenciado pelo Cinder com " "use_chap_auth=True," msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "Não é possível reutilizar o host com credenciais CHAP desconhecidas " "configuradas." #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "Não é possível renomear o volume %(existing)s para %(newname)s" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "" "Não é possível recuperar o grupo de capturas instantâneas com ID de %s." 
#, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "Não é possível redefinir %(specname)s, é esperado receber valores " "%(spectype)s atuais e solicitados. Valor recebido: %(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "Não é possível digitar novamente: Uma cópia do volume %s existe. Digitar " "novamente excederia o limite de 2 cópias." #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "Não é possível digitar novamente: A ação atual precisa de cópia de volume, " "isso não é permitido quando a nova digitação for a replicação. Volume = %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "" "Não é possível configurar a replicação de modo de espelho para %(vol)s. " "Exceção: %(err)s." #, python-format msgid "Unable to snap Consistency Group %s" msgstr "Não é possível capturar o Grupo de consistências %s" msgid "Unable to terminate volume connection from backend." msgstr "Não foi possível finalizar conexão do volume do backend." #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "Não é possível terminar conexão do volume: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "Não é possível atualizar o grupo de consistência %s" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "Não é possível atualizar o tipo devido ao status incorreto: %(vol_status)s " "no volume: %(vol_id)s. O status do volume deve ser disponível ou em uso." #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" "Não é possível verificar o grupo do inicializador: %(igGroupName)s na " "visualização de mascaramento %(maskingViewName)s. " msgid "Unacceptable parameters." msgstr "Parâmetros inaceitáveis." #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" "Status de mapeamento inesperado %(status)s para o mapeamento %(id)s. " "Atributos: %(attr)s." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "Resposta da CLI inesperada: incompatibilidade de cabeçalho/linha. cabeçalho: " "%(header)s, linha: %(row)s." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "Status de mapeamento inesperado %(status)s para mapeamento %(id)s. " "Atributos: %(attr)s." #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "Saída inesperada. Esperada [%(expected)s], mas recebida [%(output)s]" msgid "Unexpected response from Nimble API" msgstr "Resposta inesperada da API do Nimble" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Resposta inesperada da API do Tegile IntelliFlash" msgid "Unexpected status code" msgstr "Código de status inesperado" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. 
Error: %(error)s" msgstr "" "Código de status inesperado a partir do comutador %(switch_id)s com o " "protocolo %(protocol)s para a URL %(page)s. Erro: %(error)s" msgid "Unknown Gluster exception" msgstr "Exceção de Gluster desconhecida" msgid "Unknown NFS exception" msgstr "Exceção NFS desconhecida" msgid "Unknown RemoteFS exception" msgstr "Exceção RemoteFS desconhecida" msgid "Unknown SMBFS exception." msgstr "Exceção SMBFS desconhecida." msgid "Unknown Virtuozzo Storage exception" msgstr "Exceção de armazenamento Virtuozzo desconhecido" msgid "Unknown action" msgstr "Ação desconhecida" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Desconhecido se o volume: %s a ser gerenciado estiver sendo gerenciado pelo " "Cinder. Interrompendo gerenciamento de volume. Inclua a propriedade de " "esquema customizado 'cinder_managed' no volume e configure seu valor para " "False. Como alternativa, defina o valor da política de configuração do " "Cinder 'zfssa_manage_policy' para 'loose' para remover essa restrição." #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Desconhecido se o volume: %s a ser gerenciado estiver sendo gerenciado pelo " "Cinder. Interrompendo gerenciamento de volume. Inclua a propriedade de " "esquema customizado 'cinder_managed' no volume e configure seu valor para " "False. Como alternativa, defina o valor da política de configuração do " "Cinder 'zfssa_manage_policy' para 'loose' para remover essa restrição." #, python-format msgid "Unknown operation %s." msgstr "Operação desconhecida %s." #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "Comando desconhecido ou não suportado %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "Protocolo desconhecido: %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Recursos da cota desconhecidos %(unknown)s." msgid "Unknown service" msgstr "Serviço desconhecido" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'." msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "" "As opções de exclusão não gerenciadas e em cascata são mutuamente exclusivas." msgid "Unmanage volume not implemented." msgstr "Volume não gerenciado não implementado" msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" "Não é permitido remover gerenciamento de capturas instantâneas para volumes " "'com failover executado'. " msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" "Não é permitido remover gerenciamento de capturas instantâneas para volumes " "com failover executado. 
" #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "Palavra-chave de QOS desconhecida: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "Formato de auxiliar não reconhecido: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valor read_deleted não reconhecido '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "Desconfigurar opções gcs: %s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "Falha de iscsiadm. A exceção é %(ex)s. " msgid "Unsupported Clustered Data ONTAP version." msgstr "Versão de ONTAP de Dados em Cluster Não Suportada." msgid "Unsupported Content-Type" msgstr "Tipo de Conteúdo Não Suportado" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" "Versão do Data ONTAP não suportada. Data ONTAP versão 7.3.1 e acima são " "suportados." #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "Versão de metadados de backup não suportada (%s)" msgid "Unsupported backup metadata version requested" msgstr "Requisitada versão de metadados de backups não-suportados" msgid "Unsupported backup verify driver" msgstr "Backup não suportado, verificar driver" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "Firmware não suportado no comutador %s. Certifique-se de que o comutador " "esteja executando firmware versão v6.4 ou superior" #, python-format msgid "Unsupported volume format: %s " msgstr "Formato de volume não suportado: %s " msgid "Update QoS policy error." msgstr "Erro ao atualizar política de QoS." msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "As operações de atualização e exclusão de cota podem ser feitas somente por " "um administrador de pai imediato ou pelo administrador CLOUD." msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "As operações de atualização e exclusão de cota podem ser feitas somente para " "projetos na mesma hierarquia do projeto no qual os usuários estão com escopo " "definido." msgid "Update list, doesn't include volume_id" msgstr "A lista de atualização não inclui volume _id" msgid "Updated At" msgstr "Atualizado em" msgid "Upload to glance of attached volume is not supported." msgstr "Upload para glance do volume conectado não é suportado." msgid "Use ALUA to associate initiator to host error." msgstr "Erro ao usar ALUA para associar o inicializador ao host." msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "Erro ao usar o CHAP para associar o inicializador ao host. Verifique o nome " "do usuário e a senha do CHAP." msgid "User ID" msgstr "ID de Usuário" msgid "User does not have admin privileges" msgstr "O usuário não tem privilégios de administrador" msgid "User is not authorized to use key manager." msgstr "O usuário não está autorizado a usar o gerenciador de chaves." msgid "User not authorized to perform WebDAV operations." msgstr "O usuário não está autorizado a executar operações do WebDAV." msgid "UserName is not configured." msgstr "UserName não está configurado." msgid "UserPassword is not configured." msgstr "UserPassword não está configurado." msgid "" "V2 rollback - Volume in another storage group besides default storage group." 
msgstr "" "Retrocesso V2 - Volume em outro grupo de armazenamentos além do grupo de " "armazenamentos padrão." msgid "V2 rollback, volume is not in any storage group." msgstr "Retrocesso V2, o volume não está em nenhum grupo de armazenamentos." msgid "V3 rollback" msgstr "Retrocesso V3" msgid "VF is not enabled." msgstr "O VF não está ativado. " #, python-format msgid "VV Set %s does not exist." msgstr "O Conjunto VV %s não existe." #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "Os consumidores válidos de QoS specs são: %s" #, python-format msgid "Valid control location are: %s" msgstr "O local de controle válido é: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "Falha ao validar conexão do volume (erro: %(err)s)." #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" "O valor \"%(value)s\" não é válido para a opção de configuração \"%(option)s" "\"" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "O valor %(param)s para %(param_string)s não é um booleano." msgid "Value required for 'scality_sofs_config'" msgstr "Valor necessário para 'scality_sofs_config'" #, python-format msgid "ValueError: %s" msgstr "ValueError: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "Vdisk %(name)s não envolvido no mapeamento %(src)s -> %(tgt)s." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "A versão %(req_ver)s não é suportada pela API. A versão mínima é " "%(min_ver)s e a máxima é %(max_ver)s." #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "O VersionedObject %s não pode recuperar o objeto pelo ID." #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "O VersionedObject %s não suporta atualização condicional." #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "O volume virtual '%s' não existe na matriz." #, python-format msgid "Vol copy job for dest %s failed." msgstr "Tarefa de cópia do vol para dest %s falhou." #, python-format msgid "Volume %(deviceID)s not found." msgstr "Volume %(deviceID)s não localizado." #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "Volume %(name)s não localizado na matriz. Não é possível determinar se há " "volumes mapeados." #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "O volume %(name)s foi criado no VNX, mas no estado %(state)s." #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "O volume %(vol)s não pôde ser criado no conjunto %(pool)s." #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "O volume %(vol1)s não corresponde ao snapshot.volume_id %(vol2)s." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "O status do volume %(vol_id)s deve ser disponível ou em uso, mas o status " "atual é: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "O status do volume %(vol_id)s deve estar disponível para estender, mas o " "status atual é: %(vol_status)s." 
#, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "O status do volume %(vol_id)s deve estar disponível para atualizar a " "sinalização somente leitura, mas o status atual é: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "O status do volume %(vol_id)s deve ser disponível, mas o status atual é: " "%(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "O volume %(volume_id)s não pôde ser localizado." #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "O volume %(volume_id)s não possui metadados de administração com chave " "%(metadata_key)s." #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "O volume %(volume_id)s não possui metadados com a chave %(metadata_key)s." #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "O volume %(volume_id)s está mapeado atualmente para o grupo de hosts não " "suportado %(group)s" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "" "O volume %(volume_id)s não está mapeado atualmente para o host %(host)s" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "O volume %(volume_id)s ainda está anexado, separe o volume primeiro." #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "Erro de replicação do volume %(volume_id)s: %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "O volume %(volume_name)s está ocupado." #, python-format msgid "Volume %s could not be created from source volume." msgstr "O volume %s não pôde ser criado a partir do volume de origem." #, python-format msgid "Volume %s could not be created on shares." msgstr "O volume %s não pôde ser criado em compartilhamentos." #, python-format msgid "Volume %s could not be created." msgstr "O volume %s não pôde ser criado." #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "O volume %s não existe no Nexenta SA." #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "O volume %s não existe no dispositivo Nexenta Store." #, python-format msgid "Volume %s does not exist on the array." msgstr "O volume %s não existe na matriz." #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "O volume %s não possui provider_location especificado, ignorando." #, python-format msgid "Volume %s doesn't exist on array." msgstr "O volume %s não existe na matriz." #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "O volume %s não existe no backend ZFSSA." #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "O volume %s já é gerenciado pelo OpenStack." #, python-format msgid "Volume %s is already part of an active migration." msgstr "O volume %s já faz parte de uma migração ativa." #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "O volume %s não é do tipo replicado. 
Esse volume precisa ser de um tipo de " "volume com replication_enabled de especificação extra configurado para ' " "True'para suportar ações de replicação." #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "O volume %s está on-line. Configure o volume para off-line gerenciar o uso " "do OpenStack." #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." msgstr "" "O Volume %s não deve ser de migração, estar conectado, pertencer a um grupo " "de consistências ou possuir capturas instantâneas." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "O volume %s não deve ser parte de um grupo de consistências." #, python-format msgid "Volume %s must not be replicated." msgstr "O volume %s não deve ser replicado." #, python-format msgid "Volume %s must not have snapshots." msgstr "O volume %s não deve ter capturas instantâneas." #, python-format msgid "Volume %s not found." msgstr "Volume %s não localizado." #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "Volume %s: Erro ao tentar estender o volume" #, python-format msgid "Volume (%s) already exists on array" msgstr "O volume (%s) já existe na matriz" #, python-format msgid "Volume (%s) already exists on array." msgstr "Volume (%s) já existe na matriz." #, python-format msgid "Volume Group %s does not exist" msgstr "O Grupo de Volume %s não existe" #, python-format msgid "Volume Type %(id)s already exists." msgstr "O Tipo de Volume %(id)s já existe." #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "Tipo de volume %(type_id)s não possui especificação extra com a chave %(id)s." #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "A exclusão do Tipo de Volume %(volume_type_id)s não é permitida com volumes " "presentes com o tipo." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "O Tipo de Volume %(volume_type_id)s não tem specs extras com a chave " "%(extra_specs_key)s." msgid "Volume Type id must not be None." msgstr "Identificador do tipo de volume não pode ser None." #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "O volume [%(cb_vol)s] não foi localizado no armazenamento CloudByte " "correspondente ao volume OpenStack [%(ops_vol)s]." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "O volume [%s] não foi localizado no armazenamento CloudByte." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "O anexo do volume não pôde ser localizado com o filtro: %(filter)s." #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "A configuração de backend de volume é inválida: %(reason)s" msgid "Volume by this name already exists" msgstr "Já existe um volume com esse nome" msgid "Volume cannot be restored since it contains snapshots." msgstr "" "O volume não pode ser restaurado, uma vez que ele contém capturas " "instantâneas." msgid "Volume create failed while extracting volume ref." msgstr "A criação do volume falhou ao extrair a referência do volume." #, python-format msgid "Volume device file path %s does not exist." msgstr "Caminho do arquivo de dispositivo de volume %s não existe." 
#, python-format msgid "Volume device not found at %(device)s." msgstr "Dispositivo de volume não localizado em %(device)s." #, python-format msgid "Volume driver %s not initialized." msgstr "Driver do volume %s não inicializou." msgid "Volume driver not ready." msgstr "O driver de volume ainda não está pronto." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "O driver do volume reportou um erro: %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "" "O volume possui uma captura instantânea temporária que não pode ser excluída " "no momento." msgid "Volume has children and cannot be deleted!" msgstr "O volume possui filhos e não pode ser excluído!" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "O volume no grupo de consistências %s está anexado. É necessário desanexá-lo " "primeiro." msgid "Volume in consistency group still has dependent snapshots." msgstr "" "O volume no grupo de consistências ainda possui capturas instantâneas " "dependentes." #, python-format msgid "Volume is attached to a server. (%s)" msgstr "O volume está conectado a um servidor. (%s)" msgid "Volume is in-use." msgstr "O volume está em uso." msgid "Volume is not available." msgstr "Volume não está disponível." msgid "Volume is not local to this node" msgstr "O volume não é local para este nó" msgid "Volume is not local to this node." msgstr "O volume não é local para este nó." msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "Backup de metadados do volume solicitado, mas este driver ainda não suporta " "este recurso." #, python-format msgid "Volume migration failed: %(reason)s" msgstr "Migração de volume falhou: %(reason)s" msgid "Volume must be available" msgstr "Volume deve estar disponível" msgid "Volume must be in the same availability zone as the snapshot" msgstr "" "O volume deve estar na mesma zona de disponibilidade que a captura " "instantânea" msgid "Volume must be in the same availability zone as the source volume" msgstr "" "O volume deve estar na mesma zona de disponibilidade que o volume de origem" msgid "Volume must have a volume type" msgstr "O volume deve ter um tipo de volume" msgid "Volume must not be part of a consistency group." msgstr "O volume não deve ser parte de um grupo de consistências." msgid "Volume must not be replicated." msgstr "O volume não deve ser replicado." msgid "Volume must not have snapshots." msgstr "Volume não deve ter capturas instantâneas." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "Volume não localizado para a instância %(instance_id)s." msgid "Volume not found on configured storage backend." msgstr "Volume não localizado no backend de armazenamento configurado." msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "Volume não localizado no backend de armazenamento configurado. Se o nome do " "seu volume contiver \"/\", renomeie-o e tente gerenciar novamente." msgid "Volume not found on configured storage pools." msgstr "" "O volume não foi localizado em conjuntos de armazenamentos configurados." msgid "Volume not found." msgstr "Volume não localizado." msgid "Volume not unique." msgstr "Volume não exclusivo" msgid "Volume not yet assigned to host." msgstr "Volume ainda não designado para o host." msgid "Volume reference must contain source-name element." 
msgstr "A referência de volume deve conter o elemento de nome de origem." #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "Replicação de volume %(volume_id)s não pôde ser encontrada." #, python-format msgid "Volume service %s failed to start." msgstr "Falha ao iniciar serviço de volume %s." msgid "Volume should have agent-type set as None." msgstr "O volume deve ter agent-type configurado como Nenhum." #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "Tamanho do volume %(volume_size)sGB não pode ser menor do que o tamanho da " "imagem de minDisk %(min_disk)sGB." #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "" "O tamanho do volume '%(size)s' deve ser um número inteiro e maior que 0" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "O tamanho do volume ‘%(size)s‘ GB não pode ser menor que o tamanho do volume " "original %(source_size)sGB. Deve ser >= ao tamanho do volume original." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "O tamanho do volume ‘%(size)s' GB não pode ser menor que o tamanho da " "captura instantânea %(snap_size)sGB. Deve ser >= tamanho da captura " "instantânea original." msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "O tamanho do volume aumentou desde o último backup. Execute um backup " "completo." msgid "Volume size must be a multiple of 1 GB." msgstr "O tamanho do volume deve ser múltiplo de 1 GB." msgid "Volume size must be multiple of 1 GB." msgstr "O tamanho do volume deve ser múltiplo de 1 GB." msgid "Volume size must multiple of 1 GB." msgstr "O tamanho do volume deve ser múltiplo de 1 GB." #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" "O status do volume para o volume deve estar disponível, mas o status atual " "é: %s" msgid "Volume status is in-use." msgstr "O status do volume é em uso." #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "O status do volume deve ser \"disponível\" ou \"em uso\" para captura " "instantânea. (é %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "O status do volume deve ser \"disponível\" ou \"em uso\"." #, python-format msgid "Volume status must be %s to reserve." msgstr "O status do volume deve ser %s ara reservar." msgid "Volume status must be 'available'." msgstr "O status do volume deve ser 'disponível'." msgid "Volume to Initiator Group mapping already exists" msgstr "Já existe um mapeamento de grupos de volume para inicializador" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "O volume a ser feito o backup deve estar disponível ou em uso, mas o status " "atual é \"%s\"." msgid "Volume to be restored to must be available" msgstr "O volume a ser restaurado deve estar disponível" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "O tipo de volume %(volume_type_id)s não pôde ser localizado." #, python-format msgid "Volume type ID '%s' is invalid." msgstr "O ID do tipo '%s' é inválido." 
#, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "O acesso do tipo de volume para combinações de %(volume_type_id)s / " "%(project_id)s já existe." #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "Acesso do tipo de volume não localizado para a combinação " "%(volume_type_id)s / %(project_id)s ." #, python-format msgid "Volume type does not match for share %s." msgstr "O tipo do volume não corresponde para compartilhamento %s." #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "Encriptação do tipo de volume para o tipo %(type_id)s já existe." #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "Encriptação do tipo de volume para o tipo %(type_id)s não existe." msgid "Volume type name can not be empty." msgstr "Nome de tipo de volume não pode ser vazio." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" "O tipo de volume com o nome %(volume_type_name)s não pôde ser localizado." #, python-format msgid "Volume with volume id %s does not exist." msgstr "O volume com o ID do volume %s não existe." #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "Volume: %(volumeName)s não é um volume concatenado. É possível apenas " "desempenhar a extensão no volume concatenado. Saindo..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" "O volume: %(volumeName)s não foi incluído no grupo de armazenamentos " "%(sgGroupName)s. " #, python-format msgid "Volume: %s could not be found." msgstr "Volume: %s não pôde ser localizado." #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "O volume %s já está sendo gerenciado pelo Cinder." msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "Volumes serão divididos em objetos desse tamanho (em megabytes)." msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" "Os volumes/contas excederam nas contas SolidFire primárias e secundárias." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "Configuração VzStorage 'vzstorage_used_ratio' inválida. Deve ser > 0 e <= " "1.0: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "Arquivo de configuração VzStorage em %(config)s não existe." msgid "Wait replica complete timeout." msgstr "Tempo limite de espera da conclusão da réplica." #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Falha ao aguardar sincronização. Status de execução: %s." msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "Aguardando que todos os nós se associem ao cluster. Assegure-se de que todos " "os daemons sheep estejam em execução." msgid "We should not do switch over on primary array." msgstr "Não é recomendável executar switch over na matriz primária." msgid "Wrong resource call syntax" msgstr "Sintaxe de chamada de recurso errada." msgid "X-IO Volume Driver exception!" msgstr "Exceção do Driver do Volume X-IO!" msgid "XML support has been deprecated and will be removed in the N release." 
msgstr "O suporte XML foi descontinuado e será removido na liberação N." msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "XtremIO não configurado corretamente; nenhum portal iscsi localizado" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO não inicializado corretamente, nenhum cluster localizado" msgid "You must implement __call__" msgstr "Você deve implementar __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "Deve-se instalar o hpe3parclient antes de usar drivers 3PAR drivers. Execute " "\"pip install python-3parclient\" para instalar o hpe3parclient." msgid "You must supply an array in your EMC configuration file." msgstr "Deve-se fornecer uma matriz no arquivo de configuração do EMC." #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "Seu tamanho original: %(originalVolumeSize)s GB é maior que: %(newSize)s GB. " "Somente Estender é suportado. Saindo..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "Zona" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Política de Zoneamento: %s, não reconhecido" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "_create_and_copy_vdisk_data: Falha ao obter atributos para o vdisk %s." msgid "_create_host failed to return the host name." msgstr "_create_host falhou ao retornar o nome do host." msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: Não é possível converter o nome do host. O nome do host não é " "unicode ou vazia." msgid "_create_host: No connector ports." msgstr "_create_host: Nenhuma porta de conector." msgid "_create_local_cloned_volume, Replication Service not found." msgstr "_create_local_cloned_volume, Serviço de Replicação não localizado." #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, instância do volume de serviço: %(source_volume)s, " "instância do volume de destino: %(target_volume)s, Código de retorno: " "%(rc)lu, Erro: %(errordesc)s." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - não foi localizada uma mensagem de êxito na saída " "da CLI.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgid "_create_volume_name, id_code is None." msgstr "_create_volume_name, id_code é Nenhum." msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession, Não é possível localizar o Serviço de Replicação" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession, tipo de sessão de cópia é indefinido! Sessão de cópia: " "%(cpsession)s, tipo de cópia: %(copytype)s." #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." 
msgstr "" "_delete_copysession, copysession: %(cpsession)s, operação: %(operation)s, " "Código de retorno: %(rc)lu, Erro: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume, volumename: %(volumename)s, Código de Retorno: %(rc)lu, " "Erro: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "_delete_volume, volumename: %(volumename)s, Serviço de Configuração de " "Armazenamento não localizado." #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, não é " "possível conectar-se ao ETERNUS." msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "" "_extend_volume_op: Não é suportado estender um volume com capturas " "instantâneas. " #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, conector: %(connector)s, Associadores: " "FUJITSU_AuthorizedTarget, não é possível conectar-se ao ETERNUS." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, não " "é possível conectar-se ao ETERNUS." #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, não é possível conectar-se ao ETERNUS." #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, Não " "é possível conectar-se ao ETERNUS." #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, não " "é possível conectar-se ao ETERNUS." #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" "_find_initiator_names, connector: %(connector)s, iniciador não localizado." #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, não é " "possível conectar-se ao ETERNUS." #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, não é " "possível conectar-se ao ETERNUS." #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, dados são Nenhum! " "Edite o arquivo de configuração do driver e corrija." #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." 
msgstr "" "_get_eternus_connection, nome do arquivo: %(filename)s, IP: %(ip)s, porta: " "%(port)s, usuário: %(user)s, senha: ****, URL: %(url)s, FALHA!!." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn não " "localizado." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, não é possível conectar-se ao ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "não é possível conectar-se ao ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, não é " "possível conectar-se ao ETERNUS." #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: cabeçalhos e valores de atributos não correspondem.\n" " Cabeçalhos: %(header)s\n" " Valores: %(row)s." msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "_get_host_from_connector falhou ao retornar o nome do host para o conector." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, falha ao obter afinidade de host a partir do aglist/" "vol_instance, affinitygroup: %(ag)s, ReferenceNames, não é possível conectar-" "se ao ETERNUS." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, falha ao obter instância de afinidade de host, volmap: " "%(volmap)s, GetInstance, não é possível conectar-se ao ETERNUS." msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, Associadores: FUJITSU_SAPAvailableForElement, não é " "possível conectar-se ao ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, não é possível " "conectar-se ao ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, não é possível conectar-se ao ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, não é possível conectar-" "se ao ETERNUS." msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "" "_get_target_port, EnumerateInstances, não é possível conectar-se ao ETERNUS." #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "_get_target_port, protcolo: %(protocol)s, target_port não localizado." 
#, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "" "_get_unmanaged_replay: Não é possível localizar a captura instantânea " "denominada %s" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay: Não é possível localizar o ID do volume %s" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: Deve-se especificar source-name." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties: Não foi possível obter informações de conexão FC " "para a conexão do volume do host. O host está configurado adequadamente para " "as conexões FC?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: Nenhum nó localizado no grupo de E/S %(gid)s para " "o volume %(vol)s." #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, iniciator: %(initiator)s, destino: %(tgt)s, aglist: %(aglist)s, " "Serviço de Configuração de Armazenamento não localizado." #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Serviço de Configuração do " "Controlador não lcoalizado." #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Código de Retorno: %(rc)lu, Erro: %(errordesc)s." #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, não é possível conectar-se ao ETERNUS." msgid "_update_volume_stats: Could not get storage pool data." msgstr "" "_update_volume_stats: Não foi possível obter dados do conjunto de " "armazenamento." #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete, cpsession: %(cpsession)s, o estado de sessão de " "cópia é BROKEN." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "add_vdisk_copy falhou: Uma cópia de volume %s existe. Incluir outra cópia " "excederia o limite de 2 cópias." msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "add_vdisk_copy iniciado sem uma cópia vdisk no conjunto esperado." #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants deve ser um booleano, obtido '%s'." 
msgid "already created" msgstr "já criado" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "anexar captura instantânea do nó remoto" #, python-format msgid "attribute %s not lazy-loadable" msgstr "o atributo %s não realiza carregamento demorado" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s falhou ao criar link físico do dispositivo a partir de " "%(vpath)s para %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s falhou ao obter notificação de sucesso de backup do " "servidor.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s falhou ao executar o dsmc devido a argumentos inválidos " "em %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s falhou ao executar o dsmc em %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "backup: %(vol_id)s falhou. %(path)s não é um arquivo." #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "backup: %(vol_id)s falhou. %(path)s é tipo de arquivo inesperado. Bloco ou " "arquivos regulares suportados, modo de arquivo real é %(vol_mode)s." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "backup: %(vol_id)s falhou. Não é possível obter caminho real para o volume " "em %(path)s." msgid "being attached by different mode" msgstr "sendo anexado por modo diferente" #, python-format msgid "call failed: %r" msgstr "A chamada falhou: %r" msgid "call failed: GARBAGE_ARGS" msgstr "A chamada falhou: GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "A chamada falhou: PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "A chamada falhou: PROG_MISMATCH: %r" msgid "call failed: PROG_UNAVAIL" msgstr "A chamada falhou: PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "não é possível localizar mapa de lun, ig:%(ig)s vol:%(vol)s" msgid "can't find the volume to extend" msgstr "não é possível localizar o volume a ser estendido" msgid "can't handle both name and index in req" msgstr "não é possível lidar com o nome e o índice na solicitação" msgid "cannot understand JSON" msgstr "não é possível entender JSON" msgid "cannot understand XML" msgstr "não é possível entender XML" #, python-format msgid "cg-%s" msgstr "cg-%s" msgid "cgsnapshot assigned" msgstr "cgsnapshot designada" msgid "cgsnapshot changed" msgstr "cgsnapshot alterada" msgid "cgsnapshots assigned" msgstr "cgsnapshots designadas" msgid "cgsnapshots changed" msgstr "cgsnapshots alteradas" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." 
msgstr "" "check_for_setup_error: A senha ou a chave privada SSH é requerida para " "autenticação: configure a opção san_password ou san_private_key." msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error: Não é possível determinar o ID do sistema." msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error: Não é possível determinar o nome do sistema." msgid "check_hypermetro_exist error." msgstr "Erro de check_hypermetro_exist." #, python-format msgid "clone depth exceeds limit of %s" msgstr "a espessura do clone excede o limite de %s" msgid "consistencygroup assigned" msgstr "consistencygroup designado" msgid "consistencygroup changed" msgstr "consistencygroup alterado" msgid "control_location must be defined" msgstr "control_location deve ser definido" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume, o Volume de Origem não existe no ETERNUS." #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume, nome da instância do volume de destino: " "%(volume_instancename)s, Falha ao Obter Instância." msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume: Os tamanhos de origem e destino diferem." #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume: O tamanho do volume de origem %(src_vol)s é " "%(src_size)dGB e não cabe no volume de destino %(tgt_vol)s de tamanho " "%(tgt_size)dGB." msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src deve estar criando a partir de uma captura " "instantânea CG, ou de uma origem CG." msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src suporta somente uma origem cgsnapshot ou " "uma origem de grupo de consistências. Diversas origens não podem ser usadas." msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src suporta uma origem cgsnapshot ou uma origem " "de grupo de consistências. Diversas origens não podem ser usadas." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: O vdisk de origem %(src)s (%(src_id)s) não existe." #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy: Vdisk de origem %(src)s não existe." msgid "create_host: Host name is not unicode or string." msgstr "create_host: Nome do host não é unicode ou sequência." msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: Nenhum inicializador ou wwpns fornecido." msgid "create_hypermetro_pair error." msgstr "Erro de create_hypermetro_pair." #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" "create_snapshot, eternus_pool: %(eternus_pool)s, conjunto não localizado." 
#, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot, snapshotname: %(snapshotname)s, nome do volume de origem: " "%(volumename)s, vol_instance.path: %(vol_instance)s, nome do volume de " "destino: %(d_volumename)s, conjunto: %(pool)s, Código de retorno: %(rc)lu, " "Erro: %(errordesc)s." #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot, volumename: %(s_volumename)s, volume de origem não " "localizado no ETERNUS." #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "" "create_snapshot, volumename: %(volumename)s, Serviço de Replicação não " "localizado." #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot: O status do volume deve ser \"disponível \" ou \"em uso\" " "para captura instantânea. O status inválido é %s." msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: obter volume de origem falhou." #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume, volume: %(volume)s, EnumerateInstances, não é possível " "conectar-se ao ETERNUS." #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Serviço de Configuração de Armazenamento não localizado." #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Código de retorno: %(rc)lu, Erro: %(errordesc)s." msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "create_volume_from_snapshot, o Volume de Origem não existe no ETERNUS." #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot, nome da instância do volume de destino: " "%(volume_instancename)s, Falha ao Obter Instância." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" "create_volume_from_snapshot: A captura instantânea %(name)s não existe." #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: O status da captura instantânea deve ser " "\"disponível\" para volume de criação. O status inválido é: %s." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "create_volume_from_snapshot: Os tamanhos de origem e destino diferem." msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "create_volume_from_snapshot: O tamanho do volume é diferente do volume " "baseado em captura instantânea." msgid "deduplicated and auto tiering can't be both enabled." 
msgstr "não é possível ativar ambas as camadas, a deduplicada e a automática." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "exclusão: %(vol_id)s falhou ao executar o dsmc devido a argumentos inválidos " "com stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "exclusão: %(vol_id)s falhou ao executar o dsmc com stdout: %(out)s\n" " stderr: %(err)s" msgid "delete_hypermetro error." msgstr "Erro de delete_hypermetro." #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator: ACL %s não localizada. Continuando." msgid "delete_replication error." msgstr "Erro de delete_replication." #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" "excluindo captura instantânea %(snapshot_name)s que possui volumes " "dependentes" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "excluindo o volume %(volume_name)s que possui captura instantânea" msgid "detach snapshot from remote node" msgstr "remover captura instantânea do nó remoto" msgid "do_setup: No configured nodes." msgstr "do_setup: Nenhum nó configurado." msgid "element is not a child" msgstr "o elemento não é um filho" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries deve ser maior ou igual a 0" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "erro ao gravar objeto no swift; o MD5 do objeto no swift %(etag)s não é o " "mesmo enviado ao swift %(md5)s" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" "extend_volume, eternus_pool: %(eternus_pool)s, conjunto não localizado." #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Serviço de Configuração de Armazenamento não localizado." #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume, volumename: %(volumename)s, Código de Retorno: %(rc)lu, Erro: " "%(errordesc)s, PoolType: %(pooltype)s." #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "extend_volume, volumename: %(volumename)s, volume não localizado." msgid "failed to create new_volume on destination host" msgstr "falha ao criar new_volume no host de destino" msgid "fake" msgstr "falso" #, python-format msgid "file already exists at %s" msgstr "o arquivo já existe em %s" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "fileno não é suportado pelo SheepdogIOWrapper" msgid "fileno() not supported by RBD()" msgstr "fileno() não suportado por RBD()" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "O sistema de arquivos %s não existe no dispositivo Nexenta Store." msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." 
msgstr "" "flashsystem_multihostmap_enabled está configurado para False, não permite " "vários mapeamentos de host. CMMVC6071E O mapeamento de VDisk para host não " "foi criado porque o VDisk já está mapeado para um host." msgid "flush() not supported in this version of librbd" msgstr "flush() não suportado nesta versão de librbd" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s retornado por: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s retornado por: %(backing_file)s" msgid "force delete" msgstr "forçar exclusão" msgid "get_hyper_domain_id error." msgstr "Erro de get_hyper_domain_id." msgid "get_hypermetro_by_id error." msgstr "Erro de get_hypermetro_by_id." #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params: Falha ao obter o IP de destino para o inicializador " "%(ini)s, verifique o arquivo de configuração." #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: Falha ao obter atributos para o volume %s" msgid "glance_metadata changed" msgstr "glance_metadata alterado" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode está configurada para copy_on_write, mas %(vol)s e " "%(img)s pertencem a sistemas de arquivos diferentes." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode está configurada para copy_on_write, mas %(vol)s e " "%(img)s pertencem a diferentes conjuntos." #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "hgst_group %(grp)s e hgst_user %(usr)s devem ser mapeados para usuários/" "grupos válidos em cinder.conf" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "hgst_net %(net)s especificado em cinder.conf não localizado no cluster" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "hgst_redundancy deve ser configurado para 0 (não HA) ou 1 (HA) em cinder." "conf." msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "hgst_space_mode must deve ser um octal/int em cinder.conf" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "O servidor %(svr)s hgst_storage não é do formato :" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "hgst_storage_servers deve ser definido em cinder.conf" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "O serviço http foi desativado abruptamente ou pode ter sido colocado em " "estado de manutenção no meio dessa operação." msgid "id cannot be None" msgstr "id não pode ser Nenhum" #, python-format msgid "image %s not found" msgstr "imagem %s não localizada" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "initialize_connection, volume: %(volume)s, Volume não localizado." #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "initialize_connection: Falha ao obter atributos para o volume %s." #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." 
msgstr "initialize_connection: Atributo de volume ausente para o volume %s." #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: Nenhum nó localizado no grupo de E/S %(gid)s para o " "volume %(vol)s." #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: vdisk %s não está definido." #, python-format msgid "invalid user '%s'" msgstr "usuário inválido '%s'" #, python-format msgid "iscsi portal, %s, not found" msgstr "portal iscsi, %s, não localizado" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "iscsi_ip_address deve ser definido no arquivo de configuração ao usar " "protocolo 'iSCSI'." msgid "iscsiadm execution failed. " msgstr "A execução do iscsiadm falhou. " #, python-format msgid "key manager error: %(reason)s" msgstr "Erro do gerenciador de chaves: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key não definido" msgid "limit param must be an integer" msgstr "o parâmetro limit deve ser um número inteiro" msgid "limit param must be positive" msgstr "o parâmetro limit deve ser positivo" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "O manage_existing não pode gerenciar um volume conectado aos hosts. " "Desconecte esse volume dos hosts existentes antes de importar" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing requer uma chave de 'nome' para identificar um volume " "existente." #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot: Erro ao gerenciar a reprodução existente %(ss)s no " "volume %(vol)s" #, python-format msgid "marker [%s] not found" msgstr "marcador [%s] não localizado" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "mdiskgrp não tem aspas %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "migration_policy deve ser 'on-demand' ou 'never', transmitido: %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "mkfs falhou no volume %(vol)s, mensagem de erro foi: %(err)s." msgid "mock" msgstr "simulado" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs não está instalado" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "vários recursos com o nome %s localizado como drbdmanage" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "vários recursos com ID de captura instantânea %s localizado" msgid "name cannot be None" msgstr "o nome não pode ser Nenhum" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "" "naviseccli_path: Não foi possível localizar a ferramenta NAVISECCLI %(path)s." 
#, python-format msgid "no REPLY but %r" msgstr "Nenhuma REPLY, mas %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "nenhuma captura instantânea com ID %s localizada no drbdmanage" #, python-format msgid "not exactly one snapshot with id %s" msgstr "não exatamente uma captura instantânea com ID %s" #, python-format msgid "not exactly one volume with id %s" msgstr "não exatamente um volume com o ID %s" #, python-format msgid "obj missing quotes %s" msgstr "o objeto não tem aspas %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled não está desativado." msgid "progress must be an integer percentage" msgstr "progresso deve ser uma porcentagem de número inteiro" msgid "promote_replica not implemented." msgstr "promote_replica não implementado." msgid "provider must be defined" msgstr "provider deve ser definido" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "qemu-img %(minimum_version)s ou posterior é necessário para este driver de " "volume. Qemu-img versão atual: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img não está instalado e a imagem é do tipo %s. Apenas imagens RAW " "podem ser usadas se qemu-img não estiver instalado." msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img não está instalado e o formato do disco não está especificado. " "Apenas imagens RAW podem ser usadas se qemu-img não estiver instalado." msgid "rados and rbd python libraries not found" msgstr "bibliotecas Python rados e rbd não localizadas" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted pode ser apenas um de 'no', 'yes' ou 'only', não %r" #, python-format msgid "replication_device should be configured on backend: %s." msgstr "replication_device deve ser configurado no backend: %s." #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "replication_device com backend_id [%s] está ausente." #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover falhou. %s não localizado." msgid "replication_failover failed. Backend not configured for failover" msgstr "replication_failover falhou. Backend não configurado para failover" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "restauração: %(vol_id)s falhou ao executar o dsmc devido a argumentos " "inválidos em %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "restauração: %(vol_id)s falhou ao executar o dsmc em %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "restauração: %(vol_id)s falhou.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup interrompido, a lista de objetos real não corresponde à lista " "de objetos armazenada nos metadados." 
msgid "root element selecting a list" msgstr "elemento-raiz selecionando uma lista" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "rtslib_fb é um membro ausente %s: você pode precisar de um python-rtslib-fb " "mais novo." msgid "san_ip is not set." msgstr "san_ip não está configurado." msgid "san_ip must be set" msgstr "san_ip deve ser configurado" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip: Configuração de campo obrigatória. san_ip não foi configurado." msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "san_login e/ou san_password não está configurado para o driver Datera no " "cinder.conf. Configure estas informações e inicie o serviço cinder-volume " "novamente." msgid "serve() can only be called once" msgstr "serve() pode ser chamado apenas uma vez" msgid "service not found" msgstr "serviço não encontrado" msgid "snapshot does not exist" msgstr "a captura instantânea não existe" #, python-format msgid "snapshot id:%s not found" msgstr "ID da captura instantânea:%s não localizado" #, python-format msgid "snapshot-%s" msgstr "captura instantânea-%s" msgid "snapshots assigned" msgstr "capturas instantâneas designadas" msgid "snapshots changed" msgstr "capturas instantâneas alteradas" #, python-format msgid "source vol id:%s not found" msgstr "ID do vol. de origem:%s não localizado" #, python-format msgid "source volume id:%s is not replicated" msgstr "ID do volume de origem:%s não é replicado" msgid "source-name cannot be empty." msgstr "O source-name não pode estar vazio." msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "O formato source-name deve ser: 'vmdk_path@vm_inventory_path'." #, python-format msgid "status must be %s and" msgstr "status deve ser %s e" msgid "status must be available" msgstr "o status deve estar disponível" msgid "stop_hypermetro error." msgstr "Erro de stop_hypermetro." msgid "subclasses must implement construct()!" msgstr "as subclasses devem implementar a construção()!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo falhou, continuando como se nada tivesse acontecido" msgid "sync_hypermetro error." msgstr "Erro de sync_hypermetro." msgid "sync_replica not implemented." msgstr "sync_replica não implementado." #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli não instalado e não pôde criar o diretório padrão " "(%(default_path)s): %(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "terminate_connection: Falha ao obter o nome do host do conector." 
msgid "timeout creating new_volume on destination host" msgstr "tempo limite ao criar new_volume no host de destino" msgid "too many body keys" msgstr "excesso de chaves de corpo" #, python-format msgid "umount: %s: not mounted" msgstr "umount: %s: não montado" #, python-format msgid "umount: %s: target is busy" msgstr "umount: %s: o destino está ocupado" msgid "umount: : some other error" msgstr "umount: : algum outro erro" msgid "umount: : target is busy" msgstr "umount: : o destino está ocupado" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "" "unmanage_snapshot: Não é possível localizar a captura instantânea denominada " "%s" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot: Não é possível localizar o ID do volume %s" #, python-format msgid "unrecognized argument %s" msgstr "argumento não reconhecido %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "algoritmo de compressão não suportado: %s" msgid "valid iqn needed for show_target" msgstr "iqn válido necessário para show_target" #, python-format msgid "vdisk %s is not defined." msgstr "o vdisk %s não está definido." msgid "vmemclient python library not found" msgstr "biblioteca python vmemclient não localizada" #, python-format msgid "volume %s not found in drbdmanage" msgstr "volume %s não localizado no drbdmanage" msgid "volume assigned" msgstr "volume designado" msgid "volume changed" msgstr "volume alterado" msgid "volume does not exist" msgstr "o volume não existe" msgid "volume is already attached" msgstr "o volume já está conectado" msgid "volume is not local to this node" msgstr "o volume não é local para este nó" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "O tamanho do volume %(volume_size)d é muito pequeno para restaurar o backup " "do tamanho %(size)d." #, python-format msgid "volume size %d is invalid." msgstr "O tamanho do volume %d é inválido." msgid "volume_type cannot be None" msgstr "O volume_type não pode ser Nenhum" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "volume_type deve ser fornecido ao criar um volume em um grupo de " "consistências." msgid "volume_type_id cannot be None" msgstr "volume_type_id não pode ser Nenhum" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" "volume_types deve ser fornecido para criar o grupo de consistência %(name)s." #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "" "volume_types deve ser fornecido para criar o grupo de consistências %s." msgid "volumes assigned" msgstr "volumes designados" msgid "volumes changed" msgstr "volumes alterados" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition: %s atingiu tempo limite." #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "" "A propriedade zfssa_manage_policy precisa ser configurada para 'strict' ou " "'loose'. O valor atual é: %s." msgid "{} is not a valid option." msgstr "{} não é uma opção válida." 
cinder-8.0.0/cinder/locale/it/0000775000567000056710000000000012701406543017245 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/it/LC_MESSAGES/0000775000567000056710000000000012701406543021032 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/it/LC_MESSAGES/cinder-log-error.po0000664000567000056710000035270512701406250024553 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # OpenStack Infra , 2015. #zanata # Alessandra , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev10\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-17 18:05+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-14 12:54+0000\n" "Last-Translator: Alessandra \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume %(volume_name)s. " "Failed to remove from new volume set %(new_vvs)s." msgstr "" "%(exception)s: Eccezione durante il ripristino di retype per il volume " "%(volume_name)s. Impossibile rimuovere dal nuovo insieme di volumi " "%(new_vvs)s." #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume %(volume_name)s. " "Original volume set/QOS settings may not have been fully restored." msgstr "" "%(exception)s: Eccezione durante il ripristino di retype per il volume " "%(volume_name)s. Le impostazioni configurate/QOS del volume originale " "potrebbero non essere state ripristinate correttamente." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" "stdout: %(out)s\n" "stderr: %(err)s\n" msgstr "" "%(fun)s: non riuscito con un output CLI imprevisto.\n" " Comando: %(cmd)s\n" "stdout: %(out)s\n" "stderr: %(err)s\n" #, python-format msgid "" "%(method)s %(url)s unexpected response status: %(response)s (expects: " "%(expects)s)." msgstr "" "%(method)s %(url)s, stato risposta imprevisto: %(response)s (previsto: " "%(expects)s)." #, python-format msgid "%(name)s: %(value)s" msgstr "%(name)s: %(value)s" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" msgstr "" "'%(value)s' non è un valore valido per la specifica supplementare '%(key)s'" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" "Una destinazione secondaria valida DEVE essere specificata per poter " "eseguire il failover." #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting create_snapshot operation!" msgstr "" "L'account per l'ID volume %s non è stato trovato nel cluster SolidFire nel " "tentativo di eseguire l'operazione create_snapshot." #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting delete_volume operation!" msgstr "" "L'account per l'ID volume %s non è stato trovato nel cluster SolidFire nel " "tentativo di eseguire l'operazione delete_volume." #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting unmanage operation!" 
msgstr "" "L'account per l'ID volume %s non è stato trovato nel cluster SolidFire nel " "tentativo di eseguire l'operazione di annullamento gestione." #, python-format msgid "AddHba error: %(wwn)s to %(srvname)s" msgstr "Errore AddHba: %(wwn)s su %(srvname)s" #, python-format msgid "Array Serial Number must be in the file %(fileName)s." msgstr "Il numero di serie dell'array deve essere nel file %(fileName)s." #, python-format msgid "Array mismatch %(myid)s vs %(arid)s" msgstr "Mancata corrispondenza dell'array %(myid)s vs %(arid)s" #, python-format msgid "Array query failed - No response (%d)!" msgstr "Query dell'array non riuscita - Nessuna risposta (%d)." msgid "Array query failed. No capabilities in response!" msgstr "Query dell'array non riuscita - Nessuna funzionalità nella risposta." msgid "Array query failed. No controllers in response!" msgstr "Query dell'array non riuscita. Nessun controller nella risposta." msgid "Array query failed. No global id in XML response!" msgstr "Query dell'array non riuscita. Nessun ID globale nella risposta XML." msgid "Attaching snapshot from a remote node is not supported." msgstr "Collegamento istantanea da un nodo remoto non supportato." #, python-format msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." msgstr "" "Autorizzazione della richiesta: %(zfssaurl)s nuovo tentativo: %(retry)d ." msgid "Backend returned err for lun export." msgstr "Il backend ha restituito un errore per l'esportazione della lun." #, python-format msgid "Backup id %s is not invalid. Skipping reset." msgstr "ID backup %s non valido. Reimpostazione ignorata." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Il servizio di backup %(configured_service)s non supporta la verifica. L'ID " "backup %(id)s non è verificato. Ignorare la verifica." #, python-format msgid "Backup volume metadata failed: %s." msgstr "Impossibile eseguire il backup dei metadati del volume : %s." #, python-format msgid "Bad response from server: %(url)s. Error: %(err)s" msgstr "Risposta errata dal server: %(url)s. Errore: %(err)s" #, python-format msgid "" "CG snapshot %(cgsnap)s not found when creating consistency group %(cg)s from " "source." msgstr "" "Istantanea CG %(cgsnap)s non trovata durante la creazione del gruppo di " "coerenza %(cg)s dall'origine." #, python-format msgid "" "CLI fail: '%(cmd)s' = %(code)s\n" "out: %(stdout)s\n" "err: %(stderr)s" msgstr "" "Errore CLI: '%(cmd)s' = %(code)s\n" "out: %(stdout)s\n" "err: %(stderr)s" msgid "Call to Nova delete snapshot failed" msgstr "Chiamata a Nova per eliminare l'istantanea non riuscita" msgid "Call to Nova to create snapshot failed" msgstr "Chiamata a Nova per creare l'istantanea non riuscita" #, python-format msgid "Call to json.loads() raised an exception: %s." msgstr "La chiamata a json.loads() ha generato un'eccezione: %s." #, python-format msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s." msgstr "" "Impossibile aggiungere la lun %(lun)s al gruppo di coerenza %(cg_name)s." #, python-format msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." msgstr "" "Impossibile eseguire il rilevamento in %(target_ip)s con %(target_iqn)s." msgid "Can not open the recent url, login again." msgstr "Impossibile aprire l'url recente, rieseguire il login." #, python-format msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s." 
msgstr "" "Impossibile inserire le nuove LUN %(luns)s nel gruppo di coerenza " "%(cg_name)s." #, python-format msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." msgstr "" "Impossibile rimuovere le LUN %(luns)s nel gruppo di coerenza %(cg_name)s." #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "Impossibile trovare il volume da associare %(key)s, %(msg)s" msgid "Can't open the recent url, relogin." msgstr "Impossibile aprire l'url recente, rieseguire il login." #, python-format msgid "" "Cannot add and verify tier policy association for storage group : " "%(storageGroupName)s to FAST policy : %(fastPolicyName)s." msgstr "" "Impossibile aggiungere e verificare l'associazione della politica di " "livellamento per il gruppo di archiviazione: %(storageGroupName)s alla " "politica FAST: %(fastPolicyName)s." #, python-format msgid "Cannot clone image %(image)s to volume %(volume)s. Error: %(error)s." msgstr "" "Impossibile clonare l'immagine %(image)s sul volume %(volume)s. Errore: " "%(error)s." #, python-format msgid "Cannot create or find an initiator group with name %(igGroupName)s." msgstr "" "Impossibile creare o trovare un gruppo di iniziatori di nome %(igGroupName)s." #, python-format msgid "Cannot delete file %s." msgstr "Impossibile eliminare il file %s." msgid "Cannot detect replica status." msgstr "Impossibile rilevare lo stato della replica." msgid "Cannot determine if Tiering Policies are supported." msgstr "Impossibile stabilire se le politiche di livellamento sono supportate." msgid "Cannot determine whether Tiering Policy is supported on this array." msgstr "" "Impossibile stabilire se la politica di livellamento è supportata su questo " "array." #, python-format msgid "Cannot find Consistency Group %s" msgstr "Impossibile trovare il gruppo di coerenza %s" #, python-format msgid "" "Cannot find a portGroup with name %(pgGroupName)s. The port group for a " "masking view must be pre-defined." msgstr "" "Impossibile trovare un gruppo di porte di nome %(pgGroupName)s. Il gruppo di " "porte per una vista di mascheramento deve essere predefinito." #, python-format msgid "Cannot find the fast policy %(fastPolicyName)s." msgstr "Impossibile trovare la politica FAST %(fastPolicyName)s." #, python-format msgid "" "Cannot find the new masking view just created with name %(maskingViewName)s." msgstr "" "Impossibile trovare la nuova vista di mascheramento appena creata di nome " "%(maskingViewName)s." #, python-format msgid "Cannot get QoS spec for volume %s." msgstr "Impossibile ottenere la specifica QoS spec per il volume %s." #, python-format msgid "Cannot get port group from masking view: %(maskingViewName)s. " msgstr "" "Impossibile ottenere il gruppo di porte dalla vista di mascheramento: " "%(maskingViewName)s. " msgid "Cannot get port group name." msgstr "Impossibile ottenere il nome del gruppo di porte." #, python-format msgid "Cannot get storage Group from job : %(storageGroupName)s." msgstr "" "Impossibile ottenere il gruppo di archiviazione dal lavoro: " "%(storageGroupName)s." msgid "Cannot get storage system." msgstr "Impossibile ottenere il sistema di archivio." 
#, python-format msgid "Caught error: %(type)s %(error)s" msgstr "Intercettato errore: %(type)s %(error)s" #, python-format msgid "" "Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s" msgstr "" "Modifica del nome volume da %(tmp)s a %(orig)s non riuscita a causa di " "%(reason)s" #, python-format msgid "" "Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s." msgstr "" "Modifica del nome volume da %(tmp)s a %(orig)s non riuscita a causa di " "%(reason)s" #, python-format msgid "Clone %s not in prepared state!" msgstr "Clone %s non in stato preparato." #, python-format msgid "Clone Volume:%(volume)s failed from source volume:%(src_vref)s" msgstr "" "Clonazione del volume:%(volume)s non riuscita dal volume di origine:" "%(src_vref)s" #, python-format msgid "" "Clone volume \"%s\" already exists. Please check the results of \"dog vdi " "list\"." msgstr "" "Il volume clone \"%s\" esiste già. Controllare i risultati di \"dog vdi list" "\"." #, python-format msgid "Cloning of volume %s failed." msgstr "Clonazione del volume %s non riuscita." #, python-format msgid "" "CloudByte does not have a volume corresponding to OpenStack volume [%s]." msgstr "CloudByte non ha un volume corrispondente al volume OpenStack [%s]." #, python-format msgid "" "CloudByte operation [%(operation)s] failed for volume [%(vol)s]. Exhausted " "all [%(max)s] attempts." msgstr "" "Operazione CloudByte [%(operation)s] non riuscita per il volume [%(vol)s]. " "Esauriti tutti i [%(max)s] tentativi." #, python-format msgid "" "CloudByte snapshot information is not available for OpenStack volume [%s]." msgstr "" "Informazioni sull'istantanea CloudByte non disponibili per il volume " "OpenStack [%s]." #, python-format msgid "CloudByte volume information not available for OpenStack volume [%s]." msgstr "" "Informazioni sul volume CloudByte non disponibili per il volume OpenStack " "[%s]." #, python-format msgid "Cmd :%s" msgstr "Cmd :%s" #, python-format msgid "Commit clone failed: %(name)s (%(status)d)!" msgstr "Commit del clone non riuscito: %(name)s (%(status)d)." #, python-format msgid "Commit failed for %s!" msgstr "Commit non riuscito per %s." #, python-format msgid "Compute cluster: %s not found." msgstr "Cluster di calcolo: %s non trovato." #, python-format msgid "Configuration value %s is not set." msgstr "Valore di configurazione %s non impostato." #, python-format msgid "Conflict detected in Virtual Volume Set %(volume_set)s: %(error)s" msgstr "" "Conflitto rilevato nell'insieme di volumi virtuali %(volume_set)s: %(error)s" #, python-format msgid "Connect to Flexvisor error: %s." msgstr "Errore di connessione a Flexvisor: %s." #, python-format msgid "Connect to Flexvisor failed: %s." msgstr "Connessione a Flexvisor non riuscita: %s." msgid "Connection error while sending a heartbeat to coordination backend." msgstr "" "Errore di connessione durante l'invio di un heartbeat al backend di " "coordinazione." #, python-format msgid "Connection to %s failed and no secondary!" msgstr "Connessione a %s non riuscita e nessun elemento secondario." #, python-format msgid "Consistency group %s: create failed" msgstr "Creazione del gruppo di coerenza %s: non riuscita" #, python-format msgid "Controller GET failed (%d)" msgstr "Comando GET del controller non riuscito (%d)" #, python-format msgid "Copy offload workflow unsuccessful. %s" msgstr "Copia del carico di lavoro offload non eseguita correttamente. 
%s" #, python-format msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" msgstr "" "La copia dell'istantanea sul volume per l'istantanea %(snap)s volume %(vol)s " "non è riuscita." #, python-format msgid "Could not GET allocation information (%d)!" msgstr "" "Impossibile eseguire il comando GET per le informazioni sull'allocazione " "(%d)." #, python-format msgid "Could not calculate node utilization for node %s." msgstr "Non è stato possibile calcolare l'utilizzo del nodo per il nodo %s" #, python-format msgid "Could not connect to %(primary)s or %(secondary)s!" msgstr "Impossibile connettersi a %(primary)s o %(secondary)s." #, python-format msgid "Could not create snapshot set. Error: '%s'" msgstr "Impossibile creare la serie di istantanee. Errore: '%s'" msgid "Could not decode scheduler options." msgstr "Impossibile decodificare le opzioni dello scheduler." #, python-format msgid "Could not delete failed image volume %(id)s." msgstr "Impossibile eliminare il volume dell'immagine non riuscita %(id)s." #, python-format msgid "Could not delete the image volume %(id)s." msgstr "Impossibile eliminare il volume dell'immagine %(id)s." #, python-format msgid "" "Could not do delete of snapshot %s on filer, falling back to exec of \"rm\" " "command." msgstr "" "Non è stato possibile eliminare l'istantanea %s sul filer, fallback " "nell'esecuzione del comando \"rm\"." #, python-format msgid "" "Could not do delete of volume %s on filer, falling back to exec of \"rm\" " "command." msgstr "" "Non è stato possibile eliminare il volume %s sul filer, fallback " "nell'esecuzione del comando \"rm\"." #, python-format msgid "Could not find a host for consistency group %(group_id)s." msgstr "Impossibile trovare un host per il gruppo di coerenza %(group_id)s." #, python-format msgid "Could not find any hosts (%s)" msgstr "Impossibile trovare gli host (%s)" #, python-format msgid "" "Could not find port group : %(portGroupName)s. Check that the EMC " "configuration file has the correct port group name." msgstr "" "Impossibile trovare il gruppo di porte: %(portGroupName)s. Verificare che il " "file di configurazione EMC presenti il nome del gruppo di porte corretto." #, python-format msgid "Could not find volume with name %(name)s. Error: %(error)s" msgstr "Impossibile trovare il volume di nome %(name)s. Errore: %(error)s" msgid "" "Could not get performance base counter name. Performance-based scheduler " "functions may not be available." msgstr "" "Non è stato possibile ottenere il nome del contatore di base delle " "prestazioni. Le funzioni dello scheduler basato sulle prestazioni potrebbero " "non essere disponibili." #, python-format msgid "Could not get utilization counters from node %s" msgstr "Non è stato possibile ottenere i contatori di utilizzo dal nodo %s" #, python-format msgid "Could not log in to 3PAR array (%s) with the provided credentials." msgstr "" "Non è stato possibile accedere all'array 3PAR (%s) con le credenziali " "fornite." #, python-format msgid "Could not log in to LeftHand array (%s) with the provided credentials." msgstr "" "Non è stato possibile accedere all'array LeftHand (%s) con le credenziali " "fornite." #, python-format msgid "Could not stat scheduler options file %(filename)s." msgstr "" "Impossibile avviare il file delle opzioni dello scheduler %(filename)s." #, python-format msgid "Could not validate device %s" msgstr "Impossibile convalidare il dispositivo %s" #, python-format msgid "Create cg snapshot %s failed." 
msgstr "Creazione dell'istantanea cg %s non riuscita." #, python-format msgid "" "Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " "(Exception: %(except)s)" msgstr "" "Creazione di clone_image_volume: %(volume_id)s per l'immagine %(image_id)s, " "non riuscita (Eccezione: %(except)s)" #, python-format msgid "Create consistency group %s failed." msgstr "Creazione del gruppo di coerenza %s non riuscita." #, python-format msgid "" "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." msgstr "" "Creazione del gruppo di coerenza dall'istantanea -%(snap)s non riuscita: " "SnapshotNotFound." #, python-format msgid "Create consistency group from source %(source)s failed." msgstr "Creazione del gruppo di coerenza dall'origine %(source)s non riuscita." #, python-format msgid "" "Create consistency group from source cg-%(cg)s failed: " "ConsistencyGroupNotFound." msgstr "" "Creazione del gruppo di coerenza dall'origine cg-%(cg)s non riuscita: " "ConsistencyGroupNotFound." #, python-format msgid "Create hypermetro error: %s." msgstr "Errore di creazione hypermetro: %s." #, python-format msgid "" "Create new lun from lun for source %(src)s => destination %(dest)s failed!" msgstr "" "La creazione della nuova lun dalla lun per l'origine %(src)s => destinazione " "%(dest)s non è riuscita." #, python-format msgid "Create pair failed. Error: %s." msgstr "Creazione coppia non riuscita. Errore: %s." msgid "Create replication volume error." msgstr "Errore di creazione del volume di replica." #, python-format msgid "Create snapshot notification failed: %s" msgstr "Notifica di creazione istantanea non riuscita: %s" #, python-format msgid "Create volume failed from snapshot: %s" msgstr "Creazione del volume non riuscita dall'istantanea: %s" #, python-format msgid "Create volume notification failed: %s" msgstr "Notifica di creazione volume non riuscita: %s" #, python-format msgid "Creation of snapshot failed for volume: %s" msgstr "Creazione dell'istantanea non riuscita per il volume: %s" #, python-format msgid "Creation of volume %s failed." msgstr "Creazione del volume %s non riuscita." msgid "" "Creation request failed. Please verify the extra-specs set for your volume " "types are entered correctly." msgstr "" "Richiesta di creazione non riuscita. Verificare che le specifiche " "supplementari impostate per i tipi di volume siano immesse correttamente." msgid "DB error:" msgstr "Errore DB:" #, python-format msgid "DBError detected when purging from table=%(table)s" msgstr "DBError rilevato durante l'analisi dalla tabella=%(table)s" msgid "DBError encountered: " msgstr "Rilevato DBError: " msgid "DRBDmanage: too many assignments returned." msgstr "DRBDmanage: restituite troppe assegnazioni." msgid "Default Storage Profile was not found." msgstr "Profilo di memoria predefinito non trovato." msgid "" "Default volume type is not found. Please check default_volume_type config:" msgstr "" "Impossibile trovare il tipo di volume predefinito. Controllare la " "configurazione default_volume_type:" #, python-format msgid "Delete cgsnapshot %s failed." msgstr "Eliminazione dell'istantanea %s non riuscita." #, python-format msgid "Delete consistency group %s failed." msgstr "Eliminazione del gruppo di coerenza %s non riuscita." msgid "Delete consistency group failed to update usages." msgstr "" "L'eliminazione del gruppo di coerenza non è riuscita ad aggiornare gli " "utilizzi." #, python-format msgid "Delete hypermetro error: %s." msgstr "Errore di eliminazione hypermetro: %s." 
msgid "Delete replication error." msgstr "Errore di eliminazione replica." msgid "Delete snapshot failed, due to snapshot busy." msgstr "Eliminazione istantanea non riuscita, per istantanea occupata." #, python-format msgid "Delete snapshot notification failed: %s" msgstr "Notifica di eliminazione istantanea non riuscita: %s" #, python-format msgid "Delete volume notification failed: %s" msgstr "Notifica di eliminazione volume non riuscita: %s" #, python-format msgid "Deleting snapshot %s failed" msgstr "Eliminazione dell'istantanea %s non riuscita" #, python-format msgid "Deleting zone failed %s" msgstr "Eliminazione della zona non riuscita %s" #, python-format msgid "Deletion of volume %s failed." msgstr "Eliminazione del volume %s non riuscita." #, python-format msgid "Destination Volume Group %s does not exist" msgstr "Il gruppo volumi di destinazione %s non esiste" #, python-format msgid "Detach attachment %(attach_id)s failed." msgstr "Scollegamento collegamento %(attach_id)s non riuscito." #, python-format msgid "Detach migration source volume failed: %(err)s" msgstr "" "Scollegamento del volume di origine della migrazione non riuscito: %(err)s" msgid "Detach volume failed, due to remove-export failure." msgstr "" "Scollegamento volume non riuscito, a causa di un errore di rimozione " "esportazione." msgid "Detach volume failed, due to uninitialized driver." msgstr "" "Scollegamento volume non riuscito, a causa del driver non inizializzato." msgid "Detaching snapshot from a remote node is not supported." msgstr "Scollegamento istantanea da un nodo remoto non supportato." #, python-format msgid "Did not find expected column name in lsvdisk: %s." msgstr "Impossibile trovare il nome colonna previsto in lsvdisk: %s." msgid "Differential restore failed, trying full restore" msgstr "" "Ripristino differenziale non riuscito, viene tentato il ripristino completo" #, python-format msgid "Disable replication on volume failed with message: %s" msgstr "Disabilitazione replica sul volume non riuscita con il messaggio: %s " #, python-format msgid "Disconnection failed with message: %(msg)s." msgstr "Scollegamento non riuscito con il messaggio: %(msg)s." msgid "Driver reported error during replication failover." msgstr "Il driver ha riportato un errore durante il failover della replica." #, python-format msgid "" "Driver-based migration of volume %(vol)s failed. Move from %(src)s to " "%(dst)s failed with error: %(error)s." msgstr "" "Migrazione basata sul driver del volume %(vol)s non riuscita. Spostamento da " "%(src)s a %(dst)s non riuscito con errore: %(error)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Errore di collegamento del volume %(vol)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore nella creazione del gruppo: %(groupName)s. Codice di ritorno: " "%(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s of Volume: %(lun)s in Pool: %(pool)s, " "Project: %(project)s Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Errore durante l'acquisizione dell'istantanea: %(snapshot)s del volume: " "%(lun)s nel pool: %(pool)s, Progetto: %(project)s Codice di ritorno: " "%(ret.status)d, Messaggio: %(ret.data)s." #, python-format msgid "Error JSONDecodeError. %s" msgstr "Errore JSONDecodeError. 
%s" #, python-format msgid "" "Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool: " "%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Errore durante l'impostazione del volume: %(lun)s nel gruppo iniziatori: " "%(initiatorgroup)s Pool: %(pool)s Progetto: %(project)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "Error TypeError. %s" msgstr "Errore TypeError. %s" msgid "Error activating LV" msgstr "Errore durante l'attivazione di LV" msgid "Error adding HBA to server" msgstr "Errore durante l'aggiunta di HBA al server" #, python-format msgid "Error attaching volume %s" msgstr "Errore durante il collegamento del volume %s" #, python-format msgid "Error changing Storage Profile for volume %(original)s to %(name)s" msgstr "" "Errore durante la modifica del profilo di memoria per il volume %(original)s " "in %(name)s." #, python-format msgid "Error cleaning up failed volume creation. Msg - %s." msgstr "" "Errore durante la ripulitura della creazione del volume non riuscita. " "Messaggio - %s." msgid "Error cloning volume" msgstr "Errore durante la clonazione del volume" msgid "Error closing channel." msgstr "Errore di chiusura del canale." #, python-format msgid "" "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" "Errore durante il tentativo di contattare il server glance '%(netloc)s' per " "'%(method)s', %(extra)s." msgid "Error copying key." msgstr "Errore durante la copia della chiave." msgid "Error creating Barbican client." msgstr "Errore durante la creazione del client Barbican." #, python-format msgid "Error creating QOS rule %s" msgstr "Errore durante la creazione della regola QOS %s" msgid "Error creating Volume" msgstr "Errore durante la creazione del volume" msgid "Error creating Volume Group" msgstr "Errore durante la creazione del gruppo volumi" msgid "Error creating chap record." msgstr "Errore durante la creazione del record chap." msgid "Error creating cloned volume" msgstr "Errore durante la creazione del volume clonato" msgid "Error creating key." msgstr "Errore durante la creazione della chiave." msgid "Error creating snapshot" msgstr "Errore durante la creazione dell'istantanea" msgid "Error creating volume" msgstr "Errore durante la creazione del volume" #, python-format msgid "Error creating volume. Msg - %s." msgstr "Errore durante la creazione del volume. Messaggio - %s." msgid "Error deactivating LV" msgstr "Errore durante la disattivazione di LV" msgid "Error deleting key." msgstr "Errore durante l'eliminazione della chiave." msgid "Error deleting snapshot" msgstr "Errore durante l'eliminazione dell'istantanea" msgid "Error deleting volume" msgstr "Errore durante l'eliminazione del volume" #, python-format msgid "Error detaching snapshot %(snapshot)s, due to remove export failure." msgstr "" "Errore durante lo scollegamento dell'istantanea %(snapshot)s, causato da un " "errore di rimozione dell'esportazione." #, python-format msgid "Error detaching volume %(volume)s, due to remove export failure." msgstr "" "Errore durante lo scollegamento del volume %(volume)s, causato da un errore " "di rimozione dell'esportazione." #, python-format msgid "Error detaching volume %s" msgstr "Errore durante lo scollegamento del volume %s" #, python-format msgid "Error disassociating storage group from policy: %s." 
msgstr "" "Errore durante la disassociazione del gruppo di archiviazione dalla " "politica: %s" msgid "Error during re-export on driver init." msgstr "" "Errore durante la riesportazione durante l'inizializzazione del driver." #, python-format msgid "" "Error encountered during failover on host: %(host)s invalid target ID " "%(backend_id)" msgstr "" "Si è verificato un errore durante il failover sull'host: %(host)s ID " "destinazione non valido %(backend_id)" msgid "" "Error encountered on Cinder backend during thaw operation, service will " "remain frozen." msgstr "" "Si è verificato un errore sul backend Cinder durante l'operazione di " "sblocco, il servizio resterà bloccato." msgid "Error executing SSH command." msgstr "Errore durante l'esecuzione del comando SSH." msgid "Error executing command via ssh." msgstr "Errore di esecuzione del comando tramite ssh." #, python-format msgid "Error executing command via ssh: %s" msgstr "Errore di esecuzione comando tramite ssh: %s" #, python-format msgid "Error expanding volume %s." msgstr "Errore durante l'espansione del volume %s." msgid "Error extending Volume" msgstr "Errore durante l'estensione del volume" msgid "Error extending volume" msgstr "Errore durante l'estensione del volume" #, python-format msgid "Error extending volume %(id)s. Ex: %(ex)s" msgstr "Errore durante l'estensione del volume: %(id)s. Eccezione: %(ex)s" #, python-format msgid "Error extending volume: %(vol)s. Exception: %(ex)s" msgstr "Errore durante l'estensione del volume: %(vol)s. Eccezione: %(ex)s" #, python-format msgid "Error finding replicated pg snapshot on %(secondary)s." msgstr "" "Errore durante la ricerca dell'istantanea pg replicata su %(secondary)s." #, python-format msgid "Error finding target pool instance name for pool: %(targetPoolName)s." msgstr "" "Errore durante la ricerca del nome dell'istanza del pool di destinazione per " "il pool: %(targetPoolName)s." #, python-format msgid "Error getting FaultDomainList for %s" msgstr "Errore durante il richiamo di FaultDomainList per %s" #, python-format msgid "Error getting LUN attribute. Exception: %s" msgstr "Errore durante il richiamo dell'attributo LUN. Eccezione: %s" msgid "Error getting active FC target ports." msgstr "Errore durante il richiamo di porte di destinazione FC attive." msgid "Error getting active ISCSI target iqns." msgstr "Errore durante il richiamo di iqns di destinazione ISCSI attivi." msgid "Error getting active ISCSI target portals." msgstr "Errore durante il richiamo di portali destinazione ISCSI attivi." msgid "Error getting array, pool, SLO and workload." msgstr "Errore durante il richiamo di array, pool, SLO e carico di lavoro." msgid "Error getting chap record." msgstr "Errore durante il richiamo del record chap." #, python-format msgid "Error getting iSCSI target info from EVS %(evs)s." msgstr "" "Errore durante il richiamo delle informazioni sulla destinazione iSCSI da " "EVS %(evs)s." msgid "Error getting key." msgstr "Errore durante il richiamo della chiave." msgid "Error getting name server info." msgstr "Errore durante il richiamo delle informazioni sul server dei nomi. " msgid "Error getting secret data." msgstr "Errore durante il richiamo dei dati segreti." msgid "Error getting secret metadata." msgstr "Errore durante il richiamo dei metadati segreti." msgid "Error getting show fcns database info." msgstr "Impossibile ottenere le informazioni sul database show fcns. " msgid "Error getting target pool name and array." 
msgstr "" "Errore durante il richiamo del nome del pool di destinazione e dell'array." #, python-format msgid "Error happened during storage pool querying, %s." msgstr "" "Si è verificato un errore durante la query del pool di archiviazione, %s." #, python-format msgid "Error has occurred: %s" msgstr "Si è verificato un errore: %s" #, python-format msgid "Error in copying volume: %s" msgstr "Errore durante la copia del volume: %s" #, python-format msgid "" "Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d " "with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" msgstr "" "Errore durante l'estensione della dimensione del volume: Volume: %(volume)s " "Dim_Vol: %(vol_size)d con Istantanea: %(snapshot)s Dim_Istant: %(snap_size)d" #, python-format msgid "Error in workflow copy from cache. %s." msgstr "Errore nella copia del carico di lavoro dalla cache. %s." #, python-format msgid "Error invalid json: %s" msgstr "Errore di json non valido: %s" msgid "Error manage existing get volume size." msgstr "Errore di gestione della dimensione del volume get esistente." msgid "Error manage existing volume." msgstr "Errore di gestione del volume esistente." #, python-format msgid "Error managing replay %s" msgstr "Errore durante la gestione della risposta %s. " msgid "Error mapping VDisk-to-host" msgstr "Errore durante l'associazione di VDisk a host" #, python-format msgid "Error mapping volume: %s" msgstr "Errore durante l'associazione del volume: %s. " #, python-format msgid "" "Error migrating volume: %(volumename)s. to target pool %(targetPoolName)s." msgstr "" "Errore durante la migrazione del volume: %(volumename)s.nel pool di " "destinazione %(targetPoolName)s." #, python-format msgid "Error migrating volume: %s" msgstr "Errore durante la migrazione del volume: %s" #, python-format msgid "" "Error occurred in the volume driver when updating consistency group " "%(group_id)s." msgstr "" "Si è verificato un errore nel driver del volume durante l'aggiornamento del " "gruppo di coerenza %(group_id)s." msgid "" "Error occurred when adding hostgroup and lungroup to view. Remove lun from " "lungroup now." msgstr "" "Si è verificato un errore durante l'aggiunta del gruppo di host e del gruppo " "di lun alla vista. Rimuovere la lun dal gruppo di lun ora." #, python-format msgid "" "Error occurred when building request spec list for consistency group %s." msgstr "" "Si è verificato un errore durante la creazione dell'elenco di specifiche " "delle richieste per il gruppo di coerenza %s." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "Si è verificato un errore durante la creazione di cgsnapshot %s." #, python-format msgid "" "Error occurred when creating cloned volume in the process of creating " "consistency group %(group)s from source CG %(source_cg)s." msgstr "" "Si è verificato un errore durante la creazione del volume clonato nel " "processo di creazione del gruppo di coerenza %(group)s dalla CG di origine " "%(source_cg)s." #, python-format msgid "" "Error occurred when creating consistency group %(cg)s from cgsnapshot " "%(cgsnap)s." msgstr "" "Si è verificato un errore durante la creazione del gruppo di coerenza %(cg)s " "dall'istantanea %(cgsnap)s." #, python-format msgid "" "Error occurred when creating consistency group %(group)s from cgsnapshot " "%(cgsnap)s." msgstr "" "Si è verificato un errore durante la creazione del gruppo di coerenza " "%(group)s dall'istantanea %(cgsnap)s." 
#, python-format msgid "" "Error occurred when creating consistency group %(group)s from source CG " "%(source_cg)s." msgstr "" "Si è verificato un errore durante la creazione del gruppo di coerenza " "%(group)s dalla CG di origine %(source_cg)s." #, python-format msgid "Error occurred when creating consistency group %s." msgstr "" "Si è verificato un errore durante la creazione del gruppo di coerenza %s." #, python-format msgid "" "Error occurred when creating volume entry from snapshot in the process of " "creating consistency group %(group)s from cgsnapshot %(cgsnap)s." msgstr "" "Si è verificato un errore durante la creazione della voce volume " "dall'istantanea nel processo di creazione del gruppo di coerenza %(group)s " "dall'istantanea %(cgsnap)s." #, python-format msgid "Error occurred when updating consistency group %(group_id)s." msgstr "" "Si è verificato un errore durante l'aggiornamento del gruppo di coerenza " "%(group_id)s." #, python-format msgid "Error occurred while cloning backing: %s during retype." msgstr "" "Si è verificato un errore durante la clonazione del backup : %s durante la " "riscrittura." #, python-format msgid "Error occurred while copying %(src)s to %(dst)s." msgstr "Si è verificato un errore durante la copia di %(src)s in %(dst)s." #, python-format msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." msgstr "" "Si è verificato un errore durante la copia dell'immagine: %(id)s nel volume: " "%(vol)s." #, python-format msgid "Error occurred while copying image: %(image_id)s to %(path)s." msgstr "" "Si è verificato un errore durante la copia dell'immagine: %(image_id)s in " "%(path)s." msgid "Error occurred while creating temporary backing." msgstr "Si è verificato un errore durante la creazione del backup temporaneo." #, python-format msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." msgstr "" "Si è verificato un errore durante la creazione del volume: %(id)s " "dall'immagine: %(image_id)s." msgid "Error occurred while selecting datastore." msgstr "Si è verificato un errore durante la selezione del datastore." #, python-format msgid "Error on adding lun to consistency group. %s" msgstr "Errore nell'aggiunta della lun al gruppo di coerenza. %s" #, python-format msgid "Error on enable compression on lun %s." msgstr "Errore nell'abilitazione della compressione nella lun %s" #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" msgstr "" "Errore nell'esecuzione di %(command)s. Codice di errore: %(exit_code)d " "Messaggio di errore: %(result)s" #, python-format msgid "" "Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" msgstr "" "Errore nell'esecuzione del comando. Codice di errore: %(exit_code)d " "Messaggio di errore: %(result)s" msgid "Error parsing array from host capabilities." msgstr "Errore durante l'analisi dell'array dalle funzionalità host." msgid "Error parsing array, pool, SLO and workload." msgstr "Errore durante l'analisi di array, pool, SLO e carico di lavoro." msgid "Error parsing target pool name, array, and fast policy." msgstr "" "Errore durante l'analisi del nome del pool di destinazione, dell'array e " "della politica fast." #, python-format msgid "" "Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s" msgstr "" "Errore durante il provisioning del volume %(lun_name)s su %(volume_name)s. 
" "Dettagli: %(ex)s" msgid "Error querying thin pool about data_percent" msgstr "Errore durante la query del pool thin su data_percent" msgid "Error renaming logical volume" msgstr "Errore durante la ridenominazione del volume logico" #, python-format msgid "Error renaming volume %(original)s to %(name)s" msgstr "Errore durante la ridenominazione del volume %(original)s in %(name)s." #, python-format msgid "Error resolving host %(host)s. Error - %(e)s." msgstr "Errore durante la risoluzione dell'host %(host)s. Errore - %(e)s." #, python-format msgid "Error retrieving LUN %(vol)s number" msgstr "Errore durante il richiamo del numero di LUN %(vol)s" #, python-format msgid "Error running SSH command: \"%s\"." msgstr "Errore durante l'esecuzione del comando SSH: \"%s\"." #, python-format msgid "Error running SSH command: %s" msgstr "Errore durante l'esecuzione del comando SSH: %s" msgid "Error running command." msgstr "Errore durante l'esecuzione del comando." #, python-format msgid "" "Error scheduling %(volume_id)s from last vol-service: %(last_host)s : %(exc)s" msgstr "" "Errore durante la pianificazione di %(volume_id)s dall'ultimo vol-service: " "%(last_host)s : %(exc)s" msgid "Error sending a heartbeat to coordination backend." msgstr "Errore durante l'invio di un heartbeat al backend di coordinazione." #, python-format msgid "Error setting Flash Cache policy to %s - exception" msgstr "" "Errore durante l'impostazione della politica Flash Cache su %s - eccezione" msgid "Error starting coordination backend." msgstr "Errore durante l'avvio del backend di coordinazione." msgid "Error storing key." msgstr "Errore durante l'archiviazione della chiave." #, python-format msgid "Error trying to change %(opt)s from %(old)s to %(new)s" msgstr "Errore durante il tentativo di modificare %(opt)s da %(old)s a %(new)s" #, python-format msgid "Error unmanaging replay %s" msgstr "Errore durante l'annullamento della gestione della risposta %s. " #, python-format msgid "Error unmapping volume: %s" msgstr "Errore durante l'annullamento dell'associazione del volume: %s. " #, python-format msgid "Error verifying LUN container %(bkt)s" msgstr "Errore durante la verifica del contatore LUN %(bkt)s" #, python-format msgid "Error verifying iSCSI service %(serv)s on host %(hst)s" msgstr "" "Errore durante la verifica del servizio iSCSI %(serv)s sull'host %(hst)s" msgid "Error: unable to snap replay" msgstr "Errore: impossibile eseguire l'istantanea della risposta" #, python-format msgid "Exception cloning volume %(name)s from source volume %(source)s." msgstr "" "Eccezione durante la clonazione del volume %(name)s dal volume di origine " "%(source)s." #, python-format msgid "Exception creating LUN %(name)s in pool %(pool)s." msgstr "Eccezione durante la creazione della LUN %(name)s nel pool %(pool)s." #, python-format msgid "Exception creating vol %(name)s on pool %(pool)s." msgstr "Eccezione durante la creazione del volume %(name)s nel pool %(pool)s." #, python-format msgid "" "Exception creating volume %(name)s from source %(source)s on share %(share)s." msgstr "" "Eccezione durante la creazione del volume %(name)s dall'origine %(source)s " "sulla condivisione %(share)s.." #, python-format msgid "Exception details: %s" msgstr "Dettagli eccezione: %s" #, python-format msgid "Exception during mounting %s" msgstr "Eccezione durante il montaggio di %s" #, python-format msgid "Exception during mounting %s." msgstr "Eccezione durante il montaggio di %s." msgid "Exception during mounting." 
msgstr "Eccezione durante il montaggio." #, python-format msgid "Exception during snapCPG revert: %s" msgstr "Eccezione durante il ripristino snapCPG: %s" msgid "Exception encountered: " msgstr "Rilevata eccezione:" #, python-format msgid "Exception handling resource: %s" msgstr "Eccezione durante la gestione della risorsa: %s" msgid "Exception in string format operation" msgstr "Eccezione nell'operazione di formattazione della stringa" msgid "Exception loading extension." msgstr "Eccezione durante il caricamento dell'estensione." #, python-format msgid "Exception: %(ex)s" msgstr "Eccezione: %(ex)s" #, python-format msgid "Exception: %s" msgstr "Eccezione: %s" #, python-format msgid "Exception: %s." msgstr "Eccezione: %s." #, python-format msgid "Exec of \"rm\" command on backing file for %s was unsuccessful." msgstr "" "L'esecuzione del comando \"rm\" nel file di backup per %s non è stata " "eseguita correttamente." #, python-format msgid "Exists snapshot notification failed: %s" msgstr "Notifica di esistenza istantanea non riuscita: %s" #, python-format msgid "Exists volume notification failed: %s" msgstr "Notifica di esistenza volume non riuscita: %s" msgid "Extend volume failed." msgstr "Estensione del volume non riuscita." #, python-format msgid "Extension of volume %s failed." msgstr "Estensione del volume %s non riuscita" msgid "" "Extra spec replication:mode must be set and must be either 'sync' or " "'periodic'." msgstr "" "La specifica supplementare replication:mode deve essere impostata e deve " "essere 'sync' o 'periodic'." msgid "" "Extra spec replication:sync_period must be greater than 299 and less than " "31622401 seconds." msgstr "" "La specifica supplementare replication:sync_period deve essere maggiore di " "299 e minore di 31622401 secondi." #, python-format msgid "Extra specs must be specified as capabilities:%s=' True'." msgstr "" "Le specifiche supplementari devono essere specificate come funzionalità:" "%s=' True'." msgid "" "Extra specs must be specified as replication_type=' sync' or ' " "async'." msgstr "" "Le specifiche supplementari devono essere specificate come " "replication_type=' sync' o ' async'." msgid "FAST is not supported on this array." msgstr "FAST non è supportato su questo array." #, python-format msgid "Failed collecting fcns database info for fabric %s" msgstr "" "Impossibile raccogliere le informazioni sul database fcns per fabric %s" #, python-format msgid "Failed collecting name server info from fabric %s" msgstr "" "Impossibile raccogliere le informazioni sul server dei nomi da fabric %s" msgid "Failed collecting nscamshow" msgstr "Impossibile raccogliere nscamshow" msgid "Failed collecting nsshow info for fabric" msgstr "Impossibile raccogliere le informazioni nsshow per fabric" #, python-format msgid "Failed collecting nsshow info for fabric %s" msgstr "Impossibile raccogliere le informazioni nsshow per fabric %s" msgid "Failed collecting show fcns database for fabric" msgstr "Errore durante la raccolta del database show fcns per fabric" #, python-format msgid "Failed destroying volume entry %s" msgstr "Impossibile distruggere la voce del volume %s" #, python-format msgid "Failed destroying volume entry: %s." 
msgstr "Impossibile distruggere la voce del volume: %s" #, python-format msgid "" "Failed fetching snapshot %(snapshot_id)s bootable flag using the provided " "glance snapshot %(snapshot_ref_id)s volume reference" msgstr "" "Impossibile aggiornare il flag avviabile dell'istantanea %(snapshot_id)s " "utilizzando il riferimento del volume dell'istantanea glance fornita " "%(snapshot_ref_id)s" #, python-format msgid "Failed getting active zone set from fabric %s" msgstr "Impossibile ottenere l'insieme di zone attive da fabric %s" #, python-format msgid "Failed getting zone status from fabric %s" msgstr "Impossibile ottenere lo stato della zona da fabric %s" #, python-format msgid "Failed image conversion during cache creation: %s" msgstr "Conversione immagine non riuscita durante la creazione della cache: %s" #, python-format msgid "" "Failed notifying about the snapshot action %(event)s for snapshot %(snp_id)s." msgstr "" "Impossibile inviare una notifica sull'azione %(event)s dell'istantanea per " "l'istantanea %(snp_id)s." #, python-format msgid "" "Failed notifying about the volume action %(event)s for volume %(volume_id)s" msgstr "" "Impossibile inviare una notifica sull'azione %(event)s del volume per il " "volume %(volume_id)s" #, python-format msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "Impossibile eseguire la notifica su %(topic)s payload %(payload)s" #, python-format msgid "" "Failed recovery attempt to create iscsi backing lun for Volume ID:" "%(vol_id)s: %(e)s" msgstr "" "Tentativo di recupero per creare la lun di backup iscsi per l'ID volume:" "%(vol_id)s: %(e)s non riuscito" #, python-format msgid "Failed rolling back quota for %s reservations" msgstr "Impossibile eseguire il rollback della quota per %s prenotazioni" #, python-format msgid "Failed rolling back quota for %s reservations." msgstr "Impossibile eseguire il rollback della quota per %s prenotazioni." #, python-format msgid "" "Failed setting source volume %(source_volid)s back to its initial " "%(source_status)s status" msgstr "" "Impossibile impostare il volume di origine %(source_volid)s sullo stato " "iniziale %(source_status)s" #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage group " "for fast policy %(fastPolicyName)s. Please contact your sysadmin to get the " "volume returned to the default storage group." msgstr "" "Impossibile eseguire il rollback per aggiungere nuovamente il volume " "%(volumeName)s al gruppo di archiviazione predefinito per la politica FAST " "%(fastPolicyName)s. Contattare l'amministratore di sistema per fare in modo " "che il volume venga restituito al gruppo di archiviazione predefinito." #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage group " "for fast policy %(fastPolicyName)s: Please contact your sys admin to get the " "volume re-added manually." msgstr "" "Impossibile eseguire il rollback per aggiungere nuovamente il volume " "%(volumeName)s al gruppo di archiviazione predefinito per la politica FAST " "%(fastPolicyName)s. Contattare l'amministratore di sistema per fare in modo " "che il volume venga aggiunto di nuovo manualmente." #, python-format msgid "" "Failed to add %(volumeName)s to default storage group for fast policy " "%(fastPolicyName)s." msgstr "" "Impossibile aggiungere %(volumeName)s al gruppo di archiviazione predefinito " "per la politica fast %(fastPolicyName)s." #, python-format msgid "Failed to add %s to cg." 
msgstr "Impossibile aggiungere %s a cg." #, python-format msgid "Failed to add device to handler %s" msgstr "Impossibile aggiungere il dispositivo all'handler %s" #, python-format msgid "Failed to add initiator iqn %s to target" msgstr "Impossibile aggiungere l'iniziatore iqn %s alla destinazione" #, python-format msgid "Failed to add initiator to group for SCST target %s" msgstr "" "Impossibile aggiungere l'iniziatore al gruppo per la destinazione SCST %s" #, python-format msgid "Failed to add lun to SCST target id:%(vol_id)s: %(e)s" msgstr "" "Impossibile aggiungere la lun all'id di destinazione SCST:%(vol_id)s: %(e)s" #, python-format msgid "Failed to add multihost-access for volume \"%s\"." msgstr "Impossibile aggiungere l'accesso multihost per il volume \"%s\"." #, python-format msgid "" "Failed to add storage group %(storageGroupInstanceName)s to tier policy rule " "%(tierPolicyRuleInstanceName)s." msgstr "" "Impossibile aggiungere il gruppo di archiviazione " "%(storageGroupInstanceName)s alla regola delle politiche di livellamento " "%(tierPolicyRuleInstanceName)s." #, python-format msgid "Failed to add target(port: %s)" msgstr "Impossibile aggiungere la destinazione (port: %s)" msgid "Failed to apply replication:activereplay setting" msgstr "Impossibile applicare l'impostazione replication:activereplay" msgid "Failed to attach source volume for copy." msgstr "Impossibile collegare il volume di origine per la copia." #, python-format msgid "Failed to attach volume %(vol)s." msgstr "Impossibile collegare il volume %(vol)s." msgid "Failed to authenticate user." msgstr "Impossibile autenticare l'utente." #, python-format msgid "Failed to check cluster status.(command: %s)" msgstr "Impossibile verificare lo stato del cluster.(command: %s)" #, python-format msgid "Failed to clone image volume %(id)s." msgstr "Impossibile clonare il volume dell'immagine %(id)s." #, python-format msgid "Failed to clone volume %(volume_id)s for image %(image_id)s." msgstr "" "Impossibile clonare il volume %(volume_id)s per l'immagine %(image_id)s." #, python-format msgid "Failed to clone volume.(command: %s)" msgstr "Impossibile clonare il volume.(command: %s)" #, python-format msgid "Failed to close disk device %s" msgstr "Impossibile chiudere il dispositivo disco %s" #, python-format msgid "" "Failed to collect return properties for volume %(vol)s and connector " "%(conn)s." msgstr "" "Impossibile raccogliere le proprietà di ritorno per il volume %(vol)s e il " "connettore %(conn)s." #, python-format msgid "Failed to commit reservations %s" msgstr "Impossibile eseguire il commit delle prenotazioni %s" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" "Impossibile connettersi al daemon sheep, indirizzo: %(addr)s, porta: %(port)s" #, python-format msgid "Failed to copy %(src)s to %(dest)s." msgstr "Impossibile copiare %(src)s su %(dest)s. " #, python-format msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" msgstr "Impossibile copiare l'immagine %(image_id)s sul volume: %(volume_id)s" #, python-format msgid "Failed to copy image to volume: %(volume_id)s" msgstr "Impossibile copiare l'immagine sul volume: %(volume_id)s" #, python-format msgid "Failed to copy volume %(src)s to %(dest)s." msgstr "Impossibile copiare il volume %(src)s su %(dest)s. 
" #, python-format msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "Impossibile copiare il volume %(vol1)s su %(vol2)s" #, python-format msgid "Failed to create %(conf)s for volume id:%(vol_id)s" msgstr "Impossibile creare %(conf)s per l'id volume:%(vol_id)s" #, python-format msgid "Failed to create CG from CGsnapshot. Exception: %s" msgstr "Impossibile creare CG da CGsnapshot. Eccezione: %s." #, python-format msgid "Failed to create CGSnapshot. Exception: %s." msgstr "Impossibile creare CGSnapshot. Eccezione: %s." msgid "" "Failed to create SOAP client.Check san_ip, username, password and make sure " "the array version is compatible" msgstr "" "Impossibile creare il cliente SOAP. Controllare san_ip, nome utente, " "password e verificare che la versione array sia compatibile" #, python-format msgid "" "Failed to create a first volume for storage group : %(storageGroupName)s." msgstr "" "Impossibile creare un primo volume per il gruppo di archiviazione: " "%(storageGroupName)s." #, python-format msgid "Failed to create blkio cgroup '%(name)s'." msgstr "Impossibile creare blkio cgroup '%(name)s'." #, python-format msgid "Failed to create clone of volume \"%s\"." msgstr "Impossibile creare il clone del volume \"%s\"." #, python-format msgid "Failed to create cloned volume %s." msgstr "Impossibile creare il volume clonato %s." #, python-format msgid "Failed to create consistency group %(group_id)s." msgstr "Impossibile creare il gruppo di coerenza %(group_id)s." #, python-format msgid "" "Failed to create default storage group for FAST policy : %(fastPolicyName)s." msgstr "" "Impossibile creare il gruppo di archiviazione predefinito per la politica " "FAST: %(fastPolicyName)s." #, python-format msgid "Failed to create group to SCST target %s" msgstr "Impossibile creare la destinazione SCST del gruppo %s" #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "Impossibile creare l'ID hardware su %(storageSystemName)s." #, python-format msgid "" "Failed to create iscsi target for Volume ID: %(vol_id)s. Please ensure your " "tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" "Impossibile creare la destinazione iscsi per l'ID volume: %(vol_id)s. " "Verificare che il file di configurazione tgtd contenga 'include " "%(volumes_dir)s/*'" #, python-format msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "" "Impossibile creare la destinazione iscsi per l'ID volume: %(vol_id)s: %(e)s" #, python-format msgid "" "Failed to create iscsi target for volume id:%(vol_id)s. Please verify your " "configuration in %(volumes_dir)s'" msgstr "" "Impossibile creare la destinazione iscsi per l'id volume:%(vol_id)s. " "Verificare la configurazione in %(volumes_dir)s'" #, python-format msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" "Impossibile creare la destinazione iscsi per l'id volume:%(vol_id)s: %(e)s" #, python-format msgid "Failed to create iscsi target for volume id:%s" msgstr "Impossibile creare la destinazione iscsi per l'id volume:%s" #, python-format msgid "Failed to create iscsi target for volume id:%s." msgstr "Impossibile creare la destinazione iscsi per l'id volume:%s." #, python-format msgid "Failed to create manage_existing flow: %(object_type)s %(object_id)s." msgstr "" "Impossibile creare il flusso manage_existing: %(object_type)s %(object_id)s." #, python-format msgid "Failed to create snapshot of volume \"%s\"." msgstr "Impossibile creare l'istantanea del volume \"%s\"." 
#, python-format msgid "Failed to create snapshot. (command: %s)" msgstr "Impossibile creare l'istantanea. (comando: %s)" #, python-format msgid "Failed to create transfer record for %s" msgstr "Impossibile creare il record di trasferimento per %s" #, python-format msgid "Failed to create volume \"%s\"." msgstr "Impossibile creare il volume \"%s\"." #, python-format msgid "Failed to create volume %s" msgstr "Impossibile creare il volume %s" #, python-format msgid "Failed to create volume %s." msgstr "Impossibile creare il volume %s." #, python-format msgid "Failed to create volume from snapshot \"%s\"." msgstr "Impossibile creare il volume dall'istantanea \"%s\"." #, python-format msgid "Failed to create volume. %s" msgstr "Impossibile creare il volume. %s" #, python-format msgid "Failed to create volume: %(name)s (%(status)s)" msgstr "Impossibile creare il volume: %(name)s (%(status)s)" #, python-format msgid "Failed to created Cinder secure environment indicator file: %s" msgstr "Impossibile creare il file indicatore dell'ambiente sicuro Cinder; %s" #, python-format msgid "Failed to delete initiator iqn %s from target." msgstr "Impossibile eliminare l'iniziatore iqn %s dalla destinazione." #, python-format msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." msgstr "Impossibile eliminare l'istantanea %(snap)s del volume %(vol)s." #, python-format msgid "Failed to delete snapshot. (command: %s)" msgstr "Impossibile eliminare l'istantanea. (comando: %s)" #, python-format msgid "" "Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " "%(exception)s." msgstr "" "Impossibile eliminare l'istantanea %(snap)s di CGSnapshot. Eccezione: " "%(exception)s." #, python-format msgid "" "Failed to delete the snapshot %(snap)s of cgsnapshot: %(cgsnapshot_id)s. " "Exception: %(exception)s." msgstr "" "Impossibile eliminare l'istantanea %(snap)s di cgsnapshot: " "%(cgsnapshot_id)s. Eccezione: %(exception)s." #, python-format msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." msgstr "" "Impossibile eliminare il volume %(vol)s di CG. Eccezione: %(exception)s." #, python-format msgid "Failed to delete volume \"%s\"." msgstr "Impossibile eliminare il volume \"%s\"." #, python-format msgid "Failed to delete volume %s" msgstr "Impossibile eliminare il volume %s" #, python-format msgid "Failed to delete volume. %s" msgstr "Impossibile eliminare il volume. %s" #, python-format msgid "Failed to ensure export of volume \"%s\"." msgstr "Impossibile garantire l'esportazione del volume \"%s\"." #, python-format msgid "Failed to ensure export of volume %s" msgstr "Impossibile garantire l'esportazione del volume %s" #, python-format msgid "Failed to export fiber channel target due to %s" msgstr "Impossibile esportare la destinazione fiber channel a causa di %s" #, python-format msgid "Failed to extend volume: %(vol)s to size: %(size)s GB." msgstr "" "Impossibile estendere il volume: %(vol)s alla dimensione di: %(size)s GB." #, python-format msgid "" "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB." msgstr "" "Impossibile estendere il volume %(name)s da %(current_size)sGB a " "%(new_size)sGB." #, python-format msgid "Failed to failover volume %(volume_id)s to %(target)s: %(error)s." msgstr "" "Impossibile eseguire il failover del volume %(volume_id)s su %(target)s: " "%(error)s." #, python-format msgid "Failed to find %(s)s. Result %(r)s" msgstr "impossibile trovare %(s)s. 
Risultato %(r)s" #, python-format msgid "Failed to find available iSCSI targets for %s." msgstr "Impossibile trovare le destinazioni iSCSI disponibili per %s." msgid "Failed to get IQN!" msgstr "Impossibile ottenere IQN." msgid "Failed to get LUN information!" msgstr "Impossibile ottenere le informazioni sulla LUN." #, python-format msgid "Failed to get allocation information (%d)!" msgstr "Impossibile ottenere le informazioni sull'allocazione (%d)." #, python-format msgid "Failed to get allocation information: %(host)s (%(status)d)!" msgstr "" "Impossibile ottenere le informazioni sull'allocazione: %(host)s (%(status)d)!" #, python-format msgid "Failed to get device number for throttling: %(error)s" msgstr "" "Impossibile ottenere il numero di dispositivo per la regolazione: %(error)s" #, python-format msgid "" "Failed to get driver initiator data for initiator %(initiator)s and " "namespace %(namespace)s" msgstr "" "Impossibile ottenere i dati dell'iniziatore del driver per l'iniziatore " "%(initiator)s e lo spazio dei nomi %(namespace)s" #, python-format msgid "Failed to get fiber channel info from storage due to %(stat)s" msgstr "" "Impossibile ottenere le informazioni fiber channel dalla memoria a causa di " "%(stat)s" #, python-format msgid "Failed to get fiber channel target from storage server due to %(stat)s" msgstr "" "Impossibile ottenere la destinazione fiber channel dal server di memoria a " "causa di %(stat)s" #, python-format msgid "Failed to get or create storage group %(storageGroupName)s." msgstr "" "Impossibile ottenere o creare il gruppo di archiviazione " "%(storageGroupName)s." #, python-format msgid "Failed to get response: %s." msgstr "Impossibile ottenere la risposta: %s." #, python-format msgid "Failed to get server info due to %(state)s." msgstr "Impossibile ottenere le informazioni sul server a causa di %(state)s." msgid "Failed to get sns table" msgstr "Impossibile ottenere la tabella sns" #, python-format msgid "Failed to get target wwpns from storage due to %(stat)s" msgstr "" "Impossibile ottenere i wwpn di destinazione dalla memoria a causa di %(stat)s" msgid "Failed to get updated stats from Datera Cluster." msgstr "Impossibile ottenere le statistiche aggiornate dal cluster Datera." msgid "Failed to get updated stats from Datera cluster." msgstr "Impossibile ottenere le statistiche aggiornate dal cluster Datera." #, python-format msgid "Failed to get volume status. %s" msgstr "Impossibile ottenere lo stato del volume. %s" msgid "Failed to initialize connection" msgstr "Impossibile inizializzare la connessione" #, python-format msgid "Failed to initialize connection to volume \"%s\"." msgstr "Impossibile inizializzare la connessione al volume \"%s\"." msgid "Failed to initialize connection." msgstr "Impossibile inizializzare la connessione." msgid "Failed to initialize driver." msgstr "Impossibile inizializzare il driver." #, python-format msgid "Failed to issue df command for path %(path)s, error: %(error)s." msgstr "" "Impossibile eseguire il comando df per il percorso %(path)s, errore: " "%(error)s." #, python-format msgid "Failed to issue mmgetstate command, error: %s." msgstr "Impossibile eseguire il comando mmgetstate, errore: %s." #, python-format msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." 
msgstr "" "Impossibile eseguire il comando mmlsattr per il percorso %(path)s, errore: " "%(error)s" #, python-format msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" msgstr "" "Impossibile eseguire il comando mmlsattr sul percorso %(path)s, errore: " "%(error)s" #, python-format msgid "Failed to issue mmlsconfig command, error: %s." msgstr "Impossibile eseguire il comando mmlsconfig, errore: %s." #, python-format msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." msgstr "" "Impossibile eseguire il comando mmlsfs per il percorso %(path)s, errore: " "%(error)s." #, python-format msgid "Failed to issue mmlsfs command, error: %s." msgstr "Impossibile eseguire il comando mmlsfs, errore: %s." #, python-format msgid "Failed to load %s" msgstr "Impossibile caricare %s" msgid "Failed to load conder-volume" msgstr "Impossibile caricare conder-volume" msgid "Failed to load osapi_volume" msgstr "Impossibile caricare osapi_volume" #, python-format msgid "Failed to open iet session list for %s" msgstr "Impossibile aprire l'elenco di sessioni iet per %s" #, python-format msgid "Failed to open volume from %(path)s." msgstr "Impossibile aprire il volume da %(path)s." msgid "Failed to perform replication failover" msgstr "Impossibile eseguire il failover della replica" #, python-format msgid "Failed to present volume %(name)s (%(status)d)!" msgstr "Impossibile presentare il volume %(name)s (%(status)d)!" msgid "Failed to query migration status of LUN." msgstr "Impossibile eseguire la query dello stato di migrazione della LUN." msgid "Failed to re-export volume, setting to ERROR." msgstr "Impossibile riesportare il volume, impostazione in ERRORE." #, python-format msgid "Failed to register image volume location %(uri)s." msgstr "Impossibile registrare l'ubicazione del volume dell'immagine %(uri)s." #, python-format msgid "" "Failed to remove %(volumeName)s from the default storage group for the FAST " "Policy." msgstr "" "Impossibile rimuovere: %(volumeName)s dal gruppo di archiviazione " "predefinito per la politica FAST." #, python-format msgid "Failed to remove %s from cg." msgstr "Impossibile rimuovere %s da cg." #, python-format msgid "Failed to remove LUN %s" msgstr "Impossibile rimuovere la LUN %s" #, python-format msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "" "Impossibile rimuovere la destinazione iscsi per l'ID volume: %(vol_id)s: " "%(e)s" #, python-format msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" "Impossibile rimuovere la destinazione iscsi per l'id volume:%(vol_id)s: %(e)s" #, python-format msgid "Failed to remove iscsi target for volume id:%s" msgstr "Impossibile rimuovere la destinazione iscsi per l'id volume:%s" #, python-format msgid "Failed to remove iscsi target for volume id:%s." msgstr "Impossibile rimuovere la destinazione iscsi per l'id volume:%s." #, python-format msgid "Failed to rename %(new_volume)s into %(volume)s." msgstr "Impossibile ridenominare %(new_volume)s in %(volume)s." msgid "Failed to rename the created snapshot, reverting." msgstr "Impossibile ridenominare l'istantanea creata, ripristino." #, python-format msgid "Failed to rename volume %(existing)s to %(new)s. Volume manage failed." msgstr "" "Impossibile ridenominare il volume %(existing)s in %(new)s. Gestione del " "volume non riuscita." #, python-format msgid "" "Failed to rename volume %(existing)s to %(new)s. Volume unmanage failed." 
msgstr "" "Impossibile ridenominare il volume %(existing)s in %(new)s. Annullamento " "della gestione del volume non riuscito." #, python-format msgid "Failed to request async delete of migration source vol %(vol)s: %(err)s" msgstr "" "Impossibile richiedere l'eliminazione asincrona del volume di origine della " "migrazione %(vol)s: %(err)s" #, python-format msgid "" "Failed to resize vdi. Shrinking vdi not supported. vdi: %(vdiname)s new " "size: %(size)s" msgstr "" "Impossibile ridimensionare VDI. Riduzione di VDI non supportata. VDI: " "%(vdiname)s nuova dimensione: %(size)s" #, python-format msgid "" "Failed to resize vdi. Too large volume size. vdi: %(vdiname)s new size: " "%(size)s" msgstr "" "Impossibile ridimensionare VDI. Dimensione volume troppo grande. VDI: " "%(vdiname)s nuova dimensione: %(size)s" #, python-format msgid "Failed to resize vdi. vdi not found. %s" msgstr "Impossibile ridimensionare VDI. VDI non trovata. %s" #, python-format msgid "Failed to resize vdi. vdi: %(vdiname)s new size: %(size)s" msgstr "Impossibile ridimensionare VDI. %(vdiname)s nuova dimensione: %(size)s" #, python-format msgid "Failed to resize volume %(volume_id)s, error: %(error)s." msgstr "Impossibile ridimensionare il volume %(volume_id)s, errore: %(error)s" #, python-format msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "Impossibile recuperare il volume SolidFire-ID: %s in get_by_account!" #, python-format msgid "" "Failed to return volume %(volumeName)s to original storage pool. Please " "contact your system administrator to return it to the correct location." msgstr "" "Impossibile restituire il volume %(volumeName)s al pool di archiviazione " "originale. Contattare l'amministratore di sistema per restituirlo " "all'ubicazione corretta." #, python-format msgid "Failed to roll back reservations %s" msgstr "Impossibile eseguire il rollback delle prenotazioni %s" #, python-format msgid "Failed to run task %(name)s: %(cause)s" msgstr "Impossibile eseguire l'attività %(name)s: %(cause)s" #, python-format msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "Impossibile eseguire schedule_%(method)s: %(ex)s" #, python-format msgid "Failed to send request: %s." msgstr "Impossibile inviare la richiesta: %s." #, python-format msgid "Failed to set 'enable' attribute for SCST target %s" msgstr "Impossibile impostare l'attributo 'enable' per la destinazione SCST %s" #, python-format msgid "Failed to set attribute for enable target driver %s" msgstr "" "Impossibile impostare l'attributo per abilitare il driver di destinazione %s" #, python-format msgid "Failed to set properties for volume %(existing)s. Volume manage failed." msgstr "" "Impossibile impostare le proprietà per il volume %(existing)s. Gestione del " "volume non riuscita." #, python-format msgid "" "Failed to set properties for volume %(existing)s. Volume unmanage failed." msgstr "" "Impossibile impostare le proprietà per il volume %(existing)s. Annullamento " "della gestione del volume non riuscita." msgid "Failed to setup the Dell EqualLogic driver." msgstr "Impossibile configurare il driver Dell EqualLogic." msgid "Failed to shutdown horcm." msgstr "Impossibile chiudere horcm." #, python-format msgid "Failed to snap Consistency Group %s" msgstr "Impossibile eseguire istantanea del gruppo di coerenza %s" msgid "Failed to start horcm." msgstr "Impossibile avviare horcm." 
msgid "Failed to terminate connection" msgstr "Impossibile terminare la connessione" #, python-format msgid "Failed to terminate connection %(initiator)s %(vol)s" msgstr "Impossibile terminare la connessione %(initiator)s %(vol)s" #, python-format msgid "Failed to terminate connection to volume \"%s\"." msgstr "Impossibile terminare la connessione al volume \"%s\"." #, python-format msgid "Failed to umount %(share)s, reason=%(stderr)s" msgstr "Impossibile smontare %(share)s, motivo=%(stderr)s" #, python-format msgid "" "Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " "target" msgstr "" "Impossibile aggiornare %(conf)s per l'id volume %(vol_id)s dopo la rimozione " "della destinazione iscsi" #, python-format msgid "Failed to update %(conf)s for volume id:%(vol_id)s" msgstr "Impossibile aggiornare %(conf)s per l'id volume:%(vol_id)s" #, python-format msgid "" "Failed to update %(volume_id)s metadata using the provided snapshot " "%(snapshot_id)s metadata." msgstr "" "Impossibile aggiornare i metadati %(volume_id)s utilizzando i metadati " "dell'istantanea fornita %(snapshot_id)s." #, python-format msgid "" "Failed to update initiator data for initiator %(initiator)s and backend " "%(backend)s" msgstr "" "Impossibile aggiornare i dati dell'iniziatore per l'iniziatore %(initiator)s " "e il backend %(backend)s" #, python-format msgid "Failed to update quota donating volume transfer id %s" msgstr "" "Impossibile aggiornare la quota che denota l'id di trasferimento del volume " "%s" #, python-format msgid "Failed to update quota for consistency group %s." msgstr "Impossibile aggiornare la quota per il gruppo di coerenza %s." #, python-format msgid "Failed to update quota for deleting volume: %s" msgstr "Impossibile aggiornare la quota per l'eliminazione del volume: %s" #, python-format msgid "Failed to update quota while deleting snapshots: %s" msgstr "" "Impossibile aggiornare la quota durante l'eliminazione delle istantanee: %s" msgid "Failed to update quota while deleting volume." msgstr "Impossibile aggiornare la quota durante l'eliminazione del volume." msgid "Failed to update replay profiles" msgstr "Impossibile aggiornare i profili di risposta" msgid "Failed to update storage profile" msgstr "Impossibile aggiornare il profilo di memoria" msgid "Failed to update usages deleting backup" msgstr "Impossibile aggiornare gli utilizzi eliminando il backup" msgid "Failed to update usages deleting snapshot" msgstr "Impossibile aggiornare gli utilizzi eliminando l'istantanea" msgid "Failed to update usages deleting volume." msgstr "Impossibile aggiornare gli utilizzi eliminando il volume." #, python-format msgid "Failed to update volume status: %s" msgstr "Impossibile aggiornare lo stato del volume: %s" #, python-format msgid "" "Failed to verify that volume was added to storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Impossibile verificare quale volume è stato aggiunto al gruppo di " "archiviazione per la politica FAST: %(fastPolicyName)s." msgid "Failed to write in /etc/scst.conf." msgstr "Impossibile scrivere in /etc/scst.conf." #, python-format msgid "Failed to write persistence file: %(path)s." msgstr "Impossibile scrivere il file di persistenza: %(path)s." 
#, python-format msgid "Failed updating %(object_type)s %(object_id)s with %(update)s" msgstr "Impossibile aggiornare %(object_type)s %(object_id)s con %(update)s" #, python-format msgid "" "Failed updating %(snapshot_id)s metadata using the provided volumes " "%(volume_id)s metadata" msgstr "" "Impossibile aggiornare i metadati dell'istantanea %(snapshot_id)s " "utilizzando i metadati dei volumi forniti %(volume_id)s" #, python-format msgid "" "Failed updating model of snapshot %(snapshot_id)s with creation provided " "model %(model)s." msgstr "" "Impossibile aggiornare il modello di istantanea %(snapshot_id)s con il " "modello fornito dalla creazione %(model)s." #, python-format msgid "" "Failed updating model of snapshot %(snapshot_id)s with driver provided model " "%(model)s." msgstr "" "Impossibile aggiornare il modello di istantanea %(snapshot_id)s con il " "modello driver fornito %(model)s." #, python-format msgid "" "Failed updating model of volume %(volume_id)s with creation provided model " "%(model)s" msgstr "" "Impossibile aggiornare il modello di volume %(volume_id)s con il modello " "fornito dalla creazione %(model)s" #, python-format msgid "" "Failed updating model of volume %(volume_id)s with driver provided model " "%(model)s" msgstr "" "Impossibile aggiornare il modello di volume %(volume_id)s con il modello " "driver fornito %(model)s" #, python-format msgid "Failed updating snapshot %(snapshot_id)s with %(update)s." msgstr "Impossibile aggiornare l'istantanea %(snapshot_id)s con %(update)s." #, python-format msgid "" "Failed updating snapshot metadata using the provided volumes %(volume_id)s " "metadata" msgstr "" "Impossibile aggiornare i metadati dell'istantanea utilizzando i metadati dei " "volumi forniti %(volume_id)s" #, python-format msgid "Failed updating volume %(volume_id)s bootable flag to true" msgstr "" "Impossibile aggiornare il flag avviabile del volume %(volume_id)s su true" #, python-format msgid "Failed updating volume %(volume_id)s with %(update)s" msgstr "Impossibile aggiornare il volume %(volume_id)s con %(update)s" #, python-format msgid "Failed updating volume %(volume_id)s with %(updates)s" msgstr "Impossibile aggiornare il volume %(volume_id)s con %(updates)s" #, python-format msgid "Failure deleting staged tmp LUN %s." msgstr "Errore durante l'eliminazione della LUN temporanea gestita %s." #, python-format msgid "Failure restarting snap vol. Error: %s." msgstr "Errore durante il riavvio del volume snap. Errore: %s." msgid "Fetch volume pool name failed." msgstr "Recupero del nome del pool di volumi non riuscito." #, python-format msgid "" "FibreChannelDriver validate_connector failed. No '%(setting)s'. Make sure " "HBA state is Online." msgstr "" "FibreChannelDriver validate_connector non riuscito. Nessun '%(setting)s'. " "Accertarsi che lo stato HBA sia Online." #, python-format msgid "Flexvisor failed to get event %(volume)s (%(status)s)." msgstr "Flexvisor non è riuscito a ottenere l'evento %(volume)s: (%(status)s)." #, python-format msgid "Flexvisor failed to get pool %(id)s info." msgstr "Flexvisor non è riuscito a ottenere le informazioni sul pool %(id)s." #, python-format msgid "Flexvisor failed to get pool list due to %s." msgstr "Flexvisor non è riuscito a ottenere l'elenco di pool a causa di %s." #, python-format msgid "Flexvisor failed to get pool list.(Error: %d)" msgstr "Flexvisor non è riuscito a ottenere l'elenco di pool. (Errore: %d)" #, python-format msgid "Found %(count)s volumes mapped to id: %(uuid)s." 
msgstr "Trovati %(count)s volumi associati all'id: %(uuid)s." msgid "Free capacity not set: volume node info collection broken." msgstr "" "Capacità libera non impostata: raccolta informazioni sul nodo volumi " "interrotta." #, python-format msgid "GPFS is not active. Detailed output: %s." msgstr "GPFS non è attivo. Output dettagliato: %s." msgid "Get LUN migration error." msgstr "Errore di richiamo migrazione LUN." msgid "Get method error." msgstr "Errore di richiamo metodo. " msgid "Get replication status for volume failed." msgstr "Richiamo dello stato delle replica per il volume non riuscito. " #, python-format msgid "HDP not found: %s" msgstr "HDP non trovato: %s" #, python-format msgid "Host PUT failed (%s)." msgstr "Comando PUT dell'host non riuscito (%s)." msgid "Host could not be found!" msgstr "Impossibile trovare l'host." #, python-format msgid "ISCSI discovery attempt failed for:%s" msgstr "Tentativo di rilevamento ISCSI non riuscito per: %s" msgid "ISE FW version is not compatible with OpenStack!" msgstr "Versione ISE FW non compatibile con OpenStack!" msgid "ISE globalid not set!" msgstr "ID globale ISE non impostato." #, python-format msgid "Image size %(img_size)dGB is larger than volume size %(vol_size)dGB." msgstr "" "La dimensione dell'immagine di %(img_size)dGB è maggiore della dimensione " "del volume di %(vol_size)dGB." #, python-format msgid "Invalid API object: %s" msgstr "Oggetto API non valido: %s" #, python-format msgid "Invalid JSON: %s" msgstr "JSON non valido: %s" #, python-format msgid "Invalid ReplayList return: %s" msgstr "Restituzione ReplayList non valida: %s" #, python-format msgid "Invalid hostname %(host)s" msgstr "Nome host non valido %(host)s" msgid "Invalid replication target specified for failover" msgstr "Destinazione di replica non valida specificata per il failover" #, python-format msgid "Invalid value for %(key)s, value is %(value)s." msgstr "Valore non valido per %(key)s, il valore è %(value)s." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "Esecuzione del failover non riuscita perché la replica non è configurata " "correttamente." #, python-format msgid "JSON encode params %(param)s error: %(status)s." msgstr "Errore dei parametri %(param)s di codifica JSON: %(status)s." #, python-format msgid "JSON transfer data error. %s." msgstr "Errore dei dati di trasferimento JSON: %s." #, python-format msgid "JSON transfer error: %s." msgstr "Errore di trasferimento JSON: %s." #, python-format msgid "LUN %(path)s geometry failed. Message - %(msg)s" msgstr "Geometria della LUN %(path)s non riuscita. Messaggio - %(msg)s" msgid "LUN extend failed!" msgstr "Estensione LUN non riuscita. " msgid "LUN unexport failed!" msgstr "Annullamento dell'esportazione della LUN non riuscito." #, python-format msgid "" "Location info needed for backend enabled volume migration not in correct " "format: %s. Continuing with generic volume migration." msgstr "" "Informazioni sull'ubicazione necessarie per la migrazione volumi abilitata " "al backend non in formato corretto: %s. Continuare con la migrazione volumi " "generica." msgid "" "Logging into the Datera cluster failed. Please check your username and " "password set in the cinder.conf and start the cinder-volume service again." msgstr "" "Accesso al cluster Datera non riuscito. Controllare nome utente e password " "impostati in cinder.conf e avviare di nuovo il servizio cinder-volume." #, python-format msgid "" "Login error. URL: %(url)s\n" "Reason: %(reason)s." 
msgstr "" "Errore di login. URL: %(url)s\n" "Motivo: %(reason)s." #, python-format msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." msgstr "" "La vista di mascheramento: %(maskingViewName)s è stata eliminata di recente." #, python-format msgid "Lun %s has dependent snapshots, skipping lun deletion." msgstr "" "La lun %s presenta istantanee dipendenti, l'eliminazione della lun viene " "ignorata." #, python-format msgid "Lun create for %s failed!" msgstr "Creazione della lun per %s non riuscita." #, python-format msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "" "L'istantanea di creazione lun per il volume %(vol)s istantanea %(snap)s non " "è riuscita." #, python-format msgid "Lun delete for %s failed!" msgstr "Eliminazione della lun per %s non riuscita." #, python-format msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "" "L'istantanea di eliminazione lun per il volume %(vol)s istantanea %(snap)s " "non è riuscita." msgid "Lun mapping returned null!" msgstr "L'associazione LUN ha restituito null." #, python-format msgid "MSGID%(id)04d-E: %(msg)s" msgstr "MSGID%(id)04d-E: %(msg)s" msgid "Manage exist volume failed." msgstr "Gestione del volume esistente non riuscita." #, python-format msgid "" "Manager for service %(binary)s %(host)s is reporting problems, not sending " "heartbeat. Service will appear \"down\"." msgstr "" "Il gestore del servizio %(binary)s %(host)s riporta dei problemi, " "l'heartbeat non viene inviato. Il servizio risulterà \"disattivo\"." #, python-format msgid "" "Masking View creation or retrieval was not successful for masking view " "%(maskingViewName)s. Attempting rollback." msgstr "" "La creazione o il richiamo della vista di mascheramento non sono stati " "eseguiti correttamente per la vista di mascheramento %(maskingViewName)s. " "Tentativo di rollback." #, python-format msgid "" "Max retries reached deleting backup %(basename)s image of volume %(volume)s." msgstr "" "Raggiunto numero massimo di tentativi di eliminazione backup %(basename)s " "immagine di volume %(volume)s." #, python-format msgid "Message: %s" msgstr "Messaggio: %s" #, python-format msgid "Migration of LUN %s failed to complete." msgstr "Impossibile completare la migrazione della LUN %s." msgid "Model update failed." msgstr "Aggiornamento del modello non riuscito." #, python-format msgid "Modify volume PUT failed: %(name)s (%(status)d)." msgstr "Modifica del volume PUT non riuscita: %(name)s (%(status)d)." #, python-format msgid "Mount failure for %(share)s after %(count)d attempts." msgstr "Errore di montaggio per %(share)s dopo %(count)d tentativi." #, python-format msgid "Mount failure for %(share)s." msgstr "Errore di montaggio per %(share)s." #, python-format msgid "Multiple replay profiles under name %s" msgstr "Più profili di risposta con nome %s" #, python-format msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s" msgstr "" "La condivisione NFS %(share)s non ha alcuna voce di servizio: %(svc)s -> " "%(hdp)s" msgid "No CLI output for firmware version check" msgstr "Nessun output CLI per il controllo della versione firmware" #, python-format msgid "No VIP configured for service %s" msgstr "Nessun VIP configurato per il servizio %s" #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of pool: %(pool)s." msgstr "" "Nessuna azione richiesta. Il volume: %(volumeName)s è già parte del pool: " "%(pool)s." #, python-format msgid "" "No action required. 
Volume: %(volumeName)s is already part of slo/workload " "combination: %(targetCombination)s." msgstr "" "Nessuna azione richiesta. Il volume: %(volumeName)s è già parte della " "combinazione slo/carico di lavoro: %(targetCombination)s." #, python-format msgid "No configuration found for service: %s" msgstr "Nessuna configurazione trovata per il servizio: %s" #, python-format msgid "No configuration found for service: %s." msgstr "Nessuna configurazione trovata per il servizio: %s." msgid "No more targets avaliable." msgstr "Nessun'altra destinazione disponibile." #, python-format msgid "" "No snapshots found in database, but %(path)s has backing file " "%(backing_file)s!" msgstr "" "Nessuna istantanea trovata nel database, ma %(path)s contiene un file di " "backup %(backing_file)s." #, python-format msgid "Not able to configure PBM for vCenter server: %s" msgstr "Impossibile configurare PBM per il server vCenter: %s" #, python-format msgid "OSError: command is %(cmd)s." msgstr "OSError: il comando è %(cmd)s." #, python-format msgid "OSError: command is %s." msgstr "OSError: il comando è %s." #, python-format msgid "" "One of the components of the original masking view %(maskingViewName)s " "cannot be retrieved so please contact your system administrator to check " "that the correct initiator(s) are part of masking." msgstr "" "Uno dei componenti della vista di mascheramento originale " "%(maskingViewName)s non può essere richiamato, quindi contattare " "l'amministratore di sistema per verificare che l'iniziatore corretto sia " "parte del mascheramento." #, python-format msgid "" "Only SLO/workload migration within the same SRP Pool is supported in this " "version The source pool : %(sourcePoolName)s does not match the target " "array: %(targetPoolName)s. Skipping storage-assisted migration." msgstr "" "Solo la migrazione di SLO/carico di lavoro all'interno dello stesso pool SRP " "è supportata in questa versione. Il pool di origine: %(sourcePoolName)s non " "corrisponde all'array di destinazione: %(targetPoolName)s. La migrazione " "assistita dalla memoria viene ignorata." msgid "Only available volumes can be migrated between different protocols." msgstr "" "Solo i volumi disponibili possono essere migrati tra protocolli diversi." #, python-format msgid "POST for host create failed (%s)!" msgstr "POST per la creazione di host non riuscito (%s)!" #, python-format msgid "Pipe1 failed - %s " msgstr "Pipe1 non riuscito - %s " #, python-format msgid "Pipe2 failed - %s " msgstr "Pipe2 non riuscito - %s " msgid "" "Please check your xml for format or syntax errors. Please see documentation " "for more details." msgstr "" "Verificare il formato e gli errori di sintassi del file xml. Per ulteriori " "dettagli, vedere la documentazione." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "Il nome pool deve essere nel file %(fileName)s." #, python-format msgid "Prepare clone failed for %s." msgstr "Preparazione del clone non riuscita per %s." msgid "Primary IP must be set!" msgstr "L'IP primario deve essere impostato." msgid "Problem cleaning incomplete backup operations." msgstr "Problema durante la ripulitura delle operazioni di backup incomplete." #, python-format msgid "Problem cleaning temp volumes and snapshots for backup %(bkup)s." msgstr "" "Problema durante la ripulitura di volumi temporanei e istantanee per il " "backup %(bkup)s." #, python-format msgid "Problem cleaning up backup %(bkup)s." msgstr "Problema durante la ripulitura del backup %(bkup)s." 
msgid "Promote volume replica failed." msgstr "Promozione della replica del volume non riuscita." #, python-format msgid "" "Purity host %(host_name)s is managed by Cinder but CHAP credentials could " "not be retrieved from the Cinder database." msgstr "" "L'host Purity %(host_name)s è gestito da Cinder ma le credenziali CHAP non " "possono essere richiamate dal database Cinder." #, python-format msgid "" "Purity host %(host_name)s is not managed by Cinder and can't have CHAP " "credentials modified. Remove IQN %(iqn)s from the host to resolve this issue." msgstr "" "L'host Purity %(host_name)s non è gestito da Cinder e non può avere " "credenziali CHAP modificate. Rimuovere IQN %(iqn)s dall'host per risolvere " "il problema." #, python-format msgid "Qemu-img is not installed. OSError: command is %(cmd)s." msgstr "Qemu-img non è installato. OSError: il comando è %(cmd)s." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " "(%(d_consumed)dG of %(d_quota)dG already consumed)." msgstr "" "Quota superata per %(s_pid)s, si è tentato di estendere il volume per " "%(s_size)sG, (%(d_consumed)dG di %(d_quota)dG già utilizzato)." #, python-format msgid "REST Not Available: %s" msgstr "REST non disponibile: %s" #, python-format msgid "Re-throwing Exception %s" msgstr "Rigenerazione dell'eccezione %s" #, python-format msgid "Read response raised an exception: %s." msgstr "La lettura della risposta ha generato un'eccezione: %s." msgid "Recovered model server connection!" msgstr "Connessione al model server ripristinata." #, python-format msgid "Recovering from a failed execute. Try number %s" msgstr "" "Viene eseguito un recupero da un'esecuzione non riuscita. Provare il numero " "%s" msgid "Replication must be specified as ' True' or ' False'." msgstr "La replica deve essere specificata come ' True' o ' False'." msgid "" "Requested to setup thin provisioning, however current LVM version does not " "support it." msgstr "" "È stata richiesta la configurazione di thin provisioning, tuttavia la " "versione LVM corrente non lo supporta." #, python-format msgid "Resizing %s failed. Cleaning volume." msgstr "Nuovo dimensionamento di %s non riuscito. Ripulitura del volume." #, python-format msgid "Restore to volume %(volume)s finished with error - %(error)s." msgstr "Ripristino su volume %(volume)s completato con errore - %(error)s." #, python-format msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" msgstr "" "Tentativo eseguito %(retry)s volte: %(method)s Non riuscito %(rc)s: " "%(reason)s" #, python-format msgid "Retype unable to find volume %s." msgstr "" "Il comando di riscrittura non è stato in grado di trovare il volume %s." msgid "Retype volume error." msgstr "Errore di riscrittura del volume." msgid "Retype volume error. Create replication failed." msgstr "Errore di riscrittura del volume. Creazione replica non riuscita." msgid "Retype volume error. Delete replication failed." msgstr "Errore di riscrittura del volume. Eliminazione replica non riuscita." #, python-format msgid "" "SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, Platinum, " "Diamond, Optimized, NONE." msgstr "" "SLO: %(slo)s non valido. I valori validi sono Bronze, Silver, Gold, " "Platinum, Diamond, Optimized, NONE." msgid "" "ScVolume returned success with empty payload. Attempting to locate volume" msgstr "" "ScVolume restituito correttamente con payload vuoto. 
Tentativo di " "individuare il volume" #, python-format msgid "Server Busy retry request: %s" msgstr "Il sistema è occupato, ritentare la richiesta: %s" msgid "Service not found for updating replication_status." msgstr "Servizio non trovato per l'aggiornamento di replication_status." #, python-format msgid "Setting QoS for %s failed" msgstr "Impostazione QoS per %s non riuscita" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export." msgstr "" "Condivisione %s ignorata a causa di un formato non valido. Deve essere del " "formato address:/export. " #, python-format msgid "Sheepdog is not installed. OSError: command is %s." msgstr "Sheepdog non è installato. OSError: il comando è %s." #, python-format msgid "" "Skipping remove_export. No iscsi_target ispresently exported for volume: %s" msgstr "" "remove_export viene ignorato. Nessun iscsi_target viene al momento esportato " "per il volume: %s" #, python-format msgid "Snapshot \"%s\" already exists." msgstr "L'istantanea \"%s\" esiste già." #, python-format msgid "" "Snapshot \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "Istantanea \"%s\" non trovata. Controllare i risultati di \"dog vdi list\"." #, python-format msgid "Snapshot %(snapshotname)s not found on the array. No volume to delete." msgstr "" "Istantanea %(snapshotname)s non trovata nell'array. Nessun volume da " "eliminare." #, python-format msgid "Snapshot %s: create failed" msgstr "Istantanea %s: creazione non riuscita" #, python-format msgid "Snapshot %s: has clones" msgstr "L'istantanea %s: presenta cloni" msgid "Snapshot did not exist. It will not be deleted" msgstr "L'istantanea non esiste. Non verrà eliminata" #, python-format msgid "" "Source CG %(source_cg)s not found when creating consistency group %(cg)s " "from source." msgstr "" "CG di origine %(source_cg)s non trovata durante la creazione del gruppo di " "coerenza %(cg)s dall'origine." #, python-format msgid "Source snapshot %(snapshot_id)s cannot be found." msgstr "Impossibile trovare l'istantanea di origine %(snapshot_id)s." #, python-format msgid "Source snapshot cannot be found for target volume %(volume_id)s." msgstr "" "Impossibile trovare l'istantanea di origine per il volume di destinazione " "%(volume_id)s." #, python-format msgid "Source volume %s not ready!" msgstr "Volume di origine %s non pronto." #, python-format msgid "Source volumes cannot be found for target volume %(volume_id)s." msgstr "" "Impossibile trovare i volumi di origine per il volume di destinazione " "%(volume_id)s." #, python-format msgid "" "Src Volume \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "Volume src \"%s\" non trovato. Controllare i risultati di \"dog vdi list\"." #, python-format msgid "Start synchronization failed. Error: %s." msgstr "Avvio della sincronizzazione non riuscito. Errore: %s." #, python-format msgid "StdErr :%s" msgstr "StdErr :%s" #, python-format msgid "StdOut :%s" msgstr "StdOut :%s" #, python-format msgid "Storage Profile %s was not found." msgstr "Profilo di memoria %s non trovato." #, python-format msgid "Storage profile: %s cannot be found in vCenter." msgstr "Profilo di archiviazione: %s non trovato in vCenter." msgid "Sync volume replica failed." msgstr "Replica del volume di sincronizzazione non riuscita." #, python-format msgid "TSM [%s] not found in CloudByte storage." msgstr "TSM [%s] non trovato nella memoria CloudByte." 
#, python-format msgid "Target end points do not exist for hardware Id: %(hardwareIdInstance)s." msgstr "" "I punti finali di destinazione non esistono per l'ID hardware: " "%(hardwareIdInstance)s." msgid "The Flexvisor service is unavailable." msgstr "Il servizio Flexvisor non è disponibile." #, python-format msgid "The NFS Volume %(cr)s does not exist." msgstr "Il volume NFS %(cr)s non esiste." msgid "The connector does not contain the required information." msgstr "Il connettore non contiene le informazioni necessarie." msgid "" "The connector does not contain the required information: initiator is missing" msgstr "" "Il connettore non contiene le informazioni necessarie: iniziatore mancante" msgid "" "The connector does not contain the required information: wwpns is missing" msgstr "Il connettore non contiene le informazioni necessarie: wwpns mancante" msgid "The given extra_spec or valid_values is None." msgstr "I parametri extra_spec o valid_values specificati sono None." msgid "The list of iscsi_ip_addresses is empty" msgstr "L'elenco di iscsi_ip_addresses è vuoto" #, python-format msgid "" "The primary array must have an API version of %(min_ver)s or higher, but is " "only on %(current_ver)s, therefore replication is not supported." msgstr "" "L'array primario deve avere una versione API di %(min_ver)s o superiore, ma " "è solo nella %(current_ver)s, quindi la replica non è supportata." #, python-format msgid "" "The replication mode of %(type)s has not successfully established " "partnership with the replica Storwize target %(stor)s." msgstr "" "La modalità di replica di %(type)s non ha una relazione stabilita " "correttamente con la destinazione Storwize %(stor)s della replica." msgid "The snapshot cannot be deleted because it is a clone point." msgstr "L'istantanea non può essere eliminata perché è un punto di clonazione." #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s skipping storage-assisted migration." msgstr "" "L'array di origine: %(sourceArraySerialNumber)s non corrisponde all'array di " "destinazione: %(targetArraySerialNumber)s , la migrazione assistita dalla " "memoria viene ignorata." #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s, skipping storage-assisted migration." msgstr "" "L'array di origine: %(sourceArraySerialNumber)s non corrisponde all'array di " "destinazione: %(targetArraySerialNumber)s, la migrazione assistita dalla " "memoria viene ignorata." #, python-format msgid "The source volume %(volume_id)s cannot be found." msgstr "Impossibile trovare il volume di origine %(volume_id)s." #, python-format msgid "The volume driver requires %(data)s in the connector." msgstr "Il driver del volume richiede %(data)s nel connettore." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "Il driver del volume richiede il nome iniziatore iSCSI nel connettore." #, python-format msgid "There are no valid hosts available in configured cluster(s): %s." msgstr "Non sono presenti host validi disponibili nel cluster configurato: %s." #, python-format msgid "There is no valid datastore satisfying requirements: %s." msgstr "Non è presente alcun datastore valido che soddisfa i requisiti: %s." msgid "There must be at least one valid replication device configured." msgstr "È necessario configurare almeno un dispositivo di replica valido." 
#, python-format msgid "" "There was a problem with the failover (%(error)s) and it was unsuccessful. " "Volume '%(volume)s will not be available on the failed over target." msgstr "" "Si è verificato un problema con il failover (%(error)s) e non è stato " "eseguito correttamente. Il volume '%(volume)s non sarà disponibile sulla " "destinazione sottoposta a failover." #, python-format msgid "There was an error deleting snapshot %(id)s: %(error)." msgstr "" "Si è verificato un errore durante l'eliminazione dell'istantanea %(id)s: " "%(error)." #, python-format msgid "There was an error deleting volume %(id)s: %(error)." msgstr "" "Si è verificato un errore durante l'eliminazione del volume %(id)s: %(error)." #, python-format msgid "There was an error deleting volume %(id)s: %(error)s." msgstr "" "Si è verificato un errore durante l'eliminazione del volume %(id)s: " "%(error)s." msgid "This usually means the volume was never successfully created." msgstr "" "Ciò in genere significa che il volume non è stato mai creato correttamente." msgid "Tiering Policy is not supported on this array." msgstr "La politica di livellamento non è supportata su questo array." #, python-format msgid "Timed out deleting %s!" msgstr "Timeout durante l'eliminazione di %s." #, python-format msgid "Trying to create snapshot by non-existent LV: %s" msgstr "Tentativo di creare un'istantanea da LV non esistente: %s" #, python-format msgid "URLError: %s" msgstr "Errore URL: %s" #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "Impossibile accedere al back-end Storwize per il volume %s." #, python-format msgid "Unable to create folder path %s" msgstr "Impossibile trovare il percorso della cartella %s" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Impossibile creare o ottenere il gruppo di archiviazione predefinito per la " "politica FAST: %(fastPolicyName)s." #, python-format msgid "Unable to create volume %s from replay" msgstr "Impossibile creare il volume %s dalla risposta" #, python-format msgid "Unable to create volume on SC: %s" msgstr "Impossibile creare il volume su SC: %s" #, python-format msgid "Unable to create volume. Volume driver %s not initialized" msgstr "Impossibile creare il volume. Driver del volume %s non inizializzato" msgid "Unable to delete busy volume." msgstr "Impossibile eliminare il volume occupato." #, python-format msgid "Unable to delete due to existing snapshot for volume: %s" msgstr "" "Impossibile eseguire l'eliminazione a causa di un'istantanea esistente per " "il volume: %s" #, python-format msgid "Unable to delete profile %s." msgstr "Impossibile eliminare il profilo %s." #, python-format msgid "Unable to delete replication for %(vol)s to %(dest)s." msgstr "Impossibile eliminare la replica per %(vol)s in %(dest)s." msgid "" "Unable to delete the destination volume during volume migration, (NOTE: " "database record needs to be deleted)." msgstr "" "Impossibile eliminare il volume di destinazione durante la migrazione del " "volume, (NOTA: il record del database deve essere eliminato)." #, python-format msgid "Unable to determine whether %(volumeName)s is composite or not." msgstr "Impossibile stabilire se %(volumeName)s è composito o meno." msgid "Unable to disconnect host from volume, could not determine Purity host" msgstr "" "Impossibile scollegare l'host dal volume, non è stato possibile determinare " "l'host Purity" msgid "" "Unable to failover to the secondary. 
Please make sure that the secondary " "back-end is ready." msgstr "" "Impossibile eseguire il failover sul backend secondario. Accertarsi che il " "backend secondario sia pronto." msgid "Unable to find FC initiators" msgstr "Impossibile trovare gli iniziatori FC" #, python-format msgid "Unable to find VG: %s" msgstr "Impossibile trovare VG: %s" #, python-format msgid "Unable to find controller port iscsi configuration: %s" msgstr "" "Impossibile trovare la configurazione iscsi della porta del controller: %s" #, python-format msgid "Unable to find controller port: %s" msgstr "Impossibile trovare la porta del controller: %s" #, python-format msgid "" "Unable to find default storage group for FAST policy : %(fastPolicyName)s." msgstr "" "Impossibile trovare il gruppo di archiviazione predefinito per la politica " "FAST: %(fastPolicyName)s." #, python-format msgid "Unable to find disk folder %(name)s on %(ssn)s" msgstr "Impossibile trovare la cartella del disco %(name)s su %(ssn)s" #, python-format msgid "Unable to find mapping profiles: %s" msgstr "Impossibile trovare i profili di associazione: %s" #, python-format msgid "Unable to find or create QoS Node named %s" msgstr "Impossibile trovare o creare il nodo QoS denominato %s" #, python-format msgid "Unable to find service: %(service)s for given host: %(host)s." msgstr "" "Impossibile trovare il servizio: %(service)s per l'host specificato: " "%(host)s." msgid "Unable to get associated pool of volume." msgstr "Impossibile ottenere il pool di volume associato." #, python-format msgid "Unable to get default storage group %(defaultSgName)s." msgstr "" "Impossibile ottenere il gruppo di archiviazione predefinito " "%(defaultSgName)s." msgid "Unable to get device mapping from network." msgstr "Impossibile ottenere l'associazione del dispositivo dalla rete." #, python-format msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." msgstr "" "Impossibile ottenere la regola della politica per la politica FAST: " "%(fastPolicyName)s." #, python-format msgid "Unable to locate Volume Group %s" msgstr "Impossibile individuare il gruppo volumi %s" #, python-format msgid "Unable to locate snapshot %s" msgstr "Impossibile individuare l'istantanea %s" #, python-format msgid "Unable to manage existing snapshot. Volume driver %s not initialized." msgstr "" "Impossibile gestire l'istantanea esistente. Driver del volume %s non " "inizializzato." #, python-format msgid "Unable to manage existing volume. Volume driver %s not initialized." msgstr "" "Impossibile gestire il volume esistente. Driver del volume %s non " "inizializzato." msgid "Unable to manage_existing snapshot on a disabled service." msgstr "" "Impossibile gestire l'istantanea esistente su un servizio disabilitato." msgid "Unable to manage_existing volume on a disabled service." msgstr "Impossibile gestire il volume esistente su un servizio disabilitato." #, python-format msgid "Unable to map %(vol)s to %(srv)s" msgstr "Impossibile associare %(vol)s a %(srv)s" #, python-format msgid "Unable to rename lun %s on array." msgstr "Impossibile ridenominare la lun %s sull'array." #, python-format msgid "Unable to rename the logical volume for volume %s." msgstr "Impossibile ridenominare il volume logico per il volume %s."
#, python-format msgid "Unable to rename the logical volume for volume: %s" msgstr "Impossibile ridenominare il volume logico per il volume: %s" #, python-format msgid "Unable to replicate %(volname)s to %(destsc)s" msgstr "Impossibile replicare %(volname)s su %(destsc)s" #, python-format msgid "Unable to retrieve VolumeConfiguration: %s" msgstr "Impossibile richiamare la configurazione del volume: %s" #, python-format msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." msgstr "" "Impossibile richiamare l'istanza pool di %(poolName)s sull'array %(array)s." #, python-format msgid "Unable to terminate volume connection: %(err)s." msgstr "Impossibile terminare la connessione del volume: %(err)s" #, python-format msgid "Unable to unmap Volume %s" msgstr "Impossibile annullare l'associazione del volume %s" msgid "Unexpected build error:" msgstr "Errore di generazione non previsto:" msgid "Unexpected error occurs in horcm." msgstr "Si è verificato un errore non previsto in horcm." msgid "Unexpected error occurs in snm2." msgstr "Si è verificato un errore non previsto in snm2." #, python-format msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" msgstr "" "Si è verificato un errore non previsto quando il ripristino retype() ha " "tentato di deleteVolumeSet(%s)" #, python-format msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" msgstr "" "Si è verificato un errore non previsto quando retype() ha tentato di " "deleteVolumeSet(%s)" #, python-format msgid "Unexpected error while invoking web service. Error - %s." msgstr "Errore imprevisto durante il richiamo del servizio Web. Errore - %s." #, python-format msgid "Unexpected exception during cache cleanup of snapshot %s" msgstr "Eccezione imprevista durante la ripulitura cache dell'istantanea %s" #, python-format msgid "Unknown exception in post clone resize LUN %s." msgstr "Eccezione sconosciuta nella LUN di post ridimensionamento clone %s." #, python-format msgid "Unrecognized Login Response: %s" msgstr "Risposta di login non riconosciuta: %s" #, python-format msgid "" "Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound." msgstr "" "L'aggiornamento del gruppo di coerenza non è riuscito ad aggiungere il " "volume-%(volume_id)s: VolumeNotFound." #, python-format msgid "" "Update consistency group failed to remove volume-%(volume_id)s: " "VolumeNotFound." msgstr "" "L'aggiornamento del gruppo di coerenza non è riuscito a rimuovere il volume-" "%(volume_id)s: VolumeNotFound." msgid "Update snapshot usages failed." msgstr "Aggiornamento utilizzi istantanea non riuscito." msgid "Update volume model for transfer operation failed." msgstr "" "Aggiornamento del modello di volume per l'operazione di trasferimento non " "riuscito." #, python-format msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." msgstr "" "L'aggiornamento del volume nell'immagine ha riportato un errore (image-id: " "%(image_id)s)." msgid "User does not have permission to change Storage Profile selection." msgstr "" "L'utente non dispone dell'autorizzazione per modificare la selezione del " "profilo di memoria." msgid "VGC-CLUSTER command blocked and cancelled." msgstr "Comando VGC-CLUSTER bloccato e annullato." #, python-format msgid "Version string '%s' is not parseable" msgstr "La stringa di versione '%s'non è analizzabile" #, python-format msgid "Virtual Volume Set %s does not exist." msgstr "L'insieme di volumi virtuali %s non esiste." 
#, python-format msgid "Virtual disk device of backing: %s not found." msgstr "Dispositivo disco virtuale di backup: %s non trovato." #, python-format msgid "Vol copy job status %s." msgstr "Stato del lavoro di copia del volume %s." #, python-format msgid "Volume \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "Volume \"%s\" non trovato. Controllare i risultati di \"dog vdi list\"." #, python-format msgid "" "Volume %(name)s is not suitable for storage assisted migration using retype." msgstr "" "Il volume %(name)s non è adatto per la migrazione assistita dalla memoria " "utilizzando la riscrittura." #, python-format msgid "Volume %(name)s not found on the array." msgstr "Volume %(name)s non trovato nell'array. " #, python-format msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "Volume %(name)s non trovato nell'array. Nessun volume da eliminare." #, python-format msgid "" "Volume %(name)s not found on the array. No volume to migrate using retype." msgstr "" "Volume %(name)s non trovato sull'array. Nessun volume da migrare utilizzando " "la riscrittura." #, python-format msgid "" "Volume %(volumeid)s failed to send assign command, ret: %(status)s output: " "%(output)s" msgstr "" "Il volume %(volumeid)s non è riuscito a inviare il comando di assegnazione, " "ret: %(status)s output: %(output)s" #, python-format msgid "Volume %s doesn't exist on array." msgstr "Il volume %s non esiste nell'array." #, python-format msgid "Volume %s, not found on SF Cluster." msgstr "Volume %s, non trovato nel cluster SF." #, python-format msgid "Volume %s: create failed" msgstr "Volume %s: creazione non riuscita" #, python-format msgid "" "Volume %s: driver error when trying to retype, falling back to generic " "mechanism." msgstr "" "Volume %s: errore del driver nel tentativo di eseguire la riscrittura, " "fallback su meccanismo generico." #, python-format msgid "Volume %s: manage failed." msgstr "Volume %s: gestione non riuscita." #, python-format msgid "Volume %s: rescheduling failed" msgstr "Volume %s: ripianificazione non riuscita" #, python-format msgid "Volume %s: update volume state failed." msgstr "Volume %s: aggiornamento dello stato del volume non riuscito." #, python-format msgid "" "Volume : %(volumeName)s has not been added to target storage group " "%(storageGroup)s." msgstr "" "Il volume: %(volumeName)s non è stato aggiunto al gruppo di archiviazione di " "origine %(storageGroup)s." #, python-format msgid "" "Volume : %(volumeName)s has not been removed from source storage group " "%(storageGroup)s." msgstr "" "Il volume: %(volumeName)s non è stato rimosso dal gruppo di archiviazione di " "origine %(storageGroup)s." #, python-format msgid "" "Volume : %(volumeName)s. was not successfully migrated to target pool " "%(targetPoolName)s." msgstr "" "Il volume : %(volumeName)s. non è stato migrato correttamente nel pool di " "destinazione %(targetPoolName)s." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "accept_transfer operation!" msgstr "" "L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " "eseguire l'operazione accept_transfer." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "attach_volume operation!" msgstr "" "L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " "eseguire l'operazione attach_volume." 
#, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "delete_volume operation!" msgstr "" "L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " "eseguire l'operazione delete_volume." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "detach_volume operation!" msgstr "" "L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " "eseguire l'operazione detach_volume." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "extend_volume operation!" msgstr "" "L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " "eseguire l'operazione estend_volume!" #, python-format msgid "" "Volume ID %s was not found on the zfssa device while attempting " "delete_volume operation." msgstr "" "L'ID volume %s non è stato trovato nel dispositivo zfssa nel tentativo di " "eseguire l'operazione delete_volume." #, python-format msgid "Volume already exists. %s" msgstr "Il volume esiste già. %s" msgid "Volume appears unmapped" msgstr "Il volume risulta non associato" #, python-format msgid "" "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" msgstr "" "Creazione del volume non riuscita, eliminazione dell'istantanea creata " "%(volume_name)s@%(name)s" #, python-format msgid "Volume creation failed, deleting created snapshot %s" msgstr "" "Creazione del volume non riuscita, eliminazione dell'istantanea creata %s" msgid "Volume did not exist. It will not be deleted" msgstr "Il volume non esiste. Non verrà eliminato" #, python-format msgid "Volume driver %s not initialized" msgstr "Il driver di volume %s non è inizializzato" msgid "Volume in unexpected state" msgstr "Volume in uno stato imprevisto" #, python-format msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "Volume in uno stato imprevisto %s, previsto awaiting-transfer" #, python-format msgid "Volume migration failed due to exception: %(reason)s." msgstr "Migrazione volume non riuscita a causa di un'eccezione: %(reason)s" msgid "Volume must be detached for clone operation." msgstr "Il volume deve essere scollegato per l'operazione di clonazione." #, python-format msgid "Volume size \"%sG\" is too large." msgstr "La dimensione del volume \"%sG\" è troppo grande." #, python-format msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "Eliminazione di VolumeType %s non riuscita, VolumeType in uso." #, python-format msgid "" "WebDAV operation failed with error code: %(code)s reason: %(reason)s Retry " "attempt %(retry)s in progress." msgstr "" "Operazione WebDAV non riuscita con codice di errore: %(code)s motivo: " "%(reason)s Nuovo tentativo %(retry)s in corso." #, python-format msgid "WebDAV returned with %(code)s error during %(method)s call." msgstr "" "WebDAV restituito con errore %(code)s durante la chiamata di %(method)s." #, python-format msgid "" "Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP, " "OLTP_REP, NONE." msgstr "" "Carico di lavoro: %(workload)s non valido. I valori validi sono DSS_REP, " "DSS, OLTP, OLTP_REP, NONE." msgid "_check_version_fail: Parsing error." msgstr "_check_version_fail: Errore di analisi." msgid "_find_mappings: volume is not active" msgstr "_find_mappings: volume non attivo" #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " "operation: orig=%(orig)s new=%(new)s." 
msgstr "" "_rm_vdisk_copy_op: Il volume %(vol)s non presenta l'operazione di copia " "vdisk specificata: orig=%(orig)s nuova=%(new)s." #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" "_rm_vdisk_copy_op: I metadati del volume %(vol)s non presentano l'operazione " "di copia vdisk specificata: orig=%(orig)s nuova=%(new)s." #, python-format msgid "" "_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " "operations." msgstr "" "_rm_vdisk_copy_op: Il volume %s non presenta operazioni di copia vdisk " "registrate." #, python-format msgid "" "_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " "copy operations." msgstr "" "_rm_vdisk_copy_op: I metadati del volume %s non presentano operazioni di " "copia vdisk registrate." #, python-format msgid "" "_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " "%(host_name)s found." msgstr "" "_unmap_vdisk_from_host: Non è stata trovata nessuna associazione del volume " "%(vol_name)s all'host %(host_name)s." #, python-format msgid "_wait_for_job_complete failed after %(retries)d tries." msgstr "_wait_for_job_complete non riuscito dopo %(retries)d tentativi." #, python-format msgid "_wait_for_job_complete, failed after %(retries)d tries." msgstr "_wait_for_job_complete, non riuscito dopo %(retries)d tentativi." #, python-format msgid "_wait_for_sync failed after %(retries)d tries." msgstr "_wait_for_sync non riuscito dopo %(retries)d tentativi." #, python-format msgid "" "backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "backup: %(vol_id)s non è stato in grado di rimovere l'hardlinl di backup da " "%(vpath)s a %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s." #, python-format msgid "can't create 2 volumes with the same name, %s" msgstr "impossibile creare 2 volumi con lo stesso nome, %s" msgid "cinder-rtstool is not installed correctly" msgstr "cinder-rtstool non è installato correttamente" #, python-format msgid "" "delete: %(vol_id)s failed with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "eliminazione: %(vol_id)s non riuscito con stdout: %(out)s\n" " stderr: %(err)s" msgid "delete_vol: provider location empty." msgstr "delete_vol: ubicazione del fornitore vuota." #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "ensure_export: Volume %s non trovato nella memoria." #, python-format msgid "error opening rbd image %s" msgstr "errore nell'apertura dell'immagine rbd %s" msgid "error refreshing volume stats" msgstr "errore durante l'aggiornamento delle statistiche del volume" msgid "horcm command timeout." msgstr "Timeout del comando horcm." #, python-format msgid "iSCSI portal not found for service: %s" msgstr "Portale iSCSI non trovato per il servizio: %s" msgid "import pywbem failed!! pywbem is necessary for this volume driver." msgstr "" "importazione di pywbem non riuscita. pywbem è necessario per questo driver " "di volume." #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s." msgstr "" "initialize_connection: impossibile raccogliere le proprietà di ritorno per " "il volume %(vol)s e il connettore %(conn)s." 
#, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s.\n" msgstr "" "initialize_connection: impossibile raccogliere le proprietà di ritorno per " "il volume %(vol)s e il connettore %(conn)s.\n" msgid "iscsi_ip_address must be set!" msgstr "iscsi_ip_address deve essere impostato." msgid "manage_existing: No source-name in ref!" msgstr "manage_existing: Nessun source-name nel riferimento." #, python-format msgid "manage_existing_get_size: %s does not exist!" msgstr "manage_existing_get_size: %s non esiste." msgid "manage_existing_get_size: No source-name in ref!" msgstr "manage_existing_get_size: Nessun source-name nel riferimento." msgid "model server went away" msgstr "model server é scomparso" #, python-format msgid "modify volume: %s does not exist!" msgstr "modify volume: %s does not exist." msgid "san ip must be configured!" msgstr "L'IP SAN deve essere configurato." msgid "san_login must be configured!" msgstr "san_login deve essere configurato." msgid "san_password must be configured!" msgstr "san_password deve essere configurato." #, python-format msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" "Modalità di autorizzazione single_user abilitata, ma %(param)s non impostato" msgid "snm2 command timeout." msgstr "Timeout del comando snm2." msgid "" "storwize_svc_multihostmap_enabled is set to False, not allowing multi host " "mapping." msgstr "" "storwize_svc_multihostmap_enabled è impostato su False, associazione di più " "host non consentita. " #, python-format msgid "unmanage: Volume %s does not exist!" msgstr "unmanage: Il volume %s non esiste." msgid "zfssa_initiator cannot be empty when creating a zfssa_initiator_group." msgstr "" "zfssa_initiator non può essere vuoto durante la creazione di un " "zfssa_initiator_group." msgid "" "zfssa_replication_ip not set in cinder.conf. zfssa_replication_ip is needed " "for backend enabled volume migration. Continuing with generic volume " "migration." msgstr "" "zfssa_replication_ip non impostato in cinder.conf. zfssa_replication_ip è " "necessario per la migrazione volumi abilitata al backend. Continuare con la " "migrazione volumi generica." cinder-8.0.0/cinder/locale/it/LC_MESSAGES/cinder-log-info.po0000664000567000056710000035014412701406257024357 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # OpenStack Infra , 2015. #zanata # Alessandra , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev19\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:34+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-17 10:52+0000\n" "Last-Translator: Alessandra \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "\t%(name)-35s : %(value)s" msgstr "\t%(name)-35s : %(value)s" #, python-format msgid "\t%(param)-35s : %(value)s" msgstr "\t%(param)-35s : %(value)s" #, python-format msgid "\t%(prefix)-35s : %(version)s" msgstr "\t%(prefix)-35s : %(version)s" #, python-format msgid "\t%(request)-35s : %(value)s" msgstr "\t%(request)-35s : %(value)s" #, python-format msgid "" "\n" "\n" "\n" "\n" "Request URL: %(url)s\n" "\n" "Call Method: %(method)s\n" "\n" "Request Data: %(data)s\n" "\n" "Response Data:%(res)s\n" "\n" msgstr "" "\n" "\n" "\n" "\n" "URL richiesta: %(url)s\n" "\n" "Metodo di chiamata: %(method)s\n" "\n" "Dati richiesta: %(data)s\n" "\n" "Dati risposta: %(res)s\n" "\n" #, python-format msgid "%(element)s: %(val)s" msgstr "%(element)s: %(val)s" #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format msgid "%(url)s returned a fault: %(e)s" msgstr "%(url)s ha restituito un errore: %(e)s" #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s restituito con HTTP %(status)d" #, python-format msgid "%(volume)s assign type fibre_channel, properties %(properties)s" msgstr "%(volume)s assegna tipo fibre_channel, proprietà %(properties)s" #, python-format msgid "%s is already umounted" msgstr "%s è giù smontato" #, python-format msgid "3PAR driver cannot perform migration. Retype exception: %s" msgstr "" "Il driver 3PAR non può eseguire la migrazione. Riscrivere l'eccezione: %s" #, python-format msgid "3PAR vlun %(name)s not found on host %(host)s" msgstr "3PAR vlun %(name)s non trovato sull'host %(host)s" #, python-format msgid "" "3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was not " "deleted because: %(reason)s" msgstr "" "3PAR vlun per il volume '%(name)s' è stato eliminato ma l'host '%(host)s' " "non è stato eliminato perché: %(reason)s" #, python-format msgid "AUTH properties: %(authProps)s" msgstr "Proprietà AUTH: %(authProps)s" #, python-format msgid "AUTH properties: %s." msgstr "Proprietà AUTH: %s." #, python-format msgid "Accepting transfer %s" msgstr "Accettazione trasferimento %s" msgid "Activate Flexvisor cinder volume driver." msgstr "Attivare il driver del volume cinder Flexvisor." msgid "Add connection: finished iterating over all target list" msgstr "" "Aggiungi connessione: completata iterazione sull'elenco di tutte le " "destinazioni" #, python-format msgid "Add volume response: %s" msgstr "Aggiungi risposta del volume: %s" #, python-format msgid "Added %s to cg." msgstr "Aggiunto %s a cg." #, python-format msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." msgstr "" "Aggiunto volume: %(volumeName)s al gruppo di archiviazione esistente " "%(sgGroupName)s." #, python-format msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" msgstr "Aggiunta di ACL al volume=%(vol)s con nome gruppo iniziatore %(igrp)s" #, python-format msgid "Adding volume %(v)s to consistency group %(cg)s." msgstr "Aggiunta del volume %(v)s al gruppo di coerenza %(cg)s." 
#, python-format msgid "" "Adding volume: %(volumeName)s to default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Aggiunta del volume: %(volumeName)s al gruppo di archiviazione predefinito " "per la politica FAST: %(fastPolicyName)s." #, python-format msgid "Adding volumes to cg %s." msgstr "Aggiunta volumi a cg %s." #, python-format msgid "Already mounted: %s" msgstr "Già montato: %s" msgid "Attach volume completed successfully." msgstr "Collegamento del volume completato correttamente." #, python-format msgid "" "Automatically selected %(binary)s RPC version %(version)s as minimum service " "version." msgstr "" "Selezionato automaticamente RPC %(binary)s versione %(version)s come " "versione di servizio minima." #, python-format msgid "" "Automatically selected %(binary)s objects version %(version)s as minimum " "service version." msgstr "" "Selezionati automaticamente oggetti %(binary)s versione %(version)s come " "versione di servizio minima." msgid "Availability Zones retrieved successfully." msgstr "Zone di disponibilità richiamate correttamente." #, python-format msgid "Available services: %s" msgstr "Servizi disponibili: %s" #, python-format msgid "Available services: %s." msgstr "Servizi disponibili: %s." #, python-format msgid "Backend name is %s." msgstr "Il nome backend è %s." #, python-format msgid "Backend type: %s" msgstr "Tipo di backend: %s" #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "VM di backup: %(backing)s ridenominata %(new_name)s." #, python-format msgid "Backing consistency group snapshot %s available for deletion" msgstr "Backup istantanea gruppo di coerenza %s disponibile per l'eliminazione" msgid "Backing not available, no operation to be performed." msgstr "Backup non disponibile, nessuna operazione da eseguire." #, python-format msgid "Backing not found, creating for volume: %s" msgstr "Backup non trovato, creazione per volume: %s" #, python-format msgid "" "Backup base image of volume %(volume)s still has %(snapshots)s snapshots so " "skipping base image delete." msgstr "" "L'immagine di base di backup del volume %(volume)s presenta ancora " "%(snapshots)s istantanee, per cui l'eliminazione dell'immagine di base viene " "ignorata." #, python-format msgid "" "Backup image of volume %(volume)s is busy, retrying %(retries)s more time(s) " "in %(delay)ss." msgstr "" "L'immagine di backup del volume %(volume)s è occupata, il tentativo viene " "eseguito altre %(retries)s volte in %(delay)ss." #, python-format msgid "Backup service: %s." msgstr "Servizio di backup: %s." #, python-format msgid "Bandwidth limit is: %s." msgstr "Il limite della larghezza di banda è: %s." #, python-format msgid "Begin backup of volume %s." msgstr "Inizio backup del volume %s." msgid "Begin detaching volume completed successfully." msgstr "Inizio dello scollegamento del volume completato correttamente." #, python-format msgid "" "BrcdFCZoneDriver - Add connection for fabric %(fabric)s for I-T map: " "%(i_t_map)s" msgstr "" "BrcdFCZoneDriver - Aggiungi connessione per %(fabric)s for I-T map: " "%(i_t_map)s" #, python-format msgid "" "BrcdFCZoneDriver - Delete connection for fabric %(fabric)s for I-T map: " "%(i_t_map)s" msgstr "" "BrcdFCZoneDriver - Eliminare la connessione per fabric %(fabric)s per " "l'associazione I-T: %(i_t_map)s" msgid "CHAP authentication disabled." msgstr "Autenticazione CHAP disabilitata." 
#, python-format msgid "CONCERTO version: %s" msgstr "Versione CONCERTO: %s" msgid "Calling os-brick to detach ScaleIO volume." msgstr "Chiamata os-brick per scollegare il volume ScaleIO." #, python-format msgid "Cancelling Migration from LUN %s." msgstr "Annullamento della migrazione da LUN %s." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because cluster " "exists in different management group." msgstr "" "Impossibile fornire la migrazione assistita del backend per il volume: %s in " "quanto il cluster esiste in un gruppo di gestione diverso." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the volume " "has been exported." msgstr "" "Impossibile fornire la migrazione assistita del backend per il volume: %s in " "quanto il volume è stato esportato." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the volume " "has snapshots." msgstr "" "Impossibile fornire la migrazione assistita del backend per il volume: %s in " "quanto il volume presenta istantanee." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume does " "not exist in this management group." msgstr "" "Impossibile fornire la migrazione assistita del backend per il volume: %s in " "quanto il volume non esiste in questo gruppo di gestione." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume is " "from a different backend." msgstr "" "Impossibile fornire la migrazione assistita del backend per il volume: %s in " "quanto il volume deriva da un backend diverso." #, python-format msgid "" "Capacity stats for SRP pool %(poolName)s on array %(arrayName)s " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu" msgstr "" "Statistiche di capacità per il pool SRP %(poolName)s sull'array " "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu" #, python-format msgid "Cgsnapshot %s: creating." msgstr "Crazione di cgsnapshot %s:." #, python-format msgid "Change volume capacity request: %s." msgstr "Modificare la richiesta di capacità del volume: %s." #, python-format msgid "Checking image clone %s from glance share." msgstr "Verifica del clone dell'immagine %s dalla condivisione glance." #, python-format msgid "Checking origin %(origin)s of volume %(volume)s." msgstr "Verifica dell'origine %(origin)s del volume %(volume)s." #, python-format msgid "" "Cinder ISCSI volume with current path %(path)s is no longer being managed. " "The new name is %(unm)s." msgstr "" "Il volume ISCSI Cinder con percorso corrente %(path)s non viene più gestito. " "Il nuovo nome è %(unm)s." #, python-format msgid "" "Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." msgstr "" "Il volume NFS Cinder con percorso corrente \"%(cr)s\" non è più gestito." #, python-format msgid "Cinder NFS volume with current path %(cr)s is no longer being managed." msgstr "" "Il volume NFS Cinder con percorso corrente %(cr)s non viene più gestito. " msgid "Cinder secure environment indicator file exists." msgstr "Il file indicatore dell'ambiente sicuro Cinder esiste." 
#, python-format msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" msgstr "CiscoFCZoneDriver - Aggiungi connessione per associazione I-T: %s" #, python-format msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" msgstr "CiscoFCZoneDriver - Elimina connessione per associazione I-T: %s" #, python-format msgid "Cleaning cache for share %s." msgstr "Cancellazione della cache per la condivisione %s." msgid "Cleaning up incomplete backup operations." msgstr "Ripulitura delle operazioni di backup incomplete." #, python-format msgid "Clone %s created." msgstr "Clone %s creato." #, python-format msgid "Cloning from cache to destination %s" msgstr "Clonazione dalla cache alla destinazione %s" #, python-format msgid "Cloning from snapshot to destination %s" msgstr "Clonazione dall'istantanea alla destinazione %s" #, python-format msgid "Cloning image %s from cache" msgstr "Clonazione dell'immagine %s dalla cache" #, python-format msgid "Cloning image %s from snapshot." msgstr "Clonazione dell'immagine %s dall'istantanea." #, python-format msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "Clonazione del volume %(src)s nel volume %(dst)s" #, python-format msgid "" "Cloning volume from snapshot volume=%(vol)s snapshot=%(snap)s clone=" "%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=%(agent-type)s " "perfpol-name=%(perfpol-name)s encryption=%(encryption)s cipher=%(cipher)s " "multi-initiator=%(multi-initiator)s" msgstr "" "Clonazione del volume dal volume dell'istantanea=%(vol)s snapshot=%(snap)s " "clone=%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=" "%(agent-type)s perfpol-name=%(perfpol-name)s encryption=%(encryption)s " "cipher=%(cipher)s multi-initiator=%(multi-initiator)s" #, python-format msgid "" "Cloning with volume_name %(vname)s clone_name %(cname)s export_path %(epath)s" msgstr "" "Clonazione con volume_name %(vname)s clone_name %(cname)s export_path " "%(epath)s" #, python-format msgid "CloudByte API executed successfully for command [%s]." msgstr "API CloudByte eseguito correttamente per il comando [%s]." #, python-format msgid "" "CloudByte operation [%(operation)s] succeeded for volume [%(cb_volume)s]." msgstr "" "Operazione CloudByte [%(operation)s] eseguita correttamente per il volume " "[%(cb_volume)s]." msgid "Complete-Migrate volume completed successfully." msgstr "Completamento-migrazione del volume completati correttamente." #, python-format msgid "Completed: convert_to_base_volume: id=%s." msgstr "Completato: convert_to_base_volume: id=%s." #, python-format msgid "Configured pools: %s" msgstr "Pool configurati: %s" #, python-format msgid "" "Connect initialization info: {driver_volume_type: fibre_channel, data: " "%(properties)s" msgstr "" "Informazioni sull'inizializzazione della connessione: {driver_volume_type: " "fibre_channel, dati: %(properties)s" #, python-format msgid "Connecting to host: %s." msgstr "Connessione a host: %s." #, python-format msgid "Connecting to target host: %s for backend enabled migration." msgstr "" "Connessione a host di destinazione: %s per la migrazione abiliata al backend." #, python-format msgid "Connector returning fcnsinfo-%s" msgstr "Il connettore restituisce fcnsinfo-%s" #, python-format msgid "Consistency group %(cg)s is created successfully." msgstr "Gruppo di coerenza %(cg)s creato correttamente." #, python-format msgid "Consistency group %s was deleted successfully." msgstr "Gruppo di coerenza %s eliminato correttamente." 
#, python-format msgid "Consistency group %s: created successfully" msgstr "Gruppo di coerenza %s: creato correttamente" #, python-format msgid "Consistency group %s: creating" msgstr "Creazione del gruppo di coerenza %s:" #, python-format msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" msgstr "Convertita immagine di %(sz).2f MB su %(mbps).2f MB/s" #, python-format msgid "" "Converting %(volume_name)s to full provisioning with userCPG=%(new_cpg)s" msgstr "" "Conversione di %(volume_name)s a full provisioning con userCPG=%(new_cpg)s" #, python-format msgid "" "Converting %(volume_name)s to thin dedup provisioning with userCPG=" "%(new_cpg)s" msgstr "" "Conversione di %(volume_name)s a thin dedup provisioning con userCPG=" "%(new_cpg)s" #, python-format msgid "" "Converting %(volume_name)s to thin provisioning with userCPG=%(new_cpg)s" msgstr "" "Conversione di %(volume_name)s a thin provisioning con userCPG=%(new_cpg)s" msgid "Coordination backend started successfully." msgstr "Backend di coordinazione avviato correttamente. " #, python-format msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." msgstr "" "Copiata immagine %(img)s nel volume %(vol)s utilizzando il carico di lavoro " "offload di copia." #, python-format msgid "Copied image %(img)s to volume %(vol)s using local image cache." msgstr "" "Copiata immagine %(img)s in volume %(vol)s utilizzando la cache immagini " "locale." #, python-format msgid "Copied image to volume %s using regular download." msgstr "Copiata immagine nel volume %s utilizzando il download normale." #, python-format msgid "Copy job to dest vol %s completed." msgstr "Lavoro di copia nel volume di destinazione %s completato." msgid "Copy volume to image completed successfully." msgstr "Copia del volume nell'immagine completata correttamente." #, python-format msgid "Copying src vol %(src)s to dest vol %(dst)s." msgstr "" "Copia del volume di origine %(src)s nel volume di destinazione %(dst)s." #, python-format msgid "Could not find replica to delete of volume %(vol)s." msgstr "Impossibile trovare la replica per l'eliminazione del volume %(vol)s." #, python-format msgid "Could not run dpkg-query command: %(msg)s." msgstr "Impossibile eseguire il comando dpkg-query: %(msg)s." #, python-format msgid "Could not run rpm command: %(msg)s." msgstr "Impossibile eseguire il comando rpm: %(msg)s." #, python-format msgid "" "Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" msgstr "" "Impossibile aggiornare il pool di archiviazione con mmchattr a %(pool)s, " "errore: %(error)s" #, python-format msgid "" "Couldn't find destination volume %(vol)s in the database. The entry might be " "successfully deleted during migration completion phase." msgstr "" "Impossibile trovare il volume di destinazione %(vol)s nel database. La voce " "potrebbe essere stata eliminata correttamente durante la fase di " "completamento della migrazione." #, python-format msgid "" "Couldn't find the temporary volume %(vol)s in the database. There is no need " "to clean up this volume." msgstr "" "Impossibile trovare il volume temporaneo %(vol)s nel database. Non c'è " "alcuna necessità di ripulire questo volume." #, python-format msgid "Create Cloned Volume %(volume_id)s completed." msgstr "Creazione del volume clonato %(volume_id)s completata." #, python-format msgid "Create Consistency Group: %(group)s." msgstr "Crea gruppo di coerenza: %(group)s." #, python-format msgid "Create Volume %(volume_id)s completed." 
msgstr "Creazione del volume %(volume_id)s completata." #, python-format msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." msgstr "" "Creazione volume %(volume_id)s da istantanea %(snapshot_id)s completata." #, python-format msgid "" "Create Volume: %(volume)s Size: %(size)s pool: %(pool)s provisioning: " "%(provisioning)s tiering: %(tiering)s " msgstr "" "Crea volume: %(volume)s Dimensione: %(size)s pool: %(pool)s provisioning: " "%(provisioning)s livellamento: %(tiering)s " #, python-format msgid "" "Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " "%(sourceName)s." msgstr "" "Creare una replica dal volume: Volume clone: %(cloneName)s Volume di " "origine: %(sourceName)s." #, python-format msgid "Create backup finished. backup: %s." msgstr "Creazione backup completata, backup: %s." #, python-format msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "Creazione backup avviata, backup: %(backup_id)s volume: %(volume_id)s." msgid "Create consistency group completed successfully." msgstr "Creazione del gruppo di coerenza completata correttamente." #, python-format msgid "Create consistency group from source-%(source)s completed successfully." msgstr "" "Creazione del gruppo di coerenza da origine-%(source)s completata " "correttamente." #, python-format msgid "Create export done from Volume %(volume_id)s." msgstr "Creazione esportazione eseguita dal volume %(volume_id)s." msgid "Create snapshot completed successfully" msgstr "Creazione istantanea completata correttamente" #, python-format msgid "" "Create snapshot for Consistency Group %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgstr "" "Crea istantanea per gruppo di coerenza %(cgId)s cgsnapshotID: %(cgsnapshot)s." #, python-format msgid "Create snapshot from volume %s" msgstr "Crea istantanea dal volume %s" #, python-format msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "Crea istantanea: %(snapshot)s: volume: %(volume)s" #, python-format msgid "" "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " "%(raid_snapshot_id)s, volume: %(volume)s." msgstr "" "Creazione eseguita correttamente. Istantanea: %(snapshot)s, ID istantanea in " "raid: %(raid_snapshot_id)s, volume: %(volume)s." #, python-format msgid "Create target consistency group %(targetCg)s." msgstr "Crea gruppo di coerenza di destinazione %(targetCg)s." #, python-format msgid "Create volume of %s GB" msgstr "Crea volume di %s GB" #, python-format msgid "CreateReplay success %s" msgstr "CreateReplay riuscito %s" #, python-format msgid "" "Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume [%(cb_vol)s] " "and OpenStack volume [%(stack_vol)s]." msgstr "" "Creata correttamente un'istantanea CloudByte [%(cb_snap)s] in relazione al " "volume CloudByte [%(cb_vol)s] e al volume OpenStack [%(stack_vol)s]." #, python-format msgid "Created Consistency Group %s" msgstr "Creato gruppo di coerenza %s" #, python-format msgid "" "Created a clone [%(cb_clone)s] at CloudByte snapshot path [%(cb_snap)s] w.r." "t parent OpenStack volume [%(stack_vol)s]." msgstr "" "Creato un clone [%(cb_clone)s] nel percorso dell'istantanea CloudByte " "[%(cb_snap)s] in relazione al volume OpenStack principale [%(stack_vol)s]." #, python-format msgid "Created datastore folder: %s." msgstr "Creata cartella di datastore: %s." 
#, python-format msgid "" "Created lun-map:\n" "%s" msgstr "" "Creata associazione lun:\n" "%s" #, python-format msgid "" "Created multi-attach E-Series host group %(label)s with clusterRef " "%(clusterRef)s" msgstr "" "Creato gruppo di host multi-attach E-Series '%(label)s' con clusterRef " "%(clusterRef)s" #, python-format msgid "Created new initiator group name: %(igGroupName)s." msgstr "Creato nome nuovo del gruppo di iniziatori: %(igGroupName)s." #, python-format msgid "Created new masking view : %(maskingViewName)s." msgstr "Creata nuova vista di mascheramento: %(maskingViewName)s." #, python-format msgid "Created new storage group: %(storageGroupName)s." msgstr "Creato nuovo gruppo di archiviazione: %(storageGroupName)s." #, python-format msgid "Created snap grp with label %s." msgstr "Creato grp snap con etichetta %s." #, python-format msgid "Created volume %(instanceId)s: %(name)s" msgstr "Creato volume %(instanceId)s: %(name)s" #, python-format msgid "Created volume %(volname)s, volume id %(volid)s." msgstr "Creato volume %(volname)s, id volume %(volid)s." msgid "Created volume successfully." msgstr "Volume creato correttamente." #, python-format msgid "Created volume with label %s." msgstr "Creato volume con etichetta %s." #, python-format msgid "Creating %(volume)s on %(device)s" msgstr "Creazione di %(volume)s su %(device)s" msgid "Creating Consistency Group" msgstr "Creazione gruppo di coerenza" #, python-format msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" "Creazione backup del volume %(volume_id)s nel contenitore %(container)s" #, python-format msgid "Creating cgsnapshot %(name)s." msgstr "Creazione cgsnapshot %(name)s." #, python-format msgid "Creating clone of volume: %s" msgstr "Creazione clone del volume: %s" #, python-format msgid "Creating clone of volume: %s." msgstr "Creazione clone del volume: %s." #, python-format msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." msgstr "Creazione gruppo di coerenza %(name)s da cgsnapshot %(snap)s." #, python-format msgid "" "Creating consistency group %(name)s from source consistency group " "%(source_cgid)s." msgstr "" "Creazione gruppo di coerenza %(name)s da gruppo di coerenza di origine " "%(source_cgid)s." #, python-format msgid "Creating consistency group %(name)s." msgstr "Creazione gruppo di coerenza %(name)s." #, python-format msgid "Creating host object %(host_name)r with IQN: %(iqn)s." msgstr "Creazione dell'oggetto host %(host_name)r con IQN: %(iqn)s." #, python-format msgid "Creating host object %(host_name)r with WWN: %(wwn)s." msgstr "Creazione dell'oggetto host %(host_name)r con WWN: %(wwn)s." #, python-format msgid "Creating host with ports %s." msgstr "Creazione host con porte %s." #, python-format msgid "Creating image snapshot %s" msgstr "Creazione dell'istantanea dell'immagine %s" #, python-format msgid "Creating initiator group %(grp)s with initiator %(iname)s" msgstr "Creazione del gruppo iniziatore %(grp)s con iniziatore %(iname)s" #, python-format msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" msgstr "Creazione del gruppo iniziatore %(igrp)s con un iniziatore %(iname)s" #, python-format msgid "Creating iscsi_target for volume: %s" msgstr "Creazione di iscsi_target per il volume: %s" #, python-format msgid "Creating regular file: %s.This may take some time." msgstr "" "Creazione del file regolare: %s. Questa operazione potrebbe richiedere del " "tempo." 
#, python-format msgid "Creating server %s" msgstr "Creazione server %s" #, python-format msgid "Creating snapshot %(snap)s of volume %(vol)s" msgstr "Creazione dell'istantanea %(snap)s del volume %(vol)s." #, python-format msgid "" "Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " "snap_description=%(desc)s" msgstr "" "Creazione dell'istantanea per il nome volume=%(vol)s nome_istantanea=" "%(name)s descrizione_istantanea=%(desc)s" #, python-format msgid "Creating snapshot: %s" msgstr "Creazione istantanea: %s." #, python-format msgid "Creating temp snapshot %(snap)s from volume %(vol)s" msgstr "Creazione dell'istantanea temporanea %(snap)s dal volume %(vol)s." #, python-format msgid "Creating transfer of volume %s" msgstr "Creazione trasferimento di volume %s" #, python-format msgid "Creating volume %s from snapshot." msgstr "Creazione del volume %s dall'istantanea." #, python-format msgid "Creating volume from snapshot: %s" msgstr "Creazione volume dall'istantanea: %s." #, python-format msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s." msgstr "" "Creazione del volume di %(size)s GB per il ripristino del backup " "%(backup_id)s." #, python-format msgid "Creating volume snapshot: %s." msgstr "Creazione istantanea del volume: %s." #, python-format msgid "Creatng volume from snapshot. volume: %s" msgstr "Creazione volume dall'istantanea. volume: %s" #, python-format msgid "DRBD connection for %s already removed" msgstr "Connessione DRBD per %s già rimossa" #, python-format msgid "Delete Consistency Group: %(group)s." msgstr "Elimina gruppo di coerenza: %(group)s." #, python-format msgid "Delete Snapshot %(snapshot_id)s completed." msgstr "Eliminazione istantanea %(snapshot_id)s completata." #, python-format msgid "Delete Snapshot: %(snapshot)s" msgstr "Elimina istantanea: %(snapshot)s." #, python-format msgid "Delete Snapshot: %(snapshot)s." msgstr "Elimina istantanea: %(snapshot)s." #, python-format msgid "Delete Snapshot: %(snapshotName)s." msgstr "Elimina istantanea: %(snapshotName)s." #, python-format msgid "Delete Volume %(volume_id)s completed." msgstr "Eliminazione del volume %(volume_id)s completata." #, python-format msgid "Delete backup finished, backup %s deleted." msgstr "Eliminazione backup completata, backup %s eliminato." #, python-format msgid "Delete backup started, backup: %s." msgstr "Eliminazione backup avviata, backup: %s." #, python-format msgid "Delete backup with id: %s" msgstr "Elimina (delete) backup con l'id: %s" #, python-format msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" msgstr "" "Elimina cgsnapshot %(snap_name)s per il gruppo di coerenza : %(group_name)s" #, python-format msgid "Delete cgsnapshot with id: %s" msgstr "Elimina cgsnapshot con id:%s" #, python-format msgid "Delete connection target list: %(targets)s" msgstr "Elimina elenco di destinazioni di connessione: %(targets)s" msgid "Delete consistency group completed successfully." msgstr "Eliminazione del gruppo di coerenza completata correttamente." #, python-format msgid "Delete consistency group with id: %s" msgstr "Elimina gruppo di coerenza con id: %s" #, python-format msgid "" "Delete of backup '%(backup)s' for volume '%(volume)s' finished with warning." msgstr "" "Eliminazione del backup '%(backup)s' per il volume '%(volume)s' completata " "con avvertenza." 
msgid "Delete snapshot completed successfully" msgstr "Eliminazione istantanea completata correttamente" #, python-format msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgstr "" "Elimina istantanea per CG di origine %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgid "Delete snapshot metadata completed successfully." msgstr "Eliminazione dei metadati dell'istantanea completata correttamente." #, python-format msgid "Delete snapshot with id: %s" msgstr "Elimina istantanea con id: %s" #, python-format msgid "Delete transfer with id: %s" msgstr "Elimina trasferimento con id: %s" msgid "Delete volume metadata completed successfully." msgstr "Eliminazione dei metadati del volume completata correttamente." msgid "Delete volume request issued successfully." msgstr "Eliminazione della richiesta del volume eseguita correttamente." #, python-format msgid "Delete volume with id: %s" msgstr "Elimina volume con id: %s" #, python-format msgid "Deleted %(row)d rows from table=%(table)s" msgstr "Eliminate %(row)d righe dalla tabella=%(table)s" #, python-format msgid "" "Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " "[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." msgstr "" "Eliminata un'istantanea CloudByte [%(snap)s] in relazione al volume " "CloudByte principale [%(cb_vol)s] e al volume OpenStack principale " "[%(stack_vol)s]." #, python-format msgid "Deleted the VM backing: %s." msgstr "Eliminato backup VM: %s." #, python-format msgid "Deleted vmdk file: %s." msgstr "Eliminato file vmdk: %s." msgid "Deleted volume successfully." msgstr "Volume eliminato correttamente." msgid "Deleting Consistency Group" msgstr "Eliminazione gruppo di coerenza" #, python-format msgid "Deleting Volume: %(volume)s" msgstr "Eliminazione del volume: %(volume)s" #, python-format msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." msgstr "" "Eliminazione dell'immagine di base di backup='%(basename)s' di volume " "%(volume)s." #, python-format msgid "Deleting deleteInitiatorGrp %s " msgstr "Eliminazione di deleteInitiatorGrp %s " #, python-format msgid "Deleting snapshot %(ss)s from %(pro)s" msgstr "Eliminazione istantanea %(ss)s da %(pro)s" #, python-format msgid "Deleting snapshot %s " msgstr "Eliminazione istantanea %s" #, python-format msgid "Deleting snapshot: %s" msgstr "Eliminazione dell'istantanea: %s" #, python-format msgid "Deleting stale snapshot: %s" msgstr "Eliminazione dell'istantanea obsoleta: %s" #, python-format msgid "Deleting unneeded host %(host_name)r." msgstr "Eliminazione dell'host non necessario %(host_name)r." #, python-format msgid "Deleting volume %s " msgstr "Eliminazione del volume %s" #, python-format msgid "Deleting volume %s." msgstr "Eliminazione del volume %s." #, python-format msgid "Detach Volume, metadata is: %s." msgstr "Scollegare il volume, i metadati sono: %s." msgid "Detach volume completed successfully." msgstr "Scollegamento del volume completato correttamente." msgid "Determined volume DB was empty at startup." msgstr "È stato stabilito che il DB dei volumi era vuoto all'avvio." msgid "Determined volume DB was not empty at startup." msgstr "È stato stabilito che il DB dei volumi non era vuoto all'avvio." #, python-format msgid "" "Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " "delete anything." msgstr "" "Non è stata trovata l'istantanea: %(name)s per il backup: %(backing)s. Non è " "necessario eliminare nulla." 
#, python-format msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" msgstr "" "L'IP di rilevamento %(disc_ip)s viene trovato nella sottorete gestione+dati " "%(net_label)s" #, python-format msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" msgstr "" "L'IP di rilevamento %(disc_ip)s viene utilizzato nella sottorete di dati " "%(net_label)s" #, python-format msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" msgstr "" "L'IP di rilevamento %(disc_ip)s viene utilizzato nella sottorete " "%(net_label)s" #, python-format msgid "Discovery ip %s is used on mgmt+data subnet" msgstr "L'IP di rilevamento %s viene utilizzato nella sottorete gestione+dati" #, python-format msgid "Dissociating volume %s " msgstr "Dissociazione del volume %s" #, python-format msgid "Domain id is %s." msgstr "L'ID dominio è %s." #, python-format msgid "Done copying image: %(id)s to volume: %(vol)s." msgstr "Eseguita copia dell'immagine: %(id)s su volume: %(vol)s." #, python-format msgid "Done copying volume %(vol)s to a new image %(img)s" msgstr "Eseguita copia del volume %(vol)s in una nuova immagine %(img)s" #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS encryption-at-rest feature not enabled " "in cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Rilevato un cluster GPFS di livello inferiore. La funzione encryption-at-" "rest GPFS non è abilitata nel livello daemon del cluster %(cur)s - deve " "essere almeno di livello %(min)s." msgid "Driver initialization completed successfully." msgstr "Inizializzazione del driver completata correttamente." msgid "Driver post RPC initialization completed successfully." msgstr "Post-inizializzazione RPC del driver completata correttamente." #, python-format msgid "Driver stats: %s" msgstr "Statistiche driver: %s" #, python-format msgid "" "E-series proxy API version %(version)s does not support full set of SSC " "extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "API proxy E-series versione %(version)s non supporta la serie completa di " "specifiche supplementari SSC. La versione proxy deve essere almeno " "%(min_version)s. " #, python-format msgid "E-series proxy API version %s does not support autosupport logging." msgstr "" "API proxy E-series versione %s non supporta l'accesso con supporto " "automatico. " #, python-format msgid "EQL-driver: Setup is complete, group IP is \"%s\"." msgstr "Driver EQL: La configurazione è completa, l'IP gruppo è \"%s\"." #, python-format msgid "EQL-driver: executing \"%s\"." msgstr "Driver EQL: esecuzione di \"%s\"." #, python-format msgid "Editing Volume %(vol)s with mask %(mask)s" msgstr "Modifica del volume %(vol)s con maschera %(mask)s" #, python-format msgid "Elapsed time for clear volume: %.2f sec" msgstr "Tempo trascorso per la cancellazione del volume: %.2f sec" msgid "Embedded mode detected." msgstr "Rilevata modalità integrata." msgid "Enabling LVM thin provisioning by default because a thin pool exists." msgstr "" "Abilitazione del thin provisioning LVM per impostazione predefinita perché " "esiste un pool thin." msgid "Enabling LVM thin provisioning by default because no LVs exist." msgstr "" "Abilitazione del thin provisioning LVM per impostazione predefinita perché " "non esiste alcun LV." 
#, python-format msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" msgstr "Inserimento del volume extend_volume=%(vol)s new_size=%(size)s" #, python-format msgid "" "Entering initialize_connection volume=%(vol)s connector=%(conn)s location=" "%(loc)s" msgstr "" "Inserimento del volume initialize_connection=%(vol)s connettore=%(conn)s " "ubicazione=%(loc)s" #, python-format msgid "" "Entering terminate_connection volume=%(vol)s connector=%(conn)s location=" "%(loc)s." msgstr "" "Inserimento del volume terminate_connection=%(vol)s connettore=%(conn)s " "ubicazione=%(loc)s" #, python-format msgid "Entering unmanage_volume volume = %s" msgstr "Inserimento del volume unmanage_volume = %s" #, python-format msgid "Exploring array subnet label %s" msgstr "Esplorazione dell'etichetta della sottorete dell'array %s" #, python-format msgid "Export record finished, backup %s exported." msgstr "Esportazione record completata, backup %s esportato." #, python-format msgid "Export record started, backup: %s." msgstr "Esportazione record avviata, backup: %s." #, python-format msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." msgstr "Esportata lun %(vol_id)s su lun_id %(lun_id)s." msgid "Extend volume completed successfully." msgstr "Estensione del volume completata correttamente." msgid "Extend volume request issued successfully." msgstr "Richiesta di estensione del volume eseguita correttamente." #, python-format msgid "" "Extend volume: %(volumename)s, oldsize: %(oldsize)s, newsize: %(newsize)s." msgstr "" "Estendere il volume: %(volumename)s, oldsize: %(oldsize)s, newsize: " "%(newsize)s." #, python-format msgid "Extending volume %s." msgstr "Estensione del volume %s." #, python-format msgid "Extending volume: %(id)s New size: %(size)s GB" msgstr "Estensione volume: %(id)s Nuova dimensione: %(size)s GB" #, python-format msgid "" "FAST: capacity stats for policy %(fastPolicyName)s on array %(arrayName)s. " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." msgstr "" "FAST: statistiche di capacità per la politica %(fastPolicyName)s sull'array " "%(arrayName)s. total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." #, python-format msgid "FC Initiators %(in)s of %(ins)s need registration" msgstr "Gli iniziatori FC %(in)s di %(ins)s necessitano la registrazione." msgid "Failed over to replication target successfully." msgstr "Failover della destinazione di replica eseguito correttamente." #, python-format msgid "Failed to create host: %(name)s. Check if it exists on the array." msgstr "" "Impossibile creare l'host: %(name)s. Controllare se esiste nell'array. " #, python-format msgid "" "Failed to create hostgroup: %(name)s. Please check if it exists on the array." msgstr "" "Impossibile creare il gruppo di host: %(name)s. Controllare se esiste " "sull'array." #, python-format msgid "Failed to open iet session list for %(vol_id)s: %(e)s" msgstr "Impossibile aprire l'elenco di sessioni iet per %(vol_id)s: %(e)s" #, python-format msgid "Failing backend to %s" msgstr "Esito negativo del backend su %s" #, python-format msgid "Failing over volume %(id)s replication: %(res)s." msgstr "Failover del volume %(id)s replica: %(res)s." 
#, python-format msgid "Fault thrown: %s" msgstr "Errore generato: %s" #, python-format msgid "Fetched vCenter server version: %s" msgstr "Recuperata versione server vCenter: %s" #, python-format msgid "Filter %(cls_name)s returned %(obj_len)d host(s)" msgstr "Il filtro %(cls_name)s ha restituito %(obj_len)d host(s)" #, python-format msgid "Filtered targets for SAN is: %(targets)s" msgstr "Le destinazioni filtrate per SAN sono: %(targets)s" #, python-format msgid "Filtered targets for SAN is: %s" msgstr "Le destinazioni filtrate per SAN sono: %s" #, python-format msgid "Final filtered map for delete connection: %(i_t_map)s" msgstr "Associazione filtrata finale per eliminazione connessione: %(i_t_map)s" #, python-format msgid "Final filtered map for fabric: %(i_t_map)s" msgstr "Associazione filtrata finale per fabric: %(i_t_map)s" #, python-format msgid "Fixing previous mount %s which was not unmounted correctly." msgstr "" "Correzione del montaggio precedente %s che non è stato smontato " "correttamente." #, python-format msgid "Flash Cache policy set to %s" msgstr "Politica Flash Cache impostata su %s" #, python-format msgid "Flexvisor already unassigned volume %(id)s." msgstr "Flexvisor ha già annullato l'assegnazione del volume %(id)s." #, python-format msgid "Flexvisor snapshot %(id)s not existed." msgstr "L'istantanea Flexvisor %(id)s non esiste." #, python-format msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." msgstr "" "Flexvisor è riuscito ad aggiungere il volume %(id)s al gruppo %(cgid)s." #, python-format msgid "Flexvisor succeeded to clone volume %(id)s." msgstr "Flexvisor è riuscito a clonare il volume %(id)s." #, python-format msgid "Flexvisor succeeded to create volume %(id)s from snapshot." msgstr "Flexvisor è riuscito a creare il volume %(id)s dall'istantanea. " #, python-format msgid "Flexvisor succeeded to create volume %(id)s." msgstr "Flexvisor è riuscito a creare il volume %(id)s." #, python-format msgid "Flexvisor succeeded to delete snapshot %(id)s." msgstr "Flexvisor è riuscito ad eliminare l'istantanea %(id)s." #, python-format msgid "Flexvisor succeeded to extend volume %(id)s." msgstr "Flexvisor è riuscito ad estendere il volume %(id)s." #, python-format msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor è riuscito a rimuovere il volume %(id)s dal gruppo %(cgid)s." #, python-format msgid "Flexvisor succeeded to unassign volume %(id)s." msgstr "Flexvisor è riuscito ad annullare l'assegnazione del volume %(id)s." #, python-format msgid "Flexvisor volume %(id)s does not exist." msgstr "Il volume Flexvisor %(id)s non esiste." #, python-format msgid "Folder %s does not exist, it was already deleted." msgstr "La cartella %s non esiste, è già stata eliminata." msgid "Force upload to image is disabled, Force option will be ignored." msgstr "" "Il caricamento forzato nell'immagine è disabilitato, l'opzione Force verrà " "ignorata." #, python-format msgid "Found a temporary snapshot %(name)s" msgstr "Trovata un'istantanea temporanea %(name)s" #, python-format msgid "Found existing masking view: %(maskingViewName)s." msgstr "Trovata vista di mascheramento esistente: %(maskingViewName)s." msgid "Found failover volume. Competing failover." msgstr "Trovato volume di failover. Failover concorrente." #, python-format msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." msgstr "" "La capacità disponibile per il backend è: %(free)s, capacità totale: " "%(total)s." 
#, python-format msgid "Friendly zone name after forming: %(zonename)s" msgstr "Nome zona breve dopo la formazione: %(zonename)s" #, python-format msgid "Generating transfer record for volume %s" msgstr "Generazione del record di trasferimento per il volume %s" #, python-format msgid "Get FC targets %(tg)s to register initiator %(in)s." msgstr "Ricevi destinazioni FC %(tg)s per registrare l'iniziatore %(in)s." #, python-format msgid "Get ISCSI targets %(tg)s to register initiator %(in)s." msgstr "Ricevi destinazioni ISCSI %(tg)s per registrare l'iniziatore %(in)s." #, python-format msgid "Get Volume response: %s" msgstr "Ricevi risposta del volume: %s" msgid "Get all snapshots completed successfully." msgstr "Richiamo di tutte le istantanee completato correttamente." msgid "Get all volumes completed successfully." msgstr "Richiamo di tutti i volumi completato correttamente." #, python-format msgid "Get domain by name response: %s" msgstr "Ottieni dominio dalla risposta del nome: %s" #, python-format msgid "Get service: %(lbl)s->%(svc)s" msgstr "Richiama servizio: %(lbl)s->%(svc)s" msgid "Get snapshot metadata completed successfully." msgstr "Richiamo dei metadati dell'istantanea completato correttamente." msgid "Get snapshot metadata value not implemented." msgstr "Richiamo del valore dei metadati dell'istantanea non implementato." #, python-format msgid "Get the default ip: %s." msgstr "Richiama l'ip predefinito: %s." msgid "Get volume admin metadata completed successfully." msgstr "Richiamo dei metadati di gestione del volume completato correttamente." msgid "Get volume image-metadata completed successfully." msgstr "" "Richiamo dei metadati dell'immagine del volume completato correttamente." msgid "Get volume metadata completed successfully." msgstr "Richiamo dei metadati del volume completato correttamente." msgid "Getting getInitiatorGrpList" msgstr "Richiamo di getInitiatorGrpList" #, python-format msgid "Getting volume information for vol_name=%s" msgstr "Richiamo delle informazioni sul volume per vol_name=%s" #, python-format msgid "Going to perform request again %s with valid token." msgstr "Eseguire di nuovo la richiesta %s con un token valido." #, python-format msgid "HDP list: %s" msgstr "Elenco HDP: %s" #, python-format msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" msgstr "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" #, python-format msgid "HPELeftHand API version %s" msgstr "Versione API HPELeftHand API %s" #, python-format msgid "HTTP exception thrown: %s" msgstr "Generata eccezione HTTP: %s" #, python-format msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." msgstr "ID hypermetro: %(metro_id)s. ID lun remota: %(remote_lun_id)s." #, python-format msgid "ISCSI properties: %(properties)s" msgstr "Proprietà ISCSI: %(properties)s" msgid "ISCSI provider_location not stored, using discovery." msgstr "provider_location ISCSI non archiviato, utilizzare il rilevamento." #, python-format msgid "ISCSI volume is: %(volume)s" msgstr "Il volume ISCSI è: %(volume)s" #, python-format msgid "Ignored LU creation error \"%s\" while ensuring export." msgstr "" "Ignorato errore di creazione LU \"%s\" durante la verifica dell'esportazione." #, python-format msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export." msgstr "" "Ignorato errore di aggiunta voce di associazione LUN \"%s\" durante la " "verifica dell'esportazione." #, python-format msgid "Ignored target creation error \"%s\" while ensuring export." 
msgstr "" "Ignorato errore di creazione destinazione \"%s\" durante la verifica " "dell'esportazione." #, python-format msgid "Ignored target group creation error \"%s\" while ensuring export." msgstr "" "Ignorato errore di creazione gruppo di destinazione \"%s\" durante la " "verifica dell'esportazione." #, python-format msgid "" "Ignored target group member addition error \"%s\" while ensuring export." msgstr "" "Ignorato errore di aggiunta membro del gruppo di destinazione \"%s\" durante " "la verifica dell'esportazione." #, python-format msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." msgstr "L'immagine %(pool)s/%(image)s è dipendente nell'istantanea %(snap)s." #, python-format msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" msgstr "" "Clonazione dell'immagine non corretta per l'immagine %(image_id)s. " "Messaggio: %(msg)s" #, python-format msgid "Image download %(sz).2f MB at %(mbps).2f MB/s" msgstr "Download immagine di %(sz).2f MB su %(mbps).2f MB/s" #, python-format msgid "Image will locally be converted to raw %s" msgstr "L'immagine verrà convertita localmente nella riga %s" #, python-format msgid "Image-volume cache disabled for host %(host)s." msgstr "Cache image-volume disabilitata per l'host %(host)s." #, python-format msgid "Image-volume cache enabled for host %(host)s." msgstr "Cache image-volume abilitata per l'host %(host)s." #, python-format msgid "Import record id %s metadata from driver finished." msgstr "Importazione metadati id record %s da driver completata." #, python-format msgid "Import record started, backup_url: %s." msgstr "Importazione record avviata, backup_url: %s." #, python-format msgid "Imported %(fail)s to %(guid)s." msgstr "Importato %(fail)s in %(guid)s." #, python-format msgid "Initialize connection: %(volume)s." msgstr "Inizializza connessione: %(volume)s." msgid "Initialize volume connection completed successfully." msgstr "" "Inizializzazione della connessione del volume completata correttamente." #, python-format msgid "Initialized driver %(name)s version: %(vers)s" msgstr "Inizializzato driver %(name)s versione: %(vers)s" #, python-format msgid "" "Initializing RPC dependent components of volume driver %(driver_name)s " "(%(version)s)" msgstr "" "Inizializzazione dei componenti dipendenti RPC del driver del volume " "%(driver_name)s (%(version)s)" msgid "Initializing extension manager." msgstr "Inizializzazione gestore estensioni." #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s." msgstr "" "Il nome iniziatore %(initiatorNames)s non si trova nell'array " "%(storageSystemName)s." #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s. " msgstr "" "Il nome iniziatore %(initiatorNames)s non si trova nell'array " "%(storageSystemName)s. " #, python-format msgid "Initiator group name is %(grp)s for initiator %(iname)s" msgstr "Il nome del gruppo iniziatore è %(grp)s per l'iniziatore %(iname)s" #, python-format msgid "LUN %(id)s extended to %(size)s GB." msgstr "LUN %(id)s estesa a %(size)s GB." #, python-format msgid "LUN %(lun)s extended to %(size)s GB." msgstr "LUN %(lun)s estesa a %(size)s GB." #, python-format msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "La LUN %(lun)s di dimensione %(sz)s MB viene creata." #, python-format msgid "LUN with given ref %s need not be renamed during manage operation." 
msgstr "" "La LUN con un determinato riferimento %s non deve essere ridenominata " "durante l'operazione di gestione." #, python-format msgid "" "Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: " "%(name)s." msgstr "" "Lasciare create_volume: %(volumeName)s Codice di ritorno: %(rc)lu volume " "dict: %(name)s." #, python-format msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." msgstr "Lasciare delete_volume: %(volumename)s Codice di ritorno: %(rc)lu." #, python-format msgid "Leaving initialize_connection: %s" msgstr "Lasciare initialize_connection: %s" #, python-format msgid "Loaded extension: %s" msgstr "Estensione caricata: %s" #, python-format msgid "" "Logical Volume not found when querying LVM info. (vg_name=%(vg)s, lv_name=" "%(lv)s" msgstr "" "Volume logico non trovato durante la query per le informazioni LVM. (vg_name=" "%(vg)s, lv_name=%(lv)s" msgid "Manage existing volume completed successfully." msgstr "Gestione del volume esistente completata correttamente." #, python-format msgid "" "Manage operation completed for LUN with new path %(path)s and uuid %(uuid)s." msgstr "" "Operazione di gestione completata per LUN con nuovo percorso %(path)s e uuid " "%(uuid)s." #, python-format msgid "" "Manage operation completed for volume with new label %(label)s and wwn " "%(wwn)s." msgstr "" "Operazione di gestione completata per volume con nuova etichetta %(label)s e " "wwn %(wwn)s." #, python-format msgid "Manage volume %s" msgstr "Gestisci volume %s" msgid "Manage volume request issued successfully." msgstr "Richiesta di gestione del volume eseguita correttamente." #, python-format msgid "Masking view %(maskingViewName)s successfully deleted." msgstr "Vista di mascheramento %(maskingViewName)s eliminata correttamente." #, python-format msgid "Migrate Volume %(volume_id)s completed." msgstr "Migrazione del volume %(volume_id)s completata." msgid "Migrate volume completed successfully." msgstr "Migrazione del volume completata correttamente." msgid "Migrate volume completion issued successfully." msgstr "Completamento della migrazione del volume eseguito correttamente." msgid "Migrate volume request issued successfully." msgstr "Richiesta di migrazione del volume eseguita correttamente." #, python-format msgid "Migrating using retype Volume: %(volume)s." msgstr "Migrazione tramite la riscrittura del volume: %(volume)s." #, python-format msgid "" "Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to %(new_snap_cpg)s." msgstr "" "Modifica di %(volume_name)s snap_cpg da %(old_snap_cpg)s a %(new_snap_cpg)s." #, python-format msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" msgstr "Modifica di %(volume_name)s userCPG da %(old_cpg)s a %(new_cpg)s" #, python-format msgid "Modifying %s comments." msgstr "Modifica di %s commenti." msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." msgstr "" "Modulo PyWBEM non installato. Installare PyWBEM utilizzando il package " "python-pywbem." msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." msgstr "" "Modulo PyWBEM non installato. Installare PyWBEM utilizzando il package " "python-pywbem." #, python-format msgid "Mounting volume: %s ..." msgstr "Montaggio del volume: %s ..." 
#, python-format msgid "Mounting volume: %s succeeded" msgstr "Montaggio del volume: %s eseguito correttamente" #, python-format msgid "" "NON-FAST: capacity stats for pool %(poolName)s on array %(arrayName)s " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." msgstr "" "NON-FAST: statistiche di capacità per il pool %(poolName)s sull'array " "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." msgid "Need to remove FC Zone, building initiator target map" msgstr "" "È necessario rimuovere la zona FC, creazione dell'associazione di " "destinazione dell'iniziatore" msgid "Need to remove FC Zone, building initiator target map." msgstr "" "È necessario rimuovere la zona FC, creazione dell'associazione di " "destinazione dell'iniziatore." msgid "" "Neither security file nor plain text credentials are specified. Security " "file under home directory will be used for authentication if present." msgstr "" "Non sono specificati né il file di sicurezza né le credenziali del testo " "normale. Il file di sicurezza nella directory home verrà utilizzato per " "l'autenticazione, se presente." #, python-format msgid "" "NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " "loaded." msgstr "" "Driver NetApp di famiglia %(storage_family)s e protocollo " "%(storage_protocol)s caricato." #, python-format msgid "New Cinder secure environment indicator file created at path %s." msgstr "" "Nuovo file indicatore dell'ambiente sicuro Cinder creato al percorso %s." #, python-format msgid "" "New size is equal to the real size from backend storage, no need to extend. " "realsize: %(oldsize)s, newsize: %(newsize)s." msgstr "" "La nuova dimensione è uguale alla dimensione reale dell'archiviazione di " "backend, non c'è necessità di estenderla. dimensione reale: %(oldsize)s, " "nuova dimensione: %(newsize)s." #, python-format msgid "New str info is: %s." msgstr "La nuova informazione di stringa è: %s." #, python-format msgid "No dpkg-query info found for %(pkg)s package." msgstr "Nessuna informazione dpkg-query trovata per il pacchetto %(pkg)s." #, python-format msgid "No igroup found for initiator %s" msgstr "Nessun igroup trovato per l'iniziatore %s" #, python-format msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" msgstr "Nessuna destinazione iscsi presente per l'id volume:%(vol_id)s: %(e)s" #, python-format msgid "No need to extend volume %s as it is already the requested new size." msgstr "" "Nessuna necessità di estendere il volume %s in quanto è già nella nuova " "dimensione richiesta." #, python-format msgid "" "No replication synchronization session found associated with source volume " "%(source)s on %(storageSystem)s." msgstr "" "Nessuna sessione di sincronizzazione replica trovata associata al volume di " "origine %(source)s su %(storageSystem)s." #, python-format msgid "" "No restore point found for backup='%(backup)s' of volume %(volume)s although " "base image is found - forcing full copy." msgstr "" "Nessun punto di ripristino trovato per il backup='%(backup)s' del volume " "%(volume)s nonostante sia stata trovata l'immagine di base - forzatura della " "copia completa." #, python-format msgid "No rpm info found for %(pkg)s package." msgstr "Nessuna informazione rpm trovata per il pacchetto %(pkg)s." 
#, python-format msgid "No targets to add or remove connection for initiator: %(init_wwn)s" msgstr "" "Nessuna destinazione per aggiungere o rimuovere la connessione per " "l'iniziatore: %(init_wwn)s" #, python-format msgid "No volume found for CG: %(cg)s." msgstr "Nessun volume trovato per CG: %(cg)s." #, python-format msgid "Non fatal cleanup error: %s." msgstr "Errore di ripulitura non grave: %s." #, python-format msgid "OpenStack OS Version Info: %(info)s" msgstr "Informazioni sulla versione OS OpenStack: %(info)s" #, python-format msgid "" "Origin volume %s appears to be removed, try to remove it from backend if it " "is there." msgstr "" "Il volume di origine %s risulta rimosso, provare a rimuoverlo dal backend se " "presente." #, python-format msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" "Sovrascrittura del volume %(volume_id)s con ripristino del backup " "%(backup_id)s" #, python-format msgid "Params for add volume request: %s." msgstr "Parametri per la richiesta di aggiunta del volume: %s." #, python-format msgid "Parse_loc: %s" msgstr "Parse_loc: %s" #, python-format msgid "Performing post clone for %s" msgstr "Esecuzione del post clone per %s" #, python-format msgid "Performing secure delete on volume: %s" msgstr "Esecuzione di secure delete nel volume: %s" msgid "Plain text credentials are being used for authentication" msgstr "" "Per l'autenticazione vengono utilizzare le credenziali del testo normale" #, python-format msgid "Pool id is %s." msgstr "L'ID pool è %s." #, python-format msgid "Port group instance name is %(foundPortGroupInstanceName)s." msgstr "Il nome istanza del gruppo di porte è %(foundPortGroupInstanceName)s." #, python-format msgid "Post clone resize LUN %s" msgstr "LUN di ridimensionamento post clone %s" #, python-format msgid "Prefer use target wwpn %(wwpn)s" msgstr "Preferire l'utilizzo di wwpn di destinazione %(wwpn)s" #, python-format msgid "Profile %s has been deleted." msgstr "Il profilo %s è stato eliminato." msgid "Promote volume replica completed successfully." msgstr "Promozione della replica del volume completata correttamente." #, python-format msgid "Protection domain id: %(domain_id)s." msgstr "ID dominio di protezione: %(domain_id)s." #, python-format msgid "Protection domain name: %(domain_name)s." msgstr "Nome dominio di protezione: %(domain_name)s." msgid "Proxy mode detected." msgstr "Rilevata modalità proxy." #, python-format msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" msgstr "" "Analisi delle righe eliminate più vecchie di giorni=%(age)d dalla tabella=" "%(table)s" #, python-format msgid "QoS: %s." msgstr "QoS: %s." #, python-format msgid "Query capacity stats response: %s." msgstr "Risposta statistiche di capacità query: %s." msgid "" "RBD striping not supported - ignoring configuration settings for rbd striping" msgstr "" "Striping RBD non supportato - le impostazioni di configurazione per lo " "striping rbd vengono ignorate" #, python-format msgid "RBD volume %s not found, allowing delete operation to proceed." msgstr "" "Volume RBD %s non trovato, consentita la continuazione dell'operazione di " "eliminazione." #, python-format msgid "" "REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify server's " "certificate: %(verify_cert)s." msgstr "" "IP server REST: %(ip)s, port: %(port)s, nome utente: %(user)s. Verificare il " "certificato del server: %(verify_cert)s." 
#, python-format msgid "Re-using existing purity host %(host_name)r" msgstr "Riutilizzo dell'host purity %(host_name)r esistente" msgid "Reconnected to coordination backend." msgstr "Riconnesso al backend di coordinazione." msgid "Reconnecting to coordination backend." msgstr "Riconnessione al backend di coordinazione." #, python-format msgid "Registering image in cache %s" msgstr "Registrazione immagine nella cache %s" #, python-format msgid "Regular file: %s created." msgstr "File regolare: %s creato." #, python-format msgid "" "Relocating volume: %s to a different datastore due to insufficient disk " "space on current datastore." msgstr "" "Riallocazione del volume: %s in un datastore diverso per spazio su disco " "insufficiente nel datastore corrente." #, python-format msgid "Remote return FC info is: %s." msgstr "Le informazioni FC restituite remote sono: %s." msgid "Remove volume export completed successfully." msgstr "Rimozione esportazione volume completata correttamente." #, python-format msgid "Removed %s from cg." msgstr "Rimosso %s da cg." #, python-format msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s" msgstr "Rimozione di ACL dal volume=%(vol)s per gruppo iniziatore %(igrp)s" #, python-format msgid "Removing iscsi_target for Volume ID: %s" msgstr "Rimozione di iscsi_target per l'ID volume: %s" #, python-format msgid "Removing iscsi_target for volume: %s" msgstr "Rimoziome di iscsi_target per il volume: %s" #, python-format msgid "Removing iscsi_target for: %s" msgstr "Rimozione di iscsi_target per: %s" #, python-format msgid "Removing iscsi_target: %s" msgstr "Rimozione di iscsi_target: %s" #, python-format msgid "Removing non-active host: %(host)s from scheduler cache." msgstr "Rimozione dell'host non attivo: %(host)s dalla cache dello scheduler." #, python-format msgid "Removing volume %(v)s from consistency group %(cg)s." msgstr "Rimozione del volume %(v)s dal gruppo di coerenza %(cg)s." #, python-format msgid "Removing volumes from cg %s." msgstr "Rimozione volumi da cg %s." #, python-format msgid "Rename Volume %(volume_id)s completed." msgstr "Ridenominazione del volume %(volume_id)s completata." #, python-format msgid "Renaming %(id)s from %(current_name)s to %(new_name)s." msgstr "Ridenominazione di %(id)s da %(current_name)s a %(new_name)s." #, python-format msgid "Renaming backing VM: %(backing)s to %(new_name)s." msgstr "Ridenominazione VM di backup: %(backing)s in %(new_name)s." #, python-format msgid "Renaming existing snapshot %(ref_name)s to %(new_name)s" msgstr "Ridenominazione dell'istantanea esistente %(ref_name)s su %(new_name)s" #, python-format msgid "Renaming existing volume %(ref_name)s to %(new_name)s" msgstr "Ridenominazione del volume esistente %(ref_name)s su %(new_name)s" #, python-format msgid "Replication %(vol)s to %(dest)s." msgstr "Replica di %(vol)s su %(dest)s." #, python-format msgid "Replication created for %(volname)s to %(destsc)s" msgstr "Replica creata per %(volname)s su %(destsc)s" #, python-format msgid "Replication is not configured on backend: %s." msgstr "Replica non configurata sul backend: %s." #, python-format msgid "Requested image %(id)s is not in raw format." msgstr "L'immagine richiesta %(id)s non è nel formato non elaborato." #, python-format msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." msgstr "" "Richiesta configurazione unificata: %(storage_family)s e " "%(storage_protocol)s." msgid "Reserve volume completed successfully." 
msgstr "Riserva del volume completata correttamente." #, python-format msgid "" "Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." msgstr "" "Reimpostazione stato backup avviata, backup_id: %(backup_id)s, stato: " "%(status)s." #, python-format msgid "Resetting backup %s to available (was restoring)." msgstr "" "Reimpostazione del backup %s su disponibile (era in fase di ripristino)." #, python-format msgid "Resetting backup %s to error (was creating)." msgstr "Reimpostazione del backup %s su errore (era in fase di creazione)." msgid "Resetting cached RPC version pins." msgstr "Reimpostazione dei pin della versione RPC memorizzata nella cache." #, python-format msgid "" "Resetting volume %(vol_id)s to previous status %(status)s (was backing-up)." msgstr "" "Reimpostazione del volume %(vol_id)s sullo stato precedente %(status)s (in " "fase di backup)." #, python-format msgid "Resizing LUN %s directly to new size." msgstr "Ridimensionamento della LUN %s direttamente sulla nuova dimensione." #, python-format msgid "Resizing LUN %s using clone operation." msgstr "Ridimensionamento della LUN %s tramite l'operazione di clonazione." #, python-format msgid "Resizing file to %sG" msgstr "Ridimensionamento del file su %sG" #, python-format msgid "Resizing file to %sG..." msgstr "Ridimensionamento del file su %sG..." #, python-format msgid "" "Restore backup finished, backup %(backup_id)s restored to volume " "%(volume_id)s." msgstr "" "Ripristino backup completato, backup: %(backup_id)s ripristinato su volume: " "%(volume_id)s." #, python-format msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" "Ripristino backup avviato, backup: %(backup_id)s volume: %(volume_id)s." #, python-format msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "Ripristino del backup %(backup)s nel volume %(volume)s" #, python-format msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "Ripristino del backup %(backup_id)s nel volume %(volume_id)s" msgid "Restoring iSCSI target from configuration file" msgstr "Ripristino della destinazione iSCSI dal file di configurazione" msgid "Resume volume delete completed successfully." msgstr "Ripresa eliminazione del volume completata correttamente." #, python-format msgid "Resuming delete on backup: %s." msgstr "Ripresa dell'eliminazione al backup: %s." #, python-format msgid "Retrieving secret for service: %s." msgstr "Richiamo del segreto per il servizio: %s." #, python-format msgid "Retrieving target for service: %s." msgstr "Richiamo della destinazione per il servizio: %s." #, python-format msgid "Return FC info is: %s." msgstr "Le informazioni FC restituite sono: %s." #, python-format msgid "" "Returning connection_info: %(info)s for volume: %(volume)s with connector: " "%(connector)s." msgstr "" "Restituzione di connection_info: %(info)s per il volume: %(volume)s con " "connettore: %(connector)s." #, python-format msgid "Returning random Port Group: %(portGroupName)s." msgstr "Viene restituito il gruppo di porte casuale: %(portGroupName)s." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartcache from (name: %(old_name)s, id: " "%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) successfully." msgstr "" "Riscrittura di LUN(id: %(lun_id)s) smartcache da (name: %(old_name)s, id: " "%(old_id)s) a (name: %(new_name)s, id: %(new_id)s) riuscita." 
#, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartpartition from (name: %(old_name)s, id: " "%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) success." msgstr "" "Riscrittura di LUN(id: %(lun_id)s) smartpartition da (name: %(old_name)s, " "id: %(old_id)s) a (name: %(new_name)s, id: %(new_id)s) riuscita." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartqos from %(old_qos_value)s to %(new_qos)s " "success." msgstr "" "Riscrittura di LUN(id: %(lun_id)s) smartquos da %(old_qos_value)s a " "%(new_qos)s riuscita." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smarttier policy from %(old_policy)s to " "%(new_policy)s success." msgstr "" "Riscrittura di LUN(id: %(lun_id)s) politica smarttier da %(old_policy)s a " "%(new_policy)s riuscita." #, python-format msgid "Retype Volume %(volume_id)s is completed." msgstr "La riscrittura del volume %(volume_id)s viene completata." #, python-format msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s." msgstr "" "La riscrittura del volume %(volume_id)s viene eseguita e migrata nel pool " "%(pool_id)s." #, python-format msgid "" "Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to " "%(old_snap_cpg)s." msgstr "" "Ripristino riscrittura %(volume_name)s snap_cpg da %(new_snap_cpg)s a " "%(old_snap_cpg)s." msgid "Retype volume completed successfully." msgstr "Riscrittura del volume completata correttamente. " msgid "Retype volume request issued successfully." msgstr "Richiesta di riscrittura del volume eseguita correttamente. " msgid "Retype was to same Storage Profile." msgstr "La riscrittura è stata eseguita nello stesso profilo di archiviazione." #, python-format msgid "Review shares: %s" msgstr "Rivedi condivisioni: %s" msgid "Roll detaching of volume completed successfully." msgstr "Esecuzione dello scollegamento del volume completata correttamente." #, python-format msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" msgstr "" "Esecuzione dell'ultimo lavoro ssc cluster per %(server)s e vserver %(vs)s" #, python-format msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" msgstr "" "Esecuzione del lavoro di aggiornamento ssc obsoleto per %(server)s e vserver " "%(vs)s" #, python-format msgid "Running with vmemclient version: %s" msgstr "Esecuzione con versione vmemclient: %s" #, python-format msgid "SC server created %s" msgstr "Server SC creato %s" #, python-format msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s" msgstr "Salva informazioni sul servizio per %(svc)s -> %(hdp)s, %(path)s" #, python-format msgid "" "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " "image id: %(id)s." msgstr "" "ScaleIO copy_image_to_volume volume: %(vol)s servizio immagine: %(service)s " "id immagine: %(id)s." #, python-format msgid "" "ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s " "image meta: %(meta)s." msgstr "" "ScaleIO copy_volume_to_image volume: %(vol)s servizio immagine: %(service)s " "meta immagine: %(meta)s." #, python-format msgid "" "ScaleIO create cloned volume: source volume %(src)s to target volume %(tgt)s." msgstr "" "ScaleIO ha creato il volume clonato: volume di origine %(src)s nel volume di " "destinazione %(tgt)s." #, python-format msgid "" "ScaleIO create volume from snapshot: snapshot %(snapname)s to volume " "%(volname)s." msgstr "" "ScaleIO crea il volume dall'istantanea: istantanea %(snapname)s in volume " "%(volname)s." msgid "ScaleIO delete snapshot." 
msgstr "ScaleIO elimina l'istantanea." #, python-format msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s." msgstr "" "ScaleIO estende il volume: volume %(volname)s alla dimensione di " "%(new_size)s." #, python-format msgid "ScaleIO get domain id by name request: %s." msgstr "ScaleIO recupera l'ID dominio dalla richiesta del nome: %s." #, python-format msgid "ScaleIO get pool id by name request: %s." msgstr "ScaleIO recupera l'ID pool dalla richiesta del nome: %s." #, python-format msgid "ScaleIO get volume by id request: %s." msgstr "ScaleIO recupera il volume dalla richiesta dell'id: %s." #, python-format msgid "ScaleIO rename volume request: %s." msgstr "ScaleIO rinomina la richiesta del volume: %s." msgid "ScaleIO snapshot group of volumes" msgstr "Gruppo di volumi istantanea ScaleIO" #, python-format msgid "ScaleIO volume %(vol)s was renamed to %(new_name)s." msgstr "Il volume ScaleIO %(vol)s è stato ridenominato %(new_name)s." #, python-format msgid "" "Secondary ssh hosts key file %(kwargs)s will be loaded along with %(conf)s " "from /etc/cinder.conf." msgstr "" "I file di chiavi host ssh secondari %(kwargs)s saranno caricati insieme a " "%(conf)s da /etc/cinder.conf." msgid "" "Service not found for updating active_backend_id, assuming default for " "driver init." msgstr "" "Servizio non trovato per l'aggiornamento di active_backend_id, viene " "utilizzato il valore predefinito per l'inizializzazione del driver." msgid "Session might have expired. Trying to relogin" msgstr "La sessione potrebbe essere scaduta. Provare a rieseguire il login" msgid "Set backend status to frozen successfully." msgstr "" "Impostazione dello stato di backend su bloccato eseguita correttamente." #, python-format msgid "Set newly managed Cinder volume name to %(name)s." msgstr "Impostare il nome volume Cinder appena gestito su %(name)s." #, python-format msgid "Set tgt CHAP secret for service: %s." msgstr "Impostare il segreto tgt CHAP per il servizio: %s." #, python-format msgid "Setting host %(host)s to %(state)s." msgstr "Impostazione dell'host %(host)s su %(state)s." #, python-format msgid "Setting snapshot %(snap)s to online_flag %(flag)s" msgstr "Impostazione dell'istantanea %(snap)s su online_flag %(flag)s" #, python-format msgid "Setting volume %(vol)s to online_flag %(flag)s" msgstr "Impostazione del volume %(vol)s su online_flag %(flag)s" #, python-format msgid "" "Skipping add target %(target_array)s to protection group %(pgname)s since " "it's already added." msgstr "" "Ignorare l'aggiunta della destinazione %(target_array)s al gruppo di " "protezione %(pgname)s perché è già aggiunta." #, python-format msgid "" "Skipping allow pgroup %(pgname)s on target array %(target_array)s since it " "is already allowed." msgstr "" "Ignorare l'autorizzazione di pgroup %(pgname)s sull'array di destinazione " "%(target_array)s perché è già autorizzato." #, python-format msgid "Skipping deletion of volume %s as it does not exist." msgstr "Ignorare l'eliminazione del volume %s perché non esiste." msgid "Skipping ensure_export. Found existing iSCSI target." msgstr "Ignorare ensure_export. Trovata destinazione iSCSI esistente." #, python-format msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" "ensure_export viene ignorato. Nessun provisioning di iscsi_target per il " "volume: %s" #, python-format msgid "" "Skipping image volume %(id)s because it is not accessible by current Tenant." 
msgstr "" "Ignorare il volume dell'immagine %(id)s perché non è accessibile dal Tenant " "corrente." #, python-format msgid "" "Skipping remove_export. No iscsi_target is presently exported for volume: %s" msgstr "" "remove_export viene ignorato. Nessun iscsi_target viene al momento esportato " "per il volume: %s" #, python-format msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" "remove_export viene ignorato. Nessun provisioning di iscsi_target per il " "volume: %s" #, python-format msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" msgstr "" "Condivisione smb %(share)s Dimensione totale %(size)s Totale allocato " "%(allocated)s" #, python-format msgid "Snapshot %(disp)s '%(new)s' is now being managed." msgstr "L'istantanea %(disp)s '%(new)s' è ora in fase di gestione." #, python-format msgid "" "Snapshot %(disp)s '%(vol)s' is no longer managed. Snapshot renamed to " "'%(new)s'." msgstr "" "L'istantanea %(disp)s '%(vol)s' non viene più gestita. Istantanea " "ridenominata in '%(new)s'." #, python-format msgid "" "Snapshot %(folder)s@%(snapshot)s does not exist, it was already deleted." msgstr "" "L'istantanea %(folder)s@%(snapshot)s non esiste, è già stata eliminata." #, python-format msgid "" "Snapshot %(folder)s@%(snapshot)s has dependent clones, it will be deleted " "later." msgstr "" "L'istantanea %(folder)s@%(snapshot)s ha cloni dipendenti, verrà eliminata " "successivamente." #, python-format msgid "Snapshot %s created successfully." msgstr "Istantanea %s: creata correttamente." #, python-format msgid "Snapshot %s does not exist in backend." msgstr "L'istantanea %s non esiste nel backend." #, python-format msgid "Snapshot %s does not exist, it seems it was already deleted." msgstr "L'istantanea %s non esiste, sembra che sia stata già eliminata." #, python-format msgid "Snapshot %s does not exist, it was already deleted." msgstr "L'istantanea %s non esiste, è già stata eliminata." #, python-format msgid "Snapshot %s has dependent clones, will be deleted later." msgstr "L'istantanea %s ha cloni dipendenti, verrà eliminata successivamente." #, python-format msgid "Snapshot %s not found" msgstr "Istantanea %s non trovata" #, python-format msgid "Snapshot %s was deleted successfully." msgstr "Istantanea %s eliminata correttamente." #, python-format msgid "Snapshot '%(ref)s' renamed to '%(new)s'." msgstr "Volume virtuale '%(ref)s' ridenominato in '%(new)s'." msgid "Snapshot create request issued successfully." msgstr "Richiesta di creazione dell'istantanea eseguita correttamente." #, python-format msgid "" "Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." msgstr "" "Creazione dell'istantanea %(cloneName)s complata. Volume di origine: " "%(sourceName)s." msgid "Snapshot delete request issued successfully." msgstr "Richiesta di eliminazione dell'istantanea eseguita correttamente." msgid "Snapshot force create request issued successfully." msgstr "Richiesta di creazione forzata dell'istantanea eseguita correttamente." #, python-format msgid "" "Snapshot record for %s is not present, allowing snapshot_delete to proceed." msgstr "" "Il record dell'istantanea per %s non è presente, consentita la continuazione " "dell'operazione di eliminazione dell'istantanea." msgid "Snapshot retrieved successfully." msgstr "Istantanea richiamata correttamente." #, python-format msgid "Snapshot volume %(vol)s into snapshot %(id)s." msgstr "Volume dell'istantanea %(vol)s nell'istantanea %(id)s." 
#, python-format msgid "Snapshot volume response: %s." msgstr "Risposta del volume dell'istantanea: %s." #, python-format msgid "Snapshot: %(snapshot)s: not found on the array." msgstr "Istantanea : %(snapshot)s: non trovata nell'array." #, python-format msgid "Source Snapshot: %s" msgstr "Istantanea di origine: %s" #, python-format msgid "" "Source and destination ZFSSA shares are the same. Do nothing. volume: %s" msgstr "" "Le condivisioni di origine e destinazione ZFSSA sono le stesse. Non eseguire " "alcuna operazione. volume: %s" #, python-format msgid "Start to create cgsnapshot for consistency group: %(group_name)s" msgstr "Inizia a creare l'istantanea per il gruppo di coerenza: %(group_name)s" #, python-format msgid "Start to create consistency group: %(group_name)s id: %(id)s" msgstr "Inizia a creare il gruppo di coerenza: id %(group_name)s: %(id)s" #, python-format msgid "Start to delete consistency group: %(cg_name)s" msgstr "Inizia a eliminare il gruppo di coerenza: %(cg_name)s" #, python-format msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "Avvio del nodo %(topic)s (versione %(version_string)s)" #, python-format msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "Avvio del driver del volume %(driver_name)s (%(version)s)" #, python-format msgid "Storage Group %(storageGroupName)s successfully deleted." msgstr "Gruppo di archiviazione %(storageGroupName)s eliminato correttamente." #, python-format msgid "Storage Group %s was empty." msgstr "Il gruppo di archiviazione %s è vuoto." #, python-format msgid "Storage group not associated with the policy. Exception is %s." msgstr "Gruppo di archiviazione non associato alla politica. L'eccezione è %s." #, python-format msgid "" "Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " "%(pool_id)s." msgstr "" "Nomi pool di archiviazione: %(pools)s, nome pool di archiviazione: %(pool)s, " "id pool: %(pool_id)s." #, python-format msgid "Successful login by user %s" msgstr "Login corretto da parte dell'utente %s" #, python-format msgid "Successfully added %(volumeName)s to %(sgGroupName)s." msgstr "Aggiunto correttamente %(volumeName)s a %(sgGroupName)s." #, python-format msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" msgstr "Completato correttamente lavoro ssc per %(server)s e vserver %(vs)s" #, python-format msgid "" "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" msgstr "" "Completato correttamente lavoro di aggiornamento obsoleto per %(server)s e " "vserver %(vs)s" #, python-format msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr "Copiato correttamente disco su: %(src)s in: %(dest)s." #, python-format msgid "Successfully create volume %s" msgstr "Creazione del volume %s eseguita correttamente " #, python-format msgid "" "Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack volume " "[%(stack_vol)s]." msgstr "" "Creato correttamente un volume CloudByte [%(cb_vol)s] in relazione al volume " "OpenStack [%(stack_vol)s]." #, python-format msgid "Successfully created clone: %s." msgstr "Creato correttamente clone: %s." #, python-format msgid "" "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." msgstr "" "Creata correttamente istantanea: %(snap)s per backup del volume: %(backing)s." #, python-format msgid "Successfully created snapshot: %s." msgstr "Creata correttamente istantanea: %s." #, python-format msgid "Successfully created volume backing: %s." 
msgstr "Backup del volume creato correttamente: %s." #, python-format msgid "Successfully deleted %s." msgstr "Eliminato correttamente %s" #, python-format msgid "Successfully deleted file: %s." msgstr "Eliminato correttamente file: %s." #, python-format msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." msgstr "Eliminata correttamente istantanea: %(name)s di backup: %(backing)s." #, python-format msgid "Successfully deleted snapshot: %s" msgstr "Eliminata correttamente istantanea: %s" #, python-format msgid "Successfully deleted snapshot: %s." msgstr "Eliminata correttamente istantanea: %s." #, python-format msgid "" "Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " "OpenStack volume [%(stack_vol)s]." msgstr "" "Eliminato correttamente il volume [%(cb_vol)s] su CloudByte corrispondente " "al volume OpenStack [%(stack_vol)s]." #, python-format msgid "Successfully deleted volume: %s" msgstr "Eliminato correttamente volume: %s" #, python-format msgid "Successfully extended virtual disk: %(path)s to %(size)s GB." msgstr "" "Disco virtuale esteso correttamente: %(path)s alla dimensione di: %(size)s " "GB." #, python-format msgid "Successfully extended volume %(volume_id)s to size %(size)s." msgstr "Volume esteso correttamente %(volume_id)s alla dimensione di %(size)s." #, python-format msgid "Successfully extended volume: %(vol)s to size: %(size)s GB." msgstr "Esteso correttamente volume: %(vol)s alla dimensione di: %(size)s GB." #, python-format msgid "Successfully got volume information for volume %s" msgstr "Informazioni sul volume richiamate correttamente per il volume %s" #, python-format msgid "Successfully initialized connection with volume: %(volume_id)s." msgstr "Connessione inizializzata correttamente con volume: %(volume_id)s." #, python-format msgid "" "Successfully initialized connection. target_wwn: %(target_wwn)s, " "initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." msgstr "" "Connessione inizializzata correttamente. target_wwn: %(target_wwn)s, " "initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." #, python-format msgid "" "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." msgstr "" "Spostato correttamente backup del volume: %(backing)s nella cartella: " "%(fol)s." #, python-format msgid "" "Successfully relocated volume backing: %(backing)s to datastore: %(ds)s and " "resource pool: %(rp)s." msgstr "" "Rilocato correttamente backup del volume: %(backing)s in datastore: %(ds)s e " "pool di risorse: %(rp)s." msgid "Successfully retrieved InitiatorGrpList" msgstr "InitiatorGrpList richiamato correttamente" #, python-format msgid "Successfully setup driver: %(driver)s for server: %(ip)s." msgstr "Configurato correttamente driver: %(driver)s per server: %(ip)s." #, python-format msgid "Successfully setup replication for %s." msgstr "Configurata correttamente replica per %s." #, python-format msgid "Successfully terminated connection for volume: %(volume_id)s." msgstr "Connessione terminata correttamente per volume: %(volume_id)s." #, python-format msgid "" "Successfully update volume stats. backend: %(volume_backend_name)s, vendor: " "%(vendor_name)s, driver version: %(driver_version)s, storage protocol: " "%(storage_protocol)s." msgstr "" "Statistiche del volume aggiornate correttamente. backend: " "%(volume_backend_name)s, fornitore: %(vendor_name)s, versione driver: " "%(driver_version)s, protocollo di archiviazione: %(storage_protocol)s." 
#, python-format msgid "" "Successfully updated CloudByte volume [%(cb_vol)s] corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "Aggiornato correttamente un volume CloudByte [%(cb_vol)s] corrispondente al " "volume OpenStack [%(ops_vol)s]." #, python-format msgid "Switching volume %(vol)s to profile %(prof)s." msgstr "Passaggio del volume %(vol)s a profilo %(prof)s." #, python-format msgid "System %(id)s has %(status)s status." msgstr "Il sistema %(id)s ha lo stato %(status)s." #, python-format msgid "" "System with controller addresses [%s] is not registered with web service." msgstr "" "Il sistema con indirizzi controller [%s] non è registrato con il servizio " "web." #, python-format msgid "Target is %(map)s! Targetlist = %(tgtl)s." msgstr "La destinazione è %(map)s! Targetlist = %(tgtl)s." #, python-format msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." msgstr "" "wwns di destinazione nella vista di mascheramento %(maskingView)s: " "%(targetWwns)s." #, python-format msgid "Terminate connection: %(volume)s." msgstr "Termina connessione: %(volume)s." msgid "Terminate volume connection completed successfully." msgstr "Completamento della connessione del volume completato correttamente." msgid "Thawed backend successfully." msgstr "Backend sbloccato correttamente." msgid "" "The NAS file operations will be run as non privileged user in secure mode. " "Please ensure your libvirtd settings have been configured accordingly (see " "section 'OpenStack' in the Quobyte Manual." msgstr "" "Le operazioni del file NAS verranno eseguite come utente non privilegiato in " "modalità sicura. Verificare che le impostazioni libvirtd siano state " "configurate di conseguenza (vedere la sezione 'OpenStack' nel manuale " "Quobyte)." #, python-format msgid "The QoS sepcs is: %s." msgstr "Le specifiche QoS sono: %s." #, python-format msgid "" "The image was successfully converted, but image size is unavailable. src " "%(src)s, dest %(dest)s. %(error)s" msgstr "" "L'immagine è stata convertita correttamente, ma la dimensione dell'immagine " "non è disponibile. origine %(src)s, dest %(dest)s. %(error)s" #, python-format msgid "" "The multi-attach E-Series host group '%(label)s' already exists with " "clusterRef %(clusterRef)s" msgstr "" "Il gruppo di host multi-attach E-Series '%(label)s' esiste già con " "clusterRef %(clusterRef)s" #, python-format msgid "The pool_name from extraSpecs is %(pool)s." msgstr "Il nome_pool da extraSpecs è %(pool)s." #, python-format msgid "The same hostid is: %s." msgstr "Lo stesso id host è: %s." #, python-format msgid "The storage group found is %(foundStorageGroupInstanceName)s." msgstr "" "Il gruppo di archiviazione trovato è %(foundStorageGroupInstanceName)s." #, python-format msgid "The target instance device id is: %(deviceid)s." msgstr "L'ID del dispositivo per l'istanza di destinazione è: %(deviceid)s." #, python-format msgid "" "The volume belongs to more than one storage group. Returning storage group " "%(sgName)s." msgstr "" "Il volume appartiene a più di un gruppo di archiviazione. Viene restituito " "il gruppo di archiviazione %(sgName)s." #, python-format msgid "" "There is no backing for the snapshotted volume: %(snap)s. Not creating any " "backing for the volume: %(vol)s." msgstr "" "Non esiste alcun backup per il volume con istantanea: %(snap)s. Non viene " "creato alcun backup per il volume: %(vol)s." #, python-format msgid "" "There is no backing for the source volume: %(src)s. Not creating any backing " "for volume: %(vol)s." 
msgstr "" "Non esiste alcun backup per il volume di origine: %(src)s. Non viene creato " "alcun backup per il volume: %(vol)s." #, python-format msgid "There is no backing for the volume: %s. Need to create one." msgstr "" "Non è presente alcun backup per il volume: %s. È necessario crearne uno." #, python-format msgid "There is no backing for volume: %s; no need to extend the virtual disk." msgstr "" "Non è presente alcun backup per il volume: %s; non è necessario estendere il " "disco virtuale." #, python-format msgid "There is no backing, and so there is no snapshot: %s." msgstr "Non esiste un backup per cui non non esiste alcuna istantanea: %s." #, python-format msgid "There is no backing, so will not create snapshot: %s." msgstr "Non esiste un backup per cui non verrà creata un'istantanea: %s." #, python-format msgid "" "There is no snapshot point for the snapshotted volume: %(snap)s. Not " "creating any backing for the volume: %(vol)s." msgstr "" "Non esiste alcun punto di istantanea per il volume con istantanea: %(snap)s. " "Non viene creato alcun backup per il volume: %(vol)s." #, python-format msgid "Toggle san_ip from %(current)s to %(new)s." msgstr "Passare a san_ip da %(current)s a %(new)s." msgid "Token is invalid, going to re-login and get a new one." msgstr "Il token non è valido, rieseguire il login e ottenere un nuovo token." msgid "Transfer volume completed successfully." msgstr "Trasferimento del volume completato correttamente." #, python-format msgid "Tried to delete non-existent vdisk %s." msgstr "Si è tentato di eliminare un vdisk non esistente %s." #, python-format msgid "" "Tried to delete snapshot %s, but was not found in Datera cluster. Continuing " "with delete." msgstr "" "Si è tentato di eliminare l'istantanea %s, ma non è stata trovata nel " "cluster Datera. Continuare con l'eliminazione. " #, python-format msgid "" "Tried to delete volume %s, but it was not found in the Datera cluster. " "Continuing with delete." msgstr "" "Si è tentato di eliminare il volume %s, ma non è stato trovato nel cluster " "Datera. Continuare con l'eliminazione. " #, python-format msgid "" "Tried to detach volume %s, but it was not found in the Datera cluster. " "Continuing with detach." msgstr "" "Si è tentato di scollegare il volume %s, ma non è stato trovato nel cluster " "Datera. Continuare con l'eliminazione. " #, python-format msgid "Trying to unmap volume from all sdcs before deletion: %s." msgstr "" "Tentativo di annullare l'associazione del volume da tutti gli sdcs prima " "dell'eliminazione: %s." msgid "Unable to accept transfer for volume, because it is in maintenance." msgstr "" "Impossibile accettare il trasferimento per il volume perché è in " "manutenzione." msgid "Unable to attach volume, because it is in maintenance." msgstr "Impossibile collegare il volume perché è in manutenzione." msgid "Unable to create the snapshot for volume, because it is in maintenance." msgstr "" "Impossibile creare l'istantanea per il volume perché è in manutenzione." msgid "Unable to delete the volume metadata, because it is in maintenance." msgstr "Impossibile eliminare i metadati del volume perché in manutenzione." msgid "Unable to detach volume, because it is in maintenance." msgstr "Impossibile scollegare il volume perché è in manutenzione." msgid "Unable to get Cinder internal context, will not use image-volume cache." msgstr "" "Impossibile ottenere il contesto interno Cinder, la cache del volume " "immagine non verrà utilizzata." 
#, python-format msgid "Unable to get remote copy information for volume %s" msgstr "Impossibile ottenere le informazioni di copia remota per il volume %s" msgid "" "Unable to initialize the connection for volume, because it is in maintenance." msgstr "" "Impossibile inizializzare la connessione per il volume perché è in " "manutenzione." msgid "Unable to parse XML input." msgstr "Impossibile analizzare l'input XML." #, python-format msgid "Unable to serialize field '%s' - excluding from backup" msgstr "Impossibile serializzare il campo '%s' - esclusione dal backup" #, python-format msgid "Unable to unprotect snapshot %s." msgstr "Impossibile annullare la protezione dell'istantanea %s." msgid "Unable to update the metadata for volume, because it is in maintenance." msgstr "" "Impossibile aggiornare i metadati per il volume perché è in manutenzione." msgid "Unable to update volume, because it is in maintenance." msgstr "Impossibile aggiornare il volume perché è in manutenzione." #, python-format msgid "Unexporting lun %s." msgstr "Annullata esportazione lun %s." #, python-format msgid "Unmanage snapshot with id: %s" msgstr "Annulla gestione dell'istantanea con id: %s" #, python-format msgid "Unmanage volume %(volume_id)s completed." msgstr "Annullamento della gestione del volume %(volume_id)s completata." #, python-format msgid "Unmanage volume %s" msgstr "Annulla gestione del volume %s" #, python-format msgid "Unmanage volume with id: %s" msgstr "Annulla gestione volume con id: %s" #, python-format msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." msgstr "" "Annullata operazione di gestione LUN con percorso corrente %(path)s e uuid " "%(uuid)s." #, python-format msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." msgstr "Volume non gestito con etichetta corrente %(label)s e wwn %(wwn)s." #, python-format msgid "Unmap volume: %(volume)s." msgstr "Annulla associazione volume: %(volume)s." msgid "Unreserve volume completed successfully." msgstr "Annullamento della riserva del volume completata correttamente." #, python-format msgid "" "Update Consistency Group: %(group)s. This adds and/or removes volumes from a " "CG." msgstr "" "Aggiorna gruppo di coerenza: %(group)s. I volumi vengono aggiunti e/o " "rimossi da un CG." msgid "Update consistency group completed successfully." msgstr "Aggiornamento del gruppo di coerenza completato correttamente." #, python-format msgid "Update migrated volume %(new_volume)s completed." msgstr "Aggiornamento del volume migrato %(new_volume)s completato." msgid "Update readonly setting on volume completed successfully." msgstr "" "Aggiornamento dell'impostazione di sola lettura sul volume completato " "correttamente." msgid "Update snapshot metadata completed successfully." msgstr "Aggiornamento dei metadati dell'istantanea completato correttamente." msgid "Update volume admin metadata completed successfully." msgstr "" "Aggiornamento dei metadati di gestione del volume completato correttamente." msgid "Update volume metadata completed successfully." msgstr "Aggiornamento dei metadati del volume completato correttamente." #, python-format msgid "Updated Consistency Group %s" msgstr "Aggiornato gruppo di coerenza %s" #, python-format msgid "" "Updating consistency group %(id)s with name %(name)s description: " "%(description)s add_volumes: %(add_volumes)s remove_volumes: " "%(remove_volumes)s." 
msgstr "" "Aggiornamento gruppo di coerenza %(id)s di nome %(name)s descrizione: " "%(description)s add_volumes: %(add_volumes)s remove_volumes: " "%(remove_volumes)s." #, python-format msgid "Updating snapshot %(id)s with info %(dict)s" msgstr "Aggiornamento dell'istantanea %(id)s con informazioni %(dict)s" #, python-format msgid "Updating status for CG: %(id)s." msgstr "Aggiornamento dello stato per CG: %(id)s." #, python-format msgid "Updating storage service catalog information for backend '%s'" msgstr "" "Aggiornamento delle informazioni sul catalogo del servizio di archiviazione " "per il backend '%s'" msgid "Use ALUA when adding initiator to host." msgstr "Utilizzare ALUA quando si aggiunge l'iniziatore all'host." msgid "Use CHAP when adding initiator to host." msgstr "Utilizzare CHAP quando si aggiunge l'iniziatore all'host." #, python-format msgid "" "Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." msgstr "" "Utilizzo di FC Zone Manager %(zm_version)s, Driver %(drv_name)s " "%(drv_version)s." #, python-format msgid "Using FC lookup service %s." msgstr "Utilizzo del servizio di ricerca FC %s." #, python-format msgid "Using compute cluster(s): %s." msgstr "Utilizzo di cluster di calcolo: %s." #, python-format msgid "Using existing initiator group name: %(igGroupName)s." msgstr "" "Si sta utilizzando il nome del gruppo di iniziatori esistente: " "%(igGroupName)s." msgid "" "Using extra_specs for defining QoS specs will be deprecated in the N release " "of OpenStack. Please use QoS specs." msgstr "" "L'utilizzo di extra_specs per definire le specifiche QoS risulterà obsoleto " "nella release N di OpenStack. Utilizzare le specifiche QoS." #, python-format msgid "Using overridden vmware_host_version from config: %s" msgstr "Utilizzo della versione vmware_host_version sovrascritta da config: %s" #, python-format msgid "Using pool %(pool)s instead of %(cpg)s" msgstr "Utilizzo del pool %(pool)s anziché %(cpg)s" #, python-format msgid "Using security file in %s for authentication" msgstr "Viene utilizzato il file di sicurezza %s per l'autenticazione" #, python-format msgid "Using service label: %s" msgstr "Utilizzo dell'etichetta di servizio: %s" #, python-format msgid "Using target label: %s." msgstr "Utilizzo dell'etichetta di destinazione: %s." msgid "VF context is changed in the session." msgstr "Il contesto VF viene modificato nella sessione." #, python-format msgid "Value with type=%s is not serializable" msgstr "Il valore con tipo=%s non è serializzabile" #, python-format msgid "Virtual volume %(disp)s '%(new)s' is being retyped." msgstr "Il volume virtuale %(disp)s '%(new)s' viene riscritto." #, python-format msgid "Virtual volume %(disp)s '%(new)s' is now being managed." msgstr "Il volume virtuale %(disp)s '%(new)s' è ora in fase di gestione." #, python-format msgid "" "Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " "%(cpg)s" msgstr "" "Il volume virtuale %(disp)s '%(new)s' snapCPG è vuoto per cui verrà " "impostato su: %(cpg)s" #, python-format msgid "" "Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to " "'%(new)s'." msgstr "" "Il volume virtuale %(disp)s '%(vol)s' non viene più gestito. Volume " "ridenominato in '%(new)s'." #, python-format msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." msgstr "Volume virtuale %(disp)s riscritto correttamente in %(new_type)s." #, python-format msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." 
msgstr "Volume virtuale '%(ref)s' ridenominato in '%(new)s'." #, python-format msgid "Vol copy job completed for dest %s." msgstr "Lavoro di copia del volume completato per destinazione %s." #, python-format msgid "Volume %(volume)s does not have meta device members." msgstr "Il volume %(volume)s non presenta membri di metadispositivi." #, python-format msgid "" "Volume %(volume)s is already mapped. The device number is %(deviceNumber)s." msgstr "" "Il volume %(volume)s è già associato. Il numero di dispositivo è " "%(deviceNumber)s." #, python-format msgid "Volume %(volumeName)s not in any storage group." msgstr "Volume %(volumeName)s non presente in alcun gruppo di archiviazione." #, python-format msgid "" "Volume %(volume_id)s: being created as %(create_type)s with specification: " "%(volume_spec)s" msgstr "" "Volume %(volume_id)s: creato come %(create_type)s con specifica: " "%(volume_spec)s" #, python-format msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" msgstr "Volume %(volume_name)s (%(volume_id)s): creato correttamente" #, python-format msgid "Volume %s converted." msgstr "Volume %s convertito." #, python-format msgid "Volume %s created" msgstr "Volume %s creato" #, python-format msgid "Volume %s does not exist, it seems it was already deleted." msgstr "Il volume %s non esiste, sembra che sia stato già eliminato." #, python-format msgid "Volume %s has been transferred." msgstr "Il volume %s è stato trasferito." #, python-format msgid "Volume %s is mapping to multiple hosts." msgstr "Il volume %s viene associato a più host." #, python-format msgid "Volume %s is not mapped. No volume to unmap." msgstr "" "Il volume %s non è associato. Nessun volume di cui annullare l'associazione." #, python-format msgid "Volume %s presented." msgstr "Volume %s presentato." #, python-format msgid "Volume %s retyped." msgstr "Volume %s riscritto." #, python-format msgid "Volume %s unmanaged." msgstr "Volume %s non gestito." #, python-format msgid "Volume %s will be deleted later." msgstr "Il volume %s verrà eliminato successivamente." #, python-format msgid "Volume %s: retyped successfully" msgstr "Volume %s: riscritto correttamente" #, python-format msgid "Volume already mapped, retrieving %(ig)s, %(vol)s" msgstr "Volume già associato, richiamo di %(ig)s, %(vol)s" #, python-format msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" msgstr "Copia del volume %(size_in_m).2f MB su %(mbps).2f MB/s" #, python-format msgid "Volume copy completed (%(size_in_m).2f MB at %(mbps).2f MB/s)." msgstr "Copia del volume completata %(size_in_m).2f MB su %(mbps).2f MB/s" msgid "Volume created successfully." msgstr "Volume creato correttamente." msgid "Volume detach called, but volume not attached." msgstr "Scollegamento volume chiamato, ma volume non collegato." msgid "Volume info retrieved successfully." msgstr "Informazioni sul volume richiamate correttamente." #, python-format msgid "Volume mappings for %(name)s: %(mappings)s" msgstr "Associazioni volume per %(name)s: %(mappings)s" #, python-format msgid "Volume name changed from %(tmp)s to %(orig)s" msgstr "Nome volume modificato da %(tmp)s a %(orig)s" #, python-format msgid "Volume name changed from %(tmp)s to %(orig)s." msgstr "Nome volume modificato da %(tmp)s a %(orig)s" msgid "Volume retrieved successfully." msgstr "Volume richiamato correttamente." #, python-format msgid "Volume service: %(label)s. Casted to: %(loc)s" msgstr "Servizio volume: %(label)s. Eseguito cast per: %(loc)s" #, python-format msgid "Volume status is: %s." 
msgstr "Lo stato del volume è: %s." #, python-format msgid "Volume type is %s." msgstr "Il tipo di volume è %s." #, python-format msgid "" "Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage pool " "id: %(pool_id)s, protection domain id: %(domain_id)s, protection domain " "name: %(domain_name)s." msgstr "" "Tipo di volume: %(volume_type)s, nome pool di archiviazione: %(pool_name)s, " "id pool di archiviazione: %(pool_id)s, id dominio di protezione: " "%(domain_id)s, nome dominio di protezione: %(domain_name)s." msgid "Volume updated successfully." msgstr "Volume aggiornato correttamente." #, python-format msgid "Volume with given ref %s need not be renamed during manage operation." msgstr "" "Il volume con un determinato riferimento %s non deve essere ridenominato " "durante l'operazione di gestione." #, python-format msgid "Volume with the name %s wasn't found, can't unmanage" msgstr "" "Impossibile trovare il volume con nome %s, impossibile annullare la gestione" #, python-format msgid "" "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " "size: %(backup_size)d, continuing with restore." msgstr "" "Il volume: %(vol_id)s, dimensione: %(vol_size)d è maggiore del backup: " "%(backup_id)s, dimensione: %(backup_size)d, continuare con il ripristino." #, python-format msgid "WWPN on node %(node)s: %(wwpn)s." msgstr "WWPN sul nodo %(node)s: %(wwpn)s." #, python-format msgid "" "Waiting for volume expansion of %(vol)s to complete, current remaining " "actions are %(action)s. ETA: %(eta)s mins." msgstr "" "In attesa del completamento dell'espansione del volume di %(vol)s, le azioni " "restanti correnti sono %(action)s. ETA: %(eta)s min." msgid "Waiting for web service array communication." msgstr "In attesa della comunicazione dell'array del servizio web." msgid "Waiting for web service to validate the configured password." msgstr "In attesa del servizio web per convalidare la password configurata." #, python-format msgid "Will clone a volume from the image volume %(id)s." msgstr "Un volume verrà clonato dal volume dell'immagine %(id)s." #, python-format msgid "XtremIO SW version %s" msgstr "XtremIO versione SW %s" #, python-format msgid "ZFSSA version: %s" msgstr "Versione ZFSSA: %s" #, python-format msgid "Zone exists in I-T mode. Skipping zone creation %s" msgstr "La zona esiste in modalità I-T. Ignorare la creazione zone per %s" #, python-format msgid "Zone exists in I-T mode. Skipping zone creation for %(zonename)s" msgstr "" "La zona esiste in modalità I-T. Ignorare la creazione zone per %(zonename)s" #, python-format msgid "Zone map to add: %(zonemap)s" msgstr "Associazione zone da aggiungere: %(zonemap)s" #, python-format msgid "Zone map to add: %s" msgstr "Associazione zone da aggiungere: %s" msgid "" "Zone name created using prefix because either host name or storage system is " "none." msgstr "" "Nome zona creato utilizzando il prefisso perché il nome host o il sistema di " "archiviazione sono none." msgid "Zone name created using prefix because host name is none." msgstr "Nome zona creato utilizzando il prefisso perché il nome host è none." 
#, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Politica di configurazione zone %s non riconosciuta" #, python-format msgid "Zoning policy for Fabric %(policy)s" msgstr "Politica di configurazione zone per Fabric %(policy)s" #, python-format msgid "Zoning policy for Fabric %s" msgstr "Politica di configurazione zone per Fabric %s" #, python-format msgid "Zoning policy for fabric %(policy)s" msgstr "Politica di configurazione zone per fabric %(policy)s" #, python-format msgid "Zoning policy for fabric %s" msgstr "Politica di configurazione zone per fabric %s" msgid "Zoning policy is not valid, no zoning will be performed." msgstr "" "La politica di configurazione zone non è valida, non verrà eseguita nessuna " "configurazione zone." #, python-format msgid "" "_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" "_check_volume_copy_ops: Il volume %(vol)s non presenta l'operazione di copia " "vdisk specificata: orig=%(orig)s nuova=%(new)s." msgid "_delete_copysession, The copysession was already completed." msgstr "_delete_copysession, La sessione di copia è già stata completata." #, python-format msgid "" "_delete_volume_setting, volumename:%(volumename)s, volume not found on " "ETERNUS. " msgstr "" "_delete_volume_setting, nome volume:%(volumename)s, volume non trovato su " "ETERNUS. " #, python-format msgid "_get_service_target hdp: %s." msgstr "_get_service_target hdp: %s." #, python-format msgid "_get_tgt_ip_from_portgroup: Get ip: %s." msgstr "_get_tgt_ip_from_portgroup: Richiamare ip: %s." #, python-format msgid "_get_tgt_iqn: iSCSI target iqn is: %s." msgstr "_get_tgt_iqn: iqn di destinazione iSCSI è: %s." #, python-format msgid "_unmap_lun, volumename: %(volumename)s, volume is not mapped." msgstr "_unmap_lun, volumename: %(volumename)s, volume non associato." #, python-format msgid "_unmap_lun, volumename:%(volumename)s, volume not found." msgstr "_unmap_lun, volumename:%(volumename)s, volume non trovato." #, python-format msgid "" "add_host_with_check. create host success. host name: %(name)s, host id: " "%(id)s" msgstr "" "add_host_with_check. creazione host riuscita. nome host: %(name)s, id host: " "%(id)s" #, python-format msgid "add_host_with_check. host name: %(name)s, host id: %(id)s" msgstr "add_host_with_check. nome host: %(name)s, id host: %(id)s" #, python-format msgid "casted to %s" msgstr "eseguito cast per %s" #, python-format msgid "cgsnapshot %s: created successfully" msgstr "cgsnapshot %s: creata correttamente" #, python-format msgid "cgsnapshot %s: deleted successfully" msgstr "cgsnapshot %s: eliminata correttamente" #, python-format msgid "cgsnapshot %s: deleting" msgstr "Eliminazione di cgsnapshot %s:." #, python-format msgid "config[services]: %s." msgstr "config[services]: %s." #, python-format msgid "create_cloned_volume, info: %s, Exit method." msgstr "create_cloned_volume, info: %s, Metodo di uscita." #, python-format msgid "" "create_cloned_volume, target volume id: %(tid)s, source volume id: %(sid)s, " "Enter method." msgstr "" "create_cloned_volume, id volume di destinazione: %(tid)s, id volume di " "origine: %(sid)s, Immettere il metodo." #, python-format msgid "" "create_hostgroup_with_check. Create hostgroup success. hostgroup name: " "%(name)s, hostgroup id: %(id)s" msgstr "" "create_hostgroup_with_check. creazione hostgroup riuscita. Nome hostgroup: " "%(name)s, id hostgroup: %(id)s" #, python-format msgid "" "create_hostgroup_with_check. 
hostgroup name: %(name)s, hostgroup id: %(id)s" msgstr "" "create_hostgroup_with_check. nome gruppo host: %(name)s, id gruppo host: " "%(id)s" #, python-format msgid "create_snapshot, info: %s, Exit method." msgstr "create_snapshot, info: %s, Metodo di uscita." #, python-format msgid "create_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." msgstr "" "create_snapshot, snap id: %(sid)s, id volume: %(vid)s, Immettere il metodo." #, python-format msgid "create_volume, info: %s, Exit method." msgstr "create_volume, info: %s, Metodo di uscita." #, python-format msgid "create_volume, volume id: %s, Enter method." msgstr "create_volume, volume id: %s, Immettere il metodo." #, python-format msgid "create_volume: create_lu returns %s" msgstr "create_volume: create_lu restituisce %s" #, python-format msgid "create_volume_from_snapshot, info: %s, Exit method." msgstr "create_volume_from_snapshot, info: %s, Metodo di uscita." #, python-format msgid "" "create_volume_from_snapshot, volume id: %(vid)s, snap id: %(sid)s, Enter " "method." msgstr "" "create_volume_from_snapshot, id volume: %(vid)s, snap id: %(sid)s, Immettere " "il metodo." #, python-format msgid "" "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " "%(tgt_lun_id)s, copy_name: %(copy_name)s." msgstr "" "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " "%(tgt_lun_id)s, copy_name: %(copy_name)s." #, python-format msgid "del_iscsi_conn: hlun not found %s." msgstr "del_iscsi_conn: hlun non trovato %s." #, python-format msgid "delete lun loc %s" msgstr "elimina lun loc %s" #, python-format msgid "delete_snapshot, delete: %s, Exit method." msgstr "delete_snapshot, delete: %s, Metodo di uscita." #, python-format msgid "delete_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." msgstr "" "delete_snapshot, snap id: %(sid)s, id volume: %(vid)s, Immettere il metodo." #, python-format msgid "delete_volume, delete: %s, Exit method." msgstr "delete_volume, delete: %s, Metodo di uscita." #, python-format msgid "delete_volume, volume id: %s, Enter method." msgstr "delete_volume, volume id: %s, Immettere il metodo." #, python-format msgid "" "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " "%(lun_id)s." msgstr "" "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " "%(lun_id)s." #, python-format msgid "do_setup: %s" msgstr "do_setup: %s" #, python-format msgid "extend_volume, used pool name: %s, Exit method." msgstr "extend_volume, nome pool utilizzato: %s, Metodo di uscita." #, python-format msgid "extend_volume, volume id: %s, Enter method." msgstr "extend_volume, id volume: %s, Immettere il metodo." #, python-format msgid "free capacity of pool %(pool)s is: %(free)s, total capacity: %(total)s." msgstr "" "La capacità disponibile del pool %(pool)s è: %(free)s, capacità totale: " "%(total)s." #, python-format msgid "iSCSI Initiators %(in)s of %(ins)s need registration." msgstr "Gli iniziatori iSCSI %(in)s di %(ins)s necessitano della registrazione." 
#, python-format msgid "iSCSI portal found for service: %s" msgstr "Portale iSCSI trovato per il servizio: %s" #, python-format msgid "igroup %(grp)s found for initiator %(iname)s" msgstr "igroup %(grp)s trovato per l'iniziatore %(iname)s" #, python-format msgid "initialize volume %(vol)s connector %(conn)s" msgstr "inizializza volume %(vol)s connettore %(conn)s" #, python-format msgid "initialize_ connection: %(vol)s:%(initiator)s" msgstr "initialize_ connection: %(vol)s:%(initiator)s" #, python-format msgid "initialize_connection success. Return data: %s." msgstr "initialize_connection riuscita. Dati restituiti: %s." #, python-format msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" msgstr "initialize_connection volume: %(volume)s, connettore: %(connector)s" #, python-format msgid "initialize_connection, host lun id is: %s." msgstr "initialize_connection, l'id lun host è: %s." #, python-format msgid "initialize_connection, info: %s, Exit method." msgstr "initialize_connection, info: %s, Metodo di uscita." #, python-format msgid "initialize_connection, initiator: %(wwpns)s, LUN ID: %(lun_id)s." msgstr "initialize_connection, iniziatore: %(wwpns)s, ID LUN: %(lun_id)s." #, python-format msgid "" "initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " "portgroup_id: %(portgroup_id)s." msgstr "" "initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " "portgroup_id: %(portgroup_id)s." #, python-format msgid "initialize_connection, metadata is: %s." msgstr "initialize_connection, i metadati sono: %s." #, python-format msgid "" "initialize_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter " "method." msgstr "" "initialize_connection, id volume: %(vid)s, initiator: %(initiator)s, " "Immettere il metodo." #, python-format msgid "" "initialize_connection, volume: %(volume)s, target_lun: %(target_lun)s, " "target_luns: %(target_luns)s, Volume is already mapped." msgstr "" "initialize_connection, volume: %(volume)s, target_lun: %(target_lun)s, " "target_luns: %(target_luns)s, Il volume è già associato." #, python-format msgid "" "initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s." msgstr "" "initialize_connection_fc, iniziatore: %(wwpns)s, nome volume: %(volume)s." #, python-format msgid "initiate: connection %s" msgstr "inizializza: connessione %s" msgid "initiator has no password while using chap,adding it" msgstr "" "l'iniziatore non presenta alcuna password durante l'utilizzo di chap, la " "password viene aggiunta" #, python-format msgid "initiator name: %(initiator_name)s, LUN ID: %(lun_id)s." msgstr "nome iniziatore: %(initiator_name)s, ID LUN: %(lun_id)s." msgid "" "initiator_auto_registration: False. Initiator auto registration is not " "enabled. Please register initiator manually." msgstr "" "initiator_auto_registration: False. La registrazione automatica " "dell'iniziatore non è abilitata. Registrare l'iniziatore manualmente." #, python-format msgid "iops limit is: %s." msgstr "Il limite iops è: %s." #, python-format msgid "iscsi_initiators: %s" msgstr "iscsi_initiators: %s" #, python-format msgid "location is: %(location)s" msgstr "location è: %(location)s" #, python-format msgid "" "manage_existing_snapshot: snapshot %(exist)s on volume %(volume)s has been " "renamed to %(id)s and is now managed by Cinder." msgstr "" "manage_existing_snapshot: l'istantanea %(exist)s sul volume %(volume)s è " "stata ridenominata come %(id)s ed è ora gestita da Cinder." 
#, python-format msgid "" "migrate_volume_completion is cleaning up an error for volume %(vol1)s " "(temporary volume %(vol2)s" msgstr "" "migrate_volume_completion sta ripulendo un errore per il volume %(vol1)s " "(volume temporaneo %(vol2)s" #, python-format msgid "new cloned volume: %s" msgstr "nuovo volume clonato: %s" #, python-format msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" msgstr "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" #, python-format msgid "open_connection to %(ssn)s at %(ip)s" msgstr "open_connection a %(ssn)s su %(ip)s" #, python-format msgid "open_connection: Updating API version to %s" msgstr "open_connection: Aggiornamento della versione API a %s" #, python-format msgid "replication failover secondary is %(ssn)s" msgstr "Il failover di replica secondario è %(ssn)s" #, python-format msgid "setting volume %s to error_restoring (was restoring-backup)." msgstr "" "impostazione del volume %s su error_restoring (era in fase di ripristino del " "backup)." #, python-format msgid "share: %(share)s -> %(info)s" msgstr "condivisione: %(share)s -> %(info)s" #, python-format msgid "share: %s incorrect entry" msgstr "condivisione: %s voce non corretta" #, python-format msgid "smis_do_iscsi_discovery is: %(out)s." msgstr "smis_do_iscsi_discovery è: %(out)s." #, python-format msgid "snapshot %s doesn't exist" msgstr "l'istantanea %s non esiste" #, python-format msgid "source volume for cloning: %s" msgstr "volume di origine per la clonazione: %s" #, python-format msgid "stats: stats: %s." msgstr "stats: stats: %s." #, python-format msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." msgstr "stop_snapshot: nome istantanea: %(snapshot)s, nome volume: %(volume)s." #, python-format msgid "targetlist: %s" msgstr "targetlist: %s" #, python-format msgid "terminate: connection %s" msgstr "termina: connessione %s" #, python-format msgid "terminate_connection volume: %(volume)s, connector: %(con)s" msgstr "terminate_connection volume: %(volume)s, connettore: %(con)s" #, python-format msgid "terminate_connection, return data is: %s." msgstr "terminate_connection, i dati restituiti sono: %s." #, python-format msgid "terminate_connection, unmap: %s, Exit method." msgstr "terminate_connection, annullare l'associazione: %s, Metodo di uscita." #, python-format msgid "" "terminate_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter " "method." msgstr "" "terminate_connection, id volume: %(vid)s, iniziatore: %(initiator)s, " "Immettere il metodo." #, python-format msgid "terminate_connection: initiator name: %(ini)s, LUN ID: %(lunid)s." msgstr "terminate_connection: nome iniziatore: %(ini)s, ID LUN: %(lunid)s." #, python-format msgid "terminate_connection: wwpns: %(wwns)s, LUN ID: %(lun_id)s." msgstr "terminate_connection: wwpns: %(wwns)s, ID LUN: %(lun_id)s." #, python-format msgid "" "terminate_connection_fc: volume name: %(volume)s, wwpns: %(wwns)s, lun_id: " "%(lunid)s." msgstr "" "terminate_connection_fc: nome volume: %(volume)s, wwpns: %(wwns)s, lun_id: " "%(lunid)s." #, python-format msgid "tunevv failed because the volume '%s' has snapshots." msgstr "tunevv non riuscito perché il volume '%s' contiene istantanee." #, python-format msgid "username: %(username)s, verify_cert: %(verify)s." msgstr "nome utente: %(username)s, verify_cert: %(verify)s." 
#, python-format msgid "vol=%s" msgstr "vol=%s" #, python-format msgid "vol_name=%(name)s provider_location=%(loc)s" msgstr "vol_name=%(name)s provider_location=%(loc)s" #, python-format msgid "volume %(name)s extended to %(size)d." msgstr "volume %(name)s esteso a %(size)d." #, python-format msgid "volume %s doesn't exist" msgstr "il volume %s non esiste" #, python-format msgid "volume %s no longer exists in backend" msgstr "Il volume %s non esiste più nel backend" #, python-format msgid "volume: %(volume)s, lun params: %(params)s." msgstr "volume: %(volume)s, parametri lun: %(params)s." msgid "volume_file does not support fileno() so skipping fsync()" msgstr "volume_file does non supporta fileno(), ignorare fsync()" cinder-8.0.0/cinder/locale/it/LC_MESSAGES/cinder.po0000664000567000056710000126075312701406257022656 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # FIRST AUTHOR , 2011 # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Alessandra , 2016. #zanata # Remo Mattei , 2016. #zanata # Tom Cocozzello , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev19\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:34+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-22 09:38+0000\n" "Last-Translator: Remo Mattei \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder versione: %(version)s\n" #, python-format msgid " but size is now %d" msgstr " ma la dimensione ora è %d" #, python-format msgid " but size is now %d." msgstr " ma la dimensione ora è %d." msgid " or " msgstr "oppure" #, python-format msgid "%(attr)s is not set." msgstr "%(attr)s non è impostato." #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing non può gestire un volume connesso agli host. " "Disconnettere questo volume dagli host esistenti prima dell'importazione" #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "risultato: %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s: Autorizzazione negata." 
#, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: non riuscito con un output CLI imprevisto.\n" " Comando: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "Codice dello stato: %(_status)s\n" "Corpo: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, subjectAltName: %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s: creazione portale di rete: accertarsi che la porta %(port)d " "sull'ip %(ip)s non sia utilizzata da un altro servizio." #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "" "%(name)s ha un requisito minimo di numero di caratteri pari a %(min_length)s." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s ha più di %(max_length)s caratteri." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s non riuscito. L'oggetto di " "backup riporta una modalità non prevista. I backup di immagine o di file " "sono supportati; la modalità corrente è %(vol_mode)s." #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "Il servizio %(service)s non è %(status)s sull'applicazione di archiviazione: " "%(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s deve essere <= %(max_value)d" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s deve essere >= %(min_value)d" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "Il valore %(worker_name)s di %(workers)d non è valido, deve essere superiore " "a 0." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "\"data\" %s non presente nel risultato." #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "Impossibile accedere a %s. Verificare che GPFS sia attiva e che il file " "system sia montato." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "Impossibile ridimensionare %s mediante l'operazione clona, perché non " "contiene blocchi" #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "Impossibile ridimensionare %s mediante l'operazione clona, perché ospitato " "su un volume compresso" #, python-format msgid "%s configuration option is not set." msgstr "L'opzione di configurazione %s non è impostata. " #, python-format msgid "%s does not exist." msgstr "%s non esiste." #, python-format msgid "%s is not a directory." msgstr "%s non è una directory." 
#, python-format msgid "%s is not a string or unicode" msgstr "%s non è una stringa o unicode" #, python-format msgid "%s is not installed" msgstr "%s non installato" #, python-format msgid "%s is not installed." msgstr "%s non installato." #, python-format msgid "%s is not set" msgstr "%s non è impostato" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" "%s non è impostato ed è richiesto affinché il dispositivo di replica sia " "valido." #, python-format msgid "%s is not set." msgstr "%s non impostato." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s deve essere un'immagine qcow2 o raw validi." #, python-format msgid "%s must be an absolute path." msgstr "%s deve essere un percorso assoluto." #, python-format msgid "%s must be an integer." msgstr "%s deve essere un numero intero." #, python-format msgid "%s not set in cinder.conf" msgstr "%s non impostato in cinder.conf" #, python-format msgid "%s not set." msgstr "%s non impostato." #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s' non è valido per flashsystem_connection_protocol nel file di " "configurazione. i valori validi sono %(enabled)s." msgid "'active' must be present when writing snap_info." msgstr "'active' deve essere presente durante la scrittura di snap_info." msgid "'consistencygroup_id' must be specified" msgstr "è necessario specificare 'consistencygroup_id'" msgid "'qemu-img info' parsing failed." msgstr "analisi di 'qemu-img info' non riuscita." msgid "'status' must be specified." msgstr "È necessario specificare 'status'." msgid "'volume_id' must be specified" msgstr "È necessario specificare 'volume_id'" msgid "'{}' object has no attribute '{}'" msgstr "L'oggetto '{}'non ha l'attributo '{}'" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(Comando: %(cmd)s) (Codice di ritorno: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "Un LUN (HLUN) non è stato trovato. (LDEV: %(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "" "È stata effettuata una richiesta contemporanea, probabilmente " "contraddittoria." #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "Non è stato trovato un LUN (HLUN) disponibile. Aggiungere un diverso gruppo " "di host. (LDEV: %(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" "Impossibile aggiungere un gruppo di host. (porta: %(port)s, nome: %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "Impossibile eliminare un gruppo di host. (porta: %(port)s, gid: %(gid)s, " "nome: %(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "Un gruppo di host non è valido. (gruppo host: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "Impossibile eliminare una coppia. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "Impossibile creare una coppia. 
Il numero massimo di coppie è stato superato. " "(metodo di copia: %(copy_method)s, P-VOL: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "Un parametro non è valido. (%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "Il valore del parametro non è valido. (%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "Impossibile trovare un pool. (id pool: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Lo stato dell'istantanea non è valido. (stato: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" "Una destinazione secondaria valida DEVE essere specificata per poter " "eseguire il failover." msgid "A volume ID or share was not specified." msgstr "ID volume o condivisione non specificati. " #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "Lo stato del volume non è valido. (stato: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s non riuscita con stringa di errore %(err)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "Stringa della versione API %(version)s in formato non valido. Deve essere in " "formato MajorNum.MinorNum." msgid "API key is missing for CloudByte driver." msgstr "Chiave API mancante per il driver CloudByte." #, python-format msgid "API response: %(response)s" msgstr "Risposta dell'API: %(response)s" #, python-format msgid "API response: %s" msgstr "Risposta dell'API: %s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "Versione API %(version)s non supportata in questo metodo." msgid "API version could not be determined." msgstr "Impossibile determinare la versione API." msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "Si stanno per eliminare i progetti child con quota diversa da zero. Questa " "operazione non dovrebbe essere eseguita" msgid "Access list not available for public volume types." msgstr "Elenco accessi non disponibile per i tipi di volume pubblici." msgid "Activate or deactivate QoS error." msgstr "Errore di attivazione o disattivazione QoS. " msgid "Activate snapshot error." msgstr "Errore di attivazione istantanea." msgid "Add FC port to host error." msgstr "Errore di aggiunta porta FC all'host. " msgid "Add fc initiator to array error." msgstr "Errore di aggiunta iniziatore fc all'array. " msgid "Add initiator to array error." msgstr "Errore di aggiunta iniziatore all'array. " msgid "Add lun to cache error." msgstr "Errore di aggiunta lun alla cache. " msgid "Add lun to partition error." msgstr "Errore di aggiunta lun alla partizione. " msgid "Add mapping view error." msgstr "Errore di aggiunta vista associazione. " msgid "Add new host error." msgstr "Errore di aggiunta nuovo host. " msgid "Add port to port group error." msgstr "Errore di aggiunta porta a gruppo di porte." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "Tutti i pool di memoria specificati da gestire non esistono. " "Controllare la configurazione. Pool non esistenti: %s " msgid "An API version request must be compared to a VersionedMethod object."
msgstr "" "La richiesta di una versione API deve essere confrontata a un oggetto " "VersionedMethod." #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "Si è verificato un errore in SheepdogDriver. (Motivo: %(reason)s) " msgid "An error has occurred during backup operation" msgstr "Si è verificato un errore durante l'operazione di backup" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "" "Si è verificato un errore durante il tentativo di modificare l'istantanea " "\"%s\". " #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "Si è verificato un errore durante la ricerca del volume \"%s\". " #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "Si è verificato un errore durante l'operazione di LUNcopy. Nome LUNcopy: " "%(luncopyname)s. Stato LUNcopy: %(luncopystatus)s. Stato LUNcopy: " "%(luncopystate)s." #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "Si è verificato un errore durante la lettura del volume \"%s\". " #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "Si è verificato un errore durante la scrittura del volume \"%s\". " #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "Impossibile aggiungere un utente iSCSI CHAP. (nome utente: %(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "Un utente iSCSI CHAP non può essere eliminato. (nome utente: %(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "Impossibile aggiungere una destinazione iSCSI. (porta: %(port)s, alias: " "%(alias)s, motivo: %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "Impossibile eliminare una destinazione iSCSI. (porta: %(port)s, tno: " "%(tno)s, alias: %(alias)s)" msgid "An unknown exception occurred." msgstr "E' stato riscontrato un errore sconosciuto" msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "" "A un utente con un token nell'ambito di un sottoprogetto non è consentito " "visualizzare la quota dei relativi parent." msgid "Append port group description error." msgstr "Errore di aggiunta descrizione gruppo di porte." #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "Applicazione di zone e cfgs allo switch non riuscita (codice di errore=" "%(err_code)s messaggio di errore=%(err_msg)s." #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "L'array non esiste o è offline. Lo stato corrente dell'array è %s. " msgid "Associate host to hostgroup error." msgstr "Errore di associazione host al gruppo host." msgid "Associate host to mapping view error." msgstr "Errore di associazione host alla vista associazione." msgid "Associate initiator to host error." msgstr "Errore di associazione iniziatore all'host. " msgid "Associate lun to QoS error." msgstr "Errore di associazione lun a QoS. " msgid "Associate lun to lungroup error." msgstr "Errore di associazione lun al gruppo lun." msgid "Associate lungroup to mapping view error." 
msgstr "Errore di associazione gruppo lun alla vista associazione." msgid "Associate portgroup to mapping view error." msgstr "Errore di associazione gruppo porte a vista associazione. " msgid "At least one valid iSCSI IP address must be set." msgstr "È necessario impostare almeno un indirizzo IP iSCSI valido." #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "" "Tentativo di trasferimento %s con chiave di autenticazione (auth) non valida." #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "Dettagli gruppo aut [%s] non trovati nella memoria CloudByte." msgid "Auth user details not found in CloudByte storage." msgstr "Dettagli utente aut non trovati nella memoria CloudByte." msgid "Authentication error" msgstr "Errore di autenticazione" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "Autenticazione non riuscita, verificare le credenziali dello switch, codice " "di errore %s." msgid "Authorization error" msgstr "Errore di autorizzazione" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "L'area di disponibilità '%(s_az)s' non è valida." msgid "Available categories:" msgstr "Categorie disponibili:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "Spec QoS di backend non supportate in questa famiglia di memoria e versione " "ONTAP." #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "Il backend non esiste (%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "Failover del backend già eseguito. Impossibile eseguire il failback." #, python-format msgid "Backend reports: %(message)s" msgstr "Report Backend: %(message)s" msgid "Backend reports: item already exists" msgstr "Report Backend: l'elemento esiste già" msgid "Backend reports: item not found" msgstr "Report Backend: elemento non trovato" msgid "Backend server not NaServer." msgstr "Il server di backend non è NaServer." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "Raggiunto timeout di tentativi del servizio backend: %(timeout)s sec" msgid "Backend storage did not configure fiber channel target." msgstr "" "L'archivio di backend non ha configurato la destinazione del canale a fibre " "ottiche" msgid "Backing up an in-use volume must use the force flag." msgstr "" "Il backup di un volume in uso deve utilizzare l'indicatore di forzatura. " #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "Impossibile trovare il backup %(backup_id)s." msgid "Backup RBD operation failed" msgstr "Operazione di backup RBD non riuscita" msgid "Backup already exists in database." msgstr "Il backup esiste già nel database. " #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "Il driver di backup ha riportato un errore: %(message)s" msgid "Backup id required" msgstr "ID backup richiesto" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "Backup non supportato per i volumi GlusterFS con istantanee." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "Il backup è supportato solo per i volumi SOFS senza file di backup." msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "" "Il backup è supportato solo per i volumi GlusterFS formattati in modo non " "elaborato." msgid "Backup is only supported for raw-formatted SOFS volumes." 
msgstr "" "Il backup è supportato solo per i volumi SOFS formattati in modo non " "elaborato." msgid "Backup operation of an encrypted volume failed." msgstr "" "L'operazione di backup di un volume codificato non ha avuto esito positivo." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Il servizio di backup %(configured_service)s non supporta la verifica. L'ID " "backup %(id)s non è verificato. Ignorare la verifica." #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "Il servizio di backup %(service)s non supporta la verifica. L'id backup " "%(id)s non è verificato. Ignorare la reimpostazione." #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "Il backup dovrebbe avere una sola istantanea ma invece ne ha %s" msgid "Backup status must be available" msgstr "Lo stato del backup deve essere available" #, python-format msgid "Backup status must be available and not %s." msgstr "Lo stato del backup deve essere disponibile e non %s." msgid "Backup status must be available or error" msgstr "Lo stato del backup deve essere available o error" msgid "Backup to be restored has invalid size" msgstr "Il backup da ripristinare ha una dimensione non valida" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "Restituita riga di stato non corretto: %(arg)s." #, python-format msgid "Bad key(s) in quota set: %s" msgstr "Chiavi errate nell'insieme di quota: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "Risposta non corretta o imprevista dall'API di backend del volume di " "archiviazione: %(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Formato progetto non corretto: il formato del progetto non è corretto (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Richiesta non valida inviata al cluster Datera: Argomenti non validi: " "%(args)s | %(message)s" msgid "Bad response from Datera API" msgstr "Risposta errata dall'API Datera" msgid "Bad response from SolidFire API" msgstr "Risposta dell'API SolidFire non valida" #, python-format msgid "Bad response from XMS, %s" msgstr "Risposta non valida da XMS, %s" msgid "Binary" msgstr "Valore binario" msgid "Blank components" msgstr "Componenti vuoti" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Schema di autenticazione API Blockbridge (token o password)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Password API Blockbridge (per schema aut 'password')" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Token API Blockbridge (per schema aut 'token')" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Utente API Blockbridge (per schema aut 'password')" msgid "Blockbridge api host not configured" msgstr "Host api Blockbridge non configurato " #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "Blockbridge configurato con schema aut non valido '%(auth_scheme)s'" msgid "Blockbridge default pool does not exist" msgstr "Il pool predefinito Blockbridge non esiste " msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Password Blockbridge non configurata (richiesta per lo schema 
aut 'password')" msgid "Blockbridge pools not configured" msgstr "Pool Blockbridge non configurati " msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Token Blockbridge non configurato (richiesto per lo schema aut 'token')" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Utente Blockbridge non configurato (richiesto per lo schema aut 'password')" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "Errore CLI di divisione in zone di Brocade Fibre Channel: %(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Errore HTTP di divisione in zone di Brocade Fibre Channel: %(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "Il segreto CHAP deve essere 12-16 byte. " #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Output eccezione CLI:\n" " comando: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Output eccezione CLI:\n" " comando: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E L'associazione VDisk a host non è stata creata perché il VDisk " "ègià associato ad un host.\n" " \"" msgid "CONCERTO version is not supported" msgstr "Versione CONCERTO non supportata" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) non esiste nell'array" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "" "Il nome della cache è Nessuno, impostare smartcache:cachename nella chiave. " #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "Volume cache %(cache_vol)s non ha istantanea %(cache_snap)s." #, python-format msgid "Cache volume %s does not have required properties" msgstr "Il volume della cache %s non presenta le proprietà richieste" msgid "Call returned a None object" msgstr "La chiamata ha restituito un oggetto None" msgid "Can not add FC port to host." msgstr "Impossibile aggiungere la porta FC all'host." #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "Impossibile trovare l'ID cache per nome cache %(name)s." #, python-format msgid "Can not find partition id by name %(name)s." msgstr "Impossibile trovare l'ID partizione per nome %(name)s." #, python-format msgid "Can not get pool info. pool: %s" msgstr "Impossibile ottenere le informazioni sul pool. pool: %s" #, python-format msgid "Can not translate %s to integer." msgstr "Impossibile convertire %s in un numero intero." #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "Impossibile accedere a 'scality_sofs_config': %s" msgid "Can't attach snapshot." msgstr "Impossibile collegare l'istantanea." msgid "Can't decode backup record." msgstr "Impossibile decodificare il record di backup. " #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "Impossibile estendere il volume di replica, volume: %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "Impossibile trovare la LUN sull'array, controllare source-name o source-id." #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." 
msgstr "Impossibile trovare il nome cache nell'array, nome cache: %(name)s." #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "Impossibile trovare l'id lun da db, volume: %(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" "Impossibile trovare informazioni sulla lun nell'array. volume: %(id)s, nome " "lun: %(name)s." #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" "Impossibile trovare il nome partizione nell'array, il nome partizione è: " "%(name)s." #, python-format msgid "Can't find service: %s" msgstr "Impossibile trovare il servizio: %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "Impossibile trovare l'istantanea sull'array, controllare source-name o " "source-id." msgid "Can't find the same host id from arrays." msgstr "Impossibile trovare lo stesso id host dagli array." #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "Impossibile ottenere l'id volume dall'istantanea, istantanea: %(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "Impossibile ottenere l'id volume. Nome volume: %s" #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "Impossibile importare la LUN %(lun_id)s in Cinder. Tipo di LUN non " "corrispondente." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "Impossibile importare LUN %s in Cinder. Esiste già in un HyperMetroPair." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "Impossibile importare la LUN %s in Cinder. Esiste già in un'attività di " "copia LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "Impossibile importare LUN %s in Cinder. Esiste già in un gruppo LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "" "Impossibile importare la LUN %s in Cinder. Esiste già in un mirror LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "" "Impossibile importare la LUN %s in Cinder. Esiste già in uno SplitMirror." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "Impossibile importare la LUN %s in Cinder. Esiste già in un'attività di " "migrazione." #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "" "Impossibile importare la LUN %s in Cinder. Esiste già in un'attività di " "replica remota." #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "Impossibile importare LUN %s in Cinder. Lo stato LUN non è normal." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "Impossibile importare l'istantanea %s in Cinder. L'istantanea non appartiene " "al volume." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "Impossibile importare l'istantanea %s in Cinder. L'istantanea è esposta " "all'iniziatore." #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "Impossibile importare l'istantanea %s in Cinder. Lo stato dell'istantanea " "non è normal o lo stato di esecuzione non è online." 
#, python-format msgid "Can't open config file: %s" msgstr "Impossibile aprire il file di configurazione: %s" msgid "Can't parse backup record." msgstr "Impossibile analizzare il record di backup. " #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza " "%(group_id)s perché non dispone di un tipo di volume." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza " "%(group_id)s perchéè già nel gruppo di coerenza %(orig_group)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza " "%(group_id)s perché non è possibile trovare il volume." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza " "%(group_id)s perché il volume non esiste." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza " "%(group_id)s perché il volume si trova in uno stato non valido: %(status)s. " "Gli stati validi sono: %(valid)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza " "%(group_id)s perché il tipo di volume %(volume_type)s non è supportato dal " "gruppo." #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "Impossibile collegare il volume già collegato %s; multicollegamento " "disabilitato tramitel'opzione di configurazione 'netapp_enable_multiattach'. " msgid "Cannot change VF context in the session." msgstr "Impossibile modificare il contesto VF nella sessione." #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "Impossibile modificare il contesto VF, il contesto VF specificato non è " "disponibile nell'elenco di VF gestibili %(vf_list)s." msgid "Cannot connect to ECOM server." msgstr "Impossibile effettuare la connessione al server ECOM." #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "Impossibile creare il clone della dimensione %(vol_size)s dal volume della " "dimensione %(src_vol_size)s" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "Impossibile creare il gruppo di coerenza %(group)s perché l'istantanea " "%(snap)s non è in uno stato valido. Gli stati validi sono: %(valid)s." #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." 
msgstr "" "Impossibile creare il gruppo di coerenza %(group)s perché il volume di " "origine %(source_vol)s non è in uno stato valido. Gli stati validi sono: " "%(valid)s." #, python-format msgid "Cannot create directory %s." msgstr "Impossibile creare la directory %s. " msgid "Cannot create encryption specs. Volume type in use." msgstr "Impossibile creare le specifiche di codifica. Tipo di volume in uso." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "Impossibile creare l'immagine del formato disco: %s. Viene accettato solo il " "formato disco vmdk." #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "Impossibile creare la vista di mascheramento: %(maskingViewName)s. " #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "Impossibile creare più di %(req)s volumi nell'array ESeries quando " "'netapp_enable_multiattach' è impostato su true." #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "Impossibile creare o trovare un gruppo di archiviazione con nome " "%(sgGroupName)s." #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "Impossibile creare il volume della dimensione %(vol_size)s dall'istantanea " "della dimensione %(snap_size)s" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "Impossibile creare il volume di dimensione %s: non multiplo di 8GB. " #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "Impossibile creare volume_type con nome %(name)s e specifiche %(extra_specs)s" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "Impossibile eliminare la LUN %s mentre esistono istantanee. " #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "Impossibile eliminare il volume cache: %(cachevol_name)s. È stato aggiornato " "alle%(updated_at)s e attualmente ha %(numclones)d istanze volume." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "Impossibile eliminare il volume cache: %(cachevol_name)s. È stato aggiornato " "alle%(updated_at)s e attualmente ha %(numclones)s istanze volume." msgid "Cannot delete encryption specs. Volume type in use." msgstr "" "Impossibile eliminare le specifiche di codifica. Tipo di volume in uso." msgid "Cannot determine storage pool settings." msgstr "Impossibile determinare le impostazioni del pool di archiviazione." msgid "Cannot execute /sbin/mount.sofs" msgstr "Impossibile eseguire /sbin/mount.sofs" #, python-format msgid "Cannot find CG group %s." msgstr "Impossibile trovare il gruppo CG %s." #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "Impossibile trovare il servizio di configurazione controller per il sistema " "di memorizzazione %(storage_system)s." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" "Impossibile trovare il servizio di replica per creare il volume per " "l'istantanea %s." #, python-format msgid "Cannot find Replication Service to delete snapshot %s." 
msgstr "" "Impossibile trovare il servizio di replica per eliminare l'istantanea %s." #, python-format msgid "Cannot find Replication service on system %s." msgstr "Impossibile trovare il servizio di replica sul sistema %s." #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "" "Impossibile trovare il volume: %(id)s. Annullamento gestione. Uscita..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "" "Impossibile trovare il volume: %(volumename)s. Operazione di estensione. " "Uscire...." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "Impossibile trovare il numero unità per il volume %(volumeName)s." msgid "Cannot find migration task." msgstr "Impossibile trovare l'attività di migrazione. " #, python-format msgid "Cannot find replication service on system %s." msgstr "Impossibile trovare il servizio di replica sul sistema %s." #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "Impossibile trovare l'istanza GC di origine. consistencygroup_id: %s." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "Impossibile ottenere mcs_id per id canale: %(channel_id)s. " msgid "Cannot get necessary pool or storage system information." msgstr "" "Impossibile ottenere le informazioni necessarie sul sistema di archiviazione " "o sul pool." #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "Impossibile ottenere o creare un gruppo di archiviazione: %(sgGroupName)s " "per il volume %(volumeName)s " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "Impossibile ottenere o creare il gruppo iniziatore: %(igGroupName)s. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "Impossibile ottenere il gruppo di porte: %(pgGroupName)s. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "Impossibile ottenere il gruppo di archiviazione: %(sgGroupName)s dalla vista " "di mascheramento %(maskingViewInstanceName)s. " #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Impossibile ottenere l'intervallo della dimensione supportato per %(sps)s " "Codice di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "Impossibile ottenere il gruppo di archiviazione predefinito per la politica " "FAST: %(fastPolicyName)s." msgid "Cannot get the portgroup from the masking view." msgstr "Impossibile ottenere il gruppo di porte dalla vista di mascheramento." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "Impossibile montare Scality SOFS, controllare il syslog per gli errori" msgid "Cannot ping DRBDmanage backend" msgstr "Impossibile effettuare il ping al backend DRBDmanage" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "Impossibile collocare il volume %(id)s su %(host)s" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "Impossibile fornire sia 'cgsnapshot_id' e 'source_cgid' per creareil gruppo " "di coerenza %(name)s dall'origine. 
" msgid "Cannot register resource" msgstr "Impossibile registrare la risorsa" msgid "Cannot register resources" msgstr "Impossibile registrare le risorse" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "Impossibile rimuovere il volume %(volume_id)s dal gruppo di coerenza " "%(group_id)s perché non è presente nel gruppo." #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "Impossibile rimuovere il volume %(volume_id)s dal gruppo di coerenza " "%(group_id)s perché il volume si trova in uno stato non valido: %(status)s. " "Gli stati validi sono: %(valid)s." #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "Impossibile riscrivere da HPE3PARDriver a %s." msgid "Cannot retype from one 3PAR array to another." msgstr "Impossibile riscrivere da un array 3PAR ad un altro." msgid "Cannot retype to a CPG in a different domain." msgstr "Impossibile riscrivere in un CPG in un dominio diverso." msgid "Cannot retype to a snap CPG in a different domain." msgstr "Impossibile riscrivere in uno snap CPG in un diverso dominio." msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "Impossibile eseguire il comando vgc-cluster, assicurarsi che sia installato " "il software e che le autorizzazioni siano impostate correttamente. " msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "" "Impossibile impostare entrambi hitachi_serial_number e hitachi_unit_name." msgid "Cannot specify both protection domain name and protection domain id." msgstr "" "Impossibile specificare nome dominio di protezione e id dominio di " "protezione." msgid "Cannot specify both storage pool name and storage pool id." msgstr "" "Impossibile specificare il nome del pool di memoria e l'ID del pool di " "memoria. " #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "Impossibile aggiornare il gruppo di coerenza %(group_id)s perché non sono " "stati forniti nome, descrizione, add_volumes o remove_volumes validi." msgid "Cannot update encryption specs. Volume type in use." msgstr "" "Impossibile aggiornare le specifiche di codifica. Tipo di volume in uso." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "Impossibile aggiornare volume_type %(id)s" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "Impossibile verificare l'esistenza dell'oggetto:%(instanceName)s." msgid "Cascade option is not supported." msgstr "L'opzione a catena non è supportata." #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "Impossibile trovare CgSnapshot %(cgsnapshot_id)s." msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost vuoto. Non verrà creato alcun gruppo di coerenza." msgid "Cgsnapshot status must be available or error" msgstr "Lo stato di Cgsnapshot deve essere available o error" msgid "Change hostlun id error." msgstr "Errore di modifica ID hostlun." msgid "Change lun priority error." msgstr "Errore di modifica della priorità lun. " msgid "Change lun smarttier policy error." msgstr "Errore di modifica della politica smarttier lun. 
" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "La modifica renderebbe l'utilizzo inferiore a 0 per le seguenti risorse: " "%(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "" "Controllare le autorizzazioni di accesso per la condivisione ZFS assegnata a " "questo driver." msgid "Check hostgroup associate error." msgstr "Errore di controllo associazione gruppo host. " msgid "Check initiator added to array error." msgstr "Errore di controllo aggiunta iniziatore all'array. " msgid "Check initiator associated to host error." msgstr "Errore di controllo iniziatore associato all'host. " msgid "Check lungroup associate error." msgstr "Errore di controllo associazione gruppo lun. " msgid "Check portgroup associate error." msgstr "Errore di controllo associazione gruppo porte. " msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "Controllare lo stato del servizio http. Assicurarsi che il numero della " "porta https sia uguale a quello specificato in cinder.conf." msgid "Chunk size is not multiple of block size for creating hash." msgstr "" "La dimensione della porzione non è un multiplo della dimensione del blocco " "per la creazione dell'hash." #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Errore CLI di divisione in zone di Cisco Fibre Channel: %(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "" "La funzione di creazione del clone non dispone di licenza in " "%(storageSystem)s." #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "Il tipo di clone '%(clone_type)s' non è valido; i valori validi sono: " "'%(full_clone)s' e '%(linked_clone)s'." msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "" "Il cluster non è formattato. È probabilmente necessario eseguire \"dog " "cluster format\"." #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Errore driver Coho Data Cinder: %(message)s" msgid "Coho rpc port is not configured" msgstr "La porta RPC Coho non è configurata" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Il comando %(cmd)s è bloccato nella CLI ed è stato annullato " #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "Timeout CommandLineHelper._wait_for_a_condition: %s " #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s timeout." msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "Compression Enabler non è installato. Impossibile creare il volume compresso." #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "Cluster di calcolo: %(cluster)s non trovato. " msgid "Condition has no field." msgstr "La condizione non ha campi." #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" "Configurazione 'max_over_subscription_ratio' non valida. Deve essere > 0: %s" msgid "Configuration error: dell_sc_ssn not set." msgstr "Errore di configurazione: dell_sc_ssn non impostato." #, python-format msgid "Configuration file %(configurationFile)s does not exist." 
msgstr "Il file di configurazione %(configurationFile)s non esiste." msgid "Configuration is not found." msgstr "Configurazione non trovata." #, python-format msgid "Configuration value %s is not set." msgstr "Valore di configurazione %s non impostato." msgid "Configured host type is not supported." msgstr "Il tipo di host configurato non è supportato." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "Specifiche QoS in conflitto nel tipo volume %s: quando spec QoS è associata " "al tipo di volume, legacy \"netapp:qos_policy_group\" non consentito nelle " "specifiche supplementari del tipo di volume." #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Connessione a glance non riuscita: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Connessione a swift non riuscita: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "Il connettore non fornisce: %s " #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "Il connettore non ha le informazioni necessarie: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "Il gruppo di coerenza %s ancora contiene volumi. È richiesto l'indicatore " "force per eliminarlo." #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "Il gruppo di coerenza %s ancora dispone di cgsnapshot dipendenti." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "Il gruppo di coerenza è vuoto. Non verrà creato nessun cgsnapshot." #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "Lo stato del gruppo di coerenza deve essere available o error, ma lo stato " "corrente è:%s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" "Lo stato del gruppo di coerenza deve essere available, ma lo stato corrente " "è: %s." #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "Impossibile trovare ConsistencyGroup %(consistencygroup_id)s." msgid "Container" msgstr "Contenitore" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "Formato contenitore: %s non supportato dal driver VMDK, solo 'bare' è " "supportato." msgid "Container size smaller than required file size." msgstr "" "Dimensione del contenitore più piccola della dimensione di file richiesta." msgid "Content type not supported." msgstr "Tipo di contenuto non supportato." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "Controller Configuration Service non trovato in %(storageSystemName)s." #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "Impossibile risolvere l'IP del controller '%(host)s': %(e)s." 
#, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "Convertito in %(f1)s, ma il formato ora è %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "Convertito in %(vol_format)s, ma il formato ora è %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "Convertito in non elaborato, ma il formato ora è %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "Convertito su grezzo, ma il formato è ora %s." msgid "Coordinator uninitialized." msgstr "Coordinatore non inizializzato." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "Copia attività di volume non riuscita: convert_to_base_volume: id=%(id)s, " "status=%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "Copia dei metadati da %(src_type)s %(src_id)s a %(vol_id)s." msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "Impossibile determinare quale endpoint Keystone utilizzare. Può essere " "impostato sia nel catalogo del servizio che utilizzando l'opzione di " "configurazione cinder.conf 'backup_swift_auth_url'." msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "Impossibile determinare quale endpoint Swift utilizzare. Può essere " "impostato sia nel catalogo del servizio che utilizzando l'opzione di " "configurazione cinder.conf 'backup_swift_url'." msgid "Could not find DISCO wsdl file." msgstr "Impossibile trovare il file wsdl DISCO." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "Impossibile trovare l'id cluster GPFS: %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "Impossibile trovare il dispositivo di file system GPFS: %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" "Impossibile trovare un host per il volume %(volume_id)s con tipo %(type_id)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Impossibile trovare la configurazione in %(path)s" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "Impossibile trovare l'esportazione iSCSI per il volume %(volumeName)s." #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Impossibile trovare l'esportazione iSCSI per il volume %s" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "" "Impossibile trovare la destinazione iscsi per il volume: %(volume_id)s." #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "" "Impossibile trovare la chiave nell'output del comando %(cmd)s: %(out)s." #, python-format msgid "Could not find parameter %(param)s" msgstr "Impossibile trovare il parametro %(param)s" #, python-format msgid "Could not find target %s" msgstr "Impossibile trovare la destinazione %s" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "Impossibile trovare il volume parent per l'istantanea '%s' sull'array." #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "Impossibile trovare l'istantanea univoca %(snap)s sul volume %(vol)s." 
msgid "Could not get system name." msgstr "Impossibile ottenere il nome sistema." #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Impossibile caricare l'app paste '%(name)s' in %(path)s" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "Impossibile leggere %s. Eseguire di nuovo con sudo" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "Impossibile leggere le informazioni per l'istantanea %(name)s. Codice: " "%(code)s. Motivo: %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "" "Non è stato possibile ripristinare il file di configurazione %(file_path)s:" "%(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "" "Non è stato possibile salvare la configurazione in %(file_path)s:%(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "Impossibile avviare l'istantanea del gruppo di coerenza %s." #, python-format msgid "Counter %s not found" msgstr "Contatore %s non trovato" msgid "Create QoS policy error." msgstr "Errore di creazione della politica QoS." #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Creazione del backup interrotta, lo stato del backup previsto è " "%(expected_status)s ma è stato ricevuto %(actual_status)s." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Creazione del backup interrotta, lo stato del volume previsto è " "%(expected_status)s ma è stato ricevuto %(actual_status)s." msgid "Create consistency group failed." msgstr "Creazione del gruppo di coerenza non riuscita." #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "La creazione di volumi codificati con tipo %(type)s dall'immagine %(image)s " "non è supportata." msgid "Create export for volume failed." msgstr "Creazione esportazione per il volume non riuscita." msgid "Create hostgroup error." msgstr "Errore durante la creazione del gruppo host." #, python-format msgid "Create hypermetro error. %s." msgstr "Errore di creazione hypermetro. %s." msgid "Create lun error." msgstr "Errore di creazione lun. " msgid "Create lun migration error." msgstr "Errore di creazione migrazione lun. " msgid "Create luncopy error." msgstr "Errore durante la creazione di luncopy." msgid "Create lungroup error." msgstr "Errore durante la creazione del gruppo lun." msgid "Create manager volume flow failed." msgstr "Creazione flusso del volume del gestore non riuscita. " msgid "Create port group error." msgstr "Errore di creazione gruppo di porte." msgid "Create replication error." msgstr "Errore di creazione replica." #, python-format msgid "Create replication pair failed. Error: %s." msgstr "Creazione coppia di replica non riuscita. Errore: %s." msgid "Create snapshot error." msgstr "Errore durante la creazione dell'istantanea." #, python-format msgid "Create volume error. Because %s." msgstr "Errore di creazione del volume. Perché %s. " msgid "Create volume failed." msgstr "Creazione del volume non riuscita." msgid "Creating a consistency group from a source is not currently supported." msgstr "" "La creazione di un gruppo di coerenza da un'origine non è attualmente " "supportata." 
#, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "Creazione e attivazione delle serie di zone non riuscita: (Zone set=" "%(cfg_name)s errore=%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "Creazione e attivazione delle serie di zone non riuscita: (Zone set=" "%(zoneset)s errore=%(err)s)." #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "Creazione degli utilizzi da %(begin_period)s fino a %(end_period)s" msgid "Current host isn't part of HGST domain." msgstr "L'host corrente non fa parte del dominio HGST. " #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "Host corrente non valido per il volume %(id)s con tipo %(type)s, migrazione " "non consentita" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "L'host attualmente associato per il volume %(vol)s è in un gruppo host non " "supportato con%(group)s." msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "OBSOLETO: distribuire v1 dell'API Cinder." msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "OBSOLETO: distribuire v2 dell'API Cinder." #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "Errore driver DRBDmanage: prevista chiave \"%s\" non presente nella " "risposta, versione DRBDmanage errata?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "Errore di impostazione del driver DRBDmanage: alcune librerie richieste " "(dbus, drbdmanage.*) non trovate." #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage prevedeva una risorsa (\"%(res)s\"), ottenute %(n)d" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "Timeout DRBDmanage in attesa di un nuovo volume dopo il ripristino " "dell'istantanea; risorsa \"%(res)s\", volume \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "Timeout DRBDmanage in attesa della creazione dell'istantanea; risorsa " "\"%(res)s\", istantanea \"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "" "Timeout DRBDmanage in attesa della creazione del volume; risorsa \"%(res)s" "\", volume \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "Timeout DRBDmanage in attesa della dimensione del volume; ID volume \"%(id)s" "\" (res \"%(res)s\", vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "Impossibile determinare la versione API di Data ONTAP." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" "I dati ONTAP che operano in modalità 7 non supportano i gruppi di politiche " "QoS. " msgid "Database schema downgrade is not allowed." msgstr "Il downgrade dello schema di database non è consentito." 
#, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "Il dataset %s non è condiviso nell'applicazione Nexenta Store" #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "Gruppo di dataset %s non trovato su Nexenta SA" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup è un tipo di acquisizione valido, ma richiede WSAPI versione " "'%(dedup_version)s'; è installata la versione '%(version)s'." msgid "Dedup luns cannot be extended" msgstr "Impossibile estendere le lun dedup" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "Deduplication Enabler non è installato. Impossibile creare un volume " "deduplicato" msgid "Default pool name if unspecified." msgstr "Nome pool predefinito se non specificato. " #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "Quota predefinita per la risorsa: %(res)s è impostato dall'indicatore quota " "predefinita: quota_%(res)s, è ora obsoleta. Utilizzare la classe di quota " "predefinita per la quota predefinita." msgid "Default volume type can not be found." msgstr "Impossibile trovare il tipo di volume predefinito." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "Definisce la serie di pool esposti e le relative stringhe di query backend " "associate " msgid "Delete LUNcopy error." msgstr "Errore di eliminazione LUNcopy. " msgid "Delete QoS policy error." msgstr "Errore di eliminazione della politica QoS." msgid "Delete associated lun from lungroup error." msgstr "Errore di eliminazione lun associata dal gruppo lun. " #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Eliminazione del backup interrotta, il servizio di backup attualmente " "configurato [%(configured_service)s] non è il servizio di backup utilizzato " "per creare questo backup [%(backup_service)s]." msgid "Delete consistency group failed." msgstr "Eliminazione del gruppo di coerenza non riuscita." msgid "Delete hostgroup error." msgstr "Errore di eliminazione gruppo host. " msgid "Delete hostgroup from mapping view error." msgstr "Errore di eliminazione gruppo host dalla vista associazione." msgid "Delete lun error." msgstr "Errore di eliminazione lun. " msgid "Delete lun migration error." msgstr "Errore di eliminazione migrazione lun. " msgid "Delete lungroup error." msgstr "Errore di eliminazione gruppo lun. " msgid "Delete lungroup from mapping view error." msgstr "Errore di eliminazione gruppo lun dalla vista associazione." msgid "Delete mapping view error." msgstr "Errore di eliminazione vista associazione." msgid "Delete port group error." msgstr "Errore di eliminazione gruppo di porte." msgid "Delete portgroup from mapping view error." msgstr "Errore di eliminazione gruppo porte dalla vista associazione." msgid "Delete snapshot error." msgstr "Errore di eliminazione istantanea." #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "" "Eliminazione dell'istantanea del volume non supportata nello stato: %s." 
#, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup interrotta, lo stato del backup previsto è %(expected_status)s " "ma è stato ricevuto %(actual_status)s." msgid "Deleting volume from database and skipping rpc." msgstr "Eliminazione del volume dal database, rpc ignorato." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "Eliminazione zone non riuscita: (comando=%(cmd)s errore=%(err)s)." msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "" "Dell API 2.1 o successiva richiesta per il supporto del gruppo di coerenza" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "" "Replica dell'errore di configurazione del driver Dell Cinder non supportata " "con la connessione diretta." #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Dispositivo di replica dell'errore di configurazione del driver Dell Cinder " "%s non trovato" msgid "Deploy v3 of the Cinder API." msgstr "Distribuire v3 dell'API Cinder." msgid "Describe-resource is admin only functionality" msgstr "Describe-resource è una funzionalità solo di admin" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "La destinazione ha migration_status %(stat)s, è previsto %(exp)s." msgid "Destination host must be different than the current host." msgstr "L'host di destinazione deve essere diverso dall'host corrente." msgid "Destination volume not mid-migration." msgstr "Volume di destinazione non migrazione intermedia." msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "Scollegamento volume non riuscito: più di un collegamento, ma nessun " "attachment_id fornito. " msgid "Detach volume from instance and then try again." msgstr "Scollegare il volume dall'istanza e riprovare." #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "È stato rilevato più di un volume con il nome %(vol_name)s" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "La colonna prevista non è stata trovata in %(fun)s: %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "Impossibile trovare la chiave prevista %(key)s in %(fun)s: %(raw)s." msgid "Disabled reason contains invalid characters or is too long" msgstr "La causa disabilitata contiene caratteri non validi o è troppo lunga" #, python-format msgid "Domain with name %s wasn't found." msgstr "Impossibile trovare il dominio con nome %s. " #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Rilevato un cluster GPFS di livello inferiore. La funzione di clonazione " "GPFS non è abilitata nel livello daemon del cluster %(cur)s - deve essere " "almeno di livello %(min)s." #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "" "Inizializzazione connessione del driver non riuscita (errore: %(err)s)." msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "Il driver non è in grado di eseguire la riscrittura perché il volume (LUN " "{}) presenta un'istantanea per cui è proibita la migrazione." 
msgid "Driver must implement initialize_connection" msgstr "Il driver deve implementare initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "Il driver ha decodificato correttamente i dati di backup importati, ma ci " "sonocampi mancanti (%s)." #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "API proxy E-series versione %(current_version)s non supporta la serie " "completa dispecifiche supplementari SSC. La versione proxy deve essere " "almeno %(min_version)s. " #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "Eccezione EMC VNX Cinder Driver CLI: %(cmd)s (Codice di ritorno: %(rc)s) " "(Output: %(out)s)." msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword devono esserevalori " "validi. " #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "È necessario fornire 'cgsnapshot_id' o 'source_cgid' per creare il gruppo di " "congruenza %(name)s dall'origine." #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO: %(slo)s o carico di lavoro %(workload)s non validi. Esaminare la " "precedente istruzione di errore per i valori validi." msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "Sono richiesti sia hitachi_serial_number che hitachi_unit_name." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "Element Composition Service non trovato in %(storageSystemName)s." msgid "Enables QoS." msgstr "Abilita il QoS. " msgid "Enables compression." msgstr "Abilita la compressione. " msgid "Enables replication." msgstr "Abilita la replica. " msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "Verificare che configfs sia montato in /sys/kernel/config." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante l'aggiunta dell'iniziatore: %(initiator)s su " "groupInitiatorGroup: %(initiatorgroup)s Codice di ritorno: %(ret.status)d " "Messaggio: %(ret.data)s ." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "errore durante l'aggiunta a TargetGroup: %(targetgroup)s withIQN: " "%(iqn)sCodice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Errore di collegamento del volume %(vol)s. " #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante la clonazione dell'istantanea: %(snapshot)s sul volume: " "%(lun)s del pool: %(pool)s Progetto: %(project)s Progetto clone: " "%(clone_proj)s Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. 
Error: " "%(error)s." msgstr "" "Errore durante la creazione del volume clonato: %(cloneName)s Codice di " "ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore creazione volume clonato: Volume: %(cloneName)s Volume di origine: " "%(sourceName)s. Codice di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore nella creazione del gruppo: %(groupName)s. Codice di ritorno: " "%(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Errore nella creazione della vista di mascheramento: %(groupName)s. Codice " "di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore creazione volume: %(volumeName)s. Codice di ritorno: %(rc)lu. " "Errore: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore creazione volume: %(volumename)s. Codice di ritorno: %(rc)lu. " "Errore: %(error)s." #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Errore CreateGroupReplica: origine: %(source)s destinazione: %(target)s. " "Codice di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante la creazione dell'iniziatore: %(initiator)s sull'alias: " "%(alias)s Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante la creazione del progetto: %(project)s sul pool: %(pool)s " "Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante la creazione delle proprietà: %(property)s Tipo: %(type)s " "Descrizione: %(description)s Codice di ritorno: %(ret.status)d Messaggio: " "%(ret.data)s ." #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Errore durante la creazione della condivisione: %(name)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante la creazione dell'istantanea: %(snapshot)s sul volume: " "%(lun)s nel pool: %(pool)s Progetto: %(project)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante la creazione dell'istantanea: %(snapshot)s sulla " "condivisione: %(share)s nel pool: %(pool)s Progetto: %(project)s Codice di " "ritorno: %(ret.status)d Messaggio: %(ret.data)s." 
#, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Errore durante la creazione della destinazione: %(alias)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante la creazione di TargetGroup: %(targetgroup)s con IQN: " "%(iqn)sCodice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Errore durante la creazione del volume: %(lun)s Dimensione: %(size)s Codice " "di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore durante la creazione del nuovo volume composito; codice di ritorno: " "%(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante la creazione dell'azione di replica su: pool: %(pool)s " "Progetto: %(proj)s volume: %(vol)s per destinazione: %(tgt)s e pool: " "%(tgt_pool)s Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." msgid "Error Creating unbound volume on an Extend operation." msgstr "" "Errore durante la creazione di un volume non collegato in un'operazione di " "estensione." msgid "Error Creating unbound volume." msgstr "Errore durante la creazione del volume non collegato." #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore eliminazione volume: %(volumeName)s. Codice di ritorno: %(rc)lu. " "Errore: %(error)s." #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "Errore durante l'eliminazione del gruppo: %(storageGroupName)s. Codice di " "ritorno: %(rc)lu. Errore: %(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "Errore durante l'eliminazione del gruppo di iniziatori: " "%(initiatorGroupName)s. Codice di ritorno: %(rc)lu. Errore: %(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante l'eliminazione dell'istantanea: %(snapshot)s sulla " "condivisione: %(share)s nel pool: %(pool)s Progetto: %(project)s Codice di " "ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante l'eliminazione dell'istantanea: %(snapshot)s sul volume: " "%(lun)s nel pool: %(pool)s Progetto: %(project)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Errore durante l'eliminazione del volume: %(lun)s dal pool: %(pool)s, " "Progetto: %(project)s. Codice di ritorno: %(ret.status)d, Messaggio: " "%(ret.data)s." 
#, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante l'eliminazione del progetto: %(project)s sul pool: %(pool)s " "Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Errore durante l'eliminazione dell'azione di replica: %(id)s Codice di " "ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore estensione volume: %(volumeName)s. Codice di ritorno: %(rc)lu. " "Errore: %(error)s." #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante l'acquisizione degli iniziatori: InitiatorGroup:" "%(initiatorgroup)s Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "Errore durante il richiamo delle statistiche del pool: Pool: %(pool)s Codice " "di ritorno: %(status)d Messaggio: %(data)s." #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante l'acquisizione delle statistiche del progetto: Pool: %(pool)s " "Progetto: %(project)s Codice di ritorno: %(ret.status)d Messaggio: " "%(ret.data)s." #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante l'acquisizione della condivisione: %(share)s nel pool: " "%(pool)s Progetto: %(project)s Codice di ritorno: %(ret.status)d Messaggio: " "%(ret.data)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante l'acquisizione dell'istantanea: %(snapshot)s sul volume: " "%(lun)s nel pool: %(pool)s Progetto: %(project)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Errore durante l'acquisizione della destinazione: %(alias)s Codice di " "ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante l'acquisizione del volume: %(lun)s nel pool: %(pool)s " "Progetto: %(project)s Codice di ritorno: %(ret.status)d Messaggio: " "%(ret.data)s." #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Errore durante la migrazione del volume da un pool ad un altro. Codice di " "ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Errore durante la modifica della vista di mascheramento: %(groupName)s. " "Codice di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." 
msgstr "" "Proprietà del pool degli errori: Il pool %(pool)s non è di proprietà di " "%(host)s." #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante l'impostazione props Props: %(props)s sul volume: %(lun)s del " "pool: %(pool)s Progetto: %(project)s Codice di ritorno: %(ret.status)d " "Messaggio: %(ret.data)s." #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore durante la chiusura della sessione di migrazione. Codice di ritorno: " "%(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Errore durante la verifica dell'iniziatore: %(iqn)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Errore durante la verifica del pool: %(pool)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "errore durante la verifica del progetto: %(project)s nel pool: %(pool)s " "Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Errore durante la verifica del servizio: %(service)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Errore durante la verifica della destinazione: %(alias)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Errore durante la verifica della condivisione: %(share)s sul Progetto:" "%(project)s e Pool: %(pool)s Codice di ritorno: %(ret.status)d Messaggio: " "%(ret.data)s ." #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "Errore durante l'aggiunta del volume: %(volumeName)s con percorso istanza: " "%(volumeInstancePath)s." #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Errore durante l'aggiunta dell'iniziatore al gruppo: %(groupName)s. Codice " "di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "" "Errore durante l'aggiunta del volume al volume composito. L'errore è: " "%(error)s." #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "" "Errore durante l'aggiunta del volume %(volumename)s al volume di base di " "destinazione." #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "Errore durante l'associazione del gruppo storage: %(storageGroupName)s. alla " "politica FAST: %(fastPolicyName)s con descrizione dell'errore: %(errordesc)s." #, python-format msgid "Error attaching volume %s. Target limit might be reached!" 
msgstr "" "Errore durante il col legamento del volume %s. Potrebbe esser stato " "raggiunto il limite di destinazione. " #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Errore durante l'interruzione della relazione del clone: Nome " "sincronizzazione: %(syncName)s Codice di ritorno: %(rc)lu. Errore: %(error)s." msgid "Error connecting to ceph cluster." msgstr "Errore durante la connessione al cluster ceph." #, python-format msgid "Error connecting via ssh: %s" msgstr "Errore durante la connessione mediante ssh: %s" #, python-format msgid "Error creating volume: %s." msgstr "Errore durante la creazione del volume: %s. " msgid "Error deleting replay profile." msgstr "Errore durante l'eliminazione del profilo di risposta. " #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "Errore durante l'eliminazione del volume %(ssn)s: %(volume)s" #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "Errore durante l'eliminazione del volume %(vol)s: %(err)s." #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "Errore durante l'analisi del programma di valutazione: %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Errore durante la modifica della condivisione: %(share)s nel pool: %(pool)s " "Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "Errore durante l'abilitazione di iSER per il portale di rete: assicurarsi " "che RDMA sia supportato sulla porta iSCSI %(port)d sull'ip %(ip)s." #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "" "Si è verificato un errore durante la ripulitura di un collegamento non " "riuscito: %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "Errore nell'esecuzione dell'API CloudByte [%(cmd)s], Errore:%(err)s." msgid "Error executing EQL command" msgstr "Errore durante l'esecuzione del comando EQL" #, python-format msgid "Error executing command via ssh: %s" msgstr "Errore di esecuzione comando tramite ssh: %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "Errore durante l'estensione del volume %(vol)s: %(err)s." #, python-format msgid "Error extending volume: %(reason)s" msgstr "Errore durante l'estensione del volume: %(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "Errore nella ricerca di %(name)s." #, python-format msgid "Error finding %s." msgstr "Errore nella ricerca di %s." #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore durante il richiamo di ReplicationSettingData. Codice di ritorno: " "%(rc)lu. Errore: %(error)s." msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Errore nel richiamo dei dettagli della versione del dispositivo. Codice di " "ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "Errore nel recupero dell'id dominio dal nome %(name)s: %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." 
msgstr "Errore nel recupero dell'id dominio dal nome %(name)s: %(id)s." msgid "Error getting initiator groups." msgstr "Errore durante l'acquisizione dei gruppi iniziatori." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "Errore nel recupero dell'id pool dal nome %(pool)s: %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "Errore nel recupero dell'id pool dal nome %(pool_name)s: %(err_msg)s." #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Errore nel richiamo dell'azione di replica: %(id)s. Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Errore nel richiamo dei dettagli dell'origine di replica. Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Errore nel richiamo dei dettagli della destinazione di replica. Codice di " "ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Errore durante l'acquisizione della versione: svc: %(svc)s.Codice di " "ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" "Errore nell'operazione [%(operation)s] per il volume [%(cb_volume)s] nella " "memoria CloudByte: [%(cb_error)s], codice di errore: [%(error_code)s]." #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "Errore nella risposta dell'API SolidFire: data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "Errore in space-create per %(space)s di dimensione %(size)d GB" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "" "Errore in space-extend per il volume %(space)s con %(size)d GB aggiuntivi" #, python-format msgid "Error managing volume: %s." msgstr "Errore durante la gestione del volume: %s. " #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "Errore durante l'associazione del volume %(vol)s. %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Errore durante la sincronizzazione della replica di modifica: %(sv)s " "operazione: %(operation)s. Codice di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Errore durante la modifica del servizio: %(service)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante lo spostamento del volume: %(vol)s dal progetto di origine: " "%(src)s al progetto di destinazione: %(tgt)s Codice di ritorno: " "%(ret.status)d Messaggio: %(ret.data)s ." msgid "Error not a KeyError." msgstr "L'errore non è un KeyError." msgid "Error not a TypeError." msgstr "L'errore non è un TypeError." 
#, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "Si è verificato un errore durante la creazione di cgsnapshot %s." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "Si è verificato un errore durante l'eliminazione di cgsnapshot %s." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "" "Si è verificato un errore durante l'aggiornamento del gruppo di coerenza %s." #, python-format msgid "Error parsing config file: %s" msgstr "Errore durante l'analisi del file di configurazione: %s" msgid "Error promoting secondary volume to primary" msgstr "Errore quando si promuove il volume secondario in primario" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "Errore durante la rimozione del volume %(vol)s. %(error)s." #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Errore durante la ridenominazione del volume %(vol)s: %(err)s." #, python-format msgid "Error response: %s" msgstr "Risposta errore: %s" msgid "Error retrieving volume size" msgstr "Errore durante il richiamo della dimensione del volume " #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante l'invio dell'aggiornamento di replica per l'id azione: " "%(id)s. Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" "Errore durante l'invio dell'aggiornamento di replica. Errore restituito: " "%(err)s. Azione: %(id)s. " #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore durante l'impostazione dell'eredità di di replica su %(set)s per il " "volume: %(vol)s progetto %(project)s Codice di ritorno: %(ret.status)d " "Messaggio: %(ret.data)s ." #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Errore staccando il pacchetto: %(package)s dall'origine: %(src)s Codice di " "ritorno: %(ret.status)d Messaggio: %(ret.data)s." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "" "Errore durante lo scollegamento del volume %(vol)s dal pool. %(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "Errore durante la verifica della dimensione del clone sul clone del volume: " "%(clone)s Dimensione: %(size)d sull'istantanea: %(snapshot)s" #, python-format msgid "Error while authenticating with switch: %s." msgstr "Errore durante l'autenticazione con lo switch: %s." #, python-format msgid "Error while changing VF context %s." msgstr "Errore durante la modifica del contesto VF %s." #, python-format msgid "Error while checking the firmware version %s." msgstr "Errore durante il controllo della versione firmware %s." #, python-format msgid "Error while checking transaction status: %s" msgstr "Errore durante il controllo dello stato della transazione: %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "" "Errore durante il controllo per verificare se VF è disponibile per la " "gestione di %s." #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. 
" "Error: %(error)s." msgstr "" "Errore durante la connessione dello switch %(switch_id)s con protocollo " "%(protocol)s. Errore: %(error)s." #, python-format msgid "Error while creating authentication token: %s" msgstr "Errore durante la creazione del token di autenticazione: %s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "" "Errore durante la creazione dell'istantanea [status] %(stat)s - [result] " "%(res)s." #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "" "Errore durante la creazione del volume [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "" "Errore durante l'eliminazione dell'istantanea [status] %(stat)s - [result] " "%(res)s" #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "" "Errore durante l'eliminazione del volume [status] %(stat)s - [result] " "%(res)s." #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "" "Errore durante l'estensione del volume [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "" "Errore durante il richiamo dei dettagli %(op)s, codice restituito: " "%(status)s." #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "" "Errore durante il ricevimento dei dati tramite ssh: (comando=%(cmd)s errore=" "%(err)s)." #, python-format msgid "Error while getting disco information [%s]." msgstr "Errore durante il richiamo delle informazioni disco [%s]." #, python-format msgid "Error while getting nvp value: %s." msgstr "Errore durante il richiamo del valore nvp: %s." #, python-format msgid "Error while getting session information %s." msgstr "Errore durante il richiamo delle informazioni sulla sessione %s." #, python-format msgid "Error while parsing the data: %s." msgstr "Errore durante l'analisi dei dati: %s." #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "" "Errore durante la query della pagina %(url)s sullo switch, motivo %(error)s." #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "Errore durante la rimozione di zone e cfgs nella stringa di zona. " "%(description)s." #, python-format msgid "Error while requesting %(service)s API." msgstr "Errore durante la richiesta dell'API di %(service)s." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "" "Errore durante l'esecuzione di zoning CLI: (comando=%(cmd)s errore=%(err)s)." #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" "Errore durante l'aggiornamento di nuove zone e cfgs nella stringa di zona. " "Errore %(description)s." msgid "Error writing field to database" msgstr "Errore durante la scrittura del campo nel database" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "Errore [%(stat)s - %(res)s] durante il richiamo dell'id volume." #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "Errore [%(stat)s - %(res)s] durante il ripristino dell'istantanea " "[%(snap_id)s] nel volume [%(vol)s]." 
#, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "" "Errore [status] %(stat)s - [result] %(res)s] durante il richiamo dell'id " "volume." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "Superamento numero max tentativi di pianificazione %(max_attempts)d per il " "volume %(volume_id)s" msgid "Exceeded the limit of snapshots per volume" msgstr "Superato il limite di istantanee per volume" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "" "Eccezione durante l'aggiunta del volume meta al volume di destinazione " "%(volumename)s." #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "Eccezione durante la creazione della replica elemento. Nome clone: " "%(cloneName)s Nome origine: %(sourceName)s Spec supplementari: " "%(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "Eccezione in _select_ds_for_volume: %s." #, python-format msgid "Exception while forming the zone string: %s." msgstr "Eccezione durante la formazione della stringa di zona: %s." #, python-format msgid "Exception: %s" msgstr "Eccezione: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Era previsto un uuid ma è stato ricevuto %(uuid)s." #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "Previsto esattamente un solo nodo chiamato \"%s\"" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "Previsto numero intero per node_count, restituito svcinfo lsiogrp: %(node)s." #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" "Non è previsto alcun output dal comando CLI %(cmd)s, è stato ricevuto " "%(out)s." #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "Prevista restituzione vdisk singolo da lsvdisk durante il filtro su " "vdisk_UID. %(count)s restituito." #, python-format msgid "Expected volume size was %d" msgstr "La dimensione del volume prevista era %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Esportazione del backup interrotta, lo stato del backup previsto è " "%(expected_status)s ma è stato ricevuto %(actual_status)s." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Esportazione del record interrotta; il servizio di backup attualmente " "configurato [%(configured_service)s] non è il servizio di backup utilizzato " "per creare questo backup [%(backup_service)s]." msgid "Extend volume error." msgstr "Errore di estensione del volume. " msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "Estensione volume è supportata solo per questo driver quando non esiste " "nessuna istantanea." msgid "Extend volume not implemented" msgstr "Estensione volume non implementata" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "FAST VP Enabler non è installato. Impossibile impostare la politica a " "livelli per il volume" msgid "FAST is not supported on this array." 
msgstr "FAST non supportato su questo array." msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC è il protocollo ma i wwpn non sono forniti da OpenStack." #, python-format msgid "Faield to unassign %(volume)s" msgstr "Impossibile annullare l'assegnazione %(volume)s" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "Impossibile creare il volume cache %(volume)s. Errore: %(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "Impossibile aggiungere la connessione per fabric=%(fabric)s: Errore:%(err)s" msgid "Failed cgsnapshot" msgstr "cgsnapshot non riuscito" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "Impossibile creare l'istantanea per il gruppo: %(response)s." #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" "Impossibile creare l'istantanea per il volume %(volname)s: %(response)s." #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "Impossibile ottenere la zona attiva impostata da fabric %s." #, python-format msgid "Failed getting details for pool %s." msgstr "Impossibile acquisire i dettagli per il pool %s." #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "Impossibile rimuovere la connessione per fabric=%(fabric)s: Errore:%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "Errore di estensione del volume %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "Impossibile accedere a 3PAR (%(url)s) perché %(err)s" msgid "Failed to access active zoning configuration." msgstr "Impossibile accedere alla configurazione di zonatura attiva. " #, python-format msgid "Failed to access zoneset status:%s" msgstr "Impossibile accedere allo stato zoneset:%s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "Impossibile acquisire un blocco risorsa. (serial: %(serial)s, inst: " "%(inst)s, ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "Impossibile aggiungere %(vol)s in %(sg)s dopo %(retries)s tentativi." msgid "Failed to add the logical device." msgstr "Impossibile aggiungere l'unità logica." #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Impossibile aggiungere il volume %(volumeName)s al gruppo di coerenza " "%(cgName)s. Codice di ritorno: %(rc)lu. Errore: %(error)s." msgid "Failed to add zoning configuration." msgstr "Impossibile aggiungere la configurazione di zonatura. " #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" "Impossibile assegnare l'iniziatore iSCSI IQN. (porta: %(port)s, motivo: " "%(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "Impossibile associare qos_specs: %(specs_id)s con il tipo %(type_id)s." #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "Impossibile collegare la destinazione per il volume %(volume_id)s." 
#, python-format msgid "Failed to backup volume metadata - %s" msgstr "Impossibile eseguire il backup dei metadati del volume - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "Impossibile eseguire il backup sui metadati di volume - Oggetto di backup " "dei metadati 'backup.%s.meta' esiste già" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "Impossibile clonare il volume dall'istantanea %s." #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "Impossibile connettersi a %(vendor_name)s Array %(host)s: %(err)s" msgid "Failed to connect to Dell REST API" msgstr "Impossibile collegarsi all'API Dell REST" msgid "Failed to connect to array" msgstr "Impossibile stabilire una connessione all'array" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" "Impossibile connettersi al daemon sheep, indirizzo: %(addr)s, porta: %(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "Impossibile copiare l'immagine nel volume: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "Impossibile copiare i metadati nel volume: %(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "" "Impossibile copiare il volume, dispositivo di destinazione non disponibile. " msgid "Failed to copy volume, source device unavailable." msgstr "" "Impossibile copiare il volume, dispositivo di origine non disponibile. " #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "Impossibile creare il GC %(cgName)s dall'istantanea %(cgSnapshot)s." #, python-format msgid "Failed to create IG, %s" msgstr "Impossibile creare IG, %s" msgid "Failed to create SolidFire Image-Volume" msgstr "Impossibile creare il volume dell'immagine di SolidFire" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Impossibile creare il gruppo volume: %(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Impossibile creare un file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "Impossibile creare un'istantanea temporanea per il volume %s. " msgid "Failed to create api volume flow." msgstr "Impossibile creare il flusso del volume api." #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "" "Impossibile creare l'istantanea del gruppo di coerenza %(id)s a causa di " "%(reason)s." #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "Impossibile creare il gruppo di coerenza %(id)s a causa di %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "Impossibile creare il gruppo di coerenza %(id)s:%(ret)s." #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "Impossibile creare il gruppo di coerenza %s perché il gruppo di coerenza VNX " "non può accettare LUN compresse come membri." #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "Impossibile creare il gruppo di coerenza: %(cgName)s." #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." 
msgstr "" "Impossibile creare il gruppo di coerenza: %(cgid)s. Errore: %(excmsg)s." #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Impossibile creare il gruppo di coerenza: %(consistencyGroupName)s Codice " "di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "Impossibile creare l'id hardware su %(storageSystemName)s." #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "" "Impossibile creare l'host: %(name)s. Controllare se esiste nell'array. " #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "Impossibile creare il gruppo host: %(name)s. Controllare se esiste " "nell'array. " msgid "Failed to create iqn." msgstr "Impossibile creare l'iqn." #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "Impossibile creare la destinazione iscsi per il volume %(volume_id)s." msgid "Failed to create manage existing flow." msgstr "Impossibile creare il flusso di gestione esistente." msgid "Failed to create manage_existing flow." msgstr "Impossibile creare il flusso manage_existing." msgid "Failed to create map on mcs, no channel can map." msgstr "Impossibile creare la mappa su mcs, nessun canale può associarsi. " msgid "Failed to create map." msgstr "Impossibile creare la mappa." #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "Impossibile creare i metadati per il volume: %(reason)s" msgid "Failed to create partition." msgstr "Impossibile creare la partizione. " #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" "Impossibile creare qos_specs: %(name)s con le specifiche %(qos_specs)s." msgid "Failed to create replica." msgstr "Impossibile creare la replica. " msgid "Failed to create scheduler manager volume flow" msgstr "Impossibile creare il flusso del volume del gestore scheduler" #, python-format msgid "Failed to create snapshot %s" msgstr "Impossibile creare istantanea %s" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "Impossibile creare l'istantanea poiché non è specificato alcun ID LUN" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "Impossibile creare l'istantanea per cg: %(cgName)s." #, python-format msgid "Failed to create snapshot for volume %s." msgstr "Impossibile creare l'istantanea per il volume %s." #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "" "impossibile creare la politica di istantanea sul volume %(vol)s: %(res)s." #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "" "Impossibile creare l'area risorsa istantanea sul volume %(vol)s:%(res)s. " msgid "Failed to create snapshot." msgstr "Impossibile creare l'istantanea." #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "Impossibile creare l'istantanea. Non sono state trovate le informazioni sul " "volume CloudByte per il volume OpenStack [%s]." #, python-format msgid "Failed to create south bound connector for %s." msgstr "Impossibile creare il connettore south bound per %s." #, python-format msgid "Failed to create storage group %(storageGroupName)s." 
msgstr "Impossibile creare il gruppo di archiviazione %(storageGroupName)s." #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "Creazione del thin pool non riuscita, messaggio di errore: %s" #, python-format msgid "Failed to create volume %s" msgstr "Impossibile creare il volume %s" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" "Impossibile eliminare SI per volume_id: %(volume_id)s perché ha coppia. " #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Impossibile eliminare un'unità logica. (LDEV: %(ldev)s, motivo: %(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "" "Impossibile eliminare l'istantanea del gruppo di coerenza %(id)s a causa di " "%(reason)s." #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "" "Impossibile eliminare il gruppo di coerenza %(id)s a causa di %(reason)s." #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "Impossibile eliminare il gruppo di coerenza: %(cgName)s." #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Impossibile eliminare il gruppo di coerenza: %(consistencyGroupName)s Codice " "di ritorno: %(rc)lu. Errore: %(error)s." msgid "Failed to delete device." msgstr "Impossibile eliminare il dispositivo." #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Impossibile eliminare il fileset per il gruppo di coerenza %(cgname)s. " "Errore: %(excmsg)s." msgid "Failed to delete iqn." msgstr "Impossibile eliminare l'iqn. " msgid "Failed to delete map." msgstr "Impossibile eliminare la mappa. " msgid "Failed to delete partition." msgstr "Impossibile eliminare la partizione. " msgid "Failed to delete replica." msgstr "Impossibile eliminare la replica. " #, python-format msgid "Failed to delete snapshot %s" msgstr "Impossibile eliminare istantanea %s" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "Impossibile eliminare l'istantanea per cg: %(cgId)s." #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "" "Impossibile eliminare l'istantanea per snapshot_id: %s perché ha coppia. " msgid "Failed to delete snapshot." msgstr "Impossibile eliminare l'istantanea." #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "Impossibile eliminare il volume %(volumeName)s." #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "Impossibile eliminare il volume per volume_id: %(volume_id)s perché ha " "coppia. " #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "Impossibile scollegare la destinazione iSCSI del volume %(volume_id)s." msgid "Failed to determine blockbridge API configuration" msgstr "Impossibile determinare la configurazione API blockbridge" msgid "Failed to disassociate qos specs." msgstr "Impossibile annullare l'associazione delle specifiche (specs) qos." #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "Impossibile annullare l'associazione di qos_specs: %(specs_id)s con il tipo " "%(type_id)s." 
#, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "" "Impossibile garantire l'area risorsa istantanea, impossibile individuare il " "volume per l'ID %s " msgid "Failed to establish SSC connection." msgstr "Impossibile stabilire la connessione SSC. " msgid "Failed to establish connection with Coho cluster" msgstr "Impossibile stabilire la connessione con il cluster Coho" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "Impossibile eseguire API CloudByte [%(cmd)s]. Stato Http:%(status)s, Errore: " "%(error)s." msgid "Failed to execute common command." msgstr "Impossibile eseguire il comando comune. " #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "Impossibile eseguire l'esportazione per il volume: %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "" "Impossibile estendere il volume %(name)s, Messaggio di errore: %(msg)s. " msgid "Failed to find QoSnode" msgstr "Impossibile trovare QoSnode" msgid "Failed to find Storage Center" msgstr "Storage Center non trovato" msgid "Failed to find a vdisk copy in the expected pool." msgstr "Impossibile trovare una copia del disco virtuale nel pool previsto." msgid "Failed to find account for volume." msgstr "Impossibile trovare l'account per il volume. " #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" "Impossibile rilevare il fileset per il percorso %(path)s, output del " "comando: %(cmdout)s." #, python-format msgid "Failed to find group snapshot named: %s" msgstr "Impossibile trovare l'istantanea del gruppo denominata: %s" #, python-format msgid "Failed to find host %s." msgstr "Impossibile trovare l'host %s." #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" "Impossibile trovare il gruppo di iniziatori iSCSI contenente %(initiator)s." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "" "Impossibile trovare il pool di archiviazione per il volume di origine %s." #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Non è stato possibile ottenere i dettagli dell'account CloudByte [%s]." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "Impossibile ottenere i dettagli della destinazione LUN per LUN %s" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "Impossibile ottenere i dettagli della destinazione LUN per la LUN %s" #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "Impossibile acquisire l'elenco di destinazione LUN per LUN %s" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "Impossibile ottenere l'ID partizione per il volume %(volume_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "" "Impossibile ottenere l'ID istantanea raid dall'istantanea %(snapshot_id)s. " #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "Impossibile ottenere l'ID istantanea raid dall'istantanea %(snapshot_id)s. " msgid "Failed to get SplitMirror." msgstr "Impossibile ottenere SplitMirror." #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. 
(resource: %(resource)s)" msgstr "" "Impossibile ottenere la risorsa di memoria. Il sistema tenterà di acquisire " "di nuovo la risorsa di memoria. (risorsa: %(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "" "Impossibile ottenere tutte le associazioni delle specifiche (specs) qos %s" msgid "Failed to get channel info." msgstr "Impossibile ottenere le informazioni canale. " #, python-format msgid "Failed to get code level (%s)." msgstr "Impossibile acquisire il livello di codice (%s)." msgid "Failed to get device info." msgstr "Impossibile ottenere le informazioni dispositivo. " #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "Impossibile ottenere il dominio poiché CPG (%s) non esiste nell'array." msgid "Failed to get image snapshots." msgstr "Impossibile ottenere le istantanee dell'immagine." #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "" "Impossibile richiamare l'ip sul canale %(channel_id)s con il volume: " "%(volume_id)s." msgid "Failed to get iqn info." msgstr "Impossibile ottenere le informazioni iqn. " msgid "Failed to get license info." msgstr "Impossibile ottenere le informazioni licenza. " msgid "Failed to get lv info." msgstr "Impossibile ottenere le informazioni lv. " msgid "Failed to get map info." msgstr "Impossibile ottenere le informazioni mappa. " msgid "Failed to get migration task." msgstr "Impossibile ottenere l'attività di migrazione." msgid "Failed to get model update from clone" msgstr "Impossibile ottenere l'aggiornamento del modello dal clone" msgid "Failed to get name server info." msgstr "Impossibile ottenere le informazioni sul server nomi. " msgid "Failed to get network info." msgstr "Impossibile ottenere le informazioni rete. " #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "Impossibile ottenere l'id nuova parte nel nuovo pool: %(pool_id)s. " msgid "Failed to get partition info." msgstr "Impossibile ottenere le informazioni partizione. " #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "Impossibile ottenere l'id pool con il volume %(volume_id)s." #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" "Impossibile ottenere le informazioni di copia remota per %(volume)s a causa " "di %(err)s." #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "" "Impossibile ottenere le informazioni di copia remota per %(volume)s. " "Eccezione: %(err)s." msgid "Failed to get replica info." msgstr "Impossibile ottenere le informazioni replica. " msgid "Failed to get show fcns database info." msgstr "Impossibile visualizzare le informazioni sul database fcns. " msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" "Impossibile ottenere la dimensione del volume esistente: %(vol). Gestione " "volume non riuscita." #, python-format msgid "Failed to get size of volume %s" msgstr "Impossibile ottenere la dimensione del volume %s" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "Impossibile ottenere l'istantanea per il volume %s." msgid "Failed to get snapshot info." msgstr "Impossibile ottenere le informazioni istantanea. " #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "Impossibile ottenere IQN di destinazione per LUN %s" msgid "Failed to get target LUN of SplitMirror." 
msgstr "Impossibile ottenere la LUN di destinazione di SplitMirror." #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "Impossibile ottenere il portale di destinazione per LUN %s" msgid "Failed to get targets" msgstr "Impossibile ottenere le destinazioni" msgid "Failed to get wwn info." msgstr "Impossibile ottenere le informazioni wwn. " #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "Impossibile ottenere, creare o aggiungere il volume %(volumeName)s alla " "vista di mascheramento %(maskingViewName)s. Il messaggio di errore ricevuto " "è %(errorMessage)s." msgid "Failed to identify volume backend." msgstr "Impossibile identificare il backend del volume." #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "" "Impossibile collegare il fileset per la condivisione %(cgname)s. Errore: " "%(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "Impossibile accedere a %s Array (accesso non valido?). " #, python-format msgid "Failed to login for user %s." msgstr "Impossibile eseguire l'accesso per l'utente %s. " msgid "Failed to login with all rest URLs." msgstr "Impossibile accedere con tutti gli URL rest. " #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "Impossibile effettuare la richiesta all'endpoint del cluster Datera a causa " "del seguente motivo: %s" msgid "Failed to manage api volume flow." msgstr "Impossibile gestire il flusso del volume api." #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Impossibile gestire %(type)s %(name)s esistente, poiché la dimensione " "%(size)s riportata non è un numero a virgola mobile." #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "Impossibile gestire il volume esistente %(name)s, a causa di un errore " "durante l'acquisizione della dimensione del volume." #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "Impossibile gestire il volume esistente %(name)s, perché l'operazione di " "ridenominazione ha avuto esito negativo: messaggio di errore: %(msg)s." #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Impossibile gestire il volume esistente %(name)s, poiché la dimensione " "%(size)s riportata non è un numero a virgola mobile." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "Impossibile gestire il volume esistente poiché il pool del tipo di volume " "scelto non corrisponde alla condivisione NFS trasmessa nel riferimento " "volume. " msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "Impossibile gestire il volume esistente poiché il pool del tipo di volume " "scelto non corrisponde al file system trasmesso nel riferimento volume. " msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." 
msgstr "" "Impossibile gestire il volume esistente poiché il pool del tipo di volume " "scelto non corrisponde al pool dell'host. " #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "Impossibile gestire il volume esistente a causa di una mancata " "corrispondenza del gruppo I/O. Il gruppo I/O del volume da gestire è " "%(vdisk_iogrp)s. Il gruppo I/O del tipo scelto è %(opt_iogrp)s." #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "Impossibile gestire il volume esistente in quanto il pool del volume da " "gestire non corrisponde al pool del backend. Il pool del volume da gestire è " "%(vdisk_pool)s. Il pool del backend è %(backend_pool)s." msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "Impossibile gestire il volume esistente in quanto il volume da gestire è " "compress, ma il tipo di volume scelto non è compress." msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "Impossibile gestire il volume esistente in quanto il volume da gestire non è " "compress, ma il tipo di volume scelto è compress." msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "Impossibile gestire il volume esistente in quanto il volume da gestire non " "si trova in un gruppo I/O valido." msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "Impossibile gestire il volume esistente in quanto il volume da gestire è " "thick ma il tipo di volume scelto è thin." msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "Impossibile gestire il volume esistente in quanto il volume da gestire è " "thin, ma il tipo di volume scelto è thick." #, python-format msgid "Failed to manage volume %s." msgstr "Impossibile gestire il volume %s." #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "Impossibile associare un'unità logica. (LDEV: %(ldev)s, LUN: %(lun)s, porta: " "%(port)s, id: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "Impossibile migrare il volume per la prima volta." msgid "Failed to migrate volume for the second time." msgstr "Impossibile migrare il volume per la seconda volta." #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "Impossibile spostare l'associazione LUN. Codice di ritorno: %s " #, python-format msgid "Failed to move volume %s." msgstr "Impossibile spostare il volume %s." #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Impossibile aprire un file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Impossibile analizzare l'output CLI:\n" " comando: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." 
msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "Impossibile analizzare l'opzione di configurazione 'keystone_catalog_info', " "deve avere il formato ::" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "Impossibile analizzare l'opzione di configurazione 'swift_catalog_info', " "deve avere il formato ::" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "Impossibile eseguire una correzione a pagina zero. (LDEV: %(ldev)s, motivo: " "%(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "" "Impossibile rimuovere l'esportazione per il volume %(volume)s: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" "Impossibile rimuovere la destinazione iscsi per il volume %(volume_id)s." #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Impossibile rimuovere il volume %(volumeName)s dal gruppo di coerenza " "%(cgName)s. Codice di ritorno: %(rc)lu. Errore: %(error)s." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "Impossibile rimuovere il volume %(volumeName)s dal GM predefinito. " #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "" "Impossibile rimuovere il volume %(volumeName)s da SG predefinito: " "%(volumeName)s." #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "Impossibile rimuovere: %(volumename)s. dal gruppo storage predefinito per la " "politica FAST %(fastPolicyName)s." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "Impossibile ridenominare il volume logico %(name)s; il messaggio di errore " "è: %(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "Impossibile ripristinare la configurazione di zona attiva %s" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "" "Impossibile impostare l'autenticazione CHAP per la destinazione IQN %(iqn)s. " "Dettagli: %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "Impossibile impostare QoS per il volume esistente %(name)s, Messaggio di " "errore: %(msg)s. " msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "" "Impossibile impostare l'attributo 'Utente in entrata' per la destinazione " "SCST." msgid "Failed to set partition." msgstr "Impossibile impostare la partizione. " #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "Impossibile impostare le autorizzazioni per il gruppo di coerenza " "%(cgname)s. Errore: %(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "Impossibile specificare un'unità logica per il volume %(volume_id)s per cui " "annullare l'associazione." #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "Impossibile specificare un'unità logica da eliminare. 
(metodo: %(method)s, " "id: %(id)s)" msgid "Failed to terminate migrate session." msgstr "Impossibile terminare la sessione di migrazione." #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "Impossibile scollegare il volume %(volume)s" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Impossibile scollegare la fileset per il gruppo di coerenza %(cgname)s. " "Errore: %(excmsg)s." #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Impossibile annullare l'associazione di un'unità logica. (LDEV: %(ldev)s, " "motivo: %(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "Impossibile aggiornare il gruppo di coerenza: %(cgName)s." #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "Impossibile aggiornare i metadati per il volume: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "Impossibile aggiornare o eliminare la configurazione di zona" msgid "Failed to update or delete zoning configuration." msgstr "Impossibile aggiornare o eliminare la configurazione di zona." #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "Impossibile aggiornare qos_specs: %(specs_id)s con le specifiche " "%(qos_specs)s." msgid "Failed to update quota usage while retyping volume." msgstr "" "Impossibile aggiornare l'utilizzo della quota durante la riscrittura del " "volume." msgid "Failed to update snapshot." msgstr "Impossibile aggiornare l'istantanea." #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "" "Impossibile aggiornare il modello con il modello driver fornito %(model)s" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "Impossibile aggiornare i metadati del volume %(vol_id)s utilizzando i " "metadati %(src_type)s %(src_id)s forniti" #, python-format msgid "Failure creating volume %s." msgstr "Errore nella creazione del volume %s." #, python-format msgid "Failure getting LUN info for %s." msgstr "Errore durante l'acquisizione delle informazioni sulla LUN per %s." #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "Errore in update_volume_key_value_pair:%s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "Errore durante lo spostamento della nuova LUN clonata in %s." #, python-format msgid "Failure staging LUN %s to tmp." msgstr "Errore di trasferimento della LUN %s in tmp." msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "" "Errore irreversibile: l'utente non è autorizzato ad eseguire la query dei " "volumi NetApp." #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "" "Fexvisor non è riuscito ad aggiungere il volume %(id)s a causa di %(reason)s." #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor non è riuscito ad unirsi al volume %(vol)s nel gruppo %(group)s a " "causa di %(ret)s." #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor non è riuscito a rimuovere il volume %(vol)s nel gruppo %(group)s a " "causa di %(ret)s." #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." 
msgstr "" "Fexvisor non è riuscito a rimuovere il volume %(id)s a causa di %(reason)s." #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "Errore di controllo SAN di Fibre Channel: %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "Operazione Fibre Channel Zone non riuscita: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "Errore di controllo connessione di Fibre Channel: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "Impossibile trovare il file %(file_path)s." #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Il file %(path)s ha un file di backup %(bfile)s non valido, operazione " "interrotta." #, python-format msgid "File already exists at %s." msgstr "Il file già esiste in %s." #, python-format msgid "File already exists at: %s" msgstr "Il file già esiste in: %s" msgid "Find host in hostgroup error." msgstr "Errore di rilevamento host nel gruppo host." msgid "Find host lun id error." msgstr "Errore di rilevamento id lun host." msgid "Find lun group from mapping view error." msgstr "Errore di rilevamento gruppo lun dalla vista associazione." msgid "Find lun number error." msgstr "Errore di rilevamento numero lun." msgid "Find mapping view error." msgstr "Errore di rilevamento vista associazione." msgid "Find portgroup error." msgstr "Errore di rilevamento gruppo porte." msgid "Find portgroup from mapping view error." msgstr "Errore di rilevamento gruppo porte dalla vista associazione." #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "La politica della cache flash richiede WSAPI versione '%(fcache_version)s', " "è installata la versione '%(version)s'." #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "Flexvisor assegnazione volume non riuscita: %(id)s:%(status)s." #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Flexvisor errore di assegnazione volume:%(id)s:%(status)s." #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor non è riuscito a trovare l'istantanea del volume %(id)s nel gruppo " "%(vgid)s istantanea %(vgsid)s." #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "Flexvisor non è riuscito a creare il volume.:%(volumeid)s:%(status)s." #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor non è riuscito ad eliminare il volume %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "" "Flexvisor non è riuscito ad aggiungere il volume %(id)s al gruppo %(cgid)s." #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor non è riuscito ad assegnare il volume %(id)s perché non riesce ad " "eseguire la query dello stato utilizzando l'id evento." #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor non è riuscito ad assegnare il volume %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "" "Flexvisor non è riuscito ad assegnare il volume %(volume)s iqn %(iqn)s." 
#, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor non è riuscito a clonare il volume %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "" "Flexvisor non è riuscito a clonare il volume (evento get non riuscito) " "%(id)s." #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" "Flexvisor non è riuscito a creare l'istantanea per il volume %(id)s: " "%(status)s." #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "Flexvisor non è riuscito a creare l'istantanea per il (evento get non " "riuscito) %(id)s." #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "" "Flexvisor non è riuscito a creare il volume %(id)s nel gruppo %(vgid)s." #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor non è riuscito a creare il volume %(volume)s: %(status)s." #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor non è riuscito a creare il volume (evento get) %s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" "Flexvisor non è riuscito a creare il volume da un'istantanea %(id)s: " "%(status)s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor non è riuscito a creare il volume da un'istantanea %(id)s: " "%(status)s." #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor non è riuscito a creare il volume da un'istantanea (evento get non " "riuscito) %(id)s." #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor non è riuscito ad eliminare l'istantanea %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor non è riuscito ad eliminare l'istantanea (evento get non " "riuscito) %(id)s." #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor non è riuscito ad eliminare il volume %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor non è riuscito ad estendere il volume %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor non è riuscito ad estendere il volume %(id)s: %(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "Flexvisor non è riuscito ad estendere il volume (evento get non riuscito) " "%(id)s." #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "" "Flexvisor non è riuscito ad acquisire le informazioni sul pool %(id)s: " "%(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Flexvisor non è riuscito ad ottenere l'ID istantanea del volume %(id)s dal " "gruppo %(vgid)s." #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "" "Flexvisor non è riuscito a rimuovere il volume %(id)s dal gruppo %(cgid)s." #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor non è riuscito a generare il volume da un'istantanea %(id)s:" "%(status)s." 
#, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor non è riuscito a generare il volume da un'istantanea (evento get " "non riuscito) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "" "Flexvisor non è riuscito ad annullare l'assegnazione del volume %(id)s: " "%(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" "Flexvisor non è riuscito ad annullare l'assegnazione del volume (evento get) " "%(id)s." #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "" "Flexvisor non è riuscito ad annullare l'assegnazione del volume: %(id)s: " "%(status)s." #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor è riuscito a trovare l'origine del volume %(id)s." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" "Flexvisor errore annullando l'assegnazione del volume:%(id)s:%(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "" "Il volume Flexvisor %(id)s non è riuscito ad unirsi al gruppo %(vgid)s." #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "La cartella %s non esiste nell'applicazione Nexenta Store" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS non in esecuzione, stato: %s." msgid "Gateway VIP is not set" msgstr "VIP Gateway non impostato " msgid "Get FC ports by port group error." msgstr "Errore di richiamo porte FC per gruppo di porte." msgid "Get FC ports from array error." msgstr "Errore di richiamo porte FC dall'array. " msgid "Get FC target wwpn error." msgstr "Errore di richiamo wwpn di destinazione FC." msgid "Get HyperMetroPair error." msgstr "Errore di richiamo HyperMetroPair." msgid "Get LUN group by view error." msgstr "Errore di richiamo gruppo LUN per vista." msgid "Get LUNcopy information error." msgstr "Errore di richiamo delle informazioni LUNcopy. " msgid "Get QoS id by lun id error." msgstr "Errore di richiamo id QoS tramite id lun. " msgid "Get QoS information error." msgstr "Errore di richiamo delle informazioni QoS. " msgid "Get QoS policy error." msgstr "Errore di richiamo della politica QoS." msgid "Get SplitMirror error." msgstr "Errore di richiamo SplitMirror." msgid "Get array info error." msgstr "Errore di richiamo informazioni sull'array." msgid "Get cache by name error." msgstr "Errore di richiamo cache per nome. " msgid "Get connected free FC wwn error." msgstr "Errore di acquisizione wwn FC libero connesso. " msgid "Get engines error." msgstr "Errore di richiamo motori. " msgid "Get host initiators info failed." msgstr "Richiamo info iniziatori host non riuscito. " msgid "Get hostgroup information error." msgstr "Errore di richiamo delle informazioni gruppo host. " msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "Errore di richiama info porta iSCSI, controllare l'IP di destinazione " "configurato nelfile conf huawei." msgid "Get iSCSI port information error." msgstr "Errore di richiamo informazioni porta iscsi." msgid "Get iSCSI target port error." msgstr "Errore di richiamo porta di destinazione iscsi." msgid "Get lun id by name error." msgstr "Errore di richiamo dell'ID lun per nome. " msgid "Get lun migration task error." msgstr "Errore di richiamo attività di migrazione lun. 
" msgid "Get lungroup id by lun id error." msgstr "Errore di richiamo id gruppo lun tramite id lun. " msgid "Get lungroup information error." msgstr "Errore di richiamo delle informazioni gruppo lun. " msgid "Get migration task error." msgstr "Errore di richiamo attività di migrazione. " msgid "Get pair failed." msgstr "Richiamo coppia non riuscito." msgid "Get partition by name error." msgstr "Errore di richiamo partizione per nome. " msgid "Get partition by partition id error." msgstr "Errore di richiamo partizione per id partizione. " msgid "Get port group by view error." msgstr "Errore di richiamo gruppo di porte per vista." msgid "Get port group error." msgstr "Errore di richiamo gruppo di porte." msgid "Get port groups by port error." msgstr "Errore di richiamo gruppi di porte per porta." msgid "Get ports by port group error." msgstr "Errore di richiamo porte per gruppo di porte." msgid "Get remote device info failed." msgstr "Richiamo informazioni dispositivo remoto non riuscito." msgid "Get remote devices error." msgstr "Errore di richiamo dispositivi remoti. " msgid "Get smartcache by cache id error." msgstr "Errore di richiamo smartcache per id cache. " msgid "Get snapshot error." msgstr "Errore di richiamo istantanea." msgid "Get snapshot id error." msgstr "Errore di richiamo id istantanea." msgid "Get target IP error." msgstr "Errore di richiamo IP di destinazione. " msgid "Get target LUN of SplitMirror error." msgstr "Errore di richiamo LUN di destinazione di SplitMirror." msgid "Get views by port group error." msgstr "Errore di richiamo viste per gruppo di porte." msgid "Get volume by name error." msgstr "Errore di richiamo volume per nome. " msgid "Get volume error." msgstr "Errore di richiamo volume. " #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Impossibile aggiornare i metadati Glance, la chiave %(key)s esiste per l'id " "volume %(volume_id)s" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" "Impossibile trovare i metadati di Glance per il volume/istantanea %(id)s." #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "Il file di configurazione Gluster in %(config)s non esiste" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Errore api di Google Cloud Storage: %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Errore di connessione di Google Cloud Storage: %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Errore oauth2 di Google Cloud Storage: %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "" "Ricevute informazioni relative al percorso non corrette da DRBDmanage! (%s)" msgid "HBSD error occurs." msgstr "Si è verificato un errore HBSD." msgid "HNAS has disconnected SSC" msgstr "HNAS ha disconnesso SSC" msgid "HPELeftHand url not found" msgstr "URL HPELeftHand non trovato" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." msgstr "" "La verifica del certificato HTTPS è stata richiesta ma non può essere " "abilitata con la versione del modulo purestorage %(version)s. Eseguire " "l'aggiornamento a una versione più recente per abilitare questa funzione." 
#, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "La dimensione del blocco hash è stata modificata dall'ultimo backup. Nuova " "dimensione del blocco hash: %(new)s. Dimensione del blocco hash precedente: " "%(old)s. Eseguire un backup completo." #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "Non sono stati creati livelli %(tier_levels)s. " #, python-format msgid "Hint \"%s\" not supported." msgstr "Suggerimento \"%s\" non supportato." msgid "Host" msgstr "Host" #, python-format msgid "Host %(host)s could not be found." msgstr "Impossibile trovare l'host %(host)s." #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "L'host %(host)s non corrisponde al contenuto del certificato x509: " "CommonName %(commonName)s." #, python-format msgid "Host %s has no FC initiators" msgstr "L'host %s non ha iniziatori FC" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "L'host %s non ha un iniziatore iSCSI" #, python-format msgid "Host '%s' could not be found." msgstr "Impossibile trovare l'host '%s'." #, python-format msgid "Host group with name %s not found" msgstr "Gruppo host con nome %s non trovato" #, python-format msgid "Host group with ref %s not found" msgstr "Gruppo host con riferimento %s non trovato" msgid "Host is NOT Frozen." msgstr "L'host NON è bloccato." msgid "Host is already Frozen." msgstr "L'host è già bloccato." msgid "Host not found" msgstr "Host non trovato" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Host non trovato. Impossibile rimuovere %(service)s su %(host)s." #, python-format msgid "Host replication_status must be %s to failover." msgstr "L'host replication_status deve essere %s per eseguire il failover." #, python-format msgid "Host type %s not supported." msgstr "Tipo host %s non supportato." #, python-format msgid "Host with ports %(ports)s not found." msgstr "L'host con porte %(ports)s non trovato." msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "" "Hypermetro e Replica non possono essere utilizzati nello stesso tipo di " "volume." #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "Il gruppo I/O %(iogrp)d non è valido; i gruppi I/O disponibili sono " "%(avail)s." msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "Indirizzo IP o nome host dell'API Blockbridge. " msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "Se la compressione è impostata su True, anche rsize deve essere impostato " "(non uguale a -1)." msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "Se nofmtdisk è impostato su True, anche rsize deve essere impostato su -1." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "Specificato valore non valido '%(prot)s' per " "flashsystem_connection_protocol: i valori validi sono %(enabled)s." msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "Valore non consentito specificato per IOTYPE: 0, 1 o 2." msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "" "Valore non consentito specificato per smarttier: impostare su 0, 1, 2, o 3." 
msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "Valore non consentito specificato per storwize_svc_vol_grainsize: impostare " "su 32, 64, 128 o 256." msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "Valore non consentito specificato per thin: Impossibile impostare thin e " "thickcontemporaneamente." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Impossibile trovare l'immagine %(image_id)s." #, python-format msgid "Image %(image_id)s is not active." msgstr "L'immagine %(image_id)s non è attiva." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "L'immagine %(image_id)s non è accettabile: %(reason)s" msgid "Image location not present." msgstr "Ubicazione di immagine non presente." #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "La dimensione virtuale immagine è %(image_size)d GB e non rientra in un " "volume di dimensione %(volume_size)dGB." msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "Errore ImageBusy generato durante l'eliminazione del volume rbd. Ciò può " "essere causato da una connessione da un client che si è interrotta e, in " "questo caso, può essere risolto provando a ripetere l'eliminazione dopo 30 " "secondi." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "Importazione del record non riuscita; non è stato possibile trovare il " "servizio di backup per eseguire l'importazione. Richiesta del servizio " "%(service)s" msgid "Incorrect request body format" msgstr "Il formato del corpo della richiesta non è corretto" msgid "Incorrect request body format." msgstr "Il formato della struttura della richiesta non è corretto." msgid "Incremental backups exist for this backup." msgstr "Per questo backup esistono backup incrementali." #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Eccezione CLI Infortrend: %(err)s Param: %(param)s (Codice di ritorno: " "%(rc)s) (Output: %(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "Livello iniziale: {}, politica: {} non valido." msgid "Input type {} is not supported." msgstr "Tipo di input {} non supportato." msgid "Input volumes or snapshots are invalid." msgstr "Istantanee o volumi di input non validi." msgid "Input volumes or source volumes are invalid." msgstr "Volumi di input o di origine non validi." #, python-format msgid "Instance %(uuid)s could not be found." msgstr "Impossibile trovare l'istanza %(uuid)s." msgid "Insufficient free space available to extend volume." msgstr "Spazio libero disponibile insufficiente per estendere il volume." msgid "Insufficient privileges" msgstr "Privilegi insufficienti" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "" "Valore di intervallo (in secondi) tra i tentativi di connessione al cluster " "ceph. " #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "Porte %(protocol)s %(port)s non valide specificate per io_port_list." 
#, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Dominio 3PAR non valido: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "Valore ALUA non valido. Il valore ALUA deve essere 1 o 0. " msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" "Gli argomenti (args) Ceph forniti per l'operazione di backup rbd non sono " "validi" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "CgSnapshot non valido: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "ConsistencyGroup non valido: %(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "ConsistencyGroup non valido: Lo stato del gruppo di coerenza deve essere " "available o error ma lo stato corrente è: in-use" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "ConsistencyGroup non valido: lo stato del gruppo di coerenza deve essere " "available, ma lo stato corrente è: %s." msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" "ConsistencyGroup non valido: nessun host per la creazione del gruppo di " "coerenza" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "Trovata versione API HPEeftHand non valida: %(found)s. È richiesta la " "versione %(minimum)s o superiore per gestire/annullare la gestione del " "supporto." #, python-format msgid "Invalid IP address format: '%s'" msgstr "Formato indirizzo IP non valido: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "Specifica QoS non valida rilevata durante il richiamo della politica QoS per " "il volume %s " #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "Destinazione di replica non valida: %(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "Tipo di autenticazione VNX non valido: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Specifica di condivisione Virtuozzo Storage non valida: %r. Deve essere: " "[MDS1[,MDS2],...:/][:PASSWORD]." #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" "Versione XtremIO non valida %(cur)s, è richiesta la versione %(min)s o " "successiva" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "" "Quote allocate non valide definite per le seguenti quote del progetto: %s" msgid "Invalid argument" msgstr "Argomento non valido" msgid "Invalid argument - negative seek offset." msgstr "Argomento non valido - offset di ricerca negativo." #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "Argomento non valido - whence=%s non supportato" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "Argomento non valido - whence=%s non supportato." #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" "Modalità di collegamento non valida '%(mode)s' per il volume %(volume_id)s." 
#, python-format msgid "Invalid auth key: %(reason)s" msgstr "Chiave di autenticazione non valida: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "Backup non valido: %(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "URL API Barbican non valido: è richiesta la versione, ad esempio 'http[s]://" "|[:port]/', l'url specificato è: %s" msgid "Invalid cgsnapshot" msgstr "cgsnapshot non valido" msgid "Invalid chap user details found in CloudByte storage." msgstr "Trovati dettagli utente chap non validi nella memoria CloudByte." #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "" "Risposta di inizializzazione di connessione del volume non valida %(name)s" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "" "Risposta di inizializzazione di connessione del volume non valida %(name)s: " "%(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo di contenuto non valido%(content_type)s." msgid "Invalid credentials" msgstr "Credenziali non valide" #, python-format msgid "Invalid directory: %s" msgstr "Directory non valida: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "Tipo adattatore disco non valido: %(invalid_type)s." #, python-format msgid "Invalid disk backing: %s." msgstr "Backup del disco non valido: %s." #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "Tipo disco non valido: %(disk_type)s." #, python-format msgid "Invalid disk type: %s." msgstr "Tipo disco non valido: %s." #, python-format msgid "Invalid host: %(reason)s" msgstr "Host non valido: %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "Trovata versione di hpe3parclient non valida (%(found)s). È richiesta la " "versione %(minimum)s o versioni successive. Eseguire \"pip install --" "upgrade python-3parclient\" per aggiornare hpe3parclient." #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "Trovata versione di hpelefthandclient non valida (%(found)s). È richiesta la " "versione %(minimum)s o versioni successive. Eseguire \"pip install --" "upgrade python-lefthandclient\" per aggiornare hpelefthandclient." #, python-format msgid "Invalid image href %(image_href)s." msgstr "href immagine %(image_href)s non valido." msgid "Invalid image identifier or unable to access requested image." msgstr "" "Identificativo dell'immagine non valido oppure non è possibile accedere " "all'immagine richiesta." msgid "Invalid imageRef provided." msgstr "imageRef specificato non è valido." msgid "Invalid initiator value received" msgstr "Ricevuto valore di iniziatore non valido" msgid "Invalid input" msgstr "Input non valido" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Input ricevuto non valido: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtro is_public non valido [%s]" #, python-format msgid "Invalid lun type %s is configured." msgstr "Configurato tipo di lun non valido %s." 
#, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Dimensione metadati non valida: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Metadati non validi: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "Base del punto di montaggio non valida: %s" #, python-format msgid "Invalid mount point base: %s." msgstr "Base del punto di montaggio non valida: %s" #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Nuovo nome snapCPG non valido per la riscrittura new_snap_cpg='%s'." #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Numero di porta non valido %(config)s per la porta RPC Coho" #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "" "Configurato tipo di prefetch non valido '%s'. PrefetchType deve essere " "0,1,2,3." #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "Specifiche qos non valide: %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "" "Richiesta non valida per collegare il volume a una destinazione non valida" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "Richiesta non valida per collegare il volume con una modalità non valida. La " "modalità di collegamento deve essere 'rw' o 'ro'" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Scadenza prenotazione non valida %(expire)s." msgid "Invalid response header from RPC server" msgstr "Intestazione di risposta non valida dal server RPC" #, python-format msgid "Invalid secondary id %s." msgstr "L'id secondario %s e' invalido. " #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "" "Specificato ID di backend secondario non valido. L'ID di backend valido è %s." msgid "Invalid service catalog json." msgstr "json del catalogo del servizio non è valido." msgid "Invalid sheepdog cluster status." msgstr "Stato del cluster sheepdog non valido. " #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "Istantanea non valida: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "Stato non valido: '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "" "Richiesto un pool dell'archivio %s non valido. Nuova immissione non riuscita." #, python-format msgid "Invalid storage pool %s specificed." msgstr "Specificato un pool dell'archivio %s non valido." msgid "Invalid storage pool is configured." msgstr "Configurato pool di archiviazione non valido." #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "" "Specificata modalità di sincronizzazione non valida, la modalità consentita " "è %s." msgid "Invalid transport type." msgstr "Tipo di trasporto non valido." #, python-format msgid "Invalid update setting: '%s'" msgstr "Impostazione di aggiornamento non valida: '%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "URL non valido: deve essere nel formato 'http[s]://|[:port]/" "', l'url specificato è: %s" #, python-format msgid "Invalid value '%s' for force." msgstr "Valore non valido '%s' per force." #, python-format msgid "Invalid value '%s' for force. " msgstr "Valore non valido '%s' per force. " #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." 
msgstr "Valore non valido '%s' per is_public. Valori accettati: True o False. " #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "Valore non valido '%s' per skip_validation. " #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "Valore non valido per 'bootable': '%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "Valore non valido per 'force': '%s'" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "Valore non valido per 'readonly': '%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "Valore non valido per 'scheduler_max_attempts', deve essere >= 1" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "" "Valore non valido per l'opzione di configurazione NetApp netapp_host_type." msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "" "Valore non valido per l'opzione di configurazione NetApp netapp_lun_ostype." #, python-format msgid "Invalid value for age, %(age)s" msgstr "Valore non valido per l'età, %(age)s" #, python-format msgid "Invalid value: \"%s\"" msgstr "Valore non valido: \"%s\"" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "Dimensione del volume fornita non valida per la richiesta di creazione: %s " "(l'argomento size deve essere un numero intero (o la rappresentazione " "stringa di un numero intero) e maggiore di zero)." #, python-format msgid "Invalid volume type: %(reason)s" msgstr "Tipo di volume non valido: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Volume non valido: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "Volume non valido: impossibile aggiungere il volume %(volume_id)s al gruppo " "di coerenza %(group_id)s perché il volume si trova in uno stato non valido: " "%(status)s. Gli stati validi sono: ('available', 'in-use')." #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "Volume non valido: impossibile aggiungere il volume %(volume_id)s al gruppo " "di coerenza %(group_id)s perché il tipo di volume %(volume_type)s non è " "supportato dal gruppo." #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "Volume non valido: impossibile aggiungere fake-volume-uuid del volume dal " "gruppo di coerenza %(group_id)s perché non è possibile trovare il volume." #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "Volume non valido: impossibile rimuovere fake-volume-uuid del volume dal " "gruppo di coerenza %(group_id)s perché non è presente nel gruppo." #, python-format msgid "Invalid volume_type passed: %s." msgstr "Passato volume_type non valido: %s." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." 
msgstr "" "volume_type fornito non valido: %s (il tipo richiesto non è compatibile; far " "corrisponde col volume di origine oppure omettere l'argomento tipo)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "volume_type fornito non valido: %s (il tipo richiesto non è compatibile;si " "consiglia di omettere l'argomento tipo)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "volume_type fornito non valido: %s (il tipo richiesto deve essere supportato " "da questo gruppo di coerenza)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "Formato wwpn non valido %(wwpns)s" msgid "Invoking web service failed." msgstr "Richiamo del servizio Web non riuscito." msgid "Issue encountered waiting for job." msgstr "Si è verificato un problema in attesa del job." msgid "Issue encountered waiting for synchronization." msgstr "Si è verificato un problema in attesa della sincronizzazione." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "Esecuzione del failover non riuscita perché la replica non è configurata " "correttamente." msgid "Item not found" msgstr "Elemento non trovato" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" "ID lavoro non trovato nella risposta alla creazione volume di CloudByte [%s]." #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "" "ID lavoro non trovato nella risposta all'eliminazione volume di CloudByte " "[%s]." msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "I nomi delle chiavi possono contenere solo caratteri alfanumerici, di " "sottolineatura, punti, due punti e trattini." #, python-format msgid "KeyError: %s" msgstr "KeyError: %s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "" "Keystone versione 3 o successiva deve essere utilizzato per ottenere il " "supporto delle quote nidificate." #, python-format msgid "LU does not exist for volume: %s" msgstr "LUN non esiste per il volume: %s" msgid "LUN export failed!" msgstr "Esportazione LUN non riuscita. " msgid "LUN id({}) is not valid." msgstr "ID LUN ({}) non valido." msgid "LUN map overflow on every channel." msgstr "Eccedenza mappa LUN su ogni canale. " #, python-format msgid "LUN not found with given ref %s." msgstr "LUN non trovata con il riferimento fornito %s." msgid "LUN number ({}) is not an integer." msgstr "Il numero LUN ({}) non è un numero intero." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "Il numero LUN è fuori dai limiti sul canale id: %(ch_id)s. " #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "La LUN con riferimento fornito %(ref)s non soddisfa il tipo di volume. " "Verificare che il volume LUN con funzioni ssc sia presente su vserver %(vs)s." #, python-format msgid "Last %s cinder syslog entries:-" msgstr "Ultime %s voci del syslog cinder:-" msgid "LeftHand cluster not found" msgstr "Cluster LeftHand non trovato" msgid "License is unavailable." msgstr "La licenza non è disponibile." 
#, python-format msgid "Line %(dis)d : %(line)s" msgstr "Riga %(dis)d : %(line)s" msgid "Link path already exists and its not a symlink" msgstr "Il percorso di collegamento già esiste e non è un symlink" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "Clone collegato del volume di origine non supportato nello stato: %s." msgid "Lock acquisition failed." msgstr "Acquisizione blocco non riuscita." msgid "Logout session error." msgstr "Errore della sessione di logout." msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "Servizio di ricerca non configurato. L'opzione di configurazione per " "fc_san_lookup_service deve specificare un'implementazione concreta del " "servizio di ricerca." msgid "Lun migration error." msgstr "Errore di migrazione Lun." #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "" "MD5 dell'oggetto: %(object_name)s prima: %(md5)s e dopo: %(etag)s non è lo " "stesso." #, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED: %r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED: AUTH_ERROR: %r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED: RPC_MISMATCH: %r" #, python-format msgid "Malformed fcns output string: %s" msgstr "Stringa di output fcns non corretta: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Corpo del messaggio non valido: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "Stringa nameserver non formata correttamente: %s" msgid "Malformed request body" msgstr "Corpo richiesta non corretto" msgid "Malformed request body." msgstr "Formato del corpo della richiesta non corretto." msgid "Malformed request url" msgstr "url richiesta non corretto" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "Risposta non valida per il comando %(cmd)s: %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "Attributo scheduler_hints non corretto" #, python-format msgid "Malformed show fcns database string: %s" msgstr "Stringa comando show fcns database non corretta: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "Configurazione di zona non corretta: (switch=%(switch)s zone_config=" "%(zone_config)s)." #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "Stato della zona non corretto: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgid "Manage existing get size requires 'id'." msgstr "Acquisizione dimensione gestione esistente richiede 'id'." msgid "Manage existing snapshot not implemented." msgstr "Gestione dell'istantanea esistente non implementata." #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "La gestione del volume esistente non è riuscita a causa del riferimento di " "backend non valido %(existing_ref)s: %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "La gestione del volume esistente non è riuscita a causa della mancata " "corrispondenza del tipo di volume: %(reason)s" msgid "Manage existing volume not implemented." msgstr "Gestione del volume esistente non implementato." msgid "Manage existing volume requires 'source-id'." 
msgstr "La gestione del volume esistente richiede 'source-id'. " #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "Gestione del volume non supportata se è abilitato FAST. Politica FAST: " "%(fastPolicyName)s." msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "" "La gestione di istantanee su volumi sottoposti a failover non è consentita." #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "La preparazione dell'associazione %(id)s non è stata completata entro il " "timeout di secondi %(to)d assegnati. Interruzione in corso." #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "Vista di mascheramento %(maskingViewName)s non eliminata correttamente" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "Numero massimo di backup consentiti (%(allowed)d) superato" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "Numero massimo di istantanee consentite (%(allowed)d) superato" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "Numero massimo di volumi consentiti (%(allowed)d) superato per la quota " "'%(name)s'." #, python-format msgid "May specify only one of %s" msgstr "È possibile specificare soltanto uno di %s" msgid "Metadata backup already exists for this volume" msgstr "Il backup dei metadati esiste già per questo volume" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "L'oggetto di backup di metadati '%s' esiste già" msgid "Metadata item was not found" msgstr "L'elemento metadati non è stato trovato" msgid "Metadata item was not found." msgstr "L'elemento metadati non è stato trovato." #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "La chiave della proprietà dei metadati %s supera i 255 caratteri" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "" "Il valore della chiave della proprietà dei metadati %s supera i 255 caratteri" msgid "Metadata property key blank" msgstr "La chiave della proprietà dei metadati è vuota" msgid "Metadata property key blank." msgstr "Chiave della proprietà dei metadati vuota." msgid "Metadata property key greater than 255 characters." msgstr "La chiave della proprietà dei metadati contiene più di 255 caratteri." msgid "Metadata property value greater than 255 characters." msgstr "Valore della proprietà dei metadati maggiore di 255 caratteri." msgid "Metadata restore failed due to incompatible version" msgstr "" "Il ripristino dei metadati non è riuscito a causa di una versione non " "compatibile" msgid "Metadata restore failed due to incompatible version." msgstr "" "Ripristino dei metadati non riuscito a causa di una versione non compatibile." #, python-format msgid "Migrate volume %(src)s failed." msgstr "Migrazione volume %(src)s non riuscita. " #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "Migrazione volume non riuscita tra vol origine %(src)s e vol destinazione " "%(dst)s. " #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "La migrazione di LUN %s è stata arrestata o è in errore." msgid "MirrorView/S enabler is not installed." msgstr "L'abilitatore MirrorView/S non è installato." 
msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "Modulo python 'purestorage' non presente, accertarsi che la libreria sia " "installata e disponibile." msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "" "Manca parametro di configurazione di Fibre Channel SAN - fc_fabric_names" msgid "Missing request body" msgstr "Manca il corpo della richiesta" msgid "Missing request body." msgstr "Corpo della richiesta mancante." #, python-format msgid "Missing required element '%s' in request body" msgstr "Manca l'elemento '%s' richiesto nel corpo della richiesta" #, python-format msgid "Missing required element '%s' in request body." msgstr "Manca l'elemento '%s' richiesto nel corpo della richiesta. " msgid "Missing required element 'consistencygroup' in request body." msgstr "" "Manca l'elemento 'consistencygroup' richiesto nel corpo della richiesta. " msgid "Missing required element 'host' in request body." msgstr "Manca l'elemento 'host' richiesto nel corpo della richiesta." msgid "Missing required element quota_class_set in request body." msgstr "" "Elemento quota_class_set obbligatorio mancante nel corpo della richiesta." msgid "Missing required element snapshot in request body." msgstr "Elemento istantanea obbligatorio mancante nel corpo della richiesta." msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "Trovati più SerialNumber mentre ne era previsto uno solo per questa " "operazione. Modificare il file di configurazione EMC. " #, python-format msgid "Multiple copies of volume %s found." msgstr "trovate più copie del volume %s." #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "Sono state rilevate più corrispondenze per '%s', utilizzare un ID per essere " "più precisi." msgid "Multiple profiles found." msgstr "Trovati più profili. " msgid "Must implement a fallback schedule" msgstr "È necessario implementare una pianificazione fallback" msgid "Must implement find_retype_host" msgstr "È necessario implementare find_retype_host" msgid "Must implement host_passes_filters" msgstr "È necessario implementare host_passes_filters" msgid "Must implement schedule_create_consistencygroup" msgstr "È necessario implementare schedule_create_consistencygroup" msgid "Must implement schedule_create_volume" msgstr "È necessario implementare schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "È necessario implementare schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." msgstr "È necessario passare wwpn o host a lsfabric." msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "Eseguire questo comando come admin cloud utilizzando un Keystone policy.json " "che consenta all'admin cloud di visualizzare e ottenere qualsiasi progetto." msgid "Must specify 'connector'" msgstr "È necessario specificare 'connector'" msgid "Must specify 'connector'." msgstr "È necessario specificare 'connector'." msgid "Must specify 'host'." msgstr "È necessario specificare 'host'." msgid "Must specify 'new_volume'" msgstr "È necessario specificare 'new_volume'" msgid "Must specify 'status'" msgstr "È necessario specificare 'status'" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." 
msgstr "" "È necessario specificare 'status', 'attach_status' o 'migration_status' per " "l'aggiornamento." msgid "Must specify a valid attach status" msgstr "È necessario specificare uno stato di allegato valido" msgid "Must specify a valid migration status" msgstr "È necessario specificare uno stato di migrazione valido" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "È necessario specificare un utente tipo %(valid)s valido, il valore " "'%(persona)s' non è valido." #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "È necessario specificare un tipo di provisioning %(valid)s valido, il valore " "'%(prov)s' non è valido." msgid "Must specify a valid status" msgstr "È necessario specificare uno stato valido" msgid "Must specify an ExtensionManager class" msgstr "È necessario specificare una classe ExtensionManager" msgid "Must specify bootable in request." msgstr "È necessario specificare bootable nella richiesta." msgid "Must specify protection domain name or protection domain id." msgstr "" "È necessario specificare il nome dominio di protezione o l'ID del dominio di " "protezione. " msgid "Must specify readonly in request." msgstr "È necessario specificare readonly nella richiesta." msgid "Must specify snapshot source-name or source-id." msgstr "È necessario specificare source-name o source-id dell'istantanea." msgid "Must specify source-name or source-id." msgstr "È necessario specificare source-name o source-id." msgid "Must specify storage pool name or id." msgstr "È necessario specificare il nome o l'ID del pool di memoria. " msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "È necessario specificare i pool di archiviazione. Opzione: sio_storage_pools." msgid "Must supply a positive value for age" msgstr "È necessario fornire un valore positivo per l'età" msgid "Must supply a positive, non-zero value for age" msgstr "È necessario fornire un valore positivo, diverso da zero per l'età" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "Configurazione della NAS '%(name)s=%(value)s' non valida. Deve essere " "'auto', 'true', o 'false'" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "Il file di configurazione NFS in %(config)s non esiste" #, python-format msgid "NFS file %s not discovered." msgstr "File NFS %s non rilevato." msgid "NFS file could not be discovered." msgstr "Impossibile rilevare il file NFS." msgid "NaElement name cannot be null." msgstr "Il nome NaElement non può essere null." msgid "Name" msgstr "Nome" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "Il nome, la descrizione, add_volumes e remove_volumes non possono essere " "vuoti nel corpo della richiesta." msgid "Need non-zero volume size" msgstr "Necessaria dimensione volume diversa da zero" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "Né MSG_DENIED né MSG_ACCEPTED: %r" msgid "NetApp Cinder Driver exception." msgstr "Eccezione del driver Cinder di NetApp." #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "La nuova dimensione per l'estensione deve essere superiore alla dimensione " "corrente. (corrente: %(size)s, esteso: %(new_size)s)." 
#, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "La nuova dimensione deve essere maggiore della dimensione reale " "dell'archiviazione di backend. dimensione reale: %(oldsize)s, nuova " "dimensione: %(newsize)s." msgid "New volume size must be specified as an integer." msgstr "" "La nuova dimensione di volume deve essere specificata come numero intero." msgid "New volume type must be specified." msgstr "È necessario specificare il tipo del nuovo volume." msgid "New volume type not specified in request_spec." msgstr "Nuovo tipo di volume non specificato in request_spec." #, python-format msgid "New volume_type same as original: %s." msgstr "Nuovo volume_type uguale all'originale: %s." msgid "Nimble Cinder Driver exception" msgstr "Eccezione driver Nimble Cinder" msgid "No FC initiator can be added to host." msgstr "Nessun iniziatore FC può essere aggiunto all'host." msgid "No FC port connected to fabric." msgstr "Nessuna porta FC collegata a fabric." msgid "No FCP targets found" msgstr "Nessuna destinazione FCP trovata" msgid "No Port Group elements found in config file." msgstr "" "Nessun elemento del gruppo di porte trovato nel file di configurazione." msgid "No VF ID is defined in the configuration file." msgstr "Nessun ID VF definito nel file di configurazione." msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "Nessun portale iSCSI attivo con gli IP iSCSI forniti " #, python-format msgid "No available service named %s" msgstr "Nessun servizio disponibile denominato %s" #, python-format msgid "No backup with id %s" msgstr "Nessun backup con id %s" msgid "No backups available to do an incremental backup." msgstr "Nessun backup disponibile per eseguire un backup incrementale." msgid "No big enough free disk" msgstr "Nessun disco disponibile è abbastanza grande" #, python-format msgid "No cgsnapshot with id %s" msgstr "Nessun cgsnapshot con id %s" msgid "No cinder entries in syslog!" msgstr "Nessuna voce cinder nel syslog!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "Nessuna LUN clonata chiamata %s è stata trovata nel filer" msgid "No config node found." msgstr "Nessun nodo config trovato. " #, python-format msgid "No consistency group with id %s" msgstr "Nessun gruppo di coerenza con id %s" #, python-format msgid "No element by given name %s." msgstr "Nessun elemento dal nome specificato %s." msgid "No errors in logfiles!" msgstr "Nessun errore nei file di log!" #, python-format msgid "No file found with %s as backing file." msgstr "Nessun file trovato con %s come file di backup." #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "Nessun ID LUN rimasto libero. Il numero massimo di volumi che possono essere " "collegati all'host (%s) è stato superato. " msgid "No free disk" msgstr "Nessun disco disponibile" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "Nessun portale iscsi valido trovato nell'elenco fornito per %s." #, python-format msgid "No good iscsi portals found for %s." msgstr "Nessun portale iscsi valido trovato in %s." #, python-format msgid "No host to create consistency group %s." msgstr "Nessun host per creare il gruppo di coerenza %s." msgid "No iSCSI-enabled ports on target array." msgstr "Nessuna porta abilitata a iSCSI nell'array di destinazione." msgid "No image_name was specified in request." 
msgstr "Nessun image_name specificato nella richiesta." msgid "No initiator connected to fabric." msgstr "Nessun iniziatore collegato a fabric." #, python-format msgid "No initiator group found for initiator %s" msgstr "Nessun gruppo iniziatore trovato per l'iniziatore %s" msgid "No initiators found, cannot proceed" msgstr "Nessun iniziatore trovato, impossibile continuare" #, python-format msgid "No interface found on cluster for ip %s" msgstr "Nessuna interfaccia trovata nel cluster per l'ip %s" msgid "No ip address found." msgstr "Nessun indirizzo IP rilevato. " msgid "No iscsi auth groups were found in CloudByte." msgstr "Non è stato trovato alcun gruppo aut iscsi in CloudByte" msgid "No iscsi initiators were found in CloudByte." msgstr "Non è stato trovato alcun iniziatore iscsi in CloudByte" #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "Nessun servizio iscsi trovato per il volume CloudByte [%s]." msgid "No iscsi services found in CloudByte storage." msgstr "Nessun servizio iscsi rilevato nell'archivio CloudByte." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "" "Nessun file di chiavi specificato e non è possibile caricare la chiave da " "%(cert)s %(e)s." msgid "No mounted Gluster shares found" msgstr "Non è stata trovata nessuna condivisione di Gluster montata" msgid "No mounted NFS shares found" msgstr "Non è stata trovata nessuna condivisione di NFS montata" msgid "No mounted SMBFS shares found." msgstr "Non è stata trovata nessuna condivisione SMBFS montata." msgid "No mounted Virtuozzo Storage shares found" msgstr "Non è stata trovata alcuna condivisione Virtuozzo Storage montata" msgid "No mounted shares found" msgstr "Non è stata trovata nessuna condivisione montata" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "Nessun nodo trovato nel gruppo I/O %(gid)s per il volume %(vol)s." msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "Nessun pool disponibile per i volumi di provisioning. Assicurarsi " "chel'opzione di configurazione netapp_pool_name_search_pattern sia impostata " "correttamente. " msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "Nessuna risposta ricevuta dalla chiamata API utente aut iSCSI dell'elenco " "della memoriaCloudByte." msgid "No response was received from CloudByte storage list tsm API call." msgstr "" "Nessuna risposta ricevuta dalla chiamata API tsm dell'elenco dell'archivio " "CloudByte." msgid "No response was received from CloudByte's list filesystem api call." msgstr "" "Nessuna risposta ricevuta dalla chiamata API del file system dell'elenco " "CloudByte." msgid "No service VIP configured and no nexenta_client_address" msgstr "Nessun VIP di servizio configurato e nessun nexenta_client_address" #, python-format msgid "No snap found with %s as backing file." msgstr "Nessuna istantanea (snap) trovata con %s come file di backup." #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "Nessuna immagine istantanea trovata nel gruppo di istantanee %s." #, python-format msgid "No snapshots could be found on volume %s." msgstr "Nessuna istantanea trovata sul volume %s." #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "Nessuna istantanea origine fornita per creare il gruppo di coerenza %s." 
#, python-format msgid "No storage path found for export path %s" msgstr "" "Nessun percorso di archiviazione trovato per il percorso di esportazione %s" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "Non esiste alcuna specifica QoS %(specs_id)s." msgid "No suitable discovery ip found" msgstr "Non è stato trovato nessun IP di rilevamento adatto" #, python-format msgid "No support to restore backup version %s" msgstr "Nessun supporto per il ripristino della versione di backup %s" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "Nessun id destinazione è stato trovato per il volume %(volume_id)s." msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "Nessun ID LUN non utilizzato disponibile sull'host; è abilitato il " "multicollegamento cherichiede che tutti gli ID LUN siano univoci nell'intero " "gruppo di host. " #, python-format msgid "No valid host was found. %(reason)s" msgstr "Non è stato trovato alcun host valido. %(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "Nessun host valido per il volume %(id)s con tipo %(type)s" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "Nessun vdisk con l'UID specificato da ref %s." #, python-format msgid "No views found for LUN: %s" msgstr "Nessuna vista trovata per LUN: %s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "Nessun volume nel cluster con vserver %(vserver)s e percorso di giunzione " "%(junction)s " msgid "No volume service(s) started successfully, terminating." msgstr "Nessun servizio volume avviato con esito positivo, si termina. " msgid "No volume was found at CloudByte storage." msgstr "Nessun volume trovato nell'archivio CloudByte." msgid "No volume_type should be provided when creating test replica." msgstr "" "Non è necessario fornire alcun volume_type durante la creazione della " "replica di test." msgid "No volumes found in CloudByte storage." msgstr "Nessun volume rilevato nell'archivio CloudByte." msgid "No weighed hosts available" msgstr "Nessun host pesato disponibile " #, python-format msgid "Not a valid string: %s" msgstr "Stringa non valida: %s" msgid "Not a valid value for NaElement." msgstr "Non un valore valido per NaElement." #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "Impossibile trovare un datastore adeguato per il volume: %s." msgid "Not an rbd snapshot" msgstr "Non è un'istantanea rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Non autorizzato per l'immagine %(image_id)s." msgid "Not authorized." msgstr "Non autorizzato." #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "Spazio insufficiente sul backend (%(backend)s)" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "" "Spazio di archiviazione insufficiente nella condivisione ZFS per eseguire " "questa operazione." msgid "Not stored in rbd" msgstr "Non memorizzato in rbd" msgid "Nova returned \"error\" status while creating snapshot." msgstr "" "Nova ha restituito lo stato \"error\" durante la creazione dell'istantanea." msgid "Null response received from CloudByte's list filesystem." msgstr "Ricevuta risposta Null dal file system dell'elenco di CloudByte." msgid "Null response received from CloudByte's list iscsi auth groups." 
msgstr "Ricevuta risposta Null dei gruppi aut iscsi dell'elenco di CloudByte." msgid "Null response received from CloudByte's list iscsi initiators." msgstr "" "Ricevuta risposta Null dagli iniziatori iscsi dell'elenco di CloudByte." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "" "Ricevuta risposta Null dal servizio iscsi del volume dell'elenco di " "CloudByte." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "Ricevuta risposta null durante la creazione del volume [%s] nell'archivio " "CloudByte." #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" "Ricevuta risposta null durante l'eliminazione del volume [%s] nella memoria " "CloudByte." #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "Ricevuta risposta null durante l'interrogazione per il lavoro basato su " "[%(operation)s][%(job)s] nella memoria CloudByte." msgid "Number of retries if connection to ceph cluster failed." msgstr "Numero di tentativi se la connessione al cluster ceph non riesce. " msgid "Object Count" msgstr "Numero oggetti" msgid "Object Version" msgstr "Versione oggetto" msgid "Object is not a NetApp LUN." msgstr "L'oggetto non è un NetApp LUN." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "In un'operazione di estensione si è verificato un errore durante l'aggiunta " "di un volume al volume composito. %(volumename)s." msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "Uno dei servizi per volumi cinder è troppo vecchio per accettare queste " "richieste. Sono in esecuzione volumi cinder Liberty-Mitaka misti?" msgid "One of the required inputs from host, port or scheme was not found." msgstr "" "Uno degli input richiesti dall'host, porta o schema non è stato trovato." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "Solo le richieste %(value)s %(verb)s request(s) possono essere effettuate a " "%(uri)s ogni %(unit_string)s." msgid "Only one limit can be set in a QoS spec." msgstr "In una specifica QoS può essere impostato un solo limite. " msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "Solo agli utenti con token nell'ambito dei parent immediati o progetti root " "è consentito visualizzare le quote child." msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "" "È possibile annullare la gestione solo dei volumi gestiti da OpenStack." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "Operazione non riuscita con status=%(status)s. Dump completo: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "Operazione non supportata: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "L'opzione gpfs_images_dir non è impostata correttamente." msgid "Option gpfs_images_share_mode is not set correctly." msgstr "L'opzione gpfs_images_share_mode non è impostata correttamente." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "L'opzione gpfs_mount_point_base non è impostata correttamente." msgid "Option map (cls._map) is not defined." 
msgstr "Mappa delle opzioni (cls._map) non definita." #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "%(res)s %(prop)s di origine deve essere uno dei valori '%(vals)s'" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "Sovrascrivere porta HTTPS per connettersi al server API Blockbridge. " #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" "Il nome della partizione è Nessuno, impostare smartpartition:partitionname " "nella chiave. " msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "La password o la chiave privata SSH è obbligatoria per l'autenticazione: " "impostare l'opzione san_password o san_private_key." msgid "Path to REST server's certificate must be specified." msgstr "È necessario specificare il percorso al certificato server REST. " #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "Creare il pool %(pool_list)s in anticipo. " #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "Creare il livello %(tier_levels)s nel pool %(pool)s in anticipo. " msgid "Please re-run cinder-manage as root." msgstr "Eseguire nuovamente cinder-manage come root." msgid "Please specify a name for QoS specs." msgstr "Specificare un nome per le specifiche (specs) QoS." #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "La politica non consente di eseguire l'azione %(action)s." #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "Il pool %(poolNameInStr)s non è stato trovato." #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "Il pool %s non esiste nell'applicazione Nexenta Store" #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "Pool dal volume['host'] %(host)s non trovato." #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "Pool dal volume['host'] non riuscito con: %(ex)s." msgid "Pool is not available in the volume host field." msgstr "Il pool non è disponibile nel campo dell'host del volume." msgid "Pool is not available in the volume host fields." msgstr "Il pool non è disponibile nei campi dell'host del volume." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "Impossibile trovare il pool con nome %(pool)s nel trovare %(domain)s." #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" "Impossibile trovare il pool con nome %(pool_name)s nel trovare %(domain_id)s." #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "Il pool: %(poolName)s. non è associato al livello di memoria per la politica " "FAST %(fastPolicy)s." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "Il nome pool deve essere nel file %(fileName)s." #, python-format msgid "Pools %s does not exist" msgstr "I pool %s non esistono" msgid "Pools name is not set." msgstr "Il nome pool non è impostato. " #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "Stato della copia primaria: %(status)s e sincronizzata: %(sync)s." 
msgid "Project ID" msgstr "Identificativo del Progetto" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "" "Le quote del progetto non sono configurate correttamente per le quote " "nidificate: %(reason)s." msgid "Protection Group not ready." msgstr "Gruppo di protezione non pronto." #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "Il protocollo %(storage_protocol)s non è supportato per la famiglia di " "archiviazione %(storage_family)s." msgid "Provided backup record is missing an id" msgstr "Nel record di backup fornito manca un id" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "Lo stato %(provided)s dell'istantanea fornito non è consentito per " "un'istantanea con lo stato %(current)s." #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "Informazioni del provider sulla memoria CloudByte non trovate per il volume " "OpenStack [%s]." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Errore driver Pure Storage Cinder: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "Le specifiche QoS %(specs_id)s esistono già." #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "Le specifiche QoS %(specs_id)s sono ancora associate alle entità." #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "La configurazione QoS è errata. %s deve essere > 0." #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" "La specifica QoS %(specs_id)s non dispone di specifiche con la chiave " "%(specs_key)s." msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" "Spec QoS non supportate in questa famiglia di memoria e versione ONTAP. " msgid "Qos specs still in use." msgstr "Le specifiche (specs) Qos sono ancora in uso." msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "La query tramite il parametro service è obsoleta. Utilizzare invece il " "parametro bynary." msgid "Query resource pool error." msgstr "Errore di query del pool di risorse. " #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "" "Il limite della quota %s deve essere uguale o maggiore delle risorse " "esistenti. " #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Impossibile trovare la classe di quota %(class_name)s." msgid "Quota could not be found" msgstr "Impossibile trovare la quota" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Quota superata per le risorse: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Quota superata: code=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Impossibile trovare la quota per il progetto %(project_id)s." #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "Limite della quota non valido per il progetto '%(proj)s' per la risorsa " "'%(res)s': il limite di %(limit)d è inferiore al valore in uso di %(used)d" #, python-format msgid "Quota reservation %(uuid)s could not be found." 
msgstr "Impossibile trovare la prenotazione della quota %(uuid)s." #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "" "Impossibile trovare l'utilizzo della quota per il progetto %(project_id)s." #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "Operazione diff RBD non riuscita - (ret=%(ret)s stderr=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "È necessario specificare l'IP del server REST. " msgid "REST server password must by specified." msgstr "È necessario specificare la password del server REST. " msgid "REST server username must by specified." msgstr "È necessario specificare il nome utente del server REST. " msgid "RPC Version" msgstr "Versione RPC" msgid "RPC server response is incomplete" msgstr "La risposta del server RPC è incompleta" msgid "Raid did not have MCS Channel." msgstr "Il Raid non ha il canale MCS. " #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "Raggiunto il limite impostato dall'opzione di configurazione " "max_luns_per_storage_group. L'operazione per aggiungere %(vol)s al gruppo di " "archiviazione %(sg)s è rifiutata." #, python-format msgid "Received error string: %s" msgstr "Ricevuta stringa di errore: %s" msgid "Reference must be for an unmanaged snapshot." msgstr "Il riferimento deve essere per un'istantanea non gestita." msgid "Reference must be for an unmanaged virtual volume." msgstr "Il riferimento deve essere per un volume virtuale non gestito." msgid "Reference must be the name of an unmanaged snapshot." msgstr "Il riferimento deve essere il nome di un'istantanea non gestita." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "" "Il riferimento deve essere il nome volume di un volume virtuale non gestito." msgid "Reference must contain either source-id or source-name element." msgstr "Il riferimento deve contenere l'elemento source-id o source-name." msgid "Reference must contain either source-name or source-id element." msgstr "Il riferimento deve contenere l'elemento source-name o source-id." msgid "Reference must contain source-id or source-name element." msgstr "Il riferimento deve contenere l'elemento source-id o source-name." msgid "Reference must contain source-id or source-name key." msgstr "Il riferimento deve contenere la chiave source-id o source-name." msgid "Reference must contain source-id or source-name." msgstr "Il riferimento deve contenere source-id o source-name." msgid "Reference must contain source-id." msgstr "Il riferimento deve contenere source-id." msgid "Reference must contain source-name element." msgstr "Il riferimento deve contenere l'elemento source-name." msgid "Reference must contain source-name or source-id." msgstr "Il riferimento deve contenere il nome e l'id dell'origine." msgid "Reference must contain source-name." msgstr "Il riferimento deve contenere source-name." msgid "Reference to volume to be managed must contain source-name." msgstr "Il riferimento al volume da gestire deve contenere source-name." #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "Il riferimento al volume: %s da gestire deve contenere source-name." #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. 
Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "Migrazione del volume con ID: %(id)s rifiutata. Verificare la configurazione " "perché il Gruppo di volumi è sia l'origine che la destinazione: %(name)s." msgid "Remote pool cannot be found." msgstr "Impossibile trovare il pool remoto." msgid "Remove CHAP error." msgstr "Errore di rimozione CHAP." msgid "Remove fc from host error." msgstr "Errore di rimozione fc dall'host. " msgid "Remove host from array error." msgstr "Errore di rimozione host dall'array. " msgid "Remove host from hostgroup error." msgstr "Errore di rimozione host dal gruppo host. " msgid "Remove iscsi from host error." msgstr "Errore di rimozione iscsi dall'host. " msgid "Remove lun from QoS error." msgstr "Errore di rimozione lun da QoS. " msgid "Remove lun from cache error." msgstr "Errore di rimozione lun da cache." msgid "Remove lun from partition error." msgstr "Errore di rimozione lun dalla partizione. " msgid "Remove port from port group error." msgstr "Errore di rimozione porta da gruppo di porte." msgid "Remove volume export failed." msgstr "Rimozione esportazione volume non riuscita. " msgid "Rename lun on array error." msgstr "Errore di ridenominazione lun sull'array. " msgid "Rename snapshot on array error." msgstr "Errore di ridenominazione istantanea sull'array. " #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "Replica %(name)s su %(ssn)s non riuscita." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "Funzione del servizio di replica non trovata in %(storageSystemName)s." #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "Replication Service non trovato in %(storageSystemName)s." msgid "Replication is not enabled" msgstr "La replica non è abilitata" msgid "Replication is not enabled for volume" msgstr "La replica non è abilitata per il volume" msgid "Replication not allowed yet." msgstr "Replica non ancora consentita." #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "Lo stato della replica per il volume deve essere attivo o attivo-arrestato, " "ma lo stato corrente è: %s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "Lo stato della replica deve essere inattivo, attivo-arrestato o errore, ma " "lo stato corrente è: %s" msgid "Request body and URI mismatch" msgstr "Il corpo della richiesta e l'URI non corrispondono" msgid "Request body contains too many items" msgstr "Il corpo della richiesta contiene troppi elementi" msgid "Request body contains too many items." msgstr "" "Il corpo della richiesta contiene un numero troppo elevato di elementi." msgid "Request body empty" msgstr "Corpo della richiesta vuoto" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "La richiesta a Datera ha restituito uno stato non corretto: %(status)s | " "%(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Il backup richiesto supera la quota di GB di backup consentita. La quota " "%(requested)sG, richiesta è %(quota)sG e sono stati utilizzati %(consumed)sG." 
#, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Il volume o l'istantanea richiesti superano la quota consentita %(name)s. " "Richiesto %(requested)sG, la quota è %(quota)sG e sono stati utilizzati " "%(consumed)sG." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "La dimensione del volume richiesta %(size)d è maggiore del limite massimo " "consentito %(limit)d." msgid "Required configuration not found" msgstr "Configurazione richiesta non trovata" #, python-format msgid "Required flag %s is not set" msgstr "L'indicatore richiesto %s non è impostato" msgid "Requires an NaServer instance." msgstr "Richiede un'istanza NaServer." #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "La reimpostazione dello stato del backup è stata interrotta, il servizio di " "backup attualmente configurato [%(configured_service)s] non è il servizio di " "backup utilizzato per creare questo backup [%(backup_service)s]." #, python-format msgid "Resizing clone %s failed." msgstr "Ridimensionamento clone %s non riuscito. " msgid "Resizing image file failed." msgstr "Ridimensionamento del file immagine non riuscita." msgid "Resource could not be found." msgstr "Impossibile trovare la risorsa." msgid "Resource not ready." msgstr "Risorsa non pronta." #, python-format msgid "Response error - %s." msgstr "Errore di risposta - %s." msgid "Response error - The storage-system is offline." msgstr "Errore di risposta - Il sistema di archiviazione è offline." #, python-format msgid "Response error code - %s." msgstr "Codice di errore risposta - %s." msgid "RestURL is not configured." msgstr "RestURL non coinfigurato." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Ripristino del backup interrotto, lo stato del volume previsto è " "%(expected_status)s ma è stato ricevuto %(actual_status)s." #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Ripristino del backup interrotto, il servizio di backup attualmente " "configurato [%(configured_service)s] non è il servizio di backup utilizzato " "per creare questo backup [%(backup_service)s]." #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Ripristino del backup interrotto: lo stato del backup previsto è " "%(expected_status)s ma è stato ricevuto %(actual_status)s." #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Richiamata una quantità diversa di volumi SolidFire per le istantanee Cinder " "fornite. Richiamati: %(ret)s Desiderati: %(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Richiamata una quantità diversa di volumi SolidFire per i volumi Cinder " "forniti. 
Richiamati: %(ret)s Desiderati: %(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "Conteggio dei tentativi superato per il comando: %s" msgid "Retryable SolidFire Exception encountered" msgstr "Rilevata eccezione Retryable SolidFire" msgid "Retype cannot change encryption requirements." msgstr "" "L'assegnazione di un nuovo tipo non può modificare i requisiti di codifica." #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "L'assegnazione di un nuovo tipo non può modificare le modifiche qos front-" "end per il volume in-use: %s." msgid "Retype requires migration but is not allowed." msgstr "" "L'assegnazione del nuovo tipo richiede la migrazione, ma questa operazione " "non è consentita." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "L'esecuzione del roll back per il volume: %(volumeName)s non è riuscita. " "Rivolgersi all'amministratore di sistema per restituire manualmente il " "volume al gruppo di archiviazione predefinito per la politica fast " "%(fastPolicyName)s non riuscita." #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "Esecuzione roll back %(volumeName)s mediante eliminazione." #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" "L'esecuzione di Cinder con una versione di VMware vCenter inferiore a %s non " "è consentita." msgid "SAN product is not configured." msgstr "Prodotto SAN non configurato." msgid "SAN protocol is not configured." msgstr "Protocollo SAN non configurato." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" "Configurazione SMBFS 'smbfs_oversub_ratio' non valida. Deve essere > 0: %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "Configurazione SMBFS 'smbfs_used_ratio' non valida. Deve essere > 0 and <= " "1.0: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "Il file di configurazione SMBFS in %(config)s non esiste." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "Il file di configurazione SMBFS non è impostato (smbfs_shares_config)." #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" "Comando SSH non riuscito dopo '%(total_attempts)r' tentativi: '%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "" "Comando SSH non riuscito con errore: '%(err)s', comando: '%(command)s' " #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Rilevato inserimento comando SSH: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "Connessione SSH non riuscita per %(fabric)s con errore: %(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "Certificato SSL scaduto il %s." #, python-format msgid "SSL error: %(arg)s." msgstr "Errore SSL: %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Impossibile trovare il filtro Scheduler Host %(filter_name)s." #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "Impossibile trovare Scheduler Host Weigher %(weigher_name)s." 
#, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "Stato della copia secondaria: %(status)s e sincronizzata: %(sync)s, " "avanzamento sync è: %(progress)s%%." #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" "L'id secondario non può essere uguale all'array primario, backend_id = " "%(secondary)s." #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "SerialNumber deve essere nel file %(fileName)s." #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "Servizio %(service)s su host %(host)s rimosso." #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "Impossibile trovare il servizio %(service_id)s sull'host %(host)s." #, python-format msgid "Service %(service_id)s could not be found." msgstr "Impossibile trovare il servizio %(service_id)s." #, python-format msgid "Service %s not found." msgstr "Il servizio %s non è stato trovato." msgid "Service is too old to fulfil this request." msgstr "Il servizio è troppo vecchio per soddisfare la richiesta." msgid "Service is unavailable at this time." msgstr "Il servizio non è disponibile in questo momento." msgid "Service not found." msgstr "Servizio non trovato." msgid "Set pair secondary access error." msgstr "Errore di impostazione dell'accesso secondario alla coppia. " msgid "Sets thin provisioning." msgstr "Imposta il thin provisioning." msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "Impostazione del gruppo di politiche QoS della LUN non supportata su questa " "famiglia di archiviazione e versione ONTAP." msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "Impostazione del gruppo di politiche qos del file non supportato su questa " "famiglia di archiviazione e versione ontap." #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "Condivisione %s ignorata a causa di un formato non valido. Deve essere del " "tipo address:/export. Verificare le impostazioni nas_ip e nas_share_path." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "La condivisione %(dir)s non è scrivibile dal servizio del volume Cinder. Le " "operazioni dell'istantanea non saranno supportate." #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Errore I/O Sheepdog, il comando era: \"%s\". " msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "Operazioni di visualizzazione possono essere eseguite solo per progetti " "nella stessa gerarchia delprogetto che è l'ambito degli utenti. " msgid "Size" msgstr "Dimensione" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "" "Dimensione per il volume: %s non trovato, impossibile eseguire eliminazione " "protetta." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "La dimensione è %(image_size)dGB e non è contenuta in un volume di " "dimensione %(volume_size)dGB." 
#, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "La dimensione dell'immagine specificata %(image_size)sGB è maggiore della " "dimensione del volume %(volume_size)sGB." #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "È stato richiesto di eliminare l'istantanea %(id)s nell'attesa che " "diventasse disponibile. È possibile che sia stata eseguita una richiesta " "contemporanea." #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "Istantanea %(id)s trovata nello stato %(state)s anziché nello stato " "'deleting' durante l'eliminazione a catena." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Impossibile trovare l'istantanea %(snapshot_id)s." #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "L'istantanea %(snapshot_id)s non contiene metadati con la chiave " "%(metadata_key)s." #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "Lo snapshot %s non può far parte del gruppo di consistenza. " #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "L'istantanea '%s' non esiste sull'array." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "Impossibile creare l'istantanea perché il volume %(vol_id)s non è " "disponibile, stato corrente del volume: %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "Impossibile creare l'istantanea durante la migrazione del volume." msgid "Snapshot of secondary replica is not allowed." msgstr "Non è consentita l'istantanea della replica secondaria." #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "Istantanea del volume non supportata nello stato: %s." #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "Risorsa dell'istantanea \"%s\" non distribuita?" msgid "Snapshot size must be multiple of 1 GB." msgstr "La dimensione dell'istantanea deve essere un multiplo di 1 GB. " #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" "Lo stato dell'istantanea %(cur)s non è consentito per update_snapshot_status" msgid "Snapshot status must be \"available\" to clone." msgstr "Lo stato dell'istantanea deve essere \"available\" per la clonazione." #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "" "L'istantanea di cui deve essere eseguito il backup deve essere disponibile, " "ma lo stato corrente è \"%s\"." #, python-format msgid "Snapshot with id of %s could not be found." msgstr "Impossibile trovare l'istantanea con id %s." #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "Snapshot='%(snap)s' non esiste in image='%(base)s' di base - interruzione " "del backup incrementale" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "Le istantanee non sono supportate per questo formato di volume: %s" #, python-format msgid "Socket error: %(arg)s." msgstr "Errore socket: %(arg)s." 
msgid "SolidFire Cinder Driver exception" msgstr "Eccezione SolidFire Cinder Driver" msgid "Sort direction array size exceeds sort key array size." msgstr "" "La dimensione dell'array della direzione di ordinamento supera la dimensione " "dell'array della chiave di ordinamento." msgid "Source CG is empty. No consistency group will be created." msgstr "Il GC di origine è vuoto. Non verrà creato alcun gruppo di coerenza." msgid "Source host details not found." msgstr "Dettagli sull'host di origine non trovati." msgid "Source volume device ID is required." msgstr "L'ID periferica volume di origine è obbligatorio. " msgid "Source volume not mid-migration." msgstr "Volume di origine non migrazione intermedia." #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "Origine con nome/ip host: %s non trovata sul dispositivo di destinazione " "perla migrazione volume abilitata al backend, si procede con la migrazione " "predefinita. " msgid "SpaceInfo returned byarray is invalid" msgstr "SpaceInfo restituito dall'array non è valido" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "L'host specificato per l'associazione al volume %(vol)s è in un gruppo host " "non supportato con%(group)s." msgid "Specified logical volume does not exist." msgstr "Il volume logico specificato non esiste." #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "Impossibile trovare il gruppo di istantanee specificato con id %s. " msgid "Specify a password or private_key" msgstr "Specificare una password o private_key" msgid "Specify san_password or san_private_key" msgstr "Specifica san_password o san_private_key" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "" "Specificare il nome, la descrizione, is_public o una combinazione dei " "precedenti per il tipo di volume." msgid "Split pair error." msgstr "Errore di divisione della coppia." msgid "Split replication failed." msgstr "Replica divisione non riuscita." msgid "Start LUNcopy error." msgstr "Errore di avvio LUNcopy." msgid "State" msgstr "Stato" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "Lo stato del nodo non è corretto. Lo stato corrente è %s. " msgid "Status" msgstr "Stato" msgid "Stop snapshot error." msgstr "Errore di arresto istantanea." #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "Storage Configuration Service non trovato in %(storageSystemName)s." #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "Storage HardwareId mgmt Service non trovato in %(storageSystemName)s." #, python-format msgid "Storage Profile %s not found." msgstr "Profilo di memoria %s non trovato." #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "Storage Relocation Service non trovato in %(storageSystemName)s." #, python-format msgid "Storage family %s is not supported." msgstr "La famiglia di archiviazione %s non è supportata." 
#, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "" "Il gruppo di archiviazione %(storageGroupName)s non è stato eliminato " "correttamente" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "Host di memoria %(svr)s non rilevato, verificare il nome " msgid "Storage pool is not configured." msgstr "Pool di archiviazione non configurato." #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "Profilo di archiviazione: %(storage_profile)s non trovato." msgid "Storage resource could not be found." msgstr "Impossibile trovare la risorsa di memoria." msgid "Storage system id not set." msgstr "Id sistema di archivio non impostato." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "" "Impossibile trovare il sistema di archiviazione per il pool " "%(poolNameInStr)s." msgid "Storage-assisted migration failed during manage volume." msgstr "" "La migrazione assistita dall'archiviazione non è riuscita durante la " "gestione del volume." #, python-format msgid "StorageSystem %(array)s is not found." msgstr "StorageSystem %(array)s non è stato trovato" #, python-format msgid "String with params: %s" msgstr "Stringa con parametri: %s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "La somma dell'utilizzo child '%(sum)s' è maggiore della quota disponibile di " "'%(free)s' per il progetto '%(proj)s' per la risorsa '%(res)s'. Ridurre il " "limite o l'utilizzo per uno o più dei seguenti progetti: '%(child_ids)s'" msgid "Switch over pair error." msgstr "Errore di passaggio alla coppia." msgid "Sync pair error." msgstr "Errore di sincronizzazione della coppia." msgid "Synchronizing secondary volume to primary failed." msgstr "Sincronizzazione volume secondario con il primario non riuscita." #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "Sistema %(id)s trovato con stato password errata - %(pass_status)s." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "%(id)s di sistema trovati in stato non valido - %(status)s." msgid "System does not support compression." msgstr "Il sistema non supporta la compressione," msgid "System is busy, retry operation." msgstr "Il sistema è occupato, ritentare l'operazione. " #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "TSM [%(tsm)s] non trovato nella memoria CloudByte per l'account " "[%(account)s]." msgid "Target volume type is still in use." msgstr "Il tipo di volume di destinazione è ancora in uso." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "Mancata corrispondenza della struttura ad albero del template; aggiunta di " "slave %(slavetag)s a master %(mastertag)s" #, python-format msgid "Tenant ID: %s does not exist." msgstr "ID titolare: %s non esiste." msgid "Terminate connection failed" msgstr "Interrompi connessione non riuscito" msgid "Terminate connection unable to connect to backend." msgstr "Interrompi connessione non riesce a collegarsi al backend." 
#, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "Terminazione connessione volume non riuscita: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "L'origine %(type)s %(id)s da replicare non è stata trovata." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "I parametri 'sort_key' e 'sort_dir' sono obsoleti e non possono essere " "utilizzati con il parametro 'sort'." msgid "The EQL array has closed the connection." msgstr "L'array EQL ha chiuso la connessione." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "Il filesystem %(fs)s GPFS non è del livello di release richiesto. Il " "livello corrente è %(cur)s, deve essere almeno %(min)s." msgid "The IP Address was not found." msgstr "L'indirizzo IP non è stato trovato." #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "La richiesta WebDAV non è riuscita. Motivo: %(msg)s, Codice di ritorno/" "motivo:%(code)s, Volume di origine: %(src)s, Volume di destinazione:%(dst)s, " "Metodo:%(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "L'errore sopra riportato potrebbe mostrare che il database non è stato " "creato.\n" "Creare un database utilizzando 'cinder-manage db sync' prima di eseguire " "questo comando." #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "L'array non supporta l'impostazione del pool di archiviazione per SLO " "%(slo)s e carico di lavoro %(workload)s. Controllare l'array per SLO e " "carichi di lavoro validi." msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "Il back-end in cui viene creato il volume non ha la replica abilitata." #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "Il comando %(cmd)s non è riuscito. (ret: %(ret)s, stdout: %(out)s, stderr: " "%(err)s)" msgid "The copy should be primary or secondary" msgstr "La copia deve essere primaria o secondaria" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "" "La creazione dell'unità logica non può essere completata. (LDEV: %(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "Il metodo decorato deve accettare un oggetto volume o istantanea " #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "Il dispositivo nel percorso %(path)s non è disponibile: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "" "L'ora di fine (%(end)s) deve essere successiva all'ora di inizio (%(start)s)." #, python-format msgid "The extra_spec: %s is invalid." msgstr "La extra_spec: %s non è valida." #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "Specifica supplementare: %(extraspec)s non valida." 
#, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "Impossibile eliminare il volume sottoposto a failover: %s" #, python-format msgid "The following elements are required: %s" msgstr "Sono richiesti i seguenti elementi: %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "Nelle seguenti migrazioni è presente un downgrade che non è consentito:\n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "Impossibile aggiungere il gruppo di host o la destinazione iSCSI." msgid "The host group or iSCSI target was not found." msgstr "Non è stato trovato il gruppo di host o la destinazione iSCSI." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "L'host non è pronto per essere sottoposto a failback. Risincronizzare i " "volumi e riprendere la replica sui backend 3PAR." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "L'host non è pronto per essere sottoposto a failback. Risincronizzare i " "volumi e riprendere la replica sui backend LeftHand." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "L'host non è pronto per essere sottoposto a failback. Risincronizzare i " "volumi e riprendere la replica sui backend Storwize." #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "L'utente iSCSI CHAP %(user)s non esiste." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "La lun importata %(lun_id)s è nel pool %(lun_pool)s che non è gestito " "dall'host %(host)s." msgid "The key cannot be None." msgstr "La chiave non può essere Nessuno." #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "L'unità logica per %(type)s %(id)s specificato è già stata eliminata." #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "Il metodo %(method)s è scaduto. (valore timeout: %(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "Il metodo update_migrated_volume non è implementato." #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" "Il montaggio %(mount_path)s non è un volume USP Quobyte valido. Errore: " "%(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "Il parametro del backend di memoria. (config_group: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "Il backup parent deve essere disponibile per il backup incrementale." #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "L'istantanea fornita '%s' non è un'istantanea del volume fornito." msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "Il riferimento al volume nel backend deve avere il formato file_system/" "volume_name (volume_name non può contenere '/')" #, python-format msgid "The remote retention count must be %s or less." msgstr "Il conteggio memorizzazione remoto deve essere %s o inferiore." 
msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "La modalità di replica non è stata configurata correttamente nel tipo di " "volume extra_specs. Se replication:mode è periodic, replication:sync_period " "deve essere specificato e deve essere compreso tra 300 e 31622400 secondi." #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "" "Il periodo di sincronizzazione replica deve essere di almeno %s secondi." #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "La dimensione richiesta : %(requestedSize)s non è la stessa dimensione " "risultante %(resultSize)s." #, python-format msgid "The resource %(resource)s was not found." msgstr "Impossibile trovare la risorsa %(resource)s." msgid "The results are invalid." msgstr "I risultati non sono validi." #, python-format msgid "The retention count must be %s or less." msgstr "Il conteggio memorizzazione deve essere %s o inferiore." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "L'istantanea non può essere creata quando il volume è in modalità di " "manutenzione. " #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "Il volume di origine %s non è nel pool gestito dall'host corrente. " msgid "The source volume for this WebDAV operation not found." msgstr "Volume di origine per questa operazione WebDAV non trovato." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "Il tipo volume origine '%(src)s' è diverso dal tipo di volume di " "destinazione '%(dest)s'." #, python-format msgid "The source volume type '%s' is not available." msgstr "Il tipo volume origine '%s' non è disponibile." #, python-format msgid "The specified %(desc)s is busy." msgstr "Il %(desc)s specificato è occupato." #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "La LUN specificata non appartiene al pool indicato: %s." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "L'ldev specificato %(ldev)s non può essere gestito. L'ldev non deve essere " "associazione." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" "L'ldev specificato %(ldev)s non può essere gestito. L'ldev non deve essere " "associato." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "L'ldev specificato %(ldev)s non può essere gestito. La dimensione ldev deve " "essere in multipli di gigabyte." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "L'ldev specificato %(ldev)s non può essere gestito. Il volume deve essere " "del tipo DP-VOL." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "L'operazione specificata non è supportata. La dimensione del volume deve " "essere la stessa dell'origine %(type)s. (volume: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." 
msgstr "Il disco virtuale specificato è associato a un host." msgid "The specified volume is mapped to a host." msgstr "Il volume specificato è associato a un host." #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "La password dell'array di archiviazione per %s non è corretta, aggiornare la " "password configurata." #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "" "È possibile utilizzare il backend di memoria. (config_group: " "%(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "Il dispositivo di archiviazione non supporta %(prot)s. Configurare il " "dispositivo per supportare %(prot)s o passare a un driver che utilizza un " "protocollo diverso." #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "Il conteggio di metadati striped %(memberCount)s è troppo piccolo per il " "volume: %(volumeName)s, con dimensione %(volumeSize)s." #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "Il tipo di metadati: %(metadata_type)s per volume/istantanea %(id)s non è " "valido." #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "Il volume %(volume_id)s non può essere esteso. Il tipo di volume deve essere " "Normale." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "Il volume %(volume_id)s non può essere non gestito. Il tipo di volume deve " "essere %(volume_type)s." #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "La volume %(volume_id)s viene gestito correttamente. (LDEV:%(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "" "La gestione del volume %(volume_id)s viene annullata correttamente. (LDEV:" "%(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "Il volume %(volume_id)s da associare non è stato trovato." msgid "The volume cannot accept transfer in maintenance mode." msgstr "" "Il volume non può accettare trasferimenti in modalità di manutenzione. " msgid "The volume cannot be attached in maintenance mode." msgstr "Il volume non può essere collegato in modalità di manutenzione. " msgid "The volume cannot be detached in maintenance mode." msgstr "Il volume non può essere scollegato in modalità di manutenzione. " msgid "The volume cannot be updated during maintenance." msgstr "Il volume non può essere aggiornato durante la manutenzione. " msgid "The volume connection cannot be initialized in maintenance mode." msgstr "" "La connessione volume non può essere inizializzata in modalità di " "manutenzione. " msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "Il driver del volume richiede il nome iniziatore iSCSI nel connettore." msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "Il volume è attualmente occupato su 3PAR e non può essere eliminato in " "questo momento. È possibile ritentare successivamente." 
msgid "The volume label is required as input." msgstr "L'etichetta volume è richiesta come input." msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "" "I metadati volume non possono essere eliminati quando il volume è in " "modalità di manutenzione. " msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "I metadati volume non possono essere aggiornati quando il volume è in " "modalità di manutenzione. " #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "" "Non sono presenti risorse disponibili per l'utilizzo. (risorsa: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "Non vi sono host ESX validi. " #, python-format msgid "There are no valid datastores attached to %s." msgstr "Nessun archivio dati valido collegato a %s." msgid "There are no valid datastores." msgstr "Nessun archivio dati valido." #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "Non c'è alcuna designazione del %(param)s. L'archivio specificato è " "indispensabile per gestire il volume." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "Non c'è alcuna designazione dell'ldev. L'ldev specificato è indispensabile " "per gestire il volume." msgid "There is no metadata in DB object." msgstr "Non sono presenti metadati nell'oggetto DB. " #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "Non esiste alcuna condivisione che può ospitare %(volume_size)sG" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "Non esiste nessuna condivisione che possa ospitare %(volume_size)sG." #, python-format msgid "There is no such action: %s" msgstr "Non esiste alcuna azione simile: %s" msgid "There is no virtual disk device." msgstr "Non esistono unità del disco virtuale." #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "" "Si è verificato un errore durante l'aggiunta del volume al gruppo di copie " "remote: %s." #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "Si è verificato un errore durante la creazione dell'istantanea cg: %s." #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "" "Si è verificato un errore durante la creazione del gruppo di copie remote: " "%s." #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "" "Si è verificato un errore durante l'impostazione del periodo di " "sincronizzazione per il gruppo di copie remoto: %s." #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Si è verificato un errore durante la configurazione di un gruppo di copie " "remoto sugli array 3PAR:('%s'). Il volume non verrà riconosciuto come tipo " "di replica." #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Si è verificato un errore durante la configurazione di una pianificazione " "remota sugli array LeftHand:('%s'). Il volume non verrà riconosciuto come " "tipo di replica." #, python-format msgid "There was an error starting remote copy: %s." 
msgstr "Si è verificato un errore durante l'avvio della copia remota: %s." #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Nessun file di configurazione Gluster configurato (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "Nessun file di configurazione NFS configurato (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "Non è configurato alcun volume Quobyte (%s). Esempio: quobyte:///" "" msgid "Thin provisioning not supported on this version of LVM." msgstr "Thin provisioning non supportato in questa versione di LVM." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "ThinProvisioning Enabler non è installato. Impossibile creare un volume " "sottile" msgid "This driver does not support deleting in-use snapshots." msgstr "Questo driver non supporta l'eliminazione di istantanee in uso." msgid "This driver does not support snapshotting in-use volumes." msgstr "" "Questo driver non supporta la creazione di istantanee nei volumi in uso." msgid "This request was rate-limited." msgstr "Questa richiesta era rate-limited." #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "Questa piattaforma del sistema (%s) non è supportata. Questo driver supporta " "solo le piattaforme Win32." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "Tier Policy Service non trovato per %(storageSystemName)s." #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "Timeout in attesa dell'aggiornamento di Nova per la creazione " "dell'istantanea %s." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "Timeout in attesa dell'aggiornamento di Nova per l'eliminazione " "dell'istantanea %(id)s." msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "Il valore di timeout (in secondi) utilizzato quando ci si connette al " "cluster ceph. Se il valore è < 0, nessun timeout viene impostato e viene " "utilizzato il valore librados predefinito." #, python-format msgid "Timeout while calling %s " msgstr "Timeout durante la chiamata di %s " #, python-format msgid "Timeout while requesting %(service)s API." msgstr "Richiesta dell'API di %(service)s scaduta." #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "" "Timeout durante la richiesta delle funzionalità dal backend %(service)s." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "Impossibile trovare il trasferimento %(transfer_id)s." #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "Trasferimento %(transfer_id)s: id volume %(volume_id)s in uno stato " "imprevisto %(status)s, previsto awaiting-transfer" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "Tentativo di importare i metadati di backup dall'id %(meta_id)s nel backup " "%(id)s." #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." 
msgstr "" "Attività di regolazione del volume arrestata prima di essere completata: " "volume_name=%(volume_name)s, task-status=%(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "Il tipo %(type_id)s è già associato ad altre specifiche (specs) qos: " "%(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "Modifica di accesso tipo non applicabile al tipo di volume pubblico. " msgid "Type cannot be converted into NaElement." msgstr "Il tipo non può essere convertito in NaElement." #, python-format msgid "TypeError: %s" msgstr "TypeError: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "" "Gli UUID %s sono presenti nell'elenco dei volumi di aggiunta e rimozione." #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "Impossibile accedere al back-end Storwize per il volume %s." msgid "Unable to access the backend storage via file handle." msgstr "Impossibile accedere alla memoria backend attraverso la gestione file." #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "" "Impossibile accedere alla memoria backend attraverso il percorso %(path)s." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "" "Impossibile aggiungere l'host Cinder a apphosts per lo spazio %(space)s" #, python-format msgid "Unable to complete failover of %s." msgstr "Impossibile completare il failover di %s." msgid "Unable to connect or find connection to host" msgstr "Impossibile connettersi o trovare la connessione all'host" msgid "Unable to create Barbican Client without project_id." msgstr "Impossibile creare il client Barbican senza project_id." #, python-format msgid "Unable to create consistency group %s" msgstr "Impossibile creare il gruppo di coerenza %s" msgid "Unable to create lock. Coordination backend not started." msgstr "Impossibile creare il blocco. Backend di coordinazione non avviato." #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Impossibile creare o ottenere il gruppo storage predefinito per la politica " "FAST. %(fastPolicyName)s." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "Impossibile creare il clone della replica per il volume %s." #, python-format msgid "Unable to create the relationship for %s." msgstr "Impossibile creare la relazione per %s." #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "Impossibile creare il volume %(name)s da %(snap)s." #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "Impossibile creare il volume %(name)s da %(vol)s." #, python-format msgid "Unable to create volume %s" msgstr "Impossibile creare il volume %s" msgid "Unable to create volume. Backend down." msgstr "Impossibile creare il volume. Backend disattivo." #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "Impossibile eliminare l'istantanea del gruppo di coerenza %s " #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "Impossibile eliminare l'istantanea %(id)s, stato: %(status)s." #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "Impossibile eliminare la politica di istantanea sul volume %s. " #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. 
Exception: %(err)s." msgstr "" "Impossibile eliminare il volume di destinazione per il volume %(vol)s. " "Eccezione: %(err)s." msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "Impossibile scollegare il volume. Lo stato del volume deve essere 'in-use' e " "attach_status deve essere 'attached' per scollegarlo. " #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "" "Impossibile determinare secondary_array dall'array secondario fornito: " "%(secondary)s." #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "" "Impossibile determinare il nome dell'istantanea in Purity per l'istantanea " "%(id)s." msgid "Unable to determine system id." msgstr "Impossibile determinare l'id sistema." msgid "Unable to determine system name." msgstr "Impossibile determinare il nome sistema." #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "Impossibile eseguire operazioni di gestione istantanea con la versione API " "REST Purity %(api_version)s, richiesta %(required_versions)s." #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "Impossibile eseguire la replica con la versione API REST Purity " "%(api_version)s, richiesta una tra le versioni %(required_versions)s." msgid "Unable to enable replication and snapcopy at the same time." msgstr "Impossibile abilitare replica e snapcopy contemporaneamente." #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Impossibile stabilire la relazione con il cluster Storwize %s." #, python-format msgid "Unable to extend volume %s" msgstr "Impossibile estendere il volume %s." #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "" "Impossibile eseguire il failover del volume %(id)s sul back-end secondario, " "in quanto la relazione di replica non è in grado di eseguire il passaggio: " "%(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "Impossibile eseguire il failback su \"default\", è possibile eseguire questa " "operazione solo dopo il completamento di un failover." #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "" "Impossibile eseguire il failover sulla destinazione di replica:%(reason)s)." msgid "Unable to fetch connection information from backend." msgstr "Impossibile recuperare le informazioni sulla connessione dal backend." #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" "Impossibile richiamare le informazioni sulla connessione dal backend: %(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "Impossibile trovare ref Purity con nome=%s" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "Impossibile trovare il gruppo volume: %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "" "Impossibile trovare la destinazione di failover, nessuna destinazione " "secondaria configurata." msgid "Unable to find iSCSI mappings." msgstr "Impossibile trovare associazioni iSCSI. 
" #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "Impossibile trovare ssh_hosts_key_file: %s" msgid "Unable to find system log file!" msgstr "Impossibile trovare il file di log di sistema." #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "" "Impossibile trovare l'istantanea pg utilizzabile da utilizzare per il " "failover sull'array secondario selezionato: %(id)s." #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "" "Impossibile trovare l'array secondario utilizzabile da destinazioni " "configurate: %(targets)s." #, python-format msgid "Unable to find volume %s" msgstr "Impossibile trovare il volume %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "Impossibile ottenere un dispositivo di blocco per il file '%s'" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" "Impossibile ottenere le informazioni di configurazione necessarie per creare " "un volume: %(errorMessage)s." msgid "Unable to get corresponding record for pool." msgstr "Impossibile acquisire il record corrispondente per il pool. " #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "Impossibile ottenere le informazioni sullo spazio %(space)s, verificare che " "il cluster sia in esecuzione e connesso. " msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "Impossibile ottenere l'elenco di indirizzi IP su questo host, controllare " "autorizzazioni e rete." msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "" "Impossibile ottenere un elenco di membri del dominio, verificare che il " "cluster sia in esecuzione. " msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "Impossibile ottenere l'elenco di spazi per creare il nuovo nome. Verificare " "che il cluster sia in esecuzione. " #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "Impossibile richiamare le statistiche per backend_name: %s" msgid "Unable to get storage volume from job." msgstr "Impossibile ottenere il volume di archiviazione dal job." #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "Impossibile acquisire gli endpoint di destinazione per hardwareId " "%(hardwareIdInstance)s." msgid "Unable to get the name of the masking view." msgstr "Impossibile ottenere il nome della vista di mascheramento." msgid "Unable to get the name of the portgroup." msgstr "Impossibile ottenere il nome del gruppo di porte." #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "Impossibile ottenere la relazione di replica per il volume %s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "Impossibile importare il volume %(deviceId)s in cinder. È il volume " "originedella sessione di replica %(sync)s. " #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "Impossibile importare il volume %(deviceId)s in cinder. 
Il volume esterno " "non è nel pool gestito dall'host cinder corrente." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "Impossibile importare il volume %(deviceId)s in cinder. Il volume è nella " "vista di mascheramento %(mv)s." #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "Impossibile caricare CA da %(cert)s %(e)s." #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "Impossibile caricare il certificato da %(cert)s %(e)s." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "Impossibile caricare la chiave da %(cert)s %(e)s." #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" "Impossibile individuare l'account %(account_name)s nell'unità Solidfire" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "Impossibile individuare un SVM che stia gestendo l'indirizzo IP '%s'" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "Impossibile individuare i profili replay specificati %s " #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "Impossibile gestire il volume esistente. Il volume %(volume_ref)s è già " "gestito." #, python-format msgid "Unable to manage volume %s" msgstr "Impossibile gestire il volume %s" msgid "Unable to map volume" msgstr "Impossibile associare il volume" msgid "Unable to map volume." msgstr "Impossibile associare il volume." msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "" "Impossibile analizzare la richiesta XML. Fornire XML nel formato corretto." msgid "Unable to parse attributes." msgstr "Impossibile analizzare gli attributi." #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "Impossibile promuovere la replica a primaria per il volume %s. Nessuna copia " "secondaria disponibile." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "Impossibile utilizzare nuovamente un host non gestito da Cinder con " "use_chap_auth=True," msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "Impossibile utilizzare nuovamente un host con credenziali CHAP sconosciute " "configurate." #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "Impossibile ridenominare il volume %(existing)s in %(newname)s" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "Impossibile richiamare il gruppo di istantanee con id %s." #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "Impossibile riscrivere %(specname)s, si prevedeva di ricevere i valori " "%(spectype)s corrente e richiesto. Valore ricevuto: %(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "Impossibile eseguire la riscrittura: Una copia del volume %s esiste. La " "riscrittura supererebbe il limite di 2 copie." #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. 
Volume = %s" msgstr "" "Impossibile eseguire la riscrittura: L'azione corrente richiede una copia " "volume, non consentita quando il nuovo tipo è replica. Volume = %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "" "Impossibile configurare la replica della modalità mirror per %(vol)s. " "Eccezione: %(err)s." #, python-format msgid "Unable to snap Consistency Group %s" msgstr "Impossibile eseguire istantanea del gruppo di coerenza %s" msgid "Unable to terminate volume connection from backend." msgstr "Impossibile terminare la connessione del volume dal backend." #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "Impossibile terminare la connessione del volume: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "Impossibile aggiornare il gruppo di coerenza %s" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "Impossibile aggiornare il tipo a causa di uno stato non corretto: " "%(vol_status)s sul volume: %(vol_id)s. Lo stato del volume deve essere " "available o in-use." #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" "Impossibile verificare il gruppo iniziatore: %(igGroupName)s nella vista di " "mascheramento %(maskingViewName)s. " msgid "Unacceptable parameters." msgstr "Parametri inaccettabili." #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" "Stato associazione imprevisto %(status)s per l'associazione %(id)s. " "Attributi: %(attr)s." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "Risposta CLI non prevista: mancata corrispondenza intestazione/riga. " "Intestazione: %(header)s, riga: %(row)s." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "Stato associazione imprevisto %(status)s per l'associazione %(id)s. " "Attributi: %(attr)s." #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "Output imprevisto. Previsto [%(expected)s] ma ricevuto [%(output)s]" msgid "Unexpected response from Nimble API" msgstr "Risposta imprevista dall'API Nimble" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Risposta imprevista dall'API Tegile IntelliFlash" msgid "Unexpected status code" msgstr "Codice di stato imprevisto" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "Codice di stato imprevisto dallo switch %(switch_id)s con protocollo " "%(protocol)s per url %(page)s. Errore: %(error)s" msgid "Unknown Gluster exception" msgstr "Eccezione Gluster sconosciuta" msgid "Unknown NFS exception" msgstr "Eccezione NFS sconosciuta" msgid "Unknown RemoteFS exception" msgstr "Eccezione RemoteFS sconosciuta" msgid "Unknown SMBFS exception." msgstr "Eccezione SMBFS sconosciuta." msgid "Unknown Virtuozzo Storage exception" msgstr "Eccezione Virtuozzo Storage sconosciuta " msgid "Unknown action" msgstr "Azione sconosciuta" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. 
Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Si ignora se il volume: %s da gestire è già gestito da Cinder. Interruzione " "della gestione del volume. Aggiungere la proprietà dello schema " "personalizzato 'cinder_managed' al volume e impostare il relativo valore su " "False. In alternativa, impostare il valore della politica di configurazione " "di cinder 'zfssa_manage_policy' su 'loose' per rimuovere questa restrizione." #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Si ignora se il volume: %s da gestire è già gestito da Cinder. Interruzione " "della gestione del volume. Aggiungere la proprietà dello schema " "personalizzato 'cinder_managed' al volume e impostare il relativo valore su " "False. In alternativa, impostare il valore della politica di configurazione " "di cinder 'zfssa_manage_policy' su 'loose' per rimuovere questa restrizione." #, python-format msgid "Unknown operation %s." msgstr "Operazione %s sconosciuta." #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "Comando %(cmd)s sconosciuto o non supportato" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "Protocollo sconosciuto: %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Risorse quota sconosciute %(unknown)s." msgid "Unknown service" msgstr "Servizio sconosciuto" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'." msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "" "Le opzioni di annullamento della gestione ed eliminazione a catena si " "escludono reciprocamente." msgid "Unmanage volume not implemented." msgstr "Annullamento della gestione del volume non implementato." msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" "L'annullamento della gestione di istantanee da volumi sottoposti a failover " "non è consentito." msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" "L'annullamento della gestione di istantanee da volumi sottoposti a failover " "non è consentito." #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "Parola chiave QoS non riconosciuta: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "Formato di backing non riconosciuto: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valore read_deleted non riconosciuto '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "Opzioni gcs non impostate: %s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "iscsiadm non riuscito. L'eccezione è %(ex)s. " msgid "Unsupported Clustered Data ONTAP version." msgstr "Versione ONTAP dati in cluster non supportata." msgid "Unsupported Content-Type" msgstr "Content-Type non supportato" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported."
msgstr "" "Versione Data ONTAP non supportata. Data ONTAP versione 7.3.1 e successive " "sono supportate." #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "La versione dei metadati di backup non è supportata (%s)" msgid "Unsupported backup metadata version requested" msgstr "La versione dei metadati di backup richiesta non è supportata" msgid "Unsupported backup verify driver" msgstr "Driver di verifica del backup non supportato" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "Firmware non supportato per lo switch %s. Assicurarsi che lo switch sia in " "esecuzione su firmware v6.4 o superiore" #, python-format msgid "Unsupported volume format: %s " msgstr "Formato del volume non supportato: %s " msgid "Update QoS policy error." msgstr "Errore di aggiornamento della politica QoS." msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "Operazioni di aggiornamento e eliminazione quota possono essere eseguite " "solo da un admin del parent immediato o dall'admin CLOUD." msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "Operazioni di aggiornamento e eliminazione quota possono essere eseguite " "solo a progettinella stessa gerarchia del progetto che è l'ambito degli " "utenti. " msgid "Update list, doesn't include volume_id" msgstr "Aggiorna elenco, non includere volume_id" msgid "Updated At" msgstr "Aggiornato a" msgid "Upload to glance of attached volume is not supported." msgstr "Il caricamento in glance del volume collegato non è supportato." msgid "Use ALUA to associate initiator to host error." msgstr "Errore di utilizzo ALUA per associare l'iniziatore all'host. " msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "Errore di utilizzo CHAP per associare l'iniziatore all'host. Controllarenome " "utente e password CHAP. " msgid "User ID" msgstr "Identificativo Utente" msgid "User does not have admin privileges" msgstr "L'utente non ha i privilegi dell'amministratore" msgid "User is not authorized to use key manager." msgstr "L'utente non è autorizzato ad utilizzare il gestore chiavi." msgid "User not authorized to perform WebDAV operations." msgstr "L'utente non è autorizzato ad eseguire le operazioni WebDAV." msgid "UserName is not configured." msgstr "UserName non configurato." msgid "UserPassword is not configured." msgstr "UserPassword non configurato." msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "" "Rollback v2 - Volume in un altro gruppo di memoria oltre al gruppo di " "memoria predefinito. " msgid "V2 rollback, volume is not in any storage group." msgstr "Rollback V2, il volume non è in alcun gruppo di memoria. " msgid "V3 rollback" msgstr "Rollback V3 " msgid "VF is not enabled." msgstr "VF non è abilitato." #, python-format msgid "VV Set %s does not exist." msgstr "L'impostazione VV %s non esiste." #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "Il consumer valido delle specifiche (specs) QoS è: %s" #, python-format msgid "Valid control location are: %s" msgstr "l'ubicazione di controllo valida è: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "Convalida connessione volume non riuscita (errore: %(err)s)." 
#, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" "Il valore \"%(value)s\" non è valido per l'opzione di configurazione " "\"%(option)s\"" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "Valore %(param)s per %(param_string)s non è un booleano." msgid "Value required for 'scality_sofs_config'" msgstr "Valore richiesto per 'scality_sofs_config'" #, python-format msgid "ValueError: %s" msgstr "ValueError: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "Vdisk %(name)s non coinvolto nell'associazione %(src)s -> %(tgt)s." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "La versione %(req_ver)s non è supportata dall'API. Il valore minimo è " "%(min_ver)s ed il massimo è %(max_ver)s." #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s non può richiamare l'oggetto mediante l'id." #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s non supporta l'aggiornamento condizionale." #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "Il volume virtuale '%s' non esiste nell'array." #, python-format msgid "Vol copy job for dest %s failed." msgstr "Lavoro di copia del volume per la destinazione %s non riuscito." #, python-format msgid "Volume %(deviceID)s not found." msgstr "Volume %(deviceID)s non trovato." #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "Volume %(name)s non trovato nell'array. Impossibile determinare se vi sono " "volumi associati." #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "Il volume %(name)s è stato creato in VNX, ma nello stato %(state)s." #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "Impossibile creare il volume %(vol)s nel pool %(pool)s." #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "Il volume %(vol1)s non e' uguale allo snapshot.volume_id %(vol2)s." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "Lo stato del volume %(vol_id)s deve essere available o in-use, ma lo stato " "corrente è: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "Lo stato del volume %(vol_id)s deve essere available per l'estensione, ma lo " "stato corrente è: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "Lo stato del volume %(vol_id)s deve essere available per aggiornare " "l'indicatore di sola lettura, ma lo stato corrente è: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "Lo stato del volume %(vol_id)s deve essere available, ma lo stato corrente " "è: %(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Impossibile trovare il volume %(volume_id)s." #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." 
msgstr "" "Il volume %(volume_id)s contiene metadati di gestione con la chiave " "%(metadata_key)s." #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "Volume %(volume_id)s non contiene metadati con la chiave %(metadata_key)s." #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "Il volume %(volume_id)s è attualmente associato al gruppo host non " "supportato %(group)s" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "Il volume %(volume_id)s non è attualmente associato all'host %(host)s " #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "Volume %(volume_id)s è ancora collegato, prima scollegare il volume." #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "Errore di replica del volume %(volume_id)s : %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "Volume %(volume_name)s occupato." #, python-format msgid "Volume %s could not be created from source volume." msgstr "Impossibile creare il volume %s dal volume di origine." #, python-format msgid "Volume %s could not be created on shares." msgstr "Impossibile creare il volume %s nelle condivisioni." #, python-format msgid "Volume %s could not be created." msgstr "Impossibile creare il volume %s." #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "Il volume %s non esiste in Nexenta SA" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "Il volume %s non esiste nell'applicazione Nexenta Store" #, python-format msgid "Volume %s does not exist on the array." msgstr "Il volume %s non esiste su questo array. " #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "Sul volume %s non è specificato provider_location; ignorato." #, python-format msgid "Volume %s doesn't exist on array." msgstr "Il volume %s non esiste nell'array." #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "Il volume %s non esiste nel backend ZFSSA." #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "Il volume %s è già gestito da OpenStack." #, python-format msgid "Volume %s is already part of an active migration." msgstr "Il volume %s è già parte di una migrazione attiva." #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "Il volume %s non è di tipo replicato. Questo volume deve essere di un tipo " "di volume con la specifica supplementare replication_enabled impostata su " "' True' per supportare le azioni di replica." #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "Il volume %s è online. Impostare il volume su offline per la gestione " "tramite OpenStack." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Il volume %s non deve fare parte di un gruppo di coerenza." #, python-format msgid "Volume %s must not be replicated." msgstr "Il volume %s non deve essere replicato." #, python-format msgid "Volume %s must not have snapshots." msgstr "Il volume %s non deve avere istantanee." #, python-format msgid "Volume %s not found." msgstr "Volume %s non trovato." 
#, python-format msgid "Volume %s: Error trying to extend volume" msgstr "Volume %s: Errore durante il tentativo di estendere il volume" #, python-format msgid "Volume (%s) already exists on array" msgstr "Il volume (%s) esiste già nell'array" #, python-format msgid "Volume (%s) already exists on array." msgstr "Il volume (%s) esiste già sull'array." #, python-format msgid "Volume Group %s does not exist" msgstr "Il gruppo del volume %s non esiste" #, python-format msgid "Volume Type %(id)s already exists." msgstr "Il tipo di volume %(id)s esiste già." #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "Il tipo di volume %(type_id)s non contiene specifica supplementare con la " "chiave %(id)s." #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "L'eliminazione del tipo di volume %(volume_type_id)s non è consentita con i " "volumi presenti con il tipo." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "Il tipo di volume %(volume_type_id)s non contiene specifiche supplementari " "con la chiave %(extra_specs_key)s." msgid "Volume Type id must not be None." msgstr "L'id del tipo di volume non deve essere None." #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "Volume [%(cb_vol)s] non trovato nella memoria CloudByte corrispondente al " "volume OpenStack [%(ops_vol)s]." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "Volume [%s] non rilevato nell'archivio CloudByte." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "" "Impossibile trovare il collegamento del volume con il filtro: %(filter)s ." #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "Configurazione backend del volume non valida: %(reason)s" msgid "Volume by this name already exists" msgstr "Il volume con questo nome già esiste" msgid "Volume cannot be restored since it contains snapshots." msgstr "Impossibile ripristinare il volume perché contiene delle istantanee." msgid "Volume create failed while extracting volume ref." msgstr "" "Creazione del volume non riuscita durante l'estrazione del riferimento del " "volume." #, python-format msgid "Volume device file path %s does not exist." msgstr "Il percorso del file del dispositivo del volume %s non esiste." #, python-format msgid "Volume device not found at %(device)s." msgstr "Il dispositivo del volume non è stato trovato in %(device)s." #, python-format msgid "Volume driver %s not initialized." msgstr "Il driver di volume %s non è inizializzato." msgid "Volume driver not ready." msgstr "Driver del volume non pronto." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "Il driver del volume ha riportato un errore: %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "" "Il volume ha un'istantanea temporanea che non può essere eliminata in questo " "momento." msgid "Volume has children and cannot be deleted!" msgstr "Il volume ha elementi child e non può essere eliminato." #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "Il volume nel gruppo di coerenza %s è collegato. È necessario prima " "scollegarlo." msgid "Volume in consistency group still has dependent snapshots." 
msgstr "" "Il volume nel gruppo di coerenza dispone ancora di istantanee dipendenti." #, python-format msgid "Volume is attached to a server. (%s)" msgstr "Il volume è collegato a un server. (%s) " msgid "Volume is in-use." msgstr "Volume attualmente utilizzato." msgid "Volume is not available." msgstr "Il volume non è disponibile. " msgid "Volume is not local to this node" msgstr "Per questo nodo volume non è locale" msgid "Volume is not local to this node." msgstr "Il volume non è locale rispetto a questo nodo." msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "È stato richiesto il backup dei metadati di volume ma questo driver non " "supporta ancora questa funzione." #, python-format msgid "Volume migration failed: %(reason)s" msgstr "Migrazione volume non riuscita: %(reason)s" msgid "Volume must be available" msgstr "Il volume deve essere disponibile" msgid "Volume must be in the same availability zone as the snapshot" msgstr "" "Il volume deve trovarsi nell'area di disponibilità così come l'istantanea" msgid "Volume must be in the same availability zone as the source volume" msgstr "" "Il volume deve trovarsi nella stessa area di disponibilità così come il " "volume di origine" msgid "Volume must have a volume type" msgstr "Il volume deve avere un tipo di volume" msgid "Volume must not be part of a consistency group." msgstr "Il volume non deve far parte di un gruppo di coerenza." msgid "Volume must not be replicated." msgstr "Il volume non deve essere replicato." msgid "Volume must not have snapshots." msgstr "Il volume non deve avere istantanee." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "Volume non trovato per l'istanza %(instance_id)s." msgid "Volume not found on configured storage backend." msgstr "Volume non trovato sul backend di archiviazione configurato." msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "Volume non trovato nel backend di archiviazione configurato. Se il nome " "volume contiene \"/\", ridenominarlo e provare a gestirlo di nuovo." msgid "Volume not found on configured storage pools." msgstr "Volume non trovato nei pool di archiviazione configurati." msgid "Volume not found." msgstr "Volume non trovato." msgid "Volume not unique." msgstr "Volume non univoco." msgid "Volume not yet assigned to host." msgstr "Il volume non è stato ancora assegnato all'host." msgid "Volume reference must contain source-name element." msgstr "Il riferimento al volume deve contenere l'elemento source-name." #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "Impossibile trovare la replica del volume per %(volume_id)s." #, python-format msgid "Volume service %s failed to start." msgstr "Avvio del servizio volume %s non riuscito. " msgid "Volume should have agent-type set as None." msgstr "Il volume deve avere agent-type impostato su Nessuno. " #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "La dimensione del volume %(volume_size)sGB non può essere minore della " "dimensione minDisk dell'immagine %(min_disk)sGB." 
#, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "" "La dimensione del volume '%(size)s' deve essere un numero intero e maggiore " "di 0" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "La dimensione del volume '%(size)s'GB non può essere minore della dimensione " "del volume originale %(source_size)sGB. Deve essere >= la dimensione del " "volume originale." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "La dimensione del volume '%(size)s'GB non può essere minore della dimensione " "dell'istantanea %(snap_size)sGB. Deve essere >= la dimensione " "dell'istantanea originale." msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "La dimensione del volume è stata ridotta dall'ultimo backup. Eseguire un " "backup completo." msgid "Volume size must be a multiple of 1 GB." msgstr "La dimensione del volume deve essere un multiplo di 1 GB." msgid "Volume size must be multiple of 1 GB." msgstr "La dimensione del volume deve essere un multiplo di 1 GB." msgid "Volume size must multiple of 1 GB." msgstr "La dimensione del volume deve essere un multiplo di 1 GB. " #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" "Lo stato del volume deve essere disponibile, ma lo stato corrente è: %s" msgid "Volume status is in-use." msgstr "Lo stato del volume è in-use." #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "Lo stato del volume deve essere \"available\" o \"in-use\" per l'istantanea. " "(è %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "Lo stato del volume deve essere \"available\" o \"in-use\"." #, python-format msgid "Volume status must be %s to reserve." msgstr "Lo stato del volume deve essere %s per eseguire la prenotazione." msgid "Volume status must be 'available'." msgstr "Lo stato del volume deve essere 'available'." msgid "Volume to Initiator Group mapping already exists" msgstr "L'associazione del volume al gruppo iniziatori già esiste" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "Il volume di cui deve essere eseguito il backup deve essere disponibile o in " "uso, ma lo stato corrente è \"%s\"." msgid "Volume to be restored to must be available" msgstr "Il volume da ripristinare deve essere disponibile" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "Impossibile trovare il tipo di volume %(volume_type_id)s." #, python-format msgid "Volume type ID '%s' is invalid." msgstr "L'ID tipo volume '%s' non è valido." #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "L'accesso di tipo Volume per la combinazione %(volume_type_id)s / " "%(project_id)s già esiste." #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "L'accesso di tipo Volume per la combinazione %(volume_type_id)s / " "%(project_id)s non è stato trovato." #, python-format msgid "Volume type does not match for share %s." msgstr "Il tipo di volume non corrisponde per la condivisione %s." 
#, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "La codifica del tipo di volume per il tipo %(type_id)s esiste già." #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "La codifica del tipo di volume per il tipo %(type_id)s non esiste." msgid "Volume type name can not be empty." msgstr "Il nome tipo di volume non può essere vuoto." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" "Impossibile trovare il tipo di volume con il nome %(volume_type_name)s." #, python-format msgid "Volume with volume id %s does not exist." msgstr "Il volume con id volume %s non esiste. " #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "Il volume: %(volumeName)s non è un volume concatenato. È possibile solo " "eseguire l'estensione su un volume concatenato. Uscire..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" "Il volume: %(volumeName)s non è stato aggiunto al gruppo di archiviazione " "%(sgGroupName)s." #, python-format msgid "Volume: %s could not be found." msgstr "Impossibile trovare il volume %s." #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Il volume: %s è già in fase di importazione da Cinder." msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "" "I volumi verranno suddivisi in oggetti di questa dimensione (in megabyte)." msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "Volumi/account superati sugli account SolidFire primario e secondario." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "Config VzStorage 'vzstorage_used_ratio' non valida, deve essere > 0 e <= " "1.0: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "File config VzStorage in %(config)s non esiste." msgid "Wait replica complete timeout." msgstr "Timeout di attesa del completamento replica." #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Sincronizzazione attesa non riuscita. Stato esecuzione: %s." msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "In attesa che tutti i nodi partecipino al cluster. Verificare che tutti i " "daemon sheep siano in esecuzione. " msgid "We should not do switch over on primary array." msgstr "Non è consigliabile passare all'array primario." msgid "Wrong resource call syntax" msgstr "Sintassi chiamata risorsa errata" msgid "X-IO Volume Driver exception!" msgstr "Eccezione X-IO Volume Driver!" msgid "XML support has been deprecated and will be removed in the N release." msgstr "Il supporto XML è obsoleto e verrà rimosso nella release N." msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "XtremIO non configurato correttamente, nessun portale iscsi trovato" msgid "XtremIO not initialized correctly, no clusters found" msgstr "" "XtremIO non è inizializzato correttamente, non è stato trovato nessun cluster" msgid "You must implement __call__" msgstr "È necessario implementare __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "È necessario installare hpe3parclient prima di utilizzare i driver 3PAR. 
" "Eseguire \"pip install python-3parclient\" per installare hpe3parclient." msgid "You must supply an array in your EMC configuration file." msgstr "È necessario fornire un array nel file di configurazione EMC." #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "La dimensione originale: %(originalVolumeSize)s GB è maggiore di %(newSize)s " "GB. È supportata solo l'operazione di estensione. Uscire..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "Zona" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Politica di zona %s non riconosciuta" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "" "_create_and_copy_vdisk_data: Impossibile ottenere gli attributi per il vdisk " "%s." msgid "_create_host failed to return the host name." msgstr "_create_host non riuscito nella restituzione del nome host." msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: impossibile convertire il nome host. Il nome host non è " "unicode o stringa." msgid "_create_host: No connector ports." msgstr "_create_host: Nessuna porta connettore." msgid "_create_local_cloned_volume, Replication Service not found." msgstr "_create_local_cloned_volume, Servizio di replica non trovato." #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume, nome volume: %(volumename)s, nome volume di " "origine: %(sourcevolumename)s, istanza volume di origine: %(source_volume)s, " "istanza volume di destinazione: %(target_volume)s, Codice di ritorno: " "%(rc)lu, Errore: %(errordesc)s." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - non è stato trovato un messaggio di successo " "nell'output CLI.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgid "_create_volume_name, id_code is None." msgstr "_create_volume_name, id_code è None." msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession, Impossibile trovare il servizio di replica" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession, copy session type is undefined! sessione di copia: " "%(cpsession)s, tipo di copia: %(copytype)s." #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Codice di ritorno: %(rc)lu, Errore: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume, volumename: %(volumename)s, Codice di ritorno: %(rc)lu, " "Errore: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "non trovato." 
#, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, impossibile " "connettersi a ETERNUS." msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "" "_extend_volume_op: l'estensione di un volume con le istantanee non è " "supportata." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, connector: %(connector)s, Associatori: " "FUJITSU_AuthorizedTarget, impossibile connettersi a ETERNUS." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "impossibile connettersi a ETERNUS." #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, impossibile connettersi a ETERNUS." #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "impossibile connettersi a ETERNUS." #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "impossibile connettersi a ETERNUS." #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" "_find_initiator_names, connector: %(connector)s, iniziatore non trovato." #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, impossibile " "connettersi a ETERNUS." #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, impossibile " "connettersi a ETERNUS." #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg, nome file: %(filename)s, tagname: %(tagname)s, i dati sono " "None. Modificare il file di configurazione del driver e correggerlo." #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection, nome file: %(filename)s, ip: %(ip)s, porta: " "%(port)s, utente: %(user)s, password: ****, url: %(url)s, NON RIUSCITO." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn non " "trovato." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, impossibile connettersi a ETERNUS." 
#, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "impossibile connettersi a ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, " "impossibile connettersi a ETERNUS." #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: le intestazioni e i valori dell'attributo non corrispondono.\n" " Intestazioni: %(header)s\n" " Valori: %(row)s." msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "_get_host_from_connector non è riuscito a restituire il nome host per il " "connettore." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, richiamo host-affinity da aglist/vol_instance non riuscito, " "affinitygroup: %(ag)s, ReferenceNames, impossibile connettersi a ETERNUS." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, richiamo istanza host-affinity non riuscito, volmap: " "%(volmap)s, GetInstance, impossibile connettersi a ETERNUS." msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, Associatori: FUJITSU_SAPAvailableForElement, impossibile " "connettersi a ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, impossibile " "connettersi a ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, impossibile connettersi a ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, impossibile connettersi " "a ETERNUS." msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "" "_get_target_port, EnumerateInstances, impossibile connettersi a ETERNUS." #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "" "_get_target_port, protcol: %(protocol)s, porta di destinazione non trovata." #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay: Impossibile trovare l'istantanea denominata %s" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay: Impossibile trovare l'id volume %s" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: È necessario specificare source-name." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" 
msgstr "" "_get_vdisk_map_properties: Impossibile ottenere le informazioni di " "connessione FC per la connessione di host-volume. L'host per le connessioni " "FC è configurato correttamente?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: Nessun nodo trovato nel gruppo I/O %(gid)s per il " "volume %(vol)s." #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, iniziatore: %(initiator)s, destinazione: %(tgt)s, aglist: " "%(aglist)s, Storage Configuration Service non trovato." #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "non trovato." #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Codice di ritorno: %(rc)lu, Errore: %(errordesc)s." #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, impossibile connettersi a ETERNUS." msgid "_update_volume_stats: Could not get storage pool data." msgstr "" "_update_volume_stats: Impossibile ottenere i dati del pool dell'archivio." #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete, cpsession: %(cpsession)s, lo stato della sessione " "di copia è BROKEN." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "add_vdisk_copy non riuscito: Esiste una copia del volume %s. L'aggiunta di " "un'altra copia eccede il limite di 2 copie." msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" "add_vdisk_copy avviato senza una copia del disco virtuale nel pool previsto." #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants deve essere un booleano, ricevuto '%s'." 
msgid "already created" msgstr "già creato" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "collegamento istantanea dal nodo remoto " #, python-format msgid "attribute %s not lazy-loadable" msgstr "l'attributo %s non è caricabile" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s non è riuscito a creare un collegamento reale del " "dispositivo da %(vpath)s a %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s non ha ricevuto la notifica di backup riuscito dal " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s non ha eseguito dsmc a causa di argomenti non validi " "presenti su %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "backup: %(vol_id)s non ha eseguito dsmc su %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "backup: %(vol_id)s non riuscito. %(path)s non è un file." #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "backup: %(vol_id)s non riuscito. %(path)s è un tipo file non previsto. I " "file di blocco o regolari sono supportati; il modo file reale è %(vol_mode)s." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "backup: %(vol_id)s non riuscito. Impossibile ottenere il percorso reale del " "volume su %(path)s." msgid "being attached by different mode" msgstr "in fase di collegamento tramite una modalità differente" #, python-format msgid "call failed: %r" msgstr "chiamata non riuscita: %r" msgid "call failed: GARBAGE_ARGS" msgstr "chiamata non riuscita: GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "chiamata non riuscita: PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "chiamata non riuscita: PROG_MISMATCH: %r" msgid "call failed: PROG_UNAVAIL" msgstr "chiamata non riuscita: PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "impossibile trovare lun-map, ig:%(ig)s vol:%(vol)s" msgid "can't find the volume to extend" msgstr "impossibile trovare il volume da estendere" msgid "can't handle both name and index in req" msgstr "impossibile gestire entrambi il nome e l'indice nella richiesta" msgid "cannot understand JSON" msgstr "impossibile riconoscere JSON" msgid "cannot understand XML" msgstr "Impossibile riconoscere XML" #, python-format msgid "cg-%s" msgstr "gc-%s" msgid "cgsnapshot assigned" msgstr "istantanea cg assegnata" msgid "cgsnapshot changed" msgstr "istantanea cg modificata" msgid "cgsnapshots assigned" msgstr "istantanee cg assegnate" msgid "cgsnapshots changed" msgstr "istantanee cg modificate" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." 
msgstr "" "check_for_setup_error: La password o la chiave privata SSH è obbligatoria " "per l'autenticazione: impostare l'opzione san_password o san_private_key." msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error: Impossibile determinare l'ID del sistema." msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error: Impossibile determinare il nome del sistema." msgid "check_hypermetro_exist error." msgstr "Errore check_hypermetro_exist." #, python-format msgid "clone depth exceeds limit of %s" msgstr "la profondità del clone supera il limite di %s" msgid "consistencygroup assigned" msgstr "gruppo di coerenza assegnato" msgid "consistencygroup changed" msgstr "gruppo di coerenza modificato" msgid "control_location must be defined" msgstr "control_location deve essere definito" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume, Il volume di origine non esiste in ETERNUS." #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume, nome istanza volume di destinazione: " "%(volume_instancename)s, Richiamo istanza non riuscito." msgid "create_cloned_volume: Source and destination size differ." msgstr "" "create_cloned_volume: la dimensione dell'origine e della destinazione sono " "differenti." #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume: la dimensione del volume di origine %(src_vol)s è " "%(src_size)dGB e non si adatta al volume di destinazione %(tgt_vol)s di " "dimensione %(tgt_size)dGB." msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src deve essere creato da un'istantanea CG o da " "un CG di origine." msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src supporta solo un'origine istantanea cg o " "un'origine gruppo di coerenza. Non possono essere utilizzate più origini." msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src supporta un'origine cgsnapshot o un'origine " "del gruppo di coerenza. Non possono essere utilizzate più origini." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: vdisk origine %(src)s (%(src_id)s) non esiste." #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy: vdisk origine %(src)s non esiste." msgid "create_host: Host name is not unicode or string." msgstr "create_host: il nome host non è unicode o stringa." msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: Nessun iniziatore o wwpns fornito." msgid "create_hypermetro_pair error." msgstr "Errore create_hypermetro_pair." #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "create_snapshot, eternus_pool: %(eternus_pool)s, pool non trovato." 
#, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot, snapshotname: %(snapshotname)s, nome volume di origine: " "%(volumename)s, vol_instance.path: %(vol_instance)s, nome volume di " "destinazione: %(d_volumename)s, pool: %(pool)s, Codice di ritorno: %(rc)lu, " "Errore: %(errordesc)s." #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot, volumename: %(s_volumename)s, volume di origine non trovato " "su ETERNUS." #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "" "create_snapshot, volumename: %(volumename)s, Server di replica non trovato." #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot: Lo stato del volume deve essere \"available\" o \"in-use\" " "per l'istantanea. Lo stato non valido è %s." msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: richiesta del volume di origine non riuscita." #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume, volume: %(volume)s, EnumerateInstances, impossibile " "connettersi a ETERNUS." #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service non trovato." #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Codice di ritorno: %(rc)lu, Errore: %(errordesc)s." msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "" "create_volume_from_snapshot, Il volume di origine non esiste in ETERNUS." #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot, nome istanza volume di destinazione: " "%(volume_instancename)s, Richiamo istanza non riuscito." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "create_volume_from_snapshot: l'istantanea %(name)s non esiste." #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: Lo stato dell'istantanea deve essere \"available" "\" per la creazione del volume. Lo stato non valido è: %s." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" "create_volume_from_snapshot: la dimensione dell'origine e della destinazione " "sono differenti." msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "create_volume_from_snapshot: Dimensione del volume diversa dal volume basato " "sull'istantanea." msgid "deduplicated and auto tiering can't be both enabled." 
msgstr "" "Non è possibile abilitare entrambi il livellamento automatico e deduplicato." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "eliminazione: %(vol_id)s non ha eseguito dsmc a causa di argomenti non " "validi con stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "eliminazione: %(vol_id)s non ha eseguito dsmc con stdout: %(out)s\n" " stderr: %(err)s" msgid "delete_hypermetro error." msgstr "Errore delete_hypermetro." #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator: ACL %s non trovato. Continuare." msgid "delete_replication error." msgstr "Errore delete_replication." #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" "eliminazione dell'istantanea %(snapshot_name)s che contiene dei volume " "dipendenti" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "eliminazione del volume %(volume_name)s che contiene l'istantanea" msgid "detach snapshot from remote node" msgstr "scollegamento istantanea dal nodo remoto " msgid "do_setup: No configured nodes." msgstr "do_setup: Nessun nodo configurato." msgid "element is not a child" msgstr "l'elemento non è un child" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries deve essere maggiore o uguale a 0" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "errore di scrittura dell'oggetto in swift, MD5 dell'oggetto in swift " "%(etag)s non è uguale a MD5 dell'oggetto inviato a swift %(md5)s" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "extend_volume, eternus_pool: %(eternus_pool)s, pool non trovato." #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service non trovato." #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume, volumename: %(volumename)s, Codice di ritorno: %(rc)lu, " "Errore: %(errordesc)s, PoolType: %(pooltype)s." #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "extend_volume, volumename: %(volumename)s, volume non trovato." msgid "failed to create new_volume on destination host" msgstr "impossibile creare new_volume nell'host di destinazione" msgid "fake" msgstr "fake" #, python-format msgid "file already exists at %s" msgstr "il file esiste già in %s" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "fileno non è supportato da SheepdogIOWrapper " msgid "fileno() not supported by RBD()" msgstr "fileno() non supportato da RBD()" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "Il filesystem %s non esiste nell'applicazione Nexenta Store" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." 
msgstr "" "flashsystem_multihostmap_enabled è impostato su False, associazione di più " "host non consentita. CMMVC6071E L'associazione VDisk a host non è stata " "creata perché il il VDisk è già associato ad un host." msgid "flush() not supported in this version of librbd" msgstr "flush() non è supportato in questa versione di librbd" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s sottoposto a backup da: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s sottoposto a backup da: %(backing_file)s" msgid "force delete" msgstr "forzare eliminazione " msgid "get_hyper_domain_id error." msgstr "Errore get_hyper_domain_id." msgid "get_hypermetro_by_id error." msgstr "Errore get_hypermetro_by_id." #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params: impossibile ottenere l'IP di destinazione per l'iniziatore " "%(ini)s, controllare il file di configurazione." #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: Impossibile ottenere gli attributi per il volume %s" msgid "glance_metadata changed" msgstr "glance_metadata modificato" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode è impostato su copy_on_write, ma %(vol)s e %(img)s " "appartengono a file system differenti." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode è impostato su copy_on_write, ma %(vol)s e %(img)s " "appartengono a fileset differenti." #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "hgst_group %(grp)s e hgst_user %(usr)s devono corrispondere a utenti/gruppi " "validi in cinder.conf" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "hgst_net %(net)s specificato in cinder.conf non trovato nel cluster" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "hgst_redundancy deve essere impostato su 0 (non-HA) o 1 (HA) in cinder.conf. " msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "hgst_space_mode deve essere un octal/int in cinder.conf" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "Server hgst_storage %(svr)s non nel formato :" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "hgst_storage_servers deve essere definito in cinder.conf" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "Il servizio http potrebbe essere stato bruscamente disabilitato o inserito " "in stato di manutenzione durante questa operazione." msgid "id cannot be None" msgstr "l'id non può essere None" #, python-format msgid "image %s not found" msgstr "impossibile trovare l'immagine %s" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "initialize_connection, volume: %(volume)s, Volume non trovato." #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "" "initialize_connection: impossibile ottenere gli attributi per il volume %s." #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." 
msgstr "initialize_connection: manca l'attributo volume per il volume %s." #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: Nessun nodo trovato nel gruppo I/O %(gid)s per il " "volume %(vol)s." #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: vdisk %s non definito." #, python-format msgid "invalid user '%s'" msgstr "utente non valido '%s'" #, python-format msgid "iscsi portal, %s, not found" msgstr "portale iscsi, %s, non trovato" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "iscsi_ip_address deve essere impostato nel file di configurazione quando si " "usa il protocollo 'iSCSI'." msgid "iscsiadm execution failed. " msgstr "Esecuzione di iscsiadm non riuscita. " #, python-format msgid "key manager error: %(reason)s" msgstr "errore gestore chiavi: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key non definito" msgid "limit param must be an integer" msgstr "parametro limite deve essere un numero intero" msgid "limit param must be positive" msgstr "parametro limite deve essere positivo" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "manage_existing non può gestire un volume connesso agli host. Disconnettere " "questo volume dagli host esistenti prima dell'importazione" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing richiede una chiave 'name' per identificare un volume " "esistente." #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot: Errore durante la gestione della risposta " "esistente %(ss)s sul volume %(vol)s" #, python-format msgid "marker [%s] not found" msgstr "indicatore [%s] non trovato" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "apici mancanti per mdiskgrp %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" "migration_policy deve essere 'on-demand' o 'never', è stato passato: %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "" "mkfs non riuscito sul volume %(vol)s, il messaggio di errore è: %(err)s." msgid "mock" msgstr "mock" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs non è installato" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "drbdmanage ha trovato più risorse con il nome %s" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "Rilevate più risorse con ID istantanea %s" msgid "name cannot be None" msgstr "il nome non può essere None" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path: Impossibile trovare lo strumento NAVISECCLI %(path)s." 
#, python-format msgid "no REPLY but %r" msgstr "nessuna RISPOSTA ma %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "in drbdmanage non è stata trovata alcuna istantanea con id %s" #, python-format msgid "not exactly one snapshot with id %s" msgstr "non esattamente una sola istantanea con id %s" #, python-format msgid "not exactly one volume with id %s" msgstr "non esattamente un solo volume con id %s" #, python-format msgid "obj missing quotes %s" msgstr "apici mancanti per obj %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled non è disattivo." msgid "progress must be an integer percentage" msgstr "l'avanzamento deve essere una percentuale a numero intero" msgid "promote_replica not implemented." msgstr "promote_replica non implementata." msgid "provider must be defined" msgstr "il provider deve essere definito" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "qemu-img %(minimum_version)s o versione successiva richiesto da questo " "driver del volume. Versione qemu-img corrente: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img non è installata e l'immagine è di tipo %s. È possibile utilizzare " "solo le immagini RAW se qemu-img non è installata." msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img non è installata e il formato del disco non è specificato. È " "possibile utilizzare solo le immagini RAW se qemu-img non è installata." msgid "rados and rbd python libraries not found" msgstr "le librerie python rados e rbd non sono state trovate" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted può essere solo 'no', 'yes' o 'only', non %r" #, python-format msgid "replication_device should be configured on backend: %s." msgstr "replication_device deve essere configurato sul backend: %s." #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "replication_device con backend_id [%s] mancante." #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover non riuscito. %s non trovato." msgid "replication_failover failed. Backend not configured for failover" msgstr "" "replication_failover non riuscito. Backend non configurato per il failover" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "ripristino: %(vol_id)s non ha eseguito dsmc a causa di argomenti non validi " "presenti su %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "ripristino: %(vol_id)s non ha eseguito dsmc su %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "restore: %(vol_id)s non riuscito.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." 
msgstr "" "restore_backup interrotto, l'elenco di oggetti effettivo non corrisponde " "all'elenco di oggetti archiviato nei metadati." msgid "root element selecting a list" msgstr "l'elemento root seleziona un elenco" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "Per rtslib_fb manca il membro %s: potrebbe essere necessario un python-" "rtslib-fb più recente." msgid "san_ip is not set." msgstr "san_ip non impostato." msgid "san_ip must be set" msgstr "san_ip deve essere impostato" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip: Configurazione campo obbligatorio. san_ip non è impostato." msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "san_login e/o san_password non impostati per il driver Datera in cinder." "conf. Impostare tali informazioni ed avviare il servizio cinder-volume " "nuovamente." msgid "serve() can only be called once" msgstr "il servizio() può essere chiamato solo una volta" msgid "service not found" msgstr "servizio non trovato" msgid "snapshot does not exist" msgstr "l'istantanea non esiste" #, python-format msgid "snapshot id:%s not found" msgstr "ID istantanea:%s non trovato" #, python-format msgid "snapshot-%s" msgstr "istantanea-%s " msgid "snapshots assigned" msgstr "istantanee assegnate" msgid "snapshots changed" msgstr "istantanee modificate" #, python-format msgid "source vol id:%s not found" msgstr "ID volume di origine: %s non trovato" #, python-format msgid "source volume id:%s is not replicated" msgstr "ID volume origine:%s non replicato" msgid "source-name cannot be empty." msgstr "source-name non può essere vuoto." msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "Il formato di source-name deve essere : 'vmdk_path@vm_inventory_path'." #, python-format msgid "status must be %s and" msgstr "lo stato deve essere %s e" msgid "status must be available" msgstr "lo stato deve essere available" msgid "stop_hypermetro error." msgstr "Errore stop_hypermetro." msgid "subclasses must implement construct()!" msgstr "le sottoclassi devono implementare il costrutto()!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo non riuscito, proseguire come se nulla fosse successo" msgid "sync_hypermetro error." msgstr "Errore sync_hypermetro." msgid "sync_replica not implemented." msgstr "sync_replica non implementata." #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli non è installato e non è stato possibile creare la directory " "predefinita (%(default_path)s): %(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "" "terminate_connection: Impossibile acquisire il nome host dal connettore." 
msgid "timeout creating new_volume on destination host" msgstr "timeout durante la creazione di new_volume nell'host di destinazione" msgid "too many body keys" msgstr "troppe chiavi del corpo" #, python-format msgid "umount: %s: not mounted" msgstr "smontaggio: %s: non montato" #, python-format msgid "umount: %s: target is busy" msgstr "smontaggio: %s: la destinazione è occupata" msgid "umount: : some other error" msgstr "smontaggio: : qualche altro errore" msgid "umount: : target is busy" msgstr "smontaggio: : la destinazione è occupata" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot: Impossibile trovare l'istantanea denominata %s" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot: Impossibile trovare l'id volume %s" #, python-format msgid "unrecognized argument %s" msgstr "argomento non riconosciuto %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "algoritmo di compressione non supportato: %s" msgid "valid iqn needed for show_target" msgstr "iqn valido necessario per show_target" #, python-format msgid "vdisk %s is not defined." msgstr "vdisk %s non definito." msgid "vmemclient python library not found" msgstr "Libreria python vmemclient non trovata " #, python-format msgid "volume %s not found in drbdmanage" msgstr "Il volume %s non è stato trovato in drbdmanage" msgid "volume assigned" msgstr "volume assegnato" msgid "volume changed" msgstr "volume modificato" msgid "volume does not exist" msgstr "il volume non esiste" msgid "volume is already attached" msgstr "il volume è già collegato" msgid "volume is not local to this node" msgstr "Per questo nodo, volume non è locale" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "La dimensione del volume %(volume_size)d è troppo piccola per il ripristino " "del backup la cui dimensione è %(size)d." #, python-format msgid "volume size %d is invalid." msgstr "la dimensione del volume %d non è valida." msgid "volume_type cannot be None" msgstr "volume_type non può essere None" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "è necessario fornire volume_type quando si crea un volume in un gruppo di " "gruppo." msgid "volume_type_id cannot be None" msgstr "volume_type_id non può essere None" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" "Per creare il gruppo di coerenza %(name)s è necessario fornire volume_types." #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "è necessario fornire volume_types per creare un gruppo di coerenza %s." msgid "volumes assigned" msgstr "volumi assegnati" msgid "volumes changed" msgstr "volumi modificati" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition: %s scaduto." #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "" "La proprietà zfssa_manage_policy deve essere impostata su 'strict' o " "'loose'. Il valore corrente è : %s." msgid "{} is not a valid option." msgstr "{} non è un'opzione valida." 
cinder-8.0.0/cinder/locale/de/0000775000567000056710000000000012701406543017221 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/de/LC_MESSAGES/0000775000567000056710000000000012701406543021006 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/de/LC_MESSAGES/cinder.po0000664000567000056710000131177512701406257022623 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # Alec Hans , 2013 # Ettore Atalan , 2014 # FIRST AUTHOR , 2011 # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Frank Kloeker , 2016. #zanata # Monika Wolf , 2016. #zanata # Tom Cocozzello , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev21\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-24 10:43+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 03:10+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack-Cinder-Version: %(version)s\n" #, python-format msgid " but size is now %d" msgstr " aber die Größe ist jetzt %d" #, python-format msgid " but size is now %d." msgstr " aber die Größe ist jetzt %d." msgid " or " msgstr " oder " #, python-format msgid "%(attr)s is not set." msgstr "%(attr)s ist nicht festgelegt." #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing kann einen Datenträger, der mit Hosts verbunden " "ist, nicht verwalten. Trennen Sie die Verbindung dieses Datenträgers zu " "vorhandenen Hosts vor dem Importieren" #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "Ergebnis: %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s: Berechtigung verweigert." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: Fehler mit nicht erwarteter CLI-Ausgabe.\n" " Befehl: %(cmd)s\n" " Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "Statuscode: %(_status)s\n" "Nachrichtentext: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, subjectAltName: %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service."
msgstr "" "%(msg_type)s: Erstellen von NetworkPortal: Stellen Sie sicher, dass Port " "%(port)d unter IP %(ip)s nicht durch einen anderen Service verwendet wird." #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "Für %(name)s sind mindestens %(min_length)s Zeichen erforderlich." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s hast mehr als %(max_length)s Zeichen." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: Sicherung %(bck_id)s, Datenträger %(vol_id)s fehlgeschlagen. " "Sicherungsobjekt weist unerwarteten Modus auf. Image- oder Dateisicherungen " "unterstützt, tatsächlicher Modus ist %(vol_mode)s." #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "%(service)s-Service ist nicht %(status)s auf Speicher-Appliance %(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s muss <= %(max_value)d sein" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s muss >= %(min_value)d sein" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "Wert %(worker_name)s von %(workers)d ist ungültig; muss größer als 0 sein." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "%s \"data\" ist nicht im Ergebnis enthalten." #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "Auf %s kann nicht zugegriffen werden. Überprüfen Sie, ob GPFS aktiv ist und " "das Dateisystem angehängt ist." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "Größe von %s kann nicht mithilfe der Klonoperation geändert werden, da keine " "Blocks enthalten sind." #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "Größe von %s kann nicht mithilfe der Klonoperation geändert werden, da auf " "einem komprimierten Datenträger gehostet" #, python-format msgid "%s configuration option is not set." msgstr "Konfigurationsoption %s ist nicht festgelegt." #, python-format msgid "%s does not exist." msgstr "%s ist nicht vorhanden." #, python-format msgid "%s is not a directory." msgstr "%s ist kein Verzeichnis." #, python-format msgid "%s is not a string or unicode" msgstr "%s ist keine Zeichenkette oder Unicode" #, python-format msgid "%s is not installed" msgstr "%s ist nicht installiert" #, python-format msgid "%s is not installed." msgstr "%s ist nicht installiert." #, python-format msgid "%s is not set" msgstr "%s ist nicht festgelegt" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" "%s ist nicht festgelegt, ist jedoch für ein gültiges Replikationsgerät " "erforderlich." #, python-format msgid "%s is not set." msgstr "%s ist nicht festgelegt." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s muss ein gültiges raw- oder qcow2-Image sein." #, python-format msgid "%s must be an absolute path." msgstr "%s muss ein absoluter Pfad sein." #, python-format msgid "%s must be an integer." msgstr "%s muss eine Ganzzahl sein." 
#, python-format msgid "%s not set in cinder.conf" msgstr "%s in cinder.conf nicht festgelegt." #, python-format msgid "%s not set." msgstr "%s nicht festgelegt." #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s' ist für flashsystem_connection_protocol in Konfigurationsdatei " "ungültig. gültige Werte sind %(enabled)s." msgid "'active' must be present when writing snap_info." msgstr "'active' muss vorhanden sein, wenn snap_info geschrieben wird." msgid "'consistencygroup_id' must be specified" msgstr "'consistencygroup_id' muss angegeben werden" msgid "'qemu-img info' parsing failed." msgstr "Auswertung von 'qemu-img info' fehlgeschlagen." msgid "'status' must be specified." msgstr "'status' muss angegeben werden." msgid "'volume_id' must be specified" msgstr "'volume_id' muss angegeben werden" msgid "'{}' object has no attribute '{}'" msgstr "'{}'-Objekt hat kein Attribut '{}'." #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(Befehl: %(cmd)s) (Rückgabecode: %(exit_code)s) (Standardausgabe: " "%(stdout)s) (Standard-Fehlerausgabe: %(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "Eine LUN (HLUN) wurde nicht gefunden. (LDEV: %(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "" "Es wurde eine gleichzeitige, möglicherweise widersprüchliche, Anforderung " "gestellt." #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "Es wurde keine freie LUN (HLUN) gefunden. Fügen Sie eine andere Hostgruppe " "hinzu (Logische Einheit: '%(ldev)s')" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" "Eine Hostgruppe konnte nicht hinzugefügt werden. (Port: %(port)s, Name: " "%(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "Eine Hostgruppe konnte nicht gelöscht werden. (Port: %(port)s, GID: %(gid)s, " "Name: '%(name)s')" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "Eine Hostgruppe ist ungültig. (Hostgruppe: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "" "Ein Paar kann nicht gelöscht werden. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "Es konnte kein Paar erstellt werden. Die maximale Paaranzahl wurde " "überschritten. (Kopiermethode: %(copy_method)s, P-VOL: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "Ein Parameter ist ungültig. (%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "Ein Parameterwert ist ungültig. (%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "Ein Pool konnte nicht gefunden werden. (Pool-ID: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Ein Momentaufnahmenstatus ist ungültig. (Status: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "Für ein Failover muss ein gültiges sekundäres Ziel angegeben werden." msgid "A volume ID or share was not specified." 
msgstr "Es wurde keine Datenträger-ID oder Freigabe angegeben." #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "Ein Datenträgerstatus ist ungültig. (Status: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s fehlgeschlagen mit Fehlerzeichenkette %(err)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API-Versionszeichenkette %(version)s ist im falschen Format. Muss im Format " "sein MajorNum.MinorNum." msgid "API key is missing for CloudByte driver." msgstr "API-Schlüssel für CloudByte-Treiber fehlt." #, python-format msgid "API response: %(response)s" msgstr "API-Antwort: %(response)s" #, python-format msgid "API response: %s" msgstr "API-Antwort: %s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API Version %(version)s ist nicht unterstützt für diese Methode." msgid "API version could not be determined." msgstr "API-Version konnte nicht bestimmt werden." msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "Es wird versucht, untergeordnete Projekte mit einer Quote ungleich null zu " "löschen. Dies sollte nicht ausgeführt werden" msgid "Access list not available for public volume types." msgstr "Zugriffsliste ist für öffentliche Datenträgertypen nicht verfügbar." msgid "Activate or deactivate QoS error." msgstr "Fehler beim Aktivieren oder Inaktivieren von QoS." msgid "Activate snapshot error." msgstr "Fehler beim Aktivieren der Momentaufnahme." msgid "Add FC port to host error." msgstr "Fehler beim Hinzufügen des FC-Ports zum Host." msgid "Add fc initiator to array error." msgstr "Fehler beim Hinzufügen des FC-Initiators zum Array." msgid "Add initiator to array error." msgstr "Fehler beim Hinzufügen des Initiators zum Array." msgid "Add lun to cache error." msgstr "Fehler beim Hinzufügen der LUN zum Cache." msgid "Add lun to partition error." msgstr "Fehler beim Hinzufügen der LUN zur Partition." msgid "Add mapping view error." msgstr "Fehler beim Hinzufügen der Zuordnungsansicht." msgid "Add new host error." msgstr "Fehler beim Hinzufügen des neuen Hosts." msgid "Add port to port group error." msgstr "Fehler beim Hinzufügen von Port zur Portgruppe." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "Alle angegebenen Speicherpools, die verwaltet werden sollen, sind nicht " "vorhanden. Überprüfen Sie Ihre Konfiguration. Nicht vorhandene Pools: %s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "" "Eine API-Versionsanforderung muss mit einem VersionedMethod-Objekt " "verglichen werden." #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "In SheepdogDriver ist ein Fehler aufgetreten. (Grund: %(reason)s)" msgid "An error has occurred during backup operation" msgstr "Während dem Datensicherungsvorgang ist ein Fehler aufgetreten" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "" "Bei dem Versuch, die Schattenkopie '%s' zu ändern, ist ein Fehler " "aufgetreten." #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "Beim Suchen nach Datenträger \"%s\" ist ein Fehler aufgetreten." #, python-format msgid "" "An error occurred during the LUNcopy operation. 
LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "Fehler während der LUNcopy-Operation. LUNcopy-Name: %(luncopyname)s. LUNcopy-" "Status: %(luncopystatus)s. LUNcopy-Zustand: %(luncopystate)s." #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "Beim Lesen von Datenträger \"%s\" ist ein Fehler aufgetreten." #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "Beim Schreiben auf Datenträger \"%s\" ist ein Fehler aufgetreten." #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "" "Ein iSCSI-CHAP-Benutzer konnte nicht hinzugefügt werden. (Benutzername: " "%(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "" "Ein iSCSI-CHAP-Benutzer konnte nicht gelöscht werden. (Benutzername: " "%(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "Ein iSCSI-Ziel konnte nicht hinzugefügt werden. (Port: %(port)s, Alias: " "%(alias)s, Ursache: %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "Ein iSCSI-Ziel konnte nicht gelöscht werden. (Port: %(port)s, Zielnummer: " "%(tno)s, Alias: %(alias)s)" msgid "An unknown exception occurred." msgstr "Eine unbekannte Ausnahme ist aufgetreten." msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "" "Ein Benutzer mit einem Token, als dessen Bereich ein Unterprojekt festgelegt " "wurde, darf die Quote der zugehörigen übergeordneten Elemente sehen." msgid "Append port group description error." msgstr "Fehler beim Anhängen der Portgruppenbeschreibung." #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "Anwenden der Zonen und Konfigurationen auf den Switch fehlgeschlagen " "(Fehlercode=%(err_code)s Fehlernachricht=%(err_msg)s)." #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "" "Array ist nicht vorhanden oder ist offline. Aktueller Status des Arrays: %s." msgid "Associate host to hostgroup error." msgstr "Fehler beim Zuordnen des Hosts zur Hostgruppe." msgid "Associate host to mapping view error." msgstr "Fehler beim Zuordnen des Hosts zur Zuordnungsansicht." msgid "Associate initiator to host error." msgstr "Fehler beim Zuordnen des Initiators zum Host." msgid "Associate lun to QoS error." msgstr "Fehler beim Zuordnen der LUN zu QoS." msgid "Associate lun to lungroup error." msgstr "Fehler beim Zuordnen der LUN zur LUN-Gruppe." msgid "Associate lungroup to mapping view error." msgstr "Fehler beim Zuordnen der LUN-Gruppe zur Zuordnungsansicht." msgid "Associate portgroup to mapping view error." msgstr "Fehler beim Zuordnen der Portgruppe zur Zuordnungsansicht." msgid "At least one valid iSCSI IP address must be set." msgstr "Es muss mindestens eine gültige iSCSI-IP-Adresse festgelegt werden." #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "Versuch, %s mit ungültigem Schlüssel zu übertragen." #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "" "Details der Authentifizierungsgruppe [%s] in CloudByte-Speicher nicht " "gefunden." msgid "Auth user details not found in CloudByte storage." 
msgstr "" "Details des Authentifizierungsbenutzers in CloudByte-Speicher nicht gefunden." msgid "Authentication error" msgstr "Authentifizierungsfehler" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "Authentifizierung fehlgeschlagen. Überprüfen Sie die Berechtigungsnachweise " "für den Switch. Fehlercode %s." msgid "Authorization error" msgstr "Autorisierungsfehler" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "Verfügbarkeitszone '%(s_az)s' ist ungültig." msgid "Available categories:" msgstr "Verfügbare Kategorien:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "Back-End-QoS-Spezifikationen werden für diese Speicherfamilie und ONTAP-" "Version nicht unterstützt." #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "Back-End ist nicht vorhanden (%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "" "Es wurde bereits ein Failover für das Backend durchgeführt. Zurücksetzen " "nicht möglich." #, python-format msgid "Backend reports: %(message)s" msgstr "Back-End-Berichte: %(message)s" msgid "Backend reports: item already exists" msgstr "Back-End-Berichte: Element bereits vorhanden" msgid "Backend reports: item not found" msgstr "Back-End-Berichte: Element nicht gefunden" msgid "Backend server not NaServer." msgstr "Back-End-Server ist kein NaServer." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "Zeitlimittreffer bei Back-End-Service-Wiederholung: %(timeout)s s" msgid "Backend storage did not configure fiber channel target." msgstr "Back-End-Speicher hat Fibre Channel-Ziel nicht konfiguriert. " msgid "Backing up an in-use volume must use the force flag." msgstr "" "Um einen Datenträger, der im Gebrauch ist, zu sichern, muss das force-Flag " "verwendet werden." #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "Sicherung %(backup_id)s konnte nicht gefunden werden." msgid "Backup RBD operation failed" msgstr "RBD-Sicherungsoperation ist fehlgeschlagen" msgid "Backup already exists in database." msgstr "Sicherungskopie ist bereits in Datenbank vorhanden." #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "Datensicherungstreiber meldete einen Fehler: %(message)s" msgid "Backup id required" msgstr "Datensicherungskennung erforderlich" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "" "Sicherung wird für GlusterFS-Datenträger mit Momentaufnahmen nicht " "unterstützt." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "" "Sicherung wird nur für SOFS-Datenträger ohne Sicherungsdatei unterstützt." msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "" "Sicherung wird nur für unformatierte GlusterFS-Datenträger unterstützt." msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "Sicherung wird nur für unformatierte SOFS-Datenträger unterstützt." msgid "Backup operation of an encrypted volume failed." msgstr "" "Sicherungsoperation eines verschlüsselten Datenträgers ist fehlgeschlagen." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Sicherungsservice %(configured_service)s unterstützt keine Überprüfung. " "Sicherungs-ID %(id)s wurde nicht überprüft. Überprüfung wird übersprungen." 
#, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "Sicherungsservice %(service)s unterstützt keine Überprüfung. Sicherungs-ID " "%(id)s wurde nicht überprüft. Zurücksetzung wird übersprungen." #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "" "Sicherung sollte nur eine Momentaufnahme enthalten, stattdessen enthält sie " "%s" msgid "Backup status must be available" msgstr "Sicherungsstatus muss 'available' sein" #, python-format msgid "Backup status must be available and not %s." msgstr "Datensicherungsstatus muss verfügbar sein und nicht %s." msgid "Backup status must be available or error" msgstr "Sicherungsstatus muss 'available' oder 'error' sein" msgid "Backup to be restored has invalid size" msgstr "Wiederherzustellende Sicherung hat ungültige Größe" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "Falsche Statuszeile zurückgegeben: %(arg)s." #, python-format msgid "Bad key(s) in quota set: %s" msgstr "Falsche(r) Schlüssel in Quotensatz: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "Fehlerhafte oder nicht erwartete Antwort von Back-End-API des Datenträgers: " "%(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Fehlerhaftes Projektformat: Projekt weist nicht das richtige Format auf (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Fehlerhafte Anforderung an Datera-Cluster gesendet: Ungültige Argumente: " "%(args)s | %(message)s" msgid "Bad response from Datera API" msgstr "Ungültige Antwort von Datera-API" msgid "Bad response from SolidFire API" msgstr "Ungültige Antwort von SolidFire-API" #, python-format msgid "Bad response from XMS, %s" msgstr "Fehlerhafte Antwort vom XMS, %s" msgid "Binary" msgstr "Binärdatei" msgid "Blank components" msgstr "Leere Komponenten" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Authentifizierungsschema für Blockbridge-API (token oder password)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Kennwort für Blockbridge-API (für Authentifizierungsschema 'password')" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Token für Blockbridge-API (für Authentifizierungsschema 'token')" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Benutzer für Blockbridge-API (für Authentifizierungsschema 'password')" msgid "Blockbridge api host not configured" msgstr "Blockbridge-API-Host nicht konfiguriert" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "" "Blockbridge mit ungültigem Authentifizierungsschema '%(auth_scheme)s' " "konfiguriert" msgid "Blockbridge default pool does not exist" msgstr "Blockbridge-Standardpool ist nicht vorhanden" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Blockbridge-Kennwort nicht konfiguriert (erforderlich für " "Authentifizierungsschema 'password')" msgid "Blockbridge pools not configured" msgstr "Blockbridge-Pools nicht konfiguriert" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Blockbridge-Token nicht konfiguriert (erforderlich für " "Authentifizierungsschema 'token')" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "Blockbridge-Benutzer nicht 
konfiguriert (erforderlich für " "Authentifizierungsschema 'password')" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "Fehler bei der Brocade Fibre Channel-Zoning-CLI: %(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Fehler bei Brocade Fibre Channel-Zoning-HTTP: %(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "Geheimer CHAP-Schlüssel sollte aus 12-16 Bytes bestehen." #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Ausgabe bei CLI-Ausnahme:\n" " Befehl: %(cmd)s\n" " Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Ausgabe bei CLI-Ausnahme:\n" " Befehl: %(cmd)s\n" " Standardausgabe: %(out)s\n" " Standardfehler: %(err)s." msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E Die Zuordnung von virtueller Platte zu Host wurde nicht erstellt, " "da die virtuelle Platte bereits einem Host zugeordnet ist.\n" "\"" msgid "CONCERTO version is not supported" msgstr "CONCERTO-Version wird nicht unterstützt" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) ist auf Platteneinheit nicht vorhanden" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "Cachename ist None. Legen Sie smartcache:cachename im Schlüssel fest." #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "" "Cachedatenträger %(cache_vol)s enthält Momentaufnahme %(cache_snap)s nicht." #, python-format msgid "Cache volume %s does not have required properties" msgstr "" "Der Zwischenspeicherdatenträger %s hat nicht die erforderlichen " "Eigenschaften." msgid "Call returned a None object" msgstr "Der Aufruf hat ein 'None'-Objekt zurückgegeben." msgid "Can not add FC port to host." msgstr "FC-Port kann nicht zu Host hinzugefügt werden." #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "Cache-ID nach Cachename %(name)s wurde nicht gefunden." #, python-format msgid "Can not find partition id by name %(name)s." msgstr "Partitions-ID nach Name %(name)s wurde nicht gefunden." #, python-format msgid "Can not get pool info. pool: %s" msgstr "Poolinfo konnte nicht abgerufen werden. Pool: %s" #, python-format msgid "Can not translate %s to integer." msgstr "%s kann nicht in eine Ganzzahl umgesetzt werden." #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "Zugriff auf 'scality_sofs_config' nicht möglich: %s" msgid "Can't attach snapshot." msgstr "Schattenkopie kann nicht angehängt werden." msgid "Can't decode backup record." msgstr "Sicherungsdatensatz kann nicht decodiert werden." #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "" "Replikationsdatenträger kann nicht erweitert werden, Datenträger: %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "LUN kann nicht im Array gefunden werden. Prüfen Sie 'source-name' und " "'source-id'." #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "Cachename wurde im Array nicht gefunden. Cachename: %(name)s." 
#, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "LUN-ID wurde nicht in der Datenbank gefunden, Datenträger: %(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" "LUN-Info wurde nicht im Array gefunden. Datenträger: %(id)s, LUN-Name: " "%(name)s." #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" "Partitionsname wurde im Array nicht gefunden. Partitionsname: %(name)s." #, python-format msgid "Can't find service: %s" msgstr "Der Dienst wurde nicht gefunden: %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "Schattenkopie kann nicht im Array gefunden werden. Prüfen Sie 'source-name' " "und 'source-id'." msgid "Can't find the same host id from arrays." msgstr "Dieselbe Host-ID wurde nicht in den Arrays gefunden." #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "" "Datenträger-ID kann nicht von der Schattenkopie abgerufen werden. " "Schattenkopie: %(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "Die Datenträger-ID kann nicht abgerufen werden. Datenträgername: %s." #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "LUN %(lun_id)s kann nicht in Cinder importiert werden. Der LUN-Typ stimmt " "nicht überein." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einem " "HyperMetroPair vorhanden." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einer LUN-" "Kopieraufgabe vorhanden." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "" "LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einer LUN-" "Gruppe vorhanden." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "" "LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einem LUN-" "Spiegel vorhanden." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "" "LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einem " "SplitMirror vorhanden." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einer " "Migrationsaufgabe vorhanden." #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "" "LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einer " "fernen Replikationsaufgabe vorhanden." #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "" "LUN %s kan nicht in Cinder importiert werden. Der LUN-Status ist nicht " "normal." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "Schattenkopie %s kann nicht in Cinder importiert werden. Die Schattenkopie " "gehört nicht zum Datenträger." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "Schattenkopie %s kann nicht in Cinder importiert werden. Die Schattenkopie " "wurde dem Initiator verfügbar gemacht." #, python-format msgid "" "Can't import snapshot %s to Cinder. 
Snapshot status is not normal or running " "status is not online." msgstr "" "Schattenkopie %s kann nicht in Cinder importiert werden. Status der " "Schattenkopie ist nicht normal oder der Aktivitätsstatus ist nicht 'online'. " #, python-format msgid "Can't open config file: %s" msgstr "Konfigurationsdatei kann nicht geöffnet werden: %s" msgid "Can't parse backup record." msgstr "Sicherungsdatensatz kann nicht analysiert werden." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "Datenträger %(volume_id)s kann nicht zu Konsistenzgruppe %(group_id)s " "hinzugefügt werden, da er über keinen Datenträgertyp verfügt." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "Datenträger %(volume_id)s kann nicht zu Konsistenzgruppe %(group_id)s " "hinzugefügt werden, da er bereits in Konsistenzgruppe %(orig_group)s " "enthalten ist." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "Datenträger %(volume_id)s kann nicht zu Konsistenzgruppe %(group_id)s " "hinzugefügt werden, da der Datenträger nicht gefunden werden kann." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "Datenträger %(volume_id)s kann nicht zu Konsistenzgruppe %(group_id)s " "hinzugefügt werden, da der Datenträger nicht vorhanden ist." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "Datenträger %(volume_id)s kann nicht zu Konsistenzgruppe %(group_id)s " "hinzugefügt werden, da der Datenträger einen ungültigen Status aufweist: " "%(status)s. Gültige Status sind: %(valid)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "Datenträger %(volume_id)s kann nicht zu Konsistenzgruppe %(group_id)s " "hinzugefügt werden, da der Datenträgertyp %(volume_type)s von der Gruppe " "nicht unterstützt wird." #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "Der bereits angehängte Datenträger %s kann nicht angehängt werden. Das " "mehrfache Anhängen ist über die Konfigurationsoption " "'netapp_enable_multiattach' inaktiviert." msgid "Cannot change VF context in the session." msgstr "VF-Kontext kann nicht in der Sitzung geändert werden." #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "VF-Kontext kann nicht geändert werden. Die angegebene VF ist nicht in der " "Liste der verwaltbaren VFs %(vf_list)s enthalten." msgid "Cannot connect to ECOM server." msgstr "Verbindung zu ECOM-Server kann nicht hergestellt werden." #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "Klon mit Größe %(vol_size)s kann nicht von Datenträger der Größe " "%(src_vol_size)s erstellt werden" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." 
msgstr "" "Konsistenzgruppe %(group)s kann nicht erstellt werden, da Momentaufnahme " "%(snap)s keinen gültigen Status aufweist. Gültige Status sind: %(valid)s." #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "Konsistenzgruppe %(group)s kann nicht erstellt werden, da Quellendatenträger " "%(source_vol)s nicht in einem gültigen Status ist. Gültige Statusangaben: " "%(valid)s." #, python-format msgid "Cannot create directory %s." msgstr "Verzeichnis %s kann nicht erstellt werden." msgid "Cannot create encryption specs. Volume type in use." msgstr "" "Verschlüsselungsspezifikationen können nicht erstellt werden. Datenträgertyp " "ist im Gebrauch." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "Das Image des Plattenformats: %s kann nicht erstellt werden. Nur das vmdk-" "Plattenformat wird akzeptiert." #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "Maskenansicht %(maskingViewName)s kann nicht erstellt werden. " #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "Auf dem ESeries-Array können nicht mehr als %(req)s Datenträger erstellt " "werden, wenn 'netapp_enable_multiattach' auf true festgelegt wurde." #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "Eine Speichergruppe namens %(sgGroupName)s kann nicht erstellt oder gefunden " "werden." #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "Datenträger mit Größe %(vol_size)s kann nicht von Momentaufnahme der Größe " "%(snap_size)s erstellt werden" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "" "Datenträger der Größe %s kann erstellt werden: Kein Vielfaches von 8 GB." #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "Kann volume_type mit dem Namen %(name)s und Spezifikationen %(extra_specs)s " "nicht erstellen" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "" "LUN %s kann nicht gelöscht werden, wenn Momentaufnahmen vorhanden sind." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "Cachedatenträger %(cachevol_name)s kann nicht gelöscht werden. Er wurde am " "%(updated_at)s aktualisiert und enthält aktuell %(numclones)d " "Datenträgerinstanzen." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "Cachedatenträger %(cachevol_name)s kann nicht gelöscht werden. Er wurde am " "%(updated_at)s aktualisiert und enthält aktuell %(numclones)s " "Datenträgerinstanzen." msgid "Cannot delete encryption specs. Volume type in use." msgstr "" "Verschlüsselungsspezifikationen können nicht gelöscht werden. Datenträgertyp " "ist im Gebrauch." msgid "Cannot determine storage pool settings." msgstr "Es konnten keine Speicherpooleinstellungen ermittelt werden." msgid "Cannot execute /sbin/mount.sofs" msgstr "Ausführen von /sbin/mount.sofs nicht möglich" #, python-format msgid "Cannot find CG group %s." msgstr "CG-Gruppe %s kann nicht gefunden werden." 
#, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "Controllerkonfigurationsservice für Speichersystem %(storage_system)s." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" "Replizierungsservice zum Erstellen von Datenträger für Momentaufnahme %s " "kann nicht gefunden werden." #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "" "Replizierungsservice zum Löschen der Momentaufnahme %s wurde nicht gefunden." #, python-format msgid "Cannot find Replication service on system %s." msgstr "Replizierungsservice auf System %s kann nicht gefunden werden." #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "" "Datenträger %(id)s wurde nicht gefunden. Operation zum Aufheben der " "Verwaltung. Wird beendet..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "" "Datenträger %(volumename)s kann nicht gefunden werden. " "Erweiterungsoperation. Vorgang wird beendet..." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "" "Einheitennummer für Datenträger %(volumeName)s kann nicht gefunden werden." msgid "Cannot find migration task." msgstr "Migrationstask wurde nicht gefunden." #, python-format msgid "Cannot find replication service on system %s." msgstr "Replizierungsservice auf System %s wurde nicht gefunden." #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "" "Instanz der Quellenkonsistenzgruppe wurde nicht gefunden. " "consistencygroup_id: %s." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "mcs_id kann nicht nach Kanal-ID abgerufen werden: %(channel_id)s." msgid "Cannot get necessary pool or storage system information." msgstr "" "Die erforderlichen Informationen zum Pool oder zur Speichereinheit können " "nicht abgerufen werden." #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "Speichergruppe %(sgGroupName)s kann für Datenträger %(volumeName)s nicht " "abgerufen oder erstellt werden" #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "" "Initiatorgruppe %(igGroupName)s kann nicht abgerufen oder erstellt werden. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "Portgruppe %(pgGroupName)s kann nicht abgerufen werden. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "Speichergruppe %(sgGroupName)s kann nicht aus Maskenansicht " "%(maskingViewInstanceName)s. " #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Unterstützter Größenbereich kann für %(sps)s nicht abgerufen werden. " "Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "Die Standardspeichergruppe für FAST-Richtlinie %(fastPolicyName)s kann nicht " "abgerufen werden." msgid "Cannot get the portgroup from the masking view." msgstr "Die Portgruppe konnte nicht aus der Maskenansicht abgerufen werden." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "Scality-SoFS kann nicht angehängt werden. 
Syslog auf Fehler überprüfen" msgid "Cannot ping DRBDmanage backend" msgstr "DRBDmanage-Back-End kann nicht mit Ping überprüft werden" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "Datenträger %(id)s kann nicht auf %(host)s angeordnet werden" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "Es können nicht sowohl 'cgsnapshot_id' als auch 'source_cgid' angegeben " "werden, um Konsistenzgruppe %(name)s aus Quelle zu erstellen." msgid "Cannot register resource" msgstr "Kann Ressource nicht registrieren" msgid "Cannot register resources" msgstr "Kann Ressourcen nicht registrieren" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "Datenträger %(volume_id)s kann nicht aus Konsistenzgruppe %(group_id)s " "entfernt werden, da er sich nicht in der Gruppe befindet." #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "Datenträger %(volume_id)s kann nicht aus Konsistenzgruppe %(group_id)s " "entfernt werden, da der Datenträger einen ungültigen Status aufweist: " "%(status)s. Gültige Status sind: %(valid)s." #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "Typänderung von HPE3PARDriver in %s nicht möglich." msgid "Cannot retype from one 3PAR array to another." msgstr "" "Typänderung von einer 3PAR-Platteneinheit in eine andere nicht möglich. " msgid "Cannot retype to a CPG in a different domain." msgstr "Typänderung in CPG in einer anderen Domäne nicht möglich. " msgid "Cannot retype to a snap CPG in a different domain." msgstr "Typänderung in snapCPG in einer anderen Domäne nicht möglich. " msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "vgc-cluster-Befehl kann nicht ausgeführt werden. Stellen Sie sicher, dass " "die Software installiert ist und die Berechtigungen ordnungsgemäß festgelegt " "sind." msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "" "hitachi_serial_number und hitachi_unit_name können nicht beide festgelegt " "werden." msgid "Cannot specify both protection domain name and protection domain id." msgstr "" "Schutzdomänenname und Schutzdomänen-ID können nicht gemeinsam angegeben " "werden." msgid "Cannot specify both storage pool name and storage pool id." msgstr "" "Speicherpoolname und Speicherpool-ID können nicht gemeinsam angegeben werden." #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "Konsistenzgruppe %(group_id)s kann nicht aktualisiert werden, da keine " "gültigen Werte für name, description, add_volumes oder remove_volumes " "angegeben wurden." msgid "Cannot update encryption specs. Volume type in use." msgstr "" "Verschlüsselungsspezifikationen können nicht aktualisiert werden. " "Datenträgertyp ist im Gebrauch." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "volume_type %(id)s kann nicht aktualisiert werden" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "" "Das Vorhandensein von object:%(instanceName)s kann nicht geprüft werden." msgid "Cascade option is not supported." 
msgstr "Weitergabeoption wird nicht unterstützt." #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "CgSnapshot %(cgsnapshot_id)s konnte nicht gefunden werden." msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost ist leer. Keine Konsistenzgruppe wird erstellt." msgid "Cgsnapshot status must be available or error" msgstr "Cgsnapshot-Status muss 'available' oder 'error' sein" msgid "Change hostlun id error." msgstr "Fehler beim Ändern der Host-LUN-ID." msgid "Change lun priority error." msgstr "Fehler beim Ändern der LUN-Priorität." msgid "Change lun smarttier policy error." msgstr "Fehler beim Ändern der LUN-smarttier-Richtlinie." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "Durch die Änderung wäre die Nutzung kleiner als 0 für die folgenden " "Ressourcen: %(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "" "Prüfen Sie die Zugriffsberechtigungen für die diesem Treiber zugeordneten " "freigegebenen Verzeichnisse." msgid "Check hostgroup associate error." msgstr "Fehler beim Prüfen der Hostgruppenzuordnung." msgid "Check initiator added to array error." msgstr "Fehler beim Prüfen des Initiators, der zu Array hinzugefügt wurde." msgid "Check initiator associated to host error." msgstr "Fehler beim Prüfen des Initiators, der zu Host zugeordnet wurde." msgid "Check lungroup associate error." msgstr "Fehler beim Prüfen der LUN-Gruppenzuordnung." msgid "Check portgroup associate error." msgstr "Fehler beim Prüfen der Portgruppenzuordnung." msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "Prüfen Sie den Zustand des http-Service. Stellen Sie auch sicher, dass die " "https-Portnummer der in cinder.conf angegebenen Portnummer entspricht." msgid "Chunk size is not multiple of block size for creating hash." msgstr "" "Chunkgröße ist kein Vielfaches der Blockgröße zur Bildung des Hashwerts." #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Fehler bei der Cisco Fibre Channel-Zoning-CLI: %(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "Klonfunktion ist nicht lizenziert auf %(storageSystem)s." #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "Der Klontyp '%(clone_type)s' ist ungültig. Gültige Werte: '%(full_clone)s' " "und '%(linked_clone)s'." msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "" "Cluster ist nicht formatiert. Sie sollten wahrscheinlich \"dog cluster format" "\" ausführen." #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Fehler bei Coho Data-Cinder-Treiber: %(message)s" msgid "Coho rpc port is not configured" msgstr "Der Coho-RPC-Port ist nicht konfiguriert." #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "Befehl %(cmd)s war in der CLI geblockt und wurde abgebrochen" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition: %s Zeitlimit" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s Zeitlimit." msgid "Compression Enabler is not installed. Can not create compressed volume." 
msgstr "" "Compression Enabler ist nicht installiert. Es kann kein komprimierter " "Datenträger erstellt werden. " #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "Compute-Cluster: %(cluster)s nicht gefunden." msgid "Condition has no field." msgstr "Die Bedingung hat kein Feld." #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" "Die Konfiguration von 'max_over_subscription_ratio' ist ungültig. Muss > 0 " "sein: %s" msgid "Configuration error: dell_sc_ssn not set." msgstr "Konfigurationsfehler: dell_sc_ssn ist nicht festgelegt." #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "Konfigurationsdatei %(configurationFile)s ist nicht vorhanden." msgid "Configuration is not found." msgstr "Die Konfiguration wurde nicht gefunden. " #, python-format msgid "Configuration value %s is not set." msgstr "Konfigurationswert %s ist nicht festgelegt." msgid "Configured host type is not supported." msgstr "Konfigurierter Hosttyp wird nicht unterstützt." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "Widersprüchliche QoS-Spezifikationen in Datenträgertyp %s: Wenn QoS-" "Spezifikation zu Datenträgertyp zugeordnet wird, ist das traditionelle " "\"netapp:qos_policy_group\" in den zusätzlichen Spezifikationen des " "Datenträgertyps nicht zulässig." #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Verbindung mit glance fehlgeschlagen: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Verbindung mit swift fehlgeschlagen: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "Connector stellt nicht bereit: %s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "Connector hat nicht die erforderlichen Informationen: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "Konsistenzgruppe %s enthält noch Datenträger. Das Zwangsausführungsflag ist " "erforderlich, damit die Löschung erfolgen kann." #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "Konsistenzgruppe %s verfügt noch immer über abhängige Cgsnapshots." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "Konsistenzgruppe ist leer. Es wird kein Cgsnapshot erstellt." #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "Konsistenzgruppenstatus muss 'available' oder 'error' lauten, aber der " "aktuelle Status lautet %s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" "Konsistenzgruppenstatus muss verfügbar sein, der aktuelle Status ist jedoch " "%s." #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "ConsistencyGroup %(consistencygroup_id)s konnte nicht gefunden werden." msgid "Container" msgstr "Container" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "Containerformat: %s wird durch den VMDK-Treiber nicht unterstützt, nur " "'bare' wird unterstützt." msgid "Container size smaller than required file size." msgstr "Container ist kleiner als die erforderliche Dateigröße." 
msgid "Content type not supported." msgstr "Inhaltstyp nicht unterstützt." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "" "Controllerkonfigurationsservice nicht gefunden auf %(storageSystemName)s." #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "Controller-IP '%(host)s' konnte nicht aufgelöst werden: %(e)s." #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "In %(f1)s konvertiert, Format ist nun jedoch %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "In %(vol_format)s konvertiert, Format ist nun jedoch %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "In unformatierten Zustand konvertiert, Format ist nun jedoch %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "In unformatierten Status konvertiert, Format ist nun jedoch %s." msgid "Coordinator uninitialized." msgstr "Koordinator wurde nicht initialisiert." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "Datenträgerkopiertask fehlgeschlagen: convert_to_base_volume: ID=%(id)s, " "'Status=%(status)s' wird ignoriert." #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "" "Datenträgerkopiertask fehlgeschlagen: create_cloned_volume id=%(id)s, Status=" "%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "Metadaten werden von %(src_type)s %(src_id)s nach %(vol_id)s kopiert." msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "Es konnte nicht ermittelt werden, welcher Keystone-Endpunkt zu verwenden " "ist. Dies kann entweder im Servicekatalog oder mit der cinder.conf-" "Konfigurationsoption 'backup_swift_auth_url' festgelegt werden." msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "Es konnte nicht ermittelt werden, welcher Swift-Endpunkt zu verwenden ist. " "Dies kann entweder im Servicekatalog oder mit der cinder.conf-" "Konfigurationsoption 'backup_swift_url' festgelegt werden." msgid "Could not find DISCO wsdl file." msgstr "DISCO-WSDL-Datei wurde nicht gefunden." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "GPFS-Clusterkennung konnte nicht gefunden werden: %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "GPFS-Dateisystemgerät konnte nicht gefunden werden: %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" "Es konnte kein Host für Datenträger %(volume_id)s mit Typ %(type_id)s " "gefunden werden." #, python-format msgid "Could not find config at %(path)s" msgstr "Konfiguration konnte unter %(path)s nicht gefunden werden" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "" "iSCSI-Export für Datenträger %(volumeName)s konnte nicht gefunden werden." #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "iSCSI-Export nicht gefunden für Datenträger %s" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." 
msgstr "iSCSI-Ziel für Datenträger %(volume_id)s wurde nicht gefunden." #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "" "Schlüssel in Ausgabe des Befehls %(cmd)s wurde nicht gefunden: %(out)s." #, python-format msgid "Could not find parameter %(param)s" msgstr "Parameter %(param)s konnte nicht gefunden werden" #, python-format msgid "Could not find target %s" msgstr "Ziel %s konnte nicht gefunden werden" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "" "Der übergeordnete Datenträger für die Schattenkopie '%s' im Arrary wurde " "nicht gefunden." #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "" "Es wurde keine eindeutige Schattenkopie %(snap)s auf dem Datenträger %(vol)s " "gefunden." msgid "Could not get system name." msgstr "Systemname konnte nicht abgerufen werden." #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "paste-App '%(name)s' konnte von %(path)s nicht geladen werden" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "%s konnte nicht gelesen werden. Wiederholte Ausführung mit sudo" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "Informationen für Schattenkopie %(name)s konnten nicht gelesen werden. Code: " "%(code)s. Ursache: %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "" "Konfigurationsdatei %(file_path)s konnte nicht wiederhergestellt werden: " "%(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "" "Konfiguration konnte nicht in %(file_path)s gespeichert werden: %(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "Konsistenzgruppenmomentaufnahme %s konnte nicht gestartet werden." #, python-format msgid "Counter %s not found" msgstr "Zähler %s nicht gefunden." msgid "Create QoS policy error." msgstr "Fehler beim Erstellen der QoS-Richtlinie." #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Erstellung von Sicherung abgebrochen. Erwartet war Sicherungsstatus " "%(expected_status)s, Status ist jedoch %(actual_status)s." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Erstellung von Sicherung abgebrochen. Erwartet war Volumenstatus " "%(expected_status)s, Status ist jedoch %(actual_status)s." msgid "Create consistency group failed." msgstr "Erstellen der Konsistenzgruppe fehlgeschlagen." #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "Das Erstellen von verschlüsselten Datenträgern mit Typ %(type)s aus Image " "%(image)s wird nicht unterstützt." msgid "Create export for volume failed." msgstr "Erstellen des Exports für Datenträger fehlgeschlagen." msgid "Create hostgroup error." msgstr "Fehler beim Erstellen der Hostgruppe." #, python-format msgid "Create hypermetro error. %s." msgstr "Fehler beim Erstellen von hypermetro. %s." msgid "Create lun error." msgstr "Fehler beim Erstellen der LUN." msgid "Create lun migration error." msgstr "Fehler beim Erstellen der LUN-Migration." msgid "Create luncopy error." msgstr "Fehler beim Erstellen der LUN-Kopie." msgid "Create lungroup error." msgstr "Fehler beim Erstellen der LUN-Gruppe." 
msgid "Create manager volume flow failed." msgstr "Datenträgerfluss zum Erstellen eines Managers fehlgeschlagen." msgid "Create port group error." msgstr "Fehler beim Erstellen der Portgruppe." msgid "Create replication error." msgstr "Fehler beim Erstellen der Replikation." #, python-format msgid "Create replication pair failed. Error: %s." msgstr "Fehler beim Erstellen des Replikationspaars. Fehler: %s." msgid "Create snapshot error." msgstr "Fehler beim Erstellen der Momentaufnahme." #, python-format msgid "Create volume error. Because %s." msgstr "Fehler beim Erstellen des Datenträgers. Grund: %s." msgid "Create volume failed." msgstr "Erstellung des Datenträgers fehlgeschlagen." msgid "Creating a consistency group from a source is not currently supported." msgstr "" "Das Erstellen einer Konsistenzgruppe aus einer Quelle wird zurzeit nicht " "unterstützt. " #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "Fehler beim Erstellen und Aktivieren der Zonengruppe: (Zonengruppe=" "%(cfg_name)s Fehler=%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "Fehler beim Erstellen und Aktivieren der Zonengruppe: (Zonengruppe=" "%(zoneset)s Fehler=%(err)s)." #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "Nutzungen von %(begin_period)s bis %(end_period)s erstellen" msgid "Current host isn't part of HGST domain." msgstr "Der aktuelle Host ist nicht Teil der HGST-Domäne." #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "Aktueller Host nicht gültig für Datenträger %(id)s mit Typ %(type)s, " "Migration nicht zulässig" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "Der aktuell zugeordnete Host für Datenträger %(vol)s befindet sich in einer " "nicht unterstützten Hostgruppe mit %(group)s." msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "VERALTET: Version 1 von Cinder-API implementieren." msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "Veraltet: Version 2 der Cinder-API implementieren." #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "DRBDmanage-Treiberfehler: Erwarteter Schlüssel \"%s\" nicht in der Antwort. " "Falsche DRBDmanage-Version?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "DRBDmanage-Treiberinstallationsfehler: Einige erforderliche Bibliotheken " "(dbus, drbdmanage.*) nicht gefunden." 
#, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage hat eine Ressource erwartet (\"%(res)s\"), erhalten %(n)d" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "DRBDmanage-Zeitlimitüberschreitung beim Warten auf neuen Datenträger nach " "Wiederherstellung der Schattenkopie: Ressource \"%(res)s\", Datenträger " "\"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "DRBDmanage-Zeitlimitüberschreitung beim Warten auf Schattenkopieerstellung: " "Ressource \"%(res)s\", Schattenkopie \"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "" "DRBDmanage-Zeitlimitüberschreitung beim Warten auf Datenträgererstellung: " "Ressource \"%(res)s\", Datenträger \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "DRBDmanage-Zeitlimitüberschreitung beim Warten auf Datenträgergröße: " "Datenträger-ID \"%(id)s\" (res \"%(res)s\", vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "Data ONTAP-API-Version konnte nicht bestimmt werden." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" "Data ONTAP arbeitet im 7-Modus, der QoS-Richtliniengruppen nicht unterstützt." msgid "Database schema downgrade is not allowed." msgstr "Das Herabstufen des Datenbankschemas ist nicht zulässig." #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "" "Die Datengruppe %s wird nicht in der Nexenta Store-Appliance gemeinsam " "genutzt." #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "Die Datengruppe %s wurde nicht in Nexenta SA gefunden." #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup ist ein gültiger Bereitstellungstyp, erfordert jedoch WSAPI-Version " "'%(dedup_version)s'. Version '%(version)s' ist installiert." msgid "Dedup luns cannot be extended" msgstr "Dedup-LUNs können nicht erweitert werden" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "Deduplication Enabler ist nicht installiert. Es kann kein deduplizierter " "Datenträger erstellt werden. " msgid "Default pool name if unspecified." msgstr "Standardpoolname, wenn nicht angegeben." #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "Standardquote für Ressource %(res)s wird durch den Standardquotenflag quota_" "%(res)s festgelegt, der nun veraltet ist. Verwenden Sie die " "Standardquotenklasse für die Standardquote. " msgid "Default volume type can not be found." msgstr "Standarddatenträgertyp wurde nicht gefunden." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "Definiert die Gruppe der verfügbaren Pools und deren zugehörige Back-End-" "Abfragezeichenfolgen" msgid "Delete LUNcopy error." msgstr "Fehler beim Löschen der LUN-Kopie." msgid "Delete QoS policy error." msgstr "Fehler beim Löschen der QoS-Richtlinie." msgid "Delete associated lun from lungroup error." 
msgstr "Fehler beim Löschen der zugeordneten LUN aus LUN-Gruppe." #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Löschen von Sicherung abgebrochen, der derzeit konfigurierte " "Sicherungsservice [%(configured_service)s] ist nicht der Sicherungsservice, " "der zum Erstellen dieser Sicherung [%(backup_service)s] verwendet wurde." msgid "Delete consistency group failed." msgstr "Löschen der Konsistenzgruppe fehlgeschlagen." msgid "Delete hostgroup error." msgstr "Fehler beim Löschen der Hostgruppe." msgid "Delete hostgroup from mapping view error." msgstr "Fehler beim Löschen der Hostgruppe aus der Zuordnungsansicht." msgid "Delete lun error." msgstr "Fehler beim Löschen der LUN." msgid "Delete lun migration error." msgstr "Fehler beim Löschen der LUN-Migration." msgid "Delete lungroup error." msgstr "Fehler beim Löschen der LUN-Gruppe." msgid "Delete lungroup from mapping view error." msgstr "Fehler beim Löschen der LUN-Gruppe aus der Zuordnungsansicht." msgid "Delete mapping view error." msgstr "Fehler beim Löschen der Zuordnungsansicht." msgid "Delete port group error." msgstr "Fehler beim Löschen der Portgruppe." msgid "Delete portgroup from mapping view error." msgstr "Fehler beim Löschen der Portgruppe aus der Zuordnungsansicht." msgid "Delete snapshot error." msgstr "Fehler beim Löschen der Momentaufnahme." #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "" "Löschen der Momentaufnahme des Datenträgers im Status %s nicht unterstützt." #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup abgebrochen. Erwartet war Sicherungsstatus " "%(expected_status)s, Status ist jedoch %(actual_status)s." msgid "Deleting volume from database and skipping rpc." msgstr "Datenträger aus Datenbank löschen und rpc überspringen." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "Löschen von Zonen fehlgeschlagen: (Befehl=%(cmd)s Fehler=%(err)s)." msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "" "Dell API 2.1 oder höher für Konsistenzgruppenunterstützung erforderlich" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "" "Dell Cinder-Treiberkonfigurationsfehler. Keine Unterstützung für Replikation " "mit direkter Verbindung." #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Dell Cinder-Treiberkonfigurationsfehler: 'replication_device' %s wurde nicht " "gefunden." msgid "Deploy v3 of the Cinder API." msgstr "Version 3 der Cinder-API implementieren." msgid "Describe-resource is admin only functionality" msgstr "'Describe-resource' ist eine reine Administratorfunktion." #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "Ziel hat migration_status %(stat)s, erwartet %(exp)s." msgid "Destination host must be different than the current host." msgstr "Zielhost muss sich vom aktuellen Host unterscheiden." msgid "Destination volume not mid-migration." msgstr "Zieldatenträger befindet sich nicht in einer Migration." msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." 
msgstr "" "Aufheben der Zuordnung des Datenträgers fehlgeschlagen: Mehr als ein Anhang, " "aber keine attachment_id angegeben." msgid "Detach volume from instance and then try again." msgstr "" "Hängen Sie den Datenträger von der Instanz ab und versuchen Sie es dann " "erneut." #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "Mehrere Datenträger mit dem Namen %(vol_name)s gefunden" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "Erwarteter Spaltenname nicht in %(fun)s gefunden: %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "" "Der erwartete Schlüssel %(key)s wurde in %(fun)s nicht gefunden: %(raw)s." msgid "Disabled reason contains invalid characters or is too long" msgstr "Ursache für Inaktivierung: enthält ungültige Zeichen oder ist zu lang" #, python-format msgid "Domain with name %s wasn't found." msgstr "Domäne mit dem Namen %s wurde nicht gefunden." #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Älterer Version von GPFS-Cluster erkannt. GPFS-Klonfunktion ist nicht " "aktiviert in der Cluster-Dämonstufe %(cur)s - es muss mindestens Stufe " "%(min)s vorhanden sein." #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "Treiberinitialisierungsverbindung fehlgeschlagen (Fehler: %(err)s)." msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "Treiber kann keine Typänderung vornehmen, da der Datenträger (LUN {}) eine " "Schattenkopie hat, die nicht migriert werden darf." msgid "Driver must implement initialize_connection" msgstr "Treiber muss initialize_connection implementieren" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "Treiber hat die importierten Sicherungsdaten erfolgreich decodiert, aber es " "fehlen Felder (%s)." #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "E-series-Proxy-API-Version %(current_version)s unterstützt nicht den " "gesamten Satz an zusätzlichen SSC-Spezifikationen. Mindestens erforderliche " "Proxy-Version: %(min_version)s." #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "EMC-VNX-Cinder-Treiber-CLI-Ausnahme: %(cmd)s (Rückgabecode: %(rc)s) " "(Ausgabe: %(out)s)." #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" "EMC-VNX-Cinder-Treiber-Ausnahme vom Typ 'SPUnavailableException': %(cmd)s " "(Rückgabecode: %(rc)s) (Ausgabe: %(out)s)." msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword müssen gültige " "Werte besitzen." #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "Entweder 'cgsnapshot_id' oder 'source_cgid' muss angegeben werden, um " "Konsistenzgruppe %(name)s aus Quelle zu erstellen." #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. 
Examine previous " "error statement for valid values." msgstr "" "Entweder SLO %(slo)s oder Workload %(workload)s ist ungültig. Untersuchen " "Sie vorherige Fehlermeldung auf gültige Werte." msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "" "Entweder hitachi_serial_number oder hitachi_unit_name ist erforderlich." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "Elementkompositionsservice nicht gefunden auf %(storageSystemName)s. " msgid "Enables QoS." msgstr "Aktiviert QoS." msgid "Enables compression." msgstr "Aktiviert Komprimierung." msgid "Enables replication." msgstr "Aktiviert Replikation." msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "Stellen Sie sicher, dass configfs an /sys/kernel/config angehängt ist." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Hinzufügen von Initiator %(initiator)s für groupInitiatorGroup: " "%(initiatorgroup)s Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Hinzufügen zu Zielgruppe %(targetgroup)s mit IQN %(iqn)s " "Rückgabecode:%(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Fehler beim Anhängen des Datenträgers %(vol)s." #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Klonen von Momentaufnahme %(snapshot)s für Datenträger %(lun)s " "von Pool %(pool)s Projekt: %(project)s Klonprojekt: %(clone_proj)s " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Fehler beim Erstellen eines geklonten Datenträgers: %(cloneName)s. " "Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler bei Erstellen von geklontem Datenträger: Datenträger: %(cloneName)s " "Quellendatenträger: %(sourceName)s. Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Erstellen der Gruppe %(groupName)s. Rückgabecode: %(rc)lu. " "Fehler: %(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Fehler beim Erstellen der Maskenansicht %(groupName)s. Rückgabecode: " "%(rc)lu. Fehler: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler bei Erstellen von Datenträger: %(volumeName)s. Rückgabecode: %(rc)lu. " "Fehler: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler bei Erstellen von Datenträger: %(volumename)s. Rückgabecode: %(rc)lu. " "Fehler: %(error)s." #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." 
msgstr "" "Fehler bei CreateGroupReplica: Quelle: %(source)s Ziel: %(target)s. " "Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Erstellen von Initiator %(initiator)s für Alias: %(alias)s " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Erstellen von Projekt %(project)s für Pool %(pool)s " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Erstellen der Eigenschaft: %(property)s Typ: %(type)s " "Beschreibung: %(description)s Rückgabecode: %(ret.status)d Nachricht: " "%(ret.data)s." #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Fehler beim Erstellen von freigegebenem Verzeichnis %(name)s Rückgabecode: " "%(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Erstellen von Momentaufnahme %(snapshot)s für Datenträger " "%(lun)s in Pool: %(pool)s Projekt: %(project)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Erstellen von Momentaufnahme %(snapshot)s für freigegebenes " "Verzeichnis %(share)s in Pool %(pool)s Projekt: %(project)s Rückgabecode: " "%(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Fehler beim Erstellen von Ziel %(alias)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s ." #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Erstellen von Zielgruppe %(targetgroup)s mit IQN %(iqn)s " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Fehler beim Erstellen von Datenträger %(lun)s Größe: %(size)s Rückgabecode: " "%(ret.status)d Nachricht: %(ret.data)s. " #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Erstellen des neuen Verbunddatenträgers. Rückgabecode: %(rc)lu. " "Fehler: %(error)s." #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Erstellen der Replikationsaktion für: Pool: %(pool)s Projekt: " "%(proj)s Datenträger: %(vol)s für Ziel: %(tgt)s und Pool: %(tgt_pool)s. " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." msgid "Error Creating unbound volume on an Extend operation." 
msgstr "" "Fehler beim Erstellen eines nicht gebundenen Datenträgers bei einer " "Erweiterungsoperation." msgid "Error Creating unbound volume." msgstr "Fehler beim Erstellen eines nicht gebundenen Datenträgers." #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler bei Löschen von Datenträger: %(volumeName)s. Rückgabecode: %(rc)lu. " "Fehler: %(error)s." #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "Fehler beim Löschen der Gruppe: %(storageGroupName)s. Rückgabecode: %(rc)lu. " "Fehler: %(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "Fehler beim Löschen der Initiatorgruppe: %(initiatorGroupName)s. " "Rückgabecode: %(rc)lu. Fehler: %(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Löschen von Momentaufnahme %(snapshot)s für freigegebenes " "Verzeichnis %(share)s aus Pool %(pool)s Projekt: %(project)s Rückgabecode: " "%(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Löschen von Momentaufnahme %(snapshot)s für Datenträger %(lun)s " "aus Pool: %(pool)s Projekt: %(project)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Fehler beim Löschen des Datenträgers %(lun)s aus Pool %(pool)s, Projekt: " "%(project)s. Rückgabecode: %(ret.status)d, Nachricht: %(ret.data)s." #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Löschen des Projekts: %(project)s auf Pool: %(pool)s. " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Fehler beim Löschen der Replikationsaktion: %(id)s. Rückgabecode: " "%(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Erweitern von Datenträger: %(volumeName)s. Rückgabecode: " "%(rc)lu. Fehler: %(error)s." #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Abrufen von Initiatoren: InitiatorGroup: %(initiatorgroup)s " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "Fehler beim Abrufen der Poolstatistikdaten: Pool: %(pool)s Rückgabecode: " "%(status)d Nachricht: %(data)s." #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Abrufen der Projektstatistiken: Pool: %(pool)s Projekt: " "%(project)s Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." 
#, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Abrufen von freigegebenem Verzeichnis %(share)s für Pool " "%(pool)s Projekt: %(project)s Rückgabecode: %(ret.status)d Nachricht: " "%(ret.data)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Abrufen von Momentaufnahme %(snapshot)s für Datenträger %(lun)s " "in Pool %(pool)s Projekt: %(project)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Fehler beim Abrufen von Ziel %(alias)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s ." #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Abrufen von Datenträger %(lun)s für Pool: %(pool)s Projekt: " "%(project)s Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Fehler beim Migrieren des Datenträgers von einem Pool zu einem anderen. " "Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Fehler beim Ändern der Maskenansicht %(groupName)s. Rückgabecode: %(rc)lu. " "Fehler: %(error)s." #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "" "Fehler bei Pooleigentümer: Eigner des Pools %(pool)s ist nicht %(host)s." #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Festlegen der Eigenschaften %(props)s für Datenträger %(lun)s " "von Pool %(pool)s Projekt: %(project)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Beenden der Migrationssitzung. Rückgabecode: %(rc)lu. Fehler: " "%(error)s." #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Fehler beim Überprüfen von Initiator %(iqn)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Fehler beim Überprüfen von Pool %(pool)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Überprüfen von Projekt %(project)s für Pool %(pool)s " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Fehler beim Überprüfen von Service %(service)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." 
msgstr "" "Fehler beim Überprüfen von Ziel %(alias)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Fehler beim Überprüfen von freigegebenem Verzeichnis %(share)s für Projekt " "%(project)s und Pool %(pool)s Rückgabecode: %(ret.status)d Nachricht: " "%(ret.data)s." #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "Fehler beim Hinzufügen des Datenträgers %(volumeName)s mit dem Instanzpfad: " "%(volumeInstancePath)s." #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Fehler beim Hinzufügen des Initiators zur Gruppe %(groupName)s. " "Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "" "Fehler beim Hinzufügen von Datenträger zu Verbunddatenträger. Fehler ist: " "%(error)s." #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "" "Fehler beim Anhängen von Datenträger %(volumename)s an Zielbasisdatenträger." #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "Fehler beim Zuordnen der Speichergruppe %(storageGroupName)s zur FAST-" "Richtlinie %(fastPolicyName)s mit Fehlerbeschreibung: %(errordesc)s." #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "" "Fehler beim Anhängen des Datenträgers %s. Zielgrenzwert möglicherweise " "erreicht!" #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Unterbrechen der Klonbeziehung: Synchronisationsname: " "%(syncName)s Rückgabecode: %(rc)lu. Fehler: %(error)s." msgid "Error connecting to ceph cluster." msgstr "Fehler beim Herstellen der Verbindung zum ceph-Cluster." #, python-format msgid "Error connecting via ssh: %s" msgstr "Fehler beim Herstellen einer Verbindung über SSH: %s" #, python-format msgid "Error creating volume: %s." msgstr "Fehler beim Erstellen des Datenträgers: %s" msgid "Error deleting replay profile." msgstr "Fehler beim Löschen des Wiedergabeprofils." #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "Fehler beim Löschen des Datenträgers %(ssn)s: %(volume)s" #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "Fehler beim Löschen des Datenträgers %(vol)s: %(err)s." #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "Fehler während Bewerteranalyse: %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Fehler beim Bearbeiten von freigegebenem Verzeichnis %(share)s für Pool " "%(pool)s Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "Fehler beim Aktivieren von iSER für NetworkPortal: Stellen Sie sicher, dass " "RDMA auf Ihrem iSCSI-Port %(port)d unter IP %(ip)s unterstützt wird." 
#, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "" "Bei der Bereinigung eines fehlgeschlagenen Anhängversuchs ist ein Fehler " "aufgetreten: %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "Fehler beim Ausführen von CloudByte-API [%(cmd)s], Fehler: %(err)s." msgid "Error executing EQL command" msgstr "Fehler beim Ausführen des EQL-Befehls" #, python-format msgid "Error executing command via ssh: %s" msgstr "Fehler beim Ausführen eines Befehls über SSH: %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "Fehler beim Erweitern des Datenträgers %(vol)s: %(err)s." #, python-format msgid "Error extending volume: %(reason)s" msgstr "Fehler beim Erweitern des Datenträgers: %(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "Fehler bei der Suche nach %(name)s." #, python-format msgid "Error finding %s." msgstr "Fehler bei der Suche nach %s." #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Abrufen von Replikationseinstellungsdaten. Rückgabecode: " "%(rc)lu. Fehler: %(error)s." msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Fehler beim Abrufen der Details der Einheitenversion. Rückgabecode: %(ret." "status)d Nachricht: %(ret.data)s." #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "Fehler beim Abrufen der Domänen-ID von Name %(name)s: %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "Fehler beim Abrufen der Domänen-ID von Name %(name)s: %(id)s." msgid "Error getting initiator groups." msgstr "Fehler beim Abrufen von Initiatorgruppen." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "Fehler beim Abrufen der Pool-ID von Name %(pool)s: %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "Fehler beim Abrufen der Pool-ID von Name %(pool_name)s: %(err_msg)s." #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Fehler beim Abrufen der Replikationsaktion: %(id)s. Rückgabecode: " "%(ret.status)d Nachricht: %(ret.data)s." msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Fehler beim Abrufen der Details der Replikationsquelle. Rückgabecode: %(ret." "status)d Nachricht: %(ret.data)s." msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Fehler beim Abrufen der Details des Replikationsziels. Rückgabecode: %(ret." "status)d Nachricht: %(ret.data)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Fehler beim Abrufen der Version: SVC: %(svc)s. Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" "Fehler in Operation [%(operation)s] für Datenträger [%(cb_volume)s] in " "CloudByte-Speicher: [%(cb_error)s], Fehlercode: [%(error_code)s]." 
#, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "Fehler in Antwort von SolidFire-API: Data=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "Fehler in Bereichserstellung für %(space)s der Größe von %(size)d GB" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "" "Fehler in Bereichserweiterung für Datenträger %(space)s mit zusätzlich " "%(size)d GB" #, python-format msgid "Error managing volume: %s." msgstr "Fehler beim Verwalten des Datenträgers: %s" #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "Fehler beim Zuordnen von Datenträger %(vol)s. %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Ändern der Repliksynchronisation: %(sv)s-Operation: " "%(operation)s. Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Fehler beim Ändern von Service %(service)s Rückgabecode: %(ret.status)d " "Nachricht: %(ret.data)s." #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Verschieben des Datenträgers: %(vol)s von Quellenprojekt: " "%(src)s zu Zielprojekt: %(tgt)s Rückgabecode: %(ret.status)d Nachricht: " "%(ret.data)s." msgid "Error not a KeyError." msgstr "Fehler ist nicht vom Typ 'KeyError'." msgid "Error not a TypeError." msgstr "Fehler ist nicht vom Typ 'TypeError'." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "Beim Erstellen von Cgsnapshot %s ist ein Fehler aufgetreten." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "Beim Löschen von Cgsnapshot %s ist ein Fehler aufgetreten." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "Beim Aktualisieren von Konsistenzgruppe %s ist ein Fehler aufgetreten." #, python-format msgid "Error parsing config file: %s" msgstr "Fehler bei der Auswertung der Konfigurationsdatei: %s" msgid "Error promoting secondary volume to primary" msgstr "" "Fehler beim Hochstufen des sekundären Datenträgers zum primären Datenträger" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "Fehler beim Entfernen von Datenträger %(vol)s. %(error)s." #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "Fehler beim Umbenennen von Datenträger %(vol)s: %(err)s." #, python-format msgid "Error response: %s" msgstr "Fehlerantwort: %s" msgid "Error retrieving volume size" msgstr "Fehler beim Abrufen der Datenträgergröße" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Senden der Replikationsaktualisierung für Aktions-ID: %(id)s. " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" "Fehler beim Senden der Replikationsaktualisierung. Zurückgegebener Fehler: " "%(err)s. Aktion: %(id)s." #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." 
msgstr "" "Fehler beim Festlegen der Replikationsvererbung auf %(set)s für Datenträger: " "%(vol)s Projekt %(project)s Rückgabecode: %(ret.status)d Nachricht: " "%(ret.data)s." #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Fehler beim Abtrennen des Pakets: %(package)s von Quelle: %(src)s " "Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "" "Fehler beim Aufheben der Bindung für Datenträger %(vol)s aus dem Pool. " "%(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "Fehler beim Überprüfen der Klongröße für Datenträgerklon: %(clone)s Größe: " "%(size)d für Momentaufnahme: %(snapshot)s" #, python-format msgid "Error while authenticating with switch: %s." msgstr "Fehler beim Authentifizieren am Switch: %s." #, python-format msgid "Error while changing VF context %s." msgstr "Fehler beim Ändern des VF-Kontexts %s." #, python-format msgid "Error while checking the firmware version %s." msgstr "Fehler beim Überprüfen der Firmwareversion %s." #, python-format msgid "Error while checking transaction status: %s" msgstr "Fehler beim Überprüfen des Transaktionsstatus: %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "Fehler beim Überprüfen, ob VF für die Verwaltung von %s verfügbar ist." #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "Fehler beim Herstellen der Verbindung zum Switch %(switch_id)s mit dem " "Protokoll %(protocol)s. Fehler: %(error)s." #, python-format msgid "Error while creating authentication token: %s" msgstr "Fehler beim Erstellen des Authentifizierungstokens: %s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "" "Fehler beim Erstellen der Schattenkopie [Status] %(stat)s - [Ergebnis] " "%(res)s." #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "" "Fehler beim Erstellen des Datenträgers [Status] %(stat)s - [Ergebnis] " "%(res)s." #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "" "Fehler beim Löschen der Schattenkopie [Status] %(stat)s - [Ergebnis] %(res)s." #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "" "Fehler beim Löschen des Datenträgers [Status] %(stat)s - [Ergebnis] %(res)s." #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "" "Fehler beim Erweitern des Datenträgers [Status] %(stat)s - [Ergebnis] " "%(res)s." #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "Fehler beim Abrufen der Details von %(op)s. Rückgabecode: %(status)s." #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "" "Fehler beim Abrufen von Daten über SSH: (Befehl=%(cmd)s Fehler=%(err)s)." #, python-format msgid "Error while getting disco information [%s]." msgstr "Fehler beim Abrufen von Disco-Informationen [%s]." #, python-format msgid "Error while getting nvp value: %s." msgstr "Fehler beim Abrufen des nvp-Werts: %s." #, python-format msgid "Error while getting session information %s." 
msgstr "Fehler beim Abrufen der Sitzungsinformationen %s." #, python-format msgid "Error while parsing the data: %s." msgstr "Fehler bei der syntaktischen Analyse der Daten: %s." #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "" "Fehler beim Abrufen der Seite %(url)s für den Switch. Ursache: %(error)s." #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "Fehler beim Entfernen der Zonen und Konfigurationen in der " "Zonenzeichenfolge: %(description)s." #, python-format msgid "Error while requesting %(service)s API." msgstr "Fehler beim Anfordern der %(service)s-API." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "Fehler beim Ausführen von Zoning-CLI: (Befehl=%(cmd)s Fehler=%(err)s)." #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" "Fehler beim Aktualisieren der neuen Zonen und Konfigurationen in der " "Zonenzeichenfolge. Fehler %(description)s." msgid "Error writing field to database" msgstr "Fehler beim Schreiben eines Felds in Datenbank" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "Fehler [%(stat)s - %(res)s] beim Abrufen der Datenträger-ID." #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "Fehler [%(stat)s - %(res)s] beim Wiederherstellen der Schattenkopie " "[%(snap_id)s] im Datenträger [%(vol)s]." #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "" "Fehler [Status] %(stat)s - [Ergebnis] %(res)s] beim Abrufen der Datenträger-" "ID." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "Maximale Anzahl %(max_attempts)d Planungsversuche überschritten für " "Datenträger %(volume_id)s" msgid "Exceeded the limit of snapshots per volume" msgstr "Maximale Anzahl an Schattenkopien pro Datenträger überschritten." #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "" "Ausnahmebedingung beim Anhängen des Metadatenträgers an Zieldatenträger " "%(volumename)s." #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "Ausnahmebedingung beim Erstellen des Elementreplikats. Klonname: " "%(cloneName)s Quellenname: %(sourceName)s Zusätzliche Spezifikationen: " "%(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "Ausnahme in _select_ds_for_volume: %s." #, python-format msgid "Exception while forming the zone string: %s." msgstr "" "Beim Zusammensetzen der Zonenzeichenfolge ist eine Ausnahme eingetreten: %s." #, python-format msgid "Exception: %s" msgstr "Ausnahmesituation: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "UUID erwartet, aber %(uuid)s erhalten." #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "Es wurde genau ein Knoten mit dem Namen \"%s\" erwartet." #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "Erwartet wurde ganze Zahl für node_count. Rückgabe von svcinfo lsiogrp: " "%(node)s" #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." 
msgstr "Erwartet wurde keine Ausgabe von CLI-Befehl %(cmd)s, erhalten %(out)s." #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "Beim Filtern nach vdisk_UID wurde die Rückgabe einer einzigen virtuellen " "Platte von lsvdisk erwartet. %(count)s wurden zurückgegeben." #, python-format msgid "Expected volume size was %d" msgstr "Erwartete Volumengröße war %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Exportsicherung abgebrochen, erwarteter Sicherungsstatus " "%(expected_status)s, Status ist jedoch %(actual_status)s." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Exportdatensatz abgebrochen, der derzeit konfigurierte Sicherungsservice " "[%(configured_service)s] ist nicht der Sicherungsservice, der zum Erstellen " "dieser Sicherung [%(backup_service)s] verwendet wurde." msgid "Extend volume error." msgstr "Fehler beim Erweitern des Datenträgers." msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "Erweitern des Datenträgers wird für diesen Treiber nur unterstützt, wenn " "keine Momentaufnahmen vorhanden sind." msgid "Extend volume not implemented" msgstr "Erweitern von Datenträgern nicht implementiert" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "FAST VP Enabler ist nicht installiert. Die Tiering-Richtlinie für den " "Datenträger kann nicht festgelegt werden" msgid "FAST is not supported on this array." msgstr "FAST wird auf dieser Platteneinheit nicht unterstützt." msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "" "FC ist das Protokoll, aber WWPNs werden von OpenStack nicht bereitgestellt." #, python-format msgid "Faield to unassign %(volume)s" msgstr "Zuordnung für %(volume)s konnte nicht aufgehoben werden" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "" "Fehler beim Erstellen des Cachedatenträgers %(volume)s. Fehler: %(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "Fehler beim Hinzufügen einer Verbindung für Fabric=%(fabric)s: Fehler:%(err)s" msgid "Failed cgsnapshot" msgstr "Cgsnapshot fehlgeschlagen" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "Fehler beim Erstellen einer Schattenkopie für Gruppe: %(response)s." #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" "Fehler beim Erstellen einer Momentaufnahme für Datenträger %(volname)s: " "%(response)s." #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "Fehler beim Abrufen der aktiven über Fabric %s gesetzten Zone." #, python-format msgid "Failed getting details for pool %s." msgstr "Fehler beim Abrufen der Details für Pool %s." #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "Fehler beim Entfernen einer Verbindung für Fabric=%(fabric)s: Fehler:%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "Fehler beim Erweitern von Datenträger %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "Anmeldung an 3PAR (%(url)s) nicht möglich. 
Ursache: %(err)s" msgid "Failed to access active zoning configuration." msgstr "Auf die Zoningkonfiguration konnte nicht zugegriffen werden." #, python-format msgid "Failed to access zoneset status:%s" msgstr "Auf den Zonengruppenstatus konnte nicht zugegriffen werden: %s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "Es konnte keine Ressourcensperre bezogen werden. (Seriennummer: %(serial)s, " "Instanz: %(inst)s, Rückgabe: %(ret)s, Standardfehler: %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "" "Fehler beim Hinzufügen von %(vol)s zu %(sg)s nach %(retries)s Versuchen. " msgid "Failed to add the logical device." msgstr "Die logische Einheit wurde nicht hinzugefügt." #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Hinzufügen des Datenträgers %(volumeName)s zu Konsistenzgruppe " "%(cgName)s. Rückgabecode: %(rc)lu. Fehler: %(error)s." msgid "Failed to add zoning configuration." msgstr "Fehler beim Hinzufügen der Zoningkonfiguration." #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" "Der qualifizierte iSCSI-Initiatorname konnte nicht zugewiesen werden. (Port: " "%(port)s, Ursache: '%(reason)s')" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "qos_specs konnten nicht zugeordnet werden: %(specs_id)s mit Typ %(type_id)s." #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "Fehler beim Anhängen von iSCSI-Ziel für Datenträger %(volume_id)s." #, python-format msgid "Failed to backup volume metadata - %s" msgstr "Fehler bei der Sicherung von Datenträgermetadaten - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "Fehler bei der Sicherung von Datenträgermetadaten - " "Metadatensicherungsobjekt 'backup.%s.meta' ist bereits vorhanden" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "Fehler beim Klonen des Datenträgers aus Momentaufnahme %s." #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "Fehler beim Verbinden mit %(vendor_name)s Array %(host)s: %(err)s" msgid "Failed to connect to Dell REST API" msgstr "Fehler beim Herstellen einer Verbindung zur Dell-REST-API. " msgid "Failed to connect to array" msgstr "Fehler beim Verbinden mit Array" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" "Verbindung zu sheep-Dämon konnte nicht nicht hergestellt werden. Adresse: " "%(addr)s, Port: %(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "Image konnte nicht auf Datenträger kopiert werden: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "Kopieren von Metadaten auf Datenträger fehlgeschlagen: %(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "Fehler beim Kopieren des Datenträgers, Zieleinheit nicht verfügbar." msgid "Failed to copy volume, source device unavailable." msgstr "Fehler beim Kopieren des Datenträgers, Quelleneinheit nicht verfügbar." #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." 
msgstr "" "Fehler beim Erstellen der Konsistenzgruppe %(cgName)s aus Momentaufnahme " "%(cgSnapshot)s." #, python-format msgid "Failed to create IG, %s" msgstr "IG konnte nicht erstellt werden %s" msgid "Failed to create SolidFire Image-Volume" msgstr "SolidFire-Imagedatenträger konnte nicht erstellt werden" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Datenträgergruppe konnte nicht erstellt werden: %(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Fehler beim Erstellen einer Datei. (Datei: %(file)s, Rückgabe: %(ret)s, " "Standardfehler: %(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "" "Fehler beim Erstellen einer temporären Momentaufnahme für Datenträger %s." msgid "Failed to create api volume flow." msgstr "Fehler beim Erstellen des API-Datenträgerflusses." #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "" "Fehler beim Erstellen der Momentaufnahme von Konsistenzgruppe (Cgsnapshot) " "%(id)s. Ursache: %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "" "Fehler beim Erstellen von Konsistenzgruppe %(id)s. Ursache: %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "Fehler beim Erstellen von Konsistenzgruppe %(id)s:%(ret)s." #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "Fehler beim Erstellen von Konsistenzgruppe %s, da VNX-Konsistenzgruppe keine " "komprimierten LUNs als Mitglieder akzeptieren kann." #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "Fehler beim Erstellen der Konsistenzgruppe: %(cgName)s." #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "" "Fehler beim Erstellen der Konsistenzgruppe: %(cgid)s. Fehler: %(excmsg)s." #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Erstellen der Konsistenzgruppe: %(consistencyGroupName)s. " "Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "Fehler beim Erstellen von Hardware-ID(s) auf %(storageSystemName)s." #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "" "Fehler beim Erstellen des Hosts: %(name)s. Überprüfen Sie, ob er auf dem " "Array vorhanden ist." #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "Fehler beim Erstellen der Hostgruppe: %(name)s. Überprüfen Sie, ob sie auf " "dem Array vorhanden ist." msgid "Failed to create iqn." msgstr "Fehler beim Erstellen des IQN." #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "Fehler beim Erstellen von iSCSI-Ziel für Datenträger %(volume_id)s." msgid "Failed to create manage existing flow." msgstr "Fehler beim Erstellen von 'manage existing'-Ablauf." msgid "Failed to create manage_existing flow." msgstr "Fehler beim Erstellen von manage_existing-Ablauf." msgid "Failed to create map on mcs, no channel can map." msgstr "" "Fehler beim Erstellen der Zuordnung für MCS. Es kann kein Kanal zugeordnet " "werden." msgid "Failed to create map." msgstr "Fehler beim Erstellen der Zuordnung." 
#, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "Erstellen von Metadaten für Datenträger fehlgeschlagen: %(reason)s" msgid "Failed to create partition." msgstr "Fehler beim Erstellen der Partition." #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" "qos_specs konnten nicht erstellt werden: %(name)s mit Spezifikationen " "%(qos_specs)s." msgid "Failed to create replica." msgstr "Fehler beim Erstellen des Replikats." msgid "Failed to create scheduler manager volume flow" msgstr "Fehler beim Erstellen des Datenträgerflusses für Scheduler-Manager" #, python-format msgid "Failed to create snapshot %s" msgstr "Erstellen von Momentaufnahme %s fehlgeschlagen" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "" "Erstellen von Momentaufnahme fehlgeschlagen, da keine LUN-ID angegeben ist" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "Fehler beim Erstellen von Momentaufnahme für cg: %(cgName)s." #, python-format msgid "Failed to create snapshot for volume %s." msgstr "Fehler beim Erstellen einer Momentaufnahme für Datenträger %s." #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "" "Fehler beim Erstellen der Momentaufnahmerichtlinie auf Datenträger %(vol)s: " "%(res)s." #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "" "Fehler beim Erstellen des Momentaufnahmeressourcenbereichs auf Datenträger " "%(vol)s: %(res)s." msgid "Failed to create snapshot." msgstr "Fehler beim Erstellen der Momentaufnahme." #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "Erstellen der Momentaufnahme fehlgeschlagen. CloudByte-" "Datenträgerinformationen für OpenStack-Datenträger [%s] nicht gefunden." #, python-format msgid "Failed to create south bound connector for %s." msgstr "Fehler beim Erstellen eines untergeordneten Connectors für %s." #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "Die Speichergruppe %(storageGroupName)s konnte nicht erstellt werden." #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "Fehler beim Erstellen von Thin-Pool, Fehlernachricht: %s" #, python-format msgid "Failed to create volume %s" msgstr "Erstellen von Datenträger %s fehlgeschlagen" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" "SI für volume_id %(volume_id)s konnte nicht gelöscht werden, da ein Paar " "vorhanden ist." #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Eine logische Einheit konnte nicht gelöscht werden. (Logische Einheit: " "%(ldev)s, Ursache: %(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "" "Fehler beim Löschen der Momentaufnahme von Konsistenzgruppe (Cgsnapshot) " "%(id)s. Ursache: %(reason)s." #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "Fehler beim Löschen von Konsistenzgruppe %(id)s. Ursache: %(reason)s." #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "Fehler beim Löschen der Konsistenzgruppe: %(cgName)s." #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." 
msgstr "" "Fehler beim Löschen der Konsistenzgruppe: %(consistencyGroupName)s. " "Rückgabecode: %(rc)lu. Fehler: %(error)s." msgid "Failed to delete device." msgstr "Fehler beim Löschen des Geräts." #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Fehler beim Löschen von fileset für Konsistenzgruppe %(cgname)s. Fehler: " "%(excmsg)s." msgid "Failed to delete iqn." msgstr "Fehler beim Löschen des IQN." msgid "Failed to delete map." msgstr "Fehler beim Löschen der Zuordnung." msgid "Failed to delete partition." msgstr "Fehler beim Löschen der Partition." msgid "Failed to delete replica." msgstr "Fehler beim Löschen des Replikats." #, python-format msgid "Failed to delete snapshot %s" msgstr "Löschen von Momentaufnahme %s fehlgeschlagen" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "Fehler beim Löschen von Momentaufnahme für cg: %(cgId)s." #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "" "Momentaufnahme für snapshot_id %s konnte nicht gelöscht werden, da ein Paar " "vorhanden ist." msgid "Failed to delete snapshot." msgstr "Fehler beim Löschen der Momentaufnahme." #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "Fehler beim Löschen von Datenträger %(volumeName)s." #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "Datenträger für volume_id %(volume_id)s konnte nicht gelöscht werden, da ein " "Paar vorhanden ist." #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "Fehler beim Abhängen von iSCSI-Ziel für Datenträger %(volume_id)s." msgid "Failed to determine blockbridge API configuration" msgstr "Konfiguration der Blockbridge-API konnte nicht bestimmt werden" msgid "Failed to disassociate qos specs." msgstr "Zuordnung der QoS-Spezifikationen konnte nicht aufgehoben werden." #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "Zuordnung von qos_specs konnte nicht aufgehoben werden: %(specs_id)s mit Typ " "%(type_id)s." #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "" "Fehler beim Sicherstellen des Momentaufnahmeressourcenbereichs. Datenträger " "für ID %s wurde nicht gefunden" msgid "Failed to establish SSC connection." msgstr "Fehler beim Herstellen der SSC-Verbindung." msgid "Failed to establish connection with Coho cluster" msgstr "Fehler beim Herstellen einer Verbindung mit Coho-Cluster." #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "Ausführung von CloudByte-API [%(cmd)s] fehlgeschlagen. Http-Status: " "%(status)s, Fehler: %(error)s." msgid "Failed to execute common command." msgstr "Ausführen des allgemeinen Befehls fehlgeschlagen." #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "Export für Datenträger fehlgeschlagen: %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "" "Fehler beim Erweitern des Datenträgers %(name)s, Fehlernachricht: %(msg)s." msgid "Failed to find QoSnode" msgstr "QoSnode wurde nicht gefunden." msgid "Failed to find Storage Center" msgstr "Storage Center wurde nicht gefunden" msgid "Failed to find a vdisk copy in the expected pool." msgstr "" "Es wurde keine Kopie der virtuellen Platte im erwarteten Pool gefunden." 
msgid "Failed to find account for volume." msgstr "Konto für Datenträger wurde nicht gefunden." #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" "Fehler bei der Suche nach einer Dateigruppe für Pfad %(path)s, " "Befehlsausgabe: %(cmdout)s." #, python-format msgid "Failed to find group snapshot named: %s" msgstr "Es konnte keine Gruppenschattenkopie mit dem Namen %s gefunden werden." #, python-format msgid "Failed to find host %s." msgstr "Host %s nicht gefunden." #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "Die iSCSI-Initiatorgruppe mit %(initiator)s wurde nicht gefunden." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "Speicherpool für Quellendatenträger %s konnte nicht gefunden werden." #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Abrufen der CloudByte-Kontodetails zu Konto [%s] fehlgeschlagen." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "Fehler beim Abrufen von LUN-Zieldetails für LUN %s" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "Fehler beim Abrufen von LUN-Zieldetails für LUN %s." #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "Fehler beim Abrufen von LUN-Zielliste für die LUN %s" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "Fehler beim Abrufen der Partitions-ID für Datenträger %(volume_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "" "Fehler beim Abrufen der RAID-Momentaufnahme-ID von Momentaufnahme " "%(snapshot_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "Fehler beim Abrufen der RAID-Momentaufnahme-ID von Momentaufnahme: " "%(snapshot_id)s." msgid "Failed to get SplitMirror." msgstr "SplitMirror konnte nicht abgerufen werden." #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "Es konnte keine Speicherressource abgerufen werden. Das System wird " "versuchen, die Speicherressource erneut abzurufen. (Ressource: %(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "Fehler beim Abrufen aller Zuordnungen von QoS-Spezifikationen %s" msgid "Failed to get channel info." msgstr "Fehler beim Abrufen der Informationen zum Kanal." #, python-format msgid "Failed to get code level (%s)." msgstr "Codeebene (%s) konnte nicht abgerufen werden." msgid "Failed to get device info." msgstr "Fehler beim Abrufen der Informationen zur Einheit." #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "" "Fehler beim Abrufen der Domäne, weil CPG (%s) auf der Platteneinheit nicht " "vorhanden ist." msgid "Failed to get image snapshots." msgstr "Schattenkopie des Abbilds konnte nicht abgerufen werden." #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "" "Fehler beim Abrufen der IP für Kanal %(channel_id)s mit Datenträger: " "%(volume_id)s." msgid "Failed to get iqn info." msgstr "Fehler beim Abrufen der Informationen zum IQN." msgid "Failed to get license info." msgstr "Fehler beim Abrufen der Informationen zur Lizenz." msgid "Failed to get lv info." msgstr "Fehler beim Abrufen der LV-Informationen." 
msgid "Failed to get map info." msgstr "Fehler beim Abrufen der Informationen zur Zuordnung." msgid "Failed to get migration task." msgstr "Die Migrationsaufgabe konnte nicht abgerufen werden." msgid "Failed to get model update from clone" msgstr "Fehler beim Abrufen von Modellaktualisierung von Klon" msgid "Failed to get name server info." msgstr "Fehler beim Abrufen der Namensserverinformationen." msgid "Failed to get network info." msgstr "Fehler beim Abrufen der Informationen zum Netz." #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "Fehler beim Abrufen der neuen Teilekennung im neuen Pool: %(pool_id)s." msgid "Failed to get partition info." msgstr "Fehler beim Abrufen der Informationen zur Partition." #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "Fehler beim Abrufen der Pool-ID mit Datenträger %(volume_id)s." #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" "Fehler beim Abrufen der Informationen der fernen Kopie für %(volume)s. " "Ursache: %(err)s." #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "" "Fehler beim Abrufen der Informationen der fernen Kopie für %(volume)s. " "Ausnahme: %(err)s." msgid "Failed to get replica info." msgstr "Fehler beim Abrufen der Informationen zum Replikat." msgid "Failed to get show fcns database info." msgstr "Fehler beim Abrufen der Anzeige-FCNS-Datenbankinformationen." msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" "Fehler beim Abrufen der Größe von Datenträger %(vol). Datenträgerverwaltung " "fehlgeschlagen." #, python-format msgid "Failed to get size of volume %s" msgstr "Fehler beim Abrufen der Größe von Datenträger %s" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "Fehler beim Abrufen einer Momentaufnahme für Datenträger %s." msgid "Failed to get snapshot info." msgstr "Fehler beim Abrufen der Informationen zur Momentaufnahme." #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "Fehler beim Abrufen von qualifiziertem iSCSI-Zielnamen für LUN %s" msgid "Failed to get target LUN of SplitMirror." msgstr "Ziel-LUN von SplitMirror konnte nicht abgerufen werden." #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "Fehler beim Abrufen von Zielportal für LUN %s" msgid "Failed to get targets" msgstr "Ziele konnten nicht abgerufen werden" msgid "Failed to get wwn info." msgstr "Fehler beim Abrufen der Informationen zum WWN." #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "Abrufen, Erstellen oder Hinzufügen von Datenträger %(volumeName)s in " "Maskenansicht %(maskingViewName)s fehlgeschlagen. Die empfangene " "Fehlernachricht war %(errorMessage)s." msgid "Failed to identify volume backend." msgstr "Datenträger-Back-End konnte nicht identifiziert werden." #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "" "Fehler beim Verlinken von fileset für freigegebene %(cgname)s. Fehler: " "%(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "Anmeldung bei %s-Array fehlgeschlagen (ungültige Anmeldung?)." #, python-format msgid "Failed to login for user %s." msgstr "Anmeldung für Benutzer %s fehlgeschlagen." msgid "Failed to login with all rest URLs." 
msgstr "Anmeldung mit allen REST-URLs fehlgeschlagen." #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "Fehler beim Stellen einer Anforderung an den Datera-Cluster-Endpunkt. " "Ursache dafür ist: %s" msgid "Failed to manage api volume flow." msgstr "Fehler beim Verwalten des API-Datenträgerflusses." #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Fehler beim Verwalten des bereits vorhandenen %(type)s %(name)s, da die " "gemeldete Größe %(size)s keine Gleitkommazahl war." #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "Fehler beim Verwalten des vorhandenen Datenträgers %(name)s, da beim Abrufen " "der Datenträgergröße ein Fehler aufgetreten ist." #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "Fehler beim Verwalten des vorhandenen Datenträgers %(name)s, da die " "Umbenennungsoperation fehlgeschlagen ist: Fehlernachricht: %(msg)s." #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Fehler beim Verwalten des bereits vorhandenen Datenträgers %(name)s, da die " "gemeldete Größe %(size)s keine Gleitkommazahl war." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "Fehler beim Verwalten des vorhandenen Datenträgers, da der Pool des " "gewählten Datenträgertyps nicht mit der NFS-Freigabe, die in der " "Datenträgerreferenz übergeben wurde, übereinstimmt." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "Fehler beim Verwalten des vorhandenen Datenträgers, da der Pool des " "gewählten Datenträgertyps nicht mit dem Dateisystem, das in der " "Datenträgerreferenz übergeben wurde, übereinstimmt." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "" "Fehler beim Verwalten des vorhandenen Datenträgers, da der Pool des " "gewählten Datenträgertyps nicht mit dem Pool des Hosts übereinstimmt." #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "Verwalten eines vorhandenen Datenträgers aufgrund nicht übereinstimmender E/" "A-Gruppen fehlgeschlagen. Die E/A-Gruppe des zu verwaltenden Datenträgers " "ist %(vdisk_iogrp)s. Die E/A-Gruppe des ausgewählten Typs ist %(opt_iogrp)s." #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der Pool des zu " "verwaltenden Datenträgers nicht mit dem Backend-Pool übereinstimmt. Der Pool " "des zu verwaltenden Datenträgers ist %(vdisk_pool)s. Der Backend-Pool ist " "%(backend_pool)s." msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." 
msgstr "" "Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " "verwaltende Datenträger vom Typ 'compress', der ausgewählte Datenträger " "jedoch vom Typ 'not compress' ist." msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " "verwaltende Datenträger vom Typ 'not compress', der ausgewählte Datenträger " "jedoch vom Typ 'compress' ist." msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " "verwaltende Datenträger nicht in einer gültigen E/A-Gruppe war." msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " "verwaltende Datenträger vom Typ 'thick', der ausgewählte Datenträger jedoch " "vom Typ 'thin' ist." msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " "verwaltende Datenträger vom Typ 'thin', der ausgewählte Datenträger jedoch " "vom Typ 'thick' ist." #, python-format msgid "Failed to manage volume %s." msgstr "Fehler beim Verwalten des Datenträgers %s." #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "Eine logische Einheit konnte nicht zugeordnet werden. (Logische Einheit: " "%(ldev)s, LUN: %(lun)s, Port: %(port)s, ID: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "Migration des Datenträgers zum ersten Mal fehlgeschlagen." msgid "Failed to migrate volume for the second time." msgstr "Migration des Datenträgers zum zweiten Mal fehlgeschlagen." #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "Fehler beim Verschieben der LUN-Zuordnung. Rückgabecode: %s" #, python-format msgid "Failed to move volume %s." msgstr "Fehler beim Verschieben des Datenträgers %s." #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Fehler beim Öffnen einer Datei. (Datei: %(file)s, Rückgabe: %(ret)s, " "Standardfehler: %(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI-Ausgabe konnte nicht analysiert werden:\n" " Befehl: %(cmd)s\n" " Standardausgabe: %(out)s\n" " Standardfehler: %(err)s." msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "Die Konfigurationsoption 'keystone_catalog_info' muss das Format :" ": haben." msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "Die Konfigurationsoption 'swift_catalog_info' konnte nicht analysiert " "werden. Muss das Format :: aufweisen" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "Es konnte keine Null-Seiten-Erschließung durchgeführt werden. 
(Logische " "Einheit: %(ldev)s, Ursache: '%(reason)s')" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "" "Entfernen von Export für Datenträger %(volume)s fehlgeschlagen: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "Fehler beim Entfernen von iSCSI-Ziel für Datenträger %(volume_id)s." #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Fehler beim Entfernen des Datenträgers %(volumeName)s aus Konsistenzgruppe " "%(cgName)s. Rückgabecode: %(rc)lu. Fehler: %(error)s." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "" "Fehler beim Entfernen des Datenträgers %(volumeName)s aus " "Standardservicegruppe." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "" "Datenträger %(volumeName)s konnte nicht aus Standardservicegruppe " "%(volumeName)s entfernt werden." #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "%(volumename)s konnte nicht aus der Standardspeichergruppe für FAST- " "Richtlinie %(fastPolicyName)s entfernt werden." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "Fehler beim Umbenennen des logischen Datenträgers %(name)s, Fehlernachricht: " "%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "Fehler beim Abrufen von aktiver Zoningkonfiguration %s" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "" "Fehler beim Definieren der CHAP-Authentifizierung für den Ziel-IQN %(iqn)s. " "Details: %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "Fehler beim Festlegen von QoS für vorhandenen Datenträger %(name)s. " "Fehlernachricht: %(msg)s." msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "Fehler beim Festlegen des Attributs 'Incoming user' für SCST-Ziel." msgid "Failed to set partition." msgstr "Fehler beim Festlegen der Partition." #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "Fehler beim Festlegen von Berechtigungen für die Konsistenzgruppe " "%(cgname)s. Fehler: %(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "Es wurde keine logische Einheit für den Datenträger %(volume_id)s angegeben, " "für die die Zuordnung aufgehoben werden soll." #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "Es wurde keine zu löschende logische Einheit angegeben. (Methode: " "%(method)s, ID: %(id)s)" msgid "Failed to terminate migrate session." msgstr "Die Migrationssitzung konnte nicht beendet werden." #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "Fehler beim Aufheben der Bindung von Datenträger %(volume)s" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Fehler beim Aufheben der Verlinkung von fileset für Konsistenzgruppe " "%(cgname)s. Fehler: %(excmsg)s." #, python-format msgid "Failed to unmap a logical device. 
(LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Eine logische Einheit konnte nicht abgehängt werden. (Logische Einheit: " "%(ldev)s, Ursache: %(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "Fehler beim Aktualisieren der Konsistenzgruppe: %(cgName)s." #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "Aktualisieren von Metadaten für Datenträger fehlgeschlagen: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "Fehler beim Aktualisieren oder Löschen der Zoningkonfiguration" msgid "Failed to update or delete zoning configuration." msgstr "Fehler beim Aktualisieren oder Löschen der Zoningkonfiguration." #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "qos_specs konnten nicht aktualisiert werden: %(specs_id)s mit " "Spezifikationen %(qos_specs)s." msgid "Failed to update quota usage while retyping volume." msgstr "" "Kontingentnutzung konnte bei der Typänderung des Datenträgers nicht " "aktualisiert werden." msgid "Failed to update snapshot." msgstr "Aktualisierung der Schattenkopie fehlgeschlagen." #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "" "Fehler beim Aktualisieren des Modells mit dem vom Treiber bereitgestellten " "Modell %(model)s" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "Fehler beim Aktualisieren der Metadaten des Datenträgers %(vol_id)s mithilfe " "der bereitgestellten %(src_type)s %(src_id)s-Metadaten" #, python-format msgid "Failure creating volume %s." msgstr "Fehler beim Erstellen des Datenträgers %s." #, python-format msgid "Failure getting LUN info for %s." msgstr "Fehler beim Abrufen von LUN-Informationen für %s." #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "Fehler in update_volume_key_value_pair:%s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "Fehler beim Verschieben von neuer geklonter LUN nach %s." #, python-format msgid "Failure staging LUN %s to tmp." msgstr "Fehler beim Zwischenspeichern von LUN %s in temporärem Bereich." msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "" "Schwerwiegender Fehler: Benutzer ist nicht zum Abfragen von NetApp-" "Datenträgern berechtigt." #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "" "Flexvisor konnte Datenträger %(id)s nicht hinzufügen. Ursache: %(reason)s." #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor konnte den Datenträger %(vol)s in der Gruppe %(group)s nicht " "verknüpfen. Grund: %(ret)s." #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor konnte den Datenträger %(vol)s in der Gruppe %(group)s nicht " "entfernen. Grund: %(ret)s." #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "" "Flexvisor konnte Datenträger %(id)s nicht entfernen. Grund: %(reason)s."
#, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "Fehler bei der Fibre Channel-SAN-Suche: %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "Fehler bei der Fibre Channel-Zonenoperation: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "Fehler bei der Fibre Channel-Verbindungssteuerung: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "Datei %(file_path)s konnte nicht gefunden werden." #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "Datei %(path)s hat ungültige Sicherungsdatei %(bfile)s. Abbruch." #, python-format msgid "File already exists at %s." msgstr "Datei bereits vorhanden bei %s." #, python-format msgid "File already exists at: %s" msgstr "Datei bereits vorhanden bei: %s" msgid "Find host in hostgroup error." msgstr "Fehler beim Suchen des Hosts in Hostgruppe." msgid "Find host lun id error." msgstr "Fehler beim Suchen der Host-LUN-ID." msgid "Find lun group from mapping view error." msgstr "Fehler beim Suchen der LUN-Gruppe in der Zuordnungsansicht." msgid "Find lun number error." msgstr "Fehler beim Suchen der LUN-Nummer." msgid "Find mapping view error." msgstr "Fehler beim Suchen der Zuordnungsansicht." msgid "Find portgroup error." msgstr "Fehler beim Suchen der Portgruppe." msgid "Find portgroup from mapping view error." msgstr "Fehler beim Suchen der Portgruppe in der Zuordnungsansicht." #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "Flash-Cache-Richtlinie erfordert WSAPI-Version '%(fcache_version)s', Version " "'%(version)s' ist installiert." #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "Flexvisor konnte Datenträger %(id)s nicht zuordnen: %(status)s." #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Flexvisor konnte Datenträger %(id)s nicht zuordnen: %(status)s." #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor konnte Momentaufnahme von Datenträger %(id)s in Gruppe %(vgid)s " "Momentaufnahme %(vgsid)s nicht finden." #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "Flexvisor konnte Datenträger %(volumeid)s nicht erstellen: %(status)s." #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor konnte Datenträger %(id)s nicht löschen: %(status)s." #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "" "Flexvisor konnte Datenträger %(id)s nicht zu Gruppe %(cgid)s hinzufügen." #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor konnte Datenträger %(id)s nicht zuordnen, da der Status nicht " "anhand der Ereignis-ID abgerufen werden konnte. " #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor konnte Datenträger %(id)s nicht zuordnen: %(status)s." #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "Flexvisor konnte Datenträger %(volume)s IQN %(iqn)s nicht zuordnen." #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor konnte Datenträger %(id)s nicht klonen: %(status)s." 
#, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "" "Flexvisor konnte Datenträger %(id)s nicht klonen (Ereignis konnte nicht " "abgerufen werden )" #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" "Flexvisor konnte Momentaufnahme für Datenträger %(id)s nicht erstellen: " "%(status)s." #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "Flexvisor konnte Momentaufnahme für Datenträger %(id)s nicht erstellen " "(Ereignis konnte nicht abgerufen werden)." #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "" "Flexvisor konnte Datenträger %(id)s nicht in Gruppe %(vgid)s erstellen." #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor konnte Datenträger %(volume)s nicht erstellen: %(status)s." #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor konnte Datenträger %s nicht erstellen (Ereignis abrufen)" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" "Flexvisor konnte Datenträger nicht aus Momentaufnahme %(id)s erstellen: " "%(status)s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor konnte Datenträger nicht aus Momentaufnahme %(id)s erstellen: " "%(status)s." #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor konnte Datenträger nicht aus Momentaufnahme %(id)s nicht erstellen " "(Ereignis konnte nicht abgerufen werden)." #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor konnte Momentaufnahme %(id)s nicht löschen: %(status)s." #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor konnte Momentaufnahme %(id)s nicht löschen (Ereignis konnte nicht " "abgerufen werden)" #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor konnte Datenträger %(id)s nicht löschen: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor konnte Datenträger %(id)s nicht erweitern: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor konnte Datenträger %(id)s nicht erweitern: %(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "Flexvisor konnte Datenträger %(id)s nicht erweitern (Ereignis konnte nicht " "abgerufen werden)" #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "" "Flexvisor konnte Poolinformationen zu %(id)s nicht abrufen: %(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Flexvisor konnnte Momentaufnahmen-ID von Datenträger %(id)s nicht von Gruppe " "%(vgid)s abrufen." #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "" "Flexvisor konnte Datenträger %(id)s nicht aus Gruppe %(cgid)s entfernen." #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor konnte Datenträger nicht aus Momentaufnahme %(id)s generieren: " "%(status)s." 
#, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor konnte Datenträger nicht aus Momentaufnahme %(id)s nicht erstellen " "(Ereignis konnte nicht abgerufen werden)." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "" "Flexvisor konnte Zuordnung für Datenträger %(id)s nicht aufheben: %(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" "Flexvisor konnte Zuordnung für Datenträger %(id)s nicht aufheben (Ereignis " "abrufen)" #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "" "Flexvisor konnte Zuordnung für Datenträger %(id)s nicht aufheben: %(status)s." #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "" "Flexvisor konnte die Informationen zum Quellendatenträger %(id)s nicht " "finden." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" "Flexvisor konnte Zuordnung für Datenträger %(id)s nicht aufheben: %(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "" "Flexvisor-Datenträger %(id)s konnte nicht mit Gruppe %(vgid)s verknüpft " "werden." #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "Der Ordner %s ist nicht in der Nexenta Store-Appliance vorhanden." #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS läuft nicht, Status: %s." msgid "Gateway VIP is not set" msgstr "Gateway-VIP wurde nicht festgelegt" msgid "Get FC ports by port group error." msgstr "Fehler beim Abrufen der FC-Ports nach Portgruppe." msgid "Get FC ports from array error." msgstr "Fehler beim Abrufen der FC-Ports vom Array." msgid "Get FC target wwpn error." msgstr "Fehler beim Abrufen des WWPN des FC-Ziel." msgid "Get HyperMetroPair error." msgstr "Fehler beim Abrufen von HyperMetroPair." msgid "Get LUN group by view error." msgstr "Fehler beim Abrufen der LUN-Gruppe nach Ansicht." msgid "Get LUNcopy information error." msgstr "Fehler beim Abrufen der Informationen zur LUN-Kopie." msgid "Get QoS id by lun id error." msgstr "Fehler beim Abrufen der QoS-ID nach LUN-ID." msgid "Get QoS information error." msgstr "Fehler beim Abrufen der Informationen zu QoS." msgid "Get QoS policy error." msgstr "Fehler beim Abrufen der QoS-Richtlinie." msgid "Get SplitMirror error." msgstr "Fehler beim Abrufen von SplitMirror." msgid "Get active client failed." msgstr "Abrufen des aktiven Clients ist fehlgeschlagen." msgid "Get array info error." msgstr "Fehler beim Abrufen der Array-Info." msgid "Get cache by name error." msgstr "Fehler beim Abrufen des Cache nach Name." msgid "Get connected free FC wwn error." msgstr "Fehler beim Abrufen der verbundenen freien FC-WWNs." msgid "Get engines error." msgstr "Fehler beim Abrufen von Engines." msgid "Get host initiators info failed." msgstr "Abrufen der Informationen zu Hostinitiatoren fehlgeschlagen." msgid "Get hostgroup information error." msgstr "Fehler beim Abrufen der Informationen zur Hostgruppe." msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "Fehler beim Abrufen der Informationen zum iSCSI-Port. Überprüfen Sie die " "konfigurierte Ziel-IP in der huawei-Konfigurationsdatei." msgid "Get iSCSI port information error." msgstr "Fehler beim Abrufen der Informationen zum iSCSI-Port." msgid "Get iSCSI target port error." 
msgstr "Fehler beim Abrufen des iSCSI-Zielports." msgid "Get lun id by name error." msgstr "Fehler beim Abrufen der LUN-ID nach Namen." msgid "Get lun migration task error." msgstr "Fehler beim Abrufen der LUN-Migrationstask." msgid "Get lungroup id by lun id error." msgstr "Fehler beim Abrufen der LUN-Gruppen-ID nach LUN-ID." msgid "Get lungroup information error." msgstr "Fehler beim Abrufen der Informationen zur LUN-Gruppe." msgid "Get migration task error." msgstr "Fehler beim Abrufen der Migrationsaufgabe." msgid "Get pair failed." msgstr "Fehler beim Abrufen von Paar." msgid "Get partition by name error." msgstr "Fehler beim Abrufen der Partition nach Name." msgid "Get partition by partition id error." msgstr "Fehler beim Abrufen der Partition nach Partitions-ID." msgid "Get port group by view error." msgstr "Fehler beim Abrufen der Portgruppe nach Ansicht." msgid "Get port group error." msgstr "Fehler beim Abrufen von Portgruppen." msgid "Get port groups by port error." msgstr "Fehler beim Abrufen der Portgruppen nach Port." msgid "Get ports by port group error." msgstr "Fehler beim Abrufen von Ports nach Portgruppe." msgid "Get remote device info failed." msgstr "Abrufen der fernen Geräteinfo fehlgeschlagen." msgid "Get remote devices error." msgstr "Fehler beim Abrufen ferner Geräte." msgid "Get smartcache by cache id error." msgstr "Fehler beim Abrufen des Smart Cache nach Cache-ID." msgid "Get snapshot error." msgstr "Fehler beim Abrufen der Schattenkopie." msgid "Get snapshot id error." msgstr "Fehler beim Abrufen der Momentaufnahme-ID." msgid "Get target IP error." msgstr "Fehler beim Abrufen der Ziel-IP." msgid "Get target LUN of SplitMirror error." msgstr "Fehler beim Abrufen der Ziel-LUN von SplitMirror." msgid "Get views by port group error." msgstr "Fehler beim Abrufen der Ansichten nach Portgruppe." msgid "Get volume by name error." msgstr "Fehler beim Abrufen des Datenträgers nach Name." msgid "Get volume error." msgstr "Fehler beim Abrufen des Datenträgers." #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Glance-Metadaten können nicht aktualisiert werden. Schlüssel %(key)s für " "Datenträger-ID %(volume_id)s vorhanden" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" "Glance-Metadaten für Datenträger/Momentaufnahme %(id)s können nicht gefunden " "werden." #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "Gluster-Konfigurationsdatei unter %(config)s ist nicht vorhanden" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Google-Cloudspeicher-API-Fehler: %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Google-Cloudspeicherverbindungsfehler: %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Oauth2-Fehler in Google-Cloudspeicher: %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "Fehlerhafte Pfadinformation von DRBDmanage erhalten! (%s)" msgid "HBSD error occurs." msgstr "HBSD-Fehler tritt auf." msgid "HNAS has disconnected SSC" msgstr "HNAS hat die Verbindung zu SSC getrennt." msgid "HPELeftHand url not found" msgstr "HPELeftHand-URL nicht gefunden" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." 
msgstr "" "Es wurde eine Verifizierung des HTTPS-Zertifikats angefordert. Sie kann " "jedoch nicht mit der Purestorage-Modulversion %(version)s aktiviert werden. " "Führen Sie ein Upgrade auf eine neuere Version durch, um diese Funktion zu " "aktivieren." #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "Hash-Blockgröße wurde seit der letzten Sicherung geändert. Neue Hash-" "Blockgröße: %(new)s. Alte Hash-Blockgröße: %(old)s. Führen Sie eine " "vollständige Sicherung durch." #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "Schicht(en) %(tier_levels)s wurde(n) nicht erstellt." #, python-format msgid "Hint \"%s\" not supported." msgstr "Hinweis \"%s\" nicht unterstützt." msgid "Host" msgstr "Host" #, python-format msgid "Host %(host)s could not be found." msgstr "Host %(host)s konnte nicht gefunden werden." #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "Host %(host)s entspricht nicht dem Inhalt des x509-Zertifikats: CommonName " "%(commonName)s." #, python-format msgid "Host %s has no FC initiators" msgstr "Host %s weist keine FC-Initiatoren auf" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "Host %s weist keinen iSCSI-Initiator auf" #, python-format msgid "Host '%s' could not be found." msgstr "Host '%s' konnte nicht gefunden werden." #, python-format msgid "Host group with name %s not found" msgstr "Hostgruppe mit Name %s nicht gefunden" #, python-format msgid "Host group with ref %s not found" msgstr "Hostgruppe mit ref %s nicht gefunden" msgid "Host is NOT Frozen." msgstr "Der Host ist nicht gesperrt." msgid "Host is already Frozen." msgstr "Der Host ist bereits gesperrt." msgid "Host not found" msgstr "Host nicht gefunden" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" "Host nicht gefunden. Fehler beim Entfernen von %(service)s auf %(host)s." #, python-format msgid "Host replication_status must be %s to failover." msgstr "'replication_status' für Host muss für Failover %s sein." #, python-format msgid "Host type %s not supported." msgstr "Hosttyp %s nicht unterstützt." #, python-format msgid "Host with ports %(ports)s not found." msgstr "Host mit Ports %(ports)s nicht gefunden." msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "" "Hypermetro und Replikation dürfen nicht in demselben Datenträgertyp " "(volume_type) verwendet werden." #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "E/A-Gruppe %(iogrp)d ist ungültig. Verfügbare E/A-Gruppen sind %(avail)s." msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "IP-Adresse/Hostname der Blockbridge-API." msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "Wenn die Komprimierung auf True festgelegt wurde, muss auch rsize festgelegt " "werden (auf einen Wert ungleich -1)." msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "Wenn 'nofmtdisk' auf 'True' gesetzt ist, muss 'rsize' auf '-1' gesetzt " "werden." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "Unzulässiger Wert '%(prot)s' für flashsystem_connection_protocol angegeben: " "gültige Werte sind %(enabled)s." 
msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "Unzulässiger Wert für IOTYPE angegeben: 0, 1 oder 2." msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "" "Unzulässiger Wert für smarttier angegeben: Auf 0, 1, 2 oder 3 festlegen." msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "Unzulässiger Wert für storwize_svc_vol_grainsize angegeben: Auf 32, 64, 128 " "oder 256 festlegen." msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "Unzulässiger Wert für thin angegeben: thin und thick können nicht " "gleichzeitig festgelegt werden." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Image %(image_id)s konnte nicht gefunden werden." #, python-format msgid "Image %(image_id)s is not active." msgstr "Abbild %(image_id)s ist nicht aktiv." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Image %(image_id)s ist nicht zulässig: %(reason)s" msgid "Image location not present." msgstr "Imageposition nicht vorhanden." #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "Virtuelle Größe des Image beträgt %(image_size)d GB und passt nicht in einen " "Datenträger mit der Größe %(volume_size)d GB." msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "Beim Löschen von RBD-Datenträger ist ein ImageBusy-Fehler aufgetreten. Dies " "wurde möglicherweise von einer Verbindung von einem Client verursacht, der " "abgestürzt ist; wenn dies der Fall ist, kann das Problem über einen erneuten " "Löschversuch nach Ablauf von 30 Sekunden gelöst werden." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "Importdatensatz fehlgeschlagen, Sicherungsservice zum Durchführen des " "Imports kann nicht gefunden werden. Service %(service)s anfordern" msgid "Incorrect request body format" msgstr "Falsches Anforderungshauptteilformat" msgid "Incorrect request body format." msgstr "Falsches Anforderungstextformat." msgid "Incremental backups exist for this backup." msgstr "Inkrementelle Sicherungen sind für diese Sicherung vorhanden." #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Infortrend-CLI-Ausnahmebedingung: %(err)s Parameter: %(param)s " "(Rückgabecode: %(rc)s) (Ausgabe: %(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "Ursprüngliche Schicht: {}, Richtlinie: {} ist nicht gültig." msgid "Input type {} is not supported." msgstr "Eingabetyp {} wird nicht unterstützt." msgid "Input volumes or snapshots are invalid." msgstr "Eingabedatenträger oder Momentaufnahmen sind ungültig." msgid "Input volumes or source volumes are invalid." msgstr "Eingabedatenträger oder Quellendatenträger sind ungültig." #, python-format msgid "Instance %(uuid)s could not be found." msgstr "Instanz %(uuid)s konnte nicht gefunden werden." msgid "Insufficient free space available to extend volume." msgstr "" "Es ist nicht genügend freier Speicherplatz vorhanden, um den Datenträger zu " "erweitern." 
msgid "Insufficient privileges" msgstr "Unzureichende Berechtigungen" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "Intervall (in Sekunden) zwischen Verbindungsversuchen zu ceph-Cluster." #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "Ungültige %(protocol)s-Ports %(port)s für io_port_list angegeben." #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Ungültige 3PAR-Domäne: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "Ungültiger ALUA-Wert. ALUA-Wert muss 1 oder 0 sein." msgid "Invalid Ceph args provided for backup rbd operation" msgstr "Ungültige Ceph-Argumente angegeben für RBD-Sicherungsoperation" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "Ungültiger CgSnapshot: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "Ungültige ConsistencyGroup: %(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "Ungültige Konsistenzgruppe: Konsistenzgruppenstatus muss 'available' oder " "'error' lauten, aber der aktuelle Status ist 'in-use'" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "Ungültige Konsistenzgruppe: Konsistenzgruppenstatus muss 'available' lauten, " "der aktuelle Status lautet jedoch: %s." msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" "Ungültige Konsistenzgruppe: Kein Host für die Erstellung der Konsistenzgruppe" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "Ungültige HPELeftHand-API-Version gefunden: %(found)s. Version %(minimum)s " "oder höher erforderlich für die Unterstützung der Funktionen Verwaltung/" "Aufheben von Verwaltung." #, python-format msgid "Invalid IP address format: '%s'" msgstr "Ungültiges IP-Adressformat: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "Ungültige QoS-Spezifikation beim Abrufen der QoS-Richtlinie für Datenträger " "%s gefunden" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "Ungültiges Replikationsziel: %(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "Ungültiger VNX-Authentifizierungstyp: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Ungültige Spezifikation einer Virtuozzo-Speicherfreigabe: %r. Erforderlich: " "[MDS1[,MDS2],...:/][:KENNWORT]." #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "Ungültige XtremIO-Version %(cur)s, Version ab %(min)s ist erforderlich" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "" "Es wurden ungültige Kontingentzuordnungen für die folgenden " "Projektkontingente definiert: %s" msgid "Invalid argument" msgstr "Ungültiges Argument" msgid "Invalid argument - negative seek offset." msgstr "Ungültiges Argument - negativer Suchoffset." #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "Ungültiges Argument - whence=%s wird nicht unterstützt" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "Ungültiges Argument - whence=%s nicht unterstützt." 
#, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "Ungültiger Anhangmodus '%(mode)s' für Datenträger %(volume_id)s." #, python-format msgid "Invalid auth key: %(reason)s" msgstr "Ungültiger Authorisierungsschlüssel: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "Ungültige Sicherung: %(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "Ungültige Barbican-API-URL: Die Version ist erforderlich. Beispiel: " "'http[s]://|[:Port]/'. Angegebene URL: %s" msgid "Invalid cgsnapshot" msgstr "Ungültiger Cgsnapshot" msgid "Invalid chap user details found in CloudByte storage." msgstr "Ungültige Details des CHAP-Benutzers in CloudByte-Speicher gefunden." #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "" "Ungültige Antwort für Initialisierung der Verbindung von Datenträger %(name)s" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "" "Ungültige Antwort für Initialisierung der Verbindung von Datenträger " "%(name)s: %(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Ungültiger Inhaltstyp %(content_type)s." msgid "Invalid credentials" msgstr "Ungültige Berechtigungsnachweise" #, python-format msgid "Invalid directory: %s" msgstr "Ungültiges Verzeichnis: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "Ungültiger Plattenadaptertyp: %(invalid_type)s." #, python-format msgid "Invalid disk backing: %s." msgstr "Ungültige Plattensicherung: %s." #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "Ungültiger Plattentyp: %(disk_type)s." #, python-format msgid "Invalid disk type: %s." msgstr "Ungültiger Festplattentyp: %s." #, python-format msgid "Invalid host: %(reason)s" msgstr "Ungültiger Host: %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "Ungültige hpe3parclient-Version gefunden (%(found)s). Eine Version ab " "%(minimum)s ist erforderlich. Führen Sie \"pip install --upgrade " "python-3parclient\" aus, um ein Upgrade für hpe3parclient durchzuführen." #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "Ungültige hpelefthandclient-Version gefunden (%(found)s). Version " "%(minimum)s oder höher erforderlich. Führen Sie 'pip install --upgrade " "python-lefthandclient' aus, um ein Upgrade für hpelefthandclient " "durchzuführen." #, python-format msgid "Invalid image href %(image_href)s." msgstr "Ungültiger Image-Hyperlink %(image_href)s." msgid "Invalid image identifier or unable to access requested image." msgstr "" "Ungültige Image-ID oder auf das angeforderte Image kann nicht zugegriffen " "werden." msgid "Invalid imageRef provided." msgstr "Angabe für imageRef ungültig." 
msgid "Invalid initiator value received" msgstr "Ungültigen Initiatorwert empfangen" msgid "Invalid input" msgstr "Ungültige Eingabe" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Ungültige Eingabe erhalten: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "'is_public-Filter' [%s] ungültig" #, python-format msgid "Invalid lun type %s is configured." msgstr "Es wurde ein ungültiger LUN-Typ %s konfiguriert." #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Ungültige Metadatengröße: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Ungültige Metadaten: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "Ungültige Mountpunktbasis: %s" #, python-format msgid "Invalid mount point base: %s." msgstr "Ungültige Mountpunktbasis: %s." #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Ungültiger neuer snapCPG-Name für Typänderung. new_snap_cpg='%s'." #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Ungültige Portnummer %(config)s für Coho-RPC-Port." #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "" "Es wurde ein ungültiger PrefetchType '%s' konfiguriert. PrefetchType muss in " "0,1,2,3 sein." #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "Ungültige QoS-Spezifikationen: %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "" "Ungültige Anforderung, einen Datenträger an ein ungültiges Ziel anzuhängen" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "Ungültige Anforderung, einen Datenträger mit einem ungültigen Modus " "anzuhängen. Anhangmodus sollte 'rw' oder 'ro' sein " #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Ungültiger Reservierungsablauf %(expire)s." msgid "Invalid response header from RPC server" msgstr "Ungültiger Antwortheader vom RPC-Server" #, python-format msgid "Invalid secondary id %s." msgstr "Ungültige sekundäre ID %s." #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "" "Die Angabe für 'secondary_backend_id' ist ungültig. Gültige Backend-ID: %s." msgid "Invalid service catalog json." msgstr "Ungültige Servicekatalog-JSON." msgid "Invalid sheepdog cluster status." msgstr "Ungültiger Status des sheepdog-Clusters." #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "Ungültige Momentaufnahme: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "Ungültiger Status: '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "Ungültiger Speicherpool %s angefordert. Typänderung fehlgeschlagen." #, python-format msgid "Invalid storage pool %s specificed." msgstr "Ungültiger Speicherpool %s angegeben." msgid "Invalid storage pool is configured." msgstr "Es wurde ein ungültiger Speicherpool konfiguriert." #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "Ungültiger Synchronisationsmodus angegeben. Zulässiger Modus: %s." msgid "Invalid transport type." msgstr "Ungültiger Transporttyp." 
#, python-format msgid "Invalid update setting: '%s'" msgstr "Ungültige Aktualisierungseinstellung: '%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "Ungültige URL: Muss das folgende Format haben: 'http[s]://|" "[:Port]/'- Angegebene URL: %s" #, python-format msgid "Invalid value '%s' for force." msgstr "Wert '%s' für Zwangsausführung ungültig." #, python-format msgid "Invalid value '%s' for force. " msgstr "Wert '%s' für Zwangsausführung ungültig. " #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "Ungültiger Wert '%s' für is_public. Gültige Werte: True oder False." #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "Ungültiger Wert '%s' für skip_validation." #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "Ungültiger Wert für 'bootable': '%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "Ungültiger Wert für 'force': '%s'" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "Ungültiger Wert für 'readonly': '%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "Ungültiger Wert für scheduler_max_attempts; Wert muss >= 1 sein" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "Ungültiger Wert für NetApp-Konfigurationsoption netapp_host_type." msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "Ungültiger Wert für NetApp-Konfigurationsoption netapp_lun_ostype." #, python-format msgid "Invalid value for age, %(age)s" msgstr "Ungültiger Wert für age: %(age)s" #, python-format msgid "Invalid value: \"%s\"" msgstr "Ungültiger Wert: \"%s\"" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "Ungültige Datenträgergröße für Erstellungsanforderung angegeben: %s " "(Größenargument muss eine Ganzzahl (oder eine Zeichenfolgedarstellung einer " "Ganzzahl) und größer als null sein)." #, python-format msgid "Invalid volume type: %(reason)s" msgstr "Ungültiges Volumentyp: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Ungültiges Volumen: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "Ungültiger Datenträger: %(volume_id)s des Datenträgers kann nicht zur " "Konsistenzgruppe %(group_id)s hinzugefügt werden, da der Datenträger einen " "ungültigen Status aufweist: %(status)s. Gültige Status sind: ('available', " "'in-use')." #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "Ungültiger Datenträger: %(volume_id)s des Datenträgers kann nicht zur " "Konsistenzgruppe %(group_id)s hinzugefügt werden, da der Datenträgertyp " "%(volume_type)s von der angegeben sein." #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "Ungültiger Datenträger: fake-volume-uuid des Datenträgers kann nicht zur " "Konsistenzgruppe %(group_id)s hinzugefügt werden, da der Datenträger nicht " "gefunden werden kann." 
#, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "Ungültiger Datenträger: fake-volume-uuid des Datenträgers kann nicht aus der " "Konsistenzgruppe %(group_id)s entfernt werden, da er sich nicht in der " "Gruppe befindet." #, python-format msgid "Invalid volume_type passed: %s." msgstr "Ungültiger volume_type übergeben: %s." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "Ungültiger volume_type angegeben: %s (der angeforderte Typ ist nicht " "kompatibel; entweder übereinstimmenden Quellendatenträger verwenden oder " "Typargument weglassen)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "Ungültiger volume_type angegeben: %s (der angeforderte Typ ist nicht " "kompatibel; das Typargument sollte weggelassen werden)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "Ungültiger volume_type angegeben: %s (der angeforderte Typ muss von dieser " "Konsistenzgruppe unterstützt werden)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "Ungültiges wwpns-Format %(wwpns)s" msgid "Invoking web service failed." msgstr "Aufrufen des Web-Service fehlgeschlagen." msgid "Issue encountered waiting for job." msgstr "Beim Warten auf den Job ist ein Problem aufgetreten. " msgid "Issue encountered waiting for synchronization." msgstr "Beim Warten auf die Synchronisation ist ein Problem aufgetreten. " msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "Ausgabe eines Failovers fehlgeschlagen, weil die Replikation nicht " "ordnungsgemäß konfiguriert wurde. " msgid "Item not found" msgstr "Element nicht gefunden" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" "Job-ID in Antwort zu Datenträger erstellen [%s] von CloudByte nicht gefunden." #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "" "Job-ID in Antwort zu Datenträger löschen [%s] von CloudByte nicht gefunden." msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "Schlüsselnamen dürfen nur alphanumerische Zeichen, Unterstriche, Punkte, " "Doppelpunkte und Bindestriche enthalten." #, python-format msgid "KeyError: %s" msgstr "KeyError: %s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "" "Für die Unterstützung von verschachtelten Kontingenten muss Keystone ab " "Version 3 verwendet werden." #, python-format msgid "LU does not exist for volume: %s" msgstr "LU für den Datenträger nicht vorhanden: %s" msgid "LUN export failed!" msgstr "Exportieren der LUN fehlgeschlagen!" msgid "LUN id({}) is not valid." msgstr "LUN-ID({}) ist nicht gültig." msgid "LUN map overflow on every channel." msgstr "Überlauf der LUN-Zuordnung an jedem Kanal." #, python-format msgid "LUN not found with given ref %s." msgstr "LUN mit angegebenem Verweis %s nicht gefunden." msgid "LUN number ({}) is not an integer." msgstr "LUN-Zahl ({}) ist keine Ganzzahl." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "" "LUN-Nummer liegt außerhalb des gültigen Bereichs für Kanal-ID: %(ch_id)s." 
#, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "LUN mit angegebenem Verweis %(ref)s entspricht nicht Datenträgertyp. Stellen " "Sie sicher, dass LUN-Datenträger mit SSC-Funktionen auf vserver %(vs)s " "vorhanden ist." #, python-format msgid "Last %s cinder syslog entries:-" msgstr "Letzte %s Cinder-Syslog-Einträge:-" msgid "LeftHand cluster not found" msgstr "LeftHand-Cluster nicht gefunden" msgid "License is unavailable." msgstr "Die Lizenz ist nicht verfügbar." #, python-format msgid "Line %(dis)d : %(line)s" msgstr "Zeile %(dis)d : %(line)s" msgid "Link path already exists and its not a symlink" msgstr "Linkpfad ist bereits vorhanden und ist kein symbolischer Link" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "Verlinkter Klon von Quellendatenträger im Status %s nicht unterstützt." msgid "Lock acquisition failed." msgstr "Erstellen der Sperre fehlgeschlagen." msgid "Logout session error." msgstr "Fehler beim Abmelden der Sitzung." msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "Suchservice nicht konfiguriert. Konfigurationsoption für " "fc_san_lookup_service muss eine konkrete Implementierung des Suchservice " "angeben." msgid "Lun migration error." msgstr "Fehler bei LUN-Migration." #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "" "MD5 von Objekt: %(object_name)s Vorher: %(md5)s und nachher: %(etag)s nicht " "identisch." #, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED: %r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED: AUTH_ERROR: %r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED: RPC_MISMATCH: %r" #, python-format msgid "Malformed fcns output string: %s" msgstr "Fehlerhafte FCNS-Ausgabezeichenfolge: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Fehlerhafter Nachrichtentext: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "Fehlerhafte Namensserver-Zeichenkette: %s" msgid "Malformed request body" msgstr "Fehlerhafter Anforderungshauptteil" msgid "Malformed request body." msgstr "Fehlerhafter Anforderungshauptteil." msgid "Malformed request url" msgstr "Fehlerhafte Anforderungs-URL" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "Fehlerhafte Antwort auf Befehl %(cmd)s: %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "Attribut 'scheduler_hints' fehlerhaft" #, python-format msgid "Malformed show fcns database string: %s" msgstr "Fehlerhafte Anzeigen-FCNS-Datenbankzeichenfolge: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "Fehlerhafte Zonenkonfiguration: (switch=%(switch)s zone_config=" "%(zone_config)s)." #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "Fehlerhafter Zonenstatus: (switch=%(switch)s zone_config=%(zone_config)s)." msgid "Manage existing get size requires 'id'." msgstr "Verwaltung eines vorhandenen Abrufs der Größe erfordert 'id'." msgid "Manage existing snapshot not implemented." msgstr "Verwaltung einer vorhandenen Momentaufnahme nicht implementiert." 
#, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "Verwaltung des vorhandenen Datenträgers aufgrund von ungültiger Back-End-" "Referenz fehlgeschlagen %(existing_ref)s: %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "Verwaltung des vorhandenen Datenträgers aufgrund eines abweichenden " "Datenträgertyps fehlgeschlagen: %(reason)s" msgid "Manage existing volume not implemented." msgstr "Verwaltung eines vorhandenen Datenträgers nicht implementiert." msgid "Manage existing volume requires 'source-id'." msgstr "Verwaltung eines vorhandenen Datenträgers erfordert 'source-id'." #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "Datenträger verwalten wird nicht unterstützt, wenn FAST aktiviert wurde. " "FAST-Richtlinie: %(fastPolicyName)s." msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "" "Verwalten von Schattenkopien für Failover-Datenträger ist nicht zulässig." msgid "Map info is None due to array version not supporting hypermetro." msgstr "" "Zuordnungsinfo ist 'None', da die Array-Version hypermetro nicht unterstützt." #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "Vorbereitung von %(id)s-Zuordnung wurde nicht innerhalb des zugewiesenen " "Zeitlimits von %(to)d Sekunden abgeschlossen. Wird beendet." #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "Maskenansicht %(maskingViewName)s wurde nicht erfolgreich gelöscht" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "Maximale Anzahl an zulässigen Sicherungen (%(allowed)d) überschritten" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" "Maximale Anzahl an zulässigen Momentaufnahmen (%(allowed)d) überschritten" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "Maximale Anzahl an zulässigen Datenträgern (%(allowed)d) für Quote " "'%(name)s' überschritten." #, python-format msgid "May specify only one of %s" msgstr "Nur Angabe von einem von %s zulässig" msgid "Metadata backup already exists for this volume" msgstr "Für dieses Volumen ist bereits eine Metadatensicherung vorhanden" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "Metadatensicherungsobjekt '%s' bereits vorhanden" msgid "Metadata item was not found" msgstr "Metadatenelement wurde nicht gefunden" msgid "Metadata item was not found." msgstr "Metadatenelement wurde nicht gefunden." #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "Metadaten-Eigenschaftsschlüssel %s größer als 255 Zeichen" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "Metadateneigenschaftenschlüsselwert %s ist größer als 255 Zeichen" msgid "Metadata property key blank" msgstr "Metadateneigenschaftenschlüssel leer" msgid "Metadata property key blank." msgstr "Metadateneigenschaftenschlüssel leer." msgid "Metadata property key greater than 255 characters." msgstr "Metadateneigenschaftenschlüssel größer als 255 Zeichen." msgid "Metadata property value greater than 255 characters." msgstr "Metadateneigenschaftenwert größer als 255 Zeichen." 
msgid "Metadata restore failed due to incompatible version" msgstr "" "Fehler bei der Wiederherstellung der Metadaten aufgrund einer inkompatiblen " "Version" msgid "Metadata restore failed due to incompatible version." msgstr "" "Metadatenwiederherstellung fehlgeschlagen aufgrund einer inkompatiblen " "Version." #, python-format msgid "Migrate volume %(src)s failed." msgstr "Migrieren des Datenträgers %(src)s fehlgeschlagen." #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "Migrieren des Quellendatenträger %(src)s zu Zieldatenträger %(dst)s " "fehlgeschlagen." #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "Migration von LUN %s wurde gestoppt oder ist fehlerhaft." msgid "MirrorView/S enabler is not installed." msgstr "MirrorView/S-Enabler ist nicht installiert." msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "Python-Modul 'purestorage' fehlt. Stellen Sie sicher, dass die Bibliothek " "verfügbar." msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "Fehlende Fibre Channel-SAN-Konfigurationsparameter - fc_fabric_names" msgid "Missing request body" msgstr "Fehlender Anforderungshauptteil" msgid "Missing request body." msgstr "Anforderungshauptteil fehlt." #, python-format msgid "Missing required element '%s' in request body" msgstr "Fehlendes erforderliches Element '%s' im Anforderungshauptteil" #, python-format msgid "Missing required element '%s' in request body." msgstr "Fehlendes erforderliches Element '%s' im Anforderungshauptteil." msgid "Missing required element 'consistencygroup' in request body." msgstr "" "Fehlendes erforderliches Element 'consistencygroup' im Anforderungshauptteil." msgid "Missing required element 'host' in request body." msgstr "Fehlendes erforderliches Element 'host' im Anforderungshauptteil." msgid "Missing required element quota_class_set in request body." msgstr "" "Fehlendes erforderliches Element 'quota_class_set' im Anforderungshauptteil." msgid "Missing required element snapshot in request body." msgstr "Fehlendes erforderliches Element 'snapshot' im Anforderungshauptteil." msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "Mehrere SerialNumbers gefunden, wenn nur eine Angabe für diese Operation " "erwartet wurde. Ändern Sie Ihre EMC-Konfigurationsdatei." #, python-format msgid "Multiple copies of volume %s found." msgstr "Mehrere Kopien von Datenträger %s gefunden." #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "Mehrere Übereinstimmungen für '%s' gefunden. Verwenden Sie eine ID zur " "genaueren Bestimmung." msgid "Multiple profiles found." msgstr "Mehrere Profile gefunden." msgid "Must implement a fallback schedule" msgstr "Implementierung eines Ersatzzeitplans erforderlich" msgid "Must implement find_retype_host" msgstr "find_retype_host muss implementiert werden" msgid "Must implement host_passes_filters" msgstr "host_passes_filters muss implementiert werden" msgid "Must implement schedule_create_consistencygroup" msgstr "schedule_create_consistencygroup muss implementiert werden" msgid "Must implement schedule_create_volume" msgstr "schedule_create_volume muss implementiert werden" msgid "Must implement schedule_get_pools" msgstr "schedule_get_pools muss implementiert werden" msgid "Must pass wwpn or host to lsfabric." 
msgstr "WWPN oder Host muss an lsfabric übergeben werden." msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "Dieser Befehl muss als Cloudadministrator zusammen mit einer Keystone-" "Richtliniendatei 'policy.json' ausgeführt werden, damit der " "Cloudadministrator beliebige Projekte auflisten und abrufen kann. " msgid "Must specify 'connector'" msgstr "'connector' muss angegeben werden" msgid "Must specify 'connector'." msgstr "'connector' muss angegeben werden." msgid "Must specify 'host'." msgstr "'host' muss angegeben werden." msgid "Must specify 'new_volume'" msgstr "'new_volume' muss angegeben werden" msgid "Must specify 'status'" msgstr "'status' muss angegeben werden" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "'status', 'attach_status' oder 'migration_status' muss für die " "Aktualisierung angegeben werden. " msgid "Must specify a valid attach status" msgstr "Ein gültiger Anhangstatus muss angegeben werden" msgid "Must specify a valid migration status" msgstr "Ein gültiger Migrationsstatus muss angegeben werden" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "Ein gültiger Charakter %(valid)s muss angegeben werden. Wert '%(persona)s' " "ist ungültig." #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "Ein gültiger Bereitstellungstyp %(valid)s muss angegeben werden. Wert " "'%(prov)s' ist ungültig." msgid "Must specify a valid status" msgstr "Ein gültiger Status muss angegeben werden" msgid "Must specify an ExtensionManager class" msgstr "Eine ExtensionManager-Klasse muss angegeben werden" msgid "Must specify bootable in request." msgstr "'bootable' muss in der Anforderung angegeben werden." msgid "Must specify protection domain name or protection domain id." msgstr "Schutzdomänenname oder Schutzdomänen-ID muss angegeben werden." msgid "Must specify readonly in request." msgstr "'readonly' muss in der Anforderung angegeben werden." msgid "Must specify snapshot source-name or source-id." msgstr "" "'source-name' oder 'source-id' der Schattenkopie müssen angegeben werden." msgid "Must specify source-name or source-id." msgstr "'source-name' oder 'source-id' muss angegeben werden." msgid "Must specify storage pool name or id." msgstr "Speicherpoolname oder -ID muss angegeben werden." msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "Speicherpools müssen angegeben werden. Option: sio_storage_pools." msgid "Must supply a positive value for age" msgstr "Für age muss ein positiver Wert angegeben werden" msgid "Must supply a positive, non-zero value for age" msgstr "Für age muss ein positiver Wert angegeben werden" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "NAS-Konfiguration '%(name)s=%(value)s' ungültig. Muss 'auto', 'true' oder " "'false' sein" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "NFS-Konfigurationsdatei unter %(config)s ist nicht vorhanden" #, python-format msgid "NFS file %s not discovered." msgstr "NFS-Datei %s nicht erkannt." msgid "NFS file could not be discovered." msgstr "NFS-Datei wurde nicht erkannt." msgid "NaElement name cannot be null." msgstr "NaElement-Name darf nicht null sein." 
msgid "Name" msgstr "Name" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "Name, description, add_volumes und remove_volumes dürfen im " "Anforderungshauptteil nicht alle leer bleiben." msgid "Need non-zero volume size" msgstr "Datenträgergröße ungleich null erforderlich" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "Weder MSG_DENIED noch MSG_ACCEPTED: %r" msgid "NetApp Cinder Driver exception." msgstr "NetApp Cinder-Treiberausnahme" #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "Neue Größe für Erweitern muss größer als aktuelle Größe sein (aktuell: " "%(size)s, erweitert: %(new_size)s)." #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "Die neue Größe muss größer sein als die tatsächliche Größe aus dem " "Backendspeicher. realsize: %(oldsize)s, newsize: %(newsize)s." msgid "New volume size must be specified as an integer." msgstr "Neue Datenträgergröße muss als Ganzzahl angegeben werden." msgid "New volume type must be specified." msgstr "Neuer Datenträgertyp muss angegeben werden." msgid "New volume type not specified in request_spec." msgstr "Neuer Datenträgertyp nicht angegeben in request_spec." #, python-format msgid "New volume_type same as original: %s." msgstr "Neuer volume_type ist mit dem ursprünglichen identisch: %s." msgid "Nimble Cinder Driver exception" msgstr "Nimble-Cinder-Treiberausnahme" msgid "No FC initiator can be added to host." msgstr "Es kann kein FC-Initiator zum Host hinzugefügt werden. " msgid "No FC port connected to fabric." msgstr "Kein FC-Port mit Fabric verbunden." msgid "No FCP targets found" msgstr "Keine FCP-Ziele gefunden" msgid "No Port Group elements found in config file." msgstr "" "Es wurden keine Portgruppenelemente in der Konfigurationsdatei gefunden." msgid "No VF ID is defined in the configuration file." msgstr "In der Konfigurationsdatei ist keine VF-ID definiert." msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "Keine aktiven iSCSI-Portale mit angegebenen iSCSI-IPs" #, python-format msgid "No available service named %s" msgstr "Kein verfügbarer Dienst namens %s" #, python-format msgid "No backup with id %s" msgstr "Keine Datensicherung mit Kennung %s" msgid "No backups available to do an incremental backup." msgstr "" "Keine Sicherungen für die Erstellung einer inkrementellen Sicherung " "verfügbar." msgid "No big enough free disk" msgstr "Nicht genügend freier Plattenspeicherplatz" #, python-format msgid "No cgsnapshot with id %s" msgstr "Kein Cgsnapshot mit ID %s" msgid "No cinder entries in syslog!" msgstr "Keine Cinder-Einträge im Syslog!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "Keine geklonte LUN mit Namen %s auf dem Dateiserver gefunden" msgid "No config node found." msgstr "Kein Konfigurationsknoten gefunden." #, python-format msgid "No consistency group with id %s" msgstr "Keine Konsistenzgruppe mit ID %s vorhanden" #, python-format msgid "No element by given name %s." msgstr "Kein Element mit dem angegebenen Namen %s vorhanden." msgid "No errors in logfiles!" msgstr "Keine Fehler in den Protokolldateien!" #, python-format msgid "No file found with %s as backing file." msgstr "Keine Datei mit %s als Sicherungsdatei gefunden." #, python-format msgid "" "No free LUN IDs left. 
Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "Keine freien LUN-IDs übrig. Die maximale Anzahl der Datenträger, die an Host " "(%s) angehängt werden können, wurde überschritten." msgid "No free disk" msgstr "Keine freie Platte" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "Kein gültiges iSCSI-Portal in bereitgestellter Liste für %s gefunden." #, python-format msgid "No good iscsi portals found for %s." msgstr "Keine gültigen iSCSI-Portale für %s gefunden." #, python-format msgid "No host to create consistency group %s." msgstr "Kein Host zum Erstellen der Konsistenzgruppe %s." msgid "No iSCSI-enabled ports on target array." msgstr "Keine iSCSI-fähigen Ports auf Zielarray." msgid "No image_name was specified in request." msgstr "Kein image_name in Anforderung angegeben." msgid "No initiator connected to fabric." msgstr "Kein Initiator mit Fabric verbunden." #, python-format msgid "No initiator group found for initiator %s" msgstr "Keine Initiatorgruppe gefunden für Initiator %s" msgid "No initiators found, cannot proceed" msgstr "Keine Initiatoren gefunden. Fortfahren nicht möglich" #, python-format msgid "No interface found on cluster for ip %s" msgstr "Keine Schnittstelle auf Cluster für IP %s gefunden" msgid "No ip address found." msgstr "Keine IP-Adresse gefunden." msgid "No iscsi auth groups were found in CloudByte." msgstr "In CloudByte wurden keine iSCSI-Authentifizierungsgruppen gefunden." msgid "No iscsi initiators were found in CloudByte." msgstr "Keine iSCSI-Initiatoren in CloudByte gefunden." #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "Kein iSCSI-Service für CloudByte-Datenträger [%s] gefunden." msgid "No iscsi services found in CloudByte storage." msgstr "Keine iSCSI-Services in CloudByte-Speicher gefunden." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "" "Keine Schlüsseldatei angegeben und Laden von Schlüssel aus %(cert)s %(e)s " "nicht möglich." msgid "No mounted Gluster shares found" msgstr "Keine angehängten gemeinsam genutzten Gluster-Laufwerke gefunden" msgid "No mounted NFS shares found" msgstr "Keine angehängten gemeinsam genutzten NFS-Laufwerke gefunden" msgid "No mounted SMBFS shares found." msgstr "Keine eingehängten SMBFS-Freigaben gefunden." msgid "No mounted Virtuozzo Storage shares found" msgstr "Keine angehängten Virtuozzo-Speicherfreigaben gefunden" msgid "No mounted shares found" msgstr "Keine eingehängten Freigaben gefunden" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "Kein Knoten in E/A-Gruppe %(gid)s für Datenträger %(vol)s gefunden." msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "Für das Bereitstellen von Datenträgern sind keine Pools verfügbar. Stellen " "Sie sicher, dass die Konfigurationsoption netapp_pool_name_search_pattern " "ordnungsgemäß festgelegt ist." msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "Keine Antwort von API-Aufruf zum Auflisten der iSCSI-" "Authentifizierungsbenutzer von CloudByte-Speicher erhalten." msgid "No response was received from CloudByte storage list tsm API call." msgstr "" "Von API-Aufruf zum Auflisten von TSM (listTsm) keine Antwort vom CloudByte-" "Speicher erhalten."
msgid "No response was received from CloudByte's list filesystem api call." msgstr "" "Von API-Aufruf zum Auflisten vom Dateisystem (listFileSystem) keine Antwort " "von CloudByte erhalten." msgid "No service VIP configured and no nexenta_client_address" msgstr "" "Kein Dienst-VIP konfiguriert und 'nexenta_client_address' nicht vorhanden." #, python-format msgid "No snap found with %s as backing file." msgstr "Keine Momentaufnahme mit %s als Sicherungsdatei gefunden." #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "Kein Momentaufnahmeimage in Momentaufnahmegruppe %s gefunden." #, python-format msgid "No snapshots could be found on volume %s." msgstr "Keine Schattenkopien auf dem Datenträger %s gefunden." #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "Keine Quellenmomentaufnahmen zum Erstellen der Konsistenzgruppe %s angegeben." #, python-format msgid "No storage path found for export path %s" msgstr "Kein Speicherpfad für Exportpfad %s gefunden" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "Keine QoS-Spezifikation %(specs_id)s vorhanden." msgid "No suitable discovery ip found" msgstr "Keine geeignete Erkennungs-IP-Adresse gefunden" #, python-format msgid "No support to restore backup version %s" msgstr "Keine Unterstützung für die Wiederherstellung der Sicherungsversion %s" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "Keine Ziel-ID für Datenträger %(volume_id)s gefunden." msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "Auf dem Host sind keine nicht verwendeten LUN-IDs verfügbar. Da das " "mehrfache Anhängen aktiviert ist, müssen alle LUN-IDs in der gesamten " "Hostgruppe eindeutig sein." #, python-format msgid "No valid host was found. %(reason)s" msgstr "Es wurde kein gültiger Host gefunden. %(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "Keine gültigen Hosts für Datenträger %(id)s mit Typ %(type)s" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "" "Keine virtuelle Platte mit der durch die Referenz %s angegebenen UID " "vorhanden." #, python-format msgid "No views found for LUN: %s" msgstr "Keine Ansichten für LUN gefunden: %s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "Kein Datenträger auf Cluster mit virtuellem Server %(vserver)s und " "Junctionpfad %(junction)s " msgid "No volume service(s) started successfully, terminating." msgstr "Kein Volume Service erfolgreich gestartet. Wird beendet." msgid "No volume was found at CloudByte storage." msgstr "Kein Datenträger bei CloudByte-Speicher gefunden." msgid "No volume_type should be provided when creating test replica." msgstr "" "Beim Erstellen der Testreplik darf keine Angabe für volume_type vorgenommen " "werden." msgid "No volumes found in CloudByte storage." msgstr "Keine Datenträger in CloudByte-Speicher gefunden." msgid "No weighed hosts available" msgstr "Keine gewichteten Hosts verfügbar" #, python-format msgid "Not a valid string: %s" msgstr "Keine gültige Zeichenfolge: %s" msgid "Not a valid value for NaElement." msgstr "Kein gültiger Wert für NaElement." #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "" "Es kann kein geeigneter Datenspeicher für den Datenträger %s gefunden werden." 
msgid "Not an rbd snapshot" msgstr "Keine RBD-Momentaufnahme" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Für Image %(image_id)s nicht autorisiert." msgid "Not authorized." msgstr "Nicht autorisiert." #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "Nicht genug Speicherplatz auf Back-End (%(backend)s)" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "" "Es ist nicht genügend Speicherplatz im freigegebenen ZFS-Verzeichnis zum " "Ausführen dieser Operation vorhanden." msgid "Not stored in rbd" msgstr "Nicht in RBD gespeichert" msgid "Nova returned \"error\" status while creating snapshot." msgstr "" "Nova gibt den Status \"error\" beim Erstellen der Momentaufnahme zurück." msgid "Null response received from CloudByte's list filesystem." msgstr "" "Zum Auflisten vom Dateisystem (listFileSystem) Antwort 'Null' von CloudByte " "erhalten." msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "" "Antwort 'Null' zu iSCSI-Authentifizierungsgruppen auflisten von CloudByte " "erhalten." msgid "Null response received from CloudByte's list iscsi initiators." msgstr "" "Zum Auflisten von iSCSI-Initiatoren (listiSCSIInitiator) Antwort 'Null' von " "CloudByte erhalten." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "" "Zum Auflisten von Datenträger-iSCSI-Services (listVolumeiSCSIService) " "Antwort 'Null' von CloudByte erhalten." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "Antwort 'Null' beim Erstellen von Datenträger [%s] bei CloudByte-Speicher " "erhalten." #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" "Antwort 'Null' beim Löschen von Datenträger [%s] bei CloudByte-Speicher " "erhalten." #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "Antwort 'Null' beim Abfragen nach auf [%(operation)s] basierendem Job " "[%(job)s] bei CloudByte-Speicher erhalten." msgid "Number of retries if connection to ceph cluster failed." msgstr "" "Anzahl der Wiederholungen, wenn Verbindung zu ceph-Cluster fehlgeschlagen " "ist." msgid "Object Count" msgstr "Objektanzahl" msgid "Object Version" msgstr "Objektversion" msgid "Object is not a NetApp LUN." msgstr "Objekt ist keine NetApp-LUN." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "Bei einer Erweiterungsoperation ist ein Fehler beim Hinzufügen des " "Datenträgers zu einem Verbunddatenträger aufgetreten: %(volumename)s." msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "Einer der cinder-volume-Dienste ist zu alt, um solche Anforderungen zu " "akzeptieren. Führen Sie heterogene Liberty-Mitaka-cinter-volumes aus?" msgid "One of the required inputs from host, port or scheme was not found." msgstr "" "Eine der erforderlichen Eingaben aus Host, Port oder Schema wurde nicht " "gefunden." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "Nur %(value)s %(verb)s-Anforderung(en) an %(uri)s alle %(unit_string)s " "möglich" msgid "Only one limit can be set in a QoS spec." msgstr "In einer QoS-Spezifikation kann nur ein Grenzwert festgelegt werden." 
msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "Nur Benutzer mit einem Token, als dessen Bereich direkt übergeordnete oder " "Stammprojekte festgelegt wurden, dürfen die Quoten der zugehörigen " "untergeordneten Elemente sehen." msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "" "Aufheben der Verwaltung von Datenträgern ist nur für Datenträger möglich, " "die von OpenStack verwaltet werden." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" "Operation fehlgeschlagen mit status=%(status)s. Vollständiger " "Speicherauszug: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "Operation nicht unterstützt: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "Option gpfs_images_dir wurde nicht richtig festgelegt." msgid "Option gpfs_images_share_mode is not set correctly." msgstr "Option gpfs_images_share_mode wurde nicht richtig festgelegt." msgid "Option gpfs_mount_point_base is not set correctly." msgstr "Option gpfs_mount_point_base wurde nicht richtig festgelegt." msgid "Option map (cls._map) is not defined." msgstr "Optionszuordnung (cls._map) ist nicht definiert. " #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "Ursprungs-%(res)s %(prop)s muss einen der Werte '%(vals)s' besitzen" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "" "HTTPS-Port zum Verbinden mit Blockbridge-API-Server außer Kraft setzen." #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" "Partitionsname ist None. Legen Sie smartpartition:partitionname im Schlüssel " "fest." msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "Kennwort oder privater SSH-Schlüssel ist für Authentifizierung erforderlich. " "Legen Sie entweder die Option 'san_password' oder die Option " "'san_private_key' fest." msgid "Path to REST server's certificate must be specified." msgstr "Pfad zum Zertifikat des REST-Servers muss angegeben werden." #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "Erstellen Sie Pool %(pool_list)s bereits im Voraus!" #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "" "Erstellen Sie Schicht %(tier_levels)s in Pool %(pool)s bereits im Voraus!" msgid "Please re-run cinder-manage as root." msgstr "Führen Sie 'cinder-manage' erneut als Root aus." msgid "Please specify a name for QoS specs." msgstr "Geben Sie einen Namen für die QoS-Spezifikationen an." #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Richtlinie lässt Ausführung von %(action)s nicht zu." #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "Pool %(poolNameInStr)s wurde nicht gefunden." #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "Der Pool %s ist nicht in der Nexenta Store-Appliance vorhanden." #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "Pool von Datenträger ['host'] %(host)s nicht gefunden." #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "Pool von Datenträger ['host'] fehlgeschlagen mit: %(ex)s." msgid "Pool is not available in the volume host field." 
msgstr "Pool ist im Datenträger-Hostfeld nicht verfügbar." msgid "Pool is not available in the volume host fields." msgstr "Pool ist in den Datenträger-Hostfeldern nicht verfügbar." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "Pool mit Name %(pool)s wurde in der Domäne %(domain)s nicht gefunden." #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" "Pool mit Name %(pool_name)s wurde in der Domäne %(domain_id)s nicht gefunden." #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "Pool %(poolName)s ist keiner Speicherschicht für FAST-Richtlinie " "%(fastPolicy)s zugeordnet." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "PoolName muss in der Datei %(fileName)s vorhanden sein." #, python-format msgid "Pools %s does not exist" msgstr "Pool %s ist nicht vorhanden" msgid "Pools name is not set." msgstr "Name des Pools wurde nicht festgelegt." #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "Status der primären Kopie: %(status)s und synchronisiert: %(sync)s." msgid "Project ID" msgstr "Projekt-ID" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "" "Projektkontingente sind nicht ordnungsgemäß für verschachtelte Kontingente " "konfiguriert: %(reason)s." msgid "Protection Group not ready." msgstr "Schutzgruppe ist nicht bereit." #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "Protokoll %(storage_protocol)s wird nicht unterstützt für Speicherfamilie " "%(storage_family)s." msgid "Provided backup record is missing an id" msgstr "Im angegebenen Sicherungsdatensatz fehlt eine ID" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "Angegebener Momentaufnahmestatus %(provided)s ist nicht zulässig für " "Momentaufnahme mit Status %(current)s." #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "Providerinformation für CloudByte-Speicher wurde für den OpenStack-" "Datenträger [%s] nicht gefunden." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Fehler bei Pure Storage-Cinder-Treiber: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "QoS-Spezifikationen %(specs_id)s sind bereits vorhanden." #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "QoS-Spezifikationen %(specs_id)s sind immer noch Entitäten zugeordnet." #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "QoS-Konfiguration ist falsch. %s muss > 0 sein." #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "" "QoS-Richtlinie muss für IOTYPE und eine weitere qos_specs angegeben werden. " "QoS-Richtlinie: %(qos_policy)s." #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "" "QoS policy muss für IOTYPE angegeben werden: 0, 1 oder 2. QoS-Richtlinie: " "%(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "" "QoS-Richtlinienkonflikt bei 'upper_limit' und 'lower_limit'. QoS-Richtlinie: " "%(qos_policy)s." 
#, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" "QoS-Spezifikation %(specs_id)s enthält keine Spezifikation mit dem Schlüssel " "%(specs_key)s." msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" "QoS-Spezifikationen werden für diese Speicherfamilie und ONTAP-Version nicht " "unterstützt." msgid "Qos specs still in use." msgstr "Qos-Spezifikationen sind noch im Gebrauch." msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "Die Abfrage nach Serviceparameter ist veraltet. Verwenden Sie stattdessen " "den binären Parameter." msgid "Query resource pool error." msgstr "Fehler beim Abfragen des Ressourcenpools." #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "" "Grenzwert für Quote %s muss mindestens den vorhandenen Ressourcen " "entsprechen." #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Quotenklasse %(class_name)s konnte nicht gefunden werden." msgid "Quota could not be found" msgstr "Quote konnte nicht gefunden werden" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Quote für Ressourcen überschritten: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Quote überschritten: code=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Quote für Projekt %(project_id)s konnte nicht gefunden werden." #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "Kontingentgrenzwert ungültig für Projekt '%(proj)s' für die Ressource " "'%(res)s': Grenzwert von %(limit)d ist kleiner als der Wert von %(used)d, " "der im Gebrauch ist." #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "Quotenreservierung %(uuid)s konnte nicht gefunden werden." #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "Quotennutzung für Projekt %(project_id)s konnte nicht gefunden werden." #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" "RBD-Differenzierungsoperation fehlgeschlagen - (ret=%(ret)s stderr=" "%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "IP des REST-Servers muss angegeben werden." msgid "REST server password must by specified." msgstr "Kennwort des REST-Servers muss angegeben werden." msgid "REST server username must by specified." msgstr "Benutzername des REST-Servers muss angegeben werden." msgid "RPC Version" msgstr "RPC-Version" msgid "RPC server response is incomplete" msgstr "RPC-Serverantwort ist unvollständig." msgid "Raid did not have MCS Channel." msgstr "RAID besaß keinen MCS-Channel." #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "Durch Konfigurationsoption max_luns_per_storage_group festgelegter Grenzwert " "wurde erreicht. Operation zum Hinzufügen von %(vol)s zu Speichergruppe " "%(sg)s wird zurückgewiesen. " #, python-format msgid "Received error string: %s" msgstr "Empfangene Fehlerzeichenfolge: %s" msgid "Reference must be for an unmanaged snapshot." 
msgstr "Referenz muss eine nicht verwaltete Schattenkopie sein." msgid "Reference must be for an unmanaged virtual volume." msgstr "" "Verweis muss für einen nicht verwalteten virtuellen Datenträger gelten. " msgid "Reference must be the name of an unmanaged snapshot." msgstr "Referenz muss der Name einer nicht verwalteten Schattenkopie sein." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "" "Verweis muss der Datenträgername eines nicht verwalteten virtuellen " "Datenträgers sein." msgid "Reference must contain either source-id or source-name element." msgstr "" "Verweis muss entweder Element 'source-id' oder Element 'source-name' " "enthalten." msgid "Reference must contain either source-name or source-id element." msgstr "" "Verweis muss entweder das Element 'source-name' oder das Element 'source-id' " "enthalten." msgid "Reference must contain source-id or source-name element." msgstr "" "Die Referenz muss das Element 'source-id' oder 'source-name' enthalten." msgid "Reference must contain source-id or source-name key." msgstr "Referenz muss Schlüssel 'source-id' oder 'source-name' enthalten." msgid "Reference must contain source-id or source-name." msgstr "Referenz muss 'source-id' oder 'source-name' enthalten." msgid "Reference must contain source-id." msgstr "Referenz muss 'source-id' enthalten." msgid "Reference must contain source-name element." msgstr "Verweis muss Element 'source-name' enthalten. " msgid "Reference must contain source-name or source-id." msgstr "Verweis muss 'source-name' oder 'source-id' enthalten." msgid "Reference must contain source-name." msgstr "Verweis muss Element 'source-name' enthalten." msgid "Reference to volume to be managed must contain source-name." msgstr "" "Die Referenz auf den zu verwaltenden Datenträger muss 'source-name' " "enthalten." #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "" "Die Referenz auf den zu verwaltenden Datenträger %s muss 'source-name' " "enthalten." #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "Die Migration von Datenträger-ID %(id)s wird verweigert. Prüfen Sie Ihre " "Konfiguration, da Quelle und Ziel dieselbe Datenträgergruppe aufweisen: " "%(name)s." msgid "Remote pool cannot be found." msgstr "Der ferne Pool wurde nicht gefunden." msgid "Remove CHAP error." msgstr "Fehler beim Entfernen von CHAP." msgid "Remove fc from host error." msgstr "Fehler beim Entfernen von FC vom Host." msgid "Remove host from array error." msgstr "Fehler beim Entfernen des Hosts aus dem Array." msgid "Remove host from hostgroup error." msgstr "Fehler beim Entfernen des Hosts aus der Hostgruppe." msgid "Remove iscsi from host error." msgstr "Fehler beim Entfernen von iSCSI vom Host." msgid "Remove lun from QoS error." msgstr "Fehler beim Entfernen der LUN aus QoS." msgid "Remove lun from cache error." msgstr "Fehler beim Entfernen der LUN aus dem Cache." msgid "Remove lun from partition error." msgstr "Fehler beim Entfernen der LUN aus der Partition." msgid "Remove port from port group error." msgstr "Fehler beim Entfernen des Ports aus der Portgruppe." msgid "Remove volume export failed." msgstr "Entfernen des Datenträgerexports fehlgeschlagen." msgid "Rename lun on array error." msgstr "Fehler beim Umbenennen der LUN auf dem Array." msgid "Rename snapshot on array error." 
msgstr "Fehler beim Umbenennen der Schattenkopie auf dem Array." #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "Die Replikation %(name)s an %(ssn)s ist fehlgeschlagen." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "Replizierungsservicefunktion nicht gefunden auf %(storageSystemName)s." #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "Replizierungsservice nicht gefunden auf %(storageSystemName)s." msgid "Replication is not enabled" msgstr "Replizierung ist nicht aktiviert" msgid "Replication is not enabled for volume" msgstr "Replizierung ist für Datenträger nicht aktiviert" msgid "Replication not allowed yet." msgstr "Replikation ist noch nicht zulässig." #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "Replizierungsstatus für Datenträger muss 'active' oder 'active-stopped' " "lauten, aber der aktuelle Status lautet: %s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "Replizierungsstatus für Datenträger muss 'inactive', 'active-stopped' oder " "'error' lauten, aber der aktuelle Status lautet: %s" msgid "Request body and URI mismatch" msgstr "Abweichung zwischen Anforderungshauptteil und URI" msgid "Request body contains too many items" msgstr "Anforderungshauptteil enthält zu viele Elemente" msgid "Request body contains too many items." msgstr "Anforderungstext enthält zu viele Elemente." msgid "Request body empty" msgstr "Anforderungshauptteil leer" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "Anforderung an Datera-Cluster gibt unzulässigen Status zurück: %(status)s | " "%(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Angeforderte Sicherung überschreitet zulässige Quote für Sicherungs-" "Gigabytes. Angefordert sind %(requested)s G, die Quote ist %(quota)s G und " "%(consumed)s G wurden verbraucht." #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Angeforderter Datenträger oder angeforderte Momentaufnahme überschreitet " "zulässige Quote für %(name)s. Angefordert sind %(requested)s G, die Quote " "ist %(quota)s G und %(consumed)s G wurden verbraucht." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "Angeforderte Datenträgergröße %(size)d liegt über der maximal zulässigen " "Begrenzung %(limit)d." msgid "Required configuration not found" msgstr "Erforderliche Konfiguration nicht gefunden" #, python-format msgid "Required flag %s is not set" msgstr "Erforderliches Flag %s ist nicht gesetzt" msgid "Requires an NaServer instance." msgstr "Erfordert eine NaServer-Instanz." #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." 
msgstr "" "Zurücksetzen von Sicherungsstatus abgebrochen, der derzeit konfigurierte " "Sicherungsservice [%(configured_service)s] ist nicht der Sicherungsservice, " "der zum Erstellen dieser Sicherung [%(backup_service)s] verwendet wurde." #, python-format msgid "Resizing clone %s failed." msgstr "Ändern der Größe des Klons %s fehlgeschlagen." msgid "Resizing image file failed." msgstr "Fehler bei der Größenänderung der Imagedatei." msgid "Resource could not be found." msgstr "Ressource konnte nicht gefunden werden." msgid "Resource not ready." msgstr "Ressource nicht bereit." #, python-format msgid "Response error - %s." msgstr "Antwortfehler - %s." msgid "Response error - The storage-system is offline." msgstr "Antwortfehler - Das Speichersystem ist offline." #, python-format msgid "Response error code - %s." msgstr "Antwortfehlercode - %s." msgid "RestURL is not configured." msgstr "RestURL ist nicht konfiguriert." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Wiederherstellen von Sicherung abgebrochen, erwarteter Datenträgerstatus war " "%(expected_status)s, aber Status ist %(actual_status)s." #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Wiederherstellen von Sicherung abgebrochen, der derzeit konfigurierte " "Sicherungsservice [%(configured_service)s] ist nicht der Sicherungsservice, " "der zum Erstellen dieser Sicherung [%(backup_service)s] verwendet wurde." #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Wiederherstellen von Sicherung abgebrochen: erwarteter Sicherungsstatus war " "%(expected_status)s, aber Status ist %(actual_status)s." #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Die Anzahl der abgerufenen SolidFire-Datenträger unterscheidet sich von der " "für die bereitgestellten Cinder-Schattenkopien. Abgerufen: %(ret)s Sollwert: " "%(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Die Anzahl der abgerufenen SolidFire-Datenträger unterscheidet sich von der " "für die bereitgestellten Cinder-Datenträger. Abgerufen: %(ret)s Sollwert: " "%(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "Wiederholungsanzahl für Befehl überschritten: %s" msgid "Retryable SolidFire Exception encountered" msgstr "Retryable SolidFire-Ausnahme angetroffen" msgid "Retype cannot change encryption requirements." msgstr "Erneute Eingabe darf die Verschlüsselungsanforderungen nicht ändern." #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "Erneute Eingabe darf die Front-End-QoS-Spezifikationen für im Gebrauch " "befindlichen Datenträger nicht ändern: %s. " msgid "Retype requires migration but is not allowed." msgstr "'Retype' erfordert eine Migration; diese ist jedoch nicht zulässig." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." 
msgstr "" "Die Rollback-Operation für Datenträger %(volumeName)s ist fehlgeschlagen. " "Wenden Sie sich an Ihren Systemadministrator, damit er den Datenträger " "manuell an die Standardspeichergruppe für FAST-Richtlinie %(fastPolicyName)s " "zurückgibt - fehlgeschlagen." #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "Für %(volumeName)s wird ein Rollback durch Löschung durchgeführt." #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" "Es ist nicht zulässig, Cinder mit einer VMware vCenter-Version auszuführen, " "die niedriger ist als %s." msgid "SAN product is not configured." msgstr "SAN-Produkt ist nicht konfiguriert." msgid "SAN protocol is not configured." msgstr "SAN-Protokoll ist nicht konfiguriert." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" "SMBFS-Konfiguration 'smbfs_oversub_ratio' ist ungültig. muss > 0 sein: %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "SMBFS-Konfiguration 'smbfs_used_ratio' ist ungültig. Muss > 0 und <= 1.0 " "sein: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "SMBFS-Konfigurationsdatei unter %(config)s ist nicht vorhanden." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS-Konfigurationsdatei nicht festgelegt (smbfs_shares_config)." #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" "SSH-Befehl fehlgeschlagen nach '%(total_attempts)r' Versuchen : '%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "SSH-Befehl fehlgeschlagen mit Fehler: '%(err)s'. Befehl: '%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "SSH-Befehlsinjektion erkannt: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "SSH-Verbindung für %(fabric)s fehlgeschlagen mit Fehler: %(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "SSL-Zertifikat abgelaufen am %s." #, python-format msgid "SSL error: %(arg)s." msgstr "SSL-Fehler: %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Scheduler-Hostfilter %(filter_name)s konnte nicht gefunden werden." #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "Scheduler-Host-Weigher %(weigher_name)s konnte nicht gefunden werden." #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "Status der sekundären Kopie: %(status)s und synchronisiert: %(sync)s, " "Fortschritt der Synchronisierung ist: %(progress)s%%." #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" "Sekundäre ID darf nicht mit primärem Array übereinstimmen: 'backend_id' = " "%(secondary)s." #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "SerialNumber muss in der Datei %(fileName)s vorhanden sein." #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "Service %(service)s auf Host %(host)s entfernt." #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "" "Der Dienst %(service_id)s konnte auf dem Host %(host)s nicht gefunden werden." 
#, python-format msgid "Service %(service_id)s could not be found." msgstr "Service %(service_id)s konnte nicht gefunden werden." #, python-format msgid "Service %s not found." msgstr "Service %s nicht gefunden." msgid "Service is too old to fulfil this request." msgstr "Der Dienst ist zu alt, um diese Anforderung zu erfüllen." msgid "Service is unavailable at this time." msgstr "Service ist derzeit nicht verfügbar." msgid "Service not found." msgstr "Dienst nicht gefunden." msgid "Set pair secondary access error." msgstr "Fehler beim Festlegen des sekundären Paarzugriffs." msgid "Sets thin provisioning." msgstr "Legt Thin Provisioning fest." msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "Die Festlegung der LUN-QoS-Richtliniengruppe wird für diese Speicherfamilie " "und diese ONTAP-Version nicht unterstützt." msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "Die Festlegung der Richtliniengruppe 'file qos' wird für diese " "Speicherfamilie und diese ONTAP-Version nicht unterstützt." #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "Freigegebenes Verzeichnis %s wegen ungültigem Format ignoriert. Muss das " "Format Adresse:/Export aufweisen.Prüfen Sie die Einstellungen 'nas_ip' und " "'nas_share_path'." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "Freigegebenes Verzeichnis unter %(dir)s kann vom Cinder-Datenträgerservice " "nicht beschrieben werden. Momentaufnahmeoperationen werden nicht unterstützt." #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Sheepdog-E/A-Fehler. Befehl war: \"%s\"." msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "Operationen zum Anzeigen können nur an Projekten in derselben Hierarchie des " "Projekts, das als Bereich für Benutzer festgelegt wurde, erfolgen." msgid "Size" msgstr "Größe" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "" "Größe für Datenträger: %s nicht gefunden, sicherer Löschvorgang nicht " "möglich." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "Größe beträgt %(image_size)d GB und passt nicht in einen Datenträger mit der " "Größe %(volume_size)d GB." #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "Größe des angegebenen Images %(image_size)s GB übersteigt Datenträgergröße " "%(volume_size)s GB." #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "Es wurde angefordert, die Schattenkopie %(id)s zu löschen, während darauf " "gewartet wurde, dass sie verfügbar wird. Möglicherweise wurde eine " "Anforderung gleichzeitig gestellt." #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "Die Schattenkopie %(id)s wurde während des kaskadierenden Löschvorgangs im " "Status %(state)s und nicht im Status 'Wird gelöscht' gefunden." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." 
msgstr "Momentaufnahme %(snapshot_id)s konnte nicht gefunden werden." #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "Momentaufnahme %(snapshot_id)s enthält keine Metadaten mit Schlüssel " "%(metadata_key)s." #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "Schattenkopie %s darf nicht Teil einer Konsistenzgruppe sein." #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "Schattenkopie '%s' ist im Arry nicht vorhanden." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "Momentaufnahme kann nicht erstellt werden, da Datenträger %(vol_id)s nicht " "verfügbar ist, aktueller Datenträgerstatus: %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "" "Momentaufnahme kann nicht erstellt werden, während der Datenträger migriert " "wird." msgid "Snapshot of secondary replica is not allowed." msgstr "Momentaufnahme der sekundären Replik ist nicht zulässig. " #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "Momentaufnahme von Datenträger im Status %s nicht unterstützt." #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "" "Momentaufnahmeressource \"%s\", die an keiner Stelle implementiert ist?" msgid "Snapshot size must be multiple of 1 GB." msgstr "Größe der Schattenkopie muss Vielfaches von 1 GB sein." #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" "Momentaufnahmestatus %(cur)s ist nicht zulässig für update_snapshot_status" msgid "Snapshot status must be \"available\" to clone." msgstr "Momentaufnahmestatus muss zum Klonen \"available\" sein." #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "" "Schattenkopie, die gesichert werden soll, muss verfügbar oder im Gebrauch " "sein, aber der aktuelle Status ist \"%s\"." #, python-format msgid "Snapshot with id of %s could not be found." msgstr "Schattenkopie mit der ID %s wurde nicht gefunden." #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "Momentaufnahme '%(snap)s' ist in Basisimage '%(base)s' nicht vorhanden - " "inkrementelle Sicherung wird abgebrochen" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "" "Momentaufnahmen werden für dieses Datenträgerformat nicht unterstützt: %s" #, python-format msgid "Socket error: %(arg)s." msgstr "Socketfehler: %(arg)s." msgid "SolidFire Cinder Driver exception" msgstr "SolidFire Cinder-Treiberausnahme" msgid "Sort direction array size exceeds sort key array size." msgstr "" "Array-Größe der Sortierrichtung überschreitet Array-Größe des " "Sortierschlüssels." msgid "Source CG is empty. No consistency group will be created." msgstr "" "Quellenkonsistenzgruppe ist leer. Es wird keine Konsistenzgruppe erstellt." msgid "Source host details not found." msgstr "Quellenhostdetails nicht gefunden." msgid "Source volume device ID is required." msgstr "Einheiten-ID für Quellendatenträger ist erforderlich." msgid "Source volume not mid-migration." msgstr "Quellendatenträger befindet sich nicht in einer Migration." #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." 
msgstr "" "Quelle mit Host-IP/Name: %s wurde auf der Zieleinheit für Back-End-" "aktivierte Datenträgermigration nicht gefunden. Es wird mit " "Standardmigration fortgefahren." msgid "SpaceInfo returned byarray is invalid" msgstr "SpaceInfo hat zurückgegeben, dass byarray ungültig ist" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "Der angegebene Host, der zu Datenträger %(vol)s zugeordnet werden soll, " "befindet sich in einer nicht unterstützten Hostgruppe mit %(group)s." msgid "Specified logical volume does not exist." msgstr "Der angegebene logische Datenträger ist nicht vorhanden." #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "Angegebene Schattenkopiegruppe mit der ID %s wurde nicht gefunden. " msgid "Specify a password or private_key" msgstr "Geben Sie einen Wert für 'password' oder 'private_key' an" msgid "Specify san_password or san_private_key" msgstr "'san_password' oder 'san_private_key' angeben" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "" "Geben Sie den Namen des Datenträgertyps, eine Beschreibung, 'is_public' oder " "eine Kombination aus allem an." msgid "Split pair error." msgstr "Fehler bei Paaraufteilung." msgid "Split replication failed." msgstr "Aufteilen der Replikation fehlgeschlagen." msgid "Start LUNcopy error." msgstr "Fehler beim Starten der LUN-Kopie." msgid "State" msgstr "Zustand" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "Status des Knotens ist falsch. Aktueller Status: %s." msgid "Status" msgstr "Status" msgid "Stop snapshot error." msgstr "Fehler beim Stoppen der Momentaufnahme." #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "" "Speicherkonfigurationsservice nicht gefunden auf %(storageSystemName)s." #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "Speicher-Hardware-ID-Management-Service nicht gefunden auf " "%(storageSystemName)s. " #, python-format msgid "Storage Profile %s not found." msgstr "Speicherprofil %s nicht gefunden." #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "Speicherversetzungsservice nicht gefunden auf %(storageSystemName)s." #, python-format msgid "Storage family %s is not supported." msgstr "Speicherfamilie %s wird nicht unterstützt." #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "Speichergruppe %(storageGroupName)s wurde nicht erfolgreich gelöscht" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "Speicherhost %(svr)s nicht gefunden, Namen überprüfen" msgid "Storage pool is not configured." msgstr "Es ist kein Speicherpool konfiguriert." #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "Speicherprofil %(storage_profile)s nicht gefunden." msgid "Storage resource could not be found." msgstr "Speicherressource konnte nicht gefunden werden." msgid "Storage system id not set." msgstr "Speichersystem-ID nicht festgelegt." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Speichersystem für Pool %(poolNameInStr)s nicht gefunden." msgid "Storage-assisted migration failed during manage volume." msgstr "" "Speicherunterstützte Migration ist beim Verwalten des Datenträgers " "fehlgeschlagen." 
#, python-format msgid "StorageSystem %(array)s is not found." msgstr "StorageSystem %(array)s wurde nicht gefunden. " #, python-format msgid "String with params: %s" msgstr "Zeichenkette mit Parameter: %s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "Die Summe der Nutzung untergeordneter Projekte '%(sum)s' ist größer als das " "freie Kontingent von '%(free)s' für das Projekt '%(proj)s' für die Ressource " "'%(res)s'. Reduzieren Sie den Grenzwert oder die Nutzung für mindestens " "eines der folgenden Projekte: '%(child_ids)s'" msgid "Switch over pair error." msgstr "Fehler bei Paarwechsel." msgid "Sync pair error." msgstr "Fehler bei Paarsynchronisierung." msgid "Synchronizing secondary volume to primary failed." msgstr "" "Synchronisieren des sekundären Datenträgers mit Primärdatenträger " "fehlgeschlagen." #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "" "System %(id)s mit unzulässigem Kennwortstatus gefunden - %(pass_status)s." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "System %(id)s gefunden mit unzulässigem Status - %(status)s." msgid "System does not support compression." msgstr "System unterstützt Komprimierung nicht." msgid "System is busy, retry operation." msgstr "System ist ausgelastet, Operation wiederholen." #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "TSM [%(tsm)s] wurde in CloudByte-Speicher für Konto [%(account)s] nicht " "gefunden." msgid "Target volume type is still in use." msgstr "Zieldatenträgertyp ist noch im Gebrauch." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "Abweichung bei Vorlagenbaum; untergeordnete %(slavetag)s wird zur " "übergeordneten Einheit %(mastertag)s hinzugefügt" #, python-format msgid "Tenant ID: %s does not exist." msgstr "Nutzer-ID %s ist nicht vorhanden." msgid "Terminate connection failed" msgstr "Beenden der Verbindung fehlgeschlagen" msgid "Terminate connection unable to connect to backend." msgstr "" "Beim Beenden der Verbindung konnte keine Verbindung zum Back-End hergestellt " "werden." #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "Beenden der Datenträgerverbindung fehlgeschlagen: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "Die zu replizierende Quelle %(type)s %(id)s wurde nicht gefunden." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "Die Parameter 'sort_key' und 'sort_dir' sind veraltet und können nicht " "gemeinsam mit dem Parameter 'sort' verwendet werden." msgid "The EQL array has closed the connection." msgstr "Die EQL-Platteneinheit hat die Verbindung geschlossen." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "Das GPFS-Dateisystem %(fs)s ist nicht auf dem erforderlichen Releasestand. " "Aktueller Stand ist %(cur)s, er muss aber mindestens %(min)s sein." msgid "The IP Address was not found." msgstr "Die IP-Adresse wurde nicht gefunden." #, python-format msgid "" "The WebDAV request failed. 
Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "Die WebDAV-Anforderung ist fehlgeschlagen. Ursache: %(msg)s, Rückgabecode/" "Ursache: %(code)s, Quellendatenträger: %(src)s, Zieldatenträger: %(dst)s, " "Methode: %(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "Der oben genannte Fehler gibt möglicherweise an, dass die Datenbank nicht " "erstellt wurde.\n" "Erstellen Sie eine Datenbank mithilfe von 'cinder-manage db sync', bevor Sie " "diesen Befehl ausführen." #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "Das Array unterstützt keine Speicherpooleinstellung für SLO %(slo)s und " "Workload %(workload)s. Überprüfen Sie, ob das Array gültige SLOs und " "Workloads enthält." msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "" "Die Replikation ist in dem Backend, in dem der Datenträger erstellt wurde, " "nicht aktiviert." #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "Der Befehl %(cmd)s ist fehlgeschlagen. (ret: %(ret)s, stdout: %(out)s, " "stderr: %(err)s)" msgid "The copy should be primary or secondary" msgstr "Die Kopie muss primär oder sekundär sein" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "" "Die Erstellung einer logischen Einheit konnte nicht abgeschlossen werden. " "(Logische Einheit: %(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" "Die dekorierte Methode muss entweder einen Datenträger oder ein " "Momentaufnahmeobjekt akzeptieren" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "Das Gerät im Pfad %(path)s ist nicht verfügbar: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "Die Endzeit (%(end)s) muss nach der Startzeit (%(start)s) liegen." #, python-format msgid "The extra_spec: %s is invalid." msgstr "extra_spec %s ist ungültig." #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "Die zusätzliche Spezifikation: %(extraspec)s ist nicht gültig." #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "Der Failover-Datenträger konnte nicht gelöscht werden: %s" #, python-format msgid "The following elements are required: %s" msgstr "Die folgenden Elemente sind erforderlich: %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "Die folgenden Migrationen haben Herabstufungen, die nicht zulässig sind:\n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "Die Hostgruppe oder das iSCSI-Ziel konnte nicht hinzugefügt werden." msgid "The host group or iSCSI target was not found." msgstr "Die Hostgruppe oder das iSCSI-Ziel wurde nicht gefunden." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "Der Host ist nicht für die Zurücksetzung bereit. Synchronisieren Sie erneut " "die Datenträger und nehmen Sie die Replikation an den 3PAR-Backends wieder " "auf. 
" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "Der Host ist nicht für die Zurücksetzung bereit. Synchronisieren Sie erneut " "die Datenträger und nehmen Sie die Replikation an den LeftHand -Backends " "wieder auf. " msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "Der Host ist nicht für die Zurücksetzung bereit. Synchronisieren Sie erneut " "die Datenträger und nehmen Sie die Replikation an den Storwize-Backends " "wieder auf. " #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "Der iSCSI-CHAP-Benutzer %(user)s ist nicht vorhanden." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "Die importierte LUN %(lun_id)s befindet sich in Pool %(lun_pool)s, der nicht " "durch Host %(host)s verwaltet wird." msgid "The key cannot be None." msgstr "Der Schlüssel kann nicht None sein." #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "" "Die logische Einheit für angegebenen %(type)s %(id)s wurde bereits gelöscht." #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "" "Das zulässige Zeitlimit für Methode %(method)s wurde überschritten. " "(Zeitlimitwert: %(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "Die Methode update_migrated_volume ist nicht implementiert." #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" "Beim angehängten Pfad %(mount_path)s handelt es sich nicht um einen gültigen " "Quobyte-USP-Datenträger. Fehler: %(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "Der Parameter des Speicher-Back-End. (config_group: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "" "Die übergeordnete Sicherung muss für eine inkrementelle Sicherung verfügbar " "sein." #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "" "Die angegebene Schattenkopie '%s' ist keine Schattenkopie des angegebenen " "Datenträgers." msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "Der Verweis auf den Datenträger im Backend muss das Format file_system/" "volume_name haben (volume_name darf keinen Schrägstrich '/' enthalten)." #, python-format msgid "The remote retention count must be %s or less." msgstr "Der Zähler für ferne Aufbewahrung muss kleiner-gleich %s sein. " msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "Der Replikationsmodus wurde nicht ordnungsgemäß mit extra_specs für den " "Datenträgertyp konfiguriert. Wenn replication:mode periodisch ist, muss " "replication:sync_period ebenfalls angegeben werden, und zwar mit einem Wert " "zwischen 300 und 31622400 Sekunden." #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "" "Der Synchronisierungszeitraum für die Replikation muss mindestens %s " "Sekunden sein." 
#, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "Die angeforderte Größe %(requestedSize)s entspricht nicht der sich " "ergebenden Größe %(resultSize)s." #, python-format msgid "The resource %(resource)s was not found." msgstr "Die Ressource %(resource)s wurde nicht gefunden." msgid "The results are invalid." msgstr "Die Ergebnisse sind ungültig." #, python-format msgid "The retention count must be %s or less." msgstr "Der Zähler für Aufbewahrung muss kleiner-gleich %s sein." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "Die Momentaufnahme kann nicht erstellt werden, wenn der Datenträger im " "Wartungsmodus ist." #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "" "Der Quellendatenträger %s befindet sich nicht in dem Pool, der durch den " "aktuellen Host verwaltet wird." msgid "The source volume for this WebDAV operation not found." msgstr "" "Der Quellendatenträger für diese WebDAV-Operation wurde nicht gefunden." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "Der Quellendatenträgertyp '%(src)s' unterscheidet sich vom " "Zieldatenträgertyp '%(dest)s'." #, python-format msgid "The source volume type '%s' is not available." msgstr "Der Quellendatenträgertyp '%s' ist nicht verfügbar." #, python-format msgid "The specified %(desc)s is busy." msgstr "Die angegebene %(desc)s ist ausgelastet." #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "Die angegebene LUN gehört nicht zum genannten Pool: %s." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "Die angegebene logische Einheit %(ldev)s konnte nicht verwaltet werden. Die " "logische Einheit darf nicht zuordnend sein." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" "Die angegebene logische Einheit %(ldev)s konnte nicht verwaltet werden. Die " "logische Einheit darf nicht paarweise verbunden sein." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "Die angegebene logische Einheit %(ldev)s konnte nicht verwaltet werden. Die " "Größe der logischen Einheit muss ein Vielfaches von Gigabyte sein." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "Die angegebene logische Einheit %(ldev)s konnte nicht verwaltet werden. Der " "Datenträgertyp muss 'DP-VOL' sein." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "Die angegebene Operation wird nicht unterstützt. Die Datenträgergröße muss " "der Quelle %(type)s entsprechen. (Datenträger: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "Die angegebene virtuelle Platte ist einem Host zugeordnet." msgid "The specified volume is mapped to a host." msgstr "Der angegebene Datenträger ist einem Host zugeordnet." #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "Das Speicherarray-Kennwort für %s ist falsch. Aktualisieren Sie das " "konfigurierte Kennwort. 
" #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "" "Das Speicher-Back-End kann verwendet werden. (config_group: %(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "Die Speichereinheit unterstützt %(prot)s nicht. Konfigurieren Sie die " "Einheit für die Unterstützung von %(prot)s oder wechseln Sie zu einem " "Treiber, der ein anderes Protokoll verwendet. " #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "Die Striping-Metazahl %(memberCount)s ist zu klein für Datenträger " "%(volumeName)s mit Größe %(volumeSize)s." #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "Der Metadatentyp %(metadata_type)s für Datenträger/Momentaufnahme %(id)s ist " "ungültig." #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "Der Datenträger %(volume_id)s konnte nicht erweitert werden. Der " "Datenträgertyp muss 'Normal' lauten." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "Die Verwaltung für Datenträger %(volume_id)s konnte nicht gestoppt werden. " "Der Datenträgertyp muss %(volume_type)s sein." #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "" "Der Datenträger %(volume_id)s wird erfolgreich verwaltet. (Logische Einheit: " "%(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "" "Die Verwaltung von Datenträger %(volume_id)s wird erfolgreich gestoppt. " "(Logische Einheit: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "Der zuzuordnende Datenträger %(volume_id)s wurde nicht gefunden." msgid "The volume cannot accept transfer in maintenance mode." msgstr "Der Datenträger kann im Wartungsmodus keine Übertragung akzeptieren." msgid "The volume cannot be attached in maintenance mode." msgstr "Der Datenträger kann im Wartungsmodus nicht angehängt werden." msgid "The volume cannot be detached in maintenance mode." msgstr "Der Datenträger kann im Wartungsmodus nicht abgehängt werden." msgid "The volume cannot be updated during maintenance." msgstr "Der Datenträger kann während der Wartung nicht aktualisiert werden." msgid "The volume connection cannot be initialized in maintenance mode." msgstr "" "Die Datenträgerverbindung kann im Wartungsmodus nicht initialisiert werden." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" "Der Datenträgertreiber erfordert den Namen des iSCSI-Initiators im Connector." msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "Der Datenträger ist in 3PAR derzeit aktiv und kann derzeit nicht gelöscht " "werden. Sie können es später erneut versuchen. " msgid "The volume label is required as input." msgstr "Die Datenträgerbezeichnung ist als Eingabe erforderlich." msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "" "Die Datenträgermetadaten können nicht gelöscht werden, wenn der Datenträger " "im Wartungsmodus ist." 
msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "Die Datenträgermetadaten können nicht aktualisiert werden, wenn der " "Datenträger im Wartungsmodus ist." #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "" "Es sind keine Ressourcen zur Verwendung verfügbar. (Ressource: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "Es sind keine gültigen ESX-Hosts vorhanden." #, python-format msgid "There are no valid datastores attached to %s." msgstr "Es sind keine gültigen Datenspeicher an %s angehängt." msgid "There are no valid datastores." msgstr "Es gibt keine gültigen Datenspeicher." #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "Es gibt keine Bezeichnung der %(param)s. Der angegebene Speicher ist zum " "Verwalten des Datenträgers von wesentlicher Bedeutung." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "Es gibt keine Bezeichnung der logischen Einheit. Die angegebene logische " "Einheit ist zum Verwalten des Datenträgers von wesentlicher Bedeutung." msgid "There is no metadata in DB object." msgstr "Im DB-Objekt sind keine Metadaten vorhanden." #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "" "Es ist kein gemeinsam genutztes Laufwerk vorhanden, das %(volume_size)sG " "hosten kann" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "" "Es ist kein gemeinsam genutztes Laufwerk vorhanden, das %(volume_size)s G " "hosten kann." #, python-format msgid "There is no such action: %s" msgstr "Aktion existiert nicht: %s" msgid "There is no virtual disk device." msgstr "Es ist keine virtuelle Platteneinheit vorhanden." #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "" "Fehler beim Hinzufügen des Datenträgers zur Gruppe der fernen Kopie: %s." #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "Beim Erstellen von cgsnapshot ist ein Fehler aufgetreten: %s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "Fehler beim Erstellen der Gruppe der fernen Kopie: %s." #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "" "Fehler beim Konfigurieren des Synchronisierungsintervalls für die Gruppe der " "fernen Kopie: %s." #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Beim Konfigurieren einer Gruppe der fernen Kopie in den 3PAR-Arrays ist ein " "Fehler aufgetreten: ('%s'). Der Datenträger wird nicht als Replikationstyp " "erkannt." #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Beim Konfigurieren eines fernen Zeitplans für die LefHand-Arrays ist ein " "Fehler aufgetreten: ('%s'). Der Datenträger wird nicht als Replikationstyp " "erkannt." #, python-format msgid "There was an error starting remote copy: %s." msgstr "Fehler beim Starten der fernen Kopie: %s." 
#, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Keine Gluster-Konfigurationsdatei konfiguriert (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "Keine NFS-Konfigurationsdatei konfiguriert (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "Es ist kein konfigurierter Quobyte-Datenträger vorhanden (%s). Beispiel: " "quobyte:///" msgid "Thin provisioning not supported on this version of LVM." msgstr "Thin Provisioning wird unter dieser Version von LVM nicht unterstützt." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "ThinProvisioning Enabler ist nicht installiert. Es kann kein Thin-" "Datenträger erstellt werden. " msgid "This driver does not support deleting in-use snapshots." msgstr "" "Dieser Treiber unterstützt nicht das Löschen von im Gebrauch befindlichen " "Momentaufnahmen." msgid "This driver does not support snapshotting in-use volumes." msgstr "" "Dieser Treiber unterstützt nicht das Erstellen von Momentaufnahmen von im " "Gebrauch befindlichen Datenträgern." msgid "This request was rate-limited." msgstr "Diese Anforderung war kursbegrenzt." #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "Diese Systemplattform (%s) wird nicht unterstützt. Dieser Treiber " "unterstützt nur Win32-Plattformen." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "Tier-Richtlinienservice nicht gefunden für %(storageSystemName)s. " #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "Zeitlimit beim Warten auf Nova-Aktualisierung zum Erstellen von " "Momentaufnahme %s überschritten." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "Zeitlimit beim Warten auf Nova-Aktualisierung zum Löschen von Momentaufnahme " "%(id)s überschritten." msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "Zeitlimitwert (in Sekunden), der beim Herstellen einer Verbindung zum ceph-" "Cluster verwendet wird. Wenn der Wert < 0 ist, ist kein Zeitlimit festgelegt " "und der Standardwert für librados wird verwendet. " #, python-format msgid "Timeout while calling %s " msgstr "Zeitlimitüberschreitung beim Aufruf von %s " #, python-format msgid "Timeout while requesting %(service)s API." msgstr "Zeitlimitüberschreitung beim Anfordern der %(service)s-API." #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "" "Zeitlimitüberschreitung beim Anfordern von Leistungsmerkmalen aus dem " "Backend %(service)s." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "Übertragung %(transfer_id)s konnte nicht gefunden werden." #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "Übertragung %(transfer_id)s: Datenträger-ID %(volume_id)s in nicht " "erwartetem Zustand %(status)s; erwartet wurde awaiting-transfer" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "Es wird versucht, Sicherungsmetadaten aus ID %(meta_id)s in Sicherung %(id)s " "zu importieren." 
#, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "Die Task zum Optimieren des Datenträgers wurde gestoppt, bevor sie fertig " "war: volume_name=%(volume_name)s, task-status=%(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "Typ %(type_id)s ist bereits anderen QoS-Spezifikationen zugeordnet: " "%(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "" "Änderung des Typzugriffs ist auf öffentliche Datenträger nicht anwendbar." msgid "Type cannot be converted into NaElement." msgstr "Typ kann nicht in NaElement konvertiert werden." #, python-format msgid "TypeError: %s" msgstr "TypeError: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "" "UUIDs %s befinden sich sowohl in der Liste zum Hinzufügen als auch in der " "zum Entfernen von Datenträgern." #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "Kein Zugriff auf das Storwize-Backend für den Datenträger %s möglich." msgid "Unable to access the backend storage via file handle." msgstr "Zugriff auf Back-End-Speicher über Dateikennung nicht möglich." #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "Zugriff auf Back-End-Speicher über den Pfad %(path)s nicht möglich." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "" "Cinder-Host konnte nicht zu apphosts für Bereich %(space)s hinzugefügt werden" #, python-format msgid "Unable to complete failover of %s." msgstr "Das Failover von %s konnte nicht abgeschlossen werden." msgid "Unable to connect or find connection to host" msgstr "Verbindung zum Host kann nicht hergestellt oder gefunden werden" msgid "Unable to create Barbican Client without project_id." msgstr "Barbican Client kann nicht ohne Projekt-ID erstellt werden." #, python-format msgid "Unable to create consistency group %s" msgstr "Konsistenzgruppe %s konnte nicht erstellt werden" msgid "Unable to create lock. Coordination backend not started." msgstr "" "Es konnte keine Sperre erstellt werden. Das Koordinierungsbackend wurde " "nicht gestartet." #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Standardspeichergruppe für FAST-Richtlinie kann nicht erstellt oder " "abgerufen werden: %(fastPolicyName)s." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "Replikatklon für Datenträger %s konnte nicht erstellt werden." #, python-format msgid "Unable to create the relationship for %s." msgstr "Beziehung für %s kann nicht erstellt werden." #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "Der Datenträger %(name)s konnte nicht aus %(snap)s erstellt werden." #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "Der Datenträger %(name)s konnte nicht aus %(vol)s erstellt werden." #, python-format msgid "Unable to create volume %s" msgstr "Der Datenträger %s konnte nicht erstellt werden." msgid "Unable to create volume. Backend down." msgstr "Datenträger konnte nicht erstellt werden. Backend inaktiv." #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "Konsistenzgruppenmomentaufnahme %s konnte nicht gelöscht werden" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." 
msgstr "" "Momentaufnahme %(id)s konnte nicht gelöscht werden, Status: %(status)s." #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "" "Momentaufnahmerichtlinie auf Datenträger %s konnte nicht gelöscht werden." #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "" "Der Zieldatenträger für den Datenträger %(vol)s kann nicht gelöscht werden. " "Ausnahme: %(err)s." msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "Der Datenträger kann nicht abgehängt werden. Der Datenträgerstatus muss 'in-" "use' lauten und 'attach_status' muss 'attached' lauten, damit das Abhängen " "erfolgen kann." #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "" "Anhand des angegebenen Sekundärziels kann 'secondary_array' nicht ermittelt " "werden: %(secondary)s." #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "" "Momentaufnahmename in Purity für Momentaufnahme %(id)s kann nicht bestimmt " "werden." msgid "Unable to determine system id." msgstr "System-ID kann nicht bestimmt werden." msgid "Unable to determine system name." msgstr "Systemname kann nicht bestimmt werden." #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "Es können keine Operationen zum Verwalten von Momentaufnahmen mit Purity-" "REST-API-Version %(api_version)s ausgeführt werden, erfordert " "%(required_versions)s." #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "Replikation mit Purity-REST-API-Version %(api_version)s nicht möglich. " "Erfordert eine der folgenden Versionen: %(required_versions)s." msgid "Unable to enable replication and snapcopy at the same time." msgstr "" "Replikation und Snapcopy können nicht zur selben Zeit aktiviert werden. " #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Es kann keine Beziehung zum Storwizse-Cluster %s hergestellt werden." #, python-format msgid "Unable to extend volume %s" msgstr "Datenträger %s kann nicht erweitert werden" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "" "Failover für den Datenträger %(id)s auf das sekundäre Backend nicht möglich, " "da die Replikationsbeziehung nicht gewechselt werden kann: %(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "Zurücksetzen auf Standardeinstellung nicht möglich. Dies kann erst nach " "Abschluss eines Failovers erfolgen." #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "Failover für Replikationsziel fehlgeschlagen: %(reason)s)." msgid "Unable to fetch connection information from backend." msgstr "Verbindungsinformationen können nicht vom Back-End abgerufen werden." 
#, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" "Verbindungsinformationen können nicht vom Back-End abgerufen werden: %(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "Purity-ref mit Name=%s wurde nicht gefunden" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "Datenträgergruppe konnte nicht gefunden werden: %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "" "Es wurde kein Failoverziel gefunden. Keine sekundären Ziele konfiguriert." msgid "Unable to find iSCSI mappings." msgstr "iSCSI-Zuordnungen wurden nicht gefunden." #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "ssh_hosts_key_file kann nicht gefunden werden: %s" msgid "Unable to find system log file!" msgstr "Systemprotokolldatei kann nicht gefunden werden!" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "" "Es kann keine funktionsfähige pg-Schattenkopie für ein Failover des " "ausgewählten sekundären Arrays gefunden werden: %(id)s." #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "" "Es können keine funktionsfähigen, sekundären Arrays in den konfigurierten " "Zielen gefunden werden: %(targets)s." #, python-format msgid "Unable to find volume %s" msgstr "Datenträger %s wurde nicht gefunden" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "Es kann keine Blockeinheit für Datei '%s' abgerufen werden" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" "Fehler beim Abrufen der erforderlichen Konfigurationsinformationen zum " "Erstellen eines Datenträgers: %(errorMessage)s." msgid "Unable to get corresponding record for pool." msgstr "Fehler beim Abrufen des entsprechenden Datensatzes für Pool" #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "Informationen zu Bereich %(space)s konnten nicht abgerufen werden. " "Überprüfen Sie, ob der Cluster ausgeführt wird und verbunden ist." msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "Liste der IP-Adressen auf diesem Host konnte nicht abgerufen werden. " "Überprüfen Sie Berechtigungen und Netzbetrieb." msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "" "Liste der Domänenmitglieder konnte nicht abgerufen werden. Überprüfen Sie, " "ob der Cluster ausgeführt wird." msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "Liste der Bereiche konnte für die Neubenennung nicht abgerufen werden. " "Überprüfen Sie, ob der Cluster ausgeführt wird." #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "Fehler beim Abrufen der Statistiken für backend_name: %s" msgid "Unable to get storage volume from job." msgstr "Der Speicherdatenträger konnte aus der Aufgabe nicht abgerufen werden." #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "Zielendpunkte können nicht abgerufen werden für hardwareId " "%(hardwareIdInstance)s." msgid "Unable to get the name of the masking view." msgstr "Der Name der Maskenansicht konnte nicht abgerufen werden." 
msgid "Unable to get the name of the portgroup." msgstr "Der Name der Portgruppe konnte nicht abgerufen werden." #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "" "Die Replikationsbeziehung für den Datenträger %s kann nicht abgerufen werden." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "Datenträger %(deviceId)s konnte nicht in cinder importiert werden. Dies ist " "der Quellendatenträger von Replikationssitzung %(sync)s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "Datenträger %(deviceId)s konnte nicht in cinder importiert werden. Der " "externe Datenträger befindet sich nicht in dem Pool, der durch den aktuellen " "cinder-Host verwaltet wird." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "Datenträger %(deviceId)s konnte nicht in cinder importiert werden. " "Datenträger ist in Maskierungsansicht %(mv)s." #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "Laden von Zertifizierungsstelle aus %(cert)s %(e)s nicht möglich." #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "Zertifikat kann nicht von %(cert)s %(e)s geladen werden." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "Laden von Schlüssel aus %(cert)s %(e)s nicht möglich." #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "Konto %(account_name)s auf SolidFire-Gerät nicht auffindbar" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "Es kann keine SVM bestimmt werden, die die IP-Adresse '%s' verwaltet" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "Die angegebenen Wiedergabeprofile %s wurden nicht gefunden." #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "Vorhandener Datenträger kann nicht verwaltet werden. Datenträger " "%(volume_ref)s bereits verwaltet." #, python-format msgid "Unable to manage volume %s" msgstr "Datenträger %s kann nicht verwaltet werden" msgid "Unable to map volume" msgstr "Datenträger kann nicht zugeordnet werden" msgid "Unable to map volume." msgstr "Datenträger kann nicht zugeordnet werden." msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "" "XML-Anforderung kann nicht analysiert werden. Geben Sie XML im richtigen " "Format an. " msgid "Unable to parse attributes." msgstr "Attribute können nicht analysiert werden." #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "Replik kann nicht zur Primärreplik für Datenträger %s hochgestuft werden. " "Keine sekundäre Kopie verfügbar." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "Ein Host, der nicht von Cinder verwaltet wird, kann mituse_chap_auth=True " "nicht erneut verwendet werden" msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "Host mit unbekannten konfigurierten CHAP-Berechtigungsnachweisen kann nicht " "erneut verwendet werden." 
#, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "Datenträger %(existing)s konnte nicht in %(newname)s umbenannt werden" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "Schattenkopiegruppe mit der ID %s kann nicht abgerufen werden. " #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "Typänderung von %(specname)s nicht möglich. Empfang aktueller und " "angeforderter %(spectype)s-Werte wurde erwartet. Empfangener Wert: %(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "Typänderung nicht möglich: Eine Kopie von Datenträger %s ist vorhanden. " "Durch eine Typänderung würde der Grenzwert von 2 Kopien überschritten " "werden. " #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "Typänderung nicht möglich: Aktuelle Aktion erfordert eine Datenträgerkopie. " "Dies ist nicht zulässig, wenn der neue Typ eine Replizierung ist. " "Datenträger = %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "" "Spiegelmodusreplikation für %(vol)s kann nicht konfiguriert werden. " "Ausnahme: %(err)s." #, python-format msgid "Unable to snap Consistency Group %s" msgstr "Momentaufnahme der Konsistenzgruppe %s konnte nicht erstellt werden" msgid "Unable to terminate volume connection from backend." msgstr "Datenträgerverbindung kann nicht vom Back-End beendet werden." #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "Datenträgerverbindung kann nicht beendet werden: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "Konsistenzgruppe %s konnte nicht aktualisiert werden" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "Typ kann aufgrund von falschem Status nicht aktualisiert werden: " "%(vol_status)s auf Datenträger: %(vol_id)s. Datenträgerstatus muss " "'available' oder 'in-use' lauten." #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "Initiatorgruppe %(igGroupName)s in Maskenansicht %(maskingViewName)s. " msgid "Unacceptable parameters." msgstr "Unzulässige Parameter." #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" "Unerwarteter Zuordnungsstatus %(status)s für %(id)s-Zuordnung. Attribute: " "%(attr)s." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "Unerwartete CLI-Antwort: Abweichung zwischen Header/Zeile. Header: " "%(header)s, Zeile: %(row)s." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "Nicht erwarteter Zuordnungsstatus %(status)s für Zuordnung %(id)s. " "Attribute: %(attr)s." #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "" "Nicht erwartete Ausgabe. 
Erwartet wurde [%(expected)s], empfangen wurde " "[%(output)s]" msgid "Unexpected response from Nimble API" msgstr "Unerwartete Antwort von Nimble-API" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Unerwartete Antwort von Tegile IntelliFlash-API" msgid "Unexpected status code" msgstr "Unerwarteter Statuscode" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "Unerwarteter Statuscode des Switch %(switch_id)s mit dem Protokoll " "%(protocol)s für URL %(page)s. Fehler: %(error)s" msgid "Unknown Gluster exception" msgstr "Unbekannte Gluster-Ausnahme" msgid "Unknown NFS exception" msgstr "Unbekannte NFS-Ausnahme" msgid "Unknown RemoteFS exception" msgstr "Unbekannte RemoteFS-Ausnahmesituation" msgid "Unknown SMBFS exception." msgstr "Unbekannte SMBFS-Ausnahmesituation." msgid "Unknown Virtuozzo Storage exception" msgstr "Unbekannte Virtuozzo-Speicherausnahmebedingung" msgid "Unknown action" msgstr "Unbekannte Aktion" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Es ist nicht bekannt, ob der zu verwaltende Datenträger %s bereits von " "Cinder verwaltet wird. Verwalten des Datenträgers wird abgebrochen. Fügen " "Sie dem Datenträger die angepasste Schemaeigenschaft 'cinder_managed' hinzu " "und setzen Sie den zugehörigen Wert auf 'False'. Alternativ können Sie den " "Wert der Cinder-Konfigurationsrichtlinie 'zfssa_manage_policy' auf 'loose' " "setzen, um diese Einschränkung zu entfernen." #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Es ist nicht bekannt, ob der zu verwaltende Datenträger %s bereits von " "Cinder verwaltet wird. Verwalten des Datenträgers wird abgebrochen. Fügen " "Sie dem Datenträger die angepasste Schemaeigenschaft 'cinder_managed' hinzu " "und setzen Sie den zugehörigen Wert auf 'False'. Alternativ können Sie den " "Wert der Cinder-Konfigurationsrichtlinie 'zfssa_manage_policy' auf 'loose' " "setzen, um diese Einschränkung zu entfernen." #, python-format msgid "Unknown operation %s." msgstr "Unbekannte Operation %s." #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "Unbekannter oder nicht unterstützter Befehl %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "Unbekanntes Protokoll: %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Unbekannte Quotenressourcen %(unknown)s." msgid "Unknown service" msgstr "Unbekannter Dienst" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Unbekannte Sortierrichtung; muss 'desc' oder 'asc' sein" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "Unbekannte Sortierrichtung; muss 'desc' oder 'asc' sein." msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "" "Die Optionen 'Aufheben der Verwaltung' und 'Kaskadiertes Löschen' schließen " "sich gegenseitig aus." 
msgid "Unmanage volume not implemented." msgstr "Aufheben der Verwaltung für einen Datenträger nicht implementiert." msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" "Aufheben der Verwaltung von Schattenkopien aus Failover-Datenträgern ist " "nicht zulässig." msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" "Aufheben der Verwaltung von Schattenkopien aus Failover-Datenträgern ist " "nicht zulässig." #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "Nicht erkanntes QOS-Schlüsselwort: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "Nicht erkanntes Sicherungsformat: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Nicht erkannter read_deleted-Wert '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "gcs-Optionen löschen: %s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "'iscsiadm' nicht erfolgreich. Ausnahme: %(ex)s. " msgid "Unsupported Clustered Data ONTAP version." msgstr "Nicht unterstützte Clusterdaten-ONTAP-Version." msgid "Unsupported Content-Type" msgstr "Nicht unterstützter Inhaltstyp" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" "Nicht unterstützte Data ONTAP-Version. Data ONTAP-Versionen ab 7.3.1 werden " "unterstützt." #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "Nicht unterstützte Datensicherungsmetadatenversion (%s)" msgid "Unsupported backup metadata version requested" msgstr "Nicht unterstützte Metadatenversion für die Sicherung angefordert" msgid "Unsupported backup verify driver" msgstr "Nicht unterstützter Treiber zum Überprüfen der Sicherung" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "Nicht unterstützte Firmware auf Switch %s. Stellen Sie sicher, dass auf dem " "Switch Firmware ab Version 6.4 ausgeführt wird" #, python-format msgid "Unsupported volume format: %s " msgstr "Nicht unterstütztes Volumenformat: %s" msgid "Update QoS policy error." msgstr "Fehler beim Aktualisieren der QoS-Richtlinie." msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "Operationen zum Aktualisieren und zum Löschen einer Quote können nur durch " "einen Administrator der direkt übergeordneten Ebene oder durch den CLOUD-" "Administrator erfolgen." msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "Operationen zum Aktualisieren und zum Löschen einer Quote können nur an " "Projekten in derselben Hierarchie des Projekts, das als Bereich für Benutzer " "festgelegt wurde, erfolgen." msgid "Update list, doesn't include volume_id" msgstr "Listenaktualisierung enthält nicht 'volume_id'" msgid "Updated At" msgstr "Aktualisiert am" msgid "Upload to glance of attached volume is not supported." msgstr "" "Das Hochladen des angehängten Datenträgers auf Glance wird nicht unterstützt." msgid "Use ALUA to associate initiator to host error." msgstr "Fehler beim Verwenden von ALUA zum Zuordnen des Initiators zum Host." msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "Fehler beim Verwenden von CHAP zum Zuordnen des Initiators zum Host. " "Überprüfen Sie den CHAP-Benutzernamen und das Kennwort." 
msgid "User ID" msgstr "Benutzer ID" msgid "User does not have admin privileges" msgstr "Benutzer hat keine Admin-Berechtigungen" msgid "User is not authorized to use key manager." msgstr "Benutzer ist nicht zum Verwenden des Schlüsselmanagers berechtigt. " msgid "User not authorized to perform WebDAV operations." msgstr "" "Der Benutzer ist zum Ausführen von WebDAV-Operationen nicht berechtigt." msgid "UserName is not configured." msgstr "UserName ist nicht konfiguriert." msgid "UserPassword is not configured." msgstr "UserPassword ist nicht konfiguriert." msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "" "V2-Rollback - Datenträger befindet sich in der Standardspeichergruppe und in " "einer weiteren Speichergruppe." msgid "V2 rollback, volume is not in any storage group." msgstr "V2-Rollback. Datenträger befindet sich in keiner Speichergruppe." msgid "V3 rollback" msgstr "V3-Rollback" msgid "VF is not enabled." msgstr "VF ist nicht aktiviert." #, python-format msgid "VV Set %s does not exist." msgstr "VV-Gruppe %s ist nicht vorhanden." #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "Gültige Consumer von QoS-Spezifikationen sind: %s" #, python-format msgid "Valid control location are: %s" msgstr "Gültige Steuerposition ist: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "Validieren der Datenträgerverbindung fehlgeschlagen (Fehler: %(err)s)." #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" "Wert \"%(value)s\" ist nicht gültig für die Konfigurationsoption \"%(option)s" "\"" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "Wert %(param)s für %(param_string)s ist kein boolescher Wert." msgid "Value required for 'scality_sofs_config'" msgstr "Wert für 'scality_sofs_config' erforderlich" #, python-format msgid "ValueError: %s" msgstr "ValueError: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "" "Virtuelle Platte %(name)s nicht in Zuordnung %(src)s -> %(tgt)s enthalten." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "Version %(req_ver)s wird von der API nicht unterstützt. Minimum ist " "%(min_ver)s und Maximum ist %(max_ver)s." #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s kann ein Objekt nicht nach ID abrufen." #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s unterstützt keine Bedingungsaktualisierung." #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "Virtueller Datenträger '%s' ist auf Platteneinheit nicht vorhanden. " #, python-format msgid "Vol copy job for dest %s failed." msgstr "Datenträgerkopierjob für Ziel %s ist fehlgeschlagen." #, python-format msgid "Volume %(deviceID)s not found." msgstr "Datenträger %(deviceID)s nicht gefunden." #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "Datenträger %(name)s nicht auf Platteneinheit gefunden. Es kann nicht " "festgestellt werden, ob zugeordnete Datenträger vorhanden sind." #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "" "Datenträger %(name)s wurde in VNX erstellt, hat aber den Status %(state)s." 
#, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "Datenträger %(vol)s konnte nicht in Pool %(pool)s erstellt werden." #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "" "Datenträger %(vol1)s stimmt nicht mit snapshot.volume_id %(vol2)s überein." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "Der Status des Datenträgers %(vol_id)s muss verfügbar sein oder den Status " "'in-use' aufweisen, der aktuelle Status lautet jedoch: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "Der Status des Datenträgers %(vol_id)s muss für eine Erweiterung 'available' " "sein, der aktuelle Status lautet jedoch: %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "Der Status des Datenträgers %(vol_id)s muss für die Aktualisierung des " "readonly-Flags 'available' sein, der aktuelle Status ist jedoch: " "%(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "Der Status des Datenträgers %(vol_id)s muss 'available' lauten, aber der " "aktuelle Status lautet: %(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Datenträger %(volume_id)s konnte nicht gefunden werden." #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "Volumen %(volume_id)s hat keine Verwaltungsmetadaten mit Schlüssel " "%(metadata_key)s." #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "Datenträger %(volume_id)s enthält keine Metadaten mit Schlüssel " "%(metadata_key)s." #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "Datenträger %(volume_id)s ist aktuell zur nicht unterstützten Hostgruppe " "%(group)s zugeordnet" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "" "Datenträger %(volume_id)s ist aktuell nicht zu Host %(host)s zugeordnet" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" "Datenträger %(volume_id)s ist noch angehängt und muss zuerst abgehängt " "werden." #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "Replizierungsfehler für Datenträger %(volume_id)s: %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "Datenträger %(volume_name)s ist ausgelastet." #, python-format msgid "Volume %s could not be created from source volume." msgstr "Datenträger %s konnte nicht aus Quellendatenträger erstellt werden." #, python-format msgid "Volume %s could not be created on shares." msgstr "" "Datenträger '%s' konnte nicht in den freigegebenen Verzeichnissen erstellt " "werden." #, python-format msgid "Volume %s could not be created." msgstr "Datenträger '%s' konnte nicht erstellt werden." #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "Der Datenträger %s ist nicht in Nexenta SA vorhanden." #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "Der Datenträger %s ist nicht in der Nexenta Store-Appliance vorhanden." 
#, python-format msgid "Volume %s does not exist on the array." msgstr "Der Datenträger %s ist nicht im Array vorhanden." #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "" "Für Datenträger %s ist provider_location nicht angegeben, wird übersprungen." #, python-format msgid "Volume %s doesn't exist on array." msgstr "Datenträger %s ist auf Platteneinheit nicht vorhanden. " #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "Der Datenträger %s ist nicht im ZFSSA-Backend vorhanden." #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "Datenträger %s wird bereits von OpenStack verwaltet." #, python-format msgid "Volume %s is already part of an active migration." msgstr "Datenträger %s ist bereits Teil einer aktiven Migration." #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "Der Datenträger %s hat nicht den Typ 'replication'. Dieser Datenträger muss " "ein Datenträgertyp sein, bei dem die extra Spezifikation " "'replication_enabled' auf ' True' gesetzt ist, damit " "Replikationsaktionen unterstützt werden." #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "Der Datenträger %s ist 'online'. Setzen Sie den Datenträger für die " "Verwaltung mit OpenStack auf 'offline'." #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." msgstr "" "Der Datenträger %s darf nicht migriert oder zugeordnet sein, darf zu keiner " "Konsistenzgruppe gehören und keine Schattenkopien haben." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Datenträger %s darf nicht Teil einer Konsistenzgruppe sein." #, python-format msgid "Volume %s must not be replicated." msgstr "Datenträger %s darf nicht repliziert sein." #, python-format msgid "Volume %s must not have snapshots." msgstr "Datenträger %s darf keine Momentaufnahmen enthalten." #, python-format msgid "Volume %s not found." msgstr "Volumen %s nicht gefunden." #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "Datenträger %s: Fehler beim Versuch, den Datenträger zu erweitern" #, python-format msgid "Volume (%s) already exists on array" msgstr "Datenträger (%s) ist auf Platteneinheit bereits vorhanden" #, python-format msgid "Volume (%s) already exists on array." msgstr "Datenträger (%s) ist auf der Platteneinheit bereits vorhanden." #, python-format msgid "Volume Group %s does not exist" msgstr "Datenträgergruppe %s ist nicht vorhanden" #, python-format msgid "Volume Type %(id)s already exists." msgstr "Datenträgertyp %(id)s ist bereits vorhanden." #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "Datenträgertyp %(type_id)s besitzt keine zusätzliche Spezifikation mit " "Schlüssel %(id)s." #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "Löschen von Datenträgertyp %(volume_type_id)s ist nicht zulässig mit " "vorhandenen Datenträgern mit dem Typ." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "Datenträgertyp %(volume_type_id)s hat keine Sonderspezifikationen mit " "Schlüssel %(extra_specs_key)s" msgid "Volume Type id must not be None." 
msgstr "Datenträgertyp-ID darf nicht 'None' sein." #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "Der Datenträger [%(cb_vol)s] wurde nicht in dem CloudByte-Speicher gefunden, " "der dem OpenStack-Datenträger [%(ops_vol)s] entspricht." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "Datenträger [%s] nicht in CloudByte-Speicher gefunden." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "" "Datenträgeranhängung konnte nicht gefunden werden mit Filter: %(filter)s ." #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "Datenträger-Back-End-Konfiguration ist ungültig: %(reason)s" msgid "Volume by this name already exists" msgstr "Ein Datenträger mit diesem Namen ist bereits vorhanden" msgid "Volume cannot be restored since it contains snapshots." msgstr "" "Datenträger kann nicht wiederhergestellt werden, da er Momentaufnahmen " "enthält." msgid "Volume create failed while extracting volume ref." msgstr "" "Datenträgererstellung beim Extrahieren der Datenträgerreferenz " "fehlgeschlagen." #, python-format msgid "Volume device file path %s does not exist." msgstr "Dateipfad für Datenträgereinheit %s ist nicht vorhanden." #, python-format msgid "Volume device not found at %(device)s." msgstr "Volumengerät unter %(device)s nicht gefunden." #, python-format msgid "Volume driver %s not initialized." msgstr "Volumentreiber %s nicht initialisiert." msgid "Volume driver not ready." msgstr "Volumentreiber ist nicht bereit." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "Volumentreiber meldete einen Fehler: %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "" "Der Datenträger hat eine temporäre Schattenkopie, die zurzeit nicht gelöscht " "werden kann. " msgid "Volume has children and cannot be deleted!" msgstr "" "Der Datenträger hat untergeordnete Elemente und kann nicht gelöscht werden!" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "Datenträger in Konsistenzgruppe %s ist angehängt. Er muss zuerst abgehängt " "werden." msgid "Volume in consistency group still has dependent snapshots." msgstr "" "Datenträger in Konsistenzgruppe verfügt noch immer über abhängige " "Momentaufnahmen." #, python-format msgid "Volume is attached to a server. (%s)" msgstr "Datenträger wurde an einen Server angehängt. (%s)" msgid "Volume is in-use." msgstr "Datenträger ist belegt." msgid "Volume is not available." msgstr "Datenträger ist nicht verfügbar." msgid "Volume is not local to this node" msgstr "Datenträger ist nicht lokal auf diesem Knoten" msgid "Volume is not local to this node." msgstr "Datenträger ist für diesen Knoten nicht lokal." msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "Sicherung der Datenträgermetadaten angefordert, dieser Treiber unterstützt " "diese Funktion jedoch noch nicht." 
#, python-format msgid "Volume migration failed: %(reason)s" msgstr "Volumenmigration fehlgeschlagen: %(reason)s" msgid "Volume must be available" msgstr "Volumen muss verfügbar sein" msgid "Volume must be in the same availability zone as the snapshot" msgstr "" "Datenträger muss sich in derselben Verfügbarkeitszone befinden wie die " "Momentaufnahme" msgid "Volume must be in the same availability zone as the source volume" msgstr "" "Datenträger muss sich in derselben Verfügbarkeitszone befinden wie der " "Quellendatenträger" msgid "Volume must have a volume type" msgstr "Datenträger muss einen Datenträgertyp haben" msgid "Volume must not be part of a consistency group." msgstr "Datenträger darf nicht Teil einer Konsistenzgruppe sein." msgid "Volume must not be replicated." msgstr "Datenträger darf nicht repliziert werden. " msgid "Volume must not have snapshots." msgstr "Datenträger darf keine Momentaufnahmen enthalten." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "Datenträger für Instanz %(instance_id)s nicht gefunden." msgid "Volume not found on configured storage backend." msgstr "Datenträger nicht auf konfiguriertem Speicher-Back-End gefunden." msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "Datenträger nicht im konfigurierten Speicherbackend gefunden. Wenn der Name " "Ihres Datenträgers das Zeichen \"/\" enthält, benennen Sie ihn bitte um und " "wiederholen Sie die Verwaltungsoperation." msgid "Volume not found on configured storage pools." msgstr "Datenträger nicht in konfigurierten Speicherpools gefunden." msgid "Volume not found." msgstr "Datenträger nicht gefunden." msgid "Volume not unique." msgstr "Datenträger ist nicht eindeutig." msgid "Volume not yet assigned to host." msgstr "Datenträger noch nicht dem Host zugeordnet." msgid "Volume reference must contain source-name element." msgstr "Datenträgerreferenz muss Element 'source-name' enthalten." #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "" "Datenträgerreplizierung für %(volume_id)s konnte nicht gefunden werden. " #, python-format msgid "Volume service %s failed to start." msgstr "Volume Service %s konnte nicht gestartet werden." msgid "Volume should have agent-type set as None." msgstr "Für Datenträger sollte Agententyp auf None festgelegt werden." #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "Datenträgergröße %(volume_size)s GB darf nicht kleiner als die Image-minDisk-" "Größe %(min_disk)s GB sein." #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "Datenträgergröße '%(size)s' muss eine Ganzzahl und größer als 0 sein" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "Datenträgergröße '%(size)s' GB darf nicht kleiner als die ursprüngliche " "Datenträgergröße %(source_size)s GB sein. Sie muss >= der ursprünglichen " "Datenträgergröße sein." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "Datenträgergröße '%(size)s' GB darf nicht kleiner als die " "Momentaufnahmengröße %(snap_size)s GB sein. Sie muss >= der ursprünglichen " "Momentaufnahmengröße sein." 
msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "Der Datenträger hat sich seit der letzten Sicherung vergrößert. Führen Sie " "eine vollständige Sicherung durch." msgid "Volume size must be a multiple of 1 GB." msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." msgid "Volume size must be multiple of 1 GB." msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." msgid "Volume size must multiple of 1 GB." msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" "Datenträgerstatus für Datenträger muss 'available' lauten, aber der aktuelle " "Status lautet: %s" msgid "Volume status is in-use." msgstr "Datenträgerstatus ist 'in-use'." #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "Datenträgerstatus muss \"available\" oder \"in-use\" für eine Momentaufnahme " "sein (ist aber %s)." msgid "Volume status must be \"available\" or \"in-use\"." msgstr "Datenträgerstatus muss \"available\" oder \"in-use\" sein." #, python-format msgid "Volume status must be %s to reserve." msgstr "Der Datenträgerstatus muss für eine Reservierung %s sein." msgid "Volume status must be 'available'." msgstr "Datenträgerstatus muss 'available' sein." msgid "Volume to Initiator Group mapping already exists" msgstr "Zuordnung von Datenträger zu Initiatorgruppe ist bereits vorhanden" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "Datenträger, der gesichert werden soll, muss verfügbar oder im Gebrauch " "sein, aber der aktuelle Status ist \"%s\"." msgid "Volume to be restored to must be available" msgstr "Wiederherzustellender Datenträger muss verfügbar sein" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "Datenträgertyp %(volume_type_id)s konnte nicht gefunden werden." #, python-format msgid "Volume type ID '%s' is invalid." msgstr "Volumentypkennung '%s' ist ungültig." #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "Datenträgertypzugriff für Kombination %(volume_type_id)s / %(project_id)s " "ist bereits vorhanden. " #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "Datenträgertypzugriff für Kombination %(volume_type_id)s / %(project_id)s " "nicht gefunden." #, python-format msgid "Volume type does not match for share %s." msgstr "" "Keine Übereinstimmung des Datenträgertyps für freigegebenes Verzeichnis %s." #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "Volumentypverschlüsselung für Typ %(type_id)s ist bereits vorhanden." #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "Volumentypverschlüsselung für Typ %(type_id)s ist nicht vorhanden." msgid "Volume type name can not be empty." msgstr "Datenträgertypname darf nicht leer sein." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" "Datenträgertyp mit dem Namen %(volume_type_name)s konnte nicht gefunden " "werden." #, python-format msgid "Volume with volume id %s does not exist." msgstr "Datenträger mit Datenträger-ID %s ist nicht vorhanden." #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. 
Exiting..." msgstr "" "Datenträger %(volumeName)s ist kein verketteter Datenträger. Sie können die " "Erweiterung nur für einen verketteten Datenträger ausführen. Vorgang wird " "beendet..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" "Der Datenträger %(volumeName)s wurde der Speichergruppe %(sgGroupName)s " "nicht hinzugefügt." #, python-format msgid "Volume: %s could not be found." msgstr "Datenträger: %s wurde nicht gefunden." #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Der Datenträger %s wird bereits von Cinder verwaltet." msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "Datenträger werden in Objekte dieser Größe (in Megabyte) aufgeteilt. " msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" "Anzahl Datenträger/Konto sowohl auf primären als auch auf sekundären " "SolidFire-Konten überschritten." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "VzStorage-Konfiguration 'vzstorage_used_ratio' ist ungültig. Muss > 0 und <= " "1,0 sein: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "VzStorage-Konfigurationsdatei unter %(config)s ist nicht vorhanden." msgid "Wait replica complete timeout." msgstr "Zeitlimitüberschreitung beim Warten auf Fertigstellung des Replikats." #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Warten auf Synchronisierung fehlgeschlagen. Ausführungsstatus: %s." msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "Es wird gewartet, bis alle Knoten mit Cluster verknüpft sind. Stellen Sie " "sicher, dass alle sheep-Dämonprozesse ausgeführt werden." msgid "We should not do switch over on primary array." msgstr "Wechsel in primären Array nicht empfohlen." msgid "Wrong resource call syntax" msgstr "Falsche Syntax für Ressourcenaufruf" msgid "X-IO Volume Driver exception!" msgstr "Ausnahme bei X-IO-Datenträgertreiber!" msgid "XML support has been deprecated and will be removed in the N release." msgstr "XML-Unterstützung wurde eingestellt und wird im N-Release entfernt." msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "XtremIO nicht ordnungsgemäß konfiguriert, keine iSCSI-Portale gefunden" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO nicht richtig initialisiert, keine Cluster gefunden" msgid "You must implement __call__" msgstr "Sie müssen '__call__' implementieren" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "Sie müssen hpe3parclient installieren, bevor Sie 3PAR-Treiber verwenden. " "Führen Sie \"pip install python-3parclient\" aus, um hpe3parclient zu " "installieren." msgid "You must supply an array in your EMC configuration file." msgstr "" "Sie müssen eine Platteneinheit in Ihrer EMC-Konfigurationsdatei angeben." #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "Die ursprüngliche Größe %(originalVolumeSize)s GB ist größer als %(newSize)s " "GB. Nur Erweiterung wird unterstützt. Vorgang wird beendet..." 
#, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "Zone" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Zoning-Richtlinie: %s, nicht erkannt" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "" "_create_and_copy_vdisk_data: Abrufen von Attributen für virtuelle Platte %s " "fehlgeschlagen." msgid "_create_host failed to return the host name." msgstr "_create_host hat Hostnamen nicht zurückgegeben." msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: Hostname kann nicht übersetzt werden. Hostname ist weder " "Unicode noch Zeichenfolge." msgid "_create_host: No connector ports." msgstr "_create_host: keine Connector-Ports." msgid "_create_local_cloned_volume, Replication Service not found." msgstr "'_create_local_cloned_volume', Replikationsdienst nicht gefunden." #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "'_create_local_cloned_volume', Datenträgername: %(volumename)s, " "Quellendatenträgername: %(sourcevolumename)s, Quellendatenträgerinstanz: " "%(source_volume)s, Zieldatenträgerinstanz: %(target_volume)s, Rückgabecode: " "%(rc)lu, Fehler: %(errordesc)s." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - keine Erfolgsnachricht in CLI-Ausgabe gefunden.\n" " Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" msgid "_create_volume_name, id_code is None." msgstr "'_create_volume_name', 'id_code' ist 'None'." msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession, Replikationsdienst wurde nicht gefunden." #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession, Typ der Kopiersitzung ist nicht definiert! " "Kopiersitzung: %(cpsession)s, Kopiertyp: %(copytype)s." #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Rückgabecode: %(rc)lu, Fehler: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "'_delete_volume', Datenträgername: %(volumename)s, Rückgabecode: %(rc)lu, " "Fehler: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "'_delete_volume', Datenträgername: %(volumename)s, " "Speicherkonfigurationsdienst nicht gefunden." #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod. Es kann keine " "Verbindung zu ETERNUS hergestellt werden." msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "" "_extend_volume_op: Erweitern eines Datenträgers mit Schattenkopien wird " "nicht unterstützt." 
#, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget. Es kann keine Verbindung zu ETERNUS hergestellt " "werden." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames. Es " "kann keine Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit. Es kann keine Verbindung zu ETERNUS " "hergestellt werden." #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s. Es " "kann keine Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames. Es " "kann keine Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" "'_find_initiator_names', Connector: %(connector)s, Initiator nicht gefunden." #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames. Es kann keine " "Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances. Es kann keine " "Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, Daten sind " "'None'! Bearbeiten Sie die Treiberkonfigurationsdatei und korrigieren Sie " "die Angaben." #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "'_get_eternus_connection', Dateiname: %(filename)s, IP: %(ip)s, Port: " "%(port)s, Benutzer: %(user)s, Kennwort: ****, URL: %(url)s, FEHLGESCHLAGEN!" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s. IQN wurde " "nicht gefunden." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo. Es kann keine Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames. 
" "Es kann keine Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance. Es kann " "keine Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: Attributheader und -Werte stimmen nicht überein.\n" " Header: %(header)s\n" " Werte: %(row)s." msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "_get_host_from_connector hat Hostnamen für Connector nicht zurückgegeben." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, Abrufen von Hostaffinität aus 'aglist/vol_instance' " "fehlgeschlagen, affinitygroup: %(ag)s, ReferenceNames. Es kann keine " "Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, Abrufen der Hostaffinitätsinstanz fehlgeschlagen, volmap: " "%(volmap)s, GetInstance. Es kann keine Verbindung zu ETERNUS hergestellt " "werden." msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement. Es kann " "keine Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames. Es kann keine " "Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit. Es kann keine Verbindung zu ETERNUS " "hergestellt werden." #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance. Es kann keine " "Verbindung zu ETERNUS hergestellt werden." msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "" "_get_target_port, EnumerateInstances. Es kann keine Verbindung zu ETERNUS " "hergestellt werden." #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "" "_get_target_port, protcol: %(protocol)s, target_port wurde nicht gefunden." #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "" "_get_unmanaged_replay: Schattenkopie mit dem Namen %s wurde nicht gefunden." #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay: Datenträger-ID %s wurde nicht gefunden." msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: 'source-name' muss angegeben werden." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" 
msgstr "" "_get_vdisk_map_properties: Es konnten keine Informationen zur FC-Verbindung " "für die Host-Datenträger-Verbindung abgerufen werden. Ist der Host " "ordnungsgemäß für FC-Verbindungen konfiguriert?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: Kein Knoten in E/A-Gruppe %(gid)s für Datenträger " "%(vol)s gefunden." #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Speicherkonfigurationsdienst nicht gefunden." #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controllerkonfigurationsdienst " "wurde nicht gefunden." #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Rückgabecode: %(rc)lu, Fehler: %(errordesc)s." #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit. Es kann keine Verbindung zu ETERNUS " "hergestellt werden." msgid "_update_volume_stats: Could not get storage pool data." msgstr "" "_update_volume_stats: Pooldaten aus Speicher konnten nicht abgerufen werden." #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete, cpsession: %(cpsession)s. Status der Kopiersitzung " "ist BROKEN." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "add_vdisk_copy fehlgeschlagen: Eine Kopie von Datenträger %s ist bereits " "vorhanden. Durch Hinzufügen einer weiteren Kopie würde der Grenzwert von 2 " "Kopien überschritten werden. " msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" "add_vdisk_copy ohne eine Kopie der virtuellen Platte im erwarteten Pool " "gestartet." #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants muss ein boolescher Wert sein, erhalten '%s'." 
msgid "already created" msgstr "bereits erstellt" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "Momentaufnahme zu fernem Knoten zuordnen" #, python-format msgid "attribute %s not lazy-loadable" msgstr "Attribut %s kann nicht über Lazy-Loading geladen werden" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Sicherung: %(vol_id)s konnte keine feste Einheitenverbindung von %(vpath)s " "zu %(bpath)s.\n" "Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Sicherung: %(vol_id)s hat keine Benachrichtigung zur erfolgreichen Sicherung " "vom Server erhalten.\n" "Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Sicherung: %(vol_id)s konnte DSMC aufgrund von ungültigen Argumenten unter " "%(bpath)s.\n" "Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Sicherung: %(vol_id)s konnte DSMC nicht unter %(bpath)s ausführen.\n" "Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "Datensicherung: %(vol_id)s fehlgeschlagen. %(path)s ist keine Datei." #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "Sicherung: %(vol_id)s fehlgeschlagen. %(path)s ist ein unerwarteter " "Dateityp. Blockdateien oder reguläre Dateien werden unterstützt, der " "tatsächliche Dateimodus ist %(vol_mode)s." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "Sicherung: %(vol_id)s fehlgeschlagen. Reeller Pfad zum Datenträger auf " "%(path)s kann nicht abgerufen werden." 
msgid "being attached by different mode" msgstr "Wird von anderem Modus angehängt" #, python-format msgid "call failed: %r" msgstr "Aufruf fehlgeschlagen: %r" msgid "call failed: GARBAGE_ARGS" msgstr "Aufruf fehlgeschlagen: GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "Aufruf fehlgeschlagen: PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "Aufruf fehlgeschlagen: PROG_MISMATCH: %r" msgid "call failed: PROG_UNAVAIL" msgstr "Aufruf fehlgeschlagen: PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "LUN-Zuordnung nicht gefunden, ig:%(ig)s vol:%(vol)s" msgid "can't find the volume to extend" msgstr "zu erweiternder Datenträger kann nicht gefunden werden" msgid "can't handle both name and index in req" msgstr "" "Es können nicht sowohl der Name als auch der Index in der Anforderung " "verarbeitet werden" msgid "cannot understand JSON" msgstr "kann JSON nicht verstehen" msgid "cannot understand XML" msgstr "Kann XML nicht verstehen" #, python-format msgid "cg-%s" msgstr "cg-%s" msgid "cgsnapshot assigned" msgstr "cgsnapshot zugeordnet" msgid "cgsnapshot changed" msgstr "cgsnapshot geändert" msgid "cgsnapshots assigned" msgstr "cgsnapshots zugeordnet" msgid "cgsnapshots changed" msgstr "cgsnapshots geändert" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error: Kennwort oder privater SSH-Schlüssel ist für die " "Authentifizierung erforderlich: Legen Sie entweder die Option 'san_password' " "oder die Option 'san_private_key' fest." msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error: System-ID kann nicht bestimmt werden." msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error: Systemname kann nicht bestimmt werden." msgid "check_hypermetro_exist error." msgstr "check_hypermetro_exist-Fehler." #, python-format msgid "clone depth exceeds limit of %s" msgstr "Klontiefe überschreitet den Grenzwert %s" msgid "consistencygroup assigned" msgstr "consistencygroup zugeordnet" msgid "consistencygroup changed" msgstr "consistencygroup geändert" msgid "control_location must be defined" msgstr "control_location muss definiert sein" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "" "'create_cloned_volume', Quellendatenträger ist in ETERNUS nicht vorhanden." #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "'create_cloned_volume', Name der Zieldatenträgerinstanz: " "%(volume_instancename)s, Abrufen der Instanz fehlgeschlagen." msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume: Größe von Quelle und Ziel sind unterschiedlich." #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume: Der Quellendatenträger %(src_vol)s mit %(src_size)d GB " "passt nicht in einen Zieldatenträger %(tgt_vol)s mit %(tgt_size)d GB." msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src muss aus der Schattenkopie einer " "Konsistenzgruppe oder aus einer Quellenkonsistenzgruppe erstellt werden. 
" msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "'create_consistencygroup_from_src' unterstützt nur eine cgsnapshot-Quelle " "oder eine Konsistenzgruppenquelle. Die Verwendung mehrerer Quellen ist nicht " "zulässig." msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "'create_consistencygroup_from_src' unterstützt eine cgsnapshot-Quelle oder " "eine Konsistenzgruppenquelle. Die Verwendung mehrerer Quellen ist nicht " "zulässig." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "" "create_copy: Als Quelle angegebene virtuelle Platte %(src)s (%(src_id)s) ist " "nicht vorhanden." #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "" "create_copy: Als Quelle angegebene virtuelle Platte %(src)s ist nicht " "vorhanden." msgid "create_host: Host name is not unicode or string." msgstr "create_host: Hostname ist weder Unicode noch Zeichenfolge." msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: Keine Initiatoren oder WWPNs angegeben." msgid "create_hypermetro_pair error." msgstr "create_hypermetro_pair-Fehler." #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "'create_snapshot', Eternuspool: %(eternus_pool)s, Pool nicht gefunden." #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "'create_snapshot', Schattenkopiename: %(snapshotname)s, " "Quellendatenträgername: %(volumename)s, Datenträgerinstanzpfad: " "%(vol_instance)s, Zieldatenträgername: %(d_volumename)s, Pool: %(pool)s, " "Rückgabecode: %(rc)lu, Fehler: %(errordesc)s." #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "'create_snapshot', Datenträgername: %(s_volumename)s, Quellendatenträger " "nicht in ETERNUS gefunden." #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "" "'create_snapshot', Datenträgername: %(volumename)s, Replikationsdienst nicht " "gefunden." #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot: Datenträgerstatus muss \"available\" oder \"in-use\" für " "eine Momentaufnahme sein. Der ungültige Status lautet %s." msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: Abrufen des Quellendatenträgers fehlgeschlagen." #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume, volume: %(volume)s, EnumerateInstances. Es kann keine " "Verbindung zu ETERNUS hergestellt werden." #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "'create_volume', Datenträger: %(volume)s, Datenträgername: %(volumename)s, " "Eternuspool: %(eternus_pool)s, Speicherkonfigurationsdienst nicht gefunden." 
#, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "'create_volume', Datenträgername: %(volumename)s, Poolname: " "%(eternus_pool)s, Rückgabecode: %(rc)lu, Fehler: %(errordesc)s." msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "" "'create_volume_from_snapshot', Quellendatenträger ist in ETERNUS nicht " "vorhanden." #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "'create_volume_from_snapshot', Name der Zieldatenträgerinstanz: " "%(volume_instancename)s, Abrufen der Instanz fehlgeschlagen." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" "create_volume_from_snapshot: Momentaufnahme %(name)s ist nicht vorhanden." #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: Der Status der Momentaufnahme muss \"available" "\" zum Erstellen eines Datenträgers sein. Der ungültige Status lautet %s." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" "create_volume_from_snapshot: Größe von Quelle und Ziel sind unterschiedlich." msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "create_volume_from_snapshot: Datenträgergröße unterscheidet sich von der " "Größe des auf der Momentaufnahme basierenden Datenträgers." msgid "deduplicated and auto tiering can't be both enabled." msgstr "" "'deduplicated' und 'auto tiering' können nicht gleichzeitig aktiviert sein." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "Löschen: %(vol_id)s konnte DSMC aufgrund von ungültigen Argumenten nicht " "ausführen mit Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Löschen: %(vol_id)s konnte DSMC nicht ausführen mit Standardausgabe: " "%(out)s\n" " Standardfehler: %(err)s" msgid "delete_hypermetro error." msgstr "delete_hypermetro-Fehler." #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator: %s-ACL nicht gefunden. Wird fortgesetzt." msgid "delete_replication error." msgstr "delete_replication-Fehler." #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" "Löschen von Momentaufnahme %(snapshot_name)s mit abhängigen Datenträgern" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "Löschen von Datenträger %(volume_name)s mit Momentaufnahme" msgid "detach snapshot from remote node" msgstr "Zuordnung der Momentaufnahme zu fernem Knoten aufheben" msgid "do_setup: No configured nodes." msgstr "do_setup: keine konfigurierten Knoten." msgid "element is not a child" msgstr "Element ist kein untergeordnetes Element" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries muss größer-gleich 0 sein" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "Fehler beim Schreiben von Objekt an Swift. 
MD5 von Objekt in Swift " "[%(etag)s] entspricht nicht MD5 von an Swift gesendetem Objekt [%(md5)s]" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "'extend_volume', Eternuspool: %(eternus_pool)s, Pool nicht gefunden." #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "'extend_volume', Datenträger: %(volume)s, Datenträgername: %(volumename)s, " "Eternuspool: %(eternus_pool)s, Speicherkonfigurationsdienst nicht gefunden." #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "'extend_volume', Datenträgername: %(volumename)s, Rückgabecode: %(rc)lu, " "Fehler: %(errordesc)s, Pooltyp: %(pooltype)s." #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "" "'extend_volume', Datenträgername: %(volumename)s, Datenträger nicht gefunden." msgid "failed to create new_volume on destination host" msgstr "Erstellen von new_volume auf Zielhost fehlgeschlagen" msgid "fake" msgstr "fake" #, python-format msgid "file already exists at %s" msgstr "Datei ist unter %s bereits vorhanden" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "fileno wird durch SheepdogIOWrapper nicht unterstützt" msgid "fileno() not supported by RBD()" msgstr "fileno() wird von RBD() nicht unterstützt" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "Das Dateisystem %s ist nicht in der Nexenta Store-Appliance vorhanden." msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "flashsystem_multihostmap_enabled ist auf 'False' gesetzt, Multi-Host-" "Zuordnung wird nicht zugelassen. CMMVC6071E Die Zuordnung von virtueller " "Platte zu Host wurde nicht erstellt, da die virtuelle Platte bereits einem " "Host zugeordnet ist." msgid "flush() not supported in this version of librbd" msgstr "flush() wird in dieser Version von librbd nicht unterstützt" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s gesichert durch: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s gesichert durch:%(backing_file)s" msgid "force delete" msgstr "löschen erzwingen" msgid "get_hyper_domain_id error." msgstr "get_hyper_domain_id-Fehler." msgid "get_hypermetro_by_id error." msgstr "get_hypermetro_by_id-Fehler." #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params: Fehler beim Abrufen der Ziel-IP für Initiator %(ini)s. " "Überprüfen Sie die Konfigurationsdatei." #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: Fehler beim Abrufen der Attribute für den Datenträger %s" msgid "glance_metadata changed" msgstr "'glance_metadata' geändert" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode wurde als copy_on_write festgelegt, aber %(vol)s und " "%(img)s gehören zu anderen Dateisystemen." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." 
msgstr "" "gpfs_images_share_mode wurde als copy_on_write festgelegt, aber %(vol)s und " "%(img)s gehören zu anderen Dateigruppen." #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "hgst_group %(grp)s und hgst_user %(usr)s müssen in cinder.conf zu gültigen " "Benutzern/Gruppen zugeordnet werden" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "" "hgst_net %(net)s wurde in cinder.conf angegeben, aber in Cluster nicht " "gefunden" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "hgst_redundancy muss in cinder.conf auf 0 (keine HA) oder 1 (HA) festgelegt " "werden." msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "hgst_space_mode muss in cinder.conf eine Oktalzahl/Ganzzahl sein" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "hgst_storage-Server %(svr)s nicht im Format :" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "hgst_storage_servers müssen in cinder.conf definiert werden" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "http-Service wurde möglicherweise mitten in dieser Operation abrupt " "inaktiviert oder in den Wartungsstatus versetzt." msgid "id cannot be None" msgstr "ID darf nicht 'None' sein" #, python-format msgid "image %s not found" msgstr "Abbild %s nicht gefunden" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "" "'initialize_c'onnection', Datenträger: %(volume)s, Datenträger nicht " "gefunden." #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "" "initialize_connection: Fehler beim Abrufen der Attribute für Datenträger %s." #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "" "initialize_connection: Fehlendes Datenträgerattribut für Datenträger %s." #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: Kein Knoten in E/A-Gruppe %(gid)s für Datenträger " "%(vol)s gefunden." #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: Virtuelle Platte %s ist nicht definiert." #, python-format msgid "invalid user '%s'" msgstr "ungültiger Benutzer '%s'" #, python-format msgid "iscsi portal, %s, not found" msgstr "iSCSI-Portal %s nicht gefunden" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "iscsi_ip_address muss in Konfigurationsdatei festgelegt werden, wenn " "Protokoll 'iSCSI' verwendet wird." msgid "iscsiadm execution failed. " msgstr "Ausführen von 'iscsiadm' fehlgeschlagen." #, python-format msgid "key manager error: %(reason)s" msgstr "Schlüsselmanagerfehler: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key nicht bestimmt" msgid "limit param must be an integer" msgstr "'limit'-Parameter muss eine Ganzzahl sein" msgid "limit param must be positive" msgstr "'limit'-Parameter muss positiv sein" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "'manage_existing' kann einen Datenträger nicht verwalten, der mit Hosts " "verbunden ist. Trennen Sie vor dem Import die Verbindung dieses Datenträgers " "zu vorhandenen Hosts. 
" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing erfordert den Schlüssel 'name' zum Identifizieren eines " "vorhandenen Datenträgers." #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot: Fehler beim Verwalten der vorhandenen Wiedergabe " "%(ss)s auf dem Datenträger %(vol)s" #, python-format msgid "marker [%s] not found" msgstr "Marker [%s] nicht gefunden" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "Fehlende Anführungszeichen für mdiskgrp %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "migration_policy muss 'on-demand' oder 'never' sein, übergeben: %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "mkfs fehlgeschlagen auf Datenträger %(vol)s, Fehlernachricht: %(err)s." msgid "mock" msgstr "mock" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs ist nicht installiert" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "mehrere Ressourcen mit dem Namen %s durch drbdmanage gefunden" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "mehrere Ressourcen mit Momentaufnahme-ID %s gefunden" msgid "name cannot be None" msgstr "Name darf nicht 'None' sein" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "" "naviseccli_path: NAVISECCLI-Tool %(path)s konnte nicht gefunden werden." #, python-format msgid "no REPLY but %r" msgstr "Keine Antwort, aber %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "keine Momentaufnahme mit ID %s in drbdmanage gefunden" #, python-format msgid "not exactly one snapshot with id %s" msgstr "nicht genau eine Momentaufnahme mit ID %s" #, python-format msgid "not exactly one volume with id %s" msgstr "nicht genau ein Datenträger mit ID %s" #, python-format msgid "obj missing quotes %s" msgstr "Fehlende Anführungszeichen für obj %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled ist nicht ausgeschaltet." msgid "progress must be an integer percentage" msgstr "Fortschritt muss ein Ganzzahlprozentsatz sein" msgid "promote_replica not implemented." msgstr "promote_replica nicht implementiert. " msgid "provider must be defined" msgstr "Provider muss definiert sein" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "qemu-img ab Version %(minimum_version)s ist für diesen Datenträgertreiber " "erforderlich. Aktuelle qemu-img-Version: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img ist nicht installiert und das Image weist den Typ %s auf. Es können " "nur RAW-Images verwendet werden, wenn qemu-img nicht installiert ist." msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img ist nicht installiert und das Plattenformat wurde nicht angegeben. " "Es können nur RAW-Images verwendet werden, wenn qemu-img nicht installiert " "ist." 
msgid "rados and rbd python libraries not found" msgstr "rados- und rbd-python-Bibliotheken nicht gefunden" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "'read_deleted' kann nur 'no', 'yes' oder 'only' sein, nicht '%r'" #, python-format msgid "replication_device should be configured on backend: %s." msgstr "'replication_device' muss im Backend konfiguriert sein: %s." #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "'replication_device' mit 'backend_id' [%s] fehlt." #, python-format msgid "replication_failover failed. %s not found." msgstr "'replication_failover' fehlgeschlagen. %s wurde nicht gefunden." msgid "replication_failover failed. Backend not configured for failover" msgstr "" "'replication_failover' fehlgeschlagen. Das Backend ist nicht für ein " "Failover konfiguriert." #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Wiederherstellung: %(vol_id)s konnte DSMC wegen ungültiger Argumente nicht " "unter %(bpath)s.\n" "Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Wiederherstellung: %(vol_id)s konnte DSMC nicht unter %(bpath)s ausführen.\n" "Standardausgabe: %(out)s\n" " Standardfehler: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Wiederherstellung: %(vol_id)s fehlgeschlagen.\n" "Standardausgabe: %(out)s\n" "Standardfehler: %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup abgebrochen, tatsächliche Objektliste entspricht nicht der in " "den Metadaten gespeicherten Liste." msgid "root element selecting a list" msgstr "Stammelement wählt eine Liste aus" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "rtslib_fb enthält Mitglied %s nicht: Sie benötigen möglicherweise eine " "neuere python-rtslib-fb." msgid "san_ip is not set." msgstr "san_ip wurde nicht festgelegt." msgid "san_ip must be set" msgstr "'san_ip' muss festgelegt sein" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip: Konfiguration für Pflichtfeld. san_ip ist nicht festgelegt. " msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "san_login und/oder san_password ist für Datera-Treiber in der cinder.conf " "nicht festgelegt. Legen Sie diese Informationen fest und starten Sie den " "cinder-volume-Service erneut." msgid "serve() can only be called once" msgstr "serve() kann nur einmal aufgerufen werden." 
msgid "service not found" msgstr "Dienst nicht gefunden" msgid "snapshot does not exist" msgstr "Momentaufnahme ist nicht vorhanden" #, python-format msgid "snapshot id:%s not found" msgstr "Momentaufnahme-ID %s nicht gefunden" #, python-format msgid "snapshot-%s" msgstr "snapshot-%s" msgid "snapshots assigned" msgstr "Schattenkopien zugeordnet" msgid "snapshots changed" msgstr "Schattenkopien geändert" #, python-format msgid "source vol id:%s not found" msgstr "Quellendatenträger-ID %s nicht gefunden" #, python-format msgid "source volume id:%s is not replicated" msgstr "Quellendatenträger-ID %s wird nicht repliziert" msgid "source-name cannot be empty." msgstr "'source-name' darf nicht leer sein." msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "" "Das Format für 'source-name' muss wie folgt lauten: " "'vmdk_Pfad@vm_Inventarpfad'." #, python-format msgid "status must be %s and" msgstr " Status muss %s sein und " msgid "status must be available" msgstr "Status muss 'available' sein" msgid "stop_hypermetro error." msgstr "stop_hypermetro-Fehler." msgid "subclasses must implement construct()!" msgstr "Unterklassen müssen Konstrukt () implementieren!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo fehlgeschlagen, Vorgang wird fortgesetzt" msgid "sync_hypermetro error." msgstr "sync_hypermetro-Fehler." msgid "sync_replica not implemented." msgstr "sync_replica nicht implementiert. " #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli nicht installiert. Standardverzeichnis (%(default_path)s) konnte " "nicht erstellt werden: %(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "terminate_connection: Fehler beim Abrufen von Hostname von Connector." msgid "timeout creating new_volume on destination host" msgstr "Zeitlimitüberschreitung beim Erstellen von new_volume auf Zielhost" msgid "too many body keys" msgstr "zu viele Textschlüssel" #, python-format msgid "umount: %s: not mounted" msgstr "umount: %s: nicht eingehängt" #, python-format msgid "umount: %s: target is busy" msgstr "Abhängen: %s: Ziel ist ausgelastet" msgid "umount: : some other error" msgstr "Abhängen: : ein anderer Fehler" msgid "umount: : target is busy" msgstr "Abhängen: : Ziel ist ausgelastet" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "" "unmanage_snapshot: Schattenkopie mit dem Namen %s wurde nicht gefunden." #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot: Datenträger-ID %s wurde nicht gefunden." #, python-format msgid "unrecognized argument %s" msgstr "Nicht erkanntes Argument %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "Nicht unterstützter Komprimierungsalgorithmus: %s" msgid "valid iqn needed for show_target" msgstr "Gültiger qualifizierter iSCSI-Name für show_target erforderlich" #, python-format msgid "vdisk %s is not defined." msgstr "Virtuelle Platte %s ist nicht definiert." 
msgid "vmemclient python library not found" msgstr "vmemclient-python-Bibliothek nicht gefunden" #, python-format msgid "volume %s not found in drbdmanage" msgstr "Datenträger %s in drbdmanage nicht gefunden" msgid "volume assigned" msgstr "Datenträger zugeordnet" msgid "volume changed" msgstr "Datenträger geändert" msgid "volume does not exist" msgstr "Datenträger ist nicht vorhanden" msgid "volume is already attached" msgstr "Datenträger ist bereits angehängt" msgid "volume is not local to this node" msgstr "Datenträger ist nicht lokal auf diesem Knoten" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "Datenträgergröße %(volume_size)d ist zu klein zum Wiederherstellen von " "Sicherung der Größe %(size)d." #, python-format msgid "volume size %d is invalid." msgstr "Datenträgergröße %d ist ungültig." msgid "volume_type cannot be None" msgstr "'volume_type' darf nicht 'None' sein" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "volume_type muss beim Erstellen eines Datenträgers in einer Konsistenzgruppe " "angegeben sein." msgid "volume_type_id cannot be None" msgstr "volume_type_id darf nicht 'None' sein" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" "volume_types muss zum Erstellen der Konsistenzgruppe %(name)s angegeben " "werden." #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "" "volume_types muss zum Erstellen der Konsistenzgruppe %s angegeben werden." msgid "volumes assigned" msgstr "Datenträger zugeordnet" msgid "volumes changed" msgstr "Datenträger geändert" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition: %s hat zulässiges Zeitlimit überschritten." #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "" "Die Eigenschaft 'zfssa_manage_policy' muss auf 'strict' oder 'loose' gesetzt " "sein. Aktueller Wert: %s." msgid "{} is not a valid option." msgstr "{} ist keine gültige Option." cinder-8.0.0/cinder/locale/cinder-log-warning.pot0000664000567000056710000020572412701406257023057 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the cinder project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-18 06:32+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: cinder/context.py:204 msgid "Unable to get internal tenant context: Missing required config parameters." msgstr "" #: cinder/coordination.py:171 #, python-format msgid "Reconnect attempt %(attempt)s failed. Next try in %(backoff).2fs." 
msgstr "" #: cinder/quota_utils.py:86 #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" #: cinder/quota_utils.py:97 cinder/transfer/api.py:201 #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " "volumes already consumed)" msgstr "" #: cinder/service.py:95 msgid "" "OSProfiler is enabled.\n" "It means that person who knows any of hmac_keys that are specified in " "/etc/cinder/cinder.conf can trace his requests. \n" "In real life only operator can read this file so there is no security " "issue. Note that even if person can trigger profiler, only admin user can" " retrieve trace information.\n" "To disable OSprofiler set in cinder.conf:\n" "[profiler]\n" "enabled=false" msgstr "" #: cinder/service.py:200 #, python-format msgid "" "Report interval must be less than service down time. Current config " "service_down_time: %(service_down_time)s, report_interval for this: " "service is: %(report_interval)s. Setting global service_down_time to: " "%(new_down_time)s" msgstr "" #: cinder/utils.py:983 #, python-format msgid "Invalid trace flag: %s" msgstr "" #: cinder/api/__init__.py:31 msgid "" "The v1 api is deprecated and is not under active development. You should " "set enable_v1_api=false and enable_v3_api=true in your cinder.conf file." msgstr "" #: cinder/api/extensions.py:276 #, python-format msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" #: cinder/api/extensions.py:345 #, python-format msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" #: cinder/api/extensions.py:370 #, python-format msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" #: cinder/api/openstack/__init__.py:114 #, python-format msgid "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " "resource" msgstr "" #: cinder/backup/api.py:291 #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG backup " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" #: cinder/backup/api.py:303 #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create backups (%(d_consumed)d " "backups already consumed)" msgstr "" #: cinder/backup/chunkeddriver.py:725 msgid "Error while listing objects, continuing with delete." msgstr "" #: cinder/backup/driver.py:212 msgid "" "Destination volume type is different from source volume type for an " "encrypted volume. Encrypted backup restore has failed." msgstr "" #: cinder/backup/manager.py:693 #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is " "not verified. Skipping verify." msgstr "" #: cinder/backup/manager.py:830 #, python-format msgid "" "Failed to terminate the connection of volume %(volume_id)s, but it is " "acceptable." msgstr "" #: cinder/backup/drivers/ceph.py:1172 #, python-format msgid "" "RBD image for backup %(backup)s of volume %(volume)s not found. Deleting " "backup metadata." msgstr "" #: cinder/common/sqlalchemyutils.py:68 msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" #: cinder/consistencygroup/api.py:111 #, python-format msgid "Availability zone '%s' is invalid" msgstr "" #: cinder/db/sqlalchemy/api.py:121 msgid "Use of empty request context is deprecated" msgstr "" #: cinder/db/sqlalchemy/api.py:233 #, python-format msgid "Deadlock detected when running '%(func_name)s': Retrying..." 
msgstr "" #: cinder/db/sqlalchemy/api.py:1009 #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" #: cinder/image/cache.py:168 #, python-format msgid "Image-volume cache for host %(host)s does not have enough space (GB)." msgstr "" #: cinder/image/glance.py:134 msgid "" "glance_num_retries shouldn't be a negative value. The number of retries " "will be set to 0 until this iscorrected in the cinder.conf." msgstr "" #: cinder/image/image_utils.py:72 msgid "qemu-img is not installed." msgstr "" #: cinder/keymgr/conf_key_mgr.py:80 msgid "" "config option keymgr.fixed_key has not been defined: some operations may " "fail unexpectedly" msgstr "" #: cinder/keymgr/conf_key_mgr.py:136 #, python-format msgid "Not deleting key %s" msgstr "" #: cinder/scheduler/filter_scheduler.py:427 #, python-format msgid "No weighed hosts found for volume with properties: %s" msgstr "" #: cinder/scheduler/host_manager.py:458 #, python-format msgid "volume service is down. (host: %s)" msgstr "" #: cinder/scheduler/filters/capacity_filter.py:71 #, python-format msgid "" "Insufficient free space for volume creation. Total capacity is " "%(total).2f on host %(host)s." msgstr "" #: cinder/scheduler/filters/capacity_filter.py:93 #, python-format msgid "" "Insufficient free space for thin provisioning. The ratio of provisioned " "capacity over total capacity %(provisioned_ratio).2f has exceeded the " "maximum over subscription ratio %(oversub_ratio).2f on host %(host)s." msgstr "" #: cinder/scheduler/filters/capacity_filter.py:114 #, python-format msgid "" "Filtering out host %(host)s with an invalid maximum over subscription " "ratio of %(oversub_ratio).2f. The ratio should be a minimum of 1.0." msgstr "" #: cinder/scheduler/filters/capacity_filter.py:124 #, python-format msgid "" "Insufficient free space for volume creation on host %(host)s (requested /" " avail): %(requested)s/%(available)s" msgstr "" #: cinder/scheduler/filters/driver_filter.py:61 #, python-format msgid "Error in filtering function '%(function)s' : '%(error)s' :: failing host" msgstr "" #: cinder/scheduler/filters/instance_locality_filter.py:97 #, python-format msgid "Hint \"%s\" dropped because ExtendedServerAttributes not active in Nova." msgstr "" #: cinder/scheduler/filters/instance_locality_filter.py:108 #, python-format msgid "" "Hint \"%s\" dropped because Nova did not return enough information. " "Either Nova policy needs to be changed or a privileged account for Nova " "should be specified in conf." msgstr "" #: cinder/scheduler/weights/goodness.py:54 msgid "Goodness function not set :: defaulting to minimal goodness rating of 0" msgstr "" #: cinder/scheduler/weights/goodness.py:62 #, python-format msgid "" "Error in goodness_function function '%(function)s' : '%(error)s' :: " "Defaulting to a goodness of 0" msgstr "" #: cinder/scheduler/weights/goodness.py:73 #, python-format msgid "" "Invalid goodness result. Result must be between 0 and 100. Result " "generated: '%s' :: Defaulting to a goodness of 0" msgstr "" #: cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py:87 #: cinder/volume/drivers/netapp/dataontap/block_base.py:133 #, python-format msgid "" "The \"netapp_size_multiplier\" configuration option is deprecated and " "will be removed in the Mitaka release. Please set \"reserved_percentage =" " %d\" instead." 
msgstr "" #: cinder/transfer/api.py:189 cinder/volume/flows/api/create_volume.py:622 #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " "(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" #: cinder/volume/api.py:428 #, python-format msgid "Unable to delete encryption key for volume: %s." msgstr "" #: cinder/volume/api.py:759 cinder/volume/utils.py:721 #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " "(%(d_consumed)dG of %(d_quota)dG already consumed)." msgstr "" #: cinder/volume/api.py:771 cinder/volume/utils.py:733 #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " "snapshots already consumed)." msgstr "" #: cinder/volume/driver.py:471 #, python-format msgid "Failed to activate volume copy throttling: %(err)s" msgstr "" #: cinder/volume/driver.py:672 #, python-format msgid "" "The colon in vendor name was replaced by underscore. Updated vendor name " "is %(name)s\"." msgstr "" #: cinder/volume/driver.py:686 #, python-format msgid "" "Vendor unique property \"%(property)s\" must start with vendor prefix " "with colon \"%(prefix)s\". The property was not registered on " "capabilities list." msgstr "" #: cinder/volume/driver.py:864 #, python-format msgid "" "Failed terminating the connection of volume %(volume_id)s, but it is " "acceptable." msgstr "" #: cinder/volume/driver.py:2328 cinder/volume/targets/iscsi.py:154 msgid "ISCSI provider_location not stored, using discovery" msgstr "" #: cinder/volume/manager.py:246 #, python-format msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" #: cinder/volume/manager.py:272 msgid "Suppressing requests library SSL Warnings" msgstr "" #: cinder/volume/manager.py:462 #, python-format msgid "Detected volume stuck in %(curr_status)s status, setting to ERROR." msgstr "" #: cinder/volume/manager.py:481 msgid "Detected snapshot stuck in creating status, setting to ERROR." msgstr "" #: cinder/volume/manager.py:1147 #, python-format msgid "" "Unable to ensure space for image-volume in cache. Will skip creating " "entry for image %(image)s on host %(host)s." msgstr "" #: cinder/volume/manager.py:1157 #, python-format msgid "" "Unable to clone image_volume for image %(image_id)s will not create cache" " entry." msgstr "" #: cinder/volume/manager.py:1169 #, python-format msgid "Failed to create new image-volume cache entry. Error: %(exception)s" msgstr "" #: cinder/volume/manager.py:1259 #, python-format msgid "Registration of image volume URI %(uri)s to image %(image_id)s failed." msgstr "" #: cinder/volume/manager.py:1332 #, python-format msgid "Deleting image in unexpected status: %(image_status)s." msgstr "" #: cinder/volume/manager.py:1338 msgid "Image delete encountered an error." msgstr "" #: cinder/volume/manager.py:1826 #, python-format msgid "" "Failed to migrate volume. The destination volume %(vol)s is not deleted " "since the source volume may have been deleted." msgstr "" #: cinder/volume/manager.py:1995 #, python-format msgid "Update driver status failed: %(config_group)s is uninitialized." msgstr "" #: cinder/volume/manager.py:3385 msgid "" "Error encountered on Cinder backend during freeze operation, service is " "frozen, however notification to driver has failed." 
msgstr "" #: cinder/volume/qos_specs.py:201 #, python-format msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" #: cinder/volume/qos_specs.py:215 #, python-format msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" #: cinder/volume/qos_specs.py:229 #, python-format msgid "Failed to disassociate qos specs %s." msgstr "" #: cinder/volume/throttling.py:88 #, python-format msgid "Failed to setup blkio cgroup to throttle the device '%(device)s'." msgstr "" #: cinder/volume/utils.py:284 #, python-format msgid "" "Incorrect value error: %(blocksize)s, it may indicate that " "'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" #: cinder/volume/utils.py:708 #, python-format msgid "Error encountered translating config_string: %(config_string)s to dict" msgstr "" #: cinder/volume/drivers/block_device.py:97 #, python-format msgid "The device %s won't be cleared." msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:169 #, python-format msgid "Got disconnected; trying to reconnect. (%s)" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:329 #, python-format msgid "" "Try #%(try)d: Volume \"%(res)s\"/%(vol)d not yet deployed on " "\"%(host)s\", waiting." msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:718 #, python-format msgid "snapshot: %s not found, skipping delete operation" msgstr "" #: cinder/volume/drivers/eqlx.py:156 msgid "" "Configuration options eqlx_use_chap, eqlx_chap_login and " "eqlx_chap_password are deprecated. Use use_chap_auth, chap_username and " "chap_password respectively for the same." msgstr "" #: cinder/volume/drivers/eqlx.py:170 msgid "" "Configuration option eqlx_cli_timeout is deprecated and will be removed " "in M release. Use ssh_conn_timeout instead." msgstr "" #: cinder/volume/drivers/eqlx.py:450 #, python-format msgid "Volume %s was not found while trying to delete it." msgstr "" #: cinder/volume/drivers/eqlx.py:565 #, python-format msgid "Volume %s is not found!, it may have been deleted." msgstr "" #: cinder/volume/drivers/glusterfs.py:113 #, python-format msgid "Exception during unmounting %s" msgstr "" #: cinder/volume/drivers/glusterfs.py:133 #, python-format msgid "Failed to refresh mounts, reason=%s" msgstr "" #: cinder/volume/drivers/glusterfs.py:241 cinder/volume/drivers/quobyte.py:225 #: cinder/volume/drivers/remotefs.py:282 #, python-format msgid "Volume %s does not have provider_location specified, skipping" msgstr "" #: cinder/volume/drivers/glusterfs.py:350 msgid "" "Fallocate not supported by current version of glusterfs. So falling back " "to dd." msgstr "" #: cinder/volume/drivers/hgst.py:344 msgid "Unable to poll cluster free space." msgstr "" #: cinder/volume/drivers/hgst.py:410 #, python-format msgid "Unable to delete space %(space)s" msgstr "" #: cinder/volume/drivers/hgst.py:415 msgid "Attempted to delete a space that's not there." msgstr "" #: cinder/volume/drivers/lvm.py:191 #, python-format msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" #: cinder/volume/drivers/lvm.py:435 #, python-format msgid "snapshot: %s not found, skipping delete operations" msgstr "" #: cinder/volume/drivers/nfs.py:353 cinder/volume/drivers/remotefs.py:569 #, python-format msgid "" "The NAS file permissions mode will be 666 (allowing other/world read & " "write access). This is considered an insecure NAS environment. Please see" " %s for information on a secure NFS configuration." 
msgstr "" #: cinder/volume/drivers/nfs.py:374 cinder/volume/drivers/remotefs.py:562 #, python-format msgid "" "The NAS file operations will be run as root: allowing root level access " "at the storage backend. This is considered an insecure NAS environment. " "Please see %s for information on a secure NAS configuration." msgstr "" #: cinder/volume/drivers/nimble.py:169 #, python-format msgid "Error updating agent-type for volume %s." msgstr "" #: cinder/volume/drivers/pure.py:320 #, python-format msgid "Adding Volume to Protection Group failed with message: %s" msgstr "" #: cinder/volume/drivers/pure.py:371 #, python-format msgid "Volume deletion failed with message: %s" msgstr "" #: cinder/volume/drivers/pure.py:402 #, python-format msgid "Unable to delete snapshot, assuming already deleted. Error: %s" msgstr "" #: cinder/volume/drivers/pure.py:470 #, python-format msgid "Purity host deletion failed: %(msg)s." msgstr "" #: cinder/volume/drivers/pure.py:692 #, python-format msgid "Unable to delete Protection Group: %s" msgstr "" #: cinder/volume/drivers/pure.py:765 #, python-format msgid "Unable to delete Protection Group Snapshot: %s" msgstr "" #: cinder/volume/drivers/pure.py:882 #, python-format msgid "Unable to rename %(old_name)s, error message: %(error)s" msgstr "" #: cinder/volume/drivers/pure.py:1125 #, python-format msgid "Disable replication on volume failed: already disabled: %s" msgstr "" #: cinder/volume/drivers/pure.py:1389 #, python-format msgid "Skipping creation of PG %s since it already exists." msgstr "" #: cinder/volume/drivers/pure.py:1397 #, python-format msgid "Protection group %s is deleted but not eradicated - will recreate." msgstr "" #: cinder/volume/drivers/quobyte.py:148 msgid "" "The NAS file operations will be run as root, allowing root level access " "at the storage backend." msgstr "" #: cinder/volume/drivers/quobyte.py:160 msgid "" "The NAS file permissions mode will be 666 (allowing other/world read & " "write access)." msgstr "" #: cinder/volume/drivers/quobyte.py:372 #, python-format msgid "Exception during mounting %s" msgstr "" #: cinder/volume/drivers/quobyte.py:432 #, python-format msgid "Failed to unmount previous mount: %s" msgstr "" #: cinder/volume/drivers/quobyte.py:436 #, python-format msgid "Unknown error occurred while checking mount point: %s Trying to continue." msgstr "" #: cinder/volume/drivers/quobyte.py:455 #, python-format msgid "%s is already mounted" msgstr "" #: cinder/volume/drivers/quobyte.py:474 #, python-format msgid "Volume is not writable. Please broaden the file permissions. Mount: %s" msgstr "" #: cinder/volume/drivers/rbd.py:184 msgid "flush() not supported in this version of librbd" msgstr "" #: cinder/volume/drivers/rbd.py:401 msgid "Unable to get rados pool stats." msgstr "" #: cinder/volume/drivers/rbd.py:909 msgid "volume_tmp_dir is now deprecated, please use image_conversion_dir." msgstr "" #: cinder/volume/drivers/remotefs.py:367 #, python-format msgid "%(path)s is being set with open permissions: %(perm)s" msgstr "" #: cinder/volume/drivers/remotefs.py:979 #, python-format msgid "No backing file found for %s, allowing snapshot to be deleted." msgstr "" #: cinder/volume/drivers/sheepdog.py:177 #, python-format msgid "Volume not found. %s" msgstr "" #: cinder/volume/drivers/sheepdog.py:220 #, python-format msgid "Snapshot \"%s\" not found." msgstr "" #: cinder/volume/drivers/sheepdog.py:222 #, python-format msgid "Volume \"%s\" not found." 
msgstr "" #: cinder/volume/drivers/smbfs.py:305 #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "" #: cinder/volume/drivers/solidfire.py:565 #, python-format msgid "More than one valid preset was detected, using %s" msgstr "" #: cinder/volume/drivers/solidfire.py:1004 msgid "Requested image is not accessible by current Tenant." msgstr "" #: cinder/volume/drivers/tegile.py:376 #, python-format msgid "" "TegileIntelliFlashVolumeDriver(%(clsname)s) _update_volume_stats failed: " "%(error)s" msgstr "" #: cinder/volume/drivers/tintri.py:389 #, python-format msgid "Exception while creating image %(image_id)s snapshot. Exception: %(exc)s" msgstr "" #: cinder/volume/drivers/tintri.py:414 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:452 #, python-format msgid "Exception during deleting %s" msgstr "" #: cinder/volume/drivers/tintri.py:423 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:765 #, python-format msgid "Destination %s already exists." msgstr "" #: cinder/volume/drivers/tintri.py:431 #, python-format msgid "Exception moving file %(src)s. Message: %(e)s" msgstr "" #: cinder/volume/drivers/tintri.py:496 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:511 #, python-format msgid "Unexpected exception during image cloning in share %s" msgstr "" #: cinder/volume/drivers/tintri.py:611 msgid "Unexpected exception while listing used share." msgstr "" #: cinder/volume/drivers/xio.py:464 #, python-format msgid "IOnetworks GET failed (%d)" msgstr "" #: cinder/volume/drivers/xio.py:589 #, python-format msgid "Could not get status for %(name)s (%(status)d)." msgstr "" #: cinder/volume/drivers/xio.py:596 msgid "No volume node in XML content." msgstr "" #: cinder/volume/drivers/xio.py:603 #, python-format msgid "No status payload for volume %s." msgstr "" #: cinder/volume/drivers/xio.py:690 #, python-format msgid "Volume %(name)s already presented (%(status)d)!" msgstr "" #: cinder/volume/drivers/xio.py:958 #, python-format msgid "Could not get pool information (%s)!" msgstr "" #: cinder/volume/drivers/xio.py:1193 #, python-format msgid "%s not found!" msgstr "" #: cinder/volume/drivers/xio.py:1204 #, python-format msgid "DELETE call failed for %s!" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:671 #, python-format msgid "Volume initialization failure. (%s)" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:843 #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2384 #, python-format msgid "Unable to create folder %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1010 #, python-format msgid "delete_volume: unable to find volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1085 #, python-format msgid "Unable to find appropriate OS %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1373 msgid "Inconsistent Luns." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1742 #, python-format msgid "Unable to create snapshot %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1780 #, python-format msgid "Unable to find snapshot %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2571 #, python-format msgid "Unable to locate replication %(vol)s to %(ssn)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:300 #, python-format msgid "Unable to delete replication of Volume %(vname)s to Storage Center %(sc)s." 
msgstr ""

#: cinder/volume/drivers/dell/dell_storagecenter_common.py:347
#, python-format
msgid "Unable to locate volume: %s"
msgstr ""

#: cinder/volume/drivers/dell/dell_storagecenter_common.py:1019
#, python-format
msgid "SSN %s appears to be down."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:744
msgid ""
"The VMAX plugin only supports Retype. If a pool based migration is "
"necessary this will happen on a Retype. From the command line: cinder "
"--os-volume-api-version 2 retype --migration-policy on-demand"
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:781
#, python-format
msgid ""
"Failed to migrate: %(volumeName)s from default source storage group for "
"FAST policy: %(sourceFastPolicyName)s. Attempting cleanup... "
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:809
#, python-format
msgid ""
"Attempting a rollback of: %(volumeName)s to original pool "
"%(sourcePoolInstanceName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:841
#, python-format
msgid "_migrate_rollback on: %(volumeName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:877
#, python-format
msgid "_migrate_cleanup on: %(volumeName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:1074
#, python-format
msgid ""
"The volume: %(volumename)s was not originally part of the default "
"storage group for FAST policy %(fastPolicyName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:1167
#, python-format
msgid "Volume: %(volumeName)s does not currently belong to any storage group."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:1536
#, python-format
msgid ""
"Volume is masked but not to host %(host)s as expected. Returning empty "
"dictionary."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:2216
#, python-format
msgid ""
"Pre-check for deletion. Volume: %(volumeName)s is part of a storage "
"group. Attempting removal from %(storageGroupInstanceNames)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:3092
#, python-format
msgid "Volume: %(volumeName)s does not currently belong to any storage group."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:3365
#, python-format
msgid ""
"The volume: %(volumename)s was not originally part of the default "
"storage group for FAST policy %(fastPolicyName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:3798
#, python-format
msgid ""
"Clone failed on V3. Cleaning up the target volume. Clone name: "
"%(cloneName)s "
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_common.py:4012
#, python-format
msgid "FAST is enabled. Policy: %(fastPolicyName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_fast.py:140
#, python-format
msgid "Volume: %(volumeName)s does not belong to storage group %(defaultSgName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_fast.py:411
msgid "Unable to get storage tiers from tier policy rule."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_fc.py:258
#, python-format
msgid "Volume %(volume)s is not in any masking view."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:209
#, python-format
msgid ""
"Volume: %(volumeName)s does not belong to storage group "
"%(defaultSgGroupName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:522
#, python-format
msgid "Volume: %(volumeName)s is already part of storage group %(sgGroupName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:1123
#: cinder/volume/drivers/emc/emc_vmax_masking.py:1390
#, python-format
msgid "Unable to find Masking view: %(view)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:1292
#, python-format
msgid ""
"No storage group found. Performing rollback on volume %(volumeName)s to "
"return it to the default storage group for FAST policy "
"%(fastPolicyName)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:1672
#, python-format
msgid ""
"Volume %(volumeName)s was not originally part of the default storage "
"group for the FAST Policy."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:1819
#, python-format
msgid "Volume %(volumeName)s belongs to %(sgNum)s storage groups."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:2239
#, python-format
msgid "No target ports found in masking view %(maskingView)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:2309
#, python-format
msgid "No port group found in masking view %(mv)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:2328
#, python-format
msgid "No initiator group found in masking view %(mv)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:2528
#, python-format
msgid "Deletion of initiator path %(hardwareIdPath)s failed."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_masking.py:2601
#, python-format
msgid ""
"Initiator group %(initiatorGroupName)s is associated with masking views "
"and can't be deleted. Number of associated masking views: %(nmv)d."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:709
#, python-format
msgid ""
"Remaining capacity %(remainingCapacityGb)s GB is determined from SRP "
"pool capacity and not the SLO capacity. Performance may not be what you "
"expect."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_utils.py:1225
#, python-format
msgid "Storage sync name not found for target %(target)s on %(storageSystem)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_utils.py:1266
#, python-format
msgid ""
"Group sync name not found for target group %(target)s on "
"%(storageSystem)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_utils.py:1721
#, python-format
msgid ""
"CreateStorageHardwareID failed. Initiator: %(initiator)s, rc=%(rc)d, "
"ret=%(ret)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vmax_utils.py:1741
msgid "Cannot determine the hardware type."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:707
msgid "san_secondary_ip is configured as the same value as san_ip."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:812
#, python-format
msgid "LUN already exists, LUN name %(name)s. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:866
#, python-format
msgid "LUN is already deleted, LUN name %(name)s. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:940
#, python-format
msgid "LUN %(name)s is already expanded. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:996
#, python-format
msgid "Consistency group %(name)s already exists. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1080
#, python-format
msgid "CG %(cg_name)s does not exist. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1084
#, python-format
msgid "CG %(cg_name)s is being deleted. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1106
#, python-format
msgid "Cgsnapshot name %(name)s already exists. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1135
#, python-format
msgid "Snapshot %(name)s for consistency group does not exist. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1154
#: cinder/volume/drivers/emc/emc_vnx_cli.py:1174
#, python-format
msgid "Snapshot %(name)s already exists. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1191
#, python-format
msgid "Snapshot %(name)s may already be deleted. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1198
#, python-format
msgid "Snapshot %(name)s is in use, retry. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1225
#, python-format
msgid "Mount point %(name)s already exists. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1252
#, python-format
msgid ""
"Snapshot %(snapname)s is already attached to snapshot mount point "
"%(mpname)s. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1272
#, python-format
msgid "The specified snapshot mount point %s is not currently attached."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1305
#, python-format
msgid ""
"The migration command may have hit a network timeout. Double-check "
"whether the migration actually started successfully. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1317
#, python-format
msgid "Start migration failed. Message: %s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1320
#, python-format
msgid "Delete temp LUN after migration start failed. LUN: %s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1466
#, python-format
msgid "Storage group %(name)s already exists. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1482
#, python-format
msgid ""
"Storage group %(name)s doesn't exist, may have already been deleted. "
"Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1512
#, python-format
msgid ""
"Host %(host)s has already disconnected from storage group %(sgname)s. "
"Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1549
#, python-format
msgid "HLU %(hlu)s has already been removed from %(sgname)s. Message: %(msg)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1725
msgid "No array serial number returned, set as unknown."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1759
#, python-format
msgid "Storage Group %s is not found."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1868
#, python-format
msgid ""
"Invalid iSCSI port %(sp)s-%(port)s-%(vlan)s found in io_port_list, will "
"be ignored."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:1937
#, python-format
msgid "Found unavailable iSCSI target: %s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2155
msgid ""
"destroy_empty_storage_group: True. Empty storage group will be deleted "
"after volume is detached."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2188
msgid "force_delete_lun_in_storagegroup=True"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2195
msgid ""
"ignore_pool_full_threshold: True. LUN creation will still be forced even "
"if the pool full threshold is exceeded."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2215
#, python-format
msgid ""
"The following specified storage pools do not exist: %(unexist)s. This "
"host will only manage the storage pools: %(exist)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2344
msgid "snapcopy metadata is ignored when creating a volume."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2387
msgid "Unknown migration rate specified, using [high] as migration rate."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2395
msgid ""
"Extra spec key 'storagetype:pool' is obsolete as of driver version "
"5.1.0. This key will be ignored."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2432
msgid ""
"Both 'storagetype:provisioning' and 'provisioning:type' are set in the "
"extra specs; the value of 'provisioning:type' will be used. The key "
"'storagetype:provisioning' may be deprecated in the next release."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2441
msgid ""
"Extra spec key 'storagetype:provisioning' may be deprecated in the next "
"release. It is recommended to use extra spec key 'provisioning:type' "
"instead."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2494
#, python-format
msgid ""
"LUN corresponding to %s is still in some Storage Groups. Try to bring "
"the LUN out of Storage Groups and retry the deletion."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2527
#, python-format
msgid "LUN %(name)s is not ready for extension: %(out)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2548
msgid ""
"Failed to get target_pool_name and target_array_serial. 'location_info' "
"is not in host['capabilities']."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2561
msgid "Error parsing target_pool_name/target_array_serial."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2575
#, python-format
msgid ""
"Could not get the pool information of host %s. Storage-assisted "
"migration is not supported. The host may be using a legacy driver."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2708
#: cinder/volume/drivers/huawei/huawei_driver.py:876
msgid "Storage-assisted migration failed during retype."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2781
#, python-format
msgid "Storage Pool '%(pool)s' is '%(state)s'."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2791
#, python-format
msgid ""
"Maximum number of Pool LUNs, %s, has been created. No more LUNs can be "
"created."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:2898
#, python-format
msgid "LUN %(name)s is not ready for snapshot: %(out)s"
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3216
#, python-format
msgid "LUN with id %(remove_id)s is not present in cg %(cg_name)s, skipping it."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3311
#, python-format
msgid "Storage Group %s was not found. Creating it."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3374
#, python-format
msgid "Failed to register %(itor)s to SP%(sp)s port %(portid)s because: %(msg)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3493
#, python-format
msgid ""
"Failed to extract initiators of %s, so ignoring the deregistration "
"operation."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3500
#, python-format
msgid "Failed to deregister %(itor)s because: %(msg)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3761
#, python-format
msgid "Storage Group %s was not found. terminate_connection() is unnecessary."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3770
#, python-format
msgid "Volume %(vol)s was not in Storage Group %(sg)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3792
#, python-format
msgid "Failed to destroy Storage Group %s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:3798
#, python-format
msgid "Failed to connect host %(host)s back to storage group %(sg)s."
msgstr ""

#: cinder/volume/drivers/emc/emc_vnx_cli.py:4169
#, python-format
msgid "No replication info from this volume: %s."
msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4269 #, python-format msgid "" "Delete the temporary cgsnapshot %(name)s failed. This temporary " "cgsnapshot can be deleted manually. Message: %(msg)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4355 #, python-format msgid "" "The source volume is a legacy volume. Create volume in the pool where the" " source volume %s is created." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4431 #, python-format msgid "CreateSMPTask.revert: delete mount point %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4451 #, python-format msgid "AttachSnapTask.revert: detach mount point %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4489 #, python-format msgid "CreateDestLunTask.revert: delete temp lun %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4558 #, python-format msgid "CreateSnapshotTask.revert: delete temp cgsnapshot %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4563 #, python-format msgid "CreateSnapshotTask.revert: delete temp snapshot %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4585 #, python-format msgid "" "CopySnapshotTask.revert: delete the copied snapshot %(new_name)s of " "%(source_name)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4696 #, python-format msgid "MirrorView already created, mirror name %(name)s. Message: %(msg)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4730 #, python-format msgid "MirrorView %(name)s was already deleted. Message: %(msg)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4803 #, python-format msgid "Getting MirrorView %(name)s failed. Message: %(msg)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4923 #, python-format msgid "%(method)s: destroying mirror view %(name)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4956 #, python-format msgid "%(method)s: destroying secondary LUN %(name)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4982 #, python-format msgid "%(method)s: removing secondary image from %(name)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:131 msgid "No storage pool name or id was found." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:170 msgid "No protection domain name or id was specified in configuration." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:191 msgid "Verify certificate is not set, using default of False." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:239 msgid "QoS specs are overriding extra_specs." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:567 #, python-format msgid "" "ScaleIO only supports volumes with a granularity of 8 GBs. The new volume" " size is: %d." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:658 #, python-format msgid "Ignoring error in delete volume %s: Volume not found." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:662 msgid "" "Volume does not have provider_id thus does not map to a ScaleIO volume. " "Allowing deletion to proceed." msgstr "" #: cinder/volume/drivers/emc/xtremio.py:155 #, python-format msgid "object %(key)s of type %(typ)s not found, %(err_msg)s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:576 msgid "terminate_connection: lun map not found" msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:226 #, python-format msgid "create_volume, volumename: %(volumename)s, Element Name is in use." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:895 #, python-format msgid "" "update_volume_stats, eternus_pool:%(eternus_pool)s, specified pool is not" " found." 
msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1720 #, python-format msgid "" "_map_lun, lun_name: %(volume_uid)s, Initiator: %(initiator)s, target: " "%(target)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1753 #, python-format msgid "" "_map_lun, lun_name: %(volume_uid)s, Initiator: %(initiator)s, ag: %(ag)s," " Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_common.py:410 #: cinder/volume/drivers/hitachi/hbsd_common.py:416 #: cinder/volume/drivers/hitachi/hbsd_horcm.py:1342 #, python-format msgid "Failed to restart horcm: %s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_fc.py:181 #, python-format msgid "Failed to add host group: %s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_horcm.py:851 #, python-format msgid "Failed to discard zero page: %s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_horcm.py:1320 #: cinder/volume/drivers/hitachi/hbsd_horcm.py:1329 #: cinder/volume/drivers/hitachi/hbsd_horcm.py:1336 #: cinder/volume/drivers/hitachi/hbsd_horcm.py:1360 #, python-format msgid "Failed to create pair: %s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_snm2.py:145 #, python-format msgid "ldev(%(ldev)d) is already mapped (hlun: %(hlu)d)" msgstr "" #: cinder/volume/drivers/hitachi/hnas_backend.py:273 #, python-format msgid "get_evs: %(out)s -- No find for %(fsid)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_backend.py:308 #, python-format msgid "get_fsid: %(out)s -- No info for %(fslabel)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:728 msgid "terminate_conn: provider location empty." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:441 #, python-format msgid "" "srstatld requires WSAPI version '%(srstatld_version)s' version " "'%(version)s' is installed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:566 #, python-format msgid "Virtual Volume Set '%s' doesn't exist on array." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:678 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2524 #, python-format msgid "Delete Snapshot id not found. Removing from cinder: %(id)s Ex: %(msg)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:779 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1145 #, python-format msgid "Failed to manage virtual volume %(disp)s due to error during retype." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1416 #, python-format msgid "3PAR vlun for volume %(name)s not found on host %(host)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1707 msgid "" "'hpe3par:cpg' is not supported as an extra spec in a volume type. CPG's " "are chosen by the cinder scheduler, as a pool, from the cinder.conf entry" " 'hpe3par_cpg', which can be a list of CPGs." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2136 #, python-format msgid "Delete volume id not found. Removing from cinder: %(id)s Ex: %(msg)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2318 #, python-format msgid "Issue occurred in clear_volume_key_value_pair: %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3088 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1535 #, python-format msgid "" "The secondary array must have an API version of %(min_ver)s or higher. " "Array '%(target)s' is on %(target_ver)s, therefore it will not be added " "as a valid replication target." 
msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3098 #, python-format msgid "" "'%s' is not a valid replication array. In order to be valid, backend_id, " "replication_mode, hpe3par_api_url, hpe3par_username, hpe3par_password, " "cpg_map, san_ip, san_login, and san_password must be specified. If the " "target is managed, managed_backend_name must be set as well." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_fc.py:125 #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:137 msgid "" "The primary array is not reachable at this time. Since replication is " "enabled, listing replication targets and failing over a volume can still " "be performed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:206 #, python-format msgid "Invalid IP address format '%s'" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:238 #, python-format msgid "" "Found invalid iSCSI IP address(s) in configuration option(s) " "hpe3par_iscsi_ips or iscsi_ip_address '%s.'" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:387 #, python-format msgid "" "iSCSI IP: '%s' was not found in hpe3par_iscsi_ips list defined in " "cinder.conf." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:425 msgid "Least busy iSCSI port not found, using first iSCSI port in list." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:578 msgid "" "Host exists without CHAP credentials set and has iSCSI attachments but " "CHAP is enabled. Updating host with new CHAP credentials." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:610 msgid "Host has no CHAP key, but CHAP is enabled." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:614 msgid "No host or VLUNs exist. Generating new CHAP key." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:643 msgid "Non-iSCSI VLUN detected." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:647 msgid "No VLUN contained CHAP credentials. Generating new CHAP key." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:344 #, python-format msgid "" "HPELeftHand API is version %(current)s. A minimum version of %(min)s is " "needed for manage/unmanage support." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:819 msgid "The 'hplh' prefix is deprecated. Use 'hpelh' instead." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:859 #, python-format msgid "CHAP secret exists for host %s but CHAP is disabled" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:862 #, python-format msgid "CHAP is enabled, but server secret not configured on server %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:936 #, python-format msgid "%s" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1436 msgid "" "The primary array is currently offline, remote copy has been " "automatically paused." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1545 #, python-format msgid "" "'%s' is not a valid replication array. In order to be valid, backend_id, " "hpelefthand_api_url, hpelefthand_username, hpelefthand_password, and " "hpelefthand_clustername, must be specified. If the target is managed, " "managed_backend_name must be set as well." 
msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1658 #, python-format msgid "" "There was a problem when trying to determine if the volume can be failed-" "back: %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1763 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1784 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1806 #, python-format msgid "" "There was no extra_spec value for %(spec_name)s, so the default value of " "%(def_val)s will be used. To overwrite this, set this value in the volume" " type extra_specs." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:710 #, python-format msgid "Failure deleting the snapshot %(snapshot_id)s of volume %(volume_id)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:841 msgid "Can't find snapshot on the array." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1200 msgid "No license for SplitMirror." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1226 msgid "No license for migration." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1368 #, python-format msgid "Rename lun %(lun_id)s fails when unmanaging volume %(volume)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1463 #, python-format msgid "Can't find snapshot on the array: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1474 #, python-format msgid "" "Failed to rename snapshot %(snapshot_id)s, snapshot name on array is " "%(snapshot_name)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1735 #: cinder/volume/drivers/huawei/huawei_driver.py:1975 #, python-format msgid "LUN is not in lungroup. LUN ID: %(lun_id)s. Lungroup id: %(lungroup_id)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1982 msgid "Can't find lun on the array." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:227 #, python-format msgid "Lun is not in lungroup. Lun id: %(lun_id)s, lungroup id: %(lungroup_id)s" msgstr "" #: cinder/volume/drivers/huawei/replication.py:197 #, python-format msgid "Split replication exception: %s." msgstr "" #: cinder/volume/drivers/huawei/replication.py:346 #, python-format msgid "Get remote array wwn failed. Error: %s." msgstr "" #: cinder/volume/drivers/huawei/replication.py:354 #, python-format msgid "Get remote devices failed. Error: %s." msgstr "" #: cinder/volume/drivers/huawei/replication.py:383 msgid "Remote device is unavailable." msgstr "" #: cinder/volume/drivers/huawei/replication.py:561 #: cinder/volume/drivers/huawei/replication.py:611 #, python-format msgid "No pair id in volume %s." msgstr "" #: cinder/volume/drivers/huawei/replication.py:568 #: cinder/volume/drivers/huawei/replication.py:618 #, python-format msgid "No remote lun id in volume %s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:147 #, python-format msgid "Login failed. Error: %s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1265 #: cinder/volume/drivers/huawei/rest_client.py:1277 msgid "Can't find target iqn from rest." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:317 #, python-format msgid "warning: Tried to delete vdisk %s but it does not exist." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:354 #, python-format msgid "Failed to run command: %s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:758 msgid "_remove_device: invalid properties or device." 
msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:811 #, python-format msgid "" "_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to any host " "found." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:817 #, python-format msgid "" "_unmap_vdisk_from_host: Multiple mappings of volume %(vdisk_name)s found," " no host specified." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:157 #, python-format msgid "Host %(host)s was not found on backend storage." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:188 #, python-format msgid "_get_vdisk_map_properties: Did not find a preferred node for vdisk %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:570 #, python-format msgid "Failed to run lsguicapability. Exception: %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:858 #, python-format msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:864 #, python-format msgid "" "unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " "host specified." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:876 #, python-format msgid "" "unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s " "found." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2013 msgid "" "Unable to use san_ip to create SSHPool. Now attempting to use " "storwize_san_secondary_ip to create SSHPool." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2020 msgid "" "Unable to create SSHPool using san_ip and not able to use " "storwize_san_secondary_ip since it is not configured." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2036 #, python-format msgid "" "Unable to execute SSH command with storwize_san_secondary_ip. Attempting " "to switch IP back to san_ip %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2046 #, python-format msgid "Unable to execute SSH command. Attempting to switch IP to %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2054 msgid "" "Unable to execute SSH command. Not able to use storwize_san_secondary_ip " "since it is not configured." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2429 #, python-format msgid "Volume %s does not exist." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:169 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:177 #, python-format msgid "initialize_connection: Did not find a preferred node for volume %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:132 msgid "CHAP secret exists for host but CHAP is disabled." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:90 msgid "The MCS Channel is grouped." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:94 msgid "No mapping." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:102 msgid "IQN already existed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:107 msgid "IQN has been used to create map." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:108 msgid "No such host alias name." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:844 #, python-format msgid "Volume %(volume_id)s already deleted." 
msgstr ""

#: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:855
#, python-format
msgid "Volume is still %(status)s. Cannot delete volume."
msgstr ""

#: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1160
msgid "Failed to get Raid Snapshot ID and did not store it in the snapshot."
msgstr ""

#: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1173
#, python-format
msgid "Snapshot is still %(status)s. Cannot delete snapshot."
msgstr ""

#: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1633
msgid "Failed to get target pool id."
msgstr ""

#: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1870
#, python-format
msgid "Volume %(volume_id)s cannot be retyped during attachment."
msgstr ""

#: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1877
#, python-format
msgid "Volume %(volume_id)s cannot be retyped because it has a snapshot."
msgstr ""

#: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1900
#, python-format
msgid "The provisioning: %(provisioning)s is not valid."
msgstr ""

#: cinder/volume/drivers/netapp/utils.py:70
msgid ""
"This is not the recommended way to use NetApp drivers. Please use "
"NetAppDriver to achieve the same functionality."
msgstr ""

#: cinder/volume/drivers/netapp/utils.py:145
#, python-format
msgid "Extra spec %(old)s is obsolete. Use %(new)s instead."
msgstr ""

#: cinder/volume/drivers/netapp/utils.py:150
#, python-format
msgid "Extra spec %(old)s is deprecated. Use %(new)s instead."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/block_7mode.py:143
#, python-format
msgid "Could not determine root volume name on %s."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/block_7mode.py:338
msgid ""
"The option 'netapp_volume_list' is deprecated and will be removed in "
"future releases. Please use the option 'netapp_pool_name_search_pattern' "
"instead."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/block_7mode.py:384
msgid "Volume refresh job already running. Returning..."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/block_7mode.py:392
#, python-format
msgid "Error refreshing volume info. Message: %s"
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/block_base.py:246
#, python-format
msgid "No entry in LUN table for volume/snapshot %(name)s."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/block_base.py:371
#, python-format
msgid ""
"LUN misalignment may occur for current initiator group %(ig_nm)s with "
"host OS type %(ig_os)s. Please configure initiator group manually "
"according to the type of the host OS."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/nfs_base.py:325
#, python-format
msgid ""
"Exception while registering image %(image_id)s in cache. Exception: "
"%(exc)s"
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/nfs_base.py:394
#, python-format
msgid "Exception during cache cleaning %(share)s. Message - %(ex)s"
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/nfs_base.py:605
msgid "Discover file retries exhausted."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/nfs_base.py:663
msgid "Unexpected exception while short listing used share."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/nfs_base.py:773
#, python-format
msgid "Exception moving file %(src)s. Message - %(e)s"
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:244
msgid "No shares found, hence skipping ssc refresh."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:524
msgid "ssc job in progress. Returning... "
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:536
msgid "refresh stale ssc job in progress. Returning... "
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:641
#, python-format
msgid ""
"The user does not have access or sufficient privileges to use all netapp "
"APIs. The following extra_specs will fail or be ignored: %s"
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/client/client_7mode.py:179
#, python-format
msgid "Error finding LUNs for volume %s. Verify volume exists."
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/client/client_base.py:138
#, python-format
msgid "Error mapping LUN. Code: %(code)s, Message: %(message)s"
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/client/client_base.py:151
#, python-format
msgid "Error unmapping LUN. Code: %(code)s, Message: %(message)s"
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/client/client_base.py:397
#, python-format
msgid "Failed to invoke ems. Message: %s"
msgstr ""

#: cinder/volume/drivers/netapp/dataontap/client/client_cmode.py:486
#, python-format
msgid "Rename failure in cleanup of cDOT QOS policy group %(name)s: %(ex)s"
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:219
#, python-format
msgid ""
"Production use of \"%(backend)s\" backend requires the Cinder controller "
"to have multipathing properly set up and the configuration option "
"\"%(mpflag)s\" to be set to \"True\"."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:664
#, python-format
msgid "Snapshot volume creation failed for snapshot %s."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:708
#, python-format
msgid "Failure deleting job %s."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:711
#, python-format
msgid "Volume copy job for src vol %s not found."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:728
#, python-format
msgid "Failure deleting temp snapshot %s."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:737
#, python-format
msgid "Volume %s already deleted."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:972
#, python-format
msgid "Snapshot %s already deleted."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:1033
#: cinder/volume/drivers/netapp/eseries/library.py:1062
#, python-format
msgid "Unable to remove snapshot group - %s."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:1346
#, python-format
msgid "Unable to update host type for host with label %(l)s. %(e)s"
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:1351
#, python-format
msgid "Message - %s."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:1649
msgid ""
"The option 'netapp_storage_pools' is deprecated and will be removed in "
"future releases. Please use the option "
"'netapp_pool_name_search_pattern' instead."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:1687
#, python-format
msgid "No storage pool found with available capacity %s."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:1948
msgid "Consistency group already deleted."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:1955
msgid "Unable to remove CG from the deletion map."
msgstr ""

#: cinder/volume/drivers/netapp/eseries/library.py:2036
msgid "Returning as clean tmp vol job already running."
msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:256 #, python-format msgid "Cannot delete snapshot %(origin)s: %(exc)s" msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:284 #: cinder/volume/drivers/nexenta/nfs.py:425 #: cinder/volume/drivers/nexenta/ns5/nfs.py:331 #, python-format msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:344 #: cinder/volume/drivers/nexenta/nfs.py:167 #, python-format msgid "Remote NexentaStor appliance at %s should be SSH-bound." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:364 #: cinder/volume/drivers/nexenta/nfs.py:187 #, python-format msgid "" "Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " "%(exc)s" msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:372 #: cinder/volume/drivers/nexenta/nfs.py:195 #, python-format msgid "" "Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" " %(exc)s" msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:378 #: cinder/volume/drivers/nexenta/nfs.py:201 #, python-format msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:388 #: cinder/volume/drivers/nexenta/nfs.py:211 #, python-format msgid "" "Cannot delete temporary destination snapshot %(dst)s on NexentaStor " "Appliance: %(exc)s" msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:424 #: cinder/volume/drivers/nexenta/nfs.py:269 #, python-format msgid "Cannot retype from %(src_backend)s to %(dst_backend)s." msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:140 #, python-format msgid "Volume status must be 'available' or 'retyping'. Current volume status: %s" msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:145 msgid "Unsupported host. No capabilities found" msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:361 #: cinder/volume/drivers/nexenta/ns5/nfs.py:188 #, python-format msgid "Cannot destroy created folder: %(vol)s/%(folder)s" msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:391 #, python-format msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:650 #, python-format msgid "Share %s ignored due to invalid format. Must be of form address:/export." msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:701 #, python-format msgid "Mount attempt %(attempt)d failed: %(error)s. Retrying mount ..." msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:232 #, python-format msgid "" "Got error trying to delete volume %(volume)s, assuming it is already " "gone: %(exc)s" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:293 #: cinder/volume/drivers/nexenta/ns5/nfs.py:265 #, python-format msgid "Could not delete snapshot %s - it has dependencies" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:351 #, python-format msgid "Failed to delete zfs snapshot %s" msgstr "" #: cinder/volume/drivers/nexenta/ns5/nfs.py:304 #, python-format msgid "Cannot destroy cloned filesystem: %(vol)s/%(filesystem)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:153 #, python-format msgid "Cannot get volume status %(exc)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1254 #, python-format msgid "Flexvisor failed to delete volume %(id)s from the group %(vgid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1259 #, python-format msgid "" "Flexvisor failed to delete volume %(id)s from group %(vgid)s due to " "%(status)s." 
msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1428 #, python-format msgid "Failed to query pool %(id)s status %(ret)d." msgstr "" #: cinder/volume/drivers/vmware/datastore.py:227 #, python-format msgid "Unable to fetch datastores connected to host %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:530 #, python-format msgid "Trying to boot from an empty volume: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:708 #, python-format msgid "Error occurred while deleting temporary disk: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:840 #, python-format msgid "Error occurred while deleting descriptor: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:873 #, python-format msgid "Error occurred while deleting backing: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1217 #, python-format msgid "Volume: %s is in use, can't retype." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1286 #, python-format msgid "" "There are no datastores matching new requirements; can't retype volume: " "%s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1340 #, python-format msgid "" "Changing backing: %(backing)s name from %(new_name)s to %(old_name)s " "failed." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1390 #, python-format msgid "" "Unable to extend volume: %(vol)s to size: %(size)s on current datastore " "due to insufficient space." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1597 #, python-format msgid "" "Cannot undo volume rename; old name was %(old_name)s and new name is " "%(new_name)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:189 msgid "zfssa_initiator_config not found. Using deprecated configuration options." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:203 #, python-format msgid "" "zfssa_initiator: %(ini)s wont be used on zfssa_initiator_group= " "%(inigrp)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:1014 #, python-format msgid "Volume %s exists but can't be deleted" msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:998 #, python-format msgid "Property %s already exists." msgstr "" #: cinder/volume/flows/api/create_volume.py:254 #, python-format msgid "" "Failed to retrieve volume_type from image metadata. '%(img_vol_type)s' " "doesn't match any volume types." msgstr "" #: cinder/volume/flows/api/create_volume.py:330 #, python-format msgid "" "Availability zone '%(s_az)s' not found, falling back to " "'%(s_fallback_az)s'." msgstr "" #: cinder/volume/flows/api/create_volume.py:392 msgid "Volume type will be changed to be the same as the source volume." msgstr "" #: cinder/volume/flows/api/create_volume.py:635 #, python-format msgid "" "Quota %(s_name)s exceeded for %(s_pid)s, tried to create volume " "(%(d_consumed)d volume(s) already consumed)." msgstr "" #: cinder/volume/flows/manager/create_volume.py:685 #, python-format msgid "" "Failed to create volume from image-volume cache, will fall back to " "default behavior. Error: %(exception)s" msgstr "" #: cinder/volume/targets/cxt.py:131 cinder/volume/targets/tgt.py:166 #, python-format msgid "Persistence file already exists for volume, found file at: %s" msgstr "" #: cinder/volume/targets/cxt.py:194 cinder/volume/targets/tgt.py:255 #, python-format msgid "Volume path %s does not exist, nothing to remove." msgstr "" #: cinder/volume/targets/iet.py:191 #, python-format msgid "" "Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " "target. %(conf)s does not exist." 
msgstr "" #: cinder/volume/targets/iser.py:28 msgid "" "ISERTgtAdm is deprecated, you should now just use LVMVolumeDriver and " "specify iscsi_helper for the target driver you wish to use. In order to " "enable iser, please set iscsi_protocol=iser with lioadm or tgtadm target " "helpers." msgstr "" #: cinder/volume/targets/lio.py:86 #, python-format msgid "" "Failed to save iscsi LIO configuration when modifying volume id: " "%(vol_id)s." msgstr "" #: cinder/volume/targets/lio.py:97 msgid "Failed to restore iscsi LIO configuration." msgstr "" #: cinder/volume/targets/tgt.py:84 msgid "Attempting recreate of backing lun..." msgstr "" #: cinder/volume/targets/tgt.py:192 #, python-format msgid "Could not create target because it already exists for volume: %s" msgstr "" #: cinder/volume/targets/tgt.py:277 #, python-format msgid "" "Failed target removal because target or ACL's couldn't be found for iqn: " "%s." msgstr "" #: cinder/volume/targets/tgt.py:296 msgid "Silent failure of target removal detected, retry...." msgstr "" #: cinder/zonemanager/utils.py:77 msgid "Driver didn't return connection info, can't add zone." msgstr "" #: cinder/zonemanager/utils.py:101 msgid "Driver didn't return connection info from terminate_connection call." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:317 #, python-format msgid "Zoning policy not recognized: %(policy)s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:378 #, python-format msgid "Error running SSH command: %s" msgstr "" cinder-8.0.0/cinder/locale/fr/0000775000567000056710000000000012701406543017240 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/fr/LC_MESSAGES/0000775000567000056710000000000012701406543021025 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/fr/LC_MESSAGES/cinder.po0000664000567000056710000131203512701406257022640 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # EVEILLARD , 2013 # François Bureau, 2013 # FIRST AUTHOR , 2011 # Jonathan Dupart , 2014 # Maxime Coquerel , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Martine Marin , 2016. #zanata # Tom Cocozzello , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev29\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-29 01:54+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-28 08:03+0000\n" "Last-Translator: Martine Marin \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "Version d'OpenStack Cinder : %(version)s\n" #, python-format msgid " but size is now %d" msgstr " mais la taille est maintenant %d" #, python-format msgid " but size is now %d." msgstr " mais la taille est maintenant %d." msgid " or " msgstr " ou " #, python-format msgid "%(attr)s is not set." msgstr "%(attr)s n'est pas défini." #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing ne peut pas gérer un volume connecté à des hôtes. 
" "Déconnectez ce volume des hôtes existants avant de procéder à l'importation" #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "Résultat : %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s : %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s : Droit refusé." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s : Echec avec une sortie CLI inattendue.\n" " Commande : %(cmd)s\n" " stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "Code de statut : %(_status)s\n" "Corps : %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, subjectAltName : %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s : création de NetworkPortal : vérifiez que le port %(port)d à " "l'adresse IP %(ip)s n'est pas utilisé par un autre service." #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s doit être long d'au moins %(min_length)s caractères." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s contient plus de %(max_length)s caractères." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s : échec de la sauvegarde %(bck_id)s, volume %(vol_id)s. L'objet de " "sauvegarde possède un mode inattendu. Sauvegardes d'image ou de fichier " "prises en charge, mode réel : %(vol_mode)s." #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "Le service %(service)s n'a pas le statut %(status)s sur le dispositif de " "stockage %(host)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s doit être <= %(max_value)d" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s doit être >= %(min_value)d" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "La valeur %(worker_name)s de %(workers)d n'est pas valide. Elle doit être " "supérieure à 0." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "%s \"data\" ne figure pas dans le résultat." #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "%s n'est pas accessible. Vérifiez que GPFS est actif et que le système de " "fichiers est monté." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "%s ne peut pas être redimensionné par une opération de clonage car il ne " "contient aucun bloc." 
#, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "%s ne peut pas être redimensionné par une opération de clonage car il est " "hébergé sur un volume compressé" #, python-format msgid "%s configuration option is not set." msgstr "L'option de configuration %s n'a pas été définie." #, python-format msgid "%s does not exist." msgstr "%s n'existe pas." #, python-format msgid "%s is not a directory." msgstr "%s n'est pas un répertoire." #, python-format msgid "%s is not a string or unicode" msgstr "%s n'est pas une chaîne ou unicode" #, python-format msgid "%s is not installed" msgstr "%s n'est pas installé" #, python-format msgid "%s is not installed." msgstr "%s n'est pas installé." #, python-format msgid "%s is not set" msgstr "%s n'est pas défini" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" "%s n'est pas défini et est obligatoire pour que l'unité de réplication soit " "valide." #, python-format msgid "%s is not set." msgstr "%s n'est pas défini." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s doit être une image raw ou qcow2 valide." #, python-format msgid "%s must be an absolute path." msgstr "%s doit être un chemin d'accès absolu." #, python-format msgid "%s must be an integer." msgstr "%s doit être un entier." #, python-format msgid "%s not set in cinder.conf" msgstr "%s non défini dans cinder.conf" #, python-format msgid "%s not set." msgstr "%s non défini." #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s' n'est pas valide pour flashsystem_connection_protocol dans le " "fichier de configuration. La ou les valeurs valides sont %(enabled)s." msgid "'active' must be present when writing snap_info." msgstr "'active' doit présent lors de l'écriture de snap_info." msgid "'consistencygroup_id' must be specified" msgstr "'consistencygroup_id' doit être spécifié" msgid "'qemu-img info' parsing failed." msgstr "Echec de l'analyse syntaxique de 'qemu-img info'." msgid "'status' must be specified." msgstr "'status' doit être spécifié." msgid "'volume_id' must be specified" msgstr "'volume_id' doit être spécifié." msgid "'{}' object has no attribute '{}'" msgstr "L'objet '{}' n'a pas d'attribut '{}'" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(Commande : %(cmd)s) (Code retour : %(exit_code)s) (Sortie standard : " "%(stdout)s) (Erreur standard : %(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "" "Un numéro d'unité logique hôte (HLUN) est introuvable. (unité logique : " "%(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "Une demande simultanée, et peut-être contradictoire, a été effectuée." #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "Aucun numéro d'unité logique libre (HLUN) n'a été trouvé. Ajoutez un autre " "groupe d'hôtes. (unité logique : %(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" "Un groupe d'hôtes n'a pas pu être ajouté (port : %(port)s, nom : %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "Un groupe d'hôtes n'a pas pu être supprimé. 
(port : %(port)s, gid : %(gid)s, " "nom : %(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "Un groupe d'hôtes n'est pas valide. (groupe d'hôtes : %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "" "Une paire n'a pas pu être supprimée. (P-VOL : %(pvol)s, S-VOL : %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "Une paire n'a pas pu être créée. Le nombre maximum de paires est dépassé. " "(méthode de copie : %(copy_method)s, P-VOL : %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "Un paramètre n'est pas valide. (%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "Une valeur de paramètre n'est pas valide. (%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "Un pool est introuvable. (ID pool : %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Un statut d'instantané n'est pas valide. (statut : %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" "Une cible secondaire valide DOIT être spécifiée pour effectuer le " "basculement." msgid "A volume ID or share was not specified." msgstr "Un ID de volume ou un partage n'a pas été spécifié." #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "Un statut de volume n'est pas valide. (statut : %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "Echec de l'API %(name)s avec la chaîne d'erreur %(err)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "La chaîne de version d'API %(version)s a un format non valide. Elle doit " "être au format NumMajeur.NumMineur." msgid "API key is missing for CloudByte driver." msgstr "Clé d'API manquante pour le pilote CloudByte." #, python-format msgid "API response: %(response)s" msgstr "Réponse de l'API : %(response)s" #, python-format msgid "API response: %s" msgstr "Réponse de l'API : %s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "" "La version %(version)s de l'API n'est pas prise en charge avec cette méthode." msgid "API version could not be determined." msgstr "La version d'API n'a pas pu être déterminée." msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "Vous êtes sur le point de supprimer des projets enfants avec quota non zéro. " "Ceci ne devrait pas être effectué. " msgid "Access list not available for public volume types." msgstr "Liste d'accès indisponible pour les types de volume publics." msgid "Activate or deactivate QoS error." msgstr "Erreur lors de l'activation ou de la désactivation QoS." msgid "Activate snapshot error." msgstr "Erreur lors de l'activation de l'instantané." msgid "Add FC port to host error." msgstr "Erreur lors de l'ajout du port FC à l'hôte." msgid "Add fc initiator to array error." msgstr "Erreur lors de l'ajout d'initiateur fc à la matrice." msgid "Add initiator to array error." msgstr "Erreur lors de l'ajout de l'initiateur à la matrice." msgid "Add lun to cache error." msgstr "Erreur lors de l'ajout de numéro d'unité logique au cache." msgid "Add lun to partition error." 
msgstr "Erreur lors de l'ajout de numéro d'unité logique à la partition." msgid "Add mapping view error." msgstr "Erreur lors de l'ajout de la vue de mappage." msgid "Add new host error." msgstr "Erreur lors de l'ajout d'un nouvel hôte." msgid "Add port to port group error." msgstr "Erreur lors de l'ajout de port à un groupe de ports." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "Les pools de stockage spécifiés pour être gérés n'existent pas tous. " "Vérifiez votre configuration. Pools non existants : %s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "" "Une demande de version d'API doit être comparée à un objet VersionedMethod." #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "Une erreur s'est produite dans SheepdogDriver. (Motif : %(reason)s)" msgid "An error has occurred during backup operation" msgstr "Une erreur est survenue lors de la sauvegarde" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "" "Une erreur s'est produite lors de la tentative de modification de " "l'instantané '%s'." #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "Une erreur s'est produite lors de la recherche du volume \"%s\"." #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "Une erreur s'est produite pendant l'opération LUNcopy. Nom LUNcopy : " "%(luncopyname)s. Statut LUNcopy : %(luncopystatus)s. Etat LUNcopy : " "%(luncopystate)s." #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "Une erreur s'est produite lors de la lecture du volume \"%s\"." #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "Une erreur s'est produite lors de l'écriture dans le volume \"%s\"." #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "" "Un utilisateur CHAP iSCSI n'a pas pu être ajouté. (nom d'utilisateur : " "%(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "" "Un utilisateur CHAP iSCSI n'a pas pu être supprimé. (nom d'utilisateur : " "%(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "Une cible iSCSI n'a pas pu être ajoutée. (port : %(port)s, alias : " "%(alias)s, raison : %(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "Une cible iSCSI n'a pas pu être supprimée. (port : %(port)s, tno : %(tno)s, " "alias : %(alias)s)" msgid "An unknown exception occurred." msgstr "Une exception inconnue s'est produite." msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "" "Un utilisateur avec portée de jeton limitée à un sous-projet n'est pas " "autorisé à visualiser le quota de ses parents." msgid "Append port group description error." msgstr "Erreur lors de l'ajout de la description du groupe de ports." #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." 
msgstr "" "Echec de l'application de zones et de cfgs au commutateur (code d'erreur=" "%(err_code)s, message d'erreur=%(err_msg)s.)" #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "" "La matrice n'existe pas ou est hors ligne. Statut actuel de la matrice : %s." msgid "Associate host to hostgroup error." msgstr "Erreur lors de l'association de l'hôte à hostgroup." msgid "Associate host to mapping view error." msgstr "Erreur lors de l'association de l'hôte à la vue de mappage." msgid "Associate initiator to host error." msgstr "Erreur lors de l'association de l'initiateur à l'hôte." msgid "Associate lun to QoS error." msgstr "Erreur lors de l'association de numéro d'unité logique (lun) à QoS." msgid "Associate lun to lungroup error." msgstr "Erreur lors de l'association du numéro d'unité logique à lungroup." msgid "Associate lungroup to mapping view error." msgstr "Erreur lors de l'association de lungroup à la vue de mappage." msgid "Associate portgroup to mapping view error." msgstr "Erreur lors de l'association de portgroup à la vue de mappage." msgid "At least one valid iSCSI IP address must be set." msgstr "Au moins une adresse IP iSCSI valide doit être définie." #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "Tentative de transfert de %s avec une clé d'auth non valide." #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "" "Les informations du groupe d'authentification [%s] sont introuvables dans le " "stockage CloudByte." msgid "Auth user details not found in CloudByte storage." msgstr "" "Informations d'utilisateur de l'authentification introuvables dans le " "stockage CloudByte." msgid "Authentication error" msgstr "Erreur d'authentification" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "L'authentification a échoué, vérifiez les données d'identification du " "commutateur, code d'erreur : %s." msgid "Authorization error" msgstr "Erreur d'autorisation" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "La zone de disponibilité '%(s_az)s' n'est pas valide." msgid "Available categories:" msgstr "Catégories disponibles :" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "Les spécifications QoS de back-end ne sont pas prises en charge sur cette " "famille de stockage et version ONTAP." #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "Le système dorsal n'existe pas (%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "Le back-end a déjà été basculé. Impossible de le rebasculer." #, python-format msgid "Backend reports: %(message)s" msgstr "Rapports de back-end : %(message)s" msgid "Backend reports: item already exists" msgstr "Rapports de back-end : l'élément existe déjà" msgid "Backend reports: item not found" msgstr "Rapports de back-end : élément introuvable" msgid "Backend server not NaServer." msgstr "Le serveur de back-end n'est pas de type NaServer." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" "Le délai de nouvelle tentative du service de back-end est atteint : " "%(timeout)s s" msgid "Backend storage did not configure fiber channel target." msgstr "Le stockage de back-end n'a pas configuré la cible de canal optique." msgid "Backing up an in-use volume must use the force flag." 
msgstr "" "La sauvegarde d'un volume en cours d'utilisation doit utiliser l'indicateur " "force." #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "Sauvegarde %(backup_id)s introuvable." msgid "Backup RBD operation failed" msgstr "Echec de l'opération RBD de sauvegarde" msgid "Backup already exists in database." msgstr "La sauvegarde existe déjà dans la base de données." #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "Le pilote de sauvegarde a signalé une erreur : %(message)s" msgid "Backup id required" msgstr "ID de sauvegarde requis" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "" "La sauvegarde n'est pas prise en charge pour les volumes GlusterFS avec des " "instantanés." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "" "La sauvegarde est seulement prise en charge pour les volumes SOFS sans " "fichier de sauvegarde." msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "" "La sauvegarde est seulement prise en charge pour les volumes GlusterFS au " "format raw." msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "" "La sauvegarde est seulement prise en charge pour les volumes SOFS au format " "raw." msgid "Backup operation of an encrypted volume failed." msgstr "Echec de l'opération de sauvegarde d'un volume chiffré." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "Le service de sauvegarde %(configured_service)s ne prend pas en charge la " "vérification. L'ID de sauvegarde %(id)s n'est pas vérifié. Vérification " "ignorée." #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "Le service de sauvegarde %(service)s ne prend pas en charge la vérification. " "L'ID de sauvegarde %(id)s n'est pas vérifié. Réinitialisation ignorée." #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "" "La sauvegarde comporte %s instantanés alors qu'elle ne doit en contenir qu'un" msgid "Backup status must be available" msgstr "L'état de sauvegarde doit être disponible" #, python-format msgid "Backup status must be available and not %s." msgstr "L'état de sauvegarde doit être disponible mais pas %s." msgid "Backup status must be available or error" msgstr "L'état de sauvegarde doit être Disponible ou Erreur" msgid "Backup to be restored has invalid size" msgstr "La sauvegarde à restaurer a une taille non valide" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "Mauvaise ligne d'état renvoyée : %(arg)s." 
#, python-format msgid "Bad key(s) in quota set: %s" msgstr "Clé(s) incorrecte(s) dans le quota défini : %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "Réponse erronée ou inattendue de l'API back-end du volume de stockage : " "%(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "" "Format de projet incorrect : le projet n'est pas au format approprié (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Demande incorrecte envoyée au cluster Datera : arguments non valides : " "%(args)s | %(message)s" msgid "Bad response from Datera API" msgstr "Réponse erronée de l'API Datera" msgid "Bad response from SolidFire API" msgstr "Réponse erronée de l'API SolidFire" #, python-format msgid "Bad response from XMS, %s" msgstr "Réponse incorrecte de XMS, %s" msgid "Binary" msgstr "binaire" msgid "Blank components" msgstr "Composants vides" msgid "Blockbridge API authentication scheme (token or password)" msgstr "" "Méthode d'authentification de l'API Blockbridge (jeton ou mot de passe)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "" "Mot de passe de l'API Blockbridge (pour méthode d'authentification 'mot de " "passe')" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Jeton de l'API Blockbridge (pour méthode d'authentification 'jeton')" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "" "Utilisateur de l'API Blockbridge (pour méthode d'authentification 'mot de " "passe')" msgid "Blockbridge api host not configured" msgstr "L'hôte de l'API Blockbridge n'a pas été configuré" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "" "Blockbridge a été configuré avec une méthode d'authentification non valide " "'%(auth_scheme)s'" msgid "Blockbridge default pool does not exist" msgstr "Le pool Blockbridge par défaut n'existe pas" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" "Le mot de passe Blockbridge n'a pas été configuré (requis pour la méthode " "d'authentification 'mot de passe')" msgid "Blockbridge pools not configured" msgstr "Les pools Blockbridge n'ont pas été configurés" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" "Le jeton Blockbridge n'a pas été configuré (requis pour la méthode " "d'authentification 'jeton')" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" "L'utilisateur Blockbridge n'a pas été configuré (requis pour la méthode " "d'authentification 'mot de passe')" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "" "Erreur d'interface CLI de segmentation Brocade Fibre Channel : %(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Erreur HTTP de segmentation Brocade Fibre Channel : %(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "Le secret CHAP doit contenir 12 à 16 octets." #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "Sortie exception CLI :\n" " commande : %(cmd)s\n" " stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Sortie exception CLI :\n" " commande : %(cmd)s\n" " stdout : %(out)s\n" " stderr : %(err)s." 
msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E Le mappage de disque virtuel à hôte n'a pas été créé car le " "disque virtuel est déjà mappé à un hôte.\n" "\"" msgid "CONCERTO version is not supported" msgstr "La version CONCERTO n'est pas prise en charge" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) n'existe pas dans la matrice" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "Le nom du cache est None. Définissez smartcache:cachename dans la clé." #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "Le volume cache %(cache_vol)s n'a pas d'instantané %(cache_snap)s." #, python-format msgid "Cache volume %s does not have required properties" msgstr "Le volume de cache %s n'a pas les propriétés requises" msgid "Call returned a None object" msgstr "L'appel a renvoyé un objet de type None " msgid "Can not add FC port to host." msgstr "Impossible d'ajouter le port FC à l'hôte." #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "" "Impossible de déterminer l'ID de cache à partir du nom de cache %(name)s." #, python-format msgid "Can not find partition id by name %(name)s." msgstr "Impossible de déterminer l'ID de partition à partir du nom %(name)s." #, python-format msgid "Can not get pool info. pool: %s" msgstr "Impossible d'obtenir les informations de pool. Pool : %s" #, python-format msgid "Can not translate %s to integer." msgstr "Impossible de transformer %s en entier." #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "Impossible d'accéder à 'scality_sofs_config' : %s" msgid "Can't attach snapshot." msgstr "Impossible de connecter l'instantané." msgid "Can't decode backup record." msgstr "Impossible de décoder l'enregistrement de sauvegarde." #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "Extension impossible du volume de réplication, volume : %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "Numéro d'unité logique (LUN) introuvable sur la matrice, vérifiez la valeur " "de source-name ou source-id." #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "Nom de cache %(name)s introuvable sur la matrice." #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "" "ID de numéro d'unité logique (lun) introuvable à partir de la base de " "données, volume : %(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" "Informations de numéro d'unité logique (lun) introuvables sur la matrice. " "Volume : %(id)s, nom lun : %(name)s." #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" "Nom de partition introuvable sur la matrice. Nom de la partition : %(name)s." #, python-format msgid "Can't find service: %s" msgstr "Service introuvable : %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "Instantané introuvable sur la matrice, vérifiez la valeur de source-name ou " "source-id." msgid "Can't find the same host id from arrays." msgstr "Impossible de trouver le même ID d'hôte (host id) dans les matrices." 
#, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "" "Impossible d'obtenir l'ID de volume à partir de l'instantané, instantané : " "%(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "Impossible d'obtenir l'ID du volume. Nom du volume : %s." #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %(lun_id)s dans " "Cinder. Type LUN non concordant." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " "numéro existe déjà dans HyperMetroPair." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " "numéro existe déjà dans une tâche de copie LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " "numéro existe déjà dans un groupe LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " "numéro existe déjà dans un miroir LUN." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " "numéro existe déjà dans SplitMirror." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " "numéro existe déjà dans une tâche de migration." #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder . Ce " "numéro existe déjà dans une tâche de réplication à distance." #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "" "Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Le " "statut de ce numéro n'est pas normal." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "Impossible d'importer l'instantané %s dans Cinder. L'instantané n'appartient " "pas au volume." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "Impossible d'importer l'instantané %s dans Cinder. L'instantané est exposé " "dans l'initiateur." #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "Impossible d'importer l'instantané %s dans Cinder. Le statut de l'instantané " "n'est pas normal ou le statut d'exécution n'est pas connecté (online)." #, python-format msgid "Can't open config file: %s" msgstr "Impossible d'ouvrir le fichier de configuration : %s" msgid "Can't parse backup record." msgstr "Impossible d'analyser l'enregistrement de sauvegarde." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " "%(group_id)s car il ne possède aucun type de volume." 
#, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " "%(group_id)s car il fait déjà partie du groupe de cohérence %(orig_group)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " "%(group_id)s car le volume est introuvable." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " "%(group_id)s car le volume n'existe pas." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " "%(group_id)s car le volume est dans un état non valide : %(status)s. Les " "états valides sont : %(valid)s." #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " "%(group_id)s car le type de volume %(volume_type)s n'est pas pris en charge " "par le groupe." #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "Impossible de rattacher le volume %s qui est déjà rattaché ; multiattach est " "désactivé via l'option de configuration 'netapp_enable_multiattach'." msgid "Cannot change VF context in the session." msgstr "Impossible de modifier le contexte VF dans la session." #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "Impossible de modifier le contexte VF, VF indiqué non disponible dans la " "liste VF gérable %(vf_list)s." msgid "Cannot connect to ECOM server." msgstr "Connexion au serveur ECOM impossible." #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "Impossible de créer un clone d'une taille de %(vol_size)s depuis un volume " "d'une taille de %(src_vol_size)s" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "Impossible de créer le groupe de cohérence %(group)s car l'instantané " "%(snap)s n'est pas dans un état valide. Les états valides sont : %(valid)s." #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "Impossible de créer le groupe de cohérence %(group)s car le volume source " "%(source_vol)s n'est pas à un état valide. Etats valides : %(valid)s." #, python-format msgid "Cannot create directory %s." msgstr "Impossible de créer le répertoire %s." msgid "Cannot create encryption specs. Volume type in use." msgstr "" "Impossible de créer des spécifications de chiffrement. Type de volume en " "service." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." 
msgstr "" "Impossible de créer une image du format de disque : %s. Seul le format de " "disque vmdk est accepté." #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "Impossible de créer une vue de masquage : %(maskingViewName)s. " #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "Impossible de créer plus de %(req)s volumes sur la matrice ESeries lorsque " "'netapp_enable_multiattach' est défini à true." #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "" "Impossible de créer ou de trouver un groupe de stockage dénommé " "%(sgGroupName)s." #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "Impossible de créer un volume d'une taille de %(vol_size)s depuis un " "instantané d'une taille de %(snap_size)s" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "" "Vous ne pouvez pas créer un volume avec la taille %s : cette taille n'est " "pas un multiple de 8 Go." #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "Impossible de créer volume_type avec le nom %(name)s et les spécifications " "%(extra_specs)s" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "" "Impossible de supprimer le numéro d'unité logique %s alors que des " "instantanés existent." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "Impossible de supprimer le volume cache %(cachevol_name)s. Il a été mis à " "jour le %(updated_at)s et contient actuellement %(numclones)d instances de " "volume." #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "Impossible de supprimer le volume cache : %(cachevol_name)s. Ila été mis à " "jour le %(updated_at)s et contient actuellement %(numclones)s instances de " "volume." msgid "Cannot delete encryption specs. Volume type in use." msgstr "" "Impossible de supprimer des spécifications de chiffrement. Type de volume en " "service." msgid "Cannot determine storage pool settings." msgstr "Impossible de déterminer les paramètres du pool de stockage." msgid "Cannot execute /sbin/mount.sofs" msgstr "Impossible d'exécuter /sbin/mount.sofs" #, python-format msgid "Cannot find CG group %s." msgstr "Groupe CG %s introuvable." #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "Service de configuration du contrôleur introuvable pour le système de " "stockage %(storage_system)s." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" "Service de réplication introuvable pour créer le volume pour l'instantané %s." #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "" "Impossible de trouver le service de réplication pour supprimer l'instantané " "%s." #, python-format msgid "Cannot find Replication service on system %s." msgstr "Service Replication introuvable sur le système %s." #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "" "Impossible de trouver le volume : %(id)s. Opération d'arrêt de la gestion " "(unmanage). 
Sortie..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "Volume %(volumename)s introuvable. Etendez l'opération. Sortie..." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "Nombre d'unités introuvable pour le volume %(volumeName)s." msgid "Cannot find migration task." msgstr "Tâche de migration introuvable." #, python-format msgid "Cannot find replication service on system %s." msgstr "Impossible de trouver le service de réplication sur le système %s." #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "" "Impossible de trouver l'instance de groupe de cohérence source. " "consistencygroup_id: %s." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "" "Impossible d'obtenir mcs_id à partir de l'ID de canal : %(channel_id)s." msgid "Cannot get necessary pool or storage system information." msgstr "" "Impossible d'obtenir les informations de pool ou de système de stockage " "nécessaires." #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "Impossible d'obtenir ou de créer un groupe de stockage %(sgGroupName)s pour " "le volume %(volumeName)s " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "" "Impossible d'obtenir ou de créer le groupe de demandeurs : %(igGroupName)s. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "Impossible d'obtenir le groupe de ports : %(pgGroupName)s. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "Impossible d'obtenir le groupe de stockage %(sgGroupName)s de la vue de " "masquage %(maskingViewInstanceName)s. " #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Impossible d'obtenir la plage de tailles prises en charge pour %(sps)s Code " "retour : %(rc)lu. Erreur : %(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "Impossible d'obtenir le groupe de stockage par défaut pour la règle FAST : " "%(fastPolicyName)s" msgid "Cannot get the portgroup from the masking view." msgstr "Impossible d'obtenir portgroup à partir de la vue de masquage." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" "Impossible de monter Scality SOFS, consultez le fichier syslog pour voir les " "erreurs" msgid "Cannot ping DRBDmanage backend" msgstr "" "Impossible d'exécuter une commande ping sur le système dorsal DRBDmanage" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "Impossible de placer le volume %(id)s sur %(host)s" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "Vous ne pouvez pas spécifier à la fois 'cgsnapshot_id' et 'source_cgid' pour " "créer un groupe de cohérence %(name)s depuis la source." msgid "Cannot register resource" msgstr "Impossible d'enregistrer la ressource" msgid "Cannot register resources" msgstr "Impossible d'enregistrer les ressources" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "Impossible de supprimer le volume %(volume_id)s du groupe de cohérence " "%(group_id)s car il ne se trouve pas dans le groupe." 
#, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "Impossible de supprimer le volume %(volume_id)s du groupe de cohérence " "%(group_id)s car le volume est dans un état non valide : %(status)s. Les " "états valides sont : %(valid)s." #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "Confirmation impossible de HPE3PARDriver vers %s." msgid "Cannot retype from one 3PAR array to another." msgstr "Confirmation impossible de matrice 3PAR en une autre." msgid "Cannot retype to a CPG in a different domain." msgstr "Confirmation impossible vers un CPG dans un autre domaine." msgid "Cannot retype to a snap CPG in a different domain." msgstr "" "Confirmation impossible vers un CPG d'instantané dans un autre domaine." msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "Impossible d'exécuter la commande vgc-cluster. Vérifiez que le logiciel est " "installé et que les autorisations sont définies correctement." msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "" "Impossible de définir à la fois hitachi_serial_number et hitachi_unit_name." msgid "Cannot specify both protection domain name and protection domain id." msgstr "" "Vous ne pouvez pas spécifier à la fois un nom et un ID de domaine de " "protection." msgid "Cannot specify both storage pool name and storage pool id." msgstr "" "Vous ne pouvez pas spécifier à la fois le nom et l'ID du pool de stockage." #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "Impossible de mettre à jour le groupe de cohérence %(group_id)s car aucun " "nom, description, add_volumes ou remove_volumes valide n'ont été fournis." msgid "Cannot update encryption specs. Volume type in use." msgstr "" "Impossible de mettre à jour des spécifications de chiffrement. Type de " "volume en service." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "Impossible de mettre à jour le type de volume %(id)s" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "Impossible de vérifier l'existence de l'objet %(instanceName)s." msgid "Cascade option is not supported." msgstr "L'option Cascade n'est pas prise en charge." #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "L'instantané de groupe de cohérence %(cgsnapshot_id)s est introuvable." msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost est vide. Aucun groupe de cohérence ne sera créé." msgid "Cgsnapshot status must be available or error" msgstr "" "L'état d'instantané de groupe de cohérence doit être Disponible ou Erreur" msgid "Change hostlun id error." msgstr "Erreur lors du changement d'ID hostlun." msgid "Change lun priority error." msgstr "Erreur lors de la modification de priorité de numéro d'unité logique." msgid "Change lun smarttier policy error." msgstr "" "Erreur lors de la modification de stratégie smarttier de numéro d'unité " "logique." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "La modification définira une utilisation inférieure à 0 pour les ressources " "suivantes : %(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." 
msgstr "" "Vérifiez les autorisations d'accès accordées pour le partage ZFS affecté à " "ce pilote." msgid "Check hostgroup associate error." msgstr "Erreur lors de la vérification de l'associé hostgroup." msgid "Check initiator added to array error." msgstr "" "Erreur lors de la vérification de l'ajout de l'initiateur à la matrice." msgid "Check initiator associated to host error." msgstr "" "Erreur lors de la vérification de l'association de l'initiateur à l'hôte." msgid "Check lungroup associate error." msgstr "Erreur lors de la vérification de l'associé lungroup." msgid "Check portgroup associate error." msgstr "Erreur lors de la vérification de l'associé portgroup." msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "Vérifiez l'état du service HTTP. Assurez-vous également que le numéro de " "port HTTPS est identique à celui indiqué dans cinder.conf." msgid "Chunk size is not multiple of block size for creating hash." msgstr "" "La taille de bloc n'est pas un multiple de la taille de bloc pour la " "création du hachage." #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "" "Erreur d'interface CLI de segmentation Cisco Fibre Channel : %(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "La fonction Clone n'est pas autorisée sur %(storageSystem)s." #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "Type de clone '%(clone_type)s' non valide, les valeurs admises sont : " "'%(full_clone)s' et '%(linked_clone)s'." msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "" "Le cluster n'est pas formaté. Vous devriez probablement effectuer \"dog " "cluster format\"." #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Echec du pilote Coho Data Cinder : %(message)s" msgid "Coho rpc port is not configured" msgstr "Le port Coho rpc n'est pas configuré" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "" "La commande %(cmd)s a été bloquée dans l'interface CLI et a été annulée" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition : dépassement du délai de %s " #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "" "CommandLineHelper._wait_for_condition : dépassement du délai d'attente %s." msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "L'optimiseur de compression n'est pas installé. Impossible de créer le " "volume compressé." #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "Cluster de calcul %(cluster)s introuvable." msgid "Condition has no field." msgstr "La condition n'a aucun champ correspondant." #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" "Configuration 'max_over_subscription_ratio' non valide. Doit être > 0 : %s" msgid "Configuration error: dell_sc_ssn not set." msgstr "Erreur de configuration : dell_sc_ssn non défini." #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "Le fichier de configuration %(configurationFile)s n'existe pas." msgid "Configuration is not found." msgstr "Configuration introuvable." #, python-format msgid "Configuration value %s is not set." 
msgstr "La valeur de configuration %s n'est pas définie." msgid "Configured host type is not supported." msgstr "Le type d'hôte configuré n'est pas pris en charge." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "Conflit entre les spécifications QoS dans le type de volume %s : lorsque la " "spécification QoS est associée au type de volume, un \"netapp:" "qos_policy_group\" suranné n'est pas autorisédans les spécifications " "supplémentaires du type de volume." #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Connexion à glance échouée : %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Connexion à swift échouée : %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "Le connecteur ne fournit pas : %s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "Le connecteur n'a pas les informations requises : %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "Le groupe de cohérence %s contient encore des volumes. L'indicateur force " "est requis pour pouvoir le supprimer." #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "" "Le groupe de cohérence %s contient encore des instantanés de groupe de " "cohérence dépendants." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "" "Le groupe de cohérence est vide. Aucun instantané de groupe de cohérence ne " "sera créé." #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "L'état de groupe de cohérence doit être Disponible ou Erreur, mais l'état en " "cours est : %s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" "L'état du groupe de cohérence doit être disponible, mais l'état actuel est : " "%s." #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "Groupe de cohérence %(consistencygroup_id)s introuvable." msgid "Container" msgstr "Conteneur" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "Le format de conteneur %s n'est pas pris en charge par le pilote VMDK, seul " "'bare' est pris en charge." msgid "Container size smaller than required file size." msgstr "Taille du conteneur inférieure à la taille de fichier requise." msgid "Content type not supported." msgstr "Type de contenu non pris en charge." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "" "Service de configuration de contrôleur introuvable sur %(storageSystemName)s." #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "L'adresse IP du contrôleur '%(host)s' n'a pas pu être résolue : %(e)s." 
#, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "Converti au format %(f1)s, mais le format est maintenant %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" "Converti au format %(vol_format)s, mais le format est maintenant " "%(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "Converti au format brut, mais le format est maintenant %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "Converti au format brut, mais le format est maintenant %s." msgid "Coordinator uninitialized." msgstr "Coordinateur non initialisé." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "Echec de la tâche de copie de volume : convert_to_base_volume: id=%(id)s, " "status=%(status)s." #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "" "La tâche de copie du volume a échoué : create_cloned_volume id=%(id)s, " "status=%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "Copie de métadonnées depuis %(src_type)s %(src_id)s vers %(vol_id)s" msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "Impossible de déterminer le noeud final Keystone à utiliser. Cela peut être " "défini dans le catalogue de service ou à l'aide de l'option de configuration " "cinder.conf 'backup_swift_auth_url'." msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "Impossible de déterminer le noeud final Swift à utiliser. Cela peut être " "défini dans le catalogue de service ou à l'aide de l'option de configuration " "cinder.conf 'backup_swift_url'." msgid "Could not find DISCO wsdl file." msgstr "Fichier wsdl DISCO introuvable." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "ID de cluster GPFS introuvable : %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "Périphérique du système de fichiers GPFS introuvable : %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" "Impossible de trouver un hôte pour le volume %(volume_id)s de type " "%(type_id)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Configuration introuvable dans %(path)s" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "Exportation iSCSI introuvable pour le volume %(volumeName)s." #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "Exportation iSCSI trouvée pour le volume %s" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "Cible iSCSI introuvable pour le volume : %(volume_id)s." #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "" "Impossible de trouver une clé dans la sortie de la commande %(cmd)s: %(out)s." #, python-format msgid "Could not find parameter %(param)s" msgstr "Impossible de trouver les paramètres %(param)s" #, python-format msgid "Could not find target %s" msgstr "Cible %s introuvable" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." 
msgstr "" "Impossible de trouver le volume parent de l'instantané '%s' sur la matrice." #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "" "Impossible de trouver l'instantané unique %(snap)s sur le volume %(vol)s." msgid "Could not get system name." msgstr "Impossible d'obtenir le nom du système." #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Echec du chargement de l'app de collage '%(name)s' depuis %(path)s" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "Impossible de lire %s. Nouvelle exécution avec sudo" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "Impossible de lire les informations pour l'instantané %(name)s. Code : " "%(code)s. Raison : %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "" "Impossible de restaurer le fichier de configuration %(file_path)s : %(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "Impossible d'enregistrer la configuration dans %(file_path)s: %(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "Impossible de démarrer l'instantané du groupe de cohérence %s." #, python-format msgid "Counter %s not found" msgstr "Compteur %s non trouvé" msgid "Create QoS policy error." msgstr "Erreur lors de la création de la stratégie QoS." #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Création de la sauvegarde interrompue, état de la sauvegarde attendu " "%(expected_status)s, mais état %(actual_status)s obtenu." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Création de la sauvegarde interrompue, état du volume attendu " "%(expected_status)s, mais état %(actual_status)s obtenu." msgid "Create consistency group failed." msgstr "La création du groupe de cohérence a échoué." #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "La création de volumes chiffrés du type %(type)s depuis l'image %(image)s " "n'est pas prise en charge." msgid "Create export for volume failed." msgstr "La création d'une exportation pour le volume a échoué." msgid "Create hostgroup error." msgstr "Erreur lors de la création de hostgroup." #, python-format msgid "Create hypermetro error. %s." msgstr "Erreur lors de la création d'hypermetro. %s." msgid "Create lun error." msgstr "Erreur de création du numéro d'unité logique (lun)." msgid "Create lun migration error." msgstr "Erreur lors de la création de migration de numéro d'unité logique." msgid "Create luncopy error." msgstr "Erreur lors de la création luncopy." msgid "Create lungroup error." msgstr "Erreur lors de la création de lungroup." msgid "Create manager volume flow failed." msgstr "Echec de la création du flux de volume du gestionnaire. " msgid "Create port group error." msgstr "Erreur lors de la création du groupe de ports." msgid "Create replication error." msgstr "Erreur lors de la création de la réplication." #, python-format msgid "Create replication pair failed. Error: %s." msgstr "" "La création d'une paire de systèmes pour la réplication a échoué. Erreur : " "%s." msgid "Create snapshot error." msgstr "Erreur lors de la création de l'instantané." 
#, python-format msgid "Create volume error. Because %s." msgstr "Erreur lors de la création du volume. Motif : %s." msgid "Create volume failed." msgstr "Echec de la création du volume." msgid "Creating a consistency group from a source is not currently supported." msgstr "" "La création d'un groupe de cohérence à partir d'une source n'est pas prise " "en charge actuellement." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "Echec de création et d'activation de l'ensemble de zones : (Ensemble de " "zones=%(cfg_name)s erreur=%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "" "Echec de création et d'activation de l'ensemble de zones : (Ensemble de " "zones=%(zoneset)s erreur=%(err)s)." #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "" "Création d'utilisations pour la période comprise entre %(begin_period)s et " "%(end_period)s" msgid "Current host isn't part of HGST domain." msgstr "L'hôte actuel ne fait pas partie du domaine HGST." #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "Hôte actuel non valide pour le volume %(id)s de type %(type)s, migration non " "autorisée" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "L'hôte actuellement mappé pour le volume %(vol)s est dans un groupe d'hôtes " "non pris en charge avec %(group)s." msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "OBSOLETE : Déploiement v1 de l'API Cinder." msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "OBSOLETE : Déploiement v2 de l'API Cinder." #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "Erreur du pilote DRBDmanage : la clé attendue \"%s\" ne figure pas dans la " "réponse, version DRBDmanage incorrecte ?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "Erreur de configuration du pilote DRBDmanage : certaines bibliothèques " "requises (dbus, drbdmanage.*) sont introuvables." 
#, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage attendait une ressource (\"%(res)s\"), mais a reçu %(n)d" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "Dépassement du délai d'attente de DRBDmanage lors de la création du volume " "après la restauration d'instantané ; ressource \"%(res)s\", volume \"%(vol)s" "\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "Dépassement du délai d'attente de DRBDmanage lors de la création de " "l'instantané ; ressource \"%(res)s\", instantané \"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "" "Dépassement du délai d'attente de DRBDmanage lors de la création du volume ; " "ressource \"%(res)s\", volume \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "Dépassement du délai d'attente de DRBDmanage pour obtenir la taille du " "volume ; ID volume \"%(id)s\" (res \"%(res)s\", vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "La version d'API Data ONTAP n'a pas pu être déterminée." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" "Data ONTAP opérant sous 7-Mode ne prend pas en charge les groupes de " "stratégies QoS." msgid "Database schema downgrade is not allowed." msgstr "Rétrograder le schéma de base de données n'est pas autorisé." #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "Dataset %s n'est pas partagé dans Nexenta Store Appliance" #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "Groupe Dataset %s introuvable sur Nexenta SA" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup est un type de mise à disposition valide, mais nécessite que WSAPI " "version '%(dedup_version)s', version '%(version)s' soit installé." msgid "Dedup luns cannot be extended" msgstr "Les numéros d'unité logique dédoublonnés ne peuvent pas être étendus" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "L'optimiseur de dédoublonnage n'est pas installé. Impossible de créer un " "volume dédupliqué" msgid "Default pool name if unspecified." msgstr "Nom de pool par défaut s'il n'a pas été spécifié." #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "Le quota par défaut de la ressource %(res)s est défini par l'indicateur de " "quota par défaut : quota_%(res)s. Il est désormais obsolète. Utilisez la " "classe de quota par défaut pour le quota par défaut." msgid "Default volume type can not be found." msgstr "Le type de volume par défaut est introuvable." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "" "Définit l'ensemble de pools exposés et leurs chaînes de requête dorsale " "associées" msgid "Delete LUNcopy error." msgstr "Erreur lors de la suppression de LUNcopy." msgid "Delete QoS policy error." msgstr "Erreur lors de la suppression de la stratégie QoS." 
msgid "Delete associated lun from lungroup error." msgstr "" "Erreur lors de la suppression du numéro d'unité logique associé de lungroup." #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Suppression de la sauvegarde interrompue, le service de sauvegarde " "actuellement configuré [%(configured_service)s] ne correspond pas au service " "de sauvegarde utilisé pour créer cette sauvegarde [%(backup_service)s]." msgid "Delete consistency group failed." msgstr "La suppression du groupe de cohérence a échoué." msgid "Delete hostgroup error." msgstr "Erreur lors de la suppression de hostgroup." msgid "Delete hostgroup from mapping view error." msgstr "Erreur lors de la suppression de hostgroup de la vue de mappage." msgid "Delete lun error." msgstr "Erreur lors de la suppression du numéro d'unité logique." msgid "Delete lun migration error." msgstr "Erreur lors de la suppression de migration de numéro d'unité logique." msgid "Delete lungroup error." msgstr "Erreur lors de la suppression de lungroup." msgid "Delete lungroup from mapping view error." msgstr "Erreur lors de la suppression de lungroup de la vue de mappage." msgid "Delete mapping view error." msgstr "Erreur lors de la suppression de la vue de mappage." msgid "Delete port group error." msgstr "Erreur lors de la suppression du groupe de ports." msgid "Delete portgroup from mapping view error." msgstr "Erreur lors de la suppression de portgroup de la vue de mappage." msgid "Delete snapshot error." msgstr "Erreur lors de la suppression de l'instantané." #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "" "Suppression de l'instantané du volume non pris en charge à l'état : %s." #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup interrompu, état de la sauvegarde attendu %(expected_status)s, " "mais état %(actual_status)s obtenu." msgid "Deleting volume from database and skipping rpc." msgstr "Suppression du volume de la base de données et saut de RPC." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "Echec de la suppression des zones : (commande=%(cmd)s erreur=%(err)s)." msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "" "L'API Dell 2.1 ou ultérieure est requise pour prise en charge de groupe de " "cohérence" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "" "Erreur de configuration du pilote Cinder de Dell, la réplication n'est pas " "prise en charge avec la connexion directe." #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Erreur de configuration du pilote Cinder de Dell, replication_device %s " "introuvable" msgid "Deploy v3 of the Cinder API." msgstr "Déploiement v3 de l'API Cinder." msgid "Describe-resource is admin only functionality" msgstr "Describe-resource est une fonctionnalité admin uniquement" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "La destination a pour migration_status %(stat)s, %(exp)s est attendu." msgid "Destination host must be different than the current host." msgstr "L'hôte de destination doit être différent de l'hôte en cours." msgid "Destination volume not mid-migration." 
msgstr "Le volume de destination n'est pas en cours de migration." msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "La déconnexion du volume a échoué : plusieurs volumes connectés ont été " "indiqués, mais sans attachment_id attachment_id." msgid "Detach volume from instance and then try again." msgstr "Déconnectez le volume de l'instance puis réessayez." #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "Plusieurs volumes portant le nom %(vol_name)s détectés" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "Colonne attendue introuvable dans %(fun)s : %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "La clé attendue, %(key)s, est introuvable dans %(fun)s : %(raw)s." msgid "Disabled reason contains invalid characters or is too long" msgstr "" "La raison de désactivation contient des caractères invalides ou est trop " "longue" #, python-format msgid "Domain with name %s wasn't found." msgstr "Le domaine nommé %s est introuvable." #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "Rétrogradation de cluster GPFS détectée. Fonction de clonage GPFS non " "activée au niveau du démon de cluster %(cur)s - doit être au moins au niveau " "%(min)s." #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "" "L'initialisation de la connexion par le pilote a échoué (erreur : %(err)s)." msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "Le pilote n'est pas en mesure d'effectuer une nouvelle saisie car le volume " "(LUN {}) contient un instantané qu'il est interdit de migrer." msgid "Driver must implement initialize_connection" msgstr "Le pilote doit implémenter initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "Le pilote a correctement décodé les données de la sauvegarde importée, mais " "des zones sont manquantes (%s)." #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "La version %(current_version)s de l'API de proxy lE-series ne prend pas en " "charge la panoplie complète de spécifications SSC supplémentaires. La " "version du proxy doit être au moins %(min_version)s." #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "Exception CLI du pilote EMC VNX Cinder : %(cmd)s (code retour : %(rc)s) " "(sortie : %(out)s)." #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" "Exception SPUnavailableException du pilote EMC VNX Cinder : %(cmd)s (code " "retour : %(rc)s) (sortie : %(out)s)." msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword doivent avoir des " "valeurs valides." #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." 
msgstr "" "Soit 'cgsnapshot_id', soit 'source_cgid' doit être soumis pour créer le " "groupe de cohérence %(name)s depuis la source." #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO : %(slo)s ou la charge de travail %(workload)s n'est pas valide. " "Examinez l'énoncé de l'erreur pour voir si les valeurs sont valides." msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "Soit hitachi_serial_number, soit hitachi_unit_name est requis." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "" "Service de composition d'élément introuvable sur %(storageSystemName)s." msgid "Enables QoS." msgstr "Active QoS." msgid "Enables compression." msgstr "Active la compression. " msgid "Enables replication." msgstr "Active la réplication. " msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "Assurez-vous que configfs est monté sur /sys/kernel/config." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur d'ajout de l'initiateur %(initiator)s à l'élément groupInitiatorGroup " "%(initiatorgroup)s. Code retour : %(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur d'ajout du groupe cible : %(targetgroup)s avec IQN : %(iqn)s Code " "retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "Erreur lors du rattachement du volume %(vol)s." #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur de clonage de l'instantané : %(snapshot)s sur le volume : %(lun)s du " "pool : %(pool)s Projet : %(project)s Projet clone : %(clone_proj)s Code " "retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Erreur de création du volume cloné : %(cloneName)s Code retour : %(rc)lu. " "Erreur : %(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur de création d'un volume cloné : Volume : %(cloneName)s Volume :" "%(sourceName)s. Code retour : %(rc)lu. Erreur : %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur de création du groupe : %(groupName)s. Code retour : %(rc)lu. " "Erreur : %(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Erreur de création de la vue de masquage : %(groupName)s. Code retour : " "%(rc)lu. Erreur : %(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur de création du volume : %(volumeName)s. Code retour : %(rc)lu. " "Erreur : %(error)s." #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur de création du volume : %(volumename)s. Code retour : %(rc)lu. 
" "Erreur : %(error)s." #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur CreateGroupReplica : source : %(source)s cible : %(target)s. Code " "retour : %(rc)lu. Erreur : %(error)s." #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur de création de l'initiateur %(initiator)s sur l'alias %(alias)s. Code " "retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur de création du projet : %(project)s sur le pool : %(pool)s Code " "retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur lors de la création de la propriété : %(property)s. Type : %(type)s. " "Description : %(description)s. Code retour : %(ret.status)d Message : " "%(ret.data)s." #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erreur lors de la création du partage %(name)s. Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur de création de l'instantané : %(snapshot)s sur le volume %(lun)s vers " "le pool %(pool)s Projet : %(project)s Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur lors de la création de l'instantané %(snapshot)s sur le partage " "%(share)s vers le pool %(pool)s Projet : %(project)s Code retour : " "%(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Erreur de création de la cible : %(alias)s Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur de création du groupe cible : %(targetgroup)s avec IQN : %(iqn)s Code " "retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Erreur de création du volume : %(lun)s Taille : %(size)s Code retour : " "%(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur de création d'un volume composite. Code retour : %(rc)lu. Erreur : " "%(error)s." #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." 
msgstr "" "Erreur lors de la création de l'action de réplication sur : pool : %(pool)s " "Projet : %(proj)s volume : %(vol)s pour la cible : %(tgt)s et le pool : " "%(tgt_pool)s Code retour : %(ret.status)d Message : %(ret.data)s ." msgid "Error Creating unbound volume on an Extend operation." msgstr "Erreur de création de volume non lié sur une opération Extend." msgid "Error Creating unbound volume." msgstr "Erreur lors de la création d'un volume non lié." #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur de suppression du volume : %(volumeName)s. Code retour : %(rc)lu. " "Erreur : %(error)s." #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "Erreur lors de la suppression du groupe : %(storageGroupName)s. Code " "retour : %(rc)lu. Erreur : %(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "Erreur lors de la suppression du groupe d'initiateurs : " "%(initiatorGroupName)s. Code retour : %(rc)lu. Erreur : %(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur lors de la suppression de l'instantané %(snapshot)s sur le partage " "%(share)s vers le pool %(pool)s Projet : %(project)s Code retour : " "%(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur de suppression de l'instantané : %(snapshot)s sur le volume %(lun)s " "vers le pool %(pool)s Projet : %(project)s Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "Erreur lors de la suppression du volume %(lun)s du pool : %(pool)s, projet : " "%(project)s. Code retour : %(ret.status)d, Message : %(ret.data)s." #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur lors de la suppression du projet %(project)s sur le pool : %(pool)s. " "Code retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "Erreur lors de la suppression de l'action de réplication : %(id)s. Code " "retour : %(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur d'extension de volume : %(volumeName)s. Code retour : %(rc)lu. " "Erreur : %(error)s." #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur d'obtention des initiateurs. InitiatorGroup : %(initiatorgroup)s. " "Code retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "Erreur lors de l'obtention des statistiques du pool : pool : %(pool)s, code " "retour : %(status)d, message : %(data)s." 
#, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur lors de l'obtention des statistiques du projet : Pool : %(pool)s " "Projet: %(project)s Code retour : %(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur d'obtention du partage %(share)s sur le pool %(pool)s. Projet: " "%(project)s Code retour : %(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur d'obtention de l'instantané %(snapshot)s sur le volume %(lun)s vers " "le pool %(pool)s Projet : %(project)s Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "Erreur d'obtention de la cible : %(alias)s Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur d'obtention du volume : %(lun)s sur le pool %(pool)s Projet : " "%(project)s Code retour : %(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Erreur de migration du volume d'un pool vers un autre. Code retour : " "%(rc)lu. Erreur : %(error)s." #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "Erreur de modification de la vue de masquage : %(groupName)s. Code retour : " "%(rc)lu. Erreur : %(error)s." #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "" "Erreur de propriété du pool : le pool %(pool)s n'est pas détenu par %(host)s." #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur de définition des propriétés : %(props)s sur le volume : %(lun)s du " "pool : %(pool)s Projet : %(project)s Code retour : %(ret.status)d Message : " "%(ret.data)s." #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur de fin de la session de migration. Code retour : %(rc)lu. Erreur : " "%(error)s." #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erreur de vérification du demandeur : %(iqn)s Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erreur de vérification du pool : %(pool)s Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur de vérification du projet : %(project)s sur le pool : %(pool)s Code " "retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." 
msgstr "" "Erreur lors de la vérification du service %(service)s. Code retour : " "%(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erreur de vérification de la cible : %(alias)s Code retour : %(ret.status)d " "Message : %(ret.data)s." #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "Erreur lors de la vérification du partage %(share)s sur le projet " "%(project)s et le pool %(pool)s. Code retour : %(ret.status)d Message : " "%(ret.data)s." #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "Erreur d'ajout du volume : %(volumeName)s avec le chemin d'instance : " "%(volumeInstancePath)s." #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "Erreur d'ajout du demandeur au groupe : %(groupName)s. Code retour : " "%(rc)lu. Erreur : %(error)s." #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "Erreur d'ajout du volume vers le volume composite. Erreur : %(error)s." #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "Erreur d'ajout du volume %(volumename)s au volume de base cible." #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "Erreur d'association du groupe de stockage : %(storageGroupName)s. A la " "règle FAST : %(fastPolicyName)s avec la description d'erreur : %(errordesc)s." #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "" "Erreur lors du rattachement du volume %s. Il se peut que la limite des " "cibles ait été atteinte. " #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Erreur de rupture de la relation clonée : Nom de synchronisation : " "%(syncName)s Code retour : %(rc)lu. Erreur : %(error)s." msgid "Error connecting to ceph cluster." msgstr "Erreur lors de la connexion au cluster ceph." #, python-format msgid "Error connecting via ssh: %s" msgstr "Erreur lors de la connexion via SSH : %s" #, python-format msgid "Error creating volume: %s." msgstr "Erreur lors de la création du volume : %s." msgid "Error deleting replay profile." msgstr "Erreur lors de la suppression du profil de relecture." #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "Erreur lors de la suppression du volume %(ssn)s : %(volume)s" #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "Erreur lors de la suppression du volume %(vol)s : %(err)s." #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "Erreur lors de l'analyse syntaxique de l'évaluateur : %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erreur lors de l'édition du partage %(share)s sur le pool %(pool)s. Code " "retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." 
msgstr "" "Erreur lors de l'activation d'iSER pour NetworkPortal : assurez-vous que " "RDMA est pris en charge sur votre port iSCSI %(port)d à l'adresse IP %(ip)s." #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "" "Erreur survenue durant le nettoyage d'un rattachement ayant échoué : %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "" "Erreur lors de l'exécution de l'API CloudByte [%(cmd)s]. Erreur : %(err)s." msgid "Error executing EQL command" msgstr "Erreur d'exécution de la commande EQL" #, python-format msgid "Error executing command via ssh: %s" msgstr "Erreur d'exécution de la commande via SSH : %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "Erreur lors de l'extension du volume %(vol)s : %(err)s." #, python-format msgid "Error extending volume: %(reason)s" msgstr "Erreur lors de l'extension du volume : %(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "Erreur lors de la recherche de %(name)s." #, python-format msgid "Error finding %s." msgstr "Erreur lors de la recherche de %s." #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur lors de l'obtention de ReplicationSettingData. Code retour : %(rc)lu. " "Erreur : %(error)s." msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erreur lors de l'obtention des détails de version du dispositif. Code " "retour : %(ret.status)d Message : %(ret.data)s." #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "" "Erreur d'obtention de l'ID de domaine à partir du nom %(name)s : %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "" "Erreur d'obtention de l'ID de domaine à partir du nom %(name)s : %(id)s." msgid "Error getting initiator groups." msgstr "Erreur d'obtention des groupes d'initiateurs." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "Erreur d'obtention de l'ID de pool à partir du nom %(pool)s : %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "" "Erreur d'obtention de l'ID de pool à partir du nom %(pool_name)s : " "%(err_msg)s." #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erreur lors de l'obtention de l'action de réplication : %(id)s. Code " "retour : %(ret.status)d Message : %(ret.data)s." msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erreur lors de l'obtention des détails de la source de réplication. Code " "retour : %(ret.status)d Message : %(ret.data)s." msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "Erreur lors de l'obtention des détails de la cible de réplication. Code " "retour : %(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erreur lors de l'obtention de la version : service : %(svc)s. Code retour : " "%(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." 
msgstr "" "Erreur dans l'opération [%(operation)s] pour le volume [%(cb_volume)s] dans " "le stockage CloudByte : [%(cb_error)s], code d'erreur : [%(error_code)s]." #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "Erreur dans la réponse de l'API SolidFire : data = %(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "" "Erreur lors de l'opération space-create pour %(space)s avec une taille de " "%(size)d Go" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "" "Erreur dans space-extend pour le volume %(space)s avec %(size)d Go " "additionnels" #, python-format msgid "Error managing volume: %s." msgstr "Erreur lors de la gestion du volume : %s." #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "Erreur de mappage du volume %(vol)s. %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "Erreur lors de la modification de synchronisation de la réplique : %(sv)s " "opération : %(operation)s. Code retour : %(rc)lu. Erreur : %(error)s." #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "Erreur lors de la modification du service %(service)s. Code retour : " "%(ret.status)d Message : %(ret.data)s." #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur lors du déplacement du volume %(vol)s du projet source %(src)s vers " "le projet cible %(tgt)s. Code retour : %(ret.status)d Message : " "%(ret.data)s." msgid "Error not a KeyError." msgstr "Erreur autre qu'une erreur KeyError." msgid "Error not a TypeError." msgstr "Erreur autre qu'une erreur TypeError." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "" "Une erreur est survenue lors de la création de l'instantané de groupe de " "cohérence %s." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "" "Une erreur est survenue lors de la suppression de l'instantané de groupe de " "cohérence %s." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "" "Une erreur est survenue lors de la mise à jour du groupe de cohérence %s." #, python-format msgid "Error parsing config file: %s" msgstr "Erreur lors de l'analyse syntaxique du fichier de configuration : %s" msgid "Error promoting secondary volume to primary" msgstr "Erreur de promotion du volume secondaire en volume primaire" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "Erreur de suppression du volume %(vol)s. %(error)s." #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "" "Erreur lors de l'attribution d'un nouveau nom au volume %(vol)s : %(err)s." #, python-format msgid "Error response: %s" msgstr "Réponse d'erreur : %s" msgid "Error retrieving volume size" msgstr "Erreur lors de l'obtention de la taille du volume" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur lors de l'envoi de la mise à jour de la réplication pour l'ID " "d'action : %(id)s. Code retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." 
msgstr "" "Erreur lors de l'envoi de la mise à jour de la réplication. Erreur " "renvoyée : %(err)s. Action : %(id)s." #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur lors de la définition de l'héritage de réplication à %(set)s pour le " "volume : %(vol)s Projet %(project)s. Code retour : %(ret.status)d Message : " "%(ret.data)s." #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "Erreur lors de la scission du package %(package)s de la source : %(src)s. " "Code retour : %(ret.status)d Message : %(ret.data)s ." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "" "Erreur de suppression de la liaison du volume %(vol)s du pool. %(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "Erreur lors de la vérification de la taille du clone sur le clone du " "volume : %(clone)s, Taille : %(size)d onSnapshot : %(snapshot)s" #, python-format msgid "Error while authenticating with switch: %s." msgstr "Erreur lors de l'authentification avec le commutateur : %s." #, python-format msgid "Error while changing VF context %s." msgstr "Erreur lors de la modification du contexte VF %s." #, python-format msgid "Error while checking the firmware version %s." msgstr "Erreur lors de la vérification de la version de microprogramme %s." #, python-format msgid "Error while checking transaction status: %s" msgstr "Erreur de contrôle de l'état de transaction : %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "" "Erreur lors de la détermination de la disponibilité de VF pour la gestion : " "%s." #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "Erreur lors de la connexion du commutateur %(switch_id)s avec le protocole " "%(protocol)s. Erreur : %(error)s." #, python-format msgid "Error while creating authentication token: %s" msgstr "Erreur lors de la création du jeton d'authentification : %s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "" "Erreur lors de la création de l'instantané : [status] %(stat)s - [result] " "%(res)s." #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "" "Erreur lors de la création du volume : [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "" "Erreur lors de la suppression de l'instantané : [status] %(stat)s - [result] " "%(res)s" #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "" "Erreur lors de la suppression du volume : [status] %(stat)s - [result] " "%(res)s." #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "" "Erreur lors de l'extension du volume [status] %(stat)s - [result] %(res)s." #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "" "Erreur lors de l'obtention des détails de %(op)s, code renvoyé : %(status)s." #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." 
msgstr "" "Erreur d'obtention de données via SSH : (commande=%(cmd)s erreur=%(err)s)." #, python-format msgid "Error while getting disco information [%s]." msgstr "Erreur lors de l'obtention des informations disco [%s]." #, python-format msgid "Error while getting nvp value: %s." msgstr "Erreur lors de l'obtention de la valeur nvp : %s." #, python-format msgid "Error while getting session information %s." msgstr "Erreur lors de l'obtention des informations de session %s." #, python-format msgid "Error while parsing the data: %s." msgstr "Erreur lors de l'analyse syntaxique des données : %s." #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "" "Erreur lors de l'interrogation de la page %(url)s sur le commutateur, " "raison : %(error)s." #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "Erreur lors de la suppression de zones et cfgs dans la chaîne de zone : " "%(description)s." #, python-format msgid "Error while requesting %(service)s API." msgstr "Erreur lors de la demande de l'API %(service)s." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "" "Erreur d'exécution de l'interface CLI de segmentation : (commande=%(cmd)s " "erreur=%(err)s)." #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" "Erreur lors de la mise à jour des nouvelles zones et cfgs dans la chaîne de " "zone. Erreur : %(description)s." msgid "Error writing field to database" msgstr "Erreur lors de l'écriture du champ dans la base de données" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "Erreur [%(stat)s - %(res)s] lors de l'obtention de l'ID du volume." #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "Erreur [%(stat)s - %(res)s] lors de la restauration de l'instantané " "[%(snap_id)s] dans le volume [%(vol)s]." #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "" "Erreur [status] %(stat)s - [result] %(res)s] lors de l'obtention de l'ID du " "volume." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "Tentatives de planification max %(max_attempts)d pour le volume %(volume_id)s" msgid "Exceeded the limit of snapshots per volume" msgstr "Nombre limite d'instantanés par volume dépassé" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "" "Exception lors de l'ajout des métadonnées de volume au volume cible " "%(volumename)s." #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "Exception lors de la création d'une réplique de l'élément. Nom de clone : " "%(cloneName)s Nom source : %(sourceName)s Spécifications supplémentaires : " "%(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "Exception dans _select_ds_for_volume : %s." #, python-format msgid "Exception while forming the zone string: %s." msgstr "Exception lors de la formation de la chaîne de zone : %s." #, python-format msgid "Exception: %s" msgstr "Exception : %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "UUID attendu mais %(uuid)s reçu." 
#, python-format msgid "Expected exactly one node called \"%s\"" msgstr "Un seul noeud précis nommé \"%s\" est attendu" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "Un entier est attendu pour node_count, svcinfo lsiogrp a renvoyé : %(node)s." #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" "Aucune sortie n'était attendue de la commande CLI %(cmd)s, %(out)s a été " "renvoyé." #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "Un disque virtuel unique était attendu de lsvdisk lors du filtrage par " "vdisk_UID. %(count)s ont été renvoyés." #, python-format msgid "Expected volume size was %d" msgstr "La taille du volume attendue était %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Exportation de la sauvegarde interrompue, état de la sauvegarde attendu " "%(expected_status)s, mais état %(actual_status)s obtenu." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Exportation de l'enregistrement interrompue, le service de sauvegarde " "actuellement configuré [%(configured_service)s] ne correspond pas au service " "de sauvegarde utilisé pour créer cette sauvegarde [%(backup_service)s]." msgid "Extend volume error." msgstr "Erreur lors de l'extension du volume." msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "" "L'extension de volume n'est prise en charge pour ce pilote que si aucun " "instantané n'est défini." msgid "Extend volume not implemented" msgstr "Extension du volume non implémentée" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "L'optimiseur FAST VP n'est pas installé. Impossible de définir la règle de " "hiérarchisation pour le volume" msgid "FAST is not supported on this array." msgstr "FAST n'est pas pris en charge sur cette matrice." msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "" "FC est le protocole mais les noms WWPN ne sont pas fournis par OpenStack." #, python-format msgid "Faield to unassign %(volume)s" msgstr "Echec de la libération de %(volume)s" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "Echec de la création du volume cache %(volume)s. Erreur : %(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "Echec de l'ajout de la connexion pour fabric=%(fabric)s : Erreur : %(err)s" msgid "Failed cgsnapshot" msgstr "Echec de l'instantané de groupe de cohérence" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "Echec de la création de l'instantané pour le groupe : %(response)s." #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" "Echec de la création d'un instantané pour le volume %(volname)s : " "%(response)s." #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "" "Echec de l'obtention de la zone active définie à partir de l'ensemble de " "noeuds (fabric) %s." #, python-format msgid "Failed getting details for pool %s." msgstr "Echec d'obtention des informations sur le pool %s." 
#, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" "Echec du retrait de la connexion pour fabric=%(fabric)s : Erreur : %(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "Echec de l'extension du volume %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "Echec de connexion à 3PAR (%(url)s) en raison de %(err)s" msgid "Failed to access active zoning configuration." msgstr "Impossible d'accéder à la configuration de segmentation active." #, python-format msgid "Failed to access zoneset status:%s" msgstr "Impossible d'accéder au statut de l'ensemble de zones : %s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "Echec de l'acquisition d'un verrouillage des ressources. (série : " "%(serial)s, inst : %(inst)s, ret : %(ret)s, stderr : %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "Echec de l'ajout de %(vol)s dans %(sg)s après %(retries)s tentatives." msgid "Failed to add the logical device." msgstr "Echec de l'ajout de l'unité logique." #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "L'ajout du volume %(volumeName)s au groupe de cohérence %(cgName)s n'a pas " "abouti. Retour : %(rc)lu. Erreur : %(error)s." msgid "Failed to add zoning configuration." msgstr "L'ajout de la configuration de segmentation a échoué." #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" "Echec de l'affectation du nom qualifié iSCSI de l'initiateur iSCSI. (port : " "%(port)s, raison : %(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "Echec d'association de qos_specs : %(specs_id)s avec spécif. %(type_id)s." #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "Echec de connexion de la cible iSCSI pour le volume %(volume_id)s." #, python-format msgid "Failed to backup volume metadata - %s" msgstr "Echec de sauvegarde des métadonnées de volume de sauvegarde - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "Echec de sauvegarde des métadonnées de volume de sauvegarde - objet de " "sauvegarde de métadonnées 'backup.%s.meta' existe déjà" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "Impossible de cloner un volume depuis l'instantané %s." #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "Echec de la connexion à la matrice %(vendor_name)s %(host)s : %(err)s" msgid "Failed to connect to Dell REST API" msgstr "Echec de connexion à l'API REST Dell " msgid "Failed to connect to array" msgstr "Impossible de se connecter à la matrice" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" "Echec de la connexion au démon sheep. Adresse : %(addr)s, port : %(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "Échec de la copie de l'image vers le volume : %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "Échec de la copie des métadonnées pour le volume : %(reason)s" msgid "Failed to copy volume, destination device unavailable." 
msgstr "Echec de la copie du volume, l'unité de destination est indisponible." msgid "Failed to copy volume, source device unavailable." msgstr "Echec de la copie du volume, l'unité source est indisponible." #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "" "Echec de la création du groupe de cohérence %(cgName)s depuis l'instantané " "%(cgSnapshot)s." #, python-format msgid "Failed to create IG, %s" msgstr "Echec de la création du groupe demandeur, %s" msgid "Failed to create SolidFire Image-Volume" msgstr "Echec de la création de l'entité (image-volume) SolidFire" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "Impossible de créer le groupe de volumes : %(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Echec de la création d'un fichier. (fichier : %(file)s, ret : %(ret)s, " "stderr : %(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "Impossible de créer un instantané temporaire pour le volume %s." msgid "Failed to create api volume flow." msgstr "Echec de la création du flux de volume d'API." #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "" "Echec de la création de l'instantané du groupe de cohérence %(id)s. Cause : " "%(reason)s." #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "" "Echec de la création du groupe de cohérence %(id)s. Cause : %(reason)s." #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "Echec de la création du groupe de cohérence %(id)s : %(ret)s." #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "Echec de la création du groupe de cohérence %s car le groupe de cohérence " "VNX ne peut pas accepter de numéros d'unités logiques compressés en tant que " "membres." #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "Echec de la création du groupe de cohérence : %(cgName)s." #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "" "Echec de la création du groupe de cohérence : %(cgid)s. Erreur : %(excmsg)s." #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Echec de la création du groupe de cohérence : %(consistencyGroupName)s Code " "retour : %(rc)lu. Erreur : %(error)s." #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "Echec de la création des ID du matériel sur %(storageSystemName)s." #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "" "Echec de la création de l'hôte : %(name)s. Vérifiez s'il existe sur la " "matrice." #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "Echec de la création de hostgroup : %(name)s. Vérifiez s'il existe sur la " "matrice." msgid "Failed to create iqn." msgstr "Echec de la création de l'iqn." #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "Echec de la création de la cible iSCSI pour le volume %(volume_id)s." msgid "Failed to create manage existing flow." msgstr "Echec de la création du flux de gestion existant." msgid "Failed to create manage_existing flow." 
msgstr "Echec de la création du flux manage_existing." msgid "Failed to create map on mcs, no channel can map." msgstr "" "Echec de la création de la mappe sur mcs, aucun canal ne peut être mappé." msgid "Failed to create map." msgstr "Echec de création de la mappe." #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "Échec de la création de métadonnées pour le volume : %(reason)s" msgid "Failed to create partition." msgstr "Echec de la création de la partition." #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "Echec de création de qos_specs : %(name)s avec spécif. %(qos_specs)s." msgid "Failed to create replica." msgstr "Echec de la création de la réplique." msgid "Failed to create scheduler manager volume flow" msgstr "" "Echec de la création du flux de volume du gestionnaire de planificateur" #, python-format msgid "Failed to create snapshot %s" msgstr "Echec de la création de l'instantané %s" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "" "Echec de la création d'instantané car aucun identificateur de numéro d'unité " "logique (LUN) n'est indiqué" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "" "Echec de la création de l'instantané pour le groupe de cohérence : " "%(cgName)s." #, python-format msgid "Failed to create snapshot for volume %s." msgstr "Echec de création d'un instantané pour le volume %s." #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "" "Echec de la création de stratégie d'instantané sur le volume %(vol)s : " "%(res)s." #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "" "Echec de la création de la zone de ressource d'instantané sur le volume " "%(vol)s: %(res)s." msgid "Failed to create snapshot." msgstr "Echec de la création de l'instantané." #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "Echec de la création d'instantané. Les informations du volume CloudByte sont " "introuvables pour le volume OpenStack [%s]." #, python-format msgid "Failed to create south bound connector for %s." msgstr "Impossible de créer le connecteur Southbound pour %s." #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "La création du groupe de stockage %(storageGroupName)s a échoué." #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "Echec de la création du pool à la demande. Message d'erreur : %s" #, python-format msgid "Failed to create volume %s" msgstr "Echec de création du volume %s" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" "Echec de la suppression de SI pour volume_id : %(volume_id)s car il a une " "paire." #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Echec de la suppression d'une unité logique. (unité logique : %(ldev)s, " "raison : %(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "" "Echec de la suppression de l'instantané du groupe de cohérence %(id)s. " "Cause : %(reason)s." #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "" "Echec de la suppression du groupe de cohérence %(id)s. Cause : %(reason)s." #, python-format msgid "Failed to delete consistency group: %(cgName)s." 
msgstr "Echec de la suppression du groupe de cohérence : %(cgName)s." #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "Echec de la suppression du groupe de cohérence : %(consistencyGroupName)s " "Code retour : %(rc)lu. Erreur : %(error)s." msgid "Failed to delete device." msgstr "Echec de suppression de l'unité." #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Echec de suppression de l'ensemble de fichiers pour le groupe de cohérence " "%(cgname)s. Erreur : %(excmsg)s." msgid "Failed to delete iqn." msgstr "Echec de la suppression de l'iqn." msgid "Failed to delete map." msgstr "Echec de la suppression de la mappe." msgid "Failed to delete partition." msgstr "Echec de la suppression de la partition." msgid "Failed to delete replica." msgstr "Echec de la suppression de la réplique." #, python-format msgid "Failed to delete snapshot %s" msgstr "Echec de la suppression de l'instantané %s" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "" "Echec de la suppression de l'instantané pour le groupe de cohérence : " "%(cgId)s." #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "" "Echec de la suppression de l'instantané pour snapshot_id %s car il a une " "paire." msgid "Failed to delete snapshot." msgstr "Echec de la suppression de l'instantané." #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "Echec de suppression du volume %(volumeName)s." #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "Echec de la suppression du volume pour volume_id : %(volume_id)s car il a " "une paire." #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "Echec de déconnexion de la cible iSCSI pour le volume %(volume_id)s." msgid "Failed to determine blockbridge API configuration" msgstr "Impossible de déterminer la configuration de l'API Blockbridge" msgid "Failed to disassociate qos specs." msgstr "Echec de la dissociation des spécifications QoS." #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "Echec de dissociation de qos_specs : %(specs_id)s avec spécif. %(type_id)s." #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "" "Impossible de réserver la zone de ressource d'instantané, impossible de " "localiser le volume pour l'ID %s" msgid "Failed to establish SSC connection." msgstr "Echec d'établissement de la connexion SSC." msgid "Failed to establish connection with Coho cluster" msgstr "Connexion impossible à établir avec le cluster Coho " #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "Echec de l'exécution de l'API CloudByte [%(cmd)s]. Statut HTTP : %(status)s. " "Erreur : %(error)s." msgid "Failed to execute common command." msgstr "Echec de l'exécution de la commande common." #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "Echec de l'exportation pour le volume : %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "Echec de l'extension du volume %(name)s, message d'erreur : %(msg)s." 
msgid "Failed to find QoSnode" msgstr "QoSnode introuvable" msgid "Failed to find Storage Center" msgstr "Echec de la recherche de Storage Center" msgid "Failed to find a vdisk copy in the expected pool." msgstr "Copie de disque virtuel introuvable dans le pool attendu." msgid "Failed to find account for volume." msgstr "Impossible de trouver un compte pour le volume." #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" "Ensemble de fichiers introuvable pour le chemin %(path)s, sortie de la " "commande : %(cmdout)s." #, python-format msgid "Failed to find group snapshot named: %s" msgstr "Impossible de trouver l'instantané de groupe nommé : %s" #, python-format msgid "Failed to find host %s." msgstr "L'hôte %s est introuvable." #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" "Impossible de trouver le groupe d'initiateurs iSCSI contenant %(initiator)s." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "Pool de stockage introuvable pour le volume source %s." #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "Echec de l'obtention des détails CloudByte du compte [%s]." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "Echec d'obtention des détails cible du numéro d'unité logique %s" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "" "Echec de l'obtention des détails de la cible du numéro d'unité logique pour " "le numéro d'unité logique %s." #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "Echec d'obtention de la liste cible du numéro d'unité logique %s" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "Echec d'obtention de l'ID de partition pour le volume %(volume_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "" "Echec de l'obtention de l'ID d'instantané Raid à partir de l'instantané " "%(snapshot_id)s." #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "Echec de l'obtention de l'ID d'instantané Raid à partir de l'instantané " "%(snapshot_id)s." msgid "Failed to get SplitMirror." msgstr "Impossible d'obtenir SplitMirror." #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "Echec de l'obtention d'une ressource de stockage. Une nouvelle tentative " "d'obtention de la ressource de stockage sera effectuée. (ressource : " "%(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "" "Echec de récupération de toutes les associations de spécifications QoS %s" msgid "Failed to get channel info." msgstr "Echec d'obtention des informations du canal." #, python-format msgid "Failed to get code level (%s)." msgstr "Echec d'obtention du niveau de code (%s)." msgid "Failed to get device info." msgstr "Echec d'obtention des informations de l'unité." #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "" "Echec de l'obtention du nom de domaine car CPG (%s) n'existe pas dans la " "matrice." msgid "Failed to get image snapshots." msgstr "Echec d'obtention des instantanés de l'image." #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." 
msgstr "" "Echec de l'obtention de l'adresse IP sur le canal %(channel_id)s avec le " "volume : %(volume_id)s." msgid "Failed to get iqn info." msgstr "Echec d'obtention des informations iqn." msgid "Failed to get license info." msgstr "Echec d'obtention des informations de licence." msgid "Failed to get lv info." msgstr "Echec d'obtention des informations lv." msgid "Failed to get map info." msgstr "Echec d'obtention des informations de la mappe." msgid "Failed to get migration task." msgstr "Echec d'obtention de la tâche de migration." msgid "Failed to get model update from clone" msgstr "Echec d'obtention de la mise à jour du modèle depuis le clone" msgid "Failed to get name server info." msgstr "Echec d'obtention des infos du serveur de noms." msgid "Failed to get network info." msgstr "Echec d'obtention des informations réseau." #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "" "Impossible d'obtenir l'ID du nouvel élément dans le nouveau pool : " "%(pool_id)s." msgid "Failed to get partition info." msgstr "Echec d'obtention des informations de la partition." #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "Echec de l'obtention de l'ID de pool à partir du volume %(volume_id)s." #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" "Impossible d'obtenir les informations de copie à distance pour %(volume)s en " "raison de l'erreur : %(err)s." #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "" "Impossible d'obtenir des informations de copie à distance pour %(volume)s. " "Exception : %(err)s." msgid "Failed to get replica info." msgstr "Echec d'obtention des informations de la réplique." msgid "Failed to get show fcns database info." msgstr "Echec de l'obtention des infos de la commande show fcns database." msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" "Echec de l'obtention de la taille du volume existant : %(vol). L'opération " "Gérer le volume a échoué." #, python-format msgid "Failed to get size of volume %s" msgstr "Echec de l'obtention de la taille du volume %s" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "Impossible d'obtenir un instantané pour le volume %s." msgid "Failed to get snapshot info." msgstr "Echec d'obtention des informations de l'instantané." #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "Echec d'obtention de l'IQN cible pour le numéro d'unité logique %s" msgid "Failed to get target LUN of SplitMirror." msgstr "Echec d'obtention du numéro d'unité logique (LUN) de SplitMirror." #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "Echec d'obtention du portail cible pour le numéro d'unité logique %s" msgid "Failed to get targets" msgstr "Echec de l'obtention des cibles" msgid "Failed to get wwn info." msgstr "Echec d'obtention des informations wwn." #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "Echec de l'obtention, de la création ou de l'ajout du volume %(volumeName)s " "à la vue de masquage %(maskingViewName)s. Message d'erreur reçu : " "%(errorMessage)s." msgid "Failed to identify volume backend." msgstr "Echec d'identification du back-end du volume." #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." 
msgstr "" "Echec de liaison de l'ensemble de fichiers pour le partage %(cgname)s. " "Erreur : %(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "Echec de connexion à la matrice %s (connexion non valide ?)." #, python-format msgid "Failed to login for user %s." msgstr "Echec de connexion de l'utilisateur %s." msgid "Failed to login with all rest URLs." msgstr "Echec de connexion à toutes les URL Rest." #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" "Echec de la demande au noeud final du cluster Datera pour la raison " "suivante : %s" msgid "Failed to manage api volume flow." msgstr "Echec de la gestion du flux de volume d'API." #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Echec de gestion du volume %(type)s %(name)s existant vu que la taille " "signalée, %(size)s, n'était pas un nombre en virgule flottante." #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "Echec de gestion du volume %(name)s existant, en raison de l'erreur lors de " "obtention de la taille de volume." #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "Echec de gestion du volume %(name)s existant, en raison de l'échec de " "l'opération Renommer : Message d'erreur : %(msg)s." #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "Echec de gestion du volume %(name)s existant, en raison de la taille " "rapportée %(size)s qui n'était pas un nombre à virgule flottante." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "Echec de la gestion du volume existant vu que le pool du type de volume " "choisi ne correspond pas au partage NFS transmis dans le volume de référence." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "Echec de la gestion du volume existant vu que le pool du type de volume " "choisi ne correspond pas au système de fichiers transmis dans le volume de " "référence." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "" "Echec de la gestion du volume existant vu que le pool du type de volume " "choisi ne correspond pas au pool de l'hôte." #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "Impossible de gérer le volume existant en raison d'un groupe d'E-S non " "concordant. Le groupe d'E-S du volume à gérer est %(vdisk_iogrp)s. Le groupe " "d'E-S du type sélectionné est %(opt_iogrp)s." #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "Impossible de gérer le volume existant car le pool du volume à gérer ne " "correspond pas au pool du back-end. Pool du volume à gérer : %(vdisk_pool)s. 
" "Pool du back-end : %(backend_pool)s." msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "Impossible de gérer le volume existant car le volume à gérer est \"compress" "\", alors que le type de volume sélectionné est \"not compress\"." msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "Impossible de gérer le volume existant car le volume à gérer est \"not " "compress\", alors que le type de volume sélectionné est \"compress\"." msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "Impossible de gérer le volume existant car le volume à gérer ne se trouve " "pas dans un groupe d'E-S valide." msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "Impossible de gérer le volume existant car le volume à gérer est de type " "\"thick\", alors que le type de volume sélectionné est \"thin\"." msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "Impossible de gérer le volume existant car le volume à gérer est de type " "\"thin\", alors que le type de volume sélectionné est \"thick\"." #, python-format msgid "Failed to manage volume %s." msgstr "Impossible de gérer le volume %s." #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "Echec du mappage d'une unité logique. (unité logique : %(ldev)s, numéro " "d'unité logique : %(lun)s, port : %(port)s, ID : %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "Echec de la migration du volume pour la première fois." msgid "Failed to migrate volume for the second time." msgstr "Echec de la migration du volume pour la seconde fois." #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "" "Echec du déplacement du mappage de numéro d'unité logique. Code retour : %s" #, python-format msgid "Failed to move volume %s." msgstr "Impossible de déplacer le volume %s." #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "Echec de l'ouverture d'un fichier. (fichier : %(file)s, ret : %(ret)s, " "stderr : %(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "Echec d'analyse de la sortie CLI :\n" " commande : %(cmd)s\n" " stdout : %(out)s\n" " stderr : %(err)s." msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "Echec de l'analyse syntaxique de l'option de configuration " "'keystone_catalog_info', doit figurer sous la forme :" ":" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "Echec de l'analyse syntaxique de l'option de configuration " "'swift_catalog_info', doit figurer sous la forme :" ":" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "Echec d'une opération zero-page reclamation. 
(unité logique : %(ldev)s, " "raison : %(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "" "Échec de la suppression de l'export pour le volume %(volume)s : %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" "Echec de la suppression de la cible iSCSI pour le volume %(volume_id)s." #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "La suppression du volume %(volumeName)s du groupe de cohérence %(cgName)s " "n'a pas abouti. Code retour : %(rc)lu. Erreur : %(error)s." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "" "Echec de la suppression du volume %(volumeName)s du groupe de stockage par " "défaut." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "" "Echec de la suppression du volume %(volumeName)s dans le groupe de stockage " "par défaut : %(volumeName)s." #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "Echec de la suppression de %(volumename)s du groupe de stockage par défaut " "pour la règle FAST %(fastPolicyName)s." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "Impossible de renommer le volume logique %(name)s. Message d'erreur : " "%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "Impossible de récupérer la configuration de segmentation active %s" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "" "Echec de la définition de l'authentification CHAP pour l'IQN cible %(iqn)s. " "Détails : %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "Echec de définition QoS pour le volume existant %(name)s. Message d'erreur : " "%(msg)s." msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "" "Echec de la définition de l'attribut 'Utilisateur entrant' pour la cible " "SCST." msgid "Failed to set partition." msgstr "Echec de la définition de la partition." #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "Echec de définition des droits pour le groupe de cohérence %(cgname)s. " "Erreur : %(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "Impossible de spécifier une unité logique pour le volume %(volume_id)s dont " "le mappage doit être annulé." #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "Impossible de spécifier une unité logique à supprimer. (méthode : " "%(method)s, ID : %(id)s)" msgid "Failed to terminate migrate session." msgstr "Echec de l'arrêt de la session de migration." #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "La suppression de la liaison du volume %(volume)s a échoué" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "Echec de suppression de liaison de l'ensemble de fichiers pour le groupe de " "cohérence %(cgname)s. Erreur : %(excmsg)s." #, python-format msgid "Failed to unmap a logical device. 
(LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "Echec de l'annulation du mappage d'une unité logique. (unité logique : " "%(ldev)s, raison : %(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "Echec de la mise à jour du groupe de cohérence : %(cgName)s." #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "Échec de la mise à jour des métadonnées pour le volume : %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "" "Impossible de mettre à jour ou supprimer la configuration de segmentation" msgid "Failed to update or delete zoning configuration." msgstr "" "Impossible de mettre à jour ou supprimer la configuration de segmentation." #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "Echec de mise à jour de qos_specs : %(specs_id)s avec spécif. %(qos_specs)s." msgid "Failed to update quota usage while retyping volume." msgstr "" "Echec de la mise à jour de l'utilisation de quota lors du changement de type " "du volume." msgid "Failed to update snapshot." msgstr "Impossible de mettre à jour l'instantané." #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "" "Echec de la mise à jour du modèle avec le modèle %(model)s fourni par le " "pilote" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "Echec de la mise à jour des métadonnées de volume %(vol_id)s avec les " "métadonnées fournies (%(src_type)s %(src_id)s)" #, python-format msgid "Failure creating volume %s." msgstr "Echec de création du volume %s." #, python-format msgid "Failure getting LUN info for %s." msgstr "Echec d'obtention des infos LUN pour %s." #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "Incident dans update_volume_key_value_pair : %s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "Echec du déplacement du nouveau numéro d'unité logique cloné vers %s." #, python-format msgid "Failure staging LUN %s to tmp." msgstr "Echec du transfert du numéro d'unité logique %s vers tmp." msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "" "Erreur fatale : Utilisateur non autorisé à interroger les volumes NetApp." #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "" "Echec de Fexvisor lors de l'ajout du volume %(id)s. Cause : %(reason)s." #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor n'est pas parvenu à joindre le volume %(vol)s au groupe %(group)s. " "Motif : %(ret)s." #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor n'est pas parvenu à retirer le volume %(vol)s du groupe %(group)s. " "Motif : %(ret)s." #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "" "Fexvisor n'est pas parvenu à supprimer le volume %(id)s. Motif : %(reason)s."
#, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "Echec de recherche de réseau SAN Fibre Channel : %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "Echec d'opération de zone Fibre Channel : %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "Echec de contrôle de la connexion Fibre Channel : %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "Fichier %(file_path)s introuvable." #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Le fichier %(path)s dispose d'un fichier de sauvegarde non valide : " "%(bfile)s. L'opération est abandonnée." #, python-format msgid "File already exists at %s." msgstr "Le fichier existe déjà dans %s." #, python-format msgid "File already exists at: %s" msgstr "Le fichier existe déjà dans : %s" msgid "Find host in hostgroup error." msgstr "Erreur lors de la recherche de l'hôte dans hostgroup." msgid "Find host lun id error." msgstr "Erreur lors de la recherche de l'ID de numéro d'unité logique hôte." msgid "Find lun group from mapping view error." msgstr "" "Erreur lors de la recherche de groupe de numéros d'unité logique dans la vue " "de mappage." msgid "Find lun number error." msgstr "Erreur lors de la recherche du nombre de numéros d'unité logique." msgid "Find mapping view error." msgstr "Erreur lors de la recherche de la vue de mappage." msgid "Find portgroup error." msgstr "Erreur lors de la recherche de portgroup." msgid "Find portgroup from mapping view error." msgstr "Erreur lors de la recherche de portgroup dans la vue de mappage." #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "Flash Cache Policy nécessite que WSAPI version '%(fcache_version)s', version " "'%(version)s' soit installé." #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "" "Echec de Flexvisor lors de l'affectation de volume : %(id)s : %(status)s." #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "" "Echec de Flexvisor lors de l'affectation de volume : %(id)s : %(status)s." #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor n'a pas trouvé l'instantané %(id)s du volume dans l'instantané " "%(vgsid)s du groupe %(vgid)s." #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "" "Echec de Flexvisor lors de la création de volume : %(volumeid)s : %(status)s." #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de la suppression du volume %(id)s : %(status)s." #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "" "Echec de Flexvisor lors de l'ajout du volume %(id)s au groupe %(cgid)s." #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor n'a pas affecté le volume %(id)s car il ne peut demander l'état de " "la requête par ID d'événement." #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de l'affectation du volume %(id)s : %(status)s." #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." 
msgstr "" "Echec de Flexvisor lors de l'affectation du volume %(volume)s IQN %(iqn)s." #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Echec de Flexvisor lors du clonage du volume %(id)s : %(status)s." #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "" "Echec de Flexvisor lors du clonage du volume (échec de l'obtention de " "l'événement) %(id)s." #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de la création de l'instantané pour le volume " "%(id)s : %(status)s." #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "Echec de Flexvisor lors de la création de l'instantané pour le volume (échec " "de l'obtention de l'événement) %(id)s." #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "" "Echec de Flexvisor lors de la création du volume %(id)s dans le groupe " "%(vgid)s." #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "" "Echec de Flexvisor lors de la création du volume %(volume)s : %(status)s." #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "" "Echec de Flexvisor lors de la création du volume (obtention de l'événement) " "%s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de la création du volume à partir de l'instantané " "%(id)s:%(status)s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" "Echec de Flexvisor lors de la création du volume à partir de l'instantané " "%(id)s:%(status)s." #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Echec de Flexvisor lors de la création du volume à partir de l'instantané " "(échec de l'obtention de l'événement) %(id)s." #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de la suppression de l'instantané %(id)s : " "%(status)s." #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" "Echec de Flexvisor lors de la suppression de l'instantané (échec de " "l'obtention de l'événement) %(id)s." #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de la suppression du volume %(id)s : %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de l'exportation du volume %(id)s : %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "" "Echec de Flexvisor lors de l'exportation du volume %(id)s : %(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "Echec de Flexvisor lors de l'extension du volume (échec de l'obtention de " "l'événement) %(id)s." #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de l'obtention des informations de pool %(id)s : " "%(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Echec de Flexvisor lors de l'obtention de l'ID instantané du volume %(id)s à " "partir du groupe %(vgid)s." 
#, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "" "Flexvisor n'est pas parvenu à supprimer le volume %(id)s du groupe %(cgid)s." #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "Echec de Flexvisor lors de la génération du volume à partir de l'instantané " "%(id)s:%(status)s." #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Echec de Flexvisor lors de la génération du volume à partir de l'instantané " "(échec de l'obtention de l'événement) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "" "Echec de Flexvisor lors de la libération du volume %(id)s : %(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" "Echec de Flexvisor lors de la libération du volume (obtention de " "l'événement) %(id)s." #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "" "Echec de Flexvisor lors de la libération du volume %(id)s : %(status)s." #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor n'a pas trouvé les infos du volume source %(id)s." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" "Echec de Flexvisor lors de la libération de volume : %(id)s : %(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "" "Le volume Flexvisor %(id)s n'a pas réussi à joindre le groupe %(vgid)s." #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "Le dossier %s n'existe pas dans Nexenta Store Appliance" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS n'est pas en cours d'exécution - état : %s." msgid "Gateway VIP is not set" msgstr "Gateway VIP n'a pas été défini" msgid "Get FC ports by port group error." msgstr "Erreur lors de l'obtention de ports FC par groupe de ports." msgid "Get FC ports from array error." msgstr "Erreur lors de l'obtention des ports FC à partir de la matrice." msgid "Get FC target wwpn error." msgstr "Erreur lors de l'obtention du wwpn FC cible." msgid "Get HyperMetroPair error." msgstr "Erreur lors de l'obtention d'HyperMetroPair." msgid "Get LUN group by view error." msgstr "Erreur lors de l'obtention de groupe LUN par vue." msgid "Get LUNcopy information error." msgstr "Erreur lors de l'obtention des informations LUNcopy." msgid "Get QoS id by lun id error." msgstr "" "Erreur lors de l'obtention d'ID QoS à partir de l'ID de numéro d'unité " "logique." msgid "Get QoS information error." msgstr "Erreur lors de l'obtention des informations QoS." msgid "Get QoS policy error." msgstr "Erreur lors de l'obtention de la stratégie QoS." msgid "Get SplitMirror error." msgstr "Erreur lors de l'obtention de SplitMirror." msgid "Get active client failed." msgstr "L'obtention du client actif a échoué." msgid "Get array info error." msgstr "Erreur lors de l'obtention d'informations sur la matrice." msgid "Get cache by name error." msgstr "Erreur lors de l'obtention du cache à partir du nom." msgid "Get connected free FC wwn error." msgstr "Erreur lors de l'obtention du wwn FC libre connecté." msgid "Get engines error." msgstr "Erreur lors de l'obtention de moteurs." msgid "Get host initiators info failed." msgstr "Erreur lors de l'obtention des informations d'initiateurs hôte." msgid "Get hostgroup information error." 
msgstr "Erreur lors de l'obtention des informations hostgroup." msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "Erreur lors de l'obtention des informations de port iSCSI. Vérifiez que " "l'adresse IP cible a été configurée dans le fichier de configuration huawei." msgid "Get iSCSI port information error." msgstr "Erreur lors de l'obtention des informations de port iSCSI." msgid "Get iSCSI target port error." msgstr "Erreur lors de l'obtention du port iSCSI cible." msgid "Get lun id by name error." msgstr "Erreur d'obtention de l'ID de numéro d'unité logique (lun id) par nom." msgid "Get lun migration task error." msgstr "Erreur lors de la création de migration de numéro d'unité logique." msgid "Get lungroup id by lun id error." msgstr "" "Erreur lors de l'obtention d'ID lungroup à partir de l'ID de numéro d'unité " "logique." msgid "Get lungroup information error." msgstr "Erreur lors de l'obtention des informations lungroup." msgid "Get migration task error." msgstr "Erreur lors de l'obtention de la tâche de migration." msgid "Get pair failed." msgstr "L'obtention d'une paire a échoué." msgid "Get partition by name error." msgstr "Erreur lors de l'obtention de la partition à partir du nom." msgid "Get partition by partition id error." msgstr "" "Erreur lors de l'obtention de la partition à partir de l'ID de partition." msgid "Get port group by view error." msgstr "Erreur lors de l'obtention de groupe de ports par vue." msgid "Get port group error." msgstr "Erreur lors de l'obtention du groupe de ports." msgid "Get port groups by port error." msgstr "Erreur lors de l'obtention de groupes de ports par port." msgid "Get ports by port group error." msgstr "Erreur lors de l'obtention de ports par groupe de ports." msgid "Get remote device info failed." msgstr "L'obtention d'informations sur l'unité distante a échoué." msgid "Get remote devices error." msgstr "Erreur lors de l'obtention d'unités distantes." msgid "Get smartcache by cache id error." msgstr "Erreur lors de l'obtention de smartcache à partir de l'ID du cache." msgid "Get snapshot error." msgstr "Erreur lors de l'obtention de l'instantané." msgid "Get snapshot id error." msgstr "Erreur lors de l'obtention de l'ID de l'instantané." msgid "Get target IP error." msgstr "Erreur lors de l'obtention de l'adresse IP cible." msgid "Get target LUN of SplitMirror error." msgstr "" "Erreur lors de l'obtention du numéro d'unité logique (LUN) cible de " "SplitMirror." msgid "Get views by port group error." msgstr "Erreur lors de l'obtention de vues par port." msgid "Get volume by name error." msgstr "Erreur lors de l'obtention du volume à partir du nom." msgid "Get volume error." msgstr "Erreur lors de l'obtention du volume." #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Impossible de mettre à jour les métadonnées Glance, la clé %(key)s existe " "pour l'ID volume %(volume_id)s" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "Métadonnées Glance introuvables pour le volume/instantané %(id)s." 
#, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "Le fichier de configuration Gluster dans %(config)s n'existe pas" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Echec de l'API Google Cloud Storage : %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Echec de connexion de Google Cloud Storage : %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Echec d'authentification oauth2 de Google Cloud Storage : %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "Informations de chemin incorrectes reçues de DRBDmanage : (%s)" msgid "HBSD error occurs." msgstr "Une erreur HBSD s'est produite." msgid "HNAS has disconnected SSC" msgstr "HNAS a déconnecté SSC" msgid "HPELeftHand url not found" msgstr "URL HPELeftHand introuvable" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." msgstr "" "La vérification du certificat HTTPS a été demandée mais ne peut pas être " "activée avec la version de module purestorage %(version)s. Effectuez une " "mise à niveau vers une version plus récente pour activer cette fonction." #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "La taille de bloc de hachage a changé depuis la dernière sauvegarde. " "Nouvelle taille de bloc de hachage : %(new)s. Ancienne taille de bloc de " "hachage : %(old)s. Effectuez une sauvegarde intégrale." #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "Vous n'avez pas créé de niveau(x) %(tier_levels)s." #, python-format msgid "Hint \"%s\" not supported." msgstr "Suggestion \"%s\" non prise en charge." msgid "Host" msgstr "Hôte" #, python-format msgid "Host %(host)s could not be found." msgstr "L'hôte %(host)s est introuvable." #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "L'hôte %(host)s ne correspond pas au contenu du certificat x509 : CommonName " "%(commonName)s." #, python-format msgid "Host %s has no FC initiators" msgstr "L'hôte %s n'a aucun demandeur FC" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "L'hôte %s n'a aucun initiateur iSCSI" #, python-format msgid "Host '%s' could not be found." msgstr "L'hôte '%s' est introuvable." #, python-format msgid "Host group with name %s not found" msgstr "Le groupe d'hôtes nommé %s est introuvable" #, python-format msgid "Host group with ref %s not found" msgstr "Le groupe d'hôtes avec la référence %s est introuvable" msgid "Host is NOT Frozen." msgstr "L'hôte N'EST PAS figé." msgid "Host is already Frozen." msgstr "L'hôte est déjà figé." msgid "Host not found" msgstr "Hôte introuvable" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "Hôte introuvable. La suppression de %(service)s sur %(host)s a échoué." #, python-format msgid "Host replication_status must be %s to failover." msgstr "" "La valeur de replication_status de l'hôte doit être %s pour l'opération de " "basculement." #, python-format msgid "Host type %s not supported." msgstr "Type d'hôte %s non pris en charge." #, python-format msgid "Host with ports %(ports)s not found." msgstr "Aucun hôte avec ports %(ports)s n'a été trouvé." 
msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "" "Hypermetro et Replication ne peuvent pas être utilisés dans le même " "paramètre volume_type." #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "Le groupe d'E-S %(iogrp)d n'est pas valide ; les groupes d'E-S disponibles " "sont %(avail)s." msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "Adresse IP/nom d'hôte de l'API Blockbridge." msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "Si la compression est définie sur True, rsize doit également être définie " "(autre que -1). " msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "Si le paramètre nofmtdisk est défini avec la valeur True, rsize doit " "également être défini avec la valeur -1." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "Valeur non admise '%(prot)s' spécifiée pour " "flashsystem_connection_protocol : la ou les valeurs valides sont %(enabled)s." msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "Valeur non conforme indiquée pour IOTYPE : 0, 1 ou 2." msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "" "Valeur non valide spécifiée pour smarttier : définissez-la sur 0, 1, 2 ou 3." msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "Valeur non admise spécifiée pour storwize_svc_vol_grainsize : définissez-la " "sur 32, 64, 128 ou 256." msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "Valeur incorrecte spécifiée pour thin : vous ne pouvez pas spécifier thin et " "thick en même temps." #, python-format msgid "Image %(image_id)s could not be found." msgstr "L'image %(image_id)s est introuvable." #, python-format msgid "Image %(image_id)s is not active." msgstr "L'image %(image_id)s n'est pas active." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "L'image %(image_id)s est inacceptable : %(reason)s" msgid "Image location not present." msgstr "Emplacement de l'image introuvable." #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "La taille virtuelle de l'image, %(image_size)d Go, ne peut pas être hébergée " "dans un volume avec une taille de %(volume_size)d Go." msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "Erreur ImageBusy générée lors de la suppression du volume rbd. Cela peut " "être dû à une interruption de connexion d'un client et, dans ce cas, il " "suffit parfois d'effectuer une nouvelle tentative de suppression après un " "délai de 30 secondes." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "Echec d'importation d'enregistrement, service de sauvegarde introuvable pour " "réaliser l'importation. Service de requête %(service)s" msgid "Incorrect request body format" msgstr "Format de corps de demande incorrect" msgid "Incorrect request body format." msgstr "Format de corps de demande incorrect." msgid "Incremental backups exist for this backup." 
msgstr "Les sauvegardes incrémentielles existent pour cette sauvegarde." #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Exception CLI Infortrend : %(err)s Paramètre : %(param)s (Code retour : " "%(rc)s) (Sortie : %(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "Niveau initial : {}, stratégie : {} est non valide." msgid "Input type {} is not supported." msgstr "Le type d'entrée {} n'est pas pris en charge." msgid "Input volumes or snapshots are invalid." msgstr "Les volumes ou les instantanés d'entrée ne sont pas valides." msgid "Input volumes or source volumes are invalid." msgstr "Les volumes d'entrée ou les volumes source ne sont pas valides." #, python-format msgid "Instance %(uuid)s could not be found." msgstr "Instance %(uuid)s introuvable." msgid "Insufficient free space available to extend volume." msgstr "L'espace libre disponible est insuffisant pour l'extension du volume." msgid "Insufficient privileges" msgstr "Privilèges insuffisants" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "" "Valeur de l'intervalle (en secondes) entre les nouvelles tentatives de " "connexion au cluster ceph cluster." #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "Ports %(protocol)s %(port)s non valides spécifiés pour io_port_list." #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "Domaine 3PAR non valide : %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "Valeur ALUA non valide. La valeur de ALUA doit être 1 ou 0." msgid "Invalid Ceph args provided for backup rbd operation" msgstr "Args Ceph non valide pour l'opération rbd de sauvegarde" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "Instantané de groupe de cohérence non valide : %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "Groupe de cohérence non valide : %(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "Groupe de cohérence (ConsistencyGroup) non valide : le statut du groupe de " "cohérence doit être available (disponible) ou error (erreur), mais le statut " "actuel est : in-use (en cours d'utilisation)" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "ConsistencyGroup non valide : l'état du groupe de cohérence doit être " "disponible, mais l'état actuel est : %s." msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" "ConsistencyGroup non valide : Aucun hôte pour créer le groupe de cohérence" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "Version de l'API HPELeftHand API non valide détectée : %(found)s. Version " "%(minimum)s ou ultérieure requise pour la prise en charge des opérations de " "gestion et d'arrêt de gestion (manage/unmanage)." 
#, python-format msgid "Invalid IP address format: '%s'" msgstr "Format d'adresse IP non valide : '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "Spécification QoS non valide détectée lors de l'obtention de la stratégie " "QoS pour le volume %s" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "Cible de réplication non valide : %(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "Type d'authentification VNX non valide : %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Spécification de partage Virtuozzo Storage non valide : %r. Doit être : " "[MDS1[,MDS2],...:/][:MOT DE PASSE]." #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" "Version XtremIO non valide %(cur)s, version %(min)s ou ultérieure est requise" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "" "Quotas alloués non valides définis pour les quotas de projet suivants : %s" msgid "Invalid argument" msgstr "Argument non valide" msgid "Invalid argument - negative seek offset." msgstr "Argument non valide - décalage seek négatif." #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "Argument non valide - whence=%s non pris en charge" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "Argument non valide - whence=%s n'est pas pris en charge." #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "Mode de connexion '%(mode)s' non valide pour le volume %(volume_id)s." #, python-format msgid "Invalid auth key: %(reason)s" msgstr "Clé d'auth non valide : %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "Sauvegarde non valide : %(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "URL d'API barbican non valide : la version est obligatoire, par exemple " "'http[s]://|[:port]/', l'URL indiquée est : %s" msgid "Invalid cgsnapshot" msgstr "Instantané de groupe de cohérence non valide" msgid "Invalid chap user details found in CloudByte storage." msgstr "" "Informations d'utilisateur chap non valides détectées dans le stockage " "CloudByte." #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "" "Réponse d'initialisation de connexion non valide pour le volume %(name)s" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "" "Réponse d'initialisation de connexion non valide pour le volume %(name)s : " "%(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Le type de contenu %(content_type)s est invalide" msgid "Invalid credentials" msgstr "Données d'identification non valides" #, python-format msgid "Invalid directory: %s" msgstr "Répertoire non valide : %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "Type de carte d'unité de disque non valide : %(invalid_type)s." #, python-format msgid "Invalid disk backing: %s." msgstr "Sauvegarde de disque non valide : %s." #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "Type de disque non valide : %(disk_type)s." #, python-format msgid "Invalid disk type: %s." msgstr "Type de disque non valide : %s." 
#, python-format msgid "Invalid host: %(reason)s" msgstr "Hôte non valide : %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "Version de hpe3parclient non valide détectée (%(found)s). Version " "%(minimum)s ou ultérieure requise. Exécutez \"pip install --upgrade " "python-3parclient\" pour mettre à niveau le client hpe3parclient." #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "Version de hpelefthandclient non valide détectée (%(found)s). Version " "%(minimum)s ou ultérieure requise. Exécutez \"pip install --upgrade python-" "lefthandclient\" pour mettre à niveau le client hpe3lefthandclient." #, python-format msgid "Invalid image href %(image_href)s." msgstr "href %(image_href)s d'image non valide." msgid "Invalid image identifier or unable to access requested image." msgstr "" "Identificateur d'image non valide ou accès impossible à l'image demandée." msgid "Invalid imageRef provided." msgstr "imageRef fournie non valide." msgid "Invalid initiator value received" msgstr "Valeur d'initiateur non valide reçue" msgid "Invalid input" msgstr "Entrée incorrecte" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Entrée invalide reçue : %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtre is_public non valide [%s]" #, python-format msgid "Invalid lun type %s is configured." msgstr "Le type lun non valide %s est configuré." #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Taille de métadonnée invalide : %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Métadonnée invalide : %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "Base du point de montage non valide : %s" #, python-format msgid "Invalid mount point base: %s." msgstr "Base de point de montage non valide : %s." #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Nouveau nom snapCPG non valide pour confirmation. new_snap_cpg='%s'." #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Numéro de port %(config)s non valide pour le port Coho rpc " #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "" "Le paramètre PrefetchType non valide '%s' est configuré. PrefetchType doit " "avoir la valeur 0, 1, 2 ou 3." #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "Spécifications QoS non valides : %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "Demande non valide de connexion du volume à une cible non valide" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "Demande non valide de rattachement du volume dans un mode non valide. Le " "mode d'attachement doit être 'rw' ou 'ro'" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Expiration de réservation non valide %(expire)s." msgid "Invalid response header from RPC server" msgstr "En-tête de réponse non valide du serveur RPC " #, python-format msgid "Invalid secondary id %s." msgstr "ID secondaire non valide %s." #, python-format msgid "Invalid secondary_backend_id specified. 
Valid backend id is %s." msgstr "" "ID de back-end secondaire (secondary_backend_id) non valide indiqué. L'ID de " "back-end valide est %s." msgid "Invalid service catalog json." msgstr "json de catalogue de service non valide." msgid "Invalid sheepdog cluster status." msgstr "Statut de cluster Sheepdog non valide." #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "snapshot invalide : %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "Statut non valide : '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "Pool de stockage non valide %s demandé. Echec de la nouvelle saisie." #, python-format msgid "Invalid storage pool %s specificed." msgstr "Pool de stockage %s non valide spécifié." msgid "Invalid storage pool is configured." msgstr "Un pool de stockage non valide est configuré." #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "Mode de synchronisation non valide indiqué, le mode autorisé est %s." msgid "Invalid transport type." msgstr "Type de transport non valide." #, python-format msgid "Invalid update setting: '%s'" msgstr "Paramètre de mise à jour non valide : '%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "URL non valide : doit être au format 'http[s]://|[:port]/" "', l'URL indiquée est : %s" #, python-format msgid "Invalid value '%s' for force." msgstr "Valeur invalide '%s' pour le 'forçage'." #, python-format msgid "Invalid value '%s' for force. " msgstr "Valeur non valide '%s' pour force. " #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "" "Valeur '%s' non valide pour is_public. Valeurs admises : True ou False." #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "La valeur '%s' n'est pas valide pour skip_validation." #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "Valeur non valide pour 'bootable': '%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "Valeur non valide pour 'force': '%s'" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "Valeur non valide pour 'readonly': '%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "Valeur non valide pour 'scheduler_max_attempts', doit être >= 1" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "" "Valeur non valide pour l'option de configuration NetApp netapp_host_type." msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "" "Valeur non valide pour l'option de configuration NetApp netapp_lun_ostype." #, python-format msgid "Invalid value for age, %(age)s" msgstr "Valeur non valide pour 'age', %(age)s" #, python-format msgid "Invalid value: \"%s\"" msgstr "Valeur non valide : \"%s\"" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "La taille de volume fournie pour la demande de création n'est pas valide : " "%s (l'argument de taille doit être un entier (ou représentation de chaîne " "d'un entier) et supérieur à zéro)." 
#, python-format msgid "Invalid volume type: %(reason)s" msgstr "Type de volume non valide : %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Volume invalide : %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "Volume non valide : Impossible d'ajouter le volume %(volume_id)s au groupe " "de cohérence %(group_id)s car le volume est dans un état non valide : " "%(status)s. Les états valides sont : ('disponible', 'en cours " "d'utilisation')." #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "Volume non valide : Impossible d'ajouter le volume %(volume_id)s au groupe " "de cohérence %(group_id)s car le type de volume %(volume_type)s n'est pas " "pris en charge par le groupe." #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "Volume non valide : Impossible d'ajouter l'uuid de volume fictif au groupe " "de cohérence %(group_id)s car le volume est introuvable." #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "Volume non valide : Impossible de supprimer l'uuid de volume fictif du " "groupe de cohérence %(group_id)s car il ne se trouve pas dans le groupe." #, python-format msgid "Invalid volume_type passed: %s." msgstr "volume_type non valide transmis : %s." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "Un type de volume non valide a été soumis : %s (le type demandé n'est pas " "compatible ; il doit correspondre au volume source ou vous devez omettre " "l'argument type)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "volume_type non valide indiqué : %s (le type demandé n'est pas compatible ; " "il est recommandé d'omettre l'argument type)." #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "volume_type non valide fourni : %s (le type demandé doit être pris en charge " "par ce groupe de cohérence)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "Format de wwpn %(wwpns)s non valide" msgid "Invoking web service failed." msgstr "L'appel du service Web a échoué." msgid "Issue encountered waiting for job." msgstr "Erreur rencontrée durant l'attente du travail." msgid "Issue encountered waiting for synchronization." msgstr "Erreur rencontrée durant l'attente de la synchronisation." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "Le lancement d'un basculement a échoué car la réplication n'a pas été " "configurée correctement." msgid "Item not found" msgstr "Élément introuvable" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" "ID de travail introuvable dans la réponse CloudByte à Créer un volume [%s]." #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." 
msgstr "" "ID de travail non trouvé dans la réponse CloudByte à la suppression du " "volume [%s]." msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "Les noms de clé peuvent seulement contenir des caractères alphanumériques, " "des traits de soulignement, des points, des signes deux-points et des traits " "d'union." #, python-format msgid "KeyError: %s" msgstr "KeyError : %s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "" "Keystone version 3 ou ultérieure doit être utilisé pour la prise en charge " "de quotas imbriqués." #, python-format msgid "LU does not exist for volume: %s" msgstr "L'unité logique n'existe pas pour le volume : %s" msgid "LUN export failed!" msgstr "L'exportation de numéro d'unité logique a échoué. " msgid "LUN id({}) is not valid." msgstr "ID LUN ({}) non valide." msgid "LUN map overflow on every channel." msgstr "Dépassement de mappe de numéro d'unité logique sur chaque canal." #, python-format msgid "LUN not found with given ref %s." msgstr "LUN introuvable avec la réf donnée %s." msgid "LUN number ({}) is not an integer." msgstr "Le numéro d'unité logique (LUN) ({}) n'est pas un entier." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "" "Le nombre de numéros d'unité logique est hors bornes sur l'ID de canal : " "%(ch_id)s." #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "LUN avec la réf donnée %(ref)s ne satisfait pas le type de volume. Assurez-" "vous que le volume LUN avec fonctionnalités ssc est présent sur vserver " "%(vs)s." #, python-format msgid "Last %s cinder syslog entries:-" msgstr "Dernières entrées Cinder syslog %s :-" msgid "LeftHand cluster not found" msgstr "Cluster LeftHand introuvable" msgid "License is unavailable." msgstr "La licence n'est pas disponible." #, python-format msgid "Line %(dis)d : %(line)s" msgstr "Ligne %(dis)d : %(line)s" msgid "Link path already exists and its not a symlink" msgstr "Le chemin de lien existe déjà et n'est pas un lien symbolique" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "Le clone lié du volume source n'est pas pris en charge à l'état : %s." msgid "Lock acquisition failed." msgstr "L'acquisition du verrou a échoué." msgid "Logout session error." msgstr "Erreur de déconnexion de la session." msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "Service de recherche non configuré. L'option de configuration pour " "fc_san_lookup_service doit indiquer une mise en oeuvre concrète du service " "de recherche." msgid "Lun migration error." msgstr "Erreur de migration du numéro d'unité logique." #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "" "La valeur MD5 de l'objet : %(object_name)s avant : %(md5)s et après : " "%(etag)s n'est pas la même." 
#, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED : %r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED : AUTH_ERROR : %r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED : RPC_MISMATCH : %r" #, python-format msgid "Malformed fcns output string: %s" msgstr "Format de chaîne de sortie fcns incorrect : %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Format de corps de message non valide : %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "Chaîne de serveur de noms mal formée : %s" msgid "Malformed request body" msgstr "Format de corps de demande incorrect" msgid "Malformed request body." msgstr "Le corps de la requête est mal-formé." msgid "Malformed request url" msgstr "Format d'URL de demande incorrect" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "Format incorrect de la réponse à la commande %(cmd)s : %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "Format incorrect de l'attribut scheduler_hints" #, python-format msgid "Malformed show fcns database string: %s" msgstr "Format de chaîne show fcns database incorrect : %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "Configuration de zone mal formée : (switch=%(switch)s zone_config=" "%(zone_config)s)." #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "Format de statut de zone incorrect : (commutateur = %(switch)s, config de " "zone = %(zone_config)s)." msgid "Manage existing get size requires 'id'." msgstr "" "L'obtention de la taille dans l'opération de gestion de l'existant requiert " "l'élément 'id'." msgid "Manage existing snapshot not implemented." msgstr "La gestion d'instantané existant n'est pas implémentée." #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "Echec de la gestion du volume existant en raison d'une référence de back-end " "non valide %(existing_ref)s : %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "Echec de la gestion du volume existant en raison de types de volume " "différents : %(reason)s" msgid "Manage existing volume not implemented." msgstr "Gestion de volume existant non implémentée." msgid "Manage existing volume requires 'source-id'." msgstr "La gestion de volume existant requiert 'source-id'." #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "La gestion de volume n'est pas prise en charge si FAST est activé. Stratégie " "FAST : %(fastPolicyName)s." msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "" "La gestion des instantanés sur des volumes basculés n'est pas autorisée." msgid "Map info is None due to array version not supporting hypermetro." msgstr "" "La valeur de Map info est None car la version de la matrice ne prend pas en " "charge hypermetro." #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "Echec d'achèvement de la préparation du mappage %(id)s dans le délai alloué " "de %(to)d secondes. En cours de fermeture." 
#, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "" "Le masquage de la vue %(maskingViewName)s n'a pas été correctement supprimé" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "Nombre maximum de sauvegardes autorisées (%(allowed)d) dépassé" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "Nombre maximal d'instantanés autorisés (%(allowed)d) dépassé" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "Le nombre maximal de volumes autorisé, (%(allowed)d), a été dépassé pour le " "quota '%(name)s'." #, python-format msgid "May specify only one of %s" msgstr "Un seul %s doit être spécifié" msgid "Metadata backup already exists for this volume" msgstr "Une sauvegarde de métadonnées existe déjà pour ce volume" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "L'objet de sauvegarde des métadonnées '%s' existe déjà" msgid "Metadata item was not found" msgstr "Elément de métadonnées introuvable" msgid "Metadata item was not found." msgstr "L'élément Metadata est introuvable." #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "" "Taille de la clé de propriété de métadonnées %s supérieure à 255 caractères" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "" "Valeur de taille de la clé de propriété de métadonnées (%s) supérieure à 255 " "caractères" msgid "Metadata property key blank" msgstr "Propriété de métadonnées à blanc" msgid "Metadata property key blank." msgstr "Clé de propriété de métadonnées à blanc." msgid "Metadata property key greater than 255 characters." msgstr "" "Taille de la clé de propriété de métadonnées supérieure à 255 caractères." msgid "Metadata property value greater than 255 characters." msgstr "Valeur de la propriété de métadonnées supérieure à 255 caractères." msgid "Metadata restore failed due to incompatible version" msgstr "" "Echec de restauration des métadonnées en raison d'une version incompatible" msgid "Metadata restore failed due to incompatible version." msgstr "" "Echec de restauration des métadonnées en raison d'une version incompatible." #, python-format msgid "Migrate volume %(src)s failed." msgstr "La migration du volume %(src)s a échoué." #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "La migration du volume entre le volume source %(src)s et le volume de " "destination %(dst)s a échoué." #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "" "La migration du numéro d'unité logique %s a été interrompue ou a rencontré " "une erreur." msgid "MirrorView/S enabler is not installed." msgstr "L'optimiseur MirrorView/S n'est pas installé." msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "Module Python 'purestorage' manquant. Assurez-vous que la bibliothèque est " "installée et disponible." msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "Paramètre de configuration SAN Fibre Channel - fc_fabric_names" msgid "Missing request body" msgstr "Corps de la demande manquant" msgid "Missing request body." msgstr "Corps de la demande manquant." 
#, python-format msgid "Missing required element '%s' in request body" msgstr "Elément requis manquant '%s' dans le corps de la demande" #, python-format msgid "Missing required element '%s' in request body." msgstr "L'élément requis '%s' est manquant dans le corps de demande." msgid "Missing required element 'consistencygroup' in request body." msgstr "" "L'élément requis 'consistencygroup' est manquant dans le corps de demande." msgid "Missing required element 'host' in request body." msgstr "L'élément requis 'host' est manquant dans le corps de la demande." msgid "Missing required element quota_class_set in request body." msgstr "Elément quota_class_set requis manquant dans le corps de demande." msgid "Missing required element snapshot in request body." msgstr "Elément snapshot requis manquant dans le corps de demande." msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "Plusieurs SerialNumbers ont été détectés alors qu'un seul était attendu pour " "cette opération. Modifiez votre fichier de configuration EMC." #, python-format msgid "Multiple copies of volume %s found." msgstr "Plusieurs copies du volume %s ont été trouvées." #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "Plusieurs correspondances ont été trouvées pour '%s'. Utilisez un ID pour " "être plus précis." msgid "Multiple profiles found." msgstr "Plusieurs profils ont été trouvés." msgid "Must implement a fallback schedule" msgstr "Doit mettre en oeuvre un calendrier de retrait" msgid "Must implement find_retype_host" msgstr "Doit implémenter find_retype_host" msgid "Must implement host_passes_filters" msgstr "Doit implémenter host_passes_filters" msgid "Must implement schedule_create_consistencygroup" msgstr "Doit implémenter schedule_create_consistencygroup" msgid "Must implement schedule_create_volume" msgstr "Doit implémenter schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "Doit implémenter schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." msgstr "Doit transmettre le wwpn ou l'hôte à lsfabric." msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "Cette commande doit être utilisée en tant qu'administrateur de cloud (cloud " "admin) à l'aide d'un fichier Keystone policy.json qui permet à " "l'administrateur de cloud de répertorier et d'obtenir n'importe quel projet." msgid "Must specify 'connector'" msgstr "Vous devez spécifier 'connector'" msgid "Must specify 'connector'." msgstr "Vous devez spécifier le 'connector'" msgid "Must specify 'host'." msgstr "Vous devez spécifier l' 'host'" msgid "Must specify 'new_volume'" msgstr "Vous devez spécifier 'new_volume'" msgid "Must specify 'status'" msgstr "Vous devez spécifier 'status'" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "Indiquer 'status', 'attach_status' ou 'migration_status' pour la mise à jour." msgid "Must specify a valid attach status" msgstr "Indiquer un état de connexion valide" msgid "Must specify a valid migration status" msgstr "Indiquer un état de migration valide" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "Spécifiez une personnalité valide %(valid)s, la valeur '%(persona)s' n'est " "pas valide." 
#, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "Spécifiez un type de mise à disposition valide %(valid)s, la valeur " "'%(prov)s' n'est pas valide." msgid "Must specify a valid status" msgstr "Indiquer un état valide" msgid "Must specify an ExtensionManager class" msgstr "Vous devez définir une classe ExtensionManager" msgid "Must specify bootable in request." msgstr "Doit indiquer bootable dans la demande." msgid "Must specify protection domain name or protection domain id." msgstr "Vous devez spécifier un nom ou un ID de domaine de protection." msgid "Must specify readonly in request." msgstr "Doit indiquer readonly dans la demande." msgid "Must specify snapshot source-name or source-id." msgstr "" "Vous devez spécifier la valeur source-name ou source-id de l'instantané." msgid "Must specify source-name or source-id." msgstr "Vous devez spécifier source-name ou source-id." msgid "Must specify storage pool name or id." msgstr "Vous devez spécifier le nom ou l'ID du pool de stockage." msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" "Vous devez spécifier des pools de stockage. Option : sio_storage_pools." msgid "Must supply a positive value for age" msgstr "Vous devez indiquer un entier positif pour 'age'" msgid "Must supply a positive, non-zero value for age" msgstr "Une valeur positive différente de zéro doit être indiquée pour age" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "La configuration NAS '%(name)s=%(value)s' n'est pas valide. Doit être " "'auto', 'true' ou 'false'" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "Fichier de configuration NFS dans %(config)s n'existe pas" #, python-format msgid "NFS file %s not discovered." msgstr "Fichier NFS %s non reconnu." msgid "NFS file could not be discovered." msgstr "Découverte impossible du fichier NFS." msgid "NaElement name cannot be null." msgstr "Le nom NaElement ne peut pas avoir la valeur Null." msgid "Name" msgstr "Nom" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "Nom, description, add_volumes et remove_volumes ne peuvent pas être tous " "vides dans le corps de la demande." msgid "Need non-zero volume size" msgstr "Taille de volume non nulle nécessaire" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "Ni MSG_DENIED ni MSG_ACCEPTED : %r" msgid "NetApp Cinder Driver exception." msgstr "Exception de pilote NetApp Cinder." #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "La nouvelle taille pour l'extension doit être supérieure à la taille " "actuelle. (Taille actuelle : %(size)s, taille après l'extension : " "%(new_size)s)." #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "La nouvelle taille doit être supérieure à la taille réelle du stockage du " "back-end. Taille réelle : %(oldsize)s, nouvelle taille : %(newsize)s." msgid "New volume size must be specified as an integer." msgstr "La nouvelle taille du volume doit être indiquée comme entier." msgid "New volume type must be specified." msgstr "Le nouveau type de volume doit être indiqué." msgid "New volume type not specified in request_spec." 
msgstr "Nouveau type de volume non indiqué dans request_spec." #, python-format msgid "New volume_type same as original: %s." msgstr "Nouveau volume_type identique à l'original : %s." msgid "Nimble Cinder Driver exception" msgstr "Exception du pilote Nimble Cinder" msgid "No FC initiator can be added to host." msgstr "Aucun initiateur FC ne peut être ajouté à l'hôte." msgid "No FC port connected to fabric." msgstr "Aucun port FC n'est connecté à l'ensemble de noeuds (fabric)." msgid "No FCP targets found" msgstr "Aucune cible FCP détectée" msgid "No Port Group elements found in config file." msgstr "" "Aucun élément de groupe de ports n'a été trouvé dans le fichier de " "configuration." msgid "No VF ID is defined in the configuration file." msgstr "Aucun ID VF n'est défini dans le fichier de configuration." msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "Il n'existe aucun portail iSCSI actif dans les IP iSCSI soumis" #, python-format msgid "No available service named %s" msgstr "Aucun service disponible dénommé %s" #, python-format msgid "No backup with id %s" msgstr "Aucune sauvegarde avec l'ID %s" msgid "No backups available to do an incremental backup." msgstr "Aucune sauvegarde disponible pour faire une sauvegarde incrémentielle." msgid "No big enough free disk" msgstr "Aucun disque libre de taille suffisante" #, python-format msgid "No cgsnapshot with id %s" msgstr "Aucun instantané de groupe de cohérence avec l'ID %s" msgid "No cinder entries in syslog!" msgstr "Aucune entrée Cinder dans syslog" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "Aucun LUN cloné nommé %s détecté dans le gestionnaire de fichiers" msgid "No config node found." msgstr "Aucun noeud de configuration n'a été trouvé." #, python-format msgid "No consistency group with id %s" msgstr "Aucun groupe de cohérence avec l'ID %s" #, python-format msgid "No element by given name %s." msgstr "Aucun élément du nom indiqué %s." msgid "No errors in logfiles!" msgstr "Aucune erreur dans le fichier de log !" #, python-format msgid "No file found with %s as backing file." msgstr "Aucun fichier trouvé avec %s comme fichier de sauvegarde." #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "Il ne reste aucun ID de numéro d'unité logique libre. Le nombre maximal de " "volumes pouvant être rattachés à l'hôte (%s) a été dépassé." msgid "No free disk" msgstr "Aucun disque libre" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "Aucun portail iSCSI valide détecté pour %s dans la liste fournie." #, python-format msgid "No good iscsi portals found for %s." msgstr "Aucun portail iSCSI valide détecté pour %s." #, python-format msgid "No host to create consistency group %s." msgstr "Aucun hôte pour créer le groupe de cohérence %s." msgid "No iSCSI-enabled ports on target array." msgstr "Aucun port activé pour iSCSI n'existe sur la matrice cible." msgid "No image_name was specified in request." msgstr "image_name non défini dans la demande." msgid "No initiator connected to fabric." msgstr "Aucun initiateur n'est connecté à l'ensemble de noeuds (fabric)." 
#, python-format msgid "No initiator group found for initiator %s" msgstr "Aucun groupe demandeur trouvé pour le demandeur %s" msgid "No initiators found, cannot proceed" msgstr "Aucun initiateur détecté : poursuite impossible" #, python-format msgid "No interface found on cluster for ip %s" msgstr "Aucune interface trouvée dans le cluster pour l'IP %s" msgid "No ip address found." msgstr "Aucune adresse IP n'a été trouvée." msgid "No iscsi auth groups were found in CloudByte." msgstr "Aucun groupe d'authentification iscsi n'a été trouvé dans CloudByte." msgid "No iscsi initiators were found in CloudByte." msgstr "Aucun initiateur iSCSI détecté dans CloudByte." #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "Aucun service iSCSI détecté pour le volume CloudByte [%s]." msgid "No iscsi services found in CloudByte storage." msgstr "Aucun service iSCSI détecté dans le système de stockage CloudByte." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "" "Aucun fichier de clés indiqué et impossible de charger la clé depuis " "%(cert)s %(e)s." msgid "No mounted Gluster shares found" msgstr "Aucun partage Gluster monté trouvé" msgid "No mounted NFS shares found" msgstr "Aucun partage NFS monté trouvé" msgid "No mounted SMBFS shares found." msgstr "Aucun partage SMBFS monté trouvé." msgid "No mounted Virtuozzo Storage shares found" msgstr "Aucun partage de stockage Virtuozzo monté n'a été trouvé" msgid "No mounted shares found" msgstr "Aucun partage monté trouvé" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "Aucun noeud n'a été détecté dans le groupe d'E-S %(gid)s pour le volume " "%(vol)s." msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "Aucun pool n'est disponible pour l'allocation de volumes. Vérifiez que " "l'option de configuration netapp_pool_name_search_pattern a été définie " "correctement." msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "Aucune réponse n'a été reçue de l'appel d'API CloudByte de liste " "d'utilisateur d'authentification iSCSI." msgid "No response was received from CloudByte storage list tsm API call." msgstr "" "Aucune réponse n'a été reçue de CloudByte pour l'affichage de l'appel API " "TSM du système de stockage." msgid "No response was received from CloudByte's list filesystem api call." msgstr "" "Aucune réponse n'a été reçue de CloudByte pour l'affichage de l'appel API du " "système de fichiers." msgid "No service VIP configured and no nexenta_client_address" msgstr "" "Aucune adresse IP virtuelle (VIP) de service configurée et aucun élément " "nexenta_client_address" #, python-format msgid "No snap found with %s as backing file." msgstr "Aucun instantané trouvé avec %s comme fichier de sauvegarde." #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "" "Aucune image instantanée n'a été trouvée dans le groupe d'instantanés %s." #, python-format msgid "No snapshots could be found on volume %s." msgstr "Aucun instantané n'a été trouvé sur le volume %s." #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "Aucun instantané source n'a été fourni pour créer le groupe de cohérence %s." 
#, python-format msgid "No storage path found for export path %s" msgstr "Aucun chemin de stockage trouvé pour le chemin d'exportation %s" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "Aucune spécif. QoS du type %(specs_id)s." msgid "No suitable discovery ip found" msgstr "Aucune adresse IP correcte trouvée" #, python-format msgid "No support to restore backup version %s" msgstr "" "Aucune prise en charge pour la restauration de la version de sauvegarde %s" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "Aucun ID cible trouvé pour le volume %(volume_id)s." msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "Aucun ID de numéro d'unité logique inutilisé n'est disponible sur l'hôte ; " "multiattach est activé, ce qui requiert que tous les ID de numéro d'unité " "logique soient uniques à travers le groupe d'hôtes complet." #, python-format msgid "No valid host was found. %(reason)s" msgstr "Hôte non valide trouvé. %(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "Aucun hôte valide pour le volume %(id)s de type %(type)s" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "Aucun disque virtuel (vdisk) avec l'UID indiqué par ref %s." #, python-format msgid "No views found for LUN: %s" msgstr "Aucune vue n'a été trouvée pour le numéro d'unité logique (LUN) : %s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "Aucun volume dans le cluster contenant le serveur virtuel %(vserver)s et le " "chemin de jonction %(junction)s " msgid "No volume service(s) started successfully, terminating." msgstr "Aucun service de volume n'a pu démarrer, abandon." msgid "No volume was found at CloudByte storage." msgstr "Aucun volume n'a été détecté dans le système de stockage CloudByte." msgid "No volume_type should be provided when creating test replica." msgstr "" "Aucun volume_type ne doit être fourni lors de la création de la réplique de " "test." msgid "No volumes found in CloudByte storage." msgstr "Aucun volume détecté dans le système de stockage CloudByte." msgid "No weighed hosts available" msgstr "Aucun hôte pondéré n'est disponible" #, python-format msgid "Not a valid string: %s" msgstr "Chaîne non valide : %s" msgid "Not a valid value for NaElement." msgstr "Valeur non valide pour NaElement." #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "" "Impossible de trouver un magasin de données approprié pour le volume : %s." msgid "Not an rbd snapshot" msgstr "N'est pas un instantané rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Non autorisé pour l'image %(image_id)s." msgid "Not authorized." msgstr "Non autorisé." #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "Espace insuffisant sur le système dorsal (%(backend)s)" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "" "Espace de stockage insuffisant dans le partage ZFS pour exécuter cette " "opération." msgid "Not stored in rbd" msgstr "Non stocké dans rbd" msgid "Nova returned \"error\" status while creating snapshot." msgstr "Nova a renvoyé l'état \"erreur\" durant la création de l'instantané." msgid "Null response received from CloudByte's list filesystem." 
msgstr "" "Réponse nulle reçue de CloudByte pour l'affichage du système de fichiers." msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "" "Réponse Null reçue de la liste des groupes d'authentification iscsi de " "CloudByte." msgid "Null response received from CloudByte's list iscsi initiators." msgstr "" "Réponse nulle reçue de CloudByte pour l'affichage des initiateurs iSCSI." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "" "Réponse nulle reçue de CloudByte pour l'affichage du service iSCSI du volume." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "Réponse Null reçue lors de la création du volume [%s] sur le système de " "stockage CloudByte." #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" "Une réponse Null a été reçue lors de la suppression du volume [%s] du " "stockage CloudByte." #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "Réponse Null reçue lors de l'interrogation du travail [%(operation)s] basé " "[%(job)s] dans le stockage CloudByte." msgid "Number of retries if connection to ceph cluster failed." msgstr "" "Nombre de nouvelles tentatives si la connexion au cluster ceph a échoué." msgid "Object Count" msgstr "Nombre d'objets" msgid "Object Version" msgstr "Version de l'objet" msgid "Object is not a NetApp LUN." msgstr "L'objet n'est pas un numéro d'unité logique NetApp." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "Sur une opération Extend, erreur d'ajout au volume composite : " "%(volumename)s." msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "L'un des services cinder-volume est trop ancien pour accepter une demande de " "ce type. Exécutez-vous des volumes cinder Liberty-Mitaka mixtes ?" msgid "One of the required inputs from host, port or scheme was not found." msgstr "" "L'une des entrées requises de l'hôte, du port ou du schéma n'a pas été " "trouvée." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "Seules les demandes %(value)s %(verb)s peuvent être envoyées à %(uri)s " "toutes les %(unit_string)s." msgid "Only one limit can be set in a QoS spec." msgstr "Une seule limite peut être définie dans une spécification QoS." msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "Seuls les utilisateurs avec portée de jeton sur les parents immédiats ou les " "projets racine sont autoriser à visualiser ses quotas enfants." msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "Seuls les volumes gérés par OpenStack peuvent être non gérés." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" "Échec de l'opération avec statut = %(status)s. Vidage complet : %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "Opération non prise en charge : %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "L'option gpfs_images_dir n'est pas correctement définie." msgid "Option gpfs_images_share_mode is not set correctly." msgstr "L'option gpfs_images_share_mode n'est pas correctement définie." 
msgid "Option gpfs_mount_point_base is not set correctly." msgstr "L'option gpfs_mount_point_base n'est pas correctement définie." msgid "Option map (cls._map) is not defined." msgstr "Mappe d'options (cls._map) non définie." #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "" "L'état d'origine de %(res)s %(prop)s doit être l'une des valeurs '%(vals)s'" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "" "Remplacer le port HTTPS pour se connecter au serveur de l'API Blockbridge." #, python-format msgid "ParseException: %s" msgstr "ParseException : %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" "Le nom de partition est None. Définissez smartpartition:partitionname dans " "la clé." msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "Mot de passe ou clé privée SSH obligatoire pour l'authentification : " "définissez l'option san_password ou san_private_key ." msgid "Path to REST server's certificate must be specified." msgstr "Le chemin d'accès au certificat du serveur REST doit être spécifié." #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "Créez d'avance le pool %(pool_list)s. " #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "Créez d'avance le niveau %(tier_levels)s dans le pool %(pool)s. " msgid "Please re-run cinder-manage as root." msgstr "Réexécutez cinder-manage avec des droits root." msgid "Please specify a name for QoS specs." msgstr "Veuillez indiquer un nom pour les spécifications QoS." #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Le réglage des droits n'autorise pas %(action)s à être effectué(e)(s)" #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "Pool %(poolNameInStr)s introuvable." #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "Le pool %s n'existe pas dans Nexenta Store Appliance" #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "Le pool du volume ['host'] %(host)s est introuvable." #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "Le pool du volume ['host'] a échoué en renvoyant : %(ex)s." msgid "Pool is not available in the volume host field." msgstr "Le pool n'est pas disponible dans la zone d'hôte du volume." msgid "Pool is not available in the volume host fields." msgstr "Le pool n'est pas disponible dans les zones d'hôte du volume." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "Le pool nommé %(pool)s est introuvable dans le domaine %(domain)s." #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" "Le pool nommé %(pool_name)s est introuvable dans le domaine %(domain_id)s." #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "Pool : %(poolName)s. n'est pas associé au groupe de serveurs d'application " "d'archivage pour la règle FAST %(fastPolicy)s." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "PoolName doit être dans le fichier %(fileName)s." #, python-format msgid "Pools %s does not exist" msgstr "Le pool %s n'existe pas" msgid "Pools name is not set." msgstr "Le nom de pools n'a pas été défini." 
#, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "" "Statut de la copie primaire : %(status)s et synchronisation : %(sync)s." msgid "Project ID" msgstr "ID Projet" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "" "Les quotas de projet ne sont pas correctement configurés pour les quotas " "imbriqués : %(reason)s." msgid "Protection Group not ready." msgstr "Le groupe de protection n'est pas prêt." #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "Le protocole %(storage_protocol)s n'est pas pris en charge pour la famille " "de stockage %(storage_family)s." msgid "Provided backup record is missing an id" msgstr "L'ID est manquant dans l'enregistrement de sauvegarde fourni" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "Etat d'instantané %(provided)s fourni interdit pour l'instantané ayant pour " "état %(current)s." #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "Les informations du fournisseur sur le stockage CloudByte w.r.t n'ont pas " "été trouvées pour le volume OpenStack [%s]." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Echec du pilote Pure Storage Cinder : %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "Spécifications QoS %(specs_id)s déjà existantes." #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "Les spécifications QoS %(specs_id)s sont encore associées aux entités." #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "Configuration incorrecte de QoS. %s doit être > 0." #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "" "La stratégie QoS doit indiquer une valeur pour IOTYPE et une autre " "spécification qos_specs, stratégie QoS : %(qos_policy)s." #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "" "La stratégie QoS doit indiquer pour IOTYPE : 0, 1 ou 2, stratégie QoS : " "%(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "" "Conflit entre les valeurs upper_limit et lower_limit dans la stratégie QoS, " "stratégie QoS : %(qos_policy)s." #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "Spécif. QoS %(specs_id)s sans spécif. avec la clé %(specs_key)s." msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" "Les spécifications QoS ne sont pas prises en charge sur cette famille de " "stockage et version ONTAP." msgid "Qos specs still in use." msgstr "Spécifications QoS encore en service." msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "La requête par paramètre de service est obsolète. Utilisez le paramètre " "binaire à la place." msgid "Query resource pool error." msgstr "Erreur lors de l'interrogation du pool de ressources." #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "" "La limite de quota %s doit être égale ou supérieure aux ressources " "existantes." #, python-format msgid "Quota class %(class_name)s could not be found." 
msgstr "Classe de quota %(class_name)s introuvable." msgid "Quota could not be found" msgstr "Quota introuvable" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Quota dépassé pour les ressources : %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Quota dépassé: code=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Quota du projet %(project_id)s introuvable." #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "La limite de quota n'est pas valide pour le projet '%(proj)s' de la " "ressource '%(res)s' : la limite %(limit)d est inférieure à la valeur " "utilisée %(used)d" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "Réservation de quota %(uuid)s introuvable." #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "Utilisation de quota pour le projet %(project_id)s introuvable." #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "Echec de l'op. diff RBD - (ret=%(ret)s stderr=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "L'IP du serveur REST doit être spécifiée." msgid "REST server password must by specified." msgstr "Le mot de passe de l'utilisateur du serveur REST doit être spécifié." msgid "REST server username must by specified." msgstr "Le nom d'utilisateur du serveur REST doit être spécifié." msgid "RPC Version" msgstr "Version RPC" msgid "RPC server response is incomplete" msgstr "Réponse incomplète du serveur RPC" msgid "Raid did not have MCS Channel." msgstr "Raid n'avait pas de canal MCS." #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "Atteignez la limitation définie par l'option de configuration " "max_luns_per_storage_group. L'opération d'ajout de %(vol)s dans le groupe de " "stockage %(sg)s est rejetée." #, python-format msgid "Received error string: %s" msgstr "Chaîne d'erreur reçue : %s" msgid "Reference must be for an unmanaged snapshot." msgstr "La référence doit s'appliquer à un instantané non géré." msgid "Reference must be for an unmanaged virtual volume." msgstr "La référence doit s'appliquer à un volume virtuel non géré." msgid "Reference must be the name of an unmanaged snapshot." msgstr "La référence doit correspondre au nom d'un instantané non géré." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "La référence doit s'appliquer à un nom de volume virtuel non géré." msgid "Reference must contain either source-id or source-name element." msgstr "La référence doit contenir l'élément source-id ou source-name." msgid "Reference must contain either source-name or source-id element." msgstr "La référence doit contenir l'élément source-name ou source-id." msgid "Reference must contain source-id or source-name element." msgstr "La référence doit contenir l'élément source-id ou source-name." msgid "Reference must contain source-id or source-name key." msgstr "La référence doit contenir une clé source-id ou source-name." msgid "Reference must contain source-id or source-name." msgstr "La référence doit contenir source-id ou source-name." 
msgid "Reference must contain source-id." msgstr "La référence doit contenir source-id." msgid "Reference must contain source-name element." msgstr "La référence doit contenir l'élément source-name." msgid "Reference must contain source-name or source-id." msgstr "La référence doit contenir l'élément source-name ou source-id." msgid "Reference must contain source-name." msgstr "La référence doit contenir source-name." msgid "Reference to volume to be managed must contain source-name." msgstr "La référence au volume à gérer doit contenir l'élément source-name." #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "La référence au volume %s à gérer doit contenir l'élément source-name." #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "Refus de migrer l'ID volume %(id)s. Vérifiez votre configuration car la " "source et la destination indiquent le même groupe de volumes : %(name)s." msgid "Remote pool cannot be found." msgstr "Pool distant introuvable." msgid "Remove CHAP error." msgstr "Erreur lors de la suppression CHAP." msgid "Remove fc from host error." msgstr "Erreur lors de la suppression fc de l'hôte." msgid "Remove host from array error." msgstr "Erreur lors de la suppression de l'hôte de la matrice." msgid "Remove host from hostgroup error." msgstr "Erreur lors du retrait de l'hôte de hostgroup." msgid "Remove iscsi from host error." msgstr "Erreur lors de la suppression iscsi de l'hôte." msgid "Remove lun from QoS error." msgstr "Erreur lors de la suppression de numéro d'unité logique (lun) de QoS." msgid "Remove lun from cache error." msgstr "Erreur lors de la suppression du numéro d'unité logique du cache." msgid "Remove lun from partition error." msgstr "" "Erreur lors de la suppression de numéro d'unité logique de la partition." msgid "Remove port from port group error." msgstr "Erreur lors de la suppression d'un port du groupe de ports." msgid "Remove volume export failed." msgstr "La suppression de l'exportation du volume a échoué." msgid "Rename lun on array error." msgstr "" "Erreur lors de la modification du nom du numéro d'unité logique sur la " "matrice." msgid "Rename snapshot on array error." msgstr "Erreur lors de la modification du nom de l'instantané sur la matrice." #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "La réplication %(name)s vers %(ssn)s a échoué." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "" "Fonction du service de réplication introuvable sur %(storageSystemName)s." #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "Service de réplication introuvable sur %(storageSystemName)s." msgid "Replication is not enabled" msgstr "Réplication non activée" msgid "Replication is not enabled for volume" msgstr "Réplication non activée pour le volume" msgid "Replication not allowed yet." msgstr "La réplication n'est pas autorisée pour le moment." 
#, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "L'état de la réplication du volume doit être active ou active-stopped, mais " "l'état actuel est : %s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "L'état de la réplication du volume doit être inactive, active-stopped ou " "error, mais l'état en cours est : %s" msgid "Request body and URI mismatch" msgstr "Corps et URI de demande discordants" msgid "Request body contains too many items" msgstr "Le corps de la demande contient un trop grand nombre d'éléments" msgid "Request body contains too many items." msgstr "Le corps de demande contient trop d'éléments." msgid "Request body empty" msgstr "Le corps de la demande est vide" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "La demande au cluster Datera a renvoyé un statut incorrect : %(status)s | " "%(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "La sauvegarde demandée dépasse le quota de sauvegarde autorisé en Go. " "%(requested)s Go demandés. Le quota est de %(quota)s Go et %(consumed)s Go " "ont été consommés." #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "Le volume ou l'instantané demandé dépasse le quota %(name)s autorisé. " "%(requested)s Go demandés. Le quota est de %(quota)s Go et %(consumed)s Go " "ont été consommés." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "La taille de volume demandée, %(size)d, dépasse la limite maximale autorisée " "%(limit)d." msgid "Required configuration not found" msgstr "Configuration obligatoire non trouvée" #, python-format msgid "Required flag %s is not set" msgstr "L'indicateur obligatoire %s n'est pas défini" msgid "Requires an NaServer instance." msgstr "Nécessite une instance NaServer." #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Réinitialisation du statut de la sauvegarde interrompue, le service de " "sauvegarde actuellement configuré [%(configured_service)s] ne correspond pas " "au service de sauvegarde utilisé pour créer cette sauvegarde " "[%(backup_service)s]." #, python-format msgid "Resizing clone %s failed." msgstr "Le redimensionnement du clone %s a échoué." msgid "Resizing image file failed." msgstr "Echec de redimensionnement du fichier image." msgid "Resource could not be found." msgstr "Ressource introuvable." msgid "Resource not ready." msgstr "Ressource non prête." #, python-format msgid "Response error - %s." msgstr "Erreur de réponse - %s." msgid "Response error - The storage-system is offline." msgstr "Erreur de réponse - Le système de stockage est hors ligne." #, python-format msgid "Response error code - %s." msgstr "Code d'erreur de réponse - %s." msgid "RestURL is not configured." msgstr "RestURL n'est pas configuré." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." 
msgstr "" "Restauration de la sauvegarde interrompue : état du volume attendu " "%(expected_status)s, mais état %(actual_status)s obtenu." #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "Restauration de la sauvegarde interrompue, le service de sauvegarde " "actuellement configuré [%(configured_service)s] ne correspond pas au service " "de sauvegarde utilisé pour créer cette sauvegarde [%(backup_service)s]." #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Restauration de la sauvegarde interrompue : état de la sauvegarde attendu " "%(expected_status)s, mais état %(actual_status)s obtenu." #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Nombre différent de volumes Solidfire récupérés pour les instantanés Cinder " "fournis. Récupérés : %(ret)s Souhaités : %(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "Nombre différent de volumes Solidfire récupérés pour les volumes Cinder " "fournis. Récupérés : %(ret)s Souhaités : %(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "Nombre de nouvelles tentatives dépassé pour la commande : %s" msgid "Retryable SolidFire Exception encountered" msgstr "Exception SolidFire réessayable rencontrée" msgid "Retype cannot change encryption requirements." msgstr "Une nouvelle saisie ne peut pas modifier des exigences de chiffrement." #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "Une nouvelle saisie ne peut pas modifier les spécifications QoS frontales " "pour le volume en cours d'utilisation : %s." msgid "Retype requires migration but is not allowed." msgstr "La nouvelle saisie nécessite la migration, mais n'est pas autorisée." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "Echec de l'annulation du volume %(volumeName)s. Veuillez contacter votre " "administrateur système pour rétablir votre volume au groupe de stockage par " "défaut pour la règle fast %(fastPolicyName)s." #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "Annulation de %(volumeName)s par sa suppression." #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" "L'exécution de Cinder avec VMware vCenter d'une version inférieure à %s " "n'est pas autorisée." msgid "SAN product is not configured." msgstr "Le produit SAN n'est pas configuré." msgid "SAN protocol is not configured." msgstr "Le protocole SAN n'est pas configuré." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" "Configuration SMBFS 'smbfs_oversub_ratio' non valide. Doit être > 0 : %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "Configuration SMBFS 'smbfs_used_ratio' non valide. Doit être > 0 et <= 1,0 : " "%s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." 
msgstr "Le fichier de configuration SMBFS dans %(config)s n'existe pas." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "Fichier de configuration SMBFS non défini (smbfs_shares_config)." #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" "Echec de la commande SSH après '%(total_attempts)r' tentatives : " "'%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "" "La commande SSH a échoué avec l'erreur : '%(err)s', Commande : '%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Injection de commande SSH détectée : %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "Echec de la connexion SSH pour %(fabric)s avec l'erreur : %(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "Le certificat SSL a expiré le %s." #, python-format msgid "SSL error: %(arg)s." msgstr "Erreur SSL : %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Filtre hôte du planificateur %(filter_name)s introuvable." #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "Peseur de l'hôte du planificateur %(weigher_name)s introuvable." #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "Statut de la copie secondaire : %(status)s et synchronisation : %(sync)s, " "progression de la synchronisation : %(progress)s%%." #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" "L'ID secondaire ne peut pas être identique à la matrice principale, " "backend_id = %(secondary)s." #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "SerialNumber doit être dans le fichier %(fileName)s." #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "Le service %(service)s sur l'hôte %(host)s a été retiré." #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "Service %(service_id)s introuvable sur l'hôte %(host)s." #, python-format msgid "Service %(service_id)s could not be found." msgstr "Le service %(service_id)s est introuvable." #, python-format msgid "Service %s not found." msgstr "Service %s non trouvé." msgid "Service is too old to fulfil this request." msgstr "Service trop ancien pour satisfaire cette demande." msgid "Service is unavailable at this time." msgstr "Le service est indisponible actuellement." msgid "Service not found." msgstr "Service introuvable." msgid "Set pair secondary access error." msgstr "Erreur lors de la définition d'accès secondaire à une paire." msgid "Sets thin provisioning." msgstr "Active l'allocation de ressources à la demande." msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "La définition du groupe de règles LUN QoS n'est pas prise en charge sur " "cette famille de stockage et version ONTAP." msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "La définition du groupe de règles qos du fichier n'est pas prise en charge " "sur cette famille de stockage et version ontap." #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." 
msgstr "" "Le partage %s est ignoré car le format n'est pas valide. Le format valide " "est adresse:/exportation. Vérifiez les paramètres nas_ip et nas_share_path." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" "Le partage dans %(dir)s n'est pas accessible en écriture par le service de " "volumes Cinder. Les opérations d'instantané ne seront pas prises en charge." #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Erreur d'E-S Sheepdog I/O, commande concernée : \"%s\"." msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "Les opérations Show ne peuvent porter que sur des projets dans la même " "hiérarchie de projet que celle définie pour la portée utilisateurs." msgid "Size" msgstr "Taille" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "" "La taille du volume %s est introuvable. Impossible d'effectuer une " "suppression sécurisée." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "La taille est %(image_size)d Go et ne s'adapte pas dans un volume d'une " "taille de %(volume_size)d Go." #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "La taille de l'image définie %(image_size)s Go est supérieure à la taille du " "volume %(volume_size)s Go." #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "Il a été demandé de supprimer l'instantané %(id)s alors qu'on attendait " "qu'il soit disponible. Une demande simultanée a peut-être été effectuée." #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "L'instantané %(id)s a été trouvé à l'état %(state)s et non pas à l'état " "'deleting' (suppression en cours) lors de la suppression en cascade." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "L'instantané %(snapshot_id)s est introuvable." #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "L'instantané %(snapshot_id)s n'a pas de métadonnées avec la clé " "%(metadata_key)s." #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "L'instantané %s ne doit pas faire partie d'un groupe de cohérence." #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "L'instantané '%s' n'existe pas sur la matrice." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "L'instantané ne peut pas être créé car le volume %(vol_id)s n'est pas " "disponible, état actuel du volume : %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "L'instantané ne peut pas être créé pendant la migration du volume." msgid "Snapshot of secondary replica is not allowed." msgstr "Instantané de réplique secondaire non autorisé." #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "Instantané du volume non pris en charge à l'état : %s." #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "Ressource d'instantané \"%s\" non déployée ailleurs ?" 
msgid "Snapshot size must be multiple of 1 GB." msgstr "La taille de l'instantané doit être un multiple de 1 Go." #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "Statut d'instantané %(cur)s interdit pour update_snapshot_status" msgid "Snapshot status must be \"available\" to clone." msgstr "" "L'état de l'instantané doit être \"disponible\" pour que le clonage soit " "possible." #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "" "L'instantané à sauvegarder doit être disponible, mais le statut actuel est " "\"%s\"." #, python-format msgid "Snapshot with id of %s could not be found." msgstr "L'instantané avec l'ID %s est introuvable." #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "L'instantané='%(snap)s' n'existe pas dans l'image de base='%(base)s' - " "abandon sauvegarde incrémentielle" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "" "Les instantanés ne sont pas pris en charge pour ce format de volume : %s" #, python-format msgid "Socket error: %(arg)s." msgstr "Erreur de socket : %(arg)s." msgid "SolidFire Cinder Driver exception" msgstr "Exception du pilote SolidFire Cinder" msgid "Sort direction array size exceeds sort key array size." msgstr "La taille de tableau du sens de tri dépasse celle de la clé de tri." msgid "Source CG is empty. No consistency group will be created." msgstr "" "Le groupe de cohérence source est vide. Aucun groupe de cohérence ne sera " "créé." msgid "Source host details not found." msgstr "Détails de l'hôte source introuvables." msgid "Source volume device ID is required." msgstr "L'ID d'unité du volume source est requis." msgid "Source volume not mid-migration." msgstr "Le volume source n'est pas en cours de migration." #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "La source avec l'adresse IP/nom d'hôte %s est introuvable pour la migration " "de volume activée pour le back-end. Exécution de la migration par défaut." msgid "SpaceInfo returned byarray is invalid" msgstr "SpaceInfo renvoyé par la matrice est incorrect" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "L'hôte spécifié à mapper au volume %(vol)s est dans un groupe d'hôtes non " "pris en charge avec %(group)s." msgid "Specified logical volume does not exist." msgstr "Le volume logique spécifié n'existe pas." #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "Le groupe d'instantanés spécifié avec l'ID %s est introuvable." msgid "Specify a password or private_key" msgstr "Spécifiez un mot de passe ou private_key" msgid "Specify san_password or san_private_key" msgstr "Indiquez san_password ou san_private_key" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "" "Indiquez le nom de type de volume, une description, is_public ou une " "combinaison de ces éléments." msgid "Split pair error." msgstr "Erreur lors du fractionnement d'une paire." msgid "Split replication failed." msgstr "L'opération de fractionnement de la réplication a échoué." msgid "Start LUNcopy error." msgstr "Erreur lors du lancement de LUNcopy." 
msgid "State" msgstr "Etat" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "L'état du noeud est incorrect. Etat actuel : %s." msgid "Status" msgstr "Statut" msgid "Stop snapshot error." msgstr "Erreur lors de l'arrêt de l'instantané." #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "" "Service de configuration de stockage introuvable sur %(storageSystemName)s." #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "Service de gestion d'ID matériel de stockage introuvable sur " "%(storageSystemName)s." #, python-format msgid "Storage Profile %s not found." msgstr "Le profil de stockage %s est introuvable." #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "" "Service de réadressage de stockage introuvable sur %(storageSystemName)s." #, python-format msgid "Storage family %s is not supported." msgstr "La famille de produits de stockage %s n'est pas prise en charge." #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "" "Le groupe de stockage %(storageGroupName)s n'a pas été correctement supprimé" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "L'hôte de stockage %(svr)s n'a pas été détecté, vérifiez son nom" msgid "Storage pool is not configured." msgstr "Le pool de stockage n'est pas configuré." #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "Profil de stockage %(storage_profile)s introuvable." msgid "Storage resource could not be found." msgstr "Ressource de stockage introuvable." msgid "Storage system id not set." msgstr "ID du système de stockage non défini." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "Système de stockage introuvable pour le pool %(poolNameInStr)s." msgid "Storage-assisted migration failed during manage volume." msgstr "" "La migration à l'aide du back-end de stockage a échoué lors de l'opération " "de gestion du volume." #, python-format msgid "StorageSystem %(array)s is not found." msgstr "StorageSystem %(array)s introuvable." #, python-format msgid "String with params: %s" msgstr "Chaîne avec paramètres : %s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "La somme d'utilisation du quota enfant '%(sum)s' est supérieure au quota " "disponible '%(free)s' pour le projet '%(proj)s' de la ressource '%(res)s'. " "Réduisez la limite ou l'utilisation pour un ou plusieurs projets parmi les " "suivants : '%(child_ids)s'" msgid "Switch over pair error." msgstr "Erreur lors du basculement d'une paire." msgid "Sync pair error." msgstr "Erreur lors de la synchronisation de paire." msgid "Synchronizing secondary volume to primary failed." msgstr "" "La synchronisation du volume secondaire avec le volume principal a échoué." #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "" "Système %(id)s détecté avec un statut de mot de passe incorrect - " "%(pass_status)s." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "Système %(id)s trouvé avec état erroné - %(status)s." msgid "System does not support compression." msgstr "Le système ne gère pas la compression." 
msgid "System is busy, retry operation." msgstr "Le système est occupé, recommencer l'opération." #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "TSM [%(tsm)s] n'a pas été trouvé dans le stockage CloudByte pour le compte " "[%(account)s]." msgid "Target volume type is still in use." msgstr "Le type de volume cible est toujours utilisé." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "Discordance d'arborescence de modèles ; ajout de l'esclave %(slavetag)s au " "maître %(mastertag)s" #, python-format msgid "Tenant ID: %s does not exist." msgstr "L'ID titulaire %s n'existe pas." msgid "Terminate connection failed" msgstr "Echec de fin de la connexion" msgid "Terminate connection unable to connect to backend." msgstr "" "La fonction mettant fin à la connexion ne peut pas se connecter au back-end." #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "La clôture de la connexion au volume a échoué : %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "La source %(type)s %(id)s à répliquer est introuvable." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "Les paramètres 'sort_key' et 'sort_dir' sont obsolètes et ne peuvent pas " "être utilisés avec le paramètre 'sort'." msgid "The EQL array has closed the connection." msgstr "La matrice EQL a fermé la connexion." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "Le système de fichiers GPFS %(fs)s ne se trouve pas au niveau d'édition " "requis. Niveau actuel : %(cur)s, niveau minimal requis : %(min)s." msgid "The IP Address was not found." msgstr "Adresse IP introuvable." #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "Echec de la demande WebDAV. Cause : %(msg)s. Code retour/cause : %(code)s, " "volume source : %(src)s, volume de destination : %(dst)s, méthode : " "%(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "L'erreur ci-dessous peut indiquer que la base de données n'a pas été créée.\n" "Créez une base de données avec 'cinder-manage db sync' avant d'exécuter " "cette commande." #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "La matrice ne prend pas en charge la valeur de pool de stockage pour SLO " "%(slo)s et la charge de travail %(workload)s. Vérifiez la matrice pour " "connaître les valeurs SLO et les valeurs de charge de travail valides." msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "" "La réplication n'est pas activée sur le back-end sur lequel le volume est " "créé." #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "Echec de la commande %(cmd)s. 
(ret : %(ret)s, stdout : %(out)s, stderr : " "%(err)s)" msgid "The copy should be primary or secondary" msgstr "La copie doit être primaire ou secondaire" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "" "La création d'une unité logique n'a pas abouti. (unité logique : %(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" "La méthode décorée doit accepter un objet volume ou un objet instantané" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "L'unité du chemin %(path)s est indisponible : %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "" "L'heure de fin (%(end)s) doit être postérieure à l'heure de début " "(%(start)s)." #, python-format msgid "The extra_spec: %s is invalid." msgstr "extra_spec %s n'est pas valide." #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "La spécification supplémentaire %(extraspec)s n'est pas valide." #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "Suppression impossible du volume basculé : %s" #, python-format msgid "The following elements are required: %s" msgstr "Les éléments suivants sont requis : %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "Les migrations suivantes contiennent un retour à une version antérieure, ce " "qui n'est pas autorisé : \n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "Impossible d'ajouter le groupe d'hôtes ou la cible iSCSI." msgid "The host group or iSCSI target was not found." msgstr "Le groupe d'hôtes ou la cible iSCSI est introuvable." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "L'hôte n'est pas prêt à être rebasculé. Resynchronisez les volumes et " "reprenez la réplication sur les back-ends 3PAR." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "L'hôte n'est pas prêt à être rebasculé. Resynchronisez les volumes et " "reprenez la réplication sur les back-ends LeftHand." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "L'hôte n'est pas prêt à être rebasculé. Resynchronisez les volumes et " "reprenez la réplication sur les back-ends Storwize." #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "L'utilisateur CHAP iSCSI %(user)s n'existe pas." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "Le numéro d'unité logique importé, %(lun_id)s, fait partie du pool " "%(lun_pool)s, lequel n'est pas géré par l'hôte %(host)s." msgid "The key cannot be None." msgstr "La clé ne peut pas être nulle." #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "L'unité logique pour le %(type)s %(id)s spécifié a déjà été supprimée." #, python-format msgid "The method %(method)s timed out. (timeout value: %(timeout)s)" msgstr "" "La méthode %(method)s a expiré. (valeur du délai d'expiration : %(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "La méthode update_migrated_volume n'est pas implémentée."
#, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" "Le montage %(mount_path)s n'est pas un volume Quobyte USP valide. Erreur : " "%(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "" "Paramètre du back-end de stockage. (groupe de configuration : " "%(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "" "La sauvegarde parent doit être disponible pour la sauvegarde incrémentielle." #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "L'instantané fourni '%s' n'est pas un instantané du volume fourni." msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "La référence au volume sur le back-end doit être au format " "système_de_fichiers/nom_volume (nom_volume ne peut pas contenir de barre " "oblique '/')" #, python-format msgid "The remote retention count must be %s or less." msgstr "" "La valeur de la rétention à distance doit être inférieure ou égale à %s." msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "Le mode de réplication n'a pas été configuré correctement dans les " "spécifications extra_specs du type de volume. Si la valeur de replication:" "mode est periodic, replication:sync_period doit également être spécifié avec " "une valeur comprise entre 300 et 31622400 secondes." #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "" "La période de synchronisation de la réplication doit être d'au moins %s " "secondes." #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "La taille demandée : %(requestedSize)s n'est pas identique à la taille " "résultante : %(resultSize)s." #, python-format msgid "The resource %(resource)s was not found." msgstr "Ressource %(resource)s introuvable." msgid "The results are invalid." msgstr "Le résultat n'est pas valide." #, python-format msgid "The retention count must be %s or less." msgstr "La valeur de rétention doit être inférieure ou égale à %s." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "L'instantané ne peut pas être créé alors que le volume est en mode " "maintenance." #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "Le volume source %s n'est pas dans le pool géré par l'hôte actuel." msgid "The source volume for this WebDAV operation not found." msgstr "Volume source introuvable pour cette opération WebDAV." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "Le type de volume source %(src)s est différent du type de volume de " "destination %(dest)s." #, python-format msgid "The source volume type '%s' is not available." msgstr "Le type de volume source %s n'est pas disponible." #, python-format msgid "The specified %(desc)s is busy." msgstr "La %(desc)s spécifiée est occupée." #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "" "Le numéro d'unité logique (LUN) indiqué n'appartient pas au pool indiqué : " "%s." 
#, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "L'unité logique spécifiée %(ldev)s ne peut pas être gérée. L'unité logique " "ne doit pas être en cours de mappage." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" "L'unité logique spécifiée %(ldev)s ne peut pas être gérée. L'unité logique " "ne doit pas être appariée." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "L'unité logique spécifiée %(ldev)s ne peut pas être gérée. La taille de " "l'unité logique doit être exprimée en multiples du gigaoctet." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "L'unité logique spécifiée %(ldev)s ne peut pas être gérée. Le type de volume " "doit être DP-VOL." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "L'opération spécifiée n'est pas prise en charge. La taille du volume doit " "être identique à la source %(type)s. (volume : %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "Le disque virtuel spécifié est mappé avec un hôte." msgid "The specified volume is mapped to a host." msgstr "Le volume spécifié est mappé à un hôte." #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "Le mot de passe de la matrice de stockage pour %s est incorrect, mettez à " "jour le mot de passe configuré." #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "" "Le back-end de stockage peut être utilisé. (groupe de configuration : " "%(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "L'unité de stockage ne prend pas en charge %(prot)s. Configurez l'unité pour " "la prise en charge de %(prot)s ou basculez sur un pilote utilisant un autre " "protocole." #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "Le compte segmenté de %(memberCount)s est trop faible pour le volume : " "%(volumeName)s, avec la taille %(volumeSize)s." #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "Le type de métadonnées %(metadata_type)s pour le volume/l'instantané %(id)s " "n'est pas valide." #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "Le volume %(volume_id)s n'a pas pu être étendu. Le type de volume doit être " "Normal." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "Le volume %(volume_id)s ne peut pas être non géré. Son type doit être " "%(volume_type)s." #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "Le volume %(volume_id)s est géré. (unité logique : %(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "Le volume %(volume_id)s n'est plus géré. 
(unité logique : %(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "Le volume %(volume_id)s à mapper est introuvable." msgid "The volume cannot accept transfer in maintenance mode." msgstr "Le volume ne peut pas accepter de transfert en mode maintenance." msgid "The volume cannot be attached in maintenance mode." msgstr "Le volume ne peut pas être rattaché en mode maintenance." msgid "The volume cannot be detached in maintenance mode." msgstr "Le volume ne peut pas être détaché en mode maintenance." msgid "The volume cannot be updated during maintenance." msgstr "Le volume ne peut pas être mis à jour en phase de maintenance." msgid "The volume connection cannot be initialized in maintenance mode." msgstr "" "La connexion de volume ne peut pas être initialisée en mode maintenance." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" "Le pilote de volume a besoin du nom de demandeur iSCSI dans le connecteur." msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "Le volume est actuellement occupé sur le 3PAR et ne peut pas être supprimé " "pour le moment. Réessayez plus tard." msgid "The volume label is required as input." msgstr "Le label du volume est requis comme entrée." msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "" "Les métadonnées du volume ne peuvent pas être supprimées alors que le volume " "est en mode maintenance." msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "Les métadonnées du volume ne peuvent pas être mises à jour alors que le " "volume est en mode maintenance." #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "" "Il n'y a aucune ressource disponible utilisable. (ressource : %(resource)s)" msgid "There are no valid ESX hosts." msgstr "Il n'existe aucun hôte ESX valide." #, python-format msgid "There are no valid datastores attached to %s." msgstr "Aucun magasin de données valide connecté à %s." msgid "There are no valid datastores." msgstr "Il n'y a aucun magasin de données valide." #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "" "Il n'existe aucune désignation du paramètre %(param)s. Le système de " "stockage spécifié est essentiel pour gérer le volume." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" "Il n'existe aucune désignation de l'unité logique. L'unité logique spécifiée " "est essentielle pour gérer le volume." msgid "There is no metadata in DB object." msgstr "Aucune métadonnée n'est présente dans l'objet BD." #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "Aucun partage ne pouvant héberger l'hôte %(volume_size)sG" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "Il n'y a aucun partage pouvant héberger %(volume_size)sG." #, python-format msgid "There is no such action: %s" msgstr "Aucune action de ce type : %s" msgid "There is no virtual disk device." msgstr "Il n'y a pas d'unité de disque virtuel." #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "" "Une erreur s'est produite lors de l'ajout du volume au groupe de copie à " "distance : %s." 
#, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "Une erreur s'est produite lors de la création de cgsnapshot : %s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "" "Une erreur s'est produite lors de la création du groupe de copie à " "distance : %s." #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "" "Une erreur s'est produite lors de la définition de la période de " "synchronisation du groupe de copie à distance : %s." #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Une erreur s'est produite lors de la configuration d'un groupe de copie à " "distance sur les matrices 3PAR : ('%s'). Le volume ne sera pas reconnu comme " "type de réplication." #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "Une erreur s'est produite lors de la configuration d'une planification à " "distance sur les matrices LeftHand : ('%s'). Le volume ne sera pas reconnu " "en tant que type de réplication." #, python-format msgid "There was an error starting remote copy: %s." msgstr "" "Une erreur s'est produite lors du démarrage de la copie à distance : %s." #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Aucun fichier de configuration Gluster n'est configuré (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "Aucun fichier de configuration NFS n'est configuré (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "Aucun volume Quobyte n'est configuré (%s). Exemple : quobyte:///" "" msgid "Thin provisioning not supported on this version of LVM." msgstr "" "L'allocation de ressources à la demande n'est pas prise en charge sur cette " "version du gestionnaire de volume logique (LVM)." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "L'optimiseur d'allocation de ressources n'est pas installé. Impossible de " "créer un volume fin" msgid "This driver does not support deleting in-use snapshots." msgstr "" "Ce pilote ne prend pas en charge la suppression d'instantanés en cours " "d'utilisation." msgid "This driver does not support snapshotting in-use volumes." msgstr "" "Ce pilote ne prend pas en charge la capture d'instantanés de volumes en " "cours d'utilisation." msgid "This request was rate-limited." msgstr "Cette demande était limitée par la fréquence." #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "Cette plateforme de système (%s) n'est pas prise en charge. Le pilote prend " "en charge les plateformes Win32." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "" "Service de règles de hiérarchisation introuvable pour %(storageSystemName)s." #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "Dépassement du délai d'attente de mise à jour Nova pour la création de " "l'instantané %s." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "Dépassement du délai d'attente de mise à jour Nova pour la suppression de " "l'instantané %(id)s." 
msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "Valeur de délai d'attente (en secondes) utilisée lors de la connexion au " "cluster ceph. Si la valeur < 0, aucun délai d'attente n'est défini et la " "valeur librados par défaut est utilisée." #, python-format msgid "Timeout while calling %s " msgstr "Délai d'attente dépassé lors de l'appel de %s " #, python-format msgid "Timeout while requesting %(service)s API." msgstr "Dépassement du délai lors de la demande de l'API %(service)s." #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "" "Délai d'attente dépassé lors de la demande de fonctionnalités du back-end " "%(service)s." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "Transfert %(transfer_id)s introuvable." #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "Transfert %(transfer_id)s : ID volume %(volume_id)s dans un état inattendu " "%(status)s, awaiting-transfer attendu" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "Tentative d'importation des métadonnées de sauvegarde depuis l'ID " "%(meta_id)s vers la sauvegarde %(id)s." #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "Tâche de réglage du volume arrêtée avant la fin : volume_name=" "%(volume_name)s, task-status=%(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "Type %(type_id)s déjà associé à d'autres spécifications QoS : " "%(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "" "La modification d'accès de type n'est pas applicable aux types de volume " "publics." msgid "Type cannot be converted into NaElement." msgstr "Impossible de convertir le type en NaElement." #, python-format msgid "TypeError: %s" msgstr "TypeError : %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "" "Les UUID %s sont tous les deux dans la liste de volumes à ajouter et à " "supprimer." #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "Impossible d'accéder au back-end Storwize pour le volume %s." msgid "Unable to access the backend storage via file handle." msgstr "Impossible d'accéder au stockage dorsal via le descripteur de fichier." #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "" "Impossible d'accéder au stockage d'arrière plan par le chemin %(path)s." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "" "Impossible d'ajouter l'hôte Cinder aux apphosts pour l'espace %(space)s" #, python-format msgid "Unable to complete failover of %s." msgstr "Impossible d'effectuer le basculement de %s." msgid "Unable to connect or find connection to host" msgstr "Impossible d'établir ou de trouver une connexion à l'hôte" msgid "Unable to create Barbican Client without project_id." msgstr "Impossible de créer le client Barbican sans project_id." #, python-format msgid "Unable to create consistency group %s" msgstr "Impossible de créer le groupe de cohérence %s" msgid "Unable to create lock. Coordination backend not started." 
msgstr "" "Impossible de créer un verrou. Le back-end de coordination n'est pas démarré." #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "Impossible de créer ou d'obtenir le groupe de stockage par défaut pour la " "règle FAST : %(fastPolicyName)s." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "Impossible de créer un clone de réplication pour le volume %s." #, python-format msgid "Unable to create the relationship for %s." msgstr "Impossible de créer la relation pour %s." #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "Impossible de créer le volume %(name)s depuis %(snap)s." #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "Impossible de créer le volume %(name)s depuis %(vol)s." #, python-format msgid "Unable to create volume %s" msgstr "Impossible de créer le volume %s" msgid "Unable to create volume. Backend down." msgstr "Impossible de créer le volume. Le back-end est arrêté." #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "Impossible de supprimer l'instantané de groupe de cohérence %s" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "Impossible de supprimer l'instantané %(id)s, état : %(status)s." #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "Impossible de supprimer la stratégie d'instantané sur le volume %s." #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "" "Impossible de supprimer le volume cible pour le volume %(vol)s. Exception : " "%(err)s." msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "Impossible de déconnecter le volume. Le statut du volume doit être 'in-use' " "et le statut attach_status doit être 'attached' pour le déconnecter." #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "" "Impossible de déterminer secondary_array à partir de la matrice secondaire " "fournie : %(secondary)s." #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "" "Impossible de déterminer le nom de l'instantané dans Purity pour " "l'instantané %(id)s." msgid "Unable to determine system id." msgstr "Impossible de déterminer l'ID système." msgid "Unable to determine system name." msgstr "Impossible de déterminer le nom du système." #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "Impossible d'effectuer des opérations d'instantané avec la version d'API " "REST de Purity %(api_version)s, ceci nécessite %(required_versions)s." #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "Impossible d'effectuer de réplication avec l'API REST Purity version " "%(api_version)s, l'une des %(required_versions)s est nécessaire." msgid "Unable to enable replication and snapcopy at the same time." msgstr "" "Impossible d'activer la réplication et la fonction snapcopy en même temps." #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Impossible d'établir un partenariat avec le cluster Storwize %s." 
#, python-format msgid "Unable to extend volume %s" msgstr "Impossible d'étendre le volume %s" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "" "Impossible de faire basculer le volume %(id)s sur le back-end secondaire, " "car la relation de réplication ne peut pas être basculée : %(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "Impossible de rebasculer sur \"default\", cette opération n'est possible " "qu'après la fin d'un basculement." #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "Basculement impossible vers la cible de réplication : %(reason)s)." msgid "Unable to fetch connection information from backend." msgstr "" "Impossible d'extraire des informations de connexion depuis le back-end." #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" "Impossible d'extraire des informations de connexion depuis le back-end : " "%(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "La référence Purity avec name=%s est introuvable " #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "Impossible de trouver le groupe de volumes : %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "" "Cible de basculement introuvable, aucune cible secondaire n'a été configurée." msgid "Unable to find iSCSI mappings." msgstr "Impossible de trouver des mappages iSCSI." #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "ssh_hosts_key_file introuvable: %s" msgid "Unable to find system log file!" msgstr "Fichier de trace système non trouvé!" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "" "Impossible de trouver un instantané pg viable à utiliser pour le basculement " "sur la matrice secondaire sélectionnée : %(id)s." #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "" "Impossible de trouver une matrice secondaire viable à partir des cibles " "configurées : %(targets)s." #, python-format msgid "Unable to find volume %s" msgstr "Impossible de trouver le volume %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "Impossible d'obtenir l'unité par bloc pour le fichier '%s'" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" "Impossible d'obtenir les informations de configuration nécessaires pour " "créer un volume : %(errorMessage)s." msgid "Unable to get corresponding record for pool." msgstr "Impossible d'obtenir l'enregistrement correspondant pour le pool." #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "Impossible d'obtenir des informations sur l'espace %(space)s, vérifiez que " "le cluster est en cours d'exécution et connecté." msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "Impossible d'obtenir la liste des adresses IP sur cet hôte, vérifiez les " "autorisations et le réseau." msgid "" "Unable to get list of domain members, check that the cluster is running." 
msgstr "" "Impossible d'obtenir la liste des membres du domaine, vérifiez que le " "cluster est en cours d'exécution." msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "Impossible d'obtenir la liste des espaces pour en créer un nouveau. Vérifiez " "que le cluster est en cours d'exécution. " #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "Impossible d'obtenir des statistiques pour backend_name : %s" msgid "Unable to get storage volume from job." msgstr "Impossible d'extraire le volume de stockage à partir de la tâche." #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "Impossible d'obtenir les noeuds finals pour hardwareId " "%(hardwareIdInstance)s." msgid "Unable to get the name of the masking view." msgstr "Impossible d'obtenir le nom de la vue de masquage." msgid "Unable to get the name of the portgroup." msgstr "Impossible d'obtenir le nom de portgroup." #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "Impossible d'obtenir la relation de réplication pour le volume %s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "Impossible d'importer le volume %(deviceId)s dans Cinder. Il s'agit du " "volume source de la session de réplication %(sync)s." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "Impossible d'importer le volume %(deviceId)s dans Cinder. Le volume externe " "n'est pas dans le pool géré par l'hôte Cinder actuel." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "Impossible d'importer le volume %(deviceId)s dans Cinder. Le volume est en " "vue de masquage %(mv)s." #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "" "Impossible de charger l'autorité de certification depuis %(cert)s %(e)s." #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "Impossible de charger le certificat depuis %(cert)s %(e)s." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "Impossible de charger la clé depuis %(cert)s %(e)s." #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "Compte %(account_name)s introuvable sur l'unité Solidfire" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "" "Impossible de localiser une machine virtuelle de stockage (SVM) qui gère " "l'adresse IP '%s'" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "Impossible de localiser les profils de relecture indiqués %s " #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "Impossible de gérer le volume existant. Le volume %(volume_ref)s est déjà " "géré." #, python-format msgid "Unable to manage volume %s" msgstr "Impossible de gérer le volume %s" msgid "Unable to map volume" msgstr "Impossible de mapper le volume" msgid "Unable to map volume." msgstr "Impossible de mapper le volume." msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "" "Impossible d'effectuer une analyse syntaxique de la demande XML. Fournissez " "le code XML dans un format correct." msgid "Unable to parse attributes." 
msgstr "Impossible d'analyser les attributs." #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "Impossible de promouvoir la réplique en réplique primaire pour le volume %s. " "Aucune copie secondaire disponible." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "Impossible de réutiliser un hôte qui n'est pas géré par Cinder avec " "use_chap_auth=True," msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" "Impossible de réutiliser l'hôte avec des données d'identification CHAP " "inconnues configurées." #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "Impossible de renommer le volume %(existing)s en %(newname)s" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "Impossible de récupérer le groupe d'instantanés avec l'ID %s." #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "Impossible de ressaisir %(specname)s, réception attendue des valeurs en " "cours et demandées %(spectype)s. Valeur reçue : %(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "Confirmation impossible : une copie du volume %s existe. La confirmation " "dépasserait la limite de 2 copies." #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "Impossible de modifier le type : l'action en cours a besoin du paramètre " "volume_copy. Elle est interdite lorsque le nouveau type est une réplication. " "Volume = %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "" "Impossible de configurer la réplication en mode miroir pour %(vol)s. " "Exception : %(err)s." #, python-format msgid "Unable to snap Consistency Group %s" msgstr "Impossible de créer un instantané du groupe de cohérence %s" msgid "Unable to terminate volume connection from backend." msgstr "Impossible de terminer la connexion au volume depuis le back-end." #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "Impossible de terminer la connexion au volume : %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "Impossible de mettre à jour le groupe de cohérence %s" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "Impossible de mettre à jour le type en raison d'un état incorrect : " "%(vol_status)s sur le volume : %(vol_id)s. L'état du volume doit être " "Disponible ou En cours d'utilisation." #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" "Impossible de vérifier le groupe de demandeurs : %(igGroupName)s dans la vue " "de masquage %(maskingViewName)s. " msgid "Unacceptable parameters." msgstr "Paramètres inacceptables." #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" "Statut de mappage %(status)s inattendu pour le mappage %(id)s. Attributs : " "%(attr)s." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." 
msgstr "" "Réponse CLI imprévue : non concordance d'en-tête/ligne. en-tête : " "%(header)s, ligne : %(row)s." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "Statut de mappage inattendu %(status)s pour le mappage %(id)s. Attributs : " "%(attr)s." #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "Sortie inattendue. [%(expected)s] attendu mais [%(output)s] reçu" msgid "Unexpected response from Nimble API" msgstr "Code de réponse inattendu de l'API Nimble" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Réponse inattendue de l'API Tegile IntelliFlash" msgid "Unexpected status code" msgstr "Code de statut inattendu" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "Code de statut inattendu provenant du commutateur %(switch_id)s avec le " "protocole %(protocol)s pour l'URL %(page)s. Erreur : %(error)s" msgid "Unknown Gluster exception" msgstr "Exception Gluster inconnue" msgid "Unknown NFS exception" msgstr "Exception NFS inconnue" msgid "Unknown RemoteFS exception" msgstr "Exception RemoteFS inconnue" msgid "Unknown SMBFS exception." msgstr "Exception SMBFS inconnue." msgid "Unknown Virtuozzo Storage exception" msgstr "Exception Virtuozzo Storage inconnue" msgid "Unknown action" msgstr "Action inconnu" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Inconnu si le volume : %s à gérer est déjà géré par Cinder. Abandon de " "l'opération gérer le volume. Ajoutez la propriété de schéma personnalisé " "'cinder_managed' au volume et définissez sa valeur sur False. Autrement, " "définissez la valeur de la stratégie de configuration cinder " "'zfssa_manage_policy' sur 'loose' pour supprimer cette restriction." #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "Inconnu si le volume : %s à gérer est déjà géré par Cinder. Abandon de " "l'opération gérer le volume. Ajoutez la propriété de schéma personnalisé " "'cinder_managed' au volume et définissez sa valeur sur False. Autrement, " "définissez la valeur de la stratégie de configuration cinder " "'zfssa_manage_policy' sur 'loose' pour supprimer cette restriction." #, python-format msgid "Unknown operation %s." msgstr "Opération inconnue %s." #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "Commande inconnue ou non prise en charge %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "Protocole inconnu : %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Ressources de quota inconnues %(unknown)s." msgid "Unknown service" msgstr "Service inconnu" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Sens de tri inconnu, doit être 'desc' ou 'asc'" msgid "Unknown sort direction, must be 'desc' or 'asc'." 
msgstr "Sens de tri inconnu, doit être 'desc' ou 'asc'." msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "Les options unmanage et cascade delete s'excluent mutuellement." msgid "Unmanage volume not implemented." msgstr "" "La fonction consistant à ne plus gérer un volume (unmanage volume) n'est pas " "implémentée." msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" "Arrêter de gérer des instantanés dans des volumes basculés ('failed-over') " "n'est pas autorisé." msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" "Arrêter de gérer des instantanés dans des volumes basculés n'est pas " "autorisé." #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "Mot clé QOS non reconnu : \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "Format de sauvegarde non identifié : %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valeur read_deleted non reconnue '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "Options gcs non définies : %s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "Echec d'iscsiadm. Exception : %(ex)s. " msgid "Unsupported Clustered Data ONTAP version." msgstr "Version de Clustered Data ONTAP non prise en charge." msgid "Unsupported Content-Type" msgstr "Type de contenu non pris en charge" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" "Version de Data ONTAP non prise en charge. Data ONTAP versions 7.3.1 et " "supérieures sont prises en charge." #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "Version des métadonnées de sauvegarde non prise en charge (%s)" msgid "Unsupported backup metadata version requested" msgstr "Version des métadonnées de sauvegarde non prise en charge demandée" msgid "Unsupported backup verify driver" msgstr "Sauvegarde non prise en charge ; vérifiez le pilote" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "Microprogramme non pris en charge sur le commutateur %s. Assurez-vous que le " "commutateur exécute le microprogramme 6.4 ou version supérieure" #, python-format msgid "Unsupported volume format: %s " msgstr "Format de volume non pris en charge : %s " msgid "Update QoS policy error." msgstr "Erreur lors de la mise à jour de la stratégie QoS." msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "Les opérations de mise à jour ou de suppression de quota ne peuvent être " "effectuées que par un administrateur de parent immédiat ou par " "l'administrateur CLOUD." msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "Les opérations de mise à jour ou de suppression de quota ne peuvent porter " "que sur des projets dans la même hiérarchie de projet que celle définie pour " "la portée utilisateurs." msgid "Update list, doesn't include volume_id" msgstr "La liste de mise à jour ne comprend pas volume_id" msgid "Updated At" msgstr "Mis à jour à" msgid "Upload to glance of attached volume is not supported." msgstr "" "Le téléchargement vers Glance du volume connecté n'est pas pris en charge." msgid "Use ALUA to associate initiator to host error." msgstr "" "Erreur lors de l'utilisation de ALUA pour associer l'initiateur à l'hôte. 
" msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "Erreur lors de l'utilisation de CHAP pour associer l'initiateur à l'hôte. " "Vérifiez le nom d'utilisateur et le mot de passe CHAP." msgid "User ID" msgstr "ID Utilisateur" msgid "User does not have admin privileges" msgstr "L’utilisateur n'a pas les privilèges administrateur" msgid "User is not authorized to use key manager." msgstr "L'utilisateur n'est pas autorisé à utiliser le gestionnaire de clés." msgid "User not authorized to perform WebDAV operations." msgstr "Utilisateur non autorisé à exécuter des opérations WebDAV." msgid "UserName is not configured." msgstr "UserName n'est pas configuré." msgid "UserPassword is not configured." msgstr "UserPassword n'est pas configuré." msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "" "Rétromigration vers V2 - Le volume appartient à un autre groupe de stockage " "à part le groupe de stockage par défaut." msgid "V2 rollback, volume is not in any storage group." msgstr "Rétromigration vers V2, le volume n'est dans aucun groupe de stockage." msgid "V3 rollback" msgstr "Rétromigration V3" msgid "VF is not enabled." msgstr "VF n'est pas activé." #, python-format msgid "VV Set %s does not exist." msgstr "VV Set %s inexistant." #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "Client valide de spécifications QoS : %s" #, python-format msgid "Valid control location are: %s" msgstr "Emplacement de contrôle valide : %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "La validation de la connexion de volume a échoué (erreur : %(err)s)." #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" "La valeur \"%(value)s\" n'est pas valide pour l'option de configuration " "\"%(option)s\"" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "La valeur %(param)s pour %(param_string)s n'est pas de type booléen." msgid "Value required for 'scality_sofs_config'" msgstr "Valeur requise pour 'scality_sofs_config'" #, python-format msgid "ValueError: %s" msgstr "ValueError : %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "" "Le disque virtuel %(name)s n'est pas impliqué dans le mappage %(src)s -> " "%(tgt)s." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "La version %(req_ver)s n'est pas prise en charge par l'API. Minimum : " "%(min_ver)s et maximum : %(max_ver)s." #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s ne parvient pas à récupérer l'objet par ID." #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "" "VersionedObject %s ne prend pas en charge la mise à jour conditionnelle." #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "Le volume virtuel '%s' n'existe pas dans la matrice." #, python-format msgid "Vol copy job for dest %s failed." msgstr "Echec du travail de copie du vol. pour la dest %s." #, python-format msgid "Volume %(deviceID)s not found." msgstr "Volume %(deviceID)s introuvable." #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "Volume %(name)s introuvable dans la matrice. 
Impossible de déterminer si des " "volumes sont mappés." #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "Le volume %(name)s a été créé dans VNX, mais à l'état %(state)s." #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "Le volume %(vol)s n'a pas pu être créé dans le pool %(pool)s." #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "Le volume %(vol1)s ne correspond pas à snapshot.volume_id %(vol2)s." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "Le volume %(vol_id)s doit être dans un état Disponible ou En cours " "d'utilisation, mais l'état en cours est : %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "L'état du volume %(vol_id)s doit être Disponible pour extension, mais l'état " "en cours est : %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "L'état du volume %(vol_id)s doit être Disponible pour la mise à jour de " "l'indicateur readonly, mais l'état actuel est : %(vol_status)s." #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "Le volume %(vol_id)s doit être dans un état de disponibilité, mais l'état en " "cours est : %(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Le volume %(volume_id)s est introuvable." #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "Le volume %(volume_id)s n'a aucune métadonnée d'administration avec la clé " "%(metadata_key)s." #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "Le volume %(volume_id)s ne comporte pas de métadonnées avec la clé " "%(metadata_key)s." #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "Le volume %(volume_id)s est actuellement mappé à un groupe d'hôtes non pris " "en charge : %(group)s" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "Le volume %(volume_id)s n'est pas actuellement mappé à l'hôte %(host)s" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" "Le volume %(volume_id)s est toujours attaché. Détachez-le préalablement." #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "Erreur de réplication du volume %(volume_id)s : %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "Le volume %(volume_name)s est occupé." #, python-format msgid "Volume %s could not be created from source volume." msgstr "Le volume %s n'a pas pu être créé à partir du volume source." #, python-format msgid "Volume %s could not be created on shares." msgstr "Le volume %s n'a pas pu être créé sur les partages." #, python-format msgid "Volume %s could not be created." msgstr "Le volume %s n'a pas pu être créé." 
#, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "Le volume %s n'existe pas dans Nexenta SA" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "Le volume %s n'existe pas dans Nexenta Store Appliance" #, python-format msgid "Volume %s does not exist on the array." msgstr "Le volume %s n'existe pas sur la matrice." #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "" "provider_location n'a pas été spécifié pour le volume %s. Il sera ignoré." #, python-format msgid "Volume %s doesn't exist on array." msgstr "Le volume %s n'existe pas sur la matrice." #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "Le volume %s n'existe pas sur le back-end ZFSSA." #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "Le volume %s est déjà géré par OpenStack." #, python-format msgid "Volume %s is already part of an active migration." msgstr "Le volume %s fait déjà partie d'une migration active." #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "Le volume %s n'est pas du type répliqué. Ce volume doit être d'un type de " "volume avec la spécification supplémentaire (extra spec) définie avec ' " "True' pour prendre en charge les actions de réplication." #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "Le volume %s est en ligne (online). Définissez le volume comme étant hors " "ligne (offline) pour le gérer avec OpenStack." #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." msgstr "" "Le volume %s ne doit pas être en cours de migration ou connecté, il ne doit " "pas appartenir à un groupe de cohérence ou avoir des instantanés." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "Le volume %s ne doit pas faire partie d'un groupe de cohérence." #, python-format msgid "Volume %s must not be replicated." msgstr "Le volume %s ne doit pas être répliqué." #, python-format msgid "Volume %s must not have snapshots." msgstr "Le volume %s ne doit pas avoir d'instantanés." #, python-format msgid "Volume %s not found." msgstr "Le volume %s est introuvable." #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "Volume %s : Erreur lors de la tentative d'extension du volume" #, python-format msgid "Volume (%s) already exists on array" msgstr "Le volume (%s) existe déjà dans la matrice" #, python-format msgid "Volume (%s) already exists on array." msgstr "Le volume (%s) existe déjà dans la matrice." #, python-format msgid "Volume Group %s does not exist" msgstr "Le groupe de volumes %s n'existe pas" #, python-format msgid "Volume Type %(id)s already exists." msgstr "Le type de volume %(id)s existe déjà." #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" "Le type de volume %(type_id)s n'a pas de spécifications supplémentaires avec " "la clé %(id)s." #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "Suppression du type de volume %(volume_type_id)s interdite avec les volumes " "de ce type." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." 
msgstr "" "Le type de volume %(volume_type_id)s ne comporte pas de spécs supp avec la " "clé %(extra_specs_key)s." msgid "Volume Type id must not be None." msgstr "L'ID de type de volume ne peut pas être None." #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "Volume [%(cb_vol)s] introuvable sur le stockage CloudByte correspondant au " "volume OpenStack [%(ops_vol)s]." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "Volume [%s] introuvable dans le système de stockage CloudByte." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "" "La pièce jointe du volume est introuvable avec le filtre : %(filter)s ." #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "La configuration de back-end du volume n'est pas valide : %(reason)s" msgid "Volume by this name already exists" msgstr "Un volume ayant ce nom existe déjà" msgid "Volume cannot be restored since it contains snapshots." msgstr "Impossible de restaurer le volume car il contient des instantanés." msgid "Volume create failed while extracting volume ref." msgstr "" "La création du volume a échoué lors de l'extraction de la référence du " "volume (volume ref)." #, python-format msgid "Volume device file path %s does not exist." msgstr "" "Le chemin d'accès du fichier du périphérique de volume %s n'existe pas." #, python-format msgid "Volume device not found at %(device)s." msgstr "Périphérique de volume introuvable à %(device)s." #, python-format msgid "Volume driver %s not initialized." msgstr "Pilote de volume %s non initialisé." msgid "Volume driver not ready." msgstr "Pilote de volume non prêt." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "Le pilote de volume a signalé une erreur : %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "" "Le volume contient un instantané temporaire qui ne peut pas être supprimé " "pour l'instant." msgid "Volume has children and cannot be deleted!" msgstr "Ce volume a des enfants et ne peut pas être supprimé !" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" "Le volume du groupe de cohérence %s est connecté. Vous devez d'abord le " "déconnecter." msgid "Volume in consistency group still has dependent snapshots." msgstr "" "Le volume du groupe de cohérence contient encore des instantanés dépendants." #, python-format msgid "Volume is attached to a server. (%s)" msgstr "Le volume est attaché à un serveur. (%s)" msgid "Volume is in-use." msgstr "Volume en cours d'utilisation." msgid "Volume is not available." msgstr "Le volume n'est pas disponible." msgid "Volume is not local to this node" msgstr "Le volume n'est pas local sur ce noeud" msgid "Volume is not local to this node." msgstr "Le volume n'est pas local sur ce noeud." msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "La sauvegarde des métadonnées de volume est demandée mais ce pilote ne prend " "pas en charge cette fonction pour le moment." #, python-format msgid "Volume migration failed: %(reason)s" msgstr "Échec de la migration du volume : %(reason)s" msgid "Volume must be available" msgstr "Le volume doit être disponible." 
msgid "Volume must be in the same availability zone as the snapshot" msgstr "" "Le volume doit être dans la même zone de disponibilité que l'instantané" msgid "Volume must be in the same availability zone as the source volume" msgstr "" "Le volume doit être dans la même zone de disponibilité que le volume source" msgid "Volume must have a volume type" msgstr "Le volume doit comporter un type de volume" msgid "Volume must not be part of a consistency group." msgstr "Le volume ne doit pas faire partie d'un groupe de cohérence." msgid "Volume must not be replicated." msgstr "Le volume ne doit pas être répliqué." msgid "Volume must not have snapshots." msgstr "Le volume ne doit pas avoir d'instantanés." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "Volume introuvable pour l'instance %(instance_id)s." msgid "Volume not found on configured storage backend." msgstr "Volume introuvable sur le back-end de stockage configuré." msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "Volume introuvable sur le back-end de stockage configuré. Si le nom de votre " "volume contient une barre oblique \"/\", renommez-le puis renouvelez " "l'opération." msgid "Volume not found on configured storage pools." msgstr "Volume introuvable sur les pools de stockage configurés." msgid "Volume not found." msgstr "Volume introuvable." msgid "Volume not unique." msgstr "Le volume n'est pas unique." msgid "Volume not yet assigned to host." msgstr "Volume non encore affecté à l'hôte." msgid "Volume reference must contain source-name element." msgstr "La référence du volume doit contenir l'élément source-name." #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "Réplication du volume pour %(volume_id)s introuvable." #, python-format msgid "Volume service %s failed to start." msgstr "Le démarrage du service de volume %s n'a pas abouti." msgid "Volume should have agent-type set as None." msgstr "Le volume doit avoir la valeur agent-type définie à None." #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "La taille %(volume_size)s Go du volume ne peut pas être inférieure à la " "taille de l'image minDisk %(min_disk)s Go." #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "La taille de volume '%(size)s' doit être un entier ou supérieure à 0." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "La taille de volume '%(size)s' Go ne peut pas être inférieure à la taille de " "volume d'origine %(source_size)s Go. Elle doit être supérieure ou égale à " "la taille du volume d'origine." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "La taille de volume '%(size)s' Go ne peut pas être inférieure à la taille " "d'instantané %(snap_size)s Go. Elle doit être supérieure ou égale à la " "taille de l'instantané d'origine." msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "Taille de volume accrue depuis la dernière sauvegarde. Effectuez une " "sauvegarde intégrale." msgid "Volume size must be a multiple of 1 GB." msgstr "La taille du volume doit être un multiple de 1 Go." 
msgid "Volume size must be multiple of 1 GB." msgstr "La taille du volume doit être un multiple de 1 Go." msgid "Volume size must multiple of 1 GB." msgstr "La taille du volume doit être un multiple de 1 Go." #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" "Le volume doit être dans un état de disponibilité, mais l'état en cours est :" "%s" msgid "Volume status is in-use." msgstr "L'état de volume est En cours d'utilisation." #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "L'état du volume doit être \"disponible\" ou \"en usage\" pour l'instantané. " "(état en cours : %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "L'état du volume doit être \"disponible\" ou \"en usage\"." #, python-format msgid "Volume status must be %s to reserve." msgstr "Le statut du volume doit être %s pour l'opération de réservation." msgid "Volume status must be 'available'." msgstr "L'état du volume doit être 'disponible'." msgid "Volume to Initiator Group mapping already exists" msgstr "Le volume pour le mappage du groupe initiateur existe déjà" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "Le volume à sauvegarder doit être disponible ou en cours d'utilisation, mais " "son statut actuel indique \"%s\"." msgid "Volume to be restored to must be available" msgstr "Le volume à restaurer doit être disponible" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "Le type de volume %(volume_type_id)s est introuvable." #, python-format msgid "Volume type ID '%s' is invalid." msgstr "L'ID de volume '%s' n'est pas valide." #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "L'accès de type volume pour la combinaison %(volume_type_id)s / " "%(project_id)s existe déjà." #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "Accès de type volume introuvable pour la combinaison %(volume_type_id)s / " "%(project_id)s." #, python-format msgid "Volume type does not match for share %s." msgstr "Le type de volume ne correspond pas pour le partage %s." #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "Le chiffrement du type de volume existe déjà pour le type %(type_id)s." #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "" "Le chiffrement du type de volume pour le type %(type_id)s n'existe pas." msgid "Volume type name can not be empty." msgstr "Le nom de type de volume ne peut pas être vide." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "Le type de volume portant le nom %(volume_type_name)s est introuvable." #, python-format msgid "Volume with volume id %s does not exist." msgstr "Le volume avec le volume_id %s n'existe pas." #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "Le volume %(volumeName)s n'est pas un volume concaténé. Vous pouvez " "seulement effectuer une extension sur le volume concaténé. Sortie..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." 
msgstr "" "Le volume : %(volumeName)s n'a pas été ajouté au groupe de stockage " "%(sgGroupName)s." #, python-format msgid "Volume: %s could not be found." msgstr "Volume: %s introuvable" #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Volume %s déjà géré par Cinder." msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "" "Les volumes seront morcelées en objets de cette taille (en mégaoctets)." msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" "Nombre de volumes/compte dépassé sur les comptes SolidFire principaux et " "secondaires." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "La configuration VzStorage 'vzstorage_used_ratio' n'est pas valide. Doit " "être > 0 et <= 1.0: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "Le fichier de configuration VzStorage %(config)s n'existe pas." msgid "Wait replica complete timeout." msgstr "Dépassement du délai d'attente d'achèvement de la réplique." #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Echec de l'attente de synchronisation. Statut d'exécution : %s." msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "En attente que tous les noeuds aient rejoint le cluster. Vérifiez que tous " "les démons sheep sont en exécution." msgid "We should not do switch over on primary array." msgstr "" "Il est déconseillé d'effectuer un basculement sur la matrice principale." msgid "Wrong resource call syntax" msgstr "Syntaxe d'appel de ressource incorrecte" msgid "X-IO Volume Driver exception!" msgstr "Exception de pilote du volume X-IO" msgid "XML support has been deprecated and will be removed in the N release." msgstr "" "La prise en charge XML est obsolète et sera supprimée dans l'édition N." msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "" "XtremIO n'a pas été configuré correctement, aucun portail iscsi n'a été " "détecté" msgid "XtremIO not initialized correctly, no clusters found" msgstr "" "XtremIO ne s'est pas initialisé correctement, aucun cluster n'a été trouvé" msgid "You must implement __call__" msgstr "Vous devez implémenter __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "Vous devez installer hpe3parclient avant d'utiliser des pilotes 3PAR. " "Exécutez \"pip install python-3parclient\" pour installer hpe3parclient." msgid "You must supply an array in your EMC configuration file." msgstr "" "Vous devez fournir une matrice dans votre fichier de configuration EMC." #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "Votre taille initiale : %(originalVolumeSize)s Go est supérieure à : " "%(newSize)s Go. Seule l'extension est prise en charge. Sortie..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError : %s" msgid "Zone" msgstr "Zone" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "Stratégie de segmentation : %s, non reconnue" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "" "_create_and_copy_vdisk_data : échec de l'obtention des attributs du disque " "virtuel %s." msgid "_create_host failed to return the host name." 
msgstr "Echec de _create_host pour renvoyer le nom d'hôte." msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host : impossible de convertir le nom d'hôte. Le nom d'hôte n'est " "pas de type Unicode ou chaîne." msgid "_create_host: No connector ports." msgstr "_create_host : aucun port de connecteur." msgid "_create_local_cloned_volume, Replication Service not found." msgstr "_create_local_cloned_volume, Service de réplication introuvable." #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume, nom du volume : %(volumename)s, nom du volume " "source : %(sourcevolumename)s, instance de volume source : " "%(source_volume)s, instance de volume cible : %(target_volume)s, Code " "retour : %(rc)lu, Erreur : %(errordesc)s." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - aucun message de réussite trouvé dans la sortie " "CLI.\n" " stdout : %(out)s\n" " stderr : %(err)s" msgid "_create_volume_name, id_code is None." msgstr "_create_volume_name, la valeur de id_code est None." msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession, Service de réplication introuvable" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession, le type de session de copie (copy session) n'est pas " "défini ! Session de copie : %(cpsession)s, type de copie : %(copytype)s." #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession, session de copie : %(cpsession)s, opération : " "%(operation)s, Code retour : %(rc)lu, Erreur : %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume, nom du volume : %(volumename)s, Code retour : %(rc)lu, " "Erreur : %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "_delete_volume, nom du volume : %(volumename)s, Service de configuration de " "stockage introuvable." #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service, nom de classe : %(classname)s, InvokeMethod, " "impossible de se connecter à ETERNUS." msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "" "_extend_volume_op : l'extension d'un volume avec des instantanés n'est pas " "prise en charge." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, connecteur : %(connector)s, Associateurs : " "FUJITSU_AuthorizedTarget, impossible de se connecter à ETERNUS." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." 
msgstr "" "_find_affinity_group, connecteur : %(connector)s, EnumerateInstanceNames, " "impossible de se connecter à ETERNUS." #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, connecteur : %(connector)s, AssocNames : " "FUJITSU_ProtocolControllerForUnit, impossible de se connecter à ETERNUS." #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession, ReferenceNames, vol_instance : %(vol_instance_path)s, " "Impossible de se connecter à ETERNUS." #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service, nom de classe : %(classname)s, " "EnumerateInstanceNames, impossible de se connecter à ETERNUS." #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" "_find_initiator_names, connecteur : %(connector)s, initiateur introuvable." #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun, nom du volume : %(volumename)s, EnumerateInstanceNames, " "impossible de se connecter à ETERNUS." #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool, eternus_pool :%(eternus_pool)s, EnumerateInstances, impossible " "de se connecter à ETERNUS." #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg, nom du fichier : %(filename)s, tagname : %(tagname)s, la valeur " "de data est None ! Editez le fichier de configuration du pilote et corrigez." #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection, nom du fichier : %(filename)s, adresse IP : %(ip)s, " "port : %(port)s, utilisateur : %(user)s, mot de passe : ****, URL : %(url)s, " "ECHEC !!." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties, liste iscsiip : %(iscsiip_list)s, iqn " "introuvable." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip : %(iscsiip)s, AssociatorNames : " "CIM_BindsTo, impossible de se connecter à ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip : %(iscsiip)s, " "EnumerateInstanceNames, impossible de se connecter à ETERNUS." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip : %(iscsiip)s, GetInstance, " "impossible de se connecter à ETERNUS." #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic : les en-têtes et valeurs des attributs ne correspondent pas.\n" " En-têtes : %(header)s\n" " Valeurs : %(row)s." 
msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "Echec de _get_host_from_connector lors du renvoi du nom d'hôte pour le " "connecteur." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, échec d'obtention de host-affinity depuis aglist/" "vol_instance, groupe d'affinité : %(ag)s, ReferenceNames, impossible de se " "connecter à ETERNUS." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, échec d'obtention de l'instance host-affinity, volmap : " "%(volmap)s, GetInstance, impossible de se connecter à ETERNUS." msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, Associateurs : FUJITSU_SAPAvailableForElement, " "impossible de se connecter à ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, groupe d'affinité : %(ag)s, ReferenceNames, impossible " "de se connecter à ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, vol_instance : %(vol_instance)s, ReferenceNames : " "CIM_ProtocolControllerForUnit, impossible de se connecter à ETERNUS." #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, volmap : %(volmap)s, GetInstance, impossible de se " "connecter à ETERNUS." msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "" "_get_target_port, EnumerateInstances, impossible de se connecter à ETERNUS." #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "_get_target_port, protcole : %(protocol)s, target_port introuvable." #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay : instantané nommé %s introuvable" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay : ID volume %s introuvable" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay : vous devez spécifier source-name." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties : impossible d'obtenir les informations de " "connexion FC pour la connexion hôte-volume. L'hôte est-il configuré " "correctement pour les connexions FC ?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties : noeud introuvable dans le groupe d'E-S %(gid)s " "pour le volume %(vol)s." #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." 
msgstr "" "_map_lun, chemin vol_instance.path :%(vol)s, nom du volume : %(volumename)s, " "volume_uid: %(uid)s, initiateur : %(initiator)s, cible : %(tgt)s, aglist : " "%(aglist)s, Service de configuration de stockage introuvable." #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun, chemin vol_instance.path : %(volume)s, nom du volume : " "%(volumename)s, volume_uid : %(uid)s, aglist : %(aglist)s, Service de " "configuration de contrôleur introuvable." #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun, nom du volume : %(volumename)s, volume_uid : %(volume_uid)s, " "groupe d'affinité : %(ag)s, Code retour : %(rc)lu, Erreur : %(errordesc)s." #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun, chemin vol_instance.path : %(volume)s, AssociatorNames : " "CIM_ProtocolControllerForUnit, impossible de se connecter à ETERNUS." msgid "_update_volume_stats: Could not get storage pool data." msgstr "" "_update_volume_stats : impossible d'obtenir les données du pool de stockage." #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete, cpsession : %(cpsession)s, l'état de copysession " "est BROKEN." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "Echec de add_vdisk_copy : une copie du volume %s existe. L'ajout d'une autre " "copie dépasserait la limite de 2 copies." msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" "add_vdisk_copy a démarré sans copie de disque virtuel dans le pool attendu." #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants doit être une valeur booléenne, '%s' a été renvoyé." 
msgid "already created" msgstr "déjà créé" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "attacher l'instantané du noeud distant" #, python-format msgid "attribute %s not lazy-loadable" msgstr "l'attribut %s n'est pas de type lazy-loadable" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "sauvegarde : %(vol_id)s - Echec de création du lien physique de périphérique " "entre %(vpath)s et %(bpath)s.\n" "stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "sauvegarde : %(vol_id)s n'est pas parvenu à extraire la notification de " "réussite de sauvegarde auprès du serveur.\n" "stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "sauvegarde : échec de %(vol_id)s pour l'exécution de dsmc en raison " "d'arguments non valides sur %(bpath)s.\n" "stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "sauvegarde : échec de %(vol_id)s pour l'exécution de dsmc sur %(bpath)s.\n" "stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "sauvegarde : échec de %(vol_id)s. %(path)s n'est pas un fichier." #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "sauvegarde : échec de %(vol_id)s. %(path)s est un type de fichier inattendu. " "Fichiers par blocs ou standard pris en charge, mode de fichier réel : " "%(vol_mode)s." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "sauvegarde : échec de %(vol_id)s. Impossible de récupérer le chemin réel au " "volume %(path)s." 
msgid "being attached by different mode" msgstr "connecté par un mode différent" #, python-format msgid "call failed: %r" msgstr "échec de l'appel : %r" msgid "call failed: GARBAGE_ARGS" msgstr "échec de l'appel : GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "échec de l'appel : PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "échec de l'appel : PROG_MISMATCH : %r" msgid "call failed: PROG_UNAVAIL" msgstr "échec de l'appel : PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "lun-map, ig:%(ig)s vol introuvable :%(vol)s" msgid "can't find the volume to extend" msgstr "volume à étendre introuvable" msgid "can't handle both name and index in req" msgstr "impossible de gérer à la fois le nom et l'index dans la demande" msgid "cannot understand JSON" msgstr "impossible de comprendre JSON" msgid "cannot understand XML" msgstr "impossible de comprendre XML" #, python-format msgid "cg-%s" msgstr "groupe de cohérence %s" msgid "cgsnapshot assigned" msgstr "instantané cgsnapshot affecté" msgid "cgsnapshot changed" msgstr "instantané cgsnapshot modifié" msgid "cgsnapshots assigned" msgstr "instantanés cgsnapshots affectés" msgid "cgsnapshots changed" msgstr "instantanés cgsnapshots modifiés" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error : mot de passe ou clé privée SSH obligatoire pour " "l'authentification : définissez l'option san_password ou san_private_key." msgid "check_for_setup_error: Unable to determine system id." msgstr "" "check_for_setup_error : impossible de déterminer l'identificateur du système." msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error : impossible de déterminer le nom du système." msgid "check_hypermetro_exist error." msgstr "Erreur check_hypermetro_exist." #, python-format msgid "clone depth exceeds limit of %s" msgstr "profondeur de clone dépassant la limite de %s" msgid "consistencygroup assigned" msgstr "groupe de cohérence (consistencygroup) affecté" msgid "consistencygroup changed" msgstr "groupe de cohérence (consistencygroup) modifié" msgid "control_location must be defined" msgstr "control_location doit être défini" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume, le volume source n'existe pas dans ETERNUS." #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume, nom d'instance du volume cible : " "%(volume_instancename)s, La récupération de l'instance a échoué." msgid "create_cloned_volume: Source and destination size differ." msgstr "" "create_cloned_volume : La taille de la source et de la destination diffère." #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume : la taille du volume source %(src_vol)s est " "%(src_size)d Go et ne peut pas tenir dans le volume cible %(tgt_vol)s d'une " "taille de %(tgt_size)d Go." msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src doit correspondre à une création à partir " "d'un instantané CG ou d'une source CG." 
msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src prend en charge une source cgsnapshot ou " "une source de groupe de cohérence uniquement. Vous ne pouvez pas utiliser " "plusieurs sources." msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src prend en charge une source cgsnapshot ou " "une source de groupe de cohérence. Vous ne pouvez pas utiliser plusieurs " "sources." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "" "create_copy : le disque virtuel source %(src)s (%(src_id)s) n'existe pas." #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "" "create_copy : le(s) disque(vs) virtuel(s) source %(src)s n'existe(nt) pas." msgid "create_host: Host name is not unicode or string." msgstr "create_host : le nom d'hôte n'est pas du type Unicode ou chaîne." msgid "create_host: No initiators or wwpns supplied." msgstr "create_host : Aucun initiateur ni wwpns fourni." msgid "create_hypermetro_pair error." msgstr "Erreur create_hypermetro_pair." #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "create_snapshot, eternus_pool : %(eternus_pool)s, pool introuvable." #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot, nom de l'instantané : %(snapshotname)s, nom du volume " "source : %(volumename)s, chemin vol_instance.path : %(vol_instance)s, nom du " "volume de destination : %(d_volumename)s, pool : %(pool)s, Code retour : " "%(rc)lu, Erreur : %(errordesc)s." #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot, nom du volume : %(s_volumename)s, volume source introuvable " "sur ETERNUS." #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "" "create_snapshot, nom du volume : %(volumename)s, Service de réplication " "introuvable." #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot : le statut du volume doit être \"available\" ou \"in-use\" " "pour l'instantané. Le statut non valide est %s." msgid "create_snapshot: get source volume failed." msgstr "create_snapshot : échec de récupération du volume source." #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume, volume : %(volume)s, EnumerateInstances, impossible de se " "connecter à ETERNUS." #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume, volume : %(volume)s, nom du volume : %(volumename)s, " "eternus_pool : %(eternus_pool)s, Service de configuration de stockage " "introuvable." #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." 
msgstr "" "create_volume, nom du volume : %(volumename)s, nom du pool : " "%(eternus_pool)s, Code retour : %(rc)lu, Erreur : %(errordesc)s." msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "" "create_volume_from_snapshot, le volume source n'existe pas dans ETERNUS." #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot, nom d'instance du volume cible : " "%(volume_instancename)s, La récupération de l'instance a échoué." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "create_volume_from_snapshot : l'instantané %(name)s n'existe pas." #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot : le statut de l'instantané doit être \"available" "\" pour créer le volume. Le statut non valide est : %s." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" "create_volume_from_snapshot : La taille de la source et de la destination " "diffère." msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "" "create_volume_from_snapshot : la taille du volume est différente de celle du " "volume basé sur l'instantané." msgid "deduplicated and auto tiering can't be both enabled." msgstr "" "Les hiérarchisations dédupliquée et automatique ne peuvent pas être activées " "toutes les deux." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "suppression : échec de %(vol_id)s pour l'exécution de dsmc en raison " "d'arguments non valides avec stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "suppression : échec de %(vol_id)s pour l'exécution de dsmc avec stdout : " "%(out)s\n" " stderr : %(err)s" msgid "delete_hypermetro error." msgstr "Erreur delete_hypermetro." #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator : ACL %s introuvable. Poursuite de l'opération." msgid "delete_replication error." msgstr "Erreur lors de la suppression de réplication (delete_replication)." #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" "Suppression de l'instantané %(snapshot_name)s ayant des volumes dépendants" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "Suppression du volume %(volume_name)s ayant un instantané" msgid "detach snapshot from remote node" msgstr "détachez l'instantané du noeud distant" msgid "do_setup: No configured nodes." msgstr "do_setup : Aucun noeud configuré." msgid "element is not a child" msgstr "l'élément n'est pas un enfant" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries doit être supérieur ou égal à 0" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "erreur lors de l'écriture de l'objet à Swift, MD5 de l'objet dans swift " "%(etag)s est différent de MD5 de l'objet envoyé à swift %(md5)s" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." 
msgstr "extend_volume, eternus_pool : %(eternus_pool)s, pool introuvable." #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume, volume : %(volume)s, nom du volume : %(volumename)s, " "eternus_pool : %(eternus_pool)s, service de configuration de stockage " "introuvable." #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume, nom du volume : %(volumename)s, Code retour : %(rc)lu, " "Erreur : %(errordesc)s, Type de pool : %(pooltype)s." #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "extend_volume, nom du volume : %(volumename)s, volume introuvable." msgid "failed to create new_volume on destination host" msgstr "échec de la création de new_volume sur l'hôte de destination" msgid "fake" msgstr "factice" #, python-format msgid "file already exists at %s" msgstr "fichier existe déjà à %s" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "fileno n'est pas pris en charge par SheepdogIOWrapper" msgid "fileno() not supported by RBD()" msgstr "fileno() non pris en charge par RBD()" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "Le système de fichiers %s n'existe pas dans Nexenta Store Appliance" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "flashsystem_multihostmap_enabled est défini sur False. Le mappage multihôte " "est interdit. CMMVC6071E Le mappage entre un disque virtuel et un hôte n'a " "pas été créé car le disque virtuel est déjà mappé avec un hôte." msgid "flush() not supported in this version of librbd" msgstr "flush() non pris en charge dans cette version de librbd" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s sauvegardé par : %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt = %(fmt)s sauvegardé par : %(backing_file)s" msgid "force delete" msgstr "Forcer la suppression" msgid "get_hyper_domain_id error." msgstr "Erreur get_hyper_domain_id." msgid "get_hypermetro_by_id error." msgstr "Erreur get_hypermetro_by_id." #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params : échec d'obtention de l'IP cible pour l'initiateur " "%(ini)s. Vérifiez le fichier de configuration." #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool : échec d'obtention des attributs pour le volume %s" msgid "glance_metadata changed" msgstr "métadonnées glance_metadata modifiées" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode est défini sur copy_on_write mais %(vol)s et %(img)s " "appartiennent à des systèmes de fichiers différents." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode est défini sur copy_on_write mais %(vol)s et %(img)s " "appartiennent à des ensembles de fichiers différents." 
#, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "hgst_group %(grp)s et hgst_user %(usr)s doivent être mappés à des " "utilisateurs/groupes valides dans cinder.conf" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "" "L'élément hgst_net %(net)s spécifié dans cinder.conf est introuvable dans le " "cluster" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "hgst_redundancy doit être défini à 0 (non HA) ou 1 (HA) dans cinder.conf." msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "hgst_space_mode doit être de type octal/int dans cinder.conf" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "Le serveur hgst_storage %(svr)s ne respecte pas le format :" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "hgst_storage_servers doit être défini dans cinder.conf" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "Le service HTTP a pu être subitement désactivé ou mis à l'état de " "maintenance au milieu de cette opération." msgid "id cannot be None" msgstr "l'ID ne peut pas être None" #, python-format msgid "image %s not found" msgstr "image %s non trouvée" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "initialize_connection, volume : %(volume)s, Volume introuvable." #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "" "initialize_connection : échec d'obtention des attributs pour le volume %s." #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "initialize_connection : attribut de volume manquant pour le volume %s." #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection : aucun noeud n'a été détecté dans le groupe d'E-S " "%(gid)s pour le volume %(vol)s." #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection : le disque virtuel %s n'est pas défini." #, python-format msgid "invalid user '%s'" msgstr "utilisateur non valide '%s'" #, python-format msgid "iscsi portal, %s, not found" msgstr "portal iSCSI %s introuvable" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "iscsi_ip_address doit être définie dans le fichier de configuration si vous " "utilisez le protocole 'iSCSI'." msgid "iscsiadm execution failed. " msgstr "L'exécution d'iscsiadm a échoué. " #, python-format msgid "key manager error: %(reason)s" msgstr "Erreur du gestionnaire de clés : %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key n'est pas défini" msgid "limit param must be an integer" msgstr "le paramètre limit doit être un entier" msgid "limit param must be positive" msgstr "le paramètre limit doit être positif" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "manage_existing ne peut pas gérer un volume connecté à des hôtes. Veuillez " "déconnecter ce volume des hôtes existants avant l'importation" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "manage_existing requiert une clé 'name' pour identifier un volume existant." 
#, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot : Erreur lors de la gestion de la relecture " "existante %(ss)s sur le volume %(vol)s" #, python-format msgid "marker [%s] not found" msgstr "le marqueur [%s] est introuvable" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "guillemets manquants pour mdiskgrp %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "migration_policy doit être 'on-demand' ou 'never', transmis : %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "mkfs a échoué sur le volume %(vol)s, message d'erreur : %(err)s." msgid "mock" msgstr "mock" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs n'est pas installé" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "plusieurs ressources avec le nom %s ont été détectées par drbdmanage" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "plusieurs ressources avec l'ID d'instantané %s ont été détectées" msgid "name cannot be None" msgstr "le nom ne peut pas être None" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path : outil NAVISECCLI introuvable %(path)s." #, python-format msgid "no REPLY but %r" msgstr "aucune réponse (REPLY) mais %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "aucun instantané avec l'ID %s n'a été détecté dans drbdmanage" #, python-format msgid "not exactly one snapshot with id %s" msgstr "n'est pas exactement un snapshot avec l'ID %s" #, python-format msgid "not exactly one volume with id %s" msgstr "n'est pas exactement un volume avec l'ID %s" #, python-format msgid "obj missing quotes %s" msgstr "guillemets manquants pour obj %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled n'est pas désactivé." msgid "progress must be an integer percentage" msgstr "la progression doit être un pourcentage d'entier" msgid "promote_replica not implemented." msgstr "promote_replica non implémenté." msgid "provider must be defined" msgstr "fournisseur doit être défini" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "qemu-img %(minimum_version)s ou ultérieur est requis par ce pilote de " "périphérique. Version qemu-img actuelle : %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img non installé et image de type %s. Seules les images RAW peuvent " "être utilisées si qemu-img n'est pas installé." msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img n'est pas installé et le format de disque n'est pas spécifié. " "Seules les images RAW peuvent être utilisées si qemu-img n'est pas installé." msgid "rados and rbd python libraries not found" msgstr "bibliothèques rados et rbd python introuvables" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted peut uniquement correspondre à 'no', 'yes' ou 'only', et non %r" #, python-format msgid "replication_device should be configured on backend: %s." 
msgstr "" "L'unité de réplication (replication_device) doit être configurée sur le back-" "end : %s." #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "" "L'unité de réplication (replication_device ) avec l'ID de back-end [%s] est " "manquante." #, python-format msgid "replication_failover failed. %s not found." msgstr "Echec de replication_failover. %s introuvable." msgid "replication_failover failed. Backend not configured for failover" msgstr "" "Echec de replication_failover. Le back-end n'est pas configuré pour le " "basculement" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "restauration : échec de %(vol_id)s pour l'exécution de dsmc en raison " "d'arguments non valides sur %(bpath)s.\n" "stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "restauration : échec de %(vol_id)s pour l'exécution de dsmc sur %(bpath)s.\n" "stdout : %(out)s\n" " stderr : %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "restauration : échec de %(vol_id)s.\n" "stdout : %(out)s\n" " stderr : %(err)s." msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup annulé, la liste d'objets actuelle ne correspond pas à la " "liste d'objets stockée dans les métadonnées." msgid "root element selecting a list" msgstr "élément racine sélectionnant une liste" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "Le membre %s est manquant dans rtslib_fb : vous avez peut-être besoin d'un " "python-rtslib-fb plus récent." msgid "san_ip is not set." msgstr "san_ip n'a pas été défini." msgid "san_ip must be set" msgstr "san_ip doit être défini" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip : Configuration de zone obligatoire. san_ip n'est pas défini." msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "san_login et/ou san_password n'est pas défini pour le pilote Datera dans " "cinder.conf. Définissez cette information et relancez le service cinder-" "volume." msgid "serve() can only be called once" msgstr "serve() ne peut être appelé qu'une seule fois" msgid "service not found" msgstr "service introuvable" msgid "snapshot does not exist" msgstr "L'instantané n'existe pas" #, python-format msgid "snapshot id:%s not found" msgstr "ID d'instantané :%s introuvable" #, python-format msgid "snapshot-%s" msgstr "instantané %s" msgid "snapshots assigned" msgstr "instantanés affectés" msgid "snapshots changed" msgstr "instantanés modifiés" #, python-format msgid "source vol id:%s not found" msgstr "ID de volume source :%s introuvable" #, python-format msgid "source volume id:%s is not replicated" msgstr "identificateur de volume source : %s non répliqué" msgid "source-name cannot be empty." msgstr "source-name ne peut pas être vide." msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "" "Le format de source-name doit être : 'chemin_vmdk@chemin_inventaire_vm'." 
#, python-format msgid "status must be %s and" msgstr "Le statut doit être %s et" msgid "status must be available" msgstr "l'état doit être disponible" msgid "stop_hypermetro error." msgstr "Erreur stop_hypermetro." msgid "subclasses must implement construct()!" msgstr "les sous-classes doivent implémenter construct()!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo a échoué, continue comme si de rien n'était" msgid "sync_hypermetro error." msgstr "Erreur sync_hypermetro." msgid "sync_replica not implemented." msgstr "sync_replica non implémenté." #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli n'est pas installé et n'a pas pu créer de répertoire par défaut " "(%(default_path)s) : %(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "" "terminate_connection : échec d'obtention du nom d'hôte à partir du " "connecteur." msgid "timeout creating new_volume on destination host" msgstr "délai d'attente de création de new_volume sur l'hôte de destination" msgid "too many body keys" msgstr "trop de clés de corps" #, python-format msgid "umount: %s: not mounted" msgstr "umount: %s: non monté" #, python-format msgid "umount: %s: target is busy" msgstr "umount: %s : cible occupée" msgid "umount: : some other error" msgstr "umount: : autre erreur" msgid "umount: : target is busy" msgstr "umount: : cible occupée" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot : instantané nommé %s introuvable" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot : ID volume %s introuvable" #, python-format msgid "unrecognized argument %s" msgstr "argument non reconnu %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "algorithme de compression non pris en charge : %s" msgid "valid iqn needed for show_target" msgstr "iqn valide requis pour show_target" #, python-format msgid "vdisk %s is not defined." msgstr "Le disque virtuel %s n'a pas été défini." msgid "vmemclient python library not found" msgstr "La bibliothèque Python vmemclient est introuvable" #, python-format msgid "volume %s not found in drbdmanage" msgstr "Le volume %s est introuvable dans drbdmanage" msgid "volume assigned" msgstr "volume affecté" msgid "volume changed" msgstr "volume modifié" msgid "volume does not exist" msgstr "Le volume n'existe pas" msgid "volume is already attached" msgstr "le volume est déjà connecté" msgid "volume is not local to this node" msgstr "le volume n'est pas local sur ce noeud" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "La taille du volume %(volume_size)d est insuffisante pour restaurer la " "sauvegarde d'une taille de %(size)d." #, python-format msgid "volume size %d is invalid." msgstr "taille du volume %d non valide." msgid "volume_type cannot be None" msgstr "La valeur de volume_type ne peut pas être None" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "volume_type doit être fourni lors de la création d'un volume dans un groupe " "de cohérence." msgid "volume_type_id cannot be None" msgstr "volume_type_id ne peut pas avoir pour valeur None" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" "volume_types doit être indiqué pour créer le groupe de cohérence %(name)s." 
#, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "volume_types doit être indiqué pour créer le groupe de cohérence %s." msgid "volumes assigned" msgstr "volumes affectés" msgid "volumes changed" msgstr "volumes modifiés" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition : %s a dépassé le délai d'attente." #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "" "La propriété zfssa_manage_policy doit être définie avec la valeur 'strict' " "ou 'loose'. Valeur actuelle : %s." msgid "{} is not a valid option." msgstr "{} est une option non valide." cinder-8.0.0/cinder/locale/cinder.pot0000664000567000056710000114257112701406257020636 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the cinder project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev19\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-23 06:37+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: cinder/context.py:113 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" #: cinder/coordination.py:119 msgid "Coordinator uninitialized." msgstr "" #: cinder/coordination.py:210 #, python-format msgid "Not a valid string: %s" msgstr "" #: cinder/exception.py:84 msgid "An unknown exception occurred." msgstr "" #: cinder/exception.py:136 #, python-format msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" #: cinder/exception.py:141 #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "" #: cinder/exception.py:145 #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "" #: cinder/exception.py:149 #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "" #: cinder/exception.py:153 msgid "Not authorized." msgstr "" #: cinder/exception.py:158 msgid "User does not have admin privileges" msgstr "" #: cinder/exception.py:162 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "" #: cinder/exception.py:166 #, python-format msgid "Not authorized for image %(image_id)s." msgstr "" #: cinder/exception.py:170 msgid "Volume driver not ready." msgstr "" #: cinder/exception.py:174 msgid "Unacceptable parameters." msgstr "" #: cinder/exception.py:179 #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "" #: cinder/exception.py:183 #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" #: cinder/exception.py:188 #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" #: cinder/exception.py:192 msgid "The results are invalid." msgstr "" #: cinder/exception.py:196 #, python-format msgid "Invalid input received: %(reason)s" msgstr "" #: cinder/exception.py:200 #, python-format msgid "Invalid volume type: %(reason)s" msgstr "" #: cinder/exception.py:204 #, python-format msgid "Invalid volume: %(reason)s" msgstr "" #: cinder/exception.py:208 #, python-format msgid "Invalid content type %(content_type)s." 
msgstr "" #: cinder/exception.py:212 #, python-format msgid "Invalid host: %(reason)s" msgstr "" #: cinder/exception.py:218 #, python-format msgid "%(err)s" msgstr "" #: cinder/exception.py:222 #, python-format msgid "Invalid auth key: %(reason)s" msgstr "" #: cinder/exception.py:226 #, python-format msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" #: cinder/exception.py:231 msgid "Service is unavailable at this time." msgstr "" #: cinder/exception.py:235 #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" #: cinder/exception.py:239 #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" #: cinder/exception.py:243 #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "" #: cinder/exception.py:247 #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" #: cinder/exception.py:252 #, python-format msgid "API version %(version)s is not supported on this method." msgstr "" #: cinder/exception.py:256 #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s " "and maximum is %(max_ver)s." msgstr "" #: cinder/exception.py:261 #, python-format msgid "Error while requesting %(service)s API." msgstr "" #: cinder/exception.py:270 #, python-format msgid "Timeout while requesting %(service)s API." msgstr "" #: cinder/exception.py:274 #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "" #: cinder/exception.py:280 msgid "Resource could not be found." msgstr "" #: cinder/exception.py:286 #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "" #: cinder/exception.py:290 #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "" #: cinder/exception.py:295 #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" #: cinder/exception.py:300 #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" #: cinder/exception.py:305 #, python-format msgid "Invalid metadata: %(reason)s" msgstr "" #: cinder/exception.py:309 #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "" #: cinder/exception.py:313 #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" #: cinder/exception.py:318 #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "" #: cinder/exception.py:322 #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" #: cinder/exception.py:327 #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" #: cinder/exception.py:332 #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" #: cinder/exception.py:337 #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes " "present with the type." msgstr "" #: cinder/exception.py:342 #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" #: cinder/exception.py:346 #, python-format msgid "Instance %(uuid)s could not be found." 
msgstr "" #: cinder/exception.py:350 #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" #: cinder/exception.py:354 #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" #: cinder/exception.py:359 #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "" #: cinder/exception.py:363 #, python-format msgid "Invalid image href %(image_href)s." msgstr "" #: cinder/exception.py:367 #, python-format msgid "Image %(image_id)s could not be found." msgstr "" #: cinder/exception.py:374 #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "" #: cinder/exception.py:377 #, python-format msgid "Service %(service_id)s could not be found." msgstr "" #: cinder/exception.py:382 msgid "Service is too old to fulfil this request." msgstr "" #: cinder/exception.py:386 #, python-format msgid "Host %(host)s could not be found." msgstr "" #: cinder/exception.py:390 #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" #: cinder/exception.py:394 #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" #: cinder/exception.py:398 #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "" #: cinder/exception.py:402 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " "%(unders)s" msgstr "" #: cinder/exception.py:407 #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "" #: cinder/exception.py:412 msgid "Quota could not be found" msgstr "" #: cinder/exception.py:416 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "" #: cinder/exception.py:420 #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "" #: cinder/exception.py:424 #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "" #: cinder/exception.py:428 #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "" #: cinder/exception.py:432 #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "" #: cinder/exception.py:436 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "" #: cinder/exception.py:440 #, python-format msgid "File %(file_path)s could not be found." msgstr "" #: cinder/exception.py:448 #, python-format msgid "Volume Type %(id)s already exists." msgstr "" #: cinder/exception.py:452 #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" #: cinder/exception.py:457 #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" #: cinder/exception.py:461 #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "" #: cinder/exception.py:465 #, python-format msgid "Malformed message body: %(reason)s" msgstr "" #: cinder/exception.py:469 #, python-format msgid "Could not find config at %(path)s" msgstr "" #: cinder/exception.py:473 #, python-format msgid "Could not find parameter %(param)s" msgstr "" #: cinder/exception.py:477 #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" #: cinder/exception.py:481 #, python-format msgid "No valid host was found. 
%(reason)s" msgstr "" #: cinder/exception.py:490 #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "" #: cinder/exception.py:497 #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" #: cinder/exception.py:508 #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" #: cinder/exception.py:513 #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" #: cinder/exception.py:519 #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" #: cinder/exception.py:528 #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" #: cinder/exception.py:532 #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "" #: cinder/exception.py:536 #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "" #: cinder/exception.py:540 #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" #: cinder/exception.py:545 #, python-format msgid "Cannot update volume_type %(id)s" msgstr "" #: cinder/exception.py:549 #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "" #: cinder/exception.py:553 #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" #: cinder/exception.py:557 #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" #: cinder/exception.py:561 #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "" #: cinder/exception.py:565 #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" #: cinder/exception.py:570 #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" #: cinder/exception.py:574 #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "" #: cinder/exception.py:578 #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "" #: cinder/exception.py:582 #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "" #: cinder/exception.py:586 #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "" #: cinder/exception.py:590 #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" #: cinder/exception.py:594 #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" #: cinder/exception.py:599 #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "" #: cinder/exception.py:603 msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" #: cinder/exception.py:607 msgid "An error has occurred during backup operation" msgstr "" #: cinder/exception.py:611 msgid "Unsupported backup metadata version requested" msgstr "" #: cinder/exception.py:615 msgid "Unsupported backup verify driver" msgstr "" #: cinder/exception.py:619 msgid "Metadata backup already exists for this volume" msgstr "" #: cinder/exception.py:623 msgid "Backup RBD operation failed" msgstr "" #: cinder/exception.py:627 msgid "Backup operation of an encrypted volume failed." 
msgstr "" #: cinder/exception.py:631 #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "" #: cinder/exception.py:635 msgid "Failed to identify volume backend." msgstr "" #: cinder/exception.py:639 #, python-format msgid "Invalid backup: %(reason)s" msgstr "" #: cinder/exception.py:643 #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "" #: cinder/exception.py:647 #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "" #: cinder/exception.py:651 #, python-format msgid "Volume migration failed: %(reason)s" msgstr "" #: cinder/exception.py:655 #, python-format msgid "SSH command injection detected: %(command)s" msgstr "" #: cinder/exception.py:659 #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "" #: cinder/exception.py:663 #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" #: cinder/exception.py:668 #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" #: cinder/exception.py:673 #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "" #: cinder/exception.py:677 #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" #: cinder/exception.py:682 #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" #: cinder/exception.py:687 #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" #: cinder/exception.py:692 #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "" #: cinder/exception.py:696 #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" #: cinder/exception.py:700 #, python-format msgid "key manager error: %(reason)s" msgstr "" #: cinder/exception.py:704 #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" #: cinder/exception.py:709 #, python-format msgid "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" #: cinder/exception.py:714 #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "" #: cinder/exception.py:718 #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "" #: cinder/exception.py:723 #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "" #: cinder/exception.py:728 #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "" #: cinder/exception.py:733 #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" #: cinder/exception.py:738 #, python-format msgid "Error extending volume: %(reason)s" msgstr "" #: cinder/exception.py:742 #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "" #: cinder/exception.py:746 msgid "Unable to create lock. Coordination backend not started." msgstr "" #: cinder/exception.py:750 msgid "Lock acquisition failed." msgstr "" #: cinder/exception.py:762 #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" #: cinder/exception.py:766 #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" #: cinder/exception.py:770 #, python-format msgid "Volume device not found at %(device)s." 
msgstr "" #: cinder/exception.py:776 #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "" #: cinder/exception.py:781 msgid "Bad response from SolidFire API" msgstr "" #: cinder/exception.py:785 msgid "SolidFire Cinder Driver exception" msgstr "" #: cinder/exception.py:789 #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "" #: cinder/exception.py:793 #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" #: cinder/exception.py:798 msgid "Retryable SolidFire Exception encountered" msgstr "" #: cinder/exception.py:803 #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "" #: cinder/exception.py:808 msgid "Unknown RemoteFS exception" msgstr "" #: cinder/exception.py:812 msgid "A concurrent, possibly contradictory, request has been made." msgstr "" #: cinder/exception.py:817 msgid "No mounted shares found" msgstr "" #: cinder/exception.py:821 cinder/exception.py:834 cinder/exception.py:860 #: cinder/exception.py:874 #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "" #: cinder/exception.py:826 msgid "Unknown NFS exception" msgstr "" #: cinder/exception.py:830 msgid "No mounted NFS shares found" msgstr "" #: cinder/exception.py:839 msgid "Unknown SMBFS exception." msgstr "" #: cinder/exception.py:843 msgid "No mounted SMBFS shares found." msgstr "" #: cinder/exception.py:847 #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "" #: cinder/exception.py:852 msgid "Unknown Gluster exception" msgstr "" #: cinder/exception.py:856 msgid "No mounted Gluster shares found" msgstr "" #: cinder/exception.py:866 msgid "Unknown Virtuozzo Storage exception" msgstr "" #: cinder/exception.py:870 msgid "No mounted Virtuozzo Storage shares found" msgstr "" #: cinder/exception.py:879 #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "" #: cinder/exception.py:883 #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "" #: cinder/exception.py:887 #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "" #: cinder/exception.py:891 #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "" #: cinder/exception.py:895 #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "" #: cinder/exception.py:899 #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "" #: cinder/exception.py:903 msgid "NetApp Cinder Driver exception." msgstr "" #: cinder/exception.py:907 #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" #: cinder/exception.py:912 #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: " "%(rc)s) (Output: %(out)s)." msgstr "" #: cinder/exception.py:918 #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "" #: cinder/exception.py:922 #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "" #: cinder/exception.py:927 #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "" #: cinder/exception.py:931 #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "" #: cinder/exception.py:936 msgid "HBSD error occurs." msgstr "" #: cinder/exception.py:953 msgid "Storage resource could not be found." msgstr "" #: cinder/exception.py:957 #, python-format msgid "Volume %(volume_name)s is busy." 
msgstr "" #: cinder/exception.py:962 msgid "Bad response from Datera API" msgstr "" #: cinder/exception.py:967 #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" #: cinder/exception.py:971 #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" #: cinder/exception.py:975 #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" #: cinder/exception.py:979 #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "" #: cinder/exception.py:983 #, python-format msgid "%(error_message)s" msgstr "" #: cinder/exception.py:988 msgid "X-IO Volume Driver exception!" msgstr "" #: cinder/exception.py:993 #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "" #: cinder/exception.py:997 #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "" #: cinder/exception.py:1001 #, python-format msgid "Backend reports: %(message)s" msgstr "" #: cinder/exception.py:1005 msgid "Backend reports: item already exists" msgstr "" #: cinder/exception.py:1009 msgid "Backend reports: item not found" msgstr "" #: cinder/exception.py:1014 #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s," " Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" #: cinder/exception.py:1021 msgid "Volume to Initiator Group mapping already exists" msgstr "" #: cinder/exception.py:1025 msgid "System is busy, retry operation." msgstr "" #: cinder/exception.py:1029 msgid "Exceeded the limit of snapshots per volume" msgstr "" #: cinder/exception.py:1034 #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" #: cinder/exception.py:1040 #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "" #: cinder/exception.py:1044 cinder/exception.py:1048 cinder/exception.py:1056 #: cinder/exception.py:1086 cinder/exception.py:1101 #, python-format msgid "%(message)s" msgstr "" #: cinder/exception.py:1052 #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "" #: cinder/exception.py:1060 msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "" #: cinder/exception.py:1065 #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "" #: cinder/exception.py:1069 #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" #: cinder/exception.py:1076 msgid "There is no metadata in DB object." msgstr "" #: cinder/exception.py:1080 #, python-format msgid "Operation not supported: %(operation)s." msgstr "" #: cinder/exception.py:1091 #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "" #: cinder/exception.py:1096 msgid "Unexpected response from Tegile IntelliFlash API" msgstr "" #: cinder/exception.py:1106 #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "" #: cinder/exception.py:1110 #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "" #: cinder/exception.py:1114 #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "" #: cinder/quota.py:125 #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: " "quota_%(res)s, it is now deprecated. Please use the default quota class " "for default quota." 
msgstr "" #: cinder/quota.py:490 #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for" " project '%(proj)s' for resource '%(res)s'. Please lower the limit or " "usage for one or more of the following projects: '%(child_ids)s'" msgstr "" #: cinder/quota.py:525 #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "" #: cinder/quota.py:576 #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit " "of %(limit)d is less than in-use value of %(used)d" msgstr "" #: cinder/quota.py:1149 cinder/quota.py:1197 msgid "Cannot register resource" msgstr "" #: cinder/quota.py:1152 cinder/quota.py:1200 msgid "Cannot register resources" msgstr "" #: cinder/quota_utils.py:150 #, python-format msgid "Tenant ID: %s does not exist." msgstr "" #: cinder/quota_utils.py:237 msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "" #: cinder/quota_utils.py:241 msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" #: cinder/service.py:377 #, python-format msgid "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" #: cinder/service.py:463 msgid "serve() can only be called once" msgstr "" #: cinder/ssh_utils.py:76 #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "" #: cinder/ssh_utils.py:135 msgid "Specify a password or private_key" msgstr "" #: cinder/ssh_utils.py:151 #, python-format msgid "Error connecting via ssh: %s" msgstr "" #: cinder/utils.py:109 #, python-format msgid "Can not translate %s to integer." msgstr "" #: cinder/utils.py:140 #, python-format msgid "May specify only one of %s" msgstr "" #: cinder/utils.py:641 #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "" #: cinder/utils.py:672 #, python-format msgid "Unable to get a block device for file '%s'" msgstr "" #: cinder/utils.py:685 #, python-format msgid "%s is not a string or unicode" msgstr "" #: cinder/utils.py:689 #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "" #: cinder/utils.py:694 #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "" #: cinder/utils.py:1021 msgid " or " msgstr "" #: cinder/utils.py:1076 #, python-format msgid "%s must be an integer." msgstr "" #: cinder/utils.py:1080 #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "" #: cinder/utils.py:1084 #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "" #: cinder/api/common.py:125 cinder/volume/api.py:493 msgid "limit param must be an integer" msgstr "" #: cinder/api/common.py:128 cinder/volume/api.py:490 msgid "limit param must be positive" msgstr "" #: cinder/api/common.py:181 #, python-format msgid "marker [%s] not found" msgstr "" #: cinder/api/common.py:214 msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be " "used with the 'sort' parameter." msgstr "" #: cinder/api/xmlutil.py:272 msgid "element is not a child" msgstr "" #: cinder/api/xmlutil.py:474 msgid "root element selecting a list" msgstr "" #: cinder/api/xmlutil.py:797 #, python-format msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" #: cinder/api/xmlutil.py:918 msgid "subclasses must implement construct()!" 
msgstr "" #: cinder/api/contrib/admin_actions.py:69 msgid "Must specify 'status'" msgstr "" #: cinder/api/contrib/admin_actions.py:72 msgid "Must specify a valid status" msgstr "" #: cinder/api/contrib/admin_actions.py:162 msgid "Must specify a valid attach status" msgstr "" #: cinder/api/contrib/admin_actions.py:169 msgid "Must specify a valid migration status" msgstr "" #: cinder/api/contrib/admin_actions.py:175 msgid "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" #: cinder/api/contrib/admin_actions.py:192 msgid "Must specify 'connector'." msgstr "" #: cinder/api/contrib/admin_actions.py:196 #: cinder/api/contrib/volume_actions.py:245 msgid "Unable to terminate volume connection from backend." msgstr "" #: cinder/api/contrib/admin_actions.py:231 msgid "Must specify 'host'." msgstr "" #: cinder/api/contrib/admin_actions.py:252 msgid "Must specify 'new_volume'" msgstr "" #: cinder/api/contrib/backups.py:258 cinder/api/contrib/volume_transfer.py:160 #: cinder/api/contrib/volume_transfer.py:200 msgid "Incorrect request body format" msgstr "" #: cinder/api/contrib/backups.py:366 msgid "Incorrect request body format." msgstr "" #: cinder/api/contrib/capabilities.py:54 #, python-format msgid "Can't find service: %s" msgstr "" #: cinder/api/contrib/cgsnapshots.py:122 msgid "Invalid cgsnapshot" msgstr "" #: cinder/api/contrib/cgsnapshots.py:125 msgid "Failed cgsnapshot" msgstr "" #: cinder/api/contrib/cgsnapshots.py:167 msgid "'consistencygroup_id' must be specified" msgstr "" #: cinder/api/contrib/consistencygroups.py:160 msgid "Missing required element 'consistencygroup' in request body." msgstr "" #: cinder/api/contrib/consistencygroups.py:169 #, python-format msgid "Invalid value '%s' for force." msgstr "" #: cinder/api/contrib/consistencygroups.py:229 #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" #: cinder/api/contrib/consistencygroups.py:273 #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" #: cinder/api/contrib/consistencygroups.py:279 #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create " "consistency group %(name)s from source." msgstr "" #: cinder/api/contrib/consistencygroups.py:328 msgid "Missing request body." msgstr "" #: cinder/api/contrib/consistencygroups.py:343 msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty " "in the request body." msgstr "" #: cinder/api/contrib/hosts.py:88 cinder/api/openstack/wsgi.py:392 msgid "cannot understand XML" msgstr "" #: cinder/api/contrib/hosts.py:143 #, python-format msgid "Host '%s' could not be found." msgstr "" #: cinder/api/contrib/hosts.py:172 #, python-format msgid "Invalid status: '%s'" msgstr "" #: cinder/api/contrib/hosts.py:175 #, python-format msgid "Invalid update setting: '%s'" msgstr "" #: cinder/api/contrib/hosts.py:213 msgid "Describe-resource is admin only functionality" msgstr "" #: cinder/api/contrib/hosts.py:220 msgid "Host not found" msgstr "" #: cinder/api/contrib/qos_specs_manage.py:81 msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "" #: cinder/api/contrib/qos_specs_manage.py:145 msgid "Please specify a name for QoS specs." msgstr "" #: cinder/api/contrib/qos_specs_manage.py:259 msgid "Failed to disassociate qos specs." msgstr "" #: cinder/api/contrib/qos_specs_manage.py:261 msgid "Qos specs still in use." 
msgstr "" #: cinder/api/contrib/qos_specs_manage.py:338 #: cinder/api/contrib/qos_specs_manage.py:392 msgid "Volume Type id must not be None." msgstr "" #: cinder/api/contrib/quota_classes.py:77 msgid "Missing required element quota_class_set in request body." msgstr "" #: cinder/api/contrib/quotas.py:74 #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "" #: cinder/api/contrib/quotas.py:102 cinder/api/contrib/quotas.py:114 msgid "" "Update and delete quota operations can only be made by an admin of " "immediate parent or by the CLOUD admin." msgstr "" #: cinder/api/contrib/quotas.py:109 msgid "" "Update and delete quota operations can only be made to projects in the " "same hierarchy of the project in which users are scoped to." msgstr "" #: cinder/api/contrib/quotas.py:134 msgid "" "Show operations can only be made to projects in the same hierarchy of the" " project in which users are scoped to." msgstr "" #: cinder/api/contrib/quotas.py:140 msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" #: cinder/api/contrib/quotas.py:145 msgid "" "An user with a token scoped to a subproject is not allowed to see the " "quota of its parents." msgstr "" #: cinder/api/contrib/quotas.py:225 #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "" #: cinder/api/contrib/quotas.py:240 #, python-format msgid "Bad key(s) in quota set: %s" msgstr "" #: cinder/api/contrib/quotas.py:399 msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" #: cinder/api/contrib/scheduler_hints.py:37 msgid "Malformed scheduler_hints attribute" msgstr "" #: cinder/api/contrib/services.py:102 msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" #: cinder/api/contrib/services.py:192 msgid "Unknown action" msgstr "" #: cinder/api/contrib/services.py:197 msgid "Missing required element 'host' in request body." msgstr "" #: cinder/api/contrib/services.py:204 msgid "Disabled reason contains invalid characters or is too long" msgstr "" #: cinder/api/contrib/services.py:220 msgid "Unknown service" msgstr "" #: cinder/api/contrib/services.py:227 msgid "service not found" msgstr "" #: cinder/api/contrib/snapshot_actions.py:52 msgid "'status' must be specified." msgstr "" #: cinder/api/contrib/snapshot_actions.py:62 #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" #: cinder/api/contrib/snapshot_actions.py:68 #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with " "status %(current)s." msgstr "" #: cinder/api/contrib/snapshot_actions.py:80 msgid "progress must be an integer percentage" msgstr "" #: cinder/api/contrib/snapshot_manage.py:86 msgid "Missing required element snapshot in request body." msgstr "" #: cinder/api/contrib/snapshot_manage.py:97 #: cinder/api/contrib/volume_manage.py:108 #, python-format msgid "The following elements are required: %s" msgstr "" #: cinder/api/contrib/snapshot_manage.py:106 #, python-format msgid "Volume: %s could not be found." msgstr "" #: cinder/api/contrib/snapshot_manage.py:127 #, python-format msgid "Service %s not found." 
msgstr "" #: cinder/api/contrib/types_extra_specs.py:114 msgid "Request body empty" msgstr "" #: cinder/api/contrib/types_extra_specs.py:118 #: cinder/api/v1/snapshot_metadata.py:76 cinder/api/v1/volume_metadata.py:76 #: cinder/api/v2/snapshot_metadata.py:69 cinder/api/v2/volume_metadata.py:72 msgid "Request body and URI mismatch" msgstr "" #: cinder/api/contrib/types_extra_specs.py:121 #: cinder/api/v1/snapshot_metadata.py:80 cinder/api/v1/volume_metadata.py:80 #: cinder/api/v2/snapshot_metadata.py:73 cinder/api/v2/volume_metadata.py:76 msgid "Request body contains too many items" msgstr "" #: cinder/api/contrib/types_extra_specs.py:146 #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "" #: cinder/api/contrib/types_extra_specs.py:170 msgid "" "Key names can only contain alphanumeric characters, underscores, periods," " colons and hyphens." msgstr "" #: cinder/api/contrib/types_manage.py:66 cinder/api/contrib/types_manage.py:120 msgid "Volume type name can not be empty." msgstr "" #: cinder/api/contrib/types_manage.py:77 cinder/api/contrib/types_manage.py:129 #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "" #: cinder/api/contrib/types_manage.py:124 msgid "Specify volume type name, description, is_public or a combination thereof." msgstr "" #: cinder/api/contrib/types_manage.py:180 msgid "Target volume type is still in use." msgstr "" #: cinder/api/contrib/volume_actions.py:103 msgid "Invalid request to attach volume to an invalid target" msgstr "" #: cinder/api/contrib/volume_actions.py:107 msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode " "should be 'rw' or 'ro'" msgstr "" #: cinder/api/contrib/volume_actions.py:215 #: cinder/api/contrib/volume_actions.py:241 msgid "Must specify 'connector'" msgstr "" #: cinder/api/contrib/volume_actions.py:224 msgid "Unable to fetch connection information from backend." msgstr "" #: cinder/api/contrib/volume_actions.py:258 msgid "No image_name was specified in request." msgstr "" #: cinder/api/contrib/volume_actions.py:266 cinder/api/v2/snapshots.py:182 #, python-format msgid "Invalid value for 'force': '%s'" msgstr "" #: cinder/api/contrib/volume_actions.py:308 msgid "New volume size must be specified as an integer." msgstr "" #: cinder/api/contrib/volume_actions.py:331 msgid "Must specify readonly in request." msgstr "" #: cinder/api/contrib/volume_actions.py:339 #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "" #: cinder/api/contrib/volume_actions.py:353 msgid "New volume type must be specified." msgstr "" #: cinder/api/contrib/volume_actions.py:372 msgid "Must specify bootable in request." msgstr "" #: cinder/api/contrib/volume_actions.py:380 #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "" #: cinder/api/contrib/volume_image_metadata.py:46 #: cinder/api/contrib/volume_image_metadata.py:132 #, python-format msgid "Volume with volume id %s does not exist." msgstr "" #: cinder/api/contrib/volume_image_metadata.py:110 #: cinder/api/contrib/volume_image_metadata.py:135 #: cinder/api/contrib/volume_image_metadata.py:156 msgid "Malformed request body." msgstr "" #: cinder/api/contrib/volume_image_metadata.py:162 msgid "Metadata item was not found." msgstr "" #: cinder/api/contrib/volume_image_metadata.py:169 msgid "The key cannot be None." msgstr "" #: cinder/api/contrib/volume_manage.py:141 msgid "Service not found." 
msgstr "" #: cinder/api/contrib/volume_type_access.py:99 msgid "Access list not available for public volume types." msgstr "" #: cinder/api/contrib/volume_type_access.py:113 #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "" #: cinder/api/contrib/volume_type_encryption.py:69 msgid "provider must be defined" msgstr "" #: cinder/api/contrib/volume_type_encryption.py:71 msgid "control_location must be defined" msgstr "" #: cinder/api/contrib/volume_type_encryption.py:79 #, python-format msgid "Valid control location are: %s" msgstr "" #: cinder/api/contrib/volume_type_encryption.py:107 msgid "Cannot create encryption specs. Volume type in use." msgstr "" #: cinder/api/contrib/volume_type_encryption.py:137 msgid "Request body contains too many items." msgstr "" #: cinder/api/contrib/volume_type_encryption.py:143 msgid "Cannot update encryption specs. Volume type in use." msgstr "" #: cinder/api/contrib/volume_type_encryption.py:177 msgid "Cannot delete encryption specs. Volume type in use." msgstr "" #: cinder/api/middleware/auth.py:113 msgid "Invalid service catalog json." msgstr "" #: cinder/api/middleware/fault.py:73 #, python-format msgid "%(exception)s: %(explanation)s" msgstr "" #: cinder/api/openstack/__init__.py:79 msgid "Must specify an ExtensionManager class" msgstr "" #: cinder/api/openstack/api_version_request.py:119 msgid "An API version request must be compared to a VersionedMethod object." msgstr "" #: cinder/api/openstack/wsgi.py:367 cinder/api/openstack/wsgi.py:774 msgid "cannot understand JSON" msgstr "" #: cinder/api/openstack/wsgi.py:779 msgid "too many body keys" msgstr "" #: cinder/api/openstack/wsgi.py:1078 #, python-format msgid "There is no such action: %s" msgstr "" #: cinder/api/openstack/wsgi.py:1081 cinder/api/openstack/wsgi.py:1104 #: cinder/api/v1/snapshot_metadata.py:54 cinder/api/v1/snapshot_metadata.py:72 #: cinder/api/v1/snapshot_metadata.py:97 cinder/api/v1/snapshot_metadata.py:122 #: cinder/api/v1/volume_metadata.py:54 cinder/api/v1/volume_metadata.py:72 #: cinder/api/v1/volume_metadata.py:97 cinder/api/v1/volume_metadata.py:122 #: cinder/api/v2/snapshot_metadata.py:112 cinder/api/v2/volume_metadata.py:116 msgid "Malformed request body" msgstr "" #: cinder/api/openstack/wsgi.py:1101 msgid "Unsupported Content-Type" msgstr "" #: cinder/api/openstack/wsgi.py:1113 msgid "Malformed request url" msgstr "" #: cinder/api/openstack/wsgi.py:1455 #, python-format msgid "Missing required element '%s' in request body." msgstr "" #: cinder/api/openstack/wsgi.py:1555 msgid "XML support has been deprecated and will be removed in the N release." msgstr "" #: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:139 #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" #: cinder/api/v1/limits.py:266 cinder/api/v2/limits.py:263 msgid "This request was rate-limited." 
msgstr "" #: cinder/api/v1/snapshot_metadata.py:38 cinder/api/v1/snapshot_metadata.py:118 #: cinder/api/v1/snapshot_metadata.py:157 cinder/api/v2/snapshot_metadata.py:38 #: cinder/api/v2/snapshot_metadata.py:108 #: cinder/api/v2/snapshot_metadata.py:147 msgid "snapshot does not exist" msgstr "" #: cinder/api/v1/snapshot_metadata.py:140 #: cinder/api/v1/snapshot_metadata.py:150 cinder/api/v1/volume_metadata.py:140 #: cinder/api/v1/volume_metadata.py:150 cinder/api/v2/snapshot_metadata.py:130 #: cinder/api/v2/snapshot_metadata.py:140 cinder/api/v2/volume_metadata.py:134 #: cinder/api/v2/volume_metadata.py:144 msgid "Metadata item was not found" msgstr "" #: cinder/api/v1/snapshots.py:171 cinder/api/v2/snapshots.py:162 msgid "'volume_id' must be specified" msgstr "" #: cinder/api/v1/snapshots.py:184 #, python-format msgid "Invalid value '%s' for force. " msgstr "" #: cinder/api/v1/volume_metadata.py:38 cinder/api/v1/volume_metadata.py:118 #: cinder/api/v1/volume_metadata.py:157 msgid "volume does not exist" msgstr "" #: cinder/api/v1/volumes.py:306 cinder/api/v1/volumes.py:310 #: cinder/api/v2/volumes.py:258 msgid "Invalid imageRef provided." msgstr "" #: cinder/api/v1/volumes.py:350 #, python-format msgid "snapshot id:%s not found" msgstr "" #: cinder/api/v1/volumes.py:363 #, python-format msgid "source vol id:%s not found" msgstr "" #: cinder/api/v2/snapshots.py:209 cinder/api/v2/volumes.py:421 msgid "Missing request body" msgstr "" #: cinder/api/v2/snapshots.py:213 cinder/api/v2/volumes.py:425 #, python-format msgid "Missing required element '%s' in request body" msgstr "" #: cinder/api/v2/types.py:77 msgid "Default volume type can not be found." msgstr "" #: cinder/api/v2/types.py:106 #, python-format msgid "Invalid is_public filter [%s]" msgstr "" #: cinder/api/v2/volumes.py:278 #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" #: cinder/api/v2/volumes.py:287 msgid "Invalid image identifier or unable to access requested image." msgstr "" #: cinder/api/v2/volumes.py:360 #, python-format msgid "source volume id:%s is not replicated" msgstr "" #: cinder/backup/api.py:97 msgid "Backup status must be available or error" msgstr "" #: cinder/backup/api.py:101 msgid "force delete" msgstr "" #: cinder/backup/api.py:108 msgid "Incremental backups exist for this backup." msgstr "" #: cinder/backup/api.py:125 #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "" #: cinder/backup/api.py:250 #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "" #: cinder/backup/api.py:256 #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current " "status is \"%s\"." msgstr "" #: cinder/backup/api.py:261 msgid "Backing up an in-use volume must use the force flag." msgstr "" #: cinder/backup/api.py:265 #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is " "\"%s\"." msgstr "" #: cinder/backup/api.py:343 msgid "No backups available to do an incremental backup." msgstr "" #: cinder/backup/api.py:350 msgid "The parent backup must be available for incremental backup." 
msgstr "" #: cinder/backup/api.py:405 msgid "Backup status must be available" msgstr "" #: cinder/backup/api.py:410 msgid "Backup to be restored has invalid size" msgstr "" #: cinder/backup/api.py:437 msgid "Volume to be restored to must be available" msgstr "" #: cinder/backup/api.py:443 #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size " "%(size)d." msgstr "" #: cinder/backup/api.py:504 #, python-format msgid "Backup status must be available and not %s." msgstr "" #: cinder/backup/api.py:547 msgid "Provided backup record is missing an id" msgstr "" #: cinder/backup/api.py:568 msgid "Backup already exists in database." msgstr "" #: cinder/backup/chunkeddriver.py:85 #, python-format msgid "unsupported compression algorithm: %s" msgstr "" #: cinder/backup/chunkeddriver.py:280 #, python-format msgid "volume size %d is invalid." msgstr "" #: cinder/backup/chunkeddriver.py:428 msgid "Chunk size is not multiple of block size for creating hash." msgstr "" #: cinder/backup/chunkeddriver.py:443 #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" #: cinder/backup/chunkeddriver.py:452 msgid "Volume size increased since the last backup. Do a full backup." msgstr "" #: cinder/backup/chunkeddriver.py:600 msgid "" "restore_backup aborted, actual object list does not match object list " "stored in metadata." msgstr "" #: cinder/backup/chunkeddriver.py:672 #, python-format msgid "No support to restore backup version %s" msgstr "" #: cinder/backup/chunkeddriver.py:703 msgid "Metadata restore failed due to incompatible version." msgstr "" #: cinder/backup/driver.py:200 #, python-format msgid "The source volume type '%s' is not available." msgstr "" #: cinder/backup/driver.py:215 #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume" " type '%(dest)s'." msgstr "" #: cinder/backup/driver.py:307 #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "" #: cinder/backup/manager.py:284 #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got" " %(actual_status)s." msgstr "" #: cinder/backup/manager.py:295 #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got" " %(actual_status)s." msgstr "" #: cinder/backup/manager.py:379 #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but " "got %(actual_status)s." msgstr "" #: cinder/backup/manager.py:390 #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but " "got %(actual_status)s." msgstr "" #: cinder/backup/manager.py:410 #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to " "create this backup [%(backup_service)s]." msgstr "" #: cinder/backup/manager.py:474 #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got" " %(actual_status)s." msgstr "" #: cinder/backup/manager.py:485 #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to " "create this backup [%(backup_service)s]." 
msgstr "" #: cinder/backup/manager.py:560 #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got" " %(actual_status)s." msgstr "" #: cinder/backup/manager.py:571 #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to " "create this backup [%(backup_service)s]." msgstr "" #: cinder/backup/manager.py:624 #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" #: cinder/backup/manager.py:658 #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" #: cinder/backup/manager.py:667 #, python-format msgid "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" #: cinder/backup/manager.py:726 #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to " "create this backup [%(backup_service)s]." msgstr "" #: cinder/backup/manager.py:746 #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" #: cinder/backup/manager.py:774 #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is " "not verified. Skipping reset." msgstr "" #: cinder/backup/manager.py:812 msgid "Can't attach snapshot." msgstr "" #: cinder/backup/drivers/ceph.py:129 cinder/tests/unit/test_backup_ceph.py:1015 #, python-format msgid "Metadata backup object '%s' already exists" msgstr "" #: cinder/backup/drivers/ceph.py:201 #, python-format msgid "invalid user '%s'" msgstr "" #: cinder/backup/drivers/ceph.py:267 msgid "Backup id required" msgstr "" #: cinder/backup/drivers/ceph.py:426 #, python-format msgid "image %s not found" msgstr "" #: cinder/backup/drivers/ceph.py:551 #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" #: cinder/backup/drivers/ceph.py:629 #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" #: cinder/backup/drivers/ceph.py:785 #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "" #: cinder/backup/drivers/ceph.py:811 msgid "Need non-zero volume size" msgstr "" #: cinder/backup/drivers/ceph.py:837 #, python-format msgid "Failed to backup volume metadata - %s" msgstr "" #: cinder/backup/drivers/ceph.py:1128 #: cinder/tests/unit/test_backup_ceph.py:1004 msgid "Metadata restore failed due to incompatible version" msgstr "" #: cinder/backup/drivers/google.py:149 #, python-format msgid "Unset gcs options: %s" msgstr "" #: cinder/backup/drivers/google.py:272 #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not" " same." msgstr "" #: cinder/backup/drivers/nfs.py:66 #, python-format msgid "Required flag %s is not set" msgstr "" #: cinder/backup/drivers/swift.py:148 msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in" " the form ::" msgstr "" #: cinder/backup/drivers/swift.py:164 msgid "" "Could not determine which Swift endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_url'." 
msgstr "" #: cinder/backup/drivers/swift.py:174 msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be" " in the form ::" msgstr "" #: cinder/backup/drivers/swift.py:190 msgid "" "Could not determine which Keystone endpoint to use. This can either be " "set in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" #: cinder/backup/drivers/swift.py:256 #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the" " same as MD5 of object sent to swift %(md5)s" msgstr "" #: cinder/backup/drivers/tsm.py:79 #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" #: cinder/backup/drivers/tsm.py:114 #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/backup/drivers/tsm.py:212 #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" #: cinder/backup/drivers/tsm.py:222 #, python-format msgid "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" #: cinder/backup/drivers/tsm.py:229 #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "" #: cinder/backup/drivers/tsm.py:298 #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/backup/drivers/tsm.py:339 #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" #: cinder/backup/drivers/tsm.py:362 msgid "" "Volume metadata backup requested but this driver does not yet support " "this feature." msgstr "" #: cinder/backup/drivers/tsm.py:387 #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/backup/drivers/tsm.py:397 #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/backup/drivers/tsm.py:447 #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/backup/drivers/tsm.py:457 #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/backup/drivers/tsm.py:502 #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/backup/drivers/tsm.py:510 #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with " "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/cmd/manage.py:172 msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running " "this command." 
msgstr "" #: cinder/cmd/manage.py:189 #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "" #: cinder/cmd/manage.py:200 #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "" #: cinder/cmd/manage.py:229 msgid "Must supply a positive, non-zero value for age" msgstr "" #: cinder/cmd/manage.py:273 msgid "Volume not yet assigned to host." msgstr "" #: cinder/cmd/manage.py:274 msgid "Deleting volume from database and skipping rpc." msgstr "" #: cinder/cmd/manage.py:279 msgid "Volume is in-use." msgstr "" #: cinder/cmd/manage.py:280 msgid "Detach volume from instance and then try again." msgstr "" #: cinder/cmd/manage.py:346 #, python-format msgid "Line %(dis)d : %(line)s" msgstr "" #: cinder/cmd/manage.py:349 msgid "No errors in logfiles!" msgstr "" #: cinder/cmd/manage.py:363 msgid "Unable to find system log file!" msgstr "" #: cinder/cmd/manage.py:367 #, python-format msgid "Last %s cinder syslog entries:-" msgstr "" #: cinder/cmd/manage.py:371 #, python-format msgid "%s" msgstr "" #: cinder/cmd/manage.py:376 msgid "No cinder entries in syslog!" msgstr "" #: cinder/cmd/manage.py:392 msgid "ID" msgstr "" #: cinder/cmd/manage.py:393 msgid "User ID" msgstr "" #: cinder/cmd/manage.py:394 msgid "Project ID" msgstr "" #: cinder/cmd/manage.py:395 cinder/cmd/manage.py:440 msgid "Host" msgstr "" #: cinder/cmd/manage.py:396 msgid "Name" msgstr "" #: cinder/cmd/manage.py:397 msgid "Container" msgstr "" #: cinder/cmd/manage.py:398 cinder/cmd/manage.py:442 msgid "Status" msgstr "" #: cinder/cmd/manage.py:399 msgid "Size" msgstr "" #: cinder/cmd/manage.py:400 msgid "Object Count" msgstr "" #: cinder/cmd/manage.py:439 msgid "Binary" msgstr "" #: cinder/cmd/manage.py:441 msgid "Zone" msgstr "" #: cinder/cmd/manage.py:443 msgid "State" msgstr "" #: cinder/cmd/manage.py:444 msgid "Updated At" msgstr "" #: cinder/cmd/manage.py:445 msgid "RPC Version" msgstr "" #: cinder/cmd/manage.py:446 msgid "Object Version" msgstr "" #: cinder/cmd/manage.py:475 #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" #: cinder/cmd/manage.py:480 #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "" #: cinder/cmd/manage.py:566 #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" #: cinder/cmd/manage.py:569 msgid "Available categories:" msgstr "" #: cinder/cmd/manage.py:571 #, python-format msgid "\t%s" msgstr "" #: cinder/cmd/manage.py:580 #, python-format msgid "Invalid directory: %s" msgstr "" #: cinder/cmd/manage.py:586 #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "" #: cinder/cmd/manage.py:590 msgid "sudo failed, continuing as if nothing happened" msgstr "" #: cinder/cmd/manage.py:592 msgid "Please re-run cinder-manage as root." msgstr "" #: cinder/cmd/rtstool.py:52 cinder/cmd/rtstool.py:116 msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "" #: cinder/cmd/rtstool.py:96 #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s " "is not in use by another service." msgstr "" #: cinder/cmd/rtstool.py:106 #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is " "supported on your iSCSI port %(port)d on ip %(ip)s." msgstr "" #: cinder/cmd/rtstool.py:123 #, python-format msgid "Could not find target %s" msgstr "" #: cinder/cmd/rtstool.py:150 #, python-format msgid "delete_initiator: %s ACL not found. Continuing." 
msgstr "" #: cinder/cmd/rtstool.py:178 #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" #: cinder/cmd/rtstool.py:217 #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" #: cinder/cmd/rtstool.py:223 #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "" #: cinder/cmd/rtstool.py:237 #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "" #: cinder/cmd/volume.py:83 #, python-format msgid "Volume service %s failed to start." msgstr "" #: cinder/cmd/volume.py:98 msgid "No volume service(s) started successfully, terminating." msgstr "" #: cinder/cmd/volume_usage_audit.py:89 #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "" #: cinder/cmd/volume_usage_audit.py:95 #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "" #: cinder/common/config.py:104 msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "" #: cinder/common/config.py:107 msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "" #: cinder/common/config.py:110 msgid "Deploy v3 of the Cinder API." msgstr "" #: cinder/common/sqlalchemyutils.py:116 msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" #: cinder/consistencygroup/api.py:236 #, python-format msgid "No host to create consistency group %s." msgstr "" #: cinder/consistencygroup/api.py:253 msgid "Cgsnahost is empty. No consistency group will be created." msgstr "" #: cinder/consistencygroup/api.py:314 msgid "Source CG is empty. No consistency group will be created." msgstr "" #: cinder/consistencygroup/api.py:454 #, python-format msgid "" "Consistency group status must be available or error, but current status " "is: %s" msgstr "" #: cinder/consistencygroup/api.py:461 #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "" #: cinder/consistencygroup/api.py:470 #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required " "to delete it." msgstr "" #: cinder/consistencygroup/api.py:477 #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "" #: cinder/consistencygroup/api.py:485 msgid "Volume in consistency group still has dependent snapshots." msgstr "" #: cinder/consistencygroup/api.py:500 #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "" #: cinder/consistencygroup/api.py:518 #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "" #: cinder/consistencygroup/api.py:544 #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" #: cinder/consistencygroup/api.py:583 cinder/volume/manager.py:2923 #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" #: cinder/consistencygroup/api.py:599 #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" #: cinder/consistencygroup/api.py:619 #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because" " volume cannot be found." 
msgstr "" #: cinder/consistencygroup/api.py:631 #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because" " it is already in consistency group %(orig_group)s." msgstr "" #: cinder/consistencygroup/api.py:641 #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because" " it has no volume type." msgstr "" #: cinder/consistencygroup/api.py:648 #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because" " volume type %(volume_type)s is not supported by the group." msgstr "" #: cinder/consistencygroup/api.py:658 cinder/volume/manager.py:2894 #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because" " volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" #: cinder/consistencygroup/api.py:675 cinder/volume/manager.py:2908 msgid "Volume is not local to this node." msgstr "" #: cinder/consistencygroup/api.py:683 #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because" " volume does not exist." msgstr "" #: cinder/consistencygroup/api.py:727 #: cinder/tests/unit/test_storwize_svc.py:2840 msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "" #: cinder/consistencygroup/api.py:765 msgid "Cgsnapshot status must be available or error" msgstr "" #: cinder/db/api.py:1145 msgid "Condition has no field." msgstr "" #: cinder/db/migration.py:68 msgid "Database schema downgrade is not allowed." msgstr "" #: cinder/db/sqlalchemy/api.py:248 cinder/db/sqlalchemy/api.py:3075 #: cinder/volume/qos_specs.py:83 msgid "Error writing field to database" msgstr "" #: cinder/db/sqlalchemy/api.py:280 #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "" #: cinder/db/sqlalchemy/api.py:1791 msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "" #: cinder/db/sqlalchemy/api.py:1802 msgid "Sort direction array size exceeds sort key array size." msgstr "" #: cinder/db/sqlalchemy/api.py:3786 #, python-format msgid "No backup with id %s" msgstr "" #: cinder/db/sqlalchemy/api.py:3870 msgid "Volume must be available" msgstr "" #: cinder/db/sqlalchemy/api.py:3917 #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" #: cinder/db/sqlalchemy/api.py:4096 #, python-format msgid "No consistency group with id %s" msgstr "" #: cinder/db/sqlalchemy/api.py:4210 #, python-format msgid "No cgsnapshot with id %s" msgstr "" #: cinder/db/sqlalchemy/api.py:4235 #, python-format msgid "Invalid value for age, %(age)s" msgstr "" #: cinder/db/sqlalchemy/api.py:4239 msgid "Must supply a positive value for age" msgstr "" #: cinder/image/image_utils.py:91 #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" #: cinder/image/image_utils.py:204 cinder/image/image_utils.py:314 #: cinder/volume/drivers/solidfire.py:654 msgid "'qemu-img info' parsing failed." msgstr "" #: cinder/image/image_utils.py:211 #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" #: cinder/image/image_utils.py:219 cinder/image/image_utils.py:307 #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" #: cinder/image/image_utils.py:266 #, python-format msgid "" "qemu-img is not installed and image is of type %s. 
Only RAW images can " "be used if qemu-img is not installed." msgstr "" #: cinder/image/image_utils.py:273 msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW" " images can be used if qemu-img is not installed." msgstr "" #: cinder/image/image_utils.py:321 cinder/image/image_utils.py:384 #: cinder/volume/drivers/solidfire.py:661 #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" #: cinder/image/image_utils.py:340 cinder/volume/drivers/solidfire.py:706 #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" #: cinder/image/image_utils.py:394 #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" #: cinder/keymgr/barbican.py:64 #, python-format msgid "" "Invalid url: must be in the form " "'http[s]://<ipaddr>|<fqdn>[:port]/', url specified is: %s" msgstr "" #: cinder/keymgr/barbican.py:70 #, python-format msgid "" "Invalid barbican api url: version is required, e.g. " "'http[s]://<ipaddr>|<fqdn>[:port]/<version>' url specified is: %s" msgstr "" #: cinder/keymgr/barbican.py:90 msgid "User is not authorized to use key manager." msgstr "" #: cinder/keymgr/barbican.py:95 msgid "Unable to create Barbican Client without project_id." msgstr "" #: cinder/keymgr/conf_key_mgr.py:82 msgid "keymgr.fixed_key not defined" msgstr "" #: cinder/objects/backup.py:140 msgid "Can't decode backup record." msgstr "" #: cinder/objects/backup.py:142 msgid "Can't parse backup record." msgstr "" #: cinder/objects/base.py:161 cinder/objects/base.py:286 #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "" #: cinder/objects/base.py:231 #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "" #: cinder/objects/cgsnapshot.py:75 cinder/objects/consistencygroup.py:83 msgid "already_created" msgstr "" #: cinder/objects/cgsnapshot.py:80 cinder/objects/volume.py:295 msgid "consistencygroup assigned" msgstr "" #: cinder/objects/cgsnapshot.py:89 cinder/objects/consistencygroup.py:102 #: cinder/objects/snapshot.py:189 cinder/objects/volume.py:340 #, python-format msgid "attribute %s not lazy-loadable" msgstr "" #: cinder/objects/cgsnapshot.py:110 cinder/objects/volume.py:309 msgid "consistencygroup changed" msgstr "" #: cinder/objects/cgsnapshot.py:113 cinder/objects/volume.py:315 msgid "snapshots changed" msgstr "" #: cinder/objects/consistencygroup.py:88 msgid "cgsnapshots assigned" msgstr "" #: cinder/objects/consistencygroup.py:92 msgid "volumes assigned" msgstr "" #: cinder/objects/consistencygroup.py:123 msgid "cgsnapshots changed" msgstr "" #: cinder/objects/consistencygroup.py:126 msgid "volumes changed" msgstr "" #: cinder/objects/service.py:88 cinder/objects/snapshot.py:145 #: cinder/objects/volume.py:290 cinder/objects/volume_type.py:80 msgid "already created" msgstr "" #: cinder/objects/snapshot.py:150 msgid "volume assigned" msgstr "" #: cinder/objects/snapshot.py:153 msgid "cgsnapshot assigned" msgstr "" #: cinder/objects/snapshot.py:164 msgid "volume changed" msgstr "" #: cinder/objects/snapshot.py:167 msgid "cgsnapshot changed" msgstr "" #: cinder/objects/volume.py:298 msgid "snapshots assigned" msgstr "" #: cinder/objects/volume.py:312 msgid "glance_metadata changed" msgstr "" #: cinder/replication/api.py:71 msgid "Replication is not enabled for volume" msgstr "" #: cinder/replication/api.py:76 #, python-format msgid "" "Replication status for volume must be active or active-stopped, but " "current status is: %s" msgstr "" #: cinder/replication/api.py:84 #,
python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "" #: cinder/replication/api.py:97 msgid "Replication is not enabled" msgstr "" #: cinder/replication/api.py:102 #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error," " but current status is: %s" msgstr "" #: cinder/scheduler/driver.py:98 msgid "Must implement host_passes_filters" msgstr "" #: cinder/scheduler/driver.py:103 msgid "Must implement find_retype_host" msgstr "" #: cinder/scheduler/driver.py:107 msgid "Must implement a fallback schedule" msgstr "" #: cinder/scheduler/driver.py:111 msgid "Must implement schedule_create_volume" msgstr "" #: cinder/scheduler/driver.py:117 msgid "Must implement schedule_create_consistencygroup" msgstr "" #: cinder/scheduler/driver.py:122 msgid "Must implement schedule_get_pools" msgstr "" #: cinder/scheduler/filter_scheduler.py:74 #: cinder/scheduler/filter_scheduler.py:88 msgid "No weighed hosts available" msgstr "" #: cinder/scheduler/filter_scheduler.py:115 #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "" #: cinder/scheduler/filter_scheduler.py:133 #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" #: cinder/scheduler/filter_scheduler.py:159 #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration " "not allowed" msgstr "" #: cinder/scheduler/filter_scheduler.py:199 msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" #: cinder/scheduler/filter_scheduler.py:247 #, python-format msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" #: cinder/scheduler/filter_scheduler.py:276 msgid "volume_type cannot be None" msgstr "" #: cinder/scheduler/manager.py:144 msgid "Failed to create scheduler manager volume flow" msgstr "" #: cinder/scheduler/manager.py:227 msgid "New volume type not specified in request_spec." msgstr "" #: cinder/scheduler/manager.py:242 #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" #: cinder/scheduler/evaluator/evaluator.py:50 #, python-format msgid "KeyError: %s" msgstr "" #: cinder/scheduler/evaluator/evaluator.py:53 #, python-format msgid "TypeError: %s" msgstr "" #: cinder/scheduler/evaluator/evaluator.py:62 #, python-format msgid "ValueError: %s" msgstr "" #: cinder/scheduler/evaluator/evaluator.py:108 #, python-format msgid "ZeroDivisionError: %s" msgstr "" #: cinder/scheduler/evaluator/evaluator.py:295 #, python-format msgid "ParseException: %s" msgstr "" #: cinder/scheduler/filters/instance_locality_filter.py:100 #: cinder/scheduler/filters/instance_locality_filter.py:112 #, python-format msgid "Hint \"%s\" not supported." 
msgstr "" #: cinder/tests/functional/api/client.py:30 #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" #: cinder/tests/functional/api/client.py:40 msgid "Authentication error" msgstr "" #: cinder/tests/functional/api/client.py:48 msgid "Authorization error" msgstr "" #: cinder/tests/functional/api/client.py:56 msgid "Item not found" msgstr "" #: cinder/tests/functional/api/client.py:146 msgid "Unexpected status code" msgstr "" #: cinder/tests/unit/test_backup_ceph.py:494 #: cinder/tests/unit/test_backup_ceph.py:507 #: cinder/tests/unit/test_backup_ceph.py:568 msgid "mock" msgstr "" #: cinder/tests/unit/test_backup_ceph.py:1028 #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object " "'backup.%s.meta' already exists" msgstr "" #: cinder/tests/unit/test_backup_google.py:100 #: cinder/tests/unit/test_backup_swift.py:584 #: cinder/tests/unit/test_backup_swift.py:611 #: cinder/tests/unit/backup/drivers/test_backup_nfs.py:458 #: cinder/tests/unit/backup/drivers/test_backup_nfs.py:485 msgid "fake" msgstr "" #: cinder/tests/unit/test_emc_vmax.py:2259 #: cinder/volume/drivers/emc/emc_vmax_masking.py:1338 msgid "" "V2 rollback - Volume in another storage group besides default storage " "group." msgstr "" #: cinder/tests/unit/test_emc_vmax.py:2268 #: cinder/volume/drivers/emc/emc_vmax_masking.py:1317 msgid "V2 rollback, volume is not in any storage group." msgstr "" #: cinder/tests/unit/test_glusterfs.py:670 msgid "umount: : target is busy" msgstr "" #: cinder/tests/unit/test_glusterfs.py:685 msgid "umount: : some other error" msgstr "" #: cinder/tests/unit/test_glusterfs.py:815 #, python-format msgid "umount: %s: not mounted" msgstr "" #: cinder/tests/unit/test_glusterfs.py:827 #: cinder/tests/unit/test_glusterfs.py:839 #, python-format msgid "umount: %s: target is busy" msgstr "" #: cinder/tests/unit/test_ibm_xiv_ds8k.py:161 #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "" #: cinder/tests/unit/test_misc.py:59 #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" #: cinder/tests/unit/test_rbd.py:1110 msgid "flush() not supported in this version of librbd" msgstr "" #: cinder/tests/unit/test_sheepdog.py:438 #: cinder/tests/unit/test_sheepdog.py:662 #: cinder/tests/unit/test_sheepdog.py:798 cinder/volume/drivers/sheepdog.py:99 #: cinder/volume/drivers/sheepdog.py:186 cinder/volume/drivers/sheepdog.py:231 #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "" #: cinder/tests/unit/test_sheepdog.py:546 cinder/volume/drivers/sheepdog.py:154 msgid "" "Cluster is not formatted. You should probably perform \"dog cluster " "format\"." msgstr "" #: cinder/tests/unit/test_sheepdog.py:560 cinder/volume/drivers/sheepdog.py:157 msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are " "running." msgstr "" #: cinder/tests/unit/test_sheepdog.py:573 cinder/volume/drivers/sheepdog.py:152 msgid "Invalid sheepdog cluster status." 
msgstr "" #: cinder/tests/unit/test_storwize_svc.py:311 #, python-format msgid "unrecognized argument %s" msgstr "" #: cinder/tests/unit/test_storwize_svc.py:423 #, python-format msgid "obj missing quotes %s" msgstr "" #: cinder/tests/unit/test_storwize_svc.py:658 #: cinder/tests/unit/test_storwize_svc.py:1539 #, python-format msgid "mdiskgrp missing quotes %s" msgstr "" #: cinder/tests/unit/test_storwize_svc.py:1766 msgid "The copy should be primary or secondary" msgstr "" #: cinder/tests/unit/api/contrib/test_backups.py:1484 msgid "Invalid input" msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:557 msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:663 #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:749 #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:775 #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency " "group %(group_id)s because it is not in the group." msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:828 #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid " "states are: ('available', 'in-use')." msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:862 #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:916 #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but" " current status is: %s." msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:1095 msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "" #: cinder/tests/unit/api/contrib/test_consistencygroups.py:1227 msgid "Create volume failed." msgstr "" #: cinder/tests/unit/api/middleware/test_faults.py:123 #, python-format msgid "String with params: %s" msgstr "" #: cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py:687 #: cinder/volume/drivers/netapp/dataontap/block_base.py:157 msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "" #: cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py:701 #: cinder/volume/drivers/netapp/dataontap/block_base.py:162 msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "" #: cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py:362 msgid "Error not a TypeError." msgstr "" #: cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py:371 msgid "Error not a KeyError." msgstr "" #: cinder/transfer/api.py:118 msgid "status must be available" msgstr "" #: cinder/transfer/api.py:157 #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "" #: cinder/transfer/api.py:165 cinder/volume/api.py:1278 #, python-format msgid "Volume %s must not be part of a consistency group." 
msgstr "" #: cinder/volume/api.py:228 #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must " "be an integer (or string representation of an integer) and greater than " "zero)." msgstr "" #: cinder/volume/api.py:236 msgid "" "volume_type must be provided when creating a volume in a consistency " "group." msgstr "" #: cinder/volume/api.py:241 #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by " "this consistency group)." msgstr "" #: cinder/volume/api.py:258 #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "either match source volume, or omit type argument)." msgstr "" #: cinder/volume/api.py:265 msgid "No volume_type should be provided when creating test replica." msgstr "" #: cinder/volume/api.py:275 #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" #: cinder/volume/api.py:319 msgid "Failed to create api volume flow." msgstr "" #: cinder/volume/api.py:393 cinder/volume/api.py:945 #, python-format msgid "status must be %s and" msgstr "" #: cinder/volume/api.py:394 #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group " "or have snapshots." msgstr "" #: cinder/volume/api.py:412 msgid "Failed to update snapshot." msgstr "" #: cinder/volume/api.py:443 msgid "The volume cannot be updated during maintenance." msgstr "" #: cinder/volume/api.py:572 #, python-format msgid "Volume status must be %s to reserve." msgstr "" #: cinder/volume/api.py:603 msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status" " must be 'attached' to detach." msgstr "" #: cinder/volume/api.py:624 msgid "The volume cannot be attached in maintenance mode." msgstr "" #: cinder/volume/api.py:651 msgid "The volume cannot be detached in maintenance mode." msgstr "" #: cinder/volume/api.py:665 msgid "The volume connection cannot be initialized in maintenance mode." msgstr "" #: cinder/volume/api.py:690 msgid "The volume cannot accept transfer in maintenance mode." msgstr "" #: cinder/volume/api.py:720 cinder/volume/api.py:852 msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" #: cinder/volume/api.py:725 cinder/volume/api.py:857 msgid "Snapshot cannot be created while volume is migrating." msgstr "" #: cinder/volume/api.py:730 msgid "Snapshot of secondary replica is not allowed." msgstr "" #: cinder/volume/api.py:734 #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" #: cinder/volume/api.py:861 #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" #: cinder/volume/api.py:946 #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "" #: cinder/volume/api.py:978 msgid "" "The volume metadata cannot be deleted when the volume is in maintenance " "mode." msgstr "" #: cinder/volume/api.py:991 msgid "Metadata property key blank." msgstr "" #: cinder/volume/api.py:995 msgid "Metadata property key greater than 255 characters." msgstr "" #: cinder/volume/api.py:999 msgid "Metadata property value greater than 255 characters." msgstr "" #: cinder/volume/api.py:1016 msgid "" "The volume metadata cannot be updated when the volume is in maintenance " "mode." 
msgstr "" #: cinder/volume/api.py:1134 cinder/volume/api.py:1249 #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status " "is: %(vol_status)s." msgstr "" #: cinder/volume/api.py:1140 msgid "Volume status is in-use." msgstr "" #: cinder/volume/api.py:1197 #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status " "is: %(vol_status)s." msgstr "" #: cinder/volume/api.py:1205 #, python-format msgid "" "New size for extend must be greater than current size. (current: " "%(size)s, extended: %(new_size)s)." msgstr "" #: cinder/volume/api.py:1258 cinder/volume/api.py:1404 #, python-format msgid "Volume %s is already part of an active migration." msgstr "" #: cinder/volume/api.py:1266 #, python-format msgid "Volume %s must not have snapshots." msgstr "" #: cinder/volume/api.py:1273 #, python-format msgid "Volume %s must not be replicated." msgstr "" #: cinder/volume/api.py:1294 #, python-format msgid "No available service named %s" msgstr "" #: cinder/volume/api.py:1300 msgid "Destination host must be different than the current host." msgstr "" #: cinder/volume/api.py:1358 msgid "Source volume not mid-migration." msgstr "" #: cinder/volume/api.py:1362 msgid "Destination volume not mid-migration." msgstr "" #: cinder/volume/api.py:1367 #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" #: cinder/volume/api.py:1380 #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" #: cinder/volume/api.py:1395 #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" #: cinder/volume/api.py:1410 #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" #: cinder/volume/api.py:1416 msgid "Volume must not be part of a consistency group." msgstr "" #: cinder/volume/api.py:1429 #, python-format msgid "Invalid volume_type passed: %s." msgstr "" #: cinder/volume/api.py:1442 #, python-format msgid "New volume_type same as original: %s." msgstr "" #: cinder/volume/api.py:1457 msgid "Retype cannot change encryption requirements." msgstr "" #: cinder/volume/api.py:1469 #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" #: cinder/volume/api.py:1496 cinder/volume/manager.py:2224 msgid "Failed to update quota usage while retyping volume." msgstr "" #: cinder/volume/api.py:1558 msgid "Failed to manage api volume flow." msgstr "" #: cinder/volume/api.py:1618 #, python-format msgid "Host replication_status must be %s to failover." msgstr "" #: cinder/volume/api.py:1635 msgid "Host is already Frozen." msgstr "" #: cinder/volume/api.py:1655 msgid "Host is NOT Frozen." msgstr "" #: cinder/volume/driver.py:392 cinder/volume/driver.py:435 #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "" #: cinder/volume/driver.py:426 msgid "detach snapshot from remote node" msgstr "" #: cinder/volume/driver.py:560 msgid "Sets thin provisioning." msgstr "" #: cinder/volume/driver.py:567 msgid "Enables compression." msgstr "" #: cinder/volume/driver.py:574 msgid "Enables QoS." msgstr "" #: cinder/volume/driver.py:581 msgid "Enables replication." 
msgstr "" #: cinder/volume/driver.py:892 cinder/volume/driver.py:969 #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" #: cinder/volume/driver.py:899 cinder/volume/driver.py:976 #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "" #: cinder/volume/driver.py:939 msgid "attach snapshot from remote node" msgstr "" #: cinder/volume/driver.py:1011 cinder/volume/manager.py:1611 #: cinder/volume/drivers/ibm/flashsystem_common.py:789 #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "" #: cinder/volume/driver.py:1433 msgid "The method update_migrated_volume is not implemented." msgstr "" #: cinder/volume/driver.py:1565 cinder/volume/driver.py:2003 #: cinder/volume/driver.py:2007 msgid "Manage existing volume not implemented." msgstr "" #: cinder/volume/driver.py:1573 msgid "Unmanage volume not implemented." msgstr "" #: cinder/volume/driver.py:1999 msgid "Extend volume not implemented" msgstr "" #: cinder/volume/driver.py:2014 cinder/volume/driver.py:2018 msgid "Manage existing snapshot not implemented." msgstr "" #: cinder/volume/driver.py:2028 msgid "sync_replica not implemented." msgstr "" #: cinder/volume/driver.py:2032 msgid "promote_replica not implemented." msgstr "" #: cinder/volume/driver.py:2400 cinder/volume/targets/iscsi.py:94 #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "" #: cinder/volume/driver.py:2788 msgid "Driver must implement initialize_connection" msgstr "" #: cinder/volume/manager.py:591 msgid "Create manager volume flow failed." msgstr "" #: cinder/volume/manager.py:698 msgid "volume is not local to this node" msgstr "" #: cinder/volume/manager.py:704 msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "" #: cinder/volume/manager.py:734 #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' " "during cascade delete." msgstr "" #: cinder/volume/manager.py:958 msgid "being attached by different mode" msgstr "" #: cinder/volume/manager.py:963 msgid "volume is already attached" msgstr "" #: cinder/volume/manager.py:1068 msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" #: cinder/volume/manager.py:1210 cinder/volume/drivers/block_device.py:236 msgid "Volume is not available." msgstr "" #: cinder/volume/manager.py:1352 msgid "Invalid initiator value received" msgstr "" #: cinder/volume/manager.py:1434 #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "" #: cinder/volume/manager.py:1443 msgid "Create export for volume failed." msgstr "" #: cinder/volume/manager.py:1466 #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "" #: cinder/volume/manager.py:1537 #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "" #: cinder/volume/manager.py:1552 msgid "Remove volume export failed." msgstr "" #: cinder/volume/manager.py:1617 msgid "Unable to access the backend storage via file handle." msgstr "" #: cinder/volume/manager.py:1743 msgid "failed to create new_volume on destination host" msgstr "" #: cinder/volume/manager.py:1749 msgid "timeout creating new_volume on destination host" msgstr "" #: cinder/volume/manager.py:2132 #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "" #: cinder/volume/manager.py:2279 msgid "Retype requires migration but is not allowed." 
msgstr "" #: cinder/volume/manager.py:2287 msgid "Volume must not have snapshots." msgstr "" #: cinder/volume/manager.py:2296 msgid "Volume must not be replicated." msgstr "" #: cinder/volume/manager.py:2340 msgid "Failed to create manage_existing flow." msgstr "" #: cinder/volume/manager.py:2383 msgid "Error promoting secondary volume to primary" msgstr "" #: cinder/volume/manager.py:2393 cinder/volume/manager.py:2426 #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "" #: cinder/volume/manager.py:2416 msgid "Synchronizing secondary volume to primary failed." msgstr "" #: cinder/volume/manager.py:2469 msgid "Create consistency group failed." msgstr "" #: cinder/volume/manager.py:2524 #, python-format msgid "snapshot-%s" msgstr "" #: cinder/volume/manager.py:2530 #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is " "not in a valid state. Valid states are: %(valid)s." msgstr "" #: cinder/volume/manager.py:2552 #, python-format msgid "cg-%s" msgstr "" #: cinder/volume/manager.py:2558 #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" #: cinder/volume/manager.py:2638 msgid "Input volumes or snapshots are invalid." msgstr "" #: cinder/volume/manager.py:2661 msgid "Input volumes or source volumes are invalid." msgstr "" #: cinder/volume/manager.py:2763 msgid "Volume is not local to this node" msgstr "" #: cinder/volume/manager.py:2790 msgid "Delete consistency group failed." msgstr "" #: cinder/volume/manager.py:2956 #, python-format msgid "Error occurred when updating consistency group %s." msgstr "" #: cinder/volume/manager.py:3057 #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "" #: cinder/volume/manager.py:3160 #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "" #: cinder/volume/manager.py:3354 msgid "Update list, doesn't include volume_id" msgstr "" #: cinder/volume/manager.py:3449 msgid "Failed to create manage existing flow." msgstr "" #: cinder/volume/qos_specs.py:56 #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "" #: cinder/volume/qos_specs.py:126 cinder/volume/qos_specs.py:143 #: cinder/volume/qos_specs.py:246 cinder/volume/volume_types.py:65 #: cinder/volume/volume_types.py:91 cinder/volume/volume_types.py:117 msgid "id cannot be None" msgstr "" #: cinder/volume/qos_specs.py:159 #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "" #: cinder/volume/qos_specs.py:192 #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" #: cinder/volume/qos_specs.py:258 cinder/volume/volume_types.py:129 msgid "name cannot be None" msgstr "" #: cinder/volume/rpcapi.py:193 msgid "Cascade option is not supported." msgstr "" #: cinder/volume/rpcapi.py:404 cinder/volume/rpcapi.py:416 msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" #: cinder/volume/utils.py:420 msgid "Failed to copy volume, source device unavailable." msgstr "" #: cinder/volume/utils.py:424 msgid "Failed to copy volume, destination device unavailable." 
msgstr "" #: cinder/volume/volume_types.py:176 cinder/volume/volume_types.py:189 msgid "volume_type_id cannot be None" msgstr "" #: cinder/volume/volume_types.py:180 cinder/volume/volume_types.py:193 msgid "Type access modification is not applicable to public volume type." msgstr "" #: cinder/volume/drivers/block_device.py:100 msgid "Failed to delete device." msgstr "" #: cinder/volume/drivers/block_device.py:206 msgid "No free disk" msgstr "" #: cinder/volume/drivers/block_device.py:220 msgid "No big enough free disk" msgstr "" #: cinder/volume/drivers/block_device.py:228 msgid "Insufficient free space available to extend volume." msgstr "" #: cinder/volume/drivers/blockbridge.py:39 msgid "IP address/hostname of Blockbridge API." msgstr "" #: cinder/volume/drivers/blockbridge.py:41 msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "" #: cinder/volume/drivers/blockbridge.py:46 msgid "Blockbridge API authentication scheme (token or password)" msgstr "" #: cinder/volume/drivers/blockbridge.py:49 msgid "Blockbridge API token (for auth scheme 'token')" msgstr "" #: cinder/volume/drivers/blockbridge.py:52 msgid "Blockbridge API user (for auth scheme 'password')" msgstr "" #: cinder/volume/drivers/blockbridge.py:54 msgid "Blockbridge API password (for auth scheme 'password')" msgstr "" #: cinder/volume/drivers/blockbridge.py:58 msgid "" "Defines the set of exposed pools and their associated backend query " "strings" msgstr "" #: cinder/volume/drivers/blockbridge.py:61 msgid "Default pool name if unspecified." msgstr "" #: cinder/volume/drivers/blockbridge.py:114 msgid "Failed to determine blockbridge API configuration" msgstr "" #: cinder/volume/drivers/blockbridge.py:162 msgid "Invalid credentials" msgstr "" #: cinder/volume/drivers/blockbridge.py:164 msgid "Insufficient privileges" msgstr "" #: cinder/volume/drivers/blockbridge.py:195 msgid "Blockbridge api host not configured" msgstr "" #: cinder/volume/drivers/blockbridge.py:205 msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "" #: cinder/volume/drivers/blockbridge.py:209 msgid "Blockbridge password not configured (required for auth scheme 'password')" msgstr "" #: cinder/volume/drivers/blockbridge.py:215 msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "" #: cinder/volume/drivers/blockbridge.py:219 #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "" #: cinder/volume/drivers/blockbridge.py:226 msgid "Blockbridge pools not configured" msgstr "" #: cinder/volume/drivers/blockbridge.py:231 msgid "Blockbridge default pool does not exist" msgstr "" #: cinder/volume/drivers/coho.py:89 msgid "Failed to establish connection with Coho cluster" msgstr "" #: cinder/volume/drivers/coho.py:133 #, python-format msgid "no REPLY but %r" msgstr "" #: cinder/volume/drivers/coho.py:141 #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "" #: cinder/volume/drivers/coho.py:145 #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "" #: cinder/volume/drivers/coho.py:146 #, python-format msgid "MSG_DENIED: %r" msgstr "" #: cinder/volume/drivers/coho.py:149 #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "" #: cinder/volume/drivers/coho.py:153 msgid "call failed: PROG_UNAVAIL" msgstr "" #: cinder/volume/drivers/coho.py:158 #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "" #: cinder/volume/drivers/coho.py:160 msgid "call failed: PROC_UNAVAIL" msgstr "" #: 
cinder/volume/drivers/coho.py:162 msgid "call failed: GARBAGE_ARGS" msgstr "" #: cinder/volume/drivers/coho.py:164 #, python-format msgid "call failed: %r" msgstr "" #: cinder/volume/drivers/coho.py:196 msgid "Invalid response header from RPC server" msgstr "" #: cinder/volume/drivers/coho.py:208 msgid "RPC server response is incomplete" msgstr "" #: cinder/volume/drivers/coho.py:315 msgid "Coho rpc port is not configured" msgstr "" #: cinder/volume/drivers/coho.py:319 #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "" #: cinder/volume/drivers/datera.py:130 msgid "" "san_login and/or san_password is not set for Datera driver in the " "cinder.conf. Set this information and start the cinder-volume service " "again." msgstr "" #: cinder/volume/drivers/datera.py:146 msgid "Resource not ready." msgstr "" #: cinder/volume/drivers/datera.py:472 #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "" #: cinder/volume/drivers/datera.py:495 #, python-format msgid "Bad request sent to Datera cluster: Invalid args: %(args)s | %(message)s" msgstr "" #: cinder/volume/drivers/datera.py:501 #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:193 #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong " "DRBDmanage version?" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:208 msgid "" "DRBDmanage driver setup error: some required libraries (dbus, " "drbdmanage.*) not found." msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:213 msgid "Cannot ping DRBDmanage backend" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:241 #, python-format msgid "Received error string: %s" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:378 #, python-format msgid "volume %s not found in drbdmanage" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:381 #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:387 #, python-format msgid "not exactly one volume with id %s" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:415 #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:419 #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:425 #, python-format msgid "not exactly one snapshot with id %s" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:450 #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:496 #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", " "volume \"%(vol)s\"" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:542 #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:582 #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; " "resource \"%(res)s\", volume \"%(vol)s\"" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:668 #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:692 #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?"
msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:705 #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:869 #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "" #: cinder/volume/drivers/eqlx.py:187 msgid "The EQL array has closed the connection." msgstr "" #: cinder/volume/drivers/eqlx.py:225 msgid "Error executing EQL command" msgstr "" #: cinder/volume/drivers/eqlx.py:267 #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" #: cinder/volume/drivers/eqlx.py:281 msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "" #: cinder/volume/drivers/glusterfs.py:84 #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "" #: cinder/volume/drivers/glusterfs.py:89 #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "" #: cinder/volume/drivers/glusterfs.py:101 msgid "mount.glusterfs is not installed" msgstr "" #: cinder/volume/drivers/glusterfs.py:300 cinder/volume/drivers/quobyte.py:276 #: cinder/volume/drivers/scality.py:199 #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "" #: cinder/volume/drivers/glusterfs.py:321 cinder/volume/drivers/quobyte.py:306 #: cinder/volume/drivers/scality.py:220 #, python-format msgid "Unrecognized backing format: %s" msgstr "" #: cinder/volume/drivers/glusterfs.py:339 #, python-format msgid "file already exists at %s" msgstr "" #: cinder/volume/drivers/glusterfs.py:446 msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "" #: cinder/volume/drivers/glusterfs.py:465 msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "" #: cinder/volume/drivers/hgst.py:121 msgid "Unable to get list of domain members, check that the cluster is running." msgstr "" #: cinder/volume/drivers/hgst.py:130 msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" #: cinder/volume/drivers/hgst.py:143 msgid "Current host isn't part of HGST domain." msgstr "" #: cinder/volume/drivers/hgst.py:182 msgid "" "Unable to get list of spaces to make new name. Please verify the cluster" " is running." msgstr "" #: cinder/volume/drivers/hgst.py:204 #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the " "cluster is running and connected." msgstr "" #: cinder/volume/drivers/hgst.py:262 #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "" #: cinder/volume/drivers/hgst.py:280 #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "" #: cinder/volume/drivers/hgst.py:316 #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "" #: cinder/volume/drivers/hgst.py:419 #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "" #: cinder/volume/drivers/hgst.py:428 #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "" #: cinder/volume/drivers/hgst.py:440 msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" #: cinder/volume/drivers/hgst.py:452 msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." 
msgstr "" #: cinder/volume/drivers/hgst.py:461 #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups " "in cinder.conf" msgstr "" #: cinder/volume/drivers/hgst.py:471 msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "" #: cinder/volume/drivers/hgst.py:481 #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "" #: cinder/volume/drivers/hgst.py:489 msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "" #: cinder/volume/drivers/hgst.py:567 #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "" #: cinder/volume/drivers/lvm.py:150 #, python-format msgid "Volume device file path %s does not exist." msgstr "" #: cinder/volume/drivers/lvm.py:158 #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "" #: cinder/volume/drivers/lvm.py:286 cinder/volume/drivers/lvm.py:295 #, python-format msgid "Volume Group %s does not exist" msgstr "" #: cinder/volume/drivers/lvm.py:322 msgid "Thin provisioning not supported on this version of LVM." msgstr "" #: cinder/volume/drivers/lvm.py:330 #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "" #: cinder/volume/drivers/lvm.py:594 #, python-format msgid "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" #: cinder/volume/drivers/lvm.py:611 cinder/volume/drivers/rbd.py:1030 #: cinder/volume/drivers/emc/xtremio.py:520 #: cinder/volume/drivers/hitachi/hnas_iscsi.py:911 #: cinder/volume/drivers/hitachi/hnas_nfs.py:623 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:851 msgid "Reference must contain source-name element." msgstr "" #: cinder/volume/drivers/lvm.py:628 #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size " "%(size)s was not a floating-point number." msgstr "" #: cinder/volume/drivers/lvm.py:731 #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" #: cinder/volume/drivers/nfs.py:114 #, python-format msgid "There's no NFS config file configured (%s)" msgstr "" #: cinder/volume/drivers/nfs.py:119 #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "" #: cinder/volume/drivers/nfs.py:135 cinder/volume/drivers/zfssa/zfssanfs.py:112 #, python-format msgid "%s is not installed" msgstr "" #: cinder/volume/drivers/nimble.py:80 msgid "Nimble Cinder Driver exception" msgstr "" #: cinder/volume/drivers/nimble.py:84 msgid "Unexpected response from Nimble API" msgstr "" #: cinder/volume/drivers/nimble.py:115 #: cinder/volume/drivers/ibm/flashsystem_common.py:939 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1991 #, python-format msgid "%s is not set." msgstr "" #: cinder/volume/drivers/nimble.py:153 msgid "No suitable discovery ip found" msgstr "" #: cinder/volume/drivers/nimble.py:294 msgid "SpaceInfo returned byarray is invalid" msgstr "" #: cinder/volume/drivers/nimble.py:350 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1383 msgid "Reference must contain source-name." msgstr "" #: cinder/volume/drivers/nimble.py:370 #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "" #: cinder/volume/drivers/nimble.py:377 msgid "Volume should have agent-type set as None." msgstr "" #: cinder/volume/drivers/nimble.py:383 #, python-format msgid "Volume %s is online. 
Set volume to offline for managing using OpenStack." msgstr "" #: cinder/volume/drivers/nimble.py:424 msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "" #: cinder/volume/drivers/nimble.py:501 #, python-format msgid "No initiator group found for initiator %s" msgstr "" #: cinder/volume/drivers/nimble.py:516 #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "" #: cinder/volume/drivers/pure.py:194 #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" #: cinder/volume/drivers/pure.py:216 msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" #: cinder/volume/drivers/pure.py:286 #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "" #: cinder/volume/drivers/pure.py:798 msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" #: cinder/volume/drivers/pure.py:827 #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "" #: cinder/volume/drivers/pure.py:845 #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. " "Please disconnect this volume from existing hosts before importing" msgstr "" #: cinder/volume/drivers/pure.py:906 #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" #: cinder/volume/drivers/pure.py:972 #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to " "enable this feature." msgstr "" #: cinder/volume/drivers/pure.py:1078 msgid "Unable to connect or find connection to host" msgstr "" #: cinder/volume/drivers/pure.py:1147 msgid "" "Unable to failback to \"default\", this can only be done after a failover" " has completed." msgstr "" #: cinder/volume/drivers/pure.py:1160 #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" #: cinder/volume/drivers/pure.py:1249 msgid "Protection Group not ready." msgstr "" #: cinder/volume/drivers/pure.py:1261 msgid "Replication not allowed yet." msgstr "" #: cinder/volume/drivers/pure.py:1417 msgid "Unable to find failover target, no secondary targets configured." msgstr "" #: cinder/volume/drivers/pure.py:1429 #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: " "%(secondary)s." msgstr "" #: cinder/volume/drivers/pure.py:1457 #, python-format msgid "Unable to find viable secondary array from configured targets: %(targets)s." msgstr "" #: cinder/volume/drivers/pure.py:1464 #, python-format msgid "" "Unable to find viable pg snapshot to use for failover on selected " "secondary array: %(id)s." msgstr "" #: cinder/volume/drivers/pure.py:1553 msgid "No iSCSI-enabled ports on target array." msgstr "" #: cinder/volume/drivers/pure.py:1605 msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" #: cinder/volume/drivers/pure.py:1613 msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "" #: cinder/volume/drivers/quobyte.py:104 #, python-format msgid "" "There's no Quobyte volume configured (%s). 
Example: quobyte:///" msgstr "" #: cinder/volume/drivers/quobyte.py:298 cinder/volume/drivers/smbfs.py:520 #: cinder/volume/drivers/vzstorage.py:256 msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" #: cinder/volume/drivers/quobyte.py:468 #, python-format msgid "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" #: cinder/volume/drivers/rbd.py:77 msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "" #: cinder/volume/drivers/rbd.py:80 msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value" " < 0, no timeout is set and default librados value is used." msgstr "" #: cinder/volume/drivers/rbd.py:84 msgid "Number of retries if connection to ceph cluster failed." msgstr "" #: cinder/volume/drivers/rbd.py:87 msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "" #: cinder/volume/drivers/rbd.py:169 #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "" #: cinder/volume/drivers/rbd.py:173 msgid "Invalid argument" msgstr "" #: cinder/volume/drivers/rbd.py:193 msgid "fileno() not supported by RBD()" msgstr "" #: cinder/volume/drivers/rbd.py:290 msgid "rados and rbd python libraries not found" msgstr "" #: cinder/volume/drivers/rbd.py:342 msgid "Error connecting to ceph cluster." msgstr "" #: cinder/volume/drivers/rbd.py:442 #, python-format msgid "clone depth exceeds limit of %s" msgstr "" #: cinder/volume/drivers/rbd.py:722 msgid "" "ImageBusy error raised while deleting rbd volume. This may have been " "caused by a connection from a client that has crashed and, if so, may be " "resolved by retrying the delete after 30 seconds has elapsed." msgstr "" #: cinder/volume/drivers/rbd.py:832 msgid "Not stored in rbd" msgstr "" #: cinder/volume/drivers/rbd.py:837 msgid "Blank components" msgstr "" #: cinder/volume/drivers/rbd.py:840 msgid "Not an rbd snapshot" msgstr "" #: cinder/volume/drivers/rbd.py:991 #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "" #: cinder/volume/drivers/rbd.py:1054 #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s" " was not a floating-point number." msgstr "" #: cinder/volume/drivers/remotefs.py:123 msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" #: cinder/volume/drivers/remotefs.py:191 #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or " "'false'" msgstr "" #: cinder/volume/drivers/remotefs.py:420 cinder/volume/drivers/smbfs.py:590 #, python-format msgid "Expected volume size was %d" msgstr "" #: cinder/volume/drivers/remotefs.py:421 #, python-format msgid " but size is now %d" msgstr "" #: cinder/volume/drivers/remotefs.py:448 #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" #: cinder/volume/drivers/remotefs.py:676 msgid "'active' must be present when writing snap_info." msgstr "" #: cinder/volume/drivers/remotefs.py:699 #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" #: cinder/volume/drivers/remotefs.py:824 #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "" #: cinder/volume/drivers/remotefs.py:882 msgid "Volume status must be 'available'." 
msgstr "" #: cinder/volume/drivers/remotefs.py:945 msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" #: cinder/volume/drivers/remotefs.py:1049 #, python-format msgid "No file found with %s as backing file." msgstr "" #: cinder/volume/drivers/remotefs.py:1058 #, python-format msgid "No snap found with %s as backing file." msgstr "" #: cinder/volume/drivers/remotefs.py:1079 msgid "Snapshot status must be \"available\" to clone." msgstr "" #: cinder/volume/drivers/remotefs.py:1233 #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" #: cinder/volume/drivers/remotefs.py:1303 msgid "Nova returned \"error\" status while creating snapshot." msgstr "" #: cinder/volume/drivers/remotefs.py:1308 #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to " "become available. Perhaps a concurrent request was made." msgstr "" #: cinder/volume/drivers/remotefs.py:1322 #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" #: cinder/volume/drivers/remotefs.py:1392 #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" #: cinder/volume/drivers/remotefs.py:1405 #, python-format msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" #: cinder/volume/drivers/scality.py:88 msgid "Value required for 'scality_sofs_config'" msgstr "" #: cinder/volume/drivers/scality.py:100 #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "" #: cinder/volume/drivers/scality.py:107 msgid "Cannot execute /sbin/mount.sofs" msgstr "" #: cinder/volume/drivers/scality.py:140 msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" #: cinder/volume/drivers/scality.py:277 msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "" #: cinder/volume/drivers/scality.py:282 msgid "Backup is only supported for SOFS volumes without a backing file." msgstr "" #: cinder/volume/drivers/sheepdog.py:334 #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "" #: cinder/volume/drivers/sheepdog.py:341 #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "" #: cinder/volume/drivers/sheepdog.py:357 #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "" #: cinder/volume/drivers/sheepdog.py:370 #, python-format msgid "An error occurred while seeking in volume \"%s\"." msgstr "" #: cinder/volume/drivers/sheepdog.py:384 #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "" #: cinder/volume/drivers/sheepdog.py:388 msgid "Invalid argument - negative seek offset." msgstr "" #: cinder/volume/drivers/sheepdog.py:404 msgid "fileno is not supported by SheepdogIOWrapper" msgstr "" #: cinder/volume/drivers/sheepdog.py:637 #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "" #: cinder/volume/drivers/smbfs.py:167 msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "" #: cinder/volume/drivers/smbfs.py:171 #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "" #: cinder/volume/drivers/smbfs.py:176 #, python-format msgid "Invalid mount point base: %s" msgstr "" #: cinder/volume/drivers/smbfs.py:180 #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" #: cinder/volume/drivers/smbfs.py:188 #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. 
Must be > 0 and <= 1.0: %s" msgstr "" #: cinder/volume/drivers/smbfs.py:349 #, python-format msgid "File already exists at %s." msgstr "" #: cinder/volume/drivers/smbfs.py:458 msgid "This driver does not support snapshotting in-use volumes." msgstr "" #: cinder/volume/drivers/smbfs.py:462 msgid "This driver does not support deleting in-use snapshots." msgstr "" #: cinder/volume/drivers/smbfs.py:474 #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "" #: cinder/volume/drivers/smbfs.py:591 #, python-format msgid " but size is now %d." msgstr "" #: cinder/volume/drivers/solidfire.py:127 #, python-format msgid "Retry count exceeded for command: %s" msgstr "" #: cinder/volume/drivers/solidfire.py:330 #: cinder/volume/drivers/solidfire.py:499 #, python-format msgid "API response: %s" msgstr "" #: cinder/volume/drivers/solidfire.py:518 msgid "Failed to get model update from clone" msgstr "" #: cinder/volume/drivers/solidfire.py:753 msgid "Failed to create SolidFire Image-Volume" msgstr "" #: cinder/volume/drivers/solidfire.py:813 msgid "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" #: cinder/volume/drivers/solidfire.py:1199 #, python-format msgid "" "Retrieved a different number of SolidFire volumes for the provided Cinder" " volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" #: cinder/volume/drivers/solidfire.py:1240 #, python-format msgid "Failed to find group snapshot named: %s" msgstr "" #: cinder/volume/drivers/solidfire.py:1319 #, python-format msgid "" "Retrieved a different number of SolidFire volumes for the provided Cinder" " snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" #: cinder/volume/drivers/solidfire.py:1549 msgid "Manage existing volume requires 'source-id'." msgstr "" #: cinder/volume/drivers/solidfire.py:1591 msgid "Manage existing get size requires 'id'." msgstr "" #: cinder/volume/drivers/solidfire.py:1608 msgid "Failed to find account for volume." msgstr "" #: cinder/volume/drivers/tegile.py:139 #, python-format msgid "API response: %(response)s" msgstr "" #: cinder/volume/drivers/tegile.py:471 #, python-format msgid "%(attr)s is not set." msgstr "" #: cinder/volume/drivers/tintri.py:206 #, python-format msgid "Failed to move volume %s." msgstr "" #: cinder/volume/drivers/tintri.py:210 #, python-format msgid "Volume %s not found." msgstr "" #: cinder/volume/drivers/tintri.py:540 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:549 #, python-format msgid "Converted to raw, but format is now %s" msgstr "" #: cinder/volume/drivers/tintri.py:570 #: cinder/volume/drivers/hitachi/hnas_nfs.py:251 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:582 msgid "Resizing image file failed." msgstr "" #: cinder/volume/drivers/tintri.py:624 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:679 msgid "Image location not present." msgstr "" #: cinder/volume/drivers/tintri.py:656 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:756 msgid "Container size smaller than required file size." msgstr "" #: cinder/volume/drivers/tintri.py:673 cinder/volume/drivers/tintri.py:689 msgid "A volume ID or share was not specified." msgstr "" #: cinder/volume/drivers/tintri.py:713 #, python-format msgid "Failed to manage volume %s." msgstr "" #: cinder/volume/drivers/tintri.py:737 #, python-format msgid "Failed to get size of volume %s" msgstr "" #: cinder/volume/drivers/tintri.py:765 msgid "Volume reference must contain source-name element." 
msgstr "" #: cinder/volume/drivers/tintri.py:786 #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2446 #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2471 msgid "Volume not found." msgstr "" #: cinder/volume/drivers/tintri.py:874 #, python-format msgid "Failed to login for user %s." msgstr "" #: cinder/volume/drivers/tintri.py:908 #, python-format msgid "Failed to create snapshot for volume %s." msgstr "" #: cinder/volume/drivers/tintri.py:920 #, python-format msgid "Failed to get snapshot for volume %s." msgstr "" #: cinder/volume/drivers/tintri.py:942 msgid "Failed to get image snapshots." msgstr "" #: cinder/volume/drivers/tintri.py:963 #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "" #: cinder/volume/drivers/vzstorage.py:133 #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "" #: cinder/volume/drivers/vzstorage.py:139 #, python-format msgid "Invalid mount point base: %s." msgstr "" #: cinder/volume/drivers/vzstorage.py:145 #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: " "%s." msgstr "" #: cinder/volume/drivers/vzstorage.py:161 #, python-format msgid "%s is not installed." msgstr "" #: cinder/volume/drivers/vzstorage.py:172 #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: " "[MDS1[,MDS2],...:/][:PASSWORD]." msgstr "" #: cinder/volume/drivers/vzstorage.py:313 #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:144 msgid "API key is missing for CloudByte driver." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:165 #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:172 #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, " "Error: %(error)s." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:246 #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account " "[%(account)s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:274 #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:292 #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in " "CloudByte storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:327 #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:334 #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:360 #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:367 #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:393 msgid "Null response received from CloudByte's list filesystem." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:400 msgid "No volumes found in CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:411 #, python-format msgid "Volume [%s] not found in CloudByte storage." 
msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:472 msgid "Null response received from CloudByte's list iscsi initiators." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:479 msgid "No iscsi initiators were found in CloudByte." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:495 msgid "Null response received from CloudByte's list volume iscsi service." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:502 msgid "No iscsi services found in CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:513 #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:570 #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:582 msgid "No response was received from CloudByte's list filesystem api call." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:589 msgid "No volume was found at CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:610 msgid "No response was received from CloudByte storage list tsm API call." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:644 msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:651 msgid "No iscsi auth groups were found in CloudByte." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:661 #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:678 msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:685 msgid "Auth user details not found in CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:692 msgid "Invalid chap user details found in CloudByte storage." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:984 #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:1182 #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:1201 #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:212 msgid "Configuration error: dell_sc_ssn not set." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:455 msgid "Failed to connect to Dell REST API" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:506 msgid "Failed to find Storage Center" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:768 #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:849 #, python-format msgid "Storage Profile %s not found." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:974 #, python-format msgid "Unable to complete failover of %s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:980 #, python-format msgid "Multiple copies of volume %s found." 
msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1001 #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1582 msgid "Unable to find iSCSI mappings." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2067 msgid "Multiple profiles found." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2112 msgid "Error deleting replay profile." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2352 msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2376 msgid "Error retrieving volume size" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2428 #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2464 msgid "Volume size must be a multiple of 1 GB." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2433 #, python-format msgid "Volume is attached to a server. (%s)" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2439 #, python-format msgid "Unable to manage volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2443 #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2468 msgid "Volume not unique." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2492 #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2527 msgid "Failed to find QoSnode" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:101 msgid "" "Dell Cinder driver configuration error: replication not supported with " "direct connect." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:116 #, python-format msgid "Dell Cinder driver configuration error: replication_device %s not found" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:187 #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:237 #, python-format msgid "Unable to create volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:255 msgid "Unable to create volume. Backend down." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:351 #, python-format msgid "Failed to create snapshot %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:392 #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:417 #: cinder/volume/drivers/dell/dell_storagecenter_common.py:478 #, python-format msgid "Failed to create volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:454 #, python-format msgid "Unable to create volume %(name)s from %(vol)s." 
msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:498 #, python-format msgid "Failed to delete snapshot %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:528 #, python-format msgid "Unable to find volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:552 #, python-format msgid "Unable to extend volume %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:657 #, python-format msgid "Unable to create consistency group %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:722 #, python-format msgid "Unable to update consistency group %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:758 #, python-format msgid "Unable to snap Consistency Group %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:782 #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:834 #: cinder/volume/drivers/dell/dell_storagecenter_common.py:853 #: cinder/volume/drivers/huawei/huawei_driver.py:1337 msgid "Must specify source-name or source-id." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:887 #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1050 msgid "Backend has already been failed over. Unable to fail back." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1088 #, python-format msgid "replication_failover failed. %s not found." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1092 msgid "replication_failover failed. Backend not configured for failover" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1100 msgid "_get_unmanaged_replay: Must specify source-name." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1108 #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1116 #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1155 #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume" " %(vol)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1184 msgid "Volume size must be a multiple of 1 GB." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1203 #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1211 #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_fc.py:115 msgid "Unable to map volume." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_fc.py:142 #: cinder/volume/drivers/dell/dell_storagecenter_iscsi.py:171 msgid "Terminate connection failed" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_fc.py:159 msgid "Terminate connection unable to connect to backend." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_iscsi.py:143 msgid "Unable to map volume" msgstr "" #: cinder/volume/drivers/disco/disco.py:123 msgid "Could not find DISCO wsdl file." 
msgstr "" #: cinder/volume/drivers/disco/disco.py:144 #: cinder/volume/drivers/disco/disco.py:305 #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "" #: cinder/volume/drivers/disco/disco.py:164 #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "" #: cinder/volume/drivers/disco/disco.py:191 #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "" #: cinder/volume/drivers/disco/disco.py:228 #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s." msgstr "" #: cinder/volume/drivers/disco/disco.py:251 #, python-format msgid "" "Error [%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into " "volume [%(vol)s]." msgstr "" #: cinder/volume/drivers/disco/disco.py:276 #, python-format msgid "Error [status] %(stat)s - [result] %(res)s while getting volume id." msgstr "" #: cinder/volume/drivers/disco/disco.py:330 #, python-format msgid "Error [%(stat)s - %(res)s] while getting volume id." msgstr "" #: cinder/volume/drivers/disco/disco.py:390 #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "" #: cinder/volume/drivers/disco/disco.py:439 #, python-format msgid "Error while getting disco information [%s]." msgstr "" #: cinder/volume/drivers/disco/disco.py:492 #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "" #: cinder/volume/drivers/disco/disco.py:508 #, python-format msgid "Timeout while calling %s " msgstr "" #: cinder/volume/drivers/disco/disco.py:522 #: cinder/volume/drivers/disco/disco.py:539 #, python-format msgid "Unknown operation %s." msgstr "" #: cinder/volume/drivers/disco/disco.py:529 msgid "Call returned a None object" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:99 #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:106 #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:158 #, python-format msgid "%s configuration option is not set." msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:302 #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "" #: cinder/volume/drivers/dothill/dothill_common.py:312 #, python-format msgid "Connector does not provide: %s" msgstr "" #: cinder/volume/drivers/dothill/dothill_iscsi.py:82 #, python-format msgid "Invalid IP address format: '%s'" msgstr "" #: cinder/volume/drivers/dothill/dothill_iscsi.py:87 #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:244 msgid "At least one valid iSCSI IP address must be set." msgstr "" #: cinder/volume/drivers/dothill/dothill_iscsi.py:180 msgid "CHAP secret should be 12-16 bytes." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:232 #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:340 #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:458 #, python-format msgid "Error Attaching volume %(vol)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:498 msgid "Unable to get the name of the masking view." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:515 msgid "Unable to get the name of the portgroup." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:520 msgid "Cannot get the portgroup from the masking view." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:557 #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting..." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:564 #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than " "%(newSize)s GB. Only Extend is supported. Exiting..." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:603 #, python-format msgid "" "The requested size: %(requestedSize)s is not the same as the resulting " "size: %(resultSize)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1063 #, python-format msgid "" "Failed to remove %(volumename)s from the default storage group for FAST" " policy %(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1312 msgid "Cannot connect to ECOM server." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1343 #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1349 #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1461 #, python-format msgid "Error finding %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1574 #: cinder/volume/drivers/emc/emc_vmax_utils.py:2468 #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1667 #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1726 msgid "Unable to get corresponding record for pool." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1742 #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1764 msgid "You must supply an array in your EMC configuration file." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1770 msgid "Cannot get necessary pool or storage system information." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1874 #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1885 #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1967 #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2070 #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2150 #: cinder/volume/drivers/emc/emc_vmax_common.py:2847 #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2311 #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2386 #, python-format msgid "Cannot find Replication Service to delete snapshot %s." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2436 #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2478 #: cinder/volume/drivers/emc/emc_vmax_common.py:2582 #: cinder/volume/drivers/emc/emc_vmax_common.py:3875 #, python-format msgid "Cannot find CG group %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2498 #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2652 #, python-format msgid "Cannot find Replication service on system %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2669 #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2718 #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2778 #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2815 #, python-format msgid "" "Pool: %(poolName)s is not associated with a storage tier for FAST policy " "%(fastPolicy)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2877 #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous" " error statement for valid values." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2896 msgid "Cannot determine storage pool settings." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2962 #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform" " extend on a concatenated volume. Exiting..." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2997 msgid "Error Creating unbound volume on an Extend operation." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3009 #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3416 #: cinder/volume/drivers/emc/emc_vmax_common.py:3465 #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3541 msgid "Error Creating unbound volume." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3571 #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3579 #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3624 #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source" " name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3966 #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3972 #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4015 #, python-format msgid "" "Manage volume is not supported if FAST is enabled. FAST policy: " "%(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4034 #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view" " %(mv)s." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4050 #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume " "of replication session %(sync)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4070 #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is " "not in the pool managed by the current cinder host." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4145 #, python-format msgid "Cannot find Volume: %(id)s. Unmanage operation. Exiting..." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4208 #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4264 #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4277 #, python-format msgid "Cannot find replication service on system %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4324 #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4356 #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:324 #, python-format msgid "" "Error associating storage group: %(storageGroupName)s to FAST Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:621 msgid "FAST is not supported on this array." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:156 #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:162 #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:170 #, python-format msgid "SSL Certificate expired on %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:197 #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:206 #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:216 #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:225 #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:333 #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:337 #, python-format msgid "SSL error: %(arg)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:341 #, python-format msgid "Socket error: %(arg)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:211 #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:215 msgid "iscsiadm execution failed. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:247 #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:258 #, python-format msgid "Cannot find device number for volume %(volumeName)s." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:169 #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:356 #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:384 #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:407 #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:435 #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:466 #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:498 #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:556 #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:588 #, python-format msgid "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:601 #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:649 #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:872 #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:910 msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:916 #, python-format msgid "Error finding %(name)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1065 #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1172 #, python-format msgid "Cannot create or find a storage group with name %(sgGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1282 msgid "V3 rollback" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1341 #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your " "system administrator to manually return your volume to the default " "storage group for FAST policy %(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1509 #: cinder/volume/drivers/emc/emc_vmax_provision.py:171 #: cinder/volume/drivers/emc/emc_vmax_provision.py:212 #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1534 #, python-format msgid "" "Error adding initiator to group: %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1594 #, python-format msgid "" "Error Modifying masking view: %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2116 #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2184 #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2424 #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2458 #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2490 #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:77 #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:78 #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:127 #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:131 #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:293 #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:331 #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:369 #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:411 #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:478 #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:530 #, python-format msgid "" "Error Creating new composite Volume. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:572 #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:618 msgid "Failed to terminate migrate session." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:629 msgid "Failed to migrate volume for the second time." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:636 msgid "Failed to migrate volume for the first time." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:663 #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:733 #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source " "Volume: %(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:789 #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s. Return code: " "%(rc)lu. Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:830 #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s. Return " "code: %(rc)lu. Error: %(error)s." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:875 #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s. Return code:" " %(rc)lu. Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:927 #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:979 #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s." " Return code: %(rc)lu. Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision.py:1037 #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:651 #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:178 #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:196 msgid "Unable to get storage volume from job." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:266 #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:440 #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and " "workloads." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:476 #, python-format msgid "" "Cannot get supported size range for %(sps)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:596 #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_provision_v3.py:786 #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:98 #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:129 #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:158 #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:187 #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:215 #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:242 #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:271 #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:336 #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:2102 msgid "Issue encountered waiting for job." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:419 msgid "Issue encountered waiting for synchronization." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1458 #, python-format msgid "StorageSystem %(array)s is not found." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1639 #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1666 #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1780 msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2025 msgid "No Port Group elements found in config file." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2076 msgid "" "Multiple SerialNumbers found, when only one was expected for this " "operation. Please change your EMC config file." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2101 #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2114 #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2151 #, python-format msgid "Volume %(deviceID)s not found." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2220 msgid "Source volume device ID is required." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2502 #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:217 msgid "Option map (cls._map) is not defined." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:221 msgid "{} is not a valid option." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:337 msgid "Initial tier: {}, policy: {} is not valid." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:375 msgid "LUN number ({}) is not an integer." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:380 msgid "LUN id({}) is not valid." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:452 msgid "Input type {} is not supported." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:507 msgid "'{}' object has no attribute '{}'" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:697 #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:711 msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:726 #, python-format msgid "Invalid VNX authentication type: %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:830 #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:920 #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1161 msgid "Failed to create snapshot as no LUN ID is specified" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1349 #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2181 msgid "MirrorView/S enabler is not installed." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2220 #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2269 #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." 
msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2430 #, python-format msgid "The extra_spec: %s is invalid." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2467 msgid "Deduplication and auto tiering cannot both be enabled." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2471 msgid "Compression Enabler is not installed. Cannot create compressed volume." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2477 msgid "Deduplication Enabler is not installed. Cannot create deduplicated volume." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2483 msgid "ThinProvisioning Enabler is not installed. Cannot create thin volume." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2488 msgid "FAST VP Enabler is not installed. Cannot set tiering policy for the volume." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2949 #: cinder/volume/drivers/emc/emc_vnx_cli.py:3027 msgid "Unable to enable replication and snapcopy at the same time." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3153 #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group " "cannot accept compressed LUNs as members." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3345 #, python-format msgid "Host %s has no iSCSI initiator" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3356 #, python-format msgid "Host %s has no FC initiators" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3643 #, python-format msgid "" "Reached the limit set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3659 #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3853 #: cinder/volume/drivers/emc/emc_vnx_cli.py:3951 msgid "Reference must contain source-id or source-name key." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3858 #, python-format msgid "" "The imported LUN %(lun_id)s is in pool %(lun_pool)s, which is not managed " "by the host %(host)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3910 msgid "" "Driver is not able to do retype because the volume (LUN {}) has a " "snapshot, which cannot be migrated." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3920 msgid "Storage-assisted migration failed during manage volume." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4002 #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4144 #, python-format msgid "replication_device should be configured on backend: %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4155 #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4220 msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4373 #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4380 #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current " "host." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4531 #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." 
msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4639 #, python-format msgid "Migrate volume %(src)s failed." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4666 #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:174 msgid "Cannot specify both protection domain name and protection domain id." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:179 msgid "REST server IP must be specified." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:183 msgid "REST server username must be specified." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:187 msgid "REST server password must be specified." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:195 msgid "Path to REST server's certificate must be specified." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:199 msgid "Cannot specify both storage pool name and storage pool id." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:204 msgid "Must specify storage pool name or id." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:208 msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:310 msgid "Must specify protection domain name or protection domain id." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:334 #: cinder/volume/drivers/emc/scaleio.py:761 #, python-format msgid "Domain with name %s wasn't found." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:339 #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:366 #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:373 #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:412 #, python-format msgid "Error creating volume: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:426 #, python-format msgid "Cannot create volume of size %s: not a multiple of 8 GB." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:450 #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:576 #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:667 #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:766 #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:792 #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:799 #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1015 #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1035 msgid "" "manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1069 msgid "Reference must contain source-id." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1075 msgid "Volume must have a volume type" msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1091 #, python-format msgid "Error managing volume: %s." 
msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1143 #: cinder/volume/drivers/emc/scaleio.py:1198 #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "" #: cinder/volume/drivers/emc/xtremio.py:112 msgid "can't handle both name and index in req" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:139 #: cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py:251 #: cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py:323 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:127 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:248 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:237 #, python-format msgid "Exception: %s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:163 msgid "Volume by this name already exists" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:175 #, python-format msgid "Bad response from XMS, %s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:219 #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:261 #, python-format msgid "iscsi portal, %s, not found" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:387 #: cinder/volume/drivers/emc/xtremio.py:808 msgid "XtremIO not initialized correctly, no clusters found" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:392 #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:555 msgid "can't find the volume to extend" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:656 msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" #: cinder/volume/drivers/emc/xtremio.py:758 #, python-format msgid "Failed to create IG, %s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:878 msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:926 msgid "Failed to get targets" msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:193 #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, " "eternus_pool: %(eternus_pool)s, Storage Configuration Service not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:233 #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:252 #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:308 msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:322 #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:345 msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:358 #, python-format msgid "" "create_cloned_volume, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:386 msgid "_create_local_cloned_volume, Replication Service not found." 
msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:400 #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, " "sourcevolumename: %(sourcevolumename)s, source volume instance: " "%(source_volume)s, target volume instance: %(target_volume)s, Return " "code: %(rc)lu, Error: %(errordesc)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:507 #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service" " not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:526 #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:562 #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on" " ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:570 #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not " "found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:582 #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:612 #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:678 #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:779 #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:798 #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:813 #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, " "eternus_pool: %(eternus_pool)s, Storage Configuration Service not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:849 #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:952 #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:972 #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: " "%(volmap)s, GetInstance, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1002 #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1023 msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1041 #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect" " to ETERNUS." 
msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1063 #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1111 #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!!" " Please edit driver configuration file and correct." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1137 #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: " "%(port)s, user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1161 msgid "_create_volume_name, id_code is None." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1189 #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1228 #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1262 #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1427 #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1470 #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1509 #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1538 #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1549 msgid "_delete_copysession, Cannot find Replication Service" msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1579 #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: " "%(operation)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1606 msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1638 #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1665 #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: " "%(aglist)s, Storage Configuration Service not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1780 #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1805 #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." 
msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1822 #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1843 #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1903 #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1919 #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service" " not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1960 #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1992 #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, " "EnumerateInstanceNames, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:2011 #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:2030 #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:2046 #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:2067 #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not " "found." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:58 #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:60 #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:61 #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:62 #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:67 #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:68 #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:70 #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:71 #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, " "id: %(id)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:73 #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:75 #, python-format msgid "" "A host group could not be deleted. 
msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:77 #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, " "alias: %(alias)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:79 #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:81 #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:83 #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:85 #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: " "%(ldev)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:87 #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the " "storage resource again. (resource: %(resource)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:89 #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:91 #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:93 #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:95 #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:100 #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: " "%(err)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:102 #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:103 #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:104 #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, " "ret: %(ret)s, stderr: %(err)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:106 msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:107 msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:108 #, python-format msgid "" "A pair could not be created. The maximum number of pairs is exceeded. " "(copy method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:110 #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:111 #, python-format msgid "" "The specified operation is not supported. The volume size must be the " "same as the source %(type)s. (volume: %(volume_id)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:113 #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." 
msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:115 #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:116 #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:117 #, python-format msgid "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:119 #, python-format msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:121 #, python-format msgid "%(file)s: Permission denied." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:122 msgid "Failed to add the logical device." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:123 #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:124 #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:125 msgid "The host group or iSCSI target could not be added." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:126 #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:127 #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:128 #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:130 msgid "The host group or iSCSI target was not found." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:131 #, python-format msgid "The resource %(resource)s was not found." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:132 msgid "The IP Address was not found." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:133 #, python-format msgid "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:135 #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:136 #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:137 #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:138 #, python-format msgid "The specified %(desc)s is busy." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:139 #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is " "essential to manage the volume." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:141 msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:143 #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be" " DP-VOL." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:145 #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be " "in multiples of gigabyte." 
msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:147 #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:149 #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_basiclib.py:151 #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_backend.py:69 #: cinder/volume/drivers/hitachi/hnas_backend.py:115 msgid "Failed to establish SSC connection." msgstr "" #: cinder/volume/drivers/hitachi/hnas_backend.py:73 msgid "HNAS has disconnected SSC" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:108 #: cinder/volume/drivers/hitachi/hnas_nfs.py:92 #, python-format msgid "Can't open config file: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:114 #: cinder/volume/drivers/hitachi/hnas_nfs.py:98 #, python-format msgid "Error parsing config file: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:687 #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:863 msgid "" "Failed to manage existing volume because the pool of the volume type " "chosen does not match the file system passed in the volume reference." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:872 #: cinder/volume/drivers/hitachi/hnas_nfs.py:730 msgid "" "Failed to manage existing volume because the pool of the volume type " "chosen does not match the pool of the host." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:895 msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:942 msgid "" "Volume not found on configured storage backend. If your volume name " "contains \"/\", please rename it and try to manage again." msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:272 #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size " "%(snap_size)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:405 #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size " "%(src_vol_size)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:660 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:888 msgid "Volume not found on configured storage backend." msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:698 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:928 #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation " "failed: Error msg: %(msg)s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:721 msgid "" "Failed to manage existing volume because the pool of the volume type " "chosen does not match the NFS share passed in the volume reference." msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:758 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:964 #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." 
msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:306 #, python-format msgid "%s is not set" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:313 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:355 #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:324 #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to " "upgrade the hpe3parclient." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:341 #: cinder/volume/drivers/hpe/hpe_3par_common.py:370 #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:396 msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip " "install python-3parclient\" to install the hpe3parclient." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:476 #: cinder/volume/drivers/hpe/hpe_3par_common.py:1289 #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:484 #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:648 #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:709 #: cinder/volume/drivers/hpe/hpe_3par_common.py:895 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1093 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1280 #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:742 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1108 #, python-format msgid "Volume type ID '%s' is invalid." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:811 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1180 msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:822 #: cinder/volume/drivers/hpe/hpe_3par_common.py:922 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1207 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1313 #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:830 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1223 #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:886 msgid "Reference must be for an unmanaged virtual volume." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:913 msgid "Reference must be for an unmanaged snapshot." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:950 msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:985 msgid "Reference must contain source-name or source-id." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1565 #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1599 #, python-format msgid "VV Set %s does not exist." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1642 #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." 
msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1733 #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1749 #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1861 #, python-format msgid "Volume (%s) already exists on array" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2023 #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, " "status=%(status)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2097 msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this " "time. You can try again later." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2120 msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2127 msgid "Volume has children and cannot be deleted!" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2301 #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2469 #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, " "status=%(status)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2493 #, python-format msgid "Volume (%s) already exists on array." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2625 #: cinder/volume/drivers/hpe/hpe_3par_common.py:2666 #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s," " task-status=%(status)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2689 #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2695 msgid "Cannot retype from one 3PAR array to another." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2701 #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2708 msgid "Cannot retype to a CPG in a different domain." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2712 msgid "Cannot retype to a snap CPG in a different domain." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2934 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1418 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2470 msgid "A valid secondary target MUST be specified in order to failover." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2990 msgid "" "The host is not ready to be failed back. Please resynchronize the volumes" " and resume replication on the 3PAR backends." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3320 msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period " "must also be specified and be between 300 and 31622400 seconds." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3358 #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3378 #, python-format msgid "There was an error adding the volume to the remote copy group: %s." 
msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3392 #, python-format msgid "There was an error setting the sync period for the remote copy group: %s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3402 #, python-format msgid "There was an error starting remote copy: %s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3410 #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3472 #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_fc.py:608 #: cinder/volume/drivers/hpe/hpe_3par_iscsi.py:926 #, python-format msgid "Volume %s doesn't exist on array." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:180 msgid "HPELeftHand url not found" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:260 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:307 msgid "LeftHand cluster not found" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:323 #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s " "or greater required. Run 'pip install --upgrade python-lefthandclient' to" " upgrade the hpelefthandclient." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:361 #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:458 msgid "Creating a consistency group from a source is not currently supported." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1216 #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1237 #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1269 msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1302 msgid "Reference must be the name of an unmanaged snapshot." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1353 msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1393 #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1581 msgid "" "The host is not ready to be failed back. Please resynchronize the volumes" " and resume replication on the LeftHand backends." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1755 #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1776 #, python-format msgid "The retention count must be %s or less." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1797 #, python-format msgid "The remote retention count must be %s or less." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1868 #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" #: cinder/volume/drivers/huawei/fc_zone_helper.py:121 msgid "No FC port connected to fabric." 
msgstr "" #: cinder/volume/drivers/huawei/fc_zone_helper.py:124 msgid "No initiator connected to fabric." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:93 msgid "RestURL is not configured." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:104 msgid "UserName is not configured." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:114 msgid "UserPassword is not configured." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:124 msgid "SAN product is not configured." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:134 msgid "SAN protocol is not configured." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:152 #, python-format msgid "Invalid lun type %s is configured." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:193 #, python-format msgid "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:220 msgid "Storage pool is not configured." msgstr "" #: cinder/volume/drivers/huawei/huawei_conf.py:228 msgid "Invalid storage pool is configured." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:64 msgid "Configuration is not found." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:98 msgid "Get active client failed." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:298 #, python-format msgid "Create volume error. Because %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:342 #: cinder/volume/drivers/huawei/huawei_driver.py:637 msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:433 msgid "Lun migration error." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:438 msgid "Cannot find migration task." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:647 #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:731 #: cinder/volume/drivers/huawei/huawei_driver.py:745 #, python-format msgid "Volume %s does not exist on the array." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:758 #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:766 #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:784 #, python-format msgid "" "New size should be bigger than the real size from backend storage. " "realsize: %(oldsize)s, newsize: %(newsize)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:805 #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:988 #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1000 #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1140 #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1166 #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1173 #, python-format msgid "Can't import LUN %s to Cinder. 
msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1187 #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1202 msgid "Failed to get SplitMirror." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1209 msgid "Failed to get target LUN of SplitMirror." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1213 #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1228 msgid "Failed to get migration task." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1234 #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1242 #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1250 #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication " "task." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1257 #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1278 #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1295 #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1343 msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1378 msgid "Volume size must be a multiple of 1 GB." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1387 #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or " "running status is not online." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1394 #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1404 msgid "Must specify snapshot source-name or source-id." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1410 msgid "Can't find snapshot on array, please check the source-name or source-id." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1424 #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1453 msgid "Snapshot size must be a multiple of 1 GB." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1577 #, python-format msgid "Invalid secondary id %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1855 msgid "No FC initiator can be added to host." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1938 msgid "Can't find the same host id from arrays." msgstr "" #: cinder/volume/drivers/huawei/huawei_utils.py:61 #, python-format msgid "wait_for_condition: %s timed out." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:43 msgid "Remote pool cannot be found." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:70 #, python-format msgid "Create hypermetro error: %s." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:127 #, python-format msgid "Can't get volume id. Volume name: %s." 
msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:153 msgid "Can not add FC port to host." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:170 msgid "Map info is None due to array version not supporting hypermetro." msgstr "" #: cinder/volume/drivers/huawei/replication.py:112 #, python-format msgid "Create replication pair failed. Error: %s." msgstr "" #: cinder/volume/drivers/huawei/replication.py:202 msgid "Split replication failed." msgstr "" #: cinder/volume/drivers/huawei/replication.py:240 msgid "We should not do switch over on primary array." msgstr "" #: cinder/volume/drivers/huawei/replication.py:267 #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "" #: cinder/volume/drivers/huawei/replication.py:480 msgid "Get remote device info failed." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:137 msgid "Failed to login with all rest URLs." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:180 msgid "Logout session error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:184 #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:191 #, python-format msgid "%s \"data\" is not in result." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:203 msgid "Create lun error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:228 msgid "Delete lun error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:233 msgid "Query resource pool error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:268 #, python-format msgid "Can not get pool info. pool: %s" msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:283 msgid "Get lun id by name error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:291 msgid "Activate snapshot error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:302 msgid "Create snapshot error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:312 #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:332 msgid "Stop snapshot error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:338 msgid "Delete snapshot error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:347 msgid "Get snapshot id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:365 msgid "Create luncopy error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:390 msgid "Find portgroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:402 msgid "Associate portgroup to mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:409 msgid "Check portgroup associate error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:494 msgid "Get iSCSI target port error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:504 msgid "Get hostgroup information error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:512 msgid "Get lungroup information error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:537 #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:558 msgid "Create hostgroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:572 msgid "Create lungroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:581 msgid "Delete lungroup error." 
msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:587 #: cinder/volume/drivers/huawei/rest_client.py:699 msgid "Check lungroup associate error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:597 #: cinder/volume/drivers/huawei/rest_client.py:686 msgid "Check hostgroup associate error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:607 msgid "Find host lun id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:627 msgid "Find host in hostgroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:651 #, python-format msgid "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:675 msgid "Add new host error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:714 msgid "Associate host to hostgroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:724 msgid "Associate lun to lungroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:733 msgid "Delete associated lun from lungroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:740 msgid "Check initiator added to array error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:751 msgid "Check initiator associated to host error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:767 msgid "Add initiator to array error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:778 msgid "Associate initiator to host error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:816 msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:839 msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:851 msgid "Use ALUA to associate initiator to host error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:861 msgid "Remove CHAP error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:868 msgid "Find mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:877 msgid "Add mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:888 msgid "Associate host to mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:900 msgid "Associate lungroup to mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:910 msgid "Delete lungroup from mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:923 msgid "Delete hostgroup from mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:935 msgid "Delete portgroup from mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:941 msgid "Delete mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:952 msgid "Find lun number error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:962 msgid "Find portgroup from mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:974 msgid "Find lun group from mapping view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:989 msgid "Start LUNcopy error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1009 msgid "Get LUNcopy information error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1026 msgid "Delete LUNcopy error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1051 msgid "Get connected free FC wwn error." 
msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1070 msgid "Add FC port to host error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1077 msgid "Get iSCSI port information error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1094 msgid "" "Get iSCSI port info error, please check the target IP configured in " "huawei conf file." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1125 msgid "Get FC target wwpn error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1167 msgid "Get QoS policy error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1189 msgid "Update QoS policy error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1197 msgid "Get target IP error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1239 #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1308 msgid "Create QoS policy error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1318 msgid "Delete QoS policy error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1332 msgid "Activate or deactivate QoS error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1338 #: cinder/volume/drivers/huawei/rest_client.py:1530 msgid "Get QoS information error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1362 msgid "Remove lun from QoS error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1374 msgid "Change lun priority error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1385 msgid "Change lun smarttier policy error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1391 msgid "Get QoS id by lun id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1401 msgid "Get lungroup id by lun id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1414 msgid "Get volume error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1424 msgid "Get snapshot error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1436 msgid "Extend volume error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1451 msgid "Create lun migration error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1458 msgid "Get lun migration task error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1464 msgid "Delete lun migration error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1471 msgid "Get partition by name error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1480 msgid "Get partition by partition id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1490 msgid "Add lun to partition error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1497 msgid "Remove lun from partition error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1502 msgid "Get cache by name error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1513 msgid "Get smartcache by cache id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1525 msgid "Remove lun from cache error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1591 msgid "Associate lun to QoS error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1603 msgid "Add lun to cache error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1608 msgid "Get array info error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1618 msgid "Remove host from array error." 
msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1623 msgid "Delete hostgroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1631 msgid "Remove host from hostgroup error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1638 msgid "Remove iscsi from host error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1683 msgid "Rename lun on array error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1693 msgid "Rename snapshot on array error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1715 msgid "Remove fc from host error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1720 msgid "Get host initiators info failed." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1742 msgid "Add fc initiator to array error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1754 msgid "Get FC ports from array error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1780 msgid "get_hyper_domain_id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1788 msgid "create_hypermetro_pair error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1797 msgid "delete_hypermetro error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1807 msgid "sync_hypermetro error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1817 msgid "stop_hypermetro error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1824 msgid "get_hypermetro_by_id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1842 msgid "check_hypermetro_exist error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1872 msgid "Change hostlun id error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1880 msgid "Get HyperMetroPair error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1889 #: cinder/volume/drivers/huawei/rest_client.py:1912 msgid "License is unavailable." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1891 msgid "Get SplitMirror error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1900 msgid "Get target LUN of SplitMirror error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1914 msgid "Get migration task error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1922 msgid "Get volume by name error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1939 msgid "Get port groups by port error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1951 msgid "Get views by port group error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1962 msgid "Get LUN group by view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1973 msgid "Get port group by view error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1983 msgid "Get FC ports by port group error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1995 msgid "Create port group error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2006 msgid "Add port to port group error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2011 msgid "Delete port group error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2018 msgid "Remove port from port group error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2024 msgid "Get engines error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2031 msgid "Get port group error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2043 msgid "Append port group description error." 
msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2052 msgid "Get ports by port group error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2061 msgid "Get remote devices error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2068 msgid "Create replication error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2077 msgid "Get pair failed." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2087 msgid "Switch over pair error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2096 msgid "Split pair error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2107 msgid "delete_replication error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2116 msgid "Sync pair error." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:2130 msgid "Set pair secondary access error." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:53 #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:58 msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:67 #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: " "%(qos_policy)s " msgstr "" #: cinder/volume/drivers/huawei/smartx.py:74 #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:83 #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: " "%(qos_policy)s." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:144 #: cinder/volume/drivers/huawei/smartx.py:228 msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:151 #, python-format msgid "Can not find partition id by name %(name)s." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:166 #: cinder/volume/drivers/huawei/smartx.py:217 msgid "Cache name is None, please set smartcache:cachename in key." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:172 #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:193 msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:204 msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:98 #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:119 #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:166 msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:250 #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:287 #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:389 #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." 
msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:454 msgid "Could not get system name." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:464 msgid "open_access_enabled is not off." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:473 msgid "Unable to parse attributes." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:478 #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:590 #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:690 msgid "_create_host failed to return the host name." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:701 msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the" " VDisk is already mapped to a host." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:802 msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:870 msgid "_update_volume_stats: Could not get storage pool data." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:930 msgid "check_for_setup_error: Unable to determine system name." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:933 msgid "check_for_setup_error: Unable to determine system id." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:945 msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:995 #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:1032 msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based" " volume." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:1039 #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:1060 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2407 msgid "create_cloned_volume: Source and destination size differ." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:86 #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:113 #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:117 msgid "_create_host: No connector ports." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:211 #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:226 msgid "" "_get_vdisk_map_properties: Could not get FC connection information for " "the host-volume connection. Is the host configured properly for FC " "connections?" msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:280 #, python-format msgid "initialize_connection: vdisk %s is not defined." 
msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:84 #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file." " valid value(s) are %(enabled)s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:94 msgid "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:180 #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:237 #, python-format msgid "vdisk %s is not defined." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:305 msgid "No config node found." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:321 #, python-format msgid "State of node is wrong. Current state is %s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_iscsi.py:332 msgid "No ip address found." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:156 #, python-format msgid "GPFS is not running, state: %s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:200 #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:221 #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:336 #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:344 #, python-format msgid "Could not find GPFS file system device: %s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:352 #, python-format msgid "Invalid storage pool %s specificed." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:373 msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:380 msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:386 msgid "Option gpfs_images_dir is not set correctly." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:393 #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:404 #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:413 #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " "cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:427 #, python-format msgid "%s must be an absolute path." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:432 #, python-format msgid "%s is not a directory." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:442 #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current" " level is %(cur)s, must be at least %(min)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:1092 #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:1125 #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is " "mounted." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:1140 #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:1150 #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." 
msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:1159 #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:1183 #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:1192 #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:125 #, python-format msgid "Unable to create replica clone for volume %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:136 #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:163 #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:192 #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync " "progress is: %(progress)s%%." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:248 #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:331 #, python-format msgid "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:346 #, python-format msgid "Unable to create the relationship for %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:360 #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:371 #, python-format msgid "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:403 #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:409 #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:422 #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because " "the replication relationship is unable to switch: %(error)s" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:133 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:375 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:417 #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:151 #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:163 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:429 #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:247 msgid "Must pass wwpn or host to lsfabric." 
msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:269 msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is" " already mapped to a host.\n" "\"" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:548 #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:580 #, python-format msgid "Failed to get code level (%s)." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:602 #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:691 #, python-format msgid "Failed to find host %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:764 msgid "create_host: Host name is not unicode or string." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:775 msgid "create_host: No initiators or wwpns supplied." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:913 msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32," " 64, 128, or 256." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:920 msgid "System does not support compression." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:925 msgid "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:931 #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:938 msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1037 #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1120 msgid "Failed to find a vdisk copy in the expected pool." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1166 #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1176 #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d " "seconds timeout. Terminating." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1256 #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: " "%(attr)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1346 #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1410 #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1536 #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1566 #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy " "would exceed the limit of 2 copies." 
msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1573 msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1677 #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID." " %(count)s were returned." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1732 #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1768 #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1957 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:3159 #, python-format msgid "Failed getting details for pool %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1966 msgid "Unable to determine system name." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1969 msgid "Unable to determine system id." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1974 msgid "do_setup: No configured nodes." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1982 #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device" " to support %(prot)s or switch to a driver using a different protocol." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1997 msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2183 msgid "create_snapshot: get source volume failed." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2197 msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2230 #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB " "and doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2285 msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2303 #, python-format msgid "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2534 msgid "" "The host is not ready to be failed back. Please resynchronize the volumes" " and resume replication on the Storwize backends." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2558 msgid "Issuing a fail-over failed because replication is not properly configured." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2566 #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to " "support replication actions." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2574 msgid "" "The back-end where the volume is created does not have replication " "enabled." 
msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2778 #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed " "when new type is replication. Volume = %s" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2795 #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2868 msgid "" "Failed to manage existing volume due to the volume to be managed is not " "in a valid I/O group." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2879 msgid "" "Failed to manage existing volume due to the volume to be managed is thin," " but the volume type chosen is thick." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2885 msgid "" "Failed to manage existing volume due to the volume to be managed is " "thick, but the volume type chosen is thin." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2892 msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2899 msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2905 #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group" " of the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen " "type is %(opt_iogrp)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2914 #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be " "managed does not match the backend pool. Pool of the volume to be managed" " is %(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2945 msgid "The specified vdisk is mapped to a host." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:3030 msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or " "a source CG." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:3087 #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:3177 msgid "Reference must contain source-id or source-name element." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:3183 #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:131 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:137 #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:146 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:152 #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:160 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:168 #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume " "%(vol)s." 
msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:247 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py:238 msgid "terminate_connection: Failed to get host name from connector." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:86 msgid "Failed to create partition." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:87 msgid "Failed to delete partition." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:88 msgid "Failed to set partition." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:91 msgid "Failed to create map." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:95 msgid "Failed to delete map." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:97 msgid "Failed to create snapshot." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:98 msgid "Failed to delete snapshot." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:99 msgid "Failed to create replica." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:100 msgid "Failed to delete replica." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:103 msgid "Failed to create iqn." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:110 msgid "Failed to delete iqn." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:112 msgid "Failed to get lv info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:113 msgid "Failed to get partition info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:114 msgid "Failed to get snapshot info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:115 msgid "Failed to get device info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:116 msgid "Failed to get channel info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:117 msgid "Failed to get map info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:118 msgid "Failed to get network info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:119 msgid "Failed to get license info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:120 msgid "Failed to get replica info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:121 msgid "Failed to get wwn info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:122 msgid "Failed to get iqn info." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:123 msgid "Failed to execute common command." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:190 msgid "san_ip is not set." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:230 msgid "Pools name is not set." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:438 #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:457 #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:551 #, python-format msgid "Have not created %(tier_levels)s tier(s)." 
msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:590 msgid "Failed to create map on mcs, no channel can map." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:652 #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:676 #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:720 msgid "Raid did not have MCS Channel." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:787 msgid "LUN map overflow on every channel." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:819 #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:877 #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:896 #, python-format msgid "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1095 #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1145 #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1151 #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1196 #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1268 #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1391 #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1508 #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1661 #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1706 msgid "Wait replica complete timeout." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1717 #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1739 #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1760 msgid "Specified logical volume does not exist." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1747 msgid "The specified volume is mapped to a host." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1788 msgid "Reference must contain source-id or source-name." msgstr "" #: cinder/volume/drivers/netapp/common.py:72 msgid "Required configuration not found" msgstr "" #: cinder/volume/drivers/netapp/common.py:101 #, python-format msgid "Storage family %s is not supported." 
msgstr "" #: cinder/volume/drivers/netapp/common.py:107 #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" #: cinder/volume/drivers/netapp/utils.py:78 #, python-format msgid "Configuration value %s is not set." msgstr "" #: cinder/volume/drivers/netapp/utils.py:185 #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "" #: cinder/volume/drivers/netapp/utils.py:190 msgid "Only one limit can be set in a QoS spec." msgstr "" #: cinder/volume/drivers/netapp/utils.py:329 #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not " "allowed in the volume type extra specs." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_7mode.py:108 #: cinder/volume/drivers/netapp/dataontap/nfs_7mode.py:72 msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_7mode.py:112 msgid "API version could not be determined." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_7mode.py:118 #: cinder/volume/drivers/netapp/dataontap/block_cmode.py:87 #: cinder/volume/drivers/netapp/eseries/library.py:229 msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_7mode.py:128 #: cinder/volume/drivers/netapp/dataontap/block_7mode.py:202 msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_7mode.py:423 msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_7mode.py:431 msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:189 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:111 #: cinder/volume/drivers/netapp/eseries/library.py:517 msgid "Pool is not available in the volume host field." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:215 #, python-format msgid "Volume %s could not be created." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:334 #, python-format msgid "Volume %s could not be created from source volume." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:436 msgid "Object is not a NetApp LUN." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:572 #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:578 #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:609 #, python-format msgid "Failure staging LUN %s to tmp." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:614 #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:631 #, python-format msgid "Failure getting LUN info for %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:695 msgid "Reference must contain either source-id or source-name element." 
msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:718 #, python-format msgid "LUN not found with given ref %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:758 #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:767 #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:774 #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:895 #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_cmode.py:150 #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_cmode.py:367 #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN " "volume with ssc features is present on vserver %(vs)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_cmode.py:400 #, python-format msgid "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_7mode.py:76 #: cinder/volume/drivers/netapp/dataontap/client/client_cmode.py:667 msgid "Data ONTAP API version could not be determined." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_7mode.py:202 msgid "" "Setting file qos policy group is not supported on this storage family and" " ontap version." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_7mode.py:207 msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:134 #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:173 #, python-format msgid "Volume %s could not be created on shares." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:197 #, python-format msgid "Resizing clone %s failed." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:200 #, python-format msgid "NFS file %s not discovered." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:567 msgid "NFS file could not be discovered." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:712 #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:727 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:937 #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:127 #, python-format msgid "Volume type does not match for share %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:460 #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:530 msgid "Source host details not found." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:586 #, python-format msgid "Converted to raw, but format is now %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:521 msgid "Backend server not NaServer." msgstr "" #: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:634 msgid "Fatal error: User not permitted to query NetApp volumes." 
msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/api.py:467 #, python-format msgid "No element by given name %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/api.py:487 msgid "Not a valid value for NaElement." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/api.py:491 msgid "NaElement name cannot be null." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/api.py:534 msgid "Type cannot be converted into NaElement." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/api.py:572 msgid "Requires an NaServer instance." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/api.py:681 #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_7mode.py:161 #: cinder/volume/drivers/netapp/dataontap/client/client_cmode.py:133 #, python-format msgid "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_7mode.py:367 #, python-format msgid "No storage path found for export path %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_7mode.py:557 #, python-format msgid "No snapshots could be found on volume %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_7mode.py:578 #: cinder/volume/drivers/netapp/dataontap/client/client_cmode.py:947 #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_base.py:310 #: cinder/volume/drivers/netapp/dataontap/performance/perf_base.py:192 #: cinder/volume/drivers/netapp/dataontap/performance/perf_base.py:204 #, python-format msgid "Counter %s not found" msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_base.py:416 #, python-format msgid "Could not start consistency group snapshot %s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_cmode.py:538 #, python-format msgid "No interface found on cluster for ip %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_cmode.py:564 #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path " "%(junction)s " msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_cmode.py:664 msgid "Unsupported Clustered Data ONTAP version." msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_cmode.py:930 #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason:" " %(reason)s" msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:60 msgid "One of the required inputs from host, port or scheme was not found." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:64 msgid "Invalid transport type." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:91 msgid "Invoking web service failed." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:258 msgid "Storage system id not set." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:294 msgid "Content type not supported." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:338 #, python-format msgid "Response error - %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:343 #, python-format msgid "" "The storage array password for %s is incorrect, please update the " "configured password." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:347 msgid "Response error - The storage-system is offline." 
msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:349 #, python-format msgid "Response error code - %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:402 #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set " "of SSC extra specs. The proxy version must be at at least " "%(min_version)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:678 #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:705 #, python-format msgid "Host group with ref %s not found" msgstr "" #: cinder/volume/drivers/netapp/eseries/client.py:715 #, python-format msgid "Host group with name %s not found" msgstr "" #: cinder/volume/drivers/netapp/eseries/exception.py:21 #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "" #: cinder/volume/drivers/netapp/eseries/exception.py:26 #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group " "%(group)s" msgstr "" #: cinder/volume/drivers/netapp/eseries/host_mapper.py:81 #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the" " 'netapp_enable_multiattach' configuration option." msgstr "" #: cinder/volume/drivers/netapp/eseries/host_mapper.py:106 #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with" " %(group)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/host_mapper.py:135 #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group " "with %(group)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/host_mapper.py:168 msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which" " requires that all LUN IDs to be unique across the entire host group." msgstr "" #: cinder/volume/drivers/netapp/eseries/host_mapper.py:177 #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to " "host (%s) has been exceeded." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:215 msgid "Configured host type is not supported." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:257 #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:357 #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:361 #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:378 msgid "The volume label is required as input." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:391 #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:419 #: cinder/volume/drivers/netapp/eseries/library.py:443 #, python-format msgid "Snapshot with id of %s could not be found." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:450 #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:470 #: cinder/volume/drivers/netapp/eseries/library.py:479 #, python-format msgid "No snapshot image found in snapshot group %s." 
msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:542 #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:578 #, python-format msgid "Pools %s does not exist" msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:632 #, python-format msgid "Failure creating volume %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:693 #, python-format msgid "Vol copy job for dest %s failed." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1179 #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1320 #, python-format msgid "No good iscsi portals found for %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1332 #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1368 #, python-format msgid "Host with ports %(ports)s not found." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1389 #, python-format msgid "Host type %s not supported." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:2085 msgid "Reference must contain either source-name or source-id element." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:2092 msgid "Volume not found on configured storage pools." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:112 #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:119 #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:123 #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "" #: cinder/volume/drivers/nexenta/utils.py:47 #, python-format msgid "Invalid value: \"%s\"" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/iscsi.py:108 msgid "No service VIP configured and no nexenta_client_address" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/jsonrpc.py:58 msgid "Wrong resource call syntax" msgstr "" #: cinder/volume/drivers/nexenta/nexentaedge/jsonrpc.py:99 #, python-format msgid "Error response: %s" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:119 #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:406 #, python-format msgid "LU does not exist for volume: %s" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:421 #, python-format msgid "No views found for LUN: %s" msgstr "" #: cinder/volume/drivers/nexenta/ns5/nfs.py:102 #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "" #: cinder/volume/drivers/nexenta/ns5/nfs.py:107 #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "" #: cinder/volume/drivers/nexenta/ns5/nfs.py:118 #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:162 #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:57 #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:168 #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:63 #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by" " event id." 
msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:173 #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:192 #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:125 #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:197 #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:130 #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:201 #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:248 msgid "Backend storage did not configure fiber channel target." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:263 #: cinder/volume/drivers/prophetstor/dpl_fc.py:363 #: cinder/volume/drivers/prophetstor/dpl_fc.py:370 #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:313 #, python-format msgid "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:331 #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:389 #, python-format msgid "Faield to unassign %(volume)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:68 #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:105 #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:137 #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:801 #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:806 #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:824 #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:829 #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:846 #, python-format msgid "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:851 #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:879 #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:886 #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:903 #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:939 #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:965 #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." 
msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:999 #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1011 #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due " "to %(ret)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1024 #: cinder/volume/drivers/prophetstor/dplcommon.py:1105 #: cinder/volume/drivers/prophetstor/dplcommon.py:1195 msgid "Pool is not available in the volume host fields." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1043 #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1048 #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1053 #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1068 #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1087 #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1124 #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1131 #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) " "%(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1136 #: cinder/volume/drivers/prophetstor/dplcommon.py:1179 #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1169 #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1174 #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) " "%(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1215 #: cinder/volume/drivers/prophetstor/dplcommon.py:1225 #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1220 #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1239 #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1272 #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1280 #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1298 #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1304 #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1309 #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." 
msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1333 #: cinder/volume/drivers/prophetstor/dplcommon.py:1343 #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1338 #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) " "%(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1360 #: cinder/volume/drivers/prophetstor/dplcommon.py:1372 #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1365 #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1480 #: cinder/volume/drivers/prophetstor/dplcommon.py:1492 #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "" #: cinder/volume/drivers/san/san.py:170 msgid "Specify san_password or san_private_key" msgstr "" #: cinder/volume/drivers/san/san.py:174 msgid "san_ip must be set" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:94 msgid "Gateway VIP is not set" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:102 msgid "Failed to connect to array" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:108 msgid "vmemclient python library not found" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:114 msgid "CONCERTO version is not supported" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:221 msgid "Dedup luns cannot be extended" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:539 #, python-format msgid "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:584 #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:612 #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:629 #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:645 #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "" #: cinder/volume/drivers/violin/v7000_fcp.py:91 msgid "No FCP targets found" msgstr "" #: cinder/volume/drivers/violin/v7000_fcp.py:203 msgid "No initiators found, cannot proceed" msgstr "" #: cinder/volume/drivers/violin/v7000_fcp.py:225 msgid "LUN export failed!" msgstr "" #: cinder/volume/drivers/vmware/exceptions.py:27 #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "" #: cinder/volume/drivers/vmware/exceptions.py:32 #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "" #: cinder/volume/drivers/vmware/exceptions.py:37 msgid "There is no virtual disk device." msgstr "" #: cinder/volume/drivers/vmware/exceptions.py:42 #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "" #: cinder/volume/drivers/vmware/exceptions.py:47 msgid "There are no valid datastores." msgstr "" #: cinder/volume/drivers/vmware/exceptions.py:52 #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "" #: cinder/volume/drivers/vmware/exceptions.py:57 msgid "There are no valid ESX hosts." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:203 #, python-format msgid "Invalid disk type: %s." 
msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:258 #, python-format msgid "%s not set." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:297 #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:587 #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:619 #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:657 #, python-format msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:986 #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1081 #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1153 msgid "Upload to glance of attached volume is not supported." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1620 msgid "Volume cannot be restored since it contains snapshots." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1670 msgid "source-name cannot be empty." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1676 msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1682 #, python-format msgid "%s does not exist." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1785 #, python-format msgid "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1910 #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: " "'%(full_clone)s' and '%(linked_clone)s'." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:2012 #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:493 #, python-format msgid "There are no valid datastores attached to %s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1416 #, python-format msgid "Invalid disk backing: %s." msgstr "" #: cinder/volume/drivers/windows/remotefs.py:52 msgid "Link path already exists and its not a symlink" msgstr "" #: cinder/volume/drivers/windows/smbfs.py:71 #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only " "Win32 platforms." msgstr "" #: cinder/volume/drivers/windows/smbfs.py:81 #, python-format msgid "File already exists at: %s" msgstr "" #: cinder/volume/drivers/windows/smbfs.py:86 #, python-format msgid "Unsupported volume format: %s " msgstr "" #: cinder/volume/drivers/zfssa/webdavclient.py:29 msgid "" "Check the state of the http service. Also ensure that the https port " "number is the same as the one specified in cinder.conf." msgstr "" #: cinder/volume/drivers/zfssa/webdavclient.py:34 msgid "User not authorized to perform WebDAV operations." msgstr "" #: cinder/volume/drivers/zfssa/webdavclient.py:37 msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "" #: cinder/volume/drivers/zfssa/webdavclient.py:39 msgid "The source volume for this WebDAV operation not found." msgstr "" #: cinder/volume/drivers/zfssa/webdavclient.py:41 msgid "Not enough storage space in the ZFS share to perform this operation." 
msgstr "" #: cinder/volume/drivers/zfssa/webdavclient.py:46 msgid "" "http service may have been abruptly disabled or put to maintenance state " "in the middle of this operation." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:239 #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. " "Current value is: %s." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:395 #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:607 #, python-format msgid "Cache volume %s does not have required properties" msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:624 #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:636 #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:699 #: cinder/volume/drivers/zfssa/zfssanfs.py:463 #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:739 #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:1080 #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by " "Cinder. Aborting manage volume. Please add 'cinder_managed' custom schema" " property to the volume and set its value to False. Alternatively, set " "the value of cinder config policy 'zfssa_manage_policy' to 'loose' to " "remove this restriction." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:1091 #: cinder/volume/drivers/zfssa/zfssanfs.py:729 #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:1099 #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:1108 #: cinder/volume/drivers/zfssa/zfssanfs.py:659 #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:1161 #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for " "backend enabled volume migration, procedding with default migration." msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:102 #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:129 #, python-format msgid "%s not set in cinder.conf" msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:416 #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:701 msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:718 #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by " "Cinder. Aborting manage volume. Please add 'cinder_managed' custom schema" " property to the volume and set its value to False. Alternatively, Set " "the value of cinder config policy 'zfssa_manage_policy' to 'loose' to " "remove this restriction." 
msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:759 msgid "Reference to volume to be managed must contain source-name." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:51 #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:70 #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message:" " %(data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:83 #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:111 #: cinder/volume/drivers/zfssa/zfssarest.py:1130 #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message:" " %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:125 #: cinder/volume/drivers/zfssa/zfssarest.py:1147 #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:138 msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:154 msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:176 #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s " "project %(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:204 #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:229 #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:249 #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:263 #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:279 #, python-format msgid "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:295 msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:328 #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code:" " %(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:350 #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target " "project: %(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:370 #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." 
msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:391 #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:429 #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:459 #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:483 #: cinder/volume/drivers/zfssa/zfssarest.py:518 #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:498 #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code:" " %(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:557 #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:575 #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:601 #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:621 #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code:" " %(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:638 #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:653 #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:670 #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:685 #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:713 #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d" " Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:734 #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:829 #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s." " Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:853 #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s" " Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:876 #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: " "%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:904 #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s" " Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:934 #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s" " Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:957 #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s" " Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:982 msgid "Error getting initiator groups." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1003 #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1051 #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: " "%(pool)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1075 #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: " "%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1097 #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: " "%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1173 #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message:" " %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1198 #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1211 #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1228 #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssarest.py:1296 #, python-format msgid "Cannot create directory %s." msgstr "" #: cinder/volume/flows/api/create_volume.py:98 #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "" #: cinder/volume/flows/api/create_volume.py:147 #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" #: cinder/volume/flows/api/create_volume.py:156 #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." 
msgstr "" #: cinder/volume/flows/api/create_volume.py:165 #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "" #: cinder/volume/flows/api/create_volume.py:205 #: cinder/volume/flows/api/create_volume.py:237 #, python-format msgid "Image %(image_id)s is not active." msgstr "" #: cinder/volume/flows/api/create_volume.py:213 #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" #: cinder/volume/flows/api/create_volume.py:221 #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk " "size %(min_disk)sGB." msgstr "" #: cinder/volume/flows/api/create_volume.py:278 msgid "Metadata property key blank" msgstr "" #: cinder/volume/flows/api/create_volume.py:282 #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "" #: cinder/volume/flows/api/create_volume.py:287 #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "" #: cinder/volume/flows/api/create_volume.py:336 #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "" #: cinder/volume/flows/api/create_volume.py:349 msgid "Volume must be in the same availability zone as the snapshot" msgstr "" #: cinder/volume/flows/api/create_volume.py:358 msgid "Volume must be in the same availability zone as the source volume" msgstr "" #: cinder/volume/flows/api/create_volume.py:444 #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" #: cinder/volume/flows/manager/create_volume.py:218 msgid "Volume create failed while extracting volume ref." msgstr "" #: cinder/volume/flows/manager/create_volume.py:378 #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "" #: cinder/volume/flows/manager/create_volume.py:380 #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided " "%(src_type)s %(src_id)s metadata" msgstr "" #: cinder/volume/flows/manager/create_volume.py:753 #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of " "size %(volume_size)dGB." msgstr "" #: cinder/volume/flows/manager/manage_existing.py:49 #: cinder/volume/flows/manager/manage_existing_snapshot.py:114 #, python-format msgid "Volume driver %s not initialized." msgstr "" #: cinder/volume/targets/iscsi.py:318 cinder/volume/targets/scst.py:390 msgid "valid iqn needed for show_target" msgstr "" #: cinder/volume/targets/scst.py:45 msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" #: cinder/volume/targets/scst.py:157 msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "" #: cinder/wsgi/common.py:92 msgid "You must implement __call__" msgstr "" #: cinder/zonemanager/fc_san_lookup_service.py:82 msgid "" "Lookup service not configured. Config option for fc_san_lookup_service " "needs to specify a concrete implementation of the lookup service." 
msgstr "" #: cinder/zonemanager/fc_zone_manager.py:178 #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" #: cinder/zonemanager/fc_zone_manager.py:243 #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py:94 #: cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py:104 msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py:132 #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py:212 #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py:237 #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:379 #, python-format msgid "Malformed nameserver string: %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:109 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:122 #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s " "zone_config=%(zone_config)s)." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:193 #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s " "error=%(err)s)." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:248 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:286 #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:300 #, python-format msgid "Error while checking transaction status: %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:316 #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:344 #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py:357 #: cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py:201 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:330 #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:207 #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:240 msgid "Failed to add zoning configuration." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:349 msgid "Failed to update or delete zoning configuration." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:387 #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:427 #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware " "v6.4 or higher" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:395 msgid "Failed to get name server info." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:435 #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:464 #, python-format msgid "Failed to create south bound connector for %s." 
msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:114 #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:121 #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:849 #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol " "%(protocol)s. Error: %(error)s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:129 #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:857 #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:166 #, python-format msgid "Error while creating authentication token: %s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:201 #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:206 #, python-format msgid "Error while authenticating with switch: %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:223 #, python-format msgid "Error while getting session information %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:243 #, python-format msgid "Error while parsing the data: %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:261 #, python-format msgid "Error while getting nvp value: %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:280 #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:314 msgid "Cannot change VF context in the session." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:319 #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable" " VF list %(vf_list)s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:326 #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:401 #, python-format msgid "Error while changing VF context %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:438 #, python-format msgid "Error while checking the firmware version %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:476 #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:526 #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:763 #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s" " error msg=%(err_msg)s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:577 #, python-format msgid "Exception while forming the zone string: %s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:632 #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:721 #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: " "%(description)s." 
msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:826 msgid "No VF ID is defined in the configuration file." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:833 msgid "VF is not enabled." msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py:223 #, python-format msgid "Malformed fcns output string: %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py:342 #, python-format msgid "Error executing command via ssh: %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:195 #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s " "error=%(err)s)." msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:243 #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:352 #, python-format msgid "Malformed show fcns database string: %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:214 #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:402 msgid "Failed to update or delete zoning configuration" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:457 msgid "Failed to get show fcns database info." msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:493 msgid "Failed to access active zoning configuration." msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:514 #, python-format msgid "Failed to access zoneset status:%s" msgstr "" cinder-8.0.0/cinder/locale/zh_TW/0000775000567000056710000000000012701406543017664 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/zh_TW/LC_MESSAGES/0000775000567000056710000000000012701406543021451 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/zh_TW/LC_MESSAGES/cinder.po0000664000567000056710000117412512701406257023272 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # FIRST AUTHOR , 2011 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Jennifer , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev19\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:34+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 12:46+0000\n" "Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder 版本:%(version)s\n" #, python-format msgid " but size is now %d" msgstr "但大小現在為 %d" #, python-format msgid " but size is now %d." msgstr "但是,大小現在卻是 %d。" msgid " or " msgstr "或者" #, python-format msgid "%(attr)s is not set." msgstr "未設定 %(attr)s。" #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. 
Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing 無法管理已連接至主機的磁區。請先斷開此磁區與現有主" "機的連線,然後再匯入" #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "結果:%(res)s。" #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "發生 %(exception)s:%(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s:許可權遭拒。" #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s:失敗,CLI 輸出不符合預期。\n" "指令:%(cmd)s\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "狀態碼:%(_status)s\n" "主體:%(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s,subjectAltName:%(sanList)s。" #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s:正在建立 NetworkPortal:確保 IP %(ip)s 上的埠 %(port)d未在由另" "一個服務使用。" #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s 的字元數目下限必須是 %(min_length)s。" #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s 超過 %(max_length)s 個字元。" #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s:備份 %(bck_id)s,磁區 %(vol_id)s 失敗。備份物件具有非預期的模式。支援" "映像檔或檔案備份,實際模式為%(vol_mode)s。" #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "%(service)s 服務在儲存體軟體驅動裝置 %(host)s 上不是處於 %(status)s" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s 必須 <= %(max_value)d" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s 必須 >= %(min_value)d" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "%(workers)d 的 %(worker_name)s 值無效,必須大於 0。" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "結果中沒有 %s \"data\"。" #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "無法存取 %s。請確認 GPFS 在作用中且檔案系統已裝載。" #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "無法使用複製作業來調整 %s 的大小,因為它不包含任何區塊。" #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "無法使用複製作業來調整 %s 的大小,因為它是在壓縮磁區上進行管理" #, python-format msgid "%s configuration option is not set." msgstr "未設定 %s 配置選項。" #, python-format msgid "%s does not exist." msgstr "%s 不存在。" #, python-format msgid "%s is not a directory." msgstr "%s 不是目錄。" #, python-format msgid "%s is not a string or unicode" msgstr "%s 不是字串或 Unicode" #, python-format msgid "%s is not installed" msgstr "%s 未安裝" #, python-format msgid "%s is not installed." 
msgstr "未安裝 %s。" #, python-format msgid "%s is not set" msgstr "未設定 %s" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "%s 未設定,並且是抄寫裝置變成有效所必需的。" #, python-format msgid "%s is not set." msgstr "未設定 %s。" #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s 必須是有效的原始映像檔或 qcow2 映像檔。" #, python-format msgid "%s must be an absolute path." msgstr "%s 必須是絕對路徑。" #, python-format msgid "%s must be an integer." msgstr "%s 必須是整數。" #, python-format msgid "%s not set in cinder.conf" msgstr "%s 未在 cinder.conf 中設定" #, python-format msgid "%s not set." msgstr "未設定 %s。" #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "在配置檔中,'%(prot)s' 不適用於 flashsystem_connection_protocol。有效值為 " "%(enabled)s。" msgid "'active' must be present when writing snap_info." msgstr "寫入 snap_info 時,狀態必須是「作用中」。" msgid "'consistencygroup_id' must be specified" msgstr "必須指定 'consistencygroup_id'" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' 剖析失敗。" msgid "'status' must be specified." msgstr "必須指定 'status'。" msgid "'volume_id' must be specified" msgstr "必須指定 'volume_id'" msgid "'{}' object has no attribute '{}'" msgstr "'{}' 物件不含屬性 '{}'" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(指令:%(cmd)s)(回覆碼:%(exit_code)s)(標準輸出:%(stdout)s)(標準錯" "誤:%(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "找不到 LUN (HLUN)。(LDEV:%(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "已發出並行(可能矛盾)的要求。" #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "找不到可用的 LUN (HLUN)。請新增不同的主機群組。(LDEV:%(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "無法新增主機群組。(埠:%(port)s,名稱:%(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "無法刪除主機群組。(埠:%(port)s,GID:%(gid)s,名稱:%(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "主機群組無效。(主機群組:%(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "無法刪除配對。(P-VOL:%(pvol)s,S-VOL:%(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "無法建立配對。已超出配對數目上限。(複製方法:%(copy_method)s,P-VOL:" "%(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "參數無效。(%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "參數值無效。(%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "找不到儲存區。(儲存區 ID:%(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "Snapshot 狀態無效。(狀態:%(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "必須指定有效的次要目標,才能進行失效接手。" msgid "A volume ID or share was not specified." msgstr "未指定磁區 ID 或共用項目。" #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "磁區狀態無效。(狀態:%(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s 失敗,錯誤字串為 %(err)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." 
msgstr "API 版本字串 %(version)s 的格式無效。格式必須是 MajorNum.MinorNum。" msgid "API key is missing for CloudByte driver." msgstr "CloudByte 驅動程式遺漏 API 索引鍵。" #, python-format msgid "API response: %(response)s" msgstr "API 回應:%(response)s" #, python-format msgid "API response: %s" msgstr "API 回應:%s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "此方法不支援 API %(version)s 版。" msgid "API version could not be determined." msgstr "無法判定 API 版本。" msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "即將刪除具有非零配額的子項專案。不應執行此動作" msgid "Access list not available for public volume types." msgstr "存取清單不可用於公用磁區類型。" msgid "Activate or deactivate QoS error." msgstr "啟動或關閉服務品質時發生錯誤。" msgid "Activate snapshot error." msgstr "啟動 Snapshot 時發生錯誤。" msgid "Add FC port to host error." msgstr "將 FC 埠新增至主機時發生錯誤。" msgid "Add fc initiator to array error." msgstr "將 FC 起始器新增至陣列時發生錯誤。" msgid "Add initiator to array error." msgstr "將起始器新增至陣列時發生錯誤。" msgid "Add lun to cache error." msgstr "將 LUN 新增至快取時發生錯誤。" msgid "Add lun to partition error." msgstr "將 LUN 新增至分割區時發生錯誤。" msgid "Add mapping view error." msgstr "新增對映視圖時發生錯誤。" msgid "Add new host error." msgstr "新增主機時發生錯誤。" msgid "Add port to port group error." msgstr "將埠新增至埠群組時發生錯誤。" #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "要管理的所有指定儲存區都不存在。請檢查配置。不存在的儲存區:%s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "API 版本要求必須與 VersionedMethod 物件進行比較。" #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "SheepdogDriver 發生錯誤。(原因:%(reason)s)" msgid "An error has occurred during backup operation" msgstr "執行備份作業期間發生錯誤" #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "嘗試修改 Snapshot '%s' 時發生錯誤。" #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "探查磁區 \"%s\" 時發生錯誤。" #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "LUNcopy 作業期間發生錯誤。LUNcopy 名稱:%(luncopyname)s。LUNcopy 狀態 " "(status):%(luncopystatus)s。LUNcopy 狀態 (state):%(luncopystate)s。" #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "讀取磁區 \"%s\" 時發生錯誤。" #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "寫入磁區 \"%s\" 時發生錯誤。" #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "無法新增 iSCSI CHAP 使用者。(使用者名稱:%(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "無法刪除 iSCSI CHAP 使用者。(使用者名稱:%(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "無法新增 iSCSI 目標。(埠:%(port)s,別名:%(alias)s,原因:%(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "無法刪除 iSCSI 目標。(埠:%(port)s,TNO:%(tno)s,別名:%(alias)s)" msgid "An unknown exception occurred." msgstr "發生一個未知例外" msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." msgstr "不容許具備已限定為某個子專案之記號的使用者來查看其母項的配額。" msgid "Append port group description error." 
msgstr "附加埠群組說明時發生錯誤。" #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "將區域和配置套用至交換器失敗(錯誤碼 = %(err_code)s,錯誤訊息 = " "%(err_msg)s)。" #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "陣列不存在或者已離線。陣列的現行狀態為 %s。" msgid "Associate host to hostgroup error." msgstr "將主機關聯至主機群組時發生錯誤。" msgid "Associate host to mapping view error." msgstr "將主機關聯至對映視圖時發生錯誤。" msgid "Associate initiator to host error." msgstr "將起始器關聯至主機時發生錯誤。" msgid "Associate lun to QoS error." msgstr "將 LUN 與服務品質建立關聯時發生錯誤。" msgid "Associate lun to lungroup error." msgstr "將 LUN 關聯至 LUN 群組時發生錯誤。" msgid "Associate lungroup to mapping view error." msgstr "將 LUN 群組關聯至對映視圖時發生錯誤。" msgid "Associate portgroup to mapping view error." msgstr "將埠群組關聯至對映視圖時發生錯誤。" msgid "At least one valid iSCSI IP address must be set." msgstr "必須至少設定一個有效的 iSCSI IP 位址。" #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "嘗試傳送 %s,但使用的鑑別金鑰無效。" #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "在 CloudByte 儲存體中找不到鑑別群組 [%s] 詳細資料。" msgid "Auth user details not found in CloudByte storage." msgstr "在 CloudByte 儲存體中找不到鑑別使用者詳細資料。" msgid "Authentication error" msgstr "鑑別錯誤" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "鑑別失敗,請驗證交換器認證,錯誤碼:%s。" msgid "Authorization error" msgstr "授權錯誤" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "可用性區域 '%(s_az)s' 無效。" msgid "Available categories:" msgstr "可用的種類:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "後端服務品質規格在此儲存體系列和 ONTAP 版本上不受支援。" #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "後端不存在 (%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "後端已經失效接手。無法失效回復。" #, python-format msgid "Backend reports: %(message)s" msgstr "後端報告:%(message)s" msgid "Backend reports: item already exists" msgstr "後端報告:項目已存在" msgid "Backend reports: item not found" msgstr "後端報告:找不到項目" msgid "Backend server not NaServer." msgstr "後端伺服器不是 NaServer。" #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "後端服務重試逾時命中:%(timeout)s 秒" msgid "Backend storage did not configure fiber channel target." msgstr "後端儲存體未配置光纖通道目標。" msgid "Backing up an in-use volume must use the force flag." msgstr "備份使用中的磁區必須使用強制旗標。" #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "找不到備份 %(backup_id)s。" msgid "Backup RBD operation failed" msgstr "執行備份 RBD 作業時失敗" msgid "Backup already exists in database." msgstr "備份已經存在於資料庫中。" #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "備份驅動程式報告了錯誤:%(message)s" msgid "Backup id required" msgstr "需要備份 ID" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "具有 Snapshot 的 GlusterFS 磁區不支援備份。" msgid "Backup is only supported for SOFS volumes without backing file." msgstr "僅支援備份沒有支援檔案的 SOFS 磁區。" msgid "Backup is only supported for raw-formatted GlusterFS volumes." msgstr "原始格式的 GlusterFS 磁區僅支援備份。" msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "僅支援備份原始格式的 SOFS 磁區。" msgid "Backup operation of an encrypted volume failed." msgstr "已加密磁區的備份作業失敗。" #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." 
msgstr "" "備份服務 %(configured_service)s 不支援驗證。未驗證備份 ID%(id)s。正在跳過驗" "證。" #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "備份服務 %(service)s 不支援驗證。未驗證備份 ID %(id)s。正在跳過重設。" #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "備份應該只有一個 Snapshot,但具有 %s 個" msgid "Backup status must be available" msgstr "備份狀態必須為可用" #, python-format msgid "Backup status must be available and not %s." msgstr "備份狀態必須為可用,且不是 %s。" msgid "Backup status must be available or error" msgstr "備份狀態必須為可用或者錯誤" msgid "Backup to be restored has invalid size" msgstr "要還原的備份大小無效" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "傳回了不當的狀態行:%(arg)s。" #, python-format msgid "Bad key(s) in quota set: %s" msgstr "配額集中的索引鍵錯誤:%s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "來自儲存磁區後端 API 的回應錯誤或不符合預期:%(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "專案格式不當:專案未採取適當格式 (%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "傳送至 Datera 叢集的要求不當:無效的引數:%(args)s | %(message)s" msgid "Bad response from Datera API" msgstr "Datera API 傳回錯誤的回應" msgid "Bad response from SolidFire API" msgstr "SolidFire API 傳回錯誤的回應" #, python-format msgid "Bad response from XMS, %s" msgstr "XMS 傳回錯誤的回應,%s" msgid "Binary" msgstr "二進位" msgid "Blank components" msgstr "空白元件" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Blockbridge API 鑑別方法(記號或密碼)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Blockbridge API 密碼(適用於鑑別方法「密碼」)" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Blockbridge API 記號(適用於鑑別方法「記號」)" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Blockbridge API 使用者(適用於鑑別方法「密碼」)" msgid "Blockbridge api host not configured" msgstr "未配置 Blockbridge API 主機" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "已使用無效的鑑別方法 '%(auth_scheme)s' 配置了 Blockbridge" msgid "Blockbridge default pool does not exist" msgstr "Blockbridge 預設儲存區不存在" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "未配置 Blockbridge 密碼(鑑別方法「密碼」所需的項目)" msgid "Blockbridge pools not configured" msgstr "未配置 Blockbridge 儲存區" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "未配置 Blockbridge 記號(鑑別方法「記號」所需的項目)" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "未配置 Blockbridge 使用者(鑑別方法「密碼」所需的項目)" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "Brocade 光纖通道分區 CLI 錯誤:%(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Brocade 光纖通道分區 HTTP 錯誤:%(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "CHAP 密碼應該是 12-16 位元組。" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "CLI 異常狀況輸出:\n" " 指令:%(cmd)s\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." 
msgstr "" "CLI 異常狀況輸出:\n" " 指令:%(cmd)s\n" " 標準輸出:%(out)s\n" "標準錯誤:%(err)s。" msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E 未建立 VDisk 至主機對映,因為該 VDisk 已經對映至某個主機。\n" "\"" msgid "CONCERTO version is not supported" msgstr "CONCERTO 版本不受支援" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "陣列上不存在 CPG (%s)" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "快取名稱為「無」,請在索引鍵中設定 smartcache:cachename。" #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "快取磁區 %(cache_vol)s 沒有 Snapshot %(cache_snap)s。" #, python-format msgid "Cache volume %s does not have required properties" msgstr "快取磁區 %s 沒有必需的內容" msgid "Call returned a None object" msgstr "呼叫傳回了 None 物件。" msgid "Can not add FC port to host." msgstr "無法將 FC 埠新增至主機。" #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "依快取名稱 %(name)s 找不到快取 ID。" #, python-format msgid "Can not find partition id by name %(name)s." msgstr "依名稱 %(name)s 找不到分割區 ID。" #, python-format msgid "Can not get pool info. pool: %s" msgstr "無法取得儲存區資訊。儲存區:%s" #, python-format msgid "Can not translate %s to integer." msgstr "無法將 %s 轉換為整數。" #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "無法存取 'scality_sofs_config':%s" msgid "Can't attach snapshot." msgstr "無法連接 Snapshot。" msgid "Can't decode backup record." msgstr "無法將備份記錄解碼。" #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "無法延伸抄寫磁區,磁區:%(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "在陣列上找不到 LUN,請檢查 source-name 或 source-id。" #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "在陣列上找不到快取名稱,快取名稱為:%(name)s。" #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "從資料庫中找不到 LUN ID,磁區:%(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "在陣列上找不到 LUN 資訊,磁區:%(id)s,LUN 名稱:%(name)s。" #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "在陣列上找不到分割區名稱,分割區名稱為:%(name)s。" #, python-format msgid "Can't find service: %s" msgstr "找不到服務:%s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "在陣列上找不到 Snapshot,請檢查 source-name 或 source-id。" msgid "Can't find the same host id from arrays." msgstr "從陣列中找不到相同的主機 ID。" #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "無法從 Snapshot 取得磁區 ID,Snapshot:%(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "無法取得磁區 ID。磁區名稱:%s。" #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "無法將 LUN %(lun_id)s 匯入 Cinder。LUN 類型不符。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於 HyperMetroPair 中。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於 LUN 複製作業中。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於某個 LUN 群組中。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於某個 LUN 鏡映中。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." 
msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於 SplitMirror 中。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於移轉作業中。" #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於遠端抄寫作業中。" #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "無法將 LUN %s 匯入 Cinder。LUN 未處於正常狀態。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "無法將 Snapshot %s 匯入 Cinder。Snapshot 不屬於磁區。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "無法將 Snapshot %s 匯入 Cinder。已將 Snapshot 向起始器公開。" #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "無法將 Snapshot %s 匯入 Cinder。Snapshot 未處於正常狀態,或者執行中狀態不在線" "上。" #, python-format msgid "Can't open config file: %s" msgstr "無法開啟配置檔 %s" msgid "Can't parse backup record." msgstr "無法剖析備份記錄。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為該磁區沒有磁區類" "型。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為該磁區已經位於一" "致性群組 %(orig_group)s 中。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為找不到該磁區。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為該磁區不存在。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為磁區處於無效的狀" "態:%(status)s。有效的狀態為:%(valid)s。" #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為此群組不支援磁區" "類型 %(volume_type)s。" #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." msgstr "" "無法連接已經連接的磁區 %s;已透過'netapp_enable_multiattach' 配置選項停用了多" "重連接。" msgid "Cannot change VF context in the session." msgstr "無法變更階段作業中的 VF 環境定義。" #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "無法變更 VF 環境定義,在可管理的 VF 清單 %(vf_list)s 中無法使用指定的 VF。" msgid "Cannot connect to ECOM server." msgstr "無法連接至 ECOM 伺服器。" #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "無法從大小為 %(src_vol_size)s 的磁區建立大小為 %(vol_size)s 的複本" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "無法建立一致性群組 %(group)s,因為 Snapshot %(snap)s 不是處於有效的狀態。有效" "的狀態為:%(valid)s。" #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. 
Valid states are: %(valid)s." msgstr "" "無法建立一致性群組 %(group)s,因為來源磁區%(source_vol)s 未處於有效狀態。有效" "狀態為:%(valid)s。" #, python-format msgid "Cannot create directory %s." msgstr "無法建立目錄 %s。" msgid "Cannot create encryption specs. Volume type in use." msgstr "無法建立加密規格。磁區類型在使用中。" #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "無法建立磁碟格式為 %s 的映像檔。僅接受 VMDK 磁碟格式。" #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "無法建立遮罩視圖:%(maskingViewName)s。" #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "在下列情況下,無法在 E 系列陣列上建立超過 %(req)s 個磁" "區:'netapp_enable_multiattach' 設定為 true。" #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "無法建立或找不到名稱為 %(sgGroupName)s 的儲存體群組。" #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "無法從大小為 %(snap_size)s 的 Snapshot 建立大小為 %(vol_size)s 的磁區" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "無法建立大小為 %s 的磁區:不是 8 GB 的倍數。" #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "無法使用名稱 %(name)s 及規格 %(extra_specs)s 來建立 volume_type" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "存在 Snapshot 時,無法刪除 LUN %s。" #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "無法刪除快取磁區:%(cachevol_name)s。該快取磁區已在 %(updated_at)s 得到更新," "且目前具有 %(numclones)d 個磁區實例。" #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "無法刪除快取磁區:%(cachevol_name)s。該快取磁區已在 %(updated_at)s 得到更新," "且目前具有 %(numclones)s 個磁區實例。" msgid "Cannot delete encryption specs. Volume type in use." msgstr "無法刪除加密規格。磁區類型在使用中。" msgid "Cannot determine storage pool settings." msgstr "無法判定儲存區設定。" msgid "Cannot execute /sbin/mount.sofs" msgstr "無法執行 /sbin/mount.sofs" #, python-format msgid "Cannot find CG group %s." msgstr "找不到 CG 群組 %s。" #, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "找不到下列儲存體系統的「控制器配置服務」:%(storage_system)s。" #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "找不到「抄寫服務」,無法建立 Snapshot %s 的磁區。" #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "找不到「抄寫服務」以刪除 Snapshot %s。" #, python-format msgid "Cannot find Replication service on system %s." msgstr "在系統 %s 上找不到「抄寫服務」。" #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "找不到磁區:%(id)s。取消管理作業。正在結束..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "找不到磁區:%(volumename)s。「延伸」作業。正在結束..." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "找不到磁區 %(volumeName)s 的裝置號碼。" msgid "Cannot find migration task." msgstr "找不到移轉作業。" #, python-format msgid "Cannot find replication service on system %s." msgstr "在系統 %s 上找不到抄寫服務。" #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "找不到來源 CG 實例。consistencygroup_id:%s。" #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." 
msgstr "無法依下列通道 ID 取得 mcs_id:%(channel_id)s。" msgid "Cannot get necessary pool or storage system information." msgstr "無法取得必要的儲存區或儲存體系統資訊。" #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "無法取得或建立下列磁區的儲存體群組 %(sgGroupName)s:%(volumeName)s " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "無法取得或建立起始器群組:%(igGroupName)s。" #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "無法取得埠群組:%(pgGroupName)s。" #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "無法從下列遮罩視圖取得儲存體群組 %(sgGroupName)s:" "%(maskingViewInstanceName)s。" #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "無法取得 %(sps)s 的受支援大小範圍。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "無法取得 FAST 原則 %(fastPolicyName)s 的預設儲存體群組。" msgid "Cannot get the portgroup from the masking view." msgstr "無法取得遮罩視圖中的埠群組。" msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "無法裝載 Scality SOFS,請檢查系統日誌以找出錯誤" msgid "Cannot ping DRBDmanage backend" msgstr "無法對 DRBDmanage 後端進行連線測試" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "無法將磁區 %(id)s 置於 %(host)s 上" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "無法同時提供 'cgsnapshot_id' 和 'source_cgid',以從來源建立一致性群組 " "%(name)s。" msgid "Cannot register resource" msgstr "無法登錄資源" msgid "Cannot register resources" msgstr "無法登錄資源" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "無法將磁區 %(volume_id)s 從一致性群組 %(group_id)s 中移除,因為該磁區不在此群" "組中。" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "無法將磁區 %(volume_id)s 從一致性群組 %(group_id)s 中移除,因為磁區處於無效的" "狀態:%(status)s。有效的狀態為:%(valid)s。" #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "HPE3PARDriver 無法透過執行 Retype 動作變為 %s。" msgid "Cannot retype from one 3PAR array to another." msgstr "一個 3PAR 陣列無法透過執行 Retype 動作變為另一個陣列。" msgid "Cannot retype to a CPG in a different domain." msgstr "無法執行 Retype 動作,以變為不同網域中的 CPG。" msgid "Cannot retype to a snap CPG in a different domain." msgstr "無法執行 Retype 動作,以變為不同網域中的 snapCPG。" msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "無法執行 vgc-cluster 指令,請確保已安裝了軟體,並且已經妥善設定許可權。" msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "無法設定 hitachi_serial_number 及 hitachi_unit_name。" msgid "Cannot specify both protection domain name and protection domain id." msgstr "不能同時指定保護網域名稱及保護網域 ID。" msgid "Cannot specify both storage pool name and storage pool id." msgstr "不能同時指定儲存區名稱及儲存區 ID。" #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "無法更新一致性群組 %(group_id)s,因為未提供有效的名稱、說明、add_volumes 或 " "remove_volumes。" msgid "Cannot update encryption specs. Volume type in use." 
msgstr "無法更新加密規格。磁區類型在使用中。" #, python-format msgid "Cannot update volume_type %(id)s" msgstr "無法更新 volume_type %(id)s" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "無法驗證物件 %(instanceName)s 是否存在。" msgid "Cascade option is not supported." msgstr "不支援連鎖選項。" #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "找不到 CgSnapshot %(cgsnapshot_id)s。" msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost 是空的。將不會建立一致性群組。" msgid "Cgsnapshot status must be available or error" msgstr "CgSnapshot 狀態必須為可用或者錯誤" msgid "Change hostlun id error." msgstr "變更主機 LUN ID 時發生錯誤。" msgid "Change lun priority error." msgstr "變更 LUN 優先順序時發生錯誤。" msgid "Change lun smarttier policy error." msgstr "變更 LUN smarttier 原則時發生錯誤。" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "變更會使下列資源的用量小於 0:%(unders)s" msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "請檢查指派給此驅動程式之 ZFS 共用的存取權限。" msgid "Check hostgroup associate error." msgstr "檢查主機群組關聯時發生錯誤。" msgid "Check initiator added to array error." msgstr "檢查已新增至陣列的起始器時發生錯誤。" msgid "Check initiator associated to host error." msgstr "檢查與主機相關聯的起始器時發生錯誤。" msgid "Check lungroup associate error." msgstr "檢查 LUN 群組關聯時發生錯誤。" msgid "Check portgroup associate error." msgstr "檢查埠群組關聯時發生錯誤。" msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "請檢查 HTTP 服務的狀態。同時確保 HTTPS 埠號與 cinder.conf 中指定的埠號相同。" msgid "Chunk size is not multiple of block size for creating hash." msgstr "片段大小不是用於建立雜湊之區塊大小的倍數。" #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Cisco 光纖通道分區 CLI 錯誤:%(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "複製功能未在 %(storageSystem)s 上獲得授權。" #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "複製類型 '%(clone_type)s' 無效;有效值為:'%(full_clone)s' 和 " "'%(linked_clone)s'。" msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "叢集未格式化。您可能應該執行 \"dog cluster format\"。" #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Coho Data Cinder 驅動程式失敗:%(message)s" msgid "Coho rpc port is not configured" msgstr "未配置 Coho RPC 埠" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "指令 %(cmd)s 在 CLI 中遭到封鎖,且已取消" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition:%s 逾時" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition:%s 逾時。" msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "未安裝「壓縮啟用程式」。無法建立已壓縮的磁區。" #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "找不到計算叢集:%(cluster)s。" msgid "Condition has no field." msgstr "條件不含任何欄位。" #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "配置 'max_over_subscription_ratio' 無效。必需大於 0:%s" msgid "Configuration error: dell_sc_ssn not set." msgstr "配置錯誤:未設定 dell_sc_ssn。" #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "配置檔 %(configurationFile)s 不存在。" msgid "Configuration is not found." 
msgstr "找不到配置。" #, python-format msgid "Configuration value %s is not set." msgstr "未設定配置值 %s。" msgid "Configured host type is not supported." msgstr "不支援所配置的主機類型。" #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "磁區類型 %s 中存在衝突的服務品質規格:將服務品質規格關聯至磁區類型時,不容許" "在磁區類型額外規格中使用舊式 \"netapp:qos_policy_group\"。" #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Glance 連線失敗:%(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Swift 連線失敗:%(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "連接器未提供:%s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "連接器沒有必要資訊:%(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "一致性群組 %s 仍包含磁區。需要強制旗標來將其刪除。" #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "一致性群組 %s 仍具有相依 CgSnapshot。" msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "一致性群組是空的。將不建立 CgSnapshot。" #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "一致性群組狀態必須為可用或錯誤,但現行狀態為:%s" #, python-format msgid "Consistency group status must be available, but current status is: %s." msgstr "一致性群組狀態必須為可用,但是現行狀態為:%s。" #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "找不到 ConsistencyGroup %(consistencygroup_id)s。" msgid "Container" msgstr "容器" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "VMDK 驅動程式不支援儲存器格式 %s,僅支援'bare'。" msgid "Container size smaller than required file size." msgstr "儲存器大小小於必要的檔案大小。" msgid "Content type not supported." msgstr "內容類型不受支援。" #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到「控制器配置服務」。" #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "無法解析控制器 IP '%(host)s':%(e)s。" #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "已轉換為 %(f1)s,但格式現在為 %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "已轉換為 %(vol_format)s,但格式現在為 %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "已轉換為原始,但格式現在為 %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "已轉換為原始,但格式現在為 %s。" msgid "Coordinator uninitialized." msgstr "協調者未起始設定。" #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "複製磁區作業失敗:convert_to_base_volume:ID = %(id)s、狀態 = %(status)s。" #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "" "複製磁區作業失敗:create_cloned_volume ID = %(id)s,狀態 = %(status)s。" #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "正在將 meta 資料從 %(src_type)s %(src_id)s 複製到 %(vol_id)s。" msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "無法判定要使用的 Keystone 端點。這可以在服務型錄中進行設定,也可以使用 " "cinder.conf 配置選項 'backup_swift_auth_url' 進行設定。" msgid "" "Could not determine which Swift endpoint to use. 
This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "無法判定要使用的 Swift 端點。這可以在服務型錄中進行設定,也可以使用 cinder." "conf 配置選項 'backup_swift_url' 進行設定。" msgid "Could not find DISCO wsdl file." msgstr "找不到 DISCO WSDL 檔。" #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "找不到 GPFS 叢集 ID:%s。" #, python-format msgid "Could not find GPFS file system device: %s." msgstr "找不到 GPFS 檔案系統裝置:%s。" #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "找不到類型為 %(type_id)s 之磁區 %(volume_id)s 的主機。" #, python-format msgid "Could not find config at %(path)s" msgstr "在 %(path)s 處找不到配置" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "找不到磁區 %(volumeName)s 的 iSCSI 匯出。" #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "找不到磁區 %s 的 iSCSI 匯出" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "找不到磁區 %(volume_id)s 的 iSCSI 目標。" #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "在指令 %(cmd)s 的輸出 %(out)s 中找不到索引鍵。" #, python-format msgid "Could not find parameter %(param)s" msgstr "找不到參數 %(param)s" #, python-format msgid "Could not find target %s" msgstr "找不到目標 %s" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "在陣列上,找不到 Snapshot '%s' 的母項磁區。" #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "在磁區 %(vol)s 上找不到唯一的 Snapshot %(snap)s。" msgid "Could not get system name." msgstr "無法取得系統名稱。" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "無法從 %(path)s 載入 paste 應用程式 '%(name)s'" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "無法讀取 %s。正在使用 sudo 來重新執行" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "無法讀取 Snapshot %(name)s 的資訊。程式碼:%(code)s。原因:%(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "無法還原配置檔 %(file_path)s:%(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "無法將配置儲存至 %(file_path)s:%(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "無法啟動一致性群組 Snapshot %s。" #, python-format msgid "Counter %s not found" msgstr "找不到計數器 %s" msgid "Create QoS policy error." msgstr "建立服務品質原則時發生錯誤。" #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "已中止建立備份,預期備份狀態 %(expected_status)s,但取得 %(actual_status)s。" #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "已中斷建立備份,預期磁區狀態 %(expected_status)s,但取得 %(actual_status)s。" msgid "Create consistency group failed." msgstr "建立一致性群組失敗。" #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "不支援從映像檔 %(image)s 建立類型為 %(type)s 的已加密磁區。" msgid "Create export for volume failed." msgstr "針對磁區建立匯出失敗。" msgid "Create hostgroup error." msgstr "建立主機群組時發生錯誤。" #, python-format msgid "Create hypermetro error. %s." msgstr "建立 hypermetro 時發生錯誤。%s。" msgid "Create lun error." msgstr "建立 LUN 時發生錯誤。" msgid "Create lun migration error." msgstr "建立 LUN 移轉時發生錯誤。" msgid "Create luncopy error." msgstr "建立 LUNcopy 時發生錯誤。" msgid "Create lungroup error." 
msgstr "建立 LUN 群組時發生錯誤。" msgid "Create manager volume flow failed." msgstr "建立管理程式磁區流程失敗。" msgid "Create port group error." msgstr "建立埠群組時發生錯誤。" msgid "Create replication error." msgstr "建立抄寫時發生錯誤。" #, python-format msgid "Create replication pair failed. Error: %s." msgstr "建立抄寫配對失敗。錯誤:%s。" msgid "Create snapshot error." msgstr "建立 Snapshot 時發生錯誤。" #, python-format msgid "Create volume error. Because %s." msgstr "建立磁區時發生錯誤。因為 %s。" msgid "Create volume failed." msgstr "建立磁區失敗。" msgid "Creating a consistency group from a source is not currently supported." msgstr "目前,不支援從來源建立一致性群組。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "建立及啟動區域集時失敗:(區域集 = %(cfg_name)s 錯誤 = %(err)s)。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "建立及啟動區域集時失敗:(區域集 = %(zoneset)s 錯誤 = %(err)s)。" #, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "正在建立從 %(begin_period)s 至 %(end_period)s 的使用情形" msgid "Current host isn't part of HGST domain." msgstr "現行主機不是 HGST 網域的一部分。" #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "現行主機不適用於類型為 %(type)s 的磁區 %(id)s,不容許移轉" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "磁區 %(vol)s 目前對映的主機位於不受支援的主機群組%(group)s 中。" msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "已淘汰:部署 Cinder API 的第 1 版。" msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "已淘汰:部署 Cinder API 的第 2 版。" #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "DRBDmanage 驅動程式錯誤:應答中沒有預期的索引鍵 \"%s\",DRBDmanage 版本是否錯" "誤?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "DRBDmanage 驅動程式設定錯誤:找不到部分必要程式庫(dbus、drbdmanage.*)。" #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage 預期一個資源 (\"%(res)s\"),但卻取得 %(n)d" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "在 Snapshot 還原之後等待新磁區時,DRBDmanage 逾時;資源 \"%(res)s\",磁區 " "\"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "等待建立 Snapshot 時,DRBDmanage 逾時;資源 \"%(res)s\",Snapshot \"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "等待建立磁區時,DRBDmanage 逾時;資源 \"%(res)s\",磁區 \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "等待磁區大小時,DRBDmanage 逾時;磁區 ID \"%(id)s\"(資源 \"%(res)s\",VNR " "%(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "無法判定資料 ONTAP API 版本。" msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "在 7 模式下運作的資料 ONTAP 不支援服務品質原則群組。" msgid "Database schema downgrade is not allowed." 
msgstr "不容許將資料庫綱目降級。" #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "資料集 %s 未在「Nexenta 儲存庫」軟體驅動裝置中共用" #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "在 Nexenta SA 中,找不到資料集群組 %s" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup 是有效的供應類型,但需要 WSAPI '%(dedup_version)s' 版,已安裝 " "'%(version)s' 版。" msgid "Dedup luns cannot be extended" msgstr "無法延伸 Dedup LUN" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "未安裝「刪除重複啟用程式」。無法建立已刪除的重複磁區" msgid "Default pool name if unspecified." msgstr "如果未指定,則將使用預設儲存區名稱。" #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "資源 %(res)s 的預設配額是由預設配額旗標quota_%(res)s 所設定,但它現已淘汰。請" "將預設配額類別用於預設配額。" msgid "Default volume type can not be found." msgstr "找不到預設磁區類型。" msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "定義公開儲存區集及其相關聯的後端查詢字串" msgid "Delete LUNcopy error." msgstr "刪除 LUNcopy 時發生錯誤。" msgid "Delete QoS policy error." msgstr "刪除服務品質原則時發生錯誤。" msgid "Delete associated lun from lungroup error." msgstr "從 LUN 群組中刪除相關聯的 LUN 時發生錯誤。" #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "已中止刪除備份,目前配置的備份服務[%(configured_service)s] 不是建立此備份所使" "用的備份服務 [%(backup_service)s]。" msgid "Delete consistency group failed." msgstr "刪除一致性群組失敗。" msgid "Delete hostgroup error." msgstr "刪除主機群組時發生錯誤。" msgid "Delete hostgroup from mapping view error." msgstr "從對映視圖中刪除主機群組時發生錯誤。" msgid "Delete lun error." msgstr "刪除 LUN 時發生錯誤。" msgid "Delete lun migration error." msgstr "刪除 LUN 移轉時發生錯誤。" msgid "Delete lungroup error." msgstr "刪除 LUN 群組時發生錯誤。" msgid "Delete lungroup from mapping view error." msgstr "從對映視圖中刪除 LUN 群組時發生錯誤。" msgid "Delete mapping view error." msgstr "刪除對映視圖時發生錯誤。" msgid "Delete port group error." msgstr "刪除埠群組時發生錯誤。" msgid "Delete portgroup from mapping view error." msgstr "從對映視圖中刪除埠群組時發生錯誤。" msgid "Delete snapshot error." msgstr "刪除 Snapshot 時發生錯誤。" #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "狀態 %s 不支援刪除磁區的 Snapshot。" #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup 已中止,預期備份狀態 %(expected_status)s,但取得 " "%(actual_status)s。" msgid "Deleting volume from database and skipping rpc." msgstr "正在從資料庫刪除磁區並跳過 RPC。" #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "刪除區域時失敗:(指令 = %(cmd)s 錯誤 = %(err)s)。" msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "一致性群組支援需要 Dell API 2.1 或更新版本" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "Dell Cinder 驅動程式配置錯誤,直接連接不支援抄寫。" #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "Dell Cinder 驅動程式配置錯誤,找不到 replication_device %s。" msgid "Deploy v3 of the Cinder API." msgstr "部署 Cinder API 的第 3 版。" msgid "Describe-resource is admin only functionality" msgstr "Describe-resource 是管理者專用功能" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." 
msgstr "目的地具有 migration_status %(stat)s,預期狀態為 %(exp)s。" msgid "Destination host must be different than the current host." msgstr "目的地主機必須不同於現行主機。" msgid "Destination volume not mid-migration." msgstr "移轉期間找不到目的地磁區。" msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "分離磁區失敗:存在多個連接,但卻未提供attachment_id。" msgid "Detach volume from instance and then try again." msgstr "請從實例分離磁區,然後再試一次。" #, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "已偵測到多個名稱為 %(vol_name)s 的磁區" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "在 %(fun)s 中找不到預期的直欄:%(hdr)s。" #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "在 %(fun)s 中找不到預期的索引鍵 %(key)s:%(raw)s。" msgid "Disabled reason contains invalid characters or is too long" msgstr "停用原因包含無效的字元,或者太長" #, python-format msgid "Domain with name %s wasn't found." msgstr "找不到名稱為 %s 的網域。" #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "偵測到舊版 GPFS 叢集。「GPFS 副本」特性未在叢集常駐程式層次 %(cur)s 啟用 - 必" "須至少是層次 %(min)s。" #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "驅動程式起始設定連線失敗(錯誤:%(err)s)。" msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "驅動程式無法執行 Retype 動作,因為磁區 (LUN {}) 具有已禁止移轉的 Snapshot。" msgid "Driver must implement initialize_connection" msgstr "驅動程式必須實作 initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "驅動程式已順利將匯入的備份資料解碼,但是有遺漏的欄位 (%s)。" #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "E 系列 Proxy API 版本 %(current_version)s 不支援完整的SSC 額外規格集。Proxy " "版本必須至少是 %(min_version)s。" #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "EMC VNX Cinder 驅動程式 CLI 異常狀況:%(cmd)s(回覆碼:%(rc)s)(輸出:" "%(out)s)。" #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" "EMC VNX Cinder 驅動程式 SPUnavailableException:%(cmd)s(回覆碼:%(rc)s)(輸" "出:%(out)s)。" msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp、EcomServerPort、EcomUserName、EcomPassword 必須具有有效的值。" #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "必須提供 'cgsnapshot_id' 或 'source_cgid',才能從來源建立一致性群組 " "%(name)s。" #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO %(slo)s 或工作量 %(workload)s 無效。請檢查前一個錯誤陳述式以取得有效的" "值。" msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "需要 hitachi_serial_number 或 hitachi_unit_name。" #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到「元素組合服務」。" msgid "Enables QoS." msgstr "啟用服務品質。" msgid "Enables compression." msgstr "啟用壓縮。" msgid "Enables replication." msgstr "啟用抄寫。" msgid "Ensure that configfs is mounted at /sys/kernel/config." 
msgstr "請確保 configfs 已裝載於 /sys/kernel/config 中。" #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "在下列 groupInitiatorGroup 上新增起始器 %(initiator)s 時發生錯誤:" "%(initiatorgroup)s。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "新增至帶 IQN %(iqn)s 的目標群組 %(targetgroup)s 時發生錯誤。回覆碼:" "%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "Error Attaching volume %(vol)s." msgstr "連接磁區 %(vol)s 時發生錯誤。" #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "在儲存區 %(pool)s 的磁區 %(lun)s 上複製 Snapshot:%(snapshot)s 時發生錯誤專" "案:%(project)s,副本專案:%(clone_proj)s,回覆碼:%(ret.status)d。訊息:" "%(ret.data)s。" #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "建立副本磁區 %(cloneName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "建立副本磁區時發生錯誤:磁區:%(cloneName)s 來源磁區:%(sourceName)s。回覆" "碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "建立群組 %(groupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "建立遮罩視圖 %(groupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "建立磁區 %(volumeName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "建立磁區 %(volumename)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "建立群組抄本時發生錯誤:來源:%(source)s 目標:%(target)s。回覆碼:%(rc)lu。" "錯誤:%(error)s。" #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "在別名 %(alias)s 上建立起始器 %(initiator)s 時發生錯誤。回覆碼:" "%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "在儲存區 %(pool)s 上建立專案 %(project)s 時發生錯誤。回覆碼:%(ret.status)d。" "訊息:%(ret.data)s。" #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "建立內容 %(property)s 類型 %(type)s 說明%(description)s 時發生錯誤。回覆碼:" "%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "建立共用 %(name)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
msgstr "" "在儲存區 %(pool)s 的磁區 %(lun)s 上建立 Snapshot:%(snapshot)s 時發生錯誤專" "案:%(project)s。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在下列儲存區的共用 %(share)s 上建立 Snapshot %(snapshot)s 時發生錯誤:" "%(pool)s 專案:%(project)s 回覆碼:%(ret.status)d 訊息:%(ret.data)s。" #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "建立目標 %(alias)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "建立帶 IQN %(iqn)s 的目標群組 %(targetgroup)s 時發生錯誤。回覆碼:" "%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "建立磁區 %(lun)s 時發生錯誤。大小:%(size)s。回覆碼:%(ret.status)d。訊息:" "%(ret.data)s。" #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "建立新的複合磁區時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "針對目標 %(tgt)s 和儲存區 %(tgt_pool)s,在儲存區 %(pool)s 專案 %(proj)s磁區 " "%(vol)s 上建立抄寫動作時發生錯誤。回覆碼:%(ret.status)d。訊息:" "%(ret.data)s。" msgid "Error Creating unbound volume on an Extend operation." msgstr "在「延伸」作業上建立未連結的磁區時發生錯誤。" msgid "Error Creating unbound volume." msgstr "建立未連結的磁區時發生錯誤。" #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "刪除磁區 %(volumeName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "刪除群組 %(storageGroupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "刪除起始器群組 %(initiatorGroupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:" "%(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在下列儲存區的共用 %(share)s 上刪除 Snapshot %(snapshot)s 時發生錯誤:" "%(pool)s 專案:%(project)s 回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在下列儲存區的磁區 %(lun)s 上刪除 Snapshot:%(snapshot)s 時發生錯誤:" "%(pool)s 專案:%(project)s 回覆碼:%(ret.status)d 訊息:%(ret.data)s。" #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "從儲存區 %(pool)s 刪除磁區 %(lun)s 時發生錯誤,專案 %(project)s。回覆碼:" "%(ret.status)d,訊息:%(ret.data)s。" #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "刪除儲存區 %(pool)s 上的專案 %(project)s 時發生錯誤。回覆碼:%(ret.status)d。" "訊息:%(ret.data)s。" #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." 
msgstr "" "刪除抄寫動作 %(id)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "延伸磁區 %(volumeName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "取得起始器時發生錯誤:InitiatorGroup:%(initiatorgroup)s 回覆碼:" "%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "取得儲存區統計資料時發生錯誤:儲存區:%(pool)s,回覆碼:%(status)d,訊息:" "%(data)s。" #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "取得專案統計資料時發生錯誤:儲存區:%(pool)s,專案:%(project)s,回覆碼:" "%(ret.status)d,訊息:%(ret.data)s。" #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "取得儲存區 %(pool)s 上的共用 %(share)s 時發生錯誤。專案:%(project)s回覆碼:" "%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在儲存區 %(pool)s 的磁區 %(lun)s 上取得 Snapshot:%(snapshot)s 時發生錯誤專" "案:%(project)s。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "取得目標 %(alias)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "取得儲存區 %(pool)s 上的磁區 %(lun)s 時發生錯誤。專案:%(project)s。回覆碼:" "%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "將磁區從一個儲存區移轉至另一個儲存區時發生錯誤。回覆碼:%(rc)lu。錯誤:" "%(error)s。" #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "修改遮罩視圖 %(groupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "錯誤的儲存區所有權:儲存區 %(pool)s 不歸 %(host)s 擁有。" #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在儲存區 %(pool)s 的磁區 %(lun)s 上設定內容 Props:%(props)s 時發生錯誤專案:" "%(project)s。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "終止移轉階段作業時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "驗證起始器 %(iqn)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "驗證儲存區 %(pool)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "在儲存區 %(pool)s 上驗證專案 %(project)s 時發生錯誤。回覆碼:%(ret.status)d。" "訊息:%(ret.data)s。" #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." 
msgstr "" "驗證服務 %(service)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "驗證目標 %(alias)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "在專案 %(project)s 及儲存區 %(pool)s 上驗證共用 %(share)s 時發生錯誤。回覆" "碼:%(ret.status)d,訊息:%(ret.data)s。" #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "使用下列實例路徑新增磁區 %(volumeName)s 時發生錯誤:%(volumeInstancePath)s。" #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "將起始器新增至群組 %(groupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:" "%(error)s。" #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "將磁區新增至複合磁區時發生錯誤。錯誤:%(error)s。" #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "將磁區 %(volumename)s 附加至目標基本磁區時發生錯誤。" #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "將儲存體群組 %(storageGroupName)s 與下列 FAST 原則建立關聯時發生錯誤:" "%(fastPolicyName)s,錯誤說明:%(errordesc)s。" #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "連接磁區 %s 時發生錯誤。可能已達到目標限制!" #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "岔斷複製關係時發生錯誤:同步名稱:%(syncName)s 回覆碼:%(rc)lu。錯誤:" "%(error)s。" msgid "Error connecting to ceph cluster." msgstr "連接至 ceph 叢集時發生錯誤。" #, python-format msgid "Error connecting via ssh: %s" msgstr "透過 SSH 進行連接時發生錯誤:%s" #, python-format msgid "Error creating volume: %s." msgstr "建立磁區時發生錯誤:%s。" msgid "Error deleting replay profile." msgstr "刪除重播設定檔時發生錯誤。" #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "刪除磁區 %(ssn)s 時發生錯誤:%(volume)s" #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "刪除磁區 %(vol)s 時發生錯誤:%(err)s。" #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "評估器剖析期間發生錯誤:%(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "編輯儲存區 %(pool)s 上的共用 %(share)s 時發生錯誤。回覆碼:%(ret.status)d。訊" "息:%(ret.data)s。" #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "對 NetworkPortal 啟用 iSER 時發生錯誤:請確保 RDMA 在 IP %(ip)s 的 iSCSI 埠 " "%(port)d 上受支援。" #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "清除失敗連接期間發生錯誤:%(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "執行 CloudByte API [%(cmd)s] 時發生錯誤,錯誤:%(err)s。" msgid "Error executing EQL command" msgstr "執行 EQL 指令時發生錯誤" #, python-format msgid "Error executing command via ssh: %s" msgstr "透過 SSH 來執行指令時發生錯誤:%s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "延伸磁區 %(vol)s 時發生錯誤:%(err)s。" #, python-format msgid "Error extending volume: %(reason)s" msgstr "延伸磁區時發生錯誤:%(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "尋找 %(name)s 時發生錯誤。" #, python-format msgid "Error finding %s." 
msgstr "尋找 %s 時發生錯誤。" #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "取得 ReplicationSettingData 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "取得軟體驅動裝置版本詳細資料時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret." "data)s。" #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "從名稱 %(name)s 取得網域 ID 時發生錯誤:%(err)s。" #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "從名稱 %(name)s 取得網域 ID 時發生錯誤:%(id)s。" msgid "Error getting initiator groups." msgstr "取得起始器群組時發生錯誤。" #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "從名稱 %(pool)s 取得儲存區 ID 時發生錯誤:%(err)s。" #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "從名稱 %(pool_name)s 取得儲存區 ID 時發生錯誤:%(err_msg)s。" #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "取得抄寫動作 %(id)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "取得抄寫來源詳細資料時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "取得抄寫目標詳細資料時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "取得版本時發生錯誤:SVC:%(svc)s。回覆碼:%(ret.status)d 訊息:%(ret.data)s。" #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" "在 CloudByte 儲存體中,對磁區 [%(cb_volume)s] 執行的作業 [%(operation)s] 發生" "錯誤:[%(cb_error)s],錯誤碼:[%(error_code)s]。" #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "SolidFire API 回應發生錯誤:資料 = %(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "為 %(space)s 建立大小為 %(size)d GB 的空間時發生錯誤" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "將磁區 %(space)s 的空間額外延伸 %(size)d GB 時發生錯誤" #, python-format msgid "Error managing volume: %s." msgstr "管理磁區時發生錯誤:%s。" #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "對映磁區 %(vol)s 時發生錯誤。%(error)s。" #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "修改抄本同步化時發生錯誤:%(sv)s 作業:%(operation)s。回覆碼:%(rc)lu。錯誤:" "%(error)s。" #, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "修改服務 %(service)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "將磁區 %(vol)s 從來源專案 %(src)s 移至下列目標專案時發生錯誤:%(tgt)s。回覆" "碼:%(ret.status)d。訊息:%(ret.data)s。" msgid "Error not a KeyError." msgstr "此錯誤不是 KeyError。" msgid "Error not a TypeError." msgstr "此錯誤不是 TypeError。" #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "建立 CgSnapshot %s 時發生錯誤。" #, python-format msgid "Error occurred when deleting cgsnapshot %s." 
msgstr "刪除 CgSnapshot %s 時發生錯誤。" #, python-format msgid "Error occurred when updating consistency group %s." msgstr "更新一致性群組 %s 時發生錯誤。" #, python-format msgid "Error parsing config file: %s" msgstr "剖析配置檔 %s 時發生錯誤" msgid "Error promoting secondary volume to primary" msgstr "將次要磁區提升為主要磁區時發生錯誤" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "移除磁區 %(vol)s 時發生錯誤。%(error)s。" #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "重命名磁區 %(vol)s 時發生錯誤:%(err)s。" #, python-format msgid "Error response: %s" msgstr "錯誤的回應:%s" msgid "Error retrieving volume size" msgstr "擷取磁區大小時發生錯誤" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "傳送動作識別碼 %(id)s 的抄寫更新時發生錯誤。回覆碼:%(ret.status)d。訊息:" "%(ret.data)s。" #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "傳送抄寫更新時發生錯誤。傳回的錯誤:%(err)s。動作:%(id)s。" #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "針對磁區 %(vol)s,將抄寫繼承設定為 %(set)s 時發生錯誤,專案:%(project)s。回" "覆碼:%(ret.status)d。訊息:%(ret.data)s。" #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "將套件 %(package)s 從來源 %(src)s 分離時發生錯誤。回覆碼:%(ret.status)d。訊" "息:%(ret.data)s。" #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "從儲存區解除連結磁區 %(vol)s 時發生錯誤。%(error)s。" #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "在 Snapshot:%(snapshot)s 上驗證磁區副本 %(clone)s(大小為 %(size)d)上的副本" "大小時發生錯誤" #, python-format msgid "Error while authenticating with switch: %s." msgstr "對交換器進行鑑別時發生錯誤:%s。" #, python-format msgid "Error while changing VF context %s." msgstr "變更 VF 環境定義 %s 時發生錯誤。" #, python-format msgid "Error while checking the firmware version %s." msgstr "檢查韌體版本 %s 時發生錯誤。" #, python-format msgid "Error while checking transaction status: %s" msgstr "檢查交易狀態時發生錯誤:%s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "檢查 VF 是否適用於管理 %s 時發生錯誤。" #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "連接使用通訊協定 %(protocol)s 的交換器 %(switch_id)s 時發生錯誤。錯誤:" "%(error)s。" #, python-format msgid "Error while creating authentication token: %s" msgstr "建立鑑別記號時發生錯誤:%s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "建立 Snapshot [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "建立磁區 [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "刪除 Snapshot [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "刪除磁區 [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "延伸磁區 [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." 
msgstr "取得 %(op)s 詳細資料時發生錯誤,回覆碼:%(status)s。" #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "透過 SSH 取得資料時發生錯誤:(指令 = %(cmd)s 錯誤 = %(err)s)。" #, python-format msgid "Error while getting disco information [%s]." msgstr "取得 DISCO 資訊 [%s] 時發生錯誤。" #, python-format msgid "Error while getting nvp value: %s." msgstr "取得 NVP 值時發生錯誤:%s。" #, python-format msgid "Error while getting session information %s." msgstr "取得階段作業資訊 %s 時發生錯誤。" #, python-format msgid "Error while parsing the data: %s." msgstr "剖析資料時發生錯誤:%s。" #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "在交換器上查詢頁面 %(url)s 時發生錯誤,原因:%(error)s。" #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "移除區域字串中的區域和配置時發生錯誤:%(description)s。" #, python-format msgid "Error while requesting %(service)s API." msgstr "要求 %(service)s API 時發生錯誤。" #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "執行分區 CLI 時發生錯誤:(指令 = %(cmd)s 錯誤 = %(err)s)。" #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "更新區域字串中的新區域和配置時發生錯誤。錯誤:%(description)s。" msgid "Error writing field to database" msgstr "將欄位寫入資料庫時發生錯誤" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "取得磁區 ID 時發生錯誤 [%(stat)s - %(res)s]。" #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "將 Snapshot [%(snap_id)s] 還原到磁區 [%(vol)s] 中時,發生錯誤 [%(stat)s - " "%(res)s]。" #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "取得磁區 ID 時發生錯誤 [狀態] %(stat)s - [結果] %(res)s。" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "已超出磁區 %(volume_id)s 的排程嘗試次數上限 %(max_attempts)d" msgid "Exceeded the limit of snapshots per volume" msgstr "已超過每個磁區的 Snapshot 數目限制" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "將 meta 磁區附加到目標磁區 %(volumename)s 時發生異常狀況。" #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "建立元素抄本期間發生異常狀況。副本名稱:%(cloneName)s,來源名稱:" "%(sourceName)s,額外規格:%(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "_select_ds_for_volume 發生異常狀況:%s。" #, python-format msgid "Exception while forming the zone string: %s." msgstr "對區域字串進行格式化時發生異常狀況:%s。" #, python-format msgid "Exception: %s" msgstr "異常狀況:%s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "需要 UUID,但收到 %(uuid)s。" #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "預期只有一個節點稱為 \"%s\"" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "node_count 預期整數,傳回了 svcinfo lsiogrp:%(node)s。" #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "預期 CLI 指令 %(cmd)s 沒有任何輸出,但卻取得 %(out)s。" #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "根據 vdisk_UID 來過濾時,預期從 lsvdisk 傳回單一 vdisk。傳回了 %(count)s 個。" #, python-format msgid "Expected volume size was %d" msgstr "預期磁區大小為 %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." 
msgstr "" "已中止匯出備份,預期備份狀態 %(expected_status)s,但取得 %(actual_status)s。" #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "已中止匯出記錄,目前配置的備份服務[%(configured_service)s] 不是建立此備份所使" "用的備份服務 [%(backup_service)s]。" msgid "Extend volume error." msgstr "延伸磁區時發生錯誤。" msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "僅當不存在 Snapshot 時,此驅動程式才支援延伸磁區。" msgid "Extend volume not implemented" msgstr "未實作延伸磁區" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "未安裝「FAST VP 啟用程式」。無法設定磁區的層級原則" msgid "FAST is not supported on this array." msgstr "此陣列不支援 FAST 原則。" msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC 是通訊協定,但 OpenStack 未提供 WWPN。" #, python-format msgid "Faield to unassign %(volume)s" msgstr "無法取消指派 %(volume)s" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "無法建立快取磁區 %(volume)s。錯誤:%(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "無法給架構 %(fabric)s 新增連線:錯誤:%(err)s" msgid "Failed cgsnapshot" msgstr "失敗的 CgSnapshot" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "無法為群組建立 Snapshot:%(response)s。" #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "無法為磁區 %(volname)s 建立 Snapshot:%(response)s。" #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "無法從光纖 %s 取得作用中的區域集。" #, python-format msgid "Failed getting details for pool %s." msgstr "無法取得儲存區 %s 的詳細資料。" #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "無法移除架構 %(fabric)s 的連線:錯誤:%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "無法延伸磁區 %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "無法登入 3PAR (%(url)s),原因:%(err)s" msgid "Failed to access active zoning configuration." msgstr "無法存取作用中的分區配置。" #, python-format msgid "Failed to access zoneset status:%s" msgstr "無法存取區域集狀態:%s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "無法獲得資源鎖定。(序列:%(serial)s,實例:%(inst)s,ret:%(ret)s,標準錯" "誤:%(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "在嘗試 %(retries)s 次之後,無法將 %(vol)s 新增至 %(sg)s 中。" msgid "Failed to add the logical device." msgstr "無法新增邏輯裝置。" #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "無法將磁區 %(volumeName)s 新增至一致性群組 %(cgName)s。回覆碼:%(rc)lu。錯" "誤:%(error)s。" msgid "Failed to add zoning configuration." msgstr "無法新增分區配置。" #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "無法指派 iSCSI 起始器 IQN。(埠:%(port)s,原因:%(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "無法使 qos_specs:%(specs_id)s 與類型 %(type_id)s 產生關聯。" #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." 
msgstr "無法給磁區 %(volume_id)s 連接 iSCSI 目標。" #, python-format msgid "Failed to backup volume metadata - %s" msgstr "無法備份磁區 meta 資料 - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "無法備份磁區 meta 資料 - meta 資料備份物件'backup.%s.meta' 已存在" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "無法從 Snapshot %s 複製磁區。" #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "無法連接 %(vendor_name)s 陣列 %(host)s:%(err)s" msgid "Failed to connect to Dell REST API" msgstr "無法連接至 Dell REST API" msgid "Failed to connect to array" msgstr "無法連接陣列" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "無法連接 sheep 常駐程式。位址:%(addr)s,埠:%(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "無法將映像檔複製到磁區:%(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "無法將 meta 資料複製到磁區:%(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "未能複製磁區,無法使用目的地裝置。" msgid "Failed to copy volume, source device unavailable." msgstr "未能複製磁區,無法使用來源裝置。" #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "無法從 Snapshot %(cgSnapshot)s 建立 CG %(cgName)s。" #, python-format msgid "Failed to create IG, %s" msgstr "無法建立 IG %s" msgid "Failed to create SolidFire Image-Volume" msgstr "無法建立 SolidFire 映像檔磁區" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "無法建立磁區群組:%(vg_name)s" #, python-format msgid "" "Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "無法建立檔案。(檔案:%(file)s,ret:%(ret)s,標準錯誤:%(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "無法建立磁區 %s 的暫用 Snapshot。" msgid "Failed to create api volume flow." msgstr "無法建立 API 磁區流程。" #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "由於 %(reason)s,無法建立 cg Snapshot %(id)s。" #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "由於 %(reason)s,無法建立一致性群組 %(id)s。" #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "無法建立一致性群組 %(id)s:%(ret)s。" #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "無法建立一致性群組 %s,因為 VNX 一致性群組無法接受壓縮的 LUN 作為成員。" #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "無法建立一致性群組 %(cgName)s。" #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "無法建立一致性群組 %(cgid)s。錯誤:%(excmsg)s。" #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "無法建立一致性群組:%(consistencyGroupName)s回覆碼:%(rc)lu。錯誤:" "%(error)s。" #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "無法在 %(storageSystemName)s 上建立硬體 ID。" #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "無法建立主機:%(name)s。請檢查該主機在陣列上是否存在。" #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "無法建立主機群組:%(name)s。請檢查該主機群組在陣列上是否存在。" msgid "Failed to create iqn." msgstr "無法建立 IQN。" #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "無法給磁區 %(volume_id)s 建立 iSCSI 目標。" msgid "Failed to create manage existing flow." 
msgstr "無法建立管理現有流程。" msgid "Failed to create manage_existing flow." msgstr "無法建立 manage_existing 流程。" msgid "Failed to create map on mcs, no channel can map." msgstr "無法在 MCS 上建立對映,沒有通道可對映。" msgid "Failed to create map." msgstr "無法建立對映。" #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "無法給磁區建立 meta 資料:%(reason)s" msgid "Failed to create partition." msgstr "無法建立分割區。" #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "無法使用規格 %(qos_specs)s 來建立 qos_specs:%(name)s。" msgid "Failed to create replica." msgstr "無法建立抄本。" msgid "Failed to create scheduler manager volume flow" msgstr "無法建立排定器管理程式磁區流程" #, python-format msgid "Failed to create snapshot %s" msgstr "無法建立 Snapshot %s" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "無法建立 Snapshot,因為未指定任何 LUN ID" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "無法建立 CG %(cgName)s 的 Snapshot。" #, python-format msgid "Failed to create snapshot for volume %s." msgstr "無法建立磁區 %s 的 Snapshot。" #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "無法在磁區 %(vol)s 上建立 Snapshot 原則:%(res)s。" #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "無法在磁區 %(vol)s 上建立 Snapshot 資源區域:%(res)s。" msgid "Failed to create snapshot." msgstr "無法建立 Snapshot。" #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "無法建立 Snapshot。找不到 OpenStack 磁區[%s] 的 CloudByte 磁區資訊。" #, python-format msgid "Failed to create south bound connector for %s." msgstr "無法建立 %s 的南行連接器。" #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "無法建立儲存體群組 %(storageGroupName)s。" #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "無法建立小型儲存區,錯誤訊息為:%s" #, python-format msgid "Failed to create volume %s" msgstr "無法建立磁區 %s" #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "無法刪除 volume_id %(volume_id)s 的 SI,因為它具有配對。" #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "無法刪除邏輯裝置。(LDEV:%(ldev)s,原因:%(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "由於 %(reason)s,無法刪除 cg Snapshot %(id)s。" #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "由於 %(reason)s,無法刪除一致性群組 %(id)s。" #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "無法刪除一致性群組 %(cgName)s。" #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "無法刪除一致性群組:%(consistencyGroupName)s回覆碼:%(rc)lu。錯誤:" "%(error)s。" msgid "Failed to delete device." msgstr "無法刪除裝置。" #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "無法刪除一致性群組 %(cgname)s 的檔案集。錯誤:%(excmsg)s。" msgid "Failed to delete iqn." msgstr "無法刪除 IQN。" msgid "Failed to delete map." msgstr "無法刪除對映。" msgid "Failed to delete partition." msgstr "無法刪除分割區。" msgid "Failed to delete replica." msgstr "無法刪除抄本。" #, python-format msgid "Failed to delete snapshot %s" msgstr "無法刪除 Snapshot %s" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "無法刪除 CG %(cgId)s 的 Snapshot。" #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." 
msgstr "無法刪除 snapshot_id %s 的 Snapshot,因為它具有配對。" msgid "Failed to delete snapshot." msgstr "無法刪除 Snapshot。" #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "無法刪除磁區 %(volumeName)s。" #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "無法刪除 volume_id %(volume_id)s 的磁區,因為它具有配對。" #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "無法分離磁區 %(volume_id)s 的 iSCSI 目標。" msgid "Failed to determine blockbridge API configuration" msgstr "無法判定 Blockbridge API 配置。" msgid "Failed to disassociate qos specs." msgstr "無法解除與服務品質規格的關聯。" #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "無法解除 qos_specs:%(specs_id)s 與類型 %(type_id)s 的關聯。" #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "無法確保 Snapshot 資源區域,找不到 ID %s 的磁區" msgid "Failed to establish SSC connection." msgstr "無法建立 SSC 連線。" msgid "Failed to establish connection with Coho cluster" msgstr "無法建立與 Coho 叢集的連線。" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "無法執行 CloudByte API [%(cmd)s]。HTTP 狀態:%(status)s,錯誤:%(error)s。" msgid "Failed to execute common command." msgstr "無法執行一般指令。" #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "無法針對磁區進行匯出:%(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "無法延伸現有磁區 %(name)s,錯誤訊息:%(msg)s。" msgid "Failed to find QoSnode" msgstr "找不到 QoSnode" msgid "Failed to find Storage Center" msgstr "找不到 Storage Center" msgid "Failed to find a vdisk copy in the expected pool." msgstr "在預期儲存區中找不到 vdisk 副本。" msgid "Failed to find account for volume." msgstr "找不到磁區的帳戶。" #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "找不到路徑 %(path)s 的檔案集,指令輸出:%(cmdout)s。" #, python-format msgid "Failed to find group snapshot named: %s" msgstr "找不到名為 %s 的群組 Snapshot" #, python-format msgid "Failed to find host %s." msgstr "找不到主機 %s。" #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "找不到包含 %(initiator)s 的 iSCSI 起始器群組。" #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "找不到來源磁區 %s 的儲存區。" #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "無法取得帳戶 [%s] 的 CloudByte 帳戶詳細資料。" #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "無法取得 LUN %s 的 LUN 目標詳細資料" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "無法取得 LUN %s 的 LUN 目標詳細資料。" #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "無法取得 LUN %s 的 LUN 目標清單" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "無法取得磁區 %(volume_id)s 的分割區 ID。" #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "無法從 Snapshot %(snapshot_id)s 取得 Raid Snapshot ID。" #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "無法從 Snapshot %(snapshot_id)s 取得 Raid Snapshot ID。" msgid "Failed to get SplitMirror." msgstr "無法取得 SplitMirror。" #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. 
(resource: %(resource)s)" msgstr "" "無法取得儲存體資源。系統將嘗試再次取得儲存體資源。(資源:%(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "無法取得服務品質規格 %s 的所有關聯" msgid "Failed to get channel info." msgstr "無法取得通道資訊。" #, python-format msgid "Failed to get code level (%s)." msgstr "無法取得程式碼層次 (%s)。" msgid "Failed to get device info." msgstr "無法取得裝置資訊。" #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "無法取得網域,因為 CPG (%s) 不存在於陣列上。" msgid "Failed to get image snapshots." msgstr "無法取得映像檔 Snapshot。" #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "無法在具有磁區 %(volume_id)s 的通道 %(channel_id)s 上取得 IP。" msgid "Failed to get iqn info." msgstr "無法取得 IQN 資訊。" msgid "Failed to get license info." msgstr "無法取得軟體使用權資訊。" msgid "Failed to get lv info." msgstr "無法取得 LV 資訊。" msgid "Failed to get map info." msgstr "無法取得對映資訊。" msgid "Failed to get migration task." msgstr "無法取得移轉作業。" msgid "Failed to get model update from clone" msgstr "無法從複本取得模型更新" msgid "Failed to get name server info." msgstr "無法取得名稱伺服器資訊。" msgid "Failed to get network info." msgstr "無法取得網路資訊。" #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "無法取得新儲存區 %(pool_id)s 中的新組件 ID。" msgid "Failed to get partition info." msgstr "無法取得分割區資訊。" #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "無法取得具有磁區 %(volume_id)s 的儲存區 ID。" #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "無法取得 %(volume)s 的遠端複製資訊,原因:%(err)s。" #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "無法取得 %(volume)s 的遠端複製資訊。異常狀況:%(err)s。" msgid "Failed to get replica info." msgstr "無法取得抄本資訊。" msgid "Failed to get show fcns database info." msgstr "無法取得「顯示 fcns」資料庫資訊。" msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "無法取得現有磁區 %(vol) 的大小。磁區管理失敗。" #, python-format msgid "Failed to get size of volume %s" msgstr "無法取得磁區 %s 的大小" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "無法取得磁區 %s 的 Snapshot。" msgid "Failed to get snapshot info." msgstr "無法取得 Snapshot 資訊。" #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "無法取得 LUN %s 的目標 IQN" msgid "Failed to get target LUN of SplitMirror." msgstr "無法取得 SplitMirror 的目標 LUN。" #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "無法取得 LUN %s 的目標入口網站" msgid "Failed to get targets" msgstr "無法取得目標" msgid "Failed to get wwn info." msgstr "無法取得 WWN 資訊。" #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "無法取得、建立磁區 %(volumeName)s,或將其新增至遮罩視圖%(maskingViewName)s。" "接收到的錯誤訊息為 %(errorMessage)s。" msgid "Failed to identify volume backend." msgstr "無法識別磁區後端。" #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "無法鏈結共用 %(cgname)s 的檔案集。錯誤:%(excmsg)s。" #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "無法登入 %s 陣列(無效登入?)。" #, python-format msgid "Failed to login for user %s." msgstr "無法以使用者 %s 身分登入。" msgid "Failed to login with all rest URLs." msgstr "無法使用所有其餘 URL 進行登入。" #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "無法對 Datera 叢集端點發出要求,原因如下:%s" msgid "Failed to manage api volume flow." 
msgstr "無法管理 API 磁區流程。" #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "無法管理現有 %(type)s %(name)s,因為報告的大小 %(size)s 不是浮點數字。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "無法管理現有磁區 %(name)s,因為取得磁區大小時發生錯誤。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "無法管理現有磁區 %(name)s,因為重新命名作業失敗:錯誤訊息:%(msg)s。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "無法管理現有磁區 %(name)s,因為所報告的大小 %(size)s不是浮點數字。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "無法管理現有磁區,因為所選磁區類型的儲存區與磁區參照中傳遞的 NFS 共用項目不" "符。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "無法管理現有磁區,因為所選磁區類型的儲存區與磁區參照中傳遞的檔案系統不符。" msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "無法管理現有磁區,因為所選磁區類型的儲存區與主機的儲存區不符。" #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "因為 I/O 群組不符,所以無法管理現有磁區。要管理之磁區的 I/O 群組是 " "%(vdisk_iogrp)s。所選類型的 I/O 群組是 %(opt_iogrp)s。" #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "因為要管理之磁區的通訊協定與後端通訊協定不符,所以無法管理現有磁區。要管理之" "磁區的通訊協定是 %(vdisk_pool)s。後端的通訊協定是 %(backend_pool)s。" msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "因為要管理的磁區是壓縮磁區,但所選磁區類型卻是未壓縮磁區,所以無法管理現有磁" "區。" msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "因為要管理的磁區是未壓縮磁區,但所選磁區類型卻是壓縮磁區,所以無法管理現有磁" "區。" msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "因為要管理的磁區不在有效的 I/O 群組中,所以無法管理現有磁區。" msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "因為要管理的磁區是豐富磁區,但所選磁區類型卻是精簡磁區,所以無法管理現有磁" "區。" msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "因為要管理的磁區是精簡磁區,但所選磁區類型卻是豐富磁區,所以無法管理現有磁" "區。" #, python-format msgid "Failed to manage volume %s." msgstr "無法管理磁區 %s。" #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "無法對映邏輯裝置。(LDEV:%(ldev)s,LUN:%(lun)s,埠:%(port)s,ID:%(id)s)" msgid "Failed to migrate volume for the first time." msgstr "第一次移轉磁區失敗。" msgid "Failed to migrate volume for the second time." msgstr "第二次移轉磁區失敗。" #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "無法移動 LUN 對映。回覆碼:%s" #, python-format msgid "Failed to move volume %s." msgstr "無法移動磁區 %s。" #, python-format msgid "Failed to open a file. 
(file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "無法開啟檔案。(檔案:%(file)s,ret:%(ret)s,標準錯誤:%(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "無法剖析 CLI 輸出:\n" " 指令:%(cmd)s\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s。" msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form <service_type>:<service_name>:<endpoint_type>" msgstr "" "無法剖析配置選項 'keystone_catalog_info',必須採用下列格式:<service_type>:" "<service_name>:<endpoint_type>" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form <service_type>:<service_name>:<endpoint_type>" msgstr "" "無法剖析配置選項 'swift_catalog_info',必須採用下列格式:<service_type>:" "<service_name>:<endpoint_type>" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "無法執行零頁面收回。(LDEV:%(ldev)s,原因:%(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "無法移除磁區 %(volume)s 的匯出項目:%(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "無法移除磁區 %(volume_id)s 的 iSCSI 目標。" #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "無法從一致性群組 %(cgName)s 中移除磁區 %(volumeName)s。回覆碼:%(rc)lu。錯" "誤:%(error)s。" #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "無法從預設 SG 中移除磁區 %(volumeName)s。" #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "無法從下列預設 SG 中移除磁區 %(volumeName)s:%(volumeName)s。" #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "無法將 %(volumename)s 從下列 FAST 原則的預設儲存體群組中移除:" "%(fastPolicyName)s。" #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "無法重新命名邏輯磁區 %(name)s,錯誤訊息為:%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "無法擷取作用中的分區配置 %s" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "無法設定目標 IQN %(iqn)s 的 CHAP 鑑別。詳細資料:%(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "無法設定現有磁區 %(name)s 的服務品質,錯誤訊息:%(msg)s。" msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "無法設定 SCST 目標的「送入使用者」屬性。" msgid "Failed to set partition." msgstr "無法設定分割區。" #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "無法設定一致性群組 %(cgname)s 的許可權。錯誤:%(excmsg)s。" #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "無法指定要取消對映之磁區 %(volume_id)s 的邏輯裝置。" #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "無法指定要刪除的邏輯裝置。(方法:%(method)s,ID:%(id)s)" msgid "Failed to terminate migrate session." msgstr "無法終止移轉階段作業。" #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "無法將磁區 %(volume)s 解除連結" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "無法解除鏈結一致性群組 %(cgname)s 的檔案集。錯誤:%(excmsg)s。" #, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "無法取消對映邏輯裝置。(LDEV:%(ldev)s,原因:%(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s."
msgstr "無法更新一致性群組:%(cgName)s。" #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "無法更新磁區的 meta 資料:%(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "無法更新或刪除分區配置" msgid "Failed to update or delete zoning configuration." msgstr "無法更新或刪除分區配置。" #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "無法使用規格 %(qos_specs)s 來更新 qos_specs:%(specs_id)s。" msgid "Failed to update quota usage while retyping volume." msgstr "對磁區執行 Retype 作業時,無法更新配額用量。" msgid "Failed to update snapshot." msgstr "無法更新 Snapshot。" #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "使用驅動程式提供的模型 %(model)s 來更新模型時失敗" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "使用提供的 %(src_type)s %(src_id)s meta 資料來更新磁區 %(vol_id)s meta 資料時" "失敗" #, python-format msgid "Failure creating volume %s." msgstr "建立磁區 %s 時失敗。" #, python-format msgid "Failure getting LUN info for %s." msgstr "取得 %s 的 LUN 資訊時失敗。" #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "update_volume_key_value_pair 失敗:%s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "將新複製的 LUN 移至 %s 時失敗。" #, python-format msgid "Failure staging LUN %s to tmp." msgstr "將 LUN %s 暫置到 tmp 時失敗。" msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "嚴重錯誤:不允許使用者查詢 NetApp 磁區。" #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "由於 %(reason)s,Fexvisor 無法新增磁區 %(id)s。" #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "Fexvisor 無法結合群組 %(group)s 中的磁區 %(vol)s,原因:%(ret)s。" #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "Fexvisor 無法移除群組 %(group)s 中的磁區 %(vol)s,原因:%(ret)s。" #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "Fexvisor 無法移除磁區 %(id)s,原因:%(reason)s。" #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "「光纖通道」SAN 查閱失敗:%(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "「光纖通道」區域作業失敗:%(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "「光纖通道」連線控制失敗:%(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "找不到檔案 %(file_path)s。" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "檔案 %(path)s 具有無效的支援檔案 %(bfile)s,正在中斷。" #, python-format msgid "File already exists at %s." msgstr "%s 處已存在檔案。" #, python-format msgid "File already exists at: %s" msgstr "%s 處已存在檔案" msgid "Find host in hostgroup error." msgstr "在主機群組中尋找主機時發生錯誤。" msgid "Find host lun id error." msgstr "尋找主機 LUN ID 時發生錯誤。" msgid "Find lun group from mapping view error." msgstr "從對映視圖中尋找 LUN 群組時發生錯誤。" msgid "Find lun number error." msgstr "尋找 LUN 號碼時發生錯誤。" msgid "Find mapping view error." msgstr "尋找對映視圖時發生錯誤。" msgid "Find portgroup error." msgstr "尋找埠群組時發生錯誤。" msgid "Find portgroup from mapping view error." msgstr "從對映視圖中尋找埠群組時發生錯誤。" #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "「快閃記憶體快取原則」需要 WSAPI '%(fcache_version)s' 版,已安裝 " "'%(version)s' 版。" #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." 
msgstr "Flexvisor 指派磁區失敗:%(id)s:%(status)s。" #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Flexvisor 指派磁區失敗:%(id)s:%(status)s。" #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor 在群組 %(vgid)s Snapshot %(vgsid)s 中找不到磁區 %(id)s Snapshot。" #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "Flexvisor 建立磁區失敗:%(volumeid)s:%(status)s。" #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor 無法刪除磁區 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor 無法將磁區 %(id)s 新增至群組 %(cgid)s。" #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "Flexvisor 無法指派磁區 %(id)s,原因是無法依事件 ID 來查詢狀態。" #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor 無法指派磁區 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "Flexvisor 無法指派磁區 %(volume)s iqn %(iqn)s。" #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor 無法複製磁區 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "Flexvisor 無法複製磁區(無法取得事件)%(id)s。" #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "Flexvisor 無法建立磁區 %(id)s 的 Snapshot:%(status)s。" #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "Flexvisor 無法建立下列磁區的 Snapshot(無法取得事件):%(id)s。" #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "Flexvisor 無法在群組 %(vgid)s 中建立磁區 %(id)s。" #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor 無法建立磁區 %(volume)s:%(status)s。" #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor 無法建立磁區(取得事件)%s。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "Flexvisor 無法從 Snapshot %(id)s 建立磁區:%(status)s。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor 無法從 Snapshot %(id)s 建立磁區:%(status)s。" #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "Flexvisor 無法從下列 Snapshot 建立磁區(無法取得事件):%(id)s。" #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor 無法刪除 Snapshot %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "Flexvisor 無法刪除 Snapshot(無法取得事件)%(id)s。" #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor 無法刪除磁區 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor 無法延伸磁區 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor 無法延伸磁區 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "Flexvisor 無法延伸磁區(無法取得事件)%(id)s。" #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." 
msgstr "Flexvisor 無法取得儲存區資訊 %(id)s:%(status)s。" #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "Flexvisor 無法從群組 %(vgid)s 取得磁區 %(id)s 的 Snapshot ID。" #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor 無法從群組 %(cgid)s 中移除磁區 %(id)s。" #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor 無法從 Snapshot %(id)s 大量產生磁區:%(status)s。" #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "Flexvisor 無法從下列 Snapshot 大量產生磁區(無法取得事件):%(id)s。" #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "Flexvisor 無法取消指派磁區 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "Flexvisor 無法取消指派磁區(取得事件)%(id)s。" #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor 無法取消指派磁區 %(id)s:%(status)s。" #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor 找不到來源磁區 %(id)s 資訊。" #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "Flexvisor 取消指派磁區失敗:%(id)s:%(status)s。" #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "Flexvisor 磁區 %(id)s 無法加入群組 %(vgid)s 中。" #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "資料夾 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS 不在執行中,狀態:%s。" msgid "Gateway VIP is not set" msgstr "未設定閘道 VIP" msgid "Get FC ports by port group error." msgstr "依埠群組取得 FC 埠時發生錯誤。" msgid "Get FC ports from array error." msgstr "從陣列中取得 FC 埠時發生錯誤。" msgid "Get FC target wwpn error." msgstr "取得 FC 目標 WWPN 時發生錯誤。" msgid "Get HyperMetroPair error." msgstr "取得 HyperMetroPair 時發生錯誤。" msgid "Get LUN group by view error." msgstr "依視圖取得 LUN 群組時發生錯誤。" msgid "Get LUNcopy information error." msgstr "取得 LUNcopy 資訊時發生錯誤。" msgid "Get QoS id by lun id error." msgstr "依 LUN ID 取得服務品質 ID 時發生錯誤。" msgid "Get QoS information error." msgstr "取得服務品質資訊時發生錯誤。" msgid "Get QoS policy error." msgstr "取得服務品質原則時發生錯誤。" msgid "Get SplitMirror error." msgstr "取得 SplitMirror 時發生錯誤。" msgid "Get active client failed." msgstr "取得作用中的用戶端失敗。" msgid "Get array info error." msgstr "取得陣列資訊時發生錯誤。" msgid "Get cache by name error." msgstr "依名稱取得快取時發生錯誤。" msgid "Get connected free FC wwn error." msgstr "取得已連接的可用 FC WWN 時發生錯誤。" msgid "Get engines error." msgstr "取得引擎時發生錯誤。" msgid "Get host initiators info failed." msgstr "取得主機起始器資訊時失敗。" msgid "Get hostgroup information error." msgstr "取得主機群組資訊時發生錯誤。" msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "取得 iSCSI 埠資訊時發生錯誤,請檢查 huawei 配置檔中配置的目標 IP。" msgid "Get iSCSI port information error." msgstr "取得 iSCSI 埠資訊時發生錯誤。" msgid "Get iSCSI target port error." msgstr "取得 iSCSI 目標埠時發生錯誤。" msgid "Get lun id by name error." msgstr "依名稱取得 LUN ID 時發生錯誤。" msgid "Get lun migration task error." msgstr "取得 LUN 移轉作業時發生錯誤。" msgid "Get lungroup id by lun id error." msgstr "依 LUN ID 取得 LUN 群組 ID 時發生錯誤。" msgid "Get lungroup information error." msgstr "取得 LUN 群組資訊時發生錯誤。" msgid "Get migration task error." msgstr "取得移轉作業時發生錯誤。" msgid "Get pair failed." msgstr "取得配對失敗。" msgid "Get partition by name error." msgstr "依名稱取得分割區時發生錯誤。" msgid "Get partition by partition id error." msgstr "依分割區 ID 取得分割區時發生錯誤。" msgid "Get port group by view error." 
msgstr "依視圖取得埠群組時發生錯誤。" msgid "Get port group error." msgstr "取得埠群組時發生錯誤。" msgid "Get port groups by port error." msgstr "依埠取得埠群組時發生錯誤。" msgid "Get ports by port group error." msgstr "依埠群組取得埠時發生錯誤。" msgid "Get remote device info failed." msgstr "取得遠端裝置資訊失敗。" msgid "Get remote devices error." msgstr "取得遠端裝置時發生錯誤。" msgid "Get smartcache by cache id error." msgstr "依快取 ID 取得 smartcache 時發生錯誤。" msgid "Get snapshot error." msgstr "取得 Snapshot 時發生錯誤。" msgid "Get snapshot id error." msgstr "取得 Snapshot ID 時發生錯誤。" msgid "Get target IP error." msgstr "取得目標 IP 時發生錯誤。" msgid "Get target LUN of SplitMirror error." msgstr "取得 SplitMirror 的目標 LUN 時發生錯誤。" msgid "Get views by port group error." msgstr "依埠群組取得視圖時發生錯誤。" msgid "Get volume by name error." msgstr "依名稱取得磁區時發生錯誤。" msgid "Get volume error." msgstr "取得磁區時發生錯誤。" #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "無法更新 Glance meta 資料,磁區 ID %(volume_id)s 已存在索引鍵 %(key)s" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "找不到磁區/Snapshot %(id)s 的 Glance meta 資料。" #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "%(config)s 處不存在 Gluster 配置檔" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Google Cloud Storage API 失敗:%(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Google Cloud Storage 連線失敗:%(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Google Cloud Storage oauth2 失敗:%(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "從 DRBDmanage 取得不正確的路徑資訊!(%s)" msgid "HBSD error occurs." msgstr "發生 HBSD 錯誤。" msgid "HNAS has disconnected SSC" msgstr "HNAS 具有已切斷連線的 SSC" msgid "HPELeftHand url not found" msgstr "找不到 HPELeftHand URL" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." msgstr "" "已經要求 HTTPS 憑證驗證,但卻無法使用 purestorage 模組 %(version)s 版來啟用此" "驗證。請升級至更高版本以啟用此功能。" #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "雜湊區塊大小自前次備份以來已變更。新的雜湊區塊大小:%(new)s。舊的雜湊區塊大" "小:%(old)s。請執行完整備份。" #, python-format msgid "Have not created %(tier_levels)s tier(s)." msgstr "尚未建立 %(tier_levels)s 層級。" #, python-format msgid "Hint \"%s\" not supported." msgstr "不支援提示 \"%s\"。" msgid "Host" msgstr "主機" #, python-format msgid "Host %(host)s could not be found." msgstr "找不到主機 %(host)s。" #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "主機 %(host)s 不符合 x509 憑證內容:CommonName %(commonName)s。" #, python-format msgid "Host %s has no FC initiators" msgstr "主機 %s 沒有 FC 起始器" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "主機 %s 沒有 iSCSI 起始器" #, python-format msgid "Host '%s' could not be found." msgstr "找不到主機 '%s'。" #, python-format msgid "Host group with name %s not found" msgstr "找不到名稱為 %s 的主機群組" #, python-format msgid "Host group with ref %s not found" msgstr "找不到參照為 %s 的主機群組" msgid "Host is NOT Frozen." msgstr "主機未處於「凍結」狀態。" msgid "Host is already Frozen." msgstr "主機已經處於「凍結」狀態。" msgid "Host not found" msgstr "找不到主機" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." 
msgstr "找不到主機。無法移除 %(host)s 上的 %(service)s。" #, python-format msgid "Host replication_status must be %s to failover." msgstr "主機 replication_status 必須是 %s 才能失效接手。" #, python-format msgid "Host type %s not supported." msgstr "不支援主機類型 %s。" #, python-format msgid "Host with ports %(ports)s not found." msgstr "找不到具有埠 %(ports)s 的主機。" msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "不能在同一 volume_type 中使用 Hypermetro 和抄寫。" #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "I/O 群組 %(iogrp)d 無效;可用的 I/O 群組數目是 %(avail)s。" msgid "ID" msgstr "識別號" msgid "IP address/hostname of Blockbridge API." msgstr "Blockbridge API 的 IP 位址/主機名稱。" msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "如果壓縮設為 True,則也必須設定調整大小(不等於 -1)。" msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "如果 nofmtdisk 設為 True,則 rsize 也必須設為 -1。" #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "為 flashsystem_connection_protocol 指定的值 '%(prot)s' 不正確:有效值為 " "%(enabled)s。" msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "為 IOTYPE 指定的值無效:0、1 或 2。" msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "指定給 smarttier 的值不正確:設定為 0、1、2 或 3。" msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "指定給 storwize_svc_vol_grainsize 的值不正確:應設為 32、64、128 或 256。" msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "指定給 thin 的值不正確:無法同時設定thin 和 thick。" #, python-format msgid "Image %(image_id)s could not be found." msgstr "找不到映像檔 %(image_id)s。" #, python-format msgid "Image %(image_id)s is not active." msgstr "映像檔 %(image_id)s 不在作用中。" #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "無法接受映像檔 %(image_id)s:%(reason)s" msgid "Image location not present." msgstr "映像檔位置不存在。" #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "映像檔虛擬大小為 %(image_size)d GB,不適合大小為%(volume_size)d GB 的磁區。" msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "刪除 rbd 磁區時發出 ImageBusy 錯誤。造成此問題的原因可能是從已損毀的用戶端進" "行連線,如果是這樣,則可以在30 秒後,透過重試刪除來解決。" #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "匯入記錄時失敗,找不到備份服務來執行匯入。要求服務 %(service)s" msgid "Incorrect request body format" msgstr "要求內文的格式不正確" msgid "Incorrect request body format." msgstr "要求內文的格式不正確。" msgid "Incremental backups exist for this backup." msgstr "此備份的增量備份已存在。" #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Infortrend CLI 異常狀況:%(err)s,參數:%(param)s(回覆碼:%(rc)s)(輸出:" "%(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "起始層 {},原則 {} 無效。" msgid "Input type {} is not supported." msgstr "輸入類型 {} 不受支援。" msgid "Input volumes or snapshots are invalid." msgstr "輸入磁區或 Snapshot 無效。" msgid "Input volumes or source volumes are invalid." msgstr "輸入磁區或來源磁區無效。" #, python-format msgid "Instance %(uuid)s could not be found." 
msgstr "找不到實例 %(uuid)s。" msgid "Insufficient free space available to extend volume." msgstr "可用空間不足,無法延伸磁區。" msgid "Insufficient privileges" msgstr "專用權不足" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "與 ceph 叢集的連線重試之間的間隔值(以秒為單位)。" #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "對 io_port_list 指定了無效的 %(protocol)s 埠 %(port)s。" #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "無效的 3PAR 網域:%(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "無效的 ALUA 值。ALUA 值必須是 1 或 0。" msgid "Invalid Ceph args provided for backup rbd operation" msgstr "提供給備份 rbd 作業的 Ceph 引數無效" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "無效的 CgSnapshot:%(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "無效的 ConsistencyGroup:%(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "無效的 ConsistencyGroup:一致性群組狀態必須是「可用」或「錯誤」,但現行狀態卻" "是「使用中」" #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "無效的一致性群組:一致性群組狀態必須為可用,但是現行狀態為:%s。" msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "無效的一致性群組:沒有用來建立一致性群組的主機" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "找到的 HPELeftHand API 版本 %(found)s 無效。管理/取消管理支援需要 " "%(minimum)s 版或更高版本。" #, python-format msgid "Invalid IP address format: '%s'" msgstr "無效的 IP 位址格式:'%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "取得磁區 %s 的服務品質原則時,偵測到無效的服務品質規格" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "無效的抄寫目標:%(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "無效的 VNX 鑑別類型:%s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Virtuozzo 儲存體共用項目規格無效:%r。必須是:[MDS1[,MDS2],...:/][:PASSWORD]。" #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "XtremIO %(cur)s 版無效,需要 %(min)s 版或更高版本" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "為下列專案配額定義的已配置配額無效:%s" msgid "Invalid argument" msgstr "無效的引數" msgid "Invalid argument - negative seek offset." msgstr "無效的引數 - 負數探查偏移。" #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "無效的引數 - whence = %s 不受支援" #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "無效的引數 - 不支援 whence=%s。" #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "連接模式 '%(mode)s' 不適用於磁區 %(volume_id)s。" #, python-format msgid "Invalid auth key: %(reason)s" msgstr "無效的鑑別金鑰:%(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "無效的備份:%(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "無效的 Barbican API URL:需要版本,例如 'http[s]://|[:port]/" "' URL,但指定的是:%s" msgid "Invalid cgsnapshot" msgstr "無效的 CgSnapshot" msgid "Invalid chap user details found in CloudByte storage." 
msgstr "在 CloudByte 儲存體中找到無效的 CHAP 使用者詳細資料。" #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "磁區 %(name)s 的連線起始設定回應無效" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "磁區 %(name)s 的連線起始設定回應無效:%(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "無效的內容類型 %(content_type)s。" msgid "Invalid credentials" msgstr "認證無效" #, python-format msgid "Invalid directory: %s" msgstr "無效的目錄:%s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "無效的磁碟配接卡類型:%(invalid_type)s。" #, python-format msgid "Invalid disk backing: %s." msgstr "無效的磁碟備用項目:%s。" #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "無效的磁碟類型:%(disk_type)s。" #, python-format msgid "Invalid disk type: %s." msgstr "無效的磁碟類型:%s。" #, python-format msgid "Invalid host: %(reason)s" msgstr "無效的主機:%(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "找到的 hpe3parclient 版本 (%(found)s) 無效。需要 %(minimum)s 版或更高版本。請" "執行 \"pip install --upgrade python-3parclient\" 來升級 hpe3parclient。" #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." msgstr "" "找到的 hpelefthandclient 版本 (%(found)s) 無效。需要 %(minimum)s 版或更高版" "本。請執行 'pip install --upgrade python-lefthandclient' 來升級 " "hpelefthandclient。" #, python-format msgid "Invalid image href %(image_href)s." msgstr "無效的映像檔 href %(image_href)s。" msgid "Invalid image identifier or unable to access requested image." msgstr "映像檔 ID 無效,或無法存取所要求的映像檔。" msgid "Invalid imageRef provided." msgstr "提供的 imageRef 無效。" msgid "Invalid initiator value received" msgstr "所接收的起始器值無效" msgid "Invalid input" msgstr "無效的輸入" #, python-format msgid "Invalid input received: %(reason)s" msgstr "收到的輸入無效:%(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "無效的 is_public 過濾器 [%s]" #, python-format msgid "Invalid lun type %s is configured." msgstr "所配置的 LUN 類型 %s 無效。" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "無效的 meta 資料大小:%(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "無效的 meta 資料:%(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "無效的裝載點基本程式:%s" #, python-format msgid "Invalid mount point base: %s." msgstr "無效的裝載點基本程式:%s。" #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "新 snapCPG 名稱無效,無法執行 Retype 動作。new_snap_cpg='%s'。" #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Coho RPC 埠的埠號 %(config)s 無效" #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "所配置的預先提取類型 %s 無效。PrefetchType 必須位於 0、1、2 和 3 中。" #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "無效的服務品質規格:%(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "將磁區連接至無效目標的要求無效" msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "以無效模式來連接磁區的要求無效。連接模式應該是 'rw' 或 'ro'" #, python-format msgid "Invalid reservation expiration %(expire)s." 
msgstr "無效的預約有效期限 %(expire)s。" msgid "Invalid response header from RPC server" msgstr "來自 RPC 伺服器的回應標頭無效" #, python-format msgid "Invalid secondary id %s." msgstr "次要 ID %s 無效。" #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "指定的 secondary_backend_id 無效。有效的後端 ID 是 %s。" msgid "Invalid service catalog json." msgstr "無效的服務型錄 JSON。" msgid "Invalid sheepdog cluster status." msgstr "sheepdog 叢集狀態無效。" #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "無效的 Snapshot:%(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "無效的狀態:'%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "所要求的儲存區 %s 無效。執行 Retype 動作失敗。" #, python-format msgid "Invalid storage pool %s specificed." msgstr "所指定的儲存區 %s 無效。" msgid "Invalid storage pool is configured." msgstr "所配置的儲存區無效。" #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "指定的同步化模式無效,容許的模式為 %s。" msgid "Invalid transport type." msgstr "傳輸類型無效。" #, python-format msgid "Invalid update setting: '%s'" msgstr "無效的更新設定:'%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "無效的 URL:格式必須是 'http[s]://|[:port]/',指定的 " "URL 是:%s" #, python-format msgid "Invalid value '%s' for force." msgstr "force 的值 '%s' 無效。" #, python-format msgid "Invalid value '%s' for force. " msgstr "force 的值 '%s' 無效。" #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "is_public 的值 '%s' 無效。接受值:True 或 False。" #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "skip_validation 的值 '%s' 無效。" #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "'bootable' 的值無效:'%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "'force' 的值 '%s' 無效" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "'readonly' 的值無效:'%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "'scheduler_max_attempts' 的值無效,必須 >= 1" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "NetApp 配置選項 netapp_host_type 的值無效。" msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "NetApp 配置選項 netapp_lun_ostype 的值無效。" #, python-format msgid "Invalid value for age, %(age)s" msgstr "經歷時間的值 %(age)s 無效" #, python-format msgid "Invalid value: \"%s\"" msgstr "無效的值:\"%s\"" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "提供給建立要求的磁區大小無效:%s(大小引數必須為整數或整數的字串表示法且大於" "零)。" #, python-format msgid "Invalid volume type: %(reason)s" msgstr "無效的磁區類型:%(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "無效的磁區:%(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "無效的磁區:無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為磁區" "處於無效的狀態:%(status)s。有效的狀態為:(「可用」、「使用中」)。" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." 
msgstr "" "無效的磁區:無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為磁區" "類型 %(volume_type)s 不受 該群組支援。" #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "無效的磁區:無法將磁區 fake-volume-uuid 新增至一致性群組 %(group_id)s,因為找" "不到該磁區。" #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "無效的磁區:無法將磁區 fake-volume-uuid 從一致性群組 %(group_id)s 中移除,因" "為該磁區不在此群組中。" #, python-format msgid "Invalid volume_type passed: %s." msgstr "傳遞的 volume_type 無效:%s。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "提供的 volume_type 無效:%s(所要求的類型不相容;符合來源磁區,或省略 type 引" "數)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "提供的 volume_type 無效:%s(所要求的類型不相容;建議省略該類型引數)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "提供的 volume_type 無效:%s(所要求的類型必須受此一致性群組支援)。" #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "無效的 WWPN 格式 %(wwpns)s" msgid "Invoking web service failed." msgstr "呼叫 Web 服務失敗。" msgid "Issue encountered waiting for job." msgstr "等待工作時遇到問題。" msgid "Issue encountered waiting for synchronization." msgstr "等待同步時遇到問題。" msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "發出失效接手失敗,因為未正確配置抄寫。" msgid "Item not found" msgstr "找不到項目" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "在 CloudByte 的建立磁區 [%s] 回應中找不到工作 ID。" #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "在 CloudByte 的刪除磁區 [%s] 回應中找不到工作 ID。" msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "索引鍵名稱只能包含英數字元、底線、句點、冒號及連字號。" #, python-format msgid "KeyError: %s" msgstr "KeyError:%s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "Keystone 第 3 版或更高版本必須用於取得巢狀配額支援。" #, python-format msgid "LU does not exist for volume: %s" msgstr "磁區不存在 LU:%s" msgid "LUN export failed!" msgstr "LUN 匯出失敗!" msgid "LUN id({}) is not valid." msgstr "LUN ID ({}) 無效。" msgid "LUN map overflow on every channel." msgstr "在每個通道上,LUN 對映溢位。" #, python-format msgid "LUN not found with given ref %s." msgstr "找不到具有給定參照 %s 的 LUN。" msgid "LUN number ({}) is not an integer." msgstr "LUN 號碼 ({}) 不是整數。" #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 號碼已超出通道 ID %(ch_id)s 的範圍。" #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "具有給定參照 %(ref)s 的 LUN 不滿足磁區類型。請確保具有 SSC 特性的 LUN 磁區存" "在於 vserver %(vs)s 上。" #, python-format msgid "Last %s cinder syslog entries:-" msgstr "最後 %s 個 Cinder Syslog 項目:-" msgid "LeftHand cluster not found" msgstr "找不到 LeftHand 叢集" msgid "License is unavailable." msgstr "無法使用授權。" #, python-format msgid "Line %(dis)d : %(line)s" msgstr "第 %(dis)d 行:%(line)s" msgid "Link path already exists and its not a symlink" msgstr "鏈結路徑已經存在,並且該鏈結路徑不是符號鏈結" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "狀態 %s 不支援來源磁區的鏈結複本。" msgid "Lock acquisition failed." 
msgstr "鎖定獲得失敗。" msgid "Logout session error." msgstr "登出階段作業錯誤。" msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "未配置查閱服務。fc_san_lookup_service 的配置選項需要指定查閱服務的具體實作。" msgid "Lun migration error." msgstr "Lun 移轉錯誤。" #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "之前物件 %(object_name)s 的 MD5 %(md5)s 與之後的 %(etag)s 不同。" #, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED:%r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED:AUTH_ERROR:%r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED:RPC_MISMATCH:%r" #, python-format msgid "Malformed fcns output string: %s" msgstr "形態異常的 fcns 輸出字串:%s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "訊息內文的格式不正確:%(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "格式不正確的名稱伺服器字串:%s" msgid "Malformed request body" msgstr "要求內文的格式不正確" msgid "Malformed request body." msgstr "要求內文形態異常。" msgid "Malformed request url" msgstr "要求 URL 的格式不正確" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "對指令 %(cmd)s 的回應格式不正確:%(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "scheduler_hints 屬性的格式不正確" #, python-format msgid "Malformed show fcns database string: %s" msgstr "形態異常的「顯示 fcns」資料庫字串:%s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "格式不正確的區域配置:(交換器 = %(switch)szone_config = %(zone_config)s)。" #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "形態異常的區域狀態:(交換器 = %(switch)s,zone_config = %(zone_config)s)。" msgid "Manage existing get size requires 'id'." msgstr "管理現有取得大小需要 'id'。" msgid "Manage existing snapshot not implemented." msgstr "未實作管理現有 Snapshot。" #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "管理現有磁區時失敗,因為後端參照%(existing_ref)s 無效:%(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "管理現有磁區時失敗,因為磁區類型不符:%(reason)s" msgid "Manage existing volume not implemented." msgstr "未實作管理現有磁區。" msgid "Manage existing volume requires 'source-id'." msgstr "管理現有磁區需要 'source-id'。" #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "如果啟用了 FAST,則不支援管理磁區。FAST 原則:%(fastPolicyName)s。" msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "不容許對已失效接手之磁區的 Snapshot 進行管理。" msgid "Map info is None due to array version not supporting hypermetro." msgstr "由於陣列版本不支援 Hypermetro,對映資訊為「無」。" #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "無法在所分配的 %(to)d 秒逾時時間內完成對映 %(id)s準備。終止中。" #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "未順利刪除遮罩視圖 %(maskingViewName)s" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "已超出容許的備份數目上限 (%(allowed)d)" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "已超出容許的 Snapshot 數目上限 (%(allowed)d)" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." 
msgstr "已超出下列配額容許的磁區數目上限 (%(allowed)d):'%(name)s'。" #, python-format msgid "May specify only one of %s" msgstr "只能指定 %s 的其中之一" msgid "Metadata backup already exists for this volume" msgstr "此磁區已存在 meta 資料備份" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "meta 資料備份物件 '%s' 已存在" msgid "Metadata item was not found" msgstr "找不到 meta 資料項目" msgid "Metadata item was not found." msgstr "找不到 meta 資料項目。" #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "meta 資料內容索引鍵 %s 超過 255 個字元" #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "meta 資料內容索引鍵 %s 值超過 255 個字元" msgid "Metadata property key blank" msgstr "meta 資料內容索引鍵空白" msgid "Metadata property key blank." msgstr "meta 資料內容索引鍵空白。" msgid "Metadata property key greater than 255 characters." msgstr "meta 資料內容索引鍵超過 255 個字元。" msgid "Metadata property value greater than 255 characters." msgstr "meta 資料內容值超過 255 個字元。" msgid "Metadata restore failed due to incompatible version" msgstr "meta 資料還原失敗,因為版本不相容" msgid "Metadata restore failed due to incompatible version." msgstr "由於版本不相容,meta 資料還原失敗。" #, python-format msgid "Migrate volume %(src)s failed." msgstr "移轉磁區 %(src)s 失敗。" #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "在來源磁區 %(src)s 與目的地磁區 %(dst)s 之間移轉磁區失敗。" #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "LUN %s 移轉已停止或發生錯誤。" msgid "MirrorView/S enabler is not installed." msgstr "未安裝 MirrorView/S 啟用程式。" msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "遺漏 'purestorage' Python 模組,請確保該程式庫已安裝且可用。" msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "遺漏了「光纖通道」SAN 配置參數 - fc_fabric_names" msgid "Missing request body" msgstr "遺漏了要求內文" msgid "Missing request body." msgstr "遺漏要求內文。" #, python-format msgid "Missing required element '%s' in request body" msgstr "要求內文中遺漏了必要元素 '%s'" #, python-format msgid "Missing required element '%s' in request body." msgstr "要求內文遺漏了必要元素 '%s'。" msgid "Missing required element 'consistencygroup' in request body." msgstr "要求內文中遺漏了必要元素 'consistencygroup'。" msgid "Missing required element 'host' in request body." msgstr "要求內文中遺漏了必要元素 'host'。" msgid "Missing required element quota_class_set in request body." msgstr "要求內文中遺漏了必要元素 quota_class_set。" msgid "Missing required element snapshot in request body." msgstr "要求內文中遺漏了必要元素 Snapshot。" msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "找到多個 SerialNumber,但這項作業僅預期一個。請變更 EMC 配置檔。" #, python-format msgid "Multiple copies of volume %s found." msgstr "找到磁區 %s 的多個副本。" #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "找到 '%s' 的多個相符項,請使用 ID 以更具體地進行尋找。" msgid "Multiple profiles found." msgstr "找到多個設定檔。" msgid "Must implement a fallback schedule" msgstr "必須實作撤回排程" msgid "Must implement find_retype_host" msgstr "必須實作 find_retype_host" msgid "Must implement host_passes_filters" msgstr "必須實作 host_passes_filters" msgid "Must implement schedule_create_consistencygroup" msgstr "必須實作 schedule_create_consistencygroup" msgid "Must implement schedule_create_volume" msgstr "必須實作 schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "必須實作 schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." 
msgstr "必須將 WWPN 或主機傳遞給 lsfabric。" msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "必須以雲端管理者身分使用 Keystone policy.json 來執行此指令,此 policy.json 容" "許雲端管理者列出和取得任何專案。" msgid "Must specify 'connector'" msgstr "必須指定 'connector'" msgid "Must specify 'connector'." msgstr "必須指定 'connector'。" msgid "Must specify 'host'." msgstr "必須指定 'host'。" msgid "Must specify 'new_volume'" msgstr "必須指定 'new_volume'" msgid "Must specify 'status'" msgstr "必須指定 'status'" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "必須指定 'status'、'attach_status' 或 'migration_status' 進行更新。" msgid "Must specify a valid attach status" msgstr "必須指定有效的連接狀態" msgid "Must specify a valid migration status" msgstr "必須指定有效的移轉狀態" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "必須指定有效的 persona %(valid)s,值 '%(persona)s' 無效。" #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "必須指定有效的供應類型 %(valid)s,值 '%(prov)s' 無效。" msgid "Must specify a valid status" msgstr "必須指定有效的狀態" msgid "Must specify an ExtensionManager class" msgstr "必須指定 ExtensionManager 類別" msgid "Must specify bootable in request." msgstr "必須在要求中指定 bootable。" msgid "Must specify protection domain name or protection domain id." msgstr "必須指定保護網域名稱或保護網域 ID。" msgid "Must specify readonly in request." msgstr "必須在要求中指定 readonly。" msgid "Must specify snapshot source-name or source-id." msgstr "必須指定 Snapshot source-name 或 source-id。" msgid "Must specify source-name or source-id." msgstr "必須修改 source-name 或 source-id。" msgid "Must specify storage pool name or id." msgstr "必須指定儲存區名稱或 ID。" msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "必須指定儲存區。選項:sio_storage_pools。" msgid "Must supply a positive value for age" msgstr "必須為經歷時間提供正數值" msgid "Must supply a positive, non-zero value for age" msgstr "必須為經歷時間提供非零正數值" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "NAS 配置 '%(name)s=%(value)s' 無效。必須為 'auto'、'true' 或'false'" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "%(config)s 處不存在 NFS 配置檔" #, python-format msgid "NFS file %s not discovered." msgstr "未探索到 NFS 檔 %s。" msgid "NFS file could not be discovered." msgstr "無法探索 NFS 檔案。" msgid "NaElement name cannot be null." msgstr "NaElement 名稱不能是空值。" msgid "Name" msgstr "名稱" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "要求內文中的名稱、說明、add_volumes 和 remove_volumes 不能全部都為空。" msgid "Need non-zero volume size" msgstr "需要非零磁區大小" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "MSG_DENIED 或 MSG_ACCEPTED 均不 %r" msgid "NetApp Cinder Driver exception." msgstr "NetApp Cinder 驅動程式異常狀況。" #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "用於延伸的新大小必須大於現行大小。(現行大小:%(size)s,延伸後大小:" "%(new_size)s)。" #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "新大小應該大於後端儲存體中的實際大小。實際大小:%(oldsize)s,新大小:" "%(newsize)s。" msgid "New volume size must be specified as an integer." msgstr "必須將新的磁區大小指定為整數。" msgid "New volume type must be specified." msgstr "必須指定新的磁區類型。" msgid "New volume type not specified in request_spec." 
msgstr "request_spec 中沒有指定新的磁區類型。" #, python-format msgid "New volume_type same as original: %s." msgstr "新的 volume_type 與原始類型相同:%s。" msgid "Nimble Cinder Driver exception" msgstr "Nimble Cinder 驅動程式異常狀況" msgid "No FC initiator can be added to host." msgstr "任何 FC 起始器均無法新增至主機。" msgid "No FC port connected to fabric." msgstr "沒有 FC 埠已連接至光纖。" msgid "No FCP targets found" msgstr "找不到 FCP 目標" msgid "No Port Group elements found in config file." msgstr "在配置檔中找不到「埠群組」元素。" msgid "No VF ID is defined in the configuration file." msgstr "配置檔中未定以 VF ID。" msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "沒有具備所提供之 iSCSI IP 的作用中 iSCSI 入口網站" #, python-format msgid "No available service named %s" msgstr "沒有名稱為 %s 的可用服務" #, python-format msgid "No backup with id %s" msgstr "沒有 ID 為 %s 的備份" msgid "No backups available to do an incremental backup." msgstr "沒有可用的備份來執行增量備份。" msgid "No big enough free disk" msgstr "沒有足夠大的可用磁碟" #, python-format msgid "No cgsnapshot with id %s" msgstr "沒有 ID 為 %s 的 CgSnapshot" msgid "No cinder entries in syslog!" msgstr "Syslog 中沒有 Cinder 項目!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "在編檔器上找不到名稱為 %s 的複製 LUN" msgid "No config node found." msgstr "找不到任何配置節點。" #, python-format msgid "No consistency group with id %s" msgstr "沒有 ID 為 %s 的一致性群組" #, python-format msgid "No element by given name %s." msgstr "依給定的名稱 %s,找不到元素。" msgid "No errors in logfiles!" msgstr "日誌檔中沒有錯誤!" #, python-format msgid "No file found with %s as backing file." msgstr "找不到含有 %s 的檔案來作為備用檔。" #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "未剩餘任何可用的 LUN ID。已超出可連接至主機的磁區數目上限(%s)。" msgid "No free disk" msgstr "沒有可用磁碟" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "在所提供的清單中找不到 %s 的良好 iSCSI 入口網站。" #, python-format msgid "No good iscsi portals found for %s." msgstr "找不到 %s 的良好 iSCSI 入口網站。" #, python-format msgid "No host to create consistency group %s." msgstr "沒有用來建立一致性群組 %s 的主機。" msgid "No iSCSI-enabled ports on target array." msgstr "目標陣列上沒有支援 iSCSI 的埠。" msgid "No image_name was specified in request." msgstr "未在要求中指定 image_name。" msgid "No initiator connected to fabric." msgstr "沒有起始器已連接至光纖。" #, python-format msgid "No initiator group found for initiator %s" msgstr "找不到起始器 %s 的起始器群組" msgid "No initiators found, cannot proceed" msgstr "找不到起始器,無法繼續進行" #, python-format msgid "No interface found on cluster for ip %s" msgstr "在叢集上找不到 IP %s 的介面" msgid "No ip address found." msgstr "找不到任何 IP 位址。" msgid "No iscsi auth groups were found in CloudByte." msgstr "在 CloudByte 中找不到 iscsi 鑑別群組。" msgid "No iscsi initiators were found in CloudByte." msgstr "在 CloudByte 中找不到 iscsi 起始器。" #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "找不到 CloudByte 磁區 [%s] 的 iscsi 服務。" msgid "No iscsi services found in CloudByte storage." msgstr "在 CloudByte 儲存體中找不到 iscsi 服務。" #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "未指定金鑰檔,且無法從 %(cert)s %(e)s 載入金鑰。" msgid "No mounted Gluster shares found" msgstr "找不到已裝載的 Gluster 共用" msgid "No mounted NFS shares found" msgstr "找不到已裝載的 NFS 共用" msgid "No mounted SMBFS shares found." msgstr "找不到已裝載的 SMBFS 共用項目。" msgid "No mounted Virtuozzo Storage shares found" msgstr "找不到已裝載的 Virtuozzo 儲存體共用項目" msgid "No mounted shares found" msgstr "找不到已裝載的共用項目" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." 
msgstr "在磁區 %(vol)s 的 I/O 群組 %(gid)s 中找不到任何節點。" msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "沒有儲存區可用於供應磁區。請確保已正確設定配置選項 " "netapp_pool_name_search_pattern。" msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "未從 CloudByte 儲存體清單 iSCSI 鑑別使用者 API 呼叫接收到任何回應。" msgid "No response was received from CloudByte storage list tsm API call." msgstr "為了列出 tsm API 呼叫,未從 CloudByte 儲存體中接收到回應。" msgid "No response was received from CloudByte's list filesystem api call." msgstr "未從 CloudByte 的清單檔案系統 API 呼叫中接收到回應。" msgid "No service VIP configured and no nexenta_client_address" msgstr "未配置服務 VIP,且沒有 nexenta_client_address" #, python-format msgid "No snap found with %s as backing file." msgstr "找不到含有 %s 的 Snapshot 來作為備用檔。" #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "在 Snapshot 群組 %s 中找不到 Snapshot 映像檔。" #, python-format msgid "No snapshots could be found on volume %s." msgstr "在磁區 %s 上找不到 Snapshot。" #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "沒有提供來源 Snapshot 以建立一致性群組 %s。" #, python-format msgid "No storage path found for export path %s" msgstr "找不到匯出路徑 %s 的儲存體路徑" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "沒有這類服務品質規格 %(specs_id)s。" msgid "No suitable discovery ip found" msgstr "找不到適當的探索 IP" #, python-format msgid "No support to restore backup version %s" msgstr "不支援還原備份版本 %s" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "找不到磁區 %(volume_id)s 的目標 ID。" msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "主機上沒有未用的 LUN ID 可供使用;已啟用多重連接,這需要所有 LUN ID 在整個主" "機群組中都是唯一的。" #, python-format msgid "No valid host was found. %(reason)s" msgstr "找不到有效的主機。%(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "類型為 %(type)s 的磁區 %(id)s 不具有有效主機" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "不存在具有由參照 %s 所指定之 UID 的 vdisk。" #, python-format msgid "No views found for LUN: %s" msgstr "找不到 LUN 的視圖:%s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "叢集上沒有含 vserver %(vserver)s 及接合路徑%(junction)s 的磁區" msgid "No volume service(s) started successfully, terminating." msgstr "所有磁區服務均未順利啟動,終止中。" msgid "No volume was found at CloudByte storage." msgstr "在 CloudByte 儲存體中找不到磁區。" msgid "No volume_type should be provided when creating test replica." msgstr "建立測試抄本時,不應提供 volume_type。" msgid "No volumes found in CloudByte storage." msgstr "在 CloudByte 儲存體中找不到磁區。" msgid "No weighed hosts available" msgstr "沒有加權主機可用" #, python-format msgid "Not a valid string: %s" msgstr "不是有效的字串:%s" msgid "Not a valid value for NaElement." msgstr "不是 NaElement 的有效值。" #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "找不到適合磁區 %s 的資料儲存庫。" msgid "Not an rbd snapshot" msgstr "不是 rbd Snapshot" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "未獲映像檔 %(image_id)s 的授權。" msgid "Not authorized." msgstr "未被授權" #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "後端 (%(backend)s) 上空間不足" msgid "Not enough storage space in the ZFS share to perform this operation." 
msgstr "ZFS 共用中的儲存體空間不足,無法執行此作業。" msgid "Not stored in rbd" msgstr "未儲存在 rbd 中" msgid "Nova returned \"error\" status while creating snapshot." msgstr "建立 Snapshot 時,Nova 傳回了「錯誤」狀態。" msgid "Null response received from CloudByte's list filesystem." msgstr "從 CloudByte 的清單檔案系統中接收到空值回應。" msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "從 CloudByte 的清單 iscsi 鑑別群組接收到空值回應。" msgid "Null response received from CloudByte's list iscsi initiators." msgstr "從 CloudByte 的清單 iscsi 起始器中接收到空值回應。" msgid "Null response received from CloudByte's list volume iscsi service." msgstr "從 CloudByte 的清單磁區 iscsi 服務中接收到空值回應。" #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "在 CloudByte 儲存體中建立磁區 [%s] 時接收到空值回應。" #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "在 CloudByte 儲存體中刪除磁區 [%s] 時接收到空值回應。" #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "在 CloudByte 儲存體中查詢 [%(operation)s] 型工作[%(job)s] 時接收到空值回應。" msgid "Number of retries if connection to ceph cluster failed." msgstr "與 ceph 叢集的連線失敗時的重試次數。" msgid "Object Count" msgstr "物件計數" msgid "Object Version" msgstr "物件版本" msgid "Object is not a NetApp LUN." msgstr "物件不是 NetApp LUN。" #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "在「延伸」作業上,將磁區新增至下列複合磁區時發生錯誤:%(volumename)s。" msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "其中一個 Cinder 磁區服務太舊,無法接受此類要求。您要執行混合 Liberty-Mitaka " "Cinder 磁區嗎?" msgid "One of the required inputs from host, port or scheme was not found." msgstr "找不到來自主機的其中一個必需輸入、埠或綱目。" #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "只能每隔 %(unit_string)s 向 %(uri)s 提出 %(value)s 個 %(verb)s 要求。" msgid "Only one limit can be set in a QoS spec." msgstr "在一個服務品質規格中,只能設定一個限制。" msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "只容許具備已限定為原生母項或 root 專案範圍之記號的使用者來查看其子項配額。" msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "只能取消管理由 OpenStack 進行管理的磁區。" #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "作業失敗,狀態 = %(status)s。完整傾出:%(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "不受支援的作業:%(operation)s。" msgid "Option gpfs_images_dir is not set correctly." msgstr "未正確設定選項 gpfs_images_dir。" msgid "Option gpfs_images_share_mode is not set correctly." msgstr "未正確設定選項 gpfs_images_share_mode。" msgid "Option gpfs_mount_point_base is not set correctly." msgstr "未正確設定選項 gpfs_mount_point_base。" msgid "Option map (cls._map) is not defined." msgstr "未定義選項對映 (cls._map)。" #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "原始 %(res)s %(prop)s 必須是值 %(vals)s 的其中之一" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "置換 HTTPS 埠以連接 Blockbridge API 伺服器。" #, python-format msgid "ParseException: %s" msgstr "ParseException:%s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "分割區名稱為「無」,請在索引鍵中設定 smartpartition:partitionname。" msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." 
msgstr "" "鑑別需要密碼或 SSH 私密金鑰:請設定san_password 或 san_private_key 選項。" msgid "Path to REST server's certificate must be specified." msgstr "必須指定 REST 伺服器憑證的路徑。" #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "請預先建立 %(pool_list)s 儲存區!" #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "請預先在儲存區 %(pool)s 中建立 %(tier_levels)s 層級!" msgid "Please re-run cinder-manage as root." msgstr "請以 root 使用者身分重新執行 cinder-manage。" msgid "Please specify a name for QoS specs." msgstr "請為服務品質規格指定名稱。" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "原則不容許執行 %(action)s。" #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "找不到儲存區 %(poolNameInStr)s。" #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "儲存區 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "找不到 volume['host'] %(host)s 的儲存區。" #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "volume['host'] 中的儲存區失敗:%(ex)s。" msgid "Pool is not available in the volume host field." msgstr "在磁區主機欄位中無法使用儲存區。" msgid "Pool is not available in the volume host fields." msgstr "在磁區主機欄位中無法使用儲存區。" #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "在網域 %(domain)s 中找不到名稱為 %(pool)s 的儲存區。" #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "在網域 %(domain_id)s 中找不到名稱為 %(pool_name)s 的儲存區。" #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "儲存區 %(poolName)s 未與下列 FAST 原則的儲存體層級相關聯:%(fastPolicy)s。" #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "PoolName 必須在檔案 %(fileName)s 中。" #, python-format msgid "Pools %s does not exist" msgstr "儲存區 %s 不存在" msgid "Pools name is not set." msgstr "未設定儲存區名稱。" #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "主要副本狀態:%(status)s,已同步:%(sync)s。" msgid "Project ID" msgstr "專案識別號" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "未針對巢狀配額正確設定專案配額:%(reason)s。" msgid "Protection Group not ready." msgstr "保護群組尚未備妥。" #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "通訊協定 %(storage_protocol)s 不受儲存體系列 %(storage_family)s 支援。" msgid "Provided backup record is missing an id" msgstr "所提供的備份記錄遺漏了 ID" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "提供的 Snapshot 狀態 %(provided)s,不為 Snapshot(狀態 = %(current)s)所接" "受。" #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "找不到 OpenStack 磁區 [%s] 的提供者資訊 w.r.t CloudByte 儲存體。" #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Pure Storage Cinder 驅動程式失敗:%(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "服務品質規格 %(specs_id)s 已存在。" #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "服務品質規格 %(specs_id)s 仍與實體相關聯。" #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "服務品質配置錯誤。%s 必須大於 0。" #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." 
msgstr "" "必須為 IOTYPE 指定服務品質原則及另一個 qos_specs,服務品質原則:" "%(qos_policy)s。" #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "" "必須為 IOTYPE 指定服務品質原則:0、1 或 2,服務品質原則:%(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "" "服務品質原則 upper_limit 與 lower_limit 相衝突,服務品質原則:" "%(qos_policy)s。" #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "服務品質規格 %(specs_id)s 沒有索引鍵為 %(specs_key)s 的規格。" msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "服務品質規格在此儲存體系列和 ONTAP 版本上不受支援。" msgid "Qos specs still in use." msgstr "服務品質規格仍在使用中。" msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "「依服務查詢」參數已淘汰。請改用二進位參數。" msgid "Query resource pool error." msgstr "查詢資源儲存區時發生錯誤。" #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "配額 %s 限制必須等於或大於現有資源數目。" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "找不到配額類別 %(class_name)s。" msgid "Quota could not be found" msgstr "找不到配額" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "資源已超出配額:%(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "已超出配額:錯誤碼 = %(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "找不到專案 %(project_id)s 的配額。" #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "針對資源 '%(res)s',專案 '%(proj)s' 的配額限制無效:限制 %(limit)d 小於使用中" "的值 %(used)d" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "找不到配額預約 %(uuid)s。" #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "找不到專案 %(project_id)s 的配額用量。" #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "RBD 差異 op 失敗 -(ret = %(ret)s 標準錯誤 = %(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "必須指定 REST 伺服器 IP。" msgid "REST server password must by specified." msgstr "必須指定 REST 伺服器密碼。" msgid "REST server username must by specified." msgstr "必須指定 REST 伺服器使用者名稱。" msgid "RPC Version" msgstr "RPC 版本" msgid "RPC server response is incomplete" msgstr "RPC 伺服器回應不完整" msgid "Raid did not have MCS Channel." msgstr "Raid 沒有 MCS 通道。" #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "達到由配置選項 max_luns_per_storage_group 設定的限制。用於將 %(vol)s 新增至" "「儲存體群組」%(sg)s 中的作業遭到拒絕。" #, python-format msgid "Received error string: %s" msgstr "接收到錯誤字串:%s" msgid "Reference must be for an unmanaged snapshot." msgstr "參照必須是針對未受管理的 Snapshot 進行的。" msgid "Reference must be for an unmanaged virtual volume." msgstr "參照必須針對未受管理的虛擬磁區。" msgid "Reference must be the name of an unmanaged snapshot." msgstr "參照必須是受管理之 Snapshot 的名稱。" msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "參照必須是未受管理虛擬磁區的磁區名稱。" msgid "Reference must contain either source-id or source-name element." msgstr "參照必須包含 source-id 或 source-name 元素。" msgid "Reference must contain either source-name or source-id element." 
msgstr "參照必須包含 source-name 或 source-id 元素。" msgid "Reference must contain source-id or source-name element." msgstr "參照必須包含 source-id 或 source-name 元素。" msgid "Reference must contain source-id or source-name key." msgstr "參照必須包含 source-id 或 source-name 索引鍵。" msgid "Reference must contain source-id or source-name." msgstr "參照必須包含 source-id 或 source-name。" msgid "Reference must contain source-id." msgstr "參照必須包含 source-id。" msgid "Reference must contain source-name element." msgstr "參照必須包含 source-name 元素。" msgid "Reference must contain source-name or source-id." msgstr "參照必須包含 source-name 或 source-id。" msgid "Reference must contain source-name." msgstr "參照必須包含 source-name。" msgid "Reference to volume to be managed must contain source-name." msgstr "對要管理之磁區的參照必需包含 source-name。" #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "對要管理之磁區 %s 的參照必需包含 source-name。" #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "正在拒絕移轉磁區 ID:%(id)s。請檢查您的配置,因為來源和目的地是相同的磁區群" "組:%(name)s。" msgid "Remote pool cannot be found." msgstr "找不到遠端儲存區。" msgid "Remove CHAP error." msgstr "移除 CHAP 時發生錯誤。" msgid "Remove fc from host error." msgstr "從主機中移除 FC 時發生錯誤。" msgid "Remove host from array error." msgstr "從陣列中移除主機時發生錯誤。" msgid "Remove host from hostgroup error." msgstr "從主機群組中移除主機時發生錯誤。" msgid "Remove iscsi from host error." msgstr "從主機中移除 iSCSI 時發生錯誤。" msgid "Remove lun from QoS error." msgstr "從服務品質中移除 LUN 時發生錯誤。" msgid "Remove lun from cache error." msgstr "從快取中移除 LUN 時發生錯誤。" msgid "Remove lun from partition error." msgstr "從分割區中移除 LUN 時發生錯誤。" msgid "Remove port from port group error." msgstr "從埠群組中移除埠時發生錯誤。" msgid "Remove volume export failed." msgstr "移除磁區匯出失敗。" msgid "Rename lun on array error." msgstr "重新命名陣列上的 LUN 時發生錯誤。" msgid "Rename snapshot on array error." msgstr "重新命名陣列上的 Snapshot 時發生錯誤。" #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "將 %(name)s 抄寫至 %(ssn)s 失敗。" #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到「抄寫服務功能」。" #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到「抄寫服務」。" msgid "Replication is not enabled" msgstr "未啟用抄寫" msgid "Replication is not enabled for volume" msgstr "尚未對磁區啟用抄寫" msgid "Replication not allowed yet." msgstr "尚未容許抄寫。" #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "磁區的抄寫狀態必須是作用中或作用中已停止,但是現行狀態為:%s" #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "磁區的抄寫狀態必須是非作用中、作用中已停止或錯誤,但是現行狀態為:%s" msgid "Request body and URI mismatch" msgstr "要求內文與 URI 不符" msgid "Request body contains too many items" msgstr "要求內文包含太多項目" msgid "Request body contains too many items." msgstr "要求內文包含太多項目。" msgid "Request body empty" msgstr "要求內文是空的" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "傳送至 Datera 叢集的要求,傳回了不正確的狀態:%(status)s | %(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
msgstr "" "所要求的備份超出容許的備份 GB 數配額。要求 %(requested)s G,配額為 %(quota)s " "G,並且已耗用 %(consumed)s G。" #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "所要求的磁區或 Snapshot 超出容許的 %(name)s 配額。要求 %(requested)s G,配額" "為 %(quota)s G,並且已耗用 %(consumed)s G。" #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "所要求的磁區大小 %(size)d 大於所容許的上限:%(limit)d。" msgid "Required configuration not found" msgstr "找不到必要的配置" #, python-format msgid "Required flag %s is not set" msgstr "未設定必要旗標 %s" msgid "Requires an NaServer instance." msgstr "需要 NaServer 實例。" #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "已中止重設備份狀態,目前配置的備份服務[%(configured_service)s] 不是建立此備份" "所使用的備份服務 [%(backup_service)s]。" #, python-format msgid "Resizing clone %s failed." msgstr "調整副本 %s 的大小失敗。" msgid "Resizing image file failed." msgstr "調整映像檔大小時失敗。" msgid "Resource could not be found." msgstr "找不到資源。" msgid "Resource not ready." msgstr "資源未備妥。" #, python-format msgid "Response error - %s." msgstr "回應錯誤 - %s。" msgid "Response error - The storage-system is offline." msgstr "回應錯誤 - 儲存體系統在線上。" #, python-format msgid "Response error code - %s." msgstr "回應錯誤碼 - %s。" msgid "RestURL is not configured." msgstr "未配置 RestURL。" #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "已中止還原備份,預期磁區狀態 %(expected_status)s,但取得 %(actual_status)s。" #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "已中止還原備份,目前配置的備份服務[%(configured_service)s] 不是建立此備份所使" "用的備份服務 [%(backup_service)s]。" #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "已中止還原備份:預期備份狀態 %(expected_status)s,但取得 %(actual_status)s。" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "針對所提供的 Cinder Snapshot,已擷取不同數量的 SolidFire 磁區。已擷取數目:" "%(ret)s,所需數目:%(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "針對所提供的 Cinder 磁區,已擷取不同數量的 SolidFire 磁區。已擷取數目:" "%(ret)s,所需數目:%(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "已超出指令 %s 的重試次數" msgid "Retryable SolidFire Exception encountered" msgstr "發生「可重試的 SolidFire 異常狀況」" msgid "Retype cannot change encryption requirements." msgstr "Retype 無法變更加密需求。" #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "Retype 無法變更使用中磁區的前端服務品質規格:%s。" msgid "Retype requires migration but is not allowed." msgstr "Retype 需要移轉,但系統不容許。" #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." 
msgstr "" "對磁區 %(volumeName)s 進行的回復失敗。請與系統管理者聯絡,以手動將磁區傳回至" "FAST 原則 %(fastPolicyName)s 的預設儲存體群組。" #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "正在透過刪除 %(volumeName)s 來將其回復。" #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "不容許使用低於 %s 的 VMware vCenter 版本來執行 Cinder。" msgid "SAN product is not configured." msgstr "未配置 SAN 產品。" msgid "SAN protocol is not configured." msgstr "未配置 SAN 通訊協定。" #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "SMBFS 配置 'smbfs_oversub_ratio' 無效。必須大於 0:%s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "SMBFS 配置 'smbfs_used_ratio' 無效。必須大於 0 且小於或等於 1.0:%s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "%(config)s 處的 SMBFS 配置檔不存在。" msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "未設定 SMBFS 配置檔 (smbfs_shares_config)。" #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "嘗試 '%(total_attempts)r' 次之後 SSH 指令仍失敗:'%(command)s'" #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "SSH 指令失敗,錯誤為:'%(err)s',指令為:'%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "偵測到 SSH 指令注入:%(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "%(fabric)s 進行 SSH 連線失敗,發生錯誤:%(err)s" #, python-format msgid "SSL Certificate expired on %s." msgstr "SSL 憑證已在 %s 過期。" #, python-format msgid "SSL error: %(arg)s." msgstr "SSL 錯誤:%(arg)s。" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "找不到「排程器主機過濾器」%(filter_name)s。" #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "找不到「排程器主機稱量程式」%(weigher_name)s。" #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "次要副本狀態:%(status)s,已同步:%(sync)s,同步進度:%(progress)s%%。" #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "次要 ID 不能與主要陣列相同,backend_id = %(secondary)s。" #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "SerialNumber 必須在檔案 %(fileName)s 中。" #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "已移除主機 %(host)s 上的服務 %(service)s。" #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "在主機 %(host)s 上找不到服務 %(service_id)s。" #, python-format msgid "Service %(service_id)s could not be found." msgstr "找不到服務 %(service_id)s。" #, python-format msgid "Service %s not found." msgstr "找不到服務 %s。" msgid "Service is too old to fulfil this request." msgstr "服務太舊,無法滿足此要求。" msgid "Service is unavailable at this time." msgstr "此時無法使用服務。" msgid "Service not found." msgstr "找不到服務。" msgid "Set pair secondary access error." msgstr "設定配對次要存取時發生錯誤。" msgid "Sets thin provisioning." msgstr "設定精簡供應。" msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "此儲存體系列及 ONTAP 版本不支援設定 LUN 服務品質原則群組。" msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "此儲存體系列及 ONTAP 版本不支援設定檔案服務品質原則群組。" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." 
msgstr "" "由於格式無效,已忽略共用項目 %s。格式必須是 address:/export。請檢查 nas_ip " "及 nas_share_path 設定。" #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." msgstr "Cinder 磁區服務無法寫入 %(dir)s 處的共用項目。將不支援 Snapshot 作業。" #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Sheepdog I/O 錯誤,指令為:\"%s\"。" msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "顯示作業只能對將使用者限定範圍之相同專案階層中的專案執行。" msgid "Size" msgstr "容量" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "找不到磁區 %s 的大小,無法安全刪除。" #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "大小為 %(image_size)d GB,無法容納大小為%(volume_size)d GB 的磁區。" #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "所指定映像檔的大小 %(image_size)s GB 大於磁區大小%(volume_size)s GB。" #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "在等待 Snapshot %(id)s 變成可用時,已經要求將其刪除。可能發出了並行要求。" #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "在連鎖刪除期間,發現 Snapshot %(id)s 處於 %(state)s 狀態,而不是「刪除中」狀" "態。" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "找不到 Snapshot %(snapshot_id)s。" #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "Snapshot %(snapshot_id)s 沒有索引鍵為 %(metadata_key)s 的 meta 資料。" #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "Snapshot %s 不得為一致性群組的一部分。" #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "陣列上不存在 Snapshot '%s'。" #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "無法建立 Snapshot,因為磁區 %(vol_id)s 無法使用,現行磁區狀態:" "%(vol_status)s。" msgid "Snapshot cannot be created while volume is migrating." msgstr "移轉磁區時無法建立 Snapshot。" msgid "Snapshot of secondary replica is not allowed." msgstr "不容許使用次要抄本的 Snapshot。" #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "狀態 %s 不支援取得磁區的 Snapshot。" #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "未在任何位置部署 Snapshot 資源 \"%s\"?" msgid "Snapshot size must be multiple of 1 GB." msgstr "Snapshot 大小必須是 1 GB 的倍數。" #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "Snapshot 狀態 %(cur)s,不為 update_snapshot_status 所接受" msgid "Snapshot status must be \"available\" to clone." msgstr "Snapshot 狀態必須為「可用」才能複製。" #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "要備份的 Snapshot 必須可用,但現行狀態卻是 \"%s\"。" #, python-format msgid "Snapshot with id of %s could not be found." msgstr "找不到 ID 為 %s 的 Snapshot。" #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "Snapshot = '%(snap)s' 不存在於基本映像檔 = '%(base)s' 中 - 正在中止增量備份" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "此磁區格式不支援 Snapshot:%s" #, python-format msgid "Socket error: %(arg)s." 
msgstr "Socket 錯誤:%(arg)s。" msgid "SolidFire Cinder Driver exception" msgstr "SolidFire Cinder 驅動程式異常狀況" msgid "Sort direction array size exceeds sort key array size." msgstr "排序方向陣列大小超過排序鍵陣列大小。" msgid "Source CG is empty. No consistency group will be created." msgstr "來源 CG 是空的。將不會建立一致性群組。" msgid "Source host details not found." msgstr "找不到來源主機詳細資料。" msgid "Source volume device ID is required." msgstr "需要來源磁區裝置 ID。" msgid "Source volume not mid-migration." msgstr "移轉期間找不到來源磁區。" #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "在目標軟體驅動裝置上,找不到主機 IP/名稱為 %s 的來源(用於支援後端的磁區移" "轉),將使用預設移轉繼續執行。" msgid "SpaceInfo returned byarray is invalid" msgstr "陣列傳回的 SpaceInfo 無效" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "指定要對映至磁區 %(vol)s 的主機位於不受支援的主機群組%(group)s 中。" msgid "Specified logical volume does not exist." msgstr "指定的邏輯磁區不存在。" #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "找不到 ID 為 %s 的指定 Snapshot 群組。" msgid "Specify a password or private_key" msgstr "指定密碼或 private_key" msgid "Specify san_password or san_private_key" msgstr "指定 san_password 或 san_private_key" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "指定磁區類型名稱、說明、is_public 或這些的組合。" msgid "Split pair error." msgstr "分割配對時發生錯誤。" msgid "Split replication failed." msgstr "分割抄寫失敗。" msgid "Start LUNcopy error." msgstr "啟動 LUNcopy 時發生錯誤。" msgid "State" msgstr "狀態" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "節點的狀態是錯誤的。現行狀態為 %s。" msgid "Status" msgstr "狀態" msgid "Stop snapshot error." msgstr "停止 Snapshot 時發生錯誤。" #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到「儲存體配置服務」。" #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到「儲存體硬體 ID 管理服務」。" #, python-format msgid "Storage Profile %s not found." msgstr "找不到儲存體設定檔 %s。" #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "在 %(storageSystemName)s 上找不到「儲存體重新定位服務」。" #, python-format msgid "Storage family %s is not supported." msgstr "儲存體系列 %s 不受支援。" #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "未順利刪除儲存體群組 %(storageGroupName)s" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "未偵測到儲存體主機 %(svr)s,請驗證名稱" msgid "Storage pool is not configured." msgstr "未配置儲存區。" #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "找不到儲存體設定檔 %(storage_profile)s。" msgid "Storage resource could not be found." msgstr "找不到儲存體資源。" msgid "Storage system id not set." msgstr "未設定儲存體系統 ID。" #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "找不到儲存區 %(poolNameInStr)s 的儲存體系統。" msgid "Storage-assisted migration failed during manage volume." msgstr "在管理磁區期間,儲存體協助型移轉失敗。" #, python-format msgid "StorageSystem %(array)s is not found." msgstr "找不到儲存體系統 %(array)s。" #, python-format msgid "String with params: %s" msgstr "帶參數的字串:%s" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. 
Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "針對資源 '%(res)s',子項總用量 '%(sum)s' 大於專案 '%(proj)s' 的可用配額 " "'%(free)s'。請降低下列一個以上專案的限制或用量:'%(child_ids)s'" msgid "Switch over pair error." msgstr "切換配對時發生錯誤。" msgid "Sync pair error." msgstr "同步配對時發生錯誤。" msgid "Synchronizing secondary volume to primary failed." msgstr "將次要磁碟區同步至主要磁碟區失敗。" #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "找到具有不正確密碼狀態 %(pass_status)s 的系統 %(id)s。" #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "發現系統 %(id)s 的狀態 %(status)s 不正確。" msgid "System does not support compression." msgstr "系統不支援壓縮。" msgid "System is busy, retry operation." msgstr "系統忙碌中,請重試作業。" #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "在帳戶 [%(account)s] 的 CloudByte 儲存體中找不到 TSM [%(tsm)s]。" msgid "Target volume type is still in use." msgstr "目標磁區類型仍在使用中。" #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "範本樹狀結構不符;正在將從屬 %(slavetag)s 新增至主要 %(mastertag)s" #, python-format msgid "Tenant ID: %s does not exist." msgstr "租戶 ID %s 不存在。" msgid "Terminate connection failed" msgstr "終止連線失敗" msgid "Terminate connection unable to connect to backend." msgstr "終止連線無法連接至後端。" #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "終止磁區連線失敗:%(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "找不到要抄寫的 %(type)s %(id)s 來源。" msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "'sort_key' 和 'sort_dir' 參數已遭淘汰,因此無法與 'sort' 參數搭配使用。" msgid "The EQL array has closed the connection." msgstr "EQL 陣列已關閉連線。" #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "GPFS 檔案系統 %(fs)s 不是必要的版次。現行層次是 %(cur)s,必須至少是 %(min)s。" msgid "The IP Address was not found." msgstr "找不到 IP 位址。" #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "WebDAV 要求失敗。原因:%(msg)s,回覆碼/原因:%(code)s,來源磁區:%(src)s,目" "的地磁區:%(dst)s,方法:%(method)s。" msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "上述錯誤可能會顯示尚未建立資料庫。\n" "請使用 'cinder-manage db sync' 來建立資料庫,然後再執行此指令。" #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "陣列不支援 SLO %(slo)s 及工作量 %(workload)s 的儲存區設定。請檢查陣列中的有" "效 SLO 及工作量。" msgid "" "The back-end where the volume is created does not have replication enabled." msgstr "在其中建立該磁區的後端尚未啟用抄寫。" #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "指令 %(cmd)s 失敗。(ret:%(ret)s,標準輸出:%(out)s,標準錯誤:%(err)s)" msgid "The copy should be primary or secondary" msgstr "副本應為主要或次要副本" #, python-format msgid "" "The creation of a logical device could not be completed. 
(LDEV: %(ldev)s)" msgstr "無法完成建立邏輯裝置。(LDEV:%(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "裝飾方法必須接受磁區或 Snapshot 物件" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "路徑 %(path)s 中的裝置無法使用:%(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "結束時間 (%(end)s) 必須晚於開始時間 (%(start)s)。" #, python-format msgid "The extra_spec: %s is invalid." msgstr "extra_spec %s 無效。" #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "額外規格 %(extraspec)s 無效。" #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "無法刪除已失效接手的磁區:%s" #, python-format msgid "The following elements are required: %s" msgstr "需要下列元素:%s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "下列移轉具有降級,這是不容許的:\n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "無法新增主機群組或 iSCSI 目標。" msgid "The host group or iSCSI target was not found." msgstr "找不到主機群組或 iSCSI 目標。" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "主機尚未備妥以進行失效回復。請重新同步磁區,並回復 3PAR 後端上的抄寫。" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "主機尚未備妥以進行失效回復。請重新同步磁區,並回復 LeftHand 後端上的抄寫。" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "主機尚未備妥以進行失效回復。請重新同步磁區,並回復 Storwize 後端上的抄寫。" #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP 使用者 %(user)s 不存在。" #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "匯入的 LUN %(lun_id)s 位於未受下列主機管理的儲存區 %(lun_pool)s 中:" "%(host)s。" msgid "The key cannot be None." msgstr "金鑰不能是「無」。" #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "已經刪除指定 %(type)s %(id)s 的邏輯裝置。" #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "方法 %(method)s 已逾時。(逾時值:%(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "未實作方法 update_migrated_volume。" #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "裝載 %(mount_path)s 不是有效的 Quobyte USP 磁區。錯誤:%(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "儲存體後端的參數。(config_group:%(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "增量備份的母項備份必須可用。" #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "所提供的 Snapshot '%s' 不是所提供之磁區的 Snapshot。" msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "對後端中磁區的參照應該具有下列格式:file_system/volume_name(磁區名稱不得包" "含 '/')" #, python-format msgid "The remote retention count must be %s or less." msgstr "遠端保留計數必須小於或等於 %s。" msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." 
msgstr "" "在磁區類型 extra_specs 中,未正確配置抄寫模式。如果 replication:mode 是定期" "的,則 replication:sync_period 也必須予以指定且介於 300 和 31622400 秒之間。" #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "抄寫同步週期必須至少是 %s 秒。" #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "所要求的大小 %(requestedSize)s 與產生的下列大小不相同:%(resultSize)s。" #, python-format msgid "The resource %(resource)s was not found." msgstr "找不到資源 %(resource)s。" msgid "The results are invalid." msgstr "結果無效。" #, python-format msgid "The retention count must be %s or less." msgstr "保留計數必須小於或等於 %s。" msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "當磁區處於維護模式時,無法建立 Snapshot。" #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "來源磁區 %s 不在現行主機管理的儲存區中。" msgid "The source volume for this WebDAV operation not found." msgstr "找不到此 WebDAV 作業的來源磁區。" #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "來源磁區類型 '%(src)s' 與目的地磁區類型'%(dest)s' 不同。" #, python-format msgid "The source volume type '%s' is not available." msgstr "來源磁區類型 '%s' 無法使用。" #, python-format msgid "The specified %(desc)s is busy." msgstr "指定的 %(desc)s 忙碌中。" #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "指定的 LUN 不屬於給定的儲存區:%s。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "無法管理指定的 LDEV %(ldev)s。不得對映該 LDEV。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "無法管理指定的 LDEV %(ldev)s。不得對該 LDEV進行配對。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "無法管理指定的 LDEV %(ldev)s。LDEV 大小必須為GB 的倍數。" #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "無法管理指定的 LDEV %(ldev)s。磁區類型必須為 DP-VOL。" #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "指定的作業不受支援。磁區大小必須與來源 %(type)s 的大小相同。(磁區:" "%(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "所指定的 vdisk 已對映至主機。" msgid "The specified volume is mapped to a host." msgstr "指定的磁區已對映至主機。" #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "%s 的儲存體陣列密碼不正確,請更新所配置的密碼。" #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "可以使用儲存體後端。(config_group:%(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "儲存裝置不支援 %(prot)s。請配置該裝置以支援 %(prot)s,或者切換至使用其他通訊" "協定的驅動程式。" #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "已分段的 meta 計數 %(memberCount)s 對下列磁區而言太小:%(volumeName)s,大小" "為 %(volumeSize)s。" #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "磁區/Snapshot %(id)s 的 meta 資料類型 %(metadata_type)s無效。" #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." 
msgstr "無法延伸磁區 %(volume_id)s。磁區類型必須是「一般」。" #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "無法取消管理磁區 %(volume_id)s。磁區類型必須為%(volume_type)s。" #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "已順利管理磁區 %(volume_id)s。(LDEV:%(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "已順利取消管理磁區 %(volume_id)s。(LDEV:%(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "找不到要對映的磁區 %(volume_id)s。" msgid "The volume cannot accept transfer in maintenance mode." msgstr "磁區無法在維護模式下接受傳送。" msgid "The volume cannot be attached in maintenance mode." msgstr "無法在維護模式下連接磁區。" msgid "The volume cannot be detached in maintenance mode." msgstr "無法在維護模式下分離磁區。" msgid "The volume cannot be updated during maintenance." msgstr "維護期間,無法更新磁區。" msgid "The volume connection cannot be initialized in maintenance mode." msgstr "無法在維護模式下起始設定磁區連線。" msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "磁區驅動程式需要連接器中的 iSCSI 起始器名稱。" msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "磁區目前在 3PAR 上正忙,因此此時無法將其刪除。您可以稍後重試。" msgid "The volume label is required as input." msgstr "需要磁區標籤作為輸入。" msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "當磁區處於維護模式時,無法刪除磁區 meta 資料。" msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "當磁區處於維護模式時,無法更新磁區 meta 資料。" #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "沒有資源可供使用。(資源:%(resource)s)" msgid "There are no valid ESX hosts." msgstr "沒有有效的 ESX 主機。" #, python-format msgid "There are no valid datastores attached to %s." msgstr "%s 沒有連接有效的資料儲存庫。" msgid "There are no valid datastores." msgstr "沒有有效的資料儲存庫。" #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." msgstr "未指定 %(param)s。指定的儲存體對於管理磁區必不可少。" msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "未指定 LDEV。指定的 LDEV對於管理磁區必不可少。" msgid "There is no metadata in DB object." msgstr "資料庫物件中沒有 meta 資料。" #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "沒有共用可以管理 %(volume_size)sG" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "沒有共用項目可以管理 %(volume_size)s G。" #, python-format msgid "There is no such action: %s" msgstr "沒有這樣的動作:%s" msgid "There is no virtual disk device." msgstr "沒有虛擬磁碟裝置。" #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "將磁區新增至遠端複製群組時發生錯誤:%s。" #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "建立 cgsnapshot 時發生錯誤:%s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "建立遠端複製群組時發生錯誤:%s。" #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "設定遠端複製群組的同步週期時發生錯誤:%s。" #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "在 3PAR 陣列上設定遠端複製群組時發生錯誤:('%s')。該磁區將不作為抄寫類型予以" "辨識。" #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). 
The volume will not be recognized as replication type." msgstr "" "在 LeftHand 陣列上設定遠端排程時發生錯誤:('%s')。該磁區將不作為抄寫類型予以" "辨識。" #, python-format msgid "There was an error starting remote copy: %s." msgstr "啟動遠端複製時發生錯誤:%s。" #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "未配置任何 Gluster 配置檔 (%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "未配置任何 NFS 配置檔 (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "沒有已配置的 Quobyte 磁區 (%s)。範例:quobyte:///" msgid "Thin provisioning not supported on this version of LVM." msgstr "此版本的 LVM 不支援精簡供應。" msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "未安裝「精簡供應啟用程式」。無法建立精簡磁區" msgid "This driver does not support deleting in-use snapshots." msgstr "此驅動程式不支援刪除使用中 Snapshot。" msgid "This driver does not support snapshotting in-use volumes." msgstr "此驅動程式不支援對使用中磁區建立 Snapshot。" msgid "This request was rate-limited." msgstr "此要求存在頻率限制。" #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "此系統平台 (%s) 不受支援。此驅動程式僅支援 Win32 平台。" #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "找不到 %(storageSystemName)s 的「層級原則服務」。" #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "等待 Nova 更新以建立 Snapshot %s 時發生逾時。" #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "等待 Nova 更新以刪除 Snapshot %(id)s 時發生逾時。" msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "連接至 ceph 叢集時使用的逾時值(以秒為單位)。如果值 < 0,則不設定任何逾時值," "並使用預設 librados 值。" #, python-format msgid "Timeout while calling %s " msgstr "呼叫 %s 時逾時" #, python-format msgid "Timeout while requesting %(service)s API." msgstr "要求 %(service)s API 時發生逾時。" #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "要求後端 %(service)s 中的功能時逾時。" #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "找不到傳送 %(transfer_id)s。" #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "傳送 %(transfer_id)s:磁區 ID %(volume_id)s 處於非預期的狀態 %(status)s,預期" "狀態為 awaiting-transfer" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "正在嘗試將備份 meta 資料從 ID %(meta_id)s 匯入到備份 %(id)s 中。" #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "調整磁區作業在完成之前已停止:volume_name=%(volume_name)s,task-" "status=%(status)s。" #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "類型 %(type_id)s 已經與另一個服務品質規格%(qos_specs_id)s 產生關聯" msgid "Type access modification is not applicable to public volume type." msgstr "類型存取修訂不適用公用磁區類型。" msgid "Type cannot be converted into NaElement." msgstr "無法將類型轉換為 NaElement。" #, python-format msgid "TypeError: %s" msgstr "TypeError:%s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUID %s 同時位於新增和移除磁區清單中。" #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "無法存取磁區 %s 的 Storwize 後端。" msgid "Unable to access the backend storage via file handle."
msgstr "無法透過檔案控點來存取後端儲存體。" #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "無法透過路徑 %(path)s 來存取後端儲存體。" #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "無法將 Cinder 主機新增至空間 %(space)s 的應用程式主機" #, python-format msgid "Unable to complete failover of %s." msgstr "無法完成 %s 的失效接手。" msgid "Unable to connect or find connection to host" msgstr "無法連接至主機或找不到與主機的連線" msgid "Unable to create Barbican Client without project_id." msgstr "無法建立不含 project_id 的 Barbican 用戶端。" #, python-format msgid "Unable to create consistency group %s" msgstr "無法建立一致性群組 %s" msgid "Unable to create lock. Coordination backend not started." msgstr "無法建立鎖定。協調後端未啟動。" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "無法建立或取得下列 FAST 原則的預設儲存體群組:%(fastPolicyName)s。" #, python-format msgid "Unable to create replica clone for volume %s." msgstr "無法建立磁區 %s 的抄本副本。" #, python-format msgid "Unable to create the relationship for %s." msgstr "無法建立 %s 的關係。" #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "無法從 %(snap)s 建立磁區 %(name)s。" #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "無法從 %(vol)s 建立磁區 %(name)s。" #, python-format msgid "Unable to create volume %s" msgstr "無法建立磁區 %s" msgid "Unable to create volume. Backend down." msgstr "無法建立磁區。後端已關閉。" #, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "無法刪除一致性群組 Snapshot %s" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "無法刪除 Snapshot %(id)s,狀態:%(status)s。" #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "無法刪除磁區 %s 上的 Snapshot 原則。" #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "無法刪除磁區 %(vol)s 的目標磁區。異常狀況:%(err)s。" msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "無法分離磁區。磁區狀態必須是「使用中」,並且 attach_status必須是「已連接」才" "能進行分離。" #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "無法判定來自所提供之次要項目的 secondary_array:%(secondary)s。" #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "針對 Snapshot %(id)s,無法判定純度中的 Snapshot 名稱。" msgid "Unable to determine system id." msgstr "無法判定系統 ID。" msgid "Unable to determine system name." msgstr "無法判定系統名稱。" #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "無法使用純度 REST API 版本 %(api_version)s 來執行管理 Snapshot 作業需要 " "%(required_versions)s。" #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "無法使用純度 REST API %(api_version)s 版來執行抄寫作業,需要下列其中一個:" "%(required_versions)s。" msgid "Unable to enable replication and snapcopy at the same time." msgstr "無法同時啟用抄寫和 Snapcopy。" #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." 
msgstr "無法與 Storwize 叢集 %s 建立夥伴關係。" #, python-format msgid "Unable to extend volume %s" msgstr "無法延伸磁區 %s" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "無法將磁區 %(id)s 失效接手至次要後端,因為抄寫關係無法切換:%(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "無法失效回復至「預設」,只有在失效接手完成之後才能執行此作業。" #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "無法失效接手至抄寫目標:%(reason)s。" msgid "Unable to fetch connection information from backend." msgstr "無法從後端提取連線資訊。" #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "無法從後端提取連線資訊:%(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "找不到名稱為 %s 的純度參照" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "找不到磁區群組:%(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "找不到失效接手目標,未配置次要目標。" msgid "Unable to find iSCSI mappings." msgstr "找不到 iSCSI 對映。" #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "找不到 ssh_hosts_key_file:%s" msgid "Unable to find system log file!" msgstr "找不到系統日誌檔!" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "找不到可行的 pg Snapshot 以用於選定次要陣列上的失效接手:%(id)s。" #, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "找不到來自已配置目標的可行次要陣列:%(targets)s。" #, python-format msgid "Unable to find volume %s" msgstr "找不到磁區 %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "無法取得檔案 '%s' 的區塊裝置" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "無法取得建立磁區所需的配置資訊:%(errorMessage)s。" msgid "Unable to get corresponding record for pool." msgstr "無法取得儲存區的對應記錄。" #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "無法取得空間 %(space)s 的相關資訊,請驗證叢集是否在執行中且已連接。" msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "無法取得此主機上 IP 位址的清單,請檢查許可權和網路。" msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "無法取得網域成員的清單,請檢查叢集是否在執行中。" msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "無法取得空間清單以建立新名稱。請驗證叢集是否在執行中。" #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "無法取得 backend_name 的統計資料:%s" msgid "Unable to get storage volume from job." msgstr "無法從工作中取得儲存磁區。" #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "無法取得 hardwareId %(hardwareIdInstance)s 的目標端點。" msgid "Unable to get the name of the masking view." msgstr "無法取得遮罩視圖的名稱。" msgid "Unable to get the name of the portgroup." msgstr "無法取得埠群組的名稱。" #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "無法取得磁區 %s 的抄寫關係。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "無法將磁區 %(deviceId)s 匯入 Cinder。該磁區是下列抄寫階段作業的來源磁區:" "%(sync)s。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." 
msgstr "" "無法將磁區 %(deviceId)s 匯入 Cinder。外部磁區不在由現行 Cinder 主機管理的儲存" "區中。" #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "無法將磁區 %(deviceId)s 匯入 Cinder。磁區正在遮罩視圖%(mv)s。" #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "無法從 %(cert)s %(e)s 載入 CA。" #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "無法從 %(cert)s %(e)s 載入憑證。" #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "無法從 %(cert)s %(e)s 載入金鑰。" #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "在 SolidFire 裝置上找不到帳戶 %(account_name)s" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "找不到管理 IP 位址 '%s' 的 SVM" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "找不到指定的重播設定檔 %s " #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "無法管理現有磁區。磁區 %(volume_ref)s 已經受管理。" #, python-format msgid "Unable to manage volume %s" msgstr "無法管理磁區 %s" msgid "Unable to map volume" msgstr "無法對映磁區" msgid "Unable to map volume." msgstr "無法對映磁區。" msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "無法剖析 XML 要求。請提供正確格式的 XML。" msgid "Unable to parse attributes." msgstr "無法剖析屬性。" #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "無法將磁區 %s 抄本提升為主要副本。沒有次要副本可用。" msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "無法重複使用未受 Cinder 管理且use_chap_auth=True 的主機。" msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "無法重複使用配置有不明 CHAP 認證的主機。" #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "無法將磁區 %(existing)s 重新命名為 %(newname)s" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "無法擷取 ID 為 %s 的 Snapshot 群組。" #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "無法對 %(specname)s 執行 Retype 作業,預期接收現行及要求的 %(spectype)s 值。" "接收到的值:%(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "無法執行 Retype 動作:存在磁區 %s 的副本。如果執行 Retype 動作,則將超過2 份" "副本的限制。" #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "無法執行 Retype 動作:現行動作需要磁區複製,但當新類型為抄寫時,不容許這樣" "做。磁區為 %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "無法設定 %(vol)s 的鏡映模式抄寫。異常狀況:%(err)s。" #, python-format msgid "Unable to snap Consistency Group %s" msgstr "無法貼齊一致性群組 %s" msgid "Unable to terminate volume connection from backend." msgstr "無法從後端終止磁區連線。" #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "無法終止磁區連線:%(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "無法更新一致性群組 %s" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "無法更新類型,因為磁區 %(vol_id)s 的狀態 %(vol_status)s 不正確。磁區狀態必須" "為可用或使用中。" #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. 
" msgstr "" "無法在下列遮罩視圖中驗證起始器群組 %(igGroupName)s:%(maskingViewName)s。" msgid "Unacceptable parameters." msgstr "不可接受的參數值" #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "對映 %(id)s 的對映狀態 %(status)s 不符合預期。屬性:%(attr)s。" #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "非預期的 CLI 回應:標頭/列不符。標頭:%(header)s、列:%(row)s。" #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "對映 %(id)s 的對映狀態 %(status)s 不符合預期。屬性:%(attr)s。" #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "非預期的輸出。預期 [%(expected)s],但卻接收到 [%(output)s]" msgid "Unexpected response from Nimble API" msgstr "來自 Nimble API 的非預期回應" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "來自 Tegile IntelliFlash API 的非預期回應" msgid "Unexpected status code" msgstr "非預期的狀態碼" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "針對 URL %(page)s,從使用通訊協定 %(protocol)s 的交換器 %(switch_id)s 傳回非" "預期的狀態碼。錯誤:%(error)s" msgid "Unknown Gluster exception" msgstr "不明的 Gluster 異常狀況" msgid "Unknown NFS exception" msgstr "不明的 NFS 異常狀況" msgid "Unknown RemoteFS exception" msgstr "不明的 RemoteFS 異常狀況" msgid "Unknown SMBFS exception." msgstr "不明的 SMBFS 異常狀況。" msgid "Unknown Virtuozzo Storage exception" msgstr "「不明 Virtuozzo 儲存體」異常狀況" msgid "Unknown action" msgstr "不明動作" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "如果要管理的磁區 %s 已經由 Cinder 進行管理,則是不明情況。正在終止管理磁區。" "請將 'cinder_managed' 自訂綱目內容新增至該磁區,並將它的值設為 False。或者," "將 Cinder 配置原則 'zfssa_manage_policy' 的值設為 'loose',以移除此限制。" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "如果要管理的磁區 %s 已經由 Cinder 進行管理,則是不明情況。正在終止管理磁區。" "請將 'cinder_managed' 自訂綱目內容新增至該磁區,並將它的值設為 False。或者," "將 Cinder 配置原則 'zfssa_manage_policy' 的值設為 'loose',以移除此限制。" #, python-format msgid "Unknown operation %s." msgstr "不明作業 %s。" #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "不明或不支援的指令 %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "不明的通訊協定:%(protocol)s。" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "不明的配額資源 %(unknown)s。" msgid "Unknown service" msgstr "不明的服務" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "不明的排序方向,必須為 'desc' 或 'asc'" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "不明的排序方向,必須為 'desc' 或 'asc'。" msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "取消管理與連鎖刪除選項是互斥的。" msgid "Unmanage volume not implemented." msgstr "未實作取消管理磁區。" msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "不容許取消管理「已失效接手」之磁區中的 Snapshot。" msgid "Unmanaging of snapshots from failed-over volumes is not allowed." 
msgstr "不容許取消管理已失效接手之磁區中的 Snapshot。" #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "無法辨識的服務品質關鍵字:\"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "無法辨識的備用格式:%s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "無法辨識 read_deleted 值 '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "取消設定 gcs 選項:%s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "iscsiadm 不成功。異常狀況為 %(ex)s。" msgid "Unsupported Clustered Data ONTAP version." msgstr "不支援的叢集資料 ONTAP 版本。" msgid "Unsupported Content-Type" msgstr "不支援的內容類型" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "資料 ONTAP 版本不受支援。支援資料 ONTAP 7.3.1 版以及更高版本。" #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "不支援的備份 meta 資料版本 (%s)" msgid "Unsupported backup metadata version requested" msgstr "所要求的備份 meta 資料版本不受支援" msgid "Unsupported backup verify driver" msgstr "不受支援的備份驗證驅動程式" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "交換器 %s 上的韌體不受支援。請確保交換器正在執行韌體6.4 版或更高版本" #, python-format msgid "Unsupported volume format: %s " msgstr "不受支援的磁區格式:%s" msgid "Update QoS policy error." msgstr "更新服務品質原則時發生錯誤。" msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "更新和刪除配額作業只能由原生母項的管理者或CLOUD 管理者來執行。" msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "更新和刪除配額作業只能對將使用者限定範圍之相同專案階層中的專案執行。" msgid "Update list, doesn't include volume_id" msgstr "更新清單,不包含 volume_id" msgid "Updated At" msgstr "已更新" msgid "Upload to glance of attached volume is not supported." msgstr "不支援將所連接的磁區上傳至 Glance。" msgid "Use ALUA to associate initiator to host error." msgstr "使用 ALUA 將起始器關聯至主機時發生錯誤。" msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "使用 CHAP 將起始器關聯至主機時發生錯誤。請檢查 CHAP 使用者名稱及密碼。" msgid "User ID" msgstr "使用者識別號" msgid "User does not have admin privileges" msgstr "使用者並沒有管理者權力" msgid "User is not authorized to use key manager." msgstr "使用者未獲授權來使用金鑰管理程式。" msgid "User not authorized to perform WebDAV operations." msgstr "使用者未獲授權來執行 WebDAV 作業。" msgid "UserName is not configured." msgstr "未配置 UserName。" msgid "UserPassword is not configured." msgstr "未配置 UserPassword。" msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "第 2 版回復 - 磁區位於預設儲存體群組旁邊的另一個儲存體群組中。" msgid "V2 rollback, volume is not in any storage group." msgstr "第 2 版回復,磁區不在任何儲存體群組中。" msgid "V3 rollback" msgstr "第 3 版回復" msgid "VF is not enabled." msgstr "未啟用 VF。" #, python-format msgid "VV Set %s does not exist." msgstr "「VV 集」%s 不存在。" #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "服務品質規格的有效消費者為:%s" #, python-format msgid "Valid control location are: %s" msgstr "有效的控制項位置為:%s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "驗證磁區連線失敗(錯誤:%(err)s)。" #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "值 \"%(value)s\" 不適用於配置選項 \"%(option)s\"" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." 
msgstr "%(param_string)s 的值 %(param)s 不是布林值。" msgid "Value required for 'scality_sofs_config'" msgstr "'scality_sofs_config' 需要值" #, python-format msgid "ValueError: %s" msgstr "ValueError:%s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "從 %(src)s 到 %(tgt)s 的對映未涉及到 vdisk %(name)s。" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "API 不支援 %(req_ver)s 版。下限為 %(min_ver)s,上限為 %(max_ver)s。" #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s 無法依 ID 擷取物件。" #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s 不支援條件式更新。" #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "虛擬磁區 '%s' 不在陣列上。" #, python-format msgid "Vol copy job for dest %s failed." msgstr "對目的地 %s 執行磁區複製工作時失敗。" #, python-format msgid "Volume %(deviceID)s not found." msgstr "找不到磁區 %(deviceID)s。" #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "在陣列上找不到磁區 %(name)s。無法判定是否有已對映的磁區。" #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "已在 VNX 中建立磁區 %(name)s,但該磁區處於 %(state)s 狀態。" #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "無法在儲存區 %(pool)s 中建立磁區 %(vol)s。" #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "磁區 %(vol1)s 與 snapshot.volume_id %(vol2)s 不符。" #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "磁區 %(vol_id)s 狀態必須為可用或使用中,但是現行狀態為%(vol_status)s。" #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "磁區 %(vol_id)s 狀態必須為可用才能延伸,但是現行狀態為%(vol_status)s。" #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "磁區 %(vol_id)s 狀態必須為可用,才能更新唯讀旗標,但現行狀態為:" "%(vol_status)s。" #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "磁區 %(vol_id)s 狀態必須為可用,但是現行狀態為:%(vol_status)s。" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "找不到磁區 %(volume_id)s。" #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "磁區 %(volume_id)s 沒有索引鍵為%(metadata_key)s 的管理 meta 資料。" #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "磁區 %(volume_id)s 沒有索引鍵為 %(metadata_key)s 的 meta 資料。" #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "磁區 %(volume_id)s 目前已對映至不受支援的主機群組 %(group)s" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "磁區 %(volume_id)s 目前未對映至主機 %(host)s" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "磁區 %(volume_id)s 仍處於連接狀態,請先將磁區分離。" #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "磁區 %(volume_id)s 抄寫錯誤:%(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "磁區 %(volume_name)s 繁忙。" #, python-format msgid "Volume %s could not be created from source volume." msgstr "無法從來源磁區建立磁區 %s。" #, python-format msgid "Volume %s could not be created on shares." 
msgstr "無法在共用上建立磁區 %s。" #, python-format msgid "Volume %s could not be created." msgstr "無法建立磁區 %s。" #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "磁區 %s 不存在於 Nexenta SA 中" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "磁區 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" #, python-format msgid "Volume %s does not exist on the array." msgstr "磁區 %s 不在陣列上。" #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "沒有為磁區 %s 指定 provider_location,正在跳過。" #, python-format msgid "Volume %s doesn't exist on array." msgstr "磁區 %s 不在陣列上。" #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "磁區 %s 不存在於 ZFSSA 後端上。" #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "磁區 %s 已經由 OpenStack 進行管理。" #, python-format msgid "Volume %s is already part of an active migration." msgstr "磁區 %s 已經是作用中移轉的一部分。" #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "磁區 %s 不是已抄寫的類型。這個磁區需要是額外規格 replication_enabled 設為 " "' True' 的磁區類型,才能支援抄寫動作。" #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "磁區 %s 在線上。請將磁區設為離線,以使用 OpenStack 進行管理。" #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." msgstr "磁區 %s 不得在移轉中、已連接、屬於某個一致性群組或具有 Snapshot。" #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "磁區 %s 不得為一致性群組的一部分。" #, python-format msgid "Volume %s must not be replicated." msgstr "不得抄寫磁區 %s。" #, python-format msgid "Volume %s must not have snapshots." msgstr "磁區 %s 不得具有 Snapshot。" #, python-format msgid "Volume %s not found." msgstr "找不到磁區 %s。" #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "磁區 %s:嘗試延伸磁區時發生錯誤" #, python-format msgid "Volume (%s) already exists on array" msgstr "陣列上已存在磁區 (%s)" #, python-format msgid "Volume (%s) already exists on array." msgstr "陣列上已存在磁區 (%s)。" #, python-format msgid "Volume Group %s does not exist" msgstr "磁區群組 %s 不存在" #, python-format msgid "Volume Type %(id)s already exists." msgstr "磁區類型 %(id)s 已存在。" #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "磁區類型 %(type_id)s 沒有索引鍵為 %(id)s 的額外規格。" #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "磁區類型 %(volume_type_id)s 刪除作業,不為該類型的磁區所接受。" #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "磁區類型 %(volume_type_id)s 沒有索引鍵為 %(extra_specs_key)s 的額外規格。" msgid "Volume Type id must not be None." msgstr "磁區類型 ID 不得為 None。" #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "在對應於 OpenStack 磁區 [%(ops_vol)s] 的 CloudByte 儲存體處,找不到磁區 " "[%(cb_vol)s]。" #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "在 CloudByte 儲存體中找不到磁區 [%s]。" #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "使用過濾器 %(filter)s 找不到磁區附件。" #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "磁區後端配置無效:%(reason)s" msgid "Volume by this name already exists" msgstr "具有此名稱的磁區已經存在" msgid "Volume cannot be restored since it contains snapshots." 
msgstr "無法還原磁區,因為該磁區包含 Snapshot。" msgid "Volume create failed while extracting volume ref." msgstr "擷取磁區參照時,磁區建立失敗。" #, python-format msgid "Volume device file path %s does not exist." msgstr "磁區裝置檔案路徑 %s 不存在。" #, python-format msgid "Volume device not found at %(device)s." msgstr "在 %(device)s 處找不到磁區裝置。" #, python-format msgid "Volume driver %s not initialized." msgstr "未起始設定磁區驅動程式 %s。" msgid "Volume driver not ready." msgstr "磁區驅動程式未備妥。" #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "磁區驅動程式報告了錯誤:%(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "磁區具有目前無法刪除的暫時 Snapshot。" msgid "Volume has children and cannot be deleted!" msgstr "磁區具有子項,且無法予以刪除!" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "已連接一致性群組 %s 中的磁區。需要先將其分離。" msgid "Volume in consistency group still has dependent snapshots." msgstr "一致性群組中的磁區仍具有相依 Snapshot。" #, python-format msgid "Volume is attached to a server. (%s)" msgstr "已將磁區連接至伺服器。(%s)" msgid "Volume is in-use." msgstr "磁區在使用中。" msgid "Volume is not available." msgstr "無法使用磁區。" msgid "Volume is not local to this node" msgstr "磁區不是此節點的本端磁區" msgid "Volume is not local to this node." msgstr "磁區不是此節點的本端磁區。" msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "已要求磁區 meta 資料備份,但此驅動程式尚不支援此功能。" #, python-format msgid "Volume migration failed: %(reason)s" msgstr "移轉磁區失敗:%(reason)s" msgid "Volume must be available" msgstr "磁區必須可用" msgid "Volume must be in the same availability zone as the snapshot" msgstr "磁區和 Snapshot 必須位在同一個可用性區域中" msgid "Volume must be in the same availability zone as the source volume" msgstr "磁區和來源磁區必須位在同一個可用性區域中" msgid "Volume must have a volume type" msgstr "磁區必須具有磁區類型" msgid "Volume must not be part of a consistency group." msgstr "磁區不得隸屬於一致性群組。" msgid "Volume must not be replicated." msgstr "不得抄寫磁區。" msgid "Volume must not have snapshots." msgstr "磁區不得具有 Snapshot。" #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "找不到實例 %(instance_id)s 的磁區。" msgid "Volume not found on configured storage backend." msgstr "在所配置的儲存體後端系統上找不到磁區。" msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "在配置的儲存體後端上找不到磁區。如果磁區名稱包含 \"/\",請將其重命名,然後再" "次嘗試進行管理。" msgid "Volume not found on configured storage pools." msgstr "在所配置的儲存區上找不到磁區。" msgid "Volume not found." msgstr "找不到磁區。" msgid "Volume not unique." msgstr "磁區不是唯一的。" msgid "Volume not yet assigned to host." msgstr "尚未將磁區指派給主機。" msgid "Volume reference must contain source-name element." msgstr "磁區參照必須包含 source-name 元素。" #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "找不到 %(volume_id)s 的磁區抄寫。" #, python-format msgid "Volume service %s failed to start." msgstr "磁區服務 %s 無法啟動。" msgid "Volume should have agent-type set as None." msgstr "磁區應該將代理程式類型設定為「無」。" #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "磁區大小 %(volume_size)s GB 不能小於映像檔 minDisk 大小 %(min_disk)s GB。" #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "磁區大小 '%(size)s' 必須是大於 0 的整數" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." 
msgstr "" "磁區大小 '%(size)s' GB 不能小於原始磁區大小%(source_size)s GB。它們必須大於或" "等於原始磁區大小。" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "磁區大小 '%(size)s' GB 不能小於 Snapshot 大小%(snap_size)s GB。它們必須大於或" "等於原始 Snapshot 大小。" msgid "Volume size increased since the last backup. Do a full backup." msgstr "磁區大小自前次備份以來已增加。請執行完整備份。" msgid "Volume size must be a multiple of 1 GB." msgstr "磁區大小必須是 1 GB 的倍數。" msgid "Volume size must be multiple of 1 GB." msgstr "磁區大小必須是 1 GB 的倍數。" msgid "Volume size must multiple of 1 GB." msgstr "磁區大小必須是 1 GB 的倍數。" #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "磁區的磁區狀態必須為可用,但是現行狀態為:%s" msgid "Volume status is in-use." msgstr "磁區狀態為使用中。" #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "對於 Snapshot,磁區狀態必須為「可用」或「使用中」。(是 %s)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "磁區狀態必須是「可用」或「使用中」。" #, python-format msgid "Volume status must be %s to reserve." msgstr "磁區狀態必須為 %s 才能保留。" msgid "Volume status must be 'available'." msgstr "磁區狀態必須為「可用」。" msgid "Volume to Initiator Group mapping already exists" msgstr "磁區至起始器群組的對映已存在" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "要備份的磁區必須處於可用或使用中狀態,但是現行狀態是 \"%s\"。" msgid "Volume to be restored to must be available" msgstr "要還原至的磁區必須可用" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "找不到磁區類型 %(volume_type_id)s。" #, python-format msgid "Volume type ID '%s' is invalid." msgstr "磁區類型 ID '%s' 無效。" #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "%(volume_type_id)s / %(project_id)s 組合的磁區類型存取已存在。" #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "找不到 %(volume_type_id)s / %(project_id)s 組合的磁區類型存取。" #, python-format msgid "Volume type does not match for share %s." msgstr "共用 %s 的磁區類型不符。" #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "類型 %(type_id)s 的磁區類型加密已存在。" #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "類型 %(type_id)s 的磁區類型加密不存在。" msgid "Volume type name can not be empty." msgstr "磁區類型名稱不能為空。" #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "找不到名稱為 %(volume_type_name)s 的磁區類型。" #, python-format msgid "Volume with volume id %s does not exist." msgstr "磁區 ID 為 %s 的磁區不存在。" #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "磁區 %(volumeName)s 不是所連結的磁區。您只能對所連結的磁區執行「延伸」作業。" "正在結束..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "未將磁區 %(volumeName)s 新增至儲存體群組 %(sgGroupName)s。" #, python-format msgid "Volume: %s could not be found." msgstr "找不到磁區 %s。" #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "磁區 %s 已經由 Cinder 進行管理。" msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "磁區將區塊化成此大小(以 MB 為單位)的物件。" msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "超過了主要及次要 SolidFire 帳戶上的磁區/帳戶。" #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. 
Must be > 0 and <= 1.0: %s." msgstr "" "VzStorage 配置 'vzstorage_used_ratio' 無效。必須大於 0 且小於或等於 1.0:%s。" #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "%(config)s 處的 VzStorage 配置檔不存在。" msgid "Wait replica complete timeout." msgstr "等待抄本完成時逾時。" #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "等待同步失敗。執行中狀態:%s。" msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "正在等待所有節點結合叢集。請確保所有 sheep 常駐程式都在執行中。" msgid "We should not do switch over on primary array." msgstr "我們不應切換到主要陣列上。" msgid "Wrong resource call syntax" msgstr "資源呼叫語法錯誤" msgid "X-IO Volume Driver exception!" msgstr "X-IO 磁區驅動程式異常狀況!" msgid "XML support has been deprecated and will be removed in the N release." msgstr "XML 支援已遭到淘汰,且將在 N 版本中予以移除。" msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "未正確地配置 XtremIO,找不到 iSCSI 入口網站" msgid "XtremIO not initialized correctly, no clusters found" msgstr "未正確地起始設定 XtremIO,找不到叢集" msgid "You must implement __call__" msgstr "必須實作 __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "在使用 3PAR 驅動程式之前,必須先安裝 hpe3parclient。請執行 \"pip install " "python-3parclient\" 來安裝 hpe3parclient。" msgid "You must supply an array in your EMC configuration file." msgstr "您必須在 EMC 配置檔中提供一個陣列。" #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "原始大小 %(originalVolumeSize)s GB 大於:%(newSize)s GB。僅支援「延伸」作業。" "正在結束..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError:%s" msgid "Zone" msgstr "區域" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "分區原則:%s,無法辨識" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "_create_and_copy_vdisk_data:無法取得 vdisk %s 的屬性。" msgid "_create_host failed to return the host name." msgstr "_create_host 無法傳回主機名稱。" msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "_create_host:無法轉換主機名稱。主機名稱不是 Unicode 或字串。" msgid "_create_host: No connector ports." msgstr "_create_host:無連接器埠。" msgid "_create_local_cloned_volume, Replication Service not found." msgstr "_create_local_cloned_volume,找不到「抄寫服務」。" #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume,磁區名稱:%(volumename)s,來源磁區名稱:" "%(sourcevolumename)s,來源磁區實例:%(source_volume)s,目標磁區實例:" "%(target_volume)s,回覆碼:%(rc)lu,錯誤:%(errordesc)s。" #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - 在 CLI 輸出中找不到成功訊息。\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" msgid "_create_volume_name, id_code is None." msgstr "_create_volume_name,id_code 為「無」。" msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession,找不到「抄寫服務」" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." 
msgstr "" "_delete_copysession,未定義複製階段作業類型!複製階段作業:%(cpsession)s,複" "製類型:%(copytype)s。" #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession,複製階段作業:%(cpsession)s,作業:%(operation)s,回覆" "碼:%(rc)lu,錯誤:%(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume,磁區名稱:%(volumename)s,回覆碼:%(rc)lu,錯誤:" "%(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "_delete_volume,磁區名稱:%(volumename)s,找不到「儲存體配置服務」。" #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service,類別名稱:%(classname)s,呼叫方法,無法連接至 " "ETERNUS。" msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "_extend_volume_op:不支援延伸具有 Snapshot 的磁區。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,連接器:%(connector)s,協助程式:" "FUJITSU_AuthorizedTarget,無法連接至 ETERNUS。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group,連接器:%(connector)s,列舉實例名稱,無法連接至 " "ETERNUS。" #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,連接器:%(connector)s,協助程式名稱:" "FUJITSU_ProtocolControllerForUnit,無法連接至 ETERNUS。" #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession,參照名稱,vol_instance:%(vol_instance_path)s,無法連接至 " "ETERNUS。" #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service,類別名稱:%(classname)s,列舉實例名稱,無法連接至 " "ETERNUS。" #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "_find_initiator_names,連接器:%(connector)s,找不到起始器。" #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun,磁區名稱:%(volumename)s,列舉實例名稱,無法連接至 ETERNUS。" #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool,eternus_pool:%(eternus_pool)s,列舉實例,無法連接至 ETERNUS。" #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg,檔名:%(filename)s,標記名稱:%(tagname)s,資料為「無」!請編輯" "驅動程式配置檔並更正。" #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection,檔名:%(filename)s,IP:%(ip)s,埠:%(port)s,使用" "者:%(user)s,密碼:****,URL:%(url)s,失敗!" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties,ISCSI IP 清單:%(iscsiip_list)s,找不到 IQN。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." 
msgstr "" "_get_eternus_iscsi_properties,ISCSI IP:%(iscsiip)s,協助程式名稱:" "CIM_BindsTo,無法連接至 ETERNUS。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties,ISCSI IP:%(iscsiip)s,列舉實例名稱,無法連接" "至 ETERNUS。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties,ISCSI IP:%(iscsiip)s,取得實例,無法連接至 " "ETERNUS。" #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic:屬性標頭與值不相符。\n" "標頭:%(header)s\n" "值:%(row)s。" msgid "_get_host_from_connector failed to return the host name for connector." msgstr "_get_host_from_connector 無法傳回連接器的主機名稱。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc,從 aglist/vol_instance 取得 host-affinity 失敗,親緣性群組:" "%(ag)s,參照名稱,無法連接至 ETERNUS。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc,取得 host-affinity 實例失敗,磁區對映:%(volmap)s,取得實" "例,無法連接至 ETERNUS。" msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi,協助程式:FUJITSU_SAPAvailableForElement,無法連接至 " "ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "_get_mapdata_iscsi,親緣性群組:%(ag)s,參照名稱,無法連接至 ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi,vol_instance:%(vol_instance)s,參照名稱:" "CIM_ProtocolControllerForUnit,無法連接至 ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi,磁區對映:%(volmap)s,取得實例,無法連接至 ETERNUS。" msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "_get_target_port,列舉實例,無法連接至 ETERNUS。" #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "_get_target_port,通訊協定:%(protocol)s,找不到 target_port。" #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay:找不到名為 %s 的 Snapshot" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay:找不到磁區 ID %s" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay:必須指定 source-name。" msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties:無法取得主機-磁區連線的 FC 連線資訊。是否已針對 " "FC 連線適當地配置主機?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties:在下列磁區的 I/O 群組 %(gid)s 中找不到節點:" "%(vol)s。" #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." 
msgstr "" "_map_lun,vol_instance.path:%(vol)s,磁區名稱:%(volumename)s,volume_uid:" "%(uid)s,起始器:%(initiator)s,目標:%(tgt)s,親緣性群組清單:%(aglist)s,找" "不到「儲存體配置服務」。" #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun,vol_instance.path:%(volume)s,磁區名稱:%(volumename)s," "volume_uid:%(uid)s,親緣性群組清單:%(aglist)s,找不到「控制器配置服務」。" #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun,磁區名稱:%(volumename)s,volume_uid:%(volume_uid)s,親緣性群" "組:%(ag)s,回覆碼:%(rc)lu,錯誤:%(errordesc)s。" #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun,vol_instance.path: %(volume)s,協助程式名稱:" "CIM_ProtocolControllerForUnit,無法連接至 ETERNUS。" msgid "_update_volume_stats: Could not get storage pool data." msgstr "_update_volume_stats:無法取得儲存區資料。" #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete,cpsession:%(cpsession)s,copysession 狀態為 " "BROKEN。" #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "add_vdisk_copy 失敗:存在磁區 %s 的副本。如果新增另一份副本,則將超過 2 份副" "本的限制。" msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "已開始 add_vdisk_copy,但預期儲存區中沒有 vdisk 副本。" #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants 必須是布林值,但卻取得 '%s'。" msgid "already created" msgstr "已建立" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "從遠端節點連接 Snapshot" #, python-format msgid "attribute %s not lazy-loadable" msgstr "屬性 %s 無法延遲載入" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "備份:%(vol_id)s 無法建立從 %(vpath)s 到%(bpath)s 的裝置固定鏈結。\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "備份:%(vol_id)s 無法從伺服器取得備份成功通知。\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "備份:%(vol_id)s 無法執行 dsmc,因為%(bpath)s 的裝置固定鏈結。\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "備份:%(vol_id)s 無法在 %(bpath)s 上執行 dsmc。\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "備份:%(vol_id)s 失敗。%(path)s 不是檔案。" #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "備份:%(vol_id)s 失敗。%(path)s 為非預期的檔案類型。支援區塊或一般檔案,實際" "檔案模式為 %(vol_mode)s。" #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." 
msgstr "備份:%(vol_id)s 失敗。無法取得 %(path)s 處磁區的實際路徑。" msgid "being attached by different mode" msgstr "正在以不同的模式進行連接" #, python-format msgid "call failed: %r" msgstr "呼叫失敗:%r" msgid "call failed: GARBAGE_ARGS" msgstr "呼叫失敗:GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "呼叫失敗:PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "呼叫失敗:PROG_MISMATCH:%r" msgid "call failed: PROG_UNAVAIL" msgstr "呼叫失敗:PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "找不到 lun-map,ig:%(ig)s 磁區:%(vol)s" msgid "can't find the volume to extend" msgstr "找不到要延伸的磁區" msgid "can't handle both name and index in req" msgstr "無法處理要求中的名稱及索引" msgid "cannot understand JSON" msgstr "無法理解 JSON" msgid "cannot understand XML" msgstr "無法理解 XML" #, python-format msgid "cg-%s" msgstr "cg-%s" msgid "cgsnapshot assigned" msgstr "已指派 cgsnapshot" msgid "cgsnapshot changed" msgstr "已變更 cgsnapshot" msgid "cgsnapshots assigned" msgstr "已指派 cgsnapshot" msgid "cgsnapshots changed" msgstr "已變更 cgsnapshot" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error:需要密碼或 SSH 私密金鑰以進行鑑別:請設定 " "san_password 或 san_private_key 選項。" msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error:無法判定系統 ID。" msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error:無法判定系統名稱。" msgid "check_hypermetro_exist error." msgstr "check_hypermetro_exist 錯誤。" #, python-format msgid "clone depth exceeds limit of %s" msgstr "複製深度超出了限制 (%s)" msgid "consistencygroup assigned" msgstr "已指派 consistencygroup" msgid "consistencygroup changed" msgstr "已變更 consistencygroup" msgid "control_location must be defined" msgstr "必須定義 control_location" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume,ETERNUS 中不存在「來源磁區」。" #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume,目標磁區實例名稱:%(volume_instancename)s,取得實例失" "敗。" msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume:來源及目的地大小不同。" #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume:來源磁區 %(src_vol)s 大小為 %(src_size)dGB,且不適合大" "小為 %(tgt_size)dGB 的目標磁區 %(tgt_vol)s。" msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src 必須是從 CG Snapshot 或來源 CG 進行建立。" msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src 僅支援一個 cgsnapshot 來源或一個一致性群組來" "源。不能使用多個來源。" msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src 支援一個 cgsnapshot 來源或一個一致性群組來" "源。不能使用多個來源。" #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy:來源 vdisk %(src)s (%(src_id)s) 不存在。" #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy:來源 vdisk %(src)s 不存在。" msgid "create_host: Host name is not unicode or string." 
msgstr "create_host:主機名稱不是 Unicode 或字串。" msgid "create_host: No initiators or wwpns supplied." msgstr "create_host:未提供任何起始器或 WWPN。" msgid "create_hypermetro_pair error." msgstr "create_hypermetro_pair 錯誤。" #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "create_snapshot,eternus_pool:%(eternus_pool)s,找不到儲存區。" #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot,Snapshot 名稱:%(snapshotname)s,來源磁區名稱:" "%(volumename)s,vol_instance.path:%(vol_instance)s,目的地磁區名稱:" "%(d_volumename)s,儲存區:%(pool)s,回覆碼:%(rc)lu,錯誤:%(errordesc)s。" #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot,磁區名稱:%(s_volumename)s,在 ETERNUS 上找不到來源磁區。" #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "create_snapshot,磁區名稱:%(volumename)s,找不到「抄寫服務」。" #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot:Snapshot 的磁區狀態必須為「可用」或「使用中」。無效狀態為 " "%s。" msgid "create_snapshot: get source volume failed." msgstr "create_snapshot:取得來源磁區時失敗。" #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "create_volume,磁區:%(volume)s,列舉實例,無法連接至 ETERNUS。" #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume,磁區:%(volume)s,磁區名稱:%(volumename)s,eternus_pool:" "%(eternus_pool)s,找不到「儲存體配置服務」。" #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_volume,磁區名稱:%(volumename)s,儲存區名稱:%(eternus_pool)s,回覆" "碼:%(rc)lu,錯誤:%(errordesc)s。" msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "create_volume_from_snapshot,ETERNUS 中不存在「來源磁區」。" #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot,目標磁區實例名稱:%(volume_instancename)s,取得" "實例失敗。" #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "create_volume_from_snapshot:Snapshot %(name)s 不存在。" #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot:Snapshot 狀態必須為「可用」,才能建立磁區。無效" "的狀態為:%s。" msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "create_volume_from_snapshot:來源及目的地大小不同。" msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "create_volume_from_snapshot:磁區大小與 Snapshot 型磁區不同。" msgid "deduplicated and auto tiering can't be both enabled." 
msgstr "無法同時啟用已刪除的重複層級和自動層級。" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "刪除:%(vol_id)s 無法執行 dsmc,因為引數無效,標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "刪除:%(vol_id)s 無法執行 dsmc,標準輸出:%(out)s\n" " 標準錯誤:%(err)s" msgid "delete_hypermetro error." msgstr "delete_hypermetro 錯誤。" #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator:找不到 %s。將繼續。" msgid "delete_replication error." msgstr "delete_replication 錯誤。" #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "正在刪除具有相依磁區的 Snapshot %(snapshot_name)s" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "正在刪除具有 Snapshot 的磁區 %(volume_name)s" msgid "detach snapshot from remote node" msgstr "將 Snapshot 從遠端節點分離" msgid "do_setup: No configured nodes." msgstr "do_setup:未配置節點。" msgid "element is not a child" msgstr "元素不是子項" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries 必須大於或等於 0" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "將物件寫入 Swift 時發生錯誤,Swift 中物件的 MD5 %(etag)s,與傳送至 Swift 的物" "件 MD5 %(md5)s 不同" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "extend_volume,eternus_pool:%(eternus_pool)s,找不到儲存區。" #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume,磁區:%(volume)s,磁區名稱:%(volumename)s,eternus_pool:" "%(eternus_pool)s,找不到「儲存體配置服務」。" #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume,磁區名稱:%(volumename)s,回覆碼:%(rc)lu,錯誤:" "%(errordesc)s,儲存區類型:%(pooltype)s。" #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "extend_volume,磁區名稱:%(volumename)s,找不到磁區。" msgid "failed to create new_volume on destination host" msgstr "無法在目的地主機上建立 new_volume" msgid "fake" msgstr "偽造" #, python-format msgid "file already exists at %s" msgstr "%s 處已存在檔案" msgid "fileno is not supported by SheepdogIOWrapper" msgstr "SheepdogIOWrapper 不支援 fileno" msgid "fileno() not supported by RBD()" msgstr "RBD() 不支援 fileno()" #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "檔案系統 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "flashsystem_multihostmap_enabled 已設定為 False,不容許多重主機對映。" "CMMVC6071E 未建立 VDisk 至主機的對映,因為VDisk 已對映至主機。" msgid "flush() not supported in this version of librbd" msgstr "此版本的 librbd 中不支援 flush()" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt = %(fmt)s 受 %(backing_file)s 支援" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt = %(fmt)s 受 %(backing_file)s 支援" msgid "force delete" msgstr "強制刪除" msgid "get_hyper_domain_id error." msgstr "get_hyper_domain_id 錯誤。" msgid "get_hypermetro_by_id error." 
msgstr "get_hypermetro_by_id 錯誤。" #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "get_iscsi_params:無法取得起始器 %(ini)s 的目標 IP,請檢查配置檔。" #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool:無法取得磁區 %s 的屬性" msgid "glance_metadata changed" msgstr "已變更 glance_metadata" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode 已設為 copy_on_write,但 %(vol)s 及 %(img)s 屬於不同的" "檔案系統。" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode 已設為 copy_on_write,但 %(vol)s 及 %(img)s 屬於不同的" "檔案集。" #, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "在 cinder.conf 中,hgst_group %(grp)s 和 hgst_user %(usr)s 必須對映至有效的使" "用者/群組" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "在叢集中找不到 cinder.conf 內指定的 hgst_net %(net)s" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "在 cinder.conf 中,hgst_redundancy 必須設定為 0(非 HA)或 1 (HA)。" msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "在 cinder.conf 中,hgst_space_mode 必須是八進位/整數" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "hgst_storage 伺服器 %(svr)s 的格式不是 :" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "在 cinder.conf 中,必須定義 hgst_storage_servers" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "HTTP 服務可能已在執行此作業的中途意外停用或置於維護狀態。" msgid "id cannot be None" msgstr "ID 不能為 None" #, python-format msgid "image %s not found" msgstr "找不到映像檔 %s" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "initialize_connection,磁區:%(volume)s,找不到「磁區」。" #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "initialize_connection:無法取得磁區 %s 的屬性。" #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "initialize_connection:遺漏磁區 %s 的磁區屬性。" #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection:在磁區 %(vol)s 的 I/O 群組 %(gid)s 中找不到節點。" #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection:未定義 vdisk %s。" #, python-format msgid "invalid user '%s'" msgstr "無效的使用者 '%s'" #, python-format msgid "iscsi portal, %s, not found" msgstr "找不到 iSCSI 入口網站 %s" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "使用通訊協定 'iSCSI' 時,必須在配置檔中設定 iscsi_ip_address。" msgid "iscsiadm execution failed. " msgstr "iscsiadm 執行失敗。" #, python-format msgid "key manager error: %(reason)s" msgstr "金鑰管理程式錯誤:%(reason)s" msgid "keymgr.fixed_key not defined" msgstr "未定義 keymgr.fixed_key" msgid "limit param must be an integer" msgstr "限制參數必須是整數" msgid "limit param must be positive" msgstr "限制參數必須是正數" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "manage_existing 無法管理已連接至主機的磁區。請先斷開此磁區與現有主機的連線," "然後再匯入" msgid "manage_existing requires a 'name' key to identify an existing volume." 
msgstr "manage_existing 需要 'name' 索引鍵來確認現有磁區。" #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot:管理磁區 %(vol)s 上的現有重播 %(ss)s 時發生錯誤" #, python-format msgid "marker [%s] not found" msgstr "找不到標記 [%s]" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "mdiskgrp 遺漏引用 %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "migration_policy 必須是 'on-demand' 或 'never',已傳遞:%s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "mkfs 在磁區 %(vol)s 上執行時失敗,錯誤訊息為:%(err)s。" msgid "mock" msgstr "模擬" msgid "mount.glusterfs is not installed" msgstr "未安裝 mount.glusterfs" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "DRBDmanage 找到多個資源具有名稱 %s" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "找到多個資源具有 Snapshot ID %s" msgid "name cannot be None" msgstr "名稱不能為 None" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path:找不到 NAVISECCLI 工具 %(path)s。" #, python-format msgid "no REPLY but %r" msgstr "沒有回覆,但 %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "在 DRBDmanage 中找不到 ID 為 %s 的 Snapshot" #, python-format msgid "not exactly one snapshot with id %s" msgstr "不止一個 Snapshot 具有 ID %s" #, python-format msgid "not exactly one volume with id %s" msgstr "不止一個磁區具有 ID %s" #, python-format msgid "obj missing quotes %s" msgstr "obj 遺漏引用 %s" msgid "open_access_enabled is not off." msgstr "未關閉 open_access_enabled。" msgid "progress must be an integer percentage" msgstr "進度必須是整數百分比" msgid "promote_replica not implemented." msgstr "未實作 promote_replica。" msgid "provider must be defined" msgstr "必須定義提供者" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "此磁區驅動程式需要 qemu-img %(minimum_version)s 或更高版本。現行 qemu-img 版" "本:%(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "未安裝 qemu-img,且映像檔的類型是 %s。如果未安裝 qemu-img,則只能使用原始映像" "檔。" msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "未安裝 qemu-img,且未指定磁碟格式。如果未安裝qemu-img,則只能使用原始映像檔。" msgid "rados and rbd python libraries not found" msgstr "找不到 rados 及 rbd python 程式庫" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted 只能是 'no'、'yes' 或 'only' 其中之一,不能是 %r" #, python-format msgid "replication_device should be configured on backend: %s." msgstr "應該在後端上配置 replication_device:%s。" #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "遺漏 backend_id 為 [%s] 的 replication_device。" #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover 失敗。找不到 %s。" msgid "replication_failover failed. 
Backend not configured for failover" msgstr "replication_failover 失敗。未配置後端以進行失效接手" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "還原:%(vol_id)s 無法執行 dsmc,因為%(bpath)s 上的引數無效。\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "還原:%(vol_id)s 無法在 %(bpath)s 上執行 dsmc。\n" " 標準輸出:%(out)s\n" " 標準錯誤:%(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "還原:%(vol_id)s 失敗。\n" " 標準輸出:%(out)s\n" "標準錯誤:%(err)s。" msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "restore_backup 已中斷,實際物件清單與meta 資料中儲存的物件清單不相符。" msgid "root element selecting a list" msgstr "根元素正在選取一個清單" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "rtslib_fb 遺漏成員 %s:您可能需要更新的 python-rtslib-fb。" msgid "san_ip is not set." msgstr "未設定 san_ip。" msgid "san_ip must be set" msgstr "必須設定 san_ip" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip:必要欄位配置。未設定 san_ip。" msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "未在 cinder.conf 中設定 Datera 驅動程式的 san_login 及/或 san_password。請設" "定此資訊並重新啟動 cinder-volume 服務。" msgid "serve() can only be called once" msgstr "只能呼叫 serve() 一次" msgid "service not found" msgstr "找不到服務" msgid "snapshot does not exist" msgstr "Snapshot 不存在" #, python-format msgid "snapshot id:%s not found" msgstr "找不到 Snapshot ID:%s" #, python-format msgid "snapshot-%s" msgstr "snapshot-%s" msgid "snapshots assigned" msgstr "已指派 Snapshot" msgid "snapshots changed" msgstr "已變更 Snapshot" #, python-format msgid "source vol id:%s not found" msgstr "找不到來源磁區 ID:%s" #, python-format msgid "source volume id:%s is not replicated" msgstr "未抄寫來源磁區 ID:%s" msgid "source-name cannot be empty." msgstr "source-name 不能是空的。" msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "source-name 格式應該是:'vmdk_path@vm_inventory_path'。" #, python-format msgid "status must be %s and" msgstr " 狀態必須是 %s,並且" msgid "status must be available" msgstr "狀態必須可用" msgid "stop_hypermetro error." msgstr "stop_hypermetro 錯誤。" msgid "subclasses must implement construct()!" msgstr "子類別必須實作 construct()!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo 失敗,正在繼續作業,就好像什麼都沒發生過" msgid "sync_hypermetro error." msgstr "sync_hypermetro 錯誤。" msgid "sync_replica not implemented." msgstr "未實作 sync_replica。" #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "targetcli 尚未安裝,無法建立預設目錄(%(default_path)s):%(exc)s" msgid "terminate_connection: Failed to get host name from connector." 
msgstr "terminate_connection:無法從連接器取得主機名稱。" msgid "timeout creating new_volume on destination host" msgstr "在目的地主機上建立 new_volume 時發生逾時" msgid "too many body keys" msgstr "主體金鑰太多" #, python-format msgid "umount: %s: not mounted" msgstr "卸載:%s:未裝載" #, python-format msgid "umount: %s: target is busy" msgstr "卸載:%s:目標在忙碌中" msgid "umount: : some other error" msgstr "卸載::其他某個錯誤" msgid "umount: : target is busy" msgstr "卸載::目標在忙碌中" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot:找不到名為 %s 的 Snapshot" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot:找不到磁區 ID %s" #, python-format msgid "unrecognized argument %s" msgstr "無法辨識的引數 %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "不支援的壓縮演算法:%s" msgid "valid iqn needed for show_target" msgstr "show_target 需要有效的 IQN" #, python-format msgid "vdisk %s is not defined." msgstr "未定義 vdisk %s。" msgid "vmemclient python library not found" msgstr "找不到 vmemclient Python 程式庫" #, python-format msgid "volume %s not found in drbdmanage" msgstr "在 DRBDmanage 中找不到磁區 %s" msgid "volume assigned" msgstr "已指派磁區" msgid "volume changed" msgstr "已變更磁區" msgid "volume does not exist" msgstr "磁區不存在" msgid "volume is already attached" msgstr "已連接磁區" msgid "volume is not local to this node" msgstr "磁區不是此節點的本端磁區" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "磁區大小 %(volume_size)d 太小,無法還原大小為 %(size)d 的備份。" #, python-format msgid "volume size %d is invalid." msgstr "磁區大小 %d 無效。" msgid "volume_type cannot be None" msgstr "volume_type 不能為 None" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "在一致性群組中建立磁區時,必須提供volume_type。" msgid "volume_type_id cannot be None" msgstr "volume_type_id 不能為 None" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "必須提供 volume_types,才能建立一致性群組 %(name)s。" #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "必須提供 volume_types,才能建立一致性群組 %s。" msgid "volumes assigned" msgstr "已指派磁區" msgid "volumes changed" msgstr "已變更磁區" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition:%s 已逾時。" #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "zfssa_manage_policy 內容需要設為 'strict' 或 'loose'。現行值為:%s。" msgid "{} is not a valid option." msgstr "{} 不是有效的選項。" cinder-8.0.0/cinder/locale/ko_KR/0000775000567000056710000000000012701406543017636 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/ko_KR/LC_MESSAGES/0000775000567000056710000000000012701406543021423 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po0000664000567000056710000035504712701406257025155 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # OpenStack Infra , 2015. #zanata # SeYeon Lee , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev22\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-25 10:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-25 02:49+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume %(volume_name)s. " "Failed to remove from new volume set %(new_vvs)s." msgstr "" "%(exception)s: 볼륨 %(volume_name)s의 다시 입력 되돌리기 중에 예외가 발생했습" "니다. 새 볼륨 세트 %(new_vvs)s에서 제거하는 데 실패했습니다." #, python-format msgid "" "%(exception)s: Exception during revert of retype for volume %(volume_name)s. " "Original volume set/QOS settings may not have been fully restored." msgstr "" "%(exception)s: 볼륨 %(volume_name)s의 다시 입력을 되돌리는 중에 예외가 발생했" "습니다. 원래 볼륨 세트/QOS 설정이 완전히 복구되지 않았을 수 있습니다." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" "stdout: %(out)s\n" "stderr: %(err)s\n" msgstr "" "%(fun)s: 예상치 못한 CLI 출력과 함께 실패했습니다. \n" "명령: %(cmd)s\n" "stdout: %(out)s\n" "stderr: %(err)s\n" #, python-format msgid "" "%(method)s %(url)s unexpected response status: %(response)s (expects: " "%(expects)s)." msgstr "" "%(method)s %(url)s 예상치 못한 응답 상태: %(response)s (expects: " "%(expects)s)." #, python-format msgid "%(name)s: %(value)s" msgstr "%(name)s: %(value)s" #, python-format msgid "%s" msgstr "%s" #, python-format msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" msgstr "'%(value)s'이(가) 추가 사양 '%(key)s'에 올바르지 않은 값임" msgid "A valid secondary target MUST be specified in order to failover." msgstr "장애 복구하려면 올바른 보조 대상을 지정해야 합니다." #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting create_snapshot operation!" msgstr "" "create_snapshot 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s의 계정" "을 찾을 수 없습니다." #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting delete_volume operation!" msgstr "" "delete_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s의 계정을 " "찾을 수 없습니다." #, python-format msgid "" "Account for Volume ID %s was not found on the SolidFire Cluster while " "attempting unmanage operation!" msgstr "" "관리 취소 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s의 계정을 찾" "을 수 없습니다." #, python-format msgid "AddHba error: %(wwn)s to %(srvname)s" msgstr "AddHba 오류: %(wwn)s - %(srvname)s" #, python-format msgid "Array Serial Number must be in the file %(fileName)s." msgstr "배열 일련 번호가 %(fileName)s 파일에 있어야 합니다." #, python-format msgid "Array mismatch %(myid)s vs %(arid)s" msgstr "%(myid)s과(와) %(arid)s 배열 불일치" #, python-format msgid "Array query failed - No response (%d)!" msgstr "배열 쿼리 실패 - 응답이 없음(%d)!" msgid "Array query failed. No capabilities in response!" msgstr "배열 쿼리 실패. 응답에 기능이 없습니다." msgid "Array query failed. No controllers in response!" msgstr "배열 쿼리 실패. 응답에 제어기가 없습니다." msgid "Array query failed. No global id in XML response!" msgstr "배열 쿼리 실패. XML 응답에 글로벌 id가 없습니다." msgid "Attaching snapshot from a remote node is not supported." msgstr "원격 노드에서 스냅샷을 연결하는 기능은 지원되지 않습니다." #, python-format msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." msgstr "요청 승인: %(zfssaurl)s 재시도: %(retry)d ." msgid "Backend returned err for lun export." msgstr "백엔드에서 lun 내보내기 오류를 리턴했습니다." 
#, python-format msgid "Backup id %s is not invalid. Skipping reset." msgstr "백업 id %s이(가) 올바르지 않습니다. 재설정을 건너뜁니다." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "백업 서비스 %(configured_service)s이(가) 확인을 지원하지 않습니다. 백업 ID " "%(id)s이(가) 확인되지 않습니다. 확인을 건너뜁니다." #, python-format msgid "Backup volume metadata failed: %s." msgstr "백업 볼륨 메타데이터 실패: %s." #, python-format msgid "Bad response from server: %(url)s. Error: %(err)s" msgstr "서버의 잘못된 응답: %(url)s. 오류: %(err)s" #, python-format msgid "" "CG snapshot %(cgsnap)s not found when creating consistency group %(cg)s from " "source." msgstr "" "소스에서 일관성 그룹 %(cg)s을(를) 작성할 때 CG 스냅샷 %(cgsnap)s을(를) 찾을 " "수 없습니다." #, python-format msgid "" "CLI fail: '%(cmd)s' = %(code)s\n" "out: %(stdout)s\n" "err: %(stderr)s" msgstr "" "CLI 실패: '%(cmd)s' = %(code)s\n" "출력: %(stdout)s\n" "오류: %(stderr)s" msgid "Call to Nova delete snapshot failed" msgstr "Nova를 호출하여 스냅샷을 삭제하는 데 실패" msgid "Call to Nova to create snapshot failed" msgstr "Nova를 호출하여 스냅샷을 작성하는 데 실패" #, python-format msgid "Call to json.loads() raised an exception: %s." msgstr "json.loads() 호출에서 예외 발생: %s." #, python-format msgid "Can not add the lun %(lun)s to consistency group %(cg_name)s." msgstr "lun %(lun)s을(를) 일관성 그룹 %(cg_name)s에 추가할 수 없습니다." #, python-format msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." msgstr "%(target_iqn)s(으)로 %(target_ip)s에서 검색할 수 없습니다." msgid "Can not open the recent url, login again." msgstr "최신 url을 열 수 없습니다. 다시 로그인하십시오." #, python-format msgid "Can not place new LUNs %(luns)s in consistency group %(cg_name)s." msgstr "일관성 그룹 %(cg_name)s에 새 LUNs %(luns)s을(를) 둘 수 없습니다." #, python-format msgid "Can not remove LUNs %(luns)s in consistency group %(cg_name)s." msgstr "일관성 그룹 %(cg_name)s의 LUNs %(luns)s을(를) 제거할 수 없습니다." #, python-format msgid "Can't find volume to map %(key)s, %(msg)s" msgstr "%(key)s을(를) 맵핑할 볼륨을 찾을 수 없음, %(msg)s" msgid "Can't open the recent url, relogin." msgstr "최신 url을 열 수 없음, 다시 로그인" #, python-format msgid "" "Cannot add and verify tier policy association for storage group : " "%(storageGroupName)s to FAST policy : %(fastPolicyName)s." msgstr "" "스토리지 그룹: %(storageGroupName)s의 계층 정책 연관을 확인하고 FAST 정책: " "%(fastPolicyName)s에 추가할 수 없습니다." #, python-format msgid "Cannot clone image %(image)s to volume %(volume)s. Error: %(error)s." msgstr "" "이미지 %(image)s을(를) 볼륨 %(volume)s에 복제할 수 없습니다. 오류: %(error)s." #, python-format msgid "Cannot create or find an initiator group with name %(igGroupName)s." msgstr "이름이 %(igGroupName)s인 개시자 그룹을 작성하거나 찾을 수 없습니다." #, python-format msgid "Cannot delete file %s." msgstr "%s 파일을 삭제할 수 없습니다." msgid "Cannot detect replica status." msgstr "복제본 상태를 발견할 수 없습니다." msgid "Cannot determine if Tiering Policies are supported." msgstr "계층 지정 정책이 지원되는지 판별할 수 없습니다." msgid "Cannot determine whether Tiering Policy is supported on this array." msgstr "이 배열에서 계층 지정 정책이 지원되는지 판별할 수 없습니다." #, python-format msgid "Cannot find Consistency Group %s" msgstr "일관성 그룹 %s을(를) 찾을 수 없음" #, python-format msgid "" "Cannot find a portGroup with name %(pgGroupName)s. The port group for a " "masking view must be pre-defined." msgstr "" "이름이 %(pgGroupName)s인 portGroup을 찾을 수 없습니다. 마스킹 보기의 포트 그" "룹을 사전 정의해야 합니다." #, python-format msgid "Cannot find the fast policy %(fastPolicyName)s." msgstr "빠른 정책 %(fastPolicyName)s을(를) 찾을 수 없습니다." #, python-format msgid "" "Cannot find the new masking view just created with name %(maskingViewName)s." 
msgstr "" "방금 작성했으며 이름이 %(maskingViewName)s인 새 마스킹 보기를 찾을 수 없습니" "다." #, python-format msgid "Cannot get QoS spec for volume %s." msgstr "볼륨 %s의 QoS 사양을 가져올 수 없습니다." #, python-format msgid "Cannot get port group from masking view: %(maskingViewName)s. " msgstr "마스킹 보기 %(maskingViewName)s에서 포트 그룹을 가져올 수 없습니다. " msgid "Cannot get port group name." msgstr "포트 그룹 이름을 가져올 수 없습니다." #, python-format msgid "Cannot get storage Group from job : %(storageGroupName)s." msgstr "작업에서 스토리지 그룹을 가져올 수 없음: %(storageGroupName)s." msgid "Cannot get storage system." msgstr "스토리지 시스템을 가져올 수 없습니다." #, python-format msgid "Caught error: %(type)s %(error)s" msgstr "오류 발견: %(type)s %(error)s" #, python-format msgid "" "Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s" msgstr "" "%(reason)s(으)로 인해 볼륨 이름을 %(tmp)s에서 %(orig)s(으)로 변경하는 데 실패" #, python-format msgid "" "Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s." msgstr "" "%(reason)s(으)로 인해 볼륨 이름을 %(tmp)s에서 %(orig)s(으)로 변경하는 데 실패" "했습니다." #, python-format msgid "Clone %s not in prepared state!" msgstr "복제본 %s이(가) 준비된 상태가 아닙니다." #, python-format msgid "Clone Volume:%(volume)s failed from source volume:%(src_vref)s" msgstr "소스 볼륨 :%(src_vref)s에서 볼륨:%(volume)s 복제 실패" #, python-format msgid "" "Clone volume \"%s\" already exists. Please check the results of \"dog vdi " "list\"." msgstr "" "복제 볼륨 \"%s\"이(가) 이미 있습니다. \"dog vdi list\"의 결과를 확인하십시오." #, python-format msgid "Cloning of volume %s failed." msgstr "볼륨 %s 복제에 실패했습니다." #, python-format msgid "" "CloudByte does not have a volume corresponding to OpenStack volume [%s]." msgstr "CloudByte에 OpenStack 볼륨 [%s]에 해당하는 볼륨이 없습니다." #, python-format msgid "" "CloudByte operation [%(operation)s] failed for volume [%(vol)s]. Exhausted " "all [%(max)s] attempts." msgstr "" "볼륨 [%(vol)s]의 CloudByte 조작 [%(operation)s]에 실패했습니다. [%(max)s] 시" "도수가 모두 소진되었습니다." #, python-format msgid "" "CloudByte snapshot information is not available for OpenStack volume [%s]." msgstr "OpenStack 볼륨 [%s]의 CloudByte 스냅샷 정보를 사용할 수 없습니다." #, python-format msgid "CloudByte volume information not available for OpenStack volume [%s]." msgstr "OpenStack 볼륨 [%s]의 CloudByte 볼륨 정보를 사용할 수 없습니다." #, python-format msgid "Cmd :%s" msgstr "Cmd :%s" #, python-format msgid "Commit clone failed: %(name)s (%(status)d)!" msgstr "복제본 커밋에 실패: %(name)s (%(status)d)!" #, python-format msgid "Commit failed for %s!" msgstr "%s의 커밋에 실패했습니다." #, python-format msgid "Compute cluster: %s not found." msgstr "컴퓨트 클러스터: %s을(를) 찾을 수 없음." #, python-format msgid "Configuration value %s is not set." msgstr "구성 값 %s을(를) 설정하지 않았습니다." #, python-format msgid "Conflict detected in Virtual Volume Set %(volume_set)s: %(error)s" msgstr "가상 볼륨 세트 %(volume_set)s에서 충돌 발견: %(error)s" #, python-format msgid "Connect to Flexvisor error: %s." msgstr "Flexvisor에 연결 오류: %s." #, python-format msgid "Connect to Flexvisor failed: %s." msgstr "Flexvisor에 연결 실패: %s." msgid "Connection error while sending a heartbeat to coordination backend." msgstr "하트비트를 조정 백엔드에 보내는 중에 연결 오류가 발생했습니다." #, python-format msgid "Connection to %s failed and no secondary!" msgstr "%s에 연결에 실패했으며 보조가 없습니다." #, python-format msgid "Consistency group %s: create failed" msgstr "일관성 그룹 %s: 작성 실패" #, python-format msgid "Controller GET failed (%d)" msgstr "제어기 GET 실패(%d)" #, python-format msgid "Copy offload workflow unsuccessful. %s" msgstr "오프로드 워크플로 복사에 실패했습니다. %s" #, python-format msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" 
msgstr "스냅샷 %(snap)s 볼륨 %(vol)s의 볼륨에 스냅샷을 복사하는 데 실패" #, python-format msgid "Could not GET allocation information (%d)!" msgstr "할당 정보를 가져올 수 없습니다(%d)." #, python-format msgid "Could not calculate node utilization for node %s." msgstr "노드 %s의 노드 활용도를 계산할 수 없습니다." #, python-format msgid "Could not connect to %(primary)s or %(secondary)s!" msgstr "%(primary)s 또는 %(secondary)s에 연결할 수 없습니다." #, python-format msgid "Could not create snapshot set. Error: '%s'" msgstr "스냅샷 세트를 작성할 수 없습니다. 오류: '%s'" msgid "Could not decode scheduler options." msgstr "스케줄러 옵션을 디코딩할 수 없습니다." #, python-format msgid "Could not delete failed image volume %(id)s." msgstr "이미지 볼륨 %(id)s을(를) 삭제할 수 없습니다." #, python-format msgid "Could not delete the image volume %(id)s." msgstr "이미지 볼륨 %(id)s을(를) 삭제할 수 없습니다." #, python-format msgid "" "Could not do delete of snapshot %s on filer, falling back to exec of \"rm\" " "command." msgstr "" "파일 프로그램에서 스냅샷 %s을(를) 삭제할 수 없어, \"rm\" 명령의 실행으로 장" "애 복구할 수 없습니다." #, python-format msgid "" "Could not do delete of volume %s on filer, falling back to exec of \"rm\" " "command." msgstr "" "파일 프로그램에서 볼륨 %s을(를) 삭제할 수 없어, \"rm\" 명령의 실행으로 장애 " "복구할 수 없습니다." #, python-format msgid "Could not find a host for consistency group %(group_id)s." msgstr "일관성 그룹 %(group_id)s의 호스트를 찾을 수 없습니다." #, python-format msgid "Could not find any hosts (%s)" msgstr "호스트(%s)를 찾을 수 없음" #, python-format msgid "" "Could not find port group : %(portGroupName)s. Check that the EMC " "configuration file has the correct port group name." msgstr "" "포트 그룹: %(portGroupName)s을(를) 찾을 수 없습니다. EMC 구성 파일에 올바른 " "포트 그룹 이름이 있는지 확인하십시오." #, python-format msgid "Could not find volume with name %(name)s. Error: %(error)s" msgstr "이름이 %(name)s인 볼륨을 찾을 수 없습니다. 오류: %(error)s" msgid "" "Could not get performance base counter name. Performance-based scheduler " "functions may not be available." msgstr "" "성능 기본 카운터 이름을 가져올 수 없습니다. 성능 기반 스케줄러 기능을 사용할 " "수 없습니다." #, python-format msgid "Could not get utilization counters from node %s" msgstr "노드 %s에서 활용도 카운터를 가져올 수 없음" #, python-format msgid "Could not log in to 3PAR array (%s) with the provided credentials." msgstr "제공된 자격 증명으로 3PAR 배열(%s)에 로그인할 수 없습니다." #, python-format msgid "Could not log in to LeftHand array (%s) with the provided credentials." msgstr "제공된 자격 증명으로 LeftHand 배열(%s)에 로그인할 수 없습니다." #, python-format msgid "Could not stat scheduler options file %(filename)s." msgstr "스케줄러 옵션 파일 %(filename)s의 통계를 낼 수 없습니다." #, python-format msgid "Could not validate device %s" msgstr "%s 장치를 검증할 수 없음" #, python-format msgid "Create cg snapshot %s failed." msgstr "cg 스냅샷 %s 작성에 실패했습니다." #, python-format msgid "" "Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " "(Exception: %(except)s)" msgstr "" "이미지 %(image_id)s의 clone_image_volume: %(volume_id)s 작성 실패(예외: " "%(except)s)" #, python-format msgid "Create consistency group %s failed." msgstr "일관성 그룹 %s을(를) 생성하는 데 실패했습니다." #, python-format msgid "" "Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." msgstr "스냅샷-%(snap)s에서 일관성 그룹 작성 실패." #, python-format msgid "Create consistency group from source %(source)s failed." msgstr "소스 %(source)s에서 일관성 그룹을 작성하는 데 실패했습니다." #, python-format msgid "" "Create consistency group from source cg-%(cg)s failed: " "ConsistencyGroupNotFound." msgstr "소스 cg-%(cg)s에서 일관성 그룹 작성 실패: ConsistencyGroupNotFound." #, python-format msgid "Create hypermetro error: %s." msgstr "hypermetro 작성 오류: %s." 
#, python-format msgid "" "Create new lun from lun for source %(src)s => destination %(dest)s failed!" msgstr "소스 %(src)s => 대상 %(dest)s의 lun에서 새 lun을 작성하는 데 실패" #, python-format msgid "Create pair failed. Error: %s." msgstr "쌍 작성 실패. 오류: %s." msgid "Create replication volume error." msgstr "복제 볼륨 작성 오류." #, python-format msgid "Create snapshot notification failed: %s" msgstr "스냅샷 작성 알림 실패: %s" #, python-format msgid "Create volume failed from snapshot: %s" msgstr "스냅샷에서 볼륨 작성 실패: %s" #, python-format msgid "Create volume notification failed: %s" msgstr "볼륨 작성 알림 실패: %s" #, python-format msgid "Creation of snapshot failed for volume: %s" msgstr "볼륨의 스냅샷 작성 실패: %s" #, python-format msgid "Creation of volume %s failed." msgstr "볼륨 %s 작성에 실패했습니다." msgid "" "Creation request failed. Please verify the extra-specs set for your volume " "types are entered correctly." msgstr "" "작성 요청에 실패했습니다. 볼륨 유형의 추가 사양 세트가 올바르게 입력되었는지 " "확인하십시오." msgid "DB error:" msgstr "DB 오류:" #, python-format msgid "DBError detected when purging from table=%(table)s" msgstr "table=%(table)s에서 제거할 때 DBError 발견" msgid "DBError encountered: " msgstr "DBError 발생: " msgid "DRBDmanage: too many assignments returned." msgstr "DRBDmanage: 너무 많은 할당이 리턴되었습니다." msgid "Default Storage Profile was not found." msgstr "기본 스토리지 프로파일을 찾을 수 없습니다." msgid "" "Default volume type is not found. Please check default_volume_type config:" msgstr "" "기본 볼륨 유형을 찾을 수 없습니다. default_volume_type 구성을 확인하십시오." #, python-format msgid "Delete cgsnapshot %s failed." msgstr "cgsnapshot %s 삭제에 실패했습니다." #, python-format msgid "Delete consistency group %s failed." msgstr "일관성 그룹 %s 삭제에 실패했습니다. " msgid "Delete consistency group failed to update usages." msgstr "일관성 그룹을 삭제하는 중 사용법을 업데이트하지 못했습니다." #, python-format msgid "Delete hypermetro error: %s." msgstr "hypermetro 삭제 오류: %s." msgid "Delete replication error." msgstr "복제 삭제 오류." msgid "Delete snapshot failed, due to snapshot busy." msgstr "사용 중인 스냅샷으로 인해 스냅샷을 삭제하는 데 실패했습니다." #, python-format msgid "Delete snapshot notification failed: %s" msgstr "스냅샷 삭제 알림 실패: %s" #, python-format msgid "Delete volume notification failed: %s" msgstr "볼륨 삭제 알림 실패: %s" #, python-format msgid "Deleting snapshot %s failed" msgstr "스냅샷 %s 삭제 실패" #, python-format msgid "Deleting zone failed %s" msgstr "구역 삭제 실패 %s" #, python-format msgid "Deletion of volume %s failed." msgstr "볼륨 %s 삭제에 실패했습니다." #, python-format msgid "Destination Volume Group %s does not exist" msgstr "대상 볼륨 그룹 %s이(가) 없음" #, python-format msgid "Detach attachment %(attach_id)s failed." msgstr "첨부 파일 %(attach_id)s의 연결을 끊는 데 실패했습니다." #, python-format msgid "Detach migration source volume failed: %(err)s" msgstr "마이그레이션 소스 볼륨의 연결을 해제하는 데 실패: %(err)s" msgid "Detach volume failed, due to remove-export failure." msgstr "내보내기 제거 실패로 인해 볼륨 연결 해제에 실패했습니다." msgid "Detach volume failed, due to uninitialized driver." msgstr "초기화되지 않는 드라이버로 인해 볼륨 연결 해제에 실패했습니다." msgid "Detaching snapshot from a remote node is not supported." msgstr "원격 노드에서 스냅샷의 연결을 해제하는 기능은 지원되지 않습니다." #, python-format msgid "Did not find expected column name in lsvdisk: %s." msgstr "lsvdisk에 예상 열 이름을 찾지 못함: %s." msgid "Differential restore failed, trying full restore" msgstr "차등 복원 실패, 전체 복원 시도" #, python-format msgid "Disable replication on volume failed with message: %s" msgstr "볼륨에서 복제를 비활성화하는 데 실패하고 다음 메시지가 표시됨: %s" #, python-format msgid "Disconnection failed with message: %(msg)s." msgstr "연결 해제에 실패하고 다음 메시지가 표시됨: %(msg)s." msgid "Driver reported error during replication failover." 
msgstr "복제 장애 복구 중에 드라이버에서 오류를 보고했습니다." #, python-format msgid "" "Driver-based migration of volume %(vol)s failed. Move from %(src)s to " "%(dst)s failed with error: %(error)s." msgstr "" "볼륨 %(vol)s의 드라이버 기반 마이그레이션에 실패했습니다. %(src)s에서 " "%(dst)s(으)로 이동이 오류로 인해 실패: %(error)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "볼륨 %(vol)s을(를) 연결하는 중에 오류가 발생했습니다. " #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "그룹 작성 오류: %(groupName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s of Volume: %(lun)s in Pool: %(pool)s, " "Project: %(project)s Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "%(pool)s 풀에 있는 %(project)s 프로젝트의 %(lun)s 볼륨에서 %(snapshot)s 스냅" "샷을 가져오는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "Error JSONDecodeError. %s" msgstr "JSONDecodeError 오류. %s" #, python-format msgid "" "Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool: " "%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "%(pool)s 풀 %(project)s 프로젝트에서 InitiatorGroup: %(initiatorgroup)s으로 " "볼륨 %(lun)s을 설정하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." #, python-format msgid "Error TypeError. %s" msgstr "TypeError 오류. %s" msgid "Error activating LV" msgstr "LV 활성화 오류" msgid "Error adding HBA to server" msgstr "HBA를 서버에 추가하는 중 오류 발생" #, python-format msgid "Error attaching volume %s" msgstr "볼륨 %s 연결 오류" #, python-format msgid "Error changing Storage Profile for volume %(original)s to %(name)s" msgstr "" "볼륨의 스토리지 프로파일을 %(original)s에서 %(name)s(으)로 변경 중 오류 발생" #, python-format msgid "Error cleaning up failed volume creation. Msg - %s." msgstr "실패한 볼륨 작성을 정리하는 중에 오류가 발생했습니다. 메시지 - %s." msgid "Error cloning volume" msgstr "볼륨 복제 오류" msgid "Error closing channel." msgstr "채널 닫기 오류." #, python-format msgid "" "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" "'%(method)s', %(extra)s의 Glance 서버 '%(netloc)s'에 접속하는 중 오류가 발생" "했습니다." msgid "Error copying key." msgstr "키 복사 오류." msgid "Error creating Barbican client." msgstr "Barbican 클라이언트 생성 오류." #, python-format msgid "Error creating QOS rule %s" msgstr "QOS 규칙 %s 작성 오류" msgid "Error creating Volume" msgstr "볼륨 작성 오류" msgid "Error creating Volume Group" msgstr "볼륨 그룹 작성 오류" msgid "Error creating chap record." msgstr "chap 레코드를 작성하는 중에 오류가 발생했습니다." msgid "Error creating cloned volume" msgstr "복제된 볼륨 작성 오류" msgid "Error creating key." msgstr "키 작성 오류." msgid "Error creating snapshot" msgstr "스냅샷 작성 오류" msgid "Error creating volume" msgstr "볼륨 작성 오류" #, python-format msgid "Error creating volume. Msg - %s." msgstr "볼륨을 작성하는 중에 오류가 발생했습니다. 메시지 - %s." msgid "Error deactivating LV" msgstr "LV 비활성화 오류" msgid "Error deleting key." msgstr "키 삭제 오류." msgid "Error deleting snapshot" msgstr "스냅샷 삭제 에러" msgid "Error deleting volume" msgstr "볼륨 삭제 에러" #, python-format msgid "Error detaching snapshot %(snapshot)s, due to remove export failure." msgstr "" "내보내기 제거 실패로 인해 스냅샷 %(snapshot)s의 연결을 해제하는 중 오류가 발" "생했습니다." #, python-format msgid "Error detaching volume %(volume)s, due to remove export failure." msgstr "" "내보내기 제거 실패로 인해 볼륨 %(volume)s의 연결을 해제하는 중 오류가 발생했" "습니다." #, python-format msgid "Error detaching volume %s" msgstr "볼륨 %s 연결 해제 오류" #, python-format msgid "Error disassociating storage group from policy: %s." msgstr "정책에서 스토리지 그룹의 연관을 해제하는 중 오류 발생: %s." msgid "Error during re-export on driver init." 
msgstr "드라이버 초기화 시 다시 내보내는 중에 오류가 발생했습니다." #, python-format msgid "" "Error encountered during failover on host: %(host)s invalid target ID " "%(backend_id)" msgstr "" "호스트에서 장애 복구 중에 오류 발생: %(host)s 올바르지 않은 대상 ID " "%(backend_id)" msgid "" "Error encountered on Cinder backend during thaw operation, service will " "remain frozen." msgstr "" "thaw 조작 중에 Cinder 백엔드에서 오류 발생, 서비스가 동결된 상태로 남습니다." msgid "Error executing SSH command." msgstr "SSH 명령 실행 오류." msgid "Error executing command via ssh." msgstr "ssh를 통해 명령 실행 중에 오류 발생." #, python-format msgid "Error executing command via ssh: %s" msgstr "ssh를 통해 명령 실행 중에 오류 발생: %s" #, python-format msgid "Error expanding volume %s." msgstr "볼륨 %s 확장 오류." msgid "Error extending Volume" msgstr "볼륨 확장 오류" msgid "Error extending volume" msgstr "볼륨 확장 오류" #, python-format msgid "Error extending volume %(id)s. Ex: %(ex)s" msgstr "볼륨 %(id)s 확장 오류. 예: %(ex)s" #, python-format msgid "Error extending volume: %(vol)s. Exception: %(ex)s" msgstr "볼륨 확장 오류: %(vol)s. 예외: %(ex)s" #, python-format msgid "Error finding replicated pg snapshot on %(secondary)s." msgstr "%(secondary)s에서 복제된 pg 스냅샷을 찾는 중 오류가 발생했습니다." #, python-format msgid "Error finding target pool instance name for pool: %(targetPoolName)s." msgstr "풀의 대상 풀 인스턴스 이름을 찾는 중 오류 발생: %(targetPoolName)s." #, python-format msgid "Error getting FaultDomainList for %s" msgstr "%s의 FaultDomainList 가져오기 오류" #, python-format msgid "Error getting LUN attribute. Exception: %s" msgstr "LUN 속성 가져오기 오류. 실행: %s" msgid "Error getting active FC target ports." msgstr "활성 FC 대상 포트를 가져오는 중 오류가 발생했습니다." msgid "Error getting active ISCSI target iqns." msgstr "활성 ISCSI 대상 iqns를 가져오는 중 오류가 발생했습니다." msgid "Error getting active ISCSI target portals." msgstr "활성 ISCSI 대상 포털을 가져오는 중 오류가 발생했습니다." msgid "Error getting array, pool, SLO and workload." msgstr "배열, 풀, SLO 및 워크로드를 가져오는 중 오류가 발생했습니다." msgid "Error getting chap record." msgstr "chap 레코드를 가져오는 중에 오류가 발생했습니다." #, python-format msgid "Error getting iSCSI target info from EVS %(evs)s." msgstr "EVS %(evs)s에서 iSCSI 대상을 가져오는 중에 오류가 발생했습니다." msgid "Error getting key." msgstr "키 가져오기 오류." msgid "Error getting name server info." msgstr "이름 서버 정보 가져오기 오류." msgid "Error getting secret data." msgstr "시크릿 데이터 가져오기 오류." msgid "Error getting secret metadata." msgstr "시크릿 메타데이터 가져오기 오류." msgid "Error getting show fcns database info." msgstr "표시 fcns 데이터베이스 정보 가져오기 오류." msgid "Error getting target pool name and array." msgstr "대상 풀 이름과 배열을 가져오는 중 오류가 발생했습니다." #, python-format msgid "Error happened during storage pool querying, %s." msgstr "스토리지 풀 쿼리 중에 오류 발생, %s." #, python-format msgid "Error has occurred: %s" msgstr "오류 발생: %s" #, python-format msgid "Error in copying volume: %s" msgstr "볼륨 복사 오류: %s" #, python-format msgid "" "Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d " "with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" msgstr "" "볼륨 크기 확장 오류: 볼륨: %(volume)s Vol_Size: %(vol_size)d 포함된 스냅샷: " "%(snapshot)s Snap_Size: %(snap_size)d" #, python-format msgid "Error in workflow copy from cache. %s." msgstr "캐시에서 워크플로우를 복사하는 데 실패했습니다. %s." #, python-format msgid "Error invalid json: %s" msgstr "올바르지 않은 JSON 오류: %s" msgid "Error manage existing get volume size." msgstr "기존 볼륨 크기 가져오기를 관리하는 중 오류가 발생했습니다." msgid "Error manage existing volume." msgstr "기존 볼륨 관리 오류." 
#, python-format msgid "Error managing replay %s" msgstr "재생 관리 오류 %s" msgid "Error mapping VDisk-to-host" msgstr " VDisk-호스트 맵핑 오류" #, python-format msgid "Error mapping volume: %s" msgstr "볼륨 맵핑 오류: %s" #, python-format msgid "" "Error migrating volume: %(volumename)s. to target pool %(targetPoolName)s." msgstr "" "볼륨 %(volumename)s을(를) 대상 풀 %(targetPoolName)s(으)로 마이그레이션하는 " "중에 오류가 발생했습니다." #, python-format msgid "Error migrating volume: %s" msgstr "볼륨 마이그레이션 오류: %s" #, python-format msgid "" "Error occurred in the volume driver when updating consistency group " "%(group_id)s." msgstr "" "일관성 그룹 %(group_id)s을(를) 업데이트할 때 볼륨 드라이버에서 오류가 발생했" "습니다." msgid "" "Error occurred when adding hostgroup and lungroup to view. Remove lun from " "lungroup now." msgstr "" "볼 hostgroup 및 lungroup을 추가할 때 오류가 발생했습니다. 이제 lungroup에서 " "lun을 제거합니다." #, python-format msgid "" "Error occurred when building request spec list for consistency group %s." msgstr "일관성 그룹 %s의 요청 사양 목록을 빌드할 때 오류가 발생했습니다." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "cg 스냅샷 %s을(를) 작성하는 중에 오류가 발생했습니다." #, python-format msgid "" "Error occurred when creating cloned volume in the process of creating " "consistency group %(group)s from source CG %(source_cg)s." msgstr "" "소스 CG %(source_cg)s에서 일관성 그룹 %(group)s을(를) 작성하는 프로세스 중에 " "복제된 볼륨을 작성할 때 오류가 발생했습니다." #, python-format msgid "" "Error occurred when creating consistency group %(cg)s from cgsnapshot " "%(cgsnap)s." msgstr "" "cgsnapshot %(cgsnap)s에서 일관성 그룹 %(cg)s을(를) 작성하는 중에 오류가 발생" "했습니다. " #, python-format msgid "" "Error occurred when creating consistency group %(group)s from cgsnapshot " "%(cgsnap)s." msgstr "" "cgsnapshot %(cgsnap)s에서 일관성 그룹 %(group)s을(를) 작성하는 중에 오류가 발" "생했습니다. " #, python-format msgid "" "Error occurred when creating consistency group %(group)s from source CG " "%(source_cg)s." msgstr "" "소스 CG %(source_cg)s에서 일관성 그룹 %(group)s을(를) 작성할 때 오류가 발생했" "습니다." #, python-format msgid "Error occurred when creating consistency group %s." msgstr "일관성 그룹 %s을(를) 작성하는 중에 오류가 발생했습니다. " #, python-format msgid "" "Error occurred when creating volume entry from snapshot in the process of " "creating consistency group %(group)s from cgsnapshot %(cgsnap)s." msgstr "" "cgsnapshot %(cgsnap)s에서 일관성 그룹 %(group)s을(를) 작성하는 프로세스 중에 " "스냅샷에서 볼륨 항목을 작성할 때 오류가 발생했습니다." #, python-format msgid "Error occurred when updating consistency group %(group_id)s." msgstr "일관성 그룹 %(group_id)s을(를) 업데이트하는 중에 오류가 발생했습니다. " #, python-format msgid "Error occurred while cloning backing: %s during retype." msgstr "다시 입력 중에 지원 %s을(를) 복제하는 중에 오류가 발생했습니다." #, python-format msgid "Error occurred while copying %(src)s to %(dst)s." msgstr "%(src)s을(를) %(dst)s(으)로 복사하는 중에 오류가 발생했습니다." #, python-format msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." msgstr "" "이미지 %(id)s을(를) 볼륨 %(vol)s(으)로 복사하는 중에 오류가 발생했습니다." #, python-format msgid "Error occurred while copying image: %(image_id)s to %(path)s." msgstr "" "이미지: %(image_id)s을(를) %(path)s에 복사하는 중에 오류가 발생했습니다." msgid "Error occurred while creating temporary backing." msgstr "임시 지원을 작성하는 중에 오류가 발생했습니다." #, python-format msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." msgstr "" "이미지 %(image_id)s에서 볼륨 %(id)s을(를) 작성하는 중에 오류가 발생했습니다." msgid "Error occurred while selecting datastore." msgstr "데이터 저장소를 선택하는 중에 오류가 발생했습니다." #, python-format msgid "Error on adding lun to consistency group. %s" msgstr "일관성 그룹에 lun을 추가하는 중에 오류가 발생했습니다. 
%s" #, python-format msgid "Error on enable compression on lun %s." msgstr "lun %s에서 압축을 사용하는 중에 오류가 발생했습니다." #, python-format msgid "" "Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" msgstr "" "%(command)s 실행 오류. 오류 코드: %(exit_code)d 오류 메시지: %(result)s" #, python-format msgid "" "Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" msgstr "실행 명령 오류. 오류 코드: %(exit_code)d 오류 메시지: %(result)s" msgid "Error parsing array from host capabilities." msgstr "호스트 기능에서 배열을 구문 분석하는 중 오류가 발생했습니다." msgid "Error parsing array, pool, SLO and workload." msgstr "배열, 풀, SLO 및 워크로드를 구문 분석하는 중 오류가 발생했습니다." msgid "Error parsing target pool name, array, and fast policy." msgstr "" "대상 풀 이름, 배열 및 빠른 정책을 구문 분석하는 중 오류가 발생했습니다." #, python-format msgid "" "Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s" msgstr "" "%(volume_name)s에서 볼륨 %(lun_name)s을(를) 프로비저닝하는 중에 오류가 발생했" "습니다. 세부 사항: %(ex)s" msgid "Error querying thin pool about data_percent" msgstr "data_percent에 대한 thin 풀 쿼리 오류" msgid "Error renaming logical volume" msgstr "논리 볼륨의 이름 변경 오류" #, python-format msgid "Error renaming volume %(original)s to %(name)s" msgstr "볼륨의 이름을 %(original)s에서 %(name)s(으)로 변경하는 중 오류 발생" #, python-format msgid "Error resolving host %(host)s. Error - %(e)s." msgstr "호스트 %(host)s을(를) 분석하는 중에 오류가 발생했습니다. 오류 - %(e)s." #, python-format msgid "Error retrieving LUN %(vol)s number" msgstr "LUN %(vol)s 번호 검색 오류" #, python-format msgid "Error running SSH command: \"%s\"." msgstr "SSH 명령 실행 중 오류: \"%s\"" #, python-format msgid "Error running SSH command: %s" msgstr "SSH 명령 실행 중 오류: %s" msgid "Error running command." msgstr "명령 실행 오류." #, python-format msgid "" "Error scheduling %(volume_id)s from last vol-service: %(last_host)s : %(exc)s" msgstr "" "볼륨-서비스 %(last_host)s : %(exc)s에서 %(volume_id)s을(를) 스케줄링하는 중 " "오류 발생" msgid "Error sending a heartbeat to coordination backend." msgstr "하트비트를 조정 백엔드에 보내는 중에 오류가 발생했습니다." #, python-format msgid "Error setting Flash Cache policy to %s - exception" msgstr "플래시 캐시 정책을 %s(으)로 설정하는 중 오류 발생 - 예외" msgid "Error starting coordination backend." msgstr "조정 백엔드를 시작하는 중에 오류가 발생했습니다." msgid "Error storing key." msgstr "키 저장 오류." #, python-format msgid "Error trying to change %(opt)s from %(old)s to %(new)s" msgstr "%(opt)s을(를) %(old)s에서 %(new)s(으)로 변경하려는 중 오류 발생" #, python-format msgid "Error unmanaging replay %s" msgstr "재생 %s 관리 취소 오류" #, python-format msgid "Error unmapping volume: %s" msgstr "볼륨 맵핑 해제 오류: %s" #, python-format msgid "Error verifying LUN container %(bkt)s" msgstr "LUN 컨테이너 %(bkt)s 확인 오류" #, python-format msgid "Error verifying iSCSI service %(serv)s on host %(hst)s" msgstr "호스트 %(hst)s에서 iSCSI 서비스 %(serv)s을(를) 확인하는 중 오류 발생" msgid "Error: unable to snap replay" msgstr "오류: 재생을 스냅할 수 없음" #, python-format msgid "Exception cloning volume %(name)s from source volume %(source)s." msgstr "" "소스 볼륨 %(source)s에서 볼륨 %(name)s을(를) 복제하는 중 예외가 발생했습니다." #, python-format msgid "Exception creating LUN %(name)s in pool %(pool)s." msgstr "풀 %(pool)s에서 LUN %(name)s을(를) 작성하는 중 예외가 발생했습니다." #, python-format msgid "Exception creating vol %(name)s on pool %(pool)s." msgstr "풀 %(pool)s에서 볼륨 %(name)s을(를) 작성하는 중 예외가 발생했습니다." #, python-format msgid "" "Exception creating volume %(name)s from source %(source)s on share %(share)s." msgstr "" "공유 %(share)s하는 소스 %(source)s에서 볼륨 %(name)s을(를) 작성하는 중 예외" "가 발생했습니다." 
#, python-format msgid "Exception details: %s" msgstr "예외 세부 사항: %s" #, python-format msgid "Exception during mounting %s" msgstr "%s 마운트 중 예외" #, python-format msgid "Exception during mounting %s." msgstr "%s 마운트 중 예외 발생." msgid "Exception during mounting." msgstr "마운트 중에 예외 발생" #, python-format msgid "Exception during snapCPG revert: %s" msgstr "snapCPG 되돌리기 중에 예외 발생: %s" msgid "Exception encountered: " msgstr "예외 발생:" #, python-format msgid "Exception handling resource: %s" msgstr "자원 처리 예외: %s" msgid "Exception in string format operation" msgstr "문자열 형식화 오퍼레이션의 예외" msgid "Exception loading extension." msgstr "확장을 로드하는 중에 예외가 발생했습니다." #, python-format msgid "Exception: %(ex)s" msgstr "예외: %(ex)s" #, python-format msgid "Exception: %s" msgstr "예외: %s" #, python-format msgid "Exception: %s." msgstr "예외: %s." #, python-format msgid "Exec of \"rm\" command on backing file for %s was unsuccessful." msgstr "%s 의 지원 파일에서 \"rm\" 명령을 실행하는 데 실패했습니다." #, python-format msgid "Exists snapshot notification failed: %s" msgstr "스냅샷 존재 알림 실패: %s" #, python-format msgid "Exists volume notification failed: %s" msgstr "볼륨 존재 알림 실패: %s" msgid "Extend volume failed." msgstr "볼륨 확장에 실패했습니다." #, python-format msgid "Extension of volume %s failed." msgstr "볼륨 %s 확장에 실패했습니다." msgid "" "Extra spec replication:mode must be set and must be either 'sync' or " "'periodic'." msgstr "" "추가 사양 replication:mode를 설정해야 하며 'sync' 또는 'periodic'이어야 합니" "다." msgid "" "Extra spec replication:sync_period must be greater than 299 and less than " "31622401 seconds." msgstr "" "추가 사양 replication:sync_period는 299 이상 31622401초 미만이어야 합니다." #, python-format msgid "Extra specs must be specified as capabilities:%s=' True'." msgstr "추가 사양은 capabilities:%s=' True'로 지정해야 합니다" msgid "" "Extra specs must be specified as replication_type=' sync' or ' " "async'." msgstr "" "추가 사양은 replication_type=' sync' 또는 ' async'로 지정해야 합니다." msgid "FAST is not supported on this array." msgstr "이 배열에서는 FAST가 지원되지 않습니다." #, python-format msgid "Failed collecting fcns database info for fabric %s" msgstr "패브릭 %s의 fcns 데이터베이스 정보 수집 실패" #, python-format msgid "Failed collecting name server info from fabric %s" msgstr "패브릭 %s에서 이름 서버 정보를 수집하는 데 실패" msgid "Failed collecting nscamshow" msgstr "nscamshow 수집 실패" msgid "Failed collecting nsshow info for fabric" msgstr "패브릭의 nsshow 정보를 수집하는 데 실패" #, python-format msgid "Failed collecting nsshow info for fabric %s" msgstr "패브릭 %s의 nsshow 정보를 수집하는 데 실패" msgid "Failed collecting show fcns database for fabric" msgstr "패브릭의 표시 fcns 데이터베이스 수집 실패" #, python-format msgid "Failed destroying volume entry %s" msgstr "볼륨 항목 %s 영구 삭제 실패" #, python-format msgid "Failed destroying volume entry: %s." msgstr "볼륨 항목 영구 삭제 실패: %s" #, python-format msgid "" "Failed fetching snapshot %(snapshot_id)s bootable flag using the provided " "glance snapshot %(snapshot_ref_id)s volume reference" msgstr "" "제공된 glance 스냅샷 %(snapshot_ref_id)s 볼륨 참조를 사용하여 스냅샷 " "%(snapshot_id)s 부트 가능 플래그를 가져오는 데 실패" #, python-format msgid "Failed getting active zone set from fabric %s" msgstr "패브릭 %s에서 활성 구역 세트를 가져오는 데 실패" #, python-format msgid "Failed getting zone status from fabric %s" msgstr "패브릭 %s의 구역 상태 가져오기 실패" #, python-format msgid "Failed image conversion during cache creation: %s" msgstr "캐시 작성 중에 이미지 전환 실패: %s" #, python-format msgid "" "Failed notifying about the snapshot action %(event)s for snapshot %(snp_id)s." msgstr "" "스냅샷 %(snp_id)s의 스냅샷 작업 %(event)s에 대해 알리는 데 실패했습니다." 
#, python-format msgid "" "Failed notifying about the volume action %(event)s for volume %(volume_id)s" msgstr "볼륨 %(volume_id)s의 볼륨 작업 %(event)s에 대해 알리는 데 실패" #, python-format msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "%(topic)s 페이로드 %(payload)s에 대한 알림 실패" #, python-format msgid "" "Failed recovery attempt to create iscsi backing lun for Volume ID:" "%(vol_id)s: %(e)s" msgstr "" "볼륨 ID:%(vol_id)s의 iscsi 지원 lun을 작성하기 위한 복구 시도에 실패: %(e)s" #, python-format msgid "Failed rolling back quota for %s reservations" msgstr "%s 예약 할당량을 롤백하는 데 실패" #, python-format msgid "Failed rolling back quota for %s reservations." msgstr "%s 예약 할당량을 롤백하는 데 실패." #, python-format msgid "" "Failed setting source volume %(source_volid)s back to its initial " "%(source_status)s status" msgstr "" "소스 볼륨 %(source_volid)s을(를) 초기 %(source_status)s 상태로 다시 설정하는 " "데 실패" #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage group " "for fast policy %(fastPolicyName)s. Please contact your sysadmin to get the " "volume returned to the default storage group." msgstr "" "빠른 정책 %(fastPolicyName)s의 기본 스토리지 그룹에 볼륨 %(volumeName)s을" "(를) 다시 추가하기 위한 롤백에 실패했습니다. 기본 스토리지 그룹에 리턴된 볼륨" "을 가져오려면 sysadmin에 문의하십시오." #, python-format msgid "" "Failed to Roll back to re-add volume %(volumeName)s to default storage group " "for fast policy %(fastPolicyName)s: Please contact your sys admin to get the " "volume re-added manually." msgstr "" "빠른 정책 %(fastPolicyName)s의 기본 스토리지 그룹에 볼륨 %(volumeName)s을" "(를) 다시 추가하기 위한 롤백에 실패했습니다. 수동으로 다시 추가한 볼륨을 가져" "오려면 sysadmin에 문의하십시오." #, python-format msgid "" "Failed to add %(volumeName)s to default storage group for fast policy " "%(fastPolicyName)s." msgstr "" "빠른 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹에 %(volumeName)s 을" "(를) 추가하지 못했습니다." #, python-format msgid "Failed to add %s to cg." msgstr "%s을(를) cg에 추가하는 데 실패했습니다." #, python-format msgid "Failed to add device to handler %s" msgstr "핸들러 %s에 장치를 추가하는 데 실패" #, python-format msgid "Failed to add initiator iqn %s to target" msgstr "개시자 iqn %s을(를) 대상에 추가하지 못함" #, python-format msgid "Failed to add initiator to group for SCST target %s" msgstr "SCST 대상 %s의 그룹에 개시자를 추가하는 데 실패" #, python-format msgid "Failed to add lun to SCST target id:%(vol_id)s: %(e)s" msgstr "SCST 대상 id:%(vol_id)s에 lun 추가 실패: %(e)s" #, python-format msgid "Failed to add multihost-access for volume \"%s\"." msgstr "볼륨 \"%s\"의 다중 호스트 액세스를 추가하는 데 실패했습니다." #, python-format msgid "" "Failed to add storage group %(storageGroupInstanceName)s to tier policy rule " "%(tierPolicyRuleInstanceName)s." msgstr "" "스토리지 그룹 %(storageGroupInstanceName)s을(를) 계층 정책 규칙 " "%(tierPolicyRuleInstanceName)s에 추가하지 못했습니다." #, python-format msgid "Failed to add target(port: %s)" msgstr "대상(포트: %s)을 추가하는 데 실패" msgid "Failed to apply replication:activereplay setting" msgstr "replication:activereplay 설정을 적용하는 데 실패" msgid "Failed to attach source volume for copy." msgstr "복사할 소스 볼륨을 연결하는 데 실패했습니다." #, python-format msgid "Failed to attach volume %(vol)s." msgstr "볼륨 %(vol)s에 연결하는 데 실패했습니다." msgid "Failed to authenticate user." msgstr "사용자를 인증하지 못했습니다." #, python-format msgid "Failed to check cluster status.(command: %s)" msgstr "클러스터 상태를 확인하는 데 실패했습니다(명령: %s)." #, python-format msgid "Failed to clone image volume %(id)s." msgstr "이미지 볼륨 %(id)s을(를) 복제하는 데 실패." #, python-format msgid "Failed to clone volume %(volume_id)s for image %(image_id)s." msgstr "" "이미지 %(image_id)s의 볼륨 %(volume_id)s을(를) 복제하는 데 실패했습니다." 
#, python-format msgid "Failed to clone volume.(command: %s)" msgstr "볼륨을 복제하는 데 실패했습니다(명령: %s)." #, python-format msgid "Failed to close disk device %s" msgstr "디스크 장치 %s 닫기 실패" #, python-format msgid "" "Failed to collect return properties for volume %(vol)s and connector " "%(conn)s." msgstr "" "%(vol)s 볼륨 및 %(conn)s 커넥터에 대한 리턴 특성을 수집하지 못했습니다." #, python-format msgid "Failed to commit reservations %s" msgstr "%s 예약을 커밋하는 데 실패" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "sheep 디먼에 연결하지 못했습니다. 주소: %(addr)s, 포트: %(port)s" #, python-format msgid "Failed to copy %(src)s to %(dest)s." msgstr "%(src)s을(를) %(dest)s에 복사하는 데 실패했습니다." #, python-format msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" msgstr "이미지 %(image_id)s을(를) 볼륨: %(volume_id)s에 복사하는 데 실패" #, python-format msgid "Failed to copy image to volume: %(volume_id)s" msgstr "볼륨: %(volume_id)s에 이미지를 복사하는 데 실패" #, python-format msgid "Failed to copy volume %(src)s to %(dest)s." msgstr "볼륨 %(src)s을(를) %(dest)s에 복사하는 데 실패했습니다." #, python-format msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "볼륨 %(vol1)s을(를) %(vol2)s에 복사하는 데 실패" #, python-format msgid "Failed to create %(conf)s for volume id:%(vol_id)s" msgstr "볼륨 id:%(vol_id)s의 %(conf)s을(를) 작성하는 데 실패" #, python-format msgid "Failed to create CG from CGsnapshot. Exception: %s" msgstr "CGSnapshot에서 CG 작성 실패. 예외: %s." #, python-format msgid "Failed to create CGSnapshot. Exception: %s." msgstr "CGSnapshot 작성 실패. 예외: %s." msgid "" "Failed to create SOAP client.Check san_ip, username, password and make sure " "the array version is compatible" msgstr "" "SOAP 클라이언트 작성에 실패했습니다. san_ip, 사용자 이름, 비밀번호를 확인하" "고 배열 버전이 호환되는지 확인하십시오." #, python-format msgid "" "Failed to create a first volume for storage group : %(storageGroupName)s." msgstr "" "스토리지 그룹: %(storageGroupName)s의 첫 번째 볼륨을 작성하는 데 실패했습니" "다." #, python-format msgid "Failed to create blkio cgroup '%(name)s'." msgstr "blkio cgroup '%(name)s'을(를) 작성하는 데 실패했습니다." #, python-format msgid "Failed to create clone of volume \"%s\"." msgstr "볼륨 \"%s\"의 복제본을 작성하는 데 실패했습니다." #, python-format msgid "Failed to create cloned volume %s." msgstr "복제된 볼륨 %s을(를) 작성하지 못했습니다." #, python-format msgid "Failed to create consistency group %(group_id)s." msgstr "일관성 그룹 %(group_id)s을(를) 작성하는 데 실패했습니다." #, python-format msgid "" "Failed to create default storage group for FAST policy : %(fastPolicyName)s." msgstr "" "FAST 정책: %(fastPolicyName)s에 대한 기본 스토리지 그룹을 작성하지 못했습니" "다." #, python-format msgid "Failed to create group to SCST target %s" msgstr "SCST 대상 %s에 그룹을 작성하는 데 실패" #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "%(storageSystemName)s에서 하드웨어 ID를 작성하지 못했습니다. " #, python-format msgid "" "Failed to create iscsi target for Volume ID: %(vol_id)s. Please ensure your " "tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" "볼륨 ID: %(vol_id)s의 iscsi 대상을 작성하지 못했습니다. tgtd 구성 파일에 " "'include %(volumes_dir)s/*'가 포함되었는지 확인하십시오." #, python-format msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "볼륨 ID:%(vol_id)s:에 대한 iscsi 대상을 작성하지 못함: %(e)s" #, python-format msgid "" "Failed to create iscsi target for volume id:%(vol_id)s. Please verify your " "configuration in %(volumes_dir)s'" msgstr "" "볼륨 id:%(vol_id)s에 대한 iscsi 대상을 작성하지 못했습니다. " "%(volumes_dir)s'에서 구성을 확인하십시오." #, python-format msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "볼륨 id:%(vol_id)s: %(e)s에 대한 iscsi 대상을 작성하지 못했습니다. 
" #, python-format msgid "Failed to create iscsi target for volume id:%s" msgstr "볼륨 id:%s에 대한 iscsi 대상을 작성하지 못했습니다. " #, python-format msgid "Failed to create iscsi target for volume id:%s." msgstr "볼륨 id:%s에 대한 iscsi 대상을 작성하지 못했습니다. " #, python-format msgid "Failed to create manage_existing flow: %(object_type)s %(object_id)s." msgstr "" "manage_existing 흐르을 작성하는 데 실패: %(object_type)s %(object_id)s." #, python-format msgid "Failed to create snapshot of volume \"%s\"." msgstr "볼륨 \"%s\"에 대한 스냅샷을 작성하지 못했습니다. " #, python-format msgid "Failed to create snapshot. (command: %s)" msgstr "스냅샷을 작성하는 데 실패했습니다(명령: %s)." #, python-format msgid "Failed to create transfer record for %s" msgstr "%s의 전송 레코드를 작성하는 데 실패" #, python-format msgid "Failed to create volume \"%s\"." msgstr "볼륨 \"%s\"을(를) 작성하지 못함 " #, python-format msgid "Failed to create volume %s" msgstr "%s 볼륨을 작성하지 못함 " #, python-format msgid "Failed to create volume %s." msgstr "볼륨 %s을(를) 작성하지 못했습니다. " #, python-format msgid "Failed to create volume from snapshot \"%s\"." msgstr "스냅샷 \"%s\"에서 볼륨을 작성하는 데 실패했습니다." #, python-format msgid "Failed to create volume. %s" msgstr "볼륨을 작성하지 못했습니다. %s" #, python-format msgid "Failed to create volume: %(name)s (%(status)s)" msgstr "볼륨을 작성하는 데 실패: %(name)s (%(status)s)" #, python-format msgid "Failed to created Cinder secure environment indicator file: %s" msgstr "Cinder 보안 환경 표시기 파일을 작성하는 데 실패: %s" #, python-format msgid "Failed to delete initiator iqn %s from target." msgstr "대상에서 개시자 iqn %s을(를) 삭제하지 못했습니다." #, python-format msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." msgstr "볼륨 %(vol)s의 스냅샷 %(snap)s을(를) 삭제하는 데 실패했습니다." #, python-format msgid "Failed to delete snapshot. (command: %s)" msgstr "스냅샷을 삭제하는 데 실패했습니다(명령: %s)." #, python-format msgid "" "Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " "%(exception)s." msgstr "" "CGSnapshot의 스냅샷 %(snap)s을(를) 삭제하는 데 실패했습니다. 예외: " "%(exception)s." #, python-format msgid "" "Failed to delete the snapshot %(snap)s of cgsnapshot: %(cgsnapshot_id)s. " "Exception: %(exception)s." msgstr "" "cgsnapshot: %(cgsnapshot_id)s의 스냅샷 %(snap)s을(를) 삭제하는 데 실패했습니" "다. 예외: %(exception)s." #, python-format msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." msgstr "CG의 볼륨 %(vol)s을(를) 삭제하는 데 실패했습니다. 예외: %(exception)s." #, python-format msgid "Failed to delete volume \"%s\"." msgstr "볼륨 \"%s\"을(를) 삭제하는 데 실패했습니다. " #, python-format msgid "Failed to delete volume %s" msgstr "볼륨 %s을(를) 삭제하는 데 실패했습니다." #, python-format msgid "Failed to delete volume. %s" msgstr "볼륨을 삭제하는 데 실패했습니다. %s" #, python-format msgid "Failed to ensure export of volume \"%s\"." msgstr "볼륨 \"%s\" 내보내기를 확인하는 데 실패했습니다." #, python-format msgid "Failed to ensure export of volume %s" msgstr "볼륨 %s 내보내기를 확인하는 데 실패했습니다." #, python-format msgid "Failed to export fiber channel target due to %s" msgstr "%s(으)로 인해 파이버 채널 대상을 내보내는 데 실패" #, python-format msgid "Failed to extend volume: %(vol)s to size: %(size)s GB." msgstr "볼륨 %(vol)s을(를) %(size)sGB 크기로 확장하는 데 실패했습니다." #, python-format msgid "" "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB." msgstr "" "%(current_size)sGB에서 %(new_size)sGB(으)로 extend_volume %(name)s을(를) 수행" "하는 데 실패했습니다." #, python-format msgid "Failed to failover volume %(volume_id)s to %(target)s: %(error)s." msgstr "" "볼륨 %(volume_id)s을(를) %(target)s(으)로 장애 복구하는 데 실패: %(error)s." #, python-format msgid "Failed to find %(s)s. Result %(r)s" msgstr "%(s)s을(를) 찾지 못했습니다. 
결과 %(r)s" #, python-format msgid "Failed to find available iSCSI targets for %s." msgstr "%s의 사용 가능한 iSCSI 대상을 찾지 못했습니다." msgid "Failed to get IQN!" msgstr "IQN을 가져오는 데 실패" msgid "Failed to get LUN information!" msgstr "LUN 정보를 가져오는 데 실패했습니다." #, python-format msgid "Failed to get allocation information (%d)!" msgstr "할당 정보를 가져오는 데 실패했습니다(%d)." #, python-format msgid "Failed to get allocation information: %(host)s (%(status)d)!" msgstr "할당 정보: %(host)s (%(status)d)을(를) 가져오는 데 실패했습니다." #, python-format msgid "Failed to get device number for throttling: %(error)s" msgstr "조절을 위해 장치 번호를 가져오는 데 실패: %(error)s" #, python-format msgid "" "Failed to get driver initiator data for initiator %(initiator)s and " "namespace %(namespace)s" msgstr "" "개시자 %(initiator)s 및 네임스페이스 %(namespace)s의 드라이버 개시자 데이터" "를 가져오는 데 실패" #, python-format msgid "Failed to get fiber channel info from storage due to %(stat)s" msgstr "%(stat)s(으)로 인해 스토리지에서 파이버 채널 정보를 가져오는 데 실패" #, python-format msgid "Failed to get fiber channel target from storage server due to %(stat)s" msgstr "" "%(stat)s(으)로 인해 스토리지 서버에서 파이버 채널 대상을 가져오는 데 실패" #, python-format msgid "Failed to get or create storage group %(storageGroupName)s." msgstr "" "스토리지 그룹 %(storageGroupName)s을(를) 가져오거나 작성하지 못했습니다." #, python-format msgid "Failed to get response: %s." msgstr "응답 가져오기 실패: %s." #, python-format msgid "Failed to get server info due to %(state)s." msgstr "%(state)s(으)로 인해 서버 정보를 가져오는 데 실패했습니다." msgid "Failed to get sns table" msgstr "sns 테이블 가져오기 실패" #, python-format msgid "Failed to get target wwpns from storage due to %(stat)s" msgstr "%(stat)s(으)로 인해 스토리지에서 대상 wwpns를 가져오는 데 실패" msgid "Failed to get updated stats from Datera Cluster." msgstr "Datera 클러스터에서 업데이트된 통계를 가져오는 데 실패했습니다." msgid "Failed to get updated stats from Datera cluster." msgstr "Datera 클러스터에서 업데이트된 통계를 가져오는 데 실패했습니다." #, python-format msgid "Failed to get volume status. %s" msgstr "볼륨 상태를 가져오는 데 실패했습니다. %s" msgid "Failed to initialize connection" msgstr "연결 초기화 실패" #, python-format msgid "Failed to initialize connection to volume \"%s\"." msgstr "볼륨 \"%s\"에 대한 연결을 초기화하는 데 실패했습니다." msgid "Failed to initialize connection." msgstr "연결을 초기화하는 데 실패했습니다." msgid "Failed to initialize driver." msgstr "드라이버를 초기화하는 데 실패했습니다." #, python-format msgid "Failed to issue df command for path %(path)s, error: %(error)s." msgstr "경로 %(path)s에서 df 명령 실행 실패, 오류: %(error)s." #, python-format msgid "Failed to issue mmgetstate command, error: %s." msgstr "mmgetstate 명령 실행 실패, 오류: %s." #, python-format msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." msgstr "경로 %(path)s에서 mmlsattr 명령 실행 실패, 오류: %(error)s." #, python-format msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" msgstr "경로 %(path)s에서 mmlsattr 명령 실행 실패, 오류: %(error)s." #, python-format msgid "Failed to issue mmlsconfig command, error: %s." msgstr "mmlsconfig 명령 실행 실패, 오류: %s." #, python-format msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." msgstr "경로 %(path)s에서 mmlsfs 명령 실행 실패, 오류: %(error)s." #, python-format msgid "Failed to issue mmlsfs command, error: %s." msgstr "mmlsfs 명령 실행 실패, 오류: %s." #, python-format msgid "Failed to load %s" msgstr "%s을(를) 로드하는 데 실패" msgid "Failed to load conder-volume" msgstr "conder-volume을 로드하는 데 실패" msgid "Failed to load osapi_volume" msgstr "osapi_volume을 로드하는 데 실패" #, python-format msgid "Failed to open iet session list for %s" msgstr "%s의 iet 세션 목록을 여는 데 실패" #, python-format msgid "Failed to open volume from %(path)s." 
msgstr "%(path)s에서 볼륨을 여는 데 실패했습니다." msgid "Failed to perform replication failover" msgstr "복제 장애 복구 수행 실패" #, python-format msgid "Failed to present volume %(name)s (%(status)d)!" msgstr "볼륨 %(name)s (%(status)d)을(를) 표시하는 데 실패했습니다." msgid "Failed to query migration status of LUN." msgstr "LUN의 마이그레이션 상태를 쿼리하지 못했습니다." msgid "Failed to re-export volume, setting to ERROR." msgstr "볼륨을 다시 내보내는 데 실패했습니다. ERROR로 설정합니다." #, python-format msgid "Failed to register image volume location %(uri)s." msgstr "이미지 볼륨 위치 %(uri)s을(를) 등록하는 데 실패했습니다." #, python-format msgid "" "Failed to remove %(volumeName)s from the default storage group for the FAST " "Policy." msgstr "" "FAST 정책에 대한 기본 스토리지 그룹에서 %(volumeName)s 볼륨을 제거하지 못했습" "니다." #, python-format msgid "Failed to remove %s from cg." msgstr "cg에서 %s을(를) 제거하는 데 실패했습니다." #, python-format msgid "Failed to remove LUN %s" msgstr "LUN %s 제거 실패" #, python-format msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" msgstr "볼륨 ID:%(vol_id)s:에 대한 iscsi 대상을 제거하지 못함: %(e)s" #, python-format msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "볼륨 id:%(vol_id)s: %(e)s에 대한 iscsi 대상을 제거하지 못했습니다. " #, python-format msgid "Failed to remove iscsi target for volume id:%s" msgstr "볼륨 id:%s에 대한 iscsi 대상을 제거하지 못했습니다. " #, python-format msgid "Failed to remove iscsi target for volume id:%s." msgstr "볼륨 id:%s에 대한 iscsi 대상을 제거하지 못했습니다. " #, python-format msgid "Failed to rename %(new_volume)s into %(volume)s." msgstr "%(new_volume)s의 이름을 %(volume)s(으)로 바꾸지 못했습니다." msgid "Failed to rename the created snapshot, reverting." msgstr "작성된 스냅샷의 이름을 바꾸는 데 실패, 되돌리는 중입니다." #, python-format msgid "Failed to rename volume %(existing)s to %(new)s. Volume manage failed." msgstr "" "볼륨 %(existing)s의 이름을 %(new)s(으)로 변경하지 못했습니다. 볼륨 관리에 실" "패했습니다." #, python-format msgid "" "Failed to rename volume %(existing)s to %(new)s. Volume unmanage failed." msgstr "" "볼륨 %(existing)s의 이름을 %(new)s(으)로 변경하는 데 실패했습니다. 볼륨 관리 " "취소에 실패했습니다." #, python-format msgid "Failed to request async delete of migration source vol %(vol)s: %(err)s" msgstr "" "마이그레이션 소스 볼륨 %(vol)s의 비동기 삭제를 요청하는 데 실패: %(err)s" #, python-format msgid "" "Failed to resize vdi. Shrinking vdi not supported. vdi: %(vdiname)s new " "size: %(size)s" msgstr "" "vdi의 크기를 조정하는 데 실패했습니다. vdi 축소는 지원되지 않습니다. vdi: " "%(vdiname)s 새 크기: %(size)s" #, python-format msgid "" "Failed to resize vdi. Too large volume size. vdi: %(vdiname)s new size: " "%(size)s" msgstr "" "vdi의 크기를 조정하는 데 실패했습니다. 볼륨 크기가 너무 큽니다. vdi: " "%(vdiname)s 새 크기: %(size)s" #, python-format msgid "Failed to resize vdi. vdi not found. %s" msgstr "vdi의 크기를 조정하는 데 실패했습니다. vdi를 찾을 수 없습니다. %s" #, python-format msgid "Failed to resize vdi. vdi: %(vdiname)s new size: %(size)s" msgstr "" "vdi의 크기를 조정하는 데 실패했습니다. vdi: %(vdiname)s 새 크기: %(size)s" #, python-format msgid "Failed to resize volume %(volume_id)s, error: %(error)s." msgstr "볼륨 %(volume_id)s 크기 조정 실패, 오류: %(error)s." #, python-format msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "%s get_by_account!에서 SolidFire-ID 볼륨을 검색하지 못했습니다. " #, python-format msgid "" "Failed to return volume %(volumeName)s to original storage pool. Please " "contact your system administrator to return it to the correct location." msgstr "" "볼륨 %(volumeName)s을(를) 원래 스토리지 풀로 리턴하는 데 실패했습니다. 시스" "템 관리자에게 문의하여 올바른 위치로 리턴하십시오." 
#, python-format msgid "Failed to roll back reservations %s" msgstr "%s 예약을 철회하는 데 실패" #, python-format msgid "Failed to run task %(name)s: %(cause)s" msgstr "작업 %(name)s 실행 실패: %(cause)s" #, python-format msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "schedule_%(method)s 실패: %(ex)s" #, python-format msgid "Failed to send request: %s." msgstr "요청 보내기 실패: %s." #, python-format msgid "Failed to set 'enable' attribute for SCST target %s" msgstr "SCST 대상 %s의 'enable' 속성을 설정하는 데 실패" #, python-format msgid "Failed to set attribute for enable target driver %s" msgstr "대상 드라이버 %s을(를) 활성화하는 속성을 설정하는 데 실패" #, python-format msgid "Failed to set properties for volume %(existing)s. Volume manage failed." msgstr "" "볼륨 %(existing)s의 특성을 설정하지 못했습니다. 볼륨 관리에 실패했습니다." #, python-format msgid "" "Failed to set properties for volume %(existing)s. Volume unmanage failed." msgstr "" "볼륨 %(existing)s의 특성을 설정하지 못했습니다. 볼륨 관리 취소에 실패했습니" "다." msgid "Failed to setup the Dell EqualLogic driver." msgstr "Dell EqualLogic 드라이버를 설정하는 데 실패했습니다." msgid "Failed to shutdown horcm." msgstr "horcm을 시스템 종료하는 데 실패했습니다." #, python-format msgid "Failed to snap Consistency Group %s" msgstr "일관성 그룹 %s을(를) 맞추는 데 실패" msgid "Failed to start horcm." msgstr "horcm을 시작하지 못했습니다." msgid "Failed to terminate connection" msgstr "연결 종료 실패" #, python-format msgid "Failed to terminate connection %(initiator)s %(vol)s" msgstr "연결 %(initiator)s %(vol)s을(를) 종료하는 데 실패" #, python-format msgid "Failed to terminate connection to volume \"%s\"." msgstr "볼륨 \"%s\"에 대한 연결을 종료하는 데 실패했습니다." #, python-format msgid "Failed to umount %(share)s, reason=%(stderr)s" msgstr "%(share)s을(를) umount하는 데 실패, 이유=%(stderr)s" #, python-format msgid "" "Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " "target" msgstr "" "iscsi 대상을 제거한 후 볼륨 id %(vol_id)s의 %(conf)s을(를) 업데이트하는 데 실" "패" #, python-format msgid "Failed to update %(conf)s for volume id:%(vol_id)s" msgstr "볼륨 id:%(vol_id)s의 %(conf)s을(를) 업데이트하는 데 실패" #, python-format msgid "" "Failed to update %(volume_id)s metadata using the provided snapshot " "%(snapshot_id)s metadata." msgstr "" "제공된 스냅샷 %(snapshot_id)s 메타데이터를 사용하여 %(volume_id)s 메타데이터" "를 업데이트하는 데 실패했습니다." #, python-format msgid "" "Failed to update initiator data for initiator %(initiator)s and backend " "%(backend)s" msgstr "" "개시자 %(initiator)s 및 백엔드 %(backend)s의 드라이버 개시자 데이터를 업데이" "트하는 데 실패" #, python-format msgid "Failed to update quota donating volume transfer id %s" msgstr "볼륨 전송 id %s를 제공하는 할당량을 업데이트하는 데 실패" #, python-format msgid "Failed to update quota for consistency group %s." msgstr "일관성 그룹 %s의 할당량을 업데이트하는 데 실패했습니다." #, python-format msgid "Failed to update quota for deleting volume: %s" msgstr "볼륨 삭제를 위해 할당량을 업데이트하는 데 실패: %s" #, python-format msgid "Failed to update quota while deleting snapshots: %s" msgstr "스냅샷을 삭제하는 중에 할당량 업데이트 실패: %s" msgid "Failed to update quota while deleting volume." msgstr "볼륨을 삭제하는 동안 할당량을 업데이트하는 데 실패했습니다." msgid "Failed to update replay profiles" msgstr "재생 프로파일을 업데이트하는 데 실패" msgid "Failed to update storage profile" msgstr "스토리지 프로파일을 업데이트하는 데 실패" msgid "Failed to update usages deleting backup" msgstr "백업을 삭제하는 중 사용법을 업데이트하지 못함" msgid "Failed to update usages deleting snapshot" msgstr "스냅샷을 삭제하는 중 사용법을 업데이트하지 못함" msgid "Failed to update usages deleting volume." msgstr "볼륨을 삭제하는 중 사용법을 업데이트하지 못했습니다." 
#, python-format msgid "Failed to update volume status: %s" msgstr "볼륨 상태를 업데이트하는 데 실패: %s" #, python-format msgid "" "Failed to verify that volume was added to storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST 정책의 스토리지 그룹에 볼륨이 추가되었는지 확인하는 데 실패: " "%(fastPolicyName)s." msgid "Failed to write in /etc/scst.conf." msgstr "/etc/scst.conf에 쓰지 못했습니다." #, python-format msgid "Failed to write persistence file: %(path)s." msgstr "영구 파일을 쓰는 데 실패: %(path)s." #, python-format msgid "Failed updating %(object_type)s %(object_id)s with %(update)s" msgstr "%(object_type)s %(object_id)s을(를) %(update)s(으)로 업데이트 실패" #, python-format msgid "" "Failed updating %(snapshot_id)s metadata using the provided volumes " "%(volume_id)s metadata" msgstr "" "제공된 %(volume_id)s 메타데이터를 사용하여 %(snapshot_id)s 메타데이터를 업데" "이트하는 데 실패" #, python-format msgid "" "Failed updating model of snapshot %(snapshot_id)s with creation provided " "model %(model)s." msgstr "" "스냅샷 %(snapshot_id)s의 모델을 작성 제공 모델 %(model)s(으)로 업데이트하는 " "데 실패했습니다." #, python-format msgid "" "Failed updating model of snapshot %(snapshot_id)s with driver provided model " "%(model)s." msgstr "" "스냅샷 %(snapshot_id)s의 모델을 드라이버 제공 모델 %(model)s(으)로 업데이트하" "는 데 실패했습니다." #, python-format msgid "" "Failed updating model of volume %(volume_id)s with creation provided model " "%(model)s" msgstr "" "볼륨 %(volume_id)s의 모델을 작성 제공 모델 %(model)s(으)로 업데이트하는 데 실" "패" #, python-format msgid "" "Failed updating model of volume %(volume_id)s with driver provided model " "%(model)s" msgstr "" "볼륨 %(volume_id)s의 모델을 드라이버 제공 모델 %(model)s(으)로 업데이트하는 " "데 실패" #, python-format msgid "Failed updating snapshot %(snapshot_id)s with %(update)s." msgstr "스냅샷 %(snapshot_id)s을(를) %(update)s(으)로 업데이트하는 데 실패." #, python-format msgid "" "Failed updating snapshot metadata using the provided volumes %(volume_id)s " "metadata" msgstr "" "제공된 %(volume_id)s 메타데이터를 사용하여 스냅샷 메타데이터를 업데이트하는 " "데 실패" #, python-format msgid "Failed updating volume %(volume_id)s bootable flag to true" msgstr "볼륨 %(volume_id)s 부트 가능 플래그를 true로 업데이트하는 데 실패" #, python-format msgid "Failed updating volume %(volume_id)s with %(update)s" msgstr "볼륨 %(volume_id)s을(를) %(update)s(으)로 업데이트하는 데 실패" #, python-format msgid "Failed updating volume %(volume_id)s with %(updates)s" msgstr "볼륨 %(volume_id)s을(를) %(updates)s(으)로 업데이트하는 데 실패" #, python-format msgid "Failure deleting staged tmp LUN %s." msgstr "스테이징된 임시 LUN %s 삭제 실패." #, python-format msgid "Failure restarting snap vol. Error: %s." msgstr "스냅 볼륨을 다시 시작하는 데 실패. 오류: %s." msgid "Fetch volume pool name failed." msgstr "볼륨 풀 이름을 가져오는 데 실패했습니다." #, python-format msgid "" "FibreChannelDriver validate_connector failed. No '%(setting)s'. Make sure " "HBA state is Online." msgstr "" "FibreChannelDriver validate_connector가 실패했습니다. '%(setting)s'가 없습니" "다. HBA 상태가 온라인인지 확인하십시오." #, python-format msgid "Flexvisor failed to get event %(volume)s (%(status)s)." msgstr "" "Flexviso에서 이벤트 %(volume)s (%(status)s)을(를) 가져오는 데 실패했습니다." #, python-format msgid "Flexvisor failed to get pool %(id)s info." msgstr "Flexvisor에서 풀 %(id)s 정보를 가져오는 데 실패했습니다." #, python-format msgid "Flexvisor failed to get pool list due to %s." msgstr "%s(으)로 인해 Flexvisor에서 풀 목록을 가져오는 데 실패했습니다." #, python-format msgid "Flexvisor failed to get pool list.(Error: %d)" msgstr "Flexvisor에서 풀 목록을 가져오는 데 실패했습니다(오류: %d)" #, python-format msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "id: %(uuid)s에 맵핑된 %(count)s개의 볼륨을 찾을 수 없습니다. " msgid "Free capacity not set: volume node info collection broken." 
msgstr "여유 공간을 설정하지 않음: 볼륨 노드 정보 콜렉션이 중단되었습니다. " #, python-format msgid "GPFS is not active. Detailed output: %s." msgstr "GPFS가 활성이 아닙니다. 자세한 결과: %s." msgid "Get LUN migration error." msgstr "LUN 마이그레이션 가져오기 오류." msgid "Get method error." msgstr "메소드 가져오기 오류." msgid "Get replication status for volume failed." msgstr "볼륨의 복제본 상태를 가져오는 데 실패했습니다." #, python-format msgid "HDP not found: %s" msgstr "HDP를 찾을 수 없음: %s" #, python-format msgid "Host PUT failed (%s)." msgstr "호스트 PUT 실패(%s)." msgid "Host could not be found!" msgstr "호스트를 찾을 수 없습니다." #, python-format msgid "ISCSI discovery attempt failed for:%s" msgstr "%s에 대한 ISCSI 검색 시도 실패" msgid "ISE FW version is not compatible with OpenStack!" msgstr "ISE FW 버전이 OpenStack과 호환되지 않습니다." msgid "ISE globalid not set!" msgstr "ISE globalid가 설정되지 않습니다." #, python-format msgid "Image size %(img_size)dGB is larger than volume size %(vol_size)dGB." msgstr "이미지 크기 %(img_size)dGB가 볼륨 크기 %(vol_size)dGB보다 큽니다." #, python-format msgid "Invalid API object: %s" msgstr "올바르지 않은 API 오브젝트: %s" #, python-format msgid "Invalid JSON: %s" msgstr "올바르지 않은 JSON: %s" #, python-format msgid "Invalid ReplayList return: %s" msgstr "올바르지 않은 ReplayList 리턴: %s" #, python-format msgid "Invalid hostname %(host)s" msgstr "올바르지 않은 호스트 이름 %(host)s" msgid "Invalid replication target specified for failover" msgstr "장애 복구를 위해 올바르지 않은 복제 대상이 지정됨" #, python-format msgid "Invalid value for %(key)s, value is %(value)s." msgstr "%(key)s의 올바르지 않은 값입니다, 값은 %(value)s입니다." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "복제가 적절하게 구성되지 않았으므로 장애 복구 실행에 실패했습니다." #, python-format msgid "JSON encode params %(param)s error: %(status)s." msgstr "JSON 인코드 매개 변수 %(param)s 오류: %(status)s." #, python-format msgid "JSON transfer data error. %s." msgstr "JSON 전송 데이터 오류. %s." #, python-format msgid "JSON transfer error: %s." msgstr "JSON 전송 오류: %s." #, python-format msgid "LUN %(path)s geometry failed. Message - %(msg)s" msgstr "LUN %(path)s 지오메트리에 실패했습니다. 메시지 - %(msg)s" msgid "LUN extend failed!" msgstr "LUN 확장 실패" msgid "LUN unexport failed!" msgstr "LUN 내보내기 취소 실패" #, python-format msgid "" "Location info needed for backend enabled volume migration not in correct " "format: %s. Continuing with generic volume migration." msgstr "" "백엔드 사용 볼륨 마이그레이션에 필요한 위치 정보가 올바른 형식: %s이(가) 아닙" "니다. 일반 볼륨 마이그레이션을 계속합니다." msgid "" "Logging into the Datera cluster failed. Please check your username and " "password set in the cinder.conf and start the cinder-volume service again." msgstr "" "Datera 클러스터에 로그인하는 데 실패했습니다. cinder.conf에 설정된 사용자 이" "름과 비밀번호를 확인하고 cinder-volume 서비스를 다시 시작하십시오." #, python-format msgid "" "Login error. URL: %(url)s\n" "Reason: %(reason)s." msgstr "" "로그인 오류. URL: %(url)s\n" "이유: %(reason)s." #, python-format msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." msgstr "마스킹 보기: %(maskingViewName)s이(가) 최근에 삭제된 것으로 보입니다." #, python-format msgid "Lun %s has dependent snapshots, skipping lun deletion." msgstr "Lun %s에 종속 스냅샷이 있으므로 lun 삭제를 건너뜁니다." #, python-format msgid "Lun create for %s failed!" msgstr "%s의 Lun 작성 실패" #, python-format msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "볼륨 %(vol)s 스냅샷 %(snap)s의 Lun 작성 스냅샷 실패" #, python-format msgid "Lun delete for %s failed!" msgstr "%s의 Lun 삭제 실패" #, python-format msgid "Lun delete snapshot for volume %(vol)s snapshot %(snap)s failed!" msgstr "볼륨 %(vol)s 스냅샷 %(snap)s의 Lun 삭제 스냅샷 실패" msgid "Lun mapping returned null!" msgstr "Lun 맵핑에서 널(null)을 리턴했습니다!" 
#, python-format msgid "MSGID%(id)04d-E: %(msg)s" msgstr "MSGID%(id)04d-E: %(msg)s" msgid "Manage exist volume failed." msgstr "기존 볼륨 관리에 실패했습니다." #, python-format msgid "" "Manager for service %(binary)s %(host)s is reporting problems, not sending " "heartbeat. Service will appear \"down\"." msgstr "" "서비스 %(binary)s %(host)s의 관리자가 하트비트를 보내지 않고 문제점을 보고합" "니다. 서비스가 \"작동 중지\"로 표시됩니다." #, python-format msgid "" "Masking View creation or retrieval was not successful for masking view " "%(maskingViewName)s. Attempting rollback." msgstr "" "마스킹 보기 %(maskingViewName)s의 마스킹 보기 작성 또는 검색에 성공하지 못했" "습니다. 롤백을 시도합니다." #, python-format msgid "" "Max retries reached deleting backup %(basename)s image of volume %(volume)s." msgstr "" "볼륨 %(volume)s의 백업 %(basename)s 이미지를 삭제하도록 허용된 최대 재시도 수" "에 도달했습니다." #, python-format msgid "Message: %s" msgstr "메시지: %s" #, python-format msgid "Migration of LUN %s failed to complete." msgstr "LUN %s의 마이그레이션을 완료하지 못했습니다." msgid "Model update failed." msgstr "모델 업데이트에 실패했습니다." #, python-format msgid "Modify volume PUT failed: %(name)s (%(status)d)." msgstr "수정 볼륨 PUT 실패: %(name)s (%(status)d)." #, python-format msgid "Mount failure for %(share)s after %(count)d attempts." msgstr "%(count)d번의 시도 후에 %(share)s의 마운트 실패." #, python-format msgid "Mount failure for %(share)s." msgstr "%(share)s의 마운트 실패." #, python-format msgid "Multiple replay profiles under name %s" msgstr "%s(이)라는 이름의 여러 재생 프로파일" #, python-format msgid "NFS share %(share)s has no service entry: %(svc)s -> %(hdp)s" msgstr "NFS 공유 %(share)s에 서비스 항목이 없음: %(svc)s -> %(hdp)s" msgid "No CLI output for firmware version check" msgstr "펌웨어 버전 확인을 위한 CLI 출력이 없음" #, python-format msgid "No VIP configured for service %s" msgstr "서비스 %s의 VIP가 구성되지 않음" #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of pool: %(pool)s." msgstr "" "작업이 필요하지 않습니다. 볼륨: %(volumeName)s이(가) 이미 풀: %(pool)s의 일부" "가 아닙니다." #, python-format msgid "" "No action required. Volume: %(volumeName)s is already part of slo/workload " "combination: %(targetCombination)s." msgstr "" "작업이 필요하지 않습니다. 볼륨: %(volumeName)s이(가) 이미 slo/워크로드 조합: " "%(targetCombination)s의 일부입니다." #, python-format msgid "No configuration found for service: %s" msgstr "서비스의 구성을 찾을 수 없음: %s" #, python-format msgid "No configuration found for service: %s." msgstr "서비스의 구성을 찾을 수 없음: %s." msgid "No more targets avaliable." msgstr "사용할 수 있는 추가 대상이 없습니다." #, python-format msgid "" "No snapshots found in database, but %(path)s has backing file " "%(backing_file)s!" msgstr "" "데이터베이스에서 스냅샷을 찾을 수 없지만t %(path)s에 지원 파일 " "%(backing_file)s이(가) 있습니다." #, python-format msgid "Not able to configure PBM for vCenter server: %s" msgstr "vCenter 서버의 PBM을 구성할 수 없음: %s" #, python-format msgid "OSError: command is %(cmd)s." msgstr "OSError: 명령이 %(cmd)s입니다." #, python-format msgid "OSError: command is %s." msgstr "OSError: 명령이 %s입니다." #, python-format msgid "" "One of the components of the original masking view %(maskingViewName)s " "cannot be retrieved so please contact your system administrator to check " "that the correct initiator(s) are part of masking." msgstr "" "원래 마스킹 보기 %(maskingViewName)s의 구성 요소 중 하나를 검색할 수 없으므" "로, 올바른 개시자가 마스킹의 일부인지 확인하도록 시스템 관리자에게 요청하십시" "오." #, python-format msgid "" "Only SLO/workload migration within the same SRP Pool is supported in this " "version The source pool : %(sourcePoolName)s does not match the target " "array: %(targetPoolName)s. Skipping storage-assisted migration." msgstr "" "이 버전에서는 동일한 SRP 풀의 SLO/워크로드 마이그레이션만 지원됩니다. 
소스 " "풀: %(sourcePoolName)s이(가) 대상 배열: %(targetPoolName)s과(와) 일치하지 않" "습니다. 스토리지 지원 마이그레이션을 건너뜁니다." msgid "Only available volumes can be migrated between different protocols." msgstr "서로 다른 프로토콜 간에 사용 가능한 볼륨만 마이그레이션할 수 있습니다." #, python-format msgid "POST for host create failed (%s)!" msgstr "호스트의 POST 작성에 실패했습니다(%s)." #, python-format msgid "Pipe1 failed - %s " msgstr "Pipe1 실패 - %s " #, python-format msgid "Pipe2 failed - %s " msgstr "Pipe2 실패 - %s " msgid "" "Please check your xml for format or syntax errors. Please see documentation " "for more details." msgstr "" "xml에서 형식 또는 구문 오류를 확인하십시오. 자세한 내용은 문서를 참조하십시" "오." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "PoolName은 파일 %(fileName)s에 있어야 합니다. " #, python-format msgid "Prepare clone failed for %s." msgstr "%s의 복제본 준비에 실패했습니다." msgid "Primary IP must be set!" msgstr "기본 IP를 설정해야 합니다!" msgid "Problem cleaning incomplete backup operations." msgstr "불완전한 백업 조작 정리 문제." #, python-format msgid "Problem cleaning temp volumes and snapshots for backup %(bkup)s." msgstr "백업 %(bkup)s의 임시 볼륨 및 스냅샷 정리 문제." #, python-format msgid "Problem cleaning up backup %(bkup)s." msgstr "백업 %(bkup)s 정리 문제." msgid "Promote volume replica failed." msgstr "볼륨 복제본 홍보에 실패했습니다." #, python-format msgid "" "Purity host %(host_name)s is managed by Cinder but CHAP credentials could " "not be retrieved from the Cinder database." msgstr "" "Cinder에서 Purity 호스트 %(host_name)s을(를) 관리하지만 Cinder 데이터베이스에" "서 CHAP 자격 증명을 검색할 수 없습니다." #, python-format msgid "" "Purity host %(host_name)s is not managed by Cinder and can't have CHAP " "credentials modified. Remove IQN %(iqn)s from the host to resolve this issue." msgstr "" "Cinder에서 Purity 호스트 %(host_name)s을(를) 관리하지 않으므로 CHAP 자격 증명" "을 수정할 수 없습니다. 이 문제를 해결하려면 호스트에서 IQN %(iqn)s을(를) 제거" "하십시오." #, python-format msgid "Qemu-img is not installed. OSError: command is %(cmd)s." msgstr "Qemu-img가 설치되지 않았습니다. OSError: 명령이 %(cmd)s입니다." #, python-format msgid "" "Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " "(%(d_consumed)dG of %(d_quota)dG already consumed)." msgstr "" "%(s_pid)s에 대한 할당량을 초과하여 %(s_size)sG만큼 볼륨을 확장하려고 시도, " "(%(d_quota)dG 중 %(d_consumed)dG는 이미 사용됨)" #, python-format msgid "REST Not Available: %s" msgstr "REST를 사용할 수 없음: %s" #, python-format msgid "Re-throwing Exception %s" msgstr "%s 예외가 다시 발생" #, python-format msgid "Read response raised an exception: %s." msgstr "읽기 응답에서 예외 발생: %s." msgid "Recovered model server connection!" msgstr "모델 서버 연결을 복구했습니다!" #, python-format msgid "Recovering from a failed execute. Try number %s" msgstr "실패한 실행에서 복구 중입니다. 번호 %s 시도" msgid "Replication must be specified as ' True' or ' False'." msgstr "복제는 ' True'나 ' False'로 지정되어야 합니다." msgid "" "Requested to setup thin provisioning, however current LVM version does not " "support it." msgstr "" "씬 프로비저닝을 설정하도록 요청했지만 현재 LVM 버전에서 지원하지 않습니다." #, python-format msgid "Resizing %s failed. Cleaning volume." msgstr "%s 크기 조정에 실패했습니다. 볼륨을 정리합니다." #, python-format msgid "Restore to volume %(volume)s finished with error - %(error)s." msgstr "" "볼륨 %(volume)s(으)로 복원이 완료되었지만 %(error)s 오류가 발생했습니다." #, python-format msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" msgstr "%(retry)s번 재시도: %(method)s 실패 %(rc)s: %(reason)s" #, python-format msgid "Retype unable to find volume %s." msgstr "재입력해도 볼륨 %s을(를) 찾을 수 없습니다." msgid "Retype volume error." msgstr "볼륨 다시 입력 오류." msgid "Retype volume error. Create replication failed." msgstr "" "볼륨을 다시 입력하는 중에 오류가 발생했습니다. 복제 작성에 실패했습니다." 
msgid "Retype volume error. Delete replication failed." msgstr "" "볼륨을 다시 입력하는 중에 오류가 발생했습니다. 복제 삭제에 실패했습니다." #, python-format msgid "" "SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, Platinum, " "Diamond, Optimized, NONE." msgstr "" "SLO: %(slo)s이(가) 올바르지 않습니다. 올바른 값은 Bronze, Silver, Gold, " "Platinum, Diamond, Optimized, NONE입니다." msgid "" "ScVolume returned success with empty payload. Attempting to locate volume" msgstr "" "ScVolume에서 빈 페이로드와 함께 성공을 리턴했습니다. 볼륨을 찾으려고 시도합니" "다." #, python-format msgid "Server Busy retry request: %s" msgstr "서버 사용 중 재시도 요청: %s" msgid "Service not found for updating replication_status." msgstr "replication_status를 업데이트하는 서비스를 찾을 수 없습니다." #, python-format msgid "Setting QoS for %s failed" msgstr "%s의 QoS 설정 실패" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export." msgstr "" "올바르지 않은 형식으로 인해 공유 %s이(가) 무시됩니다. format. address:/" "export 형식이어야 합니다." #, python-format msgid "Sheepdog is not installed. OSError: command is %s." msgstr "Sheepdog가 설치되지 않았습니다. OSError: 명령이 %s입니다." #, python-format msgid "" "Skipping remove_export. No iscsi_target ispresently exported for volume: %s" msgstr "" "remove_export를 건너뜁니다. 현재 %s 볼륨에 대한 iscsi_target을 내보내지 않았" "습니다. " #, python-format msgid "Snapshot \"%s\" already exists." msgstr "스냅샷 \"%s\"이(가) 이미 있습니다." #, python-format msgid "" "Snapshot \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "스냅샷 \"%s\"을(를) 찾을 수 없습니다. \"dog vdi list\"의 결과를 확인하십시오." #, python-format msgid "Snapshot %(snapshotname)s not found on the array. No volume to delete." msgstr "" "배열에서 스냅샷 %(snapshotname)s을(를) 찾을 수 없습니다. 삭제할 볼륨이 없습니" "다. " #, python-format msgid "Snapshot %s: create failed" msgstr "스냅샷 %s: 작성 실패" #, python-format msgid "Snapshot %s: has clones" msgstr "스냅샷 %s에 복제본이 있음" msgid "Snapshot did not exist. It will not be deleted" msgstr "스냅샷이 없으므로 삭제되지 않음" #, python-format msgid "" "Source CG %(source_cg)s not found when creating consistency group %(cg)s " "from source." msgstr "" "소스에서 일관성 그룹 %(cg)s을(를) 작성할 때 소스 CG CG %(source_cg)s을(를) " "찾을 수 없습니다." #, python-format msgid "Source snapshot %(snapshot_id)s cannot be found." msgstr "소스 스냅샷 %(snapshot_id)s을(를) 찾을 수 없습니다." #, python-format msgid "Source snapshot cannot be found for target volume %(volume_id)s." msgstr "대상 볼륨 %(volume_id)s의 소스 스냅샷을 찾을 수 없습니다." #, python-format msgid "Source volume %s not ready!" msgstr "소스 볼륨 %s이(가) 준비되지 않았습니다." #, python-format msgid "Source volumes cannot be found for target volume %(volume_id)s." msgstr "대상 볼륨 %(volume_id)s의 소스 볼륨을 찾을 수 없습니다." #, python-format msgid "" "Src Volume \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "소스 볼륨 \"%s\"을(를) 찾을 수 없습니다. \"dog vdi list\"의 결과를 확인하십시" "오." #, python-format msgid "Start synchronization failed. Error: %s." msgstr "동기화 시작 실패. 오류: %s." #, python-format msgid "StdErr :%s" msgstr "StdErr :%s" #, python-format msgid "StdOut :%s" msgstr "StdOut: %s" #, python-format msgid "Storage Profile %s was not found." msgstr "스토리지 프로파일 %s을(를) 찾을 수 없습니다. " #, python-format msgid "Storage profile: %s cannot be found in vCenter." msgstr "스토리지 프로파일: vCenter에서 %s을(를) 찾을 수 없습니다." msgid "Sync volume replica failed." msgstr "볼륨 복제본 동기화에 실패했습니다." #, python-format msgid "TSM [%s] not found in CloudByte storage." msgstr "CloudByte 스토리지에서 TSM [%s]을(를) 찾을 수 없습니다." #, python-format msgid "Target end points do not exist for hardware Id: %(hardwareIdInstance)s." msgstr "하드웨어 Id: %(hardwareIdInstance)s의 대상 엔드포인트가 없습니다." 
msgid "The Flexvisor service is unavailable." msgstr "Flexvisor 서비스를 사용할 수 없습니다." #, python-format msgid "The NFS Volume %(cr)s does not exist." msgstr "NFS 볼륨 %(cr)s이(가) 없습니다." msgid "The connector does not contain the required information." msgstr "커넥터에 필수 정보가 없습니다." msgid "" "The connector does not contain the required information: initiator is missing" msgstr "커넥터에 필수 정보가 없음: 개시자가 누락되어 있음" msgid "" "The connector does not contain the required information: wwpns is missing" msgstr "커넥터에 필수 정보가 없음: wwpns가 누락되어 있음" msgid "The given extra_spec or valid_values is None." msgstr "지정된 extra_spec 또는 valid_values가 None입니다." msgid "The list of iscsi_ip_addresses is empty" msgstr "iscsi_ip_addresses 목록이 비어 있음" #, python-format msgid "" "The primary array must have an API version of %(min_ver)s or higher, but is " "only on %(current_ver)s, therefore replication is not supported." msgstr "" "기본 배열에 %(min_ver)s 이상의 API 버전이 있어야 하지만, %(current_ver)s만 있" "으므로 복제가 지원되지 않습니다." #, python-format msgid "" "The replication mode of %(type)s has not successfully established " "partnership with the replica Storwize target %(stor)s." msgstr "" "%(type)s의 복제 모드가 복제본 Storwize 대상 %(stor)s과(와) 성공적으로 파트너" "십을 맺을 수 없습니다." msgid "The snapshot cannot be deleted because it is a clone point." msgstr "스냅샷이 복제 지점이므로 삭제할 수 없습니다." #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s skipping storage-assisted migration." msgstr "" "소스 배열: %(sourceArraySerialNumber)s이(가) 대상 배열: " "%(targetArraySerialNumber)s과(와) 일치하지 않아 스토리지 지원 마이그레이션을 " "건너뜁니다." #, python-format msgid "" "The source array : %(sourceArraySerialNumber)s does not match the target " "array: %(targetArraySerialNumber)s, skipping storage-assisted migration." msgstr "" "소스 배열: %(sourceArraySerialNumber)s이(가) 대상 배열: " "%(targetArraySerialNumber)s과(와) 일치하지 않아 스토리지 지원 마이그레이션을 " "건너뜁니다." #, python-format msgid "The source volume %(volume_id)s cannot be found." msgstr "소스 볼륨 %(volume_id)s을(를) 찾을 수 없습니다." #, python-format msgid "The volume driver requires %(data)s in the connector." msgstr "볼륨 드라이버는 커넥터에 %(data)s이(가) 필요합니다." msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "볼륨 드라이버는 커넥터에 iSCSI 개시자 이름이 필요합니다. " #, python-format msgid "There are no valid hosts available in configured cluster(s): %s." msgstr "구성된 클러스터에서 사용할 수 있는 올바른 호스트가 없음: %s." #, python-format msgid "There is no valid datastore satisfying requirements: %s." msgstr "요구 사항을 만족하는 올바른 데이터 저장소가 없음: %s." msgid "There must be at least one valid replication device configured." msgstr "하나 이상의 올바른 복제 장치가 구성되어야 합니다." #, python-format msgid "" "There was a problem with the failover (%(error)s) and it was unsuccessful. " "Volume '%(volume)s will not be available on the failed over target." msgstr "" "장애 복구에 문제점이 있어(%(error)s) 실패했습니다. 장애 복구된 대상에서 볼륨 " "'%(volume)s을(를) 사용할 수 없습니다." #, python-format msgid "There was an error deleting snapshot %(id)s: %(error)." msgstr "스냅샷 %(id)s 삭제 오류: %(error)." #, python-format msgid "There was an error deleting volume %(id)s: %(error)." msgstr "볼륨 %(id)s 삭제 오류: %(error)." #, python-format msgid "There was an error deleting volume %(id)s: %(error)s." msgstr "볼륨 %(id)s 삭제 오류: %(error)s." msgid "This usually means the volume was never successfully created." msgstr "일반적으로 볼륨이 성공적으로 작성된 적이 없음을 나타냅니다." msgid "Tiering Policy is not supported on this array." msgstr "이 배열에서 계층 지정 정책이 지원되지 않습니다." #, python-format msgid "Timed out deleting %s!" 
msgstr "%s을(를) 삭제하는 중에 제한시간이 종료되었습니다." #, python-format msgid "Trying to create snapshot by non-existent LV: %s" msgstr "존재하지 않는 LV에서 스냅샷을 작성하려고 시도: %s" #, python-format msgid "URLError: %s" msgstr "URLError: %s" #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "볼륨 %s의 Storwize 백엔드에 액세스할 수 없습니다." #, python-format msgid "Unable to create folder path %s" msgstr "폴더 경로 %s을(를) 작성할 수 없음" #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹을 작성하거나 확인하지 " "못했습니다." #, python-format msgid "Unable to create volume %s from replay" msgstr "재생에서 볼륨 %s을(를) 작성할 수 없습니다." #, python-format msgid "Unable to create volume on SC: %s" msgstr "SC에서 볼륨을 작성할 수 없음: %s" #, python-format msgid "Unable to create volume. Volume driver %s not initialized" msgstr "볼륨을 작성할 수 없습니다. 볼륨 드라이버 %s이(가) 초기화되지 않음" msgid "Unable to delete busy volume." msgstr "사용 중인 볼륨을 삭제할 수 없습니다." #, python-format msgid "Unable to delete due to existing snapshot for volume: %s" msgstr "볼륨의 기존 스냅샷으로 인해 삭제할 수 없음: %s" #, python-format msgid "Unable to delete profile %s." msgstr "%s 프로파일을 삭제할 수 없습니다." #, python-format msgid "Unable to delete replication for %(vol)s to %(dest)s." msgstr "%(dest)s(으)로 복제된 %(vol)s을(를) 삭제할 수 없습니다." msgid "" "Unable to delete the destination volume during volume migration, (NOTE: " "database record needs to be deleted)." msgstr "" "볼륨 마이그레이션 중에 대상 볼륨을 삭제할 수 없습니다(참고: 데이터베이스 레코" "드를 삭제해야 함)." #, python-format msgid "Unable to determine whether %(volumeName)s is composite or not." msgstr "%(volumeName)s의 복합 여부를 판별할 수 없습니다." msgid "Unable to disconnect host from volume, could not determine Purity host" msgstr "" "볼륨에서 호스트의 연결을 해제할 수 없으므로, Purity 호스트를 판별할 수 없음" msgid "" "Unable to failover to the secondary. Please make sure that the secondary " "back-end is ready." msgstr "" "보조로 장애 복구할 수 없습니다. 보조 백엔드가 준비되었는지 확인하십시오." msgid "Unable to find FC initiators" msgstr "FC 개시자를 찾을 수 없음" #, python-format msgid "Unable to find VG: %s" msgstr "VG에서 찾을 수 없습니다: %s" #, python-format msgid "Unable to find controller port iscsi configuration: %s" msgstr "컨트롤러 포트 iscis 구성을 찾을 수 없음: %s" #, python-format msgid "Unable to find controller port: %s" msgstr "컨트롤러 포트를 찾을 수 없음: %s" #, python-format msgid "" "Unable to find default storage group for FAST policy : %(fastPolicyName)s." msgstr "" "FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹을 찾을 수 없습니다." #, python-format msgid "Unable to find disk folder %(name)s on %(ssn)s" msgstr "%(ssn)s에서 디스크 폴더 %(name)s을(를) 찾을 수 없음" #, python-format msgid "Unable to find mapping profiles: %s" msgstr "맵핑 프로파일을 찾을 수 없음: %s" #, python-format msgid "Unable to find or create QoS Node named %s" msgstr "%s(이)라는 QoS 노드를 찾거나 작성할 수 없음" #, python-format msgid "Unable to find service: %(service)s for given host: %(host)s." msgstr "제공된 호스트: %(host)s의 서비스: %(service)s을(를) 찾을 수 없습니다." msgid "Unable to get associated pool of volume." msgstr "볼륨의 연관된 풀을 가져올 수 없습니다." #, python-format msgid "Unable to get default storage group %(defaultSgName)s." msgstr "기본 스토리지 그룹 %(defaultSgName)s을(를) 가져올 수 없습니다." msgid "Unable to get device mapping from network." msgstr "네트워크에서 장치 맵핑을 가져올 수 없습니다." #, python-format msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." msgstr "빠른 정책: %(fastPolicyName)s의 정책 규칙을 가져올 수 없습니다." 
#, python-format msgid "Unable to locate Volume Group %s" msgstr "볼륨그룹 %s를 찾을 수 없음" #, python-format msgid "Unable to locate snapshot %s" msgstr "스냅샷 %s을(를) 찾을 수 없음" #, python-format msgid "Unable to manage existing snapshot. Volume driver %s not initialized." msgstr "" "기존 스냅샷을 관리할 수 없습니다. 볼륨 드라이버 %s이(가) 초기화되지 않았습니" "다." #, python-format msgid "Unable to manage existing volume. Volume driver %s not initialized." msgstr "" "기존 볼륨을 관리할 수 없습니다. 볼륨 드라이버 %s이(가) 초기화되지 않았습니다." msgid "Unable to manage_existing snapshot on a disabled service." msgstr "사용되지 않는 서비스에서 기존 스냅샷을 관리할 수 없습니다." msgid "Unable to manage_existing volume on a disabled service." msgstr "사용되지 않는 서비스에서 기존 볼륨을 관리할 수 없습니다." #, python-format msgid "Unable to map %(vol)s to %(srv)s" msgstr "%(vol)s을(를) %(srv)s에 맵핑할 수 없음" #, python-format msgid "Unable to rename lun %s on array." msgstr "배열에서 lun %s의 이름을 바꿀 수 없습니다." #, python-format msgid "Unable to rename the logical volume for volume %s." msgstr "볼륨 %s의 논리 볼륨 이름을 변경할 수 없습니다." #, python-format msgid "Unable to rename the logical volume for volume: %s" msgstr "볼륨의 논리 볼륨 이름을 변경할 수 없음: %s" #, python-format msgid "Unable to replicate %(volname)s to %(destsc)s" msgstr "%(volname)s을(를) %(destsc)s에 복제할 수 없음" #, python-format msgid "Unable to retrieve VolumeConfiguration: %s" msgstr "VolumeConfiguration을 찾을 수 없음: %s" #, python-format msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." msgstr "배열 %(array)s에서 %(poolName)s의 풀 인스턴스를 검색할 수 없습니다." #, python-format msgid "Unable to terminate volume connection: %(err)s." msgstr "볼륨 연결을 종료할 수 없음: %(err)s" #, python-format msgid "Unable to unmap Volume %s" msgstr "볼륨 %s의 맵핑을 해제할 수 없음" msgid "Unexpected build error:" msgstr "예상치 못한 빌드 오류:" msgid "Unexpected error occurs in horcm." msgstr "horcm에서 예상치 못한 오류가 발생합니다." msgid "Unexpected error occurs in snm2." msgstr "snm2에서 예상치 못한 오류가 발생합니다." #, python-format msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" msgstr "" "retype() 되돌리기에서 VolumeSet(%s)을(를) 삭제하는 중에 예상치 못한 오류 발생" #, python-format msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" msgstr "retype()에서 VolumeSet(%s)을(를) 삭제하는 중에 예상치 못한 오류 발생" #, python-format msgid "Unexpected error while invoking web service. Error - %s." msgstr "웹 서비스를 호출하는 중에 예상치 못한 오류가 발생했습니다. 오류 - %s." #, python-format msgid "Unexpected exception during cache cleanup of snapshot %s" msgstr "스냅샷 %s의 캐시 정리 중에 예상치 못한 예외 발생" #, python-format msgid "Unknown exception in post clone resize LUN %s." msgstr "사후 복제 크기 조정 LUN %s에서 알 수 없는 예외가 발생했습니다." #, python-format msgid "Unrecognized Login Response: %s" msgstr "인식되지 않은 로그인 응답: %s" #, python-format msgid "" "Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound." msgstr "" "일관성 그룹을 업데이트하는 중 볼륨-%(volume_id)s 추가 실패: VolumeNotFound." #, python-format msgid "" "Update consistency group failed to remove volume-%(volume_id)s: " "VolumeNotFound." msgstr "" "일관성 그룹을 업데이트하는 중 볼륨-%(volume_id)s 제거 실패: VolumeNotFound." msgid "Update snapshot usages failed." msgstr "스냅샷 사용법을 업데이트하는 데 실패했습니다." msgid "Update volume model for transfer operation failed." msgstr "전송 조작을 위한 볼륨 모델 업데이트에 실패했습니다." #, python-format msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." msgstr "이미지에 볼륨을 업로드하는 중 오류 발생(image-id: %(image_id)s)." msgid "User does not have permission to change Storage Profile selection." msgstr "사용자에게 스토리지 프로파일 선택을 변경할 권한이 없습니다." msgid "VGC-CLUSTER command blocked and cancelled." msgstr "VGC-CLUSTER 명령이 차단되어 취소되었습니다." 
#, python-format msgid "Version string '%s' is not parseable" msgstr "버전 문자열 '%s'을(를) 구문 분석할 수 없음" #, python-format msgid "Virtual Volume Set %s does not exist." msgstr "가상 볼륨 세트 %s이(가) 없습니다." #, python-format msgid "Virtual disk device of backing: %s not found." msgstr "지원 가상 디스크 장치: %s을(를) 찾을 수 없습니다." #, python-format msgid "Vol copy job status %s." msgstr "볼륨 복사 작업 상태 %s." #, python-format msgid "Volume \"%s\" not found. Please check the results of \"dog vdi list\"." msgstr "" "볼륨 \"%s\"을(를) 찾을 수 없습니다. \"dog vdi list\"의 결과를 확인하십시오." #, python-format msgid "" "Volume %(name)s is not suitable for storage assisted migration using retype." msgstr "" "볼륨 %(name)s이(가) 재입력을 사용하는 스토리지 지원 마이그레이션에 적합하지 " "않습니다." #, python-format msgid "Volume %(name)s not found on the array." msgstr "배열에서 볼륨 %(name)s을(를) 찾을 수 없습니다." #, python-format msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "배열에서 %(name)s 볼륨을 찾을 수 없습니다. 삭제할 볼륨이 없습니다. " #, python-format msgid "" "Volume %(name)s not found on the array. No volume to migrate using retype." msgstr "" "배열에서 %(name)s 볼륨을 찾을 수 없습니다. 다시 입력을 사용하여 마이그레이션" "할 볼륨이 없습니다. " #, python-format msgid "Volume %(vol)s in the consistency group could not be deleted." msgstr "일관성 그룹의 볼륨 %(vol)s을(를) 삭제할 수 없습니다." #, python-format msgid "" "Volume %(volumeid)s failed to send assign command, ret: %(status)s output: " "%(output)s" msgstr "" "볼륨 %(volumeid)s에서 지정 명령을 보내는 데 실패, ret: %(status)s 출력: " "%(output)s" #, python-format msgid "Volume %s doesn't exist on array." msgstr "배열에 %s 볼륨이 없습니다." #, python-format msgid "Volume %s, not found on SF Cluster." msgstr "SF 클러스터에서 %s 볼륨을 찾을 수 없습니다. " #, python-format msgid "Volume %s: create failed" msgstr "볼륨 %s: 작성 실패" #, python-format msgid "" "Volume %s: driver error when trying to retype, falling back to generic " "mechanism." msgstr "" "다시 입력하려고 할 때 볼륨 %s: 드라이버 오류 발생, 일반 메커니즘으로 장애 복" "구." #, python-format msgid "Volume %s: manage failed." msgstr "볼륨 %s: 관리 실패." #, python-format msgid "Volume %s: rescheduling failed" msgstr "볼륨 %s: 재스케줄링 실패" #, python-format msgid "Volume %s: update volume state failed." msgstr "볼륨 %s: 볼륨 상태 업데이트 실패." #, python-format msgid "" "Volume : %(volumeName)s has not been added to target storage group " "%(storageGroup)s." msgstr "" "대상 스토리지 그룹 %(storageGroup)s에 볼륨: %(volumeName)s이(가) 추가되지 않" "았습니다." #, python-format msgid "" "Volume : %(volumeName)s has not been removed from source storage group " "%(storageGroup)s." msgstr "" "소스 스토리지 그룹 %(storageGroup)s에서 볼륨: %(volumeName)s이(가) 제거되지 " "않았습니다." #, python-format msgid "" "Volume : %(volumeName)s. was not successfully migrated to target pool " "%(targetPoolName)s." msgstr "" "볼륨: %(volumeName)s이(가) 대상 풀 %(targetPoolName)s에 성공적으로 마이그레이" "션되지 않았습니다." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "accept_transfer operation!" msgstr "" "accept_transfer 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) " "찾을 수 없습니다." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "attach_volume operation!" msgstr "" "attach_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) 찾" "을 수 없습니다." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "delete_volume operation!" msgstr "" "delete_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) 찾" "을 수 없습니다." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "detach_volume operation!" 
msgstr "" "detach_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) 찾" "을 수 없습니다." #, python-format msgid "" "Volume ID %s was not found on the SolidFire Cluster while attempting " "extend_volume operation!" msgstr "" "extend_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) 찾" "을 수 없습니다." #, python-format msgid "" "Volume ID %s was not found on the zfssa device while attempting " "delete_volume operation." msgstr "" "delete_volume 조작을 시도하는 동안 zfssa 장치에서 볼륨 ID %s을(를) 찾을 수 없" "습니다." #, python-format msgid "Volume already exists. %s" msgstr "볼륨이 이미 있습니다. %s" msgid "Volume appears unmapped" msgstr "볼륨이 맵핑되지 않은 것으로 표시" #, python-format msgid "" "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" msgstr "볼륨 작성에 실패하여, 작성된 스냅샷 %(volume_name)s@%(name)s 삭제" #, python-format msgid "Volume creation failed, deleting created snapshot %s" msgstr "볼륨 작성에 실패하여, 작성된 스냅샷 %s 삭제" msgid "Volume did not exist. It will not be deleted" msgstr "볼륨이 없어 삭제되지 않음" #, python-format msgid "Volume driver %s not initialized" msgstr "볼륨 드라이버 %s이(가) 초기화되지 않음" msgid "Volume in unexpected state" msgstr "예상치 못한 상태의 볼륨" #, python-format msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "예상치 못한 상태 %s의 볼륨, 예상된 상태는 전송 대기임" #, python-format msgid "Volume migration failed due to exception: %(reason)s." msgstr "예외로 인해 볼륨 마이그레이션에 실패: %(reason)s." msgid "Volume must be detached for clone operation." msgstr "복제 조작을 수행하려면 볼륨의 연결을 해제해야 합니다." #, python-format msgid "Volume size \"%sG\" is too large." msgstr "볼륨 크기 \"%sG\"이(가) 너무 큽니다." #, python-format msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "VolumeType %s 삭제 실패, VolumeType이 사용 중입니다." #, python-format msgid "" "WebDAV operation failed with error code: %(code)s reason: %(reason)s Retry " "attempt %(retry)s in progress." msgstr "" "WebDAV 조작에 실패, 오류 코드: %(code)s 이유: %(reason)s 재시도 시도 " "%(retry)s이(가) 진행 중입니다." #, python-format msgid "WebDAV returned with %(code)s error during %(method)s call." msgstr "%(method)s 호출 중에 WebDAV에서 %(code)s을(를) 리턴했습니다." #, python-format msgid "" "Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP, " "OLTP_REP, NONE." msgstr "" "워크로드: %(workload)s이(가) 올바르지 않습니다. 올바른 값은 DSS_REP, DSS, " "OLTP, OLTP_REP, NONE입니다." msgid "_check_version_fail: Parsing error." msgstr "_check_version_fail: 구문 분석 오류." msgid "_find_mappings: volume is not active" msgstr "_find_mappings: 볼륨이 활성이 아님" #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " "operation: orig=%(orig)s new=%(new)s." msgstr "" "_rm_vdisk_copy_op: 볼륨 %(vol)s에 지정된 vdisk 복사 조작이 없음: orig=" "%(orig)s new=%(new)s." #, python-format msgid "" "_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" "_rm_vdisk_copy_op: 볼륨 %(vol)s 메타데이터에 지정된 vdisk 복사 조작이 없음: " "orig=%(orig)s new=%(new)s." #, python-format msgid "" "_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " "operations." msgstr "_rm_vdisk_copy_op: 볼륨 %s에 등록된 vdisk 복사 조작이 없습니다." #, python-format msgid "" "_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " "copy operations." msgstr "" "_rm_vdisk_copy_op: 볼륨 메타데이터 %s에 등록된 vdisk 복사 조작이 없습니다." #, python-format msgid "" "_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " "%(host_name)s found." msgstr "" "_unmap_vdisk_from_host: 볼륨 %(vol_name)s과(와) 호스트 %(host_name)s의 맵핑" "을 찾을 수 없습니다." 
#, python-format msgid "_wait_for_job_complete failed after %(retries)d tries." msgstr "_wait_for_job_complete가 %(retries)d번의 재시도 후에 실패했습니다." #, python-format msgid "_wait_for_job_complete, failed after %(retries)d tries." msgstr "_wait_for_job_complete가 %(retries)d번의 재시도 후에 실패했습니다." #, python-format msgid "_wait_for_sync failed after %(retries)d tries." msgstr "_wait_for_sync가 %(retries)d번의 재시도 후에 실패했습니다." #, python-format msgid "" "backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "백업: %(vol_id)s이(가) %(vpath)s에서 %(bpath)s으로 백업 하드 링크를 제거하지 " "못했습니다.\n" "stdout: %(out)s\n" " stderr: %(err)s." #, python-format msgid "can't create 2 volumes with the same name, %s" msgstr "동일한 이름 %s의 두 볼륨을 작성할 수 없음" msgid "cinder-rtstool is not installed correctly" msgstr "cinder-rtstool이 올바르게 설치되지 않음" #, python-format msgid "" "delete: %(vol_id)s failed with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "삭제: %(vol_id)s이(가) stdout에 실패함. stdout: %(out)s\n" " stderr: %(err)s" msgid "delete_vol: provider location empty." msgstr "delete_vol: 제공자 위치가 비어 있습니다." #, python-format msgid "ensure_export: Volume %s not found on storage." msgstr "ensure_export: 스토리지에 볼륨 %s을(를) 찾을 수 없습니다." #, python-format msgid "error opening rbd image %s" msgstr "rbd 이미지 %s 열기 오류" msgid "error refreshing volume stats" msgstr "볼륨 상태를 새로 고치는 중 오류 발생" msgid "horcm command timeout." msgstr "horcm 명령의 제한시간이 초과되었습니다." #, python-format msgid "iSCSI portal not found for service: %s" msgstr "서비스의 iSCSI 포털을 찾을 수 없음: %s" msgid "import pywbem failed!! pywbem is necessary for this volume driver." msgstr "" "pywbem 가져오기에 실패했습니다. 이 볼륨 드라이버에 pywbem이 필요합니다." #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s." msgstr "" "initialize_connection: %(vol)s 볼륨 및 %(conn)s 커넥터에 대한 리턴 특성을 수" "집하지 못했습니다." #, python-format msgid "" "initialize_connection: Failed to collect return properties for volume " "%(vol)s and connector %(conn)s.\n" msgstr "" "initialize_connection: %(vol)s 볼륨 및 %(conn)s 커넥터에 대한 리턴 특성을 수" "집하지 못함.\n" msgid "iscsi_ip_address must be set!" msgstr "iscsi_ip_address를 설정해야 합니다." msgid "manage_existing: No source-name in ref!" msgstr "manage_existing: ref에 source-name이 없습니다." #, python-format msgid "manage_existing_get_size: %s does not exist!" msgstr "manage_existing_get_size: %s이(가) 없습니다." msgid "manage_existing_get_size: No source-name in ref!" msgstr "manage_existing_get_size: ref에 source-name이 없습니다." msgid "model server went away" msgstr "모델 서버가 사라졌음" #, python-format msgid "modify volume: %s does not exist!" msgstr "수정 볼륨: %s(이)가 없습니다." msgid "san ip must be configured!" msgstr "san ip를 구성해야 합니다." msgid "san_login must be configured!" msgstr "san_login을 구성해야 합니다." msgid "san_password must be configured!" msgstr "san_password를 구성해야 합니다." #, python-format msgid "single_user auth mode enabled, but %(param)s not set" msgstr "single_user 인증 모드를 사용하지만 %(param)s이(가) 설정되지 않음" msgid "snm2 command timeout." msgstr "snm2 명령의 제한시간이 초과되었습니다." msgid "" "storwize_svc_multihostmap_enabled is set to False, not allowing multi host " "mapping." msgstr "" "storwize_svc_multihostmap_enabled가 False로 설정되어 있어 다중 호스트 맵핑이 " "허용되지 않습니다." #, python-format msgid "unmanage: Volume %s does not exist!" msgstr "관리 취소: %s 볼륨이 없습니다." msgid "zfssa_initiator cannot be empty when creating a zfssa_initiator_group." msgstr "" "zfssa_initiator_group을(를) 작성할 때 zfssa_initiator가 비어 있지 않아야 합니" "다." 
msgid "" "zfssa_replication_ip not set in cinder.conf. zfssa_replication_ip is needed " "for backend enabled volume migration. Continuing with generic volume " "migration." msgstr "" "cinder.conf. zfssa_replication_ip에 설정되지 않은 zfssa_replication_ip가 백엔" "드 사용 볼륨 마이그레이션에 필요합니다. 일반 볼륨 마이그레이션을 계속합니다." cinder-8.0.0/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po0000664000567000056710000035300712701406257024751 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # OpenStack Infra , 2015. #zanata # SeYeon Lee , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev23\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-26 08:49+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-26 07:43+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "\t%(name)-35s : %(value)s" msgstr "\t%(name)-35s : %(value)s" #, python-format msgid "\t%(param)-35s : %(value)s" msgstr "\t%(param)-35s : %(value)s" #, python-format msgid "\t%(prefix)-35s : %(version)s" msgstr "\t%(prefix)-35s : %(version)s" #, python-format msgid "\t%(request)-35s : %(value)s" msgstr "\t%(request)-35s : %(value)s" #, python-format msgid "" "\n" "\n" "\n" "\n" "Request URL: %(url)s\n" "\n" "Call Method: %(method)s\n" "\n" "Request Data: %(data)s\n" "\n" "Response Data:%(res)s\n" "\n" msgstr "" "\n" "\n" "\n" "\n" "요청 URL: %(url)s\n" "\n" "호출 메소드: %(method)s\n" "\n" "요청 데이터: %(data)s\n" "\n" "응답 데이터:%(res)s\n" "\n" #, python-format msgid "%(element)s: %(val)s" msgstr "%(element)s: %(val)s" #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format msgid "%(url)s returned a fault: %(e)s" msgstr "%(url)s이(가) 결함을 리턴함: %(e)s" #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s이(가) HTTP %(status)d(으)로 리턴되었음" #, python-format msgid "%(volume)s assign type fibre_channel, properties %(properties)s" msgstr "%(volume)s에서 fibre_channel 유형 할당, 특성 %(properties)s" #, python-format msgid "%s is already umounted" msgstr "%s이(가) 이미 umount됨" #, python-format msgid "3PAR driver cannot perform migration. Retype exception: %s" msgstr "" "3PAR 드라이버에서 마이그레이션을 수행할 수 없습니다. 다시 입력 예외: %s" #, python-format msgid "3PAR vlun %(name)s not found on host %(host)s" msgstr "3PAR vlun %(name)s을(를) %(host)s 호스트에서 찾을 수 없음" #, python-format msgid "" "3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was not " "deleted because: %(reason)s" msgstr "" "볼륨 '%(name)s' 의 3PAR vlun이 삭제되었지만, 다음 이유로 인해 호스트 " "'%(host)s'이(가) 삭제됨: %(reason)s" #, python-format msgid "AUTH properties: %(authProps)s" msgstr "AUTH 특성: %(authProps)s" #, python-format msgid "AUTH properties: %s." msgstr "AUTH 특성: %s." #, python-format msgid "Accepting transfer %s" msgstr "전송 %s 승인" msgid "Activate Flexvisor cinder volume driver." msgstr "Flexvisor cinder 볼륨 드라이버를 활성화합니다." msgid "Add connection: finished iterating over all target list" msgstr "연결 추가: 모든 대상 목록 반복 완료" #, python-format msgid "Add volume response: %s" msgstr "볼륨 응답 추가: %s" #, python-format msgid "Added %s to cg." msgstr "%s이(가) cg에 추가되었습니다." #, python-format msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." 
msgstr "" "볼륨 %(volumeName)s이(가) 기존 스토리지 그룹 %(sgGroupName)s에 추가되었습니" "다." #, python-format msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" msgstr "개시자 그룹 이름 %(igrp)s(으)로 볼륨=%(vol)s에 ACL 추가" #, python-format msgid "Adding volume %(v)s to consistency group %(cg)s." msgstr "일관성 그룹 %(cg)s에 볼륨 %(v)s 추가" #, python-format msgid "" "Adding volume: %(volumeName)s to default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST 정책: %(fastPolicyName)s의 기본 스토리지 그룹에 볼륨: %(volumeName)s 추" "가." #, python-format msgid "Adding volumes to cg %s." msgstr "cg %s에 볼륨 추가." #, python-format msgid "Already mounted: %s" msgstr "이미 마운트됨: %s" msgid "Attach volume completed successfully." msgstr "볼륨 연결이 성공적으로 완료되었습니다." #, python-format msgid "" "Automatically selected %(binary)s RPC version %(version)s as minimum service " "version." msgstr "" "자동으로 %(binary)s RPC 버전 %(version)s을(를) 최소 서비스 버전으로 선택했습" "니다." #, python-format msgid "" "Automatically selected %(binary)s objects version %(version)s as minimum " "service version." msgstr "" "자동으로 %(binary)s 오브젝트 버전 %(version)s을(를) 최소 서비스 버전으로 선택" "했습니다." msgid "Availability Zones retrieved successfully." msgstr "가용 구역이 성공적으로 검색되었습니다." #, python-format msgid "Available services: %s" msgstr "사용 가능한 서비스: %s" #, python-format msgid "Available services: %s." msgstr "사용 가능한 서비스: %s." #, python-format msgid "Backend name is %s." msgstr "백엔드 이름이 %s입니다." #, python-format msgid "Backend type: %s" msgstr "백엔드 유형: %s" #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "백업 VM: %(backing)s의 이름이 %(new_name)s(으)로 변경되었습니다." #, python-format msgid "Backing consistency group snapshot %s available for deletion" msgstr "삭제할 수 있는 일관성 그룹 스냅샷 %s 백업" msgid "Backing not available, no operation to be performed." msgstr "백업을 사용할 수 없습니다. 작업이 수행되지 않습니다." #, python-format msgid "Backing not found, creating for volume: %s" msgstr "백업을 찾을 수 없으므로, 볼륨 작성: %s" #, python-format msgid "" "Backup base image of volume %(volume)s still has %(snapshots)s snapshots so " "skipping base image delete." msgstr "" "볼륨 %(volume)s의 백업 기본 이미지에 여전히 %(snapshots)s 스냅샷이 있으므로, " "기본 이미지 삭제를 건너뜁니다." #, python-format msgid "" "Backup image of volume %(volume)s is busy, retrying %(retries)s more time(s) " "in %(delay)ss." msgstr "" "볼륨 %(volume)s의 백업 이미지가 사용 중입니다. %(delay)ss 안에 %(retries)s" "번 더 재시도" #, python-format msgid "Backup service: %s." msgstr "백업 서비스: %s." #, python-format msgid "Bandwidth limit is: %s." msgstr "대역폭 한계: %s." #, python-format msgid "Begin backup of volume %s." msgstr "볼륨 %s의 백업을 시작하십시오." msgid "Begin detaching volume completed successfully." msgstr "볼륨 연결 해제가 성공적으로 시작되었습니다." #, python-format msgid "" "BrcdFCZoneDriver - Add connection for fabric %(fabric)s for I-T map: " "%(i_t_map)s" msgstr "BrcdFCZoneDriver - I-T 맵: %(i_t_map)s의 패브릭 %(fabric)s 연결 추가" #, python-format msgid "" "BrcdFCZoneDriver - Delete connection for fabric %(fabric)s for I-T map: " "%(i_t_map)s" msgstr "BrcdFCZoneDriver - I-T 맵: %(i_t_map)s의 패브릭 %(fabric)s 연결 삭제" msgid "CHAP authentication disabled." msgstr "CHAP 인증을 사용하지 않습니다." #, python-format msgid "CONCERTO version: %s" msgstr "CONCERTO 버전: %s" msgid "Calling os-brick to detach ScaleIO volume." msgstr "ScaleIO 볼륨의 연결을 해제하기 위해 os-brick 호출." #, python-format msgid "Cancelling Migration from LUN %s." msgstr "LUN %s에서 마이그레이션 취소." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because cluster " "exists in different management group." 
msgstr "" "클러스터가 다른 관리 그룹에 있으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션" "을 제공할 수 없습니다." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the volume " "has been exported." msgstr "" "볼륨을 내보냈으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공할 수 없습" "니다." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the volume " "has snapshots." msgstr "" "볼륨에 스냅샷이 있으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공할 수 " "없습니다." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume does " "not exist in this management group." msgstr "" "볼륨이 이 관리 그룹에 없으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공" "할 수 없습니다." #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume is " "from a different backend." msgstr "" "볼륨이 다른 백엔드에 있으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공" "할 수 없습니다." #, python-format msgid "" "Capacity stats for SRP pool %(poolName)s on array %(arrayName)s " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu" msgstr "" "배열 %(arrayName)s의 SRP 풀 %(poolName)s에 대한 용량 통계 total_capacity_gb=" "%(total_capacity_gb)lu, free_capacity_gb=%(free_capacity_gb)lu" #, python-format msgid "Cgsnapshot %s: creating." msgstr "Cgsnapshot %s: 작성 중." #, python-format msgid "Change volume capacity request: %s." msgstr "볼륨 용량 변경 요청: %s." #, python-format msgid "Checking image clone %s from glance share." msgstr "Glance 공유의 이미지 복제 %s 확인." #, python-format msgid "Checking origin %(origin)s of volume %(volume)s." msgstr "볼륨 %(volume)s의 원본 %(origin)s 확인." #, python-format msgid "" "Cinder ISCSI volume with current path %(path)s is no longer being managed. " "The new name is %(unm)s." msgstr "" "현재 경로가 %(path)s인 Cinder ISCSI 볼륨이 더 이상 관리되지 않습니다. 새 이름" "은 %(unm)s입니다." #, python-format msgid "" "Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." msgstr "현재 경로가 \"%(cr)s\"인 Cinder NFS 볼륨이 더 이상 관리되지 않습니다." #, python-format msgid "Cinder NFS volume with current path %(cr)s is no longer being managed." msgstr "현재 경로 %(cr)s의 Cinder NFS 볼륨이 더 이상 관리되지 않습니다." msgid "Cinder secure environment indicator file exists." msgstr "Cinder 보안 환경 표시기 파일이 있습니다." #, python-format msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" msgstr "CiscoFCZoneDriver - I-T map의 연결 추가: %s" #, python-format msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" msgstr "CiscoFCZoneDriver - I-T map의 연결 삭제: %s" #, python-format msgid "Cleaning cache for share %s." msgstr "공유 %s의 캐시 정리." msgid "Cleaning up incomplete backup operations." msgstr "불완전한 백업 조작 정리." #, python-format msgid "Clone %s created." msgstr "복제 %s이(가) 작성되었습니다." #, python-format msgid "Cloning from cache to destination %s" msgstr "캐시에서 대상 %s(으)로 복제" #, python-format msgid "Cloning from snapshot to destination %s" msgstr "스냅샷에서 대상 %s(으)로 복제" #, python-format msgid "Cloning image %s from cache" msgstr "캐시에서 이미지 %s 복제." #, python-format msgid "Cloning image %s from snapshot." msgstr "스냅샷에서 이미지 %s 복제." 
#, python-format msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "볼륨 %(src)s을(를) 볼륨 %(dst)s(으)로 복제" #, python-format msgid "" "Cloning volume from snapshot volume=%(vol)s snapshot=%(snap)s clone=" "%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=%(agent-type)s " "perfpol-name=%(perfpol-name)s encryption=%(encryption)s cipher=%(cipher)s " "multi-initiator=%(multi-initiator)s" msgstr "" "스냅샷 볼륨=%(vol)s 스냅샷=%(snap)s 복제=%(clone)s snap_size=%(size)s 예약=" "%(reserve)sagent-type=%(agent-type)s perfpol-name=%(perfpol-name)s 암호화=" "%(encryption)s 암호=%(cipher)s 다중 개시자r=%(multi-initiator)s에서 볼륨 복제" #, python-format msgid "" "Cloning with volume_name %(vname)s clone_name %(cname)s export_path %(epath)s" msgstr "" "volume_name %(vname)s clone_name %(cname)s export_path %(epath)s(으)로 복제" #, python-format msgid "CloudByte API executed successfully for command [%s]." msgstr "명령 [%s]에 대해 CloudByte API가 성공적으로 실행되었습니다." #, python-format msgid "" "CloudByte operation [%(operation)s] succeeded for volume [%(cb_volume)s]." msgstr "" "볼륨 [%(cb_volume)s]에 대해 CloudByte 조작 [%(operation)s]에 성공했습니다." msgid "Complete-Migrate volume completed successfully." msgstr "볼륨 전체 마이그레이션이 성공적으로 완료되었습니다." #, python-format msgid "Completed: convert_to_base_volume: id=%s." msgstr "완료됨: convert_to_base_volume: id=%s." #, python-format msgid "Configured pools: %s" msgstr "구성된 풀: %s" #, python-format msgid "" "Connect initialization info: {driver_volume_type: fibre_channel, data: " "%(properties)s" msgstr "" "초기화 정보 연결: {driver_volume_type: fibre_channel, 데이터: %(properties)s" #, python-format msgid "Connecting to host: %s." msgstr "호스트에 연결: %s." #, python-format msgid "Connecting to target host: %s for backend enabled migration." msgstr "백엔드 사용 마이그레이션의 대상 호스트 %s에 연결." #, python-format msgid "Connector returning fcnsinfo-%s" msgstr "커넥터에서 fcnsinfo-%s 리턴" #, python-format msgid "Consistency group %(cg)s is created successfully." msgstr "일관성 그룹 %(cg)s이(가) 성공적으로 작성되었습니다." #, python-format msgid "Consistency group %s was deleted successfully." msgstr "일관성 그룹 %s이(가) 성공적으로 삭제되었습니다." #, python-format msgid "Consistency group %s: created successfully" msgstr "일관성 그룹 %s: 성공적으로 작성됨" #, python-format msgid "Consistency group %s: creating" msgstr "일관성 그룹 %s: 작성 중" #, python-format msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" msgstr "%(mbps).2f MB/s에서 %(sz).2f MB 이미지 전환 " #, python-format msgid "" "Converting %(volume_name)s to full provisioning with userCPG=%(new_cpg)s" msgstr "%(volume_name)s을(를) userCPG=%(new_cpg)s인 전체 프로비저닝으로 변환" #, python-format msgid "" "Converting %(volume_name)s to thin dedup provisioning with userCPG=" "%(new_cpg)s" msgstr "" "%(volume_name)s을(를) userCPG=%(new_cpg)s인 씬 중복 제거 프로비저닝으로 변환" #, python-format msgid "" "Converting %(volume_name)s to thin provisioning with userCPG=%(new_cpg)s" msgstr "%(volume_name)s을(를) userCPG=%(new_cpg)s인 씬 프로비저닝으로 변환" msgid "Coordination backend started successfully." msgstr "조정 백엔드가 성공적으로 시작되었습니다." #, python-format msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." msgstr "" "복사 오프로드 워크플로우를 사용하여 이미지 %(img)s을(를) 볼륨 %(vol)s(으)로 " "복사했습니다." #, python-format msgid "Copied image %(img)s to volume %(vol)s using local image cache." msgstr "" "로컬 이미지 캐시를 사용하여 이미지 %(img)s을(를) 볼륨 %(vol)s(으)로 복사했습" "니다." #, python-format msgid "Copied image to volume %s using regular download." msgstr "일반 다운로드를 사용하여 볼륨 %s에 이미지를 복사합니다." #, python-format msgid "Copy job to dest vol %s completed." msgstr "대상 볼륨 %s(으)로 작업 복사가 완료되었습니다." 
msgid "Copy volume to image completed successfully." msgstr "이미지에 볼륨 복사가 성공적으로 완료되었습니다." #, python-format msgid "Copying src vol %(src)s to dest vol %(dst)s." msgstr "소스 볼륨 %(src)s을(를) 대상 볼륨 %(dst)s에 복사합니다." #, python-format msgid "Could not find replica to delete of volume %(vol)s." msgstr "볼륨 %(vol)s을(를) 삭제하기 위한 복제본을 찾을 수 없습니다." #, python-format msgid "Could not run dpkg-query command: %(msg)s." msgstr "dpkg-query 명령을 실행할 수 없음: %(msg)s." #, python-format msgid "Could not run rpm command: %(msg)s." msgstr "rpm 명령을 실행할 수 없음: %(msg)s." #, python-format msgid "" "Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" msgstr "" "mmchattr을 사용하여 스토리지 풀을 %(pool)s(으)로 업데이트할 수 없음, 오류: " "%(error)s" #, python-format msgid "" "Couldn't find destination volume %(vol)s in the database. The entry might be " "successfully deleted during migration completion phase." msgstr "" "데이터베이스에서 대상 볼륨 %(vol)s을(를) 찾을 수 없습니다. 마이그레이션 완료 " "단계 중에 항목이 성공적으로 삭제되었을 가능성이 있습니다." #, python-format msgid "" "Couldn't find the temporary volume %(vol)s in the database. There is no need " "to clean up this volume." msgstr "" "데이터베이스에서 임시 볼륨 %(vol)s을(를) 찾을 수 없습니다.이 볼륨을 정리하지 " "않아도 됩니다." #, python-format msgid "Create Cloned Volume %(volume_id)s completed." msgstr "복제된 볼륨 %(volume_id)s 작성이 완료되었습니다." #, python-format msgid "Create Consistency Group: %(group)s." msgstr "일관성 그룹 %(group)s 작성." #, python-format msgid "Create Volume %(volume_id)s completed." msgstr "볼륨 %(volume_id)s 작성이 완료되었습니다." #, python-format msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." msgstr "스냅샷 %(snapshot_id)s에서 볼륨 %(volume_id)s 작성이 완료되었습니다." #, python-format msgid "" "Create Volume: %(volume)s Size: %(size)s pool: %(pool)s provisioning: " "%(provisioning)s tiering: %(tiering)s " msgstr "" "볼륨: %(volume)s 크기: %(size)s 풀: %(pool)s 프로비저닝: %(provisioning)s 계" "층: %(tiering)s 작성" #, python-format msgid "" "Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " "%(sourceName)s." msgstr "" "볼륨에서 복제본 작성: 볼륨 복제: %(cloneName)s 소스 볼륨: %(sourceName)s." #, python-format msgid "Create backup finished. backup: %s." msgstr "백업 작성이 완료됨. 백업: %s." #, python-format msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "백업 작성이 시작됨, 백업: %(backup_id)s 볼륨: %(volume_id)s." msgid "Create consistency group completed successfully." msgstr "일관성 그룹 생성이 성공적으로 완료되었습니다." #, python-format msgid "Create consistency group from source-%(source)s completed successfully." msgstr "source-%(source)s에서 일관성 그룹 생성이 성공적으로 완료되었습니다." #, python-format msgid "Create export done from Volume %(volume_id)s." msgstr "볼륨 %(volume_id)s에서 내보내기 작성이 수행되었습니다." msgid "Create snapshot completed successfully" msgstr "스냅샷 작성이 성공적으로 완료되었습니다." #, python-format msgid "" "Create snapshot for Consistency Group %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgstr "일관성 그룹 %(cgId)s의 스냅샷 cgsnapshotID: %(cgsnapshot)s 작성." #, python-format msgid "Create snapshot from volume %s" msgstr "%s 볼륨에서 스냅샷 작성" #, python-format msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "스냅샷 작성: %(snapshot)s: 볼륨: %(volume)s" #, python-format msgid "" "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " "%(raid_snapshot_id)s, volume: %(volume)s." msgstr "" "작성 성공, 스냅샷: %(snapshot)s, RAID의 스냅샷 ID: %(raid_snapshot_id)s, 볼" "륨: %(volume)s." #, python-format msgid "Create target consistency group %(targetCg)s." msgstr "대상 일관성 그룹 %(targetCg)s 작성." 
#, python-format msgid "Create volume of %s GB" msgstr "%s GB의 볼륨 작성" #, python-format msgid "CreateReplay success %s" msgstr "CreateReplay 성공 %s" #, python-format msgid "" "Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume [%(cb_vol)s] " "and OpenStack volume [%(stack_vol)s]." msgstr "" "CloudByte 스냅샷 [%(cb_snap)s] w.r.t CloudByte 볼륨 [%(cb_vol)s] 및 " "OpenStack 볼륨 [%(stack_vol)s]이(가) 작성되었습니다." #, python-format msgid "Created Consistency Group %s" msgstr "일관성 그룹 %s 작성" #, python-format msgid "" "Created a clone [%(cb_clone)s] at CloudByte snapshot path [%(cb_snap)s] w.r." "t parent OpenStack volume [%(stack_vol)s]." msgstr "" "CloudByte 스냅샷 경로 [%(cb_snap)s] w.r.t 상위 OpenStack 볼륨 [%(stack_vol)s]" "에서 복제본 [%(cb_clone)s]이(가) 작성되었습니다." #, python-format msgid "Created datastore folder: %s." msgstr "데이터 저장소 폴더 작성: %s." #, python-format msgid "" "Created lun-map:\n" "%s" msgstr "" "lun-map 작성:\n" "%s" #, python-format msgid "" "Created multi-attach E-Series host group %(label)s with clusterRef " "%(clusterRef)s" msgstr "" "clusterRef가 %(clusterRef)s인 다중 연결 E-Series 호스트 그룹 %(label)s이(가) " "작성됨 " #, python-format msgid "Created new initiator group name: %(igGroupName)s." msgstr "새 개시자 그룹 이름 %(igGroupName)s이(가) 작성되었습니다." #, python-format msgid "Created new masking view : %(maskingViewName)s." msgstr "새 마스킹 보기 %(maskingViewName)s이(가) 작성되었습니다." #, python-format msgid "Created new storage group: %(storageGroupName)s." msgstr "새 스토리지 그룹 작성: %(storageGroupName)s." #, python-format msgid "Created snap grp with label %s." msgstr "레이블이 %s인 스냅 grp가 작성되었습니다." #, python-format msgid "Created volume %(instanceId)s: %(name)s" msgstr "볼륨 %(instanceId)s이(가) 작성됨: %(name)s" #, python-format msgid "Created volume %(volname)s, volume id %(volid)s." msgstr "볼륨 %(volname)s, 볼륨 id %(volid)s이(가) 작성되었습니다." msgid "Created volume successfully." msgstr "볼륨이 성공적으로 작성되었습니다." #, python-format msgid "Created volume with label %s." msgstr "레이블이 %s인 볼륨이 작성되었습니다." #, python-format msgid "Creating %(volume)s on %(device)s" msgstr "%(device)s에서 %(volume)s 작성" msgid "Creating Consistency Group" msgstr "일관성 그룹 작성" #, python-format msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "%(container)s 컨테이너에서 %(volume_id)s 볼륨의 백업 작성" #, python-format msgid "Creating cgsnapshot %(name)s." msgstr "cgsnapshot %(name)s 작성." #, python-format msgid "Creating clone of volume: %s" msgstr "%s 볼륨의 복제 작성" #, python-format msgid "Creating clone of volume: %s." msgstr "볼륨의 복제 작성: %s" #, python-format msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." msgstr "cgsnapshot %(snap)s에서 일관성 그룹 %(name)s 작성." #, python-format msgid "" "Creating consistency group %(name)s from source consistency group " "%(source_cgid)s." msgstr "소스 일관성 그룹 %(source_cgid)s에서 일관성 그룹 %(name)s 작성." #, python-format msgid "Creating consistency group %(name)s." msgstr "일관성 그룹 %(name)s 작성." #, python-format msgid "Creating host object %(host_name)r with IQN: %(iqn)s." msgstr "IQN: %(iqn)s(으)로 호스트 오브젝트 %(host_name)r 작성" #, python-format msgid "Creating host object %(host_name)r with WWN: %(wwn)s." msgstr "WWN: %(wwn)s(으)로 호스트 오브젝트 %(host_name)r 작성" #, python-format msgid "Creating host with ports %s." msgstr "포트 %s(으)로 호스트 작성." 
#, python-format msgid "Creating image snapshot %s" msgstr "이미지 스냅샷 \"%s\" 작성" #, python-format msgid "Creating initiator group %(grp)s with initiator %(iname)s" msgstr "개시자 %(iname)s(으)로 개시자 그룹 %(grp)s 작성" #, python-format msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" msgstr "한 개시자 %(iname)s(으)로 개시자 그룹 %(igrp)s 작성" #, python-format msgid "Creating iscsi_target for volume: %s" msgstr "%s 볼륨에 대한 iscsi_target 작성" #, python-format msgid "Creating regular file: %s.This may take some time." msgstr "일반 파일 작성%s.이 작업을 수행하는 데 다소 시간이 걸릴 수 있습니다." #, python-format msgid "Creating server %s" msgstr "서버 %s 작성" #, python-format msgid "Creating snapshot %(snap)s of volume %(vol)s" msgstr "볼륨 %(vol)s의 스냅샷 %(snap)s 작성" #, python-format msgid "" "Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " "snap_description=%(desc)s" msgstr "" "volume_name=%(vol)s snap_name=%(name)s snap_description=%(desc)s의 스냅샷 작" "성" #, python-format msgid "Creating snapshot: %s" msgstr "스냅샷 작성: %s." #, python-format msgid "Creating temp snapshot %(snap)s from volume %(vol)s" msgstr "볼륨 %(vol)s에서 임시 스냅샷 %(snap)s 작성" #, python-format msgid "Creating transfer of volume %s" msgstr "볼륨 %s의 전송 작성" #, python-format msgid "Creating volume %s from snapshot." msgstr "스냅샷에서 볼륨 %s 작성." #, python-format msgid "Creating volume from snapshot: %s" msgstr "스냅샷에서 볼륨 작성: %s." #, python-format msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s." msgstr "백업 %(backup_id)s의 복원을 위한 %(size)sGB의 볼륨 작성." #, python-format msgid "Creating volume snapshot: %s." msgstr "볼륨 스냅샷 작성: %s." #, python-format msgid "Creatng volume from snapshot. volume: %s" msgstr "스냅샷에서 볼륨 작성, 볼륨: %s" #, python-format msgid "DRBD connection for %s already removed" msgstr "%s의 DRBD 연결이 이미 제거됨" #, python-format msgid "Delete Consistency Group: %(group)s." msgstr "일관성 그룹 %(group)s 삭제." #, python-format msgid "Delete Snapshot %(snapshot_id)s completed." msgstr "스냅샷 %(snapshot_id)s 삭제가 완료되었습니다." #, python-format msgid "Delete Snapshot: %(snapshot)s" msgstr "스냅샷 삭제: %(snapshot)s" #, python-format msgid "Delete Snapshot: %(snapshot)s." msgstr "스냅샷 %(snapshot)s 삭제" #, python-format msgid "Delete Snapshot: %(snapshotName)s." msgstr "스냅샷 삭제: %(snapshotName)s." #, python-format msgid "Delete Volume %(volume_id)s completed." msgstr "볼륨 %(volume_id)s 삭제가 완료되었습니다." #, python-format msgid "Delete backup finished, backup %s deleted." msgstr "백업 삭제가 완료됨, 백업 %s이(가) 삭제되었습니다." #, python-format msgid "Delete backup started, backup: %s." msgstr "백업 삭제가 시작됨, 백업: %s." #, python-format msgid "Delete backup with id: %s" msgstr "ID가 %s인 백업 삭제" #, python-format msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" msgstr "일관성 그룹 %(group_name)s의 cgsnapshot %(snap_name)s 삭제" #, python-format msgid "Delete cgsnapshot with id: %s" msgstr "ID가 %s인 cgsnapshot 삭제" #, python-format msgid "Delete connection target list: %(targets)s" msgstr "연결 대상 목록 삭제: %(targets)s" msgid "Delete consistency group completed successfully." msgstr "일관성 그룹 삭제가 성공적으로 완료되었습니다." #, python-format msgid "Delete consistency group with id: %s" msgstr "ID가 %s인 일관성 그룹 삭제" #, python-format msgid "" "Delete of backup '%(backup)s' for volume '%(volume)s' finished with warning." msgstr "" "볼륨 '%(volume)s'의 백업 '%(backup)s' 삭제가 완료되었으며 경고가 표시됩니다." msgid "Delete snapshot completed successfully" msgstr "스냅샷 삭제가 성공적으로 완료되었습니다." #, python-format msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." 
msgstr "소스 CG %(cgId)s의 스냅샷 cgsnapshotID: %(cgsnapshot)s 삭제." msgid "Delete snapshot metadata completed successfully." msgstr "스냅샷 메타데이터 삭제가 성공적으로 완료되었습니다." #, python-format msgid "Delete snapshot with id: %s" msgstr "ID가 %s인 스냅샷 삭제" #, python-format msgid "Delete transfer with id: %s" msgstr "ID가 %s인 전송 삭제" msgid "Delete volume metadata completed successfully." msgstr "볼륨 메타데이터 삭제가 성공적으로 완료되었습니다." msgid "Delete volume request issued successfully." msgstr "볼륨 삭제 요청이 성공적으로 실행되었습니다." #, python-format msgid "Delete volume with id: %s" msgstr "ID가 %s인 볼륨 삭제" #, python-format msgid "Deleted %(row)d rows from table=%(table)s" msgstr "테이블=%(table)s에서 %(row)d 행 삭제" #, python-format msgid "" "Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " "[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." msgstr "" "CloudByte 스냅샷 [%(snap)s] w.r.t 상위 CloudByte 볼륨 [%(cb_vol)s] 및 상위 " "OpenStack 볼륨 [%(stack_vol)s]이(가) 삭제되었습니다." #, python-format msgid "Deleted the VM backing: %s." msgstr "VM 백업 삭제: %s." #, python-format msgid "Deleted vmdk file: %s." msgstr "vmdk 파일이 삭제됨: %s." msgid "Deleted volume successfully." msgstr "볼륨이 성공적으로 삭제되었습니다." msgid "Deleting Consistency Group" msgstr "일관성 그룹 삭제" #, python-format msgid "Deleting Volume: %(volume)s" msgstr "볼륨 삭제: %(volume)s" #, python-format msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." msgstr "볼륨 %(volume)s의 백업 기본 이미지='%(basename)s' 삭제." #, python-format msgid "Deleting deleteInitiatorGrp %s " msgstr "deleteInitiatorGrp %s 삭제" #, python-format msgid "Deleting snapshot %(ss)s from %(pro)s" msgstr "%(pro)s에서 스냅샷 %(ss)s 삭제" #, python-format msgid "Deleting snapshot %s " msgstr "스냅샷 %s 삭제" #, python-format msgid "Deleting snapshot: %s" msgstr "스냅샷 삭제: %s" #, python-format msgid "Deleting stale snapshot: %s" msgstr "시간이 경과된 스냅샷 삭제: %s" #, python-format msgid "Deleting unneeded host %(host_name)r." msgstr "불필요한 호스트 %(host_name)r 삭제." #, python-format msgid "Deleting volume %s " msgstr "볼륨 %s 삭제" #, python-format msgid "Deleting volume %s." msgstr "볼륨 %s 삭제." #, python-format msgid "Detach Volume, metadata is: %s." msgstr "볼륨 연결 해제, 메타데이터: %s." msgid "Detach volume completed successfully." msgstr "볼륨 연결 해제가 성공적으로 완료되었습니다." msgid "Determined volume DB was empty at startup." msgstr "시작 시 볼륨 DB가 비어 있는 것으로 판별되었습니다." msgid "Determined volume DB was not empty at startup." msgstr "시작 시 볼륨 DB가 비어 있지 않은 것으로 판별되었습니다" #, python-format msgid "" "Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " "delete anything." msgstr "" "백업 %(backing)s의 스냅샷 %(name)s을(를) 찾을 수 없습니다. 삭제할 내용이 없습" "니다." #, python-format msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" msgstr "검색 ip %(disc_ip)s을(를) mgmt+data 서브넷 %(net_label)s에서 발견함" #, python-format msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" msgstr "검색 ip %(disc_ip)s을(를) 데이터 서브넷 %(net_label)s에서 사용" #, python-format msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" msgstr "검색 ip %(disc_ip)s을(를) 서브넷 %(net_label)s에서 사용" #, python-format msgid "Discovery ip %s is used on mgmt+data subnet" msgstr "검색 ip %s을(를) mgmt+data 서브넷에서 사용" #, python-format msgid "Dissociating volume %s " msgstr "볼륨 %s 연관 해제" #, python-format msgid "Domain id is %s." msgstr "도메인 id는 %s입니다." #, python-format msgid "Done copying image: %(id)s to volume: %(vol)s." msgstr "이미지 %(id)s을(를) 볼륨 %(vol)s(으)로 복사하는 작업이 완료되었습니다." 
#, python-format msgid "Done copying volume %(vol)s to a new image %(img)s" msgstr "볼륨 %(vol)s을(를) 새 이미지 %(img)s(으)로 복사 완료" #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS encryption-at-rest feature not enabled " "in cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "하위 레벨 GPFS 클러스터가 발견됨. GPFS encryption-at-rest 기능을 클러스터 디" "먼 레벨 %(cur)s에서 사용할 수 없음 - 최소 %(min)s 레벨이어야 합니다. " msgid "Driver initialization completed successfully." msgstr "드라이버 초기화가 성공적으로 완료되었습니다." msgid "Driver post RPC initialization completed successfully." msgstr "드라이버 post RPC 초기화가 성공적으로 완료되었습니다." #, python-format msgid "Driver stats: %s" msgstr "드라이버 통계: %s" #, python-format msgid "" "E-series proxy API version %(version)s does not support full set of SSC " "extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "E-series 프록시 API 버전 %(version)s이(가) SSC 추가 스펙의 전체 세트를 지원하" "지 않습니다. 프록시 버전은 %(min_version)s 이상이어야 합니다. " #, python-format msgid "E-series proxy API version %s does not support autosupport logging." msgstr "E-series 프록시 API 버전 %s에서 자동 지원 로깅을 지원하지 않습니다." #, python-format msgid "EQL-driver: Setup is complete, group IP is \"%s\"." msgstr "EQL-드라이버: 설정이 완료되었습니다, 그룹 IP는 \"%s\"입니다." #, python-format msgid "EQL-driver: executing \"%s\"." msgstr "EQL-드라이버: \"%s\" 실행." #, python-format msgid "Editing Volume %(vol)s with mask %(mask)s" msgstr "마스크 %(mask)s(으)로 볼륨 %(vol)s 편집" #, python-format msgid "Elapsed time for clear volume: %.2f sec" msgstr "볼륨 지우기를 위해 경과된 시간: %.2f sec" msgid "Embedded mode detected." msgstr "임베드된 모드가 발견되었습니다." msgid "Enabling LVM thin provisioning by default because a thin pool exists." msgstr "씬 풀이 있으므로 기본적으로 LVM 씬 프로비저닝을 사용합니다." msgid "Enabling LVM thin provisioning by default because no LVs exist." msgstr "LV가 없으므로 기본적으로 LVM 씬 프로비저닝을 사용합니다." #, python-format msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" msgstr "extend_volume 볼륨=%(vol)s new_size=%(size)s 입력" #, python-format msgid "" "Entering initialize_connection volume=%(vol)s connector=%(conn)s location=" "%(loc)s" msgstr "initialize_connection 볼륨=%(vol)s 커넥터=%(conn)s 위치=%(loc)s 입력" #, python-format msgid "" "Entering terminate_connection volume=%(vol)s connector=%(conn)s location=" "%(loc)s." msgstr "terminate_connection 볼륨=%(vol)s 커넥터=%(conn)s 위치=%(loc)s 입력." #, python-format msgid "Entering unmanage_volume volume = %s" msgstr "unmanage_volume 볼륨 = %s 입력" #, python-format msgid "Exploring array subnet label %s" msgstr "배열 서브넷 레이블 %s 탐색" #, python-format msgid "Export record finished, backup %s exported." msgstr "레코드 내보내기가 완료됨, 백업 %s을(를) 내보냈습니다." #, python-format msgid "Export record started, backup: %s." msgstr "레코드 내보내기가 시작됨, 백업: %s." #, python-format msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." msgstr "lun_id %(lun_id)s에서 lun %(vol_id)s을(를) 내보냈습니다." msgid "Extend volume completed successfully." msgstr "볼륨 확장이 성공적으로 완료되었습니다." msgid "Extend volume request issued successfully." msgstr "볼륨 확장 요청이 성공적으로 실행되었습니다." #, python-format msgid "" "Extend volume: %(volumename)s, oldsize: %(oldsize)s, newsize: %(newsize)s." msgstr "볼륨 확장 %(volumename)s, oldsize: %(oldsize)s, newsize: %(newsize)s." #, python-format msgid "Extending volume %s." msgstr "볼륨 %s 확장." #, python-format msgid "Extending volume: %(id)s New size: %(size)s GB" msgstr "볼륨 확장: %(id)s 새 크기: %(size)sGB" #, python-format msgid "" "FAST: capacity stats for policy %(fastPolicyName)s on array %(arrayName)s. " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." 
msgstr "" "FAST: 배열 %(arrayName)s의 정책 %(fastPolicyName)s에 대한 용량 통계 " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." #, python-format msgid "FC Initiators %(in)s of %(ins)s need registration" msgstr "%(ins)s의 FC 개시자 %(in)s을(를) 등록해야 합니다." msgid "Failed over to replication target successfully." msgstr "복제 대상으로 장애 복구되었습니다." #, python-format msgid "Failed to create host: %(name)s. Check if it exists on the array." msgstr "호스트 작성 실패: %(name)s. 배열에 있는지 확인하십시오." #, python-format msgid "" "Failed to create hostgroup: %(name)s. Please check if it exists on the array." msgstr "hostgroup 작성 실패: %(name)s. 배열에 있는지 확인하십시오." #, python-format msgid "Failed to open iet session list for %(vol_id)s: %(e)s" msgstr "%(vol_id)s의 iet 세션 목록을 여는 데 실패: %(e)s" #, python-format msgid "Failing backend to %s" msgstr "백엔드를 %s(으)로 장애 복구" #, python-format msgid "Failing over volume %(id)s replication: %(res)s." msgstr "볼륨 %(id)s 복제 장애 복구: %(res)s." #, python-format msgid "Fault thrown: %s" msgstr "발생한 결함: %s" #, python-format msgid "Fetched vCenter server version: %s" msgstr "가져온 vCenter 서버 버전: %s" #, python-format msgid "Filter %(cls_name)s returned %(obj_len)d host(s)" msgstr "필터 %(cls_name)s에서 %(obj_len)d 호스트 리턴" #, python-format msgid "Filtered targets for SAN is: %(targets)s" msgstr "SAN의 필터링된 대상: %(targets)s" #, python-format msgid "Filtered targets for SAN is: %s" msgstr "SAN의 필터링된 대상: %s" #, python-format msgid "Final filtered map for delete connection: %(i_t_map)s" msgstr "연결 삭제를 위해 최종으로 필터링된 맵: %(i_t_map)s" #, python-format msgid "Final filtered map for fabric: %(i_t_map)s" msgstr "패브릭의 최종 필터링된 맵: %(i_t_map)s" #, python-format msgid "Fixing previous mount %s which was not unmounted correctly." msgstr "올바르게 마운트 해제되지 않은 이전 마운트 %s을(를) 수정합니다." #, python-format msgid "Flash Cache policy set to %s" msgstr "플래시 캐시 정책이 %s(으)로 설정됨" #, python-format msgid "Flexvisor already unassigned volume %(id)s." msgstr "Flexvisor에서 볼륨 %(id)s의 할당을 이미 해제했습니다." #, python-format msgid "Flexvisor snapshot %(id)s not existed." msgstr "Flexvisor 스냅샷 %(id)s이(가) 없습니다." #, python-format msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor에서 볼륨 %(id)s을(를) 그룹 %(cgid)s에 추가했습니다." #, python-format msgid "Flexvisor succeeded to clone volume %(id)s." msgstr "Flexvisor에서 볼륨 %(id)s을(를) 복제하는 데 성공했습니다." #, python-format msgid "Flexvisor succeeded to create volume %(id)s from snapshot." msgstr "Flexvisor가 스냅샷에서 볼륨 %(id)s을(를) 작성하는 데 성공했습니다." #, python-format msgid "Flexvisor succeeded to create volume %(id)s." msgstr "Flexvisor에서 볼륨 %(id)s을(를) 작성하는 데 성공했습니다." #, python-format msgid "Flexvisor succeeded to delete snapshot %(id)s." msgstr "Flexvisor에서 스냅샷 %(id)s을(를) 삭제하는 데 성공했습니다." #, python-format msgid "Flexvisor succeeded to extend volume %(id)s." msgstr "Flexvisor에서 볼륨 %(id)s을(를) 확장하는 데 성공했습니다." #, python-format msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor가 그룹 %(cgid)s에서 볼륨 %(id)s을(를) 제거했습니다." #, python-format msgid "Flexvisor succeeded to unassign volume %(id)s." msgstr "Flexvisor에서 볼륨 %(id)s의 할당을 해제하는 데 성공했습니다." #, python-format msgid "Flexvisor volume %(id)s does not exist." msgstr "Flexvisor 볼륨 %(id)s이(가) 없습니다." #, python-format msgid "Folder %s does not exist, it was already deleted." msgstr "폴더 %s이(가) 없습니다. 이미 삭제되었습니다." msgid "Force upload to image is disabled, Force option will be ignored." msgstr "이미지에 강제 업로드는 사용하지 않습니다. 강제 적용 옵션이 무시됩니다." 
#, python-format msgid "Found a temporary snapshot %(name)s" msgstr "임시 스냅샷 %(name)s 발견" #, python-format msgid "Found existing masking view: %(maskingViewName)s." msgstr "기존 마스킹 보기 발견: %(maskingViewName)s." msgid "Found failover volume. Competing failover." msgstr "장애 복구 볼륨이 발견되었습니다. 장애 복구 경쟁." #, python-format msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." msgstr "백엔드의 사용 가능 용량: %(free)s, 총 용량: %(total)s." #, python-format msgid "Friendly zone name after forming: %(zonename)s" msgstr "구성 후의 선호 구역 이름: %(zonename)s" #, python-format msgid "Generating transfer record for volume %s" msgstr "볼륨 %s의 전송 레코드 생성" #, python-format msgid "Get FC targets %(tg)s to register initiator %(in)s." msgstr "개시자 %(in)s을(를) 등록하기 위해 FC 대상 %(tg)s을(를) 가져옵니다." #, python-format msgid "Get ISCSI targets %(tg)s to register initiator %(in)s." msgstr "개시자 %(in)s을(를) 등록하기 위해 ISCSI 대상 %(tg)s을(를) 가져옵니다." #, python-format msgid "Get Volume response: %s" msgstr "볼륨 가져오기 응답: %s" msgid "Get all snapshots completed successfully." msgstr "모든 스냅샷 가져오기가 성공적으로 완료되었습니다." msgid "Get all volumes completed successfully." msgstr "모든 볼륨 가져오기가 성공적으로 완료되었습니다." #, python-format msgid "Get domain by name response: %s" msgstr "이름별 도메인 가져오기 응답: %s" #, python-format msgid "Get service: %(lbl)s->%(svc)s" msgstr "서비스 가져오기: %(lbl)s->%(svc)s" msgid "Get snapshot metadata completed successfully." msgstr "스냅샷 메타데이터 가져오기가 성공적으로 완료되었습니다." msgid "Get snapshot metadata value not implemented." msgstr "스냅샷 메타데이터 값 가져오기가 구현되지 않았습니다." #, python-format msgid "Get the default ip: %s." msgstr "기본 ip 가져오기: %s." msgid "Get volume admin metadata completed successfully." msgstr "볼륨 관리 메타데이터 가져오기가 성공적으로 완료되었습니다." msgid "Get volume image-metadata completed successfully." msgstr "볼륨 이미지 메타데이터 가져오기가 성공적으로 완료되었습니다." msgid "Get volume metadata completed successfully." msgstr "볼륨 메타데이터 가져오기가 성공적으로 완료되었습니다." msgid "Getting getInitiatorGrpList" msgstr "getInitiatorGrpList 가져오기" #, python-format msgid "Getting volume information for vol_name=%s" msgstr "vol_name=%s의 볼륨 정보 가져오기" #, python-format msgid "Going to perform request again %s with valid token." msgstr "올바른 토큰으로 %s 요청을 다시 수행합니다." #, python-format msgid "HDP list: %s" msgstr "HDP 목록: %s" #, python-format msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" msgstr "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" #, python-format msgid "HPELeftHand API version %s" msgstr "HPELeftHand API 버전 %s" #, python-format msgid "HTTP exception thrown: %s" msgstr "HTTP 예외 처리: %s" #, python-format msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." msgstr "Hypermetro id: %(metro_id)s. 원격 lun id: %(remote_lun_id)s." #, python-format msgid "ISCSI properties: %(properties)s" msgstr "ISCSI 특성: %(properties)s" msgid "ISCSI provider_location not stored, using discovery." msgstr "ISCSI provider_location이 저장되지 않음, 검색 사용." #, python-format msgid "ISCSI volume is: %(volume)s" msgstr "ISCSI 볼륨: %(volume)s" #, python-format msgid "Ignored LU creation error \"%s\" while ensuring export." msgstr "내보내기를 확인하는 중에 LU 작성 오류 \"%s\"이(가) 무시되었습니다." #, python-format msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export." msgstr "" "내보내기를 확인하는 동안 LUN 맵핑 항목 추가 오류 \"%s\"이(가) 무시되었습니다." #, python-format msgid "Ignored target creation error \"%s\" while ensuring export." msgstr "내보내기를 확인하는 중에 대상 작성 오류 \"%s\"이(가) 무시되었습니다." #, python-format msgid "Ignored target group creation error \"%s\" while ensuring export." msgstr "" "내보내기를 확인하는 중에 대상 그룹 작성 오류 \"%s\"이(가) 무시되었습니다." 
#, python-format msgid "" "Ignored target group member addition error \"%s\" while ensuring export." msgstr "" "내보내기를 확인하는 중에 대상 그룹 멤버 추가 오류 \"%s\"이(가) 무시되었습니" "다." #, python-format msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." msgstr "이미지 %(pool)s/%(image)s은(는) 스냅샷 %(snap)s에 종속됩니다." #, python-format msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" msgstr "이미지 %(image_id)s에 대한 이미지 복제에 실패. 메시지: %(msg)s" #, python-format msgid "Image download %(sz).2f MB at %(mbps).2f MB/s" msgstr "%(mbps).2f MB/s에서 이미지 다운로드 %(sz).2f MB" #, python-format msgid "Image will locally be converted to raw %s" msgstr "로컬에서 이미지가 원시 %s(으)로 변환됨" #, python-format msgid "Image-volume cache disabled for host %(host)s." msgstr "호스트 %(host)s에 대해 이미지-볼륨 캐시가 사용되지 않습니다." #, python-format msgid "Image-volume cache enabled for host %(host)s." msgstr "호스트 %(host)s에 대해 이미지-볼륨 캐시가 사용되었습니다." #, python-format msgid "Import record id %s metadata from driver finished." msgstr "드라이버에서 레코드 id %s 메타데이터 가져오기가 완료되었습니다." #, python-format msgid "Import record started, backup_url: %s." msgstr "레코드 가져오기가 시작됨, backup_url: %s." #, python-format msgid "Imported %(fail)s to %(guid)s." msgstr "%(fail)s을(를) %(guid)s에 가져왔습니다." #, python-format msgid "Initialize connection: %(volume)s." msgstr "연결 초기화: %(volume)s." msgid "Initialize volume connection completed successfully." msgstr "볼륨 연결 초기화가 성공적으로 완료되었습니다." #, python-format msgid "Initialized driver %(name)s version: %(vers)s" msgstr "초기화된 드라이버 %(name)s 버전: %(vers)s" #, python-format msgid "" "Initializing RPC dependent components of volume driver %(driver_name)s " "(%(version)s)" msgstr "" "볼륨 드라이버 %(driver_name)s (%(version)s)의 RPC 종속 구성 요소 초기화" msgid "Initializing extension manager." msgstr "확장기능 관리자를 초기화 중입니다. " #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s." msgstr "" "개시자 이름 %(initiatorNames)s이(가) 배열 %(storageSystemName)s에 없습니다." #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s. " msgstr "" "개시자 이름 %(initiatorNames)s이(가) 배열 %(storageSystemName)s에 없습니다." #, python-format msgid "Initiator group name is %(grp)s for initiator %(iname)s" msgstr "개시자 %(iname)s의 개시자 그룹 이름이 %(grp)s입니다." #, python-format msgid "LUN %(id)s extended to %(size)s GB." msgstr "LUN %(id)s이(가) %(size)sGB로 확장되었습니다." #, python-format msgid "LUN %(lun)s extended to %(size)s GB." msgstr "LUN %(lun)s이(가) %(size)sGB로 확장되었습니다." #, python-format msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "크기가 %(sz)sMB인 LUN %(lun)s이(가) 작성되었습니다." #, python-format msgid "LUN with given ref %s need not be renamed during manage operation." msgstr "" "지정된 ref %s이(가) 있는 LUN은 관리 조작 중에 이름을 바꾸지 않아도 됩니다." #, python-format msgid "" "Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: " "%(name)s." msgstr "" "create_volume 종료: %(volumeName)s 리턴 코드: %(rc)lu 볼륨 dict: %(name)s." #, python-format msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." msgstr "delete_volume 종료: %(volumename)s 리턴 코드: %(rc)lu." #, python-format msgid "Leaving initialize_connection: %s" msgstr "initialize_connection 종료: %s" #, python-format msgid "Loaded extension: %s" msgstr "로드된 확장: %s" #, python-format msgid "" "Logical Volume not found when querying LVM info. (vg_name=%(vg)s, lv_name=" "%(lv)s" msgstr "" "LVM 정보를 쿼리할 때 논리 볼륨을 찾을 수 없습니다. (vg_name=%(vg)s, lv_name=" "%(lv)s" msgid "Manage existing volume completed successfully." msgstr "기존 볼륨 관리가 성공적으로 완료되었습니다." 
#, python-format msgid "" "Manage operation completed for LUN with new path %(path)s and uuid %(uuid)s." msgstr "" "새 경로 %(path)s 및 uuid %(uuid)s(으)로 LUN의 관리 조작이 완료되었습니다." #, python-format msgid "" "Manage operation completed for volume with new label %(label)s and wwn " "%(wwn)s." msgstr "" "새 레이블 %(label)s 및 wwn %(wwn)s의 볼륨에 대한 관리 조작이 완료되었습니다." #, python-format msgid "Manage volume %s" msgstr "볼륨 %s 관리" msgid "Manage volume request issued successfully." msgstr "볼륨 요청 관리가 성공적으로 실행되었습니다." #, python-format msgid "Masking view %(maskingViewName)s successfully deleted." msgstr "마스킹 보기 %(maskingViewName)s이(가) 성공적으로 삭제되었습니다." #, python-format msgid "Migrate Volume %(volume_id)s completed." msgstr "볼륨 %(volume_id)s 마이그레이션이 완료되었습니다." msgid "Migrate volume completed successfully." msgstr "볼륨 마이그레이션이 성공적으로 완료되었습니다." msgid "Migrate volume completion issued successfully." msgstr "볼륨 마이그레이션 완료가 성공적으로 실행되었습니다." msgid "Migrate volume request issued successfully." msgstr "볼륨 마이그레이션 요청이 성공적으로 실행되었습니다." #, python-format msgid "Migrating using retype Volume: %(volume)s." msgstr "볼륨: %(volume)s 재입력을 사용하여 마이그레이션." #, python-format msgid "" "Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to %(new_snap_cpg)s." msgstr "" "%(volume_name)s snap_cpg를 %(old_snap_cpg)s에서 %(new_snap_cpg)s(으)로 수정." #, python-format msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" msgstr "%(old_cpg)s에서 %(new_cpg)s(으)로 %(volume_name)s userCPG 수정" #, python-format msgid "Modifying %s comments." msgstr "%s 주석 수정." msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." msgstr "" "PyWBEM 모듈이 설치되지 않았습니다. python-pywbem 패키지를 사용하여 PyWBEM을 " "설치하십시오. " msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." msgstr "" "모듈 PyWBEM이 설치되지 않았습니다. python-pywbem 패키지를 사용하여 PyWBEM을 " "설치하십시오." #, python-format msgid "Mounting volume: %s ..." msgstr "볼륨 마운트: %s ..." #, python-format msgid "Mounting volume: %s succeeded" msgstr "볼륨 마운트: %s 성공" #, python-format msgid "" "NON-FAST: capacity stats for pool %(poolName)s on array %(arrayName)s " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." msgstr "" "NON-FAST: 배열 %(arrayName)s의 풀 %(poolName)s에 대한 용량 통계 " "total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" "%(free_capacity_gb)lu." msgid "Need to remove FC Zone, building initiator target map" msgstr "FC Zone을 제거해야 하며, 개시자 대상 맵 빌드" msgid "Need to remove FC Zone, building initiator target map." msgstr "FC Zone을 제거해야 하며, 개시자 대상 맵 빌드." msgid "" "Neither security file nor plain text credentials are specified. Security " "file under home directory will be used for authentication if present." msgstr "" "보안 파일 및 일반 텍스트 자격 증명이 모두 지정되지 않았습니다. 홈 디렉토리에 " "보안 파일이 있는 경우 이 파일을 인증에 사용합니다." #, python-format msgid "" "NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " "loaded." msgstr "" "제품군 %(storage_family)s 및 프로토콜 %(storage_protocol)s의 NetApp 드라이버" "가 로드되었습니다." #, python-format msgid "New Cinder secure environment indicator file created at path %s." msgstr "새 Cinder 보안 환경 표시기 파일이 %s 경로에 작성되었습니다." #, python-format msgid "" "New size is equal to the real size from backend storage, no need to extend. " "realsize: %(oldsize)s, newsize: %(newsize)s." msgstr "" "새 크기는 백엔드 스토리지의 실제 크기와 같으므로 확장하지 않아도 됩니다. " "realsize: %(oldsize)s, newsize: %(newsize)s." #, python-format msgid "New str info is: %s." msgstr "새 str 정보: %s." #, python-format msgid "No dpkg-query info found for %(pkg)s package." 
msgstr "%(pkg)s 패키지의 dpkg-query 정보를 찾을 수 없습니다." #, python-format msgid "No igroup found for initiator %s" msgstr "%s 개시자의 igroup을 찾을 수 없음" #, python-format msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" msgstr "볼륨 id %(vol_id)s의 iscsi 대상이 없음: %(e)s" #, python-format msgid "No need to extend volume %s as it is already the requested new size." msgstr "볼륨 %s이(가) 이미 요청된 새 크기이므로 볼륨을 확장하지 않아도 됩니다." #, python-format msgid "" "No replication synchronization session found associated with source volume " "%(source)s on %(storageSystem)s." msgstr "" "%(storageSystem)s의 소스 볼륨 %(source)s과(와) 연관된 복제 동기화 세션을 찾을 " "수 없습니다." #, python-format msgid "" "No restore point found for backup='%(backup)s' of volume %(volume)s although " "base image is found - forcing full copy." msgstr "" "기본 이미지가 있어도 볼륨 %(volume)s의 백업='%(backup)s'의 복원 지점을 찾을 " "수 없음 - 전체 복사 시행." #, python-format msgid "No rpm info found for %(pkg)s package." msgstr "%(pkg)s 패키지의 rpm 정보를 찾을 수 없습니다." #, python-format msgid "No targets to add or remove connection for initiator: %(init_wwn)s" msgstr "개시자의 연결을 추가하거나 제거할 대상이 없음: %(init_wwn)s" #, python-format msgid "No volume found for CG: %(cg)s." msgstr "CG의 볼륨을 찾을 수 없음: %(cg)s." #, python-format msgid "Non fatal cleanup error: %s." msgstr "치명적이지 않은 정리 오류: %s." #, python-format msgid "OpenStack OS Version Info: %(info)s" msgstr "OpenStack OS 버전 정보: %(info)s" #, python-format msgid "" "Origin volume %s appears to be removed, try to remove it from backend if it " "is there." msgstr "" "원래 볼륨 %s이(가) 제거된 것으로 보입니다. 볼륨이 백엔드에 있는 경우 제거하십" "시오." #, python-format msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "%(volume_id)s 볼륨을 %(backup_id)s 백업의 복원으로 겹쳐씀" #, python-format msgid "Params for add volume request: %s." msgstr "볼륨 요청 추가 매개 변수: %s." #, python-format msgid "Parse_loc: %s" msgstr "Parse_loc: %s" #, python-format msgid "Performing post clone for %s" msgstr "%s의 사후 복제 수행" #, python-format msgid "Performing secure delete on volume: %s" msgstr "%s 볼륨에서 보안 삭제 수행" msgid "Plain text credentials are being used for authentication" msgstr "일반 텍스트 자격 증명이 인증에 사용 중임" #, python-format msgid "Pool id is %s." msgstr "풀 id는 %s입니다." #, python-format msgid "Port group instance name is %(foundPortGroupInstanceName)s." msgstr "포트 그룹 인스턴스 이름이 %(foundPortGroupInstanceName)s입니다." #, python-format msgid "Post clone resize LUN %s" msgstr "사후 복제 크기 조정 LUN %s" #, python-format msgid "Prefer use target wwpn %(wwpn)s" msgstr "대상 wwpn %(wwpn)s 사용 선호" #, python-format msgid "Profile %s has been deleted." msgstr "프로파일 %s이(가) 삭제되었습니다." msgid "Promote volume replica completed successfully." msgstr "볼륨 복제본 승격이 성공적으로 완료되었습니다." #, python-format msgid "Protection domain id: %(domain_id)s." msgstr "보호 도메인 id: %(domain_id)s." #, python-format msgid "Protection domain name: %(domain_name)s." msgstr "보호 도메인 이름: %(domain_name)s." msgid "Proxy mode detected." msgstr "프록시 모드가 발견되었습니다." #, python-format msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" msgstr "테이블=%(table)s에서 기간=%(age)d일보다 오래된 삭제 행 제거" #, python-format msgid "QoS: %s." msgstr "QoS: %s." #, python-format msgid "Query capacity stats response: %s." msgstr "용량 통계 쿼리 응답: %s." msgid "" "RBD striping not supported - ignoring configuration settings for rbd striping" msgstr "RBD 스트리핑이 지원되지 않음 - rbd 스트리핑의 구성 설정 무시" #, python-format msgid "RBD volume %s not found, allowing delete operation to proceed." msgstr "RBD 볼륨 %s을(를) 찾을 수 없으므로, 삭제 조작을 계속 진행합니다." 
#, python-format msgid "" "REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify server's " "certificate: %(verify_cert)s." msgstr "" "REST 서버 IP: %(ip)s, 포트: %(port)s, 사용자 이름: %(user)s. 서버의 인증서 확" "인: %(verify_cert)s." #, python-format msgid "Re-using existing purity host %(host_name)r" msgstr "기존 purity 호스트 %(host_name)r 재사용" msgid "Reconnected to coordination backend." msgstr "조정 백엔드에 다시 연결되었습니다." msgid "Reconnecting to coordination backend." msgstr "조정 백엔드에 다시 연결 중입니다." #, python-format msgid "Registering image in cache %s" msgstr "캐시 %s에 이미지 등록" #, python-format msgid "Regular file: %s created." msgstr "일반 파일: %s이(가) 작성되었습니다." #, python-format msgid "" "Relocating volume: %s to a different datastore due to insufficient disk " "space on current datastore." msgstr "" "현재 데이터 저장소의 디스크 공간이 부족하므로 볼륨 %s을(를) 다른 데이터 저장" "소로 재배치." #, python-format msgid "Remote return FC info is: %s." msgstr "원격 리턴 FC 정보: %s." msgid "Remove volume export completed successfully." msgstr "볼륨 내보내기 제거가 성공적으로 완료되었습니다." #, python-format msgid "Removed %s from cg." msgstr "cg에서 %s이(가) 제거되었습니다." #, python-format msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s" msgstr "개시자 그룹 %(igrp)s의 볼륨=%(vol)s에서 ACL 제거" #, python-format msgid "Removing iscsi_target for Volume ID: %s" msgstr "볼륨 ID %s에 대한 iscsi_target 제거" #, python-format msgid "Removing iscsi_target for volume: %s" msgstr "%s 볼륨에 대한 iscsi_target 제거" #, python-format msgid "Removing iscsi_target for: %s" msgstr "%s에 대한 iscsi_target 제거" #, python-format msgid "Removing iscsi_target: %s" msgstr "iscsi_target 제거: %s" #, python-format msgid "Removing non-active host: %(host)s from scheduler cache." msgstr "스케줄러 캐시에서 비활성 호스트: %(host)s 제거." #, python-format msgid "Removing volume %(v)s from consistency group %(cg)s." msgstr "일관성 그룹 %(cg)s에서 볼륨 %(v)s 제거." #, python-format msgid "Removing volumes from cg %s." msgstr "cg %s에서 볼륨 제거." #, python-format msgid "Rename Volume %(volume_id)s completed." msgstr "볼륨 %(volume_id)s 이름 변경이 완료되었습니다." #, python-format msgid "Renaming %(id)s from %(current_name)s to %(new_name)s." msgstr "%(id)s의 이름을 %(current_name)s에서 %(new_name)s(으)로 변경." #, python-format msgid "Renaming backing VM: %(backing)s to %(new_name)s." msgstr "백업 VM %(backing)s의 이름을 %(new_name)s(으)로 변경." #, python-format msgid "Renaming existing snapshot %(ref_name)s to %(new_name)s" msgstr "기존 스냅샷의 이름을 %(ref_name)s에서 %(new_name)s(으)로 변경" #, python-format msgid "Renaming existing volume %(ref_name)s to %(new_name)s" msgstr "기존 볼륨의 이름을 %(ref_name)s에서 %(new_name)s(으)로 변경" #, python-format msgid "Replication %(vol)s to %(dest)s." msgstr "%(vol)s을(를) %(dest)s에 복제." #, python-format msgid "Replication created for %(volname)s to %(destsc)s" msgstr "%(destsc)s에 %(volname)s에 대한 복제 작성" #, python-format msgid "Replication is not configured on backend: %s." msgstr "복제가 백엔드에 구성되지 않음: %s." #, python-format msgid "Requested image %(id)s is not in raw format." msgstr "요청된 이미지 %(id)s이(가) 원시 형식이 아닙니다." #, python-format msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." msgstr "요청된 통합 구성: %(storage_family)s 및 %(storage_protocol)s." msgid "Reserve volume completed successfully." msgstr "볼륨 예약이 성공적으로 완료되었습니다." #, python-format msgid "" "Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." msgstr "백업 상태 재설정이 시작됨, backup_id: %(backup_id)s, 상태: %(status)s." #, python-format msgid "Resetting backup %s to available (was restoring)." msgstr "백업 %s을(를) 사용 가능으로 재설정(복원 중이었음)." 
#, python-format msgid "Resetting backup %s to error (was creating)." msgstr "백업 %s을(를) 오류로 재설정(작성)." msgid "Resetting cached RPC version pins." msgstr "캐시된 RPC 버전 핀을 재설정하는 중입니다." #, python-format msgid "" "Resetting volume %(vol_id)s to previous status %(status)s (was backing-up)." msgstr "볼륨 %(vol_id)s을(를) 이전 상태 %(status)s(으)로 재설정(백업)." #, python-format msgid "Resizing LUN %s directly to new size." msgstr "LUN %s의 크기를 직접 새 크기로 조정합니다." #, python-format msgid "Resizing LUN %s using clone operation." msgstr "복제 조작을 사용하여 LUN %s 크기 조정." #, python-format msgid "Resizing file to %sG" msgstr "파일의 크기를 %sG로 조정" #, python-format msgid "Resizing file to %sG..." msgstr "파일의 크기를 %sG(으)로 조정..." #, python-format msgid "" "Restore backup finished, backup %(backup_id)s restored to volume " "%(volume_id)s." msgstr "" "백업 복원이 완료됨, 백업 %(backup_id)s이(가) 볼륨 %(volume_id)s(으)로 복원됩" "니다." #, python-format msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "백업 복원이 시작됨, 백업: %(backup_id)s 볼륨: %(volume_id)s." #, python-format msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "백업 %(backup)s을(를) 볼륨 %(volume)s(으)로 복원 중입니다." #, python-format msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "%(backup_id)s 백업을 %(volume_id)s 볼륨으로 복원" msgid "Restoring iSCSI target from configuration file" msgstr "구성 파일에서 iSCSI 대상 복원" msgid "Resume volume delete completed successfully." msgstr "볼륨 삭제 재개가 성공적으로 완료되었습니다." #, python-format msgid "Resuming delete on backup: %s." msgstr "백업에서 삭제 재개: %s." #, python-format msgid "Retrieving secret for service: %s." msgstr "서비스의 시크릿 검색: %s." #, python-format msgid "Retrieving target for service: %s." msgstr "서비스의 대상 검색: %s." #, python-format msgid "Return FC info is: %s." msgstr "리턴 FC 정보: %s." #, python-format msgid "" "Returning connection_info: %(info)s for volume: %(volume)s with connector: " "%(connector)s." msgstr "" "커넥터 %(connector)s을(를) 사용하는 볼륨 %(volume)s의 connection_info: " "%(info)s 리턴." #, python-format msgid "Returning random Port Group: %(portGroupName)s." msgstr "임의의 포트 그룹 %(portGroupName)s(으)로 리턴합니다." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartcache from (name: %(old_name)s, id: " "%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) successfully." msgstr "" "LUN(id: %(lun_id)s) smartcache를 (name: %(old_name)s, id: %(old_id)s)에서 " "(name: %(new_name)s, id: %(new_id)s)(으)로 다시 입력하는 데 성공했습니다." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartpartition from (name: %(old_name)s, id: " "%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) success." msgstr "" "LUN(id: %(lun_id)s) smartpartition을 (name: %(old_name)s, id: %(old_id)s)에" "서 (name: %(new_name)s, id: %(new_id)s)(으)로 다시 입력하는 데 성공했습니다." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartqos from %(old_qos_value)s to %(new_qos)s " "success." msgstr "" "LUN(id: %(lun_id)s) smartqos를 %(old_qos_value)s에서 %(new_qos)s(으)로 다시 " "입력하는 데 성공했습니다." #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smarttier policy from %(old_policy)s to " "%(new_policy)s success." msgstr "" "LUN(id: %(lun_id)s) smarttier 정책을 %(old_policy)s에서 %(new_policy)s(으)로 " "다시 입력하는 데 성공했습니다." #, python-format msgid "Retype Volume %(volume_id)s is completed." msgstr "볼륨 %(volume_id)s 다시 입력이 완료되었습니다." #, python-format msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s." msgstr "" "볼륨 %(volume_id)s 다시 입력이 수행되었으며 풀 %(pool_id)s(으)로 마이그레이션" "되었습니다." 
#, python-format msgid "" "Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to " "%(old_snap_cpg)s." msgstr "" "다시 입력에서 %(volume_name)s snap_cpg를 %(new_snap_cpg)s에서 다시 " "%(old_snap_cpg)s(으)로 되돌립니다." msgid "Retype volume completed successfully." msgstr "볼륨 다시 입력이 성공적으로 완료되었습니다." msgid "Retype volume request issued successfully." msgstr "볼륨 다시 입력 요청이 성공적으로 실행되었습니다." msgid "Retype was to same Storage Profile." msgstr "동일한 스토리지 프로파일에 다시 입력되었습니다." #, python-format msgid "Review shares: %s" msgstr "공유 검토: %s" msgid "Roll detaching of volume completed successfully." msgstr "볼륨 연결 해제가 성공적으로 롤링되었습니다." #, python-format msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" msgstr "%(server)s 및 vserver %(vs)s의 클러스터 최신 ssc 작업 실행" #, python-format msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" msgstr "%(server)s 및 vserver %(vs)s의 시간이 경과된 ssc 새로 고치기 작업 실행" #, python-format msgid "Running with vmemclient version: %s" msgstr "vmemclient 버전으로 실행: %s" #, python-format msgid "SC server created %s" msgstr "SC 서버 작성 %s" #, python-format msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s" msgstr "%(svc)s의 서비스 저장 -> %(hdp)s, %(path)s" #, python-format msgid "" "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " "image id: %(id)s." msgstr "" "ScaleIO copy_image_to_volume 볼륨: %(vol)s 이미지 서비스: %(service)s 이미지 " "id: %(id)s." #, python-format msgid "" "ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s " "image meta: %(meta)s." msgstr "" "ScaleIO copy_volume_to_image 볼륨: %(vol)s 이미지 서비스: %(service)s 이미지 " "메타: %(meta)s." #, python-format msgid "" "ScaleIO create cloned volume: source volume %(src)s to target volume %(tgt)s." msgstr "" "ScaleIO가 소스 볼륨 %(src)s에서 대상 볼륨 %(tgt)s(으)로 복제된 볼륨을 작성합" "니다." #, python-format msgid "" "ScaleIO create volume from snapshot: snapshot %(snapname)s to volume " "%(volname)s." msgstr "" "스냅샷에서 ScaleIO가 볼륨 작성: 볼륨 %(volname)s의 스냅샷 %(snapname)s." msgid "ScaleIO delete snapshot." msgstr "ScaleIO에서 스냅샷 삭제." #, python-format msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s." msgstr "ScaleIO에서 볼륨 %(volname)s을(를) 크기 %(new_size)s(으)로 확장합니다." #, python-format msgid "ScaleIO get domain id by name request: %s." msgstr "ScaleIO 이름별 도메인 가져오기 요청: %s." #, python-format msgid "ScaleIO get pool id by name request: %s." msgstr "ScaleIO에서 이름별 풀 가져오기 요청: %s." #, python-format msgid "ScaleIO get volume by id request: %s." msgstr "ScaleIO의 id별 볼륨 가져오기 요청: %s." #, python-format msgid "ScaleIO rename volume request: %s." msgstr "ScaleIO 볼륨 이름 변경 요청: %s." msgid "ScaleIO snapshot group of volumes" msgstr "볼륨의 ScaleIO 스냅샷 그룹" #, python-format msgid "ScaleIO volume %(vol)s was renamed to %(new_name)s." msgstr "ScaleIO 볼륨 %(vol)s의 이름이 %(new_name)s(으)로 변경되었습니다." #, python-format msgid "" "Secondary ssh hosts key file %(kwargs)s will be loaded along with %(conf)s " "from /etc/cinder.conf." msgstr "" "보조 ssh 호스트 키 파일 %(kwargs)s이(가) /etc/cinder.conf의 %(conf)s과(와) 함" "께 로드됩니다." msgid "" "Service not found for updating active_backend_id, assuming default for " "driver init." msgstr "" "active_backend_id 업데이트를 위한 서비스를 찾을 수 없으므로, 드라이버 초기화 " "기본값을 사용합니다." msgid "Session might have expired. Trying to relogin" msgstr "세션이 만기되었을 수 있습니다. 다시 로그인을 시도합니다." msgid "Set backend status to frozen successfully." msgstr "백엔드 상태가 동결로 설정되었습니다." #, python-format msgid "Set newly managed Cinder volume name to %(name)s." msgstr "새로 관리된 Cinder 볼륨 이름을 %(name)s(으)로 설정합니다." 
#, python-format msgid "Set tgt CHAP secret for service: %s." msgstr "서비스의 tgt CHAP 시크릿 설정: %s." #, python-format msgid "Setting host %(host)s to %(state)s." msgstr "%(host)s 호스트를 %(state)s(으)로 설정 중입니다. " #, python-format msgid "Setting snapshot %(snap)s to online_flag %(flag)s" msgstr "스냅샷 %(snap)s을(를) online_flag %(flag)s(으)로 설정" #, python-format msgid "Setting volume %(vol)s to online_flag %(flag)s" msgstr "볼륨 %(vol)s을(를) online_flag %(flag)s(으)로 설정" #, python-format msgid "" "Skipping add target %(target_array)s to protection group %(pgname)s since " "it's already added." msgstr "" "대상 %(target_array)s이(가) 이미 보호 그룹 %(pgname)s에 추가되었으므로 이 작" "업을 건너뜁니다." #, python-format msgid "" "Skipping allow pgroup %(pgname)s on target array %(target_array)s since it " "is already allowed." msgstr "" "대상 배열 %(target_array)s에서 pgroup %(pgname)s이(가) 이미 허용되었으므로, " "이 작업을 건너뜁니다." #, python-format msgid "Skipping deletion of volume %s as it does not exist." msgstr "볼륨 %s이(가) 없으므로 삭제를 건너뜁니다." msgid "Skipping ensure_export. Found existing iSCSI target." msgstr "ensure_export를 건너뜁니다. 기존 iSCSI 대상이 발견되었습니다." #, python-format msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" "ensure_export를 건너뜁니다. %s 볼륨에 대한 iscsi_target을 프로비저닝하지 않습" "니다. " #, python-format msgid "" "Skipping image volume %(id)s because it is not accessible by current Tenant." msgstr "" "현재 테넌트에서 액세스할 수 없으므로 이미지 볼륨 %(id)s을(를) 건너뜁니다." #, python-format msgid "" "Skipping remove_export. No iscsi_target is presently exported for volume: %s" msgstr "" "remove_export를 건너뜀. 현재 %s 볼륨에 대한 iscsi_target이 내보내지지 않았습" "니다. " #, python-format msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" "remove_export를 건너뛰고 있습니다. 볼륨에 대해 프로비저닝된 iscsi_target이 없" "음: %s" #, python-format msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" msgstr "Smb 공유 %(share)s 총 크기 %(size)s 할당된 총계 %(allocated)s" #, python-format msgid "Snapshot %(disp)s '%(new)s' is now being managed." msgstr "스냅샷 %(disp)s '%(new)s'을(를) 지금 관리 중입니다." #, python-format msgid "" "Snapshot %(disp)s '%(vol)s' is no longer managed. Snapshot renamed to " "'%(new)s'." msgstr "" "스냅샷 %(disp)s '%(vol)s'이(가) 더 이상 관리되지 않습니다. 스냅샷의 이름이 " "'%(new)s'(으)로 변경됩니다." #, python-format msgid "" "Snapshot %(folder)s@%(snapshot)s does not exist, it was already deleted." msgstr "스냅샷 %(folder)s@%(snapshot)s이(가) 없습니다. 이미 삭제되었습니다." #, python-format msgid "" "Snapshot %(folder)s@%(snapshot)s has dependent clones, it will be deleted " "later." msgstr "" "스냅샷 %(folder)s@%(snapshot)s에 종속 복제본이 있으므로 나중에 삭제됩니다." #, python-format msgid "Snapshot %s created successfully." msgstr "스냅샷 %s이(가) 성공적으로 작성되었습니다." #, python-format msgid "Snapshot %s does not exist in backend." msgstr "백엔드에 스냅샷 %s이(가) 없습니다." #, python-format msgid "Snapshot %s does not exist, it seems it was already deleted." msgstr "스냅샷 %s이(가) 없습니다. 이미 삭제된 것으로 보입니다." #, python-format msgid "Snapshot %s does not exist, it was already deleted." msgstr "스냅샷 %s이(가) 없습니다. 이미 삭제되었습니다." #, python-format msgid "Snapshot %s has dependent clones, will be deleted later." msgstr "스냅샷 %s에 종속 복제본이 있으므로 나중에 삭제됩니다." #, python-format msgid "Snapshot %s not found" msgstr "스냅샷 %s을(를) 찾을 수 없음" #, python-format msgid "Snapshot %s was deleted successfully." msgstr "스냅샷 %s이(가) 성공적으로 삭제되었습니다." #, python-format msgid "Snapshot '%(ref)s' renamed to '%(new)s'." msgstr "스냅샷 '%(ref)s'의 이름이 '%(new)s'(으)로 변경됩니다." msgid "Snapshot create request issued successfully." msgstr "스냅샷 작성 요청이 성공적으로 실행되었습니다." 
#, python-format msgid "" "Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." msgstr "" "스냅샷 작성 %(cloneName)s이(가) 완료되었습니다. 소스 볼륨: %(sourceName)s." msgid "Snapshot delete request issued successfully." msgstr "스냅샷 삭제 요청이 성공적으로 실행되었습니다." msgid "Snapshot force create request issued successfully." msgstr "스냅샷 강제 작성 요청이 성공적으로 실행되었습니다." #, python-format msgid "" "Snapshot record for %s is not present, allowing snapshot_delete to proceed." msgstr "%s의 스냅샷 레코드가 없으므로, snapshot_delete를 계속할 수 있습니다." msgid "Snapshot retrieved successfully." msgstr "스냅샷이 성공적으로 검색되었습니다." #, python-format msgid "Snapshot volume %(vol)s into snapshot %(id)s." msgstr "볼륨 %(vol)s의 스냅샷을 스냅샷 %(id)s(으)로 작성합니다." #, python-format msgid "Snapshot volume response: %s." msgstr "스냅샷 볼륨 응답: %s." #, python-format msgid "Snapshot: %(snapshot)s: not found on the array." msgstr "스냅샷 %(snapshot)s이(가) 배열에 없습니다." #, python-format msgid "Source Snapshot: %s" msgstr "소스 스냅샷: %s" #, python-format msgid "" "Source and destination ZFSSA shares are the same. Do nothing. volume: %s" msgstr "" "소스 및 대상 ZFSSA 공유가 동일합니다. 아무 작업도 수행하지 않습니다. 볼륨: %s" #, python-format msgid "Start to create cgsnapshot for consistency group: %(group_name)s" msgstr "일관성 그룹의 cgsnapshot 작성 시작: %(group_name)s" #, python-format msgid "Start to create consistency group: %(group_name)s id: %(id)s" msgstr "일관성 그룹 작성 시작: %(group_name)s id: %(id)s" #, python-format msgid "Start to delete consistency group: %(cg_name)s" msgstr "일관성 그룹 삭제 시작: %(cg_name)s" #, python-format msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "%(topic)s 노드(버전 %(version_string)s) 시작 중" #, python-format msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "볼륨 드라이버 %(driver_name)s (%(version)s) 시작" #, python-format msgid "Storage Group %(storageGroupName)s successfully deleted." msgstr "스토리지 그룹 %(storageGroupName)s이(가) 성공적으로 삭제되었습니다." #, python-format msgid "Storage Group %s was empty." msgstr "스토리지 그룹 %s이(가) 비어 있습니다." #, python-format msgid "Storage group not associated with the policy. Exception is %s." msgstr "스토리지 그룹이 정책과 연관되지 않았습니다. 예외는 %s입니다." #, python-format msgid "" "Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " "%(pool_id)s." msgstr "" "스토리지 풀 이름: %(pools)s, 스토리지 풀 이름: %(pool)s, 풀 id: %(pool_id)s." #, python-format msgid "Successful login by user %s" msgstr "%s 사용자로 로그인 완료" #, python-format msgid "Successfully added %(volumeName)s to %(sgGroupName)s." msgstr "%(volumeName)s이(가) %(sgGroupName)s에 성공적으로 추가되었습니다." #, python-format msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" msgstr "%(server)s 및 vserver %(vs)s의 ssc 작업이 성공적으로 완료됨" #, python-format msgid "" "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" msgstr "" "%(server)s 및 vserver %(vs)s의 시간이 경과된 새로 고치기 작업이 성공적으로 완" "료됨" #, python-format msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr "%(src)s의 디스크를 %(dest)s(으)로 복사했습니다." #, python-format msgid "Successfully create volume %s" msgstr "볼륨 %s을(를) 성공적으로 작성" #, python-format msgid "" "Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack volume " "[%(stack_vol)s]." msgstr "" "OpenStack 볼륨 [%(stack_vol)s]에 해당하는 CloudByte 볼륨 [%(cb_vol)s]이(가) " "성공적으로 작성되었습니다." #, python-format msgid "Successfully created clone: %s." msgstr "성공적으로 복제본이 작성됨: %s." #, python-format msgid "" "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." msgstr "볼륨 백업 %(backing)s의 스냅샷 %(snap)s이(가) 작성되었습니다." 
#, python-format msgid "Successfully created snapshot: %s." msgstr "성공적으로 스냅샷이 작성됨: %s." #, python-format msgid "Successfully created volume backing: %s." msgstr "성공적으로 볼륨 백업이 작성됨: %s." #, python-format msgid "Successfully deleted %s." msgstr "%s이(가) 성공적으로 삭제되었습니다." #, python-format msgid "Successfully deleted file: %s." msgstr "성공적으로 파일 삭제: %s." #, python-format msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." msgstr "백업 %(backing)s의 스냅샷 %(name)s이(가) 삭제되었습니다." #, python-format msgid "Successfully deleted snapshot: %s" msgstr "성공적으로 스냅샷 삭제: %s" #, python-format msgid "Successfully deleted snapshot: %s." msgstr "성공적으로 스냅샷 삭제: %s." #, python-format msgid "" "Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " "OpenStack volume [%(stack_vol)s]." msgstr "" "OpenStack 볼륨 [%(stack_vol)s]에 해당하는 CloudByte의 볼륨 [%(cb_vol)s]이" "(가) 성공적으로 삭제되었습니다." #, python-format msgid "Successfully deleted volume: %s" msgstr "성공적으로 볼륨 삭제: %s" #, python-format msgid "Successfully extended virtual disk: %(path)s to %(size)s GB." msgstr "가상 디스크 %(path)s을(를) %(size)sGB(으)로 확장했습니다." #, python-format msgid "Successfully extended volume %(volume_id)s to size %(size)s." msgstr "볼륨 %(volume_id)s을(를) 크기 %(size)s(으)로 확장하는 데 성공했습니다." #, python-format msgid "Successfully extended volume: %(vol)s to size: %(size)s GB." msgstr "볼륨 %(vol)s을(를) %(size)sGB 크기로 확장했습니다." #, python-format msgid "Successfully got volume information for volume %s" msgstr "볼륨 %s의 볼륨 정보를 성공적으로 가져옴" #, python-format msgid "Successfully initialized connection with volume: %(volume_id)s." msgstr "볼륨 %(volume_id)s과(와) 연결이 성공적으로 시작되었습니다." #, python-format msgid "" "Successfully initialized connection. target_wwn: %(target_wwn)s, " "initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." msgstr "" "연결이 성공적으로 시작되었습니다. target_wwn: %(target_wwn)s, " "initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." #, python-format msgid "" "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." msgstr "볼륨 백업 %(backing)s을(를) 폴더 %(fol)s(으)로 이동했습니다." #, python-format msgid "" "Successfully relocated volume backing: %(backing)s to datastore: %(ds)s and " "resource pool: %(rp)s." msgstr "" "볼륨 백업 %(backing)s을(를) 데이터 저장소 %(ds)s 및 자원 풀 %(rp)s(으)로 재배" "치했습니다." msgid "Successfully retrieved InitiatorGrpList" msgstr "성공적으로 InitiatorGrpList 검색" #, python-format msgid "Successfully setup driver: %(driver)s for server: %(ip)s." msgstr "%(ip)s 서버의 드라이버 %(driver)s을(를) 성공적으로 설정했습니다." #, python-format msgid "Successfully setup replication for %s." msgstr "%s의 복제가 성공적으로 설정되었습니다." #, python-format msgid "Successfully terminated connection for volume: %(volume_id)s." msgstr "볼륨 %(volume_id)s의 연결이 성공적으로 종료되었습니다." #, python-format msgid "" "Successfully update volume stats. backend: %(volume_backend_name)s, vendor: " "%(vendor_name)s, driver version: %(driver_version)s, storage protocol: " "%(storage_protocol)s." msgstr "" "볼륨 통계가 성공적으로 업데이트되었습니다. 백엔드: %(volume_backend_name)s, " "공급자: %(vendor_name)s, 드라이버 버전: %(driver_version)s, 스토리지 프로토" "콜: %(storage_protocol)s." #, python-format msgid "" "Successfully updated CloudByte volume [%(cb_vol)s] corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "OpenStack 볼륨 [%(ops_vol)s]에 해당하는 CloudByte 볼륨 [%(cb_vol)s]이(가) 성" "공적으로 업데이트되었습니다." #, python-format msgid "Switching volume %(vol)s to profile %(prof)s." msgstr "볼륨 %(vol)s을(를) 프로파일 %(prof)s에 전환." #, python-format msgid "System %(id)s has %(status)s status." msgstr "시스템 %(id)s의 상태가 %(status)s입니다." 
#, python-format msgid "" "System with controller addresses [%s] is not registered with web service." msgstr "제어기 주소가 [%s]인 시스템이 웹 서비스에 등록되지 않았습니다." #, python-format msgid "Target is %(map)s! Targetlist = %(tgtl)s." msgstr "대상은 %(map)s입니다. Targetlist = %(tgtl)s." #, python-format msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." msgstr "마스킹 보기 %(maskingView)s의 wwns: %(targetWwns)s." #, python-format msgid "Terminate connection: %(volume)s." msgstr "연결 종료: %(volume)s." msgid "Terminate volume connection completed successfully." msgstr "볼륨 연결 종료가 성공적으로 완료되었습니다." msgid "Thawed backend successfully." msgstr "백엔드의 동결이 성공적으로 해제되었습니다." msgid "" "The NAS file operations will be run as non privileged user in secure mode. " "Please ensure your libvirtd settings have been configured accordingly (see " "section 'OpenStack' in the Quobyte Manual." msgstr "" "보안 모드에서 권한이 없는 사용자로 NAS 파일 조작이 실행됩니다. libvirtd 설정" "이 적절하게 구성되었는지 확인하십시오(Quobyte 매뉴얼에서 'OpenStack' 섹션 참" "조." #, python-format msgid "The QoS sepcs is: %s." msgstr "QoS 사양: %s." #, python-format msgid "" "The image was successfully converted, but image size is unavailable. src " "%(src)s, dest %(dest)s. %(error)s" msgstr "" "이미지를 성공적으로 전환했지만 이미지 크기를 사용할 수 없습니다. src " "%(src)s, dest %(dest)s. %(error)s" #, python-format msgid "" "The multi-attach E-Series host group '%(label)s' already exists with " "clusterRef %(clusterRef)s" msgstr "" "clusterRef가 %(clusterRef)s인 다중 연결 E-Series 호스트 그룹 '%(label)s'이" "(가) 이미 있음" #, python-format msgid "The pool_name from extraSpecs is %(pool)s." msgstr "extraSpecs의 pool_name은 %(pool)s입니다." #, python-format msgid "The same hostid is: %s." msgstr "동일한 hostid: %s." #, python-format msgid "The storage group found is %(foundStorageGroupInstanceName)s." msgstr "발견한 스토리지 그룹이 %(foundStorageGroupInstanceName)s입니다." #, python-format msgid "The target instance device id is: %(deviceid)s." msgstr "대상 인스턴스 장치 id: %(deviceid)s." #, python-format msgid "" "The volume belongs to more than one storage group. Returning storage group " "%(sgName)s." msgstr "" "볼륨이 두 개 이상의 스토리지 그룹에 속합니다. 스토리지 그룹 %(sgName)s을(를) " "리턴합니다." #, python-format msgid "" "There is no backing for the snapshotted volume: %(snap)s. Not creating any " "backing for the volume: %(vol)s." msgstr "" "스냅샷이 작성된 볼륨 %(snap)s의 백업이 없습니다. 볼륨 %(vol)s의 백업을 작성하" "지 않습니다." #, python-format msgid "" "There is no backing for the source volume: %(src)s. Not creating any backing " "for volume: %(vol)s." msgstr "" "소스 볼륨 %(src)s의 백업이 없습니다. 볼륨 %(vol)s의 백업을 작성하지 않습니다." #, python-format msgid "There is no backing for the volume: %s. Need to create one." msgstr "볼륨 %s의 백업이 없습니다. 하나를 작성해야 합니다." #, python-format msgid "There is no backing for volume: %s; no need to extend the virtual disk." msgstr "볼륨 %s의 백업이 없으므로 가상 디스크를 확장하지 않아도 됩니다." #, python-format msgid "There is no backing, and so there is no snapshot: %s." msgstr "백업이 없으므로 스냅샷이 없음: %s." #, python-format msgid "There is no backing, so will not create snapshot: %s." msgstr "백업이 없으므로 스냅샷이 작성되지 않음: %s." #, python-format msgid "" "There is no snapshot point for the snapshotted volume: %(snap)s. Not " "creating any backing for the volume: %(vol)s." msgstr "" "스냅샷이 작성된 볼륨 %(snap)s의 스냅샷 지점이 없습니다. 볼륨 %(vol)s의 백업" "을 작성하지 않습니다." #, python-format msgid "Toggle san_ip from %(current)s to %(new)s." msgstr "san_ip를 %(current)s에서 %(new)s(으)로 전환합니다." msgid "Token is invalid, going to re-login and get a new one." msgstr "토큰이 올바르지 않으므로, 다시 로그인하여 새 토큰을 얻으십시오." msgid "Transfer volume completed successfully." 
msgstr "볼륨 전송이 성공적으로 완료되었습니다." #, python-format msgid "Tried to delete non-existent vdisk %s." msgstr "존재하지 않는 vdisk %s을(를) 삭제하려고 했습니다." #, python-format msgid "" "Tried to delete snapshot %s, but was not found in Datera cluster. Continuing " "with delete." msgstr "" "스냅샷 %s을(를) 삭제하려고 시도했지만 Datera 클러스터에서 찾을 수 없습니다. " "삭제를 계속합니다." #, python-format msgid "" "Tried to delete volume %s, but it was not found in the Datera cluster. " "Continuing with delete." msgstr "" "볼륨 %s을(를) 삭제하려고 시도했지만 Datera 클러스터에서 찾을 수 없습니다. 삭" "제를 계속합니다." #, python-format msgid "" "Tried to detach volume %s, but it was not found in the Datera cluster. " "Continuing with detach." msgstr "" "볼륨 %s의 연결을 해제하려고 시도했지만 Datera 클러스터에서 찾을 수 없습니다. " "연결 해제를 계속합니다." #, python-format msgid "Trying to unmap volume from all sdcs before deletion: %s." msgstr "삭제 전에 모든 sdcs에서 볼륨의 맵핑 해제 시도: %s." msgid "Unable to accept transfer for volume, because it is in maintenance." msgstr "볼륨이 유지보수 중이므로 전송을 승인할 수 없습니다." msgid "Unable to attach volume, because it is in maintenance." msgstr "볼륨이 유지보수 중이므로 연결할 수 없습니다." msgid "Unable to create the snapshot for volume, because it is in maintenance." msgstr "볼륨이 유지보수 중이므로 스냅샷을 작성할 수 없습니다." msgid "Unable to delete the volume metadata, because it is in maintenance." msgstr "볼륨 메타데이터가 유지보수 중이므로 삭제할 수 없습니다." msgid "Unable to detach volume, because it is in maintenance." msgstr "볼륨이 유지보수 중이므로 연결을 해제할 수 없습니다." msgid "Unable to get Cinder internal context, will not use image-volume cache." msgstr "" "Cinder 내부 컨텍스트를 가져올 수 없음, 이미지-볼륨 캐시를 사용하지 않습니다." #, python-format msgid "Unable to get remote copy information for volume %s" msgstr "볼륨 %s의 원격 복사 정보를 가져올 수 없음" msgid "" "Unable to initialize the connection for volume, because it is in maintenance." msgstr "볼륨이 유지보수 중이므로 볼륨의 연결을 초기화할 수 없습니다." msgid "Unable to parse XML input." msgstr "XML 입력을 구문 분석할 수 없습니다." #, python-format msgid "Unable to serialize field '%s' - excluding from backup" msgstr "필드 '%s'을(를) 직렬화할 수 없음 - 백업에서 제외" #, python-format msgid "Unable to unprotect snapshot %s." msgstr "스냅샷 %s의 보호를 해제할 수 없습니다." msgid "Unable to update the metadata for volume, because it is in maintenance." msgstr "볼륨이 유지보수 중이므로 메타데이터를 업데이트할 수 없습니다." msgid "Unable to update volume, because it is in maintenance." msgstr "볼륨이 유지보수 중이므로 업데이트할 수 없습니다." #, python-format msgid "Unexporting lun %s." msgstr "lun %s 내보내기 취소." #, python-format msgid "Unmanage snapshot with id: %s" msgstr "ID가 %s인 스냅샷 관리 취소" #, python-format msgid "Unmanage volume %(volume_id)s completed." msgstr "볼륨 %(volume_id)s 관리 취소가 완료되었습니다." #, python-format msgid "Unmanage volume %s" msgstr "볼륨 %s 관리 취소" #, python-format msgid "Unmanage volume with id: %s" msgstr "ID가 %s인 볼륨 관리 취소" #, python-format msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." msgstr "현재 경로가 %(path)s이고 uuid가 %(uuid)s인 관리 취소된 LUN." #, python-format msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." msgstr "현재 레이블 %(label)s 및 wwn %(wwn)s의 볼륨 관리가 취소되었습니다." #, python-format msgid "Unmap volume: %(volume)s." msgstr "볼륨 맵핑 해제: %(volume)s." msgid "Unreserve volume completed successfully." msgstr "볼륨 예약 취소가 성공적으로 완료되었습니다." #, python-format msgid "" "Update Consistency Group: %(group)s. This adds and/or removes volumes from a " "CG." msgstr "" "일관성 그룹 %(group)s을(를) 업데이트하십시오. 그러면 CG에서 볼륨을 추가/또" "는 제거합니다." msgid "Update consistency group completed successfully." msgstr "일관성 그룹 업데이트가 성공적으로 완료되었습니다." #, python-format msgid "Update migrated volume %(new_volume)s completed." 
msgstr "마이그레이션된 볼륨 %(new_volume)s의 업데이트가 완료되었습니다." msgid "Update readonly setting on volume completed successfully." msgstr "볼륨에서 읽기 전용 설정을 업데이트하는 작업이 성공적으로 완료되었습니다." msgid "Update snapshot metadata completed successfully." msgstr "스냅샷 메타데이터 업데이트가 성공적으로 완료되었습니다." msgid "Update volume admin metadata completed successfully." msgstr "볼륨 관리 메타데이터 업데이트가 성공적으로 완료되었습니다." msgid "Update volume metadata completed successfully." msgstr "볼륨 메타데이터 업데이트가 성공적으로 완료되었습니다." #, python-format msgid "Updated Consistency Group %s" msgstr "일관성 그룹 %s이(가) 업데이트됨" #, python-format msgid "" "Updating consistency group %(id)s with name %(name)s description: " "%(description)s add_volumes: %(add_volumes)s remove_volumes: " "%(remove_volumes)s." msgstr "" "일관성 그룹 %(id)s을(를) 이름 %(name)s(으)로 업데이트. 설명: %(description)s " "add_volumes: %(add_volumes)s remove_volumes: %(remove_volumes)s." #, python-format msgid "Updating snapshot %(id)s with info %(dict)s" msgstr "스냅샷 %(id)s을(를) 정보 %(dict)s(으)로 업데이트" #, python-format msgid "Updating status for CG: %(id)s." msgstr "CG의 상태 업데이트: %(id)s." #, python-format msgid "Updating storage service catalog information for backend '%s'" msgstr "백엔드 '%s'의 스토리지 서비스 카탈로그 정보 업데이트" msgid "Use ALUA when adding initiator to host." msgstr "개시자를 호스트에 추가할 때 ALUA를 사용하십시오." msgid "Use CHAP when adding initiator to host." msgstr "개시자를 호스트에 추가할 때 CHAP을 사용하십시오." #, python-format msgid "" "Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." msgstr "" "FC Zone 관리자 %(zm_version)s, 드라이버 %(drv_name)s %(drv_version)s 사용." #, python-format msgid "Using FC lookup service %s." msgstr "FC 검색 서비스 %s 사용." #, python-format msgid "Using compute cluster(s): %s." msgstr "컴퓨트 클러스터 사용: %s." #, python-format msgid "Using existing initiator group name: %(igGroupName)s." msgstr "기존 개시자 그룹 이름 사용: %(igGroupName)s." msgid "" "Using extra_specs for defining QoS specs will be deprecated in the N release " "of OpenStack. Please use QoS specs." msgstr "" "QoS 사양을 정의하는 데 extra_specs을 사용하는 기능은 OpenStack의 N 릴리스에" "서 더 이상 사용되지 않습니다. QoS 사양을 사용하십시오." #, python-format msgid "Using overridden vmware_host_version from config: %s" msgstr "구성에서 겹쳐쓴 vmware_host_version 사용: %s" #, python-format msgid "Using pool %(pool)s instead of %(cpg)s" msgstr "%(cpg)s 대신 풀 %(pool)s 사용" #, python-format msgid "Using security file in %s for authentication" msgstr "인증을 위해 %s에서 보안 파일 사용" #, python-format msgid "Using service label: %s" msgstr "서비스 레이블 사용: %s" #, python-format msgid "Using target label: %s." msgstr "대상 레이블 사용: %s." msgid "VF context is changed in the session." msgstr "VF 컨텍스트가 세션에서 변경되었습니다." #, python-format msgid "Value with type=%s is not serializable" msgstr "type=%s인 값은 직렬화되지 않음" #, python-format msgid "Virtual volume %(disp)s '%(new)s' is being retyped." msgstr "가상 볼륨 %(disp)s '%(new)s'을(를) 다시 입력 중입니다." #, python-format msgid "Virtual volume %(disp)s '%(new)s' is now being managed." msgstr "가상 볼륨 %(disp)s '%(new)s'을(를) 지금 관리 중입니다." #, python-format msgid "" "Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " "%(cpg)s" msgstr "" "가상 볼륨 %(disp)s '%(new)s' snapCPG가 비어 있으므로, %(cpg)s(으)로 설정됩니" "다." #, python-format msgid "" "Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to " "'%(new)s'." msgstr "" "가상 볼륨 %(disp)s '%(vol)s'이(가) 더 이상 관리되지 않습니다. 볼륨의 이름이 " "'%(new)s'(으)로 변경됩니다." #, python-format msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." msgstr "" "가상 볼륨 %(disp)s이(가) 성공적으로 %(new_type)s(으)로 다시 입력되었습니다." 
#, python-format msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." msgstr "가상 볼륨 '%(ref)s'의 이름이 '%(new)s'(으)로 변경됩니다." #, python-format msgid "Vol copy job completed for dest %s." msgstr "대상 %s에 대한 볼륨 복사 작업이 완료되었습니다." #, python-format msgid "Volume %(volume)s does not have meta device members." msgstr "볼륨 %(volume)s에 메타 장치 멤버가 없습니다." #, python-format msgid "" "Volume %(volume)s is already mapped. The device number is %(deviceNumber)s." msgstr "" "볼륨 %(volume)s이(가) 이미 맵핑되었습니다. 장치 번호는 %(deviceNumber)s입니" "다." #, python-format msgid "Volume %(volumeName)s not in any storage group." msgstr "스토리지 그룹에 볼륨 %(volumeName)s이(가) 없습니다." #, python-format msgid "" "Volume %(volume_id)s: being created as %(create_type)s with specification: " "%(volume_spec)s" msgstr "" "볼륨 %(volume_id)s: 사양이 %(volume_spec)s인 %(create_type)s(으)로 작성됨" #, python-format msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" msgstr "볼륨 %(volume_name)s (%(volume_id)s): 성공적으로 작성됨" #, python-format msgid "Volume %s converted." msgstr "볼륨 %s이(가) 전환되었습니다." #, python-format msgid "Volume %s created" msgstr "볼륨 %s이(가) 작성됨" #, python-format msgid "Volume %s does not exist, it seems it was already deleted." msgstr "볼륨 %s이(가) 없습니다. 이미 삭제된 것으로 보입니다." #, python-format msgid "Volume %s has been transferred." msgstr "볼륨 %s이(가) 전송되었습니다." #, python-format msgid "Volume %s is mapping to multiple hosts." msgstr "볼륨 %s이(가) 여러 호스트에 맵핑됩니다." #, python-format msgid "Volume %s is not mapped. No volume to unmap." msgstr "%s 볼륨이 맵핑되지 않았습니다. 맵핑 해제할 볼륨이 없습니다." #, python-format msgid "Volume %s presented." msgstr "볼륨 %s이(가) 제공되었습니다." #, python-format msgid "Volume %s retyped." msgstr "볼륨 %s이(가) 다시 입력되었습니다." #, python-format msgid "Volume %s unmanaged." msgstr "볼륨 %s이(가) 관리 취소되었습니다." #, python-format msgid "Volume %s will be deleted later." msgstr "볼륨 %s이(가) 나중에 삭제됩니다." #, python-format msgid "Volume %s: retyped successfully" msgstr "볼륨 %s이(가) 성공적으로 다시 입력됨" #, python-format msgid "Volume already mapped, retrieving %(ig)s, %(vol)s" msgstr "볼륨이 이미 맵핑됨, %(ig)s, %(vol)s 검색" #, python-format msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" msgstr "%(mbps).2f MB/s에서 볼륨 복사 %(size_in_m).2f MB" #, python-format msgid "Volume copy completed (%(size_in_m).2f MB at %(mbps).2f MB/s)." msgstr "볼륨 복사 완료(%(size_in_m).2f MB, %(mbps).2f MB/s)." msgid "Volume created successfully." msgstr "볼륨이 성공적으로 작성되었습니다." msgid "Volume detach called, but volume not attached." msgstr "볼륨 연결 해제가 호출되었지만, 볼륨이 연결되어 있지 않습니다." msgid "Volume info retrieved successfully." msgstr "볼륨 정보가 성공적으로 검색되었습니다." #, python-format msgid "Volume mappings for %(name)s: %(mappings)s" msgstr "%(name)s의 볼륨 맵핑: %(mappings)s" #, python-format msgid "Volume name changed from %(tmp)s to %(orig)s" msgstr "%(tmp)s에서 %(orig)s(으)로 볼륨 이름 변경" #, python-format msgid "Volume name changed from %(tmp)s to %(orig)s." msgstr "%(tmp)s에서 %(orig)s(으)로 볼륨 이름이 변경되었습니다." msgid "Volume retrieved successfully." msgstr "볼륨이 성공적으로 검색되었습니다." #, python-format msgid "Volume service: %(label)s. Casted to: %(loc)s" msgstr "볼륨 서비스: %(label)s. 캐스트: %(loc)s" #, python-format msgid "Volume status is: %s." msgstr "볼륨 상태: %s." #, python-format msgid "Volume type is %s." msgstr "볼륨 유형이 %s입니다." #, python-format msgid "" "Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage pool " "id: %(pool_id)s, protection domain id: %(domain_id)s, protection domain " "name: %(domain_name)s." 
msgstr "" "볼륨 유형: %(volume_type)s, 스토리지 풀 이름: %(pool_name)s, 스토리지 풀 id: " "%(pool_id)s, 보호 도메인 id: %(domain_id)s, 보호 도메인 이름: " "%(domain_name)s." msgid "Volume updated successfully." msgstr "볼륨이 성공적으로 업데이트되었습니다." #, python-format msgid "Volume with given ref %s need not be renamed during manage operation." msgstr "" "지정된 ref %s이(가) 있는 볼륨은 관리 조작 중에 이름을 바꾸지 않아도 됩니다." #, python-format msgid "Volume with the name %s wasn't found, can't unmanage" msgstr "이름이 %s인 볼륨을 찾을 수 없음. 관리 취소할 수 없음" #, python-format msgid "" "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " "size: %(backup_size)d, continuing with restore." msgstr "" "볼륨: %(vol_id)s, 크기: %(vol_size)d이(가) 백업: %(backup_id)s, 크기: " "%(backup_size)d보다 큽니다. 복원을 계속합니다." #, python-format msgid "WWPN on node %(node)s: %(wwpn)s." msgstr "노드 %(node)s의 WWPN: %(wwpn)s." #, python-format msgid "" "Waiting for volume expansion of %(vol)s to complete, current remaining " "actions are %(action)s. ETA: %(eta)s mins." msgstr "" "%(vol)s의 볼륨 확장이 완료될 때까지 대기 중, 현재 나머지 작업은 %(action)s입" "니다. ETA: %(eta)s분." msgid "Waiting for web service array communication." msgstr "웹 서비스 배열 통신에 대기합니다." msgid "Waiting for web service to validate the configured password." msgstr "웹 서비스에서 구성된 비밀번호를 검증하는 동안 대기합니다." #, python-format msgid "Will clone a volume from the image volume %(id)s." msgstr "이미지 볼륨 %(id)s에서 볼륨을 복제합니다." #, python-format msgid "XtremIO SW version %s" msgstr "XtremIO SW 버전 %s" #, python-format msgid "ZFSSA version: %s" msgstr "ZFSSA 버전: %s" #, python-format msgid "Zone exists in I-T mode. Skipping zone creation %s" msgstr "I-T 모드에 구역이 있습니다. %s의 구역 작성을 건너뜁니다." #, python-format msgid "Zone exists in I-T mode. Skipping zone creation for %(zonename)s" msgstr "I-T 모드에 구역이 있습니다. %(zonename)s의 구역 작성을 건너뜁니다." #, python-format msgid "Zone map to add: %(zonemap)s" msgstr "추가할 구역 맵: %(zonemap)s" #, python-format msgid "Zone map to add: %s" msgstr "추가할 구역 맵: %s" msgid "" "Zone name created using prefix because either host name or storage system is " "none." msgstr "" "호스트 이름이나 스토리지 시스템이 없으므로 접두어를 사용하여 구역 이름이 작성" "되었습니다." msgid "Zone name created using prefix because host name is none." msgstr "호스트 이름이 없으므로 접두어를 사용하여 구역 이름이 작성되었습니다." #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "구역 지정 정책: %s, 인식되지 않음" #, python-format msgid "Zoning policy for Fabric %(policy)s" msgstr "패브릭 %(policy)s의 구역 지정 정책" #, python-format msgid "Zoning policy for Fabric %s" msgstr "패브릭 %s의 구역 지정 정책" #, python-format msgid "Zoning policy for fabric %(policy)s" msgstr "패브릭 %(policy)s의 구역 지정 정책" #, python-format msgid "Zoning policy for fabric %s" msgstr "패브릭 %s의 구역 지정 정책" msgid "Zoning policy is not valid, no zoning will be performed." msgstr "구역 지정 정책이 올바르지 않음, 구역 지정이 수행되지 않습니다." #, python-format msgid "" "_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" "_check_volume_copy_ops:: 볼륨 %(vol)s에 지정된 vdisk 복사 조작이 없음: orig=" "%(orig)s new=%(new)s." msgid "_delete_copysession, The copysession was already completed." msgstr "_delete_copysession, copysession이 이미 완료되었습니다." #, python-format msgid "" "_delete_volume_setting, volumename:%(volumename)s, volume not found on " "ETERNUS. " msgstr "" "_delete_volume_setting, volumename:%(volumename)s, ETERNUS에 볼륨이 없습니" "다. " #, python-format msgid "_get_service_target hdp: %s." msgstr "_get_service_target hdp: %s." #, python-format msgid "_get_tgt_ip_from_portgroup: Get ip: %s." msgstr "_get_tgt_ip_from_portgroup: ip 가져오기: %s." 
#, python-format msgid "_get_tgt_iqn: iSCSI target iqn is: %s." msgstr "_get_tgt_iqn: iSCSI 대상 iqn: %s." #, python-format msgid "_unmap_lun, volumename: %(volumename)s, volume is not mapped." msgstr "_unmap_lun, volumename: %(volumename)s, 볼륨이 맵핑되지 않았습니다." #, python-format msgid "_unmap_lun, volumename:%(volumename)s, volume not found." msgstr "_unmap_lun, volumename:%(volumename)s, 볼륨을 찾을 수 없습니다." #, python-format msgid "" "add_host_with_check. create host success. host name: %(name)s, host id: " "%(id)s" msgstr "" "add_host_with_check.호스트 작성 성공. 호스트 이름: %(name)s, 호스트 id: " "%(id)s" #, python-format msgid "add_host_with_check. host name: %(name)s, host id: %(id)s" msgstr "add_host_with_check. 호스트 이름: %(name)s, 호스트 id: %(id)s" #, python-format msgid "casted to %s" msgstr "%s(으)로 캐스트" #, python-format msgid "cgsnapshot %s: created successfully" msgstr "cgsnapshot %s: 성공적으로 작성됨" #, python-format msgid "cgsnapshot %s: deleted successfully" msgstr "cgsnapshot %s: 성공적으로 삭제됨" #, python-format msgid "cgsnapshot %s: deleting" msgstr "cgsnapshot %s: 삭제" #, python-format msgid "config[services]: %s." msgstr "config[services]: %s." #, python-format msgid "create_cloned_volume, info: %s, Exit method." msgstr "create_cloned_volume, 정보: %s, 메소드를 종료합니다." #, python-format msgid "" "create_cloned_volume, target volume id: %(tid)s, source volume id: %(sid)s, " "Enter method." msgstr "" "create_cloned_volume, 대상 볼륨 id: %(tid)s, 소스 볼륨 id: %(sid)s, 메소드를 " "입력합니다." #, python-format msgid "" "create_hostgroup_with_check. Create hostgroup success. hostgroup name: " "%(name)s, hostgroup id: %(id)s" msgstr "" "create_hostgroup_with_check. hostgroup 작성 성공. hostgroup 이름: %(name)s, " "hostgroup id: %(id)s" #, python-format msgid "" "create_hostgroup_with_check. hostgroup name: %(name)s, hostgroup id: %(id)s" msgstr "" "create_hostgroup_with_check. hostgroup 이름: %(name)s, hostgroup id: %(id)s" #, python-format msgid "create_snapshot, info: %s, Exit method." msgstr "create_snapshot, 정보: %s, 메소드를 종료합니다." #, python-format msgid "create_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." msgstr "" "create_snapshot, 스냅 id: %(sid)s, 볼륨 id: %(vid)s, 메소드를 입력합니다." #, python-format msgid "create_volume, info: %s, Exit method." msgstr "create_volume, 정보: %s, 메소드를 종료합니다." #, python-format msgid "create_volume, volume id: %s, Enter method." msgstr "create_volume, 볼륨 id: %s, 메소드를 입력합니다." #, python-format msgid "create_volume: create_lu returns %s" msgstr "create_volume: create_lu에서%s을(를) 리턴" #, python-format msgid "create_volume_from_snapshot, info: %s, Exit method." msgstr "create_volume_from_snapshot, 정보: %s, 메소드를 종료합니다." #, python-format msgid "" "create_volume_from_snapshot, volume id: %(vid)s, snap id: %(sid)s, Enter " "method." msgstr "" "create_volume_from_snapshot, 볼륨 id: %(vid)s, 스냅 id: %(sid)s, 메소드를 입" "력합니다." #, python-format msgid "" "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " "%(tgt_lun_id)s, copy_name: %(copy_name)s." msgstr "" "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " "%(tgt_lun_id)s, copy_name: %(copy_name)s." #, python-format msgid "del_iscsi_conn: hlun not found %s." msgstr "del_iscsi_conn: hlun을 찾을 수 없음 %s." #, python-format msgid "delete lun loc %s" msgstr "lun loc %s 삭제" #, python-format msgid "delete_snapshot, delete: %s, Exit method." msgstr "delete_snapshot, 삭제: %s, 메소드를 종료합니다." #, python-format msgid "delete_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." 
msgstr "" "delete_snapshot, 스냅 id: %(sid)s, 볼륨 id: %(vid)s, 메소드를 입력합니다." #, python-format msgid "delete_volume, delete: %s, Exit method." msgstr "delete_volume, 삭제: %s, 메소드를 종료합니다." #, python-format msgid "delete_volume, volume id: %s, Enter method." msgstr "delete_volume, 볼륨 id: %s, 메소드를 입력합니다." #, python-format msgid "" "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " "%(lun_id)s." msgstr "" "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " "%(lun_id)s." #, python-format msgid "do_setup: %s" msgstr "do_setup: %s" #, python-format msgid "extend_volume, used pool name: %s, Exit method." msgstr "extend_volume, 사용된 풀 이름: %s, 메소드를 종료합니다." #, python-format msgid "extend_volume, volume id: %s, Enter method." msgstr "extend_volume, 볼륨 id: %s, 메소드를 입력합니다." #, python-format msgid "free capacity of pool %(pool)s is: %(free)s, total capacity: %(total)s." msgstr "풀 %(pool)s의 사용 가능한 용량: %(free)s, 총 용량: %(total)s." #, python-format msgid "iSCSI Initiators %(in)s of %(ins)s need registration." msgstr "%(ins)s의 iSCSI 개시자 %(in)s을(를) 등록해야 합니다." #, python-format msgid "iSCSI portal found for service: %s" msgstr "서비스의 iSCSI 포털 발견: %s" #, python-format msgid "igroup %(grp)s found for initiator %(iname)s" msgstr "개시자 %(iname)s의 igroup %(grp)s을(를) 찾을 수 없음" #, python-format msgid "initialize volume %(vol)s connector %(conn)s" msgstr "볼륨 %(vol)s 커넥터 %(conn)s 초기화" #, python-format msgid "initialize_ connection: %(vol)s:%(initiator)s" msgstr "initialize_ connection: %(vol)s:%(initiator)s" #, python-format msgid "initialize_connection success. Return data: %s." msgstr "initialize_connection 성공: 데이터 리턴: %s." #, python-format msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" msgstr "initialize_connection 볼륨: %(volume)s, 커넥터: %(connector)s" #, python-format msgid "initialize_connection, host lun id is: %s." msgstr "initialize_connection, 호스트 lun id: %s." #, python-format msgid "initialize_connection, info: %s, Exit method." msgstr "initialize_connection, 정보: %s, 메소드를 종료합니다." #, python-format msgid "initialize_connection, initiator: %(wwpns)s, LUN ID: %(lun_id)s." msgstr "initialize_connection, 개시자: %(wwpns)s, LUN ID: %(lun_id)s." #, python-format msgid "" "initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " "portgroup_id: %(portgroup_id)s." msgstr "" "initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " "portgroup_id: %(portgroup_id)s." #, python-format msgid "initialize_connection, metadata is: %s." msgstr "initialize_connection, 메타데이터: %s." #, python-format msgid "" "initialize_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter " "method." msgstr "" "initialize_connection, 볼륨 id: %(vid)s, 개시자: %(initiator)s, 메소드를 입력" "합니다." #, python-format msgid "" "initialize_connection, volume: %(volume)s, target_lun: %(target_lun)s, " "target_luns: %(target_luns)s, Volume is already mapped." msgstr "" "initialize_connection, 볼륨: %(volume)s, target_lun: %(target_lun)s, " "target_luns: %(target_luns)s, 볼륨이 이미 맵핑되었습니다." #, python-format msgid "" "initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s." msgstr "initialize_connection_fc, 개시자: %(wwpns)s, 볼륨 이름: %(volume)s." #, python-format msgid "initiate: connection %s" msgstr "시작: 연결 %s" msgid "initiator has no password while using chap,adding it" msgstr "chap을 사용하는 동안 개시자의 비밀번호가 없음, 추가" #, python-format msgid "initiator name: %(initiator_name)s, LUN ID: %(lun_id)s." msgstr "개시자 이름: %(initiator_name)s, LUN ID: %(lun_id)s." 
msgid "" "initiator_auto_registration: False. Initiator auto registration is not " "enabled. Please register initiator manually." msgstr "" "initiator_auto_registration: False. 개시자 자동 등록이 사용되지 않습니다. 개" "시자를 수동으로 등록하십시오." #, python-format msgid "iops limit is: %s." msgstr "iops 한계: %s." #, python-format msgid "iscsi_initiators: %s" msgstr "iscsi_initiators: %s" #, python-format msgid "location is: %(location)s" msgstr "위치: %(location)s" #, python-format msgid "" "manage_existing_snapshot: snapshot %(exist)s on volume %(volume)s has been " "renamed to %(id)s and is now managed by Cinder." msgstr "" "manage_existing_snapshot: 볼륨 %(volume)s의 스냅샷 %(exist)s 이름이 " "%(id)s(으)로 변경되었으며 이제 Cinder에서 관리됩니다." #, python-format msgid "" "migrate_volume_completion is cleaning up an error for volume %(vol1)s " "(temporary volume %(vol2)s" msgstr "" "migrate_volume_completion이 볼륨 %(vol1)s의 오류 정리(임시 볼륨 %(vol2)s" #, python-format msgid "new cloned volume: %s" msgstr "새로 복제된 볼륨: %s" #, python-format msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" msgstr "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" #, python-format msgid "open_connection to %(ssn)s at %(ip)s" msgstr "%(ip)s에서 %(ssn)s에 대한 open_connection" #, python-format msgid "open_connection: Updating API version to %s" msgstr "open_connection: API 버전을 %s(으)로 업데이트" #, python-format msgid "replication failover secondary is %(ssn)s" msgstr "복제 장애 복구 보조가 %(ssn)s임" #, python-format msgid "setting volume %s to error_restoring (was restoring-backup)." msgstr "볼륨 %s을(를) error_restoring(으)로 설정(복원-백업)." #, python-format msgid "share: %(share)s -> %(info)s" msgstr "공유: %(share)s -> %(info)s" #, python-format msgid "share: %s incorrect entry" msgstr "공유: %s 올바르지 않은 항목" #, python-format msgid "smis_do_iscsi_discovery is: %(out)s." msgstr "smis_do_iscsi_discovery: %(out)s." #, python-format msgid "snapshot %s doesn't exist" msgstr "%s 스냅샷이 없습니다." #, python-format msgid "source volume for cloning: %s" msgstr "복제할 소스 볼륨: %s" #, python-format msgid "stats: stats: %s." msgstr "통계: 통계: %s." #, python-format msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." msgstr "stop_snapshot: 스냅샷 이름: %(snapshot)s, 볼륨 이름: %(volume)s." #, python-format msgid "targetlist: %s" msgstr "targetlist: %s" #, python-format msgid "terminate: connection %s" msgstr "종료: 연결 %s" #, python-format msgid "terminate_connection volume: %(volume)s, connector: %(con)s" msgstr "terminate_connection 볼륨: %(volume)s, 커넥터: %(con)s" #, python-format msgid "terminate_connection, return data is: %s." msgstr "terminate_connection, 리턴 데이터: %s." #, python-format msgid "terminate_connection, unmap: %s, Exit method." msgstr "terminate_connection, 맵핑 해제: %s, 메소드를 종료합니다." #, python-format msgid "" "terminate_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter " "method." msgstr "" "terminate_connection, 볼륨 id: %(vid)s, 개시자: %(initiator)s, 메소드를 입력" "합니다." #, python-format msgid "terminate_connection: initiator name: %(ini)s, LUN ID: %(lunid)s." msgstr "terminate_connection: 개시자 이름: %(ini)s, LUN ID: %(lunid)s." #, python-format msgid "terminate_connection: wwpns: %(wwns)s, LUN ID: %(lun_id)s." msgstr "terminate_connection: wwpns: %(wwns)s, LUN ID: %(lun_id)s." #, python-format msgid "" "terminate_connection_fc: volume name: %(volume)s, wwpns: %(wwns)s, lun_id: " "%(lunid)s." msgstr "" "terminate_connection_fc: 볼륨 이름: %(volume)s, wwpns: %(wwns)s, lun_id: " "%(lunid)s." #, python-format msgid "tunevv failed because the volume '%s' has snapshots." 
msgstr "볼륨 '%s'에 스냅샷이 있으므로 tunevv에 실패했습니다." #, python-format msgid "username: %(username)s, verify_cert: %(verify)s." msgstr "사용자 이름: %(username)s, verify_cert: %(verify)s." #, python-format msgid "vol=%s" msgstr "vol=%s" #, python-format msgid "vol_name=%(name)s provider_location=%(loc)s" msgstr "vol_name=%(name)s provider_location=%(loc)s" #, python-format msgid "volume %(name)s extended to %(size)d." msgstr "볼륨 %(name)s이(가) %(size)d(으)로 확장되었습니다." #, python-format msgid "volume %s doesn't exist" msgstr "%s 볼륨이 없습니다." #, python-format msgid "volume %s no longer exists in backend" msgstr "백엔드에 더 이상 볼륨 %s이(가) 없음" #, python-format msgid "volume: %(volume)s, lun params: %(params)s." msgstr "볼륨: %(volume)s, lun 매개 변수: %(params)s." msgid "volume_file does not support fileno() so skipping fsync()" msgstr "volume_file에서 fileno()를 지원하지 않으므로 fsync()를 건너뜀" cinder-8.0.0/cinder/locale/ko_KR/LC_MESSAGES/cinder.po0000664000567000056710000131255412701406257023244 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # bluejay , 2013 # NaleeJang , 2013 # Sungjin Kang , 2013 # Yongbok Kim , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Lucas Palm , 2016. #zanata # SeYeon Lee , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev22\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-25 10:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-25 06:57+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder 버전: %(version)s\n" #, python-format msgid " but size is now %d" msgstr "현재 크기는 %d 입니다. " #, python-format msgid " but size is now %d." msgstr " 그러나 현재 크기는 %d입니다." msgid " or " msgstr "또는" #, python-format msgid "%(attr)s is not set." msgstr "%(attr)s이(가) 설정되지 않았습니다." #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing은 호스트에 연결된 볼륨을 관리할 수 없습니다. 가져" "오기 전에 이 볼륨과 기존 호스트의 연결을 끊으십시오. " #, python-format msgid "%(err)s" msgstr "%(err)s" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "결과: %(res)s." #, python-format msgid "%(error_message)s" msgstr "%(error_message)s" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "%(file)s: Permission denied." msgstr "%(file)s: 권한이 거부됩니다." #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: 예상치 못한 CLI 출력과 함께 실패했습니다. 
\n" "명령: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "%(message)s" msgstr "%(message)s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "상태 코드: %(_status)s\n" "본문: %(_body)s" #, python-format msgid "%(message)s, subjectAltName: %(sanList)s." msgstr "%(message)s, subjectAltName: %(sanList)s." #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s: NetworkPortal 작성: 다른 서비스가 ip %(ip)s의 포트 %(port)d을" "(를) 사용하고 있지 않은지 확인하십시오. " #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s에 최소 문자 요구사항(%(min_length)s)이 있습니다." #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s의 문자 수가 %(max_length)s문자를 초과합니다." #, python-format msgid "" "%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " "unexpected mode. Image or file backups supported, actual mode is " "%(vol_mode)s." msgstr "" "%(op)s: 백업 %(bck_id)s, 볼륨 %(vol_id)s에 실패했습니다. 백업 오브젝트가 예상" "치 못한 모드에 있습니다. 이미지 또는 파일 백업이 지원되었습니다. 실제 모드는 " "%(vol_mode)s입니다." #, python-format msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" msgstr "" "%(service)s 서비스가 스토리지 어플라이언스 %(host)s에서 %(status)s이(가) 아님" #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s은(는) %(max_value)d보다 작거나 같아야 함" #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s은(는) >= %(min_value)d이어야 함. " #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "" "%(workers)d의 %(worker_name)s 값이 올바르지 않습니다. 이 값은 0보다 커야 합니" "다." #, python-format msgid "%s" msgstr "%s" #, python-format msgid "%s \"data\" is not in result." msgstr "%s \"data\"가 결과에 없습니다." #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "%s에 액세스할 수 없습니다. GPFS가 활성이고 파일 시스템이 마운트되었는지 확인" "하십시오." #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "블록이 포함되어 있지 않아서 복제 조작을 사용하여 %s 크기를 조정할 수 없습니" "다." #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "압축된 볼륨에 호스트되었으므로 복제 조작을 사용하여 %s의 크기를 조정할 수 없" "음" #, python-format msgid "%s configuration option is not set." msgstr "%s 구성 옵션이 설정되지 않았습니다. " #, python-format msgid "%s does not exist." msgstr "%s이(가) 없습니다." #, python-format msgid "%s is not a directory." msgstr "%s이(가) 디렉토리가 아닙니다. " #, python-format msgid "%s is not a string or unicode" msgstr "%s이(가) 문자열 또는 Unicode가 아님" #, python-format msgid "%s is not installed" msgstr "%s이(가) 설치되지 않음" #, python-format msgid "%s is not installed." msgstr "%s이(가) 설치되어 있지 않습니다. " #, python-format msgid "%s is not set" msgstr "%s이(가) 설정되지 않았음" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" "%s이(가) 설정되지 않았지만, 복제 장치가 유효하게 되려면 설정해야 합니다." #, python-format msgid "%s is not set." msgstr "%s이(가) 설정되지 않았습니다." #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s은(는) 유효한 원시 또는 qcow2 이미지여야 합니다. " #, python-format msgid "%s must be an absolute path." 
msgstr "%s은(는) 절대 경로여야 합니다. " #, python-format msgid "%s must be an integer." msgstr "%s은(는) 정수여야 합니다." #, python-format msgid "%s not set in cinder.conf" msgstr "cinder.conf에 %s이(가) 설정되지 않음" #, python-format msgid "%s not set." msgstr "%s이(가) 설정되지 않음. " #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "'%(prot)s'이(가) 구성 파일의 flashsystem_connection_protocol에 대해 올바르지 " "않습니다. 올바른 값은 %(enabled)s입니다." msgid "'active' must be present when writing snap_info." msgstr "스냅샷 정보를 기록할 때 '활성'이 있어야 합니다. " msgid "'consistencygroup_id' must be specified" msgstr "'consistencygroup_id'를 지정해야 함" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' 구문 분석에 실패했습니다. " msgid "'status' must be specified." msgstr "'상태'가 지정되어야 합니다." msgid "'volume_id' must be specified" msgstr "'volume_id'를 지정해야 함" msgid "'{}' object has no attribute '{}'" msgstr "'{}' 오브젝트에 '{}' 속성이 없음" #, python-format msgid "" "(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " "(Stderr: %(stderr)s)" msgstr "" "(명령: %(cmd)s) (리턴 코드: %(exit_code)s) (Stdout: %(stdout)s) (Stderr: " "%(stderr)s)" #, python-format msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" msgstr "A LUN(HLUN)을 찾을 수 없습니다.(LDEV: %(ldev)s)" msgid "A concurrent, possibly contradictory, request has been made." msgstr "동시에 모순된 요청이 수행되었습니다." #, python-format msgid "" "A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" msgstr "" "사용 가능한 LUN(HLUN)을 찾을 수 없습니다. 다른 호스트 그룹을 추가하십시오." "(LDEV: %(ldev)s)" #, python-format msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" msgstr "호스트 그룹을 추가할 수 없습니다.(포트: %(port)s, 이름e: %(name)s)" #, python-format msgid "" "A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " "%(name)s)" msgstr "" "호스트 그룹을 삭제할 수 없습니다.(포트: %(port)s, gid: %(gid)s, 이름: " "%(name)s)" #, python-format msgid "A host group is invalid. (host group: %(gid)s)" msgstr "호스트 그룹이 올바르지 않습니다.(호스트 그룹: %(gid)s)" #, python-format msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" msgstr "쌍을 삭제할 수 없습니다.(P-VOL: %(pvol)s, S-VOL: %(svol)s)" #, python-format msgid "" "A pair could not be created. The maximum number of pair is exceeded. (copy " "method: %(copy_method)s, P-VOL: %(pvol)s)" msgstr "" "쌍을 작성할 수 없습니다. 쌍의 최대 수가 초과되었습니다.(복사 메소드: " "%(copy_method)s, P-VOL: %(pvol)s)" #, python-format msgid "A parameter is invalid. (%(param)s)" msgstr "매개변수가 올바르지 않습니다.(%(param)s)" #, python-format msgid "A parameter value is invalid. (%(meta)s)" msgstr "매개변수값이 올바르지 않습니다.(%(meta)s)" #, python-format msgid "A pool could not be found. (pool id: %(pool_id)s)" msgstr "풀을 찾을 수 없습니다.(pool id: %(pool_id)s)" #, python-format msgid "A snapshot status is invalid. (status: %(status)s)" msgstr "스냅샷 상태가 올바르지 않습니다.(상태: %(status)s)" msgid "A valid secondary target MUST be specified in order to failover." msgstr "장애 복구하려면 올바른 보조 대상을 지정해야 합니다." msgid "A volume ID or share was not specified." msgstr "볼륨 ID 또는 공유가 지정되지 않았습니다. " #, python-format msgid "A volume status is invalid. (status: %(status)s)" msgstr "볼륨 상태가 올바르지 않습니다.(상태: %(status)s)" #, python-format msgid "API %(name)s failed with error string %(err)s" msgstr "API %(name)s이(가) 실패했으며 오류 문자열은 %(err)s임" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API 버전 문자열 %(version)s 형식이 올바르지 않습니다. 형식은 MajorNum." "MinorNum 이어야 합니다." msgid "API key is missing for CloudByte driver." 
msgstr "CloudByte 드라이버의 API 키가 누락되었습니다." #, python-format msgid "API response: %(response)s" msgstr "API 응답: %(response)s" #, python-format msgid "API response: %s" msgstr "API 응답: %s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API 버전 %(version)s에서는 이 메소드를 지원하지 않습니다.." msgid "API version could not be determined." msgstr "API 버전을 판별할 수 없습니다." msgid "" "About to delete child projects having non-zero quota. This should not be " "performed" msgstr "" "할당량이 0(영)이 아닌 하위 프로젝트를 삭제하려고 합니다. 이는 수행해서는 안 " "됩니다. " msgid "Access list not available for public volume types." msgstr "액세스 목록을 공용 볼륨 유형에 사용할 수 없습니다. " msgid "Activate or deactivate QoS error." msgstr "QoS 활성화 또는 비활성화 오류입니다. " msgid "Activate snapshot error." msgstr "스냅샷 활성화 오류입니다. " msgid "Add FC port to host error." msgstr "호스트에 FC 포트 추가 오류입니다. " msgid "Add fc initiator to array error." msgstr "배열에 fc 개시자 추가 오류입니다. " msgid "Add initiator to array error." msgstr "배열에 개시자 추가 오류입니다. " msgid "Add lun to cache error." msgstr "캐시에 lun 추가 오류입니다. " msgid "Add lun to partition error." msgstr "파티션에 lun 추가 오류입니다. " msgid "Add mapping view error." msgstr "맵핑 보기 추가 오류입니다. " msgid "Add new host error." msgstr "새 호스트 추가 오류입니다. " msgid "Add port to port group error." msgstr "포트 그룹에 포트 추가 오류." #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "관리될 모든 지정된 스토리지 풀이 존재하지 않습니다. 구성을 확인하십시오. 존재" "하지 않는 풀: %s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "API 버전 요청은 VersionedMethod 오브젝트와 비교해야 합니다." #, python-format msgid "An error has occured in SheepdogDriver. (Reason: %(reason)s)" msgstr "SheepdogDriver에서 오류가 발생했습니다(이유: %(reason)s). " msgid "An error has occurred during backup operation" msgstr "백업 조작 중에 오류가 발생함 " #, python-format msgid "An error occured while attempting to modifySnapshot '%s'." msgstr "스냅샷 '%s'을(를) 수정하려는 중에 오류가 발생했습니다." #, python-format msgid "An error occured while seeking for volume \"%s\"." msgstr "볼륨 \"%s\"을(를) 검색하는 중에 오류가 발생했습니다. " #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "LUNcopy 조작 중 오류가 발생했습니다. LUNcopy 이름: %(luncopyname)s. LUNcopy " "상태: %(luncopystatus)s. LUNcopy 상태: %(luncopystate)s." #, python-format msgid "An error occurred while reading volume \"%s\"." msgstr "볼륨 \"%s\"을(를) 읽는 중에 오류가 발생했습니다. " #, python-format msgid "An error occurred while writing to volume \"%s\"." msgstr "볼륨 \"%s\"에 쓰는 중에 오류가 발생했습니다. " #, python-format msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" msgstr "iSCSI CHAP 사용자를 추가할 수 없습니다.(사용자 이름: %(user)s)" #, python-format msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" msgstr "iSCSI CHAP 사용자를 삭제할 수 없습니다.(사용자 이름: %(user)s)" #, python-format msgid "" "An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " "reason: %(reason)s)" msgstr "" "iSCSI 대상을 추가할 수 없습니다.(포트: %(port)s, 별명: %(alias)s, 이유: " "%(reason)s)" #, python-format msgid "" "An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " "%(alias)s)" msgstr "" "iSCSI 대상을 삭제할 수 없습니다.(포트: %(port)s, tno: %(tno)s, 별명: " "%(alias)s)" msgid "An unknown exception occurred." msgstr "알 수 없는 예외가 발생했습니다. " msgid "" "An user with a token scoped to a subproject is not allowed to see the quota " "of its parents." 
msgstr "" "하위 프로젝트로 범위 지정된 토큰을 가진 사용자는 상위의 할당량을 볼 수 없습니" "다. " msgid "Append port group description error." msgstr "포트 그룹 설명 추가 오류." #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "구역과 cfgs를 스위치에 적용하는 데 실패했습니다(오류 코드=%(err_code)s 오류 " "메시지=%(err_msg)s." #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "" "배열이 존재하지 않거나 오프라인 상태입니다. 배열의 현재 상태는 %s입니다. " msgid "Associate host to hostgroup error." msgstr "호스트 그룹에 호스트 연관 오류입니다. " msgid "Associate host to mapping view error." msgstr "맵핑 보기에 호스트 연관 오류입니다. " msgid "Associate initiator to host error." msgstr "호스트에 개시자 연관 오류입니다. " msgid "Associate lun to QoS error." msgstr "lun을 QoS에 연결 오류." msgid "Associate lun to lungroup error." msgstr "Lun 그룹에 lun 연관 오류입니다. " msgid "Associate lungroup to mapping view error." msgstr "맵핑 보기에 lun 그룹 연관 오류입니다. " msgid "Associate portgroup to mapping view error." msgstr "맵핑 보기에 포트 그룹 연관 오류입니다. " msgid "At least one valid iSCSI IP address must be set." msgstr "최소한 하나의 올바른 iSCSI IP 주소를 설정해야 합니다. " #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "올바르지 않은 인증 키로 %s 전송을 시도했습니다. " #, python-format msgid "Auth group [%s] details not found in CloudByte storage." msgstr "CloudByte 스토리지에서 인증 그룹 [%s] 세부사항을 찾을 수 없습니다. " msgid "Auth user details not found in CloudByte storage." msgstr "CloudByte 스토리지에서 인증 사용자 세부사항을 찾을 수 없습니다. " msgid "Authentication error" msgstr "인증 오류" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "인증 실패, 스위치 자격 증명 확인, 오류 코드 %s." msgid "Authorization error" msgstr "권한 부여 오류" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "가용성 구역 '%(s_az)s'이(가) 올바르지 않습니다. " msgid "Available categories:" msgstr "사용 가능한 카테고리:" msgid "" "Back-end QoS specs are not supported on this storage family and ONTAP " "version." msgstr "" "백엔드 QoS 스펙이 이 스토리지 제품군 및 ONTAP 버전에서 지원되지 않습니다. " #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "백엔드가 존재하지 않음(%(backend)s)" msgid "Backend has already been failed over. Unable to fail back." msgstr "백엔드가 이미 장애 복구되었으므로, 장애 복구할 수 없습니다." #, python-format msgid "Backend reports: %(message)s" msgstr "백엔드 보고서: %(message)s" msgid "Backend reports: item already exists" msgstr "백엔드 보고서: 항목이 이미 존재함" msgid "Backend reports: item not found" msgstr "백엔드 보고서: 항목을 찾을 수 없음" msgid "Backend server not NaServer." msgstr "백엔드 서버가 NaServer가 아님." #, python-format msgid "Backend service retry timeout hit: %(timeout)s sec" msgstr "백엔드 서비스 재시도 제한시간 도달: %(timeout)s초" msgid "Backend storage did not configure fiber channel target." msgstr "백엔드 스토리지가 파이버 채널 대상을 구성하지 않았습니다." msgid "Backing up an in-use volume must use the force flag." msgstr "사용 중인 볼륨을 백업하려면 강제 실행 플래그를 사용해야 합니다. " #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "%(backup_id)s 백업을 찾을 수 없습니다. " msgid "Backup RBD operation failed" msgstr "백업 RBD 조작이 실패함 " msgid "Backup already exists in database." msgstr "데이터베이스에 이미 백업이 있습니다. " #, python-format msgid "Backup driver reported an error: %(message)s" msgstr "백업 드라이버 오류 보고서: %(message)s" msgid "Backup id required" msgstr "백업 ID 필요" msgid "Backup is not supported for GlusterFS volumes with snapshots." msgstr "스냅샷이 있는 GlusterFS 볼륨에 대한 백업이 지원되지 않습니다." msgid "Backup is only supported for SOFS volumes without backing file." msgstr "백업 파일 없는 SOFS 볼륨에 대해서만 백업이 지원됩니다." msgid "Backup is only supported for raw-formatted GlusterFS volumes." 
msgstr "백업은 원시 형식화 GlusterFS 볼륨에 대해서만 지원됩니다." msgid "Backup is only supported for raw-formatted SOFS volumes." msgstr "원시 형식화 SOFS 볼륨에 대해서만 백업이 지원됩니다." msgid "Backup operation of an encrypted volume failed." msgstr "암호화된 볼륨의 백업 조작이 실패했습니다." #, python-format msgid "" "Backup service %(configured_service)s does not support verify. Backup id " "%(id)s is not verified. Skipping verify." msgstr "" "백업 서비스 %(configured_service)s이(가) 확인을 지원하지 않습니다. 백업 ID " "%(id)s이(가) 확인되지 않습니다. 확인을 건너뜁니다." #, python-format msgid "" "Backup service %(service)s does not support verify. Backup id %(id)s is not " "verified. Skipping reset." msgstr "" "백업 서비스 %(service)s이(가) 확인을 지원하지 않습니다. 백업 ID %(id)s이(가) " "확인되지 않습니다. 재설정을 건너뜁니다." #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "백업에 하나의 스냅샷만 있어야 하지만 대신 %s개가 있음" msgid "Backup status must be available" msgstr "백업 상태가 사용 가능해야 함" #, python-format msgid "Backup status must be available and not %s." msgstr "백업 상태는 사용 가능해야 하며 %s이(가) 아니어야 합니다." msgid "Backup status must be available or error" msgstr "백업 상태는 사용 가능 또는 오류여야 함" msgid "Backup to be restored has invalid size" msgstr "복원할 백업이 올바르지 않은 크기임" #, python-format msgid "Bad Status line returned: %(arg)s." msgstr "잘못된 상태 표시줄이 리턴됨: %(arg)s." #, python-format msgid "Bad key(s) in quota set: %s" msgstr "할당량 세트의 잘못된 키: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "스토리지 볼륨 백엔드 API로부터 잘못되었거나 예상치 못한 응답: %(data)s" #, python-format msgid "Bad project format: project is not in proper format (%s)" msgstr "잘못된 프로젝트 형식: 프로젝트 형식이 올바르지 않음(%s)" #, python-format msgid "Bad request sent to Datera cluster:Invalid args: %(args)s | %(message)s" msgstr "" "Datera 클러스터에 잘못된 요청이 전송됨: 올바르지 않은 인수: %(args)s | " "%(message)s" msgid "Bad response from Datera API" msgstr "Datera API의 잘못된 응답" msgid "Bad response from SolidFire API" msgstr "SolidFire API의 잘못된 응답" #, python-format msgid "Bad response from XMS, %s" msgstr "XMS의 잘못된 응답, %s" msgid "Binary" msgstr "2진" msgid "Blank components" msgstr "비어 있는 구성요소" msgid "Blockbridge API authentication scheme (token or password)" msgstr "Blockbridge API 인증 스킴(토큰 또는 비밀번호)" msgid "Blockbridge API password (for auth scheme 'password')" msgstr "Blockbridge API 비밀번호(인증 스킴 '비밀번호'의 경우)" msgid "Blockbridge API token (for auth scheme 'token')" msgstr "Blockbridge API 토큰(인증 스킴 '토큰'의 경우)" msgid "Blockbridge API user (for auth scheme 'password')" msgstr "Blockbridge API 사용자(인증 스킴 '비밀번호'의 경우)" msgid "Blockbridge api host not configured" msgstr "Blockbridge api 호스트가 구성되지 않음" #, python-format msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" msgstr "Blockbridge가 올바르지 않은 인증 스킴 '%(auth_scheme)s'(으)로 구성됨" msgid "Blockbridge default pool does not exist" msgstr "Blockbridge 기본 풀이 존재하지 않음" msgid "" "Blockbridge password not configured (required for auth scheme 'password')" msgstr "Blockbridge 비밀번호가 구성되지 않음(인증 스킴 '비밀번호'의 경우 필수)" msgid "Blockbridge pools not configured" msgstr "Blockbridge 풀이 구성되지 않음" msgid "Blockbridge token not configured (required for auth scheme 'token')" msgstr "Blockbridge 토큰이 구성되지 않음(인증 스킴 '토큰'의 경우 필수)" msgid "Blockbridge user not configured (required for auth scheme 'password')" msgstr "Blockbridge 사용자가 구성되지 않음(인증 스킴 '비밀번호'의 경우 필수)" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "Brocade Fibre Channel Zoning CLI 오류: %(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Brocade Fibre Channel Zoning HTTP 
오류: %(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "CHAP 본인확인정보는 12바이트 - 16바이트여야 합니다. " #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "CLI 실행 출력: \n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI 실행 출력: \n" " command: %(cmd)s\n" "stdout: %(out)s\n" " stderr: %(err)s." msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E VDisk가 이미 호스트에 맵핑되어 있기 때문에 VDisk에서 호스트로의 맵" "핑이 작성되지 않았습니다. \n" "\"" msgid "CONCERTO version is not supported" msgstr "CONCERTO 버전이 지원되지 않음" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "배열에 CPG(%s)가 없음" msgid "Cache name is None, please set smartcache:cachename in key." msgstr "캐시 이름이 None입니다. 키에서 smartcache:cachename을 설정하십시오. " #, python-format msgid "Cache volume %(cache_vol)sdoes not have snapshot %(cache_snap)s." msgstr "캐시 볼륨 %(cache_vol)s에 스냅샷 %(cache_snap)s이(가) 없습니다. " #, python-format msgid "Cache volume %s does not have required properties" msgstr "캐시 볼륨 %s에 필수 특성이 없음" msgid "Call returned a None object" msgstr "호출에서 None 오브젝트를 리턴함" msgid "Can not add FC port to host." msgstr "호스트에 FC 포트를 추가할 수 없습니다." #, python-format msgid "Can not find cache id by cache name %(name)s." msgstr "캐시 이름 %(name)s을(를) 사용하여 캐시 id를 찾을 수 없습니다. " #, python-format msgid "Can not find partition id by name %(name)s." msgstr "이름 %(name)s을(를) 사용하여 파티션 id를 찾을 수 없습니다. " #, python-format msgid "Can not get pool info. pool: %s" msgstr "풀 정보를 가져올 수 없습니다. 풀: %s" #, python-format msgid "Can not translate %s to integer." msgstr "%s을(를) 정수로 변환할 수 없습니다. " #, python-format msgid "Can't access 'scality_sofs_config': %s" msgstr "'scality_sofs_config'에 액세스할 수 없음: %s" msgid "Can't attach snapshot." msgstr "스냅샷을 연결할 수 없습니다." msgid "Can't decode backup record." msgstr "백업 레코드를 디코드할 수 없습니다. " #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "복제 볼륨을 확장할 수 없음, 볼륨: %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "배열에서 LUN을 찾을 수 없습니다. source-name 또는 source-id를 확인하십시오." #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "배열에서 캐시 이름을 찾을 수 없음, 캐시 이름: %(name)s." #, python-format msgid "Can't find lun id from db, volume: %(id)s" msgstr "db에서 lun id를 찾을 수 없음, 볼륨: %(id)s" #, python-format msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." msgstr "" "배열에서 lun 정보를 찾을 수 없습니다. 볼륨: %(id)s, lun 이름: %(name)s." #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" "배열에서 파티션 이름을 찾을 수 없습니다. 파티션 이름은 %(name)s입니다. " #, python-format msgid "Can't find service: %s" msgstr "서비스를 찾을 수 없음: %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "배열에서 스냅샷을 찾을 수 없습니다. source-name 또는 source-id를 확인하십시" "오." msgid "Can't find the same host id from arrays." msgstr "배열에서 동일한 호스트 id를 찾을 수 없습니다." #, python-format msgid "Can't get volume id from snapshot, snapshot: %(id)s" msgstr "스냅샷에서 볼륨 id를 가져올 수 없음, 스냅샷: %(id)s" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "볼륨 id를 가져올 수 없습니다. 볼륨 이름: %s." #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "LUN %(lun_id)s을(를) Cinder에 가져올 수 없습니다. LUN 유형이 일치하지 않습니" "다." 
#, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 HyperMetroPair에 있습니다." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 LUN 복사 작업에 있습니다." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 LUN 그룹에 있습니다." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 LUN 미러에 있습니다." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 SplitMirror에 있습니다." #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 마이그레이션 작업에 있습니다." #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "" "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 원격 복제 작업에 있습니다." #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "LUN %s을(를) Cinder에 가져올 수 없습니다. LUN 상태가 정상이 아닙니다." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "스냅샷 %s을(를) Cinder에 가져올 수 없습니다. 스냅샷이 볼륨에 속하지 않습니다." #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "스냅샷 %s을(를) Cinder에 가져올 수 없습니다. 스냅샷이 개시자에 공개되어 있습" "니다." #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "스냅샷 %s을(를) Cinder에 가져올 수 없습니다. 스냅샷 상태가 정상이 아니거나 실" "행 상태가 온라인이 아닙니다." #, python-format msgid "Can't open config file: %s" msgstr "구성 파일을 열 수 없음: %s" msgid "Can't parse backup record." msgstr "백업 레코드를 구문 분석할 수 없습니다. " #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "has no volume type." msgstr "" "볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 그룹" "에 볼륨 유형이 없습니다. " #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " "is already in consistency group %(orig_group)s." msgstr "" "이미 일관성 그룹 %(orig_group)s에 있기 때문에 볼륨 %(volume_id)s을(를) 일관" "성 그룹 %(group_id)s에 추가할 수 없습니다. " #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume cannot be found." msgstr "" "볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 볼륨" "을 찾을 수 없습니다. " #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume does not exist." msgstr "" "볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 볼륨" "이 없습니다. " #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume is in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 볼륨 " "상태 %(status)s이(가) 올바르지 않은 상태입니다. 올바른 상태는 %(valid)s입니" "다. " #, python-format msgid "" "Cannot add volume %(volume_id)s to consistency group %(group_id)s because " "volume type %(volume_type)s is not supported by the group." msgstr "" "볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 해당 " "그룹에서 볼륨 유형 %(volume_type)s을(를) 지원하지 않습니다. " #, python-format msgid "" "Cannot attach already attached volume %s; multiattach is disabled via the " "'netapp_enable_multiattach' configuration option." 
msgstr "" "이미 연결된 볼륨 %s은(는) 연결할 수 없습니다. 다중 연결은 " "'netapp_enable_multiattach' 구성 옵션을 통해 사용 안함으로 설정됩니다. " msgid "Cannot change VF context in the session." msgstr "세션에서 VF 컨텍스트를 변경할 수 없습니다." #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "VF 컨텍스트를 변경할 수 없음, 관리할 수 있는 VF 목록 %(vf_list)s에서 지정된 " "VF를 사용할 수 없습니다." msgid "Cannot connect to ECOM server." msgstr "ECOM 서버를 연결할 수 없습니다. " #, python-format msgid "" "Cannot create clone of size %(vol_size)s from volume of size %(src_vol_size)s" msgstr "" "크기가 %(src_vol_size)s인 볼륨에서 크기가 %(vol_size)s인 복제를 작성할 수 없" "음" #, python-format msgid "" "Cannot create consistency group %(group)s because snapshot %(snap)s is not " "in a valid state. Valid states are: %(valid)s." msgstr "" "스냅샷 %(snap)s이(가) 올바른 상태가 아니어서 일관성 그룹 %(group)s을(를)작성" "할 수 없습니다. 올바른 상태는 %(valid)s입니다. " #, python-format msgid "" "Cannot create consistency group %(group)s because source volume " "%(source_vol)s is not in a valid state. Valid states are: %(valid)s." msgstr "" "소스 볼륨 %(source_vol)s이(가) 올바른 세부 상태에 있지 않으므로 일관성 그룹 " "%(group)s을(를) 작성할 수 없습니다. 올바른 세부 상태는 %(valid)s입니다. " #, python-format msgid "Cannot create directory %s." msgstr "디렉토리 %s을(를) 작성할 수 없습니다. " msgid "Cannot create encryption specs. Volume type in use." msgstr "암호화 스펙을 작성할 수 없습니다. 볼륨 유형이 사용 중입니다." #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "디스크 형식 %s의 이미지를 작성할 수 없습니다. vmdk 디스크 형식만 허용됩니다. " #, python-format msgid "Cannot create masking view: %(maskingViewName)s. " msgstr "마스킹 보기 %(maskingViewName)s을(를) 작성할 수 없습니다. " #, python-format msgid "" "Cannot create more than %(req)s volumes on the ESeries array when " "'netapp_enable_multiattach' is set to true." msgstr "" "'netapp_enable_multiattach'가 true로 설정되면 ESeries 배열에서 %(req)s개를 초" "과하는 볼륨을 작성할 수 없습니다. " #, python-format msgid "Cannot create or find an storage group with name %(sgGroupName)s." msgstr "이름이 %(sgGroupName)s인 스토리지 그룹을 작성하거나 찾을 수 없습니다. " #, python-format msgid "" "Cannot create volume of size %(vol_size)s from snapshot of size %(snap_size)s" msgstr "" "크기가 %(snap_size)s인 스냅샷에서 크기가 %(vol_size)s인 볼륨을 작성할 수 없음" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "크기가 %s인 볼륨을 작성할 수 없습니다. 8GB의 배수가 아닙니다. " #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "이름이 %(name)s이고 스펙이 %(extra_specs)s인 volume_type을 작성할 수 없음" #, python-format msgid "Cannot delete LUN %s while snapshots exist." msgstr "스냅샷이 존재하는 동안 LUN %s을(를) 삭제할 수 없습니다. " #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)d volume instances." msgstr "" "캐시 볼륨: %(cachevol_name)s을(를) 삭제할 수 없습니다. %(updated_at)s에 업데" "이트되었으며 현재 %(numclones)d개 볼륨 인스턴스를 가지고 있습니다. " #, python-format msgid "" "Cannot delete cache volume: %(cachevol_name)s. It was updated at " "%(updated_at)s and currently has %(numclones)s volume instances." msgstr "" "캐시 볼륨: %(cachevol_name)s을(를) 삭제할 수 없습니다. %(updated_at)s에 업데" "이트되었으며 현재 %(numclones)s개 볼륨 인스턴스를 가지고 있습니다. " msgid "Cannot delete encryption specs. Volume type in use." msgstr "암호화 스펙을 삭제할 수 없습니다. 볼륨 유형이 사용 중입니다." msgid "Cannot determine storage pool settings." msgstr "스토리지 풀 설정을 판별할 수 없습니다." msgid "Cannot execute /sbin/mount.sofs" msgstr "/sbin/mount.sofs를 실행할 수 없음" #, python-format msgid "Cannot find CG group %s." msgstr "CG 그룹 %s을(를) 찾을 수 없습니다." 
#, python-format msgid "" "Cannot find Controller Configuration Service for storage system " "%(storage_system)s." msgstr "" "스토리지 시스템 %(storage_system)s에 대한 제어기 구성 서비스를 찾을 수 없습니" "다." #, python-format msgid "Cannot find Replication Service to create volume for snapshot %s." msgstr "스냅샷 %s에 대한 볼륨을 작성할 복제 서비스를 찾을 수 없습니다. " #, python-format msgid "Cannot find Replication Service to delete snapshot %s." msgstr "스냅샷 %s을(를) 삭제할 복제 서비스를 찾을 수 없습니다. " #, python-format msgid "Cannot find Replication service on system %s." msgstr "%s 시스템에서 복제 서비스를 찾을 수 없습니다. " #, python-format msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." msgstr "볼륨: %(id)s을(를) 찾을 수 없습니다. 관리 해제 조작. 종료 중..." #, python-format msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." msgstr "%(volumename)s 볼륨을 찾을 수 없습니다. 확장 조작. 종료 중..." #, python-format msgid "Cannot find device number for volume %(volumeName)s." msgstr "%(volumeName)s 볼륨에 대한 디바이스 번호를 찾을 수 없습니다. " msgid "Cannot find migration task." msgstr "마이그레이션 태스크를 찾을 수 없습니다. " #, python-format msgid "Cannot find replication service on system %s." msgstr "시스템 %s에서 복제 서비스를 찾을 수 없습니다. " #, python-format msgid "Cannot find source CG instance. consistencygroup_id: %s." msgstr "소스 CG 인스턴스를 찾을 수 없습니다. consistencygroup_id: %s." #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "채널 id: %(channel_id)s을(를) 사용하여 mcs_id를 가져올 수 없습니다. " msgid "Cannot get necessary pool or storage system information." msgstr "필요한 풀 또는 스토리지 시스템 정보를 가져올 수 없습니다. " #, python-format msgid "" "Cannot get or create a storage group: %(sgGroupName)s for volume " "%(volumeName)s " msgstr "" "%(volumeName)s 볼륨의 스토리지 그룹 %(sgGroupName)s을(를) 가져오거나 작성할 " "수 없음 " #, python-format msgid "Cannot get or create initiator group: %(igGroupName)s. " msgstr "개시자 그룹 %(igGroupName)s을(를) 가져오거나 작성할 수 없습니다. " #, python-format msgid "Cannot get port group: %(pgGroupName)s. " msgstr "포트 그룹 %(pgGroupName)s을(를) 가져올 수 없습니다. " #, python-format msgid "" "Cannot get storage group: %(sgGroupName)s from masking view " "%(maskingViewInstanceName)s. " msgstr "" "마스킹 보기에서 스토리지 그룹: %(sgGroupName)s을(를) 가져오지 못" "함%(maskingViewInstanceName)s." #, python-format msgid "" "Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "%(sps)s에 지원되는 크기 범위를 가져올 수 없음. 리턴 코드: %(rc)lu. 오류: " "%(error)s." #, python-format msgid "" "Cannot get the default storage group for FAST policy: %(fastPolicyName)s." msgstr "" "FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹을 가져올 수 없습니다." msgid "Cannot get the portgroup from the masking view." msgstr "마스킹 보기에서 portgroup을 가져올 수 없습니다." msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "스케일 SOFS를 마운트할 수 없음. syslog에서 오류 확인" msgid "Cannot ping DRBDmanage backend" msgstr "DRBDmanage 백엔드에 대해 ping을 실행할 수 없음" #, python-format msgid "Cannot place volume %(id)s on %(host)s" msgstr "%(id)s 볼륨을 %(host)s에 배치할 수 없음 " #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "소스에서 일관성 그룹 %(name)s을(를) 작성하는 데 필요한 'cgsnapshot_id'와 " "'source_cgid'를 모두 제공할 수 없습니다. " msgid "Cannot register resource" msgstr "자원을 등록할 수 없습니다. " msgid "Cannot register resources" msgstr "자원을 등록할 수 없습니다. " #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because it is not in the group." msgstr "" "일관성 그룹 %(group_id)s에서 볼륨 %(volume_id)s을(를) 제거할 수 없습니다. 해" "당 볼륨이 그룹에 있지 않습니다. 
" #, python-format msgid "" "Cannot remove volume %(volume_id)s from consistency group %(group_id)s " "because volume is in an invalid state: %(status)s. Valid states are: " "%(valid)s." msgstr "" "일관성 그룹 %(group_id)s에서 볼륨 %(volume_id)s을(를) 제거할 수 없습니다. 볼" "륨 상태 %(status)s이(가) 올바르지 않은 상태입니다. 올바른 상태: %(valid)s." #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "HPE3PARDriver에서 %s(으)로 재입력할 수 없습니다." msgid "Cannot retype from one 3PAR array to another." msgstr "하나의 3PAR 배열에서 다른 3PAR 배열로 재입력할 수 없습니다." msgid "Cannot retype to a CPG in a different domain." msgstr "다른 도메인의 CPG로 재입력할 수 없습니다." msgid "Cannot retype to a snap CPG in a different domain." msgstr "다른 도메인의 snap CPG로 재입력할 수 없습니다." msgid "" "Cannot run vgc-cluster command, please ensure software is installed and " "permissions are set properly." msgstr "" "vgc-cluster 명령을 실행할 수 없습니다. 소프트웨어가 설치되어 있으며 권한이 적" "절하게 설정되어 있는지 확인하십시오. " msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." msgstr "hitachi_serial_number 및 hitachi_unit_name 모두 설정할 수 없습니다." msgid "Cannot specify both protection domain name and protection domain id." msgstr "보호 도메인 이름과 보호 도메인 ID를 모두 지정할 수 없습니다. " msgid "Cannot specify both storage pool name and storage pool id." msgstr "스토리지 풀 이름과 스토리지 풀 ID를 모두 지정할 수 없습니다. " #, python-format msgid "" "Cannot update consistency group %(group_id)s because no valid name, " "description, add_volumes, or remove_volumes were provided." msgstr "" "일관성 그룹 %(group_id)s을(를) 업데이트할 수 없습니다. 올바른 이름, 설명, " "add_volumes 또는 remove_volumes를 제공하지 않았습니다. " msgid "Cannot update encryption specs. Volume type in use." msgstr "암호화 스펙을 업데이트할 수 없습니다. 볼륨 유형이 사용 중입니다." #, python-format msgid "Cannot update volume_type %(id)s" msgstr "volume_type %(id)s을(를) 업데이트할 수 없음" #, python-format msgid "Cannot verify the existence of object:%(instanceName)s." msgstr "오브젝트의 존재를 확인할 수 없음: %(instanceName)s." msgid "Cascade option is not supported." msgstr "계단식 옵션은 지원되지 않습니다." #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "Cg 스냅샷 %(cgsnapshot_id)s을(를) 찾을 수 없습니다. " msgid "Cgsnahost is empty. No consistency group will be created." msgstr "Cgsnahost가 비어 있습니다. 일관성 그룹이 작성되지 않습니다. " msgid "Cgsnapshot status must be available or error" msgstr "cg 스냅샷 상태는 사용 가능 또는 오류여야 함" msgid "Change hostlun id error." msgstr "hostlun id 변경 오류." msgid "Change lun priority error." msgstr "Lun 우선순위 변경 오류입니다. " msgid "Change lun smarttier policy error." msgstr "Lun smarttier 정책 변경 오류입니다. " #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "이 변경으로 인해 %(unders)s 자원의 사용량이 0보다 적게 됩니다. " msgid "Check access permissions for the ZFS share assigned to this driver." msgstr "이 드라이버에 지정된 ZFS 공유에 대한 액세스 권한을 확인하십시오." msgid "Check hostgroup associate error." msgstr "호스트 그룹 연관 확인 오류입니다. " msgid "Check initiator added to array error." msgstr "배열에 추가된 개시자 확인 오류입니다. " msgid "Check initiator associated to host error." msgstr "호스트에 연관된 개시자 확인 오류입니다. " msgid "Check lungroup associate error." msgstr "Lun 그룹 연관 확인 오류입니다. " msgid "Check portgroup associate error." msgstr "포트 그룹 연관 확인 오류입니다. " msgid "" "Check the state of the http service. Also ensure that the https port number " "is the same as the one specified in cinder.conf." msgstr "" "http 서비스 상태를 확인하십시오. 또한 https 포트 번호가 cinder.conf에 지정된 " "번호 중 하나인지도 확인하십시오." msgid "Chunk size is not multiple of block size for creating hash." msgstr "청크 크기가 해시 작성을 위한 블록 크기의 배수가 아닙니다. 
" #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Cisco Fibre Channel Zoning CLI 오류: %(reason)s" #, python-format msgid "Clone feature is not licensed on %(storageSystem)s." msgstr "%(storageSystem)s에 복제 기능 라이센스가 없습니다." #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "복제 유형 '%(clone_type)s'이(가) 올바르지 않습니다. 올바른 값: " "'%(full_clone)s' 및 '%(linked_clone)s'." msgid "" "Cluster is not formatted. You should probably perform \"dog cluster format\"." msgstr "" "클러스터가 형식화되지 않았습니다. \"dog 클러스터 형식화\"를 수행해야 할 수 있" "습니다. " #, python-format msgid "Coho Data Cinder driver failure: %(message)s" msgstr "Coho Data Cinder 드라이버 실패: %(message)s" msgid "Coho rpc port is not configured" msgstr "Coho rpc 포트가 구성되지 않음" #, python-format msgid "Command %(cmd)s blocked in the CLI and was cancelled" msgstr "명령 %(cmd)s이(가) CLI에서 차단되어 취소됨" #, python-format msgid "CommandLineHelper._wait_for_a_condition: %s timeout" msgstr "CommandLineHelper._wait_for_a_condition: %s 제한시간" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s 제한시간" msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "압축 인에이블러가 설치되어 있지 않습니다. 압축된 볼륨을 작성할 수 없습니다." #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "계산 클러스터: %(cluster)s을(를) 찾을 수 없습니다. " msgid "Condition has no field." msgstr "조건에 필드가 없습니다." #, python-format msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" msgstr "" "구성 'max_over_subscription_ratio'가 올바르지 않습니다. 0보다 커야 함: %s" msgid "Configuration error: dell_sc_ssn not set." msgstr "구성 오류: dell_sc_ssn이 설정되지 않았습니다." #, python-format msgid "Configuration file %(configurationFile)s does not exist." msgstr "구성 파일 %(configurationFile)s이(가) 없습니다. " msgid "Configuration is not found." msgstr "구성을 찾을 수 없습니다." #, python-format msgid "Configuration value %s is not set." msgstr "구성 값 %s을(를) 설정하지 않았습니다." msgid "Configured host type is not supported." msgstr "구성된 호스트 유형이 지원되지 않습니다." #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "볼륨 유형 %s에서 QoS 스펙 충돌: QoS 스펙이 볼륨 유형과 연관된 경우에는 볼륨 " "유형 추가 스펙에서 레거시 \"netapp:qos_policy_group\"이 허용되지 않습니다. " #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "glance 연결 실패: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Swift 연결 실패: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "커넥터가 제공되지 않음: %s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "커넥터에 필수 정보가 없음: %(missing)s" #, python-format msgid "" "Consistency group %s still contains volumes. The force flag is required to " "delete it." msgstr "" "일관성 그룹 %s에 아직 볼륨이 포함되어 있습니다. 강제 실행 플래그를 삭제해야 " "합니다." #, python-format msgid "Consistency group %s still has dependent cgsnapshots." msgstr "일관성 그룹 %s에 아직 종속자 cg 스냅샷이 있습니다." msgid "Consistency group is empty. No cgsnapshot will be created." msgstr "일관성 그룹이 비어 있습니다. 작성된 cg 스냅샷이 없습니다." #, python-format msgid "" "Consistency group status must be available or error, but current status is: " "%s" msgstr "" "일관성 그룹 상태가 사용 가능 또는 오류여야 하지만 현재 상태가 %s입니다." #, python-format msgid "Consistency group status must be available, but current status is: %s." 
msgstr "일관성 그룹 상태가 사용 가능이어야 하지만 현재 상태가 %s입니다. " #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "일관성 그룹 %(consistencygroup_id)s을(를) 찾을 수 없습니다. " msgid "Container" msgstr "컨테이너" #, python-format msgid "" "Container format: %s is unsupported by the VMDK driver, only 'bare' is " "supported." msgstr "" "컨테이너 형식: %s은(는) VMDK 드라이버에 의해 지원되지 않으며 'bare'만 지원됩" "니다." msgid "Container size smaller than required file size." msgstr "컨테이너 크기가 필요한 파일 크기보다 작습니다." msgid "Content type not supported." msgstr "컨텐츠 유형이 지원되지 않습니다." #, python-format msgid "Controller Configuration Service not found on %(storageSystemName)s." msgstr "%(storageSystemName)s에서 제어기 구성 서비스를 찾을 수 없습니다. " #, python-format msgid "Controller IP '%(host)s' could not be resolved: %(e)s." msgstr "제어기 IP '%(host)s'을(를) 분석할 수 없음: %(e)s." #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "%(f1)s(으)로 변환되었지만 이제 형식이 %(f2)s임" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "%(vol_format)s(으)로 변환되었지만 이제 형식이 %(file_format)s임" #, python-format msgid "Converted to raw, but format is now %s" msgstr "원시로 변환되었지만 형식은 지금 %s임" #, python-format msgid "Converted to raw, but format is now %s." msgstr "원시로 변환되었지만 형식은 현재 %s입니다." msgid "Coordinator uninitialized." msgstr "조정자가 초기화되지 않습니다." #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" "%(status)s." msgstr "" "볼륨 복사 태스크에 실패: convert_to_base_volume: id=%(id)s,설정 상태=" "%(status)s을(를) 무시합니다. " #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "" "복사 볼륨 태스크에 실패: create_cloned_volume id=%(id)s, status=%(status)s." #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "%(src_type)s %(src_id)s에서 %(vol_id)s(으)로 메타데이터를 복사하는 중" msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "사용할 Keystone 엔드포인트를 판별하지 못했습니다. 서비스 카탈로그에서 또는 " "cinder.conf 구성 옵션 'backup_swift_auth_url'을 사용하여 Swift 엔드포인트를 " "설정할 수 있습니다." msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "사용할 Swift 엔드포인트를 판별하지 못했습니다. 서비스 카탈로그에서 또는 " "cinder.conf 구성 옵션 'backup_swift_url'을 사용하여 Swift 엔드포인트를 설정" "할 수 있습니다." msgid "Could not find DISCO wsdl file." msgstr "DISCO wsdl 파일을 찾을 수 없습니다." #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "GPFS 클러스터 ID를 찾을 수 없음: %s." #, python-format msgid "Could not find GPFS file system device: %s." msgstr "GPFS 파일 시스템 디바이스를 찾을 수 없음: %s." #, python-format msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "유형이 %(type_id)s인 볼륨 %(volume_id)s의 호스트를 찾을 수 없습니다." #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s에서 구성을 찾을 수 없음" #, python-format msgid "Could not find iSCSI export for volume %(volumeName)s." msgstr "%(volumeName)s 볼륨에 대한 iSCSI 내보내기를 찾을 수 없습니다. " #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "%s 볼륨에 대한 iSCSI 내보내기를 찾을 수 없음" #, python-format msgid "Could not find iSCSI target for volume: %(volume_id)s." msgstr "볼륨: %(volume_id)s에 대한 iSCSI 대상을 찾을 수 없습니다. " #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "명령 %(cmd)s의 출력에서 키를 찾을 수 없음: %(out)s." 
#, python-format msgid "Could not find parameter %(param)s" msgstr "파라메터를 찾을수 없습니다: %(param)s" #, python-format msgid "Could not find target %s" msgstr "대상 %s을(를) 찾을 수 없음" #, python-format msgid "Could not find the parent volume for Snapshot '%s' on array." msgstr "배열에서 스냅샷 '%s'의 상위 볼륨을 찾을 수 없습니다." #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "볼륨 %(vol)s에서 고유 스냅샷 %(snap)s을(를) 찾을 수 없습니다." msgid "Could not get system name." msgstr "시스템 이름을 가져올 수 없습니다. " #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "%(path)s에서 페이스트 앱 '%(name)s'을(를) 로드할 수 없음" #, python-format msgid "Could not read %s. Re-running with sudo" msgstr "%s을(를) 읽을 수 없음. sudo로 다시 실행합니다. " #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "스냅샷 %(name)s의 정보를 읽을 수 없습니다. 코드: %(code)s. 이유: %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "구성을 파일 %(file_path)s에 복원할 수 없음: %(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "구성을 %(file_path)s에 저장할 수 없음: %(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "일관성 그룹 스냅샷 %s을(를) 시작할 수 없습니다." #, python-format msgid "Counter %s not found" msgstr "카운터 %s을(를) 찾을 수 없음" msgid "Create QoS policy error." msgstr "QoS 정책 작성 오류입니다. " #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "백업 작성 중단. 예상 백업 상태는 %(expected_status)s이지만 %(actual_status)s" "인 동안에는 인스턴스 연관을 변경할 수 없습니다." #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "백업 작성 중단. 예상 볼륨 상태는 %(expected_status)s이지만 %(actual_status)s" "인 동안에는 인스턴스 연관을 변경할 수 없습니다." msgid "Create consistency group failed." msgstr "일관성 그룹을 생성하는 데 실패했습니다." #, python-format msgid "" "Create encrypted volumes with type %(type)s from image %(image)s is not " "supported." msgstr "" "유형이 %(type)s인 암호화된 볼륨을 이미지 %(image)s에서 작성할 수 없습니다. " msgid "Create export for volume failed." msgstr "볼륨에 대한 내보내기 작성에 실패했습니다. " msgid "Create hostgroup error." msgstr "호스트 그룹 작성 오류입니다. " #, python-format msgid "Create hypermetro error. %s." msgstr "hypermetro 작성 오류. %s." msgid "Create lun error." msgstr "lun 작성 오류." msgid "Create lun migration error." msgstr "Lun 마이그레이션 작성 오류입니다. " msgid "Create luncopy error." msgstr "luncopy 작성 오류입니다. " msgid "Create lungroup error." msgstr "Lun 그룹 작성 오류입니다. " msgid "Create manager volume flow failed." msgstr "관리자 볼륨 플로우 작성에 실패했습니다. " msgid "Create port group error." msgstr "포트 그룹 작성 오류." msgid "Create replication error." msgstr "복제 작성 오류." #, python-format msgid "Create replication pair failed. Error: %s." msgstr "복제 쌍 작성 실패. 오류: %s." msgid "Create snapshot error." msgstr "스냅샷 작성 오류입니다. " #, python-format msgid "Create volume error. Because %s." msgstr "볼륨 작성 오류입니다. 원인은 %s입니다. " msgid "Create volume failed." msgstr "볼륨 작성에 실패했습니다. " msgid "Creating a consistency group from a source is not currently supported." msgstr "소스에서 일관성 그룹을 생성하는 기능은 현재 지원되지 않습니다." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" "%(err)s)." msgstr "" "구역 세트 작성 및 활성화에 실패: (구역 세트=%(cfg_name)s 오류=%(err)s)." #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s error=" "%(err)s)." msgstr "구역 세트 작성 및 활성화에 실패: (구역 세트=%(zoneset)s 오류=%(err)s)." 
#, python-format msgid "Creating usages for %(begin_period)s until %(end_period)s" msgstr "%(begin_period)s에서 %(end_period)s까지의 기간에 대한 사용 내역 작성" msgid "Current host isn't part of HGST domain." msgstr "현재 호스트가 HGST 도메인의 일부가 아닙니다. " #, python-format msgid "" "Current host not valid for volume %(id)s with type %(type)s, migration not " "allowed" msgstr "" "현재 호스트가 유형이 %(type)s인 볼륨 %(id)s에 대해 올바르지 않음. 마이그레이" "션이 허용되지 않음" #, python-format msgid "" "Currently mapped host for volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "볼륨 %(vol)s에 대해 현재 맵핑된 호스트가 %(group)s이(가) 포함된 지원되지 않" "는 호스트 그룹에 있습니다. " msgid "DEPRECATED: Deploy v1 of the Cinder API." msgstr "더 이상 사용되지 않음: Cinder API의 v1 배치" msgid "DEPRECATED: Deploy v2 of the Cinder API." msgstr "더 이상 사용되지 않음: Cinder API의 v2 배치" #, python-format msgid "" "DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " "version?" msgstr "" "DRBDmanage 드라이버 오류: 예상 키 \"%s\"이(가) 응답하지 않음, 잘못된 " "DRBDmanage 버전입니까?" msgid "" "DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " "not found." msgstr "" "DRBDmanage 드라이버 설정 오류: 일부 필수 라이브러리(dbus, drbdmanage.*)를 찾" "을 수 없습니다. " #, python-format msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" msgstr "DRBDmanage에서 하나의 자원(\"%(res)s\")을 예상했지만 %(n)d개를 얻음" #, python-format msgid "" "DRBDmanage timeout waiting for new volume after snapshot restore; resource " "\"%(res)s\", volume \"%(vol)s\"" msgstr "" "스냅샷 복원 후에 새 볼륨을 기다리는 중에 DRBDmanage 제한시간이 초과됨, 리소" "스 \"%(res)s\", 볼륨 \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " "snapshot \"%(sn)s\"" msgstr "" "스냅샷이 생성될 때까지 기다리는 동안 DRBDmanage 제한시간이 초과됨, 자원 " "\"%(res)s\", 스냅샷 \"%(sn)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " "\"%(vol)s\"" msgstr "" "볼륨이 생성될 때까지 기다리는 동안 DRBDmanage 제한시간이 초과됨, 자원 " "\"%(res)s\", 볼륨 \"%(vol)s\"" #, python-format msgid "" "DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " "\"%(res)s\", vnr %(vnr)d)" msgstr "" "볼륨 크기를 기다리는 동안 DRBDmanage 제한시간이 초과됨, 볼륨 ID \"%(id)s" "\" (res \"%(res)s\", vnr %(vnr)d)" msgid "Data ONTAP API version could not be determined." msgstr "Data ONTAP API 버전을 판별할 수 없습니다." msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." msgstr "" "7 모드에서 작동 중인 데이터 ONTAP이 QoS 정책 그룹을 지원하지 않습니다. " msgid "Database schema downgrade is not allowed." msgstr "데이터베이스 스키마 다운그레이드가 허용되지 않습니다." #, python-format msgid "Dataset %s is not shared in Nexenta Store appliance" msgstr "데이터 세트 %s이(가) Nexenta Store appliance에서 공유되지 않음" #, python-format msgid "Dataset group %s not found at Nexenta SA" msgstr "Nexenta SA에서 데이터 세트 그룹 %s을(를) 찾을 수 없음" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup는 올바른 프로비저닝 유형이지만 WSAPI 버전 '%(dedup_version)s' 버전 " "'%(version)s'이(가) 설치되어 있어야 합니다. " msgid "Dedup luns cannot be extended" msgstr "Dedup lun을 확장할 수 없음" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume" msgstr "" "중복 제거 인에이블러가 설치되어 있지 않습니다. 중복 제거된 볼륨을 작성할 수 " "없습니다." msgid "Default pool name if unspecified." msgstr "기본 풀 이름입니다(지정되지 않은 경우). " #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: quota_" "%(res)s, it is now deprecated. Please use the default quota class for " "default quota." 
msgstr "" "%(res)s 자원에 대한 기본 할당량은 기본 할당량 플래그 quota_%(res)s에 의해 설" "정됩니다. 이제 이 플래그는 더 이상 사용되지 않습니다. 기본 할당량에 기본 할당" "량 클래스를 사용하십시오." msgid "Default volume type can not be found." msgstr "기본 볼륨 유형을 찾을 수 없습니다." msgid "" "Defines the set of exposed pools and their associated backend query strings" msgstr "노출된 풀 및 해당 연관된 백엔드 조회 문자열의 세트를 정의함" msgid "Delete LUNcopy error." msgstr "LUNcopy 삭제 오류입니다. " msgid "Delete QoS policy error." msgstr "QoS 정책 삭제 오류입니다. " msgid "Delete associated lun from lungroup error." msgstr "Lun 그룹에서 연관된 lun 삭제 오류입니다. " #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "백업 삭제가 중단되었습니다. 현재 구성된 백업 서비스 [%(configured_service)s]" "은(는) 이 백업 [%(backup_service)s]을(를) 작성하기 위해 사용된 백업 서비스가 " "아닙니다." msgid "Delete consistency group failed." msgstr "일관성 그룹 삭제에 실패했습니다. " msgid "Delete hostgroup error." msgstr "호스트 그룹 삭제 오류입니다. " msgid "Delete hostgroup from mapping view error." msgstr "맵핑 보기에서 호스트 그룹 삭제 오류입니다. " msgid "Delete lun error." msgstr "LUN 삭제 오류입니다. " msgid "Delete lun migration error." msgstr "Lun 마이그레이션 삭제 오류입니다. " msgid "Delete lungroup error." msgstr "Lun 그룹 삭제 오류입니다. " msgid "Delete lungroup from mapping view error." msgstr "맵핑 보기에에서 lun 그룹 삭제 오류입니다. " msgid "Delete mapping view error." msgstr "맵핑 보기 삭제 오류입니다. " msgid "Delete port group error." msgstr "포트 그룹 삭제 오류." msgid "Delete portgroup from mapping view error." msgstr "맵핑 보기에서 포트 그룹 삭제 오류입니다. " msgid "Delete snapshot error." msgstr "스냅샷 삭제 오류입니다. " #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "볼륨의 스냅샷 삭제가 다음 상태에서 지원되지 않음: %s" #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup 중단. 예상 백업 상태는 %(expected_status)s이지만 " "%(actual_status)s인 동안에는 인스턴스 연관을 변경할 수 없습니다." msgid "Deleting volume from database and skipping rpc." msgstr "데이터베이스에서 볼륨을 삭제하고 rpc를 건너뜁니다." #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "구역 삭제 실패: (명령=%(cmd)s 오류=%(err)s)." msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "일관성 그룹 지원을 위해서는 Dell API 2.1 이상이 필요함" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "" "직접 연결에서는 Dell Cinder 드라이버 구성 오류 복제가 지원되지 않습니다." #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Dell Cinder 드라이버 구성 오류 replication_device %s을(를) 찾을 수 없음" msgid "Deploy v3 of the Cinder API." msgstr "Cinder API의 v3 배치" msgid "Describe-resource is admin only functionality" msgstr "Describe-resource 기능은 관리자만 사용가능" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "%(exp)s을(를) 예상했지만 대상에 migration_status %(stat)s이(가) 있음. " msgid "Destination host must be different than the current host." msgstr "대상 호스트가 현재 호스트와 달라야 합니다. " msgid "Destination volume not mid-migration." msgstr "대상 볼륨이 마이그레이션에 포함되지 않음 " msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "볼륨 분리 실패: 둘 이상의 첨부가 있지만 attachment_id가 제공되지 않았습니다. " msgid "Detach volume from instance and then try again." msgstr "인스턴스에서 볼륨을 분리하고 다시 시도하십시오." 
#, python-format msgid "Detected more than one volume with name %(vol_name)s" msgstr "이름이 %(vol_name)s인 둘 이상의 볼륨을 발견했음" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "%(fun)s에서 예상 열을 찾을 수 없음: %(hdr)s." #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "%(fun)s에서 예상 키 %(key)s을(를) 찾을 수 없음: %(raw)s." msgid "Disabled reason contains invalid characters or is too long" msgstr "문자가 올바르지 않거나 너무 긴 이유로 사용되지 않습니다. " #, python-format msgid "Domain with name %s wasn't found." msgstr "이름이 %s인 도메인을 찾을 수 없습니다. " #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "하위 레벨 GPFS 클러스터가 발견됨. GPFS 복제 기능을 클러스터 디먼 레벨 %(cur)s" "에서 사용할 수 없음 - 최소 %(min)s 레벨이어야 합니다. " #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "드라이버 연결 초기화에 실패했습니다(오류: %(err)s). " msgid "" "Driver is not able to do retype because the volume (LUN {}) has snapshot " "which is forbidden to migrate." msgstr "" "볼륨(LUN {})에 마이그레이션이 금지된 스냅샷이 있으므로 드라이버에서 다시 입력" "을 수행할 수 없습니다." msgid "Driver must implement initialize_connection" msgstr "드라이버가 initialize_connection을 구현해야 함" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "드라이버가 가져온 백업 데이터를 디코드했지만 필드(%s)가 누락되었습니다. " #, python-format msgid "" "E-series proxy API version %(current_version)s does not support full set of " "SSC extra specs. The proxy version must be at at least %(min_version)s." msgstr "" "E-series 프록시 API 버전 %(current_version)s이(가) SSC 추가 스펙의 전체 세트" "를 지원하지 않습니다. 프록시 버전은 %(min_version)s 이상이어야 합니다. " #, python-format msgid "" "EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " "%(out)s)." msgstr "" "EMC VNX Cinder 드라이버 CLI 예외: %(cmd)s(리턴 코드: %(rc)s) (출력: %(out)s)." #, python-format msgid "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " "(Output: %(out)s)." msgstr "" "EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (리턴 코드: %(rc)s) (출" "력: %(out)s)." msgid "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " "values." msgstr "" "EcomServerIp, EcomServerPort, EcomUserName, EcomPassword는 올바른 값을 가지" "고 있어야 합니다. " #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "소스에서 일관성 그룹 %(name)s을(를) 작성하려면 'cgsnapshot_id' 또는 " "'source_cgid'를 제공해야 합니다. " #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO: %(slo)s 또는 워크로드 %(workload)s이(가) 올바르지 않습니다. 올바른 값은" "이전 오류문을 확인하십시오. " msgid "Either hitachi_serial_number or hitachi_unit_name is required." msgstr "hitachi_serial_number 또는 hitachi_unit_name이 필요합니다." #, python-format msgid "Element Composition Service not found on %(storageSystemName)s." msgstr "%(storageSystemName)s에서 요소 컴포지션 서비스를 찾을 수 없습니다. " msgid "Enables QoS." msgstr "QoS를 사용으로 설정합니다. " msgid "Enables compression." msgstr "압축을 사용으로 설정합니다. " msgid "Enables replication." msgstr "복제를 사용으로 설정합니다. " msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "configfs가 /sys/kernel/config에 마운트되는지 확인하십시오." #, python-format msgid "" "Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " "%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." 
msgstr "" "groupInitiatorGroup에 개시자 %(initiator)s을(를) 추가하는 중 오류 발생: " "%(initiatorgroup)s. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s ." #, python-format msgid "" "Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "IQN이 %(iqn)s인 대상 그룹 %(targetgroup)s에 추가하는 중 오류 발생. 리턴 코" "드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "Error Attaching volume %(vol)s." msgstr "볼륨 %(vol)s을(를) 연결하는 중에 오류가 발생했습니다. " #, python-format msgid "" "Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Clone project: %(clone_proj)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "풀: %(pool)s의 볼륨: %(lun)s에서 스냅샷: %(snapshot)s 복제 중 오류 발생 프로" "젝트: %(project)s 복제 프로젝트: %(clone_proj)s 리턴 코드: %(ret.status)d 메" "시지: %(ret.data)s." #, python-format msgid "" "Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "복제된 볼륨 작성 오류: %(cloneName)s 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" "%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "" "복제된 볼륨 작성 중 오류: 볼륨: %(cloneName)s 소스 볼륨: %(sourceName)s. 리" "턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "그룹 작성 오류: %(groupName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "마스킹 보기 작성 오류: %(groupName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "볼륨 작성 오류: %(volumeName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." msgstr "볼륨 작성 오류: %(volumename)s. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "CreateGroupReplica 오류: 소스: %(source)s 대상: %(target)s. 리턴 코드: " "%(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "%(alias)s 별명에 %(initiator)s 개시자를 작성하는 중 오류 발생. 리턴 코드: " "%(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Creating Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "%(pool)s 풀에 %(project)s 프로젝트를 작성하는 중 오류 발생. 리턴 코드: " "%(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Creating Property: %(property)s Type: %(type)s Description: " "%(description)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "특성: %(property)s 유형: %(type)s 설명: %(description)s 작성 중 오류 발생, 리" "턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Creating Share: %(name)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "공유 작성 중 오류 발생: %(name)s 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." #, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "%(pool)s 풀 %(project)s 프로젝트의 %(lun)s 볼륨에 %(snapshot)s 스냅샷을 가져" "오는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." 
#, python-format msgid "" "Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "스냅샷 작성 오류: %(snapshot)s 공유: %(share)s 대상 풀: %(pool)s 프로젝트: " "%(project)s 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "%(alias)s 대상 작성 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." #, python-format msgid "" "Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "IQN %(iqn)s(으)로 대상 그룹 %(targetgroup)s을(를) 작성하는 중 오류 발생. 리" "턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " "Message: %(ret.data)s." msgstr "" "%(lun)s 볼륨(크기: %(size)s)을 작성하는 중 오류 발생. 리턴 코드: " "%(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." msgstr "새 컴포지트 볼륨 작성 중 오류. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Creating replication action on: pool: %(pool)s Project: %(proj)s " "volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "대상: %(tgt)s 및 풀: %(tgt_pool)s에 대해 풀: %(pool)s 프로젝트: %(proj)s 볼" "륨: %(vol)s에서 복제 조치 작성 중 오류 발생, 리턴 코드: %(ret.status)d 메시" "지: %(ret.data)s." msgid "Error Creating unbound volume on an Extend operation." msgstr "확장 조작 시 언바운드 볼륨을 작성하는 중에 오류가 발생했습니다. " msgid "Error Creating unbound volume." msgstr "언바운드 볼륨을 작성하는 중 오류가 발생했습니다." #, python-format msgid "" "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "볼륨 삭제 오류: %(volumeName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " "%(error)s" msgstr "" "그룹 삭제 중 오류: %(storageGroupName)s. 리턴 코드: %(rc)lu. 오류:%(error)s" #, python-format msgid "" "Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " "%(rc)lu. Error: %(error)s" msgstr "" "개시자 그룹 삭제 오류: %(initiatorGroupName)s. 리턴 코드: %(rc)lu. 오류: " "%(error)s" #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "스냅샷: %(snapshot)s 삭제 오류, 공유: %(share)s, 대상 풀: %(pool)s 프로젝트: " "%(project)s 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "%(pool)s 풀 %(project)s 프로젝트의 %(lun)s 볼륨에서 %(snapshot)s 스냅샷을 삭" "제하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " "Return code: %(ret.status)d, Message: %(ret.data)s." msgstr "" "풀: %(pool)s에서 볼륨: %(lun)s 삭제 중 오류 발생, 프로젝트: %(project)s. 리" "턴 코드: %(ret.status)d, 메시지: %(ret.data)s." #, python-format msgid "" "Error Deleting project: %(project)s on pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." msgstr "" "풀: %(pool)s에서 프로젝트: %(project)s을(를) 삭제하는 중 오류 발생, 리턴 코" "드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Deleting replication action: %(id)s Return code: %(ret.status)d " "Message: %(ret.data)s." 
msgstr "" "복제 조치: %(id)s 삭제 중 오류 발생, 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." #, python-format msgid "" "Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." msgstr "볼륨 확장 오류: %(volumeName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "개시자를 가져오는 중 오류 발생: 개시자 그룹: %(initiatorgroup)s 리턴 코드:" "%(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " "%(data)s." msgstr "" "풀 통계 가져오기 오류: 풀: %(pool)s 리턴 코드: %(status)d 메시지: %(data)s." #, python-format msgid "" "Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "프로젝트 통계를 가져오는 중 오류 발생: 풀: %(pool)s 프로젝트: %(project)s 리" "턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "공유 가져오기 중 오류 발생: %(share)s 풀: %(pool)s 프로젝트: %(project)s 리" "턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "%(pool)s 풀 %(project)s 프로젝트의 %(lun)s 볼륨에서 %(snapshot)s 스냅샷을 가" "져오는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " "%(ret.data)s ." msgstr "" "%(alias)s 대상을 가져오는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." #, python-format msgid "" "Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " "code: %(ret.status)d Message: %(ret.data)s." msgstr "" "%(pool)s 풀 %(project)s 프로젝트에서 %(lun)s 볼륨을 가져오는 중 오류 발생. 리" "턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Migrating volume from one pool to another. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "하나의 풀에서 다른 풀로 볼륨을 마이그레이션하는 중 오류 발생. 리턴 코드: " "%(rc)lu. 오류: %(error)s. " #, python-format msgid "" "Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " "%(error)s." msgstr "" "마스킹 보기 수정 중 오류 발생: %(groupName)s. 리턴 코드: %(rc)lu. 오류: " "%(error)s. " #, python-format msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." msgstr "풀 소유권 오류: %(host)s에서 풀 %(pool)s을(를) 소유하지 않습니다." #, python-format msgid "" "Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " "Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "%(pool)s 풀 %(project)s 프로젝트의 %(lun)s 볼륨에서 %(props)s 특성을 가져오" "는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." msgstr "마이그레이션 세션 종료 중 오류. 리턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "" "Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "%(iqn)s 개시자를 확인하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." #, python-format msgid "" "Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "%(pool)s 풀을 확인하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." #, python-format msgid "" "Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " "%(ret.status)d Message: %(ret.data)s." 
msgstr "" "%(pool)s 풀에서 %(project)s 프로젝트를 확인하는 중 오류 발생. 리턴 코드: " "%(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "서비스 %(service)s 확인 중 오류 발생. 리턴 코드: %(ret.status)d 메시지:" "%(ret.data)s인 동안에는 인스턴스 연관을 변경할 수 없습니다." #, python-format msgid "" "Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "%(alias)s 대상을 확인하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." #, python-format msgid "" "Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " "Return code: %(ret.status)d Message: %(ret.data)s." msgstr "" "공유 확인 중 오류 발생: %(share)s, 프로젝트: %(project)s, 풀: %(pool)s, 리턴 " "코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error adding Volume: %(volumeName)s with instance path: " "%(volumeInstancePath)s." msgstr "" "다음 인스턴스 경로를 사용하여 %(volumeName)s 볼륨을 추가하는 중에 오류가 발생" "했습니다. %(volumeInstancePath)s." #, python-format msgid "" "Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " "Error: %(error)s." msgstr "" "그룹에 개시자 추가 중 오류 발생: %(groupName)s. 리턴 코드: %(rc)lu. 오류: " "%(error)s. " #, python-format msgid "Error adding volume to composite volume. Error is: %(error)s." msgstr "컴포지트 볼륨에 볼륨 추가 중 오류: %(error)s." #, python-format msgid "Error appending volume %(volumename)s to target base volume." msgstr "" "%(volumename)s 볼륨을 대상 기본 볼륨에 추가하는 중에 오류가 발생했습니다. " #, python-format msgid "" "Error associating storage group : %(storageGroupName)s. To fast Policy: " "%(fastPolicyName)s with error description: %(errordesc)s." msgstr "" "%(storageGroupName)s 스토리지 그룹을 fast 정책 %(fastPolicyName)s과(와) 연관" "시키는 중 오류 발생. 오류 설명: %(errordesc)s." #, python-format msgid "Error attaching volume %s. Target limit might be reached!" msgstr "" "볼륨 %s에 연결하는 중에 오류가 발생했습니다. 목표 한계에 도달했을 수 있습니" "다! " #, python-format msgid "" "Error break clone relationship: Sync Name: %(syncName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "복제 관계 중단 오류: 동기화 이름: %(syncName)s 리턴 코드: %(rc)lu. 오류: " "%(error)s." msgid "Error connecting to ceph cluster." msgstr "ceph 클러스터 연결 중에 오류가 발생했습니다. " #, python-format msgid "Error connecting via ssh: %s" msgstr "ssh를 통해 연결하는 중 오류 발생: %s" #, python-format msgid "Error creating volume: %s." msgstr "볼륨 작성 중 오류 발생: %s." msgid "Error deleting replay profile." msgstr "재생 프로파일을 삭제하는 중에 오류가 발생했습니다. " #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "%(ssn)s 볼륨: %(volume)s을(를) 삭제하는 중 오류 발생" #, python-format msgid "Error deleting volume %(vol)s: %(err)s." msgstr "볼륨 %(vol)s 삭제 중 오류 발생: %(err)s." #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "평가자 구문 분석 중 오류 발생: %(reason)s" #, python-format msgid "" "Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "공유 편집 중 오류 발생: %(share)s 풀: %(pool)s 리턴 코드: %(ret.status)d 메시" "지: %(ret.data)s." #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "NetworkPortal에 대해 iSER을 사용으로 설정하는 중 오류 발생: ip %(ip)s의 " "iSCSI 포트 %(port)d에서 RDMA가 지원되는지 확인하십시오. " #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "실패한 접속을 정리하는 중에 오류 발생: %(ex)s" #, python-format msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." msgstr "CloudByte API 실행 오류 [%(cmd)s], 오류: %(err)s." 
msgid "Error executing EQL command" msgstr "EQL 명령 실행 중 오류 발생 " #, python-format msgid "Error executing command via ssh: %s" msgstr "ssh를 통해 명령 실행 중에 오류 발생: %s" #, python-format msgid "Error extending volume %(vol)s: %(err)s." msgstr "볼륨 %(vol)s 확장 중 오류 발생: %(err)s." #, python-format msgid "Error extending volume: %(reason)s" msgstr "볼륨 확장 중 오류 발생: %(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "%(name)s을(를) 찾는 중에 오류가 발생했습니다. " #, python-format msgid "Error finding %s." msgstr "%s을(를) 찾는 중에 오류가 발생했습니다. " #, python-format msgid "" "Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." msgstr "" "ReplicationSettingData 가져오기 오류. 리턴 코드: %(rc)lu. 오류: %(error)s." msgid "" "Error getting appliance version details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "어플라이언스 버전 세부사항을 가져오는 중에 오류가 발생했습니다. 리턴 코드: " "%(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "Error getting domain id from name %(name)s: %(err)s." msgstr "이름 %(name)s에서 도메인 ID를 가져오는 중에 오류 발생: %(err)s." #, python-format msgid "Error getting domain id from name %(name)s: %(id)s." msgstr "이름 %(name)s에서 도메인 ID를 가져오는 중에 오류 발생: %(id)s." msgid "Error getting initiator groups." msgstr "개시자 그룹을 가져오는 중 오류가 발생했습니다." #, python-format msgid "Error getting pool id from name %(pool)s: %(err)s." msgstr "이름 %(pool)s에서 풀 ID를 가져오는 중에 오류 발생: %(err)s." #, python-format msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." msgstr "이름 %(pool_name)s에서 풀 ID를 가져오는 중에 오류 발생: %(err_msg)s." #, python-format msgid "" "Error getting replication action: %(id)s. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "복제 조치: %(id)s을(를) 가져오는 중에 오류가 발생했습니다. 리턴 코드: " "%(ret.status)d 메시지: %(ret.data)s." msgid "" "Error getting replication source details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "복제 소스 세부사항을 가져오는 중에 오류가 발생했습니다. 리턴 코드: %(ret." "status)d 메시지: %(ret.data)s." msgid "" "Error getting replication target details. Return code: %(ret.status)d " "Message: %(ret.data)s ." msgstr "" "복제 대상 세부사항을 가져오는 중에 오류가 발생했습니다. 리턴 코드: %(ret." "status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "버전을 가져오는 중 오류 발생: svc: %(svc)s. 리턴 코드: %(ret.status)d 메시" "지: %(ret.data)s." #, python-format msgid "" "Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " "storage: [%(cb_error)s], error code: [%(error_code)s]." msgstr "" "CloudByte 스토리지의 볼륨 [%(cb_volume)s]에 대한 조작 [%(operation)s] 중 오" "류 발생: [%(cb_error)s], 오류 코드: [%(error_code)s]." #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "SolidFire API 응답의 오류: 데이터=%(data)s" #, python-format msgid "Error in space-create for %(space)s of size %(size)d GB" msgstr "크기가 %(size)dGB인 %(space)s에 대한 공간 작성 시 오류 발생" #, python-format msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" msgstr "추가로 %(size)dGB를 가진 볼륨 %(space)s에 대한 공간 확장 시 오류 발생" #, python-format msgid "Error managing volume: %s." msgstr "볼륨 관리 중 오류 발생: %s." #, python-format msgid "Error mapping volume %(vol)s. %(error)s." msgstr "%(vol)s 볼륨 맵핑 중 오류: %(error)s." #, python-format msgid "" "Error modify replica synchronization: %(sv)s operation: %(operation)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "복제본 동기화 수정 오류: %(sv)s 조작: %(operation)s. 리턴 코드: %(rc)lu. 오" "류: %(error)s." 
#, python-format msgid "" "Error modifying Service: %(service)s Return code: %(ret.status)d Message: " "%(ret.data)s." msgstr "" "서비스 수정 중 오류 발생: %(service)s 리턴 코드: %(ret.status)d 메시지:" "%(ret.data)s인 동안에는 인스턴스 연관을 변경할 수 없습니다." #, python-format msgid "" "Error moving volume: %(vol)s from source project: %(src)s to target project: " "%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "볼륨: %(vol)s을(를) 소스 프로젝트: %(src)s에서 대상 프로젝트: %(tgt)s(으)로 " "이동하는 중에 오류가 발생했습니다. 리턴 코드: %(ret.status)d 메시지: " "%(ret.data)s." msgid "Error not a KeyError." msgstr "KeyError가 아닌 오류입니다." msgid "Error not a TypeError." msgstr "TypeError가 아닌 오류입니다." #, python-format msgid "Error occurred when creating cgsnapshot %s." msgstr "cg 스냅샷 %s을(를) 작성하는 중에 오류가 발생했습니다." #, python-format msgid "Error occurred when deleting cgsnapshot %s." msgstr "cg 스냅샷 %s을(를) 삭제하는 중에 오류가 발생했습니다." #, python-format msgid "Error occurred when updating consistency group %s." msgstr "일관성 그룹 %s을(를) 업데이트하는 중에 오류가 발생했습니다. " #, python-format msgid "Error parsing config file: %s" msgstr "구성 파일 구문 분석 중 오류: %s" msgid "Error promoting secondary volume to primary" msgstr "2차 볼륨을 1차로 승격시키는 중 오류 발생" #, python-format msgid "Error removing volume %(vol)s. %(error)s." msgstr "%(vol)s 볼륨 제거 중 오류: %(error)s." #, python-format msgid "Error renaming volume %(vol)s: %(err)s." msgstr "볼륨 %(vol)s 이름 변경 중 오류 발생: %(err)s." #, python-format msgid "Error response: %s" msgstr "오류 응답: %s" msgid "Error retrieving volume size" msgstr "볼륨 크기 검색 시 오류 발생" #, python-format msgid "" "Error sending replication update for action id: %(id)s . Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "조치 id: %(id)s에 대한 복제 업데이트를 전송하는 중에 오류가 발생했습니다. 리" "턴 코드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "" "Error sending replication update. Returned error: %(err)s. Action: %(id)s." msgstr "" "복제 업데이트를 전송하는 중에 오류가 발생했습니다. 리턴된 오류: %(err)s. 조" "치: %(id)s." #, python-format msgid "" "Error setting replication inheritance to %(set)s for volume: %(vol)s project " "%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." msgstr "" "볼륨: %(vol)s 프로젝트 %(project)s에 대해 복제 상속을 %(set)s(으)로 설정하는 " "중에 오류 발생, 리턴 코드: %(ret.status)d 메시지: %(ret.data)s ." #, python-format msgid "" "Error severing the package: %(package)s from source: %(src)s Return code: " "%(ret.status)d Message: %(ret.data)s ." msgstr "" "소스: %(src)s에서 패키지: %(package)s을(를) 제공하는 중 오류 발생, 리턴 코" "드: %(ret.status)d 메시지: %(ret.data)s." #, python-format msgid "Error unbinding volume %(vol)s from pool. %(error)s." msgstr "풀에서 %(vol)s 볼륨 바인드 해제 중 오류: %(error)s." #, python-format msgid "" "Error verifying clone size on Volume clone: %(clone)s Size: %(size)d " "onSnapshot: %(snapshot)s" msgstr "" "복제본 크기 확인 중 오류 발생. 볼륨 복제본: %(clone)s 크기: %(size)d 스냅샷: " "%(snapshot)s" #, python-format msgid "Error while authenticating with switch: %s." msgstr "스위치로 인증하는 동안 오류 발생: %s." #, python-format msgid "Error while changing VF context %s." msgstr "VF 컨텍스트 %s을(를) 변경하는 중에 오류가 발생했습니다." #, python-format msgid "Error while checking the firmware version %s." msgstr "펌웨어 버전 %s을(를) 확인하는 중에 오류가 발생했습니다." #, python-format msgid "Error while checking transaction status: %s" msgstr "트랜잭션 상태 검사 중에 오류 발생: %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "VF가 관리 %s에 사용 가능한지 확인하는 중 오류가 발생했습니다." #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." 
msgstr "" "프로토콜이 %(protocol)s인 스위치 %(switch_id)s에 연결하는 중에 오류 발생. 오" "류: %(error)s." #, python-format msgid "Error while creating authentication token: %s" msgstr "인증 토큰을 작성하는 중에 오류 발생: %s" #, python-format msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." msgstr "" "스냅샷 [status] %(stat)s - [result] %(res)s을(를) 생성하는 중에 오류가 발생했" "습니다." #, python-format msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." msgstr "" "볼륨 [status] %(stat)s - [result] %(res)s을(를) 생성하는 중에 오류가 발생했습" "니다." #, python-format msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" msgstr "" "스냅샷 [status] %(stat)s - [result] %(res)s을(를) 삭제하는 중에 오류가 발생했" "습니다." #, python-format msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." msgstr "" "볼륨 [status] %(stat)s - [result] %(res)s을(를) 삭제하는 중에 오류가 발생했습" "니다." #, python-format msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." msgstr "" "볼륨 [status] %(stat)s - [result] %(res)s을(를) 확장하는 중에 오류가 발생했습" "니다." #, python-format msgid "Error while getting %(op)s details, returned code: %(status)s." msgstr "%(op)s 세부 사항을 가져오는 중에 오류 발생, 리턴 코드: %(status)s." #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "" "ssh를 통해 데이터를 가져오는 동안 오류 발생: (명령=%(cmd)s 오류=%(err)s)." #, python-format msgid "Error while getting disco information [%s]." msgstr "disco 정보 [%s]을(를) 가져오는 중에 오류가 발생했습니다." #, python-format msgid "Error while getting nvp value: %s." msgstr "nvp 값을 가져오는 중 오류 발생: %s." #, python-format msgid "Error while getting session information %s." msgstr "세션 정보 %s을(를) 가져오는 중에 오류가 발생했습니다." #, python-format msgid "Error while parsing the data: %s." msgstr "데이터 구문 분석 중 오류 발생: %s." #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "" "스위치에서 페이지 %(url)s을(를) 쿼리하는 중에 오류 발생, 이유 %(error)s." #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "구역 문자열에서 구역과 cfgs를 제거하는 중에 오류 발생: %(description)s." #, python-format msgid "Error while requesting %(service)s API." msgstr "%(service)s API를 요청하는 중 오류가 발생했습니다." #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "구역 지정 CLI 실행 중에 오류 발생: (명령=%(cmd)s 오류=%(err)s)." #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" "구역 설정에서 새 구역과 cfgs를 업데이트하는 중에 오류가 발생했습니다. 오류 " "%(description)s." msgid "Error writing field to database" msgstr "데이터베이스에 필드 쓰기 오류" #, python-format msgid "Error[%(stat)s - %(res)s] while getting volume id." msgstr "볼륨 id를 가져오는 동안 오류[%(stat)s - %(res)s]이(가) 발생했습니다." #, python-format msgid "" "Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " "[%(vol)s]." msgstr "" "스냅샷 [%(snap_id)s]을(를) 볼륨 [%(vol)s]에 복원하는 동안 오류[%(stat)s - " "%(res)s]이(가) 발생했습니다." #, python-format msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." msgstr "" "볼륨 id를 가져오는 동안 오류[status] %(stat)s - [result] %(res)s]이(가) 발생" "했습니다." #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" "%(volume_id)s 볼륨에 대한 최대 스케줄링 시도 횟수 %(max_attempts)d을(를) 초과" "함" msgid "Exceeded the limit of snapshots per volume" msgstr "볼륨당 스냅샷 한계를 초과함" #, python-format msgid "Exception appending meta volume to target volume %(volumename)s." msgstr "" "대상 볼륨 %(volumename)s에 메타 볼륨을 추가하는 중에 예외가 발생했습니다. 
" #, python-format msgid "" "Exception during create element replica. Clone name: %(cloneName)s Source " "name: %(sourceName)s Extra specs: %(extraSpecs)s " msgstr "" "요소 복제본을 작성하는 중에 예외가 발생했습니다. 복제본 이름: %(cloneName)s " "소스 이름: %(sourceName)s 추가 스펙: %(extraSpecs)s " #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "_select_ds_for_volume의 예외: %s" #, python-format msgid "Exception while forming the zone string: %s." msgstr "구역 문자열을 형성하는 중에 예외 발생: %s." #, python-format msgid "Exception: %s" msgstr "예외: %s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "uuid를 예상했지만 %(uuid)s을(를) 수신했습니다. " #, python-format msgid "Expected exactly one node called \"%s\"" msgstr "\"%s\"(이)라는 정확히 하나의 노드만 필요" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "node_count에 대해 정수를 예상했지만 svcinfo lsiogrp가 리턴됨: %(node)s." #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" "CLI 명령 %(cmd)s에서 출력을 예상하지 않았는데 %(out)s이(가) 생성되었습니다. " #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "vdisk_UID에서 필터링할 때 lsvdisk에서 예상 단일 vdisk가 리턴되었습니다." "%(count)s이(가) 리턴되었습니다. " #, python-format msgid "Expected volume size was %d" msgstr "예상된 볼륨 크기는 %d이지만" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "백업 내보내기가 중단되었습니다. 예상 백업 상태는 %(expected_status)s이지만 " "%(actual_status)s인 동안에는 인스턴스 연관을 변경할 수 없습니다." #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "레코드 내보내기가 중단되었습니다. 현재 구성된 백업 서비스 " "[%(configured_service)s]은(는) 이 백업 [%(backup_service)s]을(를) 작성하기 위" "해 사용된 백업 서비스가 아닙니다." msgid "Extend volume error." msgstr "볼륨 확장 오류입니다. " msgid "" "Extend volume is only supported for this driver when no snapshots exist." msgstr "스냅샷이 없는 경우에만 이 드라이버에 대해 확장 볼륨이 지원됩니다. " msgid "Extend volume not implemented" msgstr "볼륨 확장이 구현되지 않음" msgid "" "FAST VP Enabler is not installed. Can't set tiering policy for the volume" msgstr "" "FAST VP 인에이블러가 설치되어 있지 않습니다. 볼륨에 대해 계층화 정책을 설정" "할 수 없습니다." msgid "FAST is not supported on this array." msgstr "이 배열에서는 FAST가 지원되지 않습니다." msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC는 프로토콜이지만 OpenStack에서 wwpns를 제공하지 않습니다. " #, python-format msgid "Faield to unassign %(volume)s" msgstr "%(volume)s을(를) 지정 취소하지 못함" #, python-format msgid "Fail to create cache volume %(volume)s. Error: %(err)s" msgstr "캐시 볼륨 %(volume)s을(를) 작성하지 못했습니다. 오류: %(err)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "패브릭=%(fabric)s에 대한 연결 추가 실패: 오류:%(err)s" msgid "Failed cgsnapshot" msgstr "cg 스냅샷 실패" #, python-format msgid "Failed creating snapshot for group: %(response)s." msgstr "그룹의 스냅샷 작성 실패: %(response)s." #, python-format msgid "Failed creating snapshot for volume %(volname)s: %(response)s." msgstr "볼륨 %(volname)s에 대한 스냅샷 작성 실패: %(response)s." #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "패브릭 %s에서 활성 구역 세트를 가져오는 데 실패했습니다." #, python-format msgid "Failed getting details for pool %s." msgstr "풀 %s에 대한 세부사항을 가져오지 못했습니다. 
" #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "패브릭=%(fabric)s에 대한 연결 제거 실패: 오류:%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "볼륨 %(volname)s을(를) 확장하지 못했습니다. " #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "%(err)s(으)로 인해 3PAR(%(url)s)로의 로그인 실패" msgid "Failed to access active zoning configuration." msgstr "활성 구역 지정 구성에 액세스하지 못했습니다. " #, python-format msgid "Failed to access zoneset status:%s" msgstr "구역 세트 상태 액세스 실패: %s" #, python-format msgid "" "Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " "%(ret)s, stderr: %(err)s)" msgstr "" "자원 잠금을 획득하는 데 실패했습니다.(serial: %(serial)s, inst: %(inst)s, " "ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "Failed to add %(vol)s into %(sg)s after %(retries)s tries." msgstr "%(retries)s번 시도 후 %(sg)s에 %(vol)s을(를) 추가하지 못했습니다." msgid "Failed to add the logical device." msgstr "논리 디바이스를 추가하는 데 실패했습니다." #, python-format msgid "" "Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " "code: %(rc)lu. Error: %(error)s." msgstr "" "볼륨 %(volumeName)s을(를) 일관성 그룹 %(cgName)s에 추가하지 못했습니다. 리턴 " "코드: %(rc)lu. 오류: %(error)s." msgid "Failed to add zoning configuration." msgstr "구역 지정 구성을 추가하지 못했습니다. " #, python-format msgid "" "Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " "%(reason)s)" msgstr "" "iSCSI 게시자 IQN을 지정하는 데 실패했습니다.(포트: %(port)s, 이유: " "%(reason)s)" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "qos_specs %(specs_id)s을(를) %(type_id)s 유형과 연관시키지 못했습니다." #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "%(volume_id)s 볼륨에 대한 iSCSI 대상을 첨부하지 못했습니다. " #, python-format msgid "Failed to backup volume metadata - %s" msgstr "볼륨 메타데이터를 백업하지 못함 - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "볼륨 메타데이터를 백업하지 못함 - 메타데이터 백업 오브젝트 'backup.%s." "meta'가 이미 존재함" #, python-format msgid "Failed to clone volume from snapshot %s." msgstr "스냅샷 %s에서 볼륨을 복제하지 못했습니다. " #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "%(vendor_name)s 배열 %(host)s에 연결 실패: %(err)s" msgid "Failed to connect to Dell REST API" msgstr "Dell REST API에 연결하는 데 실패" msgid "Failed to connect to array" msgstr "배열에 연결 실패" #, python-format msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" msgstr "sheep 디먼에 연결하지 못했습니다. 주소: %(addr)s, 포트: %(port)s" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "볼륨에 이미지를 복사할 수 없음: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "볼륨에 메타데이터를 복사하지 못함: %(reason)s" msgid "Failed to copy volume, destination device unavailable." msgstr "볼륨 복사에 실패했습니다. 대상 디바이스를 사용할 수 없습니다. " msgid "Failed to copy volume, source device unavailable." msgstr "볼륨 복사에 실패했습니다. 소스 디바이스를 사용할 수 없습니다. " #, python-format msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." msgstr "스냅샷 %(cgSnapshot)s에서 CG %(cgName)s을(를) 작성하지 못했습니다. " #, python-format msgid "Failed to create IG, %s" msgstr "IG를 작성하지 못함, %s" msgid "Failed to create SolidFire Image-Volume" msgstr "SolidFire 이미지 볼륨 작성 실패" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "볼륨 그룹을 작성할 수 없음: %(vg_name)s" #, python-format msgid "" "Failed to create a file. 
(file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "파일을 작성하는 데 실패했습니다.(파일: %(file)s, ret: %(ret)s, stderr: " "%(err)s)" #, python-format msgid "Failed to create a temporary snapshot for volume %s." msgstr "볼륨 %s에 대한 임시 스냅샷을 작성하지 못했습니다. " msgid "Failed to create api volume flow." msgstr "api 볼륨 플로우 작성에 실패했습니다. " #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "%(reason)s(으)로 인해 cg 스냅샷 %(id)s을(를) 작성할 수 없습니다." #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "%(reason)s(으)로 인해 일관성 그룹 %(id)s을(를) 작성할 수 없습니다." #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "일관성 그룹 %(id)s 작성 실패: %(ret)s." #, python-format msgid "" "Failed to create consistency group %s because VNX consistency group cannot " "accept compressed LUNs as members." msgstr "" "일관성 그룹 %s을(를) 작성할 수 없습니다. VNX 일관성 그룹에서 압축된 LUN를 멤" "버로 승인할 수 없습니다. " #, python-format msgid "Failed to create consistency group: %(cgName)s." msgstr "일관성 그룹 작성 실패: %(cgName)s." #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "일관성 그룹 작성 실패: %(cgid)s. 오류: %(excmsg)s." #, python-format msgid "" "Failed to create consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "일관성 그룹 작성 실패: %(consistencyGroupName)s 리턴 코드: %(rc)lu. 오류: " "%(error)s." #, python-format msgid "Failed to create hardware id(s) on %(storageSystemName)s." msgstr "%(storageSystemName)s에서 하드웨어 ID를 작성하지 못했습니다. " #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "호스트 작성 실패: %(name)s. 배열에 있는지 확인하십시오. " #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "호스트 그룹 작성 실패: %(name)s. 배열에 있는지 확인하십시오. " msgid "Failed to create iqn." msgstr "IQN 작성에 실패했습니다. " #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "%(volume_id)s 볼륨에 대한 iscsi 대상을 작성하지 못했습니다. " msgid "Failed to create manage existing flow." msgstr "기존 플로우 관리를 작성하지 못했습니다." msgid "Failed to create manage_existing flow." msgstr "manage_existing 플로우를 작성하지 못했습니다." msgid "Failed to create map on mcs, no channel can map." msgstr "MCS에서 맵 작성에 실패했습니다. 맵핑할 수 있는 채널이 없습니다. " msgid "Failed to create map." msgstr "맵 작성에 실패했습니다. " #, python-format msgid "Failed to create metadata for volume: %(reason)s" msgstr "볼륨에 대한 메타데이터를 작성하지 못함: %(reason)s" msgid "Failed to create partition." msgstr "파티션 작성에 실패했습니다. " #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" "%(qos_specs)s 스펙을 가진 qos_specs %(name)s을(를) 작성하지 못했습니다. " msgid "Failed to create replica." msgstr "복제본 작성에 실패했습니다. " msgid "Failed to create scheduler manager volume flow" msgstr "스케줄러 관리자 볼륨 플로우 작성 실패" #, python-format msgid "Failed to create snapshot %s" msgstr "스냅샷 %s 작성 실패" msgid "Failed to create snapshot as no LUN ID is specified" msgstr "LUN ID를 지정하지 않아서 스냅샷을 작성하지 못함" #, python-format msgid "Failed to create snapshot for cg: %(cgName)s." msgstr "cg의 스냅샷 작성 실패: %(cgName)s. " #, python-format msgid "Failed to create snapshot for volume %s." msgstr "볼륨 %s에 대한 스냅샷을 작성하지 못했습니다. " #, python-format msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." msgstr "볼륨 %(vol)s에서 스냅샷 정책을 작성하지 못함: %(res)s." #, python-format msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." msgstr "볼륨 %(vol)s에서 스냅샷 자원 영역을 작성하지 못함: %(res)s." msgid "Failed to create snapshot." 
msgstr "스냅샷 작성에 실패했습니다. " #, python-format msgid "" "Failed to create snapshot. CloudByte volume information not found for " "OpenStack volume [%s]." msgstr "" "스냅샷을 작성하지 못했습니다. OpenStack 볼륨 [%s]에 대한 CloudByte 볼륨 정보" "를 찾을 수 없습니다." #, python-format msgid "Failed to create south bound connector for %s." msgstr "%s의 남쪽 방향 커텍터를 작성하지 못했습니다." #, python-format msgid "Failed to create storage group %(storageGroupName)s." msgstr "스토리지 그룹 %(storageGroupName)s을(를) 생성하지 못했습니다." #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "씬 풀을 작성하지 못함, 오류 메시지: %s" #, python-format msgid "Failed to create volume %s" msgstr "%s 볼륨을 작성하지 못함 " #, python-format msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." msgstr "" "쌍이 있기 때문에 volume_id: %(volume_id)s에 대한 SI를 삭제하지 못했습니다. " #, python-format msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "논리 디바이스를 삭제하는 데 실패했습니다.(LDEV: %(ldev)s, 이유: %(reason)s)" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "%(reason)s(으)로 인해 cg 스냅샷 %(id)s을(를) 삭제할 수 없습니다." #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "%(reason)s(으)로 인해 일관성 그룹 %(id)s을(를) 삭제할 수 없습니다." #, python-format msgid "Failed to delete consistency group: %(cgName)s." msgstr "일관성 그룹 삭제 실패: %(cgName)s." #, python-format msgid "" "Failed to delete consistency group: %(consistencyGroupName)s Return code: " "%(rc)lu. Error: %(error)s." msgstr "" "일관성 그룹 삭제 실패: %(consistencyGroupName)s 리턴 코드: %(rc)lu. 오류: " "%(error)s." msgid "Failed to delete device." msgstr "장치 삭제에 실패했습니다. " #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "일관성 그룹 %(cgname)s에 대한 파일 세트 삭제 실패. 오류: %(excmsg)s." msgid "Failed to delete iqn." msgstr "IQN 삭제에 실패했습니다. " msgid "Failed to delete map." msgstr "맵 삭제에 실패했습니다. " msgid "Failed to delete partition." msgstr "파티션 삭제에 실패했습니다. " msgid "Failed to delete replica." msgstr "복제본 삭제에 실패했습니다. " #, python-format msgid "Failed to delete snapshot %s" msgstr "스냅샷 %s 삭제 실패" #, python-format msgid "Failed to delete snapshot for cg: %(cgId)s." msgstr "cg의 스냅샷 삭제 실패: %(cgId)s. " #, python-format msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." msgstr "쌍이 있기 때문에 snapshot_id: %s에 대한 스냅샷을 삭제하지 못했습니다. " msgid "Failed to delete snapshot." msgstr "스냅샷 삭제에 실패했습니다. " #, python-format msgid "Failed to delete volume %(volumeName)s." msgstr "%(volumeName)s 볼륨을 삭제하는 데 실패했습니다. " #, python-format msgid "" "Failed to delete volume for volume_id: %(volume_id)s because it has pair." msgstr "" "쌍이 있기 때문에 volume_id: %(volume_id)s에 대한 볼륨을 삭제하지 못했습니다. " #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "%(volume_id)s 볼륨에 대한 iSCSI 대상을 분리하지 못했습니다. " msgid "Failed to determine blockbridge API configuration" msgstr "Blockbridge API 구성 판별 실패" msgid "Failed to disassociate qos specs." msgstr "qos 스펙의 연관을 해제하지 못했습니다. " #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "qos_specs %(specs_id)s을(를) %(type_id)s 유형과 연관 해제시키지 못했습니다. " #, python-format msgid "" "Failed to ensure snapshot resource area, could not locate volume for id %s" msgstr "스냅샷 자원 영역 보장 실패, id %s에 대한 볼륨을 찾을 수 없음" msgid "Failed to establish SSC connection." msgstr "SSC 연결 설정에 실패했습니다. 
" msgid "Failed to establish connection with Coho cluster" msgstr "Coho 클러스터와 연결하는 데 실패" #, python-format msgid "" "Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " "%(error)s." msgstr "" "CloudByte API 실행 실패 [%(cmd)s]. Http 상태: %(status)s, 오류: %(error)s. " msgid "Failed to execute common command." msgstr "공통 명령을 실행하지 못했습니다. " #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "볼륨에 대한 내보내기가 실패함: %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "볼륨 %(name)s을(를) 확장하는 데 실패했습니다. 오류 메시지: %(msg)s." msgid "Failed to find QoSnode" msgstr "QoSnode을 찾는 데 실패" msgid "Failed to find Storage Center" msgstr "Storage Center 찾기 실패" msgid "Failed to find a vdisk copy in the expected pool." msgstr "예상 풀에서 vdisk 사본을 찾는 데 실패했습니다." msgid "Failed to find account for volume." msgstr "볼륨에 대한 계정을 찾지 못했습니다. " #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "경로 %(path)s에 대한 파일 세트를 찾지 못함, 명령 출력: %(cmdout)s." #, python-format msgid "Failed to find group snapshot named: %s" msgstr "%s(이)라는 그룹 스냅샷을 찾지 못했습니다." #, python-format msgid "Failed to find host %s." msgstr "호스트 %s을(를) 찾지 못했습니다. " #, python-format msgid "Failed to find iSCSI initiator group containing %(initiator)s." msgstr "%(initiator)s을(를) 포함하는 iSCSI 개시자 그룹을 찾지 못했습니다." #, python-format msgid "Failed to find storage pool for source volume %s." msgstr "소스 볼륨 %s의 스토리지 풀을 찾는 데 실패했습니다. " #, python-format msgid "Failed to get CloudByte account details for account [%s]." msgstr "계정 [%s]의 CloudByte 계정 세부사항을 가져오지 못했습니다." #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "LUN %s에 대한 LUN 대상 세부사항을 가져오지 못함" #, python-format msgid "Failed to get LUN target details for the LUN %s." msgstr "LUN %s에 대한 LUN 대상 세부사항을 가져오지 못했습니다. " #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "LUN %s에 대한 LUN 대상 목록을 가져오지 못함" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "볼륨 %(volume_id)s에 대한 파티션 ID를 가져오지 못했습니다. " #, python-format msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." msgstr "스냅샷 %(snapshot_id)s에서 Raid 스냅샷 ID를 가져오지 못했습니다. " #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "스냅샷: %(snapshot_id)s에서 Raid 스냅샷 ID를 가져오지 못했습니다. " msgid "Failed to get SplitMirror." msgstr "SplitMirror를 가져오지 못했습니다." #, python-format msgid "" "Failed to get a storage resource. The system will attempt to get the storage " "resource again. (resource: %(resource)s)" msgstr "" "스토리지 자원을 가져오는 데 실패했습니다. 시스템은 스토리지 자원을 다시 가져" "오기 위해 시도합니다.(자원: %(resource)s)" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "qos 스펙 %s의 모든 연관을 가져오지 못함 " msgid "Failed to get channel info." msgstr "채널 정보를 가져오지 못했습니다. " #, python-format msgid "Failed to get code level (%s)." msgstr "코드 레벨을 가져오는 데 실패했습니다(%s)." msgid "Failed to get device info." msgstr "디바이스 정보를 가져오지 못했습니다. " #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "배열에 CPG(%s)가 없기 때문에 도메인을 가져오지 못했습니다." msgid "Failed to get image snapshots." msgstr "이미지 스냅샷을 가져오지 못했습니다." #, python-format msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." msgstr "" "볼륨이 %(volume_id)s인 채널 %(channel_id)s의 ip를 가져오지 못했습니다. " msgid "Failed to get iqn info." msgstr "IQN 정보를 가져오지 못했습니다. " msgid "Failed to get license info." msgstr "라이센스 정보를 가져오지 못했습니다. " msgid "Failed to get lv info." 
msgstr "lv 정보를 가져오지 못했습니다. " msgid "Failed to get map info." msgstr "맵 정보를 가져오지 못했습니다. " msgid "Failed to get migration task." msgstr "마이그레이션 태스크를 가져오지 못했습니다." msgid "Failed to get model update from clone" msgstr "복제에서 모델 업데이트를 가져오지 못함" msgid "Failed to get name server info." msgstr "이름 서버 정보를 가져오지 못했습니다. " msgid "Failed to get network info." msgstr "네트워크 정보를 가져오지 못했습니다. " #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "새 풀: %(pool_id)s에서 새 파트 id를 가져오지 못했습니다. " msgid "Failed to get partition info." msgstr "파티션 정보를 가져오지 못했습니다. " #, python-format msgid "Failed to get pool id with volume %(volume_id)s." msgstr "볼륨이 %(volume_id)s인 풀 id를 가져오지 못했습니다. " #, python-format msgid "Failed to get remote copy information for %(volume)s due to %(err)s." msgstr "" "%(err)s(으)로 인해 %(volume)s의 원격 복사 정보를 가져오는 데 실패했습니다." #, python-format msgid "" "Failed to get remote copy information for %(volume)s. Exception: %(err)s." msgstr "%(volume)s의 원격 복사 정보를 가져오는 데 실패했습니다. 예외: %(err)s." msgid "Failed to get replica info." msgstr "복제본 정보를 가져오지 못했습니다. " msgid "Failed to get show fcns database info." msgstr "표시 fcns 데이터베이스 정보를 가져오지 못했습니다. " msgid "Failed to get size of existing volume: %(vol). Volume Manage failed." msgstr "" "기존 볼륨: %(vol)의 크기를 가져오지 못했습니다. 볼륨 관리에 실패했습니다." #, python-format msgid "Failed to get size of volume %s" msgstr "볼륨 %s의 크기 가져오기 실패" #, python-format msgid "Failed to get snapshot for volume %s." msgstr "볼륨 %s에 대한 스냅샷을 가져오지 못했습니다. " msgid "Failed to get snapshot info." msgstr "스냅샷 정보를 가져오지 못했습니다. " #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "LUN %s에 대한 대상 IQN을 가져오지 못함" msgid "Failed to get target LUN of SplitMirror." msgstr "SplitMirror의 대상 LUN을 가져오지 못했습니다." #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "LUN %s에 대한 대상 포털을 가져오지 못함" msgid "Failed to get targets" msgstr "대상을 가져오지 못함" msgid "Failed to get wwn info." msgstr "WWN 정보를 가져오지 못했습니다. " #, python-format msgid "" "Failed to get, create or add volume %(volumeName)s to masking view " "%(maskingViewName)s. The error message received was %(errorMessage)s." msgstr "" "볼륨 %(volumeName)s을(를) 가져오거나 작성하거나 마스킹 보기 " "%(maskingViewName)s에 추가하지 못함. 오류 메시지 %(errorMessage)s을(를) 수신" "했습니다. " msgid "Failed to identify volume backend." msgstr "볼륨 백엔드 식별 실패" #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "공유 %(cgname)s에 대한 파일 세트 링크 실패. 오류: %(excmsg)s." #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "%s 배열에 로그온하지 못했습니다(올바르지 않은 로그인?). " #, python-format msgid "Failed to login for user %s." msgstr "사용자 %s에 대한 로그인에 실패했습니다. " msgid "Failed to login with all rest URLs." msgstr "모든 나머지 URL로 로그인하지 못했습니다. " #, python-format msgid "" "Failed to make a request to Datera cluster endpoint due to the following " "reason: %s" msgstr "다음 이유로 Datera 클러스터 엔드포인트에 요청을 작성하지 못함: %s" msgid "Failed to manage api volume flow." msgstr "API 볼륨 플로우를 관리하지 못했습니다. " #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "보고된 크기 %(size)s이(가) 부동 소수점 숫자가 아니기 때문에 기존 %(type)s " "%(name)s을(를) 관리하지 못했습니다. " #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "볼륨 크기를 가져오는 중에 오류가 발생하여 기존 볼륨 %(name)s을(를) 관리하는 " "데 실패했습니다. " #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." 
msgstr "" "이름 바꾸기 조작이 실패하여 기존 볼륨 %(name)s을(를) 관리하는 데 실패했습니" "다. 오류 메시지: %(msg)s." #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "보고된 크기 %(size)s이(가) 부동 소수점 숫자가 아니므로 기존 볼륨 %(name)s을" "(를) 관리하지 못했습니다." msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the NFS share passed in the volume reference." msgstr "" "선택한 볼륨 유형의 풀이 볼륨 참조에서 전달된 NFS 공유와 일치하지 않기 때문에 " "기존 볼륨을 관리하지 못했습니다. " msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the file system passed in the volume reference." msgstr "" "선택한 볼륨 유형의 풀이 볼륨 참조에서 전달된 파일 시스템과 일치하지 않기 때문" "에 기존 볼륨을 관리하지 못했습니다. " msgid "" "Failed to manage existing volume because the pool of the volume type chosen " "does not match the pool of the host." msgstr "" "선택한 볼륨 유형의 풀이 호스트의 풀과 일치하지 않기 때문에 기존 볼륨을 관리하" "지 못했습니다. " #, python-format msgid "" "Failed to manage existing volume due to I/O group mismatch. The I/O group of " "the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " "%(opt_iogrp)s." msgstr "" "I/O 그룹 불일치로 인해 기존 볼륨을 관리하는 데 실패했습니다. 관리할 볼륨의 I/" "O 그룹은 %(vdisk_iogrp)s입니다 선택한 유형의 I/O 그룹은 %(opt_iogrp)s입니다." #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "관리할 볼륨 풀이 백엔드 풀과 일치하지 않으므로 기존 볼륨을 관리하는 데 실패했" "습니다. 관리할 볼륨의 풀은 %(vdisk_pool)s입니다. 백엔드의 풀은 " "%(backend_pool)s입니다." msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "관리할 볼륨은 압축이지만 선택한 볼륨은 압축이 아니므로 기존 볼륨을 관리하는 " "데 실패했습니다." msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "" "관리할 볼륨은 압축이 아니지만 선택한 볼륨은 압축이므로 기존 볼륨을 관리하는 " "데 실패했습니다." msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "관리할 볼륨이 올바른 I/O 그룹에 없으므로 기존 볼륨을 관리하는 데 실패했습니" "다." msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "관리할 볼륨은 thick이지만 선택한 볼륨은 thin이므로 기존 볼륨을 관리하는 데 실" "패했습니다." msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "관리할 볼륨은 thin이지만 선택한 볼륨은 think이므로 기존 볼륨을 관리하는 데 실" "패했습니다." #, python-format msgid "Failed to manage volume %s." msgstr "볼륨 %s을(를) 관리하지 못했습니다. " #, python-format msgid "" "Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " "%(port)s, id: %(id)s)" msgstr "" "논리 디바이스를 맵핑하는 데 실패했습니다.(LDEV: %(ldev)s, LUN: %(lun)s, 포" "트: %(port)s, id: %(id)s)" msgid "Failed to migrate volume for the first time." msgstr "볼륨 마이그레이션 첫 번째 실패. " msgid "Failed to migrate volume for the second time." msgstr "볼륨 마이그레이션 두 번째 실패. " #, python-format msgid "Failed to move LUN mapping. Return code: %s" msgstr "LUN 맵핑 이동에 실패했습니다. 리턴 코드: %s" #, python-format msgid "Failed to move volume %s." msgstr "볼륨 %s을(를) 이동하지 못했습니다. " #, python-format msgid "Failed to open a file. 
(file: %(file)s, ret: %(ret)s, stderr: %(err)s)" msgstr "" "파일을 여는 데 실패했습니다.(파일: %(file)s, ret: %(ret)s, stderr: %(err)s)" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI 출력 구문 분석 실패:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form <service_type>:<service_name>:<endpoint_type>" msgstr "" "구성 옵션 'keystone_catalog_info'를 구문 분석하지 못함. <service_type>:" "<service_name>:<endpoint_type> 양식이어야 함" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form <service_type>:<service_name>:<endpoint_type>" msgstr "" "구성 옵션 'swift_catalog_info'를 구문 분석하지 못함. <service_type>:" "<service_name>:<endpoint_type> 양식이어야 함" #, python-format msgid "" "Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " "%(reason)s)" msgstr "" "0 페이지 교정을 수행하는 데 실패했습니다.(LDEV: %(ldev)s, 이유: %(reason)s)" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "볼륨 %(volume)s의 내보내기 제거 실패: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "%(volume_id)s 볼륨에 대한 iscsi 대상을 제거하지 못했습니다. " #, python-format msgid "" "Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " "Return code: %(rc)lu. Error: %(error)s." msgstr "" "볼륨 %(volumeName)s을(를) 일관성 그룹 %(cgName)s에서 제거하지 못했습니다. 리" "턴 코드: %(rc)lu. 오류: %(error)s." #, python-format msgid "Failed to remove volume %(volumeName)s from default SG." msgstr "기본 SG에서 볼륨 %(volumeName)s을(를) 제거하지 못했습니다. " #, python-format msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." msgstr "" "기본 SG(%(volumeName)s)에서 %(volumeName)s 볼륨을 제거하지 못했습니다. " #, python-format msgid "" "Failed to remove: %(volumename)s. from the default storage group for FAST " "policy %(fastPolicyName)s." msgstr "" "FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹에서 %(volumename)s 볼" "륨을 제거하지 못했습니다." #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "논리적 볼륨 %(name)s의 이름을 바꾸지 못함, 오류 메시지: %(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "활성 구역 지정 구성 %s을(를) 검색하지 못함" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "대상 IQN %(iqn)s의 CHAP 인증을 설정하지 못했습니다. 세부 사항: %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "기존 볼륨 %(name)s에 대한 QoS 설정에 실패했습니다. 오류 메시지: %(msg)s." msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "SCST 대상에 대해 '수신 사용자' 속성을 설정하는 데 실패했습니다. " msgid "Failed to set partition." msgstr "파티션 설정에 실패했습니다. " #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "일관성 그룹 %(cgname)s에 대한 권한 설정 실패. 오류: %(excmsg)s." #, python-format msgid "" "Failed to specify a logical device for the volume %(volume_id)s to be " "unmapped." msgstr "" "맵핑 해제할 볼륨 %(volume_id)s에 대한 논리 디바이스를 지정하는 데 실패했습" "니다." #, python-format msgid "" "Failed to specify a logical device to be deleted. (method: %(method)s, id: " "%(id)s)" msgstr "" "삭제할 논리 디바이스를 지정하는 데 실패했습니다.(메소드: %(method)s, id: " "%(id)s)" msgid "Failed to terminate migrate session." msgstr "마이그레이션 세션을 종료하는 데 실패했습니다. " #, python-format msgid "Failed to unbind volume %(volume)s" msgstr "볼륨 %(volume)s 바인드 해제 실패" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "일관성 그룹 %(cgname)s에 대한 파일 세트 링크 해제 실패. 오류: %(excmsg)s." 
#, python-format msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" msgstr "" "논리 디바이스를 맵핑 해제하는 데 실패했습니다.(LDEV: %(ldev)s, 이유: " "%(reason)s)" #, python-format msgid "Failed to update consistency group: %(cgName)s." msgstr "일관성 그룹 업데이트 실패: %(cgName)s." #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "볼륨에 대한 메타데이터를 업데이트하지 못함: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "구역 지정 구성 업데이트 또는 삭제에 실패" msgid "Failed to update or delete zoning configuration." msgstr "구역 지정 구성 업데이트 또는 삭제에 실패했습니다." #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "%(qos_specs)s 스펙을 가진 qos_specs %(specs_id)s을(를) 업데이트하지 못했습니" "다. " msgid "Failed to update quota usage while retyping volume." msgstr "볼륨을 다시 입력하는 동안 할당량 사용을 업데이트하는 데 실패했습니다." msgid "Failed to update snapshot." msgstr "스냅샷을 업데이트하는 데 실패했습니다." #, python-format msgid "Failed updating model with driver provided model %(model)s" msgstr "모델을 드라이버 제공 모델 %(model)s(으)로 업데이트하지 못함" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "볼륨 %(vol_id)s 메타데이터 업데이트 실패함(제공된 %(src_type)s %(src_id)s 메" "타데이터 사용) " #, python-format msgid "Failure creating volume %s." msgstr "%s 볼륨을 작성하지 못했습니다." #, python-format msgid "Failure getting LUN info for %s." msgstr "%s에 대한 LUN 정보를 가져오지 못했습니다." #, python-format msgid "Failure in update_volume_key_value_pair:%s" msgstr "update_volume_key_value_pair의 실패: %s" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "복제된 새 LUN을 %s(으)로 이동하지 못했습니다." #, python-format msgid "Failure staging LUN %s to tmp." msgstr "LUN %s을(를) tmp로 스테이징하지 못했습니다." msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "심각한 오류: 사용자가 NetApp 볼륨을 조회하도록 허용되지 않습니다." #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "" "%(reason)s(으)로 인해 Fexvisor에서 볼륨 %(id)s을(를) 추가하지 못했습니다." #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "%(ret)s(으)로 인해 Fexvisor가 그룹 %(group)s에서 볼륨 %(vol)s을(를) 결합하지 " "못했습니다. " #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "%(ret)s(으)로 인해 Fexvisor가 그룹 %(group)s에서 볼륨 %(vol)s을(를) 제거하지 " "못했습니다. " #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "" "%(reason)s(으)로 인해 Fexvisor가 볼륨 %(id)s을(를) 제거하지 못했습니다. " #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "파이버 채널 SAN 검색 실패: %(reason)s" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "파이버 채널 구역 조작 실패: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "파이버 채널 연결 제어 실패: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "%(file_path)s 파일을 찾을 수 없습니다. " #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "파일 %(path)s에 올바르지 않은 백업 파일 %(bfile)s이(가) 있어 중단합니다." #, python-format msgid "File already exists at %s." msgstr "%s에 파일이 이미 있습니다. " #, python-format msgid "File already exists at: %s" msgstr "%s에 파일이 이미 있음" msgid "Find host in hostgroup error." msgstr "호스트 그룹에서 호스트 찾기 오류입니다. " msgid "Find host lun id error." msgstr "호스트 lun id 찾기 오류입니다. " msgid "Find lun group from mapping view error." msgstr "맵핑 보기에서 lun 그룹 찾기 오류입니다. " msgid "Find lun number error." 
msgstr "Lun 번호 찾기 오류입니다. " msgid "Find mapping view error." msgstr "맵핑 보기 찾기 오류입니다. " msgid "Find portgroup error." msgstr "포트 그룹 찾기 오류입니다. " msgid "Find portgroup from mapping view error." msgstr "맵핑 보기에서 포트 그룹 찾기 오류입니다. " #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "플래시 캐시 정책에는 WSAPI 버전 '%(fcache_version)s' 버전 '%(version)s'이" "(가) 설치되어 있어야 합니다. " #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "Flexvisor 볼륨 지정 실패:%(id)s:%(status)s." #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Flexvisor 볼륨 지정 실패:%(id)s:%(status)s." #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor에서 볼륨 %(id)s 스냅샷을 그룹 %(vgid)s 스냅샷 %(vgsid)s에서 찾을 " "수 없습니다." #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "Flexvisor 볼륨 작성 실패:%(volumeid)s:%(status)s." #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 볼륨을 삭제하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor에서 볼륨 %(id)s을(를) 그룹 %(cgid)s에 추가하지 못했습니다." #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "이벤트 ID로 상태를 조회할 수 없어 Flexvisor가 %(id)s 볼륨을 지정하지 못했습니" "다." #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 볼륨을 지정하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "Flexvisor에서 볼륨 %(volume)s에 iqn %(iqn)s을(를) 지정하지 못했습니다." #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 볼륨을 복제하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "Flexvisor가 %(id)s 볼륨을 복제하지(이벤트를 가져오지) 못했습니다." #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 볼륨의 스냅샷을 작성하지 못함: %(status)s." #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "Flexvisor가 %(id)s 볼륨의 스냅샷을 작성하지(이벤트를 가져오지) 못했습니다." #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "Flexvisor에서 볼륨 %(id)s을(를) 그룹 %(vgid)s에 작성하지 못했습니다." #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor가 %(volume)s 볼륨을 작성하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor가 %s 볼륨을 작성하지(이벤트를 가져오지) 못했습니다." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 스냅샷에서 볼륨을 작성하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor가 %(id)s 스냅샷에서 볼륨을 작성하지 못함: %(status)s." #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor가 %(id)s 스냅샷에서 볼륨을 작성하지(이벤트를 가져오지) 못했습니다." #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 스냅샷을 삭제하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "Flexvisor가 %(id)s 스냅샷을 삭제하지(이벤트를 가져오지) 못했습니다." 
#, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 볼륨을 삭제하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 볼륨을 확장하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor가 %(id)s 볼륨을 확장하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "Flexvisor가 %(id)s 볼륨을 확장하지(이벤트를 가져오지) 못했습니다." #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "Flexvisor가 풀 정보 %(id)s을(를) 가져오지 못함: %(status)s." #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Flexvisor가 그룹 %(vgid)s에서 볼륨 %(id)s의 스냅샷 ID를 가져오지 못했습니다." #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor가 그룹 %(cgid)s에서 볼륨 %(id)s을(를) 제거하지 못했습니다. " #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor가 %(id)s 스냅샷에서 볼륨을 파생하지 못함: %(status)s." #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor가 %(id)s 스냅샷에서 볼륨을 파생하지(이벤트를 가져오지) 못했습니다." #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "Flexvisor가 %(id)s 볼륨을 지정 취소하지 못함: %(status)s." #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "Flexvisor가 %(id)s 볼륨을 지정 취소하지(이벤트를 가져오지) 못했습니다." #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor에서 볼륨 %(id)s을(를) 지정 취소하지 못함: %(status)s." #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor에서 소스 볼륨 %(id)s 정보를 찾을 수 없습니다." #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "Flexvisor 볼륨 지정 취소 실패:%(id)s:%(status)s." #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "Flexvisor 볼륨 %(id)s에서 그룹 %(vgid)s을(를) 결합하지 못했습니다." #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "폴더 %s이(가) Nexenta Store 어플라이언스에 없음" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS가 실행 중이 아닙니다. 상태: %s." msgid "Gateway VIP is not set" msgstr "게이트웨이 VIP가 설정되지 않음" msgid "Get FC ports by port group error." msgstr "포트 그룹별로 FC 포트 가져오기 오류." msgid "Get FC ports from array error." msgstr "배열에서 FC 포트 가져오기 오류입니다. " msgid "Get FC target wwpn error." msgstr "FC 대상 wwpn 가져오기 오류입니다. " msgid "Get HyperMetroPair error." msgstr "HyperMetroPair 가져오기 오류." msgid "Get LUN group by view error." msgstr "보기별로 LUN 그룹 가져오기 오류." msgid "Get LUNcopy information error." msgstr "LUNcopy 정보 가져오기 오류입니다. " msgid "Get QoS id by lun id error." msgstr "Lun id별 QoS id 가져오기 오류입니다. " msgid "Get QoS information error." msgstr "QoS 정보 가져오기 오류입니다. " msgid "Get QoS policy error." msgstr "QoS 정책 가져오기 오류입니다. " msgid "Get SplitMirror error." msgstr "SplitMirror 가져오기 오류." msgid "Get active client failed." msgstr "활성 클라이언트를 가져오는 데 실패했습니다." msgid "Get array info error." msgstr "배열 정보 가져오기 오류." msgid "Get cache by name error." msgstr "이름별 캐시 가져오기 오류입니다. " msgid "Get connected free FC wwn error." msgstr "연결된 사용 가능한 FC wwn 가져오기 오류입니다. " msgid "Get engines error." msgstr "엔진 가져오기 오류." msgid "Get host initiators info failed." msgstr "호스트 개시자 정보 가져오기에 실패했습니다. " msgid "Get hostgroup information error." msgstr "호스트 그룹 정보 가져오기 오류입니다. 
" msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "iSCSI 포트 정보 가져오기 오류입니다. huawei conf 파일에서 대상 IP가 구성되어 " "있는지 확인하십시오. " msgid "Get iSCSI port information error." msgstr "iSCSI 포트 정보 가져오기 오류입니다. " msgid "Get iSCSI target port error." msgstr "iSCSI 대상 포트 가져오기 오류입니다. " msgid "Get lun id by name error." msgstr "이름별 lun id 가져오기 오류." msgid "Get lun migration task error." msgstr "Lun 마이그레이션 태스크 가져오기 오류입니다. " msgid "Get lungroup id by lun id error." msgstr "Lun id별 lun 그룹 id 가져오기 오류입니다. " msgid "Get lungroup information error." msgstr "Lun 그룹 정보 가져오기 오류입니다. " msgid "Get migration task error." msgstr "마이그레이션 작업 가져오기 오류." msgid "Get pair failed." msgstr "쌍 가져오기 오류." msgid "Get partition by name error." msgstr "이름별 파티션 가져오기 오류입니다. " msgid "Get partition by partition id error." msgstr "파티션 id별 파티션 가져오기 오류입니다. " msgid "Get port group by view error." msgstr "보기별로 포트 그룹 가져오기 오류." msgid "Get port group error." msgstr "포트 그룹 가져오기 오류." msgid "Get port groups by port error." msgstr "포트별로 포트 그룹 가져오기 오류." msgid "Get ports by port group error." msgstr "포트 그룹별로 포트 가져오기 오류." msgid "Get remote device info failed." msgstr "원격 장치 정보를 가져오는 데 실패했습니다." msgid "Get remote devices error." msgstr "원격 장치 가져오기 오류." msgid "Get smartcache by cache id error." msgstr "캐시 id별 스마트 캐시 가져오기 오류입니다. " msgid "Get snapshot error." msgstr "스냅샷 가져오기 오류." msgid "Get snapshot id error." msgstr "스냅샷 ID 가져오기 오류입니다. " msgid "Get target IP error." msgstr "대상 IP 가져오기 오류입니다. " msgid "Get target LUN of SplitMirror error." msgstr "SplitMirror의 대상 LUN 가져오기 오류." msgid "Get views by port group error." msgstr "포트 그룹별로 보기 가져오기 오류." msgid "Get volume by name error." msgstr "이름별 볼륨 가져오기 오류입니다. " msgid "Get volume error." msgstr "볼륨 가져오기 오류입니다. " #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "글랜스 메타데이터를 업데이트할 수 없음. 볼륨 ID %(volume_id)s에 대해 %(key)s " "키가 있음" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "%(id)s 볼륨/스냅샷에 대한 글랜스 메타데이터를 찾을 수 없습니다. " #, python-format msgid "Gluster config file at %(config)s doesn't exist" msgstr "Gluster config 파일이 %(config)s에 없음" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Google Cloud Storage api 실패: %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Google Cloud Storage 연결 실패: %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Google Cloud Storage oauth2 실패: %(reason)s" #, python-format msgid "Got bad path information from DRBDmanage! (%s)" msgstr "DRBDmanage에서 잘못된 경로 정보를 가져왔습니다(%s)! " msgid "HBSD error occurs." msgstr "HBSD 오류가 발생했습니다." msgid "HNAS has disconnected SSC" msgstr "HNAS에서 SSC의 연결을 끊음" msgid "HPELeftHand url not found" msgstr "HPELeftHand url을 찾을 수 없음" #, python-format msgid "" "HTTPS certificate verification was requested but cannot be enabled with " "purestorage module version %(version)s. Upgrade to a newer version to enable " "this feature." msgstr "" "HTTPS 인증서 검증이 요청되었지만 purestorage 모듈 버전 %(version)s(으)로 사용" "하게 설정할 수 없습니다. 이 기능을 사용하려면 새 버전으로 업그레이드하십시오." #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "마지막 백업 이후 해시 블록 크기가 변경되었습니다. 새 해시 블록 크기:%(new)s. " "이전 해시 블록 크기: %(old)s. 전체 백업을 수행하십시오. " #, python-format msgid "Have not created %(tier_levels)s tier(s)." 
msgstr "%(tier_levels)s 티어를 작성하지 않았습니다. " #, python-format msgid "Hint \"%s\" not supported." msgstr "힌트 \"%s\"이(가) 지원되지 않습니다." msgid "Host" msgstr "호스트" #, python-format msgid "Host %(host)s could not be found." msgstr "%(host)s 호스트를 찾을 수 없습니다. " #, python-format msgid "" "Host %(host)s does not match x509 certificate contents: CommonName " "%(commonName)s." msgstr "" "%(host)s 호스트가 x509 인증서 컨텐츠와 일치하지 않음: CommonName " "%(commonName)s." #, python-format msgid "Host %s has no FC initiators" msgstr "%s 호스트에 FC 개시자가 없음" #, python-format msgid "Host %s has no iSCSI initiator" msgstr "%s 호스트에 iSCSI 개시자가 없음" #, python-format msgid "Host '%s' could not be found." msgstr "'%s' 호스트를 찾을 수 없습니다. " #, python-format msgid "Host group with name %s not found" msgstr "이름이 %s인 호스트 그룹을 찾을 수 없음" #, python-format msgid "Host group with ref %s not found" msgstr "ref %s을(를) 가진 호스트 그룹을 찾을 수 없음" msgid "Host is NOT Frozen." msgstr "호스트가 동결되지 않았습니다." msgid "Host is already Frozen." msgstr "호스트가 이미 동결되었습니다." msgid "Host not found" msgstr "호스트를 찾을 수 없음" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" "호스트를 찾을 수 없습니다. %(host)s에서 %(service)s을(를) 제거하지 못했습니" "다. " #, python-format msgid "Host replication_status must be %s to failover." msgstr "장애 보구하려면 호스트 replication_status가 %s이어야 합니다." #, python-format msgid "Host type %s not supported." msgstr "호스트 유형 %s이(가) 지원되지 않습니다." #, python-format msgid "Host with ports %(ports)s not found." msgstr "포트가 %(ports)s인 호스트를 찾을 수 없습니다. " msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "Hypermetro와 복제를 동일한 volume_type에서 사용할 수 없습니다." #, python-format msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." msgstr "" "I/O 그룹 %(iogrp)d이(가) 올바르지 않습니다. 사용 가능한 I/O 그룹은 %(avail)s" "입니다. " msgid "ID" msgstr "ID" msgid "IP address/hostname of Blockbridge API." msgstr "Blockbridge API의 IP 주소/호스트 이름입니다. " msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "압축이 True로 설정되면 rsize도 설정해야 합니다(-1이 아님). " msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "nofmtdisk가 True로 설정되면 rsize도 -1로 설정해야 합니다." #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "flashsystem_connection_protocol에 올바르지 않은 값 '%(prot)s'을(를) 지정함: " "올바른 값은 %(enabled)s입니다." msgid "Illegal value specified for IOTYPE: 0, 1, or 2." msgstr "IOTYPE에 대해 잘못된 값이 지정됨 : 0, 1 또는 2." msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." msgstr "smarttier에 대해 잘못된 값이 지정됨: 0, 1, 2 또는 3으로 설정하십시오. " msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "storwize_svc_vol_grainsize에 잘못된 값이 지정됨:32, 64, 128 또는 256으로 설정" "하십시오. " msgid "" "Illegal value specified for thin: Can not set thin and thick at the same " "time." msgstr "" "thin에 대해 잘못된 값이 지정됨: 동시에 thin과 thick을 설정할 수 없습니다. " #, python-format msgid "Image %(image_id)s could not be found." msgstr "%(image_id)s 이미지를 찾을 수 없습니다. " #, python-format msgid "Image %(image_id)s is not active." msgstr "%(image_id)s 이미지가 활성 상태가 아닙니다. " #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "%(image_id)s 이미지는 허용할 수 없음: %(reason)s" msgid "Image location not present." msgstr "이미지 위치가 없습니다." #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." 
msgstr "" "이미지 가상 크기가 %(image_size)dGB이므로 %(volume_size)dGB 크기의 볼륨에 맞" "지 않습니다. " msgid "" "ImageBusy error raised while deleting rbd volume. This may have been caused " "by a connection from a client that has crashed and, if so, may be resolved " "by retrying the delete after 30 seconds has elapsed." msgstr "" "rbd 볼륨 삭제 중 ImageBusy 오류가 발생했습니다. 이는 충돌한 클라이언트로부터" "의 연결로 인해 발생했을 수 있습니다. 이러한 경우 30초 후 삭제를 재시도하여 문" "제를 해결할 수도 있습니다." #, python-format msgid "" "Import record failed, cannot find backup service to perform the import. " "Request service %(service)s" msgstr "" "레코드 가져오기에 실패했습니다. 가져오기를 수행할 백업 서비스를 찾을 수 없습" "니다. 요청 서비스 %(service)s" msgid "Incorrect request body format" msgstr "올바르지 않은 요청 본문 형식" msgid "Incorrect request body format." msgstr "올바르지 않은 요청 본문 형식입니다." msgid "Incremental backups exist for this backup." msgstr "이 백업에 대한 증분 백업이 있습니다. " #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Infortrend CLI 예외: %(err)s 매개변수: %(param)s(리턴 코드: %(rc)s) (출력: " "%(out)s)" msgid "Initial tier: {}, policy: {} is not valid." msgstr "초기 계층: {}, 정책: {}가 올바르지 않습니다." msgid "Input type {} is not supported." msgstr "입력 유형 {}는 지원되지 않습니다." msgid "Input volumes or snapshots are invalid." msgstr "입력 볼륨 또는 스냅샷이 올바르지 않습니다. " msgid "Input volumes or source volumes are invalid." msgstr "입력 볼륨 또는 소스 볼륨이 올바르지 않습니다. " #, python-format msgid "Instance %(uuid)s could not be found." msgstr "%(uuid)s 인스턴스를 찾을 수 없습니다. " msgid "Insufficient free space available to extend volume." msgstr "볼륨을 확장하는 데 충분한 여유 공간이 없습니다." msgid "Insufficient privileges" msgstr "권한이 충분하지 않음" msgid "Interval value (in seconds) between connection retries to ceph cluster." msgstr "ceph 클러스터에 대한 연결 재시도 사이의 간격 값(초)입니다. " #, python-format msgid "Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "" "io_port_list에 대해 올바르지 않은 %(protocol)s 포트 %(port)s이(가) 지정되었습" "니다. " #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "3PAR 도메인이 잘못되었습니다: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "ALUA 값이 올바르지 않습니다. ALUA 값은 1 또는 0이어야 합니다. " msgid "Invalid Ceph args provided for backup rbd operation" msgstr "백업 rbd 조작에 올바르지 않은 Ceph 인수가 제공됨 " #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "올바르지 않은 Cg 스냅샷: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "올바르지 않은 일관성 그룹: %(reason)s" msgid "" "Invalid ConsistencyGroup: Consistency group status must be available or " "error, but current status is: in-use" msgstr "" "올바르지 않은 일관성 그룹: 일관성 그룹 상태가 사용 가능 또는 오류여야 하지만 " "현재 상태가 사용 중입니다." #, python-format msgid "" "Invalid ConsistencyGroup: Consistency group status must be available, but " "current status is: %s." msgstr "" "올바르지 않은 일관성 그룹: 일관성 그룹 상태가 사용 가능이어야 하지만현재 상태" "가 %s입니다. " msgid "Invalid ConsistencyGroup: No host to create consistency group" msgstr "올바르지 않은 일관성 그룹: 일관성 그룹을 작성할 호스트가 없음" #, python-format msgid "" "Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " "greater required for manage/unmanage support." msgstr "" "올바르지 않은 HPELeftHand API 버전 발견: %(found)s. 관리/비관리 지원을 위해서" "는 %(minimum)s 이상이 필요합니다. 
" #, python-format msgid "Invalid IP address format: '%s'" msgstr "올바르지 않은 IP 주소 형식: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "볼륨 %s에 대한 QoS 정책을 가져오는 중에 올바르지 않은 QoS 스펙이 발견됨" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "올바르지 않은 복제 대상: %(reason)s" #, python-format msgid "Invalid VNX authentication type: %s" msgstr "올바르지 않은 VNX 인증 유형: %s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "올바르지 않은 Virtuozzo 스토리지 공유 스펙: %r. 다음이어야 함: [MDS1[," "MDS2],...:/][:PASSWORD]." #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "올바르지 않은 XtremIO 버전 %(cur)s, %(min)s 이상의 버전이 필요함" #, python-format msgid "Invalid allocated quotas defined for the following project quotas: %s" msgstr "다음 프로젝트 할당량에 잘못 할당된 할당량이 정의됨 : %s" msgid "Invalid argument" msgstr "올바르지 않은 인수 " msgid "Invalid argument - negative seek offset." msgstr "올바르지 않은 인수 - 음수 찾기 오프셋. " #, python-format msgid "Invalid argument - whence=%s not supported" msgstr "올바르지 않은 인수 - whence=%s은(는) 지원되지 않음 " #, python-format msgid "Invalid argument - whence=%s not supported." msgstr "올바르지 않은 인수 - whence=%s은(는) 지원되지 않습니다. " #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "볼륨 %(volume_id)s의 연결 모드 '%(mode)s'가 잘못 되었습니다." #, python-format msgid "Invalid auth key: %(reason)s" msgstr "잘못된 인증 키: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "올바르지 않은 백업: %(reason)s" #, python-format msgid "" "Invalid barbican api url: version is required, e.g. 'http[s]://|" "[:port]/' url specified is: %s" msgstr "" "올바르지 않은 barbican api url: 버전이 필요합니다(예: 'http[s]://|" "[:port]/') 지정된 url은 %s입니다." msgid "Invalid cgsnapshot" msgstr "올바르지 않은 cg 스냅샷" msgid "Invalid chap user details found in CloudByte storage." msgstr "" "CloudByte 스토리지에서 올바르지 않은 chap 사용자 세부사항이 발견되었습니다. " #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "볼륨 %(name)s의 올바르지 않은 연결 초기화 응답" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "볼륨 %(name)s의 올바르지 않은 연결 초기화 응답: %(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "올바르지 않은 컨텐츠 유형 %(content_type)s." msgid "Invalid credentials" msgstr "올바르지 않은 신임 정보" #, python-format msgid "Invalid directory: %s" msgstr "올바르지 않은 디렉토리: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "올바르지 않은 디스크 어댑터 유형: %(invalid_type)s." #, python-format msgid "Invalid disk backing: %s." msgstr "올바르지 않은 디스크 백업: %s." #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "올바르지 않은 디스크 유형: %(disk_type)s." #, python-format msgid "Invalid disk type: %s." msgstr "올바르지 않은 디스크 유형: %s." #, python-format msgid "Invalid host: %(reason)s" msgstr "잘못된 호스트: %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "올바르지 않은 hpe3parclient 버전을 발견했습니다(%(found)s). 버전 %(minimum)s " "이상이 필요합니다. \"pip install --upgrade python-3parclient\"를 실행하여 " "hpe3parclient를 업그레이드하십시오." #, python-format msgid "" "Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " "greater required. Run 'pip install --upgrade python-lefthandclient' to " "upgrade the hpelefthandclient." 
msgstr "" "올바르지 않은 hpelefthandclient 버전을 찾았습니다(%(found)s). 버전 " "%(minimum)s 이상이 필요합니다. 'pip install --upgrade python-" "lefthandclient'를 실행하여 hpelefthandclient를 업그레이드하십시오." #, python-format msgid "Invalid image href %(image_href)s." msgstr "올바르지 않은 이미지 href %(image_href)s." msgid "Invalid image identifier or unable to access requested image." msgstr "이미지 ID가 올바르지 않거나 요청된 이미지에 액세스할 수 없습니다." msgid "Invalid imageRef provided." msgstr "올바르지 않은 imageRef가 제공되었습니다. " msgid "Invalid initiator value received" msgstr "올바르지 않은 개시자 값이 수신됨" msgid "Invalid input" msgstr "올바르지 않은 입력" #, python-format msgid "Invalid input received: %(reason)s" msgstr "잘못된 입력을 받음: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "올바르지 않은 is_public 필터 [%s]" #, python-format msgid "Invalid lun type %s is configured." msgstr "올바르지 않은 lun 유형 %s이(가) 구성되었습니다." #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "잘못된 메타데이터 크기: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "잘못된 메타데이터: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "올바르지 않은 마운트 지점 기반: %s" #, python-format msgid "Invalid mount point base: %s." msgstr "올바르지 않은 마운트 지점 기반: %s." #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "" "새 snapCPG 이름이 재입력을 수행하기에 올바르지 않습니다. new_snap_cpg='%s'." #, python-format msgid "Invalid port number %(config)s for Coho rpc port" msgstr "Coho rpc 포트의 올바르지 않은 포트 번호 %(config)s" #, python-format msgid "" "Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." msgstr "" "올바르지 않은 프리페치 유형 '%s'이(가) 구성되었습니다. PrefetchType은 0,1,2,3" "이어야 합니다." #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "올바르지 않은 qos 스펙: %(reason)s" msgid "Invalid request to attach volume to an invalid target" msgstr "올바르지 않은 대상에 볼륨을 접속하는 요청이 올바르지 않습니다. " msgid "" "Invalid request to attach volume with an invalid mode. Attaching mode should " "be 'rw' or 'ro'" msgstr "" "올바르지 않은 모드로 볼륨을 접속하는 유효하지 않은 요청입니다. 접속 모드는 " "'rw' 또는 'ro'여야 합니다. " #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "올바르지 않은 예약 만기 %(expire)s." msgid "Invalid response header from RPC server" msgstr "RPC 서버의 올바르지 않은 응답 헤더" #, python-format msgid "Invalid secondary id %s." msgstr "올바르지 않은 보조 id %s." #, python-format msgid "Invalid secondary_backend_id specified. Valid backend id is %s." msgstr "" "올바르지 않은 secondary_backend_id가 지정되었습니다. 올바른 백엔드 id는 %s입" "니다." msgid "Invalid service catalog json." msgstr "올바르지 않은 서비스 카탈로그 json입니다. " msgid "Invalid sheepdog cluster status." msgstr "Sheepdog 클러스터 상태가 올바르지 않습니다. " #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "잘못된 스냅샷: %(reason)s" #, python-format msgid "Invalid status: '%s'" msgstr "올바르지 않은 상태: '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "" "올바르지 않은 스토리지 풀 %s이(가) 요청되었습니다. 재입력에 실패했습니다." #, python-format msgid "Invalid storage pool %s specificed." msgstr "올바르지 않은 스토리지 풀 %s이(가) 지정되었습니다." msgid "Invalid storage pool is configured." msgstr "올바르지 않은 스토리지 풀이 구성되었습니다." #, python-format msgid "Invalid synchronize mode specified, allowed mode is %s." msgstr "올바르지 않은 동기화 모드가 지정되었습니다. 허용된 모드는 %s입니다." msgid "Invalid transport type." msgstr "올바르지 않은 전송 유형입니다." #, python-format msgid "Invalid update setting: '%s'" msgstr "올바르지 않은 업데이트 설정: '%s'" #, python-format msgid "" "Invalid url: must be in the form 'http[s]://|[:port]/" "', url specified is: %s" msgstr "" "올바르지 않은 url: 'http[s]://|[:port]/' 형식이어야 " "함, 지정된 url은 %s입니다." 
#, python-format msgid "Invalid value '%s' for force." msgstr "강제 실행에 대한 올바르지 않은 값 '%s'입니다. " #, python-format msgid "Invalid value '%s' for force. " msgstr "강제 실행에 대한 올바르지 않은 값 '%s'입니다. " #, python-format msgid "Invalid value '%s' for is_public. Accepted values: True or False." msgstr "" "is_public에 대해 값 '%s'이(가) 올바르지 않습니다. 승인된 값: True 또는 False." #, python-format msgid "Invalid value '%s' for skip_validation." msgstr "skip_validation에 대한 값 '%s'이(가) 올바르지 않습니다. " #, python-format msgid "Invalid value for 'bootable': '%s'" msgstr "'부트 가능'에 대한 값이 올바르지 않음: '%s'" #, python-format msgid "Invalid value for 'force': '%s'" msgstr "'강제 실행'에 대해 값이 올바르지 않음: '%s'" #, python-format msgid "Invalid value for 'readonly': '%s'" msgstr "'읽기 전용'에 대한 값이 올바르지 않음: '%s'" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "'scheduler_max_attempts'에 대한 올바르지 않은 값, >= 1이어야 함" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "NetApp 구성 옵션 netapp_host_type에 대한 값이 올바르지 않습니다. " msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "NetApp 구성 옵션 netapp_lun_ostype에 대한 값이 올바르지 않습니다. " #, python-format msgid "Invalid value for age, %(age)s" msgstr "연령에 대한 값이 올바르지 않음, %(age)s" #, python-format msgid "Invalid value: \"%s\"" msgstr "올바르지 않은 값: \"%s\"" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "작성 요청에 올바르지 않은 볼륨 크기가 제공됨: %s(크기 인수는정수(또는 정수의 " "문자열 표시)이거나 0보다 커야함). " #, python-format msgid "Invalid volume type: %(reason)s" msgstr "잘못된 볼륨 종류: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "잘못된 볼륨: %(reason)s" #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume is in an invalid state: %(status)s. Valid states " "are: ('available', 'in-use')." msgstr "" "올바르지 않은 볼륨: %(volume_id)s 볼륨을 일관성 그룹 %(group_id)s에 추가할 " "수 없습니다. 볼륨 상태 %(status)s(이)가 올바르지 않은 상태입니다. 올바른 상태" "는 ('available', 'in-use')입니다. " #, python-format msgid "" "Invalid volume: Cannot add volume %(volume_id)s to consistency group " "%(group_id)s because volume type %(volume_type)s is not supported by the " "group." msgstr "" "올바르지 않은 볼륨: %(volume_id)s 볼륨을 일관성 그룹 %(group_id)s에 추가할 " "수 없습니다. 이 그룹에서 볼륨 유형 %(volume_type)s을(를) 제공되어야 합니다." #, python-format msgid "" "Invalid volume: Cannot add volume fake-volume-uuid to consistency group " "%(group_id)s because volume cannot be found." msgstr "" "올바르지 않은 볼륨: fake-volume-uuid 볼륨을 일관성 그룹 %(group_id)s에 추가" "할 수 없습니다. 해당 볼륨을 찾을 수 없습니다. " #, python-format msgid "" "Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " "%(group_id)s because it is not in the group." msgstr "" "올바르지 않은 볼륨: fake-volume-uuid 볼륨을 일관성 그룹 %(group_id)s에서 제거" "할 수 없습니다. 해당 볼륨이 그룹에 없습니다. " #, python-format msgid "Invalid volume_type passed: %s." msgstr "올바르지 않은 volume_type이 전달됨: %s." #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "올바르지 않은 volume_type이 제공됨: %s(요청된 유형이 호환 가능하지 않음, 소" "스 볼륨을 일치시키거나 유형 인수를 생략하십시오)" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." 
msgstr "" "올바르지 않은 volume_type이 제공됨: %s(요청된 유형이 호환 가능하지 않음, 유" "형 인수를 생략하도록 권장함)" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "올바르지 않은 volume_type이 제공됨: %s(이 일관성 그룹이 요청된 유형을지원해" "야 함)." #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "올바르지 않은 wwpn 형식 %(wwpns)s" msgid "Invoking web service failed." msgstr "웹 서비스 호출에 실패했습니다." msgid "Issue encountered waiting for job." msgstr "작업 대기 중에 문제가 발생했습니다." msgid "Issue encountered waiting for synchronization." msgstr "동기화 대기 중에 문제가 발생했습니다." msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "복제가 적절하게 구성되지 않았으므로 장애 복구 실행에 실패했습니다." msgid "Item not found" msgstr "항목을 찾을 수 없음" #, python-format msgid "Job id not found in CloudByte's create volume [%s] response." msgstr "CloudByte의 볼륨 작성[%s] 응답에서 작업 ID를 찾을 수 없습니다. " #, python-format msgid "Job id not found in CloudByte's delete volume [%s] response." msgstr "CloudByte의 삭제 볼륨 [%s] 응답에서 작업 ID를 찾을 수 없습니다. " msgid "" "Key names can only contain alphanumeric characters, underscores, periods, " "colons and hyphens." msgstr "" "키 이름은 영숫자 문자, 밑줄, 마침표, 콜론, 하이픈만 포함할 수 있습니다." #, python-format msgid "KeyError: %s" msgstr "KeyError: %s" msgid "Keystone version 3 or greater must be used to get nested quota support." msgstr "중첩 할당량 지원을 받기 위해 Keystone 버전 3 이상을 사용해야 합니다." #, python-format msgid "LU does not exist for volume: %s" msgstr "볼륨의 LU가 없음: %s" msgid "LUN export failed!" msgstr "LUN 내보내기 실패! " msgid "LUN id({}) is not valid." msgstr "LUN id({})가 올바르지 않습니다." msgid "LUN map overflow on every channel." msgstr "모든 채널의 LUN 맵 오버플로우입니다. " #, python-format msgid "LUN not found with given ref %s." msgstr "주어진 ref %s을(를) 사용하여 LUN을 찾을 수 없습니다. " msgid "LUN number ({}) is not an integer." msgstr "LUN 번호({})가 정수가 아닙니다." #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 번호가 채널 id에 대한 경계를 벗어남: %(ch_id)s." #, python-format msgid "" "LUN with given ref %(ref)s does not satisfy volume type. Ensure LUN volume " "with ssc features is present on vserver %(vs)s." msgstr "" "주어진 ref %(ref)s를 사용하는 LUN이 볼륨 유형을 충족하지 않습니다. ssc 기능" "이 있는 LUN 볼륨이 vserver %(vs)s에 있는지 확인하십시오." #, python-format msgid "Last %s cinder syslog entries:-" msgstr "마지막 %s cinder syslog 항목:-" msgid "LeftHand cluster not found" msgstr "LeftHand 클러스터를 찾을 수 없음" msgid "License is unavailable." msgstr "라이센스를 사용할 수 없습니다." #, python-format msgid "Line %(dis)d : %(line)s" msgstr "행 %(dis)d : %(line)s" msgid "Link path already exists and its not a symlink" msgstr "링크 경로가 이미 존재하고 symlink가 아님" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "소스 볼륨의 링크된 복제본이 다음 상태에서 지원되지 않음: %s" msgid "Lock acquisition failed." msgstr "잠금 확보에 실패했습니다." msgid "Logout session error." msgstr "로그아웃 세션 오류." msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "검색 서비스가 구성되지 않았습니다. fc_san_lookup_service에 대한 구성 옵션이 " "검색 서비스의 구체적인 구현을 지정해야 합니다. " msgid "Lun migration error." msgstr "Lun 마이그레이션 오류입니다. " #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "" "%(md5)s 전과 %(etag)s 후의 오브젝트 %(object_name)s이(가) 동일하지 않습니다." 
#, python-format msgid "MSG_DENIED: %r" msgstr "MSG_DENIED: %r" #, python-format msgid "MSG_DENIED: AUTH_ERROR: %r" msgstr "MSG_DENIED: AUTH_ERROR: %r" #, python-format msgid "MSG_DENIED: RPC_MISMATCH: %r" msgstr "MSG_DENIED: RPC_MISMATCH: %r" #, python-format msgid "Malformed fcns output string: %s" msgstr "올바르지 않은 형식의 fcns 출력 문자열: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "잘못된 메시지 본문: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "올바르지 않은 형식의 이름 서버 문자열: %s" msgid "Malformed request body" msgstr "형식이 틀린 요청 본문" msgid "Malformed request body." msgstr "요청 본문의 형식이 잘못되었습니다. " msgid "Malformed request url" msgstr "형식이 틀린 요청 URL" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "%(cmd)s 명령에 대해 양식이 잘못된 응답: %(reason)s" msgid "Malformed scheduler_hints attribute" msgstr "형식이 틀린 scheduler_hints 속성" #, python-format msgid "Malformed show fcns database string: %s" msgstr "올바르지 않은 형식의 표시 fcns 데이터베이스 문자열: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgstr "" "올바르지 않은 형식의 구역 구성: (switch=%(switch)s zone_config=" "%(zone_config)s)." #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "올바르지 않은 형식의 구역 상태: (switch=%(switch)s zone_config=" "%(zone_config)s)." msgid "Manage existing get size requires 'id'." msgstr "기존 가져오기 크기를 관리하려면 'id'가 필요합니다. " msgid "Manage existing snapshot not implemented." msgstr "기존 스냅샷 관리가 구현되지 않았습니다. " #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "올바르지 않은 백엔드 참조로 인해 기존 볼륨 관리에 실패함 %(existing_ref)s: " "%(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "볼륨 유형 불일치로 인해 기존 볼륨 관리에 실패함: %(reason)s" msgid "Manage existing volume not implemented." msgstr "기존 볼륨 관리가 구현되지 않았습니다." msgid "Manage existing volume requires 'source-id'." msgstr "기존 볼륨을 관리하려면 'source-id'가 필요합니다. " #, python-format msgid "" "Manage volume is not supported if FAST is enable. FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST가 사용으로 설정된 경우 볼륨 관리가 지원되지 않습니다. FAST 정책: " "%(fastPolicyName)s." msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "장애 복구된 볼륨에서 스냅샷 관리는 허용되지 않습니다." msgid "Map info is None due to array version not supporting hypermetro." msgstr "배열 버전이 hypermetro를 지원하지 않으므로 맵 정보가 없습니다." #, python-format msgid "" "Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " "timeout. Terminating." msgstr "" "%(id)s 맵핑 준비가 할당된 %(to)d초(제한시간) 내에 완료되지 못했습니다. 종료됩" "니다. " #, python-format msgid "Masking view %(maskingViewName)s was not deleted successfully" msgstr "마스킹 보기 %(maskingViewName)s이(가) 삭제되지 않음" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "허용된 최대 백업 수(%(allowed)d)를 초과함" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "허용된 최대 스냅샷 수 (%(allowed)d)을(를) 초과함" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "할당량 '%(name)s'에 대해 허용된 최대 볼륨 수(%(allowed)d)가 초과되었습니다. 
" #, python-format msgid "May specify only one of %s" msgstr "%s 중 하나만 지정할 수 있음 " msgid "Metadata backup already exists for this volume" msgstr "이 볼륨에 대한 메타데이터 백업이 이미 존재함" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "메타데이터 백업 오브젝트 '%s'이(가) 이미 존재함" msgid "Metadata item was not found" msgstr "메타데이터 항목이 없음" msgid "Metadata item was not found." msgstr "메타데이터 항목을 찾을 수 없습니다. " #, python-format msgid "Metadata property key %s greater than 255 characters" msgstr "메타데이터 특성 키 %s이(가) 255자보다 큼 " #, python-format msgid "Metadata property key %s value greater than 255 characters" msgstr "메타데이터 특성 키 %s 값이 255자보다 큼 " msgid "Metadata property key blank" msgstr "메타데이터 특성 키 공백" msgid "Metadata property key blank." msgstr "메타데이터 특성 키가 공백입니다. " msgid "Metadata property key greater than 255 characters." msgstr "메타데이터 특성 키가 255자보다 깁니다. " msgid "Metadata property value greater than 255 characters." msgstr "메타데이터 특성 값이 255자보다 깁니다. " msgid "Metadata restore failed due to incompatible version" msgstr "호환 불가능한 버전으로 인해 메타데이터 복원에 실패" msgid "Metadata restore failed due to incompatible version." msgstr "호환 불가능한 버전으로 인해 메타데이터 복원 실패" #, python-format msgid "Migrate volume %(src)s failed." msgstr "볼륨 %(src)s 마이그레이션에 실패했습니다. " #, python-format msgid "Migrate volume failed between source vol %(src)s and dest vol %(dst)s." msgstr "" "소스 볼륨 %(src)s과(와) 대상 볼륨 %(dst)s 사이의 볼륨 마이그레이션에 실패했습" "니다. " #, python-format msgid "Migration of LUN %s has been stopped or faulted." msgstr "LUN %s의 마이그레이션이 중지되었거나 결함이 발생했습니다." msgid "MirrorView/S enabler is not installed." msgstr "MirrorView/S 인에이블러가 설치되지 않았습니다." msgid "" "Missing 'purestorage' python module, ensure the library is installed and " "available." msgstr "" "'purestorage' python 모듈 누락. 라이브러리가 설치되어 사용할 수 있는지 사용" "할 수 없습니다." msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "파이버 채널 SAN 구성 매개변수 누락 - fc_fabric_names" msgid "Missing request body" msgstr "요청 본문이 누락됨 " msgid "Missing request body." msgstr "요청 본문이 누락되었습니다. " #, python-format msgid "Missing required element '%s' in request body" msgstr "요청 본문에서 '%s' 필수 요소가 누락됨 " #, python-format msgid "Missing required element '%s' in request body." msgstr "요청 본문에서 필수 요소 '%s'이(가) 누락되었습니다. " msgid "Missing required element 'consistencygroup' in request body." msgstr "요청 본문에 필수 요소 'consistencygroup'이 누락되었습니다. " msgid "Missing required element 'host' in request body." msgstr "요청 본문에 필수 요소 '호스트'가 누락되었습니다. " msgid "Missing required element quota_class_set in request body." msgstr "요청 본문에서 필수 요소 quota_class_set가 누락되었습니다." msgid "Missing required element snapshot in request body." msgstr "요청 본문에 필수 요소 스냅샷이 누락되었습니다. " msgid "" "Multiple SerialNumbers found, when only one was expected for this operation. " "Please change your EMC config file." msgstr "" "이 조작에 대해 하나의 SerialNumber만 예상되었을 때 다중 SerialNumber가 발견되" "었습니다. EMC 구성 파일을 변경하십시오. " #, python-format msgid "Multiple copies of volume %s found." msgstr "볼륨 %s의 다중 사본이 발견되었습니다. " #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "'%s'에 대한 일치를 여러 개 찾았습니다. 더 구체적인 검색을 위해 ID를 사용하십" "시오." msgid "Multiple profiles found." msgstr "다중 프로파일이 발견되었습니다. 
" msgid "Must implement a fallback schedule" msgstr "대체 스케줄을 구현해야 함" msgid "Must implement find_retype_host" msgstr "find_retype_host를 구현해야 함" msgid "Must implement host_passes_filters" msgstr "host_passes_filters를 구현해야 함 " msgid "Must implement schedule_create_consistencygroup" msgstr "schedule_create_consistencygroup을 구현해야 함" msgid "Must implement schedule_create_volume" msgstr "schedule_create_volume을 구현해야 함" msgid "Must implement schedule_get_pools" msgstr "schedule_get_pools를 구현해야 함 " msgid "Must pass wwpn or host to lsfabric." msgstr "lsfabric에 wwpn 또는 호스트를 전달해야 합니다." msgid "" "Must run this command as cloud admin using a Keystone policy.json which " "allows cloud admin to list and get any project." msgstr "" "클라우드 관리자가 프로젝트를 나열하고 가져올 수 있는 Keystone policy.json을 " "사용하여 클라우드 관리자로 이 명령을 실행해야 합니다." msgid "Must specify 'connector'" msgstr "'커넥터'를 지정해야 함" msgid "Must specify 'connector'." msgstr "'커넥터'를 지정해야 합니다. " msgid "Must specify 'host'." msgstr "'호스트'를 지정해야 합니다. " msgid "Must specify 'new_volume'" msgstr "'new_volume'을 지정해야 함" msgid "Must specify 'status'" msgstr "'상태'를 지정해야 함" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "업데이트하려면 'status', 'attach_status' 또는 'migration_status'를 지정해야 " "합니다." msgid "Must specify a valid attach status" msgstr "올바른 접속 상태를 지정해야 함" msgid "Must specify a valid migration status" msgstr "올바른 마이그레이션 상태를 지정해야 함" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "올바른 사용자 %(valid)s을(를) 지정해야 합니다. '%(persona)s' 값이 올바르지 않" "습니다. " #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "올바른 프로비저닝 유형 %(valid)s을(를) 지정해야 합니다. '%(prov)s' 값이 올바" "르지 않습니다. " msgid "Must specify a valid status" msgstr "올바른 상태를 지정해야 함" msgid "Must specify an ExtensionManager class" msgstr "ExtensionManager 클래스를 지정해야 함" msgid "Must specify bootable in request." msgstr "요청에서 부트 가능을 지정해야 합니다." msgid "Must specify protection domain name or protection domain id." msgstr "보호 도메인 이름 또는 보호 도메인 ID를 지정해야 합니다. " msgid "Must specify readonly in request." msgstr "요청에서 읽기 전용을 지정해야 합니다." msgid "Must specify snapshot source-name or source-id." msgstr "스냅샷 source-name 또는 source-id를 지정해야 합니다." msgid "Must specify source-name or source-id." msgstr "source-name 또는 source-id가 있어야 합니다." msgid "Must specify storage pool name or id." msgstr "스토리지 풀 이름 또는 ID를 지정해야 합니다. " msgid "Must specify storage pools. Option: sio_storage_pools." msgstr "스토리지 풀을 지정해야 합니다. 옵션: sio_storage_pools." msgid "Must supply a positive value for age" msgstr "연령에 대해 양수 값을 제공해야 함" msgid "Must supply a positive, non-zero value for age" msgstr "기간에 0이 아닌 양수 값을 제공해야 함" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "NAS 구성 '%(name)s=%(value)s'이(가) 올바르지 않습니다. 'auto', 'true' 또는 " "'false'여야 합니다." #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "NFS config 파일이 %(config)s에 없음" #, python-format msgid "NFS file %s not discovered." msgstr "NFS 파일 %s을(를) 찾을 수 없습니다." msgid "NFS file could not be discovered." msgstr "NFS 파일을 찾을 수 없습니다. " msgid "NaElement name cannot be null." msgstr "NaElement 이름은 널(null)일 수 없습니다." msgid "Name" msgstr "이름" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "요청 본문에서 이름, 설명, add_volumes 및 remove_volumes이 모두비어 있을 수 없" "습니다. 
" msgid "Need non-zero volume size" msgstr "0이 아닌 볼륨 크기가 필요함" #, python-format msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" msgstr "MSG_DENIED와 MSG_ACCEPTED가 모두 아님: %r" msgid "NetApp Cinder Driver exception." msgstr "NetApp Cinder Driver 예외." #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "확장을 위한 새 크기는 현재 크기보다 커야 합니다. %(size)s, 확장됨: " "%(new_size)s)." #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "새 크기는 백엔드 스토리지의 실제 크기보다 커야 함: realsize: %(oldsize)s, " "newsize: %(newsize)s." msgid "New volume size must be specified as an integer." msgstr "새 볼륨 크기를 정수로 지정해야 합니다. " msgid "New volume type must be specified." msgstr "새 볼륨 유형을 지정해야 합니다." msgid "New volume type not specified in request_spec." msgstr "새 볼륨 유형이 request_spec에서 지정되지 않았습니다." #, python-format msgid "New volume_type same as original: %s." msgstr "새 volume_type이 원본과 동일함: %s." msgid "Nimble Cinder Driver exception" msgstr "Nimble Cinder 드라이버 예외" msgid "No FC initiator can be added to host." msgstr "FC 개시자를 호스트에 추가할 수 없습니다." msgid "No FC port connected to fabric." msgstr "FC 포트가 패브릭에 연결되지 않았습니다." msgid "No FCP targets found" msgstr "FCP 대상을 찾을 수 없음" msgid "No Port Group elements found in config file." msgstr "구성 파일에서 포트 그룹 요소를 찾을 수 없습니다." msgid "No VF ID is defined in the configuration file." msgstr "구성 파일에 VF ID가 정의되지 않았습니다." msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "제공된 iSCSI IP를 가진 활성 iSCSI 포털이 없음" #, python-format msgid "No available service named %s" msgstr "%s(으)로 이름 지정된 사용 가능한 서비스가 없음 " #, python-format msgid "No backup with id %s" msgstr "ID가 %s인 백업이 없음" msgid "No backups available to do an incremental backup." msgstr "증분 백업을 수행할 수 있는 백업이 없습니다. " msgid "No big enough free disk" msgstr "충분한 여유 디스크 공간 없음" #, python-format msgid "No cgsnapshot with id %s" msgstr "ID가 %s인 cg 스냅샷이 없음" msgid "No cinder entries in syslog!" msgstr "syslog에 cinder 항목이 없습니다!" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "이름이 %s인 복제 LUN을 파일러에서 찾을 수 없음" msgid "No config node found." msgstr "구성 노드를 찾을 수 없습니다. " #, python-format msgid "No consistency group with id %s" msgstr "ID가 %s인 일관성 그룹이 없음" #, python-format msgid "No element by given name %s." msgstr "지정된 이름 %s의 요소가 없습니다." msgid "No errors in logfiles!" msgstr "로그 파일에 오류가 없습니다!" #, python-format msgid "No file found with %s as backing file." msgstr "백업 파일로 %s을(를) 가진 파일을 찾을 수 없음 " #, python-format msgid "" "No free LUN IDs left. Maximum number of volumes that can be attached to host " "(%s) has been exceeded." msgstr "" "사용 가능한 LUN ID가 남아 있지 않습니다. 호스트에 연결할 수 있는 최대 볼륨 수" "(%s)가 초과되었습니다. " msgid "No free disk" msgstr "여유 디스크 공간 없음" #, python-format msgid "No good iscsi portal found in supplied list for %s." msgstr "%s에 대한 제공된 목록에서 올바른 iscsi 포털을 찾을 수 없습니다." #, python-format msgid "No good iscsi portals found for %s." msgstr "%s에 대한 올바른 iscsi 포털을 찾을 수 없습니다." #, python-format msgid "No host to create consistency group %s." msgstr "일관성 그룹 %s을(를) 작성할 호스트가 없습니다. " msgid "No iSCSI-enabled ports on target array." msgstr "대상 배열에 iSCSI 사용 포트가 없습니다. " msgid "No image_name was specified in request." msgstr "요청에 image_name이 지정되지 않았습니다. " msgid "No initiator connected to fabric." msgstr "개시자가 패브릭에 연결되지 않았습니다." 
#, python-format msgid "No initiator group found for initiator %s" msgstr "%s 개시자의 개시자 그룹을 찾을 수 없음" msgid "No initiators found, cannot proceed" msgstr "개시자를 찾을 수 없음, 계속할 수 없음" #, python-format msgid "No interface found on cluster for ip %s" msgstr "ip %s에 대한 클러스터에서 인터페이스를 찾을 수 없음" msgid "No ip address found." msgstr "IP 주소를 찾을 수 없습니다. " msgid "No iscsi auth groups were found in CloudByte." msgstr "CloudByte에서 iscsi 인증 그룹을 찾을 수 없습니다. " msgid "No iscsi initiators were found in CloudByte." msgstr "CloudByte에서 iscsi 개시자를 찾을 수 없습니다." #, python-format msgid "No iscsi service found for CloudByte volume [%s]." msgstr "CloudByte 볼륨 [%s]의 iscsi 서비스를 찾을 수 없습니다." msgid "No iscsi services found in CloudByte storage." msgstr "CloudByte 스토리지에서 iscsi 서비스를 찾을 수 없습니다." #, python-format msgid "No key file specified and unable to load key from %(cert)s %(e)s." msgstr "지정된 키 파일이 없으며 %(cert)s %(e)s에서 키를 로드할 수 없습니다. " msgid "No mounted Gluster shares found" msgstr "마운트된 Gluster 공유를 찾지 못함" msgid "No mounted NFS shares found" msgstr "마운트된 NFS 공유를 찾지 못함" msgid "No mounted SMBFS shares found." msgstr "마운트된 SMBFS 공유를 찾을 수 없습니다." msgid "No mounted Virtuozzo Storage shares found" msgstr "마운트된 Virtuozzo 스토리지 공유를 찾을 수 없습니다. " msgid "No mounted shares found" msgstr "마운트된 공유를 찾을 수 없음" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "볼륨 %(vol)s에 대해 I/O 그룹 %(gid)s에서 노드를 찾을 수 없습니다. " msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "볼륨 프로비저닝을 위해 사용할 수 있는 풀이 없습니다. 구성 옵션 " "netapp_pool_name_search_pattern이 올바르게 설정되었는지 확인하십시오. " msgid "" "No response was received from CloudByte storage list iSCSI auth user API " "call." msgstr "" "CloudByte 스토리지 목록 iSCSI 인증 사용자 API 호출에서 응답이 수신되지 않았습" "니다. " msgid "No response was received from CloudByte storage list tsm API call." msgstr "CloudByte 스토리지 목록 tsm API 호출에서 응답을 수신하지 못했습니다." msgid "No response was received from CloudByte's list filesystem api call." msgstr "CloudByte의 목록 파일 시스템 api 호출에서 응답을 수신하지 못했습니다." msgid "No service VIP configured and no nexenta_client_address" msgstr "서비스 VIP가 구성되지 않았으며nexenta_client_address가 없음" #, python-format msgid "No snap found with %s as backing file." msgstr "백업 파일로 %s을(를) 가진 스냅샷을 찾을 수 없음 " #, python-format msgid "No snapshot image found in snapshot group %s." msgstr "스냅샷 그룹 %s에서 스냅샷 이미지를 찾을 수 없습니다. " #, python-format msgid "No snapshots could be found on volume %s." msgstr "볼륨 %s에서 스냅샷을 찾을 수 없습니다." #, python-format msgid "No source snapshots provided to create consistency group %s." msgstr "" "일관성 그룹 %s을(를) 작성하는 데 필요한 소스 스냅샷이 제공되지 않았습니다. " #, python-format msgid "No storage path found for export path %s" msgstr "내보내기 경로 %s에 대해 스토리지 경로를 찾을 수 없음" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "해당하는 QoS 스펙 %(specs_id)s이(가) 없습니다. " msgid "No suitable discovery ip found" msgstr "적합한 발견 ip를 찾을 수 없음" #, python-format msgid "No support to restore backup version %s" msgstr "백업 버전 %s 복원을 지원하지 않음" #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "%(volume_id)s 볼륨에 대한 대상ID가 없습니다. " msgid "" "No unused LUN IDs are available on the host; multiattach is enabled which " "requires that all LUN IDs to be unique across the entire host group." msgstr "" "호스트에서 사용할 수 있는 사용하지 않은 LUN ID가 없습니다. 다중 연결이 사용으" "로 설정되며 이를 위해서는 모든 LUN ID가 전체 호스트 그룹에서 고유해야 합니" "다. " #, python-format msgid "No valid host was found. %(reason)s" msgstr "유효한 호스트가 없습니다. 
%(reason)s" #, python-format msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "유형이 %(type)s인 볼륨 %(id)s의 호스트가 올바르지 않음" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "ref %s(으)로 지정된 UID를 갖는 vdisk가 없습니다." #, python-format msgid "No views found for LUN: %s" msgstr "LUN의 보기가 없음: %s" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "vserver가 %(vserver)s이고 접합 경로가 %(junction)s인 클러스터에 볼륨이 없음" msgid "No volume service(s) started successfully, terminating." msgstr "볼륨 서비스가 시작되지 않아서 종료합니다. " msgid "No volume was found at CloudByte storage." msgstr "CloudByte 스토리지에서 볼륨을 찾을 수 없습니다." msgid "No volume_type should be provided when creating test replica." msgstr "테스트 복제본을 작성할 때 volume_type을 제공하지 않아야 합니다. " msgid "No volumes found in CloudByte storage." msgstr "CloudByte 스토리지에서 볼륨을 찾을 수 없습니다." msgid "No weighed hosts available" msgstr "사용 가능한 적합한 호스트가 없음" #, python-format msgid "Not a valid string: %s" msgstr "올바른 문자열이 아님: %s" msgid "Not a valid value for NaElement." msgstr "NaElement의 올바른 값이 아닙니다." #, python-format msgid "Not able to find a suitable datastore for the volume: %s." msgstr "볼륨: %s에 적합한 데이터베이스를 찾을 수 없습니다." msgid "Not an rbd snapshot" msgstr "rbd 스냅샷이 아님" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "%(image_id)s 이미지에 대한 권한이 없습니다. " msgid "Not authorized." msgstr "권한이 없습니다. " #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "백엔드(%(backend)s)에 공간이 충분하지 않음" msgid "Not enough storage space in the ZFS share to perform this operation." msgstr "ZFS 공유에서 이 조작을 수행하는 데 필요한 스토리지 공간이 부족합니다." msgid "Not stored in rbd" msgstr "rbd에 저장되지 않음" msgid "Nova returned \"error\" status while creating snapshot." msgstr "스냅샷을 작성하는 동안 Nova에서 \"오류\" 상태를 리턴함. " msgid "Null response received from CloudByte's list filesystem." msgstr "CloudByte의 목록 파일 시스템에서 널 응답을 수신했습니다." msgid "Null response received from CloudByte's list iscsi auth groups." msgstr "CloudByte의 목록 iscsi 인증 그룹에서 널 응답이 수신되었습니다. " msgid "Null response received from CloudByte's list iscsi initiators." msgstr "CloudByte의 목록 iscsi 개시자로부터 널 응답을 수신했습니다." msgid "Null response received from CloudByte's list volume iscsi service." msgstr "CloudByte의 목록 볼륨 iscsi 서비스에서 널 응답을 수신했습니다." #, python-format msgid "Null response received while creating volume [%s] at CloudByte storage." msgstr "" "CloudByte 스토리지에서 볼륨 [%s]을(를) 작성하는 중 널 응답을 수신했습니다." #, python-format msgid "Null response received while deleting volume [%s] at CloudByte storage." msgstr "" "CloudByte 스토리지에서 볼륨[%s]을 삭제하는 중에 널 응답이 수신되었습니다. " #, python-format msgid "" "Null response received while querying for [%(operation)s] based job " "[%(job)s] at CloudByte storage." msgstr "" "CloudByte 스토리지에서 [%(operation)s] 기반 작업 [%(job)s]에 대해 조회하는 중" "에 널 응답이 수신되었습니다. " msgid "Number of retries if connection to ceph cluster failed." msgstr "ceph 클러스터에 대한 연결에 실패한 경우 재시도 횟수입니다. " msgid "Object Count" msgstr "오브젝트 카운트" msgid "Object Version" msgstr "오브젝트 버전" msgid "Object is not a NetApp LUN." msgstr "오브젝트가 NetApp LUN이 아닙니다." #, python-format msgid "" "On an Extend Operation, error adding volume to composite volume: " "%(volumename)s." msgstr "" "확장 조작에서 컴포지트 볼륨 %(volumename)s에 볼륨을 추가하는 중에 오류가 발생" "했습니다." msgid "" "One of cinder-volume services is too old to accept such request. Are you " "running mixed Liberty-Mitaka cinder-volumes?" msgstr "" "cinder-volume 서비스 중 하나가 너무 오래되어 이러한 요청을 승인할 수 없습니" "다. 혼합된 Liberty-Mitaka cinder-볼륨을 사용 중입니까?" 
msgid "One of the required inputs from host, port or scheme was not found." msgstr "호스트, 포트 또는 스키마에서 필요한 입력 중 하나를 찾을 수 없습니다." #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "%(value)s %(verb)s 요청만이 %(unit_string)s마다 %(uri)s에 적용될 수 있습니" "다. " msgid "Only one limit can be set in a QoS spec." msgstr "QoS 스펙에서는 하나의 한계만 설정할 수 있습니다. " msgid "" "Only users with token scoped to immediate parents or root projects are " "allowed to see its children quotas." msgstr "" "직속 상위 또는 루트 프로젝트로 범위가 지정된 토큰을 가진 사용자만 하위 할당량" "을 볼 수 있습니다. " msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "OpenStack에서 관리하는 볼류만 관리를 취소할 수 있습니다." #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "조작이 상태=%(status)s과(와) 함께 실패했습니다. 전체 덤프: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "조작이 지원되지 않음: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "gpfs_images_dir 옵션이 올바르게 설정되지 않았습니다. " msgid "Option gpfs_images_share_mode is not set correctly." msgstr "gpfs_images_share_mode 옵션이 올바르게 설정되지 않았습니다. " msgid "Option gpfs_mount_point_base is not set correctly." msgstr "gpfs_mount_point_base 옵션이 올바르게 설정되지 않았습니다." msgid "Option map (cls._map) is not defined." msgstr "옵션 맵(cls._map)이 정의되지 않았습니다." #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "시작 %(res)s %(prop)s은(는) '%(vals)s' 값 중 하나여야 함" msgid "Override HTTPS port to connect to Blockbridge API server." msgstr "Blockbridge API 서버에 연결할 HTTPS 포트를 대체하십시오. " #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "Partition name is None, please set smartpartition:partitionname in key." msgstr "" "파티션 이름이 None입니다. 키에서 smartpartition:partitionname을 설정하십시" "오. " msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "인증에 비밀번호 또는 SSH 개인용 키가 필요합니다. san_password 또는 " "san_private_key 옵션을 설정하십시오. " msgid "Path to REST server's certificate must be specified." msgstr "REST 서버 인증서의 경로를 지정해야 합니다. " #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "미리 %(pool_list)s 풀을 작성하십시오! " #, python-format msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" msgstr "미리 풀 %(pool)s에서 %(tier_levels)s 티어를 작성하십시오! " msgid "Please re-run cinder-manage as root." msgstr "루트로 cinder-manage를 다시 실행하십시오." msgid "Please specify a name for QoS specs." msgstr "QoS 스펙에 대한 이름을 지정하십시오. " #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "정책이 %(action)s이(가) 수행되도록 허용하지 않습니다. " #, python-format msgid "Pool %(poolNameInStr)s is not found." msgstr "%(poolNameInStr)s 풀을 찾을 수 없습니다. " #, python-format msgid "Pool %s does not exist in Nexenta Store appliance" msgstr "풀 %s이(가) Nexenta Store 어플라이언스에 없음" #, python-format msgid "Pool from volume['host'] %(host)s not found." msgstr "volume['host'] %(host)s의 풀을 찾을 수 없습니다. " #, python-format msgid "Pool from volume['host'] failed with: %(ex)s." msgstr "다음과 함께 volume['host']의 풀이 실패함: %(ex)s." msgid "Pool is not available in the volume host field." msgstr "볼륨 호스트 필드에서 풀을 사용할 수 없습니다." msgid "Pool is not available in the volume host fields." msgstr "볼륨 호스트 필드에서 풀을 사용할 수 없습니다." #, python-format msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." msgstr "이름이 %(pool)s인 풀을 도메인 %(domain)s에서 찾을 수 없습니다. 
" #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" "이름이 %(pool_name)s인 풀을 도메인 %(domain_id)s에서 찾을 수 없습니다. " #, python-format msgid "" "Pool: %(poolName)s. is not associated to storage tier for fast policy " "%(fastPolicy)s." msgstr "" "%(poolName)s 풀이 fast 정책 %(fastPolicy)s에 대한 스토리지 티어와 연관되어 있" "지 않습니다." #, python-format msgid "PoolName must be in the file %(fileName)s." msgstr "PoolName은 파일 %(fileName)s에 있어야 합니다. " #, python-format msgid "Pools %s does not exist" msgstr "Pools %s이(가) 존재하지 않음" msgid "Pools name is not set." msgstr "풀 이름이 설정되지 않았습니다. " #, python-format msgid "Primary copy status: %(status)s and synchronized: %(sync)s." msgstr "1차 사본 상태: %(status)s 및 동기화됨: %(sync)s." msgid "Project ID" msgstr "프로젝트 ID" #, python-format msgid "Project quotas are not properly setup for nested quotas: %(reason)s." msgstr "중첩된 할당량에 맞게 프로젝트 할당량이 설정되지 않음: %(reason)s." msgid "Protection Group not ready." msgstr "보호 그룹이 준비되지 않았습니다." #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "프로토콜 %(storage_protocol)s이(가) 스토리지 제품군%(storage_family)s입니다." msgid "Provided backup record is missing an id" msgstr "제공된 백업 레코드에 ID가 누락됨" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "제공된 스냅샷 상태 %(provided)s이(가) %(current)s 상태의 스냅샷에 허용되지 않" "습니다. " #, python-format msgid "" "Provider information w.r.t CloudByte storage was not found for OpenStack " "volume [%s]." msgstr "" "OpenStack 볼륨 [%s]의 제공자 정보 w.r.t CloudByte 스토리지를 찾을 수 없습니" "다." #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Pure Storage Cinder 드라이버 실패: %(reason)s" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "%(specs_id)s QoS 스펙이 이미 존재합니다. " #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "QoS 스펙 %(specs_id)s이(가) 엔티티와 연관되어 있습니다. " #, python-format msgid "QoS config is wrong. %s must > 0." msgstr "QoS 구성이 잘못되었습니다. %s이(가) 0보다 커야 합니다." #, python-format msgid "" "QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " "%(qos_policy)s." msgstr "" "IOTYPE 및 다른 qos_specs에 대해 QoS 정책을 지정해야 함, QoS 정책: " "%(qos_policy)s." #, python-format msgid "" "QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " msgstr "" "IOTYPE의 QoS 정책을 지정해야 함: 0, 1 또는 2, QoS 정책: %(qos_policy)s " #, python-format msgid "" "QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." msgstr "QoS 정책 upper_limit 및 lower_limit 충돌, QoS 정책: %(qos_policy)s." #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "QoS 스펙 %(specs_id)s에 %(specs_key)s 키를 갖는 스펙이 없습니다. " msgid "QoS specs are not supported on this storage family and ONTAP version." msgstr "QoS 스펙이 이 스토리지 제품군 및 ONTAP 버전에서 지원되지 않습니다. " msgid "Qos specs still in use." msgstr "Qos 스펙을 아직 사용 중입니다. " msgid "" "Query by service parameter is deprecated. Please use binary parameter " "instead." msgstr "" "서비스 매개변수에 의한 조회가 더 이상 사용되지 않습니다. 2진 매개변수를 사용" "하십시오. 삭제 중입니다. " msgid "Query resource pool error." msgstr "자원 풀 조회 오류입니다. " #, python-format msgid "Quota %s limit must be equal or greater than existing resources." msgstr "할당량 %s 한계는 기존 자원 이상이어야 합니다. " #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "%(class_name)s 할당량 클래스를 찾을 수 없습니다. 
" msgid "Quota could not be found" msgstr "할당량을 찾을 수 없음" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "자원에 대한 할당량 초과: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "할당량 초과: 코드=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "%(project_id)s 프로젝트에 대한 할당량을 찾을 수 없습니다. " #, python-format msgid "" "Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " "%(limit)d is less than in-use value of %(used)d" msgstr "" "'%(res)s' 자원의 프로젝트 '%(proj)s'에 대한 할당량 한계가 올바르지 않음: " "%(limit)d의 한계가 사용 중인 값 %(used)d보다 적음" #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "할당 예약 %(uuid)s을(를) 찾을 수 없습니다. " #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "%(project_id)s 프로젝트에 대한 할당 사용량을 찾을 수 없습니다. " #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "RBD diff op 실패 - (ret=%(ret)s stderr=%(stderr)s)" #, python-format msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" msgid "REST server IP must by specified." msgstr "REST 서버 IP를 지정해야 합니다. " msgid "REST server password must by specified." msgstr "REST 서버 비밀번호를 지정해야 합니다. " msgid "REST server username must by specified." msgstr "REST 서버 사용자 이름을 지정해야 합니다. " msgid "RPC Version" msgstr "RPC 버전" msgid "RPC server response is incomplete" msgstr "RPC 서버 응답이 완료되지 않음" msgid "Raid did not have MCS Channel." msgstr "Raid에 MCS 채널이 없습니다. " #, python-format msgid "" "Reach limitation set by configuration option max_luns_per_storage_group. " "Operation to add %(vol)s into Storage Group %(sg)s is rejected." msgstr "" "구성 옵션 max_luns_per_storage_group으로 설정된 제한사항에 도달합니다.스토리" "지 그룹 %(sg)s에 %(vol)s을(를) 추가하는 조작이 거부됩니다." #, python-format msgid "Received error string: %s" msgstr "오류 문자열이 수신됨: %s" msgid "Reference must be for an unmanaged snapshot." msgstr "참조는 관리되지 않은 스냅샷용이어야 합니다." msgid "Reference must be for an unmanaged virtual volume." msgstr "참조는 관리되지 않는 가상 볼륨에 대한 것이어야 합니다." msgid "Reference must be the name of an unmanaged snapshot." msgstr "참조는 관리되지 않은 스냅샷의 이름이어야 합니다." msgid "Reference must be the volume name of an unmanaged virtual volume." msgstr "참조는 관리되지 않는 가상 볼륨의 볼륨 이름이어야 합니다. " msgid "Reference must contain either source-id or source-name element." msgstr "참조에는 source-id 또는 source-name 요소가 포함되어야 합니다. " msgid "Reference must contain either source-name or source-id element." msgstr "참조에는 source-name 또는 source-id 요소가 포함되어야 합니다. " msgid "Reference must contain source-id or source-name element." msgstr "참조에는 source-id 또는 source-name 요소가 포함되어야 합니다. " msgid "Reference must contain source-id or source-name key." msgstr "참조에는 source-id 또는 source-name 키가 포함되어 있어야 합니다. " msgid "Reference must contain source-id or source-name." msgstr "참조에는 source-id 또는 source-name이 포함되어 있어야 합니다. " msgid "Reference must contain source-id." msgstr "참조에는 source-id가 있어야 합니다." msgid "Reference must contain source-name element." msgstr "참조에는 source-name 요소가 포함되어야 합니다." msgid "Reference must contain source-name or source-id." msgstr "참조에는 source-name 또는 source-id가 있어야 합니다." msgid "Reference must contain source-name." msgstr "참조에는 source-name이 포함되어야 합니다. " msgid "Reference to volume to be managed must contain source-name." msgstr "관리할 볼륨에 대한 참조에 source-name이 포함되어야 합니다." #, python-format msgid "Reference to volume: %s to be managed must contain source-name." msgstr "관리할 볼륨: %s에 대한 참조에 source-name이 포함되어야 합니다." 
#, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "볼륨 ID: %(id)s의 마이그레이션을 거부하고 있습니다. 소스 및 대상이 같은 볼륨 " "그룹이므로 구성을 확인하십시오. %(name)s." msgid "Remote pool cannot be found." msgstr "원격 풀을 찾을 수 없습니다." msgid "Remove CHAP error." msgstr "CHAP 제거 오류입니다. " msgid "Remove fc from host error." msgstr "호스트에서 fc 제거 오류입니다. " msgid "Remove host from array error." msgstr "배열에서 호스트 제거 오류입니다. " msgid "Remove host from hostgroup error." msgstr "호스트 그룹에서 호스트 제거 오류입니다. " msgid "Remove iscsi from host error." msgstr "호스트에서 iscsi 제거 오류입니다. " msgid "Remove lun from QoS error." msgstr "QoS에서 lun 제거 오류." msgid "Remove lun from cache error." msgstr "캐시에서 lun 제거 오류입니다. " msgid "Remove lun from partition error." msgstr "파티션에서 lun 제거 오류입니다. " msgid "Remove port from port group error." msgstr "포트 그룹에서 포트 제거 오류." msgid "Remove volume export failed." msgstr "볼륨 내보내기 제거에 실패했습니다. " msgid "Rename lun on array error." msgstr "배열에서 lun 이름 바꾸기 오류입니다. " msgid "Rename snapshot on array error." msgstr "배열에서 스냅샷 이름 변경 오류." #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "%(ssn)s에 %(name)s을(를) 복제하는 데 실패했습니다." #, python-format msgid "Replication Service Capability not found on %(storageSystemName)s." msgstr "%(storageSystemName)s에서 복제 서비스 기능을 찾을 수 없습니다. " #, python-format msgid "Replication Service not found on %(storageSystemName)s." msgstr "%(storageSystemName)s에서 복제 서비스를 찾을 수 없습니다. " msgid "Replication is not enabled" msgstr "복제가 사용되지 않음" msgid "Replication is not enabled for volume" msgstr "볼륨에 복제가 사용되지 않음" msgid "Replication not allowed yet." msgstr "복제가 아직 허용되지 않습니다." #, python-format msgid "" "Replication status for volume must be active or active-stopped, but current " "status is: %s" msgstr "" "볼륨에 대한 복제 상태가 활성 또는 활성-중지됨이어야 하지만 현재 상태가 %s입니" "다." #, python-format msgid "" "Replication status for volume must be inactive, active-stopped, or error, " "but current status is: %s" msgstr "" "볼륨에 대한 복제 상태가 비활성, 활성-중지됨 또는 오류이어야 하지만 현재 상태" "가 %s입니다." msgid "Request body and URI mismatch" msgstr "요청 본문 및 URI 불일치" msgid "Request body contains too many items" msgstr "요청 본문에 너무 많은 항목이 들어있음" msgid "Request body contains too many items." msgstr "요청 본문이 너무 많은 항목을 포함합니다." msgid "Request body empty" msgstr "요청 본문이 비어 있음" #, python-format msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" msgstr "" "Datera 클러스터에 대한 요청이 잘못된 상태를 리턴함: %(status)s | %(reason)s" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "요청된 백업이 허용된 백업 기가바이트 할당량을 초과합니다. 요청된 크기는 " "%(requested)sG이고 할당량은 %(quota)sG이며 %(consumed)sG가 이용되었습니다." #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "요청된 볼륨 또는 스냅샷이 허용된 %(name)s 할당량을 초과합니다. 요청된 크기는 " "%(requested)sG이고 할당량은 %(quota)sG이며 %(consumed)sG가 이용되었습니다." #, python-format msgid "" "Requested volume size %(size)d is larger than maximum allowed limit " "%(limit)d." msgstr "" "요청된 볼륨 크기 %(size)d이(가) 허용된 최대 한계 %(limit)d보다 큽니다. " msgid "Required configuration not found" msgstr "필수 구성을 찾을 수 없음" #, python-format msgid "Required flag %s is not set" msgstr "필수 플래그 %s이(가) 설정되지 않음" msgid "Requires an NaServer instance." msgstr "NaServer 인스턴스가 필요합니다." 
#, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "백업 상태 재설정이 중단되었습니다. 현재 구성된 백업 서비스 " "[%(configured_service)s]은(는) 이 백업 [%(backup_service)s]을(를) 작성하기 위" "해 사용된 백업 서비스가 아닙니다." #, python-format msgid "Resizing clone %s failed." msgstr "복제본 %s 크기 조정에 실패했습니다. " msgid "Resizing image file failed." msgstr "이미지 파일의 크기 조정이 실패함. " msgid "Resource could not be found." msgstr "자원을 찾을 수 없습니다. " msgid "Resource not ready." msgstr "자원이 준비되지 않았습니다. " #, python-format msgid "Response error - %s." msgstr "응답 오류 - %s." msgid "Response error - The storage-system is offline." msgstr "응답 오류 - 스토리지 시스템이 오프라인입니다." #, python-format msgid "Response error code - %s." msgstr "응답 오류 코드 - %s." msgid "RestURL is not configured." msgstr "RestURL이 구성되지 않았습니다." #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "백업 복원 중단, 예상 볼륨 상태는 %(expected_status)s이지만 %(actual_status)s" "을(를) 가져옴" #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "백업 복원이 중단되었습니다. 현재 구성된 백업 서비스 [%(configured_service)s]" "은(는) 이 백업 [%(backup_service)s]을(를) 작성하기 위해 사용된 백업 서비스가 " "아닙니다." #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "백업 복원 중단: 예상 백업 상태는 %(expected_status)s이지만 %(actual_status)s" "을(를) 가져옴" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "제공된 Cinder 스냅샷에 대해 다른 크기의 SolidFire 볼륨을 검색했습니다. 검색: " "%(ret)s 필요: %(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "제공된 Cinder 볼륨에 대해 다른 크기의 SolidFire 볼륨을 검색했습니다. 검색: " "%(ret)s 필요: %(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "명령의 재시도 수 초과: %s" msgid "Retryable SolidFire Exception encountered" msgstr "재시도할 수 있는 SolidFire 예외가 발생함" msgid "Retype cannot change encryption requirements." msgstr "다시 입력해도 암호화 요구사항을 변경할 수 없습니다. " #, python-format msgid "Retype cannot change front-end qos specs for in-use volume: %s." msgstr "" "다시 입력해도 사용 중인 볼륨 %s에 대한 프론트 엔드 qos 스펙을 변경할 수 없습" "니다. " msgid "Retype requires migration but is not allowed." msgstr "다시 입력에 마이그레이션이 필요하지만 허용되지 않습니다." #, python-format msgid "" "Rollback for Volume: %(volumeName)s has failed. Please contact your system " "administrator to manually return your volume to the default storage group " "for fast policy %(fastPolicyName)s failed." msgstr "" "%(volumeName)s 볼륨 롤백이 실패했습니다. 시스템 관리자에게 문의하여 실패한 " "fast 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹에 볼륨을 수동으로 리턴" "하도록 하십시오. " #, python-format msgid "Rolling back %(volumeName)s by deleting it." msgstr "%(volumeName)s을(를) 삭제하여 롤백하는 중입니다. " #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "VMware vCenter 버전이 %s보다 낮은 Cinder는 실행할 수 없습니다." msgid "SAN product is not configured." msgstr "SAN 제품이 구성되지 않았습니다." msgid "SAN protocol is not configured." msgstr "SAN 프로토콜이 구성되지 않았습니다." #, python-format msgid "SMBFS config 'smbfs_oversub_ratio' invalid. 
Must be > 0: %s" msgstr "" "SMBFS 구성 'smbfs_oversub_ratio'가 올바르지 않습니다. 0보다 커야 함: %s" #, python-format msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" "SMBFS 구성 'smbfs_used_ratio'가 올바르지 않습니다. 0보다 크고 1.00 이하여야 " "함: %s" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "SMBFS 구성 파일이 %(config)s에 없습니다." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS 구성 파일이 설정되지 않았습니다(smbfs_shares_config)." #, python-format msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" "'%(command)s' 명령을 '%(total_attempts)r'번 시도한 후에 SSH 명령이 실패했습니" "다. " #, python-format msgid "SSH Command failed with error: '%(err)s', Command: '%(command)s'" msgstr "SSH 명령이 실패하여 오류 발생: '%(err)s', 명령: '%(command)s'" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "SSH 명령 인젝션 발견됨: %(command)s" #, python-format msgid "SSH connection failed for %(fabric)s with error: %(err)s" msgstr "오류: %(err)s과(와) 함께 %(fabric)s에 대한 SSH 연결 실패 " #, python-format msgid "SSL Certificate expired on %s." msgstr "SSL 인증서가 %s에 만료되었습니다. " #, python-format msgid "SSL error: %(arg)s." msgstr "SSL 오류: %(arg)s." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "스케줄러 호스트 필터 %(filter_name)s을(를) 찾을 수 없습니다. " #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "Scheduler Host Weigher %(weigher_name)s을(를) 찾을 수 없습니다. " #, python-format msgid "" "Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " "is: %(progress)s%%." msgstr "" "2차 사본 상태: %(status)s 및 동기화됨: %(sync)s, 동기화 프로세스는 " "%(progress)s%%입니다. " #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "보조 id는 기본 배열과 같지 않아야 합니다. backend_id = %(secondary)s." #, python-format msgid "SerialNumber must be in the file %(fileName)s." msgstr "SerialNumber는 파일 %(fileName)s에 있어야 합니다. " #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "호스트 %(host)s의 서비스 %(service)s이(가) 제거되었습니다. " #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "호스트 %(host)s에서 서비스 %(service_id)s을(를) 찾을 수 없습니다." #, python-format msgid "Service %(service_id)s could not be found." msgstr "%(service_id)s 서비스를 찾을 수 없습니다. " #, python-format msgid "Service %s not found." msgstr "%s 서비스를 찾을 수 없음" msgid "Service is too old to fulfil this request." msgstr "서비스가 너무 오래되어 이 요청을 이행할 수 없습니다." msgid "Service is unavailable at this time." msgstr "서비스가 지금 사용 불가능합니다. " msgid "Service not found." msgstr "서비스를 찾을 수 없습니다." msgid "Set pair secondary access error." msgstr "쌍 보조 액세스 설정 오류." msgid "Sets thin provisioning." msgstr "씬 프로비저닝을 설정합니다. " msgid "" "Setting LUN QoS policy group is not supported on this storage family and " "ONTAP version." msgstr "" "이 스토리지 제품군과 ONTAP 버전에서는 LUN QoS 정책 그룹 설정이지원되지 않습니" "다. " msgid "" "Setting file qos policy group is not supported on this storage family and " "ontap version." msgstr "" "이 스토리지 제품군과 ONTAP 버전에서는 파일 qos 정책 그룹 설정이지원되지 않습" "니다. " #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_ip and nas_share_path settings." msgstr "" "올바르지 않은 형식으로 인해 공유 %s이(가) 무시됩니다. format. address:/" "export 형식이어야 합니다.nas_ip and nas_share_path 설정을 확인하십시오." #, python-format msgid "" "Share at %(dir)s is not writable by the Cinder volume service. Snapshot " "operations will not be supported." 
msgstr "" "%(dir)s에서의 공유를 Cinder 볼륨 서비스에서 쓸 수 없습니다. 스냅샷 조작이 지" "원되지 않습니다." #, python-format msgid "Sheepdog I/O Error, command was: \"%s\"." msgstr "Sheepdog I/O 오류, 명령: \"%s\"." msgid "" "Show operations can only be made to projects in the same hierarchy of the " "project in which users are scoped to." msgstr "" "표시 조작은 사용자가 범위 지정되는 프로젝트의 동일한 계층에 있는 프로젝트에 " "대해서만 작성될 수 있습니다. " msgid "Size" msgstr "크기" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "" "볼륨 %s에 대한 크기를 찾을 수 없습니다. 보안 삭제를 수행할 수 없습니다." #, python-format msgid "" "Size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "크기는 %(image_size)dGB이며 크기 %(volume_size)dGB의 볼륨에 맞지 않습니다." #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "지정된 이미지 크기 %(image_size)sGB는 다음 볼륨 크기보다 커야 합니다: " "%(volume_size)sGB." #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "사용 가능해질 때까지 기다리는 동안 스냅샷 %(id)s을(를) 삭제하도록 요청되었습" "니다. 동시에 요청되었을 가능성이 있습니다." #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "단계식 삭제 중에 \"삭제 중\"이 아니라 %(state)s 상태의 스냅샷 %(id)s이(가) 발" "견되었습니다." #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "%(snapshot_id)s 스냅샷을 찾을 수 없습니다. " #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "%(snapshot_id)s 스냅샷에 %(metadata_key)s 키를 갖는 메타데이터가 없습니다. " #, python-format msgid "Snapshot %s must not be part of a consistency group." msgstr "스냅샷 %s이(가) 일관성 그룹의 일부가 아니어야 합니다. " #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "배열에 스냅샷 '%s'이(가) 없습니다." #, python-format msgid "" "Snapshot cannot be created because volume %(vol_id)s is not available, " "current volume status: %(vol_status)s." msgstr "" "볼륨 %(vol_id)s이(가) 사용 가능 상태가 아니어서 스냅샷을 작성할 수 없습니다. " "현재 볼륨 상태: %(vol_status)s." msgid "Snapshot cannot be created while volume is migrating." msgstr "볼륨 마이그레이션 중에 스냅샷을 작성할 수 없습니다. " msgid "Snapshot of secondary replica is not allowed." msgstr "2차 복제본의 스냅샷이 허용되지 않습니다." #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "볼륨의 스냅샷이 다음 상태에서 지원되지 않음: %s" #, python-format msgid "Snapshot res \"%s\" that is not deployed anywhere?" msgstr "배치되지 않은 스냅샷 res \"%s\"이(가) 있습니까? " msgid "Snapshot size must be multiple of 1 GB." msgstr "스냅샷 크기는 1GB의 배수여야 합니다. " #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" "update_snapshot_status에 대해 스냅샷 상태 %(cur)s이(가) 허용되지 않습니다. " msgid "Snapshot status must be \"available\" to clone." msgstr "스냅샷 상태가 복제 \"사용 가능\"이어야 합니다. " #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "백업할 스냅샷이 사용 가능해야 하지만 현재 상태가 \"%s\"입니다. " #, python-format msgid "Snapshot with id of %s could not be found." msgstr "id가 %s인 스냅샷을 찾을 수 없습니다." #, python-format msgid "" "Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " "incremental backup" msgstr "" "스냅샷='%(snap)s'이(가) 기본 이미지='%(base)s'에 존재하지 않음 - 중단 중증분 " "백업" #, python-format msgid "Snapshots are not supported for this volume format: %s" msgstr "스냅샷이 이 볼륨 형식에 대해 지원되지 않음: %s" #, python-format msgid "Socket error: %(arg)s." msgstr "소켓 오류: %(arg)s." 
msgid "SolidFire Cinder Driver exception" msgstr "SolidFire Cinder 드라이버 예외" msgid "Sort direction array size exceeds sort key array size." msgstr "정렬 방향 배열 크기가 정렬 키 배열 크기를 초과합니다. " msgid "Source CG is empty. No consistency group will be created." msgstr "소스 CG가 비어 있습니다. 일관성 그룹이 작성되지 않습니다. " msgid "Source host details not found." msgstr "소스 호스트 세부사항을 찾을 수 없습니다." msgid "Source volume device ID is required." msgstr "소스 볼륨 디바이스 ID가 필요합니다. " msgid "Source volume not mid-migration." msgstr "소스 볼륨이 마이그레이션에 포함되지 않음 " #, python-format msgid "" "Source with host ip/name: %s not found on the target appliance for backend " "enabled volume migration, procedding with default migration." msgstr "" "백엔드 사용 볼륨 마이그레이션에 대한 대상 어플라이언스에서 호스트 ip/이름: %s" "을(를) 가진 소스를 찾을 수 없습니다. 기본 마이그레이션을 계속 진행합니다. " msgid "SpaceInfo returned byarray is invalid" msgstr "배열에서 리턴한 SpaceInfo가 올바르지 않음" #, python-format msgid "" "Specified host to map to volume %(vol)s is in unsupported host group with " "%(group)s." msgstr "" "볼륨 %(vol)s에 맵핑할 지정된 호스트가 %(group)s이(가) 포함된 지원되지 않는 호" "스트 그룹에 있습니다. " msgid "Specified logical volume does not exist." msgstr "지정된 논리적 볼륨이 존재하지 않습니다. " #, python-format msgid "Specified snapshot group with id %s could not be found." msgstr "id가 %s인 지정된 스냅샷 그룹을 찾을 수 없습니다. " msgid "Specify a password or private_key" msgstr "비밀번호 또는 private_key 지정" msgid "Specify san_password or san_private_key" msgstr "san_password 또는 san_private_key 지정" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "볼륨 유형 이름, 설명, is_public 또는 조합을 지정하십시오." msgid "Split pair error." msgstr "쌍 분할 오류." msgid "Split replication failed." msgstr "복제 분할에 실패했습니다." msgid "Start LUNcopy error." msgstr "LUNcopy 시작 오류입니다. " msgid "State" msgstr "상태" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "노드의 세부 상태가 잘못되었습니다. 현재 세부 상태는 %s입니다. " msgid "Status" msgstr "상태" msgid "Stop snapshot error." msgstr "스냅샷 중지 오류입니다. " #, python-format msgid "Storage Configuration Service not found on %(storageSystemName)s." msgstr "%(storageSystemName)s에서 스토리지 구성 서비스를 찾을 수 없습니다. " #, python-format msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." msgstr "" "%(storageSystemName)s에서 스토리지 하드웨어 ID mgmt 서비스를 찾을 수 없습니" "다. " #, python-format msgid "Storage Profile %s not found." msgstr "스토리지 프로파일 %s을(를) 찾을 수 없습니다. " #, python-format msgid "Storage Relocation Service not found on %(storageSystemName)s." msgstr "" "%(storageSystemName)s에서 스토리지 위치 재지정 서비스를 찾을 수 없습니다. " #, python-format msgid "Storage family %s is not supported." msgstr "스토리지 제품군 %s이(가) 지원되지 않습니다. " #, python-format msgid "Storage group %(storageGroupName)s was not deleted successfully" msgstr "스토리지 그룹 %(storageGroupName)s이(가) 삭제되지 않음" #, python-format msgid "Storage host %(svr)s not detected, verify name" msgstr "스토리지 호스트 %(svr)s이(가) 발견되지 않음, 이름 확인" msgid "Storage pool is not configured." msgstr "스토리지 풀이 구성되지 않았습니다." #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "스토리지 프로파일: %(storage_profile)s을(를) 찾을 수 없습니다." msgid "Storage resource could not be found." msgstr "스토리지 자원을 찾을 수 없습니다. " msgid "Storage system id not set." msgstr "스토리지 시스템 ID가 설정되지 않았습니다." #, python-format msgid "Storage system not found for pool %(poolNameInStr)s." msgstr "%(poolNameInStr)s 풀에 대한 스토리지 시스템을 찾을 수 없습니다. " msgid "Storage-assisted migration failed during manage volume." msgstr "볼륨 관리 중에 스토리지 지원 마이그레이션에 실패했습니다." #, python-format msgid "StorageSystem %(array)s is not found." 
msgstr "스토리지 시스템 %(array)s을(를) 찾을 수 없습니다." #, python-format msgid "String with params: %s" msgstr "문자열에 %s 매개변수가 있음" #, python-format msgid "" "Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " "project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " "for one or more of the following projects: '%(child_ids)s'" msgstr "" "하위 사용량 합계 '%(sum)s'이(가) '%(res)s' 자원의 '%(proj)s' 프로젝트에 사용 " "가능한 할당량 '%(free)s'보다 큽니다. 하나 이상의 '%(child_ids)s' 프로젝트에 " "대한 한계 또는 사용량을 낮추십시오." msgid "Switch over pair error." msgstr "쌍 전환 오류." msgid "Sync pair error." msgstr "쌍 동기화 오류." msgid "Synchronizing secondary volume to primary failed." msgstr "2차 볼륨을 1차 볼륨과 동기화하지 못했습니다. " #, python-format msgid "System %(id)s found with bad password status - %(pass_status)s." msgstr "시스템 %(id)s에 잘못된 비밀번호 상태가 있음 - %(pass_status)s." #, python-format msgid "System %(id)s found with bad status - %(status)s." msgstr "%(id)s 시스템에 잘못된 상태가 있음 - %(status)s." msgid "System does not support compression." msgstr "시스템이 압축을 지원하지 않습니다. " msgid "System is busy, retry operation." msgstr "시스템을 사용 중입니다. 조작을 재시도하십시오. " #, python-format msgid "" "TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." msgstr "" "TSM [%(tsm)s]이(가) 계정 [%(account)s]에 대한 CloudByte 스토리지에 없습니다. " msgid "Target volume type is still in use." msgstr "대상 볼륨 유형이 아직 사용 중입니다." #, python-format msgid "" "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" "템플리트 트리 불일치. 슬레이브 %(slavetag)s을(를) 마스터 %(mastertag)s에 추가" #, python-format msgid "Tenant ID: %s does not exist." msgstr "테넌트 ID: %s이(가) 존재하지 않습니다. " msgid "Terminate connection failed" msgstr "연결 종료 실패" msgid "Terminate connection unable to connect to backend." msgstr "연결 종료에서 백엔드에 연결하지 못했습니다." #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "볼륨 연결 종료 실패: %(err)s" #, python-format msgid "The %(type)s %(id)s source to be replicated was not found." msgstr "복제할 %(type)s %(id)s 소스를 찾을 수 없습니다." msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "'sort_key'와 'sort_dir' 매개변수는 더 이상 사용되지 않는 항목이므로'sort' 매" "개변수에 이를 사용할 수 없습니다. " msgid "The EQL array has closed the connection." msgstr "EQL 배열에서 연결을 종료했습니다." #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "GPFS 파일 시스템 %(fs)s이(가) 필수 릴리스 레벨이 아닙니다. 현재 레벨은 " "%(cur)s이고, 최소 %(min)s이어야 합니다. " msgid "The IP Address was not found." msgstr "IP 주소를 찾을 수 없습니다." #, python-format msgid "" "The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " "Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." msgstr "" "WebDAV 요청 실패. 이유: %(msg)s, 리턴 코드/이유: %(code)s,소스 볼륨: " "%(src)s, 대상 볼륨: %(dst)s, 메소드: %(method)s." msgid "" "The above error may show that the database has not been created.\n" "Please create a database using 'cinder-manage db sync' before running this " "command." msgstr "" "위의 오류는 데이터베이스가 작성되지 않았음을 표시할 수 있습니다.\n" "이 명령을 실행하기 전에 'cinder-manage db sync'를 사용하여 데이터베이스를 작" "성하십시오." #, python-format msgid "" "The array does not support the storage pool setting for SLO %(slo)s and " "workload %(workload)s. Please check the array for valid SLOs and workloads." msgstr "" "배열에서 SLO %(slo)s 및 워크로드 %(workload)s의 스토리지 풀 설정을 지원하지 " "않습니다. 올바른 SLO 및 워크로드의 배열을 확인하십시오." msgid "" "The back-end where the volume is created does not have replication enabled." 
msgstr "볼륨이 작성된 백엔드에서 복제가 사용되지 않았습니다." #, python-format msgid "" "The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgstr "" "명령 %(cmd)s에 실패했습니다.(ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" msgid "The copy should be primary or secondary" msgstr "사본은 1차 또는 2차여야 함" #, python-format msgid "" "The creation of a logical device could not be completed. (LDEV: %(ldev)s)" msgstr "논리 디바이스의 작성을 완료할 수 없습니다.(LDEV: %(ldev)s)" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "데코레이션된 메소드는 볼륨 또는 스냅샷 오브젝트를 승인해야 함" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "경로 %(path)s에 있는 디바이스를 사용할 수 없음: %(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "종료 시간(%(end)s)은 시작 시간(%(start)s) 이후여야 합니다." #, python-format msgid "The extra_spec: %s is invalid." msgstr "extra_spec: %s이(가) 올바르지 않습니다." #, python-format msgid "The extraspec: %(extraspec)s is not valid." msgstr "추가 스펙: %(extraspec)s이(가) 올바르지 않습니다. " #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "장애 복구된 볼륨을 삭제할 수 없음: %s" #, python-format msgid "The following elements are required: %s" msgstr "다음 요소가 필요함: %s" #, python-format msgid "" "The following migrations have a downgrade, which are not allowed: \n" "\t%s" msgstr "" "다음 마이그레이션에 다운그레이드가 있으며, 이는 허용되지 않습니다. \n" "\t%s" msgid "The host group or iSCSI target could not be added." msgstr "호스트 그룹 또는 iSCSI 대상을 추가할 수 없습니다." msgid "The host group or iSCSI target was not found." msgstr "호스트 그룹 또는 iSCSI 대상을 찾을 수 없습니다." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "호스트를 장애 복구할 준비가 되지 않았습니다. 볼륨을 다시 동기화하고 3PAR 백엔" "드에서 복제를 재개하십시오." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the LeftHand backends." msgstr "" "호스트를 장애 복구할 준비가 되지 않았습니다. 볼륨을 다시 동기화하고 LeftHand " "백엔드에서 복제를 재개하십시오." msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the Storwize backends." msgstr "" "호스트를 장애 복구할 준비가 되지 않았습니다. 볼륨을 다시 동기화하고 Storwize " "백엔드에서 복제를 재개하십시오." #, python-format msgid "The iSCSI CHAP user %(user)s does not exist." msgstr "iSCSI CHAP 사용자 %(user)s이(가) 존재하지 않습니다." #, python-format msgid "" "The imported lun %(lun_id)s is in pool %(lun_pool)s which is not managed by " "the host %(host)s." msgstr "" "가져온 lun %(lun_id)s이(가) 풀 %(lun_pool)s에 있는데 이 풀은 %(host)s 호스트" "에서 관리되지 않습니다. " msgid "The key cannot be None." msgstr "키는 None이 되어서는 안 됩니다. " #, python-format msgid "The logical device for specified %(type)s %(id)s was already deleted." msgstr "지정된 %(type)s %(id)s의 논리 디바이스가 이미 삭제되었습니다." #, python-format msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" msgstr "" "메소드 %(method)s이(가) 제한시간을 초과했습니다.(제한시간 값: %(timeout)s)" msgid "The method update_migrated_volume is not implemented." msgstr "update_migrated_volume 메소드가 구현되지 않았습니다. " #, python-format msgid "" "The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" msgstr "" "마운트 %(mount_path)s이(가) 올바른 Quobyte USP 볼륨이 아닙니다. 오류: %(exc)s" #, python-format msgid "The parameter of the storage backend. (config_group: %(config_group)s)" msgstr "스토리지 백엔드의 매개변수입니다.(config_group: %(config_group)s)" msgid "The parent backup must be available for incremental backup." msgstr "상위 백업은 증분 백업을 수행할 수 있어야 합니다. 
" #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "제공된 스냅샷 '%s'이(가) 제공된 볼륨의 스냅샷이 아닙니다." msgid "" "The reference to the volume in the backend should have the format " "file_system/volume_name (volume_name cannot contain '/')" msgstr "" "백엔드의 볼륨 참조 형식은 file_system/volume_name이어야 합니다(volume_name에 " "'/'가 포함될 수 없음)." #, python-format msgid "The remote retention count must be %s or less." msgstr "원격 보유 수는 %s 이하여야 합니다." msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "볼륨 유형 extra_specs에 복제 모드가 올바르게 구성되지 않았습니다. " "replication:mode가 주기적인 경우 replication:sync_period도 지정해야 하며 300 " "- 31622400초여야 합니다." #, python-format msgid "The replication sync period must be at least %s seconds." msgstr "복제 동기화 기간은 최소 %s초여야 합니다." #, python-format msgid "" "The requested size : %(requestedSize)s is not the same as resulting size: " "%(resultSize)s." msgstr "" "요청된 크기 %(requestedSize)s이(가) 결과로 얻어진 크기 %(resultSize)s인 동안" "에는 인스턴스 연관을 변경할 수 없습니다." #, python-format msgid "The resource %(resource)s was not found." msgstr "자원 %(resource)s을(를) 찾을 수 없습니다." msgid "The results are invalid." msgstr "결과가 올바르지 않습니다. " #, python-format msgid "The retention count must be %s or less." msgstr "보유 수는 %s 이하여야 합니다." msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "볼륨이 유지보수 모드에 있으면 스냅샷을 작성할 수 없습니다. " #, python-format msgid "" "The source volume %s is not in the pool which is managed by the current host." msgstr "소스 볼륨 %s이(가) 현재 호스트에서 관리하는 풀에 없습니다. " msgid "The source volume for this WebDAV operation not found." msgstr "이 WebDAV 조작의 소스 볼륨을 찾을 수 없습니다." #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "소스 볼륨 유형 '%(src)s'이(가) 대상 볼륨 유형인 '%(dest)s'과(와) 다릅니다." #, python-format msgid "The source volume type '%s' is not available." msgstr "소스 볼륨 유형 '%s'은(는) 사용할 수 없습니다." #, python-format msgid "The specified %(desc)s is busy." msgstr "지정된 %(desc)s이(가) 사용 중입니다." #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "지정된 LUN이 제공된 풀에 속하지 않음: %s." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "mapping." msgstr "" "지정된 ldev %(ldev)s을(를) 관리할 수 없습니다. ldev는 쌍을 이루지 아니어야 합" "니다." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev must not be " "paired." msgstr "" "지정된 ldev %(ldev)s을(를) 관리할 수 없습니다. ldev는 쌍을 이루지 않아야 합니" "다." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The ldev size must be in " "multiples of gigabyte." msgstr "" "지정된 ldev %(ldev)s을(를) 관리할 수 없습니다. ldev 크기는 기가바이트 단위여" "야 합니다." #, python-format msgid "" "The specified ldev %(ldev)s could not be managed. The volume type must be DP-" "VOL." msgstr "" "지정된 ldev %(ldev)s을(를) 관리할 수 없습니다. 볼륨 유형은 DP-VOL이어야 합니" "다." #, python-format msgid "" "The specified operation is not supported. The volume size must be the same " "as the source %(type)s. (volume: %(volume_id)s)" msgstr "" "지정된 조작이 지원되지 않습니다. 볼륨 크기는 소스 %(type)s과(와) 동일해야 합" "니다.(볼륨: %(volume_id)s)" msgid "The specified vdisk is mapped to a host." msgstr "지정된 vdisk가 호스트에 맵핑됩니다." msgid "The specified volume is mapped to a host." msgstr "지정된 볼륨이 호스트에 맵핑되어 있습니다. 
" #, python-format msgid "" "The storage array password for %s is incorrect, please update the configured " "password." msgstr "" "%s의 스토리지 배열 비밀번호가 올바르지 않습니다. 구성된 비밀번호를 업데이트하" "십시오." #, python-format msgid "The storage backend can be used. (config_group: %(config_group)s)" msgstr "스토리지 백엔드를 사용할 수 있습니다.(config_group: %(config_group)s)" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "스토리지 장치에서 %(prot)s을(를) 지원하지 않습니다. %(prot)s을(를) 지원하도" "록 장치를 구성하거나 다른 프토토콜을 사용하는 드라이버로 전환하십시오." #, python-format msgid "" "The striped meta count of %(memberCount)s is too small for volume: " "%(volumeName)s, with size %(volumeSize)s." msgstr "" "%(memberCount)s의 스트라이프 메타 개수가 크기가 %(volumeSize)s인 볼륨 " "%(volumeName)s에 대해 너무 적습니다. " #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "볼륨/스냅샷 %(id)s에 대한 메타데이터의 유형: %(metadata_type)s이(가) 올바르" "지 않습니다. " #, python-format msgid "" "The volume %(volume_id)s could not be extended. The volume type must be " "Normal." msgstr "" "볼륨 %(volume_id)s을(를) 확장할 수 없습니다. 볼륨 유형은 보통이어야 합니다." #, python-format msgid "" "The volume %(volume_id)s could not be unmanaged. The volume type must be " "%(volume_type)s." msgstr "" "볼륨 %(volume_id)s을(를) 관리 해제할 수 없습니다. 볼륨 유형은 " "%(volume_type)s(이)어야 합니다." #, python-format msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" msgstr "볼륨 %(volume_id)s을(를) 정상적으로 관리합니다. (LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" msgstr "볼륨 %(volume_id)s을(를) 정상적으로 관리 해제합니다. (LDEV: %(ldev)s)" #, python-format msgid "The volume %(volume_id)s to be mapped was not found." msgstr "맵핑할 볼륨 %(volume_id)s을(를) 찾을 수 없습니다." msgid "The volume cannot accept transfer in maintenance mode." msgstr "유지보수 모드에서는 볼륨이 전송을 승인할 수 없습니다. " msgid "The volume cannot be attached in maintenance mode." msgstr "유지보수 모드에서는 볼륨에 연결할 수 없습니다. " msgid "The volume cannot be detached in maintenance mode." msgstr "유지보수 모드에서는 볼륨을 분리할 수 없습니다. " msgid "The volume cannot be updated during maintenance." msgstr "유지보수 중에는 볼륨을 업데이트할 수 없습니다. " msgid "The volume connection cannot be initialized in maintenance mode." msgstr "유지보수 모드에서는 볼륨 연결을 초기화할 수 없습니다. " msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "볼륨 드라이버는 커넥터에 iSCSI 개시자 이름이 필요합니다. " msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "볼륨이 현재 3PAR에서 사용 중이므로 이번에는 삭제할 수 없습니다. 나중에 다시 " "시도할 수 있습니다." msgid "The volume label is required as input." msgstr "볼륨 레이블이 입력으로 필요합니다. " msgid "" "The volume metadata cannot be deleted when the volume is in maintenance mode." msgstr "볼륨이 유지보수 모드에 있으면 볼륨 메타데이터를 삭제할 수 없습니다. " msgid "" "The volume metadata cannot be updated when the volume is in maintenance mode." msgstr "" "볼륨이 유지보수 모드에 있으면 볼륨 메타데이터를 업데이트할 수 없습니다. " #, python-format msgid "There are no resources available for use. (resource: %(resource)s)" msgstr "사용 가능한 자원이 없습니다.(자원: %(resource)s)" msgid "There are no valid ESX hosts." msgstr "올바른 ESX 호스트가 없습니다. " #, python-format msgid "There are no valid datastores attached to %s." msgstr "%s에 접속된 올바른 데이터 저장소가 없습니다." msgid "There are no valid datastores." msgstr "올바른 데이터 저장소가 없습니다. " #, python-format msgid "" "There is no designation of the %(param)s. The specified storage is essential " "to manage the volume." 
msgstr "" "%(param)s의 지정이 없습니다. 지정된 스토리지는 볼륨 관리에 꼭 필요합니다." msgid "" "There is no designation of the ldev. The specified ldev is essential to " "manage the volume." msgstr "ldev의 지정이 없습니다. 지정된 ldev는 볼륨 관리에 꼭 필요합니다." msgid "There is no metadata in DB object." msgstr "DB 오브젝트에 메타데이터가 없습니다. " #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "%(volume_size)sG를 보유할 수 있는 공유가 없음" #, python-format msgid "There is no share which can host %(volume_size)sG." msgstr "%(volume_size)sG를 호스트할 수 있는 공유가 없습니다." #, python-format msgid "There is no such action: %s" msgstr "해당 조치가 없음: %s" msgid "There is no virtual disk device." msgstr "가상 디스크 디바이스가 없습니다." #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "원격 복사 그룹에 볼륨을 추가하는 중 오류 발생: %s." #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "cgsnapshot 작성 중 오류 발생: %s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "원격 복사 그룹을 생성하는 중 오류 발생: %s." #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "원격 복사 그룹의 동기화 기간을 설정하는 중 오류 발생: %s." #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "3PAR 배열에서 원격 복사 그룹을 설정하는 중 오류 발생: ('%s'). 볼륨이 복제 유" "형으로 인식되지 않습니다." #, python-format msgid "" "There was an error setting up a remote schedule on the LeftHand arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "LeftHand 배열에서 원격 스케줄을 설정하는 중 오류 발생: ('%s'). 볼륨이 복제 유" "형으로 인식되지 않습니다." #, python-format msgid "There was an error starting remote copy: %s." msgstr "원격 복사를 시작하는 중 오류 발생: %s." #, python-format msgid "There's no Gluster config file configured (%s)" msgstr "Gluster config 파일이 구성되지 않음(%s)" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "구성된 NFS config 파일이 없음(%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "구성된 Quobyte 볼륨이 없습니다(%s). 예제: quobyte:///" msgid "Thin provisioning not supported on this version of LVM." msgstr "이 버전의 LVM에서는 씬 프로비저닝이 지원되지 않습니다." msgid "ThinProvisioning Enabler is not installed. Can not create thin volume" msgstr "" "씬 프로비저닝 인에이블러가 설치되어 있지 않습니다. 씬 볼륨을 작성할 수 없습니" "다." msgid "This driver does not support deleting in-use snapshots." msgstr "이 드라이버는 스냅샵 삭제 중에 사용 중인 스냅샷을 지원하지 않습니다." msgid "This driver does not support snapshotting in-use volumes." msgstr "이 드라이버는 스냅샵 작성 중에 사용 중인 볼륨을 지원하지 않습니다." msgid "This request was rate-limited." msgstr "이 요청이 비율 제한되었습니다. " #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "" "이 시스템 플랫폼 (%s)이(가) 지원되지 않습니다. 이 드라이버는 Win32 플랫폼만 " "지원합니다." #, python-format msgid "Tier Policy Service not found for %(storageSystemName)s." msgstr "%(storageSystemName)s에 대한 티어 정책 서비스를 찾을 수 없습니다. " #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "스냅샷 %s 작성을 위해 Nova 업데이트를 대기하는 동안 제한시간이 초과됨." #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "스냅샷 %(id)s 삭제를 위해 Nova 업데이트를 대기하는 동안 제한시간이 초과됨." msgid "" "Timeout value (in seconds) used when connecting to ceph cluster. If value < " "0, no timeout is set and default librados value is used." msgstr "" "ceph 클러스터에 연결할 때 사용되는 제한시간 값(초)입니다. 
값이 < 0인 경우, 제" "한시간이 설정되지 않고 기본 librados 값이 사용됩니다." #, python-format msgid "Timeout while calling %s " msgstr "%s을(를) 호출하는 동안 제한시간 초과" #, python-format msgid "Timeout while requesting %(service)s API." msgstr "%(service)s API를 요청하는 중 제한시간을 초과했습니다." #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "백엔드 %(service)s에서 기능을 요청하는 동안 제한시간이 초과됩니다." #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "전송 %(transfer_id)s을(를) 찾을 수 없습니다. " #, python-format msgid "" "Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " "%(status)s, expected awaiting-transfer" msgstr "" "전송 %(transfer_id)s: 볼륨 id %(volume_id)s이(가) 예상치 않은 상태%(status)s" "입니다. 예상된 상태는 전송 대기입니다. " #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "ID %(meta_id)s의 백업 메타데이터를 백업 %(id)s(으)로 가져오려고 시도 중입니" "다. " #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "볼륨 조정 태스크가 완료되기 전에 중지됨: volume_name=%(volume_name)s, task-" "status=%(status)s." #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "%(type_id)s 유형이 이미 다른 qos 스펙과 연관되어 있음: %(qos_specs_id)s" msgid "Type access modification is not applicable to public volume type." msgstr "유형 액세스 수정을 공용 볼륨 유형에 적용할 수 없습니다. " msgid "Type cannot be converted into NaElement." msgstr "유형을 NaElement로 변환할 수 없습니다." #, python-format msgid "TypeError: %s" msgstr "TypeError: %s" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUID %s이(가) 추가 및 제거 볼륨 목록에 있습니다. " #, python-format msgid "Unable to access the Storwize back-end for volume %s." msgstr "볼륨 %s의 Storwize 백엔드에 액세스할 수 없습니다." msgid "Unable to access the backend storage via file handle." msgstr "파일 핸들을 통해 백엔드 스토리지에 액세스할 수 없습니다. " #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "경로 %(path)s을(를) 통해 백엔드 스토리지에 액세스할 수 없음." #, python-format msgid "Unable to add Cinder host to apphosts for space %(space)s" msgstr "공간 %(space)s에 대한 apphosts에 Cinder 호스트를 추가할 수 없음" #, python-format msgid "Unable to complete failover of %s." msgstr "%s의 장애 복구를 완료할 수 없습니다." msgid "Unable to connect or find connection to host" msgstr "호스트에 대한 연결을 설정하거나 찾을 수 없음" msgid "Unable to create Barbican Client without project_id." msgstr "project_id 없이 Barbican Client를 작성할 수 없습니다." #, python-format msgid "Unable to create consistency group %s" msgstr "일관성 그룹 %s을(를) 작성할 수 없음" msgid "Unable to create lock. Coordination backend not started." msgstr "잠금을 생성할 수 없습니다. 조정 백엔드가 시작되지 않았습니다." #, python-format msgid "" "Unable to create or get default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" "FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹을 작성하거나 확인하지 " "못했습니다." #, python-format msgid "Unable to create replica clone for volume %s." msgstr "볼륨 %s에 대한 복제본을 작성할 수 없습니다. " #, python-format msgid "Unable to create the relationship for %s." msgstr "%s의 관계를 작성할 수 없습니다." #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "%(snap)s에서 %(name)s 볼륨을 생성할 수 없습니다." #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "%(vol)s에서 %(name)s 볼륨을 생성할 수 없습니다." #, python-format msgid "Unable to create volume %s" msgstr "볼륨 %s을(를) 생성할 수 없습니다." msgid "Unable to create volume. Backend down." msgstr "볼륨을 생성할 수 없습니다. 백엔드가 종료되었습니다." 
#, python-format msgid "Unable to delete Consistency Group snapshot %s" msgstr "일관성 그룹 스냅샷 %s을(를) 삭제할 수 없음" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "스냅샷 %(id)s을(를) 삭제할 수 없음, 상태: %(status)s." #, python-format msgid "Unable to delete snapshot policy on volume %s." msgstr "볼륨 %s에 대한 스냅샷 정책을 삭제할 수 없습니다. " #, python-format msgid "" "Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." msgstr "볼륨 %(vol)s의 대상 볼륨을 삭제할 수 없습니다. 예외: %(err)s." msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "볼륨의 연결을 해제할 수 없습니다. 연결을 해제하려면 볼륨 상태가 '사용 중'이어" "야 하며 attach_status가 '연결됨'이어야 합니다." #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "제공된 보조 %(secondary)s에서 secondary_array를 판별할 수 없습니다." #, python-format msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." msgstr "스냅샷 %(id)s에 대한 Purity에서 스냅샷 이름을 판별할 수 없습니다. " msgid "Unable to determine system id." msgstr "시스템 ID를 판별할 수 없습니다. " msgid "Unable to determine system name." msgstr "시스템 이름을 판별할 수 없습니다. " #, python-format msgid "" "Unable to do manage snapshot operations with Purity REST API version " "%(api_version)s, requires %(required_versions)s." msgstr "" "Purity REST API 버전 %(api_version)s을(를) 사용하여 스냅샷 조작 관리를 수행" "할 수 없습니다. %(required_versions)s이(가) 필요합니다. " #, python-format msgid "" "Unable to do replication with Purity REST API version %(api_version)s, " "requires one of %(required_versions)s." msgstr "" "Purity REST API 버전 %(api_version)s(으)로 복제를 수행할 수 없습니다. " "%(required_versions)s 중 하나가 필요합니다." msgid "Unable to enable replication and snapcopy at the same time." msgstr "복제 및 snapcopy를 동시에 사용할 수 없습니다." #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Storwize 클러스터 %s과(와) 파트너십을 설정할 수 없습니다." #, python-format msgid "Unable to extend volume %s" msgstr "볼륨 %s을(를) 확장할 수 없음" #, python-format msgid "" "Unable to fail-over the volume %(id)s to the secondary back-end, because the " "replication relationship is unable to switch: %(error)s" msgstr "" "복제 관계를 전환할 수 없으므로 볼륨 %(id)s을(를) 보조 백엔드로 장애 복구할 " "수 없음: %(error)s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "\"default\"로 장애 복구할 수 없습니다. 장애 조치가 완료된 후에만 수행할 수 있" "습니다." #, python-format msgid "Unable to failover to replication target:%(reason)s)." msgstr "복제 대상으로 장애 복구할 수 없음:%(reason)s)." msgid "Unable to fetch connection information from backend." msgstr "백엔드에서 연결 정보를 페치할 수 없습니다." #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "백엔드에서 연결 정보를 페치할 수 없음: %(err)s" #, python-format msgid "Unable to find Purity ref with name=%s" msgstr "이름이 %s인 Purity ref를 찾을 수 없음" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "볼륨 그룹을 찾을 수 없음: %(vg_name)s" msgid "Unable to find failover target, no secondary targets configured." msgstr "장애 복구 대상을 찾을 수 없습니다. 보조 대상이 구성되지 않았습니다." msgid "Unable to find iSCSI mappings." msgstr "iSCSI 맵핑을 찾을 수 없습니다. " #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "ssh_hosts_key_file을 찾을 수 없음: %s" msgid "Unable to find system log file!" msgstr "시스템 로그 파일을 찾을 수 없음!" #, python-format msgid "" "Unable to find viable pg snapshot to use forfailover on selected secondary " "array: %(id)s." msgstr "" "선택한 보조 배열에서 장애 복구를 사용하기 위해 실행 가능한 pg 스냅샷을 찾을 " "수 없음: %(id)s." 
#, python-format msgid "" "Unable to find viable secondary array fromconfigured targets: %(targets)s." msgstr "구성된 대상 %(targets)s에서 실행 가능한 보조 배열을 찾을 수 없습니다." #, python-format msgid "Unable to find volume %s" msgstr "볼륨 %s을(를) 찾을 수 없음" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "'%s' 파일의 블록 디바이스를 가져올 수 없음" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "" "볼륨 작성에 필요한 구성 정보를 가져올 수 없음: %(errorMessage)s(이)어야 합니" "다." msgid "Unable to get corresponding record for pool." msgstr "풀에 대한 해당 레코드를 가져올 수 없습니다. " #, python-format msgid "" "Unable to get information on space %(space)s, please verify that the cluster " "is running and connected." msgstr "" "공간 %(space)s에 대한 정보를 가져올 수 없습니다. 클러스터가 실행 중이며 연결" "되어 있는지 확인하십시오. " msgid "" "Unable to get list of IP addresses on this host, check permissions and " "networking." msgstr "" "이 호스트에서 IP 주소의 목록을 가져올 수 없습니다. 권한 및 네트워킹을 확인하" "십시오. " msgid "" "Unable to get list of domain members, check that the cluster is running." msgstr "" "도메인 멤버의 목록을 가져올 수 없습니다. 클러스터가 실행 중인지 확인하십시" "오. " msgid "" "Unable to get list of spaces to make new name. Please verify the cluster is " "running." msgstr "" "새 이름을 작성할 공간의 목록을 가져올 수 없습니다. 클러스터가 실행 중인지 확" "인하십시오. " #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "backend_name에 대한 통계를 가져올 수 없음: %s" msgid "Unable to get storage volume from job." msgstr "작업에서 스토리지 볼륨을 가져올 수 없습니다." #, python-format msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." msgstr "" "하드웨어 ID %(hardwareIdInstance)s의 대상 엔드포인트를 가져올 수 없습니다." msgid "Unable to get the name of the masking view." msgstr "마스킹 보기의 이름을 가져올 수 없습니다." msgid "Unable to get the name of the portgroup." msgstr "portgroup의 이름을 가져올 수 없습니다." #, python-format msgid "Unable to get the replication relationship for volume %s." msgstr "볼륨 %s의 복제 관계를 가져올 수 없습니다." #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. It is the source volume of " "replication session %(sync)s." msgstr "" "볼륨 %(deviceId)s을(를) cinder로 가져올 수 없습니다. 이는 복제 세션 %(sync)s" "의 소스 볼륨입니다. " #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. The external volume is not " "in the pool managed by current cinder host." msgstr "" "볼륨 %(deviceId)s을(를) cinder로 가져올 수 없습니다. 외부 볼륨이 현재 cinder " "호스트에 의해 관리되는 풀에 없습니다. " #, python-format msgid "" "Unable to import volume %(deviceId)s to cinder. Volume is in masking view " "%(mv)s." msgstr "" "볼륨 %(deviceId)s을(를) cinder로 가져올 수 없습니다. 볼륨이 마스킹 보기 " "%(mv)s에 있습니다. " #, python-format msgid "Unable to load CA from %(cert)s %(e)s." msgstr "%(cert)s %(e)s에서 CA를 로드할 수 없습니다. " #, python-format msgid "Unable to load cert from %(cert)s %(e)s." msgstr "%(cert)s %(e)s에서 인증서를 로드할 수 없습니다." #, python-format msgid "Unable to load key from %(cert)s %(e)s." msgstr "%(cert)s %(e)s에서 키를 로드할 수 없습니다. " #, python-format msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "Solidfire 디바이스에서 %(account_name)s 계정을 찾을 수 없음" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "IP 주소 '%s'을(를) 관리하는 SVM을 찾을 수 없음" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "지정된 재생 프로파일 %s을(를) 찾을 수 없음" #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "기존 볼륨을 관리할 수 없습니다. 볼륨 %(volume_ref)s이(가) 이미 관리되고 있습" "니다. 
" #, python-format msgid "Unable to manage volume %s" msgstr "볼륨 %s을(를) 관리할 수 없음" msgid "Unable to map volume" msgstr "볼륨을 맵핑할 수 없음" msgid "Unable to map volume." msgstr "볼륨을 맵핑할 수 없습니다. " msgid "Unable to parse XML request. Please provide XML in correct format." msgstr "XML 요청을 구문 분석할 수 없습니다. 올바른 형식의 XML을 제공하십시오." msgid "Unable to parse attributes." msgstr "속성을 구문 분석할 수 없습니다. " #, python-format msgid "" "Unable to promote replica to primary for volume %s. No secondary copy " "available." msgstr "" "%s 볼륨의 복제본을 1차로 승격할 수 없습니다. 2차 사본을 사용할 수 없습니다." msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "Cinder에서 관리하지 않는 호스트는 use_chap_auth=True를 사용하여재사용할 수 없" "습니다. " msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "알 수 없는 CHAP 신임 정보로 구성된 호스트를 재사용할 수 없습니다. " #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "볼륨 %(existing)s의 이름을 %(newname)s(으)로 바꿀 수 없음" #, python-format msgid "Unable to retrieve snapshot group with id of %s." msgstr "id가 %s인 스냅샷 그룹을 검색할 수 없습니다." #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "%(specname)s을(를) 다시 입력할 수 없음, 현재 및 요청된 %(spectype)s 값을 수신" "해야 합니다. 수신한 값: %(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "재입력할 수 없음: %s 볼륨의 사본이 있습니다. 재입력을 수행하면 한계값인 2개" "의 사본을 초과합니다." #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "재입력할 수 없음: 현재 조치에 볼륨-사본이 필요함, 새 유형이 복제인 경우 이는 " "허용되지 않습니다. 볼륨 = %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "%(vol)s의 미러 모드 복제를 설정할 수 없습니다. 예외: %(err)s." #, python-format msgid "Unable to snap Consistency Group %s" msgstr "일관성 그룹 %s의 스냅샷을 작성할 수 없음" msgid "Unable to terminate volume connection from backend." msgstr "백엔드에서 볼륨 연결을 종료할 수 없습니다." #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "볼륨 연결을 종료할 수 없음: %(err)s" #, python-format msgid "Unable to update consistency group %s" msgstr "일관성 그룹 %s을(를) 업데이트할 수 없음" #, python-format msgid "" "Unable to update type due to incorrect status: %(vol_status)s on volume: " "%(vol_id)s. Volume status must be available or in-use." msgstr "" "올바르지 않은 상태 %(vol_status)s(으)로 인해 다음 볼륨에서 유형을 업데이트할 " "수 없음: %(vol_id)s. 볼륨 상태가 사용 가능 또는 사용 중이어야 합니다. " #, python-format msgid "" "Unable to verify initiator group: %(igGroupName)s in masking view " "%(maskingViewName)s. " msgstr "" "마스킹 보기 %(maskingViewName)s에서 개시자 그룹 %(igGroupName)s을(를) 확인할 " "수 없습니다." msgid "Unacceptable parameters." msgstr "허용할 수 없는 매개변수입니다. " #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" "%(id)s 맵핑에 대해 예상치 못한 맵핑 상태 %(status)s. 속성:%(attr)s(이)어야 합" "니다." #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "예상치 못한 CLI 응답: 헤더/행 불일치. 헤더: %(header)s, 행: %(row)s(이)어야 " "합니다." #, python-format msgid "" "Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." msgstr "" "맵핑 %(id)s에 대해 예상치 않은 맵핑 상태 %(status)s이(가) 발생했습니다. 속" "성: %(attr)s." #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "" "예상치 못한 결과입니다. 
[%(expected)s]을(를) 예상했지만 [%(output)s]을(를) 수" "신했습니다." msgid "Unexpected response from Nimble API" msgstr "Nimble API로부터의 예기치 않은 응답" msgid "Unexpected response from Tegile IntelliFlash API" msgstr "Tegile IntelliFlash API에서 예상치 못한 응답" msgid "Unexpected status code" msgstr "예기치 않은 상태 코드" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "url %(page)s에 대해 프로토콜 %(protocol)s을(를) 사용하는 스위치 %(switch_id)s" "에서 예상치 못한 상태 코드 수신. 오류: %(error)s" msgid "Unknown Gluster exception" msgstr "알 수 없는 Gluster 예외" msgid "Unknown NFS exception" msgstr "알 수 없는 NFS 예외" msgid "Unknown RemoteFS exception" msgstr "알 수 없는 RemoteFS 예외" msgid "Unknown SMBFS exception." msgstr "알 수 없는 SMBFS 예외입니다." msgid "Unknown Virtuozzo Storage exception" msgstr "알 수 없는 Virtuozzo 스토리지 예외" msgid "Unknown action" msgstr "알 수 없는 조치" #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, Set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "관리할 볼륨: %s이(가) 이미 Cinder에서 관리 중인지 알 수 없습니다. 볼륨 관리" "를 중단합니다. 'cinder_managed' 사용자 지정 스키마 특성을 볼륨에 추가하고 값" "을 False로 설정합니다. 또는 cinder 구성 정책 'zfssa_manage_policy'의 값을 " "'loose'로 설정하여 이 제한을 제거하십시오." #, python-format msgid "" "Unknown if the volume: %s to be managed is already being managed by Cinder. " "Aborting manage volume. Please add 'cinder_managed' custom schema property " "to the volume and set its value to False. Alternatively, set the value of " "cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " "restriction." msgstr "" "관리할 볼륨: %s이(가) 이미 Cinder에서 관리 중인지 알 수 없습니다. 볼륨 관리" "를 중단합니다. 'cinder_managed' 사용자 지정 스키마 특성을 볼륨에 추가하고 값" "을 False로 설정합니다. 또는 cinder 구성 정책 'zfssa_manage_policy'의 값을 " "'loose'로 설정하여 이 제한을 제거하십시오." #, python-format msgid "Unknown operation %s." msgstr "알 수 없는 조작 %s." #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "알 수 없거나 지원되지 않는 명령 %(cmd)s" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "알 수 없는 프로토콜: %(protocol)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "알 수 없는 할당량 자원 %(unknown)s." msgid "Unknown service" msgstr "알 수 없는 서비스" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 함" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 합니다. " msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "관리 취소 및 계단식 삭제 옵션은 상호 배타적입니다." msgid "Unmanage volume not implemented." msgstr "볼륨 관리 취소가 구현되지 않았습니다." msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." msgstr "" "'장애 복구' 볼륨에서 스냅샷의 관리를 해제하는 기능은 허용되지 않습니다." msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "" "장애 복구된 볼륨에서 스냅샷의 관리를 해제하는 기능은 허용되지 않습니다." #, python-format msgid "Unrecognized QOS keyword: \"%s\"" msgstr "인식되지 않는 QOS 키워드: \"%s\"" #, python-format msgid "Unrecognized backing format: %s" msgstr "인식할 수 없는 기반 형식: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "인식되지 않는 read_deleted 값 '%s'" #, python-format msgid "Unset gcs options: %s" msgstr "설정되지 않은 gcs 옵션: %s" #, python-format msgid "Unsuccessful iscsiadm. Exception is %(ex)s. " msgstr "실패한 iscsiadm. 예외는 %(ex)s입니다. 
" msgid "Unsupported Clustered Data ONTAP version." msgstr "지원되지 않는 클러스터 데이터 ONTAP 버전입니다." msgid "Unsupported Content-Type" msgstr "지원되지 않는 Content-Type" msgid "" "Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " "supported." msgstr "" "지원되지 않는 Data ONTAP 버전입니다. Data ONTAP 버전 7.3.1 이상이 지원됩니다." #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "지원되지 않는 백업 메타데이터 버전(%s)" msgid "Unsupported backup metadata version requested" msgstr "지원되지 않는 백업 메타데이터 버전이 요청됨" msgid "Unsupported backup verify driver" msgstr "지원되지 않는 백업 확인 드라이버" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "스위치 %s에 지원되지 않는 펌웨어가 있습니다. 스위치가 펌웨어 v6.4 이상에서 실" "행 중인지 확인하십시오." #, python-format msgid "Unsupported volume format: %s " msgstr "지원되지 않는 볼륨 형식: %s" msgid "Update QoS policy error." msgstr "QoS 정책 업데이트 오류입니다. " msgid "" "Update and delete quota operations can only be made by an admin of immediate " "parent or by the CLOUD admin." msgstr "" "할당량 업데이트 및 삭제 조작은 직속 상위의 관리자 또는 CLOUD 관리자만 작성할 " "수 있습니다. " msgid "" "Update and delete quota operations can only be made to projects in the same " "hierarchy of the project in which users are scoped to." msgstr "" "할당량 업데이트 및 삭제 조작은 사용자가 범위 지정되는 프로젝트의 동일한 계층" "에 있는 프로젝트에 대해서만 작성될 수 있습니다. " msgid "Update list, doesn't include volume_id" msgstr "목록 업데이트, volume_id를 포함하지 않음" msgid "Updated At" msgstr "업데이트" msgid "Upload to glance of attached volume is not supported." msgstr "접속된 볼륨의 글랜스에 대한 업로드가 지원되지 않습니다." msgid "Use ALUA to associate initiator to host error." msgstr "ALUA를 사용하여 호스트에 개시자 연관 오류입니다. " msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "CHAP를 사용하여 호스트에 개시자 연관 오류입니다. CHAP 사용자 이름 및 비밀번호" "를 확인하십시오. " msgid "User ID" msgstr "사용자 ID" msgid "User does not have admin privileges" msgstr "사용자에게 관리자 권한이 없음" msgid "User is not authorized to use key manager." msgstr "사용자에게 키 관리자를 사용할 권한이 없습니다." msgid "User not authorized to perform WebDAV operations." msgstr "사용자에게 WebDAV 조작을 수행할 권한이 없습니다." msgid "UserName is not configured." msgstr "UserName이 구성되지 않았습니다." msgid "UserPassword is not configured." msgstr "UserPassword가 구성되지 않았습니다." msgid "" "V2 rollback - Volume in another storage group besides default storage group." msgstr "" "V2 롤백 - 볼륨이 기본 스토리지 그룹 옆의 다른 스토리지 그룹에 있습니다. " msgid "V2 rollback, volume is not in any storage group." msgstr "V2 롤백, 볼륨이 스토리지 그룹에 없습니다. " msgid "V3 rollback" msgstr "V3 롤백" msgid "VF is not enabled." msgstr "VF가 사용되지 않습니다." #, python-format msgid "VV Set %s does not exist." msgstr "VV 설정 %s이(가) 없습니다. " #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "QoS 스펙의 유효한 이용자: %s" #, python-format msgid "Valid control location are: %s" msgstr "유효한 제어 위치: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "볼륨 연결 유효성 검증에 실패했습니다(오류: %(err)s). " #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "값 \"%(value)s\"이(가) 구성 옵션 \"%(option)s\"에 대해 올바르지 않음" #, python-format msgid "Value %(param)s for %(param_string)s is not a boolean." msgstr "%(param_string)s에 대한 값 %(param)s이(가) 부울이 아닙니다. " msgid "Value required for 'scality_sofs_config'" msgstr "'scality_sofs_config'에 필요한 값" #, python-format msgid "ValueError: %s" msgstr "ValueError: %s" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "Vdisk %(name)s이(가) 맵핑 %(src)s -> %(tgt)s에 포함되지 않았습니다. 
" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "API에서 %(req_ver)s 버전을 지원하지 않습니다. 최소 %(min_ver)s 이상, 최대 " "%(max_ver)s 이하여야 합니다." #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s이(가) id별로 오브젝트를 검색할 수 없습니다." #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s에서 조건부 업데이트를 지원하지 않습니다." #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "배열에 가상 볼륨 '%s'이(가) 없습니다." #, python-format msgid "Vol copy job for dest %s failed." msgstr "%s 대상에 대한 볼륨 복사 작업에 실패했습니다." #, python-format msgid "Volume %(deviceID)s not found." msgstr "볼륨 %(deviceID)s을(를) 찾을 수 없습니다. " #, python-format msgid "" "Volume %(name)s not found on the array. Cannot determine if there are " "volumes mapped." msgstr "" "배열에서 %(name)s 볼륨을 찾을 수 없습니다. 맵핑된 볼륨이 있는지 여부를 판별" "할 수 없습니다. " #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "볼륨 %(name)s을(를) VNX에 작성했지만 %(state)s 상태입니다." #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "볼륨 %(vol)s을(를) 풀 %(pool)s에서 작성할 수 없습니다. " #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "%(vol1)s 볼륨이 snapshot.volume_id %(vol2)s과(와) 일치하지 않습니다." #, python-format msgid "" "Volume %(vol_id)s status must be available or in-use, but current status is: " "%(vol_status)s." msgstr "" "볼륨 %(vol_id)s 상태가 사용 가능 또는 사용 중이어야 하지만 현재 상태가 " "%(vol_status)s입니다. " #, python-format msgid "" "Volume %(vol_id)s status must be available to extend, but current status is: " "%(vol_status)s." msgstr "" "볼륨 %(vol_id)s 상태가 확장에 사용 가능 상태여야 하지만 현재 상태가 " "%(vol_status)s입니다. " #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "볼륨 %(vol_id)s 상태가 읽기 전용 플래그 업데이트에 사용 가능 상태여야 하지만 " "현재 상태가 %(vol_status)s입니다. " #, python-format msgid "" "Volume %(vol_id)s status must be available, but current status is: " "%(vol_status)s." msgstr "" "볼륨 %(vol_id)s 상태가 사용 가능이어야 합니다. 현재 상태: %(vol_status)s." #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "%(volume_id)s 볼륨을 찾을 수 없습니다. " #, python-format msgid "" "Volume %(volume_id)s has no administration metadata with key " "%(metadata_key)s." msgstr "" "%(volume_id)s 볼륨에 %(metadata_key)s 키를 갖는 관리 메타데이터가 없습니다. " #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "%(volume_id)s 볼륨에 %(metadata_key)s 키를 갖는 메타데이터가 없습니다. " #, python-format msgid "" "Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" msgstr "" "볼륨 %(volume_id)s이(가) 지원되지 않는 호스트 그룹 %(group)s에 현재 맵핑되어 " "있음" #, python-format msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" msgstr "볼륨 %(volume_id)s이(가) 호스트 %(host)s에 현재 맵핑되어 있지 않음" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" "%(volume_id)s 볼륨이 여전히 첨부되어 있습니다. 먼저 불륨을 분리하십시오. " #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "볼륨 %(volume_id)s 복제 오류: %(reason)s" #, python-format msgid "Volume %(volume_name)s is busy." msgstr "볼륨 %(volume_name)s을(를) 사용 중입니다." #, python-format msgid "Volume %s could not be created from source volume." msgstr "볼륨 %s을(를) 소스 볼륨에서 작성할 수 없습니다. " #, python-format msgid "Volume %s could not be created on shares." msgstr "%s 볼륨을 공유에서 작성할 수 없습니다. 
" #, python-format msgid "Volume %s could not be created." msgstr "볼륨 %s을(를) 작성할 수 없습니다. " #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "볼륨 %s이(가) Nexenta SA에 없음" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "볼륨 %s이(가) Nexenta Store 어플라이언스에 없음" #, python-format msgid "Volume %s does not exist on the array." msgstr "배열에 %s 볼륨이 없습니다." #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "%s 볼륨에 지정된 provider_location이 없습니다. 건너뜁니다." #, python-format msgid "Volume %s doesn't exist on array." msgstr "배열에 %s 볼륨이 없습니다." #, python-format msgid "Volume %s doesn't exist on the ZFSSA backend." msgstr "ZFSSA 백엔드에 %s 볼륨이 없습니다." #, python-format msgid "Volume %s is already managed by OpenStack." msgstr "OpenStack에서 이미 볼륨 %s을(를) 관리합니다." #, python-format msgid "Volume %s is already part of an active migration." msgstr "볼륨 %s은(는) 이미 활성 마이그레이션의 파트입니다." #, python-format msgid "" "Volume %s is not of replicated type. This volume needs to be of a volume " "type with the extra spec replication_enabled set to ' True' to support " "replication actions." msgstr "" "볼륨 %s이(가) 복제된 유형이 아닙니다. 복제 작업을 지원하려면 이 볼륨은 추가 " "사양 replication_enabled가 ' True'로 설정된 볼륨 유형이어야 합니다." #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "볼륨 %s이(가) 온라인입니다. OpenStack을 사용하여 관리할 볼륨을 오프라인으로 " "설정합니다." #, python-format msgid "" "Volume %s must not be migrating, attached, belong to a consistency group or " "have snapshots." msgstr "" "볼륨 %s을(를) 마이그레이션하거나, 연결하거나, 일관성 그룹에 속하거나, 스냅샷" "이 없어야 합니다." #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "볼륨 %s이(가) 일관성 그룹의 일부가 아니어야 합니다. " #, python-format msgid "Volume %s must not be replicated." msgstr "볼륨 %s을(를) 복제하지 않아야 합니다. " #, python-format msgid "Volume %s must not have snapshots." msgstr "볼륨 %s에 스냅샷이 없어야 합니다. " #, python-format msgid "Volume %s not found." msgstr "볼륨 %s을(를) 찾을 수 없습니다. " #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "볼륨 %s: 볼륨 확장 시도 중 오류 발생" #, python-format msgid "Volume (%s) already exists on array" msgstr "배열에 이미 볼륨(%s)이 있음" #, python-format msgid "Volume (%s) already exists on array." msgstr "볼륨(%s)이 배열에 이미 있습니다." #, python-format msgid "Volume Group %s does not exist" msgstr "볼륨 그룹 %s이(가) 없음" #, python-format msgid "Volume Type %(id)s already exists." msgstr "%(id)s 볼륨 유형이 이미 존재합니다. " #, python-format msgid "Volume Type %(type_id)s has no extra spec with key %(id)s." msgstr "볼륨 유형 %(type_id)s에 키 %(id)s을(를) 가진 추가 스펙이 없습니다. " #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "유형이 있는 볼륨에는 %(volume_type_id)s 볼륨 유형 삭제가 허용되지 않습니다." #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" "%(volume_type_id)s 볼륨 유형에 %(extra_specs_key)s 키를 갖는 추가 스펙이 없습" "니다. " msgid "Volume Type id must not be None." msgstr "볼륨 유형 ID가 있어야 합니다. " #, python-format msgid "" "Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" "CloudByte 스토리지에서 OpenStack 볼륨 [%(ops_vol)s]에 해당하는 볼륨 " "[%(cb_vol)s]을(를) 찾을 수 없습니다." #, python-format msgid "Volume [%s] not found in CloudByte storage." msgstr "CloudByte 스토리지에서 볼륨 [%s]을(를) 찾을 수 없습니다." #, python-format msgid "Volume attachment could not be found with filter: %(filter)s ." msgstr "%(filter)s 필터로 볼륨 접속을 찾을 수 없습니다. 
" #, python-format msgid "Volume backend config is invalid: %(reason)s" msgstr "볼륨 백엔드 구성이 올바르지 않음: %(reason)s" msgid "Volume by this name already exists" msgstr "이 이름에 의한 볼륨이 이미 존재함" msgid "Volume cannot be restored since it contains snapshots." msgstr "스냅샷이 포함되었기 때문에 볼륨을 복원할 수 없습니다." msgid "Volume create failed while extracting volume ref." msgstr "볼륨 ref를 추출하는 동안 볼륨 작성에 실패했습니다." #, python-format msgid "Volume device file path %s does not exist." msgstr "볼륨 디바이스 파일 경로 %s이(가) 존재하지 않습니다." #, python-format msgid "Volume device not found at %(device)s." msgstr "%(device)s에서 볼륨 디바이스를 찾을 수 없음. " #, python-format msgid "Volume driver %s not initialized." msgstr "볼륨 드라이버 %s이(가) 초기화되지 않았습니다." msgid "Volume driver not ready." msgstr "볼륨 드라이버가 준비 되어있지 않습니다." #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "볼륨 드라이버 오류 보고서: %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "현재 삭제할 수 없는 임시 스냅샷이 볼륨에 있습니다." msgid "Volume has children and cannot be deleted!" msgstr "볼륨에 하위가 있으므로 삭제할 수 없습니다!" #, python-format msgid "Volume in consistency group %s is attached. Need to detach first." msgstr "일관성 그룹 %s의 볼륨이 접속되었습니다. 먼저 분리해야 합니다." msgid "Volume in consistency group still has dependent snapshots." msgstr "일관성 그룹의 볼륨에 아직 종속자 스냅샷이 있습니다." #, python-format msgid "Volume is attached to a server. (%s)" msgstr "볼륨이 서버에 연결됩니다(%s). " msgid "Volume is in-use." msgstr "볼륨을 사용 중입니다." msgid "Volume is not available." msgstr "볼륨을 사용할 수 없습니다. " msgid "Volume is not local to this node" msgstr "볼륨이 이 노드에 대해 로컬이 아님" msgid "Volume is not local to this node." msgstr "볼륨이 이 노드의 로컬이 아닙니다. " msgid "" "Volume metadata backup requested but this driver does not yet support this " "feature." msgstr "" "볼륨 메타데이터 백업이 요청되었으나 이 드라이버는 아직 이 기능을 설정하십시" "오." #, python-format msgid "Volume migration failed: %(reason)s" msgstr "볼륨 마이그레이션 실패: %(reason)s" msgid "Volume must be available" msgstr "볼륨이 사용 가능해야 함" msgid "Volume must be in the same availability zone as the snapshot" msgstr "볼륨이 스냅샷과 동일한 가용성 구역에 있어야 함 " msgid "Volume must be in the same availability zone as the source volume" msgstr "볼륨이 소스 볼륨과 동일한 가용성 구역에 있어야 함 " msgid "Volume must have a volume type" msgstr "볼륨에 볼륨 유형이 있어야 함" msgid "Volume must not be part of a consistency group." msgstr "볼륨은 일관성 그룹의 일부가 아니어야 합니다." msgid "Volume must not be replicated." msgstr "볼륨을 복제하지 않아야 합니다." msgid "Volume must not have snapshots." msgstr "볼륨에 스냅샷이 없어야 합니다." #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "%(instance_id)s 인스턴스에 대한 볼륨을 찾을 수 없습니다. " msgid "Volume not found on configured storage backend." msgstr "구성된 스토리지 백엔드에서 볼륨을 찾을 수 없습니다. " msgid "" "Volume not found on configured storage backend. If your volume name contains " "\"/\", please rename it and try to manage again." msgstr "" "구성된 스토리지 백엔드에 볼륨을 찾을 수 없습니다. 볼륨 이름에 \"/\"가 포함되" "지 않은 경우 이름을 변경하고 다시 관리하십시오." msgid "Volume not found on configured storage pools." msgstr "구성된 스토리지 풀에서 볼륨을 찾을 수 없습니다. " msgid "Volume not found." msgstr "볼륨을 찾을 수 없습니다. " msgid "Volume not unique." msgstr "볼륨이 고유하지 않습니다." msgid "Volume not yet assigned to host." msgstr "볼륨을 아직 호스트에 지정하지 않았습니다." msgid "Volume reference must contain source-name element." msgstr "볼륨 참조에는 source-name 요소가 포함되어 있어야 합니다. " #, python-format msgid "Volume replication for %(volume_id)s could not be found." msgstr "%(volume_id)s의 볼륨 복제를 찾을 수 없습니다. " #, python-format msgid "Volume service %s failed to start." msgstr "볼륨 서비스 %s을(를) 시작하는 데 실패했습니다. 
" msgid "Volume should have agent-type set as None." msgstr "볼륨의 에이전트 유형은 None으로 설정되어야 합니다. " #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "볼륨 크기 %(volume_size)sGB는 다음 이미지 minDisk 크기보다 작을 수 없습니다: " "%(min_disk)sGB." #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "볼륨 크기 '%(size)s'은(는) 0보다 큰 정수여야 함" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "볼륨 크기 '%(size)s'GB는 원래 볼륨 크기인 %(source_size)sGB보다 작을 수 없습" "니다. 해당 크기가 원래 볼륨 크기보다 크거나 같아야 합니다." #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "볼륨 크기 '%(size)s'GB은(는) 스냅샷 크기인 %(snap_size)sGB보다 작을 수 없습니" "다. 해당 크기가 원래 스냅샷 크기보다 크거나 같아야 합니다." msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "마지막 백업 이후 볼륨 크기가 증가되었습니다. 전체 백업을 수행하십시오. " msgid "Volume size must be a multiple of 1 GB." msgstr "볼륨 크기는 1GB의 배수여야 합니다. " msgid "Volume size must be multiple of 1 GB." msgstr "볼륨 크기는 1GB의 배수여야 합니다. " msgid "Volume size must multiple of 1 GB." msgstr "볼륨 크기는 1GB의 배수여야 합니다. " #, python-format msgid "Volume status for volume must be available, but current status is: %s" msgstr "볼륨의 볼륨 상태가 사용 가능이어야 하지만 현재 상태가 %s입니다." msgid "Volume status is in-use." msgstr "볼륨 상태가 사용 중입니다. " #, python-format msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" "볼륨 상태가 스냅샷에 대해 \"사용 가능\" 또는 \"사용 중\"이어야 합니다(%s임)" msgid "Volume status must be \"available\" or \"in-use\"." msgstr "볼륨 상태가 \"사용 가능\" 또는 \"사용 중\"이어야 합니다." #, python-format msgid "Volume status must be %s to reserve." msgstr "예약하려면 볼륨 상태가 %s 이어야 합니다. " msgid "Volume status must be 'available'." msgstr "볼륨 상태가 '사용 가능'이어야 합니다. " msgid "Volume to Initiator Group mapping already exists" msgstr "개시자 그룹에 대한 볼륨 맵핑이 이미 있음" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "백업할 볼륨은 사용 가능하거나 사용 중이어야 하지만 현재 상태가 \"%s\"입니다. " msgid "Volume to be restored to must be available" msgstr "복원할 볼륨이 사용 가능해야 함" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "%(volume_type_id)s 볼륨 유형을 찾을 수 없습니다. " #, python-format msgid "Volume type ID '%s' is invalid." msgstr "볼륨 유형 ID '%s'이(가) 올바르지 않습니다." #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "%(volume_type_id)s / %(project_id)s 조합에 대한 볼륨 유형 액세스가 이미 존재" "합니다." #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "%(volume_type_id)s / %(project_id)s 조합에 대한 볼륨 유형 액세스를 찾을 수 없" "습니다." #, python-format msgid "Volume type does not match for share %s." msgstr "볼륨 유형이 공유 %s과(와) 일치하지 않습니다. " #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "%(type_id)s 유형에 대한 볼륨 유형 암호화가 이미 존재합니다. " #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "유형 %(type_id)s의 볼륨 유형 암호화가 존재하지 않습니다." msgid "Volume type name can not be empty." msgstr "볼륨 유형 이름은 빈 상태로 둘 수 없습니다." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "이름이 %(volume_type_name)s인 볼륨 유형을 찾을 수 없습니다. 
" #, python-format msgid "Volume with volume id %s does not exist." msgstr "볼륨 ID가 %s인 볼륨이 존재하지 않습니다. " #, python-format msgid "" "Volume: %(volumeName)s is not a concatenated volume. You can only perform " "extend on concatenated volume. Exiting..." msgstr "" "%(volumeName)s 볼륨은 연결된 볼륨이 아닙니다. 연결된 볼륨에 대해서만 확장을 " "수행할 수 있습니다. 종료 중..." #, python-format msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." msgstr "" "볼륨: %(volumeName)s이(가) 스토리지 그룹 %(sgGroupName)s에 추가되지 않았습니" "다." #, python-format msgid "Volume: %s could not be found." msgstr "볼륨: %s을(를) 찾을 수 없습니다. " #, python-format msgid "Volume: %s is already being managed by Cinder." msgstr "Cinder에서 이미 볼륨: %s을(를) 관리 중입니다." msgid "Volumes will be chunked into objects of this size (in megabytes)." msgstr "볼륨은 이 크기(메가바이트)의 오브젝트로 청크됩니다. " msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "기본 및 보조 SolidFire 계정 모두에서 볼륨/계정이 초과되었습니다." #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "VzStorage 구성 'vzstorage_used_ratio'가 올바르지 않습니다. 0보다 크고 1.0 이" "하여야 함: %s." #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "%(config)s의 VzStorage 구성 파일이 존재하지 않습니다. " msgid "Wait replica complete timeout." msgstr "복제본 완료 대기 제한시간 초과" #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "동기화 대기 실패. 실행 상태: %s." msgid "" "Waiting for all nodes to join cluster. Ensure all sheep daemons are running." msgstr "" "모든 노드가 클러스터를 결합할 때까지 기다리는 중입니다. 모든 sheep 디먼이 실" "행 중인지 확인하십시오. " msgid "We should not do switch over on primary array." msgstr "기본 배열로 전환할 수 없습니다." msgid "Wrong resource call syntax" msgstr "잘못된 자원 호출 구문" msgid "X-IO Volume Driver exception!" msgstr "X-IO 볼륨 드라이버 예외 발생!" msgid "XML support has been deprecated and will be removed in the N release." msgstr "XML 지원은 더 이상 사용되지 않으므로 N 릴리스에서 제거됩니다." msgid "XtremIO not configured correctly, no iscsi portals found" msgstr "XtremIO가 올바르게 구성되지 않음, iscsi 포털을 찾을 수 없음" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO가 올바르게 초기화되지 않음, 클러스터를 찾을 수 없음" msgid "You must implement __call__" msgstr "__call__을 구현해야 합니다. " msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "3PAR 드라이버를 사용하기 전에 hpe3parclient를 설치해야 합니다. \"pip install " "python-3parclient\"를 실행하여 hpe3parclient를 설치하십시오." msgid "You must supply an array in your EMC configuration file." msgstr "EMC 구성 파일에서 배열을 제공해야 합니다. " #, python-format msgid "" "Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " "GB. Only Extend is supported. Exiting..." msgstr "" "원래 크기 %(originalVolumeSize)sGB가 %(newSize)sGB보다 큽니다. 확장만 지원됩" "니다. 종료 중..." #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "영역" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "구역 지정 정책: %s, 인식되지 않음" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "_create_and_copy_vdisk_data: vdisk %s의 속성을 가져오지 못했습니다." msgid "_create_host failed to return the host name." msgstr "_create_host가 호스트 이름을 리턴하지 못함" msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: 호스트 이름을 변환할 수 없습니다. 호스트 이름이 유니코드 또는 " "문자열이 아닙니다. " msgid "_create_host: No connector ports." msgstr "_create_host: 커넥터 포트가 없습니다." 
msgid "_create_local_cloned_volume, Replication Service not found." msgstr "_create_local_cloned_volume, 복제 서비스를 찾을 수 없습니다." #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, 소스 볼륨 인스턴스: %(source_volume)s, 대상 볼륨 인스턴" "스: %(target_volume)s, 리턴 코드: %(rc)lu, 오류: %(errordesc)s." #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - CLI 출력에서 성공 메시지를 찾을 수 없습니다. \n" " stdout: %(out)s\n" " stderr: %(err)s" msgid "_create_volume_name, id_code is None." msgstr "_create_volume_name, id_code가 None입니다." msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession, 복제 서비스를 찾을 수 없습니다." #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession, 복사 세션 유형이 정의되지 않았습니다! 복사 세션: " "%(cpsession)s, 복사 유형: %(copytype)s." #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession, copysession: %(cpsession)s, 조작: %(operation)s, 리턴 코" "드: %(rc)lu, 오류: %(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume, volumename: %(volumename)s, 리턴 코드: %(rc)lu, 오류: " "%(errordesc)s." #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "_delete_volume, volumename: %(volumename)s, 스토리지 구성 서비스를 찾을 수 없" "습니다." #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, ETERNUS에 연결" "할 수 없습니다." msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "_extend_volume_op: 스냅샷이 포함된 볼륨 확장은 지원되지 않습니다. " #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, 커넥터: %(connector)s, 연관자: " "FUJITSU_AuthorizedTarget, ETERNUS에 연결할 수 없습니다." #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group, 커넥터: %(connector)s, EnumerateInstanceNames, ETERNUS" "에 연결할 수 없습니다." #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group, 커넥터: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, ETERNUS에 연결할 수 없습니다." #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "ETERNUS에 연결할 수 없습니다." #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." 
msgstr "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "ETERNUS에 연결할 수 없습니다." #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" "_find_initiator_names, 커넥터: %(connector)s, 개시자를 찾을 수 없습니다." #, python-format msgid "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " "connect to ETERNUS." msgstr "" "_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, ETERNUS에 연결" "할 수 없습니다." #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, ETERNUS에 연결" "할 수 없습니다." #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, 데이터가 없습니" "다! 드라이버 구성 파일을 편집하고 정정하십시오." #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection, 파일 이름: %(filename)s, ip: %(ip)s, 포트: " "%(port)s, 사용자: %(user)s, passwd: ****, url: %(url)s, 실패!!." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn을 찾을 수 " "없습니다." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, ETERNUS에 연결할 수 없습니다." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "ETERNUS에 연결할 수 없습니다.." #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, ETERNUS에 " "연결할 수 없습니다." #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: 속성 헤더 및 값이 일치하지 않습니다. \n" " 헤더: %(header)s\n" " 값: %(row)s." msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "_get_host_from_connector에서 커넥터의 호스트 이름을 리턴하지 못했습니다." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, aglist/vol_instance에서 호스트 연관 관계를 가져오는 데 실" "패, affinitygroup: %(ag)s, ReferenceNames, ETERNUS에 연결할 수 없습니다." #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc, 호스트 연관 관계 인스턴스를 가져오는 데 실패, volmap: " "%(volmap)s, GetInstance, ETERNUS에 연결할 수 없습니다." msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, ETERNUS에 연" "결할 수 없습니다." #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." 
msgstr "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, ETERNUS에 연결할 " "수 없습니다." #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, ETERNUS에 연결할 수 없습니다." #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, ETERNUS에 연결할 수 없습" "니다." msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "_get_target_port, EnumerateInstances, ETERNUS에 연결할 수 없습니다." #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "" "_get_target_port, protcol: %(protocol)s, target_port를 찾을 수 없습니다." #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay: %s(이)라는 스냅샷을 찾을 수 없음" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay: 볼륨 id %s을(를) 찾을 수 없음" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: source-name을 지정해야 합니다." msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties: 호스트-볼륨 연결에 대한 FC 연결 정보를 가져올 수 " "없습니다. FC 연결에 사용하는 호스트가 올바르게 구성되었습니까?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: 다음 볼륨의 I/O 그룹 %(gid)s에서 노드를 찾을 수 없" "습니다. 볼륨: %(vol)s." #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, 개시자: %(initiator)s, 대상: %(tgt)s, aglist: %(aglist)s, 스토리지 " "구성 서비스를 찾을 수 없습니다." #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, 컨트롤러 구성 서비스를 찾을 수 없습" "니다." #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, 리턴 코드: %(rc)lu, 오류: %(errordesc)s." #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, ETERNUS에 연결할 수 없습니다." msgid "_update_volume_stats: Could not get storage pool data." msgstr "_update_volume_stats: 스토리지 풀 데이터를 가져오지 못했습니다." #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession 상태가 손상되" "었습니다." #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." 
msgstr "" "add_vdisk_copy 실패: %s 볼륨의 사본이 있습니다. 또 하나의 사본을 추가하면 한" "계값인 2개의 사본을 초과합니다." msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "add_vdisk_copy가 예상 풀에서 vdisk 사본 없이 시작되었습니다." #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants는 부울이어야 하지만 '%s'이(가) 수신되었습니다. " msgid "already created" msgstr "이미 작성됨" msgid "already_created" msgstr "already_created" msgid "attach snapshot from remote node" msgstr "원격 노드에서 스냅샷 연결" #, python-format msgid "attribute %s not lazy-loadable" msgstr "속성 %s이(가) lazy-loadable 속성이 아님" #, python-format msgid "" "backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "백업: %(vol_id)s이(가) %(vpath)s에서 %(bpath)s으로 백업 하드 링크를 제거하지 " "못했습니다.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to obtain backup success notification from " "server.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "백업: %(vol_id)s이(가) 서버에서 백업 성공 알림을 얻지 못했습니다.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "백업: %(vol_id)s이(가) %(bpath)s으로 백업 하드 링크를 제거하지 못했습니다.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "백업: %(vol_id)s이(가) %(bpath)s에서 dsmc를 실행하는 데 실패했습니다. \n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "backup: %(vol_id)s failed. %(path)s is not a file." msgstr "백업: %(vol_id)s이(가) 실패했습니다. %(path)s이(가) 파일이 아닙니다. " #, python-format msgid "" "backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " "regular files supported, actual file mode is %(vol_mode)s." msgstr "" "백업: %(vol_id)s이(가) 실패했습니다. %(path)s이(가) 예상치 못한 파일 유형입니" "다. 블록 또는 일반 파일이 지원되며 실제 파일 모드는 %(vol_mode)s입니다." #, python-format msgid "" "backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." msgstr "" "백업: %(vol_id)s이(가) 실패했습니다. %(path)s에서 볼륨에 대한 실제 경로를 가" "져올 수 없습니다." msgid "being attached by different mode" msgstr "다른 모드로 접속하는 중 " #, python-format msgid "call failed: %r" msgstr "호출 실패: %r" msgid "call failed: GARBAGE_ARGS" msgstr "호출 실패: GARBAGE_ARGS" msgid "call failed: PROC_UNAVAIL" msgstr "호출 실패: PROC_UNAVAIL" #, python-format msgid "call failed: PROG_MISMATCH: %r" msgstr "호출 실패: PROG_MISMATCH: %r" msgid "call failed: PROG_UNAVAIL" msgstr "호출 실패: PROG_UNAVAIL" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "lun 맵을 찾을 수 없음, ig:%(ig)s vol:%(vol)s" msgid "can't find the volume to extend" msgstr "확장할 볼륨을 찾을 수 없음" msgid "can't handle both name and index in req" msgstr "요청의 이름 및 색인을 둘 다 처리할 수 없음" msgid "cannot understand JSON" msgstr "JSON을 이해할 수 없음" msgid "cannot understand XML" msgstr "XML을 이해할 수 없음" #, python-format msgid "cg-%s" msgstr "cg-%s" msgid "cgsnapshot assigned" msgstr "cgsnapshot가 지정됨" msgid "cgsnapshot changed" msgstr "cgsnapshot가 변경됨" msgid "cgsnapshots assigned" msgstr "cgsnapshots가 지정됨" msgid "cgsnapshots changed" msgstr "cgsnapshots가 변경됨" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error: 인증에 비밀번호 또는 SSH 개인 키가 필요함: " "san_password 또는 san_private_key 옵션을 설정하십시오." msgid "check_for_setup_error: Unable to determine system id." 
msgstr "check_for_setup_error: 시스템 ID를 판별할 수 없습니다." msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error: 시스템 이름을 판별할 수 없습니다." msgid "check_hypermetro_exist error." msgstr "check_hypermetro_exist 오류." #, python-format msgid "clone depth exceeds limit of %s" msgstr "복제 깊이가 %s 한계를 초과함 " msgid "consistencygroup assigned" msgstr "consistencygroup이 지정됨" msgid "consistencygroup changed" msgstr "consistencygroup이 변경되지 않음" msgid "control_location must be defined" msgstr "control_location을 정의해야 함 " msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume, 소스 볼륨이 ETERNUS에 없습니다." #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume, 대상 볼륨 instancename: %(volume_instancename)s, 인스턴" "스 가져오기에 실패했습니다." msgid "create_cloned_volume: Source and destination size differ." msgstr "create_cloned_volume: 소스 및 대상 크기가 다릅니다. " #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume: 소스 볼륨 %(src_vol)s 크기는 %(src_size)dGB이고 대상 볼" "륨 %(tgt_vol)s의 크기 %(tgt_size)dGB에 맞지 않습니다." msgid "" "create_consistencygroup_from_src must be creating from a CG snapshot, or a " "source CG." msgstr "" "create_consistencygroup_from_src는 CG 스냅샷이나 소스 CG에서 작성해야 합니다." msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src에서는 cgsnapshot 소스나 일관성 그룹 소스만 " "지원합니다. 여러 소스를 사용할 수 없습니다." msgid "" "create_consistencygroup_from_src supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src에서 cgsnapshot 소스나 일관성 그룹 소스를 지" "원합니다. 여러 소스를 사용할 수 없습니다." #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: 소스 vdisk %(src)s(%(src_id)s)이(가) 없습니다. " #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy: 소스 vdisk %(src)s이(가) 없습니다. " msgid "create_host: Host name is not unicode or string." msgstr "create_host: 호스트 이름이 유니코드 또는 문자열이 아닙니다. " msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: 개시자 또는 wwpn이 제공되지 않았습니다." msgid "create_hypermetro_pair error." msgstr "create_hypermetro_pair 오류." #, python-format msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." msgstr "" "create_snapshot, eternus_pool: %(eternus_pool)s, 풀을 찾을 수 없습니다." #, python-format msgid "" "create_snapshot, snapshotname: %(snapshotname)s, source volume name: " "%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " "%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_snapshot, snapshotname: %(snapshotname)s, 소스 볼륨 이름: " "%(volumename)s, vol_instance.path: %(vol_instance)s, 대상 볼륨 이름: " "%(d_volumename)s, 풀: %(pool)s, 리턴 코드: %(rc)lu, 오류: %(errordesc)s." #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot, volumename: %(s_volumename)s, 소스 볼륨이 TERNUS에 없습니다." #, python-format msgid "" "create_snapshot, volumename: %(volumename)s, Replication Service not found." msgstr "" "create_snapshot, volumename: %(volumename)s, 복제 서비스를 찾을 수 없습니다." 
#, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot: 스냅샷에 대한 볼륨 상태는 \"사용 가능\" 또는 \"사용 중\"이어" "야 합니다. 올바르지 않은 상태는 %s입니다." msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: 소스 볼륨 가져오기에 실패했습니다." #, python-format msgid "" "create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " "ETERNUS." msgstr "" "create_volume, 볼륨: %(volume)s, EnumerateInstances, ETERNUS에 연결할 수 없습" "니다." #, python-format msgid "" "create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "create_volume, 볼륨: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, 스토리지 구성 서비스를 찾을 수 없습니다." #, python-format msgid "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, 리턴 " "코드: %(rc)lu, 오류: %(errordesc)s." msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "create_volume_from_snapshot, 소스 볼륨이 ETERNUS에 없습니다." #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot, 대상 볼륨 instancename: " "%(volume_instancename)s, 인스턴스 가져오기에 실패했습니다." #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "create_volume_from_snapshot: %(name)s 스냅샷이 없습니다. " #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: 볼륨 작성에 스냅샷 상태가 \"사용 가능\"해야 합니" "다. 올바르지 않은 상태는 %s입니다." msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "create_volume_from_snapshot: 소스 및 대상 크기가 다릅니다. " msgid "" "create_volume_from_snapshot: Volume size is different from snapshot based " "volume." msgstr "create_volume_from_snapshot: 볼륨 크기가 스냅샷 기반 볼륨과 다릅니다." msgid "deduplicated and auto tiering can't be both enabled." msgstr "중복 제거 및 자동 계층화를 둘 다 사용할 수는 없습니다." #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " "%(out)s\n" " stderr: %(err)s" msgstr "" "삭제: %(vol_id)s이(가) 올바르지 않은 인수 때문에 dsmc를 실행하는 데 실패함. " "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "삭제: %(vol_id)s이(가) dsmc를 실행하는 데 실패함. stdout: %(out)s\n" " stderr: %(err)s" msgid "delete_hypermetro error." msgstr "delete_hypermetro 오류." #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator: %s ACL을 찾을 수 없습니다. 계속합니다." msgid "delete_replication error." msgstr "delete_replication 오류." #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "종속 볼륨을 갖는 %(snapshot_name)s 스냅샷 삭제 중" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "스냅샷을 갖는 %(volume_name)s 볼륨 삭제 중" msgid "detach snapshot from remote node" msgstr "원격 노드에서 스냅샷 분리" msgid "do_setup: No configured nodes." msgstr "do_setup: 구성된 노드가 없습니다." 
msgid "element is not a child" msgstr "요소가 하위가 아님" msgid "eqlx_cli_max_retries must be greater than or equal to 0" msgstr "eqlx_cli_max_retries는 0 이상이어야 함" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "오브젝트를 swift에 기록하는 중 오류 발생. %(etag)s swift에 있는 오브젝트의 " "MD5는 %(md5)s swift로 보낸 오브젝트의 MD5와 같지 않음" #, python-format msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." msgstr "extend_volume, eternus_pool: %(eternus_pool)s, 풀을 찾을 수 없습니다." #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume, 볼륨: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, 스토리지 구성 서비스를 찾을 수 없습니다." #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume, volumename: %(volumename)s, 리턴 코드: %(rc)lu, 오류: " "%(errordesc)s, PoolType: %(pooltype)s." #, python-format msgid "extend_volume, volumename: %(volumename)s, volume not found." msgstr "extend_volume, volumename: %(volumename)s, 볼륨을 찾을 수 없습니다." msgid "failed to create new_volume on destination host" msgstr "대상 호스트에 new_volume을 작성하지 못함 " msgid "fake" msgstr "fake" #, python-format msgid "file already exists at %s" msgstr "%s에 파일이 이미 있음 " msgid "fileno is not supported by SheepdogIOWrapper" msgstr "SheepdogIOWrapper는 fileno를 지원하지 않음" msgid "fileno() not supported by RBD()" msgstr "RBD()에서 fileno()를 지원하지 않음 " #, python-format msgid "filesystem %s does not exist in Nexenta Store appliance" msgstr "파일 시스템 %s이(가) Nexenta Store 어플라이언스에 없음" msgid "" "flashsystem_multihostmap_enabled is set to False, not allow multi host " "mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " "VDisk is already mapped to a host." msgstr "" "flashsystem_multihostmap_enabled가 False로 설정되었습니다. 멀티 호스트 맵핑" "이 허용되지 않습니다. CMMVC6071E VDisk가 이미 호스트에 맵핑되었으므로 VDisk " "대 호스트 맵핑이 작성되지 않았습니다." msgid "flush() not supported in this version of librbd" msgstr "이 버전의 librbd에서는 flush()가 지원되지 않음 " #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s 백업: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s 백업: %(backing_file)s" msgid "force delete" msgstr "삭제 강제 실행" msgid "get_hyper_domain_id error." msgstr "get_hyper_domain_id 오류." msgid "get_hypermetro_by_id error." msgstr "get_hypermetro_by_id 오류." #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params: 개시자 %(ini)s에 대한 대상 IP를 가져오지 못했습니다. 구성 " "파일을 확인하십시오. " #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: %s 볼륨의 속성을 가져오는 데 실패" msgid "glance_metadata changed" msgstr "glance_metadata가 변경됨" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode가 copy_on_write로 설정되었지만 %(vol)s과(와) %(img)s은" "(는)다른 파일 시스템에 속합니다." #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode가 copy_on_write로 설정되었지만 %(vol)s과(와) %(img)s은" "(는)다른 파일 세트에 속합니다." 
#, python-format msgid "" "hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " "cinder.conf" msgstr "" "hgst_group %(grp)s 및 hgst_user %(usr)s은(는) cinder.conf의 올바른 사용자/그" "룹에 맵핑되어야 함" #, python-format msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" msgstr "" "cinder.conf에서 지정된 hgst_net %(net)s을(를) 클러스터에서 찾을 수 없음" msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." msgstr "" "hgst_redundancy는 cinder.conf에서 0(비HA) 또는 1(HA)로 설정해야 합니다. " msgid "hgst_space_mode must be an octal/int in cinder.conf" msgstr "hgst_space_mode는 cinder.conf의 octal/int여야 함" #, python-format msgid "hgst_storage server %(svr)s not of format :" msgstr "hgst_storage 서버 %(svr)s의 형식이 :가 아님" msgid "hgst_storage_servers must be defined in cinder.conf" msgstr "hgst_storage_servers가 cinder.conf에서 정의되어야 함" msgid "" "http service may have been abruptly disabled or put to maintenance state in " "the middle of this operation." msgstr "" "http 서비스가 갑자기 사용 안함으로 설정되었거나 이 조작을 수행하는 중에 유지" "보수 상태로 변경된 경우일 수 있습니다." msgid "id cannot be None" msgstr "id는 None일 수 없음" #, python-format msgid "image %s not found" msgstr "%s 이미지를 찾을 수 없음 " #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "initialize_connection, 볼륨: %(volume)s, 볼륨을 찾을 수 없습니다." #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "initialize_connection: 볼륨 %s에 대한 속성을 가져오지 못했습니다. " #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "initialize_connection: 볼륨 %s에 대한 볼륨 속성이 누락되었습니다. " #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: 볼륨 %(vol)s에 대한 I/O 그룹 %(gid)s에서 노드를 찾을 " "수 없습니다. " #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: vdisk %s이(가) 정의되지 않았습니다." #, python-format msgid "invalid user '%s'" msgstr "사용자 '%s'이(가) 올바르지 않음 " #, python-format msgid "iscsi portal, %s, not found" msgstr "iscsi 포털, %s을(를) 찾을 수 없음" msgid "" "iscsi_ip_address must be set in config file when using protocol 'iSCSI'." msgstr "" "프로토콜 'iSCSI' 사용 시 구성 파일에서 iscsi_ip_address를 설정해야 합니다. " msgid "iscsiadm execution failed. " msgstr "iscsiadm 실행에 실패했습니다." #, python-format msgid "key manager error: %(reason)s" msgstr "주요 관리자 오류: %(reason)s" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key가 정의되지 않음" msgid "limit param must be an integer" msgstr "limit 매개변수는 정수여야 함" msgid "limit param must be positive" msgstr "limit 매개변수가 양수여야 함" msgid "" "manage_existing cannot manage a volume connected to hosts. Please disconnect " "this volume from existing hosts before importing" msgstr "" "manage_existing은 호스트에 연결된 볼륨을 관리할 수 없습니다. 가져오기 전에 " "이 볼륨과 기존 호스트의 연결을 끊으십시오. " msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "manage_existing에는 기존 볼륨을 식별하기 위한 'name' 키가 필요합니다. " #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot: 볼륨 %(vol)s에서 기존 재생 %(ss)s을(를) 관리하는 " "중 오류 발생" #, python-format msgid "marker [%s] not found" msgstr "마커 [%s]을(를) 찾을 수 없음" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "mdiskgrp에 따옴표 누락 %s" #, python-format msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "migration_policy는 'on-demand' 또는 'never'이어야 함. 
패스됨: %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "%(vol)s 볼륨에서 mkfs가 실패했습니다. 오류 메시지: %(err)s." msgid "mock" msgstr "mock" msgid "mount.glusterfs is not installed" msgstr "mount.glusterfs가 설치되지 않음" #, python-format msgid "multiple resources with name %s found by drbdmanage" msgstr "drbdmanage에서 이름이 %s인 다중 자원을 발견함" #, python-format msgid "multiple resources with snapshot ID %s found" msgstr "스냅샷 ID가 %s인 다중 자원이 발견됨" msgid "name cannot be None" msgstr "이름은 None일 수 없음" #, python-format msgid "naviseccli_path: Could not find NAVISECCLI tool %(path)s." msgstr "naviseccli_path: NAVISECCLI 도구 %(path)s을(를) 찾을 수 없습니다." #, python-format msgid "no REPLY but %r" msgstr "REPLY가 아니라 %r" #, python-format msgid "no snapshot with id %s found in drbdmanage" msgstr "drbdmanage에서 ID가 %s인 스냅샷을 찾을 수 없음" #, python-format msgid "not exactly one snapshot with id %s" msgstr "ID가 %s인 스냅샷이 정확하게 하나가 아님" #, python-format msgid "not exactly one volume with id %s" msgstr "ID가 %s인 볼륨이 정확하게 하나가 아님" #, python-format msgid "obj missing quotes %s" msgstr "오브젝트에 따옴표 누락 %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled가 해제되지 않았습니다. " msgid "progress must be an integer percentage" msgstr "진행상태는 정수 백분율이어야 함" msgid "promote_replica not implemented." msgstr "promote_replica가 구현되지 않았습니다." msgid "provider must be defined" msgstr "제공자를 정의해야 함 " #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "이 볼륨 드라이버에서는 qemu-img %(minimum_version)s 이상이 필요합니다. 현재 " "qemu-img 버전: %(current_version)s" #, python-format msgid "" "qemu-img is not installed and image is of type %s. Only RAW images can be " "used if qemu-img is not installed." msgstr "" "qemu-img가 설치되지 않고 이미지가 %s 유형입니다. qemu-img가 설치되지 않은 경" "우 RAW 이미지만 사용할 수 있습니다. " msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img가 설치되지 않고 디스크 형식이 지정되지 않았습니다. qemu-img가 설치되" "지 않은 경우 RAW 이미지만 사용할 수 있습니다. " msgid "rados and rbd python libraries not found" msgstr "rados 및 rbd python 라이브러리를 찾을 수 없음" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted는 'no', 'yes', 'only' 중에서 선택 가능하며, %r은(는) 불가능함" #, python-format msgid "replication_device should be configured on backend: %s." msgstr "백엔드에서 replication_device를 구성해야 함: %s." #, python-format msgid "replication_device with backend_id [%s] is missing." msgstr "backend_id [%s]인 replication_device가 누락되었습니다." #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover에 실패했습니다. %s을(를) 찾을 수 없습니다." msgid "replication_failover failed. Backend not configured for failover" msgstr "" "replication_failover에 실패했습니다. 백엔드를 장애 복구하도록 구성되지 않았습" "니다." #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc due to invalid arguments on " "%(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "복원: %(vol_id)s이(가) %(bpath)s으로 백업 하드 링크를 제거하지 못했습니다.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" "stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "복원: %(vol_id)s이(가) %(bpath)s에서 dsmc를 실행하는 데 실패했습니다.\n" "stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "restore: %(vol_id)s failed.\n" "stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "복원: %(vol_id)s이(가) 실패했습니다.\n" "stdout: %(out)s\n" " stderr: %(err)s." 
msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup이 중단되었습니다. 실제 오브젝트 목록이 메타데이터에 저장된 오" "브젝트 목록과일치하지 않습니다. " msgid "root element selecting a list" msgstr "목록을 선택하는 루트 요소" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "rtslib_fb에 멤버 %s이(가) 누락됨: 새 python-rtslib-fb가 필요할 수 있습니다. " msgid "san_ip is not set." msgstr "san_ip가 설정되지 않았습니다. " msgid "san_ip must be set" msgstr "san_ip가 설정되어야 함" msgid "san_ip: Mandatory field configuration. san_ip is not set." msgstr "san_ip: 필수 필드 구성입니다. san_ip가 설정되지 않았습니다." msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "cinder.conf의 Datera 드라이버에 대해 san_login 및/또는 san_password가설정되" "지 않았습니다. 이 정보를 설정하고 cinder-volume 서비스를 다시시작하십시오. " msgid "serve() can only be called once" msgstr "serve()는 한 번만 호출할 수 있음" msgid "service not found" msgstr "서비스를 찾을 수 없음" msgid "snapshot does not exist" msgstr "스냅샷이 없음" #, python-format msgid "snapshot id:%s not found" msgstr "스냅샷 id:%s을(를) 찾을 수 없음" #, python-format msgid "snapshot-%s" msgstr "스냅샷-%s" msgid "snapshots assigned" msgstr "스냅샷이 지정됨" msgid "snapshots changed" msgstr "스냅샷이 변경됨" #, python-format msgid "source vol id:%s not found" msgstr "소스 볼륨 id:%s을(를) 찾을 수 없음" #, python-format msgid "source volume id:%s is not replicated" msgstr "소스 볼륨 id:%s을(를) 복제할 수 없음" msgid "source-name cannot be empty." msgstr "source-name은 비어 있지 않아야 합니다." msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "source-name 형식은 'vmdk_path@vm_inventory_path'이어야 합니다." #, python-format msgid "status must be %s and" msgstr "상태는 %s이어야 하며" msgid "status must be available" msgstr "상태가 사용 가능해야 함" msgid "stop_hypermetro error." msgstr "stop_hypermetro 오류." msgid "subclasses must implement construct()!" msgstr "서브클래스가 construct()를 구현해야 합니다!" msgid "sudo failed, continuing as if nothing happened" msgstr "sudo에 실패, 무시하고 계속합니다. " msgid "sync_hypermetro error." msgstr "sync_hypermetro 오류." msgid "sync_replica not implemented." msgstr "sync_replica가 구현되지 않았습니다." #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli가 설치되지 않아 기본 디렉토리(%(default_path)s)를 작성할 수 없음: " "%(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "" "terminate_connection: 커넥터에서 호스트 이름을 가져오는 데 실패했습니다." msgid "timeout creating new_volume on destination host" msgstr "대상 호스트에 new_volume을 작성하는 중에 제한시간이 초과됨 " msgid "too many body keys" msgstr "본문 키가 너무 많음" #, python-format msgid "umount: %s: not mounted" msgstr "umount: %s: 마운트되지 않았음" #, python-format msgid "umount: %s: target is busy" msgstr "umount: %s: 대상이 사용 중임" msgid "umount: : some other error" msgstr "umount: : 기타 특정 오류" msgid "umount: : target is busy" msgstr "umount: : 대상이 사용 중임" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot: %s(이)라는 스냅샷을 찾을 수 없음" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot: 볼륨 id %s을(를) 찾을 수 없음" #, python-format msgid "unrecognized argument %s" msgstr "인식되지 않는 인수 %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "지원되지 않는 압축 알고리즘: %s" msgid "valid iqn needed for show_target" msgstr "show_target에 필요한 올바른 iqn" #, python-format msgid "vdisk %s is not defined." msgstr "vdisk %s이(가) 정의되지 않았습니다. 
" msgid "vmemclient python library not found" msgstr "vmemclient python 라이브러리를 찾을 수 없음" #, python-format msgid "volume %s not found in drbdmanage" msgstr "drbdmanage에서 볼륨 %s을(를) 찾을 수 없음" msgid "volume assigned" msgstr "볼륨 지정됨" msgid "volume changed" msgstr "볼륨 변경됨" msgid "volume does not exist" msgstr "볼륨이 없음" msgid "volume is already attached" msgstr "볼륨이 이미 접속됨" msgid "volume is not local to this node" msgstr "볼륨이 이 노드에 대해 로컬이 아님" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "볼륨 크기 %(volume_size)d은(는) %(size)d 크기의 백업을 복원하기에 너무 작습니" "다. " #, python-format msgid "volume size %d is invalid." msgstr "볼륨 크기 %d이(가) 올바르지 않음" msgid "volume_type cannot be None" msgstr "volume_type은 None일 수 없음" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "일관성 그룹에 볼륨을 작성할 때 volume_type이 제공되어야 합니다." msgid "volume_type_id cannot be None" msgstr "volume_type_id는 None일 수 없음" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "일관성 그룹 %(name)s 작성에 volume_types가 제공되어야 합니다." #, python-format msgid "volume_types must be provided to create consistency group %s." msgstr "일관성 그룹 %s 작성에 volume_types가 제공되어야 합니다." msgid "volumes assigned" msgstr "볼륨이 지정됨" msgid "volumes changed" msgstr "볼륨이 변경됨" #, python-format msgid "wait_for_condition: %s timed out." msgstr "wait_for_condition: %s 제한시간 초과되었습니다. " #, python-format msgid "" "zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " "value is: %s." msgstr "" "zfssa_manage_policy 특성을 'strict' 또는 'loose'로 설정해야 합니다. 현재 값: " "%s." msgid "{} is not a valid option." msgstr "{}는 올바른 옵션이 아닙니다." cinder-8.0.0/cinder/locale/cinder-log-info.pot0000664000567000056710000032556512701406257022353 0ustar jenkinsjenkins00000000000000# Translations template for cinder. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the cinder project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: cinder 8.0.0.0rc2.dev19\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-23 06:37+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: cinder/coordination.py:97 msgid "Coordination backend started successfully." msgstr "" #: cinder/coordination.py:162 msgid "Reconnecting to coordination backend." msgstr "" #: cinder/coordination.py:175 msgid "Reconnected to coordination backend." msgstr "" #: cinder/manager.py:138 msgid "Resetting cached RPC version pins." msgstr "" #: cinder/rpc.py:205 #, python-format msgid "" "Automatically selected %(binary)s RPC version %(version)s as minimum " "service version." msgstr "" #: cinder/rpc.py:218 #, python-format msgid "" "Automatically selected %(binary)s objects version %(version)s as minimum " "service version." msgstr "" #: cinder/service.py:147 #, python-format msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" #: cinder/ssh_utils.py:82 #, python-format msgid "" "Secondary ssh hosts key file %(kwargs)s will be loaded along with " "%(conf)s from /etc/cinder.conf." msgstr "" #: cinder/api/extensions.py:184 msgid "Initializing extension manager." 
msgstr "" #: cinder/api/extensions.py:199 #, python-format msgid "Loaded extension: %s" msgstr "" #: cinder/api/contrib/backups.py:182 #, python-format msgid "Delete backup with id: %s" msgstr "" #: cinder/api/contrib/backups.py:267 #, python-format msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" #: cinder/api/contrib/backups.py:303 #, python-format msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" #: cinder/api/contrib/cgsnapshots.py:112 #, python-format msgid "Delete cgsnapshot with id: %s" msgstr "" #: cinder/api/contrib/cgsnapshots.py:178 #, python-format msgid "Creating cgsnapshot %(name)s." msgstr "" #: cinder/api/contrib/consistencygroups.py:172 #, python-format msgid "Delete consistency group with id: %s" msgstr "" #: cinder/api/contrib/consistencygroups.py:234 #, python-format msgid "Creating consistency group %(name)s." msgstr "" #: cinder/api/contrib/consistencygroups.py:285 #, python-format msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." msgstr "" #: cinder/api/contrib/consistencygroups.py:290 #, python-format msgid "" "Creating consistency group %(name)s from source consistency group " "%(source_cgid)s." msgstr "" #: cinder/api/contrib/consistencygroups.py:347 #, python-format msgid "" "Updating consistency group %(id)s with name %(name)s description: " "%(description)s add_volumes: %(add_volumes)s remove_volumes: " "%(remove_volumes)s." msgstr "" #: cinder/api/contrib/hosts.py:187 #, python-format msgid "Setting host %(host)s to %(state)s." msgstr "" #: cinder/api/contrib/qos_specs_manage.py:80 msgid "Unable to parse XML input." msgstr "" #: cinder/api/contrib/snapshot_actions.py:90 #, python-format msgid "Updating snapshot %(id)s with info %(dict)s" msgstr "" #: cinder/api/contrib/snapshot_unmanage.py:51 #, python-format msgid "Unmanage snapshot with id: %s" msgstr "" #: cinder/api/contrib/volume_transfer.py:170 #, python-format msgid "Creating transfer of volume %s" msgstr "" #: cinder/api/contrib/volume_transfer.py:203 #, python-format msgid "Accepting transfer %s" msgstr "" #: cinder/api/contrib/volume_transfer.py:224 #, python-format msgid "Delete transfer with id: %s" msgstr "" #: cinder/api/contrib/volume_unmanage.py:54 #, python-format msgid "Unmanage volume with id: %s" msgstr "" #: cinder/api/middleware/fault.py:57 cinder/api/openstack/wsgi.py:1158 #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "" #: cinder/api/openstack/wsgi.py:825 #, python-format msgid "Fault thrown: %s" msgstr "" #: cinder/api/openstack/wsgi.py:828 #, python-format msgid "HTTP exception thrown: %s" msgstr "" #: cinder/api/openstack/wsgi.py:1033 #, python-format msgid "%(method)s %(url)s" msgstr "" #: cinder/api/openstack/wsgi.py:1161 #, python-format msgid "%(url)s returned a fault: %(e)s" msgstr "" #: cinder/api/v1/snapshots.py:116 cinder/api/v2/snapshots.py:92 #, python-format msgid "Delete snapshot with id: %s" msgstr "" #: cinder/api/v1/snapshots.py:180 cinder/api/v2/snapshots.py:170 #, python-format msgid "Create snapshot from volume %s" msgstr "" #: cinder/api/v1/volumes.py:119 #, python-format msgid "vol=%s" msgstr "" #: cinder/api/v1/volumes.py:247 cinder/api/v2/volumes.py:191 #, python-format msgid "Delete volume with id: %s" msgstr "" #: cinder/api/v1/volumes.py:374 cinder/api/v2/volumes.py:388 #, python-format msgid "Create volume of %s GB" msgstr "" #: cinder/backup/api.py:421 #, python-format msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s." 
msgstr "" #: cinder/backup/api.py:448 #, python-format msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" #: cinder/backup/chunkeddriver.py:638 msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" #: cinder/backup/driver.py:67 #, python-format msgid "Value with type=%s is not serializable" msgstr "" #: cinder/backup/driver.py:87 cinder/backup/driver.py:114 #: cinder/backup/driver.py:138 #, python-format msgid "Unable to serialize field '%s' - excluding from backup" msgstr "" #: cinder/backup/manager.py:133 msgid "Cleaning up incomplete backup operations." msgstr "" #: cinder/backup/manager.py:155 #, python-format msgid "" "Resetting volume %(vol_id)s to previous status %(status)s (was backing-" "up)." msgstr "" #: cinder/backup/manager.py:163 #, python-format msgid "setting volume %s to error_restoring (was restoring-backup)." msgstr "" #: cinder/backup/manager.py:170 #, python-format msgid "Resetting backup %s to error (was creating)." msgstr "" #: cinder/backup/manager.py:179 #, python-format msgid "Resetting backup %s to available (was restoring)." msgstr "" #: cinder/backup/manager.py:188 #, python-format msgid "Resuming delete on backup: %s." msgstr "" #: cinder/backup/manager.py:270 #, python-format msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" #: cinder/backup/manager.py:328 #, python-format msgid "Create backup finished. backup: %s." msgstr "" #: cinder/backup/manager.py:366 #, python-format msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" #: cinder/backup/manager.py:399 #, python-format msgid "" "Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: " "%(backup_id)s, size: %(backup_size)d, continuing with restore." msgstr "" #: cinder/backup/manager.py:434 #, python-format msgid "" "Restore backup finished, backup %(backup_id)s restored to volume " "%(volume_id)s." msgstr "" #: cinder/backup/manager.py:465 #, python-format msgid "Delete backup started, backup: %s." msgstr "" #: cinder/backup/manager.py:529 #, python-format msgid "Delete backup finished, backup %s deleted." msgstr "" #: cinder/backup/manager.py:555 #, python-format msgid "Export record started, backup: %s." msgstr "" #: cinder/backup/manager.py:589 #, python-format msgid "Export record finished, backup %s exported." msgstr "" #: cinder/backup/manager.py:608 #, python-format msgid "Import record started, backup_url: %s." msgstr "" #: cinder/backup/manager.py:703 #, python-format msgid "Import record id %s metadata from driver finished." msgstr "" #: cinder/backup/manager.py:716 #, python-format msgid "Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." msgstr "" #: cinder/backup/manager.py:722 #, python-format msgid "Backup service: %s." msgstr "" #: cinder/backup/drivers/ceph.py:178 msgid "" "RBD striping not supported - ignoring configuration settings for rbd " "striping" msgstr "" #: cinder/backup/drivers/ceph.py:435 #, python-format msgid "" "Backup base image of volume %(volume)s still has %(snapshots)s snapshots " "so skipping base image delete." msgstr "" #: cinder/backup/drivers/ceph.py:441 #, python-format msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." msgstr "" #: cinder/backup/drivers/ceph.py:450 #, python-format msgid "" "Backup image of volume %(volume)s is busy, retrying %(retries)s more " "time(s) in %(delay)ss." 
msgstr "" #: cinder/backup/drivers/ceph.py:1070 #, python-format msgid "" "No restore point found for backup='%(backup)s' of volume %(volume)s " "although base image is found - forcing full copy." msgstr "" #: cinder/backup/drivers/ceph.py:1181 #, python-format msgid "" "Delete of backup '%(backup)s' for volume '%(volume)s' finished with " "warning." msgstr "" #: cinder/brick/local_dev/lvm.py:287 #, python-format msgid "" "Logical Volume not found when querying LVM info. (vg_name=%(vg)s, " "lv_name=%(lv)s" msgstr "" #: cinder/db/sqlalchemy/api.py:4259 #, python-format msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" msgstr "" #: cinder/db/sqlalchemy/api.py:4274 #, python-format msgid "Deleted %(row)d rows from table=%(table)s" msgstr "" #: cinder/image/image_utils.py:132 #, python-format msgid "" "The image was successfully converted, but image size is unavailable. src " "%(src)s, dest %(dest)s. %(error)s" msgstr "" #: cinder/image/image_utils.py:148 #, python-format msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" msgstr "" #: cinder/image/image_utils.py:189 #, python-format msgid "Image download %(sz).2f MB at %(mbps).2f MB/s" msgstr "" #: cinder/scheduler/base_filter.py:91 #, python-format msgid "Filter %(cls_name)s returned %(obj_len)d host(s)" msgstr "" #: cinder/scheduler/host_manager.py:483 #, python-format msgid "Removing non-active host: %(host)s from scheduler cache." msgstr "" #: cinder/transfer/api.py:115 #, python-format msgid "Generating transfer record for volume %s" msgstr "" #: cinder/transfer/api.py:237 #, python-format msgid "Volume %s has been transferred." msgstr "" #: cinder/volume/api.py:173 msgid "Availability Zones retrieved successfully." msgstr "" #: cinder/volume/api.py:329 msgid "Volume created successfully." msgstr "" #: cinder/volume/api.py:366 cinder/volume/api.py:435 msgid "Delete volume request issued successfully." msgstr "" #: cinder/volume/api.py:441 msgid "Unable to update volume, because it is in maintenance." msgstr "" #: cinder/volume/api.py:452 cinder/volume/api.py:455 msgid "Volume updated successfully." msgstr "" #: cinder/volume/api.py:473 msgid "Volume info retrieved successfully." msgstr "" #: cinder/volume/api.py:522 msgid "Get all volumes completed successfully." msgstr "" #: cinder/volume/api.py:530 msgid "Snapshot retrieved successfully." msgstr "" #: cinder/volume/api.py:538 msgid "Volume retrieved successfully." msgstr "" #: cinder/volume/api.py:559 msgid "Get all snapshots completed successfully." msgstr "" #: cinder/volume/api.py:576 msgid "Reserve volume completed successfully." msgstr "" #: cinder/volume/api.py:588 msgid "Unreserve volume completed successfully." msgstr "" #: cinder/volume/api.py:608 msgid "Begin detaching volume completed successfully." msgstr "" #: cinder/volume/api.py:615 msgid "Roll detaching of volume completed successfully." msgstr "" #: cinder/volume/api.py:622 msgid "Unable to attach volume, because it is in maintenance." msgstr "" #: cinder/volume/api.py:642 cinder/volume/manager.py:1036 msgid "Attach volume completed successfully." msgstr "" #: cinder/volume/api.py:649 msgid "Unable to detach volume, because it is in maintenance." msgstr "" #: cinder/volume/api.py:655 cinder/volume/manager.py:1132 msgid "Detach volume completed successfully." msgstr "" #: cinder/volume/api.py:662 msgid "" "Unable to initialize the connection for volume, because it is in " "maintenance." 
msgstr "" #: cinder/volume/api.py:671 cinder/volume/manager.py:1518 msgid "Initialize volume connection completed successfully." msgstr "" #: cinder/volume/api.py:681 cinder/volume/manager.py:1541 msgid "Terminate volume connection completed successfully." msgstr "" #: cinder/volume/api.py:688 msgid "Unable to accept transfer for volume, because it is in maintenance." msgstr "" #: cinder/volume/api.py:696 cinder/volume/manager.py:1590 msgid "Transfer volume completed successfully." msgstr "" #: cinder/volume/api.py:718 cinder/volume/api.py:850 msgid "Unable to create the snapshot for volume, because it is in maintenance." msgstr "" #: cinder/volume/api.py:920 msgid "Snapshot create request issued successfully." msgstr "" #: cinder/volume/api.py:929 msgid "Snapshot force create request issued successfully." msgstr "" #: cinder/volume/api.py:955 msgid "Snapshot delete request issued successfully." msgstr "" #: cinder/volume/api.py:967 msgid "Get volume metadata completed successfully." msgstr "" #: cinder/volume/api.py:976 msgid "Unable to delete the volume metadata, because it is in maintenance." msgstr "" #: cinder/volume/api.py:982 msgid "Delete volume metadata completed successfully." msgstr "" #: cinder/volume/api.py:1014 msgid "Unable to update the metadata for volume, because it is in maintenance." msgstr "" #: cinder/volume/api.py:1027 msgid "Update volume metadata completed successfully." msgstr "" #: cinder/volume/api.py:1035 msgid "Get volume admin metadata completed successfully." msgstr "" #: cinder/volume/api.py:1055 msgid "Update volume admin metadata completed successfully." msgstr "" #: cinder/volume/api.py:1062 msgid "Get snapshot metadata completed successfully." msgstr "" #: cinder/volume/api.py:1070 msgid "Delete snapshot metadata completed successfully." msgstr "" #: cinder/volume/api.py:1096 msgid "Update snapshot metadata completed successfully." msgstr "" #: cinder/volume/api.py:1101 msgid "Get snapshot metadata value not implemented." msgstr "" #: cinder/volume/api.py:1118 msgid "Get volume image-metadata completed successfully." msgstr "" #: cinder/volume/api.py:1148 msgid "Force upload to image is disabled, Force option will be ignored." msgstr "" #: cinder/volume/api.py:1190 cinder/volume/manager.py:1323 msgid "Copy volume to image completed successfully." msgstr "" #: cinder/volume/api.py:1240 msgid "Extend volume request issued successfully." msgstr "" #: cinder/volume/api.py:1333 msgid "Migrate volume request issued successfully." msgstr "" #: cinder/volume/api.py:1372 msgid "Migrate volume completion issued successfully." msgstr "" #: cinder/volume/api.py:1387 msgid "Update readonly setting on volume completed successfully." msgstr "" #: cinder/volume/api.py:1513 msgid "Retype volume request issued successfully." msgstr "" #: cinder/volume/api.py:1568 msgid "Manage volume request issued successfully." msgstr "" #: cinder/volume/manager.py:265 msgid "" "Service not found for updating active_backend_id, assuming default for " "driver init." msgstr "" #: cinder/volume/manager.py:312 #, python-format msgid "Image-volume cache enabled for host %(host)s." msgstr "" #: cinder/volume/manager.py:315 #, python-format msgid "Image-volume cache disabled for host %(host)s." msgstr "" #: cinder/volume/manager.py:373 msgid "Determined volume DB was empty at startup." msgstr "" #: cinder/volume/manager.py:376 msgid "Determined volume DB was not empty at startup." 
msgstr "" #: cinder/volume/manager.py:421 #, python-format msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" #: cinder/volume/manager.py:508 msgid "Resume volume delete completed successfully." msgstr "" #: cinder/volume/manager.py:513 msgid "Driver initialization completed successfully." msgstr "" #: cinder/volume/manager.py:518 #, python-format msgid "" "Initializing RPC dependent components of volume driver %(driver_name)s " "(%(version)s)" msgstr "" #: cinder/volume/manager.py:543 msgid "Driver post RPC initialization completed successfully." msgstr "" #: cinder/volume/manager.py:655 msgid "Created volume successfully." msgstr "" #: cinder/volume/manager.py:809 msgid "Deleted volume successfully." msgstr "" #: cinder/volume/manager.py:875 msgid "Create snapshot completed successfully" msgstr "" #: cinder/volume/manager.py:942 msgid "Delete snapshot completed successfully" msgstr "" #: cinder/volume/manager.py:1052 cinder/volume/manager.py:1077 msgid "Volume detach called, but volume not attached." msgstr "" #: cinder/volume/manager.py:1556 msgid "Remove volume export completed successfully." msgstr "" #: cinder/volume/manager.py:1807 #, python-format msgid "" "Couldn't find the temporary volume %(vol)s in the database. There is no " "need to clean up this volume." msgstr "" #: cinder/volume/manager.py:1820 #, python-format msgid "" "Couldn't find destination volume %(vol)s in the database. The entry might" " be successfully deleted during migration completion phase." msgstr "" #: cinder/volume/manager.py:1858 #, python-format msgid "" "migrate_volume_completion is cleaning up an error for volume %(vol1)s " "(temporary volume %(vol2)s" msgstr "" #: cinder/volume/manager.py:1917 msgid "Complete-Migrate volume completed successfully." msgstr "" #: cinder/volume/manager.py:1983 msgid "Migrate volume completed successfully." msgstr "" #: cinder/volume/manager.py:2158 msgid "Extend volume completed successfully." msgstr "" #: cinder/volume/manager.py:2266 #, python-format msgid "Volume %s: retyped successfully" msgstr "" #: cinder/volume/manager.py:2327 msgid "Retype volume completed successfully." msgstr "" #: cinder/volume/manager.py:2364 msgid "Manage existing volume completed successfully." msgstr "" #: cinder/volume/manager.py:2398 msgid "Promote volume replica completed successfully." msgstr "" #: cinder/volume/manager.py:2462 #, python-format msgid "Consistency group %s: creating" msgstr "" #: cinder/volume/manager.py:2487 #, python-format msgid "Consistency group %s: created successfully" msgstr "" #: cinder/volume/manager.py:2493 msgid "Create consistency group completed successfully." msgstr "" #: cinder/volume/manager.py:2626 #, python-format msgid "Create consistency group from source-%(source)s completed successfully." msgstr "" #: cinder/volume/manager.py:2861 msgid "Delete consistency group completed successfully." msgstr "" #: cinder/volume/manager.py:3005 msgid "Update consistency group completed successfully." msgstr "" #: cinder/volume/manager.py:3015 #, python-format msgid "Cgsnapshot %s: creating." msgstr "" #: cinder/volume/manager.py:3105 #, python-format msgid "cgsnapshot %s: created successfully" msgstr "" #: cinder/volume/manager.py:3117 #, python-format msgid "cgsnapshot %s: deleting" msgstr "" #: cinder/volume/manager.py:3213 #, python-format msgid "cgsnapshot %s: deleted successfully" msgstr "" #: cinder/volume/manager.py:3364 msgid "Failed over to replication target successfully." 
msgstr "" #: cinder/volume/manager.py:3397 msgid "Set backend status to frozen successfully." msgstr "" #: cinder/volume/manager.py:3430 msgid "Thawed backend successfully." msgstr "" #: cinder/volume/utils.py:365 #, python-format msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" msgstr "" #: cinder/volume/utils.py:438 #, python-format msgid "Volume copy completed (%(size_in_m).2f MB at %(mbps).2f MB/s)." msgstr "" #: cinder/volume/utils.py:488 #, python-format msgid "Performing secure delete on volume: %s" msgstr "" #: cinder/volume/utils.py:516 #, python-format msgid "Elapsed time for clear volume: %.2f sec" msgstr "" #: cinder/volume/drivers/block_device.py:75 #, python-format msgid "Creating %(volume)s on %(device)s" msgstr "" #: cinder/volume/drivers/block_device.py:129 #, python-format msgid "Creating clone of volume: %s." msgstr "" #: cinder/volume/drivers/block_device.py:240 #, python-format msgid "Creating volume snapshot: %s." msgstr "" #: cinder/volume/drivers/block_device.py:255 #, python-format msgid "Creating volume %s from snapshot." msgstr "" #: cinder/volume/drivers/datera.py:249 #, python-format msgid "" "Tried to delete volume %s, but it was not found in the Datera cluster. " "Continuing with delete." msgstr "" #: cinder/volume/drivers/datera.py:282 #, python-format msgid "" "Tried to detach volume %s, but it was not found in the Datera cluster. " "Continuing with detach." msgstr "" #: cinder/volume/drivers/datera.py:317 #, python-format msgid "" "Tried to delete snapshot %s, but was not found in Datera cluster. " "Continuing with delete." msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:720 cinder/volume/drivers/lvm.py:437 #, python-format msgid "Successfully deleted snapshot: %s" msgstr "" #: cinder/volume/drivers/drbdmanagedrv.py:979 #, python-format msgid "DRBD connection for %s already removed" msgstr "" #: cinder/volume/drivers/eqlx.py:260 #, python-format msgid "EQL-driver: executing \"%s\"." msgstr "" #: cinder/volume/drivers/eqlx.py:408 #, python-format msgid "EQL-driver: Setup is complete, group IP is \"%s\"." msgstr "" #: cinder/volume/drivers/glusterfs.py:122 #, python-format msgid "%s is already umounted" msgstr "" #: cinder/volume/drivers/glusterfs.py:190 cinder/volume/drivers/remotefs.py:235 #, python-format msgid "casted to %s" msgstr "" #: cinder/volume/drivers/lvm.py:311 msgid "Enabling LVM thin provisioning by default because a thin pool exists." msgstr "" #: cinder/volume/drivers/lvm.py:315 msgid "Enabling LVM thin provisioning by default because no LVs exist." msgstr "" #: cinder/volume/drivers/lvm.py:422 #, python-format msgid "Successfully deleted volume: %s" msgstr "" #: cinder/volume/drivers/lvm.py:484 cinder/volume/drivers/nexenta/nfs.py:409 #: cinder/volume/drivers/nexenta/ns5/nfs.py:318 #, python-format msgid "Creating clone of volume: %s" msgstr "" #: cinder/volume/drivers/nfs.py:303 cinder/volume/drivers/smbfs.py:481 #: cinder/volume/drivers/vzstorage.py:231 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:706 #, python-format msgid "Extending volume %s." msgstr "" #: cinder/volume/drivers/nfs.py:311 cinder/volume/drivers/smbfs.py:488 #: cinder/volume/drivers/vzstorage.py:238 #, python-format msgid "Resizing file to %sG..." 
msgstr "" #: cinder/volume/drivers/nimble.py:125 #, python-format msgid "Exploring array subnet label %s" msgstr "" #: cinder/volume/drivers/nimble.py:129 #, python-format msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" msgstr "" #: cinder/volume/drivers/nimble.py:136 #, python-format msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" msgstr "" #: cinder/volume/drivers/nimble.py:143 #, python-format msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" msgstr "" #: cinder/volume/drivers/nimble.py:149 #, python-format msgid "Discovery ip %s is used on mgmt+data subnet" msgstr "" #: cinder/volume/drivers/nimble.py:196 #, python-format msgid "vol_name=%(name)s provider_location=%(loc)s" msgstr "" #: cinder/volume/drivers/nimble.py:328 #, python-format msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" msgstr "" #: cinder/volume/drivers/nimble.py:419 #, python-format msgid "Entering unmanage_volume volume = %s" msgstr "" #: cinder/volume/drivers/nimble.py:439 #, python-format msgid "Creating initiator group %(grp)s with initiator %(iname)s" msgstr "" #: cinder/volume/drivers/nimble.py:452 #, python-format msgid "igroup %(grp)s found for initiator %(iname)s" msgstr "" #: cinder/volume/drivers/nimble.py:457 #, python-format msgid "No igroup found for initiator %s" msgstr "" #: cinder/volume/drivers/nimble.py:462 #, python-format msgid "" "Entering initialize_connection volume=%(vol)s connector=%(conn)s " "location=%(loc)s" msgstr "" #: cinder/volume/drivers/nimble.py:473 #, python-format msgid "Initiator group name is %(grp)s for initiator %(iname)s" msgstr "" #: cinder/volume/drivers/nimble.py:491 #, python-format msgid "" "Entering terminate_connection volume=%(vol)s connector=%(conn)s " "location=%(loc)s." msgstr "" #: cinder/volume/drivers/nimble.py:534 msgid "Session might have expired. 
Trying to relogin" msgstr "" #: cinder/volume/drivers/nimble.py:596 #, python-format msgid "Successful login by user %s" msgstr "" #: cinder/volume/drivers/nimble.py:693 #, python-format msgid "Successfully create volume %s" msgstr "" #: cinder/volume/drivers/nimble.py:712 #, python-format msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" msgstr "" #: cinder/volume/drivers/nimble.py:727 #, python-format msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s" msgstr "" #: cinder/volume/drivers/nimble.py:741 #, python-format msgid "Getting volume information for vol_name=%s" msgstr "" #: cinder/volume/drivers/nimble.py:749 #, python-format msgid "Successfully got volume information for volume %s" msgstr "" #: cinder/volume/drivers/nimble.py:757 #, python-format msgid "Setting volume %(vol)s to online_flag %(flag)s" msgstr "" #: cinder/volume/drivers/nimble.py:767 #, python-format msgid "Setting snapshot %(snap)s to online_flag %(flag)s" msgstr "" #: cinder/volume/drivers/nimble.py:778 #, python-format msgid "Dissociating volume %s " msgstr "" #: cinder/volume/drivers/nimble.py:787 #, python-format msgid "Deleting volume %s " msgstr "" #: cinder/volume/drivers/nimble.py:803 cinder/volume/drivers/tegile.py:251 #, python-format msgid "" "Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " "snap_description=%(desc)s" msgstr "" #: cinder/volume/drivers/nimble.py:818 #, python-format msgid "Deleting snapshot %s " msgstr "" #: cinder/volume/drivers/nimble.py:843 #, python-format msgid "" "Cloning volume from snapshot volume=%(vol)s snapshot=%(snap)s " "clone=%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=%(agent-" "type)s perfpol-name=%(perfpol-name)s encryption=%(encryption)s " "cipher=%(cipher)s multi-initiator=%(multi-initiator)s" msgstr "" #: cinder/volume/drivers/nimble.py:879 #, python-format msgid "Editing Volume %(vol)s with mask %(mask)s" msgstr "" #: cinder/volume/drivers/nimble.py:889 msgid "Getting getInitiatorGrpList" msgstr "" #: cinder/volume/drivers/nimble.py:896 msgid "Successfully retrieved InitiatorGrpList" msgstr "" #: cinder/volume/drivers/nimble.py:904 #, python-format msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" msgstr "" #: cinder/volume/drivers/nimble.py:917 #, python-format msgid "Deleting deleteInitiatorGrp %s " msgstr "" #: cinder/volume/drivers/pure.py:460 #, python-format msgid "Deleting unneeded host %(host_name)r." msgstr "" #: cinder/volume/drivers/pure.py:850 cinder/volume/drivers/pure.py:898 #, python-format msgid "Renaming existing volume %(ref_name)s to %(new_name)s" msgstr "" #: cinder/volume/drivers/pure.py:924 cinder/volume/drivers/pure.py:955 #, python-format msgid "Renaming existing snapshot %(ref_name)s to %(new_name)s" msgstr "" #: cinder/volume/drivers/pure.py:1293 #, python-format msgid "" "Skipping add target %(target_array)s to protection group %(pgname)s since" " it's already added." msgstr "" #: cinder/volume/drivers/pure.py:1317 #, python-format msgid "" "Skipping allow pgroup %(pgname)s on target array %(target_array)s since " "it is already allowed." msgstr "" #: cinder/volume/drivers/pure.py:1594 cinder/volume/drivers/pure.py:1697 #, python-format msgid "Re-using existing purity host %(host_name)r" msgstr "" #: cinder/volume/drivers/pure.py:1617 #, python-format msgid "Creating host object %(host_name)r with IQN: %(iqn)s." msgstr "" #: cinder/volume/drivers/pure.py:1701 #, python-format msgid "Creating host object %(host_name)r with WWN: %(wwn)s." 
msgstr "" #: cinder/volume/drivers/quobyte.py:153 msgid "" "The NAS file operations will be run as non privileged user in secure " "mode. Please ensure your libvirtd settings have been configured " "accordingly (see section 'OpenStack' in the Quobyte Manual." msgstr "" #: cinder/volume/drivers/quobyte.py:427 #, python-format msgid "Fixing previous mount %s which was not unmounted correctly." msgstr "" #: cinder/volume/drivers/quobyte.py:449 #, python-format msgid "Mounting volume: %s ..." msgstr "" #: cinder/volume/drivers/quobyte.py:451 #, python-format msgid "Mounting volume: %s succeeded" msgstr "" #: cinder/volume/drivers/rbd.py:680 #, python-format msgid "volume %s no longer exists in backend" msgstr "" #: cinder/volume/drivers/rbd.py:732 #, python-format msgid "RBD volume %s not found, allowing delete operation to proceed." msgstr "" #: cinder/volume/drivers/rbd.py:766 #, python-format msgid "Unable to unprotect snapshot %s." msgstr "" #: cinder/volume/drivers/rbd.py:768 #, python-format msgid "Snapshot %s does not exist in backend." msgstr "" #: cinder/volume/drivers/rbd.py:775 #, python-format msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." msgstr "" #: cinder/volume/drivers/remotefs.py:596 msgid "Cinder secure environment indicator file exists." msgstr "" #: cinder/volume/drivers/remotefs.py:613 #, python-format msgid "New Cinder secure environment indicator file created at path %s." msgstr "" #: cinder/volume/drivers/remotefs.py:877 #, python-format msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" #: cinder/volume/drivers/remotefs.py:921 #, python-format msgid "Deleting stale snapshot: %s" msgstr "" #: cinder/volume/drivers/remotefs.py:961 #, python-format msgid "" "Snapshot record for %s is not present, allowing snapshot_delete to " "proceed." msgstr "" #: cinder/volume/drivers/scality.py:273 #, python-format msgid "Begin backup of volume %s." msgstr "" #: cinder/volume/drivers/scality.py:292 #, python-format msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" #: cinder/volume/drivers/tegile.py:202 cinder/volume/drivers/emc/scaleio.py:416 #, python-format msgid "Created volume %(volname)s, volume id %(volid)s." msgstr "" #: cinder/volume/drivers/tintri.py:153 #, python-format msgid "Snapshot %s not found" msgstr "" #: cinder/volume/drivers/tintri.py:364 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:312 #, python-format msgid "Copied image to volume %s using regular download." msgstr "" #: cinder/volume/drivers/tintri.py:373 #, python-format msgid "Creating image snapshot %s" msgstr "" #: cinder/volume/drivers/tintri.py:402 #, python-format msgid "Cloning from snapshot to destination %s" msgstr "" #: cinder/volume/drivers/tintri.py:467 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:485 #, python-format msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" msgstr "" #: cinder/volume/drivers/tintri.py:483 #, python-format msgid "Cloning image %s from snapshot." msgstr "" #: cinder/volume/drivers/tintri.py:502 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:517 #, python-format msgid "Checking image clone %s from glance share." 
msgstr "" #: cinder/volume/drivers/tintri.py:532 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:540 #, python-format msgid "Image will locally be converted to raw %s" msgstr "" #: cinder/volume/drivers/tintri.py:551 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:560 #, python-format msgid "Performing post clone for %s" msgstr "" #: cinder/volume/drivers/tintri.py:563 #: cinder/volume/drivers/hitachi/hnas_nfs.py:243 #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:575 #, python-format msgid "Resizing file to %sG" msgstr "" #: cinder/volume/drivers/tintri.py:718 #, python-format msgid "Manage volume %s" msgstr "" #: cinder/volume/drivers/tintri.py:751 #, python-format msgid "Unmanage volume %s" msgstr "" #: cinder/volume/drivers/vzstorage.py:325 #, python-format msgid "Skipping deletion of volume %s as it does not exist." msgstr "" #: cinder/volume/drivers/xio.py:688 #, python-format msgid "Volume %s presented." msgstr "" #: cinder/volume/drivers/xio.py:892 #, python-format msgid "Clone %s created." msgstr "" #: cinder/volume/drivers/xio.py:1160 #, python-format msgid "Volume %s created" msgstr "" #: cinder/volume/drivers/xio.py:1217 #, python-format msgid "Successfully deleted %s." msgstr "" #: cinder/volume/drivers/xio.py:1258 #, python-format msgid "volume %(name)s extended to %(size)d." msgstr "" #: cinder/volume/drivers/xio.py:1270 #, python-format msgid "Volume %s retyped." msgstr "" #: cinder/volume/drivers/xio.py:1289 #, python-format msgid "Volume %s converted." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:179 #, python-format msgid "CloudByte API executed successfully for command [%s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:284 #, python-format msgid "CloudByte operation [%(operation)s] succeeded for volume [%(cb_volume)s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:883 #, python-format msgid "" "Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack " "volume [%(stack_vol)s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:925 #, python-format msgid "" "Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " "OpenStack volume [%(stack_vol)s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:972 #, python-format msgid "" "Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume " "[%(cb_vol)s] and OpenStack volume [%(stack_vol)s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:1063 #, python-format msgid "" "Created a clone [%(cb_clone)s] at CloudByte snapshot path [%(cb_snap)s] " "w.r.t parent OpenStack volume [%(stack_vol)s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:1116 #, python-format msgid "" "Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " "[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." msgstr "" #: cinder/volume/drivers/cloudbyte/cloudbyte.py:1218 #, python-format msgid "" "Successfully updated CloudByte volume [%(cb_vol)s] corresponding to " "OpenStack volume [%(ops_vol)s]." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:177 #, python-format msgid "open_connection to %(ssn)s at %(ip)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:206 #, python-format msgid "open_connection: Updating API version to %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:877 #, python-format msgid "Created volume %(instanceId)s: %(name)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:966 msgid "Found failover volume. 
Competing failover." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:970 #, python-format msgid "Imported %(fail)s to %(guid)s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1123 #, python-format msgid "Creating server %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1152 #, python-format msgid "SC server created %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1289 #, python-format msgid "Volume mappings for %(name)s: %(mappings)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:1991 #, python-format msgid "Switching volume %(vol)s to profile %(prof)s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2105 #, python-format msgid "Profile %s has been deleted." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2179 #, python-format msgid "Added %s to cg." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2197 #, python-format msgid "Removed %s from cg." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2220 #, python-format msgid "Adding volumes to cg %s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2223 #, python-format msgid "Removing volumes from cg %s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2267 #, python-format msgid "CreateReplay success %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2490 #, python-format msgid "Volume %s unmanaged." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2590 #, python-format msgid "Replication %(vol)s to %(dest)s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_api.py:2663 #, python-format msgid "Replication created for %(volname)s to %(destsc)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:208 #, python-format msgid "Non fatal cleanup error: %s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:655 #, python-format msgid "Created Consistency Group %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:718 #, python-format msgid "Updated Consistency Group %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:778 #, python-format msgid "Deleting snapshot %(ss)s from %(pro)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:906 msgid "Retype was to same Storage Profile." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1024 #, python-format msgid "replication failover secondary is %(ssn)s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1053 #, python-format msgid "Failing backend to %s" msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1067 #, python-format msgid "Failing over volume %(id)s replication: %(res)s." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_common.py:1163 #, python-format msgid "" "manage_existing_snapshot: snapshot %(exist)s on volume %(volume)s has " "been renamed to %(id)s and is now managed by Cinder." msgstr "" #: cinder/volume/drivers/dell/dell_storagecenter_iscsi.py:77 #, python-format msgid "initialize_ connection: %(vol)s:%(initiator)s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:109 #: cinder/volume/drivers/emc/emc_vmax_utils.py:73 msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem " "package." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:198 #, python-format msgid "" "Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: " "%(name)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:260 #, python-format msgid "Deleting Volume: %(volume)s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:264 #, python-format msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:287 #, python-format msgid "Delete Snapshot: %(snapshotName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:325 #, python-format msgid "Unmap volume: %(volume)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:330 #, python-format msgid "Volume %s is not mapped. No volume to unmap." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:379 #, python-format msgid "Initialize connection: %(volume)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:395 #, python-format msgid "" "Volume %(volume)s is already mapped. The device number is " "%(deviceNumber)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:533 #, python-format msgid "Terminate connection: %(volume)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:679 #, python-format msgid "" "Capacity stats for SRP pool %(poolName)s on array %(arrayName)s " "total_capacity_gb=%(total_capacity_gb)lu, " "free_capacity_gb=%(free_capacity_gb)lu" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:710 #, python-format msgid "Migrating using retype Volume: %(volume)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:926 #: cinder/volume/drivers/emc/emc_vmax_common.py:2837 #, python-format msgid "" "Adding volume: %(volumeName)s to default storage group for FAST policy: " "%(fastPolicyName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:1246 #, python-format msgid "Volume status is: %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2053 #, python-format msgid "" "Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " "%(sourceName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2356 #, python-format msgid "Delete Snapshot: %(snapshot)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2379 #, python-format msgid "Snapshot: %(snapshot)s: not found on the array." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2414 #, python-format msgid "Create Consistency Group: %(group)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2454 #, python-format msgid "Delete Consistency Group: %(group)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2559 #, python-format msgid "" "Create snapshot for Consistency Group %(cgId)s cgsnapshotID: " "%(cgsnapshot)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2595 #, python-format msgid "Create target consistency group %(targetCg)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:2695 #, python-format msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3217 #, python-format msgid "" "FAST: capacity stats for policy %(fastPolicyName)s on array " "%(arrayName)s. total_capacity_gb=%(total_capacity_gb)lu, " "free_capacity_gb=%(free_capacity_gb)lu." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3230 #, python-format msgid "" "NON-FAST: capacity stats for pool %(poolName)s on array %(arrayName)s " "total_capacity_gb=%(total_capacity_gb)lu, " "free_capacity_gb=%(free_capacity_gb)lu." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3662 #, python-format msgid "Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3700 #, python-format msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:3813 #, python-format msgid "The target instance device id is: %(deviceid)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_common.py:4165 #, python-format msgid "" "Update Consistency Group: %(group)s. This adds and/or removes volumes " "from a CG." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_fast.py:598 #, python-format msgid "Storage group not associated with the policy. Exception is %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_https.py:97 msgid "" "Module PyWBEM not installed. Install PyWBEM using the python-pywbem " "package." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:180 #, python-format msgid "Leaving initialize_connection: %s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:199 msgid "ISCSI provider_location not stored, using discovery." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:218 #, python-format msgid "smis_do_iscsi_discovery is: %(out)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:265 #, python-format msgid "location is: %(location)s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:277 #, python-format msgid "ISCSI properties: %(properties)s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:279 #, python-format msgid "ISCSI volume is: %(volume)s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:284 #, python-format msgid "AUTH properties: %(authProps)s" msgstr "" #: cinder/volume/drivers/emc/emc_vmax_iscsi.py:294 #, python-format msgid "AUTH properties: %s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:563 #, python-format msgid "Successfully added %(volumeName)s to %(sgGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:725 #, python-format msgid "Found existing masking view: %(maskingViewName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:790 #, python-format msgid "Created new storage group: %(storageGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:863 #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array " "%(storageSystemName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:882 #, python-format msgid "Created new initiator group name: %(igGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:886 #, python-format msgid "Using existing initiator group name: %(igGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1075 #, python-format msgid "Created new masking view : %(maskingViewName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1202 #, python-format msgid "Port group instance name is %(foundPortGroupInstanceName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1320 #, python-format msgid "The storage group found is %(foundStorageGroupInstanceName)s." 
msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1434 #, python-format msgid "" "Initiator Name(s) %(initiatorNames)s are not on array " "%(storageSystemName)s. " msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1643 #, python-format msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:1763 #, python-format msgid "Volume %(volumeName)s not in any storage group." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2125 #, python-format msgid "Masking view %(maskingViewName)s successfully deleted." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_masking.py:2433 #, python-format msgid "Storage Group %(storageGroupName)s successfully deleted." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:517 #, python-format msgid "" "The volume belongs to more than one storage group. Returning storage " "group %(sgName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1565 #, python-format msgid "Volume %(volume)s does not have meta device members." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:1993 #, python-format msgid "The pool_name from extraSpecs is %(pool)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2039 #, python-format msgid "Returning random Port Group: %(portGroupName)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2257 #, python-format msgid "" "No replication synchronization session found associated with source " "volume %(source)s on %(storageSystem)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2275 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1308 #, python-format msgid "Updating status for CG: %(id)s." msgstr "" #: cinder/volume/drivers/emc/emc_vmax_utils.py:2283 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1316 #, python-format msgid "No volume found for CG: %(cg)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:733 #, python-format msgid "Using security file in %s for authentication" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:741 msgid "Plain text credentials are being used for authentication" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:744 msgid "" "Neither security file nor plain text credentials are specified. Security " "file under home directory will be used for authentication if present." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:753 #, python-format msgid "iscsi_initiators: %s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1091 #, python-format msgid "Consistency group %s was deleted successfully." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1206 #, python-format msgid "Snapshot %s was deleted successfully." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:1401 #, python-format msgid "Cancelling Migration from LUN %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2077 #, python-format msgid "Toggle san_ip from %(current)s to %(new)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2169 msgid "" "initiator_auto_registration: False. Initiator auto registration is not " "enabled. Please register initiator manually." 
msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2358 #, python-format msgid "" "Create Volume: %(volume)s Size: %(size)s pool: %(pool)s provisioning: " "%(provisioning)s tiering: %(tiering)s " msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2893 #: cinder/volume/drivers/emc/emc_vnx_cli.py:4557 #, python-format msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:2922 #, python-format msgid "Delete Snapshot: %(snapshot)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3161 #: cinder/volume/drivers/prophetstor/dplcommon.py:869 #, python-format msgid "Start to create consistency group: %(group_name)s id: %(id)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3184 #: cinder/volume/drivers/prophetstor/dplcommon.py:898 #, python-format msgid "Start to delete consistency group: %(cg_name)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3244 #: cinder/volume/drivers/prophetstor/dplcommon.py:926 #, python-format msgid "Start to create cgsnapshot for consistency group: %(group_name)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3270 #: cinder/volume/drivers/prophetstor/dplcommon.py:954 #, python-format msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3448 #, python-format msgid "Get ISCSI targets %(tg)s to register initiator %(in)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3475 #, python-format msgid "Get FC targets %(tg)s to register initiator %(in)s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3548 #, python-format msgid "iSCSI Initiators %(in)s of %(ins)s need registration." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3567 #, python-format msgid "FC Initiators %(in)s of %(ins)s need registration" msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:3795 #, python-format msgid "Storage Group %s was empty." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4083 #, python-format msgid "Successfully setup replication for %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4184 #, python-format msgid "Replication is not configured on backend: %s." msgstr "" #: cinder/volume/drivers/emc/emc_vnx_cli.py:4244 #: cinder/volume/drivers/emc/emc_vnx_cli.py:4270 #, python-format msgid "Consistency group %(cg)s is created successfully." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:113 #, python-format msgid "" "REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify " "server's certificate: %(verify_cert)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:133 #, python-format msgid "" "Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " "%(pool_id)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:142 #, python-format msgid "Protection domain name: %(domain_name)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:146 #, python-format msgid "Protection domain id: %(domain_id)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:241 msgid "" "Using extra_specs for defining QoS specs will be deprecated in the N " "release of OpenStack. Please use QoS specs." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:281 #, python-format msgid "" "Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage " "pool id: %(pool_id)s, protection domain id: %(domain_id)s, protection " "domain name: %(domain_name)s." 
msgstr "" #: cinder/volume/drivers/emc/scaleio.py:322 #: cinder/volume/drivers/emc/scaleio.py:746 #, python-format msgid "ScaleIO get domain id by name request: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:345 #: cinder/volume/drivers/emc/scaleio.py:772 #, python-format msgid "Domain id is %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:357 #: cinder/volume/drivers/emc/scaleio.py:783 #, python-format msgid "ScaleIO get pool id by name request: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:380 #: cinder/volume/drivers/emc/scaleio.py:805 #, python-format msgid "Pool id is %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:395 #, python-format msgid "Params for add volume request: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:409 #, python-format msgid "Add volume response: %s" msgstr "" #: cinder/volume/drivers/emc/scaleio.py:439 #, python-format msgid "Snapshot volume %(vol)s into snapshot %(id)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:448 #: cinder/volume/drivers/emc/scaleio.py:1141 #: cinder/volume/drivers/emc/scaleio.py:1196 #, python-format msgid "Snapshot volume response: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:475 msgid "Token is invalid, going to re-login and get a new one." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:490 #, python-format msgid "Going to perform request again %s with valid token." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:517 #, python-format msgid "" "ScaleIO create volume from snapshot: snapshot %(snapname)s to volume " "%(volname)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:545 #, python-format msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:556 #, python-format msgid "Change volume capacity request: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:591 #, python-format msgid "" "ScaleIO create cloned volume: source volume %(src)s to target volume " "%(tgt)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:624 #, python-format msgid "Trying to unmap volume from all sdcs before deletion: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:676 msgid "ScaleIO delete snapshot." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:694 #, python-format msgid "Volume type is %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:697 #, python-format msgid "iops limit is: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:700 #, python-format msgid "Bandwidth limit is: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:748 #, python-format msgid "username: %(username)s, verify_cert: %(verify)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:758 #, python-format msgid "Get domain by name response: %s" msgstr "" #: cinder/volume/drivers/emc/scaleio.py:822 #, python-format msgid "Query capacity stats response: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:829 #, python-format msgid "free capacity of pool %(pool)s is: %(free)s, total capacity: %(total)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:851 #, python-format msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:859 #, python-format msgid "Backend name is %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:913 msgid "Calling os-brick to detach ScaleIO volume." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:921 #, python-format msgid "" "ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " "image id: %(id)s." 
msgstr "" #: cinder/volume/drivers/emc/scaleio.py:941 #, python-format msgid "" "ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s " "image meta: %(meta)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:975 #, python-format msgid "Renaming %(id)s from %(current_name)s to %(new_name)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1000 #, python-format msgid "ScaleIO rename volume request: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1020 #, python-format msgid "ScaleIO volume %(vol)s was renamed to %(new_name)s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1032 #: cinder/volume/drivers/emc/scaleio.py:1047 #, python-format msgid "Get Volume response: %s" msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1086 #, python-format msgid "ScaleIO get volume by id request: %s." msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1104 msgid "Creating Consistency Group" msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1113 msgid "Deleting Consistency Group" msgstr "" #: cinder/volume/drivers/emc/scaleio.py:1222 msgid "ScaleIO snapshot group of volumes" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:399 #, python-format msgid "XtremIO SW version %s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:455 #, python-format msgid "volume %s doesn't exist" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:466 #, python-format msgid "snapshot %s doesn't exist" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:544 #, python-format msgid "Volume with the name %s wasn't found, can't unmanage" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:591 #, python-format msgid "" "Created lun-map:\n" "%s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:593 #, python-format msgid "Volume already mapped, retrieving %(ig)s, %(vol)s" msgstr "" #: cinder/volume/drivers/emc/xtremio.py:830 msgid "initiator has no password while using chap,adding it" msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:451 #, python-format msgid "" "_delete_volume_setting, volumename:%(volumename)s, volume not found on " "ETERNUS. " msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:693 #, python-format msgid "" "initialize_connection, volume: %(volume)s, target_lun: %(target_lun)s, " "target_luns: %(target_luns)s, Volume is already mapped." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1526 msgid "_delete_copysession, The copysession was already completed." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1880 #, python-format msgid "_unmap_lun, volumename:%(volumename)s, volume not found." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_common.py:1891 #, python-format msgid "_unmap_lun, volumename: %(volumename)s, volume is not mapped." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:48 #, python-format msgid "create_volume, volume id: %s, Enter method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:61 #, python-format msgid "create_volume, info: %s, Exit method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:67 #, python-format msgid "" "create_volume_from_snapshot, volume id: %(vid)s, snap id: %(sid)s, Enter " "method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:82 #, python-format msgid "create_volume_from_snapshot, info: %s, Exit method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:89 #, python-format msgid "" "create_cloned_volume, target volume id: %(tid)s, source volume id: " "%(sid)s, Enter method." 
msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:105 #, python-format msgid "create_cloned_volume, info: %s, Exit method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:112 #, python-format msgid "delete_volume, volume id: %s, Enter method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:117 #, python-format msgid "delete_volume, delete: %s, Exit method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:123 #, python-format msgid "create_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:129 #, python-format msgid "create_snapshot, info: %s, Exit method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:134 #, python-format msgid "delete_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:140 #, python-format msgid "delete_snapshot, delete: %s, Exit method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:158 #, python-format msgid "" "initialize_connection, volume id: %(vid)s, initiator: %(initiator)s, " "Enter method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:164 #, python-format msgid "initialize_connection, info: %s, Exit method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:170 #, python-format msgid "" "terminate_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter" " method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:176 #, python-format msgid "terminate_connection, unmap: %s, Exit method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:198 #, python-format msgid "extend_volume, volume id: %s, Enter method." msgstr "" #: cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py:203 #, python-format msgid "extend_volume, used pool name: %s, Exit method." msgstr "" #: cinder/volume/drivers/hitachi/hbsd_common.py:255 #, python-format msgid "\t%(prefix)-35s : %(version)s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_common.py:259 #, python-format msgid "\t%(param)-35s : %(value)s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_common.py:264 #: cinder/volume/drivers/hitachi/hbsd_fc.py:86 #: cinder/volume/drivers/hitachi/hbsd_horcm.py:1399 #: cinder/volume/drivers/hitachi/hbsd_iscsi.py:97 #, python-format msgid "\t%(name)-35s : %(value)s" msgstr "" #: cinder/volume/drivers/hitachi/hbsd_common.py:269 #, python-format msgid "\t%(request)-35s : %(value)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_backend.py:647 #, python-format msgid "del_iscsi_conn: hlun not found %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_backend.py:733 #, python-format msgid "targetlist: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:64 #, python-format msgid "Parse_loc: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:97 #: cinder/volume/drivers/hitachi/hnas_nfs.py:78 #, python-format msgid "%(element)s: %(val)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:193 #, python-format msgid "Backend type: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:243 #, python-format msgid "Using service label: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:249 #, python-format msgid "Available services: %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:274 #, python-format msgid "_get_service_target hdp: %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:275 #, python-format msgid "config[services]: %s." 
msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:283 #, python-format msgid "Target is %(map)s! Targetlist = %(tgtl)s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:319 #, python-format msgid "Using target label: %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:338 #, python-format msgid "Retrieving secret for service: %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:357 #, python-format msgid "Set tgt CHAP secret for service: %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:366 msgid "CHAP authentication disabled." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:371 #, python-format msgid "Retrieving target for service: %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:418 #, python-format msgid "stats: stats: %s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:441 #, python-format msgid "HDP list: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:503 #: cinder/volume/drivers/hitachi/hnas_nfs.py:520 #, python-format msgid "Configured pools: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:506 #, python-format msgid "do_setup: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:510 #, python-format msgid "iSCSI portal found for service: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:565 #, python-format msgid "create_volume: create_lu returns %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:571 #, python-format msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:620 #, python-format msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:636 #, python-format msgid "delete lun loc %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:664 #, python-format msgid "initialize volume %(vol)s connector %(conn)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:698 #, python-format msgid "initiate: connection %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:732 #, python-format msgid "terminate: connection %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:976 #, python-format msgid "Set newly managed Cinder volume name to %(name)s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_iscsi.py:1004 #, python-format msgid "" "Cinder ISCSI volume with current path %(path)s is no longer being " "managed. The new name is %(unm)s." msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:215 #, python-format msgid "Get service: %(lbl)s->%(svc)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:219 #, python-format msgid "Available services: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:246 #, python-format msgid "LUN %(id)s extended to %(size)s GB." 
msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:436 #, python-format msgid "Driver stats: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:460 #, python-format msgid "nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s FSID: %(hdp)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:474 #, python-format msgid "Review shares: %s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:482 #, python-format msgid "share: %(share)s -> %(info)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:494 #, python-format msgid "Save service info for %(svc)s -> %(hdp)s, %(path)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:506 #, python-format msgid "share: %s incorrect entry" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:532 #, python-format msgid "" "Cloning with volume_name %(vname)s clone_name %(cname)s export_path " "%(epath)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:578 #, python-format msgid "Volume service: %(label)s. Casted to: %(loc)s" msgstr "" #: cinder/volume/drivers/hitachi/hnas_nfs.py:790 #, python-format msgid "Cinder NFS volume with current path %(cr)s is no longer being managed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:433 #, python-format msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:752 #, python-format msgid "" "Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " "%(cpg)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:760 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1121 #, python-format msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:766 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1129 #, python-format msgid "Virtual volume %(disp)s '%(new)s' is being retyped." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:773 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1139 #, python-format msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:792 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1159 #, python-format msgid "Virtual volume %(disp)s '%(new)s' is now being managed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:865 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1241 #, python-format msgid "Snapshot '%(ref)s' renamed to '%(new)s'." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:870 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1250 #, python-format msgid "Snapshot %(disp)s '%(new)s' is now being managed." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:937 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1338 #, python-format msgid "" "Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to" " '%(new)s'." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:963 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1368 #, python-format msgid "" "Snapshot %(disp)s '%(vol)s' is no longer managed. Snapshot renamed to " "'%(new)s'." 
msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1372 #, python-format msgid "3PAR vlun %(name)s not found on host %(host)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1467 #, python-format msgid "" "3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was " "not deleted because: %(reason)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1586 #, python-format msgid "Flash Cache policy set to %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1713 #, python-format msgid "Using pool %(pool)s instead of %(cpg)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:1966 #, python-format msgid "Creating temp snapshot %(snap)s from volume %(vol)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2111 #, python-format msgid "Found a temporary snapshot %(name)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2377 #, python-format msgid "3PAR driver cannot perform migration. Retype exception: %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2406 #, python-format msgid "Volume name changed from %(tmp)s to %(orig)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2490 #, python-format msgid "Completed: convert_to_base_volume: id=%s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2612 #, python-format msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2633 #, python-format msgid "Converting %(volume_name)s to thin provisioning with userCPG=%(new_cpg)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2638 #, python-format msgid "" "Converting %(volume_name)s to thin dedup provisioning with " "userCPG=%(new_cpg)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2643 #, python-format msgid "Converting %(volume_name)s to full provisioning with userCPG=%(new_cpg)s" msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:2659 #, python-format msgid "tunevv failed because the volume '%s' has snapshots." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3566 #, python-format msgid "" "Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to " "%(new_snap_cpg)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3577 #, python-format msgid "Modifying %s comments." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_common.py:3586 #, python-format msgid "" "Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to " "%(old_snap_cpg)s." msgstr "" #: cinder/volume/drivers/hpe/hpe_3par_fc.py:316 #: cinder/volume/drivers/netapp/dataontap/block_base.py:935 msgid "Need to remove FC Zone, building initiator target map" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:341 #, python-format msgid "HPELeftHand API version %s" msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:982 #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume " "is from a different backend." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:987 #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:993 #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because cluster " "exists in different management group." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1007 #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the " "volume has been exported." 
msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1018 #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because the " "volume has snapshots." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1026 #, python-format msgid "" "Cannot provide backend assisted migration for volume: %s because volume " "does not exist in this management group." msgstr "" #: cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py:1059 #, python-format msgid "Volume name changed from %(tmp)s to %(orig)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:259 #, python-format msgid "volume: %(volume)s, lun params: %(params)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:612 #, python-format msgid "QoS: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:659 #, python-format msgid "" "create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " "%(tgt_lun_id)s, copy_name: %(copy_name)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:777 #, python-format msgid "" "New size is equal to the real size from backend storage, no need to " "extend. realsize: %(oldsize)s, newsize: %(newsize)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:793 #, python-format msgid "Extend volume: %(volumename)s, oldsize: %(oldsize)s, newsize: %(newsize)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:827 #, python-format msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:913 #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartpartition from (name: %(old_name)s, id: " "%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) success." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:930 #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartcache from (name: %(old_name)s, id: " "%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) successfully." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:940 #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smarttier policy from %(old_policy)s to " "%(new_policy)s success." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:956 #, python-format msgid "" "Retype LUN(id: %(lun_id)s) smartqos from %(old_qos_value)s to %(new_qos)s" " success." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1626 #, python-format msgid "initiator name: %(initiator_name)s, LUN ID: %(lun_id)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1635 #, python-format msgid "" "initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: " "%(target_ip)s, portgroup_id: %(portgroup_id)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1659 #, python-format msgid "initialize_connection, host lun id is: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1688 #, python-format msgid "initialize_connection success. Return data: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1701 #, python-format msgid "terminate_connection: initiator name: %(ini)s, LUN ID: %(lunid)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1816 #, python-format msgid "initialize_connection, initiator: %(wwpns)s, LUN ID: %(lun_id)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1886 #, python-format msgid "initialize_connection, metadata is: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1920 #, python-format msgid "Return FC info is: %s." 
msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1936 #, python-format msgid "The same hostid is: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:1957 #, python-format msgid "terminate_connection: wwpns: %(wwns)s, LUN ID: %(lun_id)s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:2025 #, python-format msgid "Detach Volume, metadata is: %s." msgstr "" #: cinder/volume/drivers/huawei/huawei_driver.py:2033 #, python-format msgid "terminate_connection, return data is: %s." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:61 #, python-format msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:115 #, python-format msgid "initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:187 #, python-format msgid "Remote return FC info is: %s." msgstr "" #: cinder/volume/drivers/huawei/hypermetro.py:202 #, python-format msgid "" "terminate_connection_fc: volume name: %(volume)s, wwpns: %(wwns)s, " "lun_id: %(lunid)s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:87 #, python-format msgid "" "\n" "\n" "\n" "\n" "Request URL: %(url)s\n" "\n" "Call Method: %(method)s\n" "\n" "Request Data: %(data)s\n" "\n" "Response Data:%(res)s\n" "\n" msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:423 #, python-format msgid "" "do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " "%(lun_id)s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:520 #, python-format msgid "" "create_hostgroup_with_check. hostgroup name: %(name)s, hostgroup id: " "%(id)s" msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:531 #, python-format msgid "" "Failed to create hostgroup: %(name)s. Please check if it exists on the " "array." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:544 #, python-format msgid "" "create_hostgroup_with_check. Create hostgroup success. hostgroup name: " "%(name)s, hostgroup id: %(id)s" msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:634 #, python-format msgid "add_host_with_check. host name: %(name)s, host id: %(id)s" msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:645 #, python-format msgid "Failed to create host: %(name)s. Check if it exists on the array." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:658 #, python-format msgid "" "add_host_with_check. create host success. host name: %(name)s, host id: " "%(id)s" msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:789 msgid "Use CHAP when adding initiator to host." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:795 msgid "Use ALUA when adding initiator to host." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1103 #, python-format msgid "New str info is: %s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1118 #, python-format msgid "_get_tgt_iqn: iSCSI target iqn is: %s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1207 #, python-format msgid "_get_tgt_ip_from_portgroup: Get ip: %s." msgstr "" #: cinder/volume/drivers/huawei/rest_client.py:1249 #, python-format msgid "Get the default ip: %s." msgstr "" #: cinder/volume/drivers/huawei/smartx.py:48 #, python-format msgid "The QoS sepcs is: %s." msgstr "" #: cinder/volume/drivers/ibm/flashsystem_common.py:715 #, python-format msgid "Volume %s is mapping to multiple hosts." 
msgstr "" #: cinder/volume/drivers/ibm/flashsystem_fc.py:190 #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:669 #, python-format msgid "WWPN on node %(node)s: %(wwpn)s." msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:231 #, python-format msgid "Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" msgstr "" #: cinder/volume/drivers/ibm/gpfs.py:361 #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS encryption-at-rest feature not " "enabled in cluster daemon level %(cur)s - must be at least at level " "%(min)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/replication.py:107 #, python-format msgid "Could not find replica to delete of volume %(vol)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1488 #, python-format msgid "Unable to get remote copy information for volume %s" msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:1522 #, python-format msgid "Tried to delete non-existent vdisk %s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py:2441 #, python-format msgid "" "_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " "copy operation: orig=%(orig)s new=%(new)s." msgstr "" #: cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py:262 #: cinder/volume/drivers/netapp/eseries/library.py:1223 msgid "Need to remove FC Zone, building initiator target map." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:483 #, python-format msgid "Create Volume %(volume_id)s completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:893 #, python-format msgid "Delete Volume %(volume_id)s completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:945 #, python-format msgid "Create Cloned Volume %(volume_id)s completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:997 #, python-format msgid "Create export done from Volume %(volume_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1010 #, python-format msgid "" "Successfully update volume stats. backend: %(volume_backend_name)s, " "vendor: %(vendor_name)s, driver version: %(driver_version)s, storage " "protocol: %(storage_protocol)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1110 #, python-format msgid "" "Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " "%(raid_snapshot_id)s, volume: %(volume)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1142 #, python-format msgid "Delete Snapshot %(snapshot_id)s completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1207 #, python-format msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1284 #, python-format msgid "" "Successfully initialized connection. target_wwn: %(target_wwn)s, " "initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1410 #, python-format msgid "Successfully initialized connection with volume: %(volume_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1570 #, python-format msgid "Successfully extended volume %(volume_id)s to size %(size)s." 
msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1606 #, python-format msgid "Successfully terminated connection for volume: %(volume_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1626 #, python-format msgid "Migrate Volume %(volume_id)s completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1775 #, python-format msgid "Rename Volume %(volume_id)s completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1806 #, python-format msgid "Unmanage volume %(volume_id)s completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1888 #, python-format msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1907 #, python-format msgid "Retype Volume %(volume_id)s is completed." msgstr "" #: cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py:1935 #, python-format msgid "Update migrated volume %(new_volume)s completed." msgstr "" #: cinder/volume/drivers/netapp/common.py:78 #, python-format msgid "OpenStack OS Version Info: %(info)s" msgstr "" #: cinder/volume/drivers/netapp/common.py:95 #, python-format msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." msgstr "" #: cinder/volume/drivers/netapp/common.py:113 #, python-format msgid "" "NetApp driver of family %(storage_family)s and protocol " "%(storage_protocol)s loaded." msgstr "" #: cinder/volume/drivers/netapp/utils.py:412 #, python-format msgid "No rpm info found for %(pkg)s package." msgstr "" #: cinder/volume/drivers/netapp/utils.py:421 #, python-format msgid "Could not run rpm command: %(msg)s." msgstr "" #: cinder/volume/drivers/netapp/utils.py:432 #, python-format msgid "No dpkg-query info found for %(pkg)s package." msgstr "" #: cinder/volume/drivers/netapp/utils.py:450 #, python-format msgid "Could not run dpkg-query command: %(msg)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:544 #, python-format msgid "No need to extend volume %s as it is already the requested new size." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:564 #, python-format msgid "Resizing LUN %s using clone operation." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:598 #, python-format msgid "Post clone resize LUN %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:665 #, python-format msgid "LUN with given ref %s need not be renamed during manage operation." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:677 #, python-format msgid "" "Manage operation completed for LUN with new path %(path)s and uuid " "%(uuid)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:730 #, python-format msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." msgstr "" #: cinder/volume/drivers/netapp/dataontap/block_base.py:1073 #, python-format msgid "Backing consistency group snapshot %s available for deletion" msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:319 #, python-format msgid "Registering image in cache %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:351 #, python-format msgid "Cloning from cache to destination %s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:382 #, python-format msgid "Cleaning cache for share %s." 
msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:497 #, python-format msgid "Cloning image %s from cache" msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_base.py:986 #, python-format msgid "Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:433 #, python-format msgid "Copied image %(img)s to volume %(vol)s using local image cache." msgstr "" #: cinder/volume/drivers/netapp/dataontap/nfs_cmode.py:441 #, python-format msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." msgstr "" #: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:440 #, python-format msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:474 #, python-format msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:502 #, python-format msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/ssc_cmode.py:508 #, python-format msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" msgstr "" #: cinder/volume/drivers/netapp/dataontap/client/client_base.py:180 #, python-format msgid "Resizing LUN %s directly to new size." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:238 #, python-format msgid "" "The multi-attach E-Series host group '%(label)s' already exists with " "clusterRef %(clusterRef)s" msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:244 #, python-format msgid "" "Created multi-attach E-Series host group %(label)s with clusterRef " "%(clusterRef)s" msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:266 msgid "Embedded mode detected." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:269 msgid "Proxy mode detected." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:327 #, python-format msgid "System with controller addresses [%s] is not registered with web service." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:349 msgid "Waiting for web service to validate the configured password." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:352 msgid "Waiting for web service array communication." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:366 #, python-format msgid "System %(id)s has %(status)s status." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:589 #: cinder/volume/drivers/netapp/eseries/library.py:628 #: cinder/volume/drivers/netapp/eseries/library.py:652 #, python-format msgid "Created volume with label %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:679 #, python-format msgid "Copying src vol %(src)s to dest vol %(dst)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:695 #, python-format msgid "Vol copy job completed for dest %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:713 #, python-format msgid "Copy job to dest vol %s completed." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:901 #, python-format msgid "Created snap grp with label %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1373 #, python-format msgid "Creating host with ports %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1459 #, python-format msgid "E-series proxy API version %s does not support autosupport logging." 
msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1503 #, python-format msgid "Updating storage service catalog information for backend '%s'" msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1518 #, python-format msgid "" "E-series proxy API version %(version)s does not support full set of SSC " "extra specs. The proxy version must be at at least %(min_version)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1745 #, python-format msgid "" "Waiting for volume expansion of %(vol)s to complete, current remaining " "actions are %(action)s. ETA: %(eta)s mins." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1939 #, python-format msgid "Deleting volume %s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1975 #, python-format msgid "Removing volume %(v)s from consistency group %(cg)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:1982 #, python-format msgid "Adding volume %(v)s to consistency group %(cg)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:2058 #, python-format msgid "Volume with given ref %s need not be renamed during manage operation." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:2063 #, python-format msgid "" "Manage operation completed for volume with new label %(label)s and wwn " "%(wwn)s." msgstr "" #: cinder/volume/drivers/netapp/eseries/library.py:2101 #, python-format msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:133 #, python-format msgid "Ignored target creation error \"%s\" while ensuring export." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:143 #, python-format msgid "Ignored target group creation error \"%s\" while ensuring export." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:155 #, python-format msgid "Ignored target group member addition error \"%s\" while ensuring export." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:226 #: cinder/volume/drivers/nexenta/nfs.py:469 #: cinder/volume/drivers/nexenta/ns5/iscsi.py:242 #, python-format msgid "Extending volume: %(id)s New size: %(size)s GB" msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:242 #, python-format msgid "Volume %s does not exist, it seems it was already deleted." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:246 #, python-format msgid "Volume %s will be deleted later." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:494 #, python-format msgid "Snapshot %s does not exist, it seems it was already deleted." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:497 #, python-format msgid "Snapshot %s has dependent clones, will be deleted later." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:505 #, python-format msgid "" "Origin volume %s appears to be removed, try to remove it from backend if " "it is there." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:611 #, python-format msgid "Ignored LU creation error \"%s\" while ensuring export." msgstr "" #: cinder/volume/drivers/nexenta/iscsi.py:620 #, python-format msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export." msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:448 #, python-format msgid "Folder %s does not exist, it was already deleted." msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:458 #, python-format msgid "Snapshot %s does not exist, it was already deleted." 
msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:518 #, python-format msgid "Snapshot %(folder)s@%(snapshot)s does not exist, it was already deleted." msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:526 #, python-format msgid "" "Snapshot %(folder)s@%(snapshot)s has dependent clones, it will be deleted" " later." msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:560 #, python-format msgid "Creating regular file: %s.This may take some time." msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:571 #, python-format msgid "Regular file: %s created." msgstr "" #: cinder/volume/drivers/nexenta/nfs.py:685 #, python-format msgid "Already mounted: %s" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:259 #, python-format msgid "Creating snapshot %(snap)s of volume %(vol)s" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:278 #: cinder/volume/drivers/zfssa/zfssanfs.py:239 #, python-format msgid "Deleting snapshot: %s" msgstr "" #: cinder/volume/drivers/nexenta/ns5/iscsi.py:305 #, python-format msgid "Creating volume from snapshot: %s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:205 #, python-format msgid "Flexvisor succeeded to unassign volume %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:239 #, python-format msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:275 #, python-format msgid "Prefer use target wwpn %(wwpn)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:327 #, python-format msgid "%(volume)s assign type fibre_channel, properties %(properties)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:334 #, python-format msgid "" "Connect initialization info: {driver_volume_type: fibre_channel, data: " "%(properties)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_fc.py:355 #, python-format msgid "terminate_connection volume: %(volume)s, connector: %(con)s" msgstr "" #: cinder/volume/drivers/prophetstor/dpl_iscsi.py:134 #, python-format msgid "Flexvisor already unassigned volume %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:811 #, python-format msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:834 #, python-format msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1059 #, python-format msgid "Flexvisor succeeded to create volume %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1142 #: cinder/volume/drivers/prophetstor/dplcommon.py:1186 #, python-format msgid "Flexvisor succeeded to create volume %(id)s from snapshot." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1230 #, python-format msgid "Flexvisor succeeded to clone volume %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1277 #, python-format msgid "Flexvisor volume %(id)s does not exist." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1315 #, python-format msgid "Flexvisor succeeded to extend volume %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1369 #, python-format msgid "Flexvisor snapshot %(id)s not existed." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1376 #, python-format msgid "Flexvisor succeeded to delete snapshot %(id)s." msgstr "" #: cinder/volume/drivers/prophetstor/dplcommon.py:1465 msgid "Activate Flexvisor cinder volume driver." 
msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:56 #, python-format msgid "Running with vmemclient version: %s" msgstr "" #: cinder/volume/drivers/violin/v7000_common.py:111 #, python-format msgid "CONCERTO version: %s" msgstr "" #: cinder/volume/drivers/violin/v7000_fcp.py:71 #, python-format msgid "Initialized driver %(name)s version: %(vers)s" msgstr "" #: cinder/volume/drivers/violin/v7000_fcp.py:228 #, python-format msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." msgstr "" #: cinder/volume/drivers/violin/v7000_fcp.py:241 #, python-format msgid "Unexporting lun %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:320 msgid "Backing not available, no operation to be performed." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:517 #, python-format msgid "There is no backing for the volume: %s. Need to create one." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:539 #, python-format msgid "" "Returning connection_info: %(info)s for volume: %(volume)s with " "connector: %(connector)s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:593 #, python-format msgid "There is no backing, so will not create snapshot: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:598 #, python-format msgid "Successfully created snapshot: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:625 #, python-format msgid "There is no backing, and so there is no snapshot: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:629 #, python-format msgid "Successfully deleted snapshot: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1045 #, python-format msgid "Done copying image: %(id)s to volume: %(vol)s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1164 #, python-format msgid "Backing not found, creating for volume: %s" msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1187 #, python-format msgid "Done copying volume %(vol)s to a new image %(img)s" msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1378 #, python-format msgid "There is no backing for volume: %s; no need to extend the virtual disk." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1385 #: cinder/volume/drivers/vmware/vmdk.py:1414 #, python-format msgid "Successfully extended volume: %(vol)s to size: %(size)s GB." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1397 #, python-format msgid "" "Relocating volume: %s to a different datastore due to insufficient disk " "space on current datastore." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1768 #, python-format msgid "Using overridden vmware_host_version from config: %s" msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1772 #, python-format msgid "Fetched vCenter server version: %s" msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1821 #, python-format msgid "Using compute cluster(s): %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1823 #, python-format msgid "Successfully setup driver: %(driver)s for server: %(ip)s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1953 #: cinder/volume/drivers/vmware/volumeops.py:1192 #, python-format msgid "Successfully created clone: %s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1967 #, python-format msgid "" "There is no backing for the snapshotted volume: %(snap)s. Not creating " "any backing for the volume: %(vol)s." msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:1975 #, python-format msgid "" "There is no snapshot point for the snapshotted volume: %(snap)s. Not " "creating any backing for the volume: %(vol)s." 
msgstr "" #: cinder/volume/drivers/vmware/vmdk.py:2004 #, python-format msgid "" "There is no backing for the source volume: %(src)s. Not creating any " "backing for volume: %(vol)s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:311 #, python-format msgid "Deleted the VM backing: %s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:645 #, python-format msgid "Successfully extended virtual disk: %(path)s to %(size)s GB." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:814 #, python-format msgid "Successfully created volume backing: %s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:980 #, python-format msgid "" "Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " "and resource pool: %(rp)s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:998 #, python-format msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1022 #, python-format msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1084 #, python-format msgid "" "Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " "delete anything." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1095 #, python-format msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1262 #, python-format msgid "Renaming backing VM: %(backing)s to %(new_name)s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1271 #, python-format msgid "Backing VM: %(backing)s renamed to %(new_name)s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1344 #, python-format msgid "Successfully deleted file: %s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1364 #, python-format msgid "Created datastore folder: %s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1520 #, python-format msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr "" #: cinder/volume/drivers/vmware/volumeops.py:1564 #, python-format msgid "Deleted vmdk file: %s." msgstr "" #: cinder/volume/drivers/windows/smbfs.py:110 #, python-format msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" msgstr "" #: cinder/volume/drivers/zfssa/restclient.py:182 #, python-format msgid "ZFSSA version: %s" msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:140 #: cinder/volume/drivers/zfssa/zfssanfs.py:118 #, python-format msgid "Connecting to host: %s." msgstr "" #: cinder/volume/drivers/zfssa/zfssaiscsi.py:868 #, python-format msgid "Connecting to target host: %s for backend enabled migration." msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:216 #, python-format msgid "Creating snapshot: %s" msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:243 #, python-format msgid "Creatng volume from snapshot. volume: %s" msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:245 #, python-format msgid "Source Snapshot: %s" msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:277 #, python-format msgid "new cloned volume: %s" msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:278 #, python-format msgid "source volume for cloning: %s" msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:299 #, python-format msgid "Checking origin %(origin)s of volume %(volume)s." msgstr "" #: cinder/volume/drivers/zfssa/zfssanfs.py:620 #, python-format msgid "Source and destination ZFSSA shares are the same. Do nothing. 
volume: %s" msgstr "" #: cinder/volume/flows/manager/create_volume.py:595 #, python-format msgid "Requested image %(id)s is not in raw format." msgstr "" #: cinder/volume/flows/manager/create_volume.py:617 #, python-format msgid "" "Skipping image volume %(id)s because it is not accessible by current " "Tenant." msgstr "" #: cinder/volume/flows/manager/create_volume.py:622 #, python-format msgid "Will clone a volume from the image volume %(id)s." msgstr "" #: cinder/volume/flows/manager/create_volume.py:722 msgid "Unable to get Cinder internal context, will not use image-volume cache." msgstr "" #: cinder/volume/flows/manager/create_volume.py:813 #, python-format msgid "" "Volume %(volume_id)s: being created as %(create_type)s with " "specification: %(volume_spec)s" msgstr "" #: cinder/volume/flows/manager/create_volume.py:893 #, python-format msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" msgstr "" #: cinder/volume/flows/manager/manage_existing_snapshot.py:301 #, python-format msgid "Snapshot %s created successfully." msgstr "" #: cinder/volume/targets/cxt.py:190 #, python-format msgid "Removing iscsi_target for: %s" msgstr "" #: cinder/volume/targets/cxt.py:214 cinder/volume/targets/cxt.py:234 #, python-format msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" msgstr "" #: cinder/volume/targets/iet.py:148 #, python-format msgid "Removing iscsi_target for volume: %s" msgstr "" #: cinder/volume/targets/iet.py:203 #, python-format msgid "Failed to open iet session list for %(vol_id)s: %(e)s" msgstr "" #: cinder/volume/targets/iscsi.py:224 #, python-format msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" #: cinder/volume/targets/iscsi.py:239 #, python-format msgid "" "Skipping remove_export. No iscsi_target is presently exported for volume:" " %s" msgstr "" #: cinder/volume/targets/iscsi.py:256 #, python-format msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" #: cinder/volume/targets/lio.py:105 #, python-format msgid "Creating iscsi_target for volume: %s" msgstr "" #: cinder/volume/targets/lio.py:147 #, python-format msgid "Removing iscsi_target: %s" msgstr "" #: cinder/volume/targets/lio.py:212 msgid "Restoring iSCSI target from configuration file" msgstr "" #: cinder/volume/targets/lio.py:216 msgid "Skipping ensure_export. Found existing iSCSI target." msgstr "" #: cinder/volume/targets/tgt.py:251 #, python-format msgid "Removing iscsi_target for Volume ID: %s" msgstr "" #: cinder/zonemanager/fc_zone_manager.py:168 #, python-format msgid "Final filtered map for fabric: %(i_t_map)s" msgstr "" #: cinder/zonemanager/fc_zone_manager.py:175 msgid "Add connection: finished iterating over all target list" msgstr "" #: cinder/zonemanager/fc_zone_manager.py:214 #, python-format msgid "Delete connection target list: %(targets)s" msgstr "" #: cinder/zonemanager/fc_zone_manager.py:230 #, python-format msgid "Final filtered map for delete connection: %(i_t_map)s" msgstr "" #: cinder/zonemanager/fc_zone_manager.py:284 #, python-format msgid "No targets to add or remove connection for initiator: %(init_wwn)s" msgstr "" #: cinder/zonemanager/utils.py:38 #, python-format msgid "Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." msgstr "" #: cinder/zonemanager/utils.py:55 #, python-format msgid "Using FC lookup service %s." msgstr "" #: cinder/zonemanager/drivers/driver_utils.py:63 msgid "" "Zone name created using prefix because either host name or storage system" " is none." 
msgstr "" #: cinder/zonemanager/drivers/driver_utils.py:73 msgid "Zone name created using prefix because host name is none." msgstr "" #: cinder/zonemanager/drivers/driver_utils.py:76 #, python-format msgid "Friendly zone name after forming: %(zonename)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:121 #, python-format msgid "" "BrcdFCZoneDriver - Add connection for fabric %(fabric)s for I-T map: " "%(i_t_map)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:134 #, python-format msgid "Zoning policy for Fabric %(policy)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:138 msgid "Zoning policy is not valid, no zoning will be performed." msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:171 #, python-format msgid "Zone exists in I-T mode. Skipping zone creation for %(zonename)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:195 #, python-format msgid "Zone map to add: %(zonemap)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:226 #, python-format msgid "" "BrcdFCZoneDriver - Delete connection for fabric %(fabric)s for I-T map: " "%(i_t_map)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:239 #, python-format msgid "Zoning policy for fabric %(policy)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py:405 #, python-format msgid "Filtered targets for SAN is: %(targets)s" msgstr "" #: cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py:312 msgid "VF context is changed in the session." msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py:312 #, python-format msgid "Connector returning fcnsinfo-%s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:131 #, python-format msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:149 #, python-format msgid "Zoning policy for Fabric %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:187 #, python-format msgid "Zone exists in I-T mode. Skipping zone creation %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:219 #, python-format msgid "Zone map to add: %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:260 #, python-format msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:279 #, python-format msgid "Zoning policy for fabric %s" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:362 #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "" #: cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py:464 #, python-format msgid "Filtered targets for SAN is: %s" msgstr "" cinder-8.0.0/cinder/context.py0000664000567000056710000001704612701406257017442 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.

"""RequestContext: context for requests that persist through all of cinder."""

import copy

from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
from oslo_utils import timeutils
import six

from cinder.i18n import _, _LW
from cinder import policy

context_opts = [
    cfg.StrOpt('cinder_internal_tenant_project_id',
               help='ID of the project which will be used as the Cinder '
                    'internal tenant.'),
    cfg.StrOpt('cinder_internal_tenant_user_id',
               help='ID of the user to be used in volume operations as the '
                    'Cinder internal tenant.'),
]

CONF = cfg.CONF
CONF.register_opts(context_opts)

LOG = logging.getLogger(__name__)


class RequestContext(context.RequestContext):
    """Security context and request information.

    Represents the user taking a given action within the system.
    """

    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, project_name=None, remote_address=None,
                 timestamp=None, request_id=None, auth_token=None,
                 overwrite=True, quota_class=None, service_catalog=None,
                 domain=None, user_domain=None, project_domain=None,
                 **kwargs):
        """Initialize RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden,
            'yes' indicates deleted records are visible, 'only' indicates
            that *only* deleted records are visible.
        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        super(RequestContext, self).__init__(auth_token=auth_token,
                                             user=user_id,
                                             tenant=project_id,
                                             domain=domain,
                                             user_domain=user_domain,
                                             project_domain=project_domain,
                                             is_admin=is_admin,
                                             request_id=request_id,
                                             overwrite=overwrite)
        self.roles = roles or []
        self.project_name = project_name
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        elif isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_isotime(timestamp)
        self.timestamp = timestamp
        self.quota_class = quota_class

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in
                                    ('identity', 'compute', 'object-store')]
        else:
            # if list is empty or none
            self.service_catalog = []

        # We need to have RequestContext attributes defined
        # when policy.check_is_admin invokes request logging
        # to make it loggable.
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self.roles, self)
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def to_dict(self):
        result = super(RequestContext, self).to_dict()
        result['user_id'] = self.user_id
        result['project_id'] = self.project_id
        result['project_name'] = self.project_name
        result['domain'] = self.domain
        result['read_deleted'] = self.read_deleted
        result['roles'] = self.roles
        result['remote_address'] = self.remote_address
        result['timestamp'] = self.timestamp.isoformat()
        result['quota_class'] = self.quota_class
        result['service_catalog'] = self.service_catalog
        result['request_id'] = self.request_id
        return result

    @classmethod
    def from_dict(cls, values):
        return cls(**values)

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        context = self.deepcopy()
        context.is_admin = True

        if 'admin' not in context.roles:
            context.roles.append('admin')

        if read_deleted is not None:
            context.read_deleted = read_deleted

        return context

    def deepcopy(self):
        return copy.deepcopy(self)

    # NOTE(sirp): the openstack/common version of RequestContext uses
    # tenant/user whereas the Cinder version uses project_id/user_id.
    # NOTE(adrienverge): The Cinder version of RequestContext now uses
    # tenant/user internally, so it is compatible with context-aware code
    # from openstack/common. We still need this shim for the rest of
    # Cinder's code.
    @property
    def project_id(self):
        return self.tenant

    @project_id.setter
    def project_id(self, value):
        self.tenant = value

    @property
    def user_id(self):
        return self.user

    @user_id.setter
    def user_id(self, value):
        self.user = value


def get_admin_context(read_deleted="no"):
    return RequestContext(user_id=None,
                          project_id=None,
                          is_admin=True,
                          read_deleted=read_deleted,
                          overwrite=False)


def get_internal_tenant_context():
    """Build and return the Cinder internal tenant context object

    This request context will only work for internal Cinder operations. It
    will not be able to make requests to remote services. To do so it will
    need to use the keystone client to get an auth_token.
    """
    project_id = CONF.cinder_internal_tenant_project_id
    user_id = CONF.cinder_internal_tenant_user_id

    if project_id and user_id:
        return RequestContext(user_id=user_id,
                              project_id=project_id,
                              is_admin=True)
    else:
        LOG.warning(_LW('Unable to get internal tenant context: Missing '
                        'required config parameters.'))
        return None
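The context module above is easiest to follow from the caller's side. The following is a minimal usage sketch, not part of the cinder source: the user/project IDs are placeholders, it assumes the cinder package is importable, and is_admin is passed explicitly so the sketch does not depend on a configured policy backend.

# Illustrative sketch (not part of cinder): building, elevating, and
# serializing a RequestContext, as is done when passing context over RPC.
from cinder import context

ctxt = context.RequestContext(user_id='u-123', project_id='p-456',
                              is_admin=False, roles=['member'])

# elevated() deep-copies the context, sets is_admin and adds the 'admin'
# role, leaving the original context untouched.
admin_ctxt = ctxt.elevated()

# to_dict()/from_dict() round-trip; the timestamp survives as an ISO 8601
# string that __init__ parses back via timeutils.parse_isotime().
restored = context.RequestContext.from_dict(ctxt.to_dict())
assert restored.project_id == ctxt.project_id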
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities related to SSH connection management.""" import os from eventlet import pools from oslo_config import cfg from oslo_log import log as logging import paramiko import six from cinder import exception from cinder.i18n import _, _LI LOG = logging.getLogger(__name__) ssh_opts = [ cfg.BoolOpt('strict_ssh_host_key_policy', default=False, help='Option to enable strict host key checking. When ' 'set to "True" Cinder will only connect to systems ' 'with a host key present in the configured ' '"ssh_hosts_key_file". When set to "False" the host key ' 'will be saved upon first connection and used for ' 'subsequent connections. Default=False'), cfg.StrOpt('ssh_hosts_key_file', default='$state_path/ssh_known_hosts', help='File containing SSH host keys for the systems with which ' 'Cinder needs to communicate. OPTIONAL: ' 'Default=$state_path/ssh_known_hosts'), ] CONF = cfg.CONF CONF.register_opts(ssh_opts) class SSHPool(pools.Pool): """A simple eventlet pool to hold ssh connections.""" def __init__(self, ip, port, conn_timeout, login, password=None, privatekey=None, *args, **kwargs): self.ip = ip self.port = port self.login = login self.password = password self.conn_timeout = conn_timeout if conn_timeout else None self.privatekey = privatekey self.hosts_key_file = None # Validate good config setting here. # Paramiko handles the case where the file is inaccessible. if not CONF.ssh_hosts_key_file: raise exception.ParameterNotFound(param='ssh_hosts_key_file') elif not os.path.isfile(CONF.ssh_hosts_key_file): # If using the default path, just create the file. if CONF.state_path in CONF.ssh_hosts_key_file: open(CONF.ssh_hosts_key_file, 'a').close() else: msg = (_("Unable to find ssh_hosts_key_file: %s") % CONF.ssh_hosts_key_file) raise exception.InvalidInput(reason=msg) if 'hosts_key_file' in kwargs.keys(): self.hosts_key_file = kwargs.pop('hosts_key_file') LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be " "loaded along with %(conf)s from /etc/cinder.conf."), {'kwargs': self.hosts_key_file, 'conf': CONF.ssh_hosts_key_file}) LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' " "using ssh_hosts_key_file '%(key_file)s'.", {'policy': CONF.strict_ssh_host_key_policy, 'key_file': CONF.ssh_hosts_key_file}) self.strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy if not self.hosts_key_file: self.hosts_key_file = CONF.ssh_hosts_key_file else: self.hosts_key_file += ',' + CONF.ssh_hosts_key_file super(SSHPool, self).__init__(*args, **kwargs) def create(self): try: ssh = paramiko.SSHClient() if ',' in self.hosts_key_file: files = self.hosts_key_file.split(',') for f in files: ssh.load_host_keys(f) else: ssh.load_host_keys(self.hosts_key_file) # If strict_ssh_host_key_policy is set we want to reject, by # default if there is not entry in the known_hosts file. # Otherwise we use AutoAddPolicy which accepts on the first # Connect but fails if the keys change. load_host_keys can # handle hashed known_host entries. 
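            # NOTE: illustrative sketch only (not part of the original
            # module), assuming placeholder host/credentials - the pool built
            # around this create() method is typically consumed through
            # eventlet's item() context manager, which returns the client to
            # the pool on exit:
            #
            #     pool = SSHPool('192.0.2.10', 22, 30, 'admin',
            #                    password='secret', min_size=1, max_size=5)
            #     with pool.item() as ssh:
            #         stdin, stdout, stderr = ssh.exec_command('pvs')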
if self.strict_ssh_host_key_policy: ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) else: ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if self.password: ssh.connect(self.ip, port=self.port, username=self.login, password=self.password, timeout=self.conn_timeout) elif self.privatekey: pkfile = os.path.expanduser(self.privatekey) privatekey = paramiko.RSAKey.from_private_key_file(pkfile) ssh.connect(self.ip, port=self.port, username=self.login, pkey=privatekey, timeout=self.conn_timeout) else: msg = _("Specify a password or private_key") raise exception.CinderException(msg) # Paramiko by default sets the socket timeout to 0.1 seconds, # ignoring what we set through the sshclient. This doesn't help for # keeping long lived connections. Hence we have to bypass it, by # overriding it after the transport is initialized. We are setting # the sockettimeout to None and setting a keepalive packet so that, # the server will keep the connection open. All that does is send # a keepalive packet every ssh_conn_timeout seconds. if self.conn_timeout: transport = ssh.get_transport() transport.sock.settimeout(None) transport.set_keepalive(self.conn_timeout) return ssh except Exception as e: msg = _("Error connecting via ssh: %s") % six.text_type(e) LOG.error(msg) raise paramiko.SSHException(msg) def get(self): """Return an item from the pool, when one is available. This may cause the calling greenthread to block. Check if a connection is active before returning it. For dead connections create and return a new connection. """ conn = super(SSHPool, self).get() if conn: if conn.get_transport().is_active(): return conn else: conn.close() return self.create() def remove(self, ssh): """Close an ssh client and remove it from free_items.""" ssh.close() if ssh in self.free_items: self.free_items.remove(ssh) if self.current_size > 0: self.current_size -= 1 cinder-8.0.0/cinder/coordination.py0000664000567000056710000002317212701406250020434 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
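# NOTE: illustrative sketch only (not part of the original module) - the
# module-level COORDINATOR defined below is started once per service and then
# hands out distributed locks, roughly:
#
#     COORDINATOR.start()
#     with COORDINATOR.get_lock('snapshot-vol-1'):
#         ...  # critical section serialized across cinder services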
"""Coordination and locking utilities.""" import inspect import random import threading import uuid import eventlet from eventlet import tpool import itertools from oslo_config import cfg from oslo_log import log import six from tooz import coordination from tooz import locking from cinder import exception from cinder.i18n import _, _LE, _LI, _LW LOG = log.getLogger(__name__) coordination_opts = [ cfg.StrOpt('backend_url', default='file://$state_path', help='The backend URL to use for distributed coordination.'), cfg.FloatOpt('heartbeat', default=1.0, help='Number of seconds between heartbeats for distributed ' 'coordination.'), cfg.FloatOpt('initial_reconnect_backoff', default=0.1, help='Initial number of seconds to wait after failed ' 'reconnection.'), cfg.FloatOpt('max_reconnect_backoff', default=60.0, help='Maximum number of seconds between sequential ' 'reconnection retries.'), ] CONF = cfg.CONF CONF.register_opts(coordination_opts, group='coordination') class Coordinator(object): """Tooz coordination wrapper. Coordination member id is created from concatenated `prefix` and `agent_id` parameters. :param str agent_id: Agent identifier :param str prefix: Used to provide member identifier with a meaningful prefix. """ def __init__(self, agent_id=None, prefix=''): self.coordinator = None self.agent_id = agent_id or str(uuid.uuid4()) self.started = False self.prefix = prefix self._ev = None self._dead = None def is_active(self): return self.coordinator is not None def start(self): """Connect to coordination backend and start heartbeat.""" if not self.started: try: self._dead = threading.Event() self._start() self.started = True # NOTE(bluex): Start heartbeat in separate thread to avoid # being blocked by long coroutines. if self.coordinator and self.coordinator.requires_beating: self._ev = eventlet.spawn( lambda: tpool.execute(self.heartbeat)) except coordination.ToozError: LOG.exception(_LE('Error starting coordination backend.')) raise LOG.info(_LI('Coordination backend started successfully.')) def stop(self): """Disconnect from coordination backend and stop heartbeat.""" if self.started: self.coordinator.stop() self._dead.set() if self._ev is not None: self._ev.wait() self._ev = None self.coordinator = None self.started = False def get_lock(self, name): """Return a Tooz backend lock. :param str name: The lock name that is used to identify it across all nodes. """ if self.coordinator is not None: return self.coordinator.get_lock(self.prefix + name) else: raise exception.LockCreationFailed(_('Coordinator uninitialized.')) def heartbeat(self): """Coordinator heartbeat. Method that every couple of seconds (config: `coordination.heartbeat`) sends heartbeat to prove that the member is not dead. 
If connection to coordination backend is broken it tries to reconnect every couple of seconds (config: `coordination.initial_reconnect_backoff` up to `coordination.max_reconnect_backoff`) """ while self.coordinator is not None and not self._dead.is_set(): try: self._heartbeat() except coordination.ToozConnectionError: self._reconnect() else: self._dead.wait(cfg.CONF.coordination.heartbeat) def _start(self): member_id = self.prefix + self.agent_id self.coordinator = coordination.get_coordinator( cfg.CONF.coordination.backend_url, member_id) self.coordinator.start() def _heartbeat(self): try: self.coordinator.heartbeat() return True except coordination.ToozConnectionError: LOG.exception(_LE('Connection error while sending a heartbeat ' 'to coordination backend.')) raise except coordination.ToozError: LOG.exception(_LE('Error sending a heartbeat to coordination ' 'backend.')) return False def _reconnect(self): """Reconnect with jittered exponential backoff increase.""" LOG.info(_LI('Reconnecting to coordination backend.')) cap = cfg.CONF.coordination.max_reconnect_backoff backoff = base = cfg.CONF.coordination.initial_reconnect_backoff for attempt in itertools.count(1): try: self._start() break except coordination.ToozError: backoff = min(cap, random.uniform(base, backoff * 3)) msg = _LW('Reconnect attempt %(attempt)s failed. ' 'Next try in %(backoff).2fs.') LOG.warning(msg, {'attempt': attempt, 'backoff': backoff}) self._dead.wait(backoff) LOG.info(_LI('Reconnected to coordination backend.')) COORDINATOR = Coordinator(prefix='cinder-') class Lock(locking.Lock): """Lock with dynamic name. :param str lock_name: Lock name. :param dict lock_data: Data for lock name formatting. :param coordinator: Coordinator class to use when creating lock. Defaults to the global coordinator. Using it like so:: with Lock('mylock'): ... ensures that only one process at a time will execute code in context. Lock name can be formatted using Python format string syntax:: Lock('foo-{volume.id}, {'volume': ...,}) Available field names are keys of lock_data. """ def __init__(self, lock_name, lock_data=None, coordinator=None): super(Lock, self).__init__(str(id(self))) lock_data = lock_data or {} self.coordinator = coordinator or COORDINATOR self.blocking = True self.lock = self._prepare_lock(lock_name, lock_data) def _prepare_lock(self, lock_name, lock_data): if not isinstance(lock_name, six.string_types): raise ValueError(_('Not a valid string: %s') % lock_name) return self.coordinator.get_lock(lock_name.format(**lock_data)) def acquire(self, blocking=None): """Attempts to acquire lock. :param blocking: If True, blocks until the lock is acquired. If False, returns right away. Otherwise, the value is used as a timeout value and the call returns maximum after this number of seconds. :return: returns true if acquired (false if not) :rtype: bool """ blocking = self.blocking if blocking is None else blocking return self.lock.acquire(blocking=blocking) def release(self): """Attempts to release lock. The behavior of releasing a lock which was not acquired in the first place is undefined. :return: returns true if released (false if not) :rtype: bool """ self.lock.release() def synchronized(lock_name, blocking=True, coordinator=None): """Synchronization decorator. :param str lock_name: Lock name. :param blocking: If True, blocks until the lock is acquired. If False, raises exception when not acquired. Otherwise, the value is used as a timeout value and if lock is not acquired after this number of seconds exception is raised. 
:param coordinator: Coordinator class to use when creating lock. Defaults to the global coordinator. :raises tooz.coordination.LockAcquireFailed: if lock is not acquired Decorating a method like so:: @synchronized('mylock') def foo(self, *args): ... ensures that only one process will execute the foo method at a time. Different methods can share the same lock:: @synchronized('mylock') def foo(self, *args): ... @synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. Lock name can be formatted using Python format string syntax:: @synchronized('{f_name}-{vol.id}-{snap[name]}') def foo(self, vol, snap): ... Available field names are: decorated function parameters and `f_name` as a decorated function name. """ def wrap(f): @six.wraps(f) def wrapped(*a, **k): call_args = inspect.getcallargs(f, *a, **k) call_args['f_name'] = f.__name__ lock = Lock(lock_name, call_args, coordinator) with lock(blocking): return f(*a, **k) return wrapped return wrap cinder-8.0.0/cinder/compute/0000775000567000056710000000000012701406543017046 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/compute/__init__.py0000664000567000056710000000207212701406250021153 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import importutils compute_opts = [ cfg.StrOpt('compute_api_class', default='cinder.compute.nova.API', help='The full class name of the ' 'compute API class to use'), ] CONF = cfg.CONF CONF.register_opts(compute_opts) def API(): compute_api_class = CONF.compute_api_class cls = importutils.import_class(compute_api_class) return cls() cinder-8.0.0/cinder/compute/nova.py0000664000567000056710000001760612701406250020370 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests to Nova. """ from novaclient import client as nova_client from novaclient import exceptions as nova_exceptions from novaclient import service_catalog from oslo_config import cfg from oslo_log import log as logging from requests import exceptions as request_exceptions from cinder import context as ctx from cinder.db import base from cinder import exception nova_opts = [ cfg.StrOpt('nova_catalog_info', default='compute:Compute Service:publicURL', help='Match this value when searching for nova in the ' 'service catalog. 
Format is: separated values of ' 'the form: ' '<service_type>:<service_name>:<endpoint_type>'), cfg.StrOpt('nova_catalog_admin_info', default='compute:Compute Service:adminURL', help='Same as nova_catalog_info, but for admin endpoint.'), cfg.StrOpt('nova_endpoint_template', help='Override service catalog lookup with template for nova ' 'endpoint e.g. http://localhost:8774/v2/%(project_id)s'), cfg.StrOpt('nova_endpoint_admin_template', help='Same as nova_endpoint_template, but for admin endpoint.'), cfg.StrOpt('os_region_name', help='Region name of this node'), cfg.StrOpt('nova_ca_certificates_file', help='Location of ca certificates file to use for nova client ' 'requests.'), cfg.BoolOpt('nova_api_insecure', default=False, help='Allow performing insecure SSL requests to nova'), ] CONF = cfg.CONF CONF.register_opts(nova_opts) LOG = logging.getLogger(__name__) # TODO(e0ne): Make Nova version configurable in Mitaka. NOVA_API_VERSION = 2 nova_extensions = [ext for ext in nova_client.discover_extensions(2) if ext.name in ("assisted_volume_snapshots", "list_extensions")] def novaclient(context, admin_endpoint=False, privileged_user=False, timeout=None): """Returns a Nova client @param admin_endpoint: If True, use the admin endpoint template from configuration ('nova_endpoint_admin_template' and 'nova_catalog_admin_info') @param privileged_user: If True, use the account from configuration (requires 'os_privileged_user_name', 'os_privileged_user_password' and 'os_privileged_user_tenant' to be set) @param timeout: Number of seconds to wait for an answer before raising a Timeout exception (None to disable) """ # FIXME: the novaclient ServiceCatalog object is mis-named. # It actually contains the entire access blob. # Only needed parts of the service catalog are passed in, see # nova/context.py. compat_catalog = { 'access': {'serviceCatalog': context.service_catalog or []} } sc = service_catalog.ServiceCatalog(compat_catalog) nova_endpoint_template = CONF.nova_endpoint_template nova_catalog_info = CONF.nova_catalog_info if admin_endpoint: nova_endpoint_template = CONF.nova_endpoint_admin_template nova_catalog_info = CONF.nova_catalog_admin_info service_type, service_name, endpoint_type = nova_catalog_info.split(':') # Extract the region if set in configuration if CONF.os_region_name: region_filter = {'attr': 'region', 'filter_value': CONF.os_region_name} else: region_filter = {} if privileged_user and CONF.os_privileged_user_name: context = ctx.RequestContext( CONF.os_privileged_user_name, None, auth_token=CONF.os_privileged_user_password, project_name=CONF.os_privileged_user_tenant, service_catalog=context.service_catalog) # When privileged_user is used, it needs to authenticate to Keystone # before querying Nova, so we set auth_url to the identity service # endpoint. if CONF.os_privileged_user_auth_url: url = CONF.os_privileged_user_auth_url else: # We then pass region_name, endpoint_type, etc. to the # Client() constructor so that the final endpoint is # chosen correctly. 
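            # NOTE: illustrative only (not part of the original module) -
            # with the default nova_catalog_info of
            # 'compute:Compute Service:publicURL' and os_region_name set to
            # e.g. 'RegionOne', the analogous non-privileged lookup further
            # below expands to roughly:
            #
            #     url = sc.url_for(service_type='compute',
            #                      service_name='Compute Service',
            #                      endpoint_type='publicURL',
            #                      attr='region', filter_value='RegionOne')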
url = sc.url_for(service_type='identity', endpoint_type=endpoint_type, **region_filter) LOG.debug('Creating a Nova client using "%s" user', CONF.os_privileged_user_name) else: if nova_endpoint_template: url = nova_endpoint_template % context.to_dict() else: url = sc.url_for(service_type=service_type, service_name=service_name, endpoint_type=endpoint_type, **region_filter) LOG.debug('Nova client connection created using URL: %s', url) c = nova_client.Client(NOVA_API_VERSION, context.user_id, context.auth_token, context.project_name, auth_url=url, insecure=CONF.nova_api_insecure, timeout=timeout, region_name=CONF.os_region_name, endpoint_type=endpoint_type, cacert=CONF.nova_ca_certificates_file, extensions=nova_extensions) if not privileged_user: # noauth extracts user_id:project_id from auth_token c.client.auth_token = (context.auth_token or '%s:%s' % (context.user_id, context.project_id)) c.client.management_url = url return c class API(base.Base): """API for interacting with novaclient.""" def has_extension(self, context, extension, timeout=None): try: nova_exts = novaclient(context).list_extensions.show_all() except request_exceptions.Timeout: raise exception.APITimeout(service='Nova') return extension in [e.name for e in nova_exts] def update_server_volume(self, context, server_id, attachment_id, new_volume_id): novaclient(context).volumes.update_server_volume(server_id, attachment_id, new_volume_id) def create_volume_snapshot(self, context, volume_id, create_info): nova = novaclient(context, admin_endpoint=True, privileged_user=True) # pylint: disable-msg=E1101 nova.assisted_volume_snapshots.create( volume_id, create_info=create_info) def delete_volume_snapshot(self, context, snapshot_id, delete_info): nova = novaclient(context, admin_endpoint=True, privileged_user=True) # pylint: disable-msg=E1101 nova.assisted_volume_snapshots.delete( snapshot_id, delete_info=delete_info) def get_server(self, context, server_id, privileged_user=False, timeout=None): try: return novaclient(context, privileged_user=privileged_user, timeout=timeout).servers.get(server_id) except nova_exceptions.NotFound: raise exception.ServerNotFound(uuid=server_id) except request_exceptions.Timeout: raise exception.APITimeout(service='Nova') cinder-8.0.0/cinder/hacking/0000775000567000056710000000000012701406543016776 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/hacking/__init__.py0000664000567000056710000000000012701406250021070 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/hacking/checks.py0000664000567000056710000004601712701406250020613 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import re import six """ Guidelines for writing new hacking checks - Use only for Cinder specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range N3xx. Find the current test with the highest allocated number and then pick the next value. 
- Keep the test method code in the source file ordered based on the N3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to cinder/tests/test_hacking.py """ # NOTE(thangp): Ignore N323 pep8 error caused by importing cinder objects UNDERSCORE_IMPORT_FILES = ['cinder/objects/__init__.py'] translated_log = re.compile( r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)" "\(\s*_\(\s*('|\")") string_translation = re.compile(r"(.)*_\(\s*('|\")") vi_header_re = re.compile(r"^#\s+vim?:.+") underscore_import_check = re.compile(r"(.)*i18n\s+import(.)* _$") underscore_import_check_multi = re.compile(r"(.)*i18n\s+import(.)* _, (.)*") # We need this for cases where they have created their own _ function. custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") no_audit_log = re.compile(r"(.)*LOG\.audit(.)*") no_print_statements = re.compile(r"\s*print\s*\(.+\).*") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") # NOTE(jsbryant): When other oslo libraries switch over non-namespaced # imports, we will need to add them to the regex below. oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](concurrency|db" "|config|utils|serialization|log)") no_contextlib_nested = re.compile(r"\s*with (contextlib\.)?nested\(") log_translation_LI = re.compile( r"(.)*LOG\.(info)\(\s*(_\(|'|\")") log_translation_LE = re.compile( r"(.)*LOG\.(exception|error)\(\s*(_\(|'|\")") log_translation_LW = re.compile( r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")") logging_instance = re.compile( r"(.)*LOG\.(warning|info|debug|error|exception)\(") assert_None = re.compile( r".*assertEqual\(None, .*\)") assert_True = re.compile( r".*assertEqual\(True, .*\)") class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ def __init__(self, tree, filename): """This object is created automatically by pep8. :param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" # Need to disable pylint check here as it doesn't catch CHECK_DESC # being defined in the subclasses. message = message or self.CHECK_DESC # pylint: disable=E1101 error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) def _check_call_names(self, call_node, names): if isinstance(call_node, ast.Call): if isinstance(call_node.func, ast.Name): if call_node.func.id in names: return True return False def no_vi_headers(physical_line, line_number, lines): """Check for vi editor configuration in source files. By default vi modelines can only appear in the first or last 5 lines of a source file. 
N314 """ # NOTE(gilliard): line_number is 1-indexed if line_number <= 5 or line_number > len(lines) - 5: if vi_header_re.match(physical_line): return 0, "N314: Don't put vi configuration in source files" def no_translate_debug_logs(logical_line, filename): """Check for 'LOG.debug(_(' As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. * Use filename so we can start enforcing this in specific folders instead of needing to do so all at once. N319 """ if logical_line.startswith("LOG.debug(_("): yield(0, "N319 Don't translate debug level logs") def no_mutable_default_args(logical_line): msg = "N322: Method's default argument shouldn't be mutable!" mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") if mutable_default_args.match(logical_line): yield (0, msg) def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function We need to ensure that any files that are using the _() function to translate logs are explicitly importing the _ function. We can't trust unit test to catch whether the import has been added so we need to check for it here. """ # Build a list of the files that have _ imported. No further # checking needed once it is found. for file in UNDERSCORE_IMPORT_FILES: if file in filename: return if (underscore_import_check.match(logical_line) or underscore_import_check_multi.match(logical_line) or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) elif(translated_log.match(logical_line) or string_translation.match(logical_line)): yield(0, "N323: Found use of _() without explicit import of _ !") class CheckForStrUnicodeExc(BaseASTChecker): """Checks for the use of str() or unicode() on an exception. This currently only handles the case where str() or unicode() is used in the scope of an exception handler. If the exception is passed into a function, returned from an assertRaises, or used on an exception created in the same scope, this does not catch it. """ CHECK_DESC = ('N325 str() and unicode() cannot be used on an ' 'exception. Remove or use six.text_type()') def __init__(self, tree, filename): super(CheckForStrUnicodeExc, self).__init__(tree, filename) self.name = [] self.already_checked = [] # Python 2 def visit_TryExcept(self, node): for handler in node.handlers: if handler.name: self.name.append(handler.name.id) super(CheckForStrUnicodeExc, self).generic_visit(node) self.name = self.name[:-1] else: super(CheckForStrUnicodeExc, self).generic_visit(node) # Python 3 def visit_ExceptHandler(self, node): if node.name: self.name.append(node.name) super(CheckForStrUnicodeExc, self).generic_visit(node) self.name = self.name[:-1] else: super(CheckForStrUnicodeExc, self).generic_visit(node) def visit_Call(self, node): if self._check_call_names(node, ['str', 'unicode']): if node not in self.already_checked: self.already_checked.append(node) if isinstance(node.args[0], ast.Name): if node.args[0].id in self.name: self.add_error(node.args[0]) super(CheckForStrUnicodeExc, self).generic_visit(node) class CheckLoggingFormatArgs(BaseASTChecker): """Check for improper use of logging format arguments. LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.", ('volume1', 500)) The format arguments should not be a tuple as it is easy to miss. """ CHECK_DESC = 'C310 Log method arguments should not be a tuple.' 
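    # NOTE: illustrative only (not part of the original module) - the
    # corrected form of the call shown in the docstring above passes the
    # format arguments individually instead of as a tuple:
    #
    #     LOG.debug("Volume %s caught fire and is at %d degrees C and "
    #               "climbing.", 'volume1', 500)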
LOG_METHODS = [ 'debug', 'info', 'warn', 'warning', 'error', 'exception', 'critical', 'fatal', 'trace', 'log' ] def _find_name(self, node): """Return the fully qualified name or a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif (isinstance(node, ast.Attribute) and isinstance(node.value, (ast.Name, ast.Attribute))): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' + method_name elif isinstance(node, six.string_types): return node else: # could be Subscript, Call or many more return None def visit_Call(self, node): """Look for the 'LOG.*' calls.""" # extract the obj_name and method_name if isinstance(node.func, ast.Attribute): obj_name = self._find_name(node.func.value) if isinstance(node.func.value, ast.Name): method_name = node.func.attr elif isinstance(node.func.value, ast.Attribute): obj_name = self._find_name(node.func.value) method_name = node.func.attr else: # could be Subscript, Call or many more return super(CheckLoggingFormatArgs, self).generic_visit(node) # obj must be a logger instance and method must be a log helper if (obj_name != 'LOG' or method_name not in self.LOG_METHODS): return super(CheckLoggingFormatArgs, self).generic_visit(node) # the call must have arguments if not len(node.args): return super(CheckLoggingFormatArgs, self).generic_visit(node) # any argument should not be a tuple for arg in node.args: if isinstance(arg, ast.Tuple): self.add_error(arg) return super(CheckLoggingFormatArgs, self).generic_visit(node) class CheckOptRegistrationArgs(BaseASTChecker): """Verifying the registration of options are well formed This class creates a check for single opt or list/tuple of opts when register_opt() or register_opts() are being called. """ CHECK_DESC = ('C311: Arguments being passed to register_opt/register_opts ' 'must be a single option or list/tuple of options ' 'respectively. Options must also end with _opt or _opts ' 'respectively.') singular_method = 'register_opt' plural_method = 'register_opts' register_methods = [ singular_method, plural_method, ] def _find_name(self, node): """Return the fully qualified name or a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif (isinstance(node, ast.Attribute) and isinstance(node.value, (ast.Name, ast.Attribute))): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' 
+ method_name elif isinstance(node, six.string_types): return node else: # could be Subscript, Call or many more return None def _is_list_or_tuple(self, obj): return isinstance(obj, ast.List) or isinstance(obj, ast.Tuple) def visit_Call(self, node): """Look for the register_opt/register_opts calls.""" # extract the obj_name and method_name if isinstance(node.func, ast.Attribute): if not isinstance(node.func.value, ast.Name): return (super(CheckOptRegistrationArgs, self).generic_visit(node)) method_name = node.func.attr # obj must be instance of register_opt() or register_opts() if method_name not in self.register_methods: return (super(CheckOptRegistrationArgs, self).generic_visit(node)) if len(node.args) > 0: argument_name = self._find_name(node.args[0]) if argument_name: if (method_name == self.singular_method and not argument_name.lower().endswith('opt')): self.add_error(node.args[0]) elif (method_name == self.plural_method and not argument_name.lower().endswith('opts')): self.add_error(node.args[0]) else: # This covers instances of register_opt()/register_opts() # that are registering the objects directly and not # passing in a variable referencing the options being # registered. if (method_name == self.singular_method and self._is_list_or_tuple(node.args[0])): self.add_error(node.args[0]) elif (method_name == self.plural_method and not self._is_list_or_tuple(node.args[0])): self.add_error(node.args[0]) return super(CheckOptRegistrationArgs, self).generic_visit(node) def validate_log_translations(logical_line, filename): # Translations are not required in the test directory. # This will not catch all instances of violations, just direct # misuse of the form LOG.info('Message'). if "cinder/tests" in filename: return msg = "N328: LOG.info messages require translations `_LI()`!" if log_translation_LI.match(logical_line): yield (0, msg) msg = ("N329: LOG.exception and LOG.error messages require " "translations `_LE()`!") if log_translation_LE.match(logical_line): yield (0, msg) msg = "N330: LOG.warning messages require translations `_LW()`!" if log_translation_LW.match(logical_line): yield (0, msg) def check_oslo_namespace_imports(logical_line): if re.match(oslo_namespace_imports, logical_line): msg = ("N333: '%s' must be used instead of '%s'.") % ( logical_line.replace('oslo.', 'oslo_'), logical_line) yield(0, msg) def check_datetime_now(logical_line, noqa): if noqa: return msg = ("C301: Found datetime.now(). " "Please use timeutils.utcnow() from oslo_utils.") if 'datetime.now' in logical_line: yield(0, msg) _UNICODE_USAGE_REGEX = re.compile(r'\bunicode *\(') def check_unicode_usage(logical_line, noqa): if noqa: return msg = "C302: Found unicode() call. Please use six.text_type()." if _UNICODE_USAGE_REGEX.search(logical_line): yield(0, msg) def check_no_print_statements(logical_line, filename, noqa): # The files in cinder/cmd do need to use 'print()' so # we don't need to check those files. Other exemptions # should use '# noqa' to avoid failing here. if "cinder/cmd" not in filename and not noqa: if re.match(no_print_statements, logical_line): msg = ("C303: print() should not be used. " "Please use LOG.[info|error|warning|exception|debug]. " "If print() must be used, use '# noqa' to skip this check.") yield(0, msg) def check_no_log_audit(logical_line): """Ensure that we are not using LOG.audit messages Plans are in place going forward as discussed in the following spec (https://review.openstack.org/#/c/91446/) to take out LOG.audit messages. 
Given that audit was a concept invented for OpenStack we can enforce not using it. """ if no_audit_log.match(logical_line): yield(0, "C304: Found LOG.audit. Use LOG.info instead.") def check_no_contextlib_nested(logical_line): msg = ("C305: contextlib.nested is deprecated. With Python 2.7 and later " "the with-statement supports multiple nested objects. See https://" "docs.python.org/2/library/contextlib.html#contextlib.nested " "for more information.") if no_contextlib_nested.match(logical_line): yield(0, msg) def check_timeutils_strtime(logical_line): msg = ("C306: Found timeutils.strtime(). " "Please use datetime.datetime.isoformat() or datetime.strftime()") if 'timeutils.strtime' in logical_line: yield(0, msg) def no_log_warn(logical_line): msg = "C307: LOG.warn is deprecated, please use LOG.warning!" if "LOG.warn(" in logical_line: yield (0, msg) def dict_constructor_with_list_copy(logical_line): msg = ("N336: Must use a dict comprehension instead of a dict constructor " "with a sequence of key-value pairs.") if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) def check_timeutils_isotime(logical_line): msg = ("C308: Found timeutils.isotime(). " "Please use datetime.datetime.isoformat()") if 'timeutils.isotime' in logical_line: yield(0, msg) def no_test_log(logical_line, filename, noqa): if "cinder/tests" not in filename or noqa: return # Skip the "integrated" tests for now if "cinder/tests/unit/integrated" in filename: return msg = "C309: Unit tests should not perform logging." if logging_instance.match(logical_line): yield (0, msg) def validate_assertIsNone(logical_line): if re.match(assert_None, logical_line): msg = ("C312: Unit tests should use assertIsNone(value) instead" " of using assertEqual(None, value).") yield(0, msg) def validate_assertTrue(logical_line): if re.match(assert_True, logical_line): msg = ("C313: Unit tests should use assertTrue(value) instead" " of using assertEqual(True, value).") yield(0, msg) def factory(register): register(no_vi_headers) register(no_translate_debug_logs) register(no_mutable_default_args) register(check_explicit_underscore_import) register(CheckForStrUnicodeExc) register(CheckLoggingFormatArgs) register(CheckOptRegistrationArgs) register(check_oslo_namespace_imports) register(check_datetime_now) register(check_timeutils_strtime) register(check_timeutils_isotime) register(validate_log_translations) register(check_unicode_usage) register(check_no_print_statements) register(check_no_log_audit) register(check_no_contextlib_nested) register(no_log_warn) register(dict_constructor_with_list_copy) register(no_test_log) register(validate_assertIsNone) register(validate_assertTrue) cinder-8.0.0/cinder/service.py0000664000567000056710000004421712701406250017407 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
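# NOTE: illustrative sketch only (not part of the original module), assuming
# the cinder-volume defaults - a binary builds and launches a Service roughly
# like this (see cinder/cmd/ for the real entry points):
#
#     server = Service.create(binary='cinder-volume',
#                             topic='cinder-volume')
#     launcher = get_launcher()
#     launcher.launch_service(server)
#     launcher.wait()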
"""Generic Node base class for all workers that run on hosts.""" import inspect import os import random from oslo_concurrency import processutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import loopingcall from oslo_service import service from oslo_service import wsgi from oslo_utils import importutils osprofiler_notifier = importutils.try_import('osprofiler.notifier') profiler = importutils.try_import('osprofiler.profiler') osprofiler_web = importutils.try_import('osprofiler.web') profiler_opts = importutils.try_import('osprofiler.opts') from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import objects from cinder.objects import base as objects_base from cinder import rpc from cinder import version LOG = logging.getLogger(__name__) service_opts = [ cfg.IntOpt('report_interval', default=10, help='Interval, in seconds, between nodes reporting state ' 'to datastore'), cfg.IntOpt('periodic_interval', default=60, help='Interval, in seconds, between running periodic tasks'), cfg.IntOpt('periodic_fuzzy_delay', default=60, help='Range, in seconds, to randomly delay when starting the' ' periodic task scheduler to reduce stampeding.' ' (Disable by setting to 0)'), cfg.StrOpt('osapi_volume_listen', default="0.0.0.0", help='IP address on which OpenStack Volume API listens'), cfg.PortOpt('osapi_volume_listen_port', default=8776, help='Port on which OpenStack Volume API listens'), cfg.IntOpt('osapi_volume_workers', help='Number of workers for OpenStack Volume API service. ' 'The default is equal to the number of CPUs available.'), ] CONF = cfg.CONF CONF.register_opts(service_opts) if profiler_opts: profiler_opts.set_defaults(CONF) def setup_profiler(binary, host): if (osprofiler_notifier is None or profiler is None or osprofiler_web is None or profiler_opts is None): LOG.debug('osprofiler is not present') return if CONF.profiler.enabled: _notifier = osprofiler_notifier.create( "Messaging", messaging, context.get_admin_context().to_dict(), rpc.TRANSPORT, "cinder", binary, host) osprofiler_notifier.set(_notifier) osprofiler_web.enable(CONF.profiler.hmac_keys) LOG.warning( _LW("OSProfiler is enabled.\nIt means that person who knows " "any of hmac_keys that are specified in " "/etc/cinder/cinder.conf can trace his requests. \n" "In real life only operator can read this file so there " "is no security issue. Note that even if person can " "trigger profiler, only admin user can retrieve trace " "information.\n" "To disable OSprofiler set in cinder.conf:\n" "[profiler]\nenabled=false")) else: osprofiler_web.disable() class Service(service.Service): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. It also periodically runs tasks on the manager and reports it state to the database services table. 
""" def __init__(self, host, binary, topic, manager, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, *args, **kwargs): super(Service, self).__init__() if not rpc.initialized(): rpc.init(CONF) self.host = host self.binary = binary self.topic = topic self.manager_class_name = manager manager_class = importutils.import_class(self.manager_class_name) if CONF.profiler.enabled: manager_class = profiler.trace_cls("rpc")(manager_class) self.manager = manager_class(host=self.host, service_name=service_name, *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay self.basic_config_check() self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] setup_profiler(binary, host) self.rpcserver = None def start(self): version_string = version.version_string() LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'), {'topic': self.topic, 'version_string': version_string}) self.model_disconnected = False self.manager.init_host() ctxt = context.get_admin_context() try: service_ref = objects.Service.get_by_args( ctxt, self.host, self.binary) service_ref.rpc_current_version = self.manager.RPC_API_VERSION obj_version = objects_base.OBJ_VERSIONS.get_current() service_ref.object_current_version = obj_version service_ref.save() self.service_id = service_ref.id except exception.NotFound: self._create_service_ref(ctxt) LOG.debug("Creating RPC server for service %s", self.topic) target = messaging.Target(topic=self.topic, server=self.host) endpoints = [self.manager] endpoints.extend(self.manager.additional_endpoints) serializer = objects_base.CinderObjectSerializer() self.rpcserver = rpc.get_server(target, endpoints, serializer) self.rpcserver.start() self.manager.init_host_with_rpc() if self.report_interval: pulse = loopingcall.FixedIntervalLoopingCall( self.report_state) pulse.start(interval=self.report_interval, initial_delay=self.report_interval) self.timers.append(pulse) if self.periodic_interval: if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None periodic = loopingcall.FixedIntervalLoopingCall( self.periodic_tasks) periodic.start(interval=self.periodic_interval, initial_delay=initial_delay) self.timers.append(periodic) def basic_config_check(self): """Perform basic config checks before starting service.""" # Make sure report interval is less than service down time if self.report_interval: if CONF.service_down_time <= self.report_interval: new_down_time = int(self.report_interval * 2.5) LOG.warning( _LW("Report interval must be less than service down " "time. Current config service_down_time: " "%(service_down_time)s, report_interval for this: " "service is: %(report_interval)s. 
Setting global " "service_down_time to: %(new_down_time)s"), {'service_down_time': CONF.service_down_time, 'report_interval': self.report_interval, 'new_down_time': new_down_time}) CONF.set_override('service_down_time', new_down_time) def _create_service_ref(self, context): zone = CONF.storage_availability_zone kwargs = { 'host': self.host, 'binary': self.binary, 'topic': self.topic, 'report_count': 0, 'availability_zone': zone, 'rpc_current_version': self.manager.RPC_API_VERSION, 'object_current_version': objects_base.OBJ_VERSIONS.get_current(), } service_ref = objects.Service(context=context, **kwargs) service_ref.create() self.service_id = service_ref.id def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None): """Instantiates class and passes back application object. :param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'cinder-' part :param manager: defaults to CONF._manager :param report_interval: defaults to CONF.report_interval :param periodic_interval: defaults to CONF.periodic_interval :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay """ if not host: host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary if not manager: subtopic = topic.rpartition('cinder-')[2] manager = CONF.get('%s_manager' % subtopic, None) if report_interval is None: report_interval = CONF.report_interval if periodic_interval is None: periodic_interval = CONF.periodic_interval if periodic_fuzzy_delay is None: periodic_fuzzy_delay = CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name) return service_obj def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. as we're shutting down anyway try: self.rpcserver.stop() except Exception: pass self.timers_skip = [] for x in self.timers: try: x.stop() except Exception: self.timers_skip.append(x) super(Service, self).stop(graceful=True) def wait(self): skip = getattr(self, 'timers_skip', []) for x in self.timers: if x not in skip: try: x.wait() except Exception: pass if self.rpcserver: self.rpcserver.wait() super(Service, self).wait() def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) def report_state(self): """Update the state of this service in the datastore.""" if not self.manager.is_working(): # NOTE(dulek): If manager reports a problem we're not sending # heartbeats - to indicate that service is actually down. LOG.error(_LE('Manager for service %(binary)s %(host)s is ' 'reporting problems, not sending heartbeat. 
' 'Service will appear "down".'), {'binary': self.binary, 'host': self.host}) return ctxt = context.get_admin_context() zone = CONF.storage_availability_zone try: try: service_ref = objects.Service.get_by_id(ctxt, self.service_id) except exception.NotFound: LOG.debug('The service database object disappeared, ' 'recreating it.') self._create_service_ref(ctxt) service_ref = objects.Service.get_by_id(ctxt, self.service_id) service_ref.report_count += 1 if zone != service_ref.availability_zone: service_ref.availability_zone = zone service_ref.save() # TODO(termie): make this pattern be more elegant. if getattr(self, 'model_disconnected', False): self.model_disconnected = False LOG.error(_LE('Recovered model server connection!')) except db_exc.DBConnectionError: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception(_LE('model server went away')) # NOTE(jsbryant) Other DB errors can happen in HA configurations. # such errors shouldn't kill this thread, so we handle them here. except db_exc.DBError: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception(_LE('DBError encountered: ')) except Exception: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception(_LE('Exception encountered: ')) def reset(self): self.manager.reset() super(Service, self).reset() class WSGIService(service.ServiceBase): """Provides ability to launch API from a 'paste' configuration.""" def __init__(self, name, loader=None): """Initialize, but do not start the WSGI server. :param name: The name of the WSGI server given to the loader. :param loader: Loads the WSGI application using the given name. :returns: None """ self.name = name self.manager = self._get_manager() self.loader = loader or wsgi.Loader(CONF) self.app = self.loader.load_app(name) self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") self.port = getattr(CONF, '%s_listen_port' % name, 0) self.workers = (getattr(CONF, '%s_workers' % name, None) or processutils.get_worker_count()) if self.workers and self.workers < 1: worker_name = '%s_workers' % name msg = (_("%(worker_name)s value of %(workers)d is invalid, " "must be greater than 0.") % {'worker_name': worker_name, 'workers': self.workers}) raise exception.InvalidInput(msg) setup_profiler(name, self.host) self.server = wsgi.Server(CONF, name, self.app, host=self.host, port=self.port) def _get_manager(self): """Initialize a Manager object appropriate for this service. Use the service name to look up a Manager subclass from the configuration and initialize an instance. If no class name is configured, just return None. :returns: a Manager instance, or None. """ fl = '%s_manager' % self.name if fl not in CONF: return None manager_class_name = CONF.get(fl, None) if not manager_class_name: return None manager_class = importutils.import_class(manager_class_name) return manager_class() def start(self): """Start serving this service using loaded configuration. Also, retrieve updated port number in case '0' was passed in, which indicates a random port should be used. :returns: None """ if self.manager: self.manager.init_host() self.server.start() self.port = self.server.port def stop(self): """Stop serving this API. :returns: None """ self.server.stop() def wait(self): """Wait for the service to stop serving this API. :returns: None """ self.server.wait() def reset(self): """Reset server greenpool size to default. 
:returns: None """ self.server.reset() def process_launcher(): return service.ProcessLauncher(CONF) # NOTE(vish): the global launcher is to maintain the existing # functionality of calling service.serve + # service.wait _launcher = None def serve(server, workers=None): global _launcher if _launcher: raise RuntimeError(_('serve() can only be called once')) _launcher = service.launch(CONF, server, workers=workers) def wait(): LOG.debug('Full set of CONF:') for flag in CONF: flag_get = CONF.get(flag, None) # hide flag contents from log if contains a password # should use secret flag when switch over to openstack-common if ("_password" in flag or "_key" in flag or (flag == "sql_connection" and ("mysql:" in flag_get or "postgresql:" in flag_get))): LOG.debug('%s : FLAG SET ', flag) else: LOG.debug('%(flag)s : %(flag_get)s', {'flag': flag, 'flag_get': flag_get}) try: _launcher.wait() except KeyboardInterrupt: _launcher.stop() rpc.cleanup() class Launcher(object): def __init__(self): self.launch_service = serve self.wait = wait def get_launcher(): # Note(lpetrut): ProcessLauncher uses green pipes which fail on Windows # due to missing support of non-blocking I/O pipes. For this reason, the # service must be spawned differently on Windows, using the ServiceLauncher # class instead. if os.name == 'nt': return Launcher() else: return process_launcher() cinder-8.0.0/cinder/keymgr/0000775000567000056710000000000012701406543016670 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/keymgr/conf_key_mgr.py0000664000567000056710000001132012701406250021674 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ An implementation of a key manager that reads its key from the project's configuration options. This key manager implementation provides limited security, assuming that the key remains secret. Using the volume encryption feature as an example, encryption provides protection against a lost or stolen disk, assuming that the configuration file that contains the key is not stored on the disk. Encryption also protects the confidentiality of data as it is transmitted via iSCSI from the compute host to the storage host (again assuming that an attacker who intercepts the data does not know the secret key). Because this implementation uses a single, fixed key, it proffers no protection once that key is compromised. In particular, different volumes encrypted with a key provided by this key manager actually share the same encryption key so *any* volume can be decrypted once the fixed key is known. 
""" import array import binascii from oslo_config import cfg from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LW from cinder.keymgr import key from cinder.keymgr import key_mgr key_mgr_opts = [ cfg.StrOpt('fixed_key', help='Fixed key returned by key manager, specified in hex'), ] CONF = cfg.CONF CONF.register_opts(key_mgr_opts, group='keymgr') LOG = logging.getLogger(__name__) class ConfKeyManager(key_mgr.KeyManager): """Key Manager that supports one key defined by the fixed_key conf option. This key manager implementation supports all the methods specified by the key manager interface. This implementation creates a single key in response to all invocations of create_key. Side effects (e.g., raising exceptions) for each method are handled as specified by the key manager interface. """ def __init__(self): super(ConfKeyManager, self).__init__() self.key_id = '00000000-0000-0000-0000-000000000000' def _generate_key(self, **kwargs): _hex = self._generate_hex_key(**kwargs) key_list = array.array('B', binascii.unhexlify(_hex)).tolist() return key.SymmetricKey('AES', key_list) def _generate_hex_key(self, **kwargs): if CONF.keymgr.fixed_key is None: LOG.warning( _LW('config option keymgr.fixed_key has not been defined:' ' some operations may fail unexpectedly')) raise ValueError(_('keymgr.fixed_key not defined')) return CONF.keymgr.fixed_key def create_key(self, ctxt, **kwargs): """Creates a key. This implementation returns a UUID for the created key. A NotAuthorized exception is raised if the specified context is None. """ if ctxt is None: raise exception.NotAuthorized() return self.key_id def store_key(self, ctxt, key, **kwargs): """Stores (i.e., registers) a key with the key manager.""" if ctxt is None: raise exception.NotAuthorized() if key != self._generate_key(): raise exception.KeyManagerError( reason="cannot store arbitrary keys") return self.key_id def copy_key(self, ctxt, key_id, **kwargs): if ctxt is None: raise exception.NotAuthorized() return self.key_id def get_key(self, ctxt, key_id, **kwargs): """Retrieves the key identified by the specified id. This implementation returns the key that is associated with the specified UUID. A NotAuthorized exception is raised if the specified context is None; a KeyError is raised if the UUID is invalid. """ if ctxt is None: raise exception.NotAuthorized() if key_id != self.key_id: raise KeyError(key_id) return self._generate_key() def delete_key(self, ctxt, key_id, **kwargs): if ctxt is None: raise exception.NotAuthorized() if key_id != self.key_id: raise exception.KeyManagerError( reason="cannot delete non-existent key") LOG.warning(_LW("Not deleting key %s"), key_id) cinder-8.0.0/cinder/keymgr/__init__.py0000664000567000056710000000206712701406250021001 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_utils import importutils keymgr_opts = [ cfg.StrOpt('api_class', default='cinder.keymgr.conf_key_mgr.ConfKeyManager', help='The full class name of the key manager API class'), ] CONF = cfg.CONF CONF.register_opts(keymgr_opts, group='keymgr') def API(): cls = importutils.import_class(CONF.keymgr.api_class) return cls() cinder-8.0.0/cinder/keymgr/key.py0000664000567000056710000000503312701406250020026 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base Key and SymmetricKey Classes This module defines the Key and SymmetricKey classes. The Key class is the base class to represent all encryption keys. The basis for this class was copied from Java. """ import abc import six @six.add_metaclass(abc.ABCMeta) class Key(object): """Base class to represent all keys.""" @abc.abstractmethod def get_algorithm(self): """Returns the key's algorithm. Returns the key's algorithm. For example, "DSA" indicates that this key is a DSA key and "AES" indicates that this key is an AES key. """ pass @abc.abstractmethod def get_format(self): """Returns the encoding format. Returns the key's encoding format or None if this key is not encoded. """ pass @abc.abstractmethod def get_encoded(self): """Returns the key in the format specified by its encoding.""" pass class SymmetricKey(Key): """This class represents symmetric keys.""" def __init__(self, alg, key): """Create a new SymmetricKey object. The arguments specify the algorithm for the symmetric encryption and the bytes for the key. """ self.alg = alg self.key = key def get_algorithm(self): """Returns the algorithm for symmetric encryption.""" return self.alg def get_format(self): """This method returns 'RAW'.""" return "RAW" def get_encoded(self): """Returns the key in its encoded format.""" return self.key def __eq__(self, other): if isinstance(other, SymmetricKey): return (self.alg == other.alg and self.key == other.key) return NotImplemented def __ne__(self, other): result = self.__eq__(other) if result is NotImplemented: return result return not result cinder-8.0.0/cinder/keymgr/not_implemented_key_mgr.py0000664000567000056710000000256312701406250024143 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
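# --- Editor's note: illustrative usage sketch (not part of cinder-8.0.0) ---
# keymgr.API() above is a small factory: it imports whatever class
# CONF.keymgr.api_class names (ConfKeyManager by default) and instantiates
# it. Together with the SymmetricKey container from key.py, a round trip
# looks roughly like this, assuming [keymgr] fixed_key is set in cinder.conf
# and `ctxt` is a valid, non-None request context:
#
#     from cinder import keymgr
#
#     key_manager = keymgr.API()              # ConfKeyManager by default
#     key_id = key_manager.create_key(ctxt)   # always the all-zero UUID
#     key = key_manager.get_key(ctxt, key_id)
#     assert key.get_algorithm() == 'AES'
#     assert key.get_format() == 'RAW'
#     key_bytes = key.get_encoded()           # ints decoded from fixed_key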
""" Key manager implementation that raises NotImplementedError """ from cinder.keymgr import key_mgr class NotImplementedKeyManager(key_mgr.KeyManager): """Key Manager interface that raises NotImplementedError""" def create_key(self, ctxt, algorithm='AES', length=256, expiration=None, **kwargs): raise NotImplementedError() def store_key(self, ctxt, key, expiration=None, **kwargs): raise NotImplementedError() def copy_key(self, ctxt, key_id, **kwargs): raise NotImplementedError() def get_key(self, ctxt, key_id, **kwargs): raise NotImplementedError() def delete_key(self, ctxt, key_id, **kwargs): raise NotImplementedError() cinder-8.0.0/cinder/keymgr/key_mgr.py0000664000567000056710000001023212701406250020670 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Key manager API """ import abc from oslo_config import cfg import six encryption_opts = [ cfg.StrOpt('encryption_auth_url', default='http://localhost:5000/v3', help='Authentication url for encryption service.'), cfg.StrOpt('encryption_api_url', default='http://localhost:9311/v1', help='Url for encryption service.'), ] CONF = cfg.CONF CONF.register_opts(encryption_opts, group='keymgr') @six.add_metaclass(abc.ABCMeta) class KeyManager(object): """Base Key Manager Interface A Key Manager is responsible for managing encryption keys for volumes. A Key Manager is responsible for creating, reading, and deleting keys. """ @abc.abstractmethod def create_key(self, ctxt, algorithm='AES', length=256, expiration=None, **kwargs): """Creates a key. This method creates a key and returns the key's UUID. If the specified context does not permit the creation of keys, then a NotAuthorized exception should be raised. """ pass @abc.abstractmethod def store_key(self, ctxt, key, expiration=None, **kwargs): """Stores (i.e., registers) a key with the key manager. This method stores the specified key and returns its UUID that identifies it within the key manager. If the specified context does not permit the creation of keys, then a NotAuthorized exception should be raised. """ pass @abc.abstractmethod def copy_key(self, ctxt, key_id, **kwargs): """Copies (i.e., clones) a key stored by the key manager. This method copies the specified key and returns the copy's UUID. If the specified context does not permit copying keys, then a NotAuthorized error should be raised. Implementation note: This method should behave identically to store_key(context, get_key(context, )) although it is preferable to perform this operation within the key manager to avoid unnecessary handling of the key material. """ pass @abc.abstractmethod def get_key(self, ctxt, key_id, **kwargs): """Retrieves the specified key. Implementations should verify that the caller has permissions to retrieve the key by checking the context object passed in as ctxt. If the user lacks permission then a NotAuthorized exception is raised. 
If the specified key does not exist, then a KeyError should be raised. Implementations should preclude users from discerning the UUIDs of keys that belong to other users by repeatedly calling this method. That is, keys that belong to other users should be considered "non- existent" and completely invisible. """ pass @abc.abstractmethod def delete_key(self, ctxt, key_id, **kwargs): """Deletes the specified key. Implementations should verify that the caller has permission to delete the key by checking the context object (ctxt). A NotAuthorized exception should be raised if the caller lacks permission. If the specified key does not exist, then a KeyError should be raised. Implementations should preclude users from discerning the UUIDs of keys that belong to other users by repeatedly calling this method. That is, keys that belong to other users should be considered "non- existent" and completely invisible. """ pass cinder-8.0.0/cinder/keymgr/barbican.py0000664000567000056710000003412712701406250021005 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Key manager implementation for Barbican """ import array import base64 import binascii import re from barbicanclient import client as barbican_client from keystoneclient.auth import identity from keystoneclient import session from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _, _LE from cinder.keymgr import key as keymgr_key from cinder.keymgr import key_mgr CONF = cfg.CONF CONF.import_opt('encryption_auth_url', 'cinder.keymgr.key_mgr', group='keymgr') CONF.import_opt('encryption_api_url', 'cinder.keymgr.key_mgr', group='keymgr') LOG = logging.getLogger(__name__) URL_PATTERN = re.compile( "(?Phttp[s]?://[^/]*)[/]?(?P(v[0-9.]+)?).*") class BarbicanKeyManager(key_mgr.KeyManager): """Key Manager Interface that wraps the Barbican client API.""" def __init__(self): self._base_url = CONF.keymgr.encryption_api_url self._parse_barbican_api_url() self._barbican_client = None self._current_context = None def _parse_barbican_api_url(self): """Setup member variables to reference the Barbican URL. The key manipulation functions in this module need to use the barbican URL with the version appended. But the barbicanclient Client() class needs the URL without the version appended. So set up a member variables here for each case. """ m = URL_PATTERN.search(self._base_url) if m is None: raise exception.KeyManagerError(_( "Invalid url: must be in the form " "'http[s]://|[:port]/', " "url specified is: %s"), self._base_url) url_info = dict(m.groupdict()) if 'url_version' not in url_info or url_info['url_version'] == "": raise exception.KeyManagerError(_( "Invalid barbican api url: version is required, " "e.g. 'http[s]://|[:port]/' " "url specified is: %s") % self._base_url) # We will also need the barbican API URL without the '/v1'. 
# So save that now. self._barbican_endpoint = url_info['url_base'] def _get_barbican_client(self, ctxt): """Creates a client to connect to the Barbican service. :param ctxt: the user context for authentication :return: a Barbican Client object :throws NotAuthorized: if the ctxt is None :throws KeyManagerError: if ctxt is missing project_id or project_id is None """ # Confirm context is provided, if not raise not authorized if not ctxt: msg = _("User is not authorized to use key manager.") LOG.error(msg) raise exception.NotAuthorized(msg) if not hasattr(ctxt, 'project_id') or ctxt.project_id is None: msg = _("Unable to create Barbican Client without project_id.") LOG.error(msg) raise exception.KeyManagerError(msg) # If same context, return cached barbican client if self._barbican_client and self._current_context == ctxt: return self._barbican_client try: auth = identity.v3.Token( auth_url=CONF.keymgr.encryption_auth_url, token=ctxt.auth_token, project_id=ctxt.project_id) sess = session.Session(auth=auth) self._barbican_client = barbican_client.Client( session=sess, endpoint=self._barbican_endpoint) self._current_context = ctxt except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error creating Barbican client.")) return self._barbican_client def create_key(self, ctxt, expiration=None, name='Cinder Volume Key', payload_content_type='application/octet-stream', mode='CBC', algorithm='AES', length=256): """Creates a key. :param ctxt: contains information of the user and the environment for the request (cinder/context.py) :param expiration: the date the key will expire :param name: a friendly name for the secret :param payload_content_type: the format/type of the secret data :param mode: the algorithm mode (e.g. CBC or CTR mode) :param algorithm: the algorithm associated with the secret :param length: the bit length of the secret :return: the UUID of the new key :throws Exception: if key creation fails """ barbican_client = self._get_barbican_client(ctxt) try: key_order = barbican_client.orders.create_key( name, algorithm, length, mode, payload_content_type, expiration) order_ref = key_order.submit() order = barbican_client.orders.get(order_ref) secret_uuid = order.secret_ref.rpartition('/')[2] return secret_uuid except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error creating key.")) def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key', payload_content_type='application/octet-stream', payload_content_encoding='base64', algorithm='AES', bit_length=256, mode='CBC', from_copy=False): """Stores (i.e., registers) a key with the key manager. :param ctxt: contains information of the user and the environment for the request (cinder/context.py) :param key: the unencrypted secret data. Known as "payload" to the barbicanclient api :param expiration: the expiration time of the secret in ISO 8601 format :param name: a friendly name for the key :param payload_content_type: the format/type of the secret data :param payload_content_encoding: the encoding of the secret data :param algorithm: the algorithm associated with this secret key :param bit_length: the bit length of this secret key :param mode: the algorithm mode used with this secret key :param from_copy: establishes whether the function is being used to copy a key. 
                          In case of the latter, it does not try to decode
                          the key.
        :returns: the UUID of the stored key
        :throws Exception: if key storage fails
        """
        barbican_client = self._get_barbican_client(ctxt)

        try:
            if key.get_algorithm():
                algorithm = key.get_algorithm()
            if payload_content_type == 'text/plain':
                payload_content_encoding = None
                encoded_key = key.get_encoded()
            elif (payload_content_type == 'application/octet-stream' and
                    not from_copy):
                key_list = key.get_encoded()
                string_key = ''.join(map(lambda byte: "%02x" % byte,
                                         key_list))
                encoded_key = base64.b64encode(binascii.unhexlify(string_key))
            else:
                encoded_key = key.get_encoded()
            secret = barbican_client.secrets.create(name, encoded_key,
                                                    payload_content_type,
                                                    payload_content_encoding,
                                                    algorithm, bit_length,
                                                    None, mode, expiration)
            secret_ref = secret.store()
            secret_uuid = secret_ref.rpartition('/')[2]
            return secret_uuid
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error storing key."))

    def copy_key(self, ctxt, key_id):
        """Copies (i.e., clones) a key stored by barbican.

        :param ctxt: contains information of the user and the environment
                     for the request (cinder/context.py)
        :param key_id: the UUID of the key to copy
        :return: the UUID of the key copy
        :throws Exception: if key copying fails
        """
        barbican_client = self._get_barbican_client(ctxt)

        try:
            secret_ref = self._create_secret_ref(key_id, barbican_client)
            secret = self._get_secret(ctxt, secret_ref)
            con_type = secret.content_types['default']
            secret_data = self._get_secret_data(secret,
                                                payload_content_type=con_type)
            key = keymgr_key.SymmetricKey(secret.algorithm, secret_data)
            copy_uuid = self.store_key(ctxt, key, secret.expiration,
                                       secret.name, con_type,
                                       'base64',
                                       secret.algorithm, secret.bit_length,
                                       secret.mode, True)
            return copy_uuid
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error copying key."))

    def _create_secret_ref(self, key_id, barbican_client):
        """Creates the URL required for accessing a secret.

        :param key_id: the UUID of the key
        :param barbican_client: barbican key manager object
        :return: the URL of the requested secret
        """
        if not key_id:
            msg = "Key ID is None"
            raise exception.KeyManagerError(msg)
        return self._base_url + "/secrets/" + key_id

    def _get_secret_data(self, secret,
                         payload_content_type='application/octet-stream'):
        """Retrieves the secret data given a secret and a content type.

        :param secret: the barbican secret object whose payload is wanted
        :param payload_content_type: the format/type of the secret data
        :returns: the secret data
        :throws Exception: if data cannot be retrieved
        """
        try:
            generated_data = secret.payload
            if payload_content_type == 'application/octet-stream':
                secret_data = base64.b64encode(generated_data)
            else:
                secret_data = generated_data
            return secret_data
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting secret data."))

    def _get_secret(self, ctxt, secret_ref):
        """Retrieves the metadata of the secret at the given reference.

        :param ctxt: contains information of the user and the environment
                     for the request (cinder/context.py)
        :param secret_ref: URL to access the secret
        :return: the secret's metadata
        :throws Exception: if there is an error retrieving the data
        """
        barbican_client = self._get_barbican_client(ctxt)

        try:
            return barbican_client.secrets.get(secret_ref)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting secret metadata."))

    def get_key(self, ctxt, key_id,
                payload_content_type='application/octet-stream'):
        """Retrieves the specified key.

        :param ctxt: contains information of the user and the environment
                     for the request (cinder/context.py)
        :param key_id: the UUID of the key to retrieve
        :param payload_content_type: the format/type of the secret data
        :return: SymmetricKey representation of the key
        :throws Exception: if key retrieval fails
        """
        # Fetch the client first; barbican_client is referenced below.
        barbican_client = self._get_barbican_client(ctxt)

        try:
            secret_ref = self._create_secret_ref(key_id, barbican_client)
            secret = self._get_secret(ctxt, secret_ref)
            secret_data = self._get_secret_data(secret,
                                                payload_content_type)
            if payload_content_type == 'application/octet-stream':
                # convert decoded string to list of unsigned ints for each
                # byte
                key_data = array.array(
                    'B', base64.b64decode(secret_data)).tolist()
            else:
                key_data = secret_data
            key = keymgr_key.SymmetricKey(secret.algorithm, key_data)
            return key
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting key."))

    def delete_key(self, ctxt, key_id):
        """Deletes the specified key.

        :param ctxt: contains information of the user and the environment
                     for the request (cinder/context.py)
        :param key_id: the UUID of the key to delete
        :throws Exception: if key deletion fails
        """
        barbican_client = self._get_barbican_client(ctxt)

        try:
            secret_ref = self._create_secret_ref(key_id, barbican_client)
            barbican_client.secrets.delete(secret_ref)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error deleting key."))

cinder-8.0.0/cinder/exception.py

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Cinder base exception handling.

Includes decorator for re-raising Cinder-type exceptions.

SHOULD include dedicated exception logging.
""" import sys from oslo_config import cfg from oslo_log import log as logging from oslo_versionedobjects import exception as obj_exc import six import webob.exc from webob.util import status_generic_reasons from webob.util import status_reasons from cinder.i18n import _, _LE LOG = logging.getLogger(__name__) exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='Make exception message format errors fatal.'), ] CONF = cfg.CONF CONF.register_opts(exc_log_opts) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=500, title="", explanation=""): self.code = code # There is a strict rule about constructing status line for HTTP: # '...Status-Line, consisting of the protocol version followed by a # numeric status code and its associated textual phrase, with each # element separated by SP characters' # (http://www.faqs.org/rfcs/rfc2616.html) # 'code' and 'title' can not be empty because they correspond # to numeric status code and its associated text if title: self.title = title else: try: self.title = status_reasons[self.code] except KeyError: generic_code = self.code // 100 self.title = status_generic_reasons[generic_code] self.explanation = explanation super(ConvertedException, self).__init__() class Error(Exception): pass class CinderException(Exception): """Base Cinder Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs self.kwargs['message'] = message if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass for k, v in self.kwargs.items(): if isinstance(v, Exception): self.kwargs[k] = six.text_type(v) if self._should_format(): try: message = self.message % kwargs except Exception: exc_info = sys.exc_info() # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_LE('Exception in string format operation')) for name, value in kwargs.items(): LOG.error(_LE("%(name)s: %(value)s"), {'name': name, 'value': value}) if CONF.fatal_exception_format_errors: six.reraise(*exc_info) # at least get the core message out if something happened message = self.message elif isinstance(message, Exception): message = six.text_type(message) # NOTE(luisg): We put the actual message in 'msg' so that we can access # it, because if we try to access the message via 'message' it will be # overshadowed by the class' message attribute self.msg = message super(CinderException, self).__init__(message) def _should_format(self): return self.kwargs['message'] is None or '%(message)' in self.message def __unicode__(self): return six.text_type(self.msg) class VolumeBackendAPIException(CinderException): message = _("Bad or unexpected response from the storage volume " "backend API: %(data)s") class VolumeDriverException(CinderException): message = _("Volume driver reported an error: %(message)s") class BackupDriverException(CinderException): message = _("Backup driver reported an error: %(message)s") class GlanceConnectionFailed(CinderException): message = _("Connection to glance failed: %(reason)s") class NotAuthorized(CinderException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't 
allow %(action)s to be performed.") class ImageNotAuthorized(CinderException): message = _("Not authorized for image %(image_id)s.") class DriverNotInitialized(CinderException): message = _("Volume driver not ready.") class Invalid(CinderException): message = _("Unacceptable parameters.") code = 400 class InvalidSnapshot(Invalid): message = _("Invalid snapshot: %(reason)s") class InvalidVolumeAttachMode(Invalid): message = _("Invalid attaching mode '%(mode)s' for " "volume %(volume_id)s.") class VolumeAttached(Invalid): message = _("Volume %(volume_id)s is still attached, detach volume first.") class InvalidResults(Invalid): message = _("The results are invalid.") class InvalidInput(Invalid): message = _("Invalid input received: %(reason)s") class InvalidVolumeType(Invalid): message = _("Invalid volume type: %(reason)s") class InvalidVolume(Invalid): message = _("Invalid volume: %(reason)s") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidHost(Invalid): message = _("Invalid host: %(reason)s") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class InvalidAuthKey(Invalid): message = _("Invalid auth key: %(reason)s") class InvalidConfigurationValue(Invalid): message = _('Value "%(value)s" is not valid for ' 'configuration option "%(option)s"') class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") class DeviceUnavailable(Invalid): message = _("The device in the path %(path)s is unavailable: %(reason)s") class InvalidUUID(Invalid): message = _("Expected a uuid but received %(uuid)s.") class InvalidAPIVersionString(Invalid): message = _("API Version String %(version)s is of invalid format. Must " "be of format MajorNum.MinorNum.") class VersionNotFoundForAPIMethod(Invalid): message = _("API version %(version)s is not supported on this method.") class InvalidGlobalAPIVersion(Invalid): message = _("Version %(req_ver)s is not supported by the API. 
Minimum " "is %(min_ver)s and maximum is %(max_ver)s.") class APIException(CinderException): message = _("Error while requesting %(service)s API.") def __init__(self, message=None, **kwargs): if 'service' not in kwargs: kwargs['service'] = 'unknown' super(APIException, self).__init__(message, **kwargs) class APITimeout(APIException): message = _("Timeout while requesting %(service)s API.") class RPCTimeout(CinderException): message = _("Timeout while requesting capabilities from backend " "%(service)s.") code = 502 class NotFound(CinderException): message = _("Resource could not be found.") code = 404 safe = True class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class VolumeAttachmentNotFound(NotFound): message = _("Volume attachment could not be found with " "filter: %(filter)s .") class VolumeMetadataNotFound(NotFound): message = _("Volume %(volume_id)s has no metadata with " "key %(metadata_key)s.") class VolumeAdminMetadataNotFound(NotFound): message = _("Volume %(volume_id)s has no administration metadata with " "key %(metadata_key)s.") class InvalidVolumeMetadata(Invalid): message = _("Invalid metadata: %(reason)s") class InvalidVolumeMetadataSize(Invalid): message = _("Invalid metadata size: %(reason)s") class SnapshotMetadataNotFound(NotFound): message = _("Snapshot %(snapshot_id)s has no metadata with " "key %(metadata_key)s.") class VolumeTypeNotFound(NotFound): message = _("Volume type %(volume_type_id)s could not be found.") class VolumeTypeNotFoundByName(VolumeTypeNotFound): message = _("Volume type with name %(volume_type_name)s " "could not be found.") class VolumeTypeAccessNotFound(NotFound): message = _("Volume type access not found for %(volume_type_id)s / " "%(project_id)s combination.") class VolumeTypeExtraSpecsNotFound(NotFound): message = _("Volume Type %(volume_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class VolumeTypeInUse(CinderException): message = _("Volume Type %(volume_type_id)s deletion is not allowed with " "volumes present with the type.") class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class ServerNotFound(NotFound): message = _("Instance %(uuid)s could not be found.") class VolumeIsBusy(CinderException): message = _("deleting volume %(volume_name)s that has snapshot") class SnapshotIsBusy(CinderException): message = _("deleting snapshot %(snapshot_name)s that has " "dependent volumes") class ISCSITargetNotFoundForVolume(NotFound): message = _("No target id found for volume %(volume_id)s.") class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") class ServiceNotFound(NotFound): def __init__(self, message=None, **kwargs): if kwargs.get('host', None): self.message = _("Service %(service_id)s could not be " "found on host %(host)s.") else: self.message = _("Service %(service_id)s could not be found.") super(ServiceNotFound, self).__init__(None, **kwargs) class ServiceTooOld(Invalid): message = _("Service is too old to fulfil this request.") class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") class SchedulerHostFilterNotFound(NotFound): message = _("Scheduler Host Filter %(filter_name)s could not be found.") class SchedulerHostWeigherNotFound(NotFound): message = _("Scheduler Host Weigher %(weigher_name)s could not be found.") class InvalidReservationExpiration(Invalid): message = _("Invalid reservation expiration 
%(expire)s.") class InvalidQuotaValue(Invalid): message = _("Change would make usage less than 0 for the following " "resources: %(unders)s") class InvalidNestedQuotaSetup(CinderException): message = _("Project quotas are not properly setup for nested quotas: " "%(reason)s.") class QuotaNotFound(NotFound): message = _("Quota could not be found") class QuotaResourceUnknown(QuotaNotFound): message = _("Unknown quota resources %(unknown)s.") class ProjectQuotaNotFound(QuotaNotFound): message = _("Quota for project %(project_id)s could not be found.") class QuotaClassNotFound(QuotaNotFound): message = _("Quota class %(class_name)s could not be found.") class QuotaUsageNotFound(QuotaNotFound): message = _("Quota usage for project %(project_id)s could not be found.") class ReservationNotFound(QuotaNotFound): message = _("Quota reservation %(uuid)s could not be found.") class OverQuota(CinderException): message = _("Quota exceeded for resources: %(overs)s") class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") class Duplicate(CinderException): pass class VolumeTypeExists(Duplicate): message = _("Volume Type %(id)s already exists.") class VolumeTypeAccessExists(Duplicate): message = _("Volume type access for %(volume_type_id)s / " "%(project_id)s combination already exists.") class VolumeTypeEncryptionExists(Invalid): message = _("Volume type encryption for type %(type_id)s already exists.") class VolumeTypeEncryptionNotFound(NotFound): message = _("Volume type encryption for type %(type_id)s does not exist.") class MalformedRequestBody(CinderException): message = _("Malformed message body: %(reason)s") class ConfigNotFound(NotFound): message = _("Could not find config at %(path)s") class ParameterNotFound(NotFound): message = _("Could not find parameter %(param)s") class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") class NoValidHost(CinderException): message = _("No valid host was found. %(reason)s") class NoMoreTargets(CinderException): """No more available targets.""" pass class QuotaError(CinderException): message = _("Quota exceeded: code=%(code)s") code = 413 headers = {'Retry-After': '0'} safe = True class VolumeSizeExceedsAvailableQuota(QuotaError): message = _("Requested volume or snapshot exceeds allowed %(name)s " "quota. Requested %(requested)sG, quota is %(quota)sG and " "%(consumed)sG has been consumed.") def __init__(self, message=None, **kwargs): kwargs.setdefault('name', 'gigabytes') super(VolumeSizeExceedsAvailableQuota, self).__init__( message, **kwargs) class VolumeSizeExceedsLimit(QuotaError): message = _("Requested volume size %(size)d is larger than " "maximum allowed limit %(limit)d.") class VolumeBackupSizeExceedsAvailableQuota(QuotaError): message = _("Requested backup exceeds allowed Backup gigabytes " "quota. 
Requested %(requested)sG, quota is %(quota)sG and " "%(consumed)sG has been consumed.") class VolumeLimitExceeded(QuotaError): message = _("Maximum number of volumes allowed (%(allowed)d) exceeded for " "quota '%(name)s'.") def __init__(self, message=None, **kwargs): kwargs.setdefault('name', 'volumes') super(VolumeLimitExceeded, self).__init__(message, **kwargs) class SnapshotLimitExceeded(QuotaError): message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded") class BackupLimitExceeded(QuotaError): message = _("Maximum number of backups allowed (%(allowed)d) exceeded") class DuplicateSfVolumeNames(Duplicate): message = _("Detected more than one volume with name %(vol_name)s") class VolumeTypeCreateFailed(CinderException): message = _("Cannot create volume_type with " "name %(name)s and specs %(extra_specs)s") class VolumeTypeUpdateFailed(CinderException): message = _("Cannot update volume_type %(id)s") class UnknownCmd(VolumeDriverException): message = _("Unknown or unsupported command %(cmd)s") class MalformedResponse(VolumeDriverException): message = _("Malformed response to command %(cmd)s: %(reason)s") class FailedCmdWithDump(VolumeDriverException): message = _("Operation failed with status=%(status)s. Full dump: %(data)s") class InvalidConnectorException(VolumeDriverException): message = _("Connector doesn't have required information: %(missing)s") class GlanceMetadataExists(Invalid): message = _("Glance metadata cannot be updated, key %(key)s" " exists for volume id %(volume_id)s") class GlanceMetadataNotFound(NotFound): message = _("Glance metadata for volume/snapshot %(id)s cannot be found.") class ExportFailure(Invalid): message = _("Failed to export for volume: %(reason)s") class RemoveExportException(VolumeDriverException): message = _("Failed to remove export for volume %(volume)s: %(reason)s") class MetadataCreateFailure(Invalid): message = _("Failed to create metadata for volume: %(reason)s") class MetadataUpdateFailure(Invalid): message = _("Failed to update metadata for volume: %(reason)s") class MetadataCopyFailure(Invalid): message = _("Failed to copy metadata to volume: %(reason)s") class InvalidMetadataType(Invalid): message = _("The type of metadata: %(metadata_type)s for volume/snapshot " "%(id)s is invalid.") class ImageCopyFailure(Invalid): message = _("Failed to copy image to volume: %(reason)s") class BackupInvalidCephArgs(BackupDriverException): message = _("Invalid Ceph args provided for backup rbd operation") class BackupOperationError(Invalid): message = _("An error has occurred during backup operation") class BackupMetadataUnsupportedVersion(BackupDriverException): message = _("Unsupported backup metadata version requested") class BackupVerifyUnsupportedDriver(BackupDriverException): message = _("Unsupported backup verify driver") class VolumeMetadataBackupExists(BackupDriverException): message = _("Metadata backup already exists for this volume") class BackupRBDOperationFailed(BackupDriverException): message = _("Backup RBD operation failed") class EncryptedBackupOperationFailed(BackupDriverException): message = _("Backup operation of an encrypted volume failed.") class BackupNotFound(NotFound): message = _("Backup %(backup_id)s could not be found.") class BackupFailedToGetVolumeBackend(NotFound): message = _("Failed to identify volume backend.") class InvalidBackup(Invalid): message = _("Invalid backup: %(reason)s") class SwiftConnectionFailed(BackupDriverException): message = _("Connection to swift failed: %(reason)s") class 
TransferNotFound(NotFound): message = _("Transfer %(transfer_id)s could not be found.") class VolumeMigrationFailed(CinderException): message = _("Volume migration failed: %(reason)s") class SSHInjectionThreat(CinderException): message = _("SSH command injection detected: %(command)s") class QoSSpecsExists(Duplicate): message = _("QoS Specs %(specs_id)s already exists.") class QoSSpecsCreateFailed(CinderException): message = _("Failed to create qos_specs: " "%(name)s with specs %(qos_specs)s.") class QoSSpecsUpdateFailed(CinderException): message = _("Failed to update qos_specs: " "%(specs_id)s with specs %(qos_specs)s.") class QoSSpecsNotFound(NotFound): message = _("No such QoS spec %(specs_id)s.") class QoSSpecsAssociateFailed(CinderException): message = _("Failed to associate qos_specs: " "%(specs_id)s with type %(type_id)s.") class QoSSpecsDisassociateFailed(CinderException): message = _("Failed to disassociate qos_specs: " "%(specs_id)s with type %(type_id)s.") class QoSSpecsKeyNotFound(NotFound): message = _("QoS spec %(specs_id)s has no spec with " "key %(specs_key)s.") class InvalidQoSSpecs(Invalid): message = _("Invalid qos specs: %(reason)s") class QoSSpecsInUse(CinderException): message = _("QoS Specs %(specs_id)s is still associated with entities.") class KeyManagerError(CinderException): message = _("key manager error: %(reason)s") class ManageExistingInvalidReference(CinderException): message = _("Manage existing volume failed due to invalid backend " "reference %(existing_ref)s: %(reason)s") class ManageExistingAlreadyManaged(CinderException): message = _("Unable to manage existing volume. " "Volume %(volume_ref)s already managed.") class InvalidReplicationTarget(Invalid): message = _("Invalid Replication Target: %(reason)s") class UnableToFailOver(CinderException): message = _("Unable to failover to replication target:" "%(reason)s).") class ReplicationError(CinderException): message = _("Volume %(volume_id)s replication " "error: %(reason)s") class ReplicationNotFound(NotFound): message = _("Volume replication for %(volume_id)s " "could not be found.") class ManageExistingVolumeTypeMismatch(CinderException): message = _("Manage existing volume failed due to volume type mismatch: " "%(reason)s") class ExtendVolumeError(CinderException): message = _("Error extending volume: %(reason)s") class EvaluatorParseException(Exception): message = _("Error during evaluator parsing: %(reason)s") class LockCreationFailed(CinderException): message = _('Unable to create lock. 
Coordination backend not started.') class LockingFailed(CinderException): message = _('Lock acquisition failed.') UnsupportedObjectError = obj_exc.UnsupportedObjectError OrphanedObjectError = obj_exc.OrphanedObjectError IncompatibleObjectVersion = obj_exc.IncompatibleObjectVersion ReadOnlyFieldError = obj_exc.ReadOnlyFieldError ObjectActionError = obj_exc.ObjectActionError ObjectFieldInvalid = obj_exc.ObjectFieldInvalid class VolumeGroupNotFound(CinderException): message = _('Unable to find Volume Group: %(vg_name)s') class VolumeGroupCreationFailed(CinderException): message = _('Failed to create Volume Group: %(vg_name)s') class VolumeDeviceNotFound(CinderException): message = _('Volume device not found at %(device)s.') # Driver specific exceptions # Pure Storage class PureDriverException(VolumeDriverException): message = _("Pure Storage Cinder driver failure: %(reason)s") # SolidFire class SolidFireAPIException(VolumeBackendAPIException): message = _("Bad response from SolidFire API") class SolidFireDriverException(VolumeDriverException): message = _("SolidFire Cinder Driver exception") class SolidFireAPIDataException(SolidFireAPIException): message = _("Error in SolidFire API response: data=%(data)s") class SolidFireAccountNotFound(SolidFireDriverException): message = _("Unable to locate account %(account_name)s on " "Solidfire device") class SolidFireRetryableException(VolumeBackendAPIException): message = _("Retryable SolidFire Exception encountered") # HP 3Par class Invalid3PARDomain(VolumeDriverException): message = _("Invalid 3PAR Domain: %(err)s") # RemoteFS drivers class RemoteFSException(VolumeDriverException): message = _("Unknown RemoteFS exception") class RemoteFSConcurrentRequest(RemoteFSException): message = _("A concurrent, possibly contradictory, request " "has been made.") class RemoteFSNoSharesMounted(RemoteFSException): message = _("No mounted shares found") class RemoteFSNoSuitableShareFound(RemoteFSException): message = _("There is no share which can host %(volume_size)sG") # NFS driver class NfsException(RemoteFSException): message = _("Unknown NFS exception") class NfsNoSharesMounted(RemoteFSNoSharesMounted): message = _("No mounted NFS shares found") class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): message = _("There is no share which can host %(volume_size)sG") # Smbfs driver class SmbfsException(RemoteFSException): message = _("Unknown SMBFS exception.") class SmbfsNoSharesMounted(RemoteFSNoSharesMounted): message = _("No mounted SMBFS shares found.") class SmbfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): message = _("There is no share which can host %(volume_size)sG.") # Gluster driver class GlusterfsException(RemoteFSException): message = _("Unknown Gluster exception") class GlusterfsNoSharesMounted(RemoteFSNoSharesMounted): message = _("No mounted Gluster shares found") class GlusterfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): message = _("There is no share which can host %(volume_size)sG") # Virtuozzo Storage Driver class VzStorageException(RemoteFSException): message = _("Unknown Virtuozzo Storage exception") class VzStorageNoSharesMounted(RemoteFSNoSharesMounted): message = _("No mounted Virtuozzo Storage shares found") class VzStorageNoSuitableShareFound(RemoteFSNoSuitableShareFound): message = _("There is no share which can host %(volume_size)sG") # Fibre Channel Zone Manager class ZoneManagerException(CinderException): message = _("Fibre Channel connection control failure: %(reason)s") class 
FCZoneDriverException(CinderException): message = _("Fibre Channel Zone operation failed: %(reason)s") class FCSanLookupServiceException(CinderException): message = _("Fibre Channel SAN Lookup failure: %(reason)s") class BrocadeZoningCliException(CinderException): message = _("Brocade Fibre Channel Zoning CLI error: %(reason)s") class BrocadeZoningHttpException(CinderException): message = _("Brocade Fibre Channel Zoning HTTP error: %(reason)s") class CiscoZoningCliException(CinderException): message = _("Cisco Fibre Channel Zoning CLI error: %(reason)s") class NetAppDriverException(VolumeDriverException): message = _("NetApp Cinder Driver exception.") class EMCVnxCLICmdError(VolumeBackendAPIException): message = _("EMC VNX Cinder Driver CLI exception: %(cmd)s " "(Return Code: %(rc)s) (Output: %(out)s).") class EMCSPUnavailableException(EMCVnxCLICmdError): message = _("EMC VNX Cinder Driver SPUnavailableException: %(cmd)s " "(Return Code: %(rc)s) (Output: %(out)s).") # ConsistencyGroup class ConsistencyGroupNotFound(NotFound): message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.") class InvalidConsistencyGroup(Invalid): message = _("Invalid ConsistencyGroup: %(reason)s") # CgSnapshot class CgSnapshotNotFound(NotFound): message = _("CgSnapshot %(cgsnapshot_id)s could not be found.") class InvalidCgSnapshot(Invalid): message = _("Invalid CgSnapshot: %(reason)s") # Hitachi Block Storage Driver class HBSDError(CinderException): message = _("HBSD error occurs.") class HBSDCmdError(HBSDError): def __init__(self, message=None, ret=None, err=None): self.ret = ret self.stderr = err super(HBSDCmdError, self).__init__(message=message) class HBSDBusy(HBSDError): message = "Device or resource is busy." class HBSDNotFound(NotFound): message = _("Storage resource could not be found.") class HBSDVolumeIsBusy(VolumeIsBusy): message = _("Volume %(volume_name)s is busy.") # Datera driver class DateraAPIException(VolumeBackendAPIException): message = _("Bad response from Datera API") # Target drivers class ISCSITargetCreateFailed(CinderException): message = _("Failed to create iscsi target for volume %(volume_id)s.") class ISCSITargetRemoveFailed(CinderException): message = _("Failed to remove iscsi target for volume %(volume_id)s.") class ISCSITargetAttachFailed(CinderException): message = _("Failed to attach iSCSI target for volume %(volume_id)s.") class ISCSITargetDetachFailed(CinderException): message = _("Failed to detach iSCSI target for volume %(volume_id)s.") class ISCSITargetHelperCommandFailed(CinderException): message = _("%(error_message)s") # X-IO driver exception. class XIODriverException(VolumeDriverException): message = _("X-IO Volume Driver exception!") # Violin Memory drivers class ViolinInvalidBackendConfig(CinderException): message = _("Volume backend config is invalid: %(reason)s") class ViolinRequestRetryTimeout(CinderException): message = _("Backend service retry timeout hit: %(timeout)s sec") class ViolinBackendErr(CinderException): message = _("Backend reports: %(message)s") class ViolinBackendErrExists(CinderException): message = _("Backend reports: item already exists") class ViolinBackendErrNotFound(CinderException): message = _("Backend reports: item not found") # ZFSSA NFS driver exception. class WebDAVClientError(CinderException): message = _("The WebDAV request failed. 
Reason: %(msg)s, "
                "Return code/reason: %(code)s, Source Volume: %(src)s, "
                "Destination Volume: %(dst)s, Method: %(method)s.")


# XtremIO Drivers
class XtremIOAlreadyMappedError(CinderException):
    message = _("Volume to Initiator Group mapping already exists")


class XtremIOArrayBusy(CinderException):
    message = _("System is busy, retry operation.")


class XtremIOSnapshotsLimitExceeded(CinderException):
    message = _("Exceeded the limit of snapshots per volume")


# Infortrend EonStor DS Driver
class InfortrendCliException(CinderException):
    message = _("Infortrend CLI exception: %(err)s Param: %(param)s "
                "(Return Code: %(rc)s) (Output: %(out)s)")


# DOTHILL drivers
class DotHillInvalidBackend(CinderException):
    message = _("Backend doesn't exist (%(backend)s)")


class DotHillConnectionError(CinderException):
    message = _("%(message)s")


class DotHillAuthenticationError(CinderException):
    message = _("%(message)s")


class DotHillNotEnoughSpace(CinderException):
    message = _("Not enough space on backend (%(backend)s)")


class DotHillRequestError(CinderException):
    message = _("%(message)s")


class DotHillNotTargetPortal(CinderException):
    message = _("No active iSCSI portals with supplied iSCSI IPs")


# Sheepdog
class SheepdogError(VolumeBackendAPIException):
    message = _("An error has occurred in SheepdogDriver. "
                "(Reason: %(reason)s)")


class SheepdogCmdError(SheepdogError):
    message = _("(Command: %(cmd)s) "
                "(Return Code: %(exit_code)s) "
                "(Stdout: %(stdout)s) "
                "(Stderr: %(stderr)s)")


class MetadataAbsent(CinderException):
    message = _("There is no metadata in DB object.")


class NotSupportedOperation(Invalid):
    message = _("Operation not supported: %(operation)s.")
    code = 405


# Hitachi HNAS drivers
class HNASConnError(CinderException):
    message = _("%(message)s")


# Coho drivers
class CohoException(VolumeDriverException):
    message = _("Coho Data Cinder driver failure: %(message)s")


# Tegile Storage drivers
class TegileAPIException(VolumeBackendAPIException):
    message = _("Unexpected response from Tegile IntelliFlash API")


# NexentaStor driver exception
class NexentaException(VolumeDriverException):
    message = _("%(message)s")


# Google Cloud Storage(GCS) backup driver
class GCSConnectionFailure(BackupDriverException):
    message = _("Google Cloud Storage connection failure: %(reason)s")


class GCSApiFailure(BackupDriverException):
    message = _("Google Cloud Storage api failure: %(reason)s")


class GCSOAuth2Failure(BackupDriverException):
    message = _("Google Cloud Storage oauth2 failure: %(reason)s")

cinder-8.0.0/cinder/scheduler/
cinder-8.0.0/cinder/scheduler/host_manager.py

# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Manage hosts in the current zone.
""" import collections from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from cinder import context as cinder_context from cinder import exception from cinder import objects from cinder import utils from cinder.i18n import _LI, _LW from cinder.scheduler import filters from cinder.scheduler import weights from cinder.volume import utils as vol_utils host_manager_opts = [ cfg.ListOpt('scheduler_default_filters', default=[ 'AvailabilityZoneFilter', 'CapacityFilter', 'CapabilitiesFilter' ], help='Which filter class names to use for filtering hosts ' 'when not specified in the request.'), cfg.ListOpt('scheduler_default_weighers', default=[ 'CapacityWeigher' ], help='Which weigher class names to use for weighing hosts.') ] CONF = cfg.CONF CONF.register_opts(host_manager_opts) CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') CONF.import_opt('max_over_subscription_ratio', 'cinder.volume.driver') LOG = logging.getLogger(__name__) class ReadOnlyDict(collections.Mapping): """A read-only dict.""" def __init__(self, source=None): if source is not None: self.data = dict(source) else: self.data = {} def __getitem__(self, key): return self.data[key] def __iter__(self): return iter(self.data) def __len__(self): return len(self.data) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.data) class HostState(object): """Mutable and immutable information tracked for a volume backend.""" def __init__(self, host, capabilities=None, service=None): self.capabilities = None self.service = None self.host = host self.update_capabilities(capabilities, service) self.volume_backend_name = None self.vendor_name = None self.driver_version = 0 self.storage_protocol = None self.QoS_support = False # Mutable available resources. # These will change as resources are virtually "consumed". self.total_capacity_gb = 0 # capacity has been allocated in cinder POV, which should be # sum(vol['size'] for vol in vols_on_hosts) self.allocated_capacity_gb = 0 self.free_capacity_gb = None self.reserved_percentage = 0 # The apparent allocated space indicating how much capacity # has been provisioned. This could be the sum of sizes of # all volumes on a backend, which could be greater than or # equal to the allocated_capacity_gb. self.provisioned_capacity_gb = 0 self.max_over_subscription_ratio = 1.0 self.thin_provisioning_support = False self.thick_provisioning_support = False # Does this backend support attaching a volume to more than # once host/instance? self.multiattach = False # PoolState for all pools self.pools = {} self.updated = None def update_capabilities(self, capabilities=None, service=None): # Read-only capability dicts if capabilities is None: capabilities = {} self.capabilities = ReadOnlyDict(capabilities) if service is None: service = {} self.service = ReadOnlyDict(service) def update_from_volume_capability(self, capability, service=None): """Update information about a host from its volume_node info. 
        'capability' is the status info reported by the volume backend; a
        typical capability looks like this:

        capability = {
            'volume_backend_name': 'Local iSCSI',  #\
            'vendor_name': 'OpenStack',            #  backend level
            'driver_version': '1.0',               #  mandatory/fixed
            'storage_protocol': 'iSCSI',           #- stats&capabilities

            'active_volumes': 10,                  #\
            'IOPS_provisioned': 30000,             #  optional custom
            'fancy_capability_1': 'eat',           #  stats & capabilities
            'fancy_capability_2': 'drink',         #/

            'pools': [
                {'pool_name': '1st pool',          #\
                 'total_capacity_gb': 500,         #  mandatory stats for
                 'free_capacity_gb': 230,          #  pools
                 'allocated_capacity_gb': 270,     # |
                 'QoS_support': 'False',           # |
                 'reserved_percentage': 0,         #/

                 'dying_disks': 100,               #\
                 'super_hero_1': 'spider-man',     #  optional custom
                 'super_hero_2': 'flash',          #  stats & capabilities
                 'super_hero_3': 'neoncat'         #/
                 },
                {'pool_name': '2nd pool',
                 'total_capacity_gb': 1024,
                 'free_capacity_gb': 1024,
                 'allocated_capacity_gb': 0,
                 'QoS_support': 'False',
                 'reserved_percentage': 0,

                 'dying_disks': 200,
                 'super_hero_1': 'superman',
                 'super_hero_2': 'Hulk',
                 }
            ]
        }
        """
        self.update_capabilities(capability, service)

        if capability:
            if self.updated and self.updated > capability['timestamp']:
                return

            # Update backend level info
            self.update_backend(capability)

            # Update pool level info
            self.update_pools(capability, service)

    def update_pools(self, capability, service):
        """Update storage pools information from backend reported info."""
        if not capability:
            return

        pools = capability.get('pools', None)
        active_pools = set()
        if pools and isinstance(pools, list):
            # Update all pools stats according to information from list
            # of pools in volume capacity
            for pool_cap in pools:
                pool_name = pool_cap['pool_name']
                self._append_backend_info(pool_cap)
                cur_pool = self.pools.get(pool_name, None)
                if not cur_pool:
                    # Add new pool
                    cur_pool = PoolState(self.host, pool_cap, pool_name)
                    self.pools[pool_name] = cur_pool
                cur_pool.update_from_volume_capability(pool_cap, service)

                active_pools.add(pool_name)
        elif pools is None:
            # To handle legacy driver that doesn't report pool
            # information in the capability, we have to prepare
            # a pool from backend level info, or to update the one
            # we created in self.pools.
            pool_name = self.volume_backend_name
            if pool_name is None:
                # To get DEFAULT_POOL_NAME
                pool_name = vol_utils.extract_host(self.host, 'pool', True)

            if len(self.pools) == 0:
                # No pool was there
                single_pool = PoolState(self.host, capability, pool_name)
                self._append_backend_info(capability)
                self.pools[pool_name] = single_pool
            else:
                # this is an update from legacy driver
                try:
                    single_pool = self.pools[pool_name]
                except KeyError:
                    single_pool = PoolState(self.host, capability, pool_name)
                    self._append_backend_info(capability)
                    self.pools[pool_name] = single_pool

            single_pool.update_from_volume_capability(capability, service)
            active_pools.add(pool_name)

        # remove non-active pools from self.pools
        nonactive_pools = set(self.pools.keys()) - active_pools
        for pool in nonactive_pools:
            LOG.debug("Removing non-active pool %(pool)s @ %(host)s "
                      "from scheduler cache.",
                      {'pool': pool, 'host': self.host})
            del self.pools[pool]

    def _append_backend_info(self, pool_cap):
        # Fill backend level info to pool if needed.
if not pool_cap.get('volume_backend_name', None): pool_cap['volume_backend_name'] = self.volume_backend_name if not pool_cap.get('storage_protocol', None): pool_cap['storage_protocol'] = self.storage_protocol if not pool_cap.get('vendor_name', None): pool_cap['vendor_name'] = self.vendor_name if not pool_cap.get('driver_version', None): pool_cap['driver_version'] = self.driver_version if not pool_cap.get('timestamp', None): pool_cap['timestamp'] = self.updated def update_backend(self, capability): self.volume_backend_name = capability.get('volume_backend_name', None) self.vendor_name = capability.get('vendor_name', None) self.driver_version = capability.get('driver_version', None) self.storage_protocol = capability.get('storage_protocol', None) self.updated = capability['timestamp'] def consume_from_volume(self, volume): """Incrementally update host state from a volume.""" volume_gb = volume['size'] self.allocated_capacity_gb += volume_gb self.provisioned_capacity_gb += volume_gb if self.free_capacity_gb == 'infinite': # There's virtually infinite space on back-end pass elif self.free_capacity_gb == 'unknown': # Unable to determine the actual free space on back-end pass else: self.free_capacity_gb -= volume_gb self.updated = timeutils.utcnow() def __repr__(self): # FIXME(zhiteng) backend level free_capacity_gb isn't as # meaningful as it used to be before pool is introduced, we'd # come up with better representation of HostState. return ("host '%s': free_capacity_gb: %s, pools: %s" % (self.host, self.free_capacity_gb, self.pools)) class PoolState(HostState): def __init__(self, host, capabilities, pool_name): new_host = vol_utils.append_host(host, pool_name) super(PoolState, self).__init__(new_host, capabilities) self.pool_name = pool_name # No pools in pool self.pools = None def update_from_volume_capability(self, capability, service=None): """Update information about a pool from its volume_node info.""" self.update_capabilities(capability, service) if capability: if self.updated and self.updated > capability['timestamp']: return self.update_backend(capability) self.total_capacity_gb = capability.get('total_capacity_gb', 0) self.free_capacity_gb = capability.get('free_capacity_gb', 0) self.allocated_capacity_gb = capability.get( 'allocated_capacity_gb', 0) self.QoS_support = capability.get('QoS_support', False) self.reserved_percentage = capability.get('reserved_percentage', 0) # provisioned_capacity_gb is the apparent total capacity of # all the volumes created on a backend, which is greater than # or equal to allocated_capacity_gb, which is the apparent # total capacity of all the volumes created on a backend # in Cinder. Using allocated_capacity_gb as the default of # provisioned_capacity_gb if it is not set. self.provisioned_capacity_gb = capability.get( 'provisioned_capacity_gb', self.allocated_capacity_gb) self.max_over_subscription_ratio = capability.get( 'max_over_subscription_ratio', CONF.max_over_subscription_ratio) self.thin_provisioning_support = capability.get( 'thin_provisioning_support', False) self.thick_provisioning_support = capability.get( 'thick_provisioning_support', False) self.multiattach = capability.get('multiattach', False) def update_pools(self, capability): # Do nothing, since we don't have pools within pool, yet pass class HostManager(object): """Base HostManager class.""" host_state_cls = HostState def __init__(self): self.service_states = {} # { : {: {cap k : v}}} self.host_state_map = {} self.filter_handler = filters.HostFilterHandler('cinder.scheduler.' 
'filters') self.filter_classes = self.filter_handler.get_all_classes() self.weight_handler = weights.HostWeightHandler('cinder.scheduler.' 'weights') self.weight_classes = self.weight_handler.get_all_classes() self._no_capabilities_hosts = set() # Hosts having no capabilities self._update_host_state_map(cinder_context.get_admin_context()) def _choose_host_filters(self, filter_cls_names): """Return a list of available filter classes. This function checks input filter names against a predefined set of acceptable filters (all loaded filters). If input is None, it uses CONF.scheduler_default_filters instead. """ if filter_cls_names is None: filter_cls_names = CONF.scheduler_default_filters if not isinstance(filter_cls_names, (list, tuple)): filter_cls_names = [filter_cls_names] good_filters = [] bad_filters = [] for filter_name in filter_cls_names: found_class = False for cls in self.filter_classes: if cls.__name__ == filter_name: found_class = True good_filters.append(cls) break if not found_class: bad_filters.append(filter_name) if bad_filters: raise exception.SchedulerHostFilterNotFound( filter_name=", ".join(bad_filters)) return good_filters def _choose_host_weighers(self, weight_cls_names): """Return a list of available weigher classes. This function checks input weigher names against a predefined set of acceptable weighers (all loaded weighers). If input is None, it uses CONF.scheduler_default_weighers instead. """ if weight_cls_names is None: weight_cls_names = CONF.scheduler_default_weighers if not isinstance(weight_cls_names, (list, tuple)): weight_cls_names = [weight_cls_names] good_weighers = [] bad_weighers = [] for weigher_name in weight_cls_names: found_class = False for cls in self.weight_classes: if cls.__name__ == weigher_name: good_weighers.append(cls) found_class = True break if not found_class: bad_weighers.append(weigher_name) if bad_weighers: raise exception.SchedulerHostWeigherNotFound( weigher_name=", ".join(bad_weighers)) return good_weighers def get_filtered_hosts(self, hosts, filter_properties, filter_class_names=None): """Filter hosts and return only ones passing all filters.""" filter_classes = self._choose_host_filters(filter_class_names) return self.filter_handler.get_filtered_objects(filter_classes, hosts, filter_properties) def get_weighed_hosts(self, hosts, weight_properties, weigher_class_names=None): """Weigh the hosts.""" weigher_classes = self._choose_host_weighers(weigher_class_names) return self.weight_handler.get_weighed_objects(weigher_classes, hosts, weight_properties) def update_service_capabilities(self, service_name, host, capabilities): """Update the per-service capabilities based on this notification.""" if service_name != 'volume': LOG.debug('Ignoring %(service_name)s service update ' 'from %(host)s', {'service_name': service_name, 'host': host}) return # Copy the capabilities, so we don't modify the original dict capab_copy = dict(capabilities) capab_copy["timestamp"] = timeutils.utcnow() # Reported time self.service_states[host] = capab_copy LOG.debug("Received %(service_name)s service update from " "%(host)s: %(cap)s", {'service_name': service_name, 'host': host, 'cap': capabilities}) self._no_capabilities_hosts.discard(host) def has_all_capabilities(self): return len(self._no_capabilities_hosts) == 0 def _update_host_state_map(self, context): # Get resource usage across the available volume nodes: topic = CONF.volume_topic volume_services = objects.ServiceList.get_all_by_topic(context, topic, disabled=False) active_hosts = set()
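The names resolved by _choose_host_filters()/_choose_host_weighers() above come from configuration; an illustrative cinder.conf fragment (the values shown are believed to be the shipped defaults, adjust to taste):

    [DEFAULT]
    scheduler_default_filters = AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter
    scheduler_default_weighers = CapacityWeigher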
no_capabilities_hosts = set() for service in volume_services.objects: host = service.host if not utils.service_is_up(service): LOG.warning(_LW("volume service is down. (host: %s)"), host) continue capabilities = self.service_states.get(host, None) if capabilities is None: no_capabilities_hosts.add(host) continue host_state = self.host_state_map.get(host) if not host_state: host_state = self.host_state_cls(host, capabilities=capabilities, service=dict(service)) self.host_state_map[host] = host_state # update capabilities and attributes in host_state host_state.update_from_volume_capability(capabilities, service=dict(service)) active_hosts.add(host) self._no_capabilities_hosts = no_capabilities_hosts # remove non-active hosts from host_state_map nonactive_hosts = set(self.host_state_map.keys()) - active_hosts for host in nonactive_hosts: LOG.info(_LI("Removing non-active host: %(host)s from " "scheduler cache."), {'host': host}) del self.host_state_map[host] def get_all_host_states(self, context): """Returns all pool-level HostStates the HostManager knows about. Each of the consumable resources in HostState is populated with the capabilities the scheduler received from RPC. """ self._update_host_state_map(context) # build a pool_state map and return that map instead of host_state_map all_pools = {} for host, state in self.host_state_map.items(): for key in state.pools: pool = state.pools[key] # use host.pool_name to make sure key is unique pool_key = '.'.join([host, pool.pool_name]) all_pools[pool_key] = pool return all_pools.values() def get_pools(self, context): """Returns a list of dicts describing all pools on all hosts the HostManager knows about.""" self._update_host_state_map(context) all_pools = [] for host, state in self.host_state_map.items(): for key in state.pools: pool = state.pools[key] # use host.pool_name to make sure key is unique pool_key = vol_utils.append_host(host, pool.pool_name) new_pool = dict(name=pool_key) new_pool.update(dict(capabilities=pool.capabilities)) all_pools.append(new_pool) return all_pools cinder-8.0.0/cinder/scheduler/scheduler_options.py0000664000567000056710000000660012701406250023450 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration.
""" import datetime import json import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from cinder.i18n import _LE scheduler_json_config_location_opt = cfg.StrOpt( 'scheduler_json_config_location', default='', help='Absolute path to scheduler configuration JSON file.') CONF = cfg.CONF CONF.register_opt(scheduler_json_config_location_opt) LOG = logging.getLogger(__name__) class SchedulerOptions(object): """SchedulerOptions monitors a local .json file for changes. The file is reloaded if needed and converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ def __init__(self): super(SchedulerOptions, self).__init__() self.data = {} self.last_modified = None self.last_checked = None def _get_file_handle(self, filename): """Get file handle. Broken out for testing.""" return open(filename) def _get_file_timestamp(self, filename): """Get the last modified datetime. Broken out for testing.""" try: return os.path.getmtime(filename) except os.error: LOG.exception(_LE("Could not stat scheduler options file " "%(filename)s."), {'filename': filename}) raise def _load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: return json.load(handle) except ValueError: LOG.exception(_LE("Could not decode scheduler options.")) return {} def _get_time_now(self): """Get current UTC. Broken out for testing.""" return timeutils.utcnow() def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" if not filename: filename = CONF.scheduler_json_config_location if not filename: return self.data if self.last_checked: now = self._get_time_now() if now - self.last_checked < datetime.timedelta(minutes=5): return self.data last_modified = self._get_file_timestamp(filename) if (not last_modified or not self.last_modified or last_modified > self.last_modified): self.data = self._load_file(self._get_file_handle(filename)) self.last_modified = last_modified if not self.data: self.data = {} return self.data cinder-8.0.0/cinder/scheduler/__init__.py0000664000567000056710000000151112701406250021452 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`cinder.scheduler` -- Scheduler Nodes ===================================================== .. automodule:: cinder.scheduler :platform: Unix :synopsis: Module that picks a volume node to create a volume. """ cinder-8.0.0/cinder/scheduler/filter_scheduler.py0000664000567000056710000004604712701406250023253 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Intel Corporation # Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The FilterScheduler is for creating volumes. You can customize this scheduler by specifying your own volume Filters and Weighing Functions. """ from oslo_config import cfg from oslo_log import log as logging from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.scheduler import driver from cinder.scheduler import scheduler_options from cinder.volume import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class FilterScheduler(driver.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.cost_function_cache = None self.options = scheduler_options.SchedulerOptions() self.max_attempts = self._max_attempts() def schedule(self, context, topic, method, *args, **kwargs): """Schedule contract that returns best-suited host for this request.""" self._schedule(context, topic, *args, **kwargs) def _get_configuration_options(self): """Fetch options dictionary. Broken out for testing.""" return self.options.get_configuration() def populate_filter_properties(self, request_spec, filter_properties): """Stuff things into filter_properties. Can be overridden in a subclass to add more data. """ vol = request_spec['volume_properties'] filter_properties['size'] = vol['size'] filter_properties['availability_zone'] = vol.get('availability_zone') filter_properties['user_id'] = vol.get('user_id') filter_properties['metadata'] = vol.get('metadata') filter_properties['qos_specs'] = vol.get('qos_specs') def schedule_create_consistencygroup(self, context, group, request_spec_list, filter_properties_list): weighed_host = self._schedule_group( context, request_spec_list, filter_properties_list) if not weighed_host: raise exception.NoValidHost(reason=_("No weighed hosts available")) host = weighed_host.obj.host updated_group = driver.group_update_db(context, group, host) self.volume_rpcapi.create_consistencygroup(context, updated_group, host) def schedule_create_volume(self, context, request_spec, filter_properties): weighed_host = self._schedule(context, request_spec, filter_properties) if not weighed_host: raise exception.NoValidHost(reason=_("No weighed hosts available")) host = weighed_host.obj.host volume_id = request_spec['volume_id'] updated_volume = driver.volume_update_db(context, volume_id, host) self._post_select_populate_filter_properties(filter_properties, weighed_host.obj) # context is not serializable filter_properties.pop('context', None) self.volume_rpcapi.create_volume(context, updated_volume, host, request_spec, filter_properties, allow_reschedule=True) def host_passes_filters(self, context, host, request_spec, filter_properties): """Check if the specified host passes the filters.""" weighed_hosts = self._get_weighted_candidates(context, request_spec, filter_properties) for weighed_host in weighed_hosts: host_state = weighed_host.obj if host_state.host == host: return host_state volume_id = request_spec.get('volume_id', '??volume_id missing??') raise exception.NoValidHost(reason=_('Cannot place volume %(id)s on ' '%(host)s') % {'id': volume_id, 'host': host}) 
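For orientation before the retype logic that follows: the weighed_host objects handled above are WeighedObject wrappers (defined in base_weight.py later in this listing), unpacked like this (host string hypothetical):

    best = weighed_hosts[0]    # get_weighed_objects() sorts best-first
    host_state = best.obj      # a HostState, e.g. host 'srv1@lvm#pool_a'
    score = best.weight        # sum of multiplier-scaled, normalized weights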
def find_retype_host(self, context, request_spec, filter_properties=None, migration_policy='never'): """Find a host that can accept the volume with its new type.""" filter_properties = filter_properties or {} current_host = request_spec['volume_properties']['host'] # The volume already exists on this host, and so we shouldn't check if # it can accept the volume again in the CapacityFilter. filter_properties['vol_exists_on'] = current_host weighed_hosts = self._get_weighted_candidates(context, request_spec, filter_properties) if not weighed_hosts: raise exception.NoValidHost(reason=_('No valid hosts for volume ' '%(id)s with type %(type)s') % {'id': request_spec['volume_id'], 'type': request_spec['volume_type']}) for weighed_host in weighed_hosts: host_state = weighed_host.obj if host_state.host == current_host: return host_state if utils.extract_host(current_host, 'pool') is None: # Legacy volumes created before pools were introduced have no # pool info in their host field, but host_state.host always # includes pool-level info. In this case, if the exact match # above didn't work out, we look for a host_state on the same # backend as the volume being retyped. In other words, for # legacy volumes retyping could cause a migration between pools # on the same backend, which we consider different from # migration between hosts and thus allow even when the # migration policy is 'never'. for weighed_host in weighed_hosts: host_state = weighed_host.obj backend = utils.extract_host(host_state.host, 'backend') if backend == current_host: return host_state if migration_policy == 'never': raise exception.NoValidHost(reason=_('Current host not valid for ' 'volume %(id)s with type ' '%(type)s, migration not ' 'allowed') % {'id': request_spec['volume_id'], 'type': request_spec['volume_type']}) top_host = self._choose_top_host(weighed_hosts, request_spec) return top_host.obj def get_pools(self, context, filters): # TODO(zhiteng) Add filters support return self.host_manager.get_pools(context) def _post_select_populate_filter_properties(self, filter_properties, host_state): """Populate filter properties with additional information. Add additional information to the filter properties after a host has been selected by the scheduling process. """ # Add a retry entry for the selected volume backend: self._add_retry_host(filter_properties, host_state.host) def _add_retry_host(self, filter_properties, host): """Add a retry entry for the selected volume backend. In the event that the request gets re-scheduled, this entry will signal that the given backend has already been tried. """ retry = filter_properties.get('retry', None) if not retry: return hosts = retry['hosts'] hosts.append(host) def _max_attempts(self): max_attempts = CONF.scheduler_max_attempts if max_attempts < 1: raise exception.InvalidParameterValue( err=_("Invalid value for 'scheduler_max_attempts', " "must be >=1")) return max_attempts def _log_volume_error(self, volume_id, retry): """Log requests with exceptions from previous volume operations.""" exc = retry.pop('exc', None) # string-ified exception from volume if not exc: return # no exception info from a previous attempt, skip hosts = retry.get('hosts', None) if not hosts: return # no previously attempted hosts, skip last_host = hosts[-1] LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: " "%(last_host)s : %(exc)s"), {'volume_id': volume_id, 'last_host': last_host, 'exc': exc}) def _populate_retry(self, filter_properties, properties): """Populate filter properties with history of retries for request.
If maximum retries is exceeded, raise NoValidHost. """ max_attempts = self.max_attempts retry = filter_properties.pop('retry', {}) if max_attempts == 1: # re-scheduling is disabled. return # retry is enabled, update attempt count: if retry: retry['num_attempts'] += 1 else: retry = { 'num_attempts': 1, 'hosts': [] # list of volume service hosts tried } filter_properties['retry'] = retry volume_id = properties.get('volume_id') self._log_volume_error(volume_id, retry) if retry['num_attempts'] > max_attempts: raise exception.NoValidHost( reason=_("Exceeded max scheduling attempts %(max_attempts)d " "for volume %(volume_id)s") % {'max_attempts': max_attempts, 'volume_id': volume_id}) def _get_weighted_candidates(self, context, request_spec, filter_properties=None): """Return a list of hosts that meet required specs. Returned list is ordered by their fitness. """ elevated = context.elevated() volume_properties = request_spec['volume_properties'] # Since Cinder is using mixed filters from Oslo and its own, which # take 'resource_XX' and 'volume_XX' as input respectively, copying # 'volume_XX' to 'resource_XX' will make both filters happy. resource_properties = volume_properties.copy() volume_type = request_spec.get("volume_type", None) resource_type = request_spec.get("volume_type", None) request_spec.update({'resource_properties': resource_properties}) config_options = self._get_configuration_options() if filter_properties is None: filter_properties = {} self._populate_retry(filter_properties, resource_properties) if resource_type is None: msg = _("volume_type cannot be None") raise exception.InvalidVolumeType(reason=msg) filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'volume_type': volume_type, 'resource_type': resource_type}) self.populate_filter_properties(request_spec, filter_properties) # If multiattach is enabled on a volume, we need to add # multiattach to extra specs, so that the capability # filtering is enabled. multiattach = volume_properties.get('multiattach', False) if multiattach and 'multiattach' not in resource_type.get( 'extra_specs', {}): if 'extra_specs' not in resource_type: resource_type['extra_specs'] = {} resource_type['extra_specs'].update( multiattach=' True') # Find our local list of acceptable hosts by filtering and # weighing our options. We virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. hosts = self.host_manager.get_all_host_states(elevated) # Filter local hosts based on requirements ... hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties) if not hosts: return [] LOG.debug("Filtered %s", hosts) # weighted_host = WeightedHost() ... the best # host for the job. weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) return weighed_hosts def _get_weighted_candidates_group(self, context, request_spec_list, filter_properties_list=None): """Finds hosts that support the consistencygroup. Returns a list of hosts that meet the required specs, ordered by their fitness. """ elevated = context.elevated() weighed_hosts = [] index = 0 for request_spec in request_spec_list: volume_properties = request_spec['volume_properties'] # Since Cinder is using mixed filters from Oslo and its own, which # take 'resource_XX' and 'volume_XX' as input respectively, # copying 'volume_XX' to 'resource_XX' will make both filters # happy.
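A sketch of the retry bookkeeping that _populate_retry() above maintains inside filter_properties (host string hypothetical):

    # attempt 1: filter_properties['retry'] == {'num_attempts': 1, 'hosts': []}
    # attempt 2: after _add_retry_host() recorded 'srv1@lvm':
    #            {'num_attempts': 2, 'hosts': ['srv1@lvm']}
    # once num_attempts exceeds scheduler_max_attempts, NoValidHost is raised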
resource_properties = volume_properties.copy() volume_type = request_spec.get("volume_type", None) resource_type = request_spec.get("volume_type", None) request_spec.update({'resource_properties': resource_properties}) config_options = self._get_configuration_options() filter_properties = {} if filter_properties_list: filter_properties = filter_properties_list[index] if filter_properties is None: filter_properties = {} self._populate_retry(filter_properties, resource_properties) # Add consistencygroup_support in extra_specs if it is not there. # Make sure it is populated in filter_properties if 'consistencygroup_support' not in resource_type.get( 'extra_specs', {}): resource_type['extra_specs'].update( consistencygroup_support=' True') filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'volume_type': volume_type, 'resource_type': resource_type}) self.populate_filter_properties(request_spec, filter_properties) # Find our local list of acceptable hosts by filtering and # weighing our options. we virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. all_hosts = self.host_manager.get_all_host_states(elevated) if not all_hosts: return [] # Filter local hosts based on requirements ... hosts = self.host_manager.get_filtered_hosts(all_hosts, filter_properties) if not hosts: return [] LOG.debug("Filtered %s", hosts) # weighted_host = WeightedHost() ... the best # host for the job. temp_weighed_hosts = self.host_manager.get_weighed_hosts( hosts, filter_properties) if not temp_weighed_hosts: return [] if index == 0: weighed_hosts = temp_weighed_hosts else: new_weighed_hosts = [] for host1 in weighed_hosts: for host2 in temp_weighed_hosts: # Should schedule creation of CG on backend level, # not pool level. if (utils.extract_host(host1.obj.host) == utils.extract_host(host2.obj.host)): new_weighed_hosts.append(host1) weighed_hosts = new_weighed_hosts if not weighed_hosts: return [] index += 1 return weighed_hosts def _schedule(self, context, request_spec, filter_properties=None): weighed_hosts = self._get_weighted_candidates(context, request_spec, filter_properties) # When we get the weighed_hosts, we clear those hosts whose backend # is not same as consistencygroup's backend. CG_backend = request_spec.get('CG_backend') if weighed_hosts and CG_backend: # Get host name including host@backend#pool info from # weighed_hosts. 
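The consistency-group matching above compares hosts at the backend level; assuming the usual 'host@backend#pool' convention of cinder.volume.utils, extract_host() without an explicit level reduces pool-qualified strings to the backend (host strings hypothetical):

    utils.extract_host('srv1@lvm#pool_a')   # -> 'srv1@lvm'
    utils.extract_host('srv1@lvm#pool_b')   # -> 'srv1@lvm'
    # so two pools of one backend satisfy the CG co-location check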
for host in weighed_hosts[::-1]: backend = utils.extract_host(host.obj.host) if backend != CG_backend: weighed_hosts.remove(host) if not weighed_hosts: LOG.warning(_LW('No weighed hosts found for volume ' 'with properties: %s'), filter_properties['request_spec']['volume_type']) return None return self._choose_top_host(weighed_hosts, request_spec) def _schedule_group(self, context, request_spec_list, filter_properties_list=None): weighed_hosts = self._get_weighted_candidates_group( context, request_spec_list, filter_properties_list) if not weighed_hosts: return None return self._choose_top_host_group(weighed_hosts, request_spec_list) def _choose_top_host(self, weighed_hosts, request_spec): top_host = weighed_hosts[0] host_state = top_host.obj LOG.debug("Choosing %s", host_state.host) volume_properties = request_spec['volume_properties'] host_state.consume_from_volume(volume_properties) return top_host def _choose_top_host_group(self, weighed_hosts, request_spec_list): top_host = weighed_hosts[0] host_state = top_host.obj LOG.debug("Choosing %s", host_state.host) return top_host cinder-8.0.0/cinder/scheduler/manager.py0000664000567000056710000003743712701406250021345 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler Service """ import eventlet from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import excutils from oslo_utils import importutils import six from cinder import context from cinder import db from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE from cinder import manager from cinder import objects from cinder import quota from cinder import rpc from cinder.scheduler.flows import create_volume from cinder.volume import rpcapi as volume_rpcapi scheduler_driver_opt = cfg.StrOpt('scheduler_driver', default='cinder.scheduler.filter_scheduler.' 
'FilterScheduler', help='Default scheduler driver to use') CONF = cfg.CONF CONF.register_opt(scheduler_driver_opt) QUOTAS = quota.QUOTAS LOG = logging.getLogger(__name__) class SchedulerManager(manager.Manager): """Chooses a host to create volumes.""" RPC_API_VERSION = '2.0' target = messaging.Target(version=RPC_API_VERSION) def __init__(self, scheduler_driver=None, service_name=None, *args, **kwargs): if not scheduler_driver: scheduler_driver = CONF.scheduler_driver self.driver = importutils.import_object(scheduler_driver) super(SchedulerManager, self).__init__(*args, **kwargs) self.additional_endpoints.append(_SchedulerV1Proxy(self)) self._startup_delay = True def init_host_with_rpc(self): ctxt = context.get_admin_context() self.request_service_capabilities(ctxt) eventlet.sleep(CONF.periodic_interval) self._startup_delay = False def reset(self): super(SchedulerManager, self).reset() self.driver.reset() def update_service_capabilities(self, context, service_name=None, host=None, capabilities=None, **kwargs): """Process a capability update from a service node.""" if capabilities is None: capabilities = {} self.driver.update_service_capabilities(service_name, host, capabilities) def _wait_for_scheduler(self): # NOTE(dulek): We're waiting for scheduler to announce that it's ready # or CONF.periodic_interval seconds from service startup has passed. while self._startup_delay and not self.driver.is_ready(): eventlet.sleep(1) def create_consistencygroup(self, context, topic, group, request_spec_list=None, filter_properties_list=None): self._wait_for_scheduler() try: self.driver.schedule_create_consistencygroup( context, group, request_spec_list, filter_properties_list) except exception.NoValidHost: LOG.error(_LE("Could not find a host for consistency group " "%(group_id)s."), {'group_id': group.id}) group.status = 'error' group.save() except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to create consistency group " "%(group_id)s."), {'group_id': group.id}) group.status = 'error' group.save() def create_volume(self, context, topic, volume_id, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, volume=None): self._wait_for_scheduler() # FIXME(thangp): Remove this in v2.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the # volume by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) try: flow_engine = create_volume.get_flow(context, db, self.driver, request_spec, filter_properties, volume, snapshot_id, image_id) except Exception: msg = _("Failed to create scheduler manager volume flow") LOG.exception(msg) raise exception.CinderException(msg) with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() def request_service_capabilities(self, context): volume_rpcapi.VolumeAPI().publish_service_capabilities(context) def migrate_volume_to_host(self, context, topic, volume_id, host, force_host_copy, request_spec, filter_properties=None, volume=None): """Ensure that the host exists and can accept the volume.""" self._wait_for_scheduler() # FIXME(thangp): Remove this in v2.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the # volume by its volume_id. 
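The driver class loaded above is configuration-driven; an illustrative cinder.conf fragment (the value shown is the default declared just above):

    [DEFAULT]
    scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler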
volume = objects.Volume.get_by_id(context, volume_id) def _migrate_volume_set_error(self, context, ex, request_spec): if volume.status == 'maintenance': previous_status = ( volume.previous_status or 'maintenance') volume_state = {'volume_state': {'migration_status': 'error', 'status': previous_status}} else: volume_state = {'volume_state': {'migration_status': 'error'}} self._set_volume_state_and_notify('migrate_volume_to_host', volume_state, context, ex, request_spec) try: tgt_host = self.driver.host_passes_filters(context, host, request_spec, filter_properties) except exception.NoValidHost as ex: _migrate_volume_set_error(self, context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): _migrate_volume_set_error(self, context, ex, request_spec) else: volume_rpcapi.VolumeAPI().migrate_volume(context, volume, tgt_host, force_host_copy) def retype(self, context, topic, volume_id, request_spec, filter_properties=None, volume=None): """Schedule the modification of a volume's type. :param context: the request context :param topic: the topic listened on :param volume_id: the ID of the volume to retype :param request_spec: parameters for this retype request :param filter_properties: parameters to filter by :param volume: the volume object to retype """ self._wait_for_scheduler() # FIXME(thangp): Remove this in v2.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the # volume by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) def _retype_volume_set_error(self, context, ex, request_spec, volume_ref, msg, reservations): if reservations: QUOTAS.rollback(context, reservations) previous_status = ( volume_ref.previous_status or volume_ref.status) volume_state = {'volume_state': {'status': previous_status}} self._set_volume_state_and_notify('retype', volume_state, context, ex, request_spec, msg) reservations = request_spec.get('quota_reservations') old_reservations = request_spec.get('old_reservations', None) new_type = request_spec.get('volume_type') if new_type is None: msg = _('New volume type not specified in request_spec.') ex = exception.ParameterNotFound(param='volume_type') _retype_volume_set_error(self, context, ex, request_spec, volume, msg, reservations) # Default migration policy is 'never' migration_policy = request_spec.get('migration_policy') if not migration_policy: migration_policy = 'never' try: tgt_host = self.driver.find_retype_host(context, request_spec, filter_properties, migration_policy) except exception.NoValidHost as ex: msg = (_("Could not find a host for volume %(volume_id)s with " "type %(type_id)s.") % {'type_id': new_type['id'], 'volume_id': volume.id}) _retype_volume_set_error(self, context, ex, request_spec, volume, msg, reservations) except Exception as ex: with excutils.save_and_reraise_exception(): _retype_volume_set_error(self, context, ex, request_spec, volume, None, reservations) else: volume_rpcapi.VolumeAPI().retype(context, volume, new_type['id'], tgt_host, migration_policy, reservations, old_reservations) def manage_existing(self, context, topic, volume_id, request_spec, filter_properties=None): """Ensure that the host exists and can accept the volume.""" self._wait_for_scheduler() def _manage_existing_set_error(self, context, ex, request_spec): volume_state = {'volume_state': {'status': 'error'}} self._set_volume_state_and_notify('manage_existing', volume_state, context, ex, request_spec) volume_ref = db.volume_get(context, volume_id) try: 
self.driver.host_passes_filters(context, volume_ref['host'], request_spec, filter_properties) except exception.NoValidHost as ex: _manage_existing_set_error(self, context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): _manage_existing_set_error(self, context, ex, request_spec) else: volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref, request_spec.get('ref')) def get_pools(self, context, filters=None): """Get active pools from scheduler's cache. NOTE(dulek): There's no self._wait_for_scheduler() because get_pools is an RPC call (it blocks the c-api). Also, this is an admin-only API extension, so it won't hurt the user much to retry the request manually. """ return self.driver.get_pools(context, filters) def _set_volume_state_and_notify(self, method, updates, context, ex, request_spec, msg=None): # TODO(harlowja): move into a task that just does this later. if not msg: msg = (_LE("Failed to schedule_%(method)s: %(ex)s") % {'method': method, 'ex': six.text_type(ex)}) LOG.error(msg) volume_state = updates['volume_state'] properties = request_spec.get('volume_properties', {}) volume_id = request_spec.get('volume_id', None) if volume_id: db.volume_update(context, volume_id, volume_state) payload = dict(request_spec=request_spec, volume_properties=properties, volume_id=volume_id, state=volume_state, method=method, reason=ex) rpc.get_notifier("scheduler").error(context, 'scheduler.' + method, payload) # TODO(dulek): This goes away immediately in Newton and is just present in # Mitaka so that we can receive v1.x and v2.0 messages. class _SchedulerV1Proxy(object): target = messaging.Target(version='1.11') def __init__(self, manager): self.manager = manager def update_service_capabilities(self, context, service_name=None, host=None, capabilities=None, **kwargs): return self.manager.update_service_capabilities( context, service_name=service_name, host=host, capabilities=capabilities, **kwargs) def create_consistencygroup(self, context, topic, group, request_spec_list=None, filter_properties_list=None): return self.manager.create_consistencygroup( context, topic, group, request_spec_list=request_spec_list, filter_properties_list=filter_properties_list) def create_volume(self, context, topic, volume_id, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, volume=None): return self.manager.create_volume( context, topic, volume_id, snapshot_id=snapshot_id, image_id=image_id, request_spec=request_spec, filter_properties=filter_properties, volume=volume) def request_service_capabilities(self, context): return self.manager.request_service_capabilities(context) def migrate_volume_to_host(self, context, topic, volume_id, host, force_host_copy, request_spec, filter_properties=None, volume=None): return self.manager.migrate_volume_to_host( context, topic, volume_id, host, force_host_copy, request_spec, filter_properties=filter_properties, volume=volume) def retype(self, context, topic, volume_id, request_spec, filter_properties=None, volume=None): return self.manager.retype(context, topic, volume_id, request_spec, filter_properties=filter_properties, volume=volume) def manage_existing(self, context, topic, volume_id, request_spec, filter_properties=None): return self.manager.manage_existing( context, topic, volume_id, request_spec, filter_properties=filter_properties) def get_pools(self, context, filters=None): return self.manager.get_pools(context, filters=filters)
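For reference, a sketch of the notification payload assembled by _set_volume_state_and_notify() above for a failed retype (field values hypothetical; the emitted event type is 'scheduler.retype'):

    payload = dict(request_spec=request_spec,
                   volume_properties={'size': 10},
                   volume_id='2f6f0a4c-hypothetical-uuid',
                   state={'status': 'available'},   # previous status restored
                   method='retype',
                   reason=ex)                       # the NoValidHost instance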
cinder-8.0.0/cinder/scheduler/base_handler.py0000664000567000056710000000334212701406250022326 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A common base for handling extension classes. Used by BaseFilterHandler and BaseWeightHandler """ import inspect from stevedore import extension class BaseHandler(object): """Base class to handle loading filter and weight classes.""" def __init__(self, modifier_class_type, modifier_namespace): self.namespace = modifier_namespace self.modifier_class_type = modifier_class_type self.extension_manager = extension.ExtensionManager(modifier_namespace) def _is_correct_class(self, cls): """Return whether an object is a class of the correct type. (or is not prefixed with an underscore) """ return (inspect.isclass(cls) and not cls.__name__.startswith('_') and issubclass(cls, self.modifier_class_type)) def get_all_classes(self): # We use a set, as some classes may have an entrypoint of their own, # and also be returned by a function such as 'all_filters' for example return [ext.plugin for ext in self.extension_manager if self._is_correct_class(ext.plugin)] cinder-8.0.0/cinder/scheduler/flows/0000775000567000056710000000000012701406543020502 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/scheduler/flows/__init__.py0000664000567000056710000000000012701406250022574 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/scheduler/flows/create_volume.py0000664000567000056710000001534212701406250023706 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils import taskflow.engines from taskflow.patterns import linear_flow from cinder import exception from cinder import flow_utils from cinder.i18n import _LE from cinder import rpc from cinder import utils from cinder.volume.flows import common LOG = logging.getLogger(__name__) ACTION = 'volume:create' class ExtractSchedulerSpecTask(flow_utils.CinderTask): """Extracts a spec object from a partial and/or incomplete request spec. Reversion strategy: N/A """ default_provides = set(['request_spec']) def __init__(self, db_api, **kwargs): super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION], **kwargs) self.db_api = db_api def _populate_request_spec(self, context, volume, snapshot_id, image_id): # Create the full request spec using the volume object. # # NOTE(dulek): At this point, a volume can be deleted before it gets # scheduled. 
If a delete API call is made, the volume gets instantly # deleted and scheduling will fail when it tries to update the DB entry # (with the host) in ScheduleCreateVolumeTask below. volume_type_id = volume.volume_type_id vol_type = volume.volume_type return { 'volume_id': volume.id, 'snapshot_id': snapshot_id, 'image_id': image_id, 'volume_properties': { 'size': utils.as_int(volume.size, quiet=False), 'availability_zone': volume.availability_zone, 'volume_type_id': volume_type_id, }, 'volume_type': list(dict(vol_type).items()), } def execute(self, context, request_spec, volume, snapshot_id, image_id): # For RPC version < 1.2 backward compatibility if request_spec is None: request_spec = self._populate_request_spec(context, volume, snapshot_id, image_id) return { 'request_spec': request_spec, } class ScheduleCreateVolumeTask(flow_utils.CinderTask): """Activates a scheduler driver and handles any subsequent failures. Notification strategy: on failure the scheduler rpc notifier will be activated and a notification will be emitted indicating what errored, the reason, and the request (and misc. other data) that caused the error to be triggered. Reversion strategy: N/A """ FAILURE_TOPIC = "scheduler.create_volume" def __init__(self, db_api, driver_api, **kwargs): super(ScheduleCreateVolumeTask, self).__init__(addons=[ACTION], **kwargs) self.db_api = db_api self.driver_api = driver_api def _handle_failure(self, context, request_spec, cause): try: self._notify_failure(context, request_spec, cause) finally: LOG.error(_LE("Failed to run task %(name)s: %(cause)s"), {'cause': cause, 'name': self.name}) def _notify_failure(self, context, request_spec, cause): """When scheduling fails, send out an event that it failed.""" payload = { 'request_spec': request_spec, 'volume_properties': request_spec.get('volume_properties', {}), 'volume_id': request_spec['volume_id'], 'state': 'error', 'method': 'create_volume', 'reason': cause, } try: rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC, payload) except exception.CinderException: LOG.exception(_LE("Failed notifying on %(topic)s " "payload %(payload)s"), {'topic': self.FAILURE_TOPIC, 'payload': payload}) def execute(self, context, request_spec, filter_properties): try: self.driver_api.schedule_create_volume(context, request_spec, filter_properties) except Exception as e: # An error happened: notify on the scheduler queue, log that it # happened, set the volume to an error state, and reraise the # error *if* the exception caught isn't NoValidHost. Otherwise # *do not* reraise (since what's the point?) with excutils.save_and_reraise_exception( reraise=not isinstance(e, exception.NoValidHost)): try: self._handle_failure(context, request_spec, e) finally: common.error_out_volume(context, self.db_api, request_spec['volume_id'], reason=e) def get_flow(context, db_api, driver_api, request_spec=None, filter_properties=None, volume=None, snapshot_id=None, image_id=None): """Constructs and returns the scheduler entrypoint flow. This flow will do the following: 1. Inject keys & values for dependent tasks. 2. Extract a scheduler specification from the provided inputs. 3. Use provided scheduler driver to select host and pass volume creation request further.
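Illustrative usage (arguments hypothetical), mirroring how SchedulerManager.create_volume() earlier in this listing drives the flow:

    flow_engine = get_flow(context, db, self.driver, request_spec,
                           filter_properties, volume, snapshot_id, image_id)
    with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
        flow_engine.run()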
""" create_what = { 'context': context, 'raw_request_spec': request_spec, 'filter_properties': filter_properties, 'volume': volume, 'snapshot_id': snapshot_id, 'image_id': image_id, } flow_name = ACTION.replace(":", "_") + "_scheduler" scheduler_flow = linear_flow.Flow(flow_name) # This will extract and clean the spec from the starting values. scheduler_flow.add(ExtractSchedulerSpecTask( db_api, rebind={'request_spec': 'raw_request_spec'})) # This will activate the desired scheduler driver (and handle any # driver related failures appropriately). scheduler_flow.add(ScheduleCreateVolumeTask(db_api, driver_api)) # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(scheduler_flow, store=create_what) cinder-8.0.0/cinder/scheduler/base_weight.py0000664000567000056710000001073412701406250022203 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Pluggable Weighing support """ import abc import six from cinder.scheduler import base_handler def normalize(weight_list, minval=None, maxval=None): """Normalize the values in a list between 0 and 1.0. The normalization is made regarding the lower and upper values present in weight_list. If the minval and/or maxval parameters are set, these values will be used instead of the minimum and maximum from the list. If all the values are equal, they are normalized to 0. """ if not weight_list: return () if maxval is None: maxval = max(weight_list) if minval is None: minval = min(weight_list) maxval = float(maxval) minval = float(minval) if minval == maxval: return [0] * len(weight_list) range_ = maxval - minval return ((i - minval) / range_ for i in weight_list) class WeighedObject(object): """Object with weight information.""" def __init__(self, obj, weight): self.obj = obj self.weight = weight def __repr__(self): return "" % (self.obj, self.weight) @six.add_metaclass(abc.ABCMeta) class BaseWeigher(object): """Base class for pluggable weighers. The attributes maxval and minval can be specified to set up the maximum and minimum values for the weighed objects. These values will then be taken into account in the normalization step, instead of taking the values from the calculated weights. """ minval = None maxval = None def weight_multiplier(self): """How weighted this weigher should be. Override this method in a subclass, so that the returned value is read from a configuration option to permit operators specify a multiplier for the weigher. """ return 1.0 @abc.abstractmethod def _weigh_object(self, obj, weight_properties): """Override in a subclass to specify a weight for a specific object.""" def weigh_objects(self, weighed_obj_list, weight_properties): """Weigh multiple objects. Override in a subclass if you need access to all objects in order to calculate weights. Do not modify the weight of an object here, just return a list of weights. 
""" # Calculate the weights weights = [] for obj in weighed_obj_list: weight = self._weigh_object(obj.obj, weight_properties) # Record the min and max values if they are None. If they anything # but none we assume that the weigher has set them if self.minval is None: self.minval = weight if self.maxval is None: self.maxval = weight if weight < self.minval: self.minval = weight elif weight > self.maxval: self.maxval = weight weights.append(weight) return weights class BaseWeightHandler(base_handler.BaseHandler): object_class = WeighedObject def get_weighed_objects(self, weigher_classes, obj_list, weighing_properties): """Return a sorted (descending), normalized list of WeighedObjects.""" if not obj_list: return [] weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] for weigher_cls in weigher_classes: weigher = weigher_cls() weights = weigher.weigh_objects(weighed_objs, weighing_properties) # Normalize the weights weights = normalize(weights, minval=weigher.minval, maxval=weigher.maxval) for i, weight in enumerate(weights): obj = weighed_objs[i] obj.weight += weigher.weight_multiplier() * weight return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) cinder-8.0.0/cinder/scheduler/driver.py0000664000567000056710000001114512701406250021212 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler base class that all Schedulers should inherit from """ from oslo_config import cfg from oslo_utils import importutils from oslo_utils import timeutils from cinder.i18n import _ from cinder import objects from cinder.volume import rpcapi as volume_rpcapi scheduler_driver_opts = [ cfg.StrOpt('scheduler_host_manager', default='cinder.scheduler.host_manager.HostManager', help='The scheduler host manager class to use'), cfg.IntOpt('scheduler_max_attempts', default=3, help='Maximum number of attempts to schedule a volume'), ] CONF = cfg.CONF CONF.register_opts(scheduler_driver_opts) def volume_update_db(context, volume_id, host): """Set the host and set the scheduled_at field of a volume. :returns: A Volume with the updated fields set properly. """ volume = objects.Volume.get_by_id(context, volume_id) volume.host = host volume.scheduled_at = timeutils.utcnow() volume.save() # A volume object is expected to be returned, as it is used by # filter_scheduler. return volume def group_update_db(context, group, host): """Set the host and the scheduled_at field of a consistencygroup. :returns: A Consistencygroup with the updated fields set properly. 
""" group.update({'host': host, 'updated_at': timeutils.utcnow()}) group.save() return group class Scheduler(object): """The base class that all Scheduler classes should inherit from.""" def __init__(self): self.host_manager = importutils.import_object( CONF.scheduler_host_manager) self.volume_rpcapi = volume_rpcapi.VolumeAPI() def reset(self): """Reset volume RPC API object to load new version pins.""" self.volume_rpcapi = volume_rpcapi.VolumeAPI() def is_ready(self): """Returns True if Scheduler is ready to accept requests. This is to handle scheduler service startup when it has no volume hosts stats and will fail all the requests. """ return self.host_manager.has_all_capabilities() def update_service_capabilities(self, service_name, host, capabilities): """Process a capability update from a service node.""" self.host_manager.update_service_capabilities(service_name, host, capabilities) def host_passes_filters(self, context, volume_id, host, filter_properties): """Check if the specified host passes the filters.""" raise NotImplementedError(_("Must implement host_passes_filters")) def find_retype_host(self, context, request_spec, filter_properties=None, migration_policy='never'): """Find a host that can accept the volume with its new type.""" raise NotImplementedError(_("Must implement find_retype_host")) def schedule(self, context, topic, method, *_args, **_kwargs): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) def schedule_create_volume(self, context, request_spec, filter_properties): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement schedule_create_volume")) def schedule_create_consistencygroup(self, context, group, request_spec_list, filter_properties_list): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_( "Must implement schedule_create_consistencygroup")) def get_pools(self, context, filters): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_( "Must implement schedule_get_pools")) cinder-8.0.0/cinder/scheduler/rpcapi.py0000664000567000056710000001472712701406250021206 0ustar jenkinsjenkins00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the scheduler manager RPC API. """ from oslo_config import cfg from oslo_serialization import jsonutils from cinder import rpc CONF = cfg.CONF class SchedulerAPI(rpc.RPCAPI): """Client side of the scheduler rpc API. API version history: 1.0 - Initial version. 
1.1 - Add create_volume() method 1.2 - Add request_spec, filter_properties arguments to create_volume() 1.3 - Add migrate_volume_to_host() method 1.4 - Add retype method 1.5 - Add manage_existing method 1.6 - Add create_consistencygroup method 1.7 - Add get_active_pools method 1.8 - Add sending object over RPC in create_consistencygroup method 1.9 - Adds support for sending objects over RPC in create_volume() 1.10 - Adds support for sending objects over RPC in retype() 1.11 - Adds support for sending objects over RPC in migrate_volume_to_host() ... Mitaka supports messaging 1.11. Any changes to existing methods in 1.x after this point should be done so that they can handle version cap set to 1.11. 2.0 - Remove 1.x compatibility """ RPC_API_VERSION = '2.0' TOPIC = CONF.scheduler_topic BINARY = 'cinder-scheduler' def _compat_ver(self, current, legacy): if self.client.can_send_version(current): return current else: return legacy def create_consistencygroup(self, ctxt, topic, group, request_spec_list=None, filter_properties_list=None): version = self._compat_ver('2.0', '1.8') cctxt = self.client.prepare(version=version) request_spec_p_list = [] for request_spec in request_spec_list: request_spec_p = jsonutils.to_primitive(request_spec) request_spec_p_list.append(request_spec_p) return cctxt.cast(ctxt, 'create_consistencygroup', topic=topic, group=group, request_spec_list=request_spec_p_list, filter_properties_list=filter_properties_list) def create_volume(self, ctxt, topic, volume_id, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, volume=None): request_spec_p = jsonutils.to_primitive(request_spec) msg_args = {'topic': topic, 'volume_id': volume_id, 'snapshot_id': snapshot_id, 'image_id': image_id, 'request_spec': request_spec_p, 'filter_properties': filter_properties} if self.client.can_send_version('2.0'): version = '2.0' msg_args['volume'] = volume elif self.client.can_send_version('1.9'): version = '1.9' msg_args['volume'] = volume else: version = '1.2' cctxt = self.client.prepare(version=version) return cctxt.cast(ctxt, 'create_volume', **msg_args) def migrate_volume_to_host(self, ctxt, topic, volume_id, host, force_host_copy=False, request_spec=None, filter_properties=None, volume=None): request_spec_p = jsonutils.to_primitive(request_spec) msg_args = {'topic': topic, 'volume_id': volume_id, 'host': host, 'force_host_copy': force_host_copy, 'request_spec': request_spec_p, 'filter_properties': filter_properties} if self.client.can_send_version('2.0'): version = '2.0' msg_args['volume'] = volume elif self.client.can_send_version('1.11'): version = '1.11' msg_args['volume'] = volume else: version = '1.3' cctxt = self.client.prepare(version=version) return cctxt.cast(ctxt, 'migrate_volume_to_host', **msg_args) def retype(self, ctxt, topic, volume_id, request_spec=None, filter_properties=None, volume=None): request_spec_p = jsonutils.to_primitive(request_spec) msg_args = {'topic': topic, 'volume_id': volume_id, 'request_spec': request_spec_p, 'filter_properties': filter_properties} if self.client.can_send_version('2.0'): version = '2.0' msg_args['volume'] = volume elif self.client.can_send_version('1.10'): version = '1.10' msg_args['volume'] = volume else: version = '1.4' cctxt = self.client.prepare(version=version) return cctxt.cast(ctxt, 'retype', **msg_args) def manage_existing(self, ctxt, topic, volume_id, request_spec=None, filter_properties=None): version = self._compat_ver('2.0', '1.5') cctxt = self.client.prepare(version=version) request_spec_p = 
jsonutils.to_primitive(request_spec) return cctxt.cast(ctxt, 'manage_existing', topic=topic, volume_id=volume_id, request_spec=request_spec_p, filter_properties=filter_properties) def get_pools(self, ctxt, filters=None): version = self._compat_ver('2.0', '1.7') cctxt = self.client.prepare(version=version) return cctxt.call(ctxt, 'get_pools', filters=filters) def update_service_capabilities(self, ctxt, service_name, host, capabilities): # FIXME(flaper87): What to do with fanout? version = self._compat_ver('2.0', '1.0') cctxt = self.client.prepare(fanout=True, version=version) cctxt.cast(ctxt, 'update_service_capabilities', service_name=service_name, host=host, capabilities=capabilities) cinder-8.0.0/cinder/scheduler/filters/0000775000567000056710000000000012701406543021020 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/scheduler/filters/capabilities_filter.py0000664000567000056710000000563712701406250025376 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import six from cinder.scheduler import filters from cinder.scheduler.filters import extra_specs_ops LOG = logging.getLogger(__name__) class CapabilitiesFilter(filters.BaseHostFilter): """HostFilter to work with resource (instance & volume) type records.""" def _satisfies_extra_specs(self, capabilities, resource_type): """Check if capabilities satisfy resource type requirements. Check that the capabilities provided by the services satisfy the extra specs associated with the resource type. """ extra_specs = resource_type.get('extra_specs', []) if not extra_specs: return True for key, req in six.iteritems(extra_specs): # Either not scope format, or in capabilities scope scope = key.split(':') if len(scope) > 1 and scope[0] != "capabilities": continue elif scope[0] == "capabilities": del scope[0] cap = capabilities for index in range(len(scope)): try: cap = cap.get(scope[index]) except AttributeError: return False if cap is None: LOG.debug("Host doesn't provide capability '%(cap)s' " % {'cap': scope[index]}) return False if not extra_specs_ops.match(cap, req): LOG.debug("extra_spec requirement '%(req)s' " "does not match '%(cap)s'", {'req': req, 'cap': cap}) return False return True def host_passes(self, host_state, filter_properties): """Return a list of hosts that can create resource_type.""" # Note(zhiteng) Currently only Cinder and Nova are using # this filter, so the resource type is either instance or # volume. resource_type = filter_properties.get('resource_type') if not self._satisfies_extra_specs(host_state.capabilities, resource_type): LOG.debug("%(host_state)s fails resource_type extra_specs " "requirements", {'host_state': host_state}) return False return True cinder-8.0.0/cinder/scheduler/filters/json_filter.py0000664000567000056710000001155012701406250023705 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. 
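An illustrative set of volume-type extra specs consumed by CapabilitiesFilter above (scoping follows _satisfies_extra_specs; the '<is>' operator syntax is assumed from extra_specs_ops; names hypothetical):

    extra_specs = {
        'capabilities:thin_provisioning_support': '<is> True',  # scoped key
        'volume_backend_name': 'lvm-1',          # unscoped key, matched directly
    }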
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_serialization import jsonutils import six from cinder.scheduler import filters class JsonFilter(filters.BaseHostFilter): """Host Filter to allow simple JSON-based grammar for selecting hosts.""" def _op_compare(self, args, op): """Compare first item of args with the rest using specified operator. Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list. """ if len(args) < 2: return False if op is operator.contains: bad = args[0] not in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms.""" return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" return [not arg for arg in args] def _or(self, args): """True if any arg is True.""" return any(args) def _and(self, args): """True if all args are True.""" return all(args) commands = { '=': _equals, '<': _less_than, '>': _greater_than, 'in': _in, '<=': _less_than_equal, '>=': _greater_than_equal, 'not': _not, 'or': _or, 'and': _and, } def _parse_string(self, string, host_state): """Parse capability lookup strings. Strings prefixed with $ are capability lookups in the form '$variable' where 'variable' is an attribute in the HostState class. If $variable is a dictionary, you may use: $variable.dictkey """ if not string: return None if not string.startswith("$"): return string path = string[1:].split(".") obj = getattr(host_state, path[0], None) if obj is None: return None for item in path[1:]: obj = obj.get(item) if obj is None: return None return obj def _process_filter(self, query, host_state): """Recursively parse the query structure.""" if not query: return True cmd = query[0] method = self.commands[cmd] cooked_args = [] for arg in query[1:]: if isinstance(arg, list): arg = self._process_filter(arg, host_state) elif isinstance(arg, six.string_types): arg = self._parse_string(arg, host_state) if arg is not None: cooked_args.append(arg) result = method(self, cooked_args) return result def host_passes(self, host_state, filter_properties): """Return a list of hosts that can fulfill query requirements.""" # TODO(zhiteng) Add description for filter_properties structure # and scheduler_hints. 
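        # Illustrative example (an addition, not in the original source): a
        # caller could pass a JSON query string such as
        #   '["and", [">=", "$free_capacity_gb", 100],
        #            ["in", "$host", "hostA", "hostB"]]'
        # via scheduler_hints; '$free_capacity_gb' and '$host' are resolved
        # against HostState attributes by _parse_string() above.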
try: query = filter_properties['scheduler_hints']['query'] except KeyError: query = None if not query: return True # NOTE(comstud): Not checking capabilities or service for # enabled/disabled so that a provided json filter can decide result = self._process_filter(jsonutils.loads(query), host_state) if isinstance(result, list): # If any succeeded, include the host result = any(result) if result: # Filter it out. return True return False cinder-8.0.0/cinder/scheduler/filters/__init__.py0000664000567000056710000000253512701406250023131 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host filters """ from cinder.scheduler import base_filter class BaseHostFilter(base_filter.BaseFilter): """Base class for host filters.""" def _filter_one(self, obj, filter_properties): """Return True if the object passes the filter, otherwise False.""" return self.host_passes(obj, filter_properties) def host_passes(self, host_state, filter_properties): """Return True if the HostState passes the filter, otherwise False. Override this in a subclass. """ raise NotImplementedError() class HostFilterHandler(base_filter.BaseFilterHandler): def __init__(self, namespace): super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) cinder-8.0.0/cinder/scheduler/filters/affinity_filter.py0000664000567000056710000000761412701406250024553 0ustar jenkinsjenkins00000000000000# Copyright 2014, eBay Inc. # Copyright 2014, OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_utils import uuidutils from cinder.scheduler import filters from cinder.volume import api as volume LOG = logging.getLogger(__name__) class AffinityFilter(filters.BaseHostFilter): def __init__(self): self.volume_api = volume.API() class DifferentBackendFilter(AffinityFilter): """Schedule volume on a different back-end from a set of volumes.""" def host_passes(self, host_state, filter_properties): context = filter_properties['context'] scheduler_hints = filter_properties.get('scheduler_hints') or {} affinity_uuids = scheduler_hints.get('different_host', []) # scheduler hint verification: affinity_uuids can be a list of uuids # or single uuid. The checks here are to make sure every single string # in the list looks like a uuid, otherwise, this filter will fail to # pass.
Note that the filter does *NOT* ignore a string that doesn't # look like a uuid; it is better to fail the request than to serve it wrong. if isinstance(affinity_uuids, list): for uuid in affinity_uuids: if uuidutils.is_uuid_like(uuid): continue else: return False elif uuidutils.is_uuid_like(affinity_uuids): affinity_uuids = [affinity_uuids] else: # Not a list, not a string that looks like a uuid; don't pass it # to the DB for a query, to avoid potential risk. return False if affinity_uuids: return not self.volume_api.get_all( context, filters={'host': host_state.host, 'id': affinity_uuids, 'deleted': False}) # With no different_host key return True class SameBackendFilter(AffinityFilter): """Schedule volume on the same back-end as another volume.""" def host_passes(self, host_state, filter_properties): context = filter_properties['context'] scheduler_hints = filter_properties.get('scheduler_hints') or {} affinity_uuids = scheduler_hints.get('same_host', []) # scheduler hint verification: affinity_uuids can be a list of uuids # or single uuid. The checks here are to make sure every single string # in the list looks like a uuid, otherwise, this filter will fail to # pass. Note that the filter does *NOT* ignore a string that doesn't # look like a uuid; it is better to fail the request than to serve it wrong. if isinstance(affinity_uuids, list): for uuid in affinity_uuids: if uuidutils.is_uuid_like(uuid): continue else: return False elif uuidutils.is_uuid_like(affinity_uuids): affinity_uuids = [affinity_uuids] else: # Not a list, not a string that looks like a uuid; don't pass it # to the DB for a query, to avoid potential risk. return False if affinity_uuids: return self.volume_api.get_all( context, filters={'host': host_state.host, 'id': affinity_uuids, 'deleted': False}) # With no same_host key return True cinder-8.0.0/cinder/scheduler/filters/driver_filter.py0000664000567000056710000001117712701406250024234 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import six from cinder.i18n import _LW from cinder.scheduler.evaluator import evaluator from cinder.scheduler import filters LOG = logging.getLogger(__name__) class DriverFilter(filters.BaseHostFilter): """DriverFilter filters hosts based on a 'filter function' and metrics. DriverFilter filters based on the volume host's provided 'filter function' and metrics. """ def host_passes(self, host_state, filter_properties): """Determines whether a host has a passing filter_function or not.""" stats = self._generate_stats(host_state, filter_properties) LOG.debug("Checking host '%s'", stats['host_stats']['host']) result = self._check_filter_function(stats) LOG.debug("Result: %s", result) LOG.debug("Done checking host '%s'", stats['host_stats']['host']) return result def _check_filter_function(self, stats): """Checks if a volume passes a host's filter function. Returns a boolean indicating whether the volume passes the host's filter function.
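        (Illustrative example, an addition to this docstring: a backend could
        advertise a capability such as
            filter_function = "volume.size <= 500 and stats.free_capacity_gb > volume.size"
        which _run_evaluator() below hands to the evaluator; the available
        namespaces are the keyword arguments listed there.)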
""" if stats['filter_function'] is None: LOG.debug("Filter function not set :: passing host") return True try: filter_result = self._run_evaluator(stats['filter_function'], stats) except Exception as ex: # Warn the admin for now that there is an error in the # filter function. LOG.warning(_LW("Error in filtering function " "'%(function)s' : '%(error)s' :: failing host"), {'function': stats['filter_function'], 'error': ex, }) return False return filter_result def _run_evaluator(self, func, stats): """Evaluates a given function using the provided available stats.""" host_stats = stats['host_stats'] host_caps = stats['host_caps'] extra_specs = stats['extra_specs'] qos_specs = stats['qos_specs'] volume_stats = stats['volume_stats'] result = evaluator.evaluate( func, extra=extra_specs, stats=host_stats, capabilities=host_caps, volume=volume_stats, qos=qos_specs) return result def _generate_stats(self, host_state, filter_properties): """Generates statistics from host and volume data.""" host_stats = { 'host': host_state.host, 'volume_backend_name': host_state.volume_backend_name, 'vendor_name': host_state.vendor_name, 'driver_version': host_state.driver_version, 'storage_protocol': host_state.storage_protocol, 'QoS_support': host_state.QoS_support, 'total_capacity_gb': host_state.total_capacity_gb, 'allocated_capacity_gb': host_state.allocated_capacity_gb, 'free_capacity_gb': host_state.free_capacity_gb, 'reserved_percentage': host_state.reserved_percentage, 'updated': host_state.updated, } host_caps = host_state.capabilities filter_function = None if ('filter_function' in host_caps and host_caps['filter_function'] is not None): filter_function = six.text_type(host_caps['filter_function']) qos_specs = filter_properties.get('qos_specs', {}) volume_type = filter_properties.get('volume_type', {}) extra_specs = volume_type.get('extra_specs', {}) request_spec = filter_properties.get('request_spec', {}) volume_stats = request_spec.get('volume_properties', {}) stats = { 'host_stats': host_stats, 'host_caps': host_caps, 'extra_specs': extra_specs, 'qos_specs': qos_specs, 'volume_stats': volume_stats, 'volume_type': volume_type, 'filter_function': filter_function, } return stats cinder-8.0.0/cinder/scheduler/filters/capacity_filter.py0000664000567000056710000001356312701406250024537 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Intel # Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import math from oslo_log import log as logging from cinder.i18n import _LE, _LW from cinder.scheduler import filters LOG = logging.getLogger(__name__) class CapacityFilter(filters.BaseHostFilter): """CapacityFilter filters based on volume host's capacity utilization.""" def host_passes(self, host_state, filter_properties): """Return True if host has sufficient capacity.""" # If the volume already exists on this host, don't fail it for # insufficient capacity (e.g., if we are retyping) if host_state.host == filter_properties.get('vol_exists_on'): return True volume_size = filter_properties.get('size') if host_state.free_capacity_gb is None: # Fail Safe LOG.error(_LE("Free capacity not set: " "volume node info collection broken.")) return False free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb reserved = float(host_state.reserved_percentage) / 100 if free_space in ['infinite', 'unknown']: # NOTE(zhiteng) for those back-ends cannot report actual # available capacity, we assume it is able to serve the # request. Even if it was not, the retry mechanism is # able to handle the failure by rescheduling return True elif total_space in ['infinite', 'unknown']: # If total_space is 'infinite' or 'unknown' and reserved # is 0, we assume the back-ends can serve the request. # If total_space is 'infinite' or 'unknown' and reserved # is not 0, we cannot calculate the reserved space. # float(total_space) will throw an exception. total*reserved # also won't work. So the back-ends cannot serve the request. if reserved == 0: return True return False total = float(total_space) if total <= 0: LOG.warning(_LW("Insufficient free space for volume creation. " "Total capacity is %(total).2f on host %(host)s."), {"total": total, "host": host_state.host}) return False # Calculate how much free space is left after taking into account # the reserved space. free = free_space - math.floor(total * reserved) msg_args = {"host": host_state.host, "requested": volume_size, "available": free} # Only evaluate using max_over_subscription_ratio if # thin_provisioning_support is True. Check if the ratio of # provisioned capacity over total capacity has exceeded over # subscription ratio. if (host_state.thin_provisioning_support and host_state.max_over_subscription_ratio >= 1): provisioned_ratio = ((host_state.provisioned_capacity_gb + volume_size) / total) if provisioned_ratio > host_state.max_over_subscription_ratio: LOG.warning(_LW( "Insufficient free space for thin provisioning. " "The ratio of provisioned capacity over total capacity " "%(provisioned_ratio).2f has exceeded the maximum over " "subscription ratio %(oversub_ratio).2f on host " "%(host)s."), {"provisioned_ratio": provisioned_ratio, "oversub_ratio": host_state.max_over_subscription_ratio, "host": host_state.host}) return False else: # Thin provisioning is enabled and projected over-subscription # ratio does not exceed max_over_subscription_ratio. The host # passes if "adjusted" free virtual capacity is enough to # accommodate the volume. Adjusted free virtual capacity is # the currently available free capacity (taking into account # of reserved space) which we can over-subscribe. adjusted_free_virtual = ( free * host_state.max_over_subscription_ratio) return adjusted_free_virtual >= volume_size elif host_state.thin_provisioning_support: LOG.warning(_LW("Filtering out host %(host)s with an invalid " "maximum over subscription ratio of " "%(oversub_ratio).2f. 
The ratio should be a " "minimum of 1.0."), {"oversub_ratio": host_state.max_over_subscription_ratio, "host": host_state.host}) return False if free < volume_size: LOG.warning(_LW("Insufficient free space for volume creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s"), msg_args) return False LOG.debug("Space information for volume creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s", msg_args) return True cinder-8.0.0/cinder/scheduler/filters/extra_specs_ops.py0000664000567000056710000000442312701406250024571 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_utils import strutils # 1. The following operations are supported: # =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <= # 2. Note that <or> is handled in a different way below. # 3. If the first word in the extra_specs is not one of the operators, # it is ignored. _op_methods = {'=': lambda x, y: float(x) >= float(y), '<in>': lambda x, y: y in x, '<is>': lambda x, y: (strutils.bool_from_string(x) is strutils.bool_from_string(y)), '==': lambda x, y: float(x) == float(y), '!=': lambda x, y: float(x) != float(y), '>=': lambda x, y: float(x) >= float(y), '<=': lambda x, y: float(x) <= float(y), 's==': operator.eq, 's!=': operator.ne, 's<': operator.lt, 's<=': operator.le, 's>': operator.gt, 's>=': operator.ge} def match(value, req): words = req.split() op = method = None if words: op = words.pop(0) method = _op_methods.get(op) if op != '<or>' and not method: return value == req if value is None: return False if op == '<or>': # Ex: <or> v1 <or> v2 <or> v3 while True: if words.pop(0) == value: return True if not words: break op = words.pop(0) # remove a keyword <or> if not words: break return False try: if words and method(value, words[0]): return True except ValueError: pass return False cinder-8.0.0/cinder/scheduler/filters/ignore_attempted_hosts_filter.py0000664000567000056710000000364312701406250027512 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.scheduler import filters LOG = logging.getLogger(__name__) class IgnoreAttemptedHostsFilter(filters.BaseHostFilter): """Filter out previously attempted hosts A host passes this filter if it has not already been attempted for scheduling.
The scheduler needs to add previously attempted hosts to the 'retry' key of filter_properties in order for this to work correctly. For example:: { 'retry': { 'hosts': ['host1', 'host2'], 'num_attempts': 3, } } """ def host_passes(self, host_state, filter_properties): """Skip nodes that have already been attempted.""" attempted = filter_properties.get('retry') if not attempted: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled.") return True hosts = attempted.get('hosts', []) host = host_state.host passes = host not in hosts pass_msg = "passes" if passes else "fails" LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: " "%(hosts)s" % {'host': host, 'pass_msg': pass_msg, 'hosts': hosts}) return passes cinder-8.0.0/cinder/scheduler/filters/availability_zone_filter.py0000664000567000056710000000232712701406250026443 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.scheduler import filters class AvailabilityZoneFilter(filters.BaseHostFilter): """Filters Hosts by availability zone.""" # Availability zones do not change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): spec = filter_properties.get('request_spec', {}) props = spec.get('resource_properties', {}) availability_zone = props.get('availability_zone') if availability_zone: return availability_zone == host_state.service['availability_zone'] return True cinder-8.0.0/cinder/scheduler/filters/instance_locality_filter.py0000664000567000056710000001173112701406250026441 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2014, Adrien Vergé # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_utils import uuidutils from cinder.compute import nova from cinder import exception from cinder.i18n import _, _LW from cinder.scheduler import filters from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) HINT_KEYWORD = 'local_to_instance' INSTANCE_HOST_PROP = 'OS-EXT-SRV-ATTR:host' REQUESTS_TIMEOUT = 5 class InstanceLocalityFilter(filters.BaseHostFilter): """Schedule volume on the same host as a given instance. This filter enables selection of a storage back-end located on the host where the instance's hypervisor is running. This provides data locality: the instance and the volume are located on the same physical machine. 
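    (Illustrative example, an addition to this docstring: with the
    python-cinderclient CLI the hint would be supplied as
        cinder create --hint local_to_instance=<instance-uuid> 10
    where 10 is the requested volume size in GB.)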
In order to work: - The Extended Server Attributes extension needs to be active in Nova (this is by default), so that the 'OS-EXT-SRV-ATTR:host' property is returned when requesting instance info. - Either an account with privileged rights for Nova must be configured in Cinder configuration (see 'os_privileged_user_name'), or the user making the call needs to have sufficient rights (see 'extended_server_attributes' in Nova policy). """ def __init__(self): # Cache Nova API answers directly into the Filter object. # Since a BaseHostFilter instance lives only during the volume's # scheduling, the cache is re-created for every new volume creation. self._cache = {} super(InstanceLocalityFilter, self).__init__() def _nova_has_extended_server_attributes(self, context): """Check Extended Server Attributes presence Find out whether the Extended Server Attributes extension is activated in Nova or not. Cache the result to query Nova only once. """ if not hasattr(self, '_nova_ext_srv_attr'): self._nova_ext_srv_attr = nova.API().has_extension( context, 'ExtendedServerAttributes', timeout=REQUESTS_TIMEOUT) return self._nova_ext_srv_attr def host_passes(self, host_state, filter_properties): context = filter_properties['context'] host = volume_utils.extract_host(host_state.host, 'host') scheduler_hints = filter_properties.get('scheduler_hints') or {} instance_uuid = scheduler_hints.get(HINT_KEYWORD, None) # Without 'local_to_instance' hint if not instance_uuid: return True if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) # TODO(adrienverge): Currently it is not recommended to allow instance # migrations for hypervisors where this hint will be used. In case of # instance migration, a previously locally-created volume will not be # automatically migrated. Also in case of instance migration during the # volume's scheduling, the result is unpredictable. A future # enhancement would be to subscribe to Nova migration events (e.g. via # Ceilometer). # First, lookup for already-known information in local cache if instance_uuid in self._cache: return self._cache[instance_uuid] == host if not self._nova_has_extended_server_attributes(context): LOG.warning(_LW('Hint "%s" dropped because ' 'ExtendedServerAttributes not active in Nova.'), HINT_KEYWORD) raise exception.CinderException(_('Hint "%s" not supported.') % HINT_KEYWORD) server = nova.API().get_server(context, instance_uuid, privileged_user=True, timeout=REQUESTS_TIMEOUT) if not hasattr(server, INSTANCE_HOST_PROP): LOG.warning(_LW('Hint "%s" dropped because Nova did not return ' 'enough information. Either Nova policy needs to ' 'be changed or a privileged account for Nova ' 'should be specified in conf.'), HINT_KEYWORD) raise exception.CinderException(_('Hint "%s" not supported.') % HINT_KEYWORD) self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP) # Match if given instance is hosted on host return self._cache[instance_uuid] == host cinder-8.0.0/cinder/scheduler/evaluator/0000775000567000056710000000000012701406543021352 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/scheduler/evaluator/__init__.py0000664000567000056710000000000012701406250023444 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/scheduler/evaluator/evaluator.py0000664000567000056710000001733712701406250023734 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator import re import pyparsing import six from cinder import exception from cinder.i18n import _ def _operatorOperands(tokenList): it = iter(tokenList) while 1: try: op1 = next(it) op2 = next(it) yield(op1, op2) except StopIteration: break class EvalConstant(object): def __init__(self, toks): self.value = toks[0] def eval(self): result = self.value if (isinstance(result, six.string_types) and re.match("^[a-zA-Z_]+\.[a-zA-Z_]+$", result)): (which_dict, entry) = result.split('.') try: result = _vars[which_dict][entry] except KeyError as e: raise exception.EvaluatorParseException( _("KeyError: %s") % six.text_type(e)) except TypeError as e: raise exception.EvaluatorParseException( _("TypeError: %s") % six.text_type(e)) try: result = int(result) except ValueError: try: result = float(result) except ValueError as e: raise exception.EvaluatorParseException( _("ValueError: %s") % six.text_type(e)) return result class EvalSignOp(object): operations = { '+': 1, '-': -1, } def __init__(self, toks): self.sign, self.value = toks[0] def eval(self): return self.operations[self.sign] * self.value.eval() class EvalAddOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): sum = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): if op == '+': sum += val.eval() elif op == '-': sum -= val.eval() return sum class EvalMultOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): prod = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): try: if op == '*': prod *= val.eval() elif op == '/': prod /= float(val.eval()) except ZeroDivisionError as e: raise exception.EvaluatorParseException( _("ZeroDivisionError: %s") % six.text_type(e)) return prod class EvalPowerOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): prod = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): prod = pow(prod, val.eval()) return prod class EvalNegateOp(object): def __init__(self, toks): self.negation, self.value = toks[0] def eval(self): return not self.value.eval() class EvalComparisonOp(object): operations = { "<": operator.lt, "<=": operator.le, ">": operator.gt, ">=": operator.ge, "!=": operator.ne, "==": operator.eq, "<>": operator.ne, } def __init__(self, toks): self.value = toks[0] def eval(self): val1 = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): fn = self.operations[op] val2 = val.eval() if not fn(val1, val2): break val1 = val2 else: return True return False class EvalTernaryOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): condition = self.value[0].eval() if condition: return self.value[2].eval() else: return self.value[4].eval() class EvalFunction(object): functions = { "abs": abs, "max": max, "min": min, } def __init__(self, toks): self.func, self.value = toks[0] def eval(self): args = self.value.eval() if type(args) is list: return self.functions[self.func](*args) else: return self.functions[self.func](args) class 
EvalCommaSeperator(object): def __init__(self, toks): self.value = toks[0] def eval(self): val1 = self.value[0].eval() val2 = self.value[2].eval() if type(val2) is list: val_list = [] val_list.append(val1) for val in val2: val_list.append(val) return val_list return [val1, val2] class EvalBoolAndOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): left = self.value[0].eval() right = self.value[2].eval() return left and right class EvalBoolOrOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): left = self.value[0].eval() right = self.value[2].eval() return left or right _parser = None _vars = {} def _def_parser(): # Enabling packrat parsing greatly speeds up the parsing. pyparsing.ParserElement.enablePackrat() alphas = pyparsing.alphas Combine = pyparsing.Combine Forward = pyparsing.Forward nums = pyparsing.nums oneOf = pyparsing.oneOf opAssoc = pyparsing.opAssoc operatorPrecedence = pyparsing.operatorPrecedence Word = pyparsing.Word integer = Word(nums) real = Combine(Word(nums) + '.' + Word(nums)) variable = Word(alphas + '_' + '.') number = real | integer expr = Forward() fn = Word(alphas + '_' + '.') operand = number | variable | fn signop = oneOf('+ -') addop = oneOf('+ -') multop = oneOf('* /') comparisonop = oneOf(' '.join(EvalComparisonOp.operations.keys())) ternaryop = ('?', ':') boolandop = oneOf('AND and &&') boolorop = oneOf('OR or ||') negateop = oneOf('NOT not !') operand.setParseAction(EvalConstant) expr = operatorPrecedence(operand, [ (fn, 1, opAssoc.RIGHT, EvalFunction), ("^", 2, opAssoc.RIGHT, EvalPowerOp), (signop, 1, opAssoc.RIGHT, EvalSignOp), (multop, 2, opAssoc.LEFT, EvalMultOp), (addop, 2, opAssoc.LEFT, EvalAddOp), (negateop, 1, opAssoc.RIGHT, EvalNegateOp), (comparisonop, 2, opAssoc.LEFT, EvalComparisonOp), (ternaryop, 3, opAssoc.LEFT, EvalTernaryOp), (boolandop, 2, opAssoc.LEFT, EvalBoolAndOp), (boolorop, 2, opAssoc.LEFT, EvalBoolOrOp), (',', 2, opAssoc.RIGHT, EvalCommaSeperator), ]) return expr def evaluate(expression, **kwargs): """Evaluates an expression. Provides the facility to evaluate mathematical expressions, and to substitute variables from dictionaries into those expressions. Supports both integer and floating point values, and automatic promotion where necessary. """ global _parser if _parser is None: _parser = _def_parser() global _vars _vars = kwargs try: result = _parser.parseString(expression, parseAll=True)[0] except pyparsing.ParseException as e: raise exception.EvaluatorParseException( _("ParseException: %s") % six.text_type(e)) return result.eval() cinder-8.0.0/cinder/scheduler/weights/0000775000567000056710000000000012701406543021022 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/scheduler/weights/goodness.py0000664000567000056710000001224412701406250023213 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
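# Illustrative examples (an addition, not in the original file) of goodness
# functions a backend might advertise through its 'goodness_function'
# capability:
#   "100 * (stats.free_capacity_gb / stats.total_capacity_gb)"
#   "(volume.size < 10) ? 100 : 25"
# The namespaces (stats, capabilities, volume, qos, extra) correspond to the
# keyword arguments passed to evaluator.evaluate() in _run_evaluator() below.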
from oslo_log import log as logging import six from cinder.i18n import _LW from cinder.scheduler.evaluator import evaluator from cinder.scheduler import weights LOG = logging.getLogger(__name__) class GoodnessWeigher(weights.BaseHostWeigher): """Goodness Weigher. Assign weights based on a host's goodness function. Goodness rating is the following: 0 -- host is a poor choice ... 50 -- host is a good choice ... 100 -- host is a perfect choice """ def _weigh_object(self, host_state, weight_properties): """Determine host's goodness rating based on a goodness_function.""" stats = self._generate_stats(host_state, weight_properties) LOG.debug("Checking host '%s'", stats['host_stats']['host']) result = self._check_goodness_function(stats) LOG.debug("Goodness: %s", result) LOG.debug("Done checking host '%s'", stats['host_stats']['host']) return result def _check_goodness_function(self, stats): """Gets a host's goodness rating based on its goodness function.""" goodness_rating = 0 if stats['goodness_function'] is None: LOG.warning(_LW("Goodness function not set :: defaulting to " "minimal goodness rating of 0")) else: try: goodness_result = self._run_evaluator( stats['goodness_function'], stats) except Exception as ex: LOG.warning(_LW("Error in goodness_function function " "'%(function)s' : '%(error)s' :: Defaulting " "to a goodness of 0"), {'function': stats['goodness_function'], 'error': ex, }) return goodness_rating if type(goodness_result) is bool: if goodness_result: goodness_rating = 100 elif goodness_result < 0 or goodness_result > 100: LOG.warning(_LW("Invalid goodness result. Result must be " "between 0 and 100. Result generated: '%s' " ":: Defaulting to a goodness of 0"), goodness_result) else: goodness_rating = goodness_result return goodness_rating def _run_evaluator(self, func, stats): """Evaluates a given function using the provided available stats.""" host_stats = stats['host_stats'] host_caps = stats['host_caps'] extra_specs = stats['extra_specs'] qos_specs = stats['qos_specs'] volume_stats = stats['volume_stats'] result = evaluator.evaluate( func, extra=extra_specs, stats=host_stats, capabilities=host_caps, volume=volume_stats, qos=qos_specs) return result def _generate_stats(self, host_state, weight_properties): """Generates statistics from host and volume data.""" host_stats = { 'host': host_state.host, 'volume_backend_name': host_state.volume_backend_name, 'vendor_name': host_state.vendor_name, 'driver_version': host_state.driver_version, 'storage_protocol': host_state.storage_protocol, 'QoS_support': host_state.QoS_support, 'total_capacity_gb': host_state.total_capacity_gb, 'allocated_capacity_gb': host_state.allocated_capacity_gb, 'free_capacity_gb': host_state.free_capacity_gb, 'reserved_percentage': host_state.reserved_percentage, 'updated': host_state.updated, } host_caps = host_state.capabilities goodness_function = None if ('goodness_function' in host_caps and host_caps['goodness_function'] is not None): goodness_function = six.text_type(host_caps['goodness_function']) qos_specs = weight_properties.get('qos_specs', {}) volume_type = weight_properties.get('volume_type', {}) extra_specs = volume_type.get('extra_specs', {}) request_spec = weight_properties.get('request_spec', {}) volume_stats = request_spec.get('volume_properties', {}) stats = { 'host_stats': host_stats, 'host_caps': host_caps, 'extra_specs': extra_specs, 'qos_specs': qos_specs, 'volume_stats': volume_stats, 'volume_type': volume_type, 'goodness_function': goodness_function, } return stats 
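A minimal standalone sketch (an addition, not shipped in this tarball) showing how the expression evaluator behind GoodnessWeigher and DriverFilter can be exercised directly; the sample figures are invented:

    # Drive cinder.scheduler.evaluator by hand with made-up host stats.
    from cinder.scheduler.evaluator import evaluator

    # Keyword arguments become dotted namespaces inside the expression.
    stats = {'free_capacity_gb': 150, 'total_capacity_gb': 1000}

    # Goodness-style arithmetic: scale free space to a 0-100 rating.
    print(evaluator.evaluate(
        "100 * (stats.free_capacity_gb / stats.total_capacity_gb)",
        stats=stats))                                          # 15.0

    # Filter-style boolean expression, as DriverFilter would evaluate it.
    print(evaluator.evaluate("stats.free_capacity_gb >= 100",
                             stats=stats))                     # True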
cinder-8.0.0/cinder/scheduler/weights/volume_number.py0000664000567000056710000000403612701406250024251 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Weighers that weigh hosts by volume number in backends: 1. Volume Number Weigher. Weigh hosts by their volume number. The default is to spread volumes across all hosts evenly. If you prefer stacking, you can set the 'volume_number_multiplier' option to a positive number and the weighing has the opposite effect of the default. """ from oslo_config import cfg from oslo_log import log as logging from cinder import db from cinder.scheduler import weights LOG = logging.getLogger(__name__) volume_number_weight_opts = [ cfg.FloatOpt('volume_number_multiplier', default=-1.0, help='Multiplier used for weighing volume number. ' 'Negative numbers mean to spread vs stack.'), ] CONF = cfg.CONF CONF.register_opts(volume_number_weight_opts) class VolumeNumberWeigher(weights.BaseHostWeigher): def weight_multiplier(self): """Override the weight multiplier.""" return CONF.volume_number_multiplier def _weigh_object(self, host_state, weight_properties): """Less volume number weights win. We want spreading to be the default. """ context = weight_properties['context'] volume_number = db.volume_data_get_for_host(context=context, host=host_state.host, count_only=True) return volume_number cinder-8.0.0/cinder/scheduler/weights/__init__.py0000664000567000056710000000243312701406250023130 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host weights """ from cinder.scheduler import base_weight class WeighedHost(base_weight.WeighedObject): def to_dict(self): return { 'weight': self.weight, 'host': self.obj.host, } def __repr__(self): return ("WeighedHost [host: %s, weight: %s]" % (self.obj.host, self.weight)) class BaseHostWeigher(base_weight.BaseWeigher): """Base class for host weights.""" pass class HostWeightHandler(base_weight.BaseWeightHandler): object_class = WeighedHost def __init__(self, namespace): super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace) cinder-8.0.0/cinder/scheduler/weights/chance.py0000664000567000056710000000164612701406250022617 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Chance Weigher. Assign random weights to hosts. Used to spread volumes randomly across a list of equally suitable hosts. """ import random from cinder.scheduler import weights class ChanceWeigher(weights.BaseHostWeigher): def _weigh_object(self, host_state, weight_properties): return random.random() cinder-8.0.0/cinder/scheduler/weights/capacity.py0000664000567000056710000001265012701406250023170 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 eBay Inc. # Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Weighers that weigh hosts by their capacity, including following two weighers: 1. Capacity Weigher. Weigh hosts by their virtual or actual free capacity. For thin provisioning, weigh hosts by their virtual free capacity calculated by the total capacity multiplied by the max over subscription ratio and subtracting the provisioned capacity; Otherwise, weigh hosts by their actual free capacity, taking into account the reserved space. The default is to spread volumes across all hosts evenly. If you prefer stacking, you can set the 'capacity_weight_multiplier' option to a negative number and the weighing has the opposite effect of the default. 2. Allocated Capacity Weigher. Weigh hosts by their allocated capacity. The default behavior is to place new volume to the host allocated the least space. This weigher is intended to simulate the behavior of SimpleScheduler. If you prefer to place volumes to host allocated the most space, you can set the 'allocated_capacity_weight_multiplier' option to a positive number and the weighing has the opposite effect of the default. """ import math from oslo_config import cfg from cinder.scheduler import weights from cinder import utils capacity_weight_opts = [ cfg.FloatOpt('capacity_weight_multiplier', default=1.0, help='Multiplier used for weighing free capacity. ' 'Negative numbers mean to stack vs spread.'), cfg.FloatOpt('allocated_capacity_weight_multiplier', default=-1.0, help='Multiplier used for weighing allocated capacity. ' 'Positive numbers mean to stack vs spread.'), ] CONF = cfg.CONF CONF.register_opts(capacity_weight_opts) OFFSET_MIN = 10000 OFFSET_MULT = 100 class CapacityWeigher(weights.BaseHostWeigher): def weight_multiplier(self): """Override the weight multiplier.""" return CONF.capacity_weight_multiplier def weigh_objects(self, weighed_obj_list, weight_properties): """Override the weigh objects. 
This override calls the parent to do the weigh objects and then replaces any infinite weights with a value that is a multiple of the delta between the min and max values. NOTE(jecarey): the infinite weight value is only used when the smallest value is being favored (negative multiplier). When the largest weight value is being used a weight of -1 is used instead. See _weigh_object method. """ tmp_weights = super(weights.BaseHostWeigher, self).weigh_objects( weighed_obj_list, weight_properties) if math.isinf(self.maxval): # NOTE(jecarey): if all weights were infinite then parent # method returns 0 for all of the weights. Thus self.minval # cannot be infinite at this point copy_weights = [w for w in tmp_weights if not math.isinf(w)] self.maxval = max(copy_weights) offset = (self.maxval - self.minval) * OFFSET_MULT self.maxval += OFFSET_MIN if offset == 0.0 else offset tmp_weights = [self.maxval if math.isinf(w) else w for w in tmp_weights] return tmp_weights def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want spreading to be the default.""" free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb if (free_space == 'infinite' or free_space == 'unknown' or total_space == 'infinite' or total_space == 'unknown'): # (zhiteng) 'infinite' and 'unknown' are treated the same # here, for sorting purpose. # As a partial fix for bug #1350638, 'infinite' and 'unknown' are # given the lowest weight to discourage driver from report such # capacity anymore. free = -1 if CONF.capacity_weight_multiplier > 0 else float('inf') else: free = utils.calculate_virtual_free_capacity( total_space, free_space, host_state.provisioned_capacity_gb, host_state.thin_provisioning_support, host_state.max_over_subscription_ratio, host_state.reserved_percentage) return free class AllocatedCapacityWeigher(weights.BaseHostWeigher): def weight_multiplier(self): """Override the weight multiplier.""" return CONF.allocated_capacity_weight_multiplier def _weigh_object(self, host_state, weight_properties): # Higher weights win. We want spreading (choose host with lowest # allocated_capacity first) to be the default. allocated_space = host_state.allocated_capacity_gb return allocated_space cinder-8.0.0/cinder/scheduler/base_filter.py0000664000567000056710000000702512701406250022200 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Filter support """ from oslo_log import log as logging from cinder.i18n import _LI from cinder.scheduler import base_handler LOG = logging.getLogger(__name__) class BaseFilter(object): """Base class for all filter classes.""" def _filter_one(self, obj, filter_properties): """Return True if it passes the filter, False otherwise. Override this in a subclass. """ return True def filter_all(self, filter_obj_list, filter_properties): """Yield objects that pass the filter. Can be overridden in a subclass, if you need to base filtering decisions on all objects. 
Otherwise, one can just override _filter_one() to filter a single object. """ for obj in filter_obj_list: if self._filter_one(obj, filter_properties): yield obj # Set to true in a subclass if a filter only needs to be run once # for each request rather than for each instance run_filter_once_per_request = False def run_filter_for_index(self, index): """Return True if the filter needs to be run for n-th instances. Only need to override this if a filter needs anything other than "first only" or "all" behaviour. """ return not (self.run_filter_once_per_request and index > 0) class BaseFilterHandler(base_handler.BaseHandler): """Base class to handle loading filter classes. This class should be subclassed where one needs to use filters. """ def get_filtered_objects(self, filter_classes, objs, filter_properties, index=0): """Get objects after filter :param filter_classes: filters that will be used to filter the objects :param objs: objects that will be filtered :param filter_properties: client filter properties :param index: This value needs to be increased in the caller function of get_filtered_objects when handling each resource. """ list_objs = list(objs) LOG.debug("Starting with %d host(s)", len(list_objs)) for filter_cls in filter_classes: cls_name = filter_cls.__name__ filter_class = filter_cls() if filter_class.run_filter_for_index(index): objs = filter_class.filter_all(list_objs, filter_properties) if objs is None: LOG.debug("Filter %(cls_name)s says to stop filtering", {'cls_name': cls_name}) return list_objs = list(objs) msg = (_LI("Filter %(cls_name)s returned %(obj_len)d host(s)") % {'cls_name': cls_name, 'obj_len': len(list_objs)}) if not list_objs: LOG.info(msg) break LOG.debug(msg) return list_objs cinder-8.0.0/cinder/image/0000775000567000056710000000000012701406543016454 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/image/glance.py0000664000567000056710000005541512701406257020273 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2013 NTT corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of an image service that uses Glance as the backend""" from __future__ import absolute_import import copy import itertools import random import shutil import sys import time import glanceclient.exc from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import timeutils import six from six.moves import range from six.moves import urllib from cinder import exception from cinder.i18n import _LE, _LW glance_opts = [ cfg.ListOpt('allowed_direct_url_schemes', default=[], help='A list of url schemes that can be downloaded directly ' 'via the direct_url. 
Currently supported schemes: ' '[file].'), ] glance_core_properties_opts = [ cfg.ListOpt('glance_core_properties', default=['checksum', 'container_format', 'disk_format', 'image_name', 'image_id', 'min_disk', 'min_ram', 'name', 'size'], help='Default core properties of image') ] CONF = cfg.CONF CONF.register_opts(glance_opts) CONF.register_opts(glance_core_properties_opts) CONF.import_opt('glance_api_version', 'cinder.common.config') LOG = logging.getLogger(__name__) def _parse_image_ref(image_href): """Parse an image href into composite parts. :param image_href: href of an image :returns: a tuple of the form (image_id, netloc, use_ssl) :raises ValueError """ url = urllib.parse.urlparse(image_href) netloc = url.netloc image_id = url.path.split('/')[-1] use_ssl = (url.scheme == 'https') return (image_id, netloc, use_ssl) def _create_glance_client(context, netloc, use_ssl, version=None): """Instantiate a new glanceclient.Client object.""" if version is None: version = CONF.glance_api_version params = {} if use_ssl: scheme = 'https' # https specific params params['insecure'] = CONF.glance_api_insecure params['ssl_compression'] = CONF.glance_api_ssl_compression params['cacert'] = CONF.glance_ca_certificates_file else: scheme = 'http' if CONF.auth_strategy == 'keystone': params['token'] = context.auth_token if CONF.glance_request_timeout is not None: params['timeout'] = CONF.glance_request_timeout endpoint = '%s://%s' % (scheme, netloc) return glanceclient.Client(str(version), endpoint, **params) def get_api_servers(): """Return an iterable over shuffled API servers. Shuffle a list of CONF.glance_api_servers and return an iterator that will cycle through the list, looping around to the beginning if necessary. """ api_servers = [] for api_server in CONF.glance_api_servers: if '//' not in api_server: api_server = 'http://' + api_server url = urllib.parse.urlparse(api_server) netloc = url.netloc use_ssl = (url.scheme == 'https') api_servers.append((netloc, use_ssl)) random.shuffle(api_servers) return itertools.cycle(api_servers) class GlanceClientWrapper(object): """Glance client wrapper class that implements retries.""" def __init__(self, context=None, netloc=None, use_ssl=False, version=None): if netloc is not None: self.client = self._create_static_client(context, netloc, use_ssl, version) else: self.client = None self.api_servers = None self.version = version if CONF.glance_num_retries < 0: LOG.warning(_LW( "glance_num_retries shouldn't be a negative value. " "The number of retries will be set to 0 until this is " "corrected in the cinder.conf.")) CONF.set_override('glance_num_retries', 0) def _create_static_client(self, context, netloc, use_ssl, version): """Create a client that we'll use for every call.""" self.netloc = netloc self.use_ssl = use_ssl self.version = version return _create_glance_client(context, self.netloc, self.use_ssl, self.version) def _create_onetime_client(self, context, version): """Create a client that will be used for one call.""" if self.api_servers is None: self.api_servers = get_api_servers() self.netloc, self.use_ssl = next(self.api_servers) return _create_glance_client(context, self.netloc, self.use_ssl, version) def call(self, context, method, *args, **kwargs): """Call a glance client method. If we get a connection error, retry the request according to CONF.glance_num_retries.
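        (Illustrative example, an addition to this docstring:
            self._client.call(context, 'get', image_id)
        dispatches to the glanceclient images controller, i.e.
        client.images.get(image_id), retrying across API servers on
        connection errors.)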
""" version = kwargs.pop('version', self.version) retry_excs = (glanceclient.exc.ServiceUnavailable, glanceclient.exc.InvalidEndpoint, glanceclient.exc.CommunicationError) num_attempts = 1 + CONF.glance_num_retries for attempt in range(1, num_attempts + 1): client = self.client or self._create_onetime_client(context, version) try: controller = getattr(client, kwargs.pop('controller', 'images')) return getattr(controller, method)(*args, **kwargs) except retry_excs as e: netloc = self.netloc extra = "retrying" error_msg = _LE("Error contacting glance server " "'%(netloc)s' for '%(method)s', " "%(extra)s.") if attempt == num_attempts: extra = 'done trying' LOG.exception(error_msg, {'netloc': netloc, 'method': method, 'extra': extra}) raise exception.GlanceConnectionFailed(reason=e) LOG.exception(error_msg, {'netloc': netloc, 'method': method, 'extra': extra}) time.sleep(1) class GlanceImageService(object): """Provides storage and retrieval of disk image objects within Glance.""" def __init__(self, client=None): self._client = client or GlanceClientWrapper() self._image_schema = None self.temp_images = None def detail(self, context, **kwargs): """Calls out to Glance for a list of detailed image information.""" params = self._extract_query_params(kwargs) try: images = self._client.call(context, 'list', **params) except Exception: _reraise_translated_exception() _images = [] for image in images: if self._is_image_available(context, image): _images.append(self._translate_from_glance(context, image)) return _images def _extract_query_params(self, params): _params = {} accepted_params = ('filters', 'marker', 'limit', 'sort_key', 'sort_dir') for param in accepted_params: if param in params: _params[param] = params.get(param) # ensure filters is a dict _params.setdefault('filters', {}) # NOTE(vish): don't filter out private images _params['filters'].setdefault('is_public', 'none') return _params def show(self, context, image_id): """Returns a dict with image data for the given opaque image id.""" try: image = self._client.call(context, 'get', image_id) except Exception: _reraise_translated_image_exception(image_id) if not self._is_image_available(context, image): raise exception.ImageNotFound(image_id=image_id) base_image_meta = self._translate_from_glance(context, image) return base_image_meta def get_location(self, context, image_id): """Get backend storage location url. Returns a tuple containing the direct url and locations representing the backend storage location, or (None, None) if these attributes are not shown by Glance. """ if CONF.glance_api_version == 1: # image location not available in v1 return (None, None) try: # direct_url is returned by v2 api client = GlanceClientWrapper(version=2) image_meta = client.call(context, 'get', image_id) except Exception: _reraise_translated_image_exception(image_id) if not self._is_image_available(context, image_meta): raise exception.ImageNotFound(image_id=image_id) # some glance stores like nfs only meta data # is stored and returned as locations. # so composite of two needs to be returned. return (getattr(image_meta, 'direct_url', None), getattr(image_meta, 'locations', None)) def add_location(self, context, image_id, url, metadata): """Add a backend location url to an image. Returns a dict containing image metadata on success. 
""" if CONF.glance_api_version != 2: raise exception.Invalid("Image API version 2 is disabled.") client = GlanceClientWrapper(version=2) try: return client.call(context, 'add_location', image_id, url, metadata) except Exception: _reraise_translated_image_exception(image_id) def delete_locations(self, context, image_id, url_set): """Delete backend location urls from an image.""" if CONF.glance_api_version != 2: raise exception.Invalid("Image API version 2 is disabled.") client = GlanceClientWrapper(version=2) try: return client.call(context, 'delete_locations', image_id, url_set) except Exception: _reraise_translated_image_exception(image_id) def download(self, context, image_id, data=None): """Calls out to Glance for data and writes data.""" if data and 'file' in CONF.allowed_direct_url_schemes: direct_url, locations = self.get_location(context, image_id) urls = [direct_url] + [loc.get('url') for loc in locations or []] for url in urls: if url is None: continue parsed_url = urllib.parse.urlparse(url) if parsed_url.scheme == "file": # a system call to cp could have significant performance # advantages, however we do not have the path to files at # this point in the abstraction. with open(parsed_url.path, "r") as f: shutil.copyfileobj(f, data) return try: image_chunks = self._client.call(context, 'data', image_id) except Exception: _reraise_translated_image_exception(image_id) if not data: return image_chunks else: for chunk in image_chunks: data.write(chunk) def create(self, context, image_meta, data=None): """Store the image data and return the new image object.""" sent_service_image_meta = self._translate_to_glance(image_meta) if data: sent_service_image_meta['data'] = data recv_service_image_meta = self._client.call(context, 'create', **sent_service_image_meta) return self._translate_from_glance(context, recv_service_image_meta) def update(self, context, image_id, image_meta, data=None, purge_props=True): """Modify the given image with the new data.""" image_meta = self._translate_to_glance(image_meta) # NOTE(dosaboy): see comment in bug 1210467 if CONF.glance_api_version == 1: image_meta['purge_props'] = purge_props # NOTE(bcwaldon): id is not an editable field, but it is likely to be # passed in by calling code. Let's be nice and ignore it. image_meta.pop('id', None) if data: image_meta['data'] = data try: # NOTE(dosaboy): the v2 api separates update from upload if data and CONF.glance_api_version > 1: self._client.call(context, 'upload', image_id, data) image_meta = self._client.call(context, 'get', image_id) else: image_meta = self._client.call(context, 'update', image_id, **image_meta) except Exception: _reraise_translated_image_exception(image_id) else: return self._translate_from_glance(context, image_meta) def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. :raises: NotAuthorized if the user is not an owner. """ try: self._client.call(context, 'delete', image_id) except glanceclient.exc.NotFound: raise exception.ImageNotFound(image_id=image_id) return True def _translate_from_glance(self, context, image): """Get image metadata from glance image. Extract metadata from image and convert it's properties to type cinder expected. 
:param image: glance image object :return: image metadata dictionary """ if CONF.glance_api_version == 2: if self._image_schema is None: self._image_schema = self._client.call(context, 'get', controller='schemas', schema_name='image', version=2) # NOTE(aarefiev): keep only the base image properties; storing the # image 'schema' is redundant, so ignore it. image_meta = {key: getattr(image, key) for key in image.keys() if self._image_schema.is_base_property(key) is True and key != 'schema'} # NOTE(aarefiev): nova expects all image properties (custom or # defined in schema-image.json) to be stored in the 'properties' # key. image_meta['properties'] = { key: getattr(image, key) for key in image.keys() if self._image_schema.is_base_property(key) is False} else: image_meta = _extract_attributes(image) image_meta = _convert_timestamps_to_datetimes(image_meta) image_meta = _convert_from_string(image_meta) return image_meta @staticmethod def _translate_to_glance(image_meta): image_meta = _convert_to_string(image_meta) image_meta = _remove_read_only(image_meta) # NOTE(tsekiyama): From the Image API v2, custom properties must # be stored in image_meta directly, instead of the 'properties' key. if CONF.glance_api_version >= 2: properties = image_meta.get('properties') if properties: image_meta.update(properties) del image_meta['properties'] return image_meta @staticmethod def _is_image_available(context, image): """Check image availability. This check is needed in case Nova and Glance are deployed without authentication turned on. """ # The presence of an auth token implies this is an authenticated # request and we need not handle the noauth use-case. if hasattr(context, 'auth_token') and context.auth_token: return True if image.is_public or context.is_admin: return True properties = image.properties if context.project_id and ('owner_id' in properties): return str(properties['owner_id']) == str(context.project_id) if context.project_id and ('project_id' in properties): return str(properties['project_id']) == str(context.project_id) try: user_id = properties['user_id'] except KeyError: return False return str(user_id) == str(context.user_id) def _convert_timestamps_to_datetimes(image_meta): """Returns image with timestamp fields converted to datetime objects.""" for attr in ['created_at', 'updated_at', 'deleted_at']: if image_meta.get(attr): image_meta[attr] = timeutils.parse_isotime(image_meta[attr]) return image_meta # NOTE(bcwaldon): used to store non-string data in glance metadata def _json_loads(properties, attr): prop = properties[attr] if isinstance(prop, six.string_types): properties[attr] = jsonutils.loads(prop) def _json_dumps(properties, attr): prop = properties[attr] if not isinstance(prop, six.string_types): properties[attr] = jsonutils.dumps(prop) _CONVERT_PROPS = ('block_device_mapping', 'mappings') def _convert(method, metadata): metadata = copy.deepcopy(metadata) properties = metadata.get('properties') if properties: for attr in _CONVERT_PROPS: if attr in properties: method(properties, attr) return metadata def _convert_from_string(metadata): return _convert(_json_loads, metadata) def _convert_to_string(metadata): return _convert(_json_dumps, metadata) def _extract_attributes(image): # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform # a get(), resulting in a useless request back to glance.
This list is # therefore sorted, with dependent attributes as the end # 'deleted_at' depends on 'deleted' # 'checksum' depends on 'status' == 'active' IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', 'container_format', 'status', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'deleted_at', 'checksum', 'min_disk', 'min_ram', 'is_public'] output = {} for attr in IMAGE_ATTRIBUTES: if attr == 'deleted_at' and not output['deleted']: output[attr] = None elif attr == 'checksum' and output['status'] != 'active': output[attr] = None else: output[attr] = getattr(image, attr, None) output['properties'] = getattr(image, 'properties', {}) # NOTE(jbernard): Update image properties for API version 2. For UEC # images stored in glance, the necessary boot information is stored in the # properties dict in version 1 so there is nothing more to do. However, in # version 2 these are standalone fields in the GET response. This bit of # code moves them back into the properties dict as the caller expects, thus # producing a volume with correct metadata for booting. for attr in ('kernel_id', 'ramdisk_id'): value = getattr(image, attr, None) if value: output['properties'][attr] = value return output def _remove_read_only(image_meta): IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at'] output = copy.deepcopy(image_meta) for attr in IMAGE_ATTRIBUTES: if attr in output: del output[attr] return output def _reraise_translated_image_exception(image_id): """Transform the exception for the image but keep its traceback intact.""" _exc_type, exc_value, exc_trace = sys.exc_info() new_exc = _translate_image_exception(image_id, exc_value) six.reraise(type(new_exc), new_exc, exc_trace) def _reraise_translated_exception(): """Transform the exception but keep its traceback intact.""" _exc_type, exc_value, exc_trace = sys.exc_info() new_exc = _translate_plain_exception(exc_value) six.reraise(type(new_exc), new_exc, exc_trace) def _translate_image_exception(image_id, exc_value): if isinstance(exc_value, (glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized)): return exception.ImageNotAuthorized(image_id=image_id) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.ImageNotFound(image_id=image_id) if isinstance(exc_value, glanceclient.exc.BadRequest): return exception.Invalid(exc_value) return exc_value def _translate_plain_exception(exc_value): if isinstance(exc_value, (glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized)): return exception.NotAuthorized(exc_value) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.NotFound(exc_value) if isinstance(exc_value, glanceclient.exc.BadRequest): return exception.Invalid(exc_value) return exc_value def get_remote_image_service(context, image_href): """Create an image_service and parse the id from the given image_href. The image_href param can be an href of the form 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3', or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the image_href is a standalone id, then the default image service is returned. 
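Illustrative use, reusing the example href above::

    service, image_id = get_remote_image_service(
        ctx, 'http://example.com:9292/v1/images/'
             'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3')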
:param image_href: href that describes the location of an image :returns: a tuple of the form (image_service, image_id) """ # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a # standalone image ID if '/' not in str(image_href): image_service = get_default_image_service() return image_service, image_href try: (image_id, glance_netloc, use_ssl) = _parse_image_ref(image_href) glance_client = GlanceClientWrapper(context=context, netloc=glance_netloc, use_ssl=use_ssl) except ValueError: raise exception.InvalidImageRef(image_href=image_href) image_service = GlanceImageService(client=glance_client) return image_service, image_id def get_default_image_service(): return GlanceImageService() cinder-8.0.0/cinder/image/__init__.py0000664000567000056710000000000012701406250020546 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/image/image_utils.py0000664000567000056710000005206512701406250021333 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helper methods to deal with images. This is essentially a copy from nova.virt.images.py Some slight modifications, but at some point we should look at maybe pushing this up to Oslo """ import contextlib import math import os import re import tempfile from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import imageutils from oslo_utils import timeutils from oslo_utils import units from cinder import exception from cinder.i18n import _, _LI, _LW from cinder import utils from cinder.volume import throttling from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) image_helper_opts = [cfg.StrOpt('image_conversion_dir', default='$state_path/conversion', help='Directory used for temporary storage ' 'during image conversion'), ] CONF = cfg.CONF CONF.register_opts(image_helper_opts) def qemu_img_info(path, run_as_root=True): """Return an object containing the parsed output from qemu-img info.""" cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path) if os.name == 'nt': cmd = cmd[2:] out, _err = utils.execute(*cmd, run_as_root=run_as_root) return imageutils.QemuImgInfo(out) def get_qemu_img_version(): info = utils.execute('qemu-img', '--help', check_exit_code=False)[0] pattern = r"qemu-img version ([0-9\.]*)" version = re.match(pattern, info) if not version: LOG.warning(_LW("qemu-img is not installed.")) return None return _get_version_from_string(version.groups()[0]) def _get_version_from_string(version_string): return [int(x) for x in version_string.split('.')] def check_qemu_img_version(minimum_version): qemu_version = get_qemu_img_version() if (qemu_version is None or qemu_version < _get_version_from_string(minimum_version)): if qemu_version: current_version = '.'.join((str(element) for element in 
qemu_version)) else: current_version = None _msg = _('qemu-img %(minimum_version)s or later is required by ' 'this volume driver. Current qemu-img version: ' '%(current_version)s') % {'minimum_version': minimum_version, 'current_version': current_version} raise exception.VolumeBackendAPIException(data=_msg) def _convert_image(prefix, source, dest, out_format, run_as_root=True): """Convert image to other format.""" cmd = prefix + ('qemu-img', 'convert', '-O', out_format, source, dest) # Check whether O_DIRECT is supported and set '-t none' if it is # This is needed to ensure that all data hit the device before # it gets unmapped remotely from the host for some backends # Reference Bug: #1363016 # NOTE(jdg): In the case of file devices qemu does the # flush properly and more efficiently than would be done # setting O_DIRECT, so check for that and skip the # setting for non BLK devs if (utils.is_blk_device(dest) and volume_utils.check_for_odirect_support(source, dest, 'oflag=direct')): cmd = prefix + ('qemu-img', 'convert', '-t', 'none', '-O', out_format, source, dest) start_time = timeutils.utcnow() utils.execute(*cmd, run_as_root=run_as_root) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) # NOTE(jdg): use a default of 1, mostly for unit test, but in # some incredible event this is 0 (cirros image?) don't barf if duration < 1: duration = 1 try: image_size = qemu_img_info(source, run_as_root=True).virtual_size except ValueError as e: msg = _LI("The image was successfully converted, but image size " "is unavailable. src %(src)s, dest %(dest)s. %(error)s") LOG.info(msg, {"src": source, "dest": dest, "error": e}) return fsz_mb = image_size / units.Mi mbps = (fsz_mb / duration) msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, " "duration %(duration).2f sec, destination %(dest)s") LOG.debug(msg, {"src": source, "sz": fsz_mb, "duration": duration, "dest": dest}) msg = _LI("Converted %(sz).2f MB image at %(mbps).2f MB/s") LOG.info(msg, {"sz": fsz_mb, "mbps": mbps}) def convert_image(source, dest, out_format, run_as_root=True, throttle=None): if not throttle: throttle = throttling.Throttle.get_default() with throttle.subcommand(source, dest) as throttle_cmd: _convert_image(tuple(throttle_cmd['prefix']), source, dest, out_format, run_as_root=run_as_root) def resize_image(source, size, run_as_root=False): """Changes the virtual size of the image.""" cmd = ('qemu-img', 'resize', source, '%sG' % size) utils.execute(*cmd, run_as_root=run_as_root) def fetch(context, image_service, image_id, path, _user_id, _project_id): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. start_time = timeutils.utcnow() with fileutils.remove_path_on_error(path): with open(path, "wb") as image_file: image_service.download(context, image_id, image_file) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) # NOTE(jdg): use a default of 1, mostly for unit test, but in # some incredible event this is 0 (cirros image?) 
don't barf if duration < 1: duration = 1 fsz_mb = os.stat(image_file.name).st_size / units.Mi mbps = (fsz_mb / duration) msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, " "duration %(duration).2f sec") LOG.debug(msg, {"dest": image_file.name, "sz": fsz_mb, "duration": duration}) msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s") LOG.info(msg, {"sz": fsz_mb, "mbps": mbps}) def fetch_verify_image(context, image_service, image_id, dest, user_id=None, project_id=None, size=None, run_as_root=True): fetch(context, image_service, image_id, dest, None, None) with fileutils.remove_path_on_error(dest): data = qemu_img_info(dest, run_as_root=run_as_root) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file})) # NOTE(xqueralt): If the image virtual size doesn't fit in the # requested volume there is no point on resizing it because it will # generate an unusable image. if size is not None and data.virtual_size > size: params = {'image_size': data.virtual_size, 'volume_size': size} reason = _("Size is %(image_size)dGB and doesn't fit in a " "volume of size %(volume_size)dGB.") % params raise exception.ImageUnacceptable(image_id=image_id, reason=reason) def fetch_to_vhd(context, image_service, image_id, dest, blocksize, user_id=None, project_id=None, run_as_root=True): fetch_to_volume_format(context, image_service, image_id, dest, 'vpc', blocksize, user_id, project_id, run_as_root=run_as_root) def fetch_to_raw(context, image_service, image_id, dest, blocksize, user_id=None, project_id=None, size=None, run_as_root=True): fetch_to_volume_format(context, image_service, image_id, dest, 'raw', blocksize, user_id, project_id, size, run_as_root=run_as_root) def fetch_to_volume_format(context, image_service, image_id, dest, volume_format, blocksize, user_id=None, project_id=None, size=None, run_as_root=True): qemu_img = True image_meta = image_service.show(context, image_id) # NOTE(avishay): I'm not crazy about creating temp files which may be # large and cause disk full errors which would confuse users. # Unfortunately it seems that you can't pipe to 'qemu-img convert' because # it seeks. Maybe we can think of something for a future version. with temporary_file() as tmp: # We may be on a system that doesn't have qemu-img installed. That # is ok if we are working with a RAW image. This logic checks to see # if qemu-img is installed. If not we make sure the image is RAW and # throw an exception if not. Otherwise we stop before needing # qemu-img. Systems with qemu-img will always progress through the # whole function. try: # Use the empty tmp file to make sure qemu_img_info works. qemu_img_info(tmp, run_as_root=run_as_root) except processutils.ProcessExecutionError: qemu_img = False if image_meta: if image_meta['disk_format'] != 'raw': raise exception.ImageUnacceptable( reason=_("qemu-img is not installed and image is of " "type %s. Only RAW images can be used if " "qemu-img is not installed.") % image_meta['disk_format'], image_id=image_id) else: raise exception.ImageUnacceptable( reason=_("qemu-img is not installed and the disk " "format is not specified. 
Only RAW images " "can be used if qemu-img is not installed."), image_id=image_id) tmp_images = TemporaryImages.for_image_service(image_service) tmp_image = tmp_images.get(context, image_id) if tmp_image: tmp = tmp_image else: fetch(context, image_service, image_id, tmp, user_id, project_id) if is_xenserver_image(context, image_service, image_id): replace_xenserver_image_with_coalesced_vhd(tmp) if not qemu_img: # qemu-img is not installed but we do have a RAW image. As a # result we only need to copy the image to the destination and then # return. LOG.debug('Copying image from %(tmp)s to volume %(dest)s - ' 'size: %(size)s', {'tmp': tmp, 'dest': dest, 'size': image_meta['size']}) image_size_m = math.ceil(image_meta['size'] / units.Mi) volume_utils.copy_volume(tmp, dest, image_size_m, blocksize) return data = qemu_img_info(tmp, run_as_root=run_as_root) virt_size = data.virtual_size / units.Gi # NOTE(xqueralt): If the image virtual size doesn't fit in the # requested volume there is no point on resizing it because it will # generate an unusable image. if size is not None and virt_size > size: params = {'image_size': virt_size, 'volume_size': size} reason = _("Size is %(image_size)dGB and doesn't fit in a " "volume of size %(volume_size)dGB.") % params raise exception.ImageUnacceptable(image_id=image_id, reason=reason) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file, }) # NOTE(jdg): I'm using qemu-img convert to write # to the volume regardless if it *needs* conversion or not # TODO(avishay): We can speed this up by checking if the image is raw # and if so, writing directly to the device. However, we need to keep # check via 'qemu-img info' that what we copied was in fact a raw # image and not a different format with a backing file, which may be # malicious. LOG.debug("%s was %s, converting to %s ", image_id, fmt, volume_format) convert_image(tmp, dest, volume_format, run_as_root=run_as_root) data = qemu_img_info(dest, run_as_root=run_as_root) if not _validate_file_format(data, volume_format): raise exception.ImageUnacceptable( image_id=image_id, reason=_("Converted to %(vol_format)s, but format is " "now %(file_format)s") % {'vol_format': volume_format, 'file_format': data. file_format}) def _validate_file_format(image_data, expected_format): if image_data.file_format == expected_format: return True elif image_data.file_format == 'vpc' and expected_format == 'vhd': # qemu-img still uses the legacy 'vpc' name for the vhd format. 
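# e.g. an image converted to 'vhd' is reported by qemu-img with
# file_format 'vpc', so the ('vpc', 'vhd') pair must count as a match.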
return True return False def upload_volume(context, image_service, image_meta, volume_path, volume_format='raw', run_as_root=True): image_id = image_meta['id'] if (image_meta['disk_format'] == volume_format): LOG.debug("%s was %s, no need to convert to %s", image_id, volume_format, image_meta['disk_format']) if os.name == 'nt' or os.access(volume_path, os.R_OK): with open(volume_path, 'rb') as image_file: image_service.update(context, image_id, {}, image_file) else: with utils.temporary_chown(volume_path): with open(volume_path) as image_file: image_service.update(context, image_id, {}, image_file) return with temporary_file() as tmp: LOG.debug("%s was %s, converting to %s", image_id, volume_format, image_meta['disk_format']) data = qemu_img_info(volume_path, run_as_root=run_as_root) backing_file = data.backing_file fmt = data.file_format if backing_file is not None: # Disallow backing files as a security measure. # This prevents a user from writing an image header into a raw # volume with a backing file pointing to data they wish to # access. raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file}) convert_image(volume_path, tmp, image_meta['disk_format'], run_as_root=run_as_root) data = qemu_img_info(tmp, run_as_root=run_as_root) if data.file_format != image_meta['disk_format']: raise exception.ImageUnacceptable( image_id=image_id, reason=_("Converted to %(f1)s, but format is now %(f2)s") % {'f1': image_meta['disk_format'], 'f2': data.file_format}) with open(tmp, 'rb') as image_file: image_service.update(context, image_id, {}, image_file) def is_xenserver_image(context, image_service, image_id): image_meta = image_service.show(context, image_id) return is_xenserver_format(image_meta) def is_xenserver_format(image_meta): return ( image_meta['disk_format'] == 'vhd' and image_meta['container_format'] == 'ovf' ) def set_vhd_parent(vhd_path, parentpath): utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath) def extract_targz(archive_name, target): utils.execute('tar', '-xzf', archive_name, '-C', target) def fix_vhd_chain(vhd_chain): for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]): set_vhd_parent(child, parent) def get_vhd_size(vhd_path): out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v') return int(out) def resize_vhd(vhd_path, size, journal): utils.execute( 'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal) def coalesce_vhd(vhd_path): utils.execute( 'vhd-util', 'coalesce', '-n', vhd_path) def create_temporary_file(*args, **kwargs): if (CONF.image_conversion_dir and not os.path.exists(CONF.image_conversion_dir)): os.makedirs(CONF.image_conversion_dir) fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir, *args, **kwargs) os.close(fd) return tmp @contextlib.contextmanager def temporary_file(*args, **kwargs): tmp = None try: tmp = create_temporary_file(*args, **kwargs) yield tmp finally: if tmp: fileutils.delete_if_exists(tmp) def temporary_dir(): if (CONF.image_conversion_dir and not os.path.exists(CONF.image_conversion_dir)): os.makedirs(CONF.image_conversion_dir) return utils.tempdir(dir=CONF.image_conversion_dir) def coalesce_chain(vhd_chain): for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]): with temporary_dir() as directory_for_journal: size = get_vhd_size(child) journal_file = os.path.join( directory_for_journal, 'vhd-util-resize-journal') resize_vhd(parent, size, journal_file) coalesce_vhd(child) return vhd_chain[-1] def 
discover_vhd_chain(directory): counter = 0 chain = [] while True: fpath = os.path.join(directory, '%d.vhd' % counter) if os.path.exists(fpath): chain.append(fpath) else: break counter += 1 return chain def replace_xenserver_image_with_coalesced_vhd(image_file): with temporary_dir() as tempdir: extract_targz(image_file, tempdir) chain = discover_vhd_chain(tempdir) fix_vhd_chain(chain) coalesced = coalesce_chain(chain) fileutils.delete_if_exists(image_file) os.rename(coalesced, image_file) class TemporaryImages(object): """Manage temporarily downloaded images to avoid downloading it twice. In the 'with TemporaryImages.fetch(image_service, ctx, image_id) as tmp' clause, 'tmp' can be used as the downloaded image path. In addition, image_utils.fetch() will use the pre-fetched image by the TemporaryImages. This is useful to inspect image contents before conversion. """ def __init__(self, image_service): self.temporary_images = {} self.image_service = image_service image_service.temp_images = self @staticmethod def for_image_service(image_service): instance = image_service.temp_images if instance: return instance return TemporaryImages(image_service) @classmethod @contextlib.contextmanager def fetch(cls, image_service, context, image_id): tmp_images = cls.for_image_service(image_service).temporary_images with temporary_file() as tmp: fetch_verify_image(context, image_service, image_id, tmp) user = context.user_id if not tmp_images.get(user): tmp_images[user] = {} tmp_images[user][image_id] = tmp LOG.debug("Temporary image %(id)s is fetched for user %(user)s.", {'id': image_id, 'user': user}) yield tmp del tmp_images[user][image_id] LOG.debug("Temporary image %(id)s for user %(user)s is deleted.", {'id': image_id, 'user': user}) def get(self, context, image_id): user = context.user_id if not self.temporary_images.get(user): return None return self.temporary_images[user].get(image_id) cinder-8.0.0/cinder/image/cache.py0000664000567000056710000002200512701406250020063 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Pure Storage, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
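# Illustrative usage sketch (names such as 'db', 'volume_api', 'ctx' and
# 'volume_ref' are assumed handles, not defined in this module):
#
#     cache = ImageVolumeCache(db, volume_api,
#                              max_cache_size_gb=10,
#                              max_cache_size_count=5)
#     entry = cache.get_entry(ctx, volume_ref, image_id, image_meta)
#     if entry is None and cache.ensure_space(ctx, volume_ref['size'], host):
#         cache.create_cache_entry(ctx, volume_ref, image_id, image_meta)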
from pytz import timezone import six from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from cinder.i18n import _LW from cinder import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class ImageVolumeCache(object): def __init__(self, db, volume_api, max_cache_size_gb=0, max_cache_size_count=0): self.db = db self.volume_api = volume_api self.max_cache_size_gb = int(max_cache_size_gb) self.max_cache_size_count = int(max_cache_size_count) self.notifier = rpc.get_notifier('volume', CONF.host) def get_by_image_volume(self, context, volume_id): return self.db.image_volume_cache_get_by_volume_id(context, volume_id) def evict(self, context, cache_entry): LOG.debug('Evicting image cache entry: %(entry)s.', {'entry': self._entry_to_str(cache_entry)}) self.db.image_volume_cache_delete(context, cache_entry['volume_id']) self._notify_cache_eviction(context, cache_entry['image_id'], cache_entry['host']) def get_entry(self, context, volume_ref, image_id, image_meta): cache_entry = self.db.image_volume_cache_get_and_update_last_used( context, image_id, volume_ref['host'] ) if cache_entry: LOG.debug('Found image-volume cache entry: %(entry)s.', {'entry': self._entry_to_str(cache_entry)}) if self._should_update_entry(cache_entry, image_meta): LOG.debug('Image-volume cache entry is out-dated, evicting: ' '%(entry)s.', {'entry': self._entry_to_str(cache_entry)}) self._delete_image_volume(context, cache_entry) cache_entry = None if cache_entry: self._notify_cache_hit(context, cache_entry['image_id'], cache_entry['host']) else: self._notify_cache_miss(context, image_id, volume_ref['host']) return cache_entry def create_cache_entry(self, context, volume_ref, image_id, image_meta): """Create a new cache entry for an image. This assumes that the volume described by volume_ref has already been created and is in an available state. """ LOG.debug('Creating new image-volume cache entry for image ' '%(image_id)s on host %(host)s.', {'image_id': image_id, 'host': volume_ref['host']}) # When we are creating an image from a volume the updated_at field # will be a unicode representation of the datetime. In that case # we just need to parse it into one. If it is an actual datetime # we want to just grab it as a UTC naive datetime. image_updated_at = image_meta['updated_at'] if isinstance(image_updated_at, six.string_types): image_updated_at = timeutils.parse_strtime(image_updated_at) else: image_updated_at = image_updated_at.astimezone(timezone('UTC')) cache_entry = self.db.image_volume_cache_create( context, volume_ref['host'], image_id, image_updated_at.replace(tzinfo=None), volume_ref['id'], volume_ref['size'] ) LOG.debug('New image-volume cache entry created: %(entry)s.', {'entry': self._entry_to_str(cache_entry)}) return cache_entry def ensure_space(self, context, space_required, host): """Makes room for a cache entry. Returns True if successful, false otherwise. """ # Check to see if the cache is actually limited. if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0: return True # Make sure that we can potentially fit the image in the cache # and bail out before evicting everything else to try and make # room for it. if (self.max_cache_size_gb != 0 and space_required > self.max_cache_size_gb): return False # Assume the entries are ordered by most recently used to least used. 
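# e.g. with max_cache_size_gb=10, cached entries of 4, 3 and 2 GB and a
# 5 GB request: the projected size is 14 GB, so the least recently used
# entries (the 2 GB, then the 3 GB) are deleted until the total fits.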
entries = self.db.image_volume_cache_get_all_for_host(context, host) current_count = len(entries) current_size = 0 for entry in entries: current_size += entry['size'] # Add values for the entry we intend to create. current_size += space_required current_count += 1 LOG.debug('Image-volume cache for host %(host)s current_size (GB) = ' '%(size_gb)s (max = %(max_gb)s), current count = %(count)s ' '(max = %(max_count)s).', {'host': host, 'size_gb': current_size, 'max_gb': self.max_cache_size_gb, 'count': current_count, 'max_count': self.max_cache_size_count}) while ((current_size > self.max_cache_size_gb or current_count > self.max_cache_size_count) and len(entries)): entry = entries.pop() LOG.debug('Reclaiming image-volume cache space; removing cache ' 'entry %(entry)s.', {'entry': self._entry_to_str(entry)}) self._delete_image_volume(context, entry) current_size -= entry['size'] current_count -= 1 LOG.debug('Image-volume cache for host %(host)s new size (GB) = ' '%(size_gb)s, new count = %(count)s.', {'host': host, 'size_gb': current_size, 'count': current_count}) # It is only possible to not free up enough gb, we will always be able # to free enough count. This is because 0 means unlimited which means # it is guaranteed to be >0 if limited, and we can always delete down # to 0. if self.max_cache_size_gb > 0: if current_size > self.max_cache_size_gb > 0: LOG.warning(_LW('Image-volume cache for host %(host)s does ' 'not have enough space (GB).'), {'host': host}) return False return True def _notify_cache_hit(self, context, image_id, host): self._notify_cache_action(context, image_id, host, 'hit') def _notify_cache_miss(self, context, image_id, host): self._notify_cache_action(context, image_id, host, 'miss') def _notify_cache_eviction(self, context, image_id, host): self._notify_cache_action(context, image_id, host, 'evict') def _notify_cache_action(self, context, image_id, host, action): data = { 'image_id': image_id, 'host': host, } LOG.debug('ImageVolumeCache notification: action=%(action)s' ' data=%(data)s.', {'action': action, 'data': data}) self.notifier.info(context, 'image_volume_cache.%s' % action, data) def _delete_image_volume(self, context, cache_entry): """Delete a volume and remove cache entry.""" volume_ref = self.db.volume_get(context, cache_entry['volume_id']) # Delete will evict the cache entry. 
self.volume_api.delete(context, volume_ref) def _get_image_volume_name(self, image_id): return 'image-volume-' + image_id def _should_update_entry(self, cache_entry, image_meta): """Ensure that the cache entry image data is still valid.""" image_updated_utc = (image_meta['updated_at'] .astimezone(timezone('UTC'))) cache_updated_utc = (cache_entry['image_updated_at'] .replace(tzinfo=timezone('UTC'))) LOG.debug('Image-volume cache entry image_update_at = %(entry_utc)s, ' 'requested image updated_at = %(image_utc)s.', {'entry_utc': six.text_type(cache_updated_utc), 'image_utc': six.text_type(image_updated_utc)}) return image_updated_utc != cache_updated_utc def _entry_to_str(self, cache_entry): return six.text_type({ 'id': cache_entry['id'], 'image_id': cache_entry['image_id'], 'volume_id': cache_entry['volume_id'], 'host': cache_entry['host'], 'size': cache_entry['size'], 'image_updated_at': cache_entry['image_updated_at'], 'last_used': cache_entry['last_used'], }) cinder-8.0.0/cinder/flow_utils.py0000664000567000056710000000560112701406250020130 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging # For more information please visit: https://wiki.openstack.org/wiki/TaskFlow from taskflow.listeners import base from taskflow.listeners import logging as logging_listener from taskflow import task from cinder import exception LOG = logging.getLogger(__name__) def _make_task_name(cls, addons=None): """Makes a pretty name for a task class.""" base_name = ".".join([cls.__module__, cls.__name__]) extra = '' if addons: extra = ';%s' % (", ".join([str(a) for a in addons])) return base_name + extra class CinderTask(task.Task): """The root task class for all cinder tasks. It automatically names the given task using the module and class that implement the given task as the task name. """ def __init__(self, addons=None, **kwargs): super(CinderTask, self).__init__(self.make_name(addons), **kwargs) @classmethod def make_name(cls, addons=None): return _make_task_name(cls, addons) class DynamicLogListener(logging_listener.DynamicLoggingListener): """This is used to attach to taskflow engines while they are running. It provides a bunch of useful features that expose the actions happening inside a taskflow engine, which can be useful for developers for debugging, for operations folks for monitoring and tracking of the resource actions and more... """ #: Exception is an excepted case, don't include traceback in log if fails. 
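#: e.g. a task failing with exception.InvalidInput is logged as a short
#: failure summary, while unexpected exceptions keep the full traceback.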
_NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError) def __init__(self, engine, task_listen_for=base.DEFAULT_LISTEN_FOR, flow_listen_for=base.DEFAULT_LISTEN_FOR, retry_listen_for=base.DEFAULT_LISTEN_FOR, logger=LOG): super(DynamicLogListener, self).__init__( engine, task_listen_for=task_listen_for, flow_listen_for=flow_listen_for, retry_listen_for=retry_listen_for, log=logger) def _format_failure(self, fail): if fail.check(*self._NO_TRACE_EXCEPTIONS) is not None: exc_info = None exc_details = '%s%s' % (os.linesep, fail.pformat(traceback=False)) return (exc_info, exc_details) else: return super(DynamicLogListener, self)._format_failure(fail) cinder-8.0.0/cinder/brick/0000775000567000056710000000000012701406543016464 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/brick/__init__.py0000664000567000056710000000000012701406250020556 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/brick/README.txt0000664000567000056710000000026112701406250020154 0ustar jenkinsjenkins00000000000000Brick has been migrated to a new standalone pypi library called os-brick. We are leaving the local_dev directory here for the time being until we can migrate it to a new home. cinder-8.0.0/cinder/brick/local_dev/0000775000567000056710000000000012701406543020414 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/brick/local_dev/__init__.py0000664000567000056710000000000012701406250022506 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/brick/local_dev/lvm.py0000664000567000056710000007420112701406257021572 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ LVM class for performing LVM operations. """ import math import os import re from os_brick import executor from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import excutils from six import moves from cinder import exception from cinder.i18n import _LE, _LI from cinder import utils LOG = logging.getLogger(__name__) class LVM(executor.Executor): """LVM object to enable various LVM related operations.""" LVM_CMD_PREFIX = ['env', 'LC_ALL=C'] def __init__(self, vg_name, root_helper, create_vg=False, physical_volumes=None, lvm_type='default', executor=putils.execute, lvm_conf=None): """Initialize the LVM object. The LVM object is based on an LVM VolumeGroup, one instantiation for each VolumeGroup you have/use. 
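A minimal sketch (the root helper and names are hypothetical)::

    lvm = LVM('cinder-volumes', 'sudo', lvm_type='thin')
    lvm.create_volume('volume-1234', '1g', lv_type='thin')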
:param vg_name: Name of existing VG or VG to create :param root_helper: Execution root_helper method to use :param create_vg: Indicates the VG doesn't exist and we want to create it :param physical_volumes: List of PVs to build VG on :param lvm_type: VG and Volume type (default, or thin) :param executor: Execute method to use, None uses common/processutils """ super(LVM, self).__init__(execute=executor, root_helper=root_helper) self.vg_name = vg_name self.pv_list = [] self.vg_size = 0.0 self.vg_free_space = 0.0 self.vg_lv_count = 0 self.vg_uuid = None self.vg_thin_pool = None self.vg_thin_pool_size = 0.0 self.vg_thin_pool_free_space = 0.0 self._supports_snapshot_lv_activation = None self._supports_lvchange_ignoreskipactivation = None self.vg_provisioned_capacity = 0.0 # Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX # before the first LVM command is executed, and use the directory # where the specified lvm_conf file is located as the value. if lvm_conf and os.path.isfile(lvm_conf): lvm_sys_dir = os.path.dirname(lvm_conf) LVM.LVM_CMD_PREFIX = ['env', 'LC_ALL=C', 'LVM_SYSTEM_DIR=' + lvm_sys_dir] if create_vg and physical_volumes is not None: self.pv_list = physical_volumes try: self._create_vg(physical_volumes) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error creating Volume Group')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name) if self._vg_exists() is False: LOG.error(_LE('Unable to locate Volume Group %s'), vg_name) raise exception.VolumeGroupNotFound(vg_name=vg_name) # NOTE: we assume that the VG has been activated outside of Cinder if lvm_type == 'thin': pool_name = "%s-pool" % self.vg_name if self.get_volume(pool_name) is None: try: self.create_thin_pool(pool_name) except putils.ProcessExecutionError: # Maybe we just lost the race against another copy of # this driver being in init in parallel - e.g. # cinder-volume and cinder-backup starting in parallel if self.get_volume(pool_name) is None: raise self.vg_thin_pool = pool_name self.activate_lv(self.vg_thin_pool) self.pv_list = self.get_all_physical_volumes(root_helper, vg_name) def _vg_exists(self): """Simple check to see if VG exists. :returns: True if vg specified in object exists, else False """ exists = False cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '-o', 'name', self.vg_name] (out, _err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out is not None: volume_groups = out.split() if self.vg_name in volume_groups: exists = True return exists def _create_vg(self, pv_list): cmd = ['vgcreate', self.vg_name, ','.join(pv_list)] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) def _get_vg_uuid(self): cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '-o', 'uuid', self.vg_name] (out, _err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out is not None: return out.split() else: return [] def _get_thin_pool_free_space(self, vg_name, thin_pool_name): """Returns available thin pool free space. 
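For example, if lvs reports a pool size of 10.00g with data_percent
30.0, the result is 10.00 - (10.00 / 100 * 30.0) = 7.0 GB free.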
:param vg_name: the vg where the pool is placed :param thin_pool_name: the thin pool to gather info for :returns: Free space in GB (float), calculated using data_percent """ cmd = LVM.LVM_CMD_PREFIX +\ ['lvs', '--noheadings', '--unit=g', '-o', 'size,data_percent', '--separator', ':', '--nosuffix'] # NOTE(gfidente): data_percent only applies to some types of LV so we # make sure to append the actual thin pool name cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name)) free_space = 0.0 try: (out, err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out is not None: out = out.strip() data = out.split(':') pool_size = float(data[0]) data_percent = float(data[1]) consumed_space = pool_size / 100 * data_percent free_space = pool_size - consumed_space free_space = round(free_space, 2) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error querying thin pool about data_percent')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) return free_space @staticmethod def get_lvm_version(root_helper): """Static method to get LVM version from system. :param root_helper: root_helper to use for execute :returns: version 3-tuple """ cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version'] (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) lines = out.split('\n') for line in lines: if 'LVM version' in line: version_list = line.split() # NOTE(gfidente): version is formatted as follows: # major.minor.patchlevel(library API version)[-customisation] version = version_list[2] version_filter = r"(\d+)\.(\d+)\.(\d+).*" r = re.search(version_filter, version) version_tuple = tuple(map(int, r.group(1, 2, 3))) return version_tuple @staticmethod def supports_thin_provisioning(root_helper): """Static method to check for thin LVM support on a system. :param root_helper: root_helper to use for execute :returns: True if supported, False otherwise """ return LVM.get_lvm_version(root_helper) >= (2, 2, 95) @property def supports_snapshot_lv_activation(self): """Property indicating whether snap activation changes are supported. Check for LVM version >= 2.02.91. (LVM2 git: e8a40f6 Allow to activate snapshot) :returns: True/False indicating support """ if self._supports_snapshot_lv_activation is not None: return self._supports_snapshot_lv_activation self._supports_snapshot_lv_activation = ( self.get_lvm_version(self._root_helper) >= (2, 2, 91)) return self._supports_snapshot_lv_activation @property def supports_lvchange_ignoreskipactivation(self): """Property indicating whether lvchange can ignore skip activation. Check for LVM version >= 2.02.99. (LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange) """ if self._supports_lvchange_ignoreskipactivation is not None: return self._supports_lvchange_ignoreskipactivation self._supports_lvchange_ignoreskipactivation = ( self.get_lvm_version(self._root_helper) >= (2, 2, 99)) return self._supports_lvchange_ignoreskipactivation @staticmethod def get_lv_info(root_helper, vg_name=None, lv_name=None): """Retrieve info about LVs (all, in a VG, or a single LV). 
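For example (hypothetical VG and volume names)::

    get_lv_info(root_helper, vg_name='stack-vg')
    # [{'vg': 'stack-vg', 'name': 'volume-1234', 'size': '1.00'}, ...]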
:param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :param lv_name: optional, gathers info for only the specified LV :returns: List of Dictionaries with LV info """ cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size', '--nosuffix'] if lv_name is not None and vg_name is not None: cmd.append("%s/%s" % (vg_name, lv_name)) elif vg_name is not None: cmd.append(vg_name) try: (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) except putils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(reraise=True) as ctx: if "not found" in err.stderr or "Failed to find" in err.stderr: ctx.reraise = False LOG.info(_LI("Logical Volume not found when querying " "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s"), {'vg': vg_name, 'lv': lv_name}) out = None lv_list = [] if out is not None: volumes = out.split() iterator = moves.zip(*[iter(volumes)] * 3) # pylint: disable=E1101 for vg, name, size in iterator: lv_list.append({"vg": vg, "name": name, "size": size}) return lv_list def get_volumes(self, lv_name=None): """Get all LV's associated with this instantiation (VG). :returns: List of Dictionaries with LV info """ return self.get_lv_info(self._root_helper, self.vg_name, lv_name) def get_volume(self, name): """Get reference object of volume specified by name. :returns: dict representation of Logical Volume if exists """ ref_list = self.get_volumes(name) for r in ref_list: if r['name'] == name: return r return None @staticmethod def get_all_physical_volumes(root_helper, vg_name=None): """Static method to get all PVs on a system. :param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :returns: List of Dictionaries with PV info """ field_sep = '|' cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size,free', '--separator', field_sep, '--nosuffix'] (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) pvs = out.split() if vg_name is not None: pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]] pv_list = [] for pv in pvs: fields = pv.split(field_sep) pv_list.append({'vg': fields[0], 'name': fields[1], 'size': float(fields[2]), 'available': float(fields[3])}) return pv_list def get_physical_volumes(self): """Get all PVs associated with this instantiation (VG). :returns: List of Dictionaries with PV info """ self.pv_list = self.get_all_physical_volumes(self._root_helper, self.vg_name) return self.pv_list @staticmethod def get_all_volume_groups(root_helper, vg_name=None): """Static method to get all VGs on a system. :param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :returns: List of Dictionaries with VG info """ cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '--unit=g', '-o', 'name,size,free,lv_count,uuid', '--separator', ':', '--nosuffix'] if vg_name is not None: cmd.append(vg_name) (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) vg_list = [] if out is not None: vgs = out.split() for vg in vgs: fields = vg.split(':') vg_list.append({'name': fields[0], 'size': float(fields[1]), 'available': float(fields[2]), 'lv_count': int(fields[3]), 'uuid': fields[4]}) return vg_list def update_volume_group_info(self): """Update VG info for this instantiation. Used to update member fields of object and provide a dict of info for caller. 
:returns: Dictionaries of VG info """ vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name) if len(vg_list) != 1: LOG.error(_LE('Unable to find VG: %s'), self.vg_name) raise exception.VolumeGroupNotFound(vg_name=self.vg_name) self.vg_size = float(vg_list[0]['size']) self.vg_free_space = float(vg_list[0]['available']) self.vg_lv_count = int(vg_list[0]['lv_count']) self.vg_uuid = vg_list[0]['uuid'] total_vols_size = 0.0 if self.vg_thin_pool is not None: # NOTE(xyang): If providing only self.vg_name, # get_lv_info will output info on the thin pool and all # individual volumes. # get_lv_info(self._root_helper, 'stack-vg') # sudo lvs --noheadings --unit=g -o vg_name,name,size # --nosuffix stack-vg # stack-vg stack-pool 9.51 # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00 # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00 # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00 # # If providing both self.vg_name and self.vg_thin_pool, # get_lv_info will output only info on the thin pool, but not # individual volumes. # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool') # sudo lvs --noheadings --unit=g -o vg_name,name,size # --nosuffix stack-vg/stack-pool # stack-vg stack-pool 9.51 # # We need info on both the thin pool and the volumes, # therefore we should provide only self.vg_name, but not # self.vg_thin_pool here. for lv in self.get_lv_info(self._root_helper, self.vg_name): lvsize = lv['size'] # get_lv_info runs "lvs" command with "--nosuffix". # This removes "g" from "1.00g" and only outputs "1.00". # Running "lvs" command without "--nosuffix" will output # "1.00g" if "g" is the unit. # Remove the unit if it is in lv['size']. if not lv['size'][-1].isdigit(): lvsize = lvsize[:-1] if lv['name'] == self.vg_thin_pool: self.vg_thin_pool_size = lvsize tpfs = self._get_thin_pool_free_space(self.vg_name, self.vg_thin_pool) self.vg_thin_pool_free_space = tpfs else: total_vols_size = total_vols_size + float(lvsize) total_vols_size = round(total_vols_size, 2) self.vg_provisioned_capacity = total_vols_size def _calculate_thin_pool_size(self): """Calculates the correct size for a thin pool. Ideally we would use 100% of the containing volume group and be done. But the 100%VG notation to lvcreate is not implemented and thus cannot be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347 Further, some amount of free space must remain in the volume group for metadata for the contained logical volumes. The exact amount depends on how much volume sharing you expect. :returns: An lvcreate-ready string for the number of calculated bytes. """ # make sure volume group information is current self.update_volume_group_info() # leave 5% free for metadata return "%sg" % (self.vg_free_space * 0.95) def create_thin_pool(self, name=None, size_str=None): """Creates a thin provisioning pool for this VG. The syntax here is slightly different than the default lvcreate -T, so we'll just write a custom cmd here and do it. 
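For example, on a VG with 20g free and no size_str given, the pool is
created as "19.0g" (95% of the free space, leaving 5% for metadata).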
:param name: Name to use for pool, default is "-pool" :param size_str: Size to allocate for pool, default is entire VG :returns: The size string passed to the lvcreate command """ if not self.supports_thin_provisioning(self._root_helper): LOG.error(_LE('Requested to setup thin provisioning, ' 'however current LVM version does not ' 'support it.')) return None if name is None: name = '%s-pool' % self.vg_name vg_pool_name = '%s/%s' % (self.vg_name, name) if not size_str: size_str = self._calculate_thin_pool_size() cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str, vg_pool_name] LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of " "total %(free)sg", {'pool': vg_pool_name, 'size': size_str, 'free': self.vg_free_space}) self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) self.vg_thin_pool = name return size_str def create_volume(self, name, size_str, lv_type='default', mirror_count=0): """Creates a logical volume on the object's VG. :param name: Name to use when creating Logical Volume :param size_str: Size to use when creating Logical Volume :param lv_type: Type of Volume (default or thin) :param mirror_count: Use LVM mirroring with specified count """ if lv_type == 'thin': pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path] else: cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name, '-L', size_str] if mirror_count > 0: cmd.extend(['-m', mirror_count, '--nosync', '--mirrorlog', 'mirrored']) terras = int(size_str[:-1]) / 1024.0 if terras >= 1.5: rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) # NOTE(vish): Next power of two for region size. See: # http://red.ht/U2BPOD cmd.extend(['-R', str(rsize)]) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error creating Volume')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise @utils.retry(putils.ProcessExecutionError) def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): """Creates a snapshot of a logical volume. :param name: Name to assign to new snapshot :param source_lv_name: Name of Logical Volume to snapshot :param lv_type: Type of LV (default or thin) """ source_lvref = self.get_volume(source_lv_name) if source_lvref is None: LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"), source_lv_name) raise exception.VolumeDeviceNotFound(device=source_lv_name) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)] if lv_type != 'thin': size = source_lvref['size'] cmd.extend(['-L', '%sg' % (size)]) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error creating snapshot')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def _mangle_lv_name(self, name): # Linux LVM reserves name that starts with snapshot, so that # such volume name can't be created. Mangle it. 
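# e.g. 'snapshot-d6be24c3' is mangled to '_snapshot-d6be24c3', while
# 'volume-d6be24c3' passes through unchanged.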
if not name.startswith('snapshot'): return name return '_' + name def deactivate_lv(self, name): lv_path = self.vg_name + '/' + self._mangle_lv_name(name) cmd = ['lvchange', '-a', 'n'] cmd.append(lv_path) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error deactivating LV')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def activate_lv(self, name, is_snapshot=False, permanent=False): """Ensure that logical volume/snapshot logical volume is activated. :param name: Name of LV to activate :param is_snapshot: whether LV is a snapshot :param permanent: whether we should drop skipactivation flag :raises: putils.ProcessExecutionError """ # This is a no-op if requested for a snapshot on a version # of LVM that doesn't support snapshot activation. # (Assume snapshot LV is always active.) if is_snapshot and not self.supports_snapshot_lv_activation: return lv_path = self.vg_name + '/' + self._mangle_lv_name(name) # Must pass --yes to activate both the snap LV and its origin LV. # Otherwise lvchange asks if you would like to do this interactively, # and fails. cmd = ['lvchange', '-a', 'y', '--yes'] if self.supports_lvchange_ignoreskipactivation: cmd.append('-K') # If permanent=True is specified, drop the skipactivation flag in # order to make this LV automatically activated after next reboot. if permanent: cmd += ['-k', 'n'] cmd.append(lv_path) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error activating LV')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise @utils.retry(putils.ProcessExecutionError) def delete(self, name): """Delete logical volume or snapshot. :param name: Name of LV to delete """ def run_udevadm_settle(): self._execute('udevadm', 'settle', root_helper=self._root_helper, run_as_root=True, check_exit_code=False) # LV removal seems to be a race with other writers or udev in # some cases (see LP #1270192), so we enable retry deactivation LVM_CONFIG = 'activation { retry_deactivation = 1} ' try: self._execute( 'lvremove', '--config', LVM_CONFIG, '-f', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.debug('Error reported running lvremove: CMD: %(command)s, ' 'RESPONSE: %(response)s', {'command': err.cmd, 'response': err.stderr}) LOG.debug('Attempting udev settle and retry of lvremove...') run_udevadm_settle() # The previous failing lvremove -f might leave behind # suspended devices; when lvmetad is not available, any # further lvm command will block forever. # Therefore we need to skip suspended devices on retry. LVM_CONFIG += 'devices { ignore_suspended_devices = 1}' self._execute( 'lvremove', '--config', LVM_CONFIG, '-f', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) LOG.debug('Successfully deleted volume: %s after ' 'udev settle.', name) def revert(self, snapshot_name): """Revert an LV from snapshot. 
:param snapshot_name: Name of snapshot to revert """ self._execute('lvconvert', '--merge', snapshot_name, root_helper=self._root_helper, run_as_root=True) def lv_has_snapshot(self, name): cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out: out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): return True return False def extend_volume(self, lv_name, new_size): """Extend the size of an existing volume.""" # Volumes with snaps have attributes 'o' or 'O' and will be # deactivated, but Thin Volumes with snaps have attribute 'V' # and won't be deactivated because the lv_has_snapshot method looks # for 'o' or 'O' if self.lv_has_snapshot(lv_name): self.deactivate_lv(lv_name) try: cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size, '%s/%s' % (self.vg_name, lv_name)] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error extending Volume')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def vg_mirror_free_space(self, mirror_count): free_capacity = 0.0 disks = [] for pv in self.pv_list: disks.append(float(pv['available'])) while True: disks = sorted([a for a in disks if a > 0.0], reverse=True) if len(disks) <= mirror_count: break # consume the smallest disk disk = disks[-1] disks = disks[:-1] # match extents for each mirror on the largest disks for index in list(range(mirror_count)): disks[index] -= disk free_capacity += disk return free_capacity def vg_mirror_size(self, mirror_count): return (self.vg_free_space / (mirror_count + 1)) def rename_volume(self, lv_name, new_name): """Change the name of an existing volume.""" try: self._execute('lvrename', self.vg_name, lv_name, new_name, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error renaming logical volume')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise cinder-8.0.0/cinder/consistencygroup/0000775000567000056710000000000012701406543021010 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/consistencygroup/__init__.py0000664000567000056710000000166312701406250023122 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.transfer import ' elsewhere. from oslo_utils import importutils from cinder.common import config CONF = config.CONF API = importutils.import_class( CONF.consistencygroup_api_class) cinder-8.0.0/cinder/consistencygroup/api.py0000664000567000056710000010203712701406250022131 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to consistency groups. """ import functools from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils from cinder.db import base from cinder import exception from cinder.i18n import _, _LE, _LW from cinder import objects from cinder.objects import fields as c_fields import cinder.policy from cinder import quota from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.volume import api as volume_api from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils as vol_utils from cinder.volume import volume_types CONF = cfg.CONF LOG = logging.getLogger(__name__) CGQUOTAS = quota.CGQUOTAS VALID_REMOVE_VOL_FROM_CG_STATUS = ( 'available', 'in-use', 'error', 'error_deleting') VALID_ADD_VOL_TO_CG_STATUS = ( 'available', 'in-use') def wrap_check_policy(func): """Check policy corresponding to the wrapped methods prior to execution. This decorator requires the first 3 args of the wrapped function to be (self, context, consistencygroup) """ @functools.wraps(func) def wrapped(self, context, target_obj, *args, **kwargs): check_policy(context, func.__name__, target_obj) return func(self, context, target_obj, *args, **kwargs) return wrapped def check_policy(context, action, target_obj=None): target = { 'project_id': context.project_id, 'user_id': context.user_id, } target_obj = target_obj.fields if target_obj else {} target.update(target_obj) _action = 'consistencygroup:%s' % action cinder.policy.enforce(context, _action, target) class API(base.Base): """API for interacting with the volume manager for consistency groups.""" def __init__(self, db_driver=None): self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() self.availability_zone_names = () self.volume_api = volume_api.API() super(API, self).__init__(db_driver) def _valid_availability_zone(self, availability_zone): if availability_zone in self.availability_zone_names: return True if CONF.storage_availability_zone == availability_zone: return True azs = self.volume_api.list_availability_zones() self.availability_zone_names = [az['name'] for az in azs] return availability_zone in self.availability_zone_names def _extract_availability_zone(self, availability_zone): if availability_zone is None: if CONF.default_availability_zone: availability_zone = CONF.default_availability_zone else: # For backwards compatibility use the storage_availability_zone availability_zone = CONF.storage_availability_zone valid = self._valid_availability_zone(availability_zone) if not valid: msg = _LW( "Availability zone '%s' is invalid") % (availability_zone) LOG.warning(msg) raise exception.InvalidInput(reason=msg) return availability_zone def create(self, context, name, description, cg_volume_types, availability_zone=None): check_policy(context, 'create') volume_type_list = None volume_type_list = cg_volume_types.split(',') req_volume_types = [] # NOTE: Admin 
context is required to get extra_specs of volume_types. req_volume_types = (self.db.volume_types_get_by_name_or_id( context.elevated(), volume_type_list)) req_volume_type_ids = "" for voltype in req_volume_types: req_volume_type_ids = ( req_volume_type_ids + voltype.get('id') + ",") if len(req_volume_type_ids) == 0: req_volume_type_ids = None availability_zone = self._extract_availability_zone(availability_zone) kwargs = {'user_id': context.user_id, 'project_id': context.project_id, 'availability_zone': availability_zone, 'status': c_fields.ConsistencyGroupStatus.CREATING, 'name': name, 'description': description, 'volume_type_id': req_volume_type_ids} group = None try: group = objects.ConsistencyGroup(context=context, **kwargs) group.create() except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error occurred when creating consistency group" " %s."), name) request_spec_list = [] filter_properties_list = [] for req_volume_type in req_volume_types: request_spec = {'volume_type': req_volume_type.copy(), 'consistencygroup_id': group.id} filter_properties = {} request_spec_list.append(request_spec) filter_properties_list.append(filter_properties) # Update quota for consistencygroups self.update_quota(context, group, 1) self._cast_create_consistencygroup(context, group, request_spec_list, filter_properties_list) return group def create_from_src(self, context, name, description=None, cgsnapshot_id=None, source_cgid=None): check_policy(context, 'create') cgsnapshot = None orig_cg = None if cgsnapshot_id: try: cgsnapshot = objects.CGSnapshot.get_by_id(context, cgsnapshot_id) except exception.CgSnapshotNotFound: with excutils.save_and_reraise_exception(): LOG.error(_LE("CG snapshot %(cgsnap)s not found when " "creating consistency group %(cg)s from " "source."), {'cg': name, 'cgsnap': cgsnapshot_id}) else: orig_cg = cgsnapshot.consistencygroup source_cg = None if source_cgid: try: source_cg = objects.ConsistencyGroup.get_by_id(context, source_cgid) except exception.ConsistencyGroupNotFound: with excutils.save_and_reraise_exception(): LOG.error(_LE("Source CG %(source_cg)s not found when " "creating consistency group %(cg)s from " "source."), {'cg': name, 'source_cg': source_cgid}) kwargs = { 'user_id': context.user_id, 'project_id': context.project_id, 'status': c_fields.ConsistencyGroupStatus.CREATING, 'name': name, 'description': description, 'cgsnapshot_id': cgsnapshot_id, 'source_cgid': source_cgid, } if orig_cg: kwargs['volume_type_id'] = orig_cg.volume_type_id kwargs['availability_zone'] = orig_cg.availability_zone kwargs['host'] = orig_cg.host if source_cg: kwargs['volume_type_id'] = source_cg.volume_type_id kwargs['availability_zone'] = source_cg.availability_zone kwargs['host'] = source_cg.host group = None try: group = objects.ConsistencyGroup(context=context, **kwargs) group.create() except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error occurred when creating consistency group" " %(cg)s from cgsnapshot %(cgsnap)s."), {'cg': name, 'cgsnap': cgsnapshot_id}) # Update quota for consistencygroups self.update_quota(context, group, 1) if not group.host: msg = _("No host to create consistency group %s.") % group.id LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) if cgsnapshot: self._create_cg_from_cgsnapshot(context, group, cgsnapshot) elif source_cg: self._create_cg_from_source_cg(context, group, source_cg) return group def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot): try: snapshots = 
objects.SnapshotList.get_all_for_cgsnapshot( context, cgsnapshot.id) if not snapshots: msg = _("Cgsnapshot is empty. No consistency group " "will be created.") raise exception.InvalidConsistencyGroup(reason=msg) for snapshot in snapshots: kwargs = {} kwargs['availability_zone'] = group.availability_zone kwargs['cgsnapshot'] = cgsnapshot kwargs['consistencygroup'] = group kwargs['snapshot'] = snapshot volume_type_id = snapshot.volume_type_id if volume_type_id: kwargs['volume_type'] = volume_types.get_volume_type( context, volume_type_id) # Since cgsnapshot is passed in, the following call will # create a db entry for the volume, but will not call the # volume manager to create a real volume in the backend yet. # If an error happens, taskflow will handle rollback of quota # and removal of volume entry in the db. try: self.volume_api.create(context, snapshot.volume_size, None, None, **kwargs) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error occurred when creating volume " "entry from snapshot in the process of " "creating consistency group %(group)s " "from cgsnapshot %(cgsnap)s."), {'group': group.id, 'cgsnap': cgsnapshot.id}) except Exception: with excutils.save_and_reraise_exception(): try: group.destroy() finally: LOG.error(_LE("Error occurred when creating consistency " "group %(group)s from cgsnapshot " "%(cgsnap)s."), {'group': group.id, 'cgsnap': cgsnapshot.id}) volumes = self.db.volume_get_all_by_group(context, group.id) for vol in volumes: # Update the host field for the volume. self.db.volume_update(context, vol['id'], {'host': group.get('host')}) self.volume_rpcapi.create_consistencygroup_from_src( context, group, cgsnapshot) def _create_cg_from_source_cg(self, context, group, source_cg): try: source_vols = self.db.volume_get_all_by_group(context, source_cg.id) if not source_vols: msg = _("Source CG is empty. No consistency group " "will be created.") raise exception.InvalidConsistencyGroup(reason=msg) for source_vol in source_vols: kwargs = {} kwargs['availability_zone'] = group.availability_zone kwargs['source_cg'] = source_cg kwargs['consistencygroup'] = group kwargs['source_volume'] = source_vol volume_type_id = source_vol.get('volume_type_id') if volume_type_id: kwargs['volume_type'] = volume_types.get_volume_type( context, volume_type_id) # Since source_cg is passed in, the following call will # create a db entry for the volume, but will not call the # volume manager to create a real volume in the backend yet. # If an error happens, taskflow will handle rollback of quota # and removal of volume entry in the db. try: self.volume_api.create(context, source_vol['size'], None, None, **kwargs) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error occurred when creating cloned " "volume in the process of creating " "consistency group %(group)s from " "source CG %(source_cg)s."), {'group': group.id, 'source_cg': source_cg.id}) except Exception: with excutils.save_and_reraise_exception(): try: group.destroy() finally: LOG.error(_LE("Error occurred when creating consistency " "group %(group)s from source CG " "%(source_cg)s."), {'group': group.id, 'source_cg': source_cg.id}) volumes = self.db.volume_get_all_by_group(context, group.id) for vol in volumes: # Update the host field for the volume.
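            # (Editor's hedged caller-side sketch -- ``cg_api``, ``ctxt`` and
            # ``src_group`` are illustrative names, not part of this module):
            #
            #     cg_api = cinder.consistencygroup.API()
            #     clone = cg_api.create_from_src(ctxt, name='cg-clone',
            #                                    source_cgid=src_group.id)
            #
            # At this point only DB records exist; the backend volumes are
            # built asynchronously by the RPC cast issued after this loop.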
self.db.volume_update(context, vol['id'], {'host': group.host}) self.volume_rpcapi.create_consistencygroup_from_src(context, group, None, source_cg) def _cast_create_consistencygroup(self, context, group, request_spec_list, filter_properties_list): try: for request_spec in request_spec_list: volume_type = request_spec.get('volume_type', None) volume_type_id = None if volume_type: volume_type_id = volume_type.get('id', None) specs = {} if volume_type_id: qos_specs = volume_types.get_volume_type_qos_specs( volume_type_id) specs = qos_specs['qos_specs'] if not specs: # to make sure we don't pass empty dict specs = None volume_properties = { 'size': 0, # Need to populate size for the scheduler 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'attach_status': 'detached', 'encryption_key_id': request_spec.get('encryption_key_id', None), 'display_description': request_spec.get('description', None), 'display_name': request_spec.get('name', None), 'volume_type_id': volume_type_id, } request_spec['volume_properties'] = volume_properties request_spec['qos_specs'] = specs except Exception: with excutils.save_and_reraise_exception(): try: group.destroy() finally: LOG.error(_LE("Error occurred when building " "request spec list for consistency group " "%s."), group.id) # Cast to the scheduler and let it handle whatever is needed # to select the target host for this group. self.scheduler_rpcapi.create_consistencygroup( context, CONF.volume_topic, group, request_spec_list=request_spec_list, filter_properties_list=filter_properties_list) def update_quota(self, context, group, num, project_id=None): reserve_opts = {'consistencygroups': num} try: reservations = CGQUOTAS.reserve(context, project_id=project_id, **reserve_opts) if reservations: CGQUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: group.destroy() finally: LOG.error(_LE("Failed to update quota for " "consistency group %s."), group.id) @wrap_check_policy def delete(self, context, group, force=False): if not group.host: self.update_quota(context, group, -1, group.project_id) LOG.debug("No host for consistency group %s. Deleting from " "the database.", group.id) group.destroy() return if not force and group.status not in ( [c_fields.ConsistencyGroupStatus.AVAILABLE, c_fields.ConsistencyGroupStatus.ERROR]): msg = _("Consistency group status must be available or error, " "but current status is: %s") % group.status raise exception.InvalidConsistencyGroup(reason=msg) cgsnapshots = objects.CGSnapshotList.get_all_by_group( context.elevated(), group.id) if cgsnapshots: msg = _("Consistency group %s still has dependent " "cgsnapshots.") % group.id LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) volumes = self.db.volume_get_all_by_group(context.elevated(), group.id) if volumes and not force: msg = _("Consistency group %s still contains volumes. " "The force flag is required to delete it.") % group.id LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) for volume in volumes: if volume['attach_status'] == "attached": msg = _("Volume in consistency group %s is attached. 
" "Need to detach first.") % group.id LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) snapshots = objects.SnapshotList.get_all_for_volume(context, volume['id']) if snapshots: msg = _("Volume in consistency group still has " "dependent snapshots.") LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) group.status = c_fields.ConsistencyGroupStatus.DELETING group.terminated_at = timeutils.utcnow() group.save() self.volume_rpcapi.delete_consistencygroup(context, group) def update(self, context, group, name, description, add_volumes, remove_volumes): """Update consistency group.""" if group.status != c_fields.ConsistencyGroupStatus.AVAILABLE: msg = _("Consistency group status must be available, " "but current status is: %s.") % group.status raise exception.InvalidConsistencyGroup(reason=msg) add_volumes_list = [] remove_volumes_list = [] if add_volumes: add_volumes = add_volumes.strip(',') add_volumes_list = add_volumes.split(',') if remove_volumes: remove_volumes = remove_volumes.strip(',') remove_volumes_list = remove_volumes.split(',') invalid_uuids = [] for uuid in add_volumes_list: if uuid in remove_volumes_list: invalid_uuids.append(uuid) if invalid_uuids: msg = _("UUIDs %s are in both add and remove volume " "list.") % invalid_uuids raise exception.InvalidVolume(reason=msg) volumes = self.db.volume_get_all_by_group(context, group.id) # Validate name. if not name or name == group.name: name = None # Validate description. if not description or description == group.description: description = None # Validate volumes in add_volumes and remove_volumes. add_volumes_new = "" remove_volumes_new = "" if add_volumes_list: add_volumes_new = self._validate_add_volumes( context, volumes, add_volumes_list, group) if remove_volumes_list: remove_volumes_new = self._validate_remove_volumes( volumes, remove_volumes_list, group) if (not name and not description and not add_volumes_new and not remove_volumes_new): msg = (_("Cannot update consistency group %(group_id)s " "because no valid name, description, add_volumes, " "or remove_volumes were provided.") % {'group_id': group.id}) raise exception.InvalidConsistencyGroup(reason=msg) fields = {'updated_at': timeutils.utcnow()} # Update name and description in db now. No need to # to send them over through an RPC call. if name: fields['name'] = name if description: fields['description'] = description if not add_volumes_new and not remove_volumes_new: # Only update name or description. Set status to available. fields['status'] = 'available' else: fields['status'] = 'updating' group.update(fields) group.save() # Do an RPC call only if the update request includes # adding/removing volumes. add_volumes_new and remove_volumes_new # are strings of volume UUIDs separated by commas with no spaces # in between. if add_volumes_new or remove_volumes_new: self.volume_rpcapi.update_consistencygroup( context, group, add_volumes=add_volumes_new, remove_volumes=remove_volumes_new) def _validate_remove_volumes(self, volumes, remove_volumes_list, group): # Validate volumes in remove_volumes. remove_volumes_new = "" for volume in volumes: if volume['id'] in remove_volumes_list: if volume['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS: msg = (_("Cannot remove volume %(volume_id)s from " "consistency group %(group_id)s because volume " "is in an invalid state: %(status)s. 
Valid " "states are: %(valid)s.") % {'volume_id': volume['id'], 'group_id': group.id, 'status': volume['status'], 'valid': VALID_REMOVE_VOL_FROM_CG_STATUS}) raise exception.InvalidVolume(reason=msg) # Volume currently in CG. It will be removed from CG. if remove_volumes_new: remove_volumes_new += "," remove_volumes_new += volume['id'] for rem_vol in remove_volumes_list: if rem_vol not in remove_volumes_new: msg = (_("Cannot remove volume %(volume_id)s from " "consistency group %(group_id)s because it " "is not in the group.") % {'volume_id': rem_vol, 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) return remove_volumes_new def _validate_add_volumes(self, context, volumes, add_volumes_list, group): add_volumes_new = "" for volume in volumes: if volume['id'] in add_volumes_list: # Volume already in CG. Remove from add_volumes. add_volumes_list.remove(volume['id']) for add_vol in add_volumes_list: try: add_vol_ref = self.db.volume_get(context, add_vol) except exception.VolumeNotFound: msg = (_("Cannot add volume %(volume_id)s to consistency " "group %(group_id)s because volume cannot be " "found.") % {'volume_id': add_vol, 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) orig_group = add_vol_ref.get('consistencygroup_id', None) if orig_group: # If volume to be added is already in the group to be updated, # it should have been removed from the add_volumes_list in the # beginning of this function. If we are here, it means it is # in a different group. msg = (_("Cannot add volume %(volume_id)s to consistency " "group %(group_id)s because it is already in " "consistency group %(orig_group)s.") % {'volume_id': add_vol_ref['id'], 'group_id': group.id, 'orig_group': orig_group}) raise exception.InvalidVolume(reason=msg) if add_vol_ref: add_vol_type_id = add_vol_ref.get('volume_type_id', None) if not add_vol_type_id: msg = (_("Cannot add volume %(volume_id)s to consistency " "group %(group_id)s because it has no volume " "type.") % {'volume_id': add_vol_ref['id'], 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) if add_vol_type_id not in group.volume_type_id: msg = (_("Cannot add volume %(volume_id)s to consistency " "group %(group_id)s because volume type " "%(volume_type)s is not supported by the " "group.") % {'volume_id': add_vol_ref['id'], 'group_id': group.id, 'volume_type': add_vol_type_id}) raise exception.InvalidVolume(reason=msg) if (add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS): msg = (_("Cannot add volume %(volume_id)s to consistency " "group %(group_id)s because volume is in an " "invalid state: %(status)s. Valid states are: " "%(valid)s.") % {'volume_id': add_vol_ref['id'], 'group_id': group.id, 'status': add_vol_ref['status'], 'valid': VALID_ADD_VOL_TO_CG_STATUS}) raise exception.InvalidVolume(reason=msg) # group.host and add_vol_ref['host'] are in this format: # 'host@backend#pool'. Extract host (host@backend) before # doing comparison. vol_host = vol_utils.extract_host(add_vol_ref['host']) group_host = vol_utils.extract_host(group.host) if group_host != vol_host: raise exception.InvalidVolume( reason=_("Volume is not local to this node.")) # Volume exists. It will be added to CG. 
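        # (Editor's illustration of the host comparison above; the host and
        # pool values are hypothetical.) extract_host() drops the '#pool'
        # suffix, so volumes on different pools of one backend compare equal:
        #
        #     >>> vol_utils.extract_host('node1@lvmdriver-1#pool-a')
        #     'node1@lvmdriver-1'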
if add_volumes_new: add_volumes_new += "," add_volumes_new += add_vol_ref['id'] else: msg = (_("Cannot add volume %(volume_id)s to consistency " "group %(group_id)s because volume does not exist.") % {'volume_id': add_vol_ref['id'], 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) return add_volumes_new def get(self, context, group_id): group = objects.ConsistencyGroup.get_by_id(context, group_id) check_policy(context, 'get', group) return group def get_all(self, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): check_policy(context, 'get_all') if filters is None: filters = {} if filters: LOG.debug("Searching by: %s", filters) if (context.is_admin and 'all_tenants' in filters): del filters['all_tenants'] groups = objects.ConsistencyGroupList.get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) else: groups = objects.ConsistencyGroupList.get_all_by_project( context, context.project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return groups def create_cgsnapshot(self, context, group, name, description): return self._create_cgsnapshot(context, group, name, description) def _create_cgsnapshot(self, context, group, name, description): volumes = self.db.volume_get_all_by_group( context.elevated(), group.id) if not volumes: msg = _("Consistency group is empty. No cgsnapshot " "will be created.") raise exception.InvalidConsistencyGroup(reason=msg) options = {'consistencygroup_id': group.id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': "creating", 'name': name, 'description': description} cgsnapshot = None cgsnapshot_id = None try: cgsnapshot = objects.CGSnapshot(context, **options) cgsnapshot.create() cgsnapshot_id = cgsnapshot.id snap_name = cgsnapshot.name snap_desc = cgsnapshot.description self.volume_api.create_snapshots_in_db( context, volumes, snap_name, snap_desc, True, cgsnapshot_id) except Exception: with excutils.save_and_reraise_exception(): try: if cgsnapshot: cgsnapshot.destroy() finally: LOG.error(_LE("Error occurred when creating cgsnapshot" " %s."), cgsnapshot_id) self.volume_rpcapi.create_cgsnapshot(context, cgsnapshot) return cgsnapshot def delete_cgsnapshot(self, context, cgsnapshot, force=False): if cgsnapshot.status not in ["available", "error"]: msg = _("Cgsnapshot status must be available or error") raise exception.InvalidCgSnapshot(reason=msg) cgsnapshot.update({'status': 'deleting'}) cgsnapshot.save() self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot) def update_cgsnapshot(self, context, cgsnapshot, fields): cgsnapshot.update(fields) cgsnapshot.save() def get_cgsnapshot(self, context, cgsnapshot_id): check_policy(context, 'get_cgsnapshot') cgsnapshots = objects.CGSnapshot.get_by_id(context, cgsnapshot_id) return cgsnapshots def get_all_cgsnapshots(self, context, search_opts=None): check_policy(context, 'get_all_cgsnapshots') search_opts = search_opts or {} if context.is_admin and 'all_tenants' in search_opts: # Need to remove all_tenants to pass the filtering below. 
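            # (Editor's hedged example) This branch serves admin listings
            # requested with the all_tenants flag, e.g. something like:
            #
            #     GET /v2/<project_id>/cgsnapshots/detail?all_tenants=1
            #
            # The flag is not a real filter column, so it is popped before
            # the remaining search_opts are matched against rows.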
del search_opts['all_tenants'] cgsnapshots = objects.CGSnapshotList.get_all(context, search_opts) else: cgsnapshots = objects.CGSnapshotList.get_all_by_project( context.elevated(), context.project_id, search_opts) return cgsnapshots cinder-8.0.0/cinder/common/0000775000567000056710000000000012701406543016662 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/common/constants.py0000664000567000056710000000127512701406250021250 0ustar jenkinsjenkins00000000000000# Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # The maximum value a signed INT type may have DB_MAX_INT = 0x7FFFFFFF cinder-8.0.0/cinder/common/__init__.py0000664000567000056710000000000012701406250020754 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/common/config.py0000664000567000056710000002505412701406257020511 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # Copyright 2013 NTT corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command-line flag library. Emulates gflags by wrapping cfg.ConfigOpts. The idea is to move fully to cfg eventually, and this wrapper is a stepping stone. """ import socket from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import cors from oslo_utils import netutils from cinder.i18n import _ CONF = cfg.CONF logging.register_options(CONF) core_opts = [ cfg.StrOpt('state_path', default='/var/lib/cinder', deprecated_name='pybasedir', help="Top-level directory for maintaining cinder's state"), ] debug_opts = [ ] CONF.register_cli_opts(core_opts) CONF.register_cli_opts(debug_opts) global_opts = [ cfg.StrOpt('my_ip', default=netutils.get_my_ipv4(), help='IP address of this host'), cfg.StrOpt('glance_host', default='$my_ip', help='Default glance host name or IP'), cfg.IntOpt('glance_port', default=9292, min=1, max=65535, help='Default glance port'), cfg.ListOpt('glance_api_servers', default=['$glance_host:$glance_port'], help='A list of the URLs of glance API servers available to ' 'cinder ([http[s]://][hostname|ip]:port). 
If protocol ' 'is not specified it defaults to http.'), cfg.IntOpt('glance_api_version', default=1, help='Version of the glance API to use'), cfg.IntOpt('glance_num_retries', default=0, help='Number retries when downloading an image from glance'), cfg.BoolOpt('glance_api_insecure', default=False, help='Allow to perform insecure SSL (https) requests to ' 'glance'), cfg.BoolOpt('glance_api_ssl_compression', default=False, help='Enables or disables negotiation of SSL layer ' 'compression. In some cases disabling compression ' 'can improve data throughput, such as when high ' 'network bandwidth is available and you use ' 'compressed image formats like qcow2.'), cfg.StrOpt('glance_ca_certificates_file', help='Location of ca certificates file to use for glance ' 'client requests.'), cfg.IntOpt('glance_request_timeout', help='http/https timeout value for glance operations. If no ' 'value (None) is supplied here, the glanceclient default ' 'value is used.'), cfg.StrOpt('scheduler_topic', default='cinder-scheduler', help='The topic that scheduler nodes listen on'), cfg.StrOpt('volume_topic', default='cinder-volume', help='The topic that volume nodes listen on'), cfg.StrOpt('backup_topic', default='cinder-backup', help='The topic that volume backup nodes listen on'), cfg.BoolOpt('enable_v1_api', default=True, help=_("DEPRECATED: Deploy v1 of the Cinder API.")), cfg.BoolOpt('enable_v2_api', default=True, help=_("DEPRECATED: Deploy v2 of the Cinder API.")), cfg.BoolOpt('enable_v3_api', default=True, help=_("Deploy v3 of the Cinder API.")), cfg.BoolOpt('api_rate_limit', default=True, help='Enables or disables rate limit of the API.'), cfg.ListOpt('osapi_volume_ext_list', default=[], help='Specify list of extensions to load when using osapi_' 'volume_extension option with cinder.api.contrib.' 'select_extensions'), cfg.MultiStrOpt('osapi_volume_extension', default=['cinder.api.contrib.standard_extensions'], help='osapi volume extension to load'), cfg.StrOpt('volume_manager', default='cinder.volume.manager.VolumeManager', help='Full class name for the Manager for volume'), cfg.StrOpt('backup_manager', default='cinder.backup.manager.BackupManager', help='Full class name for the Manager for volume backup'), cfg.StrOpt('scheduler_manager', default='cinder.scheduler.manager.SchedulerManager', help='Full class name for the Manager for scheduler'), cfg.StrOpt('host', default=socket.gethostname(), help='Name of this node. This can be an opaque identifier. ' 'It is not necessarily a host name, FQDN, or IP address.'), # NOTE(vish): default to nova for compatibility with nova installs cfg.StrOpt('storage_availability_zone', default='nova', help='Availability zone of this node'), cfg.StrOpt('default_availability_zone', help='Default availability zone for new volumes. If not set, ' 'the storage_availability_zone option value is used as ' 'the default for new volumes.'), cfg.BoolOpt('allow_availability_zone_fallback', default=False, help='If the requested Cinder availability zone is ' 'unavailable, fall back to the value of ' 'default_availability_zone, then ' 'storage_availability_zone, instead of failing.'), cfg.StrOpt('default_volume_type', help='Default volume type to use'), cfg.StrOpt('volume_usage_audit_period', default='month', help='Time period for which to generate volume usages. 
' 'The options are hour, day, month, or year.'), cfg.StrOpt('rootwrap_config', default='/etc/cinder/rootwrap.conf', help='Path to the rootwrap configuration file to use for ' 'running commands as root'), cfg.BoolOpt('monkey_patch', default=False, help='Enable monkey patching'), cfg.ListOpt('monkey_patch_modules', default=[], help='List of modules/decorators to monkey patch'), cfg.IntOpt('service_down_time', default=60, help='Maximum time since last check-in for a service to be ' 'considered up'), cfg.StrOpt('volume_api_class', default='cinder.volume.api.API', help='The full class name of the volume API class to use'), cfg.StrOpt('backup_api_class', default='cinder.backup.api.API', help='The full class name of the volume backup API class'), cfg.StrOpt('auth_strategy', default='keystone', choices=['noauth', 'keystone'], help='The strategy to use for auth. Supports noauth or ' 'keystone.'), cfg.ListOpt('enabled_backends', help='A list of backend names to use. These backend names ' 'should be backed by a unique [CONFIG] group ' 'with its options'), cfg.BoolOpt('no_snapshot_gb_quota', default=False, help='Whether snapshots count against gigabyte quota'), cfg.StrOpt('transfer_api_class', default='cinder.transfer.api.API', help='The full class name of the volume transfer API class'), cfg.StrOpt('replication_api_class', default='cinder.replication.api.API', help='The full class name of the volume replication API class'), cfg.StrOpt('consistencygroup_api_class', default='cinder.consistencygroup.api.API', help='The full class name of the consistencygroup API class'), cfg.StrOpt('os_privileged_user_name', help='OpenStack privileged account username. Used for requests ' 'to other services (such as Nova) that require an account ' 'with special rights.'), cfg.StrOpt('os_privileged_user_password', help='Password associated with the OpenStack privileged ' 'account.', secret=True), cfg.StrOpt('os_privileged_user_tenant', help='Tenant name associated with the OpenStack privileged ' 'account.'), cfg.StrOpt('os_privileged_user_auth_url', help='Auth URL associated with the OpenStack privileged ' 'account.'), ] CONF.register_opts(global_opts) def set_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID', 'X-Trace-Info', 'X-Trace-HMAC', 'OpenStack-API-Version'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID', 'OpenStack-API-Version'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH', 'HEAD'] ) cinder-8.0.0/cinder/common/sqlalchemyutils.py0000664000567000056710000001151512701406250022455 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of paginate query.""" from oslo_log import log as logging from six.moves import range import sqlalchemy from cinder import exception from cinder.i18n import _, _LW LOG = logging.getLogger(__name__) # copied from glance/db/sqlalchemy/api.py def paginate_query(query, model, limit, sort_keys, marker=None, sort_dir=None, sort_dirs=None, offset=None): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key, specified by sort_keys. (If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the 'marker' for pagination. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. With a compound-valued sort_key (k1, k2, k3), we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions. Typically, the id of the last row is used as the client-facing pagination marker, then the actual marker object must be fetched from the db and passed in to us as marker. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_keys: array of attributes by which results should be sorted :param marker: the last item of the previous page; we return the next results after this value. :param sort_dir: direction in which results should be sorted (asc, desc) :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys :rtype: sqlalchemy.orm.query.Query :return: The query with sorting/pagination added.
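    Example (editor's hedged sketch -- ``session`` and ``models.Volume``
    are assumed to exist elsewhere and are not defined in this module):

        query = session.query(models.Volume)
        query = paginate_query(query, models.Volume, limit=50,
                               sort_keys=['created_at', 'id'],
                               sort_dirs=['desc', 'desc'],
                               marker=last_row_of_previous_page)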
""" if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming its id LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?')) assert(not (sort_dir and sort_dirs)) # Default the sort direction to ascending if sort_dirs is None and sort_dir is None: sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir for _sort_key in sort_keys] assert(len(sort_dirs) == len(sort_keys)) # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[current_sort_dir] try: sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise exception.InvalidInput(reason='Invalid sort key') query = query.order_by(sort_dir_func(sort_key_attr)) # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key) marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(0, len(sort_keys)): crit_attrs = [] for j in range(0, i): model_attr = getattr(model, sort_keys[j]) crit_attrs.append((model_attr == marker_values[j])) model_attr = getattr(model, sort_keys[i]) if sort_dirs[i] == 'desc': crit_attrs.append((model_attr < marker_values[i])) elif sort_dirs[i] == 'asc': crit_attrs.append((model_attr > marker_values[i])) else: raise ValueError(_("Unknown sort direction, " "must be 'desc' or 'asc'")) criteria = sqlalchemy.sql.and_(*crit_attrs) criteria_list.append(criteria) f = sqlalchemy.sql.or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) if offset: query = query.offset(offset) return query cinder-8.0.0/cinder/version.py0000664000567000056710000000160312701406250017424 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pbr import version as pbr_version CINDER_VENDOR = "OpenStack Foundation" CINDER_PRODUCT = "OpenStack Cinder" CINDER_PACKAGE = None # OS distro package version suffix loaded = False version_info = pbr_version.VersionInfo('cinder') version_string = version_info.version_string cinder-8.0.0/cinder/db/0000775000567000056710000000000012701406543015757 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/db/__init__.py0000664000567000056710000000144212701406250020064 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ DB abstraction for Cinder """ from cinder.db.api import * # noqa cinder-8.0.0/cinder/db/sqlalchemy/0000775000567000056710000000000012701406543020121 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/0000775000567000056710000000000012701406543022576 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/README0000664000567000056710000000032212701406250023446 0ustar jenkinsjenkins00000000000000This is a database migration repository. More information at: https://github.com/openstack/sqlalchemy-migrate Original project is no longer maintained at: http://code.google.com/p/sqlalchemy-migrate/ cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/0000775000567000056710000000000012701406543024446 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/057_placeholder.py0000664000567000056710000000142312701406250027670 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py0000664000567000056710000002234212701406250027665 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
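# (Editor's hedged note) Every script in this migrate_repo follows the
# sqlalchemy-migrate contract: the numeric file-name prefix is the schema
# version, and the module exposes a module-level hook, minimally:
#
#     def upgrade(migrate_engine):
#         meta = MetaData()
#         meta.bind = migrate_engine
#         # ... create/alter tables via Table/Column DDL ...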
from sqlalchemy import Boolean, Column, DateTime, ForeignKey from sqlalchemy import Integer, MetaData, String, Table def define_tables(meta): migrations = Table( 'migrations', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('source_compute', String(length=255)), Column('dest_compute', String(length=255)), Column('dest_host', String(length=255)), Column('status', String(length=255)), Column('instance_uuid', String(length=255)), Column('old_instance_type_id', Integer), Column('new_instance_type_id', Integer), mysql_engine='InnoDB' ) services = Table( 'services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('host', String(length=255)), Column('binary', String(length=255)), Column('topic', String(length=255)), Column('report_count', Integer, nullable=False), Column('disabled', Boolean), Column('availability_zone', String(length=255)), mysql_engine='InnoDB' ) sm_flavors = Table( 'sm_flavors', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('label', String(length=255)), Column('description', String(length=255)), mysql_engine='InnoDB' ) sm_backend_config = Table( 'sm_backend_config', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('flavor_id', Integer, ForeignKey('sm_flavors.id'), nullable=False), Column('sr_uuid', String(length=255)), Column('sr_type', String(length=255)), Column('config_params', String(length=2047)), mysql_engine='InnoDB' ) sm_volume = Table( 'sm_volume', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', String(length=36), ForeignKey('volumes.id'), primary_key=True, nullable=False), Column('backend_id', Integer, ForeignKey('sm_backend_config.id'), nullable=False), Column('vdi_uuid', String(length=255)), mysql_engine='InnoDB' ) snapshots = Table( 'snapshots', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', String(length=36), primary_key=True, nullable=False), Column('volume_id', String(length=36), nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('status', String(length=255)), Column('progress', String(length=255)), Column('volume_size', Integer), Column('scheduled_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), mysql_engine='InnoDB' ) volume_types = Table( 'volume_types', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('name', String(length=255)), mysql_engine='InnoDB' ) volume_metadata = Table( 'volume_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('volume_id', String(length=36), 
ForeignKey('volumes.id'), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), mysql_engine='InnoDB' ) volume_type_extra_specs = Table( 'volume_type_extra_specs', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('volume_type_id', Integer, ForeignKey('volume_types.id'), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), mysql_engine='InnoDB' ) volumes = Table( 'volumes', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', String(length=36), primary_key=True, nullable=False), Column('ec2_id', String(length=255)), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('size', Integer), Column('availability_zone', String(length=255)), Column('instance_uuid', String(length=36)), Column('mountpoint', String(length=255)), Column('attach_time', String(length=255)), Column('status', String(length=255)), Column('attach_status', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('provider_location', String(length=256)), Column('provider_auth', String(length=256)), Column('snapshot_id', String(length=36)), Column('volume_type_id', Integer), mysql_engine='InnoDB' ) quotas = Table( 'quotas', meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('project_id', String(length=255)), Column('resource', String(length=255), nullable=False), Column('hard_limit', Integer), mysql_engine='InnoDB' ) iscsi_targets = Table( 'iscsi_targets', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('target_num', Integer), Column('host', String(length=255)), Column('volume_id', String(length=36), ForeignKey('volumes.id'), nullable=True), mysql_engine='InnoDB' ) return [sm_flavors, sm_backend_config, snapshots, volume_types, volumes, iscsi_targets, migrations, quotas, services, sm_volume, volume_metadata, volume_type_extra_specs] def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # create all tables # Take care on create order for those with FK dependencies tables = define_tables(meta) for table in tables: table.create() if migrate_engine.name == "mysql": tables = ["sm_flavors", "sm_backend_config", "snapshots", "volume_types", "volumes", "iscsi_targets", "migrate_version", "migrations", "quotas", "services", "sm_volume", "volume_metadata", "volume_type_extra_specs"] migrate_engine.execute("SET foreign_key_checks = 0") for table in tables: migrate_engine.execute( "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table) migrate_engine.execute("SET foreign_key_checks = 1") migrate_engine.execute( "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % migrate_engine.url.database) migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py0000664000567000056710000000143012701406250031745 0ustar 
jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table TABLE_NAME = 'migrations' def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine table = Table(TABLE_NAME, meta, autoload=True) table.drop() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py0000664000567000056710000000366312701406250027467 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import MetaData, Integer, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # New table backups = Table( 'backups', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', String(36), primary_key=True, nullable=False), Column('volume_id', String(36), nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('availability_zone', String(length=255)), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('container', String(length=255)), Column('status', String(length=255)), Column('fail_reason', String(length=255)), Column('service_metadata', String(length=255)), Column('service', String(length=255)), Column('size', Integer()), Column('object_count', Integer()), mysql_engine='InnoDB' ) backups.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/032_add_volume_type_projects.py0000664000567000056710000000315112701406250032470 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy import Boolean, Column, DateTime, UniqueConstraint from sqlalchemy import Integer, MetaData, String, Table, ForeignKey def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine volume_types = Table('volume_types', meta, autoload=True) is_public = Column('is_public', Boolean) volume_types.create_column(is_public) # pylint: disable=E1120 volume_types.update().values(is_public=True).execute() volume_type_projects = Table( 'volume_type_projects', meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('volume_type_id', String(36), ForeignKey('volume_types.id')), Column('project_id', String(length=255)), Column('deleted', Boolean(create_constraint=True, name=None)), UniqueConstraint('volume_type_id', 'project_id', 'deleted'), mysql_engine='InnoDB', ) volume_type_projects.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/050_add_previous_status_to_volumes.py0000664000567000056710000000174712701406250033753 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) previous_status = Column('previous_status', String(length=255)) volumes.create_column(previous_status) volumes.update().values(previous_status=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/070_placeholder.py0000664000567000056710000000153712701406250027671 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Mitaka backports. # Do not use this number for new Newton work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/034_volume_type_add_desc_column.py0000664000567000056710000000173012701406250033135 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, Table, String def upgrade(migrate_engine): """Add description column to volume_types.""" meta = MetaData() meta.bind = migrate_engine volume_types = Table('volume_types', meta, autoload=True) description = Column('description', String(255)) volume_types.create_column(description) volume_types.update().values(description=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/014_add_name_id.py0000664000567000056710000000165712701406250027614 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import String, Column, MetaData, Table def upgrade(migrate_engine): """Add _name_id column to volumes.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) _name_id = Column('_name_id', String(36)) volumes.create_column(_name_id) volumes.update().values(_name_id=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/011_add_bootable_column.py0000664000567000056710000000232312701406250031350 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, MetaData, Table def upgrade(migrate_engine): """Add bootable column to volumes.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) bootable = Column('bootable', Boolean) volumes.create_column(bootable) volumes.update().values(bootable=False).execute() glance_metadata = Table('volume_glance_metadata', meta, autoload=True) glance_items = list(glance_metadata.select().execute()) for item in glance_items: volumes.update().\ where(volumes.c.id == item['volume_id']).\ values(bootable=True).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/059_placeholder.py0000664000567000056710000000142312701406250027672 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/027_placeholder.py0000664000567000056710000000142312701406250027665 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/039_add_parent_id_to_backups.py0000664000567000056710000000175712701406250032407 0ustar jenkinsjenkins00000000000000# Copyright 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) parent_id = Column('parent_id', String(length=36)) backups.create_column(parent_id) backups.update().values(parent_id=None).execute() ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/054_add_has_dependent_backups_column_to_backups.pycinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/054_add_has_dependent_backups_column_to_back0000664000567000056710000000262712701406250035143 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, Integer, MetaData, Table def upgrade(migrate_engine): """Add num_dependent_backups column to backups.""" meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) num_dependent_backups = Column('num_dependent_backups', Integer, default=0) backups.create_column(num_dependent_backups) backups_list = list(backups.select().execute()) for backup in backups_list: dep_bks_list = list(backups.select().where(backups.columns.parent_id == backup.id).execute()) if dep_bks_list: backups.update().where(backups.columns.id == backup.id).values( num_dependent_backups=len(dep_bks_list)).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/024_add_replication_support.py0000664000567000056710000000270112701406250032315 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add replication columns to volumes.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) replication_status = Column('replication_status', String(255)) replication_extended_status = Column('replication_extended_status', String(255)) replication_driver_data = Column('replication_driver_data', String(255)) volumes.create_column(replication_status) volumes.create_column(replication_extended_status) volumes.create_column(replication_driver_data) volumes.update().values(replication_status='disabled', replication_extended_status=None, replication_driver_data=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py0000664000567000056710000000450012701406250032226 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_config import cfg from sqlalchemy import MetaData, Table # Get default values via config. The defaults will either # come from the default values set in the quota option # configuration or via cinder.conf if the user has configured # default values for quotas there. 
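# NOTE: migration 054 above backfills num_dependent_backups with one SELECT
# per backup row. The same backfill can be expressed with a single aggregate
# query; an uncalled, illustrative sketch of that alternative (not the
# shipped migration):
from sqlalchemy import MetaData, Table, func, select

def _count_dependents_in_one_pass(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    backups = Table('backups', meta, autoload=True)
    # One GROUP BY instead of N per-row SELECTs.
    counts = select([backups.c.parent_id, func.count().label('cnt')]).\
        where(backups.c.parent_id.isnot(None)).\
        group_by(backups.c.parent_id).execute()
    for parent_id, cnt in counts:
        backups.update().where(backups.c.id == parent_id).\
            values(num_dependent_backups=cnt).execute()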
CONF = cfg.CONF CONF.import_opt('quota_volumes', 'cinder.quota') CONF.import_opt('quota_snapshots', 'cinder.quota') CONF.import_opt('quota_gigabytes', 'cinder.quota') CLASS_NAME = 'default' CREATED_AT = datetime.datetime.now() # noqa def upgrade(migrate_engine): """Add default quota class data into DB.""" meta = MetaData() meta.bind = migrate_engine quota_classes = Table('quota_classes', meta, autoload=True) rows = quota_classes.count().\ where(quota_classes.c.class_name == 'default').execute().scalar() # Do not add entries if there are already 'default' entries. We don't # want to write over something the user added. if rows: return # Set default volumes qci = quota_classes.insert() qci.execute({'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'volumes', 'hard_limit': CONF.quota_volumes, 'deleted': False, }) # Set default snapshots qci.execute({'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'snapshots', 'hard_limit': CONF.quota_snapshots, 'deleted': False, }) # Set default gigabytes qci.execute({'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'gigabytes', 'hard_limit': CONF.quota_gigabytes, 'deleted': False, }) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/058_placeholder.py0000664000567000056710000000142312701406250027671 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/038_add_driver_initiator_data_table.py0000664000567000056710000000263012701406250033733 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
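# NOTE: migration 021 above guards its INSERTs with a count() so that a
# re-run never overwrites quota rows an operator may have edited. The guard
# generalizes to a tiny helper; an uncalled sketch using the same
# era-appropriate Table.count() API (the helper name is hypothetical, and
# the table is assumed to be reflected against a bound MetaData):
def _seed_if_absent(table, where_clause, row):
    existing = table.count().where(where_clause).execute().scalar()
    if not existing:
        table.insert().execute(row)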
from sqlalchemy import Column, DateTime, Integer from sqlalchemy import MetaData, String, Table, UniqueConstraint def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # New table initiator_data = Table( 'driver_initiator_data', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('id', Integer, primary_key=True, nullable=False), Column('initiator', String(length=255), index=True, nullable=False), Column('namespace', String(length=255), nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=255)), UniqueConstraint('initiator', 'namespace', 'key'), mysql_engine='InnoDB', mysql_charset='utf8' ) initiator_data.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py0000664000567000056710000000000012701406250026540 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/071_placeholder.py0000664000567000056710000000153712701406250027672 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Mitaka backports. # Do not use this number for new Newton work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/012_add_attach_host_column.py0000664000567000056710000000173412701406250032070 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add attach host column to volumes.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) attached_host = Column('attached_host', String(255)) volumes.create_column(attached_host) volumes.update().values(attached_host=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/044_placeholder.py0000664000567000056710000000153612701406250027671 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. # Do not use this number for new Liberty work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py0000664000567000056710000000172412701406250032123 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table from migrate.changeset.constraint import ForeignKeyConstraint def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine snapshots = Table('snapshots', meta, autoload=True) volumes = Table('volumes', meta, autoload=True) ForeignKeyConstraint( columns=[snapshots.c.volume_id], refcolumns=[volumes.c.id]).create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/048_add_allocated_in_quotas.py0000664000567000056710000000165712701406250032241 0ustar jenkinsjenkins00000000000000# Copyright 2015 Yahoo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, Integer, MetaData, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine quotas = Table('quotas', meta, autoload=True) # Add a new column allocated to save allocated quota allocated = Column('allocated', Integer, default=0) quotas.create_column(allocated) ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/037_add_cgsnapshot_id_column_to_consistencygroups.pycinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/037_add_cgsnapshot_id_column_to_consistencyg0000664000567000056710000000202012701406250035253 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add cgsnapshot_id column to consistencygroups.""" meta = MetaData() meta.bind = migrate_engine consistencygroups = Table('consistencygroups', meta, autoload=True) cgsnapshot_id = Column('cgsnapshot_id', String(36)) consistencygroups.create_column(cgsnapshot_id) consistencygroups.update().values(cgsnapshot_id=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/023_add_expire_reservations_index.py0000664000567000056710000000237312701406250033503 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Index, MetaData, Table def _get_deleted_expire_index(table): members = sorted(['deleted', 'expire']) for idx in table.indexes: if sorted(idx.columns.keys()) == members: return idx def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine reservations = Table('reservations', meta, autoload=True) if _get_deleted_expire_index(reservations): return # Based on expire_reservations query # from: cinder/db/sqlalchemy/api.py index = Index('reservations_deleted_expire_idx', reservations.c.deleted, reservations.c.expire) index.create(migrate_engine) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py0000664000567000056710000000400012701406250030460 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, DateTime, Text, Boolean from sqlalchemy import MetaData, Integer, String, Table, ForeignKey def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # Just for the ForeignKey and column creation to succeed, these are not the # actual definitions of the tables.
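    # NOTE: migration 023 above probes table.indexes before creating
    # reservations_deleted_expire_idx, so databases where the index was
    # already added by hand do not fail with a duplicate-index error. The
    # probe generalizes; an uncalled sketch (helper name is hypothetical):
    from sqlalchemy import Index as _Index

    def _ensure_index(engine, table, index_name, *column_names):
        wanted = sorted(column_names)
        for idx in table.indexes:
            if sorted(idx.columns.keys()) == wanted:
                return idx  # an equivalent index already exists
        index = _Index(index_name, *[table.c[n] for n in column_names])
        index.create(engine)
        return index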
# Table('volumes', meta, Column('id', Integer(), primary_key=True, nullable=False), mysql_engine='InnoDB') Table('snapshots', meta, Column('id', Integer(), primary_key=True, nullable=False), mysql_engine='InnoDB') # Create new table volume_glance_metadata = Table( 'volume_glance_metadata', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True, nullable=False), Column('volume_id', String(length=36), ForeignKey('volumes.id')), Column('snapshot_id', String(length=36), ForeignKey('snapshots.id')), Column('key', String(255)), Column('value', Text), mysql_engine='InnoDB' ) try: volume_glance_metadata.create() except Exception: meta.drop_all(tables=[volume_glance_metadata]) raise cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/035_add_provider_id_column.py0000664000567000056710000000172412701406250032101 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add provider_id column to volumes.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) provider_id = Column('provider_id', String(255)) volumes.create_column(provider_id) volumes.update().values(provider_id=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/060_placeholder.py0000664000567000056710000000142312701406250027662 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/026_add_consistencygroup_quota_class.py0000664000567000056710000000337212701406250034233 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_config import cfg from sqlalchemy import MetaData, Table # Get default values via config. The defaults will either # come from the default values set in the quota option # configuration or via cinder.conf if the user has configured # default values for quotas there. CONF = cfg.CONF CONF.import_opt('quota_consistencygroups', 'cinder.quota') CLASS_NAME = 'default' CREATED_AT = datetime.datetime.now() # noqa def upgrade(migrate_engine): """Add default quota class data into DB.""" meta = MetaData() meta.bind = migrate_engine quota_classes = Table('quota_classes', meta, autoload=True) rows = quota_classes.count().\ where(quota_classes.c.resource == 'consistencygroups').\ execute().scalar() # Do not add entries if there are already 'consistencygroups' entries. if rows: return # Set consistencygroups qci = quota_classes.insert() qci.execute({'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'consistencygroups', 'hard_limit': CONF.quota_consistencygroups, 'deleted': False, }) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py0000664000567000056710000000200312701406250030371 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine sm_backend_config = Table('sm_backend_config', meta, autoload=True) sm_flavors = Table('sm_flavors', meta, autoload=True) sm_volume = Table('sm_volume', meta, autoload=True) tables = [sm_volume, sm_backend_config, sm_flavors] for table in tables: table.drop() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/046_placeholder.py0000664000567000056710000000153612701406250027673 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. # Do not use this number for new Liberty work. New work starts after # all the placeholders. 
# # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/049_add_temp_volume_snapshot_ids_to_backups.pycinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/049_add_temp_volume_snapshot_ids_to_backups.0000664000567000056710000000222112701406250035167 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) temp_volume_id = Column('temp_volume_id', String(length=36)) temp_snapshot_id = Column('temp_snapshot_id', String(length=36)) backups.create_column(temp_volume_id) backups.update().values(temp_volume_id=None).execute() backups.create_column(temp_snapshot_id) backups.update().values(temp_snapshot_id=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/022_add_reason_column_to_service.py0000664000567000056710000000156212701406250033300 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine services = Table('services', meta, autoload=True) reason = Column('disabled_reason', String(255)) services.create_column(reason) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/013_add_provider_geometry_column.py0000664000567000056710000000176212701406250033336 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
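# NOTE: the add-column scripts in this repository (049 above, 013 below, and
# many others) all repeat the same three steps: reflect the table,
# create_column(), then backfill. The shape as a single uncalled helper
# (name is hypothetical), assuming sqlalchemy-migrate's Table.create_column()
# monkeypatch is active:
from sqlalchemy import Column, MetaData, String, Table

def _add_string_column(migrate_engine, table_name, column_name,
                       length=255, initial=None):
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table(table_name, meta, autoload=True)
    table.create_column(Column(column_name, String(length)))
    table.update().values(**{column_name: initial}).execute()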
from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add provider_geometry column to volumes.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) provider_geometry = Column('provider_geometry', String(255)) volumes.create_column(provider_geometry) volumes.update().values(provider_geometry=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py0000664000567000056710000000301012701406250031353 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import MetaData, String, Table, ForeignKey def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine Table('volumes', meta, autoload=True) # New table transfers = Table( 'transfers', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean), Column('id', String(36), primary_key=True, nullable=False), Column('volume_id', String(length=36), ForeignKey('volumes.id'), nullable=False), Column('display_name', String(length=255)), Column('salt', String(length=255)), Column('crypt_hash', String(length=255)), Column('expires_at', DateTime(timezone=False)), mysql_engine='InnoDB', mysql_charset='utf8' ) transfers.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/031_placeholder.py0000664000567000056710000000142312701406250027660 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/051_add_source_cgid_column_to_consistencygroups.pycinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/051_add_source_cgid_column_to_consistencygro0000664000567000056710000000200612701406250035255 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add source_cgid column to consistencygroups.""" meta = MetaData() meta.bind = migrate_engine consistencygroups = Table('consistencygroups', meta, autoload=True) source_cgid = Column('source_cgid', String(36)) consistencygroups.create_column(source_cgid) consistencygroups.update().values(source_cgid=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py0000664000567000056710000000212612701406250035022 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, DateTime, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) snapshot_id = Column('snapshot_id', String(length=36)) data_timestamp = Column('data_timestamp', DateTime) backups.create_column(snapshot_id) backups.create_column(data_timestamp) backups.update().values(data_timestamp=backups.c.created_at).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/062_deleted_type_to_Integer.py0000664000567000056710000000227712701406250032240 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
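# NOTE: migration 061 above seeds the new column from a sibling column
# rather than a literal: update().values(data_timestamp=backups.c.created_at)
# renders as UPDATE backups SET data_timestamp = created_at. Any SQL
# expression works on the right-hand side; an uncalled, illustrative variant
# (not shipped code):
from sqlalchemy import MetaData, Table, func

def _backfill_with_expression(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    backups = Table('backups', meta, autoload=True)
    backups.update().values(
        data_timestamp=func.coalesce(backups.c.created_at,
                                     func.now())).execute()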
from sqlalchemy import Integer from sqlalchemy import MetaData, Table def upgrade(migrate_engine): """Convert volume_type_projects.deleted from tinyint to Integer.""" meta = MetaData() meta.bind = migrate_engine volume_type_projects = Table('volume_type_projects', meta, autoload=True) if migrate_engine.name == 'postgresql': # NOTE: PostgreSQL can't cast Boolean to int automatically sql = 'ALTER TABLE volume_type_projects ALTER COLUMN deleted ' + \ 'TYPE INTEGER USING deleted::integer' migrate_engine.execute(sql) else: volume_type_projects.c.deleted.alter(Integer) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/042_placeholder.py0000664000567000056710000000153612701406250027667 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. # Do not use this number for new Liberty work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/053_add_version_columns_to_service.py0000664000567000056710000000312012701406250033655 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 SimpliVity Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
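# NOTE: migration 062 above branches on migrate_engine.name because
# PostgreSQL refuses to cast boolean to integer implicitly, while the other
# backends accept the in-place Column.alter() that sqlalchemy-migrate
# provides. The general shape of such dialect-specific DDL, as an uncalled
# sketch (helper name is hypothetical):
from sqlalchemy import Integer, MetaData, Table

def _boolean_column_to_integer(migrate_engine, table_name, column_name):
    if migrate_engine.name == 'postgresql':
        migrate_engine.execute(
            'ALTER TABLE %(table)s ALTER COLUMN %(col)s '
            'TYPE INTEGER USING %(col)s::integer'
            % {'table': table_name, 'col': column_name})
    else:
        meta = MetaData()
        meta.bind = migrate_engine
        table = Table(table_name, meta, autoload=True)
        table.c[column_name].alter(Integer)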
from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine services = Table('services', meta, autoload=True) rpc_current_version = Column('rpc_current_version', String(36)) rpc_available_version = Column('rpc_available_version', String(36)) object_current_version = Column('object_current_version', String(36)) object_available_version = Column('object_available_version', String(36)) services.create_column(rpc_current_version) services.create_column(rpc_available_version) services.create_column(object_current_version) services.create_column(object_available_version) services.update().values(rpc_current_version=None).execute() services.update().values(rpc_available_version=None).execute() services.update().values(object_current_version=None).execute() services.update().values(object_available_version=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/065_add_replication_info_to_service.py0000664000567000056710000000231712701406250033766 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import Boolean, MetaData, String, Table def upgrade(migrate_engine): """Add replication info to services table.""" meta = MetaData() meta.bind = migrate_engine services = Table('services', meta, autoload=True) replication_status = Column('replication_status', String(length=36), default="not-capable") active_backend_id = Column('active_backend_id', String(length=255)) frozen = Column('frozen', Boolean, default=False) services.create_column(replication_status) services.create_column(frozen) services.create_column(active_backend_id) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py0000664000567000056710000000506512701406250033017 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
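# NOTE: migration 053 above issues four separate UPDATEs, each setting one
# new column to None; update().values() accepts several columns at once, so
# the whole backfill can be a single statement. An uncalled, illustrative
# consolidation (not the shipped migration):
from sqlalchemy import MetaData, Table

def _null_version_columns(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    services = Table('services', meta, autoload=True)
    services.update().values(rpc_current_version=None,
                             rpc_available_version=None,
                             object_current_version=None,
                             object_available_version=None).execute()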
from sqlalchemy import Column, ForeignKey, MetaData, Table from sqlalchemy import Boolean, DateTime, Integer, String def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) # encryption key UUID -- must be stored per volume volumes = Table('volumes', meta, autoload=True) encryption_key = Column('encryption_key_id', String(36)) volumes.create_column(encryption_key) # encryption key UUID and volume type id -- must be stored per snapshot snapshots = Table('snapshots', meta, autoload=True) encryption_key = Column('encryption_key_id', String(36)) snapshots.create_column(encryption_key) volume_type = Column('volume_type_id', String(36)) snapshots.create_column(volume_type) volume_types = Table('volume_types', meta, autoload=True) # encryption types associated with particular volume type encryption = Table( 'encryption', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('cipher', String(length=255)), Column('control_location', String(length=255), nullable=False), Column('key_size', Integer), Column('provider', String(length=255), nullable=False), # NOTE(joel-coffman): The volume_type_id must be unique or else the # referenced volume type becomes ambiguous. That is, specifying the # volume type is not sufficient to identify a particular encryption # scheme unless each volume type is associated with at most one # encryption scheme. Column('volume_type_id', String(length=36), ForeignKey(volume_types.c.id), primary_key=True, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) encryption.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/052_add_provider_auth_column_to_snapshots.py0000664000567000056710000000175012701406250035250 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add provider_auth column to snapshots.""" meta = MetaData() meta.bind = migrate_engine snapshots = Table('snapshots', meta, autoload=True) provider_auth = Column('provider_auth', String(255)) snapshots.create_column(provider_auth) snapshots.update().values(provider_auth=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/036_add_provider_id_column_to_snapshots.py0000664000567000056710000000173612701406250034711 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
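# NOTE: migration 017 above passes the reflected column object to ForeignKey
# (ForeignKey(volume_types.c.id)) instead of the usual string form; both
# spellings resolve the same way once the parent table lives in the same
# MetaData. A standalone, uncalled demonstration against in-memory sqlite
# (table names here are examples only):
from sqlalchemy import (Column, ForeignKey, MetaData, String, Table,
                        create_engine)

def _foreign_key_spellings_demo():
    engine = create_engine('sqlite://')
    meta = MetaData(bind=engine)
    volume_types = Table('volume_types', meta,
                         Column('id', String(36), primary_key=True))
    Table('encryption_demo', meta,
          # Equivalent to ForeignKey('volume_types.id'):
          Column('volume_type_id', String(36),
                 ForeignKey(volume_types.c.id), primary_key=True))
    meta.create_all()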
from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add provider_id column to snapshots.""" meta = MetaData() meta.bind = migrate_engine snapshots = Table('snapshots', meta, autoload=True) provider_id = Column('provider_id', String(255)) snapshots.create_column(provider_id) snapshots.update().values(provider_id=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/055_add_image_volume_cache_table.py0000664000567000056710000000262512701406250033164 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Pure Storage, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, DateTime, Integer from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # New table image_volume_cache = Table( 'image_volume_cache_entries', meta, Column('image_updated_at', DateTime(timezone=False)), Column('id', Integer, primary_key=True, nullable=False), Column('host', String(length=255), index=True, nullable=False), Column('image_id', String(length=36), index=True, nullable=False), Column('volume_id', String(length=36), nullable=False), Column('size', Integer, nullable=False), Column('last_used', DateTime, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) image_volume_cache.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/063_placeholder.py0000664000567000056710000000140112701406250027661 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def upgrade(migrate_engine): # This used to drop iscsi_targets, but dropping the table before the L # release has stopped using it would break rolling upgrades, so the drop # is postponed until N. pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/062_sqlite_upgrade.sql0000664000567000056710000000153712701406250030567 0ustar jenkinsjenkins00000000000000-- As sqlite does not support DROP CHECK, we need to create the new -- table and move all the data to it.
CREATE TABLE volume_type_projects_new ( created_at DATETIME, updated_at DATETIME, deleted_at DATETIME, deleted INTEGER, id INTEGER NOT NULL, volume_type_id VARCHAR(36), project_id VARCHAR(255), PRIMARY KEY (id), FOREIGN KEY (volume_type_id) REFERENCES volume_types(id), CONSTRAINT uniq_volume_type_projects0volume_type_id0project_id0deleted UNIQUE (volume_type_id, project_id, deleted) ); INSERT INTO volume_type_projects_new SELECT created_at, updated_at, deleted_at, deleted, id, volume_type_id, project_id FROM volume_type_projects; DROP TABLE volume_type_projects; ALTER TABLE volume_type_projects_new RENAME TO volume_type_projects; cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/068_placeholder.py0000664000567000056710000000153712701406250027700 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Mitaka backports. # Do not use this number for new Newton work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py0000664000567000056710000000173412701406250032460 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Add source volume id column to volumes.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) source_volid = Column('source_volid', String(36)) volumes.create_column(source_volid) volumes.update().values(source_volid=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/056_placeholder.py0000664000567000056710000000142712701406250027673 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Mitaka work. New Mitaka work starts after # all the placeholders. 
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/019_add_migration_status.py0000664000567000056710000000167312701406250031617 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import String, Column, MetaData, Table def upgrade(migrate_engine): """Add migration_status column to volumes.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) migration_status = Column('migration_status', String(255)) volumes.create_column(migration_status) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/030_placeholder.py0000664000567000056710000000142312701406250027657 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py0000664000567000056710000000171012701406250034033 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine snapshots = Table('snapshots', meta, autoload=True) provider_location = Column('provider_location', String(255)) snapshots.create_column(provider_location) snapshots.update().values(provider_location=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/067_readd_iscsi_targets_table.py0000664000567000056710000000304212701406250032557 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, DateTime, ForeignKey from sqlalchemy import Integer, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine Table('volumes', meta, autoload=True) table = Table( 'iscsi_targets', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('target_num', Integer), Column('host', String(length=255)), Column('volume_id', String(length=36), ForeignKey('volumes.id'), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8' ) # We use the checkfirst argument because this table may already exist if the # upgrade is run on a system that was at a migration earlier than 063. table.create(checkfirst=True) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/066_add_allocated_id_column_to_reservations.py0000664000567000056710000000216412701406250035137 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData, Integer, Table, ForeignKey def upgrade(migrate_engine): """Add allocated_id to the reservations table.""" meta = MetaData() meta.bind = migrate_engine reservations = Table('reservations', meta, autoload=True) Table('quotas', meta, autoload=True) allocated_id = Column('allocated_id', Integer, ForeignKey('quotas.id'), nullable=True) reservations.create_column(allocated_id) usage_id = reservations.c.usage_id usage_id.alter(nullable=True) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/025_add_consistencygroup.py0000664000567000056710000000620412701406250031631 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
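# [hedged sketch] table.create(checkfirst=True), used by 067 above, is what makes
# re-adding a dropped table idempotent: SQLAlchemy asks the dialect whether the
# table already exists and silently skips the CREATE when it does. Roughly the
# explicit spelling of that shorthand (connection handling elided):
def create_if_missing(migrate_engine, table):
    # approximately what checkfirst=True does under the hood
    if not migrate_engine.dialect.has_table(migrate_engine.connect(),
                                            table.name):
        table.create()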
from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import ForeignKey, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # New table consistencygroups = Table( 'consistencygroups', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', String(36), primary_key=True, nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('availability_zone', String(length=255)), Column('name', String(length=255)), Column('description', String(length=255)), Column('volume_type_id', String(length=255)), Column('status', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8', ) consistencygroups.create() # New table cgsnapshots = Table( 'cgsnapshots', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', String(36), primary_key=True, nullable=False), Column('consistencygroup_id', String(36), ForeignKey('consistencygroups.id'), nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('name', String(length=255)), Column('description', String(length=255)), Column('status', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8', ) cgsnapshots.create() # Add column to volumes table volumes = Table('volumes', meta, autoload=True) consistencygroup_id = Column('consistencygroup_id', String(36), ForeignKey('consistencygroups.id')) volumes.create_column(consistencygroup_id) volumes.update().values(consistencygroup_id=None).execute() # Add column to snapshots table snapshots = Table('snapshots', meta, autoload=True) cgsnapshot_id = Column('cgsnapshot_id', String(36), ForeignKey('cgsnapshots.id')) snapshots.create_column(cgsnapshot_id) snapshots.update().values(cgsnapshot_id=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/028_placeholder.py0000664000567000056710000000142312701406250027666 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/041_add_modified_at_column_to_service.py0000664000567000056710000000155212701406250034255 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
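# [hedged sketch] In 025 above, ordering is load-bearing: the parent tables
# (consistencygroups, cgsnapshots) must exist before the foreign-key columns that
# point at them can be added to volumes and snapshots. The reduced shape of that
# pattern, with a hypothetical parent table name:
from sqlalchemy import Column, ForeignKey, MetaData, String, Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    parent = Table('parent_example', meta,  # hypothetical parent table
                   Column('id', String(36), primary_key=True, nullable=False))
    parent.create()  # must exist before anything references it
    volumes = Table('volumes', meta, autoload=True)
    volumes.create_column(Column('parent_id', String(36),
                                 ForeignKey('parent_example.id')))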
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, DateTime, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine services = Table('services', meta, autoload=True) modified_at = Column('modified_at', DateTime(timezone=False)) services.create_column(modified_at) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/029_placeholder.py0000664000567000056710000000142312701406250027667 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Do not use this number for new Kilo work. New Kilo work starts after # all the placeholders. # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/043_placeholder.py0000664000567000056710000000153612701406250027670 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. # Do not use this number for new Liberty work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py0000664000567000056710000000554412701406250031506 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from migrate import ForeignKeyConstraint from sqlalchemy import MetaData, String, Table def upgrade(migrate_engine): """Convert volume_type_id to UUID.""" meta = MetaData() meta.bind = migrate_engine volumes = Table('volumes', meta, autoload=True) volume_types = Table('volume_types', meta, autoload=True) extra_specs = Table('volume_type_extra_specs', meta, autoload=True) fkey_remove_list = [volumes.c.volume_type_id, volume_types.c.id, extra_specs.c.volume_type_id] for column in fkey_remove_list: fkeys = list(column.foreign_keys) if fkeys: fkey_name = fkeys[0].constraint.name fkey = ForeignKeyConstraint(columns=[column], refcolumns=[volume_types.c.id], name=fkey_name) try: fkey.drop() except Exception: if migrate_engine.url.get_dialect().name.startswith('sqlite'): pass else: raise volumes.c.volume_type_id.alter(String(36)) volume_types.c.id.alter(String(36)) extra_specs.c.volume_type_id.alter(String(36)) vtype_list = list(volume_types.select().execute()) for t in vtype_list: new_id = str(uuid.uuid4()) volumes.update().\ where(volumes.c.volume_type_id == t['id']).\ values(volume_type_id=new_id).execute() extra_specs.update().\ where(extra_specs.c.volume_type_id == t['id']).\ values(volume_type_id=new_id).execute() volume_types.update().\ where(volume_types.c.id == t['id']).\ values(id=new_id).execute() for column in fkey_remove_list: fkeys = list(column.foreign_keys) if fkeys: fkey_name = fkeys[0].constraint.name fkey = ForeignKeyConstraint(columns=[column], refcolumns=[volume_types.c.id], name=fkey_name) try: fkey.create() except Exception: if migrate_engine.url.get_dialect().name.startswith('sqlite'): pass else: raise cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/072_placeholder.py0000664000567000056710000000153712701406250027673 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Mitaka backports. # Do not use this number for new Newton work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/047_add_per_volume_quota.py0000664000567000056710000000323412701406250031605 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from sqlalchemy import MetaData, Table # Get default value via config. 
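# [hedged sketch] Migration 004 above has to detach every foreign key before
# retyping the id columns, because engines refuse to ALTER a column that a
# constraint still references; the keys are then recreated under their original
# names once the data is rewritten. Condensed to a single column:
from migrate import ForeignKeyConstraint


def retype_with_fk_rebuild(column, refcolumn, new_type):
    fkeys = list(column.foreign_keys)
    fkey_name = fkeys[0].constraint.name if fkeys else None
    if fkey_name:
        ForeignKeyConstraint(columns=[column], refcolumns=[refcolumn],
                             name=fkey_name).drop()  # detach the constraint first
    column.alter(new_type)  # the retype now succeeds
    if fkey_name:
        ForeignKeyConstraint(columns=[column], refcolumns=[refcolumn],
                             name=fkey_name).create()  # reattach under the old name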
# The default will either come from the default value set in the quota # configuration option, or via cinder.conf if the user has configured a # default value for the per-volume size limit there. def upgrade(migrate_engine): """Add default "per_volume_gigabytes" row into DB.""" meta = MetaData() meta.bind = migrate_engine quota_classes = Table('quota_classes', meta, autoload=True) row = quota_classes.count().\ where(quota_classes.c.resource == 'per_volume_gigabytes').\ execute().scalar() # Do not add an entry if a 'default' entry already exists # in the database. # We don't want to write over something the user added. if row: return # Set default per_volume_gigabytes for per volume size qci = quota_classes.insert() qci.execute({'created_at': timeutils.utcnow(), 'class_name': 'default', 'resource': 'per_volume_gigabytes', 'hard_limit': -1, 'deleted': False, }) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/040_add_volume_attachment.py0000664000567000056710000000653312701406250031734 0ustar jenkinsjenkins00000000000000# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid import six from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import ForeignKey, MetaData, String, Table CREATED_AT = datetime.datetime.now() # noqa def upgrade(migrate_engine): """Add volume multi attachment table.""" meta = MetaData() meta.bind = migrate_engine # add the multiattach flag to the volumes table.
volumes = Table('volumes', meta, autoload=True) multiattach = Column('multiattach', Boolean) volumes.create_column(multiattach) volumes.update().values(multiattach=False).execute() # The new volume_attachment table volume_attachment = Table( 'volume_attachment', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', String(length=36), primary_key=True, nullable=False), Column('volume_id', String(length=36), ForeignKey('volumes.id'), nullable=False), Column('attached_host', String(length=255)), Column('instance_uuid', String(length=36)), Column('mountpoint', String(length=255)), Column('attach_time', DateTime), Column('detach_time', DateTime), Column('attach_mode', String(length=36)), Column('attach_status', String(length=255)), mysql_engine='InnoDB' ) volume_attachment.create() # now migrate existing volume attachment info into the # new volume_attachment table volumes_list = list(volumes.select().execute()) for volume in volumes_list: if volume.attach_status == 'attached': attachment = volume_attachment.insert() values = {'id': six.text_type(uuid.uuid4()), 'created_at': CREATED_AT, 'deleted_at': None, 'deleted': False, 'volume_id': volume.id, 'attached_host': volume.host, 'instance_uuid': volume.instance_uuid, 'mountpoint': volume.mountpoint, 'attach_time': volume.attach_time, 'attach_mode': 'rw', 'attach_status': 'attached', } attachment.execute(values) # we have no reason to keep the columns that now # exist in the volume_attachment table mountpoint = volumes.columns.mountpoint volumes.drop_column(mountpoint) instance_uuid = volumes.columns.instance_uuid volumes.drop_column(instance_uuid) attach_time = volumes.columns.attach_time volumes.drop_column(attach_time) attached_host = volumes.columns.attached_host volumes.drop_column(attached_host) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/069_placeholder.py0000664000567000056710000000153712701406250027701 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Mitaka backports. # Do not use this number for new Newton work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py0000664000567000056710000000262012701406250033672 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
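# [hedged sketch] Migration 040 above is a data migration as much as a DDL one:
# after creating volume_attachment it copies each attached volume's per-volume
# attachment columns into a new attachment row, then drops the now-duplicated
# columns. The essential backfill loop (error handling elided), assuming the
# sqlalchemy-migrate monkey patches are active as in 040 itself:
import uuid


def backfill_attachments(volumes, volume_attachment):
    for volume in volumes.select().execute():
        if volume.attach_status == 'attached':
            volume_attachment.insert().execute(
                id=str(uuid.uuid4()),
                volume_id=volume.id,
                instance_uuid=volume.instance_uuid,  # moved from volumes
                mountpoint=volume.mountpoint,
                attach_mode='rw',
                attach_status='attached')
    for old in ('mountpoint', 'instance_uuid', 'attach_time', 'attached_host'):
        volumes.drop_column(volumes.columns[old])  # remove the duplicated columns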
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import Integer, MetaData, String, Table, ForeignKey def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine Table('volumes', meta, autoload=True) # New table volume_admin_metadata = Table( 'volume_admin_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('volume_id', String(length=36), ForeignKey('volumes.id'), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) volume_admin_metadata.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py0000664000567000056710000000255412701406250033067 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import Integer, MetaData, String, Table, ForeignKey def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine Table('snapshots', meta, autoload=True) # New table snapshot_metadata = Table( 'snapshot_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('snapshot_id', String(length=36), ForeignKey('snapshots.id'), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), mysql_engine='InnoDB' ) snapshot_metadata.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/045_placeholder.py0000664000567000056710000000153612701406250027672 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. # Do not use this number for new Liberty work. New work starts after # all the placeholders. # # See this for more information: # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py0000664000567000056710000001015512701406250027714 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
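# [hedged sketch] Migrations 009 and 020 above stamp out the same key/value
# side-table recipe: timestamp and soft-delete bookkeeping, an integer surrogate
# key, a non-nullable FK back to the owning row, and free-form key/value strings.
# Factored out (the naive 'volumes' -> 'volume_id' singularization and the helper
# itself are illustrative):
from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Integer,
                        MetaData, String, Table)


def kv_side_table(meta, name, owner_table):
    return Table(
        name, meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),  # soft-delete flag; rows are never removed
        Column('id', Integer, primary_key=True, nullable=False),
        Column('%s_id' % owner_table[:-1], String(length=36),
               ForeignKey('%s.id' % owner_table), nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
        mysql_engine='InnoDB')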
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import MetaData, Integer, String, Table, ForeignKey def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # New table quota_classes = Table('quota_classes', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True), Column('class_name', String(length=255), index=True), Column('resource', String(length=255)), Column('hard_limit', Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) quota_classes.create() quota_usages = Table('quota_usages', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True), Column('project_id', String(length=255), index=True), Column('resource', String(length=255)), Column('in_use', Integer(), nullable=False), Column('reserved', Integer(), nullable=False), Column('until_refresh', Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) quota_usages.create() reservations = Table('reservations', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True), Column('uuid', String(length=36), nullable=False), Column('usage_id', Integer(), ForeignKey('quota_usages.id'), nullable=False), Column('project_id', String(length=255), index=True), Column('resource', String(length=255)), Column('delta', Integer(), nullable=False), Column('expire', DateTime(timezone=False)), mysql_engine='InnoDB', mysql_charset='utf8', ) reservations.create() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py0000664000567000056710000000351212701406250030213 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eBay Inc. # Copyright (C) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
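# [hedged illustration] The three tables 002 just created cooperate as a
# reservation ledger: a quota check appends a reservations row and adds its delta
# to quota_usages.reserved; committing moves the delta into in_use, rolling back
# subtracts it again. This is a toy in-memory model of that flow, not cinder's
# real quota API:
def reserve(usage, reservations, resource, delta):
    usage['reserved'] += delta  # hold the capacity before doing the work
    reservations.append({'resource': resource, 'delta': delta})


def commit(usage, reservation):
    usage['reserved'] -= reservation['delta']
    usage['in_use'] += reservation['delta']  # the hold becomes real usage


def rollback(usage, reservation):
    usage['reserved'] -= reservation['delta']  # release the hold entirely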
from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import ForeignKey, MetaData, String, Table def upgrade(migrate_engine): """Add quality_of_service_specs table.""" meta = MetaData() meta.bind = migrate_engine quality_of_service_specs = Table( 'quality_of_service_specs', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', String(36), primary_key=True, nullable=False), Column('specs_id', String(36), ForeignKey('quality_of_service_specs.id')), Column('key', String(255)), Column('value', String(255)), mysql_engine='InnoDB', mysql_charset='utf8' ) quality_of_service_specs.create() volume_types = Table('volume_types', meta, autoload=True) qos_specs_id = Column('qos_specs_id', String(36), ForeignKey('quality_of_service_specs.id')) volume_types.create_column(qos_specs_id) volume_types.update().values(qos_specs_id=None).execute() cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/064_add_restore_volume_id_to_backups.py0000664000567000056710000000171412701406250034157 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) restore_volume_id = Column('restore_volume_id', String(length=36)) backups.create_column(restore_volume_id) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/versions/033_add_encryption_unique_key.py0000664000567000056710000000755212701406250032651 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from migrate import PrimaryKeyConstraint, ForeignKeyConstraint from sqlalchemy import Column, MetaData, Table from sqlalchemy import String, Integer, Boolean, DateTime def upgrade(migrate_engine): """Add UUID primary key column to encryption.""" meta = MetaData() meta.bind = migrate_engine encryptions = Table('encryption', meta, autoload=True) # NOTE: SQLite doesn't support 'drop constraint' statement if migrate_engine.name == 'sqlite': _upgrade_sqlite(meta, encryptions) else: encryption_id_column_kwargs = {} if migrate_engine.name == 'ibm_db_sa': # NOTE(junxiebj): DB2 10.5 doesn't support primary key # constraints over nullable columns, so we have to # make the column non-nullable in the DB2 case.
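# [hedged sketch] quality_of_service_specs (018 above) is self-referential:
# specs_id holds the id of another row in the same table, so a single "name" row
# parents its key/value rows (the adjacency-list layout documented later in
# models.py). Reading one spec's entries is then a plain filtered select; the
# helper below is illustrative:
from sqlalchemy import select


def spec_entries(qos_table, engine, spec_uuid):
    rows = engine.execute(
        select([qos_table.c.key, qos_table.c.value])
        .where(qos_table.c.specs_id == spec_uuid))  # children of one spec row
    return dict(rows.fetchall())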
encryption_id_column_kwargs['nullable'] = False encryption_id = Column('encryption_id', String(36), **encryption_id_column_kwargs) encryptions.create_column(encryption_id) encryption_items = list(encryptions.select().execute()) for item in encryption_items: encryptions.update().\ where(encryptions.c.volume_type_id == item['volume_type_id']).\ values(encryption_id=str(uuid.uuid4())).execute() # NOTE (e0ne): need to drop FK first for MySQL if migrate_engine.name == 'mysql': ref_table = Table('volume_types', meta, autoload=True) params = {'columns': [encryptions.c['volume_type_id']], 'refcolumns': [ref_table.c['id']], 'name': 'encryption_ibfk_1'} volume_type_fk = ForeignKeyConstraint(**params) volume_type_fk.drop() volume_type_pk = PrimaryKeyConstraint('volume_type_id', table=encryptions) volume_type_pk.drop() pkey = PrimaryKeyConstraint(encryptions.columns.encryption_id) pkey.create() def _upgrade_sqlite(meta, encryptions): new_encryptions = Table( 'encryption_33', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('cipher', String(255)), Column('key_size', Integer), Column('provider', String(255)), Column('control_location', String(255)), Column('encryption_id', String(36), primary_key=True), Column('volume_type_id', String(36)) ) new_encryptions.create() encryption_items = list(encryptions.select().execute()) for item in encryption_items: new_encryptions.insert().\ values(created_at=item['created_at'], updated_at=item['updated_at'], deleted_at=item['deleted_at'], deleted=item['deleted'], cipher=item['cipher'], key_size=item['key_size'], provider=item['provider'], control_location=item['control_location'], encryption_id=str(uuid.uuid4()), volume_type_id=item['volume_type_id']).execute() encryptions.drop() new_encryptions.rename('encryption') cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/manage.py0000664000567000056710000000154112701406250024374 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from cinder.db.sqlalchemy import migrate_repo from migrate.versioning.shell import main if __name__ == '__main__': main(debug='False', repository=os.path.abspath(os.path.dirname(migrate_repo.__file__))) cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/__init__.py0000664000567000056710000000000012701406250024670 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/db/sqlalchemy/migrate_repo/migrate.cfg0000664000567000056710000000173512701406250024710 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=cinder # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. 
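# [hedged usage note] manage.py above simply re-exports the sqlalchemy-migrate
# shell with the cinder repository path pre-bound, so the usual migrate
# subcommands take only a database URL. The URL below is illustrative:
#
#     python manage.py version_control sqlite:///cinder.sqlite  # stamp a fresh DB
#     python manage.py upgrade sqlite:///cinder.sqlite          # run migrations
#     python manage.py db_version sqlite:///cinder.sqlite       # show current version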
# If this is changed once a database is under version control, you'll need to # change the table name in each database too. version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] cinder-8.0.0/cinder/db/sqlalchemy/__init__.py0000664000567000056710000000000012701406250022213 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/db/sqlalchemy/models.py0000664000567000056710000005421612701406250021761 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for cinder data. """ from oslo_config import cfg from oslo_db.sqlalchemy import models from oslo_utils import timeutils from sqlalchemy import Column, Integer, String, Text, schema from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import ForeignKey, DateTime, Boolean from sqlalchemy.orm import relationship, backref, validates CONF = cfg.CONF BASE = declarative_base() class CinderBase(models.TimestampMixin, models.ModelBase): """Base class for Cinder Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} # TODO(rpodolyaka): reuse models.SoftDeleteMixin in the next stage # of implementing of BP db-cleanup deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) metadata = None def delete(self, session): """Delete this object.""" self.deleted = True self.deleted_at = timeutils.utcnow() self.save(session=session) class Service(BASE, CinderBase): """Represents a running service on a host.""" __tablename__ = 'services' id = Column(Integer, primary_key=True) host = Column(String(255)) # , ForeignKey('hosts.id')) binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) availability_zone = Column(String(255), default='cinder') disabled_reason = Column(String(255)) # adding column modified_at to contain timestamp # for manual enable/disable of cinder services # updated_at column will now contain timestamps for # periodic updates modified_at = Column(DateTime) # Version columns to support rolling upgrade. These report the max RPC API # and objects versions that the manager of the service is able to support. 
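# [hedged illustration] CinderBase above makes deletion soft for every model:
# delete() sets deleted=True and stamps deleted_at instead of issuing a SQL
# DELETE, and the query helpers filter such rows out by default. Sketch of the
# effect (session plumbing elided):
#
#     service = session.query(Service).get(service_id)
#     service.delete(session=session)  # UPDATE ... SET deleted=1, deleted_at=now
#     # the row stays in the table; default queries simply stop returning it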
rpc_current_version = Column(String(36)) object_current_version = Column(String(36)) # FIXME(dulek): In M we've removed rpc_available_version and # object_available_version from the model. We need to merge a DB migration # that actually drops these columns from the DB in early Newton. # replication_status can be: enabled, disabled, not-capable, error, # failed-over or not-configured replication_status = Column(String(255), default="not-capable") active_backend_id = Column(String(255)) frozen = Column(Boolean, nullable=False, default=False) class ConsistencyGroup(BASE, CinderBase): """Represents a consistencygroup.""" __tablename__ = 'consistencygroups' id = Column(String(36), primary_key=True) user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) host = Column(String(255)) availability_zone = Column(String(255)) name = Column(String(255)) description = Column(String(255)) volume_type_id = Column(String(255)) status = Column(String(255)) cgsnapshot_id = Column(String(36)) source_cgid = Column(String(36)) class Cgsnapshot(BASE, CinderBase): """Represents a cgsnapshot.""" __tablename__ = 'cgsnapshots' id = Column(String(36), primary_key=True) consistencygroup_id = Column(String(36)) user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) name = Column(String(255)) description = Column(String(255)) status = Column(String(255)) consistencygroup = relationship( ConsistencyGroup, backref="cgsnapshots", foreign_keys=consistencygroup_id, primaryjoin='Cgsnapshot.consistencygroup_id == ConsistencyGroup.id') class Volume(BASE, CinderBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' id = Column(String(36), primary_key=True) _name_id = Column(String(36)) # Don't access/modify this directly! @property def name_id(self): return self.id if not self._name_id else self._name_id @name_id.setter def name_id(self, value): self._name_id = value @property def name(self): return CONF.volume_name_template % self.name_id ec2_id = Column(Integer) user_id = Column(String(255)) project_id = Column(String(255)) snapshot_id = Column(String(36)) host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? status = Column(String(255)) # TODO(vish): enum? 
attach_status = Column(String(255)) # TODO(vish): enum migration_status = Column(String(255)) scheduled_at = Column(DateTime) launched_at = Column(DateTime) terminated_at = Column(DateTime) display_name = Column(String(255)) display_description = Column(String(255)) provider_location = Column(String(255)) provider_auth = Column(String(255)) provider_geometry = Column(String(255)) provider_id = Column(String(255)) volume_type_id = Column(String(36)) source_volid = Column(String(36)) encryption_key_id = Column(String(36)) consistencygroup_id = Column(String(36)) bootable = Column(Boolean, default=False) multiattach = Column(Boolean, default=False) replication_status = Column(String(255)) replication_extended_status = Column(String(255)) replication_driver_data = Column(String(255)) previous_status = Column(String(255)) consistencygroup = relationship( ConsistencyGroup, backref="volumes", foreign_keys=consistencygroup_id, primaryjoin='Volume.consistencygroup_id == ConsistencyGroup.id') class VolumeMetadata(BASE, CinderBase): """Represents a metadata key/value pair for a volume.""" __tablename__ = 'volume_metadata' id = Column(Integer, primary_key=True) key = Column(String(255)) value = Column(String(255)) volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) volume = relationship(Volume, backref="volume_metadata", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeMetadata.volume_id == Volume.id,' 'VolumeMetadata.deleted == False)') class VolumeAdminMetadata(BASE, CinderBase): """Represents an administrator metadata key/value pair for a volume.""" __tablename__ = 'volume_admin_metadata' id = Column(Integer, primary_key=True) key = Column(String(255)) value = Column(String(255)) volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) volume = relationship(Volume, backref="volume_admin_metadata", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeAdminMetadata.volume_id == Volume.id,' 'VolumeAdminMetadata.deleted == False)') class VolumeAttachment(BASE, CinderBase): """Represents a volume attachment for a vm.""" __tablename__ = 'volume_attachment' id = Column(String(36), primary_key=True) volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) volume = relationship(Volume, backref="volume_attachment", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeAttachment.volume_id == Volume.id,' 'VolumeAttachment.deleted == False)') instance_uuid = Column(String(36)) attached_host = Column(String(255)) mountpoint = Column(String(255)) attach_time = Column(DateTime) detach_time = Column(DateTime) attach_status = Column(String(255)) attach_mode = Column(String(255)) class VolumeTypes(BASE, CinderBase): """Represent possible volume_types of volumes offered.""" __tablename__ = "volume_types" id = Column(String(36), primary_key=True) name = Column(String(255)) description = Column(String(255)) # A reference to qos_specs entity qos_specs_id = Column(String(36), ForeignKey('quality_of_service_specs.id')) is_public = Column(Boolean, default=True) volumes = relationship(Volume, backref=backref('volume_type', uselist=False), foreign_keys=id, primaryjoin='and_(' 'Volume.volume_type_id == VolumeTypes.id, ' 'VolumeTypes.deleted == False)') class VolumeTypeProjects(BASE, CinderBase): """Represent projects associated volume_types.""" __tablename__ = "volume_type_projects" __table_args__ = (schema.UniqueConstraint( "volume_type_id", "project_id", "deleted", name="uniq_volume_type_projects0volume_type_id0project_id0deleted"), ) id = Column(Integer, primary_key=True) 
volume_type_id = Column(Integer, ForeignKey('volume_types.id'), nullable=False) project_id = Column(String(255)) deleted = Column(Integer, default=0) volume_type = relationship( VolumeTypes, backref="projects", foreign_keys=volume_type_id, primaryjoin='and_(' 'VolumeTypeProjects.volume_type_id == VolumeTypes.id,' 'VolumeTypeProjects.deleted == 0)') class VolumeTypeExtraSpecs(BASE, CinderBase): """Represents additional specs as key/value pairs for a volume_type.""" __tablename__ = 'volume_type_extra_specs' id = Column(Integer, primary_key=True) key = Column(String(255)) value = Column(String(255)) volume_type_id = Column(String(36), ForeignKey('volume_types.id'), nullable=False) volume_type = relationship( VolumeTypes, backref="extra_specs", foreign_keys=volume_type_id, primaryjoin='and_(' 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,' 'VolumeTypeExtraSpecs.deleted == False)' ) class QualityOfServiceSpecs(BASE, CinderBase): """Represents QoS specs as key/value pairs. QoS specs is standalone entity that can be associated/disassociated with volume types (one to many relation). Adjacency list relationship pattern is used in this model in order to represent following hierarchical data with in flat table, e.g, following structure qos-specs-1 'Rate-Limit' | +------> consumer = 'front-end' +------> total_bytes_sec = 1048576 +------> total_iops_sec = 500 qos-specs-2 'QoS_Level1' | +------> consumer = 'back-end' +------> max-iops = 1000 +------> min-iops = 200 is represented by: id specs_id key value ------ -------- ------------- ----- UUID-1 NULL QoSSpec_Name Rate-Limit UUID-2 UUID-1 consumer front-end UUID-3 UUID-1 total_bytes_sec 1048576 UUID-4 UUID-1 total_iops_sec 500 UUID-5 NULL QoSSpec_Name QoS_Level1 UUID-6 UUID-5 consumer back-end UUID-7 UUID-5 max-iops 1000 UUID-8 UUID-5 min-iops 200 """ __tablename__ = 'quality_of_service_specs' id = Column(String(36), primary_key=True) specs_id = Column(String(36), ForeignKey(id)) key = Column(String(255)) value = Column(String(255)) specs = relationship( "QualityOfServiceSpecs", cascade="all, delete-orphan", backref=backref("qos_spec", remote_side=id), ) vol_types = relationship( VolumeTypes, backref=backref('qos_specs'), foreign_keys=id, primaryjoin='and_(' 'or_(VolumeTypes.qos_specs_id == ' 'QualityOfServiceSpecs.id,' 'VolumeTypes.qos_specs_id == ' 'QualityOfServiceSpecs.specs_id),' 'QualityOfServiceSpecs.deleted == False)') class VolumeGlanceMetadata(BASE, CinderBase): """Glance metadata for a bootable volume.""" __tablename__ = 'volume_glance_metadata' id = Column(Integer, primary_key=True, nullable=False) volume_id = Column(String(36), ForeignKey('volumes.id')) snapshot_id = Column(String(36), ForeignKey('snapshots.id')) key = Column(String(255)) value = Column(Text) volume = relationship(Volume, backref="volume_glance_metadata", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeGlanceMetadata.volume_id == Volume.id,' 'VolumeGlanceMetadata.deleted == False)') class Quota(BASE, CinderBase): """Represents a single quota override for a project. If there is no row for a given project id and resource, then the default for the quota class is used. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. 
""" __tablename__ = 'quotas' id = Column(Integer, primary_key=True) project_id = Column(String(255), index=True) resource = Column(String(255)) hard_limit = Column(Integer, nullable=True) allocated = Column(Integer, default=0) class QuotaClass(BASE, CinderBase): """Represents a single quota override for a quota class. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quota_classes' id = Column(Integer, primary_key=True) class_name = Column(String(255), index=True) resource = Column(String(255)) hard_limit = Column(Integer, nullable=True) class QuotaUsage(BASE, CinderBase): """Represents the current usage for a given resource.""" __tablename__ = 'quota_usages' id = Column(Integer, primary_key=True) project_id = Column(String(255), index=True) resource = Column(String(255)) in_use = Column(Integer) reserved = Column(Integer) @property def total(self): return self.in_use + self.reserved until_refresh = Column(Integer, nullable=True) class Reservation(BASE, CinderBase): """Represents a resource reservation for quotas.""" __tablename__ = 'reservations' id = Column(Integer, primary_key=True) uuid = Column(String(36), nullable=False) usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=True) allocated_id = Column(Integer, ForeignKey('quotas.id'), nullable=True) project_id = Column(String(255), index=True) resource = Column(String(255)) delta = Column(Integer) expire = Column(DateTime, nullable=False) usage = relationship( "QuotaUsage", foreign_keys=usage_id, primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' 'QuotaUsage.deleted == 0)') quota = relationship( "Quota", foreign_keys=allocated_id, primaryjoin='and_(Reservation.allocated_id == Quota.id)') class Snapshot(BASE, CinderBase): """Represents a snapshot of volume.""" __tablename__ = 'snapshots' id = Column(String(36), primary_key=True) @property def name(self): return CONF.snapshot_name_template % self.id @property def volume_name(self): return self.volume.name # pylint: disable=E1101 user_id = Column(String(255)) project_id = Column(String(255)) volume_id = Column(String(36)) cgsnapshot_id = Column(String(36)) status = Column(String(255)) progress = Column(String(255)) volume_size = Column(Integer) display_name = Column(String(255)) display_description = Column(String(255)) encryption_key_id = Column(String(36)) volume_type_id = Column(String(36)) provider_location = Column(String(255)) provider_id = Column(String(255)) provider_auth = Column(String(255)) volume = relationship(Volume, backref="snapshots", foreign_keys=volume_id, primaryjoin='Snapshot.volume_id == Volume.id') cgsnapshot = relationship( Cgsnapshot, backref="snapshots", foreign_keys=cgsnapshot_id, primaryjoin='Snapshot.cgsnapshot_id == Cgsnapshot.id') class SnapshotMetadata(BASE, CinderBase): """Represents a metadata key/value pair for a snapshot.""" __tablename__ = 'snapshot_metadata' id = Column(Integer, primary_key=True) key = Column(String(255)) value = Column(String(255)) snapshot_id = Column(String(36), ForeignKey('snapshots.id'), nullable=False) snapshot = relationship(Snapshot, backref="snapshot_metadata", foreign_keys=snapshot_id, primaryjoin='and_(' 'SnapshotMetadata.snapshot_id == Snapshot.id,' 'SnapshotMetadata.deleted == False)') class Backup(BASE, CinderBase): """Represents a backup of a volume to Swift.""" __tablename__ = 'backups' id = Column(String(36), primary_key=True) @property def 
name(self): return CONF.backup_name_template % self.id user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) volume_id = Column(String(36), nullable=False) host = Column(String(255)) availability_zone = Column(String(255)) display_name = Column(String(255)) display_description = Column(String(255)) container = Column(String(255)) parent_id = Column(String(36)) status = Column(String(255)) fail_reason = Column(String(255)) service_metadata = Column(String(255)) service = Column(String(255)) size = Column(Integer) object_count = Column(Integer) temp_volume_id = Column(String(36)) temp_snapshot_id = Column(String(36)) num_dependent_backups = Column(Integer) snapshot_id = Column(String(36)) data_timestamp = Column(DateTime) restore_volume_id = Column(String(36)) @validates('fail_reason') def validate_fail_reason(self, key, fail_reason): return fail_reason and fail_reason[:255] or '' class Encryption(BASE, CinderBase): """Represents encryption requirement for a volume type. Encryption here is a set of performance characteristics describing cipher, provider, and key_size for a certain volume type. """ __tablename__ = 'encryption' encryption_id = Column(String(36), primary_key=True) cipher = Column(String(255)) key_size = Column(Integer) provider = Column(String(255)) control_location = Column(String(255)) volume_type_id = Column(String(36), ForeignKey('volume_types.id')) volume_type = relationship( VolumeTypes, backref="encryption", foreign_keys=volume_type_id, primaryjoin='and_(' 'Encryption.volume_type_id == VolumeTypes.id,' 'Encryption.deleted == False)' ) class Transfer(BASE, CinderBase): """Represents a volume transfer request.""" __tablename__ = 'transfers' id = Column(String(36), primary_key=True) volume_id = Column(String(36), ForeignKey('volumes.id')) display_name = Column(String(255)) salt = Column(String(255)) crypt_hash = Column(String(255)) expires_at = Column(DateTime) volume = relationship(Volume, backref="transfer", foreign_keys=volume_id, primaryjoin='and_(' 'Transfer.volume_id == Volume.id,' 'Transfer.deleted == False)') class DriverInitiatorData(BASE, models.TimestampMixin, models.ModelBase): """Represents private key-value pair specific an initiator for drivers""" __tablename__ = 'driver_initiator_data' __table_args__ = ( schema.UniqueConstraint("initiator", "namespace", "key"), {'mysql_engine': 'InnoDB'} ) id = Column(Integer, primary_key=True, nullable=False) initiator = Column(String(255), index=True, nullable=False) namespace = Column(String(255), nullable=False) key = Column(String(255), nullable=False) value = Column(String(255)) class ImageVolumeCacheEntry(BASE, models.ModelBase): """Represents an image volume cache entry""" __tablename__ = 'image_volume_cache_entries' id = Column(Integer, primary_key=True, nullable=False) host = Column(String(255), index=True, nullable=False) image_id = Column(String(36), index=True, nullable=False) image_updated_at = Column(DateTime, nullable=False) volume_id = Column(String(36), nullable=False) size = Column(Integer, nullable=False) last_used = Column(DateTime, default=lambda: timeutils.utcnow()) def register_models(): """Register Models and create metadata. Called from cinder.db.sqlalchemy.__init__ as part of loading the driver, it will never need to be called explicitly elsewhere unless the connection is lost and needs to be reestablished. 
""" from sqlalchemy import create_engine models = (Backup, Service, Volume, VolumeMetadata, VolumeAdminMetadata, VolumeAttachment, SnapshotMetadata, Transfer, VolumeTypeExtraSpecs, VolumeTypes, VolumeGlanceMetadata, ConsistencyGroup, Cgsnapshot ) engine = create_engine(CONF.database.connection, echo=False) for model in models: model.metadata.create_all(engine) cinder-8.0.0/cinder/db/sqlalchemy/api.py0000664000567000056710000046661112701406257021264 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import collections import datetime as dt import functools import re import sys import threading import time import uuid from oslo_config import cfg from oslo_db import exception as db_exc from oslo_db import options from oslo_db.sqlalchemy import session as db_session from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') import six import sqlalchemy from sqlalchemy import MetaData from sqlalchemy import or_, and_, case from sqlalchemy.orm import joinedload, joinedload_all from sqlalchemy.orm import RelationshipProperty from sqlalchemy.schema import Table from sqlalchemy import sql from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import true from sqlalchemy.sql import func from sqlalchemy.sql import sqltypes from cinder.api import common from cinder.common import sqlalchemyutils from cinder import db from cinder.db.sqlalchemy import models from cinder import exception from cinder.i18n import _, _LW, _LE, _LI from cinder.objects import fields CONF = cfg.CONF LOG = logging.getLogger(__name__) options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite') _LOCK = threading.Lock() _FACADE = None def _create_facade_lazily(): global _LOCK with _LOCK: global _FACADE if _FACADE is None: _FACADE = db_session.EngineFacade( CONF.database.connection, **dict(CONF.database) ) # NOTE(geguileo): To avoid a cyclical dependency we import the # group here. 
Dependency cycle is objects.base requires db.api, # which requires db.sqlalchemy.api, which requires service which # requires objects.base CONF.import_group("profiler", "cinder.service") if CONF.profiler.enabled: if CONF.profiler.trace_sqlalchemy: osprofiler_sqlalchemy.add_tracing(sqlalchemy, _FACADE.get_engine(), "db") return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(**kwargs): facade = _create_facade_lazily() return facade.get_session(**kwargs) def dispose_engine(): get_engine().dispose() _DEFAULT_QUOTA_NAME = 'default' def get_backend(): """The backend is this module itself.""" return sys.modules[__name__] def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: LOG.warning(_LW('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_volume_exists(f): """Decorator to require the specified volume to exist. Requires the wrapped function to use context and volume_id as their first two arguments. """ def wrapper(context, volume_id, *args, **kwargs): volume_get(context, volume_id) return f(context, volume_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_snapshot_exists(f): """Decorator to require the specified snapshot to exist. Requires the wrapped function to use context and snapshot_id as their first two arguments. 
""" def wrapper(context, snapshot_id, *args, **kwargs): snapshot_get(context, snapshot_id) return f(context, snapshot_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def _retry_on_deadlock(f): """Decorator to retry a DB API call if Deadlock was received.""" @functools.wraps(f) def wrapped(*args, **kwargs): while True: try: return f(*args, **kwargs) except db_exc.DBDeadlock: LOG.warning(_LW("Deadlock detected when running " "'%(func_name)s': Retrying..."), dict(func_name=f.__name__)) # Retry! time.sleep(0.5) continue functools.update_wrapper(wrapped, f) return wrapped def handle_db_data_error(f): def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except db_exc.DBDataError: msg = _('Error writing field to database') LOG.exception(msg) raise exception.Invalid(msg) return wrapper def model_query(context, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. """ session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = session.query(*args) if read_deleted == 'no': query = query.filter_by(deleted=False) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter_by(deleted=True) elif read_deleted == 'int_no': query = query.filter_by(deleted=0) else: raise Exception( _("Unrecognized read_deleted value '%s'") % read_deleted) if project_only and is_user_context(context): query = query.filter_by(project_id=context.project_id) return query def _sync_volumes(context, project_id, session, volume_type_id=None, volume_type_name=None): (volumes, _gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'volumes' if volume_type_name: key += '_' + volume_type_name return {key: volumes} def _sync_snapshots(context, project_id, session, volume_type_id=None, volume_type_name=None): (snapshots, _gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'snapshots' if volume_type_name: key += '_' + volume_type_name return {key: snapshots} def _sync_backups(context, project_id, session, volume_type_id=None, volume_type_name=None): (backups, _gigs) = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'backups' return {key: backups} def _sync_gigabytes(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, vol_gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'gigabytes' if volume_type_name: key += '_' + volume_type_name if CONF.no_snapshot_gb_quota: return {key: vol_gigs} (_junk, snap_gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) return {key: vol_gigs + snap_gigs} def _sync_consistencygroups(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, groups) = _consistencygroup_data_get_for_project( context, project_id, session=session) key = 'consistencygroups' return {key: groups} def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None, 
volume_type_name=None): key = 'backup_gigabytes' (_junk, backup_gigs) = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) return {key: backup_gigs} QUOTA_SYNC_FUNCTIONS = { '_sync_volumes': _sync_volumes, '_sync_snapshots': _sync_snapshots, '_sync_gigabytes': _sync_gigabytes, '_sync_consistencygroups': _sync_consistencygroups, '_sync_backups': _sync_backups, '_sync_backup_gigabytes': _sync_backup_gigabytes } ################### @require_admin_context def service_destroy(context, service_id): session = get_session() with session.begin(): service_ref = _service_get(context, service_id, session=session) service_ref.delete(session=session) @require_admin_context def _service_get(context, service_id, session=None): result = model_query( context, models.Service, session=session).\ filter_by(id=service_id).\ first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def service_get(context, service_id): return _service_get(context, service_id) @require_admin_context def service_get_all(context, filters=None): if filters and not is_valid_model_filters(models.Service, filters): return [] query = model_query(context, models.Service) if filters: try: host = filters.pop('host') host_attr = models.Service.host conditions = or_(host_attr == host, host_attr.op('LIKE')(host + '@%')) query = query.filter(conditions) except KeyError: pass query = query.filter_by(**filters) return query.all() @require_admin_context def service_get_all_by_topic(context, topic, disabled=None): query = model_query( context, models.Service, read_deleted="no").\ filter_by(topic=topic) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_all_by_binary(context, binary, disabled=None): query = model_query( context, models.Service, read_deleted="no").filter_by(binary=binary) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): result = model_query( context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() if not result: raise exception.ServiceNotFound(service_id=topic, host=host) return result @require_admin_context def service_get_by_args(context, host, binary): results = model_query(context, models.Service).\ filter_by(host=host).\ filter_by(binary=binary).\ all() for result in results: if host == result['host']: return result raise exception.ServiceNotFound(service_id=binary, host=host) @require_admin_context def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not CONF.enable_new_services: service_ref.disabled = True session = get_session() with session.begin(): service_ref.save(session) return service_ref @require_admin_context @_retry_on_deadlock def service_update(context, service_id, values): session = get_session() with session.begin(): service_ref = _service_get(context, service_id, session=session) if ('disabled' in values): service_ref['modified_at'] = timeutils.utcnow() service_ref['updated_at'] = literal_column('updated_at') service_ref.update(values) return service_ref ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.items(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) 
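# NOTE(editor): _metadata_refs() expands a plain {key: value} dict into one
# ORM row object per pair. A minimal self-contained sketch of the same
# expansion, assuming a stand-in row class (FakeMeta is illustrative only,
# not a cinder model; cinder models support item assignment the same way):
#
#     class FakeMeta(dict):
#         """Stand-in for a metadata model row."""
#
#     def metadata_refs(metadata_dict, meta_class):
#         refs = []
#         for k, v in (metadata_dict or {}).items():
#             ref = meta_class()
#             ref['key'] = k
#             ref['value'] = v
#             refs.append(ref)
#         return refs
#
#     assert metadata_refs({'a': '1'}, FakeMeta) == [{'key': 'a', 'value': '1'}]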
return metadata_refs def _dict_with_extra_specs_if_authorized(context, inst_type_query): """Convert type query result to dict with extra_spec and rate_limit. Takes a volume type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts. NOTE the contents of extra-specs are admin readable only. If the context passed in for this request is not admin then we will return an empty extra-specs dict rather than providing the admin only details. Example response with admin context: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) if not is_admin_context(context): del(inst_type_dict['extra_specs']) else: extra_specs = {x['key']: x['value'] for x in inst_type_query['extra_specs']} inst_type_dict['extra_specs'] = extra_specs return inst_type_dict ################### @require_context def _quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get(context, project_id, resource): return _quota_get(context, project_id, resource) @require_context def quota_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_allocated_get_all_by_project(context, project_id): rows = model_query(context, models.Quota, read_deleted='no').filter_by( project_id=project_id).all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.allocated return result @require_context def _quota_get_by_resource(context, resource, session=None): rows = model_query(context, models.Quota, session=session, read_deleted='no').filter_by( resource=resource).all() return rows @require_admin_context def quota_create(context, project_id, resource, limit, allocated): quota_ref = models.Quota() quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit if allocated: quota_ref.allocated = allocated session = get_session() with session.begin(): quota_ref.save(session) return quota_ref @require_admin_context def quota_update(context, project_id, resource, limit): session = get_session() with session.begin(): quota_ref = _quota_get(context, project_id, resource, session=session) quota_ref.hard_limit = limit return quota_ref @require_context def quota_update_resource(context, old_res, new_res): session = get_session() with session.begin(): quotas = _quota_get_by_resource(context, old_res, session=session) for quota in quotas: quota.resource = new_res @require_admin_context def quota_allocated_update(context, project_id, resource, allocated): session = get_session() with session.begin(): quota_ref = _quota_get(context, project_id, resource, session=session) quota_ref.allocated = allocated return quota_ref @require_admin_context def quota_destroy(context, project_id, resource): session = get_session() with session.begin(): quota_ref = _quota_get(context, project_id, resource, session=session) quota_ref.delete(session=session) ################### @require_context def _quota_class_get(context, class_name, resource, 
session=None): result = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result @require_context def quota_class_get(context, class_name, resource): return _quota_class_get(context, class_name, resource) def quota_class_get_default(context): rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=_DEFAULT_QUOTA_NAME).all() result = {'class_name': _DEFAULT_QUOTA_NAME} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_class_get_all_by_name(context, class_name): authorize_quota_class_context(context, class_name) rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @require_context def _quota_class_get_all_by_resource(context, resource, session): result = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(resource=resource).\ all() return result @handle_db_data_error @require_admin_context def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit session = get_session() with session.begin(): quota_class_ref.save(session) return quota_class_ref @require_admin_context def quota_class_update(context, class_name, resource, limit): session = get_session() with session.begin(): quota_class_ref = _quota_class_get(context, class_name, resource, session=session) quota_class_ref.hard_limit = limit return quota_class_ref @require_context def quota_class_update_resource(context, old_res, new_res): session = get_session() with session.begin(): quota_class_list = _quota_class_get_all_by_resource( context, old_res, session) for quota_class in quota_class_list: quota_class.resource = new_res @require_admin_context def quota_class_destroy(context, class_name, resource): session = get_session() with session.begin(): quota_class_ref = _quota_class_get(context, class_name, resource, session=session) quota_class_ref.delete(session=session) @require_admin_context def quota_class_destroy_all_by_name(context, class_name): session = get_session() with session.begin(): quota_classes = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ all() for quota_class_ref in quota_classes: quota_class_ref.delete(session=session) ################### @require_context def quota_usage_get(context, project_id, resource): result = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaUsageNotFound(project_id=project_id) return result @require_context def quota_usage_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) return result @require_admin_context def _quota_usage_create(context, project_id, resource, in_use, reserved, until_refresh, session=None): quota_usage_ref = models.QuotaUsage() 
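# NOTE(editor): the quota_*_get_all_by_* helpers above all reduce their row
# lists to a flat {resource: limit} mapping with a 'project_id' (or
# 'class_name') entry mixed in. A standalone sketch of that fold over plain
# tuples (names illustrative, not the ORM rows themselves):
#
#     def rows_to_limits(project_id, rows):
#         # rows is an iterable of (resource, hard_limit) pairs
#         result = {'project_id': project_id}
#         for resource, hard_limit in rows:
#             result[resource] = hard_limit
#         return result
#
#     rows_to_limits('p1', [('volumes', 10), ('gigabytes', 1000)])
#     # -> {'project_id': 'p1', 'volumes': 10, 'gigabytes': 1000}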
quota_usage_ref.project_id = project_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh quota_usage_ref.save(session=session) return quota_usage_ref ################### def _reservation_create(context, uuid, usage, project_id, resource, delta, expire, session=None, allocated_id=None): usage_id = usage['id'] if usage else None reservation_ref = models.Reservation() reservation_ref.uuid = uuid reservation_ref.usage_id = usage_id reservation_ref.project_id = project_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.allocated_id = allocated_id reservation_ref.save(session=session) return reservation_ref ################### # NOTE(johannes): The quota code uses SQL locking to ensure races don't # cause under or over counting of resources. To avoid deadlocks, this # code always acquires the lock on quota_usages before acquiring the lock # on reservations. def _get_quota_usages(context, session, project_id): # Broken out for testability rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ order_by(models.QuotaUsage.id.asc()).\ with_lockmode('update').\ all() return {row.resource: row for row in rows} def _get_quota_usages_by_resource(context, session, resource): rows = model_query(context, models.QuotaUsage, deleted="no", session=session).\ filter_by(resource=resource).\ order_by(models.QuotaUsage.id.asc()).\ with_lockmode('update').\ all() return rows @require_context @_retry_on_deadlock def quota_usage_update_resource(context, old_res, new_res): session = get_session() with session.begin(): usages = _get_quota_usages_by_resource(context, session, old_res) for usage in usages: usage.resource = new_res usage.until_refresh = 1 @require_context @_retry_on_deadlock def quota_reserve(context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=None, is_allocated_reserve=False): elevated = context.elevated() session = get_session() with session.begin(): if project_id is None: project_id = context.project_id # Get the current usages usages = _get_quota_usages(context, session, project_id) allocated = quota_allocated_get_all_by_project(context, project_id) allocated.pop('project_id') # Handle usage refresh work = set(deltas.keys()) while work: resource = work.pop() # Do we need to refresh the usage? refresh = False if resource not in usages: usages[resource] = _quota_usage_create(elevated, project_id, resource, 0, 0, until_refresh or None, session=session) refresh = True elif usages[resource].in_use < 0: # Negative in_use count indicates a desync, so try to # heal from that... refresh = True elif usages[resource].until_refresh is not None: usages[resource].until_refresh -= 1 if usages[resource].until_refresh <= 0: refresh = True elif max_age and usages[resource].updated_at is not None and ( (usages[resource].updated_at - timeutils.utcnow()).seconds >= max_age): refresh = True # OK, refresh the usage if refresh: # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] volume_type_id = getattr(resources[resource], 'volume_type_id', None) volume_type_name = getattr(resources[resource], 'volume_type_name', None) updates = sync(elevated, project_id, volume_type_id=volume_type_id, volume_type_name=volume_type_name, session=session) for res, in_use in updates.items(): # Make sure we have a destination for the usage! 
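# NOTE(editor): the refresh decision earlier in this loop reduces to a small
# predicate. A self-contained sketch, assuming usage age is measured as
# now - updated_at (the Usage namedtuple here is illustrative, not the
# QuotaUsage model):
#
#     import collections
#
#     Usage = collections.namedtuple('Usage',
#                                    'in_use until_refresh updated_at')
#
#     def needs_refresh(usage, max_age, now):
#         if usage is None:
#             return True               # no usage row exists yet
#         if usage.in_use < 0:
#             return True               # negative in_use means desync
#         if usage.until_refresh is not None and usage.until_refresh <= 1:
#             return True               # per-call countdown expired
#         if (max_age and usage.updated_at is not None and
#                 (now - usage.updated_at).total_seconds() >= max_age):
#             return True               # row older than max_age seconds
#         return False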
if res not in usages: usages[res] = _quota_usage_create( elevated, project_id, res, 0, 0, until_refresh or None, session=session ) # Update the usage usages[res].in_use = in_use usages[res].until_refresh = until_refresh or None # Because more than one resource may be refreshed # by the call to the sync routine, and we don't # want to double-sync, we make sure all refreshed # resources are dropped from the work set. work.discard(res) # NOTE(Vek): We make the assumption that the sync # routine actually refreshes the # resources that it is the sync routine # for. We don't check, because this is # a best-effort mechanism. # Check for deltas that would go negative if is_allocated_reserve: unders = [r for r, delta in deltas.items() if delta < 0 and delta + allocated.get(r, 0) < 0] else: unders = [r for r, delta in deltas.items() if delta < 0 and delta + usages[r].in_use < 0] # TODO(mc_nair): Should ignore/zero alloc if using non-nested driver # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. overs = [r for r, delta in deltas.items() if quotas[r] >= 0 and delta >= 0 and quotas[r] < delta + usages[r].total + allocated.get(r, 0)] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. # Create the reservations if not overs: reservations = [] for resource, delta in deltas.items(): usage = usages[resource] allocated_id = None if is_allocated_reserve: try: quota = _quota_get(context, project_id, resource, session=session) except exception.ProjectQuotaNotFound: # If we were using the default quota, create DB entry quota = quota_create(context, project_id, resource, quotas[resource], 0) # Since there's no reserved/total for allocated, update # allocated immediately and subtract on rollback if needed quota_allocated_update(context, project_id, resource, quota.allocated + delta) allocated_id = quota.id usage = None reservation = _reservation_create( elevated, str(uuid.uuid4()), usage, project_id, resource, delta, expire, session=session, allocated_id=allocated_id) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. # 2) User allocates a new instance. # 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. 
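# NOTE(editor): quota_reserve(), reservation_commit() and
# reservation_rollback() together form a two-phase pattern over the
# (in_use, reserved) pair. The arithmetic for a positive delta, in
# miniature (plain ints, illustrative only):
#
#     in_use, reserved, delta = 3, 0, 2
#     reserved += delta                   # quota_reserve
#     # on commit: consume the reservation
#     reserved -= delta; in_use += delta  # reservation_commit
#     # on rollback instead: only the reservation is released
#     # reserved -= delta                 # reservation_rollback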
if delta > 0 and not is_allocated_reserve: usages[resource].reserved += delta if unders: LOG.warning(_LW("Change will make usage less than 0 for the following " "resources: %s"), unders) if overs: usages = {k: dict(in_use=v.in_use, reserved=v.reserved, allocated=allocated.get(k, 0)) for k, v in usages.items()} raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages=usages) return reservations def _quota_reservations(session, context, reservations): """Return the relevant reservations.""" # Get the listed reservations return model_query(context, models.Reservation, read_deleted="no", session=session).\ filter(models.Reservation.uuid.in_(reservations)).\ with_lockmode('update').\ all() def _dict_with_usage_id(usages): return {row.id: row for row in usages.values()} @require_context @_retry_on_deadlock def reservation_commit(context, reservations, project_id=None): session = get_session() with session.begin(): usages = _get_quota_usages(context, session, project_id) usages = _dict_with_usage_id(usages) for reservation in _quota_reservations(session, context, reservations): # Allocated reservations will have already been bumped if not reservation.allocated_id: usage = usages[reservation.usage_id] if reservation.delta >= 0: usage.reserved -= reservation.delta usage.in_use += reservation.delta reservation.delete(session=session) @require_context @_retry_on_deadlock def reservation_rollback(context, reservations, project_id=None): session = get_session() with session.begin(): usages = _get_quota_usages(context, session, project_id) usages = _dict_with_usage_id(usages) for reservation in _quota_reservations(session, context, reservations): if reservation.allocated_id: reservation.quota.allocated -= reservation.delta else: usage = usages[reservation.usage_id] if reservation.delta >= 0: usage.reserved -= reservation.delta reservation.delete(session=session) def quota_destroy_by_project(*args, **kwargs): """Destroy all limit quotas associated with a project. Leaves usage and reservation quotas intact. """ quota_destroy_all_by_project(only_quotas=True, *args, **kwargs) @require_admin_context @_retry_on_deadlock def quota_destroy_all_by_project(context, project_id, only_quotas=False): """Destroy all quotas associated with a project. This includes limit quotas, usage quotas and reservation quotas. Optionally can only remove limit quotas and leave other types as they are. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param only_quotas: Only delete limit quotas, leave other types intact. 
""" session = get_session() with session.begin(): quotas = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for quota_ref in quotas: quota_ref.delete(session=session) if only_quotas: return quota_usages = model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for quota_usage_ref in quota_usages: quota_usage_ref.delete(session=session) reservations = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for reservation_ref in reservations: reservation_ref.delete(session=session) @require_admin_context @_retry_on_deadlock def reservation_expire(context): session = get_session() with session.begin(): current_time = timeutils.utcnow() results = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter(models.Reservation.expire < current_time).\ all() if results: for reservation in results: if reservation.delta >= 0: if reservation.allocated_id: reservation.quota.allocated -= reservation.delta reservation.quota.save(session=session) else: reservation.usage.reserved -= reservation.delta reservation.usage.save(session=session) reservation.delete(session=session) ################### @require_admin_context def volume_attach(context, values): volume_attachment_ref = models.VolumeAttachment() if not values.get('id'): values['id'] = str(uuid.uuid4()) volume_attachment_ref.update(values) session = get_session() with session.begin(): volume_attachment_ref.save(session=session) return volume_attachment_get(context, values['id'], session=session) @require_admin_context def volume_attached(context, attachment_id, instance_uuid, host_name, mountpoint, attach_mode='rw'): """This method updates a volume attachment entry. This function saves the information related to a particular attachment for a volume. It also updates the volume record to mark the volume as attached. 
""" if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) session = get_session() with session.begin(): volume_attachment_ref = volume_attachment_get(context, attachment_id, session=session) volume_attachment_ref['mountpoint'] = mountpoint volume_attachment_ref['attach_status'] = 'attached' volume_attachment_ref['instance_uuid'] = instance_uuid volume_attachment_ref['attached_host'] = host_name volume_attachment_ref['attach_time'] = timeutils.utcnow() volume_attachment_ref['attach_mode'] = attach_mode volume_ref = _volume_get(context, volume_attachment_ref['volume_id'], session=session) volume_attachment_ref.save(session=session) volume_ref['status'] = 'in-use' volume_ref['attach_status'] = 'attached' volume_ref.save(session=session) return volume_ref @handle_db_data_error @require_context def volume_create(context, values): values['volume_metadata'] = _metadata_refs(values.get('metadata'), models.VolumeMetadata) if is_admin_context(context): values['volume_admin_metadata'] = \ _metadata_refs(values.get('admin_metadata'), models.VolumeAdminMetadata) elif values.get('volume_admin_metadata'): del values['volume_admin_metadata'] volume_ref = models.Volume() if not values.get('id'): values['id'] = str(uuid.uuid4()) volume_ref.update(values) session = get_session() with session.begin(): session.add(volume_ref) return _volume_get(context, values['id'], session=session) def get_booleans_for_table(table_name): booleans = set() table = getattr(models, table_name.capitalize()) if hasattr(table, '__table__'): columns = table.__table__.columns for column in columns: if isinstance(column.type, sqltypes.Boolean): booleans.add(column.name) return booleans @require_admin_context def volume_data_get_for_host(context, host, count_only=False): host_attr = models.Volume.host conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] if count_only: result = model_query(context, func.count(models.Volume.id), read_deleted="no").filter( or_(*conditions)).first() return result[0] or 0 else: result = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no").filter( or_(*conditions)).first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def _volume_data_get_for_project(context, project_id, volume_type_id=None, session=None): query = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if volume_type_id: query = query.filter_by(volume_type_id=volume_type_id) result = query.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def _backup_data_get_for_project(context, project_id, volume_type_id=None, session=None): query = model_query(context, func.count(models.Backup.id), func.sum(models.Backup.size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if volume_type_id: query = query.filter_by(volume_type_id=volume_type_id) result = query.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def volume_data_get_for_project(context, project_id, volume_type_id=None): return _volume_data_get_for_project(context, project_id, volume_type_id) @require_admin_context @_retry_on_deadlock def volume_destroy(context, volume_id): session = get_session() now = timeutils.utcnow() with session.begin(): model_query(context, models.Volume, session=session).\ 
filter_by(id=volume_id).\ update({'status': 'deleted', 'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at'), 'migration_status': None}) model_query(context, models.VolumeMetadata, session=session).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at')}) model_query(context, models.VolumeAdminMetadata, session=session).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at')}) model_query(context, models.Transfer, session=session).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at')}) @require_admin_context def volume_detached(context, volume_id, attachment_id): """This updates a volume attachment and marks it as detached. This method also ensures that the volume entry is correctly marked as either still attached/in-use or detached/available if this was the last detachment made. """ session = get_session() with session.begin(): attachment = None try: attachment = volume_attachment_get(context, attachment_id, session=session) except exception.VolumeAttachmentNotFound: pass # If this is already detached, attachment will be None if attachment: now = timeutils.utcnow() attachment['attach_status'] = 'detached' attachment['detach_time'] = now attachment['deleted'] = True attachment['deleted_at'] = now attachment.save(session=session) attachment_list = volume_attachment_get_used_by_volume_id( context, volume_id, session=session) remain_attachment = False if attachment_list and len(attachment_list) > 0: remain_attachment = True volume_ref = _volume_get(context, volume_id, session=session) if not remain_attachment: # Hide status update from user if we're performing volume migration # or uploading it to image if ((not volume_ref['migration_status'] and not (volume_ref['status'] == 'uploading')) or volume_ref['migration_status'] in ('success', 'error')): volume_ref['status'] = 'available' volume_ref['attach_status'] = 'detached' volume_ref.save(session=session) else: # Volume is still attached volume_ref['status'] = 'in-use' volume_ref['attach_status'] = 'attached' volume_ref.save(session=session) @require_context def _volume_get_query(context, session=None, project_only=False, joined_load=True): """Get the query to retrieve the volume. :param context: the context used to run the method _volume_get_query :param session: the session to use :param project_only: the boolean used to decide whether to query the volume in the current project or all projects :param joined_load: the boolean used to decide whether the query loads the other models, which join the volume model in the database. 
Currently, the False value for this parameter is specially for the case of updating database during volume migration :returns: updated query or None """ if not joined_load: return model_query(context, models.Volume, session=session, project_only=project_only) if is_admin_context(context): return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_admin_metadata')).\ options(joinedload('volume_type')).\ options(joinedload('volume_attachment')).\ options(joinedload('consistencygroup')) else: return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ options(joinedload('volume_attachment')).\ options(joinedload('consistencygroup')) @require_context def _volume_get(context, volume_id, session=None, joined_load=True): result = _volume_get_query(context, session=session, project_only=True, joined_load=joined_load) if joined_load: result = result.options(joinedload('volume_type.extra_specs')) result = result.filter_by(id=volume_id).first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result @require_context def volume_attachment_get(context, attachment_id, session=None): result = model_query(context, models.VolumeAttachment, session=session).\ filter_by(id=attachment_id).\ first() if not result: raise exception.VolumeAttachmentNotFound(filter='attachment_id = %s' % attachment_id) return result @require_context def volume_attachment_get_used_by_volume_id(context, volume_id, session=None): result = model_query(context, models.VolumeAttachment, session=session).\ filter_by(volume_id=volume_id).\ filter(models.VolumeAttachment.attach_status != 'detached').\ all() return result @require_context def volume_attachment_get_by_host(context, volume_id, host): session = get_session() with session.begin(): result = model_query(context, models.VolumeAttachment, session=session).\ filter_by(volume_id=volume_id).\ filter_by(attached_host=host).\ filter(models.VolumeAttachment.attach_status != 'detached').\ first() return result @require_context def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid): session = get_session() with session.begin(): result = model_query(context, models.VolumeAttachment, session=session).\ filter_by(volume_id=volume_id).\ filter_by(instance_uuid=instance_uuid).\ filter(models.VolumeAttachment.attach_status != 'detached').\ first() return result @require_context def volume_get(context, volume_id): return _volume_get(context, volume_id) @require_admin_context def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all volumes. If no sort parameters are specified then the returned volumes are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset) # No volumes would match, return empty list if query is None: return [] return query.all() @require_admin_context def volume_get_all_by_host(context, host, filters=None): """Retrieves all volumes hosted on a host. :param context: context to query under :param host: host for all volumes being retrieved :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ # As a side effect of the introduction of pool-aware scheduler, # newly created volumes will have pool information appended to # 'host' field of a volume record. So a volume record in DB can # now be either form below: # Host # Host#Pool if host and isinstance(host, six.string_types): session = get_session() with session.begin(): host_attr = getattr(models.Volume, 'host') conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] query = _volume_get_query(context).filter(or_(*conditions)) if filters: query = _process_volume_filters(query, filters) # No volumes would match, return empty list if query is None: return [] return query.all() elif not host: return [] @require_context def volume_get_all_by_group(context, group_id, filters=None): """Retrieves all volumes associated with the group_id. :param context: context to query under :param group_id: group ID for all volumes being retrieved :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ query = _volume_get_query(context).filter_by(consistencygroup_id=group_id) if filters: query = _process_volume_filters(query, filters) # No volumes would match, return empty list if query is None: return [] return query.all() @require_context def volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all volumes in a project. If no sort parameters are specified then the returned volumes are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param project_id: project for all volumes being retrieved :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ session = get_session() with session.begin(): authorize_project_context(context, project_id) # Add in the project filter without modifying the given filters filters = filters.copy() if filters else {} filters['project_id'] = project_id # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset) # No volumes would match, return empty list if query is None: return [] return query.all() def _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset=None, paginate_type=models.Volume): """Generate the query to include the filters and the paginate options. Returns a query with sorting / pagination criteria added or None if the given filters will not yield any results. :param context: context to query under :param session: the session to use :param marker: the last item of the previous page; we returns the next results after this value. :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :param offset: number of items to skip :param paginate_type: type of pagination to generate :returns: updated query or None """ get_query, process_filters, get = PAGINATION_HELPERS[paginate_type] sort_keys, sort_dirs = process_sort_params(sort_keys, sort_dirs, default_dir='desc') query = get_query(context, session=session) if filters: query = process_filters(query, filters) if query is None: return None marker_object = None if marker is not None: marker_object = get(context, marker, session) return sqlalchemyutils.paginate_query(query, paginate_type, limit, sort_keys, marker=marker_object, sort_dirs=sort_dirs, offset=offset) def _process_volume_filters(query, filters): """Common filter processing for Volume queries. Filter values that are in lists, tuples, or sets cause an 'IN' operator to be used, while exact matching ('==' operator) is used for other values. A filter key/value of 'no_migration_targets'=True causes volumes with either a NULL 'migration_status' or a 'migration_status' that does not start with 'target:' to be retrieved. A 'metadata' filter key must correspond to a dictionary value of metadata key-value pairs. 
:param query: Model query to use :param filters: dictionary of filters :returns: updated query or None """ filters = filters.copy() # 'no_migration_targets' is unique, must be either NULL or # not start with 'target:' if filters.get('no_migration_targets', False): filters.pop('no_migration_targets') try: column_attr = getattr(models.Volume, 'migration_status') conditions = [column_attr == None, # noqa column_attr.op('NOT LIKE')('target:%')] query = query.filter(or_(*conditions)) except AttributeError: LOG.debug("'migration_status' column could not be found.") return None # Apply exact match filters for everything else, ensure that the # filter value exists on the model for key in filters.keys(): # metadata is unique, must be a dict if key == 'metadata': if not isinstance(filters[key], dict): LOG.debug("'metadata' filter value is not valid.") return None continue try: column_attr = getattr(models.Volume, key) # Do not allow relationship properties since those require # schema specific knowledge prop = getattr(column_attr, 'property') if isinstance(prop, RelationshipProperty): LOG.debug(("'%s' filter key is not valid, " "it maps to a relationship."), key) return None except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return None # Holds the simple exact matches filter_dict = {} # Iterate over all filters, special case the filter if necessary for key, value in filters.items(): if key == 'metadata': # model.VolumeMetadata defines the backref to Volumes as # 'volume_metadata' or 'volume_admin_metadata', use those as # column attribute keys col_attr = getattr(models.Volume, 'volume_metadata') col_ad_attr = getattr(models.Volume, 'volume_admin_metadata') for k, v in value.items(): query = query.filter(or_(col_attr.any(key=k, value=v), col_ad_attr.any(key=k, value=v))) elif isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(models.Volume, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query def process_sort_params(sort_keys, sort_dirs, default_keys=None, default_dir='asc'): """Process the sort parameters to include default keys. Creates a list of sort keys and a list of sort directions. Adds the default keys to the end of the list if they are not already included. When adding the default keys to the sort keys list, the associated direction is: 1) The first element in the 'sort_dirs' list (if specified), else 2) 'default_dir' value (Note that 'asc' is the default value since this is the default in sqlalchemy.utils.paginate_query) :param sort_keys: List of sort keys to include in the processed list :param sort_dirs: List of sort directions to include in the processed list :param default_keys: List of sort keys that need to be included in the processed list, they are added at the end of the list if not already specified. 
:param default_dir: Sort direction associated with each of the default keys that are not supplied, used when they are added to the processed list :returns: list of sort keys, list of sort directions :raise exception.InvalidInput: If more sort directions than sort keys are specified or if an invalid sort direction is specified """ if default_keys is None: default_keys = ['created_at', 'id'] # Determine direction to use for when adding default keys if sort_dirs and len(sort_dirs): default_dir_value = sort_dirs[0] else: default_dir_value = default_dir # Create list of keys (do not modify the input list) if sort_keys: result_keys = list(sort_keys) else: result_keys = [] # If a list of directions is not provided, use the default sort direction # for all provided keys. if sort_dirs: result_dirs = [] # Verify sort direction for sort_dir in sort_dirs: if sort_dir not in ('asc', 'desc'): msg = _("Unknown sort direction, must be 'desc' or 'asc'.") raise exception.InvalidInput(reason=msg) result_dirs.append(sort_dir) else: result_dirs = [default_dir_value for _sort_key in result_keys] # Ensure that the key and direction length match while len(result_dirs) < len(result_keys): result_dirs.append(default_dir_value) # Unless more direction are specified, which is an error if len(result_dirs) > len(result_keys): msg = _("Sort direction array size exceeds sort key array size.") raise exception.InvalidInput(reason=msg) # Ensure defaults are included for key in default_keys: if key not in result_keys: result_keys.append(key) result_dirs.append(default_dir_value) return result_keys, result_dirs @handle_db_data_error @require_context def volume_update(context, volume_id, values): session = get_session() with session.begin(): metadata = values.get('metadata') if metadata is not None: _volume_user_metadata_update(context, volume_id, values.pop('metadata'), delete=True, session=session) admin_metadata = values.get('admin_metadata') if is_admin_context(context) and admin_metadata is not None: _volume_admin_metadata_update(context, volume_id, values.pop('admin_metadata'), delete=True, session=session) volume_ref = _volume_get(context, volume_id, session=session) volume_ref.update(values) return volume_ref @require_context def volume_attachment_update(context, attachment_id, values): session = get_session() with session.begin(): volume_attachment_ref = volume_attachment_get(context, attachment_id, session=session) volume_attachment_ref.update(values) volume_attachment_ref.save(session=session) return volume_attachment_ref def volume_update_status_based_on_attachment(context, volume_id): """Update volume status based on attachment. Get volume and check if 'volume_attachment' parameter is present in volume. If 'volume_attachment' is None then set volume status to 'available' else set volume status to 'in-use'. :param context: context to query under :param volume_id: id of volume to be updated :returns: updated volume """ session = get_session() with session.begin(): volume_ref = _volume_get(context, volume_id, session=session) # We need to get and update volume using same session because # there is possibility that instance is deleted between the 'get' # and 'update' volume call. 
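# NOTE(editor): the branch below is a pure function of the attachment list;
# condensed to its essence (illustrative):
#
#     def status_for(attachments):
#         return 'in-use' if attachments else 'available'
#
#     status_for([])            # -> 'available'
#     status_for(['attach-1'])  # -> 'in-use'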
if not volume_ref['volume_attachment']: volume_ref.update({'status': 'available'}) else: volume_ref.update({'status': 'in-use'}) return volume_ref def volume_has_snapshots_filter(): return sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted)) def volume_has_undeletable_snapshots_filter(): deletable_statuses = ['available', 'error'] return sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted, or_(models.Snapshot.cgsnapshot_id != None, # noqa: != None models.Snapshot.status.notin_(deletable_statuses)))) def volume_has_attachments_filter(): return sql.exists().where( and_(models.Volume.id == models.VolumeAttachment.volume_id, models.VolumeAttachment.attach_status != 'detached', ~models.VolumeAttachment.deleted)) #################### def _volume_x_metadata_get_query(context, volume_id, model, session=None): return model_query(context, model, session=session, read_deleted="no").\ filter_by(volume_id=volume_id) def _volume_x_metadata_get(context, volume_id, model, session=None): rows = _volume_x_metadata_get_query(context, volume_id, model, session=session).all() result = {} for row in rows: result[row['key']] = row['value'] return result def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec, session=None): result = _volume_x_metadata_get_query(context, volume_id, model, session=session).\ filter_by(key=key).\ first() if not result: if model is models.VolumeGlanceMetadata: raise notfound_exec(id=volume_id) else: raise notfound_exec(metadata_key=key, volume_id=volume_id) return result def _volume_x_metadata_update(context, volume_id, metadata, delete, model, session=None, add=True, update=True): session = session or get_session() metadata = metadata.copy() with session.begin(subtransactions=True): # Set existing metadata to deleted if delete argument is True. This is # committed immediately to the DB if delete: expected_values = {'volume_id': volume_id} # We don't want to delete keys we are going to update if metadata: expected_values['key'] = db.Not(metadata.keys()) conditional_update(context, model, {'deleted': True}, expected_values) # Get existing metadata db_meta = _volume_x_metadata_get_query(context, volume_id, model).all() save = [] skip = [] # We only want to send changed metadata. 
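# NOTE(editor): the loop below partitions existing rows into changed and
# unchanged, then appends brand-new keys, so only modified rows reach the
# bulk save. The same bookkeeping over plain dicts (illustrative sketch):
#
#     def split_meta(existing, incoming):
#         changed = {k: v for k, v in incoming.items()
#                    if k in existing and existing[k] != v}
#         added = {k: v for k, v in incoming.items() if k not in existing}
#         return changed, added
#
#     split_meta({'a': '1', 'b': '2'}, {'a': '1', 'b': '3', 'c': '4'})
#     # -> ({'b': '3'}, {'c': '4'})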
for row in db_meta: if row.key in metadata: value = metadata.pop(row.key) if row.value != value and update: # ORM objects will not be saved until we do the bulk save row.value = value save.append(row) continue skip.append(row) # We also want to save non-existent metadata if add: save.extend(model(key=key, value=value, volume_id=volume_id) for key, value in metadata.items()) # Do a bulk save if save: session.bulk_save_objects(save, update_changed_only=True) # Construct result dictionary with current metadata save.extend(skip) result = {row['key']: row['value'] for row in save} return result def _volume_user_metadata_get_query(context, volume_id, session=None): return _volume_x_metadata_get_query(context, volume_id, models.VolumeMetadata, session=session) def _volume_image_metadata_get_query(context, volume_id, session=None): return _volume_x_metadata_get_query(context, volume_id, models.VolumeGlanceMetadata, session=session) @require_context @require_volume_exists def _volume_user_metadata_get(context, volume_id, session=None): return _volume_x_metadata_get(context, volume_id, models.VolumeMetadata, session=session) @require_context def _volume_user_metadata_get_item(context, volume_id, key, session=None): return _volume_x_metadata_get_item(context, volume_id, key, models.VolumeMetadata, exception.VolumeMetadataNotFound, session=session) @require_context @require_volume_exists def _volume_user_metadata_update(context, volume_id, metadata, delete, session=None): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeMetadata, session=session) @require_context @require_volume_exists def _volume_image_metadata_update(context, volume_id, metadata, delete, session=None): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeGlanceMetadata, session=session) @require_context def _volume_glance_metadata_key_to_id(context, volume_id, key): db_data = volume_glance_metadata_get(context, volume_id) metadata = {meta_entry.key: meta_entry.id for meta_entry in db_data if meta_entry.key == key} metadata_id = metadata[key] return metadata_id @require_context @require_volume_exists def volume_metadata_get(context, volume_id): return _volume_user_metadata_get(context, volume_id) @require_context @require_volume_exists @_retry_on_deadlock def volume_metadata_delete(context, volume_id, key, meta_type): if meta_type == common.METADATA_TYPES.user: (_volume_user_metadata_get_query(context, volume_id). filter_by(key=key). update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')})) elif meta_type == common.METADATA_TYPES.image: metadata_id = _volume_glance_metadata_key_to_id(context, volume_id, key) (_volume_image_metadata_get_query(context, volume_id). filter_by(id=metadata_id). 
update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')})) else: raise exception.InvalidMetadataType(metadata_type=meta_type, id=volume_id) @require_context @require_volume_exists @_retry_on_deadlock def volume_metadata_update(context, volume_id, metadata, delete, meta_type): if meta_type == common.METADATA_TYPES.user: return _volume_user_metadata_update(context, volume_id, metadata, delete) elif meta_type == common.METADATA_TYPES.image: return _volume_image_metadata_update(context, volume_id, metadata, delete) else: raise exception.InvalidMetadataType(metadata_type=meta_type, id=volume_id) ################### def _volume_admin_metadata_get_query(context, volume_id, session=None): return _volume_x_metadata_get_query(context, volume_id, models.VolumeAdminMetadata, session=session) @require_admin_context @require_volume_exists def _volume_admin_metadata_get(context, volume_id, session=None): return _volume_x_metadata_get(context, volume_id, models.VolumeAdminMetadata, session=session) @require_admin_context @require_volume_exists def _volume_admin_metadata_update(context, volume_id, metadata, delete, session=None, add=True, update=True): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeAdminMetadata, session=session, add=add, update=update) @require_admin_context @require_volume_exists def volume_admin_metadata_get(context, volume_id): return _volume_admin_metadata_get(context, volume_id) @require_admin_context @require_volume_exists @_retry_on_deadlock def volume_admin_metadata_delete(context, volume_id, key): _volume_admin_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context @require_volume_exists @_retry_on_deadlock def volume_admin_metadata_update(context, volume_id, metadata, delete, add=True, update=True): return _volume_admin_metadata_update(context, volume_id, metadata, delete, add=add, update=update) ################### @handle_db_data_error @require_context def snapshot_create(context, values): values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), models.SnapshotMetadata) if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): snapshot_ref = models.Snapshot() snapshot_ref.update(values) session.add(snapshot_ref) return _snapshot_get(context, values['id'], session=session) @require_admin_context @_retry_on_deadlock def snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): model_query(context, models.Snapshot, session=session).\ filter_by(id=snapshot_id).\ update({'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) model_query(context, models.SnapshotMetadata, session=session).\ filter_by(snapshot_id=snapshot_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.Snapshot, session=session, project_only=True).\ options(joinedload('volume')).\ options(joinedload('snapshot_metadata')).\ filter_by(id=snapshot_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_context def snapshot_get(context, snapshot_id): return _snapshot_get(context, snapshot_id) @require_admin_context def 
snapshot_get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """Retrieves all snapshots. If no sorting parameters are specified then returned snapshots are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param filters: dictionary of filters; will do exact matching on values :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :returns: list of matching snapshots """ if filters and not is_valid_model_filters(models.Snapshot, filters): return [] session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.Snapshot) # No snapshots would match, return empty list if not query: return [] return query.all() def _snaps_get_query(context, session=None, project_only=False): return model_query(context, models.Snapshot, session=session, project_only=project_only).\ options(joinedload('snapshot_metadata')) def _process_snaps_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.Snapshot, filters): return None query = query.filter_by(**filters) return query @require_context def snapshot_get_all_for_volume(context, volume_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_by_host(context, host, filters=None): if filters and not is_valid_model_filters(models.Snapshot, filters): return [] query = model_query(context, models.Snapshot, read_deleted='no', project_only=True) if filters: query = query.filter_by(**filters) # As a side effect of the introduction of pool-aware scheduler, # newly created volumes will have pool information appended to # 'host' field of a volume record. So a volume record in DB can # now be either form below: # Host # Host#Pool if host and isinstance(host, six.string_types): session = get_session() with session.begin(): host_attr = getattr(models.Volume, 'host') conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] query = query.join(models.Snapshot.volume).filter( or_(*conditions)).options(joinedload('snapshot_metadata')) return query.all() elif not host: return [] @require_context def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(cgsnapshot_id=cgsnapshot_id).\ options(joinedload('volume')).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """Retrieves all snapshots in a project. If no sorting parameters are specified then returned snapshots are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param project_id: project for all snapshots being retrieved :param filters: dictionary of filters; will do exact matching on values :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :returns: list of matching snapshots """ if filters and not is_valid_model_filters(models.Snapshot, filters): return [] authorize_project_context(context, project_id) # Add project_id to filters filters = filters.copy() if filters else {} filters['project_id'] = project_id session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.Snapshot) # No snapshots would match, return empty list if not query: return [] query = query.options(joinedload('snapshot_metadata')) return query.all() @require_context def _snapshot_data_get_for_project(context, project_id, volume_type_id=None, session=None): authorize_project_context(context, project_id) query = model_query(context, func.count(models.Snapshot.id), func.sum(models.Snapshot.volume_size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if volume_type_id: query = query.join('volume').filter_by(volume_type_id=volume_type_id) result = query.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_context def snapshot_data_get_for_project(context, project_id, volume_type_id=None): return _snapshot_data_get_for_project(context, project_id, volume_type_id) @require_context def snapshot_get_active_by_window(context, begin, end=None, project_id=None): """Return snapshots that were active during window.""" query = model_query(context, models.Snapshot, read_deleted="yes") query = query.filter(or_(models.Snapshot.deleted_at == None, # noqa models.Snapshot.deleted_at > begin)) query = query.options(joinedload(models.Snapshot.volume)) query = query.options(joinedload('snapshot_metadata')) if end: query = query.filter(models.Snapshot.created_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @handle_db_data_error @require_context def snapshot_update(context, snapshot_id, values): session = get_session() with session.begin(): snapshot_ref = _snapshot_get(context, snapshot_id, session=session) snapshot_ref.update(values) return snapshot_ref #################### def _snapshot_metadata_get_query(context, snapshot_id, session=None): return model_query(context, models.SnapshotMetadata, session=session, read_deleted="no").\ filter_by(snapshot_id=snapshot_id) @require_context def _snapshot_metadata_get(context, snapshot_id, session=None): rows = _snapshot_metadata_get_query(context, snapshot_id, session).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_snapshot_exists def snapshot_metadata_get(context, snapshot_id): return _snapshot_metadata_get(context, snapshot_id) @require_context @require_snapshot_exists @_retry_on_deadlock def snapshot_metadata_delete(context, snapshot_id, key): _snapshot_metadata_get_query(context, snapshot_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) 
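# NOTE: snapshot_metadata_delete() above uses the soft-delete pattern applied
# throughout this module: rows are flagged as deleted rather than removed, and
# literal_column('updated_at') keeps the existing updated_at value instead of
# letting the UPDATE bump it. A minimal sketch of the same pattern (the
# snapshot id and key are hypothetical):
#
#     model_query(ctxt, models.SnapshotMetadata, session=session).\
#         filter_by(snapshot_id='<snapshot-uuid>').\
#         filter_by(key='<meta-key>').\
#         update({'deleted': True,
#                 'deleted_at': timeutils.utcnow(),
#                 'updated_at': literal_column('updated_at')})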
@require_context def _snapshot_metadata_get_item(context, snapshot_id, key, session=None): result = _snapshot_metadata_get_query(context, snapshot_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.SnapshotMetadataNotFound(metadata_key=key, snapshot_id=snapshot_id) return result @require_context @require_snapshot_exists @_retry_on_deadlock def snapshot_metadata_update(context, snapshot_id, metadata, delete): session = get_session() with session.begin(): # Set existing metadata to deleted if delete argument is True if delete: original_metadata = _snapshot_metadata_get(context, snapshot_id, session) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _snapshot_metadata_get_item(context, snapshot_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _snapshot_metadata_get_item(context, snapshot_id, meta_key, session) except exception.SnapshotMetadataNotFound: meta_ref = models.SnapshotMetadata() item.update({"key": meta_key, "snapshot_id": snapshot_id}) meta_ref.update(item) meta_ref.save(session=session) return snapshot_metadata_get(context, snapshot_id) ################### @handle_db_data_error @require_admin_context def volume_type_create(context, values, projects=None): """Create a new volume type. In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ if not values.get('id'): values['id'] = str(uuid.uuid4()) projects = projects or [] session = get_session() with session.begin(): try: _volume_type_get_by_name(context, values['name'], session) raise exception.VolumeTypeExists(id=values['name']) except exception.VolumeTypeNotFoundByName: pass try: _volume_type_get(context, values['id'], session) raise exception.VolumeTypeExists(id=values['id']) except exception.VolumeTypeNotFound: pass try: values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeTypes() volume_type_ref.update(values) session.add(volume_type_ref) except Exception as e: raise db_exc.DBError(e) for project in set(projects): access_ref = models.VolumeTypeProjects() access_ref.update({"volume_type_id": volume_type_ref.id, "project_id": project}) access_ref.save(session=session) return volume_type_ref def _volume_type_get_query(context, session=None, read_deleted='no', expected_fields=None): expected_fields = expected_fields or [] query = model_query(context, models.VolumeTypes, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')) if 'projects' in expected_fields: query = query.options(joinedload('projects')) if not context.is_admin: the_filter = [models.VolumeTypes.is_public == true()] projects_attr = getattr(models.VolumeTypes, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query def _process_volume_types_filters(query, filters): context = filters.pop('context', None) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.VolumeTypes.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models.VolumeTypes, 'projects') 
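# Volume types explicitly shared with the caller's project should be # listed alongside truly public ones, so the access clause extended # below is OR'ed with the is_public match.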
the_filter.extend([ projects_attr.any(project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) if 'is_public' in filters: del filters['is_public'] if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.VolumeTypes, filters): return if filters.get('extra_specs') is not None: the_filter = [] searchdict = filters.get('extra_specs') extra_specs = getattr(models.VolumeTypes, 'extra_specs') for k, v in searchdict.items(): the_filter.extend([extra_specs.any(key=k, value=v, deleted=False)]) if len(the_filter) > 1: query = query.filter(and_(*the_filter)) else: query = query.filter(the_filter[0]) del filters['extra_specs'] query = query.filter_by(**filters) return query @handle_db_data_error @require_admin_context def volume_type_update(context, volume_type_id, values): session = get_session() with session.begin(): # Check it exists volume_type_ref = _volume_type_ref_get(context, volume_type_id, session) if not volume_type_ref: raise exception.VolumeTypeNotFound(type_id=volume_type_id) # No description change if values['description'] is None: del values['description'] # No is_public change if values['is_public'] is None: del values['is_public'] # No name change if values['name'] is None: del values['name'] else: # Volume type names are unique. Changing a name to one that # belongs to a different volume_type must be prevented. check_vol_type = None try: check_vol_type = \ _volume_type_get_by_name(context, values['name'], session=session) except exception.VolumeTypeNotFoundByName: pass else: if check_vol_type.get('id') != volume_type_id: raise exception.VolumeTypeExists(id=values['name']) volume_type_ref.update(values) volume_type_ref.save(session=session) return volume_type_ref @require_context def volume_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): """Returns a dict describing all volume_types with name as key. If no sort parameters are specified then the returned volume types are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see the _process_volume_types_filters function for more information :param list_result: For compatibility, if list_result = True, return a list instead of a dict.
:returns: list/dict of matching volume types """ session = get_session() with session.begin(): # Add context for _process_volume_types_filters filters = filters or {} filters['context'] = context # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.VolumeTypes) # No volume types would match, return empty dict or list if query is None: if list_result: return [] return {} rows = query.all() if list_result: result = [_dict_with_extra_specs_if_authorized(context, row) for row in rows] return result result = {row['name']: _dict_with_extra_specs_if_authorized(context, row) for row in rows} return result def _volume_type_get_id_from_volume_type_query(context, id, session=None): return model_query( context, models.VolumeTypes.id, read_deleted="no", session=session, base_model=models.VolumeTypes).\ filter_by(id=id) def _volume_type_get_id_from_volume_type(context, id, session=None): result = _volume_type_get_id_from_volume_type_query( context, id, session=session).first() if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) return result[0] def _volume_type_get_db_object(context, id, session=None, inactive=False, expected_fields=None): read_deleted = "yes" if inactive else "no" result = _volume_type_get_query( context, session, read_deleted, expected_fields).\ filter_by(id=id).\ first() return result @require_context def _volume_type_get(context, id, session=None, inactive=False, expected_fields=None): expected_fields = expected_fields or [] result = _volume_type_get_db_object(context, id, session, inactive, expected_fields) if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) vtype = _dict_with_extra_specs_if_authorized(context, result) if 'projects' in expected_fields: vtype['projects'] = [p['project_id'] for p in result['projects']] return vtype @require_context def volume_type_get(context, id, inactive=False, expected_fields=None): """Return a dict describing specific volume_type.""" return _volume_type_get(context, id, session=None, inactive=inactive, expected_fields=expected_fields) def _volume_type_get_full(context, id): """Return dict for a specific volume_type with extra_specs and projects.""" return _volume_type_get(context, id, session=None, inactive=False, expected_fields=('extra_specs', 'projects')) @require_context def _volume_type_ref_get(context, id, session=None, inactive=False): read_deleted = "yes" if inactive else "no" result = model_query(context, models.VolumeTypes, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) return result @require_context def _volume_type_get_by_name(context, name, session=None): result = model_query(context, models.VolumeTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not result: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) return _dict_with_extra_specs_if_authorized(context, result) @require_context def volume_type_get_by_name(context, name): """Return a dict describing specific volume_type.""" return _volume_type_get_by_name(context, name) @require_context def volume_types_get_by_name_or_id(context, volume_type_list): """Return a dict describing specific volume_type.""" req_volume_types = [] for vol_t in volume_type_list: if not uuidutils.is_uuid_like(vol_t): vol_type = _volume_type_get_by_name(context, vol_t) else: vol_type = 
_volume_type_get(context, vol_t) req_volume_types.append(vol_type) return req_volume_types @require_admin_context def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): read_deleted = "yes" if inactive else "no" return model_query(context, models.VolumeTypes, read_deleted=read_deleted). \ filter_by(qos_specs_id=qos_specs_id).all() @require_admin_context def volume_type_qos_associate(context, type_id, qos_specs_id): session = get_session() with session.begin(): _volume_type_get(context, type_id, session) session.query(models.VolumeTypes). \ filter_by(id=type_id). \ update({'qos_specs_id': qos_specs_id, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_disassociate(context, qos_specs_id, type_id): """Disassociate volume type from qos specs.""" session = get_session() with session.begin(): _volume_type_get(context, type_id, session) session.query(models.VolumeTypes). \ filter_by(id=type_id). \ filter_by(qos_specs_id=qos_specs_id). \ update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_disassociate_all(context, qos_specs_id): """Disassociate all volume types associated with specified qos specs.""" session = get_session() with session.begin(): session.query(models.VolumeTypes). \ filter_by(qos_specs_id=qos_specs_id). \ update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_specs_get(context, type_id): """Return all qos specs for given volume type. result looks like: { 'qos_specs': { 'id': 'qos-specs-id', 'name': 'qos_specs_name', 'consumer': 'Consumer', 'specs': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3' } } } """ session = get_session() with session.begin(): _volume_type_get(context, type_id, session) row = session.query(models.VolumeTypes). \ options(joinedload('qos_specs')). \ filter_by(id=type_id). \ first() # row.qos_specs is a list of QualityOfServiceSpecs ref specs = _dict_with_qos_specs(row.qos_specs) if not specs: # turn empty list to None specs = None else: specs = specs[0] return {'qos_specs': specs} @require_admin_context @_retry_on_deadlock def volume_type_destroy(context, id): session = get_session() with session.begin(): _volume_type_get(context, id, session) results = model_query(context, models.Volume, session=session). \ filter_by(volume_type_id=id).all() if results: LOG.error(_LE('VolumeType %s deletion failed, ' 'VolumeType in use.'), id) raise exception.VolumeTypeInUse(volume_type_id=id) model_query(context, models.VolumeTypes, session=session).\ filter_by(id=id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) model_query(context, models.VolumeTypeExtraSpecs, session=session).\ filter_by(volume_type_id=id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_get_active_by_window(context, begin, end=None, project_id=None): """Return volumes that were active during window.""" query = model_query(context, models.Volume, read_deleted="yes") query = query.filter(or_(models.Volume.deleted_at == None, # noqa models.Volume.deleted_at > begin)) if end: query = query.filter(models.Volume.created_at < end) if project_id: query = query.filter_by(project_id=project_id) query = (query.options(joinedload('volume_metadata')). options(joinedload('volume_type')). options(joinedload('volume_attachment')). 
options(joinedload('consistencygroup'))) if is_admin_context(context): query = query.options(joinedload('volume_admin_metadata')) return query.all() def _volume_type_access_query(context, session=None): return model_query(context, models.VolumeTypeProjects, session=session, read_deleted="int_no") @require_admin_context def volume_type_access_get_all(context, type_id): volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) return _volume_type_access_query(context).\ filter_by(volume_type_id=volume_type_id).all() @require_admin_context def volume_type_access_add(context, type_id, project_id): """Add given tenant to the volume type access list.""" volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) access_ref = models.VolumeTypeProjects() access_ref.update({"volume_type_id": volume_type_id, "project_id": project_id}) session = get_session() with session.begin(): try: access_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.VolumeTypeAccessExists(volume_type_id=type_id, project_id=project_id) return access_ref @require_admin_context def volume_type_access_remove(context, type_id, project_id): """Remove given tenant from the volume type access list.""" volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) count = (_volume_type_access_query(context). filter_by(volume_type_id=volume_type_id). filter_by(project_id=project_id). soft_delete(synchronize_session=False)) if count == 0: raise exception.VolumeTypeAccessNotFound( volume_type_id=type_id, project_id=project_id) #################### def _volume_type_extra_specs_query(context, volume_type_id, session=None): return model_query(context, models.VolumeTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id) @require_context def volume_type_extra_specs_get(context, volume_type_id): rows = _volume_type_extra_specs_query(context, volume_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): session = get_session() with session.begin(): _volume_type_extra_specs_get_item(context, volume_type_id, key, session) _volume_type_extra_specs_query(context, volume_type_id, session).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _volume_type_extra_specs_get_item(context, volume_type_id, key, session=None): result = _volume_type_extra_specs_query( context, volume_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeTypeExtraSpecsNotFound( extra_specs_key=key, volume_type_id=volume_type_id) return result @handle_db_data_error @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() with session.begin(): spec_ref = None for key, value in specs.items(): try: spec_ref = _volume_type_extra_specs_get_item( context, volume_type_id, key, session) except exception.VolumeTypeExtraSpecsNotFound: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, "deleted": False}) spec_ref.save(session=session) return specs #################### @require_admin_context def qos_specs_create(context, values): """Create a new QoS specs. :param values: dictionary that contains specifications for QoS, e.g.
{'name': 'Name', 'qos_specs': { 'consumer': 'front-end', 'total_iops_sec': 1000, 'total_bytes_sec': 1024000 } } """ specs_id = str(uuid.uuid4()) session = get_session() with session.begin(): try: _qos_specs_get_by_name(context, values['name'], session) raise exception.QoSSpecsExists(specs_id=values['name']) except exception.QoSSpecsNotFound: pass try: # Insert a root entry for QoS specs specs_root = models.QualityOfServiceSpecs() root = dict(id=specs_id) # 'QoS_Specs_Name' is an internal reserved key to store # the name of QoS specs root['key'] = 'QoS_Specs_Name' root['value'] = values['name'] LOG.debug("DB qos_specs_create(): root %s", root) specs_root.update(root) specs_root.save(session=session) # Insert all specification entries for QoS specs for k, v in values['qos_specs'].items(): item = dict(key=k, value=v, specs_id=specs_id) item['id'] = str(uuid.uuid4()) spec_entry = models.QualityOfServiceSpecs() spec_entry.update(item) spec_entry.save(session=session) except db_exc.DBDataError: msg = _('Error writing field to database') LOG.exception(msg) raise exception.Invalid(msg) except Exception as e: raise db_exc.DBError(e) return dict(id=specs_root.id, name=specs_root.value) @require_admin_context def _qos_specs_get_by_name(context, name, session=None, inactive=False): read_deleted = 'yes' if inactive else 'no' results = model_query(context, models.QualityOfServiceSpecs, read_deleted=read_deleted, session=session). \ filter_by(key='QoS_Specs_Name'). \ filter_by(value=name). \ options(joinedload('specs')).all() if not results: raise exception.QoSSpecsNotFound(specs_id=name) return results @require_admin_context def _qos_specs_get_ref(context, qos_specs_id, session=None, inactive=False): read_deleted = 'yes' if inactive else 'no' result = model_query(context, models.QualityOfServiceSpecs, read_deleted=read_deleted, session=session). \ filter_by(id=qos_specs_id). \ options(joinedload_all('specs')).all() if not result: raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) return result def _dict_with_children_specs(specs): """Convert specs list to a dict.""" result = {} for spec in specs: # Skip deleted keys if not spec['deleted']: result.update({spec['key']: spec['value']}) return result def _dict_with_qos_specs(rows): """Convert qos specs query results to a list. Qos specs query results are a list of quality_of_service_specs refs, some are the root entry of a qos specs (key == 'QoS_Specs_Name') and the rest are children entries, a.k.a. the detailed specs of a qos specs. This function converts the query results to a list of dicts, one per qos specs, with the specs name stored under the 'name' key. """ result = [] for row in rows: if row['key'] == 'QoS_Specs_Name': member = {} member['name'] = row['value'] member.update(dict(id=row['id'])) if row.specs: spec_dict = _dict_with_children_specs(row.specs) member.update(dict(consumer=spec_dict['consumer'])) del spec_dict['consumer'] member.update(dict(specs=spec_dict)) result.append(member) return result @require_admin_context def qos_specs_get(context, qos_specs_id, inactive=False): rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive) return _dict_with_qos_specs(rows)[0] @require_admin_context def qos_specs_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Returns a list of all qos_specs. The result looks like: [{ 'id': SPECS-UUID, 'name': 'qos_spec-1', 'consumer': 'back-end', 'specs': { 'key1': 'value1', 'key2': 'value2', ... } }, { 'id': SPECS-UUID, 'name': 'qos_spec-2', 'consumer': 'front-end', 'specs': { 'key1': 'value1', 'key2': 'value2', ...
} }, ] """ session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.QualityOfServiceSpecs) # No Qos specs would match, return empty list if query is None: return [] rows = query.all() return _dict_with_qos_specs(rows) @require_admin_context def _qos_specs_get_query(context, session): rows = model_query(context, models.QualityOfServiceSpecs, session=session, read_deleted='no').\ options(joinedload_all('specs')).filter_by(key='QoS_Specs_Name') return rows def _process_qos_specs_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.QualityOfServiceSpecs, filters): return query = query.filter_by(**filters) return query @require_admin_context def _qos_specs_get(context, qos_spec_id, session=None): result = model_query(context, models.QualityOfServiceSpecs, session=session, read_deleted='no').\ filter_by(id=qos_spec_id).filter_by(key='QoS_Specs_Name').first() if not result: raise exception.QoSSpecsNotFound(specs_id=qos_spec_id) return result @require_admin_context def qos_specs_get_by_name(context, name, inactive=False): rows = _qos_specs_get_by_name(context, name, None, inactive) return _dict_with_qos_specs(rows)[0] @require_admin_context def qos_specs_associations_get(context, qos_specs_id): """Return all entities associated with specified qos specs. For now, the only entity that can be associated with a qos specs is a volume type, so this is just a wrapper of volume_type_qos_associations_get(). But it's possible to extend qos specs association to other entities, such as volumes, sometime in the future. """ # Raise QoSSpecsNotFound if no specs found _qos_specs_get_ref(context, qos_specs_id, None) return volume_type_qos_associations_get(context, qos_specs_id) @require_admin_context def qos_specs_associate(context, qos_specs_id, type_id): """Associate a volume type with the specified qos specs.""" return volume_type_qos_associate(context, type_id, qos_specs_id) @require_admin_context def qos_specs_disassociate(context, qos_specs_id, type_id): """Disassociate a volume type from the specified qos specs.""" return volume_type_qos_disassociate(context, qos_specs_id, type_id) @require_admin_context def qos_specs_disassociate_all(context, qos_specs_id): """Disassociate all entities associated with specified qos specs. For now, the only entity that can be associated with a qos specs is a volume type, so this is just a wrapper of volume_type_qos_disassociate_all(). But it's possible to extend qos specs association to other entities, such as volumes, sometime in the future. """ return volume_type_qos_disassociate_all(context, qos_specs_id) @require_admin_context def qos_specs_item_delete(context, qos_specs_id, key): session = get_session() with session.begin(): _qos_specs_get_item(context, qos_specs_id, key) session.query(models.QualityOfServiceSpecs). \ filter(models.QualityOfServiceSpecs.key == key). \ filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id).
\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def qos_specs_delete(context, qos_specs_id): session = get_session() with session.begin(): _qos_specs_get_ref(context, qos_specs_id, session) session.query(models.QualityOfServiceSpecs).\ filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id, models.QualityOfServiceSpecs.specs_id == qos_specs_id)).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def _qos_specs_get_item(context, qos_specs_id, key, session=None): result = model_query(context, models.QualityOfServiceSpecs, session=session). \ filter(models.QualityOfServiceSpecs.key == key). \ filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \ first() if not result: raise exception.QoSSpecsKeyNotFound( specs_key=key, specs_id=qos_specs_id) return result @handle_db_data_error @require_admin_context def qos_specs_update(context, qos_specs_id, specs): """Make updates to an existing qos specs. Perform add, update or delete key/values to a qos specs. """ session = get_session() with session.begin(): # make sure qos specs exists _qos_specs_get_ref(context, qos_specs_id, session) spec_ref = None for key in specs.keys(): try: spec_ref = _qos_specs_get_item( context, qos_specs_id, key, session) except exception.QoSSpecsKeyNotFound: spec_ref = models.QualityOfServiceSpecs() id = None if spec_ref.get('id', None): id = spec_ref['id'] else: id = str(uuid.uuid4()) value = dict(id=id, key=key, value=specs[key], specs_id=qos_specs_id, deleted=False) LOG.debug('qos_specs_update() value: %s', value) spec_ref.update(value) spec_ref.save(session=session) return specs #################### @require_context def volume_type_encryption_get(context, volume_type_id, session=None): return model_query(context, models.Encryption, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id).first() @require_admin_context def volume_type_encryption_delete(context, volume_type_id): session = get_session() with session.begin(): encryption = volume_type_encryption_get(context, volume_type_id, session) if not encryption: raise exception.VolumeTypeEncryptionNotFound( type_id=volume_type_id) encryption.update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @handle_db_data_error @require_admin_context def volume_type_encryption_create(context, volume_type_id, values): session = get_session() with session.begin(): encryption = models.Encryption() if 'volume_type_id' not in values: values['volume_type_id'] = volume_type_id if 'encryption_id' not in values: values['encryption_id'] = six.text_type(uuid.uuid4()) encryption.update(values) session.add(encryption) return encryption @handle_db_data_error @require_admin_context def volume_type_encryption_update(context, volume_type_id, values): session = get_session() with session.begin(): encryption = volume_type_encryption_get(context, volume_type_id, session) if not encryption: raise exception.VolumeTypeEncryptionNotFound( type_id=volume_type_id) encryption.update(values) return encryption def volume_type_encryption_volume_get(context, volume_type_id, session=None): volume_list = _volume_get_query(context, session=session, project_only=False).\ filter_by(volume_type_id=volume_type_id).\ all() return volume_list #################### @require_context def volume_encryption_metadata_get(context, volume_id, session=None): """Return the encryption 
metadata for a given volume.""" volume_ref = _volume_get(context, volume_id) encryption_ref = volume_type_encryption_get(context, volume_ref['volume_type_id']) values = { 'encryption_key_id': volume_ref['encryption_key_id'], } if encryption_ref: for key in ['control_location', 'cipher', 'key_size', 'provider']: values[key] = encryption_ref[key] return values #################### @require_context def _volume_glance_metadata_get_all(context, session=None): query = model_query(context, models.VolumeGlanceMetadata, session=session) if is_user_context(context): query = query.filter( models.Volume.id == models.VolumeGlanceMetadata.volume_id, models.Volume.project_id == context.project_id) return query.all() @require_context def volume_glance_metadata_get_all(context): """Return the Glance metadata for all volumes.""" return _volume_glance_metadata_get_all(context) @require_context def volume_glance_metadata_list_get(context, volume_id_list): """Return the glance metadata for a volume list.""" query = model_query(context, models.VolumeGlanceMetadata, session=None) query = query.filter( models.VolumeGlanceMetadata.volume_id.in_(volume_id_list)) return query.all() @require_context @require_volume_exists def _volume_glance_metadata_get(context, volume_id, session=None): rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ filter_by(volume_id=volume_id).\ filter_by(deleted=False).\ all() if not rows: raise exception.GlanceMetadataNotFound(id=volume_id) return rows @require_context @require_volume_exists def volume_glance_metadata_get(context, volume_id): """Return the Glance metadata for the specified volume.""" return _volume_glance_metadata_get(context, volume_id) @require_context @require_snapshot_exists def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None): rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ filter_by(snapshot_id=snapshot_id).\ filter_by(deleted=False).\ all() if not rows: raise exception.GlanceMetadataNotFound(id=snapshot_id) return rows @require_context @require_snapshot_exists def volume_snapshot_glance_metadata_get(context, snapshot_id): """Return the Glance metadata for the specified snapshot.""" return _volume_snapshot_glance_metadata_get(context, snapshot_id) @require_context @require_volume_exists def volume_glance_metadata_create(context, volume_id, key, value): """Update the Glance metadata for a volume by adding a new key:value pair. This API does not support changing the value of a key once it has been created. """ session = get_session() with session.begin(): rows = session.query(models.VolumeGlanceMetadata).\ filter_by(volume_id=volume_id).\ filter_by(key=key).\ filter_by(deleted=False).all() if len(rows) > 0: raise exception.GlanceMetadataExists(key=key, volume_id=volume_id) vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = key vol_glance_metadata.value = six.text_type(value) session.add(vol_glance_metadata) return @require_context @require_volume_exists def volume_glance_metadata_bulk_create(context, volume_id, metadata): """Update the Glance metadata for a volume by adding new key:value pairs. This API does not support changing the value of a key once it has been created. 
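As a usage sketch (ids and values hypothetical): volume_glance_metadata_bulk_create(ctxt, '<volume-uuid>', {'disk_format': 'raw', 'container_format': 'bare'}) inserts one row per pair, and raises GlanceMetadataExists if any of the keys is already set for the volume.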
""" session = get_session() with session.begin(): for (key, value) in metadata.items(): rows = session.query(models.VolumeGlanceMetadata).\ filter_by(volume_id=volume_id).\ filter_by(key=key).\ filter_by(deleted=False).all() if len(rows) > 0: raise exception.GlanceMetadataExists(key=key, volume_id=volume_id) vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = key vol_glance_metadata.value = six.text_type(value) session.add(vol_glance_metadata) @require_context @require_snapshot_exists def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): """Update the Glance metadata for a snapshot. This copies all of the key:value pairs from the originating volume, to ensure that a volume created from the snapshot will retain the original metadata. """ session = get_session() with session.begin(): metadata = _volume_glance_metadata_get(context, volume_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.snapshot_id = snapshot_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context @require_volume_exists def volume_glance_metadata_copy_from_volume_to_volume(context, src_volume_id, volume_id): """Update the Glance metadata for a volume. This copies all all of the key:value pairs from the originating volume, to ensure that a volume created from the volume (clone) will retain the original metadata. """ session = get_session() with session.begin(): metadata = _volume_glance_metadata_get(context, src_volume_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context @require_volume_exists def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): """Update Glance metadata from a volume. Update the Glance metadata from a volume (created from a snapshot) by copying all of the key:value pairs from the originating snapshot. This is so that the Glance metadata from the original volume is retained. 
""" session = get_session() with session.begin(): metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context def volume_glance_metadata_delete_by_volume(context, volume_id): model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ filter_by(snapshot_id=snapshot_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) ############################### @require_context def backup_get(context, backup_id, read_deleted=None, project_only=True): return _backup_get(context, backup_id, read_deleted=read_deleted, project_only=project_only) def _backup_get(context, backup_id, session=None, read_deleted=None, project_only=True): result = model_query(context, models.Backup, session=session, project_only=project_only, read_deleted=read_deleted).\ filter_by(id=backup_id).\ first() if not result: raise exception.BackupNotFound(backup_id=backup_id) return result def _backup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): if filters and not is_valid_model_filters(models.Backup, filters): return [] session = get_session() with session.begin(): # Generate the paginate query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.Backup) if query is None: return [] return query.all() def _backups_get_query(context, session=None, project_only=False): return model_query(context, models.Backup, session=session, project_only=project_only) def _process_backups_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.Backup, filters): return query = query.filter_by(**filters) return query @require_admin_context def backup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return _backup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_admin_context def backup_get_all_by_host(context, host): return model_query(context, models.Backup).filter_by(host=host).all() @require_context def backup_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _backup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_context def backup_get_all_by_volume(context, volume_id, filters=None): authorize_project_context(context, volume_id) if not filters: filters = {} else: filters = filters.copy() filters['volume_id'] = volume_id return _backup_get_all(context, filters) @handle_db_data_error @require_context def backup_create(context, values): backup = models.Backup() if not values.get('id'): values['id'] = str(uuid.uuid4()) backup.update(values) session = get_session() with session.begin(): backup.save(session) 
return backup @handle_db_data_error @require_context def backup_update(context, backup_id, values): session = get_session() with session.begin(): backup = model_query(context, models.Backup, session=session, read_deleted="yes").\ filter_by(id=backup_id).first() if not backup: raise exception.BackupNotFound( _("No backup with id %s") % backup_id) backup.update(values) return backup @require_admin_context def backup_destroy(context, backup_id): model_query(context, models.Backup).\ filter_by(id=backup_id).\ update({'status': fields.BackupStatus.DELETED, 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) ############################### @require_context def _transfer_get(context, transfer_id, session=None): query = model_query(context, models.Transfer, session=session).\ filter_by(id=transfer_id) if not is_admin_context(context): volume = models.Volume query = query.filter(models.Transfer.volume_id == volume.id, volume.project_id == context.project_id) result = query.first() if not result: raise exception.TransferNotFound(transfer_id=transfer_id) return result @require_context def transfer_get(context, transfer_id): return _transfer_get(context, transfer_id) def _translate_transfers(transfers): results = [] for transfer in transfers: r = {} r['id'] = transfer['id'] r['volume_id'] = transfer['volume_id'] r['display_name'] = transfer['display_name'] r['created_at'] = transfer['created_at'] r['deleted'] = transfer['deleted'] results.append(r) return results @require_admin_context def transfer_get_all(context): results = model_query(context, models.Transfer).all() return _translate_transfers(results) @require_context def transfer_get_all_by_project(context, project_id): authorize_project_context(context, project_id) query = model_query(context, models.Transfer).\ filter(models.Volume.id == models.Transfer.volume_id, models.Volume.project_id == project_id) results = query.all() return _translate_transfers(results) @require_context def transfer_create(context, values): if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): volume_ref = _volume_get(context, values['volume_id'], session=session) if volume_ref['status'] != 'available': msg = _('Volume must be available') LOG.error(msg) raise exception.InvalidVolume(reason=msg) volume_ref['status'] = 'awaiting-transfer' transfer = models.Transfer() transfer.update(values) session.add(transfer) volume_ref.update(volume_ref) return transfer @require_context @_retry_on_deadlock def transfer_destroy(context, transfer_id): session = get_session() with session.begin(): transfer_ref = _transfer_get(context, transfer_id, session=session) volume_ref = _volume_get(context, transfer_ref['volume_id'], session=session) # If the volume state is not 'awaiting-transfer' don't change it, but # we can still mark the transfer record as deleted. 
if volume_ref['status'] != 'awaiting-transfer': LOG.error(_LE('Volume in unexpected state %s, expected ' 'awaiting-transfer'), volume_ref['status']) else: volume_ref['status'] = 'available' volume_ref.update(volume_ref) volume_ref.save(session=session) model_query(context, models.Transfer, session=session).\ filter_by(id=transfer_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def transfer_accept(context, transfer_id, user_id, project_id): session = get_session() with session.begin(): transfer_ref = _transfer_get(context, transfer_id, session) volume_id = transfer_ref['volume_id'] volume_ref = _volume_get(context, volume_id, session=session) if volume_ref['status'] != 'awaiting-transfer': msg = _('Transfer %(transfer_id)s: Volume id %(volume_id)s in ' 'unexpected state %(status)s, expected ' 'awaiting-transfer') % {'transfer_id': transfer_id, 'volume_id': volume_ref['id'], 'status': volume_ref['status']} LOG.error(msg) raise exception.InvalidVolume(reason=msg) volume_ref['status'] = 'available' volume_ref['user_id'] = user_id volume_ref['project_id'] = project_id volume_ref['updated_at'] = literal_column('updated_at') volume_ref.update(volume_ref) session.query(models.Transfer).\ filter_by(id=transfer_ref['id']).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) ############################### @require_admin_context def _consistencygroup_data_get_for_project(context, project_id, session=None): query = model_query(context, func.count(models.ConsistencyGroup.id), read_deleted="no", session=session).\ filter_by(project_id=project_id) result = query.first() return (0, result[0] or 0) @require_context def _consistencygroup_get(context, consistencygroup_id, session=None): result = model_query(context, models.ConsistencyGroup, session=session, project_only=True).\ filter_by(id=consistencygroup_id).\ first() if not result: raise exception.ConsistencyGroupNotFound( consistencygroup_id=consistencygroup_id) return result @require_context def consistencygroup_get(context, consistencygroup_id): return _consistencygroup_get(context, consistencygroup_id) def _consistencygroups_get_query(context, session=None, project_only=False): return model_query(context, models.ConsistencyGroup, session=session, project_only=project_only) def _process_consistencygroups_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.ConsistencyGroup, filters): return query = query.filter_by(**filters) return query def _consistencygroup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): if filters and not is_valid_model_filters(models.ConsistencyGroup, filters): return [] session = get_session() with session.begin(): # Generate the paginate query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.ConsistencyGroup) if query is None: return [] return query.all() @require_admin_context def consistencygroup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Retrieves all consistency groups. If no sort parameters are specified then the returned cgs are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_consistencygroups_filters function for more information :returns: list of matching consistency groups """ return _consistencygroup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_context def consistencygroup_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Retrieves all consistency groups in a project. If no sort parameters are specified then the returned cgs are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_consistencygroups_filters function for more information :returns: list of matching consistency groups """ authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _consistencygroup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @handle_db_data_error @require_context def consistencygroup_create(context, values): consistencygroup = models.ConsistencyGroup() if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): consistencygroup.update(values) session.add(consistencygroup) return _consistencygroup_get(context, values['id'], session=session) @handle_db_data_error @require_context def consistencygroup_update(context, consistencygroup_id, values): session = get_session() with session.begin(): result = model_query(context, models.ConsistencyGroup, project_only=True).\ filter_by(id=consistencygroup_id).\ first() if not result: raise exception.ConsistencyGroupNotFound( _("No consistency group with id %s") % consistencygroup_id) result.update(values) result.save(session=session) return result @require_admin_context def consistencygroup_destroy(context, consistencygroup_id): session = get_session() with session.begin(): model_query(context, models.ConsistencyGroup, session=session).\ filter_by(id=consistencygroup_id).\ update({'status': fields.ConsistencyGroupStatus.DELETED, 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) ############################### @require_context def _cgsnapshot_get(context, cgsnapshot_id, session=None): result = model_query(context, models.Cgsnapshot, session=session, project_only=True).\ filter_by(id=cgsnapshot_id).\ first() if not result: raise 
exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id) return result @require_context def cgsnapshot_get(context, cgsnapshot_id): return _cgsnapshot_get(context, cgsnapshot_id) def is_valid_model_filters(model, filters): """Return True if filter values exist on the model :param model: a Cinder model :param filters: dictionary of filters """ for key in filters.keys(): try: getattr(model, key) except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return False return True def _cgsnapshot_get_all(context, project_id=None, group_id=None, filters=None): query = model_query(context, models.Cgsnapshot) if filters: if not is_valid_model_filters(models.Cgsnapshot, filters): return [] query = query.filter_by(**filters) if project_id: query = query.filter_by(project_id=project_id) if group_id: query = query.filter_by(consistencygroup_id=group_id) return query.all() @require_admin_context def cgsnapshot_get_all(context, filters=None): return _cgsnapshot_get_all(context, filters=filters) @require_admin_context def cgsnapshot_get_all_by_group(context, group_id, filters=None): return _cgsnapshot_get_all(context, group_id=group_id, filters=filters) @require_context def cgsnapshot_get_all_by_project(context, project_id, filters=None): authorize_project_context(context, project_id) return _cgsnapshot_get_all(context, project_id=project_id, filters=filters) @handle_db_data_error @require_context def cgsnapshot_create(context, values): cgsnapshot = models.Cgsnapshot() if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): cgsnapshot.update(values) session.add(cgsnapshot) return _cgsnapshot_get(context, values['id'], session=session) @handle_db_data_error @require_context def cgsnapshot_update(context, cgsnapshot_id, values): session = get_session() with session.begin(): result = model_query(context, models.Cgsnapshot, project_only=True).\ filter_by(id=cgsnapshot_id).\ first() if not result: raise exception.CgSnapshotNotFound( _("No cgsnapshot with id %s") % cgsnapshot_id) result.update(values) result.save(session=session) return result @require_admin_context def cgsnapshot_destroy(context, cgsnapshot_id): session = get_session() with session.begin(): model_query(context, models.Cgsnapshot, session=session).\ filter_by(id=cgsnapshot_id).\ update({'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def purge_deleted_rows(context, age_in_days): """Purge deleted rows older than age from cinder tables.""" try: age_in_days = int(age_in_days) except ValueError: msg = _('Invalid value for age, %(age)s') % {'age': age_in_days} LOG.exception(msg) raise exception.InvalidParameterValue(msg) if age_in_days <= 0: msg = _('Must supply a positive value for age') LOG.error(msg) raise exception.InvalidParameterValue(msg) engine = get_engine() session = get_session() metadata = MetaData() metadata.bind = engine tables = [] for model_class in models.__dict__.values(): if hasattr(model_class, "__tablename__") \ and hasattr(model_class, "deleted"): tables.append(model_class.__tablename__) # Reorder the list so the volumes table is last to avoid FK constraints tables.remove("volumes") tables.append("volumes") for table in tables: t = Table(table, metadata, autoload=True) LOG.info(_LI('Purging deleted rows older than age=%(age)d days ' 'from table=%(table)s'), {'age': age_in_days, 'table': table}) deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days) try: with 
session.begin(): result = session.execute( t.delete() .where(t.c.deleted_at < deleted_age)) except db_exc.DBReferenceError: LOG.exception(_LE('DBError detected when purging from ' 'table=%(table)s'), {'table': table}) raise rows_purged = result.rowcount LOG.info(_LI("Deleted %(row)d rows from table=%(table)s"), {'row': rows_purged, 'table': table}) ############################### @require_context def driver_initiator_data_update(context, initiator, namespace, updates): session = get_session() with session.begin(): set_values = updates.get('set_values', {}) for key, value in set_values.items(): data = session.query(models.DriverInitiatorData).\ filter_by(initiator=initiator).\ filter_by(namespace=namespace).\ filter_by(key=key).\ first() if data: data.update({'value': value}) data.save(session=session) else: data = models.DriverInitiatorData() data.initiator = initiator data.namespace = namespace data.key = key data.value = value session.add(data) remove_values = updates.get('remove_values', []) for key in remove_values: session.query(models.DriverInitiatorData).\ filter_by(initiator=initiator).\ filter_by(namespace=namespace).\ filter_by(key=key).\ delete() @require_context def driver_initiator_data_get(context, initiator, namespace): session = get_session() with session.begin(): return session.query(models.DriverInitiatorData).\ filter_by(initiator=initiator).\ filter_by(namespace=namespace).\ all() ############################### PAGINATION_HELPERS = { models.Volume: (_volume_get_query, _process_volume_filters, _volume_get), models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get), models.Backup: (_backups_get_query, _process_backups_filters, _backup_get), models.QualityOfServiceSpecs: (_qos_specs_get_query, _process_qos_specs_filters, _qos_specs_get), models.VolumeTypes: (_volume_type_get_query, _process_volume_types_filters, _volume_type_get_db_object), models.ConsistencyGroup: (_consistencygroups_get_query, _process_consistencygroups_filters, _consistencygroup_get) } ############################### @require_context def image_volume_cache_create(context, host, image_id, image_updated_at, volume_id, size): session = get_session() with session.begin(): cache_entry = models.ImageVolumeCacheEntry() cache_entry.host = host cache_entry.image_id = image_id cache_entry.image_updated_at = image_updated_at cache_entry.volume_id = volume_id cache_entry.size = size session.add(cache_entry) return cache_entry @require_context def image_volume_cache_delete(context, volume_id): session = get_session() with session.begin(): session.query(models.ImageVolumeCacheEntry).\ filter_by(volume_id=volume_id).\ delete() @require_context def image_volume_cache_get_and_update_last_used(context, image_id, host): session = get_session() with session.begin(): entry = session.query(models.ImageVolumeCacheEntry).\ filter_by(image_id=image_id).\ filter_by(host=host).\ order_by(desc(models.ImageVolumeCacheEntry.last_used)).\ first() if entry: entry.last_used = timeutils.utcnow() entry.save(session=session) return entry @require_context def image_volume_cache_get_by_volume_id(context, volume_id): session = get_session() with session.begin(): return session.query(models.ImageVolumeCacheEntry).\ filter_by(volume_id=volume_id).\ first() @require_context def image_volume_cache_get_all_for_host(context, host): session = get_session() with session.begin(): return session.query(models.ImageVolumeCacheEntry).\ filter_by(host=host).\ order_by(desc(models.ImageVolumeCacheEntry.last_used)).\ all() 
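# NOTE: a usage sketch for the image volume cache helpers above (the host,
# image and volume ids, and the image_meta dict, are hypothetical). A backend
# that just created a volume from an image would record the cache entry, and
# later touch it on a cache hit:
#
#     image_volume_cache_create(ctxt, 'host@backend#pool', '<image-uuid>',
#                               image_meta['updated_at'], '<volume-uuid>', 1)
#     entry = image_volume_cache_get_and_update_last_used(
#         ctxt, '<image-uuid>', 'host@backend#pool')
#
# image_volume_cache_get_all_for_host() returns entries ordered by last_used
# descending, so the least recently used eviction candidate is the last
# element of the returned list.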
############################### def get_model_for_versioned_object(versioned_object): # Exceptions to the model mapping; in general Versioned Objects have the same # name as their ORM model counterparts, but there are some that diverge VO_TO_MODEL_EXCEPTIONS = { 'BackupImport': models.Backup, 'VolumeType': models.VolumeTypes, 'CGSnapshot': models.Cgsnapshot, } model_name = versioned_object.obj_name() return (VO_TO_MODEL_EXCEPTIONS.get(model_name) or getattr(models, model_name)) def _get_get_method(model): # Exceptions to the model-to-get-method mapping; in general method names are # a simple conversion of the ORM name from camel case to snake format, with # _get appended GET_EXCEPTIONS = { models.ConsistencyGroup: consistencygroup_get, models.VolumeTypes: _volume_type_get_full, } if model in GET_EXCEPTIONS: return GET_EXCEPTIONS[model] # General conversion # Convert camel cased model name to snake format s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__) # Get method must be the snake formatted model name concatenated with _get method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get' return globals().get(method_name) _GET_METHODS = {} @require_context def get_by_id(context, model, id, *args, **kwargs): # Add get method to cache dictionary if it's not already there if not _GET_METHODS.get(model): _GET_METHODS[model] = _get_get_method(model) return _GET_METHODS[model](context, id, *args, **kwargs) def condition_db_filter(model, field, value): """Create a matching filter. If value is an iterable other than a string, any of the values is a valid match (OR), so we'll use the SQL IN operator. If it's not an iterable, the == operator will be used. """ orm_field = getattr(model, field) # For values that must match and are iterables we use IN if (isinstance(value, collections.Iterable) and not isinstance(value, six.string_types)): # We cannot use in_ when one of the values is None if None not in value: return orm_field.in_(value) return or_(orm_field == v for v in value) # For values that must match and are not iterables we use == return orm_field == value def condition_not_db_filter(model, field, value, auto_none=True): """Create a non-matching filter. If value is an iterable other than a string, any of the values is a valid match (OR), so we'll use the SQL IN operator. If it's not an iterable, the == operator will be used. If auto_none is True then we'll consider NULL values as different as well, like we do in Python and not like SQL does.
""" result = ~condition_db_filter(model, field, value) if (auto_none and ((isinstance(value, collections.Iterable) and not isinstance(value, six.string_types) and None not in value) or (value is not None))): orm_field = getattr(model, field) result = or_(result, orm_field.is_(None)) return result def is_orm_value(obj): """Check if object is an ORM field or expression.""" return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute, sqlalchemy.sql.expression.ColumnElement)) @_retry_on_deadlock @require_context def conditional_update(context, model, values, expected_values, filters=(), include_deleted='no', project_only=False): """Compare-and-swap conditional update SQLAlchemy implementation.""" # Provided filters will become part of the where clause where_conds = list(filters) # Build where conditions with operators ==, !=, NOT IN and IN for field, condition in expected_values.items(): if not isinstance(condition, db.Condition): condition = db.Condition(condition, field) where_conds.append(condition.get_filter(model, field)) # Transform case values values = {field: case(value.whens, value.value, value.else_) if isinstance(value, db.Case) else value for field, value in values.items()} query = model_query(context, model, read_deleted=include_deleted, project_only=project_only) # Return True if we were able to change any DB entry, False otherwise result = query.filter(*where_conds).update(values, synchronize_session=False) return 0 != result cinder-8.0.0/cinder/db/base.py0000664000567000056710000000277012701406250017244 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for classes that need modular database access.""" from oslo_config import cfg from oslo_utils import importutils db_driver_opt = cfg.StrOpt('db_driver', default='cinder.db', help='Driver to use for database access') CONF = cfg.CONF CONF.register_opt(db_driver_opt) class Base(object): """DB driver is injected in the init method.""" def __init__(self, db_driver=None): # NOTE(mriedem): Without this call, multiple inheritance involving # the db Base class does not work correctly. super(Base, self).__init__() if not db_driver: db_driver = CONF.db_driver self.db = importutils.import_module(db_driver) # pylint: disable=C0103 self.db.dispose_engine() cinder-8.0.0/cinder/db/migration.py0000664000567000056710000000445312701406250020323 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database setup and migration commands.""" import os import threading from oslo_config import cfg from oslo_db import options from stevedore import driver from cinder.db.sqlalchemy import api as db_api from cinder import exception from cinder.i18n import _ INIT_VERSION = 000 _IMPL = None _LOCK = threading.Lock() options.set_defaults(cfg.CONF) MIGRATE_REPO_PATH = os.path.join( os.path.abspath(os.path.dirname(__file__)), 'sqlalchemy', 'migrate_repo', ) def get_backend(): global _IMPL if _IMPL is None: with _LOCK: if _IMPL is None: _IMPL = driver.DriverManager( "cinder.database.migration_backend", cfg.CONF.database.backend).driver return _IMPL def db_sync(version=None, init_version=INIT_VERSION, engine=None): """Migrate the database to `version` or the most recent version.""" if engine is None: engine = db_api.get_engine() current_db_version = get_backend().db_version(engine, MIGRATE_REPO_PATH, init_version) # TODO(e0ne): drop version validation when new oslo.db will be released if version and int(version) < current_db_version: msg = _('Database schema downgrade is not allowed.') raise exception.InvalidInput(reason=msg) return get_backend().db_sync(engine=engine, abs_path=MIGRATE_REPO_PATH, version=version, init_version=init_version) cinder-8.0.0/cinder/db/api.py0000664000567000056710000012711512701406250017104 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. Functions in this module are imported into the cinder.db namespace. Call these functions from cinder.db namespace, not the cinder.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/cinder/cinder.sqlite`. 
:enable_new_services: when adding a new service to the database, whether it is placed in the pool of available hardware (Default: True) """ from oslo_config import cfg from oslo_db import concurrency as db_concurrency from oslo_db import options as db_options from cinder.api import common from cinder.common import constants from cinder.i18n import _ db_opts = [ cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create'), cfg.StrOpt('volume_name_template', default='volume-%s', help='Template string to be used to generate volume names'), cfg.StrOpt('snapshot_name_template', default='snapshot-%s', help='Template string to be used to generate snapshot names'), cfg.StrOpt('backup_name_template', default='backup-%s', help='Template string to be used to generate backup names'), ] CONF = cfg.CONF CONF.register_opts(db_opts) db_options.set_defaults(CONF) CONF.set_default('sqlite_db', 'cinder.sqlite', group='database') _BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'} IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING) # The maximum value a signed INT type may have MAX_INT = constants.DB_MAX_INT ################### def dispose_engine(): """Force the engine to establish new connections.""" # FIXME(jdg): When using sqlite, if we do the dispose we seem to lose our # DB here. Adding this check means we don't do the dispose, but we keep # our sqlite DB. This likely isn't the best way to handle this. if 'sqlite' not in IMPL.get_engine().name: return IMPL.dispose_engine() else: return ################### def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, service_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by the host it's on and the topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, filters=None): """Get all services.""" return IMPL.service_get_all(context, filters) def service_get_all_by_topic(context, topic, disabled=None): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic, disabled=disabled) def service_get_all_by_binary(context, binary, disabled=None): """Get all services for a given binary.""" return IMPL.service_get_all_by_binary(context, binary, disabled) def service_get_by_args(context, host, binary): """Get the state of a service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on a service and update it. Raises NotFound if service does not exist.
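    Example (illustrative; the field values are hypothetical and ``ctxt`` is
    any admin RequestContext)::

        service_update(ctxt, service_id,
                       {'disabled': True,
                        'disabled_reason': 'maintenance window'})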
""" return IMPL.service_update(context, service_id, values) ############### def volume_attach(context, values): """Attach a volume.""" return IMPL.volume_attach(context, values) def volume_attached(context, volume_id, instance_id, host_name, mountpoint, attach_mode='rw'): """Ensure that a volume is set as attached.""" return IMPL.volume_attached(context, volume_id, instance_id, host_name, mountpoint, attach_mode) def volume_create(context, values): """Create a volume from the values dictionary.""" return IMPL.volume_create(context, values) def volume_data_get_for_host(context, host, count_only=False): """Get (volume_count, gigabytes) for project.""" return IMPL.volume_data_get_for_host(context, host, count_only) def volume_data_get_for_project(context, project_id): """Get (volume_count, gigabytes) for project.""" return IMPL.volume_data_get_for_project(context, project_id) def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" return IMPL.volume_destroy(context, volume_id) def volume_detached(context, volume_id, attachment_id): """Ensure that a volume is set as detached.""" return IMPL.volume_detached(context, volume_id, attachment_id) def volume_get(context, volume_id): """Get a volume or raise if it does not exist.""" return IMPL.volume_get(context, volume_id) def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Get all volumes.""" return IMPL.volume_get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) def volume_get_all_by_host(context, host, filters=None): """Get all volumes belonging to a host.""" return IMPL.volume_get_all_by_host(context, host, filters=filters) def volume_get_all_by_group(context, group_id, filters=None): """Get all volumes belonging to a consistency group.""" return IMPL.volume_get_all_by_group(context, group_id, filters=filters) def volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Get all volumes belonging to a project.""" return IMPL.volume_get_all_by_project(context, project_id, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) def volume_update(context, volume_id, values): """Set the given properties on a volume and update it. Raises NotFound if volume does not exist. 
""" return IMPL.volume_update(context, volume_id, values) def volume_attachment_update(context, attachment_id, values): return IMPL.volume_attachment_update(context, attachment_id, values) def volume_attachment_get(context, attachment_id, session=None): return IMPL.volume_attachment_get(context, attachment_id, session) def volume_attachment_get_used_by_volume_id(context, volume_id): return IMPL.volume_attachment_get_used_by_volume_id(context, volume_id) def volume_attachment_get_by_host(context, volume_id, host): return IMPL.volume_attachment_get_by_host(context, volume_id, host) def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid): return IMPL.volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid) def volume_update_status_based_on_attachment(context, volume_id): """Update volume status according to attached instance id""" return IMPL.volume_update_status_based_on_attachment(context, volume_id) def volume_has_snapshots_filter(): return IMPL.volume_has_snapshots_filter() def volume_has_undeletable_snapshots_filter(): return IMPL.volume_has_undeletable_snapshots_filter() def volume_has_attachments_filter(): return IMPL.volume_has_attachments_filter() #################### def snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """Get all snapshots.""" return IMPL.snapshot_get_all(context, filters, marker, limit, sort_keys, sort_dirs, offset) def snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id, filters, marker, limit, sort_keys, sort_dirs, offset) def snapshot_get_by_host(context, host, filters=None): """Get all snapshots belonging to a host. :param host: Include include snapshots only for specified host. :param filters: Filters for the query in the form of key/value. """ return IMPL.snapshot_get_by_host(context, host, filters) def snapshot_get_all_for_cgsnapshot(context, project_id): """Get all snapshots belonging to a cgsnapshot.""" return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id) def snapshot_get_all_for_volume(context, volume_id): """Get all snapshots for a volume.""" return IMPL.snapshot_get_all_for_volume(context, volume_id) def snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. """ return IMPL.snapshot_update(context, snapshot_id, values) def snapshot_data_get_for_project(context, project_id, volume_type_id=None): """Get count and gigabytes used for snapshots for specified project.""" return IMPL.snapshot_data_get_for_project(context, project_id, volume_type_id) def snapshot_get_active_by_window(context, begin, end=None, project_id=None): """Get all the snapshots inside the window. Specifying a project_id will filter for a certain project. 
""" return IMPL.snapshot_get_active_by_window(context, begin, end, project_id) #################### def snapshot_metadata_get(context, snapshot_id): """Get all metadata for a snapshot.""" return IMPL.snapshot_metadata_get(context, snapshot_id) def snapshot_metadata_delete(context, snapshot_id, key): """Delete the given metadata item.""" return IMPL.snapshot_metadata_delete(context, snapshot_id, key) def snapshot_metadata_update(context, snapshot_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" return IMPL.snapshot_metadata_update(context, snapshot_id, metadata, delete) #################### def volume_metadata_get(context, volume_id): """Get all metadata for a volume.""" return IMPL.volume_metadata_get(context, volume_id) def volume_metadata_delete(context, volume_id, key, meta_type=common.METADATA_TYPES.user): """Delete the given metadata item.""" return IMPL.volume_metadata_delete(context, volume_id, key, meta_type) def volume_metadata_update(context, volume_id, metadata, delete, meta_type=common.METADATA_TYPES.user): """Update metadata if it exists, otherwise create it.""" return IMPL.volume_metadata_update(context, volume_id, metadata, delete, meta_type) ################## def volume_admin_metadata_get(context, volume_id): """Get all administration metadata for a volume.""" return IMPL.volume_admin_metadata_get(context, volume_id) def volume_admin_metadata_delete(context, volume_id, key): """Delete the given metadata item.""" return IMPL.volume_admin_metadata_delete(context, volume_id, key) def volume_admin_metadata_update(context, volume_id, metadata, delete, add=True, update=True): """Update metadata if it exists, otherwise create it.""" return IMPL.volume_admin_metadata_update(context, volume_id, metadata, delete, add, update) ################## def volume_type_create(context, values, projects=None): """Create a new volume type.""" return IMPL.volume_type_create(context, values, projects) def volume_type_update(context, volume_type_id, values): return IMPL.volume_type_update(context, volume_type_id, values) def volume_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): """Get all volume types. :param context: context to query under :param inactive: Include inactive volume types to the result set :param filters: Filters for the query in the form of key/value. :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param list_result: For compatibility, if list_result = True, return a list instead of dict. :is_public: Filter volume types based on visibility: * **True**: List public volume types only * **False**: List private volume types only * **None**: List both public and private volume types :returns: list/dict of matching volume types """ return IMPL.volume_type_get_all(context, inactive, filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=list_result) def volume_type_get(context, id, inactive=False, expected_fields=None): """Get volume type by id. :param context: context to query under :param id: Volume type id to get. 
:param inactive: Consider inactive volume types when searching :param expected_fields: Return those additional fields. Supported fields are: projects. :returns: volume type """ return IMPL.volume_type_get(context, id, inactive, expected_fields) def volume_type_get_by_name(context, name): """Get volume type by name.""" return IMPL.volume_type_get_by_name(context, name) def volume_types_get_by_name_or_id(context, volume_type_list): """Get volume types by name or id.""" return IMPL.volume_types_get_by_name_or_id(context, volume_type_list) def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): """Get volume types that are associated with specific qos specs.""" return IMPL.volume_type_qos_associations_get(context, qos_specs_id, inactive) def volume_type_qos_associate(context, type_id, qos_specs_id): """Associate a volume type with specific qos specs.""" return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id) def volume_type_qos_disassociate(context, qos_specs_id, type_id): """Disassociate a volume type from specific qos specs.""" return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id) def volume_type_qos_disassociate_all(context, qos_specs_id): """Disassociate all volume types from specific qos specs.""" return IMPL.volume_type_qos_disassociate_all(context, qos_specs_id) def volume_type_qos_specs_get(context, type_id): """Get all qos specs for given volume type.""" return IMPL.volume_type_qos_specs_get(context, type_id) def volume_type_destroy(context, id): """Delete a volume type.""" return IMPL.volume_type_destroy(context, id) def volume_get_active_by_window(context, begin, end=None, project_id=None): """Get all the volumes inside the window. Specifying a project_id will filter for a certain project. """ return IMPL.volume_get_active_by_window(context, begin, end, project_id) def volume_type_access_get_all(context, type_id): """Get all volume type access of a volume type.""" return IMPL.volume_type_access_get_all(context, type_id) def volume_type_access_add(context, type_id, project_id): """Add volume type access for project.""" return IMPL.volume_type_access_add(context, type_id, project_id) def volume_type_access_remove(context, type_id, project_id): """Remove volume type access for project.""" return IMPL.volume_type_access_remove(context, type_id, project_id) #################### def volume_type_extra_specs_get(context, volume_type_id): """Get all extra specs for a volume type.""" return IMPL.volume_type_extra_specs_get(context, volume_type_id) def volume_type_extra_specs_delete(context, volume_type_id, key): """Delete the given extra specs item.""" return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) def volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs): """Create or update volume type extra specs. This adds or modifies the key/value pairs specified in the extra specs dict argument. 
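    Example (illustrative; the spec keys shown are conventional scheduler
    capability keys, not a fixed schema)::

        volume_type_extra_specs_update_or_create(
            ctxt, volume_type_id,
            {'volume_backend_name': 'lvm-gold',
             'thin_provisioning_support': '<is> True'})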
""" return IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs) ################### def volume_type_encryption_get(context, volume_type_id, session=None): return IMPL.volume_type_encryption_get(context, volume_type_id, session) def volume_type_encryption_delete(context, volume_type_id): return IMPL.volume_type_encryption_delete(context, volume_type_id) def volume_type_encryption_create(context, volume_type_id, encryption_specs): return IMPL.volume_type_encryption_create(context, volume_type_id, encryption_specs) def volume_type_encryption_update(context, volume_type_id, encryption_specs): return IMPL.volume_type_encryption_update(context, volume_type_id, encryption_specs) def volume_type_encryption_volume_get(context, volume_type_id, session=None): return IMPL.volume_type_encryption_volume_get(context, volume_type_id, session) def volume_encryption_metadata_get(context, volume_id, session=None): return IMPL.volume_encryption_metadata_get(context, volume_id, session) ################### def qos_specs_create(context, values): """Create a qos_specs.""" return IMPL.qos_specs_create(context, values) def qos_specs_get(context, qos_specs_id): """Get all specification for a given qos_specs.""" return IMPL.qos_specs_get(context, qos_specs_id) def qos_specs_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all qos_specs.""" return IMPL.qos_specs_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def qos_specs_get_by_name(context, name): """Get all specification for a given qos_specs.""" return IMPL.qos_specs_get_by_name(context, name) def qos_specs_associations_get(context, qos_specs_id): """Get all associated volume types for a given qos_specs.""" return IMPL.qos_specs_associations_get(context, qos_specs_id) def qos_specs_associate(context, qos_specs_id, type_id): """Associate qos_specs from volume type.""" return IMPL.qos_specs_associate(context, qos_specs_id, type_id) def qos_specs_disassociate(context, qos_specs_id, type_id): """Disassociate qos_specs from volume type.""" return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id) def qos_specs_disassociate_all(context, qos_specs_id): """Disassociate qos_specs from all entities.""" return IMPL.qos_specs_disassociate_all(context, qos_specs_id) def qos_specs_delete(context, qos_specs_id): """Delete the qos_specs.""" return IMPL.qos_specs_delete(context, qos_specs_id) def qos_specs_item_delete(context, qos_specs_id, key): """Delete specified key in the qos_specs.""" return IMPL.qos_specs_item_delete(context, qos_specs_id, key) def qos_specs_update(context, qos_specs_id, specs): """Update qos specs. This adds or modifies the key/value pairs specified in the specs dict argument for a given qos_specs. 
""" return IMPL.qos_specs_update(context, qos_specs_id, specs) ################### def volume_glance_metadata_create(context, volume_id, key, value): """Update the Glance metadata for the specified volume.""" return IMPL.volume_glance_metadata_create(context, volume_id, key, value) def volume_glance_metadata_bulk_create(context, volume_id, metadata): """Add Glance metadata for specified volume (multiple pairs).""" return IMPL.volume_glance_metadata_bulk_create(context, volume_id, metadata) def volume_glance_metadata_get_all(context): """Return the glance metadata for all volumes.""" return IMPL.volume_glance_metadata_get_all(context) def volume_glance_metadata_get(context, volume_id): """Return the glance metadata for a volume.""" return IMPL.volume_glance_metadata_get(context, volume_id) def volume_glance_metadata_list_get(context, volume_id_list): """Return the glance metadata for a volume list.""" return IMPL.volume_glance_metadata_list_get(context, volume_id_list) def volume_snapshot_glance_metadata_get(context, snapshot_id): """Return the Glance metadata for the specified snapshot.""" return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id) def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): """Update the Glance metadata for a snapshot. This will copy all of the key:value pairs from the originating volume, to ensure that a volume created from the snapshot will retain the original metadata. """ return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id) def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): """Update the Glance metadata from a volume (created from a snapshot). This will copy all of the key:value pairs from the originating snapshot, to ensure that the Glance metadata from the original volume is retained. """ return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id) def volume_glance_metadata_delete_by_volume(context, volume_id): """Delete the glance metadata for a volume.""" return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id) def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): """Delete the glance metadata for a snapshot.""" return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) def volume_glance_metadata_copy_from_volume_to_volume(context, src_volume_id, volume_id): """Update the Glance metadata for a volume. Update the Glance metadata for a volume by copying all of the key:value pairs from the originating volume. This is so that a volume created from the volume (clone) will retain the original metadata. 
""" return IMPL.volume_glance_metadata_copy_from_volume_to_volume( context, src_volume_id, volume_id) ################### def quota_create(context, project_id, resource, limit, allocated=0): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit, allocated=allocated) def quota_get(context, project_id, resource): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_allocated_get_all_by_project(context, project_id): """Retrieve all allocated quotas associated with a given project.""" return IMPL.quota_allocated_get_all_by_project(context, project_id) def quota_allocated_update(context, project_id, resource, allocated): """Update allocated quota to subprojects or raise if it does not exist. :raises: cinder.exception.ProjectQuotaNotFound """ return IMPL.quota_allocated_update(context, project_id, resource, allocated) def quota_update(context, project_id, resource, limit): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit) def quota_update_resource(context, old_res, new_res): """Update resource of quotas.""" return IMPL.quota_update_resource(context, old_res, new_res) def quota_destroy(context, project_id, resource): """Destroy the quota or raise if it does not exist.""" return IMPL.quota_destroy(context, project_id, resource) ################### def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" return IMPL.quota_class_create(context, class_name, resource, limit) def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" return IMPL.quota_class_get(context, class_name, resource) def quota_class_get_default(context): """Retrieve all default quotas.""" return IMPL.quota_class_get_default(context) def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" return IMPL.quota_class_update(context, class_name, resource, limit) def quota_class_update_resource(context, resource, new_resource): """Update resource name in quota_class.""" return IMPL.quota_class_update_resource(context, resource, new_resource) def quota_class_destroy(context, class_name, resource): """Destroy the quota class or raise if it does not exist.""" return IMPL.quota_class_destroy(context, class_name, resource) def quota_class_destroy_all_by_name(context, class_name): """Destroy all quotas associated with a given quota class.""" return IMPL.quota_class_destroy_all_by_name(context, class_name) ################### def quota_usage_get(context, project_id, resource): """Retrieve a quota usage or raise if it does not exist.""" return IMPL.quota_usage_get(context, project_id, resource) def quota_usage_get_all_by_project(context, project_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project(context, project_id) ################### def quota_reserve(context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=None, is_allocated_reserve=False): """Check quotas 
and create appropriate reservations.""" return IMPL.quota_reserve(context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=project_id, is_allocated_reserve=is_allocated_reserve) def reservation_commit(context, reservations, project_id=None): """Commit quota reservations.""" return IMPL.reservation_commit(context, reservations, project_id=project_id) def reservation_rollback(context, reservations, project_id=None): """Roll back quota reservations.""" return IMPL.reservation_rollback(context, reservations, project_id=project_id) def quota_destroy_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_destroy_by_project(context, project_id) def reservation_expire(context): """Roll back any expired reservations.""" return IMPL.reservation_expire(context) def quota_usage_update_resource(context, old_res, new_res): """Update resource field in quota_usages.""" return IMPL.quota_usage_update_resource(context, old_res, new_res) ################### def backup_get(context, backup_id, read_deleted=None, project_only=True): """Get a backup or raise if it does not exist.""" return IMPL.backup_get(context, backup_id, read_deleted, project_only) def backup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all backups.""" return IMPL.backup_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def backup_get_all_by_host(context, host): """Get all backups belonging to a host.""" return IMPL.backup_get_all_by_host(context, host) def backup_create(context, values): """Create a backup from the values dictionary.""" return IMPL.backup_create(context, values) def backup_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all backups belonging to a project.""" return IMPL.backup_get_all_by_project(context, project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def backup_get_all_by_volume(context, volume_id, filters=None): """Get all backups belonging to a volume.""" return IMPL.backup_get_all_by_volume(context, volume_id, filters=filters) def backup_update(context, backup_id, values): """Set the given properties on a backup and update it. Raises NotFound if backup does not exist. 
""" return IMPL.backup_update(context, backup_id, values) def backup_destroy(context, backup_id): """Destroy the backup or raise if it does not exist.""" return IMPL.backup_destroy(context, backup_id) ################### def transfer_get(context, transfer_id): """Get a volume transfer record or raise if it does not exist.""" return IMPL.transfer_get(context, transfer_id) def transfer_get_all(context): """Get all volume transfer records.""" return IMPL.transfer_get_all(context) def transfer_get_all_by_project(context, project_id): """Get all volume transfer records for specified project.""" return IMPL.transfer_get_all_by_project(context, project_id) def transfer_create(context, values): """Create an entry in the transfers table.""" return IMPL.transfer_create(context, values) def transfer_destroy(context, transfer_id): """Destroy a record in the volume transfer table.""" return IMPL.transfer_destroy(context, transfer_id) def transfer_accept(context, transfer_id, user_id, project_id): """Accept a volume transfer.""" return IMPL.transfer_accept(context, transfer_id, user_id, project_id) ################### def consistencygroup_get(context, consistencygroup_id): """Get a consistencygroup or raise if it does not exist.""" return IMPL.consistencygroup_get(context, consistencygroup_id) def consistencygroup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all consistencygroups.""" return IMPL.consistencygroup_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def consistencygroup_create(context, values): """Create a consistencygroup from the values dictionary.""" return IMPL.consistencygroup_create(context, values) def consistencygroup_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all consistencygroups belonging to a project.""" return IMPL.consistencygroup_get_all_by_project(context, project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def consistencygroup_update(context, consistencygroup_id, values): """Set the given properties on a consistencygroup and update it. Raises NotFound if consistencygroup does not exist. """ return IMPL.consistencygroup_update(context, consistencygroup_id, values) def consistencygroup_destroy(context, consistencygroup_id): """Destroy the consistencygroup or raise if it does not exist.""" return IMPL.consistencygroup_destroy(context, consistencygroup_id) ################### def cgsnapshot_get(context, cgsnapshot_id): """Get a cgsnapshot or raise if it does not exist.""" return IMPL.cgsnapshot_get(context, cgsnapshot_id) def cgsnapshot_get_all(context, filters=None): """Get all cgsnapshots.""" return IMPL.cgsnapshot_get_all(context, filters) def cgsnapshot_create(context, values): """Create a cgsnapshot from the values dictionary.""" return IMPL.cgsnapshot_create(context, values) def cgsnapshot_get_all_by_group(context, group_id, filters=None): """Get all cgsnapshots belonging to a consistency group.""" return IMPL.cgsnapshot_get_all_by_group(context, group_id, filters) def cgsnapshot_get_all_by_project(context, project_id, filters=None): """Get all cgsnapshots belonging to a project.""" return IMPL.cgsnapshot_get_all_by_project(context, project_id, filters) def cgsnapshot_update(context, cgsnapshot_id, values): """Set the given properties on a cgsnapshot and update it. 
Raises NotFound if cgsnapshot does not exist. """ return IMPL.cgsnapshot_update(context, cgsnapshot_id, values) def cgsnapshot_destroy(context, cgsnapshot_id): """Destroy the cgsnapshot or raise if it does not exist.""" return IMPL.cgsnapshot_destroy(context, cgsnapshot_id) def purge_deleted_rows(context, age_in_days): """Purge deleted rows older than the given age from cinder tables. Raises InvalidParameterValue if age_in_days is incorrect. :returns: number of deleted rows """ return IMPL.purge_deleted_rows(context, age_in_days=age_in_days) def get_booleans_for_table(table_name): return IMPL.get_booleans_for_table(table_name) ################### def driver_initiator_data_update(context, initiator, namespace, updates): """Set and remove DriverInitiatorData entries specified by the updates dictionary.""" return IMPL.driver_initiator_data_update(context, initiator, namespace, updates) def driver_initiator_data_get(context, initiator, namespace): """Query for all DriverInitiatorData entries that match the initiator and namespace.""" return IMPL.driver_initiator_data_get(context, initiator, namespace) ################### def image_volume_cache_create(context, host, image_id, image_updated_at, volume_id, size): """Create a new image volume cache entry.""" return IMPL.image_volume_cache_create(context, host, image_id, image_updated_at, volume_id, size) def image_volume_cache_delete(context, volume_id): """Delete an image volume cache entry specified by volume id.""" return IMPL.image_volume_cache_delete(context, volume_id) def image_volume_cache_get_and_update_last_used(context, image_id, host): """Query for an image volume cache entry and refresh its last_used time.""" return IMPL.image_volume_cache_get_and_update_last_used(context, image_id, host) def image_volume_cache_get_by_volume_id(context, volume_id): """Query to see if a volume id is an image-volume contained in the cache.""" return IMPL.image_volume_cache_get_by_volume_id(context, volume_id) def image_volume_cache_get_all_for_host(context, host): """Query for all image volume cache entries for a host.""" return IMPL.image_volume_cache_get_all_for_host(context, host) ################### def get_model_for_versioned_object(versioned_object): return IMPL.get_model_for_versioned_object(versioned_object) def get_by_id(context, model, id, *args, **kwargs): return IMPL.get_by_id(context, model, id, *args, **kwargs) class Condition(object): """Class for normal condition values for conditional_update.""" def __init__(self, value, field=None): self.value = value # Field is optional and can be passed when getting the filter self.field = field def get_filter(self, model, field=None): return IMPL.condition_db_filter(model, self._get_field(field), self.value) def _get_field(self, field=None): # We must have a defined field on initialization or when called field = field or self.field if not field: raise ValueError(_('Condition has no field.')) return field class Not(Condition): """Class for negated condition values for conditional_update. By default NULL values will be treated the way Python treats None rather than the way SQL treats NULL. So, for example, when the values are (1, 2) the condition will evaluate to True for value 3 or NULL, instead of only for 3 as SQL would.
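    Example (illustrative; ``models`` refers to cinder.db.sqlalchemy.models,
    which callers import themselves)::

        # Flip a volume to 'deleting' only if it is not being used; rows
        # whose status is NULL also satisfy Not() because auto_none
        # defaults to True.
        conditional_update(ctxt, models.Volume,
                           {'status': 'deleting'},
                           {'status': Not(['in-use', 'attaching'])})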
""" def __init__(self, value, field=None, auto_none=True): super(Not, self).__init__(value, field) self.auto_none = auto_none def get_filter(self, model, field=None): # If implementation has a specific method use it if hasattr(IMPL, 'condition_not_db_filter'): return IMPL.condition_not_db_filter(model, self._get_field(field), self.value, self.auto_none) # Otherwise non negated object must adming ~ operator for not return ~super(Not, self).get_filter(model, field) class Case(object): """Class for conditional value selection for conditional_update.""" def __init__(self, whens, value=None, else_=None): self.whens = whens self.value = value self.else_ = else_ def is_orm_value(obj): """Check if object is an ORM field.""" return IMPL.is_orm_value(obj) def conditional_update(context, model, values, expected_values, filters=(), include_deleted='no', project_only=False): """Compare-and-swap conditional update. Update will only occur in the DB if conditions are met. We have 4 different condition types we can use in expected_values: - Equality: {'status': 'available'} - Inequality: {'status': vol_obj.Not('deleting')} - In range: {'status': ['available', 'error'] - Not in range: {'status': vol_obj.Not(['in-use', 'attaching']) Method accepts additional filters, which are basically anything that can be passed to a sqlalchemy query's filter method, for example: [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)] We can select values based on conditions using Case objects in the 'values' argument. For example: has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) case_values = db.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') db.conditional_update(context, models.Volume, {'status': case_values}, {'status': 'available'}) And we can use DB fields for example to store previous status in the corresponding field even though we don't know which value is in the db from those we allowed: db.conditional_update(context, models.Volume, {'status': 'deleting', 'previous_status': models.Volume.status}, {'status': ('available', 'error')}) WARNING: SQLAlchemy does not allow selecting order of SET clauses, so for now we cannot do things like {'previous_status': model.status, 'status': 'retyping'} because it will result in both previous_status and status being set to 'retyping'. Issue has been reported [1] and a patch to fix it [2] has been submitted. [1]: https://bitbucket.org/zzzeek/sqlalchemy/issues/3541/ [2]: https://github.com/zzzeek/sqlalchemy/pull/200 :param values: Dictionary of key-values to update in the DB. :param expected_values: Dictionary of conditions that must be met for the update to be executed. :param filters: Iterable with additional filters :param include_deleted: Should the update include deleted items, this is equivalent to read_deleted :param project_only: Should the query be limited to context's project. 
:returns: number of db rows that were updated """ return IMPL.conditional_update(context, model, values, expected_values, filters, include_deleted, project_only) cinder-8.0.0/cinder/api/0000775000567000056710000000000012701406543016143 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/schemas/0000775000567000056710000000000012701406543017566 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/schemas/atom-link.rng0000664000567000056710000000700112701406250022162 0ustar jenkinsjenkins00000000000000 [RELAX NG schema: XML markup lost in extraction; the only surviving text nodes were the regex facets [^:]*, .+/.+ and [A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})* plus the attribute names xml:base and xml:lang] cinder-8.0.0/cinder/api/schemas/v1.1/0000775000567000056710000000000012701406543020253 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/schemas/v1.1/qos_specs.rng0000664000567000056710000000023012701406250022750 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder/api/schemas/v1.1/qos_spec.rng0000664000567000056710000000070112701406250022570 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder/api/schemas/v1.1/metadata.rng0000664000567000056710000000043512701406250022540 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder/api/schemas/v1.1/qos_associations.rng0000664000567000056710000000024612701406250024341 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder/api/schemas/v1.1/limits.rng0000664000567000056710000000172312701406250022262 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder/api/schemas/v1.1/qos_association.rng0000664000567000056710000000050012701406250024157 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder/api/schemas/v1.1/extension.rng0000664000567000056710000000072112701406250022772 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder/api/schemas/v1.1/extensions.rng0000664000567000056710000000032212701406250023152 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder/api/versions.py0000664000567000056710000002152412701406250020364 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import copy import datetime from lxml import etree from oslo_config import cfg from cinder.api import extensions from cinder.api import openstack from cinder.api.openstack import api_version_request from cinder.api.openstack import wsgi from cinder.api.views import versions as views_versions from cinder.api import xmlutil CONF = cfg.CONF _LINKS = [{ "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/", }] _MEDIA_TYPES = [{ "base": "application/json", "type": "application/vnd.openstack.volume+json;version=1", }, {"base": "application/xml", "type": "application/vnd.openstack.volume+xml;version=1", }, ] _KNOWN_VERSIONS = { "v1.0": { "id": "v1.0", "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2014-06-28T12:20:21Z", "links": _LINKS, "media-types": _MEDIA_TYPES, }, "v2.0": { "id": "v2.0", "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2014-06-28T12:20:21Z", "links": _LINKS, "media-types": _MEDIA_TYPES, }, "v3.0": { "id": "v3.0", "status": "CURRENT", "version": api_version_request._MAX_API_VERSION, "min_version": api_version_request._MIN_API_VERSION, "updated": "2016-02-08T12:20:21Z", "links": _LINKS, "media-types": _MEDIA_TYPES, }, } class Versions(openstack.APIRouter): """Route versions requests.""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = create_resource() mapper.connect('versions', '/', controller=self.resources['versions'], action='all') mapper.redirect('', '/') class VersionsController(wsgi.Controller): def __init__(self): super(VersionsController, self).__init__(None) @wsgi.Controller.api_version('1.0') def index(self, req): # pylint: disable=E0102 """Return versions supported prior to the microversions epoch.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) known_versions.pop('v2.0') known_versions.pop('v3.0') return builder.build_versions(known_versions) @wsgi.Controller.api_version('2.0') # noqa def index(self, req): # pylint: disable=E0102 """Return versions supported prior to the microversions epoch.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) known_versions.pop('v1.0') known_versions.pop('v3.0') return builder.build_versions(known_versions) @wsgi.Controller.api_version('3.0') # noqa def index(self, req): # pylint: disable=E0102 """Return versions supported after the start of microversions.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) known_versions.pop('v1.0') known_versions.pop('v2.0') return builder.build_versions(known_versions) # NOTE (cknight): Calling the versions API without # /v1, /v2, or /v3 in the URL will lead to this unversioned # method, which should always return info about all # available versions. 
@wsgi.response(300) def all(self, req): """Return all known versions.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) return builder.build_versions(known_versions) class MediaTypesTemplateElement(xmlutil.TemplateElement): def will_render(self, datum): return 'media-types' in datum def make_version(elem): elem.set('id') elem.set('status') elem.set('updated') mts = MediaTypesTemplateElement('media-types') elem.append(mts) mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') mt.set('base') mt.set('type') xmlutil.make_links(elem, 'links') version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} class VersionTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('version', selector='version') make_version(root) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class VersionsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('versions') elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') make_version(elem) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class ChoicesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('choices') elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') make_version(elem) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class AtomSerializer(wsgi.XMLDictSerializer): NSMAP = {None: xmlutil.XMLNS_ATOM} def __init__(self, metadata=None, xmlns=None): self.metadata = metadata or {} if not xmlns: self.xmlns = wsgi.XML_NS_ATOM else: self.xmlns = xmlns def _get_most_recent_update(self, versions): recent = None for version in versions: updated = datetime.datetime.strptime(version['updated'], '%Y-%m-%dT%H:%M:%SZ') if not recent: recent = updated elif updated > recent: recent = updated return recent.strftime('%Y-%m-%dT%H:%M:%SZ') def _get_base_url(self, link_href): # Make sure no trailing / link_href = link_href.rstrip('/') return link_href.rsplit('/', 1)[0] + '/' def _create_feed(self, versions, feed_title, feed_id): feed = etree.Element('feed', nsmap=self.NSMAP) title = etree.SubElement(feed, 'title') title.set('type', 'text') title.text = feed_title # Set this updated to the most recently updated version recent = self._get_most_recent_update(versions) etree.SubElement(feed, 'updated').text = recent etree.SubElement(feed, 'id').text = feed_id link = etree.SubElement(feed, 'link') link.set('rel', 'self') link.set('href', feed_id) author = etree.SubElement(feed, 'author') etree.SubElement(author, 'name').text = 'Rackspace' etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' for version in versions: feed.append(self._create_version_entry(version)) return feed def _create_version_entry(self, version): entry = etree.Element('entry') etree.SubElement(entry, 'id').text = version['links'][0]['href'] title = etree.SubElement(entry, 'title') title.set('type', 'text') title.text = 'Version %s' % version['id'] etree.SubElement(entry, 'updated').text = version['updated'] for link in version['links']: link_elem = etree.SubElement(entry, 'link') link_elem.set('rel', link['rel']) link_elem.set('href', link['href']) if 'type' in link: link_elem.set('type', link['type']) content = etree.SubElement(entry, 'content') content.set('type', 'text') content.text = 'Version %s %s (%s)' % (version['id'], version['status'], version['updated']) return entry class VersionsAtomSerializer(AtomSerializer): def default(self, data): 
versions = data['versions'] feed_id = self._get_base_url(versions[0]['links'][0]['href']) feed = self._create_feed(versions, 'Available API Versions', feed_id) return self._to_xml(feed) class VersionAtomSerializer(AtomSerializer): def default(self, data): version = data['version'] feed_id = version['links'][0]['href'] feed = self._create_feed([version], 'About This Version', feed_id) return self._to_xml(feed) def create_resource(): return wsgi.Resource(VersionsController()) cinder-8.0.0/cinder/api/xmlutil.py0000664000567000056710000007334412701406250020221 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import re from lxml import etree import six from cinder.i18n import _ from cinder import utils XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0' XMLNS_ATOM = 'http://www.w3.org/2005/Atom' XMLNS_VOLUME_V1 = ('http://docs.openstack.org/api/openstack-block-storage/1.0/' 'content') XMLNS_VOLUME_V2 = ('http://docs.openstack.org/api/openstack-block-storage/2.0/' 'content') _split_pattern = re.compile(r'([^:{]*{[^}]*}[^:]*|[^:]+)') def validate_schema(xml, schema_name): if isinstance(xml, str): xml = etree.fromstring(xml) base_path = 'cinder/api/schemas/v1.1/' if schema_name in ('atom', 'atom-link'): base_path = 'cinder/api/schemas/' schema_path = os.path.join(utils.cinderdir(), '%s%s.rng' % (base_path, schema_name)) schema_doc = etree.parse(schema_path) relaxng = etree.RelaxNG(schema_doc) relaxng.assertValid(xml) class Selector(object): """Selects datum to operate on from an object.""" def __init__(self, *chain): """Initialize the selector. Each argument is a subsequent index into the object. """ self.chain = chain def __repr__(self): """Return a representation of the selector.""" return "Selector" + repr(self.chain) def __call__(self, obj, do_raise=False): """Select a datum to operate on. Selects the relevant datum within the object. :param obj: The object from which to select the object. :param do_raise: If False (the default), return None if the indexed datum does not exist. Otherwise, raise a KeyError. 
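        Example (illustrative; the selector chain walks nested indexable
        objects)::

            sel = Selector('server', 'name')
            sel({'server': {'name': 'vol-1'}})   # -> 'vol-1'
            sel({'server': {}})                  # -> None
            sel({'server': {}}, do_raise=True)   # raises KeyError('name')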
""" # Walk the selector list for elem in self.chain: # If it's callable, call it if callable(elem): obj = elem(obj) else: # Use indexing try: obj = obj[elem] except (KeyError, IndexError): # No sense going any further if do_raise: # Convert to a KeyError, for consistency raise KeyError(elem) return None # Return the finally-selected object return obj def get_items(obj): """Get items in obj.""" return list(obj.items()) class EmptyStringSelector(Selector): """Returns the empty string if Selector would return None.""" def __call__(self, obj, do_raise=False): """Returns empty string if the selected value does not exist.""" try: return super(EmptyStringSelector, self).__call__(obj, True) except KeyError: return "" class ConstantSelector(object): """Returns a constant.""" def __init__(self, value): """Initialize the selector. :param value: The value to return. """ self.value = value def __repr__(self): """Return a representation of the selector.""" return repr(self.value) def __call__(self, _obj, _do_raise=False): """Select a datum to operate on. Returns a constant value. Compatible with Selector.__call__(). """ return self.value class TemplateElement(object): """Represent an element in the template.""" def __init__(self, tag, attrib=None, selector=None, subselector=None, **extra): """Initialize an element. Initializes an element in the template. Keyword arguments specify attributes to be set on the element; values must be callables. See TemplateElement.set() for more information. :param tag: The name of the tag to create. :param attrib: An optional dictionary of element attributes. :param selector: An optional callable taking an object and optional boolean do_raise indicator and returning the object bound to the element. :param subselector: An optional callable taking an object and optional boolean do_raise indicator and returning the object bound to the element. This is used to further refine the datum object returned by selector in the event that it is a list of objects. """ # Convert selector into a Selector if selector is None: selector = Selector() elif not callable(selector): selector = Selector(selector) # Convert subselector into a Selector if subselector is not None and not callable(subselector): subselector = Selector(subselector) self.tag = tag self.selector = selector self.subselector = subselector self.attrib = {} self._text = None self._children = [] self._childmap = {} # Run the incoming attributes through set() so that they # become selectorized if not attrib: attrib = {} attrib.update(extra) for k, v in attrib.items(): self.set(k, v) def __repr__(self): """Return a representation of the template element.""" return ('<%s.%s %r at %#x>' % (self.__class__.__module__, self.__class__.__name__, self.tag, id(self))) def __len__(self): """Return the number of child elements.""" return len(self._children) def __contains__(self, key): """Determine whether a child node named by key exists.""" return key in self._childmap def __getitem__(self, idx): """Retrieve a child node by index or name.""" if isinstance(idx, six.string_types): # Allow access by node name return self._childmap[idx] else: return self._children[idx] def append(self, elem): """Append a child to the element.""" # Unwrap templates... 
elem = elem.unwrap() # Avoid duplications if elem.tag in self._childmap: raise KeyError(elem.tag) self._children.append(elem) self._childmap[elem.tag] = elem def extend(self, elems): """Append children to the element.""" # Pre-evaluate the elements elemmap = {} elemlist = [] for elem in elems: # Unwrap templates... elem = elem.unwrap() # Avoid duplications if elem.tag in self._childmap or elem.tag in elemmap: raise KeyError(elem.tag) elemmap[elem.tag] = elem elemlist.append(elem) # Update the children self._children.extend(elemlist) self._childmap.update(elemmap) def insert(self, idx, elem): """Insert a child element at the given index.""" # Unwrap templates... elem = elem.unwrap() # Avoid duplications if elem.tag in self._childmap: raise KeyError(elem.tag) self._children.insert(idx, elem) self._childmap[elem.tag] = elem def remove(self, elem): """Remove a child element.""" # Unwrap templates... elem = elem.unwrap() # Check if element exists if elem.tag not in self._childmap or self._childmap[elem.tag] != elem: raise ValueError(_('element is not a child')) self._children.remove(elem) del self._childmap[elem.tag] def get(self, key): """Get an attribute. Returns a callable which performs datum selection. :param key: The name of the attribute to get. """ return self.attrib[key] def set(self, key, value=None): """Set an attribute. :param key: The name of the attribute to set. :param value: A callable taking an object and optional boolean do_raise indicator and returning the datum bound to the attribute. If None, a Selector() will be constructed from the key. If a string, a Selector() will be constructed from the string. """ # Convert value to a selector if value is None: value = Selector(key) elif not callable(value): value = Selector(value) self.attrib[key] = value def keys(self): """Return the attribute names.""" return self.attrib.keys() def items(self): """Return the attribute names and values.""" return self.attrib.items() def unwrap(self): """Unwraps a template to return a template element.""" # We are a template element return self def wrap(self): """Wraps a template element to return a template.""" # Wrap in a basic Template return Template(self) def apply(self, elem, obj): """Apply text and attributes to an etree.Element. Applies the text and attribute instructions in the template element to an etree.Element instance. :param elem: An etree.Element instance. :param obj: The base object associated with this template element. """ # Start with the text... if self.text is not None: elem.text = six.text_type(self.text(obj)) # Now set up all the attributes... for key, value in self.attrib.items(): try: elem.set(key, six.text_type(value(obj, True))) except KeyError: # Attribute has no value, so don't include it pass def getAttrib(self, obj): """Get attribute.""" tmpattrib = {} # Now set up all the attributes... for key, value in self.attrib.items(): try: tmpattrib[key] = value(obj) except KeyError: # Attribute has no value, so don't include it pass return tmpattrib @staticmethod def _splitTagName(name): return _split_pattern.findall(name) def _render(self, parent, datum, patches, nsmap): """Internal rendering. Renders the template node into an etree.Element object. Returns the etree.Element object. :param parent: The parent etree.Element instance. :param datum: The datum associated with this template element. :param patches: A list of other template elements that must also be applied. :param nsmap: An optional namespace dictionary to be associated with the etree.Element instance. 
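        Note: the tag may be a colon-separated path (see _splitTagName());
        each path component is rendered as a nested element.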
""" # Allocate a node if callable(self.tag): tagname = self.tag(datum) else: tagname = self.tag # If the datum is None if datum is not None: tmpattrib = self.getAttrib(datum) else: tmpattrib = {} tagnameList = self._splitTagName(tagname) insertIndex = 0 # If parent is not none and has same tagname if parent is not None: for i in range(0, len(tagnameList)): tmpInsertPos = parent.find(tagnameList[i]) if tmpInsertPos is None: break elif parent.attrib != tmpattrib: break parent = tmpInsertPos insertIndex = i + 1 if insertIndex >= len(tagnameList): insertIndex = insertIndex - 1 # Create root elem elem = etree.Element(tagnameList[insertIndex], nsmap=nsmap) rootelem = elem subelem = elem # Create subelem for i in range((insertIndex + 1), len(tagnameList)): subelem = etree.SubElement(elem, tagnameList[i]) elem = subelem # If we have a parent, append the node to the parent if parent is not None: # If we can merge this element, then insert if insertIndex > 0: parent.insert(len(list(parent)), rootelem) else: parent.append(rootelem) # If the datum is None, do nothing else if datum is None: return rootelem # Apply this template element to the element self.apply(subelem, datum) # Additionally, apply the patches for patch in patches: patch.apply(subelem, datum) # We have fully rendered the element; return it return rootelem def render(self, parent, obj, patches=None, nsmap=None): """Render an object. Renders an object against this template node. Returns a list of two-item tuples, where the first item is an etree.Element instance and the second item is the datum associated with that instance. :param parent: The parent for the etree.Element instances. :param obj: The object to render this template element against. :param patches: A list of other template elements to apply when rendering this template element. :param nsmap: An optional namespace dictionary to attach to the etree.Element instances. """ patches = patches or [] # First, get the datum we're rendering data = None if obj is None else self.selector(obj) # Check if we should render at all if not self.will_render(data): return [] elif data is None: return [(self._render(parent, None, patches, nsmap), None)] # Make the data into a list if it isn't already if not isinstance(data, list): data = [data] elif parent is None: raise ValueError(_('root element selecting a list')) # Render all the elements elems = [] for datum in data: if self.subselector is not None: datum = self.subselector(datum) elems.append((self._render(parent, datum, patches, nsmap), datum)) # Return all the elements rendered, as well as the # corresponding datum for the next step down the tree return elems def will_render(self, datum): """Hook method. An overridable hook method to determine whether this template element will be rendered at all. By default, returns False (inhibiting rendering) if the datum is None. :param datum: The datum associated with this template element. """ # Don't render if datum is None return datum is not None def _text_get(self): """Template element text. Either None or a callable taking an object and optional boolean do_raise indicator and returning the datum bound to the text of the template element. """ return self._text def _text_set(self, value): # Convert value to a selector if value is not None and not callable(value): value = Selector(value) self._text = value def _text_del(self): self._text = None text = property(_text_get, _text_set, _text_del) def tree(self): """Return string representation of the template tree. 
Returns a representation of the template rooted at this element as a string, suitable for inclusion in debug logs. """ # Build the inner contents of the tag... contents = [self.tag, '!selector=%r' % self.selector] # Add the text... if self.text is not None: contents.append('!text=%r' % self.text) # Add all the other attributes for key, value in self.attrib.items(): contents.append('%s=%r' % (key, value)) # If there are no children, return it as a closed tag if len(self) == 0: return '<%s/>' % ' '.join([str(i) for i in contents]) # OK, recurse to our children children = [c.tree() for c in self] # Return the result return ('<%s>%s</%s>' % (' '.join(contents), ''.join(children), self.tag)) def SubTemplateElement(parent, tag, attrib=None, selector=None, subselector=None, **extra): """Create a template element as a child of another. Corresponds to the etree.SubElement interface. Parameters are as for TemplateElement, with the addition of the parent. """ # Convert attributes attrib = attrib or {} attrib.update(extra) # Get a TemplateElement elem = TemplateElement(tag, attrib=attrib, selector=selector, subselector=subselector) # Append the parent safely if parent is not None: parent.append(elem) return elem class Template(object): """Represent a template.""" def __init__(self, root, nsmap=None): """Initialize a template. :param root: The root element of the template. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. """ self.root = root.unwrap() if root is not None else None self.nsmap = nsmap or {} self.serialize_options = dict(encoding='UTF-8', xml_declaration=True) def _serialize(self, parent, obj, siblings, nsmap=None): """Internal serialization. Recursive routine to build a tree of etree.Element instances from an object based on the template. Returns the first etree.Element instance rendered, or None. :param parent: The parent etree.Element instance. Can be None. :param obj: The object to render. :param siblings: The TemplateElement instances against which to render the object. :param nsmap: An optional namespace dictionary to be associated with the etree.Element instance rendered. """ # First step, render the element elems = siblings[0].render(parent, obj, siblings[1:], nsmap) # Now, traverse all child elements seen = set() for idx, sibling in enumerate(siblings): for child in sibling: # Have we handled this child already? if child.tag in seen: continue seen.add(child.tag) # Determine the child's siblings nieces = [child] for sib in siblings[idx + 1:]: if child.tag in sib: nieces.append(sib[child.tag]) # Now call this function for all data elements recursively for elem, datum in elems: self._serialize(elem, datum, nieces) # Return the first element; at the top level, this will be the # root element if elems: return elems[0][0] def serialize(self, obj, *args, **kwargs): """Serialize an object. Serializes an object against the template. Returns a string with the serialized XML. Positional and keyword arguments are passed to etree.tostring(). :param obj: The object to serialize. """ elem = self.make_tree(obj) if elem is None: return '' for k, v in self.serialize_options.items(): kwargs.setdefault(k, v) # Serialize it into XML return etree.tostring(elem, *args, **kwargs) def make_tree(self, obj): """Create a tree. Serializes an object against the template. Returns an Element node with appropriate children. :param obj: The object to serialize.
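        A short, hypothetical example (element and key names are made up)::

            root = TemplateElement('volume', selector='volume')
            root.set('id')
            tree = Template(root).make_tree({'volume': {'id': 'vol-1'}})
            # tree is the etree.Element <volume id="vol-1"/>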
""" # If the template is empty, return the empty string if self.root is None: return None # Get the siblings and nsmap of the root element siblings = self._siblings() nsmap = self._nsmap() # Form the element tree return self._serialize(None, obj, siblings, nsmap) def _siblings(self): """Hook method for computing root siblings. An overridable hook method to return the siblings of the root element. By default, this is the root element itself. """ return [self.root] def _nsmap(self): """Hook method for computing the namespace dictionary. An overridable hook method to return the namespace dictionary. """ return self.nsmap.copy() def unwrap(self): """Unwraps a template to return a template element.""" # Return the root element return self.root def wrap(self): """Wraps a template element to return a template.""" # We are a template return self def apply(self, master): """Hook method for determining slave applicability. An overridable hook method used to determine if this template is applicable as a slave to a given master template. :param master: The master template to test. """ return True def tree(self): """Return string representation of the template tree. Returns a representation of the template as a string, suitable for inclusion in debug logs. """ return "%r: %s" % (self, self.root.tree()) class MasterTemplate(Template): """Represent a master template. Master templates are versioned derivatives of templates that additionally allow slave templates to be attached. Slave templates allow modification of the serialized result without directly changing the master. """ def __init__(self, root, version, nsmap=None): """Initialize a master template. :param root: The root element of the template. :param version: The version number of the template. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. """ super(MasterTemplate, self).__init__(root, nsmap) self.version = version self.slaves = [] def __repr__(self): """Return string representation of the template.""" return ("<%s.%s object version %s at %#x>" % (self.__class__.__module__, self.__class__.__name__, self.version, id(self))) def _siblings(self): """Hook method for computing root siblings. An overridable hook method to return the siblings of the root element. This is the root element plus the root elements of all the slave templates. """ return [self.root] + [slave.root for slave in self.slaves] def _nsmap(self): """Hook method for computing the namespace dictionary. An overridable hook method to return the namespace dictionary. The namespace dictionary is computed by taking the master template's namespace dictionary and updating it from all the slave templates. """ nsmap = self.nsmap.copy() for slave in self.slaves: nsmap.update(slave._nsmap()) return nsmap def attach(self, *slaves): """Attach one or more slave templates. Attaches one or more slave templates to the master template. Slave templates must have a root element with the same tag as the master template. The slave template's apply() method will be called to determine if the slave should be applied to this master; if it returns False, that slave will be skipped. (This allows filtering of slaves based on the version of the master template.) 
""" slave_list = [] for slave in slaves: slave = slave.wrap() # Make sure we have a tree match if slave.root.tag != self.root.tag: msg = (_("Template tree mismatch; adding slave %(slavetag)s " "to master %(mastertag)s") % {'slavetag': slave.root.tag, 'mastertag': self.root.tag}) raise ValueError(msg) # Make sure slave applies to this template if not slave.apply(self): continue slave_list.append(slave) # Add the slaves self.slaves.extend(slave_list) def copy(self): """Return a copy of this master template.""" # Return a copy of the MasterTemplate tmp = self.__class__(self.root, self.version, self.nsmap) tmp.slaves = self.slaves[:] return tmp class SlaveTemplate(Template): """Represent a slave template. Slave templates are versioned derivatives of templates. Each slave has a minimum version and optional maximum version of the master template to which they can be attached. """ def __init__(self, root, min_vers, max_vers=None, nsmap=None): """Initialize a slave template. :param root: The root element of the template. :param min_vers: The minimum permissible version of the master template for this slave template to apply. :param max_vers: An optional upper bound for the master template version. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. """ super(SlaveTemplate, self).__init__(root, nsmap) self.min_vers = min_vers self.max_vers = max_vers def __repr__(self): """Return string representation of the template.""" return ("<%s.%s object versions %s-%s at %#x>" % (self.__class__.__module__, self.__class__.__name__, self.min_vers, self.max_vers, id(self))) def apply(self, master): """Hook method for determining slave applicability. An overridable hook method used to determine if this template is applicable as a slave to a given master template. This version requires the master template to have a version number between min_vers and max_vers. :param master: The master template to test. """ # Does the master meet our minimum version requirement? if master.version < self.min_vers: return False # How about our maximum version requirement? if self.max_vers is not None and master.version > self.max_vers: return False return True class TemplateBuilder(object): """Template builder. This class exists to allow templates to be lazily built without having to build them each time they are needed. It must be subclassed, and the subclass must implement the construct() method, which must return a Template (or subclass) instance. The constructor will always return the template returned by construct(), or, if it has a copy() method, a copy of that template. """ _tmpl = None def __new__(cls, copy=True): """Construct and return a template. :param copy: If True (the default), a copy of the template will be constructed and returned, if possible. """ # Do we need to construct the template? if cls._tmpl is None: tmp = super(TemplateBuilder, cls).__new__(cls) # Construct the template cls._tmpl = tmp.construct() # If the template has a copy attribute, return the result of # calling it if copy and hasattr(cls._tmpl, 'copy'): return cls._tmpl.copy() # Return the template return cls._tmpl def construct(self): """Construct a template. Called to construct a template instance, which it must return. Only called once. 
""" raise NotImplementedError(_("subclasses must implement construct()!")) def make_links(parent, selector=None): """Attach an Atom element to the parent.""" elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM, selector=selector) elem.set('rel') elem.set('type') elem.set('href') # Just for completeness... return elem def make_flat_dict(name, selector=None, subselector=None, ns=None): """Utility for simple XML templates. Simple templates are templates that traditionally used XMLDictSerializer with no metadata. Returns a template element where the top-level element has the given tag name, and where sub-elements have tag names derived from the object's keys and text derived from the object's values. This only works for flat dictionary objects, not dictionaries containing nested lists or dictionaries. """ # Set up the names we need... if ns is None: elemname = name tagname = Selector(0) else: elemname = '{%s}%s' % (ns, name) tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0]) if selector is None: selector = name # Build the root element root = TemplateElement(elemname, selector=selector, subselector=subselector) # Build an element to represent all the keys and values elem = SubTemplateElement(root, tagname, selector=get_items) elem.text = 1 # Return the template return root cinder-8.0.0/cinder/api/__init__.py0000664000567000056710000000240612701406250020251 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging import paste.urlmap from cinder.i18n import _LW CONF = cfg.CONF LOG = logging.getLogger(__name__) def root_app_factory(loader, global_conf, **local_conf): if CONF.enable_v1_api: LOG.warning(_LW('The v1 api is deprecated and is not under active ' 'development. You should set enable_v1_api=false ' 'and enable_v3_api=true in your cinder.conf file.')) return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) cinder-8.0.0/cinder/api/urlmap.py0000664000567000056710000002473112701406250020017 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from oslo_log import log as logging import paste.urlmap try: from urllib.request import parse_http_list # pylint: disable=E0611 except ImportError: from urllib2 import parse_http_list # Python 2 from cinder.api.openstack import wsgi _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile( r';\s*([^\s;=]+|%s)\s*' r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re)) LOG = logging.getLogger(__name__) def unquote_header_value(value): """Unquotes a header value. This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] return value def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] :param value: a string with a list header. :return: :class:`list` """ result = [] for item in parse_http_list(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_options_header(value): """Parse 'Content-Type'-like header into a tuple. Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('Content-Type: text/html; mimetype=text/html') ('Content-Type:', {'mimetype': 'text/html'}) :param value: the header to parse. :return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value) yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = next(parts)[0] extra = dict(parts) return name, extra class Accept(object): def __init__(self, value): self._content_types = [parse_options_header(v) for v in parse_list_header(value)] def best_match(self, supported_content_types): # FIXME: Should we have a more sophisticated matching algorithm that # takes into account the version as well? 
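        # NOTE: quality comes from the 'q' parameter of each Accept entry
        # (default 1); on a quality tie the more specific mask wins, e.g.
        # 'application/json' beats '*/*'.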
best_quality = -1 best_content_type = None best_params = {} best_match = '*/*' for content_type in supported_content_types: for content_mask, params in self._content_types: try: quality = float(params.get('q', 1)) except ValueError: continue if quality < best_quality: continue elif best_quality == quality: if best_match.count('*') <= content_mask.count('*'): continue if self._match_mask(content_mask, content_type): best_quality = quality best_content_type = content_type best_params = params best_match = content_mask return best_content_type, best_params def content_type_params(self, best_content_type): """Find parameters in Accept header for given content type.""" for content_type, params in self._content_types: if best_content_type == content_type: return params return {} def _match_mask(self, mask, content_type): if '*' not in mask: return content_type == mask if mask == '*/*': return True mask_major = mask[:-2] content_type_major = content_type.split('/', 1)[0] return content_type_major == mask_major def urlmap_factory(loader, global_conf, **local_conf): if 'not_found_app' in local_conf: not_found_app = local_conf.pop('not_found_app') else: not_found_app = global_conf.get('not_found_app') if not_found_app: not_found_app = loader.get_app(not_found_app, global_conf=global_conf) urlmap = URLMap(not_found_app=not_found_app) for path, app_name in local_conf.items(): path = paste.urlmap.parse_path_expression(path) app = loader.get_app(app_name, global_conf=global_conf) urlmap[path] = app return urlmap class URLMap(paste.urlmap.URLMap): def _match(self, host, port, path_info): """Find longest match for a given URL path.""" for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url return None, None def _set_script_name(self, app, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url return app(environ, start_response) return wrap def _munge_path(self, app, path_info, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url environ['PATH_INFO'] = path_info[len(app_url):] return app(environ, start_response) return wrap def _path_strategy(self, host, port, path_info): """Check path suffix for MIME type and path prefix for API version.""" mime_type = app = app_url = None parts = path_info.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: mime_type = possible_type parts = path_info.split('/') if len(parts) > 1: possible_app, possible_app_url = self._match(host, port, path_info) # Don't use prefix if it ends up matching default if possible_app and possible_app_url: app_url = possible_app_url app = self._munge_path(possible_app, path_info, app_url) return mime_type, app, app_url def _content_type_strategy(self, host, port, environ): """Check Content-Type header for API version.""" app = None params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return app def _accept_strategy(self, host, port, environ, supported_content_types): """Check Accept header for best matching MIME type and API version.""" accept = Accept(environ.get('HTTP_ACCEPT', '')) app = None # Find the best match in the Accept header mime_type, params = accept.best_match(supported_content_types) if 'version' in params: app, 
app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return mime_type, app def __call__(self, environ, start_response): host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() if ':' in host: host, port = host.split(':', 1) else: if environ['wsgi.url_scheme'] == 'http': port = '80' else: port = '443' path_info = environ['PATH_INFO'] path_info = self.normalize_url(path_info, False)[1] # The MIME type for the response is determined in one of two ways: # 1) URL path suffix (eg /servers/detail.json) # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) # The API version is determined in one of three ways: # 1) URL path prefix (eg /v1.1/tenant/servers/detail) # 2) Content-Type header (eg application/json;version=1.1) # 3) Accept header (eg application/json;q=0.8;version=1.1) supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) mime_type, app, app_url = self._path_strategy(host, port, path_info) # Accept application/atom+xml for the index query of each API # version mount point as well as the root index if (app_url and app_url + '/' == path_info) or path_info == '/': supported_content_types.append('application/atom+xml') if not app: app = self._content_type_strategy(host, port, environ) if not mime_type or not app: possible_mime_type, possible_app = self._accept_strategy( host, port, environ, supported_content_types) if possible_mime_type and not mime_type: mime_type = possible_mime_type if possible_app and not app: app = possible_app if not mime_type: mime_type = 'application/json' if not app: # Didn't match a particular version, probably matches default app, app_url = self._match(host, port, path_info) if app: app = self._munge_path(app, path_info, app_url) if app: environ['cinder.best_content_type'] = mime_type return app(environ, start_response) environ['paste.urlmap_object'] = self return self.not_found_application(environ, start_response) cinder-8.0.0/cinder/api/common.py0000664000567000056710000004125012701406250020002 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import enum from oslo_config import cfg from oslo_log import log as logging from six.moves import urllib import webob from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder.common import constants from cinder import exception from cinder.i18n import _ import cinder.policy from cinder import utils api_common_opts = [ cfg.IntOpt('osapi_max_limit', default=1000, help='The maximum number of items that a collection ' 'resource returns in a single response'), cfg.StrOpt('osapi_volume_base_URL', help='Base URL that will be presented to users in links ' 'to the OpenStack Volume API', deprecated_name='osapi_compute_link_prefix'), cfg.ListOpt('query_volume_filters', default=['name', 'status', 'metadata', 'availability_zone', 'bootable'], help="Volume filter options which " "non-admin user could use to " "query volumes. 
Default values " "are: ['name', 'status', " "'metadata', 'availability_zone' ," "'bootable']") ] CONF = cfg.CONF CONF.register_opts(api_common_opts) LOG = logging.getLogger(__name__) XML_NS_V1 = 'http://docs.openstack.org/api/openstack-block-storage/1.0/content' XML_NS_V2 = 'http://docs.openstack.org/api/openstack-block-storage/2.0/content' METADATA_TYPES = enum.Enum('METADATA_TYPES', 'user image') # Regex that matches alphanumeric characters, periods, hyphens, # colons and underscores: # ^ assert position at start of the string # [\w\.\-\:\_] match expression # $ assert position at end of the string VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE) def validate_key_names(key_names_list): """Validate each item of the list to match key name regex.""" for key_name in key_names_list: if not VALID_KEY_NAME_REGEX.match(key_name): return False return True def validate_policy(context, action): try: cinder.policy.enforce_action(context, action) return True except exception.PolicyNotAuthorized: return False def get_pagination_params(params, max_limit=None): """Return marker, limit, offset tuple from request. :param params: `wsgi.Request`'s GET dictionary, possibly containing 'marker', 'limit', and 'offset' variables. 'marker' is the id of the last element the client has seen, 'limit' is the maximum number of items to return and 'offset' is the number of items to skip from the marker or from the first element. If 'limit' is not specified, or > max_limit, we default to max_limit. Negative values for either offset or limit will cause exc.HTTPBadRequest() exceptions to be raised. If no offset is present we'll default to 0 and if no marker is present we'll default to None. :max_limit: Max value 'limit' return value can take :returns: Tuple (marker, limit, offset) """ max_limit = max_limit or CONF.osapi_max_limit limit = _get_limit_param(params, max_limit) marker = _get_marker_param(params) offset = _get_offset_param(params) return marker, limit, offset def _get_limit_param(params, max_limit=None): """Extract integer limit from request's dictionary or fail. Defaults to max_limit if not present and returns max_limit if present 'limit' is greater than max_limit. """ max_limit = max_limit or CONF.osapi_max_limit try: limit = int(params.pop('limit', max_limit)) except ValueError: msg = _('limit param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _('limit param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) limit = min(limit, max_limit) return limit def _get_marker_param(params): """Extract marker id from request's dictionary (defaults to None).""" return params.pop('marker', None) def _get_offset_param(params): """Extract offset id from request's dictionary (defaults to 0) or fail.""" offset = params.pop('offset', 0) return utils.validate_integer(offset, 'offset', 0, constants.DB_MAX_INT) def limited(items, request, max_limit=None): """Return a slice of items according to requested offset and limit. :param items: A sliceable entity :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' GET variables. 'offset' is where to start in the list, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either offset or limit will cause exc.HTTPBadRequest() exceptions to be raised. 
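    For example, offset=2 and limit=3 returns items[2:5].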
:kwarg max_limit: The maximum number of items to return from 'items' """ max_limit = max_limit or CONF.osapi_max_limit marker, limit, offset = get_pagination_params(request.GET.copy(), max_limit) range_end = offset + (limit or max_limit) return items[offset:range_end] def limited_by_marker(items, request, max_limit=None): """Return a slice of items according to the requested marker and limit.""" max_limit = max_limit or CONF.osapi_max_limit marker, limit, __ = get_pagination_params(request.GET.copy(), max_limit) start_index = 0 if marker: start_index = -1 for i, item in enumerate(items): if 'flavorid' in item: if item['flavorid'] == marker: start_index = i + 1 break elif item['id'] == marker or item.get('uuid') == marker: start_index = i + 1 break if start_index < 0: msg = _('marker [%s] not found') % marker raise webob.exc.HTTPBadRequest(explanation=msg) range_end = start_index + limit return items[start_index:range_end] def get_sort_params(params, default_key='created_at', default_dir='desc'): """Retrieves sort keys/directions parameters. Processes the parameters to create a list of sort keys and sort directions that correspond to either the 'sort' parameter or the 'sort_key' and 'sort_dir' parameter values. The value of the 'sort' parameter is a comma- separated list of sort keys, each key is optionally appended with ':'. Note that the 'sort_key' and 'sort_dir' parameters are deprecated in kilo and an exception is raised if they are supplied with the 'sort' parameter. The sort parameters are removed from the request parameters by this function. :param params: webob.multidict of request parameters (from cinder.api.openstack.wsgi.Request.params) :param default_key: default sort key value, added to the list if no sort keys are supplied :param default_dir: default sort dir value, added to the list if the corresponding key does not have a direction specified :returns: list of sort keys, list of sort dirs :raise webob.exc.HTTPBadRequest: If both 'sort' and either 'sort_key' or 'sort_dir' are supplied parameters """ if 'sort' in params and ('sort_key' in params or 'sort_dir' in params): msg = _("The 'sort_key' and 'sort_dir' parameters are deprecated and " "cannot be used with the 'sort' parameter.") raise webob.exc.HTTPBadRequest(explanation=msg) sort_keys = [] sort_dirs = [] if 'sort' in params: for sort in params.pop('sort').strip().split(','): sort_key, _sep, sort_dir = sort.partition(':') if not sort_dir: sort_dir = default_dir sort_keys.append(sort_key.strip()) sort_dirs.append(sort_dir.strip()) else: sort_key = params.pop('sort_key', default_key) sort_dir = params.pop('sort_dir', default_dir) sort_keys.append(sort_key.strip()) sort_dirs.append(sort_dir.strip()) return sort_keys, sort_dirs def get_request_url(request): url = request.application_url headers = request.headers forwarded = headers.get('X-Forwarded-Host') if forwarded: url_parts = list(urllib.parse.urlsplit(url)) url_parts[1] = re.split(',\s?', forwarded)[-1] url = urllib.parse.urlunsplit(url_parts).rstrip('/') return url def remove_version_from_href(href): """Removes the first api version from the href. 
Given: 'http://www.cinder.com/v1.1/123' Returns: 'http://www.cinder.com/123' Given: 'http://www.cinder.com/v1.1' Returns: 'http://www.cinder.com' """ parsed_url = urllib.parse.urlsplit(href) url_parts = parsed_url.path.split('/', 2) # NOTE: this should match vX.X or vX expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') if expression.match(url_parts[1]): del url_parts[1] new_path = '/'.join(url_parts) if new_path == parsed_url.path: msg = 'href %s does not contain version' % href LOG.debug(msg) raise ValueError(msg) parsed_url = list(parsed_url) parsed_url[2] = new_path return urllib.parse.urlunsplit(parsed_url) class ViewBuilder(object): """Model API responses as dictionaries.""" _collection_name = None def _get_links(self, request, identifier): return [{"rel": "self", "href": self._get_href_link(request, identifier), }, {"rel": "bookmark", "href": self._get_bookmark_link(request, identifier), }] def _get_next_link(self, request, identifier, collection_name): """Return href string with proper limit and marker params.""" params = request.params.copy() params["marker"] = identifier prefix = self._update_link_prefix(get_request_url(request), CONF.osapi_volume_base_URL) url = os.path.join(prefix, request.environ["cinder.context"].project_id, collection_name) return "%s?%s" % (url, urllib.parse.urlencode(params)) def _get_href_link(self, request, identifier): """Return an href string pointing to this object.""" prefix = self._update_link_prefix(get_request_url(request), CONF.osapi_volume_base_URL) return os.path.join(prefix, request.environ["cinder.context"].project_id, self._collection_name, str(identifier)) def _get_bookmark_link(self, request, identifier): """Create a URL that refers to a specific resource.""" base_url = remove_version_from_href(get_request_url(request)) base_url = self._update_link_prefix(base_url, CONF.osapi_volume_base_URL) return os.path.join(base_url, request.environ["cinder.context"].project_id, self._collection_name, str(identifier)) def _get_collection_links(self, request, items, collection_name, item_count=None, id_key="uuid"): """Retrieve 'next' link, if applicable. The next link is included if we are returning as many items as we can, given the restrictions of limit optional request parameter and osapi_max_limit configuration parameter as long as we are returning some elements. So we return next link if: 1) 'limit' param is specified and equal to the number of items. 2) 'limit' param is NOT specified and the number of items is equal to CONF.osapi_max_limit. 
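        For example, with osapi_max_limit = 1000 and no 'limit' parameter,
        a page of exactly 1000 items is given a 'next' link, while a page
        of 999 items is not.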
:param request: API request :param items: List of collection items :param collection_name: Name of collection, used to generate the next link for a pagination query :param item_count: Length of the list of the original collection items :param id_key: Attribute key used to retrieve the unique ID, used to generate the next link marker for a pagination query :returns: links """ item_count = item_count or len(items) limit = _get_limit_param(request.GET.copy()) if len(items) and limit <= item_count: return self._generate_next_link(items, id_key, request, collection_name) return [] def _generate_next_link(self, items, id_key, request, collection_name): links = [] last_item = items[-1] if id_key in last_item: last_item_id = last_item[id_key] else: last_item_id = last_item["id"] links.append({ "rel": "next", "href": self._get_next_link(request, last_item_id, collection_name), }) return links def _update_link_prefix(self, orig_url, prefix): if not prefix: return orig_url url_parts = list(urllib.parse.urlsplit(orig_url)) prefix_parts = list(urllib.parse.urlsplit(prefix)) url_parts[0:2] = prefix_parts[0:2] url_parts[2] = prefix_parts[2] + url_parts[2] return urllib.parse.urlunsplit(url_parts).rstrip('/') class MetadataDeserializer(wsgi.MetadataXMLDeserializer): def deserialize(self, text): dom = utils.safe_minidom_parse_string(text) metadata_node = self.find_first_child_named(dom, "metadata") metadata = self.extract_metadata(metadata_node) return {'body': {'metadata': metadata}} class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): def deserialize(self, text): dom = utils.safe_minidom_parse_string(text) metadata_item = self.extract_metadata(dom) return {'body': {'meta': metadata_item}} class MetadataXMLDeserializer(wsgi.XMLDeserializer): def extract_metadata(self, metadata_node): """Marshal the metadata attribute of a parsed request.""" if metadata_node is None: return {} metadata = {} for meta_node in self.find_children_named(metadata_node, "meta"): key = meta_node.getAttribute("key") metadata[key] = self.extract_text(meta_node) return metadata def _extract_metadata_container(self, datastring): dom = utils.safe_minidom_parse_string(datastring) metadata_node = self.find_first_child_named(dom, "metadata") metadata = self.extract_metadata(metadata_node) return {'body': {'metadata': metadata}} def create(self, datastring): return self._extract_metadata_container(datastring) def update_all(self, datastring): return self._extract_metadata_container(datastring) def update(self, datastring): dom = utils.safe_minidom_parse_string(datastring) metadata_item = self.extract_metadata(dom) return {'body': {'meta': metadata_item}} metadata_nsmap = {None: xmlutil.XMLNS_V11} class MetaItemTemplate(xmlutil.TemplateBuilder): def construct(self): sel = xmlutil.Selector('meta', xmlutil.get_items, 0) root = xmlutil.TemplateElement('meta', selector=sel) root.set('key', 0) root.text = 1 return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) class MetadataTemplateElement(xmlutil.TemplateElement): def will_render(self, datum): return True class MetadataTemplate(xmlutil.TemplateBuilder): def construct(self): root = MetadataTemplateElement('metadata', selector='metadata') elem = xmlutil.SubTemplateElement(root, 'meta', selector=xmlutil.get_items) elem.set('key', 0) elem.text = 1 return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) cinder-8.0.0/cinder/api/openstack/0000775000567000056710000000000012701406543020132 5ustar 
jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/openstack/versioned_method.py0000664000567000056710000000317112701406250024037 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import utils class VersionedMethod(utils.ComparableMixin): def __init__(self, name, start_version, end_version, experimental, func): """Versioning information for a single method. Minimum and maximums are inclusive. :param name: Name of the method :param start_version: Minimum acceptable version :param end_version: Maximum acceptable_version :param func: Method to call """ self.name = name self.start_version = start_version self.end_version = end_version self.experimental = experimental self.func = func def __str__(self): args = { 'name': self.name, 'start': self.start_version, 'end': self.end_version } return ("Version Method %(name)s: min: %(start)s, max: %(end)s" % args) def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self.start_version cinder-8.0.0/cinder/api/openstack/__init__.py0000664000567000056710000001144312701406250022241 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack API controllers. """ from oslo_log import log as logging from oslo_service import wsgi as base_wsgi import routes from cinder.api.openstack import wsgi from cinder.i18n import _, _LW LOG = logging.getLogger(__name__) class APIMapper(routes.Mapper): def routematch(self, url=None, environ=None): if url is "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) def connect(self, *args, **kwargs): # NOTE(inhye): Default the format part of a route to only accept json # and xml so it doesn't eat all characters after a '.' # in the url. 
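        # For example, a name such as 'backup.2015' is matched whole rather
        # than being split into ('backup', format='2015').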
kwargs.setdefault('requirements', {}) if not kwargs['requirements'].get('format'): kwargs['requirements']['format'] = 'json|xml' return routes.Mapper.connect(self, *args, **kwargs) class ProjectMapper(APIMapper): def resource(self, member_name, collection_name, **kwargs): if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '{project_id}/' else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) class APIRouter(base_wsgi.Router): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = None # override in subclasses @classmethod def factory(cls, global_config, **local_config): """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have.""" return cls() def __init__(self, ext_mgr=None): if ext_mgr is None: if self.ExtensionManager: ext_mgr = self.ExtensionManager() else: raise Exception(_("Must specify an ExtensionManager class")) mapper = ProjectMapper() self.resources = {} self._setup_routes(mapper, ext_mgr) self._setup_ext_routes(mapper, ext_mgr) self._setup_extensions(ext_mgr) super(APIRouter, self).__init__(mapper) def _setup_ext_routes(self, mapper, ext_mgr): for resource in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource.collection) wsgi_resource = wsgi.Resource(resource.controller) self.resources[resource.collection] = wsgi_resource kargs = dict( controller=wsgi_resource, collection=resource.collection_actions, member=resource.member_actions) if resource.parent: kargs['parent_resource'] = resource.parent mapper.resource(resource.collection, resource.collection, **kargs) if resource.custom_routes_fn: resource.custom_routes_fn(mapper, wsgi_resource) def _setup_extensions(self, ext_mgr): for extension in ext_mgr.get_controller_extensions(): collection = extension.collection controller = extension.controller if collection not in self.resources: LOG.warning(_LW('Extension %(ext_name)s: Cannot extend ' 'resource %(collection)s: No such resource'), {'ext_name': extension.extension.name, 'collection': collection}) continue LOG.debug('Extension %(ext_name)s extending resource: ' '%(collection)s', {'ext_name': extension.extension.name, 'collection': collection}) resource = self.resources[collection] resource.register_actions(controller) resource.register_extensions(controller) def _setup_routes(self, mapper, ext_mgr): raise NotImplementedError cinder-8.0.0/cinder/api/openstack/wsgi.py0000664000567000056710000016605112701406257021470 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
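# NOTE: this module provides the request/response plumbing for the
# API: the Request wrapper, the JSON/XML (de)serializers, and the
# microversion negotiation done in Request.set_api_version_request().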
import functools import inspect import math import time from xml.dom import minidom from xml.parsers import expat from lxml import etree from oslo_log import log as logging from oslo_log import versionutils from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import strutils import six import webob import webob.exc from cinder.api.openstack import api_version_request as api_version from cinder.api.openstack import versioned_method from cinder import exception from cinder import i18n from cinder.i18n import _, _LE, _LI from cinder import utils from cinder.wsgi import common as wsgi XML_NS_V1 = 'http://docs.openstack.org/api/openstack-block-storage/1.0/content' XML_NS_V2 = 'http://docs.openstack.org/api/openstack-block-storage/2.0/content' XML_NS_ATOM = 'http://www.w3.org/2005/Atom' XML_WARNING = False LOG = logging.getLogger(__name__) SUPPORTED_CONTENT_TYPES = ( 'application/json', 'application/vnd.openstack.volume+json', 'application/xml', 'application/vnd.openstack.volume+xml', ) _MEDIA_TYPE_MAP = { 'application/vnd.openstack.volume+json': 'json', 'application/json': 'json', 'application/vnd.openstack.volume+xml': 'xml', 'application/xml': 'xml', 'application/atom+xml': 'atom', } # name of attribute to keep version method information VER_METHOD_ATTR = 'versioned_methods' # Name of header used by clients to request a specific version # of the REST API API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version' VOLUME_SERVICE = 'volume' class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def __init__(self, *args, **kwargs): super(Request, self).__init__(*args, **kwargs) self._resource_cache = {} if not hasattr(self, 'api_version_request'): self.api_version_request = api_version.APIVersionRequest() def cache_resource(self, resource_to_cache, id_attribute='id', name=None): """Cache the given resource. Allow API methods to cache objects, such as results from a DB query, to be used by API extensions within the same API request. The resource_to_cache can be a list or an individual resource, but ultimately resources are cached individually using the given id_attribute. Different resources types might need to be cached during the same request, they can be cached using the name parameter. For example: Controller 1: request.cache_resource(db_volumes, 'volumes') request.cache_resource(db_volume_types, 'types') Controller 2: db_volumes = request.cached_resource('volumes') db_type_1 = request.cached_resource_by_id('1', 'types') If no name is given, a default name will be used for the resource. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ if not isinstance(resource_to_cache, list): resource_to_cache = [resource_to_cache] if not name: name = self.path cached_resources = self._resource_cache.setdefault(name, {}) for resource in resource_to_cache: cached_resources[resource[id_attribute]] = resource def cached_resource(self, name=None): """Get the cached resources cached under the given resource name. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. 
:returns: a dict of id_attribute to the resource from the cached resources, an empty map if an empty collection was cached, or None if nothing has been cached yet under this name """ if not name: name = self.path if name not in self._resource_cache: # Nothing has been cached for this key yet return None return self._resource_cache[name] def cached_resource_by_id(self, resource_id, name=None): """Get a resource by ID cached under the given resource name. Allow an API extension to get a previously stored object within the same API request. This is basically a convenience method to lookup by ID on the dictionary of all cached resources. Note that the object data will be slightly stale. :returns: the cached resource or None if the item is not in the cache """ resources = self.cached_resource(name) if not resources: # Nothing has been cached yet for this key yet return None return resources.get(resource_id) def cache_db_items(self, key, items, item_key='id'): """Get cached database items. Allow API methods to store objects from a DB query to be used by API extensions within the same API request. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ self.cache_resource(items, item_key, key) def get_db_items(self, key): """Get database items. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. """ return self.cached_resource(key) def get_db_item(self, key, item_key): """Get database item. Allow an API extension to get a previously stored object within the same API request. Note that the object data will be slightly stale. """ return self.get_db_items(key).get(item_key) def cache_db_volumes(self, volumes): # NOTE(mgagne) Cache it twice for backward compatibility reasons self.cache_db_items('volumes', volumes, 'id') self.cache_db_items(self.path, volumes, 'id') def cache_db_volume(self, volume): # NOTE(mgagne) Cache it twice for backward compatibility reasons self.cache_db_items('volumes', [volume], 'id') self.cache_db_items(self.path, [volume], 'id') def get_db_volumes(self): return (self.get_db_items('volumes') or self.get_db_items(self.path)) def get_db_volume(self, volume_id): return (self.get_db_item('volumes', volume_id) or self.get_db_item(self.path, volume_id)) def cache_db_volume_types(self, volume_types): self.cache_db_items('volume_types', volume_types, 'id') def cache_db_volume_type(self, volume_type): self.cache_db_items('volume_types', [volume_type], 'id') def get_db_volume_types(self): return self.get_db_items('volume_types') def get_db_volume_type(self, volume_type_id): return self.get_db_item('volume_types', volume_type_id) def cache_db_snapshots(self, snapshots): self.cache_db_items('snapshots', snapshots, 'id') def cache_db_snapshot(self, snapshot): self.cache_db_items('snapshots', [snapshot], 'id') def get_db_snapshots(self): return self.get_db_items('snapshots') def get_db_snapshot(self, snapshot_id): return self.get_db_item('snapshots', snapshot_id) def cache_db_backups(self, backups): self.cache_db_items('backups', backups, 'id') def cache_db_backup(self, backup): self.cache_db_items('backups', [backup], 'id') def get_db_backups(self): return self.get_db_items('backups') def get_db_backup(self, backup_id): return self.get_db_item('backups', backup_id) def best_match_content_type(self): """Determine the requested response content-type.""" if 'cinder.best_content_type' not in self.environ: # Calculate the best 
MIME type content_type = None # Check URL path suffix parts = self.path.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in SUPPORTED_CONTENT_TYPES: content_type = possible_type if not content_type: content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) self.environ['cinder.best_content_type'] = (content_type or 'application/json') return self.environ['cinder.best_content_type'] def get_content_type(self): """Determine content type of the request body. Does not do any body introspection, only checks header """ if "Content-Type" not in self.headers: return None allowed_types = SUPPORTED_CONTENT_TYPES content_type = self.content_type if content_type not in allowed_types: raise exception.InvalidContentType(content_type=content_type) return content_type def best_match_language(self): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None all_languages = i18n.get_available_languages() return self.accept_language.best_match(all_languages) def set_api_version_request(self, url): """Set API version request based on the request header information. Microversions starts with /v3, so if a client sends a request for version 1.0 or 2.0 with the /v3 endpoint, throw an exception. Sending a header with any microversion to a /v1 or /v2 endpoint will be ignored. Note that a microversion must be set for the legacy endpoints. This will appear as 1.0 and 2.0 for /v1 and /v2. """ if API_VERSION_REQUEST_HEADER in self.headers and 'v3' in url: hdr_string = self.headers[API_VERSION_REQUEST_HEADER] # 'latest' is a special keyword which is equivalent to requesting # the maximum version of the API supported hdr_string_list = hdr_string.split(",") volume_version = None for hdr in hdr_string_list: if VOLUME_SERVICE in hdr: service, volume_version = hdr.split() break if not volume_version: raise exception.VersionNotFoundForAPIMethod( version=volume_version) if volume_version == 'latest': self.api_version_request = api_version.max_api_version() else: self.api_version_request = api_version.APIVersionRequest( volume_version) # Check that the version requested is within the global # minimum/maximum of supported API versions if not self.api_version_request.matches( api_version.min_api_version(), api_version.max_api_version()): raise exception.InvalidGlobalAPIVersion( req_ver=self.api_version_request.get_string(), min_ver=api_version.min_api_version().get_string(), max_ver=api_version.max_api_version().get_string()) else: if 'v1' in url: self.api_version_request = api_version.legacy_api_version1() elif 'v2' in url: self.api_version_request = api_version.legacy_api_version2() else: self.api_version_request = api_version.APIVersionRequest( api_version._MIN_API_VERSION) class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, six.text_type(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, 
datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class XMLDeserializer(TextDeserializer): def __init__(self, metadata=None): """Initialize XMLDeserializer. :param metadata: information needed to deserialize xml into a dictionary. """ super(XMLDeserializer, self).__init__() self.metadata = metadata or {} def _from_xml(self, datastring): plurals = set(self.metadata.get('plurals', {})) try: node = utils.safe_minidom_parse_string(datastring).childNodes[0] return {node.nodeName: self._from_xml_node(node, plurals)} except expat.ExpatError: msg = _("cannot understand XML") raise exception.MalformedRequestBody(reason=msg) def _from_xml_node(self, node, listnames): """Convert a minidom node to a simple Python type. :param listnames: list of XML node names whose subnodes should be considered list items. """ if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: return node.childNodes[0].nodeValue elif node.nodeName in listnames: return [self._from_xml_node(n, listnames) for n in node.childNodes] else: result = dict() for attr in node.attributes.keys(): result[attr] = node.attributes[attr].nodeValue for child in node.childNodes: if child.nodeType != node.TEXT_NODE: result[child.nodeName] = self._from_xml_node(child, listnames) return result def find_first_child_named_in_namespace(self, parent, namespace, name): """Search a node's children for the first child with a given name.""" for node in parent.childNodes: if (node.localName == name and node.namespaceURI and node.namespaceURI == namespace): return node return None def find_first_child_named(self, parent, name): """Search a node's children for the first child with a given name.""" for node in parent.childNodes: if node.nodeName == name: return node return None def find_children_named(self, parent, name): """Return all of a node's children that have the given name.""" for node in parent.childNodes: if node.nodeName == name: yield node def extract_text(self, node): """Get the text field contained by the given node.""" text = [] # Cannot assume entire text will be in a single child node because SAX # parsers may split contiguous character data into multiple chunks for child in node.childNodes: if child.nodeType == child.TEXT_NODE: text.append(child.nodeValue) return ''.join(text) def default(self, datastring): return {'body': self._from_xml(datastring)} class MetadataXMLDeserializer(XMLDeserializer): def extract_metadata(self, metadata_node): """Marshal the metadata attribute of a parsed request.""" metadata = {} if metadata_node is not None: for meta_node in self.find_children_named(metadata_node, "meta"): key = meta_node.getAttribute("key") metadata[key] = self.extract_text(meta_node) return metadata class DictSerializer(ActionDispatcher): """Default response body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON response body serialization.""" def default(self, data): return jsonutils.dump_as_bytes(data) class XMLDictSerializer(DictSerializer): def __init__(self, metadata=None, xmlns=None): """Initialize XMLDictSerializer. :param metadata: information needed to deserialize xml into a dictionary.
:param xmlns: XML namespace to include with serialized xml """ super(XMLDictSerializer, self).__init__() self.metadata = metadata or {} self.xmlns = xmlns def default(self, data): # We expect data to contain a single key which is the XML root. root_key = list(data.keys())[0] doc = minidom.Document() node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) return self.to_xml_string(node) def to_xml_string(self, node, has_atom=False): self._add_xmlns(node, has_atom) return node.toxml('UTF-8') # NOTE (ameade): the has_atom flag should be removed after all of the # xml serializers and view builders have been updated to the current # spec, which requires all responses to include xmlns:atom; the has_atom # flag is to prevent current tests from breaking def _add_xmlns(self, node, has_atom=False): if self.xmlns is not None: node.setAttribute('xmlns', self.xmlns) if has_atom: node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") def _to_xml_node(self, doc, metadata, nodename, data): """Recursive method to convert data members to XML nodes.""" result = doc.createElement(nodename) # Set the xml namespace if one is specified # TODO(justinsb): We could also use prefixes on the keys xmlns = metadata.get('xmlns', None) if xmlns: result.setAttribute('xmlns', xmlns) # TODO(bcwaldon): accomplish this without a type-check if isinstance(data, list): collections = metadata.get('list_collections', {}) if nodename in collections: metadata = collections[nodename] for item in data: node = doc.createElement(metadata['item_name']) node.setAttribute(metadata['item_key'], str(item)) result.appendChild(node) return result singular = metadata.get('plurals', {}).get(nodename, None) if singular is None: if nodename.endswith('s'): singular = nodename[:-1] else: singular = 'item' for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) # TODO(bcwaldon): accomplish this without a type-check elif isinstance(data, dict): collections = metadata.get('dict_collections', {}) if nodename in collections: metadata = collections[nodename] for k, v in sorted(data.items()): node = doc.createElement(metadata['item_name']) node.setAttribute(metadata['item_key'], str(k)) text = doc.createTextNode(str(v)) node.appendChild(text) result.appendChild(node) return result attrs = metadata.get('attributes', {}).get(nodename, {}) for k, v in sorted(data.items()): if k in attrs: result.setAttribute(k, str(v)) else: node = self._to_xml_node(doc, metadata, k, v) result.appendChild(node) else: # Type is atom node = doc.createTextNode(str(data)) result.appendChild(node) return result def _create_link_nodes(self, xml_doc, links): link_nodes = [] for link in links: link_node = xml_doc.createElement('atom:link') link_node.setAttribute('rel', link['rel']) link_node.setAttribute('href', link['href']) if 'type' in link: link_node.setAttribute('type', link['type']) link_nodes.append(link_node) return link_nodes def _to_xml(self, root): """Convert the xml object to an xml string.""" return etree.tostring(root, encoding='UTF-8', xml_declaration=True) def serializers(**serializers): """Attaches serializers to a method. This decorator associates a dictionary of serializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped.
""" def decorator(func): if not hasattr(func, 'wsgi_serializers'): func.wsgi_serializers = {} func.wsgi_serializers.update(serializers) return func return decorator def deserializers(**deserializers): """Attaches deserializers to a method. This decorator associates a dictionary of deserializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_deserializers'): func.wsgi_deserializers = {} func.wsgi_deserializers.update(deserializers) return func return decorator def response(code): """Attaches response code to a method. This decorator associates a response code with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): func.wsgi_code = code return func return decorator class ResponseObject(object): """Bundles a response object with appropriate serializers. Object that app methods may return in order to bind alternate serializers with a response object to be serialized. Its use is optional. """ def __init__(self, obj, code=None, headers=None, **serializers): """Binds serializers with an object. Takes keyword arguments akin to the @serializer() decorator for specifying serializers. Serializers specified will be given preference over default serializers or method-specific serializers on return. """ self.obj = obj self.serializers = serializers self._default_code = 200 self._code = code self._headers = headers or {} self.serializer = None self.media_type = None def __getitem__(self, key): """Retrieves a header with the given name.""" return self._headers[key.lower()] def __setitem__(self, key, value): """Sets a header with the given name to the given value.""" self._headers[key.lower()] = value def __delitem__(self, key): """Deletes the header with the given name.""" del self._headers[key.lower()] def _bind_method_serializers(self, meth_serializers): """Binds method serializers with the response object. Binds the method serializers with the response object. Serializers specified to the constructor will take precedence over serializers specified to this method. :param meth_serializers: A dictionary with keys mapping to response types and values containing serializer objects. """ # We can't use update because that would be the wrong # precedence for mtype, serializer in meth_serializers.items(): self.serializers.setdefault(mtype, serializer) def get_serializer(self, content_type, default_serializers=None): """Returns the serializer for the wrapped object. Returns the serializer for the wrapped object subject to the indicated content type. If no serializer matching the content type is attached, an appropriate serializer drawn from the default serializers will be used. If no appropriate serializer is available, raises InvalidContentType. """ default_serializers = default_serializers or {} try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in self.serializers: return mtype, self.serializers[mtype] else: return mtype, default_serializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def preserialize(self, content_type, default_serializers=None): """Prepares the serializer that will be used to serialize. Determines the serializer that will be used and prepares an instance of it for later call. This allows the serializer to be accessed by extensions for, e.g., template extension. 
""" mtype, serializer = self.get_serializer(content_type, default_serializers) self.media_type = mtype self.serializer = serializer() def attach(self, **kwargs): """Attach slave templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) def serialize(self, request, content_type, default_serializers=None): """Serializes the wrapped object. Utility method for serializing the wrapped object. Returns a webob.Response object. """ if self.serializer: serializer = self.serializer else: _mtype, _serializer = self.get_serializer(content_type, default_serializers) serializer = _serializer() response = webob.Response() response.status_int = self.code for hdr, value in self._headers.items(): response.headers[hdr] = six.text_type(value) response.headers['Content-Type'] = six.text_type(content_type) if self.obj is not None: body = serializer.serialize(self.obj) if isinstance(body, six.text_type): body = body.encode('utf-8') response.body = body return response @property def code(self): """Retrieve the response status.""" return self._code or self._default_code @property def headers(self): """Retrieve the headers.""" return self._headers.copy() def action_peek_json(body): """Determine action to invoke.""" try: decoded = jsonutils.loads(body) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) # Make sure there's exactly one key... if len(decoded) != 1: msg = _("too many body keys") raise exception.MalformedRequestBody(reason=msg) # Return the action and the decoded body... return list(decoded.keys())[0] def action_peek_xml(body): """Determine action to invoke.""" dom = utils.safe_minidom_parse_string(body) action_node = dom.childNodes[0] return action_node.tagName class ResourceExceptionHandler(object): """Context manager to handle Resource exceptions. Used when processing exceptions generated by API implementation methods (or their extensions). Converts most exceptions to Fault exceptions, with the appropriate logging. """ def __enter__(self): return None def __exit__(self, ex_type, ex_value, ex_traceback): if not ex_value: return True if isinstance(ex_value, exception.NotAuthorized): msg = six.text_type(ex_value) raise Fault(webob.exc.HTTPForbidden(explanation=msg)) elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod): raise elif isinstance(ex_value, exception.Invalid): raise Fault(exception.ConvertedException( code=ex_value.code, explanation=six.text_type(ex_value))) elif isinstance(ex_value, TypeError): exc_info = (ex_type, ex_value, ex_traceback) LOG.error(_LE( 'Exception handling resource: %s'), ex_value, exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value)) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value)) raise Fault(ex_value) # We didn't handle the exception return False class Resource(wsgi.Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). 
They may raise a webob.exc exception or return a dict, which will be serialized by the requested content type. Exceptions derived from webob.exc.HTTPException will be automatically wrapped in Fault() to provide API friendly error responses. """ support_api_request_version = True def __init__(self, controller, action_peek=None, **deserializers): """Initialize Resource. :param controller: object that implements methods created by the routes lib :param action_peek: dictionary of routines for peeking into an action request body to determine the desired action """ self.controller = controller default_deserializers = dict(xml=XMLDeserializer, json=JSONDeserializer) default_deserializers.update(deserializers) self.default_deserializers = default_deserializers self.default_serializers = dict(xml=XMLDictSerializer, json=JSONDictSerializer) self.action_peek = dict(xml=action_peek_xml, json=action_peek_json) self.action_peek.update(action_peek or {}) # Copy over the actions dictionary self.wsgi_actions = {} if controller: self.register_actions(controller) # Save a mapping of extensions self.wsgi_extensions = {} self.wsgi_action_extensions = {} def register_actions(self, controller): """Registers controller actions with this resource.""" actions = getattr(controller, 'wsgi_actions', {}) for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(controller, method_name) def register_extensions(self, controller): """Registers controller extensions with this resource.""" extensions = getattr(controller, 'wsgi_extensions', []) for method_name, action_name in extensions: # Look up the extending method extension = getattr(controller, method_name) if action_name: # Extending an action... if action_name not in self.wsgi_action_extensions: self.wsgi_action_extensions[action_name] = [] self.wsgi_action_extensions[action_name].append(extension) else: # Extending a regular method if method_name not in self.wsgi_extensions: self.wsgi_extensions[method_name] = [] self.wsgi_extensions[method_name].append(extension) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" # NOTE(Vek): Check for get_action_args() override in the # controller if hasattr(self.controller, 'get_action_args'): return self.controller.get_action_args(request_environment) try: args = request_environment['wsgiorg.routing_args'][1].copy() except (KeyError, IndexError, AttributeError): return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args def get_body(self, request): if len(request.body) == 0: LOG.debug("Empty body provided in request") return None, '' try: content_type = request.get_content_type() except exception.InvalidContentType: LOG.debug("Unrecognized Content-Type provided in request") return None, '' if not content_type: LOG.debug("No Content-Type provided in request") return None, '' return content_type, request.body def deserialize(self, meth, content_type, body): meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in meth_deserializers: deserializer = meth_deserializers[mtype] else: deserializer = self.default_deserializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) return deserializer().deserialize(body) def pre_process_extensions(self, extensions, request, action_args): # List of callables for post-processing extensions post = [] for ext in extensions: if inspect.isgeneratorfunction(ext):
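# A generator-function extension runs everything before its yield as # pre-processing (driven by next(gen) below) and everything after it as # post-processing (driven by ext.send(resp_obj) in post_process_extensions). # A minimal sketch of such an extension, using hypothetical helper names # for illustration only: # def sample_extension(req, **kwargs): # do_preprocessing(req) # resp_obj = yield # do_postprocessing(resp_obj)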
response = None # If it's a generator function, the part before the # yield is the preprocessing stage try: with ResourceExceptionHandler(): gen = ext(req=request, **action_args) response = next(gen) except Fault as ex: response = ex # We had a response... if response: return response, [] # No response, queue up generator for post-processing post.append(gen) else: # Regular functions only perform post-processing post.append(ext) # Run post-processing in the reverse order return None, reversed(post) def post_process_extensions(self, extensions, resp_obj, request, action_args): for ext in extensions: response = None if inspect.isgenerator(ext): # If it's a generator, run the second half of # processing try: with ResourceExceptionHandler(): response = ext.send(resp_obj) except StopIteration: # Normal exit of generator continue except Fault as ex: response = ex else: # Regular functions get post-processing... try: with ResourceExceptionHandler(): response = ext(req=request, resp_obj=resp_obj, **action_args) except exception.VersionNotFoundForAPIMethod: # If an attached extension (@wsgi.extends) for the # method has no version match, it's not an error. We # just don't run the extends code continue except Fault as ex: response = ex # We had a response... if response: return response return None @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info(_LI("%(method)s %(url)s"), {"method": request.method, "url": request.url}) if self.support_api_request_version: # Set the version of the API requested based on the header try: request.set_api_version_request(request.url) except exception.InvalidAPIVersionString as e: return Fault(webob.exc.HTTPBadRequest( explanation=six.text_type(e))) except exception.InvalidGlobalAPIVersion as e: return Fault(webob.exc.HTTPNotAcceptable( explanation=six.text_type(e))) # Identify the action, its arguments, and the requested # content type action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) content_type, body = self.get_body(request) accept = request.best_match_content_type() # NOTE(Vek): Splitting the function up this way allows for # auditing by external tools that wrap the existing # function. If we try to audit __call__(), we can # run into trouble due to the @webob.dec.wsgify() # decorator. return self._process_stack(request, action, action_args, content_type, body, accept) def _is_legacy_endpoint(self, request): version_str = request.api_version_request.get_string() return '1.0' in version_str or '2.0' in version_str def _process_stack(self, request, action, action_args, content_type, body, accept): """Implement the processing stack.""" # Get the implementing method try: meth, extensions = self.get_method(request, action, content_type, body) except (AttributeError, TypeError): return Fault(webob.exc.HTTPNotFound()) except KeyError as ex: msg = _("There is no such action: %s") % ex.args[0] return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) if body: msg = ("Action: '%(action)s', calling method: %(meth)s, body: " "%(body)s") % {'action': action, 'body': six.text_type(body), 'meth': six.text_type(meth)} LOG.debug(strutils.mask_password(msg)) else: LOG.debug("Calling method '%(meth)s'", {'meth': six.text_type(meth)}) # Now, deserialize the request body...
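# As an illustration (hypothetical values), a JSON body of # '{"volume": {"size": 1}}' comes back from deserialize() as # {'body': {'volume': {'size': 1}}}, which is then merged into # action_args below.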
try: if content_type: contents = self.deserialize(meth, content_type, body) else: contents = {} except exception.InvalidContentType: msg = _("Unsupported Content-Type") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Update the action args action_args.update(contents) project_id = action_args.pop("project_id", None) context = request.environ.get('cinder.context') if (context and project_id and (project_id != context.project_id)): msg = _("Malformed request url") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Run pre-processing extensions response, post = self.pre_process_extensions(extensions, request, action_args) if not response: try: with ResourceExceptionHandler(): action_result = self.dispatch(meth, request, action_args) except Fault as ex: response = ex if not response: # No exceptions; convert action_result into a # ResponseObject resp_obj = None if isinstance(action_result, dict) or action_result is None: resp_obj = ResponseObject(action_result) elif isinstance(action_result, ResponseObject): resp_obj = action_result else: response = action_result # Run post-processing extensions if resp_obj: _set_request_id_header(request, resp_obj) # Do a preserialize to set up the response object serializers = getattr(meth, 'wsgi_serializers', {}) resp_obj._bind_method_serializers(serializers) if hasattr(meth, 'wsgi_code'): resp_obj._default_code = meth.wsgi_code resp_obj.preserialize(accept, self.default_serializers) # Process post-processing extensions response = self.post_process_extensions(post, resp_obj, request, action_args) if resp_obj and not response: response = resp_obj.serialize(request, accept, self.default_serializers) try: msg_dict = dict(url=request.url, status=response.status_int) msg = _LI("%(url)s returned with HTTP %(status)d") except AttributeError as e: msg_dict = dict(url=request.url, e=e) msg = _LI("%(url)s returned a fault: %(e)s") LOG.info(msg, msg_dict) if hasattr(response, 'headers'): for hdr, val in response.headers.items(): # Headers must be utf-8 strings try: # python 2.x response.headers[hdr] = val.encode('utf-8') except Exception: # python 3.x response.headers[hdr] = six.text_type(val) if (not request.api_version_request.is_null() and not self._is_legacy_endpoint(request)): response.headers[API_VERSION_REQUEST_HEADER] = ( VOLUME_SERVICE + ' ' + request.api_version_request.get_string()) response.headers['Vary'] = API_VERSION_REQUEST_HEADER return response def get_method(self, request, action, content_type, body): """Look up the action-specific method and its extensions.""" # Look up the method try: if not self.controller: meth = getattr(self, action) else: meth = getattr(self.controller, action) except AttributeError as e: with excutils.save_and_reraise_exception(e) as ctxt: if (not self.wsgi_actions or action not in ['action', 'create', 'delete', 'update']): LOG.exception(_LE('Get method error.')) else: ctxt.reraise = False else: return meth, self.wsgi_extensions.get(action, []) if action == 'action': # OK, it's an action; figure out which action... 
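# The action name is the single top-level key of the request body, so a # POST to .../action with an illustrative body of # {"os-extend": {"new_size": 2}} peeks to the action name 'os-extend'.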
mtype = _MEDIA_TYPE_MAP.get(content_type) action_name = self.action_peek[mtype](body) LOG.debug("Action body: %s", body) else: action_name = action # Look up the action method return (self.wsgi_actions[action_name], self.wsgi_action_extensions.get(action_name, [])) def dispatch(self, method, request, action_args): """Dispatch a call to the action-specific method.""" try: return method(req=request, **action_args) except exception.VersionNotFoundForAPIMethod: # We deliberately don't return any message information # about the exception to the user so it looks as if # the method is simply not implemented. return Fault(webob.exc.HTTPNotFound()) def action(name): """Mark a function as an action. The given name will be taken as the action key in the body. This is also overloaded to allow extensions to provide non-extending definitions of create and delete operations. """ def decorator(func): func.wsgi_action = name return func return decorator def extends(*args, **kwargs): """Indicate a function extends an operation. Can be used as either:: @extends def index(...): pass or as:: @extends(action='resize') def _action_resize(...): pass """ def decorator(func): # Store enough information to find what we're extending func.wsgi_extends = (func.__name__, kwargs.get('action')) return func # If we have positional arguments, call the decorator if args: return decorator(*args) # OK, return the decorator instead return decorator class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. """ def __new__(mcs, name, bases, cls_dict): """Adds the wsgi_actions dictionary to the class.""" # Find all actions actions = {} extensions = [] versioned_methods = None # start with wsgi actions from base classes for base in bases: actions.update(getattr(base, 'wsgi_actions', {})) if base.__name__ == "Controller": # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute # between API controller class creations. This allows us # to use a class decorator on the API methods that doesn't # require naming explicitly what method is being versioned as # it can be implicit based on the method decorated. It is a bit # ugly. if VER_METHOD_ATTR in base.__dict__: versioned_methods = getattr(base, VER_METHOD_ATTR) delattr(base, VER_METHOD_ATTR) for key, value in cls_dict.items(): if not callable(value): continue if getattr(value, 'wsgi_action', None): actions[value.wsgi_action] = key elif getattr(value, 'wsgi_extends', None): extensions.append(value.wsgi_extends) # Add the actions and extensions to the class dict cls_dict['wsgi_actions'] = actions cls_dict['wsgi_extensions'] = extensions if versioned_methods: cls_dict[VER_METHOD_ATTR] = versioned_methods return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, cls_dict) @six.add_metaclass(ControllerMetaclass) class Controller(object): """Default controller.""" _view_builder_class = None def __init__(self, view_builder=None): """Initialize controller with a view builder instance.""" if view_builder: self._view_builder = view_builder elif self._view_builder_class: self._view_builder = self._view_builder_class() else: self._view_builder = None def __getattribute__(self, key): def version_select(*args, **kwargs): """Select and call the matching version of the specified method. Look for the method which matches the name supplied and version constraints and calls it with the supplied arguments. 
:returns: Returns the result of the method called :raises: VersionNotFoundForAPIMethod if there is no method which matches the name and version constraints """ # The first arg to all versioned methods is always the request # object. The version for the request is attached to the # request object if len(args) == 0: version_request = kwargs['req'].api_version_request else: version_request = args[0].api_version_request func_list = self.versioned_methods[key] for func in func_list: if version_request.matches_versioned_method(func): # Update the version_select wrapper function so # other decorator attributes like wsgi.response # are still respected. functools.update_wrapper(version_select, func.func) return func.func(self, *args, **kwargs) # No version match raise exception.VersionNotFoundForAPIMethod( version=version_request) try: version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR) except AttributeError: # No versioning on this class return object.__getattribute__(self, key) if (version_meth_dict and key in object.__getattribute__(self, VER_METHOD_ATTR)): return version_select return object.__getattribute__(self, key) # NOTE(cyeoh): This decorator MUST appear first (the outermost # decorator) on an API method for it to work correctly @classmethod def api_version(cls, min_ver, max_ver=None, experimental=False): """Decorator for versioning API methods. Add the decorator to any method which takes a request object as the first parameter and belongs to a class which inherits from wsgi.Controller. :param min_ver: string representing minimum version :param max_ver: optional string representing maximum version :param experimental: flag indicating whether the API is experimental """ def decorator(f): obj_min_ver = api_version.APIVersionRequest(min_ver) if max_ver: obj_max_ver = api_version.APIVersionRequest(max_ver) else: obj_max_ver = api_version.APIVersionRequest() # Add to list of versioned methods registered func_name = f.__name__ new_func = versioned_method.VersionedMethod( func_name, obj_min_ver, obj_max_ver, experimental, f) func_dict = getattr(cls, VER_METHOD_ATTR, {}) if not func_dict: setattr(cls, VER_METHOD_ATTR, func_dict) func_list = func_dict.get(func_name, []) if not func_list: func_dict[func_name] = func_list func_list.append(new_func) # Ensure the list is sorted by minimum version (reversed) # so later when we work through the list in order we find # the method with the latest version that supports # the version requested. # TODO(cyeoh): Add check to ensure that there are no overlapping # ranges of valid versions as that is ambiguous func_list.sort(reverse=True) return f return decorator @staticmethod def is_valid_body(body, entity_name): if not (body and entity_name in body): return False def is_dict(d): try: d.get(None) return True except AttributeError: return False if not is_dict(body[entity_name]): return False return True @staticmethod def assert_valid_body(body, entity_name): # NOTE: After v1 api is deprecated need to merge 'is_valid_body' and # 'assert_valid_body' into one method. Right now it is not # possible to modify 'is_valid_body' to raise exception because # in case of V1 api when 'is_valid_body' returns False, # 'HTTPUnprocessableEntity' exception is getting raised and in # V2 api 'HTTPBadRequest' exception is getting raised.
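# Illustrative behaviour of the check below: # is_valid_body({'transfer': {'name': 'x'}}, 'transfer') -> True # is_valid_body({}, 'transfer') -> False # is_valid_body({'transfer': 'x'}, 'transfer') -> False # The latter two shapes make assert_valid_body raise HTTPBadRequest.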
if not Controller.is_valid_body(body, entity_name): raise webob.exc.HTTPBadRequest( explanation=_("Missing required element '%s' in " "request body.") % entity_name) @staticmethod def validate_name_and_description(body): name = body.get('name') if name is not None: if isinstance(name, six.string_types): body['name'] = name.strip() try: utils.check_string_length(body['name'], 'Name', min_length=0, max_length=255) except exception.InvalidInput as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) description = body.get('description') if description is not None: try: utils.check_string_length(description, 'Description', min_length=0, max_length=255) except exception.InvalidInput as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) @staticmethod def validate_string_length(value, entity_name, min_length=0, max_length=None, remove_whitespaces=False): """Check the length of specified string. :param value: the value of the string :param entity_name: the name of the string :param min_length: the min_length of the string :param max_length: the max_length of the string :param remove_whitespaces: True if trimming whitespaces is needed else False """ if isinstance(value, six.string_types) and remove_whitespaces: value = value.strip() try: utils.check_string_length(value, entity_name, min_length=min_length, max_length=max_length) except exception.InvalidInput as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" _fault_names = {400: "badRequest", 401: "unauthorized", 403: "forbidden", 404: "itemNotFound", 405: "badMethod", 409: "conflictingRequest", 413: "overLimit", 415: "badMediaType", 501: "notImplemented", 503: "serviceUnavailable"} def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. 
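# The serialized body keys off _fault_names above; e.g. a 400 error # becomes, as a sketch: {"badRequest": {"code": 400, "message": "..."}}.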
locale = req.best_match_language() code = self.wrapped_exc.status_int fault_name = self._fault_names.get(code, "computeFault") explanation = self.wrapped_exc.explanation fault_data = { fault_name: { 'code': code, 'message': i18n.translate(explanation, locale)}} if code == 413: retry = self.wrapped_exc.headers.get('Retry-After', None) if retry: fault_data[fault_name]['retryAfter'] = retry if not req.api_version_request.is_null(): self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = ( req.api_version_request.get_string()) self.wrapped_exc.headers['Vary'] = API_VERSION_REQUEST_HEADER # 'code' is an attribute on the fault tag itself metadata = {'attributes': {fault_name: 'code'}} xml_serializer = XMLDictSerializer(metadata, XML_NS_V2) content_type = req.best_match_content_type() serializer = { 'application/xml': xml_serializer, 'application/json': JSONDictSerializer(), }[content_type] if content_type == 'application/xml': global XML_WARNING if not XML_WARNING: msg = _('XML support has been deprecated and will be removed ' 'in the N release.') versionutils.report_deprecated_feature(LOG, msg) XML_WARNING = True body = serializer.serialize(fault_data) if isinstance(body, six.text_type): body = body.encode('utf-8') self.wrapped_exc.body = body self.wrapped_exc.content_type = content_type _set_request_id_header(req, self.wrapped_exc.headers) return self.wrapped_exc def __str__(self): return self.wrapped_exc.__str__() def _set_request_id_header(req, headers): context = req.environ.get('cinder.context') if context: headers['x-compute-request-id'] = context.request_id class OverLimitFault(webob.exc.HTTPException): """Rate-limited request response.""" def __init__(self, message, details, retry_time): """Initialize new `OverLimitFault` with relevant information.""" hdrs = OverLimitFault._retry_after(retry_time) self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) self.content = { "overLimitFault": { "code": self.wrapped_exc.status_int, "message": message, "details": details, }, } @staticmethod def _retry_after(retry_time): delay = int(math.ceil(retry_time - time.time())) retry_after = delay if delay > 0 else 0 headers = {'Retry-After': '%d' % retry_after} return headers @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """Serializes the wrapped exception conforming to our error format.""" content_type = request.best_match_content_type() metadata = {"attributes": {"overLimitFault": "code"}} def translate(msg): locale = request.best_match_language() return i18n.translate(msg, locale) self.content['overLimitFault']['message'] = \ translate(self.content['overLimitFault']['message']) self.content['overLimitFault']['details'] = \ translate(self.content['overLimitFault']['details']) xml_serializer = XMLDictSerializer(metadata, XML_NS_V2) serializer = { 'application/xml': xml_serializer, 'application/json': JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) self.wrapped_exc.body = content return self.wrapped_exc cinder-8.0.0/cinder/api/openstack/api_version_request.py0000664000567000056710000001421512701406257024577 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from cinder.api.openstack import versioned_method from cinder import exception from cinder.i18n import _ from cinder import utils # Define the minimum and maximum version of the API across all of the # REST API. The format of the version is: # X.Y where: # # - X will only be changed if a significant backwards incompatible API # change is made which affects the API as a whole. That is, something # that is only very rarely incremented. # # - Y when you make any change to the API. Note that this includes # semantic changes which may not affect the input or output formats or # even originate in the API code layer. We are not distinguishing # between backwards compatible and backwards incompatible changes in # the versioning system. It must be made clear in the documentation as # to what is a backwards compatible change and what is a backwards # incompatible one. # # You must update the API version history string below with a one or # two line description as well as update rest_api_version_history.rst REST_API_VERSION_HISTORY = """ REST API Version History: * 3.0 - Includes all V2 APIs and extensions. V1 API is still supported. * 3.0 - Versions API updated to reflect beginning of microversions epoch. """ # The minimum and maximum versions of the API supported # The default api version request is defined to be # the minimum version of the API supported. # Explicitly using /v1 or /v2 endpoints will still work _MIN_API_VERSION = "3.0" _MAX_API_VERSION = "3.0" _LEGACY_API_VERSION1 = "1.0" _LEGACY_API_VERSION2 = "2.0" # NOTE(cyeoh): min and max versions declared as functions so we can # mock them for unittests. Do not use the constants directly anywhere # else. def min_api_version(): return APIVersionRequest(_MIN_API_VERSION) def max_api_version(): return APIVersionRequest(_MAX_API_VERSION) def legacy_api_version1(): return APIVersionRequest(_LEGACY_API_VERSION1) def legacy_api_version2(): return APIVersionRequest(_LEGACY_API_VERSION2) class APIVersionRequest(utils.ComparableMixin): """This class represents an API Version Request. This class includes convenience methods for manipulation and comparison of version numbers as needed to implement API microversions.
""" def __init__(self, version_string=None, experimental=False): """Create an API version request object.""" self._ver_major = None self._ver_minor = None if version_string is not None: match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string) if match: self._ver_major = int(match.group(1)) self._ver_minor = int(match.group(2)) else: raise exception.InvalidAPIVersionString(version=version_string) def __str__(self): """Debug/Logging representation of object.""" return ("API Version Request Major: %(major)s, Minor: %(minor)s" % {'major': self._ver_major, 'minor': self._ver_minor}) def is_null(self): return self._ver_major is None and self._ver_minor is None def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self._ver_major, self._ver_minor def matches_versioned_method(self, method): """Compares this version to that of a versioned method.""" if type(method) != versioned_method.VersionedMethod: msg = _('An API version request must be compared ' 'to a VersionedMethod object.') raise exception.InvalidParameterValue(err=msg) return self.matches(method.start_version, method.end_version, method.experimental) def matches(self, min_version, max_version, experimental=False): """Compares this version to the specified min/max range. Returns whether the version object represents a version greater than or equal to the minimum version and less than or equal to the maximum version. If min_version is null then there is no minimum limit. If max_version is null then there is no maximum limit. If self is null then raise ValueError. :param min_version: Minimum acceptable version. :param max_version: Maximum acceptable version. :param experimental: Whether to match experimental APIs. :returns: boolean """ if self.is_null(): raise ValueError if isinstance(min_version, str): min_version = APIVersionRequest(version_string=min_version) if isinstance(max_version, str): max_version = APIVersionRequest(version_string=max_version) if not min_version and not max_version: return True elif ((min_version and max_version) and max_version.is_null() and min_version.is_null()): return True elif not max_version or max_version.is_null(): return min_version <= self elif not min_version or min_version.is_null(): return self <= max_version else: return min_version <= self <= max_version def get_string(self): """Returns a string representation of this object. If this method is used to create an APIVersionRequest, the resulting object will be an equivalent request. """ if self.is_null(): raise ValueError return ("%(major)s.%(minor)s" % {'major': self._ver_major, 'minor': self._ver_minor}) cinder-8.0.0/cinder/api/openstack/rest_api_version_history.rst0000664000567000056710000000174012701406257026024 0ustar jenkinsjenkins00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 3.0 --- The 3.0 Cinder API includes all v2 core APIs existing prior to the introduction of microversions. The /v3 URL is used to call 3.0 APIs. This it the initial version of the Cinder API which supports microversions. A user can specify a header in the API request:: OpenStack-API-Version: volume where ```` is any valid api version for this API. If no version is specified then the API will behave as if version 3.0 was requested. The only API change in version 3.0 is versions, i.e. 
GET http://localhost:8776/, which now returns information about 3.0 and later versions and their respective /v3 endpoints. All other 3.0 APIs are functionally identical to version 2.0. cinder-8.0.0/cinder/api/contrib/0000775000567000056710000000000012701406543017603 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/contrib/snapshot_unmanage.py0000664000567000056710000000552212701406250023666 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import webob from webob import exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _LI from cinder import volume LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('snapshot', 'snapshot_unmanage') class SnapshotUnmanageController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SnapshotUnmanageController, self).__init__(*args, **kwargs) self.volume_api = volume.API() @wsgi.response(202) @wsgi.action('os-unmanage') def unmanage(self, req, id, body): """Stop managing a snapshot. This action is very much like a delete, except that a different method (unmanage) is called on the Cinder driver. This has the effect of removing the snapshot from Cinder management without actually removing the backend storage object associated with it. There are no required parameters. A Not Found error is returned if the specified snapshot does not exist. """ context = req.environ['cinder.context'] authorize(context) LOG.info(_LI("Unmanage snapshot with id: %s"), id, context=context) try: snapshot = self.volume_api.get_snapshot(context, id) self.volume_api.delete_snapshot(context, snapshot, unmanage_only=True) except exception.SnapshotNotFound as ex: raise exc.HTTPNotFound(explanation=ex.msg) except exception.InvalidSnapshot as ex: raise exc.HTTPBadRequest(explanation=ex.msg) return webob.Response(status_int=202) class Snapshot_unmanage(extensions.ExtensionDescriptor): """Enable snapshot unmanage operation.""" name = "SnapshotUnmanage" alias = "os-snapshot-unmanage" namespace = ('http://docs.openstack.org/snapshot/ext/snapshot-unmanage' '/api/v1') updated = "2014-12-31T00:00:00+00:00" def get_controller_extensions(self): controller = SnapshotUnmanageController() extension = extensions.ControllerExtension(self, 'snapshots', controller) return [extension] cinder-8.0.0/cinder/api/contrib/volume_transfer.py0000664000567000056710000002136212701406250023367 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import webob from webob import exc from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import transfers as transfer_view from cinder.api import xmlutil from cinder import exception from cinder.i18n import _, _LI from cinder import transfer as transferAPI from cinder import utils LOG = logging.getLogger(__name__) def make_transfer(elem): elem.set('id') elem.set('volume_id') elem.set('created_at') elem.set('name') elem.set('auth_key') class TransferTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('transfer', selector='transfer') make_transfer(root) alias = Volume_transfer.alias namespace = Volume_transfer.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class TransfersTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('transfers') elem = xmlutil.SubTemplateElement(root, 'transfer', selector='transfers') make_transfer(elem) alias = Volume_transfer.alias namespace = Volume_transfer.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class CreateDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) transfer = self._extract_transfer(dom) return {'body': {'transfer': transfer}} def _extract_transfer(self, node): transfer = {} transfer_node = self.find_first_child_named(node, 'transfer') attributes = ['volume_id', 'name'] for attr in attributes: if transfer_node.getAttribute(attr): transfer[attr] = transfer_node.getAttribute(attr) return transfer class AcceptDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) transfer = self._extract_transfer(dom) return {'body': {'accept': transfer}} def _extract_transfer(self, node): transfer = {} transfer_node = self.find_first_child_named(node, 'accept') attributes = ['auth_key'] for attr in attributes: if transfer_node.getAttribute(attr): transfer[attr] = transfer_node.getAttribute(attr) return transfer class VolumeTransferController(wsgi.Controller): """The Volume Transfer API controller for the OpenStack API.""" _view_builder_class = transfer_view.ViewBuilder def __init__(self): self.transfer_api = transferAPI.API() super(VolumeTransferController, self).__init__() @wsgi.serializers(xml=TransferTemplate) def show(self, req, id): """Return data about active transfers.""" context = req.environ['cinder.context'] try: transfer = self.transfer_api.get(context, transfer_id=id) except exception.TransferNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.detail(req, transfer) @wsgi.serializers(xml=TransfersTemplate) def index(self, req): """Returns a summary list of transfers.""" return self._get_transfers(req, is_detail=False) @wsgi.serializers(xml=TransfersTemplate) def detail(self, req): """Returns a detailed list of transfers.""" return self._get_transfers(req, is_detail=True) def _get_transfers(self, req, is_detail): """Returns a list of transfers, transformed through 
view builder.""" context = req.environ['cinder.context'] filters = req.params.copy() LOG.debug('Listing volume transfers') transfers = self.transfer_api.get_all(context, filters=filters) transfer_count = len(transfers) limited_list = common.limited(transfers, req) if is_detail: transfers = self._view_builder.detail_list(req, limited_list, transfer_count) else: transfers = self._view_builder.summary_list(req, limited_list, transfer_count) return transfers @wsgi.response(202) @wsgi.serializers(xml=TransferTemplate) @wsgi.deserializers(xml=CreateDeserializer) def create(self, req, body): """Create a new volume transfer.""" LOG.debug('Creating new volume transfer %s', body) self.assert_valid_body(body, 'transfer') context = req.environ['cinder.context'] transfer = body['transfer'] try: volume_id = transfer['volume_id'] except KeyError: msg = _("Incorrect request body format") raise exc.HTTPBadRequest(explanation=msg) name = transfer.get('name', None) if name is not None: self.validate_string_length(name, 'Transfer name', min_length=1, max_length=255, remove_whitespaces=True) name = name.strip() LOG.info(_LI("Creating transfer of volume %s"), volume_id, context=context) try: new_transfer = self.transfer_api.create(context, volume_id, name) except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) transfer = self._view_builder.create(req, dict(new_transfer)) return transfer @wsgi.response(202) @wsgi.serializers(xml=TransferTemplate) @wsgi.deserializers(xml=AcceptDeserializer) def accept(self, req, id, body): """Accept a new volume transfer.""" transfer_id = id LOG.debug('Accepting volume transfer %s', transfer_id) self.assert_valid_body(body, 'accept') context = req.environ['cinder.context'] accept = body['accept'] try: auth_key = accept['auth_key'] except KeyError: msg = _("Incorrect request body format") raise exc.HTTPBadRequest(explanation=msg) LOG.info(_LI("Accepting transfer %s"), transfer_id, context=context) try: accepted_transfer = self.transfer_api.accept(context, transfer_id, auth_key) except exception.VolumeSizeExceedsAvailableQuota as error: raise exc.HTTPRequestEntityTooLarge( explanation=error.msg, headers={'Retry-After': '0'}) except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) transfer = \ self._view_builder.summary(req, dict(accepted_transfer)) return transfer def delete(self, req, id): """Delete a transfer.""" context = req.environ['cinder.context'] LOG.info(_LI("Delete transfer with id: %s"), id, context=context) try: self.transfer_api.delete(context, transfer_id=id) except exception.TransferNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return webob.Response(status_int=202) class Volume_transfer(extensions.ExtensionDescriptor): """Volume transfer management support.""" name = "VolumeTransfer" alias = "os-volume-transfer" namespace = "http://docs.openstack.org/volume/ext/volume-transfer/" + \ "api/v1.1" updated = "2013-05-29T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension(Volume_transfer.alias, VolumeTransferController(), collection_actions={'detail': 'GET'}, member_actions={'accept': 'POST'}) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/snapshot_actions.py0000664000567000056710000001023112701406250023524 0ustar jenkinsjenkins00000000000000# Copyright 2013, Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.i18n import _, _LI from cinder import objects LOG = logging.getLogger(__name__) def authorize(context, action_name): action = 'snapshot_actions:%s' % action_name extensions.extension_authorizer('snapshot', action)(context) class SnapshotActionsController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SnapshotActionsController, self).__init__(*args, **kwargs) LOG.debug("SnapshotActionsController initialized") @wsgi.action('os-update_snapshot_status') def _update_snapshot_status(self, req, id, body): """Update database fields related to status of a snapshot. Intended for creation of snapshots, so snapshot state must start as 'creating' and be changed to 'available', 'creating', or 'error'. """ context = req.environ['cinder.context'] authorize(context, 'update_snapshot_status') LOG.debug("body: %s", body) try: status = body['os-update_snapshot_status']['status'] except KeyError: msg = _("'status' must be specified.") raise webob.exc.HTTPBadRequest(explanation=msg) # Allowed state transitions status_map = {'creating': ['creating', 'available', 'error'], 'deleting': ['deleting', 'error_deleting']} current_snapshot = objects.Snapshot.get_by_id(context, id) if current_snapshot.status not in status_map: msg = _("Snapshot status %(cur)s not allowed for " "update_snapshot_status") % { 'cur': current_snapshot.status} raise webob.exc.HTTPBadRequest(explanation=msg) if status not in status_map[current_snapshot.status]: msg = _("Provided snapshot status %(provided)s not allowed for " "snapshot with status %(current)s.") % \ {'provided': status, 'current': current_snapshot.status} raise webob.exc.HTTPBadRequest(explanation=msg) update_dict = {'id': id, 'status': status} progress = body['os-update_snapshot_status'].get('progress', None) if progress: # This is expected to be a string like '73%' msg = _('progress must be an integer percentage') try: integer = int(progress[:-1]) except ValueError: raise webob.exc.HTTPBadRequest(explanation=msg) if integer < 0 or integer > 100 or progress[-1] != '%': raise webob.exc.HTTPBadRequest(explanation=msg) update_dict.update({'progress': progress}) LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"), {'id': id, 'dict': update_dict}) current_snapshot.update(update_dict) current_snapshot.save() return webob.Response(status_int=202) class Snapshot_actions(extensions.ExtensionDescriptor): """Enable snapshot manager actions.""" name = "SnapshotActions" alias = "os-snapshot-actions" namespace = \ "http://docs.openstack.org/volume/ext/snapshot-actions/api/v1.1" updated = "2013-07-16T00:00:00+00:00" def get_controller_extensions(self): controller = SnapshotActionsController() extension = extensions.ControllerExtension(self, 'snapshots', controller) return [extension] cinder-8.0.0/cinder/api/contrib/quotas.py0000664000567000056710000004603612701406250021475 0ustar jenkinsjenkins00000000000000# 
Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import db from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.i18n import _ from cinder import quota from cinder import quota_utils from cinder import utils from oslo_config import cfg from oslo_utils import strutils CONF = cfg.CONF QUOTAS = quota.QUOTAS NON_QUOTA_KEYS = ['tenant_id', 'id'] authorize_update = extensions.extension_authorizer('volume', 'quotas:update') authorize_show = extensions.extension_authorizer('volume', 'quotas:show') authorize_delete = extensions.extension_authorizer('volume', 'quotas:delete') class QuotaTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('quota_set', selector='quota_set') root.set('id') for resource in QUOTAS.resources: elem = xmlutil.SubTemplateElement(root, resource) elem.text = resource return xmlutil.MasterTemplate(root, 1) class QuotaSetsController(wsgi.Controller): def _format_quota_set(self, project_id, quota_set): """Convert the quota object to a result dict.""" quota_set['id'] = str(project_id) return dict(quota_set=quota_set) def _validate_existing_resource(self, key, value, quota_values): # -1 limit will always be greater than the existing value if key == 'per_volume_gigabytes' or value == -1: return v = quota_values.get(key, {}) used = (v.get('in_use', 0) + v.get('reserved', 0)) if QUOTAS.using_nested_quotas(): used += v.get('allocated', 0) if value < used: # TODO(mc_nair): after N opens, update error message to include # the current usage and requested limit msg = _("Quota %s limit must be equal to or greater than existing " "resources.") % key raise webob.exc.HTTPBadRequest(explanation=msg) def _get_quotas(self, context, id, usages=False): values = QUOTAS.get_project_quotas(context, id, usages=usages) if usages: return values else: return {k: v['limit'] for k, v in values.items()} def _authorize_update_or_delete(self, context_project, target_project_id, parent_id): """Checks if update or delete are allowed in the current hierarchy. With hierarchical projects, only the admin of the parent or the root project has privilege to perform quota update and delete operations. :param context_project: The project to which the user is scoped. :param target_project_id: The id of the project in which the user wants to perform an update or delete operation. :param parent_id: The parent id of the project in which the user wants to perform an update or delete operation.
""" if context_project.parent_id and parent_id != context_project.id: msg = _("Update and delete quota operations can only be made " "by an admin of immediate parent or by the CLOUD admin.") raise webob.exc.HTTPForbidden(explanation=msg) if context_project.id != target_project_id: if not self._is_descendant(target_project_id, context_project.subtree): msg = _("Update and delete quota operations can only be made " "to projects in the same hierarchy of the project in " "which users are scoped to.") raise webob.exc.HTTPForbidden(explanation=msg) else: msg = _("Update and delete quota operations can only be made " "by an admin of immediate parent or by the CLOUD admin.") raise webob.exc.HTTPForbidden(explanation=msg) def _authorize_show(self, context_project, target_project): """Checks if show is allowed in the current hierarchy. With hierarchical projects, are allowed to perform quota show operation users with admin role in, at least, one of the following projects: the current project; the immediate parent project; or the root project. :param context_project: The project in which the user is scoped to. :param target_project: The project in which the user wants to perform a show operation. """ if target_project.parent_id: if target_project.id != context_project.id: if not self._is_descendant(target_project.id, context_project.subtree): msg = _("Show operations can only be made to projects in " "the same hierarchy of the project in which users " "are scoped to.") raise webob.exc.HTTPForbidden(explanation=msg) if context_project.id != target_project.parent_id: if context_project.parent_id: msg = _("Only users with token scoped to immediate " "parents or root projects are allowed to see " "its children quotas.") raise webob.exc.HTTPForbidden(explanation=msg) elif context_project.parent_id: msg = _("An user with a token scoped to a subproject is not " "allowed to see the quota of its parents.") raise webob.exc.HTTPForbidden(explanation=msg) def _is_descendant(self, target_project_id, subtree): if subtree is not None: for key, value in subtree.items(): if key == target_project_id: return True if self._is_descendant(target_project_id, value): return True return False @wsgi.serializers(xml=QuotaTemplate) def show(self, req, id): """Show quota for a particular tenant This works for hierarchical and non-hierarchical projects. For hierarchical projects admin of current project, immediate parent of the project or the CLOUD admin are able to perform a show. :param req: request :param id: target project id that needs to be shown """ context = req.environ['cinder.context'] authorize_show(context) params = req.params target_project_id = id if not hasattr(params, '__call__') and 'usage' in params: usage = strutils.bool_from_string(params['usage']) else: usage = False if QUOTAS.using_nested_quotas(): # With hierarchical projects, only the admin of the current project # or the root project has privilege to perform quota show # operations. 
target_project = quota_utils.get_project_hierarchy( context, target_project_id) context_project = quota_utils.get_project_hierarchy( context, context.project_id, subtree_as_ids=True) self._authorize_show(context_project, target_project) try: sqlalchemy_api.authorize_project_context(context, target_project_id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() quotas = self._get_quotas(context, target_project_id, usage) return self._format_quota_set(target_project_id, quotas) @wsgi.serializers(xml=QuotaTemplate) def update(self, req, id, body): """Update Quota for a particular tenant This works for hierarchical and non-hierarchical projects. For hierarchical projects only immediate parent admin or the CLOUD admin are able to perform an update. :param req: request :param id: target project id that needs to be updated :param body: key, value pair that that will be applied to the resources if the update succeeds """ context = req.environ['cinder.context'] authorize_update(context) self.validate_string_length(id, 'quota_set_name', min_length=1, max_length=255) self.assert_valid_body(body, 'quota_set') # Get the optional argument 'skip_validation' from body, # if skip_validation is False, then validate existing resource. skip_flag = body.get('skip_validation', True) if not utils.is_valid_boolstr(skip_flag): msg = _("Invalid value '%s' for skip_validation.") % skip_flag raise exception.InvalidParameterValue(err=msg) skip_flag = strutils.bool_from_string(skip_flag) target_project_id = id bad_keys = [] # NOTE(ankit): Pass #1 - In this loop for body['quota_set'].items(), # we figure out if we have any bad keys. for key, value in body['quota_set'].items(): if (key not in QUOTAS and key not in NON_QUOTA_KEYS): bad_keys.append(key) continue if len(bad_keys) > 0: msg = _("Bad key(s) in quota set: %s") % ",".join(bad_keys) raise webob.exc.HTTPBadRequest(explanation=msg) # Saving off this value since we need to use it multiple times use_nested_quotas = QUOTAS.using_nested_quotas() if use_nested_quotas: # Get the parent_id of the target project to verify whether we are # dealing with hierarchical namespace or non-hierarchical namespace target_project = quota_utils.get_project_hierarchy( context, target_project_id, parents_as_ids=True) parent_id = target_project.parent_id if parent_id: # Get the children of the project which the token is scoped to # in order to know if the target_project is in its hierarchy. context_project = quota_utils.get_project_hierarchy( context, context.project_id, subtree_as_ids=True) self._authorize_update_or_delete(context_project, target_project.id, parent_id) # NOTE(ankit): Pass #2 - In this loop for body['quota_set'].keys(), # we validate the quota limits to ensure that we can bail out if # any of the items in the set is bad. Meanwhile we validate value # to ensure that the value can't be lower than number of existing # resources. 
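# Illustrative request for the update flow implemented here (a sketch, not
# part of the original file; the project IDs are placeholders and the path
# is the default v2 route for the os-quota-sets extension):
#
#   PUT /v2/{admin_project_id}/os-quota-sets/{target_project_id}
#   {"quota_set": {"volumes": 20, "gigabytes": 1000},
#    "skip_validation": "false"}
#
# Note that "skip_validation" sits beside "quota_set", not inside it. With
# skip_validation false (or whenever nested quotas are in use), each new
# limit is checked against in_use + reserved (+ allocated) before any
# database update is made.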
quota_values = QUOTAS.get_project_quotas(context, target_project_id, defaults=False) valid_quotas = {} reservations = [] for key in body['quota_set'].keys(): if key in NON_QUOTA_KEYS: continue value = utils.validate_integer( body['quota_set'][key], key, min_value=-1, max_value=db.MAX_INT) # Can't skip the validation of nested quotas since it could mess up # hierarchy if parent limit is less than childrens' current usage if not skip_flag or use_nested_quotas: self._validate_existing_resource(key, value, quota_values) if use_nested_quotas: try: reservations += self._update_nested_quota_allocated( context, target_project, quota_values, key, value) except exception.OverQuota as e: if reservations: db.reservation_rollback(context, reservations) raise webob.exc.HTTPBadRequest(explanation=e.message) valid_quotas[key] = value # NOTE(ankit): Pass #3 - At this point we know that all the keys and # values are valid and we can iterate and update them all in one shot # without having to worry about rolling back etc as we have done # the validation up front in the 2 loops above. for key, value in valid_quotas.items(): try: db.quota_update(context, target_project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, target_project_id, key, value) except exception.AdminRequired: raise webob.exc.HTTPForbidden() if reservations: db.reservation_commit(context, reservations) return {'quota_set': self._get_quotas(context, target_project_id)} def _get_quota_usage(self, quota_obj): return (quota_obj.get('in_use', 0) + quota_obj.get('allocated', 0) + quota_obj.get('reserved', 0)) def _update_nested_quota_allocated(self, ctxt, target_project, target_project_quotas, res, new_limit): reservations = [] # per_volume_gigabytes doesn't make sense to nest if res == "per_volume_gigabytes": return reservations quota_for_res = target_project_quotas.get(res, {}) orig_quota_from_target_proj = quota_for_res.get('limit', 0) # If limit was -1, we were "taking" current child's usage from parent if orig_quota_from_target_proj == -1: orig_quota_from_target_proj = self._get_quota_usage(quota_for_res) new_quota_from_target_proj = new_limit # If we set limit to -1, we will "take" the current usage from parent if new_limit == -1: new_quota_from_target_proj = self._get_quota_usage(quota_for_res) res_change = new_quota_from_target_proj - orig_quota_from_target_proj if res_change != 0: deltas = {res: res_change} reservations += quota_utils.update_alloc_to_next_hard_limit( ctxt, QUOTAS.resources, deltas, res, None, target_project.id) return reservations @wsgi.serializers(xml=QuotaTemplate) def defaults(self, req, id): context = req.environ['cinder.context'] authorize_show(context) return self._format_quota_set(id, QUOTAS.get_defaults( context, project_id=id)) @wsgi.serializers(xml=QuotaTemplate) def delete(self, req, id): """Delete Quota for a particular tenant. This works for hierarchical and non-hierarchical projects. For hierarchical projects only immediate parent admin or the CLOUD admin are able to perform a delete. 
:param req: request :param id: target project id that needs to be deleted """ context = req.environ['cinder.context'] authorize_delete(context) if QUOTAS.using_nested_quotas(): self._delete_nested_quota(context, id) else: try: db.quota_destroy_by_project(context, id) except exception.AdminRequired: raise webob.exc.HTTPForbidden() def _delete_nested_quota(self, ctxt, proj_id): # Get the parent_id of the target project to verify whether we are # dealing with hierarchical namespace or non-hierarchical # namespace. try: project_quotas = QUOTAS.get_project_quotas( ctxt, proj_id, usages=True, defaults=False) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() target_project = quota_utils.get_project_hierarchy( ctxt, proj_id) parent_id = target_project.parent_id if parent_id: # Get the children of the project which the token is scoped to # in order to know if the target_project is in its hierarchy. context_project = quota_utils.get_project_hierarchy( ctxt, ctxt.project_id, subtree_as_ids=True) self._authorize_update_or_delete(context_project, target_project.id, parent_id) defaults = QUOTAS.get_defaults(ctxt, proj_id) # If the project which is being deleted has allocated part of its # quota to its subprojects, then subprojects' quotas should be # deleted first. for res, value in project_quotas.items(): if 'allocated' in project_quotas[res].keys(): if project_quotas[res]['allocated'] != 0: msg = _("About to delete child projects having " "non-zero quota. This should not be performed") raise webob.exc.HTTPBadRequest(explanation=msg) # Ensure quota usage wouldn't exceed limit on a delete self._validate_existing_resource( res, defaults[res], project_quotas) try: db.quota_destroy_by_project(ctxt, target_project.id) except exception.AdminRequired: raise webob.exc.HTTPForbidden() for res, limit in project_quotas.items(): # Update child limit to 0 so the parent hierarchy gets it's # allocated values updated properly self._update_nested_quota_allocated( ctxt, target_project, project_quotas, res, 0) def validate_setup_for_nested_quota_use(self, req): """Validates that the setup supports using nested quotas. Ensures that Keystone v3 or greater is being used, and that the existing quotas make sense to nest in the current hierarchy (e.g. that no child quota would be larger than it's parent). """ ctxt = req.environ['cinder.context'] params = req.params try: quota_utils.validate_setup_for_nested_quota_use( ctxt, QUOTAS.resources, quota.NestedDbQuotaDriver(), fix_allocated_quotas=params.get('fix_allocated_quotas')) except exception.InvalidNestedQuotaSetup as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) class Quotas(extensions.ExtensionDescriptor): """Quota management support.""" name = "Quotas" alias = "os-quota-sets" namespace = "http://docs.openstack.org/volume/ext/quotas-sets/api/v1.1" updated = "2011-08-08T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( 'os-quota-sets', QuotaSetsController(), member_actions={'defaults': 'GET'}, collection_actions={'validate_setup_for_nested_quota_use': 'GET'}) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/quota_classes.py0000664000567000056710000000745612701406250023032 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
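# Illustrative sketch (not part of the original file) for the delete and
# nested-quota validation endpoints defined by the Quotas extension above;
# the paths follow the default v2 router and project IDs are placeholders:
#
#   DELETE /v2/{admin_project_id}/os-quota-sets/{target_project_id}
#       -> removes the project's quota rows; with nested quotas the
#          child's limits are first zeroed so the parent's 'allocated'
#          usage is given back.
#
#   GET /v2/{admin_project_id}/os-quota-sets/validate_setup_for_nested_quota_use?fix_allocated_quotas=True
#       -> 400 with an explanation if the existing quota hierarchy is
#          inconsistent (e.g. a child limit larger than its parent's).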
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import db from cinder import exception from cinder.i18n import _ from cinder import quota from cinder import utils QUOTAS = quota.QUOTAS authorize = extensions.extension_authorizer('volume', 'quota_classes') class QuotaClassTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('quota_class_set', selector='quota_class_set') root.set('id') for resource in QUOTAS.resources: elem = xmlutil.SubTemplateElement(root, resource) elem.text = resource return xmlutil.MasterTemplate(root, 1) class QuotaClassSetsController(wsgi.Controller): def _format_quota_set(self, quota_class, quota_set): """Convert the quota object to a result dict.""" quota_set['id'] = str(quota_class) return dict(quota_class_set=quota_set) @wsgi.serializers(xml=QuotaClassTemplate) def show(self, req, id): context = req.environ['cinder.context'] authorize(context) try: db.sqlalchemy.api.authorize_quota_class_context(context, id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() return self._format_quota_set(id, QUOTAS.get_class_quotas(context, id)) @wsgi.serializers(xml=QuotaClassTemplate) def update(self, req, id, body): context = req.environ['cinder.context'] authorize(context) self.validate_string_length(id, 'quota_class_name', min_length=1, max_length=255) quota_class = id if not self.is_valid_body(body, 'quota_class_set'): msg = (_("Missing required element quota_class_set" " in request body.")) raise webob.exc.HTTPBadRequest(explanation=msg) for key, value in body['quota_class_set'].items(): if key in QUOTAS: try: value = utils.validate_integer(value, key, min_value=-1, max_value=db.MAX_INT) db.quota_class_update(context, quota_class, key, value) except exception.QuotaClassNotFound: db.quota_class_create(context, quota_class, key, value) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return {'quota_class_set': QUOTAS.get_class_quotas(context, quota_class)} class Quota_classes(extensions.ExtensionDescriptor): """Quota classes management support.""" name = "QuotaClasses" alias = "os-quota-class-sets" namespace = ("http://docs.openstack.org/volume/ext/" "quota-classes-sets/api/v1.1") updated = "2012-03-12T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension('os-quota-class-sets', QuotaClassSetsController()) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/extended_snapshot_attributes.py0000664000567000056710000000731512701406250026143 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
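# Illustrative usage of the quota-classes API above (a sketch, not part of
# the original file; 'default' is the class name most deployments use and
# the path follows the default v2 router):
#
#   GET /v2/{admin_project_id}/os-quota-class-sets/default
#       -> {"quota_class_set": {"id": "default", "volumes": 10, ...}}
#
#   PUT /v2/{admin_project_id}/os-quota-class-sets/default
#   {"quota_class_set": {"volumes": 15, "snapshots": 20}}
#       -> keys not registered in QUOTAS are silently skipped; known keys
#          are validated as integers in [-1, MAX_INT] and the class row is
#          created on the fly if it does not exist yet.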
See the # License for the specific language governing permissions and limitations # under the License. """The Extended Snapshot Attributes API extension.""" from oslo_log import log as logging from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil LOG = logging.getLogger(__name__) authorize = extensions.soft_extension_authorizer( 'volume', 'extended_snapshot_attributes') class ExtendedSnapshotAttributesController(wsgi.Controller): def _extend_snapshot(self, req, resp_snap): db_snap = req.get_db_snapshot(resp_snap['id']) for attr in ['project_id', 'progress']: key = "%s:%s" % (Extended_snapshot_attributes.alias, attr) resp_snap[key] = db_snap[attr] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=ExtendedSnapshotAttributeTemplate()) snapshot = resp_obj.obj['snapshot'] self._extend_snapshot(req, snapshot) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=ExtendedSnapshotAttributesTemplate()) for snapshot in list(resp_obj.obj['snapshots']): self._extend_snapshot(req, snapshot) class Extended_snapshot_attributes(extensions.ExtensionDescriptor): """Extended SnapshotAttributes support.""" name = "ExtendedSnapshotAttributes" alias = "os-extended-snapshot-attributes" namespace = ("http://docs.openstack.org/volume/ext/" "extended_snapshot_attributes/api/v1") updated = "2012-06-19T00:00:00+00:00" def get_controller_extensions(self): controller = ExtendedSnapshotAttributesController() extension = extensions.ControllerExtension(self, 'snapshots', controller) return [extension] def make_snapshot(elem): elem.set('{%s}project_id' % Extended_snapshot_attributes.namespace, '%s:project_id' % Extended_snapshot_attributes.alias) elem.set('{%s}progress' % Extended_snapshot_attributes.namespace, '%s:progress' % Extended_snapshot_attributes.alias) class ExtendedSnapshotAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshot', selector='snapshot') make_snapshot(root) alias = Extended_snapshot_attributes.alias namespace = Extended_snapshot_attributes.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class ExtendedSnapshotAttributesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshots') elem = xmlutil.SubTemplateElement(root, 'snapshot', selector='snapshots') make_snapshot(elem) alias = Extended_snapshot_attributes.alias namespace = Extended_snapshot_attributes.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) cinder-8.0.0/cinder/api/contrib/scheduler_hints.py0000664000567000056710000000360412701406250023336 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
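# Illustrative sketch (not part of the original file): reading the extra
# keys that the ExtendedSnapshotAttributes controller above attaches to a
# snapshot "show" response. ENDPOINT, TOKEN, PROJECT_ID and SNAPSHOT_ID
# are placeholders.
import requests

ENDPOINT = 'http://cinder.example.com:8776/v2'
TOKEN = 'keystone-token'
PROJECT_ID = 'tenant-uuid'
SNAPSHOT_ID = 'snapshot-uuid'

resp = requests.get(
    '%s/%s/snapshots/%s' % (ENDPOINT, PROJECT_ID, SNAPSHOT_ID),
    headers={'X-Auth-Token': TOKEN})
snap = resp.json()['snapshot']
# The added keys are '<alias>:<attr>':
print(snap.get('os-extended-snapshot-attributes:project_id'))
print(snap.get('os-extended-snapshot-attributes:progress'))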
from oslo_log import log as logging import webob.exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.v2 import volumes from cinder.i18n import _ LOG = logging.getLogger(__name__) class SchedulerHintsController(wsgi.Controller): @staticmethod def _extract_scheduler_hints(body): hints = {} attr = '%s:scheduler_hints' % Scheduler_hints.alias try: if attr in body: hints.update(body[attr]) except ValueError: msg = _("Malformed scheduler_hints attribute") raise webob.exc.HTTPBadRequest(explanation=msg) return hints @wsgi.extends def create(self, req, body): hints = self._extract_scheduler_hints(body) if 'volume' in body: body['volume']['scheduler_hints'] = hints yield class Scheduler_hints(extensions.ExtensionDescriptor): """Pass arbitrary key/value pairs to the scheduler.""" name = "SchedulerHints" alias = "OS-SCH-HNT" namespace = volumes.SCHEDULER_HINTS_NAMESPACE updated = "2013-04-18T00:00:00+00:00" def get_controller_extensions(self): controller = SchedulerHintsController() ext = extensions.ControllerExtension(self, 'volumes', controller) return [ext] cinder-8.0.0/cinder/api/contrib/volume_host_attribute.py0000664000567000056710000000623612701406250024606 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
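# Illustrative sketch (not part of the original file): passing scheduler
# hints on volume creation. The controller above looks for a top-level
# 'OS-SCH-HNT:scheduler_hints' key and copies it into the volume body; the
# hint names actually honoured depend on the configured scheduler filters,
# so 'local_to_instance' here is only an example. Placeholders as before.
import requests

ENDPOINT = 'http://cinder.example.com:8776/v2'
TOKEN = 'keystone-token'
PROJECT_ID = 'tenant-uuid'

body = {
    'volume': {'size': 1, 'name': 'hinted-volume'},
    'OS-SCH-HNT:scheduler_hints': {'local_to_instance': 'instance-uuid'},
}
resp = requests.post('%s/%s/volumes' % (ENDPOINT, PROJECT_ID),
                     json=body, headers={'X-Auth-Token': TOKEN})
print(resp.status_code)  # 202 when the request is accepted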
from oslo_log import log as logging from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil LOG = logging.getLogger(__name__) authorize = extensions.soft_extension_authorizer('volume', 'volume_host_attribute') class VolumeHostAttributeController(wsgi.Controller): def _add_volume_host_attribute(self, req, resp_volume): db_volume = req.get_db_volume(resp_volume['id']) key = "%s:host" % Volume_host_attribute.alias resp_volume[key] = db_volume['host'] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeHostAttributeTemplate()) volume = resp_obj.obj['volume'] self._add_volume_host_attribute(req, volume) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeListHostAttributeTemplate()) for vol in list(resp_obj.obj['volumes']): self._add_volume_host_attribute(req, vol) class Volume_host_attribute(extensions.ExtensionDescriptor): """Expose host as an attribute of a volume.""" name = "VolumeHostAttribute" alias = "os-vol-host-attr" namespace = ("http://docs.openstack.org/volume/ext/" "volume_host_attribute/api/v2") updated = "2011-11-03T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeHostAttributeController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] def make_volume(elem): elem.set('{%s}host' % Volume_host_attribute.namespace, '%s:host' % Volume_host_attribute.alias) class VolumeHostAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume', selector='volume') make_volume(root) alias = Volume_host_attribute.alias namespace = Volume_host_attribute.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumeListHostAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumes') elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') make_volume(elem) alias = Volume_host_attribute.alias namespace = Volume_host_attribute.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) cinder-8.0.0/cinder/api/contrib/consistencygroups.py0000664000567000056710000003612612701406250023761 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
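# Illustrative sketch (not part of the original file): when the caller is
# authorized for the volume_host_attribute policy (admin by default), the
# controller above adds an 'os-vol-host-attr:host' key to volume show and
# detail responses. Placeholders as before.
import requests

ENDPOINT = 'http://cinder.example.com:8776/v2'
TOKEN = 'admin-keystone-token'
PROJECT_ID = 'tenant-uuid'
VOLUME_ID = 'volume-uuid'

resp = requests.get('%s/%s/volumes/%s' % (ENDPOINT, PROJECT_ID, VOLUME_ID),
                    headers={'X-Auth-Token': TOKEN})
# e.g. 'myhost@lvmdriver-1#lvmdriver-1' (host@backend#pool format)
print(resp.json()['volume'].get('os-vol-host-attr:host'))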
"""The consistencygroups api.""" from oslo_log import log as logging from oslo_utils import strutils import webob from webob import exc from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import consistencygroups as consistencygroup_views from cinder.api import xmlutil from cinder import consistencygroup as consistencygroupAPI from cinder import exception from cinder.i18n import _, _LI from cinder import utils LOG = logging.getLogger(__name__) def make_consistencygroup(elem): elem.set('id') elem.set('status') elem.set('availability_zone') elem.set('created_at') elem.set('name') elem.set('description') def make_consistencygroup_from_src(elem): elem.set('id') elem.set('status') elem.set('created_at') elem.set('name') elem.set('description') elem.set('cgsnapshot_id') elem.set('source_cgid') class ConsistencyGroupTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('consistencygroup', selector='consistencygroup') make_consistencygroup(root) alias = Consistencygroups.alias namespace = Consistencygroups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class ConsistencyGroupsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('consistencygroups') elem = xmlutil.SubTemplateElement(root, 'consistencygroup', selector='consistencygroups') make_consistencygroup(elem) alias = Consistencygroups.alias namespace = Consistencygroups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class ConsistencyGroupFromSrcTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('consistencygroup-from-src', selector='consistencygroup-from-src') make_consistencygroup_from_src(root) alias = Consistencygroups.alias namespace = Consistencygroups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class CreateDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) consistencygroup = self._extract_consistencygroup(dom) return {'body': {'consistencygroup': consistencygroup}} def _extract_consistencygroup(self, node): consistencygroup = {} consistencygroup_node = self.find_first_child_named( node, 'consistencygroup') attributes = ['name', 'description'] for attr in attributes: if consistencygroup_node.getAttribute(attr): consistencygroup[attr] = consistencygroup_node.\ getAttribute(attr) return consistencygroup class CreateFromSrcDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) consistencygroup = self._extract_consistencygroup(dom) retval = {'body': {'consistencygroup-from-src': consistencygroup}} return retval def _extract_consistencygroup(self, node): consistencygroup = {} consistencygroup_node = self.find_first_child_named( node, 'consistencygroup-from-src') attributes = ['cgsnapshot', 'source_cgid', 'name', 'description'] for attr in attributes: if consistencygroup_node.getAttribute(attr): consistencygroup[attr] = ( consistencygroup_node.getAttribute(attr)) return consistencygroup class ConsistencyGroupsController(wsgi.Controller): """The ConsistencyGroups API controller for the OpenStack API.""" _view_builder_class = consistencygroup_views.ViewBuilder def __init__(self): self.consistencygroup_api = consistencygroupAPI.API() super(ConsistencyGroupsController, self).__init__() @wsgi.serializers(xml=ConsistencyGroupTemplate) def show(self, req, id): 
"""Return data about the given consistency group.""" LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] try: consistencygroup = self.consistencygroup_api.get( context, group_id=id) except exception.ConsistencyGroupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.detail(req, consistencygroup) def delete(self, req, id, body): """Delete a consistency group.""" LOG.debug('delete called for member %s', id) context = req.environ['cinder.context'] force = False if body: if not self.is_valid_body(body, 'consistencygroup'): msg = _("Missing required element 'consistencygroup' in " "request body.") raise exc.HTTPBadRequest(explanation=msg) cg_body = body['consistencygroup'] try: force = strutils.bool_from_string(cg_body.get('force', False), strict=True) except ValueError: msg = _("Invalid value '%s' for force.") % force raise exc.HTTPBadRequest(explanation=msg) LOG.info(_LI('Delete consistency group with id: %s'), id, context=context) try: group = self.consistencygroup_api.get(context, id) self.consistencygroup_api.delete(context, group, force) except exception.ConsistencyGroupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.InvalidConsistencyGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=202) @wsgi.serializers(xml=ConsistencyGroupsTemplate) def index(self, req): """Returns a summary list of consistency groups.""" return self._get_consistencygroups(req, is_detail=False) @wsgi.serializers(xml=ConsistencyGroupsTemplate) def detail(self, req): """Returns a detailed list of consistency groups.""" return self._get_consistencygroups(req, is_detail=True) def _get_consistencygroups(self, req, is_detail): """Returns a list of consistency groups through view builder.""" context = req.environ['cinder.context'] filters = req.params.copy() marker, limit, offset = common.get_pagination_params(filters) sort_keys, sort_dirs = common.get_sort_params(filters) consistencygroups = self.consistencygroup_api.get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) if is_detail: consistencygroups = self._view_builder.detail_list( req, consistencygroups) else: consistencygroups = self._view_builder.summary_list( req, consistencygroups) return consistencygroups @wsgi.response(202) @wsgi.serializers(xml=ConsistencyGroupTemplate) @wsgi.deserializers(xml=CreateDeserializer) def create(self, req, body): """Create a new consistency group.""" LOG.debug('Creating new consistency group %s', body) self.assert_valid_body(body, 'consistencygroup') context = req.environ['cinder.context'] consistencygroup = body['consistencygroup'] self.validate_name_and_description(consistencygroup) name = consistencygroup.get('name', None) description = consistencygroup.get('description', None) volume_types = consistencygroup.get('volume_types', None) if not volume_types: msg = _("volume_types must be provided to create " "consistency group %(name)s.") % {'name': name} raise exc.HTTPBadRequest(explanation=msg) availability_zone = consistencygroup.get('availability_zone', None) LOG.info(_LI("Creating consistency group %(name)s."), {'name': name}, context=context) try: new_consistencygroup = self.consistencygroup_api.create( context, name, description, volume_types, availability_zone=availability_zone) except exception.InvalidConsistencyGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeType as 
error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.ConsistencyGroupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) retval = self._view_builder.summary(req, new_consistencygroup) return retval @wsgi.response(202) @wsgi.serializers(xml=ConsistencyGroupFromSrcTemplate) @wsgi.deserializers(xml=CreateFromSrcDeserializer) def create_from_src(self, req, body): """Create a new consistency group from a source. The source can be a CG snapshot or a CG. Note that this does not require volume_types as the "create" API above. """ LOG.debug('Creating new consistency group %s.', body) self.assert_valid_body(body, 'consistencygroup-from-src') context = req.environ['cinder.context'] consistencygroup = body['consistencygroup-from-src'] self.validate_name_and_description(consistencygroup) name = consistencygroup.get('name', None) description = consistencygroup.get('description', None) cgsnapshot_id = consistencygroup.get('cgsnapshot_id', None) source_cgid = consistencygroup.get('source_cgid', None) if not cgsnapshot_id and not source_cgid: msg = _("Either 'cgsnapshot_id' or 'source_cgid' must be " "provided to create consistency group %(name)s " "from source.") % {'name': name} raise exc.HTTPBadRequest(explanation=msg) if cgsnapshot_id and source_cgid: msg = _("Cannot provide both 'cgsnapshot_id' and 'source_cgid' " "to create consistency group %(name)s from " "source.") % {'name': name} raise exc.HTTPBadRequest(explanation=msg) if cgsnapshot_id: LOG.info(_LI("Creating consistency group %(name)s from " "cgsnapshot %(snap)s."), {'name': name, 'snap': cgsnapshot_id}, context=context) elif source_cgid: LOG.info(_LI("Creating consistency group %(name)s from " "source consistency group %(source_cgid)s."), {'name': name, 'source_cgid': source_cgid}, context=context) try: new_consistencygroup = self.consistencygroup_api.create_from_src( context, name, description, cgsnapshot_id, source_cgid) except exception.InvalidConsistencyGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.CgSnapshotNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.ConsistencyGroupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.CinderException as error: raise exc.HTTPBadRequest(explanation=error.msg) retval = self._view_builder.summary(req, new_consistencygroup) return retval @wsgi.serializers(xml=ConsistencyGroupTemplate) def update(self, req, id, body): """Update the consistency group. Expected format of the input parameter 'body': { "consistencygroup": { "name": "my_cg", "description": "My consistency group", "add_volumes": "volume-uuid-1,volume-uuid-2,..." "remove_volumes": "volume-uuid-8,volume-uuid-9,..." 
} } """ LOG.debug('Update called for consistency group %s.', id) if not body: msg = _("Missing request body.") raise exc.HTTPBadRequest(explanation=msg) self.assert_valid_body(body, 'consistencygroup') context = req.environ['cinder.context'] consistencygroup = body.get('consistencygroup', None) self.validate_name_and_description(consistencygroup) name = consistencygroup.get('name', None) description = consistencygroup.get('description', None) add_volumes = consistencygroup.get('add_volumes', None) remove_volumes = consistencygroup.get('remove_volumes', None) if (not name and not description and not add_volumes and not remove_volumes): msg = _("Name, description, add_volumes, and remove_volumes " "can not be all empty in the request body.") raise exc.HTTPBadRequest(explanation=msg) LOG.info(_LI("Updating consistency group %(id)s with name %(name)s " "description: %(description)s add_volumes: " "%(add_volumes)s remove_volumes: %(remove_volumes)s."), {'id': id, 'name': name, 'description': description, 'add_volumes': add_volumes, 'remove_volumes': remove_volumes}, context=context) try: group = self.consistencygroup_api.get(context, id) self.consistencygroup_api.update( context, group, name, description, add_volumes, remove_volumes) except exception.ConsistencyGroupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.InvalidConsistencyGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=202) class Consistencygroups(extensions.ExtensionDescriptor): """consistency groups support.""" name = 'Consistencygroups' alias = 'consistencygroups' namespace = 'http://docs.openstack.org/volume/ext/consistencygroups/api/v1' updated = '2014-08-18T00:00:00+00:00' def get_resources(self): resources = [] res = extensions.ResourceExtension( Consistencygroups.alias, ConsistencyGroupsController(), collection_actions={'detail': 'GET', 'create_from_src': 'POST'}, member_actions={'delete': 'POST', 'update': 'PUT'}) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/__init__.py0000664000567000056710000000230512701406250021707 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contrib contains extensions that are shipped with cinder. It can't be called 'extensions' because that causes namespacing problems. """ from oslo_config import cfg from oslo_log import log as logging from cinder.api import extensions CONF = cfg.CONF LOG = logging.getLogger(__name__) def standard_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) def select_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, CONF.osapi_volume_ext_list) cinder-8.0.0/cinder/api/contrib/volume_encryption_metadata.py0000664000567000056710000000503412701406250025573 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volume encryption metadata extension.""" from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import db authorize = extensions.extension_authorizer('volume', 'volume_encryption_metadata') class VolumeEncryptionMetadataTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.make_flat_dict('encryption', selector='encryption') return xmlutil.MasterTemplate(root, 1) class VolumeEncryptionMetadataController(wsgi.Controller): """The volume encryption metadata API extension.""" @wsgi.serializers(xml=VolumeEncryptionMetadataTemplate) def index(self, req, volume_id): """Returns the encryption metadata for a given volume.""" context = req.environ['cinder.context'] authorize(context) return db.volume_encryption_metadata_get(context, volume_id) @wsgi.serializers(xml=VolumeEncryptionMetadataTemplate) def show(self, req, volume_id, id): """Return a single encryption item.""" encryption_item = self.index(req, volume_id) if encryption_item is not None: return encryption_item[id] else: return None class Volume_encryption_metadata(extensions.ExtensionDescriptor): """Volume encryption metadata retrieval support.""" name = "VolumeEncryptionMetadata" alias = "os-volume-encryption-metadata" namespace = ("http://docs.openstack.org/volume/ext/" "os-volume-encryption-metadata/api/v1") updated = "2013-07-10T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( 'encryption', VolumeEncryptionMetadataController(), parent=dict(member_name='volume', collection_name='volumes')) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/used_limits.py0000664000567000056710000000422012701406250022467 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
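# Illustrative sketch (not part of the original file): reading a volume's
# encryption metadata through the extension above. The 'encryption'
# resource is nested under a volume, so both an index and a per-key show
# are exposed. Placeholders as before; the per-key response shape is left
# unparsed here since it is returned as the raw stored value.
import requests

ENDPOINT = 'http://cinder.example.com:8776/v2'
TOKEN = 'admin-keystone-token'
PROJECT_ID = 'tenant-uuid'
VOLUME_ID = 'volume-uuid'
HDRS = {'X-Auth-Token': TOKEN}

base = '%s/%s/volumes/%s/encryption' % (ENDPOINT, PROJECT_ID, VOLUME_ID)
meta = requests.get(base, headers=HDRS).json()       # full metadata dict
item = requests.get(base + '/cipher', headers=HDRS)  # one item, raw body
print(meta, item.text)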
from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import quota QUOTAS = quota.QUOTAS authorize = extensions.soft_extension_authorizer('limits', 'used_limits') class UsedLimitsController(wsgi.Controller): @wsgi.extends def index(self, req, resp_obj): context = req.environ['cinder.context'] if authorize(context): quotas = QUOTAS.get_project_quotas(context, context.project_id, usages=True) quota_map = { 'totalVolumesUsed': 'volumes', 'totalGigabytesUsed': 'gigabytes', 'totalSnapshotsUsed': 'snapshots', 'totalBackupsUsed': 'backups', 'totalBackupGigabytesUsed': 'backup_gigabytes' } used_limits = {} for display_name, single_quota in quota_map.items(): if single_quota in quotas: used_limits[display_name] = quotas[single_quota]['in_use'] resp_obj.obj['limits']['absolute'].update(used_limits) class Used_limits(extensions.ExtensionDescriptor): """Provide data on limited resources that are being used.""" name = "UsedLimits" alias = 'os-used-limits' namespace = "http://docs.openstack.org/volume/ext/used-limits/api/v1.1" updated = "2013-10-03T00:00:00+00:00" def get_controller_extensions(self): controller = UsedLimitsController() extension = extensions.ControllerExtension(self, 'limits', controller) return [extension] cinder-8.0.0/cinder/api/contrib/capabilities.py0000664000567000056710000000516212701406250022605 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import oslo_messaging from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import capabilities as capabilities_view from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.volume import rpcapi LOG = logging.getLogger(__name__) def authorize(context, action_name): extensions.extension_authorizer('volume', action_name)(context) class CapabilitiesController(wsgi.Controller): """The Capabilities controller for the OpenStack API.""" _view_builder_class = capabilities_view.ViewBuilder def __init__(self): # FIXME(jdg): Is it kosher that this just # skips the volume.api and goes straight to RPC # from here? 
self.volume_api = rpcapi.VolumeAPI() super(CapabilitiesController, self).__init__() def show(self, req, id): """Return capabilities list of given backend.""" context = req.environ['cinder.context'] authorize(context, 'capabilities') filters = {'host': id, 'binary': 'cinder-volume'} service = objects.ServiceList.get_all(context, filters) if not service: msg = (_("Can't find service: %s") % id) raise exception.NotFound(msg) try: capabilities = self.volume_api.get_capabilities(context, id, False) except oslo_messaging.MessagingTimeout: raise exception.RPCTimeout(service=id) return self._view_builder.summary(req, capabilities, id) class Capabilities(extensions.ExtensionDescriptor): """Capabilities support.""" name = "Capabilities" alias = "capabilities" namespace = "http://docs.openstack.org/volume/ext/capabilities/api/v2" updated = "2015-08-31T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( Capabilities.alias, CapabilitiesController()) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/services.py0000664000567000056710000002134012701406250021773 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import timeutils import webob.exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import utils from cinder import volume CONF = cfg.CONF LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('volume', 'services') class ServicesIndexTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('services') elem = xmlutil.SubTemplateElement(root, 'service', selector='services') elem.set('binary') elem.set('host') elem.set('zone') elem.set('status') elem.set('state') elem.set('update_at') elem.set('disabled_reason') elem.set('replication_status') elem.set('active_backend_id') elem.set('frozen') return xmlutil.MasterTemplate(root, 1) class ServicesUpdateTemplate(xmlutil.TemplateBuilder): def construct(self): # TODO(uni): template elements of 'host', 'service' and 'disabled' # should be deprecated to make ServicesUpdateTemplate consistent # with ServicesIndexTemplate. Still keeping it here for API # compatibility sake. root = xmlutil.TemplateElement('host') root.set('host') root.set('service') root.set('disabled') root.set('binary') root.set('status') root.set('disabled_reason') root.set('replication_status') root.set('active_backend_id') root.set('frozen') return xmlutil.MasterTemplate(root, 1) class ServiceController(wsgi.Controller): def __init__(self, ext_mgr=None): self.ext_mgr = ext_mgr super(ServiceController, self).__init__() self.volume_api = volume.API() @wsgi.serializers(xml=ServicesIndexTemplate) def index(self, req): """Return a list of all running services. 
Filter by host & service name. """ context = req.environ['cinder.context'] authorize(context, action='index') detailed = self.ext_mgr.is_loaded('os-extended-services') now = timeutils.utcnow(with_timezone=True) filters = {} if 'host' in req.GET: filters['host'] = req.GET['host'] if 'binary' in req.GET: filters['binary'] = req.GET['binary'] elif 'service' in req.GET: filters['binary'] = req.GET['service'] versionutils.report_deprecated_feature(LOG, _( "Query by service parameter is deprecated. " "Please use binary parameter instead.")) services = objects.ServiceList.get_all(context, filters) svcs = [] for svc in services: updated_at = svc.updated_at delta = now - (svc.updated_at or svc.created_at) delta_sec = delta.total_seconds() if svc.modified_at: delta_mod = now - svc.modified_at if abs(delta_sec) >= abs(delta_mod.total_seconds()): updated_at = svc.modified_at alive = abs(delta_sec) <= CONF.service_down_time art = (alive and "up") or "down" active = 'enabled' if svc.disabled: active = 'disabled' if updated_at: updated_at = timeutils.normalize_time(updated_at) ret_fields = {'binary': svc.binary, 'host': svc.host, 'zone': svc.availability_zone, 'status': active, 'state': art, 'updated_at': updated_at} if detailed: ret_fields['disabled_reason'] = svc.disabled_reason if svc.binary == "cinder-volume": ret_fields['replication_status'] = svc.replication_status ret_fields['active_backend_id'] = svc.active_backend_id ret_fields['frozen'] = svc.frozen svcs.append(ret_fields) return {'services': svcs} def _is_valid_as_reason(self, reason): if not reason: return False try: utils.check_string_length(reason.strip(), 'Disabled reason', min_length=1, max_length=255) except exception.InvalidInput: return False return True def _freeze(self, context, host): return self.volume_api.freeze_host(context, host) def _thaw(self, context, host): return self.volume_api.thaw_host(context, host) def _failover(self, context, host, backend_id=None): return self.volume_api.failover_host(context, host, backend_id) @wsgi.serializers(xml=ServicesUpdateTemplate) def update(self, req, id, body): """Enable/Disable scheduling for a service. Includes Freeze/Thaw which sends call down to drivers and allows volume.manager for the specified host to disable the service rather than accessing the service directly in this API layer. 
""" context = req.environ['cinder.context'] authorize(context, action='update') ext_loaded = self.ext_mgr.is_loaded('os-extended-services') ret_val = {} if id == "enable": disabled = False status = "enabled" if ext_loaded: ret_val['disabled_reason'] = None elif (id == "disable" or (id == "disable-log-reason" and ext_loaded)): disabled = True status = "disabled" elif id == "freeze": return self._freeze(context, body['host']) elif id == "thaw": return self._thaw(context, body['host']) elif id == "failover_host": self._failover( context, body['host'], body.get('backend_id', None) ) return webob.Response(status_int=202) else: raise webob.exc.HTTPNotFound(explanation=_("Unknown action")) try: host = body['host'] except (TypeError, KeyError): msg = _("Missing required element 'host' in request body.") raise webob.exc.HTTPBadRequest(explanation=msg) ret_val['disabled'] = disabled if id == "disable-log-reason" and ext_loaded: reason = body.get('disabled_reason') if not self._is_valid_as_reason(reason): msg = _('Disabled reason contains invalid characters ' 'or is too long') raise webob.exc.HTTPBadRequest(explanation=msg) ret_val['disabled_reason'] = reason # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compatibility sake. service = body.get('service', '') binary = body.get('binary', '') binary_key = binary or service if not binary_key: raise webob.exc.HTTPBadRequest() try: svc = objects.Service.get_by_args(context, host, binary_key) if not svc: raise webob.exc.HTTPNotFound(explanation=_('Unknown service')) svc.disabled = ret_val['disabled'] if 'disabled_reason' in ret_val: svc.disabled_reason = ret_val['disabled_reason'] svc.save() except exception.ServiceNotFound: raise webob.exc.HTTPNotFound(explanation=_("service not found")) ret_val.update({'host': host, 'service': service, 'binary': binary, 'status': status}) return ret_val class Services(extensions.ExtensionDescriptor): """Services support.""" name = "Services" alias = "os-services" namespace = "http://docs.openstack.org/volume/ext/services/api/v2" updated = "2012-10-28T00:00:00-00:00" def get_resources(self): resources = [] controller = ServiceController(self.ext_mgr) resource = extensions.ResourceExtension('os-services', controller) resources.append(resource) return resources cinder-8.0.0/cinder/api/contrib/volume_manage.py0000664000567000056710000001471012701406250022772 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import uuidutils from webob import exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.v2.views import volumes as volume_views from cinder.api.v2 import volumes from cinder import exception from cinder.i18n import _ from cinder import utils from cinder import volume as cinder_volume from cinder.volume import volume_types LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('volume', 'volume_manage') class VolumeManageController(wsgi.Controller): """The /os-volume-manage controller for the OpenStack API.""" _view_builder_class = volume_views.ViewBuilder def __init__(self, *args, **kwargs): super(VolumeManageController, self).__init__(*args, **kwargs) self.volume_api = cinder_volume.API() @wsgi.response(202) @wsgi.serializers(xml=volumes.VolumeTemplate) @wsgi.deserializers(xml=volumes.CreateDeserializer) def create(self, req, body): """Instruct Cinder to manage a storage object. Manages an existing backend storage object (e.g. a Linux logical volume or a SAN disk) by creating the Cinder objects required to manage it, and possibly renaming the backend storage object (driver dependent) From an API perspective, this operation behaves very much like a volume creation operation, except that properties such as image, snapshot and volume references don't make sense, because we are taking an existing storage object into Cinder management. Required HTTP Body: { 'volume': { 'host': , 'ref': , } } See the appropriate Cinder drivers' implementations of the manage_volume method to find out the accepted format of 'ref'. This API call will return with an error if any of the above elements are missing from the request, or if the 'host' element refers to a cinder host that is not registered. The volume will later enter the error state if it is discovered that 'ref' is bad. Optional elements to 'volume' are: name A name for the new volume. description A description for the new volume. volume_type ID or name of a volume type to associate with the new Cinder volume. Does not necessarily guarantee that the managed volume will have the properties described in the volume_type. The driver may choose to fail if it identifies that the specified volume_type is not compatible with the backend storage object. metadata Key/value pairs to be associated with the new volume. availability_zone The availability zone to associate with the new volume. bootable If set to True, marks the volume as bootable. """ context = req.environ['cinder.context'] authorize(context) self.assert_valid_body(body, 'volume') volume = body['volume'] self.validate_name_and_description(volume) # Check that the required keys are present, return an error if they # are not. 
required_keys = set(['ref', 'host']) missing_keys = list(required_keys - set(volume.keys())) if missing_keys: msg = _("The following elements are required: %s") % \ ', '.join(missing_keys) raise exc.HTTPBadRequest(explanation=msg) LOG.debug('Manage volume request body: %s', body) kwargs = {} req_volume_type = volume.get('volume_type', None) if req_volume_type: try: if not uuidutils.is_uuid_like(req_volume_type): kwargs['volume_type'] = \ volume_types.get_volume_type_by_name( context, req_volume_type) else: kwargs['volume_type'] = volume_types.get_volume_type( context, req_volume_type) except exception.VolumeTypeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) else: kwargs['volume_type'] = {} kwargs['name'] = volume.get('name', None) kwargs['description'] = volume.get('description', None) kwargs['metadata'] = volume.get('metadata', None) kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['bootable'] = volume.get('bootable', False) try: new_volume = self.volume_api.manage_existing(context, volume['host'], volume['ref'], **kwargs) except exception.ServiceNotFound: msg = _("Service not found.") raise exc.HTTPNotFound(explanation=msg) utils.add_visible_admin_metadata(new_volume) return self._view_builder.detail(req, new_volume) class Volume_manage(extensions.ExtensionDescriptor): """Allows existing backend storage to be 'managed' by Cinder.""" name = 'VolumeManage' alias = 'os-volume-manage' namespace = ('http://docs.openstack.org/volume/ext/' 'os-volume-manage/api/v1') updated = '2014-02-10T00:00:00+00:00' def get_resources(self): controller = VolumeManageController() res = extensions.ResourceExtension(Volume_manage.alias, controller) return [res] cinder-8.0.0/cinder/api/contrib/volume_tenant_attribute.py0000664000567000056710000000625112701406250025117 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
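# Illustrative sketch (not part of the original file): asking Cinder to
# manage an existing backend object through the os-volume-manage resource
# described above. The 'ref' format is driver dependent as the docstring
# notes; {'source-name': ...} is the form the LVM driver understands and
# is used here only as an example. Placeholders as before.
import requests

ENDPOINT = 'http://cinder.example.com:8776/v2'
TOKEN = 'admin-keystone-token'
PROJECT_ID = 'admin-tenant-uuid'

body = {'volume': {
    'host': 'myhost@lvmdriver-1#lvmdriver-1',    # must be a registered host
    'ref': {'source-name': 'existing-lv-name'},  # driver-dependent reference
    'name': 'adopted-volume',
}}
resp = requests.post('%s/%s/os-volume-manage' % (ENDPOINT, PROJECT_ID),
                     json=body, headers={'X-Auth-Token': TOKEN})
print(resp.status_code)  # 202; the volume can still end up in 'error'
                         # if the backend later rejects the reference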
from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil authorize = extensions.soft_extension_authorizer('volume', 'volume_tenant_attribute') class VolumeTenantAttributeController(wsgi.Controller): def _add_volume_tenant_attribute(self, req, resp_volume): db_volume = req.get_db_volume(resp_volume['id']) key = "%s:tenant_id" % Volume_tenant_attribute.alias resp_volume[key] = db_volume['project_id'] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeTenantAttributeTemplate()) volume = resp_obj.obj['volume'] self._add_volume_tenant_attribute(req, volume) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeListTenantAttributeTemplate()) for vol in list(resp_obj.obj['volumes']): self._add_volume_tenant_attribute(req, vol) class Volume_tenant_attribute(extensions.ExtensionDescriptor): """Expose the internal project_id as an attribute of a volume.""" name = "VolumeTenantAttribute" alias = "os-vol-tenant-attr" namespace = ("http://docs.openstack.org/volume/ext/" "volume_tenant_attribute/api/v2") updated = "2011-11-03T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeTenantAttributeController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] def make_volume(elem): elem.set('{%s}tenant_id' % Volume_tenant_attribute.namespace, '%s:tenant_id' % Volume_tenant_attribute.alias) class VolumeTenantAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume', selector='volume') make_volume(root) alias = Volume_tenant_attribute.alias namespace = Volume_tenant_attribute.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumeListTenantAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumes') elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') make_volume(elem) alias = Volume_tenant_attribute.alias namespace = Volume_tenant_attribute.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) cinder-8.0.0/cinder/api/contrib/volume_type_encryption.py0000664000567000056710000001742312701406250025001 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
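# --- Illustrative sketch (editor's addition, not part of the cinder tarball) ---
# Effect of the os-vol-tenant-attr extension above: when the caller is
# authorized, volume show/detail responses gain one extra key carrying the
# owning project's id. All values below are made-up examples.
#
#     GET /v2/{project_id}/volumes/{volume_id}
#     {
#         "volume": {
#             "id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
#             "status": "available",
#             "os-vol-tenant-attr:tenant_id": "0c2eba2c5af04d3f9e9d0d410b371fde"
#         }
#     }
# --- end sketch ---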
"""The volume types encryption extension.""" import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import db from cinder import exception from cinder.i18n import _ from cinder import rpc from cinder import utils from cinder.volume import volume_types authorize = extensions.extension_authorizer('volume', 'volume_type_encryption') CONTROL_LOCATION = ['front-end', 'back-end'] class VolumeTypeEncryptionTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.make_flat_dict('encryption', selector='encryption') return xmlutil.MasterTemplate(root, 1) class VolumeTypeEncryptionController(wsgi.Controller): """The volume type encryption API controller for the OpenStack API.""" def _get_volume_type_encryption(self, context, type_id): encryption_ref = db.volume_type_encryption_get(context, type_id) encryption_specs = {} if not encryption_ref: return encryption_specs for key, value in encryption_ref.items(): encryption_specs[key] = value return encryption_specs def _check_type(self, context, type_id): try: volume_types.get_volume_type(context, type_id) except exception.VolumeTypeNotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.msg) def _check_encryption_input(self, encryption, create=True): if encryption.get('key_size') is not None: encryption['key_size'] = utils.validate_integer( encryption['key_size'], 'key_size', min_value=0, max_value=db.MAX_INT) if create: msg = None if 'provider' not in encryption.keys(): msg = _('provider must be defined') elif 'control_location' not in encryption.keys(): msg = _('control_location must be defined') if msg is not None: raise exception.InvalidInput(reason=msg) # Check control location if 'control_location' in encryption.keys(): if encryption['control_location'] not in CONTROL_LOCATION: msg = _("Valid control location are: %s") % CONTROL_LOCATION raise exception.InvalidInput(reason=msg) def _encrypted_type_in_use(self, context, volume_type_id): volume_list = db.volume_type_encryption_volume_get(context, volume_type_id) # If there is at least one volume in the list # returned, this type is in use by a volume. if len(volume_list) > 0: return True else: return False @wsgi.serializers(xml=VolumeTypeEncryptionTemplate) def index(self, req, type_id): """Returns the encryption specs for a given volume type.""" context = req.environ['cinder.context'] authorize(context) self._check_type(context, type_id) return self._get_volume_type_encryption(context, type_id) @wsgi.serializers(xml=VolumeTypeEncryptionTemplate) def create(self, req, type_id, body=None): """Create encryption specs for an existing volume type.""" context = req.environ['cinder.context'] authorize(context) if self._encrypted_type_in_use(context, type_id): expl = _('Cannot create encryption specs. 
Volume type in use.') raise webob.exc.HTTPBadRequest(explanation=expl) self.assert_valid_body(body, 'encryption') self._check_type(context, type_id) encryption_specs = self._get_volume_type_encryption(context, type_id) if encryption_specs: raise exception.VolumeTypeEncryptionExists(type_id=type_id) encryption_specs = body['encryption'] self._check_encryption_input(encryption_specs) db.volume_type_encryption_create(context, type_id, encryption_specs) notifier_info = dict(type_id=type_id, specs=encryption_specs) notifier = rpc.get_notifier('volumeTypeEncryption') notifier.info(context, 'volume_type_encryption.create', notifier_info) return body @wsgi.serializers(xml=VolumeTypeEncryptionTemplate) def update(self, req, type_id, id, body=None): """Update encryption specs for a given volume type.""" context = req.environ['cinder.context'] authorize(context) self.assert_valid_body(body, 'encryption') if len(body) > 1: expl = _('Request body contains too many items.') raise webob.exc.HTTPBadRequest(explanation=expl) self._check_type(context, type_id) if self._encrypted_type_in_use(context, type_id): expl = _('Cannot update encryption specs. Volume type in use.') raise webob.exc.HTTPBadRequest(explanation=expl) encryption_specs = body['encryption'] self._check_encryption_input(encryption_specs, create=False) db.volume_type_encryption_update(context, type_id, encryption_specs) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('volumeTypeEncryption') notifier.info(context, 'volume_type_encryption.update', notifier_info) return body @wsgi.serializers(xml=VolumeTypeEncryptionTemplate) def show(self, req, type_id, id): """Return a single encryption item.""" context = req.environ['cinder.context'] authorize(context) self._check_type(context, type_id) encryption_specs = self._get_volume_type_encryption(context, type_id) if id not in encryption_specs: raise webob.exc.HTTPNotFound() return {id: encryption_specs[id]} def delete(self, req, type_id, id): """Delete encryption specs for a given volume type.""" context = req.environ['cinder.context'] authorize(context) if self._encrypted_type_in_use(context, type_id): expl = _('Cannot delete encryption specs. Volume type in use.') raise webob.exc.HTTPBadRequest(explanation=expl) else: try: db.volume_type_encryption_delete(context, type_id) except exception.VolumeTypeEncryptionNotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.msg) return webob.Response(status_int=202) class Volume_type_encryption(extensions.ExtensionDescriptor): """Encryption support for volume types.""" name = "VolumeTypeEncryption" alias = "encryption" namespace = ("http://docs.openstack.org/volume/ext/" "volume-type-encryption/api/v1") updated = "2013-07-01T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( Volume_type_encryption.alias, VolumeTypeEncryptionController(), parent=dict(member_name='type', collection_name='types')) resources.append(res) return resources def get_controller_extensions(self): controller = VolumeTypeEncryptionController() extension = extensions.ControllerExtension(self, 'types', controller) return [extension] cinder-8.0.0/cinder/api/contrib/volume_unmanage.py0000664000567000056710000000522712701406250023340 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import webob from webob import exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _LI from cinder import volume LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('volume', 'volume_unmanage') class VolumeUnmanageController(wsgi.Controller): def __init__(self, *args, **kwargs): super(VolumeUnmanageController, self).__init__(*args, **kwargs) self.volume_api = volume.API() @wsgi.response(202) @wsgi.action('os-unmanage') def unmanage(self, req, id, body): """Stop managing a volume. This action is very much like a delete, except that a different method (unmanage) is called on the Cinder driver. This has the effect of removing the volume from Cinder management without actually removing the backend storage object associated with it. There are no required parameters. A Not Found error is returned if the specified volume does not exist. A Bad Request error is returned if the specified volume is still attached to an instance. """ context = req.environ['cinder.context'] authorize(context) LOG.info(_LI("Unmanage volume with id: %s"), id, context=context) try: vol = self.volume_api.get(context, id) self.volume_api.delete(context, vol, unmanage_only=True) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return webob.Response(status_int=202) class Volume_unmanage(extensions.ExtensionDescriptor): """Enable volume unmanage operation.""" name = "VolumeUnmanage" alias = "os-volume-unmanage" namespace = "http://docs.openstack.org/volume/ext/volume-unmanage/api/v1.1" updated = "2012-05-31T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeUnmanageController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] cinder-8.0.0/cinder/api/contrib/hosts.py0000664000567000056710000002424512701406250021317 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
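# --- Illustrative sketch (editor's addition, not part of the cinder tarball) ---
# Invoking the os-unmanage action handled by VolumeUnmanageController above.
# Endpoint and token are assumptions; the action takes no parameters, and a
# 202 means Cinder forgot the volume without touching the backend object.
import json

import requests  # assumed available


def unmanage_volume(endpoint, token, volume_id):
    """POST {'os-unmanage': None} to the volume's action resource."""
    resp = requests.post(
        '%s/volumes/%s/action' % (endpoint, volume_id),
        data=json.dumps({'os-unmanage': None}),
        headers={'X-Auth-Token': token,
                 'Content-Type': 'application/json'})
    return resp.status_code  # expect 202
# --- end sketch ---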
"""The hosts admin extension.""" from xml.parsers import expat from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils import webob.exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import db from cinder import exception from cinder.i18n import _, _LI from cinder import objects from cinder import utils from cinder.volume import api as volume_api CONF = cfg.CONF LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('volume', 'hosts') class HostIndexTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('hosts') elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts') elem.set('service-status') elem.set('service') elem.set('zone') elem.set('service-state') elem.set('host_name') elem.set('last-update') return xmlutil.MasterTemplate(root, 1) class HostUpdateTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('host') root.set('host') root.set('status') return xmlutil.MasterTemplate(root, 1) class HostActionTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('host') root.set('host') return xmlutil.MasterTemplate(root, 1) class HostShowTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('host') elem = xmlutil.make_flat_dict('resource', selector='host', subselector='resource') root.append(elem) return xmlutil.MasterTemplate(root, 1) class HostDeserializer(wsgi.XMLDeserializer): def default(self, string): try: node = utils.safe_minidom_parse_string(string) except expat.ExpatError: msg = _("cannot understand XML") raise exception.MalformedRequestBody(reason=msg) updates = {} for child in node.childNodes[0].childNodes: updates[child.tagName] = self.extract_text(child) return dict(body=updates) def _list_hosts(req, service=None): """Returns a summary list of hosts.""" curr_time = timeutils.utcnow(with_timezone=True) context = req.environ['cinder.context'] filters = {'disabled': False} services = objects.ServiceList.get_all(context, filters) zone = '' if 'zone' in req.GET: zone = req.GET['zone'] if zone: services = [s for s in services if s['availability_zone'] == zone] hosts = [] for host in services: delta = curr_time - (host.updated_at or host.created_at) alive = abs(delta.total_seconds()) <= CONF.service_down_time status = (alive and "available") or "unavailable" active = 'enabled' if host.disabled: active = 'disabled' LOG.debug('status, active and update: %s, %s, %s', status, active, host.updated_at) updated_at = host.updated_at if updated_at: updated_at = timeutils.normalize_time(updated_at) hosts.append({'host_name': host.host, 'service': host.topic, 'zone': host.availability_zone, 'service-status': status, 'service-state': active, 'last-update': updated_at, }) if service: hosts = [host for host in hosts if host['service'] == service] return hosts def check_host(fn): """Makes sure that the host exists.""" def wrapped(self, req, id, service=None, *args, **kwargs): listed_hosts = _list_hosts(req, service) hosts = [h["host_name"] for h in listed_hosts] if id in hosts: return fn(self, req, id, *args, **kwargs) else: message = _("Host '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=message) return wrapped class HostController(wsgi.Controller): """The Hosts API controller for the OpenStack API.""" def __init__(self): self.api = volume_api.HostAPI() super(HostController, self).__init__() 
@wsgi.serializers(xml=HostIndexTemplate) def index(self, req): authorize(req.environ['cinder.context']) return {'hosts': _list_hosts(req)} @wsgi.serializers(xml=HostUpdateTemplate) @wsgi.deserializers(xml=HostDeserializer) @check_host def update(self, req, id, body): authorize(req.environ['cinder.context']) update_values = {} for raw_key, raw_val in body.items(): key = raw_key.lower().strip() val = raw_val.lower().strip() if key == "status": if val in ("enable", "disable"): update_values['status'] = val.startswith("enable") else: explanation = _("Invalid status: '%s'") % raw_val raise webob.exc.HTTPBadRequest(explanation=explanation) else: explanation = _("Invalid update setting: '%s'") % raw_key raise webob.exc.HTTPBadRequest(explanation=explanation) update_setters = {'status': self._set_enabled_status} result = {} for key, value in update_values.items(): result.update(update_setters[key](req, id, value)) return result def _set_enabled_status(self, req, host, enabled): """Sets the specified host's ability to accept new volumes.""" context = req.environ['cinder.context'] state = "enabled" if enabled else "disabled" LOG.info(_LI("Setting host %(host)s to %(state)s."), {'host': host, 'state': state}) result = self.api.set_host_enabled(context, host=host, enabled=enabled) if result not in ("enabled", "disabled"): # An error message was returned raise webob.exc.HTTPBadRequest(explanation=result) return {"host": host, "status": result} @wsgi.serializers(xml=HostShowTemplate) def show(self, req, id): """Shows the volume usage info given by hosts. :param context: security context :param host: hostname :returns: expected to use HostShowTemplate. ex.:: {'host': {'resource':D},..} D: {'host': 'hostname','project': 'admin', 'volume_count': 1, 'total_volume_gb': 2048} """ host = id context = req.environ['cinder.context'] if not context.is_admin: msg = _("Describe-resource is admin only functionality") raise webob.exc.HTTPForbidden(explanation=msg) try: host_ref = objects.Service.get_by_host_and_topic( context, host, CONF.volume_topic) except exception.ServiceNotFound: raise webob.exc.HTTPNotFound(explanation=_("Host not found")) # Getting total available/used resource # TODO(jdg): Add summary info for Snapshots volume_refs = db.volume_get_all_by_host(context, host_ref.host) (count, sum) = db.volume_data_get_for_host(context, host_ref.host) snap_count_total = 0 snap_sum_total = 0 resources = [{'resource': {'host': host, 'project': '(total)', 'volume_count': str(count), 'total_volume_gb': str(sum), 'snapshot_count': str(snap_count_total), 'total_snapshot_gb': str(snap_sum_total)}}] project_ids = [v['project_id'] for v in volume_refs] project_ids = list(set(project_ids)) for project_id in project_ids: (count, sum) = db.volume_data_get_for_project(context, project_id) (snap_count, snap_sum) = ( objects.Snapshot.snapshot_data_get_for_project(context, project_id)) resources.append( {'resource': {'host': host, 'project': project_id, 'volume_count': str(count), 'total_volume_gb': str(sum), 'snapshot_count': str(snap_count), 'total_snapshot_gb': str(snap_sum)}}) snap_count_total += int(snap_count) snap_sum_total += int(snap_sum) resources[0]['resource']['snapshot_count'] = str(snap_count_total) resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total) return {"host": resources} class Hosts(extensions.ExtensionDescriptor): """Admin-only host administration.""" name = "Hosts" alias = "os-hosts" namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1" updated = "2011-06-29T00:00:00+00:00" def 
get_resources(self): resources = [extensions.ResourceExtension('os-hosts', HostController(), collection_actions={ 'update': 'PUT'}, member_actions={ 'startup': 'GET', 'shutdown': 'GET', 'reboot': 'GET'})] return resources cinder-8.0.0/cinder/api/contrib/backups.py0000664000567000056710000003546712701406250021617 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The backups api.""" from oslo_log import log as logging import webob from webob import exc from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import backups as backup_views from cinder.api import xmlutil from cinder import backup as backupAPI from cinder import exception from cinder.i18n import _, _LI from cinder import utils LOG = logging.getLogger(__name__) def make_backup(elem): elem.set('id') elem.set('status') elem.set('size') elem.set('container') elem.set('parent_id') elem.set('volume_id') elem.set('object_count') elem.set('availability_zone') elem.set('created_at') elem.set('name') elem.set('description') elem.set('fail_reason') def make_backup_restore(elem): elem.set('backup_id') elem.set('volume_id') elem.set('volume_name') def make_backup_export_import_record(elem): elem.set('backup_service') elem.set('backup_url') class BackupTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('backup', selector='backup') make_backup(root) alias = Backups.alias namespace = Backups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class BackupsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('backups') elem = xmlutil.SubTemplateElement(root, 'backup', selector='backups') make_backup(elem) alias = Backups.alias namespace = Backups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class BackupRestoreTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('restore', selector='restore') make_backup_restore(root) alias = Backups.alias namespace = Backups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class BackupExportImportTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('backup-record', selector='backup-record') make_backup_export_import_record(root) alias = Backups.alias namespace = Backups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class CreateDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) backup = self._extract_backup(dom) return {'body': {'backup': backup}} def _extract_backup(self, node): backup = {} backup_node = self.find_first_child_named(node, 'backup') attributes = ['container', 'display_name', 'display_description', 'volume_id', 'parent_id'] for attr in attributes: 
if backup_node.getAttribute(attr): backup[attr] = backup_node.getAttribute(attr) return backup class RestoreDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) restore = self._extract_restore(dom) return {'body': {'restore': restore}} def _extract_restore(self, node): restore = {} restore_node = self.find_first_child_named(node, 'restore') if restore_node.getAttribute('volume_id'): restore['volume_id'] = restore_node.getAttribute('volume_id') return restore class BackupImportDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) backup = self._extract_backup(dom) retval = {'body': {'backup-record': backup}} return retval def _extract_backup(self, node): backup = {} backup_node = self.find_first_child_named(node, 'backup-record') attributes = ['backup_service', 'backup_url'] for attr in attributes: if backup_node.getAttribute(attr): backup[attr] = backup_node.getAttribute(attr) return backup class BackupsController(wsgi.Controller): """The Backups API controller for the OpenStack API.""" _view_builder_class = backup_views.ViewBuilder def __init__(self): self.backup_api = backupAPI.API() super(BackupsController, self).__init__() @wsgi.serializers(xml=BackupTemplate) def show(self, req, id): """Return data about the given backup.""" LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] try: backup = self.backup_api.get(context, backup_id=id) req.cache_db_backup(backup) except exception.BackupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.detail(req, backup) def delete(self, req, id): """Delete a backup.""" LOG.debug('Delete called for member %s.', id) context = req.environ['cinder.context'] LOG.info(_LI('Delete backup with id: %s'), id, context=context) try: backup = self.backup_api.get(context, id) self.backup_api.delete(context, backup) except exception.BackupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=202) @wsgi.serializers(xml=BackupsTemplate) def index(self, req): """Returns a summary list of backups.""" return self._get_backups(req, is_detail=False) @wsgi.serializers(xml=BackupsTemplate) def detail(self, req): """Returns a detailed list of backups.""" return self._get_backups(req, is_detail=True) @staticmethod def _get_backup_filter_options(): """Return volume search options allowed by non-admin.""" return ('name', 'status', 'volume_id') def _get_backups(self, req, is_detail): """Returns a list of backups, transformed through view builder.""" context = req.environ['cinder.context'] filters = req.params.copy() marker, limit, offset = common.get_pagination_params(filters) sort_keys, sort_dirs = common.get_sort_params(filters) utils.remove_invalid_filter_options(context, filters, self._get_backup_filter_options()) if 'name' in filters: filters['display_name'] = filters['name'] del filters['name'] backups = self.backup_api.get_all(context, search_opts=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs, ) req.cache_db_backups(backups.objects) if is_detail: backups = self._view_builder.detail_list(req, backups.objects) else: backups = self._view_builder.summary_list(req, backups.objects) return backups # TODO(frankm): Add some checks here including # - whether requested volume_id exists so we can return some errors 
# immediately # - maybe also do validation of swift container name @wsgi.response(202) @wsgi.serializers(xml=BackupTemplate) @wsgi.deserializers(xml=CreateDeserializer) def create(self, req, body): """Create a new backup.""" LOG.debug('Creating new backup %s', body) self.assert_valid_body(body, 'backup') context = req.environ['cinder.context'] backup = body['backup'] try: volume_id = backup['volume_id'] except KeyError: msg = _("Incorrect request body format") raise exc.HTTPBadRequest(explanation=msg) container = backup.get('container', None) self.validate_name_and_description(backup) name = backup.get('name', None) description = backup.get('description', None) incremental = backup.get('incremental', False) force = backup.get('force', False) snapshot_id = backup.get('snapshot_id', None) LOG.info(_LI("Creating backup of volume %(volume_id)s in container" " %(container)s"), {'volume_id': volume_id, 'container': container}, context=context) try: new_backup = self.backup_api.create(context, name, description, volume_id, container, incremental, None, force, snapshot_id) except (exception.InvalidVolume, exception.InvalidSnapshot) as error: raise exc.HTTPBadRequest(explanation=error.msg) except (exception.VolumeNotFound, exception.SnapshotNotFound) as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.ServiceNotFound as error: raise exc.HTTPInternalServerError(explanation=error.msg) retval = self._view_builder.summary(req, dict(new_backup)) return retval @wsgi.response(202) @wsgi.serializers(xml=BackupRestoreTemplate) @wsgi.deserializers(xml=RestoreDeserializer) def restore(self, req, id, body): """Restore an existing backup to a volume.""" LOG.debug('Restoring backup %(backup_id)s (%(body)s)', {'backup_id': id, 'body': body}) self.assert_valid_body(body, 'restore') context = req.environ['cinder.context'] restore = body['restore'] volume_id = restore.get('volume_id', None) name = restore.get('name', None) LOG.info(_LI("Restoring backup %(backup_id)s to volume %(volume_id)s"), {'backup_id': id, 'volume_id': volume_id}, context=context) try: new_restore = self.backup_api.restore(context, backup_id=id, volume_id=volume_id, name=name) except exception.InvalidInput as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.BackupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.VolumeSizeExceedsAvailableQuota as error: raise exc.HTTPRequestEntityTooLarge( explanation=error.msg, headers={'Retry-After': '0'}) except exception.VolumeLimitExceeded as error: raise exc.HTTPRequestEntityTooLarge( explanation=error.msg, headers={'Retry-After': '0'}) retval = self._view_builder.restore_summary( req, dict(new_restore)) return retval @wsgi.response(200) @wsgi.serializers(xml=BackupExportImportTemplate) def export_record(self, req, id): """Export a backup.""" LOG.debug('export record called for member %s.', id) context = req.environ['cinder.context'] try: backup_info = self.backup_api.export_record(context, id) except exception.BackupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) retval = self._view_builder.export_summary( req, dict(backup_info)) LOG.debug('export record output: 
%s.', retval) return retval @wsgi.response(201) @wsgi.serializers(xml=BackupTemplate) @wsgi.deserializers(xml=BackupImportDeserializer) def import_record(self, req, body): """Import a backup.""" LOG.debug('Importing record from %s.', body) self.assert_valid_body(body, 'backup-record') context = req.environ['cinder.context'] import_data = body['backup-record'] # Verify that body elements are provided try: backup_service = import_data['backup_service'] backup_url = import_data['backup_url'] except KeyError: msg = _("Incorrect request body format.") raise exc.HTTPBadRequest(explanation=msg) LOG.debug('Importing backup using %(service)s and url %(url)s.', {'service': backup_service, 'url': backup_url}) try: new_backup = self.backup_api.import_record(context, backup_service, backup_url) except exception.BackupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.ServiceNotFound as error: raise exc.HTTPInternalServerError(explanation=error.msg) retval = self._view_builder.summary(req, dict(new_backup)) LOG.debug('import record output: %s.', retval) return retval class Backups(extensions.ExtensionDescriptor): """Backups support.""" name = 'Backups' alias = 'backups' namespace = 'http://docs.openstack.org/volume/ext/backups/api/v1' updated = '2012-12-12T00:00:00+00:00' def get_resources(self): resources = [] res = extensions.ResourceExtension( Backups.alias, BackupsController(), collection_actions={'detail': 'GET', 'import_record': 'POST'}, member_actions={'restore': 'POST', 'export_record': 'GET', 'action': 'POST'}) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/types_manage.py0000664000567000056710000001774112701406250022636 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
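# --- Illustrative sketch (editor's addition, not part of the cinder tarball) ---
# The JSON body BackupsController.create above parses: 'volume_id' is the only
# required key (a missing one raises HTTPBadRequest); every other key defaults
# exactly as the handler shows. Values are made-up examples.
EXAMPLE_BACKUP_CREATE_BODY = {
    'backup': {
        'volume_id': '64f5d2fb-0000-0000-0000-000000000000',  # required
        'container': 'nightly-backups',  # optional storage container
        'name': 'nightly',               # optional display name
        'description': 'nightly run',    # optional
        'incremental': False,            # optional, default False
        'force': False,                  # optional; allow in-use volumes
        'snapshot_id': None,             # optional; back up from a snapshot
    }
}
# --- end sketch ---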
"""The volume types manage extension.""" import six import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.v1 import types from cinder.api.views import types as views_types from cinder import exception from cinder.i18n import _ from cinder import rpc from cinder import utils from cinder.volume import volume_types authorize = extensions.extension_authorizer('volume', 'types_manage') class VolumeTypesManageController(wsgi.Controller): """The volume types API controller for the OpenStack API.""" _view_builder_class = views_types.ViewBuilder def _notify_volume_type_error(self, context, method, err, volume_type=None, id=None, name=None): payload = dict( volume_types=volume_type, name=name, id=id, error_message=err) rpc.get_notifier('volumeType').error(context, method, payload) def _notify_volume_type_info(self, context, method, volume_type): payload = dict(volume_types=volume_type) rpc.get_notifier('volumeType').info(context, method, payload) @wsgi.action("create") @wsgi.serializers(xml=types.VolumeTypeTemplate) def _create(self, req, body): """Creates a new volume type.""" context = req.environ['cinder.context'] authorize(context) self.assert_valid_body(body, 'volume_type') vol_type = body['volume_type'] name = vol_type.get('name', None) description = vol_type.get('description') specs = vol_type.get('extra_specs', {}) is_public = vol_type.get('os-volume-type-access:is_public', True) if name is None or len(name.strip()) == 0: msg = _("Volume type name can not be empty.") raise webob.exc.HTTPBadRequest(explanation=msg) utils.check_string_length(name, 'Type name', min_length=1, max_length=255) if description is not None: utils.check_string_length(description, 'Type description', min_length=0, max_length=255) if not utils.is_valid_boolstr(is_public): msg = _("Invalid value '%s' for is_public. Accepted values: " "True or False.") % is_public raise webob.exc.HTTPBadRequest(explanation=msg) try: volume_types.create(context, name, specs, is_public, description=description) vol_type = volume_types.get_volume_type_by_name(context, name) req.cache_resource(vol_type, name='types') self._notify_volume_type_info( context, 'volume_type.create', vol_type) except exception.VolumeTypeExists as err: self._notify_volume_type_error( context, 'volume_type.create', err, volume_type=vol_type) raise webob.exc.HTTPConflict(explanation=six.text_type(err)) except exception.VolumeTypeNotFoundByName as err: self._notify_volume_type_error( context, 'volume_type.create', err, name=name) raise webob.exc.HTTPNotFound(explanation=err.msg) return self._view_builder.show(req, vol_type) @wsgi.action("update") @wsgi.serializers(xml=types.VolumeTypeTemplate) def _update(self, req, id, body): # Update description for a given volume type. context = req.environ['cinder.context'] authorize(context) self.assert_valid_body(body, 'volume_type') vol_type = body['volume_type'] description = vol_type.get('description') name = vol_type.get('name') is_public = vol_type.get('is_public') # Name and description can not be both None. # If name specified, name can not be empty. 
if name and len(name.strip()) == 0: msg = _("Volume type name can not be empty.") raise webob.exc.HTTPBadRequest(explanation=msg) if name is None and description is None and is_public is None: msg = _("Specify volume type name, description, is_public or " "a combination thereof.") raise webob.exc.HTTPBadRequest(explanation=msg) if is_public is not None and not utils.is_valid_boolstr(is_public): msg = _("Invalid value '%s' for is_public. Accepted values: " "True or False.") % is_public raise webob.exc.HTTPBadRequest(explanation=msg) if name: utils.check_string_length(name, 'Type name', min_length=1, max_length=255) if description is not None: utils.check_string_length(description, 'Type description', min_length=0, max_length=255) try: volume_types.update(context, id, name, description, is_public=is_public) # Get the updated vol_type = volume_types.get_volume_type(context, id) req.cache_resource(vol_type, name='types') self._notify_volume_type_info( context, 'volume_type.update', vol_type) except exception.VolumeTypeNotFound as err: self._notify_volume_type_error( context, 'volume_type.update', err, id=id) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.VolumeTypeExists as err: self._notify_volume_type_error( context, 'volume_type.update', err, volume_type=vol_type) raise webob.exc.HTTPConflict(explanation=six.text_type(err)) except exception.VolumeTypeUpdateFailed as err: self._notify_volume_type_error( context, 'volume_type.update', err, volume_type=vol_type) raise webob.exc.HTTPInternalServerError( explanation=six.text_type(err)) return self._view_builder.show(req, vol_type) @wsgi.action("delete") def _delete(self, req, id): """Deletes an existing volume type.""" context = req.environ['cinder.context'] authorize(context) try: vol_type = volume_types.get_volume_type(context, id) volume_types.destroy(context, vol_type['id']) self._notify_volume_type_info( context, 'volume_type.delete', vol_type) except exception.VolumeTypeInUse as err: self._notify_volume_type_error( context, 'volume_type.delete', err, volume_type=vol_type) msg = _('Target volume type is still in use.') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.VolumeTypeNotFound as err: self._notify_volume_type_error( context, 'volume_type.delete', err, id=id) raise webob.exc.HTTPNotFound(explanation=err.msg) return webob.Response(status_int=202) class Types_manage(extensions.ExtensionDescriptor): """Types manage support.""" name = "TypesManage" alias = "os-types-manage" namespace = "http://docs.openstack.org/volume/ext/types-manage/api/v1" updated = "2011-08-24T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeTypesManageController() extension = extensions.ControllerExtension(self, 'types', controller) return [extension] cinder-8.0.0/cinder/api/contrib/qos_specs_manage.py0000664000567000056710000004727112701406250023472 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 eBay Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
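# --- Illustrative sketch (editor's addition, not part of the cinder tarball) ---
# Body accepted by VolumeTypesManageController._create above: 'name' is
# required (1-255 chars after stripping); description, extra_specs and the
# is_public flag are optional. Values are made-up examples.
EXAMPLE_TYPE_CREATE_BODY = {
    'volume_type': {
        'name': 'gold',                                 # required, non-empty
        'description': 'high-IOPS backend',             # optional, <= 255
        'os-volume-type-access:is_public': True,        # optional boolean
        'extra_specs': {'volume_backend_name': 'lvm'},  # optional dict
    }
}
# --- end sketch ---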
"""The QoS specs extension""" from oslo_log import log as logging from oslo_utils import strutils import six import webob from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import qos_specs as view_qos_specs from cinder.api import xmlutil from cinder import exception from cinder.i18n import _, _LI from cinder import rpc from cinder import utils from cinder.volume import qos_specs LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('volume', 'qos_specs_manage') def make_qos_specs(elem): elem.set('id') elem.set('name') elem.set('consumer') elem.append(SpecsTemplate()) def make_associations(elem): elem.set('association_type') elem.set('name') elem.set('id') class SpecsTemplate(xmlutil.TemplateBuilder): def construct(self): return xmlutil.MasterTemplate(xmlutil.make_flat_dict('specs'), 1) class QoSSpecsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('qos_specs') elem = xmlutil.SubTemplateElement(root, 'qos_spec', selector='qos_specs') make_qos_specs(elem) return xmlutil.MasterTemplate(root, 1) class QoSSpecsKeyDeserializer(wsgi.XMLDeserializer): def _extract_keys(self, key_node): keys = [] for key in key_node.childNodes: key_name = key.tagName keys.append(key_name) return keys def default(self, string): dom = utils.safe_minidom_parse_string(string) key_node = self.find_first_child_named(dom, 'keys') if not key_node: LOG.info(_LI("Unable to parse XML input.")) msg = _("Unable to parse XML request. " "Please provide XML in correct format.") raise webob.exc.HTTPBadRequest(explanation=msg) return {'body': {'keys': self._extract_keys(key_node)}} class AssociationsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('qos_associations') elem = xmlutil.SubTemplateElement(root, 'associations', selector='qos_associations') make_associations(elem) return xmlutil.MasterTemplate(root, 1) def _check_specs(context, specs_id): try: qos_specs.get_qos_specs(context, specs_id) except exception.QoSSpecsNotFound as ex: raise webob.exc.HTTPNotFound(explanation=six.text_type(ex)) class QoSSpecsController(wsgi.Controller): """The volume type extra specs API controller for the OpenStack API.""" _view_builder_class = view_qos_specs.ViewBuilder @staticmethod def _notify_qos_specs_error(context, method, payload): rpc.get_notifier('QoSSpecs').error(context, method, payload) @wsgi.serializers(xml=QoSSpecsTemplate) def index(self, req): """Returns the list of qos_specs.""" context = req.environ['cinder.context'] authorize(context) params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) filters = params allowed_search_options = ('id', 'name', 'consumer') utils.remove_invalid_filter_options(context, filters, allowed_search_options) specs = qos_specs.get_all_specs(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return self._view_builder.summary_list(req, specs) @wsgi.serializers(xml=QoSSpecsTemplate) def create(self, req, body=None): context = req.environ['cinder.context'] authorize(context) self.assert_valid_body(body, 'qos_specs') specs = body['qos_specs'] name = specs.get('name', None) if name is None: msg = _("Please specify a name for QoS specs.") raise webob.exc.HTTPBadRequest(explanation=msg) self.validate_string_length(name, 'name', min_length=1, max_length=255, remove_whitespaces=True) name = 
name.strip() try: qos_specs.create(context, name, specs) spec = qos_specs.get_qos_specs_by_name(context, name) notifier_info = dict(name=name, specs=specs) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.create', notifier_info) except exception.InvalidQoSSpecs as err: notifier_err = dict(name=name, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.create', notifier_err) raise webob.exc.HTTPBadRequest(explanation=six.text_type(err)) except exception.QoSSpecsExists as err: notifier_err = dict(name=name, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.create', notifier_err) raise webob.exc.HTTPConflict(explanation=six.text_type(err)) except exception.QoSSpecsCreateFailed as err: notifier_err = dict(name=name, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.create', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=six.text_type(err)) return self._view_builder.detail(req, spec) @wsgi.serializers(xml=QoSSpecsTemplate) def update(self, req, id, body=None): context = req.environ['cinder.context'] authorize(context) self.assert_valid_body(body, 'qos_specs') specs = body['qos_specs'] try: qos_specs.update(context, id, specs) notifier_info = dict(id=id, specs=specs) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.update', notifier_info) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.update', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.InvalidQoSSpecs as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.update', notifier_err) raise webob.exc.HTTPBadRequest(explanation=six.text_type(err)) except exception.QoSSpecsUpdateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.update', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=six.text_type(err)) return body @wsgi.serializers(xml=QoSSpecsTemplate) def show(self, req, id): """Return a single qos spec item.""" context = req.environ['cinder.context'] authorize(context) try: spec = qos_specs.get_qos_specs(context, id) except exception.QoSSpecsNotFound as err: raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) return self._view_builder.detail(req, spec) def delete(self, req, id): """Deletes an existing qos specs.""" context = req.environ['cinder.context'] authorize(context) force = req.params.get('force', None) # Convert string to bool type in strict manner force = strutils.bool_from_string(force) LOG.debug("Delete qos_spec: %(id)s, force: %(force)s", {'id': id, 'force': force}) try: qos_specs.delete(context, id, force) notifier_info = dict(id=id) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.delete', notifier_info) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.delete', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.QoSSpecsInUse as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.delete', notifier_err) if force: msg = _('Failed to disassociate qos specs.') raise webob.exc.HTTPInternalServerError(explanation=msg) msg = _('Qos specs still in use.') raise webob.exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=202) @wsgi.deserializers(xml=QoSSpecsKeyDeserializer) def delete_keys(self, req, id, 
body): """Deletes specified keys in qos specs.""" context = req.environ['cinder.context'] authorize(context) if not (body and 'keys' in body and isinstance(body.get('keys'), list)): raise webob.exc.HTTPBadRequest() keys = body['keys'] LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s", {'id': id, 'keys': keys}) try: qos_specs.delete_keys(context, id, keys) notifier_info = dict(id=id) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.delete_keys', notifier_info) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.delete_keys', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.QoSSpecsKeyNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.delete_keys', notifier_err) raise webob.exc.HTTPBadRequest(explanation=six.text_type(err)) return webob.Response(status_int=202) @wsgi.serializers(xml=AssociationsTemplate) def associations(self, req, id): """List all associations of given qos specs.""" context = req.environ['cinder.context'] authorize(context) LOG.debug("Get associations for qos_spec id: %s", id) try: associates = qos_specs.get_associations(context, id) notifier_info = dict(id=id) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.associations', notifier_info) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associations', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.CinderException as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associations', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=six.text_type(err)) return self._view_builder.associations(req, associates) def associate(self, req, id): """Associate a qos specs with a volume type.""" context = req.environ['cinder.context'] authorize(context) type_id = req.params.get('vol_type_id', None) if not type_id: msg = _('Volume Type id must not be None.') notifier_err = dict(id=id, error_message=msg) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) raise webob.exc.HTTPBadRequest(explanation=msg) LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s", {'id': id, 'type_id': type_id}) try: qos_specs.associate_qos_with_type(context, id, type_id) notifier_info = dict(id=id, type_id=type_id) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.associate', notifier_info) except exception.VolumeTypeNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.InvalidVolumeType as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) raise webob.exc.HTTPBadRequest(explanation=six.text_type(err)) except exception.QoSSpecsAssociateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(err)) return webob.Response(status_int=202) def disassociate(self, req, id): """Disassociate a qos specs from a volume type.""" context = req.environ['cinder.context'] authorize(context) type_id = req.params.get('vol_type_id', None) if not type_id: msg = _('Volume Type id must not be None.') notifier_err = dict(id=id, error_message=msg) self._notify_qos_specs_error(context, 'qos_specs.disassociate', notifier_err) raise webob.exc.HTTPBadRequest(explanation=msg) LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s", {'id': id, 'type_id': type_id}) try: qos_specs.disassociate_qos_specs(context, id, type_id) notifier_info = dict(id=id, type_id=type_id) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.disassociate', notifier_info) except exception.VolumeTypeNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.QoSSpecsDisassociateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=six.text_type(err)) return webob.Response(status_int=202) def disassociate_all(self, req, id): """Disassociate a qos specs from all volume types.""" context = req.environ['cinder.context'] authorize(context) LOG.debug("Disassociate qos_spec: %s from all.", id) try: qos_specs.disassociate_all(context, id) notifier_info = dict(id=id) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.disassociate_all', notifier_info) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate_all', notifier_err) raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) except exception.QoSSpecsDisassociateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate_all', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=six.text_type(err)) return webob.Response(status_int=202) class Qos_specs_manage(extensions.ExtensionDescriptor): """QoS specs support.""" name = "Qos_specs_manage" alias = "qos-specs" namespace = "http://docs.openstack.org/volume/ext/qos-specs/api/v1" updated = "2013-08-02T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( Qos_specs_manage.alias, QoSSpecsController(), member_actions={"associations": "GET", "associate": "GET", "disassociate": "GET", "disassociate_all": "GET", "delete_keys": "PUT"}) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/extended_services.py0000664000567000056710000000165612701406250023663 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import extensions class Extended_services(extensions.ExtensionDescriptor): """Extended services support.""" name = "ExtendedServices" alias = "os-extended-services" namespace = ("http://docs.openstack.org/volume/ext/" "extended_services/api/v2") updated = "2014-01-10T00:00:00-00:00" cinder-8.0.0/cinder/api/contrib/volume_image_metadata.py0000664000567000056710000002143412701406257024474 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Volume Image Metadata API extension.""" import six import webob from oslo_log import log as logging from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import exception from cinder.i18n import _ from cinder import volume LOG = logging.getLogger(__name__) authorize = extensions.soft_extension_authorizer('volume', 'volume_image_metadata') class VolumeImageMetadataController(wsgi.Controller): def __init__(self, *args, **kwargs): super(VolumeImageMetadataController, self).__init__(*args, **kwargs) self.volume_api = volume.API() def _get_image_metadata(self, context, volume_id): try: volume = self.volume_api.get(context, volume_id) meta = self.volume_api.get_volume_image_metadata(context, volume) except exception.VolumeNotFound: msg = _('Volume with volume id %s does not exist.') % volume_id raise webob.exc.HTTPNotFound(explanation=msg) return (volume, meta) def _get_all_images_metadata(self, context): """Returns the image metadata for all volumes.""" try: all_metadata = self.volume_api.get_volumes_image_metadata(context) except Exception as e: LOG.debug('Problem retrieving volume image metadata. ' 'It will be skipped. Error: %s', six.text_type(e)) all_metadata = {} return all_metadata def _add_image_metadata(self, context, resp_volume_list, image_metas=None): """Appends the image metadata to each of the given volume. :param context: the request context :param resp_volume_list: the response volume list :param image_metas: The image metadata to append, if None is provided it will be retrieved from the database. An empty dict means there is no metadata and it should not be retrieved from the db. 
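Editor's illustration (shape inferred from the loop below; the metadata keys are made-up examples): image_metas maps each volume id to that volume's image metadata dict, e.g. {'<volume-id>': {'image_id': '<glance-image-id>', 'image_name': 'cirros'}}; a volume id absent from the map falls back to an empty dict.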
""" vol_id_list = [] for vol in resp_volume_list: vol_id_list.append(vol['id']) if image_metas is None: try: image_metas = self.volume_api.get_list_volumes_image_metadata( context, vol_id_list) except Exception as e: LOG.debug('Get image metadata error: %s', e) return if image_metas: for vol in resp_volume_list: image_meta = image_metas.get(vol['id'], {}) vol['volume_image_metadata'] = dict(image_meta) @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeImageMetadataTemplate()) self._add_image_metadata(context, [resp_obj.obj['volume']]) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumesImageMetadataTemplate()) # Just get the image metadata of those volumes in response. self._add_image_metadata(context, list(resp_obj.obj.get('volumes', []))) @wsgi.action("os-set_image_metadata") @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def create(self, req, id, body): context = req.environ['cinder.context'] if authorize(context): try: metadata = body['os-set_image_metadata']['metadata'] except (KeyError, TypeError): msg = _("Malformed request body.") raise webob.exc.HTTPBadRequest(explanation=msg) new_metadata = self._update_volume_image_metadata(context, id, metadata, delete=False) return {'metadata': new_metadata} def _update_volume_image_metadata(self, context, volume_id, metadata, delete=False): try: volume = self.volume_api.get(context, volume_id) return self.volume_api.update_volume_metadata( context, volume, metadata, delete=False, meta_type=common.METADATA_TYPES.image) except exception.VolumeNotFound: msg = _('Volume with volume id %s does not exist.') % volume_id raise webob.exc.HTTPNotFound(explanation=msg) except (ValueError, AttributeError): msg = _("Malformed request body.") raise webob.exc.HTTPBadRequest(explanation=msg) except exception.InvalidVolumeMetadata as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) @wsgi.action("os-show_image_metadata") @wsgi.serializers(xml=common.MetadataTemplate) def index(self, req, id, body): context = req.environ['cinder.context'] return {'metadata': self._get_image_metadata(context, id)[1]} @wsgi.action("os-unset_image_metadata") def delete(self, req, id, body): """Deletes an existing image metadata.""" context = req.environ['cinder.context'] if authorize(context): try: key = body['os-unset_image_metadata']['key'] except (KeyError, TypeError): msg = _("Malformed request body.") raise webob.exc.HTTPBadRequest(explanation=msg) if key: vol, metadata = self._get_image_metadata(context, id) if key not in metadata: msg = _("Metadata item was not found.") raise webob.exc.HTTPNotFound(explanation=msg) self.volume_api.delete_volume_metadata( context, vol, key, meta_type=common.METADATA_TYPES.image) else: msg = _("The key cannot be None.") raise webob.exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=200) class Volume_image_metadata(extensions.ExtensionDescriptor): """Show image metadata associated with the volume.""" name = "VolumeImageMetadata" alias = "os-vol-image-meta" namespace = ("http://docs.openstack.org/volume/ext/" "volume_image_metadata/api/v1") updated = "2012-12-07T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeImageMetadataController() extension = 
extensions.ControllerExtension(self, 'volumes', controller) return [extension] class VolumeImageMetadataMetadataTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_image_metadata', selector='volume_image_metadata') elem = xmlutil.SubTemplateElement(root, 'meta', selector=xmlutil.get_items) elem.set('key', 0) elem.text = 1 return xmlutil.MasterTemplate(root, 1) class VolumeImageMetadataTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume', selector='volume') root.append(VolumeImageMetadataMetadataTemplate()) alias = Volume_image_metadata.alias namespace = Volume_image_metadata.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumesImageMetadataTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumes') elem = xmlutil.SubTemplateElement(root, 'volume', selector='volume') elem.append(VolumeImageMetadataMetadataTemplate()) alias = Volume_image_metadata.alias namespace = Volume_image_metadata.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) cinder-8.0.0/cinder/api/contrib/snapshot_manage.py0000664000567000056710000001270112701406250023320 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from webob import exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.v2 import snapshots from cinder.api.views import snapshots as snapshot_views from cinder import exception from cinder.i18n import _ from cinder import volume as cinder_volume LOG = logging.getLogger(__name__) CONF = cfg.CONF authorize = extensions.extension_authorizer('snapshot', 'snapshot_manage') class SnapshotManageController(wsgi.Controller): """The /os-snapshot-manage controller for the OpenStack API.""" _view_builder_class = snapshot_views.ViewBuilder def __init__(self, *args, **kwargs): super(SnapshotManageController, self).__init__(*args, **kwargs) self.volume_api = cinder_volume.API() @wsgi.response(202) @wsgi.serializers(xml=snapshots.SnapshotTemplate) def create(self, req, body): """Instruct Cinder to manage a storage snapshot object. Manages an existing backend storage snapshot object (e.g. a Linux logical volume or a SAN disk) by creating the Cinder objects required to manage it, and possibly renaming the backend storage snapshot object (driver dependent). From an API perspective, this operation behaves very much like a snapshot creation operation. Required HTTP Body: { "snapshot": { "volume_id": <Cinder volume id>, "ref": <driver-specific reference to the snapshot>, } } See the appropriate Cinder drivers' implementations of the manage_snapshot method to find out the accepted format of 'ref'. For example, in the LVM driver, it will be the logical volume name of the snapshot which you want to manage.
This API call will return with an error if any of the above elements are missing from the request, or if the 'volume_id' element refers to a cinder volume that could not be found. The snapshot will later enter the error state if it is discovered that 'ref' is bad. Optional elements to 'snapshot' are: name A name for the new snapshot. description A description for the new snapshot. metadata Key/value pairs to be associated with the new snapshot. """ context = req.environ['cinder.context'] authorize(context) if not self.is_valid_body(body, 'snapshot'): msg = _("Missing required element snapshot in request body.") raise exc.HTTPBadRequest(explanation=msg) snapshot = body['snapshot'] # Check that the required keys are present, return an error if they # are not. required_keys = ('ref', 'volume_id') missing_keys = set(required_keys) - set(snapshot.keys()) if missing_keys: msg = _("The following elements are required: " "%s") % ', '.join(missing_keys) raise exc.HTTPBadRequest(explanation=msg) # Check whether volume exists volume_id = snapshot['volume_id'] try: volume = self.volume_api.get(context, volume_id) except exception.VolumeNotFound: msg = _("Volume: %s could not be found.") % volume_id raise exc.HTTPNotFound(explanation=msg) LOG.debug('Manage snapshot request body: %s', body) snapshot_parameters = {} snapshot_parameters['metadata'] = snapshot.get('metadata', None) snapshot_parameters['description'] = snapshot.get('description', None) # NOTE(wanghao) if name in request body, we are overriding the 'name' snapshot_parameters['name'] = snapshot.get('name', snapshot.get('display_name') ) try: new_snapshot = self.volume_api.manage_existing_snapshot( context, snapshot['ref'], volume, **snapshot_parameters) except exception.ServiceNotFound: msg = _("Service %s not found.") % CONF.volume_topic raise exc.HTTPNotFound(explanation=msg) return self._view_builder.detail(req, new_snapshot) class Snapshot_manage(extensions.ExtensionDescriptor): """Allows existing backend storage to be 'managed' by Cinder.""" name = 'SnapshotManage' alias = 'os-snapshot-manage' namespace = ('http://docs.openstack.org/volume/ext/' 'os-snapshot-manage/api/v1') updated = '2014-12-31T00:00:00+00:00' def get_resources(self): controller = SnapshotManageController() return [extensions.ResourceExtension(Snapshot_manage.alias, controller)] cinder-8.0.0/cinder/api/contrib/types_extra_specs.py0000664000567000056710000001657612701406250023733 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
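# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the Cinder tree).
# The os-snapshot-manage controller above expects a 'snapshot' body carrying
# 'volume_id' and a driver-specific 'ref'. A minimal client-side sketch,
# assuming a hypothetical endpoint URL, auth token, and ref format:

import requests  # any HTTP client would do


def manage_existing_snapshot(endpoint, token, volume_id, ref, name=None):
    """POST an os-snapshot-manage request and return the new snapshot dict."""
    body = {
        "snapshot": {
            "volume_id": volume_id,  # Cinder volume the snapshot belongs to
            "ref": ref,  # driver-specific, e.g. an LV name for the LVM driver
        }
    }
    if name is not None:
        body["snapshot"]["name"] = name
    resp = requests.post("%s/os-snapshot-manage" % endpoint.rstrip("/"),
                         json=body,
                         headers={"X-Auth-Token": token})
    # The API answers 202; 400 if 'volume_id'/'ref' are missing, and 404 if
    # the volume cannot be found.
    resp.raise_for_status()
    return resp.json()["snapshot"]

# Hypothetical call:
# manage_existing_snapshot("http://cinder:8776/v2/<project_id>", "<token>",
#                          "<volume-uuid>", "<backend-snapshot-ref>")
# ---------------------------------------------------------------------------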
"""The volume types extra specs extension""" import webob from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import db from cinder import exception from cinder.i18n import _ from cinder import rpc from cinder.volume import volume_types authorize = extensions.extension_authorizer('volume', 'types_extra_specs') class VolumeTypeExtraSpecsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') return xmlutil.MasterTemplate(root, 1) class VolumeTypeExtraSpecTemplate(xmlutil.TemplateBuilder): def construct(self): tagname = xmlutil.Selector('key') def extraspec_sel(obj, do_raise=False): # Have to extract the key and value for later use... key, value = list(obj.items())[0] return dict(key=key, value=value) root = xmlutil.TemplateElement(tagname, selector=extraspec_sel) root.text = 'value' return xmlutil.MasterTemplate(root, 1) class VolumeTypeExtraSpecsController(wsgi.Controller): """The volume type extra specs API controller for the OpenStack API.""" def _get_extra_specs(self, context, type_id): extra_specs = db.volume_type_extra_specs_get(context, type_id) specs_dict = {} for key, value in extra_specs.items(): specs_dict[key] = value return dict(extra_specs=specs_dict) def _check_type(self, context, type_id): try: volume_types.get_volume_type(context, type_id) except exception.VolumeTypeNotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.msg) @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) def index(self, req, type_id): """Returns the list of extra specs for a given volume type.""" context = req.environ['cinder.context'] authorize(context) self._check_type(context, type_id) return self._get_extra_specs(context, type_id) def _validate_extra_specs(self, specs): """Validating key and value of extra specs.""" for key, value in specs.items(): if key is not None: self.validate_string_length(key, 'Key "%s"' % key, min_length=1, max_length=255) if value is not None: self.validate_string_length(value, 'Value for key "%s"' % key, min_length=0, max_length=255) @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) def create(self, req, type_id, body=None): context = req.environ['cinder.context'] authorize(context) self.assert_valid_body(body, 'extra_specs') self._check_type(context, type_id) specs = body['extra_specs'] self._check_key_names(specs.keys()) self._validate_extra_specs(specs) db.volume_type_extra_specs_update_or_create(context, type_id, specs) notifier_info = dict(type_id=type_id, specs=specs) notifier = rpc.get_notifier('volumeTypeExtraSpecs') notifier.info(context, 'volume_type_extra_specs.create', notifier_info) return body @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) def update(self, req, type_id, id, body=None): context = req.environ['cinder.context'] authorize(context) if not body: expl = _('Request body empty') raise webob.exc.HTTPBadRequest(explanation=expl) self._check_type(context, type_id) if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) if len(body) > 1: expl = _('Request body contains too many items') raise webob.exc.HTTPBadRequest(explanation=expl) self._check_key_names(body.keys()) self._validate_extra_specs(body) db.volume_type_extra_specs_update_or_create(context, type_id, body) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('volumeTypeExtraSpecs') notifier.info(context, 'volume_type_extra_specs.update', 
notifier_info) return body @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) def show(self, req, type_id, id): """Return a single extra spec item.""" context = req.environ['cinder.context'] authorize(context) self._check_type(context, type_id) specs = self._get_extra_specs(context, type_id) if id in specs['extra_specs']: return {id: specs['extra_specs'][id]} else: msg = _("Volume Type %(type_id)s has no extra spec with key " "%(id)s.") % ({'type_id': type_id, 'id': id}) raise webob.exc.HTTPNotFound(explanation=msg) def delete(self, req, type_id, id): """Deletes an existing extra spec.""" context = req.environ['cinder.context'] self._check_type(context, type_id) authorize(context) try: db.volume_type_extra_specs_delete(context, type_id, id) except exception.VolumeTypeExtraSpecsNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('volumeTypeExtraSpecs') notifier.info(context, 'volume_type_extra_specs.delete', notifier_info) return webob.Response(status_int=202) def _check_key_names(self, keys): if not common.validate_key_names(keys): expl = _('Key names can only contain alphanumeric characters, ' 'underscores, periods, colons and hyphens.') raise webob.exc.HTTPBadRequest(explanation=expl) class Types_extra_specs(extensions.ExtensionDescriptor): """Type extra specs support.""" name = "TypesExtraSpecs" alias = "os-types-extra-specs" namespace = "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1" updated = "2011-08-24T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension('extra_specs', VolumeTypeExtraSpecsController(), parent=dict(member_name='type', collection_name='types') ) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/scheduler_stats.py0000664000567000056710000000437412701406250023354 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 eBay Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
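# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the Cinder tree).
# The controller above enforces two constraints on extra specs: keys must be
# 1-255 characters and values at most 255 characters (_validate_extra_specs),
# and key names may only contain alphanumerics, underscores, periods, colons
# and hyphens (_check_key_names). A standalone restatement of those rules:

import re

_KEY_CHARS = re.compile(r'^[A-Za-z0-9_.:-]+$')  # charset named in the error


def validate_extra_specs(specs):
    """Raise ValueError for a dict the extra-specs API would reject."""
    for key, value in specs.items():
        if not 1 <= len(key) <= 255:
            raise ValueError('Key "%s" must be 1-255 characters long' % key)
        if not _KEY_CHARS.match(key):
            raise ValueError('Key "%s" contains an illegal character' % key)
        if value is not None and len(value) > 255:
            raise ValueError('Value for key "%s" is too long' % key)

# validate_extra_specs({'volume_backend_name': 'lvm-1'})  # accepted
# validate_extra_specs({'bad key': 'x'})  # rejected: space is not allowed
# ---------------------------------------------------------------------------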
"""The Scheduler Stats extension""" from oslo_log import log as logging from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import scheduler_stats as scheduler_stats_view from cinder.scheduler import rpcapi LOG = logging.getLogger(__name__) def authorize(context, action_name): action = 'scheduler_stats:%s' % action_name extensions.extension_authorizer('scheduler', action)(context) class SchedulerStatsController(wsgi.Controller): """The Scheduler Stats controller for the OpenStack API.""" _view_builder_class = scheduler_stats_view.ViewBuilder def __init__(self): self.scheduler_api = rpcapi.SchedulerAPI() super(SchedulerStatsController, self).__init__() def get_pools(self, req): """List all active pools in scheduler.""" context = req.environ['cinder.context'] authorize(context, 'get_pools') # TODO(zhiteng) Add filters support detail = req.params.get('detail', False) pools = self.scheduler_api.get_pools(context, filters=None) return self._view_builder.pools(req, pools, detail) class Scheduler_stats(extensions.ExtensionDescriptor): """Scheduler stats support.""" name = "Scheduler_stats" alias = "scheduler-stats" namespace = "http://docs.openstack.org/volume/ext/scheduler-stats/api/v1" updated = "2014-09-07T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( Scheduler_stats.alias, SchedulerStatsController(), collection_actions={"get_pools": "GET"}) resources.append(res) return resources cinder-8.0.0/cinder/api/contrib/image_create.py0000664000567000056710000000204612701406250022557 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Create Volume from Image extension.""" from cinder.api import extensions class Image_create(extensions.ExtensionDescriptor): """Allow creating a volume from an image in the Create Volume v1 API.""" name = "CreateVolumeExtension" alias = "os-image-create" namespace = "http://docs.openstack.org/volume/ext/image-create/api/v1" updated = "2012-08-13T00:00:00+00:00" cinder-8.0.0/cinder/api/contrib/volume_type_access.py0000664000567000056710000002014612701406250024044 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The volume type access extension.""" from oslo_utils import uuidutils import six import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import exception from cinder.i18n import _ from cinder.volume import volume_types soft_authorize = extensions.soft_extension_authorizer('volume', 'volume_type_access') authorize = extensions.extension_authorizer('volume', 'volume_type_access') def make_volume_type(elem): elem.set('{%s}is_public' % Volume_type_access.namespace, '%s:is_public' % Volume_type_access.alias) def make_volume_type_access(elem): elem.set('volume_type_id') elem.set('project_id') class VolumeTypeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_type', selector='volume_type') make_volume_type(root) alias = Volume_type_access.alias namespace = Volume_type_access.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumeTypesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_types') elem = xmlutil.SubTemplateElement( root, 'volume_type', selector='volume_types') make_volume_type(elem) alias = Volume_type_access.alias namespace = Volume_type_access.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumeTypeAccessTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_type_access') elem = xmlutil.SubTemplateElement(root, 'access', selector='volume_type_access') make_volume_type_access(elem) return xmlutil.MasterTemplate(root, 1) def _marshall_volume_type_access(vol_type): rval = [] for project_id in vol_type['projects']: rval.append({'volume_type_id': vol_type['id'], 'project_id': project_id}) return {'volume_type_access': rval} class VolumeTypeAccessController(object): """The volume type access API controller for the OpenStack API.""" def __init__(self): super(VolumeTypeAccessController, self).__init__() @wsgi.serializers(xml=VolumeTypeAccessTemplate) def index(self, req, type_id): context = req.environ['cinder.context'] authorize(context) try: vol_type = volume_types.get_volume_type( context, type_id, expected_fields=['projects']) except exception.VolumeTypeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) if vol_type['is_public']: expl = _("Access list not available for public volume types.") raise webob.exc.HTTPNotFound(explanation=expl) return _marshall_volume_type_access(vol_type) class VolumeTypeActionController(wsgi.Controller): """The volume type access API controller for the OpenStack API.""" def _check_body(self, body, action_name): self.assert_valid_body(body, action_name) access = body[action_name] project = access.get('project') if not uuidutils.is_uuid_like(project): msg = _("Bad project format: " "project is not in proper format (%s)") % project raise webob.exc.HTTPBadRequest(explanation=msg) def _extend_vol_type(self, vol_type_rval, vol_type_ref): if vol_type_ref: key = "%s:is_public" % (Volume_type_access.alias) vol_type_rval[key] = vol_type_ref.get('is_public', True) @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if soft_authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=VolumeTypeTemplate()) vol_type = req.cached_resource_by_id(id, name='types') self._extend_vol_type(resp_obj.obj['volume_type'], vol_type) @wsgi.extends def index(self, req, resp_obj): context = req.environ['cinder.context'] if 
soft_authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=VolumeTypesTemplate()) for vol_type_rval in list(resp_obj.obj['volume_types']): type_id = vol_type_rval['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(vol_type_rval, vol_type) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if soft_authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=VolumeTypesTemplate()) for vol_type_rval in list(resp_obj.obj['volume_types']): type_id = vol_type_rval['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(vol_type_rval, vol_type) @wsgi.extends(action='create') def create(self, req, body, resp_obj): context = req.environ['cinder.context'] if soft_authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=VolumeTypeTemplate()) type_id = resp_obj.obj['volume_type']['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(resp_obj.obj['volume_type'], vol_type) @wsgi.action('addProjectAccess') def _addProjectAccess(self, req, id, body): context = req.environ['cinder.context'] authorize(context, action="addProjectAccess") self._check_body(body, 'addProjectAccess') project = body['addProjectAccess']['project'] try: volume_types.add_volume_type_access(context, id, project) except exception.VolumeTypeAccessExists as err: raise webob.exc.HTTPConflict(explanation=six.text_type(err)) except exception.VolumeTypeNotFound as err: raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) return webob.Response(status_int=202) @wsgi.action('removeProjectAccess') def _removeProjectAccess(self, req, id, body): context = req.environ['cinder.context'] authorize(context, action="removeProjectAccess") self._check_body(body, 'removeProjectAccess') project = body['removeProjectAccess']['project'] try: volume_types.remove_volume_type_access(context, id, project) except (exception.VolumeTypeNotFound, exception.VolumeTypeAccessNotFound) as err: raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) return webob.Response(status_int=202) class Volume_type_access(extensions.ExtensionDescriptor): """Volume type access support.""" name = "VolumeTypeAccess" alias = "os-volume-type-access" namespace = ("http://docs.openstack.org/volume/" "ext/os-volume-type-access/api/v1") updated = "2014-06-26T00:00:00Z" def get_resources(self): resources = [] res = extensions.ResourceExtension( Volume_type_access.alias, VolumeTypeAccessController(), parent=dict(member_name='type', collection_name='types')) resources.append(res) return resources def get_controller_extensions(self): controller = VolumeTypeActionController() extension = extensions.ControllerExtension(self, 'types', controller) return [extension] cinder-8.0.0/cinder/api/contrib/admin_actions.py0000664000567000056710000003107412701406250022765 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import oslo_messaging as messaging import webob from webob import exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import backup from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import rpc from cinder import utils from cinder import volume LOG = logging.getLogger(__name__) class AdminController(wsgi.Controller): """Abstract base class for AdminControllers.""" collection = None # api collection to extend # FIXME(clayg): this will be hard to keep up-to-date # Concrete classes can expand or over-ride valid_status = set(['creating', 'available', 'deleting', 'error', 'error_deleting', ]) def __init__(self, *args, **kwargs): super(AdminController, self).__init__(*args, **kwargs) # singular name of the resource self.resource_name = self.collection.rstrip('s') self.volume_api = volume.API() self.backup_api = backup.API() def _update(self, *args, **kwargs): raise NotImplementedError() def _get(self, *args, **kwargs): raise NotImplementedError() def _delete(self, *args, **kwargs): raise NotImplementedError() def validate_update(self, body): update = {} try: update['status'] = body['status'].lower() except (TypeError, KeyError): raise exc.HTTPBadRequest(explanation=_("Must specify 'status'")) if update['status'] not in self.valid_status: raise exc.HTTPBadRequest( explanation=_("Must specify a valid status")) return update def authorize(self, context, action_name): # e.g. "snapshot_admin_actions:reset_status" action = '%s_admin_actions:%s' % (self.resource_name, action_name) extensions.extension_authorizer('volume', action)(context) @wsgi.action('os-reset_status') def _reset_status(self, req, id, body): """Reset status on the resource.""" context = req.environ['cinder.context'] self.authorize(context, 'reset_status') update = self.validate_update(body['os-reset_status']) msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) notifier_info = dict(id=id, update=update) notifier = rpc.get_notifier('volumeStatusUpdate') notifier.info(context, self.collection + '.reset_status.start', notifier_info) try: self._update(context, id, update) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) notifier.info(context, self.collection + '.reset_status.end', notifier_info) return webob.Response(status_int=202) @wsgi.action('os-force_delete') def _force_delete(self, req, id, body): """Delete a resource, bypassing the check that it must be available.""" context = req.environ['cinder.context'] self.authorize(context, 'force_delete') try: resource = self._get(context, id) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) self._delete(context, resource, force=True) return webob.Response(status_int=202) class VolumeAdminController(AdminController): """AdminController for Volumes.""" collection = 'volumes' # FIXME(jdg): We're appending additional valid status # entries to the set we declare in the parent class # this doesn't make a ton of sense, we should probably # look at the structure of this whole process again # Perhaps we don't even want any definitions in the abstract # parent class? 
valid_status = AdminController.valid_status.union( ('attaching', 'in-use', 'detaching', 'maintenance')) valid_attach_status = ('detached', 'attached',) valid_migration_status = ('migrating', 'error', 'success', 'completing', 'none', 'starting',) def _update(self, *args, **kwargs): db.volume_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.volume_api.get(*args, **kwargs) def _delete(self, *args, **kwargs): return self.volume_api.delete(*args, **kwargs) def validate_update(self, body): update = {} status = body.get('status', None) attach_status = body.get('attach_status', None) migration_status = body.get('migration_status', None) valid = False if status: valid = True update = super(VolumeAdminController, self).validate_update(body) if attach_status: valid = True update['attach_status'] = attach_status.lower() if update['attach_status'] not in self.valid_attach_status: raise exc.HTTPBadRequest( explanation=_("Must specify a valid attach status")) if migration_status: valid = True update['migration_status'] = migration_status.lower() if update['migration_status'] not in self.valid_migration_status: raise exc.HTTPBadRequest( explanation=_("Must specify a valid migration status")) if update['migration_status'] == 'none': update['migration_status'] = None if not valid: raise exc.HTTPBadRequest( explanation=_("Must specify 'status', 'attach_status' " "or 'migration_status' for update.")) return update @wsgi.action('os-force_detach') def _force_detach(self, req, id, body): """Roll back a bad detach after the volume has been disconnected.""" context = req.environ['cinder.context'] self.authorize(context, 'force_detach') try: volume = self._get(context, id) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) try: connector = body['os-force_detach'].get('connector', None) except KeyError: raise webob.exc.HTTPBadRequest( explanation=_("Must specify 'connector'.")) try: self.volume_api.terminate_connection(context, volume, connector) except exception.VolumeBackendAPIException as error: msg = _("Unable to terminate volume connection from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) attachment_id = body['os-force_detach'].get('attachment_id', None) try: self.volume_api.detach(context, volume, attachment_id) except messaging.RemoteError as error: if error.exc_type in ['VolumeAttachmentNotFound', 'InvalidVolume']: msg = "Error force detaching volume - %(err_type)s: " \ "%(err_msg)s" % {'err_type': error.exc_type, 'err_msg': error.value} raise webob.exc.HTTPBadRequest(explanation=msg) else: # There are also a few cases where the force-detach call could # fail due to db or volume driver errors. These errors shouldn't # be exposed to the user and in such cases it should raise a # 500 error.
raise return webob.Response(status_int=202) @wsgi.action('os-migrate_volume') def _migrate_volume(self, req, id, body): """Migrate a volume to the specified host.""" context = req.environ['cinder.context'] self.authorize(context, 'migrate_volume') try: volume = self._get(context, id) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) params = body['os-migrate_volume'] try: host = params['host'] except KeyError: raise exc.HTTPBadRequest(explanation=_("Must specify 'host'.")) force_host_copy = utils.get_bool_param('force_host_copy', params) lock_volume = utils.get_bool_param('lock_volume', params) self.volume_api.migrate_volume(context, volume, host, force_host_copy, lock_volume) return webob.Response(status_int=202) @wsgi.action('os-migrate_volume_completion') def _migrate_volume_completion(self, req, id, body): """Complete an in-progress migration.""" context = req.environ['cinder.context'] self.authorize(context, 'migrate_volume_completion') try: volume = self._get(context, id) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) params = body['os-migrate_volume_completion'] try: new_volume_id = params['new_volume'] except KeyError: raise exc.HTTPBadRequest( explanation=_("Must specify 'new_volume'")) try: new_volume = self._get(context, new_volume_id) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) error = params.get('error', False) ret = self.volume_api.migrate_volume_completion(context, volume, new_volume, error) return {'save_volume_id': ret} class SnapshotAdminController(AdminController): """AdminController for Snapshots.""" collection = 'snapshots' def _update(self, *args, **kwargs): context = args[0] snapshot_id = args[1] fields = args[2] snapshot = objects.Snapshot.get_by_id(context, snapshot_id) snapshot.update(fields) snapshot.save() def _get(self, *args, **kwargs): return self.volume_api.get_snapshot(*args, **kwargs) def _delete(self, *args, **kwargs): return self.volume_api.delete_snapshot(*args, **kwargs) class BackupAdminController(AdminController): """AdminController for Backups.""" collection = 'backups' valid_status = set(['available', 'error' ]) def _get(self, *args, **kwargs): return self.backup_api.get(*args, **kwargs) def _delete(self, *args, **kwargs): return self.backup_api.delete(*args, **kwargs) @wsgi.action('os-reset_status') def _reset_status(self, req, id, body): """Reset status on the resource.""" context = req.environ['cinder.context'] self.authorize(context, 'reset_status') update = self.validate_update(body['os-reset_status']) msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) notifier_info = {'id': id, 'update': update} notifier = rpc.get_notifier('backupStatusUpdate') notifier.info(context, self.collection + '.reset_status.start', notifier_info) try: self.backup_api.reset_status(context=context, backup_id=id, status=update['status']) except exception.BackupNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) return webob.Response(status_int=202) class Admin_actions(extensions.ExtensionDescriptor): """Enable admin actions.""" name = "AdminActions" alias = "os-admin-actions" namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1" updated = "2012-08-25T00:00:00+00:00" def get_controller_extensions(self): exts = [] for class_ in (VolumeAdminController, SnapshotAdminController, BackupAdminController): controller = class_() extension = extensions.ControllerExtension( 
self, class_.collection, controller) exts.append(extension) return exts cinder-8.0.0/cinder/api/contrib/volume_mig_status_attribute.py0000664000567000056710000000667312701406250026015 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil authorize = extensions.soft_extension_authorizer('volume', 'volume_mig_status_attribute') class VolumeMigStatusAttributeController(wsgi.Controller): def _add_volume_mig_status_attribute(self, req, resp_volume): db_volume = req.get_db_volume(resp_volume['id']) key = "%s:migstat" % Volume_mig_status_attribute.alias resp_volume[key] = db_volume['migration_status'] key = "%s:name_id" % Volume_mig_status_attribute.alias resp_volume[key] = db_volume['_name_id'] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeMigStatusAttributeTemplate()) self._add_volume_mig_status_attribute(req, resp_obj.obj['volume']) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if authorize(context): resp_obj.attach(xml=VolumeListMigStatusAttributeTemplate()) for vol in list(resp_obj.obj['volumes']): self._add_volume_mig_status_attribute(req, vol) class Volume_mig_status_attribute(extensions.ExtensionDescriptor): """Expose migration_status as an attribute of a volume.""" name = "VolumeMigStatusAttribute" alias = "os-vol-mig-status-attr" namespace = ("http://docs.openstack.org/volume/ext/" "volume_mig_status_attribute/api/v1") updated = "2013-08-08T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeMigStatusAttributeController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] def make_volume(elem): elem.set('{%s}migstat' % Volume_mig_status_attribute.namespace, '%s:migstat' % Volume_mig_status_attribute.alias) elem.set('{%s}name_id' % Volume_mig_status_attribute.namespace, '%s:name_id' % Volume_mig_status_attribute.alias) class VolumeMigStatusAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume', selector='volume') make_volume(root) alias = Volume_mig_status_attribute.alias namespace = Volume_mig_status_attribute.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) class VolumeListMigStatusAttributeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumes') elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') make_volume(elem) alias = Volume_mig_status_attribute.alias namespace = Volume_mig_status_attribute.namespace return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) cinder-8.0.0/cinder/api/contrib/availability_zones.py0000664000567000056710000000477612701406250024056 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import extensions from cinder.api.openstack import wsgi import cinder.api.views.availability_zones from cinder.api import xmlutil import cinder.exception import cinder.volume.api def make_availability_zone(elem): elem.set('name', 'zoneName') zoneStateElem = xmlutil.SubTemplateElement(elem, 'zoneState', selector='zoneState') zoneStateElem.set('available') class ListTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('availabilityZones') elem = xmlutil.SubTemplateElement(root, 'availabilityZone', selector='availabilityZoneInfo') make_availability_zone(elem) alias = Availability_zones.alias namespace = Availability_zones.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class Controller(wsgi.Controller): _view_builder_class = cinder.api.views.availability_zones.ViewBuilder def __init__(self, *args, **kwargs): super(Controller, self).__init__(*args, **kwargs) self.volume_api = cinder.volume.api.API() @wsgi.serializers(xml=ListTemplate) def index(self, req): """Describe all known availability zones.""" azs = self.volume_api.list_availability_zones() return self._view_builder.list(req, azs) class Availability_zones(extensions.ExtensionDescriptor): """Describe Availability Zones.""" name = 'AvailabilityZones' alias = 'os-availability-zone' namespace = ('http://docs.openstack.org/volume/ext/' 'os-availability-zone/api/v1') updated = '2013-06-27T00:00:00+00:00' def get_resources(self): controller = Controller() res = extensions.ResourceExtension(Availability_zones.alias, controller) return [res] cinder-8.0.0/cinder/api/contrib/volume_actions.py0000664000567000056710000003776112701406257023224 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
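# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the Cinder tree).
# The admin actions above are driven by POSTing an action document to the
# volume's action URL; VolumeAdminController.validate_update() accepts any
# combination of 'status', 'attach_status' and 'migration_status'. A minimal
# client-side sketch, assuming a hypothetical endpoint URL and admin token:

import requests


def reset_volume_status(endpoint, token, volume_id, **fields):
    """POST os-reset_status; pass status=, attach_status= and/or
    migration_status= (at least one is required, or the API returns 400).
    """
    resp = requests.post(
        "%s/volumes/%s/action" % (endpoint.rstrip("/"), volume_id),
        json={"os-reset_status": fields},
        headers={"X-Auth-Token": token})
    resp.raise_for_status()  # expect 202

# Hypothetical call, forcing a stuck volume back to 'available':
# reset_volume_status("http://cinder:8776/v2/<project_id>", "<admin-token>",
#                     "<volume-uuid>", status="available",
#                     attach_status="detached")
# ---------------------------------------------------------------------------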
from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import encodeutils from oslo_utils import strutils import six import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import exception from cinder.i18n import _ from cinder import utils from cinder import volume LOG = logging.getLogger(__name__) def authorize(context, action_name): action = 'volume_actions:%s' % action_name extensions.extension_authorizer('volume', action)(context) class VolumeToImageSerializer(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('os-volume_upload_image', selector='os-volume_upload_image') root.set('id') root.set('updated_at') root.set('status') root.set('display_description') root.set('size') root.set('volume_type') root.set('image_id') root.set('container_format') root.set('disk_format') root.set('image_name') return xmlutil.MasterTemplate(root, 1) class VolumeToImageDeserializer(wsgi.XMLDeserializer): """Deserializer to handle xml-formatted requests.""" def default(self, string): dom = utils.safe_minidom_parse_string(string) action_node = dom.childNodes[0] action_name = action_node.tagName action_data = {} attributes = ["force", "image_name", "container_format", "disk_format"] for attr in attributes: if action_node.hasAttribute(attr): action_data[attr] = action_node.getAttribute(attr) if 'force' in action_data and action_data['force'] == 'True': action_data['force'] = True return {'body': {action_name: action_data}} class VolumeActionsController(wsgi.Controller): def __init__(self, *args, **kwargs): super(VolumeActionsController, self).__init__(*args, **kwargs) self.volume_api = volume.API() @wsgi.action('os-attach') def _attach(self, req, id, body): """Add attachment metadata.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) # instance uuid is an option now instance_uuid = None if 'instance_uuid' in body['os-attach']: instance_uuid = body['os-attach']['instance_uuid'] host_name = None # Keep API backward compatibility if 'host_name' in body['os-attach']: host_name = body['os-attach']['host_name'] mountpoint = body['os-attach']['mountpoint'] if 'mode' in body['os-attach']: mode = body['os-attach']['mode'] else: mode = 'rw' if instance_uuid is None and host_name is None: msg = _("Invalid request to attach volume to an invalid target") raise webob.exc.HTTPBadRequest(explanation=msg) if mode not in ('rw', 'ro'): msg = _("Invalid request to attach volume with an invalid mode. " "Attaching mode should be 'rw' or 'ro'") raise webob.exc.HTTPBadRequest(explanation=msg) try: self.volume_api.attach(context, volume, instance_uuid, host_name, mountpoint, mode) except messaging.RemoteError as error: if error.exc_type in ['InvalidVolume', 'InvalidUUID', 'InvalidVolumeAttachMode']: msg = "Error attaching volume - %(err_type)s: %(err_msg)s" % { 'err_type': error.exc_type, 'err_msg': error.value} raise webob.exc.HTTPBadRequest(explanation=msg) else: # There are also few cases where attach call could fail due to # db or volume driver errors. These errors shouldn't be exposed # to the user and in such cases it should raise 500 error. 
raise return webob.Response(status_int=202) @wsgi.action('os-detach') def _detach(self, req, id, body): """Clear attachment metadata.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) attachment_id = None if body['os-detach']: attachment_id = body['os-detach'].get('attachment_id', None) try: self.volume_api.detach(context, volume, attachment_id) except messaging.RemoteError as error: if error.exc_type in ['VolumeAttachmentNotFound', 'InvalidVolume']: msg = "Error detaching volume - %(err_type)s: %(err_msg)s" % \ {'err_type': error.exc_type, 'err_msg': error.value} raise webob.exc.HTTPBadRequest(explanation=msg) else: # There are also few cases where detach call could fail due to # db or volume driver errors. These errors shouldn't be exposed # to the user and in such cases it should raise 500 error. raise return webob.Response(status_int=202) @wsgi.action('os-reserve') def _reserve(self, req, id, body): """Mark volume as reserved.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) self.volume_api.reserve_volume(context, volume) return webob.Response(status_int=202) @wsgi.action('os-unreserve') def _unreserve(self, req, id, body): """Unmark volume as reserved.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) self.volume_api.unreserve_volume(context, volume) return webob.Response(status_int=202) @wsgi.action('os-begin_detaching') def _begin_detaching(self, req, id, body): """Update volume status to 'detaching'.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) self.volume_api.begin_detaching(context, volume) return webob.Response(status_int=202) @wsgi.action('os-roll_detaching') def _roll_detaching(self, req, id, body): """Roll back volume status to 'in-use'.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) self.volume_api.roll_detaching(context, volume) return webob.Response(status_int=202) @wsgi.action('os-initialize_connection') def _initialize_connection(self, req, id, body): """Initialize volume attachment.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) try: connector = body['os-initialize_connection']['connector'] except KeyError: raise webob.exc.HTTPBadRequest( explanation=_("Must specify 'connector'")) try: info = self.volume_api.initialize_connection(context, volume, connector) except exception.InvalidInput as err: raise webob.exc.HTTPBadRequest( explanation=err) except exception.VolumeBackendAPIException as error: msg = _("Unable to fetch connection information from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) return {'connection_info': info} @wsgi.action('os-terminate_connection') def _terminate_connection(self, req, id, body): """Terminate volume attachment.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except 
exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) try: connector = body['os-terminate_connection']['connector'] except KeyError: raise webob.exc.HTTPBadRequest( explanation=_("Must specify 'connector'")) try: self.volume_api.terminate_connection(context, volume, connector) except exception.VolumeBackendAPIException as error: msg = _("Unable to terminate volume connection from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) return webob.Response(status_int=202) @wsgi.response(202) @wsgi.action('os-volume_upload_image') @wsgi.serializers(xml=VolumeToImageSerializer) @wsgi.deserializers(xml=VolumeToImageDeserializer) def _volume_upload_image(self, req, id, body): """Uploads the specified volume to image service.""" context = req.environ['cinder.context'] params = body['os-volume_upload_image'] if not params.get("image_name"): msg = _("No image_name was specified in request.") raise webob.exc.HTTPBadRequest(explanation=msg) force = params.get('force', 'False') try: force = strutils.bool_from_string(force, strict=True) except ValueError as error: err_msg = encodeutils.exception_to_unicode(error) msg = _("Invalid value for 'force': '%s'") % err_msg raise webob.exc.HTTPBadRequest(explanation=msg) try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) authorize(context, "upload_image") image_metadata = {"container_format": params.get("container_format", "bare"), "disk_format": params.get("disk_format", "raw"), "name": params["image_name"]} try: response = self.volume_api.copy_volume_to_image(context, volume, image_metadata, force) except exception.InvalidVolume as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) except ValueError as error: raise webob.exc.HTTPBadRequest(explanation=six.text_type(error)) except messaging.RemoteError as error: msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type, 'err_msg': error.value} raise webob.exc.HTTPBadRequest(explanation=msg) except Exception as error: raise webob.exc.HTTPBadRequest(explanation=six.text_type(error)) return {'os-volume_upload_image': response} @wsgi.action('os-extend') def _extend(self, req, id, body): """Extend size of volume.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) try: int(body['os-extend']['new_size']) except (KeyError, ValueError, TypeError): msg = _("New volume size must be specified as an integer.") raise webob.exc.HTTPBadRequest(explanation=msg) size = int(body['os-extend']['new_size']) try: self.volume_api.extend(context, volume, size) except exception.InvalidVolume as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=202) @wsgi.action('os-update_readonly_flag') def _volume_readonly_update(self, req, id, body): """Update volume readonly flag.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) try: readonly_flag = body['os-update_readonly_flag']['readonly'] except KeyError: msg = _("Must specify readonly in request.") raise webob.exc.HTTPBadRequest(explanation=msg) try: readonly_flag = strutils.bool_from_string(readonly_flag, strict=True) except ValueError as error: err_msg = encodeutils.exception_to_unicode(error) msg = _("Invalid value for 'readonly': 
'%s'") % err_msg raise webob.exc.HTTPBadRequest(explanation=msg) self.volume_api.update_readonly_flag(context, volume, readonly_flag) return webob.Response(status_int=202) @wsgi.action('os-retype') def _retype(self, req, id, body): """Change type of existing volume.""" context = req.environ['cinder.context'] volume = self.volume_api.get(context, id) try: new_type = body['os-retype']['new_type'] except KeyError: msg = _("New volume type must be specified.") raise webob.exc.HTTPBadRequest(explanation=msg) policy = body['os-retype'].get('migration_policy') self.volume_api.retype(context, volume, new_type, policy) return webob.Response(status_int=202) @wsgi.action('os-set_bootable') def _set_bootable(self, req, id, body): """Update bootable status of a volume.""" context = req.environ['cinder.context'] try: volume = self.volume_api.get(context, id) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) try: bootable = body['os-set_bootable']['bootable'] except KeyError: msg = _("Must specify bootable in request.") raise webob.exc.HTTPBadRequest(explanation=msg) try: bootable = strutils.bool_from_string(bootable, strict=True) except ValueError as error: err_msg = encodeutils.exception_to_unicode(error) msg = _("Invalid value for 'bootable': '%s'") % err_msg raise webob.exc.HTTPBadRequest(explanation=msg) update_dict = {'bootable': bootable} self.volume_api.update(context, volume, update_dict) return webob.Response(status_int=200) class Volume_actions(extensions.ExtensionDescriptor): """Enable volume actions.""" name = "VolumeActions" alias = "os-volume-actions" namespace = "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1" updated = "2012-05-31T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeActionsController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] cinder-8.0.0/cinder/api/contrib/cgsnapshots.py0000664000567000056710000001625112701406250022511 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The cgsnapshots api.""" from oslo_log import log as logging import webob from webob import exc from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import cgsnapshots as cgsnapshot_views from cinder.api import xmlutil from cinder import consistencygroup as consistencygroupAPI from cinder import exception from cinder.i18n import _, _LI from cinder import utils LOG = logging.getLogger(__name__) def make_cgsnapshot(elem): elem.set('id') elem.set('consistencygroup_id') elem.set('status') elem.set('created_at') elem.set('name') elem.set('description') class CgsnapshotTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('cgsnapshot', selector='cgsnapshot') make_cgsnapshot(root) alias = Cgsnapshots.alias namespace = Cgsnapshots.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class CgsnapshotsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('cgsnapshots') elem = xmlutil.SubTemplateElement(root, 'cgsnapshot', selector='cgsnapshots') make_cgsnapshot(elem) alias = Cgsnapshots.alias namespace = Cgsnapshots.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class CreateDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) cgsnapshot = self._extract_cgsnapshot(dom) return {'body': {'cgsnapshot': cgsnapshot}} def _extract_cgsnapshot(self, node): cgsnapshot = {} cgsnapshot_node = self.find_first_child_named(node, 'cgsnapshot') attributes = ['name', 'description'] for attr in attributes: if cgsnapshot_node.getAttribute(attr): cgsnapshot[attr] = cgsnapshot_node.getAttribute(attr) return cgsnapshot class CgsnapshotsController(wsgi.Controller): """The cgsnapshots API controller for the OpenStack API.""" _view_builder_class = cgsnapshot_views.ViewBuilder def __init__(self): self.cgsnapshot_api = consistencygroupAPI.API() super(CgsnapshotsController, self).__init__() @wsgi.serializers(xml=CgsnapshotTemplate) def show(self, req, id): """Return data about the given cgsnapshot.""" LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] try: cgsnapshot = self.cgsnapshot_api.get_cgsnapshot( context, cgsnapshot_id=id) except exception.CgSnapshotNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.detail(req, cgsnapshot) def delete(self, req, id): """Delete a cgsnapshot.""" LOG.debug('delete called for member %s', id) context = req.environ['cinder.context'] LOG.info(_LI('Delete cgsnapshot with id: %s'), id, context=context) try: cgsnapshot = self.cgsnapshot_api.get_cgsnapshot( context, cgsnapshot_id=id) self.cgsnapshot_api.delete_cgsnapshot(context, cgsnapshot) except exception.CgSnapshotNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.InvalidCgSnapshot: msg = _("Invalid cgsnapshot") raise exc.HTTPBadRequest(explanation=msg) except Exception: msg = _("Failed cgsnapshot") raise exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=202) @wsgi.serializers(xml=CgsnapshotsTemplate) def index(self, req): """Returns a summary list of cgsnapshots.""" return self._get_cgsnapshots(req, is_detail=False) @wsgi.serializers(xml=CgsnapshotsTemplate) def detail(self, req): """Returns a detailed list of cgsnapshots.""" return self._get_cgsnapshots(req, is_detail=True) def _get_cgsnapshots(self, req, is_detail): """Returns a list of cgsnapshots, transformed through view 
builder.""" context = req.environ['cinder.context'] cgsnapshots = self.cgsnapshot_api.get_all_cgsnapshots(context) limited_list = common.limited(cgsnapshots, req) if is_detail: cgsnapshots = self._view_builder.detail_list(req, limited_list) else: cgsnapshots = self._view_builder.summary_list(req, limited_list) return cgsnapshots @wsgi.response(202) @wsgi.serializers(xml=CgsnapshotTemplate) @wsgi.deserializers(xml=CreateDeserializer) def create(self, req, body): """Create a new cgsnapshot.""" LOG.debug('Creating new cgsnapshot %s', body) self.assert_valid_body(body, 'cgsnapshot') context = req.environ['cinder.context'] cgsnapshot = body['cgsnapshot'] self.validate_name_and_description(cgsnapshot) try: group_id = cgsnapshot['consistencygroup_id'] except KeyError: msg = _("'consistencygroup_id' must be specified") raise exc.HTTPBadRequest(explanation=msg) try: group = self.cgsnapshot_api.get(context, group_id) except exception.ConsistencyGroupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) name = cgsnapshot.get('name', None) description = cgsnapshot.get('description', None) LOG.info(_LI("Creating cgsnapshot %(name)s."), {'name': name}, context=context) try: new_cgsnapshot = self.cgsnapshot_api.create_cgsnapshot( context, group, name, description) except exception.InvalidCgSnapshot as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.CgSnapshotNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) retval = self._view_builder.summary(req, new_cgsnapshot) return retval class Cgsnapshots(extensions.ExtensionDescriptor): """cgsnapshots support.""" name = 'Cgsnapshots' alias = 'cgsnapshots' namespace = 'http://docs.openstack.org/volume/ext/cgsnapshots/api/v1' updated = '2014-08-18T00:00:00+00:00' def get_resources(self): resources = [] res = extensions.ResourceExtension( Cgsnapshots.alias, CgsnapshotsController(), collection_actions={'detail': 'GET'}) resources.append(res) return resources cinder-8.0.0/cinder/api/extensions.py0000664000567000056710000003120212701406250020705 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils import webob.dec import webob.exc import cinder.api.openstack from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import exception from cinder.i18n import _LE, _LI, _LW import cinder.policy CONF = cfg.CONF LOG = logging.getLogger(__name__) class ExtensionDescriptor(object): """Base class that defines the contract for extensions. Note that you don't have to derive from this class to have a valid extension; it is purely a convenience. 
""" # The name of the extension, e.g., 'Fox In Socks' name = None # The alias for the extension, e.g., 'FOXNSOX' alias = None # Description comes from the docstring for the class # The XML namespace for the extension, e.g., # 'http://www.fox.in.socks/api/ext/pie/v1.0' namespace = None # The timestamp when the extension was last updated, e.g., # '2011-01-22T13:25:27-06:00' updated = None def __init__(self, ext_mgr): """Register extension with the extension manager.""" ext_mgr.register(self) self.ext_mgr = ext_mgr def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_controller_extensions(self): """List of extensions.ControllerExtension extension objects. Controller extensions are used to extend existing controllers. """ controller_exts = [] return controller_exts @classmethod def nsmap(cls): """Synthesize a namespace map from extension.""" # Start with a base nsmap nsmap = ext_nsmap.copy() # Add the namespace for the extension nsmap[cls.alias] = cls.namespace return nsmap @classmethod def xmlname(cls, name): """Synthesize element and attribute names.""" return '{%s}%s' % (cls.namespace, name) def make_ext(elem): elem.set('name') elem.set('namespace') elem.set('alias') elem.set('updated') desc = xmlutil.SubTemplateElement(elem, 'description') desc.text = 'description' xmlutil.make_links(elem, 'links') ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} class ExtensionTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('extension', selector='extension') make_ext(root) return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) class ExtensionsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('extensions') elem = xmlutil.SubTemplateElement(root, 'extension', selector='extensions') make_ext(elem) return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager super(ExtensionsResource, self).__init__(None) def _translate(self, ext): ext_data = {} ext_data['name'] = ext.name ext_data['alias'] = ext.alias ext_data['description'] = ext.__doc__ ext_data['namespace'] = ext.namespace ext_data['updated'] = ext.updated ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data @wsgi.serializers(xml=ExtensionsTemplate) def index(self, req): extensions = [] for _alias, ext in self.extension_manager.extensions.items(): extensions.append(self._translate(ext)) return dict(extensions=extensions) @wsgi.serializers(xml=ExtensionTemplate) def show(self, req, id): try: # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions[id] except KeyError: raise webob.exc.HTTPNotFound() return dict(extension=self._translate(ext)) def delete(self, req, id): raise webob.exc.HTTPNotFound() def create(self, req): raise webob.exc.HTTPNotFound() class ExtensionManager(object): """Load extensions from the configured extension path. See cinder/tests/api/extensions/foxinsocks/extension.py for an example extension implementation. 
""" def __init__(self): LOG.info(_LI('Initializing extension manager.')) self.cls_list = CONF.osapi_volume_extension self.extensions = {} self._load_extensions() def is_loaded(self, alias): return alias in self.extensions def register(self, ext): # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.alias LOG.info(_LI('Loaded extension: %s'), alias) if alias in self.extensions: raise exception.Error("Found duplicate extension: %s" % alias) self.extensions[alias] = ext def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', ExtensionsResource(self))) for ext in self.extensions.values(): try: resources.extend(ext.get_resources()) except AttributeError: # NOTE(dprince): Extension aren't required to have resource # extensions pass return resources def get_controller_extensions(self): """Returns a list of ControllerExtension objects.""" controller_exts = [] for ext in self.extensions.values(): try: get_ext_method = ext.get_controller_extensions except AttributeError: # NOTE(Vek): Extensions aren't required to have # controller extensions continue controller_exts.extend(get_ext_method()) return controller_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.name) LOG.debug('Ext alias: %s', extension.alias) LOG.debug('Ext description: %s', ' '.join(extension.__doc__.strip().split())) LOG.debug('Ext namespace: %s', extension.namespace) LOG.debug('Ext updated: %s', extension.updated) except AttributeError: LOG.exception(_LE("Exception loading extension.")) return False return True def load_extension(self, ext_factory): """Execute an extension factory. Loads an extension. The 'ext_factory' is the name of a callable that will be imported and called with one argument--the extension manager. The factory callable is expected to call the register() method at least once. """ LOG.debug("Loading extension %s", ext_factory) # Load the factory factory = importutils.import_class(ext_factory) # Call it LOG.debug("Calling extension factory %s", ext_factory) factory(self) def _load_extensions(self): """Load extensions specified on the command line.""" extensions = list(self.cls_list) for ext_factory in extensions: try: self.load_extension(ext_factory) except Exception as exc: LOG.warning(_LW('Failed to load extension %(ext_factory)s: ' '%(exc)s'), {'ext_factory': ext_factory, 'exc': exc}) class ControllerExtension(object): """Extend core controllers of cinder OpenStack API. Provide a way to extend existing cinder OpenStack API core controllers. """ def __init__(self, extension, collection, controller): self.extension = extension self.collection = collection self.controller = controller class ResourceExtension(object): """Add top level resources to the OpenStack API in cinder.""" def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, custom_routes_fn=None): if not collection_actions: collection_actions = {} if not member_actions: member_actions = {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions self.custom_routes_fn = custom_routes_fn def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): """Registers all standard API extensions.""" # Walk through all the modules in our directory... 
our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py if ext != '.py' or root == '__init__': continue # Try loading it classname = "%s%s" % (root[0].upper(), root[1:]) classpath = ("%s%s.%s.%s" % (package, relpkg, root, classname)) if ext_list is not None and classname not in ext_list: logger.debug("Skipping extension: %s" % classpath) continue try: ext_mgr.load_extension(classpath) except Exception as exc: logger.warning(_LW('Failed to load extension %(classpath)s: ' '%(exc)s'), {'classpath': classpath, 'exc': exc}) # Now, let's consider any subdirectories we may have... subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')): continue # If it has extension(), delegate... ext_name = ("%s%s.%s.extension" % (package, relpkg, dname)) try: ext = importutils.import_class(ext_name) except ImportError: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warning(_LW('Failed to load extension ' '%(ext_name)s: %(exc)s'), {'ext_name': ext_name, 'exc': exc}) # Update the list of directories we'll explore... dirnames[:] = subdirs def extension_authorizer(api_name, extension_name): def authorize(context, target=None, action=None): if target is None: target = {'project_id': context.project_id, 'user_id': context.user_id} if action is None: act = '%s_extension:%s' % (api_name, extension_name) else: act = '%s_extension:%s:%s' % (api_name, extension_name, action) cinder.policy.enforce(context, act, target) return authorize def soft_extension_authorizer(api_name, extension_name): hard_authorize = extension_authorizer(api_name, extension_name) def authorize(context): try: hard_authorize(context) return True except exception.NotAuthorized: return False return authorize cinder-8.0.0/cinder/api/views/0000775000567000056710000000000012701406543017300 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/views/limits.py0000664000567000056710000000671712701406250021161 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
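# NOTE: illustrative sketch, not part of the upstream tree -- a minimal
# example of how an API extension typically consumes the
# extension_authorizer() helper defined in cinder/api/extensions.py
# above. The extension name 'foo' and this controller are hypothetical.
from cinder.api import extensions as _api_extensions
from cinder.api.openstack import wsgi as _os_wsgi

_authorize_foo = _api_extensions.extension_authorizer('volume', 'foo')


class _FooController(_os_wsgi.Controller):
    def index(self, req):
        context = req.environ['cinder.context']
        # Enforces policy rule "volume_extension:foo"; passing
        # action='index' would enforce "volume_extension:foo:index".
        _authorize_foo(context)
        return {'foo': []}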
import datetime class ViewBuilder(object): """OpenStack API base limits view builder.""" def build(self, rate_limits, absolute_limits): rate_limits = self._build_rate_limits(rate_limits) absolute_limits = self._build_absolute_limits(absolute_limits) output = { "limits": { "rate": rate_limits, "absolute": absolute_limits, }, } return output def _build_absolute_limits(self, absolute_limits): """Builder for absolute limits absolute_limits should be given as a dict of limits. For example: {"ram": 512, "gigabytes": 1024}. """ limit_names = { "ram": ["maxTotalRAMSize"], "instances": ["maxTotalInstances"], "cores": ["maxTotalCores"], "gigabytes": ["maxTotalVolumeGigabytes"], "backup_gigabytes": ["maxTotalBackupGigabytes"], "volumes": ["maxTotalVolumes"], "snapshots": ["maxTotalSnapshots"], "backups": ["maxTotalBackups"], "key_pairs": ["maxTotalKeypairs"], "floating_ips": ["maxTotalFloatingIps"], "metadata_items": ["maxServerMeta", "maxImageMeta"], "injected_files": ["maxPersonality"], "injected_file_content_bytes": ["maxPersonalitySize"], } limits = {} for name, value in absolute_limits.items(): if name in limit_names and value is not None: for name in limit_names[name]: limits[name] = value return limits def _build_rate_limits(self, rate_limits): limits = [] for rate_limit in rate_limits: _rate_limit_key = None _rate_limit = self._build_rate_limit(rate_limit) # check for existing key for limit in limits: if (limit["uri"] == rate_limit["URI"] and limit["regex"] == rate_limit["regex"]): _rate_limit_key = limit break # ensure we have a key if we didn't find one if not _rate_limit_key: _rate_limit_key = { "uri": rate_limit["URI"], "regex": rate_limit["regex"], "limit": [], } limits.append(_rate_limit_key) _rate_limit_key["limit"].append(_rate_limit) return limits def _build_rate_limit(self, rate_limit): _get_utc = datetime.datetime.utcfromtimestamp next_avail = _get_utc(rate_limit["resetTime"]) return { "verb": rate_limit["verb"], "value": rate_limit["value"], "remaining": int(rate_limit["remaining"]), "unit": rate_limit["unit"], "next-available": next_avail.isoformat(), } cinder-8.0.0/cinder/api/views/versions.py0000664000567000056710000000524512701406250021523 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import re from oslo_config import cfg from six.moves import urllib versions_opts = [ cfg.StrOpt('public_endpoint', help="Public url to use for versions endpoint. The default " "is None, which will use the request's host_url " "attribute to populate the URL base. If Cinder is " "operating behind a proxy, you will want to change " "this to represent the proxy's URL."), ] CONF = cfg.CONF CONF.register_opts(versions_opts) def get_view_builder(req): base_url = CONF.public_endpoint or req.application_url return ViewBuilder(base_url) class ViewBuilder(object): def __init__(self, base_url): """Initialize ViewBuilder. 
:param base_url: URL of the root WSGI application """ self.base_url = base_url def build_versions(self, versions): views = [self._build_version(versions[key]) for key in sorted(list(versions.keys()))] return dict(versions=views) def _build_version(self, version): view = copy.deepcopy(version) view['links'] = self._build_links(version) return view def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" links = copy.deepcopy(version_data.get('links', [])) version_num = version_data["id"].split('.')[0] links.append({'rel': 'self', 'href': self._generate_href(version=version_num)}) return links def _generate_href(self, version='v3', path=None): """Create a URL that refers to a specific version_number.""" base_url = self._get_base_url_without_version() href = urllib.parse.urljoin(base_url, version).rstrip('/') + '/' if path: href += path.lstrip('/') return href def _get_base_url_without_version(self): """Get the base URL without the /vN version suffix.""" return re.sub('v[1-9]+/?$', '', self.base_url) cinder-8.0.0/cinder/api/views/types.py0000664000567000056710000000265212701406250021016 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common class ViewBuilder(common.ViewBuilder): def show(self, request, volume_type, brief=False): """Trim away extraneous volume type attributes.""" trimmed = dict(id=volume_type.get('id'), name=volume_type.get('name'), is_public=volume_type.get('is_public'), extra_specs=volume_type.get('extra_specs'), description=volume_type.get('description')) return trimmed if brief else dict(volume_type=trimmed) def index(self, request, volume_types): """Index over trimmed volume types.""" volume_types_list = [self.show(request, volume_type, True) for volume_type in volume_types] return dict(volume_types=volume_types_list) cinder-8.0.0/cinder/api/views/qos_specs.py0000664000567000056710000000466112701406250021653 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eBay Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
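# NOTE: illustrative sketch, not part of the upstream tree -- a
# standalone rendition of how the versions ViewBuilder above builds a
# self link: strip any trailing /vN from the base URL, then join the
# requested version. The example URL is made up.
import re as _re

from six.moves import urllib as _urllib


def _build_version_href(base_url, version='v3'):
    # Mirrors _get_base_url_without_version() + _generate_href().
    base = _re.sub('v[1-9]+/?$', '', base_url)
    return _urllib.parse.urljoin(base, version).rstrip('/') + '/'

# _build_version_href('http://cinder.example.com/v1/')
#     -> 'http://cinder.example.com/v3/'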
from oslo_log import log as logging from cinder.api import common LOG = logging.getLogger(__name__) class ViewBuilder(common.ViewBuilder): """Model QoS specs API responses as a python dictionary.""" _collection_name = "qos-specs" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, qos_specs, qos_count=None): """Show a list of qos_specs without many details.""" return self._list_view(self.detail, request, qos_specs, qos_count) def summary(self, request, qos_spec): """Generic, non-detailed view of a qos_specs.""" return { 'qos_specs': qos_spec, 'links': self._get_links(request, qos_spec['id']), } def detail(self, request, qos_spec): """Detailed view of a single qos_spec.""" # TODO(zhiteng) Add associations to detailed view return { 'qos_specs': qos_spec, 'links': self._get_links(request, qos_spec['id']), } def associations(self, request, associates): """View of qos specs associations.""" return { 'qos_associations': associates } def _list_view(self, func, request, qos_specs, qos_count=None): """Provide a view for a list of qos_specs.""" specs_list = [func(request, specs)['qos_specs'] for specs in qos_specs] specs_links = self._get_collection_links(request, qos_specs, self._collection_name, qos_count) specs_dict = dict(qos_specs=specs_list) if specs_links: specs_dict['qos_specs_links'] = specs_links return specs_dict cinder-8.0.0/cinder/api/views/consistencygroups.py0000664000567000056710000000612612701406250023453 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
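# NOTE: illustrative sketch, not part of the upstream tree -- the
# consistency group detail() view below splits the group's
# comma-separated volume_type_id into a clean list, dropping empty
# segments; the type ids here are made up.
_raw_type_ids = "7f5e0000-aaaa,9c1d0000-bbbb,"
_split_types = [type_id for type_id in _raw_type_ids.split(",") if type_id]
assert _split_types == ["7f5e0000-aaaa", "9c1d0000-bbbb"]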
from oslo_log import log as logging from cinder.api import common LOG = logging.getLogger(__name__) class ViewBuilder(common.ViewBuilder): """Model consistencygroup API responses as a python dictionary.""" _collection_name = "consistencygroups" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, consistencygroups): """Show a list of consistency groups without many details.""" return self._list_view(self.summary, request, consistencygroups) def detail_list(self, request, consistencygroups): """Detailed view of a list of consistency groups .""" return self._list_view(self.detail, request, consistencygroups) def summary(self, request, consistencygroup): """Generic, non-detailed view of a consistency group.""" return { 'consistencygroup': { 'id': consistencygroup.id, 'name': consistencygroup.name } } def detail(self, request, consistencygroup): """Detailed view of a single consistency group.""" if consistencygroup.volume_type_id: volume_types = consistencygroup.volume_type_id.split(",") volume_types = [type_id for type_id in volume_types if type_id] else: volume_types = [] return { 'consistencygroup': { 'id': consistencygroup.id, 'status': consistencygroup.status, 'availability_zone': consistencygroup.availability_zone, 'created_at': consistencygroup.created_at, 'name': consistencygroup.name, 'description': consistencygroup.description, 'volume_types': volume_types, } } def _list_view(self, func, request, consistencygroups): """Provide a view for a list of consistency groups.""" consistencygroups_list = [ func(request, consistencygroup)['consistencygroup'] for consistencygroup in consistencygroups] cg_links = self._get_collection_links(request, consistencygroups, self._collection_name) consistencygroups_dict = dict(consistencygroups=consistencygroups_list) if cg_links: consistencygroups_dict['consistencygroup_links'] = cg_links return consistencygroups_dict cinder-8.0.0/cinder/api/views/__init__.py0000664000567000056710000000000012701406250021372 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/views/capabilities.py0000664000567000056710000000335412701406250022303 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
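# NOTE: illustrative sketch, not part of the upstream tree -- the
# summary() view below namespaces each backend's capabilities under a
# well-known prefix; the backend id is hypothetical.
_backend_id = 'cinder@lvmdriver-1'
_capabilities_ns = 'OS::Storage::Capabilities::%s' % _backend_id
assert _capabilities_ns == 'OS::Storage::Capabilities::cinder@lvmdriver-1'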
from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model capabilities API responses as a python dictionary.""" _collection_name = "capabilities" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary(self, request, capabilities, id): """Summary view of a backend capabilities.""" return { 'namespace': 'OS::Storage::Capabilities::%s' % id, 'vendor_name': capabilities.get('vendor_name'), 'volume_backend_name': capabilities.get('volume_backend_name'), 'pool_name': capabilities.get('pool_name'), 'driver_version': capabilities.get('driver_version'), 'storage_protocol': capabilities.get('storage_protocol'), 'display_name': capabilities.get('display_name'), 'description': capabilities.get('description'), 'visibility': capabilities.get('visibility'), 'replication_targets': capabilities.get('replication_targets', []), 'properties': capabilities.get('properties'), } cinder-8.0.0/cinder/api/views/backups.py0000664000567000056710000000776712701406250021316 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.api import common LOG = logging.getLogger(__name__) class ViewBuilder(common.ViewBuilder): """Model backup API responses as a python dictionary.""" _collection_name = "backups" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, backups, backup_count=None): """Show a list of backups without many details.""" return self._list_view(self.summary, request, backups, backup_count) def detail_list(self, request, backups, backup_count=None): """Detailed view of a list of backups .""" return self._list_view(self.detail, request, backups, backup_count) def summary(self, request, backup): """Generic, non-detailed view of a backup.""" return { 'backup': { 'id': backup['id'], 'name': backup['display_name'], 'links': self._get_links(request, backup['id']), }, } def restore_summary(self, request, restore): """Generic, non-detailed view of a restore.""" return { 'restore': { 'backup_id': restore['backup_id'], 'volume_id': restore['volume_id'], 'volume_name': restore['volume_name'], }, } def detail(self, request, backup): """Detailed view of a single backup.""" return { 'backup': { 'id': backup.get('id'), 'status': backup.get('status'), 'size': backup.get('size'), 'object_count': backup.get('object_count'), 'availability_zone': backup.get('availability_zone'), 'container': backup.get('container'), 'created_at': backup.get('created_at'), 'updated_at': backup.get('updated_at'), 'name': backup.get('display_name'), 'description': backup.get('display_description'), 'fail_reason': backup.get('fail_reason'), 'volume_id': backup.get('volume_id'), 'links': self._get_links(request, backup['id']), 'is_incremental': backup.is_incremental, 'has_dependent_backups': backup.has_dependent_backups, 'snapshot_id': backup.snapshot_id, 'data_timestamp': 
backup.data_timestamp, } } def _list_view(self, func, request, backups, backup_count): """Provide a view for a list of backups.""" backups_list = [func(request, backup)['backup'] for backup in backups] backups_links = self._get_collection_links(request, backups, self._collection_name, backup_count) backups_dict = dict(backups=backups_list) if backups_links: backups_dict['backups_links'] = backups_links return backups_dict def export_summary(self, request, export): """Generic view of an export.""" return { 'backup-record': { 'backup_service': export['backup_service'], 'backup_url': export['backup_url'], }, } cinder-8.0.0/cinder/api/views/transfers.py0000664000567000056710000000676612701406250021673 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.api import common LOG = logging.getLogger(__name__) class ViewBuilder(common.ViewBuilder): """Model transfer API responses as a python dictionary.""" _collection_name = "os-volume-transfer" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, transfers, origin_transfer_count): """Show a list of transfers without many details.""" return self._list_view(self.summary, request, transfers, origin_transfer_count) def detail_list(self, request, transfers, origin_transfer_count): """Detailed view of a list of transfers .""" return self._list_view(self.detail, request, transfers, origin_transfer_count) def summary(self, request, transfer): """Generic, non-detailed view of a transfer.""" return { 'transfer': { 'id': transfer['id'], 'volume_id': transfer.get('volume_id'), 'name': transfer['display_name'], 'links': self._get_links(request, transfer['id']), }, } def detail(self, request, transfer): """Detailed view of a single transfer.""" return { 'transfer': { 'id': transfer.get('id'), 'created_at': transfer.get('created_at'), 'name': transfer.get('display_name'), 'volume_id': transfer.get('volume_id'), 'links': self._get_links(request, transfer['id']) } } def create(self, request, transfer): """Detailed view of a single transfer when created.""" return { 'transfer': { 'id': transfer.get('id'), 'created_at': transfer.get('created_at'), 'name': transfer.get('display_name'), 'volume_id': transfer.get('volume_id'), 'auth_key': transfer.get('auth_key'), 'links': self._get_links(request, transfer['id']) } } def _list_view(self, func, request, transfers, origin_transfer_count): """Provide a view for a list of transfers.""" transfers_list = [func(request, transfer)['transfer'] for transfer in transfers] transfers_links = self._get_collection_links(request, transfers, self._collection_name, origin_transfer_count) transfers_dict = dict(transfers=transfers_list) if transfers_links: transfers_dict['transfers_links'] = transfers_links return transfers_dict cinder-8.0.0/cinder/api/views/snapshots.py0000664000567000056710000000613712701406250021676 0ustar 
jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.api import common LOG = logging.getLogger(__name__) class ViewBuilder(common.ViewBuilder): """Model snapshot API responses as a python dictionary.""" _collection_name = "snapshots" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, snapshots, snapshot_count=None): """Show a list of snapshots without many details.""" return self._list_view(self.summary, request, snapshots, snapshot_count) def detail_list(self, request, snapshots, snapshot_count=None): """Detailed view of a list of snapshots.""" return self._list_view(self.detail, request, snapshots, snapshot_count, coll_name=self._collection_name + '/detail') def summary(self, request, snapshot): """Generic, non-detailed view of a snapshot.""" if isinstance(snapshot.metadata, dict): metadata = snapshot.metadata else: metadata = {} return { 'snapshot': { 'id': snapshot.id, 'created_at': snapshot.created_at, 'updated_at': snapshot.updated_at, 'name': snapshot.display_name, 'description': snapshot.display_description, 'volume_id': snapshot.volume_id, 'status': snapshot.status, 'size': snapshot.volume_size, 'metadata': metadata, } } def detail(self, request, snapshot): """Detailed view of a single snapshot.""" # NOTE(geguileo): No additional data at the moment return self.summary(request, snapshot) def _list_view(self, func, request, snapshots, snapshot_count, coll_name=_collection_name): """Provide a view for a list of snapshots.""" snapshots_list = [func(request, snapshot)['snapshot'] for snapshot in snapshots] snapshots_links = self._get_collection_links(request, snapshots, coll_name, snapshot_count) snapshots_dict = {self._collection_name: snapshots_list} if snapshots_links: snapshots_dict[self._collection_name + '_links'] = snapshots_links return snapshots_dict cinder-8.0.0/cinder/api/views/scheduler_stats.py0000664000567000056710000000327612701406250023051 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eBay Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
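# NOTE: illustrative sketch, not part of the upstream tree -- the
# pools() view below emits one dict per pool, including capabilities
# only in the detail form; the pool data is made up.
_pools = [{'name': 'rbd-1', 'capabilities': {'total_capacity_gb': 100}}]
_pools_summary = [{'name': p.get('name')} for p in _pools]
_pools_detail = [{'name': p.get('name'),
                  'capabilities': p.get('capabilities')} for p in _pools]
assert _pools_summary == [{'name': 'rbd-1'}]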
from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model scheduler-stats API responses as a python dictionary.""" _collection_name = "scheduler-stats" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary(self, request, pool): """Summary view of a single pool.""" return { 'pool': { 'name': pool.get('name'), } } def detail(self, request, pool): """Detailed view of a single pool.""" return { 'pool': { 'name': pool.get('name'), 'capabilities': pool.get('capabilities'), } } def pools(self, request, pools, detail): """Detailed/Summary view of a list of pools seen by scheduler.""" if detail: plist = [self.detail(request, pool)['pool'] for pool in pools] else: plist = [self.summary(request, pool)['pool'] for pool in pools] pools_dict = dict(pools=plist) return pools_dict cinder-8.0.0/cinder/api/views/availability_zones.py0000664000567000056710000000207312701406250023537 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import cinder.api.common class ViewBuilder(cinder.api.common.ViewBuilder): """Map cinder.volumes.api list_availability_zones response into dicts.""" def list(self, request, availability_zones): def fmt(az): return { 'zoneName': az['name'], 'zoneState': {'available': az['available']}, } return {'availabilityZoneInfo': [fmt(az) for az in availability_zones]} cinder-8.0.0/cinder/api/views/cgsnapshots.py0000664000567000056710000000453612701406250022211 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
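# NOTE: illustrative sketch, not part of the upstream tree -- the
# response shape the availability_zones ViewBuilder.list() above
# produces; the zone data is made up.
_azs = [{'name': 'nova', 'available': True}]
_az_view = {'availabilityZoneInfo': [
    {'zoneName': az['name'], 'zoneState': {'available': az['available']}}
    for az in _azs]}
assert _az_view['availabilityZoneInfo'][0]['zoneState'] == {'available': True}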
from oslo_log import log as logging from cinder.api import common LOG = logging.getLogger(__name__) class ViewBuilder(common.ViewBuilder): """Model cgsnapshot API responses as a python dictionary.""" _collection_name = "cgsnapshots" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, cgsnapshots): """Show a list of cgsnapshots without many details.""" return self._list_view(self.summary, request, cgsnapshots) def detail_list(self, request, cgsnapshots): """Detailed view of a list of cgsnapshots .""" return self._list_view(self.detail, request, cgsnapshots) def summary(self, request, cgsnapshot): """Generic, non-detailed view of a cgsnapshot.""" return { 'cgsnapshot': { 'id': cgsnapshot.id, 'name': cgsnapshot.name } } def detail(self, request, cgsnapshot): """Detailed view of a single cgsnapshot.""" return { 'cgsnapshot': { 'id': cgsnapshot.id, 'consistencygroup_id': cgsnapshot.consistencygroup_id, 'status': cgsnapshot.status, 'created_at': cgsnapshot.created_at, 'name': cgsnapshot.name, 'description': cgsnapshot.description } } def _list_view(self, func, request, cgsnapshots): """Provide a view for a list of cgsnapshots.""" cgsnapshots_list = [func(request, cgsnapshot)['cgsnapshot'] for cgsnapshot in cgsnapshots] cgsnapshots_dict = dict(cgsnapshots=cgsnapshots_list) return cgsnapshots_dict cinder-8.0.0/cinder/api/middleware/0000775000567000056710000000000012701406543020260 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/middleware/auth.py0000664000567000056710000001354312701406250021574 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common Auth Middleware. """ import os from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import request_id from oslo_serialization import jsonutils import webob.dec import webob.exc from cinder.api.openstack import wsgi from cinder import context from cinder.i18n import _ from cinder.wsgi import common as base_wsgi use_forwarded_for_opt = cfg.BoolOpt( 'use_forwarded_for', default=False, help='Treat X-Forwarded-For as the canonical remote address. 
' 'Only enable this if you have a sanitizing proxy.') CONF = cfg.CONF CONF.register_opt(use_forwarded_for_opt) LOG = logging.getLogger(__name__) def pipeline_factory(loader, global_conf, **local_conf): """A paste pipeline replica that keys off of auth_strategy.""" pipeline = local_conf[CONF.auth_strategy] if not CONF.api_rate_limit: limit_name = CONF.auth_strategy + '_nolimit' pipeline = local_conf.get(limit_name, pipeline) pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app class InjectContext(base_wsgi.Middleware): """Add a 'cinder.context' to WSGI environ.""" def __init__(self, context, *args, **kwargs): self.context = context super(InjectContext, self).__init__(*args, **kwargs) @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): req.environ['cinder.context'] = self.context return self.application class CinderKeystoneContext(base_wsgi.Middleware): """Make a request context from keystone headers.""" @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): user_id = req.headers.get('X_USER') user_id = req.headers.get('X_USER_ID', user_id) if user_id is None: LOG.debug("Neither X_USER_ID nor X_USER found in request") return webob.exc.HTTPUnauthorized() # get the roles roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] if 'X_TENANT_ID' in req.headers: # This is the new header since Keystone went to ID/Name project_id = req.headers['X_TENANT_ID'] else: # This is for legacy compatibility project_id = req.headers['X_TENANT'] project_name = req.headers.get('X_TENANT_NAME') req_id = req.environ.get(request_id.ENV_REQUEST_ID) # Get the auth token auth_token = req.headers.get('X_AUTH_TOKEN', req.headers.get('X_STORAGE_TOKEN')) # Build a context, including the auth_token... remote_address = req.remote_addr service_catalog = None if req.headers.get('X_SERVICE_CATALOG') is not None: try: catalog_header = req.headers.get('X_SERVICE_CATALOG') service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( explanation=_('Invalid service catalog json.')) if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctx = context.RequestContext(user_id, project_id, project_name=project_name, roles=roles, auth_token=auth_token, remote_address=remote_address, service_catalog=service_catalog, request_id=req_id) req.environ['cinder.context'] = ctx return self.application class NoAuthMiddleware(base_wsgi.Middleware): """Return a fake token if one isn't specified.""" @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if 'X-Auth-Token' not in req.headers: user_id = req.headers.get('X-Auth-User', 'admin') project_id = req.headers.get('X-Auth-Project-Id', 'admin') os_url = os.path.join(req.url, project_id) res = webob.Response() # NOTE(vish): This is expecting and returning Auth(1.1), whereas # keystone uses 2.0 auth. We should probably allow # 2.0 auth here as well. 
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) res.headers['X-Server-Management-Url'] = os_url res.content_type = 'text/plain' res.status = '204' return res token = req.headers['X-Auth-Token'] user_id, _sep, project_id = token.partition(':') project_id = project_id or user_id remote_address = getattr(req, 'remote_address', '127.0.0.1') if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctx = context.RequestContext(user_id, project_id, is_admin=True, remote_address=remote_address) req.environ['cinder.context'] = ctx return self.application cinder-8.0.0/cinder/api/middleware/__init__.py0000664000567000056710000000000012701406250022352 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/middleware/fault.py0000664000567000056710000000613712701406250021747 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import six import webob.dec import webob.exc from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _, _LE, _LI from cinder import utils from cinder.wsgi import common as base_wsgi LOG = logging.getLogger(__name__) class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" _status_to_type = {} @staticmethod def status_to_type(status): if not FaultWrapper._status_to_type: for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): FaultWrapper._status_to_type[clazz.code] = clazz return FaultWrapper._status_to_type.get( status, webob.exc.HTTPInternalServerError)() def _error(self, inner, req): if not isinstance(inner, exception.QuotaError): LOG.exception(_LE("Caught error: %(type)s %(error)s"), {'type': type(inner), 'error': inner}) safe = getattr(inner, 'safe', False) headers = getattr(inner, 'headers', None) status = getattr(inner, 'code', 500) if status is None: status = 500 msg_dict = dict(url=req.url, status=status) LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict) outer = self.status_to_type(status) if headers: outer.headers = headers # NOTE(johannes): We leave the explanation empty here on # purpose. It could possibly have sensitive information # that should not be returned back to the user. 
See # bugs 868360 and 874472 # NOTE(eglynn): However, it would be over-conservative and # inconsistent with the EC2 API to hide every exception, # including those that are safe to expose, see bug 1021373 if safe: msg = (inner.msg if isinstance(inner, exception.CinderException) else six.text_type(inner)) params = {'exception': inner.__class__.__name__, 'explanation': msg} outer.explanation = _('%(exception)s: %(explanation)s') % params return wsgi.Fault(outer) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: return self._error(ex, req) cinder-8.0.0/cinder/api/middleware/sizelimit.py0000664000567000056710000000256412701406250022645 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Request Body limiting middleware. Compatibility shim for Kilo, while operators migrate to oslo.middleware. """ from oslo_config import cfg from oslo_log import versionutils from oslo_middleware import sizelimit # Default request size is 112k max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', default=114688, help='Max size for body of a request') CONF = cfg.CONF CONF.register_opt(max_request_body_size_opt) @versionutils.deprecated(as_of=versionutils.deprecated.KILO, in_favor_of='oslo_middleware.RequestBodySizeLimiter') class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter): """Add a 'cinder.context' to WSGI environ.""" pass cinder-8.0.0/cinder/api/v2/0000775000567000056710000000000012701406543016472 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/v2/limits.py0000664000567000056710000003547212701406250020353 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. """ import collections import copy import math import re import time from oslo_serialization import jsonutils from oslo_utils import importutils from six.moves import http_client import webob.dec import webob.exc from cinder.api.openstack import wsgi from cinder.api.views import limits as limits_views from cinder.api import xmlutil from cinder.i18n import _ from cinder import quota from cinder.wsgi import common as base_wsgi QUOTAS = quota.QUOTAS LIMITS_PREFIX = "limits." # Convenience constants for the limits dictionary passed to Limiter(). 
PER_SECOND = 1 PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} class LimitsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('limits', selector='limits') rates = xmlutil.SubTemplateElement(root, 'rates') rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate') rate.set('uri', 'uri') rate.set('regex', 'regex') limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit') limit.set('value', 'value') limit.set('verb', 'verb') limit.set('remaining', 'remaining') limit.set('unit', 'unit') limit.set('next-available', 'next-available') absolute = xmlutil.SubTemplateElement(root, 'absolute', selector='absolute') limit = xmlutil.SubTemplateElement(absolute, 'limit', selector=xmlutil.get_items) limit.set('name', 0) limit.set('value', 1) return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap) class LimitsController(wsgi.Controller): """Controller for accessing limits in the OpenStack API.""" @wsgi.serializers(xml=LimitsTemplate) def index(self, req): """Return all global and rate limit information.""" context = req.environ['cinder.context'] quotas = QUOTAS.get_project_quotas(context, context.project_id, usages=False) abs_limits = {k: v['limit'] for k, v in quotas.items()} rate_limits = req.environ.get("cinder.limits", []) builder = self._get_view_builder(req) return builder.build(rate_limits, abs_limits) def _get_view_builder(self, req): return limits_views.ViewBuilder() def create_resource(): return wsgi.Resource(LimitsController()) class Limit(object): """Stores information about a limit for HTTP requests.""" UNITS = { 1: "SECOND", 60: "MINUTE", 60 * 60: "HOUR", 60 * 60 * 24: "DAY", } UNIT_MAP = {v: k for k, v in UNITS.items()} def __init__(self, verb, uri, regex, value, unit): """Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) @param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError("Limit value must be > 0") self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = (_("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") % {'value': self.value, 'verb': self.verb, 'uri': self.uri, 'unit_string': self.unit_string}) self.error_message = msg def __call__(self, verb, url): """Represent a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. 
Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) DEFAULT_LIMITS = [ Limit("POST", "*", ".*", 10, PER_MINUTE), Limit("POST", "*/servers", "^/servers", 50, PER_DAY), Limit("PUT", "*", ".*", 10, PER_MINUTE), Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), Limit("DELETE", "*", ".*", 100, PER_MINUTE), ] class RateLimitingMiddleware(base_wsgi.Middleware): """Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """Initialize class, wrap WSGI app, and set up given limits. :param application: WSGI application to wrap :param limits: String describing limits :param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. """ base_wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Represents a single call through this middleware. We should record the request if we have a limit relevant to it. If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get("cinder.context") if context: username = context.user_id else: username = None delay, error = self._limiter.check_for_delay(verb, url, username) if delay: msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.OverLimitFault(msg, error, retry) req.environ["cinder.limits"] = self._limiter.get_limits(username) return self.application class Limiter(object): """Rate-limit checking class which handles limits in memory.""" def __init__(self, limits, **kwargs): """Initialize the new `Limiter`. @param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith(LIMITS_PREFIX): username = key[len(LIMITS_PREFIX):] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """Return the limits for a given user.""" return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """Check the given verb/url/username triplet for limit. 
@return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """Convert a string into a list of Limit instances. This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. """ # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError("Limit rules must be surrounded by " "parentheses") group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit") # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in Limit.UNIT_MAP: raise ValueError("Invalid units specified") unit = Limit.UNIT_MAP[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. To use, POST ``/`` with JSON data such as:: { "verb" : GET, "path" : "/servers" } and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds header containing the number of seconds to wait before the action would succeed. """ def __init__(self, limits=None): """Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ self._limiter = Limiter(limits or DEFAULT_LIMITS) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, request): """Handles a call to this application. Returns 204 if the request is acceptable to the limiter, else a 403 is returned with a relevant header indicating when the request *will* succeed. """ if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() try: info = dict(jsonutils.loads(request.body)) except ValueError: raise webob.exc.HTTPBadRequest() username = request.path_info_pop() verb = info.get("verb") path = info.get("path") delay, error = self._limiter.check_for_delay(verb, path, username) if delay: headers = {"X-Wait-Seconds": "%.2f" % delay} return webob.exc.HTTPForbidden(headers=headers, explanation=error) else: return webob.exc.HTTPNoContent() class WsgiLimiterProxy(object): """Rate-limit requests based on answers from a remote source.""" def __init__(self, limiter_address): """Initialize the new `WsgiLimiterProxy`. 
@param limiter_address: IP/port combination of where to request limit """ self.limiter_address = limiter_address def check_for_delay(self, verb, path, username=None): body = jsonutils.dump_as_bytes({"verb": verb, "path": path}) headers = {"Content-Type": "application/json"} conn = http_client.HTTPConnection(self.limiter_address) if username: conn.request("POST", "/%s" % (username), body, headers) else: conn.request("POST", "/", body, headers) resp = conn.getresponse() if 200 <= resp.status < 300: return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. # This implementation returns an empty list, since all limit # decisions are made by a remote server. @staticmethod def parse_limits(limits): """Ignore a limits string--simply doesn't apply for the limit proxy. @return: Empty list. """ return [] cinder-8.0.0/cinder/api/v2/types.py0000664000567000056710000001273312701406250020211 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volume type & volume types extra specs extension.""" from oslo_utils import strutils from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder.api.v2.views import types as views_types from cinder.api import xmlutil from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume import volume_types def make_voltype(elem): elem.set('id') elem.set('name') elem.set('description') elem.set('qos_specs_id') extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') elem.append(extra_specs) class VolumeTypeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_type', selector='volume_type') make_voltype(root) return xmlutil.MasterTemplate(root, 1) class VolumeTypesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_types') elem = xmlutil.SubTemplateElement(root, 'volume_type', selector='volume_types') make_voltype(elem) return xmlutil.MasterTemplate(root, 1) class VolumeTypesController(wsgi.Controller): """The volume types API controller for the OpenStack API.""" _view_builder_class = views_types.ViewBuilder @wsgi.serializers(xml=VolumeTypesTemplate) def index(self, req): """Returns the list of volume types.""" limited_types = self._get_volume_types(req) req.cache_resource(limited_types, name='types') return self._view_builder.index(req, limited_types) @wsgi.serializers(xml=VolumeTypeTemplate) def show(self, req, id): """Return a single volume type item.""" context = req.environ['cinder.context'] # get default volume type if id is not None and id == 'default': vol_type = volume_types.get_default_volume_type() if not vol_type: msg = _("Default volume type cannot be found.") raise 
exc.HTTPNotFound(explanation=msg) req.cache_resource(vol_type, name='types') else: try: vol_type = volume_types.get_volume_type(context, id) req.cache_resource(vol_type, name='types') except exception.VolumeTypeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.show(req, vol_type) def _parse_is_public(self, is_public): """Parse is_public into something usable. * True: List public volume types only * False: List private volume types only * None: List both public and private volume types """ if is_public is None: # preserve default value of showing only public types return True elif utils.is_none_string(is_public): return None else: try: return strutils.bool_from_string(is_public, strict=True) except ValueError: msg = _('Invalid is_public filter [%s]') % is_public raise exc.HTTPBadRequest(explanation=msg) def _get_volume_types(self, req): """Helper function that returns a list of type dicts.""" params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) # NOTE(wanghao): Currently, we still only support to filter by # is_public. If we want to filter by more args, we should set params # to filters. filters = {} context = req.environ['cinder.context'] if context.is_admin: # Only admin has query access to all volume types filters['is_public'] = self._parse_is_public( req.params.get('is_public', None)) else: filters['is_public'] = True utils.remove_invalid_filter_options(context, filters, self._get_vol_type_filter_options() ) limited_types = volume_types.get_all_types(context, filters=filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=True) return limited_types def _get_vol_type_filter_options(self): """Return volume type search options allowed by non-admin.""" return ['is_public'] def create_resource(): return wsgi.Resource(VolumeTypesController()) cinder-8.0.0/cinder/api/v2/__init__.py0000664000567000056710000000000012701406250020564 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/v2/volume_metadata.py0000664000567000056710000001356312701406250022216 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
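# NOTE(editor): Annotation added for clarity -- not part of the cinder
# tree. A minimal standalone sketch of the is_public query parsing
# implemented by VolumeTypesController._parse_is_public() above: an
# absent value -> True (public types only), the string "none" -> None
# (public and private), anything else -> strict boolean, with
# ValueError signalling the HTTP 400 path.
from oslo_utils import strutils


def parse_is_public_sketch(raw):
    if raw is None:
        return True          # default: show public volume types only
    if raw.strip().lower() == 'none':
        return None          # show both public and private volume types
    return strutils.bool_from_string(raw, strict=True)


# parse_is_public_sketch(None)    -> True
# parse_is_public_sketch('none')  -> None
# parse_is_public_sketch('0')     -> False
# parse_is_public_sketch('bogus') -> raises ValueError (mapped to HTTP 400)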
import webob from cinder.api import common from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _ from cinder import volume class Controller(wsgi.Controller): """The volume metadata API controller for the OpenStack API.""" def __init__(self): self.volume_api = volume.API() super(Controller, self).__init__() def _get_metadata(self, context, volume_id): # The metadata is at the second position of the tuple returned # from _get_volume_and_metadata return self._get_volume_and_metadata(context, volume_id)[1] def _get_volume_and_metadata(self, context, volume_id): try: volume = self.volume_api.get(context, volume_id) meta = self.volume_api.get_volume_metadata(context, volume) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) return (volume, meta) @wsgi.serializers(xml=common.MetadataTemplate) def index(self, req, volume_id): """Returns the list of metadata for a given volume.""" context = req.environ['cinder.context'] return {'metadata': self._get_metadata(context, volume_id)} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def create(self, req, volume_id, body): self.assert_valid_body(body, 'metadata') context = req.environ['cinder.context'] metadata = body['metadata'] new_metadata = self._update_volume_metadata(context, volume_id, metadata, delete=False) return {'metadata': new_metadata} @wsgi.serializers(xml=common.MetaItemTemplate) @wsgi.deserializers(xml=common.MetaItemDeserializer) def update(self, req, volume_id, id, body): self.assert_valid_body(body, 'meta') meta_item = body['meta'] if id not in meta_item: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise webob.exc.HTTPBadRequest(explanation=expl) context = req.environ['cinder.context'] self._update_volume_metadata(context, volume_id, meta_item, delete=False) return {'meta': meta_item} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def update_all(self, req, volume_id, body): self.assert_valid_body(body, 'metadata') metadata = body['metadata'] context = req.environ['cinder.context'] new_metadata = self._update_volume_metadata(context, volume_id, metadata, delete=True) return {'metadata': new_metadata} def _update_volume_metadata(self, context, volume_id, metadata, delete=False): try: volume = self.volume_api.get(context, volume_id) return self.volume_api.update_volume_metadata( context, volume, metadata, delete, meta_type=common.METADATA_TYPES.user) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) except (ValueError, AttributeError): msg = _("Malformed request body") raise webob.exc.HTTPBadRequest(explanation=msg) except exception.InvalidVolumeMetadata as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) @wsgi.serializers(xml=common.MetaItemTemplate) def show(self, req, volume_id, id): """Return a single metadata item.""" context = req.environ['cinder.context'] data = self._get_metadata(context, volume_id) try: return {'meta': {id: data[id]}} except KeyError: msg = _("Metadata item was not found") raise webob.exc.HTTPNotFound(explanation=msg) def delete(self, req, volume_id, id): """Deletes an existing metadata.""" context = req.environ['cinder.context'] volume, 
metadata = self._get_volume_and_metadata(context, volume_id) if id not in metadata: msg = _("Metadata item was not found") raise webob.exc.HTTPNotFound(explanation=msg) try: self.volume_api.delete_volume_metadata( context, volume, id, meta_type=common.METADATA_TYPES.user) except exception.VolumeNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) return webob.Response(status_int=200) def create_resource(): return wsgi.Resource(Controller()) cinder-8.0.0/cinder/api/v2/router.py0000664000567000056710000000767212701406250020373 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack Volume API. """ from oslo_log import log as logging from cinder.api import extensions import cinder.api.openstack from cinder.api.v2 import limits from cinder.api.v2 import snapshot_metadata from cinder.api.v2 import snapshots from cinder.api.v2 import types from cinder.api.v2 import volume_metadata from cinder.api.v2 import volumes from cinder.api import versions LOG = logging.getLogger(__name__) class APIRouter(cinder.api.openstack.APIRouter): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = versions.create_resource() mapper.connect("versions", "/", controller=self.resources['versions'], action='index') mapper.redirect("", "/") self.resources['volumes'] = volumes.create_resource(ext_mgr) mapper.resource("volume", "volumes", controller=self.resources['volumes'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['types'] = types.create_resource() mapper.resource("type", "types", controller=self.resources['types'], member={'action': 'POST'}) self.resources['snapshots'] = snapshots.create_resource(ext_mgr) mapper.resource("snapshot", "snapshots", controller=self.resources['snapshots'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['limits'] = limits.create_resource() mapper.resource("limit", "limits", controller=self.resources['limits']) self.resources['snapshot_metadata'] = \ snapshot_metadata.create_resource() snapshot_metadata_controller = self.resources['snapshot_metadata'] mapper.resource("snapshot_metadata", "metadata", controller=snapshot_metadata_controller, parent_resource=dict(member_name='snapshot', collection_name='snapshots')) mapper.connect("metadata", "/{project_id}/snapshots/{snapshot_id}/metadata", controller=snapshot_metadata_controller, action='update_all', conditions={"method": ['PUT']}) self.resources['volume_metadata'] = \ volume_metadata.create_resource() volume_metadata_controller = self.resources['volume_metadata'] mapper.resource("volume_metadata", "metadata", controller=volume_metadata_controller, parent_resource=dict(member_name='volume', 
collection_name='volumes')) mapper.connect("metadata", "/{project_id}/volumes/{volume_id}/metadata", controller=volume_metadata_controller, action='update_all', conditions={"method": ['PUT']}) cinder-8.0.0/cinder/api/v2/snapshot_metadata.py0000664000567000056710000001345112701406250022542 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _ from cinder import volume class Controller(wsgi.Controller): """The snapshot metadata API controller for the OpenStack API.""" def __init__(self): self.volume_api = volume.API() super(Controller, self).__init__() def _get_metadata(self, context, snapshot_id): try: snapshot = self.volume_api.get_snapshot(context, snapshot_id) meta = self.volume_api.get_snapshot_metadata(context, snapshot) except exception.SnapshotNotFound: msg = _('snapshot does not exist') raise exc.HTTPNotFound(explanation=msg) return meta @wsgi.serializers(xml=common.MetadataTemplate) def index(self, req, snapshot_id): """Returns the list of metadata for a given snapshot.""" context = req.environ['cinder.context'] return {'metadata': self._get_metadata(context, snapshot_id)} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def create(self, req, snapshot_id, body): self.assert_valid_body(body, 'metadata') context = req.environ['cinder.context'] metadata = body['metadata'] new_metadata = self._update_snapshot_metadata(context, snapshot_id, metadata, delete=False) return {'metadata': new_metadata} @wsgi.serializers(xml=common.MetaItemTemplate) @wsgi.deserializers(xml=common.MetaItemDeserializer) def update(self, req, snapshot_id, id, body): self.assert_valid_body(body, 'meta') meta_item = body['meta'] if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['cinder.context'] self._update_snapshot_metadata(context, snapshot_id, meta_item, delete=False) return {'meta': meta_item} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def update_all(self, req, snapshot_id, body): self.assert_valid_body(body, 'metadata') context = req.environ['cinder.context'] metadata = body['metadata'] new_metadata = self._update_snapshot_metadata(context, snapshot_id, metadata, delete=True) return {'metadata': new_metadata} def _update_snapshot_metadata(self, context, snapshot_id, metadata, delete=False): try: snapshot = self.volume_api.get_snapshot(context, snapshot_id) return self.volume_api.update_snapshot_metadata(context, snapshot, metadata, delete) except exception.SnapshotNotFound: msg = _('snapshot does not exist') raise exc.HTTPNotFound(explanation=msg) except (ValueError, 
AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidVolumeMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise exc.HTTPRequestEntityTooLarge(explanation=error.msg) @wsgi.serializers(xml=common.MetaItemTemplate) def show(self, req, snapshot_id, id): """Return a single metadata item.""" context = req.environ['cinder.context'] data = self._get_metadata(context, snapshot_id) try: return {'meta': {id: data[id]}} except KeyError: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) def delete(self, req, snapshot_id, id): """Deletes an existing metadata.""" context = req.environ['cinder.context'] metadata = self._get_metadata(context, snapshot_id) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) try: snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot_metadata(context, snapshot, id) except exception.SnapshotNotFound: msg = _('snapshot does not exist') raise exc.HTTPNotFound(explanation=msg) return webob.Response(status_int=200) def create_resource(): return wsgi.Resource(Controller()) cinder-8.0.0/cinder/api/v2/views/0000775000567000056710000000000012701406543017627 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/v2/views/types.py0000664000567000056710000000411212701406250021336 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # Copyright 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
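# NOTE(editor): Annotation, not cinder code. Illustrative request bodies
# for the snapshot metadata controller above; the snapshot ID and keys
# are made up. create() merges keys into the existing metadata,
# update_all() replaces the whole dict (delete=True), and update()
# accepts exactly one item whose key must match the key in the URI.
create_body = {'metadata': {'k1': 'v1', 'k2': 'v2'}}
# POST /v2/{project_id}/snapshots/{snapshot_id}/metadata

update_all_body = {'metadata': {'k1': 'v1'}}
# PUT  /v2/{project_id}/snapshots/{snapshot_id}/metadata  (k2 is removed)

update_item_body = {'meta': {'k1': 'new-value'}}
# PUT  /v2/{project_id}/snapshots/{snapshot_id}/metadata/k1
# A body keyed by a different name, or containing two items, yields
# HTTPBadRequest per the checks in update() above.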
from cinder.api import common class ViewBuilder(common.ViewBuilder): def show(self, request, volume_type, brief=False): """Trim away extraneous volume type attributes.""" context = request.environ['cinder.context'] trimmed = dict(id=volume_type.get('id'), name=volume_type.get('name'), is_public=volume_type.get('is_public'), description=volume_type.get('description')) if common.validate_policy( context, 'volume_extension:access_types_extra_specs'): trimmed['extra_specs'] = volume_type.get('extra_specs') if common.validate_policy( context, 'volume_extension:access_types_qos_specs_id'): trimmed['qos_specs_id'] = volume_type.get('qos_specs_id') return trimmed if brief else dict(volume_type=trimmed) def index(self, request, volume_types): """Index over trimmed volume types.""" volume_types_list = [self.show(request, volume_type, True) for volume_type in volume_types] volume_type_links = self._get_collection_links(request, volume_types, 'types') volume_types_dict = dict(volume_types=volume_types_list) if volume_type_links: volume_types_dict['volume_type_links'] = volume_type_links return volume_types_dict cinder-8.0.0/cinder/api/v2/views/__init__.py0000664000567000056710000000000012701406250021721 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/v2/views/volumes.py0000664000567000056710000001344412701406250021674 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
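# NOTE(editor): Annotation, not cinder code. Standalone sketch of the
# policy-gated field trimming done by the types ViewBuilder.show()
# above; the two booleans stand in for the
# volume_extension:access_types_extra_specs /
# volume_extension:access_types_qos_specs_id policy checks.
def trim_volume_type_sketch(vol_type, see_extra_specs, see_qos_specs_id):
    trimmed = {key: vol_type.get(key)
               for key in ('id', 'name', 'is_public', 'description')}
    if see_extra_specs:
        trimmed['extra_specs'] = vol_type.get('extra_specs')
    if see_qos_specs_id:
        trimmed['qos_specs_id'] = vol_type.get('qos_specs_id')
    return trimmed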
from oslo_log import log as logging import six from cinder.api import common LOG = logging.getLogger(__name__) class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = "volumes" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, volumes, volume_count=None): """Show a list of volumes without many details.""" return self._list_view(self.summary, request, volumes, volume_count) def detail_list(self, request, volumes, volume_count=None): """Detailed view of a list of volumes.""" return self._list_view(self.detail, request, volumes, volume_count, self._collection_name + '/detail') def summary(self, request, volume): """Generic, non-detailed view of a volume.""" return { 'volume': { 'id': volume['id'], 'name': volume['display_name'], 'links': self._get_links(request, volume['id']), }, } def detail(self, request, volume): """Detailed view of a single volume.""" volume_ref = { 'volume': { 'id': volume.get('id'), 'status': volume.get('status'), 'size': volume.get('size'), 'availability_zone': volume.get('availability_zone'), 'created_at': volume.get('created_at'), 'updated_at': volume.get('updated_at'), 'attachments': self._get_attachments(volume), 'name': volume.get('display_name'), 'description': volume.get('display_description'), 'volume_type': self._get_volume_type(volume), 'snapshot_id': volume.get('snapshot_id'), 'source_volid': volume.get('source_volid'), 'metadata': self._get_volume_metadata(volume), 'links': self._get_links(request, volume['id']), 'user_id': volume.get('user_id'), 'bootable': six.text_type(volume.get('bootable')).lower(), 'encrypted': self._is_volume_encrypted(volume), 'replication_status': volume.get('replication_status'), 'consistencygroup_id': volume.get('consistencygroup_id'), 'multiattach': volume.get('multiattach'), } } if request.environ['cinder.context'].is_admin: volume_ref['volume']['migration_status'] = ( volume.get('migration_status')) return volume_ref def _is_volume_encrypted(self, volume): """Determine if volume is encrypted.""" return volume.get('encryption_key_id') is not None def _get_attachments(self, volume): """Retrieve the attachments of the volume object.""" attachments = [] if volume['attach_status'] == 'attached': attaches = volume.volume_attachment for attachment in attaches: if attachment.get('attach_status') == 'attached': a = {'id': attachment.get('volume_id'), 'attachment_id': attachment.get('id'), 'volume_id': attachment.get('volume_id'), 'server_id': attachment.get('instance_uuid'), 'host_name': attachment.get('attached_host'), 'device': attachment.get('mountpoint'), 'attached_at': attachment.get('attach_time'), } attachments.append(a) return attachments def _get_volume_metadata(self, volume): """Retrieve the metadata of the volume object.""" return volume.metadata def _get_volume_type(self, volume): """Retrieve the type the volume object.""" if volume['volume_type_id'] and volume.get('volume_type'): return volume['volume_type']['name'] else: return volume['volume_type_id'] def _list_view(self, func, request, volumes, volume_count, coll_name=_collection_name): """Provide a view for a list of volumes. 
:param func: Function used to format the volume data :param request: API request :param volumes: List of volumes in dictionary format :param volume_count: Length of the original list of volumes :param coll_name: Name of collection, used to generate the next link for a pagination query :returns: Volume data in dictionary format """ volumes_list = [func(request, volume)['volume'] for volume in volumes] volumes_links = self._get_collection_links(request, volumes, coll_name, volume_count) volumes_dict = dict(volumes=volumes_list) if volumes_links: volumes_dict['volumes_links'] = volumes_links return volumes_dict cinder-8.0.0/cinder/api/v2/snapshots.py0000664000567000056710000002170412701406250021065 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes snapshots api.""" from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import strutils import webob from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder.api.views import snapshots as snapshot_views from cinder.api import xmlutil from cinder import exception from cinder.i18n import _, _LI from cinder import utils from cinder import volume from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) def make_snapshot(elem): elem.set('id') elem.set('status') elem.set('size') elem.set('created_at') elem.set('name') elem.set('description') elem.set('volume_id') elem.append(common.MetadataTemplate()) class SnapshotTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshot', selector='snapshot') make_snapshot(root) return xmlutil.MasterTemplate(root, 1) class SnapshotsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshots') elem = xmlutil.SubTemplateElement(root, 'snapshot', selector='snapshots') make_snapshot(elem) return xmlutil.MasterTemplate(root, 1) class SnapshotsController(wsgi.Controller): """The Snapshots API controller for the OpenStack API.""" _view_builder_class = snapshot_views.ViewBuilder def __init__(self, ext_mgr=None): self.volume_api = volume.API() self.ext_mgr = ext_mgr super(SnapshotsController, self).__init__() @wsgi.serializers(xml=SnapshotTemplate) def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['cinder.context'] try: snapshot = self.volume_api.get_snapshot(context, id) req.cache_db_snapshot(snapshot) except exception.SnapshotNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.detail(req, snapshot) def delete(self, req, id): """Delete a snapshot.""" context = req.environ['cinder.context'] LOG.info(_LI("Delete snapshot with id: %s"), id, context=context) try: snapshot = self.volume_api.get_snapshot(context, id) self.volume_api.delete_snapshot(context, snapshot) except exception.SnapshotNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return 
webob.Response(status_int=202) @wsgi.serializers(xml=SnapshotsTemplate) def index(self, req): """Returns a summary list of snapshots.""" return self._items(req, is_detail=False) @wsgi.serializers(xml=SnapshotsTemplate) def detail(self, req): """Returns a detailed list of snapshots.""" return self._items(req, is_detail=True) def _items(self, req, is_detail=True): """Returns a list of snapshots, transformed through view builder.""" context = req.environ['cinder.context'] # Pop out non search_opts and create local variables search_opts = req.GET.copy() sort_keys, sort_dirs = common.get_sort_params(search_opts) marker, limit, offset = common.get_pagination_params(search_opts) # Filter out invalid options allowed_search_options = ('status', 'volume_id', 'name') utils.remove_invalid_filter_options(context, search_opts, allowed_search_options) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in search_opts: search_opts['display_name'] = search_opts['name'] del search_opts['name'] snapshots = self.volume_api.get_all_snapshots(context, search_opts=search_opts, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset) req.cache_db_snapshots(snapshots.objects) if is_detail: snapshots = self._view_builder.detail_list(req, snapshots.objects) else: snapshots = self._view_builder.summary_list(req, snapshots.objects) return snapshots @wsgi.response(202) @wsgi.serializers(xml=SnapshotTemplate) def create(self, req, body): """Creates a new snapshot.""" kwargs = {} context = req.environ['cinder.context'] self.assert_valid_body(body, 'snapshot') snapshot = body['snapshot'] kwargs['metadata'] = snapshot.get('metadata', None) try: volume_id = snapshot['volume_id'] except KeyError: msg = _("'volume_id' must be specified") raise exc.HTTPBadRequest(explanation=msg) try: volume = self.volume_api.get(context, volume_id) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) force = snapshot.get('force', False) msg = _LI("Create snapshot from volume %s") LOG.info(msg, volume_id, context=context) self.validate_name_and_description(snapshot) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in snapshot: snapshot['display_name'] = snapshot.pop('name') try: force = strutils.bool_from_string(force, strict=True) except ValueError as error: err_msg = encodeutils.exception_to_unicode(error) msg = _("Invalid value for 'force': '%s'") % err_msg raise exception.InvalidParameterValue(err=msg) if force: new_snapshot = self.volume_api.create_snapshot_force( context, volume, snapshot.get('display_name'), snapshot.get('description'), **kwargs) else: new_snapshot = self.volume_api.create_snapshot( context, volume, snapshot.get('display_name'), snapshot.get('description'), **kwargs) req.cache_db_snapshot(new_snapshot) return self._view_builder.detail(req, new_snapshot) @wsgi.serializers(xml=SnapshotTemplate) def update(self, req, id, body): """Update a snapshot.""" context = req.environ['cinder.context'] if not body: msg = _("Missing request body") raise exc.HTTPBadRequest(explanation=msg) if 'snapshot' not in body: msg = (_("Missing required element '%s' in request body") % 'snapshot') raise exc.HTTPBadRequest(explanation=msg) snapshot = body['snapshot'] update_dict = {} valid_update_keys = ( 'name', 'description', 'display_name', 'display_description', ) self.validate_name_and_description(snapshot) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in snapshot: snapshot['display_name'] = snapshot.pop('name') # 
NOTE(thingee): v2 API allows description instead of # display_description if 'description' in snapshot: snapshot['display_description'] = snapshot.pop('description') for key in valid_update_keys: if key in snapshot: update_dict[key] = snapshot[key] try: snapshot = self.volume_api.get_snapshot(context, id) volume_utils.notify_about_snapshot_usage(context, snapshot, 'update.start') self.volume_api.update_snapshot(context, snapshot, update_dict) except exception.SnapshotNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) snapshot.update(update_dict) req.cache_db_snapshot(snapshot) volume_utils.notify_about_snapshot_usage(context, snapshot, 'update.end') return self._view_builder.detail(req, snapshot) def create_resource(ext_mgr): return wsgi.Resource(SnapshotsController(ext_mgr)) cinder-8.0.0/cinder/api/v2/volumes.py0000664000567000056710000004172512701406257020551 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes api.""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils import webob from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder.api.v2.views import volumes as volume_views from cinder.api import xmlutil from cinder import consistencygroup as consistencygroupAPI from cinder import exception from cinder.i18n import _, _LI from cinder.image import glance from cinder import utils from cinder import volume as cinder_volume from cinder.volume import utils as volume_utils from cinder.volume import volume_types CONF = cfg.CONF LOG = logging.getLogger(__name__) SCHEDULER_HINTS_NAMESPACE =\ "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2" def make_attachment(elem): elem.set('id') elem.set('attachment_id') elem.set('server_id') elem.set('host_name') elem.set('volume_id') elem.set('device') def make_volume(elem): elem.set('id') elem.set('status') elem.set('size') elem.set('availability_zone') elem.set('created_at') elem.set('name') elem.set('bootable') elem.set('description') elem.set('volume_type') elem.set('snapshot_id') elem.set('source_volid') elem.set('consistencygroup_id') elem.set('multiattach') attachments = xmlutil.SubTemplateElement(elem, 'attachments') attachment = xmlutil.SubTemplateElement(attachments, 'attachment', selector='attachments') make_attachment(attachment) # Attach metadata node elem.append(common.MetadataTemplate()) volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V2, 'atom': xmlutil.XMLNS_ATOM} class VolumeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume', selector='volume') make_volume(root) return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) class VolumesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumes') elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') make_volume(elem) return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) class 
CommonDeserializer(wsgi.MetadataXMLDeserializer): """Common deserializer to handle xml-formatted volume requests. Handles standard volume attributes as well as the optional metadata attribute """ metadata_deserializer = common.MetadataXMLDeserializer() def _extract_scheduler_hints(self, volume_node): """Marshal the scheduler hints attribute of a parsed request.""" node =\ self.find_first_child_named_in_namespace(volume_node, SCHEDULER_HINTS_NAMESPACE, "scheduler_hints") if node: scheduler_hints = {} for child in self.extract_elements(node): scheduler_hints.setdefault(child.nodeName, []) value = self.extract_text(child).strip() scheduler_hints[child.nodeName].append(value) return scheduler_hints else: return None def _extract_volume(self, node): """Marshal the volume attribute of a parsed request.""" volume = {} volume_node = self.find_first_child_named(node, 'volume') attributes = ['name', 'description', 'size', 'volume_type', 'availability_zone', 'imageRef', 'image_id', 'snapshot_id', 'source_volid', 'consistencygroup_id'] for attr in attributes: if volume_node.getAttribute(attr): volume[attr] = volume_node.getAttribute(attr) metadata_node = self.find_first_child_named(volume_node, 'metadata') if metadata_node is not None: volume['metadata'] = self.extract_metadata(metadata_node) scheduler_hints = self._extract_scheduler_hints(volume_node) if scheduler_hints: volume['scheduler_hints'] = scheduler_hints return volume class CreateDeserializer(CommonDeserializer): """Deserializer to handle xml-formatted create volume requests. Handles standard volume attributes as well as the optional metadata attribute """ def default(self, string): """Deserialize an xml-formatted volume create request.""" dom = utils.safe_minidom_parse_string(string) volume = self._extract_volume(dom) return {'body': {'volume': volume}} class VolumeController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" _view_builder_class = volume_views.ViewBuilder def __init__(self, ext_mgr): self.volume_api = cinder_volume.API() self.consistencygroup_api = consistencygroupAPI.API() self.ext_mgr = ext_mgr super(VolumeController, self).__init__() @wsgi.serializers(xml=VolumeTemplate) def show(self, req, id): """Return data about the given volume.""" context = req.environ['cinder.context'] try: vol = self.volume_api.get(context, id, viewable_admin_meta=True) req.cache_db_volume(vol) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) utils.add_visible_admin_metadata(vol) return self._view_builder.detail(req, vol) def delete(self, req, id): """Delete a volume.""" context = req.environ['cinder.context'] cascade = utils.get_bool_param('cascade', req.params) LOG.info(_LI("Delete volume with id: %s"), id, context=context) try: volume = self.volume_api.get(context, id) self.volume_api.delete(context, volume, cascade=cascade) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return webob.Response(status_int=202) @wsgi.serializers(xml=VolumesTemplate) def index(self, req): """Returns a summary list of volumes.""" return self._get_volumes(req, is_detail=False) @wsgi.serializers(xml=VolumesTemplate) def detail(self, req): """Returns a detailed list of volumes.""" return self._get_volumes(req, is_detail=True) def _get_volumes(self, req, is_detail): """Returns a list of volumes, transformed through view builder.""" context = req.environ['cinder.context'] params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) 
sort_keys, sort_dirs = common.get_sort_params(params) filters = params utils.remove_invalid_filter_options(context, filters, self._get_volume_filter_options()) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in sort_keys: sort_keys[sort_keys.index('name')] = 'display_name' if 'name' in filters: filters['display_name'] = filters['name'] del filters['name'] self.volume_api.check_volume_filters(filters) volumes = self.volume_api.get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, viewable_admin_meta=True, offset=offset) for volume in volumes: utils.add_visible_admin_metadata(volume) req.cache_db_volumes(volumes.objects) if is_detail: volumes = self._view_builder.detail_list(req, volumes) else: volumes = self._view_builder.summary_list(req, volumes) return volumes def _image_uuid_from_ref(self, image_ref, context): # If the image ref was generated by nova api, strip image_ref # down to an id. image_uuid = None try: image_uuid = image_ref.split('/').pop() except AttributeError: msg = _("Invalid imageRef provided.") raise exc.HTTPBadRequest(explanation=msg) image_service = glance.get_default_image_service() # First see if this is an actual image ID if uuidutils.is_uuid_like(image_uuid): try: image = image_service.show(context, image_uuid) if 'id' in image: return image['id'] except Exception: # Pass and see if there is a matching image name pass # Could not find by ID, check if it is an image name try: params = {'filters': {'name': image_ref}} images = list(image_service.detail(context, **params)) if len(images) > 1: msg = _("Multiple matches found for '%s', use an ID to be more" " specific.") % image_ref raise exc.HTTPConflict(msg) for img in images: return img['id'] except Exception: # Pass and let default not found error handling take care of it pass msg = _("Invalid image identifier or unable to " "access requested image.") raise exc.HTTPBadRequest(explanation=msg) @wsgi.response(202) @wsgi.serializers(xml=VolumeTemplate) @wsgi.deserializers(xml=CreateDeserializer) def create(self, req, body): """Creates a new volume.""" self.assert_valid_body(body, 'volume') LOG.debug('Create volume request body: %s', body) context = req.environ['cinder.context'] volume = body['volume'] kwargs = {} self.validate_name_and_description(volume) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in volume: volume['display_name'] = volume.pop('name') # NOTE(thingee): v2 API allows description instead of # display_description if 'description' in volume: volume['display_description'] = volume.pop('description') if 'image_id' in volume: volume['imageRef'] = volume.get('image_id') del volume['image_id'] req_volume_type = volume.get('volume_type', None) if req_volume_type: try: if not uuidutils.is_uuid_like(req_volume_type): kwargs['volume_type'] = \ volume_types.get_volume_type_by_name( context, req_volume_type) else: kwargs['volume_type'] = volume_types.get_volume_type( context, req_volume_type) except exception.VolumeTypeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: try: kwargs['snapshot'] = self.volume_api.get_snapshot(context, snapshot_id) except exception.SnapshotNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: try: kwargs['source_volume'] = \ self.volume_api.get_volume(context, 
source_volid) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) else: kwargs['source_volume'] = None source_replica = volume.get('source_replica') if source_replica is not None: try: src_vol = self.volume_api.get_volume(context, source_replica) if src_vol['replication_status'] == 'disabled': explanation = _('source volume id:%s is not' ' replicated') % source_replica raise exc.HTTPBadRequest(explanation=explanation) kwargs['source_replica'] = src_vol except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) else: kwargs['source_replica'] = None consistencygroup_id = volume.get('consistencygroup_id') if consistencygroup_id is not None: try: kwargs['consistencygroup'] = \ self.consistencygroup_api.get(context, consistencygroup_id) except exception.ConsistencyGroupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) else: kwargs['consistencygroup'] = None size = volume.get('size', None) if size is None and kwargs['snapshot'] is not None: size = kwargs['snapshot']['volume_size'] elif size is None and kwargs['source_volume'] is not None: size = kwargs['source_volume']['size'] elif size is None and kwargs['source_replica'] is not None: size = kwargs['source_replica']['size'] LOG.info(_LI("Create volume of %s GB"), size, context=context) if self.ext_mgr.is_loaded('os-image-create'): image_ref = volume.get('imageRef') if image_ref is not None: image_uuid = self._image_uuid_from_ref(image_ref, context) kwargs['image_id'] = image_uuid kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) multiattach = volume.get('multiattach', False) kwargs['multiattach'] = multiattach new_volume = self.volume_api.create(context, size, volume.get('display_name'), volume.get('display_description'), **kwargs) retval = self._view_builder.detail(req, new_volume) return retval def _get_volume_filter_options(self): """Return volume search options allowed by non-admin.""" return CONF.query_volume_filters @wsgi.serializers(xml=VolumeTemplate) def update(self, req, id, body): """Update a volume.""" context = req.environ['cinder.context'] if not body: msg = _("Missing request body") raise exc.HTTPBadRequest(explanation=msg) if 'volume' not in body: msg = _("Missing required element '%s' in request body") % 'volume' raise exc.HTTPBadRequest(explanation=msg) volume = body['volume'] update_dict = {} valid_update_keys = ( 'name', 'description', 'display_name', 'display_description', 'metadata', ) for key in valid_update_keys: if key in volume: update_dict[key] = volume[key] self.validate_name_and_description(update_dict) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in update_dict: update_dict['display_name'] = update_dict.pop('name') # NOTE(thingee): v2 API allows description instead of # display_description if 'description' in update_dict: update_dict['display_description'] = update_dict.pop('description') try: volume = self.volume_api.get(context, id, viewable_admin_meta=True) volume_utils.notify_about_volume_usage(context, volume, 'update.start') self.volume_api.update(context, volume, update_dict) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) volume.update(update_dict) utils.add_visible_admin_metadata(volume) volume_utils.notify_about_volume_usage(context, volume, 'update.end') return self._view_builder.detail(req, volume) def create_resource(ext_mgr): return wsgi.Resource(VolumeController(ext_mgr)) 
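# NOTE(editor): Annotation, not cinder code. Minimal sketch of the size
# fallback order implemented in VolumeController.create() above: an
# explicit size wins, then the snapshot's volume_size, then the source
# volume's size, then the source replica's size. Plain dicts stand in
# for the DB objects.
def infer_size_sketch(size, snapshot=None, source_volume=None,
                      source_replica=None):
    if size is None and snapshot is not None:
        size = snapshot['volume_size']
    elif size is None and source_volume is not None:
        size = source_volume['size']
    elif size is None and source_replica is not None:
        size = source_replica['size']
    return size


assert infer_size_sketch(None, snapshot={'volume_size': 10}) == 10
assert infer_size_sketch(5, source_volume={'size': 10}) == 5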
cinder-8.0.0/cinder/api/v1/0000775000567000056710000000000012701406543016471 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/v1/limits.py0000664000567000056710000003556512701406250020355 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. """ import collections import copy import math import re import time from oslo_serialization import jsonutils from oslo_utils import importutils from six.moves import http_client import webob.dec import webob.exc from cinder.api.openstack import wsgi from cinder.api.views import limits as limits_views from cinder.api import xmlutil from cinder.i18n import _ from cinder import quota from cinder.wsgi import common as base_wsgi QUOTAS = quota.QUOTAS LIMITS_PREFIX = "limits." # Convenience constants for the limits dictionary passed to Limiter(). PER_SECOND = 1 PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} class LimitsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('limits', selector='limits') rates = xmlutil.SubTemplateElement(root, 'rates') rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate') rate.set('uri', 'uri') rate.set('regex', 'regex') limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit') limit.set('value', 'value') limit.set('verb', 'verb') limit.set('remaining', 'remaining') limit.set('unit', 'unit') limit.set('next-available', 'next-available') absolute = xmlutil.SubTemplateElement(root, 'absolute', selector='absolute') limit = xmlutil.SubTemplateElement(absolute, 'limit', selector=xmlutil.get_items) limit.set('name', 0) limit.set('value', 1) return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap) class LimitsController(wsgi.Controller): """Controller for accessing limits in the OpenStack API.""" @wsgi.serializers(xml=LimitsTemplate) def index(self, req): """Return all global and rate limit information.""" context = req.environ['cinder.context'] quotas = QUOTAS.get_project_quotas(context, context.project_id, usages=False) abs_limits = {k: v['limit'] for k, v in quotas.items()} rate_limits = req.environ.get("cinder.limits", []) builder = self._get_view_builder(req) return builder.build(rate_limits, abs_limits) def _get_view_builder(self, req): return limits_views.ViewBuilder() def create_resource(): return wsgi.Resource(LimitsController()) class Limit(object): """Stores information about a limit for HTTP requests.""" UNITS = { 1: "SECOND", 60: "MINUTE", 60 * 60: "HOUR", 60 * 60 * 24: "DAY", } UNIT_MAP = {v: k for k, v in UNITS.items()} def __init__(self, verb, uri, regex, value, unit): """Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) 
@param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError("Limit value must be > 0") self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = (_("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") % {'value': self.value, 'verb': self.verb, 'uri': self.uri, 'unit_string': self.unit_string}) self.error_message = msg def __call__(self, verb, url): """Represent a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) DEFAULT_LIMITS = [ Limit("POST", "*", ".*", 10, PER_MINUTE), Limit("POST", "*/servers", "^/servers", 50, PER_DAY), Limit("PUT", "*", ".*", 10, PER_MINUTE), Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), Limit("DELETE", "*", ".*", 100, PER_MINUTE), ] class RateLimitingMiddleware(base_wsgi.Middleware): """Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """Initialize new `RateLimitingMiddleware` This wraps the given WSGI application and sets up the given limits. @param application: WSGI application to wrap @param limits: String describing limits @param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. """ base_wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Represent a single call through this middleware. 
We should record the request if we have a limit relevant to it. If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get("cinder.context") if context: username = context.user_id else: username = None delay, error = self._limiter.check_for_delay(verb, url, username) if delay: msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.OverLimitFault(msg, error, retry) req.environ["cinder.limits"] = self._limiter.get_limits(username) return self.application class Limiter(object): """Rate-limit checking class which handles limits in memory.""" def __init__(self, limits, **kwargs): """Initialize the new `Limiter`. @param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith(LIMITS_PREFIX): username = key[len(LIMITS_PREFIX):] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """Return the limits for a given user.""" return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """Check the given verb/url/username triplet for limit. @return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """Convert a string into a list of Limit instances. This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. """ # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError("Limit rules must be surrounded by " "parentheses") group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit") # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in Limit.UNIT_MAP: raise ValueError("Invalid units specified") unit = Limit.UNIT_MAP[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.
To use, POST ``/`` with JSON data such as:: { "verb" : "GET", "path" : "/servers" } and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds header containing the number of seconds to wait before the action would succeed. """ def __init__(self, limits=None): """Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ self._limiter = Limiter(limits or DEFAULT_LIMITS) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, request): """Handles a call to this application. Returns 204 if the request is acceptable to the limiter, else a 403 is returned with a relevant header indicating when the request *will* succeed. """ if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() try: info = dict(jsonutils.loads(request.body)) except ValueError: raise webob.exc.HTTPBadRequest() username = request.path_info_pop() verb = info.get("verb") path = info.get("path") delay, error = self._limiter.check_for_delay(verb, path, username) if delay: headers = {"X-Wait-Seconds": "%.2f" % delay} return webob.exc.HTTPForbidden(headers=headers, explanation=error) else: return webob.exc.HTTPNoContent() class WsgiLimiterProxy(object): """Rate-limit requests based on answers from a remote source.""" def __init__(self, limiter_address): """Initialize the new `WsgiLimiterProxy`. @param limiter_address: IP/port combination of where to request limit """ self.limiter_address = limiter_address def check_for_delay(self, verb, path, username=None): body = jsonutils.dump_as_bytes({"verb": verb, "path": path}) headers = {"Content-Type": "application/json"} conn = http_client.HTTPConnection(self.limiter_address) if username: conn.request("POST", "/%s" % (username), body, headers) else: conn.request("POST", "/", body, headers) resp = conn.getresponse() if 200 <= resp.status < 300: return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. # This implementation returns an empty list, since all limit # decisions are made by a remote server. @staticmethod def parse_limits(limits): """Ignore a limits string--simply doesn't apply for the limit proxy. @return: Empty list. """ return [] cinder-8.0.0/cinder/api/v1/types.py0000664000567000056710000000524312701406250020206 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
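# NOTE(editor): Annotation, not cinder code. The same leaky-bucket
# arithmetic as Limit.__call__ in cinder/api/v1/limits.py above, with
# explicit numbers for a 10-per-MINUTE limit and no time elapsing
# between requests.
capacity = 60.0                 # Limit.capacity == unit, in seconds
request_value = capacity / 10   # 6.0 added to the bucket per request
water_level = 0.0
for n in range(1, 12):          # 11 back-to-back requests
    water_level += request_value
    overflow = water_level - capacity
    if overflow > 0:
        water_level -= request_value   # rejected request is "un-poured"
        print("request %d must wait %.1fs" % (n, overflow))
# Only request 11 overflows, printing: request 11 must wait 6.0s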
"""The volume type & volume types extra specs extension.""" from webob import exc from cinder.api.openstack import wsgi from cinder.api.views import types as views_types from cinder.api import xmlutil from cinder import exception from cinder.volume import volume_types def make_voltype(elem): elem.set('id') elem.set('name') extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') elem.append(extra_specs) class VolumeTypeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_type', selector='volume_type') make_voltype(root) return xmlutil.MasterTemplate(root, 1) class VolumeTypesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume_types') elem = xmlutil.SubTemplateElement(root, 'volume_type', selector='volume_types') make_voltype(elem) return xmlutil.MasterTemplate(root, 1) class VolumeTypesController(wsgi.Controller): """The volume types API controller for the OpenStack API.""" _view_builder_class = views_types.ViewBuilder @wsgi.serializers(xml=VolumeTypesTemplate) def index(self, req): """Returns the list of volume types.""" context = req.environ['cinder.context'] vol_types = volume_types.get_all_types(context) vol_types = list(vol_types.values()) req.cache_resource(vol_types, name='types') return self._view_builder.index(req, vol_types) @wsgi.serializers(xml=VolumeTypeTemplate) def show(self, req, id): """Return a single volume type item.""" context = req.environ['cinder.context'] try: vol_type = volume_types.get_volume_type(context, id) req.cache_resource(vol_type, name='types') except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.show(req, vol_type) def create_resource(): return wsgi.Resource(VolumeTypesController()) cinder-8.0.0/cinder/api/v1/__init__.py0000664000567000056710000000000012701406250020563 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/v1/volume_metadata.py0000664000567000056710000001375512701406250022220 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _ from cinder import volume class Controller(wsgi.Controller): """The volume metadata API controller for the OpenStack API.""" def __init__(self): self.volume_api = volume.API() super(Controller, self).__init__() def _get_metadata(self, context, volume_id): try: volume = self.volume_api.get(context, volume_id) meta = self.volume_api.get_volume_metadata(context, volume) except exception.VolumeNotFound: msg = _('volume does not exist') raise exc.HTTPNotFound(explanation=msg) return meta @wsgi.serializers(xml=common.MetadataTemplate) def index(self, req, volume_id): """Returns the list of metadata for a given volume.""" context = req.environ['cinder.context'] return {'metadata': self._get_metadata(context, volume_id)} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def create(self, req, volume_id, body): try: metadata = body['metadata'] except (KeyError, TypeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) context = req.environ['cinder.context'] new_metadata = self._update_volume_metadata(context, volume_id, metadata, delete=False) return {'metadata': new_metadata} @wsgi.serializers(xml=common.MetaItemTemplate) @wsgi.deserializers(xml=common.MetaItemDeserializer) def update(self, req, volume_id, id, body): try: meta_item = body['meta'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['cinder.context'] self._update_volume_metadata(context, volume_id, meta_item, delete=False) return {'meta': meta_item} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def update_all(self, req, volume_id, body): try: metadata = body['metadata'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['cinder.context'] new_metadata = self._update_volume_metadata(context, volume_id, metadata, delete=True) return {'metadata': new_metadata} def _update_volume_metadata(self, context, volume_id, metadata, delete=False): try: volume = self.volume_api.get(context, volume_id) return self.volume_api.update_volume_metadata(context, volume, metadata, delete) except exception.VolumeNotFound: msg = _('volume does not exist') raise exc.HTTPNotFound(explanation=msg) except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidVolumeMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise exc.HTTPRequestEntityTooLarge(explanation=error.msg) @wsgi.serializers(xml=common.MetaItemTemplate) def show(self, req, volume_id, id): """Return a single metadata item.""" context = req.environ['cinder.context'] data = self._get_metadata(context, volume_id) try: return {'meta': {id: data[id]}} except KeyError: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) def delete(self, req, volume_id, id): """Deletes an existing metadata.""" context = req.environ['cinder.context'] metadata = self._get_metadata(context, 
volume_id) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) try: volume = self.volume_api.get(context, volume_id) self.volume_api.delete_volume_metadata(context, volume, id) except exception.VolumeNotFound: msg = _('volume does not exist') raise exc.HTTPNotFound(explanation=msg) return webob.Response(status_int=200) def create_resource(): return wsgi.Resource(Controller()) cinder-8.0.0/cinder/api/v1/router.py0000664000567000056710000000760612701406250020367 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack Volume API. """ from oslo_log import log as logging from cinder.api import extensions import cinder.api.openstack from cinder.api.v1 import limits from cinder.api.v1 import snapshot_metadata from cinder.api.v1 import snapshots from cinder.api.v1 import types from cinder.api.v1 import volume_metadata from cinder.api.v1 import volumes from cinder.api import versions LOG = logging.getLogger(__name__) class APIRouter(cinder.api.openstack.APIRouter): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = versions.create_resource() mapper.connect("versions", "/", controller=self.resources['versions'], action='index') mapper.redirect("", "/") self.resources['volumes'] = volumes.create_resource(ext_mgr) mapper.resource("volume", "volumes", controller=self.resources['volumes'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['types'] = types.create_resource() mapper.resource("type", "types", controller=self.resources['types']) self.resources['snapshots'] = snapshots.create_resource(ext_mgr) mapper.resource("snapshot", "snapshots", controller=self.resources['snapshots'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['snapshot_metadata'] = \ snapshot_metadata.create_resource() snapshot_metadata_controller = self.resources['snapshot_metadata'] mapper.resource("snapshot_metadata", "metadata", controller=snapshot_metadata_controller, parent_resource=dict(member_name='snapshot', collection_name='snapshots')) mapper.connect("metadata", "/{project_id}/snapshots/{snapshot_id}/metadata", controller=snapshot_metadata_controller, action='update_all', conditions={"method": ['PUT']}) self.resources['limits'] = limits.create_resource() mapper.resource("limit", "limits", controller=self.resources['limits']) self.resources['volume_metadata'] = \ volume_metadata.create_resource() volume_metadata_controller = self.resources['volume_metadata'] mapper.resource("volume_metadata", "metadata", controller=volume_metadata_controller, parent_resource=dict(member_name='volume', collection_name='volumes')) mapper.connect("metadata", 
"/{project_id}/volumes/{volume_id}/metadata", controller=volume_metadata_controller, action='update_all', conditions={"method": ['PUT']}) cinder-8.0.0/cinder/api/v1/snapshot_metadata.py0000664000567000056710000001416012701406250022537 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _ from cinder import volume class Controller(wsgi.Controller): """The snapshot metadata API controller for the OpenStack API.""" def __init__(self): self.volume_api = volume.API() super(Controller, self).__init__() def _get_metadata(self, context, snapshot_id): try: snapshot = self.volume_api.get_snapshot(context, snapshot_id) meta = self.volume_api.get_snapshot_metadata(context, snapshot) except exception.SnapshotNotFound: msg = _('snapshot does not exist') raise exc.HTTPNotFound(explanation=msg) return meta @wsgi.serializers(xml=common.MetadataTemplate) def index(self, req, snapshot_id): """Returns the list of metadata for a given snapshot.""" context = req.environ['cinder.context'] return {'metadata': self._get_metadata(context, snapshot_id)} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def create(self, req, snapshot_id, body): try: metadata = body['metadata'] except (KeyError, TypeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) context = req.environ['cinder.context'] new_metadata = self._update_snapshot_metadata(context, snapshot_id, metadata, delete=False) return {'metadata': new_metadata} @wsgi.serializers(xml=common.MetaItemTemplate) @wsgi.deserializers(xml=common.MetaItemDeserializer) def update(self, req, snapshot_id, id, body): try: meta_item = body['meta'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['cinder.context'] self._update_snapshot_metadata(context, snapshot_id, meta_item, delete=False) return {'meta': meta_item} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def update_all(self, req, snapshot_id, body): try: metadata = body['metadata'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['cinder.context'] new_metadata = self._update_snapshot_metadata(context, snapshot_id, metadata, delete=True) return {'metadata': new_metadata} def _update_snapshot_metadata(self, context, snapshot_id, metadata, delete=False): try: snapshot = self.volume_api.get_snapshot(context, snapshot_id) return self.volume_api.update_snapshot_metadata(context, snapshot, 
metadata, delete) except exception.SnapshotNotFound: msg = _('snapshot does not exist') raise exc.HTTPNotFound(explanation=msg) except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidVolumeMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise exc.HTTPRequestEntityTooLarge(explanation=error.msg) @wsgi.serializers(xml=common.MetaItemTemplate) def show(self, req, snapshot_id, id): """Return a single metadata item.""" context = req.environ['cinder.context'] data = self._get_metadata(context, snapshot_id) try: return {'meta': {id: data[id]}} except KeyError: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) def delete(self, req, snapshot_id, id): """Deletes an existing metadata.""" context = req.environ['cinder.context'] metadata = self._get_metadata(context, snapshot_id) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) try: snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot_metadata(context, snapshot, id) except exception.SnapshotNotFound: msg = _('snapshot does not exist') raise exc.HTTPNotFound(explanation=msg) return webob.Response(status_int=200) def create_resource(): return wsgi.Resource(Controller()) cinder-8.0.0/cinder/api/v1/snapshots.py0000664000567000056710000001745112701406250021070 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
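# Illustrative sketch (editorial, hypothetical URL/ids/token): the controller
# below implements snapshot CRUD.  On create, 'volume_id' is mandatory
# (missing it yields 400) and 'force' is validated with is_valid_boolstr and
# parsed with strutils.bool_from_string; a truthy value routes to
# create_snapshot_force so an attached volume can still be snapshotted.
import json
import requests

URL = 'http://cinder.example.com:8776/v1/demo-tenant-id/snapshots'
HEADERS = {'X-Auth-Token': 'a-keystone-token',
           'Content-Type': 'application/json'}

body = {'snapshot': {'volume_id': 'VOLUME-UUID',   # required, else 400
                     'display_name': 'before-upgrade',
                     'force': 'true',              # snapshot in-use volume
                     'metadata': {'purpose': 'backup'}}}
resp = requests.post(URL, headers=HEADERS, data=json.dumps(body))
print(resp.status_code, resp.json()['snapshot']['status'])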
"""The volumes snapshots api.""" from oslo_log import log as logging from oslo_utils import strutils import webob from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import exception from cinder.i18n import _, _LI from cinder import utils from cinder import volume LOG = logging.getLogger(__name__) def _translate_snapshot_detail_view(snapshot): """Maps keys for snapshots details view.""" d = _translate_snapshot_summary_view(snapshot) # NOTE(gagupta): No additional data / lookups at the moment return d def _translate_snapshot_summary_view(snapshot): """Maps keys for snapshots summary view.""" d = {} d['id'] = snapshot['id'] d['created_at'] = snapshot['created_at'] d['display_name'] = snapshot['display_name'] d['display_description'] = snapshot['display_description'] d['volume_id'] = snapshot['volume_id'] d['status'] = snapshot['status'] d['size'] = snapshot['volume_size'] if snapshot.get('metadata') and isinstance(snapshot.get('metadata'), dict): d['metadata'] = snapshot['metadata'] else: d['metadata'] = {} return d def make_snapshot(elem): elem.set('id') elem.set('status') elem.set('size') elem.set('created_at') elem.set('display_name') elem.set('display_description') elem.set('volume_id') elem.append(common.MetadataTemplate()) class SnapshotTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshot', selector='snapshot') make_snapshot(root) return xmlutil.MasterTemplate(root, 1) class SnapshotsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshots') elem = xmlutil.SubTemplateElement(root, 'snapshot', selector='snapshots') make_snapshot(elem) return xmlutil.MasterTemplate(root, 1) class SnapshotsController(wsgi.Controller): """The Snapshots API controller for the OpenStack API.""" def __init__(self, ext_mgr=None): self.volume_api = volume.API() self.ext_mgr = ext_mgr super(SnapshotsController, self).__init__() @wsgi.serializers(xml=SnapshotTemplate) def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['cinder.context'] try: snapshot = self.volume_api.get_snapshot(context, id) req.cache_db_snapshot(snapshot) except exception.NotFound: raise exc.HTTPNotFound() return {'snapshot': _translate_snapshot_detail_view(snapshot)} def delete(self, req, id): """Delete a snapshot.""" context = req.environ['cinder.context'] LOG.info(_LI("Delete snapshot with id: %s"), id, context=context) try: snapshot = self.volume_api.get_snapshot(context, id) self.volume_api.delete_snapshot(context, snapshot) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=202) @wsgi.serializers(xml=SnapshotsTemplate) def index(self, req): """Returns a summary list of snapshots.""" return self._items(req, entity_maker=_translate_snapshot_summary_view) @wsgi.serializers(xml=SnapshotsTemplate) def detail(self, req): """Returns a detailed list of snapshots.""" return self._items(req, entity_maker=_translate_snapshot_detail_view) def _items(self, req, entity_maker): """Returns a list of snapshots, transformed through entity_maker.""" context = req.environ['cinder.context'] # pop out limit and offset , they are not search_opts search_opts = req.GET.copy() search_opts.pop('limit', None) search_opts.pop('offset', None) # filter out invalid option allowed_search_options = ('status', 'volume_id', 'display_name') utils.remove_invalid_filter_options(context, search_opts, allowed_search_options) snapshots = 
self.volume_api.get_all_snapshots(context, search_opts=search_opts) limited_list = common.limited(snapshots.objects, req) req.cache_db_snapshots(limited_list) res = [entity_maker(snapshot) for snapshot in limited_list] return {'snapshots': res} @wsgi.serializers(xml=SnapshotTemplate) def create(self, req, body): """Creates a new snapshot.""" kwargs = {} context = req.environ['cinder.context'] if not self.is_valid_body(body, 'snapshot'): raise exc.HTTPUnprocessableEntity() snapshot = body['snapshot'] kwargs['metadata'] = snapshot.get('metadata', None) try: volume_id = snapshot['volume_id'] except KeyError: msg = _("'volume_id' must be specified") raise exc.HTTPBadRequest(explanation=msg) try: volume = self.volume_api.get(context, volume_id) except exception.NotFound: raise exc.HTTPNotFound() force = snapshot.get('force', False) msg = _LI("Create snapshot from volume %s") LOG.info(msg, volume_id, context=context) if not utils.is_valid_boolstr(force): msg = _("Invalid value '%s' for force. ") % force raise exception.InvalidParameterValue(err=msg) if strutils.bool_from_string(force): new_snapshot = self.volume_api.create_snapshot_force( context, volume, snapshot.get('display_name'), snapshot.get('display_description'), **kwargs) else: new_snapshot = self.volume_api.create_snapshot( context, volume, snapshot.get('display_name'), snapshot.get('display_description'), **kwargs) req.cache_db_snapshot(new_snapshot) retval = _translate_snapshot_detail_view(new_snapshot) return {'snapshot': retval} @wsgi.serializers(xml=SnapshotTemplate) def update(self, req, id, body): """Update a snapshot.""" context = req.environ['cinder.context'] if not body: raise exc.HTTPUnprocessableEntity() if 'snapshot' not in body: raise exc.HTTPUnprocessableEntity() snapshot = body['snapshot'] update_dict = {} valid_update_keys = ( 'display_name', 'display_description', ) for key in valid_update_keys: if key in snapshot: update_dict[key] = snapshot[key] try: snapshot = self.volume_api.get_snapshot(context, id) self.volume_api.update_snapshot(context, snapshot, update_dict) except exception.NotFound: raise exc.HTTPNotFound() snapshot.update(update_dict) req.cache_db_snapshot(snapshot) return {'snapshot': _translate_snapshot_detail_view(snapshot)} def create_resource(ext_mgr): return wsgi.Resource(SnapshotsController(ext_mgr)) cinder-8.0.0/cinder/api/v1/volumes.py0000664000567000056710000003520412701406250020534 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
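# Illustrative sketch (editorial, hypothetical URL/ids/token): the controller
# below implements volume CRUD.  On create, when 'size' is omitted it is
# inferred from the snapshot's volume_size or the source volume's size, and
# 'imageRef' is only honored when the os-image-create extension is loaded.
import json
import requests

URL = 'http://cinder.example.com:8776/v1/demo-tenant-id/volumes'
HEADERS = {'X-Auth-Token': 'a-keystone-token',
           'Content-Type': 'application/json'}

# 'size' is omitted on purpose: it is inherited from the snapshot.
body = {'volume': {'snapshot_id': 'SNAPSHOT-UUID',
                   'display_name': 'restored-vol'}}
resp = requests.post(URL, headers=HEADERS, data=json.dumps(body))
print(resp.json()['volume']['id'], resp.json()['volume']['size'])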
"""The volumes api.""" import ast from oslo_log import log as logging from oslo_utils import uuidutils import webob from webob import exc from cinder.api import common from cinder.api.openstack import wsgi from cinder.api import xmlutil from cinder import exception from cinder.i18n import _, _LI from cinder import utils from cinder import volume as cinder_volume from cinder.volume import utils as volume_utils from cinder.volume import volume_types LOG = logging.getLogger(__name__) def _translate_attachment_detail_view(_context, vol): """Maps keys for attachment details view.""" d = _translate_attachment_summary_view(_context, vol) # No additional data / lookups at the moment return d def _translate_attachment_summary_view(_context, vol): """Maps keys for attachment summary view.""" d = [] attachments = vol.volume_attachment for attachment in attachments: if attachment.get('attach_status') == 'attached': a = {'id': attachment.get('volume_id'), 'attachment_id': attachment.get('id'), 'volume_id': attachment.get('volume_id'), 'server_id': attachment.get('instance_uuid'), 'host_name': attachment.get('attached_host'), 'device': attachment.get('mountpoint'), } d.append(a) return d def _translate_volume_detail_view(context, vol, image_id=None): """Maps keys for volumes details view.""" d = _translate_volume_summary_view(context, vol, image_id) # No additional data / lookups at the moment return d def _translate_volume_summary_view(context, vol, image_id=None): """Maps keys for volumes summary view.""" d = {} d['id'] = vol['id'] d['status'] = vol['status'] d['size'] = vol['size'] d['availability_zone'] = vol['availability_zone'] d['created_at'] = vol['created_at'] # Need to form the string true/false explicitly here to # maintain our API contract if vol['bootable']: d['bootable'] = 'true' else: d['bootable'] = 'false' if vol['multiattach']: d['multiattach'] = 'true' else: d['multiattach'] = 'false' d['attachments'] = [] if vol['attach_status'] == 'attached': d['attachments'] = _translate_attachment_detail_view(context, vol) d['display_name'] = vol['display_name'] d['display_description'] = vol['display_description'] if vol['volume_type_id'] and vol.get('volume_type'): d['volume_type'] = vol['volume_type']['name'] else: d['volume_type'] = vol['volume_type_id'] d['snapshot_id'] = vol['snapshot_id'] d['source_volid'] = vol['source_volid'] d['encrypted'] = vol['encryption_key_id'] is not None if image_id: d['image_id'] = image_id LOG.info(_LI("vol=%s"), vol, context=context) if vol.metadata: d['metadata'] = vol.metadata else: d['metadata'] = {} return d def make_attachment(elem): elem.set('id') elem.set('server_id') elem.set('host_name') elem.set('volume_id') elem.set('device') def make_volume(elem): elem.set('id') elem.set('status') elem.set('size') elem.set('availability_zone') elem.set('created_at') elem.set('display_name') elem.set('bootable') elem.set('display_description') elem.set('volume_type') elem.set('snapshot_id') elem.set('source_volid') elem.set('multiattach') attachments = xmlutil.SubTemplateElement(elem, 'attachments') attachment = xmlutil.SubTemplateElement(attachments, 'attachment', selector='attachments') make_attachment(attachment) # Attach metadata node elem.append(common.MetadataTemplate()) volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V1, 'atom': xmlutil.XMLNS_ATOM} class VolumeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume', selector='volume') make_volume(root) return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) class 
VolumesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumes') elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') make_volume(elem) return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) class CommonDeserializer(wsgi.MetadataXMLDeserializer): """Common deserializer to handle xml-formatted volume requests. Handles standard volume attributes as well as the optional metadata attribute """ metadata_deserializer = common.MetadataXMLDeserializer() def _extract_volume(self, node): """Marshal the volume attribute of a parsed request.""" volume = {} volume_node = self.find_first_child_named(node, 'volume') attributes = ['display_name', 'display_description', 'size', 'volume_type', 'availability_zone', 'imageRef', 'snapshot_id', 'source_volid'] for attr in attributes: if volume_node.getAttribute(attr): volume[attr] = volume_node.getAttribute(attr) metadata_node = self.find_first_child_named(volume_node, 'metadata') if metadata_node is not None: volume['metadata'] = self.extract_metadata(metadata_node) return volume class CreateDeserializer(CommonDeserializer): """Deserializer to handle xml-formatted create volume requests. Handles standard volume attributes as well as the optional metadata attribute """ def default(self, string): """Deserialize an xml-formatted volume create request.""" dom = utils.safe_minidom_parse_string(string) volume = self._extract_volume(dom) return {'body': {'volume': volume}} class VolumeController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" def __init__(self, ext_mgr): self.volume_api = cinder_volume.API() self.ext_mgr = ext_mgr super(VolumeController, self).__init__() @wsgi.serializers(xml=VolumeTemplate) def show(self, req, id): """Return data about the given volume.""" context = req.environ['cinder.context'] try: vol = self.volume_api.get(context, id, viewable_admin_meta=True) req.cache_db_volume(vol) except exception.NotFound: raise exc.HTTPNotFound() utils.add_visible_admin_metadata(vol) return {'volume': _translate_volume_detail_view(context, vol)} def delete(self, req, id): """Delete a volume.""" context = req.environ['cinder.context'] LOG.info(_LI("Delete volume with id: %s"), id, context=context) try: volume = self.volume_api.get(context, id) self.volume_api.delete(context, volume) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=202) @wsgi.serializers(xml=VolumesTemplate) def index(self, req): """Returns a summary list of volumes.""" return self._items(req, entity_maker=_translate_volume_summary_view) @wsgi.serializers(xml=VolumesTemplate) def detail(self, req): """Returns a detailed list of volumes.""" return self._items(req, entity_maker=_translate_volume_detail_view) def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" # pop out limit and offset , they are not search_opts search_opts = req.GET.copy() search_opts.pop('limit', None) search_opts.pop('offset', None) for k, v in search_opts.items(): try: search_opts[k] = ast.literal_eval(v) except (ValueError, SyntaxError): LOG.debug('Could not evaluate value %s, assuming string', v) context = req.environ['cinder.context'] utils.remove_invalid_filter_options(context, search_opts, self._get_volume_search_options()) volumes = self.volume_api.get_all(context, marker=None, limit=None, sort_keys=['created_at'], sort_dirs=['desc'], filters=search_opts, viewable_admin_meta=True) for volume in volumes: utils.add_visible_admin_metadata(volume) 
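        # common.limited() below applies the request's limit/offset paging
        # to the already-filtered results, and the resulting page is cached
        # on the request so API extensions can reuse it.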
limited_list = common.limited(volumes.objects, req) req.cache_db_volumes(limited_list) res = [entity_maker(context, vol) for vol in limited_list] return {'volumes': res} def _image_uuid_from_href(self, image_href): # If the image href was generated by nova api, strip image_href # down to an id. try: image_uuid = image_href.split('/').pop() except (TypeError, AttributeError): msg = _("Invalid imageRef provided.") raise exc.HTTPBadRequest(explanation=msg) if not uuidutils.is_uuid_like(image_uuid): msg = _("Invalid imageRef provided.") raise exc.HTTPBadRequest(explanation=msg) return image_uuid @wsgi.serializers(xml=VolumeTemplate) @wsgi.deserializers(xml=CreateDeserializer) def create(self, req, body): """Creates a new volume.""" if not self.is_valid_body(body, 'volume'): raise exc.HTTPUnprocessableEntity() LOG.debug('Create volume request body: %s', body) context = req.environ['cinder.context'] volume = body['volume'] kwargs = {} req_volume_type = volume.get('volume_type', None) if req_volume_type: try: if not uuidutils.is_uuid_like(req_volume_type): kwargs['volume_type'] = \ volume_types.get_volume_type_by_name( context, req_volume_type) else: kwargs['volume_type'] = volume_types.get_volume_type( context, req_volume_type) except exception.VolumeTypeNotFound: explanation = 'Volume type not found.' raise exc.HTTPNotFound(explanation=explanation) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: try: kwargs['snapshot'] = self.volume_api.get_snapshot(context, snapshot_id) except exception.NotFound: explanation = _('snapshot id:%s not found') % snapshot_id raise exc.HTTPNotFound(explanation=explanation) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: try: kwargs['source_volume'] = \ self.volume_api.get_volume(context, source_volid) except exception.NotFound: explanation = _('source vol id:%s not found') % source_volid raise exc.HTTPNotFound(explanation=explanation) else: kwargs['source_volume'] = None size = volume.get('size', None) if size is None and kwargs['snapshot'] is not None: size = kwargs['snapshot']['volume_size'] elif size is None and kwargs['source_volume'] is not None: size = kwargs['source_volume']['size'] LOG.info(_LI("Create volume of %s GB"), size, context=context) multiattach = volume.get('multiattach', False) kwargs['multiattach'] = multiattach image_href = None image_uuid = None if self.ext_mgr.is_loaded('os-image-create'): # NOTE(jdg): misleading name "imageRef" as it's an image-id image_href = volume.get('imageRef') if image_href is not None: image_uuid = self._image_uuid_from_href(image_href) kwargs['image_id'] = image_uuid kwargs['availability_zone'] = volume.get('availability_zone', None) new_volume = self.volume_api.create(context, size, volume.get('display_name'), volume.get('display_description'), **kwargs) retval = _translate_volume_detail_view(context, new_volume, image_uuid) return {'volume': retval} def _get_volume_search_options(self): """Return volume search options allowed by non-admin.""" return ('display_name', 'status', 'metadata') @wsgi.serializers(xml=VolumeTemplate) def update(self, req, id, body): """Update a volume.""" context = req.environ['cinder.context'] if not body: raise exc.HTTPUnprocessableEntity() if 'volume' not in body: raise exc.HTTPUnprocessableEntity() volume = body['volume'] update_dict = {} valid_update_keys = ( 'display_name', 'display_description', 'metadata', ) for key in valid_update_keys: if key in volume: 
update_dict[key] = volume[key] try: volume = self.volume_api.get(context, id, viewable_admin_meta=True) volume_utils.notify_about_volume_usage(context, volume, 'update.start') self.volume_api.update(context, volume, update_dict) except exception.NotFound: raise exc.HTTPNotFound() volume.update(update_dict) utils.add_visible_admin_metadata(volume) volume_utils.notify_about_volume_usage(context, volume, 'update.end') return {'volume': _translate_volume_detail_view(context, volume)} def create_resource(ext_mgr): return wsgi.Resource(VolumeController(ext_mgr)) cinder-8.0.0/cinder/api/v3/0000775000567000056710000000000012701406543016473 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/v3/__init__.py0000664000567000056710000000000012701406250020565 0ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder/api/v3/router.py0000664000567000056710000000767212701406250020374 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack Volume API. """ from oslo_log import log as logging from cinder.api import extensions import cinder.api.openstack from cinder.api.v2 import limits from cinder.api.v2 import snapshot_metadata from cinder.api.v2 import snapshots from cinder.api.v2 import types from cinder.api.v2 import volume_metadata from cinder.api.v2 import volumes from cinder.api import versions LOG = logging.getLogger(__name__) class APIRouter(cinder.api.openstack.APIRouter): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = versions.create_resource() mapper.connect("versions", "/", controller=self.resources['versions'], action='index') mapper.redirect("", "/") self.resources['volumes'] = volumes.create_resource(ext_mgr) mapper.resource("volume", "volumes", controller=self.resources['volumes'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['types'] = types.create_resource() mapper.resource("type", "types", controller=self.resources['types'], member={'action': 'POST'}) self.resources['snapshots'] = snapshots.create_resource(ext_mgr) mapper.resource("snapshot", "snapshots", controller=self.resources['snapshots'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['limits'] = limits.create_resource() mapper.resource("limit", "limits", controller=self.resources['limits']) self.resources['snapshot_metadata'] = \ snapshot_metadata.create_resource() snapshot_metadata_controller = self.resources['snapshot_metadata'] mapper.resource("snapshot_metadata", "metadata", controller=snapshot_metadata_controller, parent_resource=dict(member_name='snapshot', collection_name='snapshots')) mapper.connect("metadata", "/{project_id}/snapshots/{snapshot_id}/metadata", 
controller=snapshot_metadata_controller, action='update_all', conditions={"method": ['PUT']}) self.resources['volume_metadata'] = \ volume_metadata.create_resource() volume_metadata_controller = self.resources['volume_metadata'] mapper.resource("volume_metadata", "metadata", controller=volume_metadata_controller, parent_resource=dict(member_name='volume', collection_name='volumes')) mapper.connect("metadata", "/{project_id}/volumes/{volume_id}/metadata", controller=volume_metadata_controller, action='update_all', conditions={"method": ['PUT']}) cinder-8.0.0/cinder/rpc.py0000664000567000056710000001702412701406250016527 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', 'TRANSPORT_ALIASES', ] from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import importutils profiler = importutils.try_import('osprofiler.profiler') import cinder.context import cinder.exception from cinder.i18n import _LI from cinder import objects from cinder.objects import base CONF = cfg.CONF LOG = logging.getLogger(__name__) TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ cinder.exception.__name__, ] EXTRA_EXMODS = [] # NOTE(flaper87): The cinder.openstack.common.rpc entries are # for backwards compat with Havana rpc_backend configuration # values. The cinder.rpc entries are for compat with Folsom values. 
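# A hedged usage sketch (hypothetical topic/version values): once rpc.init()
# has populated the module-level TRANSPORT, a caller obtains a versioned
# client roughly like this:
#
#     target = messaging.Target(topic='cinder-volume', version='1.0')
#     client = get_client(target, version_cap='1.30',
#                         serializer=base.CinderObjectSerializer())
#     cctxt = client.prepare(server=host)
#
# The alias table below maps the deprecated config values named above onto
# oslo.messaging transport drivers.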
TRANSPORT_ALIASES = { 'cinder.openstack.common.rpc.impl_kombu': 'rabbit', 'cinder.openstack.common.rpc.impl_qpid': 'qpid', 'cinder.openstack.common.rpc.impl_zmq': 'zmq', 'cinder.rpc.impl_kombu': 'rabbit', 'cinder.rpc.impl_qpid': 'qpid', 'cinder.rpc.impl_zmq': 'zmq', } def init(conf): global TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_transport(conf, allowed_remote_exmods=exmods, aliases=TRANSPORT_ALIASES) serializer = RequestContextSerializer(JsonPayloadSerializer()) NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer) def initialized(): return None not in [TRANSPORT, NOTIFIER] def cleanup(): global TRANSPORT, NOTIFIER assert TRANSPORT is not None assert NOTIFIER is not None TRANSPORT.cleanup() TRANSPORT = NOTIFIER = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class JsonPayloadSerializer(messaging.NoOpSerializer): @staticmethod def serialize_entity(context, entity): return jsonutils.to_primitive(entity, convert_instances=True) class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): _context = context.to_dict() if profiler is not None: prof = profiler.get() if prof: trace_info = { "hmac_key": prof.hmac_key, "base_id": prof.get_base_id(), "parent_id": prof.get_id() } _context.update({"trace_info": trace_info}) return _context def deserialize_context(self, context): trace_info = context.pop("trace_info", None) if trace_info: if profiler is not None: profiler.init(**trace_info) return cinder.context.RequestContext.from_dict(context) def get_client(target, version_cap=None, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.RPCClient(TRANSPORT, target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor='eventlet', serializer=serializer) def get_notifier(service=None, host=None, publisher_id=None): assert NOTIFIER is not None if not publisher_id: publisher_id = "%s.%s" % (service, host or CONF.host) return NOTIFIER.prepare(publisher_id=publisher_id) LAST_RPC_VERSIONS = {} LAST_OBJ_VERSIONS = {} class RPCAPI(object): """Mixin class aggregating methods related to RPC API compatibility.""" RPC_API_VERSION = '1.0' TOPIC = '' BINARY = '' def __init__(self): target = messaging.Target(topic=self.TOPIC, version=self.RPC_API_VERSION) obj_version_cap = self._determine_obj_version_cap() serializer = base.CinderObjectSerializer(obj_version_cap) rpc_version_cap = self._determine_rpc_version_cap() self.client = get_client(target, version_cap=rpc_version_cap, serializer=serializer) def _determine_rpc_version_cap(self): global LAST_RPC_VERSIONS if self.BINARY in LAST_RPC_VERSIONS: return LAST_RPC_VERSIONS[self.BINARY] version_cap = objects.Service.get_minimum_rpc_version( cinder.context.get_admin_context(), self.BINARY) if version_cap == 'liberty': # NOTE(dulek): 
This means that one of the services is Liberty, # we should cap to its RPC version. version_cap = LIBERTY_RPC_VERSIONS[self.BINARY] LOG.info(_LI('Automatically selected %(binary)s RPC version ' '%(version)s as minimum service version.'), {'binary': self.BINARY, 'version': version_cap}) LAST_RPC_VERSIONS[self.BINARY] = version_cap return version_cap def _determine_obj_version_cap(self): global LAST_OBJ_VERSIONS if self.BINARY in LAST_OBJ_VERSIONS: return LAST_OBJ_VERSIONS[self.BINARY] version_cap = objects.Service.get_minimum_obj_version( cinder.context.get_admin_context(), self.BINARY) LOG.info(_LI('Automatically selected %(binary)s objects version ' '%(version)s as minimum service version.'), {'binary': self.BINARY, 'version': version_cap}) LAST_OBJ_VERSIONS[self.BINARY] = version_cap return version_cap # FIXME(dulek): Liberty hasn't reported its RPC versions, so we need to have # them hardcoded. This dict may go away as soon as we drop compatibility with # L, which should be in early N. # # This is the only time we need to have such a dictionary. We don't need to add # similar ones for any release following Liberty. LIBERTY_RPC_VERSIONS = { 'cinder-volume': '1.30', 'cinder-scheduler': '1.8', # NOTE(dulek): backup.manager had specified version '1.2', but backup.rpcapi # was really only sending messages up to '1.1'. 'cinder-backup': '1.1', } cinder-8.0.0/pylintrc0000664000567000056710000000240612701406250015712 0ustar jenkinsjenkins00000000000000# The format of this file isn't really documented; just use --generate-rcfile [Messages Control] # NOTE(justinsb): We might want to have a 2nd strict pylintrc in future # C0111: Don't require docstrings on every method # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. disable=C0111,W0511,W0142,W0622 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$ # Module names matching cinder-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(cinder-[a-z0-9_-]+))$ # Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 [Variables] dummy-variables-rgx=_ [Typecheck] # Disable warnings on the HTTPSConnection classes because pylint doesn't # support importing from six.moves yet, see: # https://bitbucket.org/logilab/pylint/issue/550/ ignored-classes=HTTPSConnection cinder-8.0.0/.testr.conf0000664000567000056710000000054412701406250016212 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./cinder/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list cinder-8.0.0/cinder.egg-info/0000775000567000056710000000000012701406543017064 5ustar jenkinsjenkins00000000000000cinder-8.0.0/cinder.egg-info/requires.txt0000664000567000056710000000233512701406542021466 0ustar jenkinsjenkins00000000000000pbr>=1.6 Babel>=1.3 decorator>=3.4.0 eventlet!=0.18.3,>=0.18.2 greenlet>=0.3.2 httplib2>=0.7.5 iso8601>=0.1.9 keystonemiddleware!=4.1.0,>=4.0.0 lxml>=2.3 oauth2client>=1.5.0 oslo.config>=3.7.0 oslo.concurrency>=3.5.0 oslo.context>=0.2.0 oslo.db>=4.1.0 oslo.log>=1.14.0 oslo.messaging>=4.0.0 oslo.middleware>=3.0.0 oslo.policy>=0.5.0 oslo.reports>=0.6.0 oslo.rootwrap>=2.0.0 oslo.serialization>=1.10.0 oslo.service>=1.0.0 oslo.utils>=3.5.0 oslo.versionedobjects>=1.5.0 osprofiler>=1.1.0 paramiko>=1.16.0 Paste PasteDeploy>=1.5.0 pycrypto>=2.6 pyparsing>=2.0.1 python-barbicanclient>=3.3.0 python-glanceclient>=2.0.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 python-novaclient!=2.33.0,>=2.29.0 python-swiftclient>=2.2.0 pytz>=2013.6 requests!=2.9.0,>=2.8.1 retrying!=1.3.0,>=1.2.3 taskflow>=1.26.0 rtslib-fb>=2.1.41 simplejson>=2.2.0 six>=1.9.0 SQLAlchemy<1.1.0,>=1.0.10 sqlalchemy-migrate>=0.9.6 stevedore>=1.5.0 suds-jurko>=0.6 WebOb>=1.2.3 oslo.i18n>=2.1.0 oslo.vmware>=1.16.0 os-brick>=1.0.0 os-win>=0.2.3 tooz>=1.28.0 google-api-python-client>=1.4.2 [:(python_version!='2.7')] Routes!=2.0,>=1.12.3 [:(python_version=='2.7' or python_version=='2.6' or python_version=='3.3')] enum34 [:(python_version=='2.7')] Routes!=2.0,!=2.1,>=1.12.3 cinder-8.0.0/cinder.egg-info/dependency_links.txt0000664000567000056710000000000112701406542023131 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder.egg-info/not-zip-safe0000664000567000056710000000000112701406502021305 0ustar jenkinsjenkins00000000000000 cinder-8.0.0/cinder.egg-info/SOURCES.txt0000664000567000056710000013660512701406543020763 0ustar jenkinsjenkins00000000000000.coveragerc .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst babel.cfg pylintrc requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini cinder/__init__.py cinder/context.py cinder/coordination.py cinder/exception.py cinder/flow_utils.py cinder/i18n.py cinder/manager.py cinder/opts.py cinder/policy.py cinder/quota.py cinder/quota_utils.py cinder/rpc.py cinder/service.py cinder/ssh_utils.py cinder/test.py cinder/utils.py cinder/version.py cinder.egg-info/PKG-INFO cinder.egg-info/SOURCES.txt cinder.egg-info/dependency_links.txt cinder.egg-info/entry_points.txt cinder.egg-info/not-zip-safe cinder.egg-info/pbr.json cinder.egg-info/requires.txt cinder.egg-info/top_level.txt cinder/api/__init__.py cinder/api/common.py cinder/api/extensions.py cinder/api/urlmap.py cinder/api/versions.py cinder/api/xmlutil.py cinder/api/contrib/__init__.py 
cinder/api/contrib/admin_actions.py cinder/api/contrib/availability_zones.py cinder/api/contrib/backups.py cinder/api/contrib/capabilities.py cinder/api/contrib/cgsnapshots.py cinder/api/contrib/consistencygroups.py cinder/api/contrib/extended_services.py cinder/api/contrib/extended_snapshot_attributes.py cinder/api/contrib/hosts.py cinder/api/contrib/image_create.py cinder/api/contrib/qos_specs_manage.py cinder/api/contrib/quota_classes.py cinder/api/contrib/quotas.py cinder/api/contrib/scheduler_hints.py cinder/api/contrib/scheduler_stats.py cinder/api/contrib/services.py cinder/api/contrib/snapshot_actions.py cinder/api/contrib/snapshot_manage.py cinder/api/contrib/snapshot_unmanage.py cinder/api/contrib/types_extra_specs.py cinder/api/contrib/types_manage.py cinder/api/contrib/used_limits.py cinder/api/contrib/volume_actions.py cinder/api/contrib/volume_encryption_metadata.py cinder/api/contrib/volume_host_attribute.py cinder/api/contrib/volume_image_metadata.py cinder/api/contrib/volume_manage.py cinder/api/contrib/volume_mig_status_attribute.py cinder/api/contrib/volume_tenant_attribute.py cinder/api/contrib/volume_transfer.py cinder/api/contrib/volume_type_access.py cinder/api/contrib/volume_type_encryption.py cinder/api/contrib/volume_unmanage.py cinder/api/middleware/__init__.py cinder/api/middleware/auth.py cinder/api/middleware/fault.py cinder/api/middleware/sizelimit.py cinder/api/openstack/__init__.py cinder/api/openstack/api_version_request.py cinder/api/openstack/rest_api_version_history.rst cinder/api/openstack/versioned_method.py cinder/api/openstack/wsgi.py cinder/api/schemas/atom-link.rng cinder/api/schemas/v1.1/extension.rng cinder/api/schemas/v1.1/extensions.rng cinder/api/schemas/v1.1/limits.rng cinder/api/schemas/v1.1/metadata.rng cinder/api/schemas/v1.1/qos_association.rng cinder/api/schemas/v1.1/qos_associations.rng cinder/api/schemas/v1.1/qos_spec.rng cinder/api/schemas/v1.1/qos_specs.rng cinder/api/v1/__init__.py cinder/api/v1/limits.py cinder/api/v1/router.py cinder/api/v1/snapshot_metadata.py cinder/api/v1/snapshots.py cinder/api/v1/types.py cinder/api/v1/volume_metadata.py cinder/api/v1/volumes.py cinder/api/v2/__init__.py cinder/api/v2/limits.py cinder/api/v2/router.py cinder/api/v2/snapshot_metadata.py cinder/api/v2/snapshots.py cinder/api/v2/types.py cinder/api/v2/volume_metadata.py cinder/api/v2/volumes.py cinder/api/v2/views/__init__.py cinder/api/v2/views/types.py cinder/api/v2/views/volumes.py cinder/api/v3/__init__.py cinder/api/v3/router.py cinder/api/views/__init__.py cinder/api/views/availability_zones.py cinder/api/views/backups.py cinder/api/views/capabilities.py cinder/api/views/cgsnapshots.py cinder/api/views/consistencygroups.py cinder/api/views/limits.py cinder/api/views/qos_specs.py cinder/api/views/scheduler_stats.py cinder/api/views/snapshots.py cinder/api/views/transfers.py cinder/api/views/types.py cinder/api/views/versions.py cinder/backup/__init__.py cinder/backup/api.py cinder/backup/chunkeddriver.py cinder/backup/driver.py cinder/backup/manager.py cinder/backup/rpcapi.py cinder/backup/drivers/__init__.py cinder/backup/drivers/ceph.py cinder/backup/drivers/glusterfs.py cinder/backup/drivers/google.py cinder/backup/drivers/nfs.py cinder/backup/drivers/posix.py cinder/backup/drivers/swift.py cinder/backup/drivers/tsm.py cinder/brick/README.txt cinder/brick/__init__.py cinder/brick/local_dev/__init__.py cinder/brick/local_dev/lvm.py cinder/cmd/__init__.py cinder/cmd/all.py cinder/cmd/api.py cinder/cmd/backup.py cinder/cmd/manage.py 
cinder/cmd/rtstool.py cinder/cmd/scheduler.py cinder/cmd/volume.py cinder/cmd/volume_usage_audit.py cinder/common/__init__.py cinder/common/config.py cinder/common/constants.py cinder/common/sqlalchemyutils.py cinder/compute/__init__.py cinder/compute/nova.py cinder/config/cinder-config-generator.conf cinder/config/generate_cinder_opts.py cinder/consistencygroup/__init__.py cinder/consistencygroup/api.py cinder/db/__init__.py cinder/db/api.py cinder/db/base.py cinder/db/migration.py cinder/db/sqlalchemy/__init__.py cinder/db/sqlalchemy/api.py cinder/db/sqlalchemy/models.py cinder/db/sqlalchemy/migrate_repo/README cinder/db/sqlalchemy/migrate_repo/__init__.py cinder/db/sqlalchemy/migrate_repo/manage.py cinder/db/sqlalchemy/migrate_repo/migrate.cfg cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py cinder/db/sqlalchemy/migrate_repo/versions/011_add_bootable_column.py cinder/db/sqlalchemy/migrate_repo/versions/012_add_attach_host_column.py cinder/db/sqlalchemy/migrate_repo/versions/013_add_provider_geometry_column.py cinder/db/sqlalchemy/migrate_repo/versions/014_add_name_id.py cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py cinder/db/sqlalchemy/migrate_repo/versions/019_add_migration_status.py cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py cinder/db/sqlalchemy/migrate_repo/versions/022_add_reason_column_to_service.py cinder/db/sqlalchemy/migrate_repo/versions/023_add_expire_reservations_index.py cinder/db/sqlalchemy/migrate_repo/versions/024_add_replication_support.py cinder/db/sqlalchemy/migrate_repo/versions/025_add_consistencygroup.py cinder/db/sqlalchemy/migrate_repo/versions/026_add_consistencygroup_quota_class.py cinder/db/sqlalchemy/migrate_repo/versions/027_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/028_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/029_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/030_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/031_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/032_add_volume_type_projects.py cinder/db/sqlalchemy/migrate_repo/versions/033_add_encryption_unique_key.py cinder/db/sqlalchemy/migrate_repo/versions/034_volume_type_add_desc_column.py cinder/db/sqlalchemy/migrate_repo/versions/035_add_provider_id_column.py cinder/db/sqlalchemy/migrate_repo/versions/036_add_provider_id_column_to_snapshots.py cinder/db/sqlalchemy/migrate_repo/versions/037_add_cgsnapshot_id_column_to_consistencygroups.py cinder/db/sqlalchemy/migrate_repo/versions/038_add_driver_initiator_data_table.py 
cinder/db/sqlalchemy/migrate_repo/versions/039_add_parent_id_to_backups.py cinder/db/sqlalchemy/migrate_repo/versions/040_add_volume_attachment.py cinder/db/sqlalchemy/migrate_repo/versions/041_add_modified_at_column_to_service.py cinder/db/sqlalchemy/migrate_repo/versions/042_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/043_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/044_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/045_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/046_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/047_add_per_volume_quota.py cinder/db/sqlalchemy/migrate_repo/versions/048_add_allocated_in_quotas.py cinder/db/sqlalchemy/migrate_repo/versions/049_add_temp_volume_snapshot_ids_to_backups.py cinder/db/sqlalchemy/migrate_repo/versions/050_add_previous_status_to_volumes.py cinder/db/sqlalchemy/migrate_repo/versions/051_add_source_cgid_column_to_consistencygroups.py cinder/db/sqlalchemy/migrate_repo/versions/052_add_provider_auth_column_to_snapshots.py cinder/db/sqlalchemy/migrate_repo/versions/053_add_version_columns_to_service.py cinder/db/sqlalchemy/migrate_repo/versions/054_add_has_dependent_backups_column_to_backups.py cinder/db/sqlalchemy/migrate_repo/versions/055_add_image_volume_cache_table.py cinder/db/sqlalchemy/migrate_repo/versions/056_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/057_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/058_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/059_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/060_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py cinder/db/sqlalchemy/migrate_repo/versions/062_deleted_type_to_Integer.py cinder/db/sqlalchemy/migrate_repo/versions/062_sqlite_upgrade.sql cinder/db/sqlalchemy/migrate_repo/versions/063_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/064_add_restore_volume_id_to_backups.py cinder/db/sqlalchemy/migrate_repo/versions/065_add_replication_info_to_service.py cinder/db/sqlalchemy/migrate_repo/versions/066_add_allocated_id_column_to_reservations.py cinder/db/sqlalchemy/migrate_repo/versions/067_readd_iscsi_targets_table.py cinder/db/sqlalchemy/migrate_repo/versions/068_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/069_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/070_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/071_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/072_placeholder.py cinder/db/sqlalchemy/migrate_repo/versions/__init__.py cinder/hacking/__init__.py cinder/hacking/checks.py cinder/image/__init__.py cinder/image/cache.py cinder/image/glance.py cinder/image/image_utils.py cinder/keymgr/__init__.py cinder/keymgr/barbican.py cinder/keymgr/conf_key_mgr.py cinder/keymgr/key.py cinder/keymgr/key_mgr.py cinder/keymgr/not_implemented_key_mgr.py cinder/locale/cinder-log-error.pot cinder/locale/cinder-log-info.pot cinder/locale/cinder-log-warning.pot cinder/locale/cinder.pot cinder/locale/cs/LC_MESSAGES/cinder-log-error.po cinder/locale/cs/LC_MESSAGES/cinder-log-info.po cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po cinder/locale/cs/LC_MESSAGES/cinder.po cinder/locale/de/LC_MESSAGES/cinder.po cinder/locale/es/LC_MESSAGES/cinder.po cinder/locale/fr/LC_MESSAGES/cinder.po cinder/locale/it/LC_MESSAGES/cinder-log-error.po cinder/locale/it/LC_MESSAGES/cinder-log-info.po cinder/locale/it/LC_MESSAGES/cinder.po cinder/locale/ja/LC_MESSAGES/cinder.po 
cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po cinder/locale/ko_KR/LC_MESSAGES/cinder.po cinder/locale/pt_BR/LC_MESSAGES/cinder.po cinder/locale/ru/LC_MESSAGES/cinder.po cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po cinder/locale/tr_TR/LC_MESSAGES/cinder.po cinder/locale/zh_CN/LC_MESSAGES/cinder.po cinder/locale/zh_TW/LC_MESSAGES/cinder.po cinder/objects/__init__.py cinder/objects/backup.py cinder/objects/base.py cinder/objects/cgsnapshot.py cinder/objects/consistencygroup.py cinder/objects/fields.py cinder/objects/service.py cinder/objects/snapshot.py cinder/objects/volume.py cinder/objects/volume_attachment.py cinder/objects/volume_type.py cinder/replication/__init__.py cinder/replication/api.py cinder/scheduler/__init__.py cinder/scheduler/base_filter.py cinder/scheduler/base_handler.py cinder/scheduler/base_weight.py cinder/scheduler/driver.py cinder/scheduler/filter_scheduler.py cinder/scheduler/host_manager.py cinder/scheduler/manager.py cinder/scheduler/rpcapi.py cinder/scheduler/scheduler_options.py cinder/scheduler/evaluator/__init__.py cinder/scheduler/evaluator/evaluator.py cinder/scheduler/filters/__init__.py cinder/scheduler/filters/affinity_filter.py cinder/scheduler/filters/availability_zone_filter.py cinder/scheduler/filters/capabilities_filter.py cinder/scheduler/filters/capacity_filter.py cinder/scheduler/filters/driver_filter.py cinder/scheduler/filters/extra_specs_ops.py cinder/scheduler/filters/ignore_attempted_hosts_filter.py cinder/scheduler/filters/instance_locality_filter.py cinder/scheduler/filters/json_filter.py cinder/scheduler/flows/__init__.py cinder/scheduler/flows/create_volume.py cinder/scheduler/weights/__init__.py cinder/scheduler/weights/capacity.py cinder/scheduler/weights/chance.py cinder/scheduler/weights/goodness.py cinder/scheduler/weights/volume_number.py cinder/testing/README.rst cinder/tests/__init__.py cinder/tests/fixtures.py cinder/tests/functional/__init__.py cinder/tests/functional/functional_helpers.py cinder/tests/functional/test_extensions.py cinder/tests/functional/test_login.py cinder/tests/functional/test_volumes.py cinder/tests/functional/test_xml.py cinder/tests/functional/api/__init__.py cinder/tests/functional/api/client.py cinder/tests/functional/api/foxinsocks.py cinder/tests/unit/__init__.py cinder/tests/unit/cast_as_call.py cinder/tests/unit/conf_fixture.py cinder/tests/unit/declare_conf.py cinder/tests/unit/fake_backup.py cinder/tests/unit/fake_consistencygroup.py cinder/tests/unit/fake_constants.py cinder/tests/unit/fake_driver.py cinder/tests/unit/fake_hpe_3par_client.py cinder/tests/unit/fake_hpe_client_exceptions.py cinder/tests/unit/fake_hpe_lefthand_client.py cinder/tests/unit/fake_notifier.py cinder/tests/unit/fake_service.py cinder/tests/unit/fake_snapshot.py cinder/tests/unit/fake_utils.py cinder/tests/unit/fake_vmem_client.py cinder/tests/unit/fake_volume.py cinder/tests/unit/policy.json cinder/tests/unit/runtime_conf.py cinder/tests/unit/test_api.py cinder/tests/unit/test_api_urlmap.py cinder/tests/unit/test_backup.py cinder/tests/unit/test_backup_ceph.py cinder/tests/unit/test_backup_driver_base.py cinder/tests/unit/test_backup_google.py cinder/tests/unit/test_backup_swift.py cinder/tests/unit/test_backup_tsm.py cinder/tests/unit/test_block_device.py cinder/tests/unit/test_blockbridge.py cinder/tests/unit/test_cloudbyte.py 
cinder/tests/unit/test_cmd.py cinder/tests/unit/test_coho.py cinder/tests/unit/test_conf.py cinder/tests/unit/test_context.py cinder/tests/unit/test_coordination.py cinder/tests/unit/test_db_api.py cinder/tests/unit/test_dellfc.py cinder/tests/unit/test_dellsc.py cinder/tests/unit/test_dellscapi.py cinder/tests/unit/test_dothill.py cinder/tests/unit/test_drbdmanagedrv.py cinder/tests/unit/test_emc_vmax.py cinder/tests/unit/test_emc_vnx.py cinder/tests/unit/test_emc_xtremio.py cinder/tests/unit/test_eqlx.py cinder/tests/unit/test_evaluator.py cinder/tests/unit/test_exception.py cinder/tests/unit/test_fixtures.py cinder/tests/unit/test_glusterfs.py cinder/tests/unit/test_gpfs.py cinder/tests/unit/test_hacking.py cinder/tests/unit/test_hitachi_hbsd_horcm_fc.py cinder/tests/unit/test_hitachi_hbsd_snm2_fc.py cinder/tests/unit/test_hitachi_hbsd_snm2_iscsi.py cinder/tests/unit/test_hitachi_hnas_backend.py cinder/tests/unit/test_hitachi_hnas_iscsi.py cinder/tests/unit/test_hitachi_hnas_nfs.py cinder/tests/unit/test_hpe3par.py cinder/tests/unit/test_hpe_xp_fc.py cinder/tests/unit/test_hpelefthand.py cinder/tests/unit/test_huawei_drivers.py cinder/tests/unit/test_huawei_drivers_compatibility.py cinder/tests/unit/test_ibm_flashsystem.py cinder/tests/unit/test_ibm_flashsystem_iscsi.py cinder/tests/unit/test_ibm_xiv_ds8k.py cinder/tests/unit/test_image_utils.py cinder/tests/unit/test_infortrend_cli.py cinder/tests/unit/test_infortrend_common.py cinder/tests/unit/test_migrations.py cinder/tests/unit/test_misc.py cinder/tests/unit/test_netapp.py cinder/tests/unit/test_netapp_nfs.py cinder/tests/unit/test_netapp_ssc.py cinder/tests/unit/test_nexenta.py cinder/tests/unit/test_nexenta5_iscsi.py cinder/tests/unit/test_nexenta5_nfs.py cinder/tests/unit/test_nexenta_edge.py cinder/tests/unit/test_nfs.py cinder/tests/unit/test_nimble.py cinder/tests/unit/test_prophetstor_dpl.py cinder/tests/unit/test_pure.py cinder/tests/unit/test_qos_specs.py cinder/tests/unit/test_quobyte.py cinder/tests/unit/test_quota.py cinder/tests/unit/test_quota_utils.py cinder/tests/unit/test_rbd.py cinder/tests/unit/test_remotefs.py cinder/tests/unit/test_rpc.py cinder/tests/unit/test_san.py cinder/tests/unit/test_scality.py cinder/tests/unit/test_service.py cinder/tests/unit/test_sheepdog.py cinder/tests/unit/test_smbfs.py cinder/tests/unit/test_solidfire.py cinder/tests/unit/test_ssh_utils.py cinder/tests/unit/test_storwize_svc.py cinder/tests/unit/test_tegile.py cinder/tests/unit/test_test.py cinder/tests/unit/test_test_utils.py cinder/tests/unit/test_tintri.py cinder/tests/unit/test_utils.py cinder/tests/unit/test_v7000_common.py cinder/tests/unit/test_v7000_fcp.py cinder/tests/unit/test_vmware_datastore.py cinder/tests/unit/test_vmware_vmdk.py cinder/tests/unit/test_vmware_volumeops.py cinder/tests/unit/test_volume.py cinder/tests/unit/test_volume_configuration.py cinder/tests/unit/test_volume_glance_metadata.py cinder/tests/unit/test_volume_rpcapi.py cinder/tests/unit/test_volume_throttling.py cinder/tests/unit/test_volume_transfer.py cinder/tests/unit/test_volume_types.py cinder/tests/unit/test_volume_types_extra_specs.py cinder/tests/unit/test_volume_utils.py cinder/tests/unit/test_vzstorage.py cinder/tests/unit/test_xio.py cinder/tests/unit/test_zfssa.py cinder/tests/unit/utils.py cinder/tests/unit/api/__init__.py cinder/tests/unit/api/common.py cinder/tests/unit/api/fakes.py cinder/tests/unit/api/test_common.py cinder/tests/unit/api/test_versions.py cinder/tests/unit/api/test_xmlutil.py 
cinder/tests/unit/api/contrib/__init__.py cinder/tests/unit/api/contrib/test_admin_actions.py cinder/tests/unit/api/contrib/test_availability_zones.py cinder/tests/unit/api/contrib/test_backups.py cinder/tests/unit/api/contrib/test_capabilities.py cinder/tests/unit/api/contrib/test_cgsnapshots.py cinder/tests/unit/api/contrib/test_consistencygroups.py cinder/tests/unit/api/contrib/test_extended_snapshot_attributes.py cinder/tests/unit/api/contrib/test_hosts.py cinder/tests/unit/api/contrib/test_qos_specs_manage.py cinder/tests/unit/api/contrib/test_quotas.py cinder/tests/unit/api/contrib/test_quotas_classes.py cinder/tests/unit/api/contrib/test_scheduler_hints.py cinder/tests/unit/api/contrib/test_scheduler_stats.py cinder/tests/unit/api/contrib/test_services.py cinder/tests/unit/api/contrib/test_snapshot_actions.py cinder/tests/unit/api/contrib/test_snapshot_manage.py cinder/tests/unit/api/contrib/test_snapshot_unmanage.py cinder/tests/unit/api/contrib/test_types_extra_specs.py cinder/tests/unit/api/contrib/test_types_manage.py cinder/tests/unit/api/contrib/test_used_limits.py cinder/tests/unit/api/contrib/test_volume_actions.py cinder/tests/unit/api/contrib/test_volume_encryption_metadata.py cinder/tests/unit/api/contrib/test_volume_host_attribute.py cinder/tests/unit/api/contrib/test_volume_image_metadata.py cinder/tests/unit/api/contrib/test_volume_manage.py cinder/tests/unit/api/contrib/test_volume_migration_status_attribute.py cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py cinder/tests/unit/api/contrib/test_volume_transfer.py cinder/tests/unit/api/contrib/test_volume_type_access.py cinder/tests/unit/api/contrib/test_volume_type_encryption.py cinder/tests/unit/api/contrib/test_volume_unmanage.py cinder/tests/unit/api/middleware/__init__.py cinder/tests/unit/api/middleware/test_auth.py cinder/tests/unit/api/middleware/test_faults.py cinder/tests/unit/api/openstack/__init__.py cinder/tests/unit/api/openstack/test_api_version_request.py cinder/tests/unit/api/openstack/test_versioned_method.py cinder/tests/unit/api/openstack/test_wsgi.py cinder/tests/unit/api/v1/__init__.py cinder/tests/unit/api/v1/stubs.py cinder/tests/unit/api/v1/test_limits.py cinder/tests/unit/api/v1/test_snapshot_metadata.py cinder/tests/unit/api/v1/test_snapshots.py cinder/tests/unit/api/v1/test_types.py cinder/tests/unit/api/v1/test_volume_metadata.py cinder/tests/unit/api/v1/test_volumes.py cinder/tests/unit/api/v2/__init__.py cinder/tests/unit/api/v2/stubs.py cinder/tests/unit/api/v2/test_limits.py cinder/tests/unit/api/v2/test_snapshot_metadata.py cinder/tests/unit/api/v2/test_snapshots.py cinder/tests/unit/api/v2/test_types.py cinder/tests/unit/api/v2/test_volume_metadata.py cinder/tests/unit/api/v2/test_volumes.py cinder/tests/unit/api/views/__init__.py cinder/tests/unit/api/views/test_versions.py cinder/tests/unit/backup/__init__.py cinder/tests/unit/backup/fake_google_client.py cinder/tests/unit/backup/fake_google_client2.py cinder/tests/unit/backup/fake_service.py cinder/tests/unit/backup/fake_service_with_verify.py cinder/tests/unit/backup/fake_swift_client.py cinder/tests/unit/backup/fake_swift_client2.py cinder/tests/unit/backup/test_rpcapi.py cinder/tests/unit/backup/drivers/__init__.py cinder/tests/unit/backup/drivers/test_backup_glusterfs.py cinder/tests/unit/backup/drivers/test_backup_nfs.py cinder/tests/unit/backup/drivers/test_backup_posix.py cinder/tests/unit/brick/__init__.py cinder/tests/unit/brick/fake_lvm.py cinder/tests/unit/brick/test_brick_lvm.py 
cinder/tests/unit/compute/__init__.py cinder/tests/unit/compute/test_nova.py cinder/tests/unit/db/__init__.py cinder/tests/unit/db/fakes.py cinder/tests/unit/db/test_name_id.py cinder/tests/unit/db/test_purge.py cinder/tests/unit/db/test_qos_specs.py cinder/tests/unit/db/test_transfers.py cinder/tests/unit/db/test_volume_type.py cinder/tests/unit/glance/__init__.py cinder/tests/unit/glance/stubs.py cinder/tests/unit/image/__init__.py cinder/tests/unit/image/fake.py cinder/tests/unit/image/test_cache.py cinder/tests/unit/image/test_glance.py cinder/tests/unit/keymgr/__init__.py cinder/tests/unit/keymgr/fake.py cinder/tests/unit/keymgr/mock_key_mgr.py cinder/tests/unit/keymgr/test_barbican.py cinder/tests/unit/keymgr/test_conf_key_mgr.py cinder/tests/unit/keymgr/test_key.py cinder/tests/unit/keymgr/test_key_mgr.py cinder/tests/unit/keymgr/test_mock_key_mgr.py cinder/tests/unit/keymgr/test_not_implemented_key_mgr.py cinder/tests/unit/monkey_patch_example/__init__.py cinder/tests/unit/monkey_patch_example/example_a.py cinder/tests/unit/monkey_patch_example/example_b.py cinder/tests/unit/objects/__init__.py cinder/tests/unit/objects/test_backup.py cinder/tests/unit/objects/test_base.py cinder/tests/unit/objects/test_cgsnapshot.py cinder/tests/unit/objects/test_consistencygroup.py cinder/tests/unit/objects/test_fields.py cinder/tests/unit/objects/test_objects.py cinder/tests/unit/objects/test_service.py cinder/tests/unit/objects/test_snapshot.py cinder/tests/unit/objects/test_volume.py cinder/tests/unit/objects/test_volume_attachment.py cinder/tests/unit/objects/test_volume_type.py cinder/tests/unit/scheduler/__init__.py cinder/tests/unit/scheduler/fake_hosts.py cinder/tests/unit/scheduler/fakes.py cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py cinder/tests/unit/scheduler/test_base_filter.py cinder/tests/unit/scheduler/test_capacity_weigher.py cinder/tests/unit/scheduler/test_chance_weigher.py cinder/tests/unit/scheduler/test_filter_scheduler.py cinder/tests/unit/scheduler/test_goodness_weigher.py cinder/tests/unit/scheduler/test_host_filters.py cinder/tests/unit/scheduler/test_host_manager.py cinder/tests/unit/scheduler/test_rpcapi.py cinder/tests/unit/scheduler/test_scheduler.py cinder/tests/unit/scheduler/test_scheduler_options.py cinder/tests/unit/scheduler/test_volume_number_weigher.py cinder/tests/unit/scheduler/test_weights.py cinder/tests/unit/targets/__init__.py cinder/tests/unit/targets/targets_fixture.py cinder/tests/unit/targets/test_base_iscsi_driver.py cinder/tests/unit/targets/test_cxt_driver.py cinder/tests/unit/targets/test_iet_driver.py cinder/tests/unit/targets/test_iser_driver.py cinder/tests/unit/targets/test_lio_driver.py cinder/tests/unit/targets/test_scst_driver.py cinder/tests/unit/targets/test_tgt_driver.py cinder/tests/unit/volume/__init__.py cinder/tests/unit/volume/drivers/__init__.py cinder/tests/unit/volume/drivers/test_datera.py cinder/tests/unit/volume/drivers/test_fujitsu.py cinder/tests/unit/volume/drivers/test_hgst.py cinder/tests/unit/volume/drivers/disco/__init__.py cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py cinder/tests/unit/volume/drivers/disco/test_create_snapshot.py cinder/tests/unit/volume/drivers/disco/test_create_volume.py cinder/tests/unit/volume/drivers/disco/test_create_volume_from_snapshot.py cinder/tests/unit/volume/drivers/disco/test_delete_snapshot.py cinder/tests/unit/volume/drivers/disco/test_delete_volume.py cinder/tests/unit/volume/drivers/disco/test_extend_volume.py 
cinder/tests/unit/volume/drivers/emc/__init__.py cinder/tests/unit/volume/drivers/emc/scaleio/__init__.py cinder/tests/unit/volume/drivers/emc/scaleio/mocks.py cinder/tests/unit/volume/drivers/emc/scaleio/test_consistencygroups.py cinder/tests/unit/volume/drivers/emc/scaleio/test_create_cloned_volume.py cinder/tests/unit/volume/drivers/emc/scaleio/test_create_snapshot.py cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume.py cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_snapshot.py cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_volume.py cinder/tests/unit/volume/drivers/emc/scaleio/test_extend_volume.py cinder/tests/unit/volume/drivers/emc/scaleio/test_initialize_connection.py cinder/tests/unit/volume/drivers/emc/scaleio/test_manage_existing.py cinder/tests/unit/volume/drivers/emc/scaleio/test_misc.py cinder/tests/unit/volume/drivers/netapp/__init__.py cinder/tests/unit/volume/drivers/netapp/fakes.py cinder/tests/unit/volume/drivers/netapp/test_common.py cinder/tests/unit/volume/drivers/netapp/test_utils.py cinder/tests/unit/volume/drivers/netapp/dataontap/__init__.py cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_driver_interfaces.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/__init__.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_7mode.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py cinder/tests/unit/volume/drivers/netapp/eseries/__init__.py cinder/tests/unit/volume/drivers/netapp/eseries/fakes.py cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py cinder/tests/unit/volume/drivers/netapp/eseries/test_driver.py cinder/tests/unit/volume/drivers/netapp/eseries/test_fc_driver.py cinder/tests/unit/volume/drivers/netapp/eseries/test_host_mapper.py cinder/tests/unit/volume/drivers/netapp/eseries/test_iscsi_driver.py cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py cinder/tests/unit/volume/drivers/netapp/eseries/test_utils.py cinder/tests/unit/volume/flows/__init__.py cinder/tests/unit/volume/flows/fake_volume_api.py cinder/tests/unit/volume/flows/test_create_volume_flow.py cinder/tests/unit/volume/flows/test_manage_volume_flow.py cinder/tests/unit/windows/__init__.py cinder/tests/unit/windows/db_fakes.py cinder/tests/unit/windows/test_smbfs.py cinder/tests/unit/windows/test_windows.py 
cinder/tests/unit/windows/test_windows_remotefs.py cinder/tests/unit/zonemanager/__init__.py cinder/tests/unit/zonemanager/test_brcd_fc_san_lookup_service.py cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py cinder/tests/unit/zonemanager/test_brcd_lookup_service.py cinder/tests/unit/zonemanager/test_cisco_fc_san_lookup_service.py cinder/tests/unit/zonemanager/test_cisco_fc_zone_client_cli.py cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py cinder/tests/unit/zonemanager/test_cisco_lookup_service.py cinder/tests/unit/zonemanager/test_driverutils.py cinder/tests/unit/zonemanager/test_fc_zone_manager.py cinder/tests/unit/zonemanager/test_volume_driver.py cinder/transfer/__init__.py cinder/transfer/api.py cinder/volume/__init__.py cinder/volume/api.py cinder/volume/configuration.py cinder/volume/driver.py cinder/volume/manager.py cinder/volume/qos_specs.py cinder/volume/rpcapi.py cinder/volume/throttling.py cinder/volume/utils.py cinder/volume/volume_types.py cinder/volume/drivers/__init__.py cinder/volume/drivers/block_device.py cinder/volume/drivers/blockbridge.py cinder/volume/drivers/coho.py cinder/volume/drivers/datera.py cinder/volume/drivers/drbdmanagedrv.py cinder/volume/drivers/eqlx.py cinder/volume/drivers/glusterfs.py cinder/volume/drivers/hgst.py cinder/volume/drivers/lvm.py cinder/volume/drivers/nfs.py cinder/volume/drivers/nimble.py cinder/volume/drivers/pure.py cinder/volume/drivers/quobyte.py cinder/volume/drivers/rbd.py cinder/volume/drivers/remotefs.py cinder/volume/drivers/scality.py cinder/volume/drivers/sheepdog.py cinder/volume/drivers/smbfs.py cinder/volume/drivers/solidfire.py cinder/volume/drivers/tegile.py cinder/volume/drivers/tintri.py cinder/volume/drivers/vzstorage.py cinder/volume/drivers/xio.py cinder/volume/drivers/cloudbyte/__init__.py cinder/volume/drivers/cloudbyte/cloudbyte.py cinder/volume/drivers/cloudbyte/options.py cinder/volume/drivers/dell/__init__.py cinder/volume/drivers/dell/dell_storagecenter_api.py cinder/volume/drivers/dell/dell_storagecenter_common.py cinder/volume/drivers/dell/dell_storagecenter_fc.py cinder/volume/drivers/dell/dell_storagecenter_iscsi.py cinder/volume/drivers/disco/__init__.py cinder/volume/drivers/disco/disco.py cinder/volume/drivers/dothill/__init__.py cinder/volume/drivers/dothill/dothill_client.py cinder/volume/drivers/dothill/dothill_common.py cinder/volume/drivers/dothill/dothill_fc.py cinder/volume/drivers/dothill/dothill_iscsi.py cinder/volume/drivers/emc/__init__.py cinder/volume/drivers/emc/emc_cli_fc.py cinder/volume/drivers/emc/emc_cli_iscsi.py cinder/volume/drivers/emc/emc_vmax_common.py cinder/volume/drivers/emc/emc_vmax_fast.py cinder/volume/drivers/emc/emc_vmax_fc.py cinder/volume/drivers/emc/emc_vmax_https.py cinder/volume/drivers/emc/emc_vmax_iscsi.py cinder/volume/drivers/emc/emc_vmax_masking.py cinder/volume/drivers/emc/emc_vmax_provision.py cinder/volume/drivers/emc/emc_vmax_provision_v3.py cinder/volume/drivers/emc/emc_vmax_utils.py cinder/volume/drivers/emc/emc_vnx_cli.py cinder/volume/drivers/emc/scaleio.py cinder/volume/drivers/emc/xtremio.py cinder/volume/drivers/fujitsu/__init__.py cinder/volume/drivers/fujitsu/eternus_dx_common.py cinder/volume/drivers/fujitsu/eternus_dx_fc.py cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py cinder/volume/drivers/hitachi/__init__.py cinder/volume/drivers/hitachi/hbsd_basiclib.py cinder/volume/drivers/hitachi/hbsd_common.py 
cinder/volume/drivers/hitachi/hbsd_fc.py cinder/volume/drivers/hitachi/hbsd_horcm.py cinder/volume/drivers/hitachi/hbsd_iscsi.py cinder/volume/drivers/hitachi/hbsd_snm2.py cinder/volume/drivers/hitachi/hnas_backend.py cinder/volume/drivers/hitachi/hnas_iscsi.py cinder/volume/drivers/hitachi/hnas_nfs.py cinder/volume/drivers/hpe/__init__.py cinder/volume/drivers/hpe/hpe_3par_common.py cinder/volume/drivers/hpe/hpe_3par_fc.py cinder/volume/drivers/hpe/hpe_3par_iscsi.py cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py cinder/volume/drivers/hpe/hpe_xp_fc.py cinder/volume/drivers/hpe/hpe_xp_opts.py cinder/volume/drivers/huawei/__init__.py cinder/volume/drivers/huawei/constants.py cinder/volume/drivers/huawei/fc_zone_helper.py cinder/volume/drivers/huawei/huawei_conf.py cinder/volume/drivers/huawei/huawei_driver.py cinder/volume/drivers/huawei/huawei_utils.py cinder/volume/drivers/huawei/hypermetro.py cinder/volume/drivers/huawei/replication.py cinder/volume/drivers/huawei/rest_client.py cinder/volume/drivers/huawei/smartx.py cinder/volume/drivers/ibm/__init__.py cinder/volume/drivers/ibm/flashsystem_common.py cinder/volume/drivers/ibm/flashsystem_fc.py cinder/volume/drivers/ibm/flashsystem_iscsi.py cinder/volume/drivers/ibm/gpfs.py cinder/volume/drivers/ibm/xiv_ds8k.py cinder/volume/drivers/ibm/storwize_svc/__init__.py cinder/volume/drivers/ibm/storwize_svc/replication.py cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py cinder/volume/drivers/infortrend/__init__.py cinder/volume/drivers/infortrend/infortrend_fc_cli.py cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py cinder/volume/drivers/infortrend/eonstor_ds_cli/__init__.py cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py cinder/volume/drivers/lenovo/__init__.py cinder/volume/drivers/lenovo/lenovo_client.py cinder/volume/drivers/lenovo/lenovo_common.py cinder/volume/drivers/lenovo/lenovo_fc.py cinder/volume/drivers/lenovo/lenovo_iscsi.py cinder/volume/drivers/netapp/__init__.py cinder/volume/drivers/netapp/common.py cinder/volume/drivers/netapp/options.py cinder/volume/drivers/netapp/utils.py cinder/volume/drivers/netapp/dataontap/__init__.py cinder/volume/drivers/netapp/dataontap/block_7mode.py cinder/volume/drivers/netapp/dataontap/block_base.py cinder/volume/drivers/netapp/dataontap/block_cmode.py cinder/volume/drivers/netapp/dataontap/fc_7mode.py cinder/volume/drivers/netapp/dataontap/fc_cmode.py cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py cinder/volume/drivers/netapp/dataontap/nfs_7mode.py cinder/volume/drivers/netapp/dataontap/nfs_base.py cinder/volume/drivers/netapp/dataontap/nfs_cmode.py cinder/volume/drivers/netapp/dataontap/ssc_cmode.py cinder/volume/drivers/netapp/dataontap/client/__init__.py cinder/volume/drivers/netapp/dataontap/client/api.py cinder/volume/drivers/netapp/dataontap/client/client_7mode.py cinder/volume/drivers/netapp/dataontap/client/client_base.py cinder/volume/drivers/netapp/dataontap/client/client_cmode.py cinder/volume/drivers/netapp/dataontap/performance/__init__.py cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py cinder/volume/drivers/netapp/dataontap/performance/perf_base.py cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py cinder/volume/drivers/netapp/eseries/__init__.py 
cinder/volume/drivers/netapp/eseries/client.py cinder/volume/drivers/netapp/eseries/exception.py cinder/volume/drivers/netapp/eseries/fc_driver.py cinder/volume/drivers/netapp/eseries/host_mapper.py cinder/volume/drivers/netapp/eseries/iscsi_driver.py cinder/volume/drivers/netapp/eseries/library.py cinder/volume/drivers/netapp/eseries/utils.py cinder/volume/drivers/nexenta/__init__.py cinder/volume/drivers/nexenta/iscsi.py cinder/volume/drivers/nexenta/jsonrpc.py cinder/volume/drivers/nexenta/nfs.py cinder/volume/drivers/nexenta/options.py cinder/volume/drivers/nexenta/utils.py cinder/volume/drivers/nexenta/nexentaedge/__init__.py cinder/volume/drivers/nexenta/nexentaedge/iscsi.py cinder/volume/drivers/nexenta/nexentaedge/jsonrpc.py cinder/volume/drivers/nexenta/ns5/__init__.py cinder/volume/drivers/nexenta/ns5/iscsi.py cinder/volume/drivers/nexenta/ns5/jsonrpc.py cinder/volume/drivers/nexenta/ns5/nfs.py cinder/volume/drivers/prophetstor/__init__.py cinder/volume/drivers/prophetstor/dpl_fc.py cinder/volume/drivers/prophetstor/dpl_iscsi.py cinder/volume/drivers/prophetstor/dplcommon.py cinder/volume/drivers/prophetstor/options.py cinder/volume/drivers/san/__init__.py cinder/volume/drivers/san/san.py cinder/volume/drivers/san/hp/__init__.py cinder/volume/drivers/san/hp/hpmsa_client.py cinder/volume/drivers/san/hp/hpmsa_common.py cinder/volume/drivers/san/hp/hpmsa_fc.py cinder/volume/drivers/san/hp/hpmsa_iscsi.py cinder/volume/drivers/violin/__init__.py cinder/volume/drivers/violin/v7000_common.py cinder/volume/drivers/violin/v7000_fcp.py cinder/volume/drivers/vmware/__init__.py cinder/volume/drivers/vmware/datastore.py cinder/volume/drivers/vmware/exceptions.py cinder/volume/drivers/vmware/vmdk.py cinder/volume/drivers/vmware/volumeops.py cinder/volume/drivers/windows/__init__.py cinder/volume/drivers/windows/constants.py cinder/volume/drivers/windows/remotefs.py cinder/volume/drivers/windows/smbfs.py cinder/volume/drivers/windows/windows.py cinder/volume/drivers/zfssa/__init__.py cinder/volume/drivers/zfssa/restclient.py cinder/volume/drivers/zfssa/webdavclient.py cinder/volume/drivers/zfssa/zfssaiscsi.py cinder/volume/drivers/zfssa/zfssanfs.py cinder/volume/drivers/zfssa/zfssarest.py cinder/volume/flows/__init__.py cinder/volume/flows/common.py cinder/volume/flows/api/__init__.py cinder/volume/flows/api/create_volume.py cinder/volume/flows/api/manage_existing.py cinder/volume/flows/manager/__init__.py cinder/volume/flows/manager/create_volume.py cinder/volume/flows/manager/manage_existing.py cinder/volume/flows/manager/manage_existing_snapshot.py cinder/volume/targets/__init__.py cinder/volume/targets/cxt.py cinder/volume/targets/driver.py cinder/volume/targets/fake.py cinder/volume/targets/iet.py cinder/volume/targets/iscsi.py cinder/volume/targets/iser.py cinder/volume/targets/lio.py cinder/volume/targets/scst.py cinder/volume/targets/tgt.py cinder/wsgi/__init__.py cinder/wsgi/common.py cinder/wsgi/eventlet_server.py cinder/wsgi/wsgi.py cinder/zonemanager/__init__.py cinder/zonemanager/fc_common.py cinder/zonemanager/fc_san_lookup_service.py cinder/zonemanager/fc_zone_manager.py cinder/zonemanager/fczm_constants.py cinder/zonemanager/utils.py cinder/zonemanager/drivers/__init__.py cinder/zonemanager/drivers/driver_utils.py cinder/zonemanager/drivers/fc_zone_driver.py cinder/zonemanager/drivers/brocade/__init__.py cinder/zonemanager/drivers/brocade/brcd_fabric_opts.py cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py 
cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py cinder/zonemanager/drivers/brocade/brcd_fc_zone_connector_factory.py cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py cinder/zonemanager/drivers/brocade/fc_zone_constants.py cinder/zonemanager/drivers/cisco/__init__.py cinder/zonemanager/drivers/cisco/cisco_fabric_opts.py cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py cinder/zonemanager/drivers/cisco/fc_zone_constants.py doc/.gitignore doc/Makefile doc/README.rst doc/find_autodoc_modules.sh doc/generate_autodoc_index.sh doc/ext/__init__.py doc/ext/cinder_autodoc.py doc/ext/cinder_todo.py doc/source/conf.py doc/source/database_architecture.rst doc/source/drivers.rst doc/source/index.rst doc/source/oslo-middleware.rst doc/source/scheduler-filters.rst doc/source/scheduler-weights.rst doc/source/_ga/layout.html doc/source/devref/addmethod.openstackapi.rst doc/source/devref/api.apache.rst doc/source/devref/api.rst doc/source/devref/api_microversion_dev.rst doc/source/devref/api_microversion_history.rst doc/source/devref/architecture.rst doc/source/devref/attach_detach_conventions.rst doc/source/devref/auth.rst doc/source/devref/cinder.rst doc/source/devref/database.rst doc/source/devref/development.environment.rst doc/source/devref/drivers.rst doc/source/devref/fakes.rst doc/source/devref/genconfig.rst doc/source/devref/gerrit.rst doc/source/devref/gmr.rst doc/source/devref/i18n.rst doc/source/devref/index.rst doc/source/devref/jenkins.rst doc/source/devref/launchpad.rst doc/source/devref/migration.rst doc/source/devref/releasenotes.rst doc/source/devref/replication.rst doc/source/devref/rolling.upgrades.rst doc/source/devref/rpc.rst doc/source/devref/scheduler.rst doc/source/devref/services.rst doc/source/devref/threading.rst doc/source/devref/unit_tests.rst doc/source/devref/volume.rst doc/source/images/rpc/arch.png doc/source/images/rpc/arch.svg doc/source/images/rpc/flow1.png doc/source/images/rpc/flow1.svg doc/source/images/rpc/flow2.png doc/source/images/rpc/flow2.svg doc/source/images/rpc/rabt.png doc/source/images/rpc/rabt.svg doc/source/images/rpc/state.png doc/source/man/cinder-manage.rst etc/cinder/README-cinder.conf.sample etc/cinder/api-httpd.conf etc/cinder/api-paste.ini etc/cinder/logging_sample.conf etc/cinder/policy.json etc/cinder/rootwrap.conf etc/cinder/rootwrap.d/volume.filters rally-jobs/README.rst rally-jobs/cinder-fake.yaml rally-jobs/cinder.yaml rally-jobs/extra/README.rst rally-jobs/plugins/README.rst rally-jobs/plugins/__init__.py releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml releasenotes/notes/3par-create-cg-from-source-cg-5634dcf9feb813f6.yaml releasenotes/notes/3par-license-check-51a16b5247675760.yaml releasenotes/notes/3par-manage-unmanage-snapshot-eb4e504e8782ba43.yaml releasenotes/notes/Dell-SC-v2.1-replication-ef6b1d6a4e2795a0.yaml releasenotes/notes/Huawei-volume-driver-replication-v2.1-ada5bc3ad62dc633.yaml releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml releasenotes/notes/VMEM-6000-drivers-removed-9b6675ff7ae5f960.yaml releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml releasenotes/notes/add-coho-driver-b4472bff3f64aa41.yaml releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml releasenotes/notes/add-google-backup-driver-d1e7ac33d5780b79.yaml 
releasenotes/notes/add-tegile-driver-b7919c5f30911998.yaml releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml releasenotes/notes/backup_driver_init_state-d4834fa927e502ab.yaml releasenotes/notes/balanced-fc-port-selection-fbf6b841fea99156.yaml releasenotes/notes/brocade_http_connector-0021e41dfa56e671.yaml releasenotes/notes/brocade_virtual_fabrics_support-d2d0b95b19457c1d.yaml releasenotes/notes/cg-scheduler-change-180a36b77e8cc26b.yaml releasenotes/notes/cg_api_volume_type-7db1856776e707c7.yaml releasenotes/notes/cinder-api-microversions-d2082a095c322ce6.yaml releasenotes/notes/cloudbyte-retype-support-4b9f79f351465279.yaml releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml releasenotes/notes/delete-volume-with-snapshots-0b104e212d5d36b1.yaml releasenotes/notes/deprecate-xml-api-bf3e4079f1dc5eae.yaml releasenotes/notes/deprecated-ibm-multipath-f06c0e907a6301de.yaml releasenotes/notes/discard-config-option-711a7fbf20685834.yaml releasenotes/notes/disco-cinder-driver-9dac5fb04511de1f.yaml releasenotes/notes/downstream_genconfig-e50791497ce87ce3.yaml releasenotes/notes/enforce_min_vmware-a080055111b04692.yaml releasenotes/notes/friendly-zone-names-d5e131d356040de0.yaml releasenotes/notes/fujitsu-eternus-dx-fc-741319960195215c.yaml releasenotes/notes/fujitsu-eternus-dx-iscsi-e796beffb740db89.yaml releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml releasenotes/notes/huawei-manage-unmanage-snapshot-e35ff844d72fedfb.yaml releasenotes/notes/huawei-support-manage-volume-2a746cd05621423d.yaml releasenotes/notes/image-volume-type-c91b7cff3cb41c13.yaml releasenotes/notes/lefthand-consistency-groups-d73f8e418884fcc6.yaml releasenotes/notes/lefthand-manage-unmanage-snapshot-04de39d268d51169.yaml releasenotes/notes/netapp-chap-iscsi-auth-264cd942b2a76094.yaml releasenotes/notes/netapp-eseries-consistency-groups-4f6b2af2d20c94e9.yaml releasenotes/notes/nexenta-edge-iscsi-b3f12c7a719e8b8c.yaml releasenotes/notes/nexentastor5_iscsi-e1d88b07d15c660b.yaml releasenotes/notes/nexentastor5_nfs-bcc8848716daea63.yaml releasenotes/notes/nimble-manage-unmanage-1d6d5fc23cbe59a1.yaml releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml releasenotes/notes/pure-v2.1-replication-0246223caaa8a9b5.yaml releasenotes/notes/pure-verify-https-requests-464320c97ba77a1f.yaml releasenotes/notes/quota-volume-transfer-abd1f418c6c63db0.yaml releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml releasenotes/notes/rebranded-hpe-drivers-caf1dcef1afe37ba.yaml releasenotes/notes/remove-hp-cliq-41f47fd61e47d13f.yaml releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml releasenotes/notes/remove-vol-in-error-from-cg-1ed0fde04ab2b5be.yaml releasenotes/notes/remove_lvmdriver-9c35f83132cd2ac8.yaml releasenotes/notes/remove_storwize_npiv-b704ff2d97207666.yaml releasenotes/notes/removed-scality-7151638fdac3ed9d.yaml releasenotes/notes/rename-huawei-driver-092025e46b65cd48.yaml releasenotes/notes/replication-v2.1-3par-b3f780a109f9195c.yaml releasenotes/notes/replication-v2.1-lefthand-745b72b64e5944c3.yaml releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml releasenotes/notes/scaleio-manage-existing-32217f6d1c295193.yaml 
releasenotes/notes/scaleio-qos-support-2ba20be58150f251.yaml releasenotes/notes/scaleio-remove-force-delete-config-48fae029e3622d6d.yaml releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml releasenotes/notes/storwize-multiple-management-ip-1cd364d63879d9b8.yaml releasenotes/notes/storwize-pool-aware-support-7a40c9934642b202.yaml releasenotes/notes/storwize-split-up-__init__-153fa8f097a81e37.yaml releasenotes/notes/support-zeromq-messaging-driver-d26a1141290f5548.yaml releasenotes/notes/tintri_image_direct_clone-f73e561985aad867.yaml releasenotes/notes/tooz-locks-0f9f2cc15f8dad5a.yaml releasenotes/notes/updated-at-list-0f899098f7258331.yaml releasenotes/notes/vmware-vmdk-manage-existing-0edc20d9d4d19172.yaml releasenotes/notes/vmware-vmdk-removed-bfb04eed77b95fdf.yaml releasenotes/notes/vnx-configurable-migration-rate-5e0a2235777c314f.yaml releasenotes/notes/vnx-replication-v2.1-4d89935547183cc9.yaml releasenotes/notes/vnx_clone_cg-db74ee2ea71bedcb.yaml releasenotes/notes/volume-filtering-for-quoted-display-name-7f5e8ac888a73001.yaml releasenotes/notes/xiv-ds8k-replication-2.1-996c871391152e31.yaml releasenotes/notes/xtremio-cg-from-cg-e05cf286e3a1e943.yaml releasenotes/notes/zfssa-volume-manage-unmanage-ccd80807103b69c8.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/bandit.yaml tools/check_exec.py tools/colorizer.py tools/enable-pre-commit-hook.sh tools/fast8.sh tools/generate_driver_list.py tools/install_venv.py tools/install_venv_common.py tools/lintstack.py tools/lintstack.sh tools/with_venv.sh tools/config/check_uptodate.sh tools/config/generate_sample.shcinder-8.0.0/cinder.egg-info/PKG-INFO0000664000567000056710000000303112701406542020155 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: cinder Version: 8.0.0 Summary: OpenStack Block Storage Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ====== CINDER ====== You have come across a storage service for an open cloud computing service. It has identified itself as `Cinder`. It was abstracted from the Nova project. 
* Wiki: http://wiki.openstack.org/Cinder * Developer docs: http://docs.openstack.org/developer/cinder Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://git.openstack.org/openstack/cinder.git For developer information please see `HACKING.rst `_ You can raise bugs here http://bugs.launchpad.net/cinder Python client ------------- https://git.openstack.org/cgit/openstack/python-cinderclient Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 cinder-8.0.0/cinder.egg-info/top_level.txt0000664000567000056710000000000712701406542021612 0ustar jenkinsjenkins00000000000000cinder cinder-8.0.0/cinder.egg-info/pbr.json0000664000567000056710000000005612701406542020542 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "da90a31"}cinder-8.0.0/cinder.egg-info/entry_points.txt0000664000567000056710000000523012701406542022361 0ustar jenkinsjenkins00000000000000[cinder.database.migration_backend] sqlalchemy = oslo_db.sqlalchemy.migration [cinder.scheduler.filters] AvailabilityZoneFilter = cinder.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter CapabilitiesFilter = cinder.scheduler.filters.capabilities_filter:CapabilitiesFilter CapacityFilter = cinder.scheduler.filters.capacity_filter:CapacityFilter DifferentBackendFilter = cinder.scheduler.filters.affinity_filter:DifferentBackendFilter DriverFilter = cinder.scheduler.filters.driver_filter:DriverFilter InstanceLocalityFilter = cinder.scheduler.filters.instance_locality_filter:InstanceLocalityFilter JsonFilter = cinder.scheduler.filters.json_filter:JsonFilter RetryFilter = cinder.scheduler.filters.ignore_attempted_hosts_filter:IgnoreAttemptedHostsFilter SameBackendFilter = cinder.scheduler.filters.affinity_filter:SameBackendFilter [cinder.scheduler.weights] AllocatedCapacityWeigher = cinder.scheduler.weights.capacity:AllocatedCapacityWeigher CapacityWeigher = cinder.scheduler.weights.capacity:CapacityWeigher ChanceWeigher = cinder.scheduler.weights.chance:ChanceWeigher GoodnessWeigher = cinder.scheduler.weights.goodness:GoodnessWeigher VolumeNumberWeigher = cinder.scheduler.weights.volume_number:VolumeNumberWeigher [console_scripts] cinder-all = cinder.cmd.all:main cinder-api = cinder.cmd.api:main cinder-backup = cinder.cmd.backup:main cinder-manage = cinder.cmd.manage:main cinder-rootwrap = oslo_rootwrap.cmd:main cinder-rtstool = cinder.cmd.rtstool:main cinder-scheduler = cinder.cmd.scheduler:main cinder-volume = cinder.cmd.volume:main cinder-volume-usage-audit = cinder.cmd.volume_usage_audit:main [oslo.config.opts] cinder = cinder.opts:list_opts keystonemiddleware = keystonemiddleware.auth_token:list_opts oslo.db.concurrency = oslo.db.concurrency:list_opts oslo.messaging = oslo_messaging.opts:list_opts oslo_concurrency = oslo_concurrency.opts:list_opts [oslo.config.opts.defaults] cinder = cinder.common.config:set_middleware_defaults [oslo_messaging.notify.drivers] cinder.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver cinder.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver cinder.openstack.common.notifier.rpc_notifier = 
oslo_messaging.notify.messaging:MessagingDriver cinder.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver cinder.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver [oslo_middleware] cinder.api.middleware.sizelimit = oslo_middleware.sizelimit cinder.openstack.common.middleware.request_id = oslo_middleware.request_id [wsgi_scripts] cinder-wsgi = cinder.wsgi.wsgi:initialize_application cinder-8.0.0/AUTHORS0000664000567000056710000006644412701406542015213 0ustar jenkinsjenkins00000000000000Abel Lopez Abhijeet Malawade Abhilash Divakaran Abhinav Srivastava Abhiram Moturi Abhishek Lekshmanan Abhishek Shrivastava Accela Zhao Accela Zhao Adalberto Medeiros Adam Gandelman Adam Gandelman Adelina Tuvenie Adriano Rosso Adrien Vergé Ajaya Agrawal Alan Alan Jiang Alan Meadows Alberto Murillo Alberto Planas Alejandro Emanuel Paredes Alessandro Pilotti Alessandro Pilotti Alex Holden Alex Meade Alex O'Rourke Alex O'Rourke Alexander Bochkarev Alexander Gordeev Alexander Gorodnev Alexander Maretskiy Alexei Kornienko Alexey Khodos Alfredo Moralejo Alon Marx Amit Saha AmitKumarDas Anastasia Karpinska Andreas Jaeger Andreas Jaeger Andrei V. Ostapenko Andres Buraschi Andrew Forrest Andrew Kerr Andrey Pavlov Andy Grover Angela Smith Angus Lees Anish Bhatt Ankit Agrawal Ann Kamyshnikova Anna Sortland Anthony Lee Anthony Lee Anthony Young Anton Arefiev Anton Frolov ArkadyKanevsky Arne Wiebalck Aswad Rangnekar Atsushi SAKAI Attila Fazekas Aviram Bar-Haim Avishay Traeger Avishay Traeger Avishay Traeger Bala Gopal Raj Bardia Keyoumarsi Ben Nemec Ben Swartzlander Bertrand Lallau Bertrand Lallau Bharat Kumar Kobagana (BharatK) Bharat Kumar Kobagana Bharat Kumar Kobagana Bill Owen Bob Ball Bob Callaway Bob-OpenStack <295988511@qq.com> Boris Pavlovic Brant Knudson Brent Roskos Brian Waldon Brianna Poulos Bridget McGinnis Bryan D. Payne Carlos Goncalves Cedric Zhuang Cedric Zhuang Chang Bo Guo ChangBo Guo(gcb) Chao Zheng CZ Li Chaozhe.Chen Chet Burgess Chmouel Boudjnah Chris Buccella Chris Morrell Chris Yeoh Christian Berendt Christoph Kassen Christopher MacGown Chuck Fouts Chuck Short Cian O'Driscoll Cindy Pallares Clark Boylan ClaudiuNesa Clay Gerrard Clinton Knight Corey Bryant Cory Stone Cory Wright Craig Vyvial Craige McWhirter Csaba Henk Curt Bruns Cyril Roelandt Daisuke Fujita Dan Prince Dan Radez Daniel Allegood Daniel Gollub Daniel Tadrzak Daniel Wilson Danny Al-Gaaf Darren Birkett Davanum Srinivas Davanum Srinivas Dave Chen Dave McCowan David Medberry David Pineau David Ripton David Rosales David Sariel Deepak C Shetty Deepti Ramakrishna Deliang Fan DennyZhang Derek Chiang Dermot Tynan Derrick J. Wippler Diego Zamboni Diem Tran Dietmar Noll Digvijay Ukirde Dima Shulyak Dina Belova Dinesh Bhor Dinesh Subhraveti Dirk Mueller Dmitry Borodaenko Dmitry Guryanov Dolph Mathews Dongcan Ye Doug Hellmann Doug Hellmann Doug Schveninger Duncan Thomas Duncan Thomas Dunrong Huang Earle F. Philhower, III Ed Balduf Ed Balduf Edmund Rhudy Eduardo Costa Edward Hope-Morley Edwin Wang Edwin Wang Eiichi Aikawa Einst Crazy Elena Ezhova Eli Qiao Emilien Macchi Eoghan Glynn Eric Brown Eric Guo Eric Harney Eric Windisch Erickson Santos Erik Johannes Erik Zaadi Erlon Cruz Erlon R. Cruz Evgeny Antyshev Fabien Boucher Fei Long Wang Fengqian Gao Fergal Mc Carthy Flaper Fesp Flavio Percoco Florent Flament Florian Haas Forest Romain Francis Moorehead Frederic Lepied Furuta Tomonori GaoZqiang Gaozexu Gary W. 
Smith Gaurang Tapase Gauvain Pocentek Geraint North Gerald McBrearty Gerard Garcia Ghe Rivero Giulio Fidente Glenn M. Gobeli Gloria Gu Gorka Eguileor Goutham Pacha Ravi Guan Qiang Hahyun Hai-Xu Cheng Haiwei Xu Haomai Wang Harsh Mishra Harshada Mangesh Kakad Haruka Tanizawa He Yongli Helen Walsh Hiroyuki Eguchi Hui Cheng Ian Denhardt Ian Govett Igor Pugovkin Ihar Hrachyshka Ilya Tyaptin Inhye Park Ivan Kolodyazhny Ivan Kolodyazhny Ivy Zhang Jacob Gregor Jacob M. Jacob James Carey James E. Blair Jamie Lennox Jasakov Artem Jason Ni Javeme Jay Lau Jay Lee Jay Payne Jay S Bryant Jay S. Bryant Jay Wang Jean-Baptiste RANSY Jean-Baptiste Ransy Jean-Marc Saffroy Jeegn Chen Jeff Applewhite Jenny Shieh Jeremy Stanley Jesse Keating Jim Branen Jimmy McCrory Jinru Yan Joe Cropper Joe D'Andrea Joe Gordon Joe Gordon Joel Coffman Joel Friedly John Garbutt John Griffith John Griffith John McDonough Johnson Koil Raj Johnson koil raj Jon Bernard Jon Bernard Jordan Pittier Jordan Tardif JordanP Joseph Glanville Joseph Vokt Josh Durgin Joshua Harlow Joshua Huber JuPing Juan Manuel Olle Juan Zuluaga Julia Varlamova Julien Danjou Jun Ishizaki KIYOHIRO ADACHI Kai Zhang Kaitlin Farr Kallebe Monteiro Kamil Rykowski Kartik Bommepally Kazumasa Nomura Kedar Vidvans Ken'ichi Ohmichi Kendall Nelson Kenji Yasui Kevin Fox Koert van der Veer Kui Shi Kun Huang Kun Huang Kuo-tung Kao Kurt Martin Kurt Martin Kurt Taylor Lakhinder Walia Larry Matter LarryLiu Lee Lee Yarwood Lena Novokshonova Li Min Liu Liang Chen Lin Hua Cheng Lin Yang LisaLi Liu Xinguo <295988511@qq.com> LiuNanke LiuSheng Lucas Alvares Gomes Lucian Petrut Lucian Petrut Luis A. Garcia Lynxzh MENJO, Takashi MORITA Kazutaka Mandell Degerness Manjeet Singh Bhatia Manojkiran Manojkiran Marc Koderer Marc Koderer Marian Horban Mark McLoughlin Mark Sturdevant Martin Kletzander Masaki Kanno Matan Sabag Mate Lakat Mathieu Gagné Matt Fischer Matt Riedemann Matt Smith Matthew Edmonds Matthew Treinish Matthew Treinish Mehdi Abaakouk Mehdi Abaakouk Meir Kriheli Michael Basnight Michael Berlin Michael J Fork Michael Kerrin Michael Krotscheck Michael Price Michael Rowden Michael Still Michal Dulko Michal Jura Michał Dulko Midun Kumar Mike Mason Mike Perez Mike Rooney Mikhail Khodos Mikhail Khodos Mitsuhiro SHIGEMATSU Mitsuhiro Tanino Monty Taylor Morgan Fainberg Morgan Fainberg MotoKen Mudassir Latif Mukul Patel Nashwan Azhari Nate Potter Nathaniel Potter Navneet Singh Navneet Singh Nicolas Trangez Nikesh Nikola Dipanov Nikolaj Starodubtsev Nikolay Sobolevskiy Nilesh Bhosale Nirmal Ranganathan Olga Kopylova Olivier Pilotte Ollie Leahy Ollie Leahy Ondřej Nový Pascal Wehrle Patrick East Paul Mathews Paul McMillan Pavel Boldin Pavel Kirpichyov Pedro Navarro Perez Pengfei Zhang Peter Penchev Peter Pentchev Philipp Marek Pradeep Sathasivam Pranali Deore PranaliDeore PranaliDeore Pádraig Brady Qian Gao Qin Zhao Qiu Yu Rafael Rivero Rafael Toschi Chiafarelli Rafi Khardalian Rahul Verma Raildo Mascena Rajesh Tailor Rakesh H S Rakesh Mishra Ralf Haferkamp Ramy Asselin Raunak Kumar Ravi Shekhar Jethani Ray Chen Ray Chen Rich Hagarty Richard Hedlind Rick Chen Rick Harris Rob Crittenden Robert Collins Robert Mizielski Rodrigo Barbieri Rohan Kanade Rohit Karajgi Romain Chantereau Romain Hardouin Roman Bogorodskiy Roman Podolyaka Ronald Bradford Ronen Kat Rongze Zhu Rongze Zhu RongzeZhu Rushi Agrawal Rushil Chugh Russell Bryant Ryan LIANG Ryan Liang Ryan Lucio Ryan McNair Ryan Rossiter Sachi King Sai Kiran Sam Morrison Samuel Matzek Santhoshkumar Kolathur Sascha Peilicke Sascha Peilicke 
Sascha Peilicke Sasikanth Scott DAngelo Scott Devoid Sean Chen Sean Chen Sean Dague Sean Dague Sean Dague Sean McCully Sean McGinnis Sean Roberts Sebastian Jeuk Seif Lotfy Seiji Aguchi Sergey Gotliv Sergey Skripnick Sergey Vilgelm Sergey Vilgelm Sergio Cazzolato Shane Wang Shao Kai Li Shay Halsband Sheel Rana Shlomi Sasson Shuangtai Tian Shyama Venugopal Silvan Kaiser Simon Dodsley Simon Lorenz Sivaramakrishna Garimella Skyler Berg Slade Baumann Sonia Ghanekar Stefan Amann Stephen Mulcahy Steven Kaufer Stuart McLaren Subramanian Neelakantan Subramanian Neelakantan Surya Ghatty Sushil Kumar Svetlana Shturm Swapnil Kulkarni Sylvain Baubeau Szymon Borkowski Szymon Wroblewski Szymon Wróblewski Takahiro Shida Takashi NATSUME Takashi Natsume Takeaki Matsumoto Takeshi Nishikawa Tao Bai TaoBai Teruaki Ishizaki Thang Pham Thelo Gaultier Thierry Carrez Thomas Bechtold Thomas Bechtold Thomas Goirand Tiago Pasqualini Timothy Okwii Tina Tobias Urdin Tom Barron Tom Barron Tom Barron Tom Cammann Tom Fifield Tom Fifield Tom Hancock Tom Patzig Tom Swanson Tomas Hancock Tomoki Sekiyama Tomoki Sekiyama Tristan Cacqueray Trung Trinh Unmesh Gurjar Unmesh Gurjar Vahid Hashemian Valeriy Ponomaryov Vasanthi Thirumalai Vasyl Khomenko Veronica Musso Victor A. Ying Victor Rodionov Victor Sergeyev Victor Stinner Vilobh Meshram Vincent Hou Vincent Hou Vipin Balachandran Viraj Hardikar Vishvananda Ishaya Vivek Dhayaal Vladimir Popovski Vladislav Kuzmin Walter A. Boring IV Walter A. Boring IV Wenhao Xu Wenhao Xu WenjunWang1992 <10191230@zte.com.cn> Wilson Liu Wu Wenxiang Xavier Queralt Xi Yang Xi Yang Xiangfei Zhu Xiao Chen Xiaoqin Li Xiaoqin Li XinXiaohui Xing Yang Xingchao Yu Xinyuan Huang XueChendi YAMADA Hideki Yaguang Tang Yaguang Tang Yaguang Tang Yang Yu YangLei Yasuaki Nagata Yejia Xu Yi Chun, Huang Yosef Berman Yoshihide Matsumoto YuanHui Xu Yucong Feng Yug Suo Yuiko Takada Yuji Hagiwara Yun Mao Yuriy Nesenenko Yuriy Taraday Yuriy Zveryanskyy Yusuke Hayashi Yuzlikeev Eduard Zhang Jinnan Zhengguang--reset-author Zhenguo Niu Zhi Kun Liu Zhi Yan Liu ZhiQiang Fan Zhiteng Huang Zhiteng Huang Zhiteng Huang Zhongyue Luo Zhongyue Luo ZhuRongze Zoltan Arnold Nagy abhiram moturi abhiram_moturi abhishekkekane alonma amoturi anastasia-karpinska ankitagrawal annegentle apoorvad appsdesh april caoyue chadlung chaochin cheneydc chenying chenzongliang clayg daisy-ycguo diem_tran dineshbhor eduardBM erikzaadi felix23ma fujioka yuuichi galstrom21 gfm gh159m git-harry gtt116 hgangwx huananhuawei huangtianhua huyang iberezovskiy jakedahn jbrogan jenny-shieh jiamin jking-6 john-griffith jun xie kedar-vidvans keystone keystone kshimamu leseb ling-yun lirenke lisali liu-sheng liudong liuke2 liuqing liuxinguo liyingjun liyingjun liyuanyuan llg8212 lrqrun ls1175 malei mannuray marcusvrn masahiro ikeda mikhail mouad benchchaoui neochin nikeshm nikeshmahalka nuritv peter_wang peter_wang pran1990 rackerjoe rajinir ramakris rick.chen root root ruichen sanuptpm sarat inuguri sathish-nagappan saurabh scott-dangelo scottda sdodsley shihanzhang skudriashev sparkliu srushti stack tanlin tsekiyam tswanson ubaumann unicell ustcdylan venkatamahesh vitas.yuzhou wanghao wanghong wangxiyuan wingwj wuyuting xiaolei hu xiaoxi_chen xiexs xqli-openstack yatin karel yehia-beyh yoan desbordes yogeshprasad zhangchao010 zhangchunlong1@huawei.com zhangguoqing zhangni zhangsong zhangyanzi zhaohua zhaoqin zhongjun zhu.rong zhuzhubj cinder-8.0.0/requirements.txt0000664000567000056710000000375712701406257017430 0ustar jenkinsjenkins00000000000000# The order of 
packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=1.6 # Apache-2.0 Babel>=1.3 # BSD decorator>=3.4.0 # BSD enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD eventlet!=0.18.3,>=0.18.2 # MIT greenlet>=0.3.2 # MIT httplib2>=0.7.5 # MIT iso8601>=0.1.9 # MIT keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 lxml>=2.3 # BSD oauth2client>=1.5.0 # Apache-2.0 oslo.config>=3.7.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.messaging>=4.0.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 oslo.versionedobjects>=1.5.0 # Apache-2.0 osprofiler>=1.1.0 # Apache-2.0 paramiko>=1.16.0 # LGPL Paste # MIT PasteDeploy>=1.5.0 # MIT pycrypto>=2.6 # Public Domain pyparsing>=2.0.1 # MIT python-barbicanclient>=3.3.0 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 pytz>=2013.6 # MIT requests!=2.9.0,>=2.8.1 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7' # MIT Routes!=2.0,>=1.12.3;python_version!='2.7' # MIT taskflow>=1.26.0 # Apache-2.0 rtslib-fb>=2.1.41 # Apache-2.0 simplejson>=2.2.0 # MIT six>=1.9.0 # MIT SQLAlchemy<1.1.0,>=1.0.10 # MIT sqlalchemy-migrate>=0.9.6 # Apache-2.0 stevedore>=1.5.0 # Apache-2.0 suds-jurko>=0.6 # LGPL WebOb>=1.2.3 # MIT oslo.i18n>=2.1.0 # Apache-2.0 oslo.vmware>=1.16.0 # Apache-2.0 os-brick>=1.0.0 # Apache-2.0 os-win>=0.2.3 # Apache-2.0 tooz>=1.28.0 # Apache-2.0 google-api-python-client>=1.4.2 # Apache-2.0 cinder-8.0.0/HACKING.rst0000664000567000056710000000531112701406250015717 0ustar jenkinsjenkins00000000000000Cinder Style Commandments ========================= - Step 1: Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ - Step 2: Read on Cinder Specific Commandments ---------------------------- - [N314] Check for vi editor configuration in source files. - [N319] Validate that debug level logs are not translated. - [N322] Ensure default arguments are not mutable. - [N323] Add check for explicit import of _() to ensure proper translation. - [N325] str() and unicode() cannot be used on an exception. Remove or use six.text_type(). - [N328] LOG.info messages require translations `_LI()`. - [N329] LOG.exception and LOG.error messages require translations `_LE()`. - [N330] LOG.warning messages require translations `_LW()`. - [N333] Ensure that oslo namespaces are used for namespaced libraries. - [N336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs. - [C301] timeutils.utcnow() from oslo_utils should be used instead of datetime.now(). - [C302] six.text_type should be used instead of unicode. - [C303] Ensure that there are no 'print()' statements in code that is being committed. - [C304] Enforce no use of LOG.audit messages. LOG.info should be used instead. - [C305] Prevent use of deprecated contextlib.nested. - [C306] timeutils.strtime() must not be used (deprecated). - [C307] LOG.warn is deprecated. Enforce use of LOG.warning. 
- [C308] timeutils.isotime() must not be used (deprecated). - [C309] Unit tests should not perform logging. - [C310] Check for improper use of logging format arguments. - [C311] Check for proper naming and usage in option registration. - [C312] Check that assertIsNone(value) is used and not assertEqual(None, value). - [C313] Check that assertTrue(value) is used and not assertEqual(True, value). General ------- - Use 'raise' instead of 'raise e' to preserve the original traceback of the exception being reraised:: except Exception as e: ... raise e # BAD except Exception: ... raise # OKAY Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. Cinder is transitioning to use mock, rather than mox, and so new tests should use mock only. For more information on creating unit tests and utilizing the testing infrastructure in OpenStack Cinder, please read the Cinder testing `README.rst `_. cinder-8.0.0/CONTRIBUTING.rst0000664000567000056710000000105412701406250016562 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not in GitHub's issue tracker: https://bugs.launchpad.net/cinder cinder-8.0.0/releasenotes/0000775000567000056710000000000012701406543016617 5ustar jenkinsjenkins00000000000000cinder-8.0.0/releasenotes/notes/0000775000567000056710000000000012701406543017747 5ustar jenkinsjenkins00000000000000cinder-8.0.0/releasenotes/notes/updated-at-list-0f899098f7258331.yaml0000664000567000056710000000011612701406250025566 0ustar jenkinsjenkins00000000000000--- features: - The updated_at timestamp is now returned in listing detail. cinder-8.0.0/releasenotes/notes/removed-scality-7151638fdac3ed9d.yaml0000664000567000056710000000010212701406250026231 0ustar jenkinsjenkins00000000000000--- upgrade: - Backend driver for Scality SRB has been removed. cinder-8.0.0/releasenotes/notes/storwize-split-up-__init__-153fa8f097a81e37.yaml0000664000567000056710000000053512701406250030173 0ustar jenkinsjenkins00000000000000--- upgrade: - Removed the storwize_svc_connection_protocol config setting. Users will now need to set different values for volume_driver in cinder.conf. FC:volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver iSCSI:volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver cinder-8.0.0/releasenotes/notes/enforce_min_vmware-a080055111b04692.yaml0000664000567000056710000000012512701406250026363 0ustar jenkinsjenkins00000000000000--- upgrade: - The VMware VMDK driver now enforces a minimum vCenter version of 5.1. 
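The hacking checks listed in HACKING.rst above are easiest to see with short, concrete cases. Below is a minimal Python sketch of two of them; the function names are hypothetical and not taken from the Cinder tree::

    # Hypothetical illustrations of two rules from HACKING.rst above:
    # N322 (no mutable default arguments) and the bare-raise rule from
    # the General section.

    def track_volume_bad(name, seen=[]):  # BAD (N322): one list shared by every call
        seen.append(name)
        return seen

    def track_volume_ok(name, seen=None):  # OKAY: a fresh list per call
        if seen is None:
            seen = []
        seen.append(name)
        return seen

    def reraise_example():
        try:
            raise ValueError("boom")
        except Exception:
            # 'raise e' would rewrite the traceback; a bare 'raise'
            # preserves the original one.
            raise

    if __name__ == "__main__":
        track_volume_bad("v1")
        print(track_volume_bad("v2"))  # ['v1', 'v2'] -- state leaked across calls
        print(track_volume_ok("v2"))   # ['v2']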
cinder-8.0.0/releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml0000664000567000056710000000011412701406250027777 0ustar jenkinsjenkins00000000000000--- features: - Added replication v2.1 support to the IBM Storwize driver.cinder-8.0.0/releasenotes/notes/huawei-manage-unmanage-snapshot-e35ff844d72fedfb.yaml0000664000567000056710000000011412701406250031432 0ustar jenkinsjenkins00000000000000--- features: - Added manage/unmanage snapshot support for Huawei drivers.cinder-8.0.0/releasenotes/notes/backup_driver_init_state-d4834fa927e502ab.yaml0000664000567000056710000000016412701406250030106 0ustar jenkinsjenkins00000000000000--- fixes: - Fixed service state reporting when the backup manager is unable to initialize one of the backup drivers. cinder-8.0.0/releasenotes/notes/storwize-pool-aware-support-7a40c9934642b202.yaml0000664000567000056710000000010712701406250030257 0ustar jenkinsjenkins00000000000000--- features: - Added multiple pools support to the Storwize SVC driver. cinder-8.0.0/releasenotes/notes/storwize-multiple-management-ip-1cd364d63879d9b8.yaml0000664000567000056710000000011712701406250031242 0ustar jenkinsjenkins00000000000000--- features: - Added multiple management IP support to the Storwize SVC driver. cinder-8.0.0/releasenotes/notes/netapp-chap-iscsi-auth-264cd942b2a76094.yaml0000664000567000056710000000012612701406250027152 0ustar jenkinsjenkins00000000000000--- features: - Added iSCSI CHAP uni-directional authentication for NetApp drivers. cinder-8.0.0/releasenotes/notes/cloudbyte-retype-support-4b9f79f351465279.yaml0000664000567000056710000000010212701406250027655 0ustar jenkinsjenkins00000000000000--- features: - Retype support added to the CloudByte iSCSI driver. cinder-8.0.0/releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml0000664000567000056710000000237212701406250027526 0ustar jenkinsjenkins00000000000000--- issues: - Cinder services now automatically downgrade RPC messages so that they can be understood by the oldest service version present in the deployment. Disabled and dead services are also taken into account. It is important to keep the service list up to date, without old, unused records. This can be done using the ``cinder-manage service remove`` command. Once the situation is cleaned up, services should either be restarted or a ``SIGHUP`` signal should be issued to their processes to force them to reload the version pins. Please note that cinder-api does not support the ``SIGHUP`` signal. upgrade: - If, during a *live* upgrade from Liberty, a backup service is killed while processing a restore request, that backup's status may not be automatically cleaned up on service restart. Such orphaned backups need to be cleaned up manually. - When performing a *live* upgrade from Liberty, retype calls may reserve additional quota. As quota reservations are invalidated after 24 hours by default (config option ``reservation_expire=86400``), we recommend either decreasing that time or manually watching for unused quota reservations during the upgrade process. cinder-8.0.0/releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml0000664000567000056710000000044512701406250027367 0ustar jenkinsjenkins00000000000000--- features: - The cinder-backup service is now decoupled from cinder-volume, which allows more flexible scaling. upgrade: - As cinder-backup was strongly reworked in this release, the recommended upgrade order when executing a live (rolling) upgrade is c-api->c-sch->c-vol->c-bak. 
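The general upgrades note above says RPC messages are pinned to the oldest service version known to the deployment, including disabled and dead services, which is why stale records must be purged. A toy Python sketch of that pinning idea follows, using made-up service records rather than Cinder's actual RPC layer::

    # Illustrative only: pick the lowest RPC version among all known
    # service records, as the upgrade note above describes. Stale rows
    # drag the pin down, hence ``cinder-manage service remove``.

    def _as_tuple(version):
        return tuple(int(part) for part in version.split('.'))

    def pinned_rpc_version(services):
        versions = [s['rpc_current_version'] for s in services]
        return min(versions, key=_as_tuple) if versions else None

    if __name__ == "__main__":
        services = [
            {'host': 'vol1', 'rpc_current_version': '2.0'},
            {'host': 'vol2', 'rpc_current_version': '1.3'},
            {'host': 'old-dead-node', 'rpc_current_version': '1.1'},
        ]
        # The dead node's stale record still pins everyone to 1.1.
        print(pinned_rpc_version(services))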
cinder-8.0.0/releasenotes/notes/tintri_image_direct_clone-f73e561985aad867.yaml0000664000567000056710000000066612701406250030202 0ustar jenkinsjenkins00000000000000---
fixes:
  - Fixed the Tintri image direct-clone feature. The fix for bug 1400966 prevents users from specifying an image's NFS share location as the location value for an image. To use Tintri image direct clone, users can now set "provider_location" in the image metadata to specify the image's NFS share location. The NFS shares that host images should be listed in a file referenced by the tintri_image_shares_config config option.
cinder-8.0.0/releasenotes/notes/nexentastor5_nfs-bcc8848716daea63.yaml0000664000567000056710000000010512701406250026432 0ustar jenkinsjenkins00000000000000---
features:
  - Added backend driver for NexentaStor5 NFS storage.
cinder-8.0.0/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml0000664000567000056710000000050112701406250027573 0ustar jenkinsjenkins00000000000000---
features:
  - Added a new config option for Pure Storage volume drivers, pure_eradicate_on_delete. When enabled, data will be permanently eradicated instead of being placed into a pending eradication state.
fixes:
  - Allow for eradicating Pure Storage volumes, snapshots, and pgroups when deleting their Cinder counterpart.
cinder-8.0.0/releasenotes/notes/brocade_virtual_fabrics_support-d2d0b95b19457c1d.yaml0000664000567000056710000000042512701406250031474 0ustar jenkinsjenkins00000000000000---
features:
  - Support for configuring Fibre Channel zoning on Brocade switches through the Cinder Fibre Channel Zone Manager and the Brocade Fibre Channel zone plugin. To zone in a Virtual Fabric, set the configuration option 'fc_virtual_fabric_id' for the fabric.
cinder-8.0.0/releasenotes/notes/3par-manage-unmanage-snapshot-eb4e504e8782ba43.yaml0000664000567000056710000000012112701406250030560 0ustar jenkinsjenkins00000000000000---
features:
  - Added snapshot manage/unmanage support to the HPE 3PAR driver.
cinder-8.0.0/releasenotes/notes/3par-license-check-51a16b5247675760.yaml0000664000567000056710000000011012701406250026100 0ustar jenkinsjenkins00000000000000---
features:
  - Standard capabilities are now disabled based on 3PAR licenses.
cinder-8.0.0/releasenotes/notes/nimble-manage-unmanage-1d6d5fc23cbe59a1.yaml0000664000567000056710000000013312701406250027461 0ustar jenkinsjenkins00000000000000---
features:
  - Manage and unmanage support has been added to the Nimble backend driver.
cinder-8.0.0/releasenotes/notes/downstream_genconfig-e50791497ce87ce3.yaml0000664000567000056710000000013212701406250027202 0ustar jenkinsjenkins00000000000000---
fixes:
  - Removed the need for deployers to run tox for config reference generation.
cinder-8.0.0/releasenotes/notes/rename-huawei-driver-092025e46b65cd48.yaml0000664000567000056710000000016012701406250026721 0ustar jenkinsjenkins00000000000000---
upgrade:
  - Renamed Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver and HuaweiFCDriver.
cinder-8.0.0/releasenotes/notes/zfssa-volume-manage-unmanage-ccd80807103b69c8.yaml0000664000567000056710000000013112701406250030431 0ustar jenkinsjenkins00000000000000---
features:
  - Added volume manage/unmanage support for the Oracle ZFSSA iSCSI and NFS drivers.
cinder-8.0.0/releasenotes/notes/lefthand-manage-unmanage-snapshot-04de39d268d51169.yaml0000664000567000056710000000012512701406250031351 0ustar jenkinsjenkins00000000000000---
features:
  - Added snapshot manage/unmanage support to the HPE LeftHand driver.
cinder-8.0.0/releasenotes/notes/brocade_http_connector-0021e41dfa56e671.yaml0000664000567000056710000000063112701406250027461 0ustar jenkinsjenkins00000000000000---
features:
  - Added an HTTP connector for the Cinder Brocade FC Zone plugin. This connector allows communication between the Brocade FC zone plugin and the switch to be over HTTP or HTTPS. To make use of this connector, add a configuration setting named 'fc_southbound_protocol', with a value of 'HTTP' or 'HTTPS', to the fabric block for a Brocade switch.
cinder-8.0.0/releasenotes/notes/image-volume-type-c91b7cff3cb41c13.yaml0000664000567000056710000000015312701406250026541 0ustar jenkinsjenkins00000000000000---
features:
  - Added support for the cinder_img_volume_type property in Glance image metadata, which specifies the volume type to use.
cinder-8.0.0/releasenotes/notes/scaleio-qos-support-2ba20be58150f251.yaml0000664000567000056710000000006712701406250026676 0ustar jenkinsjenkins00000000000000---
features:
  - Added QoS support in the ScaleIO driver.
cinder-8.0.0/releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml0000664000567000056710000000077512701406250030737 0ustar jenkinsjenkins00000000000000---
features:
  - Split nested quota support into a separate driver. To use nested quotas, set ``quota_driver = cinder.quota.NestedDbQuotaDriver`` after running the "os-quota-sets/validate_setup_for_nested_quota_use" admin API command to ensure the existing quota values make sense to nest.
upgrade:
  - Nested quotas will no longer be used by default, but can be configured by setting ``quota_driver = cinder.quota.NestedDbQuotaDriver``.
cinder-8.0.0/releasenotes/notes/vmware-vmdk-removed-bfb04eed77b95fdf.yaml0000664000567000056710000000011112701406250027245 0ustar jenkinsjenkins00000000000000---
upgrade:
  - The VMware VMDK driver for ESX server has been removed.
cinder-8.0.0/releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml0000664000567000056710000000035412701406250027060 0ustar jenkinsjenkins00000000000000---
features:
  - Added Migrate and Extend support to the Nexenta NFS driver.
  - Added Retype functionality to the Nexenta iSCSI and NFS drivers.
upgrade:
  - Refactored the Nexenta iSCSI driver to use a single target and target group with multiple zvols.
cinder-8.0.0/releasenotes/notes/add-tegile-driver-b7919c5f30911998.yaml0000664000567000056710000000007712701406250026141 0ustar jenkinsjenkins00000000000000---
features:
  - Added driver for Tegile IntelliFlash arrays.
cinder-8.0.0/releasenotes/notes/volume-filtering-for-quoted-display-name-7f5e8ac888a73001.yaml0000664000567000056710000000020212701406250032720 0ustar jenkinsjenkins00000000000000---
fixes:
  - Filtering volumes by their display name now correctly handles display names with single and double quotes.
cinder-8.0.0/releasenotes/notes/xiv-ds8k-replication-2.1-996c871391152e31.yaml0000664000567000056710000000012112701406250027030 0ustar jenkinsjenkins00000000000000---
features:
  - Added replication v2.1 support to the IBM XIV/DS8K driver.
cinder-8.0.0/releasenotes/notes/VMEM-6000-drivers-removed-9b6675ff7ae5f960.yaml0000664000567000056710000000010712701406250027331 0ustar jenkinsjenkins00000000000000---
upgrade:
  - The Violin Memory 6000 array series drivers have been removed.
cinder-8.0.0/releasenotes/notes/pure-verify-https-requests-464320c97ba77a1f.yaml0000664000567000056710000000027312701406250030252 0ustar jenkinsjenkins00000000000000---
security:
  - Pure Storage volume drivers can now utilize the driver_ssl_cert_verify and driver_ssl_cert_path config options to allow for secure HTTPS requests to the FlashArray.
cinder-8.0.0/releasenotes/notes/lefthand-consistency-groups-d73f8e418884fcc6.yaml0000664000567000056710000000013312701406250030525 0ustar jenkinsjenkins00000000000000---
features:
  - Consistency group support has been added to the LeftHand backend driver.
cinder-8.0.0/releasenotes/notes/fujitsu-eternus-dx-fc-741319960195215c.yaml0000664000567000056710000000010412701406250026633 0ustar jenkinsjenkins00000000000000---
features:
  - Added backend driver for Fujitsu ETERNUS DX (FC).
cinder-8.0.0/releasenotes/notes/remove-vol-in-error-from-cg-1ed0fde04ab2b5be.yaml0000664000567000056710000000036412701406250030504 0ustar jenkinsjenkins00000000000000---
fixes:
  - Previously, the only way to remove volumes in error states from a consistency group was to delete the consistency group and create it again. It is now possible to remove volumes in error and error_deleting states.
cinder-8.0.0/releasenotes/notes/cg-scheduler-change-180a36b77e8cc26b.yaml0000664000567000056710000000022012701406250026627 0ustar jenkinsjenkins00000000000000---
fixes:
  - Consistency group creation was previously scheduled at the pool level. It is now scheduled at the backend level, as designed.
cinder-8.0.0/releasenotes/notes/add-google-backup-driver-d1e7ac33d5780b79.yaml0000664000567000056710000000010712701406250027604 0ustar jenkinsjenkins00000000000000---
features:
  - Added Cinder backup driver for Google Cloud Storage.
cinder-8.0.0/releasenotes/notes/Huawei-volume-driver-replication-v2.1-ada5bc3ad62dc633.yaml0000664000567000056710000000011212701406250032253 0ustar jenkinsjenkins00000000000000---
features:
  - Added v2.1 replication support in the Huawei Cinder driver.
cinder-8.0.0/releasenotes/notes/remove-hp-cliq-41f47fd61e47d13f.yaml0000664000567000056710000000011312701406250025673 0ustar jenkinsjenkins00000000000000---
upgrade:
  - The deprecated HP CLIQ proxy driver has now been removed.
cinder-8.0.0/releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml0000664000567000056710000000035312701406250030534 0ustar jenkinsjenkins00000000000000---
features:
  - Added support for creating, deleting, and updating consistency groups for NetApp 7mode and CDOT backends.
  - Added support for taking, deleting, and restoring a cgsnapshot for NetApp 7mode and CDOT backends.
cinder-8.0.0/releasenotes/notes/nexenta-edge-iscsi-b3f12c7a719e8b8c.yaml0000664000567000056710000000010712701406250026601 0ustar jenkinsjenkins00000000000000---
features:
  - Added backend driver for Nexenta Edge iSCSI storage.
cinder-8.0.0/releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml0000664000567000056710000000010512701406250030474 0ustar jenkinsjenkins00000000000000---
features:
  - Added Consistency Group support in the ScaleIO driver.
cinder-8.0.0/releasenotes/notes/vnx-replication-v2.1-4d89935547183cc9.yaml0000664000567000056710000000010612701406250026453 0ustar jenkinsjenkins00000000000000---
features:
  - Added v2.1 replication support in the VNX Cinder driver.
cinder-8.0.0/releasenotes/notes/balanced-fc-port-selection-fbf6b841fea99156.yaml0000664000567000056710000000011112701406250030215 0ustar jenkinsjenkins00000000000000---
features:
  - Added support for balanced FC port selection in Huawei drivers.
cinder-8.0.0/releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml0000664000567000056710000000070112701406250027210 0ustar jenkinsjenkins00000000000000---
features:
  - All Datera DataFabric backed volume types will now use API version 2 with Datera DataFabric.
upgrade:
  - Users of the Datera Cinder driver are now required to use Datera DataFabric version 1.0+. Versions before 1.0 will not be able to utilize this new driver since they still function on v1 of the Datera DataFabric API.
deprecations:
  - The datera_api_token option has been replaced by san_login and san_password.
cinder-8.0.0/releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml0000664000567000056710000000036212701406250031235 0ustar jenkinsjenkins00000000000000---
upgrade:
  - The new rootwrap.d/volume.filters file must be copied into the /etc/cinder/rootwrap.d directory.
fixes:
  - Fixed a bug causing snapshot creation to fail on systems with LC_NUMERIC set to a locale using ',' as the decimal separator.
cinder-8.0.0/releasenotes/notes/tooz-locks-0f9f2cc15f8dad5a.yaml0000664000567000056710000000024012701406250025363 0ustar jenkinsjenkins00000000000000---
features:
  - Locks may now use Tooz as an abstraction layer, supporting distributed lock managers and preparing Cinder to better support HA configurations.
cinder-8.0.0/releasenotes/notes/fujitsu-eternus-dx-iscsi-e796beffb740db89.yaml0000664000567000056710000000010712701406250030120 0ustar jenkinsjenkins00000000000000---
features:
  - Added backend driver for Fujitsu ETERNUS DX (iSCSI).
cinder-8.0.0/releasenotes/notes/xtremio-cg-from-cg-e05cf286e3a1e943.yaml0000664000567000056710000000014212701406250026450 0ustar jenkinsjenkins00000000000000---
features:
  - Support for creating a consistency group from another consistency group in XtremIO.
cinder-8.0.0/releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml0000664000567000056710000000130512701406250026510 0ustar jenkinsjenkins00000000000000---
features:
  - Added an RPC backward compatibility layer similar to the one implemented in Nova. This means that Cinder services can be upgraded one-by-one without breakage. After all services are upgraded, SIGHUP signals should be issued to all services to make them reload their cached minimum RPC versions; alternatively, they can simply be restarted. Please note that the cinder-api service doesn't support SIGHUP yet. Also note that all rolling upgrade capabilities are considered tech preview, as there is no CI testing them yet.
upgrade:
  - Starting with the Mitaka release, Cinder offers a tech preview of rolling upgrade support.
cinder-8.0.0/releasenotes/notes/huawei-support-manage-volume-2a746cd05621423d.yaml0000664000567000056710000000011312701406250030413 0ustar jenkinsjenkins00000000000000---
features:
  - Added manage/unmanage volume support for Huawei drivers.
cinder-8.0.0/releasenotes/notes/netapp-eseries-consistency-groups-4f6b2af2d20c94e9.yaml0000664000567000056710000000012712701406250031724 0ustar jenkinsjenkins00000000000000---
features:
  - Support for Consistency Groups in the NetApp E-Series Volume Driver.
cinder-8.0.0/releasenotes/notes/replication-v2.1-3par-b3f780a109f9195c.yaml0000664000567000056710000000011112701406250026622 0ustar jenkinsjenkins00000000000000---
features:
  - Added v2.1 replication support to the HPE 3PAR driver.
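(Illustrative aside.) To make the rpc_compatibility note above concrete, here is a small, self-contained sketch of the version-pinning idea — this is not Cinder's actual RPC layer, and the per-argument version table is invented purely for illustration::

    # Hypothetical versions at which each RPC argument was introduced.
    ARG_INTRODUCED_IN = {'volume_id': 1.0, 'cascade': 2.0}


    def pin_version(local_version, deployment_versions):
        # Pin to the oldest service version seen in the deployment; stale
        # records drag this down, hence 'cinder-manage service remove'.
        return min([local_version] + list(deployment_versions))


    def downgrade_message(kwargs, pinned):
        # Drop arguments the oldest service would not understand.
        return {k: v for k, v in kwargs.items()
                if ARG_INTRODUCED_IN.get(k, 0.0) <= pinned}


    pinned = pin_version(2.0, [1.0, 2.0])  # one Liberty-era node remains
    message = downgrade_message({'volume_id': 'abc', 'cascade': True}, pinned)
    assert message == {'volume_id': 'abc'}  # 'cascade' was dropped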
cinder-8.0.0/releasenotes/notes/deprecate-xml-api-bf3e4079f1dc5eae.yaml0000664000567000056710000000014612701406250026567 0ustar jenkinsjenkins00000000000000---
deprecations:
  - The XML API has been marked deprecated and will be removed in a future release.
cinder-8.0.0/releasenotes/notes/replication-v2.1-lefthand-745b72b64e5944c3.yaml0000664000567000056710000000011512701406250027470 0ustar jenkinsjenkins00000000000000---
features:
  - Added v2.1 replication support to the HPE LeftHand driver.
cinder-8.0.0/releasenotes/notes/remove_storwize_npiv-b704ff2d97207666.yaml0000664000567000056710000000012612701406250027213 0ustar jenkinsjenkins00000000000000---
upgrade:
  - Removed the deprecated NPIV options for the Storwize backend driver.
cinder-8.0.0/releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml0000664000567000056710000000073312701406250027302 0ustar jenkinsjenkins00000000000000---
upgrade:
  - Users of the ibmnas driver should switch to using the IBM GPFS driver to enable Cinder access to IBM NAS resources. For details on configuring the IBM GPFS driver, see the GPFS config reference at http://docs.openstack.org/liberty/config-reference/content/GPFS-driver.html
other:
  - Due to the ibmnas (SONAS) driver being rendered redundant by the addition of NFS capabilities to the IBM GPFS driver, the ibmnas driver is being removed in the Mitaka release.
cinder-8.0.0/releasenotes/notes/scaleio-manage-existing-32217f6d1c295193.yaml0000664000567000056710000000012212701406250027313 0ustar jenkinsjenkins00000000000000---
features:
  - Added support for manage/unmanage volume in the ScaleIO driver.
cinder-8.0.0/releasenotes/notes/rebranded-hpe-drivers-caf1dcef1afe37ba.yaml0000664000567000056710000000025512701406250027650 0ustar jenkinsjenkins00000000000000---
upgrade:
  - HP drivers have been rebranded to HPE. Existing configurations will continue to work with the legacy name, but will need to be updated by the next release.
cinder-8.0.0/releasenotes/notes/Dell-SC-v2.1-replication-ef6b1d6a4e2795a0.yaml0000664000567000056710000000012512701406250027277 0ustar jenkinsjenkins00000000000000---
features:
  - Added replication v2.1 support to the Dell Storage Center drivers.
cinder-8.0.0/releasenotes/notes/quota-volume-transfer-abd1f418c6c63db0.yaml0000664000567000056710000000012112701406250027443 0ustar jenkinsjenkins00000000000000---
fixes:
  - Corrected quota usage when transferring a volume between tenants.
cinder-8.0.0/releasenotes/notes/cinder-api-microversions-d2082a095c322ce6.yaml0000664000567000056710000000012412701406250027662 0ustar jenkinsjenkins00000000000000---
features:
  - Added support for API microversions, as well as a /v3 API endpoint.
cinder-8.0.0/releasenotes/notes/pure-v2.1-replication-0246223caaa8a9b5.yaml0000664000567000056710000000014512701406250026773 0ustar jenkinsjenkins00000000000000---
features:
  - Added Cheesecake (v2.1) replication support to the Pure Storage volume drivers.
cinder-8.0.0/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml0000664000567000056710000000006512701406250026260 0ustar jenkinsjenkins00000000000000---
features:
  - Added the ability to back up snapshots.
cinder-8.0.0/releasenotes/notes/cg_api_volume_type-7db1856776e707c7.yaml0000664000567000056710000000011112701406250026570 0ustar jenkinsjenkins00000000000000---
features:
  - The consistency group API now returns volume type IDs.
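(Illustrative aside.) The cinder-api-microversions note above adds a /v3 endpoint negotiated via a microversion header. A hedged example of calling it — the endpoint URL, project ID, and token are placeholders for a real deployment::

    import requests

    CINDER_V3 = 'http://controller:8776/v3/PROJECT_ID'  # placeholder
    headers = {
        'X-Auth-Token': 'TOKEN',                        # placeholder
        'OpenStack-API-Version': 'volume 3.0',          # microversion header
    }

    resp = requests.get(CINDER_V3 + '/volumes', headers=headers)
    print(resp.status_code, resp.headers.get('OpenStack-API-Version'))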
cinder-8.0.0/releasenotes/notes/deprecated-ibm-multipath-f06c0e907a6301de.yaml0000664000567000056710000000011512701406250027702 0ustar jenkinsjenkins00000000000000---
deprecations:
  - Deprecated the IBM driver _multipath_enabled config flags.
cinder-8.0.0/releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml0000664000567000056710000000072112701406250026634 0ustar jenkinsjenkins00000000000000---
features:
  - Added additional metrics reported to the scheduler for Pure volume drivers, for better filtering and weighing functions.
  - Added a config option to enable/disable automatic calculation of the maximum over-subscription ratio for Pure volume drivers. When disabled, the drivers will now respect the max_oversubscription_ratio config option.
fixes:
  - Fixed an issue where Pure volume drivers would ignore the reserved_percentage config option.
cinder-8.0.0/releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml0000664000567000056710000000014112701406250026120 0ustar jenkinsjenkins00000000000000---
fixes:
  - upload-to-image using Image API v2 now correctly handles custom image properties.
cinder-8.0.0/releasenotes/notes/scaleio-remove-force-delete-config-48fae029e3622d6d.yaml0000664000567000056710000000011012701406250031557 0ustar jenkinsjenkins00000000000000---
upgrade:
  - Removed the force_delete option from the ScaleIO configuration.
cinder-8.0.0/releasenotes/notes/vmware-vmdk-manage-existing-0edc20d9d4d19172.yaml0000664000567000056710000000011412701406250030350 0ustar jenkinsjenkins00000000000000---
features:
  - Added support for manage volume in the VMware VMDK driver.
cinder-8.0.0/releasenotes/notes/support-zeromq-messaging-driver-d26a1141290f5548.yaml0000664000567000056710000000013512701406250031110 0ustar jenkinsjenkins00000000000000---
features:
  - Added support for the ZeroMQ messaging driver in a Cinder single-backend config.
cinder-8.0.0/releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml0000664000567000056710000000035712701406250027757 0ustar jenkinsjenkins00000000000000---
upgrade:
  - Do not add or remove volume_type_access for any project while DB migration 62 is running.
  - When running PostgreSQL, all cinder-api services must be upgraded and restarted along with DB migration 62.
cinder-8.0.0/releasenotes/notes/discard-config-option-711a7fbf20685834.yaml0000664000567000056710000000013412701406250027066 0ustar jenkinsjenkins00000000000000---
features:
  - Added a new config option to enable discard (trim/unmap) support for any backend.
cinder-8.0.0/releasenotes/notes/remove_lvmdriver-9c35f83132cd2ac8.yaml0000664000567000056710000000030712701406250026432 0ustar jenkinsjenkins00000000000000---
upgrade:
  - Removed the deprecated LVMISCSIDriver and LVMISERDriver. Deployments should switch to LVMVolumeDriver with the iscsi_helper configuration option set to the desired iSCSI helper.
cinder-8.0.0/releasenotes/notes/add-coho-driver-b4472bff3f64aa41.yaml0000664000567000056710000000007612701406250026070 0ustar jenkinsjenkins00000000000000---
features:
  - Added backend driver for Coho Data storage.
cinder-8.0.0/releasenotes/notes/3par-create-cg-from-source-cg-5634dcf9feb813f6.yaml0000664000567000056710000000017512701406250030466 0ustar jenkinsjenkins00000000000000---
features:
  - Added support for creating a consistency group from a source consistency group in the HPE 3PAR driver.
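(Illustrative aside.) The pure-enhanced-stats note above mentions ``max_oversubscription_ratio`` and ``reserved_percentage``. As a simplified, back-of-the-envelope illustration of how such numbers size a thin-provisioned backend — this is not the scheduler's exact formula::

    def virtual_free_gb(total_gb, provisioned_gb, max_ratio, reserved_pct):
        # Withhold the reserved slice, scale by the over-subscription
        # ratio, then subtract what is already provisioned.
        usable = total_gb * (1.0 - reserved_pct / 100.0)
        return usable * max_ratio - provisioned_gb

    # 100 GiB pool, 10% reserved, 20x over-subscription, 500 GiB provisioned:
    print(virtual_free_gb(100, 500, 20.0, 10))  # -> 1300.0 GiB schedulable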
cinder-8.0.0/releasenotes/notes/nexentastor5_iscsi-e1d88b07d15c660b.yaml0000664000567000056710000000010712701406250026670 0ustar jenkinsjenkins00000000000000---
features:
  - Added backend driver for NexentaStor5 iSCSI storage.
cinder-8.0.0/releasenotes/notes/disco-cinder-driver-9dac5fb04511de1f.yaml0000664000567000056710000000007212701406250027027 0ustar jenkinsjenkins00000000000000---
features:
  - Added backend driver for DISCO storage.
cinder-8.0.0/releasenotes/notes/friendly-zone-names-d5e131d356040de0.yaml0000664000567000056710000000064712701406250026642 0ustar jenkinsjenkins00000000000000---
features:
  - Cinder FC Zone Manager friendly zone names. This feature adds support for Fibre Channel user-friendly zone names, if implemented by the volume driver. If the volume driver passes the host name and storage system to the Fibre Channel Zone Manager in the conn_info structure, the zone manager will use these names when constructing the zone name, providing a user-friendly zone name.
cinder-8.0.0/releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml0000664000567000056710000000025012701406250025550 0ustar jenkinsjenkins00000000000000---
fixes:
  - Cinder will now correctly read Keystone's endpoint for quota calls from the keystone_authtoken.auth_uri config option instead of keymgr.encryption_auth_url.
cinder-8.0.0/releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml0000664000567000056710000000136612701406250031163 0ustar jenkinsjenkins00000000000000---
fixes:
  - |
    Enabled a cloud operator to correctly manage policy for volume type operations. To permit volume type operations for a specific user, you can, for example, do the following:

    * Add a ``storage_type_admin`` role.
    * Add an ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g. ``"admin_or_storage_type_admin": "is_admin:True or role:storage_type_admin",``
    * Modify the rules for types_manage and volume_type_access, e.g. ``"volume_extension:types_manage": "rule:admin_or_storage_type_admin", "volume_extension:volume_type_access:addProjectAccess": "rule:admin_or_storage_type_admin", "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_or_storage_type_admin",``
cinder-8.0.0/releasenotes/notes/delete-volume-with-snapshots-0b104e212d5d36b1.yaml0000664000567000056710000000022212701406250030471 0ustar jenkinsjenkins00000000000000---
features:
  - It is now possible to delete a volume and its snapshots by passing an additional argument to volume delete, "cascade=True".
cinder-8.0.0/releasenotes/notes/vnx-configurable-migration-rate-5e0a2235777c314f.yaml0000664000567000056710000000011012701406250031063 0ustar jenkinsjenkins00000000000000---
features:
  - Configurable migration rate in the VNX driver via metadata.
cinder-8.0.0/releasenotes/notes/vnx_clone_cg-db74ee2ea71bedcb.yaml0000664000567000056710000000012012701406250026063 0ustar jenkinsjenkins00000000000000---
features:
  - Cloning of consistency groups added to the EMC VNX backend driver.
cinder-8.0.0/releasenotes/source/0000775000567000056710000000000012701406543020117 5ustar jenkinsjenkins00000000000000cinder-8.0.0/releasenotes/source/index.rst0000664000567000056710000000017412701406257021764 0ustar jenkinsjenkins00000000000000======================
 Cinder Release Notes
======================

..
toctree:: :maxdepth: 1 liberty unreleased cinder-8.0.0/releasenotes/source/_templates/0000775000567000056710000000000012701406543022254 5ustar jenkinsjenkins00000000000000cinder-8.0.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000012701406250024520 0ustar jenkinsjenkins00000000000000cinder-8.0.0/releasenotes/source/unreleased.rst0000664000567000056710000000016012701406250022770 0ustar jenkinsjenkins00000000000000============================== Current Series Release Notes ============================== .. release-notes:: cinder-8.0.0/releasenotes/source/liberty.rst0000664000567000056710000000022212701406250022312 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty cinder-8.0.0/releasenotes/source/conf.py0000664000567000056710000002156012701406250021415 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Cinder Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 4 17:02:44 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Cinder Release Notes' copyright = u'2015, Cinder Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from cinder.version import version_info as cinder_version # The full version, including alpha/beta/rc tags. release = cinder_version.version_string_with_vcs() # The short X.Y version. version = cinder_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'CinderReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'CinderReleaseNotes.tex', u'Cinder Release Notes Documentation', u'Cinder Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'cinderreleasenotes', u'Cinder Release Notes Documentation', [u'Cinder Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'CinderReleaseNotes', u'Cinder Release Notes Documentation', u'Cinder Developers', 'CinderReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False cinder-8.0.0/releasenotes/source/_static/0000775000567000056710000000000012701406543021545 5ustar jenkinsjenkins00000000000000cinder-8.0.0/releasenotes/source/_static/.placeholder0000664000567000056710000000000012701406250024011 0ustar jenkinsjenkins00000000000000cinder-8.0.0/README.rst0000664000567000056710000000131612701406250015611 0ustar jenkinsjenkins00000000000000====== CINDER ====== You have come across a storage service for an open cloud computing service. It has identified itself as `Cinder`. It was abstracted from the Nova project. 
* Wiki: http://wiki.openstack.org/Cinder
* Developer docs: http://docs.openstack.org/developer/cinder

Getting Started
---------------

If you'd like to run from the master branch, you can clone the git repo:

    git clone https://git.openstack.org/openstack/cinder.git

For developer information please see `HACKING.rst `_.

You can raise bugs here: http://bugs.launchpad.net/cinder

Python client
-------------

https://git.openstack.org/cgit/openstack/python-cinderclient
cinder-8.0.0/setup.py0000664000567000056710000000200412701406250015631 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr>=1.8'],
    pbr=True)
cinder-8.0.0/test-requirements.txt0000664000567000056710000000157112701406250020366 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

# Install bounded pep8/pyflakes first, then let flake8 install
hacking<0.11,>=0.10.0
anyjson>=0.3.3 # BSD
coverage>=3.6 # Apache-2.0
ddt>=1.0.1 # MIT
fixtures>=1.3.1 # Apache-2.0/BSD
mock>=1.2 # BSD
mox3>=0.7.0 # Apache-2.0
PyMySQL>=0.6.2 # MIT License
psycopg2>=2.5 # LGPL/ZPL
oslotest>=1.10.0 # Apache-2.0
sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
python-subunit>=0.0.18 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
testrepository>=0.0.18 # Apache-2.0/BSD
testresources>=0.2.4 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
os-testr>=0.4.1 # Apache-2.0
tempest-lib>=0.14.0 # Apache-2.0
bandit>=0.17.3 # Apache-2.0
reno>=0.1.1 # Apache2
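(Illustrative aside.) The README above points to python-cinderclient. A minimal usage sketch — the credentials, project, and auth URL are placeholders for a real deployment::

    from cinderclient import client

    # Keystone v2 credentials; placeholders only.
    cinder = client.Client('2', 'admin', 'secret', 'demo',
                           'http://controller:5000/v2.0')

    for volume in cinder.volumes.list():
        print(volume.id, volume.status)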